From fe5e8ea403583a9f2d7cc4113dacc8a674fcb391 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 28 Jan 2022 22:01:06 +0100 Subject: [PATCH 0001/1003] refactor: port udp to aquatic_udp_protocol --- Cargo.lock | 12 +- Cargo.toml | 4 +- src/common.rs | 65 ++---- src/http_api_server.rs | 16 +- src/http_server.rs | 13 +- src/lib.rs | 2 - src/response.rs | 126 ----------- src/tracker.rs | 44 ++-- src/udp_server.rs | 469 +++++++++++------------------------------ src/utils.rs | 2 +- 10 files changed, 186 insertions(+), 567 deletions(-) delete mode 100644 src/response.rs diff --git a/Cargo.lock b/Cargo.lock index 5e90db008..af57a3768 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11,6 +11,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "aquatic_udp_protocol" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84c3026324bc5073042edfbc47699cc60c1a9cf24849f5b104c18f98d2ad4175" +dependencies = [ + "byteorder", +] + [[package]] name = "arrayvec" version = "0.5.2" @@ -1733,8 +1742,9 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "2.0.1" +version = "2.1.0" dependencies = [ + "aquatic_udp_protocol", "binascii", "byteorder", "chrono", diff --git a/Cargo.toml b/Cargo.toml index db9b89856..6e78211ea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "torrust-tracker" -version = "2.0.1" +version = "2.1.0" authors = ["Mick van Dijke ", "Naim A. "] description = "A feature rich BitTorrent tracker." 
edition = "2018" @@ -30,3 +30,5 @@ rand = "0.8.4" env_logger = "0.9.0" config = "0.11" derive_more = "0.99" + +aquatic_udp_protocol = "0.1.0" diff --git a/src/common.rs b/src/common.rs index 82ea19ab8..1ee1190ab 100644 --- a/src/common.rs +++ b/src/common.rs @@ -1,4 +1,5 @@ use serde::{Deserialize, Serialize}; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; pub const MAX_PACKET_SIZE: usize = 0xffff; pub const MAX_SCRAPE_TORRENTS: u8 = 74; @@ -14,42 +15,25 @@ pub enum Actions { Error = 3, } -#[repr(u32)] -#[derive(Serialize, Deserialize, Clone, Copy)] -pub enum Events { - None = 0, - Complete = 1, - Started = 2, - Stopped = 3, -} - -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub enum AnnounceEvent { - None, - Completed, +#[derive(Serialize, Deserialize)] +#[serde(remote = "AnnounceEvent")] +pub enum AnnounceEventDef { Started, Stopped, + Completed, + None } -impl AnnounceEvent { - #[inline] - pub fn from_i32(i: i32) -> Self { - match i { - 0 => Self::None, - 1 => Self::Completed, - 2 => Self::Started, - 3 => Self::Stopped, - _ => Self::None, - } - } -} - -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct AnnounceInterval(pub i32); +#[derive(Serialize, Deserialize)] +#[serde(remote = "NumberOfBytes")] +pub struct NumberOfBytesDef(pub i64); #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, Ord)] pub struct InfoHash(pub [u8; 20]); +#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, PartialOrd, Ord)] +pub struct PeerId(pub [u8; 20]); + impl InfoHash { pub fn to_string(&self) -> String { let mut buffer = [0u8; 40]; @@ -146,31 +130,6 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { } } -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct ConnectionId(pub i64); - -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct TransactionId(pub i32); - -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct 
NumberOfBytes(pub i64); - -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct NumberOfPeers(pub i32); - -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct NumberOfDownloads(pub i32); - -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct Port(pub u16); - -#[repr(transparent)] -#[derive(Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug, PartialOrd, Ord)] -pub struct PeerId(pub [u8; 20]); - -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct PeerKey(pub u32); - impl PeerId { pub fn get_client_name(&self) -> Option<&'static str> { if self.0[0] == b'M' { diff --git a/src/http_api_server.rs b/src/http_api_server.rs index 5f7339036..d33fa3ada 100644 --- a/src/http_api_server.rs +++ b/src/http_api_server.rs @@ -4,6 +4,7 @@ use std::cmp::min; use std::collections::{HashMap, HashSet}; use std::sync::Arc; use warp::{filters, reply, reply::Reply, serve, Filter, Server}; +use crate::TorrentPeer; use super::common::*; #[derive(Deserialize, Debug)] @@ -15,14 +16,11 @@ struct TorrentInfoQuery { #[derive(Serialize)] struct Torrent<'a> { info_hash: &'a InfoHash, - #[serde(flatten)] - data: &'a crate::tracker::TorrentEntry, seeders: u32, completed: u32, leechers: u32, - #[serde(skip_serializing_if = "Option::is_none")] - peers: Option>, + peers: Option>, } #[derive(Serialize, Debug)] @@ -87,7 +85,6 @@ pub fn build_server(tracker: Arc) -> Server) -> Server) -> Server = torrent_entry - .get_peers_iter() - .take(1000) - .map(|(peer_id, peer_info)| (peer_id.clone(), peer_info.clone())) - .collect(); + let peers = torrent_entry.get_peers(None); Ok(reply::json(&Torrent { info_hash: &info_hash, - data: torrent_entry, seeders, completed, leechers, diff --git a/src/http_server.rs b/src/http_server.rs index bf1f7f88d..f54f36eeb 100644 --- a/src/http_server.rs +++ b/src/http_server.rs @@ -17,13 +17,13 @@ use super::common::*; #[derive(Deserialize, 
Debug)] pub struct AnnounceRequest { - pub downloaded: NumberOfBytes, - pub uploaded: NumberOfBytes, + pub downloaded: u32, + pub uploaded: u32, pub key: String, pub peer_id: String, pub port: u16, pub info_hash: String, - pub left: NumberOfBytes, + pub left: u32, pub event: Option, pub compact: Option, } @@ -283,7 +283,7 @@ impl HttpServer { Some(v) => AuthKey::from_string(&v) }; - if let Err(e) = self.tracker.authenticate_request(&info_hash.unwrap(), &auth_key).await { + if let Err(e) = self.tracker.authenticate_request(&info_hash.unwrap(), auth_key).await { return match e { TorrentError::TorrentNotWhitelisted => { debug!("Info_hash not whitelisted."); @@ -297,8 +297,13 @@ impl HttpServer { debug!("Peer not authenticated."); Some(HttpServer::send_error("peer not authenticated")) } + _ => { + debug!("Unhandled HTTP error."); + Some(HttpServer::send_error("oops")) + } } } + None } diff --git a/src/lib.rs b/src/lib.rs index 375c0f903..d040d3719 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -4,7 +4,6 @@ pub mod http_server; pub mod tracker; pub mod http_api_server; pub mod common; -pub mod response; pub mod utils; pub mod database; pub mod key_manager; @@ -16,4 +15,3 @@ pub use self::http_server::*; pub use self::tracker::*; pub use self::http_api_server::*; pub use self::common::*; -pub use self::response::*; diff --git a/src/response.rs b/src/response.rs deleted file mode 100644 index 9734e3769..000000000 --- a/src/response.rs +++ /dev/null @@ -1,126 +0,0 @@ -use std; -use std::io::{Write}; -use std::net::{SocketAddr}; -use byteorder::{NetworkEndian, WriteBytesExt}; -use super::common::*; -use std::io; -use crate::TorrentPeer; - -#[derive(PartialEq, Eq, Clone, Debug)] -pub enum UdpResponse { - Connect(UdpConnectionResponse), - Announce(UdpAnnounceResponse), - Scrape(UdpScrapeResponse), - Error(UdpErrorResponse), -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct UdpConnectionResponse { - pub action: Actions, - pub transaction_id: TransactionId, - pub 
connection_id: ConnectionId, -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct UdpAnnounceResponse { - pub action: Actions, - pub transaction_id: TransactionId, - pub interval: u32, - pub leechers: u32, - pub seeders: u32, - pub peers: Vec, -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct UdpScrapeResponse { - pub action: Actions, - pub transaction_id: TransactionId, - pub torrent_stats: Vec, -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct UdpScrapeResponseEntry { - pub seeders: i32, - pub completed: i32, - pub leechers: i32, -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct UdpErrorResponse { - pub action: Actions, - pub transaction_id: TransactionId, - pub message: String, -} - -impl From for UdpResponse { - fn from(r: UdpConnectionResponse) -> Self { - Self::Connect(r) - } -} - -impl From for UdpResponse { - fn from(r: UdpAnnounceResponse) -> Self { - Self::Announce(r) - } -} - -impl From for UdpResponse { - fn from(r: UdpScrapeResponse) -> Self { - Self::Scrape(r) - } -} - -impl From for UdpResponse { - fn from(r: UdpErrorResponse) -> Self { - Self::Error(r) - } -} - -impl UdpResponse { - pub fn write_to_bytes(self, bytes: &mut impl Write) -> Result<(), io::Error> { - match self { - UdpResponse::Connect(r) => { - bytes.write_i32::(0)?; // 0 = connect - bytes.write_i32::(r.transaction_id.0)?; - bytes.write_i64::(r.connection_id.0)?; - }, - UdpResponse::Announce(r) => { - bytes.write_i32::(1)?; // 1 = announce - bytes.write_i32::(r.transaction_id.0)?; - bytes.write_u32::(r.interval)?; - bytes.write_u32::(r.leechers)?; - bytes.write_u32::(r.seeders)?; - - for peer in r.peers { - match peer.peer_addr { - SocketAddr::V4(socket_addr) => { - bytes.write_all(&socket_addr.ip().octets())?; - bytes.write_u16::(socket_addr.port())?; - } - SocketAddr::V6(socket_addr) => { - bytes.write_all(&socket_addr.ip().octets())?; - bytes.write_u16::(socket_addr.port())?; - } - } - } - }, - UdpResponse::Scrape(r) => { - bytes.write_i32::(2)?; // 2 = 
scrape - bytes.write_i32::(r.transaction_id.0)?; - - for torrent_stat in r.torrent_stats { - bytes.write_i32::(torrent_stat.seeders)?; - bytes.write_i32::(torrent_stat.completed)?; - bytes.write_i32::(torrent_stat.leechers)?; - } - }, - UdpResponse::Error(r) => { - bytes.write_i32::(3)?; - bytes.write_i32::(r.transaction_id.0)?; - bytes.write_all(r.message.as_bytes())?; - }, - } - - Ok(()) - } -} diff --git a/src/tracker.rs b/src/tracker.rs index 3e6bcca3e..768a4d03f 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -1,14 +1,16 @@ use serde::{Deserialize, Serialize}; +use serde; use std::borrow::Cow; use std::collections::BTreeMap; use tokio::sync::RwLock; -use crate::common::{NumberOfBytes, InfoHash}; +use crate::common::{InfoHash}; use super::common::*; use std::net::{SocketAddr, IpAddr}; -use crate::{Configuration, http_server, key_manager, udp_server}; +use crate::{Configuration, http_server, key_manager}; use std::collections::btree_map::Entry; use crate::database::SqliteDatabase; use std::sync::Arc; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use log::debug; use crate::key_manager::{AuthKey}; use r2d2_sqlite::rusqlite; @@ -37,20 +39,22 @@ pub enum TrackerMode { #[derive(PartialEq, Eq, Debug, Clone, Serialize)] pub struct TorrentPeer { - #[serde(skip)] pub peer_id: PeerId, - #[serde(rename = "ip")] pub peer_addr: SocketAddr, #[serde(serialize_with = "ser_instant")] pub updated: std::time::Instant, + #[serde(with = "NumberOfBytesDef")] pub uploaded: NumberOfBytes, + #[serde(with = "NumberOfBytesDef")] pub downloaded: NumberOfBytes, + #[serde(with = "NumberOfBytesDef")] pub left: NumberOfBytes, + #[serde(with = "AnnounceEventDef")] pub event: AnnounceEvent, } impl TorrentPeer { - pub fn from_udp_announce_request(announce_request: &udp_server::AnnounceRequest, remote_addr: SocketAddr, peer_addr: Option) -> Self { + pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_addr: SocketAddr, peer_addr: 
Option) -> Self { // Potentially substitute localhost IP with external IP let peer_addr = match peer_addr { None => SocketAddr::new(IpAddr::from(remote_addr.ip()), announce_request.port.0), @@ -64,7 +68,7 @@ impl TorrentPeer { }; TorrentPeer { - peer_id: announce_request.peer_id, + peer_id: PeerId(announce_request.peer_id.0), peer_addr, updated: std::time::Instant::now(), uploaded: announce_request.bytes_uploaded, @@ -102,9 +106,9 @@ impl TorrentPeer { peer_id: PeerId::from(announce_request.peer_id.as_bytes()), peer_addr, updated: std::time::Instant::now(), - uploaded: announce_request.uploaded, - downloaded: announce_request.downloaded, - left: announce_request.left, + uploaded: NumberOfBytes(announce_request.uploaded as i64), + downloaded: NumberOfBytes(announce_request.downloaded as i64), + left: NumberOfBytes(announce_request.left as i64), event } } @@ -151,7 +155,7 @@ impl TorrentEntry { } } - pub fn get_peers(&self, remote_addr: &std::net::SocketAddr) -> Vec { + pub fn get_peers(&self, remote_addr: Option<&std::net::SocketAddr>) -> Vec { let mut list = Vec::new(); for (_, peer) in self .peers @@ -161,8 +165,10 @@ impl TorrentEntry { { // skip ip address of client - if peer.peer_addr == *remote_addr { - //continue; + if let Some(remote_addr) = remote_addr { + if peer.peer_addr == *remote_addr { + continue; + } } list.push(peer.clone()); @@ -170,10 +176,6 @@ impl TorrentEntry { list } - pub fn get_peers_iter(&self) -> impl Iterator { - self.peers.iter() - } - pub fn update_torrent_stats_with_peer(&mut self, peer: &TorrentPeer, peer_old: Option) { match peer_old { None => { @@ -243,6 +245,8 @@ pub enum TorrentError { TorrentNotWhitelisted, PeerNotAuthenticated, PeerKeyNotValid, + NoPeersFound, + CouldNotSendResponse } pub struct TorrentTracker { @@ -282,7 +286,7 @@ impl TorrentTracker { key_manager::verify_auth_key(&db_key) } - pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { + pub async fn 
authenticate_request(&self, info_hash: &InfoHash, key: Option) -> Result<(), TorrentError> { match self.config.mode { TrackerMode::PublicMode => Ok(()), TrackerMode::ListedMode => { @@ -295,7 +299,7 @@ impl TorrentTracker { TrackerMode::PrivateMode => { match key { Some(key) => { - if self.verify_auth_key(key).await.is_err() { + if self.verify_auth_key(&key).await.is_err() { return Err(TorrentError::PeerKeyNotValid) } @@ -309,7 +313,7 @@ impl TorrentTracker { TrackerMode::PrivateListedMode => { match key { Some(key) => { - if self.verify_auth_key(key).await.is_err() { + if self.verify_auth_key(&key).await.is_err() { return Err(TorrentError::PeerKeyNotValid) } @@ -356,7 +360,7 @@ impl TorrentTracker { None } Some(entry) => { - Some(entry.get_peers(peer_addr)) + Some(entry.get_peers(Some(peer_addr))) } } } diff --git a/src/udp_server.rs b/src/udp_server.rs index 079ff67a0..cf0474f7c 100644 --- a/src/udp_server.rs +++ b/src/udp_server.rs @@ -1,236 +1,31 @@ use log::{debug}; use std; -use std::convert::TryInto; -use std::io; -use std::net::{Ipv4Addr, SocketAddr}; +use std::net::{SocketAddr}; use std::sync::Arc; -use std::io::{Cursor, Read}; +use std::io::{Cursor}; +use aquatic_udp_protocol::{AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, IpVersion, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId}; use tokio::net::UdpSocket; -use byteorder::{NetworkEndian, ReadBytesExt}; use super::common::*; -use crate::response::*; use crate::utils::get_connection_id; use crate::tracker::TorrentTracker; -use crate::{TorrentPeer, TrackerMode, TorrentError}; -use crate::key_manager::AuthKey; - -#[derive(PartialEq, Eq, Clone, Debug)] -pub enum Request { - Connect(ConnectRequest), - Announce(AnnounceRequest), - Scrape(ScrapeRequest), -} +use crate::{TorrentPeer, TorrentError}; -impl From for Request { - fn from(r: ConnectRequest) -> Self { - 
Self::Connect(r) - } +struct RequestError { + error: TorrentError, + transaction_id: TransactionId } -impl From for Request { - fn from(r: AnnounceRequest) -> Self { - Self::Announce(r) - } +struct AnnounceRequestWrapper { + announce_request: AnnounceRequest, + info_hash: super::common::InfoHash, } -impl From for Request { - fn from(r: ScrapeRequest) -> Self { - Self::Scrape(r) - } -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct ConnectRequest { - pub transaction_id: TransactionId, -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct AnnounceRequest { - pub connection_id: ConnectionId, - pub transaction_id: TransactionId, - pub info_hash: InfoHash, - pub peer_id: PeerId, - pub bytes_downloaded: NumberOfBytes, - pub bytes_uploaded: NumberOfBytes, - pub bytes_left: NumberOfBytes, - pub event: AnnounceEvent, - pub ip_address: Option, - pub key: PeerKey, - pub peers_wanted: NumberOfPeers, - pub port: Port, - pub auth_key: Option, -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct ScrapeRequest { - pub connection_id: ConnectionId, - pub transaction_id: TransactionId, - pub info_hashes: Vec, -} - -#[derive(Debug)] -pub struct RequestParseError { - pub transaction_id: Option, - pub message: Option, - pub error: Option, -} - -impl RequestParseError { - pub fn new(err: io::Error, transaction_id: i32) -> Self { - Self { - transaction_id: Some(TransactionId(transaction_id)), - message: None, - error: Some(err), - } - } - pub fn io(err: io::Error) -> Self { - Self { - transaction_id: None, - message: None, - error: Some(err), - } - } - pub fn text(transaction_id: i32, message: &str) -> Self { - Self { - transaction_id: Some(TransactionId(transaction_id)), - message: Some(message.to_string()), - error: None, - } - } -} - -impl Request { - pub fn from_bytes(bytes: &[u8]) -> Result { - let mut cursor = Cursor::new(bytes); - - let connection_id = cursor - .read_i64::() - .map_err(RequestParseError::io)?; - let action = cursor - .read_i32::() - 
.map_err(RequestParseError::io)?; - let transaction_id = cursor - .read_i32::() - .map_err(RequestParseError::io)?; - - - - match action { - // Connect - 0 => { - if connection_id == PROTOCOL_ID { - Ok((ConnectRequest { - transaction_id: TransactionId(transaction_id), - }) - .into()) - } else { - Err(RequestParseError::text( - transaction_id, - "Protocol identifier missing", - )) - } - } - - // Announce - 1 => { - let mut info_hash = [0; 20]; - let mut peer_id = [0; 20]; - let mut ip = [0; 4]; - - cursor - .read_exact(&mut info_hash) - .map_err(|err| RequestParseError::new(err, transaction_id))?; - cursor - .read_exact(&mut peer_id) - .map_err(|err| RequestParseError::new(err, transaction_id))?; - - let bytes_downloaded = cursor - .read_i64::() - .map_err(|err| RequestParseError::new(err, transaction_id))?; - let bytes_left = cursor - .read_i64::() - .map_err(|err| RequestParseError::new(err, transaction_id))?; - let bytes_uploaded = cursor - .read_i64::() - .map_err(|err| RequestParseError::new(err, transaction_id))?; - let event = cursor - .read_i32::() - .map_err(|err| RequestParseError::new(err, transaction_id))?; - - cursor - .read_exact(&mut ip) - .map_err(|err| RequestParseError::new(err, transaction_id))?; - - let key = cursor - .read_u32::() - .map_err(|err| RequestParseError::new(err, transaction_id))?; - let peers_wanted = cursor - .read_i32::() - .map_err(|err| RequestParseError::new(err, transaction_id))?; - let port = cursor - .read_u16::() - .map_err(|err| RequestParseError::new(err, transaction_id))?; - - // BEP 41: add auth key if available - let auth_key: Option = if bytes.len() > 98 + AUTH_KEY_LENGTH { - let mut key_buffer = [0; AUTH_KEY_LENGTH]; - // key should be the last bytes - cursor.set_position((bytes.len() - AUTH_KEY_LENGTH) as u64); - if cursor.read_exact(&mut key_buffer).is_ok() { - debug!("AuthKey buffer: {:?}", key_buffer); - AuthKey::from_buffer(key_buffer) - } else { - None - } - } else { - None - }; - - let opt_ip = if ip == [0; 4] 
{ - None - } else { - Some(Ipv4Addr::from(ip)) - }; - - Ok((AnnounceRequest { - connection_id: ConnectionId(connection_id), - transaction_id: TransactionId(transaction_id), - info_hash: InfoHash(info_hash), - peer_id: PeerId(peer_id), - bytes_downloaded: NumberOfBytes(bytes_downloaded), - bytes_uploaded: NumberOfBytes(bytes_uploaded), - bytes_left: NumberOfBytes(bytes_left), - event: AnnounceEvent::from_i32(event), - ip_address: opt_ip, - key: PeerKey(key), - peers_wanted: NumberOfPeers(peers_wanted), - port: Port(port), - auth_key, - }) - .into()) - } - - // Scrape - 2 => { - let position = cursor.position() as usize; - let inner = cursor.into_inner(); - - let info_hashes = (&inner[position..]) - .chunks_exact(20) - .take(MAX_SCRAPE_TORRENTS as usize) - .map(|chunk| InfoHash(chunk.try_into().unwrap())) - .collect(); - - Ok((ScrapeRequest { - connection_id: ConnectionId(connection_id), - transaction_id: TransactionId(transaction_id), - info_hashes, - }) - .into()) - } - - _ => Err(RequestParseError::text(transaction_id, "Invalid action")), +impl AnnounceRequestWrapper { + pub fn new(announce_request: AnnounceRequest) -> Self { + AnnounceRequestWrapper { + announce_request: announce_request.clone(), + info_hash: InfoHash(announce_request.info_hash.0) } } } @@ -250,51 +45,6 @@ impl UdpServer { }) } - pub async fn authenticate_announce_request(&self, announce_request: &AnnounceRequest) -> Result<(), TorrentError> { - match self.tracker.config.mode { - TrackerMode::PublicMode => Ok(()), - TrackerMode::ListedMode => { - if !self.tracker.is_info_hash_whitelisted(&announce_request.info_hash).await { - return Err(TorrentError::TorrentNotWhitelisted) - } - - Ok(()) - } - TrackerMode::PrivateMode => { - match &announce_request.auth_key { - Some(auth_key) => { - if self.tracker.verify_auth_key(auth_key).await.is_err() { - return Err(TorrentError::PeerKeyNotValid) - } - - Ok(()) - } - None => { - return Err(TorrentError::PeerNotAuthenticated) - } - } - } - 
TrackerMode::PrivateListedMode => { - match &announce_request.auth_key { - Some(auth_key) => { - if self.tracker.verify_auth_key(auth_key).await.is_err() { - return Err(TorrentError::PeerKeyNotValid) - } - - if !self.tracker.is_info_hash_whitelisted(&announce_request.info_hash).await { - return Err(TorrentError::TorrentNotWhitelisted) - } - - Ok(()) - } - None => { - return Err(TorrentError::PeerNotAuthenticated) - } - } - } - } - } - pub async fn accept_packets(self) -> Result<(), std::io::Error> { let tracker = Arc::new(self); @@ -311,38 +61,12 @@ impl UdpServer { } async fn handle_packet(&self, remote_addr: SocketAddr, payload: &[u8]) { - let request = Request::from_bytes(&payload[..payload.len()]); + let request = Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS); match request { Ok(request) => { debug!("New request: {:?}", request); - - // todo: check for expired connection_id - match request { - Request::Connect(r) => self.handle_connect(remote_addr, r).await, - Request::Announce(r) => { - match self.tracker.authenticate_request(&r.info_hash, &r.auth_key).await { - Ok(()) => self.handle_announce(remote_addr, r).await, - Err(e) => { - match e { - TorrentError::TorrentNotWhitelisted => { - debug!("Info_hash not whitelisted."); - self.send_error(remote_addr, &r.transaction_id, "torrent not whitelisted").await; - } - TorrentError::PeerKeyNotValid => { - debug!("Peer key not valid."); - self.send_error(remote_addr, &r.transaction_id, "peer key not valid").await; - } - TorrentError::PeerNotAuthenticated => { - debug!("Peer not authenticated."); - self.send_error(remote_addr, &r.transaction_id, "peer not authenticated").await; - } - } - } - } - }, - Request::Scrape(r) => self.handle_scrape(remote_addr, r).await - } + self.handle_request(request, remote_addr).await; } Err(err) => { debug!("request_from_bytes error: {:?}", err); @@ -350,94 +74,148 @@ impl UdpServer { } } - async fn handle_connect(&self, remote_addr: SocketAddr, request: 
ConnectRequest) { + async fn handle_request(&self, request: Request, remote_addr: SocketAddr) { + // todo: check for expired connection_id + let request_result = match request { + Request::Connect(connect_request) => { + self.handle_connect(remote_addr, &connect_request).await + .map_err(|error| RequestError { error, transaction_id: connect_request.transaction_id }) + } + Request::Announce(announce_request) => { + self.handle_announce(remote_addr, &announce_request).await + .map_err(|error| RequestError { error, transaction_id: announce_request.transaction_id }) + } + Request::Scrape(scrape_request) => { + self.handle_scrape(&scrape_request).await + .map_err(|error| RequestError { error, transaction_id: scrape_request.transaction_id }) + } + }; + + match request_result { + Ok(response) => { + let _ = self.send_response(remote_addr, response).await; + } + Err(request_error) => { + let _ = self.handle_error(request_error.error, remote_addr, request_error.transaction_id).await; + } + } + } + + async fn handle_connect(&self, remote_addr: SocketAddr, request: &ConnectRequest) -> Result { let connection_id = get_connection_id(&remote_addr); - let response = UdpResponse::from(UdpConnectionResponse { - action: Actions::Connect, + let response = Response::from(ConnectResponse { transaction_id: request.transaction_id, connection_id, }); - let _ = self.send_response(remote_addr, response).await; + Ok(response) } - async fn handle_announce(&self, remote_addr: SocketAddr, request: AnnounceRequest) { - let peer = TorrentPeer::from_udp_announce_request(&request, remote_addr, self.tracker.config.get_ext_ip()); + async fn handle_announce(&self, remote_addr: SocketAddr, announce_request: &AnnounceRequest) -> Result { + let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request.clone()); + self.tracker.authenticate_request(&wrapped_announce_request.info_hash, None).await?; + + let peer = 
TorrentPeer::from_udp_announce_request(&wrapped_announce_request.announce_request, remote_addr, self.tracker.config.get_ext_ip()); - match self.tracker.update_torrent_with_peer_and_get_stats(&request.info_hash, &peer).await { + return match self.tracker.update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer).await { Ok(torrent_stats) => { // get all peers excluding the client_addr - let peers = match self.tracker.get_torrent_peers(&request.info_hash, &peer.peer_addr).await { + let peers = match self.tracker.get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr).await { Some(v) => v, None => { - debug!("announce: No peers found."); - return; + return Err(TorrentError::NoPeersFound); } }; - let response = UdpResponse::from(UdpAnnounceResponse { - action: Actions::Announce, - transaction_id: request.transaction_id, - interval: self.tracker.config.udp_tracker.announce_interval, - leechers: torrent_stats.leechers, - seeders: torrent_stats.seeders, - peers, + let response = Response::from(AnnounceResponse { + transaction_id: wrapped_announce_request.announce_request.transaction_id, + announce_interval: AnnounceInterval(self.tracker.config.udp_tracker.announce_interval as i32), + leechers: NumberOfPeers(torrent_stats.leechers as i32), + seeders: NumberOfPeers(torrent_stats.seeders as i32), + peers: peers.iter().map(|peer| + ResponsePeer { + ip_address: peer.peer_addr.ip(), + port: Port(peer.peer_addr.port()) + }).collect() }); - let _ = self.send_response(remote_addr, response).await; - } - Err(e) => { - debug!("{:?}", e); - self.send_error(remote_addr, &request.transaction_id, "error adding torrent").await; + Ok(response) } + Err(e) => Err(e) } } - async fn handle_scrape(&self, remote_addr: SocketAddr, request: ScrapeRequest) { - let mut scrape_response = UdpScrapeResponse { - action: Actions::Scrape, - transaction_id: request.transaction_id, - torrent_stats: Vec::new(), - }; - + async fn handle_scrape(&self, request: 
&ScrapeRequest) -> Result { let db = self.tracker.get_torrents().await; + let mut torrent_stats: Vec = Vec::new(); + for info_hash in request.info_hashes.iter() { + let info_hash = InfoHash(info_hash.0); let scrape_entry = match db.get(&info_hash) { Some(torrent_info) => { let (seeders, completed, leechers) = torrent_info.get_stats(); - UdpScrapeResponseEntry { - seeders: seeders as i32, - completed: completed as i32, - leechers: leechers as i32, + TorrentScrapeStatistics { + seeders: NumberOfPeers(seeders as i32), + completed: NumberOfDownloads(completed as i32), + leechers: NumberOfPeers(leechers as i32), } } None => { - UdpScrapeResponseEntry { - seeders: 0, - completed: 0, - leechers: 0, + TorrentScrapeStatistics { + seeders: NumberOfPeers(0), + completed: NumberOfDownloads(0), + leechers: NumberOfPeers(0), } } }; - scrape_response.torrent_stats.push(scrape_entry); + torrent_stats.push(scrape_entry); } - let response = UdpResponse::from(scrape_response); + let response = Response::from(ScrapeResponse { + transaction_id: request.transaction_id, + torrent_stats + }); + + Ok(response) + } + + async fn handle_error(&self, e: TorrentError, remote_addr: SocketAddr, tx_id: TransactionId) { + let mut err_msg = "oops"; - let _ = self.send_response(remote_addr, response).await; + match e { + TorrentError::TorrentNotWhitelisted => { + debug!("Info_hash not whitelisted."); + err_msg = "info hash not whitelisted"; + } + TorrentError::PeerKeyNotValid => { + debug!("Peer key not valid."); + err_msg = "peer key not valid"; + } + TorrentError::PeerNotAuthenticated => { + debug!("Peer not authenticated."); + err_msg = "peer not authenticated"; + } + TorrentError::NoPeersFound => { + debug!("No peers found."); + err_msg = "no peers found"; + } + _ => {} + } + + self.send_error(remote_addr, tx_id, err_msg).await; } - async fn send_response(&self, remote_addr: SocketAddr, response: UdpResponse) -> Result { + async fn send_response(&self, remote_addr: SocketAddr, response: Response) 
-> Result { debug!("sending response to: {:?}", &remote_addr); let buffer = vec![0u8; MAX_PACKET_SIZE]; let mut cursor = Cursor::new(buffer); - match response.write_to_bytes(&mut cursor) { + match response.write(&mut cursor, IpVersion::IPv4) { Ok(_) => { let position = cursor.position() as usize; let inner = cursor.get_ref(); @@ -458,6 +236,15 @@ impl UdpServer { } } + async fn send_error(&self, remote_addr: SocketAddr, transaction_id: TransactionId, error_msg: &str) { + let response = Response::from(ErrorResponse { + transaction_id, + message: error_msg.to_string(), + }); + + let _ = self.send_response(remote_addr, response).await; + } + async fn send_packet(&self, remote_addr: &SocketAddr, payload: &[u8]) -> Result { match self.socket.send_to(payload, remote_addr).await { Err(err) => { @@ -467,16 +254,4 @@ impl UdpServer { Ok(sz) => Ok(sz), } } - - async fn send_error(&self, remote_addr: SocketAddr, transaction_id: &TransactionId, error_msg: &str) { - let error_response = UdpErrorResponse { - action: Actions::Error, - transaction_id: transaction_id.clone(), - message: error_msg.to_string(), - }; - - let response = UdpResponse::from(error_response); - - let _ = self.send_response(remote_addr, response).await; - } } diff --git a/src/utils.rs b/src/utils.rs index 11c61e4fb..5790e6067 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,8 +1,8 @@ use std::net::SocketAddr; -use crate::common::*; use std::time::SystemTime; use std::error::Error; use std::fmt::Write; +use aquatic_udp_protocol::ConnectionId; pub fn get_connection_id(remote_address: &SocketAddr) -> ConnectionId { match std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH) { From 713cfe4811c7a564cab21df9d9920d187a968036 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 29 Jan 2022 16:29:47 +0100 Subject: [PATCH 0002/1003] workflow: disable binary releases until properly licensed --- .github/workflows/test_build_release.yml | 26 ++++++++++++------------ NOTICE | 0 2 files changed, 13 
insertions(+), 13 deletions(-) create mode 100644 NOTICE diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 344b8a025..45bf94894 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -42,16 +42,16 @@ jobs: name: torrust-tracker path: ./target/release/torrust-tracker - release: - needs: build - runs-on: ubuntu-latest - steps: - - name: Download build artifact - uses: actions/download-artifact@v2 - with: - name: torrust-tracker - - name: Release - uses: softprops/action-gh-release@v1 - with: - files: | - torrust-tracker +# release: +# needs: build +# runs-on: ubuntu-latest +# steps: +# - name: Download build artifact +# uses: actions/download-artifact@v2 +# with: +# name: torrust-tracker +# - name: Release +# uses: softprops/action-gh-release@v1 +# with: +# files: | +# torrust-tracker diff --git a/NOTICE b/NOTICE new file mode 100644 index 000000000..e69de29bb From d5cd825c3fb0d9ff44fc9eee2dbf8e8484b4b09e Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 29 Jan 2022 16:35:36 +0100 Subject: [PATCH 0003/1003] workflow: enable release without binary --- .github/workflows/test_build_release.yml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 45bf94894..7be5626e5 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -42,16 +42,16 @@ jobs: name: torrust-tracker path: ./target/release/torrust-tracker -# release: -# needs: build -# runs-on: ubuntu-latest -# steps: -# - name: Download build artifact -# uses: actions/download-artifact@v2 -# with: -# name: torrust-tracker -# - name: Release -# uses: softprops/action-gh-release@v1 + release: + needs: build + runs-on: ubuntu-latest + steps: + - name: Download build artifact + uses: actions/download-artifact@v2 + with: + name: torrust-tracker + - 
name: Release + uses: softprops/action-gh-release@v1 # with: # files: | # torrust-tracker From bc5b17cb60363a520a38bbb1ac3913bfd4b4240b Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 29 Jan 2022 16:55:23 +0100 Subject: [PATCH 0004/1003] updated README.md --- README.md | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 9e09a7739..087edc6fd 100644 --- a/README.md +++ b/README.md @@ -35,29 +35,24 @@ cd torrust-tracker cargo build --release ``` -3. Copy binaries: `torrust-tracker/target/torrust-tracker` to a new folder. - ### Usage -1. Navigate to the folder you put the torrust-tracker binaries in. - - -2. Run the torrust-tracker once to create the `config.toml` file: +1. Run the torrust-tracker once to create the `config.toml` file: ```bash -./torrust-tracker +./target/release/torrust-tracker ``` -3. Edit the newly created config.toml file in the same folder as your torrust-tracker binaries according to your liking. See [configuration documentation](https://torrust.github.io/torrust-documentation/torrust-tracker/config/). +2. Edit the newly created config.toml file according to your liking, see [configuration documentation](https://torrust.github.io/torrust-documentation/torrust-tracker/config/). -4. Run the torrust-tracker again: +3. Run the torrust-tracker again: ```bash -./torrust-tracker +./target/release/torrust-tracker ``` ### Tracker URL -Your tracker will be `udp://tracker-ip:port/announce` or `https://tracker-ip:port/announce` depending on your tracker mode. -In private mode, tracker keys are added after the tracker URL like: `https://tracker-ip:port/announce/tracker-key`. +Your tracker announce URL will be **udp://{tracker-ip:port}** or **https://{tracker-ip:port}/announce** depending on your tracker mode. +In private & private_listed mode, tracker keys are added after the tracker URL like: **https://{tracker-ip:port}/announce/{key}**. 
### Built-in API Read the API documentation [here](https://torrust.github.io/torrust-documentation/torrust-tracker/api/). From 1782ba530a20255dde99977623047e7f77b95314 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 29 Jan 2022 21:02:26 +0100 Subject: [PATCH 0005/1003] fix: panic when trying to run private mode on udp --- src/config.rs | 51 ++++++++++++++++++++++++++++++++---------- src/http_api_server.rs | 2 +- src/http_server.rs | 2 +- src/lib.rs | 2 ++ src/main.rs | 22 ++++++++---------- src/tracker.rs | 5 ++--- 6 files changed, 54 insertions(+), 30 deletions(-) diff --git a/src/config.rs b/src/config.rs index 9a7e47e37..c2b335d5d 100644 --- a/src/config.rs +++ b/src/config.rs @@ -9,6 +9,12 @@ use std::path::Path; use std::str::FromStr; use config::{ConfigError, Config, File}; +#[derive(Serialize, Deserialize, PartialEq)] +pub enum TrackerServer { + UDP, + HTTP +} + #[derive(Serialize, Deserialize)] pub struct UdpTrackerConfig { pub bind_address: String, @@ -48,21 +54,23 @@ pub struct Configuration { pub cleanup_interval: Option, pub external_ip: Option, pub udp_tracker: UdpTrackerConfig, - pub http_tracker: Option, - pub http_api: Option, + pub http_tracker: HttpTrackerConfig, + pub http_api: HttpApiConfig, } #[derive(Debug)] pub enum ConfigurationError { IOError(std::io::Error), ParseError(toml::de::Error), + TrackerModeIncompatible, } impl std::fmt::Display for ConfigurationError { - fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { - ConfigurationError::IOError(e) => e.fmt(formatter), - ConfigurationError::ParseError(e) => e.fmt(formatter), + ConfigurationError::IOError(e) => e.fmt(f), + ConfigurationError::ParseError(e) => e.fmt(f), + _ => write!(f, "{:?}", self) } } } @@ -125,20 +133,29 @@ impl Configuration { bind_address: String::from("0.0.0.0:6969"), announce_interval: 120, }, - http_tracker: Option::from(HttpTrackerConfig { + http_tracker: 
HttpTrackerConfig { enabled: false, bind_address: String::from("0.0.0.0:7878"), announce_interval: 120, ssl_enabled: false, ssl_cert_path: None, ssl_key_path: None - }), - http_api: Option::from(HttpApiConfig { + }, + http_api: HttpApiConfig { enabled: true, bind_address: String::from("127.0.0.1:1212"), access_tokens: [(String::from("admin"), String::from("MyAccessToken"))].iter().cloned().collect(), - }), + }, + } + } + + pub fn verify(&self) -> Result<(), ConfigurationError> { + // UDP is not secure for sending private keys + if (self.mode == TrackerMode::PrivateMode || self.mode == TrackerMode::PrivateListedMode) && self.get_tracker_server() == TrackerServer::UDP { + return Err(ConfigurationError::TrackerModeIncompatible) } + + Ok(()) } pub fn load_from_file() -> Result { @@ -156,9 +173,11 @@ impl Configuration { return Err(ConfigError::Message(format!("Please edit the config.TOML in the root folder and restart the tracker."))) } - match config.try_into() { - Ok(data) => Ok(data), - Err(e) => Err(ConfigError::Message(format!("Errors while processing config: {}.", e))), + let torrust_config: Configuration = config.try_into().map_err(|e| ConfigError::Message(format!("Errors while processing config: {}.", e)))?; + + match torrust_config.verify() { + Ok(_) => Ok(torrust_config), + Err(e) => Err(ConfigError::Message(format!("Errors while processing config: {}.", e))) } } @@ -167,4 +186,12 @@ impl Configuration { fs::write("config.toml", toml_string).expect("Could not write to file!"); Ok(()) } + + pub fn get_tracker_server(&self) -> TrackerServer { + if self.http_tracker.enabled { + TrackerServer::HTTP + } else { + TrackerServer::UDP + } + } } diff --git a/src/http_api_server.rs b/src/http_api_server.rs index d33fa3ada..f87f460a4 100644 --- a/src/http_api_server.rs +++ b/src/http_api_server.rs @@ -224,7 +224,7 @@ pub fn build_server(tracker: Arc) -> Server { start_udp_tracker_server(&config.udp_tracker, tracker.clone()).await } - } else { - 
start_udp_tracker_server(&config.udp_tracker, tracker.clone()).await + TrackerServer::HTTP => { + start_http_tracker_server(&config.http_tracker, tracker.clone()) + } }; let ctrl_c = tokio::signal::ctrl_c(); - tokio::select! { _ = _tracker_server => { panic!("Tracker server exited.") }, _ = ctrl_c => { info!("Torrust shutting down..") } diff --git a/src/tracker.rs b/src/tracker.rs index 768a4d03f..86a2d2e7e 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -3,10 +3,9 @@ use serde; use std::borrow::Cow; use std::collections::BTreeMap; use tokio::sync::RwLock; -use crate::common::{InfoHash}; -use super::common::*; +use crate::common::{InfoHash, NumberOfBytesDef, AnnounceEventDef, PeerId}; use std::net::{SocketAddr, IpAddr}; -use crate::{Configuration, http_server, key_manager}; +use crate::{Configuration, http_server, key_manager, MAX_SCRAPE_TORRENTS}; use std::collections::btree_map::Entry; use crate::database::SqliteDatabase; use std::sync::Arc; From 5546898f7df5557a2f2f1a0381370ab8c4785144 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 29 Jan 2022 22:13:14 +0100 Subject: [PATCH 0006/1003] refactor: separating tracker servers --- src/lib.rs | 10 +- src/main.rs | 2 +- src/torrust_http_tracker/mod.rs | 7 + src/torrust_http_tracker/request.rs | 14 ++ src/torrust_http_tracker/response.rs | 94 +++++++++++++ .../server.rs} | 126 +----------------- src/torrust_udp_tracker/mod.rs | 1 + .../server.rs} | 12 +- src/tracker.rs | 10 +- 9 files changed, 137 insertions(+), 139 deletions(-) create mode 100644 src/torrust_http_tracker/mod.rs create mode 100644 src/torrust_http_tracker/request.rs create mode 100644 src/torrust_http_tracker/response.rs rename src/{http_server.rs => torrust_http_tracker/server.rs} (76%) create mode 100644 src/torrust_udp_tracker/mod.rs rename src/{udp_server.rs => torrust_udp_tracker/server.rs} (98%) diff --git a/src/lib.rs b/src/lib.rs index 58f1d4dac..c055cfae4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,6 +1,4 @@ pub mod config; -pub mod 
udp_server; -pub mod http_server; pub mod tracker; pub mod http_api_server; pub mod common; @@ -8,12 +6,12 @@ pub mod utils; pub mod database; pub mod key_manager; pub mod logging; -pub mod udp_tracker; -mod http_tracker; +pub mod torrust_udp_tracker; +pub mod torrust_http_tracker; pub use self::config::*; -pub use self::udp_server::*; -pub use self::http_server::*; +pub use torrust_udp_tracker::server::*; +pub use torrust_http_tracker::server::*; pub use self::tracker::*; pub use self::http_api_server::*; pub use self::common::*; diff --git a/src/main.rs b/src/main.rs index cfb5365ed..45b42ef63 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,7 +2,7 @@ use log::{info}; use torrust_tracker::{http_api_server, Configuration, TorrentTracker, UdpServer, HttpTrackerConfig, UdpTrackerConfig, HttpApiConfig, logging, TrackerServer}; use std::sync::Arc; use tokio::task::JoinHandle; -use torrust_tracker::http_server::HttpServer; +use torrust_tracker::torrust_http_tracker::server::HttpServer; #[tokio::main] async fn main() { diff --git a/src/torrust_http_tracker/mod.rs b/src/torrust_http_tracker/mod.rs new file mode 100644 index 000000000..236f6e996 --- /dev/null +++ b/src/torrust_http_tracker/mod.rs @@ -0,0 +1,7 @@ +pub mod server; +pub mod request; +pub mod response; + +pub use self::server::*; +pub use self::request::*; +pub use self::response::*; diff --git a/src/torrust_http_tracker/request.rs b/src/torrust_http_tracker/request.rs new file mode 100644 index 000000000..d88f36d3a --- /dev/null +++ b/src/torrust_http_tracker/request.rs @@ -0,0 +1,14 @@ +use serde::{Deserialize}; + +#[derive(Deserialize)] +pub struct AnnounceRequest { + pub downloaded: u32, + pub uploaded: u32, + pub key: String, + pub peer_id: String, + pub port: u16, + pub info_hash: String, + pub left: u32, + pub event: Option, + pub compact: Option, +} diff --git a/src/torrust_http_tracker/response.rs b/src/torrust_http_tracker/response.rs new file mode 100644 index 000000000..a30fc89f6 --- /dev/null +++ 
b/src/torrust_http_tracker/response.rs @@ -0,0 +1,94 @@ +use std::collections::HashMap; +use std::error::Error; +use std::io::Write; +use std::net::IpAddr; +use serde::{Serialize}; +use warp::http::Response; + +#[derive(Serialize)] +pub struct Peer { + pub peer_id: String, + pub ip: IpAddr, + pub port: u16, +} + +#[derive(Serialize)] +pub struct AnnounceResponse { + pub interval: u32, + //pub tracker_id: String, + pub complete: u32, + pub incomplete: u32, + pub peers: Vec +} + +impl AnnounceResponse { + pub fn write(&self) -> String { + serde_bencode::to_string(&self).unwrap() + } + + pub fn write_compact(&self) -> Result, Box> { + let mut peers_v4: Vec = Vec::new(); + let mut peers_v6: Vec = Vec::new(); + + for peer in &self.peers { + match peer.ip { + IpAddr::V4(ip) => { + peers_v4.write(&u32::from(ip).to_be_bytes())?; + peers_v4.write(&peer.port.to_be_bytes())?; + } + IpAddr::V6(ip) => { + peers_v6.write(&u128::from(ip).to_be_bytes())?; + peers_v6.write(&peer.port.to_be_bytes())?; + } + } + } + + let mut bytes: Vec = Vec::new(); + bytes.write(b"d8:intervali")?; + bytes.write(&self.interval.to_string().as_bytes())?; + bytes.write(b"e8:completei")?; + bytes.write(&self.complete.to_string().as_bytes())?; + bytes.write(b"e10:incompletei")?; + bytes.write(&self.incomplete.to_string().as_bytes())?; + bytes.write(b"e5:peers")?; + bytes.write(&peers_v4.len().to_string().as_bytes())?; + bytes.write(b":")?; + bytes.write(peers_v4.as_slice())?; + bytes.write(b"e6:peers6")?; + bytes.write(&peers_v6.len().to_string().as_bytes())?; + bytes.write(b":")?; + bytes.write(peers_v6.as_slice())?; + bytes.write(b"e")?; + + Ok(bytes) + } +} + +#[derive(Serialize)] +pub struct ScrapeResponseEntry { + pub complete: u32, + pub downloaded: u32, + pub incomplete: u32, +} + +#[derive(Serialize)] +pub struct ScrapeResponse { + pub files: HashMap +} + +impl ScrapeResponse { + pub fn write(&self) -> String { + serde_bencode::to_string(&self).unwrap() + } +} + +#[derive(Serialize)] +pub struct 
ErrorResponse { + pub failure_reason: String +} + +impl warp::Reply for ErrorResponse { + fn into_response(self) -> warp::reply::Response { + Response::new(format!("{}", serde_bencode::to_string(&self).unwrap()).into()) + } +} diff --git a/src/http_server.rs b/src/torrust_http_tracker/server.rs similarity index 76% rename from src/http_server.rs rename to src/torrust_http_tracker/server.rs index 9c942856c..af1774403 100644 --- a/src/http_server.rs +++ b/src/torrust_http_tracker/server.rs @@ -1,135 +1,19 @@ use std::collections::{HashMap}; use crate::tracker::{TorrentTracker}; -use serde::{Deserialize, Serialize}; use std::convert::Infallible; -use std::error::Error; -use std::io::Write; -use std::net::{IpAddr, SocketAddr}; +use std::net::{SocketAddr}; use std::sync::Arc; use std::str::FromStr; +use super::{AnnounceResponse, ScrapeResponse}; use log::{debug}; use warp::{filters, reply::Reply, Filter}; use warp::http::Response; use crate::{TorrentError, TorrentPeer, TorrentStats}; use crate::key_manager::AuthKey; use crate::utils::url_encode_bytes; -use super::common::*; - -#[derive(Deserialize, Debug)] -pub struct AnnounceRequest { - pub downloaded: u32, - pub uploaded: u32, - pub key: String, - pub peer_id: String, - pub port: u16, - pub info_hash: String, - pub left: u32, - pub event: Option, - pub compact: Option, -} - -impl AnnounceRequest { - pub fn is_compact(&self) -> bool { - self.compact.unwrap_or(0) == 1 - } -} - -#[derive(Deserialize, Debug)] -pub struct ScrapeRequest { - pub info_hash: String, -} - -#[derive(Serialize)] -struct Peer { - peer_id: String, - ip: IpAddr, - port: u16, -} - -#[derive(Serialize)] -struct AnnounceResponse { - interval: u32, - //tracker_id: String, - complete: u32, - incomplete: u32, - peers: Vec -} - -impl AnnounceResponse { - pub fn write(&self) -> String { - serde_bencode::to_string(&self).unwrap() - } - - pub fn write_compact(&self) -> Result, Box> { - let mut peers_v4: Vec = Vec::new(); - let mut peers_v6: Vec = Vec::new(); 
- - for peer in &self.peers { - match peer.ip { - IpAddr::V4(ip) => { - peers_v4.write(&u32::from(ip).to_be_bytes())?; - peers_v4.write(&peer.port.to_be_bytes())?; - } - IpAddr::V6(ip) => { - peers_v6.write(&u128::from(ip).to_be_bytes())?; - peers_v6.write(&peer.port.to_be_bytes())?; - } - } - } - - debug!("{:?}", String::from_utf8_lossy(peers_v4.as_slice())); - debug!("{:?}", String::from_utf8_lossy(peers_v6.as_slice())); - - let mut bytes: Vec = Vec::new(); - bytes.write(b"d8:intervali")?; - bytes.write(&self.interval.to_string().as_bytes())?; - bytes.write(b"e8:completei")?; - bytes.write(&self.complete.to_string().as_bytes())?; - bytes.write(b"e10:incompletei")?; - bytes.write(&self.incomplete.to_string().as_bytes())?; - bytes.write(b"e5:peers")?; - bytes.write(&peers_v4.len().to_string().as_bytes())?; - bytes.write(b":")?; - bytes.write(peers_v4.as_slice())?; - bytes.write(b"e6:peers6")?; - bytes.write(&peers_v6.len().to_string().as_bytes())?; - bytes.write(b":")?; - bytes.write(peers_v6.as_slice())?; - bytes.write(b"e")?; - - debug!("{:?}", String::from_utf8_lossy(bytes.as_slice())); - Ok(bytes) - } -} - -#[derive(Serialize)] -struct ScrapeResponse { - files: HashMap -} - -impl ScrapeResponse { - pub fn write(&self) -> String { - serde_bencode::to_string(&self).unwrap() - } -} - -#[derive(Serialize)] -struct ScrapeResponseEntry { - complete: u32, - downloaded: u32, - incomplete: u32, -} - -#[derive(Serialize)] -struct ErrorResponse { - failure_reason: String -} - -impl warp::Reply for ErrorResponse { - fn into_response(self) -> warp::reply::Response { - Response::new(format!("{}", serde_bencode::to_string(&self).unwrap()).into()) - } -} +use crate::common::*; +use crate::torrust_http_tracker::request::AnnounceRequest; +use crate::torrust_http_tracker::{ErrorResponse, Peer, ScrapeResponseEntry}; #[derive(Clone)] pub struct HttpServer { diff --git a/src/torrust_udp_tracker/mod.rs b/src/torrust_udp_tracker/mod.rs new file mode 100644 index 000000000..74f47ad34 
--- /dev/null +++ b/src/torrust_udp_tracker/mod.rs @@ -0,0 +1 @@ +pub mod server; diff --git a/src/udp_server.rs b/src/torrust_udp_tracker/server.rs similarity index 98% rename from src/udp_server.rs rename to src/torrust_udp_tracker/server.rs index cf0474f7c..681a46f47 100644 --- a/src/udp_server.rs +++ b/src/torrust_udp_tracker/server.rs @@ -1,15 +1,15 @@ -use log::{debug}; +use log::debug; use std; -use std::net::{SocketAddr}; +use std::net::SocketAddr; use std::sync::Arc; -use std::io::{Cursor}; +use std::io::Cursor; use aquatic_udp_protocol::{AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, IpVersion, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId}; use tokio::net::UdpSocket; -use super::common::*; +use crate::common::*; use crate::utils::get_connection_id; use crate::tracker::TorrentTracker; -use crate::{TorrentPeer, TorrentError}; +use crate::{InfoHash, TorrentError, TorrentPeer}; struct RequestError { error: TorrentError, @@ -18,7 +18,7 @@ struct RequestError { struct AnnounceRequestWrapper { announce_request: AnnounceRequest, - info_hash: super::common::InfoHash, + info_hash: InfoHash, } impl AnnounceRequestWrapper { diff --git a/src/tracker.rs b/src/tracker.rs index 86a2d2e7e..7a8dae82f 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -3,15 +3,15 @@ use serde; use std::borrow::Cow; use std::collections::BTreeMap; use tokio::sync::RwLock; -use crate::common::{InfoHash, NumberOfBytesDef, AnnounceEventDef, PeerId}; -use std::net::{SocketAddr, IpAddr}; -use crate::{Configuration, http_server, key_manager, MAX_SCRAPE_TORRENTS}; +use crate::common::{AnnounceEventDef, InfoHash, NumberOfBytesDef, PeerId}; +use std::net::{IpAddr, SocketAddr}; +use crate::{Configuration, torrust_http_tracker, key_manager, MAX_SCRAPE_TORRENTS}; use std::collections::btree_map::Entry; use crate::database::SqliteDatabase; use 
std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use log::debug; -use crate::key_manager::{AuthKey}; +use crate::key_manager::AuthKey; use r2d2_sqlite::rusqlite; const TWO_HOURS: std::time::Duration = std::time::Duration::from_secs(3600 * 2); @@ -77,7 +77,7 @@ impl TorrentPeer { } } - pub fn from_http_announce_request(announce_request: &http_server::AnnounceRequest, remote_addr: SocketAddr, peer_addr: Option) -> Self { + pub fn from_http_announce_request(announce_request: &torrust_http_tracker::request::AnnounceRequest, remote_addr: SocketAddr, peer_addr: Option) -> Self { // Potentially substitute localhost IP with external IP let peer_addr = match peer_addr { None => SocketAddr::new(IpAddr::from(remote_addr.ip()), announce_request.port), From efb52017d4a4ccb08107d076e56b12db09f61b15 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 29 Jan 2022 22:14:38 +0100 Subject: [PATCH 0007/1003] fix: missing modules --- src/lib.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 58f1d4dac..d040d3719 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -8,8 +8,6 @@ pub mod utils; pub mod database; pub mod key_manager; pub mod logging; -pub mod udp_tracker; -mod http_tracker; pub use self::config::*; pub use self::udp_server::*; From ed37171f084b49321ae39000898f4795f6f8b98b Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 29 Jan 2022 22:29:18 +0100 Subject: [PATCH 0008/1003] refactor: reordered imports --- src/torrust_http_tracker/server.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/torrust_http_tracker/server.rs b/src/torrust_http_tracker/server.rs index af1774403..3b556c344 100644 --- a/src/torrust_http_tracker/server.rs +++ b/src/torrust_http_tracker/server.rs @@ -1,13 +1,13 @@ use std::collections::{HashMap}; -use crate::tracker::{TorrentTracker}; use std::convert::Infallible; use std::net::{SocketAddr}; use std::sync::Arc; use std::str::FromStr; -use super::{AnnounceResponse, 
ScrapeResponse}; use log::{debug}; use warp::{filters, reply::Reply, Filter}; use warp::http::Response; +use super::{AnnounceResponse, ScrapeResponse}; +use crate::tracker::{TorrentTracker}; use crate::{TorrentError, TorrentPeer, TorrentStats}; use crate::key_manager::AuthKey; use crate::utils::url_encode_bytes; From 9360231b64ebc14a80c7002aede54b58ab9cd202 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 31 Jan 2022 00:22:49 +0100 Subject: [PATCH 0009/1003] refactor: http tracker basically done --- Cargo.lock | 3 +- Cargo.toml | 6 +- src/main.rs | 21 +- src/torrust_http_tracker/errors.rs | 45 +++ src/torrust_http_tracker/mod.rs | 1 + src/torrust_http_tracker/request.rs | 21 +- src/torrust_http_tracker/response.rs | 7 - src/torrust_http_tracker/server.rs | 418 ++++++++++++++------------- src/torrust_udp_tracker/server.rs | 2 +- src/tracker.rs | 77 +++-- 10 files changed, 328 insertions(+), 273 deletions(-) create mode 100644 src/torrust_http_tracker/errors.rs diff --git a/Cargo.lock b/Cargo.lock index af57a3768..08a4722e6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1742,7 +1742,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "2.1.0" +version = "2.1.1" dependencies = [ "aquatic_udp_protocol", "binascii", @@ -1763,6 +1763,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_json", + "thiserror", "tokio", "toml", "warp", diff --git a/Cargo.toml b/Cargo.toml index 6e78211ea..e3d0a06a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,8 @@ [package] name = "torrust-tracker" -version = "2.1.0" -authors = ["Mick van Dijke ", "Naim A. "] +version = "2.1.1" +license = "AGPL-3.0" +authors = ["Mick van Dijke "] description = "A feature rich BitTorrent tracker." 
edition = "2018" @@ -30,5 +31,6 @@ rand = "0.8.4" env_logger = "0.9.0" config = "0.11" derive_more = "0.99" +thiserror = "1.0" aquatic_udp_protocol = "0.1.0" diff --git a/src/main.rs b/src/main.rs index 45b42ef63..ce8887742 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,3 +1,4 @@ +use std::net::SocketAddr; use log::{info}; use torrust_tracker::{http_api_server, Configuration, TorrentTracker, UdpServer, HttpTrackerConfig, UdpTrackerConfig, HttpApiConfig, logging, TrackerServer}; use std::sync::Arc; @@ -74,25 +75,21 @@ fn start_api_server(config: &HttpApiConfig, tracker: Arc) -> Joi } fn start_http_tracker_server(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { - info!("Starting HTTP server on: {}", config.bind_address); - let http_tracker = Arc::new(HttpServer::new(tracker)); - let bind_addr = config.bind_address.parse::().unwrap(); + let http_tracker = HttpServer::new(tracker); + let bind_addr = config.bind_address.parse::().unwrap(); let ssl_enabled = config.ssl_enabled; let ssl_cert_path = config.ssl_cert_path.clone(); let ssl_key_path = config.ssl_key_path.clone(); + tokio::spawn(async move { // run with tls if ssl_enabled and cert and key path are set - if ssl_enabled { - info!("SSL enabled."); - warp::serve(HttpServer::routes(http_tracker)) - .tls() - .cert_path(ssl_cert_path.as_ref().unwrap()) - .key_path(ssl_key_path.as_ref().unwrap()) - .run(bind_addr).await; + if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { + info!("Starting HTTPS server on: {} (TLS)", bind_addr); + http_tracker.start_tls(bind_addr, ssl_cert_path.as_ref().unwrap(), ssl_key_path.as_ref().unwrap()).await; } else { - warp::serve(HttpServer::routes(http_tracker)) - .run(bind_addr).await; + info!("Starting HTTP server on: {}", bind_addr); + http_tracker.start(bind_addr).await; } }) } diff --git a/src/torrust_http_tracker/errors.rs b/src/torrust_http_tracker/errors.rs new file mode 100644 index 000000000..d6a24ac38 --- /dev/null +++ 
b/src/torrust_http_tracker/errors.rs @@ -0,0 +1,45 @@ +use warp::reject::Reject; +use thiserror::Error; +use crate::TorrentError; + +#[derive(Error, Debug)] +pub enum ServerError { + #[error("internal server error")] + InternalServerError, + + #[error("info_hash is either missing or invalid")] + InvalidInfoHash, + + #[error("could not find remote address")] + AddressNotFound, + + #[error("torrent has no peers")] + NoPeersFound, + + #[error("torrent not on whitelist")] + TorrentNotWhitelisted, + + #[error("peer not authenticated")] + PeerNotAuthenticated, + + #[error("invalid authentication key")] + PeerKeyNotValid, + + #[error("exceeded info_hash limit")] + ExceededInfoHashLimit, +} + +impl Reject for ServerError {} + +impl From for ServerError { + fn from(e: TorrentError) -> Self { + match e { + TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, + TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, + TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, + TorrentError::NoPeersFound => ServerError::NoPeersFound, + TorrentError::CouldNotSendResponse => ServerError::InternalServerError, + TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, + } + } +} diff --git a/src/torrust_http_tracker/mod.rs b/src/torrust_http_tracker/mod.rs index 236f6e996..f105e320c 100644 --- a/src/torrust_http_tracker/mod.rs +++ b/src/torrust_http_tracker/mod.rs @@ -1,6 +1,7 @@ pub mod server; pub mod request; pub mod response; +mod errors; pub use self::server::*; pub use self::request::*; diff --git a/src/torrust_http_tracker/request.rs b/src/torrust_http_tracker/request.rs index d88f36d3a..b62b9430a 100644 --- a/src/torrust_http_tracker/request.rs +++ b/src/torrust_http_tracker/request.rs @@ -1,14 +1,31 @@ +use std::net::SocketAddr; use serde::{Deserialize}; +use crate::InfoHash; #[derive(Deserialize)] -pub struct AnnounceRequest { +pub struct AnnounceRequestQuery { pub downloaded: u32, pub uploaded: u32, pub key: String, pub 
peer_id: String, pub port: u16, - pub info_hash: String, pub left: u32, pub event: Option, pub compact: Option, } + +pub struct AnnounceRequest { + pub info_hash: InfoHash, + pub peer_addr: SocketAddr, + pub downloaded: u32, + pub uploaded: u32, + pub peer_id: String, + pub port: u16, + pub left: u32, + pub event: Option, + pub compact: Option, +} + +pub struct ScrapeRequest { + pub info_hashes: Vec, +} diff --git a/src/torrust_http_tracker/response.rs b/src/torrust_http_tracker/response.rs index a30fc89f6..df039a1c2 100644 --- a/src/torrust_http_tracker/response.rs +++ b/src/torrust_http_tracker/response.rs @@ -3,7 +3,6 @@ use std::error::Error; use std::io::Write; use std::net::IpAddr; use serde::{Serialize}; -use warp::http::Response; #[derive(Serialize)] pub struct Peer { @@ -86,9 +85,3 @@ impl ScrapeResponse { pub struct ErrorResponse { pub failure_reason: String } - -impl warp::Reply for ErrorResponse { - fn into_response(self) -> warp::reply::Response { - Response::new(format!("{}", serde_bencode::to_string(&self).unwrap()).into()) - } -} diff --git a/src/torrust_http_tracker/server.rs b/src/torrust_http_tracker/server.rs index 3b556c344..5ac6f57cf 100644 --- a/src/torrust_http_tracker/server.rs +++ b/src/torrust_http_tracker/server.rs @@ -1,20 +1,24 @@ -use std::collections::{HashMap}; +use std::collections::HashMap; use std::convert::Infallible; use std::net::{SocketAddr}; use std::sync::Arc; use std::str::FromStr; use log::{debug}; -use warp::{filters, reply::Reply, Filter}; -use warp::http::Response; -use super::{AnnounceResponse, ScrapeResponse}; +use warp::{reply::Reply, Filter, Rejection, reject}; +use warp::http::{Response, StatusCode}; +use super::{AnnounceResponse}; use crate::tracker::{TorrentTracker}; -use crate::{TorrentError, TorrentPeer, TorrentStats}; +use crate::{TorrentPeer, TorrentStats}; use crate::key_manager::AuthKey; -use crate::utils::url_encode_bytes; use crate::common::*; -use crate::torrust_http_tracker::request::AnnounceRequest; 
-use crate::torrust_http_tracker::{ErrorResponse, Peer, ScrapeResponseEntry}; +use crate::torrust_http_tracker::request::AnnounceRequestQuery; +use crate::torrust_http_tracker::{AnnounceRequest, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry}; +use crate::torrust_http_tracker::errors::ServerError; +use crate::utils::url_encode_bytes; +type WebResult = std::result::Result; + +/// Server that listens on HTTP, needs a TorrentTracker #[derive(Clone)] pub struct HttpServer { tracker: Arc, @@ -27,232 +31,230 @@ impl HttpServer { } } - // &self did not work here - pub fn routes(http_server: Arc) -> impl Filter + Clone + Send + Sync + 'static { - // optional tracker key - let opt_key = warp::path::param::() - .map(Some) - .or_else(|_| async { - // Ok(None) - Ok::<(Option,), std::convert::Infallible>((None,)) - }); - - // GET /announce?key=:String - // Announce peer - let hs1 = http_server.clone(); - let announce_route = - filters::path::path("announce") - .and(filters::method::get()) - .and(warp::addr::remote()) - .and(opt_key) - .and(filters::query::raw()) - .and(filters::query::query()) - .map(move |remote_addr, key, raw_query, query| { - debug!("Request: {}", raw_query); - (remote_addr, key, raw_query, query, hs1.clone()) - }) - .and_then(move |(remote_addr, key, raw_query, mut query, http_server): (Option, Option, String, AnnounceRequest, Arc)| { - async move { - if remote_addr.is_none() { return HttpServer::send_error("could not get remote address") } - - // query.info_hash somehow receives a corrupt string - // so we have to get the info_hash manually from the raw query - let info_hashes = HttpServer::info_hashes_from_raw_query(&raw_query); - if info_hashes.len() < 1 { return HttpServer::send_error("info_hash not found") } - query.info_hash = info_hashes[0].to_string(); - debug!("{:?}", query.info_hash); - - if let Some(err) = http_server.authenticate_request(&query.info_hash, key).await { return err } - - http_server.handle_announce(query, 
remote_addr.unwrap()).await - } - }); - - // GET /scrape?key=:String - // Get torrent info - let hs2 = http_server.clone(); - let scrape_route = - filters::path::path("scrape") - .and(filters::method::get()) - .and(opt_key) - .and(filters::query::raw()) - .map(move |key, raw_query| { - debug!("Request: {}", raw_query); - (key, raw_query, hs2.clone()) - }) - .and_then(move |(key, raw_query, http_server): (Option, String, Arc)| { - async move { - let info_hashes = HttpServer::info_hashes_from_raw_query(&raw_query); - if info_hashes.len() < 1 { return HttpServer::send_error("info_hash not found") } - if info_hashes.len() > 50 { return HttpServer::send_error("exceeded the max of 50 info_hashes") } - debug!("{:?}", info_hashes); - - // todo: verify all info_hashes before scrape - if let Some(err) = http_server.authenticate_request(&info_hashes[0].to_string(), key).await { return err } - - http_server.handle_scrape(info_hashes).await - } - }); - - // all routes - warp::any().and(announce_route.or(scrape_route)) + /// Start the HttpServer + pub async fn start(&self, socket_addr: SocketAddr) { + warp::serve(routes(self.tracker.clone())) + .run(socket_addr).await; } - fn info_hashes_from_raw_query(raw_query: &str) -> Vec { - let split_raw_query: Vec<&str> = raw_query.split("&").collect(); - let mut info_hashes: Vec = Vec::new(); - - for v in split_raw_query { - if v.contains("info_hash") { - let raw_info_hash = v.split("=").collect::>()[1]; - let info_hash_bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); - let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)); - if let Ok(ih) = info_hash { - info_hashes.push(ih); - } + /// Start the HttpServer in TLS mode + pub async fn start_tls(&self, socket_addr: SocketAddr, ssl_cert_path: &str, ssl_key_path: &str) { + warp::serve(routes(self.tracker.clone())) + .tls() + .cert_path(ssl_cert_path) + .key_path(ssl_key_path) + .run(socket_addr).await; + } +} + +/// All routes +fn routes(tracker: Arc,) -> 
impl Filter + Clone { + announce(tracker.clone()) + .or(scrape(tracker.clone())) + .recover(handle_error) +} + +/// Pass Arc along +fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { + warp::any() + .map(move || tracker.clone()) +} + +/// Check for infoHash +fn with_info_hash() -> impl Filter,), Error = Rejection> + Clone { + warp::filters::query::raw() + .and_then(info_hashes) +} + +/// Parse InfoHash from raw query string +async fn info_hashes(raw_query: String) -> WebResult> { + let split_raw_query: Vec<&str> = raw_query.split("&").collect(); + let mut info_hashes: Vec = Vec::new(); + + for v in split_raw_query { + if v.contains("info_hash") { + let raw_info_hash = v.split("=").collect::>()[1]; + let info_hash_bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); + let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)); + if let Ok(ih) = info_hash { + info_hashes.push(ih); } } + } - info_hashes + if info_hashes.len() > MAX_SCRAPE_TORRENTS as usize { + Err(reject::custom(ServerError::ExceededInfoHashLimit)) + } else if info_hashes.len() < 1 { + Err(reject::custom(ServerError::InvalidInfoHash)) + } else { + Ok(info_hashes) } +} - fn send_announce_response(query: &AnnounceRequest, torrent_stats: TorrentStats, peers: Vec, interval: u32) -> Result { - let http_peers: Vec = peers.iter().map(|peer| Peer { - peer_id: String::from_utf8_lossy(&peer.peer_id.0).to_string(), - ip: peer.peer_addr.ip(), - port: peer.peer_addr.port() - }).collect(); - - let res = AnnounceResponse { - interval, - complete: torrent_stats.seeders, - incomplete: torrent_stats.leechers, - peers: http_peers - }; +/// Pass Arc along +fn with_auth_key() -> impl Filter,), Error = warp::Rejection> + Clone { + warp::path::param::() + .map(|key_string: String| { + AuthKey::from_string(&key_string) + }) +} - // check for compact response request - let response = match query.compact { - None => Response::new(res.write().into()), - Some(int) => { - if 
int == 1 { - let res_compact = res.write_compact(); - match res_compact { - Ok(response) => Response::new(response.into()), - Err(e) => { - debug!("{}", e); - HttpServer::send_error("server error").unwrap() - } - } - } else { - Response::new(res.write().into()) - } - } - }; +/// Check for AnnounceRequest +fn with_announce_request() -> impl Filter + Clone { + warp::filters::query::query::() + .and(with_info_hash()) + .and(warp::addr::remote()) + .and_then(announce_request) +} + +/// Parse AnnounceRequest from raw AnnounceRequestQuery, InfoHash and Option +async fn announce_request(announce_request_query: AnnounceRequestQuery, info_hashes: Vec, remote_addr: Option) -> WebResult { + if remote_addr.is_none() { return Err(reject::custom(ServerError::AddressNotFound)) } + + Ok(AnnounceRequest { + info_hash: info_hashes[0], + peer_addr: remote_addr.unwrap(), + downloaded: announce_request_query.downloaded, + uploaded: announce_request_query.uploaded, + peer_id: announce_request_query.peer_id, + port: announce_request_query.port, + left: announce_request_query.left, + event: announce_request_query.event, + compact: announce_request_query.compact + }) +} + +/// Check for ScrapeRequest +fn with_scrape_request() -> impl Filter + Clone { + warp::any() + .and(with_info_hash()) + .and_then(scrape_request) +} + +/// Parse ScrapeRequest from InfoHash +async fn scrape_request(info_hashes: Vec) -> WebResult { + Ok(ScrapeRequest { + info_hashes, + }) +} - Ok(response) +/// Authenticate AnnounceRequest using optional AuthKey +async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), ServerError> { + match tracker.authenticate_request(info_hash, auth_key).await { + Ok(_) => Ok(()), + Err(e) => Err(ServerError::from(e)) } +} + +/// GET /announce/ +fn announce(tracker: Arc,) -> impl Filter + Clone { + warp::path::path("announce") + .and(warp::filters::method::get()) + .and(with_announce_request()) + .and(with_auth_key()) + .and(with_tracker(tracker)) + 
.and_then(handle_announce) +} - fn send_error(msg: &str) -> Result { - Ok(ErrorResponse { - failure_reason: msg.to_string() - }.into_response()) +/// GET /scrape/ +fn scrape(tracker: Arc,) -> impl Filter + Clone { + warp::path::path("scrape") + .and(warp::filters::method::get()) + .and(with_scrape_request()) + .and(with_auth_key()) + .and(with_tracker(tracker)) + .and_then(handle_scrape) +} + +/// Handle announce request +pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option, tracker: Arc,) -> WebResult { + if let Err(e) = authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await { + return Err(reject::custom(e)) } - async fn authenticate_request(&self, info_hash_str: &str, key: Option) -> Option> { - let info_hash= InfoHash::from_str(info_hash_str); - if info_hash.is_err() { return Some(HttpServer::send_error("invalid info_hash")) } + let peer = TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); - let auth_key = match key { - None => None, - Some(v) => AuthKey::from_string(&v) - }; + match tracker.update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer).await { + Err(e) => Err(reject::custom(ServerError::from(e))), + Ok(torrent_stats) => { + // get all peers excluding the client_addr + let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; + if peers.is_none() { return Err(reject::custom(ServerError::NoPeersFound)) } - if let Err(e) = self.tracker.authenticate_request(&info_hash.unwrap(), auth_key).await { - return match e { - TorrentError::TorrentNotWhitelisted => { - debug!("Info_hash not whitelisted."); - Some(HttpServer::send_error("torrent not whitelisted")) - } - TorrentError::PeerKeyNotValid => { - debug!("Peer key not valid."); - Some(HttpServer::send_error("peer key not valid")) - } - TorrentError::PeerNotAuthenticated => { - debug!("Peer not authenticated."); - 
Some(HttpServer::send_error("peer not authenticated")) - } - _ => { - debug!("Unhandled HTTP error."); - Some(HttpServer::send_error("oops")) - } - } + // success response + let announce_interval = tracker.config.http_tracker.announce_interval; + send_announce_response(&announce_request, torrent_stats, peers.unwrap(), announce_interval) } - - None } +} - async fn handle_announce(&self, query: AnnounceRequest, remote_addr: SocketAddr) -> Result { - let info_hash = match InfoHash::from_str(&query.info_hash) { - Ok(v) => v, - Err(_) => { - return HttpServer::send_error("info_hash is invalid") - } - }; +/// Handle scrape request +pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option, tracker: Arc,) -> WebResult { + let mut files: HashMap = HashMap::new(); + let db = tracker.get_torrents().await; + + for info_hash in scrape_request.info_hashes.iter() { + // authenticate every info_hash + if authenticate(info_hash, &auth_key, tracker.clone()).await.is_err() { continue } - let peer = TorrentPeer::from_http_announce_request(&query, remote_addr, self.tracker.config.get_ext_ip()); + let scrape_entry = match db.get(&info_hash) { + Some(torrent_info) => { + let (seeders, completed, leechers) = torrent_info.get_stats(); - match self.tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await { - Err(e) => { - debug!("{:?}", e); - HttpServer::send_error("server error") + ScrapeResponseEntry { complete: seeders, downloaded: completed, incomplete: leechers } } - Ok(torrent_stats) => { - // get all peers excluding the client_addr - let peers = self.tracker.get_torrent_peers(&info_hash, &peer.peer_addr).await; - if peers.is_none() { - debug!("No peers found after announce."); - return HttpServer::send_error("peer is invalid") - } - - // todo: add http announce interval config option - // success response - let announce_interval = self.tracker.config.http_tracker.announce_interval; - HttpServer::send_announce_response(&query, torrent_stats, 
peers.unwrap(), announce_interval) + None => { + ScrapeResponseEntry { complete: 0, downloaded: 0, incomplete: 0 } } + }; + + if let Ok(encoded_info_hash) = url_encode_bytes(&info_hash.0) { + files.insert(encoded_info_hash, scrape_entry); } } - async fn handle_scrape(&self, info_hashes: Vec) -> Result { - let mut res = ScrapeResponse { - files: HashMap::new() - }; - let db = self.tracker.get_torrents().await; - - for info_hash in info_hashes.iter() { - let scrape_entry = match db.get(&info_hash) { - Some(torrent_info) => { - let (seeders, completed, leechers) = torrent_info.get_stats(); - - ScrapeResponseEntry { - complete: seeders, - downloaded: completed, - incomplete: leechers - } - } - None => { - ScrapeResponseEntry { - complete: 0, - downloaded: 0, - incomplete: 0 - } - } - }; - - if let Ok(encoded_info_hash) = url_encode_bytes(&info_hash.0) { - res.files.insert(encoded_info_hash, scrape_entry); - } - } + send_scrape_response(files) +} +/// Send announce response +fn send_announce_response(announce_request: &AnnounceRequest, torrent_stats: TorrentStats, peers: Vec, interval: u32) -> WebResult { + let http_peers: Vec = peers.iter().map(|peer| Peer { + peer_id: String::from_utf8_lossy(&peer.peer_id.0).to_string(), + ip: peer.peer_addr.ip(), + port: peer.peer_addr.port() + }).collect(); + + let res = AnnounceResponse { + interval, + complete: torrent_stats.seeders, + incomplete: torrent_stats.leechers, + peers: http_peers + }; + + // check for compact response request + if let Some(1) = announce_request.compact { + match res.write_compact() { + Ok(body) => Ok(Response::new(body)), + Err(_) => Err(reject::custom(ServerError::InternalServerError)) + } + } else { Ok(Response::new(res.write().into())) } } + +/// Send scrape response +fn send_scrape_response(files: HashMap) -> WebResult { + Ok(Response::new(ScrapeResponse { files }.write())) +} + +/// Handle all server errors and send error reply +async fn handle_error(r: Rejection) -> std::result::Result { + if let 
Some(e) = r.find::() { + debug!("{:?}", e); + let reply = warp::reply::json(&ErrorResponse { failure_reason: e.to_string() }); + Ok(warp::reply::with_status(reply, StatusCode::BAD_REQUEST)) + } else { + let reply = warp::reply::json(&ErrorResponse { failure_reason: "internal server error".to_string() }); + Ok(warp::reply::with_status(reply, StatusCode::INTERNAL_SERVER_ERROR)) + } +} diff --git a/src/torrust_udp_tracker/server.rs b/src/torrust_udp_tracker/server.rs index 681a46f47..b3cbccf7f 100644 --- a/src/torrust_udp_tracker/server.rs +++ b/src/torrust_udp_tracker/server.rs @@ -114,7 +114,7 @@ impl UdpServer { async fn handle_announce(&self, remote_addr: SocketAddr, announce_request: &AnnounceRequest) -> Result { let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request.clone()); - self.tracker.authenticate_request(&wrapped_announce_request.info_hash, None).await?; + self.tracker.authenticate_request(&wrapped_announce_request.info_hash, &None).await?; let peer = TorrentPeer::from_udp_announce_request(&wrapped_announce_request.announce_request, remote_addr, self.tracker.config.get_ext_ip()); diff --git a/src/tracker.rs b/src/tracker.rs index 7a8dae82f..33d7ddb5f 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -5,7 +5,7 @@ use std::collections::BTreeMap; use tokio::sync::RwLock; use crate::common::{AnnounceEventDef, InfoHash, NumberOfBytesDef, PeerId}; use std::net::{IpAddr, SocketAddr}; -use crate::{Configuration, torrust_http_tracker, key_manager, MAX_SCRAPE_TORRENTS}; +use crate::{Configuration, key_manager, MAX_SCRAPE_TORRENTS}; use std::collections::btree_map::Entry; use crate::database::SqliteDatabase; use std::sync::Arc; @@ -13,6 +13,7 @@ use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use log::debug; use crate::key_manager::AuthKey; use r2d2_sqlite::rusqlite; +use crate::torrust_http_tracker::AnnounceRequest; const TWO_HOURS: std::time::Duration = std::time::Duration::from_secs(3600 * 2); const FIVE_MINUTES: 
std::time::Duration = std::time::Duration::from_secs(300); @@ -77,7 +78,7 @@ impl TorrentPeer { } } - pub fn from_http_announce_request(announce_request: &torrust_http_tracker::request::AnnounceRequest, remote_addr: SocketAddr, peer_addr: Option) -> Self { + pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_addr: SocketAddr, peer_addr: Option) -> Self { // Potentially substitute localhost IP with external IP let peer_addr = match peer_addr { None => SocketAddr::new(IpAddr::from(remote_addr.ip()), announce_request.port), @@ -245,7 +246,8 @@ pub enum TorrentError { PeerNotAuthenticated, PeerKeyNotValid, NoPeersFound, - CouldNotSendResponse + CouldNotSendResponse, + InvalidInfoHash, } pub struct TorrentTracker { @@ -267,6 +269,18 @@ impl TorrentTracker { } } + fn is_public(&self) -> bool { + self.config.mode == TrackerMode::PublicMode + } + + fn is_private(&self) -> bool { + self.config.mode == TrackerMode::PrivateMode || self.config.mode == TrackerMode::PrivateListedMode + } + + fn is_whitelisted(&self) -> bool { + self.config.mode == TrackerMode::ListedMode || self.config.mode == TrackerMode::PrivateListedMode + } + pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { let auth_key = key_manager::generate_auth_key(seconds_valid); @@ -285,49 +299,32 @@ impl TorrentTracker { key_manager::verify_auth_key(&db_key) } - pub async fn authenticate_request(&self, info_hash: &InfoHash, key: Option) -> Result<(), TorrentError> { - match self.config.mode { - TrackerMode::PublicMode => Ok(()), - TrackerMode::ListedMode => { - if !self.is_info_hash_whitelisted(info_hash).await { - return Err(TorrentError::TorrentNotWhitelisted) - } - - Ok(()) - } - TrackerMode::PrivateMode => { - match key { - Some(key) => { - if self.verify_auth_key(&key).await.is_err() { - return Err(TorrentError::PeerKeyNotValid) - } + pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { + // no authentication 
needed in public mode + if self.is_public() { return Ok(()) } - Ok(()) - } - None => { - return Err(TorrentError::PeerNotAuthenticated) + // check if auth_key is set and valid + if self.is_private() { + match key { + Some(key) => { + if self.verify_auth_key(key).await.is_err() { + return Err(TorrentError::PeerKeyNotValid) } } + None => { + return Err(TorrentError::PeerNotAuthenticated) + } } - TrackerMode::PrivateListedMode => { - match key { - Some(key) => { - if self.verify_auth_key(&key).await.is_err() { - return Err(TorrentError::PeerKeyNotValid) - } - - if !self.is_info_hash_whitelisted(info_hash).await { - return Err(TorrentError::TorrentNotWhitelisted) - } + } - Ok(()) - } - None => { - return Err(TorrentError::PeerNotAuthenticated) - } - } + // check if info_hash is whitelisted + if self.is_whitelisted() { + if self.is_info_hash_whitelisted(info_hash).await == false { + return Err(TorrentError::TorrentNotWhitelisted) } } + + Ok(()) } // Adding torrents is not relevant to public trackers. 
@@ -351,7 +348,7 @@ impl TorrentTracker { pub async fn get_torrent_peers( &self, info_hash: &InfoHash, - peer_addr: &std::net::SocketAddr + peer_addr: &SocketAddr ) -> Option> { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { From e173b9bf5e8a0993d3e7187fd182f44d01549216 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 31 Jan 2022 00:38:28 +0100 Subject: [PATCH 0010/1003] refactor: http tracker additional separation --- src/torrust_http_tracker/filters.rs | 83 +++++++++++ src/torrust_http_tracker/handlers.rs | 100 +++++++++++++ src/torrust_http_tracker/mod.rs | 9 +- src/torrust_http_tracker/routes.rs | 32 ++++ src/torrust_http_tracker/server.rs | 212 --------------------------- 5 files changed, 223 insertions(+), 213 deletions(-) create mode 100644 src/torrust_http_tracker/filters.rs create mode 100644 src/torrust_http_tracker/handlers.rs create mode 100644 src/torrust_http_tracker/routes.rs diff --git a/src/torrust_http_tracker/filters.rs b/src/torrust_http_tracker/filters.rs new file mode 100644 index 000000000..be1d234f6 --- /dev/null +++ b/src/torrust_http_tracker/filters.rs @@ -0,0 +1,83 @@ +/// Pass Arc along +fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { + warp::any() + .map(move || tracker.clone()) +} + +/// Check for infoHash +fn with_info_hash() -> impl Filter,), Error = Rejection> + Clone { + warp::filters::query::raw() + .and_then(info_hashes) +} + +/// Parse InfoHash from raw query string +async fn info_hashes(raw_query: String) -> WebResult> { + let split_raw_query: Vec<&str> = raw_query.split("&").collect(); + let mut info_hashes: Vec = Vec::new(); + + for v in split_raw_query { + if v.contains("info_hash") { + let raw_info_hash = v.split("=").collect::>()[1]; + let info_hash_bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); + let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)); + if let Ok(ih) = info_hash { + info_hashes.push(ih); + } + } + } + + if 
info_hashes.len() > MAX_SCRAPE_TORRENTS as usize { + Err(reject::custom(ServerError::ExceededInfoHashLimit)) + } else if info_hashes.len() < 1 { + Err(reject::custom(ServerError::InvalidInfoHash)) + } else { + Ok(info_hashes) + } +} + +/// Pass Arc along +fn with_auth_key() -> impl Filter,), Error = warp::Rejection> + Clone { + warp::path::param::() + .map(|key_string: String| { + AuthKey::from_string(&key_string) + }) +} + +/// Check for AnnounceRequest +fn with_announce_request() -> impl Filter + Clone { + warp::filters::query::query::() + .and(with_info_hash()) + .and(warp::addr::remote()) + .and_then(announce_request) +} + +/// Parse AnnounceRequest from raw AnnounceRequestQuery, InfoHash and Option +async fn announce_request(announce_request_query: AnnounceRequestQuery, info_hashes: Vec, remote_addr: Option) -> WebResult { + if remote_addr.is_none() { return Err(reject::custom(ServerError::AddressNotFound)) } + + Ok(AnnounceRequest { + info_hash: info_hashes[0], + peer_addr: remote_addr.unwrap(), + downloaded: announce_request_query.downloaded, + uploaded: announce_request_query.uploaded, + peer_id: announce_request_query.peer_id, + port: announce_request_query.port, + left: announce_request_query.left, + event: announce_request_query.event, + compact: announce_request_query.compact + }) +} + +/// Check for ScrapeRequest +fn with_scrape_request() -> impl Filter + Clone { + warp::any() + .and(with_info_hash()) + .and_then(scrape_request) +} + +/// Parse ScrapeRequest from InfoHash +async fn scrape_request(info_hashes: Vec) -> WebResult { + Ok(ScrapeRequest { + info_hashes, + }) +} diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs new file mode 100644 index 000000000..1016b4668 --- /dev/null +++ b/src/torrust_http_tracker/handlers.rs @@ -0,0 +1,100 @@ +/// Authenticate AnnounceRequest using optional AuthKey +async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), ServerError> { + match 
tracker.authenticate_request(info_hash, auth_key).await { + Ok(_) => Ok(()), + Err(e) => Err(ServerError::from(e)) + } +} + +/// Handle announce request +pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option, tracker: Arc,) -> WebResult { + if let Err(e) = authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await { + return Err(reject::custom(e)) + } + + let peer = TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); + + match tracker.update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer).await { + Err(e) => Err(reject::custom(ServerError::from(e))), + Ok(torrent_stats) => { + // get all peers excluding the client_addr + let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; + if peers.is_none() { return Err(reject::custom(ServerError::NoPeersFound)) } + + // success response + let announce_interval = tracker.config.http_tracker.announce_interval; + send_announce_response(&announce_request, torrent_stats, peers.unwrap(), announce_interval) + } + } +} + +/// Handle scrape request +pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option, tracker: Arc,) -> WebResult { + let mut files: HashMap = HashMap::new(); + let db = tracker.get_torrents().await; + + for info_hash in scrape_request.info_hashes.iter() { + // authenticate every info_hash + if authenticate(info_hash, &auth_key, tracker.clone()).await.is_err() { continue } + + let scrape_entry = match db.get(&info_hash) { + Some(torrent_info) => { + let (seeders, completed, leechers) = torrent_info.get_stats(); + + ScrapeResponseEntry { complete: seeders, downloaded: completed, incomplete: leechers } + } + None => { + ScrapeResponseEntry { complete: 0, downloaded: 0, incomplete: 0 } + } + }; + + if let Ok(encoded_info_hash) = url_encode_bytes(&info_hash.0) { + files.insert(encoded_info_hash, scrape_entry); + } + } + + 
send_scrape_response(files) +} + +/// Handle all server errors and send error reply +async fn handle_error(r: Rejection) -> std::result::Result { + if let Some(e) = r.find::() { + debug!("{:?}", e); + let reply = warp::reply::json(&ErrorResponse { failure_reason: e.to_string() }); + Ok(warp::reply::with_status(reply, StatusCode::BAD_REQUEST)) + } else { + let reply = warp::reply::json(&ErrorResponse { failure_reason: "internal server error".to_string() }); + Ok(warp::reply::with_status(reply, StatusCode::INTERNAL_SERVER_ERROR)) + } +} + +/// Send announce response +fn send_announce_response(announce_request: &AnnounceRequest, torrent_stats: TorrentStats, peers: Vec, interval: u32) -> WebResult { + let http_peers: Vec = peers.iter().map(|peer| Peer { + peer_id: String::from_utf8_lossy(&peer.peer_id.0).to_string(), + ip: peer.peer_addr.ip(), + port: peer.peer_addr.port() + }).collect(); + + let res = AnnounceResponse { + interval, + complete: torrent_stats.seeders, + incomplete: torrent_stats.leechers, + peers: http_peers + }; + + // check for compact response request + if let Some(1) = announce_request.compact { + match res.write_compact() { + Ok(body) => Ok(Response::new(body)), + Err(_) => Err(reject::custom(ServerError::InternalServerError)) + } + } else { + Ok(Response::new(res.write().into())) + } +} + +/// Send scrape response +fn send_scrape_response(files: HashMap) -> WebResult { + Ok(Response::new(ScrapeResponse { files }.write())) +} diff --git a/src/torrust_http_tracker/mod.rs b/src/torrust_http_tracker/mod.rs index f105e320c..733e69704 100644 --- a/src/torrust_http_tracker/mod.rs +++ b/src/torrust_http_tracker/mod.rs @@ -1,8 +1,15 @@ pub mod server; pub mod request; pub mod response; -mod errors; +pub mod errors; +pub mod routes; +pub mod handlers; +pub mod filters; pub use self::server::*; pub use self::request::*; pub use self::response::*; +pub use self::errors::*; +pub use self::routes::*; +pub use self::handlers::*; +pub use self::filters::*; diff 
--git a/src/torrust_http_tracker/routes.rs b/src/torrust_http_tracker/routes.rs new file mode 100644 index 000000000..20821f511 --- /dev/null +++ b/src/torrust_http_tracker/routes.rs @@ -0,0 +1,32 @@ +use std::convert::Infallible; +use std::sync::Arc; +use warp::{Filter, Rejection}; +use crate::TorrentTracker; +use crate::torrust_http_tracker::{handle_announce, handle_scrape}; + +/// All routes +fn routes(tracker: Arc,) -> impl Filter + Clone { + announce(tracker.clone()) + .or(scrape(tracker.clone())) + .recover(handle_error) +} + +/// GET /announce/ +fn announce(tracker: Arc,) -> impl Filter + Clone { + warp::path::path("announce") + .and(warp::filters::method::get()) + .and(with_announce_request()) + .and(with_auth_key()) + .and(with_tracker(tracker)) + .and_then(handle_announce) +} + +/// GET /scrape/ +fn scrape(tracker: Arc,) -> impl Filter + Clone { + warp::path::path("scrape") + .and(warp::filters::method::get()) + .and(with_scrape_request()) + .and(with_auth_key()) + .and(with_tracker(tracker)) + .and_then(handle_scrape) +} diff --git a/src/torrust_http_tracker/server.rs b/src/torrust_http_tracker/server.rs index 5ac6f57cf..da739453d 100644 --- a/src/torrust_http_tracker/server.rs +++ b/src/torrust_http_tracker/server.rs @@ -46,215 +46,3 @@ impl HttpServer { .run(socket_addr).await; } } - -/// All routes -fn routes(tracker: Arc,) -> impl Filter + Clone { - announce(tracker.clone()) - .or(scrape(tracker.clone())) - .recover(handle_error) -} - -/// Pass Arc along -fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { - warp::any() - .map(move || tracker.clone()) -} - -/// Check for infoHash -fn with_info_hash() -> impl Filter,), Error = Rejection> + Clone { - warp::filters::query::raw() - .and_then(info_hashes) -} - -/// Parse InfoHash from raw query string -async fn info_hashes(raw_query: String) -> WebResult> { - let split_raw_query: Vec<&str> = raw_query.split("&").collect(); - let mut info_hashes: Vec = Vec::new(); - - for v in 
split_raw_query { - if v.contains("info_hash") { - let raw_info_hash = v.split("=").collect::>()[1]; - let info_hash_bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); - let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)); - if let Ok(ih) = info_hash { - info_hashes.push(ih); - } - } - } - - if info_hashes.len() > MAX_SCRAPE_TORRENTS as usize { - Err(reject::custom(ServerError::ExceededInfoHashLimit)) - } else if info_hashes.len() < 1 { - Err(reject::custom(ServerError::InvalidInfoHash)) - } else { - Ok(info_hashes) - } -} - -/// Pass Arc along -fn with_auth_key() -> impl Filter,), Error = warp::Rejection> + Clone { - warp::path::param::() - .map(|key_string: String| { - AuthKey::from_string(&key_string) - }) -} - -/// Check for AnnounceRequest -fn with_announce_request() -> impl Filter + Clone { - warp::filters::query::query::() - .and(with_info_hash()) - .and(warp::addr::remote()) - .and_then(announce_request) -} - -/// Parse AnnounceRequest from raw AnnounceRequestQuery, InfoHash and Option -async fn announce_request(announce_request_query: AnnounceRequestQuery, info_hashes: Vec, remote_addr: Option) -> WebResult { - if remote_addr.is_none() { return Err(reject::custom(ServerError::AddressNotFound)) } - - Ok(AnnounceRequest { - info_hash: info_hashes[0], - peer_addr: remote_addr.unwrap(), - downloaded: announce_request_query.downloaded, - uploaded: announce_request_query.uploaded, - peer_id: announce_request_query.peer_id, - port: announce_request_query.port, - left: announce_request_query.left, - event: announce_request_query.event, - compact: announce_request_query.compact - }) -} - -/// Check for ScrapeRequest -fn with_scrape_request() -> impl Filter + Clone { - warp::any() - .and(with_info_hash()) - .and_then(scrape_request) -} - -/// Parse ScrapeRequest from InfoHash -async fn scrape_request(info_hashes: Vec) -> WebResult { - Ok(ScrapeRequest { - info_hashes, - }) -} - -/// Authenticate AnnounceRequest using optional 
AuthKey -async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), ServerError> { - match tracker.authenticate_request(info_hash, auth_key).await { - Ok(_) => Ok(()), - Err(e) => Err(ServerError::from(e)) - } -} - -/// GET /announce/ -fn announce(tracker: Arc,) -> impl Filter + Clone { - warp::path::path("announce") - .and(warp::filters::method::get()) - .and(with_announce_request()) - .and(with_auth_key()) - .and(with_tracker(tracker)) - .and_then(handle_announce) -} - -/// GET /scrape/ -fn scrape(tracker: Arc,) -> impl Filter + Clone { - warp::path::path("scrape") - .and(warp::filters::method::get()) - .and(with_scrape_request()) - .and(with_auth_key()) - .and(with_tracker(tracker)) - .and_then(handle_scrape) -} - -/// Handle announce request -pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option, tracker: Arc,) -> WebResult { - if let Err(e) = authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await { - return Err(reject::custom(e)) - } - - let peer = TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); - - match tracker.update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer).await { - Err(e) => Err(reject::custom(ServerError::from(e))), - Ok(torrent_stats) => { - // get all peers excluding the client_addr - let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; - if peers.is_none() { return Err(reject::custom(ServerError::NoPeersFound)) } - - // success response - let announce_interval = tracker.config.http_tracker.announce_interval; - send_announce_response(&announce_request, torrent_stats, peers.unwrap(), announce_interval) - } - } -} - -/// Handle scrape request -pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option, tracker: Arc,) -> WebResult { - let mut files: HashMap = HashMap::new(); - let db = tracker.get_torrents().await; - - for 
info_hash in scrape_request.info_hashes.iter() { - // authenticate every info_hash - if authenticate(info_hash, &auth_key, tracker.clone()).await.is_err() { continue } - - let scrape_entry = match db.get(&info_hash) { - Some(torrent_info) => { - let (seeders, completed, leechers) = torrent_info.get_stats(); - - ScrapeResponseEntry { complete: seeders, downloaded: completed, incomplete: leechers } - } - None => { - ScrapeResponseEntry { complete: 0, downloaded: 0, incomplete: 0 } - } - }; - - if let Ok(encoded_info_hash) = url_encode_bytes(&info_hash.0) { - files.insert(encoded_info_hash, scrape_entry); - } - } - - send_scrape_response(files) -} - -/// Send announce response -fn send_announce_response(announce_request: &AnnounceRequest, torrent_stats: TorrentStats, peers: Vec, interval: u32) -> WebResult { - let http_peers: Vec = peers.iter().map(|peer| Peer { - peer_id: String::from_utf8_lossy(&peer.peer_id.0).to_string(), - ip: peer.peer_addr.ip(), - port: peer.peer_addr.port() - }).collect(); - - let res = AnnounceResponse { - interval, - complete: torrent_stats.seeders, - incomplete: torrent_stats.leechers, - peers: http_peers - }; - - // check for compact response request - if let Some(1) = announce_request.compact { - match res.write_compact() { - Ok(body) => Ok(Response::new(body)), - Err(_) => Err(reject::custom(ServerError::InternalServerError)) - } - } else { - Ok(Response::new(res.write().into())) - } -} - -/// Send scrape response -fn send_scrape_response(files: HashMap) -> WebResult { - Ok(Response::new(ScrapeResponse { files }.write())) -} - -/// Handle all server errors and send error reply -async fn handle_error(r: Rejection) -> std::result::Result { - if let Some(e) = r.find::() { - debug!("{:?}", e); - let reply = warp::reply::json(&ErrorResponse { failure_reason: e.to_string() }); - Ok(warp::reply::with_status(reply, StatusCode::BAD_REQUEST)) - } else { - let reply = warp::reply::json(&ErrorResponse { failure_reason: "internal server 
error".to_string() }); - Ok(warp::reply::with_status(reply, StatusCode::INTERNAL_SERVER_ERROR)) - } -} From 68fb76b7a646a622ada1a391f83e5e6e0b73a56a Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 31 Jan 2022 00:42:52 +0100 Subject: [PATCH 0011/1003] refactor: http tracker additional separation imports --- src/torrust_http_tracker/filters.rs | 9 +++++++++ src/torrust_http_tracker/handlers.rs | 12 +++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/src/torrust_http_tracker/filters.rs b/src/torrust_http_tracker/filters.rs index be1d234f6..0c2a2f635 100644 --- a/src/torrust_http_tracker/filters.rs +++ b/src/torrust_http_tracker/filters.rs @@ -1,3 +1,12 @@ +use std::convert::Infallible; +use std::net::SocketAddr; +use std::str::FromStr; +use std::sync::Arc; +use warp::{Filter, reject, Rejection}; +use crate::{InfoHash, MAX_SCRAPE_TORRENTS, TorrentTracker}; +use crate::key_manager::AuthKey; +use crate::torrust_http_tracker::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError}; + /// Pass Arc along fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { warp::any() diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 1016b4668..1037852ec 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -1,3 +1,14 @@ +use std::collections::HashMap; +use std::convert::Infallible; +use std::sync::Arc; +use log::debug; +use warp::{reject, Rejection, Reply}; +use warp::http::{Response, StatusCode}; +use crate::{InfoHash, TorrentPeer, TorrentStats, TorrentTracker}; +use crate::key_manager::AuthKey; +use crate::torrust_http_tracker::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError}; +use crate::utils::url_encode_bytes; + /// Authenticate AnnounceRequest using optional AuthKey async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), 
ServerError> { match tracker.authenticate_request(info_hash, auth_key).await { @@ -40,7 +51,6 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option { let (seeders, completed, leechers) = torrent_info.get_stats(); - ScrapeResponseEntry { complete: seeders, downloaded: completed, incomplete: leechers } } None => { From 87c218cca79506f180fcd4e5dcc3644a373ccc94 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 31 Jan 2022 00:44:47 +0100 Subject: [PATCH 0012/1003] refactor: http tracker added pub to some fn --- src/torrust_http_tracker/handlers.rs | 4 ++-- src/torrust_http_tracker/routes.rs | 4 ++-- src/torrust_http_tracker/server.rs | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 1037852ec..7237d4817 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -10,7 +10,7 @@ use crate::torrust_http_tracker::{AnnounceRequest, AnnounceResponse, ErrorRespon use crate::utils::url_encode_bytes; /// Authenticate AnnounceRequest using optional AuthKey -async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), ServerError> { +pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), ServerError> { match tracker.authenticate_request(info_hash, auth_key).await { Ok(_) => Ok(()), Err(e) => Err(ServerError::from(e)) @@ -67,7 +67,7 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option std::result::Result { +pub async fn handle_error(r: Rejection) -> std::result::Result { if let Some(e) = r.find::() { debug!("{:?}", e); let reply = warp::reply::json(&ErrorResponse { failure_reason: e.to_string() }); diff --git a/src/torrust_http_tracker/routes.rs b/src/torrust_http_tracker/routes.rs index 20821f511..64a448d4d 100644 --- a/src/torrust_http_tracker/routes.rs +++ b/src/torrust_http_tracker/routes.rs @@ -2,10 +2,10 @@ use 
std::convert::Infallible; use std::sync::Arc; use warp::{Filter, Rejection}; use crate::TorrentTracker; -use crate::torrust_http_tracker::{handle_announce, handle_scrape}; +use crate::torrust_http_tracker::{handle_announce, handle_error, handle_scrape}; /// All routes -fn routes(tracker: Arc,) -> impl Filter + Clone { +pub fn routes(tracker: Arc,) -> impl Filter + Clone { announce(tracker.clone()) .or(scrape(tracker.clone())) .recover(handle_error) diff --git a/src/torrust_http_tracker/server.rs b/src/torrust_http_tracker/server.rs index da739453d..c521d797b 100644 --- a/src/torrust_http_tracker/server.rs +++ b/src/torrust_http_tracker/server.rs @@ -12,7 +12,7 @@ use crate::{TorrentPeer, TorrentStats}; use crate::key_manager::AuthKey; use crate::common::*; use crate::torrust_http_tracker::request::AnnounceRequestQuery; -use crate::torrust_http_tracker::{AnnounceRequest, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry}; +use crate::torrust_http_tracker::{AnnounceRequest, ErrorResponse, Peer, routes, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry}; use crate::torrust_http_tracker::errors::ServerError; use crate::utils::url_encode_bytes; From 0b7a1c7a776252151b7d7f5ca9665fe647991b1a Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 31 Jan 2022 00:52:44 +0100 Subject: [PATCH 0013/1003] refactor: http tracker imports --- src/torrust_http_tracker/filters.rs | 12 ++++++------ src/torrust_http_tracker/handlers.rs | 2 ++ src/torrust_http_tracker/mod.rs | 2 ++ src/torrust_http_tracker/routes.rs | 2 +- src/torrust_http_tracker/server.rs | 21 +++------------------ 5 files changed, 14 insertions(+), 25 deletions(-) diff --git a/src/torrust_http_tracker/filters.rs b/src/torrust_http_tracker/filters.rs index 0c2a2f635..85e345b12 100644 --- a/src/torrust_http_tracker/filters.rs +++ b/src/torrust_http_tracker/filters.rs @@ -5,16 +5,16 @@ use std::sync::Arc; use warp::{Filter, reject, Rejection}; use crate::{InfoHash, MAX_SCRAPE_TORRENTS, 
TorrentTracker}; use crate::key_manager::AuthKey; -use crate::torrust_http_tracker::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError}; +use crate::torrust_http_tracker::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; /// Pass Arc along -fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { +pub fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { warp::any() .map(move || tracker.clone()) } /// Check for infoHash -fn with_info_hash() -> impl Filter,), Error = Rejection> + Clone { +pub fn with_info_hash() -> impl Filter,), Error = Rejection> + Clone { warp::filters::query::raw() .and_then(info_hashes) } @@ -45,7 +45,7 @@ async fn info_hashes(raw_query: String) -> WebResult> { } /// Pass Arc along -fn with_auth_key() -> impl Filter,), Error = warp::Rejection> + Clone { +pub fn with_auth_key() -> impl Filter,), Error = warp::Rejection> + Clone { warp::path::param::() .map(|key_string: String| { AuthKey::from_string(&key_string) @@ -53,7 +53,7 @@ fn with_auth_key() -> impl Filter,), Error = warp::Re } /// Check for AnnounceRequest -fn with_announce_request() -> impl Filter + Clone { +pub fn with_announce_request() -> impl Filter + Clone { warp::filters::query::query::() .and(with_info_hash()) .and(warp::addr::remote()) @@ -78,7 +78,7 @@ async fn announce_request(announce_request_query: AnnounceRequestQuery, info_has } /// Check for ScrapeRequest -fn with_scrape_request() -> impl Filter + Clone { +pub fn with_scrape_request() -> impl Filter + Clone { warp::any() .and(with_info_hash()) .and_then(scrape_request) diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 7237d4817..6f7ad4aa9 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -9,6 +9,8 @@ use crate::key_manager::AuthKey; use crate::torrust_http_tracker::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, 
ScrapeResponse, ScrapeResponseEntry, ServerError}; use crate::utils::url_encode_bytes; +type WebResult = std::result::Result; + /// Authenticate AnnounceRequest using optional AuthKey pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), ServerError> { match tracker.authenticate_request(info_hash, auth_key).await { diff --git a/src/torrust_http_tracker/mod.rs b/src/torrust_http_tracker/mod.rs index 733e69704..0e5cf91ee 100644 --- a/src/torrust_http_tracker/mod.rs +++ b/src/torrust_http_tracker/mod.rs @@ -13,3 +13,5 @@ pub use self::errors::*; pub use self::routes::*; pub use self::handlers::*; pub use self::filters::*; + +pub type WebResult = std::result::Result; diff --git a/src/torrust_http_tracker/routes.rs b/src/torrust_http_tracker/routes.rs index 64a448d4d..9c7b45b4c 100644 --- a/src/torrust_http_tracker/routes.rs +++ b/src/torrust_http_tracker/routes.rs @@ -2,7 +2,7 @@ use std::convert::Infallible; use std::sync::Arc; use warp::{Filter, Rejection}; use crate::TorrentTracker; -use crate::torrust_http_tracker::{handle_announce, handle_error, handle_scrape}; +use crate::torrust_http_tracker::{handle_announce, handle_error, handle_scrape, with_announce_request, with_auth_key, with_scrape_request, with_tracker}; /// All routes pub fn routes(tracker: Arc,) -> impl Filter + Clone { diff --git a/src/torrust_http_tracker/server.rs b/src/torrust_http_tracker/server.rs index c521d797b..90f8a84d0 100644 --- a/src/torrust_http_tracker/server.rs +++ b/src/torrust_http_tracker/server.rs @@ -1,22 +1,7 @@ -use std::collections::HashMap; -use std::convert::Infallible; -use std::net::{SocketAddr}; +use std::net::SocketAddr; use std::sync::Arc; -use std::str::FromStr; -use log::{debug}; -use warp::{reply::Reply, Filter, Rejection, reject}; -use warp::http::{Response, StatusCode}; -use super::{AnnounceResponse}; -use crate::tracker::{TorrentTracker}; -use crate::{TorrentPeer, TorrentStats}; -use crate::key_manager::AuthKey; -use 
crate::common::*; -use crate::torrust_http_tracker::request::AnnounceRequestQuery; -use crate::torrust_http_tracker::{AnnounceRequest, ErrorResponse, Peer, routes, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry}; -use crate::torrust_http_tracker::errors::ServerError; -use crate::utils::url_encode_bytes; - -type WebResult = std::result::Result; +use crate::TorrentTracker; +use crate::torrust_http_tracker::routes; /// Server that listens on HTTP, needs a TorrentTracker #[derive(Clone)] From 7c86f45d2e77db49b7a2f96be8176384f6bf9656 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 31 Jan 2022 00:57:53 +0100 Subject: [PATCH 0014/1003] refactor: http tracker --- src/torrust_http_tracker/handlers.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 6f7ad4aa9..cd7a8269c 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -6,12 +6,10 @@ use warp::{reject, Rejection, Reply}; use warp::http::{Response, StatusCode}; use crate::{InfoHash, TorrentPeer, TorrentStats, TorrentTracker}; use crate::key_manager::AuthKey; -use crate::torrust_http_tracker::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError}; +use crate::torrust_http_tracker::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, WebResult}; use crate::utils::url_encode_bytes; -type WebResult = std::result::Result; - -/// Authenticate AnnounceRequest using optional AuthKey +/// Authenticate InfoHash using optional AuthKey pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), ServerError> { match tracker.authenticate_request(info_hash, auth_key).await { Ok(_) => Ok(()), From f23d38a9b454b82b9ffd804b5a09d52deb6ac124 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 31 Jan 2022 01:00:35 +0100 
Subject: [PATCH 0015/1003] refactor: default http port from 7878 > 6969 --- src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config.rs b/src/config.rs index c2b335d5d..07eb418cd 100644 --- a/src/config.rs +++ b/src/config.rs @@ -135,7 +135,7 @@ impl Configuration { }, http_tracker: HttpTrackerConfig { enabled: false, - bind_address: String::from("0.0.0.0:7878"), + bind_address: String::from("0.0.0.0:6969"), announce_interval: 120, ssl_enabled: false, ssl_cert_path: None, From 007a4219b69ff74ca488c64e49034e22dee27c41 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 31 Jan 2022 01:14:15 +0100 Subject: [PATCH 0016/1003] refactor: changed bytes from u32 to u64 --- src/torrust_http_tracker/mod.rs | 1 + src/torrust_http_tracker/request.rs | 13 +++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/torrust_http_tracker/mod.rs b/src/torrust_http_tracker/mod.rs index 0e5cf91ee..ea6675dce 100644 --- a/src/torrust_http_tracker/mod.rs +++ b/src/torrust_http_tracker/mod.rs @@ -14,4 +14,5 @@ pub use self::routes::*; pub use self::handlers::*; pub use self::filters::*; +pub type Bytes = u64; pub type WebResult = std::result::Result; diff --git a/src/torrust_http_tracker/request.rs b/src/torrust_http_tracker/request.rs index b62b9430a..8d90ede1f 100644 --- a/src/torrust_http_tracker/request.rs +++ b/src/torrust_http_tracker/request.rs @@ -1,15 +1,16 @@ use std::net::SocketAddr; use serde::{Deserialize}; use crate::InfoHash; +use crate::torrust_http_tracker::Bytes; #[derive(Deserialize)] pub struct AnnounceRequestQuery { - pub downloaded: u32, - pub uploaded: u32, + pub downloaded: Bytes, + pub uploaded: Bytes, pub key: String, pub peer_id: String, pub port: u16, - pub left: u32, + pub left: Bytes, pub event: Option, pub compact: Option, } @@ -17,11 +18,11 @@ pub struct AnnounceRequestQuery { pub struct AnnounceRequest { pub info_hash: InfoHash, pub peer_addr: SocketAddr, - pub downloaded: u32, - pub uploaded: u32, + 
pub downloaded: Bytes, + pub uploaded: Bytes, pub peer_id: String, pub port: u16, - pub left: u32, + pub left: Bytes, pub event: Option, pub compact: Option, } From d3270888b74936d3118b743b9b43df30a82191fc Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 31 Jan 2022 02:24:48 +0100 Subject: [PATCH 0017/1003] fix: udp thread spawn overhead --- src/common.rs | 2 - src/main.rs | 7 +-- src/torrust_udp_tracker/errors.rs | 45 +++++++++++++++ src/torrust_udp_tracker/mod.rs | 15 +++++ src/torrust_udp_tracker/request.rs | 33 +++++++++++ src/torrust_udp_tracker/response.rs | 6 ++ src/torrust_udp_tracker/server.rs | 90 ++++++++++------------------- 7 files changed, 130 insertions(+), 68 deletions(-) create mode 100644 src/torrust_udp_tracker/errors.rs create mode 100644 src/torrust_udp_tracker/request.rs create mode 100644 src/torrust_udp_tracker/response.rs diff --git a/src/common.rs b/src/common.rs index 1ee1190ab..d47205c3c 100644 --- a/src/common.rs +++ b/src/common.rs @@ -1,9 +1,7 @@ use serde::{Deserialize, Serialize}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -pub const MAX_PACKET_SIZE: usize = 0xffff; pub const MAX_SCRAPE_TORRENTS: u8 = 74; -pub const PROTOCOL_ID: i64 = 4_497_486_125_440; // protocol constant pub const AUTH_KEY_LENGTH: usize = 32; #[repr(u32)] diff --git a/src/main.rs b/src/main.rs index ce8887742..d8a73854e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -95,15 +95,12 @@ fn start_http_tracker_server(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { - info!("Starting UDP server on: {}", config.bind_address); let udp_server = UdpServer::new(tracker).await.unwrap_or_else(|e| { panic!("Could not start UDP server: {}", e); }); - info!("Starting UDP tracker server.."); + info!("Starting UDP server on: {}", config.bind_address); tokio::spawn(async move { - if let Err(e) = udp_server.accept_packets().await { - panic!("Could not start UDP server: {}", e); - } + udp_server.start().await; }) } diff --git 
a/src/torrust_udp_tracker/errors.rs b/src/torrust_udp_tracker/errors.rs new file mode 100644 index 000000000..d6a24ac38 --- /dev/null +++ b/src/torrust_udp_tracker/errors.rs @@ -0,0 +1,45 @@ +use warp::reject::Reject; +use thiserror::Error; +use crate::TorrentError; + +#[derive(Error, Debug)] +pub enum ServerError { + #[error("internal server error")] + InternalServerError, + + #[error("info_hash is either missing or invalid")] + InvalidInfoHash, + + #[error("could not find remote address")] + AddressNotFound, + + #[error("torrent has no peers")] + NoPeersFound, + + #[error("torrent not on whitelist")] + TorrentNotWhitelisted, + + #[error("peer not authenticated")] + PeerNotAuthenticated, + + #[error("invalid authentication key")] + PeerKeyNotValid, + + #[error("exceeded info_hash limit")] + ExceededInfoHashLimit, +} + +impl Reject for ServerError {} + +impl From for ServerError { + fn from(e: TorrentError) -> Self { + match e { + TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, + TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, + TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, + TorrentError::NoPeersFound => ServerError::NoPeersFound, + TorrentError::CouldNotSendResponse => ServerError::InternalServerError, + TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, + } + } +} diff --git a/src/torrust_udp_tracker/mod.rs b/src/torrust_udp_tracker/mod.rs index 74f47ad34..dc653f020 100644 --- a/src/torrust_udp_tracker/mod.rs +++ b/src/torrust_udp_tracker/mod.rs @@ -1 +1,16 @@ +pub mod errors; +pub mod request; +pub mod response; pub mod server; + +use self::errors::*; +use self::request::*; +use self::response::*; +use self::server::*; + +pub type Bytes = u64; +pub type Port = u16; +pub type TransactionId = i64; + +pub const MAX_PACKET_SIZE: usize = 0xffff; +pub const PROTOCOL_ID: i64 = 0x41727101980; diff --git a/src/torrust_udp_tracker/request.rs b/src/torrust_udp_tracker/request.rs new file mode 
100644 index 000000000..e5fcfdda5 --- /dev/null +++ b/src/torrust_udp_tracker/request.rs @@ -0,0 +1,33 @@ +use std::net::Ipv4Addr; +use aquatic_udp_protocol::{AnnounceEvent, AnnounceRequest}; +use crate::{InfoHash, PeerId}; +use crate::torrust_udp_tracker::{Bytes, Port, TransactionId}; + +struct AnnounceRequest2 { + pub connection_id: i64, + pub transaction_id: i32, + pub info_hash: InfoHash, + pub peer_id: PeerId, + pub bytes_downloaded: Bytes, + pub bytes_uploaded: Bytes, + pub bytes_left: Bytes, + pub event: AnnounceEvent, + pub ip_address: Option, + pub key: u32, + pub peers_wanted: u32, + pub port: Port +} + +pub struct AnnounceRequestWrapper { + pub announce_request: AnnounceRequest, + pub info_hash: InfoHash, +} + +impl AnnounceRequestWrapper { + pub fn new(announce_request: AnnounceRequest) -> Self { + AnnounceRequestWrapper { + announce_request: announce_request.clone(), + info_hash: InfoHash(announce_request.info_hash.0) + } + } +} diff --git a/src/torrust_udp_tracker/response.rs b/src/torrust_udp_tracker/response.rs new file mode 100644 index 000000000..18b3c8807 --- /dev/null +++ b/src/torrust_udp_tracker/response.rs @@ -0,0 +1,6 @@ +use crate::torrust_udp_tracker::TransactionId; + +pub struct ErrorResponse { + pub transaction_id: TransactionId, + pub message: String, +} diff --git a/src/torrust_udp_tracker/server.rs b/src/torrust_udp_tracker/server.rs index b3cbccf7f..5d6666468 100644 --- a/src/torrust_udp_tracker/server.rs +++ b/src/torrust_udp_tracker/server.rs @@ -10,25 +10,9 @@ use crate::common::*; use crate::utils::get_connection_id; use crate::tracker::TorrentTracker; use crate::{InfoHash, TorrentError, TorrentPeer}; - -struct RequestError { - error: TorrentError, - transaction_id: TransactionId -} - -struct AnnounceRequestWrapper { - announce_request: AnnounceRequest, - info_hash: InfoHash, -} - -impl AnnounceRequestWrapper { - pub fn new(announce_request: AnnounceRequest) -> Self { - AnnounceRequestWrapper { - announce_request: 
announce_request.clone(), - info_hash: InfoHash(announce_request.info_hash.0) - } - } -} +use crate::torrust_udp_tracker::errors::ServerError; +use crate::torrust_udp_tracker::MAX_PACKET_SIZE; +use crate::torrust_udp_tracker::request::{AnnounceRequestWrapper}; pub struct UdpServer { socket: UdpSocket, @@ -45,18 +29,15 @@ impl UdpServer { }) } - pub async fn accept_packets(self) -> Result<(), std::io::Error> { - let tracker = Arc::new(self); - + pub async fn start(&self) { loop { - let mut packet = vec![0u8; MAX_PACKET_SIZE]; - let (size, remote_address) = tracker.socket.recv_from(packet.as_mut_slice()).await?; - - let tracker = tracker.clone(); - tokio::spawn(async move { - debug!("Received {} bytes from {}", size, remote_address); - tracker.handle_packet(remote_address, &packet[..size]).await; - }); + let mut data = [0; MAX_PACKET_SIZE]; + if let Ok((valid_bytes, remote_addr)) = self.socket.recv_from(&mut data).await { + let data = &data[..valid_bytes]; + + debug!("Received {} bytes from {}", data.len(), remote_addr); + self.handle_packet(remote_addr, data).await; + } } } @@ -75,28 +56,36 @@ impl UdpServer { } async fn handle_request(&self, request: Request, remote_addr: SocketAddr) { - // todo: check for expired connection_id - let request_result = match request { + let transaction_id = match &request { + Request::Connect(connect_request) => { + connect_request.transaction_id + } + Request::Announce(announce_request) => { + announce_request.transaction_id + } + Request::Scrape(scrape_request) => { + scrape_request.transaction_id + } + }; + + let res = match request { Request::Connect(connect_request) => { self.handle_connect(remote_addr, &connect_request).await - .map_err(|error| RequestError { error, transaction_id: connect_request.transaction_id }) } Request::Announce(announce_request) => { self.handle_announce(remote_addr, &announce_request).await - .map_err(|error| RequestError { error, transaction_id: announce_request.transaction_id }) } 
Request::Scrape(scrape_request) => { self.handle_scrape(&scrape_request).await - .map_err(|error| RequestError { error, transaction_id: scrape_request.transaction_id }) } }; - match request_result { + match res { Ok(response) => { let _ = self.send_response(remote_addr, response).await; } - Err(request_error) => { - let _ = self.handle_error(request_error.error, remote_addr, request_error.transaction_id).await; + Err(e) => { + let _ = self.handle_error(e, remote_addr, transaction_id).await; } } } @@ -184,29 +173,8 @@ impl UdpServer { } async fn handle_error(&self, e: TorrentError, remote_addr: SocketAddr, tx_id: TransactionId) { - let mut err_msg = "oops"; - - match e { - TorrentError::TorrentNotWhitelisted => { - debug!("Info_hash not whitelisted."); - err_msg = "info hash not whitelisted"; - } - TorrentError::PeerKeyNotValid => { - debug!("Peer key not valid."); - err_msg = "peer key not valid"; - } - TorrentError::PeerNotAuthenticated => { - debug!("Peer not authenticated."); - err_msg = "peer not authenticated"; - } - TorrentError::NoPeersFound => { - debug!("No peers found."); - err_msg = "no peers found"; - } - _ => {} - } - - self.send_error(remote_addr, tx_id, err_msg).await; + let err = ServerError::from(e); + self.send_error(remote_addr, tx_id, &err.to_string()).await; } async fn send_response(&self, remote_addr: SocketAddr, response: Response) -> Result { From f8b6a97ef8eb74e8781b45ef606e5e3ee04ed932 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 31 Jan 2022 03:41:23 +0100 Subject: [PATCH 0018/1003] refactor: completed udp tracker refactor --- src/torrust_http_tracker/errors.rs | 13 -- src/torrust_http_tracker/handlers.rs | 34 ++--- src/torrust_udp_tracker/errors.rs | 17 +-- src/torrust_udp_tracker/handlers.rs | 145 ++++++++++++++++++++++ src/torrust_udp_tracker/mod.rs | 10 +- src/torrust_udp_tracker/request.rs | 30 ++--- src/torrust_udp_tracker/server.rs | 177 ++------------------------- src/tracker.rs | 23 ++-- 8 files changed, 206 insertions(+), 
243 deletions(-) create mode 100644 src/torrust_udp_tracker/handlers.rs diff --git a/src/torrust_http_tracker/errors.rs b/src/torrust_http_tracker/errors.rs index d6a24ac38..76a3e2330 100644 --- a/src/torrust_http_tracker/errors.rs +++ b/src/torrust_http_tracker/errors.rs @@ -30,16 +30,3 @@ pub enum ServerError { } impl Reject for ServerError {} - -impl From for ServerError { - fn from(e: TorrentError) -> Self { - match e { - TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, - TorrentError::NoPeersFound => ServerError::NoPeersFound, - TorrentError::CouldNotSendResponse => ServerError::InternalServerError, - TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, - } - } -} diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index cd7a8269c..cb972d69a 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use log::debug; use warp::{reject, Rejection, Reply}; use warp::http::{Response, StatusCode}; -use crate::{InfoHash, TorrentPeer, TorrentStats, TorrentTracker}; +use crate::{InfoHash, TorrentError, TorrentPeer, TorrentStats, TorrentTracker}; use crate::key_manager::AuthKey; use crate::torrust_http_tracker::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, WebResult}; use crate::utils::url_encode_bytes; @@ -13,7 +13,18 @@ use crate::utils::url_encode_bytes; pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), ServerError> { match tracker.authenticate_request(info_hash, auth_key).await { Ok(_) => Ok(()), - Err(e) => Err(ServerError::from(e)) + Err(e) => { + let err = match e { + TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, + 
TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, + TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, + TorrentError::NoPeersFound => ServerError::NoPeersFound, + TorrentError::CouldNotSendResponse => ServerError::InternalServerError, + TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, + }; + + Err(err) + } } } @@ -24,19 +35,14 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option } let peer = TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); + let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer).await; + // get all peers excluding the client_addr + let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; + if peers.is_none() { return Err(reject::custom(ServerError::NoPeersFound)) } - match tracker.update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer).await { - Err(e) => Err(reject::custom(ServerError::from(e))), - Ok(torrent_stats) => { - // get all peers excluding the client_addr - let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; - if peers.is_none() { return Err(reject::custom(ServerError::NoPeersFound)) } - - // success response - let announce_interval = tracker.config.http_tracker.announce_interval; - send_announce_response(&announce_request, torrent_stats, peers.unwrap(), announce_interval) - } - } + // success response + let announce_interval = tracker.config.http_tracker.announce_interval; + send_announce_response(&announce_request, torrent_stats, peers.unwrap(), announce_interval) } /// Handle scrape request diff --git a/src/torrust_udp_tracker/errors.rs b/src/torrust_udp_tracker/errors.rs index d6a24ac38..d487a4487 100644 --- a/src/torrust_udp_tracker/errors.rs +++ b/src/torrust_udp_tracker/errors.rs @@ -1,4 +1,3 @@ -use warp::reject::Reject; use thiserror::Error; use 
crate::TorrentError; @@ -27,19 +26,7 @@ pub enum ServerError { #[error("exceeded info_hash limit")] ExceededInfoHashLimit, -} -impl Reject for ServerError {} - -impl From for ServerError { - fn from(e: TorrentError) -> Self { - match e { - TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, - TorrentError::NoPeersFound => ServerError::NoPeersFound, - TorrentError::CouldNotSendResponse => ServerError::InternalServerError, - TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, - } - } + #[error("bad request")] + BadRequest, } diff --git a/src/torrust_udp_tracker/handlers.rs b/src/torrust_udp_tracker/handlers.rs new file mode 100644 index 000000000..8549580d7 --- /dev/null +++ b/src/torrust_udp_tracker/handlers.rs @@ -0,0 +1,145 @@ +use std::net::SocketAddr; +use std::sync::Arc; +use aquatic_udp_protocol::{AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId}; +use crate::{InfoHash, MAX_SCRAPE_TORRENTS, TorrentError, TorrentPeer, TorrentTracker}; +use crate::torrust_udp_tracker::errors::ServerError; +use crate::torrust_udp_tracker::request::AnnounceRequestWrapper; +use crate::utils::get_connection_id; + +pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { + match tracker.authenticate_request(info_hash, &None).await { + Ok(_) => Ok(()), + Err(e) => { + let err = match e { + TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, + TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, + TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, + TorrentError::NoPeersFound => ServerError::NoPeersFound, + 
TorrentError::CouldNotSendResponse => ServerError::InternalServerError, + TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, + }; + + Err(err) + } + } +} + +pub async fn handle_packet(remote_addr: SocketAddr, payload: &[u8], tracker: Arc) -> Response { + match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| ServerError::InternalServerError) { + Ok(request) => { + let transaction_id = match &request { + Request::Connect(connect_request) => { + connect_request.transaction_id + } + Request::Announce(announce_request) => { + announce_request.transaction_id + } + Request::Scrape(scrape_request) => { + scrape_request.transaction_id + } + }; + + match handle_request(request, remote_addr, tracker).await { + Ok(response) => response, + Err(e) => handle_error(e, transaction_id) + } + } + // bad request + Err(_) => handle_error(ServerError::BadRequest, TransactionId(0)) + } +} + +pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: Arc) -> Result { + match request { + Request::Connect(connect_request) => { + handle_connect(remote_addr, &connect_request).await + } + Request::Announce(announce_request) => { + handle_announce(remote_addr, &announce_request, tracker).await + } + Request::Scrape(scrape_request) => { + handle_scrape(&scrape_request, tracker).await + } + } +} + +pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest) -> Result { + let connection_id = get_connection_id(&remote_addr); + + let response = Response::from(ConnectResponse { + transaction_id: request.transaction_id, + connection_id, + }); + + Ok(response) +} + +pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &AnnounceRequest, tracker: Arc) -> Result { + let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request.clone()); + + authenticate(&wrapped_announce_request.info_hash, tracker.clone()).await?; + + let peer = 
TorrentPeer::from_udp_announce_request(&wrapped_announce_request.announce_request, remote_addr, tracker.config.get_ext_ip()); + + let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer).await; + // get all peers excluding the client_addr + let peers = match tracker.get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr).await { + Some(v) => v, + None => { return Err(ServerError::NoPeersFound); } + }; + + Ok(Response::from(AnnounceResponse { + transaction_id: wrapped_announce_request.announce_request.transaction_id, + announce_interval: AnnounceInterval(tracker.config.udp_tracker.announce_interval as i32), + leechers: NumberOfPeers(torrent_stats.leechers as i32), + seeders: NumberOfPeers(torrent_stats.seeders as i32), + peers: peers.iter().map(|peer| + ResponsePeer { + ip_address: peer.peer_addr.ip(), + port: Port(peer.peer_addr.port()) + }).collect() + })) +} + +pub async fn handle_scrape(request: &ScrapeRequest, tracker: Arc) -> Result { + let db = tracker.get_torrents().await; + + let mut torrent_stats: Vec = Vec::new(); + + for info_hash in request.info_hashes.iter() { + let info_hash = InfoHash(info_hash.0); + + if authenticate(&info_hash, tracker.clone()).await.is_err() { continue } + + let scrape_entry = match db.get(&info_hash) { + Some(torrent_info) => { + let (seeders, completed, leechers) = torrent_info.get_stats(); + + TorrentScrapeStatistics { + seeders: NumberOfPeers(seeders as i32), + completed: NumberOfDownloads(completed as i32), + leechers: NumberOfPeers(leechers as i32), + } + } + None => { + TorrentScrapeStatistics { + seeders: NumberOfPeers(0), + completed: NumberOfDownloads(0), + leechers: NumberOfPeers(0), + } + } + }; + + torrent_stats.push(scrape_entry); + } + + Ok(Response::from(ScrapeResponse { + transaction_id: request.transaction_id, + torrent_stats + })) +} + +fn handle_error(e: ServerError, transaction_id: TransactionId) -> Response { + let message = e.to_string(); 
+ Response::from(ErrorResponse { transaction_id, message }) +} diff --git a/src/torrust_udp_tracker/mod.rs b/src/torrust_udp_tracker/mod.rs index dc653f020..1e7d5d68d 100644 --- a/src/torrust_udp_tracker/mod.rs +++ b/src/torrust_udp_tracker/mod.rs @@ -2,11 +2,13 @@ pub mod errors; pub mod request; pub mod response; pub mod server; +pub mod handlers; -use self::errors::*; -use self::request::*; -use self::response::*; -use self::server::*; +pub use self::errors::*; +pub use self::request::*; +pub use self::response::*; +pub use self::server::*; +pub use self::handlers::*; pub type Bytes = u64; pub type Port = u16; diff --git a/src/torrust_udp_tracker/request.rs b/src/torrust_udp_tracker/request.rs index e5fcfdda5..f3757fe56 100644 --- a/src/torrust_udp_tracker/request.rs +++ b/src/torrust_udp_tracker/request.rs @@ -1,22 +1,22 @@ use std::net::Ipv4Addr; use aquatic_udp_protocol::{AnnounceEvent, AnnounceRequest}; use crate::{InfoHash, PeerId}; -use crate::torrust_udp_tracker::{Bytes, Port, TransactionId}; +use crate::torrust_udp_tracker::{Bytes, Port}; -struct AnnounceRequest2 { - pub connection_id: i64, - pub transaction_id: i32, - pub info_hash: InfoHash, - pub peer_id: PeerId, - pub bytes_downloaded: Bytes, - pub bytes_uploaded: Bytes, - pub bytes_left: Bytes, - pub event: AnnounceEvent, - pub ip_address: Option, - pub key: u32, - pub peers_wanted: u32, - pub port: Port -} +// struct AnnounceRequest { +// pub connection_id: i64, +// pub transaction_id: i32, +// pub info_hash: InfoHash, +// pub peer_id: PeerId, +// pub bytes_downloaded: Bytes, +// pub bytes_uploaded: Bytes, +// pub bytes_left: Bytes, +// pub event: AnnounceEvent, +// pub ip_address: Option, +// pub key: u32, +// pub peers_wanted: u32, +// pub port: Port +// } pub struct AnnounceRequestWrapper { pub announce_request: AnnounceRequest, diff --git a/src/torrust_udp_tracker/server.rs b/src/torrust_udp_tracker/server.rs index 5d6666468..94108c767 100644 --- a/src/torrust_udp_tracker/server.rs +++ 
b/src/torrust_udp_tracker/server.rs @@ -11,7 +11,7 @@ use crate::utils::get_connection_id; use crate::tracker::TorrentTracker; use crate::{InfoHash, TorrentError, TorrentPeer}; use crate::torrust_udp_tracker::errors::ServerError; -use crate::torrust_udp_tracker::MAX_PACKET_SIZE; +use crate::torrust_udp_tracker::{handle_packet, MAX_PACKET_SIZE}; use crate::torrust_udp_tracker::request::{AnnounceRequestWrapper}; pub struct UdpServer { @@ -34,150 +34,14 @@ impl UdpServer { let mut data = [0; MAX_PACKET_SIZE]; if let Ok((valid_bytes, remote_addr)) = self.socket.recv_from(&mut data).await { let data = &data[..valid_bytes]; - debug!("Received {} bytes from {}", data.len(), remote_addr); - self.handle_packet(remote_addr, data).await; - } - } - } - - async fn handle_packet(&self, remote_addr: SocketAddr, payload: &[u8]) { - let request = Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS); - - match request { - Ok(request) => { - debug!("New request: {:?}", request); - self.handle_request(request, remote_addr).await; - } - Err(err) => { - debug!("request_from_bytes error: {:?}", err); - } - } - } - - async fn handle_request(&self, request: Request, remote_addr: SocketAddr) { - let transaction_id = match &request { - Request::Connect(connect_request) => { - connect_request.transaction_id - } - Request::Announce(announce_request) => { - announce_request.transaction_id - } - Request::Scrape(scrape_request) => { - scrape_request.transaction_id - } - }; - - let res = match request { - Request::Connect(connect_request) => { - self.handle_connect(remote_addr, &connect_request).await - } - Request::Announce(announce_request) => { - self.handle_announce(remote_addr, &announce_request).await - } - Request::Scrape(scrape_request) => { - self.handle_scrape(&scrape_request).await - } - }; - - match res { - Ok(response) => { - let _ = self.send_response(remote_addr, response).await; - } - Err(e) => { - let _ = self.handle_error(e, remote_addr, transaction_id).await; - } 
- } - } - - async fn handle_connect(&self, remote_addr: SocketAddr, request: &ConnectRequest) -> Result { - let connection_id = get_connection_id(&remote_addr); - - let response = Response::from(ConnectResponse { - transaction_id: request.transaction_id, - connection_id, - }); - - Ok(response) - } - - async fn handle_announce(&self, remote_addr: SocketAddr, announce_request: &AnnounceRequest) -> Result { - let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request.clone()); - self.tracker.authenticate_request(&wrapped_announce_request.info_hash, &None).await?; - - let peer = TorrentPeer::from_udp_announce_request(&wrapped_announce_request.announce_request, remote_addr, self.tracker.config.get_ext_ip()); - - return match self.tracker.update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer).await { - Ok(torrent_stats) => { - // get all peers excluding the client_addr - let peers = match self.tracker.get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr).await { - Some(v) => v, - None => { - return Err(TorrentError::NoPeersFound); - } - }; - - let response = Response::from(AnnounceResponse { - transaction_id: wrapped_announce_request.announce_request.transaction_id, - announce_interval: AnnounceInterval(self.tracker.config.udp_tracker.announce_interval as i32), - leechers: NumberOfPeers(torrent_stats.leechers as i32), - seeders: NumberOfPeers(torrent_stats.seeders as i32), - peers: peers.iter().map(|peer| - ResponsePeer { - ip_address: peer.peer_addr.ip(), - port: Port(peer.peer_addr.port()) - }).collect() - }); - - Ok(response) + let response = handle_packet(remote_addr, data, self.tracker.clone()).await; + self.send_response(remote_addr, response).await; } - Err(e) => Err(e) - } - } - - async fn handle_scrape(&self, request: &ScrapeRequest) -> Result { - let db = self.tracker.get_torrents().await; - - let mut torrent_stats: Vec = Vec::new(); - - for info_hash in request.info_hashes.iter() { - let info_hash 
= InfoHash(info_hash.0); - let scrape_entry = match db.get(&info_hash) { - Some(torrent_info) => { - let (seeders, completed, leechers) = torrent_info.get_stats(); - - TorrentScrapeStatistics { - seeders: NumberOfPeers(seeders as i32), - completed: NumberOfDownloads(completed as i32), - leechers: NumberOfPeers(leechers as i32), - } - } - None => { - TorrentScrapeStatistics { - seeders: NumberOfPeers(0), - completed: NumberOfDownloads(0), - leechers: NumberOfPeers(0), - } - } - }; - - torrent_stats.push(scrape_entry); } - - let response = Response::from(ScrapeResponse { - transaction_id: request.transaction_id, - torrent_stats - }); - - Ok(response) - } - - async fn handle_error(&self, e: TorrentError, remote_addr: SocketAddr, tx_id: TransactionId) { - let err = ServerError::from(e); - self.send_error(remote_addr, tx_id, &err.to_string()).await; } - async fn send_response(&self, remote_addr: SocketAddr, response: Response) -> Result { + async fn send_response(&self, remote_addr: SocketAddr, response: Response) { debug!("sending response to: {:?}", &remote_addr); let buffer = vec![0u8; MAX_PACKET_SIZE]; @@ -189,37 +53,14 @@ impl UdpServer { let inner = cursor.get_ref(); debug!("{:?}", &inner[..position]); - match self.send_packet(&remote_addr, &inner[..position]).await { - Ok(byte_size) => Ok(byte_size), - Err(e) => { - debug!("{:?}", e); - Err(()) - } - } - } - Err(_) => { - debug!("could not write response to bytes."); - Err(()) + self.send_packet(&remote_addr, &inner[..position]).await; } + Err(_) => { debug!("could not write response to bytes."); } } } - async fn send_error(&self, remote_addr: SocketAddr, transaction_id: TransactionId, error_msg: &str) { - let response = Response::from(ErrorResponse { - transaction_id, - message: error_msg.to_string(), - }); - - let _ = self.send_response(remote_addr, response).await; - } - - async fn send_packet(&self, remote_addr: &SocketAddr, payload: &[u8]) -> Result { - match self.socket.send_to(payload, remote_addr).await { 
- Err(err) => { - debug!("failed to send a packet: {}", err); - Err(err) - }, - Ok(sz) => Ok(sz), - } + async fn send_packet(&self, remote_addr: &SocketAddr, payload: &[u8]) { + // doesn't matter if it reaches or not + let _ = self.socket.send_to(payload, remote_addr).await; } } diff --git a/src/tracker.rs b/src/tracker.rs index 33d7ddb5f..b8b4ac823 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -361,31 +361,26 @@ impl TorrentTracker { } } - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> Result { + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> TorrentStats { let mut torrents = self.torrents.write().await; let torrent_entry = match torrents.entry(info_hash.clone()) { Entry::Vacant(vacant) => { - Ok(vacant.insert(TorrentEntry::new())) + vacant.insert(TorrentEntry::new()) } Entry::Occupied(entry) => { - Ok(entry.into_mut()) + entry.into_mut() } }; - match torrent_entry { - Ok(torrent_entry) => { - torrent_entry.update_peer(peer); + torrent_entry.update_peer(peer); - let (seeders, completed, leechers) = torrent_entry.get_stats(); + let (seeders, completed, leechers) = torrent_entry.get_stats(); - Ok(TorrentStats { - seeders, - leechers, - completed, - }) - } - Err(e) => Err(e) + TorrentStats { + seeders, + leechers, + completed, } } From f290b294443c4cd2500596575440f8ad5ec52956 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 31 Jan 2022 03:52:38 +0100 Subject: [PATCH 0019/1003] feat: added ipv6 support for udp --- src/torrust_http_tracker/errors.rs | 1 - src/torrust_udp_tracker/errors.rs | 1 - src/torrust_udp_tracker/request.rs | 6 ++---- src/torrust_udp_tracker/server.rs | 24 +++++++++++------------- 4 files changed, 13 insertions(+), 19 deletions(-) diff --git a/src/torrust_http_tracker/errors.rs b/src/torrust_http_tracker/errors.rs index 76a3e2330..f0bedfe1b 100644 --- a/src/torrust_http_tracker/errors.rs +++ 
b/src/torrust_http_tracker/errors.rs @@ -1,6 +1,5 @@ use warp::reject::Reject; use thiserror::Error; -use crate::TorrentError; #[derive(Error, Debug)] pub enum ServerError { diff --git a/src/torrust_udp_tracker/errors.rs b/src/torrust_udp_tracker/errors.rs index d487a4487..fb29e969e 100644 --- a/src/torrust_udp_tracker/errors.rs +++ b/src/torrust_udp_tracker/errors.rs @@ -1,5 +1,4 @@ use thiserror::Error; -use crate::TorrentError; #[derive(Error, Debug)] pub enum ServerError { diff --git a/src/torrust_udp_tracker/request.rs b/src/torrust_udp_tracker/request.rs index f3757fe56..f3f67fdc1 100644 --- a/src/torrust_udp_tracker/request.rs +++ b/src/torrust_udp_tracker/request.rs @@ -1,7 +1,5 @@ -use std::net::Ipv4Addr; -use aquatic_udp_protocol::{AnnounceEvent, AnnounceRequest}; -use crate::{InfoHash, PeerId}; -use crate::torrust_udp_tracker::{Bytes, Port}; +use aquatic_udp_protocol::{AnnounceRequest}; +use crate::{InfoHash}; // struct AnnounceRequest { // pub connection_id: i64, diff --git a/src/torrust_udp_tracker/server.rs b/src/torrust_udp_tracker/server.rs index 94108c767..0da4ce140 100644 --- a/src/torrust_udp_tracker/server.rs +++ b/src/torrust_udp_tracker/server.rs @@ -1,18 +1,11 @@ -use log::debug; -use std; -use std::net::SocketAddr; -use std::sync::Arc; use std::io::Cursor; -use aquatic_udp_protocol::{AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, IpVersion, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId}; +use std::net::{SocketAddr}; +use std::sync::Arc; +use aquatic_udp_protocol::{IpVersion, Response}; +use log::debug; use tokio::net::UdpSocket; - -use crate::common::*; -use crate::utils::get_connection_id; -use crate::tracker::TorrentTracker; -use crate::{InfoHash, TorrentError, TorrentPeer}; -use crate::torrust_udp_tracker::errors::ServerError; +use crate::TorrentTracker; use 
crate::torrust_udp_tracker::{handle_packet, MAX_PACKET_SIZE}; -use crate::torrust_udp_tracker::request::{AnnounceRequestWrapper}; pub struct UdpServer { socket: UdpSocket, @@ -47,7 +40,12 @@ impl UdpServer { let buffer = vec![0u8; MAX_PACKET_SIZE]; let mut cursor = Cursor::new(buffer); - match response.write(&mut cursor, IpVersion::IPv4) { + let ip_version = match remote_addr { + SocketAddr::V4(_) => IpVersion::IPv4, + SocketAddr::V6(_) => IpVersion::IPv6 + }; + + match response.write(&mut cursor, ip_version) { Ok(_) => { let position = cursor.position() as usize; let inner = cursor.get_ref(); From 88f87192ff79756f346bb86b85cf4976d1428465 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 31 Jan 2022 03:56:07 +0100 Subject: [PATCH 0020/1003] refactor: removed unused file --- src/torrust_udp_tracker/mod.rs | 2 -- src/torrust_udp_tracker/response.rs | 6 ------ 2 files changed, 8 deletions(-) delete mode 100644 src/torrust_udp_tracker/response.rs diff --git a/src/torrust_udp_tracker/mod.rs b/src/torrust_udp_tracker/mod.rs index 1e7d5d68d..cd4b99f5b 100644 --- a/src/torrust_udp_tracker/mod.rs +++ b/src/torrust_udp_tracker/mod.rs @@ -1,12 +1,10 @@ pub mod errors; pub mod request; -pub mod response; pub mod server; pub mod handlers; pub use self::errors::*; pub use self::request::*; -pub use self::response::*; pub use self::server::*; pub use self::handlers::*; diff --git a/src/torrust_udp_tracker/response.rs b/src/torrust_udp_tracker/response.rs deleted file mode 100644 index 18b3c8807..000000000 --- a/src/torrust_udp_tracker/response.rs +++ /dev/null @@ -1,6 +0,0 @@ -use crate::torrust_udp_tracker::TransactionId; - -pub struct ErrorResponse { - pub transaction_id: TransactionId, - pub message: String, -} From d705fba1027ad859ac85200ed9be5787f719ac2e Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 31 Jan 2022 04:16:48 +0100 Subject: [PATCH 0021/1003] fix: http tracker optional path param (authentication) --- src/torrust_http_tracker/filters.rs | 9 ++++++--- 1 file 
changed, 6 insertions(+), 3 deletions(-) diff --git a/src/torrust_http_tracker/filters.rs b/src/torrust_http_tracker/filters.rs index 85e345b12..9e82fe946 100644 --- a/src/torrust_http_tracker/filters.rs +++ b/src/torrust_http_tracker/filters.rs @@ -45,10 +45,13 @@ async fn info_hashes(raw_query: String) -> WebResult> { } /// Pass Arc along -pub fn with_auth_key() -> impl Filter,), Error = warp::Rejection> + Clone { +pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() - .map(|key_string: String| { - AuthKey::from_string(&key_string) + .map(|key: String| { + AuthKey::from_string(&key) + }) + .or_else(|_| async { + Ok::<(Option,), Infallible>((None,)) }) } From 984ec793bc1091c615e83075879b4b94cd4f7a52 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 31 Jan 2022 04:33:48 +0100 Subject: [PATCH 0022/1003] feat: added root path for announcing to http tracker --- src/torrust_http_tracker/routes.rs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/torrust_http_tracker/routes.rs b/src/torrust_http_tracker/routes.rs index 9c7b45b4c..ad873e83e 100644 --- a/src/torrust_http_tracker/routes.rs +++ b/src/torrust_http_tracker/routes.rs @@ -6,12 +6,23 @@ use crate::torrust_http_tracker::{handle_announce, handle_error, handle_scrape, /// All routes pub fn routes(tracker: Arc,) -> impl Filter + Clone { - announce(tracker.clone()) + root(tracker.clone()) + .or(announce(tracker.clone())) .or(scrape(tracker.clone())) .recover(handle_error) } -/// GET /announce/ +/// GET / or / +fn root(tracker: Arc,) -> impl Filter + Clone { + warp::any() + .and(warp::filters::method::get()) + .and(with_announce_request()) + .and(with_auth_key()) + .and(with_tracker(tracker)) + .and_then(handle_announce) +} + +/// GET /announce or /announce/ fn announce(tracker: Arc,) -> impl Filter + Clone { warp::path::path("announce") .and(warp::filters::method::get()) From b8501f4da6a0aefdc48b68481c2672d4320656ea Mon Sep 17 00:00:00 
2001 From: Warm Beer Date: Thu, 3 Feb 2022 01:31:24 +0100 Subject: [PATCH 0023/1003] feat: added support for a reverse proxy setup using 'X-Forwarded-For' headers --- .gitignore | 1 + README.md | 33 ++++++++++++++++++++++--- src/config.rs | 2 ++ src/torrust_http_tracker/filters.rs | 15 +++++++++-- src/torrust_http_tracker/handlers.rs | 11 ++++++++- src/torrust_http_tracker/request.rs | 3 ++- src/torrust_udp_tracker/handlers.rs | 2 +- src/tracker.rs | 37 ++++++++++------------------ 8 files changed, 72 insertions(+), 32 deletions(-) diff --git a/.gitignore b/.gitignore index cc36c1e59..99a07430b 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ /database.db /.idea/ /config.toml +/data.db diff --git a/README.md b/README.md index 087edc6fd..e3f2ed5be 100644 --- a/README.md +++ b/README.md @@ -36,16 +36,43 @@ cargo build --release ``` ### Usage -1. Run the torrust-tracker once to create the `config.toml` file: +* Run the torrust-tracker once to create the `config.toml` file: ```bash ./target/release/torrust-tracker ``` -2. Edit the newly created config.toml file according to your liking, see [configuration documentation](https://torrust.github.io/torrust-documentation/torrust-tracker/config/). +* Edit the newly created config.toml file according to your liking, see [configuration documentation](https://torrust.github.io/torrust-documentation/torrust-tracker/config/). Eg: +```toml +log_level = "trace" +mode = "public" +db_path = "data.db" +cleanup_interval = 600 +external_ip = "YOUR_EXTERNAL_IP" +[udp_tracker] +bind_address = "0.0.0.0:6969" +announce_interval = 120 -3. 
Run the torrust-tracker again: +[http_tracker] +enabled = true +bind_address = "0.0.0.0:6969" +on_reverse_proxy = false +announce_interval = 120 +ssl_enabled = false +ssl_cert_path = "" +ssl_key_path = "" + +[http_api] +enabled = true +bind_address = "127.0.0.1:1212" + +[http_api.access_tokens] +admin = "MyAccessToken" +``` + + +* Run the torrust-tracker again: ```bash ./target/release/torrust-tracker ``` diff --git a/src/config.rs b/src/config.rs index 07eb418cd..429997fcc 100644 --- a/src/config.rs +++ b/src/config.rs @@ -25,6 +25,7 @@ pub struct UdpTrackerConfig { pub struct HttpTrackerConfig { pub enabled: bool, pub bind_address: String, + pub on_reverse_proxy: bool, pub announce_interval: u32, pub ssl_enabled: bool, #[serde(serialize_with = "none_as_empty_string")] @@ -136,6 +137,7 @@ impl Configuration { http_tracker: HttpTrackerConfig { enabled: false, bind_address: String::from("0.0.0.0:6969"), + on_reverse_proxy: false, announce_interval: 120, ssl_enabled: false, ssl_cert_path: None, diff --git a/src/torrust_http_tracker/filters.rs b/src/torrust_http_tracker/filters.rs index 9e82fe946..85a4d3a99 100644 --- a/src/torrust_http_tracker/filters.rs +++ b/src/torrust_http_tracker/filters.rs @@ -1,5 +1,5 @@ use std::convert::Infallible; -use std::net::SocketAddr; +use std::net::{IpAddr, SocketAddr}; use std::str::FromStr; use std::sync::Arc; use warp::{Filter, reject, Rejection}; @@ -60,16 +60,27 @@ pub fn with_announce_request() -> impl Filter() .and(with_info_hash()) .and(warp::addr::remote()) + .and(warp::header::optional::("X-Forwarded-For")) .and_then(announce_request) } /// Parse AnnounceRequest from raw AnnounceRequestQuery, InfoHash and Option -async fn announce_request(announce_request_query: AnnounceRequestQuery, info_hashes: Vec, remote_addr: Option) -> WebResult { +async fn announce_request(announce_request_query: AnnounceRequestQuery, info_hashes: Vec, remote_addr: Option, forwarded_for: Option) -> WebResult { if remote_addr.is_none() { return 
Err(reject::custom(ServerError::AddressNotFound)) } + // get first forwarded ip + let forwarded_ip = match forwarded_for { + None => None, + Some(forwarded_for_str) => { + forwarded_for_str.split(",").next() + .and_then(|ip_str| IpAddr::from_str(ip_str).ok()) + } + }; + Ok(AnnounceRequest { info_hash: info_hashes[0], peer_addr: remote_addr.unwrap(), + forwarded_ip, downloaded: announce_request_query.downloaded, uploaded: announce_request_query.uploaded, peer_id: announce_request_query.peer_id, diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index cb972d69a..c2eea52b6 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -34,7 +34,16 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option return Err(reject::custom(e)) } - let peer = TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); + if tracker.config.http_tracker.on_reverse_proxy && announce_request.forwarded_ip.is_none() { + return Err(reject::custom(ServerError::AddressNotFound)) + } + + let peer_ip = match tracker.config.http_tracker.on_reverse_proxy { + true => announce_request.forwarded_ip.unwrap(), + false => announce_request.peer_addr.ip() + }; + + let peer = TorrentPeer::from_http_announce_request(&announce_request, peer_ip, tracker.config.get_ext_ip()); let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer).await; // get all peers excluding the client_addr let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; diff --git a/src/torrust_http_tracker/request.rs b/src/torrust_http_tracker/request.rs index 8d90ede1f..2039de72e 100644 --- a/src/torrust_http_tracker/request.rs +++ b/src/torrust_http_tracker/request.rs @@ -1,4 +1,4 @@ -use std::net::SocketAddr; +use std::net::{IpAddr, SocketAddr}; use serde::{Deserialize}; use crate::InfoHash; use 
crate::torrust_http_tracker::Bytes; @@ -18,6 +18,7 @@ pub struct AnnounceRequestQuery { pub struct AnnounceRequest { pub info_hash: InfoHash, pub peer_addr: SocketAddr, + pub forwarded_ip: Option, pub downloaded: Bytes, pub uploaded: Bytes, pub peer_id: String, diff --git a/src/torrust_udp_tracker/handlers.rs b/src/torrust_udp_tracker/handlers.rs index 8549580d7..e10001527 100644 --- a/src/torrust_udp_tracker/handlers.rs +++ b/src/torrust_udp_tracker/handlers.rs @@ -79,7 +79,7 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc authenticate(&wrapped_announce_request.info_hash, tracker.clone()).await?; - let peer = TorrentPeer::from_udp_announce_request(&wrapped_announce_request.announce_request, remote_addr, tracker.config.get_ext_ip()); + let peer = TorrentPeer::from_udp_announce_request(&wrapped_announce_request.announce_request, remote_addr.ip(), tracker.config.get_ext_ip()); let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer).await; // get all peers excluding the client_addr diff --git a/src/tracker.rs b/src/tracker.rs index b8b4ac823..ed358e205 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -54,18 +54,8 @@ pub struct TorrentPeer { } impl TorrentPeer { - pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_addr: SocketAddr, peer_addr: Option) -> Self { - // Potentially substitute localhost IP with external IP - let peer_addr = match peer_addr { - None => SocketAddr::new(IpAddr::from(remote_addr.ip()), announce_request.port.0), - Some(peer_addr) => { - if remote_addr.ip().is_loopback() { - SocketAddr::new(IpAddr::from(peer_addr), announce_request.port.0) - } else { - SocketAddr::new(IpAddr::from(remote_addr.ip()), announce_request.port.0) - } - } - }; + pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { + let peer_addr = 
TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); TorrentPeer { peer_id: PeerId(announce_request.peer_id.0), @@ -78,18 +68,8 @@ impl TorrentPeer { } } - pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_addr: SocketAddr, peer_addr: Option) -> Self { - // Potentially substitute localhost IP with external IP - let peer_addr = match peer_addr { - None => SocketAddr::new(IpAddr::from(remote_addr.ip()), announce_request.port), - Some(peer_addr) => { - if remote_addr.ip().is_loopback() { - SocketAddr::new(IpAddr::from(peer_addr), announce_request.port) - } else { - SocketAddr::new(IpAddr::from(remote_addr.ip()), announce_request.port) - } - } - }; + pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { + let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); let event: AnnounceEvent = if let Some(event) = &announce_request.event { match event.as_ref() { @@ -113,6 +93,15 @@ impl TorrentPeer { } } + // potentially substitute localhost ip with external ip + pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: Option, port: u16) -> SocketAddr { + if remote_ip.is_loopback() && host_opt_ip.is_some() { + SocketAddr::new(host_opt_ip.unwrap(), port) + } else { + SocketAddr::new(remote_ip, port) + } + } + fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } fn is_completed(&self) -> bool { From a0c855a64c04c0312392a233a89c552b6d6465d0 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 3 Feb 2022 01:32:03 +0100 Subject: [PATCH 0024/1003] upped version --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index e3d0a06a4..01e3dbb76 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "torrust-tracker" -version = "2.1.1" +version = 
"2.1.2" license = "AGPL-3.0" authors = ["Mick van Dijke "] description = "A feature rich BitTorrent tracker." From 5ff339a898c827d5e0792e2c861f6d51bf281c54 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 9 Feb 2022 21:11:52 +0100 Subject: [PATCH 0025/1003] fix: announcing to http tracker with unusually long peer_id --- Cargo.lock | 2 +- src/common.rs | 26 +++++++++----------------- src/torrust_http_tracker/handlers.rs | 2 +- src/tracker.rs | 15 ++++++++++----- 4 files changed, 21 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 08a4722e6..8ba818230 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1742,7 +1742,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "2.1.1" +version = "2.1.2" dependencies = [ "aquatic_udp_protocol", "binascii", diff --git a/src/common.rs b/src/common.rs index d47205c3c..b73a0511b 100644 --- a/src/common.rs +++ b/src/common.rs @@ -29,9 +29,6 @@ pub struct NumberOfBytesDef(pub i64); #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, Ord)] pub struct InfoHash(pub [u8; 20]); -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, PartialOrd, Ord)] -pub struct PeerId(pub [u8; 20]); - impl InfoHash { pub fn to_string(&self) -> String { let mut buffer = [0u8; 40]; @@ -128,13 +125,16 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { } } +#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord)] +pub struct PeerId(pub String); + impl PeerId { pub fn get_client_name(&self) -> Option<&'static str> { - if self.0[0] == b'M' { + if self.0.as_bytes()[0] == b'M' { return Some("BitTorrent"); } - if self.0[0] == b'-' { - let name = match &self.0[1..3] { + if self.0.as_bytes()[0] == b'-' { + let name = match &self.0.as_bytes()[1..3] { b"AG" => "Ares", b"A~" => "Ares", b"AR" => "Arctic", @@ -211,8 +211,9 @@ impl Serialize for PeerId { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, { - let mut tmp = [0u8; 40]; - binascii::bin2hex(&self.0, &mut tmp).unwrap(); + let buff_size = 
self.0.as_bytes().len() * 2; + let mut tmp: Vec = vec![0; buff_size]; + binascii::bin2hex(&self.0.as_bytes(), &mut tmp).unwrap(); let id = std::str::from_utf8(&tmp).ok(); #[derive(Serialize)] @@ -228,12 +229,3 @@ impl Serialize for PeerId { obj.serialize(serializer) } } - -impl std::convert::From<&[u8]> for PeerId { - fn from(data: &[u8]) -> PeerId { - assert_eq!(data.len(), 20); - let mut ret = PeerId { 0: [0u8; 20] }; - ret.0.clone_from_slice(data); - return ret; - } -} diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index c2eea52b6..6a25016ad 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -96,7 +96,7 @@ pub async fn handle_error(r: Rejection) -> std::result::Result, interval: u32) -> WebResult { let http_peers: Vec = peers.iter().map(|peer| Peer { - peer_id: String::from_utf8_lossy(&peer.peer_id.0).to_string(), + peer_id: peer.peer_id.0.clone(), ip: peer.peer_addr.ip(), port: peer.peer_addr.port() }).collect(); diff --git a/src/tracker.rs b/src/tracker.rs index ed358e205..67acd7583 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -57,8 +57,10 @@ impl TorrentPeer { pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); + let peer_id = String::from_utf8_lossy(&announce_request.peer_id.0).parse().unwrap_or("unknown".to_string()); + TorrentPeer { - peer_id: PeerId(announce_request.peer_id.0), + peer_id: PeerId(peer_id), peer_addr, updated: std::time::Instant::now(), uploaded: announce_request.bytes_uploaded, @@ -71,6 +73,9 @@ impl TorrentPeer { pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, 
announce_request.port); + let max_string_size = announce_request.peer_id.len().clamp(0, 40); + let peer_id = announce_request.peer_id[..max_string_size].to_string(); + let event: AnnounceEvent = if let Some(event) = &announce_request.event { match event.as_ref() { "started" => AnnounceEvent::Started, @@ -83,7 +88,7 @@ impl TorrentPeer { }; TorrentPeer { - peer_id: PeerId::from(announce_request.peer_id.as_bytes()), + peer_id: PeerId(peer_id), peer_addr, updated: std::time::Instant::now(), uploaded: NumberOfBytes(announce_request.uploaded as i64), @@ -138,7 +143,7 @@ impl TorrentEntry { self.update_torrent_stats_with_peer(peer, peer_old); } _ => { - let peer_old = self.peers.insert(peer.peer_id, peer.clone()); + let peer_old = self.peers.insert(peer.peer_id.clone(), peer.clone()); self.update_torrent_stats_with_peer(peer, peer_old); } } @@ -394,12 +399,12 @@ impl TorrentTracker { if peer.is_seeder() { if peer.updated.elapsed() > FIVE_MINUTES { // remove seeders after 5 minutes since last update... - peers_to_remove.push(*peer_id); + peers_to_remove.push(peer_id.clone()); torrent_entry.seeders -= 1; } } else if peer.updated.elapsed() > TWO_HOURS { // remove peers after 2 hours since last update... 
- peers_to_remove.push(*peer_id); + peers_to_remove.push(peer_id.clone()); } } From 443b8889bf96233dba0ef0ceb7671c289caf5a30 Mon Sep 17 00:00:00 2001 From: WarmBeer Date: Thu, 10 Feb 2022 10:53:51 +0100 Subject: [PATCH 0026/1003] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index e3f2ed5be..64f88abc6 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,8 @@ Torrust Tracker is a lightweight but incredibly powerful and feature-rich BitTor * [X] Peer authentication using time-bound keys ### Implemented BEPs +* [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The BitTorrent Protocol +* [BEP 7](https://www.bittorrent.org/beps/bep_0007.html): IPv6 Support * [BEP 15](http://www.bittorrent.org/beps/bep_0015.html): UDP Tracker Protocol for BitTorrent * [BEP 23](http://bittorrent.org/beps/bep_0023.html): Tracker Returns Compact Peer Lists * [BEP 27](http://bittorrent.org/beps/bep_0027.html): Private Torrents From 672864e2a5606b747caff2c0ec0adb2b3e323775 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 11 Mar 2022 22:27:16 +0100 Subject: [PATCH 0027/1003] feat: print announce request in debug log --- src/torrust_http_tracker/handlers.rs | 2 ++ src/torrust_http_tracker/request.rs | 1 + 2 files changed, 3 insertions(+) diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 6a25016ad..9dc7a47c9 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -34,6 +34,8 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option return Err(reject::custom(e)) } + debug!("{:?}", announce_request); + if tracker.config.http_tracker.on_reverse_proxy && announce_request.forwarded_ip.is_none() { return Err(reject::custom(ServerError::AddressNotFound)) } diff --git a/src/torrust_http_tracker/request.rs b/src/torrust_http_tracker/request.rs index 2039de72e..9a34ce433 100644 --- a/src/torrust_http_tracker/request.rs +++ 
b/src/torrust_http_tracker/request.rs @@ -15,6 +15,7 @@ pub struct AnnounceRequestQuery { pub compact: Option, } +#[derive(Debug)] pub struct AnnounceRequest { pub info_hash: InfoHash, pub peer_addr: SocketAddr, From 9637661f10c52507884c0dc9b809d22ebcea9d50 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 14 Mar 2022 21:53:15 +0100 Subject: [PATCH 0028/1003] refactor: made all fields optional in http announce request except peer_id, info_hash and port --- src/torrust_http_tracker/filters.rs | 8 ++++---- src/torrust_http_tracker/request.rs | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/torrust_http_tracker/filters.rs b/src/torrust_http_tracker/filters.rs index 85a4d3a99..3e05d58ef 100644 --- a/src/torrust_http_tracker/filters.rs +++ b/src/torrust_http_tracker/filters.rs @@ -76,16 +76,16 @@ async fn announce_request(announce_request_query: AnnounceRequestQuery, info_has .and_then(|ip_str| IpAddr::from_str(ip_str).ok()) } }; - + Ok(AnnounceRequest { info_hash: info_hashes[0], peer_addr: remote_addr.unwrap(), forwarded_ip, - downloaded: announce_request_query.downloaded, - uploaded: announce_request_query.uploaded, + downloaded: announce_request_query.downloaded.unwrap_or(0), + uploaded: announce_request_query.uploaded.unwrap_or(0), peer_id: announce_request_query.peer_id, port: announce_request_query.port, - left: announce_request_query.left, + left: announce_request_query.left.unwrap_or(0), event: announce_request_query.event, compact: announce_request_query.compact }) diff --git a/src/torrust_http_tracker/request.rs b/src/torrust_http_tracker/request.rs index 9a34ce433..9eed15d74 100644 --- a/src/torrust_http_tracker/request.rs +++ b/src/torrust_http_tracker/request.rs @@ -5,12 +5,12 @@ use crate::torrust_http_tracker::Bytes; #[derive(Deserialize)] pub struct AnnounceRequestQuery { - pub downloaded: Bytes, - pub uploaded: Bytes, - pub key: String, + pub downloaded: Option, + pub uploaded: Option, + pub key: Option, pub peer_id: 
String, pub port: u16, - pub left: Bytes, + pub left: Option, pub event: Option, pub compact: Option, } From f2125f279087a9e4beb90c38b599ac02ac5497bb Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 14 Mar 2022 21:53:44 +0100 Subject: [PATCH 0029/1003] refactor: return http announce errors in bencoded format instead of json --- src/torrust_http_tracker/handlers.rs | 10 +++++----- src/torrust_http_tracker/response.rs | 6 ++++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 9dc7a47c9..84394a7b2 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -3,7 +3,7 @@ use std::convert::Infallible; use std::sync::Arc; use log::debug; use warp::{reject, Rejection, Reply}; -use warp::http::{Response, StatusCode}; +use warp::http::{Response}; use crate::{InfoHash, TorrentError, TorrentPeer, TorrentStats, TorrentTracker}; use crate::key_manager::AuthKey; use crate::torrust_http_tracker::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, WebResult}; @@ -87,11 +87,11 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option std::result::Result { if let Some(e) = r.find::() { debug!("{:?}", e); - let reply = warp::reply::json(&ErrorResponse { failure_reason: e.to_string() }); - Ok(warp::reply::with_status(reply, StatusCode::BAD_REQUEST)) + let body: String = ErrorResponse { failure_reason: e.to_string() }.write(); + Ok(Response::new(body)) } else { - let reply = warp::reply::json(&ErrorResponse { failure_reason: "internal server error".to_string() }); - Ok(warp::reply::with_status(reply, StatusCode::INTERNAL_SERVER_ERROR)) + let body: String = ErrorResponse { failure_reason: "internal server error".to_string() }.write(); + Ok(Response::new(body)) } } diff --git a/src/torrust_http_tracker/response.rs b/src/torrust_http_tracker/response.rs index 
df039a1c2..a80ee27c5 100644 --- a/src/torrust_http_tracker/response.rs +++ b/src/torrust_http_tracker/response.rs @@ -85,3 +85,9 @@ impl ScrapeResponse { pub struct ErrorResponse { pub failure_reason: String } + +impl ErrorResponse { + pub fn write(&self) -> String { + serde_bencode::to_string(&self).unwrap() + } +} From f596f226e9e1dd5662ba5f0079f0469d89e4f76d Mon Sep 17 00:00:00 2001 From: Power2All Date: Tue, 15 Mar 2022 13:23:46 +0100 Subject: [PATCH 0030/1003] Added: Separate HTTP/HTTPS/IPv4/IPv6 binding of sockets, persistant saving and loading of torrent hashes with completed count --- src/config.rs | 25 ++++++++ src/database.rs | 56 +++++++++++++++++- src/main.rs | 96 +++++++++++++++++++++++++------ src/torrust_udp_tracker/server.rs | 9 +++ src/tracker.rs | 28 ++++++++- 5 files changed, 193 insertions(+), 21 deletions(-) diff --git a/src/config.rs b/src/config.rs index 429997fcc..4efcb2825 100644 --- a/src/config.rs +++ b/src/config.rs @@ -17,6 +17,7 @@ pub enum TrackerServer { #[derive(Serialize, Deserialize)] pub struct UdpTrackerConfig { + pub enabled: bool, pub bind_address: String, pub announce_interval: u32, } @@ -28,6 +29,7 @@ pub struct HttpTrackerConfig { pub on_reverse_proxy: bool, pub announce_interval: u32, pub ssl_enabled: bool, + pub ssl_bind_address: String, #[serde(serialize_with = "none_as_empty_string")] pub ssl_cert_path: Option, #[serde(serialize_with = "none_as_empty_string")] @@ -52,10 +54,14 @@ pub struct Configuration { pub log_level: Option, pub mode: TrackerMode, pub db_path: String, + pub persistence: bool, pub cleanup_interval: Option, + pub cleanup_peerless: bool, pub external_ip: Option, pub udp_tracker: UdpTrackerConfig, + pub udp_tracker_ipv6: UdpTrackerConfig, pub http_tracker: HttpTrackerConfig, + pub http_tracker_ipv6: HttpTrackerConfig, pub http_api: HttpApiConfig, } @@ -128,18 +134,37 @@ impl Configuration { log_level: Option::from(String::from("info")), mode: TrackerMode::PublicMode, db_path: String::from("data.db"), + 
persistence: false, cleanup_interval: Some(600), + cleanup_peerless: true, external_ip: Some(String::from("0.0.0.0")), udp_tracker: UdpTrackerConfig { + enabled: true, bind_address: String::from("0.0.0.0:6969"), announce_interval: 120, }, + udp_tracker_ipv6: UdpTrackerConfig { + enabled: false, + bind_address: String::from("[::]:6969"), + announce_interval: 120, + }, http_tracker: HttpTrackerConfig { enabled: false, bind_address: String::from("0.0.0.0:6969"), on_reverse_proxy: false, announce_interval: 120, ssl_enabled: false, + ssl_bind_address: String::from("0.0.0.0:6868"), + ssl_cert_path: None, + ssl_key_path: None + }, + http_tracker_ipv6: HttpTrackerConfig { + enabled: false, + bind_address: String::from("[::]:6969"), + on_reverse_proxy: false, + announce_interval: 120, + ssl_enabled: false, + ssl_bind_address: String::from("[::]:6868"), ssl_cert_path: None, ssl_key_path: None }, diff --git a/src/database.rs b/src/database.rs index fbec824a0..827c7dbed 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,10 +1,11 @@ -use crate::{InfoHash, AUTH_KEY_LENGTH}; +use crate::{InfoHash, AUTH_KEY_LENGTH, TorrentTracker}; use log::debug; use r2d2_sqlite::{SqliteConnectionManager, rusqlite}; use r2d2::{Pool}; use r2d2_sqlite::rusqlite::NO_PARAMS; use crate::key_manager::AuthKey; use std::str::FromStr; +use std::sync::Arc; pub struct SqliteDatabase { pool: Pool @@ -32,6 +33,13 @@ impl SqliteDatabase { info_hash VARCHAR(20) NOT NULL UNIQUE );".to_string(); + let create_torrents_table = " + CREATE TABLE IF NOT EXISTS torrents ( + id integer PRIMARY KEY AUTOINCREMENT, + info_hash VARCHAR(20) NOT NULL UNIQUE, + completed INTEGER DEFAULT 0 NOT NULL + );".to_string(); + let create_keys_table = format!(" CREATE TABLE IF NOT EXISTS keys ( id integer PRIMARY KEY AUTOINCREMENT, @@ -43,7 +51,15 @@ impl SqliteDatabase { match conn.execute(&create_whitelist_table, NO_PARAMS) { Ok(updated) => { match conn.execute(&create_keys_table, NO_PARAMS) { - Ok(updated2) => Ok(updated + 
updated2), + Ok(updated2) => { + match conn.execute(&create_torrents_table, NO_PARAMS) { + Ok(updated3) => Ok(updated + updated2 + updated3), + Err(e) => { + debug!("{:?}", e); + Err(e) + } + } + } Err(e) => { debug!("{:?}", e); Err(e) @@ -57,6 +73,42 @@ impl SqliteDatabase { } } + pub async fn load_persistent_torrent_data(&self, tracker: Arc) -> Result { + let tracker_copy = tracker.clone(); + let conn = self.pool.get().unwrap(); + let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; + + let info_hash_iter = stmt.query_map(NO_PARAMS, |row| { + let info_hash: String = row.get(0)?; + let info_hash_converted = InfoHash::from_str(&info_hash).unwrap(); + let completed: u32 = row.get(1)?; + Ok((info_hash_converted, completed)) + })?; + + for info_hash_item in info_hash_iter { + let (info_hash, completed): (InfoHash, u32) = info_hash_item.unwrap(); + tracker_copy.add_torrent(&info_hash, 0u32, completed, 0u32).await; + } + + Ok(true) + } + + pub async fn save_persistent_torrent_data(&self, tracker: Arc) -> Result { + let tracker_copy = tracker.clone(); + let mut conn = self.pool.get().unwrap(); + let db = tracker_copy.get_torrents().await; + let db_transaction = conn.transaction()?; + let _: Vec<_> = db + .iter() + .map(|(info_hash, torrent_entry)| { + let (_seeders, completed, _leechers) = torrent_entry.get_stats(); + let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); + }) + .collect(); + let _ = db_transaction.commit(); + Ok(true) + } + pub async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { let conn = self.pool.get().unwrap(); let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; diff --git a/src/main.rs b/src/main.rs index d8a73854e..741f05061 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,6 @@ use std::net::SocketAddr; use log::{info}; -use torrust_tracker::{http_api_server, 
Configuration, TorrentTracker, UdpServer, HttpTrackerConfig, UdpTrackerConfig, HttpApiConfig, logging, TrackerServer}; +use torrust_tracker::{http_api_server, Configuration, TorrentTracker, UdpServer, HttpTrackerConfig, UdpTrackerConfig, HttpApiConfig, logging}; use std::sync::Arc; use tokio::task::JoinHandle; use torrust_tracker::torrust_http_tracker::server::HttpServer; @@ -19,6 +19,11 @@ async fn main() { // the singleton torrent tracker that gets passed to the HTTP and UDP server let tracker = Arc::new(TorrentTracker::new(config.clone())); + // Load torrents if enabled + if config.persistence { + load_torrents_into_memory(tracker.clone()).await; + } + // start torrent cleanup job (periodically removes old peers) let _torrent_cleanup_job = start_torrent_cleanup_job(config.clone(), tracker.clone()).unwrap(); @@ -27,21 +32,58 @@ async fn main() { let _api_server = start_api_server(&config.http_api, tracker.clone()); } - // check which tracker to run, UDP (Default) or HTTP - let _tracker_server = match config.get_tracker_server() { - TrackerServer::UDP => { - start_udp_tracker_server(&config.udp_tracker, tracker.clone()).await - } - TrackerServer::HTTP => { - start_http_tracker_server(&config.http_tracker, tracker.clone()) - } - }; + // start UDP tracker if enabled + if config.udp_tracker.enabled { + let _udp_server = start_udp_tracker_server(&config.udp_tracker, tracker.clone()).await; + } + + // start UDP tracker for IPv6 if enabled + if config.udp_tracker_ipv6.enabled { + let _udp_server_ipv6 = start_udp_ipv6_tracker_server(&config.udp_tracker_ipv6, tracker.clone()).await; + } + + // start HTTP tracker if enabled + if config.http_tracker.enabled { + let _http_server = start_http_tracker_server(&config.http_tracker, tracker.clone()); + } + // start HTTPS tracker if enabled + if config.http_tracker.ssl_enabled { + let _http_ssl_server = start_http_ssl_tracker_server(&config.http_tracker, tracker.clone()); + } + + //start HTTP tracker for IPv6 if enabled + if 
config.http_tracker_ipv6.enabled { + let _http_server_ipv6 = start_http_tracker_server(&config.http_tracker_ipv6, tracker.clone()); + } + + // start HTTPS tracker for IPv6 if enabled + if config.http_tracker_ipv6.ssl_enabled { + let _http_ssl_server_ipv6 = start_http_ssl_tracker_server(&config.http_tracker_ipv6, tracker.clone()); + } + + // handle the signals here let ctrl_c = tokio::signal::ctrl_c(); tokio::select! { - _ = _tracker_server => { panic!("Tracker server exited.") }, _ = ctrl_c => { info!("Torrust shutting down..") } } + + // Save torrents if enabled + if config.persistence { + save_torrents_into_memory(tracker.clone()).await; + } +} + +async fn load_torrents_into_memory(tracker: Arc) { + info!("Loading torrents from SQL into memory..."); + let _ = tracker.load_torrents(tracker.clone()).await; + info!("Torrents loaded"); +} + +async fn save_torrents_into_memory(tracker: Arc) { + info!("Saving torrents into SQL from memory..."); + let _ = tracker.save_torrents(tracker.clone()).await; + info!("Torrents saved"); } fn start_torrent_cleanup_job(config: Arc, tracker: Arc) -> Option> { @@ -77,19 +119,26 @@ fn start_api_server(config: &HttpApiConfig, tracker: Arc) -> Joi fn start_http_tracker_server(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { let http_tracker = HttpServer::new(tracker); let bind_addr = config.bind_address.parse::().unwrap(); - let ssl_enabled = config.ssl_enabled; + + tokio::spawn(async move { + // run with tls if ssl_enabled and cert and key path are set + info!("Starting HTTP server on: {}", bind_addr); + http_tracker.start(bind_addr).await; + }) +} + +fn start_http_ssl_tracker_server(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { + let http_tracker = HttpServer::new(tracker); + let ssl_bind_addr = config.ssl_bind_address.parse::().unwrap(); let ssl_cert_path = config.ssl_cert_path.clone(); let ssl_key_path = config.ssl_key_path.clone(); tokio::spawn(async move { // run with tls if ssl_enabled and cert and 
key path are set - if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting HTTPS server on: {} (TLS)", bind_addr); - http_tracker.start_tls(bind_addr, ssl_cert_path.as_ref().unwrap(), ssl_key_path.as_ref().unwrap()).await; - } else { - info!("Starting HTTP server on: {}", bind_addr); - http_tracker.start(bind_addr).await; + if ssl_cert_path.is_some() && ssl_key_path.is_some() { + info!("Starting HTTPS server on: {} (TLS)", ssl_bind_addr); + http_tracker.start_tls(ssl_bind_addr, ssl_cert_path.as_ref().unwrap(), ssl_key_path.as_ref().unwrap()).await; } }) } @@ -104,3 +153,14 @@ async fn start_udp_tracker_server(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { + let udp_server = UdpServer::new_ipv6(tracker).await.unwrap_or_else(|e| { + panic!("Could not start UDP server (IPv6): {}", e); + }); + + info!("Starting UDP server on: {}", config.bind_address); + tokio::spawn(async move { + udp_server.start().await; + }) +} diff --git a/src/torrust_udp_tracker/server.rs b/src/torrust_udp_tracker/server.rs index 0da4ce140..86cc9727f 100644 --- a/src/torrust_udp_tracker/server.rs +++ b/src/torrust_udp_tracker/server.rs @@ -22,6 +22,15 @@ impl UdpServer { }) } + pub async fn new_ipv6(tracker: Arc) -> Result { + let srv = UdpSocket::bind(&tracker.config.udp_tracker_ipv6.bind_address).await?; + + Ok(UdpServer { + socket: srv, + tracker, + }) + } + pub async fn start(&self) { loop { let mut data = [0; MAX_PACKET_SIZE]; diff --git a/src/tracker.rs b/src/tracker.rs index 67acd7583..c53367f95 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -321,6 +321,16 @@ impl TorrentTracker { Ok(()) } + // Loading the torrents into memory + pub async fn load_torrents(&self, tracker: Arc) -> Result { + self.database.load_persistent_torrent_data(tracker).await + } + + // Saving the torrents from memory + pub async fn save_torrents(&self, tracker: Arc) -> Result { + self.database.save_persistent_torrent_data(tracker).await + } + // Adding torrents is not 
relevant to public trackers. pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result { self.database.add_info_hash_to_whitelist(info_hash.clone()).await @@ -378,6 +388,22 @@ impl TorrentTracker { } } + pub async fn add_torrent(&self, info_hash: &InfoHash, seeders: u32, completed: u32, leechers: u32) -> TorrentStats { + let mut torrents = self.torrents.write().await; + + if !torrents.contains_key(&info_hash) { + let mut torrent_entry = TorrentEntry::new(); + torrent_entry.completed = completed; + torrents.insert(info_hash.clone(), torrent_entry); + } + + TorrentStats { + seeders, + completed, + leechers, + } + } + pub async fn get_torrents(&self) -> tokio::sync::RwLockReadGuard<'_, BTreeMap> { self.torrents.read().await } @@ -413,7 +439,7 @@ impl TorrentTracker { } } - if self.config.mode.clone() == TrackerMode::PublicMode { + if self.config.mode.clone() == TrackerMode::PublicMode && self.config.cleanup_peerless && !self.config.persistence { // peer-less torrents.. if torrent_entry.peers.len() == 0 { torrents_to_remove.push(k.clone()); From afe75a7d03c9ff81ba65997ceb5fd3ad62c0c264 Mon Sep 17 00:00:00 2001 From: Power2All Date: Tue, 15 Mar 2022 16:07:01 +0100 Subject: [PATCH 0031/1003] Changing the static binding options to a dynamical one, you can now bind to as many sockets as you like, and for HTTP it can be either HTTP or HTTPS, or One of the both at the same time with each a custom binding address --- src/config.rs | 68 ++++++++++------------------ src/main.rs | 67 ++++++++++----------------- src/torrust_http_tracker/handlers.rs | 6 +-- src/torrust_udp_tracker/handlers.rs | 2 +- src/torrust_udp_tracker/server.rs | 15 ++---- 5 files changed, 55 insertions(+), 103 deletions(-) diff --git a/src/config.rs b/src/config.rs index 4efcb2825..f23a13c16 100644 --- a/src/config.rs +++ b/src/config.rs @@ -19,15 +19,12 @@ pub enum TrackerServer { pub struct UdpTrackerConfig { pub enabled: bool, pub bind_address: String, - pub announce_interval: u32, } 
#[derive(Serialize, Deserialize)] pub struct HttpTrackerConfig { pub enabled: bool, pub bind_address: String, - pub on_reverse_proxy: bool, - pub announce_interval: u32, pub ssl_enabled: bool, pub ssl_bind_address: String, #[serde(serialize_with = "none_as_empty_string")] @@ -58,10 +55,10 @@ pub struct Configuration { pub cleanup_interval: Option, pub cleanup_peerless: bool, pub external_ip: Option, - pub udp_tracker: UdpTrackerConfig, - pub udp_tracker_ipv6: UdpTrackerConfig, - pub http_tracker: HttpTrackerConfig, - pub http_tracker_ipv6: HttpTrackerConfig, + pub announce_interval: u32, + pub on_reverse_proxy: bool, + pub udp_trackers: Vec, + pub http_trackers: Vec, pub http_api: HttpApiConfig, } @@ -130,7 +127,7 @@ impl Configuration { impl Configuration { pub fn default() -> Configuration { - Configuration { + let mut configuration = Configuration { log_level: Option::from(String::from("info")), mode: TrackerMode::PublicMode, db_path: String::from("data.db"), @@ -138,47 +135,38 @@ impl Configuration { cleanup_interval: Some(600), cleanup_peerless: true, external_ip: Some(String::from("0.0.0.0")), - udp_tracker: UdpTrackerConfig { + announce_interval: 0, + on_reverse_proxy: false, + udp_trackers: Vec::new(), + http_trackers: Vec::new(), + http_api: HttpApiConfig { enabled: true, - bind_address: String::from("0.0.0.0:6969"), - announce_interval: 120, + bind_address: String::from("127.0.0.1:1212"), + access_tokens: [(String::from("admin"), String::from("MyAccessToken"))].iter().cloned().collect(), }, - udp_tracker_ipv6: UdpTrackerConfig { + }; + configuration.udp_trackers.push( + UdpTrackerConfig{ enabled: false, - bind_address: String::from("[::]:6969"), - announce_interval: 120, - }, - http_tracker: HttpTrackerConfig { + bind_address: String::from("0.0.0.0:6969") + } + ); + configuration.http_trackers.push( + HttpTrackerConfig{ enabled: false, bind_address: String::from("0.0.0.0:6969"), - on_reverse_proxy: false, - announce_interval: 120, ssl_enabled: false, 
ssl_bind_address: String::from("0.0.0.0:6868"), ssl_cert_path: None, ssl_key_path: None - }, - http_tracker_ipv6: HttpTrackerConfig { - enabled: false, - bind_address: String::from("[::]:6969"), - on_reverse_proxy: false, - announce_interval: 120, - ssl_enabled: false, - ssl_bind_address: String::from("[::]:6868"), - ssl_cert_path: None, - ssl_key_path: None - }, - http_api: HttpApiConfig { - enabled: true, - bind_address: String::from("127.0.0.1:1212"), - access_tokens: [(String::from("admin"), String::from("MyAccessToken"))].iter().cloned().collect(), - }, - } + } + ); + configuration } pub fn verify(&self) -> Result<(), ConfigurationError> { // UDP is not secure for sending private keys - if (self.mode == TrackerMode::PrivateMode || self.mode == TrackerMode::PrivateListedMode) && self.get_tracker_server() == TrackerServer::UDP { + if self.mode == TrackerMode::PrivateMode || self.mode == TrackerMode::PrivateListedMode { return Err(ConfigurationError::TrackerModeIncompatible) } @@ -213,12 +201,4 @@ impl Configuration { fs::write("config.toml", toml_string).expect("Could not write to file!"); Ok(()) } - - pub fn get_tracker_server(&self) -> TrackerServer { - if self.http_tracker.enabled { - TrackerServer::HTTP - } else { - TrackerServer::UDP - } - } } diff --git a/src/main.rs b/src/main.rs index 741f05061..f6f9fefb8 100644 --- a/src/main.rs +++ b/src/main.rs @@ -32,34 +32,17 @@ async fn main() { let _api_server = start_api_server(&config.http_api, tracker.clone()); } - // start UDP tracker if enabled - if config.udp_tracker.enabled { - let _udp_server = start_udp_tracker_server(&config.udp_tracker, tracker.clone()).await; - } - - // start UDP tracker for IPv6 if enabled - if config.udp_tracker_ipv6.enabled { - let _udp_server_ipv6 = start_udp_ipv6_tracker_server(&config.udp_tracker_ipv6, tracker.clone()).await; - } - - // start HTTP tracker if enabled - if config.http_tracker.enabled { - let _http_server = start_http_tracker_server(&config.http_tracker, 
tracker.clone()); - } - - // start HTTPS tracker if enabled - if config.http_tracker.ssl_enabled { - let _http_ssl_server = start_http_ssl_tracker_server(&config.http_tracker, tracker.clone()); - } - - //start HTTP tracker for IPv6 if enabled - if config.http_tracker_ipv6.enabled { - let _http_server_ipv6 = start_http_tracker_server(&config.http_tracker_ipv6, tracker.clone()); + // start the udp blocks + for udp_tracker in &config.udp_trackers { + if udp_tracker.enabled { + let _ = start_udp_tracker_server(&udp_tracker, tracker.clone()).await; + } } - // start HTTPS tracker for IPv6 if enabled - if config.http_tracker_ipv6.ssl_enabled { - let _http_ssl_server_ipv6 = start_http_ssl_tracker_server(&config.http_tracker_ipv6, tracker.clone()); + // start the http blocks + for http_tracker in &config.http_trackers { + let _ = start_http_tracker_server(&http_tracker, tracker.clone(), true); + let _ = start_http_tracker_server(&http_tracker, tracker.clone(), false); } // handle the signals here @@ -116,14 +99,25 @@ fn start_api_server(config: &HttpApiConfig, tracker: Arc) -> Joi }) } -fn start_http_tracker_server(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { +fn start_http_tracker_server(config: &HttpTrackerConfig, tracker: Arc, ssl: bool) -> JoinHandle<()> { let http_tracker = HttpServer::new(tracker); + let enabled = config.enabled; let bind_addr = config.bind_address.parse::().unwrap(); + let ssl_enabled = config.ssl_enabled; + let ssl_bind_addr = config.ssl_bind_address.parse::().unwrap(); + let ssl_cert_path = config.ssl_cert_path.clone(); + let ssl_key_path = config.ssl_key_path.clone(); tokio::spawn(async move { // run with tls if ssl_enabled and cert and key path are set - info!("Starting HTTP server on: {}", bind_addr); - http_tracker.start(bind_addr).await; + if ssl && ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { + info!("Starting HTTPS server on: {} (TLS)", ssl_bind_addr); + http_tracker.start_tls(ssl_bind_addr, 
ssl_cert_path.as_ref().unwrap(), ssl_key_path.as_ref().unwrap()).await; + } + if !ssl && enabled { + info!("Starting HTTP server on: {}", bind_addr); + http_tracker.start(bind_addr).await; + } }) } @@ -137,14 +131,12 @@ fn start_http_ssl_tracker_server(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { - let udp_server = UdpServer::new(tracker).await.unwrap_or_else(|e| { + let udp_server = UdpServer::new(tracker, config).await.unwrap_or_else(|e| { panic!("Could not start UDP server: {}", e); }); @@ -153,14 +145,3 @@ async fn start_udp_tracker_server(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { - let udp_server = UdpServer::new_ipv6(tracker).await.unwrap_or_else(|e| { - panic!("Could not start UDP server (IPv6): {}", e); - }); - - info!("Starting UDP server on: {}", config.bind_address); - tokio::spawn(async move { - udp_server.start().await; - }) -} diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 84394a7b2..81b17bd24 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -36,11 +36,11 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option debug!("{:?}", announce_request); - if tracker.config.http_tracker.on_reverse_proxy && announce_request.forwarded_ip.is_none() { + if tracker.config.on_reverse_proxy && announce_request.forwarded_ip.is_none() { return Err(reject::custom(ServerError::AddressNotFound)) } - let peer_ip = match tracker.config.http_tracker.on_reverse_proxy { + let peer_ip = match tracker.config.on_reverse_proxy { true => announce_request.forwarded_ip.unwrap(), false => announce_request.peer_addr.ip() }; @@ -52,7 +52,7 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option if peers.is_none() { return Err(reject::custom(ServerError::NoPeersFound)) } // success response - let announce_interval = tracker.config.http_tracker.announce_interval; + let announce_interval = 
tracker.config.announce_interval; send_announce_response(&announce_request, torrent_stats, peers.unwrap(), announce_interval) } diff --git a/src/torrust_udp_tracker/handlers.rs b/src/torrust_udp_tracker/handlers.rs index e10001527..84cba3f45 100644 --- a/src/torrust_udp_tracker/handlers.rs +++ b/src/torrust_udp_tracker/handlers.rs @@ -90,7 +90,7 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc Ok(Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, - announce_interval: AnnounceInterval(tracker.config.udp_tracker.announce_interval as i32), + announce_interval: AnnounceInterval(tracker.config.announce_interval as i32), leechers: NumberOfPeers(torrent_stats.leechers as i32), seeders: NumberOfPeers(torrent_stats.seeders as i32), peers: peers.iter().map(|peer| diff --git a/src/torrust_udp_tracker/server.rs b/src/torrust_udp_tracker/server.rs index 86cc9727f..0e24fd81c 100644 --- a/src/torrust_udp_tracker/server.rs +++ b/src/torrust_udp_tracker/server.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use aquatic_udp_protocol::{IpVersion, Response}; use log::debug; use tokio::net::UdpSocket; -use crate::TorrentTracker; +use crate::{TorrentTracker, UdpTrackerConfig}; use crate::torrust_udp_tracker::{handle_packet, MAX_PACKET_SIZE}; pub struct UdpServer { @@ -13,17 +13,8 @@ pub struct UdpServer { } impl UdpServer { - pub async fn new(tracker: Arc) -> Result { - let srv = UdpSocket::bind(&tracker.config.udp_tracker.bind_address).await?; - - Ok(UdpServer { - socket: srv, - tracker, - }) - } - - pub async fn new_ipv6(tracker: Arc) -> Result { - let srv = UdpSocket::bind(&tracker.config.udp_tracker_ipv6.bind_address).await?; + pub async fn new(tracker: Arc, config: &UdpTrackerConfig) -> Result { + let srv = UdpSocket::bind(&config.bind_address).await?; Ok(UdpServer { socket: srv, From 16f9d8af15a160a2cddb75f2104799cb5bf58682 Mon Sep 17 00:00:00 2001 From: Power2All Date: Tue, 15 Mar 2022 16:20:51 
+0100 Subject: [PATCH 0032/1003] Readme updated with latest changes so far --- README.md | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index e3f2ed5be..9a85dddcd 100644 --- a/README.md +++ b/README.md @@ -7,11 +7,15 @@ Torrust Tracker is a lightweight but incredibly powerful and feature-rich BitTor ### Features * [X] UDP server -* [X] HTTP (optional SSL) server +* [X] HTTP and/or HTTPS (SSL) server +* [X] Multiple UDP and HTTP(S) blocks for socket binding possible +* [X] Full IPv4 and IPv6 support for both UDP and HTTP(S) * [X] Private & Whitelisted mode * [X] Built-in API * [X] Torrent whitelisting * [X] Peer authentication using time-bound keys +* [ ] NewTrackOn check supported +* [X] SQLite3 Persistent loading and saving of the torrent hashes and completed count ### Implemented BEPs * [BEP 15](http://www.bittorrent.org/beps/bep_0015.html): UDP Tracker Protocol for BitTorrent @@ -44,24 +48,27 @@ cargo build --release * Edit the newly created config.toml file according to your liking, see [configuration documentation](https://torrust.github.io/torrust-documentation/torrust-tracker/config/). 
Eg: ```toml -log_level = "trace" +log_level = "info" mode = "public" db_path = "data.db" +persistence = false cleanup_interval = 600 -external_ip = "YOUR_EXTERNAL_IP" +cleanup_peerless = true +external_ip = "0.0.0.0" +announce_interval = 0 +on_reverse_proxy = false -[udp_tracker] +[[udp_trackers]] +enabled = true bind_address = "0.0.0.0:6969" -announce_interval = 120 -[http_tracker] +[[http_trackers]] enabled = true bind_address = "0.0.0.0:6969" -on_reverse_proxy = false -announce_interval = 120 -ssl_enabled = false -ssl_cert_path = "" -ssl_key_path = "" +ssl_enabled = true +ssl_bind_address = "0.0.0.0:6868" +ssl_cert_path = "cert.pem" +ssl_key_path = "key.pem" [http_api] enabled = true @@ -78,7 +85,7 @@ admin = "MyAccessToken" ``` ### Tracker URL -Your tracker announce URL will be **udp://{tracker-ip:port}** or **https://{tracker-ip:port}/announce** depending on your tracker mode. +Your tracker announce URL will be **udp://{tracker-ip:port}** and/or **http://{tracker-ip:port}/announce** and/or **https://{tracker-ip:port}/announce** depending on your bindings. In private & private_listed mode, tracker keys are added after the tracker URL like: **https://{tracker-ip:port}/announce/{key}**. ### Built-in API @@ -87,3 +94,4 @@ Read the API documentation [here](https://torrust.github.io/torrust-documentatio ### Credits This project was a joint effort by [Nautilus Cyberneering GmbH](https://nautilus-cyberneering.de/) and [Dutch Bits](https://dutchbits.nl). Also thanks to [Naim A.](https://github.com/naim94a/udpt) and [greatest-ape](https://github.com/greatest-ape/aquatic) for some parts of the code. +Further added features and functions thanks to [Power2All](https://github.com/power2all). 
\ No newline at end of file From b91c5e4ead25ff41244d367293cb45d4795968f3 Mon Sep 17 00:00:00 2001 From: Power2All Date: Tue, 15 Mar 2022 16:55:46 +0100 Subject: [PATCH 0033/1003] Fixing a nasty peer_id bug --- src/torrust_http_tracker/errors.rs | 3 +++ src/torrust_http_tracker/handlers.rs | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/src/torrust_http_tracker/errors.rs b/src/torrust_http_tracker/errors.rs index f0bedfe1b..d8d6c7623 100644 --- a/src/torrust_http_tracker/errors.rs +++ b/src/torrust_http_tracker/errors.rs @@ -9,6 +9,9 @@ pub enum ServerError { #[error("info_hash is either missing or invalid")] InvalidInfoHash, + #[error("peer_id is either missing or invalid")] + InvalidPeerId, + #[error("could not find remote address")] AddressNotFound, diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 81b17bd24..21c92f1b1 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -45,6 +45,10 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option false => announce_request.peer_addr.ip() }; + if announce_request.peer_id.len() != 40 { + return Err(reject::custom(ServerError::InvalidPeerId)) + } + let peer = TorrentPeer::from_http_announce_request(&announce_request, peer_ip, tracker.config.get_ext_ip()); let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer).await; // get all peers excluding the client_addr From ca2d118a573b6156ea1812340db4c6604dae4073 Mon Sep 17 00:00:00 2001 From: Power2All Date: Tue, 15 Mar 2022 17:15:07 +0100 Subject: [PATCH 0034/1003] Fixing the peer timeout for both seed and peers, and put the config into the configuration as expected --- src/config.rs | 6 ++++-- src/tracker.rs | 7 ++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/src/config.rs b/src/config.rs index f23a13c16..3ec55f857 100644 --- a/src/config.rs +++ b/src/config.rs @@ -56,6 +56,7 @@ pub struct 
Configuration { pub cleanup_peerless: bool, pub external_ip: Option, pub announce_interval: u32, + pub peer_timeout: u32, pub on_reverse_proxy: bool, pub udp_trackers: Vec, pub http_trackers: Vec, @@ -135,7 +136,8 @@ impl Configuration { cleanup_interval: Some(600), cleanup_peerless: true, external_ip: Some(String::from("0.0.0.0")), - announce_interval: 0, + announce_interval: 120, + peer_timeout: 900, on_reverse_proxy: false, udp_trackers: Vec::new(), http_trackers: Vec::new(), @@ -143,7 +145,7 @@ impl Configuration { enabled: true, bind_address: String::from("127.0.0.1:1212"), access_tokens: [(String::from("admin"), String::from("MyAccessToken"))].iter().cloned().collect(), - }, + } }; configuration.udp_trackers.push( UdpTrackerConfig{ diff --git a/src/tracker.rs b/src/tracker.rs index c53367f95..80190dac4 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -15,9 +15,6 @@ use crate::key_manager::AuthKey; use r2d2_sqlite::rusqlite; use crate::torrust_http_tracker::AnnounceRequest; -const TWO_HOURS: std::time::Duration = std::time::Duration::from_secs(3600 * 2); -const FIVE_MINUTES: std::time::Duration = std::time::Duration::from_secs(300); - #[derive(Serialize, Deserialize, Clone, PartialEq)] pub enum TrackerMode { // Will track every new info hash and serve every peer. @@ -423,12 +420,12 @@ impl TorrentTracker { for (peer_id, peer) in torrent_peers.iter() { if peer.is_seeder() { - if peer.updated.elapsed() > FIVE_MINUTES { + if peer.updated.elapsed() > std::time::Duration::from_secs(self.config.peer_timeout as u64) { // remove seeders after 5 minutes since last update... peers_to_remove.push(peer_id.clone()); torrent_entry.seeders -= 1; } - } else if peer.updated.elapsed() > TWO_HOURS { + } else if peer.updated.elapsed() > std::time::Duration::from_secs(self.config.peer_timeout as u64) { // remove peers after 2 hours since last update... 
peers_to_remove.push(peer_id.clone()); } From e9e992fe11d0902047288b67b37c71b421aa89be Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Tue, 15 Mar 2022 20:43:10 +0100 Subject: [PATCH 0035/1003] fix: correctly decoding peer_id from http announce request --- src/common.rs | 20 +++++++---- src/torrust_http_tracker/errors.rs | 3 ++ src/torrust_http_tracker/filters.rs | 52 +++++++++++++++++++++++++--- src/torrust_http_tracker/handlers.rs | 2 +- src/torrust_http_tracker/request.rs | 5 ++- src/tracker.rs | 9 ++--- 6 files changed, 70 insertions(+), 21 deletions(-) diff --git a/src/common.rs b/src/common.rs index b73a0511b..0037f058f 100644 --- a/src/common.rs +++ b/src/common.rs @@ -126,15 +126,23 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { } #[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord)] -pub struct PeerId(pub String); +pub struct PeerId(pub [u8; 20]); + +impl PeerId { + pub fn to_string(&self) -> String { + let mut buffer = [0u8; 20]; + let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); + String::from(std::str::from_utf8(bytes_out).unwrap()) + } +} impl PeerId { pub fn get_client_name(&self) -> Option<&'static str> { - if self.0.as_bytes()[0] == b'M' { + if self.0[0] == b'M' { return Some("BitTorrent"); } - if self.0.as_bytes()[0] == b'-' { - let name = match &self.0.as_bytes()[1..3] { + if self.0[0] == b'-' { + let name = match &self.0[1..3] { b"AG" => "Ares", b"A~" => "Ares", b"AR" => "Arctic", @@ -211,9 +219,9 @@ impl Serialize for PeerId { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, { - let buff_size = self.0.as_bytes().len() * 2; + let buff_size = self.0.len() * 2; let mut tmp: Vec = vec![0; buff_size]; - binascii::bin2hex(&self.0.as_bytes(), &mut tmp).unwrap(); + binascii::bin2hex(&self.0, &mut tmp).unwrap(); let id = std::str::from_utf8(&tmp).ok(); #[derive(Serialize)] diff --git a/src/torrust_http_tracker/errors.rs b/src/torrust_http_tracker/errors.rs index f0bedfe1b..d8d6c7623 100644 
--- a/src/torrust_http_tracker/errors.rs +++ b/src/torrust_http_tracker/errors.rs @@ -9,6 +9,9 @@ pub enum ServerError { #[error("info_hash is either missing or invalid")] InvalidInfoHash, + #[error("peer_id is either missing or invalid")] + InvalidPeerId, + #[error("could not find remote address")] AddressNotFound, diff --git a/src/torrust_http_tracker/filters.rs b/src/torrust_http_tracker/filters.rs index 85a4d3a99..24d155238 100644 --- a/src/torrust_http_tracker/filters.rs +++ b/src/torrust_http_tracker/filters.rs @@ -3,7 +3,7 @@ use std::net::{IpAddr, SocketAddr}; use std::str::FromStr; use std::sync::Arc; use warp::{Filter, reject, Rejection}; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS, TorrentTracker}; +use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId, TorrentTracker}; use crate::key_manager::AuthKey; use crate::torrust_http_tracker::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; @@ -44,6 +44,49 @@ async fn info_hashes(raw_query: String) -> WebResult> { } } +/// Check for PeerId +pub fn with_peer_id() -> impl Filter + Clone { + warp::filters::query::raw() + .and_then(peer_id) +} + +/// Parse PeerId from raw query string +async fn peer_id(raw_query: String) -> WebResult { + // put all query params in a vec + let split_raw_query: Vec<&str> = raw_query.split("&").collect(); + + let mut peer_id: Option = None; + + for v in split_raw_query { + // look for the peer_id param + if v.contains("peer_id") { + // get raw percent_encoded peer_id + let raw_peer_id = v.split("=").collect::>()[1]; + + // decode peer_id + let peer_id_bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); + + // peer_id must be 20 bytes + if peer_id_bytes.len() > 20 { + return Err(reject::custom(ServerError::InvalidPeerId)); + } + + // clone peer_id_bytes into fixed length array + let mut byte_arr: [u8; 20] = Default::default(); + byte_arr.clone_from_slice(peer_id_bytes.as_slice()); + + peer_id = Some(PeerId(byte_arr)); + break; + } + } + + if 
peer_id.is_none() { + Err(reject::custom(ServerError::InvalidPeerId)) + } else { + Ok(peer_id.unwrap()) + } +} + /// Pass Arc along pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() @@ -59,13 +102,14 @@ pub fn with_auth_key() -> impl Filter,), Error = Infa pub fn with_announce_request() -> impl Filter + Clone { warp::filters::query::query::() .and(with_info_hash()) + .and(with_peer_id()) .and(warp::addr::remote()) .and(warp::header::optional::("X-Forwarded-For")) .and_then(announce_request) } /// Parse AnnounceRequest from raw AnnounceRequestQuery, InfoHash and Option -async fn announce_request(announce_request_query: AnnounceRequestQuery, info_hashes: Vec, remote_addr: Option, forwarded_for: Option) -> WebResult { +async fn announce_request(announce_request_query: AnnounceRequestQuery, info_hashes: Vec, peer_id: PeerId, remote_addr: Option, forwarded_for: Option) -> WebResult { if remote_addr.is_none() { return Err(reject::custom(ServerError::AddressNotFound)) } // get first forwarded ip @@ -76,14 +120,14 @@ async fn announce_request(announce_request_query: AnnounceRequestQuery, info_has .and_then(|ip_str| IpAddr::from_str(ip_str).ok()) } }; - + Ok(AnnounceRequest { info_hash: info_hashes[0], peer_addr: remote_addr.unwrap(), forwarded_ip, downloaded: announce_request_query.downloaded, uploaded: announce_request_query.uploaded, - peer_id: announce_request_query.peer_id, + peer_id, port: announce_request_query.port, left: announce_request_query.left, event: announce_request_query.event, diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 6a25016ad..922577c85 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -96,7 +96,7 @@ pub async fn handle_error(r: Rejection) -> std::result::Result, interval: u32) -> WebResult { let http_peers: Vec = peers.iter().map(|peer| Peer { - peer_id: peer.peer_id.0.clone(), + peer_id: peer.peer_id.to_string(), 
ip: peer.peer_addr.ip(), port: peer.peer_addr.port() }).collect(); diff --git a/src/torrust_http_tracker/request.rs b/src/torrust_http_tracker/request.rs index 2039de72e..4e40b604c 100644 --- a/src/torrust_http_tracker/request.rs +++ b/src/torrust_http_tracker/request.rs @@ -1,6 +1,6 @@ use std::net::{IpAddr, SocketAddr}; use serde::{Deserialize}; -use crate::InfoHash; +use crate::{InfoHash, PeerId}; use crate::torrust_http_tracker::Bytes; #[derive(Deserialize)] @@ -8,7 +8,6 @@ pub struct AnnounceRequestQuery { pub downloaded: Bytes, pub uploaded: Bytes, pub key: String, - pub peer_id: String, pub port: u16, pub left: Bytes, pub event: Option, @@ -21,7 +20,7 @@ pub struct AnnounceRequest { pub forwarded_ip: Option, pub downloaded: Bytes, pub uploaded: Bytes, - pub peer_id: String, + pub peer_id: PeerId, pub port: u16, pub left: Bytes, pub event: Option, diff --git a/src/tracker.rs b/src/tracker.rs index 67acd7583..2a4f51189 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -57,10 +57,8 @@ impl TorrentPeer { pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); - let peer_id = String::from_utf8_lossy(&announce_request.peer_id.0).parse().unwrap_or("unknown".to_string()); - TorrentPeer { - peer_id: PeerId(peer_id), + peer_id: PeerId(announce_request.peer_id.0), peer_addr, updated: std::time::Instant::now(), uploaded: announce_request.bytes_uploaded, @@ -73,9 +71,6 @@ impl TorrentPeer { pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); - let max_string_size = announce_request.peer_id.len().clamp(0, 40); - let peer_id = announce_request.peer_id[..max_string_size].to_string(); 
- let event: AnnounceEvent = if let Some(event) = &announce_request.event { match event.as_ref() { "started" => AnnounceEvent::Started, @@ -88,7 +83,7 @@ impl TorrentPeer { }; TorrentPeer { - peer_id: PeerId(peer_id), + peer_id: announce_request.peer_id.clone(), peer_addr, updated: std::time::Instant::now(), uploaded: NumberOfBytes(announce_request.uploaded as i64), From e1ccb1b55ae337facb313f8045896d84968845f6 Mon Sep 17 00:00:00 2001 From: Power2All Date: Tue, 15 Mar 2022 22:51:24 +0100 Subject: [PATCH 0036/1003] Revert "Fixing a nasty peer_id bug" This reverts commit b91c5e4ead25ff41244d367293cb45d4795968f3. --- src/torrust_http_tracker/errors.rs | 3 --- src/torrust_http_tracker/handlers.rs | 4 ---- 2 files changed, 7 deletions(-) diff --git a/src/torrust_http_tracker/errors.rs b/src/torrust_http_tracker/errors.rs index d8d6c7623..f0bedfe1b 100644 --- a/src/torrust_http_tracker/errors.rs +++ b/src/torrust_http_tracker/errors.rs @@ -9,9 +9,6 @@ pub enum ServerError { #[error("info_hash is either missing or invalid")] InvalidInfoHash, - #[error("peer_id is either missing or invalid")] - InvalidPeerId, - #[error("could not find remote address")] AddressNotFound, diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 21c92f1b1..81b17bd24 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -45,10 +45,6 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option false => announce_request.peer_addr.ip() }; - if announce_request.peer_id.len() != 40 { - return Err(reject::custom(ServerError::InvalidPeerId)) - } - let peer = TorrentPeer::from_http_announce_request(&announce_request, peer_ip, tracker.config.get_ext_ip()); let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer).await; // get all peers excluding the client_addr From 5a075178a6bf7f3845c8879b1a056c66f41d7a73 Mon Sep 17 00:00:00 2001 From: Power2All Date: 
Tue, 15 Mar 2022 23:03:53 +0100 Subject: [PATCH 0037/1003] Applied the Peer_ID fix from master of torrust tracker to development --- src/common.rs | 20 +++++++---- src/torrust_http_tracker/errors.rs | 3 ++ src/torrust_http_tracker/filters.rs | 52 +++++++++++++++++++++++++--- src/torrust_http_tracker/handlers.rs | 2 +- src/torrust_http_tracker/request.rs | 5 ++- src/tracker.rs | 9 ++--- 6 files changed, 69 insertions(+), 22 deletions(-) diff --git a/src/common.rs b/src/common.rs index b73a0511b..0037f058f 100644 --- a/src/common.rs +++ b/src/common.rs @@ -126,15 +126,23 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { } #[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord)] -pub struct PeerId(pub String); +pub struct PeerId(pub [u8; 20]); + +impl PeerId { + pub fn to_string(&self) -> String { + let mut buffer = [0u8; 20]; + let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); + String::from(std::str::from_utf8(bytes_out).unwrap()) + } +} impl PeerId { pub fn get_client_name(&self) -> Option<&'static str> { - if self.0.as_bytes()[0] == b'M' { + if self.0[0] == b'M' { return Some("BitTorrent"); } - if self.0.as_bytes()[0] == b'-' { - let name = match &self.0.as_bytes()[1..3] { + if self.0[0] == b'-' { + let name = match &self.0[1..3] { b"AG" => "Ares", b"A~" => "Ares", b"AR" => "Arctic", @@ -211,9 +219,9 @@ impl Serialize for PeerId { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, { - let buff_size = self.0.as_bytes().len() * 2; + let buff_size = self.0.len() * 2; let mut tmp: Vec = vec![0; buff_size]; - binascii::bin2hex(&self.0.as_bytes(), &mut tmp).unwrap(); + binascii::bin2hex(&self.0, &mut tmp).unwrap(); let id = std::str::from_utf8(&tmp).ok(); #[derive(Serialize)] diff --git a/src/torrust_http_tracker/errors.rs b/src/torrust_http_tracker/errors.rs index f0bedfe1b..d8d6c7623 100644 --- a/src/torrust_http_tracker/errors.rs +++ b/src/torrust_http_tracker/errors.rs @@ -9,6 +9,9 @@ pub enum ServerError { 
#[error("info_hash is either missing or invalid")] InvalidInfoHash, + #[error("peer_id is either missing or invalid")] + InvalidPeerId, + #[error("could not find remote address")] AddressNotFound, diff --git a/src/torrust_http_tracker/filters.rs b/src/torrust_http_tracker/filters.rs index 3e05d58ef..0e2b364b5 100644 --- a/src/torrust_http_tracker/filters.rs +++ b/src/torrust_http_tracker/filters.rs @@ -3,7 +3,7 @@ use std::net::{IpAddr, SocketAddr}; use std::str::FromStr; use std::sync::Arc; use warp::{Filter, reject, Rejection}; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS, TorrentTracker}; +use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId, TorrentTracker}; use crate::key_manager::AuthKey; use crate::torrust_http_tracker::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; @@ -44,6 +44,49 @@ async fn info_hashes(raw_query: String) -> WebResult> { } } +/// Check for PeerId +pub fn with_peer_id() -> impl Filter + Clone { + warp::filters::query::raw() + .and_then(peer_id) +} + +/// Parse PeerId from raw query string +async fn peer_id(raw_query: String) -> WebResult { + // put all query params in a vec + let split_raw_query: Vec<&str> = raw_query.split("&").collect(); + + let mut peer_id: Option = None; + + for v in split_raw_query { + // look for the peer_id param + if v.contains("peer_id") { + // get raw percent_encoded peer_id + let raw_peer_id = v.split("=").collect::>()[1]; + + // decode peer_id + let peer_id_bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); + + // peer_id must be 20 bytes + if peer_id_bytes.len() > 20 { + return Err(reject::custom(ServerError::InvalidPeerId)); + } + + // clone peer_id_bytes into fixed length array + let mut byte_arr: [u8; 20] = Default::default(); + byte_arr.clone_from_slice(peer_id_bytes.as_slice()); + + peer_id = Some(PeerId(byte_arr)); + break; + } + } + + if peer_id.is_none() { + Err(reject::custom(ServerError::InvalidPeerId)) + } else { + Ok(peer_id.unwrap()) + } +} + /// 
Pass Arc along pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() @@ -59,15 +102,14 @@ pub fn with_auth_key() -> impl Filter,), Error = Infa pub fn with_announce_request() -> impl Filter + Clone { warp::filters::query::query::() .and(with_info_hash()) + .and(with_peer_id()) .and(warp::addr::remote()) .and(warp::header::optional::("X-Forwarded-For")) .and_then(announce_request) } /// Parse AnnounceRequest from raw AnnounceRequestQuery, InfoHash and Option -async fn announce_request(announce_request_query: AnnounceRequestQuery, info_hashes: Vec, remote_addr: Option, forwarded_for: Option) -> WebResult { - if remote_addr.is_none() { return Err(reject::custom(ServerError::AddressNotFound)) } - +async fn announce_request(announce_request_query: AnnounceRequestQuery, info_hashes: Vec, peer_id: PeerId, remote_addr: Option, forwarded_for: Option) -> WebResult { // get first forwarded ip let forwarded_ip = match forwarded_for { None => None, @@ -83,7 +125,7 @@ async fn announce_request(announce_request_query: AnnounceRequestQuery, info_has forwarded_ip, downloaded: announce_request_query.downloaded.unwrap_or(0), uploaded: announce_request_query.uploaded.unwrap_or(0), - peer_id: announce_request_query.peer_id, + peer_id, port: announce_request_query.port, left: announce_request_query.left.unwrap_or(0), event: announce_request_query.event, diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 81b17bd24..586c520f8 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -98,7 +98,7 @@ pub async fn handle_error(r: Rejection) -> std::result::Result, interval: u32) -> WebResult { let http_peers: Vec = peers.iter().map(|peer| Peer { - peer_id: peer.peer_id.0.clone(), + peer_id: peer.peer_id.to_string(), ip: peer.peer_addr.ip(), port: peer.peer_addr.port() }).collect(); diff --git a/src/torrust_http_tracker/request.rs b/src/torrust_http_tracker/request.rs index 
9eed15d74..6630e5660 100644 --- a/src/torrust_http_tracker/request.rs +++ b/src/torrust_http_tracker/request.rs @@ -1,6 +1,6 @@ use std::net::{IpAddr, SocketAddr}; use serde::{Deserialize}; -use crate::InfoHash; +use crate::{InfoHash, PeerId}; use crate::torrust_http_tracker::Bytes; #[derive(Deserialize)] @@ -8,7 +8,6 @@ pub struct AnnounceRequestQuery { pub downloaded: Option, pub uploaded: Option, pub key: Option, - pub peer_id: String, pub port: u16, pub left: Option, pub event: Option, @@ -22,7 +21,7 @@ pub struct AnnounceRequest { pub forwarded_ip: Option, pub downloaded: Bytes, pub uploaded: Bytes, - pub peer_id: String, + pub peer_id: PeerId, pub port: u16, pub left: Bytes, pub event: Option, diff --git a/src/tracker.rs b/src/tracker.rs index 80190dac4..3a717f239 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -54,10 +54,8 @@ impl TorrentPeer { pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); - let peer_id = String::from_utf8_lossy(&announce_request.peer_id.0).parse().unwrap_or("unknown".to_string()); - TorrentPeer { - peer_id: PeerId(peer_id), + peer_id: PeerId(announce_request.peer_id.0), peer_addr, updated: std::time::Instant::now(), uploaded: announce_request.bytes_uploaded, @@ -70,9 +68,6 @@ impl TorrentPeer { pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); - let max_string_size = announce_request.peer_id.len().clamp(0, 40); - let peer_id = announce_request.peer_id[..max_string_size].to_string(); - let event: AnnounceEvent = if let Some(event) = &announce_request.event { match event.as_ref() { "started" => AnnounceEvent::Started, @@ -85,7 +80,7 @@ 
impl TorrentPeer { }; TorrentPeer { - peer_id: PeerId(peer_id), + peer_id: announce_request.peer_id.clone(), peer_addr, updated: std::time::Instant::now(), uploaded: NumberOfBytes(announce_request.uploaded as i64), From 18e7057fc02565a34711cc2c3f745c32e6b3ebad Mon Sep 17 00:00:00 2001 From: Power2All Date: Tue, 15 Mar 2022 23:06:13 +0100 Subject: [PATCH 0038/1003] Removed a unneeded function --- src/main.rs | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/src/main.rs b/src/main.rs index f6f9fefb8..bd5ff3b40 100644 --- a/src/main.rs +++ b/src/main.rs @@ -121,20 +121,6 @@ fn start_http_tracker_server(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { - let http_tracker = HttpServer::new(tracker); - let ssl_bind_addr = config.ssl_bind_address.parse::().unwrap(); - let ssl_cert_path = config.ssl_cert_path.clone(); - let ssl_key_path = config.ssl_key_path.clone(); - - - tokio::spawn(async move { - // run with tls if ssl_enabled and cert and key path are set - if ssl_cert_path.is_some() && ssl_key_path.is_some() { - } - }) -} - async fn start_udp_tracker_server(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { let udp_server = UdpServer::new(tracker, config).await.unwrap_or_else(|e| { panic!("Could not start UDP server: {}", e); From 74649d4acb8a29c1641bd9fe5ec8947acaa9a69f Mon Sep 17 00:00:00 2001 From: Power2All Date: Tue, 15 Mar 2022 23:12:54 +0100 Subject: [PATCH 0039/1003] Trying to debug --- src/common.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/common.rs b/src/common.rs index 0037f058f..e36013db0 100644 --- a/src/common.rs +++ b/src/common.rs @@ -1,5 +1,6 @@ use serde::{Deserialize, Serialize}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use log::info; pub const MAX_SCRAPE_TORRENTS: u8 = 74; pub const AUTH_KEY_LENGTH: usize = 32; @@ -132,6 +133,7 @@ impl PeerId { pub fn to_string(&self) -> String { let mut buffer = [0u8; 20]; let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); + 
info!("{:#?}", bytes_out); String::from(std::str::from_utf8(bytes_out).unwrap()) } } From d3e96881ff9f8e8f4ac2305b0286d6dc5af98466 Mon Sep 17 00:00:00 2001 From: Power2All Date: Tue, 15 Mar 2022 23:30:20 +0100 Subject: [PATCH 0040/1003] Solving a bug --- src/common.rs | 3 --- src/torrust_http_tracker/filters.rs | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/src/common.rs b/src/common.rs index e36013db0..92bd1d1e9 100644 --- a/src/common.rs +++ b/src/common.rs @@ -1,6 +1,5 @@ use serde::{Deserialize, Serialize}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use log::info; pub const MAX_SCRAPE_TORRENTS: u8 = 74; pub const AUTH_KEY_LENGTH: usize = 32; @@ -85,7 +84,6 @@ impl serde::ser::Serialize for InfoHash { let mut buffer = [0u8; 40]; let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); let str_out = std::str::from_utf8(bytes_out).unwrap(); - serializer.serialize_str(str_out) } } @@ -133,7 +131,6 @@ impl PeerId { pub fn to_string(&self) -> String { let mut buffer = [0u8; 20]; let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); - info!("{:#?}", bytes_out); String::from(std::str::from_utf8(bytes_out).unwrap()) } } diff --git a/src/torrust_http_tracker/filters.rs b/src/torrust_http_tracker/filters.rs index 0e2b364b5..1015b2b38 100644 --- a/src/torrust_http_tracker/filters.rs +++ b/src/torrust_http_tracker/filters.rs @@ -67,7 +67,7 @@ async fn peer_id(raw_query: String) -> WebResult { let peer_id_bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); // peer_id must be 20 bytes - if peer_id_bytes.len() > 20 { + if peer_id_bytes.len() != 20 { return Err(reject::custom(ServerError::InvalidPeerId)); } From f478f3b02d5e8e51fe6326b238fcb7e8d20e1c82 Mon Sep 17 00:00:00 2001 From: Power2All Date: Tue, 15 Mar 2022 23:45:15 +0100 Subject: [PATCH 0041/1003] Trying to fix again a bug --- src/common.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/common.rs 
b/src/common.rs index 92bd1d1e9..4d2f5ec71 100644 --- a/src/common.rs +++ b/src/common.rs @@ -130,8 +130,12 @@ pub struct PeerId(pub [u8; 20]); impl PeerId { pub fn to_string(&self) -> String { let mut buffer = [0u8; 20]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); - String::from(std::str::from_utf8(bytes_out).unwrap()) + let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok(); + return if let Some(bytes_out) = bytes_out { + String::from(std::str::from_utf8(bytes_out).unwrap()) + } else { + "".to_string() + } } } From b0417a3d5cf7074874dd13a9e06dc6fa7c8383ca Mon Sep 17 00:00:00 2001 From: Power2All Date: Wed, 16 Mar 2022 07:37:32 +0100 Subject: [PATCH 0042/1003] Prevent error when no peers with udp --- src/torrust_udp_tracker/handlers.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/torrust_udp_tracker/handlers.rs b/src/torrust_udp_tracker/handlers.rs index 84cba3f45..ea9043697 100644 --- a/src/torrust_udp_tracker/handlers.rs +++ b/src/torrust_udp_tracker/handlers.rs @@ -85,7 +85,11 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc // get all peers excluding the client_addr let peers = match tracker.get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr).await { Some(v) => v, - None => { return Err(ServerError::NoPeersFound); } + None => { + // return Err(ServerError::NoPeersFound); + let return_data: Vec = Vec::new(); + return_data + } }; Ok(Response::from(AnnounceResponse { From f6eb8533909678c6f5547a9d1a95b26b81978c68 Mon Sep 17 00:00:00 2001 From: Power2All Date: Wed, 16 Mar 2022 09:20:17 +0100 Subject: [PATCH 0043/1003] First version of stats tracking, but needs improvements --- src/http_api_server.rs | 84 +++++++++++++++++++++++++++- src/torrust_http_tracker/filters.rs | 17 +++++- src/torrust_http_tracker/handlers.rs | 31 +++++++++- src/torrust_http_tracker/request.rs | 2 + src/torrust_udp_tracker/handlers.rs | 38 +++++++++++-- src/tracker.rs | 41 
+++++++++++++- 6 files changed, 203 insertions(+), 10 deletions(-) diff --git a/src/http_api_server.rs b/src/http_api_server.rs index f87f460a4..a6bee4a14 100644 --- a/src/http_api_server.rs +++ b/src/http_api_server.rs @@ -23,6 +23,26 @@ struct Torrent<'a> { peers: Option>, } +#[derive(Serialize)] +struct Stats { + torrents: u32, + seeders: u32, + completed: u32, + leechers: u32, + tcp4_connections_handled: u32, + tcp4_announces_handled: u32, + tcp4_scrapes_handled: u32, + tcp6_connections_handled: u32, + tcp6_announces_handled: u32, + tcp6_scrapes_handled: u32, + udp4_connections_handled: u32, + udp4_announces_handled: u32, + udp4_scrapes_handled: u32, + udp6_connections_handled: u32, + udp6_announces_handled: u32, + udp6_scrapes_handled: u32, +} + #[derive(Serialize, Debug)] #[serde(tag = "status", rename_all = "snake_case")] enum ActionStatus<'a> { @@ -64,13 +84,13 @@ fn authenticate(tokens: HashMap) -> impl Filter) -> Server + Clone + Send + Sync + 'static> { // GET /api/torrents?offset=:u32&limit=:u32 // View torrent list - let t1 = tracker.clone(); + let api_torrents = tracker.clone(); let view_torrent_list = filters::method::get() .and(filters::path::path("torrents")) .and(filters::path::end()) .and(filters::query::query()) .map(move |limits| { - let tracker = t1.clone(); + let tracker = api_torrents.clone(); (limits, tracker) }) .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| { @@ -99,6 +119,65 @@ pub fn build_server(tracker: Arc) -> Server| { + async move { + let mut results = Stats{ + torrents: 0, + seeders: 0, + completed: 0, + leechers: 0, + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0 + }; + let db = tracker.get_torrents().await; + let _: Vec<_> = db 
+ .iter() + .map(|(_info_hash, torrent_entry)| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + results.seeders += seeders; + results.completed += completed; + results.leechers += leechers; + results.torrents += 1; + }) + .collect(); + let stats = tracker.get_stats().await; + results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; + results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; + results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; + results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; + results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; + results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; + results.udp4_connections_handled = stats.udp4_connections_handled as u32; + results.udp4_announces_handled = stats.udp4_announces_handled as u32; + results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; + results.udp6_connections_handled = stats.udp6_connections_handled as u32; + results.udp6_announces_handled = stats.udp6_announces_handled as u32; + results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; + + Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) + } + }); + // GET /api/torrent/:info_hash // View torrent info let t2 = tracker.clone(); @@ -219,6 +298,7 @@ pub fn build_server(tracker: Arc) -> Server impl Filter + Clone { warp::any() .and(with_info_hash()) + .and(warp::addr::remote()) + .and(warp::header::optional::("X-Forwarded-For")) .and_then(scrape_request) } /// Parse ScrapeRequest from InfoHash -async fn scrape_request(info_hashes: Vec) -> WebResult { +async fn scrape_request(info_hashes: Vec, remote_addr: Option, forwarded_for: Option) -> WebResult { + if remote_addr.is_none() { return Err(reject::custom(ServerError::AddressNotFound)) } + + // get first forwarded ip + let forwarded_ip = match forwarded_for { + None => None, + Some(forwarded_for_str) => { + forwarded_for_str.split(",").next() + 
.and_then(|ip_str| IpAddr::from_str(ip_str).ok()) + } + }; + Ok(ScrapeRequest { info_hashes, + remote_addr: remote_addr.unwrap(), + forwarded_ip, }) } diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 586c520f8..f410a7403 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -29,7 +29,7 @@ pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, trac } /// Handle announce request -pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option, tracker: Arc,) -> WebResult { +pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option, tracker: Arc) -> WebResult { if let Err(e) = authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await { return Err(reject::custom(e)) } @@ -52,12 +52,23 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option if peers.is_none() { return Err(reject::custom(ServerError::NoPeersFound)) } // success response + let tracker_copy = tracker.clone(); + tokio::spawn(async move { + let mut status_writer = tracker_copy.set_stats().await; + if peer_ip.is_ipv4() { + status_writer.tcp4_connections_handled += 1; + status_writer.tcp4_announces_handled += 1; + } else { + status_writer.tcp6_connections_handled += 1; + status_writer.tcp6_announces_handled += 1; + } + }); let announce_interval = tracker.config.announce_interval; send_announce_response(&announce_request, torrent_stats, peers.unwrap(), announce_interval) } /// Handle scrape request -pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option, tracker: Arc,) -> WebResult { +pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option, tracker: Arc) -> WebResult { let mut files: HashMap = HashMap::new(); let db = tracker.get_torrents().await; @@ -80,6 +91,22 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option scrape_request.forwarded_ip.unwrap(), + false => 
scrape_request.remote_addr.ip() + }; + + let tracker_copy = tracker.clone(); + tokio::spawn(async move { + let mut status_writer = tracker_copy.set_stats().await; + if ip.is_ipv4() { + status_writer.tcp4_connections_handled += 1; + status_writer.tcp4_scrapes_handled += 1; + } else { + status_writer.tcp6_connections_handled += 1; + status_writer.tcp6_scrapes_handled += 1; + } + }); send_scrape_response(files) } diff --git a/src/torrust_http_tracker/request.rs b/src/torrust_http_tracker/request.rs index 6630e5660..ac0e9602f 100644 --- a/src/torrust_http_tracker/request.rs +++ b/src/torrust_http_tracker/request.rs @@ -30,4 +30,6 @@ pub struct AnnounceRequest { pub struct ScrapeRequest { pub info_hashes: Vec, + pub forwarded_ip: Option, + pub remote_addr: SocketAddr, } diff --git a/src/torrust_udp_tracker/handlers.rs b/src/torrust_udp_tracker/handlers.rs index ea9043697..d51dd7995 100644 --- a/src/torrust_udp_tracker/handlers.rs +++ b/src/torrust_udp_tracker/handlers.rs @@ -52,18 +52,18 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: &[u8], tracker: Arc pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: Arc) -> Result { match request { Request::Connect(connect_request) => { - handle_connect(remote_addr, &connect_request).await + handle_connect(remote_addr, &connect_request, tracker).await } Request::Announce(announce_request) => { handle_announce(remote_addr, &announce_request, tracker).await } Request::Scrape(scrape_request) => { - handle_scrape(&scrape_request, tracker).await + handle_scrape(remote_addr, &scrape_request, tracker).await } } } -pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest) -> Result { +pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, tracker: Arc) -> Result { let connection_id = get_connection_id(&remote_addr); let response = Response::from(ConnectResponse { @@ -71,6 +71,16 @@ pub async fn handle_connect(remote_addr: SocketAddr, request: 
&ConnectRequest) - connection_id, }); + let tracker_copy = tracker.clone(); + tokio::spawn(async move { + let mut status_writer = tracker_copy.set_stats().await; + if remote_addr.is_ipv4() { + status_writer.udp4_connections_handled += 1; + } else { + status_writer.udp6_connections_handled += 1; + } + }); + Ok(response) } @@ -92,6 +102,16 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc } }; + let tracker_copy = tracker.clone(); + tokio::spawn(async move { + let mut status_writer = tracker_copy.set_stats().await; + if remote_addr.is_ipv4() { + status_writer.udp4_announces_handled += 1; + } else { + status_writer.udp6_announces_handled += 1; + } + }); + Ok(Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, announce_interval: AnnounceInterval(tracker.config.announce_interval as i32), @@ -105,7 +125,7 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc })) } -pub async fn handle_scrape(request: &ScrapeRequest, tracker: Arc) -> Result { +pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tracker: Arc) -> Result { let db = tracker.get_torrents().await; let mut torrent_stats: Vec = Vec::new(); @@ -137,6 +157,16 @@ pub async fn handle_scrape(request: &ScrapeRequest, tracker: Arc torrent_stats.push(scrape_entry); } + let tracker_copy = tracker.clone(); + tokio::spawn(async move { + let mut status_writer = tracker_copy.set_stats().await; + if remote_addr.is_ipv4() { + status_writer.udp4_scrapes_handled += 1; + } else { + status_writer.udp6_scrapes_handled += 1; + } + }); + Ok(Response::from(ScrapeResponse { transaction_id: request.transaction_id, torrent_stats diff --git a/src/tracker.rs b/src/tracker.rs index 3a717f239..e25e2d803 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize}; use serde; use std::borrow::Cow; use std::collections::BTreeMap; -use tokio::sync::RwLock; +use 
tokio::sync::{RwLock, RwLockWriteGuard}; use crate::common::{AnnounceEventDef, InfoHash, NumberOfBytesDef, PeerId}; use std::net::{IpAddr, SocketAddr}; use crate::{Configuration, key_manager, MAX_SCRAPE_TORRENTS}; @@ -236,10 +236,27 @@ pub enum TorrentError { InvalidInfoHash, } +#[derive(Debug)] +pub struct TrackerStats { + pub tcp4_connections_handled: u64, + pub tcp4_announces_handled: u64, + pub tcp4_scrapes_handled: u64, + pub tcp6_connections_handled: u64, + pub tcp6_announces_handled: u64, + pub tcp6_scrapes_handled: u64, + pub udp4_connections_handled: u64, + pub udp4_announces_handled: u64, + pub udp4_scrapes_handled: u64, + pub udp6_connections_handled: u64, + pub udp6_announces_handled: u64, + pub udp6_scrapes_handled: u64, +} + pub struct TorrentTracker { pub config: Arc, torrents: tokio::sync::RwLock>, database: SqliteDatabase, + stats: tokio::sync::RwLock, } impl TorrentTracker { @@ -252,6 +269,20 @@ impl TorrentTracker { config, torrents: RwLock::new(std::collections::BTreeMap::new()), database, + stats: RwLock::new(TrackerStats { + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + }), } } @@ -400,6 +431,14 @@ impl TorrentTracker { self.torrents.read().await } + pub async fn set_stats(&self) -> RwLockWriteGuard<'_, TrackerStats> { + self.stats.write().await + } + + pub async fn get_stats(&self) -> tokio::sync::RwLockReadGuard<'_, TrackerStats> { + self.stats.read().await + } + // remove torrents without peers pub async fn cleanup_torrents(&self) { debug!("Cleaning torrents.."); From 25e9476263b196377cc8a526577655056bdbbff4 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 16 Mar 2022 16:15:11 +0100 Subject: [PATCH 0044/1003] refactor: replace error message 
on empty peers with empty vec --- src/torrust_http_tracker/handlers.rs | 4 ++-- src/torrust_udp_tracker/handlers.rs | 12 ++++-------- src/torrust_udp_tracker/server.rs | 1 + src/tracker.rs | 8 +++----- 4 files changed, 10 insertions(+), 15 deletions(-) diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index f410a7403..e56fe1bd0 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -49,7 +49,7 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer).await; // get all peers excluding the client_addr let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; - if peers.is_none() { return Err(reject::custom(ServerError::NoPeersFound)) } + //if peers.is_none() { return Err(reject::custom(ServerError::NoPeersFound)) } // success response let tracker_copy = tracker.clone(); @@ -64,7 +64,7 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option } }); let announce_interval = tracker.config.announce_interval; - send_announce_response(&announce_request, torrent_stats, peers.unwrap(), announce_interval) + send_announce_response(&announce_request, torrent_stats, peers, announce_interval) } /// Handle scrape request diff --git a/src/torrust_udp_tracker/handlers.rs b/src/torrust_udp_tracker/handlers.rs index d51dd7995..6f1a6f685 100644 --- a/src/torrust_udp_tracker/handlers.rs +++ b/src/torrust_udp_tracker/handlers.rs @@ -91,16 +91,12 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc let peer = TorrentPeer::from_udp_announce_request(&wrapped_announce_request.announce_request, remote_addr.ip(), tracker.config.get_ext_ip()); + //let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer).await; + let torrent_stats = 
tracker.update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer).await; + // get all peers excluding the client_addr - let peers = match tracker.get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr).await { - Some(v) => v, - None => { - // return Err(ServerError::NoPeersFound); - let return_data: Vec = Vec::new(); - return_data - } - }; + let peers = tracker.get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr).await; let tracker_copy = tracker.clone(); tokio::spawn(async move { diff --git a/src/torrust_udp_tracker/server.rs b/src/torrust_udp_tracker/server.rs index 0e24fd81c..9e001347b 100644 --- a/src/torrust_udp_tracker/server.rs +++ b/src/torrust_udp_tracker/server.rs @@ -28,6 +28,7 @@ impl UdpServer { if let Ok((valid_bytes, remote_addr)) = self.socket.recv_from(&mut data).await { let data = &data[..valid_bytes]; debug!("Received {} bytes from {}", data.len(), remote_addr); + debug!("{:?}", data); let response = handle_packet(remote_addr, data, self.tracker.clone()).await; self.send_response(remote_addr, response).await; } diff --git a/src/tracker.rs b/src/tracker.rs index e25e2d803..f6a162782 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -376,14 +376,12 @@ impl TorrentTracker { &self, info_hash: &InfoHash, peer_addr: &SocketAddr - ) -> Option> { + ) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { - None => { - None - } + None => vec![], Some(entry) => { - Some(entry.get_peers(Some(peer_addr))) + entry.get_peers(Some(peer_addr)) } } } From 5a07d89a09ad49220316f3976f102a2eacd27f43 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 16 Mar 2022 16:51:09 +0100 Subject: [PATCH 0045/1003] refactor: updated to newest aquatic_udp_protocol --- Cargo.lock | 10 ++++-- Cargo.toml | 3 +- src/torrust_udp_tracker/handlers.rs | 52 +++++++++++++++++++++-------- src/torrust_udp_tracker/server.rs | 9 ++--- 4 files changed, 50 insertions(+), 24 deletions(-) diff --git 
a/Cargo.lock b/Cargo.lock index 8ba818230..f56a7f003 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14,10 +14,10 @@ dependencies = [ [[package]] name = "aquatic_udp_protocol" version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c3026324bc5073042edfbc47699cc60c1a9cf24849f5b104c18f98d2ad4175" +source = "git+https://github.com/greatest-ape/aquatic#065e007ede84de20f20983b4b504471bbda2fdf2" dependencies = [ "byteorder", + "either", ] [[package]] @@ -263,6 +263,12 @@ dependencies = [ "generic-array 0.14.4", ] +[[package]] +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + [[package]] name = "enum-as-inner" version = "0.3.3" diff --git a/Cargo.toml b/Cargo.toml index 01e3dbb76..77e478988 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,5 +32,4 @@ env_logger = "0.9.0" config = "0.11" derive_more = "0.99" thiserror = "1.0" - -aquatic_udp_protocol = "0.1.0" +aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } diff --git a/src/torrust_udp_tracker/handlers.rs b/src/torrust_udp_tracker/handlers.rs index 6f1a6f685..0aeb804d7 100644 --- a/src/torrust_udp_tracker/handlers.rs +++ b/src/torrust_udp_tracker/handlers.rs @@ -1,4 +1,4 @@ -use std::net::SocketAddr; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId}; use crate::{InfoHash, MAX_SCRAPE_TORRENTS, TorrentError, TorrentPeer, TorrentTracker}; @@ -108,17 +108,43 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc } }); - Ok(Response::from(AnnounceResponse { - transaction_id: 
wrapped_announce_request.announce_request.transaction_id, - announce_interval: AnnounceInterval(tracker.config.announce_interval as i32), - leechers: NumberOfPeers(torrent_stats.leechers as i32), - seeders: NumberOfPeers(torrent_stats.seeders as i32), - peers: peers.iter().map(|peer| - ResponsePeer { - ip_address: peer.peer_addr.ip(), - port: Port(peer.peer_addr.port()) - }).collect() - })) + let announce_response = if remote_addr.is_ipv4() { + Response::from(AnnounceResponse { + transaction_id: wrapped_announce_request.announce_request.transaction_id, + announce_interval: AnnounceInterval(tracker.config.announce_interval as i32), + leechers: NumberOfPeers(torrent_stats.leechers as i32), + seeders: NumberOfPeers(torrent_stats.seeders as i32), + peers: peers.iter() + .filter_map(|peer| if let IpAddr::V4(ip) = peer.peer_addr.ip() { + Some(ResponsePeer:: { + ip_address: ip, + port: Port(peer.peer_addr.port()) + }) + } else { + None + } + ).collect() + }) + } else { + Response::from(AnnounceResponse { + transaction_id: wrapped_announce_request.announce_request.transaction_id, + announce_interval: AnnounceInterval(tracker.config.announce_interval as i32), + leechers: NumberOfPeers(torrent_stats.leechers as i32), + seeders: NumberOfPeers(torrent_stats.seeders as i32), + peers: peers.iter() + .filter_map(|peer| if let IpAddr::V6(ip) = peer.peer_addr.ip() { + Some(ResponsePeer:: { + ip_address: ip, + port: Port(peer.peer_addr.port()) + }) + } else { + None + } + ).collect() + }) + }; + + Ok(announce_response) } pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tracker: Arc) -> Result { @@ -171,5 +197,5 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra fn handle_error(e: ServerError, transaction_id: TransactionId) -> Response { let message = e.to_string(); - Response::from(ErrorResponse { transaction_id, message }) + Response::from(ErrorResponse { transaction_id, message: message.into() }) } diff --git 
a/src/torrust_udp_tracker/server.rs b/src/torrust_udp_tracker/server.rs index 9e001347b..2f472ef54 100644 --- a/src/torrust_udp_tracker/server.rs +++ b/src/torrust_udp_tracker/server.rs @@ -1,7 +1,7 @@ use std::io::Cursor; use std::net::{SocketAddr}; use std::sync::Arc; -use aquatic_udp_protocol::{IpVersion, Response}; +use aquatic_udp_protocol::{Response}; use log::debug; use tokio::net::UdpSocket; use crate::{TorrentTracker, UdpTrackerConfig}; @@ -41,12 +41,7 @@ impl UdpServer { let buffer = vec![0u8; MAX_PACKET_SIZE]; let mut cursor = Cursor::new(buffer); - let ip_version = match remote_addr { - SocketAddr::V4(_) => IpVersion::IPv4, - SocketAddr::V6(_) => IpVersion::IPv6 - }; - - match response.write(&mut cursor, ip_version) { + match response.write(&mut cursor) { Ok(_) => { let position = cursor.position() as usize; let inner = cursor.get_ref(); From 9d6db191de7a7646d7e974dd2c6047081c559119 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 16 Mar 2022 17:10:54 +0100 Subject: [PATCH 0046/1003] fix: no more panic on http scrape on_reverse_proxy but no forwarded_ip --- src/torrust_http_tracker/handlers.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index e56fe1bd0..0d7277226 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -91,6 +91,10 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option scrape_request.forwarded_ip.unwrap(), false => scrape_request.remote_addr.ip() From b34f56405824ee85bfbef9c942214934adf4979e Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 16 Mar 2022 17:36:27 +0100 Subject: [PATCH 0047/1003] fix: now able to get ipv6 peers from ipv6 remote_addr --- src/tracker.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/tracker.rs b/src/tracker.rs index f6a162782..af7aa3455 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -146,7 +146,17 @@ impl TorrentEntry 
{ for (_, peer) in self .peers .iter() - .filter(|e| e.1.peer_addr.is_ipv4()) + .filter(|e| match remote_addr { + // don't filter on ip_version + None => true, + // filter out different ip_version from remote_addr + Some(remote_address) => { + match e.1.peer_addr.ip() { + IpAddr::V4(_) => { remote_address.is_ipv4() } + IpAddr::V6(_) => { remote_address.is_ipv6() } + } + } + }) .take(MAX_SCRAPE_TORRENTS as usize) { From ab605455ddb0861d4059dc60e4f37c384e5b3527 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 16 Mar 2022 21:36:51 +0100 Subject: [PATCH 0048/1003] refactor: refactored persistent torrent loading/saving --- src/database.rs | 38 ++++++++++++++++---------------------- src/main.rs | 39 ++++++++++++++++++--------------------- src/tracker.rs | 36 ++++++++++++++++++++++-------------- 3 files changed, 56 insertions(+), 57 deletions(-) diff --git a/src/database.rs b/src/database.rs index 827c7dbed..82d26d6d8 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,11 +1,11 @@ -use crate::{InfoHash, AUTH_KEY_LENGTH, TorrentTracker}; +use std::collections::BTreeMap; +use crate::{InfoHash, AUTH_KEY_LENGTH, TorrentEntry}; use log::debug; use r2d2_sqlite::{SqliteConnectionManager, rusqlite}; use r2d2::{Pool}; use r2d2_sqlite::rusqlite::NO_PARAMS; use crate::key_manager::AuthKey; use std::str::FromStr; -use std::sync::Arc; pub struct SqliteDatabase { pool: Pool @@ -73,40 +73,34 @@ impl SqliteDatabase { } } - pub async fn load_persistent_torrent_data(&self, tracker: Arc) -> Result { - let tracker_copy = tracker.clone(); + pub async fn load_persistent_torrent_data(&self) -> Result, rusqlite::Error> { let conn = self.pool.get().unwrap(); let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; - let info_hash_iter = stmt.query_map(NO_PARAMS, |row| { - let info_hash: String = row.get(0)?; - let info_hash_converted = InfoHash::from_str(&info_hash).unwrap(); + let torrent_iter = stmt.query_map(NO_PARAMS, |row| { + let info_hash_string: String = 
row.get(0)?; + let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); let completed: u32 = row.get(1)?; - Ok((info_hash_converted, completed)) + Ok((info_hash, completed)) })?; - for info_hash_item in info_hash_iter { - let (info_hash, completed): (InfoHash, u32) = info_hash_item.unwrap(); - tracker_copy.add_torrent(&info_hash, 0u32, completed, 0u32).await; - } + let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(|x| x.ok() ).collect(); - Ok(true) + Ok(torrents) } - pub async fn save_persistent_torrent_data(&self, tracker: Arc) -> Result { - let tracker_copy = tracker.clone(); + pub async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), rusqlite::Error> { let mut conn = self.pool.get().unwrap(); - let db = tracker_copy.get_torrents().await; let db_transaction = conn.transaction()?; - let _: Vec<_> = db - .iter() - .map(|(info_hash, torrent_entry)| { + + for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); - }) - .collect(); + } + let _ = db_transaction.commit(); - Ok(true) + + Ok(()) } pub async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { diff --git a/src/main.rs b/src/main.rs index bd5ff3b40..5829d3edc 100644 --- a/src/main.rs +++ b/src/main.rs @@ -17,11 +17,17 @@ async fn main() { logging::setup_logging(&config); // the singleton torrent tracker that gets passed to the HTTP and UDP server - let tracker = Arc::new(TorrentTracker::new(config.clone())); + let tracker = Arc::new(TorrentTracker::new(config.clone()).unwrap_or_else(|e| { + panic!("{}", e) + })); - // Load torrents if enabled + // load persistent torrents if enabled if config.persistence { - load_torrents_into_memory(tracker.clone()).await; + info!("Loading persistent torrents into memory..."); + if 
tracker.load_torrents().await.is_err() { + panic!("Could not load persistent torrents.") + }; + info!("Persistent torrents loaded."); } // start torrent cleanup job (periodically removes old peers) @@ -48,27 +54,18 @@ async fn main() { // handle the signals here let ctrl_c = tokio::signal::ctrl_c(); tokio::select! { - _ = ctrl_c => { info!("Torrust shutting down..") } - } - - // Save torrents if enabled - if config.persistence { - save_torrents_into_memory(tracker.clone()).await; + _ = ctrl_c => { + info!("Torrust shutting down.."); + // Save torrents if enabled + if config.persistence { + info!("Saving torrents into SQL from memory..."); + let _ = tracker.save_torrents().await; + info!("Torrents saved"); + } + } } } -async fn load_torrents_into_memory(tracker: Arc) { - info!("Loading torrents from SQL into memory..."); - let _ = tracker.load_torrents(tracker.clone()).await; - info!("Torrents loaded"); -} - -async fn save_torrents_into_memory(tracker: Arc) { - info!("Saving torrents into SQL from memory..."); - let _ = tracker.save_torrents(tracker.clone()).await; - info!("Torrents saved"); -} - fn start_torrent_cleanup_job(config: Arc, tracker: Arc) -> Option> { let weak_tracker = std::sync::Arc::downgrade(&tracker); let interval = config.cleanup_interval.unwrap_or(600); diff --git a/src/tracker.rs b/src/tracker.rs index af7aa3455..58ddd70ab 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -10,7 +10,7 @@ use std::collections::btree_map::Entry; use crate::database::SqliteDatabase; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use log::debug; +use log::{debug}; use crate::key_manager::AuthKey; use r2d2_sqlite::rusqlite; use crate::torrust_http_tracker::AnnounceRequest; @@ -270,12 +270,10 @@ pub struct TorrentTracker { } impl TorrentTracker { - pub fn new(config: Arc) -> TorrentTracker { - let database = SqliteDatabase::new(&config.db_path).unwrap_or_else(|error| { - panic!("Could not create SQLite database. 
Reason: {}", error) - }); + pub fn new(config: Arc) -> Result { + let database = SqliteDatabase::new(&config.db_path)?; - TorrentTracker { + Ok(TorrentTracker { config, torrents: RwLock::new(std::collections::BTreeMap::new()), database, @@ -293,7 +291,7 @@ impl TorrentTracker { udp6_announces_handled: 0, udp6_scrapes_handled: 0, }), - } + }) } fn is_public(&self) -> bool { @@ -355,13 +353,20 @@ impl TorrentTracker { } // Loading the torrents into memory - pub async fn load_torrents(&self, tracker: Arc) -> Result { - self.database.load_persistent_torrent_data(tracker).await + pub async fn load_torrents(&self) -> Result<(), rusqlite::Error> { + let torrents = self.database.load_persistent_torrent_data().await?; + + for torrent in torrents { + self.add_torrent(torrent.0, 0, torrent.1, 0).await; + } + + Ok(()) } // Saving the torrents from memory - pub async fn save_torrents(&self, tracker: Arc) -> Result { - self.database.save_persistent_torrent_data(tracker).await + pub async fn save_torrents(&self) -> Result<(), rusqlite::Error> { + let torrents = self.torrents.read().await; + self.database.save_persistent_torrent_data(&*torrents).await } // Adding torrents is not relevant to public trackers. 
@@ -419,12 +424,15 @@ impl TorrentTracker { } } - pub async fn add_torrent(&self, info_hash: &InfoHash, seeders: u32, completed: u32, leechers: u32) -> TorrentStats { + pub async fn add_torrent(&self, info_hash: InfoHash, seeders: u32, completed: u32, leechers: u32) -> TorrentStats { let mut torrents = self.torrents.write().await; if !torrents.contains_key(&info_hash) { - let mut torrent_entry = TorrentEntry::new(); - torrent_entry.completed = completed; + let torrent_entry = TorrentEntry { + peers: Default::default(), + completed, + seeders + }; torrents.insert(info_hash.clone(), torrent_entry); } From 0e0df0e45d3a80a2e5d5a684cbfca527b9bb150f Mon Sep 17 00:00:00 2001 From: Power2All Date: Thu, 17 Mar 2022 11:10:30 +0100 Subject: [PATCH 0049/1003] Conflict fixed, added min interval to response, and a proper exiting of tokio spawn --- Cargo.lock | 472 +-------------------------- Cargo.toml | 2 - src/config.rs | 2 + src/main.rs | 12 +- src/torrust_http_tracker/handlers.rs | 5 +- src/torrust_http_tracker/response.rs | 3 + src/torrust_http_tracker/server.rs | 18 +- src/utils.rs | 27 ++ 8 files changed, 71 insertions(+), 470 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f56a7f003..7017da294 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -26,40 +26,6 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" -[[package]] -name = "async-trait" -version = "0.1.51" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "attohttpc" -version = "0.16.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb8867f378f33f78a811a8eb9bf108ad99430d7aad43315dd9319c827ef6247" -dependencies = [ - "http", - "log", - "url", - "wildmatch", -] - -[[package]] -name = "atty" -version = 
"0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - [[package]] name = "autocfg" version = "1.0.1" @@ -201,22 +167,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" -[[package]] -name = "core-foundation" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" - [[package]] name = "cpufeatures" version = "0.2.1" @@ -226,12 +176,6 @@ dependencies = [ "libc", ] -[[package]] -name = "data-encoding" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" - [[package]] name = "derive_more" version = "0.99.17" @@ -269,47 +213,6 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" -[[package]] -name = "enum-as-inner" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "env_logger" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", 
-] - -[[package]] -name = "external-ip" -version = "4.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2406194c5c4be3678bd7c1c128237ec589a6a3b7a3b05786971998bda7693c27" -dependencies = [ - "futures", - "http", - "hyper", - "hyper-tls", - "igd", - "log", - "rand 0.8.4", - "trust-dns-resolver", -] - [[package]] name = "fake-simd" version = "0.1.2" @@ -343,21 +246,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" version = "1.0.0" @@ -370,13 +258,12 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.12" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150" +checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" dependencies = [ "futures-channel", "futures-core", - "futures-executor", "futures-io", "futures-sink", "futures-task", @@ -385,9 +272,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.12" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" +checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" dependencies = [ "futures-core", "futures-sink", @@ -395,71 +282,39 @@ dependencies = [ [[package]] name = "futures-core" -version 
= "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65" - -[[package]] -name = "futures-executor" -version = "0.3.12" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] +checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" [[package]] name = "futures-io" -version = "0.3.12" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" - -[[package]] -name = "futures-macro" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "syn", -] +checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" [[package]] name = "futures-sink" -version = "0.3.12" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6" +checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" [[package]] name = "futures-task" -version = "0.3.12" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86" -dependencies = [ - "once_cell", -] +checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" [[package]] name = "futures-util" -version = "0.3.12" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" +checksum = 
"d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" dependencies = [ - "futures-channel", "futures-core", - "futures-io", - "futures-macro", "futures-sink", "futures-task", - "memchr", "pin-project-lite", "pin-utils", - "proc-macro-hack", - "proc-macro-nested", "slab", ] @@ -554,15 +409,6 @@ dependencies = [ "http", ] -[[package]] -name = "heck" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "hermit-abi" version = "0.1.18" @@ -578,17 +424,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -[[package]] -name = "hostname" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" -dependencies = [ - "libc", - "match_cfg", - "winapi", -] - [[package]] name = "http" version = "0.2.3" @@ -622,12 +457,6 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" -[[package]] -name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - [[package]] name = "hyper" version = "0.14.2" @@ -652,19 +481,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - "hyper", - "native-tls", - "tokio", - "tokio-native-tls", -] - [[package]] name = "idna" version = "0.2.3" @@ -676,19 +492,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] 
-name = "igd" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4e7ee8b51e541486d7040883fe1f00e2a9954bcc24fd155b7e4f03ed4b93dd" -dependencies = [ - "attohttpc", - "log", - "rand 0.8.4", - "url", - "xmltree", -] - [[package]] name = "indexmap" version = "1.6.1" @@ -717,24 +520,6 @@ dependencies = [ "cfg-if 1.0.0", ] -[[package]] -name = "ipconfig" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" -dependencies = [ - "socket2", - "widestring", - "winapi", - "winreg", -] - -[[package]] -name = "ipnet" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" - [[package]] name = "itoa" version = "0.4.7" @@ -818,12 +603,6 @@ dependencies = [ "linked-hash-map", ] -[[package]] -name = "match_cfg" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" - [[package]] name = "matches" version = "0.1.8" @@ -893,24 +672,6 @@ dependencies = [ "twoway", ] -[[package]] -name = "native-tls" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d" -dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - [[package]] name = "nom" version = "5.1.2" @@ -987,39 +748,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" -[[package]] -name = "openssl" -version = "0.10.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8d9facdb76fec0b73c406f125d44d86fdad818d66fef0531eec9233ca425ff4a" -dependencies = [ - "bitflags", - "cfg-if 1.0.0", - "foreign-types", - "libc", - "once_cell", - "openssl-sys", -] - -[[package]] -name = "openssl-probe" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" - -[[package]] -name = "openssl-sys" -version = "0.9.66" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1996d2d305e561b70d1ee0c53f1542833f4e1ac6ce9a6708b6ff2738ca67dc82" -dependencies = [ - "autocfg", - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "parking_lot" version = "0.11.1" @@ -1095,18 +823,6 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" -[[package]] -name = "proc-macro-hack" -version = "0.5.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" - -[[package]] -name = "proc-macro-nested" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" - [[package]] name = "proc-macro2" version = "1.0.24" @@ -1268,16 +984,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "resolv-conf" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" -dependencies = [ - "hostname", - "quick-error", -] - [[package]] name = "ring" version = "0.16.20" @@ -1349,16 +1055,6 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" -[[package]] -name = "schannel" -version = "0.1.19" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" -dependencies = [ - "lazy_static", - "winapi", -] - [[package]] name = "scheduled-thread-pool" version = "0.2.5" @@ -1390,29 +1086,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "security-framework" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" -dependencies = [ - "bitflags", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19133a286e494cc3311c165c4676ccb1fd47bed45b55f9d71fbd784ad4cea6f8" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "semver" version = "1.0.4" @@ -1593,15 +1266,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "termcolor" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" -dependencies = [ - "winapi-util", -] - [[package]] name = "thiserror" version = "1.0.26" @@ -1678,16 +1342,6 @@ dependencies = [ "syn", ] -[[package]] -name = "tokio-native-tls" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" -dependencies = [ - "native-tls", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.22.0" @@ -1756,8 +1410,6 @@ dependencies = [ "chrono", "config", "derive_more", - "env_logger", - "external-ip", "fern", "hex", "log", @@ -1802,51 +1454,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "trust-dns-proto" -version = "0.20.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ad0d7f5db438199a6e2609debe3f69f808d074e0a2888ee0bccb45fe234d03f4" -dependencies = [ - "async-trait", - "cfg-if 1.0.0", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "idna", - "ipnet", - "lazy_static", - "log", - "rand 0.8.4", - "smallvec", - "thiserror", - "tinyvec", - "tokio", - "url", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.20.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ad17b608a64bd0735e67bde16b0636f8aa8591f831a25d18443ed00a699770" -dependencies = [ - "cfg-if 1.0.0", - "futures-util", - "ipconfig", - "lazy_static", - "log", - "lru-cache", - "parking_lot", - "resolv-conf", - "smallvec", - "thiserror", - "tokio", - "trust-dns-proto", -] - [[package]] name = "try-lock" version = "0.2.3" @@ -1911,12 +1518,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" - [[package]] name = "unicode-xid" version = "0.2.1" @@ -2085,18 +1686,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "widestring" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" - -[[package]] -name = "wildmatch" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f44b95f62d34113cf558c93511ac93027e03e9c29a60dd0fd70e6e025c7270a" - [[package]] name = "winapi" version = "0.3.9" @@ -2113,45 +1702,12 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "winreg" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" -dependencies = [ - "winapi", -] - -[[package]] -name = "xml-rs" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3" - -[[package]] -name = "xmltree" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7d8a75eaf6557bb84a65ace8609883db44a29951042ada9b393151532e41fcb" -dependencies = [ - "xml-rs", -] - [[package]] name = "yaml-rust" version = "0.4.5" diff --git a/Cargo.toml b/Cargo.toml index 77e478988..f91dba1cb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,11 +24,9 @@ log = {version = "0.4", features = ["release_max_level_info"]} fern = "0.6" chrono = "0.4" byteorder = "1" -external-ip = "4.1.0" r2d2_sqlite = "0.16.0" r2d2 = "0.8.8" rand = "0.8.4" -env_logger = "0.9.0" config = "0.11" derive_more = "0.99" thiserror = "1.0" diff --git a/src/config.rs b/src/config.rs index 3ec55f857..94b37464d 100644 --- a/src/config.rs +++ b/src/config.rs @@ -56,6 +56,7 @@ pub struct Configuration { pub cleanup_peerless: bool, pub external_ip: Option, pub announce_interval: u32, + pub announce_interval_min: u32, pub peer_timeout: u32, pub on_reverse_proxy: bool, pub udp_trackers: Vec, @@ -137,6 +138,7 @@ impl Configuration { cleanup_peerless: true, external_ip: Some(String::from("0.0.0.0")), announce_interval: 120, + announce_interval_min: 120, peer_timeout: 900, on_reverse_proxy: false, udp_trackers: Vec::new(), diff --git 
a/src/main.rs b/src/main.rs index 5829d3edc..fbd64abff 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,8 +1,8 @@ use std::net::SocketAddr; -use log::{info}; -use torrust_tracker::{http_api_server, Configuration, TorrentTracker, UdpServer, HttpTrackerConfig, UdpTrackerConfig, HttpApiConfig, logging}; use std::sync::Arc; +use log::info; use tokio::task::JoinHandle; +use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; use torrust_tracker::torrust_http_tracker::server::HttpServer; #[tokio::main] @@ -83,7 +83,7 @@ fn start_torrent_cleanup_job(config: Arc, tracker: Arc) -> JoinHandle<()> { @@ -92,7 +92,11 @@ fn start_api_server(config: &HttpApiConfig, tracker: Arc) -> Joi tokio::spawn(async move { let server = http_api_server::build_server(tracker); - server.bind(bind_addr).await; + let _ = server.bind_with_graceful_shutdown(bind_addr, async move { + tokio::signal::ctrl_c() + .await + .expect("failed to listen to shutdown signal"); + }); }) } diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 0d7277226..2fe216196 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -64,7 +64,7 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option } }); let announce_interval = tracker.config.announce_interval; - send_announce_response(&announce_request, torrent_stats, peers, announce_interval) + send_announce_response(&announce_request, torrent_stats, peers, announce_interval, tracker.config.announce_interval_min) } /// Handle scrape request @@ -127,7 +127,7 @@ pub async fn handle_error(r: Rejection) -> std::result::Result, interval: u32) -> WebResult { +fn send_announce_response(announce_request: &AnnounceRequest, torrent_stats: TorrentStats, peers: Vec, interval: u32, interval_min: u32) -> WebResult { let http_peers: Vec = peers.iter().map(|peer| Peer { peer_id: 
peer.peer_id.to_string(), ip: peer.peer_addr.ip(), @@ -136,6 +136,7 @@ fn send_announce_response(announce_request: &AnnounceRequest, torrent_stats: Tor let res = AnnounceResponse { interval, + interval_min, complete: torrent_stats.seeders, incomplete: torrent_stats.leechers, peers: http_peers diff --git a/src/torrust_http_tracker/response.rs b/src/torrust_http_tracker/response.rs index a80ee27c5..dc6614dd4 100644 --- a/src/torrust_http_tracker/response.rs +++ b/src/torrust_http_tracker/response.rs @@ -14,6 +14,7 @@ pub struct Peer { #[derive(Serialize)] pub struct AnnounceResponse { pub interval: u32, + pub interval_min: u32, //pub tracker_id: String, pub complete: u32, pub incomplete: u32, @@ -45,6 +46,8 @@ impl AnnounceResponse { let mut bytes: Vec = Vec::new(); bytes.write(b"d8:intervali")?; bytes.write(&self.interval.to_string().as_bytes())?; + bytes.write(b"d12:min intervali")?; + bytes.write(&self.interval.to_string().as_bytes())?; bytes.write(b"e8:completei")?; bytes.write(&self.complete.to_string().as_bytes())?; bytes.write(b"e10:incompletei")?; diff --git a/src/torrust_http_tracker/server.rs b/src/torrust_http_tracker/server.rs index 90f8a84d0..69811b3d9 100644 --- a/src/torrust_http_tracker/server.rs +++ b/src/torrust_http_tracker/server.rs @@ -18,16 +18,26 @@ impl HttpServer { /// Start the HttpServer pub async fn start(&self, socket_addr: SocketAddr) { - warp::serve(routes(self.tracker.clone())) - .run(socket_addr).await; + let (_addr, server) = warp::serve(routes(self.tracker.clone())) + .bind_with_graceful_shutdown(socket_addr, async move { + tokio::signal::ctrl_c() + .await + .expect("failed to listen to shutdown signal"); + }); + tokio::task::spawn(server); } /// Start the HttpServer in TLS mode pub async fn start_tls(&self, socket_addr: SocketAddr, ssl_cert_path: &str, ssl_key_path: &str) { - warp::serve(routes(self.tracker.clone())) + let (_addr, server) = warp::serve(routes(self.tracker.clone())) .tls() .cert_path(ssl_cert_path) 
.key_path(ssl_key_path) - .run(socket_addr).await; + .bind_with_graceful_shutdown(socket_addr, async move { + tokio::signal::ctrl_c() + .await + .expect("failed to listen to shutdown signal"); + }); + tokio::task::spawn(server); } } diff --git a/src/utils.rs b/src/utils.rs index 5790e6067..e3a8302df 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -2,7 +2,9 @@ use std::net::SocketAddr; use std::time::SystemTime; use std::error::Error; use std::fmt::Write; +use std::io::Cursor; use aquatic_udp_protocol::ConnectionId; +use byteorder::{BigEndian, ReadBytesExt}; pub fn get_connection_id(remote_address: &SocketAddr) -> ConnectionId { match std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH) { @@ -29,3 +31,28 @@ pub fn url_encode_bytes(content: &[u8]) -> Result> { Ok(out) } + +// Function that will convert a small or big number into the smallest form of a byte array. +pub async fn convert_int_to_bytes(number: &u64) -> Vec { + let mut return_data: Vec = Vec::new(); + // return_data.extend(number.to_be_bytes().reverse()); + for i in 1..8 { + if number < &256u64.pow(i) { + let start: usize = 16usize - i as usize; + return_data.extend(number.to_be_bytes()[start..8].iter()); + return return_data; + } + } + return return_data; +} + +pub async fn convert_bytes_to_int(array: &Vec) -> u64 { + let mut array_fixed: Vec = Vec::new(); + let size = 8 - array.len(); + for _ in 0..size { + array_fixed.push(0); + } + array_fixed.extend(array); + let mut rdr = Cursor::new(array_fixed); + return rdr.read_u64::().unwrap(); +} From 37fe6fdc6138f55125e5677990ac9a0f4f376013 Mon Sep 17 00:00:00 2001 From: Power2All Date: Thu, 17 Mar 2022 11:34:13 +0100 Subject: [PATCH 0050/1003] Updated README to reflect changes --- README.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 9a85dddcd..77055a335 100644 --- a/README.md +++ b/README.md @@ -6,15 +6,13 @@ Torrust Tracker is a lightweight but incredibly powerful and feature-rich BitTor 
### Features -* [X] UDP server -* [X] HTTP and/or HTTPS (SSL) server -* [X] Multiple UDP and HTTP(S) blocks for socket binding possible +* [X] Multiple UDP server and HTTP(S) server blocks for socket binding possible * [X] Full IPv4 and IPv6 support for both UDP and HTTP(S) * [X] Private & Whitelisted mode * [X] Built-in API * [X] Torrent whitelisting * [X] Peer authentication using time-bound keys -* [ ] NewTrackOn check supported +* [X] newTrackon check supported for both HTTP, UDP, where IPv4 and IPv6 is properly handled * [X] SQLite3 Persistent loading and saving of the torrent hashes and completed count ### Implemented BEPs From 65834bd98171ee4f5fdf234523a76e92f8e306c3 Mon Sep 17 00:00:00 2001 From: Power2All Date: Thu, 17 Mar 2022 11:46:53 +0100 Subject: [PATCH 0051/1003] Version bump to 2.2.0 --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7017da294..5994554bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1402,7 +1402,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "2.1.2" +version = "2.2.0" dependencies = [ "aquatic_udp_protocol", "binascii", diff --git a/Cargo.toml b/Cargo.toml index f91dba1cb..4570bd8f3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "torrust-tracker" -version = "2.1.2" +version = "2.2.0" license = "AGPL-3.0" authors = ["Mick van Dijke "] description = "A feature rich BitTorrent tracker." 
From cd0473c31050b0686b2a3511efac929cbc921544 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 17 Mar 2022 02:24:54 +0100 Subject: [PATCH 0052/1003] refactor: optimized http (reverse proxy) ip determination --- src/torrust_http_tracker/filters.rs | 72 +++++++++++++++------------- src/torrust_http_tracker/handlers.rs | 58 +++++++++------------- src/torrust_http_tracker/request.rs | 8 ++-- src/torrust_http_tracker/routes.rs | 10 ++-- src/torrust_udp_tracker/handlers.rs | 1 + src/tracker.rs | 6 +-- 6 files changed, 75 insertions(+), 80 deletions(-) diff --git a/src/torrust_http_tracker/filters.rs b/src/torrust_http_tracker/filters.rs index 1e646ac35..9a79f228e 100644 --- a/src/torrust_http_tracker/filters.rs +++ b/src/torrust_http_tracker/filters.rs @@ -2,6 +2,7 @@ use std::convert::Infallible; use std::net::{IpAddr, SocketAddr}; use std::str::FromStr; use std::sync::Arc; +use log::debug; use warp::{Filter, reject, Rejection}; use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId, TorrentTracker}; use crate::key_manager::AuthKey; @@ -98,31 +99,51 @@ pub fn with_auth_key() -> impl Filter,), Error = Infa }) } +/// Check for PeerAddress +pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { + warp::addr::remote() + .and(warp::header::optional::("X-Forwarded-For")) + .map(move |remote_addr: Option, x_forwarded_for: Option| { + (on_reverse_proxy, remote_addr, x_forwarded_for) + }) + .and_then(peer_addr) +} + +/// Get PeerAddress from RemoteAddress or Forwarded +async fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option)) -> WebResult { + if !on_reverse_proxy && remote_addr.is_none() { + return Err(reject::custom(ServerError::AddressNotFound)) + } + + if on_reverse_proxy && x_forwarded_for.is_none() { + return Err(reject::custom(ServerError::AddressNotFound)) + } + + match on_reverse_proxy { + true => { + IpAddr::from_str(x_forwarded_for.as_ref().unwrap()).or_else(|e| { + debug!("{}", e); + 
Err(reject::custom(ServerError::AddressNotFound)) + }) + }, + false => Ok(remote_addr.unwrap().ip()) + } +} + /// Check for AnnounceRequest -pub fn with_announce_request() -> impl Filter + Clone { +pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::filters::query::query::() .and(with_info_hash()) .and(with_peer_id()) - .and(warp::addr::remote()) - .and(warp::header::optional::("X-Forwarded-For")) + .and(with_peer_addr(on_reverse_proxy)) .and_then(announce_request) } /// Parse AnnounceRequest from raw AnnounceRequestQuery, InfoHash and Option -async fn announce_request(announce_request_query: AnnounceRequestQuery, info_hashes: Vec, peer_id: PeerId, remote_addr: Option, forwarded_for: Option) -> WebResult { - // get first forwarded ip - let forwarded_ip = match forwarded_for { - None => None, - Some(forwarded_for_str) => { - forwarded_for_str.split(",").next() - .and_then(|ip_str| IpAddr::from_str(ip_str).ok()) - } - }; - +async fn announce_request(announce_request_query: AnnounceRequestQuery, info_hashes: Vec, peer_id: PeerId, peer_addr: IpAddr) -> WebResult { Ok(AnnounceRequest { info_hash: info_hashes[0], - peer_addr: remote_addr.unwrap(), - forwarded_ip, + peer_addr, downloaded: announce_request_query.downloaded.unwrap_or(0), uploaded: announce_request_query.uploaded.unwrap_or(0), peer_id, @@ -134,30 +155,17 @@ async fn announce_request(announce_request_query: AnnounceRequestQuery, info_has } /// Check for ScrapeRequest -pub fn with_scrape_request() -> impl Filter + Clone { +pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::any() .and(with_info_hash()) - .and(warp::addr::remote()) - .and(warp::header::optional::("X-Forwarded-For")) + .and(with_peer_addr(on_reverse_proxy)) .and_then(scrape_request) } /// Parse ScrapeRequest from InfoHash -async fn scrape_request(info_hashes: Vec, remote_addr: Option, forwarded_for: Option) -> WebResult { - if remote_addr.is_none() { return 
Err(reject::custom(ServerError::AddressNotFound)) } - - // get first forwarded ip - let forwarded_ip = match forwarded_for { - None => None, - Some(forwarded_for_str) => { - forwarded_for_str.split(",").next() - .and_then(|ip_str| IpAddr::from_str(ip_str).ok()) - } - }; - +async fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { Ok(ScrapeRequest { info_hashes, - remote_addr: remote_addr.unwrap(), - forwarded_ip, + peer_addr, }) } diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 2fe216196..f15b7143a 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -36,26 +36,19 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option debug!("{:?}", announce_request); - if tracker.config.on_reverse_proxy && announce_request.forwarded_ip.is_none() { - return Err(reject::custom(ServerError::AddressNotFound)) - } - - let peer_ip = match tracker.config.on_reverse_proxy { - true => announce_request.forwarded_ip.unwrap(), - false => announce_request.peer_addr.ip() - }; - - let peer = TorrentPeer::from_http_announce_request(&announce_request, peer_ip, tracker.config.get_ext_ip()); + let peer = TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer).await; - // get all peers excluding the client_addr + + // get all torrent peers excluding the peer_addr let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; - //if peers.is_none() { return Err(reject::custom(ServerError::NoPeersFound)) } // success response let tracker_copy = tracker.clone(); + let is_ipv4 = announce_request.peer_addr.is_ipv4(); + tokio::spawn(async move { let mut status_writer = tracker_copy.set_stats().await; - if peer_ip.is_ipv4() { + if is_ipv4 { status_writer.tcp4_connections_handled += 1; 
status_writer.tcp4_announces_handled += 1; } else { @@ -63,7 +56,9 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option status_writer.tcp6_announces_handled += 1; } }); + let announce_interval = tracker.config.announce_interval; + send_announce_response(&announce_request, torrent_stats, peers, announce_interval, tracker.config.announce_interval_min) } @@ -91,19 +86,11 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option scrape_request.forwarded_ip.unwrap(), - false => scrape_request.remote_addr.ip() - }; - let tracker_copy = tracker.clone(); + tokio::spawn(async move { let mut status_writer = tracker_copy.set_stats().await; - if ip.is_ipv4() { + if scrape_request.peer_addr.is_ipv4() { status_writer.tcp4_connections_handled += 1; status_writer.tcp4_scrapes_handled += 1; } else { @@ -111,19 +98,8 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option std::result::Result { - if let Some(e) = r.find::() { - debug!("{:?}", e); - let body: String = ErrorResponse { failure_reason: e.to_string() }.write(); - Ok(Response::new(body)) - } else { - let body: String = ErrorResponse { failure_reason: "internal server error".to_string() }.write(); - Ok(Response::new(body)) - } + send_scrape_response(files) } /// Send announce response @@ -157,3 +133,15 @@ fn send_announce_response(announce_request: &AnnounceRequest, torrent_stats: Tor fn send_scrape_response(files: HashMap) -> WebResult { Ok(Response::new(ScrapeResponse { files }.write())) } + +/// Handle all server errors and send error reply +pub async fn send_error(r: Rejection) -> std::result::Result { + let body = if let Some(server_error) = r.find::() { + debug!("{:?}", server_error); + ErrorResponse { failure_reason: server_error.to_string() }.write() + } else { + ErrorResponse { failure_reason: ServerError::InternalServerError.to_string() }.write() + }; + + Ok(Response::new(body)) +} diff --git a/src/torrust_http_tracker/request.rs 
b/src/torrust_http_tracker/request.rs index ac0e9602f..0fb316671 100644 --- a/src/torrust_http_tracker/request.rs +++ b/src/torrust_http_tracker/request.rs @@ -1,4 +1,4 @@ -use std::net::{IpAddr, SocketAddr}; +use std::net::{IpAddr}; use serde::{Deserialize}; use crate::{InfoHash, PeerId}; use crate::torrust_http_tracker::Bytes; @@ -17,8 +17,7 @@ pub struct AnnounceRequestQuery { #[derive(Debug)] pub struct AnnounceRequest { pub info_hash: InfoHash, - pub peer_addr: SocketAddr, - pub forwarded_ip: Option, + pub peer_addr: IpAddr, pub downloaded: Bytes, pub uploaded: Bytes, pub peer_id: PeerId, @@ -30,6 +29,5 @@ pub struct AnnounceRequest { pub struct ScrapeRequest { pub info_hashes: Vec, - pub forwarded_ip: Option, - pub remote_addr: SocketAddr, + pub peer_addr: IpAddr, } diff --git a/src/torrust_http_tracker/routes.rs b/src/torrust_http_tracker/routes.rs index ad873e83e..4b4de722f 100644 --- a/src/torrust_http_tracker/routes.rs +++ b/src/torrust_http_tracker/routes.rs @@ -2,21 +2,21 @@ use std::convert::Infallible; use std::sync::Arc; use warp::{Filter, Rejection}; use crate::TorrentTracker; -use crate::torrust_http_tracker::{handle_announce, handle_error, handle_scrape, with_announce_request, with_auth_key, with_scrape_request, with_tracker}; +use crate::torrust_http_tracker::{handle_announce, send_error, handle_scrape, with_announce_request, with_auth_key, with_scrape_request, with_tracker}; /// All routes pub fn routes(tracker: Arc,) -> impl Filter + Clone { root(tracker.clone()) .or(announce(tracker.clone())) .or(scrape(tracker.clone())) - .recover(handle_error) + .recover(send_error) } /// GET / or / fn root(tracker: Arc,) -> impl Filter + Clone { warp::any() .and(warp::filters::method::get()) - .and(with_announce_request()) + .and(with_announce_request(tracker.config.on_reverse_proxy)) .and(with_auth_key()) .and(with_tracker(tracker)) .and_then(handle_announce) @@ -26,7 +26,7 @@ fn root(tracker: Arc,) -> impl Filter,) -> impl Filter + Clone { 
warp::path::path("announce") .and(warp::filters::method::get()) - .and(with_announce_request()) + .and(with_announce_request(tracker.config.on_reverse_proxy)) .and(with_auth_key()) .and(with_tracker(tracker)) .and_then(handle_announce) @@ -36,7 +36,7 @@ fn announce(tracker: Arc,) -> impl Filter,) -> impl Filter + Clone { warp::path::path("scrape") .and(warp::filters::method::get()) - .and(with_scrape_request()) + .and(with_scrape_request(tracker.config.on_reverse_proxy)) .and(with_auth_key()) .and(with_tracker(tracker)) .and_then(handle_scrape) diff --git a/src/torrust_udp_tracker/handlers.rs b/src/torrust_udp_tracker/handlers.rs index 0aeb804d7..cbd7b7a83 100644 --- a/src/torrust_udp_tracker/handlers.rs +++ b/src/torrust_udp_tracker/handlers.rs @@ -147,6 +147,7 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc Ok(announce_response) } +// todo: refactor this, db lock can be a lot shorter pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tracker: Arc) -> Result { let db = tracker.get_torrents().await; diff --git a/src/tracker.rs b/src/tracker.rs index 58ddd70ab..98c5be730 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize}; use serde; use std::borrow::Cow; use std::collections::BTreeMap; -use tokio::sync::{RwLock, RwLockWriteGuard}; +use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; use crate::common::{AnnounceEventDef, InfoHash, NumberOfBytesDef, PeerId}; use std::net::{IpAddr, SocketAddr}; use crate::{Configuration, key_manager, MAX_SCRAPE_TORRENTS}; @@ -443,7 +443,7 @@ impl TorrentTracker { } } - pub async fn get_torrents(&self) -> tokio::sync::RwLockReadGuard<'_, BTreeMap> { + pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { self.torrents.read().await } @@ -451,7 +451,7 @@ impl TorrentTracker { self.stats.write().await } - pub async fn get_stats(&self) -> tokio::sync::RwLockReadGuard<'_, TrackerStats> { + pub async fn 
get_stats(&self) -> RwLockReadGuard<'_, TrackerStats> { self.stats.read().await } From 660a5550939de38a394436e8efbbeb586832a735 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 17 Mar 2022 14:15:19 +0100 Subject: [PATCH 0053/1003] chore: updated readme with the latest example config --- README.md | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 77055a335..1804f1277 100644 --- a/README.md +++ b/README.md @@ -53,20 +53,26 @@ persistence = false cleanup_interval = 600 cleanup_peerless = true external_ip = "0.0.0.0" -announce_interval = 0 +announce_interval = 120 +announce_interval_min = 900 +peer_timeout = 900 on_reverse_proxy = false [[udp_trackers]] -enabled = true +enabled = false bind_address = "0.0.0.0:6969" +[[udp_trackers]] +enabled = true +bind_address = "[::]:6969" + [[http_trackers]] enabled = true bind_address = "0.0.0.0:6969" -ssl_enabled = true +ssl_enabled = false ssl_bind_address = "0.0.0.0:6868" -ssl_cert_path = "cert.pem" -ssl_key_path = "key.pem" +ssl_cert_path = "" +ssl_key_path = "" [http_api] enabled = true @@ -92,4 +98,4 @@ Read the API documentation [here](https://torrust.github.io/torrust-documentatio ### Credits This project was a joint effort by [Nautilus Cyberneering GmbH](https://nautilus-cyberneering.de/) and [Dutch Bits](https://dutchbits.nl). Also thanks to [Naim A.](https://github.com/naim94a/udpt) and [greatest-ape](https://github.com/greatest-ape/aquatic) for some parts of the code. -Further added features and functions thanks to [Power2All](https://github.com/power2all). \ No newline at end of file +Further added features and functions thanks to [Power2All](https://github.com/power2all). 
From 0bfc7b7603b82997d1f1c4cf5c83f71ad9d8fc77 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 17 Mar 2022 14:22:38 +0100 Subject: [PATCH 0054/1003] chore: updated readme with BEP's --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 1804f1277..929585c11 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,8 @@ Torrust Tracker is a lightweight but incredibly powerful and feature-rich BitTor * [X] SQLite3 Persistent loading and saving of the torrent hashes and completed count ### Implemented BEPs +* [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The BitTorrent Protocol +* [BEP 7](https://www.bittorrent.org/beps/bep_0007.html): IPv6 Support * [BEP 15](http://www.bittorrent.org/beps/bep_0015.html): UDP Tracker Protocol for BitTorrent * [BEP 23](http://bittorrent.org/beps/bep_0023.html): Tracker Returns Compact Peer Lists * [BEP 27](http://bittorrent.org/beps/bep_0027.html): Private Torrents From 9f601a59816f2edd7288580d603489bf1df1adfa Mon Sep 17 00:00:00 2001 From: Power2All Date: Thu, 17 Mar 2022 14:34:05 +0100 Subject: [PATCH 0055/1003] Debugging a issue where API wouldn't bind... 
--- src/main.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/main.rs b/src/main.rs index fbd64abff..a529d4710 100644 --- a/src/main.rs +++ b/src/main.rs @@ -92,11 +92,7 @@ fn start_api_server(config: &HttpApiConfig, tracker: Arc) -> Joi tokio::spawn(async move { let server = http_api_server::build_server(tracker); - let _ = server.bind_with_graceful_shutdown(bind_addr, async move { - tokio::signal::ctrl_c() - .await - .expect("failed to listen to shutdown signal"); - }); + let _ = server.bind(bind_addr).await; }) } From 2e25d537b197efb82249984f26fdd36a7d28e6a1 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 17 Mar 2022 15:53:03 +0100 Subject: [PATCH 0056/1003] fix: (HTTP) getting the peer address from x-forwarded-for when it is an array --- src/torrust_http_tracker/filters.rs | 98 ++++++++++++++++------------- 1 file changed, 53 insertions(+), 45 deletions(-) diff --git a/src/torrust_http_tracker/filters.rs b/src/torrust_http_tracker/filters.rs index 9a79f228e..61fa20a45 100644 --- a/src/torrust_http_tracker/filters.rs +++ b/src/torrust_http_tracker/filters.rs @@ -20,6 +20,50 @@ pub fn with_info_hash() -> impl Filter,), Error = Rejec .and_then(info_hashes) } +/// Check for PeerId +pub fn with_peer_id() -> impl Filter + Clone { + warp::filters::query::raw() + .and_then(peer_id) +} + +/// Pass Arc along +pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { + warp::path::param::() + .map(|key: String| { + AuthKey::from_string(&key) + }) + .or_else(|_| async { + Ok::<(Option,), Infallible>((None,)) + }) +} + +/// Check for PeerAddress +pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { + warp::addr::remote() + .and(warp::header::optional::("X-Forwarded-For")) + .map(move |remote_addr: Option, x_forwarded_for: Option| { + (on_reverse_proxy, remote_addr, x_forwarded_for) + }) + .and_then(peer_addr) +} + +/// Check for AnnounceRequest +pub fn with_announce_request(on_reverse_proxy: bool) -> impl 
Filter + Clone { + warp::filters::query::query::() + .and(with_info_hash()) + .and(with_peer_id()) + .and(with_peer_addr(on_reverse_proxy)) + .and_then(announce_request) +} + +/// Check for ScrapeRequest +pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { + warp::any() + .and(with_info_hash()) + .and(with_peer_addr(on_reverse_proxy)) + .and_then(scrape_request) +} + /// Parse InfoHash from raw query string async fn info_hashes(raw_query: String) -> WebResult> { let split_raw_query: Vec<&str> = raw_query.split("&").collect(); @@ -45,12 +89,6 @@ async fn info_hashes(raw_query: String) -> WebResult> { } } -/// Check for PeerId -pub fn with_peer_id() -> impl Filter + Clone { - warp::filters::query::raw() - .and_then(peer_id) -} - /// Parse PeerId from raw query string async fn peer_id(raw_query: String) -> WebResult { // put all query params in a vec @@ -88,27 +126,6 @@ async fn peer_id(raw_query: String) -> WebResult { } } -/// Pass Arc along -pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { - warp::path::param::() - .map(|key: String| { - AuthKey::from_string(&key) - }) - .or_else(|_| async { - Ok::<(Option,), Infallible>((None,)) - }) -} - -/// Check for PeerAddress -pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { - warp::addr::remote() - .and(warp::header::optional::("X-Forwarded-For")) - .map(move |remote_addr: Option, x_forwarded_for: Option| { - (on_reverse_proxy, remote_addr, x_forwarded_for) - }) - .and_then(peer_addr) -} - /// Get PeerAddress from RemoteAddress or Forwarded async fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option)) -> WebResult { if !on_reverse_proxy && remote_addr.is_none() { @@ -121,7 +138,15 @@ async fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Opti match on_reverse_proxy { true => { - IpAddr::from_str(x_forwarded_for.as_ref().unwrap()).or_else(|e| { + let mut x_forwarded_for_raw = x_forwarded_for.unwrap(); + // 
remove whitespace chars + x_forwarded_for_raw.retain(|c| !c.is_whitespace()); + // get all forwarded ip's in a vec + let x_forwarded_ips: Vec<&str> = x_forwarded_for_raw.split(',').collect(); + // set client ip to last forwarded ip + let x_forwarded_ip = *x_forwarded_ips.last().unwrap(); + + IpAddr::from_str(x_forwarded_ip).or_else(|e| { debug!("{}", e); Err(reject::custom(ServerError::AddressNotFound)) }) @@ -130,15 +155,6 @@ async fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Opti } } -/// Check for AnnounceRequest -pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { - warp::filters::query::query::() - .and(with_info_hash()) - .and(with_peer_id()) - .and(with_peer_addr(on_reverse_proxy)) - .and_then(announce_request) -} - /// Parse AnnounceRequest from raw AnnounceRequestQuery, InfoHash and Option async fn announce_request(announce_request_query: AnnounceRequestQuery, info_hashes: Vec, peer_id: PeerId, peer_addr: IpAddr) -> WebResult { Ok(AnnounceRequest { @@ -154,14 +170,6 @@ async fn announce_request(announce_request_query: AnnounceRequestQuery, info_has }) } -/// Check for ScrapeRequest -pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { - warp::any() - .and(with_info_hash()) - .and(with_peer_addr(on_reverse_proxy)) - .and_then(scrape_request) -} - /// Parse ScrapeRequest from InfoHash async fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { Ok(ScrapeRequest { From a22047863f54e7368b80c73a05c8316c522165ae Mon Sep 17 00:00:00 2001 From: Power2All Date: Thu, 17 Mar 2022 15:59:48 +0100 Subject: [PATCH 0057/1003] Fixing a bug in return data --- src/torrust_http_tracker/response.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/torrust_http_tracker/response.rs b/src/torrust_http_tracker/response.rs index dc6614dd4..d459a6fdd 100644 --- a/src/torrust_http_tracker/response.rs +++ b/src/torrust_http_tracker/response.rs @@ -46,8 +46,8 @@ impl 
AnnounceResponse { let mut bytes: Vec = Vec::new(); bytes.write(b"d8:intervali")?; bytes.write(&self.interval.to_string().as_bytes())?; - bytes.write(b"d12:min intervali")?; - bytes.write(&self.interval.to_string().as_bytes())?; + bytes.write(b"e12:min intervali")?; + bytes.write(&self.interval_min.to_string().as_bytes())?; bytes.write(b"e8:completei")?; bytes.write(&self.complete.to_string().as_bytes())?; bytes.write(b"e10:incompletei")?; From f46df3845e8ac7828db93bad17a9bae5343ee351 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 17 Mar 2022 23:11:10 +0100 Subject: [PATCH 0058/1003] feat: gracefully shutdown udp servers --- Cargo.lock | 28 +++++++++++++++++ Cargo.toml | 1 + src/main.rs | 30 +++++++++++++----- src/torrust_udp_tracker/handlers.rs | 2 +- src/torrust_udp_tracker/server.rs | 47 ++++++++++++++++++----------- 5 files changed, 82 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5994554bc..4279ad5b9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -264,6 +264,7 @@ checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" dependencies = [ "futures-channel", "futures-core", + "futures-executor", "futures-io", "futures-sink", "futures-task", @@ -286,12 +287,34 @@ version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +[[package]] +name = "futures-executor" +version = "0.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + [[package]] name = "futures-io" version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +[[package]] +name = "futures-macro" +version = "0.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "futures-sink" version = "0.3.21" @@ -310,9 +333,13 @@ version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" dependencies = [ + "futures-channel", "futures-core", + "futures-io", + "futures-macro", "futures-sink", "futures-task", + "memchr", "pin-project-lite", "pin-utils", "slab", @@ -1411,6 +1438,7 @@ dependencies = [ "config", "derive_more", "fern", + "futures", "hex", "log", "percent-encoding", diff --git a/Cargo.toml b/Cargo.toml index 4570bd8f3..084a7cfb5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,3 +31,4 @@ config = "0.11" derive_more = "0.99" thiserror = "1.0" aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } +futures = "0.3.21" diff --git a/src/main.rs b/src/main.rs index a529d4710..08610d24a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,6 +7,7 @@ use torrust_tracker::torrust_http_tracker::server::HttpServer; #[tokio::main] async fn main() { + // torrust config let config = match Configuration::load_from_file() { Ok(config) => Arc::new(config), Err(error) => { @@ -14,13 +15,13 @@ async fn main() { } }; - logging::setup_logging(&config); - // the singleton torrent tracker that gets passed to the HTTP and UDP server let tracker = Arc::new(TorrentTracker::new(config.clone()).unwrap_or_else(|e| { panic!("{}", e) })); + logging::setup_logging(&config); + // load persistent torrents if enabled if config.persistence { info!("Loading persistent torrents into memory..."); @@ -38,10 +39,17 @@ async fn main() { let _api_server = start_api_server(&config.http_api, tracker.clone()); } + let (tx, rx) = tokio::sync::watch::channel(false); + let mut udp_server_handles = Vec::new(); + // start the udp blocks for udp_tracker in &config.udp_trackers { + // used to send kill signal 
to thread + if udp_tracker.enabled { - let _ = start_udp_tracker_server(&udp_tracker, tracker.clone()).await; + udp_server_handles.push( + start_udp_tracker_server(&udp_tracker, tracker.clone(), rx.clone()).await + ) } } @@ -52,10 +60,16 @@ async fn main() { } // handle the signals here - let ctrl_c = tokio::signal::ctrl_c(); tokio::select! { - _ = ctrl_c => { + _ = tokio::signal::ctrl_c() => { info!("Torrust shutting down.."); + + // send kill signal + let _ = tx.send(true); + + // await for all udp servers to shutdown + futures::future::join_all(udp_server_handles).await; + // Save torrents if enabled if config.persistence { info!("Saving torrents into SQL from memory..."); @@ -118,13 +132,13 @@ fn start_http_tracker_server(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { - let udp_server = UdpServer::new(tracker, config).await.unwrap_or_else(|e| { +async fn start_udp_tracker_server(config: &UdpTrackerConfig, tracker: Arc, rx: tokio::sync::watch::Receiver) -> JoinHandle<()> { + let udp_server = UdpServer::new(tracker, &config.bind_address).await.unwrap_or_else(|e| { panic!("Could not start UDP server: {}", e); }); info!("Starting UDP server on: {}", config.bind_address); tokio::spawn(async move { - udp_server.start().await; + udp_server.start(rx).await; }) } diff --git a/src/torrust_udp_tracker/handlers.rs b/src/torrust_udp_tracker/handlers.rs index cbd7b7a83..bf25a8861 100644 --- a/src/torrust_udp_tracker/handlers.rs +++ b/src/torrust_udp_tracker/handlers.rs @@ -24,7 +24,7 @@ pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> } } -pub async fn handle_packet(remote_addr: SocketAddr, payload: &[u8], tracker: Arc) -> Response { +pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| ServerError::InternalServerError) { Ok(request) => { let transaction_id = match &request { diff --git 
a/src/torrust_udp_tracker/server.rs b/src/torrust_udp_tracker/server.rs index 2f472ef54..44a60738b 100644 --- a/src/torrust_udp_tracker/server.rs +++ b/src/torrust_udp_tracker/server.rs @@ -1,41 +1,54 @@ +use std::future::Future; use std::io::Cursor; use std::net::{SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{Response}; -use log::debug; +use log::{debug, info}; use tokio::net::UdpSocket; -use crate::{TorrentTracker, UdpTrackerConfig}; +use crate::{TorrentTracker}; use crate::torrust_udp_tracker::{handle_packet, MAX_PACKET_SIZE}; pub struct UdpServer { - socket: UdpSocket, + socket: Arc, tracker: Arc, } impl UdpServer { - pub async fn new(tracker: Arc, config: &UdpTrackerConfig) -> Result { - let srv = UdpSocket::bind(&config.bind_address).await?; + pub async fn new(tracker: Arc, bind_address: &str) -> tokio::io::Result { + let socket = UdpSocket::bind(bind_address).await?; Ok(UdpServer { - socket: srv, + socket: Arc::new(socket), tracker, }) } - pub async fn start(&self) { + pub async fn start(&self, rx: tokio::sync::watch::Receiver) { loop { + let mut rx = rx.clone(); let mut data = [0; MAX_PACKET_SIZE]; - if let Ok((valid_bytes, remote_addr)) = self.socket.recv_from(&mut data).await { - let data = &data[..valid_bytes]; - debug!("Received {} bytes from {}", data.len(), remote_addr); - debug!("{:?}", data); - let response = handle_packet(remote_addr, data, self.tracker.clone()).await; - self.send_response(remote_addr, response).await; + let socket = self.socket.clone(); + let tracker = self.tracker.clone(); + + tokio::select! 
{ + _ = rx.changed() => { + info!("Stopping UDP server: {}...", socket.local_addr().unwrap()); + break; + } + Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { + let payload = data[..valid_bytes].to_vec(); + + debug!("Received {} bytes from {}", payload.len(), remote_addr); + debug!("{:?}", payload); + + let response = handle_packet(remote_addr, payload, tracker).await; + UdpServer::send_response(socket, remote_addr, response).await; + } } } } - async fn send_response(&self, remote_addr: SocketAddr, response: Response) { + async fn send_response(socket: Arc, remote_addr: SocketAddr, response: Response) { debug!("sending response to: {:?}", &remote_addr); let buffer = vec![0u8; MAX_PACKET_SIZE]; @@ -47,14 +60,14 @@ impl UdpServer { let inner = cursor.get_ref(); debug!("{:?}", &inner[..position]); - self.send_packet(&remote_addr, &inner[..position]).await; + UdpServer::send_packet(socket, &remote_addr, &inner[..position]).await; } Err(_) => { debug!("could not write response to bytes."); } } } - async fn send_packet(&self, remote_addr: &SocketAddr, payload: &[u8]) { + async fn send_packet(socket: Arc, remote_addr: &SocketAddr, payload: &[u8]) { // doesn't matter if it reaches or not - let _ = self.socket.send_to(payload, remote_addr).await; + let _ = socket.send_to(payload, remote_addr).await; } } From f9eaa10ba86334dbf918a798e4d48a8d711b0bfe Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 17 Mar 2022 23:50:32 +0100 Subject: [PATCH 0059/1003] refactor: changed udp max packet size from 65535 to 1496 --- src/torrust_udp_tracker/mod.rs | 2 +- src/torrust_udp_tracker/server.rs | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/torrust_udp_tracker/mod.rs b/src/torrust_udp_tracker/mod.rs index cd4b99f5b..6aa5fbce0 100644 --- a/src/torrust_udp_tracker/mod.rs +++ b/src/torrust_udp_tracker/mod.rs @@ -12,5 +12,5 @@ pub type Bytes = u64; pub type Port = u16; pub type TransactionId = i64; -pub const MAX_PACKET_SIZE: usize = 0xffff; +pub 
const MAX_PACKET_SIZE: usize = 1496; pub const PROTOCOL_ID: i64 = 0x41727101980; diff --git a/src/torrust_udp_tracker/server.rs b/src/torrust_udp_tracker/server.rs index 44a60738b..cae1e5b94 100644 --- a/src/torrust_udp_tracker/server.rs +++ b/src/torrust_udp_tracker/server.rs @@ -1,4 +1,3 @@ -use std::future::Future; use std::io::Cursor; use std::net::{SocketAddr}; use std::sync::Arc; From f6cd28224d42332c7151e0256497e27b89369363 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Tue, 5 Apr 2022 23:31:50 +0200 Subject: [PATCH 0060/1003] wip: support for multiple db drivers --- Cargo.lock | 980 +++++++++++++++++++++++++++++++++++++++-- Cargo.toml | 2 + src/database.rs | 217 ++------- src/lib.rs | 3 +- src/sqlite_database.rs | 186 ++++++++ src/tracker.rs | 29 +- 6 files changed, 1202 insertions(+), 215 deletions(-) create mode 100644 src/sqlite_database.rs diff --git a/Cargo.lock b/Cargo.lock index 4279ad5b9..bf0fed8f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,23 @@ # It is not intended for manual editing. 
version = 3 +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "ahash" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43bb833f0bf979d8475d38fbf09ed3b8a55e1885fe93ad3f93239fc6a4f17b98" +dependencies = [ + "getrandom 0.2.3", + "once_cell", + "version_check", +] + [[package]] name = "aho-corasick" version = "0.7.18" @@ -11,6 +28,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + [[package]] name = "aquatic_udp_protocol" version = "0.1.0" @@ -26,30 +52,111 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +[[package]] +name = "arrayvec" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" + +[[package]] +name = "async-trait" +version = "0.1.52" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + [[package]] name = "autocfg" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +[[package]] +name = "base-x" +version = 
"0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" + [[package]] name = "base64" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +[[package]] +name = "bigdecimal" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1e50562e37200edf7c6c43e54a08e64a5553bfb59d9c297d5572512aa517256" +dependencies = [ + "num-bigint 0.3.3", + "num-integer", + "num-traits 0.2.14", + "serde 1.0.120", +] + [[package]] name = "binascii" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" +[[package]] +name = "bindgen" +version = "0.58.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f8523b410d7187a43085e7e064416ea32ded16bd0a4e6fc025e21616d01258f" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "clap", + "env_logger", + "lazy_static", + "lazycell", + "log", + "peeking_take_while", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "which", +] + [[package]] name = "bitflags" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +[[package]] +name = "bitvec" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5237f00a8c86130a0cc317830e558b966dd7850d48a953d998c813f01a41b527" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + [[package]] name = "block-buffer" version = "0.7.3" @@ -90,6 +197,12 @@ dependencies = [ "safemem", ] +[[package]] +name = "bufstream" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" + [[package]] name = "bumpalo" version = "3.8.0" @@ -116,9 +229,18 @@ checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" [[package]] name = "cc" -version = "1.0.66" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" + +[[package]] +name = "cexpr" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" +dependencies = [ + "nom", +] [[package]] name = "cfg-if" @@ -141,10 +263,46 @@ dependencies = [ "libc", "num-integer", "num-traits 0.2.14", - "time", + "serde 1.0.120", + "time 0.1.44", "winapi", ] +[[package]] +name = "clang-sys" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cc00842eed744b858222c4c9faf7243aafc6d33f92f96935263ef4d8a41ce21" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "2.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" +dependencies = [ + "ansi_term", + "atty", + "bitflags", + "strsim", + "textwrap", + "unicode-width", + "vec_map", +] + +[[package]] +name = "cmake" +version = "0.1.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8ad8cef104ac57b68b89df3208164d228503abbdce70f6880ffa3d970e7443a" +dependencies = [ + "cc", +] + [[package]] name = "config" version = "0.11.0" @@ -161,12 +319,34 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "const_fn" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" + 
[[package]] name = "convert_case" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +[[package]] +name = "core-foundation" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" + [[package]] name = "cpufeatures" version = "0.2.1" @@ -176,6 +356,15 @@ dependencies = [ "libc", ] +[[package]] +name = "crc32fast" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +dependencies = [ + "cfg-if 1.0.0", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -185,7 +374,18 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "rustc_version", + "rustc_version 0.4.0", + "syn", +] + +[[package]] +name = "derive_utils" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "532b4c15dccee12c7044f1fcad956e98410860b22231e44a3b827464797ca7bf" +dependencies = [ + "proc-macro2", + "quote", "syn", ] @@ -207,12 +407,31 @@ dependencies = [ "generic-array 0.14.4", ] +[[package]] +name = "discard" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" + [[package]] name = "either" version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +[[package]] +name = "env_logger" +version = "0.8.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + [[package]] name = "fake-simd" version = "0.1.2" @@ -240,12 +459,40 @@ dependencies = [ "log", ] +[[package]] +name = "flate2" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" +dependencies = [ + "cfg-if 1.0.0", + "crc32fast", + "libc", + "libz-sys", + "miniz_oxide", +] + [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.0.0" @@ -256,6 +503,76 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "frunk" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cd67cf7d54b7e72d0ea76f3985c3747d74aee43e0218ad993b7903ba7a5395e" +dependencies = [ + "frunk_core", + "frunk_derives", + "frunk_proc_macros", +] + +[[package]] +name = "frunk_core" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1246cf43ec80bf8b2505b5c360b8fb999c97dabd17dbb604d85558d5cbc25482" + +[[package]] +name = "frunk_derives" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3dbc4f084ec5a3f031d24ccedeb87ab2c3189a2f33b8d070889073837d5ea09e" +dependencies = [ + "frunk_proc_macro_helpers", + "quote", + "syn", +] + +[[package]] +name = "frunk_proc_macro_helpers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99f11257f106c6753f5ffcb8e601fb39c390a088017aaa55b70c526bff15f63e" +dependencies = [ + "frunk_core", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frunk_proc_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a078bd8459eccbb85e0b007b8f756585762a72a9efc53f359b371c3b6351dbcc" +dependencies = [ + "frunk_core", + "frunk_proc_macros_impl", + "proc-macro-hack", +] + +[[package]] +name = "frunk_proc_macros_impl" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ffba99f0fa4f57e42f57388fbb9a0ca863bc2b4261f3c5570fed579d5df6c32" +dependencies = [ + "frunk_core", + "frunk_proc_macro_helpers", + "proc-macro-hack", + "quote", + "syn", +] + +[[package]] +name = "funty" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1847abb9cb65d566acd5942e94aea9c8f547ad02c98e1649326fc0e8910b8b1e" + [[package]] name = "futures" version = "0.3.21" @@ -386,6 +703,12 @@ dependencies = [ "wasi 0.10.0+wasi-snapshot-preview1", ] +[[package]] +name = "glob" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" + [[package]] name = "h2" version = "0.3.4" @@ -411,6 +734,15 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +[[package]] +name = "hashbrown" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +dependencies = [ + 
"ahash", +] + [[package]] name = "headers" version = "0.3.3" @@ -424,7 +756,7 @@ dependencies = [ "http", "mime", "sha-1 0.8.2", - "time", + "time 0.1.44", ] [[package]] @@ -484,6 +816,12 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + [[package]] name = "hyper" version = "0.14.2" @@ -501,7 +839,7 @@ dependencies = [ "httpdate", "itoa", "pin-project", - "socket2", + "socket2 0.3.19", "tokio", "tower-service", "tracing", @@ -526,7 +864,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.9.1", ] [[package]] @@ -547,6 +885,18 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "io-enum" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03e3306b0f260aad2872563eb0d5d1a59f2420fad270a661dce59a01e92d806b" +dependencies = [ + "autocfg", + "derive_utils", + "quote", + "syn", +] + [[package]] name = "itoa" version = "0.4.7" @@ -568,13 +918,29 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "lexical" +version = "5.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f404a90a744e32e8be729034fc33b90cf2a56418fbf594d69aa3c0214ad414e5" +dependencies = [ + "cfg-if 1.0.0", + "lexical-core", +] + [[package]] 
name = "lexical-core" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe" dependencies = [ - "arrayvec", + "arrayvec 0.5.2", "bitflags", "cfg-if 1.0.0", "ryu", @@ -583,9 +949,19 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.101" +version = "0.2.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad5c14e80759d0939d013e6ca49930e59fc53dd8e5009132f76240c179380c09" + +[[package]] +name = "libloading" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb00336871be5ed2c8ed44b60ae9959dc5b9f08539422ed43f09e34ecaeba21" +checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" +dependencies = [ + "cfg-if 1.0.0", + "winapi", +] [[package]] name = "libsqlite3-sys" @@ -597,6 +973,17 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "libz-sys" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f35facd4a5673cb5a48822be2be1d4236c1c99cb4113cab7061ac720d5bf859" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linked-hash-map" version = "0.5.4" @@ -621,6 +1008,15 @@ dependencies = [ "cfg-if 0.1.10", ] +[[package]] +name = "lru" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ea2d928b485416e8908cff2d97d621db22b27f7b3b6729e438bcf42c671ba91" +dependencies = [ + "hashbrown 0.11.2", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -642,6 +1038,15 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +[[package]] +name = "memoffset" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +dependencies = [ + "autocfg", +] 
+ [[package]] name = "mime" version = "0.3.16" @@ -658,6 +1063,16 @@ dependencies = [ "unicase", ] +[[package]] +name = "miniz_oxide" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +dependencies = [ + "adler", + "autocfg", +] + [[package]] name = "mio" version = "0.7.7" @@ -677,7 +1092,7 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ - "socket2", + "socket2 0.3.19", "winapi", ] @@ -699,6 +1114,109 @@ dependencies = [ "twoway", ] +[[package]] +name = "mysql" +version = "21.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06f5abe1c0f91831afd4d35298c08d958e80144869757b913891e5b0d00c2c96" +dependencies = [ + "bufstream", + "bytes", + "io-enum", + "libc", + "lru", + "mysql_common", + "named_pipe", + "native-tls", + "nix", + "once_cell", + "pem", + "percent-encoding", + "serde 1.0.120", + "serde_json", + "socket2 0.4.4", + "twox-hash", + "url", +] + +[[package]] +name = "mysql_common" +version = "0.27.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02fa08ec695a40ed899b1239e81d0d74de5b40802d4fc8b513e2c541717c434e" +dependencies = [ + "base64", + "bigdecimal", + "bindgen", + "bitflags", + "bitvec", + "byteorder", + "bytes", + "cc", + "chrono", + "cmake", + "crc32fast", + "flate2", + "frunk", + "lazy_static", + "lexical", + "num-bigint 0.4.3", + "num-traits 0.2.14", + "rand 0.8.4", + "regex", + "rust_decimal", + "saturating", + "serde 1.0.120", + "serde_json", + "sha1", + "sha2", + "smallvec", + "subprocess", + "thiserror", + "time 0.2.27", + "uuid", +] + +[[package]] +name = "named_pipe" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad9c443cce91fc3e12f017290db75dde490d685cdaaf508d7159d7cf41f0eb2b" 
+dependencies = [ + "winapi", +] + +[[package]] +name = "native-tls" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "nix" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" +dependencies = [ + "bitflags", + "cc", + "cfg-if 1.0.0", + "libc", + "memoffset", +] + [[package]] name = "nom" version = "5.1.2" @@ -719,6 +1237,28 @@ dependencies = [ "winapi", ] +[[package]] +name = "num-bigint" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" +dependencies = [ + "autocfg", + "num-integer", + "num-traits 0.2.14", +] + +[[package]] +name = "num-bigint" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +dependencies = [ + "autocfg", + "num-integer", + "num-traits 0.2.14", +] + [[package]] name = "num-integer" version = "0.1.44" @@ -758,22 +1298,55 @@ dependencies = [ ] [[package]] -name = "once_cell" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" - -[[package]] -name = "opaque-debug" -version = "0.2.3" +name = "once_cell" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" + +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + +[[package]] +name = "openssl" +version = "0.10.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c7ae222234c30df141154f159066c5093ff73b63204dcda7121eb082fc56a95" +dependencies = [ + "bitflags", + "cfg-if 1.0.0", + "foreign-types", + "libc", + "once_cell", + "openssl-sys", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] -name = "opaque-debug" -version = "0.3.0" +name = "openssl-sys" +version = "0.9.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "7e46109c383602735fa0a2e48dd2b7c892b048e1bf69e5c3b1d804b7d9c203cb" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] [[package]] name = "parking_lot" @@ -800,6 +1373,23 @@ dependencies = [ "winapi", ] +[[package]] +name = "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + +[[package]] +name = "pem" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" +dependencies = [ + "base64", + "once_cell", + "regex", +] + [[package]] name = "percent-encoding" version = "2.1.0" @@ -850,6 +1440,12 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +[[package]] +name = "proc-macro-hack" +version = "0.5.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" + [[package]] name = "proc-macro2" version = "1.0.24" @@ -885,6 +1481,16 @@ dependencies = [ "scheduled-thread-pool", ] +[[package]] +name = "r2d2_mysql" +version = "21.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d05145690b395f5515feff202b8f4b9429c500f423ef7129175155c3c3a9e2" +dependencies = [ + "mysql", + "r2d2", +] + [[package]] name = "r2d2_sqlite" version = "0.16.0" @@ -895,6 +1501,12 @@ dependencies = [ "rusqlite", ] +[[package]] +name = "radium" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" + [[package]] name = "rand" version = "0.7.3" @@ -1039,7 +1651,7 @@ dependencies = [ "lru-cache", "memchr", "smallvec", - "time", + "time 0.1.44", ] [[package]] @@ -1048,13 +1660,39 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e52c148ef37f8c375d49d5a73aa70713125b7f19095948a923f80afdeb22ec2" +[[package]] +name = "rust_decimal" +version = "1.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d37baa70cf8662d2ba1c1868c5983dda16ef32b105cce41fb5c47e72936a90b3" +dependencies = [ + "arrayvec 0.7.2", + "num-traits 0.2.14", + "serde 1.0.120", +] + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver 
0.9.0", +] + [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver", + "semver 1.0.4", ] [[package]] @@ -1082,6 +1720,22 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" +[[package]] +name = "saturating" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" + +[[package]] +name = "schannel" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +dependencies = [ + "lazy_static", + "winapi", +] + [[package]] name = "scheduled-thread-pool" version = "0.2.5" @@ -1113,12 +1767,50 @@ dependencies = [ "untrusted", ] +[[package]] +name = "security-framework" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + [[package]] name = "semver" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + [[package]] name = "serde" version = "0.8.23" @@ -1224,6 +1916,40 @@ dependencies = [ "opaque-debug 0.3.0", ] +[[package]] +name = "sha1" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" +dependencies = [ + "sha1_smol", +] + +[[package]] +name = "sha1_smol" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" + +[[package]] +name = "sha2" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.9.0", + "opaque-debug 0.3.0", +] + +[[package]] +name = "shlex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" + [[package]] name = "signal-hook-registry" version = "1.3.0" @@ -1256,18 +1982,102 @@ dependencies = [ "winapi", ] +[[package]] +name = "socket2" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "spin" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "standback" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" +dependencies = [ + "version_check", +] + [[package]] name = "static_assertions" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "stdweb" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" +dependencies = [ + "discard", + "rustc_version 0.2.3", + "stdweb-derive", + "stdweb-internal-macros", + "stdweb-internal-runtime", + "wasm-bindgen", +] + +[[package]] +name = "stdweb-derive" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" +dependencies = [ + "proc-macro2", + "quote", + "serde 1.0.120", + "serde_derive", + "syn", +] + +[[package]] +name = "stdweb-internal-macros" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" +dependencies = [ + "base-x", + "proc-macro2", + "quote", + "serde 1.0.120", + "serde_derive", + "serde_json", + "sha1", + "syn", +] + +[[package]] +name = "stdweb-internal-runtime" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" + +[[package]] +name = "strsim" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + +[[package]] +name = "subprocess" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "055cf3ebc2981ad8f0a5a17ef6652f652d87831f79fddcba2ac57bcb9a0aa407" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "syn" version = "1.0.67" 
@@ -1279,6 +2089,12 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + [[package]] name = "tempfile" version = "3.2.0" @@ -1293,6 +2109,24 @@ dependencies = [ "winapi", ] +[[package]] +name = "termcolor" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", +] + [[package]] name = "thiserror" version = "1.0.26" @@ -1324,6 +2158,44 @@ dependencies = [ "winapi", ] +[[package]] +name = "time" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4752a97f8eebd6854ff91f1c1824cd6160626ac4bd44287f7f4ea2035a02a242" +dependencies = [ + "const_fn", + "libc", + "standback", + "stdweb", + "time-macros", + "version_check", + "winapi", +] + +[[package]] +name = "time-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" +dependencies = [ + "proc-macro-hack", + "time-macros-impl", +] + +[[package]] +name = "time-macros-impl" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd3c141a1b43194f3f56a1411225df8646c55781d5f26db825b3d98507eb482f" +dependencies = [ + "proc-macro-hack", + "proc-macro2", + "quote", + "standback", + "syn", +] + [[package]] name = "tinyvec" version = "1.3.1" @@ -1432,6 +2304,7 @@ name = "torrust-tracker" version = "2.2.0" dependencies = [ "aquatic_udp_protocol", + "async-trait", "binascii", 
"byteorder", "chrono", @@ -1443,6 +2316,7 @@ dependencies = [ "log", "percent-encoding", "r2d2", + "r2d2_mysql", "r2d2_sqlite", "rand 0.8.4", "serde 1.0.120", @@ -1516,6 +2390,17 @@ dependencies = [ "memchr", ] +[[package]] +name = "twox-hash" +version = "1.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0" +dependencies = [ + "cfg-if 1.0.0", + "rand 0.8.4", + "static_assertions", +] + [[package]] name = "typenum" version = "1.12.0" @@ -1546,6 +2431,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-width" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" + [[package]] name = "unicode-xid" version = "0.2.1" @@ -1576,12 +2467,24 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "uuid" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" + [[package]] name = "vcpkg" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +[[package]] +name = "vec_map" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + [[package]] name = "version_check" version = "0.9.2" @@ -1714,6 +2617,15 @@ dependencies = [ "untrusted", ] +[[package]] +name = "which" +version = "3.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" +dependencies = [ + "libc", +] + [[package]] name = "winapi" version = "0.3.9" @@ -1730,12 
+2642,30 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "wyz" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "129e027ad65ce1453680623c3fb5163cbf7107bfe1aa32257e7d0e63f9ced188" +dependencies = [ + "tap", +] + [[package]] name = "yaml-rust" version = "0.4.5" diff --git a/Cargo.toml b/Cargo.toml index 084a7cfb5..b29bf09f5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,6 +24,7 @@ log = {version = "0.4", features = ["release_max_level_info"]} fern = "0.6" chrono = "0.4" byteorder = "1" +r2d2_mysql = "21.0.0" r2d2_sqlite = "0.16.0" r2d2 = "0.8.8" rand = "0.8.4" @@ -32,3 +33,4 @@ derive_more = "0.99" thiserror = "1.0" aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } futures = "0.3.21" +async-trait = "0.1.52" diff --git a/src/database.rs b/src/database.rs index 82d26d6d8..63d06c4f9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,196 +1,63 @@ use std::collections::BTreeMap; -use crate::{InfoHash, AUTH_KEY_LENGTH, TorrentEntry}; -use log::debug; -use r2d2_sqlite::{SqliteConnectionManager, rusqlite}; -use r2d2::{Pool}; -use r2d2_sqlite::rusqlite::NO_PARAMS; +use crate::{InfoHash, TorrentEntry}; use crate::key_manager::AuthKey; -use std::str::FromStr; +use crate::sqlite_database::SqliteDatabase; +use async_trait::async_trait; +use derive_more::{Display, Error}; -pub struct SqliteDatabase { - pool: Pool +pub enum 
DatabaseDrivers { + Sqlite3, + MySQL } -impl SqliteDatabase { - pub fn new(db_path: &str) -> Result { - let sqlite_connection_manager = SqliteConnectionManager::file(db_path); - let sqlite_pool = r2d2::Pool::new(sqlite_connection_manager).expect("Failed to create r2d2 SQLite connection pool."); - let sqlite_database = SqliteDatabase { - pool: sqlite_pool - }; - - if let Err(error) = SqliteDatabase::create_database_tables(&sqlite_database.pool) { - return Err(error) - }; - - Ok(sqlite_database) - } - - pub fn create_database_tables(pool: &Pool) -> Result { - let create_whitelist_table = " - CREATE TABLE IF NOT EXISTS whitelist ( - id integer PRIMARY KEY AUTOINCREMENT, - info_hash VARCHAR(20) NOT NULL UNIQUE - );".to_string(); - - let create_torrents_table = " - CREATE TABLE IF NOT EXISTS torrents ( - id integer PRIMARY KEY AUTOINCREMENT, - info_hash VARCHAR(20) NOT NULL UNIQUE, - completed INTEGER DEFAULT 0 NOT NULL - );".to_string(); - - let create_keys_table = format!(" - CREATE TABLE IF NOT EXISTS keys ( - id integer PRIMARY KEY AUTOINCREMENT, - key VARCHAR({}) NOT NULL UNIQUE, - valid_until INT(10) NOT NULL - );", AUTH_KEY_LENGTH as i8); - - let conn = pool.get().unwrap(); - match conn.execute(&create_whitelist_table, NO_PARAMS) { - Ok(updated) => { - match conn.execute(&create_keys_table, NO_PARAMS) { - Ok(updated2) => { - match conn.execute(&create_torrents_table, NO_PARAMS) { - Ok(updated3) => Ok(updated + updated2 + updated3), - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } - Err(e) => { - debug!("{:?}", e); - Err(e) - } +pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result { + match db_driver { + DatabaseDrivers::Sqlite3 => { + let db = SqliteDatabase::new(db_path)?; + Ok(db) } - } - - pub async fn load_persistent_torrent_data(&self) -> Result, rusqlite::Error> { - let conn = self.pool.get().unwrap(); - let mut stmt = conn.prepare("SELECT info_hash, completed FROM 
torrents")?; - - let torrent_iter = stmt.query_map(NO_PARAMS, |row| { - let info_hash_string: String = row.get(0)?; - let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); - let completed: u32 = row.get(1)?; - Ok((info_hash, completed)) - })?; - - let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(|x| x.ok() ).collect(); - - Ok(torrents) - } - - pub async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), rusqlite::Error> { - let mut conn = self.pool.get().unwrap(); - let db_transaction = conn.transaction()?; - - for (info_hash, torrent_entry) in torrents { - let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); + _ => { + let db = SqliteDatabase::new(db_path)?; + Ok(db) } - - let _ = db_transaction.commit(); - - Ok(()) } +} - pub async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let conn = self.pool.get().unwrap(); - let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; - let mut rows = stmt.query(&[info_hash])?; +#[async_trait] +pub trait Database: Sync + Send { + fn create_database_tables(&self) -> Result; - if let Some(row) = rows.next()? 
{ - let info_hash: String = row.get(0).unwrap(); + async fn load_persistent_torrent_data(&self) -> Result, Error>; - // should never be able to fail - Ok(InfoHash::from_str(&info_hash).unwrap()) - } else { - Err(rusqlite::Error::QueryReturnedNoRows) - } - } + async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), Error>; - pub async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().unwrap(); - match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", &[info_hash.to_string()]) { - Ok(updated) => { - if updated > 0 { return Ok(updated) } - Err(rusqlite::Error::ExecuteReturnedResults) - }, - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result; - pub async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().unwrap(); - match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", &[info_hash.to_string()]) { - Ok(updated) => { - if updated > 0 { return Ok(updated) } - Err(rusqlite::Error::ExecuteReturnedResults) - }, - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; - pub async fn get_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().unwrap(); - let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; - let mut rows = stmt.query(&[key.to_string()])?; + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; - if let Some(row) = rows.next()? 
{ - let key: String = row.get(0).unwrap(); - let valid_until_i64: i64 = row.get(1).unwrap(); + async fn get_key_from_keys(&self, key: &str) -> Result; - Ok(AuthKey { - key, - valid_until: Some(valid_until_i64 as u64) - }) - } else { - Err(rusqlite::Error::QueryReturnedNoRows) - } - } + async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result; - pub async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { - let conn = self.pool.get().unwrap(); - match conn.execute("INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - &[auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()] - ) { - Ok(updated) => { - if updated > 0 { return Ok(updated) } - Err(rusqlite::Error::ExecuteReturnedResults) - }, - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } + async fn remove_key_from_keys(&self, key: String) -> Result; +} + +#[derive(Debug, Display, PartialEq, Error)] +#[allow(dead_code)] +pub enum Error { + #[display(fmt = "Query returned no rows.")] + QueryReturnedNoRows, + #[display(fmt = "Invalid query.")] + InvalidQuery, +} - pub async fn remove_key_from_keys(&self, key: String) -> Result { - let conn = self.pool.get().unwrap(); - match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) { - Ok(updated) => { - if updated > 0 { return Ok(updated) } - Err(rusqlite::Error::ExecuteReturnedResults) - }, - Err(e) => { - debug!("{:?}", e); - Err(e) - } +impl From for Error { + fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { + match e { + r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows, + _ => Error::InvalidQuery } } } diff --git a/src/lib.rs b/src/lib.rs index c055cfae4..a9692ac66 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,11 +3,12 @@ pub mod tracker; pub mod http_api_server; pub mod common; pub mod utils; -pub mod database; +pub mod sqlite_database; pub mod key_manager; pub mod logging; pub mod torrust_udp_tracker; pub mod torrust_http_tracker; +pub mod database; pub use self::config::*; pub use 
torrust_udp_tracker::server::*; diff --git a/src/sqlite_database.rs b/src/sqlite_database.rs new file mode 100644 index 000000000..5bd081c6f --- /dev/null +++ b/src/sqlite_database.rs @@ -0,0 +1,186 @@ +use std::collections::BTreeMap; +use crate::{InfoHash, AUTH_KEY_LENGTH, TorrentEntry, database}; +use log::debug; +use r2d2_sqlite::{SqliteConnectionManager}; +use r2d2::{Pool}; +use r2d2_sqlite::rusqlite::NO_PARAMS; +use crate::key_manager::AuthKey; +use std::str::FromStr; +use crate::database::Database; +use async_trait::async_trait; + +pub struct SqliteDatabase { + pool: Pool +} + +impl SqliteDatabase { + pub fn new(db_path: &str) -> Result { + let cm = SqliteConnectionManager::file(db_path); + let pool = Pool::new(cm).expect("Failed to create r2d2 SQLite connection pool."); + Ok(SqliteDatabase { + pool + }) + } +} + +#[async_trait] +impl Database for SqliteDatabase { + fn create_database_tables(&self) -> Result { + let create_whitelist_table = " + CREATE TABLE IF NOT EXISTS whitelist ( + id integer PRIMARY KEY AUTOINCREMENT, + info_hash VARCHAR(20) NOT NULL UNIQUE + );".to_string(); + + let create_torrents_table = " + CREATE TABLE IF NOT EXISTS torrents ( + id integer PRIMARY KEY AUTOINCREMENT, + info_hash VARCHAR(20) NOT NULL UNIQUE, + completed INTEGER DEFAULT 0 NOT NULL + );".to_string(); + + let create_keys_table = format!(" + CREATE TABLE IF NOT EXISTS keys ( + id integer PRIMARY KEY AUTOINCREMENT, + key VARCHAR({}) NOT NULL UNIQUE, + valid_until INT(10) NOT NULL + );", AUTH_KEY_LENGTH as i8); + + let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + conn.execute(&create_whitelist_table, NO_PARAMS) + .and_then(|_| conn.execute(&create_whitelist_table, NO_PARAMS)) + .and_then(|_| conn.execute(&create_keys_table, NO_PARAMS)) + .and_then(|_| conn.execute(&create_torrents_table, NO_PARAMS)) + .map_err(|_| database::Error::InvalidQuery) + } + + async fn load_persistent_torrent_data(&self) -> Result, database::Error> { + let conn = 
self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; + + let torrent_iter = stmt.query_map(NO_PARAMS, |row| { + let info_hash_string: String = row.get(0)?; + let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); + let completed: u32 = row.get(1)?; + Ok((info_hash, completed)) + })?; + + let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(|x| x.ok() ).collect(); + + Ok(torrents) + } + + async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), database::Error> { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let db_transaction = conn.transaction()?; + + for (info_hash, torrent_entry) in torrents { + let (_seeders, completed, _leechers) = torrent_entry.get_stats(); + let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); + } + + let _ = db_transaction.commit(); + + Ok(()) + } + + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { + let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; + let mut rows = stmt.query(&[info_hash])?; + + if let Some(row) = rows.next()? 
{ + let info_hash: String = row.get(0).unwrap(); + + // should never be able to fail + Ok(InfoHash::from_str(&info_hash).unwrap()) + } else { + Err(database::Error::InvalidQuery) + } + } + + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", &[info_hash.to_string()]) { + Ok(updated) => { + if updated > 0 { return Ok(updated) } + Err(database::Error::QueryReturnedNoRows) + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } + } + + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", &[info_hash.to_string()]) { + Ok(updated) => { + if updated > 0 { return Ok(updated) } + Err(database::Error::QueryReturnedNoRows) + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } + } + + async fn get_key_from_keys(&self, key: &str) -> Result { + let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; + let mut rows = stmt.query(&[key.to_string()])?; + + if let Some(row) = rows.next()? 
{ + let key: String = row.get(0).unwrap(); + let valid_until_i64: i64 = row.get(1).unwrap(); + + Ok(AuthKey { + key, + valid_until: Some(valid_until_i64 as u64) + }) + } else { + Err(database::Error::QueryReturnedNoRows) + } + } + + async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { + let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + match conn.execute("INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", + &[auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()] + ) { + Ok(updated) => { + if updated > 0 { return Ok(updated) } + Err(database::Error::QueryReturnedNoRows) + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } + } + + async fn remove_key_from_keys(&self, key: String) -> Result { + let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) { + Ok(updated) => { + if updated > 0 { return Ok(updated) } + Err(database::Error::QueryReturnedNoRows) + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } + } +} diff --git a/src/tracker.rs b/src/tracker.rs index 98c5be730..24bd645d1 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -5,14 +5,14 @@ use std::collections::BTreeMap; use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; use crate::common::{AnnounceEventDef, InfoHash, NumberOfBytesDef, PeerId}; use std::net::{IpAddr, SocketAddr}; -use crate::{Configuration, key_manager, MAX_SCRAPE_TORRENTS}; +use crate::{Configuration, database, key_manager, MAX_SCRAPE_TORRENTS}; use std::collections::btree_map::Entry; -use crate::database::SqliteDatabase; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use log::{debug}; use crate::key_manager::AuthKey; -use r2d2_sqlite::rusqlite; +use crate::database::{Database, DatabaseDrivers}; +use crate::key_manager::Error::KeyInvalid; use crate::torrust_http_tracker::AnnounceRequest; 
#[derive(Serialize, Deserialize, Clone, PartialEq)] @@ -265,18 +265,19 @@ pub struct TrackerStats { pub struct TorrentTracker { pub config: Arc, torrents: tokio::sync::RwLock>, - database: SqliteDatabase, + database: Box, stats: tokio::sync::RwLock, } impl TorrentTracker { - pub fn new(config: Arc) -> Result { - let database = SqliteDatabase::new(&config.db_path)?; + pub fn new(config: Arc) -> Result { + let db_driver = DatabaseDrivers::Sqlite3; + let database = database::connect_database(&db_driver, "data")?; Ok(TorrentTracker { config, torrents: RwLock::new(std::collections::BTreeMap::new()), - database, + database: Box::new(database), stats: RwLock::new(TrackerStats { tcp4_connections_handled: 0, tcp4_announces_handled: 0, @@ -306,7 +307,7 @@ impl TorrentTracker { self.config.mode == TrackerMode::ListedMode || self.config.mode == TrackerMode::PrivateListedMode } - pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { + pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { let auth_key = key_manager::generate_auth_key(seconds_valid); // add key to database @@ -315,12 +316,12 @@ impl TorrentTracker { Ok(auth_key) } - pub async fn remove_auth_key(&self, key: String) -> Result { + pub async fn remove_auth_key(&self, key: String) -> Result { self.database.remove_key_from_keys(key).await } pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key_manager::Error> { - let db_key = self.database.get_key_from_keys(&auth_key.key).await?; + let db_key = self.database.get_key_from_keys(&auth_key.key).await.map_err(|_| KeyInvalid)?; key_manager::verify_auth_key(&db_key) } @@ -353,7 +354,7 @@ impl TorrentTracker { } // Loading the torrents into memory - pub async fn load_torrents(&self) -> Result<(), rusqlite::Error> { + pub async fn load_torrents(&self) -> Result<(), database::Error> { let torrents = self.database.load_persistent_torrent_data().await?; for torrent in torrents { @@ -364,18 +365,18 @@ impl TorrentTracker { } // 
Saving the torrents from memory - pub async fn save_torrents(&self) -> Result<(), rusqlite::Error> { + pub async fn save_torrents(&self) -> Result<(), database::Error> { let torrents = self.torrents.read().await; self.database.save_persistent_torrent_data(&*torrents).await } // Adding torrents is not relevant to public trackers. - pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result { + pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result { self.database.add_info_hash_to_whitelist(info_hash.clone()).await } // Removing torrents is not relevant to public trackers. - pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result { + pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result { self.database.remove_info_hash_from_whitelist(info_hash.clone()).await } From a9956381480bba15e00df08cab43b2e807e6a79f Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 10 Apr 2022 09:45:49 +0200 Subject: [PATCH 0061/1003] Fixing torrent cleanup, adding defragmentation of memory, and fixed response failure and bumping the version --- Cargo.lock | 2 +- Cargo.toml | 2 +- src/torrust_http_tracker/response.rs | 1 + src/tracker.rs | 76 +++++++++++++++------------- 4 files changed, 44 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bf0fed8f5..68f52fa18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2301,7 +2301,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "2.2.0" +version = "2.2.1" dependencies = [ "aquatic_udp_protocol", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index b29bf09f5..e3e57c378 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "torrust-tracker" -version = "2.2.0" +version = "2.2.1" license = "AGPL-3.0" authors = ["Mick van Dijke "] description = "A feature rich BitTorrent tracker." 
diff --git a/src/torrust_http_tracker/response.rs b/src/torrust_http_tracker/response.rs index d459a6fdd..af27bc5e9 100644 --- a/src/torrust_http_tracker/response.rs +++ b/src/torrust_http_tracker/response.rs @@ -86,6 +86,7 @@ impl ScrapeResponse { #[derive(Serialize)] pub struct ErrorResponse { + #[serde(rename = "failure reason")] pub failure_reason: String } diff --git a/src/tracker.rs b/src/tracker.rs index 24bd645d1..b79ffb6cf 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -9,7 +9,7 @@ use crate::{Configuration, database, key_manager, MAX_SCRAPE_TORRENTS}; use std::collections::btree_map::Entry; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use log::{debug}; +use log::info; use crate::key_manager::AuthKey; use crate::database::{Database, DatabaseDrivers}; use crate::key_manager::Error::KeyInvalid; @@ -456,47 +456,53 @@ impl TorrentTracker { self.stats.read().await } - // remove torrents without peers + // remove torrents without peers if enabled, and defragment memory pub async fn cleanup_torrents(&self) { - debug!("Cleaning torrents.."); - let mut lock = self.torrents.write().await; - let db: &mut BTreeMap = &mut *lock; - let mut torrents_to_remove = Vec::new(); - - for (k, torrent_entry) in db.iter_mut() { - // timed-out peers.. - { - let mut peers_to_remove = Vec::new(); - let torrent_peers = &mut torrent_entry.peers; - - for (peer_id, peer) in torrent_peers.iter() { - if peer.is_seeder() { - if peer.updated.elapsed() > std::time::Duration::from_secs(self.config.peer_timeout as u64) { - // remove seeders after 5 minutes since last update... - peers_to_remove.push(peer_id.clone()); - torrent_entry.seeders -= 1; - } - } else if peer.updated.elapsed() > std::time::Duration::from_secs(self.config.peer_timeout as u64) { - // remove peers after 2 hours since last update... 
- peers_to_remove.push(peer_id.clone()); - } - } + info!("Cleaning torrents..."); + let lock = self.torrents.write().await; + + // First we create a mapping of all the torrent hashes in a vector, and we use this to iterate through the btreemap. + // Every hash we have handled, we remove from the btreemap completely, and push it to the top. + let mut torrent_hashes: Vec = Vec::new(); + for (k, _torrent_entry) in lock.iter() { + torrent_hashes.push(k.clone()); + } + + drop(lock); + + // Let's iterate through all torrents, and parse. + for hash in torrent_hashes.iter() { + let mut torrent = TorrentEntry{ + peers: BTreeMap::new(), + completed: 0, + seeders: 0 + }; - for peer_id in peers_to_remove.iter() { - torrent_peers.remove(peer_id); + let lock = self.torrents.write().await; + let torrent_data = lock.get(hash).unwrap().clone(); + drop(lock); + + torrent.completed = torrent_data.completed.clone(); + for (peer_id, peer) in torrent_data.peers.iter() { + if peer.updated.elapsed() > std::time::Duration::from_secs(self.config.peer_timeout as u64) { + continue; + } + torrent.peers.insert(peer_id.clone(), peer.clone()); + if peer.is_seeder() { + torrent.seeders += 1; } } - + let mut lock = self.torrents.write().await; + lock.remove(hash); if self.config.mode.clone() == TrackerMode::PublicMode && self.config.cleanup_peerless && !self.config.persistence { - // peer-less torrents.. 
- if torrent_entry.peers.len() == 0 { - torrents_to_remove.push(k.clone()); + if torrent.peers.len() != 0 { + lock.insert(hash.clone(), torrent); } + } else { + lock.insert(hash.clone(), torrent); } + drop(lock); } - - for info_hash in torrents_to_remove { - db.remove(&info_hash); - } + info!("Torrents cleaned up."); } } From 5baea30dfe2f9cb2f3005de63775622bf3bb3396 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 11 Apr 2022 01:01:03 +0200 Subject: [PATCH 0062/1003] feat: added MySQL database support --- src/config.rs | 3 + src/database.rs | 28 +++++-- src/lib.rs | 1 + src/mysql_database.rs | 183 +++++++++++++++++++++++++++++++++++++++++ src/sqlite_database.rs | 4 +- src/tracker.rs | 7 +- 6 files changed, 212 insertions(+), 14 deletions(-) create mode 100644 src/mysql_database.rs diff --git a/src/config.rs b/src/config.rs index 94b37464d..529ae07fe 100644 --- a/src/config.rs +++ b/src/config.rs @@ -8,6 +8,7 @@ use std::net::{IpAddr}; use std::path::Path; use std::str::FromStr; use config::{ConfigError, Config, File}; +use crate::database::DatabaseDrivers; #[derive(Serialize, Deserialize, PartialEq)] pub enum TrackerServer { @@ -50,6 +51,7 @@ pub struct HttpApiConfig { pub struct Configuration { pub log_level: Option, pub mode: TrackerMode, + pub db_driver: DatabaseDrivers, pub db_path: String, pub persistence: bool, pub cleanup_interval: Option, @@ -132,6 +134,7 @@ impl Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), mode: TrackerMode::PublicMode, + db_driver: DatabaseDrivers::Sqlite3, db_path: String::from("data.db"), persistence: false, cleanup_interval: Some(600), diff --git a/src/database.rs b/src/database.rs index 63d06c4f9..0c0e4a303 100644 --- a/src/database.rs +++ b/src/database.rs @@ -4,28 +4,38 @@ use crate::key_manager::AuthKey; use crate::sqlite_database::SqliteDatabase; use async_trait::async_trait; use derive_more::{Display, Error}; +use log::debug; +use crate::mysql_database::MysqlDatabase; 
+use serde::{Serialize, Deserialize}; +#[derive(Serialize, Deserialize, Debug)] pub enum DatabaseDrivers { Sqlite3, MySQL } -pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result { - match db_driver { +pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result, r2d2::Error> { + debug!("{:?}", db_driver); + + let database: Box = match db_driver { DatabaseDrivers::Sqlite3 => { let db = SqliteDatabase::new(db_path)?; - Ok(db) + Box::new(db) } - _ => { - let db = SqliteDatabase::new(db_path)?; - Ok(db) + DatabaseDrivers::MySQL => { + let db = MysqlDatabase::new(db_path)?; + Box::new(db) } - } + }; + + database.create_database_tables().expect("Could not create database tables."); + + Ok(database) } #[async_trait] pub trait Database: Sync + Send { - fn create_database_tables(&self) -> Result; + fn create_database_tables(&self) -> Result<(), Error>; async fn load_persistent_torrent_data(&self) -> Result, Error>; @@ -51,6 +61,8 @@ pub enum Error { QueryReturnedNoRows, #[display(fmt = "Invalid query.")] InvalidQuery, + #[display(fmt = "Database error.")] + DatabaseError, } impl From for Error { diff --git a/src/lib.rs b/src/lib.rs index a9692ac66..addc7e1a1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,6 +9,7 @@ pub mod logging; pub mod torrust_udp_tracker; pub mod torrust_http_tracker; pub mod database; +pub mod mysql_database; pub use self::config::*; pub use torrust_udp_tracker::server::*; diff --git a/src/mysql_database.rs b/src/mysql_database.rs new file mode 100644 index 000000000..f6907f309 --- /dev/null +++ b/src/mysql_database.rs @@ -0,0 +1,183 @@ +use std::collections::BTreeMap; +use crate::{InfoHash, AUTH_KEY_LENGTH, TorrentEntry, database}; +use log::debug; +use r2d2::{Pool}; +use crate::key_manager::AuthKey; +use std::str::FromStr; +use crate::database::Database; +use async_trait::async_trait; +use r2d2_mysql::mysql::{Opts, OptsBuilder, params, TxOpts}; +use r2d2_mysql::mysql::prelude::Queryable; +use 
r2d2_mysql::MysqlConnectionManager; + +pub struct MysqlDatabase { + pool: Pool +} + +impl MysqlDatabase { + pub fn new(db_path: &str) -> Result { + let opts = Opts::from_url(&db_path).expect("Failed to connect to MySQL database."); + let builder = OptsBuilder::from_opts(opts); + let manager = MysqlConnectionManager::new(builder); + let pool = r2d2::Pool::builder().build(manager).expect("Failed to create r2d2 MySQL connection pool."); + + Ok(Self { + pool + }) + } +} + +#[async_trait] +impl Database for MysqlDatabase { + fn create_database_tables(&self) -> Result<(), database::Error> { + let create_whitelist_table = " + CREATE TABLE IF NOT EXISTS whitelist ( + id integer PRIMARY KEY AUTO_INCREMENT, + info_hash VARCHAR(20) NOT NULL UNIQUE + );".to_string(); + + let create_torrents_table = " + CREATE TABLE IF NOT EXISTS torrents ( + id integer PRIMARY KEY AUTO_INCREMENT, + info_hash VARCHAR(20) NOT NULL UNIQUE, + completed INTEGER DEFAULT 0 NOT NULL + );".to_string(); + + let create_keys_table = format!(" + CREATE TABLE IF NOT EXISTS `keys` ( + `id` INT NOT NULL AUTO_INCREMENT, + `key` VARCHAR({}) NOT NULL, + `valid_until` INT(10) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE (`key`) + );", AUTH_KEY_LENGTH as i8); + + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + conn.query_drop(&create_torrents_table).expect("Could not create torrents table."); + conn.query_drop(&create_keys_table).expect("Could not create keys table."); + conn.query_drop(&create_whitelist_table).expect("Could not create whitelist table."); + + Ok(()) + } + + async fn load_persistent_torrent_data(&self) -> Result, database::Error> { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let torrents: Vec<(InfoHash, u32)> = conn.query_map("SELECT info_hash, completed FROM torrents", |(info_hash_string, completed): (String, u32)| { + let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); + (info_hash, completed) + }).map_err(|_| 
database::Error::QueryReturnedNoRows)?; + + Ok(torrents) + } + + async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), database::Error> { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let mut db_transaction = conn.start_transaction(TxOpts::default()).map_err(|_| database::Error::DatabaseError)?; + + for (info_hash, torrent_entry) in torrents { + let (_seeders, completed, _leechers) = torrent_entry.get_stats(); + let _ = db_transaction.exec_drop("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", (info_hash.to_string(), completed.to_string())); + } + + let _ = db_transaction.commit(); + + Ok(()) + } + + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + match conn.exec_first::("SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", params! { info_hash => info_hash }) + .map_err(|_| database::Error::QueryReturnedNoRows)? { + Some(info_hash) => { + Ok(InfoHash::from_str(&info_hash).unwrap()) + }, + None => { + Err(database::Error::InvalidQuery) + } + } + } + + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let info_hash_str = info_hash.to_string(); + + match conn.exec_drop("INSERT INTO whitelist (info_hash) VALUES (:info_hash_str)", params! { info_hash_str }) { + Ok(_) => { + Ok(1) + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } + } + + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let info_hash = info_hash.to_string(); + + match conn.exec_drop("DELETE FROM whitelist WHERE info_hash = :info_hash", params! 
{ info_hash }) { + Ok(_) => { + Ok(1) + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } + } + + async fn get_key_from_keys(&self, key: &str) -> Result { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + match conn.exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key => key }) + .map_err(|_| database::Error::QueryReturnedNoRows)? { + Some((key, valid_until)) => { + Ok(AuthKey { + key, + valid_until: Some(valid_until as u64) + }) + }, + None => { + Err(database::Error::InvalidQuery) + } + } + } + + async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let key = auth_key.key.to_string(); + let valid_until = auth_key.valid_until.unwrap_or(0).to_string(); + + match conn.exec_drop("INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", params! { key, valid_until }) { + Ok(_) => { + Ok(1) + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } + } + + async fn remove_key_from_keys(&self, key: String) -> Result { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + match conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! 
{ key }) { + Ok(_) => { + Ok(1) + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } + } +} diff --git a/src/sqlite_database.rs b/src/sqlite_database.rs index 5bd081c6f..c54a3c79c 100644 --- a/src/sqlite_database.rs +++ b/src/sqlite_database.rs @@ -25,7 +25,7 @@ impl SqliteDatabase { #[async_trait] impl Database for SqliteDatabase { - fn create_database_tables(&self) -> Result { + fn create_database_tables(&self) -> Result<(), database::Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( id integer PRIMARY KEY AUTOINCREMENT, @@ -49,10 +49,10 @@ impl Database for SqliteDatabase { let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; conn.execute(&create_whitelist_table, NO_PARAMS) - .and_then(|_| conn.execute(&create_whitelist_table, NO_PARAMS)) .and_then(|_| conn.execute(&create_keys_table, NO_PARAMS)) .and_then(|_| conn.execute(&create_torrents_table, NO_PARAMS)) .map_err(|_| database::Error::InvalidQuery) + .map(|_| ()) } async fn load_persistent_torrent_data(&self) -> Result, database::Error> { diff --git a/src/tracker.rs b/src/tracker.rs index b79ffb6cf..1caccff23 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -11,7 +11,7 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use log::info; use crate::key_manager::AuthKey; -use crate::database::{Database, DatabaseDrivers}; +use crate::database::{Database}; use crate::key_manager::Error::KeyInvalid; use crate::torrust_http_tracker::AnnounceRequest; @@ -271,13 +271,12 @@ pub struct TorrentTracker { impl TorrentTracker { pub fn new(config: Arc) -> Result { - let db_driver = DatabaseDrivers::Sqlite3; - let database = database::connect_database(&db_driver, "data")?; + let database = database::connect_database(&config.db_driver, &config.db_path)?; Ok(TorrentTracker { config, torrents: RwLock::new(std::collections::BTreeMap::new()), - database: Box::new(database), + database, stats: RwLock::new(TrackerStats { 
tcp4_connections_handled: 0, tcp4_announces_handled: 0, From f407b9e686dd0a713c9901132ad52590d7885669 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 11 Apr 2022 23:45:38 +0200 Subject: [PATCH 0063/1003] fix: error when running in PrivateMode or PrivateListedMode --- src/config.rs | 27 ++++++++------------------- src/main.rs | 38 ++++++++++++++++++++++---------------- src/tracker.rs | 8 ++++---- 3 files changed, 34 insertions(+), 39 deletions(-) diff --git a/src/config.rs b/src/config.rs index 529ae07fe..f9166e577 100644 --- a/src/config.rs +++ b/src/config.rs @@ -16,18 +16,17 @@ pub enum TrackerServer { HTTP } -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Debug)] pub struct UdpTrackerConfig { pub enabled: bool, pub bind_address: String, } -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Debug)] pub struct HttpTrackerConfig { pub enabled: bool, pub bind_address: String, pub ssl_enabled: bool, - pub ssl_bind_address: String, #[serde(serialize_with = "none_as_empty_string")] pub ssl_cert_path: Option, #[serde(serialize_with = "none_as_empty_string")] @@ -35,8 +34,11 @@ pub struct HttpTrackerConfig { } impl HttpTrackerConfig { - pub fn is_ssl_enabled(&self) -> bool { - self.ssl_enabled && self.ssl_cert_path.is_some() && self.ssl_key_path.is_some() + pub fn verify_ssl_cert_and_key_set(&self) -> bool { + self.ssl_cert_path.is_some() + && self.ssl_key_path.is_some() + && !self.ssl_cert_path.as_ref().unwrap().is_empty() + && !self.ssl_key_path.as_ref().unwrap().is_empty() } } @@ -163,7 +165,6 @@ impl Configuration { enabled: false, bind_address: String::from("0.0.0.0:6969"), ssl_enabled: false, - ssl_bind_address: String::from("0.0.0.0:6868"), ssl_cert_path: None, ssl_key_path: None } @@ -171,15 +172,6 @@ impl Configuration { configuration } - pub fn verify(&self) -> Result<(), ConfigurationError> { - // UDP is not secure for sending private keys - if self.mode == TrackerMode::PrivateMode || self.mode == 
TrackerMode::PrivateListedMode { - return Err(ConfigurationError::TrackerModeIncompatible) - } - - Ok(()) - } - pub fn load_from_file() -> Result { let mut config = Config::new(); @@ -197,10 +189,7 @@ impl Configuration { let torrust_config: Configuration = config.try_into().map_err(|e| ConfigError::Message(format!("Errors while processing config: {}.", e)))?; - match torrust_config.verify() { - Ok(_) => Ok(torrust_config), - Err(e) => Err(ConfigError::Message(format!("Errors while processing config: {}.", e))) - } + Ok(torrust_config) } pub fn save_to_file(&self) -> Result<(), ()>{ diff --git a/src/main.rs b/src/main.rs index 08610d24a..721385760 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,6 @@ use std::net::SocketAddr; use std::sync::Arc; -use log::info; +use log::{info}; use tokio::task::JoinHandle; use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; use torrust_tracker::torrust_http_tracker::server::HttpServer; @@ -39,24 +39,33 @@ async fn main() { let _api_server = start_api_server(&config.http_api, tracker.clone()); } + // used to send graceful shutdown signal to udp listeners let (tx, rx) = tokio::sync::watch::channel(false); let mut udp_server_handles = Vec::new(); // start the udp blocks for udp_tracker in &config.udp_trackers { - // used to send kill signal to thread + if !udp_tracker.enabled { continue; } - if udp_tracker.enabled { - udp_server_handles.push( - start_udp_tracker_server(&udp_tracker, tracker.clone(), rx.clone()).await - ) + if tracker.is_private() { + panic!("Could not start UDP tracker on: {} while in {:?}. 
UDP is not safe for private trackers!", udp_tracker.bind_address, config.mode); } + + udp_server_handles.push( + start_udp_tracker_server(&udp_tracker, tracker.clone(), rx.clone()).await + ) } // start the http blocks for http_tracker in &config.http_trackers { - let _ = start_http_tracker_server(&http_tracker, tracker.clone(), true); - let _ = start_http_tracker_server(&http_tracker, tracker.clone(), false); + if !http_tracker.enabled { continue; } + + // SSL requires a cert and a key + if http_tracker.ssl_enabled && !http_tracker.verify_ssl_cert_and_key_set() { + panic!("Could not start HTTP tracker on: {}, missing SSL Cert or Key!", http_tracker.bind_address); + } + + let _ = start_http_tracker_server(&http_tracker, tracker.clone()); } // handle the signals here @@ -110,22 +119,19 @@ fn start_api_server(config: &HttpApiConfig, tracker: Arc) -> Joi }) } -fn start_http_tracker_server(config: &HttpTrackerConfig, tracker: Arc, ssl: bool) -> JoinHandle<()> { +fn start_http_tracker_server(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { let http_tracker = HttpServer::new(tracker); - let enabled = config.enabled; let bind_addr = config.bind_address.parse::().unwrap(); let ssl_enabled = config.ssl_enabled; - let ssl_bind_addr = config.ssl_bind_address.parse::().unwrap(); let ssl_cert_path = config.ssl_cert_path.clone(); let ssl_key_path = config.ssl_key_path.clone(); tokio::spawn(async move { // run with tls if ssl_enabled and cert and key path are set - if ssl && ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting HTTPS server on: {} (TLS)", ssl_bind_addr); - http_tracker.start_tls(ssl_bind_addr, ssl_cert_path.as_ref().unwrap(), ssl_key_path.as_ref().unwrap()).await; - } - if !ssl && enabled { + if ssl_enabled { + info!("Starting HTTPS server on: {} (TLS)", bind_addr); + http_tracker.start_tls(bind_addr, ssl_cert_path.as_ref().unwrap(), ssl_key_path.as_ref().unwrap()).await; + } else { info!("Starting HTTP server on: {}", 
bind_addr); http_tracker.start(bind_addr).await; } diff --git a/src/tracker.rs b/src/tracker.rs index 1caccff23..a1e172690 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -15,7 +15,7 @@ use crate::database::{Database}; use crate::key_manager::Error::KeyInvalid; use crate::torrust_http_tracker::AnnounceRequest; -#[derive(Serialize, Deserialize, Clone, PartialEq)] +#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)] pub enum TrackerMode { // Will track every new info hash and serve every peer. #[serde(rename = "public")] @@ -294,15 +294,15 @@ impl TorrentTracker { }) } - fn is_public(&self) -> bool { + pub fn is_public(&self) -> bool { self.config.mode == TrackerMode::PublicMode } - fn is_private(&self) -> bool { + pub fn is_private(&self) -> bool { self.config.mode == TrackerMode::PrivateMode || self.config.mode == TrackerMode::PrivateListedMode } - fn is_whitelisted(&self) -> bool { + pub fn is_whitelisted(&self) -> bool { self.config.mode == TrackerMode::ListedMode || self.config.mode == TrackerMode::PrivateListedMode } From 12523c4c4ad05a845cfc46b4f0636a50e06a56bc Mon Sep 17 00:00:00 2001 From: Power2All Date: Tue, 12 Apr 2022 13:06:33 +0200 Subject: [PATCH 0064/1003] Bypassing a error --- src/tracker.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/tracker.rs b/src/tracker.rs index a1e172690..43dd3b00f 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -202,7 +202,9 @@ impl TorrentEntry { } AnnounceEvent::Stopped => { if peer_old.is_seeder() { - self.seeders -= 1; + if self.seeders != 0 { + self.seeders -= 1; + } } } // impossible, started should be the first time a peer announces itself From 64fd27183d3faf20eab36c7dee7c439876c5cbe8 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 13 Apr 2022 01:46:16 +0200 Subject: [PATCH 0065/1003] fix: fixed PeerKeyInvalid error when using MySQL --- src/mysql_database.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/mysql_database.rs 
b/src/mysql_database.rs index f6907f309..0597d46aa 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -1,5 +1,5 @@ use std::collections::BTreeMap; -use crate::{InfoHash, AUTH_KEY_LENGTH, TorrentEntry, database}; +use crate::{InfoHash, AUTH_KEY_LENGTH, database}; use log::debug; use r2d2::{Pool}; use crate::key_manager::AuthKey; @@ -9,6 +9,7 @@ use async_trait::async_trait; use r2d2_mysql::mysql::{Opts, OptsBuilder, params, TxOpts}; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::MysqlConnectionManager; +use crate::torrent::TorrentEntry; pub struct MysqlDatabase { pool: Pool @@ -136,7 +137,7 @@ impl Database for MysqlDatabase { async fn get_key_from_keys(&self, key: &str) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; - match conn.exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key => key }) + match conn.exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) .map_err(|_| database::Error::QueryReturnedNoRows)? 
{ Some((key, valid_until)) => { Ok(AuthKey { From fa330c7d94527613cf36cd47eae0dc0d98f0d481 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 13 Apr 2022 02:14:56 +0200 Subject: [PATCH 0066/1003] fix: stats tracker now runs on a dedicated thread instead of spawning every request --- Cargo.lock | 1 + Cargo.toml | 2 +- src/database.rs | 3 +- src/http_api_server.rs | 2 +- src/lib.rs | 2 + src/sqlite_database.rs | 3 +- src/torrent.rs | 221 ++++++++++++++++++++++ src/torrust_http_tracker/handlers.rs | 43 ++--- src/torrust_udp_tracker/handlers.rs | 48 ++--- src/tracker.rs | 271 ++------------------------- src/tracker_stats.rs | 123 ++++++++++++ 11 files changed, 400 insertions(+), 319 deletions(-) create mode 100644 src/torrent.rs create mode 100644 src/tracker_stats.rs diff --git a/Cargo.lock b/Cargo.lock index 68f52fa18..2cb824d71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2224,6 +2224,7 @@ dependencies = [ "mio", "num_cpus", "once_cell", + "parking_lot", "pin-project-lite", "signal-hook-registry", "tokio-macros", diff --git a/Cargo.toml b/Cargo.toml index e3e57c378..81f76abe9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,7 @@ serde_json = "1.0.72" hex = "0.4.3" percent-encoding = "2.1.0" warp = {version = "0.3", features = ["tls"]} -tokio = {version = "1.7", features = ["macros", "io-util", "net", "time", "rt-multi-thread", "fs", "sync", "signal"]} +tokio = {version = "1.7", features = ["full"]} binascii = "0.1" toml = "0.5" log = {version = "0.4", features = ["release_max_level_info"]} diff --git a/src/database.rs b/src/database.rs index 0c0e4a303..18bf41994 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,5 +1,5 @@ use std::collections::BTreeMap; -use crate::{InfoHash, TorrentEntry}; +use crate::{InfoHash}; use crate::key_manager::AuthKey; use crate::sqlite_database::SqliteDatabase; use async_trait::async_trait; @@ -7,6 +7,7 @@ use derive_more::{Display, Error}; use log::debug; use crate::mysql_database::MysqlDatabase; use serde::{Serialize, 
Deserialize}; +use crate::torrent::TorrentEntry; #[derive(Serialize, Deserialize, Debug)] pub enum DatabaseDrivers { diff --git a/src/http_api_server.rs b/src/http_api_server.rs index a6bee4a14..eff45fc33 100644 --- a/src/http_api_server.rs +++ b/src/http_api_server.rs @@ -4,7 +4,7 @@ use std::cmp::min; use std::collections::{HashMap, HashSet}; use std::sync::Arc; use warp::{filters, reply, reply::Reply, serve, Filter, Server}; -use crate::TorrentPeer; +use crate::torrent::TorrentPeer; use super::common::*; #[derive(Deserialize, Debug)] diff --git a/src/lib.rs b/src/lib.rs index addc7e1a1..3d928aff4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,6 +10,8 @@ pub mod torrust_udp_tracker; pub mod torrust_http_tracker; pub mod database; pub mod mysql_database; +pub mod torrent; +pub mod tracker_stats; pub use self::config::*; pub use torrust_udp_tracker::server::*; diff --git a/src/sqlite_database.rs b/src/sqlite_database.rs index c54a3c79c..5facd99d8 100644 --- a/src/sqlite_database.rs +++ b/src/sqlite_database.rs @@ -1,5 +1,5 @@ use std::collections::BTreeMap; -use crate::{InfoHash, AUTH_KEY_LENGTH, TorrentEntry, database}; +use crate::{InfoHash, AUTH_KEY_LENGTH, database}; use log::debug; use r2d2_sqlite::{SqliteConnectionManager}; use r2d2::{Pool}; @@ -8,6 +8,7 @@ use crate::key_manager::AuthKey; use std::str::FromStr; use crate::database::Database; use async_trait::async_trait; +use crate::torrent::TorrentEntry; pub struct SqliteDatabase { pool: Pool diff --git a/src/torrent.rs b/src/torrent.rs new file mode 100644 index 000000000..ef933d224 --- /dev/null +++ b/src/torrent.rs @@ -0,0 +1,221 @@ +use std::borrow::Cow; +use std::net::{IpAddr, SocketAddr}; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use serde::{Serialize, Deserialize}; +use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId}; +use crate::torrust_http_tracker::AnnounceRequest; +use crate::common::{AnnounceEventDef, NumberOfBytesDef}; + +#[derive(PartialEq, Eq, Debug, Clone, Serialize)] +pub 
struct TorrentPeer { + pub peer_id: PeerId, + pub peer_addr: SocketAddr, + #[serde(serialize_with = "ser_instant")] + pub updated: std::time::Instant, + #[serde(with = "NumberOfBytesDef")] + pub uploaded: NumberOfBytes, + #[serde(with = "NumberOfBytesDef")] + pub downloaded: NumberOfBytes, + #[serde(with = "NumberOfBytesDef")] + pub left: NumberOfBytes, + #[serde(with = "AnnounceEventDef")] + pub event: AnnounceEvent, +} + +impl TorrentPeer { + pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { + let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); + + TorrentPeer { + peer_id: PeerId(announce_request.peer_id.0), + peer_addr, + updated: std::time::Instant::now(), + uploaded: announce_request.bytes_uploaded, + downloaded: announce_request.bytes_downloaded, + left: announce_request.bytes_left, + event: announce_request.event + } + } + + pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { + let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); + + let event: AnnounceEvent = if let Some(event) = &announce_request.event { + match event.as_ref() { + "started" => AnnounceEvent::Started, + "stopped" => AnnounceEvent::Stopped, + "completed" => AnnounceEvent::Completed, + _ => AnnounceEvent::None + } + } else { + AnnounceEvent::None + }; + + TorrentPeer { + peer_id: announce_request.peer_id.clone(), + peer_addr, + updated: std::time::Instant::now(), + uploaded: NumberOfBytes(announce_request.uploaded as i64), + downloaded: NumberOfBytes(announce_request.downloaded as i64), + left: NumberOfBytes(announce_request.left as i64), + event + } + } + + // potentially substitute localhost ip with external ip + pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: 
Option, port: u16) -> SocketAddr { + if remote_ip.is_loopback() && host_opt_ip.is_some() { + SocketAddr::new(host_opt_ip.unwrap(), port) + } else { + SocketAddr::new(remote_ip, port) + } + } + + pub(crate) fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } + + fn is_completed(&self) -> bool { + self.event == AnnounceEvent::Completed + } +} + +fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { + ser.serialize_u64(inst.elapsed().as_millis() as u64) +} + +#[derive(Serialize, Deserialize, Clone)] +pub struct TorrentEntry { + #[serde(skip)] + pub(crate) peers: std::collections::BTreeMap, + pub(crate) completed: u32, + #[serde(skip)] + pub(crate) seeders: u32, +} + +impl TorrentEntry { + pub fn new() -> TorrentEntry { + TorrentEntry { + peers: std::collections::BTreeMap::new(), + completed: 0, + seeders: 0, + } + } + + pub fn update_peer(&mut self, peer: &TorrentPeer) { + match peer.event { + AnnounceEvent::Stopped => { + let peer_old = self.peers.remove(&peer.peer_id); + self.update_torrent_stats_with_peer(peer, peer_old); + } + _ => { + let peer_old = self.peers.insert(peer.peer_id.clone(), peer.clone()); + self.update_torrent_stats_with_peer(peer, peer_old); + } + } + } + + pub fn get_peers(&self, remote_addr: Option<&std::net::SocketAddr>) -> Vec { + let mut list = Vec::new(); + for (_, peer) in self + .peers + .iter() + .filter(|e| match remote_addr { + // don't filter on ip_version + None => true, + // filter out different ip_version from remote_addr + Some(remote_address) => { + match e.1.peer_addr.ip() { + IpAddr::V4(_) => { remote_address.is_ipv4() } + IpAddr::V6(_) => { remote_address.is_ipv6() } + } + } + }) + .take(MAX_SCRAPE_TORRENTS as usize) + { + + // skip ip address of client + if let Some(remote_addr) = remote_addr { + if peer.peer_addr == *remote_addr { + continue; + } + } + + list.push(peer.clone()); + } + list + } + + pub fn update_torrent_stats_with_peer(&mut self, peer: &TorrentPeer, peer_old: 
Option) { + match peer_old { + None => { + if peer.is_seeder() { + self.seeders += 1; + } + + if peer.is_completed() { + self.completed += 1; + } + } + Some(peer_old) => { + match peer.event { + AnnounceEvent::None => { + if peer.is_seeder() && !peer_old.is_seeder() { + self.seeders += 1; + } + } + AnnounceEvent::Completed => { + if peer.is_seeder() && !peer_old.is_seeder() { + self.seeders += 1; + } + + // don't double count completed + if !peer_old.is_completed() { + self.completed += 1; + } + } + AnnounceEvent::Stopped => { + if peer_old.is_seeder() { + if self.seeders != 0 { + self.seeders -= 1; + } + } + } + // impossible, started should be the first time a peer announces itself + AnnounceEvent::Started => {} + } + } + } + } + + pub fn get_stats(&self) -> (u32, u32, u32) { + let leechers: u32 = if self.seeders < (self.peers.len() as u32) { + (self.peers.len() as u32) - self.seeders + } else { + 0 + }; + + (self.seeders, self.completed, leechers) + } +} + +#[derive(Serialize, Deserialize)] +struct DatabaseRow<'a> { + info_hash: InfoHash, + entry: Cow<'a, TorrentEntry>, +} + +#[derive(Debug)] +pub struct TorrentStats { + pub completed: u32, + pub seeders: u32, + pub leechers: u32, +} + +#[derive(Debug)] +pub enum TorrentError { + TorrentNotWhitelisted, + PeerNotAuthenticated, + PeerKeyNotValid, + NoPeersFound, + CouldNotSendResponse, + InvalidInfoHash, +} diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index f15b7143a..8762faeaf 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -1,12 +1,15 @@ use std::collections::HashMap; use std::convert::Infallible; +use std::net::IpAddr; use std::sync::Arc; use log::debug; use warp::{reject, Rejection, Reply}; use warp::http::{Response}; -use crate::{InfoHash, TorrentError, TorrentPeer, TorrentStats, TorrentTracker}; +use crate::{InfoHash, TorrentTracker}; use crate::key_manager::AuthKey; +use crate::torrent::{TorrentError, TorrentPeer, 
TorrentStats}; use crate::torrust_http_tracker::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, WebResult}; +use crate::tracker_stats::TrackerStatsEvent; use crate::utils::url_encode_bytes; /// Authenticate InfoHash using optional AuthKey @@ -42,23 +45,14 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option // get all torrent peers excluding the peer_addr let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; - // success response - let tracker_copy = tracker.clone(); - let is_ipv4 = announce_request.peer_addr.is_ipv4(); - - tokio::spawn(async move { - let mut status_writer = tracker_copy.set_stats().await; - if is_ipv4 { - status_writer.tcp4_connections_handled += 1; - status_writer.tcp4_announces_handled += 1; - } else { - status_writer.tcp6_connections_handled += 1; - status_writer.tcp6_announces_handled += 1; - } - }); - let announce_interval = tracker.config.announce_interval; + // send stats event + match announce_request.peer_addr { + IpAddr::V4(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Tcp4Announce).await; } + IpAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Tcp6Announce).await; } + } + send_announce_response(&announce_request, torrent_stats, peers, announce_interval, tracker.config.announce_interval_min) } @@ -86,18 +80,11 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option { tracker.stats_tracker.send_event(TrackerStatsEvent::Tcp4Scrape).await; } + IpAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Tcp6Scrape).await; } + } send_scrape_response(files) } diff --git a/src/torrust_udp_tracker/handlers.rs b/src/torrust_udp_tracker/handlers.rs index bf25a8861..df1a15451 100644 --- a/src/torrust_udp_tracker/handlers.rs +++ b/src/torrust_udp_tracker/handlers.rs @@ -1,9 +1,11 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use 
std::sync::Arc; use aquatic_udp_protocol::{AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId}; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS, TorrentError, TorrentPeer, TorrentTracker}; +use crate::{InfoHash, MAX_SCRAPE_TORRENTS, TorrentTracker}; +use crate::torrent::{TorrentError, TorrentPeer}; use crate::torrust_udp_tracker::errors::ServerError; use crate::torrust_udp_tracker::request::AnnounceRequestWrapper; +use crate::tracker_stats::TrackerStatsEvent; use crate::utils::get_connection_id; pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { @@ -71,15 +73,11 @@ pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, t connection_id, }); - let tracker_copy = tracker.clone(); - tokio::spawn(async move { - let mut status_writer = tracker_copy.set_stats().await; - if remote_addr.is_ipv4() { - status_writer.udp4_connections_handled += 1; - } else { - status_writer.udp6_connections_handled += 1; - } - }); + // send stats event + match remote_addr { + SocketAddr::V4(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp4Connect).await; } + SocketAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp6Connect).await; } + } Ok(response) } @@ -98,16 +96,6 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc // get all peers excluding the client_addr let peers = tracker.get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr).await; - let tracker_copy = tracker.clone(); - tokio::spawn(async move { - let mut status_writer = tracker_copy.set_stats().await; - if remote_addr.is_ipv4() { - status_writer.udp4_announces_handled += 1; - } else { - status_writer.udp6_announces_handled += 1; - } - }); - let announce_response = if remote_addr.is_ipv4() { 
Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, @@ -144,6 +132,12 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc }) }; + // send stats event + match remote_addr { + SocketAddr::V4(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp4Announce).await; } + SocketAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp6Announce).await; } + } + Ok(announce_response) } @@ -180,15 +174,11 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra torrent_stats.push(scrape_entry); } - let tracker_copy = tracker.clone(); - tokio::spawn(async move { - let mut status_writer = tracker_copy.set_stats().await; - if remote_addr.is_ipv4() { - status_writer.udp4_scrapes_handled += 1; - } else { - status_writer.udp6_scrapes_handled += 1; - } - }); + // send stats event + match remote_addr { + SocketAddr::V4(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp4Scrape).await; } + SocketAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp6Scrape).await; } + } Ok(Response::from(ScrapeResponse { transaction_id: request.transaction_id, diff --git a/src/tracker.rs b/src/tracker.rs index 43dd3b00f..eb6f006bf 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -1,19 +1,18 @@ use serde::{Deserialize, Serialize}; use serde; -use std::borrow::Cow; use std::collections::BTreeMap; -use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; -use crate::common::{AnnounceEventDef, InfoHash, NumberOfBytesDef, PeerId}; -use std::net::{IpAddr, SocketAddr}; -use crate::{Configuration, database, key_manager, MAX_SCRAPE_TORRENTS}; +use tokio::sync::{RwLock, RwLockReadGuard}; +use crate::common::{InfoHash}; +use std::net::{SocketAddr}; +use crate::{Configuration, database, key_manager}; use std::collections::btree_map::Entry; use std::sync::Arc; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use log::info; use 
crate::key_manager::AuthKey; use crate::database::{Database}; use crate::key_manager::Error::KeyInvalid; -use crate::torrust_http_tracker::AnnounceRequest; +use crate::torrent::{TorrentEntry, TorrentError, TorrentPeer, TorrentStats}; +use crate::tracker_stats::{StatsTracker, TrackerStats}; #[derive(Serialize, Deserialize, Clone, PartialEq, Debug)] pub enum TrackerMode { @@ -34,265 +33,25 @@ pub enum TrackerMode { PrivateListedMode, } -#[derive(PartialEq, Eq, Debug, Clone, Serialize)] -pub struct TorrentPeer { - pub peer_id: PeerId, - pub peer_addr: SocketAddr, - #[serde(serialize_with = "ser_instant")] - pub updated: std::time::Instant, - #[serde(with = "NumberOfBytesDef")] - pub uploaded: NumberOfBytes, - #[serde(with = "NumberOfBytesDef")] - pub downloaded: NumberOfBytes, - #[serde(with = "NumberOfBytesDef")] - pub left: NumberOfBytes, - #[serde(with = "AnnounceEventDef")] - pub event: AnnounceEvent, -} - -impl TorrentPeer { - pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { - let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); - - TorrentPeer { - peer_id: PeerId(announce_request.peer_id.0), - peer_addr, - updated: std::time::Instant::now(), - uploaded: announce_request.bytes_uploaded, - downloaded: announce_request.bytes_downloaded, - left: announce_request.bytes_left, - event: announce_request.event - } - } - - pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { - let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); - - let event: AnnounceEvent = if let Some(event) = &announce_request.event { - match event.as_ref() { - "started" => AnnounceEvent::Started, - "stopped" => AnnounceEvent::Stopped, - "completed" => AnnounceEvent::Completed, - _ => AnnounceEvent::None - } - } 
else { - AnnounceEvent::None - }; - - TorrentPeer { - peer_id: announce_request.peer_id.clone(), - peer_addr, - updated: std::time::Instant::now(), - uploaded: NumberOfBytes(announce_request.uploaded as i64), - downloaded: NumberOfBytes(announce_request.downloaded as i64), - left: NumberOfBytes(announce_request.left as i64), - event - } - } - - // potentially substitute localhost ip with external ip - pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: Option, port: u16) -> SocketAddr { - if remote_ip.is_loopback() && host_opt_ip.is_some() { - SocketAddr::new(host_opt_ip.unwrap(), port) - } else { - SocketAddr::new(remote_ip, port) - } - } - - fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } - - fn is_completed(&self) -> bool { - self.event == AnnounceEvent::Completed - } -} - -fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { - ser.serialize_u64(inst.elapsed().as_millis() as u64) -} - -#[derive(Serialize, Deserialize, Clone)] -pub struct TorrentEntry { - #[serde(skip)] - peers: std::collections::BTreeMap, - completed: u32, - #[serde(skip)] - seeders: u32, -} - -impl TorrentEntry { - pub fn new() -> TorrentEntry { - TorrentEntry { - peers: std::collections::BTreeMap::new(), - completed: 0, - seeders: 0, - } - } - - pub fn update_peer(&mut self, peer: &TorrentPeer) { - match peer.event { - AnnounceEvent::Stopped => { - let peer_old = self.peers.remove(&peer.peer_id); - self.update_torrent_stats_with_peer(peer, peer_old); - } - _ => { - let peer_old = self.peers.insert(peer.peer_id.clone(), peer.clone()); - self.update_torrent_stats_with_peer(peer, peer_old); - } - } - } - - pub fn get_peers(&self, remote_addr: Option<&std::net::SocketAddr>) -> Vec { - let mut list = Vec::new(); - for (_, peer) in self - .peers - .iter() - .filter(|e| match remote_addr { - // don't filter on ip_version - None => true, - // filter out different ip_version from remote_addr - Some(remote_address) => { - 
match e.1.peer_addr.ip() { - IpAddr::V4(_) => { remote_address.is_ipv4() } - IpAddr::V6(_) => { remote_address.is_ipv6() } - } - } - }) - .take(MAX_SCRAPE_TORRENTS as usize) - { - - // skip ip address of client - if let Some(remote_addr) = remote_addr { - if peer.peer_addr == *remote_addr { - continue; - } - } - - list.push(peer.clone()); - } - list - } - - pub fn update_torrent_stats_with_peer(&mut self, peer: &TorrentPeer, peer_old: Option) { - match peer_old { - None => { - if peer.is_seeder() { - self.seeders += 1; - } - - if peer.is_completed() { - self.completed += 1; - } - } - Some(peer_old) => { - match peer.event { - AnnounceEvent::None => { - if peer.is_seeder() && !peer_old.is_seeder() { - self.seeders += 1; - } - } - AnnounceEvent::Completed => { - if peer.is_seeder() && !peer_old.is_seeder() { - self.seeders += 1; - } - - // don't double count completed - if !peer_old.is_completed() { - self.completed += 1; - } - } - AnnounceEvent::Stopped => { - if peer_old.is_seeder() { - if self.seeders != 0 { - self.seeders -= 1; - } - } - } - // impossible, started should be the first time a peer announces itself - AnnounceEvent::Started => {} - } - } - } - } - - pub fn get_stats(&self) -> (u32, u32, u32) { - let leechers: u32 = if self.seeders < (self.peers.len() as u32) { - (self.peers.len() as u32) - self.seeders - } else { - 0 - }; - - (self.seeders, self.completed, leechers) - } -} - -#[derive(Serialize, Deserialize)] -struct DatabaseRow<'a> { - info_hash: InfoHash, - entry: Cow<'a, TorrentEntry>, -} - -#[derive(Debug)] -pub struct TorrentStats { - pub completed: u32, - pub seeders: u32, - pub leechers: u32, -} - -#[derive(Debug)] -pub enum TorrentError { - TorrentNotWhitelisted, - PeerNotAuthenticated, - PeerKeyNotValid, - NoPeersFound, - CouldNotSendResponse, - InvalidInfoHash, -} - -#[derive(Debug)] -pub struct TrackerStats { - pub tcp4_connections_handled: u64, - pub tcp4_announces_handled: u64, - pub tcp4_scrapes_handled: u64, - pub 
tcp6_connections_handled: u64, - pub tcp6_announces_handled: u64, - pub tcp6_scrapes_handled: u64, - pub udp4_connections_handled: u64, - pub udp4_announces_handled: u64, - pub udp4_scrapes_handled: u64, - pub udp6_connections_handled: u64, - pub udp6_announces_handled: u64, - pub udp6_scrapes_handled: u64, -} - pub struct TorrentTracker { pub config: Arc, torrents: tokio::sync::RwLock>, database: Box, - stats: tokio::sync::RwLock, + pub stats_tracker: StatsTracker } impl TorrentTracker { pub fn new(config: Arc) -> Result { let database = database::connect_database(&config.db_driver, &config.db_path)?; + let mut stats_tracker = StatsTracker::new(); + + stats_tracker.run_worker(); Ok(TorrentTracker { config, torrents: RwLock::new(std::collections::BTreeMap::new()), database, - stats: RwLock::new(TrackerStats { - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - }), + stats_tracker }) } @@ -359,7 +118,7 @@ impl TorrentTracker { let torrents = self.database.load_persistent_torrent_data().await?; for torrent in torrents { - self.add_torrent(torrent.0, 0, torrent.1, 0).await; + let _ = self.add_torrent(torrent.0, 0, torrent.1, 0).await; } Ok(()) @@ -449,12 +208,8 @@ impl TorrentTracker { self.torrents.read().await } - pub async fn set_stats(&self) -> RwLockWriteGuard<'_, TrackerStats> { - self.stats.write().await - } - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStats> { - self.stats.read().await + self.stats_tracker.get_stats().await } // remove torrents without peers if enabled, and defragment memory diff --git a/src/tracker_stats.rs b/src/tracker_stats.rs new file mode 100644 index 000000000..1a6a71c2b --- /dev/null +++ b/src/tracker_stats.rs @@ -0,0 +1,123 
@@ +use std::sync::Arc; +use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; +use tokio::sync::mpsc::{Sender}; +use tokio::sync::mpsc::error::SendError; + +const CHANNEL_BUFFER_SIZE: usize = 65_535; + +#[derive(Debug)] +pub enum TrackerStatsEvent { + Tcp4Announce, + Tcp4Scrape, + Tcp6Announce, + Tcp6Scrape, + Udp4Connect, + Udp4Announce, + Udp4Scrape, + Udp6Connect, + Udp6Announce, + Udp6Scrape +} + +#[derive(Debug)] +pub struct TrackerStats { + pub tcp4_connections_handled: u64, + pub tcp4_announces_handled: u64, + pub tcp4_scrapes_handled: u64, + pub tcp6_connections_handled: u64, + pub tcp6_announces_handled: u64, + pub tcp6_scrapes_handled: u64, + pub udp4_connections_handled: u64, + pub udp4_announces_handled: u64, + pub udp4_scrapes_handled: u64, + pub udp6_connections_handled: u64, + pub udp6_announces_handled: u64, + pub udp6_scrapes_handled: u64, +} + +impl TrackerStats { + pub fn new() -> Self { + Self { + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + } + } +} + +pub struct StatsTracker { + channel_sender: Option>, + pub stats: Arc> +} + +impl StatsTracker { + pub fn new() -> Self { + Self { + channel_sender: None, + stats: Arc::new(RwLock::new(TrackerStats::new())) + } + } + + pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStats> { + self.stats.read().await + } + + pub async fn send_event(&self, event: TrackerStatsEvent) -> Option>> { + if let Some(tx) = &self.channel_sender { + Some(tx.send(event).await) + } else { + None + } + } + + pub fn run_worker(&mut self) { + let (tx, mut rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + + // set send channel on stats_tracker + self.channel_sender = Some(tx); + + let stats = self.stats.clone(); + + 
tokio::spawn(async move { + while let Some(event) = rx.recv().await { + let mut stats_lock = stats.write().await; + + match event { + TrackerStatsEvent::Tcp4Announce => { + stats_lock.tcp4_announces_handled += 1; + stats_lock.tcp4_connections_handled += 1; + } + TrackerStatsEvent::Tcp4Scrape => { + stats_lock.tcp4_scrapes_handled += 1; + stats_lock.tcp4_connections_handled += 1; + } + TrackerStatsEvent::Tcp6Announce => { + stats_lock.tcp6_announces_handled += 1; + stats_lock.tcp6_connections_handled += 1; + } + TrackerStatsEvent::Tcp6Scrape => { + stats_lock.tcp6_scrapes_handled += 1; + stats_lock.tcp6_connections_handled += 1; + } + TrackerStatsEvent::Udp4Connect => { stats_lock.udp4_connections_handled += 1; } + TrackerStatsEvent::Udp4Announce => { stats_lock.udp4_announces_handled += 1; } + TrackerStatsEvent::Udp4Scrape => { stats_lock.udp4_scrapes_handled += 1; } + TrackerStatsEvent::Udp6Connect => { stats_lock.udp6_connections_handled += 1; } + TrackerStatsEvent::Udp6Announce => { stats_lock.udp6_announces_handled += 1; } + TrackerStatsEvent::Udp6Scrape => { stats_lock.udp6_scrapes_handled += 1; } + } + + drop(stats_lock); + } + }); + } +} From 6cec6f2a32ae05104183e5c0e9d098799b6345a8 Mon Sep 17 00:00:00 2001 From: Power2All Date: Wed, 13 Apr 2022 14:53:23 +0200 Subject: [PATCH 0067/1003] Adding periodic --- src/config.rs | 2 ++ src/main.rs | 22 ++++++++++++++++++++ src/tracker.rs | 54 +++++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 77 insertions(+), 1 deletion(-) diff --git a/src/config.rs b/src/config.rs index f9166e577..3045c1ca9 100644 --- a/src/config.rs +++ b/src/config.rs @@ -56,6 +56,7 @@ pub struct Configuration { pub db_driver: DatabaseDrivers, pub db_path: String, pub persistence: bool, + pub persistence_interval: Option, pub cleanup_interval: Option, pub cleanup_peerless: bool, pub external_ip: Option, @@ -139,6 +140,7 @@ impl Configuration { db_driver: DatabaseDrivers::Sqlite3, db_path: String::from("data.db"), persistence: 
false, + persistence_interval: Some(900), cleanup_interval: Some(600), cleanup_peerless: true, external_ip: Some(String::from("0.0.0.0")), diff --git a/src/main.rs b/src/main.rs index 721385760..c3d9ba23c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -29,6 +29,8 @@ async fn main() { panic!("Could not load persistent torrents.") }; info!("Persistent torrents loaded."); + + let _torrent_periodic_job = start_torrent_periodic_job(config.clone(), tracker.clone()).unwrap(); } // start torrent cleanup job (periodically removes old peers) @@ -89,6 +91,26 @@ async fn main() { } } +fn start_torrent_periodic_job(config: Arc, tracker: Arc) -> Option> { + let weak_tracker = std::sync::Arc::downgrade(&tracker); + let interval = config.persistence_interval.unwrap_or(900); + + return Some(tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval); + let mut interval = tokio::time::interval(interval); + interval.tick().await; // first tick is immediate... + // periodically call tracker.cleanup_torrents() + loop { + interval.tick().await; + if let Some(tracker) = weak_tracker.upgrade() { + tracker.periodic_saving().await; + } else { + break; + } + } + })); +} + fn start_torrent_cleanup_job(config: Arc, tracker: Arc) -> Option> { let weak_tracker = std::sync::Arc::downgrade(&tracker); let interval = config.cleanup_interval.unwrap_or(600); diff --git a/src/tracker.rs b/src/tracker.rs index eb6f006bf..b2a65b4ce 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; use serde; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use tokio::sync::{RwLock, RwLockReadGuard}; use crate::common::{InfoHash}; use std::net::{SocketAddr}; @@ -36,6 +36,8 @@ pub enum TrackerMode { pub struct TorrentTracker { pub config: Arc, torrents: tokio::sync::RwLock>, + updates: tokio::sync::RwLock>, + shadow: tokio::sync::RwLock>, database: Box, pub stats_tracker: StatsTracker } @@ -50,6 +52,8 @@ impl 
TorrentTracker { Ok(TorrentTracker { config, torrents: RwLock::new(std::collections::BTreeMap::new()), + updates: RwLock::new(std::collections::HashMap::new()), + shadow: RwLock::new(std::collections::HashMap::new()), database, stats_tracker }) @@ -178,6 +182,15 @@ impl TorrentTracker { let (seeders, completed, leechers) = torrent_entry.get_stats(); + if self.config.persistence { + let mut updates = self.updates.write().await; + if updates.contains_key(info_hash) { + updates.remove(info_hash); + } + updates.insert(*info_hash, completed); + drop(updates); + } + TorrentStats { seeders, leechers, @@ -261,4 +274,43 @@ impl TorrentTracker { } info!("Torrents cleaned up."); } + + pub async fn periodic_saving(&self) { + // Get a lock for writing + let mut shadow = self.shadow.write().await; + + // We will get the data and insert it into the shadow, while clearing updates. + let mut updates = self.updates.write().await; + + for (infohash, completed) in updates.iter() { + if shadow.contains_key(infohash) { + shadow.remove(infohash); + } + shadow.insert(*infohash, *completed); + } + updates.clear(); + drop(updates); + + // We get shadow data into local array to be handled. + let mut shadow_copy: BTreeMap = BTreeMap::new(); + for (infohash, completed) in shadow.iter() { + shadow_copy.insert(*infohash, TorrentEntry{ + peers: Default::default(), + completed: *completed, + seeders: 0 + }); + } + + // Drop the lock + drop(shadow); + + // We will now save the data from the shadow into the database. + // This should not put any strain on the server itself, other then the harddisk/ssd. + let result = self.database.save_persistent_torrent_data(&shadow_copy).await; + if result.is_ok() { + let mut shadow = self.shadow.write().await; + shadow.clear(); + drop(shadow); + } + } } From f7b6c4e0f4914a30b69ef193a18a671d5a3509e2 Mon Sep 17 00:00:00 2001 From: Power2All Date: Wed, 13 Apr 2022 15:10:30 +0200 Subject: [PATCH 0068/1003] Fix the saving... 
--- src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index c3d9ba23c..ca632177d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -84,7 +84,7 @@ async fn main() { // Save torrents if enabled if config.persistence { info!("Saving torrents into SQL from memory..."); - let _ = tracker.save_torrents().await; + let _ = tracker.periodic_saving().await; info!("Torrents saved"); } } From f581289b48a96773eab9676fd0db82c7a1890b50 Mon Sep 17 00:00:00 2001 From: Power2All Date: Wed, 13 Apr 2022 15:34:56 +0200 Subject: [PATCH 0069/1003] Showing information that it's saving periodically --- src/main.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/main.rs b/src/main.rs index ca632177d..2c5ed8678 100644 --- a/src/main.rs +++ b/src/main.rs @@ -103,7 +103,9 @@ fn start_torrent_periodic_job(config: Arc, tracker: Arc Date: Wed, 13 Apr 2022 16:47:22 +0200 Subject: [PATCH 0070/1003] Adding memory logging --- src/config.rs | 2 ++ src/main.rs | 23 +++++++++++++++++++++++ src/tracker.rs | 16 +++++++++++++++- 3 files changed, 40 insertions(+), 1 deletion(-) diff --git a/src/config.rs b/src/config.rs index 3045c1ca9..67078d608 100644 --- a/src/config.rs +++ b/src/config.rs @@ -52,6 +52,7 @@ pub struct HttpApiConfig { #[derive(Serialize, Deserialize)] pub struct Configuration { pub log_level: Option, + pub log_interval: Option, pub mode: TrackerMode, pub db_driver: DatabaseDrivers, pub db_path: String, @@ -136,6 +137,7 @@ impl Configuration { pub fn default() -> Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), + log_interval: Some(60), mode: TrackerMode::PublicMode, db_driver: DatabaseDrivers::Sqlite3, db_path: String::from("data.db"), diff --git a/src/main.rs b/src/main.rs index 2c5ed8678..9ba45427e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -70,6 +70,9 @@ async fn main() { let _ = start_http_tracker_server(&http_tracker, tracker.clone()); } + // start a thread to post 
statistics + let _ = start_statistics_job(config.clone(), tracker.clone()).unwrap(); + // handle the signals here tokio::select! { _ = tokio::signal::ctrl_c() => { @@ -133,6 +136,26 @@ fn start_torrent_cleanup_job(config: Arc, tracker: Arc, tracker: Arc) -> Option> { + let weak_tracker = std::sync::Arc::downgrade(&tracker); + let interval = config.log_interval.unwrap_or(60); + + return Some(tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval); + let mut interval = tokio::time::interval(interval); + interval.tick().await; // first tick is immediate... + // periodically call tracker.cleanup_torrents() + loop { + interval.tick().await; + if let Some(tracker) = weak_tracker.upgrade() { + tracker.post_log().await; + } else { + break; + } + } + })); +} + fn start_api_server(config: &HttpApiConfig, tracker: Arc) -> JoinHandle<()> { info!("Starting HTTP API server on: {}", config.bind_address); let bind_addr = config.bind_address.parse::().unwrap(); diff --git a/src/tracker.rs b/src/tracker.rs index b2a65b4ce..68243a02e 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -1,11 +1,12 @@ use serde::{Deserialize, Serialize}; use serde; -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use tokio::sync::{RwLock, RwLockReadGuard}; use crate::common::{InfoHash}; use std::net::{SocketAddr}; use crate::{Configuration, database, key_manager}; use std::collections::btree_map::Entry; +use std::mem; use std::sync::Arc; use log::info; use crate::key_manager::AuthKey; @@ -225,6 +226,19 @@ impl TorrentTracker { self.stats_tracker.get_stats().await } + pub async fn post_log(&self) { + let torrents = self.torrents.read().await; + let torrents_size = em::size_of_val(&*torrents); + drop(torrents); + let updates = self.updates.read().await; + let updates_size = em::size_of_val(&*updates); + drop(updates); + let shadow = self.shadow.read().await; + let shadow_size = em::size_of_val(&*shadow); + drop(shadow); + info!("Stats [::] Torrents: 
{} byte(s) | Updates: {} byte(s) | Shadow: {} byte(s)", torrents_size, updates_size, shadow_size); + } + // remove torrents without peers if enabled, and defragment memory pub async fn cleanup_torrents(&self) { info!("Cleaning torrents..."); From 41785b62ef7a000b0278f033c629e889c2a738f6 Mon Sep 17 00:00:00 2001 From: Power2All Date: Wed, 13 Apr 2022 16:49:21 +0200 Subject: [PATCH 0071/1003] Typo ... --- src/tracker.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/tracker.rs b/src/tracker.rs index 68243a02e..4aaa767be 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -228,13 +228,13 @@ impl TorrentTracker { pub async fn post_log(&self) { let torrents = self.torrents.read().await; - let torrents_size = em::size_of_val(&*torrents); + let torrents_size = mem::size_of_val(&*torrents); drop(torrents); let updates = self.updates.read().await; - let updates_size = em::size_of_val(&*updates); + let updates_size = mem::size_of_val(&*updates); drop(updates); let shadow = self.shadow.read().await; - let shadow_size = em::size_of_val(&*shadow); + let shadow_size = mem::size_of_val(&*shadow); drop(shadow); info!("Stats [::] Torrents: {} byte(s) | Updates: {} byte(s) | Shadow: {} byte(s)", torrents_size, updates_size, shadow_size); } From c6dff90018d6b2a317cd493da9ebadcd40e3815e Mon Sep 17 00:00:00 2001 From: Power2All Date: Wed, 13 Apr 2022 19:41:58 +0200 Subject: [PATCH 0072/1003] Adding a statistics viewing in console --- Cargo.lock | 764 ++++++++++++++++++++++++------------------------- src/tracker.rs | 15 +- 2 files changed, 381 insertions(+), 398 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2cb824d71..7bc7233c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10,11 +10,11 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"43bb833f0bf979d8475d38fbf09ed3b8a55e1885fe93ad3f93239fc6a4f17b98" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.3", + "getrandom", "once_cell", "version_check", ] @@ -39,8 +39,8 @@ dependencies = [ [[package]] name = "aquatic_udp_protocol" -version = "0.1.0" -source = "git+https://github.com/greatest-ape/aquatic#065e007ede84de20f20983b4b504471bbda2fdf2" +version = "0.2.0" +source = "git+https://github.com/greatest-ape/aquatic#26e2e874377a2682f52568f8e5e8c080c3366326" dependencies = [ "byteorder", "either", @@ -60,9 +60,9 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "async-trait" -version = "0.1.52" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" +checksum = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600" dependencies = [ "proc-macro2", "quote", @@ -82,9 +82,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "base-x" @@ -107,7 +107,7 @@ dependencies = [ "num-bigint 0.3.3", "num-integer", "num-traits 0.2.14", - "serde 1.0.120", + "serde 1.0.136", ] [[package]] @@ -141,9 +141,9 @@ dependencies = [ [[package]] name = "bitflags" -version = "1.2.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitvec" @@ -157,34 +157,22 @@ dependencies = [ "wyz", ] -[[package]] -name = "block-buffer" -version = "0.7.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding", - "byte-tools", - "byteorder", - "generic-array 0.12.3", -] - [[package]] name = "block-buffer" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.4", + "generic-array", ] [[package]] -name = "block-padding" -version = "0.1.5" +name = "block-buffer" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" dependencies = [ - "byte-tools", + "generic-array", ] [[package]] @@ -205,27 +193,21 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.8.0" +version = "3.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c" - -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" +checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" [[package]] name = "byteorder" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" +checksum = 
"c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cc" @@ -242,12 +224,6 @@ dependencies = [ "nom", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -263,7 +239,7 @@ dependencies = [ "libc", "num-integer", "num-traits 0.2.14", - "serde 1.0.120", + "serde 1.0.136", "time 0.1.44", "winapi", ] @@ -312,7 +288,7 @@ dependencies = [ "lazy_static", "nom", "rust-ini", - "serde 1.0.120", + "serde 1.0.136", "serde-hjson", "serde_json", "toml", @@ -349,9 +325,9 @@ checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" dependencies = [ "libc", ] @@ -362,7 +338,17 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", +] + +[[package]] +name = "crypto-common" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +dependencies = [ + "generic-array", + "typenum", ] [[package]] @@ -391,20 +377,21 @@ dependencies = [ [[package]] name = "digest" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.12.3", + "generic-array", ] [[package]] name 
= "digest" -version = "0.9.0" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" dependencies = [ - "generic-array 0.14.4", + "block-buffer 0.10.2", + "crypto-common", ] [[package]] @@ -432,12 +419,6 @@ dependencies = [ "termcolor", ] -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - [[package]] name = "fallible-iterator" version = "0.2.0" @@ -450,6 +431,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +[[package]] +name = "fastrand" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +dependencies = [ + "instant", +] + [[package]] name = "fern" version = "0.6.0" @@ -461,11 +451,11 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" +checksum = "b39522e96686d38f4bc984b9198e3a0613264abaebaff2c5c918bfa6b6da09af" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crc32fast", "libc", "libz-sys", @@ -495,9 +485,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" dependencies = [ "matches", "percent-encoding", @@ 
-664,18 +654,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.12.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" -dependencies = [ - "typenum", -] - -[[package]] -name = "generic-array" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" dependencies = [ "typenum", "version_check", @@ -683,22 +664,11 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.2.3" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.10.0+wasi-snapshot-preview1", ] @@ -711,9 +681,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.4" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f3675cfef6a30c8031cf9e6493ebdc3bb3272a3fea3923c4210d1830e6a472" +checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" dependencies = [ "bytes", "fnv", @@ -724,16 +694,10 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util", + "tokio-util 0.7.1", "tracing", ] -[[package]] -name = "hashbrown" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" - [[package]] name = "hashbrown" version = "0.11.2" @@ -745,18 +709,18 @@ dependencies = [ [[package]] name = "headers" -version = "0.3.3" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62689dc57c7456e69712607ffcbd0aa1dfcccf9af73727e9b25bc1825375cac3" +checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d" dependencies = [ "base64", "bitflags", "bytes", "headers-core", "http", + "httpdate", "mime", - "sha-1 0.8.2", - "time 0.1.44", + "sha-1 0.10.0", ] [[package]] @@ -770,9 +734,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ "libc", ] @@ -785,9 +749,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "0.2.3" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" +checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" dependencies = [ "bytes", "fnv", @@ -796,25 +760,26 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.0" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" +checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" dependencies = [ "bytes", "http", + "pin-project-lite", ] [[package]] name = "httparse" -version = "1.5.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = 
"6330e8a36bd8c859f3fa6d9382911fbb7147ec39807f63b923933a247240b9ba" [[package]] name = "httpdate" -version = "0.3.2" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "humantime" @@ -824,9 +789,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.2" +version = "0.14.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" +checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" dependencies = [ "bytes", "futures-channel", @@ -838,8 +803,8 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project", - "socket2 0.3.19", + "pin-project-lite", + "socket2", "tokio", "tower-service", "tracing", @@ -859,30 +824,21 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" +checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" dependencies = [ "autocfg", - "hashbrown 0.9.1", -] - -[[package]] -name = "input_buffer" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f97967975f448f1a7ddb12b0bc41069d09ed6a1c161a92687e057325db35d413" -dependencies = [ - "bytes", + "hashbrown", ] [[package]] name = "instant" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -899,15 
+855,15 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.7" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" [[package]] name = "js-sys" -version = "0.3.55" +version = "0.3.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" +checksum = "671a26f820db17c2a2750743f1dd03bafd15b98c9f30c7c2628c024c05d73397" dependencies = [ "wasm-bindgen", ] @@ -930,7 +886,7 @@ version = "5.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f404a90a744e32e8be729034fc33b90cf2a56418fbf594d69aa3c0214ad414e5" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "lexical-core", ] @@ -942,16 +898,16 @@ checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe" dependencies = [ "arrayvec 0.5.2", "bitflags", - "cfg-if 1.0.0", + "cfg-if", "ryu", "static_assertions", ] [[package]] name = "libc" -version = "0.2.120" +version = "0.2.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad5c14e80759d0939d013e6ca49930e59fc53dd8e5009132f76240c179380c09" +checksum = "cb691a747a7ab48abc15c5b42066eaafde10dc427e3b6ee2a1cf43db04c763bd" [[package]] name = "libloading" @@ -959,7 +915,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "winapi", ] @@ -992,20 +948,21 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" +checksum = 
"327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" dependencies = [ + "autocfg", "scopeguard", ] [[package]] name = "log" -version = "0.4.13" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" +checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", ] [[package]] @@ -1014,7 +971,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ea2d928b485416e8908cff2d97d621db22b27f7b3b6729e438bcf42c671ba91" dependencies = [ - "hashbrown 0.11.2", + "hashbrown", ] [[package]] @@ -1028,9 +985,9 @@ dependencies = [ [[package]] name = "matches" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "memchr" @@ -1055,9 +1012,9 @@ checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" [[package]] name = "mime_guess" -version = "2.0.3" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" dependencies = [ "mime", "unicase", @@ -1065,42 +1022,41 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.4" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +checksum = "d2b29bd4bc3f33391105ebee3589c19197c4271e3e5a9ec9bfe8127eeff8f082" dependencies = [ "adler", - "autocfg", ] [[package]] name = "mio" -version = "0.7.7" +version = "0.8.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" +checksum = "52da4364ffb0e4fe33a9841a98a3f3014fb964045ce4f7a45a398243c8d6b0c9" dependencies = [ "libc", "log", "miow", "ntapi", + "wasi 0.11.0+wasi-snapshot-preview1", "winapi", ] [[package]] name = "miow" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" dependencies = [ - "socket2 0.3.19", "winapi", ] [[package]] name = "multipart" -version = "0.17.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050aeedc89243f5347c3e237e3e13dc76fbe4ae3742a57b94dc14f69acf76d4" +checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" dependencies = [ "buf_redux", "httparse", @@ -1108,7 +1064,7 @@ dependencies = [ "mime", "mime_guess", "quick-error", - "rand 0.7.3", + "rand", "safemem", "tempfile", "twoway", @@ -1132,9 +1088,9 @@ dependencies = [ "once_cell", "pem", "percent-encoding", - "serde 1.0.120", + "serde 1.0.136", "serde_json", - "socket2 0.4.4", + "socket2", "twox-hash", "url", ] @@ -1162,11 +1118,11 @@ dependencies = [ "lexical", "num-bigint 0.4.3", "num-traits 0.2.14", - "rand 0.8.4", + "rand", "regex", "rust_decimal", "saturating", - "serde 1.0.120", + "serde 1.0.136", "serde_json", "sha1", "sha2", @@ -1188,9 +1144,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d" +checksum = "fd7e2f3618557f980e0b17e8856252eee3c97fa12c54dff0ca290fb6266ca4a9" dependencies = [ "lazy_static", "libc", @@ -1212,7 +1168,7 @@ checksum = 
"9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" dependencies = [ "bitflags", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "memoffset", ] @@ -1230,9 +1186,9 @@ dependencies = [ [[package]] name = "ntapi" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f" dependencies = [ "winapi", ] @@ -1289,9 +1245,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" dependencies = [ "hermit-abi", "libc", @@ -1303,12 +1259,6 @@ version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - [[package]] name = "opaque-debug" version = "0.3.0" @@ -1322,7 +1272,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7ae222234c30df141154f159066c5093ff73b63204dcda7121eb082fc56a95" dependencies = [ "bitflags", - "cfg-if 1.0.0", + "cfg-if", "foreign-types", "libc", "once_cell", @@ -1350,22 +1300,32 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", - "parking_lot_core", + 
"parking_lot_core 0.8.5", +] + +[[package]] +name = "parking_lot" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" +dependencies = [ + "lock_api", + "parking_lot_core 0.9.2", ] [[package]] name = "parking_lot_core" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall", @@ -1373,6 +1333,19 @@ dependencies = [ "winapi", ] +[[package]] +name = "parking_lot_core" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "995f667a6c822200b0433ac218e05582f0e2efa1b922a3fd2fbaadc5f87bab37" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-sys", +] + [[package]] name = "peeking_take_while" version = "0.1.2" @@ -1398,18 +1371,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "1.0.4" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95b70b68509f17aa2857863b6fa00bf21fc93674c7a8893de2f469f6aa7ca2f2" +checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.4" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caa25a6393f22ce819b0f50e0be89287292fda8d425be38ee0ca14c4931d9e71" +checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" dependencies = [ "proc-macro2", "quote", @@ -1418,9 +1391,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.4" +version = "0.2.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" +checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" [[package]] name = "pin-utils" @@ -1430,15 +1403,15 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.19" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" [[package]] name = "ppv-lite86" -version = "0.2.10" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "proc-macro-hack" @@ -1448,9 +1421,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.24" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1" dependencies = [ "unicode-xid", ] @@ -1463,9 +1436,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.8" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" +checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" dependencies = [ "proc-macro2", ] @@ -1477,7 +1450,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "545c5bc2b880973c9c10e4067418407a0ccaa3091781d1671d46eb35107cb26f" 
dependencies = [ "log", - "parking_lot", + "parking_lot 0.11.2", "scheduled-thread-pool", ] @@ -1509,37 +1482,13 @@ checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" [[package]] name = "rand" -version = "0.7.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ - "getrandom 0.1.16", "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", -] - -[[package]] -name = "rand" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.3", - "rand_hc 0.3.1", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "rand_chacha", + "rand_core", ] [[package]] @@ -1549,16 +1498,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.3", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", + "rand_core", ] [[package]] @@ -1567,41 +1507,23 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.3", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_hc" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" -dependencies = [ - "rand_core 0.6.3", + "getrandom", ] [[package]] name = "redox_syscall" -version = "0.2.10" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" dependencies = [ "bitflags", ] [[package]] name = "regex" -version = "1.5.4" +version = "1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" +checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" dependencies = [ "aho-corasick", "memchr", @@ -1662,13 +1584,13 @@ checksum = "3e52c148ef37f8c375d49d5a73aa70713125b7f19095948a923f80afdeb22ec2" [[package]] name = "rust_decimal" -version = "1.22.0" +version = "1.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d37baa70cf8662d2ba1c1868c5983dda16ef32b105cce41fb5c47e72936a90b3" +checksum = "22dc69eadbf0ee2110b8d20418c0c6edbaefec2811c4963dc17b6344e11fe0f8" dependencies = [ "arrayvec 0.7.2", "num-traits 0.2.14", - "serde 1.0.120", + "serde 1.0.136", ] [[package]] @@ -1692,7 +1614,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.4", + "semver 1.0.7", ] [[package]] @@ -1710,9 +1632,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" [[package]] name = "safemem" @@ -1742,7 +1664,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" dependencies = [ - "parking_lot", + "parking_lot 0.11.2", ] [[package]] @@ -1769,9 +1691,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.3.1" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" +checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" dependencies = [ "bitflags", "core-foundation", @@ -1801,9 +1723,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.4" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" +checksum = "d65bd28f48be7196d222d95b9243287f48d27aca604e08497513019ff0502cc4" [[package]] name = "semver-parser" @@ -1819,9 +1741,9 @@ checksum = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" [[package]] name = "serde" -version = "1.0.120" +version = "1.0.136" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "166b2349061381baf54a58e4b13c89369feb0ef2eaa57198899e2312aac30aab" +checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" dependencies = [ "serde_derive", ] @@ -1844,7 +1766,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "934d8bdbaa0126dafaea9a8833424a211d9661897717846c6bb782349ca1c30d" dependencies = [ - "serde 1.0.120", + "serde 1.0.136", "serde_bytes", ] @@ -1854,14 +1776,14 @@ version = "0.11.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "16ae07dd2f88a366f15bd0632ba725227018c69a1c8550a927324f8eb8368bb9" dependencies = [ - "serde 1.0.120", + "serde 1.0.136", ] [[package]] name = "serde_derive" -version = "1.0.120" +version = "1.0.136" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca2a8cb5805ce9e3b95435e3765b7b553cecc762d938d409434338386cb5775" +checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" dependencies = [ "proc-macro2", "quote", @@ -1870,50 +1792,49 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.72" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ffa0837f2dfa6fb90868c2b5468cad482e175f7dad97e7421951e663f2b527" +checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" dependencies = [ "itoa", "ryu", - "serde 1.0.120", + "serde 1.0.136", ] [[package]] name = "serde_urlencoded" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", "itoa", "ryu", - "serde 1.0.120", + "serde 1.0.136", ] [[package]] name = "sha-1" -version = "0.8.2" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" +checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", + "block-buffer 0.9.0", + "cfg-if", + "cpufeatures", + "digest 0.9.0", + "opaque-debug", ] [[package]] name = "sha-1" -version = "0.9.8" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.0", + "digest 0.10.3", ] [[package]] @@ -1938,10 +1859,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", - "opaque-debug 0.3.0", + "opaque-debug", ] [[package]] @@ -1952,35 +1873,24 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook-registry" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" dependencies = [ "libc", ] [[package]] name = "slab" -version = "0.4.2" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" [[package]] name = "smallvec" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" - -[[package]] -name = "socket2" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "winapi", -] +checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" [[package]] name = "socket2" @@ -2035,7 +1945,7 @@ checksum = 
"c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" dependencies = [ "proc-macro2", "quote", - "serde 1.0.120", + "serde 1.0.136", "serde_derive", "syn", ] @@ -2049,7 +1959,7 @@ dependencies = [ "base-x", "proc-macro2", "quote", - "serde 1.0.120", + "serde 1.0.136", "serde_derive", "serde_json", "sha1", @@ -2080,9 +1990,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.67" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6498a9efc342871f91cc2d0d694c674368b4ceb40f62b65a7a08c3792935e702" +checksum = "b683b2b825c8eef438b77c36a06dc262294da3d5a5813fac20da149241dcd44d" dependencies = [ "proc-macro2", "quote", @@ -2097,13 +2007,13 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", + "fastrand", "libc", - "rand 0.8.4", "redox_syscall", "remove_dir_all", "winapi", @@ -2129,18 +2039,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.26" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93119e4feac1cbe6c798c34d3a53ea0026b0b1de6a120deef895137c0529bfe2" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.26" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "060d69a0afe7796bf42e9e2ff91f5ee691fb15c53d38b4b62a9a53eb23164745" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ "proc-macro2", "quote", @@ -2198,9 +2108,9 @@ dependencies = [ [[package]] name = "tinyvec" -version 
= "1.3.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "848a1e1181b9f6753b5e96a092749e29b11d19ede67dfbbd6c7dc7e0f49b5338" +checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" dependencies = [ "tinyvec_macros", ] @@ -2213,29 +2123,29 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.7.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c79ba603c337335df6ba6dd6afc38c38a7d5e1b0c871678439ea973cd62a118e" +checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee" dependencies = [ - "autocfg", "bytes", "libc", "memchr", "mio", "num_cpus", "once_cell", - "parking_lot", + "parking_lot 0.12.0", "pin-project-lite", "signal-hook-registry", + "socket2", "tokio-macros", "winapi", ] [[package]] name = "tokio-macros" -version = "1.1.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" +checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ "proc-macro2", "quote", @@ -2255,9 +2165,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.2" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" +checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" dependencies = [ "futures-core", "pin-project-lite", @@ -2266,9 +2176,9 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.13.0" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1a5f475f1b9d077ea1017ecbc60890fda8e54942d680ca0b1d2b47cfa2d861b" +checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" dependencies = [ "futures-util", "log", 
@@ -2279,9 +2189,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.7" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" +checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" dependencies = [ "bytes", "futures-core", @@ -2291,13 +2201,27 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-util" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0edfdeb067411dba2044da6d1cb2df793dd35add7888d73c16e3381ded401764" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", + "tracing", +] + [[package]] name = "toml" version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ - "serde 1.0.120", + "serde 1.0.136", ] [[package]] @@ -2319,8 +2243,8 @@ dependencies = [ "r2d2", "r2d2_mysql", "r2d2_sqlite", - "rand 0.8.4", - "serde 1.0.120", + "rand", + "serde 1.0.136", "serde_bencode", "serde_bytes", "serde_json", @@ -2332,27 +2256,39 @@ dependencies = [ [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.22" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" +checksum = "80b9fa4360528139bc96100c160b7ae879f5567f49f1782b0b02035b0358ebf3" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "log", "pin-project-lite", + "tracing-attributes", "tracing-core", ] +[[package]] +name = "tracing-attributes" +version = "0.1.20" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e65ce065b4b5c53e73bb28912318cb8c9e9ad3921f1d669eb0e68b4c8143a2b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "tracing-core" -version = "0.1.17" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +checksum = "6dfce9f3241b150f36e8e54bb561a742d5daa1a47b5dd9a5ce369fd4a4db2210" dependencies = [ "lazy_static", ] @@ -2365,19 +2301,19 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "tungstenite" -version = "0.12.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" +checksum = "a0b2d8558abd2e276b0a8df5c05a2ec762609344191e5fd23e292c910e9165b5" dependencies = [ "base64", "byteorder", "bytes", "http", "httparse", - "input_buffer", "log", - "rand 0.8.4", + "rand", "sha-1 0.9.8", + "thiserror", "url", "utf-8", ] @@ -2397,16 +2333,16 @@ version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0" dependencies = [ - "cfg-if 1.0.0", - "rand 0.8.4", + "cfg-if", + "rand", "static_assertions", ] [[package]] name = "typenum" -version = "1.12.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "unicase" @@ -2419,9 +2355,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "246f4c42e67e7a4e3c6106ff716a5d067d4132a642840b242e357e468a2a0085" +checksum = 
"1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" [[package]] name = "unicode-normalization" @@ -2440,9 +2376,9 @@ checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" [[package]] name = "unicode-xid" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "untrusted" @@ -2488,9 +2424,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.2" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "want" @@ -2504,12 +2440,13 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332d47745e9a0c38636dbd454729b147d16bd1ed08ae67b3ab281c4506771054" +checksum = "3cef4e1e9114a4b7f1ac799f16ce71c14de5778500c5450ec6b7b920c55b587e" dependencies = [ "bytes", - "futures", + "futures-channel", + "futures-util", "headers", "http", "hyper", @@ -2520,45 +2457,45 @@ dependencies = [ "percent-encoding", "pin-project", "scoped-tls", - "serde 1.0.120", + "serde 1.0.136", "serde_json", "serde_urlencoded", "tokio", "tokio-rustls", "tokio-stream", "tokio-tungstenite", - "tokio-util", + "tokio-util 0.6.9", "tower-service", "tracing", ] [[package]] name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" +version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +checksum = 
"1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.78" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" +checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.78" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" +checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" dependencies = [ "bumpalo", "lazy_static", @@ -2571,9 +2508,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.78" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" +checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2581,9 +2518,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.78" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" +checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" dependencies = [ "proc-macro2", "quote", @@ -2594,15 +2531,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = 
"0.2.78" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" +checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" [[package]] name = "web-sys" -version = "0.3.55" +version = "0.3.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" +checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" dependencies = [ "js-sys", "wasm-bindgen", @@ -2658,6 +2595,49 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-sys" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5acdd78cb4ba54c0045ac14f62d8f94a03d10047904ae2a40afa1e99d8f70825" +dependencies = [ + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_msvc" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" + +[[package]] +name = "windows_i686_gnu" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" + +[[package]] +name = "windows_i686_msvc" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" + +[[package]] +name = "windows_x86_64_msvc" 
+version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" + [[package]] name = "wyz" version = "0.4.0" diff --git a/src/tracker.rs b/src/tracker.rs index 4aaa767be..053119b4a 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -34,13 +34,15 @@ pub enum TrackerMode { PrivateListedMode, } + pub struct TorrentTracker { pub config: Arc, torrents: tokio::sync::RwLock>, updates: tokio::sync::RwLock>, shadow: tokio::sync::RwLock>, database: Box, - pub stats_tracker: StatsTracker + pub stats_tracker: StatsTracker, + pub guard: pprof } impl TorrentTracker { @@ -56,7 +58,8 @@ impl TorrentTracker { updates: RwLock::new(std::collections::HashMap::new()), shadow: RwLock::new(std::collections::HashMap::new()), database, - stats_tracker + stats_tracker, + guard: pprof::ProfilerGuard::new(100).unwrap() }) } @@ -228,15 +231,15 @@ impl TorrentTracker { pub async fn post_log(&self) { let torrents = self.torrents.read().await; - let torrents_size = mem::size_of_val(&*torrents); + let torrents_size = torrents.len(); drop(torrents); let updates = self.updates.read().await; - let updates_size = mem::size_of_val(&*updates); + let updates_size = updates.len(); drop(updates); let shadow = self.shadow.read().await; - let shadow_size = mem::size_of_val(&*shadow); + let shadow_size = shadow.len(); drop(shadow); - info!("Stats [::] Torrents: {} byte(s) | Updates: {} byte(s) | Shadow: {} byte(s)", torrents_size, updates_size, shadow_size); + info!("-=[ Stats ]=- | Torrents: {} | Updates: {} | Shadow: {}", torrents_size, updates_size, shadow_size); } // remove torrents without peers if enabled, and defragment memory From 97c17d4a5bb784c43188def982a58c3ad63c5b60 Mon Sep 17 00:00:00 2001 From: Power2All Date: Wed, 13 Apr 2022 19:46:06 +0200 Subject: [PATCH 0073/1003] Oops :) --- src/tracker.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/tracker.rs 
b/src/tracker.rs index 053119b4a..defdecc1f 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -41,8 +41,7 @@ pub struct TorrentTracker { updates: tokio::sync::RwLock>, shadow: tokio::sync::RwLock>, database: Box, - pub stats_tracker: StatsTracker, - pub guard: pprof + pub stats_tracker: StatsTracker } impl TorrentTracker { @@ -58,8 +57,7 @@ impl TorrentTracker { updates: RwLock::new(std::collections::HashMap::new()), shadow: RwLock::new(std::collections::HashMap::new()), database, - stats_tracker, - guard: pprof::ProfilerGuard::new(100).unwrap() + stats_tracker }) } From 145780a6b770431eec99d3eeeb7dfce247747e3b Mon Sep 17 00:00:00 2001 From: Power2All Date: Fri, 15 Apr 2022 08:57:25 +0200 Subject: [PATCH 0074/1003] Adding profiling and updates and cleanups --- Cargo.lock | 91 ++++++++++++++++++++++++++++++++++++++++++++++---- Cargo.toml | 5 +++ src/main.rs | 7 ++++ src/tracker.rs | 2 -- 4 files changed, 96 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7bc7233c8..eb04ba651 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,15 @@ # It is not intended for manual editing. 
version = 3 +[[package]] +name = "addr2line" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +dependencies = [ + "gimli", +] + [[package]] name = "adler" version = "1.0.2" @@ -86,6 +95,21 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "backtrace" +version = "0.3.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide 0.4.4", + "object", + "rustc-demangle", +] + [[package]] name = "base-x" version = "0.2.8" @@ -375,6 +399,21 @@ dependencies = [ "syn", ] +[[package]] +name = "dhat" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47003dc9f6368a88e85956c3b2573a7e6872746a3e5d762a8885da3a136a0381" +dependencies = [ + "backtrace", + "lazy_static", + "parking_lot 0.11.2", + "rustc-hash", + "serde 1.0.136", + "serde_json", + "thousands", +] + [[package]] name = "digest" version = "0.9.0" @@ -459,7 +498,7 @@ dependencies = [ "crc32fast", "libc", "libz-sys", - "miniz_oxide", + "miniz_oxide 0.5.1", ] [[package]] @@ -673,6 +712,12 @@ dependencies = [ "wasi 0.10.0+wasi-snapshot-preview1", ] +[[package]] +name = "gimli" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" + [[package]] name = "glob" version = "0.3.0" @@ -1020,6 +1065,16 @@ dependencies = [ "unicase", ] +[[package]] +name = "miniz_oxide" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +dependencies = [ + "adler", + 
"autocfg", +] + [[package]] name = "miniz_oxide" version = "0.5.1" @@ -1253,6 +1308,15 @@ dependencies = [ "libc", ] +[[package]] +name = "object" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" +dependencies = [ + "memchr", +] + [[package]] name = "once_cell" version = "1.10.0" @@ -1593,6 +1657,12 @@ dependencies = [ "serde 1.0.136", ] +[[package]] +name = "rustc-demangle" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" + [[package]] name = "rustc-hash" version = "1.1.0" @@ -2057,6 +2127,12 @@ dependencies = [ "syn", ] +[[package]] +name = "thousands" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bf63baf9f5039dadc247375c29eb13706706cfde997d0330d05aa63a77d8820" + [[package]] name = "time" version = "0.1.44" @@ -2217,9 +2293,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ "serde 1.0.136", ] @@ -2235,6 +2311,7 @@ dependencies = [ "chrono", "config", "derive_more", + "dhat", "fern", "futures", "hex", @@ -2262,9 +2339,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80b9fa4360528139bc96100c160b7ae879f5567f49f1782b0b02035b0358ebf3" +checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" dependencies = [ "cfg-if", "log", @@ -2286,9 +2363,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.25" 
+version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dfce9f3241b150f36e8e54bb561a742d5daa1a47b5dd9a5ce369fd4a4db2210" +checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" dependencies = [ "lazy_static", ] diff --git a/Cargo.toml b/Cargo.toml index 81f76abe9..9871f20a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,8 +7,12 @@ description = "A feature rich BitTorrent tracker." edition = "2018" [profile.release] +debug = 1 lto = "fat" +[features] +dhat-heap = [] # if you are doing heap profiling + [dependencies] serde = {version = "1.0", features = ["derive"]} serde_bencode = "^0.2.3" @@ -34,3 +38,4 @@ thiserror = "1.0" aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } futures = "0.3.21" async-trait = "0.1.52" +dhat = "0.3.0" diff --git a/src/main.rs b/src/main.rs index 9ba45427e..63eb2ec0a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -5,8 +5,15 @@ use tokio::task::JoinHandle; use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; use torrust_tracker::torrust_http_tracker::server::HttpServer; +#[cfg(feature = "dhat-heap")] +#[global_allocator] +static ALLOC: dhat::Alloc = dhat::Alloc; + #[tokio::main] async fn main() { + #[cfg(feature = "dhat-heap")] + let _profiler = dhat::Profiler::new_heap(); + // torrust config let config = match Configuration::load_from_file() { Ok(config) => Arc::new(config), diff --git a/src/tracker.rs b/src/tracker.rs index defdecc1f..3a69e3e0b 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -6,7 +6,6 @@ use crate::common::{InfoHash}; use std::net::{SocketAddr}; use crate::{Configuration, database, key_manager}; use std::collections::btree_map::Entry; -use std::mem; use std::sync::Arc; use log::info; use crate::key_manager::AuthKey; @@ -34,7 +33,6 @@ pub enum TrackerMode { PrivateListedMode, } - pub struct TorrentTracker { pub config: Arc, torrents: 
tokio::sync::RwLock>, From 9159187f312bbbc529df662f05d5b2f5bce6fa50 Mon Sep 17 00:00:00 2001 From: Power2All Date: Fri, 15 Apr 2022 14:07:42 +0200 Subject: [PATCH 0075/1003] First implementation and iteration of using crossbeam to eventually handle the torrent data, should improve and minimize the memory usage drastically, so to keep the memory object at a single place --- Cargo.lock | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++++ Cargo.toml | 2 ++ src/main.rs | 55 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 128 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index eb04ba651..00e23092c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -365,6 +365,75 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "crossbeam" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ae5588f6b3c3cb05239e90bd110f257254aecd01e4635400391aeae07497845" +dependencies = [ + "cfg-if", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +dependencies = [ + "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "lazy_static", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-queue" +version = "0.3.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f25d8400f4a7a5778f0e4e52384a48cbd9b5c495d110786187fc750075277a2" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" +dependencies = [ + "cfg-if", + "lazy_static", +] + [[package]] name = "crypto-common" version = "0.1.3" @@ -2310,6 +2379,8 @@ dependencies = [ "byteorder", "chrono", "config", + "crossbeam", + "crossbeam-channel", "derive_more", "dhat", "fern", diff --git a/Cargo.toml b/Cargo.toml index 9871f20a4..408a07b85 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,3 +39,5 @@ aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } futures = "0.3.21" async-trait = "0.1.52" dhat = "0.3.0" +crossbeam = "0.8.1" +crossbeam-channel = "0.5.4" diff --git a/src/main.rs b/src/main.rs index 63eb2ec0a..2943e4ca7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,5 +1,6 @@ use std::net::SocketAddr; use std::sync::Arc; +use crossbeam_channel::bounded; use log::{info}; use tokio::task::JoinHandle; use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; @@ -9,11 +10,28 @@ use torrust_tracker::torrust_http_tracker::server::HttpServer; #[global_allocator] static ALLOC: dhat::Alloc = dhat::Alloc; +pub struct DataStream { + action: u8, + data: Vec<()> +} + #[tokio::main] async fn main() { #[cfg(feature = "dhat-heap")] let _profiler = dhat::Profiler::new_heap(); + // Loading configuration + let config = match Configuration::load_from_file() { + Ok(config) => config, + Err(error) => { + panic!("{}", error) + } + }; + + // Start the thread where data is being exchanged for usaga + let (sender, receiver): (crossbeam_channel::Sender, crossbeam_channel::Receiver) = bounded(1); + let _torrents_memory_handler = 
start_torrents_memory_handler(&config, sender.clone(), receiver.clone()); + // torrust config let config = match Configuration::load_from_file() { Ok(config) => Arc::new(config), @@ -97,10 +115,47 @@ async fn main() { let _ = tracker.periodic_saving().await; info!("Torrents saved"); } + + // Closing down channel + sender.clone().send(DataStream{ + action: ACTION_CLOSE_CHANNEL, + data: Vec::new() + }); } } } + +const ACTION_CLOSE_CHANNEL: u8 = 0; +const ACTION_READ_TORRENTS: u8 = 1; +const ACTION_WRITE_TORRENTS: u8 = 2; +const ACTION_UPDATE_TORRENTS: u8 = 3; +const ACTION_READ_PEERS: u8 = 4; +const ACTION_WRITE_PEERS: u8 = 5; +const ACTION_UPDATE_PEERS: u8 = 6; +fn start_torrents_memory_handler(config: &Configuration, sender: crossbeam_channel::Sender, receiver: crossbeam_channel::Receiver) -> Option> { + // This is our main memory handler, everything will be received, handled and send back. + return Some(tokio::spawn(async move { + loop { + // Wait for incoming data. + let data: DataStream = receiver.recv().unwrap(); + + // Lets check what action is given. 
+ match data.action { + ACTION_CLOSE_CHANNEL => { + info!("Ending the memory handler thread..."); + sender.send(DataStream{ + action: ACTION_CLOSE_CHANNEL, + data: Vec::new() + }); + break; + } + _ => {} + } + } + })); +} + fn start_torrent_periodic_job(config: Arc, tracker: Arc) -> Option> { let weak_tracker = std::sync::Arc::downgrade(&tracker); let interval = config.persistence_interval.unwrap_or(900); From 03eade11ee9d33dc5e786d9630464dc3c5394ccc Mon Sep 17 00:00:00 2001 From: Power2All Date: Fri, 15 Apr 2022 14:25:54 +0200 Subject: [PATCH 0076/1003] Moving around, and preparing a refactoring to channel based handling --- src/main.rs | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/src/main.rs b/src/main.rs index 2943e4ca7..b46fc4c71 100644 --- a/src/main.rs +++ b/src/main.rs @@ -28,8 +28,11 @@ async fn main() { } }; + // Enable logging handling + logging::setup_logging(&config); + // Start the thread where data is being exchanged for usaga - let (sender, receiver): (crossbeam_channel::Sender, crossbeam_channel::Receiver) = bounded(1); + let (sender, receiver): (crossbeam_channel::Sender, crossbeam_channel::Receiver) = bounded(0); let _torrents_memory_handler = start_torrents_memory_handler(&config, sender.clone(), receiver.clone()); // torrust config @@ -45,8 +48,6 @@ async fn main() { panic!("{}", e) })); - logging::setup_logging(&config); - // load persistent torrents if enabled if config.persistence { info!("Loading persistent torrents into memory..."); @@ -117,7 +118,7 @@ async fn main() { } // Closing down channel - sender.clone().send(DataStream{ + let _ = sender.clone().send(DataStream{ action: ACTION_CLOSE_CHANNEL, data: Vec::new() }); @@ -135,6 +136,8 @@ const ACTION_WRITE_PEERS: u8 = 5; const ACTION_UPDATE_PEERS: u8 = 6; fn start_torrents_memory_handler(config: &Configuration, sender: crossbeam_channel::Sender, receiver: crossbeam_channel::Receiver) -> Option> { // This is our main memory handler, 
everything will be received, handled and send back. + info!("Starting memory handler thread..."); + return Some(tokio::spawn(async move { loop { // Wait for incoming data. @@ -144,11 +147,29 @@ fn start_torrents_memory_handler(config: &Configuration, sender: crossbeam_chann match data.action { ACTION_CLOSE_CHANNEL => { info!("Ending the memory handler thread..."); - sender.send(DataStream{ + let _ = sender.send(DataStream{ action: ACTION_CLOSE_CHANNEL, data: Vec::new() }); break; + } + ACTION_READ_TORRENTS => { + + } + ACTION_WRITE_TORRENTS => { + + } + ACTION_UPDATE_TORRENTS => { + + } + ACTION_READ_PEERS => { + + } + ACTION_WRITE_PEERS => { + + } + ACTION_UPDATE_PEERS => { + } _ => {} } From 0a858dc067d6b1f4fbf48e1b8a373655df89d7b9 Mon Sep 17 00:00:00 2001 From: Power2All Date: Sat, 16 Apr 2022 12:48:55 +0200 Subject: [PATCH 0077/1003] Revert "Moving around, and preparing a refactoring to channel based handling" This reverts commit 03eade11ee9d33dc5e786d9630464dc3c5394ccc. --- src/main.rs | 31 +++++-------------------------- 1 file changed, 5 insertions(+), 26 deletions(-) diff --git a/src/main.rs b/src/main.rs index b46fc4c71..2943e4ca7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -28,11 +28,8 @@ async fn main() { } }; - // Enable logging handling - logging::setup_logging(&config); - // Start the thread where data is being exchanged for usaga - let (sender, receiver): (crossbeam_channel::Sender, crossbeam_channel::Receiver) = bounded(0); + let (sender, receiver): (crossbeam_channel::Sender, crossbeam_channel::Receiver) = bounded(1); let _torrents_memory_handler = start_torrents_memory_handler(&config, sender.clone(), receiver.clone()); // torrust config @@ -48,6 +45,8 @@ async fn main() { panic!("{}", e) })); + logging::setup_logging(&config); + // load persistent torrents if enabled if config.persistence { info!("Loading persistent torrents into memory..."); @@ -118,7 +117,7 @@ async fn main() { } // Closing down channel - let _ = 
sender.clone().send(DataStream{ + sender.clone().send(DataStream{ action: ACTION_CLOSE_CHANNEL, data: Vec::new() }); @@ -136,8 +135,6 @@ const ACTION_WRITE_PEERS: u8 = 5; const ACTION_UPDATE_PEERS: u8 = 6; fn start_torrents_memory_handler(config: &Configuration, sender: crossbeam_channel::Sender, receiver: crossbeam_channel::Receiver) -> Option> { // This is our main memory handler, everything will be received, handled and send back. - info!("Starting memory handler thread..."); - return Some(tokio::spawn(async move { loop { // Wait for incoming data. @@ -147,29 +144,11 @@ fn start_torrents_memory_handler(config: &Configuration, sender: crossbeam_chann match data.action { ACTION_CLOSE_CHANNEL => { info!("Ending the memory handler thread..."); - let _ = sender.send(DataStream{ + sender.send(DataStream{ action: ACTION_CLOSE_CHANNEL, data: Vec::new() }); break; - } - ACTION_READ_TORRENTS => { - - } - ACTION_WRITE_TORRENTS => { - - } - ACTION_UPDATE_TORRENTS => { - - } - ACTION_READ_PEERS => { - - } - ACTION_WRITE_PEERS => { - - } - ACTION_UPDATE_PEERS => { - } _ => {} } From bd680c4e10ff3c2c672145a2bed59a98bf7f5d3b Mon Sep 17 00:00:00 2001 From: Power2All Date: Sat, 16 Apr 2022 12:49:00 +0200 Subject: [PATCH 0078/1003] Revert "First implementation and iteration of using crossbeam to eventually handle the torrent data, should improve and minimize the memory usage drastically, so to keep the memory object at a single place" This reverts commit 9159187f312bbbc529df662f05d5b2f5bce6fa50. 
--- Cargo.lock | 71 ----------------------------------------------------- Cargo.toml | 2 -- src/main.rs | 55 ----------------------------------------- 3 files changed, 128 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 00e23092c..eb04ba651 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -365,75 +365,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "crossbeam" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae5588f6b3c3cb05239e90bd110f257254aecd01e4635400391aeae07497845" -dependencies = [ - "cfg-if", - "crossbeam-channel", - "crossbeam-deque", - "crossbeam-epoch", - "crossbeam-queue", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-channel" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" -dependencies = [ - "cfg-if", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" -dependencies = [ - "cfg-if", - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" -dependencies = [ - "autocfg", - "cfg-if", - "crossbeam-utils", - "lazy_static", - "memoffset", - "scopeguard", -] - -[[package]] -name = "crossbeam-queue" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f25d8400f4a7a5778f0e4e52384a48cbd9b5c495d110786187fc750075277a2" -dependencies = [ - "cfg-if", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" -dependencies = [ - "cfg-if", - "lazy_static", -] - [[package]] name = "crypto-common" version = "0.1.3" @@ -2379,8 +2310,6 @@ dependencies = [ "byteorder", "chrono", "config", - "crossbeam", - "crossbeam-channel", "derive_more", "dhat", "fern", diff --git a/Cargo.toml b/Cargo.toml index 408a07b85..9871f20a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,5 +39,3 @@ aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } futures = "0.3.21" async-trait = "0.1.52" dhat = "0.3.0" -crossbeam = "0.8.1" -crossbeam-channel = "0.5.4" diff --git a/src/main.rs b/src/main.rs index 2943e4ca7..63eb2ec0a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,5 @@ use std::net::SocketAddr; use std::sync::Arc; -use crossbeam_channel::bounded; use log::{info}; use tokio::task::JoinHandle; use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; @@ -10,28 +9,11 @@ use torrust_tracker::torrust_http_tracker::server::HttpServer; #[global_allocator] static ALLOC: dhat::Alloc = dhat::Alloc; -pub struct DataStream { - action: u8, - data: Vec<()> -} - #[tokio::main] async fn main() { #[cfg(feature = "dhat-heap")] let _profiler = dhat::Profiler::new_heap(); - // Loading configuration - let config = match Configuration::load_from_file() { - Ok(config) => config, - Err(error) => { - panic!("{}", error) - } - }; - - // Start the thread where data is being exchanged for usaga - let (sender, receiver): (crossbeam_channel::Sender, crossbeam_channel::Receiver) = bounded(1); - let _torrents_memory_handler = start_torrents_memory_handler(&config, sender.clone(), receiver.clone()); - // torrust config let config = match Configuration::load_from_file() { Ok(config) => Arc::new(config), @@ -115,47 +97,10 @@ async fn main() { let _ = tracker.periodic_saving().await; info!("Torrents saved"); } - - // Closing down channel - 
sender.clone().send(DataStream{ - action: ACTION_CLOSE_CHANNEL, - data: Vec::new() - }); } } } - -const ACTION_CLOSE_CHANNEL: u8 = 0; -const ACTION_READ_TORRENTS: u8 = 1; -const ACTION_WRITE_TORRENTS: u8 = 2; -const ACTION_UPDATE_TORRENTS: u8 = 3; -const ACTION_READ_PEERS: u8 = 4; -const ACTION_WRITE_PEERS: u8 = 5; -const ACTION_UPDATE_PEERS: u8 = 6; -fn start_torrents_memory_handler(config: &Configuration, sender: crossbeam_channel::Sender, receiver: crossbeam_channel::Receiver) -> Option> { - // This is our main memory handler, everything will be received, handled and send back. - return Some(tokio::spawn(async move { - loop { - // Wait for incoming data. - let data: DataStream = receiver.recv().unwrap(); - - // Lets check what action is given. - match data.action { - ACTION_CLOSE_CHANNEL => { - info!("Ending the memory handler thread..."); - sender.send(DataStream{ - action: ACTION_CLOSE_CHANNEL, - data: Vec::new() - }); - break; - } - _ => {} - } - } - })); -} - fn start_torrent_periodic_job(config: Arc, tracker: Arc) -> Option> { let weak_tracker = std::sync::Arc::downgrade(&tracker); let interval = config.persistence_interval.unwrap_or(900); From 509d487f8040a5471d7fa835c39df2aeb476e0de Mon Sep 17 00:00:00 2001 From: Power2All Date: Sat, 16 Apr 2022 13:02:20 +0200 Subject: [PATCH 0079/1003] Code cleanup and fixing MySQL --- Cargo.toml | 8 +++--- src/common.rs | 7 +++-- src/config.rs | 32 +++++++++++---------- src/database.rs | 12 ++++---- src/http_api_server.rs | 29 ++++++++++--------- src/key_manager.rs | 18 ++++++------ src/lib.rs | 14 ++++++---- src/logging.rs | 1 + src/main.rs | 6 ++-- src/mysql_database.rs | 42 +++++++++++++++------------- src/sqlite_database.rs | 42 +++++++++++++++------------- src/torrent.rs | 10 ++++--- src/torrust_http_tracker/errors.rs | 2 +- src/torrust_http_tracker/filters.rs | 26 +++++++++-------- src/torrust_http_tracker/handlers.rs | 12 ++++---- src/torrust_http_tracker/mod.rs | 16 +++++------ 
src/torrust_http_tracker/request.rs | 6 ++-- src/torrust_http_tracker/response.rs | 9 +++--- src/torrust_http_tracker/routes.rs | 12 ++++---- src/torrust_http_tracker/server.rs | 1 + src/torrust_udp_tracker/handlers.rs | 18 ++++++------ src/torrust_udp_tracker/mod.rs | 10 +++---- src/torrust_udp_tracker/request.rs | 7 +++-- src/torrust_udp_tracker/server.rs | 8 ++++-- src/tracker.rs | 42 +++++++++++++++------------- src/tracker_stats.rs | 9 +++--- src/utils.rs | 5 ++-- 27 files changed, 222 insertions(+), 182 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9871f20a4..a10d548c2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,17 +14,17 @@ lto = "fat" dhat-heap = [] # if you are doing heap profiling [dependencies] -serde = {version = "1.0", features = ["derive"]} +serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2.3" serde_bytes = "0.11" serde_json = "1.0.72" hex = "0.4.3" percent-encoding = "2.1.0" -warp = {version = "0.3", features = ["tls"]} -tokio = {version = "1.7", features = ["full"]} +warp = { version = "0.3", features = ["tls"] } +tokio = { version = "1.7", features = ["full"] } binascii = "0.1" toml = "0.5" -log = {version = "0.4", features = ["release_max_level_info"]} +log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = "0.4" byteorder = "1" diff --git a/src/common.rs b/src/common.rs index 4d2f5ec71..5d69ed0e1 100644 --- a/src/common.rs +++ b/src/common.rs @@ -1,5 +1,5 @@ -use serde::{Deserialize, Serialize}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use serde::{Deserialize, Serialize}; pub const MAX_SCRAPE_TORRENTS: u8 = 74; pub const AUTH_KEY_LENGTH: usize = 32; @@ -19,7 +19,7 @@ pub enum AnnounceEventDef { Started, Stopped, Completed, - None + None, } #[derive(Serialize, Deserialize)] @@ -135,7 +135,7 @@ impl PeerId { String::from(std::str::from_utf8(bytes_out).unwrap()) } else { "".to_string() - } + }; } } @@ -218,6 +218,7 @@ impl PeerId { } } } + impl Serialize for 
PeerId { fn serialize(&self, serializer: S) -> Result where diff --git a/src/config.rs b/src/config.rs index 67078d608..7130d8c92 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,19 +1,21 @@ -pub use crate::tracker::TrackerMode; -use serde::{Serialize, Deserialize, Serializer}; use std; use std::collections::HashMap; use std::fs; -use toml; -use std::net::{IpAddr}; +use std::net::IpAddr; use std::path::Path; use std::str::FromStr; -use config::{ConfigError, Config, File}; + +use config::{Config, ConfigError, File}; +use serde::{Deserialize, Serialize, Serializer}; +use toml; + use crate::database::DatabaseDrivers; +pub use crate::tracker::TrackerMode; #[derive(Serialize, Deserialize, PartialEq)] pub enum TrackerServer { UDP, - HTTP + HTTP, } #[derive(Serialize, Deserialize, Debug)] @@ -30,7 +32,7 @@ pub struct HttpTrackerConfig { #[serde(serialize_with = "none_as_empty_string")] pub ssl_cert_path: Option, #[serde(serialize_with = "none_as_empty_string")] - pub ssl_key_path: Option + pub ssl_key_path: Option, } impl HttpTrackerConfig { @@ -113,7 +115,7 @@ impl Configuration { match Self::load(data.as_slice()) { Ok(cfg) => { Ok(cfg) - }, + } Err(e) => Err(ConfigurationError::ParseError(e)), } } @@ -156,21 +158,21 @@ impl Configuration { enabled: true, bind_address: String::from("127.0.0.1:1212"), access_tokens: [(String::from("admin"), String::from("MyAccessToken"))].iter().cloned().collect(), - } + }, }; configuration.udp_trackers.push( - UdpTrackerConfig{ + UdpTrackerConfig { enabled: false, - bind_address: String::from("0.0.0.0:6969") + bind_address: String::from("0.0.0.0:6969"), } ); configuration.http_trackers.push( - HttpTrackerConfig{ + HttpTrackerConfig { enabled: false, bind_address: String::from("0.0.0.0:6969"), ssl_enabled: false, ssl_cert_path: None, - ssl_key_path: None + ssl_key_path: None, } ); configuration @@ -188,7 +190,7 @@ impl Configuration { eprintln!("Creating config file.."); let config = Configuration::default(); let _ = 
config.save_to_file(); - return Err(ConfigError::Message(format!("Please edit the config.TOML in the root folder and restart the tracker."))) + return Err(ConfigError::Message(format!("Please edit the config.TOML in the root folder and restart the tracker."))); } let torrust_config: Configuration = config.try_into().map_err(|e| ConfigError::Message(format!("Errors while processing config: {}.", e)))?; @@ -196,7 +198,7 @@ impl Configuration { Ok(torrust_config) } - pub fn save_to_file(&self) -> Result<(), ()>{ + pub fn save_to_file(&self) -> Result<(), ()> { let toml_string = toml::to_string(self).expect("Could not encode TOML value"); fs::write("config.toml", toml_string).expect("Could not write to file!"); Ok(()) diff --git a/src/database.rs b/src/database.rs index 18bf41994..a90161e91 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,18 +1,20 @@ use std::collections::BTreeMap; -use crate::{InfoHash}; -use crate::key_manager::AuthKey; -use crate::sqlite_database::SqliteDatabase; + use async_trait::async_trait; use derive_more::{Display, Error}; use log::debug; +use serde::{Deserialize, Serialize}; + +use crate::InfoHash; +use crate::key_manager::AuthKey; use crate::mysql_database::MysqlDatabase; -use serde::{Serialize, Deserialize}; +use crate::sqlite_database::SqliteDatabase; use crate::torrent::TorrentEntry; #[derive(Serialize, Deserialize, Debug)] pub enum DatabaseDrivers { Sqlite3, - MySQL + MySQL, } pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result, r2d2::Error> { diff --git a/src/http_api_server.rs b/src/http_api_server.rs index eff45fc33..89505cb09 100644 --- a/src/http_api_server.rs +++ b/src/http_api_server.rs @@ -1,10 +1,13 @@ -use crate::tracker::{TorrentTracker}; -use serde::{Deserialize, Serialize}; use std::cmp::min; use std::collections::{HashMap, HashSet}; use std::sync::Arc; -use warp::{filters, reply, reply::Reply, serve, Filter, Server}; + +use serde::{Deserialize, Serialize}; +use warp::{Filter, filters, 
reply, reply::Reply, serve, Server}; + use crate::torrent::TorrentPeer; +use crate::tracker::TorrentTracker; + use super::common::*; #[derive(Deserialize, Debug)] @@ -52,7 +55,7 @@ enum ActionStatus<'a> { impl warp::reject::Reject for ActionStatus<'static> {} -fn authenticate(tokens: HashMap) -> impl Filter + Clone { +fn authenticate(tokens: HashMap) -> impl Filter + Clone { #[derive(Deserialize)] struct AuthToken { token: Option, @@ -69,7 +72,7 @@ fn authenticate(tokens: HashMap) -> impl Filter { if !tokens.contains(&token) { - return Err(warp::reject::custom(ActionStatus::Err { reason: "token not valid".into() })) + return Err(warp::reject::custom(ActionStatus::Err { reason: "token not valid".into() })); } Ok(()) @@ -81,7 +84,7 @@ fn authenticate(tokens: HashMap) -> impl Filter) -> Server + Clone + Send + Sync + 'static> { +pub fn build_server(tracker: Arc) -> Server + Clone + Send + Sync + 'static> { // GET /api/torrents?offset=:u32&limit=:u32 // View torrent list let api_torrents = tracker.clone(); @@ -131,7 +134,7 @@ pub fn build_server(tracker: Arc) -> Server| { async move { - let mut results = Stats{ + let mut results = Stats { torrents: 0, seeders: 0, completed: 0, @@ -147,7 +150,7 @@ pub fn build_server(tracker: Arc) -> Server = db @@ -195,7 +198,7 @@ pub fn build_server(tracker: Arc) -> Server) -> Server)| { async move { - match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to remove torrent from whitelist".into() })) - } + match tracker.remove_torrent_from_whitelist(&info_hash).await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to remove torrent from whitelist".into() })) + } } }); diff --git a/src/key_manager.rs b/src/key_manager.rs index b1f16f1dc..507402358 100644 --- a/src/key_manager.rs +++ b/src/key_manager.rs @@ -1,10 +1,12 @@ 
-use super::common::AUTH_KEY_LENGTH; -use crate::utils::current_time; -use rand::{thread_rng, Rng}; +use derive_more::{Display, Error}; +use log::debug; +use rand::{Rng, thread_rng}; use rand::distributions::Alphanumeric; use serde::Serialize; -use log::debug; -use derive_more::{Display, Error}; + +use crate::utils::current_time; + +use super::common::AUTH_KEY_LENGTH; pub fn generate_auth_key(seconds_valid: u64) -> AuthKey { let key: String = thread_rng() @@ -23,8 +25,8 @@ pub fn generate_auth_key(seconds_valid: u64) -> AuthKey { pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { let current_time = current_time(); - if auth_key.valid_until.is_none() { return Err(Error::KeyInvalid) } - if auth_key.valid_until.unwrap() < current_time { return Err(Error::KeyExpired) } + if auth_key.valid_until.is_none() { return Err(Error::KeyInvalid); } + if auth_key.valid_until.unwrap() < current_time { return Err(Error::KeyExpired); } Ok(()) } @@ -67,7 +69,7 @@ pub enum Error { #[display(fmt = "Key is invalid.")] KeyInvalid, #[display(fmt = "Key has expired.")] - KeyExpired + KeyExpired, } impl From for Error { diff --git a/src/lib.rs b/src/lib.rs index 3d928aff4..b6cebfc5e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,11 @@ +pub use torrust_http_tracker::server::*; +pub use torrust_udp_tracker::server::*; + +pub use self::common::*; +pub use self::config::*; +pub use self::http_api_server::*; +pub use self::tracker::*; + pub mod config; pub mod tracker; pub mod http_api_server; @@ -13,9 +21,3 @@ pub mod mysql_database; pub mod torrent; pub mod tracker_stats; -pub use self::config::*; -pub use torrust_udp_tracker::server::*; -pub use torrust_http_tracker::server::*; -pub use self::tracker::*; -pub use self::http_api_server::*; -pub use self::common::*; diff --git a/src/logging.rs b/src/logging.rs index 580e35094..c2e77551f 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -1,4 +1,5 @@ use log::info; + use crate::Configuration; pub fn setup_logging(cfg: 
&Configuration) { diff --git a/src/main.rs b/src/main.rs index 63eb2ec0a..b17ef14fe 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,7 +1,9 @@ use std::net::SocketAddr; use std::sync::Arc; -use log::{info}; + +use log::info; use tokio::task::JoinHandle; + use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; use torrust_tracker::torrust_http_tracker::server::HttpServer; @@ -12,7 +14,7 @@ static ALLOC: dhat::Alloc = dhat::Alloc; #[tokio::main] async fn main() { #[cfg(feature = "dhat-heap")] - let _profiler = dhat::Profiler::new_heap(); + let _profiler = dhat::Profiler::new_heap(); // torrust config let config = match Configuration::load_from_file() { diff --git a/src/mysql_database.rs b/src/mysql_database.rs index 0597d46aa..be1fe649b 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -1,18 +1,20 @@ use std::collections::BTreeMap; -use crate::{InfoHash, AUTH_KEY_LENGTH, database}; -use log::debug; -use r2d2::{Pool}; -use crate::key_manager::AuthKey; use std::str::FromStr; -use crate::database::Database; + use async_trait::async_trait; +use log::debug; +use r2d2::Pool; use r2d2_mysql::mysql::{Opts, OptsBuilder, params, TxOpts}; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::MysqlConnectionManager; + +use crate::{AUTH_KEY_LENGTH, database, InfoHash}; +use crate::database::Database; +use crate::key_manager::AuthKey; use crate::torrent::TorrentEntry; pub struct MysqlDatabase { - pool: Pool + pool: Pool, } impl MysqlDatabase { @@ -79,8 +81,8 @@ impl Database for MysqlDatabase { let mut db_transaction = conn.start_transaction(TxOpts::default()).map_err(|_| database::Error::DatabaseError)?; for (info_hash, torrent_entry) in torrents { - let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.exec_drop("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", (info_hash.to_string(), completed.to_string())); + 
let (_seeders, completed, _leechers) = torrent_entry.get_stats(); + let _ = db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (?, ?) ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())); } let _ = db_transaction.commit(); @@ -93,13 +95,13 @@ impl Database for MysqlDatabase { match conn.exec_first::("SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", params! { info_hash => info_hash }) .map_err(|_| database::Error::QueryReturnedNoRows)? { - Some(info_hash) => { - Ok(InfoHash::from_str(&info_hash).unwrap()) - }, - None => { - Err(database::Error::InvalidQuery) - } + Some(info_hash) => { + Ok(InfoHash::from_str(&info_hash).unwrap()) } + None => { + Err(database::Error::InvalidQuery) + } + } } async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { @@ -110,7 +112,7 @@ impl Database for MysqlDatabase { match conn.exec_drop("INSERT INTO whitelist (info_hash) VALUES (:info_hash_str)", params! { info_hash_str }) { Ok(_) => { Ok(1) - }, + } Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -126,7 +128,7 @@ impl Database for MysqlDatabase { match conn.exec_drop("DELETE FROM whitelist WHERE info_hash = :info_hash", params! { info_hash }) { Ok(_) => { Ok(1) - }, + } Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -142,9 +144,9 @@ impl Database for MysqlDatabase { Some((key, valid_until)) => { Ok(AuthKey { key, - valid_until: Some(valid_until as u64) + valid_until: Some(valid_until as u64), }) - }, + } None => { Err(database::Error::InvalidQuery) } @@ -160,7 +162,7 @@ impl Database for MysqlDatabase { match conn.exec_drop("INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", params! { key, valid_until }) { Ok(_) => { Ok(1) - }, + } Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -174,7 +176,7 @@ impl Database for MysqlDatabase { match conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! 
{ key }) { Ok(_) => { Ok(1) - }, + } Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) diff --git a/src/sqlite_database.rs b/src/sqlite_database.rs index 5facd99d8..fa519ffd0 100644 --- a/src/sqlite_database.rs +++ b/src/sqlite_database.rs @@ -1,17 +1,19 @@ use std::collections::BTreeMap; -use crate::{InfoHash, AUTH_KEY_LENGTH, database}; +use std::str::FromStr; + +use async_trait::async_trait; use log::debug; -use r2d2_sqlite::{SqliteConnectionManager}; -use r2d2::{Pool}; +use r2d2::Pool; +use r2d2_sqlite::SqliteConnectionManager; use r2d2_sqlite::rusqlite::NO_PARAMS; -use crate::key_manager::AuthKey; -use std::str::FromStr; + +use crate::{AUTH_KEY_LENGTH, database, InfoHash}; use crate::database::Database; -use async_trait::async_trait; +use crate::key_manager::AuthKey; use crate::torrent::TorrentEntry; pub struct SqliteDatabase { - pool: Pool + pool: Pool, } impl SqliteDatabase { @@ -68,7 +70,7 @@ impl Database for SqliteDatabase { Ok((info_hash, completed)) })?; - let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(|x| x.ok() ).collect(); + let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(|x| x.ok()).collect(); Ok(torrents) } @@ -79,8 +81,8 @@ impl Database for SqliteDatabase { let db_transaction = conn.transaction()?; for (info_hash, torrent_entry) in torrents { - let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); + let (_seeders, completed, _leechers) = torrent_entry.get_stats(); + let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); } let _ = db_transaction.commit(); @@ -109,9 +111,9 @@ impl Database for SqliteDatabase { match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", &[info_hash.to_string()]) { Ok(updated) => { - if updated > 0 { return 
Ok(updated) } + if updated > 0 { return Ok(updated); } Err(database::Error::QueryReturnedNoRows) - }, + } Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -124,9 +126,9 @@ impl Database for SqliteDatabase { match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", &[info_hash.to_string()]) { Ok(updated) => { - if updated > 0 { return Ok(updated) } + if updated > 0 { return Ok(updated); } Err(database::Error::QueryReturnedNoRows) - }, + } Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -146,7 +148,7 @@ impl Database for SqliteDatabase { Ok(AuthKey { key, - valid_until: Some(valid_until_i64 as u64) + valid_until: Some(valid_until_i64 as u64), }) } else { Err(database::Error::QueryReturnedNoRows) @@ -157,12 +159,12 @@ impl Database for SqliteDatabase { let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; match conn.execute("INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - &[auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()] + &[auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()], ) { Ok(updated) => { - if updated > 0 { return Ok(updated) } + if updated > 0 { return Ok(updated); } Err(database::Error::QueryReturnedNoRows) - }, + } Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -175,9 +177,9 @@ impl Database for SqliteDatabase { match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) { Ok(updated) => { - if updated > 0 { return Ok(updated) } + if updated > 0 { return Ok(updated); } Err(database::Error::QueryReturnedNoRows) - }, + } Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) diff --git a/src/torrent.rs b/src/torrent.rs index ef933d224..e2984a490 100644 --- a/src/torrent.rs +++ b/src/torrent.rs @@ -1,10 +1,12 @@ use std::borrow::Cow; use std::net::{IpAddr, SocketAddr}; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; + use 
crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId}; -use crate::torrust_http_tracker::AnnounceRequest; use crate::common::{AnnounceEventDef, NumberOfBytesDef}; +use crate::torrust_http_tracker::AnnounceRequest; #[derive(PartialEq, Eq, Debug, Clone, Serialize)] pub struct TorrentPeer { @@ -33,7 +35,7 @@ impl TorrentPeer { uploaded: announce_request.bytes_uploaded, downloaded: announce_request.bytes_downloaded, left: announce_request.bytes_left, - event: announce_request.event + event: announce_request.event, } } @@ -58,7 +60,7 @@ impl TorrentPeer { uploaded: NumberOfBytes(announce_request.uploaded as i64), downloaded: NumberOfBytes(announce_request.downloaded as i64), left: NumberOfBytes(announce_request.left as i64), - event + event, } } diff --git a/src/torrust_http_tracker/errors.rs b/src/torrust_http_tracker/errors.rs index d8d6c7623..fe0cf26e6 100644 --- a/src/torrust_http_tracker/errors.rs +++ b/src/torrust_http_tracker/errors.rs @@ -1,5 +1,5 @@ -use warp::reject::Reject; use thiserror::Error; +use warp::reject::Reject; #[derive(Error, Debug)] pub enum ServerError { diff --git a/src/torrust_http_tracker/filters.rs b/src/torrust_http_tracker/filters.rs index 61fa20a45..5c4fc9743 100644 --- a/src/torrust_http_tracker/filters.rs +++ b/src/torrust_http_tracker/filters.rs @@ -2,43 +2,45 @@ use std::convert::Infallible; use std::net::{IpAddr, SocketAddr}; use std::str::FromStr; use std::sync::Arc; + use log::debug; use warp::{Filter, reject, Rejection}; + use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId, TorrentTracker}; use crate::key_manager::AuthKey; use crate::torrust_http_tracker::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; /// Pass Arc along -pub fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { +pub fn with_tracker(tracker: Arc) -> impl Filter, ), Error=Infallible> + Clone { warp::any() .map(move || tracker.clone()) } /// Check for infoHash -pub fn with_info_hash() -> impl Filter,), Error = 
Rejection> + Clone { +pub fn with_info_hash() -> impl Filter, ), Error=Rejection> + Clone { warp::filters::query::raw() .and_then(info_hashes) } /// Check for PeerId -pub fn with_peer_id() -> impl Filter + Clone { +pub fn with_peer_id() -> impl Filter + Clone { warp::filters::query::raw() .and_then(peer_id) } /// Pass Arc along -pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { +pub fn with_auth_key() -> impl Filter, ), Error=Infallible> + Clone { warp::path::param::() .map(|key: String| { AuthKey::from_string(&key) }) .or_else(|_| async { - Ok::<(Option,), Infallible>((None,)) + Ok::<(Option, ), Infallible>((None, )) }) } /// Check for PeerAddress -pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { +pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { warp::addr::remote() .and(warp::header::optional::("X-Forwarded-For")) .map(move |remote_addr: Option, x_forwarded_for: Option| { @@ -48,7 +50,7 @@ pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { +pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::filters::query::query::() .and(with_info_hash()) .and(with_peer_id()) @@ -57,7 +59,7 @@ pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { +pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::any() .and(with_info_hash()) .and(with_peer_addr(on_reverse_proxy)) @@ -129,11 +131,11 @@ async fn peer_id(raw_query: String) -> WebResult { /// Get PeerAddress from RemoteAddress or Forwarded async fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option)) -> WebResult { if !on_reverse_proxy && remote_addr.is_none() { - return Err(reject::custom(ServerError::AddressNotFound)) + return Err(reject::custom(ServerError::AddressNotFound)); } if on_reverse_proxy && x_forwarded_for.is_none() { - return Err(reject::custom(ServerError::AddressNotFound)) + return 
Err(reject::custom(ServerError::AddressNotFound)); } match on_reverse_proxy { @@ -150,7 +152,7 @@ async fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Opti debug!("{}", e); Err(reject::custom(ServerError::AddressNotFound)) }) - }, + } false => Ok(remote_addr.unwrap().ip()) } } @@ -166,7 +168,7 @@ async fn announce_request(announce_request_query: AnnounceRequestQuery, info_has port: announce_request_query.port, left: announce_request_query.left.unwrap_or(0), event: announce_request_query.event, - compact: announce_request_query.compact + compact: announce_request_query.compact, }) } diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 8762faeaf..994b7b765 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -2,9 +2,11 @@ use std::collections::HashMap; use std::convert::Infallible; use std::net::IpAddr; use std::sync::Arc; + use log::debug; use warp::{reject, Rejection, Reply}; -use warp::http::{Response}; +use warp::http::Response; + use crate::{InfoHash, TorrentTracker}; use crate::key_manager::AuthKey; use crate::torrent::{TorrentError, TorrentPeer, TorrentStats}; @@ -34,7 +36,7 @@ pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, trac /// Handle announce request pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option, tracker: Arc) -> WebResult { if let Err(e) = authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await { - return Err(reject::custom(e)) + return Err(reject::custom(e)); } debug!("{:?}", announce_request); @@ -63,7 +65,7 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option { @@ -94,7 +96,7 @@ fn send_announce_response(announce_request: &AnnounceRequest, torrent_stats: Tor let http_peers: Vec = peers.iter().map(|peer| Peer { peer_id: peer.peer_id.to_string(), ip: peer.peer_addr.ip(), - port: peer.peer_addr.port() + port: peer.peer_addr.port(), }).collect(); let 
res = AnnounceResponse { @@ -102,7 +104,7 @@ fn send_announce_response(announce_request: &AnnounceRequest, torrent_stats: Tor interval_min, complete: torrent_stats.seeders, incomplete: torrent_stats.leechers, - peers: http_peers + peers: http_peers, }; // check for compact response request diff --git a/src/torrust_http_tracker/mod.rs b/src/torrust_http_tracker/mod.rs index ea6675dce..07d077577 100644 --- a/src/torrust_http_tracker/mod.rs +++ b/src/torrust_http_tracker/mod.rs @@ -1,3 +1,11 @@ +pub use self::errors::*; +pub use self::filters::*; +pub use self::handlers::*; +pub use self::request::*; +pub use self::response::*; +pub use self::routes::*; +pub use self::server::*; + pub mod server; pub mod request; pub mod response; @@ -6,13 +14,5 @@ pub mod routes; pub mod handlers; pub mod filters; -pub use self::server::*; -pub use self::request::*; -pub use self::response::*; -pub use self::errors::*; -pub use self::routes::*; -pub use self::handlers::*; -pub use self::filters::*; - pub type Bytes = u64; pub type WebResult = std::result::Result; diff --git a/src/torrust_http_tracker/request.rs b/src/torrust_http_tracker/request.rs index 0fb316671..487e53a13 100644 --- a/src/torrust_http_tracker/request.rs +++ b/src/torrust_http_tracker/request.rs @@ -1,5 +1,7 @@ -use std::net::{IpAddr}; -use serde::{Deserialize}; +use std::net::IpAddr; + +use serde::Deserialize; + use crate::{InfoHash, PeerId}; use crate::torrust_http_tracker::Bytes; diff --git a/src/torrust_http_tracker/response.rs b/src/torrust_http_tracker/response.rs index af27bc5e9..f57129cde 100644 --- a/src/torrust_http_tracker/response.rs +++ b/src/torrust_http_tracker/response.rs @@ -2,7 +2,8 @@ use std::collections::HashMap; use std::error::Error; use std::io::Write; use std::net::IpAddr; -use serde::{Serialize}; + +use serde::Serialize; #[derive(Serialize)] pub struct Peer { @@ -18,7 +19,7 @@ pub struct AnnounceResponse { //pub tracker_id: String, pub complete: u32, pub incomplete: u32, - pub peers: Vec + 
pub peers: Vec, } impl AnnounceResponse { @@ -75,7 +76,7 @@ pub struct ScrapeResponseEntry { #[derive(Serialize)] pub struct ScrapeResponse { - pub files: HashMap + pub files: HashMap, } impl ScrapeResponse { @@ -87,7 +88,7 @@ impl ScrapeResponse { #[derive(Serialize)] pub struct ErrorResponse { #[serde(rename = "failure reason")] - pub failure_reason: String + pub failure_reason: String, } impl ErrorResponse { diff --git a/src/torrust_http_tracker/routes.rs b/src/torrust_http_tracker/routes.rs index 4b4de722f..fb6bf5c16 100644 --- a/src/torrust_http_tracker/routes.rs +++ b/src/torrust_http_tracker/routes.rs @@ -1,11 +1,13 @@ use std::convert::Infallible; use std::sync::Arc; + use warp::{Filter, Rejection}; + use crate::TorrentTracker; -use crate::torrust_http_tracker::{handle_announce, send_error, handle_scrape, with_announce_request, with_auth_key, with_scrape_request, with_tracker}; +use crate::torrust_http_tracker::{handle_announce, handle_scrape, send_error, with_announce_request, with_auth_key, with_scrape_request, with_tracker}; /// All routes -pub fn routes(tracker: Arc,) -> impl Filter + Clone { +pub fn routes(tracker: Arc) -> impl Filter + Clone { root(tracker.clone()) .or(announce(tracker.clone())) .or(scrape(tracker.clone())) @@ -13,7 +15,7 @@ pub fn routes(tracker: Arc,) -> impl Filter -fn root(tracker: Arc,) -> impl Filter + Clone { +fn root(tracker: Arc) -> impl Filter + Clone { warp::any() .and(warp::filters::method::get()) .and(with_announce_request(tracker.config.on_reverse_proxy)) @@ -23,7 +25,7 @@ fn root(tracker: Arc,) -> impl Filter -fn announce(tracker: Arc,) -> impl Filter + Clone { +fn announce(tracker: Arc) -> impl Filter + Clone { warp::path::path("announce") .and(warp::filters::method::get()) .and(with_announce_request(tracker.config.on_reverse_proxy)) @@ -33,7 +35,7 @@ fn announce(tracker: Arc,) -> impl Filter -fn scrape(tracker: Arc,) -> impl Filter + Clone { +fn scrape(tracker: Arc) -> impl Filter + Clone { warp::path::path("scrape") 
.and(warp::filters::method::get()) .and(with_scrape_request(tracker.config.on_reverse_proxy)) diff --git a/src/torrust_http_tracker/server.rs b/src/torrust_http_tracker/server.rs index 69811b3d9..336670030 100644 --- a/src/torrust_http_tracker/server.rs +++ b/src/torrust_http_tracker/server.rs @@ -1,5 +1,6 @@ use std::net::SocketAddr; use std::sync::Arc; + use crate::TorrentTracker; use crate::torrust_http_tracker::routes; diff --git a/src/torrust_udp_tracker/handlers.rs b/src/torrust_udp_tracker/handlers.rs index df1a15451..3b8ece647 100644 --- a/src/torrust_udp_tracker/handlers.rs +++ b/src/torrust_udp_tracker/handlers.rs @@ -1,6 +1,8 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; + use aquatic_udp_protocol::{AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId}; + use crate::{InfoHash, MAX_SCRAPE_TORRENTS, TorrentTracker}; use crate::torrent::{TorrentError, TorrentPeer}; use crate::torrust_udp_tracker::errors::ServerError; @@ -103,15 +105,15 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc leechers: NumberOfPeers(torrent_stats.leechers as i32), seeders: NumberOfPeers(torrent_stats.seeders as i32), peers: peers.iter() - .filter_map(|peer| if let IpAddr::V4(ip) = peer.peer_addr.ip() { + .filter_map(|peer| if let IpAddr::V4(ip) = peer.peer_addr.ip() { Some(ResponsePeer:: { ip_address: ip, - port: Port(peer.peer_addr.port()) + port: Port(peer.peer_addr.port()), }) } else { None } - ).collect() + ).collect(), }) } else { Response::from(AnnounceResponse { @@ -120,15 +122,15 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc leechers: NumberOfPeers(torrent_stats.leechers as i32), seeders: NumberOfPeers(torrent_stats.seeders as i32), peers: peers.iter() - .filter_map(|peer| if let 
IpAddr::V6(ip) = peer.peer_addr.ip() { + .filter_map(|peer| if let IpAddr::V6(ip) = peer.peer_addr.ip() { Some(ResponsePeer:: { ip_address: ip, - port: Port(peer.peer_addr.port()) + port: Port(peer.peer_addr.port()), }) } else { None } - ).collect() + ).collect(), }) }; @@ -150,7 +152,7 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra for info_hash in request.info_hashes.iter() { let info_hash = InfoHash(info_hash.0); - if authenticate(&info_hash, tracker.clone()).await.is_err() { continue } + if authenticate(&info_hash, tracker.clone()).await.is_err() { continue; } let scrape_entry = match db.get(&info_hash) { Some(torrent_info) => { @@ -182,7 +184,7 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra Ok(Response::from(ScrapeResponse { transaction_id: request.transaction_id, - torrent_stats + torrent_stats, })) } diff --git a/src/torrust_udp_tracker/mod.rs b/src/torrust_udp_tracker/mod.rs index 6aa5fbce0..25780ba93 100644 --- a/src/torrust_udp_tracker/mod.rs +++ b/src/torrust_udp_tracker/mod.rs @@ -1,13 +1,13 @@ +pub use self::errors::*; +pub use self::handlers::*; +pub use self::request::*; +pub use self::server::*; + pub mod errors; pub mod request; pub mod server; pub mod handlers; -pub use self::errors::*; -pub use self::request::*; -pub use self::server::*; -pub use self::handlers::*; - pub type Bytes = u64; pub type Port = u16; pub type TransactionId = i64; diff --git a/src/torrust_udp_tracker/request.rs b/src/torrust_udp_tracker/request.rs index f3f67fdc1..6531f54b9 100644 --- a/src/torrust_udp_tracker/request.rs +++ b/src/torrust_udp_tracker/request.rs @@ -1,5 +1,6 @@ -use aquatic_udp_protocol::{AnnounceRequest}; -use crate::{InfoHash}; +use aquatic_udp_protocol::AnnounceRequest; + +use crate::InfoHash; // struct AnnounceRequest { // pub connection_id: i64, @@ -25,7 +26,7 @@ impl AnnounceRequestWrapper { pub fn new(announce_request: AnnounceRequest) -> Self { AnnounceRequestWrapper { 
announce_request: announce_request.clone(), - info_hash: InfoHash(announce_request.info_hash.0) + info_hash: InfoHash(announce_request.info_hash.0), } } } diff --git a/src/torrust_udp_tracker/server.rs b/src/torrust_udp_tracker/server.rs index cae1e5b94..8dc34d85d 100644 --- a/src/torrust_udp_tracker/server.rs +++ b/src/torrust_udp_tracker/server.rs @@ -1,10 +1,12 @@ use std::io::Cursor; -use std::net::{SocketAddr}; +use std::net::SocketAddr; use std::sync::Arc; -use aquatic_udp_protocol::{Response}; + +use aquatic_udp_protocol::Response; use log::{debug, info}; use tokio::net::UdpSocket; -use crate::{TorrentTracker}; + +use crate::TorrentTracker; use crate::torrust_udp_tracker::{handle_packet, MAX_PACKET_SIZE}; pub struct UdpServer { diff --git a/src/tracker.rs b/src/tracker.rs index 3a69e3e0b..4f31256ea 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -1,15 +1,17 @@ +use std::collections::btree_map::Entry; +use std::collections::BTreeMap; +use std::net::SocketAddr; +use std::sync::Arc; + +use log::info; use serde::{Deserialize, Serialize}; use serde; -use std::collections::BTreeMap; use tokio::sync::{RwLock, RwLockReadGuard}; -use crate::common::{InfoHash}; -use std::net::{SocketAddr}; + use crate::{Configuration, database, key_manager}; -use std::collections::btree_map::Entry; -use std::sync::Arc; -use log::info; +use crate::common::InfoHash; +use crate::database::Database; use crate::key_manager::AuthKey; -use crate::database::{Database}; use crate::key_manager::Error::KeyInvalid; use crate::torrent::{TorrentEntry, TorrentError, TorrentPeer, TorrentStats}; use crate::tracker_stats::{StatsTracker, TrackerStats}; @@ -39,7 +41,7 @@ pub struct TorrentTracker { updates: tokio::sync::RwLock>, shadow: tokio::sync::RwLock>, database: Box, - pub stats_tracker: StatsTracker + pub stats_tracker: StatsTracker, } impl TorrentTracker { @@ -55,7 +57,7 @@ impl TorrentTracker { updates: RwLock::new(std::collections::HashMap::new()), shadow: 
RwLock::new(std::collections::HashMap::new()), database, - stats_tracker + stats_tracker, }) } @@ -75,7 +77,7 @@ impl TorrentTracker { let auth_key = key_manager::generate_auth_key(seconds_valid); // add key to database - if let Err(error) = self.database.add_key_to_keys(&auth_key).await { return Err(error) } + if let Err(error) = self.database.add_key_to_keys(&auth_key).await { return Err(error); } Ok(auth_key) } @@ -91,18 +93,18 @@ impl TorrentTracker { pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { // no authentication needed in public mode - if self.is_public() { return Ok(()) } + if self.is_public() { return Ok(()); } // check if auth_key is set and valid if self.is_private() { match key { Some(key) => { if self.verify_auth_key(key).await.is_err() { - return Err(TorrentError::PeerKeyNotValid) + return Err(TorrentError::PeerKeyNotValid); } } None => { - return Err(TorrentError::PeerNotAuthenticated) + return Err(TorrentError::PeerNotAuthenticated); } } } @@ -110,7 +112,7 @@ impl TorrentTracker { // check if info_hash is whitelisted if self.is_whitelisted() { if self.is_info_hash_whitelisted(info_hash).await == false { - return Err(TorrentError::TorrentNotWhitelisted) + return Err(TorrentError::TorrentNotWhitelisted); } } @@ -155,7 +157,7 @@ impl TorrentTracker { pub async fn get_torrent_peers( &self, info_hash: &InfoHash, - peer_addr: &SocketAddr + peer_addr: &SocketAddr, ) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { @@ -205,7 +207,7 @@ impl TorrentTracker { let torrent_entry = TorrentEntry { peers: Default::default(), completed, - seeders + seeders, }; torrents.insert(info_hash.clone(), torrent_entry); } @@ -254,10 +256,10 @@ impl TorrentTracker { // Let's iterate through all torrents, and parse. 
for hash in torrent_hashes.iter() { - let mut torrent = TorrentEntry{ + let mut torrent = TorrentEntry { peers: BTreeMap::new(), completed: 0, - seeders: 0 + seeders: 0, }; let lock = self.torrents.write().await; @@ -307,10 +309,10 @@ impl TorrentTracker { // We get shadow data into local array to be handled. let mut shadow_copy: BTreeMap = BTreeMap::new(); for (infohash, completed) in shadow.iter() { - shadow_copy.insert(*infohash, TorrentEntry{ + shadow_copy.insert(*infohash, TorrentEntry { peers: Default::default(), completed: *completed, - seeders: 0 + seeders: 0, }); } diff --git a/src/tracker_stats.rs b/src/tracker_stats.rs index 1a6a71c2b..0bcd781ba 100644 --- a/src/tracker_stats.rs +++ b/src/tracker_stats.rs @@ -1,6 +1,7 @@ use std::sync::Arc; + use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; -use tokio::sync::mpsc::{Sender}; +use tokio::sync::mpsc::Sender; use tokio::sync::mpsc::error::SendError; const CHANNEL_BUFFER_SIZE: usize = 65_535; @@ -16,7 +17,7 @@ pub enum TrackerStatsEvent { Udp4Scrape, Udp6Connect, Udp6Announce, - Udp6Scrape + Udp6Scrape, } #[derive(Debug)] @@ -56,14 +57,14 @@ impl TrackerStats { pub struct StatsTracker { channel_sender: Option>, - pub stats: Arc> + pub stats: Arc>, } impl StatsTracker { pub fn new() -> Self { Self { channel_sender: None, - stats: Arc::new(RwLock::new(TrackerStats::new())) + stats: Arc::new(RwLock::new(TrackerStats::new())), } } diff --git a/src/utils.rs b/src/utils.rs index e3a8302df..fb2a94513 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,8 +1,9 @@ -use std::net::SocketAddr; -use std::time::SystemTime; use std::error::Error; use std::fmt::Write; use std::io::Cursor; +use std::net::SocketAddr; +use std::time::SystemTime; + use aquatic_udp_protocol::ConnectionId; use byteorder::{BigEndian, ReadBytesExt}; From 0604cb9ad5318adc57e366011a5da32ad8071f80 Mon Sep 17 00:00:00 2001 From: Power2All Date: Sat, 16 Apr 2022 14:07:12 +0200 Subject: [PATCH 0080/1003] FIxing MySQL support --- src/mysql_database.rs | 17 
+++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/mysql_database.rs b/src/mysql_database.rs index be1fe649b..523a54cf3 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -36,20 +36,20 @@ impl Database for MysqlDatabase { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( id integer PRIMARY KEY AUTO_INCREMENT, - info_hash VARCHAR(20) NOT NULL UNIQUE + info_hash BINARY(20) NOT NULL UNIQUE );".to_string(); let create_torrents_table = " CREATE TABLE IF NOT EXISTS torrents ( id integer PRIMARY KEY AUTO_INCREMENT, - info_hash VARCHAR(20) NOT NULL UNIQUE, + info_hash BINARY(20) NOT NULL UNIQUE, completed INTEGER DEFAULT 0 NOT NULL );".to_string(); let create_keys_table = format!(" CREATE TABLE IF NOT EXISTS `keys` ( `id` INT NOT NULL AUTO_INCREMENT, - `key` VARCHAR({}) NOT NULL, + `key` BINARY({}) NOT NULL, `valid_until` INT(10) NOT NULL, PRIMARY KEY (`id`), UNIQUE (`key`) @@ -67,7 +67,7 @@ impl Database for MysqlDatabase { async fn load_persistent_torrent_data(&self) -> Result, database::Error> { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; - let torrents: Vec<(InfoHash, u32)> = conn.query_map("SELECT info_hash, completed FROM torrents", |(info_hash_string, completed): (String, u32)| { + let torrents: Vec<(InfoHash, u32)> = conn.query_map("SELECT HEX(info_hash), completed FROM torrents", |(info_hash_string, completed): (String, u32)| { let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); (info_hash, completed) }).map_err(|_| database::Error::QueryReturnedNoRows)?; @@ -82,7 +82,8 @@ impl Database for MysqlDatabase { for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (?, ?) 
ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())); + let _ = db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())); + debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = completed", info_hash.to_string(), completed.to_string()); } let _ = db_transaction.commit(); @@ -93,7 +94,7 @@ impl Database for MysqlDatabase { async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; - match conn.exec_first::("SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", params! { info_hash => info_hash }) + match conn.exec_first::("SELECT HEX(info_hash) FROM whitelist WHERE info_hash = UNHEX(:info_hash)", params! { info_hash => info_hash }) .map_err(|_| database::Error::QueryReturnedNoRows)? { Some(info_hash) => { Ok(InfoHash::from_str(&info_hash).unwrap()) @@ -109,7 +110,7 @@ impl Database for MysqlDatabase { let info_hash_str = info_hash.to_string(); - match conn.exec_drop("INSERT INTO whitelist (info_hash) VALUES (:info_hash_str)", params! { info_hash_str }) { + match conn.exec_drop("INSERT INTO whitelist (info_hash) VALUES (UNHEX(:info_hash_str))", params! { info_hash_str }) { Ok(_) => { Ok(1) } @@ -125,7 +126,7 @@ impl Database for MysqlDatabase { let info_hash = info_hash.to_string(); - match conn.exec_drop("DELETE FROM whitelist WHERE info_hash = :info_hash", params! { info_hash }) { + match conn.exec_drop("DELETE FROM whitelist WHERE info_hash = UNHEX(:info_hash)", params! 
{ info_hash }) { Ok(_) => { Ok(1) } From 3a839cddb7962a433bba11838cba0e158357ac35 Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 17 Apr 2022 13:24:31 +0200 Subject: [PATCH 0081/1003] Fixing SQL issue and locking problem --- src/mysql_database.rs | 10 +++++++--- src/tracker.rs | 28 ++++++++++++++++++---------- 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/src/mysql_database.rs b/src/mysql_database.rs index 523a54cf3..7ecae214a 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -9,7 +9,7 @@ use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::MysqlConnectionManager; use crate::{AUTH_KEY_LENGTH, database, InfoHash}; -use crate::database::Database; +use crate::database::{Database, Error}; use crate::key_manager::AuthKey; use crate::torrent::TorrentEntry; @@ -82,11 +82,15 @@ impl Database for MysqlDatabase { for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())); + if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) 
ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())).is_err() { + return Err(Error::InvalidQuery); + } debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = completed", info_hash.to_string(), completed.to_string()); } - let _ = db_transaction.commit(); + if db_transaction.commit().is_err() { + return Err(Error::DatabaseError); + }; Ok(()) } diff --git a/src/tracker.rs b/src/tracker.rs index 4f31256ea..a95c35c24 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -243,6 +243,7 @@ impl TorrentTracker { // remove torrents without peers if enabled, and defragment memory pub async fn cleanup_torrents(&self) { info!("Cleaning torrents..."); + let lock = self.torrents.write().await; // First we create a mapping of all the torrent hashes in a vector, and we use this to iterate through the btreemap. @@ -292,23 +293,30 @@ impl TorrentTracker { pub async fn periodic_saving(&self) { // Get a lock for writing - let mut shadow = self.shadow.write().await; + // let mut shadow = self.shadow.write().await; // We will get the data and insert it into the shadow, while clearing updates. let mut updates = self.updates.write().await; - - for (infohash, completed) in updates.iter() { - if shadow.contains_key(infohash) { - shadow.remove(infohash); - } - shadow.insert(*infohash, *completed); + let mut updates_cloned: std::collections::HashMap = std::collections::HashMap::new(); + // let mut torrent_hashes: Vec = Vec::new(); + for (k, completed) in updates.iter() { + updates_cloned.insert(*k, *completed); } updates.clear(); drop(updates); - // We get shadow data into local array to be handled. 
+ let mut shadows = self.shadow.write().await; + for (k, completed) in updates_cloned.iter() { + if shadows.contains_key(k) { + shadows.remove(k); + } + shadows.insert(*k, *completed); + } + drop(updates_cloned); + + // We updated the shadow data from the updates data, let's handle shadow data as expected. let mut shadow_copy: BTreeMap = BTreeMap::new(); - for (infohash, completed) in shadow.iter() { + for (infohash, completed) in shadows.iter() { shadow_copy.insert(*infohash, TorrentEntry { peers: Default::default(), completed: *completed, @@ -317,7 +325,7 @@ impl TorrentTracker { } // Drop the lock - drop(shadow); + drop(shadows); // We will now save the data from the shadow into the database. // This should not put any strain on the server itself, other then the harddisk/ssd. From 93b053793af0a6e59fb25a921d675505885cc6ee Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sun, 17 Apr 2022 13:31:40 +0200 Subject: [PATCH 0082/1003] feat: added statistics config option --- src/config.rs | 2 ++ src/torrust_http_tracker/handlers.rs | 8 +++--- src/torrust_udp_tracker/handlers.rs | 12 ++++----- src/tracker.rs | 37 +++++++++++++++++----------- 4 files changed, 34 insertions(+), 25 deletions(-) diff --git a/src/config.rs b/src/config.rs index 67078d608..b46f29d69 100644 --- a/src/config.rs +++ b/src/config.rs @@ -56,6 +56,7 @@ pub struct Configuration { pub mode: TrackerMode, pub db_driver: DatabaseDrivers, pub db_path: String, + pub statistics: bool, pub persistence: bool, pub persistence_interval: Option, pub cleanup_interval: Option, @@ -141,6 +142,7 @@ impl Configuration { mode: TrackerMode::PublicMode, db_driver: DatabaseDrivers::Sqlite3, db_path: String::from("data.db"), + statistics: true, persistence: false, persistence_interval: Some(900), cleanup_interval: Some(600), diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 8762faeaf..8e8f2576f 100644 --- a/src/torrust_http_tracker/handlers.rs +++ 
b/src/torrust_http_tracker/handlers.rs @@ -49,8 +49,8 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option // send stats event match announce_request.peer_addr { - IpAddr::V4(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Tcp4Announce).await; } - IpAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Tcp6Announce).await; } + IpAddr::V4(_) => { tracker.send_stats_event(TrackerStatsEvent::Tcp4Announce).await; } + IpAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Tcp6Announce).await; } } send_announce_response(&announce_request, torrent_stats, peers, announce_interval, tracker.config.announce_interval_min) @@ -82,8 +82,8 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option { tracker.stats_tracker.send_event(TrackerStatsEvent::Tcp4Scrape).await; } - IpAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Tcp6Scrape).await; } + IpAddr::V4(_) => { tracker.send_stats_event(TrackerStatsEvent::Tcp4Scrape).await; } + IpAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Tcp6Scrape).await; } } send_scrape_response(files) diff --git a/src/torrust_udp_tracker/handlers.rs b/src/torrust_udp_tracker/handlers.rs index df1a15451..c94e2e917 100644 --- a/src/torrust_udp_tracker/handlers.rs +++ b/src/torrust_udp_tracker/handlers.rs @@ -75,8 +75,8 @@ pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, t // send stats event match remote_addr { - SocketAddr::V4(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp4Connect).await; } - SocketAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp6Connect).await; } + SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp4Connect).await; } + SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp6Connect).await; } } Ok(response) @@ -134,8 +134,8 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc // send 
stats event match remote_addr { - SocketAddr::V4(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp4Announce).await; } - SocketAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp6Announce).await; } + SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp4Announce).await; } + SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp6Announce).await; } } Ok(announce_response) @@ -176,8 +176,8 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra // send stats event match remote_addr { - SocketAddr::V4(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp4Scrape).await; } - SocketAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp6Scrape).await; } + SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp4Scrape).await; } + SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp6Scrape).await; } } Ok(Response::from(ScrapeResponse { diff --git a/src/tracker.rs b/src/tracker.rs index defdecc1f..7a036c1af 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -6,16 +6,16 @@ use crate::common::{InfoHash}; use std::net::{SocketAddr}; use crate::{Configuration, database, key_manager}; use std::collections::btree_map::Entry; -use std::mem; use std::sync::Arc; use log::info; +use tokio::sync::mpsc::error::SendError; use crate::key_manager::AuthKey; use crate::database::{Database}; use crate::key_manager::Error::KeyInvalid; use crate::torrent::{TorrentEntry, TorrentError, TorrentPeer, TorrentStats}; -use crate::tracker_stats::{StatsTracker, TrackerStats}; +use crate::tracker_stats::{StatsTracker, TrackerStats, TrackerStatsEvent}; -#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)] +#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Debug)] pub enum TrackerMode { // Will track every new info hash and serve every peer. 
#[serde(rename = "public")] @@ -36,12 +36,13 @@ pub enum TrackerMode { pub struct TorrentTracker { + mode: TrackerMode, pub config: Arc, - torrents: tokio::sync::RwLock>, - updates: tokio::sync::RwLock>, - shadow: tokio::sync::RwLock>, - database: Box, - pub stats_tracker: StatsTracker + torrents: RwLock>, + updates: RwLock>, + shadow: RwLock>, + stats_tracker: StatsTracker, + database: Box } impl TorrentTracker { @@ -49,28 +50,30 @@ impl TorrentTracker { let database = database::connect_database(&config.db_driver, &config.db_path)?; let mut stats_tracker = StatsTracker::new(); - stats_tracker.run_worker(); + // starts a thread for updating tracker stats + if config.statistics { stats_tracker.run_worker(); } Ok(TorrentTracker { - config, + mode: config.mode, + config: config.clone(), torrents: RwLock::new(std::collections::BTreeMap::new()), updates: RwLock::new(std::collections::HashMap::new()), shadow: RwLock::new(std::collections::HashMap::new()), - database, - stats_tracker + stats_tracker, + database }) } pub fn is_public(&self) -> bool { - self.config.mode == TrackerMode::PublicMode + self.mode == TrackerMode::PublicMode } pub fn is_private(&self) -> bool { - self.config.mode == TrackerMode::PrivateMode || self.config.mode == TrackerMode::PrivateListedMode + self.mode == TrackerMode::PrivateMode || self.mode == TrackerMode::PrivateListedMode } pub fn is_whitelisted(&self) -> bool { - self.config.mode == TrackerMode::ListedMode || self.config.mode == TrackerMode::PrivateListedMode + self.mode == TrackerMode::ListedMode || self.mode == TrackerMode::PrivateListedMode } pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { @@ -227,6 +230,10 @@ impl TorrentTracker { self.stats_tracker.get_stats().await } + pub async fn send_stats_event(&self, event: TrackerStatsEvent) -> Option>> { + self.stats_tracker.send_event(event).await + } + pub async fn post_log(&self) { let torrents = self.torrents.read().await; let torrents_size = torrents.len(); From 
9d0714db67db0b17610f525516f2ff5d9570c256 Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 17 Apr 2022 14:04:15 +0200 Subject: [PATCH 0083/1003] Updating README --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 929585c11..bb4649271 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,8 @@ Torrust Tracker is a lightweight but incredibly powerful and feature-rich BitTor * [X] Peer authentication using time-bound keys * [X] newTrackon check supported for both HTTP, UDP, where IPv4 and IPv6 is properly handled * [X] SQLite3 Persistent loading and saving of the torrent hashes and completed count +* [X] MySQL support added as engine option +* [X] Periodically saving added, interval can be configured ### Implemented BEPs * [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The BitTorrent Protocol From 9bc1ebb6057e94b7c70e4571f6e4ab67da6c142c Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 17 Apr 2022 15:50:59 +0200 Subject: [PATCH 0084/1003] Fixing possible bug --- src/tracker.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/tracker.rs b/src/tracker.rs index 0e42f69e1..a67cb62e6 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; -use log::info; +use log::{debug, info}; use serde::{Deserialize, Serialize}; use serde; use tokio::sync::{RwLock, RwLockReadGuard}; @@ -128,6 +128,7 @@ impl TorrentTracker { let torrents = self.database.load_persistent_torrent_data().await?; for torrent in torrents { + debug!("{:#?}", torrent); let _ = self.add_torrent(torrent.0, 0, torrent.1, 0).await; } @@ -308,7 +309,7 @@ impl TorrentTracker { let mut updates_cloned: std::collections::HashMap = std::collections::HashMap::new(); // let mut torrent_hashes: Vec = Vec::new(); for (k, completed) in updates.iter() { - updates_cloned.insert(*k, *completed); + updates_cloned.insert(k.clone(), completed.clone()); } 
updates.clear(); drop(updates); @@ -318,16 +319,16 @@ impl TorrentTracker { if shadows.contains_key(k) { shadows.remove(k); } - shadows.insert(*k, *completed); + shadows.insert(k.clone(), completed.clone()); } drop(updates_cloned); // We updated the shadow data from the updates data, let's handle shadow data as expected. let mut shadow_copy: BTreeMap = BTreeMap::new(); for (infohash, completed) in shadows.iter() { - shadow_copy.insert(*infohash, TorrentEntry { + shadow_copy.insert(infohash.clone(), TorrentEntry { peers: Default::default(), - completed: *completed, + completed: completed.clone(), seeders: 0, }); } From 0c332cf382cbc26c325cd79e557b894e3aa57d9c Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 17 Apr 2022 16:24:04 +0200 Subject: [PATCH 0085/1003] Fixing a buggy SQLite3 and MySQL --- src/mysql_database.rs | 4 ++-- src/sqlite_database.rs | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/mysql_database.rs b/src/mysql_database.rs index 7ecae214a..eea2190a3 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -82,10 +82,10 @@ impl Database for MysqlDatabase { for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())).is_err() { + if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) 
ON DUPLICATE KEY UPDATE completed = VALUES(completed)", (info_hash.to_string(), completed.to_string())).is_err() { return Err(Error::InvalidQuery); } - debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = completed", info_hash.to_string(), completed.to_string()); + debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = VALUES(completed)", info_hash.to_string(), completed.to_string()); } if db_transaction.commit().is_err() { diff --git a/src/sqlite_database.rs b/src/sqlite_database.rs index fa519ffd0..82bb9d4fc 100644 --- a/src/sqlite_database.rs +++ b/src/sqlite_database.rs @@ -82,7 +82,8 @@ impl Database for SqliteDatabase { for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); + let _ = db_transaction.execute("INSERT OR IGNORE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); + let _ = db_transaction.execute("UPDATE torrents SET completed = ? 
WHERE info_hash = ?", &[completed.to_string(), info_hash.to_string()]); } let _ = db_transaction.commit(); From 84c6ca460c0f8d6a6bb1c63e1203bfe1530439a0 Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 17 Apr 2022 17:04:52 +0200 Subject: [PATCH 0086/1003] Speeding up inserting --- src/mysql_database.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/mysql_database.rs b/src/mysql_database.rs index eea2190a3..ac8bbb54a 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -80,12 +80,24 @@ impl Database for MysqlDatabase { let mut db_transaction = conn.start_transaction(TxOpts::default()).map_err(|_| database::Error::DatabaseError)?; + let mut insert_vector= vec![]; + for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) ON DUPLICATE KEY UPDATE completed = VALUES(completed)", (info_hash.to_string(), completed.to_string())).is_err() { + insert_vector.push("(UNHEX('" + info_hash.to_string() + "'), " + completed.to_string() + ")"); + if insert_vector.len() == 1000 { + if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES " + insert_vector.join(",") + "ON DUPLICATE KEY UPDATE completed = VALUES(completed)").is_err() { + return Err(Error::InvalidQuery); + } + insert_vector.clear(); + } + } + + if insert_vector.len() != 0 { + if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES " + insert_vector.join(",") + "ON DUPLICATE KEY UPDATE completed = VALUES(completed)").is_err() { return Err(Error::InvalidQuery); } - debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = VALUES(completed)", info_hash.to_string(), completed.to_string()); + insert_vector.clear(); } if db_transaction.commit().is_err() { From 8ebd1e5f1164dff111049efcda15a43bd1277a1b Mon Sep 17 00:00:00 
2001 From: Power2All Date: Sun, 17 Apr 2022 17:13:19 +0200 Subject: [PATCH 0087/1003] Typo's --- src/mysql_database.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/mysql_database.rs b/src/mysql_database.rs index ac8bbb54a..15e2de633 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -84,9 +84,10 @@ impl Database for MysqlDatabase { for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - insert_vector.push("(UNHEX('" + info_hash.to_string() + "'), " + completed.to_string() + ")"); + insert_vector.push(format!("(UNHEX('{}'), {})", info_hash.to_string(), completed.to_string())); if insert_vector.len() == 1000 { - if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES " + insert_vector.join(",") + "ON DUPLICATE KEY UPDATE completed = VALUES(completed)").is_err() { + let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); + if db_transaction.query_drop(query).is_err() { return Err(Error::InvalidQuery); } insert_vector.clear(); @@ -94,7 +95,8 @@ impl Database for MysqlDatabase { } if insert_vector.len() != 0 { - if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES " + insert_vector.join(",") + "ON DUPLICATE KEY UPDATE completed = VALUES(completed)").is_err() { + let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); + if db_transaction.query_drop(query).is_err() { return Err(Error::InvalidQuery); } insert_vector.clear(); From 1b4decac685b374cf1b4abab9cae64fb98a21ebc Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 17 Apr 2022 22:06:33 +0200 Subject: [PATCH 0088/1003] Looking for reason of timeout --- src/tracker.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/tracker.rs b/src/tracker.rs index 
a67cb62e6..c523cefba 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -308,6 +308,7 @@ impl TorrentTracker { let mut updates = self.updates.write().await; let mut updates_cloned: std::collections::HashMap = std::collections::HashMap::new(); // let mut torrent_hashes: Vec = Vec::new(); + info!("Copying updates to updates_cloned..."); for (k, completed) in updates.iter() { updates_cloned.insert(k.clone(), completed.clone()); } @@ -315,6 +316,7 @@ impl TorrentTracker { drop(updates); let mut shadows = self.shadow.write().await; + info!("Copying updates_cloned into the shadow to overwrite..."); for (k, completed) in updates_cloned.iter() { if shadows.contains_key(k) { shadows.remove(k); @@ -324,6 +326,7 @@ impl TorrentTracker { drop(updates_cloned); // We updated the shadow data from the updates data, let's handle shadow data as expected. + info!("Handle shadow_copy to be updated into SQL..."); let mut shadow_copy: BTreeMap = BTreeMap::new(); for (infohash, completed) in shadows.iter() { shadow_copy.insert(infohash.clone(), TorrentEntry { @@ -338,11 +341,15 @@ impl TorrentTracker { // We will now save the data from the shadow into the database. // This should not put any strain on the server itself, other then the harddisk/ssd. 
+ info!("Start saving shadow data into SQL..."); let result = self.database.save_persistent_torrent_data(&shadow_copy).await; if result.is_ok() { + info!("Done saving data to SQL and succeeded, emptying shadow..."); let mut shadow = self.shadow.write().await; shadow.clear(); drop(shadow); + } else { + info!("Done saving data to SQL and failed, not emptying shadow..."); } } } From 035c28d9cc6d52c83e08eda87b99b1562db60b75 Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 17 Apr 2022 22:10:16 +0200 Subject: [PATCH 0089/1003] Improving locking --- src/tracker.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/tracker.rs b/src/tracker.rs index c523cefba..8ad1faf2f 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -315,19 +315,21 @@ impl TorrentTracker { updates.clear(); drop(updates); - let mut shadows = self.shadow.write().await; info!("Copying updates_cloned into the shadow to overwrite..."); for (k, completed) in updates_cloned.iter() { + let mut shadows = self.shadow.write().await; if shadows.contains_key(k) { shadows.remove(k); } shadows.insert(k.clone(), completed.clone()); + drop(shadows); } drop(updates_cloned); // We updated the shadow data from the updates data, let's handle shadow data as expected. info!("Handle shadow_copy to be updated into SQL..."); let mut shadow_copy: BTreeMap = BTreeMap::new(); + let mut shadows = self.shadow.write().await; for (infohash, completed) in shadows.iter() { shadow_copy.insert(infohash.clone(), TorrentEntry { peers: Default::default(), @@ -335,8 +337,6 @@ impl TorrentTracker { seeders: 0, }); } - - // Drop the lock drop(shadows); // We will now save the data from the shadow into the database. 
From aea2de04851058a58faf940f20cff65b315605f4 Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 17 Apr 2022 22:12:11 +0200 Subject: [PATCH 0090/1003] Typo --- src/tracker.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tracker.rs b/src/tracker.rs index 8ad1faf2f..c0c25bc41 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -329,7 +329,7 @@ impl TorrentTracker { // We updated the shadow data from the updates data, let's handle shadow data as expected. info!("Handle shadow_copy to be updated into SQL..."); let mut shadow_copy: BTreeMap = BTreeMap::new(); - let mut shadows = self.shadow.write().await; + let shadows = self.shadow.read().await; for (infohash, completed) in shadows.iter() { shadow_copy.insert(infohash.clone(), TorrentEntry { peers: Default::default(), From 5d1ad9d959ad3de5cf3bac43142266a8a12ab418 Mon Sep 17 00:00:00 2001 From: WarmBeer Date: Mon, 25 Apr 2022 19:13:48 +0200 Subject: [PATCH 0091/1003] Revert "Development" --- README.md | 2 -- src/mysql_database.rs | 18 ++---------------- src/sqlite_database.rs | 3 +-- src/tracker.rs | 24 ++++++++---------------- 4 files changed, 11 insertions(+), 36 deletions(-) diff --git a/README.md b/README.md index bb4649271..929585c11 100644 --- a/README.md +++ b/README.md @@ -14,8 +14,6 @@ Torrust Tracker is a lightweight but incredibly powerful and feature-rich BitTor * [X] Peer authentication using time-bound keys * [X] newTrackon check supported for both HTTP, UDP, where IPv4 and IPv6 is properly handled * [X] SQLite3 Persistent loading and saving of the torrent hashes and completed count -* [X] MySQL support added as engine option -* [X] Periodically saving added, interval can be configured ### Implemented BEPs * [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The BitTorrent Protocol diff --git a/src/mysql_database.rs b/src/mysql_database.rs index 15e2de633..7ecae214a 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -80,26 +80,12 @@ impl Database for 
MysqlDatabase { let mut db_transaction = conn.start_transaction(TxOpts::default()).map_err(|_| database::Error::DatabaseError)?; - let mut insert_vector= vec![]; - for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - insert_vector.push(format!("(UNHEX('{}'), {})", info_hash.to_string(), completed.to_string())); - if insert_vector.len() == 1000 { - let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); - if db_transaction.query_drop(query).is_err() { - return Err(Error::InvalidQuery); - } - insert_vector.clear(); - } - } - - if insert_vector.len() != 0 { - let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); - if db_transaction.query_drop(query).is_err() { + if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())).is_err() { return Err(Error::InvalidQuery); } - insert_vector.clear(); + debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = completed", info_hash.to_string(), completed.to_string()); } if db_transaction.commit().is_err() { diff --git a/src/sqlite_database.rs b/src/sqlite_database.rs index 82bb9d4fc..fa519ffd0 100644 --- a/src/sqlite_database.rs +++ b/src/sqlite_database.rs @@ -82,8 +82,7 @@ impl Database for SqliteDatabase { for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.execute("INSERT OR IGNORE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); - let _ = db_transaction.execute("UPDATE torrents SET completed = ? 
WHERE info_hash = ?", &[completed.to_string(), info_hash.to_string()]); + let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); } let _ = db_transaction.commit(); diff --git a/src/tracker.rs b/src/tracker.rs index c0c25bc41..0e42f69e1 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; -use log::{debug, info}; +use log::info; use serde::{Deserialize, Serialize}; use serde; use tokio::sync::{RwLock, RwLockReadGuard}; @@ -128,7 +128,6 @@ impl TorrentTracker { let torrents = self.database.load_persistent_torrent_data().await?; for torrent in torrents { - debug!("{:#?}", torrent); let _ = self.add_torrent(torrent.0, 0, torrent.1, 0).await; } @@ -308,48 +307,41 @@ impl TorrentTracker { let mut updates = self.updates.write().await; let mut updates_cloned: std::collections::HashMap = std::collections::HashMap::new(); // let mut torrent_hashes: Vec = Vec::new(); - info!("Copying updates to updates_cloned..."); for (k, completed) in updates.iter() { - updates_cloned.insert(k.clone(), completed.clone()); + updates_cloned.insert(*k, *completed); } updates.clear(); drop(updates); - info!("Copying updates_cloned into the shadow to overwrite..."); + let mut shadows = self.shadow.write().await; for (k, completed) in updates_cloned.iter() { - let mut shadows = self.shadow.write().await; if shadows.contains_key(k) { shadows.remove(k); } - shadows.insert(k.clone(), completed.clone()); - drop(shadows); + shadows.insert(*k, *completed); } drop(updates_cloned); // We updated the shadow data from the updates data, let's handle shadow data as expected. 
- info!("Handle shadow_copy to be updated into SQL..."); let mut shadow_copy: BTreeMap = BTreeMap::new(); - let shadows = self.shadow.read().await; for (infohash, completed) in shadows.iter() { - shadow_copy.insert(infohash.clone(), TorrentEntry { + shadow_copy.insert(*infohash, TorrentEntry { peers: Default::default(), - completed: completed.clone(), + completed: *completed, seeders: 0, }); } + + // Drop the lock drop(shadows); // We will now save the data from the shadow into the database. // This should not put any strain on the server itself, other then the harddisk/ssd. - info!("Start saving shadow data into SQL..."); let result = self.database.save_persistent_torrent_data(&shadow_copy).await; if result.is_ok() { - info!("Done saving data to SQL and succeeded, emptying shadow..."); let mut shadow = self.shadow.write().await; shadow.clear(); drop(shadow); - } else { - info!("Done saving data to SQL and failed, not emptying shadow..."); } } } From cef2016c918825694ea51eaa4b4527c62af454f9 Mon Sep 17 00:00:00 2001 From: WarmBeer Date: Mon, 25 Apr 2022 20:06:29 +0200 Subject: [PATCH 0092/1003] Revert "Revert "Development"" --- README.md | 2 ++ src/mysql_database.rs | 18 ++++++++++++++++-- src/sqlite_database.rs | 3 ++- src/tracker.rs | 24 ++++++++++++++++-------- 4 files changed, 36 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 929585c11..bb4649271 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,8 @@ Torrust Tracker is a lightweight but incredibly powerful and feature-rich BitTor * [X] Peer authentication using time-bound keys * [X] newTrackon check supported for both HTTP, UDP, where IPv4 and IPv6 is properly handled * [X] SQLite3 Persistent loading and saving of the torrent hashes and completed count +* [X] MySQL support added as engine option +* [X] Periodically saving added, interval can be configured ### Implemented BEPs * [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The BitTorrent Protocol diff --git 
a/src/mysql_database.rs b/src/mysql_database.rs index 7ecae214a..15e2de633 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -80,12 +80,26 @@ impl Database for MysqlDatabase { let mut db_transaction = conn.start_transaction(TxOpts::default()).map_err(|_| database::Error::DatabaseError)?; + let mut insert_vector= vec![]; + for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())).is_err() { + insert_vector.push(format!("(UNHEX('{}'), {})", info_hash.to_string(), completed.to_string())); + if insert_vector.len() == 1000 { + let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); + if db_transaction.query_drop(query).is_err() { + return Err(Error::InvalidQuery); + } + insert_vector.clear(); + } + } + + if insert_vector.len() != 0 { + let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); + if db_transaction.query_drop(query).is_err() { return Err(Error::InvalidQuery); } - debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = completed", info_hash.to_string(), completed.to_string()); + insert_vector.clear(); } if db_transaction.commit().is_err() { diff --git a/src/sqlite_database.rs b/src/sqlite_database.rs index fa519ffd0..82bb9d4fc 100644 --- a/src/sqlite_database.rs +++ b/src/sqlite_database.rs @@ -82,7 +82,8 @@ impl Database for SqliteDatabase { for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, 
?)", &[info_hash.to_string(), completed.to_string()]); + let _ = db_transaction.execute("INSERT OR IGNORE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); + let _ = db_transaction.execute("UPDATE torrents SET completed = ? WHERE info_hash = ?", &[completed.to_string(), info_hash.to_string()]); } let _ = db_transaction.commit(); diff --git a/src/tracker.rs b/src/tracker.rs index 0e42f69e1..c0c25bc41 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; -use log::info; +use log::{debug, info}; use serde::{Deserialize, Serialize}; use serde; use tokio::sync::{RwLock, RwLockReadGuard}; @@ -128,6 +128,7 @@ impl TorrentTracker { let torrents = self.database.load_persistent_torrent_data().await?; for torrent in torrents { + debug!("{:#?}", torrent); let _ = self.add_torrent(torrent.0, 0, torrent.1, 0).await; } @@ -307,41 +308,48 @@ impl TorrentTracker { let mut updates = self.updates.write().await; let mut updates_cloned: std::collections::HashMap = std::collections::HashMap::new(); // let mut torrent_hashes: Vec = Vec::new(); + info!("Copying updates to updates_cloned..."); for (k, completed) in updates.iter() { - updates_cloned.insert(*k, *completed); + updates_cloned.insert(k.clone(), completed.clone()); } updates.clear(); drop(updates); - let mut shadows = self.shadow.write().await; + info!("Copying updates_cloned into the shadow to overwrite..."); for (k, completed) in updates_cloned.iter() { + let mut shadows = self.shadow.write().await; if shadows.contains_key(k) { shadows.remove(k); } - shadows.insert(*k, *completed); + shadows.insert(k.clone(), completed.clone()); + drop(shadows); } drop(updates_cloned); // We updated the shadow data from the updates data, let's handle shadow data as expected. 
+ info!("Handle shadow_copy to be updated into SQL..."); let mut shadow_copy: BTreeMap = BTreeMap::new(); + let shadows = self.shadow.read().await; for (infohash, completed) in shadows.iter() { - shadow_copy.insert(*infohash, TorrentEntry { + shadow_copy.insert(infohash.clone(), TorrentEntry { peers: Default::default(), - completed: *completed, + completed: completed.clone(), seeders: 0, }); } - - // Drop the lock drop(shadows); // We will now save the data from the shadow into the database. // This should not put any strain on the server itself, other then the harddisk/ssd. + info!("Start saving shadow data into SQL..."); let result = self.database.save_persistent_torrent_data(&shadow_copy).await; if result.is_ok() { + info!("Done saving data to SQL and succeeded, emptying shadow..."); let mut shadow = self.shadow.write().await; shadow.clear(); drop(shadow); + } else { + info!("Done saving data to SQL and failed, not emptying shadow..."); } } } From 6578c3545a4254c505d92e568a3a5e7cc0b57016 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 2 May 2022 12:42:18 +0200 Subject: [PATCH 0093/1003] chore: removed dhat --- Cargo.lock | 81 ++--------------------------------------------------- Cargo.toml | 4 --- src/main.rs | 32 +++++++++------------ 3 files changed, 15 insertions(+), 102 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eb04ba651..56ecd77a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,15 +2,6 @@ # It is not intended for manual editing. 
version = 3 -[[package]] -name = "addr2line" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" -dependencies = [ - "gimli", -] - [[package]] name = "adler" version = "1.0.2" @@ -95,21 +86,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" -[[package]] -name = "backtrace" -version = "0.3.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" -dependencies = [ - "addr2line", - "cc", - "cfg-if", - "libc", - "miniz_oxide 0.4.4", - "object", - "rustc-demangle", -] - [[package]] name = "base-x" version = "0.2.8" @@ -399,21 +375,6 @@ dependencies = [ "syn", ] -[[package]] -name = "dhat" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47003dc9f6368a88e85956c3b2573a7e6872746a3e5d762a8885da3a136a0381" -dependencies = [ - "backtrace", - "lazy_static", - "parking_lot 0.11.2", - "rustc-hash", - "serde 1.0.136", - "serde_json", - "thousands", -] - [[package]] name = "digest" version = "0.9.0" @@ -498,7 +459,7 @@ dependencies = [ "crc32fast", "libc", "libz-sys", - "miniz_oxide 0.5.1", + "miniz_oxide", ] [[package]] @@ -712,12 +673,6 @@ dependencies = [ "wasi 0.10.0+wasi-snapshot-preview1", ] -[[package]] -name = "gimli" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" - [[package]] name = "glob" version = "0.3.0" @@ -1065,16 +1020,6 @@ dependencies = [ "unicase", ] -[[package]] -name = "miniz_oxide" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" -dependencies = [ - "adler", - 
"autocfg", -] - [[package]] name = "miniz_oxide" version = "0.5.1" @@ -1308,15 +1253,6 @@ dependencies = [ "libc", ] -[[package]] -name = "object" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" -dependencies = [ - "memchr", -] - [[package]] name = "once_cell" version = "1.10.0" @@ -1657,12 +1593,6 @@ dependencies = [ "serde 1.0.136", ] -[[package]] -name = "rustc-demangle" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" - [[package]] name = "rustc-hash" version = "1.1.0" @@ -2127,12 +2057,6 @@ dependencies = [ "syn", ] -[[package]] -name = "thousands" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bf63baf9f5039dadc247375c29eb13706706cfde997d0330d05aa63a77d8820" - [[package]] name = "time" version = "0.1.44" @@ -2302,7 +2226,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "2.2.1" +version = "2.3.0" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -2311,7 +2235,6 @@ dependencies = [ "chrono", "config", "derive_more", - "dhat", "fern", "futures", "hex", diff --git a/Cargo.toml b/Cargo.toml index a10d548c2..7be32ce89 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,9 +10,6 @@ edition = "2018" debug = 1 lto = "fat" -[features] -dhat-heap = [] # if you are doing heap profiling - [dependencies] serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2.3" @@ -38,4 +35,3 @@ thiserror = "1.0" aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } futures = "0.3.21" async-trait = "0.1.52" -dhat = "0.3.0" diff --git a/src/main.rs b/src/main.rs index b17ef14fe..5e16f2e0c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,15 +7,8 @@ use tokio::task::JoinHandle; use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, 
HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; use torrust_tracker::torrust_http_tracker::server::HttpServer; -#[cfg(feature = "dhat-heap")] -#[global_allocator] -static ALLOC: dhat::Alloc = dhat::Alloc; - #[tokio::main] async fn main() { - #[cfg(feature = "dhat-heap")] - let _profiler = dhat::Profiler::new_heap(); - // torrust config let config = match Configuration::load_from_file() { Ok(config) => Arc::new(config), @@ -29,6 +22,7 @@ async fn main() { panic!("{}", e) })); + // initialize logging logging::setup_logging(&config); // load persistent torrents if enabled @@ -39,11 +33,11 @@ async fn main() { }; info!("Persistent torrents loaded."); - let _torrent_periodic_job = start_torrent_periodic_job(config.clone(), tracker.clone()).unwrap(); + let _torrent_periodic_job = start_torrent_periodic_job(config.clone(), tracker.clone()); } // start torrent cleanup job (periodically removes old peers) - let _torrent_cleanup_job = start_torrent_cleanup_job(config.clone(), tracker.clone()).unwrap(); + let _torrent_cleanup_job = start_torrent_cleanup_job(config.clone(), tracker.clone()); // start HTTP API server if config.http_api.enabled { @@ -80,7 +74,7 @@ async fn main() { } // start a thread to post statistics - let _ = start_statistics_job(config.clone(), tracker.clone()).unwrap(); + let _ = start_statistics_job(config.clone(), tracker.clone()); // handle the signals here tokio::select! { @@ -103,11 +97,11 @@ async fn main() { } } -fn start_torrent_periodic_job(config: Arc, tracker: Arc) -> Option> { +fn start_torrent_periodic_job(config: Arc, tracker: Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(&tracker); let interval = config.persistence_interval.unwrap_or(900); - return Some(tokio::spawn(async move { + tokio::spawn(async move { let interval = std::time::Duration::from_secs(interval); let mut interval = tokio::time::interval(interval); interval.tick().await; // first tick is immediate... 
@@ -122,14 +116,14 @@ fn start_torrent_periodic_job(config: Arc, tracker: Arc, tracker: Arc) -> Option> { +fn start_torrent_cleanup_job(config: Arc, tracker: Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(&tracker); let interval = config.cleanup_interval.unwrap_or(600); - return Some(tokio::spawn(async move { + tokio::spawn(async move { let interval = std::time::Duration::from_secs(interval); let mut interval = tokio::time::interval(interval); interval.tick().await; // first tick is immediate... @@ -142,14 +136,14 @@ fn start_torrent_cleanup_job(config: Arc, tracker: Arc, tracker: Arc) -> Option> { +fn start_statistics_job(config: Arc, tracker: Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(&tracker); let interval = config.log_interval.unwrap_or(60); - return Some(tokio::spawn(async move { + tokio::spawn(async move { let interval = std::time::Duration::from_secs(interval); let mut interval = tokio::time::interval(interval); interval.tick().await; // first tick is immediate... 
@@ -162,7 +156,7 @@ fn start_statistics_job(config: Arc, tracker: Arc break; } } - })); + }) } fn start_api_server(config: &HttpApiConfig, tracker: Arc) -> JoinHandle<()> { From 52785a0b58097151eb85c663269f9210fb51cea5 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 2 May 2022 23:04:37 +0200 Subject: [PATCH 0094/1003] refactor: renamed http and udp modules --- Cargo.toml | 2 +- src/{torrust_http_tracker => http}/errors.rs | 0 src/{torrust_http_tracker => http}/filters.rs | 2 +- src/{torrust_http_tracker => http}/handlers.rs | 2 +- src/{torrust_http_tracker => http}/mod.rs | 0 src/{torrust_http_tracker => http}/request.rs | 2 +- src/{torrust_http_tracker => http}/response.rs | 0 src/{torrust_http_tracker => http}/routes.rs | 2 +- src/{torrust_http_tracker => http}/server.rs | 2 +- src/lib.rs | 8 ++++---- src/main.rs | 2 +- src/torrent.rs | 2 +- src/{torrust_udp_tracker => udp}/errors.rs | 0 src/{torrust_udp_tracker => udp}/handlers.rs | 4 ++-- src/{torrust_udp_tracker => udp}/mod.rs | 0 src/{torrust_udp_tracker => udp}/request.rs | 0 src/{torrust_udp_tracker => udp}/server.rs | 2 +- 17 files changed, 15 insertions(+), 15 deletions(-) rename src/{torrust_http_tracker => http}/errors.rs (100%) rename src/{torrust_http_tracker => http}/filters.rs (98%) rename src/{torrust_http_tracker => http}/handlers.rs (96%) rename src/{torrust_http_tracker => http}/mod.rs (100%) rename src/{torrust_http_tracker => http}/request.rs (94%) rename src/{torrust_http_tracker => http}/response.rs (100%) rename src/{torrust_http_tracker => http}/routes.rs (90%) rename src/{torrust_http_tracker => http}/server.rs (97%) rename src/{torrust_udp_tracker => udp}/errors.rs (100%) rename src/{torrust_udp_tracker => udp}/handlers.rs (98%) rename src/{torrust_udp_tracker => udp}/mod.rs (100%) rename src/{torrust_udp_tracker => udp}/request.rs (100%) rename src/{torrust_udp_tracker => udp}/server.rs (97%) diff --git a/Cargo.toml b/Cargo.toml index 7be32ce89..cc97072a5 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "torrust-tracker" -version = "2.2.1" +version = "2.3.0" license = "AGPL-3.0" authors = ["Mick van Dijke "] description = "A feature rich BitTorrent tracker." diff --git a/src/torrust_http_tracker/errors.rs b/src/http/errors.rs similarity index 100% rename from src/torrust_http_tracker/errors.rs rename to src/http/errors.rs diff --git a/src/torrust_http_tracker/filters.rs b/src/http/filters.rs similarity index 98% rename from src/torrust_http_tracker/filters.rs rename to src/http/filters.rs index 5c4fc9743..5e0c3e068 100644 --- a/src/torrust_http_tracker/filters.rs +++ b/src/http/filters.rs @@ -8,7 +8,7 @@ use warp::{Filter, reject, Rejection}; use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId, TorrentTracker}; use crate::key_manager::AuthKey; -use crate::torrust_http_tracker::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; +use crate::http::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; /// Pass Arc along pub fn with_tracker(tracker: Arc) -> impl Filter, ), Error=Infallible> + Clone { diff --git a/src/torrust_http_tracker/handlers.rs b/src/http/handlers.rs similarity index 96% rename from src/torrust_http_tracker/handlers.rs rename to src/http/handlers.rs index 9021e8858..c81c93d9b 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/http/handlers.rs @@ -10,7 +10,7 @@ use warp::http::Response; use crate::{InfoHash, TorrentTracker}; use crate::key_manager::AuthKey; use crate::torrent::{TorrentError, TorrentPeer, TorrentStats}; -use crate::torrust_http_tracker::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, WebResult}; +use crate::http::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, WebResult}; use crate::tracker_stats::TrackerStatsEvent; use crate::utils::url_encode_bytes; diff --git 
a/src/torrust_http_tracker/mod.rs b/src/http/mod.rs similarity index 100% rename from src/torrust_http_tracker/mod.rs rename to src/http/mod.rs diff --git a/src/torrust_http_tracker/request.rs b/src/http/request.rs similarity index 94% rename from src/torrust_http_tracker/request.rs rename to src/http/request.rs index 487e53a13..28cd4750e 100644 --- a/src/torrust_http_tracker/request.rs +++ b/src/http/request.rs @@ -3,7 +3,7 @@ use std::net::IpAddr; use serde::Deserialize; use crate::{InfoHash, PeerId}; -use crate::torrust_http_tracker::Bytes; +use crate::http::Bytes; #[derive(Deserialize)] pub struct AnnounceRequestQuery { diff --git a/src/torrust_http_tracker/response.rs b/src/http/response.rs similarity index 100% rename from src/torrust_http_tracker/response.rs rename to src/http/response.rs diff --git a/src/torrust_http_tracker/routes.rs b/src/http/routes.rs similarity index 90% rename from src/torrust_http_tracker/routes.rs rename to src/http/routes.rs index fb6bf5c16..775d9ce79 100644 --- a/src/torrust_http_tracker/routes.rs +++ b/src/http/routes.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use warp::{Filter, Rejection}; use crate::TorrentTracker; -use crate::torrust_http_tracker::{handle_announce, handle_scrape, send_error, with_announce_request, with_auth_key, with_scrape_request, with_tracker}; +use crate::http::{handle_announce, handle_scrape, send_error, with_announce_request, with_auth_key, with_scrape_request, with_tracker}; /// All routes pub fn routes(tracker: Arc) -> impl Filter + Clone { diff --git a/src/torrust_http_tracker/server.rs b/src/http/server.rs similarity index 97% rename from src/torrust_http_tracker/server.rs rename to src/http/server.rs index 336670030..31a8e4664 100644 --- a/src/torrust_http_tracker/server.rs +++ b/src/http/server.rs @@ -2,7 +2,7 @@ use std::net::SocketAddr; use std::sync::Arc; use crate::TorrentTracker; -use crate::torrust_http_tracker::routes; +use crate::http::routes; /// Server that listens on HTTP, needs a 
TorrentTracker #[derive(Clone)] diff --git a/src/lib.rs b/src/lib.rs index b6cebfc5e..4f8e9a241 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,5 +1,5 @@ -pub use torrust_http_tracker::server::*; -pub use torrust_udp_tracker::server::*; +pub use http::server::*; +pub use udp::server::*; pub use self::common::*; pub use self::config::*; @@ -14,8 +14,8 @@ pub mod utils; pub mod sqlite_database; pub mod key_manager; pub mod logging; -pub mod torrust_udp_tracker; -pub mod torrust_http_tracker; +pub mod udp; +pub mod http; pub mod database; pub mod mysql_database; pub mod torrent; diff --git a/src/main.rs b/src/main.rs index 5e16f2e0c..b2bcb31d1 100644 --- a/src/main.rs +++ b/src/main.rs @@ -5,7 +5,7 @@ use log::info; use tokio::task::JoinHandle; use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; -use torrust_tracker::torrust_http_tracker::server::HttpServer; +use torrust_tracker::http::server::HttpServer; #[tokio::main] async fn main() { diff --git a/src/torrent.rs b/src/torrent.rs index e2984a490..4e44a995a 100644 --- a/src/torrent.rs +++ b/src/torrent.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId}; use crate::common::{AnnounceEventDef, NumberOfBytesDef}; -use crate::torrust_http_tracker::AnnounceRequest; +use crate::http::AnnounceRequest; #[derive(PartialEq, Eq, Debug, Clone, Serialize)] pub struct TorrentPeer { diff --git a/src/torrust_udp_tracker/errors.rs b/src/udp/errors.rs similarity index 100% rename from src/torrust_udp_tracker/errors.rs rename to src/udp/errors.rs diff --git a/src/torrust_udp_tracker/handlers.rs b/src/udp/handlers.rs similarity index 98% rename from src/torrust_udp_tracker/handlers.rs rename to src/udp/handlers.rs index ff6e8981b..23fac0405 100644 --- a/src/torrust_udp_tracker/handlers.rs +++ b/src/udp/handlers.rs @@ -5,8 +5,8 @@ use aquatic_udp_protocol::{AnnounceInterval, AnnounceRequest, 
AnnounceResponse, use crate::{InfoHash, MAX_SCRAPE_TORRENTS, TorrentTracker}; use crate::torrent::{TorrentError, TorrentPeer}; -use crate::torrust_udp_tracker::errors::ServerError; -use crate::torrust_udp_tracker::request::AnnounceRequestWrapper; +use crate::udp::errors::ServerError; +use crate::udp::request::AnnounceRequestWrapper; use crate::tracker_stats::TrackerStatsEvent; use crate::utils::get_connection_id; diff --git a/src/torrust_udp_tracker/mod.rs b/src/udp/mod.rs similarity index 100% rename from src/torrust_udp_tracker/mod.rs rename to src/udp/mod.rs diff --git a/src/torrust_udp_tracker/request.rs b/src/udp/request.rs similarity index 100% rename from src/torrust_udp_tracker/request.rs rename to src/udp/request.rs diff --git a/src/torrust_udp_tracker/server.rs b/src/udp/server.rs similarity index 97% rename from src/torrust_udp_tracker/server.rs rename to src/udp/server.rs index 8dc34d85d..03745192b 100644 --- a/src/torrust_udp_tracker/server.rs +++ b/src/udp/server.rs @@ -7,7 +7,7 @@ use log::{debug, info}; use tokio::net::UdpSocket; use crate::TorrentTracker; -use crate::torrust_udp_tracker::{handle_packet, MAX_PACKET_SIZE}; +use crate::udp::{handle_packet, MAX_PACKET_SIZE}; pub struct UdpServer { socket: Arc, From 5a6eaa41a319bff79e4ab2d291fb5b9d99dfef75 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 5 May 2022 23:58:10 +0200 Subject: [PATCH 0095/1003] refactor: moved databases to own module --- src/config.rs | 2 +- src/{ => databases}/database.rs | 4 ++-- src/databases/mod.rs | 3 +++ src/{mysql_database.rs => databases/mysql.rs} | 5 +++-- src/{sqlite_database.rs => databases/sqlite.rs} | 5 +++-- src/lib.rs | 7 +++---- src/tracker.rs | 7 ++++--- 7 files changed, 19 insertions(+), 14 deletions(-) rename src/{ => databases}/database.rs (96%) create mode 100644 src/databases/mod.rs rename src/{mysql_database.rs => databases/mysql.rs} (98%) rename src/{sqlite_database.rs => databases/sqlite.rs} (98%) diff --git a/src/config.rs b/src/config.rs index 
ce3f59760..c6901564f 100644 --- a/src/config.rs +++ b/src/config.rs @@ -9,7 +9,7 @@ use config::{Config, ConfigError, File}; use serde::{Deserialize, Serialize, Serializer}; use toml; -use crate::database::DatabaseDrivers; +use crate::databases::database::DatabaseDrivers; pub use crate::tracker::TrackerMode; #[derive(Serialize, Deserialize, PartialEq)] diff --git a/src/database.rs b/src/databases/database.rs similarity index 96% rename from src/database.rs rename to src/databases/database.rs index a90161e91..b39a0ada1 100644 --- a/src/database.rs +++ b/src/databases/database.rs @@ -7,8 +7,8 @@ use serde::{Deserialize, Serialize}; use crate::InfoHash; use crate::key_manager::AuthKey; -use crate::mysql_database::MysqlDatabase; -use crate::sqlite_database::SqliteDatabase; +use crate::databases::mysql::MysqlDatabase; +use crate::databases::sqlite::SqliteDatabase; use crate::torrent::TorrentEntry; #[derive(Serialize, Deserialize, Debug)] diff --git a/src/databases/mod.rs b/src/databases/mod.rs new file mode 100644 index 000000000..119e34816 --- /dev/null +++ b/src/databases/mod.rs @@ -0,0 +1,3 @@ +pub mod mysql; +pub mod sqlite; +pub mod database; diff --git a/src/mysql_database.rs b/src/databases/mysql.rs similarity index 98% rename from src/mysql_database.rs rename to src/databases/mysql.rs index 15e2de633..df85402b3 100644 --- a/src/mysql_database.rs +++ b/src/databases/mysql.rs @@ -8,8 +8,9 @@ use r2d2_mysql::mysql::{Opts, OptsBuilder, params, TxOpts}; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::MysqlConnectionManager; -use crate::{AUTH_KEY_LENGTH, database, InfoHash}; -use crate::database::{Database, Error}; +use crate::{AUTH_KEY_LENGTH, InfoHash}; +use crate::databases::database::{Database, Error}; +use crate::databases::database; use crate::key_manager::AuthKey; use crate::torrent::TorrentEntry; diff --git a/src/sqlite_database.rs b/src/databases/sqlite.rs similarity index 98% rename from src/sqlite_database.rs rename to src/databases/sqlite.rs 
index 82bb9d4fc..b51fd6c51 100644 --- a/src/sqlite_database.rs +++ b/src/databases/sqlite.rs @@ -7,8 +7,9 @@ use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; use r2d2_sqlite::rusqlite::NO_PARAMS; -use crate::{AUTH_KEY_LENGTH, database, InfoHash}; -use crate::database::Database; +use crate::{AUTH_KEY_LENGTH, InfoHash}; +use crate::databases::database::Database; +use crate::databases::database; use crate::key_manager::AuthKey; use crate::torrent::TorrentEntry; diff --git a/src/lib.rs b/src/lib.rs index 4f8e9a241..e965fa88d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -11,13 +11,12 @@ pub mod tracker; pub mod http_api_server; pub mod common; pub mod utils; -pub mod sqlite_database; pub mod key_manager; pub mod logging; pub mod udp; pub mod http; -pub mod database; -pub mod mysql_database; pub mod torrent; pub mod tracker_stats; - +pub mod setup; +pub mod persistent_torrent_statistics; +pub mod databases; diff --git a/src/tracker.rs b/src/tracker.rs index c0c25bc41..b8c2b9931 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -8,10 +8,11 @@ use serde::{Deserialize, Serialize}; use serde; use tokio::sync::{RwLock, RwLockReadGuard}; -use crate::{Configuration, database, key_manager}; +use crate::{Configuration, key_manager}; use crate::common::InfoHash; -use crate::database::Database; +use crate::databases::database::Database; use tokio::sync::mpsc::error::SendError; +use crate::databases::database; use crate::key_manager::AuthKey; use crate::key_manager::Error::KeyInvalid; use crate::torrent::{TorrentEntry, TorrentError, TorrentPeer, TorrentStats}; @@ -124,7 +125,7 @@ impl TorrentTracker { } // Loading the torrents into memory - pub async fn load_torrents(&self) -> Result<(), database::Error> { + pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { let torrents = self.database.load_persistent_torrent_data().await?; for torrent in torrents { From 4bf0c8b1b35b6df3faba79ed022dc468611022ae Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 
6 May 2022 23:21:51 +0200 Subject: [PATCH 0096/1003] refactor: major refactor of all code --- src/api/mod.rs | 1 + src/{http_api_server.rs => api/server.rs} | 20 +- src/config.rs | 6 +- src/databases/database.rs | 6 +- src/databases/mysql.rs | 6 +- src/databases/sqlite.rs | 6 +- src/http/filters.rs | 5 +- src/http/handlers.rs | 20 +- src/http/routes.rs | 10 +- src/http/server.rs | 16 +- src/jobs/http_tracker.rs | 27 +++ src/jobs/log_statistics.rs | 32 +++ src/jobs/mod.rs | 6 + src/jobs/persistent_torrent_statistics.rs | 38 ++++ src/jobs/torrent_cleanup.rs | 36 ++++ src/jobs/tracker_api.rs | 15 ++ src/jobs/udp_tracker.rs | 22 ++ src/lib.rs | 14 +- src/main.rs | 194 ++---------------- src/{ => protocol}/common.rs | 0 src/protocol/mod.rs | 2 + src/{ => protocol}/utils.rs | 0 src/setup.rs | 54 +++++ src/{key_manager.rs => tracker/key.rs} | 18 +- src/tracker/mod.rs | 5 + src/tracker/peer.rs | 83 ++++++++ .../statistics.rs} | 38 ++-- src/{ => tracker}/torrent.rs | 83 +------- src/{ => tracker}/tracker.rs | 67 +++--- src/udp/handlers.rs | 22 +- src/udp/server.rs | 9 +- 31 files changed, 475 insertions(+), 386 deletions(-) create mode 100644 src/api/mod.rs rename src/{http_api_server.rs => api/server.rs} (95%) create mode 100644 src/jobs/http_tracker.rs create mode 100644 src/jobs/log_statistics.rs create mode 100644 src/jobs/mod.rs create mode 100644 src/jobs/persistent_torrent_statistics.rs create mode 100644 src/jobs/torrent_cleanup.rs create mode 100644 src/jobs/tracker_api.rs create mode 100644 src/jobs/udp_tracker.rs rename src/{ => protocol}/common.rs (100%) create mode 100644 src/protocol/mod.rs rename src/{ => protocol}/utils.rs (100%) create mode 100644 src/setup.rs rename src/{key_manager.rs => tracker/key.rs} (85%) create mode 100644 src/tracker/mod.rs create mode 100644 src/tracker/peer.rs rename src/{tracker_stats.rs => tracker/statistics.rs} (67%) rename src/{ => tracker}/torrent.rs (57%) rename src/{ => tracker}/tracker.rs (90%) diff --git a/src/api/mod.rs 
b/src/api/mod.rs new file mode 100644 index 000000000..74f47ad34 --- /dev/null +++ b/src/api/mod.rs @@ -0,0 +1 @@ +pub mod server; diff --git a/src/http_api_server.rs b/src/api/server.rs similarity index 95% rename from src/http_api_server.rs rename to src/api/server.rs index 89505cb09..77264162c 100644 --- a/src/http_api_server.rs +++ b/src/api/server.rs @@ -1,14 +1,14 @@ use std::cmp::min; use std::collections::{HashMap, HashSet}; +use std::net::SocketAddr; use std::sync::Arc; use serde::{Deserialize, Serialize}; -use warp::{Filter, filters, reply, reply::Reply, serve, Server}; +use warp::{Filter, filters, reply, serve}; -use crate::torrent::TorrentPeer; -use crate::tracker::TorrentTracker; - -use super::common::*; +use crate::protocol::common::*; +use crate::peer::TorrentPeer; +use crate::tracker::tracker::TorrentTracker; #[derive(Deserialize, Debug)] struct TorrentInfoQuery { @@ -84,7 +84,7 @@ fn authenticate(tokens: HashMap) -> impl Filter) -> Server + Clone + Send + Sync + 'static> { +pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp::Future { // GET /api/torrents?offset=:u32&limit=:u32 // View torrent list let api_torrents = tracker.clone(); @@ -309,5 +309,11 @@ pub fn build_server(tracker: Arc) -> Server, - pub cleanup_interval: Option, + pub cleanup_interval: u64, pub cleanup_peerless: bool, pub external_ip: Option, pub announce_interval: u32, @@ -147,7 +147,7 @@ impl Configuration { statistics: true, persistence: false, persistence_interval: Some(900), - cleanup_interval: Some(600), + cleanup_interval: 600, cleanup_peerless: true, external_ip: Some(String::from("0.0.0.0")), announce_interval: 120, diff --git a/src/databases/database.rs b/src/databases/database.rs index b39a0ada1..fd9f2a19d 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -6,10 +6,10 @@ use log::debug; use serde::{Deserialize, Serialize}; use crate::InfoHash; -use crate::key_manager::AuthKey; +use crate::tracker::key::AuthKey; use 
crate::databases::mysql::MysqlDatabase; use crate::databases::sqlite::SqliteDatabase; -use crate::torrent::TorrentEntry; +use crate::tracker::torrent::TorrentEntry; #[derive(Serialize, Deserialize, Debug)] pub enum DatabaseDrivers { @@ -40,7 +40,7 @@ pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result Result<(), Error>; - async fn load_persistent_torrent_data(&self) -> Result, Error>; + async fn load_persistent_torrents(&self) -> Result, Error>; async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), Error>; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index df85402b3..e7f57a7a4 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -11,8 +11,8 @@ use r2d2_mysql::MysqlConnectionManager; use crate::{AUTH_KEY_LENGTH, InfoHash}; use crate::databases::database::{Database, Error}; use crate::databases::database; -use crate::key_manager::AuthKey; -use crate::torrent::TorrentEntry; +use crate::tracker::key::AuthKey; +use crate::tracker::torrent::TorrentEntry; pub struct MysqlDatabase { pool: Pool, @@ -65,7 +65,7 @@ impl Database for MysqlDatabase { Ok(()) } - async fn load_persistent_torrent_data(&self) -> Result, database::Error> { + async fn load_persistent_torrents(&self) -> Result, database::Error> { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; let torrents: Vec<(InfoHash, u32)> = conn.query_map("SELECT HEX(info_hash), completed FROM torrents", |(info_hash_string, completed): (String, u32)| { diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index b51fd6c51..18a1d5a28 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -10,8 +10,8 @@ use r2d2_sqlite::rusqlite::NO_PARAMS; use crate::{AUTH_KEY_LENGTH, InfoHash}; use crate::databases::database::Database; use crate::databases::database; -use crate::key_manager::AuthKey; -use crate::torrent::TorrentEntry; +use crate::tracker::key::AuthKey; +use crate::tracker::torrent::TorrentEntry; 
pub struct SqliteDatabase { pool: Pool, @@ -59,7 +59,7 @@ impl Database for SqliteDatabase { .map(|_| ()) } - async fn load_persistent_torrent_data(&self) -> Result, database::Error> { + async fn load_persistent_torrents(&self) -> Result, database::Error> { let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; diff --git a/src/http/filters.rs b/src/http/filters.rs index 5e0c3e068..8f3ee04c0 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -6,9 +6,10 @@ use std::sync::Arc; use log::debug; use warp::{Filter, reject, Rejection}; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId, TorrentTracker}; -use crate::key_manager::AuthKey; +use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId}; +use crate::tracker::key::AuthKey; use crate::http::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; +use crate::tracker::tracker::TorrentTracker; /// Pass Arc along pub fn with_tracker(tracker: Arc) -> impl Filter, ), Error=Infallible> + Clone { diff --git a/src/http/handlers.rs b/src/http/handlers.rs index c81c93d9b..d7e4859d9 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -7,12 +7,14 @@ use log::debug; use warp::{reject, Rejection, Reply}; use warp::http::Response; -use crate::{InfoHash, TorrentTracker}; -use crate::key_manager::AuthKey; -use crate::torrent::{TorrentError, TorrentPeer, TorrentStats}; +use crate::{InfoHash}; +use crate::tracker::key::AuthKey; +use crate::tracker::torrent::{TorrentError, TorrentStats}; use crate::http::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, WebResult}; -use crate::tracker_stats::TrackerStatsEvent; -use crate::utils::url_encode_bytes; +use crate::peer::TorrentPeer; +use crate::tracker::statistics::TrackerStatisticsEvent; +use crate::protocol::utils::url_encode_bytes; +use crate::tracker::tracker::TorrentTracker; /// 
Authenticate InfoHash using optional AuthKey pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), ServerError> { @@ -51,8 +53,8 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option // send stats event match announce_request.peer_addr { - IpAddr::V4(_) => { tracker.send_stats_event(TrackerStatsEvent::Tcp4Announce).await; } - IpAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Tcp6Announce).await; } + IpAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Announce).await; } + IpAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Announce).await; } } send_announce_response(&announce_request, torrent_stats, peers, announce_interval, tracker.config.announce_interval_min) @@ -84,8 +86,8 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option { tracker.send_stats_event(TrackerStatsEvent::Tcp4Scrape).await; } - IpAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Tcp6Scrape).await; } + IpAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Scrape).await; } + IpAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Scrape).await; } } send_scrape_response(files) diff --git a/src/http/routes.rs b/src/http/routes.rs index 775d9ce79..a0b197f44 100644 --- a/src/http/routes.rs +++ b/src/http/routes.rs @@ -3,8 +3,14 @@ use std::sync::Arc; use warp::{Filter, Rejection}; -use crate::TorrentTracker; -use crate::http::{handle_announce, handle_scrape, send_error, with_announce_request, with_auth_key, with_scrape_request, with_tracker}; +use crate::http::handle_announce; +use crate::http::handle_scrape; +use crate::http::send_error; +use crate::http::with_announce_request; +use crate::http::with_auth_key; +use crate::http::with_scrape_request; +use crate::http::with_tracker; +use crate::tracker::tracker::TorrentTracker; /// All routes pub fn routes(tracker: Arc) -> impl Filter + Clone { diff --git 
a/src/http/server.rs b/src/http/server.rs index 31a8e4664..5a5b5f735 100644 --- a/src/http/server.rs +++ b/src/http/server.rs @@ -1,8 +1,8 @@ use std::net::SocketAddr; use std::sync::Arc; -use crate::TorrentTracker; use crate::http::routes; +use crate::tracker::tracker::TorrentTracker; /// Server that listens on HTTP, needs a TorrentTracker #[derive(Clone)] @@ -18,18 +18,19 @@ impl HttpServer { } /// Start the HttpServer - pub async fn start(&self, socket_addr: SocketAddr) { + pub fn start(&self, socket_addr: SocketAddr) -> impl warp::Future { let (_addr, server) = warp::serve(routes(self.tracker.clone())) .bind_with_graceful_shutdown(socket_addr, async move { tokio::signal::ctrl_c() .await - .expect("failed to listen to shutdown signal"); + .expect("Failed to listen to shutdown signal."); }); - tokio::task::spawn(server); + + server } /// Start the HttpServer in TLS mode - pub async fn start_tls(&self, socket_addr: SocketAddr, ssl_cert_path: &str, ssl_key_path: &str) { + pub fn start_tls(&self, socket_addr: SocketAddr, ssl_cert_path: String, ssl_key_path: String) -> impl warp::Future { let (_addr, server) = warp::serve(routes(self.tracker.clone())) .tls() .cert_path(ssl_cert_path) @@ -37,8 +38,9 @@ impl HttpServer { .bind_with_graceful_shutdown(socket_addr, async move { tokio::signal::ctrl_c() .await - .expect("failed to listen to shutdown signal"); + .expect("Failed to listen to shutdown signal."); }); - tokio::task::spawn(server); + + server } } diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs new file mode 100644 index 000000000..85f64200f --- /dev/null +++ b/src/jobs/http_tracker.rs @@ -0,0 +1,27 @@ +use std::net::SocketAddr; +use std::sync::Arc; +use log::{info, warn}; +use tokio::task::JoinHandle; +use crate::{HttpServer, HttpTrackerConfig}; +use crate::tracker::tracker::TorrentTracker; + +pub fn start_job(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { + let bind_addr = config.bind_address.parse::().unwrap(); + let ssl_enabled 
= config.ssl_enabled; + let ssl_cert_path = config.ssl_cert_path.clone(); + let ssl_key_path = config.ssl_key_path.clone(); + + tokio::spawn(async move { + let http_tracker = HttpServer::new(tracker); + + if !ssl_enabled { + info!("Starting HTTP server on: {}", bind_addr); + http_tracker.start(bind_addr).await; + } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { + info!("Starting HTTPS server on: {} (TLS)", bind_addr); + http_tracker.start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap()).await; + } else { + warn!("Could not start HTTP tracker on: {}, missing SSL Cert or Key!", bind_addr); + } + }) +} diff --git a/src/jobs/log_statistics.rs b/src/jobs/log_statistics.rs new file mode 100644 index 000000000..f62399a47 --- /dev/null +++ b/src/jobs/log_statistics.rs @@ -0,0 +1,32 @@ +use std::sync::Arc; +use log::info; +use tokio::task::JoinHandle; +use crate::{Configuration}; +use crate::tracker::tracker::TorrentTracker; + +pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { + let weak_tracker = std::sync::Arc::downgrade(&tracker); + let interval = config.log_interval.unwrap_or(60); + + tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval); + let mut interval = tokio::time::interval(interval); + interval.tick().await; + + loop { + tokio::select! 
{ + _ = tokio::signal::ctrl_c() => { + info!("Stopping statistics logging job.."); + break; + } + _ = interval.tick() => { + if let Some(tracker) = weak_tracker.upgrade() { + tracker.post_log().await; + } else { + break; + } + } + } + } + }) +} diff --git a/src/jobs/mod.rs b/src/jobs/mod.rs new file mode 100644 index 000000000..a71fcb210 --- /dev/null +++ b/src/jobs/mod.rs @@ -0,0 +1,6 @@ +pub mod persistent_torrent_statistics; +pub mod torrent_cleanup; +pub mod tracker_api; +pub mod log_statistics; +pub mod http_tracker; +pub mod udp_tracker; diff --git a/src/jobs/persistent_torrent_statistics.rs b/src/jobs/persistent_torrent_statistics.rs new file mode 100644 index 000000000..7ebc80bdb --- /dev/null +++ b/src/jobs/persistent_torrent_statistics.rs @@ -0,0 +1,38 @@ +use std::sync::Arc; +use log::info; +use tokio::task::JoinHandle; +use crate::{Configuration}; +use crate::tracker::tracker::TorrentTracker; + +pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { + let weak_tracker = std::sync::Arc::downgrade(&tracker); + let interval = config.persistence_interval.unwrap_or(900); + + tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval); + let mut interval = tokio::time::interval(interval); + interval.tick().await; + + // periodically save torrents to database + loop { + tokio::select! 
{ + _ = tokio::signal::ctrl_c() => { + // Save before shutting down + tracker.periodic_saving().await; + info!("Stopping periodic torrent saving job.."); + break; + } + _ = interval.tick() => { + if let Some(tracker) = weak_tracker.upgrade() { + info!("Saving torrents to database..."); + tracker.periodic_saving().await; + info!("Periodic saving done."); + } else { + // If tracker no longer exists, stop job + break; + } + } + } + } + }) +} diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs new file mode 100644 index 000000000..05e639728 --- /dev/null +++ b/src/jobs/torrent_cleanup.rs @@ -0,0 +1,36 @@ +use std::sync::Arc; +use chrono::Utc; +use log::info; +use tokio::task::JoinHandle; +use crate::{Configuration}; +use crate::tracker::tracker::TorrentTracker; + +pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { + let weak_tracker = std::sync::Arc::downgrade(&tracker); + let interval = config.cleanup_interval; + + tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval); + let mut interval = tokio::time::interval(interval); + interval.tick().await; + + loop { + tokio::select! 
{ + _ = tokio::signal::ctrl_c() => { + info!("Stopping torrent cleanup job.."); + break; + } + _ = interval.tick() => { + if let Some(tracker) = weak_tracker.upgrade() { + let start_time = Utc::now().time(); + info!("Cleaning up torrents.."); + tracker.cleanup_torrents().await; + info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()) + } else { + break; + } + } + } + } + }) +} diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs new file mode 100644 index 000000000..476a87a6a --- /dev/null +++ b/src/jobs/tracker_api.rs @@ -0,0 +1,15 @@ +use std::sync::Arc; +use log::info; +use tokio::task::JoinHandle; +use crate::{Configuration}; +use crate::api::server; +use crate::tracker::tracker::TorrentTracker; + +pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { + let bind_addr = config.http_api.bind_address.parse::().expect("Tracker API bind_address invalid."); + info!("Starting Torrust API server on: {}", bind_addr); + + tokio::spawn(async move { + server::start(bind_addr, tracker).await; + }) +} diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs new file mode 100644 index 000000000..32ef76ef4 --- /dev/null +++ b/src/jobs/udp_tracker.rs @@ -0,0 +1,22 @@ +use std::sync::Arc; +use log::{error, info, warn}; +use tokio::task::JoinHandle; +use crate::{UdpServer, UdpTrackerConfig}; +use crate::tracker::tracker::TorrentTracker; + +pub fn start_job(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { + let bind_addr = config.bind_address.clone(); + + tokio::spawn(async move { + match UdpServer::new(tracker, &bind_addr).await { + Ok(udp_server) => { + info!("Starting UDP server on: {}", bind_addr); + udp_server.start().await; + } + Err(e) => { + warn!("Could not start UDP tracker on: {}", bind_addr); + error!("{}", e); + } + } + }) +} diff --git a/src/lib.rs b/src/lib.rs index e965fa88d..245f4686c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,22 +1,18 @@ pub use http::server::*; pub use 
udp::server::*; -pub use self::common::*; +pub use protocol::common::*; pub use self::config::*; -pub use self::http_api_server::*; +pub use api::server::*; pub use self::tracker::*; pub mod config; pub mod tracker; -pub mod http_api_server; -pub mod common; -pub mod utils; -pub mod key_manager; pub mod logging; pub mod udp; pub mod http; -pub mod torrent; -pub mod tracker_stats; pub mod setup; -pub mod persistent_torrent_statistics; pub mod databases; +pub mod jobs; +pub mod api; +pub mod protocol; diff --git a/src/main.rs b/src/main.rs index b2bcb31d1..794cda4bb 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,15 +1,13 @@ -use std::net::SocketAddr; use std::sync::Arc; - use log::info; -use tokio::task::JoinHandle; - -use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; -use torrust_tracker::http::server::HttpServer; +use torrust_tracker::Configuration; +use torrust_tracker::logging; +use torrust_tracker::setup; +use torrust_tracker::tracker::tracker::TorrentTracker; #[tokio::main] async fn main() { - // torrust config + // Initialize Torrust config let config = match Configuration::load_from_file() { Ok(config) => Arc::new(config), Err(error) => { @@ -17,184 +15,28 @@ async fn main() { } }; - // the singleton torrent tracker that gets passed to the HTTP and UDP server - let tracker = Arc::new(TorrentTracker::new(config.clone()).unwrap_or_else(|e| { - panic!("{}", e) - })); - - // initialize logging - logging::setup_logging(&config); - - // load persistent torrents if enabled - if config.persistence { - info!("Loading persistent torrents into memory..."); - if tracker.load_torrents().await.is_err() { - panic!("Could not load persistent torrents.") - }; - info!("Persistent torrents loaded."); - - let _torrent_periodic_job = start_torrent_periodic_job(config.clone(), tracker.clone()); - } - - // start torrent cleanup job (periodically removes old peers) - let 
_torrent_cleanup_job = start_torrent_cleanup_job(config.clone(), tracker.clone()); - - // start HTTP API server - if config.http_api.enabled { - let _api_server = start_api_server(&config.http_api, tracker.clone()); - } - - // used to send graceful shutdown signal to udp listeners - let (tx, rx) = tokio::sync::watch::channel(false); - let mut udp_server_handles = Vec::new(); - - // start the udp blocks - for udp_tracker in &config.udp_trackers { - if !udp_tracker.enabled { continue; } - - if tracker.is_private() { - panic!("Could not start UDP tracker on: {} while in {:?}. UDP is not safe for private trackers!", udp_tracker.bind_address, config.mode); - } - - udp_server_handles.push( - start_udp_tracker_server(&udp_tracker, tracker.clone(), rx.clone()).await - ) - } - - // start the http blocks - for http_tracker in &config.http_trackers { - if !http_tracker.enabled { continue; } - - // SSL requires a cert and a key - if http_tracker.ssl_enabled && !http_tracker.verify_ssl_cert_and_key_set() { - panic!("Could not start HTTP tracker on: {}, missing SSL Cert or Key!", http_tracker.bind_address); + // Initialize Torrust tracker + let tracker = match TorrentTracker::new(config.clone()) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) } + }; - let _ = start_http_tracker_server(&http_tracker, tracker.clone()); - } + // Initialize logging + logging::setup_logging(&config); - // start a thread to post statistics - let _ = start_statistics_job(config.clone(), tracker.clone()); + // Run jobs + let jobs = setup::setup(&config, tracker.clone()).await; // handle the signals here tokio::select! 
{ _ = tokio::signal::ctrl_c() => { info!("Torrust shutting down.."); - // send kill signal - let _ = tx.send(true); - - // await for all udp servers to shutdown - futures::future::join_all(udp_server_handles).await; - - // Save torrents if enabled - if config.persistence { - info!("Saving torrents into SQL from memory..."); - let _ = tracker.periodic_saving().await; - info!("Torrents saved"); - } + // Await for all jobs to shutdown + futures::future::join_all(jobs).await; + info!("Torrust successfully shutdown."); } } } - -fn start_torrent_periodic_job(config: Arc, tracker: Arc) -> JoinHandle<()> { - let weak_tracker = std::sync::Arc::downgrade(&tracker); - let interval = config.persistence_interval.unwrap_or(900); - - tokio::spawn(async move { - let interval = std::time::Duration::from_secs(interval); - let mut interval = tokio::time::interval(interval); - interval.tick().await; // first tick is immediate... - // periodically call tracker.cleanup_torrents() - loop { - interval.tick().await; - if let Some(tracker) = weak_tracker.upgrade() { - info!("Executing periodic saving..."); - tracker.periodic_saving().await; - info!("Periodic saving done."); - } else { - break; - } - } - }) -} - -fn start_torrent_cleanup_job(config: Arc, tracker: Arc) -> JoinHandle<()> { - let weak_tracker = std::sync::Arc::downgrade(&tracker); - let interval = config.cleanup_interval.unwrap_or(600); - - tokio::spawn(async move { - let interval = std::time::Duration::from_secs(interval); - let mut interval = tokio::time::interval(interval); - interval.tick().await; // first tick is immediate... 
- // periodically call tracker.cleanup_torrents() - loop { - interval.tick().await; - if let Some(tracker) = weak_tracker.upgrade() { - tracker.cleanup_torrents().await; - } else { - break; - } - } - }) -} - -fn start_statistics_job(config: Arc, tracker: Arc) -> JoinHandle<()> { - let weak_tracker = std::sync::Arc::downgrade(&tracker); - let interval = config.log_interval.unwrap_or(60); - - tokio::spawn(async move { - let interval = std::time::Duration::from_secs(interval); - let mut interval = tokio::time::interval(interval); - interval.tick().await; // first tick is immediate... - // periodically call tracker.cleanup_torrents() - loop { - interval.tick().await; - if let Some(tracker) = weak_tracker.upgrade() { - tracker.post_log().await; - } else { - break; - } - } - }) -} - -fn start_api_server(config: &HttpApiConfig, tracker: Arc) -> JoinHandle<()> { - info!("Starting HTTP API server on: {}", config.bind_address); - let bind_addr = config.bind_address.parse::().unwrap(); - - tokio::spawn(async move { - let server = http_api_server::build_server(tracker); - let _ = server.bind(bind_addr).await; - }) -} - -fn start_http_tracker_server(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { - let http_tracker = HttpServer::new(tracker); - let bind_addr = config.bind_address.parse::().unwrap(); - let ssl_enabled = config.ssl_enabled; - let ssl_cert_path = config.ssl_cert_path.clone(); - let ssl_key_path = config.ssl_key_path.clone(); - - tokio::spawn(async move { - // run with tls if ssl_enabled and cert and key path are set - if ssl_enabled { - info!("Starting HTTPS server on: {} (TLS)", bind_addr); - http_tracker.start_tls(bind_addr, ssl_cert_path.as_ref().unwrap(), ssl_key_path.as_ref().unwrap()).await; - } else { - info!("Starting HTTP server on: {}", bind_addr); - http_tracker.start(bind_addr).await; - } - }) -} - -async fn start_udp_tracker_server(config: &UdpTrackerConfig, tracker: Arc, rx: tokio::sync::watch::Receiver) -> JoinHandle<()> { - let 
udp_server = UdpServer::new(tracker, &config.bind_address).await.unwrap_or_else(|e| { - panic!("Could not start UDP server: {}", e); - }); - - info!("Starting UDP server on: {}", config.bind_address); - tokio::spawn(async move { - udp_server.start(rx).await; - }) -} diff --git a/src/common.rs b/src/protocol/common.rs similarity index 100% rename from src/common.rs rename to src/protocol/common.rs diff --git a/src/protocol/mod.rs b/src/protocol/mod.rs new file mode 100644 index 000000000..99cfd91e4 --- /dev/null +++ b/src/protocol/mod.rs @@ -0,0 +1,2 @@ +pub mod common; +pub mod utils; diff --git a/src/utils.rs b/src/protocol/utils.rs similarity index 100% rename from src/utils.rs rename to src/protocol/utils.rs diff --git a/src/setup.rs b/src/setup.rs new file mode 100644 index 000000000..b8d49614d --- /dev/null +++ b/src/setup.rs @@ -0,0 +1,54 @@ +use std::sync::Arc; +use log::{info, warn}; +use tokio::task::JoinHandle; +use crate::{Configuration}; +use crate::jobs::{http_tracker, log_statistics, persistent_torrent_statistics, torrent_cleanup, tracker_api, udp_tracker}; +use crate::tracker::tracker::TorrentTracker; + +pub async fn setup(config: &Configuration, tracker: Arc) -> Vec>{ + let mut jobs: Vec> = Vec::new(); + + // Load persistent torrents + if config.persistence { + info!("Loading persistent torrents into memory.."); + tracker.load_persistent_torrents().await.expect("Could not load persistent torrents."); + info!("Persistent torrents loaded."); + jobs.push(persistent_torrent_statistics::start_job(&config, tracker.clone())); + } + + // Start the UDP blocks + for udp_tracker_config in &config.udp_trackers { + if !udp_tracker_config.enabled { continue; } + + if tracker.is_private() { + warn!("Could not start UDP tracker on: {} while in {:?}. 
UDP is not safe for private trackers!", udp_tracker_config.bind_address, config.mode); + } else { + jobs.push(udp_tracker::start_job(&udp_tracker_config, tracker.clone())) + } + } + + // Start the HTTP blocks + for http_tracker_config in &config.http_trackers { + if !http_tracker_config.enabled { continue; } + jobs.push(http_tracker::start_job(&http_tracker_config, tracker.clone())); + } + + // Start HTTP API server + if config.http_api.enabled { + jobs.push(tracker_api::start_job(&config, tracker.clone())); + } + + // Remove torrents without peers, every interval + if config.cleanup_interval > 0 { + jobs.push(torrent_cleanup::start_job(&config, tracker.clone())); + } + + // Log detailed torrent stats + if let Some(log_interval) = config.log_interval { + if log_interval > 0 { + jobs.push(log_statistics::start_job(&config, tracker.clone())); + } + } + + jobs +} diff --git a/src/key_manager.rs b/src/tracker/key.rs similarity index 85% rename from src/key_manager.rs rename to src/tracker/key.rs index 507402358..2e2ca81f7 100644 --- a/src/key_manager.rs +++ b/src/tracker/key.rs @@ -4,9 +4,9 @@ use rand::{Rng, thread_rng}; use rand::distributions::Alphanumeric; use serde::Serialize; -use crate::utils::current_time; +use crate::protocol::utils::current_time; -use super::common::AUTH_KEY_LENGTH; +use crate::AUTH_KEY_LENGTH; pub fn generate_auth_key(seconds_valid: u64) -> AuthKey { let key: String = thread_rng() @@ -81,11 +81,11 @@ impl From for Error { #[cfg(test)] mod tests { - use crate::key_manager; + use crate::tracker::key; #[test] fn auth_key_from_buffer() { - let auth_key = key_manager::AuthKey::from_buffer( + let auth_key = key::AuthKey::from_buffer( [ 89, 90, 83, 108, 52, 108, 77, 90, @@ -104,7 +104,7 @@ mod tests { #[test] fn auth_key_from_string() { let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = key_manager::AuthKey::from_string(key_string); + let auth_key = key::AuthKey::from_string(key_string); assert!(auth_key.is_some()); 
assert_eq!(auth_key.unwrap().key, key_string); @@ -112,16 +112,16 @@ mod tests { #[test] fn generate_valid_auth_key() { - let auth_key = key_manager::generate_auth_key(9999); + let auth_key = key::generate_auth_key(9999); - assert!(key_manager::verify_auth_key(&auth_key).is_ok()); + assert!(key::verify_auth_key(&auth_key).is_ok()); } #[test] fn generate_expired_auth_key() { - let mut auth_key = key_manager::generate_auth_key(0); + let mut auth_key = key::generate_auth_key(0); auth_key.valid_until = Some(0); - assert!(key_manager::verify_auth_key(&auth_key).is_err()); + assert!(key::verify_auth_key(&auth_key).is_err()); } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs new file mode 100644 index 000000000..6115bc23e --- /dev/null +++ b/src/tracker/mod.rs @@ -0,0 +1,5 @@ +pub mod tracker; +pub mod statistics; +pub mod peer; +pub mod torrent; +pub mod key; diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs new file mode 100644 index 000000000..2a54e2fd2 --- /dev/null +++ b/src/tracker/peer.rs @@ -0,0 +1,83 @@ +use std::net::{IpAddr, SocketAddr}; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use serde; +use serde::{Serialize}; +use crate::protocol::common::{NumberOfBytesDef, AnnounceEventDef}; +use crate::http::AnnounceRequest; +use crate::PeerId; + +#[derive(PartialEq, Eq, Debug, Clone, Serialize)] +pub struct TorrentPeer { + pub peer_id: PeerId, + pub peer_addr: SocketAddr, + #[serde(serialize_with = "ser_instant")] + pub updated: std::time::Instant, + #[serde(with = "NumberOfBytesDef")] + pub uploaded: NumberOfBytes, + #[serde(with = "NumberOfBytesDef")] + pub downloaded: NumberOfBytes, + #[serde(with = "NumberOfBytesDef")] + pub left: NumberOfBytes, + #[serde(with = "AnnounceEventDef")] + pub event: AnnounceEvent, +} + +impl TorrentPeer { + pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { + let peer_addr = 
TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); + + TorrentPeer { + peer_id: PeerId(announce_request.peer_id.0), + peer_addr, + updated: std::time::Instant::now(), + uploaded: announce_request.bytes_uploaded, + downloaded: announce_request.bytes_downloaded, + left: announce_request.bytes_left, + event: announce_request.event, + } + } + + pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { + let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); + + let event: AnnounceEvent = if let Some(event) = &announce_request.event { + match event.as_ref() { + "started" => AnnounceEvent::Started, + "stopped" => AnnounceEvent::Stopped, + "completed" => AnnounceEvent::Completed, + _ => AnnounceEvent::None + } + } else { + AnnounceEvent::None + }; + + TorrentPeer { + peer_id: announce_request.peer_id.clone(), + peer_addr, + updated: std::time::Instant::now(), + uploaded: NumberOfBytes(announce_request.uploaded as i64), + downloaded: NumberOfBytes(announce_request.downloaded as i64), + left: NumberOfBytes(announce_request.left as i64), + event, + } + } + + // potentially substitute localhost ip with external ip + pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: Option, port: u16) -> SocketAddr { + if remote_ip.is_loopback() && host_opt_ip.is_some() { + SocketAddr::new(host_opt_ip.unwrap(), port) + } else { + SocketAddr::new(remote_ip, port) + } + } + + pub(crate) fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } + + pub(crate) fn is_completed(&self) -> bool { + self.event == AnnounceEvent::Completed + } +} + +fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { + ser.serialize_u64(inst.elapsed().as_millis() as u64) +} diff --git a/src/tracker_stats.rs b/src/tracker/statistics.rs similarity index 67% rename from 
src/tracker_stats.rs rename to src/tracker/statistics.rs index 0bcd781ba..5822d2d4e 100644 --- a/src/tracker_stats.rs +++ b/src/tracker/statistics.rs @@ -7,7 +7,7 @@ use tokio::sync::mpsc::error::SendError; const CHANNEL_BUFFER_SIZE: usize = 65_535; #[derive(Debug)] -pub enum TrackerStatsEvent { +pub enum TrackerStatisticsEvent { Tcp4Announce, Tcp4Scrape, Tcp6Announce, @@ -21,7 +21,7 @@ pub enum TrackerStatsEvent { } #[derive(Debug)] -pub struct TrackerStats { +pub struct TrackerStatistics { pub tcp4_connections_handled: u64, pub tcp4_announces_handled: u64, pub tcp4_scrapes_handled: u64, @@ -36,7 +36,7 @@ pub struct TrackerStats { pub udp6_scrapes_handled: u64, } -impl TrackerStats { +impl TrackerStatistics { pub fn new() -> Self { Self { tcp4_connections_handled: 0, @@ -56,23 +56,23 @@ impl TrackerStats { } pub struct StatsTracker { - channel_sender: Option>, - pub stats: Arc>, + channel_sender: Option>, + pub stats: Arc>, } impl StatsTracker { pub fn new() -> Self { Self { channel_sender: None, - stats: Arc::new(RwLock::new(TrackerStats::new())), + stats: Arc::new(RwLock::new(TrackerStatistics::new())), } } - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStats> { + pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { self.stats.read().await } - pub async fn send_event(&self, event: TrackerStatsEvent) -> Option>> { + pub async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { if let Some(tx) = &self.channel_sender { Some(tx.send(event).await) } else { @@ -81,7 +81,7 @@ impl StatsTracker { } pub fn run_worker(&mut self) { - let (tx, mut rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + let (tx, mut rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); // set send channel on stats_tracker self.channel_sender = Some(tx); @@ -93,28 +93,28 @@ impl StatsTracker { let mut stats_lock = stats.write().await; match event { - TrackerStatsEvent::Tcp4Announce => { + TrackerStatisticsEvent::Tcp4Announce => { stats_lock.tcp4_announces_handled 
+= 1; stats_lock.tcp4_connections_handled += 1; } - TrackerStatsEvent::Tcp4Scrape => { + TrackerStatisticsEvent::Tcp4Scrape => { stats_lock.tcp4_scrapes_handled += 1; stats_lock.tcp4_connections_handled += 1; } - TrackerStatsEvent::Tcp6Announce => { + TrackerStatisticsEvent::Tcp6Announce => { stats_lock.tcp6_announces_handled += 1; stats_lock.tcp6_connections_handled += 1; } - TrackerStatsEvent::Tcp6Scrape => { + TrackerStatisticsEvent::Tcp6Scrape => { stats_lock.tcp6_scrapes_handled += 1; stats_lock.tcp6_connections_handled += 1; } - TrackerStatsEvent::Udp4Connect => { stats_lock.udp4_connections_handled += 1; } - TrackerStatsEvent::Udp4Announce => { stats_lock.udp4_announces_handled += 1; } - TrackerStatsEvent::Udp4Scrape => { stats_lock.udp4_scrapes_handled += 1; } - TrackerStatsEvent::Udp6Connect => { stats_lock.udp6_connections_handled += 1; } - TrackerStatsEvent::Udp6Announce => { stats_lock.udp6_announces_handled += 1; } - TrackerStatsEvent::Udp6Scrape => { stats_lock.udp6_scrapes_handled += 1; } + TrackerStatisticsEvent::Udp4Connect => { stats_lock.udp4_connections_handled += 1; } + TrackerStatisticsEvent::Udp4Announce => { stats_lock.udp4_announces_handled += 1; } + TrackerStatisticsEvent::Udp4Scrape => { stats_lock.udp4_scrapes_handled += 1; } + TrackerStatisticsEvent::Udp6Connect => { stats_lock.udp6_connections_handled += 1; } + TrackerStatisticsEvent::Udp6Announce => { stats_lock.udp6_announces_handled += 1; } + TrackerStatisticsEvent::Udp6Scrape => { stats_lock.udp6_scrapes_handled += 1; } } drop(stats_lock); diff --git a/src/torrent.rs b/src/tracker/torrent.rs similarity index 57% rename from src/torrent.rs rename to src/tracker/torrent.rs index 4e44a995a..c5c721a4a 100644 --- a/src/torrent.rs +++ b/src/tracker/torrent.rs @@ -1,88 +1,11 @@ use std::borrow::Cow; -use std::net::{IpAddr, SocketAddr}; +use std::net::{IpAddr}; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use aquatic_udp_protocol::{AnnounceEvent}; use serde::{Deserialize, 
Serialize}; use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId}; -use crate::common::{AnnounceEventDef, NumberOfBytesDef}; -use crate::http::AnnounceRequest; - -#[derive(PartialEq, Eq, Debug, Clone, Serialize)] -pub struct TorrentPeer { - pub peer_id: PeerId, - pub peer_addr: SocketAddr, - #[serde(serialize_with = "ser_instant")] - pub updated: std::time::Instant, - #[serde(with = "NumberOfBytesDef")] - pub uploaded: NumberOfBytes, - #[serde(with = "NumberOfBytesDef")] - pub downloaded: NumberOfBytes, - #[serde(with = "NumberOfBytesDef")] - pub left: NumberOfBytes, - #[serde(with = "AnnounceEventDef")] - pub event: AnnounceEvent, -} - -impl TorrentPeer { - pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { - let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); - - TorrentPeer { - peer_id: PeerId(announce_request.peer_id.0), - peer_addr, - updated: std::time::Instant::now(), - uploaded: announce_request.bytes_uploaded, - downloaded: announce_request.bytes_downloaded, - left: announce_request.bytes_left, - event: announce_request.event, - } - } - - pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { - let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); - - let event: AnnounceEvent = if let Some(event) = &announce_request.event { - match event.as_ref() { - "started" => AnnounceEvent::Started, - "stopped" => AnnounceEvent::Stopped, - "completed" => AnnounceEvent::Completed, - _ => AnnounceEvent::None - } - } else { - AnnounceEvent::None - }; - - TorrentPeer { - peer_id: announce_request.peer_id.clone(), - peer_addr, - updated: std::time::Instant::now(), - uploaded: NumberOfBytes(announce_request.uploaded as i64), - downloaded: NumberOfBytes(announce_request.downloaded as i64), - 
left: NumberOfBytes(announce_request.left as i64), - event, - } - } - - // potentially substitute localhost ip with external ip - pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: Option, port: u16) -> SocketAddr { - if remote_ip.is_loopback() && host_opt_ip.is_some() { - SocketAddr::new(host_opt_ip.unwrap(), port) - } else { - SocketAddr::new(remote_ip, port) - } - } - - pub(crate) fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } - - fn is_completed(&self) -> bool { - self.event == AnnounceEvent::Completed - } -} - -fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { - ser.serialize_u64(inst.elapsed().as_millis() as u64) -} +use crate::peer::TorrentPeer; #[derive(Serialize, Deserialize, Clone)] pub struct TorrentEntry { diff --git a/src/tracker.rs b/src/tracker/tracker.rs similarity index 90% rename from src/tracker.rs rename to src/tracker/tracker.rs index b8c2b9931..4f0d571c6 100644 --- a/src/tracker.rs +++ b/src/tracker/tracker.rs @@ -3,20 +3,22 @@ use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; -use log::{debug, info}; +use log::info; use serde::{Deserialize, Serialize}; use serde; use tokio::sync::{RwLock, RwLockReadGuard}; -use crate::{Configuration, key_manager}; -use crate::common::InfoHash; +use crate::Configuration; +use crate::protocol::common::InfoHash; use crate::databases::database::Database; use tokio::sync::mpsc::error::SendError; use crate::databases::database; -use crate::key_manager::AuthKey; -use crate::key_manager::Error::KeyInvalid; -use crate::torrent::{TorrentEntry, TorrentError, TorrentPeer, TorrentStats}; -use crate::tracker_stats::{StatsTracker, TrackerStats, TrackerStatsEvent}; +use crate::peer::TorrentPeer; +use crate::tracker::key::AuthKey; +use crate::tracker::key::Error::KeyInvalid; +use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; +use crate::tracker::key; +use 
crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Debug)] pub enum TrackerMode { @@ -79,7 +81,7 @@ impl TorrentTracker { } pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { - let auth_key = key_manager::generate_auth_key(seconds_valid); + let auth_key = key::generate_auth_key(seconds_valid); // add key to database if let Err(error) = self.database.add_key_to_keys(&auth_key).await { return Err(error); } @@ -91,9 +93,9 @@ impl TorrentTracker { self.database.remove_key_from_keys(key).await } - pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key_manager::Error> { + pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> { let db_key = self.database.get_key_from_keys(&auth_key.key).await.map_err(|_| KeyInvalid)?; - key_manager::verify_auth_key(&db_key) + key::verify_auth_key(&db_key) } pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { @@ -124,13 +126,22 @@ impl TorrentTracker { Ok(()) } - // Loading the torrents into memory + // Loading the torrents from database into memory pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { - let torrents = self.database.load_persistent_torrent_data().await?; + let persistent_torrents = self.database.load_persistent_torrents().await?; + let mut torrents = self.torrents.write().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(&info_hash) { continue; } + + let torrent_entry = TorrentEntry { + peers: Default::default(), + completed, + seeders: Default::default(), + }; - for torrent in torrents { - debug!("{:#?}", torrent); - let _ = self.add_torrent(torrent.0, 0, torrent.1, 0).await; + torrents.insert(info_hash.clone(), torrent_entry); } Ok(()) @@ -206,34 +217,15 @@ impl TorrentTracker { } } - pub async fn add_torrent(&self, 
info_hash: InfoHash, seeders: u32, completed: u32, leechers: u32) -> TorrentStats { - let mut torrents = self.torrents.write().await; - - if !torrents.contains_key(&info_hash) { - let torrent_entry = TorrentEntry { - peers: Default::default(), - completed, - seeders, - }; - torrents.insert(info_hash.clone(), torrent_entry); - } - - TorrentStats { - seeders, - completed, - leechers, - } - } - pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { self.torrents.read().await } - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStats> { + pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { self.stats_tracker.get_stats().await } - pub async fn send_stats_event(&self, event: TrackerStatsEvent) -> Option>> { + pub async fn send_stats_event(&self, event: TrackerStatisticsEvent) -> Option>> { self.stats_tracker.send_event(event).await } @@ -252,8 +244,6 @@ impl TorrentTracker { // remove torrents without peers if enabled, and defragment memory pub async fn cleanup_torrents(&self) { - info!("Cleaning torrents..."); - let lock = self.torrents.write().await; // First we create a mapping of all the torrent hashes in a vector, and we use this to iterate through the btreemap. 
@@ -298,7 +288,6 @@ impl TorrentTracker { } drop(lock); } - info!("Torrents cleaned up."); } pub async fn periodic_saving(&self) { diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 23fac0405..4ea767c0b 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -3,12 +3,14 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId}; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS, TorrentTracker}; -use crate::torrent::{TorrentError, TorrentPeer}; +use crate::{InfoHash, MAX_SCRAPE_TORRENTS}; +use crate::peer::TorrentPeer; +use crate::tracker::torrent::{TorrentError}; use crate::udp::errors::ServerError; use crate::udp::request::AnnounceRequestWrapper; -use crate::tracker_stats::TrackerStatsEvent; -use crate::utils::get_connection_id; +use crate::tracker::statistics::TrackerStatisticsEvent; +use crate::tracker::tracker::TorrentTracker; +use crate::protocol::utils::get_connection_id; pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { match tracker.authenticate_request(info_hash, &None).await { @@ -77,8 +79,8 @@ pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, t // send stats event match remote_addr { - SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp4Connect).await; } - SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp6Connect).await; } + SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp4Connect).await; } + SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp6Connect).await; } } Ok(response) @@ -136,8 +138,8 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc // send stats event match remote_addr { - SocketAddr::V4(_) => { 
tracker.send_stats_event(TrackerStatsEvent::Udp4Announce).await; } - SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp6Announce).await; } + SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp4Announce).await; } + SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp6Announce).await; } } Ok(announce_response) @@ -178,8 +180,8 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra // send stats event match remote_addr { - SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp4Scrape).await; } - SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp6Scrape).await; } + SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp4Scrape).await; } + SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp6Scrape).await; } } Ok(Response::from(ScrapeResponse { diff --git a/src/udp/server.rs b/src/udp/server.rs index 03745192b..bcacc2642 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -6,7 +6,7 @@ use aquatic_udp_protocol::Response; use log::{debug, info}; use tokio::net::UdpSocket; -use crate::TorrentTracker; +use crate::tracker::tracker::TorrentTracker; use crate::udp::{handle_packet, MAX_PACKET_SIZE}; pub struct UdpServer { @@ -24,16 +24,15 @@ impl UdpServer { }) } - pub async fn start(&self, rx: tokio::sync::watch::Receiver) { + pub async fn start(&self) { loop { - let mut rx = rx.clone(); let mut data = [0; MAX_PACKET_SIZE]; let socket = self.socket.clone(); let tracker = self.tracker.clone(); tokio::select! 
{ - _ = rx.changed() => { - info!("Stopping UDP server: {}...", socket.local_addr().unwrap()); + _ = tokio::signal::ctrl_c() => { + info!("Stopping UDP server: {}..", socket.local_addr().unwrap()); break; } Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { From 047c2db9100ae0d52af57d786c15aa237f1d1e03 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 7 May 2022 01:45:00 +0200 Subject: [PATCH 0097/1003] refactor: refactored even more code --- src/api/server.rs | 4 +- src/config.rs | 4 +- src/http/response.rs | 2 + src/protocol/utils.rs | 4 ++ src/tracker/mod.rs | 1 + src/tracker/mode.rs | 21 ++++++++ src/tracker/peer.rs | 13 ++--- src/tracker/torrent.rs | 119 ++++++++++------------------------------- src/tracker/tracker.rs | 59 ++++++-------------- 9 files changed, 80 insertions(+), 147 deletions(-) create mode 100644 src/tracker/mode.rs diff --git a/src/api/server.rs b/src/api/server.rs index 77264162c..d33f17dc0 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -23,7 +23,7 @@ struct Torrent<'a> { completed: u32, leechers: u32, #[serde(skip_serializing_if = "Option::is_none")] - peers: Option>, + peers: Option>, } #[derive(Serialize)] @@ -198,7 +198,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let torrent_entry_option = db.get(&info_hash); if torrent_entry_option.is_none() { - return Err(warp::reject::custom(ActionStatus::Err { reason: "torrent does not exist".into() })); + return Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")) } let torrent_entry = torrent_entry_option.unwrap(); diff --git a/src/config.rs b/src/config.rs index 10268ebbb..c24d1c45d 100644 --- a/src/config.rs +++ b/src/config.rs @@ -10,7 +10,7 @@ use serde::{Deserialize, Serialize, Serializer}; use toml; use crate::databases::database::DatabaseDrivers; -use crate::tracker::tracker::TrackerMode; +use crate::mode::TrackerMode; #[derive(Serialize, Deserialize, PartialEq)] pub enum TrackerServer { @@ -141,7 +141,7 @@ impl 
Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), log_interval: Some(60), - mode: TrackerMode::PublicMode, + mode: TrackerMode::Public, db_driver: DatabaseDrivers::Sqlite3, db_path: String::from("data.db"), statistics: true, diff --git a/src/http/response.rs b/src/http/response.rs index f57129cde..3118f7df1 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -3,6 +3,7 @@ use std::error::Error; use std::io::Write; use std::net::IpAddr; +use serde; use serde::Serialize; #[derive(Serialize)] @@ -15,6 +16,7 @@ pub struct Peer { #[derive(Serialize)] pub struct AnnounceResponse { pub interval: u32, + #[serde(rename = "min interval")] pub interval_min: u32, //pub tracker_id: String, pub complete: u32, diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index fb2a94513..5fc694c8e 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -57,3 +57,7 @@ pub async fn convert_bytes_to_int(array: &Vec) -> u64 { let mut rdr = Cursor::new(array_fixed); return rdr.read_u64::().unwrap(); } + +pub fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { + ser.serialize_u64(inst.elapsed().as_millis() as u64) +} diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 6115bc23e..791e2e7d2 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -3,3 +3,4 @@ pub mod statistics; pub mod peer; pub mod torrent; pub mod key; +pub mod mode; diff --git a/src/tracker/mode.rs b/src/tracker/mode.rs new file mode 100644 index 000000000..edcb27f1c --- /dev/null +++ b/src/tracker/mode.rs @@ -0,0 +1,21 @@ +use serde; +use serde::{Serialize, Deserialize}; + +#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Debug)] +pub enum TrackerMode { + // Will track every new info hash and serve every peer. + #[serde(rename = "public")] + Public, + + // Will only track whitelisted info hashes. 
+ #[serde(rename = "listed")] + Listed, + + // Will only serve authenticated peers + #[serde(rename = "private")] + Private, + + // Will only track whitelisted info hashes and serve authenticated peers + #[serde(rename = "private_listed")] + PrivateListed, +} diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 2a54e2fd2..ce4e52022 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -1,8 +1,11 @@ use std::net::{IpAddr, SocketAddr}; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde; use serde::{Serialize}; + use crate::protocol::common::{NumberOfBytesDef, AnnounceEventDef}; +use crate::protocol::utils::ser_instant; use crate::http::AnnounceRequest; use crate::PeerId; @@ -71,13 +74,5 @@ impl TorrentPeer { } } - pub(crate) fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } - - pub(crate) fn is_completed(&self) -> bool { - self.event == AnnounceEvent::Completed - } -} - -fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { - ser.serialize_u64(inst.elapsed().as_millis() as u64) + pub fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index c5c721a4a..a01b5ce55 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -1,19 +1,16 @@ -use std::borrow::Cow; -use std::net::{IpAddr}; +use std::net::{IpAddr, SocketAddr}; use aquatic_udp_protocol::{AnnounceEvent}; use serde::{Deserialize, Serialize}; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId}; +use crate::{MAX_SCRAPE_TORRENTS, PeerId}; use crate::peer::TorrentPeer; #[derive(Serialize, Deserialize, Clone)] pub struct TorrentEntry { #[serde(skip)] - pub(crate) peers: std::collections::BTreeMap, - pub(crate) completed: u32, - #[serde(skip)] - pub(crate) seeders: u32, + pub peers: std::collections::BTreeMap, + pub completed: u32, } impl TorrentEntry { @@ -21,113 +18,53 @@ impl TorrentEntry { TorrentEntry { peers: 
std::collections::BTreeMap::new(), completed: 0, - seeders: 0, } } pub fn update_peer(&mut self, peer: &TorrentPeer) { match peer.event { AnnounceEvent::Stopped => { - let peer_old = self.peers.remove(&peer.peer_id); - self.update_torrent_stats_with_peer(peer, peer_old); + let _ = self.peers.remove(&peer.peer_id); } - _ => { + AnnounceEvent::Completed => { let peer_old = self.peers.insert(peer.peer_id.clone(), peer.clone()); - self.update_torrent_stats_with_peer(peer, peer_old); + // Don't count if peer was not previously known + if peer_old.is_some() { self.completed += 1; } + } + _ => { + let _ = self.peers.insert(peer.peer_id.clone(), peer.clone()); } } } - pub fn get_peers(&self, remote_addr: Option<&std::net::SocketAddr>) -> Vec { - let mut list = Vec::new(); - for (_, peer) in self - .peers - .iter() - .filter(|e| match remote_addr { - // don't filter on ip_version + pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&TorrentPeer> { + self.peers + .values() + .filter(|peer| match client_addr { + // Don't filter on ip_version None => true, - // filter out different ip_version from remote_addr - Some(remote_address) => { - match e.1.peer_addr.ip() { - IpAddr::V4(_) => { remote_address.is_ipv4() } - IpAddr::V6(_) => { remote_address.is_ipv6() } + // Filter out different ip_version from remote_addr + Some(remote_addr) => { + // Skip ip address of client + if peer.peer_addr.ip() == remote_addr.ip() { return false; } + + match peer.peer_addr.ip() { + IpAddr::V4(_) => { remote_addr.is_ipv4() } + IpAddr::V6(_) => { remote_addr.is_ipv6() } } } }) .take(MAX_SCRAPE_TORRENTS as usize) - { - - // skip ip address of client - if let Some(remote_addr) = remote_addr { - if peer.peer_addr == *remote_addr { - continue; - } - } - - list.push(peer.clone()); - } - list - } - - pub fn update_torrent_stats_with_peer(&mut self, peer: &TorrentPeer, peer_old: Option) { - match peer_old { - None => { - if peer.is_seeder() { - self.seeders += 1; - } - - if 
peer.is_completed() { - self.completed += 1; - } - } - Some(peer_old) => { - match peer.event { - AnnounceEvent::None => { - if peer.is_seeder() && !peer_old.is_seeder() { - self.seeders += 1; - } - } - AnnounceEvent::Completed => { - if peer.is_seeder() && !peer_old.is_seeder() { - self.seeders += 1; - } - - // don't double count completed - if !peer_old.is_completed() { - self.completed += 1; - } - } - AnnounceEvent::Stopped => { - if peer_old.is_seeder() { - if self.seeders != 0 { - self.seeders -= 1; - } - } - } - // impossible, started should be the first time a peer announces itself - AnnounceEvent::Started => {} - } - } - } + .collect() } pub fn get_stats(&self) -> (u32, u32, u32) { - let leechers: u32 = if self.seeders < (self.peers.len() as u32) { - (self.peers.len() as u32) - self.seeders - } else { - 0 - }; - - (self.seeders, self.completed, leechers) + let seeders: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; + let leechers: u32 = self.peers.len() as u32 - seeders; + (seeders, self.completed, leechers) } } -#[derive(Serialize, Deserialize)] -struct DatabaseRow<'a> { - info_hash: InfoHash, - entry: Cow<'a, TorrentEntry>, -} - #[derive(Debug)] pub struct TorrentStats { pub completed: u32, diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index 4f0d571c6..d3e7c8faa 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -4,15 +4,14 @@ use std::net::SocketAddr; use std::sync::Arc; use log::info; -use serde::{Deserialize, Serialize}; -use serde; use tokio::sync::{RwLock, RwLockReadGuard}; +use tokio::sync::mpsc::error::SendError; use crate::Configuration; use crate::protocol::common::InfoHash; use crate::databases::database::Database; -use tokio::sync::mpsc::error::SendError; use crate::databases::database; +use crate::mode::TrackerMode; use crate::peer::TorrentPeer; use crate::tracker::key::AuthKey; use crate::tracker::key::Error::KeyInvalid; @@ -20,28 +19,9 @@ use crate::statistics::{StatsTracker, 
TrackerStatistics, TrackerStatisticsEvent} use crate::tracker::key; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; -#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Debug)] -pub enum TrackerMode { - // Will track every new info hash and serve every peer. - #[serde(rename = "public")] - PublicMode, - - // Will only track whitelisted info hashes. - #[serde(rename = "listed")] - ListedMode, - - // Will only serve authenticated peers - #[serde(rename = "private")] - PrivateMode, - - // Will only track whitelisted info hashes and serve authenticated peers - #[serde(rename = "private_listed")] - PrivateListedMode, -} - pub struct TorrentTracker { - mode: TrackerMode, pub config: Arc, + mode: TrackerMode, torrents: RwLock>, updates: RwLock>, shadow: RwLock>, @@ -58,8 +38,8 @@ impl TorrentTracker { if config.statistics { stats_tracker.run_worker(); } Ok(TorrentTracker { - mode: config.mode, config: config.clone(), + mode: config.mode, torrents: RwLock::new(std::collections::BTreeMap::new()), updates: RwLock::new(std::collections::HashMap::new()), shadow: RwLock::new(std::collections::HashMap::new()), @@ -69,15 +49,15 @@ impl TorrentTracker { } pub fn is_public(&self) -> bool { - self.mode == TrackerMode::PublicMode + self.mode == TrackerMode::Public } pub fn is_private(&self) -> bool { - self.mode == TrackerMode::PrivateMode || self.mode == TrackerMode::PrivateListedMode + self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed } pub fn is_whitelisted(&self) -> bool { - self.mode == TrackerMode::ListedMode || self.mode == TrackerMode::PrivateListedMode + self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed } pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { @@ -138,7 +118,6 @@ impl TorrentTracker { let torrent_entry = TorrentEntry { peers: Default::default(), completed, - seeders: Default::default(), }; torrents.insert(info_hash.clone(), torrent_entry); @@ -171,16 +150,13 @@ 
impl TorrentTracker { } - pub async fn get_torrent_peers( - &self, - info_hash: &InfoHash, - peer_addr: &SocketAddr, - ) -> Vec { + pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr, ) -> Vec { let read_lock = self.torrents.read().await; + match read_lock.get(info_hash) { None => vec![], Some(entry) => { - entry.get_peers(Some(peer_addr)) + entry.get_peers(Some(client_addr)).into_iter().cloned().collect() } } } @@ -242,6 +218,7 @@ impl TorrentTracker { info!("-=[ Stats ]=- | Torrents: {} | Updates: {} | Shadow: {}", torrents_size, updates_size, shadow_size); } + // todo: refactor // remove torrents without peers if enabled, and defragment memory pub async fn cleanup_torrents(&self) { let lock = self.torrents.write().await; @@ -260,7 +237,6 @@ impl TorrentTracker { let mut torrent = TorrentEntry { peers: BTreeMap::new(), completed: 0, - seeders: 0, }; let lock = self.torrents.write().await; @@ -273,13 +249,10 @@ impl TorrentTracker { continue; } torrent.peers.insert(peer_id.clone(), peer.clone()); - if peer.is_seeder() { - torrent.seeders += 1; - } } let mut lock = self.torrents.write().await; lock.remove(hash); - if self.config.mode.clone() == TrackerMode::PublicMode && self.config.cleanup_peerless && !self.config.persistence { + if self.config.mode.clone() == TrackerMode::Public && self.config.cleanup_peerless && !self.config.persistence { if torrent.peers.len() != 0 { lock.insert(hash.clone(), torrent); } @@ -290,6 +263,7 @@ impl TorrentTracker { } } + // todo: refactor pub async fn periodic_saving(&self) { // Get a lock for writing // let mut shadow = self.shadow.write().await; @@ -298,14 +272,14 @@ impl TorrentTracker { let mut updates = self.updates.write().await; let mut updates_cloned: std::collections::HashMap = std::collections::HashMap::new(); // let mut torrent_hashes: Vec = Vec::new(); - info!("Copying updates to updates_cloned..."); + // Copying updates to updates_cloned for (k, completed) in updates.iter() { 
updates_cloned.insert(k.clone(), completed.clone()); } updates.clear(); drop(updates); - info!("Copying updates_cloned into the shadow to overwrite..."); + // Copying updates_cloned into the shadow to overwrite for (k, completed) in updates_cloned.iter() { let mut shadows = self.shadow.write().await; if shadows.contains_key(k) { @@ -317,14 +291,13 @@ impl TorrentTracker { drop(updates_cloned); // We updated the shadow data from the updates data, let's handle shadow data as expected. - info!("Handle shadow_copy to be updated into SQL..."); + // Handle shadow_copy to be updated into SQL let mut shadow_copy: BTreeMap = BTreeMap::new(); let shadows = self.shadow.read().await; for (infohash, completed) in shadows.iter() { shadow_copy.insert(infohash.clone(), TorrentEntry { peers: Default::default(), completed: completed.clone(), - seeders: 0, }); } drop(shadows); From 6fcc043c0e38091b55add786f4e9c33f68a0ed73 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 7 May 2022 01:49:16 +0200 Subject: [PATCH 0098/1003] refactor: removed unused enum in config --- src/config.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/config.rs b/src/config.rs index c24d1c45d..729406163 100644 --- a/src/config.rs +++ b/src/config.rs @@ -12,12 +12,6 @@ use toml; use crate::databases::database::DatabaseDrivers; use crate::mode::TrackerMode; -#[derive(Serialize, Deserialize, PartialEq)] -pub enum TrackerServer { - UDP, - HTTP, -} - #[derive(Serialize, Deserialize, Debug)] pub struct UdpTrackerConfig { pub enabled: bool, From abe2e685444aa0ba2925dfe55668fdca2f519af5 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 7 May 2022 02:02:04 +0200 Subject: [PATCH 0099/1003] refactor: removed unused byteorder dependency --- Cargo.lock | 206 ++++++++++++++++++++++-------------------- Cargo.toml | 19 ++-- src/protocol/utils.rs | 27 ------ 3 files changed, 120 insertions(+), 132 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 56ecd77a7..4d6b9b17a 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -40,7 +40,7 @@ dependencies = [ [[package]] name = "aquatic_udp_protocol" version = "0.2.0" -source = "git+https://github.com/greatest-ape/aquatic#26e2e874377a2682f52568f8e5e8c080c3366326" +source = "git+https://github.com/greatest-ape/aquatic#99792eefc3a0cfb15dc9bbd351af94b14a44e9fc" dependencies = [ "byteorder", "either", @@ -88,9 +88,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "base-x" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +checksum = "dc19a4937b4fbd3fe3379793130e42060d10627a360f2127802b10b87e7baf74" [[package]] name = "base64" @@ -106,8 +106,8 @@ checksum = "d1e50562e37200edf7c6c43e54a08e64a5553bfb59d9c297d5572512aa517256" dependencies = [ "num-bigint 0.3.3", "num-integer", - "num-traits 0.2.14", - "serde 1.0.136", + "num-traits 0.2.15", + "serde 1.0.137", ] [[package]] @@ -238,8 +238,8 @@ checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" dependencies = [ "libc", "num-integer", - "num-traits 0.2.14", - "serde 1.0.136", + "num-traits 0.2.15", + "serde 1.0.137", "time 0.1.44", "winapi", ] @@ -288,7 +288,7 @@ dependencies = [ "lazy_static", "nom", "rust-ini", - "serde 1.0.136", + "serde 1.0.137", "serde-hjson", "serde_json", "toml", @@ -442,9 +442,9 @@ dependencies = [ [[package]] name = "fern" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9a4820f0ccc8a7afd67c39a0f1a0f4b07ca1725164271a64939d7aeb9af065" +checksum = "3bdd7b0849075e79ee9a1836df22c717d1eba30451796fdc631b04565dd11e2a" dependencies = [ "log", ] @@ -749,9 +749,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" +checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb" dependencies = [ "bytes", "fnv", @@ -771,9 +771,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6330e8a36bd8c859f3fa6d9382911fbb7147ec39807f63b923933a247240b9ba" +checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" [[package]] name = "httpdate" @@ -905,9 +905,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.123" +version = "0.2.125" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb691a747a7ab48abc15c5b42066eaafde10dc427e3b6ee2a1cf43db04c763bd" +checksum = "5916d2ae698f6de9bfb891ad7a8d65c09d232dc58cc4ac433c7da3b2fd84bc2b" [[package]] name = "libloading" @@ -931,9 +931,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f35facd4a5673cb5a48822be2be1d4236c1c99cb4113cab7061ac720d5bf859" +checksum = "92e7e15d7610cce1d9752e137625f14e61a28cd45929b6e12e47b50fe154ee2e" dependencies = [ "cc", "pkg-config", @@ -958,9 +958,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if", ] @@ -991,9 +991,9 @@ checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "memchr" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +checksum = 
"2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" @@ -1088,7 +1088,7 @@ dependencies = [ "once_cell", "pem", "percent-encoding", - "serde 1.0.136", + "serde 1.0.137", "serde_json", "socket2", "twox-hash", @@ -1117,12 +1117,12 @@ dependencies = [ "lazy_static", "lexical", "num-bigint 0.4.3", - "num-traits 0.2.14", + "num-traits 0.2.15", "rand", "regex", "rust_decimal", "saturating", - "serde 1.0.136", + "serde 1.0.137", "serde_json", "sha1", "sha2", @@ -1201,7 +1201,7 @@ checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" dependencies = [ "autocfg", "num-integer", - "num-traits 0.2.14", + "num-traits 0.2.15", ] [[package]] @@ -1212,17 +1212,17 @@ checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" dependencies = [ "autocfg", "num-integer", - "num-traits 0.2.14", + "num-traits 0.2.15", ] [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", - "num-traits 0.2.14", + "num-traits 0.2.15", ] [[package]] @@ -1231,14 +1231,14 @@ version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" dependencies = [ - "num-traits 0.2.14", + "num-traits 0.2.15", ] [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", ] @@ -1267,18 +1267,30 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = 
"0.10.38" +version = "0.10.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7ae222234c30df141154f159066c5093ff73b63204dcda7121eb082fc56a95" +checksum = "fb81a6430ac911acb25fe5ac8f1d2af1b4ea8a4fdfda0f1ee4292af2e2d8eb0e" dependencies = [ "bitflags", "cfg-if", "foreign-types", "libc", "once_cell", + "openssl-macros", "openssl-sys", ] +[[package]] +name = "openssl-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "openssl-probe" version = "0.1.5" @@ -1287,9 +1299,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.72" +version = "0.9.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e46109c383602735fa0a2e48dd2b7c892b048e1bf69e5c3b1d804b7d9c203cb" +checksum = "9d5fd19fb3e0a8191c1e34935718976a3e70c112ab9a24af6d7cadccd9d90bc0" dependencies = [ "autocfg", "cc", @@ -1316,7 +1328,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" dependencies = [ "lock_api", - "parking_lot_core 0.9.2", + "parking_lot_core 0.9.3", ] [[package]] @@ -1335,9 +1347,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "995f667a6c822200b0433ac218e05582f0e2efa1b922a3fd2fbaadc5f87bab37" +checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" dependencies = [ "cfg-if", "libc", @@ -1391,9 +1403,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" @@ -1421,9 +1433,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1" +checksum = "9027b48e9d4c9175fa2218adf3557f91c1137021739951d4932f5f8268ac48aa" dependencies = [ "unicode-xid", ] @@ -1589,8 +1601,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22dc69eadbf0ee2110b8d20418c0c6edbaefec2811c4963dc17b6344e11fe0f8" dependencies = [ "arrayvec 0.7.2", - "num-traits 0.2.14", - "serde 1.0.136", + "num-traits 0.2.15", + "serde 1.0.137", ] [[package]] @@ -1614,7 +1626,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.7", + "semver 1.0.9", ] [[package]] @@ -1723,9 +1735,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65bd28f48be7196d222d95b9243287f48d27aca604e08497513019ff0502cc4" +checksum = "8cb243bdfdb5936c8dc3c45762a19d12ab4550cdc753bc247637d4ec35a040fd" [[package]] name = "semver-parser" @@ -1741,9 +1753,9 @@ checksum = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" [[package]] name = "serde" -version = "1.0.136" +version = "1.0.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" +checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" dependencies = [ "serde_derive", ] @@ -1766,24 +1778,24 @@ version = "0.2.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "934d8bdbaa0126dafaea9a8833424a211d9661897717846c6bb782349ca1c30d" dependencies = [ - "serde 1.0.136", + "serde 1.0.137", "serde_bytes", ] [[package]] name = "serde_bytes" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16ae07dd2f88a366f15bd0632ba725227018c69a1c8550a927324f8eb8368bb9" +checksum = "212e73464ebcde48d723aa02eb270ba62eff38a9b732df31f33f1b4e145f3a54" dependencies = [ - "serde 1.0.136", + "serde 1.0.137", ] [[package]] name = "serde_derive" -version = "1.0.136" +version = "1.0.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" +checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" dependencies = [ "proc-macro2", "quote", @@ -1792,13 +1804,13 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.79" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" +checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" dependencies = [ "itoa", "ryu", - "serde 1.0.136", + "serde 1.0.137", ] [[package]] @@ -1810,7 +1822,7 @@ dependencies = [ "form_urlencoded", "itoa", "ryu", - "serde 1.0.136", + "serde 1.0.137", ] [[package]] @@ -1945,7 +1957,7 @@ checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" dependencies = [ "proc-macro2", "quote", - "serde 1.0.136", + "serde 1.0.137", "serde_derive", "syn", ] @@ -1959,7 +1971,7 @@ dependencies = [ "base-x", "proc-macro2", "quote", - "serde 1.0.136", + "serde 1.0.137", "serde_derive", "serde_json", "sha1", @@ -1990,9 +2002,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b683b2b825c8eef438b77c36a06dc262294da3d5a5813fac20da149241dcd44d" +checksum = "7ff7c592601f11445996a06f8ad0c27f094a58857c2f89e97974ab9235b92c52" dependencies = [ "proc-macro2", "quote", @@ -2039,18 +2051,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" dependencies = [ "proc-macro2", "quote", @@ -2108,9 +2120,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] @@ -2123,9 +2135,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.17.0" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee" +checksum = "dce653fb475565de9f6fb0614b28bca8df2c430c0cf84bcd9c843f15de5414cc" dependencies = [ "bytes", "libc", @@ -2221,7 +2233,7 @@ version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ - "serde 1.0.136", + "serde 1.0.137", ] [[package]] @@ -2231,7 +2243,6 @@ dependencies = [ 
"aquatic_udp_protocol", "async-trait", "binascii", - "byteorder", "chrono", "config", "derive_more", @@ -2244,9 +2255,8 @@ dependencies = [ "r2d2_mysql", "r2d2_sqlite", "rand", - "serde 1.0.136", + "serde 1.0.137", "serde_bencode", - "serde_bytes", "serde_json", "thiserror", "tokio", @@ -2275,9 +2285,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e65ce065b4b5c53e73bb28912318cb8c9e9ad3921f1d669eb0e68b4c8143a2b" +checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" dependencies = [ "proc-macro2", "quote", @@ -2329,9 +2339,9 @@ dependencies = [ [[package]] name = "twox-hash" -version = "1.6.2" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "rand", @@ -2355,9 +2365,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" +checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = "unicode-normalization" @@ -2376,9 +2386,9 @@ checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" [[package]] name = "unicode-xid" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" [[package]] name = "untrusted" @@ -2457,7 +2467,7 @@ dependencies = [ "percent-encoding", "pin-project", "scoped-tls", - "serde 1.0.136", + "serde 1.0.137", "serde_json", "serde_urlencoded", 
"tokio", @@ -2597,9 +2607,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5acdd78cb4ba54c0045ac14f62d8f94a03d10047904ae2a40afa1e99d8f70825" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" dependencies = [ "windows_aarch64_msvc", "windows_i686_gnu", @@ -2610,33 +2620,33 @@ dependencies = [ [[package]] name = "windows_aarch64_msvc" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" [[package]] name = "windows_i686_gnu" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" [[package]] name = "windows_i686_msvc" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" [[package]] name = "windows_x86_64_gnu" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" [[package]] name = "windows_x86_64_msvc" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" +checksum = 
"c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" [[package]] name = "wyz" diff --git a/Cargo.toml b/Cargo.toml index cc97072a5..420e772fd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,27 +11,32 @@ debug = 1 lto = "fat" [dependencies] +tokio = { version = "1.7", features = ["full"] } + serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2.3" -serde_bytes = "0.11" serde_json = "1.0.72" hex = "0.4.3" percent-encoding = "2.1.0" -warp = { version = "0.3", features = ["tls"] } -tokio = { version = "1.7", features = ["full"] } binascii = "0.1" + +warp = { version = "0.3", features = ["tls"] } + +config = "0.11" toml = "0.5" + log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = "0.4" -byteorder = "1" + +r2d2 = "0.8.8" r2d2_mysql = "21.0.0" r2d2_sqlite = "0.16.0" -r2d2 = "0.8.8" + rand = "0.8.4" -config = "0.11" derive_more = "0.99" thiserror = "1.0" -aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } futures = "0.3.21" async-trait = "0.1.52" + +aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index 5fc694c8e..392966307 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -1,11 +1,9 @@ use std::error::Error; use std::fmt::Write; -use std::io::Cursor; use std::net::SocketAddr; use std::time::SystemTime; use aquatic_udp_protocol::ConnectionId; -use byteorder::{BigEndian, ReadBytesExt}; pub fn get_connection_id(remote_address: &SocketAddr) -> ConnectionId { match std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH) { @@ -33,31 +31,6 @@ pub fn url_encode_bytes(content: &[u8]) -> Result> { Ok(out) } -// Function that will convert a small or big number into the smallest form of a byte array. 
-pub async fn convert_int_to_bytes(number: &u64) -> Vec { - let mut return_data: Vec = Vec::new(); - // return_data.extend(number.to_be_bytes().reverse()); - for i in 1..8 { - if number < &256u64.pow(i) { - let start: usize = 16usize - i as usize; - return_data.extend(number.to_be_bytes()[start..8].iter()); - return return_data; - } - } - return return_data; -} - -pub async fn convert_bytes_to_int(array: &Vec) -> u64 { - let mut array_fixed: Vec = Vec::new(); - let size = 8 - array.len(); - for _ in 0..size { - array_fixed.push(0); - } - array_fixed.extend(array); - let mut rdr = Cursor::new(array_fixed); - return rdr.read_u64::().unwrap(); -} - pub fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { ser.serialize_u64(inst.elapsed().as_millis() as u64) } From d6dd240c080363daedcad067fb1910c8ae31282d Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 7 May 2022 02:05:01 +0200 Subject: [PATCH 0100/1003] refactor: updated rust version to 2021 --- Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 420e772fd..a98820521 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,10 +1,11 @@ [package] +edition = "2021" name = "torrust-tracker" version = "2.3.0" license = "AGPL-3.0" authors = ["Mick van Dijke "] description = "A feature rich BitTorrent tracker." -edition = "2018" +repository = "https://github.com/torrust/torrust-tracker" [profile.release] debug = 1 From 278ac3abebf2f8c0bebe8faf7d9d00067da808f8 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 7 May 2022 14:11:29 +0200 Subject: [PATCH 0101/1003] refactor: updated cargo.toml dev and release profiles --- Cargo.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index a98820521..4c2fa4a86 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,8 +7,14 @@ authors = ["Mick van Dijke "] description = "A feature rich BitTorrent tracker." 
repository = "https://github.com/torrust/torrust-tracker" +[profile.dev] +debug = 1 +opt-level = 1 +lto = "thin" + [profile.release] debug = 1 +opt-level = 3 lto = "fat" [dependencies] From 829afb9606d311022892898804d947288e86af49 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 7 May 2022 17:49:03 +0200 Subject: [PATCH 0102/1003] refactor: refactored and improved tracker.cleanup_torrents() --- src/config.rs | 16 +++---- src/jobs/persistent_torrent_statistics.rs | 2 +- src/jobs/torrent_cleanup.rs | 2 +- src/setup.rs | 5 ++- src/tracker/torrent.rs | 6 +++ src/tracker/tracker.rs | 52 ++++++----------------- 6 files changed, 33 insertions(+), 50 deletions(-) diff --git a/src/config.rs b/src/config.rs index 729406163..292b065b4 100644 --- a/src/config.rs +++ b/src/config.rs @@ -53,10 +53,10 @@ pub struct Configuration { pub db_driver: DatabaseDrivers, pub db_path: String, pub statistics: bool, - pub persistence: bool, - pub persistence_interval: Option, - pub cleanup_interval: u64, - pub cleanup_peerless: bool, + pub persistent_torrent_completed_stat: bool, + pub persistence_interval: u64, + pub inactive_peer_cleanup_interval: u64, + pub remove_peerless_torrents: bool, pub external_ip: Option, pub announce_interval: u32, pub announce_interval_min: u32, @@ -139,10 +139,10 @@ impl Configuration { db_driver: DatabaseDrivers::Sqlite3, db_path: String::from("data.db"), statistics: true, - persistence: false, - persistence_interval: Some(900), - cleanup_interval: 600, - cleanup_peerless: true, + persistent_torrent_completed_stat: false, + persistence_interval: 0, + inactive_peer_cleanup_interval: 600, + remove_peerless_torrents: true, external_ip: Some(String::from("0.0.0.0")), announce_interval: 120, announce_interval_min: 120, diff --git a/src/jobs/persistent_torrent_statistics.rs b/src/jobs/persistent_torrent_statistics.rs index 7ebc80bdb..54ee23b6b 100644 --- a/src/jobs/persistent_torrent_statistics.rs +++ b/src/jobs/persistent_torrent_statistics.rs @@ -6,7 +6,7 @@ 
use crate::tracker::tracker::TorrentTracker; pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(&tracker); - let interval = config.persistence_interval.unwrap_or(900); + let interval = config.persistence_interval; tokio::spawn(async move { let interval = std::time::Duration::from_secs(interval); diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 05e639728..7d9967352 100644 --- a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -7,7 +7,7 @@ use crate::tracker::tracker::TorrentTracker; pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(&tracker); - let interval = config.cleanup_interval; + let interval = config.inactive_peer_cleanup_interval; tokio::spawn(async move { let interval = std::time::Duration::from_secs(interval); diff --git a/src/setup.rs b/src/setup.rs index b8d49614d..76372ef0d 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -8,8 +8,9 @@ use crate::tracker::tracker::TorrentTracker; pub async fn setup(config: &Configuration, tracker: Arc) -> Vec>{ let mut jobs: Vec> = Vec::new(); + // todo: replace by realtime updates // Load persistent torrents - if config.persistence { + if config.persistent_torrent_completed_stat && config.persistence_interval > 0 { info!("Loading persistent torrents into memory.."); tracker.load_persistent_torrents().await.expect("Could not load persistent torrents."); info!("Persistent torrents loaded."); @@ -39,7 +40,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< } // Remove torrents without peers, every interval - if config.cleanup_interval > 0 { + if config.inactive_peer_cleanup_interval > 0 { jobs.push(torrent_cleanup::start_job(&config, tracker.clone())); } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index a01b5ce55..4b891e992 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -63,6 +63,12 @@ 
impl TorrentEntry { let leechers: u32 = self.peers.len() as u32 - seeders; (seeders, self.completed, leechers) } + + pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { + self.peers.retain(|_, peer| { + peer.updated.elapsed() > std::time::Duration::from_secs(max_peer_timeout as u64) + }); + } } #[derive(Debug)] diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index d3e7c8faa..d25f32923 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -177,7 +177,7 @@ impl TorrentTracker { let (seeders, completed, leechers) = torrent_entry.get_stats(); - if self.config.persistence { + if self.config.persistent_torrent_completed_stat { let mut updates = self.updates.write().await; if updates.contains_key(info_hash) { updates.remove(info_hash); @@ -218,48 +218,24 @@ impl TorrentTracker { info!("-=[ Stats ]=- | Torrents: {} | Updates: {} | Shadow: {}", torrents_size, updates_size, shadow_size); } - // todo: refactor - // remove torrents without peers if enabled, and defragment memory + // Remove inactive peers and (optionally) peerless torrents pub async fn cleanup_torrents(&self) { - let lock = self.torrents.write().await; - - // First we create a mapping of all the torrent hashes in a vector, and we use this to iterate through the btreemap. - // Every hash we have handled, we remove from the btreemap completely, and push it to the top. - let mut torrent_hashes: Vec = Vec::new(); - for (k, _torrent_entry) in lock.iter() { - torrent_hashes.push(k.clone()); - } - - drop(lock); - - // Let's iterate through all torrents, and parse. 
- for hash in torrent_hashes.iter() { - let mut torrent = TorrentEntry { - peers: BTreeMap::new(), - completed: 0, - }; + let mut torrents_lock = self.torrents.write().await; - let lock = self.torrents.write().await; - let torrent_data = lock.get(hash).unwrap().clone(); - drop(lock); + // If we don't need to remove torrents we will use the faster iter + if self.config.remove_peerless_torrents { + torrents_lock.retain(|_, torrent_entry| { + torrent_entry.remove_inactive_peers(self.config.peer_timeout); - torrent.completed = torrent_data.completed.clone(); - for (peer_id, peer) in torrent_data.peers.iter() { - if peer.updated.elapsed() > std::time::Duration::from_secs(self.config.peer_timeout as u64) { - continue; - } - torrent.peers.insert(peer_id.clone(), peer.clone()); - } - let mut lock = self.torrents.write().await; - lock.remove(hash); - if self.config.mode.clone() == TrackerMode::Public && self.config.cleanup_peerless && !self.config.persistence { - if torrent.peers.len() != 0 { - lock.insert(hash.clone(), torrent); + match self.config.persistent_torrent_completed_stat { + true => { torrent_entry.completed > 0 || torrent_entry.peers.len() > 0 } + false => { torrent_entry.peers.len() > 0 } } - } else { - lock.insert(hash.clone(), torrent); + }); + } else { + for (_, torrent_entry) in torrents_lock.iter_mut() { + torrent_entry.remove_inactive_peers(self.config.peer_timeout); } - drop(lock); } } From eb69fa00595989b23cdf71f9e4c954669ff018ac Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 7 May 2022 18:08:22 +0200 Subject: [PATCH 0103/1003] feat: added strip to Cargo.toml to almost half compiled binary size on unix systems --- Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.toml b/Cargo.toml index 4c2fa4a86..c320e9e03 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,7 @@ lto = "thin" debug = 1 opt-level = 3 lto = "fat" +strip = true [dependencies] tokio = { version = "1.7", features = ["full"] } From 
1d353cadb337b7ab847d47f5b903d931edabc2b8 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 7 May 2022 23:47:08 +0200 Subject: [PATCH 0104/1003] feat: massively improved key authentication speed in private mode --- src/api/server.rs | 2 +- src/databases/database.rs | 4 +++- src/databases/mysql.rs | 17 +++++++++++++++-- src/databases/sqlite.rs | 24 ++++++++++++++++++++++-- src/setup.rs | 4 ++++ src/tracker/tracker.rs | 38 ++++++++++++++++++++++++++++++++------ 6 files changed, 77 insertions(+), 12 deletions(-) diff --git a/src/api/server.rs b/src/api/server.rs index d33f17dc0..77496b497 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -289,7 +289,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp }) .and_then(|(key, tracker): (String, Arc)| { async move { - match tracker.remove_auth_key(key).await { + match tracker.remove_auth_key(&key).await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to delete key".into() })) } diff --git a/src/databases/database.rs b/src/databases/database.rs index fd9f2a19d..e2f3cdfe5 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -42,6 +42,8 @@ pub trait Database: Sync + Send { async fn load_persistent_torrents(&self) -> Result, Error>; + async fn load_keys(&self) -> Result, Error>; + async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), Error>; async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result; @@ -54,7 +56,7 @@ pub trait Database: Sync + Send { async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result; - async fn remove_key_from_keys(&self, key: String) -> Result; + async fn remove_key_from_keys(&self, key: &str) -> Result; } #[derive(Debug, Display, PartialEq, Error)] diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index e7f57a7a4..6fd63b7d9 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -2,7 +2,7 @@ use 
std::collections::BTreeMap; use std::str::FromStr; use async_trait::async_trait; -use log::debug; +use log::{debug}; use r2d2::Pool; use r2d2_mysql::mysql::{Opts, OptsBuilder, params, TxOpts}; use r2d2_mysql::mysql::prelude::Queryable; @@ -76,6 +76,19 @@ impl Database for MysqlDatabase { Ok(torrents) } + async fn load_keys(&self) -> Result, Error> { + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + + let keys: Vec = conn.query_map("SELECT `key`, valid_until FROM `keys`", |(key, valid_until): (String, i64)| { + AuthKey { + key, + valid_until: Some(valid_until as u64) + } + }).map_err(|_| database::Error::QueryReturnedNoRows)?; + + Ok(keys) + } + async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), database::Error> { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; @@ -190,7 +203,7 @@ impl Database for MysqlDatabase { } } - async fn remove_key_from_keys(&self, key: String) -> Result { + async fn remove_key_from_keys(&self, key: &str) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; match conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! 
{ key }) { diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 18a1d5a28..9c452eadc 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -8,7 +8,7 @@ use r2d2_sqlite::SqliteConnectionManager; use r2d2_sqlite::rusqlite::NO_PARAMS; use crate::{AUTH_KEY_LENGTH, InfoHash}; -use crate::databases::database::Database; +use crate::databases::database::{Database, Error}; use crate::databases::database; use crate::tracker::key::AuthKey; use crate::tracker::torrent::TorrentEntry; @@ -76,6 +76,26 @@ impl Database for SqliteDatabase { Ok(torrents) } + async fn load_keys(&self) -> Result, Error> { + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + + let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; + + let keys_iter = stmt.query_map(NO_PARAMS, |row| { + let key = row.get(0)?; + let valid_until: i64 = row.get(1)?; + + Ok(AuthKey { + key, + valid_until: Some(valid_until as u64) + }) + })?; + + let keys: Vec = keys_iter.filter_map(|x| x.ok()).collect(); + + Ok(keys) + } + async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), database::Error> { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; @@ -174,7 +194,7 @@ impl Database for SqliteDatabase { } } - async fn remove_key_from_keys(&self, key: String) -> Result { + async fn remove_key_from_keys(&self, key: &str) -> Result { let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) { diff --git a/src/setup.rs b/src/setup.rs index 76372ef0d..69f2db432 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -8,6 +8,10 @@ use crate::tracker::tracker::TorrentTracker; pub async fn setup(config: &Configuration, tracker: Arc) -> Vec>{ let mut jobs: Vec> = Vec::new(); + if tracker.is_private() { + tracker.load_keys().await.expect("Could not retrieve keys."); + } + // todo: replace by realtime updates // Load persistent torrents if 
config.persistent_torrent_completed_stat && config.persistence_interval > 0 { diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index d25f32923..fdbaac4de 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -14,7 +14,6 @@ use crate::databases::database; use crate::mode::TrackerMode; use crate::peer::TorrentPeer; use crate::tracker::key::AuthKey; -use crate::tracker::key::Error::KeyInvalid; use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; use crate::tracker::key; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; @@ -22,6 +21,7 @@ use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; pub struct TorrentTracker { pub config: Arc, mode: TrackerMode, + keys: RwLock>, torrents: RwLock>, updates: RwLock>, shadow: RwLock>, @@ -40,6 +40,7 @@ impl TorrentTracker { Ok(TorrentTracker { config: config.clone(), mode: config.mode, + keys: RwLock::new(std::collections::HashMap::new()), torrents: RwLock::new(std::collections::BTreeMap::new()), updates: RwLock::new(std::collections::HashMap::new()), shadow: RwLock::new(std::collections::HashMap::new()), @@ -66,18 +67,32 @@ impl TorrentTracker { // add key to database if let Err(error) = self.database.add_key_to_keys(&auth_key).await { return Err(error); } + // Add key to in-memory database + self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); + Ok(auth_key) } - pub async fn remove_auth_key(&self, key: String) -> Result { - self.database.remove_key_from_keys(key).await + pub async fn remove_auth_key(&self, key: &str) -> Result { + self.database.remove_key_from_keys(&key).await?; + + // Remove key from in-memory database + self.keys.write().await.remove(key); + + Ok(1) } pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> { - let db_key = self.database.get_key_from_keys(&auth_key.key).await.map_err(|_| KeyInvalid)?; - key::verify_auth_key(&db_key) + let keys_lock = 
self.keys.read().await; + + if let Some(key) = keys_lock.get(&auth_key.key) { + key::verify_auth_key(key) + } else { + Err(key::Error::KeyInvalid) + } } + // todo: speed this up in non-public modes pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { // no authentication needed in public mode if self.is_public() { return Ok(()); } @@ -98,7 +113,7 @@ impl TorrentTracker { // check if info_hash is whitelisted if self.is_whitelisted() { - if self.is_info_hash_whitelisted(info_hash).await == false { + if !self.is_info_hash_whitelisted(info_hash).await { return Err(TorrentError::TorrentNotWhitelisted); } } @@ -106,6 +121,17 @@ impl TorrentTracker { Ok(()) } + pub async fn load_keys(&self) -> Result<(), database::Error> { + let keys_from_database = self.database.load_keys().await?; + let mut keys = self.keys.write().await; + + for key in keys_from_database { + let _ = keys.insert(key.key.clone(), key); + } + + Ok(()) + } + // Loading the torrents from database into memory pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; From aac9ac72bdd300ffb2959ba6f2a1ee04ecac5895 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sun, 8 May 2022 17:26:44 +0200 Subject: [PATCH 0105/1003] fix: http scrape response --- src/http/filters.rs | 2 ++ src/http/handlers.rs | 16 +++++++++------- src/http/response.rs | 25 ++++++++++++++++++++++--- src/protocol/utils.rs | 13 ------------- src/tracker/tracker.rs | 2 +- 5 files changed, 34 insertions(+), 24 deletions(-) diff --git a/src/http/filters.rs b/src/http/filters.rs index 8f3ee04c0..d5a7881d8 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -75,9 +75,11 @@ async fn info_hashes(raw_query: String) -> WebResult> { for v in split_raw_query { if v.contains("info_hash") { let raw_info_hash = v.split("=").collect::>()[1]; + debug!("Raw info hash: {}", raw_info_hash); let 
info_hash_bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)); if let Ok(ih) = info_hash { + debug!("Parsed info hash: {}", ih.to_string()); info_hashes.push(ih); } } diff --git a/src/http/handlers.rs b/src/http/handlers.rs index d7e4859d9..977aeb4ba 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -13,7 +13,6 @@ use crate::tracker::torrent::{TorrentError, TorrentStats}; use crate::http::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, WebResult}; use crate::peer::TorrentPeer; use crate::tracker::statistics::TrackerStatisticsEvent; -use crate::protocol::utils::url_encode_bytes; use crate::tracker::tracker::TorrentTracker; /// Authenticate InfoHash using optional AuthKey @@ -62,7 +61,7 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option /// Handle scrape request pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option, tracker: Arc) -> WebResult { - let mut files: HashMap = HashMap::new(); + let mut files: HashMap = HashMap::new(); let db = tracker.get_torrents().await; for info_hash in scrape_request.info_hashes.iter() { @@ -79,9 +78,7 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option) -> WebResult { - Ok(Response::new(ScrapeResponse { files }.write())) +fn send_scrape_response(files: HashMap) -> WebResult { + let res = ScrapeResponse { files }; + + match res.write() { + Ok(body) => Ok(Response::new(body)), + Err(_) => Err(reject::custom(ServerError::InternalServerError)) + } } /// Handle all server errors and send error reply diff --git a/src/http/response.rs b/src/http/response.rs index 3118f7df1..2bdd4c1e7 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -5,6 +5,7 @@ use std::net::IpAddr; use serde; use serde::Serialize; +use crate::InfoHash; #[derive(Serialize)] pub struct Peer { @@ -78,12 +79,30 
@@ pub struct ScrapeResponseEntry { #[derive(Serialize)] pub struct ScrapeResponse { - pub files: HashMap, + pub files: HashMap, } impl ScrapeResponse { - pub fn write(&self) -> String { - serde_bencode::to_string(&self).unwrap() + pub fn write(&self) -> Result, Box> { + let mut bytes: Vec = Vec::new(); + + bytes.write(b"d5:filesd")?; + + for (info_hash, scrape_response_entry) in self.files.iter() { + bytes.write(b"20:")?; + bytes.write(&info_hash.0)?; + bytes.write(b"d8:completei")?; + bytes.write(scrape_response_entry.complete.to_string().as_bytes())?; + bytes.write(b"e10:downloadedi")?; + bytes.write(scrape_response_entry.downloaded.to_string().as_bytes())?; + bytes.write(b"e10:incompletei")?; + bytes.write(scrape_response_entry.incomplete.to_string().as_bytes())?; + bytes.write(b"ee")?; + } + + bytes.write(b"ee")?; + + Ok(bytes) } } diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index 392966307..2b8ac3ebf 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -18,19 +18,6 @@ pub fn current_time() -> u64 { .as_secs() } -pub fn url_encode_bytes(content: &[u8]) -> Result> { - let mut out: String = String::new(); - - for byte in content.iter() { - match *byte as char { - '0'..='9' | 'a'..='z' | 'A'..='Z' | '.' 
| '-' | '_' | '~' => out.push(*byte as char), - _ => write!(&mut out, "%{:02x}", byte)?, - }; - } - - Ok(out) -} - pub fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { ser.serialize_u64(inst.elapsed().as_millis() as u64) } diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index fdbaac4de..45b23f728 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -92,7 +92,6 @@ impl TorrentTracker { } } - // todo: speed this up in non-public modes pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { // no authentication needed in public mode if self.is_public() { return Ok(()); } @@ -111,6 +110,7 @@ impl TorrentTracker { } } + // todo: speed this up // check if info_hash is whitelisted if self.is_whitelisted() { if !self.is_info_hash_whitelisted(info_hash).await { From c4c6c3ddcb31c580cdc133eaa76191144a84ce58 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 9 May 2022 18:28:02 +0200 Subject: [PATCH 0106/1003] feat: improved persistent torrent saving --- Cargo.lock | 3 +- Cargo.toml | 2 +- src/config.rs | 37 +++---- src/databases/database.rs | 10 +- src/databases/mysql.rs | 62 +++++------- src/databases/sqlite.rs | 37 ++++--- src/http/filters.rs | 6 +- src/http/handlers.rs | 2 +- src/jobs/log_statistics.rs | 32 ------ src/jobs/mod.rs | 2 - src/jobs/persistent_torrent_statistics.rs | 38 ------- src/protocol/utils.rs | 2 - src/setup.rs | 24 ++--- src/tracker/statistics.rs | 24 +++-- src/tracker/torrent.rs | 12 ++- src/tracker/tracker.rs | 118 +++++----------------- 16 files changed, 134 insertions(+), 277 deletions(-) delete mode 100644 src/jobs/log_statistics.rs delete mode 100644 src/jobs/persistent_torrent_statistics.rs diff --git a/Cargo.lock b/Cargo.lock index 4d6b9b17a..1ee924fac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -40,7 +40,8 @@ dependencies = [ [[package]] name = "aquatic_udp_protocol" version = "0.2.0" -source = 
"git+https://github.com/greatest-ape/aquatic#99792eefc3a0cfb15dc9bbd351af94b14a44e9fc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16149f27924d42b337a637cd90a8ee2a8973bbccf32aabebce2b3c66913f947f" dependencies = [ "byteorder", "either", diff --git a/Cargo.toml b/Cargo.toml index c320e9e03..53e2949db 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,4 +47,4 @@ thiserror = "1.0" futures = "0.3.21" async-trait = "0.1.52" -aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } +aquatic_udp_protocol = "0.2.0" diff --git a/src/config.rs b/src/config.rs index 292b065b4..e2cd167f5 100644 --- a/src/config.rs +++ b/src/config.rs @@ -29,15 +29,6 @@ pub struct HttpTrackerConfig { pub ssl_key_path: Option, } -impl HttpTrackerConfig { - pub fn verify_ssl_cert_and_key_set(&self) -> bool { - self.ssl_cert_path.is_some() - && self.ssl_key_path.is_some() - && !self.ssl_cert_path.as_ref().unwrap().is_empty() - && !self.ssl_key_path.as_ref().unwrap().is_empty() - } -} - #[derive(Serialize, Deserialize)] pub struct HttpApiConfig { pub enabled: bool, @@ -48,20 +39,18 @@ pub struct HttpApiConfig { #[derive(Serialize, Deserialize)] pub struct Configuration { pub log_level: Option, - pub log_interval: Option, pub mode: TrackerMode, pub db_driver: DatabaseDrivers, pub db_path: String, - pub statistics: bool, + pub announce_interval: u32, + pub min_announce_interval: u32, + pub max_peer_timeout: u32, + pub on_reverse_proxy: bool, + pub external_ip: Option, + pub tracker_usage_statistics: bool, pub persistent_torrent_completed_stat: bool, - pub persistence_interval: u64, pub inactive_peer_cleanup_interval: u64, pub remove_peerless_torrents: bool, - pub external_ip: Option, - pub announce_interval: u32, - pub announce_interval_min: u32, - pub peer_timeout: u32, - pub on_reverse_proxy: bool, pub udp_trackers: Vec, pub http_trackers: Vec, pub http_api: HttpApiConfig, @@ -134,20 +123,18 @@ impl Configuration { pub fn default() -> Configuration 
{ let mut configuration = Configuration { log_level: Option::from(String::from("info")), - log_interval: Some(60), mode: TrackerMode::Public, db_driver: DatabaseDrivers::Sqlite3, db_path: String::from("data.db"), - statistics: true, + announce_interval: 120, + min_announce_interval: 120, + max_peer_timeout: 900, + on_reverse_proxy: false, + external_ip: Some(String::from("0.0.0.0")), + tracker_usage_statistics: true, persistent_torrent_completed_stat: false, - persistence_interval: 0, inactive_peer_cleanup_interval: 600, remove_peerless_torrents: true, - external_ip: Some(String::from("0.0.0.0")), - announce_interval: 120, - announce_interval_min: 120, - peer_timeout: 900, - on_reverse_proxy: false, udp_trackers: Vec::new(), http_trackers: Vec::new(), http_api: HttpApiConfig { diff --git a/src/databases/database.rs b/src/databases/database.rs index e2f3cdfe5..721dfd00d 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -1,15 +1,11 @@ -use std::collections::BTreeMap; - use async_trait::async_trait; use derive_more::{Display, Error}; -use log::debug; use serde::{Deserialize, Serialize}; use crate::InfoHash; use crate::tracker::key::AuthKey; use crate::databases::mysql::MysqlDatabase; use crate::databases::sqlite::SqliteDatabase; -use crate::tracker::torrent::TorrentEntry; #[derive(Serialize, Deserialize, Debug)] pub enum DatabaseDrivers { @@ -18,8 +14,6 @@ pub enum DatabaseDrivers { } pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result, r2d2::Error> { - debug!("{:?}", db_driver); - let database: Box = match db_driver { DatabaseDrivers::Sqlite3 => { let db = SqliteDatabase::new(db_path)?; @@ -44,7 +38,9 @@ pub trait Database: Sync + Send { async fn load_keys(&self) -> Result, Error>; - async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), Error>; + async fn load_whitelist(&self) -> Result, Error>; + + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), 
Error>; async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 6fd63b7d9..74f807d70 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -1,10 +1,9 @@ -use std::collections::BTreeMap; use std::str::FromStr; use async_trait::async_trait; use log::{debug}; use r2d2::Pool; -use r2d2_mysql::mysql::{Opts, OptsBuilder, params, TxOpts}; +use r2d2_mysql::mysql::{Opts, OptsBuilder, params}; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::MysqlConnectionManager; @@ -12,7 +11,6 @@ use crate::{AUTH_KEY_LENGTH, InfoHash}; use crate::databases::database::{Database, Error}; use crate::databases::database; use crate::tracker::key::AuthKey; -use crate::tracker::torrent::TorrentEntry; pub struct MysqlDatabase { pool: Pool, @@ -37,20 +35,20 @@ impl Database for MysqlDatabase { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( id integer PRIMARY KEY AUTO_INCREMENT, - info_hash BINARY(20) NOT NULL UNIQUE + info_hash VARCHAR(40) NOT NULL UNIQUE );".to_string(); let create_torrents_table = " CREATE TABLE IF NOT EXISTS torrents ( id integer PRIMARY KEY AUTO_INCREMENT, - info_hash BINARY(20) NOT NULL UNIQUE, + info_hash VARCHAR(40) NOT NULL UNIQUE, completed INTEGER DEFAULT 0 NOT NULL );".to_string(); let create_keys_table = format!(" CREATE TABLE IF NOT EXISTS `keys` ( `id` INT NOT NULL AUTO_INCREMENT, - `key` BINARY({}) NOT NULL, + `key` VARCHAR({}) NOT NULL, `valid_until` INT(10) NOT NULL, PRIMARY KEY (`id`), UNIQUE (`key`) @@ -68,7 +66,7 @@ impl Database for MysqlDatabase { async fn load_persistent_torrents(&self) -> Result, database::Error> { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; - let torrents: Vec<(InfoHash, u32)> = conn.query_map("SELECT HEX(info_hash), completed FROM torrents", |(info_hash_string, completed): (String, u32)| { + let torrents: Vec<(InfoHash, u32)> = conn.query_map("SELECT info_hash, completed FROM 
torrents", |(info_hash_string, completed): (String, u32)| { let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); (info_hash, completed) }).map_err(|_| database::Error::QueryReturnedNoRows)?; @@ -89,44 +87,38 @@ impl Database for MysqlDatabase { Ok(keys) } - async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), database::Error> { + async fn load_whitelist(&self) -> Result, Error> { + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + + let info_hashes: Vec = conn.query_map("SELECT info_hash FROM whitelist", |info_hash: String| { + InfoHash::from_str(&info_hash).unwrap() + }).map_err(|_| database::Error::QueryReturnedNoRows)?; + + Ok(info_hashes) + } + + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; - let mut db_transaction = conn.start_transaction(TxOpts::default()).map_err(|_| database::Error::DatabaseError)?; + let info_hash_str = info_hash.to_string(); - let mut insert_vector= vec![]; + debug!("{}", info_hash_str); - for (info_hash, torrent_entry) in torrents { - let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - insert_vector.push(format!("(UNHEX('{}'), {})", info_hash.to_string(), completed.to_string())); - if insert_vector.len() == 1000 { - let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); - if db_transaction.query_drop(query).is_err() { - return Err(Error::InvalidQuery); - } - insert_vector.clear(); + match conn.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)", params! 
{ info_hash_str, completed }) { + Ok(_) => { + Ok(()) } - } - - if insert_vector.len() != 0 { - let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); - if db_transaction.query_drop(query).is_err() { - return Err(Error::InvalidQuery); + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) } - insert_vector.clear(); } - - if db_transaction.commit().is_err() { - return Err(Error::DatabaseError); - }; - - Ok(()) } async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; - match conn.exec_first::("SELECT HEX(info_hash) FROM whitelist WHERE info_hash = UNHEX(:info_hash)", params! { info_hash => info_hash }) + match conn.exec_first::("SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", params! { info_hash }) .map_err(|_| database::Error::QueryReturnedNoRows)? { Some(info_hash) => { Ok(InfoHash::from_str(&info_hash).unwrap()) @@ -142,7 +134,7 @@ impl Database for MysqlDatabase { let info_hash_str = info_hash.to_string(); - match conn.exec_drop("INSERT INTO whitelist (info_hash) VALUES (UNHEX(:info_hash_str))", params! { info_hash_str }) { + match conn.exec_drop("INSERT INTO whitelist (info_hash) VALUES (:info_hash_str)", params! { info_hash_str }) { Ok(_) => { Ok(1) } @@ -158,7 +150,7 @@ impl Database for MysqlDatabase { let info_hash = info_hash.to_string(); - match conn.exec_drop("DELETE FROM whitelist WHERE info_hash = UNHEX(:info_hash)", params! { info_hash }) { + match conn.exec_drop("DELETE FROM whitelist WHERE info_hash = :info_hash", params! 
{ info_hash }) { Ok(_) => { Ok(1) } diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 9c452eadc..55c77969a 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -1,4 +1,3 @@ -use std::collections::BTreeMap; use std::str::FromStr; use async_trait::async_trait; @@ -11,7 +10,6 @@ use crate::{AUTH_KEY_LENGTH, InfoHash}; use crate::databases::database::{Database, Error}; use crate::databases::database; use crate::tracker::key::AuthKey; -use crate::tracker::torrent::TorrentEntry; pub struct SqliteDatabase { pool: Pool, @@ -96,20 +94,35 @@ impl Database for SqliteDatabase { Ok(keys) } - async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), database::Error> { - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + async fn load_whitelist(&self) -> Result, Error> { + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - let db_transaction = conn.transaction()?; + let mut stmt = conn.prepare("SELECT info_hash FROM whitelist")?; - for (info_hash, torrent_entry) in torrents { - let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.execute("INSERT OR IGNORE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); - let _ = db_transaction.execute("UPDATE torrents SET completed = ? 
WHERE info_hash = ?", &[completed.to_string(), info_hash.to_string()]); - } + let info_hash_iter = stmt.query_map(NO_PARAMS, |row| { + let info_hash: String = row.get(0)?; + + Ok(InfoHash::from_str(&info_hash).unwrap()) + })?; - let _ = db_transaction.commit(); + let info_hashes: Vec = info_hash_iter.filter_map(|x| x.ok()).collect(); + + Ok(info_hashes) + } + + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { + let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; - Ok(()) + match conn.execute("INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", &[info_hash.to_string(), completed.to_string()]) { + Ok(updated) => { + if updated > 0 { return Ok(()); } + Err(database::Error::QueryReturnedNoRows) + } + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } } async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { diff --git a/src/http/filters.rs b/src/http/filters.rs index d5a7881d8..a288f8d97 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -3,7 +3,6 @@ use std::net::{IpAddr, SocketAddr}; use std::str::FromStr; use std::sync::Arc; -use log::debug; use warp::{Filter, reject, Rejection}; use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId}; @@ -75,11 +74,9 @@ async fn info_hashes(raw_query: String) -> WebResult> { for v in split_raw_query { if v.contains("info_hash") { let raw_info_hash = v.split("=").collect::>()[1]; - debug!("Raw info hash: {}", raw_info_hash); let info_hash_bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)); if let Ok(ih) = info_hash { - debug!("Parsed info hash: {}", ih.to_string()); info_hashes.push(ih); } } @@ -151,8 +148,7 @@ async fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Opti // set client ip to last forwarded ip let x_forwarded_ip = 
*x_forwarded_ips.last().unwrap(); - IpAddr::from_str(x_forwarded_ip).or_else(|e| { - debug!("{}", e); + IpAddr::from_str(x_forwarded_ip).or_else(|_| { Err(reject::custom(ServerError::AddressNotFound)) }) } diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 977aeb4ba..7616ca301 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -56,7 +56,7 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option IpAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Announce).await; } } - send_announce_response(&announce_request, torrent_stats, peers, announce_interval, tracker.config.announce_interval_min) + send_announce_response(&announce_request, torrent_stats, peers, announce_interval, tracker.config.min_announce_interval) } /// Handle scrape request diff --git a/src/jobs/log_statistics.rs b/src/jobs/log_statistics.rs deleted file mode 100644 index f62399a47..000000000 --- a/src/jobs/log_statistics.rs +++ /dev/null @@ -1,32 +0,0 @@ -use std::sync::Arc; -use log::info; -use tokio::task::JoinHandle; -use crate::{Configuration}; -use crate::tracker::tracker::TorrentTracker; - -pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { - let weak_tracker = std::sync::Arc::downgrade(&tracker); - let interval = config.log_interval.unwrap_or(60); - - tokio::spawn(async move { - let interval = std::time::Duration::from_secs(interval); - let mut interval = tokio::time::interval(interval); - interval.tick().await; - - loop { - tokio::select! 
{ - _ = tokio::signal::ctrl_c() => { - info!("Stopping statistics logging job.."); - break; - } - _ = interval.tick() => { - if let Some(tracker) = weak_tracker.upgrade() { - tracker.post_log().await; - } else { - break; - } - } - } - } - }) -} diff --git a/src/jobs/mod.rs b/src/jobs/mod.rs index a71fcb210..c3e58e56e 100644 --- a/src/jobs/mod.rs +++ b/src/jobs/mod.rs @@ -1,6 +1,4 @@ -pub mod persistent_torrent_statistics; pub mod torrent_cleanup; pub mod tracker_api; -pub mod log_statistics; pub mod http_tracker; pub mod udp_tracker; diff --git a/src/jobs/persistent_torrent_statistics.rs b/src/jobs/persistent_torrent_statistics.rs deleted file mode 100644 index 54ee23b6b..000000000 --- a/src/jobs/persistent_torrent_statistics.rs +++ /dev/null @@ -1,38 +0,0 @@ -use std::sync::Arc; -use log::info; -use tokio::task::JoinHandle; -use crate::{Configuration}; -use crate::tracker::tracker::TorrentTracker; - -pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { - let weak_tracker = std::sync::Arc::downgrade(&tracker); - let interval = config.persistence_interval; - - tokio::spawn(async move { - let interval = std::time::Duration::from_secs(interval); - let mut interval = tokio::time::interval(interval); - interval.tick().await; - - // periodically save torrents to database - loop { - tokio::select! 
{ - _ = tokio::signal::ctrl_c() => { - // Save before shutting down - tracker.periodic_saving().await; - info!("Stopping periodic torrent saving job.."); - break; - } - _ = interval.tick() => { - if let Some(tracker) = weak_tracker.upgrade() { - info!("Saving torrents to database..."); - tracker.periodic_saving().await; - info!("Periodic saving done."); - } else { - // If tracker no longer exists, stop job - break; - } - } - } - } - }) -} diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index 2b8ac3ebf..30b87b99b 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -1,5 +1,3 @@ -use std::error::Error; -use std::fmt::Write; use std::net::SocketAddr; use std::time::SystemTime; diff --git a/src/setup.rs b/src/setup.rs index 69f2db432..ed9b6d8ff 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -1,24 +1,21 @@ use std::sync::Arc; -use log::{info, warn}; +use log::{warn}; use tokio::task::JoinHandle; use crate::{Configuration}; -use crate::jobs::{http_tracker, log_statistics, persistent_torrent_statistics, torrent_cleanup, tracker_api, udp_tracker}; +use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, udp_tracker}; use crate::tracker::tracker::TorrentTracker; pub async fn setup(config: &Configuration, tracker: Arc) -> Vec>{ let mut jobs: Vec> = Vec::new(); + // Load peer keys if tracker.is_private() { - tracker.load_keys().await.expect("Could not retrieve keys."); + tracker.load_keys().await.expect("Could not retrieve keys from database."); } - // todo: replace by realtime updates - // Load persistent torrents - if config.persistent_torrent_completed_stat && config.persistence_interval > 0 { - info!("Loading persistent torrents into memory.."); - tracker.load_persistent_torrents().await.expect("Could not load persistent torrents."); - info!("Persistent torrents loaded."); - jobs.push(persistent_torrent_statistics::start_job(&config, tracker.clone())); + // Load whitelisted torrents + if tracker.is_whitelisted() { + 
tracker.load_whitelist().await.expect("Could not load whitelist from database."); } // Start the UDP blocks @@ -48,12 +45,5 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< jobs.push(torrent_cleanup::start_job(&config, tracker.clone())); } - // Log detailed torrent stats - if let Some(log_interval) = config.log_interval { - if log_interval > 0 { - jobs.push(log_statistics::start_job(&config, tracker.clone())); - } - } - jobs } diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 5822d2d4e..c67df72ec 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -109,12 +109,24 @@ impl StatsTracker { stats_lock.tcp6_scrapes_handled += 1; stats_lock.tcp6_connections_handled += 1; } - TrackerStatisticsEvent::Udp4Connect => { stats_lock.udp4_connections_handled += 1; } - TrackerStatisticsEvent::Udp4Announce => { stats_lock.udp4_announces_handled += 1; } - TrackerStatisticsEvent::Udp4Scrape => { stats_lock.udp4_scrapes_handled += 1; } - TrackerStatisticsEvent::Udp6Connect => { stats_lock.udp6_connections_handled += 1; } - TrackerStatisticsEvent::Udp6Announce => { stats_lock.udp6_announces_handled += 1; } - TrackerStatisticsEvent::Udp6Scrape => { stats_lock.udp6_scrapes_handled += 1; } + TrackerStatisticsEvent::Udp4Connect => { + stats_lock.udp4_connections_handled += 1; + } + TrackerStatisticsEvent::Udp4Announce => { + stats_lock.udp4_announces_handled += 1; + } + TrackerStatisticsEvent::Udp4Scrape => { + stats_lock.udp4_scrapes_handled += 1; + } + TrackerStatisticsEvent::Udp6Connect => { + stats_lock.udp6_connections_handled += 1; + } + TrackerStatisticsEvent::Udp6Announce => { + stats_lock.udp6_announces_handled += 1; + } + TrackerStatisticsEvent::Udp6Scrape => { + stats_lock.udp6_scrapes_handled += 1; + } } drop(stats_lock); diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 4b891e992..0c03e3f82 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -21,7 +21,10 @@ impl TorrentEntry { } } 
- pub fn update_peer(&mut self, peer: &TorrentPeer) { + // Update peer and return completed (times torrent has been downloaded) + pub fn update_peer(&mut self, peer: &TorrentPeer) -> bool { + let mut did_torrent_stats_change: bool = false; + match peer.event { AnnounceEvent::Stopped => { let _ = self.peers.remove(&peer.peer_id); @@ -29,12 +32,17 @@ impl TorrentEntry { AnnounceEvent::Completed => { let peer_old = self.peers.insert(peer.peer_id.clone(), peer.clone()); // Don't count if peer was not previously known - if peer_old.is_some() { self.completed += 1; } + if peer_old.is_some() { + self.completed += 1; + did_torrent_stats_change = true; + } } _ => { let _ = self.peers.insert(peer.peer_id.clone(), peer.clone()); } } + + did_torrent_stats_change } pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&TorrentPeer> { diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index 45b23f728..bcfae3c37 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -3,7 +3,6 @@ use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; -use log::info; use tokio::sync::{RwLock, RwLockReadGuard}; use tokio::sync::mpsc::error::SendError; @@ -22,9 +21,8 @@ pub struct TorrentTracker { pub config: Arc, mode: TrackerMode, keys: RwLock>, + whitelist: RwLock>, torrents: RwLock>, - updates: RwLock>, - shadow: RwLock>, stats_tracker: StatsTracker, database: Box } @@ -35,15 +33,14 @@ impl TorrentTracker { let mut stats_tracker = StatsTracker::new(); // starts a thread for updating tracker stats - if config.statistics { stats_tracker.run_worker(); } + if config.tracker_usage_statistics { stats_tracker.run_worker(); } Ok(TorrentTracker { config: config.clone(), mode: config.mode, keys: RwLock::new(std::collections::HashMap::new()), + whitelist: RwLock::new(std::collections::HashSet::new()), torrents: RwLock::new(std::collections::BTreeMap::new()), - updates: RwLock::new(std::collections::HashMap::new()), - shadow: 
RwLock::new(std::collections::HashMap::new()), stats_tracker, database }) @@ -64,7 +61,7 @@ impl TorrentTracker { pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { let auth_key = key::generate_auth_key(seconds_valid); - // add key to database + // Add key to database if let Err(error) = self.database.add_key_to_keys(&auth_key).await { return Err(error); } // Add key to in-memory database @@ -125,6 +122,8 @@ impl TorrentTracker { let keys_from_database = self.database.load_keys().await?; let mut keys = self.keys.write().await; + keys.clear(); + for key in keys_from_database { let _ = keys.insert(key.key.clone(), key); } @@ -132,6 +131,19 @@ impl TorrentTracker { Ok(()) } + pub async fn load_whitelist(&self) -> Result<(), database::Error> { + let whitelisted_torrents_from_database = self.database.load_whitelist().await?; + let mut whitelist = self.whitelist.write().await; + + whitelist.clear(); + + for info_hash in whitelisted_torrents_from_database { + let _ = whitelist.insert(info_hash); + } + + Ok(()) + } + // Loading the torrents from database into memory pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; @@ -152,12 +164,6 @@ impl TorrentTracker { Ok(()) } - // Saving the torrents from memory - pub async fn save_torrents(&self) -> Result<(), database::Error> { - let torrents = self.torrents.read().await; - self.database.save_persistent_torrent_data(&*torrents).await - } - // Adding torrents is not relevant to public trackers. 
pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result { self.database.add_info_hash_to_whitelist(info_hash.clone()).await @@ -199,19 +205,15 @@ impl TorrentTracker { } }; - torrent_entry.update_peer(peer); + let stats_updated = torrent_entry.update_peer(peer); - let (seeders, completed, leechers) = torrent_entry.get_stats(); - - if self.config.persistent_torrent_completed_stat { - let mut updates = self.updates.write().await; - if updates.contains_key(info_hash) { - updates.remove(info_hash); - } - updates.insert(*info_hash, completed); - drop(updates); + // todo: move this action to a separate worker + if self.config.persistent_torrent_completed_stat && stats_updated { + let _ = self.database.save_persistent_torrent(&info_hash, torrent_entry.completed).await; } + let (seeders, completed, leechers) = torrent_entry.get_stats(); + TorrentStats { seeders, leechers, @@ -231,19 +233,6 @@ impl TorrentTracker { self.stats_tracker.send_event(event).await } - pub async fn post_log(&self) { - let torrents = self.torrents.read().await; - let torrents_size = torrents.len(); - drop(torrents); - let updates = self.updates.read().await; - let updates_size = updates.len(); - drop(updates); - let shadow = self.shadow.read().await; - let shadow_size = shadow.len(); - drop(shadow); - info!("-=[ Stats ]=- | Torrents: {} | Updates: {} | Shadow: {}", torrents_size, updates_size, shadow_size); - } - // Remove inactive peers and (optionally) peerless torrents pub async fn cleanup_torrents(&self) { let mut torrents_lock = self.torrents.write().await; @@ -251,7 +240,7 @@ impl TorrentTracker { // If we don't need to remove torrents we will use the faster iter if self.config.remove_peerless_torrents { torrents_lock.retain(|_, torrent_entry| { - torrent_entry.remove_inactive_peers(self.config.peer_timeout); + torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); match self.config.persistent_torrent_completed_stat { true => { torrent_entry.completed > 0 || 
torrent_entry.peers.len() > 0 } @@ -260,61 +249,8 @@ impl TorrentTracker { }); } else { for (_, torrent_entry) in torrents_lock.iter_mut() { - torrent_entry.remove_inactive_peers(self.config.peer_timeout); + torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); } } } - - // todo: refactor - pub async fn periodic_saving(&self) { - // Get a lock for writing - // let mut shadow = self.shadow.write().await; - - // We will get the data and insert it into the shadow, while clearing updates. - let mut updates = self.updates.write().await; - let mut updates_cloned: std::collections::HashMap = std::collections::HashMap::new(); - // let mut torrent_hashes: Vec = Vec::new(); - // Copying updates to updates_cloned - for (k, completed) in updates.iter() { - updates_cloned.insert(k.clone(), completed.clone()); - } - updates.clear(); - drop(updates); - - // Copying updates_cloned into the shadow to overwrite - for (k, completed) in updates_cloned.iter() { - let mut shadows = self.shadow.write().await; - if shadows.contains_key(k) { - shadows.remove(k); - } - shadows.insert(k.clone(), completed.clone()); - drop(shadows); - } - drop(updates_cloned); - - // We updated the shadow data from the updates data, let's handle shadow data as expected. - // Handle shadow_copy to be updated into SQL - let mut shadow_copy: BTreeMap = BTreeMap::new(); - let shadows = self.shadow.read().await; - for (infohash, completed) in shadows.iter() { - shadow_copy.insert(infohash.clone(), TorrentEntry { - peers: Default::default(), - completed: completed.clone(), - }); - } - drop(shadows); - - // We will now save the data from the shadow into the database. - // This should not put any strain on the server itself, other then the harddisk/ssd. 
- info!("Start saving shadow data into SQL..."); - let result = self.database.save_persistent_torrent_data(&shadow_copy).await; - if result.is_ok() { - info!("Done saving data to SQL and succeeded, emptying shadow..."); - let mut shadow = self.shadow.write().await; - shadow.clear(); - drop(shadow); - } else { - info!("Done saving data to SQL and failed, not emptying shadow..."); - } - } } From 0a771314295c79bc5c89bdbcdb35057df085d127 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 9 May 2022 20:05:11 +0200 Subject: [PATCH 0107/1003] fix: fixed up some database errors --- src/databases/mysql.rs | 18 +++++++++--------- src/databases/sqlite.rs | 38 +++++++++++++++++++------------------- 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 74f807d70..5b6e34eb1 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -54,7 +54,7 @@ impl Database for MysqlDatabase { UNIQUE (`key`) );", AUTH_KEY_LENGTH as i8); - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; conn.query_drop(&create_torrents_table).expect("Could not create torrents table."); conn.query_drop(&create_keys_table).expect("Could not create keys table."); @@ -64,7 +64,7 @@ impl Database for MysqlDatabase { } async fn load_persistent_torrents(&self) -> Result, database::Error> { - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let torrents: Vec<(InfoHash, u32)> = conn.query_map("SELECT info_hash, completed FROM torrents", |(info_hash_string, completed): (String, u32)| { let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); @@ -98,7 +98,7 @@ impl Database for MysqlDatabase { } async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { - let mut conn = 
self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let info_hash_str = info_hash.to_string(); @@ -116,7 +116,7 @@ impl Database for MysqlDatabase { } async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.exec_first::("SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", params! { info_hash }) .map_err(|_| database::Error::QueryReturnedNoRows)? { @@ -130,7 +130,7 @@ impl Database for MysqlDatabase { } async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let info_hash_str = info_hash.to_string(); @@ -146,7 +146,7 @@ impl Database for MysqlDatabase { } async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let info_hash = info_hash.to_string(); @@ -162,7 +162,7 @@ impl Database for MysqlDatabase { } async fn get_key_from_keys(&self, key: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) .map_err(|_| database::Error::QueryReturnedNoRows)? 
{ @@ -179,7 +179,7 @@ impl Database for MysqlDatabase { } async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let key = auth_key.key.to_string(); let valid_until = auth_key.valid_until.unwrap_or(0).to_string(); @@ -196,7 +196,7 @@ impl Database for MysqlDatabase { } async fn remove_key_from_keys(&self, key: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! { key }) { Ok(_) => { diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 55c77969a..e1659d897 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -6,7 +6,7 @@ use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; use r2d2_sqlite::rusqlite::NO_PARAMS; -use crate::{AUTH_KEY_LENGTH, InfoHash}; +use crate::{InfoHash}; use crate::databases::database::{Database, Error}; use crate::databases::database; use crate::tracker::key::AuthKey; @@ -30,25 +30,25 @@ impl Database for SqliteDatabase { fn create_database_tables(&self) -> Result<(), database::Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( - id integer PRIMARY KEY AUTOINCREMENT, - info_hash VARCHAR(20) NOT NULL UNIQUE + id INTEGER PRIMARY KEY AUTOINCREMENT, + info_hash TEXT NOT NULL UNIQUE );".to_string(); let create_torrents_table = " CREATE TABLE IF NOT EXISTS torrents ( - id integer PRIMARY KEY AUTOINCREMENT, - info_hash VARCHAR(20) NOT NULL UNIQUE, + id INTEGER PRIMARY KEY AUTOINCREMENT, + info_hash TEXT NOT NULL UNIQUE, completed INTEGER DEFAULT 0 NOT NULL );".to_string(); - let create_keys_table = format!(" + let create_keys_table = " CREATE TABLE IF NOT EXISTS keys ( - id integer PRIMARY KEY AUTOINCREMENT, - key VARCHAR({}) NOT NULL 
UNIQUE, - valid_until INT(10) NOT NULL - );", AUTH_KEY_LENGTH as i8); + id INTEGER PRIMARY KEY AUTOINCREMENT, + key TEXT NOT NULL UNIQUE, + valid_until INTEGER NOT NULL + );".to_string(); - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; conn.execute(&create_whitelist_table, NO_PARAMS) .and_then(|_| conn.execute(&create_keys_table, NO_PARAMS)) @@ -58,7 +58,7 @@ impl Database for SqliteDatabase { } async fn load_persistent_torrents(&self) -> Result, database::Error> { - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; @@ -111,7 +111,7 @@ impl Database for SqliteDatabase { } async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.execute("INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", &[info_hash.to_string(), completed.to_string()]) { Ok(updated) => { @@ -126,7 +126,7 @@ impl Database for SqliteDatabase { } async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; let mut rows = stmt.query(&[info_hash])?; @@ -142,7 +142,7 @@ impl Database for SqliteDatabase { } async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| 
database::Error::DatabaseError)?; match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", &[info_hash.to_string()]) { Ok(updated) => { @@ -157,7 +157,7 @@ impl Database for SqliteDatabase { } async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", &[info_hash.to_string()]) { Ok(updated) => { @@ -172,7 +172,7 @@ impl Database for SqliteDatabase { } async fn get_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; let mut rows = stmt.query(&[key.to_string()])?; @@ -191,7 +191,7 @@ impl Database for SqliteDatabase { } async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.execute("INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", &[auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()], @@ -208,7 +208,7 @@ impl Database for SqliteDatabase { } async fn remove_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) { Ok(updated) => { From 68d3242207d9a5d76114866a9acabe9cf0cae9e9 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 9 May 2022 20:30:05 +0200 Subject: [PATCH 0108/1003] some code cleanup --- src/api/server.rs | 4 ++ src/tracker/tracker.rs | 110 ++++++++++++++++++----------------------- 2 files changed, 53 insertions(+), 
61 deletions(-) diff --git a/src/api/server.rs b/src/api/server.rs index 77496b497..58c3f0d1a 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -152,7 +152,9 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp udp6_announces_handled: 0, udp6_scrapes_handled: 0, }; + let db = tracker.get_torrents().await; + let _: Vec<_> = db .iter() .map(|(_info_hash, torrent_entry)| { @@ -163,7 +165,9 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp results.torrents += 1; }) .collect(); + let stats = tracker.get_stats().await; + results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index bcfae3c37..163bfe446 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -60,33 +60,66 @@ impl TorrentTracker { pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { let auth_key = key::generate_auth_key(seconds_valid); - - // Add key to database - if let Err(error) = self.database.add_key_to_keys(&auth_key).await { return Err(error); } - - // Add key to in-memory database + self.database.add_key_to_keys(&auth_key).await?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); - Ok(auth_key) } - pub async fn remove_auth_key(&self, key: &str) -> Result { + pub async fn remove_auth_key(&self, key: &str) -> Result<(), database::Error> { self.database.remove_key_from_keys(&key).await?; - - // Remove key from in-memory database self.keys.write().await.remove(key); - - Ok(1) + Ok(()) } pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> { - let keys_lock = self.keys.read().await; + match self.keys.read().await.get(&auth_key.key) { + None => Err(key::Error::KeyInvalid), + Some(key) => key::verify_auth_key(key) + } + } - if let Some(key) = keys_lock.get(&auth_key.key) { - 
key::verify_auth_key(key) - } else { - Err(key::Error::KeyInvalid) + pub async fn load_keys(&self) -> Result<(), database::Error> { + let keys_from_database = self.database.load_keys().await?; + let mut keys = self.keys.write().await; + + keys.clear(); + + for key in keys_from_database { + let _ = keys.insert(key.key.clone(), key); + } + + Ok(()) + } + + // Adding torrents is not relevant to public trackers. + pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + self.database.add_info_hash_to_whitelist(info_hash.clone()).await?; + self.whitelist.write().await.insert(info_hash.clone()); + Ok(()) + } + + // Removing torrents is not relevant to public trackers. + pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + self.database.remove_info_hash_from_whitelist(info_hash.clone()).await?; + self.whitelist.write().await.remove(info_hash); + Ok(()) + } + + pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { + self.whitelist.read().await.contains(info_hash) + } + + pub async fn load_whitelist(&self) -> Result<(), database::Error> { + let whitelisted_torrents_from_database = self.database.load_whitelist().await?; + let mut whitelist = self.whitelist.write().await; + + whitelist.clear(); + + for info_hash in whitelisted_torrents_from_database { + let _ = whitelist.insert(info_hash); } + + Ok(()) } pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { @@ -107,7 +140,6 @@ impl TorrentTracker { } } - // todo: speed this up // check if info_hash is whitelisted if self.is_whitelisted() { if !self.is_info_hash_whitelisted(info_hash).await { @@ -118,32 +150,6 @@ impl TorrentTracker { Ok(()) } - pub async fn load_keys(&self) -> Result<(), database::Error> { - let keys_from_database = self.database.load_keys().await?; - let mut keys = self.keys.write().await; - - keys.clear(); - - for key in 
keys_from_database { - let _ = keys.insert(key.key.clone(), key); - } - - Ok(()) - } - - pub async fn load_whitelist(&self) -> Result<(), database::Error> { - let whitelisted_torrents_from_database = self.database.load_whitelist().await?; - let mut whitelist = self.whitelist.write().await; - - whitelist.clear(); - - for info_hash in whitelisted_torrents_from_database { - let _ = whitelist.insert(info_hash); - } - - Ok(()) - } - // Loading the torrents from database into memory pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; @@ -164,24 +170,6 @@ impl TorrentTracker { Ok(()) } - // Adding torrents is not relevant to public trackers. - pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result { - self.database.add_info_hash_to_whitelist(info_hash.clone()).await - } - - // Removing torrents is not relevant to public trackers. - pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result { - self.database.remove_info_hash_from_whitelist(info_hash.clone()).await - } - - pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { - match self.database.get_info_hash_from_whitelist(&info_hash.to_string()).await { - Ok(_) => true, - Err(_) => false - } - } - - pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr, ) -> Vec { let read_lock = self.torrents.read().await; From ce24446cbb1ed49acdbd9104245b1f5b70e027ab Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 9 May 2022 21:41:54 +0200 Subject: [PATCH 0109/1003] feat: scrape unauthenticated info_hashes return 0 values instead of error --- src/http/handlers.rs | 11 ++++++----- src/udp/handlers.rs | 24 ++++++++++++++++-------- 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 7616ca301..0dc737641 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -65,13 
+65,14 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option { - let (seeders, completed, leechers) = torrent_info.get_stats(); - ScrapeResponseEntry { complete: seeders, downloaded: completed, incomplete: leechers } + if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { + let (seeders, completed, leechers) = torrent_info.get_stats(); + ScrapeResponseEntry { complete: seeders, downloaded: completed, incomplete: leechers } + } else { + ScrapeResponseEntry { complete: 0, downloaded: 0, incomplete: 0 } + } } None => { ScrapeResponseEntry { complete: 0, downloaded: 0, incomplete: 0 } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 4ea767c0b..860a2fe4b 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -154,16 +154,22 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra for info_hash in request.info_hashes.iter() { let info_hash = InfoHash(info_hash.0); - if authenticate(&info_hash, tracker.clone()).await.is_err() { continue; } - let scrape_entry = match db.get(&info_hash) { Some(torrent_info) => { - let (seeders, completed, leechers) = torrent_info.get_stats(); - - TorrentScrapeStatistics { - seeders: NumberOfPeers(seeders as i32), - completed: NumberOfDownloads(completed as i32), - leechers: NumberOfPeers(leechers as i32), + if authenticate(&info_hash, tracker.clone()).await.is_ok() { + let (seeders, completed, leechers) = torrent_info.get_stats(); + + TorrentScrapeStatistics { + seeders: NumberOfPeers(seeders as i32), + completed: NumberOfDownloads(completed as i32), + leechers: NumberOfPeers(leechers as i32), + } + } else { + TorrentScrapeStatistics { + seeders: NumberOfPeers(0), + completed: NumberOfDownloads(0), + leechers: NumberOfPeers(0), + } } } None => { @@ -178,6 +184,8 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra torrent_stats.push(scrape_entry); } + drop(db); + // send stats event match remote_addr { SocketAddr::V4(_) => { 
tracker.send_stats_event(TrackerStatisticsEvent::Udp4Scrape).await; } From 77e173cf2b7e0e075efff5b8619fc1c1ecf0a84c Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 9 May 2022 21:48:19 +0200 Subject: [PATCH 0110/1003] refactor: removed root from http routes --- src/http/routes.rs | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/src/http/routes.rs b/src/http/routes.rs index a0b197f44..53b2b0ce5 100644 --- a/src/http/routes.rs +++ b/src/http/routes.rs @@ -14,22 +14,11 @@ use crate::tracker::tracker::TorrentTracker; /// All routes pub fn routes(tracker: Arc) -> impl Filter + Clone { - root(tracker.clone()) - .or(announce(tracker.clone())) - .or(scrape(tracker.clone())) + announce(tracker.clone()) + .or(scrape(tracker)) .recover(send_error) } -/// GET / or / -fn root(tracker: Arc) -> impl Filter + Clone { - warp::any() - .and(warp::filters::method::get()) - .and(with_announce_request(tracker.config.on_reverse_proxy)) - .and(with_auth_key()) - .and(with_tracker(tracker)) - .and_then(handle_announce) -} - /// GET /announce or /announce/ fn announce(tracker: Arc) -> impl Filter + Clone { warp::path::path("announce") From b9aca67246df50f6ed183f7eb61652c54dc3a40c Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 9 May 2022 22:02:54 +0200 Subject: [PATCH 0111/1003] feat: added endpoints to reload whitelist and keys from database --- src/api/server.rs | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/src/api/server.rs b/src/api/server.rs index 58c3f0d1a..19ceac92a 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -300,6 +300,46 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp } }); + // GET /api/whitelist/reload + // Reload whitelist + let t7 = tracker.clone(); + let reload_whitelist = filters::method::get() + .and(filters::path::path("whitelist")) + .and(filters::path::path("reload")) + .and(filters::path::end()) + .map(move || { + let tracker = t7.clone(); + tracker 
+ }) + .and_then(|tracker: Arc| { + async move { + match tracker.load_whitelist().await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to reload whitelist".into() })) + } + } + }); + + // GET /api/keys/reload + // Reload whitelist + let t8 = tracker.clone(); + let reload_keys = filters::method::get() + .and(filters::path::path("keys")) + .and(filters::path::path("reload")) + .and(filters::path::end()) + .map(move || { + let tracker = t8.clone(); + tracker + }) + .and_then(|tracker: Arc| { + async move { + match tracker.load_keys().await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to reload keys".into() })) + } + } + }); + let api_routes = filters::path::path("api") .and(view_torrent_list @@ -309,6 +349,8 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp .or(add_torrent) .or(create_key) .or(delete_key) + .or(reload_whitelist) + .or(reload_keys) ); let server = api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())); From df87a9d3e89e530bfe90f1a48d0df884d377b430 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 9 May 2022 22:04:28 +0200 Subject: [PATCH 0112/1003] updated example config --- README.md | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index bb4649271..beb2591ea 100644 --- a/README.md +++ b/README.md @@ -52,29 +52,26 @@ cargo build --release ```toml log_level = "info" mode = "public" +db_driver = "Sqlite3" db_path = "data.db" -persistence = false -cleanup_interval = 600 -cleanup_peerless = true -external_ip = "0.0.0.0" announce_interval = 120 -announce_interval_min = 900 -peer_timeout = 900 +min_announce_interval = 120 +max_peer_timeout = 900 on_reverse_proxy = false +external_ip = "0.0.0.0" +tracker_usage_statistics = true +persistent_torrent_completed_stat = false 
+inactive_peer_cleanup_interval = 600 +remove_peerless_torrents = true [[udp_trackers]] enabled = false bind_address = "0.0.0.0:6969" -[[udp_trackers]] -enabled = true -bind_address = "[::]:6969" - [[http_trackers]] enabled = true bind_address = "0.0.0.0:6969" ssl_enabled = false -ssl_bind_address = "0.0.0.0:6868" ssl_cert_path = "" ssl_key_path = "" From 2d97caba4389e09e0a7fe5f9ab73f6804b676c78 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 9 Aug 2022 10:32:46 +0100 Subject: [PATCH 0113/1003] ci: show coverage report --- .github/workflows/test_build_release.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 7be5626e5..d848ed653 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -14,9 +14,12 @@ jobs: with: profile: minimal toolchain: stable + components: llvm-tools-preview - uses: Swatinem/rust-cache@v1 + - uses: taiki-e/install-action@cargo-llvm-cov + - uses: taiki-e/install-action@nextest - name: Run tests - run: cargo test + run: cargo llvm-cov nextest build: needs: test From b548e80e8cb5b1da87a14578d99df5f303cca345 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 9 Aug 2022 17:26:02 +0100 Subject: [PATCH 0114/1003] test: add test for config module --- Cargo.lock | 112 ++++++++++++++++++++++++++++- Cargo.toml | 2 + src/config.rs | 148 +++++++++++++++++++++++++++++++------- src/databases/database.rs | 2 +- src/main.rs | 4 +- 5 files changed, 237 insertions(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1ee924fac..c52f6767b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -265,7 +265,7 @@ dependencies = [ "ansi_term", "atty", "bitflags", - "strsim", + "strsim 0.8.0", "textwrap", "unicode-width", "vec_map", @@ -352,6 +352,41 @@ dependencies = [ "typenum", ] +[[package]] +name = "darling" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "4529658bdda7fd6769b8614be250cdcfc3aeb0ee72fe66f9e41e5e5eb73eac02" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "649c91bc01e8b1eac09fb91e8dbc7d517684ca6be8ebc75bb9cafc894f9fdb6f" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc69c5bfcbd2fc09a0f38451d2daf0e372e367986a83906d1b0dbc88134fb5" +dependencies = [ + "darling_core", + "quote", + "syn", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -812,6 +847,12 @@ dependencies = [ "want", ] +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" version = "0.2.3" @@ -831,6 +872,7 @@ checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" dependencies = [ "autocfg", "hashbrown", + "serde 1.0.137", ] [[package]] @@ -1131,7 +1173,7 @@ dependencies = [ "subprocess", "thiserror", "time 0.2.27", - "uuid", + "uuid 0.8.2", ] [[package]] @@ -1254,6 +1296,15 @@ dependencies = [ "libc", ] +[[package]] +name = "num_threads" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +dependencies = [ + "libc", +] + [[package]] name = "once_cell" version = "1.10.0" @@ -1826,6 +1877,34 @@ dependencies = [ "serde 1.0.137", ] +[[package]] +name = "serde_with" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89df7a26519371a3cce44fbb914c2819c84d9b897890987fa3ab096491cc0ea8" +dependencies = [ + "base64", + "chrono", + "hex", + "indexmap", 
+ "serde 1.0.137", + "serde_json", + "serde_with_macros", + "time 0.3.13", +] + +[[package]] +name = "serde_with_macros" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de337f322382fcdfbb21a014f7c224ee041a23785651db67b9827403178f698f" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "sha-1" version = "0.9.8" @@ -1991,6 +2070,12 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + [[package]] name = "subprocess" version = "0.2.8" @@ -2096,6 +2181,18 @@ dependencies = [ "winapi", ] +[[package]] +name = "time" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db76ff9fa4b1458b3c7f077f3ff9887394058460d21e634355b273aaf11eea45" +dependencies = [ + "itoa", + "libc", + "num_threads", + "serde 1.0.137", +] + [[package]] name = "time-macros" version = "0.1.1" @@ -2259,9 +2356,11 @@ dependencies = [ "serde 1.0.137", "serde_bencode", "serde_json", + "serde_with", "thiserror", "tokio", "toml", + "uuid 1.1.2", "warp", ] @@ -2421,6 +2520,15 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +[[package]] +name = "uuid" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd6469f4314d5f1ffec476e05f17cc9a78bc7a27a6a857842170bdf8d6f98d2f" +dependencies = [ + "getrandom", +] + [[package]] name = "vcpkg" version = "0.2.15" diff --git a/Cargo.toml b/Cargo.toml index 53e2949db..554ba940d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,6 +24,7 @@ tokio = { version = "1.7", features = ["full"] 
} serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2.3" serde_json = "1.0.72" +serde_with = "2.0.0" hex = "0.4.3" percent-encoding = "2.1.0" binascii = "0.1" @@ -48,3 +49,4 @@ futures = "0.3.21" async-trait = "0.1.52" aquatic_udp_protocol = "0.2.0" +uuid = { version = "1.1.2", features = ["v4"] } diff --git a/src/config.rs b/src/config.rs index e2cd167f5..8acda3f63 100644 --- a/src/config.rs +++ b/src/config.rs @@ -6,37 +6,39 @@ use std::path::Path; use std::str::FromStr; use config::{Config, ConfigError, File}; -use serde::{Deserialize, Serialize, Serializer}; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, NoneAsEmptyString}; use toml; use crate::databases::database::DatabaseDrivers; use crate::mode::TrackerMode; -#[derive(Serialize, Deserialize, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Debug)] pub struct UdpTrackerConfig { pub enabled: bool, pub bind_address: String, } -#[derive(Serialize, Deserialize, Debug)] +#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Debug)] pub struct HttpTrackerConfig { pub enabled: bool, pub bind_address: String, pub ssl_enabled: bool, - #[serde(serialize_with = "none_as_empty_string")] + #[serde_as(as = "NoneAsEmptyString")] pub ssl_cert_path: Option, - #[serde(serialize_with = "none_as_empty_string")] + #[serde_as(as = "NoneAsEmptyString")] pub ssl_key_path: Option, } -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, PartialEq, Debug)] pub struct HttpApiConfig { pub enabled: bool, pub bind_address: String, pub access_tokens: HashMap, } -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, PartialEq, Debug)] pub struct Configuration { pub log_level: Option, pub mode: TrackerMode, @@ -75,18 +77,6 @@ impl std::fmt::Display for ConfigurationError { impl std::error::Error for ConfigurationError {} -pub fn none_as_empty_string(option: &Option, serializer: S) -> Result - where - T: Serialize, - S: Serializer, -{ - if let Some(value) = option { - 
value.serialize(serializer) - } else { - "".serialize(serializer) - } -} - impl Configuration { pub fn load(data: &[u8]) -> Result { toml::from_slice(data) @@ -161,18 +151,16 @@ impl Configuration { configuration } - pub fn load_from_file() -> Result { + pub fn load_from_file(path: &str) -> Result { let mut config = Config::new(); - const CONFIG_PATH: &str = "config.toml"; - - if Path::new(CONFIG_PATH).exists() { - config.merge(File::with_name(CONFIG_PATH))?; + if Path::new(path).exists() { + config.merge(File::with_name(path))?; } else { eprintln!("No config file found."); eprintln!("Creating config file.."); let config = Configuration::default(); - let _ = config.save_to_file(); + let _ = config.save_to_file(path); return Err(ConfigError::Message(format!("Please edit the config.TOML in the root folder and restart the tracker."))); } @@ -181,9 +169,115 @@ impl Configuration { Ok(torrust_config) } - pub fn save_to_file(&self) -> Result<(), ()> { + pub fn save_to_file(&self, path: &str) -> Result<(), ()> { let toml_string = toml::to_string(self).expect("Could not encode TOML value"); - fs::write("config.toml", toml_string).expect("Could not write to file!"); + fs::write(path, toml_string).expect("Could not write to file!"); Ok(()) } } + +mod configuration { + + #[cfg(test)] + fn default_config_toml() -> String { + let config = r#"log_level = "info" + mode = "public" + db_driver = "Sqlite3" + db_path = "data.db" + announce_interval = 120 + min_announce_interval = 120 + max_peer_timeout = 900 + on_reverse_proxy = false + external_ip = "0.0.0.0" + tracker_usage_statistics = true + persistent_torrent_completed_stat = false + inactive_peer_cleanup_interval = 600 + remove_peerless_torrents = true + + [[udp_trackers]] + enabled = false + bind_address = "0.0.0.0:6969" + + [[http_trackers]] + enabled = false + bind_address = "0.0.0.0:6969" + ssl_enabled = false + ssl_cert_path = "" + ssl_key_path = "" + + [http_api] + enabled = true + bind_address = "127.0.0.1:1212" + + 
[http_api.access_tokens] + admin = "MyAccessToken" + "#.lines().map(|line| line.trim_start()).collect::>().join("\n"); + config + } + + #[test] + fn should_have_a_default_value_for_the_log_level() { + use crate::Configuration; + + let configuration = Configuration::default(); + + assert_eq!(configuration.log_level, Option::from(String::from("info")), "Expected default log level to be: {:?}, got {:?}", Option::from(String::from("info")), configuration.log_level); + } + + #[test] + fn should_be_saved_in_a_toml_config_file() { + use std::env; + use crate::Configuration; + use std::fs; + use uuid::Uuid; + + // Build temp config file path + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); + + // Convert to argument type for Configuration::save_to_file + let config_file_path = temp_file.clone(); + let path = config_file_path.to_string_lossy().to_string(); + + let default_configuration = Configuration::default(); + + default_configuration.save_to_file(&path).expect("Could not save configuration to file"); + + let contents = fs::read_to_string(&path).expect("Something went wrong reading the file"); + + assert_eq!(contents, default_config_toml()); + } + + #[cfg(test)] + fn create_temp_config_file_with_default_config()-> String { + use std::env; + use std::fs::File; + use std::io::Write; + use uuid::Uuid; + + // Build temp config file path + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); + + // Convert to argument type for Configuration::load_from_file + let config_file_path = temp_file.clone(); + let path = config_file_path.to_string_lossy().to_string(); + + // Write file contents + let mut file = File::create(temp_file).unwrap(); + writeln!(&mut file, "{}", default_config_toml()).unwrap(); + + path + } + + #[test] + fn should_be_loaded_from_a_toml_config_file() { + use crate::Configuration; + + let config_file_path = 
create_temp_config_file_with_default_config(); + + let configuration = Configuration::load_from_file(&config_file_path).expect("Could not load configuration from file"); + + assert_eq!(configuration, Configuration::default()); + } +} \ No newline at end of file diff --git a/src/databases/database.rs b/src/databases/database.rs index 721dfd00d..915c5381e 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -7,7 +7,7 @@ use crate::tracker::key::AuthKey; use crate::databases::mysql::MysqlDatabase; use crate::databases::sqlite::SqliteDatabase; -#[derive(Serialize, Deserialize, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Debug)] pub enum DatabaseDrivers { Sqlite3, MySQL, diff --git a/src/main.rs b/src/main.rs index 794cda4bb..963419f03 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,8 +7,10 @@ use torrust_tracker::tracker::tracker::TorrentTracker; #[tokio::main] async fn main() { + const CONFIG_PATH: &str = "config.toml"; + // Initialize Torrust config - let config = match Configuration::load_from_file() { + let config = match Configuration::load_from_file(CONFIG_PATH) { Ok(config) => Arc::new(config), Err(error) => { panic!("{}", error) From 1e4cbb8f5af17cf09f07828a84615d930ea1bd87 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 10 Aug 2022 08:21:39 +0100 Subject: [PATCH 0115/1003] test: refactor to follow rust conventions And add missing annotation for tests module. 
--- src/config.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/config.rs b/src/config.rs index 8acda3f63..c6414de5e 100644 --- a/src/config.rs +++ b/src/config.rs @@ -176,7 +176,8 @@ impl Configuration { } } -mod configuration { +#[cfg(test)] +mod tests { #[cfg(test)] fn default_config_toml() -> String { @@ -216,7 +217,7 @@ mod configuration { } #[test] - fn should_have_a_default_value_for_the_log_level() { + fn configuration_should_have_a_default_value_for_the_log_level() { use crate::Configuration; let configuration = Configuration::default(); @@ -225,7 +226,7 @@ mod configuration { } #[test] - fn should_be_saved_in_a_toml_config_file() { + fn configuration_should_be_saved_in_a_toml_config_file() { use std::env; use crate::Configuration; use std::fs; @@ -271,7 +272,7 @@ mod configuration { } #[test] - fn should_be_loaded_from_a_toml_config_file() { + fn configuration_should_be_loaded_from_a_toml_config_file() { use crate::Configuration; let config_file_path = create_temp_config_file_with_default_config(); From 228585eee884437ac33d65ed089a3def45e1dd80 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 10 Aug 2022 08:24:17 +0100 Subject: [PATCH 0116/1003] test: remove test for config default options --- src/config.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/config.rs b/src/config.rs index c6414de5e..3d6405621 100644 --- a/src/config.rs +++ b/src/config.rs @@ -216,15 +216,6 @@ mod tests { config } - #[test] - fn configuration_should_have_a_default_value_for_the_log_level() { - use crate::Configuration; - - let configuration = Configuration::default(); - - assert_eq!(configuration.log_level, Option::from(String::from("info")), "Expected default log level to be: {:?}, got {:?}", Option::from(String::from("info")), configuration.log_level); - } - #[test] fn configuration_should_be_saved_in_a_toml_config_file() { use std::env; From 1a9b7dbfd076a30afd0f5471f8cf5a54c22d1fd0 Mon Sep 17 00:00:00 2001 From: Jose 
Celano Date: Wed, 10 Aug 2022 08:26:19 +0100 Subject: [PATCH 0117/1003] refactor: remove duplicate impl block --- src/config.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/config.rs b/src/config.rs index 3d6405621..0eaaafc45 100644 --- a/src/config.rs +++ b/src/config.rs @@ -107,9 +107,7 @@ impl Configuration { } } } -} -impl Configuration { pub fn default() -> Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), From 749e8963732ee6adf53622df49d3be7b8f11b36f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 10 Aug 2022 09:53:13 +0100 Subject: [PATCH 0118/1003] refactor: remove unused code --- src/config.rs | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/src/config.rs b/src/config.rs index 0eaaafc45..8e79c45ef 100644 --- a/src/config.rs +++ b/src/config.rs @@ -78,23 +78,6 @@ impl std::fmt::Display for ConfigurationError { impl std::error::Error for ConfigurationError {} impl Configuration { - pub fn load(data: &[u8]) -> Result { - toml::from_slice(data) - } - - pub fn load_file(path: &str) -> Result { - match std::fs::read(path) { - Err(e) => Err(ConfigurationError::IOError(e)), - Ok(data) => { - match Self::load(data.as_slice()) { - Ok(cfg) => { - Ok(cfg) - } - Err(e) => Err(ConfigurationError::ParseError(e)), - } - } - } - } pub fn get_ext_ip(&self) -> Option { match &self.external_ip { From d7dfe0252dd4a3a021ec4c7b390f512f1fb8ce67 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 10 Aug 2022 10:01:10 +0100 Subject: [PATCH 0119/1003] test: add more tests to configuration --- src/config.rs | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/src/config.rs b/src/config.rs index 8e79c45ef..005705f78 100644 --- a/src/config.rs +++ b/src/config.rs @@ -197,6 +197,26 @@ mod tests { config } + #[test] + fn configuration_should_have_default_values() { + use crate::Configuration; + + let configuration = Configuration::default(); + + let toml = 
toml::to_string(&configuration).expect("Could not encode TOML value"); + + assert_eq!(toml, default_config_toml()); + } + + #[test] + fn configuration_should_contain_the_external_ip() { + use crate::Configuration; + + let configuration = Configuration::default(); + + assert_eq!(configuration.external_ip, Option::Some(String::from("0.0.0.0"))); + } + #[test] fn configuration_should_be_saved_in_a_toml_config_file() { use std::env; @@ -253,4 +273,13 @@ mod tests { assert_eq!(configuration, Configuration::default()); } + + #[test] + fn configuration_error_could_be_displayed() { + use crate::ConfigurationError; + + let error = ConfigurationError::TrackerModeIncompatible; + + assert_eq!(format!("{}", error), "TrackerModeIncompatible"); + } } \ No newline at end of file From f9880472179ce2f7c1733dc7ac1e0fd723e5c291 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 11 Aug 2022 19:24:03 +0200 Subject: [PATCH 0120/1003] chore(benchmark): added a basic script to benchmark http(s) announce performance and memory usage --- tests/README.md | 9 ++++++ tests/wrk_benchmark_announce.lua | 53 ++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100644 tests/README.md create mode 100644 tests/wrk_benchmark_announce.lua diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 000000000..04860056c --- /dev/null +++ b/tests/README.md @@ -0,0 +1,9 @@ +### Running Benchmarks + +#### HTTP(S) Announce Peer + Torrent +For this benchmark we use the tool [wrk](https://github.com/wg/wrk). 
+ +To run the benchmark using wrk, execute the following example script (change the url to your own tracker url): + + wrk -c200 -t1 -d10s -s ./wrk_benchmark_announce.lua --latency http://tracker.dutchbits.nl + diff --git a/tests/wrk_benchmark_announce.lua b/tests/wrk_benchmark_announce.lua new file mode 100644 index 000000000..c182f8e68 --- /dev/null +++ b/tests/wrk_benchmark_announce.lua @@ -0,0 +1,53 @@ +-- else the randomness would be the same every run +math.randomseed(os.time()) + +local charset = "0123456789ABCDEF" + +function hexToChar(hex) + local n = tonumber(hex, 16) + local f = string.char(n) + return f +end + +function hexStringToCharString(hex) + local ret = {} + local r + for i = 0, 19 do + local x = i * 2 + r = hex:sub(x+1, x+2) + local f = hexToChar(r) + table.insert(ret, f) + end + return table.concat(ret) +end + +function urlEncode(str) + str = string.gsub (str, "([^0-9a-zA-Z !'()*._~-])", -- locale independent + function (c) return string.format ("%%%02X", string.byte(c)) end) + str = string.gsub (str, " ", "+") + return str +end + +function genHexString(length) + local ret = {} + local r + for i = 1, length do + r = math.random(1, #charset) + table.insert(ret, charset:sub(r, r)) + end + return table.concat(ret) +end + +function randomInfoHash() + local hexString = genHexString(40) + local str = hexStringToCharString(hexString) + return urlEncode(str) +end + +-- the request function that will run at each request +request = function() + path = "/announce?info_hash=" .. randomInfoHash() .. 
"&peer_id=-lt0D80-a%D4%10%19%99%A6yh%9A%E1%CD%96&port=54434&uploaded=885&downloaded=0&left=0&corrupt=0&key=A78381BD&numwant=200&compact=1&no_peer_id=1&supportcrypto=1&redundant=0" + headers = {} + headers["X-Forwarded-For"] = "1.1.1.1" + return wrk.format("GET", path, headers) +end From faf7a99b97b8ef2a2eaf8ea2a83fd985591c6905 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sun, 14 Aug 2022 16:11:56 +0200 Subject: [PATCH 0121/1003] chore: updated r2d2_sqlite and included other dependencies to support cross compilation --- Cargo.lock | 144 ++++++++++++++-------------------------- Cargo.toml | 6 +- src/databases/sqlite.rs | 23 +++---- 3 files changed, 63 insertions(+), 110 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c52f6767b..279e4a67d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -743,6 +743,24 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashlink" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d452c155cb93fecdfb02a73dd57b5d8e442c2063bd7aac72f1bc5e4263a43086" +dependencies = [ + "hashbrown 0.12.3", +] + [[package]] name = "headers" version = "0.3.7" @@ -871,7 +889,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.11.2", "serde 1.0.137", ] @@ -964,10 +982,11 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.18.0" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e704a02bcaecd4a08b93a23f6be59d0bd79cd161e0963e9499165a0a35df7bd" +checksum = "9f0455f2c1bc9a7caa792907026e469c1d91761fb0ea37cbb16427c77280cf35" dependencies = [ + "cc", "pkg-config", "vcpkg", 
] @@ -1014,16 +1033,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ea2d928b485416e8908cff2d97d621db22b27f7b3b6729e438bcf42c671ba91" dependencies = [ - "hashbrown", -] - -[[package]] -name = "lru-cache" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" -dependencies = [ - "linked-hash-map", + "hashbrown 0.11.2", ] [[package]] @@ -1319,9 +1329,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.40" +version = "0.10.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb81a6430ac911acb25fe5ac8f1d2af1b4ea8a4fdfda0f1ee4292af2e2d8eb0e" +checksum = "618febf65336490dfcf20b73f885f5651a0c89c64c2d4a8c3662585a70bf5bd0" dependencies = [ "bitflags", "cfg-if", @@ -1349,15 +1359,25 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "openssl-src" +version = "111.22.0+1.1.1q" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f31f0d509d1c1ae9cada2f9539ff8f37933831fd5098879e482aa687d659853" +dependencies = [ + "cc", +] + [[package]] name = "openssl-sys" -version = "0.9.73" +version = "0.9.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5fd19fb3e0a8191c1e34935718976a3e70c112ab9a24af6d7cadccd9d90bc0" +checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f" dependencies = [ "autocfg", "cc", "libc", + "openssl-src", "pkg-config", "vcpkg", ] @@ -1370,17 +1390,7 @@ checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", - "parking_lot_core 0.8.5", -] - -[[package]] -name = "parking_lot" -version = "0.12.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" -dependencies = [ - "lock_api", - "parking_lot_core 0.9.3", + "parking_lot_core", ] [[package]] @@ -1397,19 +1407,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "parking_lot_core" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-sys", -] - [[package]] name = "peeking_take_while" version = "0.1.2" @@ -1514,7 +1511,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "545c5bc2b880973c9c10e4067418407a0ccaa3091781d1671d46eb35107cb26f" dependencies = [ "log", - "parking_lot 0.11.2", + "parking_lot", "scheduled-thread-pool", ] @@ -1530,9 +1527,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.16.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed60ebe88b27ac28c0563bc0fbeaecd302ff53e3a01e5ddc2ec9f4e6c707d929" +checksum = "b4f5d0337e99cd5cacd91ffc326c6cc9d8078def459df560c4f9bf9ba4a51034" dependencies = [ "r2d2", "rusqlite", @@ -1626,18 +1623,16 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.23.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45d0fd62e1df63d254714e6cb40d0a0e82e7a1623e7a27f679d851af092ae58b" +checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" dependencies = [ "bitflags", "fallible-iterator", "fallible-streaming-iterator", + "hashlink", "libsqlite3-sys", - "lru-cache", - "memchr", "smallvec", - "time 0.1.44", ] [[package]] @@ -1728,7 +1723,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" dependencies = [ - 
"parking_lot 0.11.2", + "parking_lot", ] [[package]] @@ -2243,7 +2238,6 @@ dependencies = [ "mio", "num_cpus", "once_cell", - "parking_lot 0.12.0", "pin-project-lite", "signal-hook-registry", "socket2", @@ -2253,9 +2247,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" dependencies = [ "proc-macro2", "quote", @@ -2348,6 +2342,7 @@ dependencies = [ "futures", "hex", "log", + "openssl", "percent-encoding", "r2d2", "r2d2_mysql", @@ -2714,49 +2709,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows-sys" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" -dependencies = [ - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" - -[[package]] -name = "windows_i686_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" - -[[package]] -name = "windows_i686_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" - [[package]] name = "wyz" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index 554ba940d..9d21ed7d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ lto = "fat" strip = true [dependencies] -tokio = { version = "1.7", features = ["full"] } +tokio = { version = "1.7", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2.3" @@ -29,6 +29,8 @@ hex = "0.4.3" percent-encoding = "2.1.0" binascii = "0.1" +openssl = { version = "0.10.41", features = ["vendored"] } + warp = { version = "0.3", features = ["tls"] } config = "0.11" @@ -40,7 +42,7 @@ chrono = "0.4" r2d2 = "0.8.8" r2d2_mysql = "21.0.0" -r2d2_sqlite = "0.16.0" +r2d2_sqlite = { version = "0.21.0", features = ["bundled"] } rand = "0.8.4" derive_more = "0.99" diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index e1659d897..143029ec2 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -4,7 +4,6 @@ use async_trait::async_trait; use log::debug; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; -use r2d2_sqlite::rusqlite::NO_PARAMS; use crate::{InfoHash}; use crate::databases::database::{Database, Error}; @@ -50,9 +49,9 @@ impl Database for SqliteDatabase { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - conn.execute(&create_whitelist_table, NO_PARAMS) - .and_then(|_| conn.execute(&create_keys_table, NO_PARAMS)) - .and_then(|_| conn.execute(&create_torrents_table, NO_PARAMS)) + conn.execute(&create_whitelist_table, []) + .and_then(|_| conn.execute(&create_keys_table, [])) + .and_then(|_| conn.execute(&create_torrents_table, [])) .map_err(|_| database::Error::InvalidQuery) 
.map(|_| ()) } @@ -62,7 +61,7 @@ impl Database for SqliteDatabase { let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; - let torrent_iter = stmt.query_map(NO_PARAMS, |row| { + let torrent_iter = stmt.query_map([], |row| { let info_hash_string: String = row.get(0)?; let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); let completed: u32 = row.get(1)?; @@ -79,7 +78,7 @@ impl Database for SqliteDatabase { let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; - let keys_iter = stmt.query_map(NO_PARAMS, |row| { + let keys_iter = stmt.query_map([], |row| { let key = row.get(0)?; let valid_until: i64 = row.get(1)?; @@ -99,7 +98,7 @@ impl Database for SqliteDatabase { let mut stmt = conn.prepare("SELECT info_hash FROM whitelist")?; - let info_hash_iter = stmt.query_map(NO_PARAMS, |row| { + let info_hash_iter = stmt.query_map([], |row| { let info_hash: String = row.get(0)?; Ok(InfoHash::from_str(&info_hash).unwrap()) @@ -113,7 +112,7 @@ impl Database for SqliteDatabase { async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - match conn.execute("INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", &[info_hash.to_string(), completed.to_string()]) { + match conn.execute("INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", [info_hash.to_string(), completed.to_string()]) { Ok(updated) => { if updated > 0 { return Ok(()); } Err(database::Error::QueryReturnedNoRows) @@ -144,7 +143,7 @@ impl Database for SqliteDatabase { async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", &[info_hash.to_string()]) { + match 
conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", [info_hash.to_string()]) { Ok(updated) => { if updated > 0 { return Ok(updated); } Err(database::Error::QueryReturnedNoRows) @@ -159,7 +158,7 @@ impl Database for SqliteDatabase { async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", &[info_hash.to_string()]) { + match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", [info_hash.to_string()]) { Ok(updated) => { if updated > 0 { return Ok(updated); } Err(database::Error::QueryReturnedNoRows) @@ -175,7 +174,7 @@ impl Database for SqliteDatabase { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; - let mut rows = stmt.query(&[key.to_string()])?; + let mut rows = stmt.query([key.to_string()])?; if let Some(row) = rows.next()? 
{ let key: String = row.get(0).unwrap(); @@ -194,7 +193,7 @@ impl Database for SqliteDatabase { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.execute("INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - &[auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()], + [auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()], ) { Ok(updated) => { if updated > 0 { return Ok(updated); } From 498c32b99c4fc1592d7e94ad46864142ce7aaaf1 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 9 Sep 2022 18:20:50 +0200 Subject: [PATCH 0122/1003] formatting: add custom rustfmt.toml and update .gitignore --- .gitignore | 1 + rustfmt.toml | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 rustfmt.toml diff --git a/.gitignore b/.gitignore index 99a07430b..e2956b2d6 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ /.idea/ /config.toml /data.db +/.vscode/launch.json diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 000000000..3e878b271 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,4 @@ +max_width = 130 +imports_granularity = "Module" +group_imports = "StdExternalCrate" + From 57bf2000e39dccfc2f8b6e41d6c6f3eac38a3886 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 9 Sep 2022 18:22:57 +0200 Subject: [PATCH 0123/1003] formatting: format the world! 
--- src/api/server.rs | 307 ++++++++++++++++++------------------ src/config.rs | 79 +++++----- src/databases/database.rs | 6 +- src/databases/mod.rs | 2 +- src/databases/mysql.rs | 134 +++++++++------- src/databases/sqlite.rs | 49 ++++-- src/http/filters.rs | 59 ++++--- src/http/handlers.rs | 117 ++++++++++---- src/http/mod.rs | 8 +- src/http/request.rs | 2 +- src/http/response.rs | 1 + src/http/routes.rs | 20 +-- src/http/server.rs | 24 ++- src/jobs/http_tracker.rs | 8 +- src/jobs/mod.rs | 2 +- src/jobs/torrent_cleanup.rs | 4 +- src/jobs/tracker_api.rs | 10 +- src/jobs/udp_tracker.rs | 4 +- src/lib.rs | 16 +- src/logging.rs | 22 ++- src/main.rs | 5 +- src/protocol/common.rs | 5 +- src/protocol/utils.rs | 4 +- src/setup.rs | 26 ++- src/tracker/key.rs | 31 ++-- src/tracker/mod.rs | 8 +- src/tracker/mode.rs | 2 +- src/tracker/peer.rs | 24 ++- src/tracker/statistics.rs | 4 +- src/tracker/torrent.rs | 17 +- src/tracker/tracker.rs | 51 +++--- src/udp/handlers.rs | 168 ++++++++++++-------- src/udp/mod.rs | 2 +- src/udp/server.rs | 4 +- 34 files changed, 682 insertions(+), 543 deletions(-) diff --git a/src/api/server.rs b/src/api/server.rs index 19ceac92a..cc6c905e4 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -4,10 +4,10 @@ use std::net::SocketAddr; use std::sync::Arc; use serde::{Deserialize, Serialize}; -use warp::{Filter, filters, reply, serve}; +use warp::{filters, reply, serve, Filter}; -use crate::protocol::common::*; use crate::peer::TorrentPeer; +use crate::protocol::common::*; use crate::tracker::tracker::TorrentTracker; #[derive(Deserialize, Debug)] @@ -55,7 +55,7 @@ enum ActionStatus<'a> { impl warp::reject::Reject for ActionStatus<'static> {} -fn authenticate(tokens: HashMap) -> impl Filter + Clone { +fn authenticate(tokens: HashMap) -> impl Filter + Clone { #[derive(Deserialize)] struct AuthToken { token: Option, @@ -67,18 +67,20 @@ fn authenticate(tokens: HashMap) -> impl Filter()) - .and_then(|tokens: Arc>, token: AuthToken| { - async move { - 
match token.token { - Some(token) => { - if !tokens.contains(&token) { - return Err(warp::reject::custom(ActionStatus::Err { reason: "token not valid".into() })); - } - - Ok(()) + .and_then(|tokens: Arc>, token: AuthToken| async move { + match token.token { + Some(token) => { + if !tokens.contains(&token) { + return Err(warp::reject::custom(ActionStatus::Err { + reason: "token not valid".into(), + })); } - None => Err(warp::reject::custom(ActionStatus::Err { reason: "unauthorized".into() })) + + Ok(()) } + None => Err(warp::reject::custom(ActionStatus::Err { + reason: "unauthorized".into(), + })), } }) .untuple_one() @@ -96,30 +98,28 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = api_torrents.clone(); (limits, tracker) }) - .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| { - async move { - let offset = limits.offset.unwrap_or(0); - let limit = min(limits.limit.unwrap_or(1000), 4000); - - let db = tracker.get_torrents().await; - let results: Vec<_> = db - .iter() - .map(|(info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - Torrent { - info_hash, - seeders, - completed, - leechers, - peers: None, - } - }) - .skip(offset as usize) - .take(limit as usize) - .collect(); - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) - } + .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| async move { + let offset = limits.offset.unwrap_or(0); + let limit = min(limits.limit.unwrap_or(1000), 4000); + + let db = tracker.get_torrents().await; + let results: Vec<_> = db + .iter() + .map(|(info_hash, torrent_entry)| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + Torrent { + info_hash, + seeders, + completed, + leechers, + peers: None, + } + }) + .skip(offset as usize) + .take(limit as usize) + .collect(); + + Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) }); // GET /api/stats @@ -132,57 +132,55 @@ pub fn start(socket_addr: SocketAddr, 
tracker: Arc) -> impl warp let tracker = api_stats.clone(); tracker }) - .and_then(|tracker: Arc| { - async move { - let mut results = Stats { - torrents: 0, - seeders: 0, - completed: 0, - leechers: 0, - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - }; - - let db = tracker.get_torrents().await; - - let _: Vec<_> = db - .iter() - .map(|(_info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - results.seeders += seeders; - results.completed += completed; - results.leechers += leechers; - results.torrents += 1; - }) - .collect(); - - let stats = tracker.get_stats().await; - - results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; - results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; - results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; - results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; - results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; - results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; - results.udp4_connections_handled = stats.udp4_connections_handled as u32; - results.udp4_announces_handled = stats.udp4_announces_handled as u32; - results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; - results.udp6_connections_handled = stats.udp6_connections_handled as u32; - results.udp6_announces_handled = stats.udp6_announces_handled as u32; - results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) - } + .and_then(|tracker: Arc| async move { + let mut results = Stats { + torrents: 0, + seeders: 0, + completed: 0, + leechers: 0, + 
tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + }; + + let db = tracker.get_torrents().await; + + let _: Vec<_> = db + .iter() + .map(|(_info_hash, torrent_entry)| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + results.seeders += seeders; + results.completed += completed; + results.leechers += leechers; + results.torrents += 1; + }) + .collect(); + + let stats = tracker.get_stats().await; + + results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; + results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; + results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; + results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; + results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; + results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; + results.udp4_connections_handled = stats.udp4_connections_handled as u32; + results.udp4_announces_handled = stats.udp4_announces_handled as u32; + results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; + results.udp6_connections_handled = stats.udp6_connections_handled as u32; + results.udp6_announces_handled = stats.udp6_announces_handled as u32; + results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; + + Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) }); // GET /api/torrent/:info_hash @@ -196,28 +194,26 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t2.clone(); (info_hash, tracker) }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| { - async move { - let db = tracker.get_torrents().await; - let torrent_entry_option = db.get(&info_hash); + 
.and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + let db = tracker.get_torrents().await; + let torrent_entry_option = db.get(&info_hash); - if torrent_entry_option.is_none() { - return Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")) - } + if torrent_entry_option.is_none() { + return Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")); + } - let torrent_entry = torrent_entry_option.unwrap(); - let (seeders, completed, leechers) = torrent_entry.get_stats(); + let torrent_entry = torrent_entry_option.unwrap(); + let (seeders, completed, leechers) = torrent_entry.get_stats(); - let peers = torrent_entry.get_peers(None); + let peers = torrent_entry.get_peers(None); - Ok(reply::json(&Torrent { - info_hash: &info_hash, - seeders, - completed, - leechers, - peers: Some(peers), - })) - } + Ok(reply::json(&Torrent { + info_hash: &info_hash, + seeders, + completed, + leechers, + peers: Some(peers), + })) }); // DELETE /api/whitelist/:info_hash @@ -231,12 +227,12 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t3.clone(); (info_hash, tracker) }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| { - async move { - match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to remove torrent from whitelist".into() })) - } + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + match tracker.remove_torrent_from_whitelist(&info_hash).await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to remove torrent from whitelist".into(), + })), } }); @@ -251,12 +247,12 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t4.clone(); (info_hash, tracker) }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| { - async move { - match 
tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(..) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to whitelist torrent".into() })) - } + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + match tracker.add_torrent_to_whitelist(&info_hash).await { + Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(..) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to whitelist torrent".into(), + })), } }); @@ -271,12 +267,12 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t5.clone(); (seconds_valid, tracker) }) - .and_then(|(seconds_valid, tracker): (u64, Arc)| { - async move { - match tracker.generate_auth_key(seconds_valid).await { - Ok(auth_key) => Ok(warp::reply::json(&auth_key)), - Err(..) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to generate key".into() })) - } + .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { + match tracker.generate_auth_key(seconds_valid).await { + Ok(auth_key) => Ok(warp::reply::json(&auth_key)), + Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to generate key".into(), + })), } }); @@ -291,12 +287,12 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t6.clone(); (key, tracker) }) - .and_then(|(key, tracker): (String, Arc)| { - async move { - match tracker.remove_auth_key(&key).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to delete key".into() })) - } + .and_then(|(key, tracker): (String, Arc)| async move { + match tracker.remove_auth_key(&key).await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to delete key".into(), + })), } }); @@ -311,12 +307,12 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t7.clone(); tracker }) - .and_then(|tracker: Arc| { - async move { - match tracker.load_whitelist().await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to reload whitelist".into() })) - } + .and_then(|tracker: Arc| async move { + match tracker.load_whitelist().await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to reload whitelist".into(), + })), } }); @@ -331,34 +327,31 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t8.clone(); tracker }) - .and_then(|tracker: Arc| { - async move { - match tracker.load_keys().await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to reload keys".into() })) - } + .and_then(|tracker: Arc| async move { + match tracker.load_keys().await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to reload keys".into(), + })), } }); - let 
api_routes = - filters::path::path("api") - .and(view_torrent_list - .or(delete_torrent) - .or(view_torrent_info) - .or(view_stats_list) - .or(add_torrent) - .or(create_key) - .or(delete_key) - .or(reload_whitelist) - .or(reload_keys) - ); + let api_routes = filters::path::path("api").and( + view_torrent_list + .or(delete_torrent) + .or(view_torrent_info) + .or(view_stats_list) + .or(add_torrent) + .or(create_key) + .or(delete_key) + .or(reload_whitelist) + .or(reload_keys), + ); let server = api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())); let (_addr, api_server) = serve(server).bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c() - .await - .expect("Failed to listen to shutdown signal."); + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); }); api_server diff --git a/src/config.rs b/src/config.rs index 005705f78..c094eb2f9 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,4 +1,3 @@ -use std; use std::collections::HashMap; use std::fs; use std::net::IpAddr; @@ -8,7 +7,7 @@ use std::str::FromStr; use config::{Config, ConfigError, File}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; -use toml; +use {std, toml}; use crate::databases::database::DatabaseDrivers; use crate::mode::TrackerMode; @@ -70,7 +69,7 @@ impl std::fmt::Display for ConfigurationError { match self { ConfigurationError::IOError(e) => e.fmt(f), ConfigurationError::ParseError(e) => e.fmt(f), - _ => write!(f, "{:?}", self) + _ => write!(f, "{:?}", self), } } } @@ -78,16 +77,13 @@ impl std::fmt::Display for ConfigurationError { impl std::error::Error for ConfigurationError {} impl Configuration { - pub fn get_ext_ip(&self) -> Option { match &self.external_ip { None => None, - Some(external_ip) => { - match IpAddr::from_str(external_ip) { - Ok(external_ip) => Some(external_ip), - Err(_) => None - } - } + Some(external_ip) => match IpAddr::from_str(external_ip) { + Ok(external_ip) => 
Some(external_ip), + Err(_) => None, + }, } } @@ -111,24 +107,23 @@ impl Configuration { http_api: HttpApiConfig { enabled: true, bind_address: String::from("127.0.0.1:1212"), - access_tokens: [(String::from("admin"), String::from("MyAccessToken"))].iter().cloned().collect(), + access_tokens: [(String::from("admin"), String::from("MyAccessToken"))] + .iter() + .cloned() + .collect(), }, }; - configuration.udp_trackers.push( - UdpTrackerConfig { - enabled: false, - bind_address: String::from("0.0.0.0:6969"), - } - ); - configuration.http_trackers.push( - HttpTrackerConfig { - enabled: false, - bind_address: String::from("0.0.0.0:6969"), - ssl_enabled: false, - ssl_cert_path: None, - ssl_key_path: None, - } - ); + configuration.udp_trackers.push(UdpTrackerConfig { + enabled: false, + bind_address: String::from("0.0.0.0:6969"), + }); + configuration.http_trackers.push(HttpTrackerConfig { + enabled: false, + bind_address: String::from("0.0.0.0:6969"), + ssl_enabled: false, + ssl_cert_path: None, + ssl_key_path: None, + }); configuration } @@ -142,10 +137,14 @@ impl Configuration { eprintln!("Creating config file.."); let config = Configuration::default(); let _ = config.save_to_file(path); - return Err(ConfigError::Message(format!("Please edit the config.TOML in the root folder and restart the tracker."))); + return Err(ConfigError::Message(format!( + "Please edit the config.TOML in the root folder and restart the tracker." 
+ ))); } - let torrust_config: Configuration = config.try_into().map_err(|e| ConfigError::Message(format!("Errors while processing config: {}.", e)))?; + let torrust_config: Configuration = config + .try_into() + .map_err(|e| ConfigError::Message(format!("Errors while processing config: {}.", e)))?; Ok(torrust_config) } @@ -193,7 +192,11 @@ mod tests { [http_api.access_tokens] admin = "MyAccessToken" - "#.lines().map(|line| line.trim_start()).collect::>().join("\n"); + "# + .lines() + .map(|line| line.trim_start()) + .collect::>() + .join("\n"); config } @@ -219,11 +222,12 @@ mod tests { #[test] fn configuration_should_be_saved_in_a_toml_config_file() { - use std::env; - use crate::Configuration; - use std::fs; + use std::{env, fs}; + use uuid::Uuid; + use crate::Configuration; + // Build temp config file path let temp_directory = env::temp_dir(); let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); @@ -234,7 +238,9 @@ mod tests { let default_configuration = Configuration::default(); - default_configuration.save_to_file(&path).expect("Could not save configuration to file"); + default_configuration + .save_to_file(&path) + .expect("Could not save configuration to file"); let contents = fs::read_to_string(&path).expect("Something went wrong reading the file"); @@ -242,16 +248,17 @@ mod tests { } #[cfg(test)] - fn create_temp_config_file_with_default_config()-> String { + fn create_temp_config_file_with_default_config() -> String { use std::env; use std::fs::File; use std::io::Write; + use uuid::Uuid; // Build temp config file path let temp_directory = env::temp_dir(); let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); - + // Convert to argument type for Configuration::load_from_file let config_file_path = temp_file.clone(); let path = config_file_path.to_string_lossy().to_string(); @@ -282,4 +289,4 @@ mod tests { assert_eq!(format!("{}", error), "TrackerModeIncompatible"); } -} \ No newline at end of 
file +} diff --git a/src/databases/database.rs b/src/databases/database.rs index 915c5381e..adc735fd2 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -2,10 +2,10 @@ use async_trait::async_trait; use derive_more::{Display, Error}; use serde::{Deserialize, Serialize}; -use crate::InfoHash; -use crate::tracker::key::AuthKey; use crate::databases::mysql::MysqlDatabase; use crate::databases::sqlite::SqliteDatabase; +use crate::tracker::key::AuthKey; +use crate::InfoHash; #[derive(Serialize, Deserialize, PartialEq, Debug)] pub enum DatabaseDrivers { @@ -70,7 +70,7 @@ impl From for Error { fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { match e { r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows, - _ => Error::InvalidQuery + _ => Error::InvalidQuery, } } } diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 119e34816..169d99f4d 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -1,3 +1,3 @@ +pub mod database; pub mod mysql; pub mod sqlite; -pub mod database; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 5b6e34eb1..882fb7bf4 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -1,16 +1,16 @@ use std::str::FromStr; use async_trait::async_trait; -use log::{debug}; +use log::debug; use r2d2::Pool; -use r2d2_mysql::mysql::{Opts, OptsBuilder, params}; use r2d2_mysql::mysql::prelude::Queryable; +use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MysqlConnectionManager; -use crate::{AUTH_KEY_LENGTH, InfoHash}; -use crate::databases::database::{Database, Error}; use crate::databases::database; +use crate::databases::database::{Database, Error}; use crate::tracker::key::AuthKey; +use crate::{InfoHash, AUTH_KEY_LENGTH}; pub struct MysqlDatabase { pool: Pool, @@ -21,11 +21,11 @@ impl MysqlDatabase { let opts = Opts::from_url(&db_path).expect("Failed to connect to MySQL database."); let builder = OptsBuilder::from_opts(opts); let manager = 
MysqlConnectionManager::new(builder); - let pool = r2d2::Pool::builder().build(manager).expect("Failed to create r2d2 MySQL connection pool."); + let pool = r2d2::Pool::builder() + .build(manager) + .expect("Failed to create r2d2 MySQL connection pool."); - Ok(Self { - pool - }) + Ok(Self { pool }) } } @@ -36,29 +36,36 @@ impl Database for MysqlDatabase { CREATE TABLE IF NOT EXISTS whitelist ( id integer PRIMARY KEY AUTO_INCREMENT, info_hash VARCHAR(40) NOT NULL UNIQUE - );".to_string(); + );" + .to_string(); let create_torrents_table = " CREATE TABLE IF NOT EXISTS torrents ( id integer PRIMARY KEY AUTO_INCREMENT, info_hash VARCHAR(40) NOT NULL UNIQUE, completed INTEGER DEFAULT 0 NOT NULL - );".to_string(); + );" + .to_string(); - let create_keys_table = format!(" + let create_keys_table = format!( + " CREATE TABLE IF NOT EXISTS `keys` ( `id` INT NOT NULL AUTO_INCREMENT, `key` VARCHAR({}) NOT NULL, `valid_until` INT(10) NOT NULL, PRIMARY KEY (`id`), UNIQUE (`key`) - );", AUTH_KEY_LENGTH as i8); + );", + AUTH_KEY_LENGTH as i8 + ); let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - conn.query_drop(&create_torrents_table).expect("Could not create torrents table."); + conn.query_drop(&create_torrents_table) + .expect("Could not create torrents table."); conn.query_drop(&create_keys_table).expect("Could not create keys table."); - conn.query_drop(&create_whitelist_table).expect("Could not create whitelist table."); + conn.query_drop(&create_whitelist_table) + .expect("Could not create whitelist table."); Ok(()) } @@ -66,10 +73,15 @@ impl Database for MysqlDatabase { async fn load_persistent_torrents(&self) -> Result, database::Error> { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - let torrents: Vec<(InfoHash, u32)> = conn.query_map("SELECT info_hash, completed FROM torrents", |(info_hash_string, completed): (String, u32)| { - let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); - (info_hash, 
completed) - }).map_err(|_| database::Error::QueryReturnedNoRows)?; + let torrents: Vec<(InfoHash, u32)> = conn + .query_map( + "SELECT info_hash, completed FROM torrents", + |(info_hash_string, completed): (String, u32)| { + let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); + (info_hash, completed) + }, + ) + .map_err(|_| database::Error::QueryReturnedNoRows)?; Ok(torrents) } @@ -77,12 +89,15 @@ impl Database for MysqlDatabase { async fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - let keys: Vec = conn.query_map("SELECT `key`, valid_until FROM `keys`", |(key, valid_until): (String, i64)| { - AuthKey { - key, - valid_until: Some(valid_until as u64) - } - }).map_err(|_| database::Error::QueryReturnedNoRows)?; + let keys: Vec = conn + .query_map( + "SELECT `key`, valid_until FROM `keys`", + |(key, valid_until): (String, i64)| AuthKey { + key, + valid_until: Some(valid_until as u64), + }, + ) + .map_err(|_| database::Error::QueryReturnedNoRows)?; Ok(keys) } @@ -90,9 +105,11 @@ impl Database for MysqlDatabase { async fn load_whitelist(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - let info_hashes: Vec = conn.query_map("SELECT info_hash FROM whitelist", |info_hash: String| { - InfoHash::from_str(&info_hash).unwrap() - }).map_err(|_| database::Error::QueryReturnedNoRows)?; + let info_hashes: Vec = conn + .query_map("SELECT info_hash FROM whitelist", |info_hash: String| { + InfoHash::from_str(&info_hash).unwrap() + }) + .map_err(|_| database::Error::QueryReturnedNoRows)?; Ok(info_hashes) } @@ -118,14 +135,15 @@ impl Database for MysqlDatabase { async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - match conn.exec_first::("SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", params! 
{ info_hash }) - .map_err(|_| database::Error::QueryReturnedNoRows)? { - Some(info_hash) => { - Ok(InfoHash::from_str(&info_hash).unwrap()) - } - None => { - Err(database::Error::InvalidQuery) - } + match conn + .exec_first::( + "SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", + params! { info_hash }, + ) + .map_err(|_| database::Error::QueryReturnedNoRows)? + { + Some(info_hash) => Ok(InfoHash::from_str(&info_hash).unwrap()), + None => Err(database::Error::InvalidQuery), } } @@ -134,10 +152,11 @@ impl Database for MysqlDatabase { let info_hash_str = info_hash.to_string(); - match conn.exec_drop("INSERT INTO whitelist (info_hash) VALUES (:info_hash_str)", params! { info_hash_str }) { - Ok(_) => { - Ok(1) - } + match conn.exec_drop( + "INSERT INTO whitelist (info_hash) VALUES (:info_hash_str)", + params! { info_hash_str }, + ) { + Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -151,9 +170,7 @@ impl Database for MysqlDatabase { let info_hash = info_hash.to_string(); match conn.exec_drop("DELETE FROM whitelist WHERE info_hash = :info_hash", params! { info_hash }) { - Ok(_) => { - Ok(1) - } + Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -164,17 +181,15 @@ impl Database for MysqlDatabase { async fn get_key_from_keys(&self, key: &str) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - match conn.exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) - .map_err(|_| database::Error::QueryReturnedNoRows)? { - Some((key, valid_until)) => { - Ok(AuthKey { - key, - valid_until: Some(valid_until as u64), - }) - } - None => { - Err(database::Error::InvalidQuery) - } + match conn + .exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) + .map_err(|_| database::Error::QueryReturnedNoRows)? 
+ { + Some((key, valid_until)) => Ok(AuthKey { + key, + valid_until: Some(valid_until as u64), + }), + None => Err(database::Error::InvalidQuery), } } @@ -184,10 +199,11 @@ impl Database for MysqlDatabase { let key = auth_key.key.to_string(); let valid_until = auth_key.valid_until.unwrap_or(0).to_string(); - match conn.exec_drop("INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", params! { key, valid_until }) { - Ok(_) => { - Ok(1) - } + match conn.exec_drop( + "INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", + params! { key, valid_until }, + ) { + Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -199,9 +215,7 @@ impl Database for MysqlDatabase { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! { key }) { - Ok(_) => { - Ok(1) - } + Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 143029ec2..3aba39919 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -5,10 +5,10 @@ use log::debug; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; -use crate::{InfoHash}; -use crate::databases::database::{Database, Error}; use crate::databases::database; +use crate::databases::database::{Database, Error}; use crate::tracker::key::AuthKey; +use crate::InfoHash; pub struct SqliteDatabase { pool: Pool, @@ -18,9 +18,7 @@ impl SqliteDatabase { pub fn new(db_path: &str) -> Result { let cm = SqliteConnectionManager::file(db_path); let pool = Pool::new(cm).expect("Failed to create r2d2 SQLite connection pool."); - Ok(SqliteDatabase { - pool - }) + Ok(SqliteDatabase { pool }) } } @@ -31,21 +29,24 @@ impl Database for SqliteDatabase { CREATE TABLE IF NOT EXISTS whitelist ( id INTEGER PRIMARY KEY AUTOINCREMENT, info_hash TEXT NOT NULL UNIQUE - );".to_string(); + );" + .to_string(); let 
create_torrents_table = " CREATE TABLE IF NOT EXISTS torrents ( id INTEGER PRIMARY KEY AUTOINCREMENT, info_hash TEXT NOT NULL UNIQUE, completed INTEGER DEFAULT 0 NOT NULL - );".to_string(); + );" + .to_string(); let create_keys_table = " CREATE TABLE IF NOT EXISTS keys ( id INTEGER PRIMARY KEY AUTOINCREMENT, key TEXT NOT NULL UNIQUE, valid_until INTEGER NOT NULL - );".to_string(); + );" + .to_string(); let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; @@ -84,7 +85,7 @@ impl Database for SqliteDatabase { Ok(AuthKey { key, - valid_until: Some(valid_until as u64) + valid_until: Some(valid_until as u64), }) })?; @@ -112,9 +113,14 @@ impl Database for SqliteDatabase { async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - match conn.execute("INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", [info_hash.to_string(), completed.to_string()]) { + match conn.execute( + "INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", + [info_hash.to_string(), completed.to_string()], + ) { Ok(updated) => { - if updated > 0 { return Ok(()); } + if updated > 0 { + return Ok(()); + } Err(database::Error::QueryReturnedNoRows) } Err(e) => { @@ -145,7 +151,9 @@ impl Database for SqliteDatabase { match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", [info_hash.to_string()]) { Ok(updated) => { - if updated > 0 { return Ok(updated); } + if updated > 0 { + return Ok(updated); + } Err(database::Error::QueryReturnedNoRows) } Err(e) => { @@ -160,7 +168,9 @@ impl Database for SqliteDatabase { match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", [info_hash.to_string()]) { Ok(updated) => { - if updated > 0 { return Ok(updated); } + if updated > 0 { + return Ok(updated); + } 
Err(database::Error::QueryReturnedNoRows) } Err(e) => { @@ -192,11 +202,14 @@ impl Database for SqliteDatabase { async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - match conn.execute("INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - [auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()], + match conn.execute( + "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", + [auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()], ) { Ok(updated) => { - if updated > 0 { return Ok(updated); } + if updated > 0 { + return Ok(updated); + } Err(database::Error::QueryReturnedNoRows) } Err(e) => { @@ -211,7 +224,9 @@ impl Database for SqliteDatabase { match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) { Ok(updated) => { - if updated > 0 { return Ok(updated); } + if updated > 0 { + return Ok(updated); + } Err(database::Error::QueryReturnedNoRows) } Err(e) => { diff --git a/src/http/filters.rs b/src/http/filters.rs index a288f8d97..514cb804c 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -3,44 +3,37 @@ use std::net::{IpAddr, SocketAddr}; use std::str::FromStr; use std::sync::Arc; -use warp::{Filter, reject, Rejection}; +use warp::{reject, Filter, Rejection}; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId}; -use crate::tracker::key::AuthKey; use crate::http::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; +use crate::tracker::key::AuthKey; use crate::tracker::tracker::TorrentTracker; +use crate::{InfoHash, PeerId, MAX_SCRAPE_TORRENTS}; /// Pass Arc along -pub fn with_tracker(tracker: Arc) -> impl Filter, ), Error=Infallible> + Clone { - warp::any() - .map(move || tracker.clone()) +pub fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { + warp::any().map(move || tracker.clone()) } /// Check for infoHash -pub fn with_info_hash() -> impl Filter, ), Error=Rejection> + Clone { - 
warp::filters::query::raw() - .and_then(info_hashes) +pub fn with_info_hash() -> impl Filter,), Error = Rejection> + Clone { + warp::filters::query::raw().and_then(info_hashes) } /// Check for PeerId -pub fn with_peer_id() -> impl Filter + Clone { - warp::filters::query::raw() - .and_then(peer_id) +pub fn with_peer_id() -> impl Filter + Clone { + warp::filters::query::raw().and_then(peer_id) } /// Pass Arc along -pub fn with_auth_key() -> impl Filter, ), Error=Infallible> + Clone { +pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() - .map(|key: String| { - AuthKey::from_string(&key) - }) - .or_else(|_| async { - Ok::<(Option, ), Infallible>((None, )) - }) + .map(|key: String| AuthKey::from_string(&key)) + .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) } /// Check for PeerAddress -pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { +pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { warp::addr::remote() .and(warp::header::optional::("X-Forwarded-For")) .map(move |remote_addr: Option, x_forwarded_for: Option| { @@ -50,7 +43,7 @@ pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { +pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::filters::query::query::() .and(with_info_hash()) .and(with_peer_id()) @@ -59,7 +52,7 @@ pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { +pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::any() .and(with_info_hash()) .and(with_peer_addr(on_reverse_proxy)) @@ -129,7 +122,9 @@ async fn peer_id(raw_query: String) -> WebResult { } /// Get PeerAddress from RemoteAddress or Forwarded -async fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option)) -> WebResult { +async fn peer_addr( + (on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option), +) -> WebResult { if 
!on_reverse_proxy && remote_addr.is_none() { return Err(reject::custom(ServerError::AddressNotFound)); } @@ -148,16 +143,19 @@ async fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Opti // set client ip to last forwarded ip let x_forwarded_ip = *x_forwarded_ips.last().unwrap(); - IpAddr::from_str(x_forwarded_ip).or_else(|_| { - Err(reject::custom(ServerError::AddressNotFound)) - }) + IpAddr::from_str(x_forwarded_ip).or_else(|_| Err(reject::custom(ServerError::AddressNotFound))) } - false => Ok(remote_addr.unwrap().ip()) + false => Ok(remote_addr.unwrap().ip()), } } /// Parse AnnounceRequest from raw AnnounceRequestQuery, InfoHash and Option -async fn announce_request(announce_request_query: AnnounceRequestQuery, info_hashes: Vec, peer_id: PeerId, peer_addr: IpAddr) -> WebResult { +async fn announce_request( + announce_request_query: AnnounceRequestQuery, + info_hashes: Vec, + peer_id: PeerId, + peer_addr: IpAddr, +) -> WebResult { Ok(AnnounceRequest { info_hash: info_hashes[0], peer_addr, @@ -173,8 +171,5 @@ async fn announce_request(announce_request_query: AnnounceRequestQuery, info_has /// Parse ScrapeRequest from InfoHash async fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { - Ok(ScrapeRequest { - info_hashes, - peer_addr, - }) + Ok(ScrapeRequest { info_hashes, peer_addr }) } diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 0dc737641..5214bbe6e 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -4,19 +4,26 @@ use std::net::IpAddr; use std::sync::Arc; use log::debug; -use warp::{reject, Rejection, Reply}; use warp::http::Response; +use warp::{reject, Rejection, Reply}; -use crate::{InfoHash}; -use crate::tracker::key::AuthKey; -use crate::tracker::torrent::{TorrentError, TorrentStats}; -use crate::http::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, WebResult}; +use crate::http::{ + AnnounceRequest, AnnounceResponse, 
ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, + WebResult, +}; use crate::peer::TorrentPeer; +use crate::tracker::key::AuthKey; use crate::tracker::statistics::TrackerStatisticsEvent; +use crate::tracker::torrent::{TorrentError, TorrentStats}; use crate::tracker::tracker::TorrentTracker; +use crate::InfoHash; /// Authenticate InfoHash using optional AuthKey -pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), ServerError> { +pub async fn authenticate( + info_hash: &InfoHash, + auth_key: &Option, + tracker: Arc, +) -> Result<(), ServerError> { match tracker.authenticate_request(info_hash, auth_key).await { Ok(_) => Ok(()), Err(e) => { @@ -35,15 +42,22 @@ pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, trac } /// Handle announce request -pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option, tracker: Arc) -> WebResult { +pub async fn handle_announce( + announce_request: AnnounceRequest, + auth_key: Option, + tracker: Arc, +) -> WebResult { if let Err(e) = authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await { return Err(reject::custom(e)); } debug!("{:?}", announce_request); - let peer = TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); - let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer).await; + let peer = + TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); + let torrent_stats = tracker + .update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer) + .await; // get all torrent peers excluding the peer_addr let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; @@ -52,15 +66,29 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option // send stats event 
match announce_request.peer_addr { - IpAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Announce).await; } - IpAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Announce).await; } + IpAddr::V4(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Announce).await; + } + IpAddr::V6(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Announce).await; + } } - send_announce_response(&announce_request, torrent_stats, peers, announce_interval, tracker.config.min_announce_interval) + send_announce_response( + &announce_request, + torrent_stats, + peers, + announce_interval, + tracker.config.min_announce_interval, + ) } /// Handle scrape request -pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option, tracker: Arc) -> WebResult { +pub async fn handle_scrape( + scrape_request: ScrapeRequest, + auth_key: Option, + tracker: Arc, +) -> WebResult { let mut files: HashMap = HashMap::new(); let db = tracker.get_torrents().await; @@ -69,14 +97,24 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option { if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { let (seeders, completed, leechers) = torrent_info.get_stats(); - ScrapeResponseEntry { complete: seeders, downloaded: completed, incomplete: leechers } + ScrapeResponseEntry { + complete: seeders, + downloaded: completed, + incomplete: leechers, + } } else { - ScrapeResponseEntry { complete: 0, downloaded: 0, incomplete: 0 } + ScrapeResponseEntry { + complete: 0, + downloaded: 0, + incomplete: 0, + } } } - None => { - ScrapeResponseEntry { complete: 0, downloaded: 0, incomplete: 0 } - } + None => ScrapeResponseEntry { + complete: 0, + downloaded: 0, + incomplete: 0, + }, }; files.insert(info_hash.clone(), scrape_entry); @@ -84,20 +122,33 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option { tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Scrape).await; } - IpAddr::V6(_) => { 
tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Scrape).await; } + IpAddr::V4(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Scrape).await; + } + IpAddr::V6(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Scrape).await; + } } send_scrape_response(files) } /// Send announce response -fn send_announce_response(announce_request: &AnnounceRequest, torrent_stats: TorrentStats, peers: Vec, interval: u32, interval_min: u32) -> WebResult { - let http_peers: Vec = peers.iter().map(|peer| Peer { - peer_id: peer.peer_id.to_string(), - ip: peer.peer_addr.ip(), - port: peer.peer_addr.port(), - }).collect(); +fn send_announce_response( + announce_request: &AnnounceRequest, + torrent_stats: TorrentStats, + peers: Vec, + interval: u32, + interval_min: u32, +) -> WebResult { + let http_peers: Vec = peers + .iter() + .map(|peer| Peer { + peer_id: peer.peer_id.to_string(), + ip: peer.peer_addr.ip(), + port: peer.peer_addr.port(), + }) + .collect(); let res = AnnounceResponse { interval, @@ -111,7 +162,7 @@ fn send_announce_response(announce_request: &AnnounceRequest, torrent_stats: Tor if let Some(1) = announce_request.compact { match res.write_compact() { Ok(body) => Ok(Response::new(body)), - Err(_) => Err(reject::custom(ServerError::InternalServerError)) + Err(_) => Err(reject::custom(ServerError::InternalServerError)), } } else { Ok(Response::new(res.write().into())) @@ -124,7 +175,7 @@ fn send_scrape_response(files: HashMap) -> WebRes match res.write() { Ok(body) => Ok(Response::new(body)), - Err(_) => Err(reject::custom(ServerError::InternalServerError)) + Err(_) => Err(reject::custom(ServerError::InternalServerError)), } } @@ -132,9 +183,15 @@ fn send_scrape_response(files: HashMap) -> WebRes pub async fn send_error(r: Rejection) -> std::result::Result { let body = if let Some(server_error) = r.find::() { debug!("{:?}", server_error); - ErrorResponse { failure_reason: server_error.to_string() }.write() + ErrorResponse { + failure_reason: 
server_error.to_string(), + } + .write() } else { - ErrorResponse { failure_reason: ServerError::InternalServerError.to_string() }.write() + ErrorResponse { + failure_reason: ServerError::InternalServerError.to_string(), + } + .write() }; Ok(Response::new(body)) diff --git a/src/http/mod.rs b/src/http/mod.rs index 07d077577..4842c0a25 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -6,13 +6,13 @@ pub use self::response::*; pub use self::routes::*; pub use self::server::*; -pub mod server; +pub mod errors; +pub mod filters; +pub mod handlers; pub mod request; pub mod response; -pub mod errors; pub mod routes; -pub mod handlers; -pub mod filters; +pub mod server; pub type Bytes = u64; pub type WebResult = std::result::Result; diff --git a/src/http/request.rs b/src/http/request.rs index 28cd4750e..6dd025e8c 100644 --- a/src/http/request.rs +++ b/src/http/request.rs @@ -2,8 +2,8 @@ use std::net::IpAddr; use serde::Deserialize; -use crate::{InfoHash, PeerId}; use crate::http::Bytes; +use crate::{InfoHash, PeerId}; #[derive(Deserialize)] pub struct AnnounceRequestQuery { diff --git a/src/http/response.rs b/src/http/response.rs index 2bdd4c1e7..4db12f995 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -5,6 +5,7 @@ use std::net::IpAddr; use serde; use serde::Serialize; + use crate::InfoHash; #[derive(Serialize)] diff --git a/src/http/routes.rs b/src/http/routes.rs index 53b2b0ce5..a9ca3027f 100644 --- a/src/http/routes.rs +++ b/src/http/routes.rs @@ -3,24 +3,18 @@ use std::sync::Arc; use warp::{Filter, Rejection}; -use crate::http::handle_announce; -use crate::http::handle_scrape; -use crate::http::send_error; -use crate::http::with_announce_request; -use crate::http::with_auth_key; -use crate::http::with_scrape_request; -use crate::http::with_tracker; +use crate::http::{ + handle_announce, handle_scrape, send_error, with_announce_request, with_auth_key, with_scrape_request, with_tracker, +}; use crate::tracker::tracker::TorrentTracker; /// All routes 
-pub fn routes(tracker: Arc) -> impl Filter + Clone { - announce(tracker.clone()) - .or(scrape(tracker)) - .recover(send_error) +pub fn routes(tracker: Arc) -> impl Filter + Clone { + announce(tracker.clone()).or(scrape(tracker)).recover(send_error) } /// GET /announce or /announce/ -fn announce(tracker: Arc) -> impl Filter + Clone { +fn announce(tracker: Arc) -> impl Filter + Clone { warp::path::path("announce") .and(warp::filters::method::get()) .and(with_announce_request(tracker.config.on_reverse_proxy)) @@ -30,7 +24,7 @@ fn announce(tracker: Arc) -> impl Filter -fn scrape(tracker: Arc) -> impl Filter + Clone { +fn scrape(tracker: Arc) -> impl Filter + Clone { warp::path::path("scrape") .and(warp::filters::method::get()) .and(with_scrape_request(tracker.config.on_reverse_proxy)) diff --git a/src/http/server.rs b/src/http/server.rs index 5a5b5f735..8b92d8792 100644 --- a/src/http/server.rs +++ b/src/http/server.rs @@ -12,33 +12,31 @@ pub struct HttpServer { impl HttpServer { pub fn new(tracker: Arc) -> HttpServer { - HttpServer { - tracker - } + HttpServer { tracker } } /// Start the HttpServer pub fn start(&self, socket_addr: SocketAddr) -> impl warp::Future { - let (_addr, server) = warp::serve(routes(self.tracker.clone())) - .bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c() - .await - .expect("Failed to listen to shutdown signal."); - }); + let (_addr, server) = warp::serve(routes(self.tracker.clone())).bind_with_graceful_shutdown(socket_addr, async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + }); server } /// Start the HttpServer in TLS mode - pub fn start_tls(&self, socket_addr: SocketAddr, ssl_cert_path: String, ssl_key_path: String) -> impl warp::Future { + pub fn start_tls( + &self, + socket_addr: SocketAddr, + ssl_cert_path: String, + ssl_key_path: String, + ) -> impl warp::Future { let (_addr, server) = warp::serve(routes(self.tracker.clone())) .tls() .cert_path(ssl_cert_path) 
.key_path(ssl_key_path) .bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c() - .await - .expect("Failed to listen to shutdown signal."); + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); }); server diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 85f64200f..ef67f0a7e 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -1,9 +1,11 @@ use std::net::SocketAddr; use std::sync::Arc; + use log::{info, warn}; use tokio::task::JoinHandle; -use crate::{HttpServer, HttpTrackerConfig}; + use crate::tracker::tracker::TorrentTracker; +use crate::{HttpServer, HttpTrackerConfig}; pub fn start_job(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.parse::().unwrap(); @@ -19,7 +21,9 @@ pub fn start_job(config: &HttpTrackerConfig, tracker: Arc) -> Jo http_tracker.start(bind_addr).await; } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { info!("Starting HTTPS server on: {} (TLS)", bind_addr); - http_tracker.start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap()).await; + http_tracker + .start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap()) + .await; } else { warn!("Could not start HTTP tracker on: {}, missing SSL Cert or Key!", bind_addr); } diff --git a/src/jobs/mod.rs b/src/jobs/mod.rs index c3e58e56e..8b8f0662b 100644 --- a/src/jobs/mod.rs +++ b/src/jobs/mod.rs @@ -1,4 +1,4 @@ +pub mod http_tracker; pub mod torrent_cleanup; pub mod tracker_api; -pub mod http_tracker; pub mod udp_tracker; diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 7d9967352..6e4b0c77e 100644 --- a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -1,9 +1,11 @@ use std::sync::Arc; + use chrono::Utc; use log::info; use tokio::task::JoinHandle; -use crate::{Configuration}; + use crate::tracker::tracker::TorrentTracker; +use crate::Configuration; pub fn start_job(config: 
&Configuration, tracker: Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(&tracker); diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index 476a87a6a..f3c9ae788 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -1,12 +1,18 @@ use std::sync::Arc; + use log::info; use tokio::task::JoinHandle; -use crate::{Configuration}; + use crate::api::server; use crate::tracker::tracker::TorrentTracker; +use crate::Configuration; pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { - let bind_addr = config.http_api.bind_address.parse::().expect("Tracker API bind_address invalid."); + let bind_addr = config + .http_api + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); info!("Starting Torrust API server on: {}", bind_addr); tokio::spawn(async move { diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 32ef76ef4..f93979c9f 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -1,8 +1,10 @@ use std::sync::Arc; + use log::{error, info, warn}; use tokio::task::JoinHandle; -use crate::{UdpServer, UdpTrackerConfig}; + use crate::tracker::tracker::TorrentTracker; +use crate::{UdpServer, UdpTrackerConfig}; pub fn start_job(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.clone(); diff --git a/src/lib.rs b/src/lib.rs index 245f4686c..6dcc7e6da 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,18 +1,18 @@ +pub use api::server::*; pub use http::server::*; +pub use protocol::common::*; pub use udp::server::*; -pub use protocol::common::*; pub use self::config::*; -pub use api::server::*; pub use self::tracker::*; +pub mod api; pub mod config; -pub mod tracker; -pub mod logging; -pub mod udp; -pub mod http; -pub mod setup; pub mod databases; +pub mod http; pub mod jobs; -pub mod api; +pub mod logging; pub mod protocol; +pub mod setup; +pub mod tracker; +pub mod udp; diff --git a/src/logging.rs b/src/logging.rs index 
c2e77551f..209c9f848 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -5,19 +5,17 @@ use crate::Configuration; pub fn setup_logging(cfg: &Configuration) { let log_level = match &cfg.log_level { None => log::LevelFilter::Info, - Some(level) => { - match level.as_str() { - "off" => log::LevelFilter::Off, - "trace" => log::LevelFilter::Trace, - "debug" => log::LevelFilter::Debug, - "info" => log::LevelFilter::Info, - "warn" => log::LevelFilter::Warn, - "error" => log::LevelFilter::Error, - _ => { - panic!("Unknown log level encountered: '{}'", level.as_str()); - } + Some(level) => match level.as_str() { + "off" => log::LevelFilter::Off, + "trace" => log::LevelFilter::Trace, + "debug" => log::LevelFilter::Debug, + "info" => log::LevelFilter::Info, + "warn" => log::LevelFilter::Warn, + "error" => log::LevelFilter::Error, + _ => { + panic!("Unknown log level encountered: '{}'", level.as_str()); } - } + }, }; if let Err(_err) = fern::Dispatch::new() diff --git a/src/main.rs b/src/main.rs index 963419f03..0b406c85a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,9 +1,8 @@ use std::sync::Arc; + use log::info; -use torrust_tracker::Configuration; -use torrust_tracker::logging; -use torrust_tracker::setup; use torrust_tracker::tracker::tracker::TorrentTracker; +use torrust_tracker::{logging, setup, Configuration}; #[tokio::main] async fn main() { diff --git a/src/protocol/common.rs b/src/protocol/common.rs index 5d69ed0e1..92a3ed51c 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -221,8 +221,9 @@ impl PeerId { impl Serialize for PeerId { fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, { + where + S: serde::Serializer, + { let buff_size = self.0.len() * 2; let mut tmp: Vec = vec![0; buff_size]; binascii::bin2hex(&self.0, &mut tmp).unwrap(); diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index 30b87b99b..e50c8b036 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -11,9 +11,7 @@ pub fn 
get_connection_id(remote_address: &SocketAddr) -> ConnectionId { } pub fn current_time() -> u64 { - SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH).unwrap() - .as_secs() + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs() } pub fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { diff --git a/src/setup.rs b/src/setup.rs index ed9b6d8ff..0c5ed9004 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -1,11 +1,13 @@ use std::sync::Arc; -use log::{warn}; + +use log::warn; use tokio::task::JoinHandle; -use crate::{Configuration}; + use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, udp_tracker}; use crate::tracker::tracker::TorrentTracker; +use crate::Configuration; -pub async fn setup(config: &Configuration, tracker: Arc) -> Vec>{ +pub async fn setup(config: &Configuration, tracker: Arc) -> Vec> { let mut jobs: Vec> = Vec::new(); // Load peer keys @@ -15,15 +17,23 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< // Load whitelisted torrents if tracker.is_whitelisted() { - tracker.load_whitelist().await.expect("Could not load whitelist from database."); + tracker + .load_whitelist() + .await + .expect("Could not load whitelist from database."); } // Start the UDP blocks for udp_tracker_config in &config.udp_trackers { - if !udp_tracker_config.enabled { continue; } + if !udp_tracker_config.enabled { + continue; + } if tracker.is_private() { - warn!("Could not start UDP tracker on: {} while in {:?}. UDP is not safe for private trackers!", udp_tracker_config.bind_address, config.mode); + warn!( + "Could not start UDP tracker on: {} while in {:?}. 
UDP is not safe for private trackers!", + udp_tracker_config.bind_address, config.mode + ); } else { jobs.push(udp_tracker::start_job(&udp_tracker_config, tracker.clone())) } @@ -31,7 +41,9 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< // Start the HTTP blocks for http_tracker_config in &config.http_trackers { - if !http_tracker_config.enabled { continue; } + if !http_tracker_config.enabled { + continue; + } jobs.push(http_tracker::start_job(&http_tracker_config, tracker.clone())); } diff --git a/src/tracker/key.rs b/src/tracker/key.rs index 2e2ca81f7..f935dac07 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ -1,11 +1,10 @@ use derive_more::{Display, Error}; use log::debug; -use rand::{Rng, thread_rng}; use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; use serde::Serialize; use crate::protocol::utils::current_time; - use crate::AUTH_KEY_LENGTH; pub fn generate_auth_key(seconds_valid: u64) -> AuthKey { @@ -25,8 +24,12 @@ pub fn generate_auth_key(seconds_valid: u64) -> AuthKey { pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { let current_time = current_time(); - if auth_key.valid_until.is_none() { return Err(Error::KeyInvalid); } - if auth_key.valid_until.unwrap() < current_time { return Err(Error::KeyExpired); } + if auth_key.valid_until.is_none() { + return Err(Error::KeyInvalid); + } + if auth_key.valid_until.unwrap() < current_time { + return Err(Error::KeyExpired); + } Ok(()) } @@ -40,10 +43,7 @@ pub struct AuthKey { impl AuthKey { pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { - Some(AuthKey { - key, - valid_until: None, - }) + Some(AuthKey { key, valid_until: None }) } else { None } @@ -85,17 +85,10 @@ mod tests { #[test] fn auth_key_from_buffer() { - let auth_key = key::AuthKey::from_buffer( - [ - 89, 90, 83, 108, - 52, 108, 77, 90, - 117, 112, 82, 117, - 79, 112, 83, 82, - 67, 51, 107, 114, - 73, 75, 82, 53, - 
66, 80, 66, 49, - 52, 110, 114, 74] - ); + let auth_key = key::AuthKey::from_buffer([ + 89, 90, 83, 108, 52, 108, 77, 90, 117, 112, 82, 117, 79, 112, 83, 82, 67, 51, 107, 114, 73, 75, 82, 53, 66, 80, 66, + 49, 52, 110, 114, 74, + ]); assert!(auth_key.is_some()); assert_eq!(auth_key.unwrap().key, "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"); diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 791e2e7d2..bbb027a35 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1,6 +1,6 @@ -pub mod tracker; -pub mod statistics; -pub mod peer; -pub mod torrent; pub mod key; pub mod mode; +pub mod peer; +pub mod statistics; +pub mod torrent; +pub mod tracker; diff --git a/src/tracker/mode.rs b/src/tracker/mode.rs index edcb27f1c..9110b7f4f 100644 --- a/src/tracker/mode.rs +++ b/src/tracker/mode.rs @@ -1,5 +1,5 @@ use serde; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Debug)] pub enum TrackerMode { diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index ce4e52022..0514f41ed 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -2,11 +2,11 @@ use std::net::{IpAddr, SocketAddr}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde; -use serde::{Serialize}; +use serde::Serialize; -use crate::protocol::common::{NumberOfBytesDef, AnnounceEventDef}; -use crate::protocol::utils::ser_instant; use crate::http::AnnounceRequest; +use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; +use crate::protocol::utils::ser_instant; use crate::PeerId; #[derive(PartialEq, Eq, Debug, Clone, Serialize)] @@ -26,7 +26,11 @@ pub struct TorrentPeer { } impl TorrentPeer { - pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { + pub fn from_udp_announce_request( + announce_request: &aquatic_udp_protocol::AnnounceRequest, + remote_ip: IpAddr, + host_opt_ip: Option, + ) -> Self { let 
peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); TorrentPeer { @@ -40,7 +44,11 @@ impl TorrentPeer { } } - pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { + pub fn from_http_announce_request( + announce_request: &AnnounceRequest, + remote_ip: IpAddr, + host_opt_ip: Option, + ) -> Self { let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); let event: AnnounceEvent = if let Some(event) = &announce_request.event { @@ -48,7 +56,7 @@ impl TorrentPeer { "started" => AnnounceEvent::Started, "stopped" => AnnounceEvent::Stopped, "completed" => AnnounceEvent::Completed, - _ => AnnounceEvent::None + _ => AnnounceEvent::None, } } else { AnnounceEvent::None @@ -74,5 +82,7 @@ impl TorrentPeer { } } - pub fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } + pub fn is_seeder(&self) -> bool { + self.left.0 <= 0 && self.event != AnnounceEvent::Stopped + } } diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index c67df72ec..85a2dbae9 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; -use tokio::sync::mpsc::Sender; use tokio::sync::mpsc::error::SendError; +use tokio::sync::mpsc::Sender; +use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; const CHANNEL_BUFFER_SIZE: usize = 65_535; diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 0c03e3f82..7950ce9c0 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -1,10 +1,10 @@ use std::net::{IpAddr, SocketAddr}; -use aquatic_udp_protocol::{AnnounceEvent}; +use aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; -use crate::{MAX_SCRAPE_TORRENTS, PeerId}; use crate::peer::TorrentPeer; +use crate::{PeerId, MAX_SCRAPE_TORRENTS}; 
#[derive(Serialize, Deserialize, Clone)] pub struct TorrentEntry { @@ -54,11 +54,13 @@ impl TorrentEntry { // Filter out different ip_version from remote_addr Some(remote_addr) => { // Skip ip address of client - if peer.peer_addr.ip() == remote_addr.ip() { return false; } + if peer.peer_addr.ip() == remote_addr.ip() { + return false; + } match peer.peer_addr.ip() { - IpAddr::V4(_) => { remote_addr.is_ipv4() } - IpAddr::V6(_) => { remote_addr.is_ipv6() } + IpAddr::V4(_) => remote_addr.is_ipv4(), + IpAddr::V6(_) => remote_addr.is_ipv6(), } } }) @@ -73,9 +75,8 @@ impl TorrentEntry { } pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { - self.peers.retain(|_, peer| { - peer.updated.elapsed() > std::time::Duration::from_secs(max_peer_timeout as u64) - }); + self.peers + .retain(|_, peer| peer.updated.elapsed() > std::time::Duration::from_secs(max_peer_timeout as u64)); } } diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index 163bfe446..51d7716fb 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -3,19 +3,19 @@ use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; -use tokio::sync::{RwLock, RwLockReadGuard}; use tokio::sync::mpsc::error::SendError; +use tokio::sync::{RwLock, RwLockReadGuard}; -use crate::Configuration; -use crate::protocol::common::InfoHash; -use crate::databases::database::Database; use crate::databases::database; +use crate::databases::database::Database; use crate::mode::TrackerMode; use crate::peer::TorrentPeer; -use crate::tracker::key::AuthKey; +use crate::protocol::common::InfoHash; use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; use crate::tracker::key; +use crate::tracker::key::AuthKey; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; +use crate::Configuration; pub struct TorrentTracker { pub config: Arc, @@ -24,7 +24,7 @@ pub struct TorrentTracker { whitelist: RwLock>, torrents: RwLock>, stats_tracker: StatsTracker, - 
database: Box + database: Box, } impl TorrentTracker { @@ -33,7 +33,9 @@ impl TorrentTracker { let mut stats_tracker = StatsTracker::new(); // starts a thread for updating tracker stats - if config.tracker_usage_statistics { stats_tracker.run_worker(); } + if config.tracker_usage_statistics { + stats_tracker.run_worker(); + } Ok(TorrentTracker { config: config.clone(), @@ -42,7 +44,7 @@ impl TorrentTracker { whitelist: RwLock::new(std::collections::HashSet::new()), torrents: RwLock::new(std::collections::BTreeMap::new()), stats_tracker, - database + database, }) } @@ -74,7 +76,7 @@ impl TorrentTracker { pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> { match self.keys.read().await.get(&auth_key.key) { None => Err(key::Error::KeyInvalid), - Some(key) => key::verify_auth_key(key) + Some(key) => key::verify_auth_key(key), } } @@ -124,7 +126,9 @@ impl TorrentTracker { pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { // no authentication needed in public mode - if self.is_public() { return Ok(()); } + if self.is_public() { + return Ok(()); + } // check if auth_key is set and valid if self.is_private() { @@ -157,7 +161,9 @@ impl TorrentTracker { for (info_hash, completed) in persistent_torrents { // Skip if torrent entry already exists - if torrents.contains_key(&info_hash) { continue; } + if torrents.contains_key(&info_hash) { + continue; + } let torrent_entry = TorrentEntry { peers: Default::default(), @@ -170,14 +176,12 @@ impl TorrentTracker { Ok(()) } - pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr, ) -> Vec { + pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { None => vec![], - Some(entry) => { - entry.get_peers(Some(client_addr)).into_iter().cloned().collect() - } + Some(entry) => 
entry.get_peers(Some(client_addr)).into_iter().cloned().collect(), } } @@ -185,19 +189,18 @@ impl TorrentTracker { let mut torrents = self.torrents.write().await; let torrent_entry = match torrents.entry(info_hash.clone()) { - Entry::Vacant(vacant) => { - vacant.insert(TorrentEntry::new()) - } - Entry::Occupied(entry) => { - entry.into_mut() - } + Entry::Vacant(vacant) => vacant.insert(TorrentEntry::new()), + Entry::Occupied(entry) => entry.into_mut(), }; let stats_updated = torrent_entry.update_peer(peer); // todo: move this action to a separate worker if self.config.persistent_torrent_completed_stat && stats_updated { - let _ = self.database.save_persistent_torrent(&info_hash, torrent_entry.completed).await; + let _ = self + .database + .save_persistent_torrent(&info_hash, torrent_entry.completed) + .await; } let (seeders, completed, leechers) = torrent_entry.get_stats(); @@ -231,8 +234,8 @@ impl TorrentTracker { torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); match self.config.persistent_torrent_completed_stat { - true => { torrent_entry.completed > 0 || torrent_entry.peers.len() > 0 } - false => { torrent_entry.peers.len() > 0 } + true => torrent_entry.completed > 0 || torrent_entry.peers.len() > 0, + false => torrent_entry.peers.len() > 0, } }); } else { diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 860a2fe4b..907dac0bc 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -1,16 +1,19 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; -use aquatic_udp_protocol::{AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId}; +use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, + NumberOfPeers, Port, Request, 
Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, +}; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS}; use crate::peer::TorrentPeer; -use crate::tracker::torrent::{TorrentError}; -use crate::udp::errors::ServerError; -use crate::udp::request::AnnounceRequestWrapper; +use crate::protocol::utils::get_connection_id; use crate::tracker::statistics::TrackerStatisticsEvent; +use crate::tracker::torrent::TorrentError; use crate::tracker::tracker::TorrentTracker; -use crate::protocol::utils::get_connection_id; +use crate::udp::errors::ServerError; +use crate::udp::request::AnnounceRequestWrapper; +use crate::{InfoHash, MAX_SCRAPE_TORRENTS}; pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { match tracker.authenticate_request(info_hash, &None).await { @@ -34,42 +37,38 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: A match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| ServerError::InternalServerError) { Ok(request) => { let transaction_id = match &request { - Request::Connect(connect_request) => { - connect_request.transaction_id - } - Request::Announce(announce_request) => { - announce_request.transaction_id - } - Request::Scrape(scrape_request) => { - scrape_request.transaction_id - } + Request::Connect(connect_request) => connect_request.transaction_id, + Request::Announce(announce_request) => announce_request.transaction_id, + Request::Scrape(scrape_request) => scrape_request.transaction_id, }; match handle_request(request, remote_addr, tracker).await { Ok(response) => response, - Err(e) => handle_error(e, transaction_id) + Err(e) => handle_error(e, transaction_id), } } // bad request - Err(_) => handle_error(ServerError::BadRequest, TransactionId(0)) + Err(_) => handle_error(ServerError::BadRequest, TransactionId(0)), } } -pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: Arc) -> Result { +pub 
async fn handle_request( + request: Request, + remote_addr: SocketAddr, + tracker: Arc, +) -> Result { match request { - Request::Connect(connect_request) => { - handle_connect(remote_addr, &connect_request, tracker).await - } - Request::Announce(announce_request) => { - handle_announce(remote_addr, &announce_request, tracker).await - } - Request::Scrape(scrape_request) => { - handle_scrape(remote_addr, &scrape_request, tracker).await - } + Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker).await, + Request::Announce(announce_request) => handle_announce(remote_addr, &announce_request, tracker).await, + Request::Scrape(scrape_request) => handle_scrape(remote_addr, &scrape_request, tracker).await, } } -pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, tracker: Arc) -> Result { +pub async fn handle_connect( + remote_addr: SocketAddr, + request: &ConnectRequest, + tracker: Arc, +) -> Result { let connection_id = get_connection_id(&remote_addr); let response = Response::from(ConnectResponse { @@ -79,26 +78,42 @@ pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, t // send stats event match remote_addr { - SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp4Connect).await; } - SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp6Connect).await; } + SocketAddr::V4(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Udp4Connect).await; + } + SocketAddr::V6(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Udp6Connect).await; + } } Ok(response) } -pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &AnnounceRequest, tracker: Arc) -> Result { +pub async fn handle_announce( + remote_addr: SocketAddr, + announce_request: &AnnounceRequest, + tracker: Arc, +) -> Result { let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request.clone()); 
authenticate(&wrapped_announce_request.info_hash, tracker.clone()).await?; - let peer = TorrentPeer::from_udp_announce_request(&wrapped_announce_request.announce_request, remote_addr.ip(), tracker.config.get_ext_ip()); + let peer = TorrentPeer::from_udp_announce_request( + &wrapped_announce_request.announce_request, + remote_addr.ip(), + tracker.config.get_ext_ip(), + ); //let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer).await; - let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer).await; + let torrent_stats = tracker + .update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer) + .await; // get all peers excluding the client_addr - let peers = tracker.get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr).await; + let peers = tracker + .get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr) + .await; let announce_response = if remote_addr.is_ipv4() { Response::from(AnnounceResponse { @@ -106,16 +121,19 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc announce_interval: AnnounceInterval(tracker.config.announce_interval as i32), leechers: NumberOfPeers(torrent_stats.leechers as i32), seeders: NumberOfPeers(torrent_stats.seeders as i32), - peers: peers.iter() - .filter_map(|peer| if let IpAddr::V4(ip) = peer.peer_addr.ip() { - Some(ResponsePeer:: { - ip_address: ip, - port: Port(peer.peer_addr.port()), - }) - } else { - None - } - ).collect(), + peers: peers + .iter() + .filter_map(|peer| { + if let IpAddr::V4(ip) = peer.peer_addr.ip() { + Some(ResponsePeer:: { + ip_address: ip, + port: Port(peer.peer_addr.port()), + }) + } else { + None + } + }) + .collect(), }) } else { Response::from(AnnounceResponse { @@ -123,30 +141,41 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc announce_interval: 
AnnounceInterval(tracker.config.announce_interval as i32), leechers: NumberOfPeers(torrent_stats.leechers as i32), seeders: NumberOfPeers(torrent_stats.seeders as i32), - peers: peers.iter() - .filter_map(|peer| if let IpAddr::V6(ip) = peer.peer_addr.ip() { - Some(ResponsePeer:: { - ip_address: ip, - port: Port(peer.peer_addr.port()), - }) - } else { - None - } - ).collect(), + peers: peers + .iter() + .filter_map(|peer| { + if let IpAddr::V6(ip) = peer.peer_addr.ip() { + Some(ResponsePeer:: { + ip_address: ip, + port: Port(peer.peer_addr.port()), + }) + } else { + None + } + }) + .collect(), }) }; // send stats event match remote_addr { - SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp4Announce).await; } - SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp6Announce).await; } + SocketAddr::V4(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Udp4Announce).await; + } + SocketAddr::V6(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Udp6Announce).await; + } } Ok(announce_response) } // todo: refactor this, db lock can be a lot shorter -pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tracker: Arc) -> Result { +pub async fn handle_scrape( + remote_addr: SocketAddr, + request: &ScrapeRequest, + tracker: Arc, +) -> Result { let db = tracker.get_torrents().await; let mut torrent_stats: Vec = Vec::new(); @@ -172,13 +201,11 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra } } } - None => { - TorrentScrapeStatistics { - seeders: NumberOfPeers(0), - completed: NumberOfDownloads(0), - leechers: NumberOfPeers(0), - } - } + None => TorrentScrapeStatistics { + seeders: NumberOfPeers(0), + completed: NumberOfDownloads(0), + leechers: NumberOfPeers(0), + }, }; torrent_stats.push(scrape_entry); @@ -188,8 +215,12 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra // send stats event match remote_addr { - 
SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp4Scrape).await; } - SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp6Scrape).await; } + SocketAddr::V4(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Udp4Scrape).await; + } + SocketAddr::V6(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Udp6Scrape).await; + } } Ok(Response::from(ScrapeResponse { @@ -200,5 +231,8 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra fn handle_error(e: ServerError, transaction_id: TransactionId) -> Response { let message = e.to_string(); - Response::from(ErrorResponse { transaction_id, message: message.into() }) + Response::from(ErrorResponse { + transaction_id, + message: message.into(), + }) } diff --git a/src/udp/mod.rs b/src/udp/mod.rs index 25780ba93..ae87973f1 100644 --- a/src/udp/mod.rs +++ b/src/udp/mod.rs @@ -4,9 +4,9 @@ pub use self::request::*; pub use self::server::*; pub mod errors; +pub mod handlers; pub mod request; pub mod server; -pub mod handlers; pub type Bytes = u64; pub type Port = u16; diff --git a/src/udp/server.rs b/src/udp/server.rs index bcacc2642..11cb61d99 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -62,7 +62,9 @@ impl UdpServer { debug!("{:?}", &inner[..position]); UdpServer::send_packet(socket, &remote_addr, &inner[..position]).await; } - Err(_) => { debug!("could not write response to bytes."); } + Err(_) => { + debug!("could not write response to bytes."); + } } } From 1f3dd8a2bb2bb5eb1469e962fd2e423d1d6c6f77 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 9 Sep 2022 18:32:39 +0200 Subject: [PATCH 0124/1003] ci: update workflow to enforce formatting --- .github/workflows/test_build_release.yml | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index d848ed653..87f6a9488 100644 --- 
a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -4,7 +4,23 @@ name: CI on: [push, pull_request] jobs: + format: + runs-on: ubuntu-latest + env: + CARGO_TERM_COLOR: always + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + components: rustfmt + - uses: Swatinem/rust-cache@v1 + - name: Check Rust Formatting + run: cargo fmt --check + test: + needs: format runs-on: ubuntu-latest env: CARGO_TERM_COLOR: always @@ -18,7 +34,7 @@ jobs: - uses: Swatinem/rust-cache@v1 - uses: taiki-e/install-action@cargo-llvm-cov - uses: taiki-e/install-action@nextest - - name: Run tests + - name: Run Tests run: cargo llvm-cov nextest build: @@ -37,9 +53,9 @@ jobs: profile: minimal toolchain: stable - uses: Swatinem/rust-cache@v1 - - name: Build torrust tracker + - name: Build Torrust Tracker run: cargo build --release - - name: Upload build artifact + - name: Upload Build Artifact uses: actions/upload-artifact@v2 with: name: torrust-tracker @@ -49,7 +65,7 @@ jobs: needs: build runs-on: ubuntu-latest steps: - - name: Download build artifact + - name: Download Build Artifact uses: actions/download-artifact@v2 with: name: torrust-tracker From e622c132fcf986487a290fc5a50403ed479ce33e Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 9 Sep 2022 18:49:42 +0200 Subject: [PATCH 0125/1003] formatting: add formatting commit to the git blame ignore file --- .git-blame-ignore | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 .git-blame-ignore diff --git a/.git-blame-ignore b/.git-blame-ignore new file mode 100644 index 000000000..06c439a36 --- /dev/null +++ b/.git-blame-ignore @@ -0,0 +1,4 @@ +# https://git-scm.com/docs/git-blame#Documentation/git-blame.txt---ignore-revs-fileltfilegt + +# Format the world! 
+57bf2000e39dccfc2f8b6e41d6c6f3eac38a3886 From 2ae7ab47ba19a22b7d98d31096cae605b26c3a8a Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 13 Sep 2022 00:32:05 +0200 Subject: [PATCH 0126/1003] clock: add mockable clock --- Cargo.lock | 1 + Cargo.toml | 9 +- src/lib.rs | 11 ++ src/main.rs | 5 +- src/protocol/clock/clock.rs | 248 ++++++++++++++++++++++++++++++++++++ src/protocol/clock/mod.rs | 1 + src/protocol/mod.rs | 1 + 7 files changed, 274 insertions(+), 2 deletions(-) create mode 100644 src/protocol/clock/clock.rs create mode 100644 src/protocol/clock/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 279e4a67d..1a4fe8b4f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2341,6 +2341,7 @@ dependencies = [ "fern", "futures", "hex", + "lazy_static", "log", "openssl", "percent-encoding", diff --git a/Cargo.toml b/Cargo.toml index 9d21ed7d7..89fdffa99 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,13 @@ lto = "fat" strip = true [dependencies] -tokio = { version = "1.7", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } +tokio = { version = "1.7", features = [ + "rt-multi-thread", + "net", + "sync", + "macros", + "signal", +] } serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2.3" @@ -28,6 +34,7 @@ serde_with = "2.0.0" hex = "0.4.3" percent-encoding = "2.1.0" binascii = "0.1" +lazy_static = "1.4.0" openssl = { version = "0.10.41", features = ["vendored"] } diff --git a/src/lib.rs b/src/lib.rs index 6dcc7e6da..882e126bc 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -16,3 +16,14 @@ pub mod protocol; pub mod setup; pub mod tracker; pub mod udp; + +#[macro_use] +extern crate lazy_static; + +pub mod static_time { + use std::time::SystemTime; + + lazy_static! 
{ + pub static ref TIME_AT_APP_START: SystemTime = SystemTime::now(); + } +} diff --git a/src/main.rs b/src/main.rs index 0b406c85a..01121052a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,12 +2,15 @@ use std::sync::Arc; use log::info; use torrust_tracker::tracker::tracker::TorrentTracker; -use torrust_tracker::{logging, setup, Configuration}; +use torrust_tracker::{logging, setup, static_time, Configuration}; #[tokio::main] async fn main() { const CONFIG_PATH: &str = "config.toml"; + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + // Initialize Torrust config let config = match Configuration::load_from_file(CONFIG_PATH) { Ok(config) => Arc::new(config), diff --git a/src/protocol/clock/clock.rs b/src/protocol/clock/clock.rs new file mode 100644 index 000000000..db59170b3 --- /dev/null +++ b/src/protocol/clock/clock.rs @@ -0,0 +1,248 @@ +use std::num::IntErrorKind; +pub use std::time::Duration; + +pub type SinceUnixEpoch = Duration; + +#[derive(Debug)] +pub enum ClockType { + WorkingClock, + StoppedClock, +} + +#[derive(Debug)] +pub struct Clock; + +pub type WorkingClock = Clock<{ ClockType::WorkingClock as usize }>; +pub type StoppedClock = Clock<{ ClockType::StoppedClock as usize }>; + +#[cfg(not(test))] +pub type DefaultClock = WorkingClock; + +#[cfg(test)] +pub type DefaultClock = StoppedClock; + +pub trait Time: Sized { + fn now() -> SinceUnixEpoch; +} + +pub trait TimeNow: Time { + fn add(add_time: &Duration) -> Option { + Self::now().checked_add(*add_time) + } + fn sub(sub_time: &Duration) -> Option { + Self::now().checked_sub(*sub_time) + } +} + +#[cfg(test)] +mod tests { + use std::any::TypeId; + + use crate::protocol::clock::clock::{DefaultClock, StoppedClock, Time, WorkingClock}; + + #[test] + fn it_should_be_the_stopped_clock_as_default_when_testing() { + // We are testing, so we should default to the fixed time. 
+ assert_eq!(TypeId::of::(), TypeId::of::()); + assert_eq!(StoppedClock::now(), DefaultClock::now()) + } + + #[test] + fn it_should_have_different_times() { + assert_ne!(TypeId::of::(), TypeId::of::()); + assert_ne!(StoppedClock::now(), WorkingClock::now()) + } +} + +mod working_clock { + use std::time::SystemTime; + + use super::{SinceUnixEpoch, Time, TimeNow, WorkingClock}; + + impl Time for WorkingClock { + fn now() -> SinceUnixEpoch { + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap() + } + } + + impl TimeNow for WorkingClock {} +} + +pub trait StoppedTime: TimeNow { + fn local_set(unix_time: &SinceUnixEpoch); + fn local_set_to_unix_epoch() { + Self::local_set(&SinceUnixEpoch::ZERO) + } + fn local_set_to_app_start_time(); + fn local_set_to_system_time_now(); + fn local_add(duration: &Duration) -> Result<(), IntErrorKind>; + fn local_sub(duration: &Duration) -> Result<(), IntErrorKind>; + fn local_reset(); +} + +mod stopped_clock { + use std::num::IntErrorKind; + use std::time::Duration; + + use super::{SinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow}; + + impl Time for StoppedClock { + fn now() -> SinceUnixEpoch { + detail::FIXED_TIME.with(|time| { + return *time.borrow(); + }) + } + } + + impl TimeNow for StoppedClock {} + + impl StoppedTime for StoppedClock { + fn local_set(unix_time: &SinceUnixEpoch) { + detail::FIXED_TIME.with(|time| { + *time.borrow_mut() = *unix_time; + }) + } + + fn local_set_to_app_start_time() { + Self::local_set(&detail::get_app_start_time()) + } + + fn local_set_to_system_time_now() { + Self::local_set(&detail::get_app_start_time()) + } + + fn local_add(duration: &Duration) -> Result<(), IntErrorKind> { + detail::FIXED_TIME.with(|time| { + let time_borrowed = *time.borrow(); + *time.borrow_mut() = match time_borrowed.checked_add(*duration) { + Some(time) => time, + None => { + return Err(IntErrorKind::PosOverflow); + } + }; + Ok(()) + }) + } + + fn local_sub(duration: &Duration) -> Result<(), IntErrorKind> { 
+ detail::FIXED_TIME.with(|time| { + let time_borrowed = *time.borrow(); + *time.borrow_mut() = match time_borrowed.checked_sub(*duration) { + Some(time) => time, + None => { + return Err(IntErrorKind::NegOverflow); + } + }; + Ok(()) + }) + } + + fn local_reset() { + Self::local_set(&detail::get_default_fixed_time()) + } + } + + #[cfg(test)] + mod tests { + use std::thread; + use std::time::Duration; + + use crate::protocol::clock::clock::{SinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow, WorkingClock}; + + #[test] + fn it_should_default_to_zero_when_testing() { + assert_eq!(StoppedClock::now(), SinceUnixEpoch::ZERO) + } + + #[test] + fn it_should_possible_to_set_the_time() { + // Check we start with ZERO. + assert_eq!(StoppedClock::now(), Duration::ZERO); + + // Set to Current Time and Check + let timestamp = WorkingClock::now(); + StoppedClock::local_set(×tamp); + assert_eq!(StoppedClock::now(), timestamp); + + // Elapse the Current Time and Check + StoppedClock::local_add(×tamp).unwrap(); + assert_eq!(StoppedClock::now(), timestamp + timestamp); + + // Reset to ZERO and Check + StoppedClock::local_reset(); + assert_eq!(StoppedClock::now(), Duration::ZERO); + } + + #[test] + fn it_should_default_to_zero_on_thread_exit() { + assert_eq!(StoppedClock::now(), Duration::ZERO); + let after5 = WorkingClock::add(&Duration::from_secs(5)).unwrap(); + StoppedClock::local_set(&after5); + assert_eq!(StoppedClock::now(), after5); + + let t = thread::spawn(move || { + // each thread starts out with the initial value of ZERO + assert_eq!(StoppedClock::now(), Duration::ZERO); + + // and gets set to the current time. 
+ let timestamp = WorkingClock::now(); + StoppedClock::local_set(×tamp); + assert_eq!(StoppedClock::now(), timestamp); + }); + + // wait for the thread to complete and bail out on panic + t.join().unwrap(); + + // we retain our original value of current time + 5sec despite the child thread + assert_eq!(StoppedClock::now(), after5); + + // Reset to ZERO and Check + StoppedClock::local_reset(); + assert_eq!(StoppedClock::now(), Duration::ZERO); + } + } + + mod detail { + use std::cell::RefCell; + use std::time::SystemTime; + + use crate::protocol::clock::clock::SinceUnixEpoch; + use crate::static_time; + + pub fn get_app_start_time() -> SinceUnixEpoch { + (*static_time::TIME_AT_APP_START) + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + } + + #[cfg(not(test))] + pub fn get_default_fixed_time() -> SinceUnixEpoch { + get_app_start_time() + } + + #[cfg(test)] + pub fn get_default_fixed_time() -> SinceUnixEpoch { + SinceUnixEpoch::ZERO + } + + thread_local!(pub static FIXED_TIME: RefCell = RefCell::new(get_default_fixed_time())); + + #[cfg(test)] + mod tests { + use std::time::Duration; + + use crate::protocol::clock::clock::stopped_clock::detail::{get_app_start_time, get_default_fixed_time}; + + #[test] + fn it_should_get_the_zero_start_time_when_testing() { + assert_eq!(get_default_fixed_time(), Duration::ZERO); + } + + #[test] + fn it_should_get_app_start_time() { + const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1662983731, 000022312); + assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST); + } + } + } +} diff --git a/src/protocol/clock/mod.rs b/src/protocol/clock/mod.rs new file mode 100644 index 000000000..159730d2b --- /dev/null +++ b/src/protocol/clock/mod.rs @@ -0,0 +1 @@ +pub mod clock; diff --git a/src/protocol/mod.rs b/src/protocol/mod.rs index 99cfd91e4..fcb28b3b2 100644 --- a/src/protocol/mod.rs +++ b/src/protocol/mod.rs @@ -1,2 +1,3 @@ +pub mod clock; pub mod common; pub mod utils; From cab093c026cf253ba6f7208211953a25b3d1c91b Mon Sep 
17 00:00:00 2001 From: Cameron Garnham Date: Tue, 13 Sep 2022 00:44:12 +0200 Subject: [PATCH 0127/1003] clock: use mockable clock in project --- src/api/server.rs | 3 ++- src/databases/mysql.rs | 7 ++++--- src/databases/sqlite.rs | 7 ++++--- src/protocol/utils.rs | 14 ++++++-------- src/tracker/key.rs | 36 ++++++++++++++++++++++++++---------- src/tracker/peer.rs | 11 ++++++----- src/tracker/torrent.rs | 6 ++++-- src/tracker/tracker.rs | 5 +++-- src/udp/errors.rs | 3 +++ 9 files changed, 58 insertions(+), 34 deletions(-) diff --git a/src/api/server.rs b/src/api/server.rs index cc6c905e4..5285c9b2b 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -2,6 +2,7 @@ use std::cmp::min; use std::collections::{HashMap, HashSet}; use std::net::SocketAddr; use std::sync::Arc; +use std::time::Duration; use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; @@ -268,7 +269,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp (seconds_valid, tracker) }) .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { - match tracker.generate_auth_key(seconds_valid).await { + match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { Ok(auth_key) => Ok(warp::reply::json(&auth_key)), Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { reason: "failed to generate key".into(), diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 882fb7bf4..33287df6d 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -1,4 +1,5 @@ use std::str::FromStr; +use std::time::Duration; use async_trait::async_trait; use log::debug; @@ -94,7 +95,7 @@ impl Database for MysqlDatabase { "SELECT `key`, valid_until FROM `keys`", |(key, valid_until): (String, i64)| AuthKey { key, - valid_until: Some(valid_until as u64), + valid_until: Some(Duration::from_secs(valid_until as u64)), }, ) .map_err(|_| database::Error::QueryReturnedNoRows)?; @@ -187,7 +188,7 @@ impl Database for MysqlDatabase { { Some((key, valid_until)) => Ok(AuthKey { key, - valid_until: Some(valid_until as u64), + valid_until: Some(Duration::from_secs(valid_until as u64)), }), None => Err(database::Error::InvalidQuery), } @@ -197,7 +198,7 @@ impl Database for MysqlDatabase { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let key = auth_key.key.to_string(); - let valid_until = auth_key.valid_until.unwrap_or(0).to_string(); + let valid_until = auth_key.valid_until.unwrap_or(Duration::ZERO).as_secs().to_string(); match conn.exec_drop( "INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 3aba39919..ff080306d 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -7,6 +7,7 @@ use r2d2_sqlite::SqliteConnectionManager; use crate::databases::database; use crate::databases::database::{Database, Error}; +use crate::protocol::clock::clock::SinceUnixEpoch; use crate::tracker::key::AuthKey; use crate::InfoHash; @@ -85,7 +86,7 @@ impl Database for SqliteDatabase { Ok(AuthKey { key, - valid_until: Some(valid_until as u64), + valid_until: Some(SinceUnixEpoch::from_secs(valid_until as u64)), }) })?; @@ -192,7 +193,7 @@ impl Database for SqliteDatabase { Ok(AuthKey { key, - 
valid_until: Some(valid_until_i64 as u64), + valid_until: Some(SinceUnixEpoch::from_secs(valid_until_i64 as u64)), }) } else { Err(database::Error::QueryReturnedNoRows) @@ -204,7 +205,7 @@ impl Database for SqliteDatabase { match conn.execute( "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - [auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()], + [auth_key.key.to_string(), auth_key.valid_until.unwrap().as_secs().to_string()], ) { Ok(updated) => { if updated > 0 { diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index e50c8b036..f2a68fdb3 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -1,19 +1,17 @@ use std::net::SocketAddr; -use std::time::SystemTime; use aquatic_udp_protocol::ConnectionId; +use super::clock::clock::{DefaultClock, SinceUnixEpoch, Time}; + pub fn get_connection_id(remote_address: &SocketAddr) -> ConnectionId { - match std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH) { - Ok(duration) => ConnectionId(((duration.as_secs() / 3600) | ((remote_address.port() as u64) << 36)) as i64), - Err(_) => ConnectionId(0x7FFFFFFFFFFFFFFF), - } + ConnectionId(((current_time() / 3600) | ((remote_address.port() as u64) << 36)) as i64) } pub fn current_time() -> u64 { - SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs() + DefaultClock::now().as_secs() } -pub fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { - ser.serialize_u64(inst.elapsed().as_millis() as u64) +pub fn ser_unix_time_value(unix_time_value: &SinceUnixEpoch, ser: S) -> Result { + ser.serialize_u64(unix_time_value.as_millis() as u64) } diff --git a/src/tracker/key.rs b/src/tracker/key.rs index f935dac07..8ba19ab12 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ -1,29 +1,31 @@ +use std::time::Duration; + use derive_more::{Display, Error}; use log::debug; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::Serialize; -use crate::protocol::utils::current_time; +use 
crate::protocol::clock::clock::{DefaultClock, SinceUnixEpoch, Time, TimeNow}; use crate::AUTH_KEY_LENGTH; -pub fn generate_auth_key(seconds_valid: u64) -> AuthKey { +pub fn generate_auth_key(lifetime: Duration) -> AuthKey { let key: String = thread_rng() .sample_iter(&Alphanumeric) .take(AUTH_KEY_LENGTH) .map(char::from) .collect(); - debug!("Generated key: {}, valid for: {} seconds", key, seconds_valid); + debug!("Generated key: {}, valid for: {:?} seconds", key, lifetime); AuthKey { key, - valid_until: Some(current_time() + seconds_valid), + valid_until: Some(DefaultClock::add(&lifetime).unwrap()), } } pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { - let current_time = current_time(); + let current_time: SinceUnixEpoch = DefaultClock::now(); if auth_key.valid_until.is_none() { return Err(Error::KeyInvalid); } @@ -37,7 +39,7 @@ pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { #[derive(Serialize, Debug, Eq, PartialEq, Clone)] pub struct AuthKey { pub key: String, - pub valid_until: Option, + pub valid_until: Option, } impl AuthKey { @@ -81,6 +83,9 @@ impl From for Error { #[cfg(test)] mod tests { + use std::time::Duration; + + use crate::protocol::clock::clock::{DefaultClock, StoppedTime}; use crate::tracker::key; #[test] @@ -105,15 +110,26 @@ mod tests { #[test] fn generate_valid_auth_key() { - let auth_key = key::generate_auth_key(9999); + let auth_key = key::generate_auth_key(Duration::new(9999, 0)); assert!(key::verify_auth_key(&auth_key).is_ok()); } #[test] - fn generate_expired_auth_key() { - let mut auth_key = key::generate_auth_key(0); - auth_key.valid_until = Some(0); + fn generate_and_check_expired_auth_key() { + // Set the time to the current time. + DefaultClock::local_set_to_system_time_now(); + + // Make key that is valid for 19 seconds. + let auth_key = key::generate_auth_key(Duration::from_secs(19)); + + // Mock the time has passed 10 sec. 
+ DefaultClock::local_add(&Duration::from_secs(10)).unwrap(); + + assert!(key::verify_auth_key(&auth_key).is_ok()); + + // Mock the time has passed another 10 sec. + DefaultClock::local_add(&Duration::from_secs(10)).unwrap(); assert!(key::verify_auth_key(&auth_key).is_err()); } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 0514f41ed..b37090b8e 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -5,16 +5,17 @@ use serde; use serde::Serialize; use crate::http::AnnounceRequest; +use crate::protocol::clock::clock::{DefaultClock, SinceUnixEpoch, Time}; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; -use crate::protocol::utils::ser_instant; +use crate::protocol::utils::ser_unix_time_value; use crate::PeerId; #[derive(PartialEq, Eq, Debug, Clone, Serialize)] pub struct TorrentPeer { pub peer_id: PeerId, pub peer_addr: SocketAddr, - #[serde(serialize_with = "ser_instant")] - pub updated: std::time::Instant, + #[serde(serialize_with = "ser_unix_time_value")] + pub updated: SinceUnixEpoch, #[serde(with = "NumberOfBytesDef")] pub uploaded: NumberOfBytes, #[serde(with = "NumberOfBytesDef")] @@ -36,7 +37,7 @@ impl TorrentPeer { TorrentPeer { peer_id: PeerId(announce_request.peer_id.0), peer_addr, - updated: std::time::Instant::now(), + updated: DefaultClock::now(), uploaded: announce_request.bytes_uploaded, downloaded: announce_request.bytes_downloaded, left: announce_request.bytes_left, @@ -65,7 +66,7 @@ impl TorrentPeer { TorrentPeer { peer_id: announce_request.peer_id.clone(), peer_addr, - updated: std::time::Instant::now(), + updated: DefaultClock::now(), uploaded: NumberOfBytes(announce_request.uploaded as i64), downloaded: NumberOfBytes(announce_request.downloaded as i64), left: NumberOfBytes(announce_request.left as i64), diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 7950ce9c0..b08f03266 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -1,9 +1,11 @@ use std::net::{IpAddr, SocketAddr}; 
+use std::time::Duration; use aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; use crate::peer::TorrentPeer; +use crate::protocol::clock::clock::{DefaultClock, TimeNow}; use crate::{PeerId, MAX_SCRAPE_TORRENTS}; #[derive(Serialize, Deserialize, Clone)] @@ -75,8 +77,8 @@ impl TorrentEntry { } pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { - self.peers - .retain(|_, peer| peer.updated.elapsed() > std::time::Duration::from_secs(max_peer_timeout as u64)); + let current_cutoff = DefaultClock::sub(&Duration::from_secs(max_peer_timeout as u64)).unwrap_or_default(); + self.peers.retain(|_, peer| peer.updated > current_cutoff); } } diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index 51d7716fb..9a242e41a 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -2,6 +2,7 @@ use std::collections::btree_map::Entry; use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; +use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; @@ -60,8 +61,8 @@ impl TorrentTracker { self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed } - pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { - let auth_key = key::generate_auth_key(seconds_valid); + pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { + let auth_key = key::generate_auth_key(lifetime); self.database.add_key_to_keys(&auth_key).await?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); Ok(auth_key) diff --git a/src/udp/errors.rs b/src/udp/errors.rs index fb29e969e..8d7b04b4f 100644 --- a/src/udp/errors.rs +++ b/src/udp/errors.rs @@ -8,6 +8,9 @@ pub enum ServerError { #[error("info_hash is either missing or invalid")] InvalidInfoHash, + #[error("connection id could not be verified")] + InvalidConnectionId, + #[error("could not find remote address")] AddressNotFound, From 7ac49c8228985324042a86c4cfea7205283edff1 Mon 
Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 14 Sep 2022 20:26:20 +0200 Subject: [PATCH 0128/1003] refactor: renamed `SinceUnixEpoch` to `DurationSinceUnixEpoch` to be more descriptive --- src/databases/sqlite.rs | 6 +++--- src/protocol/clock/clock.rs | 38 ++++++++++++++++++------------------- src/protocol/utils.rs | 4 ++-- src/tracker/key.rs | 6 +++--- src/tracker/peer.rs | 4 ++-- 5 files changed, 29 insertions(+), 29 deletions(-) diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index ff080306d..a329b6bfc 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -7,7 +7,7 @@ use r2d2_sqlite::SqliteConnectionManager; use crate::databases::database; use crate::databases::database::{Database, Error}; -use crate::protocol::clock::clock::SinceUnixEpoch; +use crate::protocol::clock::clock::DurationSinceUnixEpoch; use crate::tracker::key::AuthKey; use crate::InfoHash; @@ -86,7 +86,7 @@ impl Database for SqliteDatabase { Ok(AuthKey { key, - valid_until: Some(SinceUnixEpoch::from_secs(valid_until as u64)), + valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until as u64)), }) })?; @@ -193,7 +193,7 @@ impl Database for SqliteDatabase { Ok(AuthKey { key, - valid_until: Some(SinceUnixEpoch::from_secs(valid_until_i64 as u64)), + valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until_i64 as u64)), }) } else { Err(database::Error::QueryReturnedNoRows) diff --git a/src/protocol/clock/clock.rs b/src/protocol/clock/clock.rs index db59170b3..538edcfed 100644 --- a/src/protocol/clock/clock.rs +++ b/src/protocol/clock/clock.rs @@ -1,7 +1,7 @@ use std::num::IntErrorKind; pub use std::time::Duration; -pub type SinceUnixEpoch = Duration; +pub type DurationSinceUnixEpoch = Duration; #[derive(Debug)] pub enum ClockType { @@ -22,14 +22,14 @@ pub type DefaultClock = WorkingClock; pub type DefaultClock = StoppedClock; pub trait Time: Sized { - fn now() -> SinceUnixEpoch; + fn now() -> DurationSinceUnixEpoch; } pub trait TimeNow: Time { - fn 
add(add_time: &Duration) -> Option { + fn add(add_time: &Duration) -> Option { Self::now().checked_add(*add_time) } - fn sub(sub_time: &Duration) -> Option { + fn sub(sub_time: &Duration) -> Option { Self::now().checked_sub(*sub_time) } } @@ -57,10 +57,10 @@ mod tests { mod working_clock { use std::time::SystemTime; - use super::{SinceUnixEpoch, Time, TimeNow, WorkingClock}; + use super::{DurationSinceUnixEpoch, Time, TimeNow, WorkingClock}; impl Time for WorkingClock { - fn now() -> SinceUnixEpoch { + fn now() -> DurationSinceUnixEpoch { SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap() } } @@ -69,9 +69,9 @@ mod working_clock { } pub trait StoppedTime: TimeNow { - fn local_set(unix_time: &SinceUnixEpoch); + fn local_set(unix_time: &DurationSinceUnixEpoch); fn local_set_to_unix_epoch() { - Self::local_set(&SinceUnixEpoch::ZERO) + Self::local_set(&DurationSinceUnixEpoch::ZERO) } fn local_set_to_app_start_time(); fn local_set_to_system_time_now(); @@ -84,10 +84,10 @@ mod stopped_clock { use std::num::IntErrorKind; use std::time::Duration; - use super::{SinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow}; + use super::{DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow}; impl Time for StoppedClock { - fn now() -> SinceUnixEpoch { + fn now() -> DurationSinceUnixEpoch { detail::FIXED_TIME.with(|time| { return *time.borrow(); }) @@ -97,7 +97,7 @@ mod stopped_clock { impl TimeNow for StoppedClock {} impl StoppedTime for StoppedClock { - fn local_set(unix_time: &SinceUnixEpoch) { + fn local_set(unix_time: &DurationSinceUnixEpoch) { detail::FIXED_TIME.with(|time| { *time.borrow_mut() = *unix_time; }) @@ -147,11 +147,11 @@ mod stopped_clock { use std::thread; use std::time::Duration; - use crate::protocol::clock::clock::{SinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow, WorkingClock}; + use crate::protocol::clock::clock::{DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow, WorkingClock}; #[test] fn 
it_should_default_to_zero_when_testing() { - assert_eq!(StoppedClock::now(), SinceUnixEpoch::ZERO) + assert_eq!(StoppedClock::now(), DurationSinceUnixEpoch::ZERO) } #[test] @@ -206,26 +206,26 @@ mod stopped_clock { use std::cell::RefCell; use std::time::SystemTime; - use crate::protocol::clock::clock::SinceUnixEpoch; + use crate::protocol::clock::clock::DurationSinceUnixEpoch; use crate::static_time; - pub fn get_app_start_time() -> SinceUnixEpoch { + pub fn get_app_start_time() -> DurationSinceUnixEpoch { (*static_time::TIME_AT_APP_START) .duration_since(SystemTime::UNIX_EPOCH) .unwrap() } #[cfg(not(test))] - pub fn get_default_fixed_time() -> SinceUnixEpoch { + pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { get_app_start_time() } #[cfg(test)] - pub fn get_default_fixed_time() -> SinceUnixEpoch { - SinceUnixEpoch::ZERO + pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::ZERO } - thread_local!(pub static FIXED_TIME: RefCell = RefCell::new(get_default_fixed_time())); + thread_local!(pub static FIXED_TIME: RefCell = RefCell::new(get_default_fixed_time())); #[cfg(test)] mod tests { diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index f2a68fdb3..127baa4eb 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -2,7 +2,7 @@ use std::net::SocketAddr; use aquatic_udp_protocol::ConnectionId; -use super::clock::clock::{DefaultClock, SinceUnixEpoch, Time}; +use super::clock::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; pub fn get_connection_id(remote_address: &SocketAddr) -> ConnectionId { ConnectionId(((current_time() / 3600) | ((remote_address.port() as u64) << 36)) as i64) @@ -12,6 +12,6 @@ pub fn current_time() -> u64 { DefaultClock::now().as_secs() } -pub fn ser_unix_time_value(unix_time_value: &SinceUnixEpoch, ser: S) -> Result { +pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { ser.serialize_u64(unix_time_value.as_millis() as u64) } diff --git 
a/src/tracker/key.rs b/src/tracker/key.rs index 8ba19ab12..76ac21527 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ -6,7 +6,7 @@ use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::Serialize; -use crate::protocol::clock::clock::{DefaultClock, SinceUnixEpoch, Time, TimeNow}; +use crate::protocol::clock::clock::{DefaultClock, DurationSinceUnixEpoch, Time, TimeNow}; use crate::AUTH_KEY_LENGTH; pub fn generate_auth_key(lifetime: Duration) -> AuthKey { @@ -25,7 +25,7 @@ pub fn generate_auth_key(lifetime: Duration) -> AuthKey { } pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { - let current_time: SinceUnixEpoch = DefaultClock::now(); + let current_time: DurationSinceUnixEpoch = DefaultClock::now(); if auth_key.valid_until.is_none() { return Err(Error::KeyInvalid); } @@ -39,7 +39,7 @@ pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { #[derive(Serialize, Debug, Eq, PartialEq, Clone)] pub struct AuthKey { pub key: String, - pub valid_until: Option, + pub valid_until: Option, } impl AuthKey { diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index b37090b8e..b10c97bcc 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -5,7 +5,7 @@ use serde; use serde::Serialize; use crate::http::AnnounceRequest; -use crate::protocol::clock::clock::{DefaultClock, SinceUnixEpoch, Time}; +use crate::protocol::clock::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; use crate::protocol::utils::ser_unix_time_value; use crate::PeerId; @@ -15,7 +15,7 @@ pub struct TorrentPeer { pub peer_id: PeerId, pub peer_addr: SocketAddr, #[serde(serialize_with = "ser_unix_time_value")] - pub updated: SinceUnixEpoch, + pub updated: DurationSinceUnixEpoch, #[serde(with = "NumberOfBytesDef")] pub uploaded: NumberOfBytes, #[serde(with = "NumberOfBytesDef")] From 20f099eb0763a2d4625b788af63b2cd808440dd8 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: 
Mon, 19 Sep 2022 08:06:55 +0200 Subject: [PATCH 0129/1003] refactor: move clock/clock.rs into clock.rs (clippy) --- src/databases/sqlite.rs | 2 +- src/protocol/{clock => }/clock.rs | 8 ++++---- src/protocol/clock/mod.rs | 1 - src/protocol/utils.rs | 2 +- src/tracker/key.rs | 4 ++-- src/tracker/peer.rs | 5 ++--- src/tracker/torrent.rs | 2 +- 7 files changed, 11 insertions(+), 13 deletions(-) rename src/protocol/{clock => }/clock.rs (94%) delete mode 100644 src/protocol/clock/mod.rs diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index a329b6bfc..fb66c0b94 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -7,7 +7,7 @@ use r2d2_sqlite::SqliteConnectionManager; use crate::databases::database; use crate::databases::database::{Database, Error}; -use crate::protocol::clock::clock::DurationSinceUnixEpoch; +use crate::protocol::clock::DurationSinceUnixEpoch; use crate::tracker::key::AuthKey; use crate::InfoHash; diff --git a/src/protocol/clock/clock.rs b/src/protocol/clock.rs similarity index 94% rename from src/protocol/clock/clock.rs rename to src/protocol/clock.rs index 538edcfed..a72f3699e 100644 --- a/src/protocol/clock/clock.rs +++ b/src/protocol/clock.rs @@ -38,7 +38,7 @@ pub trait TimeNow: Time { mod tests { use std::any::TypeId; - use crate::protocol::clock::clock::{DefaultClock, StoppedClock, Time, WorkingClock}; + use crate::protocol::clock::{DefaultClock, StoppedClock, Time, WorkingClock}; #[test] fn it_should_be_the_stopped_clock_as_default_when_testing() { @@ -147,7 +147,7 @@ mod stopped_clock { use std::thread; use std::time::Duration; - use crate::protocol::clock::clock::{DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow, WorkingClock}; + use crate::protocol::clock::{DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow, WorkingClock}; #[test] fn it_should_default_to_zero_when_testing() { @@ -206,7 +206,7 @@ mod stopped_clock { use std::cell::RefCell; use std::time::SystemTime; - use 
crate::protocol::clock::clock::DurationSinceUnixEpoch; + use crate::protocol::clock::DurationSinceUnixEpoch; use crate::static_time; pub fn get_app_start_time() -> DurationSinceUnixEpoch { @@ -231,7 +231,7 @@ mod stopped_clock { mod tests { use std::time::Duration; - use crate::protocol::clock::clock::stopped_clock::detail::{get_app_start_time, get_default_fixed_time}; + use crate::protocol::clock::stopped_clock::detail::{get_app_start_time, get_default_fixed_time}; #[test] fn it_should_get_the_zero_start_time_when_testing() { diff --git a/src/protocol/clock/mod.rs b/src/protocol/clock/mod.rs deleted file mode 100644 index 159730d2b..000000000 --- a/src/protocol/clock/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod clock; diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index 127baa4eb..48fe4eb17 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -2,7 +2,7 @@ use std::net::SocketAddr; use aquatic_udp_protocol::ConnectionId; -use super::clock::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; +use super::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; pub fn get_connection_id(remote_address: &SocketAddr) -> ConnectionId { ConnectionId(((current_time() / 3600) | ((remote_address.port() as u64) << 36)) as i64) diff --git a/src/tracker/key.rs b/src/tracker/key.rs index 76ac21527..c513b48da 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ -6,7 +6,7 @@ use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::Serialize; -use crate::protocol::clock::clock::{DefaultClock, DurationSinceUnixEpoch, Time, TimeNow}; +use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time, TimeNow}; use crate::AUTH_KEY_LENGTH; pub fn generate_auth_key(lifetime: Duration) -> AuthKey { @@ -85,7 +85,7 @@ impl From for Error { mod tests { use std::time::Duration; - use crate::protocol::clock::clock::{DefaultClock, StoppedTime}; + use crate::protocol::clock::{DefaultClock, StoppedTime}; use crate::tracker::key; #[test] diff 
--git a/src/tracker/peer.rs b/src/tracker/peer.rs index b10c97bcc..71c411b9b 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -5,10 +5,9 @@ use serde; use serde::Serialize; use crate::http::AnnounceRequest; -use crate::protocol::clock::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; -use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; +use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; +use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef, PeerId}; use crate::protocol::utils::ser_unix_time_value; -use crate::PeerId; #[derive(PartialEq, Eq, Debug, Clone, Serialize)] pub struct TorrentPeer { diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index b08f03266..7404f63af 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -5,7 +5,7 @@ use aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; use crate::peer::TorrentPeer; -use crate::protocol::clock::clock::{DefaultClock, TimeNow}; +use crate::protocol::clock::{DefaultClock, TimeNow}; use crate::{PeerId, MAX_SCRAPE_TORRENTS}; #[derive(Serialize, Deserialize, Clone)] From b60a0bcc1f3ba08b5a667f05e9d5127276f07e30 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 9 Sep 2022 19:05:05 +0200 Subject: [PATCH 0130/1003] env: workspace vscode settings and extension recommendations --- .vscode/extensions.json | 6 ++++++ .vscode/settings.json | 17 +++++++++++++++++ 2 files changed, 23 insertions(+) create mode 100644 .vscode/extensions.json create mode 100644 .vscode/settings.json diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 000000000..b55ef8bf6 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,6 @@ +{ + "recommendations": [ + "streetsidesoftware.code-spell-checker", + "matklad.rust-analyzer" + ] +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..7d04b248f --- /dev/null +++ 
b/.vscode/settings.json @@ -0,0 +1,17 @@ +{ + "cSpell.words": [ + "byteorder", + "hasher", + "leechers", + "nanos", + "rngs", + "Seedable", + "thiserror", + "torrust", + "typenum" + ], + "[rust]": { + "editor.defaultFormatter": "matklad.rust-analyzer", + "editor.formatOnSave": true + }, +} \ No newline at end of file From 2b294dd1773d06967e9f6134dfc65101c91dc8fe Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 21 Sep 2022 15:23:33 +0200 Subject: [PATCH 0131/1003] env: vscode workspace settings, do clippy check on save. Co-authored-by: Jose Celano --- .vscode/settings.json | 1 + 1 file changed, 1 insertion(+) diff --git a/.vscode/settings.json b/.vscode/settings.json index 7d04b248f..d87732d93 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -14,4 +14,5 @@ "editor.defaultFormatter": "matklad.rust-analyzer", "editor.formatOnSave": true }, + "rust-analyzer.checkOnSave.command": "clippy", } \ No newline at end of file From 028644dd82e5fdb02741a6ee6e82a4ba6fdac5d8 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 20 Sep 2022 18:25:27 +0200 Subject: [PATCH 0132/1003] clock: time extent, maker, and associated traits `TimeExtent` is a simple structure that contains a base increment (duration), and an amount (a multiplier for the base increment). The resulting product of the base and the multiplier is the total duration of the time extent. `TimeExtentMaker` is a helper that generates time extents based upon the increment length and the time of the clock; this clock is specialised according to the `test` predicate with the `DefaultClockTimeExtentMaker` type.
--- src/protocol/{clock.rs => clock/mod.rs} | 6 +- src/protocol/clock/timeextent.rs | 185 ++++++++++++++++++++++++ 2 files changed, 189 insertions(+), 2 deletions(-) rename src/protocol/{clock.rs => clock/mod.rs} (99%) create mode 100644 src/protocol/clock/timeextent.rs diff --git a/src/protocol/clock.rs b/src/protocol/clock/mod.rs similarity index 99% rename from src/protocol/clock.rs rename to src/protocol/clock/mod.rs index a72f3699e..cab7290e3 100644 --- a/src/protocol/clock.rs +++ b/src/protocol/clock/mod.rs @@ -1,5 +1,5 @@ use std::num::IntErrorKind; -pub use std::time::Duration; +use std::time::Duration; pub type DurationSinceUnixEpoch = Duration; @@ -240,9 +240,11 @@ mod stopped_clock { #[test] fn it_should_get_app_start_time() { - const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1662983731, 000022312); + const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1662983731, 22312); assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST); } } } } + +pub mod timeextent; diff --git a/src/protocol/clock/timeextent.rs b/src/protocol/clock/timeextent.rs new file mode 100644 index 000000000..eb050179e --- /dev/null +++ b/src/protocol/clock/timeextent.rs @@ -0,0 +1,185 @@ +use std::num::{IntErrorKind, TryFromIntError}; +use std::time::Duration; + +use super::{ClockType, StoppedClock, TimeNow, WorkingClock}; + +pub trait Extent: Sized + Default { + type Base; + type Multiplier; + type Product; + + fn new(unit: &Self::Base, count: &Self::Multiplier) -> Self; + + fn add(&self, add: Self::Multiplier) -> Result; + fn sub(&self, sub: Self::Multiplier) -> Result; + + fn total(&self) -> Result, TryFromIntError>; + fn total_next(&self) -> Result, TryFromIntError>; +} + +pub type TimeExtentBase = Duration; +pub type TimeExtentMultiplier = u64; +pub type TimeExtentProduct = TimeExtentBase; + +#[derive(Debug, Default, Hash, PartialEq, Eq)] +pub struct TimeExtent { + pub increment: TimeExtentBase, + pub amount: TimeExtentMultiplier, +} + +impl TimeExtent { + pub 
const fn from_sec(seconds: u64, amount: &TimeExtentMultiplier) -> Self { + Self { + increment: TimeExtentBase::from_secs(seconds), + amount: *amount, + } + } +} + +impl Extent for TimeExtent { + type Base = TimeExtentBase; + type Multiplier = TimeExtentMultiplier; + type Product = TimeExtentProduct; + + fn new(increment: &Self::Base, amount: &Self::Multiplier) -> Self { + Self { + increment: *increment, + amount: *amount, + } + } + + fn add(&self, add: Self::Multiplier) -> Result { + match self.amount.checked_add(add) { + None => Err(IntErrorKind::PosOverflow), + Some(amount) => Ok(Self { + increment: self.increment, + amount, + }), + } + } + + fn sub(&self, sub: Self::Multiplier) -> Result { + match self.amount.checked_sub(sub) { + None => Err(IntErrorKind::NegOverflow), + Some(amount) => Ok(Self { + increment: self.increment, + amount, + }), + } + } + + fn total(&self) -> Result, TryFromIntError> { + match u32::try_from(self.amount) { + Err(error) => Err(error), + Ok(amount) => Ok(self.increment.checked_mul(amount)), + } + } + + fn total_next(&self) -> Result, TryFromIntError> { + match u32::try_from(self.amount) { + Err(e) => Err(e), + Ok(amount) => match amount.checked_add(1) { + None => Ok(None), + Some(amount) => match self.increment.checked_mul(amount) { + None => Ok(None), + Some(extent) => Ok(Some(extent)), + }, + }, + } + } +} + +pub trait MakeTimeExtent: Sized +where + Clock: TimeNow, +{ + fn now(increment: &TimeExtentBase) -> Option> { + Clock::now() + .as_nanos() + .checked_div((*increment).as_nanos()) + .map(|amount| match TimeExtentMultiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) => Ok(TimeExtent::new(increment, &amount)), + }) + } + + fn now_add(increment: &TimeExtentBase, add_time: &Duration) -> Option> { + match Clock::add(add_time) { + None => None, + Some(time) => { + time.as_nanos() + .checked_div(increment.as_nanos()) + .map(|amount| match TimeExtentMultiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) 
=> Ok(TimeExtent::new(increment, &amount)), + }) + } + } + } + fn now_sub(increment: &TimeExtentBase, sub_time: &Duration) -> Option> { + match Clock::sub(sub_time) { + None => None, + Some(time) => { + time.as_nanos() + .checked_div(increment.as_nanos()) + .map(|amount| match TimeExtentMultiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) => Ok(TimeExtent::new(increment, &amount)), + }) + } + } + } +} + +#[derive(Debug)] +pub struct TimeExtentMaker {} + +pub type WorkingClockTimeExtentMaker = TimeExtentMaker<{ ClockType::WorkingClock as usize }>; +pub type StoppedClockTimeExtentMaker = TimeExtentMaker<{ ClockType::StoppedClock as usize }>; + +impl MakeTimeExtent for WorkingClockTimeExtentMaker {} +impl MakeTimeExtent for StoppedClockTimeExtentMaker {} + +#[cfg(not(test))] +pub type DefaultClockTimeExtentMaker = WorkingClockTimeExtentMaker; + +#[cfg(test)] +pub type DefaultClockTimeExtentMaker = StoppedClockTimeExtentMaker; + +#[cfg(test)] +mod test { + + use std::time::Duration; + + use crate::protocol::clock::timeextent::{DefaultClockTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent}; + use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedTime}; + + #[test] + fn it_should_get_the_total_duration() { + assert_eq!(TimeExtent::default().total().unwrap().unwrap(), Duration::ZERO); + + assert_eq!( + TimeExtent::from_sec(12, &12).total().unwrap().unwrap(), + Duration::from_secs(144) + ); + assert_eq!( + TimeExtent::from_sec(12, &12).total_next().unwrap().unwrap(), + Duration::from_secs(156) + ); + } + + #[test] + fn it_should_make_the_current_extent() { + assert_eq!( + DefaultClockTimeExtentMaker::now(&Duration::from_secs(2)).unwrap().unwrap(), + TimeExtent::from_sec(2, &0) + ); + + DefaultClock::local_set(&DurationSinceUnixEpoch::from_secs(12387687123)); + + assert_eq!( + DefaultClockTimeExtentMaker::now(&Duration::from_secs(2)).unwrap().unwrap(), + TimeExtent::from_sec(2, &6193843561) + ); + } +} From 
0cab30cd2f60d1bf6e2d2d9296a75451f4319d53 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 21 Sep 2022 14:48:34 +0200 Subject: [PATCH 0133/1003] time extent: add tests and some corrections * Write tests for all functions. * Rename `now_add` to `now_after` and `now_sub` to `now_before`. * Rework time extent totals calculations to work with larger numbers. --- src/protocol/clock/timeextent.rs | 437 +++++++++++++++++++++++++++---- 1 file changed, 383 insertions(+), 54 deletions(-) diff --git a/src/protocol/clock/timeextent.rs b/src/protocol/clock/timeextent.rs index eb050179e..dfd00efe7 100644 --- a/src/protocol/clock/timeextent.rs +++ b/src/protocol/clock/timeextent.rs @@ -10,11 +10,11 @@ pub trait Extent: Sized + Default { fn new(unit: &Self::Base, count: &Self::Multiplier) -> Self; - fn add(&self, add: Self::Multiplier) -> Result; - fn sub(&self, sub: Self::Multiplier) -> Result; + fn increase(&self, add: Self::Multiplier) -> Result; + fn decrease(&self, sub: Self::Multiplier) -> Result; - fn total(&self) -> Result, TryFromIntError>; - fn total_next(&self) -> Result, TryFromIntError>; + fn total(&self) -> Option>; + fn total_next(&self) -> Option>; } pub type TimeExtentBase = Duration; @@ -36,6 +36,20 @@ impl TimeExtent { } } +fn checked_duration_from_nanos(time: u128) -> Result { + const NANOS_PER_SEC: u32 = 1_000_000_000; + + let secs = time.div_euclid(NANOS_PER_SEC as u128); + let nanos = time.rem_euclid(NANOS_PER_SEC as u128); + + assert!(nanos < NANOS_PER_SEC as u128); + + match u64::try_from(secs) { + Err(error) => Err(error), + Ok(secs) => Ok(Duration::new(secs, nanos.try_into().unwrap())), + } +} + impl Extent for TimeExtent { type Base = TimeExtentBase; type Multiplier = TimeExtentMultiplier; @@ -48,7 +62,7 @@ impl Extent for TimeExtent { } } - fn add(&self, add: Self::Multiplier) -> Result { + fn increase(&self, add: Self::Multiplier) -> Result { match self.amount.checked_add(add) { None => Err(IntErrorKind::PosOverflow), Some(amount) => Ok(Self { 
@@ -58,7 +72,7 @@ impl Extent for TimeExtent { } } - fn sub(&self, sub: Self::Multiplier) -> Result { + fn decrease(&self, sub: Self::Multiplier) -> Result { match self.amount.checked_sub(sub) { None => Err(IntErrorKind::NegOverflow), Some(amount) => Ok(Self { @@ -68,24 +82,18 @@ impl Extent for TimeExtent { } } - fn total(&self) -> Result, TryFromIntError> { - match u32::try_from(self.amount) { - Err(error) => Err(error), - Ok(amount) => Ok(self.increment.checked_mul(amount)), - } + fn total(&self) -> Option> { + self.increment + .as_nanos() + .checked_mul(self.amount as u128) + .map(checked_duration_from_nanos) } - fn total_next(&self) -> Result, TryFromIntError> { - match u32::try_from(self.amount) { - Err(e) => Err(e), - Ok(amount) => match amount.checked_add(1) { - None => Ok(None), - Some(amount) => match self.increment.checked_mul(amount) { - None => Ok(None), - Some(extent) => Ok(Some(extent)), - }, - }, - } + fn total_next(&self) -> Option> { + self.increment + .as_nanos() + .checked_mul((self.amount as u128) + 1) + .map(checked_duration_from_nanos) } } @@ -103,7 +111,7 @@ where }) } - fn now_add(increment: &TimeExtentBase, add_time: &Duration) -> Option> { + fn now_after(increment: &TimeExtentBase, add_time: &Duration) -> Option> { match Clock::add(add_time) { None => None, Some(time) => { @@ -116,7 +124,7 @@ where } } } - fn now_sub(increment: &TimeExtentBase, sub_time: &Duration) -> Option> { + fn now_before(increment: &TimeExtentBase, sub_time: &Duration) -> Option> { match Clock::sub(sub_time) { None => None, Some(time) => { @@ -149,37 +157,358 @@ pub type DefaultClockTimeExtentMaker = StoppedClockTimeExtentMaker; #[cfg(test)] mod test { - use std::time::Duration; - - use crate::protocol::clock::timeextent::{DefaultClockTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent}; + use crate::protocol::clock::timeextent::{ + checked_duration_from_nanos, DefaultClockTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent, TimeExtentBase, + TimeExtentProduct, + }; 
use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedTime}; - #[test] - fn it_should_get_the_total_duration() { - assert_eq!(TimeExtent::default().total().unwrap().unwrap(), Duration::ZERO); - - assert_eq!( - TimeExtent::from_sec(12, &12).total().unwrap().unwrap(), - Duration::from_secs(144) - ); - assert_eq!( - TimeExtent::from_sec(12, &12).total_next().unwrap().unwrap(), - Duration::from_secs(156) - ); - } - - #[test] - fn it_should_make_the_current_extent() { - assert_eq!( - DefaultClockTimeExtentMaker::now(&Duration::from_secs(2)).unwrap().unwrap(), - TimeExtent::from_sec(2, &0) - ); - - DefaultClock::local_set(&DurationSinceUnixEpoch::from_secs(12387687123)); - - assert_eq!( - DefaultClockTimeExtentMaker::now(&Duration::from_secs(2)).unwrap().unwrap(), - TimeExtent::from_sec(2, &6193843561) - ); + const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239812388723); + + mod fn_checked_duration_from_nanos { + use std::time::Duration; + + use super::*; + + const NANOS_PER_SEC: u32 = 1_000_000_000; + + #[test] + fn it_should_return_a_duration() { + assert_eq!(checked_duration_from_nanos(0).unwrap(), Duration::from_micros(0)); + assert_eq!( + checked_duration_from_nanos(1232143214343432).unwrap(), + Duration::from_nanos(1232143214343432) + ); + assert_eq!( + checked_duration_from_nanos(u64::MAX as u128).unwrap(), + Duration::from_nanos(u64::MAX) + ); + assert_eq!( + checked_duration_from_nanos(TIME_EXTENT_VAL.amount as u128 * NANOS_PER_SEC as u128).unwrap(), + Duration::from_secs(TIME_EXTENT_VAL.amount) + ); + } + + #[test] + fn it_should_return_tryfrom_int_error() { + assert_eq!( + checked_duration_from_nanos(u128::MAX).unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + + mod time_extent_from_sec { + use super::*; + + #[test] + fn it_should_make_time_extent() { + assert_eq!(TIME_EXTENT_VAL.increment, TimeExtentBase::from_secs(2)); + assert_eq!(TIME_EXTENT_VAL.amount, 239812388723); + } + } + + mod time_extent_default { 
+ use super::*; + + #[test] + fn it_should_make_time_extent() { + let time_extent_default = TimeExtent::default(); + assert_eq!(time_extent_default.increment, TimeExtentBase::ZERO); + assert_eq!(time_extent_default.amount, 0); + } + } + + mod time_extent_new { + use super::*; + + #[test] + fn it_should_make_time_extent() { + let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount); + assert_eq!(time_extent.increment, TimeExtentBase::from_millis(2)); + assert_eq!(time_extent.amount, TIME_EXTENT_VAL.amount); + } + } + + mod time_extent_increase { + use std::num::IntErrorKind; + + use super::*; + + #[test] + fn it_should_return_increased() { + let time_extent_default = TimeExtent::default(); + let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount); + + let time_extent_default_increase = TimeExtent { + increment: TimeExtentBase::ZERO, + amount: 50, + }; + let time_extent_increase = TimeExtent { + increment: TimeExtentBase::from_millis(2), + amount: TIME_EXTENT_VAL.amount + 50, + }; + let time_extent_from_sec_increase = TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: TIME_EXTENT_VAL.amount + 50, + }; + + assert_eq!(time_extent_default.increase(50).unwrap(), time_extent_default_increase); + assert_eq!(time_extent.increase(50).unwrap(), time_extent_increase); + assert_eq!(TIME_EXTENT_VAL.increase(50).unwrap(), time_extent_from_sec_increase); + } + + #[test] + fn it_should_postive_overflow() { + assert_eq!(TIME_EXTENT_VAL.increase(u64::MAX), Err(IntErrorKind::PosOverflow)); + } + } + + mod time_extent_decrease { + use std::num::IntErrorKind; + + use super::*; + + #[test] + fn it_should_return_decreased() { + let time_extent_default = TimeExtent::default(); + let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount); + + let time_extent_default_decrease = TimeExtent { + increment: TimeExtentBase::ZERO, + amount: 0, + }; + let time_extent_decrease = 
TimeExtent { + increment: TimeExtentBase::from_millis(2), + amount: TIME_EXTENT_VAL.amount - 50, + }; + let time_extent_from_sec_decrease = TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: TIME_EXTENT_VAL.amount - 50, + }; + + assert_eq!(time_extent_default.decrease(0).unwrap(), time_extent_default_decrease); + assert_eq!(time_extent.decrease(50).unwrap(), time_extent_decrease); + assert_eq!(TIME_EXTENT_VAL.decrease(50).unwrap(), time_extent_from_sec_decrease); + } + + #[test] + fn it_should_return_an_negitive_overflow() { + assert_eq!(TIME_EXTENT_VAL.decrease(u64::MAX), Err(IntErrorKind::NegOverflow)); + } + } + + mod time_extent_total { + use super::*; + + #[test] + fn it_should_return_total() { + let time_extent_default = TimeExtent::default(); + let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &(TIME_EXTENT_VAL.amount / 1000)); + + assert_eq!(time_extent_default.total().unwrap().unwrap(), TimeExtentProduct::ZERO); + assert_eq!( + time_extent.total().unwrap().unwrap(), + TimeExtentProduct::new(479624, 776000000) + ); + assert_eq!( + TIME_EXTENT_VAL.total().unwrap().unwrap(), + TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) + ); + } + + #[test] + fn it_should_return_none() { + let time_extent_max = TimeExtent { + increment: TimeExtentBase::MAX, + amount: u64::MAX as u64, + }; + assert_eq!(time_extent_max.total(), None); + } + + #[test] + fn it_should_return_tryfrom_int_error() { + let time_extent_max = TimeExtent { + increment: TimeExtentBase::MAX, + amount: 2, + }; + assert_eq!( + time_extent_max.total().unwrap().unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + + mod time_extent_total_next { + use super::*; + + #[test] + fn it_should_get_the_time_extent_total_next() { + let time_extent_default = TimeExtent::default(); + let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount); + + assert_eq!( + 
time_extent_default.total_next().unwrap().unwrap(), + TimeExtentProduct::from_secs(0) + ); + assert_eq!( + time_extent.total_next().unwrap().unwrap(), + TimeExtentProduct::new(479624777, 448000000) + ); + assert_eq!( + TIME_EXTENT_VAL.total_next().unwrap().unwrap(), + TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount + 1)) + ); + } + + #[test] + fn it_should_return_none() { + let time_extent_max = TimeExtent { + increment: TimeExtentBase::MAX, + amount: u64::MAX as u64, + }; + assert_eq!(time_extent_max.total_next(), None); + } + + #[test] + fn it_should_return_tryfrom_int_error() { + let time_extent_max = TimeExtent { + increment: TimeExtentBase::MAX, + amount: 2, + }; + assert_eq!( + time_extent_max.total_next().unwrap().unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + + mod make_time_extent_now { + use super::*; + + #[test] + fn it_should_return_a_time_extent() { + assert_eq!( + DefaultClockTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), + TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: 0 + } + ); + + DefaultClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); + + assert_eq!( + DefaultClockTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), + TIME_EXTENT_VAL + ); + } + + #[test] + fn it_should_return_none() { + assert_eq!(DefaultClockTimeExtentMaker::now(&TimeExtentBase::ZERO), None); + } + + #[test] + fn it_should_return_tryfrom_int_error() { + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultClockTimeExtentMaker::now(&TimeExtentBase::from_millis(1)) + .unwrap() + .unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + + mod make_time_extent_now_after { + use std::time::Duration; + + use super::*; + + #[test] + fn it_should_return_a_time_extent() { + assert_eq!( + DefaultClockTimeExtentMaker::now_after( + &TIME_EXTENT_VAL.increment, + &Duration::from_secs(TIME_EXTENT_VAL.amount * 
2) + ) + .unwrap() + .unwrap(), + TIME_EXTENT_VAL + ); + } + + #[test] + fn it_should_return_none() { + assert_eq!( + DefaultClockTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::ZERO), + None + ); + + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultClockTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::MAX), + None + ); + } + + #[test] + fn it_should_return_tryfrom_int_error() { + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultClockTimeExtentMaker::now_after(&TimeExtentBase::from_millis(1), &Duration::ZERO) + .unwrap() + .unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + mod make_time_extent_now_before { + use std::time::Duration; + + use super::*; + + #[test] + fn it_should_return_a_time_extent() { + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + + assert_eq!( + DefaultClockTimeExtentMaker::now_before( + &TimeExtentBase::from_secs(u32::MAX as u64), + &Duration::from_secs(u32::MAX as u64) + ) + .unwrap() + .unwrap(), + TimeExtent { + increment: TimeExtentBase::from_secs(u32::MAX as u64), + amount: 4294967296 + } + ); + } + + #[test] + fn it_should_return_none() { + assert_eq!( + DefaultClockTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::ZERO), + None + ); + + assert_eq!( + DefaultClockTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::MAX), + None + ); + } + + #[test] + fn it_should_return_tryfrom_int_error() { + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultClockTimeExtentMaker::now_before(&TimeExtentBase::from_millis(1), &Duration::ZERO) + .unwrap() + .unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } } } From 8653f9a509431433c4aabd004cdf419ffd0ef62d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 21 Sep 2022 15:02:11 +0200 Subject: [PATCH 0134/1003] time extent: rename `TimeExtentMaker` for clarity Rename: from `DefaultClockTimeExtentMaker` to `DefaultTimeExtentMaker`, 
and the associated types. Co-authored-by: Jose Celano --- src/protocol/clock/timeextent.rs | 48 ++++++++++++++------------------ 1 file changed, 21 insertions(+), 27 deletions(-) diff --git a/src/protocol/clock/timeextent.rs b/src/protocol/clock/timeextent.rs index dfd00efe7..0356b7f39 100644 --- a/src/protocol/clock/timeextent.rs +++ b/src/protocol/clock/timeextent.rs @@ -142,23 +142,23 @@ where #[derive(Debug)] pub struct TimeExtentMaker {} -pub type WorkingClockTimeExtentMaker = TimeExtentMaker<{ ClockType::WorkingClock as usize }>; -pub type StoppedClockTimeExtentMaker = TimeExtentMaker<{ ClockType::StoppedClock as usize }>; +pub type WorkingTimeExtentMaker = TimeExtentMaker<{ ClockType::WorkingClock as usize }>; +pub type StoppedTimeExtentMaker = TimeExtentMaker<{ ClockType::StoppedClock as usize }>; -impl MakeTimeExtent for WorkingClockTimeExtentMaker {} -impl MakeTimeExtent for StoppedClockTimeExtentMaker {} +impl MakeTimeExtent for WorkingTimeExtentMaker {} +impl MakeTimeExtent for StoppedTimeExtentMaker {} #[cfg(not(test))] -pub type DefaultClockTimeExtentMaker = WorkingClockTimeExtentMaker; +pub type DefaultTimeExtentMaker = WorkingTimeExtentMaker; #[cfg(test)] -pub type DefaultClockTimeExtentMaker = StoppedClockTimeExtentMaker; +pub type DefaultTimeExtentMaker = StoppedTimeExtentMaker; #[cfg(test)] mod test { use crate::protocol::clock::timeextent::{ - checked_duration_from_nanos, DefaultClockTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent, TimeExtentBase, + checked_duration_from_nanos, DefaultTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent, TimeExtentBase, TimeExtentProduct, }; use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedTime}; @@ -389,7 +389,7 @@ mod test { #[test] fn it_should_return_a_time_extent() { assert_eq!( - DefaultClockTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), + DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), TimeExtent { increment: 
TIME_EXTENT_VAL.increment, amount: 0 @@ -399,21 +399,21 @@ mod test { DefaultClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); assert_eq!( - DefaultClockTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), + DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), TIME_EXTENT_VAL ); } #[test] fn it_should_return_none() { - assert_eq!(DefaultClockTimeExtentMaker::now(&TimeExtentBase::ZERO), None); + assert_eq!(DefaultTimeExtentMaker::now(&TimeExtentBase::ZERO), None); } #[test] fn it_should_return_tryfrom_int_error() { DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( - DefaultClockTimeExtentMaker::now(&TimeExtentBase::from_millis(1)) + DefaultTimeExtentMaker::now(&TimeExtentBase::from_millis(1)) .unwrap() .unwrap_err(), u64::try_from(u128::MAX).unwrap_err() @@ -429,12 +429,9 @@ mod test { #[test] fn it_should_return_a_time_extent() { assert_eq!( - DefaultClockTimeExtentMaker::now_after( - &TIME_EXTENT_VAL.increment, - &Duration::from_secs(TIME_EXTENT_VAL.amount * 2) - ) - .unwrap() - .unwrap(), + DefaultTimeExtentMaker::now_after(&TIME_EXTENT_VAL.increment, &Duration::from_secs(TIME_EXTENT_VAL.amount * 2)) + .unwrap() + .unwrap(), TIME_EXTENT_VAL ); } @@ -442,22 +439,19 @@ mod test { #[test] fn it_should_return_none() { assert_eq!( - DefaultClockTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::ZERO), + DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::ZERO), None ); DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!( - DefaultClockTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::MAX), - None - ); + assert_eq!(DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::MAX), None); } #[test] fn it_should_return_tryfrom_int_error() { DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( - DefaultClockTimeExtentMaker::now_after(&TimeExtentBase::from_millis(1), &Duration::ZERO) + 
DefaultTimeExtentMaker::now_after(&TimeExtentBase::from_millis(1), &Duration::ZERO) .unwrap() .unwrap_err(), u64::try_from(u128::MAX).unwrap_err() @@ -474,7 +468,7 @@ mod test { DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( - DefaultClockTimeExtentMaker::now_before( + DefaultTimeExtentMaker::now_before( &TimeExtentBase::from_secs(u32::MAX as u64), &Duration::from_secs(u32::MAX as u64) ) @@ -490,12 +484,12 @@ mod test { #[test] fn it_should_return_none() { assert_eq!( - DefaultClockTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::ZERO), + DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::ZERO), None ); assert_eq!( - DefaultClockTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::MAX), + DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::MAX), None ); } @@ -504,7 +498,7 @@ mod test { fn it_should_return_tryfrom_int_error() { DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( - DefaultClockTimeExtentMaker::now_before(&TimeExtentBase::from_millis(1), &Duration::ZERO) + DefaultTimeExtentMaker::now_before(&TimeExtentBase::from_millis(1), &Duration::ZERO) .unwrap() .unwrap_err(), u64::try_from(u128::MAX).unwrap_err() From 3d743edec410f7d0e96db37d66e01cab8b248ec4 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 21 Sep 2022 15:28:32 +0200 Subject: [PATCH 0135/1003] time extent: use snake_case for module name Rename: from timeextent to time_extent. 
Co-authored-by: Jose Celano --- src/protocol/clock/mod.rs | 2 +- src/protocol/clock/{timeextent.rs => time_extent.rs} | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) rename src/protocol/clock/{timeextent.rs => time_extent.rs} (99%) diff --git a/src/protocol/clock/mod.rs b/src/protocol/clock/mod.rs index cab7290e3..4e15950e6 100644 --- a/src/protocol/clock/mod.rs +++ b/src/protocol/clock/mod.rs @@ -247,4 +247,4 @@ mod stopped_clock { } } -pub mod timeextent; +pub mod time_extent; diff --git a/src/protocol/clock/timeextent.rs b/src/protocol/clock/time_extent.rs similarity index 99% rename from src/protocol/clock/timeextent.rs rename to src/protocol/clock/time_extent.rs index 0356b7f39..85b5257ad 100644 --- a/src/protocol/clock/timeextent.rs +++ b/src/protocol/clock/time_extent.rs @@ -157,7 +157,7 @@ pub type DefaultTimeExtentMaker = StoppedTimeExtentMaker; #[cfg(test)] mod test { - use crate::protocol::clock::timeextent::{ + use crate::protocol::clock::time_extent::{ checked_duration_from_nanos, DefaultTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent, TimeExtentBase, TimeExtentProduct, }; From e785a7fd8e0d0a393b1d312d07178ebef43dda25 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 21 Sep 2022 18:08:13 +0200 Subject: [PATCH 0136/1003] time extent: overhaul tests naming scheme --- src/protocol/clock/time_extent.rs | 558 ++++++++++++++++-------------- 1 file changed, 302 insertions(+), 256 deletions(-) diff --git a/src/protocol/clock/time_extent.rs b/src/protocol/clock/time_extent.rs index 85b5257ad..d0713645b 100644 --- a/src/protocol/clock/time_extent.rs +++ b/src/protocol/clock/time_extent.rs @@ -27,6 +27,15 @@ pub struct TimeExtent { pub amount: TimeExtentMultiplier, } +pub const ZERO: TimeExtent = TimeExtent { + increment: TimeExtentBase::ZERO, + amount: TimeExtentMultiplier::MIN, +}; +pub const MAX: TimeExtent = TimeExtent { + increment: TimeExtentBase::MAX, + amount: TimeExtentMultiplier::MAX, +}; + impl TimeExtent { pub const fn 
from_sec(seconds: u64, amount: &TimeExtentMultiplier) -> Self { Self { @@ -159,7 +168,7 @@ mod test { use crate::protocol::clock::time_extent::{ checked_duration_from_nanos, DefaultTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent, TimeExtentBase, - TimeExtentProduct, + TimeExtentMultiplier, TimeExtentProduct, MAX, ZERO, }; use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedTime}; @@ -173,8 +182,12 @@ mod test { const NANOS_PER_SEC: u32 = 1_000_000_000; #[test] - fn it_should_return_a_duration() { - assert_eq!(checked_duration_from_nanos(0).unwrap(), Duration::from_micros(0)); + fn it_should_give_zero_for_zero_input() { + assert_eq!(checked_duration_from_nanos(0).unwrap(), Duration::ZERO); + } + + #[test] + fn it_should_be_the_same_as_duration_implementation_for_u64_numbers() { assert_eq!( checked_duration_from_nanos(1232143214343432).unwrap(), Duration::from_nanos(1232143214343432) @@ -183,6 +196,10 @@ mod test { checked_duration_from_nanos(u64::MAX as u128).unwrap(), Duration::from_nanos(u64::MAX) ); + } + + #[test] + fn it_should_work_for_some_numbers_larger_than_u64() { assert_eq!( checked_duration_from_nanos(TIME_EXTENT_VAL.amount as u128 * NANOS_PER_SEC as u128).unwrap(), Duration::from_secs(TIME_EXTENT_VAL.amount) @@ -190,7 +207,7 @@ mod test { } #[test] - fn it_should_return_tryfrom_int_error() { + fn it_should_fail_for_numbers_that_are_too_large() { assert_eq!( checked_duration_from_nanos(u128::MAX).unwrap_err(), u64::try_from(u128::MAX).unwrap_err() @@ -198,311 +215,340 @@ mod test { } } - mod time_extent_from_sec { + mod time_extent { use super::*; - #[test] - fn it_should_make_time_extent() { - assert_eq!(TIME_EXTENT_VAL.increment, TimeExtentBase::from_secs(2)); - assert_eq!(TIME_EXTENT_VAL.amount, 239812388723); - } - } + mod fn_default { - mod time_extent_default { - use super::*; + use super::*; - #[test] - fn it_should_make_time_extent() { - let time_extent_default = TimeExtent::default(); - 
assert_eq!(time_extent_default.increment, TimeExtentBase::ZERO); - assert_eq!(time_extent_default.amount, 0); + #[test] + fn it_should_default_initialize_to_zero() { + assert_eq!(TimeExtent::default(), ZERO); + } } - } - mod time_extent_new { - use super::*; + mod fn_from_sec { + use super::*; - #[test] - fn it_should_make_time_extent() { - let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount); - assert_eq!(time_extent.increment, TimeExtentBase::from_millis(2)); - assert_eq!(time_extent.amount, TIME_EXTENT_VAL.amount); + #[test] + fn it_should_make_empty_for_zero() { + assert_eq!(TimeExtent::from_sec(u64::MIN, &TimeExtentMultiplier::MIN), ZERO); + } + #[test] + fn it_should_make_from_seconds() { + assert_eq!( + TimeExtent::from_sec(TIME_EXTENT_VAL.increment.as_secs(), &TIME_EXTENT_VAL.amount), + TIME_EXTENT_VAL + ); + } } - } - mod time_extent_increase { - use std::num::IntErrorKind; + mod fn_new { + use super::*; - use super::*; + #[test] + fn it_should_make_empty_for_zero() { + assert_eq!(TimeExtent::new(&TimeExtentBase::ZERO, &TimeExtentMultiplier::MIN), ZERO); + } - #[test] - fn it_should_return_increased() { - let time_extent_default = TimeExtent::default(); - let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount); - - let time_extent_default_increase = TimeExtent { - increment: TimeExtentBase::ZERO, - amount: 50, - }; - let time_extent_increase = TimeExtent { - increment: TimeExtentBase::from_millis(2), - amount: TIME_EXTENT_VAL.amount + 50, - }; - let time_extent_from_sec_increase = TimeExtent { - increment: TIME_EXTENT_VAL.increment, - amount: TIME_EXTENT_VAL.amount + 50, - }; - - assert_eq!(time_extent_default.increase(50).unwrap(), time_extent_default_increase); - assert_eq!(time_extent.increase(50).unwrap(), time_extent_increase); - assert_eq!(TIME_EXTENT_VAL.increase(50).unwrap(), time_extent_from_sec_increase); + #[test] + fn it_should_make_new() { + assert_eq!( + 
TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount), + TimeExtent { + increment: TimeExtentBase::from_millis(2), + amount: TIME_EXTENT_VAL.amount + } + ); + } } - #[test] - fn it_should_postive_overflow() { - assert_eq!(TIME_EXTENT_VAL.increase(u64::MAX), Err(IntErrorKind::PosOverflow)); - } - } + mod fn_increase { + use std::num::IntErrorKind; - mod time_extent_decrease { - use std::num::IntErrorKind; + use super::*; - use super::*; + #[test] + fn it_should_not_increase_for_zero() { + assert_eq!(ZERO.increase(0).unwrap(), ZERO); + } - #[test] - fn it_should_return_decreased() { - let time_extent_default = TimeExtent::default(); - let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount); - - let time_extent_default_decrease = TimeExtent { - increment: TimeExtentBase::ZERO, - amount: 0, - }; - let time_extent_decrease = TimeExtent { - increment: TimeExtentBase::from_millis(2), - amount: TIME_EXTENT_VAL.amount - 50, - }; - let time_extent_from_sec_decrease = TimeExtent { - increment: TIME_EXTENT_VAL.increment, - amount: TIME_EXTENT_VAL.amount - 50, - }; - - assert_eq!(time_extent_default.decrease(0).unwrap(), time_extent_default_decrease); - assert_eq!(time_extent.decrease(50).unwrap(), time_extent_decrease); - assert_eq!(TIME_EXTENT_VAL.decrease(50).unwrap(), time_extent_from_sec_decrease); - } + #[test] + fn it_should_increase() { + assert_eq!( + TIME_EXTENT_VAL.increase(50).unwrap(), + TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: TIME_EXTENT_VAL.amount + 50, + } + ); + } - #[test] - fn it_should_return_an_negitive_overflow() { - assert_eq!(TIME_EXTENT_VAL.decrease(u64::MAX), Err(IntErrorKind::NegOverflow)); + #[test] + fn it_should_fail_when_attempting_to_increase_beyond_bounds() { + assert_eq!(TIME_EXTENT_VAL.increase(u64::MAX), Err(IntErrorKind::PosOverflow)); + } } - } - mod time_extent_total { - use super::*; + mod fn_decrease { + use std::num::IntErrorKind; - #[test] - fn 
it_should_return_total() { - let time_extent_default = TimeExtent::default(); - let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &(TIME_EXTENT_VAL.amount / 1000)); + use super::*; - assert_eq!(time_extent_default.total().unwrap().unwrap(), TimeExtentProduct::ZERO); - assert_eq!( - time_extent.total().unwrap().unwrap(), - TimeExtentProduct::new(479624, 776000000) - ); - assert_eq!( - TIME_EXTENT_VAL.total().unwrap().unwrap(), - TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) - ); - } + #[test] + fn it_should_not_decrease_for_zero() { + assert_eq!(ZERO.decrease(0).unwrap(), ZERO); + } - #[test] - fn it_should_return_none() { - let time_extent_max = TimeExtent { - increment: TimeExtentBase::MAX, - amount: u64::MAX as u64, - }; - assert_eq!(time_extent_max.total(), None); - } + #[test] + fn it_should_decrease() { + assert_eq!( + TIME_EXTENT_VAL.decrease(50).unwrap(), + TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: TIME_EXTENT_VAL.amount - 50, + } + ); + } - #[test] - fn it_should_return_tryfrom_int_error() { - let time_extent_max = TimeExtent { - increment: TimeExtentBase::MAX, - amount: 2, - }; - assert_eq!( - time_extent_max.total().unwrap().unwrap_err(), - u64::try_from(u128::MAX).unwrap_err() - ); + #[test] + fn it_should_fail_when_attempting_to_decrease_beyond_bounds() { + assert_eq!(TIME_EXTENT_VAL.decrease(u64::MAX), Err(IntErrorKind::NegOverflow)); + } } - } - mod time_extent_total_next { - use super::*; + mod fn_total { + use super::*; - #[test] - fn it_should_get_the_time_extent_total_next() { - let time_extent_default = TimeExtent::default(); - let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount); + #[test] + fn it_should_be_zero_for_zero() { + assert_eq!(ZERO.total().unwrap().unwrap(), TimeExtentProduct::ZERO); + } - assert_eq!( - time_extent_default.total_next().unwrap().unwrap(), - TimeExtentProduct::from_secs(0) - ); - assert_eq!( - 
time_extent.total_next().unwrap().unwrap(), - TimeExtentProduct::new(479624777, 448000000) - ); - assert_eq!( - TIME_EXTENT_VAL.total_next().unwrap().unwrap(), - TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount + 1)) - ); - } + #[test] + fn it_should_give_a_total() { + assert_eq!( + TIME_EXTENT_VAL.total().unwrap().unwrap(), + TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) + ); + + assert_eq!( + TimeExtent::new(&TimeExtentBase::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) + .total() + .unwrap() + .unwrap(), + TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) + ); + + assert_eq!( + TimeExtent::new(&TimeExtentBase::from_secs(1), &(u64::MAX)) + .total() + .unwrap() + .unwrap(), + TimeExtentProduct::from_secs(u64::MAX) + ); + } - #[test] - fn it_should_return_none() { - let time_extent_max = TimeExtent { - increment: TimeExtentBase::MAX, - amount: u64::MAX as u64, - }; - assert_eq!(time_extent_max.total_next(), None); - } + #[test] + fn it_should_fail_when_too_large() { + assert_eq!(MAX.total(), None); + } - #[test] - fn it_should_return_tryfrom_int_error() { - let time_extent_max = TimeExtent { - increment: TimeExtentBase::MAX, - amount: 2, - }; - assert_eq!( - time_extent_max.total_next().unwrap().unwrap_err(), - u64::try_from(u128::MAX).unwrap_err() - ); + #[test] + fn it_should_fail_when_product_is_too_large() { + let time_extent = TimeExtent { + increment: MAX.increment, + amount: 2, + }; + assert_eq!( + time_extent.total().unwrap().unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } } - } - mod make_time_extent_now { - use super::*; + mod fn_total_next { + use super::*; - #[test] - fn it_should_return_a_time_extent() { - assert_eq!( - DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), - TimeExtent { - increment: TIME_EXTENT_VAL.increment, - amount: 0 - } - ); - - 
DefaultClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); + #[test] + fn it_should_be_zero_for_zero() { + assert_eq!(ZERO.total_next().unwrap().unwrap(), TimeExtentProduct::ZERO); + } - assert_eq!( - DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), - TIME_EXTENT_VAL - ); - } + #[test] + fn it_should_give_a_total() { + assert_eq!( + TIME_EXTENT_VAL.total_next().unwrap().unwrap(), + TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount + 1)) + ); + + assert_eq!( + TimeExtent::new(&TimeExtentBase::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) + .total_next() + .unwrap() + .unwrap(), + TimeExtentProduct::new( + TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount), + TimeExtentBase::from_millis(2).as_nanos().try_into().unwrap() + ) + ); + + assert_eq!( + TimeExtent::new(&TimeExtentBase::from_secs(1), &(u64::MAX - 1)) + .total_next() + .unwrap() + .unwrap(), + TimeExtentProduct::from_secs(u64::MAX) + ); + } - #[test] - fn it_should_return_none() { - assert_eq!(DefaultTimeExtentMaker::now(&TimeExtentBase::ZERO), None); - } + #[test] + fn it_should_fail_when_too_large() { + assert_eq!(MAX.total_next(), None); + } - #[test] - fn it_should_return_tryfrom_int_error() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!( - DefaultTimeExtentMaker::now(&TimeExtentBase::from_millis(1)) - .unwrap() - .unwrap_err(), - u64::try_from(u128::MAX).unwrap_err() - ); + #[test] + fn it_should_fail_when_product_is_too_large() { + let time_extent = TimeExtent { + increment: MAX.increment, + amount: 2, + }; + assert_eq!( + time_extent.total_next().unwrap().unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } } } - mod make_time_extent_now_after { - use std::time::Duration; - + mod make_time_extent { use super::*; - #[test] - fn it_should_return_a_time_extent() { - assert_eq!( - DefaultTimeExtentMaker::now_after(&TIME_EXTENT_VAL.increment, 
&Duration::from_secs(TIME_EXTENT_VAL.amount * 2)) - .unwrap() - .unwrap(), - TIME_EXTENT_VAL - ); - } + mod fn_now { + use super::*; + + #[test] + fn it_should_give_a_time_extent() { + assert_eq!( + DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), + TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: 0 + } + ); + + DefaultClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); + + assert_eq!( + DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), + TIME_EXTENT_VAL + ); + } - #[test] - fn it_should_return_none() { - assert_eq!( - DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::ZERO), - None - ); + #[test] + fn it_should_fail_for_zero() { + assert_eq!(DefaultTimeExtentMaker::now(&TimeExtentBase::ZERO), None); + } - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!(DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::MAX), None); + #[test] + fn it_should_fail_if_amount_exceeds_bounds() { + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultTimeExtentMaker::now(&TimeExtentBase::from_millis(1)) + .unwrap() + .unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } } - #[test] - fn it_should_return_tryfrom_int_error() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!( - DefaultTimeExtentMaker::now_after(&TimeExtentBase::from_millis(1), &Duration::ZERO) + mod fn_now_after { + use std::time::Duration; + + use super::*; + + #[test] + fn it_should_give_a_time_extent() { + assert_eq!( + DefaultTimeExtentMaker::now_after( + &TIME_EXTENT_VAL.increment, + &Duration::from_secs(TIME_EXTENT_VAL.amount * 2) + ) .unwrap() - .unwrap_err(), - u64::try_from(u128::MAX).unwrap_err() - ); - } - } - mod make_time_extent_now_before { - use std::time::Duration; + .unwrap(), + TIME_EXTENT_VAL + ); + } - use super::*; + #[test] + fn it_should_fail_for_zero() { + assert_eq!( + 
DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::ZERO), + None + ); - #[test] - fn it_should_return_a_time_extent() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!(DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::MAX), None); + } - assert_eq!( - DefaultTimeExtentMaker::now_before( - &TimeExtentBase::from_secs(u32::MAX as u64), - &Duration::from_secs(u32::MAX as u64) - ) - .unwrap() - .unwrap(), - TimeExtent { - increment: TimeExtentBase::from_secs(u32::MAX as u64), - amount: 4294967296 - } - ); + #[test] + fn it_should_fail_if_amount_exceeds_bounds() { + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultTimeExtentMaker::now_after(&TimeExtentBase::from_millis(1), &Duration::ZERO) + .unwrap() + .unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } } + mod fn_now_before { + use std::time::Duration; - #[test] - fn it_should_return_none() { - assert_eq!( - DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::ZERO), - None - ); + use super::*; - assert_eq!( - DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::MAX), - None - ); - } + #[test] + fn it_should_give_a_time_extent() { + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); - #[test] - fn it_should_return_tryfrom_int_error() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!( - DefaultTimeExtentMaker::now_before(&TimeExtentBase::from_millis(1), &Duration::ZERO) + assert_eq!( + DefaultTimeExtentMaker::now_before( + &TimeExtentBase::from_secs(u32::MAX as u64), + &Duration::from_secs(u32::MAX as u64) + ) .unwrap() - .unwrap_err(), - u64::try_from(u128::MAX).unwrap_err() - ); + .unwrap(), + TimeExtent { + increment: TimeExtentBase::from_secs(u32::MAX as u64), + amount: 4294967296 + } + ); + } + + #[test] + fn it_should_fail_for_zero() { + assert_eq!( + 
DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::ZERO), + None + ); + + assert_eq!( + DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::MAX), + None + ); + } + + #[test] + fn it_should_fail_if_amount_exceeds_bounds() { + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultTimeExtentMaker::now_before(&TimeExtentBase::from_millis(1), &Duration::ZERO) + .unwrap() + .unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } } } } From 7abe0f5bde1e209553d1a1e2d6fe644cd46a9395 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 12 Sep 2022 16:56:51 +0100 Subject: [PATCH 0137/1003] test: add test for udp::handlers --- src/main.rs | 6 +- src/tracker/statistics.rs | 54 +- src/tracker/torrent.rs | 2 +- src/tracker/tracker.rs | 36 +- src/udp/handlers.rs | 1048 +++++++++++++++++++++++++++++++++++++ 5 files changed, 1119 insertions(+), 27 deletions(-) diff --git a/src/main.rs b/src/main.rs index 01121052a..47896ff43 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use log::info; +use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::tracker::TorrentTracker; use torrust_tracker::{logging, setup, static_time, Configuration}; @@ -19,8 +20,11 @@ async fn main() { } }; + // Initialize stats tracker + let stats_tracker = StatsTracker::new_running_instance(); + // Initialize Torrust tracker - let tracker = match TorrentTracker::new(config.clone()) { + let tracker = match TorrentTracker::new(config.clone(), Box::new(stats_tracker)) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 85a2dbae9..cf801e1df 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -1,12 +1,12 @@ +use async_trait::async_trait; use std::sync::Arc; - use tokio::sync::mpsc::error::SendError; use tokio::sync::mpsc::Sender; use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; 
const CHANNEL_BUFFER_SIZE: usize = 65_535; -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub enum TrackerStatisticsEvent { Tcp4Announce, Tcp4Scrape, @@ -61,6 +61,12 @@ pub struct StatsTracker { } impl StatsTracker { + pub fn new_running_instance() -> Self { + let mut stats_tracker = Self::new(); + stats_tracker.run_worker(); + stats_tracker + } + pub fn new() -> Self { Self { channel_sender: None, @@ -68,18 +74,6 @@ impl StatsTracker { } } - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { - self.stats.read().await - } - - pub async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { - if let Some(tx) = &self.channel_sender { - Some(tx.send(event).await) - } else { - None - } - } - pub fn run_worker(&mut self) { let (tx, mut rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); @@ -134,3 +128,35 @@ impl StatsTracker { }); } } + +#[async_trait] +pub trait TrackerStatisticsEventSender: Sync + Send { + async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>>; +} + +#[async_trait] +impl TrackerStatisticsEventSender for StatsTracker { + async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { + if let Some(tx) = &self.channel_sender { + Some(tx.send(event).await) + } else { + None + } + } +} + +#[async_trait] +pub trait TrackerStatisticsRepository: Sync + Send { + async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics>; +} + +#[async_trait] +impl TrackerStatisticsRepository for StatsTracker { + async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { + self.stats.read().await + } +} + +pub trait TrackerStatsService: TrackerStatisticsEventSender + TrackerStatisticsRepository {} + +impl TrackerStatsService for StatsTracker {} diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 7404f63af..f12f0a622 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -8,7 +8,7 @@ use crate::peer::TorrentPeer; use crate::protocol::clock::{DefaultClock, TimeNow}; use 
crate::{PeerId, MAX_SCRAPE_TORRENTS}; -#[derive(Serialize, Deserialize, Clone)] +#[derive(Serialize, Deserialize, Clone, Debug)] pub struct TorrentEntry { #[serde(skip)] pub peers: std::collections::BTreeMap, diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index 9a242e41a..5499eebeb 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -12,7 +12,7 @@ use crate::databases::database::Database; use crate::mode::TrackerMode; use crate::peer::TorrentPeer; use crate::protocol::common::InfoHash; -use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; +use crate::statistics::{TrackerStatistics, TrackerStatisticsEvent, TrackerStatsService}; use crate::tracker::key; use crate::tracker::key::AuthKey; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; @@ -24,19 +24,13 @@ pub struct TorrentTracker { keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, - stats_tracker: StatsTracker, + stats_tracker: Box, database: Box, } impl TorrentTracker { - pub fn new(config: Arc) -> Result { + pub fn new(config: Arc, stats_tracker: Box) -> Result { let database = database::connect_database(&config.db_driver, &config.db_path)?; - let mut stats_tracker = StatsTracker::new(); - - // starts a thread for updating tracker stats - if config.tracker_usage_statistics { - stats_tracker.run_worker(); - } Ok(TorrentTracker { config: config.clone(), @@ -96,11 +90,20 @@ impl TorrentTracker { // Adding torrents is not relevant to public trackers. 
pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { - self.database.add_info_hash_to_whitelist(info_hash.clone()).await?; - self.whitelist.write().await.insert(info_hash.clone()); + self.add_torrent_to_database_whitelist(info_hash).await?; + self.add_torrent_to_memory_whitelist(info_hash).await; Ok(()) } + async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + self.database.add_info_hash_to_whitelist(*info_hash).await?; + Ok(()) + } + + pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.insert(*info_hash) + } + // Removing torrents is not relevant to public trackers. pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { self.database.remove_info_hash_from_whitelist(info_hash.clone()).await?; @@ -177,6 +180,7 @@ impl TorrentTracker { Ok(()) } + /// Get all torrent peers for a given torrent filtering out the peer with the client address pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { let read_lock = self.torrents.read().await; @@ -186,6 +190,16 @@ impl TorrentTracker { } } + /// Get all torrent peers for a given torrent + pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { + let read_lock = self.torrents.read().await; + + match read_lock.get(info_hash) { + None => vec![], + Some(entry) => entry.get_peers(None).into_iter().cloned().collect(), + } + } + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> TorrentStats { let mut torrents = self.torrents.write().await; diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 907dac0bc..3c4074eae 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -236,3 +236,1051 @@ fn handle_error(e: ServerError, transaction_id: TransactionId) -> Response { message: message.into(), }) } + 
+#[cfg(test)] +mod tests { + use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::Arc, + }; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use async_trait::async_trait; + use tokio::sync::{mpsc::error::SendError, RwLock, RwLockReadGuard}; + + use crate::{ + mode::TrackerMode, + peer::TorrentPeer, + protocol::clock::{DefaultClock, Time}, + statistics::{ + StatsTracker, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender, TrackerStatisticsRepository, + TrackerStatsService, + }, + tracker::tracker::TorrentTracker, + Configuration, PeerId, + }; + + fn default_tracker_config() -> Arc { + Arc::new(Configuration::default()) + } + + fn initialized_public_tracker() -> Arc { + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); + Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_running_instance())).unwrap()) + } + + fn initialized_private_tracker() -> Arc { + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); + Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_running_instance())).unwrap()) + } + + fn initialized_whitelisted_tracker() -> Arc { + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); + Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_running_instance())).unwrap()) + } + + fn sample_ipv4_remote_addr() -> SocketAddr { + sample_ipv4_socket_address() + } + + fn sample_ipv6_remote_addr() -> SocketAddr { + sample_ipv6_socket_address() + } + + fn sample_ipv4_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + } + + fn sample_ipv6_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + } + + struct TorrentPeerBuilder { + peer: TorrentPeer, + } + + impl TorrentPeerBuilder { + pub fn 
default() -> TorrentPeerBuilder { + let default_peer = TorrentPeer { + peer_id: PeerId([255u8; 20]), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DefaultClock::now(), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + TorrentPeerBuilder { peer: default_peer } + } + + pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { + self.peer.peer_id = peer_id; + self + } + + pub fn with_peer_addr(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + pub fn with_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } + + pub fn into(self) -> TorrentPeer { + self.peer + } + } + + struct TrackerStatsServiceMock { + stats: Arc>, + expected_event: Option, + } + + impl TrackerStatsServiceMock { + fn new() -> Self { + Self { + stats: Arc::new(RwLock::new(TrackerStatistics::new())), + expected_event: None, + } + } + + fn should_throw_event(&mut self, expected_event: TrackerStatisticsEvent) { + self.expected_event = Some(expected_event); + } + } + + #[async_trait] + impl TrackerStatisticsEventSender for TrackerStatsServiceMock { + async fn send_event(&self, _event: TrackerStatisticsEvent) -> Option>> { + if self.expected_event.is_some() { + assert_eq!(_event, *self.expected_event.as_ref().unwrap()); + } + None + } + } + + #[async_trait] + impl TrackerStatisticsRepository for TrackerStatsServiceMock { + async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { + self.stats.read().await + } + } + + impl TrackerStatsService for TrackerStatsServiceMock {} + + struct TrackerConfigurationBuilder { + configuration: Configuration, + } + + impl TrackerConfigurationBuilder { + pub fn default() -> TrackerConfigurationBuilder { + let default_configuration = Configuration::default(); + TrackerConfigurationBuilder { + configuration: default_configuration, + } + } + + pub fn 
with_external_ip(mut self, external_ip: &str) -> Self { + self.configuration.external_ip = Some(external_ip.to_owned()); + self + } + + pub fn with_mode(mut self, mode: TrackerMode) -> Self { + self.configuration.mode = mode; + self + } + + pub fn into(self) -> Configuration { + self.configuration + } + } + + mod connect_request { + + use std::sync::Arc; + + use crate::{ + protocol::utils::get_connection_id, + statistics::TrackerStatisticsEvent, + tracker::tracker::TorrentTracker, + udp::{ + handle_connect, + handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}, + }, + }; + use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; + + use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr, TrackerStatsServiceMock}; + + fn sample_connect_request() -> ConnectRequest { + ConnectRequest { + transaction_id: TransactionId(0i32), + } + } + + #[tokio::test] + async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { + let request = ConnectRequest { + transaction_id: TransactionId(0i32), + }; + + let response = handle_connect(sample_ipv4_remote_addr(), &request, initialized_public_tracker()) + .await + .unwrap(); + + assert_eq!( + response, + Response::Connect(ConnectResponse { + connection_id: get_connection_id(&sample_ipv4_remote_addr()), + transaction_id: request.transaction_id + }) + ); + } + + #[tokio::test] + async fn a_connect_response_should_contain_a_new_connection_id() { + let request = ConnectRequest { + transaction_id: TransactionId(0i32), + }; + + let response = handle_connect(sample_ipv4_remote_addr(), &request, initialized_public_tracker()) + .await + .unwrap(); + + assert_eq!( + response, + Response::Connect(ConnectResponse { + connection_id: get_connection_id(&sample_ipv4_remote_addr()), + transaction_id: request.transaction_id + }) + ); + } + + #[tokio::test] + async fn 
it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { + let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + + let client_socket_address = sample_ipv4_socket_address(); + tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Connect); + + let torrent_tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) + .await + .unwrap(); + } + + #[tokio::test] + async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { + let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + + tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Connect); + + let torrent_tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) + .await + .unwrap(); + } + } + + mod announce_request { + + use std::net::Ipv4Addr; + + use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, + }; + + use crate::{protocol::utils::get_connection_id, udp::handlers::tests::sample_ipv4_remote_addr}; + + struct AnnounceRequestBuilder { + request: AnnounceRequest, + } + + impl AnnounceRequestBuilder { + pub fn default() -> AnnounceRequestBuilder { + let client_ip = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); + + let default_request = AnnounceRequest { + connection_id: get_connection_id(&sample_ipv4_remote_addr()), + transaction_id: TransactionId(0i32), + info_hash: info_hash_aquatic, + peer_id: AquaticPeerId([255u8; 20]), + bytes_downloaded: NumberOfBytes(0i64), + bytes_uploaded: NumberOfBytes(0i64), + bytes_left: 
NumberOfBytes(0i64), + event: AnnounceEvent::Started, + ip_address: Some(client_ip), + key: PeerKey(0u32), + peers_wanted: NumberOfPeers(1i32), + port: Port(client_port), + }; + AnnounceRequestBuilder { + request: default_request, + } + } + + pub fn with_info_hash(mut self, info_hash: aquatic_udp_protocol::InfoHash) -> Self { + self.request.info_hash = info_hash; + self + } + + pub fn with_peer_id(mut self, peer_id: AquaticPeerId) -> Self { + self.request.peer_id = peer_id; + self + } + + pub fn with_ip_address(mut self, ip_address: Ipv4Addr) -> Self { + self.request.ip_address = Some(ip_address); + self + } + + pub fn with_port(mut self, port: u16) -> Self { + self.request.port = Port(port); + self + } + + pub fn into(self) -> AnnounceRequest { + self.request + } + } + + mod using_ipv4 { + + use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::Arc, + }; + + use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, + Response, ResponsePeer, + }; + + use crate::{ + statistics::TrackerStatisticsEvent, + tracker::tracker::TorrentTracker, + udp::{ + handle_announce, + handlers::tests::{ + announce_request::AnnounceRequestBuilder, default_tracker_config, initialized_public_tracker, + sample_ipv4_socket_address, TorrentPeerBuilder, TrackerStatsServiceMock, + }, + }, + PeerId, + }; + + #[tokio::test] + async fn an_announced_peer_should_be_added_to_the_tracker() { + let tracker = initialized_public_tracker(); + + let client_ip = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let request = AnnounceRequestBuilder::default() + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip) + .with_port(client_port) + .into(); + + let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + handle_announce(remote_addr, &request, 
tracker.clone()).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + let expected_peer = TorrentPeerBuilder::default() + .with_peer_id(PeerId(peer_id.0)) + .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip), client_port)) + .into(); + + assert_eq!(peers[0], expected_peer); + } + + #[tokio::test] + async fn the_announced_peer_should_not_be_included_in_the_response() { + let request = AnnounceRequestBuilder::default().into(); + let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + + let response = handle_announce(remote_addr, &request, initialized_public_tracker()) + .await + .unwrap(); + + let empty_peer_vector: Vec> = vec![]; + assert_eq!( + response, + Response::from(AnnounceResponse { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(120i32), + leechers: NumberOfPeers(0i32), + seeders: NumberOfPeers(1i32), + peers: empty_peer_vector + }) + ); + } + + #[tokio::test] + async fn the_tracker_should_always_use_the_remote_client_ip_but_not_the_port_in_the_udp_request_header_instead_of_the_peer_address_in_the_announce_request( + ) { + // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): + // "Do note that most trackers will only honor the IP address field under limited circumstances." 
+ + let tracker = initialized_public_tracker(); + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + let client_port = 8080; + + let remote_client_ip = Ipv4Addr::new(126, 0, 0, 1); + let remote_client_port = 8081; + let peer_address = Ipv4Addr::new(126, 0, 0, 2); + + let request = AnnounceRequestBuilder::default() + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(peer_address) + .with_port(client_port) + .into(); + + let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); + handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); + } + + async fn add_a_torrent_peer_using_ipv6(tracker: Arc) { + let info_hash = AquaticInfoHash([0u8; 20]); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let peer_id = AquaticPeerId([255u8; 20]); + + let peer_using_ipv6 = TorrentPeerBuilder::default() + .with_peer_id(PeerId(peer_id.0)) + .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .into(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv6) + .await; + } + + async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { + let request = AnnounceRequestBuilder::default().into(); + let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + response + } + + #[tokio::test] + async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { + let tracker = initialized_public_tracker(); + + add_a_torrent_peer_using_ipv6(tracker.clone()).await; + + let response = 
announce_a_new_peer_using_ipv4(tracker.clone()).await; + + // The response should not contain the peer using IPV6 + let peers: Option>> = match response { + Response::AnnounceIpv6(announce_response) => Some(announce_response.peers), + _ => None, + }; + let no_ipv6_peers = peers.is_none(); + assert!(no_ipv6_peers); + } + + #[tokio::test] + async fn should_send_the_upd4_announce_event() { + let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + + tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Announce); + + let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + handle_announce( + sample_ipv4_socket_address(), + &AnnounceRequestBuilder::default().into(), + tracker.clone(), + ) + .await + .unwrap(); + } + + mod from_a_loopback_ip { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + + use crate::{ + udp::{ + handle_announce, + handlers::tests::{ + announce_request::AnnounceRequestBuilder, initialized_public_tracker, TorrentPeerBuilder, + }, + }, + PeerId, + }; + + #[tokio::test] + async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { + let tracker = initialized_public_tracker(); + + let client_ip = Ipv4Addr::new(127, 0, 0, 1); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let request = AnnounceRequestBuilder::default() + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip) + .with_port(client_port) + .into(); + + let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + let external_ip_in_tracker_configuration = + tracker.config.external_ip.clone().unwrap().parse::().unwrap(); + + let 
expected_peer = TorrentPeerBuilder::default() + .with_peer_id(PeerId(peer_id.0)) + .with_peer_addr(SocketAddr::new(IpAddr::V4(external_ip_in_tracker_configuration), client_port)) + .into(); + + assert_eq!(peers[0], expected_peer); + } + } + } + + mod using_ipv6 { + + use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::Arc, + }; + + use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, + Response, ResponsePeer, + }; + + use crate::{ + statistics::TrackerStatisticsEvent, + tracker::tracker::TorrentTracker, + udp::{ + handle_announce, + handlers::tests::{ + announce_request::AnnounceRequestBuilder, default_tracker_config, initialized_public_tracker, + sample_ipv6_remote_addr, TorrentPeerBuilder, TrackerStatsServiceMock, + }, + }, + PeerId, + }; + + #[tokio::test] + async fn an_announced_peer_should_be_added_to_the_tracker() { + let tracker = initialized_public_tracker(); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let request = AnnounceRequestBuilder::default() + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip_v4) + .with_port(client_port) + .into(); + + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + let expected_peer = TorrentPeerBuilder::default() + .with_peer_id(PeerId(peer_id.0)) + .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .into(); + + assert_eq!(peers[0], expected_peer); + } + + #[tokio::test] + async fn the_announced_peer_should_not_be_included_in_the_response() { + let request = AnnounceRequestBuilder::default().into(); + let client_ip_v4 = 
Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); + + let response = handle_announce(remote_addr, &request, initialized_public_tracker()) + .await + .unwrap(); + + let empty_peer_vector: Vec> = vec![]; + assert_eq!( + response, + Response::from(AnnounceResponse { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(120i32), + leechers: NumberOfPeers(0i32), + seeders: NumberOfPeers(1i32), + peers: empty_peer_vector + }) + ); + } + + #[tokio::test] + async fn the_tracker_should_always_use_the_remote_client_ip_but_not_the_port_in_the_udp_request_header_instead_of_the_peer_address_in_the_announce_request( + ) { + // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): + // "Do note that most trackers will only honor the IP address field under limited circumstances." + + let tracker = initialized_public_tracker(); + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + let client_port = 8080; + + let remote_client_ip = "::100".parse().unwrap(); // IPV4 ::0.0.1.0 -> IPV6 = ::100 = ::ffff:0:100 = 0:0:0:0:0:ffff:0:0100 + let remote_client_port = 8081; + let peer_address = "126.0.0.1".parse().unwrap(); + + let request = AnnounceRequestBuilder::default() + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(peer_address) + .with_port(client_port) + .into(); + + let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); + handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + // When using IPv6 the tracker converts the remote client ip into a IPv4 address + assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); + } + + async fn add_a_torrent_peer_using_ipv4(tracker: Arc) { + let info_hash = AquaticInfoHash([0u8; 20]); + + 
let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let peer_id = AquaticPeerId([255u8; 20]); + + let peer_using_ipv4 = TorrentPeerBuilder::default() + .with_peer_id(PeerId(peer_id.0)) + .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) + .into(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv4) + .await; + } + + async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let request = AnnounceRequestBuilder::default().into(); + let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + response + } + + #[tokio::test] + async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { + let tracker = initialized_public_tracker(); + + add_a_torrent_peer_using_ipv4(tracker.clone()).await; + + let response = announce_a_new_peer_using_ipv6(tracker.clone()).await; + + // The response should not contain the peer using IPV4 + let peers: Option>> = match response { + Response::AnnounceIpv4(announce_response) => Some(announce_response.peers), + _ => None, + }; + let no_ipv4_peers = peers.is_none(); + assert!(no_ipv4_peers); + } + + #[tokio::test] + async fn should_send_the_upd6_announce_event() { + let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + + tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Announce); + + let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + handle_announce( + sample_ipv6_remote_addr(), + &AnnounceRequestBuilder::default().into(), + tracker.clone(), + ) + .await + .unwrap(); + } + + mod from_a_loopback_ip { + use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + 
sync::Arc, + }; + + use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + + use crate::{ + statistics::StatsTracker, + tracker::tracker::TorrentTracker, + udp::{ + handle_announce, + handlers::tests::{announce_request::AnnounceRequestBuilder, TrackerConfigurationBuilder}, + }, + }; + + #[tokio::test] + async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); + let tracker = + Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_running_instance())).unwrap()); + + let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); + let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + + let client_ip_v4 = loopback_ipv4; + let client_ip_v6 = loopback_ipv6; + let client_port = 8080; + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let request = AnnounceRequestBuilder::default() + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip_v4) + .with_port(client_port) + .into(); + + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + let _external_ip_in_tracker_configuration = + tracker.config.external_ip.clone().unwrap().parse::().unwrap(); + + // There's a special type of IPv6 addresses that provide compatibility with IPv4. + // The last 32 bits of these addresses represent an IPv4, and are represented like this: + // 1111:2222:3333:4444:5555:6666:1.2.3.4 + // + // ::127.0.0.1 is the IPV6 representation for the IPV4 address 127.0.0.1. 
+ assert_eq!(Ok(peers[0].peer_addr.ip()), "::126.0.0.1".parse()); + } + } + } + } + + mod scrape_request { + use std::{net::SocketAddr, sync::Arc}; + + use aquatic_udp_protocol::{ + InfoHash, NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, + TransactionId, + }; + + use crate::{ + protocol::utils::get_connection_id, + tracker::tracker::TorrentTracker, + udp::{ + handle_scrape, + handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}, + }, + PeerId, + }; + + use super::TorrentPeerBuilder; + + fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { + TorrentScrapeStatistics { + seeders: NumberOfPeers(0), + completed: NumberOfDownloads(0), + leechers: NumberOfPeers(0), + } + } + + #[tokio::test] + async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { + let remote_addr = sample_ipv4_remote_addr(); + + let info_hash = InfoHash([0u8; 20]); + let info_hashes = vec![info_hash]; + + let request = ScrapeRequest { + connection_id: get_connection_id(&remote_addr), + transaction_id: TransactionId(0i32), + info_hashes, + }; + + let response = handle_scrape(remote_addr, &request, initialized_public_tracker()) + .await + .unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!( + response, + Response::from(ScrapeResponse { + transaction_id: request.transaction_id, + torrent_stats: expected_torrent_stats + }) + ); + } + + async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { + let peer_id = PeerId([255u8; 20]); + + let peer = TorrentPeerBuilder::default() + .with_peer_id(PeerId(peer_id.0)) + .with_peer_addr(*remote_addr) + .with_bytes_left(0) + .into(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash.0.into(), &peer) + .await; + } + + fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { + let info_hashes = vec![*info_hash]; + + ScrapeRequest { + connection_id: 
get_connection_id(remote_addr), + transaction_id: TransactionId(0i32), + info_hashes, + } + } + + async fn add_a_sample_seeder_and_scrape(tracker: Arc) -> Response { + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + handle_scrape(remote_addr, &request, tracker.clone()).await.unwrap() + } + + fn match_scrape_response(response: Response) -> Option { + match response { + Response::Scrape(scrape_response) => Some(scrape_response), + _ => None, + } + } + + mod with_a_public_tracker { + use aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; + + use crate::udp::handlers::tests::{ + initialized_public_tracker, + scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}, + }; + + #[tokio::test] + async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { + let tracker = initialized_public_tracker(); + + let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await); + + let expected_torrent_stats = vec![TorrentScrapeStatistics { + seeders: NumberOfPeers(1), + completed: NumberOfDownloads(0), + leechers: NumberOfPeers(0), + }]; + + assert_eq!(torrent_stats.unwrap().torrent_stats, expected_torrent_stats); + } + } + + mod with_a_private_tracker { + + use aquatic_udp_protocol::InfoHash; + + use crate::udp::{ + handle_scrape, + handlers::tests::{ + initialized_private_tracker, sample_ipv4_remote_addr, + scrape_request::{ + add_a_sample_seeder_and_scrape, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, + }, + }, + }; + + #[tokio::test] + async fn should_return_zeroed_statistics_when_the_tracker_does_not_have_the_requested_torrent() { + let tracker = initialized_private_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + let non_existing_info_hash = InfoHash([0u8; 20]); 
+ + let request = build_scrape_request(&remote_addr, &non_existing_info_hash); + + let torrent_stats = + match_scrape_response(handle_scrape(remote_addr, &request, tracker.clone()).await.unwrap()).unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + + #[tokio::test] + async fn should_return_zeroed_statistics_when_the_tracker_has_the_requested_torrent_because_authenticated_requests_are_not_supported_in_udp_tracker( + ) { + let tracker = initialized_private_tracker(); + + let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await).unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + } + + mod with_a_whitelisted_tracker { + use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; + + use crate::udp::{ + handle_scrape, + handlers::tests::{ + initialized_whitelisted_tracker, sample_ipv4_remote_addr, + scrape_request::{add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics}, + }, + }; + + #[tokio::test] + async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { + let tracker = initialized_whitelisted_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; + + tracker.add_torrent_to_memory_whitelist(&info_hash.0.into()).await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + let torrent_stats = + match_scrape_response(handle_scrape(remote_addr, &request, tracker.clone()).await.unwrap()).unwrap(); + + let expected_torrent_stats = vec![TorrentScrapeStatistics { + seeders: NumberOfPeers(1), + completed: NumberOfDownloads(0), + leechers: NumberOfPeers(0), + }]; + + assert_eq!(torrent_stats.torrent_stats, 
expected_torrent_stats); + } + + #[tokio::test] + async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { + let tracker = initialized_whitelisted_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + let torrent_stats = + match_scrape_response(handle_scrape(remote_addr, &request, tracker.clone()).await.unwrap()).unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + } + + fn sample_scrape_request(remote_addr: &SocketAddr) -> ScrapeRequest { + let info_hash = InfoHash([0u8; 20]); + let info_hashes = vec![info_hash]; + + ScrapeRequest { + connection_id: get_connection_id(remote_addr), + transaction_id: TransactionId(0i32), + info_hashes, + } + } + + mod using_ipv4 { + use std::sync::Arc; + + use crate::{ + statistics::TrackerStatisticsEvent, + tracker::tracker::TorrentTracker, + udp::handlers::{ + handle_scrape, + tests::{default_tracker_config, sample_ipv4_remote_addr, TrackerStatsServiceMock}, + }, + }; + + use super::sample_scrape_request; + + #[tokio::test] + async fn should_send_the_upd4_scrape_event() { + let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + + tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Scrape); + + let remote_addr = sample_ipv4_remote_addr(); + let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + + handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) + .await + .unwrap(); + } + } + + mod using_ipv6 { + use std::sync::Arc; + + use crate::{ + statistics::TrackerStatisticsEvent, + tracker::tracker::TorrentTracker, + udp::handlers::{ + handle_scrape, + tests::{default_tracker_config, sample_ipv6_remote_addr, 
TrackerStatsServiceMock}, + }, + }; + + use super::sample_scrape_request; + + #[tokio::test] + async fn should_send_the_upd6_scrape_event() { + let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + + tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Scrape); + + let remote_addr = sample_ipv6_remote_addr(); + let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + + handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) + .await + .unwrap(); + } + } + } +} From 6efbb64df1d79b22f6c56ff33215814178b3a89b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 21 Sep 2022 17:05:29 +0100 Subject: [PATCH 0138/1003] test: add test for tracker::torrent::TorrentPeer --- src/tracker/peer.rs | 254 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 254 insertions(+) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 71c411b9b..712c65017 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -86,3 +86,257 @@ impl TorrentPeer { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } } + +#[cfg(test)] +mod test { + mod torrent_peer { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use crate::{ + peer::TorrentPeer, + protocol::clock::{DefaultClock, Time}, + PeerId, + }; + + #[test] + fn it_should_be_serializable() { + let torrent_peer = TorrentPeer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DefaultClock::now(), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + + let json_serialized_value = serde_json::to_string(&torrent_peer).unwrap(); + + assert_eq!( + json_serialized_value, + // todo: compare using pretty json format to improve readability + 
r#"{"peer_id":{"id":"2d71423030303030303030303030303030303030","client":"qBittorrent"},"peer_addr":"126.0.0.1:8080","updated":0,"uploaded":0,"downloaded":0,"left":0,"event":"Started"}"# + ); + } + } + + mod torrent_peer_constructor_from_udp_requests { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, + }; + + use crate::protocol::utils::get_connection_id; + + use crate::peer::TorrentPeer; + + // todo: duplicate functions is PR 82. Remove duplication once both PR are merged. + + fn sample_ipv4_remote_addr() -> SocketAddr { + sample_ipv4_socket_address() + } + + fn sample_ipv4_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + } + + struct AnnounceRequestBuilder { + request: AnnounceRequest, + } + + impl AnnounceRequestBuilder { + pub fn default() -> AnnounceRequestBuilder { + let client_ip = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); + + let default_request = AnnounceRequest { + connection_id: get_connection_id(&sample_ipv4_remote_addr()), + transaction_id: TransactionId(0i32), + info_hash: info_hash_aquatic, + peer_id: AquaticPeerId(*b"-qB00000000000000000"), + bytes_downloaded: NumberOfBytes(0i64), + bytes_uploaded: NumberOfBytes(0i64), + bytes_left: NumberOfBytes(0i64), + event: AnnounceEvent::Started, + ip_address: Some(client_ip), + key: PeerKey(0u32), + peers_wanted: NumberOfPeers(1i32), + port: Port(client_port), + }; + AnnounceRequestBuilder { + request: default_request, + } + } + + pub fn into(self) -> AnnounceRequest { + self.request + } + } + + #[test] + fn it_should_use_the_udp_source_ip_as_the_peer_ip_address_instead_of_the_ip_in_the_announce_request() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + let announce_request = AnnounceRequestBuilder::default().into(); 
+ + let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + + assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); + } + + #[test] + fn it_should_always_use_the_port_in_the_announce_request_for_the_peer_port() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + let announce_request = AnnounceRequestBuilder::default().into(); + + let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + + assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); + } + + mod when_source_udp_ip_is_a_ipv_4_loopback_ip { + + use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + str::FromStr, + }; + + use crate::peer::{test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder, TorrentPeer}; + + #[test] + fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + let announce_request = AnnounceRequestBuilder::default().into(); + + let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + + assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); + } + + #[test] + fn it_should_use_the_external_host_ip_in_tracker_configuration_if_defined() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + let announce_request = AnnounceRequestBuilder::default().into(); + + let host_opt_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + + assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); + } + + #[test] + fn it_should_use_the_external_ip_in_tracker_configuration_if_defined_even_if_the_external_ip_is_an_ipv6_ip() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + let announce_request = 
AnnounceRequestBuilder::default().into(); + + let host_opt_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + + assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); + } + } + + mod when_source_udp_ip_is_a_ipv6_loopback_ip { + + use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + str::FromStr, + }; + + use crate::peer::{test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder, TorrentPeer}; + + #[test] + fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + let announce_request = AnnounceRequestBuilder::default().into(); + + let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + + assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); + } + + #[test] + fn it_should_use_the_external_host_ip_in_tracker_configuration_if_defined() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + let announce_request = AnnounceRequestBuilder::default().into(); + + let host_opt_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + + assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); + } + + #[test] + fn it_should_use_the_external_ip_in_tracker_configuration_if_defined_even_if_the_external_ip_is_an_ipv4_ip() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + let announce_request = AnnounceRequestBuilder::default().into(); + + let host_opt_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + + 
assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); + } + } + } + + mod torrent_peer_constructor_from_for_http_requests { + use crate::{http::AnnounceRequest, peer::TorrentPeer, InfoHash, PeerId}; + + use std::net::{IpAddr, Ipv4Addr}; + + fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> AnnounceRequest { + AnnounceRequest { + info_hash: InfoHash([0u8; 20]), + peer_addr, + downloaded: 0u64, + uploaded: 0u64, + peer_id: PeerId(*b"-qB00000000000000000"), + port, + left: 0u64, + event: None, + compact: None, + } + } + + #[test] + fn it_should_use_the_source_ip_in_the_udp_heder_as_the_peer_ip_address_ignoring_the_peer_ip_in_the_announce_request() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + + let ip_in_announce_request = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); + let announce_request = sample_http_announce_request(ip_in_announce_request, 8080); + + let torrent_peer = TorrentPeer::from_http_announce_request(&announce_request, remote_ip, None); + + assert_eq!(torrent_peer.peer_addr.ip(), remote_ip); + assert_ne!(torrent_peer.peer_addr.ip(), ip_in_announce_request); + } + + #[test] + fn it_should_always_use_the_port_in_the_announce_request_for_the_peer_port_ignoring_the_port_in_the_udp_header() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + let remote_port = 8080; + + let port_in_announce_request = 8081; + let announce_request = + sample_http_announce_request(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), port_in_announce_request); + + let torrent_peer = TorrentPeer::from_http_announce_request(&announce_request, remote_ip, None); + + assert_eq!(torrent_peer.peer_addr.port(), announce_request.port); + assert_ne!(torrent_peer.peer_addr.port(), remote_port); + } + + // todo: other cases are already covered by UDP cases. + // Code review: + // We should extract the method "peer_addr_from_ip_and_port_and_opt_host_ip" from TorrentPeer. 
+ // It could be another service responsible for assigning the IP to the peer. + // So we can test that behavior independently from where you use it. + // We could also build the peer with the IP in the announce request and let the tracker decide + // wether it has to change it or not depending on tracker configuration. + } +} From 3f617ebf44f82886c9f0cbf9fa691039f4f13dc1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 Sep 2022 19:20:21 +0100 Subject: [PATCH 0139/1003] test: add test for InfoHash --- src/protocol/common.rs | 95 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/src/protocol/common.rs b/src/protocol/common.rs index 92a3ed51c..f1bd6a99c 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -94,6 +94,101 @@ impl<'de> serde::de::Deserialize<'de> for InfoHash { } } +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use serde::{Deserialize, Serialize}; + use serde_json::json; + + use crate::InfoHash; + + #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] + struct ContainingInfoHash { + pub info_hash: InfoHash, + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_40_utf8_char_string_representing_an_hexadecimal_value() { + let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"); + assert!(info_hash.is_ok()); + } + + #[test] + fn an_info_hash_can_not_be_created_from_a_utf8_string_representing_a_not_valid_hexadecimal_value() { + let info_hash = InfoHash::from_str("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"); + assert!(info_hash.is_err()); + } + + #[test] + fn an_info_hash_can_only_be_created_from_a_40_utf8_char_string() { + let info_hash = InfoHash::from_str(&"F".repeat(39)); + assert!(info_hash.is_err()); + + let info_hash = InfoHash::from_str(&"F".repeat(41)); + assert!(info_hash.is_err()); + } + + #[test] + fn an_info_hash_should_by_displayed_like_a_40_utf8_lowercased_char_hex_string() { + let info_hash = 
InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + + let output = format!("{}", info_hash); + + assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff"); + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_20_byte_array_slice() { + let info_hash: InfoHash = [255u8; 20].as_slice().into(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_20_byte_array() { + let info_hash: InfoHash = [255u8; 20].into(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn an_info_hash_can_be_serialized() { + let s = ContainingInfoHash { + info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), + }; + + let json_serialized_value = serde_json::to_string(&s).unwrap(); + + assert_eq!( + json_serialized_value, + r#"{"info_hash":"ffffffffffffffffffffffffffffffffffffffff"}"# + ); + } + + #[test] + fn an_info_hash_can_be_deserialized() { + let json = json!({ + "info_hash": "ffffffffffffffffffffffffffffffffffffffff", + }); + + let s: ContainingInfoHash = serde_json::from_value(json).unwrap(); + + assert_eq!( + s, + ContainingInfoHash { + info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + } + ); + } +} + struct InfoHashVisitor; impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { From 8d268b02a4df052440ddf0f4f4b14be282f5f04d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 13 Sep 2022 18:18:37 +0100 Subject: [PATCH 0140/1003] test: add tests to TorrentEntry --- src/tracker/torrent.rs | 269 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 269 insertions(+) diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index f12f0a622..ff257fb90 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -98,3 +98,272 @@ pub enum TorrentError { CouldNotSendResponse, InvalidInfoHash, 
} + +#[cfg(test)] +mod tests { + use std::{ + net::{IpAddr, Ipv4Addr, SocketAddr}, + ops::Sub, + time::Duration, + }; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use crate::{ + peer::TorrentPeer, + protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, WorkingClock}, + torrent::TorrentEntry, + PeerId, + }; + + struct TorrentPeerBuilder { + peer: TorrentPeer, + } + + impl TorrentPeerBuilder { + pub fn default() -> TorrentPeerBuilder { + let default_peer = TorrentPeer { + peer_id: PeerId([0u8; 20]), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: DefaultClock::now(), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + TorrentPeerBuilder { peer: default_peer } + } + + pub fn with_event_completed(mut self) -> Self { + self.peer.event = AnnounceEvent::Completed; + self + } + + pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { + self.peer.peer_id = peer_id; + self + } + + pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } + + pub fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + pub fn into(self) -> TorrentPeer { + self.peer + } + } + + /// A torrent seeder is a peer with 0 bytes left to download which + /// has not announced it has stopped + fn a_torrent_seeder() -> TorrentPeer { + TorrentPeerBuilder::default() + .with_number_of_bytes_left(0) + .with_event_completed() + .into() + } + + /// A torrent leecher is a peer that is not a seeder. 
+ /// Leecher: left > 0 OR event = Stopped + fn a_torrent_leecher() -> TorrentPeer { + TorrentPeerBuilder::default() + .with_number_of_bytes_left(1) + .with_event_completed() + .into() + } + + #[test] + fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { + let torrent_entry = TorrentEntry::new(); + + assert_eq!(torrent_entry.get_peers(None).len(), 0); + } + + #[test] + fn a_new_peer_can_be_added_to_a_torrent_entry() { + let mut torrent_entry = TorrentEntry::new(); + let torrent_peer = TorrentPeerBuilder::default().into(); + + torrent_entry.update_peer(&torrent_peer); // Add the peer + + assert_eq!(*torrent_entry.get_peers(None)[0], torrent_peer); + assert_eq!(torrent_entry.get_peers(None).len(), 1); + } + + #[test] + fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { + let mut torrent_entry = TorrentEntry::new(); + let torrent_peer = TorrentPeerBuilder::default().into(); + + torrent_entry.update_peer(&torrent_peer); // Add the peer + + assert_eq!(torrent_entry.get_peers(None), vec![&torrent_peer]); + } + + #[test] + fn a_peer_can_be_updated_in_a_torrent_entry() { + let mut torrent_entry = TorrentEntry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.update_peer(&torrent_peer); // Add the peer + + torrent_peer.event = AnnounceEvent::Completed; // Update the peer + torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + + assert_eq!(torrent_entry.get_peers(None)[0].event, AnnounceEvent::Completed); + } + + #[test] + fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { + let mut torrent_entry = TorrentEntry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.update_peer(&torrent_peer); // Add the peer + + torrent_peer.event = AnnounceEvent::Stopped; // Update the peer + torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + + 
assert_eq!(torrent_entry.get_peers(None).len(), 0); + } + + #[test] + fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { + let mut torrent_entry = TorrentEntry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + + torrent_entry.update_peer(&torrent_peer); // Add the peer + + torrent_peer.event = AnnounceEvent::Completed; // Update the peer + let stats_have_changed = torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + + assert!(stats_have_changed); + } + + #[test] + fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( + ) { + let mut torrent_entry = TorrentEntry::new(); + let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); + + // Add a peer that did not exist before in the entry + let torrent_stats_have_not_changed = !torrent_entry.update_peer(&torrent_peer_announcing_complete_event); + + assert!(torrent_stats_have_not_changed); + } + + #[test] + fn a_torrent_entry_could_filter_out_peers_with_a_given_socket_address() { + let mut torrent_entry = TorrentEntry::new(); + let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); + torrent_entry.update_peer(&torrent_peer); // Add peer + + // Get peers excluding the one we have just added + let peers = torrent_entry.get_peers(Some(&peer_socket_address)); + + assert_eq!(peers.len(), 0); + } + + fn peer_id_from_i32(number: i32) -> PeerId { + let peer_id = number.to_le_bytes(); + PeerId([ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], peer_id[2], + peer_id[3], + ]) + } + + #[test] + fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { + let mut torrent_entry = TorrentEntry::new(); + + 
// We add one more peer than the scrape limit + for peer_number in 1..=74 + 1 { + let torrent_peer = TorrentPeerBuilder::default() + .with_peer_id(peer_id_from_i32(peer_number)) + .into(); + torrent_entry.update_peer(&torrent_peer); + } + + let peers = torrent_entry.get_peers(None); + + assert_eq!(peers.len(), 74) + } + + #[test] + fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { + let mut torrent_entry = TorrentEntry::new(); + let torrent_seeder = a_torrent_seeder(); + + torrent_entry.update_peer(&torrent_seeder); // Add seeder + + assert_eq!(torrent_entry.get_stats().0, 1); + } + + #[test] + fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { + let mut torrent_entry = TorrentEntry::new(); + let torrent_leecher = a_torrent_leecher(); + + torrent_entry.update_peer(&torrent_leecher); // Add leecher + + assert_eq!(torrent_entry.get_stats().2, 1); + } + + #[test] + fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( + ) { + let mut torrent_entry = TorrentEntry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.update_peer(&torrent_peer); // Add the peer + + // Announce "Completed" torrent download event. + torrent_peer.event = AnnounceEvent::Completed; + torrent_entry.update_peer(&torrent_peer); // Update the peer + + let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().1; + + assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); + } + + #[test] + fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { + let mut torrent_entry = TorrentEntry::new(); + let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); + + // Announce "Completed" torrent download event. + // It's the first event announced from this peer. 
+ torrent_entry.update_peer(&torrent_peer_announcing_complete_event); // Add the peer + + let number_of_peers_with_completed_torrent = torrent_entry.get_stats().1; + + assert_eq!(number_of_peers_with_completed_torrent, 0); + } + + #[test] + fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { + let mut torrent_entry = TorrentEntry::new(); + + let timeout = 120u32; + + let now = WorkingClock::now(); + StoppedClock::local_set(&now); + + let timeout_seconds_before_now = now.sub(Duration::from_secs(timeout as u64)); + let inactive_peer = TorrentPeerBuilder::default() + .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) + .into(); + torrent_entry.update_peer(&inactive_peer); // Add the peer + + torrent_entry.remove_inactive_peers(timeout); + + assert_eq!(torrent_entry.peers.len(), 0); + } +} From a9df618910492ae86358d94871d406997345781f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 22 Sep 2022 17:06:04 +0100 Subject: [PATCH 0141/1003] fix: vscode setting, remove default formatter Co-authored-by: da2ce7 --- .vscode/settings.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index d87732d93..72e8db7e0 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -11,8 +11,7 @@ "typenum" ], "[rust]": { - "editor.defaultFormatter": "matklad.rust-analyzer", "editor.formatOnSave": true }, "rust-analyzer.checkOnSave.command": "clippy", -} \ No newline at end of file +} From 5e2bef3ce9c2c7924de2fcc93d65debf39c784de Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 17 Oct 2022 17:03:06 +0200 Subject: [PATCH 0142/1003] format: re-apply rust-format --- src/tracker/peer.rs | 35 +++---- src/tracker/statistics.rs | 3 +- src/tracker/torrent.rs | 18 ++-- src/udp/handlers.rs | 200 +++++++++++++++----------------------- 4 files changed, 101 insertions(+), 155 deletions(-) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 712c65017..09509e50f 
100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -95,11 +95,9 @@ mod test { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::{ - peer::TorrentPeer, - protocol::clock::{DefaultClock, Time}, - PeerId, - }; + use crate::peer::TorrentPeer; + use crate::protocol::clock::{DefaultClock, Time}; + use crate::PeerId; #[test] fn it_should_be_serializable() { @@ -131,9 +129,8 @@ mod test { AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, }; - use crate::protocol::utils::get_connection_id; - use crate::peer::TorrentPeer; + use crate::protocol::utils::get_connection_id; // todo: duplicate functions is PR 82. Remove duplication once both PR are merged. @@ -201,12 +198,11 @@ mod test { mod when_source_udp_ip_is_a_ipv_4_loopback_ip { - use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, - str::FromStr, - }; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::str::FromStr; - use crate::peer::{test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder, TorrentPeer}; + use crate::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; + use crate::peer::TorrentPeer; #[test] fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { @@ -243,12 +239,11 @@ mod test { mod when_source_udp_ip_is_a_ipv6_loopback_ip { - use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, - str::FromStr, - }; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::str::FromStr; - use crate::peer::{test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder, TorrentPeer}; + use crate::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; + use crate::peer::TorrentPeer; #[test] fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { @@ -285,10 +280,12 @@ mod test { } mod 
torrent_peer_constructor_from_for_http_requests { - use crate::{http::AnnounceRequest, peer::TorrentPeer, InfoHash, PeerId}; - use std::net::{IpAddr, Ipv4Addr}; + use crate::http::AnnounceRequest; + use crate::peer::TorrentPeer; + use crate::{InfoHash, PeerId}; + fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> AnnounceRequest { AnnounceRequest { info_hash: InfoHash([0u8; 20]), diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index cf801e1df..a2a0de99b 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -1,5 +1,6 @@ -use async_trait::async_trait; use std::sync::Arc; + +use async_trait::async_trait; use tokio::sync::mpsc::error::SendError; use tokio::sync::mpsc::Sender; use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index ff257fb90..65eaa0a40 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -101,20 +101,16 @@ pub enum TorrentError { #[cfg(test)] mod tests { - use std::{ - net::{IpAddr, Ipv4Addr, SocketAddr}, - ops::Sub, - time::Duration, - }; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::ops::Sub; + use std::time::Duration; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::{ - peer::TorrentPeer, - protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, WorkingClock}, - torrent::TorrentEntry, - PeerId, - }; + use crate::peer::TorrentPeer; + use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, WorkingClock}; + use crate::torrent::TorrentEntry; + use crate::PeerId; struct TorrentPeerBuilder { peer: TorrentPeer, diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 3c4074eae..5e286b9f7 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -239,26 +239,23 @@ fn handle_error(e: ServerError, transaction_id: TransactionId) -> Response { #[cfg(test)] mod tests { - use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr, 
SocketAddr}, - sync::Arc, - }; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use async_trait::async_trait; - use tokio::sync::{mpsc::error::SendError, RwLock, RwLockReadGuard}; - - use crate::{ - mode::TrackerMode, - peer::TorrentPeer, - protocol::clock::{DefaultClock, Time}, - statistics::{ - StatsTracker, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender, TrackerStatisticsRepository, - TrackerStatsService, - }, - tracker::tracker::TorrentTracker, - Configuration, PeerId, + use tokio::sync::mpsc::error::SendError; + use tokio::sync::{RwLock, RwLockReadGuard}; + + use crate::mode::TrackerMode; + use crate::peer::TorrentPeer; + use crate::protocol::clock::{DefaultClock, Time}; + use crate::statistics::{ + StatsTracker, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender, TrackerStatisticsRepository, + TrackerStatsService, }; + use crate::tracker::tracker::TorrentTracker; + use crate::{Configuration, PeerId}; fn default_tracker_config() -> Arc { Arc::new(Configuration::default()) @@ -401,18 +398,14 @@ mod tests { use std::sync::Arc; - use crate::{ - protocol::utils::get_connection_id, - statistics::TrackerStatisticsEvent, - tracker::tracker::TorrentTracker, - udp::{ - handle_connect, - handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}, - }, - }; use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr, TrackerStatsServiceMock}; + use crate::protocol::utils::get_connection_id; + use crate::statistics::TrackerStatisticsEvent; + use crate::tracker::tracker::TorrentTracker; + use crate::udp::handle_connect; + use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; fn sample_connect_request() -> ConnectRequest { ConnectRequest { @@ -492,7 +485,8 @@ mod tests { 
AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, }; - use crate::{protocol::utils::get_connection_id, udp::handlers::tests::sample_ipv4_remote_addr}; + use crate::protocol::utils::get_connection_id; + use crate::udp::handlers::tests::sample_ipv4_remote_addr; struct AnnounceRequestBuilder { request: AnnounceRequest, @@ -550,28 +544,23 @@ mod tests { mod using_ipv4 { - use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, - sync::Arc, - }; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; use aquatic_udp_protocol::{ AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, Response, ResponsePeer, }; - use crate::{ - statistics::TrackerStatisticsEvent, - tracker::tracker::TorrentTracker, - udp::{ - handle_announce, - handlers::tests::{ - announce_request::AnnounceRequestBuilder, default_tracker_config, initialized_public_tracker, - sample_ipv4_socket_address, TorrentPeerBuilder, TrackerStatsServiceMock, - }, - }, - PeerId, + use crate::statistics::TrackerStatisticsEvent; + use crate::tracker::tracker::TorrentTracker; + use crate::udp::handle_announce; + use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::udp::handlers::tests::{ + default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, TorrentPeerBuilder, + TrackerStatsServiceMock, }; + use crate::PeerId; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -718,15 +707,10 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::{ - udp::{ - handle_announce, - handlers::tests::{ - announce_request::AnnounceRequestBuilder, initialized_public_tracker, TorrentPeerBuilder, - }, - }, - PeerId, - }; + use crate::udp::handle_announce; + use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use 
crate::udp::handlers::tests::{initialized_public_tracker, TorrentPeerBuilder}; + use crate::PeerId; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { @@ -764,28 +748,23 @@ mod tests { mod using_ipv6 { - use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, - sync::Arc, - }; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; use aquatic_udp_protocol::{ AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, Response, ResponsePeer, }; - use crate::{ - statistics::TrackerStatisticsEvent, - tracker::tracker::TorrentTracker, - udp::{ - handle_announce, - handlers::tests::{ - announce_request::AnnounceRequestBuilder, default_tracker_config, initialized_public_tracker, - sample_ipv6_remote_addr, TorrentPeerBuilder, TrackerStatsServiceMock, - }, - }, - PeerId, + use crate::statistics::TrackerStatisticsEvent; + use crate::tracker::tracker::TorrentTracker; + use crate::udp::handle_announce; + use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::udp::handlers::tests::{ + default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, TorrentPeerBuilder, + TrackerStatsServiceMock, }; + use crate::PeerId; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -934,21 +913,16 @@ mod tests { } mod from_a_loopback_ip { - use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, - sync::Arc, - }; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::{ - statistics::StatsTracker, - tracker::tracker::TorrentTracker, - udp::{ - handle_announce, - handlers::tests::{announce_request::AnnounceRequestBuilder, TrackerConfigurationBuilder}, - }, - }; + use crate::statistics::StatsTracker; + use crate::tracker::tracker::TorrentTracker; + use 
crate::udp::handle_announce; + use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::udp::handlers::tests::TrackerConfigurationBuilder; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { @@ -993,24 +967,20 @@ mod tests { } mod scrape_request { - use std::{net::SocketAddr, sync::Arc}; + use std::net::SocketAddr; + use std::sync::Arc; use aquatic_udp_protocol::{ InfoHash, NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; - use crate::{ - protocol::utils::get_connection_id, - tracker::tracker::TorrentTracker, - udp::{ - handle_scrape, - handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}, - }, - PeerId, - }; - use super::TorrentPeerBuilder; + use crate::protocol::utils::get_connection_id; + use crate::tracker::tracker::TorrentTracker; + use crate::udp::handle_scrape; + use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; + use crate::PeerId; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { @@ -1093,10 +1063,8 @@ mod tests { mod with_a_public_tracker { use aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - use crate::udp::handlers::tests::{ - initialized_public_tracker, - scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}, - }; + use crate::udp::handlers::tests::initialized_public_tracker; + use crate::udp::handlers::tests::scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}; #[tokio::test] async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { @@ -1118,15 +1086,11 @@ mod tests { use aquatic_udp_protocol::InfoHash; - use crate::udp::{ - handle_scrape, - handlers::tests::{ - initialized_private_tracker, sample_ipv4_remote_addr, - scrape_request::{ - add_a_sample_seeder_and_scrape, build_scrape_request, 
match_scrape_response, zeroed_torrent_statistics, - }, - }, + use crate::udp::handle_scrape; + use crate::udp::handlers::tests::scrape_request::{ + add_a_sample_seeder_and_scrape, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; + use crate::udp::handlers::tests::{initialized_private_tracker, sample_ipv4_remote_addr}; #[tokio::test] async fn should_return_zeroed_statistics_when_the_tracker_does_not_have_the_requested_torrent() { @@ -1161,13 +1125,11 @@ mod tests { mod with_a_whitelisted_tracker { use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - use crate::udp::{ - handle_scrape, - handlers::tests::{ - initialized_whitelisted_tracker, sample_ipv4_remote_addr, - scrape_request::{add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics}, - }, + use crate::udp::handle_scrape; + use crate::udp::handlers::tests::scrape_request::{ + add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; + use crate::udp::handlers::tests::{initialized_whitelisted_tracker, sample_ipv4_remote_addr}; #[tokio::test] async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { @@ -1228,16 +1190,11 @@ mod tests { mod using_ipv4 { use std::sync::Arc; - use crate::{ - statistics::TrackerStatisticsEvent, - tracker::tracker::TorrentTracker, - udp::handlers::{ - handle_scrape, - tests::{default_tracker_config, sample_ipv4_remote_addr, TrackerStatsServiceMock}, - }, - }; - use super::sample_scrape_request; + use crate::statistics::TrackerStatisticsEvent; + use crate::tracker::tracker::TorrentTracker; + use crate::udp::handlers::handle_scrape; + use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr, TrackerStatsServiceMock}; #[tokio::test] async fn should_send_the_upd4_scrape_event() { @@ -1257,16 +1214,11 @@ mod tests { mod using_ipv6 { use std::sync::Arc; - use crate::{ - statistics::TrackerStatisticsEvent, 
- tracker::tracker::TorrentTracker, - udp::handlers::{ - handle_scrape, - tests::{default_tracker_config, sample_ipv6_remote_addr, TrackerStatsServiceMock}, - }, - }; - use super::sample_scrape_request; + use crate::statistics::TrackerStatisticsEvent; + use crate::tracker::tracker::TorrentTracker; + use crate::udp::handlers::handle_scrape; + use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr, TrackerStatsServiceMock}; #[tokio::test] async fn should_send_the_upd6_scrape_event() { From 873293a6f871ba6102d75df2379471b43bcf0096 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 21 Sep 2022 21:12:22 +0200 Subject: [PATCH 0143/1003] crypto: ephemeral instance seeds with keepers * Implement a lazy static that holds ephemeral seeds that are freshly generated on app load. * New `crypto` supporting module that provides a simple interface to accessing the seed. * Provide a 'default seed', that is zeroed-out when testing, and random when not testing. --- src/lib.rs | 11 +++++ src/main.rs | 5 ++- src/protocol/crypto.rs | 98 ++++++++++++++++++++++++++++++++++++++++++ src/protocol/mod.rs | 1 + 4 files changed, 114 insertions(+), 1 deletion(-) create mode 100644 src/protocol/crypto.rs diff --git a/src/lib.rs b/src/lib.rs index 882e126bc..5f003b5fd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -27,3 +27,14 @@ pub mod static_time { pub static ref TIME_AT_APP_START: SystemTime = SystemTime::now(); } } + +pub mod ephemeral_instance_keys { + use rand::rngs::ThreadRng; + use rand::Rng; + + pub type Seed = [u8; 32]; + + lazy_static! 
{ + pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); + } +} diff --git a/src/main.rs b/src/main.rs index 47896ff43..bac7854bb 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use log::info; use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::tracker::TorrentTracker; -use torrust_tracker::{logging, setup, static_time, Configuration}; +use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, Configuration}; #[tokio::main] async fn main() { @@ -12,6 +12,9 @@ async fn main() { // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + // Initialize Torrust config let config = match Configuration::load_from_file(CONFIG_PATH) { Ok(config) => Arc::new(config), diff --git a/src/protocol/crypto.rs b/src/protocol/crypto.rs new file mode 100644 index 000000000..18cfaf5e6 --- /dev/null +++ b/src/protocol/crypto.rs @@ -0,0 +1,98 @@ +pub mod keys { + + pub mod seeds { + use self::detail::DEFAULT_SEED; + use crate::ephemeral_instance_keys::{Seed, RANDOM_SEED}; + + pub trait SeedKeeper { + type Seed: Sized + Default + AsMut<[u8]>; + fn get_seed() -> &'static Self::Seed; + } + + pub struct InstanceSeed; + pub struct DefaultSeed; + + impl SeedKeeper for InstanceSeed { + type Seed = Seed; + + fn get_seed() -> &'static Self::Seed { + &RANDOM_SEED + } + } + + impl SeedKeeper for DefaultSeed { + type Seed = Seed; + + #[allow(clippy::needless_borrow)] + fn get_seed() -> &'static Self::Seed { + &DEFAULT_SEED + } + } + + #[cfg(test)] + mod tests { + use super::detail::ZEROED_TEST_SEED; + use super::{DefaultSeed, InstanceSeed, SeedKeeper}; + use crate::ephemeral_instance_keys::Seed; + + pub struct ZeroedTestSeed; + + impl SeedKeeper for ZeroedTestSeed { + type Seed = Seed; + + #[allow(clippy::needless_borrow)] + fn get_seed() -> 
&'static Self::Seed { + &ZEROED_TEST_SEED + } + } + + #[test] + fn the_default_seed_and_the_zeroed_seed_should_be_the_same_when_testing() { + assert_eq!(DefaultSeed::get_seed(), ZeroedTestSeed::get_seed()) + } + + #[test] + fn the_default_seed_and_the_instance_seed_should_be_different_when_testing() { + assert_ne!(DefaultSeed::get_seed(), InstanceSeed::get_seed()) + } + } + + mod detail { + use crate::ephemeral_instance_keys::Seed; + + #[allow(dead_code)] + pub const ZEROED_TEST_SEED: &Seed = &[0u8; 32]; + + #[cfg(test)] + pub use ZEROED_TEST_SEED as DEFAULT_SEED; + + #[cfg(not(test))] + pub use crate::ephemeral_instance_keys::RANDOM_SEED as DEFAULT_SEED; + + #[cfg(test)] + mod tests { + use std::convert::TryInto; + + use crate::ephemeral_instance_keys::RANDOM_SEED; + use crate::protocol::crypto::keys::seeds::detail::ZEROED_TEST_SEED; + use crate::protocol::crypto::keys::seeds::DEFAULT_SEED; + + #[test] + fn it_should_have_a_zero_test_seed() { + assert_eq!(*ZEROED_TEST_SEED, [0u8; 32]) + } + + #[test] + fn it_should_default_to_zeroed_seed_when_testing() { + assert_eq!(*DEFAULT_SEED, *ZEROED_TEST_SEED) + } + + #[test] + fn it_should_have_a_large_random_seed() { + assert!(u128::from_ne_bytes((*RANDOM_SEED)[..16].try_into().unwrap()) > u64::MAX as u128); + assert!(u128::from_ne_bytes((*RANDOM_SEED)[16..].try_into().unwrap()) > u64::MAX as u128); + } + } + } + } +} diff --git a/src/protocol/mod.rs b/src/protocol/mod.rs index fcb28b3b2..85e4f90ad 100644 --- a/src/protocol/mod.rs +++ b/src/protocol/mod.rs @@ -1,3 +1,4 @@ pub mod clock; pub mod common; +pub mod crypto; pub mod utils; From f2eaf9584cf8d396660f325594b835cbd70dedfa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Oct 2022 13:51:41 +0100 Subject: [PATCH 0144/1003] feat: use nightly toolchain for formatting @da2ce7 started using the nightly build for rust formatting here: https://github.com/torrust/torrust-tracker/pull/99 And the job 'format' in the workflow did not work, showing these warnings: ``` 
Warning: can't set `imports_granularity = Module`, unstable features are only available in nightly channel. Warning: can't set `group_imports = StdExternalCrate`, unstable features are only available in nightly channel. Warning: can't set `imports_granularity = Module`, unstable features are only available in nightly channel. Warning: can't set `group_imports = StdExternalCrate`, unstable features are only available in nightly channel. ``` So we needed the nightly channel anyway. --- .github/workflows/test_build_release.yml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 87f6a9488..1266ae51f 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -13,11 +13,15 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: stable - components: rustfmt + toolchain: nightly + override: true + components: rustfmt, clippy - uses: Swatinem/rust-cache@v1 - name: Check Rust Formatting - run: cargo fmt --check + uses: actions-rs/cargo@v1 + with: + command: fmt + args: --check test: needs: format From c8fa30a509d8fadbe0f7271b4254e064729fafd9 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 18 Oct 2022 18:31:28 +0200 Subject: [PATCH 0145/1003] fix: prepare tests for connection cookie Co-authored-by: Jose Celano --- src/protocol/clock/time_extent.rs | 1 + src/tracker/peer.rs | 1 - src/udp/handlers.rs | 76 +++++++++++++++++++++++-------- 3 files changed, 57 insertions(+), 21 deletions(-) diff --git a/src/protocol/clock/time_extent.rs b/src/protocol/clock/time_extent.rs index d0713645b..3fa60de82 100644 --- a/src/protocol/clock/time_extent.rs +++ b/src/protocol/clock/time_extent.rs @@ -133,6 +133,7 @@ where } } } + fn now_before(increment: &TimeExtentBase, sub_time: &Duration) -> Option> { match Clock::sub(sub_time) { None => None, diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 
09509e50f..2d0985fc9 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -131,7 +131,6 @@ mod test { use crate::peer::TorrentPeer; use crate::protocol::utils::get_connection_id; - // todo: duplicate functions is PR 82. Remove duplication once both PR are merged. fn sample_ipv4_remote_addr() -> SocketAddr { diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 5e286b9f7..f22436bc1 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -482,7 +482,8 @@ mod tests { use std::net::Ipv4Addr; use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, + AnnounceEvent, AnnounceRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, + TransactionId, }; use crate::protocol::utils::get_connection_id; @@ -517,6 +518,11 @@ mod tests { } } + pub fn with_connection_id(mut self, connection_id: ConnectionId) -> Self { + self.request.connection_id = connection_id; + self + } + pub fn with_info_hash(mut self, info_hash: aquatic_udp_protocol::InfoHash) -> Self { self.request.info_hash = info_hash; self @@ -552,6 +558,7 @@ mod tests { Response, ResponsePeer, }; + use crate::protocol::utils::get_connection_id; use crate::statistics::TrackerStatisticsEvent; use crate::tracker::tracker::TorrentTracker; use crate::udp::handle_announce; @@ -571,14 +578,16 @@ mod tests { let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); + let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) .with_port(client_port) .into(); - let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); let peers = 
tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -593,9 +602,12 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { - let request = AnnounceRequestBuilder::default().into(); let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) + .into(); + let response = handle_announce(remote_addr, &request, initialized_public_tracker()) .await .unwrap(); @@ -629,14 +641,16 @@ mod tests { let remote_client_port = 8081; let peer_address = Ipv4Addr::new(126, 0, 0, 2); + let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) .with_port(client_port) .into(); - let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -663,8 +677,10 @@ mod tests { } async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { - let request = AnnounceRequestBuilder::default().into(); let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) + .into(); let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); response } @@ -707,6 +723,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use crate::protocol::utils::get_connection_id; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{initialized_public_tracker, TorrentPeerBuilder}; @@ 
-721,14 +738,16 @@ mod tests { let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); + let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) .with_port(client_port) .into(); - let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -756,6 +775,7 @@ mod tests { Response, ResponsePeer, }; + use crate::protocol::utils::get_connection_id; use crate::statistics::TrackerStatisticsEvent; use crate::tracker::tracker::TorrentTracker; use crate::udp::handle_announce; @@ -776,14 +796,16 @@ mod tests { let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) .with_port(client_port) .into(); - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -798,11 +820,15 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { - let request = AnnounceRequestBuilder::default().into(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) + .into(); + let response = handle_announce(remote_addr, 
&request, initialized_public_tracker()) .await .unwrap(); @@ -836,14 +862,16 @@ mod tests { let remote_client_port = 8081; let peer_address = "126.0.0.1".parse().unwrap(); + let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) .with_port(client_port) .into(); - let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -874,7 +902,9 @@ mod tests { let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); let client_port = 8080; let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); - let request = AnnounceRequestBuilder::default().into(); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) + .into(); let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); response } @@ -903,13 +933,16 @@ mod tests { tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Announce); let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); - handle_announce( - sample_ipv6_remote_addr(), - &AnnounceRequestBuilder::default().into(), - tracker.clone(), - ) - .await - .unwrap(); + + let remote_addr = sample_ipv6_remote_addr(); + + let announce_request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) + .into(); + + handle_announce(remote_addr, &announce_request, tracker.clone()) + .await + .unwrap(); } mod from_a_loopback_ip { @@ -918,6 +951,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use crate::protocol::utils::get_connection_id; use 
crate::statistics::StatsTracker; use crate::tracker::tracker::TorrentTracker; use crate::udp::handle_announce; @@ -940,14 +974,16 @@ mod tests { let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) .with_port(client_port) .into(); - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -1036,7 +1072,7 @@ mod tests { let info_hashes = vec![*info_hash]; ScrapeRequest { - connection_id: get_connection_id(remote_addr), + connection_id: get_connection_id(&remote_addr), transaction_id: TransactionId(0i32), info_hashes, } @@ -1181,7 +1217,7 @@ mod tests { let info_hashes = vec![info_hash]; ScrapeRequest { - connection_id: get_connection_id(remote_addr), + connection_id: get_connection_id(&remote_addr), transaction_id: TransactionId(0i32), info_hashes, } From 43685b69004583f54e16e834b73122a7603483cb Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 18 Oct 2022 18:37:19 +0200 Subject: [PATCH 0146/1003] dev: connection cookie implmentation --- src/udp/connection_cookie.rs | 178 +++++++++++++++++++++++++++++++++++ src/udp/mod.rs | 1 + 2 files changed, 179 insertions(+) create mode 100644 src/udp/connection_cookie.rs diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs new file mode 100644 index 000000000..712c2ce61 --- /dev/null +++ b/src/udp/connection_cookie.rs @@ -0,0 +1,178 @@ +use std::net::SocketAddr; + +use aquatic_udp_protocol::ConnectionId; + +use crate::protocol::clock::time_extent::{Extent, TimeExtent}; +use crate::udp::ServerError; + +pub type Cookie = [u8; 8]; + +pub type 
SinceUnixEpochTimeExtent = TimeExtent; + +pub const COOKIE_LIFETIME: TimeExtent = TimeExtent::from_sec(2, &60); + +pub fn from_connection_id(connection_id: &ConnectionId) -> Cookie { + connection_id.0.to_le_bytes() +} + +pub fn into_connection_id(connection_cookie: &Cookie) -> ConnectionId { + ConnectionId(i64::from_le_bytes(*connection_cookie)) +} + +pub fn make_connection_cookie(remote_address: &SocketAddr) -> Cookie { + let time_extent = cookie_builder::get_last_time_extent(); + + let cookie = cookie_builder::build(remote_address, &time_extent); + //println!("remote_address: {remote_address:?}, time_extent: {time_extent:?}, cookie: {cookie:?}"); + cookie +} + +pub fn check_connection_cookie( + remote_address: &SocketAddr, + connection_cookie: &Cookie, +) -> Result { + // we loop backwards testing each time_extent until we find one that matches. + // (or the lifetime of time_extents is exhausted) + for offset in 0..=COOKIE_LIFETIME.amount { + let checking_time_extent = cookie_builder::get_last_time_extent().decrease(offset).unwrap(); + + let checking_cookie = cookie_builder::build(remote_address, &checking_time_extent); + //println!("remote_address: {remote_address:?}, time_extent: {checking_time_extent:?}, cookie: {checking_cookie:?}"); + + if *connection_cookie == checking_cookie { + return Ok(checking_time_extent); + } + } + Err(ServerError::InvalidConnectionId) +} + +mod cookie_builder { + use std::collections::hash_map::DefaultHasher; + use std::hash::{Hash, Hasher}; + use std::net::SocketAddr; + + use super::{Cookie, SinceUnixEpochTimeExtent, COOKIE_LIFETIME}; + use crate::protocol::clock::time_extent::{DefaultTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent}; + use crate::protocol::crypto::keys::seeds::{DefaultSeed, SeedKeeper}; + + pub(super) fn get_last_time_extent() -> SinceUnixEpochTimeExtent { + DefaultTimeExtentMaker::now(&COOKIE_LIFETIME.increment) + .unwrap() + .unwrap() + .increase(COOKIE_LIFETIME.amount) + .unwrap() + } + + pub(super) fn 
build(remote_address: &SocketAddr, time_extent: &TimeExtent) -> Cookie { + let seed = DefaultSeed::get_seed(); + + let mut hasher = DefaultHasher::new(); + + remote_address.hash(&mut hasher); + time_extent.hash(&mut hasher); + seed.hash(&mut hasher); + + hasher.finish().to_le_bytes() + } +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + + use super::cookie_builder::get_last_time_extent; + use crate::protocol::clock::time_extent::Extent; + use crate::protocol::clock::{StoppedClock, StoppedTime}; + use crate::udp::connection_cookie::{check_connection_cookie, make_connection_cookie, Cookie, COOKIE_LIFETIME}; + + fn make_test_socket_addr() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + } + + fn make_test_cookie(remote_address: Option<&SocketAddr>) -> Cookie { + make_connection_cookie(remote_address.unwrap_or(&make_test_socket_addr())) + } + + #[test] + fn it_should_make_a_connection_cookie() { + // remote_address: 127.0.0.1:8080, time_extent: 60, + // seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + const ID_COOKIE: Cookie = [45, 59, 50, 101, 97, 203, 48, 19]; + + let test_cookie = make_test_cookie(None); + //println!("{test_cookie:?}"); + + assert_eq!(test_cookie, ID_COOKIE) + } + + #[test] + fn it_should_make_different_connection_cookie_with_different_remote_addresses() { + let test_remote_address_1 = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 1); + let test_remote_address_2 = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 2); + let test_remote_address_3 = SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 1); + + assert_ne!( + make_test_cookie(Some(&test_remote_address_1)), + make_test_cookie(Some(&test_remote_address_2)) + ); + + assert_ne!( + make_test_cookie(Some(&test_remote_address_1)), + make_test_cookie(Some(&test_remote_address_3)) + ); + + assert_ne!( + make_test_cookie(Some(&test_remote_address_2)), + 
make_test_cookie(Some(&test_remote_address_3)) + ) + } + + #[test] + fn it_should_make_different_cookies_for_the_next_time_extent() { + let cookie_now = make_test_cookie(None); + + StoppedClock::local_add(&COOKIE_LIFETIME.increment).unwrap(); + + let cookie_next = make_test_cookie(None); + + assert_ne!(cookie_now, cookie_next) + } + + #[test] + fn it_should_be_valid_for_this_time_extent() { + let cookie_now = make_test_cookie(None); + + check_connection_cookie(&make_test_socket_addr(), &cookie_now).unwrap(); + } + + #[test] + fn it_should_be_valid_for_the_next_time_extent() { + let cookie_now = make_test_cookie(None); + + StoppedClock::local_add(&COOKIE_LIFETIME.increment).unwrap(); + + check_connection_cookie(&make_test_socket_addr(), &cookie_now).unwrap(); + } + + #[test] + fn it_cookies_should_be_valid_for_the_last_time_extent() { + let cookie_now = make_test_cookie(None); + + StoppedClock::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); + + check_connection_cookie(&make_test_socket_addr(), &cookie_now).unwrap(); + } + + #[test] + #[should_panic] + fn it_cookies_should_be_not_valid_after_their_last_time_extent() { + let cookie_now = make_test_cookie(None); + + let last_time_extent = get_last_time_extent().increase(COOKIE_LIFETIME.amount).unwrap(); + + StoppedClock::local_set(&last_time_extent.total_next().unwrap().unwrap()); + + check_connection_cookie(&make_test_socket_addr(), &cookie_now).unwrap(); + } +} diff --git a/src/udp/mod.rs b/src/udp/mod.rs index ae87973f1..4c98875c5 100644 --- a/src/udp/mod.rs +++ b/src/udp/mod.rs @@ -3,6 +3,7 @@ pub use self::handlers::*; pub use self::request::*; pub use self::server::*; +pub mod connection_cookie; pub mod errors; pub mod handlers; pub mod request; From 4949424b9158706205753667a5ed6c28ab5209c9 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 18 Oct 2022 18:40:18 +0200 Subject: [PATCH 0147/1003] dev: use the connection cookie implementation --- src/protocol/utils.rs | 14 +--------- 
src/tracker/peer.rs | 4 +-- src/udp/handlers.rs | 60 ++++++++++++++++++++++++------------------- 3 files changed, 37 insertions(+), 41 deletions(-) diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index 48fe4eb17..ac20aa41e 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -1,16 +1,4 @@ -use std::net::SocketAddr; - -use aquatic_udp_protocol::ConnectionId; - -use super::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; - -pub fn get_connection_id(remote_address: &SocketAddr) -> ConnectionId { - ConnectionId(((current_time() / 3600) | ((remote_address.port() as u64) << 36)) as i64) -} - -pub fn current_time() -> u64 { - DefaultClock::now().as_secs() -} +use super::clock::DurationSinceUnixEpoch; pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { ser.serialize_u64(unix_time_value.as_millis() as u64) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 2d0985fc9..7ac35179a 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -130,7 +130,7 @@ mod test { }; use crate::peer::TorrentPeer; - use crate::protocol::utils::get_connection_id; + use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; // todo: duplicate functions is PR 82. Remove duplication once both PR are merged. 
fn sample_ipv4_remote_addr() -> SocketAddr { @@ -152,7 +152,7 @@ mod test { let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); let default_request = AnnounceRequest { - connection_id: get_connection_id(&sample_ipv4_remote_addr()), + connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), transaction_id: TransactionId(0i32), info_hash: info_hash_aquatic, peer_id: AquaticPeerId(*b"-qB00000000000000000"), diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index f22436bc1..d46cd9231 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -6,8 +6,8 @@ use aquatic_udp_protocol::{ NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; +use super::connection_cookie::{check_connection_cookie, from_connection_id, into_connection_id, make_connection_cookie}; use crate::peer::TorrentPeer; -use crate::protocol::utils::get_connection_id; use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::TorrentError; use crate::tracker::tracker::TorrentTracker; @@ -69,7 +69,8 @@ pub async fn handle_connect( request: &ConnectRequest, tracker: Arc, ) -> Result { - let connection_id = get_connection_id(&remote_addr); + let connection_cookie = make_connection_cookie(&remote_addr); + let connection_id = into_connection_id(&connection_cookie); let response = Response::from(ConnectResponse { transaction_id: request.transaction_id, @@ -94,6 +95,13 @@ pub async fn handle_announce( announce_request: &AnnounceRequest, tracker: Arc, ) -> Result { + match check_connection_cookie(&remote_addr, &from_connection_id(&announce_request.connection_id)) { + Ok(_) => {} + Err(e) => { + return Err(e); + } + } + let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request.clone()); authenticate(&wrapped_announce_request.info_hash, tracker.clone()).await?; @@ -401,9 +409,9 @@ mod tests { use aquatic_udp_protocol::{ConnectRequest, 
ConnectResponse, Response, TransactionId}; use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr, TrackerStatsServiceMock}; - use crate::protocol::utils::get_connection_id; use crate::statistics::TrackerStatisticsEvent; use crate::tracker::tracker::TorrentTracker; + use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_connect; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -426,7 +434,7 @@ mod tests { assert_eq!( response, Response::Connect(ConnectResponse { - connection_id: get_connection_id(&sample_ipv4_remote_addr()), + connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), transaction_id: request.transaction_id }) ); @@ -445,7 +453,7 @@ mod tests { assert_eq!( response, Response::Connect(ConnectResponse { - connection_id: get_connection_id(&sample_ipv4_remote_addr()), + connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), transaction_id: request.transaction_id }) ); @@ -486,7 +494,7 @@ mod tests { TransactionId, }; - use crate::protocol::utils::get_connection_id; + use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::tests::sample_ipv4_remote_addr; struct AnnounceRequestBuilder { @@ -500,7 +508,7 @@ mod tests { let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); let default_request = AnnounceRequest { - connection_id: get_connection_id(&sample_ipv4_remote_addr()), + connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), transaction_id: TransactionId(0i32), info_hash: info_hash_aquatic, peer_id: AquaticPeerId([255u8; 20]), @@ -558,9 +566,9 @@ mod tests { Response, ResponsePeer, }; - use crate::protocol::utils::get_connection_id; use crate::statistics::TrackerStatisticsEvent; use crate::tracker::tracker::TorrentTracker; + use 
crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ @@ -581,7 +589,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) @@ -605,7 +613,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .into(); let response = handle_announce(remote_addr, &request, initialized_public_tracker()) @@ -644,7 +652,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) @@ -679,7 +687,7 @@ mod tests { async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .into(); let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); response @@ -723,7 +731,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::protocol::utils::get_connection_id; + use 
crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{initialized_public_tracker, TorrentPeerBuilder}; @@ -741,7 +749,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) @@ -775,9 +783,9 @@ mod tests { Response, ResponsePeer, }; - use crate::protocol::utils::get_connection_id; use crate::statistics::TrackerStatisticsEvent; use crate::tracker::tracker::TorrentTracker; + use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ @@ -799,7 +807,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) @@ -826,7 +834,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .into(); let response = handle_announce(remote_addr, &request, initialized_public_tracker()) @@ -865,7 +873,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); let request = AnnounceRequestBuilder::default() - 
.with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) @@ -903,7 +911,7 @@ mod tests { let client_port = 8080; let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .into(); let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); response @@ -937,7 +945,7 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let announce_request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .into(); handle_announce(remote_addr, &announce_request, tracker.clone()) @@ -951,9 +959,9 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::protocol::utils::get_connection_id; use crate::statistics::StatsTracker; use crate::tracker::tracker::TorrentTracker; + use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::TrackerConfigurationBuilder; @@ -977,7 +985,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) @@ -1012,8 +1020,8 @@ mod tests { }; use super::TorrentPeerBuilder; - use crate::protocol::utils::get_connection_id; use 
crate::tracker::tracker::TorrentTracker; + use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_scrape; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; use crate::PeerId; @@ -1034,7 +1042,7 @@ mod tests { let info_hashes = vec![info_hash]; let request = ScrapeRequest { - connection_id: get_connection_id(&remote_addr), + connection_id: into_connection_id(&make_connection_cookie(&remote_addr)), transaction_id: TransactionId(0i32), info_hashes, }; @@ -1072,7 +1080,7 @@ mod tests { let info_hashes = vec![*info_hash]; ScrapeRequest { - connection_id: get_connection_id(&remote_addr), + connection_id: into_connection_id(&make_connection_cookie(&remote_addr)), transaction_id: TransactionId(0i32), info_hashes, } @@ -1217,7 +1225,7 @@ mod tests { let info_hashes = vec![info_hash]; ScrapeRequest { - connection_id: get_connection_id(&remote_addr), + connection_id: into_connection_id(&make_connection_cookie(&remote_addr)), transaction_id: TransactionId(0i32), info_hashes, } From 2911f3d05587bc0501979e76347c73279456b0fe Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 19 Oct 2022 14:08:05 +0200 Subject: [PATCH 0148/1003] tests: improve connection cookie tests Co-authored-by: Jose Celano --- src/udp/connection_cookie.rs | 161 +++++++++++++++++++++++++---------- 1 file changed, 114 insertions(+), 47 deletions(-) diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index 712c2ce61..a17431b9c 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -80,99 +80,166 @@ mod cookie_builder { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use super::cookie_builder::get_last_time_extent; - use crate::protocol::clock::time_extent::Extent; + use super::cookie_builder::{self}; + use crate::protocol::clock::time_extent::{self, Extent}; use crate::protocol::clock::{StoppedClock, StoppedTime}; use 
crate::udp::connection_cookie::{check_connection_cookie, make_connection_cookie, Cookie, COOKIE_LIFETIME}; - fn make_test_socket_addr() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + // #![feature(const_socketaddr)] + // const REMOTE_ADDRESS_IPV4_ZERO: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + #[test] + fn it_should_make_a_connection_cookie() { + let cookie = make_connection_cookie(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); + + // Note: This constant may need to be updated in the future as the hash is not guaranteed to to be stable between versions. + const ID_COOKIE: Cookie = [23, 204, 198, 29, 48, 180, 62, 19]; + + assert_eq!(cookie, ID_COOKIE) } - fn make_test_cookie(remote_address: Option<&SocketAddr>) -> Cookie { - make_connection_cookie(remote_address.unwrap_or(&make_test_socket_addr())) + #[test] + fn it_should_make_the_same_connection_cookie_for_the_same_input_data() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let time_extent_zero = time_extent::ZERO; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address, &time_extent_zero); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + + //remote_address: 127.0.0.1:8080, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [212, 9, 204, 223, 176, 190, 150, 153] + //remote_address: 127.0.0.1:8080, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [212, 9, 204, 223, 176, 190, 150, 153] + + assert_eq!(cookie, cookie_2) } #[test] - fn it_should_make_a_connection_cookie() { - // remote_address: 127.0.0.1:8080, time_extent: 60, - // seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + fn 
it_should_make_the_different_connection_cookie_for_different_ip() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let remote_address_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::BROADCAST), 0); + let time_extent_zero = time_extent::ZERO; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address_2, &time_extent_zero); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address_2:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] + //remote_address: 255.255.255.255:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [217, 87, 239, 178, 182, 126, 66, 166] + + assert_ne!(cookie, cookie_2) + } + + #[test] + fn it_should_make_the_different_connection_cookie_for_different_ip_version() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let remote_address_2 = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0); + let time_extent_zero = time_extent::ZERO; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address_2, &time_extent_zero); - const ID_COOKIE: Cookie = [45, 59, 50, 101, 97, 203, 48, 19]; + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address_2:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); - let test_cookie = make_test_cookie(None); - //println!("{test_cookie:?}"); + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] + //remote_address: [::]:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [99, 119, 230, 177, 
20, 220, 163, 187] - assert_eq!(test_cookie, ID_COOKIE) + assert_ne!(cookie, cookie_2) } #[test] - fn it_should_make_different_connection_cookie_with_different_remote_addresses() { - let test_remote_address_1 = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 1); - let test_remote_address_2 = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 2); - let test_remote_address_3 = SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 1); - - assert_ne!( - make_test_cookie(Some(&test_remote_address_1)), - make_test_cookie(Some(&test_remote_address_2)) - ); - - assert_ne!( - make_test_cookie(Some(&test_remote_address_1)), - make_test_cookie(Some(&test_remote_address_3)) - ); - - assert_ne!( - make_test_cookie(Some(&test_remote_address_2)), - make_test_cookie(Some(&test_remote_address_3)) - ) + fn it_should_make_the_different_connection_cookie_for_different_socket() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let remote_address_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 1); + let time_extent_zero = time_extent::ZERO; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address_2, &time_extent_zero); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address_2:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] + //remote_address: 0.0.0.0:1, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [38, 8, 0, 102, 92, 170, 220, 11] + + assert_ne!(cookie, cookie_2) + } + + #[test] + fn it_should_make_the_different_connection_cookie_for_different_time_extents() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let time_extent_zero = time_extent::ZERO; + let time_extent_max = 
time_extent::MAX; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address, &time_extent_max); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address:?}, time_extent: {time_extent_max:?}, cookie: {cookie_2:?}"); + + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 18446744073709551615.999999999s, amount: 18446744073709551615 }, cookie: [87, 111, 109, 125, 182, 206, 3, 201] + + assert_ne!(cookie, cookie_2) } #[test] fn it_should_make_different_cookies_for_the_next_time_extent() { - let cookie_now = make_test_cookie(None); + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make_connection_cookie(&remote_address); StoppedClock::local_add(&COOKIE_LIFETIME.increment).unwrap(); - let cookie_next = make_test_cookie(None); + let cookie_next = make_connection_cookie(&remote_address); - assert_ne!(cookie_now, cookie_next) + assert_ne!(cookie, cookie_next) } #[test] fn it_should_be_valid_for_this_time_extent() { - let cookie_now = make_test_cookie(None); + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make_connection_cookie(&remote_address); - check_connection_cookie(&make_test_socket_addr(), &cookie_now).unwrap(); + check_connection_cookie(&remote_address, &cookie).unwrap(); } #[test] fn it_should_be_valid_for_the_next_time_extent() { - let cookie_now = make_test_cookie(None); + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make_connection_cookie(&remote_address); StoppedClock::local_add(&COOKIE_LIFETIME.increment).unwrap(); - check_connection_cookie(&make_test_socket_addr(), &cookie_now).unwrap(); + 
check_connection_cookie(&remote_address, &cookie).unwrap(); } #[test] - fn it_cookies_should_be_valid_for_the_last_time_extent() { - let cookie_now = make_test_cookie(None); + fn it_should_be_valid_for_the_last_time_extent() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make_connection_cookie(&remote_address); StoppedClock::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); - check_connection_cookie(&make_test_socket_addr(), &cookie_now).unwrap(); + check_connection_cookie(&remote_address, &cookie).unwrap(); } #[test] #[should_panic] - fn it_cookies_should_be_not_valid_after_their_last_time_extent() { - let cookie_now = make_test_cookie(None); + fn it_should_be_not_valid_after_their_last_time_extent() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let last_time_extent = get_last_time_extent().increase(COOKIE_LIFETIME.amount).unwrap(); + let cookie = make_connection_cookie(&remote_address); - StoppedClock::local_set(&last_time_extent.total_next().unwrap().unwrap()); + StoppedClock::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); - check_connection_cookie(&make_test_socket_addr(), &cookie_now).unwrap(); + check_connection_cookie(&remote_address, &cookie).unwrap(); } } From adee3b5267386b642b7b09519eebf049a44f5eea Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Oct 2022 11:03:05 +0100 Subject: [PATCH 0149/1003] fix: [#97] make tracker statistics optional again Commit 7abe0f5bde1e209553d1a1e2d6fe644cd46a9395 introduced an unwanted change. Thread for statistics is always created regardless configuration. This commit reverts that change. The config option: config.tracker_usage_statistics defines wether the statistics should be enabled or not. 
--- src/main.rs | 6 +++++- src/tracker/statistics.rs | 6 ------ src/udp/handlers.rs | 18 +++++++++++------- 3 files changed, 16 insertions(+), 14 deletions(-) diff --git a/src/main.rs b/src/main.rs index bac7854bb..ffe080f9a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -24,7 +24,11 @@ async fn main() { }; // Initialize stats tracker - let stats_tracker = StatsTracker::new_running_instance(); + let mut stats_tracker = StatsTracker::new(); + + if config.tracker_usage_statistics { + stats_tracker.run_worker(); + } // Initialize Torrust tracker let tracker = match TorrentTracker::new(config.clone(), Box::new(stats_tracker)) { diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index a2a0de99b..2a216770e 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -62,12 +62,6 @@ pub struct StatsTracker { } impl StatsTracker { - pub fn new_running_instance() -> Self { - let mut stats_tracker = Self::new(); - stats_tracker.run_worker(); - stats_tracker - } - pub fn new() -> Self { Self { channel_sender: None, diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index d46cd9231..8117b6c89 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -271,17 +271,23 @@ mod tests { fn initialized_public_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_running_instance())).unwrap()) + Arc::new(TorrentTracker::new(configuration, Box::new(initialized_stats_tracker())).unwrap()) } fn initialized_private_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_running_instance())).unwrap()) + Arc::new(TorrentTracker::new(configuration, Box::new(initialized_stats_tracker())).unwrap()) } fn initialized_whitelisted_tracker() -> Arc { let configuration = 
Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_running_instance())).unwrap()) + Arc::new(TorrentTracker::new(configuration, Box::new(initialized_stats_tracker())).unwrap()) + } + + fn initialized_stats_tracker() -> StatsTracker { + let mut stats_tracker = StatsTracker::new(); + stats_tracker.run_worker(); + stats_tracker } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -959,18 +965,16 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::statistics::StatsTracker; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::udp::handlers::tests::TrackerConfigurationBuilder; + use crate::udp::handlers::tests::{initialized_stats_tracker, TrackerConfigurationBuilder}; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let tracker = - Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_running_instance())).unwrap()); + let tracker = Arc::new(TorrentTracker::new(configuration, Box::new(initialized_stats_tracker())).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); From e12d8e6ec142883ae5734113920f7c8cdb80c30a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Oct 2022 18:29:13 +0100 Subject: [PATCH 0150/1003] test: [#97] add test for optional statistics Tracker statistics can be enabled or disabled using the configuration option `tracker_usage_statistics`. This commit adds tests for that behavior. 
--- src/main.rs | 6 +---- src/tracker/statistics.rs | 51 +++++++++++++++++++++++++++++++++++++++ src/udp/handlers.rs | 18 ++++++-------- 3 files changed, 59 insertions(+), 16 deletions(-) diff --git a/src/main.rs b/src/main.rs index ffe080f9a..dcb92acb8 100644 --- a/src/main.rs +++ b/src/main.rs @@ -24,11 +24,7 @@ async fn main() { }; // Initialize stats tracker - let mut stats_tracker = StatsTracker::new(); - - if config.tracker_usage_statistics { - stats_tracker.run_worker(); - } + let stats_tracker = StatsTracker::new_instance(config.tracker_usage_statistics); // Initialize Torrust tracker let tracker = match TorrentTracker::new(config.clone(), Box::new(stats_tracker)) { diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 2a216770e..fb4e4c0fe 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -62,6 +62,27 @@ pub struct StatsTracker { } impl StatsTracker { + pub fn new_active_instance() -> Self { + Self::new_instance(true) + } + + pub fn new_inactive_instance() -> Self { + Self::new_instance(false) + } + + pub fn new_instance(active: bool) -> Self { + let mut stats_tracker = Self { + channel_sender: None, + stats: Arc::new(RwLock::new(TrackerStatistics::new())), + }; + + if active { + stats_tracker.run_worker(); + } + + stats_tracker + } + pub fn new() -> Self { Self { channel_sender: None, @@ -155,3 +176,33 @@ impl TrackerStatisticsRepository for StatsTracker { pub trait TrackerStatsService: TrackerStatisticsEventSender + TrackerStatisticsRepository {} impl TrackerStatsService for StatsTracker {} + +#[cfg(test)] +mod test { + + mod event_sender { + use crate::statistics::{StatsTracker, TrackerStatisticsEvent, TrackerStatisticsEventSender}; + + #[tokio::test] + async fn should_not_send_any_event_when_statistics_are_disabled() { + let tracker_usage_statistics = false; + + let inactive_stats_tracker = StatsTracker::new_instance(tracker_usage_statistics); + + let result = 
inactive_stats_tracker.send_event(TrackerStatisticsEvent::Tcp4Announce).await; + + assert!(result.is_none()); + } + + #[tokio::test] + async fn should_send_events_when_statistics_are_enabled() { + let tracker_usage_statistics = true; + + let active_stats_tracker = StatsTracker::new_instance(tracker_usage_statistics); + + let result = active_stats_tracker.send_event(TrackerStatisticsEvent::Tcp4Announce).await; + + assert!(result.is_some()); + } + } +} diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 8117b6c89..845b860e9 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -271,23 +271,17 @@ mod tests { fn initialized_public_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(initialized_stats_tracker())).unwrap()) + Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_active_instance())).unwrap()) } fn initialized_private_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(initialized_stats_tracker())).unwrap()) + Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_active_instance())).unwrap()) } fn initialized_whitelisted_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(initialized_stats_tracker())).unwrap()) - } - - fn initialized_stats_tracker() -> StatsTracker { - let mut stats_tracker = StatsTracker::new(); - stats_tracker.run_worker(); - stats_tracker + Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_active_instance())).unwrap()) } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -965,16 +959,18 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + 
use crate::statistics::StatsTracker; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::udp::handlers::tests::{initialized_stats_tracker, TrackerConfigurationBuilder}; + use crate::udp::handlers::tests::TrackerConfigurationBuilder; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let tracker = Arc::new(TorrentTracker::new(configuration, Box::new(initialized_stats_tracker())).unwrap()); + let tracker = + Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_active_instance())).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); From a27adf23d997663e6bf2f40c3021749faac0b7c1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Oct 2022 18:27:18 +0100 Subject: [PATCH 0151/1003] test: integration tests for udp tracker --- tests/udp.rs | 293 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 293 insertions(+) create mode 100644 tests/udp.rs diff --git a/tests/udp.rs b/tests/udp.rs new file mode 100644 index 000000000..be4e42e41 --- /dev/null +++ b/tests/udp.rs @@ -0,0 +1,293 @@ +/// Integration tests for UDP tracker server +/// +/// cargo test udp_tracker_server -- --nocapture + +#[macro_use] +extern crate lazy_static; + +extern crate rand; + +mod udp_tracker_server { + + use core::panic; + use std::io::Cursor; + use std::net::IpAddr; + use std::sync::atomic::{AtomicBool, Ordering}; + use std::sync::Arc; + use std::sync::RwLock; + + use rand::{thread_rng, Rng}; + + use tokio::net::UdpSocket; + use tokio::task::JoinHandle; + + use torrust_tracker::jobs::udp_tracker; + use 
torrust_tracker::tracker::statistics::StatsTracker; + use torrust_tracker::tracker::tracker::TorrentTracker; + use torrust_tracker::udp::MAX_PACKET_SIZE; + use torrust_tracker::{logging, static_time, Configuration}; + + use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, + Port, Request, Response, TransactionId, + }; + + fn tracker_configuration() -> Arc { + let mut config = Configuration::default(); + //config.log_level = Some("debug".to_owned()); // Uncomment to enable logging + config.external_ip = Some("127.0.0.1".to_owned()); + config.udp_trackers[0].bind_address = "127.0.0.1:6969".to_owned(); + Arc::new(config) + } + + fn tracker_bind_address() -> String { + tracker_configuration().udp_trackers[0].bind_address.clone() + } + + pub struct UdpServer { + pub started: AtomicBool, + pub job: Option>, + } + + impl UdpServer { + pub fn new() -> Self { + Self { + started: AtomicBool::new(false), + job: None, + } + } + + pub async fn start(&mut self, configuration: Arc) { + if !self.started.load(Ordering::Relaxed) { + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize stats tracker + let stats_tracker = StatsTracker::new_running_instance(); + + // Initialize Torrust tracker + let tracker = match TorrentTracker::new(configuration.clone(), Box::new(stats_tracker)) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + + // Initialize logging + logging::setup_logging(&configuration); + + // Start the UDP tracker job + self.job = Some(udp_tracker::start_job(&configuration.udp_trackers[0], tracker.clone())); + + self.started.store(true, Ordering::Relaxed); + } + } + } + + lazy_static! 
{ + static ref SERVER: RwLock = RwLock::new(UdpServer::new()); + } + + async fn start_udp_server(configuration: Arc) { + SERVER.write().unwrap().start(configuration.clone()).await; + } + + struct UdpClient { + socket: Arc, + } + + impl UdpClient { + async fn bind(local_address: &str) -> Self { + let socket = UdpSocket::bind(local_address).await.unwrap(); + Self { + socket: Arc::new(socket), + } + } + + async fn connect(&self, remote_address: &str) { + self.socket.connect(remote_address).await.unwrap() + } + + async fn send(&self, bytes: &[u8]) -> usize { + self.socket.writable().await.unwrap(); + self.socket.send(bytes).await.unwrap() + } + + async fn receive(&self, bytes: &mut [u8]) -> usize { + self.socket.readable().await.unwrap(); + self.socket.recv(bytes).await.unwrap() + } + } + + /// Creates a new UdpClient connected to a Udp server + async fn new_connected_udp_client(remote_address: &str) -> UdpClient { + let client = UdpClient::bind(&source_address(ephemeral_random_port())).await; + client.connect(remote_address).await; + client + } + + struct UdpTrackerClient { + pub udp_client: UdpClient, + } + + impl UdpTrackerClient { + async fn send(&self, request: Request) -> usize { + // Write request into a buffer + let request_buffer = vec![0u8; MAX_PACKET_SIZE]; + let mut cursor = Cursor::new(request_buffer); + + let request_data = match request.write(&mut cursor) { + Ok(_) => { + let position = cursor.position() as usize; + let inner_request_buffer = cursor.get_ref(); + // Return slice which contains written request data + &inner_request_buffer[..position] + } + Err(_) => panic!("could not write request to bytes."), + }; + + self.udp_client.send(&request_data).await + } + + async fn receive(&self) -> Response { + let mut response_buffer = [0u8; MAX_PACKET_SIZE]; + + let payload_size = self.udp_client.receive(&mut response_buffer).await; + + Response::from_bytes(&response_buffer[..payload_size], true).unwrap() + } + } + + /// Creates a new UdpTrackerClient 
connected to a Udp Tracker server + async fn new_connected_udp_tracker_client(remote_address: &str) -> UdpTrackerClient { + let udp_client = new_connected_udp_client(remote_address).await; + UdpTrackerClient { udp_client } + } + + fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { + [0; MAX_PACKET_SIZE] + } + + fn empty_buffer() -> [u8; MAX_PACKET_SIZE] { + [0; MAX_PACKET_SIZE] + } + + /// Generates a random ephemeral port for a client source address + fn ephemeral_random_port() -> u16 { + // todo: this may produce random test failures because two test can try to bind the same port. + // We could either use the same client for all tests (slower) or + // create a pool of available ports (with read/write lock) + let mut rng = thread_rng(); + rng.gen_range(49152..65535) + } + + /// Generates the source address for the UDP client + fn source_address(port: u16) -> String { + format!("127.0.0.1:{}", port) + } + + fn is_error_response(response: &Response, error_message: &str) -> bool { + match response { + Response::Error(error_response) => return error_response.message.starts_with(error_message), + _ => return false, + }; + } + + fn is_connect_response(response: &Response, transaction_id: TransactionId) -> bool { + match response { + Response::Connect(connect_response) => return connect_response.transaction_id == transaction_id, + _ => return false, + }; + } + + fn is_ipv4_announce_response(response: &Response) -> bool { + match response { + Response::AnnounceIpv4(_) => return true, + _ => return false, + }; + } + + // #[tokio::test] + // async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { + // start_udp_server(tracker_configuration().clone()).await; + + // let client = new_connected_udp_client(&tracker_bind_address()).await; + + // client.send(&empty_udp_request()).await; + + // let mut buffer = empty_buffer(); + // client.receive(&mut buffer).await; + // let response = Response::from_bytes(&buffer, true).unwrap(); + + // 
assert!(is_error_response(&response, "bad request")); + // } + + // #[tokio::test] + // async fn should_return_a_connect_response_when_the_client_sends_a_connection_request() { + // start_udp_server(tracker_configuration().clone()).await; + + // let client = new_connected_udp_tracker_client(&tracker_bind_address()).await; + + // let connect_request = ConnectRequest { + // transaction_id: TransactionId(123), + // }; + + // client.send(connect_request.into()).await; + + // let response = client.receive().await; + + // assert!(is_connect_response(&response, TransactionId(123))); + // } + + #[tokio::test] + async fn should_return_an_announce_response_when_the_client_sends_an_announce_request() { + start_udp_server(tracker_configuration().clone()).await; + + let client = new_connected_udp_tracker_client(&tracker_bind_address()).await; + + // todo: extract client.connect() -> ConnectionId + + // Get connection id before sending the announce request + + let connect_request = ConnectRequest { + transaction_id: TransactionId(123), + }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + let connection_id = match response { + Response::Connect(connect_response) => connect_response.connection_id, + _ => panic!("error connecting to udp server {:?}", response), + }; + + // Send announce request + + let client_ip = match client.udp_client.socket.local_addr().unwrap().ip() { + IpAddr::V4(ip4) => ip4, + _ => panic!("error: IPV6 addresses cannot be used for the client ip in the announce request. 
Try to use IPV4."), + }; + + let announce_request = AnnounceRequest { + connection_id: ConnectionId(8724592475294857), + transaction_id: TransactionId(123i32), + info_hash: InfoHash([0u8; 20]), + peer_id: PeerId([255u8; 20]), + bytes_downloaded: NumberOfBytes(0i64), + bytes_uploaded: NumberOfBytes(0i64), + bytes_left: NumberOfBytes(0i64), + event: AnnounceEvent::Started, + ip_address: Some(client_ip), + key: PeerKey(0u32), + peers_wanted: NumberOfPeers(1i32), + port: Port(client.udp_client.socket.local_addr().unwrap().port()), + }; + + client.send(announce_request.into()).await; + + let response = client.receive().await; + + assert!(is_ipv4_announce_response(&response)); + } +} From 946e80a7a20e9cb31eb15eb04e2b89912ba7e6d6 Mon Sep 17 00:00:00 2001 From: Mick van Dijke Date: Fri, 7 Oct 2022 16:31:11 +0200 Subject: [PATCH 0152/1003] refactor: run tests with own udp tracker (#98) * refactor: run tests with own udp tracker * fixup! refactor: run tests with own udp tracker --- src/logging.rs | 4 +++ tests/udp.rs | 96 +++++++++++++++++++++++--------------------------- 2 files changed, 49 insertions(+), 51 deletions(-) diff --git a/src/logging.rs b/src/logging.rs index 209c9f848..7552a5459 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -18,6 +18,10 @@ pub fn setup_logging(cfg: &Configuration) { }, }; + if log_level == log::LevelFilter::Off { + return; + } + if let Err(_err) = fern::Dispatch::new() .format(|out, message, record| { out.finish(format_args!( diff --git a/tests/udp.rs b/tests/udp.rs index be4e42e41..ecfd879ce 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -1,20 +1,14 @@ /// Integration tests for UDP tracker server /// /// cargo test udp_tracker_server -- --nocapture - -#[macro_use] -extern crate lazy_static; - extern crate rand; mod udp_tracker_server { - use core::panic; use std::io::Cursor; - use std::net::IpAddr; + use std::net::Ipv4Addr; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; - use std::sync::RwLock; use rand::{thread_rng, 
Rng}; @@ -34,19 +28,15 @@ mod udp_tracker_server { fn tracker_configuration() -> Arc { let mut config = Configuration::default(); - //config.log_level = Some("debug".to_owned()); // Uncomment to enable logging - config.external_ip = Some("127.0.0.1".to_owned()); - config.udp_trackers[0].bind_address = "127.0.0.1:6969".to_owned(); + config.log_level = Some("off".to_owned()); // "off" is necessary when running multiple trackers + config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", ephemeral_random_port()); Arc::new(config) } - fn tracker_bind_address() -> String { - tracker_configuration().udp_trackers[0].bind_address.clone() - } - pub struct UdpServer { pub started: AtomicBool, pub job: Option>, + pub bind_address: Option, } impl UdpServer { @@ -54,6 +44,7 @@ mod udp_tracker_server { Self { started: AtomicBool::new(false), job: None, + bind_address: None, } } @@ -76,20 +67,22 @@ mod udp_tracker_server { // Initialize logging logging::setup_logging(&configuration); + let udp_tracker_config = &configuration.udp_trackers[0]; + // Start the UDP tracker job - self.job = Some(udp_tracker::start_job(&configuration.udp_trackers[0], tracker.clone())); + self.job = Some(udp_tracker::start_job(&udp_tracker_config, tracker.clone())); + + self.bind_address = Some(udp_tracker_config.bind_address.clone()); self.started.store(true, Ordering::Relaxed); } } } - lazy_static! 
{ - static ref SERVER: RwLock = RwLock::new(UdpServer::new()); - } - - async fn start_udp_server(configuration: Arc) { - SERVER.write().unwrap().start(configuration.clone()).await; + async fn new_running_udp_server(configuration: Arc) -> UdpServer { + let mut udp_server = UdpServer::new(); + udp_server.start(configuration).await; + udp_server } struct UdpClient { @@ -207,43 +200,49 @@ mod udp_tracker_server { }; } - // #[tokio::test] - // async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { - // start_udp_server(tracker_configuration().clone()).await; + #[tokio::test] + async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { + let configuration = tracker_configuration(); - // let client = new_connected_udp_client(&tracker_bind_address()).await; + let udp_server = new_running_udp_server(configuration).await; - // client.send(&empty_udp_request()).await; + let client = new_connected_udp_client(&udp_server.bind_address.unwrap()).await; - // let mut buffer = empty_buffer(); - // client.receive(&mut buffer).await; - // let response = Response::from_bytes(&buffer, true).unwrap(); + client.send(&empty_udp_request()).await; - // assert!(is_error_response(&response, "bad request")); - // } + let mut buffer = empty_buffer(); + client.receive(&mut buffer).await; + let response = Response::from_bytes(&buffer, true).unwrap(); - // #[tokio::test] - // async fn should_return_a_connect_response_when_the_client_sends_a_connection_request() { - // start_udp_server(tracker_configuration().clone()).await; + assert!(is_error_response(&response, "bad request")); + } - // let client = new_connected_udp_tracker_client(&tracker_bind_address()).await; + #[tokio::test] + async fn should_return_a_connect_response_when_the_client_sends_a_connection_request() { + let configuration = tracker_configuration(); - // let connect_request = ConnectRequest { - // transaction_id: TransactionId(123), - // }; + let udp_server = 
new_running_udp_server(configuration).await; - // client.send(connect_request.into()).await; + let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; - // let response = client.receive().await; + let connect_request = ConnectRequest { + transaction_id: TransactionId(123), + }; - // assert!(is_connect_response(&response, TransactionId(123))); - // } + client.send(connect_request.into()).await; + + let response = client.receive().await; + + assert!(is_connect_response(&response, TransactionId(123))); + } #[tokio::test] async fn should_return_an_announce_response_when_the_client_sends_an_announce_request() { - start_udp_server(tracker_configuration().clone()).await; + let configuration = tracker_configuration(); + + let udp_server = new_running_udp_server(configuration).await; - let client = new_connected_udp_tracker_client(&tracker_bind_address()).await; + let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; // todo: extract client.connect() -> ConnectionId @@ -264,13 +263,8 @@ mod udp_tracker_server { // Send announce request - let client_ip = match client.udp_client.socket.local_addr().unwrap().ip() { - IpAddr::V4(ip4) => ip4, - _ => panic!("error: IPV6 addresses cannot be used for the client ip in the announce request. 
Try to use IPV4."), - }; - let announce_request = AnnounceRequest { - connection_id: ConnectionId(8724592475294857), + connection_id: ConnectionId(connection_id.0), transaction_id: TransactionId(123i32), info_hash: InfoHash([0u8; 20]), peer_id: PeerId([255u8; 20]), @@ -278,7 +272,7 @@ mod udp_tracker_server { bytes_uploaded: NumberOfBytes(0i64), bytes_left: NumberOfBytes(0i64), event: AnnounceEvent::Started, - ip_address: Some(client_ip), + ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), key: PeerKey(0u32), peers_wanted: NumberOfPeers(1i32), port: Port(client.udp_client.socket.local_addr().unwrap().port()), From f5aee0325c0dc10e902287dc74056b7f4e870940 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Oct 2022 11:44:46 +0100 Subject: [PATCH 0153/1003] fix: re-format with rust nightly --- tests/udp.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/tests/udp.rs b/tests/udp.rs index ecfd879ce..49f8e7c16 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -10,22 +10,19 @@ mod udp_tracker_server { use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; + use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, + Port, Request, Response, TransactionId, + }; use rand::{thread_rng, Rng}; - use tokio::net::UdpSocket; use tokio::task::JoinHandle; - use torrust_tracker::jobs::udp_tracker; use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::tracker::TorrentTracker; use torrust_tracker::udp::MAX_PACKET_SIZE; use torrust_tracker::{logging, static_time, Configuration}; - use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, - Port, Request, Response, TransactionId, - }; - fn tracker_configuration() -> Arc { let mut config = Configuration::default(); config.log_level = Some("off".to_owned()); // "off" is necessary when 
running multiple trackers From 1f258c1cd01c75d003d0ab130cfa58d31fa4f1b7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Oct 2022 12:17:28 +0100 Subject: [PATCH 0154/1003] test: integration test for udp scrape request --- tests/udp.rs | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 52 insertions(+), 2 deletions(-) diff --git a/tests/udp.rs b/tests/udp.rs index 49f8e7c16..31f631fd6 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -12,7 +12,7 @@ mod udp_tracker_server { use aquatic_udp_protocol::{ AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, - Port, Request, Response, TransactionId, + Port, Request, Response, ScrapeRequest, TransactionId, }; use rand::{thread_rng, Rng}; use tokio::net::UdpSocket; @@ -164,7 +164,7 @@ mod udp_tracker_server { /// Generates a random ephemeral port for a client source address fn ephemeral_random_port() -> u16 { - // todo: this may produce random test failures because two test can try to bind the same port. + // todo: this may produce random test failures because two tests can try to bind the same port. 
// We could either use the same client for all tests (slower) or // create a pool of available ports (with read/write lock) let mut rng = thread_rng(); @@ -197,6 +197,13 @@ mod udp_tracker_server { }; } + fn is_scrape_response(response: &Response) -> bool { + match response { + Response::Scrape(_) => return true, + _ => return false, + }; + } + #[tokio::test] async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { let configuration = tracker_configuration(); @@ -281,4 +288,47 @@ mod udp_tracker_server { assert!(is_ipv4_announce_response(&response)); } + + #[tokio::test] + async fn should_return_a_scrape_response_when_the_client_sends_a_scrape_request() { + let configuration = tracker_configuration(); + + let udp_server = new_running_udp_server(configuration).await; + + let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; + + // todo: extract client.connect() -> ConnectionId + + // Get connection id before sending the announce request + + let connect_request = ConnectRequest { + transaction_id: TransactionId(123i32), + }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + let connection_id = match response { + Response::Connect(connect_response) => connect_response.connection_id, + _ => panic!("error connecting to udp server {:?}", response), + }; + + // Send scrape request + + // Full scrapes are not allowed so it will return "bad request" error with empty vector + let info_hashes = vec![InfoHash([0u8; 20])]; + + let scrape_request = ScrapeRequest { + connection_id: ConnectionId(connection_id.0), + transaction_id: TransactionId(123i32), + info_hashes, + }; + + client.send(scrape_request.into()).await; + + let response = client.receive().await; + + assert!(is_scrape_response(&response)); + } } From 508803a73aaa651ee50afe99551fd18fcc15f141 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Oct 2022 12:36:35 +0100 Subject: [PATCH 0155/1003] refactor: 
extract function --- tests/udp.rs | 47 +++++++++++++++-------------------------------- 1 file changed, 15 insertions(+), 32 deletions(-) diff --git a/tests/udp.rs b/tests/udp.rs index 31f631fd6..83fec1fb3 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -240,6 +240,19 @@ mod udp_tracker_server { assert!(is_connect_response(&response, TransactionId(123))); } + async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { + let connect_request = ConnectRequest { transaction_id }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + match response { + Response::Connect(connect_response) => connect_response.connection_id, + _ => panic!("error connecting to udp server {:?}", response), + } + } + #[tokio::test] async fn should_return_an_announce_response_when_the_client_sends_an_announce_request() { let configuration = tracker_configuration(); @@ -248,22 +261,7 @@ mod udp_tracker_server { let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; - // todo: extract client.connect() -> ConnectionId - - // Get connection id before sending the announce request - - let connect_request = ConnectRequest { - transaction_id: TransactionId(123), - }; - - client.send(connect_request.into()).await; - - let response = client.receive().await; - - let connection_id = match response { - Response::Connect(connect_response) => connect_response.connection_id, - _ => panic!("error connecting to udp server {:?}", response), - }; + let connection_id = send_connection_request(TransactionId(123), &client).await; // Send announce request @@ -297,22 +295,7 @@ mod udp_tracker_server { let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; - // todo: extract client.connect() -> ConnectionId - - // Get connection id before sending the announce request - - let connect_request = ConnectRequest { - transaction_id: TransactionId(123i32), - }; - - 
client.send(connect_request.into()).await; - - let response = client.receive().await; - - let connection_id = match response { - Response::Connect(connect_response) => connect_response.connection_id, - _ => panic!("error connecting to udp server {:?}", response), - }; + let connection_id = send_connection_request(TransactionId(123), &client).await; // Send scrape request From d2c69fa5f531520192de03e66335fa842e06ee7a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 09:24:58 +0100 Subject: [PATCH 0156/1003] fix: test tear up --- tests/udp.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/udp.rs b/tests/udp.rs index 83fec1fb3..00bb42366 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -51,7 +51,7 @@ mod udp_tracker_server { lazy_static::initialize(&static_time::TIME_AT_APP_START); // Initialize stats tracker - let stats_tracker = StatsTracker::new_running_instance(); + let stats_tracker = StatsTracker::new_active_instance(); // Initialize Torrust tracker let tracker = match TorrentTracker::new(configuration.clone(), Box::new(stats_tracker)) { From 5dcea43da6fe6e7590ff69f4ac04200d2394acde Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 13:44:22 +0100 Subject: [PATCH 0157/1003] fix: initialize loggin once `setup_logging` cannot be called twice becuase it panics. 
--- src/logging.rs | 42 ++++++++++++++++++++++++------------------ tests/udp.rs | 2 +- 2 files changed, 25 insertions(+), 19 deletions(-) diff --git a/src/logging.rs b/src/logging.rs index 7552a5459..5d0efa8a4 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -1,27 +1,32 @@ -use log::info; +use std::str::FromStr; +use std::sync::Once; + +use log::{info, LevelFilter}; use crate::Configuration; +static INIT: Once = Once::new(); + pub fn setup_logging(cfg: &Configuration) { - let log_level = match &cfg.log_level { - None => log::LevelFilter::Info, - Some(level) => match level.as_str() { - "off" => log::LevelFilter::Off, - "trace" => log::LevelFilter::Trace, - "debug" => log::LevelFilter::Debug, - "info" => log::LevelFilter::Info, - "warn" => log::LevelFilter::Warn, - "error" => log::LevelFilter::Error, - _ => { - panic!("Unknown log level encountered: '{}'", level.as_str()); - } - }, - }; - - if log_level == log::LevelFilter::Off { + let level = config_level_or_default(&cfg.log_level); + + if level == log::LevelFilter::Off { return; } + INIT.call_once(|| { + stdout_config(level); + }); +} + +fn config_level_or_default(log_level: &Option) -> LevelFilter { + match log_level { + None => log::LevelFilter::Info, + Some(level) => LevelFilter::from_str(level).unwrap(), + } +} + +fn stdout_config(level: LevelFilter) { if let Err(_err) = fern::Dispatch::new() .format(|out, message, record| { out.finish(format_args!( @@ -32,11 +37,12 @@ pub fn setup_logging(cfg: &Configuration) { message )) }) - .level(log_level) + .level(level) .chain(std::io::stdout()) .apply() { panic!("Failed to initialize logging.") } + info!("logging initialized."); } diff --git a/tests/udp.rs b/tests/udp.rs index 00bb42366..b391b922f 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -25,7 +25,7 @@ mod udp_tracker_server { fn tracker_configuration() -> Arc { let mut config = Configuration::default(); - config.log_level = Some("off".to_owned()); // "off" is necessary when running multiple trackers + 
config.log_level = Some("off".to_owned()); config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", ephemeral_random_port()); Arc::new(config) } From 971b34decc01c75b05f9dd0384fa9ad08d532d13 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 24 Oct 2022 15:45:18 +0200 Subject: [PATCH 0158/1003] chore: update project dependencies * minor api changes for config applied --- Cargo.lock | 984 +++++++++++++++++++++++++++++++------------------- Cargo.toml | 30 +- src/config.rs | 9 +- 3 files changed, 643 insertions(+), 380 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1a4fe8b4f..0a60397f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21,13 +21,22 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "0.7.18" +version = "0.7.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" dependencies = [ "memchr", ] +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + [[package]] name = "ansi_term" version = "0.12.1" @@ -61,9 +70,9 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "async-trait" -version = "0.1.53" +version = "0.1.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600" +checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" dependencies = [ "proc-macro2", "quote", @@ -89,15 +98,15 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "base-x" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dc19a4937b4fbd3fe3379793130e42060d10627a360f2127802b10b87e7baf74" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "bigdecimal" @@ -107,8 +116,8 @@ checksum = "d1e50562e37200edf7c6c43e54a08e64a5553bfb59d9c297d5572512aa517256" dependencies = [ "num-bigint 0.3.3", "num-integer", - "num-traits 0.2.15", - "serde 1.0.137", + "num-traits", + "serde", ] [[package]] @@ -169,9 +178,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" dependencies = [ "generic-array", ] @@ -194,9 +203,9 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.9.1" +version = "3.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" +checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" [[package]] name = "byteorder" @@ -206,9 +215,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" [[package]] name = "cc" @@ -222,7 +231,7 @@ version = "0.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" dependencies = [ - "nom", + "nom 5.1.2", ] [[package]] @@ -233,23 +242,25 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1" dependencies = [ - "libc", + "iana-time-zone", + "js-sys", "num-integer", - "num-traits 0.2.15", - "serde 1.0.137", + "num-traits", + "serde", "time 0.1.44", + "wasm-bindgen", "winapi", ] [[package]] name = "clang-sys" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cc00842eed744b858222c4c9faf7243aafc6d33f92f96935263ef4d8a41ce21" +checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" dependencies = [ "glob", "libc", @@ -280,17 +291,30 @@ dependencies = [ "cc", ] +[[package]] +name = "codespan-reporting" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +dependencies = [ + "termcolor", + "unicode-width", +] + [[package]] name = "config" -version = "0.11.0" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1b9d958c2b1368a663f05538fc1b5975adce1e19f435acceae987aceeeb369" +checksum = "11f1667b8320afa80d69d8bbe40830df2c8a06003d86f73d8e003b2c48df416d" dependencies = [ + "async-trait", + "json5", "lazy_static", - "nom", + "nom 7.1.1", + "pathdiff", + "ron", "rust-ini", - "serde 1.0.137", - "serde-hjson", + "serde", "serde_json", "toml", "yaml-rust", @@ -326,9 +350,9 @@ checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" 
[[package]] name = "cpufeatures" -version = "0.2.2" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] @@ -344,14 +368,58 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", ] +[[package]] +name = "cxx" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b7d4e43b25d3c994662706a1d4fcfc32aaa6afd287502c111b237093bb23f3a" +dependencies = [ + "cc", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84f8829ddc213e2c1368e51a2564c552b65a8cb6a28f31e576270ac81d5e5827" +dependencies = [ + "cc", + "codespan-reporting", + "once_cell", + "proc-macro2", + "quote", + "scratch", + "syn", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e72537424b474af1460806647c41d4b6d35d09ef7fe031c5c2fa5766047cc56a" + +[[package]] +name = "cxxbridge-macro" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "309e4fb93eed90e1e14bea0da16b209f81813ba9fc7830c20ed151dd7bc0a4d7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "darling" version = "0.14.1" @@ -422,11 +490,11 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" +checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" dependencies = [ - "block-buffer 0.10.2", + "block-buffer 0.10.3", "crypto-common", ] @@ -436,11 +504,17 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" +[[package]] +name = "dlv-list" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" + [[package]] name = "either" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" [[package]] name = "env_logger" @@ -469,9 +543,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" dependencies = [ "instant", ] @@ -487,13 +561,11 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b39522e96686d38f4bc984b9198e3a0613264abaebaff2c5c918bfa6b6da09af" +checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" dependencies = [ - "cfg-if", "crc32fast", - "libc", "libz-sys", "miniz_oxide", ] @@ -521,11 +593,10 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ - "matches", "percent-encoding", ] @@ -601,9 +672,9 @@ checksum = "1847abb9cb65d566acd5942e94aea9c8f547ad02c98e1649326fc0e8910b8b1e" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" dependencies = [ "futures-channel", "futures-core", @@ -616,9 +687,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" dependencies = [ "futures-core", "futures-sink", @@ -626,15 +697,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" dependencies = [ "futures-core", "futures-task", @@ -643,15 +714,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" dependencies = [ "proc-macro2", "quote", @@ -660,21 +731,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" dependencies = [ "futures-channel", "futures-core", @@ -690,9 +761,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.5" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", @@ -700,13 +771,13 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.6" +version = "0.2.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] @@ -717,9 +788,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" +checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" dependencies = [ "bytes", "fnv", @@ -730,7 +801,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.1", + "tokio-util", "tracing", ] @@ -754,18 +825,18 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452c155cb93fecdfb02a73dd57b5d8e442c2063bd7aac72f1bc5e4263a43086" +checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" dependencies = [ "hashbrown 0.12.3", ] [[package]] name = "headers" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d" +checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ "base64", "bitflags", @@ -774,7 +845,7 @@ dependencies = [ "http", "httpdate", "mime", - "sha-1 0.10.0", + "sha1 0.10.5", ] [[package]] @@ -803,9 +874,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb" 
+checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", @@ -814,9 +885,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", @@ -825,9 +896,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -843,9 +914,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.18" +version = "0.14.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" +checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" dependencies = [ "bytes", "futures-channel", @@ -865,6 +936,30 @@ dependencies = [ "want", ] +[[package]] +name = "iana-time-zone" +version = "0.1.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5a6ef98976b22b3b7f2f3a806f858cb862044cfa66805aa3ad84cb3d3b785ed" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "winapi", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +dependencies = [ + "cxx", + "cxx-build", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -873,24 +968,23 @@ 
checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.2.3" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" dependencies = [ - "matches", "unicode-bidi", "unicode-normalization", ] [[package]] name = "indexmap" -version = "1.8.1" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg", - "hashbrown 0.11.2", - "serde 1.0.137", + "hashbrown 0.12.3", + "serde", ] [[package]] @@ -916,19 +1010,30 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.1" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" +checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" [[package]] name = "js-sys" -version = "0.3.57" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "671a26f820db17c2a2750743f1dd03bafd15b98c9f30c7c2628c024c05d73397" +checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json5" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" +dependencies = [ + "pest", + "pest_derive", + "serde", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -966,9 +1071,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.125" +version = "0.2.136" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5916d2ae698f6de9bfb891ad7a8d65c09d232dc58cc4ac433c7da3b2fd84bc2b" +checksum = "55edcf6c0bb319052dea84732cf99db461780fd5e8d3eb46ab6ff312ab31f197" [[package]] name = "libloading" @@ -993,26 +1098,35 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.6" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e7e15d7610cce1d9752e137625f14e61a28cd45929b6e12e47b50fe154ee2e" +checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" dependencies = [ "cc", "pkg-config", "vcpkg", ] +[[package]] +name = "link-cplusplus" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369" +dependencies = [ + "cc", +] + [[package]] name = "linked-hash-map" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "lock_api" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" dependencies = [ "autocfg", "scopeguard", @@ -1036,12 +1150,6 @@ dependencies = [ "hashbrown 0.11.2", ] -[[package]] -name = "matches" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" - [[package]] name = "memchr" version = "2.5.0" @@ -1073,36 +1181,31 @@ dependencies = [ "unicase", ] +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] 
name = "miniz_oxide" -version = "0.5.1" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b29bd4bc3f33391105ebee3589c19197c4271e3e5a9ec9bfe8127eeff8f082" +checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52da4364ffb0e4fe33a9841a98a3f3014fb964045ce4f7a45a398243c8d6b0c9" +checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" dependencies = [ "libc", "log", - "miow", - "ntapi", "wasi 0.11.0+wasi-snapshot-preview1", - "winapi", -] - -[[package]] -name = "miow" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" -dependencies = [ - "winapi", + "windows-sys 0.36.1", ] [[package]] @@ -1141,7 +1244,7 @@ dependencies = [ "once_cell", "pem", "percent-encoding", - "serde 1.0.137", + "serde", "serde_json", "socket2", "twox-hash", @@ -1170,14 +1273,14 @@ dependencies = [ "lazy_static", "lexical", "num-bigint 0.4.3", - "num-traits 0.2.15", + "num-traits", "rand", "regex", "rust_decimal", "saturating", - "serde 1.0.137", + "serde", "serde_json", - "sha1", + "sha1 0.6.1", "sha2", "smallvec", "subprocess", @@ -1232,18 +1335,18 @@ version = "5.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" dependencies = [ - "lexical-core", "memchr", "version_check", ] [[package]] -name = "ntapi" -version = "0.3.7" +name = "nom" +version = "7.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f" +checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" dependencies = [ - "winapi", + "memchr", + 
"minimal-lexical", ] [[package]] @@ -1254,7 +1357,7 @@ checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" dependencies = [ "autocfg", "num-integer", - "num-traits 0.2.15", + "num-traits", ] [[package]] @@ -1265,7 +1368,7 @@ checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" dependencies = [ "autocfg", "num-integer", - "num-traits 0.2.15", + "num-traits", ] [[package]] @@ -1275,16 +1378,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", - "num-traits 0.2.15", -] - -[[package]] -name = "num-traits" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" -dependencies = [ - "num-traits 0.2.15", + "num-traits", ] [[package]] @@ -1317,9 +1411,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.10.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" +checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" [[package]] name = "opaque-debug" @@ -1329,9 +1423,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.41" +version = "0.10.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "618febf65336490dfcf20b73f885f5651a0c89c64c2d4a8c3662585a70bf5bd0" +checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13" dependencies = [ "bitflags", "cfg-if", @@ -1370,9 +1464,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.75" +version = "0.9.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f" +checksum = 
"b03b84c3b2d099b81f0953422b4d4ad58761589d0229b5506356afca05a3670a" dependencies = [ "autocfg", "cc", @@ -1382,31 +1476,45 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "ordered-multimap" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a" +dependencies = [ + "dlv-list", + "hashbrown 0.12.3", +] + [[package]] name = "parking_lot" -version = "0.11.2" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ - "instant", "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.8.5" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" dependencies = [ "cfg-if", - "instant", "libc", "redox_syscall", "smallvec", - "winapi", + "windows-sys 0.42.0", ] +[[package]] +name = "pathdiff" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" + [[package]] name = "peeking_take_while" version = "0.1.2" @@ -1426,24 +1534,68 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" + +[[package]] +name = "pest" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc7bc69c062e492337d74d59b120c274fd3d261b6bf6d3207d499b4b379c41a" +dependencies = [ + "thiserror", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = 
"2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b75706b9642ebcb34dab3bc7750f811609a0eb1dd8b88c2d15bf628c1c65b2" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4f9272122f5979a6511a749af9db9bfc810393f63119970d7085fed1c4ea0db" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pest_meta" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "4c8717927f9b79515e565a64fe46c38b8cd0427e64c40680b14a7365ab09ac8d" +dependencies = [ + "once_cell", + "pest", + "sha1 0.10.5", +] [[package]] name = "pin-project" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", @@ -1482,11 +1634,11 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.38" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9027b48e9d4c9175fa2218adf3557f91c1137021739951d4932f5f8268ac48aa" +checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" dependencies = [ - "unicode-xid", + "unicode-ident", ] [[package]] @@ -1497,18 
+1649,18 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.18" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" dependencies = [ "proc-macro2", ] [[package]] name = "r2d2" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545c5bc2b880973c9c10e4067418407a0ccaa3091781d1671d46eb35107cb26f" +checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" dependencies = [ "log", "parking_lot", @@ -1564,27 +1716,27 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "redox_syscall" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] [[package]] name = "regex" -version = "1.5.5" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" +checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" dependencies = [ "aho-corasick", "memchr", @@ -1593,9 +1745,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.25" +version = "0.6.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" [[package]] name = "remove_dir_all" @@ -1621,6 +1773,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "ron" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" +dependencies = [ + "base64", + "bitflags", + "serde", +] + [[package]] name = "rusqlite" version = "0.28.0" @@ -1637,19 +1800,23 @@ dependencies = [ [[package]] name = "rust-ini" -version = "0.13.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e52c148ef37f8c375d49d5a73aa70713125b7f19095948a923f80afdeb22ec2" +checksum = "f6d5f2436026b4f6e79dc829837d467cc7e9a55ee40e750d716713540715a2df" +dependencies = [ + "cfg-if", + "ordered-multimap", +] [[package]] name = "rust_decimal" -version = "1.23.1" +version = "1.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22dc69eadbf0ee2110b8d20418c0c6edbaefec2811c4963dc17b6344e11fe0f8" +checksum = "ee9164faf726e4f3ece4978b25ca877ddc6802fa77f38cdccb32c7f805ecd70c" dependencies = [ "arrayvec 0.7.2", - "num-traits 0.2.15", - "serde 1.0.137", + "num-traits", + "serde", ] [[package]] @@ -1673,27 +1840,35 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.9", + "semver 1.0.14", ] [[package]] name = "rustls" -version = "0.19.1" +version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" dependencies = [ - "base64", "log", "ring", "sct", "webpki", ] +[[package]] +name = "rustls-pemfile" +version = "0.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" +dependencies = [ + "base64", +] + [[package]] name = "ryu" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" +checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" [[package]] name = "safemem" @@ -1709,19 +1884,19 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] name = "schannel" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" dependencies = [ "lazy_static", - "winapi", + "windows-sys 0.36.1", ] [[package]] name = "scheduled-thread-pool" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" +checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf" dependencies = [ "parking_lot", ] @@ -1738,11 +1913,17 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "scratch" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898" + [[package]] name = "sct" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ "ring", "untrusted", @@ 
-1750,9 +1931,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.6.1" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" +checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" dependencies = [ "bitflags", "core-foundation", @@ -1782,9 +1963,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.9" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cb243bdfdb5936c8dc3c45762a19d12ab4550cdc753bc247637d4ec35a040fd" +checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" [[package]] name = "semver-parser" @@ -1794,55 +1975,37 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "0.8.23" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" - -[[package]] -name = "serde" -version = "1.0.137" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" +checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" dependencies = [ "serde_derive", ] -[[package]] -name = "serde-hjson" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a3a4e0ea8a88553209f6cc6cfe8724ecad22e1acf372793c27d995290fe74f8" -dependencies = [ - "lazy_static", - "num-traits 0.1.43", - "regex", - "serde 0.8.23", -] - [[package]] name = "serde_bencode" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "934d8bdbaa0126dafaea9a8833424a211d9661897717846c6bb782349ca1c30d" dependencies = [ - "serde 1.0.137", + "serde", "serde_bytes", ] [[package]] name = "serde_bytes" -version = "0.11.6" +version = "0.11.7" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212e73464ebcde48d723aa02eb270ba62eff38a9b732df31f33f1b4e145f3a54" +checksum = "cfc50e8183eeeb6178dcb167ae34a8051d63535023ae38b5d8d12beae193d37b" dependencies = [ - "serde 1.0.137", + "serde", ] [[package]] name = "serde_derive" -version = "1.0.137" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" +checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" dependencies = [ "proc-macro2", "quote", @@ -1851,13 +2014,13 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.81" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" +checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" dependencies = [ "itoa", "ryu", - "serde 1.0.137", + "serde", ] [[package]] @@ -1869,30 +2032,30 @@ dependencies = [ "form_urlencoded", "itoa", "ryu", - "serde 1.0.137", + "serde", ] [[package]] name = "serde_with" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89df7a26519371a3cce44fbb914c2819c84d9b897890987fa3ab096491cc0ea8" +checksum = "368f2d60d049ea019a84dcd6687b0d1e0030fe663ae105039bdf967ed5e6a9a7" dependencies = [ "base64", "chrono", "hex", "indexmap", - "serde 1.0.137", + "serde", "serde_json", "serde_with_macros", - "time 0.3.13", + "time 0.3.16", ] [[package]] name = "serde_with_macros" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de337f322382fcdfbb21a014f7c224ee041a23785651db67b9827403178f698f" +checksum = "1ccadfacf6cf10faad22bbadf55986bdd0856edfb5d9210aa1dcf1f516e84e93" dependencies = [ "darling", "proc-macro2", @@ -1900,19 +2063,6 @@ dependencies = [ "syn", ] -[[package]] -name = 
"sha-1" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - [[package]] name = "sha-1" version = "0.10.0" @@ -1921,7 +2071,7 @@ checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.3", + "digest 0.10.5", ] [[package]] @@ -1933,6 +2083,17 @@ dependencies = [ "sha1_smol", ] +[[package]] +name = "sha1" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.5", +] + [[package]] name = "sha1_smol" version = "1.0.0" @@ -1969,21 +2130,24 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +dependencies = [ + "autocfg", +] [[package]] name = "smallvec" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi", @@ -2032,7 +2196,7 @@ checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" dependencies = [ 
"proc-macro2", "quote", - "serde 1.0.137", + "serde", "serde_derive", "syn", ] @@ -2046,10 +2210,10 @@ dependencies = [ "base-x", "proc-macro2", "quote", - "serde 1.0.137", + "serde", "serde_derive", "serde_json", - "sha1", + "sha1 0.6.1", "syn", ] @@ -2073,9 +2237,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "subprocess" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "055cf3ebc2981ad8f0a5a17ef6652f652d87831f79fddcba2ac57bcb9a0aa407" +checksum = "0c2e86926081dda636c546d8c5e641661049d7562a68f5488be4a1f7f66f6086" dependencies = [ "libc", "winapi", @@ -2083,13 +2247,13 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.92" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff7c592601f11445996a06f8ad0c27f094a58857c2f89e97974ab9235b92c52" +checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" dependencies = [ "proc-macro2", "quote", - "unicode-xid", + "unicode-ident", ] [[package]] @@ -2132,18 +2296,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.31" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" +checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.31" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" +checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" dependencies = [ "proc-macro2", "quote", @@ -2171,23 +2335,31 @@ dependencies = [ "libc", "standback", "stdweb", - "time-macros", + "time-macros 0.1.1", "version_check", "winapi", ] [[package]] name = "time" -version = "0.3.13" 
+version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db76ff9fa4b1458b3c7f077f3ff9887394058460d21e634355b273aaf11eea45" +checksum = "0fab5c8b9980850e06d92ddbe3ab839c062c801f3927c0fb8abd6fc8e918fbca" dependencies = [ "itoa", "libc", "num_threads", - "serde 1.0.137", + "serde", + "time-core", + "time-macros 0.2.5", ] +[[package]] +name = "time-core" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" + [[package]] name = "time-macros" version = "0.1.1" @@ -2198,6 +2370,15 @@ dependencies = [ "time-macros-impl", ] +[[package]] +name = "time-macros" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bb801831d812c562ae7d2bfb531f26e66e4e1f6b17307ba4149c5064710e5b" +dependencies = [ + "time-core", +] + [[package]] name = "time-macros-impl" version = "0.1.2" @@ -2228,16 +2409,16 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.18.1" +version = "1.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce653fb475565de9f6fb0614b28bca8df2c430c0cf84bcd9c843f15de5414cc" +checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" dependencies = [ + "autocfg", "bytes", "libc", "memchr", "mio", "num_cpus", - "once_cell", "pin-project-lite", "signal-hook-registry", "socket2", @@ -2258,9 +2439,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.22.0" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls", "tokio", @@ -2269,9 +2450,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.8" +version = "0.1.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" dependencies = [ "futures-core", "pin-project-lite", @@ -2280,36 +2461,21 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.15.0" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" +checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" dependencies = [ "futures-util", "log", - "pin-project", "tokio", "tungstenite", ] [[package]] name = "tokio-util" -version = "0.6.9" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "log", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tokio-util" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0edfdeb067411dba2044da6d1cb2df793dd35add7888d73c16e3381ded401764" +checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", "futures-core", @@ -2325,7 +2491,7 @@ version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ - "serde 1.0.137", + "serde", ] [[package]] @@ -2349,54 +2515,42 @@ dependencies = [ "r2d2_mysql", "r2d2_sqlite", "rand", - "serde 1.0.137", + "serde", "serde_bencode", "serde_json", "serde_with", "thiserror", "tokio", "toml", - "uuid 1.1.2", + "uuid 1.2.1", "warp", ] [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.34" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", "pin-project-lite", - "tracing-attributes", "tracing-core", ] -[[package]] -name = "tracing-attributes" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "tracing-core" -version = "0.1.26" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] @@ -2407,9 +2561,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "tungstenite" -version = "0.14.0" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0b2d8558abd2e276b0a8df5c05a2ec762609344191e5fd23e292c910e9165b5" +checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ "base64", "byteorder", @@ -2418,7 +2572,7 @@ dependencies = [ "httparse", "log", "rand", - "sha-1 0.9.8", + "sha-1", "thiserror", "url", "utf-8", @@ -2450,6 +2604,12 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +[[package]] +name = "ucd-trie" +version = "0.1.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" + [[package]] name = "unicase" version = "2.6.0" @@ -2465,26 +2625,26 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +[[package]] +name = "unicode-ident" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" + [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-width" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" - -[[package]] -name = "unicode-xid" -version = "0.2.3" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "untrusted" @@ -2494,13 +2654,12 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", "idna", - "matches", "percent-encoding", ] @@ -2518,9 +2677,9 @@ checksum = 
"bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" [[package]] name = "uuid" -version = "1.1.2" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd6469f4314d5f1ffec476e05f17cc9a78bc7a27a6a857842170bdf8d6f98d2f" +checksum = "feb41e78f93363bb2df8b0e86a2ca30eed7806ea16ea0c790d757cf93f79be83" dependencies = [ "getrandom", ] @@ -2555,9 +2714,9 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cef4e1e9114a4b7f1ac799f16ce71c14de5778500c5450ec6b7b920c55b587e" +checksum = "ed7b8be92646fc3d18b06147664ebc5f48d222686cb11a8755e561a735aacc6d" dependencies = [ "bytes", "futures-channel", @@ -2571,15 +2730,16 @@ dependencies = [ "multipart", "percent-encoding", "pin-project", + "rustls-pemfile", "scoped-tls", - "serde 1.0.137", + "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-rustls", "tokio-stream", "tokio-tungstenite", - "tokio-util 0.6.9", + "tokio-util", "tower-service", "tracing", ] @@ -2598,9 +2758,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.80" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -2608,13 +2768,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.80" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", "syn", @@ -2623,9 +2783,9 @@ dependencies 
= [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.80" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2633,9 +2793,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.80" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" +checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" dependencies = [ "proc-macro2", "quote", @@ -2646,15 +2806,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.80" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" +checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" [[package]] name = "web-sys" -version = "0.3.57" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" +checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" dependencies = [ "js-sys", "wasm-bindgen", @@ -2662,9 +2822,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.21.4" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ "ring", "untrusted", @@ -2710,6 +2870,106 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] 
+name = "windows-sys" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +dependencies = [ + "windows_aarch64_msvc 0.36.1", + "windows_i686_gnu 0.36.1", + "windows_i686_msvc 0.36.1", + "windows_x86_64_gnu 0.36.1", + "windows_x86_64_msvc 0.36.1", +] + +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.0", + "windows_i686_gnu 0.42.0", + "windows_i686_msvc 0.42.0", + "windows_x86_64_gnu 0.42.0", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" + +[[package]] +name = "windows_i686_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" + +[[package]] +name = "windows_i686_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" + [[package]] name = "wyz" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index 89fdffa99..c7e3790bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ lto = "fat" strip = true [dependencies] -tokio = { version = "1.7", features = [ +tokio = { version = "1", features = [ "rt-multi-thread", "net", "sync", @@ -29,33 +29,33 @@ tokio = { version = "1.7", features = [ serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2.3" -serde_json = "1.0.72" -serde_with = "2.0.0" +serde_json = "1.0" +serde_with = "2.0" hex = "0.4.3" -percent-encoding = "2.1.0" +percent-encoding = "2" binascii = "0.1" -lazy_static = "1.4.0" +lazy_static = "1.4" 
-openssl = { version = "0.10.41", features = ["vendored"] } +openssl = { version = "0.10", features = ["vendored"] } warp = { version = "0.3", features = ["tls"] } -config = "0.11" +config = "0.13" toml = "0.5" log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = "0.4" -r2d2 = "0.8.8" -r2d2_mysql = "21.0.0" -r2d2_sqlite = { version = "0.21.0", features = ["bundled"] } +r2d2 = "0.8" +r2d2_mysql = "21" +r2d2_sqlite = { version = "0.21", features = ["bundled"] } -rand = "0.8.4" +rand = "0.8" derive_more = "0.99" thiserror = "1.0" -futures = "0.3.21" -async-trait = "0.1.52" +futures = "0.3" +async-trait = "0.1" -aquatic_udp_protocol = "0.2.0" -uuid = { version = "1.1.2", features = ["v4"] } +aquatic_udp_protocol = "0.2" +uuid = { version = "1", features = ["v4"] } diff --git a/src/config.rs b/src/config.rs index c094eb2f9..b59d572ea 100644 --- a/src/config.rs +++ b/src/config.rs @@ -128,10 +128,13 @@ impl Configuration { } pub fn load_from_file(path: &str) -> Result { - let mut config = Config::new(); + let config_builder = Config::builder(); + + #[allow(unused_assignments)] + let mut config = Config::default(); if Path::new(path).exists() { - config.merge(File::with_name(path))?; + config = config_builder.add_source(File::with_name(path)).build()?; } else { eprintln!("No config file found."); eprintln!("Creating config file.."); @@ -143,7 +146,7 @@ impl Configuration { } let torrust_config: Configuration = config - .try_into() + .try_deserialize() .map_err(|e| ConfigError::Message(format!("Errors while processing config: {}.", e)))?; Ok(torrust_config) From a334f17e5f62bc9357cc520c7d4fd60a0a1b04b3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 14:33:56 +0100 Subject: [PATCH 0159/1003] refactor: extract struct StatsEventSender --- src/tracker/statistics.rs | 44 ++++++++++++++++++++++++++++----------- src/udp/handlers.rs | 15 ++++++++----- 2 files changed, 42 insertions(+), 17 deletions(-) diff --git 
a/src/tracker/statistics.rs b/src/tracker/statistics.rs index fb4e4c0fe..7d1d17c51 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -62,24 +62,31 @@ pub struct StatsTracker { } impl StatsTracker { - pub fn new_active_instance() -> Self { - Self::new_instance(true) - } + pub fn new_active_instance() -> (Self, StatsEventSender) { + let mut stats_tracker = Self { + channel_sender: None, + stats: Arc::new(RwLock::new(TrackerStatistics::new())), + }; - pub fn new_inactive_instance() -> Self { - Self::new_instance(false) + let stats_event_sender = stats_tracker.run_worker(); + + (stats_tracker, stats_event_sender) } - pub fn new_instance(active: bool) -> Self { - let mut stats_tracker = Self { + pub fn new_inactive_instance() -> Self { + Self { channel_sender: None, stats: Arc::new(RwLock::new(TrackerStatistics::new())), - }; + } + } - if active { - stats_tracker.run_worker(); + pub fn new_instance(active: bool) -> Self { + if !active { + return Self::new_inactive_instance(); } + let (stats_tracker, _stats_event_sender) = Self::new_active_instance(); + stats_tracker } @@ -90,11 +97,11 @@ impl StatsTracker { } } - pub fn run_worker(&mut self) { + pub fn run_worker(&mut self) -> StatsEventSender { let (tx, mut rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); // set send channel on stats_tracker - self.channel_sender = Some(tx); + self.channel_sender = Some(tx.clone()); let stats = self.stats.clone(); @@ -142,6 +149,8 @@ impl StatsTracker { drop(stats_lock); } }); + + StatsEventSender { sender: tx } } } @@ -161,6 +170,17 @@ impl TrackerStatisticsEventSender for StatsTracker { } } +pub struct StatsEventSender { + sender: Sender, +} + +#[async_trait] +impl TrackerStatisticsEventSender for StatsEventSender { + async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { + Some(self.sender.send(event).await) + } +} + #[async_trait] pub trait TrackerStatisticsRepository: Sync + Send { async fn get_stats(&self) -> RwLockReadGuard<'_, 
TrackerStatistics>; diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 845b860e9..7992bcaf0 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -271,19 +271,24 @@ mod tests { fn initialized_public_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_active_instance())).unwrap()) + initialized_tracker(configuration) } fn initialized_private_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_active_instance())).unwrap()) + initialized_tracker(configuration) } fn initialized_whitelisted_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_active_instance())).unwrap()) + initialized_tracker(configuration) } + fn initialized_tracker(configuration: Arc) -> Arc { + let (stats_tracker, _stats_event_sender) = StatsTracker::new_active_instance(); + Arc::new(TorrentTracker::new(configuration, Box::new(stats_tracker)).unwrap()) + } + fn sample_ipv4_remote_addr() -> SocketAddr { sample_ipv4_socket_address() } @@ -969,8 +974,8 @@ mod tests { #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let tracker = - Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_active_instance())).unwrap()); + let (stats_tracker, _stats_event_sender) = StatsTracker::new_active_instance(); + let tracker = Arc::new(TorrentTracker::new(configuration, Box::new(stats_tracker)).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = 
Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); From b784442cb676edaeee6caa8941c2f050d0e9e897 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 16:25:17 +0100 Subject: [PATCH 0160/1003] refactor: inject new struct StatsEventSender into TorrentTracker Parallel change. We are still using the old TrackerStatsService to send events. --- src/main.rs | 10 ++++- src/tracker/statistics.rs | 6 +-- src/tracker/tracker.rs | 10 ++++- src/udp/handlers.rs | 84 ++++++++++++++++++++++++++++++--------- tests/udp.rs | 5 ++- 5 files changed, 88 insertions(+), 27 deletions(-) diff --git a/src/main.rs b/src/main.rs index dcb92acb8..f995ba377 100644 --- a/src/main.rs +++ b/src/main.rs @@ -24,10 +24,16 @@ async fn main() { }; // Initialize stats tracker - let stats_tracker = StatsTracker::new_instance(config.tracker_usage_statistics); + let mut stats_tracker = StatsTracker::new_inactive_instance(); + + let mut stats_event_sender = None; + + if config.tracker_usage_statistics { + stats_event_sender = Some(stats_tracker.run_worker()); + } // Initialize Torrust tracker - let tracker = match TorrentTracker::new(config.clone(), Box::new(stats_tracker)) { + let tracker = match TorrentTracker::new(config.clone(), Box::new(stats_tracker), stats_event_sender) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 7d1d17c51..8b57d6bfe 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -62,7 +62,7 @@ pub struct StatsTracker { } impl StatsTracker { - pub fn new_active_instance() -> (Self, StatsEventSender) { + pub fn new_active_instance() -> (Self, Box) { let mut stats_tracker = Self { channel_sender: None, stats: Arc::new(RwLock::new(TrackerStatistics::new())), @@ -97,7 +97,7 @@ impl StatsTracker { } } - pub fn run_worker(&mut self) -> StatsEventSender { + pub fn run_worker(&mut self) -> Box { let (tx, mut rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); // set send channel on 
stats_tracker @@ -150,7 +150,7 @@ impl StatsTracker { } }); - StatsEventSender { sender: tx } + Box::new(StatsEventSender { sender: tx }) } } diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index 5499eebeb..b1d009077 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -12,7 +12,7 @@ use crate::databases::database::Database; use crate::mode::TrackerMode; use crate::peer::TorrentPeer; use crate::protocol::common::InfoHash; -use crate::statistics::{TrackerStatistics, TrackerStatisticsEvent, TrackerStatsService}; +use crate::statistics::{TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender, TrackerStatsService}; use crate::tracker::key; use crate::tracker::key::AuthKey; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; @@ -25,11 +25,16 @@ pub struct TorrentTracker { whitelist: RwLock>, torrents: RwLock>, stats_tracker: Box, + _stats_event_sender: Option>, database: Box, } impl TorrentTracker { - pub fn new(config: Arc, stats_tracker: Box) -> Result { + pub fn new( + config: Arc, + stats_tracker: Box, + _stats_event_sender: Option>, + ) -> Result { let database = database::connect_database(&config.db_driver, &config.db_path)?; Ok(TorrentTracker { @@ -39,6 +44,7 @@ impl TorrentTracker { whitelist: RwLock::new(std::collections::HashSet::new()), torrents: RwLock::new(std::collections::BTreeMap::new()), stats_tracker, + _stats_event_sender, database, }) } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 7992bcaf0..fc3e0968f 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -285,9 +285,9 @@ mod tests { } fn initialized_tracker(configuration: Arc) -> Arc { - let (stats_tracker, _stats_event_sender) = StatsTracker::new_active_instance(); - Arc::new(TorrentTracker::new(configuration, Box::new(stats_tracker)).unwrap()) - } + let (stats_tracker, stats_event_sender) = StatsTracker::new_active_instance(); + Arc::new(TorrentTracker::new(configuration, Box::new(stats_tracker), 
Some(stats_event_sender)).unwrap()) + } fn sample_ipv4_remote_addr() -> SocketAddr { sample_ipv4_socket_address() @@ -371,6 +371,30 @@ mod tests { } } + struct StatsEventSenderMock { + expected_event: Option, + } + + impl StatsEventSenderMock { + fn new() -> Self { + Self { expected_event: None } + } + + fn should_throw_event(&mut self, expected_event: TrackerStatisticsEvent) { + self.expected_event = Some(expected_event); + } + } + + #[async_trait] + impl TrackerStatisticsEventSender for StatsEventSenderMock { + async fn send_event(&self, _event: TrackerStatisticsEvent) -> Option>> { + if self.expected_event.is_some() { + assert_eq!(_event, *self.expected_event.as_ref().unwrap()); + } + None + } + } + #[async_trait] impl TrackerStatisticsRepository for TrackerStatsServiceMock { async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { @@ -413,7 +437,10 @@ mod tests { use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; - use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr, TrackerStatsServiceMock}; + use super::{ + default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr, StatsEventSenderMock, + TrackerStatsServiceMock, + }; use crate::statistics::TrackerStatisticsEvent; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; @@ -467,11 +494,13 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let stats_event_sender = Box::new(StatsEventSenderMock::new()); let client_socket_address = sample_ipv4_socket_address(); tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Connect); - let torrent_tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + let torrent_tracker = + 
Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap()); handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) .await .unwrap(); @@ -480,10 +509,12 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let stats_event_sender = Box::new(StatsEventSenderMock::new()); tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Connect); - let torrent_tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + let torrent_tracker = + Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap()); handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) .await .unwrap(); @@ -577,8 +608,8 @@ mod tests { use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ - default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, TorrentPeerBuilder, - TrackerStatsServiceMock, + default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, StatsEventSenderMock, + TorrentPeerBuilder, TrackerStatsServiceMock, }; use crate::PeerId; @@ -718,10 +749,13 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let stats_event_sender = Box::new(StatsEventSenderMock::new()); tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Announce); - let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + let tracker = Arc::new( + TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), + ); 
handle_announce( sample_ipv4_socket_address(), &AnnounceRequestBuilder::default().into(), @@ -794,8 +828,8 @@ mod tests { use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ - default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, TorrentPeerBuilder, - TrackerStatsServiceMock, + default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, StatsEventSenderMock, + TorrentPeerBuilder, TrackerStatsServiceMock, }; use crate::PeerId; @@ -942,10 +976,13 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let stats_event_sender = Box::new(StatsEventSenderMock::new()); tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Announce); - let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + let tracker = Arc::new( + TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), + ); let remote_addr = sample_ipv6_remote_addr(); @@ -974,8 +1011,9 @@ mod tests { #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let (stats_tracker, _stats_event_sender) = StatsTracker::new_active_instance(); - let tracker = Arc::new(TorrentTracker::new(configuration, Box::new(stats_tracker)).unwrap()); + let (stats_tracker, stats_event_sender) = StatsTracker::new_active_instance(); + let tracker = + Arc::new(TorrentTracker::new(configuration, Box::new(stats_tracker), Some(stats_event_sender)).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -1243,16 +1281,21 @@ mod tests { use crate::statistics::TrackerStatisticsEvent; use 
crate::tracker::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr, TrackerStatsServiceMock}; + use crate::udp::handlers::tests::{ + default_tracker_config, sample_ipv4_remote_addr, StatsEventSenderMock, TrackerStatsServiceMock, + }; #[tokio::test] async fn should_send_the_upd4_scrape_event() { let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let stats_event_sender = Box::new(StatsEventSenderMock::new()); tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Scrape); let remote_addr = sample_ipv4_remote_addr(); - let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + let tracker = Arc::new( + TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), + ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) .await @@ -1267,16 +1310,21 @@ mod tests { use crate::statistics::TrackerStatisticsEvent; use crate::tracker::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr, TrackerStatsServiceMock}; + use crate::udp::handlers::tests::{ + default_tracker_config, sample_ipv6_remote_addr, StatsEventSenderMock, TrackerStatsServiceMock, + }; #[tokio::test] async fn should_send_the_upd6_scrape_event() { let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let stats_event_sender = Box::new(StatsEventSenderMock::new()); tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Scrape); let remote_addr = sample_ipv6_remote_addr(); - let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + let tracker = Arc::new( + TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), + ); 
handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) .await diff --git a/tests/udp.rs b/tests/udp.rs index b391b922f..d2b500d5a 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -51,10 +51,11 @@ mod udp_tracker_server { lazy_static::initialize(&static_time::TIME_AT_APP_START); // Initialize stats tracker - let stats_tracker = StatsTracker::new_active_instance(); + let (stats_tracker, stats_event_sender) = StatsTracker::new_active_instance(); // Initialize Torrust tracker - let tracker = match TorrentTracker::new(configuration.clone(), Box::new(stats_tracker)) { + let tracker = match TorrentTracker::new(configuration.clone(), Box::new(stats_tracker), Some(stats_event_sender)) + { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) From 720a5841c943364bfd99fba2337ea024324b6293 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 16:40:05 +0100 Subject: [PATCH 0161/1003] refactor: use StatsEventSender to send events instead of StatsTracker. 
--- src/tracker/tracker.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index b1d009077..80f6e549d 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -25,7 +25,7 @@ pub struct TorrentTracker { whitelist: RwLock>, torrents: RwLock>, stats_tracker: Box, - _stats_event_sender: Option>, + stats_event_sender: Option>, database: Box, } @@ -33,7 +33,7 @@ impl TorrentTracker { pub fn new( config: Arc, stats_tracker: Box, - _stats_event_sender: Option>, + stats_event_sender: Option>, ) -> Result { let database = database::connect_database(&config.db_driver, &config.db_path)?; @@ -44,7 +44,7 @@ impl TorrentTracker { whitelist: RwLock::new(std::collections::HashSet::new()), torrents: RwLock::new(std::collections::BTreeMap::new()), stats_tracker, - _stats_event_sender, + stats_event_sender, database, }) } @@ -242,7 +242,10 @@ impl TorrentTracker { } pub async fn send_stats_event(&self, event: TrackerStatisticsEvent) -> Option>> { - self.stats_tracker.send_event(event).await + match &self.stats_event_sender { + None => None, + Some(stats_event_sender) => stats_event_sender.send_event(event).await, + } } // Remove inactive peers and (optionally) peerless torrents From 5b73d801c99f6f2d92125ad46c2cc8a39b4c68c1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 17:08:24 +0100 Subject: [PATCH 0162/1003] refactor: removed unused code and extract fn - The StatsTracker does not need anymore the channel sender. - A setup function for statistics was extracted. 
--- src/lib.rs | 1 + src/main.rs | 12 +++------- src/stats.rs | 36 ++++++++++++++++++++++++++++ src/tracker/statistics.rs | 50 +-------------------------------------- 4 files changed, 41 insertions(+), 58 deletions(-) create mode 100644 src/stats.rs diff --git a/src/lib.rs b/src/lib.rs index 5f003b5fd..cf830f108 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,6 +14,7 @@ pub mod jobs; pub mod logging; pub mod protocol; pub mod setup; +pub mod stats; pub mod tracker; pub mod udp; diff --git a/src/main.rs b/src/main.rs index f995ba377..c21aa1793 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use log::info; -use torrust_tracker::tracker::statistics::StatsTracker; +use torrust_tracker::stats::setup_statistics; use torrust_tracker::tracker::tracker::TorrentTracker; use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, Configuration}; @@ -23,14 +23,8 @@ async fn main() { } }; - // Initialize stats tracker - let mut stats_tracker = StatsTracker::new_inactive_instance(); - - let mut stats_event_sender = None; - - if config.tracker_usage_statistics { - stats_event_sender = Some(stats_tracker.run_worker()); - } + // Initialize statistics:wq + let (stats_tracker, stats_event_sender) = setup_statistics(config.tracker_usage_statistics); // Initialize Torrust tracker let tracker = match TorrentTracker::new(config.clone(), Box::new(stats_tracker), stats_event_sender) { diff --git a/src/stats.rs b/src/stats.rs new file mode 100644 index 000000000..d459d8f5b --- /dev/null +++ b/src/stats.rs @@ -0,0 +1,36 @@ +use crate::statistics::{StatsTracker, TrackerStatisticsEventSender}; + +pub fn setup_statistics(tracker_usage_statistics: bool) -> (StatsTracker, Option>) { + let mut stats_tracker = StatsTracker::new_inactive_instance(); + + let mut stats_event_sender = None; + + if tracker_usage_statistics { + stats_event_sender = Some(stats_tracker.run_worker()); + } + + (stats_tracker, stats_event_sender) +} + +#[cfg(test)] +mod test { + 
use crate::stats::setup_statistics; + + #[tokio::test] + async fn should_not_send_any_event_when_statistics_are_disabled() { + let tracker_usage_statistics = false; + + let (_stats_tracker, stats_event_sender) = setup_statistics(tracker_usage_statistics); + + assert!(stats_event_sender.is_none()); + } + + #[tokio::test] + async fn should_send_events_when_statistics_are_enabled() { + let tracker_usage_statistics = true; + + let (_stats_tracker, stats_event_sender) = setup_statistics(tracker_usage_statistics); + + assert!(stats_event_sender.is_some()); + } +} diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 8b57d6bfe..a89b5d4cc 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -57,14 +57,12 @@ impl TrackerStatistics { } pub struct StatsTracker { - channel_sender: Option>, pub stats: Arc>, } impl StatsTracker { pub fn new_active_instance() -> (Self, Box) { let mut stats_tracker = Self { - channel_sender: None, stats: Arc::new(RwLock::new(TrackerStatistics::new())), }; @@ -75,7 +73,6 @@ impl StatsTracker { pub fn new_inactive_instance() -> Self { Self { - channel_sender: None, stats: Arc::new(RwLock::new(TrackerStatistics::new())), } } @@ -92,7 +89,6 @@ impl StatsTracker { pub fn new() -> Self { Self { - channel_sender: None, stats: Arc::new(RwLock::new(TrackerStatistics::new())), } } @@ -100,9 +96,6 @@ impl StatsTracker { pub fn run_worker(&mut self) -> Box { let (tx, mut rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); - // set send channel on stats_tracker - self.channel_sender = Some(tx.clone()); - let stats = self.stats.clone(); tokio::spawn(async move { @@ -159,17 +152,6 @@ pub trait TrackerStatisticsEventSender: Sync + Send { async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>>; } -#[async_trait] -impl TrackerStatisticsEventSender for StatsTracker { - async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { - if let Some(tx) = &self.channel_sender { - Some(tx.send(event).await) - } else { 
- None - } - } -} - pub struct StatsEventSender { sender: Sender, } @@ -193,36 +175,6 @@ impl TrackerStatisticsRepository for StatsTracker { } } -pub trait TrackerStatsService: TrackerStatisticsEventSender + TrackerStatisticsRepository {} +pub trait TrackerStatsService: TrackerStatisticsRepository {} impl TrackerStatsService for StatsTracker {} - -#[cfg(test)] -mod test { - - mod event_sender { - use crate::statistics::{StatsTracker, TrackerStatisticsEvent, TrackerStatisticsEventSender}; - - #[tokio::test] - async fn should_not_send_any_event_when_statistics_are_disabled() { - let tracker_usage_statistics = false; - - let inactive_stats_tracker = StatsTracker::new_instance(tracker_usage_statistics); - - let result = inactive_stats_tracker.send_event(TrackerStatisticsEvent::Tcp4Announce).await; - - assert!(result.is_none()); - } - - #[tokio::test] - async fn should_send_events_when_statistics_are_enabled() { - let tracker_usage_statistics = true; - - let active_stats_tracker = StatsTracker::new_instance(tracker_usage_statistics); - - let result = active_stats_tracker.send_event(TrackerStatisticsEvent::Tcp4Announce).await; - - assert!(result.is_some()); - } - } -} From daec1fed0553e397ce0aa9823f26f1b6ed42a249 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 17:23:46 +0100 Subject: [PATCH 0163/1003] refactor: extract stats event_listener --- src/tracker/statistics.rs | 97 ++++++++++++++++++++------------------- 1 file changed, 51 insertions(+), 46 deletions(-) diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index a89b5d4cc..66aea0169 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -1,8 +1,9 @@ use std::sync::Arc; use async_trait::async_trait; +use log::debug; use tokio::sync::mpsc::error::SendError; -use tokio::sync::mpsc::Sender; +use tokio::sync::mpsc::{Receiver, Sender}; use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; const CHANNEL_BUFFER_SIZE: usize = 65_535; @@ -94,59 +95,63 @@ impl StatsTracker { } 
pub fn run_worker(&mut self) -> Box { - let (tx, mut rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + let (tx, rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); let stats = self.stats.clone(); - tokio::spawn(async move { - while let Some(event) = rx.recv().await { - let mut stats_lock = stats.write().await; - - match event { - TrackerStatisticsEvent::Tcp4Announce => { - stats_lock.tcp4_announces_handled += 1; - stats_lock.tcp4_connections_handled += 1; - } - TrackerStatisticsEvent::Tcp4Scrape => { - stats_lock.tcp4_scrapes_handled += 1; - stats_lock.tcp4_connections_handled += 1; - } - TrackerStatisticsEvent::Tcp6Announce => { - stats_lock.tcp6_announces_handled += 1; - stats_lock.tcp6_connections_handled += 1; - } - TrackerStatisticsEvent::Tcp6Scrape => { - stats_lock.tcp6_scrapes_handled += 1; - stats_lock.tcp6_connections_handled += 1; - } - TrackerStatisticsEvent::Udp4Connect => { - stats_lock.udp4_connections_handled += 1; - } - TrackerStatisticsEvent::Udp4Announce => { - stats_lock.udp4_announces_handled += 1; - } - TrackerStatisticsEvent::Udp4Scrape => { - stats_lock.udp4_scrapes_handled += 1; - } - TrackerStatisticsEvent::Udp6Connect => { - stats_lock.udp6_connections_handled += 1; - } - TrackerStatisticsEvent::Udp6Announce => { - stats_lock.udp6_announces_handled += 1; - } - TrackerStatisticsEvent::Udp6Scrape => { - stats_lock.udp6_scrapes_handled += 1; - } - } - - drop(stats_lock); - } - }); + tokio::spawn(async move { event_listener(rx, stats).await }); Box::new(StatsEventSender { sender: tx }) } } +async fn event_listener(mut rx: Receiver, stats: Arc>) { + while let Some(event) = rx.recv().await { + let mut stats_lock = stats.write().await; + + match event { + TrackerStatisticsEvent::Tcp4Announce => { + stats_lock.tcp4_announces_handled += 1; + stats_lock.tcp4_connections_handled += 1; + } + TrackerStatisticsEvent::Tcp4Scrape => { + stats_lock.tcp4_scrapes_handled += 1; + stats_lock.tcp4_connections_handled += 1; + } + TrackerStatisticsEvent::Tcp6Announce => { + 
stats_lock.tcp6_announces_handled += 1; + stats_lock.tcp6_connections_handled += 1; + } + TrackerStatisticsEvent::Tcp6Scrape => { + stats_lock.tcp6_scrapes_handled += 1; + stats_lock.tcp6_connections_handled += 1; + } + TrackerStatisticsEvent::Udp4Connect => { + stats_lock.udp4_connections_handled += 1; + } + TrackerStatisticsEvent::Udp4Announce => { + stats_lock.udp4_announces_handled += 1; + } + TrackerStatisticsEvent::Udp4Scrape => { + stats_lock.udp4_scrapes_handled += 1; + } + TrackerStatisticsEvent::Udp6Connect => { + stats_lock.udp6_connections_handled += 1; + } + TrackerStatisticsEvent::Udp6Announce => { + stats_lock.udp6_announces_handled += 1; + } + TrackerStatisticsEvent::Udp6Scrape => { + stats_lock.udp6_scrapes_handled += 1; + } + } + + debug!("stats: {:?}", stats_lock); + + drop(stats_lock); + } +} + #[async_trait] pub trait TrackerStatisticsEventSender: Sync + Send { async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>>; } From a2b16ff7b2d3bdd97d994cf19cc33cb6b8b4be62 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 17:35:22 +0100 Subject: [PATCH 0164/1003] fix: tests using mock for old service I only changed the tests to use the new mock. I realized the tests were wrong because they do not fail when no event is sent. They only fail when the event sent is not the right type.
--- src/udp/handlers.rs | 56 ++++++++++++++++----------------------------- 1 file changed, 20 insertions(+), 36 deletions(-) diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index fc3e0968f..ba545da1b 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -345,29 +345,13 @@ mod tests { struct TrackerStatsServiceMock { stats: Arc>, - expected_event: Option, } impl TrackerStatsServiceMock { fn new() -> Self { Self { stats: Arc::new(RwLock::new(TrackerStatistics::new())), - expected_event: None, - } - } - - fn should_throw_event(&mut self, expected_event: TrackerStatisticsEvent) { - self.expected_event = Some(expected_event); - } - } - - #[async_trait] - impl TrackerStatisticsEventSender for TrackerStatsServiceMock { - async fn send_event(&self, _event: TrackerStatisticsEvent) -> Option>> { - if self.expected_event.is_some() { - assert_eq!(_event, *self.expected_event.as_ref().unwrap()); } - None } } @@ -387,9 +371,9 @@ mod tests { #[async_trait] impl TrackerStatisticsEventSender for StatsEventSenderMock { - async fn send_event(&self, _event: TrackerStatisticsEvent) -> Option>> { + async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { if self.expected_event.is_some() { - assert_eq!(_event, *self.expected_event.as_ref().unwrap()); + assert_eq!(event, *self.expected_event.as_ref().unwrap()); } None } @@ -493,11 +477,11 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { - let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let stats_event_sender = Box::new(StatsEventSenderMock::new()); + let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); let client_socket_address = sample_ipv4_socket_address(); - tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Connect); + 
stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp4Connect); let torrent_tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap()); @@ -508,10 +492,10 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { - let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let stats_event_sender = Box::new(StatsEventSenderMock::new()); + let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); - tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Connect); + stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp6Connect); let torrent_tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap()); @@ -748,10 +732,10 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { - let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let stats_event_sender = Box::new(StatsEventSenderMock::new()); + let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); - tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Announce); + stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp4Announce); let tracker = Arc::new( TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), @@ -975,10 +959,10 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { - let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let stats_event_sender = Box::new(StatsEventSenderMock::new()); + let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let mut stats_event_sender = 
Box::new(StatsEventSenderMock::new()); - tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Announce); + stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp6Announce); let tracker = Arc::new( TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), @@ -1287,10 +1271,10 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_scrape_event() { - let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let stats_event_sender = Box::new(StatsEventSenderMock::new()); + let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); - tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Scrape); + stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp4Scrape); let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( @@ -1316,10 +1300,10 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_scrape_event() { - let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let stats_event_sender = Box::new(StatsEventSenderMock::new()); + let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); - tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Scrape); + stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp6Scrape); let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( From bc3df5a74b2fc66155bd88aec1b4fad1942da379 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 18:29:18 +0100 Subject: [PATCH 0165/1003] fix typo --- src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index c21aa1793..bfcce014b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -23,7 +23,7 @@ async fn main() { } }; - // Initialize statistics:wq + // Initialize statistics let 
(stats_tracker, stats_event_sender) = setup_statistics(config.tracker_usage_statistics); // Initialize Torrust tracker From e5701103788f79c61bb175915a6467618be096a9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Oct 2022 16:33:46 +0100 Subject: [PATCH 0166/1003] test: add new dev dependency mockall It will be used to mock a trait in tests. --- Cargo.lock | 106 +++++++++++++++++++++++++++++++++++++++++++++++++++++ Cargo.toml | 3 ++ 2 files changed, 109 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 0a60397f9..ce66efa09 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -479,6 +479,12 @@ dependencies = [ "syn", ] +[[package]] +name = "difflib" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" + [[package]] name = "digest" version = "0.9.0" @@ -510,6 +516,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + [[package]] name = "either" version = "1.8.0" @@ -570,6 +582,15 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "float-cmp" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" +dependencies = [ + "num-traits", +] + [[package]] name = "fnv" version = "1.0.7" @@ -600,6 +621,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fragile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" + [[package]] name = "frunk" version = "0.4.0" @@ -1008,6 +1035,15 @@ dependencies = [ "syn", ] +[[package]] 
+name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.4" @@ -1208,6 +1244,33 @@ dependencies = [ "windows-sys 0.36.1", ] +[[package]] +name = "mockall" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50e4a1c770583dac7ab5e2f6c139153b783a53a1bbee9729613f193e59828326" +dependencies = [ + "cfg-if", + "downcast", + "fragile", + "lazy_static", + "mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "832663583d5fa284ca8810bf7015e46c9fff9622d3cf34bd1eea5003fec06dd0" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "multipart" version = "0.18.0" @@ -1349,6 +1412,12 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "normalize-line-endings" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + [[package]] name = "num-bigint" version = "0.3.3" @@ -1626,6 +1695,36 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +[[package]] +name = "predicates" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5aab5be6e4732b473071984b3164dbbfb7a3674d30ea5ff44410b6bcd960c3c" +dependencies = [ + "difflib", + "float-cmp", + "itertools", + "normalize-line-endings", + "predicates-core", + "regex", +] + +[[package]] +name = "predicates-core" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"da1c2388b1513e1b605fcec39a95e0a9e8ef088f71443ef37099fa9ae6673fcb" + +[[package]] +name = "predicates-tree" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d86de6de25020a36c6d3643a86d9a6a9f552107c0559c60ea03551b5e16c032" +dependencies = [ + "predicates-core", + "termtree", +] + [[package]] name = "proc-macro-hack" version = "0.5.19" @@ -2285,6 +2384,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "termtree" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507e9898683b6c43a9aa55b64259b721b52ba226e0f3779137e50ad114a4c90b" + [[package]] name = "textwrap" version = "0.11.0" @@ -2509,6 +2614,7 @@ dependencies = [ "hex", "lazy_static", "log", + "mockall", "openssl", "percent-encoding", "r2d2", diff --git a/Cargo.toml b/Cargo.toml index c7e3790bb..18188565c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,3 +59,6 @@ async-trait = "0.1" aquatic_udp_protocol = "0.2" uuid = { version = "1", features = ["v4"] } + +[dev-dependencies] +mockall = "0.11.3" From 8874032074c12aecb1f250e054caac4d8c9c63f4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Oct 2022 16:34:55 +0100 Subject: [PATCH 0167/1003] fix: weak tests As I explained here: https://github.com/torrust/torrust-tracker/pull/103#issue-1418647900 The mock for the trait TrackerStatisticsEventSender did not work completely well because it only checked for the right type of the triggered event but it did not check if the event was sent. I finally used a mocking library because I do not know how to mock a trait that uses a mutable reference to 'self'. I need to store whether the event was sent or not and I do not know how to do that without changing the function signature making it mutable.
--- src/tracker/statistics.rs | 3 + src/udp/handlers.rs | 135 +++++++++++++++++++------------------- 2 files changed, 72 insertions(+), 66 deletions(-) diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 66aea0169..73042ff3e 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -2,6 +2,8 @@ use std::sync::Arc; use async_trait::async_trait; use log::debug; +#[cfg(test)] +use mockall::{automock, predicate::*}; use tokio::sync::mpsc::error::SendError; use tokio::sync::mpsc::{Receiver, Sender}; use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; @@ -153,6 +155,7 @@ async fn event_listener(mut rx: Receiver, stats: Arc Option>>; } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index ba545da1b..35d2e0247 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -252,16 +252,12 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use async_trait::async_trait; - use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; use crate::mode::TrackerMode; use crate::peer::TorrentPeer; use crate::protocol::clock::{DefaultClock, Time}; - use crate::statistics::{ - StatsTracker, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender, TrackerStatisticsRepository, - TrackerStatsService, - }; + use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsRepository, TrackerStatsService}; use crate::tracker::tracker::TorrentTracker; use crate::{Configuration, PeerId}; @@ -355,30 +351,6 @@ mod tests { } } - struct StatsEventSenderMock { - expected_event: Option, - } - - impl StatsEventSenderMock { - fn new() -> Self { - Self { expected_event: None } - } - - fn should_throw_event(&mut self, expected_event: TrackerStatisticsEvent) { - self.expected_event = Some(expected_event); - } - } - - #[async_trait] - impl TrackerStatisticsEventSender for StatsEventSenderMock { - async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { - if 
self.expected_event.is_some() { - assert_eq!(event, *self.expected_event.as_ref().unwrap()); - } - None - } - } - #[async_trait] impl TrackerStatisticsRepository for TrackerStatsServiceMock { async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { @@ -417,15 +389,14 @@ mod tests { mod connect_request { + use std::future; use std::sync::Arc; use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; + use mockall::predicate::eq; - use super::{ - default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr, StatsEventSenderMock, - TrackerStatsServiceMock, - }; - use crate::statistics::TrackerStatisticsEvent; + use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr, TrackerStatsServiceMock}; + use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_connect; @@ -478,10 +449,15 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); + let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(TrackerStatisticsEvent::Udp4Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); let client_socket_address = sample_ipv4_socket_address(); - stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp4Connect); let torrent_tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap()); @@ -493,9 +469,13 @@ mod tests { #[tokio::test] async fn 
it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); - - stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp6Connect); + let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(TrackerStatisticsEvent::Udp6Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); let torrent_tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap()); @@ -578,6 +558,7 @@ mod tests { mod using_ipv4 { + use std::future; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; @@ -585,15 +566,16 @@ mod tests { AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, Response, ResponsePeer, }; + use mockall::predicate::eq; - use crate::statistics::TrackerStatisticsEvent; + use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ - default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, StatsEventSenderMock, - TorrentPeerBuilder, TrackerStatsServiceMock, + default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, TorrentPeerBuilder, + TrackerStatsServiceMock, }; use crate::PeerId; @@ -733,13 +715,18 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let mut 
stats_event_sender = Box::new(StatsEventSenderMock::new()); - - stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp4Announce); + let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(TrackerStatisticsEvent::Udp4Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), ); + handle_announce( sample_ipv4_socket_address(), &AnnounceRequestBuilder::default().into(), @@ -798,6 +785,7 @@ mod tests { mod using_ipv6 { + use std::future; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; @@ -805,15 +793,16 @@ mod tests { AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, Response, ResponsePeer, }; + use mockall::predicate::eq; - use crate::statistics::TrackerStatisticsEvent; + use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ - default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, StatsEventSenderMock, - TorrentPeerBuilder, TrackerStatsServiceMock, + default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, TorrentPeerBuilder, + TrackerStatsServiceMock, }; use crate::PeerId; @@ -960,9 +949,13 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); - - 
stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp6Announce); + let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(TrackerStatisticsEvent::Udp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), @@ -1252,29 +1245,34 @@ mod tests { let info_hashes = vec![info_hash]; ScrapeRequest { - connection_id: into_connection_id(&make_connection_cookie(&remote_addr)), + connection_id: into_connection_id(&make_connection_cookie(remote_addr)), transaction_id: TransactionId(0i32), info_hashes, } } mod using_ipv4 { + use std::future; use std::sync::Arc; + use mockall::predicate::eq; + use super::sample_scrape_request; - use crate::statistics::TrackerStatisticsEvent; + use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{ - default_tracker_config, sample_ipv4_remote_addr, StatsEventSenderMock, TrackerStatsServiceMock, - }; + use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr, TrackerStatsServiceMock}; #[tokio::test] async fn should_send_the_upd4_scrape_event() { let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); - - stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp4Scrape); + let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(TrackerStatisticsEvent::Udp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); let remote_addr 
= sample_ipv4_remote_addr(); let tracker = Arc::new( @@ -1288,22 +1286,27 @@ mod tests { } mod using_ipv6 { + use std::future; use std::sync::Arc; + use mockall::predicate::eq; + use super::sample_scrape_request; - use crate::statistics::TrackerStatisticsEvent; + use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{ - default_tracker_config, sample_ipv6_remote_addr, StatsEventSenderMock, TrackerStatsServiceMock, - }; + use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr, TrackerStatsServiceMock}; #[tokio::test] async fn should_send_the_upd6_scrape_event() { let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); - - stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp6Scrape); + let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(TrackerStatisticsEvent::Udp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( From d3297cf0b9011933cd5be0018db5f6e4f763c8a4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Oct 2022 10:31:45 +0100 Subject: [PATCH 0168/1003] refactor: extract StatsRepository --- src/main.rs | 4 +- src/stats.rs | 16 ++++---- src/tracker/statistics.rs | 81 ++++++++++++++++++++------------------- src/tracker/tracker.rs | 10 ++--- src/udp/handlers.rs | 75 +++++++++++------------------------- tests/udp.rs | 5 +-- 6 files changed, 81 insertions(+), 110 deletions(-) diff --git a/src/main.rs b/src/main.rs index bfcce014b..08061cd7b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -24,10 +24,10 @@ async fn main() { }; // Initialize statistics - let 
(stats_tracker, stats_event_sender) = setup_statistics(config.tracker_usage_statistics); + let (stats_event_sender, stats_repository) = setup_statistics(config.tracker_usage_statistics); // Initialize Torrust tracker - let tracker = match TorrentTracker::new(config.clone(), Box::new(stats_tracker), stats_event_sender) { + let tracker = match TorrentTracker::new(config.clone(), stats_event_sender, stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) diff --git a/src/stats.rs b/src/stats.rs index d459d8f5b..1f387a084 100644 --- a/src/stats.rs +++ b/src/stats.rs @@ -1,15 +1,15 @@ -use crate::statistics::{StatsTracker, TrackerStatisticsEventSender}; - -pub fn setup_statistics(tracker_usage_statistics: bool) -> (StatsTracker, Option>) { - let mut stats_tracker = StatsTracker::new_inactive_instance(); +use crate::statistics::{StatsRepository, StatsTracker, TrackerStatisticsEventSender}; +pub fn setup_statistics(tracker_usage_statistics: bool) -> (Option>, StatsRepository) { let mut stats_event_sender = None; + let mut stats_tracker = StatsTracker::new(); + if tracker_usage_statistics { - stats_event_sender = Some(stats_tracker.run_worker()); + stats_event_sender = Some(stats_tracker.run_event_listener()); } - (stats_tracker, stats_event_sender) + (stats_event_sender, stats_tracker.stats_repository) } #[cfg(test)] @@ -20,7 +20,7 @@ mod test { async fn should_not_send_any_event_when_statistics_are_disabled() { let tracker_usage_statistics = false; - let (_stats_tracker, stats_event_sender) = setup_statistics(tracker_usage_statistics); + let (stats_event_sender, _stats_repository) = setup_statistics(tracker_usage_statistics); assert!(stats_event_sender.is_none()); } @@ -29,7 +29,7 @@ mod test { async fn should_send_events_when_statistics_are_enabled() { let tracker_usage_statistics = true; - let (_stats_tracker, stats_event_sender) = setup_statistics(tracker_usage_statistics); + let (stats_event_sender, _stats_repository) = 
setup_statistics(tracker_usage_statistics); assert!(stats_event_sender.is_some()); } diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 73042ff3e..8f203c36e 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -40,6 +40,12 @@ pub struct TrackerStatistics { pub udp6_scrapes_handled: u64, } +impl Default for TrackerStatistics { + fn default() -> Self { + Self::new() + } +} + impl TrackerStatistics { pub fn new() -> Self { Self { @@ -60,56 +66,44 @@ impl TrackerStatistics { } pub struct StatsTracker { - pub stats: Arc>, + pub stats_repository: StatsRepository, } -impl StatsTracker { - pub fn new_active_instance() -> (Self, Box) { - let mut stats_tracker = Self { - stats: Arc::new(RwLock::new(TrackerStatistics::new())), - }; - - let stats_event_sender = stats_tracker.run_worker(); - - (stats_tracker, stats_event_sender) - } - - pub fn new_inactive_instance() -> Self { - Self { - stats: Arc::new(RwLock::new(TrackerStatistics::new())), - } +impl Default for StatsTracker { + fn default() -> Self { + Self::new() } +} - pub fn new_instance(active: bool) -> Self { - if !active { - return Self::new_inactive_instance(); - } +impl StatsTracker { + pub fn new_active_instance() -> (Box, StatsRepository) { + let mut stats_tracker = Self::new(); - let (stats_tracker, _stats_event_sender) = Self::new_active_instance(); + let stats_event_sender = stats_tracker.run_event_listener(); - stats_tracker + (stats_event_sender, stats_tracker.stats_repository) } pub fn new() -> Self { Self { - stats: Arc::new(RwLock::new(TrackerStatistics::new())), + stats_repository: StatsRepository::new(), } } - pub fn run_worker(&mut self) -> Box { - let (tx, rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + pub fn run_event_listener(&mut self) -> Box { + let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); - let stats = self.stats.clone(); + let stats_repository = self.stats_repository.clone(); - tokio::spawn(async move { event_listener(rx, stats).await }); 
+ tokio::spawn(async move { event_listener(receiver, stats_repository).await }); - Box::new(StatsEventSender { sender: tx }) + Box::new(StatsEventSender { sender }) } } -async fn event_listener(mut rx: Receiver, stats: Arc>) { - while let Some(event) = rx.recv().await { - let mut stats_lock = stats.write().await; +async fn event_listener(mut receiver: Receiver, stats_repository: StatsRepository) { + while let Some(event) = receiver.recv().await { + let mut stats_lock = stats_repository.stats.write().await; match event { TrackerStatisticsEvent::Tcp4Announce => { @@ -171,18 +165,25 @@ impl TrackerStatisticsEventSender for StatsEventSender { } } -#[async_trait] -pub trait TrackerStatisticsRepository: Sync + Send { - async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics>; +#[derive(Clone)] +pub struct StatsRepository { + pub stats: Arc>, } -#[async_trait] -impl TrackerStatisticsRepository for StatsTracker { - async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { - self.stats.read().await +impl Default for StatsRepository { + fn default() -> Self { + Self::new() } } -pub trait TrackerStatsService: TrackerStatisticsRepository {} +impl StatsRepository { + pub fn new() -> Self { + Self { + stats: Arc::new(RwLock::new(TrackerStatistics::new())), + } + } -impl TrackerStatsService for StatsTracker {} + pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { + self.stats.read().await + } +} diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index 80f6e549d..7e74a3554 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -12,7 +12,7 @@ use crate::databases::database::Database; use crate::mode::TrackerMode; use crate::peer::TorrentPeer; use crate::protocol::common::InfoHash; -use crate::statistics::{TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender, TrackerStatsService}; +use crate::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; 
use crate::tracker::key; use crate::tracker::key::AuthKey; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; @@ -24,16 +24,16 @@ pub struct TorrentTracker { keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, - stats_tracker: Box, stats_event_sender: Option>, + stats_repository: StatsRepository, database: Box, } impl TorrentTracker { pub fn new( config: Arc, - stats_tracker: Box, stats_event_sender: Option>, + stats_repository: StatsRepository, ) -> Result { let database = database::connect_database(&config.db_driver, &config.db_path)?; @@ -43,8 +43,8 @@ impl TorrentTracker { keys: RwLock::new(std::collections::HashMap::new()), whitelist: RwLock::new(std::collections::HashSet::new()), torrents: RwLock::new(std::collections::BTreeMap::new()), - stats_tracker, stats_event_sender, + stats_repository, database, }) } @@ -238,7 +238,7 @@ impl TorrentTracker { } pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { - self.stats_tracker.get_stats().await + self.stats_repository.get_stats().await } pub async fn send_stats_event(&self, event: TrackerStatisticsEvent) -> Option>> { diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 35d2e0247..b962b1333 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -251,13 +251,11 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use async_trait::async_trait; - use tokio::sync::{RwLock, RwLockReadGuard}; use crate::mode::TrackerMode; use crate::peer::TorrentPeer; use crate::protocol::clock::{DefaultClock, Time}; - use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsRepository, TrackerStatsService}; + use crate::statistics::StatsTracker; use crate::tracker::tracker::TorrentTracker; use crate::{Configuration, PeerId}; @@ -281,8 +279,8 @@ mod tests { } fn initialized_tracker(configuration: Arc) -> Arc { - let (stats_tracker, stats_event_sender) = StatsTracker::new_active_instance(); - 
Arc::new(TorrentTracker::new(configuration, Box::new(stats_tracker), Some(stats_event_sender)).unwrap()) + let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); + Arc::new(TorrentTracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()) } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -339,27 +337,6 @@ mod tests { } } - struct TrackerStatsServiceMock { - stats: Arc>, - } - - impl TrackerStatsServiceMock { - fn new() -> Self { - Self { - stats: Arc::new(RwLock::new(TrackerStatistics::new())), - } - } - } - - #[async_trait] - impl TrackerStatisticsRepository for TrackerStatsServiceMock { - async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { - self.stats.read().await - } - } - - impl TrackerStatsService for TrackerStatsServiceMock {} - struct TrackerConfigurationBuilder { configuration: Configuration, } @@ -395,8 +372,8 @@ mod tests { use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; use mockall::predicate::eq; - use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr, TrackerStatsServiceMock}; - use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; + use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr}; + use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_connect; @@ -448,7 +425,6 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { - let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); stats_event_sender_mock .expect_send_event() @@ -459,8 +435,9 @@ mod tests { let client_socket_address = 
sample_ipv4_socket_address(); - let torrent_tracker = - Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap()); + let torrent_tracker = Arc::new( + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + ); handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) .await .unwrap(); @@ -468,7 +445,6 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { - let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); stats_event_sender_mock .expect_send_event() @@ -477,8 +453,9 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let torrent_tracker = - Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap()); + let torrent_tracker = Arc::new( + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + ); handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) .await .unwrap(); @@ -568,14 +545,13 @@ mod tests { }; use mockall::predicate::eq; - use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; + use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, TorrentPeerBuilder, - TrackerStatsServiceMock, }; use crate::PeerId; @@ -714,7 +690,6 @@ mod 
tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { - let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); stats_event_sender_mock .expect_send_event() @@ -724,7 +699,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), ); handle_announce( @@ -795,14 +770,13 @@ mod tests { }; use mockall::predicate::eq; - use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; + use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, TorrentPeerBuilder, - TrackerStatsServiceMock, }; use crate::PeerId; @@ -948,7 +922,6 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { - let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); stats_event_sender_mock .expect_send_event() @@ -958,7 +931,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), ); let remote_addr = sample_ipv6_remote_addr(); @@ -988,9 +961,9 @@ mod tests { #[tokio::test] 
async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let (stats_tracker, stats_event_sender) = StatsTracker::new_active_instance(); + let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); let tracker = - Arc::new(TorrentTracker::new(configuration, Box::new(stats_tracker), Some(stats_event_sender)).unwrap()); + Arc::new(TorrentTracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -1258,14 +1231,13 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; + use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr, TrackerStatsServiceMock}; + use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr}; #[tokio::test] async fn should_send_the_upd4_scrape_event() { - let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); stats_event_sender_mock .expect_send_event() @@ -1276,7 +1248,7 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) @@ -1292,14 +1264,13 @@ mod tests { use mockall::predicate::eq; use 
super::sample_scrape_request; - use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; + use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr, TrackerStatsServiceMock}; + use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr}; #[tokio::test] async fn should_send_the_upd6_scrape_event() { - let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); stats_event_sender_mock .expect_send_event() @@ -1310,7 +1281,7 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) diff --git a/tests/udp.rs b/tests/udp.rs index d2b500d5a..abd16427b 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -51,11 +51,10 @@ mod udp_tracker_server { lazy_static::initialize(&static_time::TIME_AT_APP_START); // Initialize stats tracker - let (stats_tracker, stats_event_sender) = StatsTracker::new_active_instance(); + let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); // Initialize Torrust tracker - let tracker = match TorrentTracker::new(configuration.clone(), Box::new(stats_tracker), Some(stats_event_sender)) - { + let tracker = match TorrentTracker::new(configuration.clone(), Some(stats_event_sender), stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) From 0dd95e7961dee201a7fd4517230bf645c0bb3839 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 
Oct 2022 16:21:59 +0100 Subject: [PATCH 0169/1003] refactor: extract statistics event_handler --- src/tracker/statistics.rs | 335 ++++++++++++++++++++++++++++++++------ 1 file changed, 288 insertions(+), 47 deletions(-) diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 8f203c36e..1a681a7a2 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -76,6 +76,12 @@ impl Default for StatsTracker { } impl StatsTracker { + pub fn new() -> Self { + Self { + stats_repository: StatsRepository::new(), + } + } + pub fn new_active_instance() -> (Box, StatsRepository) { let mut stats_tracker = Self::new(); @@ -84,12 +90,6 @@ impl StatsTracker { (stats_event_sender, stats_tracker.stats_repository) } - pub fn new() -> Self { - Self { - stats_repository: StatsRepository::new(), - } - } - pub fn run_event_listener(&mut self) -> Box { let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); @@ -103,49 +103,56 @@ impl StatsTracker { async fn event_listener(mut receiver: Receiver, stats_repository: StatsRepository) { while let Some(event) = receiver.recv().await { - let mut stats_lock = stats_repository.stats.write().await; - - match event { - TrackerStatisticsEvent::Tcp4Announce => { - stats_lock.tcp4_announces_handled += 1; - stats_lock.tcp4_connections_handled += 1; - } - TrackerStatisticsEvent::Tcp4Scrape => { - stats_lock.tcp4_scrapes_handled += 1; - stats_lock.tcp4_connections_handled += 1; - } - TrackerStatisticsEvent::Tcp6Announce => { - stats_lock.tcp6_announces_handled += 1; - stats_lock.tcp6_connections_handled += 1; - } - TrackerStatisticsEvent::Tcp6Scrape => { - stats_lock.tcp6_scrapes_handled += 1; - stats_lock.tcp6_connections_handled += 1; - } - TrackerStatisticsEvent::Udp4Connect => { - stats_lock.udp4_connections_handled += 1; - } - TrackerStatisticsEvent::Udp4Announce => { - stats_lock.udp4_announces_handled += 1; - } - TrackerStatisticsEvent::Udp4Scrape => { - stats_lock.udp4_scrapes_handled += 1; - } - 
TrackerStatisticsEvent::Udp6Connect => { - stats_lock.udp6_connections_handled += 1; - } - TrackerStatisticsEvent::Udp6Announce => { - stats_lock.udp6_announces_handled += 1; - } - TrackerStatisticsEvent::Udp6Scrape => { - stats_lock.udp6_scrapes_handled += 1; - } - } - - debug!("stats: {:?}", stats_lock); + event_handler(event, &stats_repository).await; + } +} - drop(stats_lock); +async fn event_handler(event: TrackerStatisticsEvent, stats_repository: &StatsRepository) { + match event { + // TCP4 + TrackerStatisticsEvent::Tcp4Announce => { + stats_repository.increase_tcp4_announces().await; + stats_repository.increase_tcp4_connections().await; + } + TrackerStatisticsEvent::Tcp4Scrape => { + stats_repository.increase_tcp4_scrapes().await; + stats_repository.increase_tcp4_connections().await; + } + + // TCP6 + TrackerStatisticsEvent::Tcp6Announce => { + stats_repository.increase_tcp6_announces().await; + stats_repository.increase_tcp6_connections().await; + } + TrackerStatisticsEvent::Tcp6Scrape => { + stats_repository.increase_tcp6_scrapes().await; + stats_repository.increase_tcp6_connections().await; + } + + // UDP4 + TrackerStatisticsEvent::Udp4Connect => { + stats_repository.increase_udp4_connections().await; + } + TrackerStatisticsEvent::Udp4Announce => { + stats_repository.increase_udp4_announces().await; + } + TrackerStatisticsEvent::Udp4Scrape => { + stats_repository.increase_udp4_scrapes().await; + } + + // UDP6 + TrackerStatisticsEvent::Udp6Connect => { + stats_repository.increase_udp6_connections().await; + } + TrackerStatisticsEvent::Udp6Announce => { + stats_repository.increase_udp6_announces().await; + } + TrackerStatisticsEvent::Udp6Scrape => { + stats_repository.increase_udp6_scrapes().await; + } } + + debug!("stats: {:?}", stats_repository.get_stats().await); } #[async_trait] @@ -186,4 +193,238 @@ impl StatsRepository { pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { self.stats.read().await } + + pub async fn 
increase_tcp4_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp4_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp4_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_scrapes_handled += 1; + drop(stats_lock); + } +} + +#[cfg(test)] +mod tests { + + 
mod event_handler { + use crate::statistics::{event_handler, StatsRepository, TrackerStatisticsEvent}; + + #[tokio::test] + async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Tcp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_announce_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Tcp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_scrapes_counter_when_it_receives_a_tcp4_scrape_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Tcp4Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_scrape_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Tcp4Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Tcp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_announce_event() { + let stats_repository 
= StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Tcp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_scrapes_counter_when_it_receives_a_tcp6_scrape_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Tcp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_scrape_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Tcp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_connections_counter_when_it_receives_a_udp4_connect_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Udp4Connect, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_announces_counter_when_it_receives_a_udp4_announce_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Udp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_scrapes_counter_when_it_receives_a_udp4_scrape_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Udp4Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_scrapes_handled, 1); + } + + #[tokio::test] + async fn 
should_increase_the_udp6_connections_counter_when_it_receives_a_udp6_connect_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Udp6Connect, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_announces_counter_when_it_receives_a_udp6_announce_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Udp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_scrapes_counter_when_it_receives_a_udp6_scrape_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Udp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_scrapes_handled, 1); + } + } } From 9e493055f8c2bb59b923cce1ca2306c681e09c59 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Oct 2022 16:36:28 +0100 Subject: [PATCH 0170/1003] test: add tests for StatsTracker --- src/tracker/statistics.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 1a681a7a2..c4d4971af 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -270,6 +270,30 @@ impl StatsRepository { #[cfg(test)] mod tests { + mod stats_tracker { + use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; + + #[tokio::test] + async fn should_contain_the_tracker_statistics() { + let stats_tracker = StatsTracker::new(); + + let stats = stats_tracker.stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, TrackerStatistics::new().tcp4_announces_handled); + } + + #[tokio::test] + async fn 
should_create_an_event_sender_to_send_statistical_events() { + let mut stats_tracker = StatsTracker::new(); + + let event_sender = stats_tracker.run_event_listener(); + + let result = event_sender.send_event(TrackerStatisticsEvent::Udp4Connect).await; + + assert!(result.is_some()); + } + } + mod event_handler { use crate::statistics::{event_handler, StatsRepository, TrackerStatisticsEvent}; From 6f77dfeee64037cd9011617057d9f1e5c3e2fd63 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 26 Oct 2022 15:31:37 +0100 Subject: [PATCH 0171/1003] fix: use only minor version for dependencies --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 18188565c..b2b256a2c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,4 +61,4 @@ aquatic_udp_protocol = "0.2" uuid = { version = "1", features = ["v4"] } [dev-dependencies] -mockall = "0.11.3" +mockall = "0.11" From ce8672931a4694b0104bdf17eecc8b90ca0ac2b3 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:15:29 +0100 Subject: [PATCH 0172/1003] vscode: move and add cSpell words, and fix settings --- .vscode/extensions.json | 2 +- .vscode/settings.json | 13 +--------- cSpell.json | 53 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+), 13 deletions(-) create mode 100644 cSpell.json diff --git a/.vscode/extensions.json b/.vscode/extensions.json index b55ef8bf6..11d11a5c5 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -1,6 +1,6 @@ { "recommendations": [ "streetsidesoftware.code-spell-checker", - "matklad.rust-analyzer" + "rust-lang.rust-analyzer" ] } \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 72e8db7e0..f1027e9bd 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,17 +1,6 @@ { - "cSpell.words": [ - "byteorder", - "hasher", - "leechers", - "nanos", - "rngs", - "Seedable", - "thiserror", - "torrust", - "typenum" - ], "[rust]": { 
"editor.formatOnSave": true }, "rust-analyzer.checkOnSave.command": "clippy", -} +} \ No newline at end of file diff --git a/cSpell.json b/cSpell.json new file mode 100644 index 000000000..2cc3e1179 --- /dev/null +++ b/cSpell.json @@ -0,0 +1,53 @@ +{ + "words": [ + "AUTOINCREMENT", + "automock", + "Avicora", + "Azureus", + "bencode", + "binascii", + "Bitflu", + "bufs", + "byteorder", + "canonicalize", + "canonicalized", + "chrono", + "completei", + "downloadedi", + "filesd", + "Freebox", + "hasher", + "hexlify", + "Hydranode", + "incompletei", + "intervali", + "leecher", + "leechers", + "libtorrent", + "Lphant", + "mockall", + "nanos", + "nocapture", + "ostr", + "Pando", + "Rasterbar", + "repr", + "rngs", + "rusqlite", + "Seedable", + "Shareaza", + "sharktorrent", + "socketaddr", + "sqllite", + "Swiftbit", + "thiserror", + "Torrentstorm", + "torrust", + "typenum", + "Unamed", + "untuple", + "Vagaa", + "Xtorrent", + "Xunlei" + ] +} From 68a88e81c62c37b2064b2eac758e137615d75fc3 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:27:10 +0100 Subject: [PATCH 0173/1003] clippy: fix src/http/response.rs --- src/http/response.rs | 64 ++++++++++++++++++++++---------------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/src/http/response.rs b/src/http/response.rs index 4db12f995..c87b5e0e8 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -38,34 +38,34 @@ impl AnnounceResponse { for peer in &self.peers { match peer.ip { IpAddr::V4(ip) => { - peers_v4.write(&u32::from(ip).to_be_bytes())?; - peers_v4.write(&peer.port.to_be_bytes())?; + peers_v4.write_all(&u32::from(ip).to_be_bytes())?; + peers_v4.write_all(&peer.port.to_be_bytes())?; } IpAddr::V6(ip) => { - peers_v6.write(&u128::from(ip).to_be_bytes())?; - peers_v6.write(&peer.port.to_be_bytes())?; + peers_v6.write_all(&u128::from(ip).to_be_bytes())?; + peers_v6.write_all(&peer.port.to_be_bytes())?; } } } let mut bytes: Vec = Vec::new(); - bytes.write(b"d8:intervali")?; 
- bytes.write(&self.interval.to_string().as_bytes())?; - bytes.write(b"e12:min intervali")?; - bytes.write(&self.interval_min.to_string().as_bytes())?; - bytes.write(b"e8:completei")?; - bytes.write(&self.complete.to_string().as_bytes())?; - bytes.write(b"e10:incompletei")?; - bytes.write(&self.incomplete.to_string().as_bytes())?; - bytes.write(b"e5:peers")?; - bytes.write(&peers_v4.len().to_string().as_bytes())?; - bytes.write(b":")?; - bytes.write(peers_v4.as_slice())?; - bytes.write(b"e6:peers6")?; - bytes.write(&peers_v6.len().to_string().as_bytes())?; - bytes.write(b":")?; - bytes.write(peers_v6.as_slice())?; - bytes.write(b"e")?; + bytes.write_all(b"d8:intervali")?; + bytes.write_all(self.interval.to_string().as_bytes())?; + bytes.write_all(b"e12:min intervali")?; + bytes.write_all(self.interval_min.to_string().as_bytes())?; + bytes.write_all(b"e8:completei")?; + bytes.write_all(self.complete.to_string().as_bytes())?; + bytes.write_all(b"e10:incompletei")?; + bytes.write_all(self.incomplete.to_string().as_bytes())?; + bytes.write_all(b"e5:peers")?; + bytes.write_all(peers_v4.len().to_string().as_bytes())?; + bytes.write_all(b":")?; + bytes.write_all(peers_v4.as_slice())?; + bytes.write_all(b"e6:peers6")?; + bytes.write_all(peers_v6.len().to_string().as_bytes())?; + bytes.write_all(b":")?; + bytes.write_all(peers_v6.as_slice())?; + bytes.write_all(b"e")?; Ok(bytes) } @@ -87,21 +87,21 @@ impl ScrapeResponse { pub fn write(&self) -> Result, Box> { let mut bytes: Vec = Vec::new(); - bytes.write(b"d5:filesd")?; + bytes.write_all(b"d5:filesd")?; for (info_hash, scrape_response_entry) in self.files.iter() { - bytes.write(b"20:")?; - bytes.write(&info_hash.0)?; - bytes.write(b"d8:completei")?; - bytes.write(scrape_response_entry.complete.to_string().as_bytes())?; - bytes.write(b"e10:downloadedi")?; - bytes.write(scrape_response_entry.downloaded.to_string().as_bytes())?; - bytes.write(b"e10:incompletei")?; - 
bytes.write(scrape_response_entry.incomplete.to_string().as_bytes())?; - bytes.write(b"ee")?; + bytes.write_all(b"20:")?; + bytes.write_all(&info_hash.0)?; + bytes.write_all(b"d8:completei")?; + bytes.write_all(scrape_response_entry.complete.to_string().as_bytes())?; + bytes.write_all(b"e10:downloadedi")?; + bytes.write_all(scrape_response_entry.downloaded.to_string().as_bytes())?; + bytes.write_all(b"e10:incompletei")?; + bytes.write_all(scrape_response_entry.incomplete.to_string().as_bytes())?; + bytes.write_all(b"ee")?; } - bytes.write(b"ee")?; + bytes.write_all(b"ee")?; Ok(bytes) } From 7f3066a5064e5b08e1c1b23427f9427b473fc31d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:28:16 +0100 Subject: [PATCH 0174/1003] clippy: fix src/http/handlers.rs --- src/http/handlers.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 5214bbe6e..73f7c866c 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -93,7 +93,7 @@ pub async fn handle_scrape( let db = tracker.get_torrents().await; for info_hash in scrape_request.info_hashes.iter() { - let scrape_entry = match db.get(&info_hash) { + let scrape_entry = match db.get(info_hash) { Some(torrent_info) => { if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { let (seeders, completed, leechers) = torrent_info.get_stats(); @@ -117,7 +117,7 @@ pub async fn handle_scrape( }, }; - files.insert(info_hash.clone(), scrape_entry); + files.insert(*info_hash, scrape_entry); } // send stats event From 9a2422e680bfb576fa12b4ff2732d8b04c9ea538 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:30:04 +0100 Subject: [PATCH 0175/1003] clippy: fix src/http/filters.rs --- src/http/filters.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/http/filters.rs b/src/http/filters.rs index 514cb804c..bee89661b 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -61,12 
+61,12 @@ pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter WebResult> { - let split_raw_query: Vec<&str> = raw_query.split("&").collect(); + let split_raw_query: Vec<&str> = raw_query.split('&').collect(); let mut info_hashes: Vec = Vec::new(); for v in split_raw_query { if v.contains("info_hash") { - let raw_info_hash = v.split("=").collect::>()[1]; + let raw_info_hash = v.split('=').collect::>()[1]; let info_hash_bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)); if let Ok(ih) = info_hash { @@ -77,7 +77,7 @@ async fn info_hashes(raw_query: String) -> WebResult> { if info_hashes.len() > MAX_SCRAPE_TORRENTS as usize { Err(reject::custom(ServerError::ExceededInfoHashLimit)) - } else if info_hashes.len() < 1 { + } else if info_hashes.is_empty() { Err(reject::custom(ServerError::InvalidInfoHash)) } else { Ok(info_hashes) @@ -87,7 +87,7 @@ async fn info_hashes(raw_query: String) -> WebResult> { /// Parse PeerId from raw query string async fn peer_id(raw_query: String) -> WebResult { // put all query params in a vec - let split_raw_query: Vec<&str> = raw_query.split("&").collect(); + let split_raw_query: Vec<&str> = raw_query.split('&').collect(); let mut peer_id: Option = None; @@ -95,7 +95,7 @@ async fn peer_id(raw_query: String) -> WebResult { // look for the peer_id param if v.contains("peer_id") { // get raw percent_encoded peer_id - let raw_peer_id = v.split("=").collect::>()[1]; + let raw_peer_id = v.split('=').collect::>()[1]; // decode peer_id let peer_id_bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); @@ -143,7 +143,7 @@ async fn peer_addr( // set client ip to last forwarded ip let x_forwarded_ip = *x_forwarded_ips.last().unwrap(); - IpAddr::from_str(x_forwarded_ip).or_else(|_| Err(reject::custom(ServerError::AddressNotFound))) + IpAddr::from_str(x_forwarded_ip).map_err(|_| reject::custom(ServerError::AddressNotFound)) } false => 
Ok(remote_addr.unwrap().ip()), } From 4d93dbc0fbfdc171d581a1b71930d4bc011a9915 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:31:16 +0100 Subject: [PATCH 0176/1003] clippy: fix src/api/server.rs --- src/api/server.rs | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/src/api/server.rs b/src/api/server.rs index 5285c9b2b..a8b235a66 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -129,10 +129,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let view_stats_list = filters::method::get() .and(filters::path::path("stats")) .and(filters::path::end()) - .map(move || { - let tracker = api_stats.clone(); - tracker - }) + .map(move || api_stats.clone()) .and_then(|tracker: Arc| async move { let mut results = Stats { torrents: 0, @@ -304,10 +301,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp .and(filters::path::path("whitelist")) .and(filters::path::path("reload")) .and(filters::path::end()) - .map(move || { - let tracker = t7.clone(); - tracker - }) + .map(move || t7.clone()) .and_then(|tracker: Arc| async move { match tracker.load_whitelist().await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), @@ -324,10 +318,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp .and(filters::path::path("keys")) .and(filters::path::path("reload")) .and(filters::path::end()) - .map(move || { - let tracker = t8.clone(); - tracker - }) + .map(move || t8.clone()) .and_then(|tracker: Arc| async move { match tracker.load_keys().await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), From eccf1b095afa9c0dbacdc81ff94fbfc78f6e18ad Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:32:14 +0100 Subject: [PATCH 0177/1003] clippy: fix src/databases/database.rs --- src/databases/database.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/databases/database.rs b/src/databases/database.rs index adc735fd2..c67f39a54 100644 
--- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -7,7 +7,7 @@ use crate::databases::sqlite::SqliteDatabase; use crate::tracker::key::AuthKey; use crate::InfoHash; -#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub enum DatabaseDrivers { Sqlite3, MySQL, @@ -55,7 +55,7 @@ pub trait Database: Sync + Send { async fn remove_key_from_keys(&self, key: &str) -> Result; } -#[derive(Debug, Display, PartialEq, Error)] +#[derive(Debug, Display, PartialEq, Eq, Error)] #[allow(dead_code)] pub enum Error { #[display(fmt = "Query returned no rows.")] From 5d586aa17af476e76a4d455c1358ed0131a3e0ef Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:32:52 +0100 Subject: [PATCH 0178/1003] clippy: fix src/databases/mysql.rs --- src/databases/mysql.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 33287df6d..a4d870101 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -19,7 +19,7 @@ pub struct MysqlDatabase { impl MysqlDatabase { pub fn new(db_path: &str) -> Result { - let opts = Opts::from_url(&db_path).expect("Failed to connect to MySQL database."); + let opts = Opts::from_url(db_path).expect("Failed to connect to MySQL database."); let builder = OptsBuilder::from_opts(opts); let manager = MysqlConnectionManager::new(builder); let pool = r2d2::Pool::builder() From 5b30adf305615f20e4cba31037a883d5b63d7b79 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:35:11 +0100 Subject: [PATCH 0179/1003] clippy: fix src/databases/sqlite.rs --- src/databases/sqlite.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index fb66c0b94..ef9f12d9c 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -135,7 +135,7 @@ impl Database for SqliteDatabase { let conn = self.pool.get().map_err(|_| 
database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; - let mut rows = stmt.query(&[info_hash])?; + let mut rows = stmt.query([info_hash])?; if let Some(row) = rows.next()? { let info_hash: String = row.get(0).unwrap(); @@ -223,7 +223,7 @@ impl Database for SqliteDatabase { async fn remove_key_from_keys(&self, key: &str) -> Result { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) { + match conn.execute("DELETE FROM keys WHERE key = ?", [key]) { Ok(updated) => { if updated > 0 { return Ok(updated); From 706830dd29f936e23b9f912a14efca09df690789 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:44:53 +0100 Subject: [PATCH 0180/1003] clippy: fix src/protocol/common.rs --- src/protocol/common.rs | 47 ++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 25 deletions(-) diff --git a/src/protocol/common.rs b/src/protocol/common.rs index f1bd6a99c..431521764 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -26,17 +26,9 @@ pub enum AnnounceEventDef { #[serde(remote = "NumberOfBytes")] pub struct NumberOfBytesDef(pub i64); -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, Ord)] +#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] pub struct InfoHash(pub [u8; 20]); -impl InfoHash { - pub fn to_string(&self) -> String { - let mut buffer = [0u8; 40]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); - String::from(std::str::from_utf8(bytes_out).unwrap()) - } -} - impl std::fmt::Display for InfoHash { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let mut chars = [0u8; 40]; @@ -49,7 +41,7 @@ impl std::str::FromStr for InfoHash { type Err = binascii::ConvertError; fn from_str(s: &str) -> Result { - let mut i = Self { 0: [0u8; 20] }; + let mut i = Self([0u8; 20]); if s.len() != 40 { return 
Err(binascii::ConvertError::InvalidInputLength); } @@ -58,6 +50,12 @@ impl std::str::FromStr for InfoHash { } } +impl Ord for InfoHash { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } +} + impl std::cmp::PartialOrd for InfoHash { fn partial_cmp(&self, other: &InfoHash) -> Option { self.0.partial_cmp(&other.0) @@ -67,15 +65,15 @@ impl std::cmp::PartialOrd for InfoHash { impl std::convert::From<&[u8]> for InfoHash { fn from(data: &[u8]) -> InfoHash { assert_eq!(data.len(), 20); - let mut ret = InfoHash { 0: [0u8; 20] }; + let mut ret = InfoHash([0u8; 20]); ret.0.clone_from_slice(data); - return ret; + ret } } -impl std::convert::Into for [u8; 20] { - fn into(self) -> InfoHash { - InfoHash { 0: self } +impl std::convert::From<[u8; 20]> for InfoHash { + fn from(val: [u8; 20]) -> Self { + InfoHash(val) } } @@ -206,15 +204,15 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { )); } - let mut res = InfoHash { 0: [0u8; 20] }; + let mut res = InfoHash([0u8; 20]); - if let Err(_) = binascii::hex2bin(v.as_bytes(), &mut res.0) { + if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { return Err(serde::de::Error::invalid_value( serde::de::Unexpected::Str(v), &"expected a hexadecimal string", )); } else { - return Ok(res); + Ok(res) } } } @@ -222,15 +220,14 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { #[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord)] pub struct PeerId(pub [u8; 20]); -impl PeerId { - pub fn to_string(&self) -> String { +impl std::fmt::Display for PeerId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut buffer = [0u8; 20]; let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok(); - return if let Some(bytes_out) = bytes_out { - String::from(std::str::from_utf8(bytes_out).unwrap()) - } else { - "".to_string() - }; + match bytes_out { + Some(bytes) => write!(f, "{}", std::str::from_utf8(bytes).unwrap()), + None => write!(f, ""), + } } } From 
a8cfeb120c39da96fff3d1c7cc3c4cd472eeb417 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:45:20 +0100 Subject: [PATCH 0181/1003] clippy: fix src/tracker/key.rs --- src/tracker/key.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tracker/key.rs b/src/tracker/key.rs index c513b48da..1bf0557a1 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ -63,7 +63,7 @@ impl AuthKey { } } -#[derive(Debug, Display, PartialEq, Error)] +#[derive(Debug, Display, PartialEq, Eq, Error)] #[allow(dead_code)] pub enum Error { #[display(fmt = "Key could not be verified.")] From d20bc691189044fe695601d5afc87221b2f9eee0 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:57:53 +0100 Subject: [PATCH 0182/1003] clippy: fix src/tracker/tracker.rs --- src/tracker/tracker.rs | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index 7e74a3554..15000c827 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -13,10 +13,9 @@ use crate::mode::TrackerMode; use crate::peer::TorrentPeer; use crate::protocol::common::InfoHash; use crate::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; -use crate::tracker::key; use crate::tracker::key::AuthKey; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; -use crate::Configuration; +use crate::{key, Configuration}; pub struct TorrentTracker { pub config: Arc, @@ -69,7 +68,7 @@ impl TorrentTracker { } pub async fn remove_auth_key(&self, key: &str) -> Result<(), database::Error> { - self.database.remove_key_from_keys(&key).await?; + self.database.remove_key_from_keys(key).await?; self.keys.write().await.remove(key); Ok(()) } @@ -112,7 +111,7 @@ impl TorrentTracker { // Removing torrents is not relevant to public trackers. 
pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { - self.database.remove_info_hash_from_whitelist(info_hash.clone()).await?; + self.database.remove_info_hash_from_whitelist(*info_hash).await?; self.whitelist.write().await.remove(info_hash); Ok(()) } @@ -155,10 +154,8 @@ impl TorrentTracker { } // check if info_hash is whitelisted - if self.is_whitelisted() { - if !self.is_info_hash_whitelisted(info_hash).await { - return Err(TorrentError::TorrentNotWhitelisted); - } + if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await { + return Err(TorrentError::TorrentNotWhitelisted); } Ok(()) @@ -180,7 +177,7 @@ impl TorrentTracker { completed, }; - torrents.insert(info_hash.clone(), torrent_entry); + torrents.insert(info_hash, torrent_entry); } Ok(()) @@ -209,7 +206,7 @@ impl TorrentTracker { pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> TorrentStats { let mut torrents = self.torrents.write().await; - let torrent_entry = match torrents.entry(info_hash.clone()) { + let torrent_entry = match torrents.entry(*info_hash) { Entry::Vacant(vacant) => vacant.insert(TorrentEntry::new()), Entry::Occupied(entry) => entry.into_mut(), }; @@ -220,7 +217,7 @@ impl TorrentTracker { if self.config.persistent_torrent_completed_stat && stats_updated { let _ = self .database - .save_persistent_torrent(&info_hash, torrent_entry.completed) + .save_persistent_torrent(info_hash, torrent_entry.completed) .await; } @@ -258,8 +255,8 @@ impl TorrentTracker { torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); match self.config.persistent_torrent_completed_stat { - true => torrent_entry.completed > 0 || torrent_entry.peers.len() > 0, - false => torrent_entry.peers.len() > 0, + true => torrent_entry.completed > 0 || !torrent_entry.peers.is_empty(), + false => !torrent_entry.peers.is_empty(), } }); } else { From 1dc43f5645d0500432f814320daf8050c6b94853 Mon 
Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:58:19 +0100 Subject: [PATCH 0183/1003] clippy: fix src/tracker/mode.rs --- src/tracker/mode.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tracker/mode.rs b/src/tracker/mode.rs index 9110b7f4f..f444b4523 100644 --- a/src/tracker/mode.rs +++ b/src/tracker/mode.rs @@ -1,7 +1,7 @@ use serde; use serde::{Deserialize, Serialize}; -#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Debug)] +#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] pub enum TrackerMode { // Will track every new info hash and serve every peer. #[serde(rename = "public")] From ce0ea09d8e6504bc8bf2e0eaa5f7a2415cfdb293 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:59:40 +0100 Subject: [PATCH 0184/1003] clippy: fix src/tracker/peer.rs --- src/tracker/peer.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 7ac35179a..7a2599f82 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -75,8 +75,8 @@ impl TorrentPeer { // potentially substitute localhost ip with external ip pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: Option, port: u16) -> SocketAddr { - if remote_ip.is_loopback() && host_opt_ip.is_some() { - SocketAddr::new(host_opt_ip.unwrap(), port) + if let Some(host_ip) = host_opt_ip.filter(|_| remote_ip.is_loopback()) { + SocketAddr::new(host_ip, port) } else { SocketAddr::new(remote_ip, port) } From f48072e5f5d336c7c5038a8f4165ffdaf75c6326 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:00:10 +0100 Subject: [PATCH 0185/1003] clippy: fix src/tracker/statistics.rs --- src/tracker/statistics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index c4d4971af..ac3889270 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -10,7 +10,7 
@@ use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; const CHANNEL_BUFFER_SIZE: usize = 65_535; -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Eq)] pub enum TrackerStatisticsEvent { Tcp4Announce, Tcp4Scrape, From 2084c4a8375ccc14b3aac0c858865dbb3b807431 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:01:23 +0100 Subject: [PATCH 0186/1003] clippy: fix src/tracker/torrent.rs --- src/tracker/torrent.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 65eaa0a40..4e602d359 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -82,6 +82,12 @@ impl TorrentEntry { } } +impl Default for TorrentEntry { + fn default() -> Self { + Self::new() + } +} + #[derive(Debug)] pub struct TorrentStats { pub completed: u32, From 8f80060c52ada04b8ac6d1d86890d7c939a39d8b Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:04:46 +0100 Subject: [PATCH 0187/1003] clippy: fix src/config.rs --- src/config.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/config.rs b/src/config.rs index b59d572ea..8c17070d2 100644 --- a/src/config.rs +++ b/src/config.rs @@ -12,14 +12,14 @@ use {std, toml}; use crate::databases::database::DatabaseDrivers; use crate::mode::TrackerMode; -#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct UdpTrackerConfig { pub enabled: bool, pub bind_address: String, } #[serde_as] -#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct HttpTrackerConfig { pub enabled: bool, pub bind_address: String, @@ -30,14 +30,14 @@ pub struct HttpTrackerConfig { pub ssl_key_path: Option, } -#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct HttpApiConfig { pub enabled: bool, pub bind_address: String, pub access_tokens: 
HashMap, } -#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { pub log_level: Option, pub mode: TrackerMode, @@ -140,9 +140,9 @@ impl Configuration { eprintln!("Creating config file.."); let config = Configuration::default(); let _ = config.save_to_file(path); - return Err(ConfigError::Message(format!( - "Please edit the config.TOML in the root folder and restart the tracker." - ))); + return Err(ConfigError::Message( + "Please edit the config.TOML in the root folder and restart the tracker.".to_string(), + )); } let torrust_config: Configuration = config @@ -152,7 +152,7 @@ impl Configuration { Ok(torrust_config) } - pub fn save_to_file(&self, path: &str) -> Result<(), ()> { + pub fn save_to_file(&self, path: &str) -> Result<(), ConfigurationError> { let toml_string = toml::to_string(self).expect("Could not encode TOML value"); fs::write(path, toml_string).expect("Could not write to file!"); Ok(()) @@ -236,7 +236,7 @@ mod tests { let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); // Convert to argument type for Configuration::save_to_file - let config_file_path = temp_file.clone(); + let config_file_path = temp_file; let path = config_file_path.to_string_lossy().to_string(); let default_configuration = Configuration::default(); From 96c14324b9a8d9a2587cdeeeb95e5a71171d8fb9 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:05:22 +0100 Subject: [PATCH 0188/1003] clippy: fix src/setup.rs --- src/setup.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/setup.rs b/src/setup.rs index 0c5ed9004..387b6c26e 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -35,7 +35,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< udp_tracker_config.bind_address, config.mode ); } else { - jobs.push(udp_tracker::start_job(&udp_tracker_config, tracker.clone())) + 
jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone())) } } @@ -44,17 +44,17 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< if !http_tracker_config.enabled { continue; } - jobs.push(http_tracker::start_job(&http_tracker_config, tracker.clone())); + jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone())); } // Start HTTP API server if config.http_api.enabled { - jobs.push(tracker_api::start_job(&config, tracker.clone())); + jobs.push(tracker_api::start_job(config, tracker.clone())); } // Remove torrents without peers, every interval if config.inactive_peer_cleanup_interval > 0 { - jobs.push(torrent_cleanup::start_job(&config, tracker.clone())); + jobs.push(torrent_cleanup::start_job(config, tracker.clone())); } jobs From acf9ee26ec7ae1f2f53a84a8eb6f292222e7ea96 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:07:10 +0100 Subject: [PATCH 0189/1003] clippy: fix tests/udp.rs --- tests/udp.rs | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/tests/udp.rs b/tests/udp.rs index abd16427b..0bc2a6506 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -67,7 +67,7 @@ mod udp_tracker_server { let udp_tracker_config = &configuration.udp_trackers[0]; // Start the UDP tracker job - self.job = Some(udp_tracker::start_job(&udp_tracker_config, tracker.clone())); + self.job = Some(udp_tracker::start_job(udp_tracker_config, tracker)); self.bind_address = Some(udp_tracker_config.bind_address.clone()); @@ -136,7 +136,7 @@ mod udp_tracker_server { Err(_) => panic!("could not write request to bytes."), }; - self.udp_client.send(&request_data).await + self.udp_client.send(request_data).await } async fn receive(&self) -> Response { @@ -178,30 +178,24 @@ mod udp_tracker_server { fn is_error_response(response: &Response, error_message: &str) -> bool { match response { - Response::Error(error_response) => return error_response.message.starts_with(error_message), - _ => return 
false, - }; + Response::Error(error_response) => error_response.message.starts_with(error_message), + _ => false, + } } fn is_connect_response(response: &Response, transaction_id: TransactionId) -> bool { match response { - Response::Connect(connect_response) => return connect_response.transaction_id == transaction_id, - _ => return false, - }; + Response::Connect(connect_response) => connect_response.transaction_id == transaction_id, + _ => false, + } } fn is_ipv4_announce_response(response: &Response) -> bool { - match response { - Response::AnnounceIpv4(_) => return true, - _ => return false, - }; + matches!(response, Response::AnnounceIpv4(_)) } fn is_scrape_response(response: &Response) -> bool { - match response { - Response::Scrape(_) => return true, - _ => return false, - }; + matches!(response, Response::Scrape(_)) } #[tokio::test] From 8dfffe4db683df124e2b7e3c3ad967a36df1928f Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:13:28 +0100 Subject: [PATCH 0190/1003] clippy: fix src/udp/connection_cookie.rs --- src/udp/connection_cookie.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index a17431b9c..c40a56959 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -22,9 +22,8 @@ pub fn into_connection_id(connection_cookie: &Cookie) -> ConnectionId { pub fn make_connection_cookie(remote_address: &SocketAddr) -> Cookie { let time_extent = cookie_builder::get_last_time_extent(); - let cookie = cookie_builder::build(remote_address, &time_extent); //println!("remote_address: {remote_address:?}, time_extent: {time_extent:?}, cookie: {cookie:?}"); - cookie + cookie_builder::build(remote_address, &time_extent) } pub fn check_connection_cookie( From 7336d5e810d45453d96a770b05f5e3a8038ee729 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:14:57 +0100 Subject: [PATCH 0191/1003] clippy: fix src/udp/handlers.rs --- 
src/udp/handlers.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index b962b1333..cc4229b66 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -667,8 +667,8 @@ mod tests { let request = AnnounceRequestBuilder::default() .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .into(); - let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); - response + + handle_announce(remote_addr, &request, tracker.clone()).await.unwrap() } #[tokio::test] @@ -899,8 +899,8 @@ mod tests { let request = AnnounceRequestBuilder::default() .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .into(); - let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); - response + + handle_announce(remote_addr, &request, tracker.clone()).await.unwrap() } #[tokio::test] @@ -1073,7 +1073,7 @@ mod tests { let info_hashes = vec![*info_hash]; ScrapeRequest { - connection_id: into_connection_id(&make_connection_cookie(&remote_addr)), + connection_id: into_connection_id(&make_connection_cookie(remote_addr)), transaction_id: TransactionId(0i32), info_hashes, } From 78633c4662a743984e1c13115078593e8a07e38f Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:18:29 +0100 Subject: [PATCH 0192/1003] clippy: merge src/tracker/tracker.rs into src/tracker/mod.rs --- src/api/server.rs | 2 +- src/http/filters.rs | 2 +- src/http/handlers.rs | 2 +- src/http/routes.rs | 2 +- src/http/server.rs | 2 +- src/jobs/http_tracker.rs | 2 +- src/jobs/torrent_cleanup.rs | 2 +- src/jobs/tracker_api.rs | 2 +- src/jobs/udp_tracker.rs | 2 +- src/main.rs | 2 +- src/setup.rs | 2 +- src/tracker/mod.rs | 270 +++++++++++++++++++++++++++++++++++- src/tracker/tracker.rs | 268 ----------------------------------- src/udp/handlers.rs | 18 +-- src/udp/server.rs | 2 +- tests/udp.rs | 2 +- 16 files changed, 291 insertions(+), 291 
deletions(-) delete mode 100644 src/tracker/tracker.rs diff --git a/src/api/server.rs b/src/api/server.rs index a8b235a66..5a604aa0c 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -9,7 +9,7 @@ use warp::{filters, reply, serve, Filter}; use crate::peer::TorrentPeer; use crate::protocol::common::*; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; #[derive(Deserialize, Debug)] struct TorrentInfoQuery { diff --git a/src/http/filters.rs b/src/http/filters.rs index bee89661b..42d1592ff 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -7,7 +7,7 @@ use warp::{reject, Filter, Rejection}; use crate::http::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; use crate::tracker::key::AuthKey; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::{InfoHash, PeerId, MAX_SCRAPE_TORRENTS}; /// Pass Arc along diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 73f7c866c..87d2d51f6 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -15,7 +15,7 @@ use crate::peer::TorrentPeer; use crate::tracker::key::AuthKey; use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::{TorrentError, TorrentStats}; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::InfoHash; /// Authenticate InfoHash using optional AuthKey diff --git a/src/http/routes.rs b/src/http/routes.rs index a9ca3027f..8bfaf5ed9 100644 --- a/src/http/routes.rs +++ b/src/http/routes.rs @@ -6,7 +6,7 @@ use warp::{Filter, Rejection}; use crate::http::{ handle_announce, handle_scrape, send_error, with_announce_request, with_auth_key, with_scrape_request, with_tracker, }; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; /// All routes pub fn routes(tracker: Arc) -> impl Filter + Clone { diff --git a/src/http/server.rs b/src/http/server.rs index 8b92d8792..4e48f97e3 100644 --- 
a/src/http/server.rs +++ b/src/http/server.rs @@ -2,7 +2,7 @@ use std::net::SocketAddr; use std::sync::Arc; use crate::http::routes; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; /// Server that listens on HTTP, needs a TorrentTracker #[derive(Clone)] diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index ef67f0a7e..2d8f307b4 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use log::{info, warn}; use tokio::task::JoinHandle; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::{HttpServer, HttpTrackerConfig}; pub fn start_job(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 6e4b0c77e..04b064043 100644 --- a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -4,7 +4,7 @@ use chrono::Utc; use log::info; use tokio::task::JoinHandle; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::Configuration; pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index f3c9ae788..97b1fa3b0 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -4,7 +4,7 @@ use log::info; use tokio::task::JoinHandle; use crate::api::server; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::Configuration; pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index f93979c9f..00fdaddbe 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use log::{error, info, warn}; use tokio::task::JoinHandle; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::{UdpServer, UdpTrackerConfig}; pub fn 
start_job(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { diff --git a/src/main.rs b/src/main.rs index 08061cd7b..bf832dbf4 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use log::info; use torrust_tracker::stats::setup_statistics; -use torrust_tracker::tracker::tracker::TorrentTracker; +use torrust_tracker::tracker::TorrentTracker; use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, Configuration}; #[tokio::main] diff --git a/src/setup.rs b/src/setup.rs index 387b6c26e..2ecc1c143 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -4,7 +4,7 @@ use log::warn; use tokio::task::JoinHandle; use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, udp_tracker}; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::Configuration; pub async fn setup(config: &Configuration, tracker: Arc) -> Vec> { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index bbb027a35..77f51098a 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -3,4 +3,272 @@ pub mod mode; pub mod peer; pub mod statistics; pub mod torrent; -pub mod tracker; + +use std::collections::btree_map::Entry; +use std::collections::BTreeMap; +use std::net::SocketAddr; +use std::sync::Arc; +use std::time::Duration; + +use tokio::sync::mpsc::error::SendError; +use tokio::sync::{RwLock, RwLockReadGuard}; + +use crate::databases::database; +use crate::databases::database::Database; +use crate::mode::TrackerMode; +use crate::peer::TorrentPeer; +use crate::protocol::common::InfoHash; +use crate::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; +use crate::tracker::key::AuthKey; +use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; +use crate::Configuration; + +pub struct TorrentTracker { + pub config: Arc, + mode: TrackerMode, + keys: RwLock>, + whitelist: RwLock>, + torrents: RwLock>, + stats_event_sender: Option>, + 
stats_repository: StatsRepository, + database: Box, +} + +impl TorrentTracker { + pub fn new( + config: Arc, + stats_event_sender: Option>, + stats_repository: StatsRepository, + ) -> Result { + let database = database::connect_database(&config.db_driver, &config.db_path)?; + + Ok(TorrentTracker { + config: config.clone(), + mode: config.mode, + keys: RwLock::new(std::collections::HashMap::new()), + whitelist: RwLock::new(std::collections::HashSet::new()), + torrents: RwLock::new(std::collections::BTreeMap::new()), + stats_event_sender, + stats_repository, + database, + }) + } + + pub fn is_public(&self) -> bool { + self.mode == TrackerMode::Public + } + + pub fn is_private(&self) -> bool { + self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed + } + + pub fn is_whitelisted(&self) -> bool { + self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed + } + + pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { + let auth_key = key::generate_auth_key(lifetime); + self.database.add_key_to_keys(&auth_key).await?; + self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); + Ok(auth_key) + } + + pub async fn remove_auth_key(&self, key: &str) -> Result<(), database::Error> { + self.database.remove_key_from_keys(key).await?; + self.keys.write().await.remove(key); + Ok(()) + } + + pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> { + match self.keys.read().await.get(&auth_key.key) { + None => Err(key::Error::KeyInvalid), + Some(key) => key::verify_auth_key(key), + } + } + + pub async fn load_keys(&self) -> Result<(), database::Error> { + let keys_from_database = self.database.load_keys().await?; + let mut keys = self.keys.write().await; + + keys.clear(); + + for key in keys_from_database { + let _ = keys.insert(key.key.clone(), key); + } + + Ok(()) + } + + // Adding torrents is not relevant to public trackers. 
+ pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + self.add_torrent_to_database_whitelist(info_hash).await?; + self.add_torrent_to_memory_whitelist(info_hash).await; + Ok(()) + } + + async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + self.database.add_info_hash_to_whitelist(*info_hash).await?; + Ok(()) + } + + pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.insert(*info_hash) + } + + // Removing torrents is not relevant to public trackers. + pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + self.database.remove_info_hash_from_whitelist(*info_hash).await?; + self.whitelist.write().await.remove(info_hash); + Ok(()) + } + + pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { + self.whitelist.read().await.contains(info_hash) + } + + pub async fn load_whitelist(&self) -> Result<(), database::Error> { + let whitelisted_torrents_from_database = self.database.load_whitelist().await?; + let mut whitelist = self.whitelist.write().await; + + whitelist.clear(); + + for info_hash in whitelisted_torrents_from_database { + let _ = whitelist.insert(info_hash); + } + + Ok(()) + } + + pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { + // no authentication needed in public mode + if self.is_public() { + return Ok(()); + } + + // check if auth_key is set and valid + if self.is_private() { + match key { + Some(key) => { + if self.verify_auth_key(key).await.is_err() { + return Err(TorrentError::PeerKeyNotValid); + } + } + None => { + return Err(TorrentError::PeerNotAuthenticated); + } + } + } + + // check if info_hash is whitelisted + if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await { + return Err(TorrentError::TorrentNotWhitelisted); + } + + Ok(()) 
+ } + + // Loading the torrents from database into memory + pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { + let persistent_torrents = self.database.load_persistent_torrents().await?; + let mut torrents = self.torrents.write().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(&info_hash) { + continue; + } + + let torrent_entry = TorrentEntry { + peers: Default::default(), + completed, + }; + + torrents.insert(info_hash, torrent_entry); + } + + Ok(()) + } + + /// Get all torrent peers for a given torrent filtering out the peer with the client address + pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { + let read_lock = self.torrents.read().await; + + match read_lock.get(info_hash) { + None => vec![], + Some(entry) => entry.get_peers(Some(client_addr)).into_iter().cloned().collect(), + } + } + + /// Get all torrent peers for a given torrent + pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { + let read_lock = self.torrents.read().await; + + match read_lock.get(info_hash) { + None => vec![], + Some(entry) => entry.get_peers(None).into_iter().cloned().collect(), + } + } + + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> TorrentStats { + let mut torrents = self.torrents.write().await; + + let torrent_entry = match torrents.entry(*info_hash) { + Entry::Vacant(vacant) => vacant.insert(TorrentEntry::new()), + Entry::Occupied(entry) => entry.into_mut(), + }; + + let stats_updated = torrent_entry.update_peer(peer); + + // todo: move this action to a separate worker + if self.config.persistent_torrent_completed_stat && stats_updated { + let _ = self + .database + .save_persistent_torrent(info_hash, torrent_entry.completed) + .await; + } + + let (seeders, completed, leechers) = torrent_entry.get_stats(); + + TorrentStats { + seeders, + 
leechers, + completed, + } + } + + pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { + self.torrents.read().await + } + + pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { + self.stats_repository.get_stats().await + } + + pub async fn send_stats_event(&self, event: TrackerStatisticsEvent) -> Option>> { + match &self.stats_event_sender { + None => None, + Some(stats_event_sender) => stats_event_sender.send_event(event).await, + } + } + + // Remove inactive peers and (optionally) peerless torrents + pub async fn cleanup_torrents(&self) { + let mut torrents_lock = self.torrents.write().await; + + // If we don't need to remove torrents we will use the faster iter + if self.config.remove_peerless_torrents { + torrents_lock.retain(|_, torrent_entry| { + torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); + + match self.config.persistent_torrent_completed_stat { + true => torrent_entry.completed > 0 || !torrent_entry.peers.is_empty(), + false => !torrent_entry.peers.is_empty(), + } + }); + } else { + for (_, torrent_entry) in torrents_lock.iter_mut() { + torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); + } + } + } +} diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs deleted file mode 100644 index 15000c827..000000000 --- a/src/tracker/tracker.rs +++ /dev/null @@ -1,268 +0,0 @@ -use std::collections::btree_map::Entry; -use std::collections::BTreeMap; -use std::net::SocketAddr; -use std::sync::Arc; -use std::time::Duration; - -use tokio::sync::mpsc::error::SendError; -use tokio::sync::{RwLock, RwLockReadGuard}; - -use crate::databases::database; -use crate::databases::database::Database; -use crate::mode::TrackerMode; -use crate::peer::TorrentPeer; -use crate::protocol::common::InfoHash; -use crate::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; -use crate::tracker::key::AuthKey; -use crate::tracker::torrent::{TorrentEntry, 
TorrentError, TorrentStats}; -use crate::{key, Configuration}; - -pub struct TorrentTracker { - pub config: Arc, - mode: TrackerMode, - keys: RwLock>, - whitelist: RwLock>, - torrents: RwLock>, - stats_event_sender: Option>, - stats_repository: StatsRepository, - database: Box, -} - -impl TorrentTracker { - pub fn new( - config: Arc, - stats_event_sender: Option>, - stats_repository: StatsRepository, - ) -> Result { - let database = database::connect_database(&config.db_driver, &config.db_path)?; - - Ok(TorrentTracker { - config: config.clone(), - mode: config.mode, - keys: RwLock::new(std::collections::HashMap::new()), - whitelist: RwLock::new(std::collections::HashSet::new()), - torrents: RwLock::new(std::collections::BTreeMap::new()), - stats_event_sender, - stats_repository, - database, - }) - } - - pub fn is_public(&self) -> bool { - self.mode == TrackerMode::Public - } - - pub fn is_private(&self) -> bool { - self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed - } - - pub fn is_whitelisted(&self) -> bool { - self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed - } - - pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { - let auth_key = key::generate_auth_key(lifetime); - self.database.add_key_to_keys(&auth_key).await?; - self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); - Ok(auth_key) - } - - pub async fn remove_auth_key(&self, key: &str) -> Result<(), database::Error> { - self.database.remove_key_from_keys(key).await?; - self.keys.write().await.remove(key); - Ok(()) - } - - pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> { - match self.keys.read().await.get(&auth_key.key) { - None => Err(key::Error::KeyInvalid), - Some(key) => key::verify_auth_key(key), - } - } - - pub async fn load_keys(&self) -> Result<(), database::Error> { - let keys_from_database = self.database.load_keys().await?; - let mut keys = self.keys.write().await; - - 
keys.clear(); - - for key in keys_from_database { - let _ = keys.insert(key.key.clone(), key); - } - - Ok(()) - } - - // Adding torrents is not relevant to public trackers. - pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { - self.add_torrent_to_database_whitelist(info_hash).await?; - self.add_torrent_to_memory_whitelist(info_hash).await; - Ok(()) - } - - async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { - self.database.add_info_hash_to_whitelist(*info_hash).await?; - Ok(()) - } - - pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { - self.whitelist.write().await.insert(*info_hash) - } - - // Removing torrents is not relevant to public trackers. - pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { - self.database.remove_info_hash_from_whitelist(*info_hash).await?; - self.whitelist.write().await.remove(info_hash); - Ok(()) - } - - pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { - self.whitelist.read().await.contains(info_hash) - } - - pub async fn load_whitelist(&self) -> Result<(), database::Error> { - let whitelisted_torrents_from_database = self.database.load_whitelist().await?; - let mut whitelist = self.whitelist.write().await; - - whitelist.clear(); - - for info_hash in whitelisted_torrents_from_database { - let _ = whitelist.insert(info_hash); - } - - Ok(()) - } - - pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { - // no authentication needed in public mode - if self.is_public() { - return Ok(()); - } - - // check if auth_key is set and valid - if self.is_private() { - match key { - Some(key) => { - if self.verify_auth_key(key).await.is_err() { - return Err(TorrentError::PeerKeyNotValid); - } - } - None => { - return Err(TorrentError::PeerNotAuthenticated); - } - } - } - - // check 
if info_hash is whitelisted - if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await { - return Err(TorrentError::TorrentNotWhitelisted); - } - - Ok(()) - } - - // Loading the torrents from database into memory - pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { - let persistent_torrents = self.database.load_persistent_torrents().await?; - let mut torrents = self.torrents.write().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(&info_hash) { - continue; - } - - let torrent_entry = TorrentEntry { - peers: Default::default(), - completed, - }; - - torrents.insert(info_hash, torrent_entry); - } - - Ok(()) - } - - /// Get all torrent peers for a given torrent filtering out the peer with the client address - pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { - let read_lock = self.torrents.read().await; - - match read_lock.get(info_hash) { - None => vec![], - Some(entry) => entry.get_peers(Some(client_addr)).into_iter().cloned().collect(), - } - } - - /// Get all torrent peers for a given torrent - pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { - let read_lock = self.torrents.read().await; - - match read_lock.get(info_hash) { - None => vec![], - Some(entry) => entry.get_peers(None).into_iter().cloned().collect(), - } - } - - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> TorrentStats { - let mut torrents = self.torrents.write().await; - - let torrent_entry = match torrents.entry(*info_hash) { - Entry::Vacant(vacant) => vacant.insert(TorrentEntry::new()), - Entry::Occupied(entry) => entry.into_mut(), - }; - - let stats_updated = torrent_entry.update_peer(peer); - - // todo: move this action to a separate worker - if self.config.persistent_torrent_completed_stat && stats_updated { - let _ = self - .database - 
.save_persistent_torrent(info_hash, torrent_entry.completed) - .await; - } - - let (seeders, completed, leechers) = torrent_entry.get_stats(); - - TorrentStats { - seeders, - leechers, - completed, - } - } - - pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { - self.torrents.read().await - } - - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { - self.stats_repository.get_stats().await - } - - pub async fn send_stats_event(&self, event: TrackerStatisticsEvent) -> Option>> { - match &self.stats_event_sender { - None => None, - Some(stats_event_sender) => stats_event_sender.send_event(event).await, - } - } - - // Remove inactive peers and (optionally) peerless torrents - pub async fn cleanup_torrents(&self) { - let mut torrents_lock = self.torrents.write().await; - - // If we don't need to remove torrents we will use the faster iter - if self.config.remove_peerless_torrents { - torrents_lock.retain(|_, torrent_entry| { - torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); - - match self.config.persistent_torrent_completed_stat { - true => torrent_entry.completed > 0 || !torrent_entry.peers.is_empty(), - false => !torrent_entry.peers.is_empty(), - } - }); - } else { - for (_, torrent_entry) in torrents_lock.iter_mut() { - torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); - } - } - } -} diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index cc4229b66..5514bc1eb 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -10,7 +10,7 @@ use super::connection_cookie::{check_connection_cookie, from_connection_id, into use crate::peer::TorrentPeer; use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::TorrentError; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::udp::errors::ServerError; use crate::udp::request::AnnounceRequestWrapper; use crate::{InfoHash, MAX_SCRAPE_TORRENTS}; @@ -256,7 +256,7 @@ mod tests { use 
crate::peer::TorrentPeer; use crate::protocol::clock::{DefaultClock, Time}; use crate::statistics::StatsTracker; - use crate::tracker::tracker::TorrentTracker; + use crate::tracker::TorrentTracker; use crate::{Configuration, PeerId}; fn default_tracker_config() -> Arc { @@ -374,7 +374,7 @@ mod tests { use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr}; use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::tracker::TorrentTracker; + use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_connect; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -546,7 +546,7 @@ mod tests { use mockall::predicate::eq; use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::tracker::TorrentTracker; + use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -771,7 +771,7 @@ mod tests { use mockall::predicate::eq; use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::tracker::TorrentTracker; + use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -952,7 +952,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use crate::statistics::StatsTracker; - use crate::tracker::tracker::TorrentTracker; + use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; 
use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -1013,7 +1013,7 @@ mod tests { }; use super::TorrentPeerBuilder; - use crate::tracker::tracker::TorrentTracker; + use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_scrape; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -1232,7 +1232,7 @@ mod tests { use super::sample_scrape_request; use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::tracker::TorrentTracker; + use crate::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr}; @@ -1265,7 +1265,7 @@ mod tests { use super::sample_scrape_request; use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::tracker::TorrentTracker; + use crate::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr}; diff --git a/src/udp/server.rs b/src/udp/server.rs index 11cb61d99..2f41c3c4d 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -6,7 +6,7 @@ use aquatic_udp_protocol::Response; use log::{debug, info}; use tokio::net::UdpSocket; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::udp::{handle_packet, MAX_PACKET_SIZE}; pub struct UdpServer { diff --git a/tests/udp.rs b/tests/udp.rs index 0bc2a6506..c88dc9885 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -19,7 +19,7 @@ mod udp_tracker_server { use tokio::task::JoinHandle; use torrust_tracker::jobs::udp_tracker; use torrust_tracker::tracker::statistics::StatsTracker; - use torrust_tracker::tracker::tracker::TorrentTracker; + use torrust_tracker::tracker::TorrentTracker; use 
torrust_tracker::udp::MAX_PACKET_SIZE; use torrust_tracker::{logging, static_time, Configuration}; From 4dd7326748b78a5ead86e98c71d1226a8fec8e91 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:52:53 +0100 Subject: [PATCH 0193/1003] ci: check code and clippy in test workflow --- .github/workflows/test_build_release.yml | 10 ++++++++++ cSpell.json | 4 ++++ 2 files changed, 14 insertions(+) diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 1266ae51f..4acf14277 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -36,6 +36,16 @@ jobs: toolchain: stable components: llvm-tools-preview - uses: Swatinem/rust-cache@v1 + - name: Check Rust Code + uses: actions-rs/cargo@v1 + with: + command: check + args: --all-targets + - name: Clippy Rust Code + uses: actions-rs/cargo@v1 + with: + command: clippy + args: --all-targets - uses: taiki-e/install-action@cargo-llvm-cov - uses: taiki-e/install-action@nextest - name: Run Tests diff --git a/cSpell.json b/cSpell.json index 2cc3e1179..1df69e4e7 100644 --- a/cSpell.json +++ b/cSpell.json @@ -12,6 +12,7 @@ "canonicalize", "canonicalized", "chrono", + "clippy", "completei", "downloadedi", "filesd", @@ -27,6 +28,7 @@ "Lphant", "mockall", "nanos", + "nextest", "nocapture", "ostr", "Pando", @@ -34,11 +36,13 @@ "repr", "rngs", "rusqlite", + "rustfmt", "Seedable", "Shareaza", "sharktorrent", "socketaddr", "sqllite", + "Swatinem", "Swiftbit", "thiserror", "Torrentstorm", From 23916a60a2e6881ec4336f1f995e57e2fea8c54d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Nov 2022 19:13:43 +0000 Subject: [PATCH 0194/1003] fix: [#108] revert change in auth key generation endpoint The response for the endpoint POST /api/key/:seconds_valid should be: ```json { "key": "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM", "valid_until": 1674804892 } ``` instead of: ```json { "key": "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM", "valid_until": {
"secs": 1674804892, "nanos": 423855037 } } ``` It was propagated to the API after changing the internal struct `AuthKey` from: ```rust pub struct AuthKey { pub key: String, pub valid_until: Option, } ``` to: ```rust pub struct AuthKey { pub key: String, pub valid_until: Option, } ``` --- src/api/mod.rs | 1 + src/api/resources/auth_key_resource.rs | 57 ++++++++++++++++++++++++++ src/api/resources/mod.rs | 9 ++++ src/api/server.rs | 4 +- 4 files changed, 70 insertions(+), 1 deletion(-) create mode 100644 src/api/resources/auth_key_resource.rs create mode 100644 src/api/resources/mod.rs diff --git a/src/api/mod.rs b/src/api/mod.rs index 74f47ad34..e08417133 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1 +1,2 @@ pub mod server; +pub mod resources; diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resources/auth_key_resource.rs new file mode 100644 index 000000000..4f74266f6 --- /dev/null +++ b/src/api/resources/auth_key_resource.rs @@ -0,0 +1,57 @@ +use serde::{Deserialize, Serialize}; + +use crate::key::AuthKey; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct AuthKeyResource { + pub key: String, + pub valid_until: Option, +} + +impl AuthKeyResource { + pub fn from_auth_key(auth_key: &AuthKey) -> Self { + Self { + key: auth_key.key.clone(), + valid_until: auth_key.valid_until.map(|duration| duration.as_secs()), + } + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use super::AuthKeyResource; + use crate::key::AuthKey; + use crate::protocol::clock::{DefaultClock, TimeNow}; + + #[test] + fn it_should_be_instantiated_from_an_auth_key() { + let expire_time = DefaultClock::add(&Duration::new(60, 0)).unwrap(); + + let auth_key_resource = AuthKey { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + valid_until: Some(expire_time), + }; + + assert_eq!( + AuthKeyResource::from_auth_key(&auth_key_resource), + AuthKeyResource { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // 
cspell:disable-line + valid_until: Some(expire_time.as_secs()) + } + ) + } + + #[test] + fn it_should_be_converted_to_json() { + assert_eq!( + serde_json::to_string(&AuthKeyResource { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + valid_until: Some(60) + }) + .unwrap(), + "{\"key\":\"IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM\",\"valid_until\":60}" // cspell:disable-line + ); + } +} diff --git a/src/api/resources/mod.rs b/src/api/resources/mod.rs new file mode 100644 index 000000000..f7d24ee86 --- /dev/null +++ b/src/api/resources/mod.rs @@ -0,0 +1,9 @@ +//! These are the Rest API resources. +//! +//! WIP. Not all endpoints have their resource structs. +//! +//! - [x] AuthKeys +//! - [ ] ... +//! - [ ] ... +//! - [ ] ... +pub mod auth_key_resource; diff --git a/src/api/server.rs b/src/api/server.rs index 5a604aa0c..89d3bb38d 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -11,6 +11,8 @@ use crate::peer::TorrentPeer; use crate::protocol::common::*; use crate::tracker::TorrentTracker; +use super::resources::auth_key_resource::AuthKeyResource; + #[derive(Deserialize, Debug)] struct TorrentInfoQuery { offset: Option, @@ -267,7 +269,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp }) .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => Ok(warp::reply::json(&auth_key)), + Ok(auth_key) => Ok(warp::reply::json(&AuthKeyResource::from_auth_key(&auth_key))), Err(..) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to generate key".into(), })), From ede046082660a0bf69c1518e329413aac1959634 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Nov 2022 19:17:09 +0000 Subject: [PATCH 0195/1003] feat: [#108] add dev dependency reqwest Added for API end to end tests. 
--- Cargo.lock | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++-- Cargo.toml | 1 + 2 files changed, 100 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ce66efa09..e3a6d9c09 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -528,6 +528,15 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +[[package]] +name = "encoding_rs" +version = "0.8.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" +dependencies = [ + "cfg-if", +] + [[package]] name = "env_logger" version = "0.8.4" @@ -941,9 +950,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.20" +version = "0.14.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" +checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" dependencies = [ "bytes", "futures-channel", @@ -963,6 +972,19 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-native-tls", +] + [[package]] name = "iana-time-zone" version = "0.1.51" @@ -1035,6 +1057,12 @@ dependencies = [ "syn", ] +[[package]] +name = "ipnet" +version = "2.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f88c5561171189e69df9d98bcf18fd5f9558300f7ea7b801eb8a0fd748bd8745" + [[package]] name = "itertools" version = "0.10.5" @@ -1857,6 +1885,43 @@ dependencies = [ "winapi", ] +[[package]] +name = "reqwest" +version = "0.11.13" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c" +dependencies = [ + "base64", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-tls", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg", +] + [[package]] name = "ring" version = "0.16.20" @@ -2542,6 +2607,16 @@ dependencies = [ "syn", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.23.4" @@ -2621,6 +2696,7 @@ dependencies = [ "r2d2_mysql", "r2d2_sqlite", "rand", + "reqwest", "serde", "serde_bencode", "serde_json", @@ -2887,6 +2963,18 @@ dependencies = [ "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "wasm-bindgen-macro" version = "0.2.83" @@ -3076,6 +3164,15 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +[[package]] +name = "winreg" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" +dependencies = [ + "winapi", +] + [[package]] name = "wyz" version = "0.4.0" diff --git 
a/Cargo.toml b/Cargo.toml index b2b256a2c..80e9009f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,3 +62,4 @@ uuid = { version = "1", features = ["v4"] } [dev-dependencies] mockall = "0.11" +reqwest = { version = "0.11.13", features = ["json"] } From 409f82af4cb76936da389b37deca05d8321710fa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Nov 2022 19:18:55 +0000 Subject: [PATCH 0196/1003] test: [#108] add e2e test for auth key generation API endpoint --- .gitignore | 1 + src/api/mod.rs | 2 +- src/api/resources/auth_key_resource.rs | 56 +++++++++--- src/api/resources/mod.rs | 4 +- src/api/server.rs | 5 +- tests/api.rs | 119 +++++++++++++++++++++++++ tests/common/mod.rs | 8 ++ tests/udp.rs | 19 ++-- 8 files changed, 185 insertions(+), 29 deletions(-) create mode 100644 tests/api.rs create mode 100644 tests/common/mod.rs diff --git a/.gitignore b/.gitignore index e2956b2d6..ba9ceeb53 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ /config.toml /data.db /.vscode/launch.json + diff --git a/src/api/mod.rs b/src/api/mod.rs index e08417133..46ad24218 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,2 +1,2 @@ -pub mod server; pub mod resources; +pub mod server; diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resources/auth_key_resource.rs index 4f74266f6..c38b7cc18 100644 --- a/src/api/resources/auth_key_resource.rs +++ b/src/api/resources/auth_key_resource.rs @@ -1,6 +1,9 @@ +use std::convert::From; + use serde::{Deserialize, Serialize}; use crate::key::AuthKey; +use crate::protocol::clock::DurationSinceUnixEpoch; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct AuthKeyResource { @@ -8,11 +11,22 @@ pub struct AuthKeyResource { pub valid_until: Option, } -impl AuthKeyResource { - pub fn from_auth_key(auth_key: &AuthKey) -> Self { - Self { - key: auth_key.key.clone(), - valid_until: auth_key.valid_until.map(|duration| duration.as_secs()), +impl From for AuthKey { + fn from(auth_key_resource: AuthKeyResource) -> Self { + 
AuthKey { + key: auth_key_resource.key, + valid_until: auth_key_resource + .valid_until + .map(|valid_until| DurationSinceUnixEpoch::new(valid_until, 0)), + } + } +} + +impl From for AuthKeyResource { + fn from(auth_key: AuthKey) -> Self { + AuthKeyResource { + key: auth_key.key, + valid_until: auth_key.valid_until.map(|valid_until| valid_until.as_secs()), } } } @@ -26,25 +40,43 @@ mod tests { use crate::protocol::clock::{DefaultClock, TimeNow}; #[test] - fn it_should_be_instantiated_from_an_auth_key() { - let expire_time = DefaultClock::add(&Duration::new(60, 0)).unwrap(); + fn it_should_be_convertible_into_an_auth_key() { + let duration_in_secs = 60; + + let auth_key_resource = AuthKeyResource { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + valid_until: Some(duration_in_secs), + }; + + assert_eq!( + AuthKey::from(auth_key_resource), + AuthKey { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + valid_until: Some(DefaultClock::add(&Duration::new(duration_in_secs, 0)).unwrap()) + } + ) + } + + #[test] + fn it_should_be_convertible_from_an_auth_key() { + let duration_in_secs = 60; - let auth_key_resource = AuthKey { + let auth_key = AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: Some(expire_time), + valid_until: Some(DefaultClock::add(&Duration::new(duration_in_secs, 0)).unwrap()), }; assert_eq!( - AuthKeyResource::from_auth_key(&auth_key_resource), + AuthKeyResource::from(auth_key), AuthKeyResource { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: Some(expire_time.as_secs()) + valid_until: Some(duration_in_secs) } ) } #[test] - fn it_should_be_converted_to_json() { + fn it_should_be_convertible_into_json() { assert_eq!( serde_json::to_string(&AuthKeyResource { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line diff --git a/src/api/resources/mod.rs b/src/api/resources/mod.rs index 
f7d24ee86..4b4f2214c 100644 --- a/src/api/resources/mod.rs +++ b/src/api/resources/mod.rs @@ -1,7 +1,7 @@ //! These are the Rest API resources. -//! +//! //! WIP. Not all endpoints have their resource structs. -//! +//! //! - [x] AuthKeys //! - [ ] ... //! - [ ] ... diff --git a/src/api/server.rs b/src/api/server.rs index 89d3bb38d..9f215710e 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -7,12 +7,11 @@ use std::time::Duration; use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; +use super::resources::auth_key_resource::AuthKeyResource; use crate::peer::TorrentPeer; use crate::protocol::common::*; use crate::tracker::TorrentTracker; -use super::resources::auth_key_resource::AuthKeyResource; - #[derive(Deserialize, Debug)] struct TorrentInfoQuery { offset: Option, @@ -269,7 +268,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp }) .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => Ok(warp::reply::json(&AuthKeyResource::from_auth_key(&auth_key))), + Ok(auth_key) => Ok(warp::reply::json(&AuthKeyResource::from(auth_key))), Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { reason: "failed to generate key".into(), })), diff --git a/tests/api.rs b/tests/api.rs new file mode 100644 index 000000000..38966a81b --- /dev/null +++ b/tests/api.rs @@ -0,0 +1,119 @@ +/// Integration tests for the tracker API +/// +/// cargo test tracker_api -- --nocapture +extern crate rand; + +mod common; + +mod tracker_api { + use core::panic; + use std::env; + use std::sync::atomic::{AtomicBool, Ordering}; + use std::sync::Arc; + + use tokio::task::JoinHandle; + use tokio::time::{sleep, Duration}; + use torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; + use torrust_tracker::jobs::tracker_api; + use torrust_tracker::tracker::key::AuthKey; + use torrust_tracker::tracker::statistics::StatsTracker; + use torrust_tracker::tracker::TorrentTracker; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration}; + + use crate::common::ephemeral_random_port; + + #[tokio::test] + async fn should_generate_a_new_auth_key() { + let configuration = tracker_configuration(); + let api_server = new_running_api_server(configuration.clone()).await; + + let bind_address = api_server.bind_address.unwrap().clone(); + let seconds_valid = 60; + let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); + + let url = format!("http://{}/api/key/{}?token={}", &bind_address, &seconds_valid, &api_token); + + let auth_key: AuthKeyResource = reqwest::Client::new().post(url).send().await.unwrap().json().await.unwrap(); + + // Verify the key with the tracker + assert!(api_server + .tracker + .unwrap() + .verify_auth_key(&AuthKey::from(auth_key)) + .await + .is_ok()); + } + + fn tracker_configuration() -> Arc { + let mut config = Configuration::default(); + config.log_level = Some("off".to_owned()); + + config.http_api.bind_address = format!("127.0.0.1:{}", ephemeral_random_port()); + + // Temp database + let temp_directory = env::temp_dir(); + let temp_file = 
temp_directory.join("data.db"); + config.db_path = temp_file.to_str().unwrap().to_owned(); + + Arc::new(config) + } + + async fn new_running_api_server(configuration: Arc) -> ApiServer { + let mut api_server = ApiServer::new(); + api_server.start(configuration).await; + api_server + } + + pub struct ApiServer { + pub started: AtomicBool, + pub job: Option>, + pub bind_address: Option, + pub tracker: Option>, + } + + impl ApiServer { + pub fn new() -> Self { + Self { + started: AtomicBool::new(false), + job: None, + bind_address: None, + tracker: None, + } + } + + pub async fn start(&mut self, configuration: Arc) { + if !self.started.load(Ordering::Relaxed) { + self.bind_address = Some(configuration.http_api.bind_address.clone()); + + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); + + // Initialize Torrust tracker + let tracker = match TorrentTracker::new(configuration.clone(), Some(stats_event_sender), stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + self.tracker = Some(tracker.clone()); + + // Initialize logging + logging::setup_logging(&configuration); + + // Start the HTTP API job + self.job = Some(tracker_api::start_job(&configuration, tracker.clone())); + + self.started.store(true, Ordering::Relaxed); + + // Wait to give time to the API server to be ready to accept requests + sleep(Duration::from_millis(100)).await; + } + } + } +} diff --git a/tests/common/mod.rs b/tests/common/mod.rs new file mode 100644 index 000000000..5fd484cf5 --- /dev/null +++ b/tests/common/mod.rs @@ -0,0 +1,8 @@ +use rand::{thread_rng, Rng}; + +pub fn ephemeral_random_port() -> u16 { + // todo: this may produce random test failures because two 
tests can try to bind the same port. + // We could create a pool of available ports (with read/write lock) + let mut rng = thread_rng(); + rng.gen_range(49152..65535) +} diff --git a/tests/udp.rs b/tests/udp.rs index c88dc9885..ab96259c5 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -3,6 +3,8 @@ /// cargo test udp_tracker_server -- --nocapture extern crate rand; +mod common; + mod udp_tracker_server { use core::panic; use std::io::Cursor; @@ -14,14 +16,15 @@ mod udp_tracker_server { AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, Request, Response, ScrapeRequest, TransactionId, }; - use rand::{thread_rng, Rng}; use tokio::net::UdpSocket; use tokio::task::JoinHandle; use torrust_tracker::jobs::udp_tracker; use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::TorrentTracker; use torrust_tracker::udp::MAX_PACKET_SIZE; - use torrust_tracker::{logging, static_time, Configuration}; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration}; + + use crate::common::ephemeral_random_port; fn tracker_configuration() -> Arc { let mut config = Configuration::default(); @@ -50,6 +53,9 @@ mod udp_tracker_server { // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + // Initialize stats tracker let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); @@ -162,15 +168,6 @@ mod udp_tracker_server { [0; MAX_PACKET_SIZE] } - /// Generates a random ephemeral port for a client source address - fn ephemeral_random_port() -> u16 { - // todo: this may produce random test failures because two tests can try to bind the same port. 
- // We could either use the same client for all tests (slower) or - // create a pool of available ports (with read/write lock) - let mut rng = thread_rng(); - rng.gen_range(49152..65535) - } - /// Generates the source address for the UDP client fn source_address(port: u16) -> String { format!("127.0.0.1:{}", port) From 9cfab4d009789994db6614f16d2f70b4ed5f4e4f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 24 Nov 2022 17:07:26 +0000 Subject: [PATCH 0197/1003] test: [#74] reproduce API bug with an e2e test The API endpoint to whitelist torrents returns an error if you try to whitelist the same torrent twice. This test reproduces that wrong behavior before fixing it. --- tests/api.rs | 44 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/tests/api.rs b/tests/api.rs index 38966a81b..8de220093 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -44,15 +44,53 @@ mod tracker_api { .is_ok()); } + #[tokio::test] + async fn should_whitelist_a_torrent() { + let configuration = tracker_configuration(); + let api_server = new_running_api_server(configuration.clone()).await; + + let bind_address = api_server.bind_address.unwrap().clone(); + let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let url = format!("http://{}/api/whitelist/{}?token={}", &bind_address, &info_hash, &api_token); + + let res = reqwest::Client::new().post(url.clone()).send().await.unwrap(); + + assert_eq!(res.status(), 200); + } + + #[tokio::test] + async fn should_whitelist_a_torrent_that_has_been_already_whitelisted() { + let configuration = tracker_configuration(); + let api_server = new_running_api_server(configuration.clone()).await; + + let bind_address = api_server.bind_address.unwrap().clone(); + let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); + let info_hash = 
"9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let url = format!("http://{}/api/whitelist/{}?token={}", &bind_address, &info_hash, &api_token); + + // First whitelist request + let res = reqwest::Client::new().post(url.clone()).send().await.unwrap(); + assert_eq!(res.status(), 200); + + // Second whitelist request + let res = reqwest::Client::new().post(url.clone()).send().await.unwrap(); + assert_eq!(res.status(), 200); + } + fn tracker_configuration() -> Arc { let mut config = Configuration::default(); config.log_level = Some("off".to_owned()); - config.http_api.bind_address = format!("127.0.0.1:{}", ephemeral_random_port()); + // Ephemeral socket address + let port = ephemeral_random_port(); + config.http_api.bind_address = format!("127.0.0.1:{}", &port); - // Temp database + // Ephemeral database let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join("data.db"); + let temp_file = temp_directory.join(format!("data_{}.db", &port)); config.db_path = temp_file.to_str().unwrap().to_owned(); Arc::new(config) From 2d621c5e64f657e9be1f99af65ba8b160b9b68d0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 24 Nov 2022 18:42:54 +0000 Subject: [PATCH 0198/1003] fix: [#74] bug calling the whitelist API endpoint twice --- cSpell.json | 1 + src/tracker/mod.rs | 14 ++++++++++++-- tests/api.rs | 10 +++++++++- 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/cSpell.json b/cSpell.json index 1df69e4e7..c880bf3ae 100644 --- a/cSpell.json +++ b/cSpell.json @@ -34,6 +34,7 @@ "Pando", "Rasterbar", "repr", + "reqwest", "rngs", "rusqlite", "rustfmt", diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 77f51098a..a02e1123b 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -106,9 +106,19 @@ impl TorrentTracker { Ok(()) } + /// It adds a torrent to the whitelist if it has not been whitelisted previously async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { - 
self.database.add_info_hash_to_whitelist(*info_hash).await?; - Ok(()) + match self + .database + .get_info_hash_from_whitelist(&info_hash.to_owned().to_string()) + .await + { + Ok(_preexisting_info_hash) => Ok(()), + _ => { + self.database.add_info_hash_to_whitelist(*info_hash).await?; + Ok(()) + } + } } pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { diff --git a/tests/api.rs b/tests/api.rs index 8de220093..96af71d54 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -8,6 +8,7 @@ mod common; mod tracker_api { use core::panic; use std::env; + use std::str::FromStr; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; @@ -18,7 +19,7 @@ mod tracker_api { use torrust_tracker::tracker::key::AuthKey; use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::TorrentTracker; - use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration}; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration, InfoHash}; use crate::common::ephemeral_random_port; @@ -58,6 +59,13 @@ mod tracker_api { let res = reqwest::Client::new().post(url.clone()).send().await.unwrap(); assert_eq!(res.status(), 200); + assert!( + api_server + .tracker + .unwrap() + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await + ); } #[tokio::test] From 167b749e3dabfa4928fa621a0c438863a3ddb127 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 21:31:47 +0100 Subject: [PATCH 0199/1003] db: check info_hash record not found instead dropping all errors --- src/databases/mysql.rs | 4 ++-- src/databases/sqlite.rs | 16 +++++++++------- src/tracker/mod.rs | 10 ++++++---- 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index a4d870101..fc6ff5098 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -141,10 +141,10 @@ impl Database for MysqlDatabase { "SELECT info_hash FROM 
whitelist WHERE info_hash = :info_hash", params! { info_hash }, ) - .map_err(|_| database::Error::QueryReturnedNoRows)? + .map_err(|_| database::Error::DatabaseError)? { Some(info_hash) => Ok(InfoHash::from_str(&info_hash).unwrap()), - None => Err(database::Error::InvalidQuery), + None => Err(database::Error::QueryReturnedNoRows), } } diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index ef9f12d9c..7a567b07e 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -137,13 +137,15 @@ impl Database for SqliteDatabase { let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; let mut rows = stmt.query([info_hash])?; - if let Some(row) = rows.next()? { - let info_hash: String = row.get(0).unwrap(); - - // should never be able to fail - Ok(InfoHash::from_str(&info_hash).unwrap()) - } else { - Err(database::Error::InvalidQuery) + match rows.next() { + Ok(row) => match row { + Some(row) => Ok(InfoHash::from_str(&row.get_unwrap::<_, String>(0)).unwrap()), + None => Err(database::Error::QueryReturnedNoRows), + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index a02e1123b..8987f49f5 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -108,17 +108,19 @@ impl TorrentTracker { /// It adds a torrent to the whitelist if it has not been whitelisted previously async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { - match self + if let Err(e) = self .database .get_info_hash_from_whitelist(&info_hash.to_owned().to_string()) .await { - Ok(_preexisting_info_hash) => Ok(()), - _ => { + if let database::Error::QueryReturnedNoRows = e { self.database.add_info_hash_to_whitelist(*info_hash).await?; - Ok(()) + } else { + eprintln!("{e}"); + return Err(e); } } + Ok(()) } pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { From 
8af9834e57373a97673c804ec3a50014e0d171c6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Nov 2022 08:51:31 +0000 Subject: [PATCH 0200/1003] refactor: rename tests --- tests/api.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/api.rs b/tests/api.rs index 96af71d54..e4c23716d 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -24,7 +24,7 @@ mod tracker_api { use crate::common::ephemeral_random_port; #[tokio::test] - async fn should_generate_a_new_auth_key() { + async fn should_allow_generating_a_new_auth_key() { let configuration = tracker_configuration(); let api_server = new_running_api_server(configuration.clone()).await; @@ -46,7 +46,7 @@ mod tracker_api { } #[tokio::test] - async fn should_whitelist_a_torrent() { + async fn should_allow_whitelisting_a_torrent() { let configuration = tracker_configuration(); let api_server = new_running_api_server(configuration.clone()).await; @@ -69,7 +69,7 @@ mod tracker_api { } #[tokio::test] - async fn should_whitelist_a_torrent_that_has_been_already_whitelisted() { + async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { let configuration = tracker_configuration(); let api_server = new_running_api_server(configuration.clone()).await; From ed5c1edaaaed067337154cfd0bfaa943a28bba8f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Nov 2022 09:15:22 +0000 Subject: [PATCH 0201/1003] refactor: extract fn is_info_hash_whitelisted --- src/databases/database.rs | 11 +++++++++++ src/tracker/mod.rs | 16 +++++----------- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/src/databases/database.rs b/src/databases/database.rs index c67f39a54..795be0d45 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -53,6 +53,17 @@ pub trait Database: Sync + Send { async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result; async fn remove_key_from_keys(&self, key: &str) -> Result; + + async fn is_info_hash_whitelisted(&self, info_hash: 
&InfoHash) -> Result { + if let Err(e) = self.get_info_hash_from_whitelist(&info_hash.to_owned().to_string()).await { + if let Error::QueryReturnedNoRows = e { + return Ok(false); + } else { + return Err(e); + } + } + Ok(true) + } } #[derive(Debug, Display, PartialEq, Eq, Error)] diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 8987f49f5..a3eecd427 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -108,18 +108,12 @@ impl TorrentTracker { /// It adds a torrent to the whitelist if it has not been whitelisted previously async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { - if let Err(e) = self - .database - .get_info_hash_from_whitelist(&info_hash.to_owned().to_string()) - .await - { - if let database::Error::QueryReturnedNoRows = e { - self.database.add_info_hash_to_whitelist(*info_hash).await?; - } else { - eprintln!("{e}"); - return Err(e); - } + if self.database.is_info_hash_whitelisted(info_hash).await.unwrap() { + return Ok(()); } + + self.database.add_info_hash_to_whitelist(*info_hash).await?; + Ok(()) } From 32a6d79ea3c53401a14a44dede63e039330675e1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Nov 2022 10:43:36 +0000 Subject: [PATCH 0202/1003] fix: [#74] send message from API when is ready In the e2e tests we needed to wait until the API server is ready to accept request. We were waiting a random duration (100 milliseconds). Now we send a message from the API when is ready to the initiator. In production code is not used. 
--- cSpell.json | 1 + src/api/server.rs | 13 ++++++++++++- src/jobs/tracker_api.rs | 17 +++++++++++++---- src/setup.rs | 3 ++- tests/api.rs | 11 +++++++---- 5 files changed, 35 insertions(+), 10 deletions(-) diff --git a/cSpell.json b/cSpell.json index c880bf3ae..a2c4235c4 100644 --- a/cSpell.json +++ b/cSpell.json @@ -30,6 +30,7 @@ "nanos", "nextest", "nocapture", + "oneshot", "ostr", "Pando", "Rasterbar", diff --git a/src/api/server.rs b/src/api/server.rs index 9f215710e..984aeb2e6 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -5,9 +5,11 @@ use std::sync::Arc; use std::time::Duration; use serde::{Deserialize, Serialize}; +use tokio::sync::oneshot::Sender; use warp::{filters, reply, serve, Filter}; use super::resources::auth_key_resource::AuthKeyResource; +use crate::jobs::tracker_api::ApiReady; use crate::peer::TorrentPeer; use crate::protocol::common::*; use crate::tracker::TorrentTracker; @@ -88,7 +90,11 @@ fn authenticate(tokens: HashMap) -> impl Filter) -> impl warp::Future { +pub fn start( + socket_addr: SocketAddr, + tracker: Arc, + messenger_to_initiator: Sender, +) -> impl warp::Future { // GET /api/torrents?offset=:u32&limit=:u32 // View torrent list let api_torrents = tracker.clone(); @@ -343,6 +349,11 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let server = api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())); + // Send a message to the initiator to notify the API is ready to accept requests + if messenger_to_initiator.send(ApiReady()).is_err() { + panic!("the receiver dropped"); + } + let (_addr, api_server) = serve(server).bind_with_graceful_shutdown(socket_addr, async move { tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); }); diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index 97b1fa3b0..169ba2edb 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -1,21 +1,30 @@ use std::sync::Arc; use log::info; +use 
tokio::sync::oneshot::{self, Receiver}; use tokio::task::JoinHandle; use crate::api::server; use crate::tracker::TorrentTracker; use crate::Configuration; -pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { +#[derive(Debug)] +pub struct ApiReady(); + +pub fn start_job(config: &Configuration, tracker: Arc) -> (JoinHandle<()>, Receiver) { let bind_addr = config .http_api .bind_address .parse::() .expect("Tracker API bind_address invalid."); + + let (tx, rx) = oneshot::channel::(); + info!("Starting Torrust API server on: {}", bind_addr); - tokio::spawn(async move { - server::start(bind_addr, tracker).await; - }) + let join_handle = tokio::spawn(async move { + server::start(bind_addr, tracker, tx).await; + }); + + (join_handle, rx) } diff --git a/src/setup.rs b/src/setup.rs index 2ecc1c143..52bb64f01 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -49,7 +49,8 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< // Start HTTP API server if config.http_api.enabled { - jobs.push(tracker_api::start_job(config, tracker.clone())); + let (join_handle, _receiver) = tracker_api::start_job(config, tracker.clone()); + jobs.push(join_handle); } // Remove torrents without peers, every interval diff --git a/tests/api.rs b/tests/api.rs index e4c23716d..251fca2b1 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -13,7 +13,6 @@ mod tracker_api { use std::sync::Arc; use tokio::task::JoinHandle; - use tokio::time::{sleep, Duration}; use torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; use torrust_tracker::jobs::tracker_api; use torrust_tracker::tracker::key::AuthKey; @@ -153,12 +152,16 @@ mod tracker_api { logging::setup_logging(&configuration); // Start the HTTP API job - self.job = Some(tracker_api::start_job(&configuration, tracker.clone())); + let (join_handle, api_receiver) = tracker_api::start_job(&configuration, tracker.clone()); + self.job = Some(join_handle); self.started.store(true, Ordering::Relaxed); - // Wait to give 
time to the API server to be ready to accept requests - sleep(Duration::from_millis(100)).await; + // Wait until the API is ready + match api_receiver.await { + Ok(msg) => println!("Message received from API server: {:?}", msg), + Err(_) => panic!("the api server dropped"), + } } } } From 15aa8313fde6aeb82daa563bc9bc4ca902402808 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Nov 2022 14:04:23 +0000 Subject: [PATCH 0203/1003] refactor: api job starter waits until api is ready There are two main changes: - The API server does not send the message when is ready. The job starter waits until the API server is running. This change is less radical becuase we keep the `start_job` return type as the other job starters. We did not want to send a real message from the API. We only wanted to know that the API thread is up and running. - The job starter waits until the API job is running even in production code. In the previous version we did that only for the e2e tests. --- src/api/server.rs | 13 +------------ src/jobs/tracker_api.rs | 24 +++++++++++++++++------- src/setup.rs | 3 +-- tests/api.rs | 9 +-------- 4 files changed, 20 insertions(+), 29 deletions(-) diff --git a/src/api/server.rs b/src/api/server.rs index 984aeb2e6..9f215710e 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -5,11 +5,9 @@ use std::sync::Arc; use std::time::Duration; use serde::{Deserialize, Serialize}; -use tokio::sync::oneshot::Sender; use warp::{filters, reply, serve, Filter}; use super::resources::auth_key_resource::AuthKeyResource; -use crate::jobs::tracker_api::ApiReady; use crate::peer::TorrentPeer; use crate::protocol::common::*; use crate::tracker::TorrentTracker; @@ -90,11 +88,7 @@ fn authenticate(tokens: HashMap) -> impl Filter, - messenger_to_initiator: Sender, -) -> impl warp::Future { +pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp::Future { // GET /api/torrents?offset=:u32&limit=:u32 // View torrent list let api_torrents = tracker.clone(); @@ -349,11 
+343,6 @@ pub fn start( let server = api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())); - // Send a message to the initiator to notify the API is ready to accept requests - if messenger_to_initiator.send(ApiReady()).is_err() { - panic!("the receiver dropped"); - } - let (_addr, api_server) = serve(server).bind_with_graceful_shutdown(socket_addr, async move { tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); }); diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index 169ba2edb..7b5797391 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use log::info; -use tokio::sync::oneshot::{self, Receiver}; +use tokio::sync::oneshot; use tokio::task::JoinHandle; use crate::api::server; @@ -9,22 +9,32 @@ use crate::tracker::TorrentTracker; use crate::Configuration; #[derive(Debug)] -pub struct ApiReady(); +pub struct ApiServerJobStarted(); -pub fn start_job(config: &Configuration, tracker: Arc) -> (JoinHandle<()>, Receiver) { +pub async fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { let bind_addr = config .http_api .bind_address .parse::() .expect("Tracker API bind_address invalid."); - let (tx, rx) = oneshot::channel::(); - info!("Starting Torrust API server on: {}", bind_addr); + let (tx, rx) = oneshot::channel::(); + + // Run the API server let join_handle = tokio::spawn(async move { - server::start(bind_addr, tracker, tx).await; + if tx.send(ApiServerJobStarted()).is_err() { + panic!("the start job dropped"); + } + server::start(bind_addr, tracker).await; }); - (join_handle, rx) + // Wait until the API server job is running + match rx.await { + Ok(_msg) => info!("Torrust API server started"), + Err(_) => panic!("the api server dropped"), + } + + join_handle } diff --git a/src/setup.rs b/src/setup.rs index 52bb64f01..9906a2d03 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -49,8 +49,7 @@ pub async fn setup(config: &Configuration, 
tracker: Arc) -> Vec< // Start HTTP API server if config.http_api.enabled { - let (join_handle, _receiver) = tracker_api::start_job(config, tracker.clone()); - jobs.push(join_handle); + jobs.push(tracker_api::start_job(config, tracker.clone()).await); } // Remove torrents without peers, every interval diff --git a/tests/api.rs b/tests/api.rs index 251fca2b1..278f9d4fb 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -152,16 +152,9 @@ mod tracker_api { logging::setup_logging(&configuration); // Start the HTTP API job - let (join_handle, api_receiver) = tracker_api::start_job(&configuration, tracker.clone()); - self.job = Some(join_handle); + self.job = Some(tracker_api::start_job(&configuration, tracker).await); self.started.store(true, Ordering::Relaxed); - - // Wait until the API is ready - match api_receiver.await { - Ok(msg) => println!("Message received from API server: {:?}", msg), - Err(_) => panic!("the api server dropped"), - } } } } From 5274b2c067aeea9d1bf344930fe7f1bdea794627 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 16:09:57 +0000 Subject: [PATCH 0204/1003] fix: [#74] send api ready event after starting the api server Cameron Garnham (@da2ce7) suggested this change. It's better to send the event after spwaning the API server task. 
--- src/jobs/tracker_api.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index 7b5797391..ba5b8a1fb 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -24,10 +24,13 @@ pub async fn start_job(config: &Configuration, tracker: Arc) -> // Run the API server let join_handle = tokio::spawn(async move { + let handel = server::start(bind_addr, tracker); + if tx.send(ApiServerJobStarted()).is_err() { panic!("the start job dropped"); } - server::start(bind_addr, tracker).await; + + handel.await; }); // Wait until the API server job is running From ea92ceb61c1d765e2ba882186da97b433996b971 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Nov 2022 17:58:50 +0000 Subject: [PATCH 0205/1003] test: [#61] add e2e test to API torrent info endpoint before refactoring --- src/api/resources/mod.rs | 1 + src/api/resources/torrent_resource.rs | 26 ++++++++++ tests/api.rs | 75 ++++++++++++++++++++++++++- 3 files changed, 101 insertions(+), 1 deletion(-) create mode 100644 src/api/resources/torrent_resource.rs diff --git a/src/api/resources/mod.rs b/src/api/resources/mod.rs index 4b4f2214c..a229539dd 100644 --- a/src/api/resources/mod.rs +++ b/src/api/resources/mod.rs @@ -7,3 +7,4 @@ //! - [ ] ... //! - [ ] ... 
pub mod auth_key_resource; +pub mod torrent_resource; diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs new file mode 100644 index 000000000..c9f6a1451 --- /dev/null +++ b/src/api/resources/torrent_resource.rs @@ -0,0 +1,26 @@ +use serde::Deserialize; + +#[derive(Deserialize, Debug, PartialEq)] +pub struct TorrentResource { + pub info_hash: String, + pub completed: u32, + pub leechers: u32, + pub peers: Vec, +} + +#[derive(Deserialize, Debug, PartialEq)] +pub struct TorrentPeerResource { + pub peer_id: PeerIdResource, + pub peer_addr: String, + pub updated: i64, + pub uploaded: i64, + pub downloaded: i64, + pub left: i64, + pub event: String, +} + +#[derive(Deserialize, Debug, PartialEq)] +pub struct PeerIdResource { + pub id: String, + pub client: String, +} diff --git a/tests/api.rs b/tests/api.rs index 278f9d4fb..2a0ded24a 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -8,17 +8,22 @@ mod common; mod tracker_api { use core::panic; use std::env; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use tokio::task::JoinHandle; use torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; + use torrust_tracker::api::resources::torrent_resource::{PeerIdResource, TorrentPeerResource, TorrentResource}; use torrust_tracker::jobs::tracker_api; + use torrust_tracker::peer::TorrentPeer; + use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; use torrust_tracker::tracker::key::AuthKey; use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::TorrentTracker; - use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration, InfoHash}; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration, InfoHash, PeerId}; use crate::common::ephemeral_random_port; @@ -87,6 +92,74 @@ mod tracker_api { 
assert_eq!(res.status(), 200); } + fn sample_torrent_peer() -> (TorrentPeer, TorrentPeerResource) { + ( + TorrentPeer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1669397478934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }, + TorrentPeerResource { + peer_id: PeerIdResource { + id: "2d71423030303030303030303030303030303030".to_string(), + client: "qBittorrent".to_string(), + }, + peer_addr: "126.0.0.1:8080".to_string(), + updated: 1669397478934000i64, + uploaded: 0i64, + downloaded: 0i64, + left: 0i64, + event: "Started".to_string(), + }, + ) + } + + #[tokio::test] + async fn should_allow_getting_a_torrent_info() { + let configuration = tracker_configuration(); + let api_server = new_running_api_server(configuration.clone()).await; + + let bind_address = api_server.bind_address.unwrap().clone(); + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); + + let (peer, peer_resource) = sample_torrent_peer(); + + // Add the torrent to the tracker + api_server + .tracker + .unwrap() + .update_torrent_with_peer_and_get_stats(&info_hash, &peer) + .await; + + let url = format!("http://{}/api/torrent/{}?token={}", &bind_address, &info_hash, &api_token); + + let torrent_resource = reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::() + .await + .unwrap(); + + assert_eq!( + torrent_resource, + TorrentResource { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + completed: 0, + leechers: 0, + peers: vec![peer_resource] + } + ); + } + fn tracker_configuration() -> Arc { let mut config = Configuration::default(); config.log_level = Some("off".to_owned()); From 
801dfe6d8df0292f5e9afe25ebcf265d53c0834d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Nov 2022 19:27:13 +0000 Subject: [PATCH 0206/1003] refactor: [#61] use TorrentResource in torrent info API endpoint --- src/api/resources/torrent_resource.rs | 29 +++++++++++++++++++-------- src/api/server.rs | 21 ++++++++++++++++--- src/protocol/common.rs | 17 +++++++++------- tests/api.rs | 15 +++++++------- 4 files changed, 57 insertions(+), 25 deletions(-) diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs index c9f6a1451..3c59852e1 100644 --- a/src/api/resources/torrent_resource.rs +++ b/src/api/resources/torrent_resource.rs @@ -1,26 +1,39 @@ -use serde::Deserialize; +use serde::{Deserialize, Serialize}; -#[derive(Deserialize, Debug, PartialEq)] +use crate::PeerId; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct TorrentResource { pub info_hash: String, + pub seeders: u32, pub completed: u32, pub leechers: u32, - pub peers: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + pub peers: Option>, } -#[derive(Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct TorrentPeerResource { pub peer_id: PeerIdResource, pub peer_addr: String, - pub updated: i64, + pub updated: u128, pub uploaded: i64, pub downloaded: i64, pub left: i64, pub event: String, } -#[derive(Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct PeerIdResource { - pub id: String, - pub client: String, + pub id: Option, + pub client: Option, +} + +impl From for PeerIdResource { + fn from(peer_id: PeerId) -> Self { + PeerIdResource { + id: peer_id.get_id(), + client: peer_id.get_client_name().map(|client_name| client_name.to_string()), + } + } } diff --git a/src/api/server.rs b/src/api/server.rs index 9f215710e..06e2af251 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -8,6 +8,7 @@ use serde::{Deserialize, Serialize}; use warp::{filters, reply, 
serve, Filter}; use super::resources::auth_key_resource::AuthKeyResource; +use super::resources::torrent_resource::{PeerIdResource, TorrentPeerResource, TorrentResource}; use crate::peer::TorrentPeer; use crate::protocol::common::*; use crate::tracker::TorrentTracker; @@ -109,6 +110,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp .iter() .map(|(info_hash, torrent_entry)| { let (seeders, completed, leechers) = torrent_entry.get_stats(); + // todo: use TorrentResource Torrent { info_hash, seeders, @@ -206,12 +208,25 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let peers = torrent_entry.get_peers(None); - Ok(reply::json(&Torrent { - info_hash: &info_hash, + let peer_resources = peers + .iter() + .map(|peer| TorrentPeerResource { + peer_id: PeerIdResource::from(peer.peer_id.clone()), + peer_addr: peer.peer_addr.to_string(), + updated: peer.updated.as_millis(), + uploaded: peer.uploaded.0, + downloaded: peer.downloaded.0, + left: peer.left.0, + event: format!("{:?}", peer.event), + }) + .collect(); + + Ok(reply::json(&TorrentResource { + info_hash: info_hash.to_string(), seeders, completed, leechers, - peers: Some(peers), + peers: Some(peer_resources), })) }); diff --git a/src/protocol/common.rs b/src/protocol/common.rs index 431521764..da6d95e40 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -232,6 +232,14 @@ impl std::fmt::Display for PeerId { } impl PeerId { + pub fn get_id(&self) -> Option { + let buff_size = self.0.len() * 2; + let mut tmp: Vec = vec![0; buff_size]; + binascii::bin2hex(&self.0, &mut tmp).unwrap(); + + std::str::from_utf8(&tmp).ok().map(|id| id.to_string()) + } + pub fn get_client_name(&self) -> Option<&'static str> { if self.0[0] == b'M' { return Some("BitTorrent"); @@ -316,19 +324,14 @@ impl Serialize for PeerId { where S: serde::Serializer, { - let buff_size = self.0.len() * 2; - let mut tmp: Vec = vec![0; buff_size]; - binascii::bin2hex(&self.0, &mut tmp).unwrap(); - let id = 
std::str::from_utf8(&tmp).ok(); - #[derive(Serialize)] struct PeerIdInfo<'a> { - id: Option<&'a str>, + id: Option, client: Option<&'a str>, } let obj = PeerIdInfo { - id, + id: self.get_id(), client: self.get_client_name(), }; obj.serialize(serializer) diff --git a/tests/api.rs b/tests/api.rs index 2a0ded24a..a5606b0a9 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -105,14 +105,14 @@ mod tracker_api { }, TorrentPeerResource { peer_id: PeerIdResource { - id: "2d71423030303030303030303030303030303030".to_string(), - client: "qBittorrent".to_string(), + id: Some("2d71423030303030303030303030303030303030".to_string()), + client: Some("qBittorrent".to_string()), }, peer_addr: "126.0.0.1:8080".to_string(), - updated: 1669397478934000i64, - uploaded: 0i64, - downloaded: 0i64, - left: 0i64, + updated: 1669397478934000, + uploaded: 0, + downloaded: 0, + left: 0, event: "Started".to_string(), }, ) @@ -153,9 +153,10 @@ mod tracker_api { torrent_resource, TorrentResource { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, completed: 0, leechers: 0, - peers: vec![peer_resource] + peers: Some(vec![peer_resource]) } ); } From 7298701f5d92d854139eedd296606dbc78f5e080 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Nov 2022 13:41:27 +0000 Subject: [PATCH 0207/1003] refactor: [#61] extract converter from TorrentPeer to TorrentPeerResource --- src/api/resources/torrent_resource.rs | 15 ++++++++++++ src/api/server.rs | 15 ++---------- src/protocol/common.rs | 2 +- src/tracker/peer.rs | 2 +- tests/api.rs | 35 +++++++++------------------ 5 files changed, 31 insertions(+), 38 deletions(-) diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs index 3c59852e1..ecf2a3fda 100644 --- a/src/api/resources/torrent_resource.rs +++ b/src/api/resources/torrent_resource.rs @@ -1,5 +1,6 @@ use serde::{Deserialize, Serialize}; +use crate::peer::TorrentPeer; use crate::PeerId; #[derive(Serialize, Deserialize, Debug, 
PartialEq)] @@ -37,3 +38,17 @@ impl From for PeerIdResource { } } } + +impl From for TorrentPeerResource { + fn from(peer: TorrentPeer) -> Self { + TorrentPeerResource { + peer_id: PeerIdResource::from(peer.peer_id), + peer_addr: peer.peer_addr.to_string(), + updated: peer.updated.as_millis(), + uploaded: peer.uploaded.0, + downloaded: peer.downloaded.0, + left: peer.left.0, + event: format!("{:?}", peer.event), + } + } +} diff --git a/src/api/server.rs b/src/api/server.rs index 06e2af251..85c177b8b 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; use super::resources::auth_key_resource::AuthKeyResource; -use super::resources::torrent_resource::{PeerIdResource, TorrentPeerResource, TorrentResource}; +use super::resources::torrent_resource::{TorrentPeerResource, TorrentResource}; use crate::peer::TorrentPeer; use crate::protocol::common::*; use crate::tracker::TorrentTracker; @@ -208,18 +208,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let peers = torrent_entry.get_peers(None); - let peer_resources = peers - .iter() - .map(|peer| TorrentPeerResource { - peer_id: PeerIdResource::from(peer.peer_id.clone()), - peer_addr: peer.peer_addr.to_string(), - updated: peer.updated.as_millis(), - uploaded: peer.uploaded.0, - downloaded: peer.downloaded.0, - left: peer.left.0, - event: format!("{:?}", peer.event), - }) - .collect(); + let peer_resources = peers.iter().map(|peer| TorrentPeerResource::from(**peer)).collect(); Ok(reply::json(&TorrentResource { info_hash: info_hash.to_string(), diff --git a/src/protocol/common.rs b/src/protocol/common.rs index da6d95e40..ce1cbf253 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -217,7 +217,7 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { } } -#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord)] +#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] pub 
struct PeerId(pub [u8; 20]); impl std::fmt::Display for PeerId { diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 7a2599f82..42ef6a60b 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -9,7 +9,7 @@ use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef, PeerId}; use crate::protocol::utils::ser_unix_time_value; -#[derive(PartialEq, Eq, Debug, Clone, Serialize)] +#[derive(PartialEq, Eq, Debug, Clone, Serialize, Copy)] pub struct TorrentPeer { pub peer_id: PeerId, pub peer_addr: SocketAddr, diff --git a/tests/api.rs b/tests/api.rs index a5606b0a9..0f6214ffb 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -93,29 +93,18 @@ mod tracker_api { } fn sample_torrent_peer() -> (TorrentPeer, TorrentPeerResource) { - ( - TorrentPeer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1669397478934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }, - TorrentPeerResource { - peer_id: PeerIdResource { - id: Some("2d71423030303030303030303030303030303030".to_string()), - client: Some("qBittorrent".to_string()), - }, - peer_addr: "126.0.0.1:8080".to_string(), - updated: 1669397478934000, - uploaded: 0, - downloaded: 0, - left: 0, - event: "Started".to_string(), - }, - ) + let torrent_peer = TorrentPeer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1669397478934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + let torrent_peer_resource = TorrentPeerResource::from(torrent_peer); + + (torrent_peer, torrent_peer_resource) } #[tokio::test] From 284c91be299e814c7df6ac33ec050e69817085e8 Mon Sep 17 
00:00:00 2001 From: Jose Celano Date: Mon, 28 Nov 2022 15:45:10 +0000 Subject: [PATCH 0208/1003] test: [#61] add e2e test for torrent list API endpoint --- src/tracker/torrent.rs | 4 ++-- tests/api.rs | 45 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 46 insertions(+), 3 deletions(-) diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 4e602d359..335554006 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -32,7 +32,7 @@ impl TorrentEntry { let _ = self.peers.remove(&peer.peer_id); } AnnounceEvent::Completed => { - let peer_old = self.peers.insert(peer.peer_id.clone(), peer.clone()); + let peer_old = self.peers.insert(peer.peer_id, *peer); // Don't count if peer was not previously known if peer_old.is_some() { self.completed += 1; @@ -40,7 +40,7 @@ impl TorrentEntry { } } _ => { - let _ = self.peers.insert(peer.peer_id.clone(), peer.clone()); + let _ = self.peers.insert(peer.peer_id, *peer); } } diff --git a/tests/api.rs b/tests/api.rs index 0f6214ffb..ce419724a 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -16,7 +16,7 @@ mod tracker_api { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use tokio::task::JoinHandle; use torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; - use torrust_tracker::api::resources::torrent_resource::{PeerIdResource, TorrentPeerResource, TorrentResource}; + use torrust_tracker::api::resources::torrent_resource::{TorrentPeerResource, TorrentResource}; use torrust_tracker::jobs::tracker_api; use torrust_tracker::peer::TorrentPeer; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; @@ -150,6 +150,49 @@ mod tracker_api { ); } + #[tokio::test] + async fn should_allow_getting_torrents() { + let configuration = tracker_configuration(); + let api_server = new_running_api_server(configuration.clone()).await; + + let bind_address = api_server.bind_address.unwrap().clone(); + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + 
let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); + + let (peer, _peer_resource) = sample_torrent_peer(); + + // Add the torrent to the tracker + api_server + .tracker + .unwrap() + .update_torrent_with_peer_and_get_stats(&info_hash, &peer) + .await; + + let url = format!("http://{}/api/torrents?token={}", &bind_address, &api_token); + + let torrent_resources = reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::>() + .await + .unwrap(); + + assert_eq!( + torrent_resources, + vec![TorrentResource { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None // Torrent list does not include peer list + }] + ); + } + fn tracker_configuration() -> Arc { let mut config = Configuration::default(); config.log_level = Some("off".to_owned()); From b974ce0eba7614bbe1ce79b03a73ab20143b75f6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Nov 2022 16:05:41 +0000 Subject: [PATCH 0209/1003] refactor: [#61] use TorrentListItemResource in torrent list API endpoint --- src/api/resources/mod.rs | 4 ++-- src/api/resources/torrent_resource.rs | 10 ++++++++++ src/api/server.rs | 18 +++--------------- 3 files changed, 15 insertions(+), 17 deletions(-) diff --git a/src/api/resources/mod.rs b/src/api/resources/mod.rs index a229539dd..e139207b5 100644 --- a/src/api/resources/mod.rs +++ b/src/api/resources/mod.rs @@ -3,8 +3,8 @@ //! WIP. Not all endpoints have their resource structs. //! //! - [x] AuthKeys -//! - [ ] ... -//! - [ ] ... +//! - [ ] TorrentResource, TorrentListItemResource, TorrentPeerResource, PeerIdResource +//! - [ ] StatsResource //! - [ ] ... 
pub mod auth_key_resource; pub mod torrent_resource; diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs index ecf2a3fda..88d0463cb 100644 --- a/src/api/resources/torrent_resource.rs +++ b/src/api/resources/torrent_resource.rs @@ -13,6 +13,16 @@ pub struct TorrentResource { pub peers: Option>, } +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct TorrentListItemResource { + pub info_hash: String, + pub seeders: u32, + pub completed: u32, + pub leechers: u32, + // todo: this is always None. Remove field from endpoint? + pub peers: Option>, +} + #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct TorrentPeerResource { pub peer_id: PeerIdResource, diff --git a/src/api/server.rs b/src/api/server.rs index 85c177b8b..ef514749b 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -8,8 +8,7 @@ use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; use super::resources::auth_key_resource::AuthKeyResource; -use super::resources::torrent_resource::{TorrentPeerResource, TorrentResource}; -use crate::peer::TorrentPeer; +use super::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; use crate::protocol::common::*; use crate::tracker::TorrentTracker; @@ -19,16 +18,6 @@ struct TorrentInfoQuery { limit: Option, } -#[derive(Serialize)] -struct Torrent<'a> { - info_hash: &'a InfoHash, - seeders: u32, - completed: u32, - leechers: u32, - #[serde(skip_serializing_if = "Option::is_none")] - peers: Option>, -} - #[derive(Serialize)] struct Stats { torrents: u32, @@ -110,9 +99,8 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp .iter() .map(|(info_hash, torrent_entry)| { let (seeders, completed, leechers) = torrent_entry.get_stats(); - // todo: use TorrentResource - Torrent { - info_hash, + TorrentListItemResource { + info_hash: info_hash.to_string(), seeders, completed, leechers, From 7e03714ef49076ac562ca9fc8179dd7495534e82 Mon Sep 17 
00:00:00 2001 From: Jose Celano Date: Mon, 28 Nov 2022 16:29:40 +0000 Subject: [PATCH 0210/1003] refactor: [#61] use StatsResource in API stats endpoint --- src/api/resources/mod.rs | 1 + src/api/resources/stats_resource.rs | 21 ++++++++++ src/api/server.rs | 23 +---------- tests/api.rs | 59 ++++++++++++++++++++++++++++- 4 files changed, 81 insertions(+), 23 deletions(-) create mode 100644 src/api/resources/stats_resource.rs diff --git a/src/api/resources/mod.rs b/src/api/resources/mod.rs index e139207b5..d214d8a59 100644 --- a/src/api/resources/mod.rs +++ b/src/api/resources/mod.rs @@ -7,4 +7,5 @@ //! - [ ] StatsResource //! - [ ] ... pub mod auth_key_resource; +pub mod stats_resource; pub mod torrent_resource; diff --git a/src/api/resources/stats_resource.rs b/src/api/resources/stats_resource.rs new file mode 100644 index 000000000..7fc9f1376 --- /dev/null +++ b/src/api/resources/stats_resource.rs @@ -0,0 +1,21 @@ +use serde::{Serialize, Deserialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct StatsResource { + pub torrents: u32, + pub seeders: u32, + pub completed: u32, + pub leechers: u32, + pub tcp4_connections_handled: u32, + pub tcp4_announces_handled: u32, + pub tcp4_scrapes_handled: u32, + pub tcp6_connections_handled: u32, + pub tcp6_announces_handled: u32, + pub tcp6_scrapes_handled: u32, + pub udp4_connections_handled: u32, + pub udp4_announces_handled: u32, + pub udp4_scrapes_handled: u32, + pub udp6_connections_handled: u32, + pub udp6_announces_handled: u32, + pub udp6_scrapes_handled: u32, +} diff --git a/src/api/server.rs b/src/api/server.rs index ef514749b..41e6f7074 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -8,6 +8,7 @@ use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; use super::resources::auth_key_resource::AuthKeyResource; +use super::resources::stats_resource::StatsResource; use super::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, 
TorrentResource}; use crate::protocol::common::*; use crate::tracker::TorrentTracker; @@ -18,26 +19,6 @@ struct TorrentInfoQuery { limit: Option, } -#[derive(Serialize)] -struct Stats { - torrents: u32, - seeders: u32, - completed: u32, - leechers: u32, - tcp4_connections_handled: u32, - tcp4_announces_handled: u32, - tcp4_scrapes_handled: u32, - tcp6_connections_handled: u32, - tcp6_announces_handled: u32, - tcp6_scrapes_handled: u32, - udp4_connections_handled: u32, - udp4_announces_handled: u32, - udp4_scrapes_handled: u32, - udp6_connections_handled: u32, - udp6_announces_handled: u32, - udp6_scrapes_handled: u32, -} - #[derive(Serialize, Debug)] #[serde(tag = "status", rename_all = "snake_case")] enum ActionStatus<'a> { @@ -122,7 +103,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp .and(filters::path::end()) .map(move || api_stats.clone()) .and_then(|tracker: Arc| async move { - let mut results = Stats { + let mut results = StatsResource { torrents: 0, seeders: 0, completed: 0, diff --git a/tests/api.rs b/tests/api.rs index ce419724a..37cdd5415 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -16,6 +16,7 @@ mod tracker_api { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use tokio::task::JoinHandle; use torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; + use torrust_tracker::api::resources::stats_resource::StatsResource; use torrust_tracker::api::resources::torrent_resource::{TorrentPeerResource, TorrentResource}; use torrust_tracker::jobs::tracker_api; use torrust_tracker::peer::TorrentPeer; @@ -118,7 +119,7 @@ mod tracker_api { let (peer, peer_resource) = sample_torrent_peer(); - // Add the torrent to the tracker + // Add a torrent to the tracker api_server .tracker .unwrap() @@ -161,7 +162,7 @@ mod tracker_api { let (peer, _peer_resource) = sample_torrent_peer(); - // Add the torrent to the tracker + // Add a torrent to the tracker api_server .tracker .unwrap() @@ -193,6 +194,60 @@ mod tracker_api { ); } + 
#[tokio::test] + async fn should_allow_getting_tracker_statistics() { + let configuration = tracker_configuration(); + let api_server = new_running_api_server(configuration.clone()).await; + + let bind_address = api_server.bind_address.unwrap().clone(); + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); + + let (peer, _peer_resource) = sample_torrent_peer(); + + // Add a torrent to the tracker + api_server + .tracker + .unwrap() + .update_torrent_with_peer_and_get_stats(&info_hash, &peer) + .await; + + let url = format!("http://{}/api/stats?token={}", &bind_address, &api_token); + + let stats = reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::() + .await + .unwrap(); + + assert_eq!( + stats, + StatsResource { + torrents: 1, + seeders: 1, + completed: 0, + leechers: 0, + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + } + ); + } + fn tracker_configuration() -> Arc { let mut config = Configuration::default(); config.log_level = Some("off".to_owned()); From bc3d246fc4b5d3ed11b0831abd3dffe722a8dad0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Nov 2022 16:46:16 +0000 Subject: [PATCH 0211/1003] feat(api): in torrent endpoint rename field to Marked as deprecated. It will be a breaking change in version v3.0.0. 
--- src/api/resources/stats_resource.rs | 2 +- src/api/resources/torrent_resource.rs | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/api/resources/stats_resource.rs b/src/api/resources/stats_resource.rs index 7fc9f1376..2fbaf42c1 100644 --- a/src/api/resources/stats_resource.rs +++ b/src/api/resources/stats_resource.rs @@ -1,4 +1,4 @@ -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct StatsResource { diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs index 88d0463cb..11e9d7196 100644 --- a/src/api/resources/torrent_resource.rs +++ b/src/api/resources/torrent_resource.rs @@ -27,7 +27,9 @@ pub struct TorrentListItemResource { pub struct TorrentPeerResource { pub peer_id: PeerIdResource, pub peer_addr: String, + #[deprecated(since = "2.0.0", note = "please use `updated_milliseconds_ago` instead")] pub updated: u128, + pub updated_milliseconds_ago: u128, pub uploaded: i64, pub downloaded: i64, pub left: i64, @@ -55,6 +57,7 @@ impl From for TorrentPeerResource { peer_id: PeerIdResource::from(peer.peer_id), peer_addr: peer.peer_addr.to_string(), updated: peer.updated.as_millis(), + updated_milliseconds_ago: peer.updated.as_millis(), uploaded: peer.uploaded.0, downloaded: peer.downloaded.0, left: peer.left.0, From e1b84f6eb75dcd9ba0cc6803a0f7221d9b761ef8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Nov 2022 19:37:09 +0000 Subject: [PATCH 0212/1003] refactor: [#61] extract struct ApiClient for API testing --- tests/api.rs | 251 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 154 insertions(+), 97 deletions(-) diff --git a/tests/api.rs b/tests/api.rs index 37cdd5415..475da9a24 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -14,10 +14,11 @@ mod tracker_api { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use reqwest::Response; use tokio::task::JoinHandle; use 
torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; use torrust_tracker::api::resources::stats_resource::StatsResource; - use torrust_tracker::api::resources::torrent_resource::{TorrentPeerResource, TorrentResource}; + use torrust_tracker::api::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; use torrust_tracker::jobs::tracker_api; use torrust_tracker::peer::TorrentPeer; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; @@ -30,16 +31,13 @@ mod tracker_api { #[tokio::test] async fn should_allow_generating_a_new_auth_key() { - let configuration = tracker_configuration(); - let api_server = new_running_api_server(configuration.clone()).await; + let api_server = ApiServer::new_running_instance().await; - let bind_address = api_server.bind_address.unwrap().clone(); let seconds_valid = 60; - let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); - let url = format!("http://{}/api/key/{}?token={}", &bind_address, &seconds_valid, &api_token); - - let auth_key: AuthKeyResource = reqwest::Client::new().post(url).send().await.unwrap().json().await.unwrap(); + let auth_key = ApiClient::new(api_server.get_connection_info().unwrap()) + .generate_auth_key(seconds_valid) + .await; // Verify the key with the tracker assert!(api_server @@ -52,16 +50,13 @@ mod tracker_api { #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let configuration = tracker_configuration(); - let api_server = new_running_api_server(configuration.clone()).await; + let api_server = ApiServer::new_running_instance().await; - let bind_address = api_server.bind_address.unwrap().clone(); - let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let url = format!("http://{}/api/whitelist/{}?token={}", &bind_address, &info_hash, &api_token); - - let res = 
reqwest::Client::new().post(url.clone()).send().await.unwrap(); + let res = ApiClient::new(api_server.get_connection_info().unwrap()) + .whitelist_a_torrent(&info_hash) + .await; assert_eq!(res.status(), 200); assert!( @@ -75,47 +70,25 @@ mod tracker_api { #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let configuration = tracker_configuration(); - let api_server = new_running_api_server(configuration.clone()).await; + let api_server = ApiServer::new_running_instance().await; - let bind_address = api_server.bind_address.unwrap().clone(); - let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let url = format!("http://{}/api/whitelist/{}?token={}", &bind_address, &info_hash, &api_token); + let api_client = ApiClient::new(api_server.get_connection_info().unwrap()); - // First whitelist request - let res = reqwest::Client::new().post(url.clone()).send().await.unwrap(); + let res = api_client.whitelist_a_torrent(&info_hash).await; assert_eq!(res.status(), 200); - // Second whitelist request - let res = reqwest::Client::new().post(url.clone()).send().await.unwrap(); + let res = api_client.whitelist_a_torrent(&info_hash).await; assert_eq!(res.status(), 200); } - fn sample_torrent_peer() -> (TorrentPeer, TorrentPeerResource) { - let torrent_peer = TorrentPeer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1669397478934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - let torrent_peer_resource = TorrentPeerResource::from(torrent_peer); - - (torrent_peer, torrent_peer_resource) - } - #[tokio::test] async fn should_allow_getting_a_torrent_info() { - let configuration = tracker_configuration(); - let api_server = 
new_running_api_server(configuration.clone()).await; + let api_server = ApiServer::new_running_instance().await; + let api_connection_info = api_server.get_connection_info().unwrap(); - let bind_address = api_server.bind_address.unwrap().clone(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); let (peer, peer_resource) = sample_torrent_peer(); @@ -126,18 +99,7 @@ mod tracker_api { .update_torrent_with_peer_and_get_stats(&info_hash, &peer) .await; - let url = format!("http://{}/api/torrent/{}?token={}", &bind_address, &info_hash, &api_token); - - let torrent_resource = reqwest::Client::builder() - .build() - .unwrap() - .get(url) - .send() - .await - .unwrap() - .json::() - .await - .unwrap(); + let torrent_resource = ApiClient::new(api_connection_info).get_torrent(&info_hash.to_string()).await; assert_eq!( torrent_resource, @@ -153,15 +115,14 @@ mod tracker_api { #[tokio::test] async fn should_allow_getting_torrents() { - let configuration = tracker_configuration(); - let api_server = new_running_api_server(configuration.clone()).await; + let api_server = ApiServer::new_running_instance().await; - let bind_address = api_server.bind_address.unwrap().clone(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); let (peer, _peer_resource) = sample_torrent_peer(); + let api_connection_info = api_server.get_connection_info().unwrap(); + // Add a torrent to the tracker api_server .tracker @@ -169,22 +130,11 @@ mod tracker_api { .update_torrent_with_peer_and_get_stats(&info_hash, &peer) .await; - let url = format!("http://{}/api/torrents?token={}", &bind_address, &api_token); - - let torrent_resources = reqwest::Client::builder() - .build() - .unwrap() - .get(url) - .send() - .await - .unwrap() - 
.json::>() - .await - .unwrap(); + let torrent_resources = ApiClient::new(api_connection_info).get_torrents().await; assert_eq!( torrent_resources, - vec![TorrentResource { + vec![TorrentListItemResource { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, @@ -196,15 +146,14 @@ mod tracker_api { #[tokio::test] async fn should_allow_getting_tracker_statistics() { - let configuration = tracker_configuration(); - let api_server = new_running_api_server(configuration.clone()).await; + let api_server = ApiServer::new_running_instance().await; - let bind_address = api_server.bind_address.unwrap().clone(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); let (peer, _peer_resource) = sample_torrent_peer(); + let api_connection_info = api_server.get_connection_info().unwrap(); + // Add a torrent to the tracker api_server .tracker @@ -212,21 +161,10 @@ mod tracker_api { .update_torrent_with_peer_and_get_stats(&info_hash, &peer) .await; - let url = format!("http://{}/api/stats?token={}", &bind_address, &api_token); - - let stats = reqwest::Client::builder() - .build() - .unwrap() - .get(url) - .send() - .await - .unwrap() - .json::() - .await - .unwrap(); + let stats_resource = ApiClient::new(api_connection_info).get_tracker_statistics().await; assert_eq!( - stats, + stats_resource, StatsResource { torrents: 1, seeders: 1, @@ -248,6 +186,21 @@ mod tracker_api { ); } + fn sample_torrent_peer() -> (TorrentPeer, TorrentPeerResource) { + let torrent_peer = TorrentPeer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1669397478934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + let torrent_peer_resource = 
TorrentPeerResource::from(torrent_peer); + + (torrent_peer, torrent_peer_resource) + } + fn tracker_configuration() -> Arc { let mut config = Configuration::default(); config.log_level = Some("off".to_owned()); @@ -264,17 +217,26 @@ mod tracker_api { Arc::new(config) } - async fn new_running_api_server(configuration: Arc) -> ApiServer { - let mut api_server = ApiServer::new(); - api_server.start(configuration).await; - api_server + #[derive(Clone)] + struct ApiConnectionInfo { + pub bind_address: String, + pub api_token: String, + } + + impl ApiConnectionInfo { + pub fn new(bind_address: &str, api_token: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + api_token: api_token.to_string(), + } + } } - pub struct ApiServer { + struct ApiServer { pub started: AtomicBool, pub job: Option>, - pub bind_address: Option, pub tracker: Option>, + pub connection_info: Option, } impl ApiServer { @@ -282,14 +244,28 @@ mod tracker_api { Self { started: AtomicBool::new(false), job: None, - bind_address: None, tracker: None, + connection_info: None, } } + pub async fn new_running_instance() -> ApiServer { + let configuration = tracker_configuration(); + ApiServer::new_running_custom_instance(configuration.clone()).await + } + + async fn new_running_custom_instance(configuration: Arc) -> ApiServer { + let mut api_server = ApiServer::new(); + api_server.start(configuration).await; + api_server + } + pub async fn start(&mut self, configuration: Arc) { if !self.started.load(Ordering::Relaxed) { - self.bind_address = Some(configuration.http_api.bind_address.clone()); + self.connection_info = Some(ApiConnectionInfo::new( + &configuration.http_api.bind_address.clone(), + &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), + )); // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); @@ -318,5 +294,86 @@ mod tracker_api { self.started.store(true, Ordering::Relaxed); } } + + pub fn 
get_connection_info(&self) -> Option { + self.connection_info.clone() + } + } + + struct ApiClient { + connection_info: ApiConnectionInfo, + } + + impl ApiClient { + pub fn new(connection_info: ApiConnectionInfo) -> Self { + Self { connection_info } + } + + pub async fn generate_auth_key(&self, seconds_valid: i32) -> AuthKeyResource { + let url = format!( + "http://{}/api/key/{}?token={}", + &self.connection_info.bind_address, &seconds_valid, &self.connection_info.api_token + ); + reqwest::Client::new().post(url).send().await.unwrap().json().await.unwrap() + } + + pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { + let url = format!( + "http://{}/api/whitelist/{}?token={}", + &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token + ); + reqwest::Client::new().post(url.clone()).send().await.unwrap() + } + + pub async fn get_torrent(&self, info_hash: &str) -> TorrentResource { + let url = format!( + "http://{}/api/torrent/{}?token={}", + &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token + ); + reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::() + .await + .unwrap() + } + + pub async fn get_torrents(&self) -> Vec { + let url = format!( + "http://{}/api/torrents?token={}", + &self.connection_info.bind_address, &self.connection_info.api_token + ); + reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::>() + .await + .unwrap() + } + + pub async fn get_tracker_statistics(&self) -> StatsResource { + let url = format!( + "http://{}/api/stats?token={}", + &self.connection_info.bind_address, &self.connection_info.api_token + ); + reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::() + .await + .unwrap() + } } } From 7eb25a0d2c4524efdd6f6207a82d360607d4d92f Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 17:58:51 +0100 Subject: 
[PATCH 0213/1003] refactor: remove internal mod exports --- src/api/resources/auth_key_resource.rs | 6 ++-- src/api/resources/stats_resource.rs | 2 +- src/api/resources/torrent_resource.rs | 13 +++---- src/config.rs | 13 ++----- src/databases/database.rs | 2 +- src/databases/mysql.rs | 2 +- src/databases/sqlite.rs | 2 +- src/http/filters.rs | 6 ++-- src/http/handlers.rs | 13 +++---- src/http/mod.rs | 8 ----- src/http/request.rs | 2 +- src/http/response.rs | 2 +- src/http/routes.rs | 5 ++- src/http/server.rs | 11 +++--- src/jobs/http_tracker.rs | 3 +- src/jobs/torrent_cleanup.rs | 2 +- src/jobs/tracker_api.rs | 2 +- src/jobs/udp_tracker.rs | 3 +- src/lib.rs | 8 ----- src/logging.rs | 2 +- src/main.rs | 3 +- src/protocol/common.rs | 3 +- src/setup.rs | 2 +- src/stats.rs | 2 +- src/tracker/key.rs | 2 +- src/tracker/mod.rs | 8 ++--- src/tracker/peer.rs | 22 ++++++------ src/tracker/statistics.rs | 4 +-- src/tracker/torrent.rs | 10 +++--- src/udp/connection_cookie.rs | 2 +- src/udp/handlers.rs | 49 +++++++++++++------------- src/udp/mod.rs | 5 --- src/udp/request.rs | 2 +- src/udp/server.rs | 3 +- tests/api.rs | 6 ++-- tests/udp.rs | 3 +- 36 files changed, 108 insertions(+), 125 deletions(-) diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resources/auth_key_resource.rs index c38b7cc18..4fc5d0cf9 100644 --- a/src/api/resources/auth_key_resource.rs +++ b/src/api/resources/auth_key_resource.rs @@ -2,10 +2,10 @@ use std::convert::From; use serde::{Deserialize, Serialize}; -use crate::key::AuthKey; use crate::protocol::clock::DurationSinceUnixEpoch; +use crate::tracker::key::AuthKey; -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct AuthKeyResource { pub key: String, pub valid_until: Option, @@ -36,8 +36,8 @@ mod tests { use std::time::Duration; use super::AuthKeyResource; - use crate::key::AuthKey; use crate::protocol::clock::{DefaultClock, TimeNow}; + use crate::tracker::key::AuthKey; 
#[test] fn it_should_be_convertible_into_an_auth_key() { diff --git a/src/api/resources/stats_resource.rs b/src/api/resources/stats_resource.rs index 2fbaf42c1..e6f184897 100644 --- a/src/api/resources/stats_resource.rs +++ b/src/api/resources/stats_resource.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct StatsResource { pub torrents: u32, pub seeders: u32, diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs index 11e9d7196..784ffcb05 100644 --- a/src/api/resources/torrent_resource.rs +++ b/src/api/resources/torrent_resource.rs @@ -1,9 +1,9 @@ use serde::{Deserialize, Serialize}; -use crate::peer::TorrentPeer; -use crate::PeerId; +use crate::protocol::common::PeerId; +use crate::tracker::peer::TorrentPeer; -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct TorrentResource { pub info_hash: String, pub seeders: u32, @@ -13,7 +13,7 @@ pub struct TorrentResource { pub peers: Option>, } -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct TorrentListItemResource { pub info_hash: String, pub seeders: u32, @@ -23,7 +23,7 @@ pub struct TorrentListItemResource { pub peers: Option>, } -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct TorrentPeerResource { pub peer_id: PeerIdResource, pub peer_addr: String, @@ -36,7 +36,7 @@ pub struct TorrentPeerResource { pub event: String, } -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct PeerIdResource { pub id: Option, pub client: Option, @@ -52,6 +52,7 @@ impl From for PeerIdResource { } impl From for TorrentPeerResource { + #[allow(deprecated)] fn from(peer: TorrentPeer) -> Self { 
TorrentPeerResource { peer_id: PeerIdResource::from(peer.peer_id), diff --git a/src/config.rs b/src/config.rs index 8c17070d2..1afc55e54 100644 --- a/src/config.rs +++ b/src/config.rs @@ -10,7 +10,7 @@ use serde_with::{serde_as, NoneAsEmptyString}; use {std, toml}; use crate::databases::database::DatabaseDrivers; -use crate::mode::TrackerMode; +use crate::tracker::mode::TrackerMode; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct UdpTrackerConfig { @@ -161,6 +161,7 @@ impl Configuration { #[cfg(test)] mod tests { + use crate::config::{Configuration, ConfigurationError}; #[cfg(test)] fn default_config_toml() -> String { @@ -205,8 +206,6 @@ mod tests { #[test] fn configuration_should_have_default_values() { - use crate::Configuration; - let configuration = Configuration::default(); let toml = toml::to_string(&configuration).expect("Could not encode TOML value"); @@ -216,8 +215,6 @@ mod tests { #[test] fn configuration_should_contain_the_external_ip() { - use crate::Configuration; - let configuration = Configuration::default(); assert_eq!(configuration.external_ip, Option::Some(String::from("0.0.0.0"))); @@ -229,8 +226,6 @@ mod tests { use uuid::Uuid; - use crate::Configuration; - // Build temp config file path let temp_directory = env::temp_dir(); let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); @@ -275,8 +270,6 @@ mod tests { #[test] fn configuration_should_be_loaded_from_a_toml_config_file() { - use crate::Configuration; - let config_file_path = create_temp_config_file_with_default_config(); let configuration = Configuration::load_from_file(&config_file_path).expect("Could not load configuration from file"); @@ -286,8 +279,6 @@ mod tests { #[test] fn configuration_error_could_be_displayed() { - use crate::ConfigurationError; - let error = ConfigurationError::TrackerModeIncompatible; assert_eq!(format!("{}", error), "TrackerModeIncompatible"); diff --git a/src/databases/database.rs b/src/databases/database.rs 
index 795be0d45..52ca68291 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -4,8 +4,8 @@ use serde::{Deserialize, Serialize}; use crate::databases::mysql::MysqlDatabase; use crate::databases::sqlite::SqliteDatabase; +use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; -use crate::InfoHash; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub enum DatabaseDrivers { diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index fc6ff5098..5e7410ac2 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -10,8 +10,8 @@ use r2d2_mysql::MysqlConnectionManager; use crate::databases::database; use crate::databases::database::{Database, Error}; +use crate::protocol::common::{InfoHash, AUTH_KEY_LENGTH}; use crate::tracker::key::AuthKey; -use crate::{InfoHash, AUTH_KEY_LENGTH}; pub struct MysqlDatabase { pool: Pool, diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 7a567b07e..cf710a7e1 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -8,8 +8,8 @@ use r2d2_sqlite::SqliteConnectionManager; use crate::databases::database; use crate::databases::database::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; +use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; -use crate::InfoHash; pub struct SqliteDatabase { pool: Pool, diff --git a/src/http/filters.rs b/src/http/filters.rs index 42d1592ff..d8f5a81f8 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -5,10 +5,12 @@ use std::sync::Arc; use warp::{reject, Filter, Rejection}; -use crate::http::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; +use super::errors::ServerError; +use super::request::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest}; +use super::WebResult; +use crate::protocol::common::{InfoHash, PeerId, MAX_SCRAPE_TORRENTS}; use crate::tracker::key::AuthKey; use crate::tracker::TorrentTracker; -use crate::{InfoHash, PeerId, 
MAX_SCRAPE_TORRENTS}; /// Pass Arc along pub fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 87d2d51f6..c8b33c6d0 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -7,16 +7,17 @@ use log::debug; use warp::http::Response; use warp::{reject, Rejection, Reply}; -use crate::http::{ - AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, - WebResult, -}; -use crate::peer::TorrentPeer; +use super::errors::ServerError; +use super::request::{AnnounceRequest, ScrapeRequest}; +use super::response::{AnnounceResponse, Peer, ScrapeResponse, ScrapeResponseEntry}; +use crate::http::response::ErrorResponse; +use crate::http::WebResult; +use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; +use crate::tracker::peer::TorrentPeer; use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::{TorrentError, TorrentStats}; use crate::tracker::TorrentTracker; -use crate::InfoHash; /// Authenticate InfoHash using optional AuthKey pub async fn authenticate( diff --git a/src/http/mod.rs b/src/http/mod.rs index 4842c0a25..6e3ce7111 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -1,11 +1,3 @@ -pub use self::errors::*; -pub use self::filters::*; -pub use self::handlers::*; -pub use self::request::*; -pub use self::response::*; -pub use self::routes::*; -pub use self::server::*; - pub mod errors; pub mod filters; pub mod handlers; diff --git a/src/http/request.rs b/src/http/request.rs index 6dd025e8c..2d72a1a3c 100644 --- a/src/http/request.rs +++ b/src/http/request.rs @@ -3,7 +3,7 @@ use std::net::IpAddr; use serde::Deserialize; use crate::http::Bytes; -use crate::{InfoHash, PeerId}; +use crate::protocol::common::{InfoHash, PeerId}; #[derive(Deserialize)] pub struct AnnounceRequestQuery { diff --git a/src/http/response.rs b/src/http/response.rs index c87b5e0e8..44387a9f3 
100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -6,7 +6,7 @@ use std::net::IpAddr; use serde; use serde::Serialize; -use crate::InfoHash; +use crate::protocol::common::InfoHash; #[derive(Serialize)] pub struct Peer { diff --git a/src/http/routes.rs b/src/http/routes.rs index 8bfaf5ed9..f82bf45bc 100644 --- a/src/http/routes.rs +++ b/src/http/routes.rs @@ -3,9 +3,8 @@ use std::sync::Arc; use warp::{Filter, Rejection}; -use crate::http::{ - handle_announce, handle_scrape, send_error, with_announce_request, with_auth_key, with_scrape_request, with_tracker, -}; +use super::filters::{with_announce_request, with_auth_key, with_scrape_request, with_tracker}; +use super::handlers::{handle_announce, handle_scrape, send_error}; use crate::tracker::TorrentTracker; /// All routes diff --git a/src/http/server.rs b/src/http/server.rs index 4e48f97e3..d60387346 100644 --- a/src/http/server.rs +++ b/src/http/server.rs @@ -1,7 +1,7 @@ use std::net::SocketAddr; use std::sync::Arc; -use crate::http::routes; +use super::routes; use crate::tracker::TorrentTracker; /// Server that listens on HTTP, needs a TorrentTracker @@ -17,9 +17,10 @@ impl HttpServer { /// Start the HttpServer pub fn start(&self, socket_addr: SocketAddr) -> impl warp::Future { - let (_addr, server) = warp::serve(routes(self.tracker.clone())).bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - }); + let (_addr, server) = + warp::serve(routes::routes(self.tracker.clone())).bind_with_graceful_shutdown(socket_addr, async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + }); server } @@ -31,7 +32,7 @@ impl HttpServer { ssl_cert_path: String, ssl_key_path: String, ) -> impl warp::Future { - let (_addr, server) = warp::serve(routes(self.tracker.clone())) + let (_addr, server) = warp::serve(routes::routes(self.tracker.clone())) .tls() .cert_path(ssl_cert_path) .key_path(ssl_key_path) 
diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 2d8f307b4..8ae9eb3f5 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -4,8 +4,9 @@ use std::sync::Arc; use log::{info, warn}; use tokio::task::JoinHandle; +use crate::config::HttpTrackerConfig; +use crate::http::server::HttpServer; use crate::tracker::TorrentTracker; -use crate::{HttpServer, HttpTrackerConfig}; pub fn start_job(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.parse::().unwrap(); diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 04b064043..3b572d780 100644 --- a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -4,8 +4,8 @@ use chrono::Utc; use log::info; use tokio::task::JoinHandle; +use crate::config::Configuration; use crate::tracker::TorrentTracker; -use crate::Configuration; pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(&tracker); diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index ba5b8a1fb..b0b315f44 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -5,8 +5,8 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use crate::api::server; +use crate::config::Configuration; use crate::tracker::TorrentTracker; -use crate::Configuration; #[derive(Debug)] pub struct ApiServerJobStarted(); diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 00fdaddbe..90986455c 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -3,8 +3,9 @@ use std::sync::Arc; use log::{error, info, warn}; use tokio::task::JoinHandle; +use crate::config::UdpTrackerConfig; use crate::tracker::TorrentTracker; -use crate::{UdpServer, UdpTrackerConfig}; +use crate::udp::server::UdpServer; pub fn start_job(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.clone(); diff --git a/src/lib.rs b/src/lib.rs index 
cf830f108..7e4fe13a7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,11 +1,3 @@ -pub use api::server::*; -pub use http::server::*; -pub use protocol::common::*; -pub use udp::server::*; - -pub use self::config::*; -pub use self::tracker::*; - pub mod api; pub mod config; pub mod databases; diff --git a/src/logging.rs b/src/logging.rs index 5d0efa8a4..7682bace1 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -3,7 +3,7 @@ use std::sync::Once; use log::{info, LevelFilter}; -use crate::Configuration; +use crate::config::Configuration; static INIT: Once = Once::new(); diff --git a/src/main.rs b/src/main.rs index bf832dbf4..f64354fcf 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,9 +1,10 @@ use std::sync::Arc; use log::info; +use torrust_tracker::config::Configuration; use torrust_tracker::stats::setup_statistics; use torrust_tracker::tracker::TorrentTracker; -use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, Configuration}; +use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time}; #[tokio::main] async fn main() { diff --git a/src/protocol/common.rs b/src/protocol/common.rs index ce1cbf253..efeb328c9 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -94,12 +94,13 @@ impl<'de> serde::de::Deserialize<'de> for InfoHash { #[cfg(test)] mod tests { + use std::str::FromStr; use serde::{Deserialize, Serialize}; use serde_json::json; - use crate::InfoHash; + use super::InfoHash; #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] struct ContainingInfoHash { diff --git a/src/setup.rs b/src/setup.rs index 9906a2d03..736f448b6 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -3,9 +3,9 @@ use std::sync::Arc; use log::warn; use tokio::task::JoinHandle; +use crate::config::Configuration; use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, udp_tracker}; use crate::tracker::TorrentTracker; -use crate::Configuration; pub async fn setup(config: &Configuration, tracker: Arc) -> Vec> { let mut jobs: Vec> 
= Vec::new(); diff --git a/src/stats.rs b/src/stats.rs index 1f387a084..22b74c8d3 100644 --- a/src/stats.rs +++ b/src/stats.rs @@ -1,4 +1,4 @@ -use crate::statistics::{StatsRepository, StatsTracker, TrackerStatisticsEventSender}; +use crate::tracker::statistics::{StatsRepository, StatsTracker, TrackerStatisticsEventSender}; pub fn setup_statistics(tracker_usage_statistics: bool) -> (Option>, StatsRepository) { let mut stats_event_sender = None; diff --git a/src/tracker/key.rs b/src/tracker/key.rs index 1bf0557a1..6d3f3c320 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ -7,7 +7,7 @@ use rand::{thread_rng, Rng}; use serde::Serialize; use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time, TimeNow}; -use crate::AUTH_KEY_LENGTH; +use crate::protocol::common::AUTH_KEY_LENGTH; pub fn generate_auth_key(lifetime: Duration) -> AuthKey { let key: String = thread_rng() diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index a3eecd427..f31347e3e 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -13,15 +13,15 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; +use self::mode::TrackerMode; +use self::peer::TorrentPeer; +use self::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; +use crate::config::Configuration; use crate::databases::database; use crate::databases::database::Database; -use crate::mode::TrackerMode; -use crate::peer::TorrentPeer; use crate::protocol::common::InfoHash; -use crate::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; use crate::tracker::key::AuthKey; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; -use crate::Configuration; pub struct TorrentTracker { pub config: Arc, diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 42ef6a60b..77613e080 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -4,7 
+4,7 @@ use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde; use serde::Serialize; -use crate::http::AnnounceRequest; +use crate::http::request::AnnounceRequest; use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef, PeerId}; use crate::protocol::utils::ser_unix_time_value; @@ -95,9 +95,9 @@ mod test { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::peer::TorrentPeer; use crate::protocol::clock::{DefaultClock, Time}; - use crate::PeerId; + use crate::protocol::common::PeerId; + use crate::tracker::peer::TorrentPeer; #[test] fn it_should_be_serializable() { @@ -129,7 +129,7 @@ mod test { AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, }; - use crate::peer::TorrentPeer; + use crate::tracker::peer::TorrentPeer; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; // todo: duplicate functions is PR 82. Remove duplication once both PR are merged. 
@@ -200,8 +200,8 @@ mod test { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::str::FromStr; - use crate::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; - use crate::peer::TorrentPeer; + use crate::tracker::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; + use crate::tracker::peer::TorrentPeer; #[test] fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { @@ -241,8 +241,8 @@ mod test { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::str::FromStr; - use crate::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; - use crate::peer::TorrentPeer; + use crate::tracker::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; + use crate::tracker::peer::TorrentPeer; #[test] fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { @@ -281,9 +281,9 @@ mod test { mod torrent_peer_constructor_from_for_http_requests { use std::net::{IpAddr, Ipv4Addr}; - use crate::http::AnnounceRequest; - use crate::peer::TorrentPeer; - use crate::{InfoHash, PeerId}; + use crate::http::request::AnnounceRequest; + use crate::protocol::common::{InfoHash, PeerId}; + use crate::tracker::peer::TorrentPeer; fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> AnnounceRequest { AnnounceRequest { diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index ac3889270..50804a5f4 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -271,7 +271,7 @@ impl StatsRepository { mod tests { mod stats_tracker { - use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; + use crate::tracker::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; #[tokio::test] async fn should_contain_the_tracker_statistics() { @@ -295,7 +295,7 @@ mod tests { } mod event_handler { - use crate::statistics::{event_handler, 
StatsRepository, TrackerStatisticsEvent}; + use crate::tracker::statistics::{event_handler, StatsRepository, TrackerStatisticsEvent}; #[tokio::test] async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 335554006..f23858949 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -4,9 +4,9 @@ use std::time::Duration; use aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; -use crate::peer::TorrentPeer; +use super::peer::TorrentPeer; use crate::protocol::clock::{DefaultClock, TimeNow}; -use crate::{PeerId, MAX_SCRAPE_TORRENTS}; +use crate::protocol::common::{PeerId, MAX_SCRAPE_TORRENTS}; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct TorrentEntry { @@ -113,10 +113,10 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::peer::TorrentPeer; use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, WorkingClock}; - use crate::torrent::TorrentEntry; - use crate::PeerId; + use crate::protocol::common::PeerId; + use crate::tracker::peer::TorrentPeer; + use crate::tracker::torrent::TorrentEntry; struct TorrentPeerBuilder { peer: TorrentPeer, diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index c40a56959..ef241245a 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -2,8 +2,8 @@ use std::net::SocketAddr; use aquatic_udp_protocol::ConnectionId; +use super::errors::ServerError; use crate::protocol::clock::time_extent::{Extent, TimeExtent}; -use crate::udp::ServerError; pub type Cookie = [u8; 8]; diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 5514bc1eb..30b33225c 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -7,13 +7,13 @@ use aquatic_udp_protocol::{ }; use super::connection_cookie::{check_connection_cookie, from_connection_id, into_connection_id, make_connection_cookie}; 
-use crate::peer::TorrentPeer; +use crate::protocol::common::{InfoHash, MAX_SCRAPE_TORRENTS}; +use crate::tracker::peer::TorrentPeer; use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::TorrentError; use crate::tracker::TorrentTracker; use crate::udp::errors::ServerError; use crate::udp::request::AnnounceRequestWrapper; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS}; pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { match tracker.authenticate_request(info_hash, &None).await { @@ -252,12 +252,13 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::mode::TrackerMode; - use crate::peer::TorrentPeer; + use crate::config::Configuration; use crate::protocol::clock::{DefaultClock, Time}; - use crate::statistics::StatsTracker; + use crate::protocol::common::PeerId; + use crate::tracker::mode::TrackerMode; + use crate::tracker::peer::TorrentPeer; + use crate::tracker::statistics::StatsTracker; use crate::tracker::TorrentTracker; - use crate::{Configuration, PeerId}; fn default_tracker_config() -> Arc { Arc::new(Configuration::default()) @@ -373,10 +374,10 @@ mod tests { use mockall::predicate::eq; use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr}; - use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; + use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_connect; + use crate::udp::handlers::handle_connect; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; fn sample_connect_request() -> ConnectRequest { @@ -545,15 +546,15 @@ mod tests { }; use mockall::predicate::eq; - use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, 
TrackerStatisticsEvent}; + use crate::protocol::common::PeerId; + use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_announce; + use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, TorrentPeerBuilder, }; - use crate::PeerId; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -716,11 +717,11 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use crate::protocol::common::PeerId; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_announce; + use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{initialized_public_tracker, TorrentPeerBuilder}; - use crate::PeerId; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { @@ -770,15 +771,15 @@ mod tests { }; use mockall::predicate::eq; - use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; + use crate::protocol::common::PeerId; + use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_announce; + use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, 
TorrentPeerBuilder, }; - use crate::PeerId; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -951,10 +952,10 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::statistics::StatsTracker; + use crate::tracker::statistics::StatsTracker; use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_announce; + use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::TrackerConfigurationBuilder; @@ -1013,11 +1014,11 @@ mod tests { }; use super::TorrentPeerBuilder; + use crate::protocol::common::PeerId; use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_scrape; + use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; - use crate::PeerId; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { @@ -1123,7 +1124,7 @@ mod tests { use aquatic_udp_protocol::InfoHash; - use crate::udp::handle_scrape; + use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::scrape_request::{ add_a_sample_seeder_and_scrape, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; @@ -1162,7 +1163,7 @@ mod tests { mod with_a_whitelisted_tracker { use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - use crate::udp::handle_scrape; + use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::scrape_request::{ add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; @@ -1231,7 +1232,7 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::statistics::{MockTrackerStatisticsEventSender, 
StatsRepository, TrackerStatisticsEvent}; + use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr}; @@ -1264,7 +1265,7 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; + use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr}; diff --git a/src/udp/mod.rs b/src/udp/mod.rs index 4c98875c5..327f03eed 100644 --- a/src/udp/mod.rs +++ b/src/udp/mod.rs @@ -1,8 +1,3 @@ -pub use self::errors::*; -pub use self::handlers::*; -pub use self::request::*; -pub use self::server::*; - pub mod connection_cookie; pub mod errors; pub mod handlers; diff --git a/src/udp/request.rs b/src/udp/request.rs index 6531f54b9..67aaeb57f 100644 --- a/src/udp/request.rs +++ b/src/udp/request.rs @@ -1,6 +1,6 @@ use aquatic_udp_protocol::AnnounceRequest; -use crate::InfoHash; +use crate::protocol::common::InfoHash; // struct AnnounceRequest { // pub connection_id: i64, diff --git a/src/udp/server.rs b/src/udp/server.rs index 2f41c3c4d..5c215f9ec 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -7,7 +7,8 @@ use log::{debug, info}; use tokio::net::UdpSocket; use crate::tracker::TorrentTracker; -use crate::udp::{handle_packet, MAX_PACKET_SIZE}; +use crate::udp::handlers::handle_packet; +use crate::udp::MAX_PACKET_SIZE; pub struct UdpServer { socket: Arc, diff --git a/tests/api.rs b/tests/api.rs index 475da9a24..a5ae79621 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -19,13 +19,15 @@ mod tracker_api { use 
torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; use torrust_tracker::api::resources::stats_resource::StatsResource; use torrust_tracker::api::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; + use torrust_tracker::config::Configuration; use torrust_tracker::jobs::tracker_api; - use torrust_tracker::peer::TorrentPeer; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; + use torrust_tracker::protocol::common::{InfoHash, PeerId}; use torrust_tracker::tracker::key::AuthKey; + use torrust_tracker::tracker::peer::TorrentPeer; use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::TorrentTracker; - use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration, InfoHash, PeerId}; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; use crate::common::ephemeral_random_port; diff --git a/tests/udp.rs b/tests/udp.rs index ab96259c5..7a0d883a5 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -18,11 +18,12 @@ mod udp_tracker_server { }; use tokio::net::UdpSocket; use tokio::task::JoinHandle; + use torrust_tracker::config::Configuration; use torrust_tracker::jobs::udp_tracker; use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::TorrentTracker; use torrust_tracker::udp::MAX_PACKET_SIZE; - use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration}; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; use crate::common::ephemeral_random_port; From 81c41293cb6c029085efcced0ff6bcb40c586d90 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 17:19:34 +0100 Subject: [PATCH 0214/1003] vscode: clippy pedantic warnings --- .vscode/settings.json | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.vscode/settings.json b/.vscode/settings.json index f1027e9bd..94f199bd6 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -3,4 +3,6 @@
"editor.formatOnSave": true }, "rust-analyzer.checkOnSave.command": "clippy", + "rust-analyzer.checkOnSave.allTargets": true, + "rust-analyzer.checkOnSave.extraArgs": ["--","-W","clippy::pedantic"], } \ No newline at end of file From 2b88ce50070ae8593e9d06b1788cbffce016139e Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 18:11:12 +0100 Subject: [PATCH 0215/1003] clippy: auto fix --- src/api/resources/mod.rs | 6 ++--- src/api/resources/torrent_resource.rs | 2 +- src/api/server.rs | 2 +- src/config.rs | 4 +++- src/databases/database.rs | 2 +- src/databases/sqlite.rs | 6 ++--- src/http/filters.rs | 20 +++++++++-------- src/http/handlers.rs | 4 ++-- src/http/response.rs | 4 +++- src/http/server.rs | 7 +++--- src/jobs/http_tracker.rs | 1 + src/jobs/torrent_cleanup.rs | 1 + src/jobs/tracker_api.rs | 4 +--- src/jobs/udp_tracker.rs | 1 + src/protocol/clock/mod.rs | 4 +++- src/protocol/clock/time_extent.rs | 32 +++++++++++++++------------ src/protocol/common.rs | 4 +++- src/protocol/crypto.rs | 4 ++-- src/stats.rs | 1 + src/tracker/key.rs | 3 +++ src/tracker/mod.rs | 6 ++--- src/tracker/peer.rs | 6 ++++- src/tracker/statistics.rs | 6 ++++- src/tracker/torrent.rs | 7 ++++-- src/udp/connection_cookie.rs | 3 +++ src/udp/handlers.rs | 2 +- src/udp/mod.rs | 2 +- src/udp/request.rs | 1 + tests/api.rs | 4 ++-- tests/udp.rs | 6 ++--- 30 files changed, 95 insertions(+), 60 deletions(-) diff --git a/src/api/resources/mod.rs b/src/api/resources/mod.rs index d214d8a59..2b3e4b886 100644 --- a/src/api/resources/mod.rs +++ b/src/api/resources/mod.rs @@ -2,9 +2,9 @@ //! //! WIP. Not all endpoints have their resource structs. //! -//! - [x] AuthKeys -//! - [ ] TorrentResource, TorrentListItemResource, TorrentPeerResource, PeerIdResource -//! - [ ] StatsResource +//! - [x] `AuthKeys` +//! - [ ] `TorrentResource`, `TorrentListItemResource`, `TorrentPeerResource`, `PeerIdResource` +//! - [ ] `StatsResource` //! - [ ] ... 
pub mod auth_key_resource; pub mod stats_resource; diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs index 784ffcb05..eb9620d23 100644 --- a/src/api/resources/torrent_resource.rs +++ b/src/api/resources/torrent_resource.rs @@ -46,7 +46,7 @@ impl From for PeerIdResource { fn from(peer_id: PeerId) -> Self { PeerIdResource { id: peer_id.get_id(), - client: peer_id.get_client_name().map(|client_name| client_name.to_string()), + client: peer_id.get_client_name().map(std::string::ToString::to_string), } } } diff --git a/src/api/server.rs b/src/api/server.rs index 41e6f7074..ce272b3ac 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -10,7 +10,7 @@ use warp::{filters, reply, serve, Filter}; use super::resources::auth_key_resource::AuthKeyResource; use super::resources::stats_resource::StatsResource; use super::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; -use crate::protocol::common::*; +use crate::protocol::common::InfoHash; use crate::tracker::TorrentTracker; #[derive(Deserialize, Debug)] diff --git a/src/config.rs b/src/config.rs index 1afc55e54..1199c7fe7 100644 --- a/src/config.rs +++ b/src/config.rs @@ -77,6 +77,7 @@ impl std::fmt::Display for ConfigurationError { impl std::error::Error for ConfigurationError {} impl Configuration { + #[must_use] pub fn get_ext_ip(&self) -> Option { match &self.external_ip { None => None, @@ -87,6 +88,7 @@ impl Configuration { } } + #[must_use] pub fn default() -> Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), @@ -198,7 +200,7 @@ mod tests { admin = "MyAccessToken" "# .lines() - .map(|line| line.trim_start()) + .map(str::trim_start) .collect::>() .join("\n"); config diff --git a/src/databases/database.rs b/src/databases/database.rs index 52ca68291..87a91ddeb 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -55,7 +55,7 @@ pub trait Database: Sync + Send { 
async fn remove_key_from_keys(&self, key: &str) -> Result; async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { - if let Err(e) = self.get_info_hash_from_whitelist(&info_hash.to_owned().to_string()).await { + if let Err(e) = self.get_info_hash_from_whitelist(&info_hash.clone().to_string()).await { if let Error::QueryReturnedNoRows = e { return Ok(false); } else { diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index cf710a7e1..19849f297 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -70,7 +70,7 @@ impl Database for SqliteDatabase { Ok((info_hash, completed)) })?; - let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(|x| x.ok()).collect(); + let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(std::result::Result::ok).collect(); Ok(torrents) } @@ -90,7 +90,7 @@ impl Database for SqliteDatabase { }) })?; - let keys: Vec = keys_iter.filter_map(|x| x.ok()).collect(); + let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); Ok(keys) } @@ -106,7 +106,7 @@ impl Database for SqliteDatabase { Ok(InfoHash::from_str(&info_hash).unwrap()) })?; - let info_hashes: Vec = info_hash_iter.filter_map(|x| x.ok()).collect(); + let info_hashes: Vec = info_hash_iter.filter_map(std::result::Result::ok).collect(); Ok(info_hashes) } diff --git a/src/http/filters.rs b/src/http/filters.rs index d8f5a81f8..d33acbcfa 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -13,6 +13,7 @@ use crate::tracker::key::AuthKey; use crate::tracker::TorrentTracker; /// Pass Arc along +#[must_use] pub fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { warp::any().map(move || tracker.clone()) } @@ -22,19 +23,20 @@ pub fn with_info_hash() -> impl Filter,), Error = Rejec warp::filters::query::raw().and_then(info_hashes) } -/// Check for PeerId +/// Check for `PeerId` pub fn with_peer_id() -> impl Filter + Clone { warp::filters::query::raw().and_then(peer_id) } /// Pass Arc along 
+#[must_use] pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() .map(|key: String| AuthKey::from_string(&key)) .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) } -/// Check for PeerAddress +/// Check for `PeerAddress` pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { warp::addr::remote() .and(warp::header::optional::("X-Forwarded-For")) @@ -44,7 +46,7 @@ pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { warp::filters::query::query::() .and(with_info_hash()) @@ -53,7 +55,7 @@ pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { warp::any() .and(with_info_hash()) @@ -61,7 +63,7 @@ pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter WebResult> { let split_raw_query: Vec<&str> = raw_query.split('&').collect(); let mut info_hashes: Vec = Vec::new(); @@ -86,7 +88,7 @@ async fn info_hashes(raw_query: String) -> WebResult> { } } -/// Parse PeerId from raw query string +/// Parse `PeerId` from raw query string async fn peer_id(raw_query: String) -> WebResult { // put all query params in a vec let split_raw_query: Vec<&str> = raw_query.split('&').collect(); @@ -123,7 +125,7 @@ async fn peer_id(raw_query: String) -> WebResult { } } -/// Get PeerAddress from RemoteAddress or Forwarded +/// Get `PeerAddress` from `RemoteAddress` or Forwarded async fn peer_addr( (on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option), ) -> WebResult { @@ -151,7 +153,7 @@ async fn peer_addr( } } -/// Parse AnnounceRequest from raw AnnounceRequestQuery, InfoHash and Option +/// Parse `AnnounceRequest` from raw `AnnounceRequestQuery`, `InfoHash` and Option async fn announce_request( announce_request_query: AnnounceRequestQuery, info_hashes: Vec, @@ -171,7 +173,7 @@ async fn announce_request( }) } -/// Parse ScrapeRequest from InfoHash +/// Parse `ScrapeRequest` from `InfoHash` async fn scrape_request(info_hashes: Vec, 
peer_addr: IpAddr) -> WebResult { Ok(ScrapeRequest { info_hashes, peer_addr }) } diff --git a/src/http/handlers.rs b/src/http/handlers.rs index c8b33c6d0..cd521b43b 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -19,7 +19,7 @@ use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::{TorrentError, TorrentStats}; use crate::tracker::TorrentTracker; -/// Authenticate InfoHash using optional AuthKey +/// Authenticate `InfoHash` using optional `AuthKey` pub async fn authenticate( info_hash: &InfoHash, auth_key: &Option, @@ -93,7 +93,7 @@ pub async fn handle_scrape( let mut files: HashMap = HashMap::new(); let db = tracker.get_torrents().await; - for info_hash in scrape_request.info_hashes.iter() { + for info_hash in &scrape_request.info_hashes { let scrape_entry = match db.get(info_hash) { Some(torrent_info) => { if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { diff --git a/src/http/response.rs b/src/http/response.rs index 44387a9f3..cb01068fa 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -27,6 +27,7 @@ pub struct AnnounceResponse { } impl AnnounceResponse { + #[must_use] pub fn write(&self) -> String { serde_bencode::to_string(&self).unwrap() } @@ -89,7 +90,7 @@ impl ScrapeResponse { bytes.write_all(b"d5:filesd")?; - for (info_hash, scrape_response_entry) in self.files.iter() { + for (info_hash, scrape_response_entry) in &self.files { bytes.write_all(b"20:")?; bytes.write_all(&info_hash.0)?; bytes.write_all(b"d8:completei")?; @@ -114,6 +115,7 @@ pub struct ErrorResponse { } impl ErrorResponse { + #[must_use] pub fn write(&self) -> String { serde_bencode::to_string(&self).unwrap() } diff --git a/src/http/server.rs b/src/http/server.rs index d60387346..97ec30aa0 100644 --- a/src/http/server.rs +++ b/src/http/server.rs @@ -4,18 +4,19 @@ use std::sync::Arc; use super::routes; use crate::tracker::TorrentTracker; -/// Server that listens on HTTP, needs a TorrentTracker +/// Server that 
listens on HTTP, needs a `TorrentTracker` #[derive(Clone)] pub struct HttpServer { tracker: Arc, } impl HttpServer { + #[must_use] pub fn new(tracker: Arc) -> HttpServer { HttpServer { tracker } } - /// Start the HttpServer + /// Start the `HttpServer` pub fn start(&self, socket_addr: SocketAddr) -> impl warp::Future { let (_addr, server) = warp::serve(routes::routes(self.tracker.clone())).bind_with_graceful_shutdown(socket_addr, async move { @@ -25,7 +26,7 @@ impl HttpServer { server } - /// Start the HttpServer in TLS mode + /// Start the `HttpServer` in TLS mode pub fn start_tls( &self, socket_addr: SocketAddr, diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 8ae9eb3f5..6070e0d27 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -8,6 +8,7 @@ use crate::config::HttpTrackerConfig; use crate::http::server::HttpServer; use crate::tracker::TorrentTracker; +#[must_use] pub fn start_job(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.parse::().unwrap(); let ssl_enabled = config.ssl_enabled; diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 3b572d780..3d7b49d6b 100644 --- a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -7,6 +7,7 @@ use tokio::task::JoinHandle; use crate::config::Configuration; use crate::tracker::TorrentTracker; +#[must_use] pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(&tracker); let interval = config.inactive_peer_cleanup_interval; diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index b0b315f44..ac7657858 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -26,9 +26,7 @@ pub async fn start_job(config: &Configuration, tracker: Arc) -> let join_handle = tokio::spawn(async move { let handel = server::start(bind_addr, tracker); - if tx.send(ApiServerJobStarted()).is_err() { - panic!("the start job dropped"); - } 
+ assert!(tx.send(ApiServerJobStarted()).is_ok(), "the start job dropped"); handel.await; }); diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 90986455c..8bf839380 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -7,6 +7,7 @@ use crate::config::UdpTrackerConfig; use crate::tracker::TorrentTracker; use crate::udp::server::UdpServer; +#[must_use] pub fn start_job(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.clone(); diff --git a/src/protocol/clock/mod.rs b/src/protocol/clock/mod.rs index 4e15950e6..51197dba6 100644 --- a/src/protocol/clock/mod.rs +++ b/src/protocol/clock/mod.rs @@ -26,9 +26,11 @@ pub trait Time: Sized { } pub trait TimeNow: Time { + #[must_use] fn add(add_time: &Duration) -> Option { Self::now().checked_add(*add_time) } + #[must_use] fn sub(sub_time: &Duration) -> Option { Self::now().checked_sub(*sub_time) } @@ -240,7 +242,7 @@ mod stopped_clock { #[test] fn it_should_get_app_start_time() { - const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1662983731, 22312); + const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1_662_983_731, 22312); assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST); } } diff --git a/src/protocol/clock/time_extent.rs b/src/protocol/clock/time_extent.rs index 3fa60de82..f975e9a04 100644 --- a/src/protocol/clock/time_extent.rs +++ b/src/protocol/clock/time_extent.rs @@ -37,6 +37,7 @@ pub const MAX: TimeExtent = TimeExtent { }; impl TimeExtent { + #[must_use] pub const fn from_sec(seconds: u64, amount: &TimeExtentMultiplier) -> Self { Self { increment: TimeExtentBase::from_secs(seconds), @@ -48,10 +49,10 @@ impl TimeExtent { fn checked_duration_from_nanos(time: u128) -> Result { const NANOS_PER_SEC: u32 = 1_000_000_000; - let secs = time.div_euclid(NANOS_PER_SEC as u128); - let nanos = time.rem_euclid(NANOS_PER_SEC as u128); + let secs = time.div_euclid(u128::from(NANOS_PER_SEC)); + let nanos = 
time.rem_euclid(u128::from(NANOS_PER_SEC)); - assert!(nanos < NANOS_PER_SEC as u128); + assert!(nanos < u128::from(NANOS_PER_SEC)); match u64::try_from(secs) { Err(error) => Err(error), @@ -94,14 +95,14 @@ impl Extent for TimeExtent { fn total(&self) -> Option> { self.increment .as_nanos() - .checked_mul(self.amount as u128) + .checked_mul(u128::from(self.amount)) .map(checked_duration_from_nanos) } fn total_next(&self) -> Option> { self.increment .as_nanos() - .checked_mul((self.amount as u128) + 1) + .checked_mul(u128::from(self.amount) + 1) .map(checked_duration_from_nanos) } } @@ -110,6 +111,7 @@ pub trait MakeTimeExtent: Sized where Clock: TimeNow, { + #[must_use] fn now(increment: &TimeExtentBase) -> Option> { Clock::now() .as_nanos() @@ -120,6 +122,7 @@ where }) } + #[must_use] fn now_after(increment: &TimeExtentBase, add_time: &Duration) -> Option> { match Clock::add(add_time) { None => None, @@ -134,6 +137,7 @@ where } } + #[must_use] fn now_before(increment: &TimeExtentBase, sub_time: &Duration) -> Option> { match Clock::sub(sub_time) { None => None, @@ -173,7 +177,7 @@ mod test { }; use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedTime}; - const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239812388723); + const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239_812_388_723); mod fn_checked_duration_from_nanos { use std::time::Duration; @@ -190,11 +194,11 @@ mod test { #[test] fn it_should_be_the_same_as_duration_implementation_for_u64_numbers() { assert_eq!( - checked_duration_from_nanos(1232143214343432).unwrap(), - Duration::from_nanos(1232143214343432) + checked_duration_from_nanos(1_232_143_214_343_432).unwrap(), + Duration::from_nanos(1_232_143_214_343_432) ); assert_eq!( - checked_duration_from_nanos(u64::MAX as u128).unwrap(), + checked_duration_from_nanos(u128::from(u64::MAX)).unwrap(), Duration::from_nanos(u64::MAX) ); } @@ -202,7 +206,7 @@ mod test { #[test] fn 
it_should_work_for_some_numbers_larger_than_u64() { assert_eq!( - checked_duration_from_nanos(TIME_EXTENT_VAL.amount as u128 * NANOS_PER_SEC as u128).unwrap(), + checked_duration_from_nanos(u128::from(TIME_EXTENT_VAL.amount) * u128::from(NANOS_PER_SEC)).unwrap(), Duration::from_secs(TIME_EXTENT_VAL.amount) ); } @@ -515,14 +519,14 @@ mod test { assert_eq!( DefaultTimeExtentMaker::now_before( - &TimeExtentBase::from_secs(u32::MAX as u64), - &Duration::from_secs(u32::MAX as u64) + &TimeExtentBase::from_secs(u64::from(u32::MAX)), + &Duration::from_secs(u64::from(u32::MAX)) ) .unwrap() .unwrap(), TimeExtent { - increment: TimeExtentBase::from_secs(u32::MAX as u64), - amount: 4294967296 + increment: TimeExtentBase::from_secs(u64::from(u32::MAX)), + amount: 4_294_967_296 } ); } diff --git a/src/protocol/common.rs b/src/protocol/common.rs index efeb328c9..c5c9b4578 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -233,14 +233,16 @@ impl std::fmt::Display for PeerId { } impl PeerId { + #[must_use] pub fn get_id(&self) -> Option { let buff_size = self.0.len() * 2; let mut tmp: Vec = vec![0; buff_size]; binascii::bin2hex(&self.0, &mut tmp).unwrap(); - std::str::from_utf8(&tmp).ok().map(|id| id.to_string()) + std::str::from_utf8(&tmp).ok().map(std::string::ToString::to_string) } + #[must_use] pub fn get_client_name(&self) -> Option<&'static str> { if self.0[0] == b'M' { return Some("BitTorrent"); diff --git a/src/protocol/crypto.rs b/src/protocol/crypto.rs index 18cfaf5e6..6e1517ef8 100644 --- a/src/protocol/crypto.rs +++ b/src/protocol/crypto.rs @@ -89,8 +89,8 @@ pub mod keys { #[test] fn it_should_have_a_large_random_seed() { - assert!(u128::from_ne_bytes((*RANDOM_SEED)[..16].try_into().unwrap()) > u64::MAX as u128); - assert!(u128::from_ne_bytes((*RANDOM_SEED)[16..].try_into().unwrap()) > u64::MAX as u128); + assert!(u128::from_ne_bytes((*RANDOM_SEED)[..16].try_into().unwrap()) > u128::from(u64::MAX)); + 
assert!(u128::from_ne_bytes((*RANDOM_SEED)[16..].try_into().unwrap()) > u128::from(u64::MAX)); } } } diff --git a/src/stats.rs b/src/stats.rs index 22b74c8d3..738909934 100644 --- a/src/stats.rs +++ b/src/stats.rs @@ -1,5 +1,6 @@ use crate::tracker::statistics::{StatsRepository, StatsTracker, TrackerStatisticsEventSender}; +#[must_use] pub fn setup_statistics(tracker_usage_statistics: bool) -> (Option>, StatsRepository) { let mut stats_event_sender = None; diff --git a/src/tracker/key.rs b/src/tracker/key.rs index 6d3f3c320..881dac877 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ -9,6 +9,7 @@ use serde::Serialize; use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time, TimeNow}; use crate::protocol::common::AUTH_KEY_LENGTH; +#[must_use] pub fn generate_auth_key(lifetime: Duration) -> AuthKey { let key: String = thread_rng() .sample_iter(&Alphanumeric) @@ -43,6 +44,7 @@ pub struct AuthKey { } impl AuthKey { + #[must_use] pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { Some(AuthKey { key, valid_until: None }) @@ -51,6 +53,7 @@ impl AuthKey { } } + #[must_use] pub fn from_string(key: &str) -> Option { if key.len() != AUTH_KEY_LENGTH { None diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index f31347e3e..6aae06a4b 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -201,7 +201,7 @@ impl TorrentTracker { match read_lock.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers(Some(client_addr)).into_iter().cloned().collect(), + Some(entry) => entry.get_peers(Some(client_addr)).into_iter().copied().collect(), } } @@ -211,7 +211,7 @@ impl TorrentTracker { match read_lock.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers(None).into_iter().cloned().collect(), + Some(entry) => entry.get_peers(None).into_iter().copied().collect(), } } @@ -236,9 +236,9 @@ impl TorrentTracker { let (seeders, completed, leechers) = 
torrent_entry.get_stats(); TorrentStats { + completed, seeders, leechers, - completed, } } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 77613e080..a5f000eca 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -26,6 +26,7 @@ pub struct TorrentPeer { } impl TorrentPeer { + #[must_use] pub fn from_udp_announce_request( announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, @@ -44,6 +45,7 @@ impl TorrentPeer { } } + #[must_use] pub fn from_http_announce_request( announce_request: &AnnounceRequest, remote_ip: IpAddr, @@ -63,7 +65,7 @@ impl TorrentPeer { }; TorrentPeer { - peer_id: announce_request.peer_id.clone(), + peer_id: announce_request.peer_id, peer_addr, updated: DefaultClock::now(), uploaded: NumberOfBytes(announce_request.uploaded as i64), @@ -74,6 +76,7 @@ impl TorrentPeer { } // potentially substitute localhost ip with external ip + #[must_use] pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: Option, port: u16) -> SocketAddr { if let Some(host_ip) = host_opt_ip.filter(|_| remote_ip.is_loopback()) { SocketAddr::new(host_ip, port) @@ -82,6 +85,7 @@ impl TorrentPeer { } } + #[must_use] pub fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 50804a5f4..609f036aa 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use async_trait::async_trait; use log::debug; #[cfg(test)] -use mockall::{automock, predicate::*}; +use mockall::{automock, predicate::str}; use tokio::sync::mpsc::error::SendError; use tokio::sync::mpsc::{Receiver, Sender}; use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; @@ -47,6 +47,7 @@ impl Default for TrackerStatistics { } impl TrackerStatistics { + #[must_use] pub fn new() -> Self { Self { tcp4_connections_handled: 0, @@ -76,12 +77,14 @@ impl Default for StatsTracker { } impl StatsTracker { + #[must_use] 
pub fn new() -> Self { Self { stats_repository: StatsRepository::new(), } } + #[must_use] pub fn new_active_instance() -> (Box, StatsRepository) { let mut stats_tracker = Self::new(); @@ -184,6 +187,7 @@ impl Default for StatsRepository { } impl StatsRepository { + #[must_use] pub fn new() -> Self { Self { stats: Arc::new(RwLock::new(TrackerStatistics::new())), diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index f23858949..46608643d 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -16,6 +16,7 @@ pub struct TorrentEntry { } impl TorrentEntry { + #[must_use] pub fn new() -> TorrentEntry { TorrentEntry { peers: std::collections::BTreeMap::new(), @@ -47,6 +48,7 @@ impl TorrentEntry { did_torrent_stats_change } + #[must_use] pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&TorrentPeer> { self.peers .values() @@ -70,6 +72,7 @@ impl TorrentEntry { .collect() } + #[must_use] pub fn get_stats(&self) -> (u32, u32, u32) { let seeders: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; let leechers: u32 = self.peers.len() as u32 - seeders; @@ -77,7 +80,7 @@ impl TorrentEntry { } pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { - let current_cutoff = DefaultClock::sub(&Duration::from_secs(max_peer_timeout as u64)).unwrap_or_default(); + let current_cutoff = DefaultClock::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); self.peers.retain(|_, peer| peer.updated > current_cutoff); } } @@ -358,7 +361,7 @@ mod tests { let now = WorkingClock::now(); StoppedClock::local_set(&now); - let timeout_seconds_before_now = now.sub(Duration::from_secs(timeout as u64)); + let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); let inactive_peer = TorrentPeerBuilder::default() .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) .into(); diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index ef241245a..b18940dfc 
100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -11,14 +11,17 @@ pub type SinceUnixEpochTimeExtent = TimeExtent; pub const COOKIE_LIFETIME: TimeExtent = TimeExtent::from_sec(2, &60); +#[must_use] pub fn from_connection_id(connection_id: &ConnectionId) -> Cookie { connection_id.0.to_le_bytes() } +#[must_use] pub fn into_connection_id(connection_cookie: &Cookie) -> ConnectionId { ConnectionId(i64::from_le_bytes(*connection_cookie)) } +#[must_use] pub fn make_connection_cookie(remote_address: &SocketAddr) -> Cookie { let time_extent = cookie_builder::get_last_time_extent(); diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 30b33225c..81578e9c3 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -188,7 +188,7 @@ pub async fn handle_scrape( let mut torrent_stats: Vec = Vec::new(); - for info_hash in request.info_hashes.iter() { + for info_hash in &request.info_hashes { let info_hash = InfoHash(info_hash.0); let scrape_entry = match db.get(&info_hash) { diff --git a/src/udp/mod.rs b/src/udp/mod.rs index 327f03eed..2a8d42d9f 100644 --- a/src/udp/mod.rs +++ b/src/udp/mod.rs @@ -9,4 +9,4 @@ pub type Port = u16; pub type TransactionId = i64; pub const MAX_PACKET_SIZE: usize = 1496; -pub const PROTOCOL_ID: i64 = 0x41727101980; +pub const PROTOCOL_ID: i64 = 0x0417_2710_1980; diff --git a/src/udp/request.rs b/src/udp/request.rs index 67aaeb57f..53d646f1a 100644 --- a/src/udp/request.rs +++ b/src/udp/request.rs @@ -23,6 +23,7 @@ pub struct AnnounceRequestWrapper { } impl AnnounceRequestWrapper { + #[must_use] pub fn new(announce_request: AnnounceRequest) -> Self { AnnounceRequestWrapper { announce_request: announce_request.clone(), diff --git a/tests/api.rs b/tests/api.rs index a5ae79621..14fefa50e 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -1,6 +1,6 @@ /// Integration tests for the tracker API /// -/// cargo test tracker_api -- --nocapture +/// cargo test `tracker_api` -- --nocapture extern crate rand; mod common; @@ 
-192,7 +192,7 @@ mod tracker_api { let torrent_peer = TorrentPeer { peer_id: PeerId(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1669397478934, 0), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), diff --git a/tests/udp.rs b/tests/udp.rs index 7a0d883a5..54caeaa68 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -1,6 +1,6 @@ /// Integration tests for UDP tracker server /// -/// cargo test udp_tracker_server -- --nocapture +/// cargo test `udp_tracker_server` -- --nocapture extern crate rand; mod common; @@ -116,7 +116,7 @@ mod udp_tracker_server { } } - /// Creates a new UdpClient connected to a Udp server + /// Creates a new `UdpClient` connected to a Udp server async fn new_connected_udp_client(remote_address: &str) -> UdpClient { let client = UdpClient::bind(&source_address(ephemeral_random_port())).await; client.connect(remote_address).await; @@ -155,7 +155,7 @@ mod udp_tracker_server { } } - /// Creates a new UdpTrackerClient connected to a Udp Tracker server + /// Creates a new `UdpTrackerClient` connected to a Udp Tracker server async fn new_connected_udp_tracker_client(remote_address: &str) -> UdpTrackerClient { let udp_client = new_connected_udp_client(remote_address).await; UdpTrackerClient { udp_client } From f74c93346b2d6aba776867a3db2777101c40a20f Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 18:24:56 +0100 Subject: [PATCH 0216/1003] clippy: fix src/http/response.rs --- src/http/handlers.rs | 14 +++++++------- src/http/response.rs | 29 ++++++++++++++++++++--------- 2 files changed, 27 insertions(+), 16 deletions(-) diff --git a/src/http/handlers.rs b/src/http/handlers.rs index cd521b43b..fc55c7c5b 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -9,9 +9,9 @@ use warp::{reject, Rejection, Reply}; use super::errors::ServerError; 
use super::request::{AnnounceRequest, ScrapeRequest}; -use super::response::{AnnounceResponse, Peer, ScrapeResponse, ScrapeResponseEntry}; -use crate::http::response::ErrorResponse; -use crate::http::WebResult; +use super::response::{Announce, Peer, Scrape, ScrapeResponseEntry}; +use super::WebResult; +use crate::http::response::Error; use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; use crate::tracker::peer::TorrentPeer; @@ -151,7 +151,7 @@ fn send_announce_response( }) .collect(); - let res = AnnounceResponse { + let res = Announce { interval, interval_min, complete: torrent_stats.seeders, @@ -172,7 +172,7 @@ fn send_announce_response( /// Send scrape response fn send_scrape_response(files: HashMap) -> WebResult { - let res = ScrapeResponse { files }; + let res = Scrape { files }; match res.write() { Ok(body) => Ok(Response::new(body)), @@ -184,12 +184,12 @@ fn send_scrape_response(files: HashMap) -> WebRes pub async fn send_error(r: Rejection) -> std::result::Result { let body = if let Some(server_error) = r.find::() { debug!("{:?}", server_error); - ErrorResponse { + Error { failure_reason: server_error.to_string(), } .write() } else { - ErrorResponse { + Error { failure_reason: ServerError::InternalServerError.to_string(), } .write() diff --git a/src/http/response.rs b/src/http/response.rs index cb01068fa..98ea6fe73 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -1,5 +1,4 @@ use std::collections::HashMap; -use std::error::Error; use std::io::Write; use std::net::IpAddr; @@ -16,7 +15,7 @@ pub struct Peer { } #[derive(Serialize)] -pub struct AnnounceResponse { +pub struct Announce { pub interval: u32, #[serde(rename = "min interval")] pub interval_min: u32, @@ -26,13 +25,19 @@ pub struct AnnounceResponse { pub peers: Vec, } -impl AnnounceResponse { +impl Announce { + /// # Panics + /// + /// It would panic if the `Announce` struct would contain an inappropriate type. 
#[must_use] pub fn write(&self) -> String { serde_bencode::to_string(&self).unwrap() } - pub fn write_compact(&self) -> Result, Box> { + /// # Errors + /// + /// Will return `Err` if internally interrupted. + pub fn write_compact(&self) -> Result, Box> { let mut peers_v4: Vec = Vec::new(); let mut peers_v6: Vec = Vec::new(); @@ -80,12 +85,15 @@ pub struct ScrapeResponseEntry { } #[derive(Serialize)] -pub struct ScrapeResponse { +pub struct Scrape { pub files: HashMap, } -impl ScrapeResponse { - pub fn write(&self) -> Result, Box> { +impl Scrape { + /// # Errors + /// + /// Will return `Err` if internally interrupted. + pub fn write(&self) -> Result, Box> { let mut bytes: Vec = Vec::new(); bytes.write_all(b"d5:filesd")?; @@ -109,12 +117,15 @@ impl ScrapeResponse { } #[derive(Serialize)] -pub struct ErrorResponse { +pub struct Error { #[serde(rename = "failure reason")] pub failure_reason: String, } -impl ErrorResponse { +impl Error { + /// # Panics + /// + /// It would panic if the `Error` struct would contain an inappropriate type. 
#[must_use] pub fn write(&self) -> String { serde_bencode::to_string(&self).unwrap() From a9f760b8ac8ab9562473347335e10a99db53571d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 18:25:56 +0100 Subject: [PATCH 0217/1003] clippy: fix src/http/request.rs --- src/http/filters.rs | 14 +++++++------- src/http/handlers.rs | 15 +++++++-------- src/http/request.rs | 4 ++-- src/tracker/peer.rs | 14 +++++--------- 4 files changed, 21 insertions(+), 26 deletions(-) diff --git a/src/http/filters.rs b/src/http/filters.rs index d33acbcfa..f28909c7f 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use warp::{reject, Filter, Rejection}; use super::errors::ServerError; -use super::request::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest}; +use super::request::{Announce, AnnounceRequestQuery, Scrape}; use super::WebResult; use crate::protocol::common::{InfoHash, PeerId, MAX_SCRAPE_TORRENTS}; use crate::tracker::key::AuthKey; @@ -47,7 +47,7 @@ pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { +pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::filters::query::query::() .and(with_info_hash()) .and(with_peer_id()) @@ -56,7 +56,7 @@ pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { +pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::any() .and(with_info_hash()) .and(with_peer_addr(on_reverse_proxy)) @@ -159,8 +159,8 @@ async fn announce_request( info_hashes: Vec, peer_id: PeerId, peer_addr: IpAddr, -) -> WebResult { - Ok(AnnounceRequest { +) -> WebResult { + Ok(Announce { info_hash: info_hashes[0], peer_addr, downloaded: announce_request_query.downloaded.unwrap_or(0), @@ -174,6 +174,6 @@ async fn announce_request( } /// Parse `ScrapeRequest` from `InfoHash` -async fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { - Ok(ScrapeRequest { info_hashes, peer_addr }) 
+async fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { + Ok(Scrape { info_hashes, peer_addr }) } diff --git a/src/http/handlers.rs b/src/http/handlers.rs index fc55c7c5b..a312ff105 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -8,9 +8,8 @@ use warp::http::Response; use warp::{reject, Rejection, Reply}; use super::errors::ServerError; -use super::request::{AnnounceRequest, ScrapeRequest}; -use super::response::{Announce, Peer, Scrape, ScrapeResponseEntry}; -use super::WebResult; +use super::response::{self, Peer, ScrapeResponseEntry}; +use super::{request, WebResult}; use crate::http::response::Error; use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; @@ -44,7 +43,7 @@ pub async fn authenticate( /// Handle announce request pub async fn handle_announce( - announce_request: AnnounceRequest, + announce_request: request::Announce, auth_key: Option, tracker: Arc, ) -> WebResult { @@ -86,7 +85,7 @@ pub async fn handle_announce( /// Handle scrape request pub async fn handle_scrape( - scrape_request: ScrapeRequest, + scrape_request: request::Scrape, auth_key: Option, tracker: Arc, ) -> WebResult { @@ -136,7 +135,7 @@ pub async fn handle_scrape( /// Send announce response fn send_announce_response( - announce_request: &AnnounceRequest, + announce_request: &request::Announce, torrent_stats: TorrentStats, peers: Vec, interval: u32, @@ -151,7 +150,7 @@ fn send_announce_response( }) .collect(); - let res = Announce { + let res = response::Announce { interval, interval_min, complete: torrent_stats.seeders, @@ -172,7 +171,7 @@ fn send_announce_response( /// Send scrape response fn send_scrape_response(files: HashMap) -> WebResult { - let res = Scrape { files }; + let res = response::Scrape { files }; match res.write() { Ok(body) => Ok(Response::new(body)), diff --git a/src/http/request.rs b/src/http/request.rs index 2d72a1a3c..b812e1173 100644 --- a/src/http/request.rs +++ b/src/http/request.rs @@ -17,7 +17,7 @@ pub 
struct AnnounceRequestQuery { } #[derive(Debug)] -pub struct AnnounceRequest { +pub struct Announce { pub info_hash: InfoHash, pub peer_addr: IpAddr, pub downloaded: Bytes, @@ -29,7 +29,7 @@ pub struct AnnounceRequest { pub compact: Option, } -pub struct ScrapeRequest { +pub struct Scrape { pub info_hashes: Vec, pub peer_addr: IpAddr, } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index a5f000eca..a30723d00 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -4,7 +4,7 @@ use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde; use serde::Serialize; -use crate::http::request::AnnounceRequest; +use crate::http::request::Announce; use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef, PeerId}; use crate::protocol::utils::ser_unix_time_value; @@ -46,11 +46,7 @@ impl TorrentPeer { } #[must_use] - pub fn from_http_announce_request( - announce_request: &AnnounceRequest, - remote_ip: IpAddr, - host_opt_ip: Option, - ) -> Self { + pub fn from_http_announce_request(announce_request: &Announce, remote_ip: IpAddr, host_opt_ip: Option) -> Self { let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); let event: AnnounceEvent = if let Some(event) = &announce_request.event { @@ -285,12 +281,12 @@ mod test { mod torrent_peer_constructor_from_for_http_requests { use std::net::{IpAddr, Ipv4Addr}; - use crate::http::request::AnnounceRequest; + use crate::http::request::Announce; use crate::protocol::common::{InfoHash, PeerId}; use crate::tracker::peer::TorrentPeer; - fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> AnnounceRequest { - AnnounceRequest { + fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> Announce { + Announce { info_hash: InfoHash([0u8; 20]), peer_addr, downloaded: 0u64, From 21b6e777375d2007c27cdc0d9cd9820857809f97 Mon Sep 17 00:00:00 2001 From: Cameron 
Garnham Date: Wed, 23 Nov 2022 18:33:09 +0100 Subject: [PATCH 0218/1003] clippy: fix (ignore) src/config.rs --- cSpell.json | 1 + src/config.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/cSpell.json b/cSpell.json index a2c4235c4..cc3359d58 100644 --- a/cSpell.json +++ b/cSpell.json @@ -7,6 +7,7 @@ "bencode", "binascii", "Bitflu", + "bools", "bufs", "byteorder", "canonicalize", diff --git a/src/config.rs b/src/config.rs index 1199c7fe7..dbfb4a140 100644 --- a/src/config.rs +++ b/src/config.rs @@ -37,6 +37,7 @@ pub struct HttpApiConfig { pub access_tokens: HashMap, } +#[allow(clippy::struct_excessive_bools)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { pub log_level: Option, From 941e9825dcf0c1360212be87b90234e82199d983 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 18:44:35 +0100 Subject: [PATCH 0219/1003] clippy: fix src/api/resources/auth_key_resource.rs --- src/api/resources/auth_key_resource.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resources/auth_key_resource.rs index 4fc5d0cf9..3bc0cefb7 100644 --- a/src/api/resources/auth_key_resource.rs +++ b/src/api/resources/auth_key_resource.rs @@ -54,7 +54,7 @@ mod tests { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(DefaultClock::add(&Duration::new(duration_in_secs, 0)).unwrap()) } - ) + ); } #[test] @@ -72,7 +72,7 @@ mod tests { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(duration_in_secs) } - ) + ); } #[test] From a433c825efa2c4df59fd9e8375da623de5be3bf1 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 19:13:54 +0100 Subject: [PATCH 0220/1003] clippy: fix src/api/server.rs --- src/api/server.rs | 58 +++++++++++++++++++++-------------------- src/jobs/tracker_api.rs | 2 +- 2 files changed, 31 insertions(+), 29 deletions(-) diff --git a/src/api/server.rs 
b/src/api/server.rs index ce272b3ac..f9e5bc368 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -59,7 +59,8 @@ fn authenticate(tokens: HashMap) -> impl Filter) -> impl warp::Future { +#[allow(clippy::too_many_lines)] +pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl warp::Future { // GET /api/torrents?offset=:u32&limit=:u32 // View torrent list let api_torrents = tracker.clone(); @@ -124,31 +125,31 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let db = tracker.get_torrents().await; - let _: Vec<_> = db - .iter() - .map(|(_info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - results.seeders += seeders; - results.completed += completed; - results.leechers += leechers; - results.torrents += 1; - }) - .collect(); + db.values().for_each(|torrent_entry| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + results.seeders += seeders; + results.completed += completed; + results.leechers += leechers; + results.torrents += 1; + }); let stats = tracker.get_stats().await; - results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; - results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; - results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; - results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; - results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; - results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; - results.udp4_connections_handled = stats.udp4_connections_handled as u32; - results.udp4_announces_handled = stats.udp4_announces_handled as u32; - results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; - results.udp6_connections_handled = stats.udp6_connections_handled as u32; - results.udp6_announces_handled = stats.udp6_announces_handled as u32; - results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; + #[allow(clippy::cast_possible_truncation)] + { + 
results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; + results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; + results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; + results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; + results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; + results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; + results.udp4_connections_handled = stats.udp4_connections_handled as u32; + results.udp4_announces_handled = stats.udp4_announces_handled as u32; + results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; + results.udp6_connections_handled = stats.udp6_connections_handled as u32; + results.udp6_announces_handled = stats.udp6_announces_handled as u32; + results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; + } Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) }); @@ -168,11 +169,12 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let db = tracker.get_torrents().await; let torrent_entry_option = db.get(&info_hash); - if torrent_entry_option.is_none() { - return Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")); - } - - let torrent_entry = torrent_entry_option.unwrap(); + let torrent_entry = match torrent_entry_option { + Some(torrent_entry) => torrent_entry, + None => { + return Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")); + } + }; let (seeders, completed, leechers) = torrent_entry.get_stats(); let peers = torrent_entry.get_peers(None); diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index ac7657858..4e2dcd0c9 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -24,7 +24,7 @@ pub async fn start_job(config: &Configuration, tracker: Arc) -> // Run the API server let join_handle = tokio::spawn(async move { - let handel = server::start(bind_addr, tracker); + let handel = server::start(bind_addr, 
&tracker); assert!(tx.send(ApiServerJobStarted()).is_ok(), "the start job dropped"); From 2ba748925cb748259ed92c6de692624c9bc68cdc Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 19:32:22 +0100 Subject: [PATCH 0221/1003] clippy: fix src/protocol/common.rs --- src/protocol/common.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/protocol/common.rs b/src/protocol/common.rs index c5c9b4578..d6a98cf03 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -212,9 +212,8 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { serde::de::Unexpected::Str(v), &"expected a hexadecimal string", )); - } else { - Ok(res) - } + }; + Ok(res) } } @@ -249,8 +248,7 @@ impl PeerId { } if self.0[0] == b'-' { let name = match &self.0[1..3] { - b"AG" => "Ares", - b"A~" => "Ares", + b"AG" | b"A~" => "Ares", b"AR" => "Arctic", b"AV" => "Avicora", b"AX" => "BitPump", @@ -333,6 +331,11 @@ impl Serialize for PeerId { client: Option<&'a str>, } + let buff_size = self.0.len() * 2; + let mut tmp: Vec = vec![0; buff_size]; + binascii::bin2hex(&self.0, &mut tmp).unwrap(); + let id = std::str::from_utf8(&tmp).ok(); + let obj = PeerIdInfo { id: self.get_id(), client: self.get_client_name(), From 8e3115f80d7f98b70204caf83c253337d640da03 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 19:45:29 +0100 Subject: [PATCH 0222/1003] clippy: fix src/config.rs --- src/config.rs | 46 +++++++++++++++++++++++++--------------- src/jobs/http_tracker.rs | 4 ++-- src/jobs/udp_tracker.rs | 4 ++-- 3 files changed, 33 insertions(+), 21 deletions(-) diff --git a/src/config.rs b/src/config.rs index dbfb4a140..ac15f96b3 100644 --- a/src/config.rs +++ b/src/config.rs @@ -13,14 +13,14 @@ use crate::databases::database::DatabaseDrivers; use crate::tracker::mode::TrackerMode; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub struct UdpTrackerConfig { +pub struct UdpTracker { pub enabled: bool, pub bind_address: String, } 
#[serde_as] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub struct HttpTrackerConfig { +pub struct HttpTracker { pub enabled: bool, pub bind_address: String, pub ssl_enabled: bool, @@ -31,7 +31,7 @@ pub struct HttpTrackerConfig { } #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub struct HttpApiConfig { +pub struct HttpApi { pub enabled: bool, pub bind_address: String, pub access_tokens: HashMap, @@ -53,13 +53,15 @@ pub struct Configuration { pub persistent_torrent_completed_stat: bool, pub inactive_peer_cleanup_interval: u64, pub remove_peerless_torrents: bool, - pub udp_trackers: Vec, - pub http_trackers: Vec, - pub http_api: HttpApiConfig, + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub http_api: HttpApi, } #[derive(Debug)] pub enum ConfigurationError { + Message(String), + ConfigError(ConfigError), IOError(std::io::Error), ParseError(toml::de::Error), TrackerModeIncompatible, @@ -68,9 +70,11 @@ pub enum ConfigurationError { impl std::fmt::Display for ConfigurationError { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { + ConfigurationError::Message(e) => e.fmt(f), + ConfigurationError::ConfigError(e) => e.fmt(f), ConfigurationError::IOError(e) => e.fmt(f), ConfigurationError::ParseError(e) => e.fmt(f), - _ => write!(f, "{:?}", self), + ConfigurationError::TrackerModeIncompatible => write!(f, "{:?}", self), } } } @@ -107,7 +111,7 @@ impl Configuration { remove_peerless_torrents: true, udp_trackers: Vec::new(), http_trackers: Vec::new(), - http_api: HttpApiConfig { + http_api: HttpApi { enabled: true, bind_address: String::from("127.0.0.1:1212"), access_tokens: [(String::from("admin"), String::from("MyAccessToken"))] @@ -116,11 +120,11 @@ impl Configuration { .collect(), }, }; - configuration.udp_trackers.push(UdpTrackerConfig { + configuration.udp_trackers.push(UdpTracker { enabled: false, bind_address: String::from("0.0.0.0:6969"), }); - configuration.http_trackers.push(HttpTrackerConfig { + 
configuration.http_trackers.push(HttpTracker { enabled: false, bind_address: String::from("0.0.0.0:6969"), ssl_enabled: false, @@ -130,31 +134,39 @@ impl Configuration { configuration } - pub fn load_from_file(path: &str) -> Result { + /// # Errors + /// + /// Will return `Err` if `path` does not exist or has a bad configuration. + pub fn load_from_file(path: &str) -> Result { let config_builder = Config::builder(); #[allow(unused_assignments)] let mut config = Config::default(); if Path::new(path).exists() { - config = config_builder.add_source(File::with_name(path)).build()?; + config = config_builder + .add_source(File::with_name(path)) + .build() + .map_err(ConfigurationError::ConfigError)?; } else { eprintln!("No config file found."); eprintln!("Creating config file.."); let config = Configuration::default(); - let _ = config.save_to_file(path); - return Err(ConfigError::Message( + config.save_to_file(path)?; + return Err(ConfigurationError::Message( "Please edit the config.TOML in the root folder and restart the tracker.".to_string(), )); } - let torrust_config: Configuration = config - .try_deserialize() - .map_err(|e| ConfigError::Message(format!("Errors while processing config: {}.", e)))?; + let torrust_config: Configuration = config.try_deserialize().map_err(ConfigurationError::ConfigError)?; Ok(torrust_config) } + /// # Errors + /// + /// Will return `Err` if `filename` does not exist or the user does not have + /// permission to read it. 
pub fn save_to_file(&self, path: &str) -> Result<(), ConfigurationError> { let toml_string = toml::to_string(self).expect("Could not encode TOML value"); fs::write(path, toml_string).expect("Could not write to file!"); diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 6070e0d27..f6023a4e0 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -4,12 +4,12 @@ use std::sync::Arc; use log::{info, warn}; use tokio::task::JoinHandle; -use crate::config::HttpTrackerConfig; +use crate::config::HttpTracker; use crate::http::server::HttpServer; use crate::tracker::TorrentTracker; #[must_use] -pub fn start_job(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { +pub fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.parse::().unwrap(); let ssl_enabled = config.ssl_enabled; let ssl_cert_path = config.ssl_cert_path.clone(); diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 8bf839380..1b4bc745c 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -3,12 +3,12 @@ use std::sync::Arc; use log::{error, info, warn}; use tokio::task::JoinHandle; -use crate::config::UdpTrackerConfig; +use crate::config::UdpTracker; use crate::tracker::TorrentTracker; use crate::udp::server::UdpServer; #[must_use] -pub fn start_job(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { +pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.clone(); tokio::spawn(async move { From 87160bdf2ffbfc91853037afda231c08bb2491bb Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 19:49:28 +0100 Subject: [PATCH 0223/1003] clippy: fix src/databases/database.rs --- src/config.rs | 6 +++--- src/databases/database.rs | 14 ++++++++++---- src/tracker/mod.rs | 2 +- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/src/config.rs b/src/config.rs index ac15f96b3..6eb83ad16 100644 --- a/src/config.rs 
+++ b/src/config.rs @@ -9,7 +9,7 @@ use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; use {std, toml}; -use crate::databases::database::DatabaseDrivers; +use crate::databases::database::Drivers; use crate::tracker::mode::TrackerMode; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] @@ -42,7 +42,7 @@ pub struct HttpApi { pub struct Configuration { pub log_level: Option, pub mode: TrackerMode, - pub db_driver: DatabaseDrivers, + pub db_driver: Drivers, pub db_path: String, pub announce_interval: u32, pub min_announce_interval: u32, @@ -98,7 +98,7 @@ impl Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), mode: TrackerMode::Public, - db_driver: DatabaseDrivers::Sqlite3, + db_driver: Drivers::Sqlite3, db_path: String::from("data.db"), announce_interval: 120, min_announce_interval: 120, diff --git a/src/databases/database.rs b/src/databases/database.rs index 87a91ddeb..212224b25 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -8,18 +8,21 @@ use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub enum DatabaseDrivers { +pub enum Drivers { Sqlite3, MySQL, } -pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result, r2d2::Error> { +/// # Errors +/// +/// Will return `r2d2::Error` if `db_path` is not able to create a database. 
+pub fn connect(db_driver: &Drivers, db_path: &str) -> Result, r2d2::Error> { let database: Box = match db_driver { - DatabaseDrivers::Sqlite3 => { + Drivers::Sqlite3 => { let db = SqliteDatabase::new(db_path)?; Box::new(db) } - DatabaseDrivers::MySQL => { + Drivers::MySQL => { let db = MysqlDatabase::new(db_path)?; Box::new(db) } @@ -32,6 +35,9 @@ pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result Result<(), Error>; async fn load_persistent_torrents(&self) -> Result, Error>; diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 6aae06a4b..680f2635d 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -40,7 +40,7 @@ impl TorrentTracker { stats_event_sender: Option>, stats_repository: StatsRepository, ) -> Result { - let database = database::connect_database(&config.db_driver, &config.db_path)?; + let database = database::connect(&config.db_driver, &config.db_path)?; Ok(TorrentTracker { config: config.clone(), From 38eabc4ae5647b6d885bcff8dfb32adc04c62b3b Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 19:56:53 +0100 Subject: [PATCH 0224/1003] clippy: fix src/databases/mysql.rs --- src/databases/database.rs | 4 ++-- src/databases/mysql.rs | 15 +++++++++------ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/databases/database.rs b/src/databases/database.rs index 212224b25..7344010d8 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -2,7 +2,7 @@ use async_trait::async_trait; use derive_more::{Display, Error}; use serde::{Deserialize, Serialize}; -use crate::databases::mysql::MysqlDatabase; +use crate::databases::mysql::Mysql; use crate::databases::sqlite::SqliteDatabase; use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; @@ -23,7 +23,7 @@ pub fn connect(db_driver: &Drivers, db_path: &str) -> Result, Box::new(db) } Drivers::MySQL => { - let db = MysqlDatabase::new(db_path)?; + let db = Mysql::new(db_path)?; Box::new(db) } }; diff --git 
a/src/databases/mysql.rs b/src/databases/mysql.rs index 5e7410ac2..5db358d5a 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -13,11 +13,14 @@ use crate::databases::database::{Database, Error}; use crate::protocol::common::{InfoHash, AUTH_KEY_LENGTH}; use crate::tracker::key::AuthKey; -pub struct MysqlDatabase { +pub struct Mysql { pool: Pool, } -impl MysqlDatabase { +impl Mysql { + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create `MySQL` database. pub fn new(db_path: &str) -> Result { let opts = Opts::from_url(db_path).expect("Failed to connect to MySQL database."); let builder = OptsBuilder::from_opts(opts); @@ -31,7 +34,7 @@ impl MysqlDatabase { } #[async_trait] -impl Database for MysqlDatabase { +impl Database for Mysql { fn create_database_tables(&self) -> Result<(), database::Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( @@ -57,7 +60,7 @@ impl Database for MysqlDatabase { PRIMARY KEY (`id`), UNIQUE (`key`) );", - AUTH_KEY_LENGTH as i8 + i8::try_from(AUTH_KEY_LENGTH).expect("Auth Key Length Should fit within a i8!") ); let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; @@ -95,7 +98,7 @@ impl Database for MysqlDatabase { "SELECT `key`, valid_until FROM `keys`", |(key, valid_until): (String, i64)| AuthKey { key, - valid_until: Some(Duration::from_secs(valid_until as u64)), + valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }, ) .map_err(|_| database::Error::QueryReturnedNoRows)?; @@ -188,7 +191,7 @@ impl Database for MysqlDatabase { { Some((key, valid_until)) => Ok(AuthKey { key, - valid_until: Some(Duration::from_secs(valid_until as u64)), + valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }), None => Err(database::Error::InvalidQuery), } From be6676a6315022ce4a3d7d2a02482c2a40a67798 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 19:59:28 +0100 Subject: [PATCH 0225/1003] clippy: fix 
src/databases/sqlite.rs --- src/databases/database.rs | 4 ++-- src/databases/sqlite.rs | 19 +++++++++++-------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/src/databases/database.rs b/src/databases/database.rs index 7344010d8..62105dee5 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -3,7 +3,7 @@ use derive_more::{Display, Error}; use serde::{Deserialize, Serialize}; use crate::databases::mysql::Mysql; -use crate::databases::sqlite::SqliteDatabase; +use crate::databases::sqlite::Sqlite; use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; @@ -19,7 +19,7 @@ pub enum Drivers { pub fn connect(db_driver: &Drivers, db_path: &str) -> Result, r2d2::Error> { let database: Box = match db_driver { Drivers::Sqlite3 => { - let db = SqliteDatabase::new(db_path)?; + let db = Sqlite::new(db_path)?; Box::new(db) } Drivers::MySQL => { diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 19849f297..ee637049b 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -11,20 +11,23 @@ use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; -pub struct SqliteDatabase { +pub struct Sqlite { pool: Pool, } -impl SqliteDatabase { - pub fn new(db_path: &str) -> Result { +impl Sqlite { + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create `SqLite` database. 
+ pub fn new(db_path: &str) -> Result { let cm = SqliteConnectionManager::file(db_path); let pool = Pool::new(cm).expect("Failed to create r2d2 SQLite connection pool."); - Ok(SqliteDatabase { pool }) + Ok(Sqlite { pool }) } } #[async_trait] -impl Database for SqliteDatabase { +impl Database for Sqlite { fn create_database_tables(&self) -> Result<(), database::Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( @@ -86,7 +89,7 @@ impl Database for SqliteDatabase { Ok(AuthKey { key, - valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until as u64)), + valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) })?; @@ -191,11 +194,11 @@ impl Database for SqliteDatabase { if let Some(row) = rows.next()? { let key: String = row.get(0).unwrap(); - let valid_until_i64: i64 = row.get(1).unwrap(); + let valid_until: i64 = row.get(1).unwrap(); Ok(AuthKey { key, - valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until_i64 as u64)), + valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) } else { Err(database::Error::QueryReturnedNoRows) From b5ce7e9f0cfc6f11a83e781f4e85b3c6c5e93a0d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 21:36:53 +0100 Subject: [PATCH 0226/1003] clippy: fix src/http/filters.rs --- src/http/filters.rs | 66 ++++++++++++++++++++++++--------------------- 1 file changed, 36 insertions(+), 30 deletions(-) diff --git a/src/http/filters.rs b/src/http/filters.rs index f28909c7f..f2e214e87 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -19,13 +19,15 @@ pub fn with_tracker(tracker: Arc) -> impl Filter impl Filter,), Error = Rejection> + Clone { - warp::filters::query::raw().and_then(info_hashes) + warp::filters::query::raw().and_then(|q| async move { info_hashes(&q) }) } /// Check for `PeerId` +#[must_use] pub fn with_peer_id() -> impl Filter + Clone { - warp::filters::query::raw().and_then(peer_id) + 
warp::filters::query::raw().and_then(|q| async move { peer_id(&q) }) } /// Pass Arc along @@ -37,34 +39,38 @@ pub fn with_auth_key() -> impl Filter,), Error = Infa } /// Check for `PeerAddress` +#[must_use] pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { warp::addr::remote() .and(warp::header::optional::("X-Forwarded-For")) .map(move |remote_addr: Option, x_forwarded_for: Option| { (on_reverse_proxy, remote_addr, x_forwarded_for) }) - .and_then(peer_addr) + .and_then(|q| async move { peer_addr(q) }) } /// Check for `AnnounceRequest` +#[must_use] pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::filters::query::query::() .and(with_info_hash()) .and(with_peer_id()) .and(with_peer_addr(on_reverse_proxy)) - .and_then(announce_request) + .and_then(|q, r, s, t| async move { announce_request(q, &r, s, t) }) } /// Check for `ScrapeRequest` +#[must_use] pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::any() .and(with_info_hash()) .and(with_peer_addr(on_reverse_proxy)) - .and_then(scrape_request) + .and_then(|q, r| async move { scrape_request(q, r) }) } /// Parse `InfoHash` from raw query string -async fn info_hashes(raw_query: String) -> WebResult> { +#[allow(clippy::ptr_arg)] +fn info_hashes(raw_query: &String) -> WebResult> { let split_raw_query: Vec<&str> = raw_query.split('&').collect(); let mut info_hashes: Vec = Vec::new(); @@ -89,7 +95,8 @@ async fn info_hashes(raw_query: String) -> WebResult> { } /// Parse `PeerId` from raw query string -async fn peer_id(raw_query: String) -> WebResult { +#[allow(clippy::ptr_arg)] +fn peer_id(raw_query: &String) -> WebResult { // put all query params in a vec let split_raw_query: Vec<&str> = raw_query.split('&').collect(); @@ -118,17 +125,14 @@ async fn peer_id(raw_query: String) -> WebResult { } } - if peer_id.is_none() { - Err(reject::custom(ServerError::InvalidPeerId)) - } else { - Ok(peer_id.unwrap()) + match peer_id { + Some(id) => Ok(id), 
+ None => Err(reject::custom(ServerError::InvalidPeerId)), } } /// Get `PeerAddress` from `RemoteAddress` or Forwarded -async fn peer_addr( - (on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option), -) -> WebResult { +fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option)) -> WebResult { if !on_reverse_proxy && remote_addr.is_none() { return Err(reject::custom(ServerError::AddressNotFound)); } @@ -137,26 +141,27 @@ async fn peer_addr( return Err(reject::custom(ServerError::AddressNotFound)); } - match on_reverse_proxy { - true => { - let mut x_forwarded_for_raw = x_forwarded_for.unwrap(); - // remove whitespace chars - x_forwarded_for_raw.retain(|c| !c.is_whitespace()); - // get all forwarded ip's in a vec - let x_forwarded_ips: Vec<&str> = x_forwarded_for_raw.split(',').collect(); - // set client ip to last forwarded ip - let x_forwarded_ip = *x_forwarded_ips.last().unwrap(); - - IpAddr::from_str(x_forwarded_ip).map_err(|_| reject::custom(ServerError::AddressNotFound)) - } - false => Ok(remote_addr.unwrap().ip()), + if on_reverse_proxy { + let mut x_forwarded_for_raw = x_forwarded_for.unwrap(); + // remove whitespace chars + x_forwarded_for_raw.retain(|c| !c.is_whitespace()); + // get all forwarded ip's in a vec + let x_forwarded_ips: Vec<&str> = x_forwarded_for_raw.split(',').collect(); + // set client ip to last forwarded ip + let x_forwarded_ip = *x_forwarded_ips.last().unwrap(); + + IpAddr::from_str(x_forwarded_ip).map_err(|_| reject::custom(ServerError::AddressNotFound)) + } else { + Ok(remote_addr.unwrap().ip()) } } /// Parse `AnnounceRequest` from raw `AnnounceRequestQuery`, `InfoHash` and Option -async fn announce_request( +#[allow(clippy::unnecessary_wraps)] +#[allow(clippy::ptr_arg)] +fn announce_request( announce_request_query: AnnounceRequestQuery, - info_hashes: Vec, + info_hashes: &Vec, peer_id: PeerId, peer_addr: IpAddr, ) -> WebResult { @@ -174,6 +179,7 @@ async fn announce_request( } /// Parse 
`ScrapeRequest` from `InfoHash` -async fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { +#[allow(clippy::unnecessary_wraps)] +fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { Ok(Scrape { info_hashes, peer_addr }) } From 75bef77799f46c281eb3a8adae947705c9d1186f Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 22:04:00 +0100 Subject: [PATCH 0227/1003] clippy: fix src/http/handlers.rs --- src/http/handlers.rs | 56 ++++++++++++++++++++++++++------------------ src/http/routes.rs | 5 +++- 2 files changed, 37 insertions(+), 24 deletions(-) diff --git a/src/http/handlers.rs b/src/http/handlers.rs index a312ff105..064047ba0 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -19,37 +19,38 @@ use crate::tracker::torrent::{TorrentError, TorrentStats}; use crate::tracker::TorrentTracker; /// Authenticate `InfoHash` using optional `AuthKey` +/// +/// # Errors +/// +/// Will return `ServerError` that wraps the `TorrentError` if unable to `authenticate_request`. 
pub async fn authenticate( info_hash: &InfoHash, auth_key: &Option, tracker: Arc, ) -> Result<(), ServerError> { - match tracker.authenticate_request(info_hash, auth_key).await { - Ok(_) => Ok(()), - Err(e) => { - let err = match e { - TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, - TorrentError::NoPeersFound => ServerError::NoPeersFound, - TorrentError::CouldNotSendResponse => ServerError::InternalServerError, - TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, - }; - - Err(err) - } - } + tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { + TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, + TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, + TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, + TorrentError::NoPeersFound => ServerError::NoPeersFound, + TorrentError::CouldNotSendResponse => ServerError::InternalServerError, + TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, + }) } /// Handle announce request +/// +/// # Errors +/// +/// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. 
pub async fn handle_announce( announce_request: request::Announce, auth_key: Option, tracker: Arc, ) -> WebResult { - if let Err(e) = authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await { - return Err(reject::custom(e)); - } + authenticate(&announce_request.info_hash, &auth_key, tracker.clone()) + .await + .map_err(reject::custom)?; debug!("{:?}", announce_request); @@ -76,14 +77,18 @@ pub async fn handle_announce( send_announce_response( &announce_request, - torrent_stats, - peers, + &torrent_stats, + &peers, announce_interval, tracker.config.min_announce_interval, ) } /// Handle scrape request +/// +/// # Errors +/// +/// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. pub async fn handle_scrape( scrape_request: request::Scrape, auth_key: Option, @@ -134,10 +139,11 @@ pub async fn handle_scrape( } /// Send announce response +#[allow(clippy::ptr_arg)] fn send_announce_response( announce_request: &request::Announce, - torrent_stats: TorrentStats, - peers: Vec, + torrent_stats: &TorrentStats, + peers: &Vec, interval: u32, interval_min: u32, ) -> WebResult { @@ -180,7 +186,11 @@ fn send_scrape_response(files: HashMap) -> WebRes } /// Handle all server errors and send error reply -pub async fn send_error(r: Rejection) -> std::result::Result { +/// +/// # Errors +/// +/// Will not return a error, `Infallible`, but instead convert the `ServerError` into a `Response`. 
+pub fn send_error(r: &Rejection) -> std::result::Result { let body = if let Some(server_error) = r.find::() { debug!("{:?}", server_error); Error { diff --git a/src/http/routes.rs b/src/http/routes.rs index f82bf45bc..992febc2c 100644 --- a/src/http/routes.rs +++ b/src/http/routes.rs @@ -8,8 +8,11 @@ use super::handlers::{handle_announce, handle_scrape, send_error}; use crate::tracker::TorrentTracker; /// All routes +#[must_use] pub fn routes(tracker: Arc) -> impl Filter + Clone { - announce(tracker.clone()).or(scrape(tracker)).recover(send_error) + announce(tracker.clone()) + .or(scrape(tracker)) + .recover(|q| async move { send_error(&q) }) } /// GET /announce or /announce/ From 208b10eaf1da30627c6503a6854f914c7de4eb6f Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 22:05:30 +0100 Subject: [PATCH 0228/1003] clippy: fix src/http/server.rs --- src/http/server.rs | 8 ++++---- src/jobs/http_tracker.rs | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/http/server.rs b/src/http/server.rs index 97ec30aa0..755fdc73a 100644 --- a/src/http/server.rs +++ b/src/http/server.rs @@ -6,14 +6,14 @@ use crate::tracker::TorrentTracker; /// Server that listens on HTTP, needs a `TorrentTracker` #[derive(Clone)] -pub struct HttpServer { +pub struct Http { tracker: Arc, } -impl HttpServer { +impl Http { #[must_use] - pub fn new(tracker: Arc) -> HttpServer { - HttpServer { tracker } + pub fn new(tracker: Arc) -> Http { + Http { tracker } } /// Start the `HttpServer` diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index f6023a4e0..d0c289e81 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -5,7 +5,7 @@ use log::{info, warn}; use tokio::task::JoinHandle; use crate::config::HttpTracker; -use crate::http::server::HttpServer; +use crate::http::server::Http; use crate::tracker::TorrentTracker; #[must_use] @@ -16,7 +16,7 @@ pub fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHand let ssl_key_path = 
config.ssl_key_path.clone(); tokio::spawn(async move { - let http_tracker = HttpServer::new(tracker); + let http_tracker = Http::new(tracker); if !ssl_enabled { info!("Starting HTTP server on: {}", bind_addr); From 577ddb97b0b25eb766bcdd99f222850b9375e013 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 22:07:52 +0100 Subject: [PATCH 0229/1003] clippy: fix src/jobs/http_tracker.rs --- src/jobs/http_tracker.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index d0c289e81..276da8099 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -8,6 +8,9 @@ use crate::config::HttpTracker; use crate::http::server::Http; use crate::tracker::TorrentTracker; +/// # Panics +/// +/// It would panic if the `config::HttpTracker` struct would contain an inappropriate values. #[must_use] pub fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.parse::().unwrap(); From d092580db3bd53206b44d98df820b0c3f7de391c Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 22:12:33 +0100 Subject: [PATCH 0230/1003] clippy: fix src/jobs/torrent_cleanup.rs --- src/jobs/torrent_cleanup.rs | 6 +++--- src/setup.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 3d7b49d6b..7bdfc1677 100644 --- a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -8,8 +8,8 @@ use crate::config::Configuration; use crate::tracker::TorrentTracker; #[must_use] -pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { - let weak_tracker = std::sync::Arc::downgrade(&tracker); +pub fn start_job(config: &Configuration, tracker: &Arc) -> JoinHandle<()> { + let weak_tracker = std::sync::Arc::downgrade(tracker); let interval = config.inactive_peer_cleanup_interval; tokio::spawn(async move { @@ -28,7 +28,7 @@ pub fn start_job(config: &Configuration, 
tracker: Arc) -> JoinHa let start_time = Utc::now().time(); info!("Cleaning up torrents.."); tracker.cleanup_torrents().await; - info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()) + info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()); } else { break; } diff --git a/src/setup.rs b/src/setup.rs index 736f448b6..804b6258a 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -54,7 +54,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< // Remove torrents without peers, every interval if config.inactive_peer_cleanup_interval > 0 { - jobs.push(torrent_cleanup::start_job(config, tracker.clone())); + jobs.push(torrent_cleanup::start_job(config, &tracker)); } jobs From 9adbfd137fa83d90e4e3073ad9adc240afccbc04 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 22:13:58 +0100 Subject: [PATCH 0231/1003] clippy: fix src/logging.rs --- src/logging.rs | 4 ++-- src/main.rs | 2 +- tests/api.rs | 2 +- tests/udp.rs | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/logging.rs b/src/logging.rs index 7682bace1..4d16f7670 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -7,7 +7,7 @@ use crate::config::Configuration; static INIT: Once = Once::new(); -pub fn setup_logging(cfg: &Configuration) { +pub fn setup(cfg: &Configuration) { let level = config_level_or_default(&cfg.log_level); if level == log::LevelFilter::Off { @@ -35,7 +35,7 @@ fn stdout_config(level: LevelFilter) { record.target(), record.level(), message - )) + )); }) .level(level) .chain(std::io::stdout()) diff --git a/src/main.rs b/src/main.rs index f64354fcf..baffc6fa5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -36,7 +36,7 @@ async fn main() { }; // Initialize logging - logging::setup_logging(&config); + logging::setup(&config); // Run jobs let jobs = setup::setup(&config, tracker.clone()).await; diff --git a/tests/api.rs b/tests/api.rs index 14fefa50e..6cfcbc092 100644 --- a/tests/api.rs 
+++ b/tests/api.rs @@ -288,7 +288,7 @@ mod tracker_api { self.tracker = Some(tracker.clone()); // Initialize logging - logging::setup_logging(&configuration); + logging::setup(&configuration); // Start the HTTP API job self.job = Some(tracker_api::start_job(&configuration, tracker).await); diff --git a/tests/udp.rs b/tests/udp.rs index 54caeaa68..b365c4fc6 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -69,7 +69,7 @@ mod udp_tracker_server { }; // Initialize logging - logging::setup_logging(&configuration); + logging::setup(&configuration); let udp_tracker_config = &configuration.udp_trackers[0]; From c78404ff33915057cc2cbc70a041e324fb30ea43 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 22:27:38 +0100 Subject: [PATCH 0232/1003] clippy: fix src/protocol/clock/mod.rs --- src/api/resources/auth_key_resource.rs | 6 +- src/protocol/clock/mod.rs | 94 ++++++++++++++------------ src/protocol/clock/time_extent.rs | 24 +++---- src/tracker/key.rs | 14 ++-- src/tracker/peer.rs | 10 +-- src/tracker/torrent.rs | 12 ++-- src/udp/connection_cookie.rs | 10 +-- src/udp/handlers.rs | 4 +- 8 files changed, 91 insertions(+), 83 deletions(-) diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resources/auth_key_resource.rs index 3bc0cefb7..9bcfca596 100644 --- a/src/api/resources/auth_key_resource.rs +++ b/src/api/resources/auth_key_resource.rs @@ -36,7 +36,7 @@ mod tests { use std::time::Duration; use super::AuthKeyResource; - use crate::protocol::clock::{DefaultClock, TimeNow}; + use crate::protocol::clock::{Current, TimeNow}; use crate::tracker::key::AuthKey; #[test] @@ -52,7 +52,7 @@ mod tests { AuthKey::from(auth_key_resource), AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: Some(DefaultClock::add(&Duration::new(duration_in_secs, 0)).unwrap()) + valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()) } ); } @@ -63,7 +63,7 @@ mod tests { let auth_key = AuthKey { key: 
"IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: Some(DefaultClock::add(&Duration::new(duration_in_secs, 0)).unwrap()), + valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()), }; assert_eq!( diff --git a/src/protocol/clock/mod.rs b/src/protocol/clock/mod.rs index 51197dba6..7868d4c5e 100644 --- a/src/protocol/clock/mod.rs +++ b/src/protocol/clock/mod.rs @@ -4,7 +4,7 @@ use std::time::Duration; pub type DurationSinceUnixEpoch = Duration; #[derive(Debug)] -pub enum ClockType { +pub enum Type { WorkingClock, StoppedClock, } @@ -12,14 +12,14 @@ pub enum ClockType { #[derive(Debug)] pub struct Clock; -pub type WorkingClock = Clock<{ ClockType::WorkingClock as usize }>; -pub type StoppedClock = Clock<{ ClockType::StoppedClock as usize }>; +pub type Working = Clock<{ Type::WorkingClock as usize }>; +pub type Stopped = Clock<{ Type::StoppedClock as usize }>; #[cfg(not(test))] -pub type DefaultClock = WorkingClock; +pub type Current = Working; #[cfg(test)] -pub type DefaultClock = StoppedClock; +pub type Current = Stopped; pub trait Time: Sized { fn now() -> DurationSinceUnixEpoch; @@ -40,44 +40,52 @@ pub trait TimeNow: Time { mod tests { use std::any::TypeId; - use crate::protocol::clock::{DefaultClock, StoppedClock, Time, WorkingClock}; + use crate::protocol::clock::{Current, Stopped, Time, Working}; #[test] fn it_should_be_the_stopped_clock_as_default_when_testing() { // We are testing, so we should default to the fixed time. 
- assert_eq!(TypeId::of::(), TypeId::of::()); - assert_eq!(StoppedClock::now(), DefaultClock::now()) + assert_eq!(TypeId::of::(), TypeId::of::()); + assert_eq!(Stopped::now(), Current::now()); } #[test] fn it_should_have_different_times() { - assert_ne!(TypeId::of::(), TypeId::of::()); - assert_ne!(StoppedClock::now(), WorkingClock::now()) + assert_ne!(TypeId::of::(), TypeId::of::()); + assert_ne!(Stopped::now(), Working::now()); } } mod working_clock { use std::time::SystemTime; - use super::{DurationSinceUnixEpoch, Time, TimeNow, WorkingClock}; + use super::{DurationSinceUnixEpoch, Time, TimeNow, Working}; - impl Time for WorkingClock { + impl Time for Working { fn now() -> DurationSinceUnixEpoch { SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap() } } - impl TimeNow for WorkingClock {} + impl TimeNow for Working {} } pub trait StoppedTime: TimeNow { fn local_set(unix_time: &DurationSinceUnixEpoch); fn local_set_to_unix_epoch() { - Self::local_set(&DurationSinceUnixEpoch::ZERO) + Self::local_set(&DurationSinceUnixEpoch::ZERO); } fn local_set_to_app_start_time(); fn local_set_to_system_time_now(); + + /// # Errors + /// + /// Will return `IntErrorKind` if `duration` would overflow the internal `Duration`. fn local_add(duration: &Duration) -> Result<(), IntErrorKind>; + + /// # Errors + /// + /// Will return `IntErrorKind` if `duration` would underflow the internal `Duration`. 
fn local_sub(duration: &Duration) -> Result<(), IntErrorKind>; fn local_reset(); } @@ -86,9 +94,9 @@ mod stopped_clock { use std::num::IntErrorKind; use std::time::Duration; - use super::{DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow}; + use super::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow}; - impl Time for StoppedClock { + impl Time for Stopped { fn now() -> DurationSinceUnixEpoch { detail::FIXED_TIME.with(|time| { return *time.borrow(); @@ -96,21 +104,21 @@ mod stopped_clock { } } - impl TimeNow for StoppedClock {} + impl TimeNow for Stopped {} - impl StoppedTime for StoppedClock { + impl StoppedTime for Stopped { fn local_set(unix_time: &DurationSinceUnixEpoch) { detail::FIXED_TIME.with(|time| { *time.borrow_mut() = *unix_time; - }) + }); } fn local_set_to_app_start_time() { - Self::local_set(&detail::get_app_start_time()) + Self::local_set(&detail::get_app_start_time()); } fn local_set_to_system_time_now() { - Self::local_set(&detail::get_app_start_time()) + Self::local_set(&detail::get_app_start_time()); } fn local_add(duration: &Duration) -> Result<(), IntErrorKind> { @@ -140,7 +148,7 @@ mod stopped_clock { } fn local_reset() { - Self::local_set(&detail::get_default_fixed_time()) + Self::local_set(&detail::get_default_fixed_time()); } } @@ -149,58 +157,58 @@ mod stopped_clock { use std::thread; use std::time::Duration; - use crate::protocol::clock::{DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow, WorkingClock}; + use crate::protocol::clock::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow, Working}; #[test] fn it_should_default_to_zero_when_testing() { - assert_eq!(StoppedClock::now(), DurationSinceUnixEpoch::ZERO) + assert_eq!(Stopped::now(), DurationSinceUnixEpoch::ZERO); } #[test] fn it_should_possible_to_set_the_time() { // Check we start with ZERO. 
- assert_eq!(StoppedClock::now(), Duration::ZERO); + assert_eq!(Stopped::now(), Duration::ZERO); // Set to Current Time and Check - let timestamp = WorkingClock::now(); - StoppedClock::local_set(×tamp); - assert_eq!(StoppedClock::now(), timestamp); + let timestamp = Working::now(); + Stopped::local_set(×tamp); + assert_eq!(Stopped::now(), timestamp); // Elapse the Current Time and Check - StoppedClock::local_add(×tamp).unwrap(); - assert_eq!(StoppedClock::now(), timestamp + timestamp); + Stopped::local_add(×tamp).unwrap(); + assert_eq!(Stopped::now(), timestamp + timestamp); // Reset to ZERO and Check - StoppedClock::local_reset(); - assert_eq!(StoppedClock::now(), Duration::ZERO); + Stopped::local_reset(); + assert_eq!(Stopped::now(), Duration::ZERO); } #[test] fn it_should_default_to_zero_on_thread_exit() { - assert_eq!(StoppedClock::now(), Duration::ZERO); - let after5 = WorkingClock::add(&Duration::from_secs(5)).unwrap(); - StoppedClock::local_set(&after5); - assert_eq!(StoppedClock::now(), after5); + assert_eq!(Stopped::now(), Duration::ZERO); + let after5 = Working::add(&Duration::from_secs(5)).unwrap(); + Stopped::local_set(&after5); + assert_eq!(Stopped::now(), after5); let t = thread::spawn(move || { // each thread starts out with the initial value of ZERO - assert_eq!(StoppedClock::now(), Duration::ZERO); + assert_eq!(Stopped::now(), Duration::ZERO); // and gets set to the current time. 
- let timestamp = WorkingClock::now(); - StoppedClock::local_set(×tamp); - assert_eq!(StoppedClock::now(), timestamp); + let timestamp = Working::now(); + Stopped::local_set(×tamp); + assert_eq!(Stopped::now(), timestamp); }); // wait for the thread to complete and bail out on panic t.join().unwrap(); // we retain our original value of current time + 5sec despite the child thread - assert_eq!(StoppedClock::now(), after5); + assert_eq!(Stopped::now(), after5); // Reset to ZERO and Check - StoppedClock::local_reset(); - assert_eq!(StoppedClock::now(), Duration::ZERO); + Stopped::local_reset(); + assert_eq!(Stopped::now(), Duration::ZERO); } } diff --git a/src/protocol/clock/time_extent.rs b/src/protocol/clock/time_extent.rs index f975e9a04..0ff74400b 100644 --- a/src/protocol/clock/time_extent.rs +++ b/src/protocol/clock/time_extent.rs @@ -1,7 +1,7 @@ use std::num::{IntErrorKind, TryFromIntError}; use std::time::Duration; -use super::{ClockType, StoppedClock, TimeNow, WorkingClock}; +use super::{Stopped, TimeNow, Type, Working}; pub trait Extent: Sized + Default { type Base; @@ -156,11 +156,11 @@ where #[derive(Debug)] pub struct TimeExtentMaker {} -pub type WorkingTimeExtentMaker = TimeExtentMaker<{ ClockType::WorkingClock as usize }>; -pub type StoppedTimeExtentMaker = TimeExtentMaker<{ ClockType::StoppedClock as usize }>; +pub type WorkingTimeExtentMaker = TimeExtentMaker<{ Type::WorkingClock as usize }>; +pub type StoppedTimeExtentMaker = TimeExtentMaker<{ Type::StoppedClock as usize }>; -impl MakeTimeExtent for WorkingTimeExtentMaker {} -impl MakeTimeExtent for StoppedTimeExtentMaker {} +impl MakeTimeExtent for WorkingTimeExtentMaker {} +impl MakeTimeExtent for StoppedTimeExtentMaker {} #[cfg(not(test))] pub type DefaultTimeExtentMaker = WorkingTimeExtentMaker; @@ -175,7 +175,7 @@ mod test { checked_duration_from_nanos, DefaultTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent, TimeExtentBase, TimeExtentMultiplier, TimeExtentProduct, MAX, ZERO, }; - use 
crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedTime}; + use crate::protocol::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239_812_388_723); @@ -443,7 +443,7 @@ mod test { } ); - DefaultClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); + Current::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); assert_eq!( DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), @@ -458,7 +458,7 @@ mod test { #[test] fn it_should_fail_if_amount_exceeds_bounds() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now(&TimeExtentBase::from_millis(1)) .unwrap() @@ -493,13 +493,13 @@ mod test { None ); - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!(DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::MAX), None); } #[test] fn it_should_fail_if_amount_exceeds_bounds() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now_after(&TimeExtentBase::from_millis(1), &Duration::ZERO) .unwrap() @@ -515,7 +515,7 @@ mod test { #[test] fn it_should_give_a_time_extent() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now_before( @@ -546,7 +546,7 @@ mod test { #[test] fn it_should_fail_if_amount_exceeds_bounds() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now_before(&TimeExtentBase::from_millis(1), &Duration::ZERO) .unwrap() diff --git a/src/tracker/key.rs b/src/tracker/key.rs index 881dac877..2b6e71223 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ 
-6,7 +6,7 @@ use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::Serialize; -use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time, TimeNow}; +use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time, TimeNow}; use crate::protocol::common::AUTH_KEY_LENGTH; #[must_use] @@ -21,12 +21,12 @@ pub fn generate_auth_key(lifetime: Duration) -> AuthKey { AuthKey { key, - valid_until: Some(DefaultClock::add(&lifetime).unwrap()), + valid_until: Some(Current::add(&lifetime).unwrap()), } } pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { - let current_time: DurationSinceUnixEpoch = DefaultClock::now(); + let current_time: DurationSinceUnixEpoch = Current::now(); if auth_key.valid_until.is_none() { return Err(Error::KeyInvalid); } @@ -88,7 +88,7 @@ impl From for Error { mod tests { use std::time::Duration; - use crate::protocol::clock::{DefaultClock, StoppedTime}; + use crate::protocol::clock::{Current, StoppedTime}; use crate::tracker::key; #[test] @@ -121,18 +121,18 @@ mod tests { #[test] fn generate_and_check_expired_auth_key() { // Set the time to the current time. - DefaultClock::local_set_to_system_time_now(); + Current::local_set_to_system_time_now(); // Make key that is valid for 19 seconds. let auth_key = key::generate_auth_key(Duration::from_secs(19)); // Mock the time has passed 10 sec. - DefaultClock::local_add(&Duration::from_secs(10)).unwrap(); + Current::local_add(&Duration::from_secs(10)).unwrap(); assert!(key::verify_auth_key(&auth_key).is_ok()); // Mock the time has passed another 10 sec. 
- DefaultClock::local_add(&Duration::from_secs(10)).unwrap(); + Current::local_add(&Duration::from_secs(10)).unwrap(); assert!(key::verify_auth_key(&auth_key).is_err()); } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index a30723d00..115a2bfb9 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -5,7 +5,7 @@ use serde; use serde::Serialize; use crate::http::request::Announce; -use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; +use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time}; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef, PeerId}; use crate::protocol::utils::ser_unix_time_value; @@ -37,7 +37,7 @@ impl TorrentPeer { TorrentPeer { peer_id: PeerId(announce_request.peer_id.0), peer_addr, - updated: DefaultClock::now(), + updated: Current::now(), uploaded: announce_request.bytes_uploaded, downloaded: announce_request.bytes_downloaded, left: announce_request.bytes_left, @@ -63,7 +63,7 @@ impl TorrentPeer { TorrentPeer { peer_id: announce_request.peer_id, peer_addr, - updated: DefaultClock::now(), + updated: Current::now(), uploaded: NumberOfBytes(announce_request.uploaded as i64), downloaded: NumberOfBytes(announce_request.downloaded as i64), left: NumberOfBytes(announce_request.left as i64), @@ -95,7 +95,7 @@ mod test { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::protocol::clock::{DefaultClock, Time}; + use crate::protocol::clock::{Current, Time}; use crate::protocol::common::PeerId; use crate::tracker::peer::TorrentPeer; @@ -104,7 +104,7 @@ mod test { let torrent_peer = TorrentPeer { peer_id: PeerId(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DefaultClock::now(), + updated: Current::now(), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 46608643d..4007976c9 100644 --- a/src/tracker/torrent.rs +++ 
b/src/tracker/torrent.rs @@ -5,7 +5,7 @@ use aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; use super::peer::TorrentPeer; -use crate::protocol::clock::{DefaultClock, TimeNow}; +use crate::protocol::clock::{Current, TimeNow}; use crate::protocol::common::{PeerId, MAX_SCRAPE_TORRENTS}; #[derive(Serialize, Deserialize, Clone, Debug)] @@ -80,7 +80,7 @@ impl TorrentEntry { } pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { - let current_cutoff = DefaultClock::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); + let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); self.peers.retain(|_, peer| peer.updated > current_cutoff); } } @@ -116,7 +116,7 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, WorkingClock}; + use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; use crate::protocol::common::PeerId; use crate::tracker::peer::TorrentPeer; use crate::tracker::torrent::TorrentEntry; @@ -130,7 +130,7 @@ mod tests { let default_peer = TorrentPeer { peer_id: PeerId([0u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), - updated: DefaultClock::now(), + updated: Current::now(), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), @@ -358,8 +358,8 @@ mod tests { let timeout = 120u32; - let now = WorkingClock::now(); - StoppedClock::local_set(&now); + let now = Working::now(); + Stopped::local_set(&now); let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); let inactive_peer = TorrentPeerBuilder::default() diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index b18940dfc..1b77d47e2 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -84,7 +84,7 @@ mod 
tests { use super::cookie_builder::{self}; use crate::protocol::clock::time_extent::{self, Extent}; - use crate::protocol::clock::{StoppedClock, StoppedTime}; + use crate::protocol::clock::{Stopped, StoppedTime}; use crate::udp::connection_cookie::{check_connection_cookie, make_connection_cookie, Cookie, COOKIE_LIFETIME}; // #![feature(const_socketaddr)] @@ -195,7 +195,7 @@ mod tests { let cookie = make_connection_cookie(&remote_address); - StoppedClock::local_add(&COOKIE_LIFETIME.increment).unwrap(); + Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); let cookie_next = make_connection_cookie(&remote_address); @@ -217,7 +217,7 @@ mod tests { let cookie = make_connection_cookie(&remote_address); - StoppedClock::local_add(&COOKIE_LIFETIME.increment).unwrap(); + Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); check_connection_cookie(&remote_address, &cookie).unwrap(); } @@ -228,7 +228,7 @@ mod tests { let cookie = make_connection_cookie(&remote_address); - StoppedClock::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); + Stopped::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); check_connection_cookie(&remote_address, &cookie).unwrap(); } @@ -240,7 +240,7 @@ mod tests { let cookie = make_connection_cookie(&remote_address); - StoppedClock::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); + Stopped::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); check_connection_cookie(&remote_address, &cookie).unwrap(); } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 81578e9c3..679a11ffc 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -253,7 +253,7 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use crate::config::Configuration; - use crate::protocol::clock::{DefaultClock, Time}; + use crate::protocol::clock::{Current, Time}; use crate::protocol::common::PeerId; use crate::tracker::mode::TrackerMode; use crate::tracker::peer::TorrentPeer; @@ -309,7 +309,7 @@ mod tests { let 
default_peer = TorrentPeer { peer_id: PeerId([255u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DefaultClock::now(), + updated: Current::now(), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), From 5ea7c0d8d047316a90235c945607f16ec7eb77fe Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 22:30:17 +0100 Subject: [PATCH 0233/1003] clippy: fix src/protocol/clock/time_extent.rs --- src/protocol/clock/time_extent.rs | 156 ++++++++++++++---------------- src/udp/connection_cookie.rs | 2 +- 2 files changed, 74 insertions(+), 84 deletions(-) diff --git a/src/protocol/clock/time_extent.rs b/src/protocol/clock/time_extent.rs index 0ff74400b..b4c20cd70 100644 --- a/src/protocol/clock/time_extent.rs +++ b/src/protocol/clock/time_extent.rs @@ -10,37 +10,44 @@ pub trait Extent: Sized + Default { fn new(unit: &Self::Base, count: &Self::Multiplier) -> Self; + /// # Errors + /// + /// Will return `IntErrorKind` if `add` would overflow the internal `Duration`. fn increase(&self, add: Self::Multiplier) -> Result; + + /// # Errors + /// + /// Will return `IntErrorKind` if `sub` would underflow the internal `Duration`. 
fn decrease(&self, sub: Self::Multiplier) -> Result; fn total(&self) -> Option>; fn total_next(&self) -> Option>; } -pub type TimeExtentBase = Duration; -pub type TimeExtentMultiplier = u64; -pub type TimeExtentProduct = TimeExtentBase; +pub type Base = Duration; +pub type Multiplier = u64; +pub type Product = Base; #[derive(Debug, Default, Hash, PartialEq, Eq)] pub struct TimeExtent { - pub increment: TimeExtentBase, - pub amount: TimeExtentMultiplier, + pub increment: Base, + pub amount: Multiplier, } pub const ZERO: TimeExtent = TimeExtent { - increment: TimeExtentBase::ZERO, - amount: TimeExtentMultiplier::MIN, + increment: Base::ZERO, + amount: Multiplier::MIN, }; pub const MAX: TimeExtent = TimeExtent { - increment: TimeExtentBase::MAX, - amount: TimeExtentMultiplier::MAX, + increment: Base::MAX, + amount: Multiplier::MAX, }; impl TimeExtent { #[must_use] - pub const fn from_sec(seconds: u64, amount: &TimeExtentMultiplier) -> Self { + pub const fn from_sec(seconds: u64, amount: &Multiplier) -> Self { Self { - increment: TimeExtentBase::from_secs(seconds), + increment: Base::from_secs(seconds), amount: *amount, } } @@ -61,9 +68,9 @@ fn checked_duration_from_nanos(time: u128) -> Result } impl Extent for TimeExtent { - type Base = TimeExtentBase; - type Multiplier = TimeExtentMultiplier; - type Product = TimeExtentProduct; + type Base = Base; + type Multiplier = Multiplier; + type Product = Product; fn new(increment: &Self::Base, amount: &Self::Multiplier) -> Self { Self { @@ -107,60 +114,58 @@ impl Extent for TimeExtent { } } -pub trait MakeTimeExtent: Sized +pub trait Make: Sized where Clock: TimeNow, { #[must_use] - fn now(increment: &TimeExtentBase) -> Option> { + fn now(increment: &Base) -> Option> { Clock::now() .as_nanos() .checked_div((*increment).as_nanos()) - .map(|amount| match TimeExtentMultiplier::try_from(amount) { + .map(|amount| match Multiplier::try_from(amount) { Err(error) => Err(error), Ok(amount) => Ok(TimeExtent::new(increment, &amount)), 
}) } #[must_use] - fn now_after(increment: &TimeExtentBase, add_time: &Duration) -> Option> { + fn now_after(increment: &Base, add_time: &Duration) -> Option> { match Clock::add(add_time) { None => None, - Some(time) => { - time.as_nanos() - .checked_div(increment.as_nanos()) - .map(|amount| match TimeExtentMultiplier::try_from(amount) { - Err(error) => Err(error), - Ok(amount) => Ok(TimeExtent::new(increment, &amount)), - }) - } + Some(time) => time + .as_nanos() + .checked_div(increment.as_nanos()) + .map(|amount| match Multiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) => Ok(TimeExtent::new(increment, &amount)), + }), } } #[must_use] - fn now_before(increment: &TimeExtentBase, sub_time: &Duration) -> Option> { + fn now_before(increment: &Base, sub_time: &Duration) -> Option> { match Clock::sub(sub_time) { None => None, - Some(time) => { - time.as_nanos() - .checked_div(increment.as_nanos()) - .map(|amount| match TimeExtentMultiplier::try_from(amount) { - Err(error) => Err(error), - Ok(amount) => Ok(TimeExtent::new(increment, &amount)), - }) - } + Some(time) => time + .as_nanos() + .checked_div(increment.as_nanos()) + .map(|amount| match Multiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) => Ok(TimeExtent::new(increment, &amount)), + }), } } } #[derive(Debug)] -pub struct TimeExtentMaker {} +pub struct Maker {} -pub type WorkingTimeExtentMaker = TimeExtentMaker<{ Type::WorkingClock as usize }>; -pub type StoppedTimeExtentMaker = TimeExtentMaker<{ Type::StoppedClock as usize }>; +pub type WorkingTimeExtentMaker = Maker<{ Type::WorkingClock as usize }>; +pub type StoppedTimeExtentMaker = Maker<{ Type::StoppedClock as usize }>; -impl MakeTimeExtent for WorkingTimeExtentMaker {} -impl MakeTimeExtent for StoppedTimeExtentMaker {} +impl Make for WorkingTimeExtentMaker {} +impl Make for StoppedTimeExtentMaker {} #[cfg(not(test))] pub type DefaultTimeExtentMaker = WorkingTimeExtentMaker; @@ -172,8 +177,7 @@ pub type 
DefaultTimeExtentMaker = StoppedTimeExtentMaker; mod test { use crate::protocol::clock::time_extent::{ - checked_duration_from_nanos, DefaultTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent, TimeExtentBase, - TimeExtentMultiplier, TimeExtentProduct, MAX, ZERO, + checked_duration_from_nanos, Base, DefaultTimeExtentMaker, Extent, Make, Multiplier, Product, TimeExtent, MAX, ZERO, }; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; @@ -238,7 +242,7 @@ mod test { #[test] fn it_should_make_empty_for_zero() { - assert_eq!(TimeExtent::from_sec(u64::MIN, &TimeExtentMultiplier::MIN), ZERO); + assert_eq!(TimeExtent::from_sec(u64::MIN, &Multiplier::MIN), ZERO); } #[test] fn it_should_make_from_seconds() { @@ -254,15 +258,15 @@ mod test { #[test] fn it_should_make_empty_for_zero() { - assert_eq!(TimeExtent::new(&TimeExtentBase::ZERO, &TimeExtentMultiplier::MIN), ZERO); + assert_eq!(TimeExtent::new(&Base::ZERO, &Multiplier::MIN), ZERO); } #[test] fn it_should_make_new() { assert_eq!( - TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount), + TimeExtent::new(&Base::from_millis(2), &TIME_EXTENT_VAL.amount), TimeExtent { - increment: TimeExtentBase::from_millis(2), + increment: Base::from_millis(2), amount: TIME_EXTENT_VAL.amount } ); @@ -328,30 +332,27 @@ mod test { #[test] fn it_should_be_zero_for_zero() { - assert_eq!(ZERO.total().unwrap().unwrap(), TimeExtentProduct::ZERO); + assert_eq!(ZERO.total().unwrap().unwrap(), Product::ZERO); } #[test] fn it_should_give_a_total() { assert_eq!( TIME_EXTENT_VAL.total().unwrap().unwrap(), - TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) + Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) ); assert_eq!( - TimeExtent::new(&TimeExtentBase::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) + TimeExtent::new(&Base::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) .total() .unwrap() .unwrap(), - 
TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) + Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) ); assert_eq!( - TimeExtent::new(&TimeExtentBase::from_secs(1), &(u64::MAX)) - .total() - .unwrap() - .unwrap(), - TimeExtentProduct::from_secs(u64::MAX) + TimeExtent::new(&Base::from_secs(1), &(u64::MAX)).total().unwrap().unwrap(), + Product::from_secs(u64::MAX) ); } @@ -378,33 +379,33 @@ mod test { #[test] fn it_should_be_zero_for_zero() { - assert_eq!(ZERO.total_next().unwrap().unwrap(), TimeExtentProduct::ZERO); + assert_eq!(ZERO.total_next().unwrap().unwrap(), Product::ZERO); } #[test] fn it_should_give_a_total() { assert_eq!( TIME_EXTENT_VAL.total_next().unwrap().unwrap(), - TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount + 1)) + Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount + 1)) ); assert_eq!( - TimeExtent::new(&TimeExtentBase::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) + TimeExtent::new(&Base::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) .total_next() .unwrap() .unwrap(), - TimeExtentProduct::new( + Product::new( TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount), - TimeExtentBase::from_millis(2).as_nanos().try_into().unwrap() + Base::from_millis(2).as_nanos().try_into().unwrap() ) ); assert_eq!( - TimeExtent::new(&TimeExtentBase::from_secs(1), &(u64::MAX - 1)) + TimeExtent::new(&Base::from_secs(1), &(u64::MAX - 1)) .total_next() .unwrap() .unwrap(), - TimeExtentProduct::from_secs(u64::MAX) + Product::from_secs(u64::MAX) ); } @@ -453,16 +454,14 @@ mod test { #[test] fn it_should_fail_for_zero() { - assert_eq!(DefaultTimeExtentMaker::now(&TimeExtentBase::ZERO), None); + assert_eq!(DefaultTimeExtentMaker::now(&Base::ZERO), None); } #[test] fn it_should_fail_if_amount_exceeds_bounds() { Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( - 
DefaultTimeExtentMaker::now(&TimeExtentBase::from_millis(1)) - .unwrap() - .unwrap_err(), + DefaultTimeExtentMaker::now(&Base::from_millis(1)).unwrap().unwrap_err(), u64::try_from(u128::MAX).unwrap_err() ); } @@ -488,20 +487,17 @@ mod test { #[test] fn it_should_fail_for_zero() { - assert_eq!( - DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::ZERO), - None - ); + assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::ZERO), None); Current::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!(DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::MAX), None); + assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::MAX), None); } #[test] fn it_should_fail_if_amount_exceeds_bounds() { Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( - DefaultTimeExtentMaker::now_after(&TimeExtentBase::from_millis(1), &Duration::ZERO) + DefaultTimeExtentMaker::now_after(&Base::from_millis(1), &Duration::ZERO) .unwrap() .unwrap_err(), u64::try_from(u128::MAX).unwrap_err() @@ -519,13 +515,13 @@ mod test { assert_eq!( DefaultTimeExtentMaker::now_before( - &TimeExtentBase::from_secs(u64::from(u32::MAX)), + &Base::from_secs(u64::from(u32::MAX)), &Duration::from_secs(u64::from(u32::MAX)) ) .unwrap() .unwrap(), TimeExtent { - increment: TimeExtentBase::from_secs(u64::from(u32::MAX)), + increment: Base::from_secs(u64::from(u32::MAX)), amount: 4_294_967_296 } ); @@ -533,22 +529,16 @@ mod test { #[test] fn it_should_fail_for_zero() { - assert_eq!( - DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::ZERO), - None - ); + assert_eq!(DefaultTimeExtentMaker::now_before(&Base::ZERO, &Duration::ZERO), None); - assert_eq!( - DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::MAX), - None - ); + assert_eq!(DefaultTimeExtentMaker::now_before(&Base::ZERO, &Duration::MAX), None); } #[test] fn it_should_fail_if_amount_exceeds_bounds() { Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( 
- DefaultTimeExtentMaker::now_before(&TimeExtentBase::from_millis(1), &Duration::ZERO) + DefaultTimeExtentMaker::now_before(&Base::from_millis(1), &Duration::ZERO) .unwrap() .unwrap_err(), u64::try_from(u128::MAX).unwrap_err() diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index 1b77d47e2..5a1e564dd 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -54,7 +54,7 @@ mod cookie_builder { use std::net::SocketAddr; use super::{Cookie, SinceUnixEpochTimeExtent, COOKIE_LIFETIME}; - use crate::protocol::clock::time_extent::{DefaultTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent}; + use crate::protocol::clock::time_extent::{DefaultTimeExtentMaker, Extent, Make, TimeExtent}; use crate::protocol::crypto::keys::seeds::{DefaultSeed, SeedKeeper}; pub(super) fn get_last_time_extent() -> SinceUnixEpochTimeExtent { From d03269ad9d48a776c7390f18c2a71efa784f1538 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 22:34:11 +0100 Subject: [PATCH 0234/1003] clippy: fix src/protocol/utils.rs --- src/protocol/utils.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index ac20aa41e..cec02ceaf 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -1,5 +1,9 @@ use super::clock::DurationSinceUnixEpoch; +/// # Errors +/// +/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`. 
pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { + #[allow(clippy::cast_possible_truncation)] ser.serialize_u64(unix_time_value.as_millis() as u64) } From efed1bc2c9729c1fcf434db0804570510047359a Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 19:37:14 +0100 Subject: [PATCH 0235/1003] clippy: fix src/setup.rs --- src/setup.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/setup.rs b/src/setup.rs index 804b6258a..cfca5eb9e 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -35,7 +35,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< udp_tracker_config.bind_address, config.mode ); } else { - jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone())) + jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone())); } } From 58e5909379203f32ee5d628414f68c143649ffea Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 19:51:36 +0100 Subject: [PATCH 0236/1003] clippy: fix src/tracker/key.rs --- src/api/resources/auth_key_resource.rs | 18 ++++---- src/databases/database.rs | 8 ++-- src/databases/mysql.rs | 14 +++--- src/databases/sqlite.rs | 14 +++--- src/http/filters.rs | 8 ++-- src/http/handlers.rs | 8 ++-- src/tracker/key.rs | 63 +++++++++++++++----------- src/tracker/mod.rs | 14 +++--- tests/api.rs | 4 +- 9 files changed, 81 insertions(+), 70 deletions(-) diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resources/auth_key_resource.rs index 9bcfca596..9b3cc9646 100644 --- a/src/api/resources/auth_key_resource.rs +++ b/src/api/resources/auth_key_resource.rs @@ -3,7 +3,7 @@ use std::convert::From; use serde::{Deserialize, Serialize}; use crate::protocol::clock::DurationSinceUnixEpoch; -use crate::tracker::key::AuthKey; +use crate::tracker::key::Auth; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct AuthKeyResource { @@ -11,9 +11,9 @@ pub struct AuthKeyResource { pub valid_until: Option, } -impl From for 
AuthKey { +impl From for Auth { fn from(auth_key_resource: AuthKeyResource) -> Self { - AuthKey { + Auth { key: auth_key_resource.key, valid_until: auth_key_resource .valid_until @@ -22,8 +22,8 @@ impl From for AuthKey { } } -impl From for AuthKeyResource { - fn from(auth_key: AuthKey) -> Self { +impl From for AuthKeyResource { + fn from(auth_key: Auth) -> Self { AuthKeyResource { key: auth_key.key, valid_until: auth_key.valid_until.map(|valid_until| valid_until.as_secs()), @@ -37,7 +37,7 @@ mod tests { use super::AuthKeyResource; use crate::protocol::clock::{Current, TimeNow}; - use crate::tracker::key::AuthKey; + use crate::tracker::key::Auth; #[test] fn it_should_be_convertible_into_an_auth_key() { @@ -49,8 +49,8 @@ mod tests { }; assert_eq!( - AuthKey::from(auth_key_resource), - AuthKey { + Auth::from(auth_key_resource), + Auth { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()) } @@ -61,7 +61,7 @@ mod tests { fn it_should_be_convertible_from_an_auth_key() { let duration_in_secs = 60; - let auth_key = AuthKey { + let auth_key = Auth { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()), }; diff --git a/src/databases/database.rs b/src/databases/database.rs index 62105dee5..5186f96b3 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::databases::mysql::Mysql; use crate::databases::sqlite::Sqlite; use crate::protocol::common::InfoHash; -use crate::tracker::key::AuthKey; +use crate::tracker::key::Auth; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub enum Drivers { @@ -42,7 +42,7 @@ pub trait Database: Sync + Send { async fn load_persistent_torrents(&self) -> Result, Error>; - async fn load_keys(&self) -> Result, Error>; + async fn load_keys(&self) -> Result, Error>; 
async fn load_whitelist(&self) -> Result, Error>; @@ -54,9 +54,9 @@ pub trait Database: Sync + Send { async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; - async fn get_key_from_keys(&self, key: &str) -> Result; + async fn get_key_from_keys(&self, key: &str) -> Result; - async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result; + async fn add_key_to_keys(&self, auth_key: &Auth) -> Result; async fn remove_key_from_keys(&self, key: &str) -> Result; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 5db358d5a..4fd00e31e 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -11,7 +11,7 @@ use r2d2_mysql::MysqlConnectionManager; use crate::databases::database; use crate::databases::database::{Database, Error}; use crate::protocol::common::{InfoHash, AUTH_KEY_LENGTH}; -use crate::tracker::key::AuthKey; +use crate::tracker::key::Auth; pub struct Mysql { pool: Pool, @@ -90,13 +90,13 @@ impl Database for Mysql { Ok(torrents) } - async fn load_keys(&self) -> Result, Error> { + async fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - let keys: Vec = conn + let keys: Vec = conn .query_map( "SELECT `key`, valid_until FROM `keys`", - |(key, valid_until): (String, i64)| AuthKey { + |(key, valid_until): (String, i64)| Auth { key, valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }, @@ -182,14 +182,14 @@ impl Database for Mysql { } } - async fn get_key_from_keys(&self, key: &str) -> Result { + async fn get_key_from_keys(&self, key: &str) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn .exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) .map_err(|_| database::Error::QueryReturnedNoRows)? 
{ - Some((key, valid_until)) => Ok(AuthKey { + Some((key, valid_until)) => Ok(Auth { key, valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }), @@ -197,7 +197,7 @@ impl Database for Mysql { } } - async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { + async fn add_key_to_keys(&self, auth_key: &Auth) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let key = auth_key.key.to_string(); diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index ee637049b..159da9922 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -9,7 +9,7 @@ use crate::databases::database; use crate::databases::database::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::common::InfoHash; -use crate::tracker::key::AuthKey; +use crate::tracker::key::Auth; pub struct Sqlite { pool: Pool, @@ -78,7 +78,7 @@ impl Database for Sqlite { Ok(torrents) } - async fn load_keys(&self) -> Result, Error> { + async fn load_keys(&self) -> Result, Error> { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; @@ -87,13 +87,13 @@ impl Database for Sqlite { let key = row.get(0)?; let valid_until: i64 = row.get(1)?; - Ok(AuthKey { + Ok(Auth { key, valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) })?; - let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); + let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); Ok(keys) } @@ -186,7 +186,7 @@ impl Database for Sqlite { } } - async fn get_key_from_keys(&self, key: &str) -> Result { + async fn get_key_from_keys(&self, key: &str) -> Result { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; @@ -196,7 +196,7 @@ impl Database for Sqlite { let key: String = row.get(0).unwrap(); let 
valid_until: i64 = row.get(1).unwrap(); - Ok(AuthKey { + Ok(Auth { key, valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) @@ -205,7 +205,7 @@ impl Database for Sqlite { } } - async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { + async fn add_key_to_keys(&self, auth_key: &Auth) -> Result { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.execute( diff --git a/src/http/filters.rs b/src/http/filters.rs index f2e214e87..3375c781f 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -9,7 +9,7 @@ use super::errors::ServerError; use super::request::{Announce, AnnounceRequestQuery, Scrape}; use super::WebResult; use crate::protocol::common::{InfoHash, PeerId, MAX_SCRAPE_TORRENTS}; -use crate::tracker::key::AuthKey; +use crate::tracker::key::Auth; use crate::tracker::TorrentTracker; /// Pass Arc along @@ -32,10 +32,10 @@ pub fn with_peer_id() -> impl Filter + C /// Pass Arc along #[must_use] -pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { +pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() - .map(|key: String| AuthKey::from_string(&key)) - .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) + .map(|key: String| Auth::from_string(&key)) + .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) } /// Check for `PeerAddress` diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 064047ba0..793de9ef5 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -12,7 +12,7 @@ use super::response::{self, Peer, ScrapeResponseEntry}; use super::{request, WebResult}; use crate::http::response::Error; use crate::protocol::common::InfoHash; -use crate::tracker::key::AuthKey; +use crate::tracker::key::Auth; use crate::tracker::peer::TorrentPeer; use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::{TorrentError, TorrentStats}; @@ -25,7 +25,7 @@ use 
crate::tracker::TorrentTracker; /// Will return `ServerError` that wraps the `TorrentError` if unable to `authenticate_request`. pub async fn authenticate( info_hash: &InfoHash, - auth_key: &Option, + auth_key: &Option, tracker: Arc, ) -> Result<(), ServerError> { tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { @@ -45,7 +45,7 @@ pub async fn authenticate( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. pub async fn handle_announce( announce_request: request::Announce, - auth_key: Option, + auth_key: Option, tracker: Arc, ) -> WebResult { authenticate(&announce_request.info_hash, &auth_key, tracker.clone()) @@ -91,7 +91,7 @@ pub async fn handle_announce( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. pub async fn handle_scrape( scrape_request: request::Scrape, - auth_key: Option, + auth_key: Option, tracker: Arc, ) -> WebResult { let mut files: HashMap = HashMap::new(); diff --git a/src/tracker/key.rs b/src/tracker/key.rs index 2b6e71223..673780ad0 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ -10,7 +10,10 @@ use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time, TimeNow}; use crate::protocol::common::AUTH_KEY_LENGTH; #[must_use] -pub fn generate_auth_key(lifetime: Duration) -> AuthKey { +/// # Panics +/// +/// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. 
+pub fn generate(lifetime: Duration) -> Auth { let key: String = thread_rng() .sample_iter(&Alphanumeric) .take(AUTH_KEY_LENGTH) @@ -19,49 +22,57 @@ pub fn generate_auth_key(lifetime: Duration) -> AuthKey { debug!("Generated key: {}, valid for: {:?} seconds", key, lifetime); - AuthKey { + Auth { key, valid_until: Some(Current::add(&lifetime).unwrap()), } } -pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { +/// # Errors +/// +/// Will return `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. +/// +/// Will return `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. +pub fn verify(auth_key: &Auth) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = Current::now(); - if auth_key.valid_until.is_none() { - return Err(Error::KeyInvalid); - } - if auth_key.valid_until.unwrap() < current_time { - return Err(Error::KeyExpired); - } - Ok(()) + match auth_key.valid_until { + Some(valid_untill) => { + if valid_untill < current_time { + Err(Error::KeyExpired) + } else { + Ok(()) + } + } + None => Err(Error::KeyInvalid), + } } #[derive(Serialize, Debug, Eq, PartialEq, Clone)] -pub struct AuthKey { +pub struct Auth { pub key: String, pub valid_until: Option, } -impl AuthKey { +impl Auth { #[must_use] - pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { + pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { - Some(AuthKey { key, valid_until: None }) + Some(Auth { key, valid_until: None }) } else { None } } #[must_use] - pub fn from_string(key: &str) -> Option { - if key.len() != AUTH_KEY_LENGTH { - None - } else { - Some(AuthKey { + pub fn from_string(key: &str) -> Option { + if key.len() == AUTH_KEY_LENGTH { + Some(Auth { key: key.to_string(), valid_until: None, }) + } else { + None } } } @@ -93,7 +104,7 @@ mod tests { #[test] fn auth_key_from_buffer() { - let auth_key = key::AuthKey::from_buffer([ + let auth_key = 
key::Auth::from_buffer([ 89, 90, 83, 108, 52, 108, 77, 90, 117, 112, 82, 117, 79, 112, 83, 82, 67, 51, 107, 114, 73, 75, 82, 53, 66, 80, 66, 49, 52, 110, 114, 74, ]); @@ -105,7 +116,7 @@ mod tests { #[test] fn auth_key_from_string() { let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = key::AuthKey::from_string(key_string); + let auth_key = key::Auth::from_string(key_string); assert!(auth_key.is_some()); assert_eq!(auth_key.unwrap().key, key_string); @@ -113,9 +124,9 @@ mod tests { #[test] fn generate_valid_auth_key() { - let auth_key = key::generate_auth_key(Duration::new(9999, 0)); + let auth_key = key::generate(Duration::new(9999, 0)); - assert!(key::verify_auth_key(&auth_key).is_ok()); + assert!(key::verify(&auth_key).is_ok()); } #[test] @@ -124,16 +135,16 @@ mod tests { Current::local_set_to_system_time_now(); // Make key that is valid for 19 seconds. - let auth_key = key::generate_auth_key(Duration::from_secs(19)); + let auth_key = key::generate(Duration::from_secs(19)); // Mock the time has passed 10 sec. Current::local_add(&Duration::from_secs(10)).unwrap(); - assert!(key::verify_auth_key(&auth_key).is_ok()); + assert!(key::verify(&auth_key).is_ok()); // Mock the time has passed another 10 sec. 
Current::local_add(&Duration::from_secs(10)).unwrap(); - assert!(key::verify_auth_key(&auth_key).is_err()); + assert!(key::verify(&auth_key).is_err()); } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 680f2635d..1e24326da 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -20,13 +20,13 @@ use crate::config::Configuration; use crate::databases::database; use crate::databases::database::Database; use crate::protocol::common::InfoHash; -use crate::tracker::key::AuthKey; +use crate::tracker::key::Auth; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; pub struct TorrentTracker { pub config: Arc, mode: TrackerMode, - keys: RwLock>, + keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, stats_event_sender: Option>, @@ -66,8 +66,8 @@ impl TorrentTracker { self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed } - pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { - let auth_key = key::generate_auth_key(lifetime); + pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { + let auth_key = key::generate(lifetime); self.database.add_key_to_keys(&auth_key).await?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); Ok(auth_key) @@ -79,10 +79,10 @@ impl TorrentTracker { Ok(()) } - pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> { + pub async fn verify_auth_key(&self, auth_key: &Auth) -> Result<(), key::Error> { match self.keys.read().await.get(&auth_key.key) { None => Err(key::Error::KeyInvalid), - Some(key) => key::verify_auth_key(key), + Some(key) => key::verify(key), } } @@ -145,7 +145,7 @@ impl TorrentTracker { Ok(()) } - pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { + pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { // no authentication needed in public mode if self.is_public() { return Ok(()); diff 
--git a/tests/api.rs b/tests/api.rs index 6cfcbc092..380ab90ca 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -23,7 +23,7 @@ mod tracker_api { use torrust_tracker::jobs::tracker_api; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; use torrust_tracker::protocol::common::{InfoHash, PeerId}; - use torrust_tracker::tracker::key::AuthKey; + use torrust_tracker::tracker::key::Auth; use torrust_tracker::tracker::peer::TorrentPeer; use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::TorrentTracker; @@ -45,7 +45,7 @@ mod tracker_api { assert!(api_server .tracker .unwrap() - .verify_auth_key(&AuthKey::from(auth_key)) + .verify_auth_key(&Auth::from(auth_key)) .await .is_ok()); } From 363b21a19814762321f4401886cc0744e7573eda Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 19:52:30 +0100 Subject: [PATCH 0237/1003] clippy: fix src/tracker/mode.rs --- src/config.rs | 6 +++--- src/tracker/mod.rs | 9 ++++----- src/tracker/mode.rs | 2 +- src/udp/handlers.rs | 15 +++++++++------ 4 files changed, 17 insertions(+), 15 deletions(-) diff --git a/src/config.rs b/src/config.rs index 6eb83ad16..9f6ca7092 100644 --- a/src/config.rs +++ b/src/config.rs @@ -10,7 +10,7 @@ use serde_with::{serde_as, NoneAsEmptyString}; use {std, toml}; use crate::databases::database::Drivers; -use crate::tracker::mode::TrackerMode; +use crate::tracker::mode; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct UdpTracker { @@ -41,7 +41,7 @@ pub struct HttpApi { #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { pub log_level: Option, - pub mode: TrackerMode, + pub mode: mode::Tracker, pub db_driver: Drivers, pub db_path: String, pub announce_interval: u32, @@ -97,7 +97,7 @@ impl Configuration { pub fn default() -> Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), - mode: TrackerMode::Public, + mode: mode::Tracker::Public, db_driver: 
Drivers::Sqlite3, db_path: String::from("data.db"), announce_interval: 120, diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 1e24326da..0312ac3e2 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -13,7 +13,6 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; -use self::mode::TrackerMode; use self::peer::TorrentPeer; use self::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; use crate::config::Configuration; @@ -25,7 +24,7 @@ use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; pub struct TorrentTracker { pub config: Arc, - mode: TrackerMode, + mode: mode::Tracker, keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, @@ -55,15 +54,15 @@ impl TorrentTracker { } pub fn is_public(&self) -> bool { - self.mode == TrackerMode::Public + self.mode == mode::Tracker::Public } pub fn is_private(&self) -> bool { - self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed + self.mode == mode::Tracker::Private || self.mode == mode::Tracker::PrivateListed } pub fn is_whitelisted(&self) -> bool { - self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed + self.mode == mode::Tracker::Listed || self.mode == mode::Tracker::PrivateListed } pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { diff --git a/src/tracker/mode.rs b/src/tracker/mode.rs index f444b4523..f1fff169e 100644 --- a/src/tracker/mode.rs +++ b/src/tracker/mode.rs @@ -2,7 +2,7 @@ use serde; use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] -pub enum TrackerMode { +pub enum Tracker { // Will track every new info hash and serve every peer. 
#[serde(rename = "public")] Public, diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 679a11ffc..ecf1beae0 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -255,27 +255,30 @@ mod tests { use crate::config::Configuration; use crate::protocol::clock::{Current, Time}; use crate::protocol::common::PeerId; - use crate::tracker::mode::TrackerMode; use crate::tracker::peer::TorrentPeer; use crate::tracker::statistics::StatsTracker; - use crate::tracker::TorrentTracker; + use crate::tracker::{mode, TorrentTracker}; fn default_tracker_config() -> Arc { Arc::new(Configuration::default()) } fn initialized_public_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Tracker::Public).into()); initialized_tracker(configuration) } fn initialized_private_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); + let configuration = Arc::new( + TrackerConfigurationBuilder::default() + .with_mode(mode::Tracker::Private) + .into(), + ); initialized_tracker(configuration) } fn initialized_whitelisted_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Tracker::Listed).into()); initialized_tracker(configuration) } @@ -355,7 +358,7 @@ mod tests { self } - pub fn with_mode(mut self, mode: TrackerMode) -> Self { + pub fn with_mode(mut self, mode: mode::Tracker) -> Self { self.configuration.mode = mode; self } From 0f281c3ed336ee17446d9ec56a167d82480d8c79 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 20:07:55 +0100 Subject: [PATCH 0238/1003] clippy: fix src/tracker/peer.rs --- src/http/handlers.rs | 7 +++---- src/tracker/mod.rs | 7 +++---- 
src/tracker/peer.rs | 9 +++++---- src/tracker/torrent.rs | 20 ++++++++++---------- src/udp/handlers.rs | 14 ++++++-------- 5 files changed, 27 insertions(+), 30 deletions(-) diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 793de9ef5..5dab842e2 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -13,10 +13,9 @@ use super::{request, WebResult}; use crate::http::response::Error; use crate::protocol::common::InfoHash; use crate::tracker::key::Auth; -use crate::tracker::peer::TorrentPeer; use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::{TorrentError, TorrentStats}; -use crate::tracker::TorrentTracker; +use crate::tracker::{peer, TorrentTracker}; /// Authenticate `InfoHash` using optional `AuthKey` /// @@ -55,7 +54,7 @@ pub async fn handle_announce( debug!("{:?}", announce_request); let peer = - TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); + peer::TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); let torrent_stats = tracker .update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer) .await; @@ -143,7 +142,7 @@ pub async fn handle_scrape( fn send_announce_response( announce_request: &request::Announce, torrent_stats: &TorrentStats, - peers: &Vec, + peers: &Vec, interval: u32, interval_min: u32, ) -> WebResult { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 0312ac3e2..fab254663 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -13,7 +13,6 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; -use self::peer::TorrentPeer; use self::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; use crate::config::Configuration; use crate::databases::database; @@ -195,7 +194,7 @@ impl TorrentTracker { } /// Get all torrent peers for a given 
torrent filtering out the peer with the client address - pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { + pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { @@ -205,7 +204,7 @@ impl TorrentTracker { } /// Get all torrent peers for a given torrent - pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { + pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { @@ -214,7 +213,7 @@ impl TorrentTracker { } } - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> TorrentStats { + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::TorrentPeer) -> TorrentStats { let mut torrents = self.torrents.write().await; let torrent_entry = match torrents.entry(*info_hash) { diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 115a2bfb9..d590b590d 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -60,13 +60,14 @@ impl TorrentPeer { AnnounceEvent::None }; + #[allow(clippy::cast_possible_truncation)] TorrentPeer { - peer_id: announce_request.peer_id, + peer_id: announce_request.peer_id.clone(), peer_addr, updated: Current::now(), - uploaded: NumberOfBytes(announce_request.uploaded as i64), - downloaded: NumberOfBytes(announce_request.downloaded as i64), - left: NumberOfBytes(announce_request.left as i64), + uploaded: NumberOfBytes(i128::from(announce_request.uploaded) as i64), + downloaded: NumberOfBytes(i128::from(announce_request.downloaded) as i64), + left: NumberOfBytes(i128::from(announce_request.left) as i64), event, } } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 4007976c9..734e7a66c 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -4,14 +4,14 @@ use 
std::time::Duration; use aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; -use super::peer::TorrentPeer; +use super::peer; use crate::protocol::clock::{Current, TimeNow}; use crate::protocol::common::{PeerId, MAX_SCRAPE_TORRENTS}; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct TorrentEntry { #[serde(skip)] - pub peers: std::collections::BTreeMap, + pub peers: std::collections::BTreeMap, pub completed: u32, } @@ -25,7 +25,7 @@ impl TorrentEntry { } // Update peer and return completed (times torrent has been downloaded) - pub fn update_peer(&mut self, peer: &TorrentPeer) -> bool { + pub fn update_peer(&mut self, peer: &peer::TorrentPeer) -> bool { let mut did_torrent_stats_change: bool = false; match peer.event { @@ -49,7 +49,7 @@ impl TorrentEntry { } #[must_use] - pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&TorrentPeer> { + pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&peer::TorrentPeer> { self.peers .values() .filter(|peer| match client_addr { @@ -118,16 +118,16 @@ mod tests { use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; use crate::protocol::common::PeerId; - use crate::tracker::peer::TorrentPeer; + use crate::tracker::peer; use crate::tracker::torrent::TorrentEntry; struct TorrentPeerBuilder { - peer: TorrentPeer, + peer: peer::TorrentPeer, } impl TorrentPeerBuilder { pub fn default() -> TorrentPeerBuilder { - let default_peer = TorrentPeer { + let default_peer = peer::TorrentPeer { peer_id: PeerId([0u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), updated: Current::now(), @@ -164,14 +164,14 @@ mod tests { self } - pub fn into(self) -> TorrentPeer { + pub fn into(self) -> peer::TorrentPeer { self.peer } } /// A torrent seeder is a peer with 0 bytes left to download which /// has not announced it has stopped - fn a_torrent_seeder() -> TorrentPeer { + fn a_torrent_seeder() -> peer::TorrentPeer { 
TorrentPeerBuilder::default() .with_number_of_bytes_left(0) .with_event_completed() @@ -180,7 +180,7 @@ mod tests { /// A torrent leecher is a peer that is not a seeder. /// Leecher: left > 0 OR event = Stopped - fn a_torrent_leecher() -> TorrentPeer { + fn a_torrent_leecher() -> peer::TorrentPeer { TorrentPeerBuilder::default() .with_number_of_bytes_left(1) .with_event_completed() diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index ecf1beae0..d1ae72924 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -8,10 +8,9 @@ use aquatic_udp_protocol::{ use super::connection_cookie::{check_connection_cookie, from_connection_id, into_connection_id, make_connection_cookie}; use crate::protocol::common::{InfoHash, MAX_SCRAPE_TORRENTS}; -use crate::tracker::peer::TorrentPeer; use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::TorrentError; -use crate::tracker::TorrentTracker; +use crate::tracker::{peer, TorrentTracker}; use crate::udp::errors::ServerError; use crate::udp::request::AnnounceRequestWrapper; @@ -106,7 +105,7 @@ pub async fn handle_announce( authenticate(&wrapped_announce_request.info_hash, tracker.clone()).await?; - let peer = TorrentPeer::from_udp_announce_request( + let peer = peer::TorrentPeer::from_udp_announce_request( &wrapped_announce_request.announce_request, remote_addr.ip(), tracker.config.get_ext_ip(), @@ -255,9 +254,8 @@ mod tests { use crate::config::Configuration; use crate::protocol::clock::{Current, Time}; use crate::protocol::common::PeerId; - use crate::tracker::peer::TorrentPeer; use crate::tracker::statistics::StatsTracker; - use crate::tracker::{mode, TorrentTracker}; + use crate::tracker::{mode, peer, TorrentTracker}; fn default_tracker_config() -> Arc { Arc::new(Configuration::default()) @@ -304,12 +302,12 @@ mod tests { } struct TorrentPeerBuilder { - peer: TorrentPeer, + peer: peer::TorrentPeer, } impl TorrentPeerBuilder { pub fn default() -> TorrentPeerBuilder { - let default_peer = 
TorrentPeer { + let default_peer = peer::TorrentPeer { peer_id: PeerId([255u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: Current::now(), @@ -336,7 +334,7 @@ mod tests { self } - pub fn into(self) -> TorrentPeer { + pub fn into(self) -> peer::TorrentPeer { self.peer } } From 3c2232388fa0bd79fe6b2e9068e6c2375202e5ed Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 21:49:03 +0100 Subject: [PATCH 0239/1003] clippy: fix src/tracker/statistics.rs --- src/tracker/mod.rs | 4 ++-- src/tracker/statistics.rs | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index fab254663..5877c7f21 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -13,7 +13,7 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; -use self::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; +use self::statistics::{Metrics, StatsRepository, TrackerStatisticsEvent, TrackerStatisticsEventSender}; use crate::config::Configuration; use crate::databases::database; use crate::databases::database::Database; @@ -244,7 +244,7 @@ impl TorrentTracker { self.torrents.read().await } - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { + pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { self.stats_repository.get_stats().await } diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 609f036aa..fd830fa88 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -25,7 +25,7 @@ pub enum TrackerStatisticsEvent { } #[derive(Debug)] -pub struct TrackerStatistics { +pub struct Metrics { pub tcp4_connections_handled: u64, pub tcp4_announces_handled: u64, pub tcp4_scrapes_handled: u64, @@ -40,13 +40,13 @@ pub struct TrackerStatistics { pub udp6_scrapes_handled: u64, } -impl Default for 
TrackerStatistics { +impl Default for Metrics { fn default() -> Self { Self::new() } } -impl TrackerStatistics { +impl Metrics { #[must_use] pub fn new() -> Self { Self { @@ -177,7 +177,7 @@ impl TrackerStatisticsEventSender for StatsEventSender { #[derive(Clone)] pub struct StatsRepository { - pub stats: Arc>, + pub stats: Arc>, } impl Default for StatsRepository { @@ -190,11 +190,11 @@ impl StatsRepository { #[must_use] pub fn new() -> Self { Self { - stats: Arc::new(RwLock::new(TrackerStatistics::new())), + stats: Arc::new(RwLock::new(Metrics::new())), } } - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { + pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { self.stats.read().await } @@ -275,7 +275,7 @@ impl StatsRepository { mod tests { mod stats_tracker { - use crate::tracker::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; + use crate::tracker::statistics::{Metrics, StatsTracker, TrackerStatisticsEvent}; #[tokio::test] async fn should_contain_the_tracker_statistics() { @@ -283,7 +283,7 @@ mod tests { let stats = stats_tracker.stats_repository.get_stats().await; - assert_eq!(stats.tcp4_announces_handled, TrackerStatistics::new().tcp4_announces_handled); + assert_eq!(stats.tcp4_announces_handled, Metrics::new().tcp4_announces_handled); } #[tokio::test] From 143a11e18b5970420a391f848449f19dab7f82da Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 22:18:48 +0100 Subject: [PATCH 0240/1003] refactor: cleanup src/tracker/statistics.rs naming --- src/http/handlers.rs | 11 ++- src/stats.rs | 8 +-- src/tracker/mod.rs | 13 ++-- src/tracker/statistics.rs | 137 +++++++++++++++++++------------------- src/udp/handlers.rs | 75 ++++++++++----------- tests/api.rs | 4 +- tests/udp.rs | 4 +- 7 files changed, 120 insertions(+), 132 deletions(-) diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 5dab842e2..5256ef291 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -13,9 
+13,8 @@ use super::{request, WebResult}; use crate::http::response::Error; use crate::protocol::common::InfoHash; use crate::tracker::key::Auth; -use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::{TorrentError, TorrentStats}; -use crate::tracker::{peer, TorrentTracker}; +use crate::tracker::{peer, statistics, TorrentTracker}; /// Authenticate `InfoHash` using optional `AuthKey` /// @@ -67,10 +66,10 @@ pub async fn handle_announce( // send stats event match announce_request.peer_addr { IpAddr::V4(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Announce).await; + tracker.send_stats_event(statistics::Event::Tcp4Announce).await; } IpAddr::V6(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Announce).await; + tracker.send_stats_event(statistics::Event::Tcp6Announce).await; } } @@ -127,10 +126,10 @@ pub async fn handle_scrape( // send stats event match scrape_request.peer_addr { IpAddr::V4(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Scrape).await; + tracker.send_stats_event(statistics::Event::Tcp4Scrape).await; } IpAddr::V6(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Scrape).await; + tracker.send_stats_event(statistics::Event::Tcp6Scrape).await; } } diff --git a/src/stats.rs b/src/stats.rs index 738909934..8f87c01a3 100644 --- a/src/stats.rs +++ b/src/stats.rs @@ -1,16 +1,16 @@ -use crate::tracker::statistics::{StatsRepository, StatsTracker, TrackerStatisticsEventSender}; +use crate::tracker::statistics; #[must_use] -pub fn setup_statistics(tracker_usage_statistics: bool) -> (Option>, StatsRepository) { +pub fn setup_statistics(tracker_usage_statistics: bool) -> (Option>, statistics::Repo) { let mut stats_event_sender = None; - let mut stats_tracker = StatsTracker::new(); + let mut stats_tracker = statistics::Keeper::new(); if tracker_usage_statistics { stats_event_sender = Some(stats_tracker.run_event_listener()); } - (stats_event_sender, 
stats_tracker.stats_repository) + (stats_event_sender, stats_tracker.repository) } #[cfg(test)] diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 5877c7f21..d0ab3e514 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -13,7 +13,6 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; -use self::statistics::{Metrics, StatsRepository, TrackerStatisticsEvent, TrackerStatisticsEventSender}; use crate::config::Configuration; use crate::databases::database; use crate::databases::database::Database; @@ -27,16 +26,16 @@ pub struct TorrentTracker { keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, - stats_event_sender: Option>, - stats_repository: StatsRepository, + stats_event_sender: Option>, + stats_repository: statistics::Repo, database: Box, } impl TorrentTracker { pub fn new( config: Arc, - stats_event_sender: Option>, - stats_repository: StatsRepository, + stats_event_sender: Option>, + stats_repository: statistics::Repo, ) -> Result { let database = database::connect(&config.db_driver, &config.db_path)?; @@ -244,11 +243,11 @@ impl TorrentTracker { self.torrents.read().await } - pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { + pub async fn get_stats(&self) -> RwLockReadGuard<'_, statistics::Metrics> { self.stats_repository.get_stats().await } - pub async fn send_stats_event(&self, event: TrackerStatisticsEvent) -> Option>> { + pub async fn send_stats_event(&self, event: statistics::Event) -> Option>> { match &self.stats_event_sender { None => None, Some(stats_event_sender) => stats_event_sender.send_event(event).await, diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index fd830fa88..b787e1267 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -5,13 +5,12 @@ use log::debug; #[cfg(test)] use mockall::{automock, predicate::str}; use tokio::sync::mpsc::error::SendError; -use tokio::sync::mpsc::{Receiver, Sender}; use 
tokio::sync::{mpsc, RwLock, RwLockReadGuard}; const CHANNEL_BUFFER_SIZE: usize = 65_535; #[derive(Debug, PartialEq, Eq)] -pub enum TrackerStatisticsEvent { +pub enum Event { Tcp4Announce, Tcp4Scrape, Tcp6Announce, @@ -66,91 +65,89 @@ impl Metrics { } } -pub struct StatsTracker { - pub stats_repository: StatsRepository, +pub struct Keeper { + pub repository: Repo, } -impl Default for StatsTracker { +impl Default for Keeper { fn default() -> Self { Self::new() } } -impl StatsTracker { +impl Keeper { #[must_use] pub fn new() -> Self { - Self { - stats_repository: StatsRepository::new(), - } + Self { repository: Repo::new() } } #[must_use] - pub fn new_active_instance() -> (Box, StatsRepository) { + pub fn new_active_instance() -> (Box, Repo) { let mut stats_tracker = Self::new(); let stats_event_sender = stats_tracker.run_event_listener(); - (stats_event_sender, stats_tracker.stats_repository) + (stats_event_sender, stats_tracker.repository) } - pub fn run_event_listener(&mut self) -> Box { - let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + pub fn run_event_listener(&mut self) -> Box { + let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); - let stats_repository = self.stats_repository.clone(); + let stats_repository = self.repository.clone(); tokio::spawn(async move { event_listener(receiver, stats_repository).await }); - Box::new(StatsEventSender { sender }) + Box::new(Sender { sender }) } } -async fn event_listener(mut receiver: Receiver, stats_repository: StatsRepository) { +async fn event_listener(mut receiver: mpsc::Receiver, stats_repository: Repo) { while let Some(event) = receiver.recv().await { event_handler(event, &stats_repository).await; } } -async fn event_handler(event: TrackerStatisticsEvent, stats_repository: &StatsRepository) { +async fn event_handler(event: Event, stats_repository: &Repo) { match event { // TCP4 - TrackerStatisticsEvent::Tcp4Announce => { + Event::Tcp4Announce => { 
stats_repository.increase_tcp4_announces().await; stats_repository.increase_tcp4_connections().await; } - TrackerStatisticsEvent::Tcp4Scrape => { + Event::Tcp4Scrape => { stats_repository.increase_tcp4_scrapes().await; stats_repository.increase_tcp4_connections().await; } // TCP6 - TrackerStatisticsEvent::Tcp6Announce => { + Event::Tcp6Announce => { stats_repository.increase_tcp6_announces().await; stats_repository.increase_tcp6_connections().await; } - TrackerStatisticsEvent::Tcp6Scrape => { + Event::Tcp6Scrape => { stats_repository.increase_tcp6_scrapes().await; stats_repository.increase_tcp6_connections().await; } // UDP4 - TrackerStatisticsEvent::Udp4Connect => { + Event::Udp4Connect => { stats_repository.increase_udp4_connections().await; } - TrackerStatisticsEvent::Udp4Announce => { + Event::Udp4Announce => { stats_repository.increase_udp4_announces().await; } - TrackerStatisticsEvent::Udp4Scrape => { + Event::Udp4Scrape => { stats_repository.increase_udp4_scrapes().await; } // UDP6 - TrackerStatisticsEvent::Udp6Connect => { + Event::Udp6Connect => { stats_repository.increase_udp6_connections().await; } - TrackerStatisticsEvent::Udp6Announce => { + Event::Udp6Announce => { stats_repository.increase_udp6_announces().await; } - TrackerStatisticsEvent::Udp6Scrape => { + Event::Udp6Scrape => { stats_repository.increase_udp6_scrapes().await; } } @@ -160,33 +157,33 @@ async fn event_handler(event: TrackerStatisticsEvent, stats_repository: &StatsRe #[async_trait] #[cfg_attr(test, automock)] -pub trait TrackerStatisticsEventSender: Sync + Send { - async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>>; +pub trait EventSender: Sync + Send { + async fn send_event(&self, event: Event) -> Option>>; } -pub struct StatsEventSender { - sender: Sender, +pub struct Sender { + sender: mpsc::Sender, } #[async_trait] -impl TrackerStatisticsEventSender for StatsEventSender { - async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { +impl EventSender 
for Sender { + async fn send_event(&self, event: Event) -> Option>> { Some(self.sender.send(event).await) } } #[derive(Clone)] -pub struct StatsRepository { +pub struct Repo { pub stats: Arc>, } -impl Default for StatsRepository { +impl Default for Repo { fn default() -> Self { Self::new() } } -impl StatsRepository { +impl Repo { #[must_use] pub fn new() -> Self { Self { @@ -275,37 +272,37 @@ impl StatsRepository { mod tests { mod stats_tracker { - use crate::tracker::statistics::{Metrics, StatsTracker, TrackerStatisticsEvent}; + use crate::tracker::statistics::{Event, Keeper, Metrics}; #[tokio::test] async fn should_contain_the_tracker_statistics() { - let stats_tracker = StatsTracker::new(); + let stats_tracker = Keeper::new(); - let stats = stats_tracker.stats_repository.get_stats().await; + let stats = stats_tracker.repository.get_stats().await; assert_eq!(stats.tcp4_announces_handled, Metrics::new().tcp4_announces_handled); } #[tokio::test] async fn should_create_an_event_sender_to_send_statistical_events() { - let mut stats_tracker = StatsTracker::new(); + let mut stats_tracker = Keeper::new(); let event_sender = stats_tracker.run_event_listener(); - let result = event_sender.send_event(TrackerStatisticsEvent::Udp4Connect).await; + let result = event_sender.send_event(Event::Udp4Connect).await; assert!(result.is_some()); } } mod event_handler { - use crate::tracker::statistics::{event_handler, StatsRepository, TrackerStatisticsEvent}; + use crate::tracker::statistics::{event_handler, Event, Repo}; #[tokio::test] async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp4Announce, &stats_repository).await; + event_handler(Event::Tcp4Announce, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -314,9 +311,9 @@ mod tests { #[tokio::test] async fn 
should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp4Announce, &stats_repository).await; + event_handler(Event::Tcp4Announce, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -325,9 +322,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp4_scrapes_counter_when_it_receives_a_tcp4_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp4Scrape, &stats_repository).await; + event_handler(Event::Tcp4Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -336,9 +333,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp4Scrape, &stats_repository).await; + event_handler(Event::Tcp4Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -347,9 +344,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp6Announce, &stats_repository).await; + event_handler(Event::Tcp6Announce, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -358,9 +355,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp6Announce, &stats_repository).await; + event_handler(Event::Tcp6Announce, &stats_repository).await; let stats = 
stats_repository.get_stats().await; @@ -369,9 +366,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp6_scrapes_counter_when_it_receives_a_tcp6_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp6Scrape, &stats_repository).await; + event_handler(Event::Tcp6Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -380,9 +377,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp6Scrape, &stats_repository).await; + event_handler(Event::Tcp6Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -391,9 +388,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_connections_counter_when_it_receives_a_udp4_connect_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp4Connect, &stats_repository).await; + event_handler(Event::Udp4Connect, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -402,9 +399,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_announces_counter_when_it_receives_a_udp4_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp4Announce, &stats_repository).await; + event_handler(Event::Udp4Announce, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -413,9 +410,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_scrapes_counter_when_it_receives_a_udp4_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp4Scrape, 
&stats_repository).await; + event_handler(Event::Udp4Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -424,9 +421,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_connections_counter_when_it_receives_a_udp6_connect_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp6Connect, &stats_repository).await; + event_handler(Event::Udp6Connect, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -435,9 +432,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_announces_counter_when_it_receives_a_udp6_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp6Announce, &stats_repository).await; + event_handler(Event::Udp6Announce, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -446,9 +443,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_scrapes_counter_when_it_receives_a_udp6_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp6Scrape, &stats_repository).await; + event_handler(Event::Udp6Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index d1ae72924..f460c1b7e 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -8,9 +8,8 @@ use aquatic_udp_protocol::{ use super::connection_cookie::{check_connection_cookie, from_connection_id, into_connection_id, make_connection_cookie}; use crate::protocol::common::{InfoHash, MAX_SCRAPE_TORRENTS}; -use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::TorrentError; -use crate::tracker::{peer, TorrentTracker}; +use crate::tracker::{peer, statistics, TorrentTracker}; use crate::udp::errors::ServerError; use 
crate::udp::request::AnnounceRequestWrapper; @@ -79,10 +78,10 @@ pub async fn handle_connect( // send stats event match remote_addr { SocketAddr::V4(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp4Connect).await; + tracker.send_stats_event(statistics::Event::Udp4Connect).await; } SocketAddr::V6(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp6Connect).await; + tracker.send_stats_event(statistics::Event::Udp6Connect).await; } } @@ -167,10 +166,10 @@ pub async fn handle_announce( // send stats event match remote_addr { SocketAddr::V4(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp4Announce).await; + tracker.send_stats_event(statistics::Event::Udp4Announce).await; } SocketAddr::V6(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp6Announce).await; + tracker.send_stats_event(statistics::Event::Udp6Announce).await; } } @@ -223,10 +222,10 @@ pub async fn handle_scrape( // send stats event match remote_addr { SocketAddr::V4(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp4Scrape).await; + tracker.send_stats_event(statistics::Event::Udp4Scrape).await; } SocketAddr::V6(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp6Scrape).await; + tracker.send_stats_event(statistics::Event::Udp6Scrape).await; } } @@ -254,8 +253,7 @@ mod tests { use crate::config::Configuration; use crate::protocol::clock::{Current, Time}; use crate::protocol::common::PeerId; - use crate::tracker::statistics::StatsTracker; - use crate::tracker::{mode, peer, TorrentTracker}; + use crate::tracker::{mode, peer, statistics, TorrentTracker}; fn default_tracker_config() -> Arc { Arc::new(Configuration::default()) @@ -281,7 +279,7 @@ mod tests { } fn initialized_tracker(configuration: Arc) -> Arc { - let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); + let (stats_event_sender, stats_repository) = statistics::Keeper::new_active_instance(); Arc::new(TorrentTracker::new(configuration, 
Some(stats_event_sender), stats_repository).unwrap()) } @@ -375,8 +373,7 @@ mod tests { use mockall::predicate::eq; use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr}; - use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::TorrentTracker; + use crate::tracker::{statistics, TorrentTracker}; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_connect; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -427,10 +424,10 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(TrackerStatisticsEvent::Udp4Connect)) + .with(eq(statistics::Event::Udp4Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); @@ -438,7 +435,7 @@ mod tests { let client_socket_address = sample_ipv4_socket_address(); let torrent_tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) .await @@ -447,16 +444,16 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - 
.with(eq(TrackerStatisticsEvent::Udp6Connect)) + .with(eq(statistics::Event::Udp6Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); let torrent_tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) .await @@ -548,8 +545,7 @@ mod tests { use mockall::predicate::eq; use crate::protocol::common::PeerId; - use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::TorrentTracker; + use crate::tracker::{statistics, TorrentTracker}; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -692,16 +688,16 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(TrackerStatisticsEvent::Udp4Announce)) + .with(eq(statistics::Event::Udp4Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_announce( @@ -773,8 +769,7 @@ mod tests { use mockall::predicate::eq; use crate::protocol::common::PeerId; - use crate::tracker::statistics::{MockTrackerStatisticsEventSender, 
StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::TorrentTracker; + use crate::tracker::{statistics, TorrentTracker}; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -924,16 +919,16 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(TrackerStatisticsEvent::Udp6Announce)) + .with(eq(statistics::Event::Udp6Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); let remote_addr = sample_ipv6_remote_addr(); @@ -953,7 +948,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::tracker::statistics::StatsTracker; + use crate::tracker::statistics::Keeper; use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_announce; @@ -963,7 +958,7 @@ mod tests { #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); let tracker = Arc::new(TorrentTracker::new(configuration, Some(stats_event_sender), 
stats_repository).unwrap()); @@ -1233,24 +1228,23 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::TorrentTracker; + use crate::tracker::{statistics, TorrentTracker}; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr}; #[tokio::test] async fn should_send_the_upd4_scrape_event() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(TrackerStatisticsEvent::Udp4Scrape)) + .with(eq(statistics::Event::Udp4Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) @@ -1266,24 +1260,23 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::TorrentTracker; + use crate::tracker::{statistics, TorrentTracker}; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr}; #[tokio::test] async fn should_send_the_upd6_scrape_event() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - 
.with(eq(TrackerStatisticsEvent::Udp6Scrape)) + .with(eq(statistics::Event::Udp6Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) diff --git a/tests/api.rs b/tests/api.rs index 380ab90ca..a4043fe7c 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -25,7 +25,7 @@ mod tracker_api { use torrust_tracker::protocol::common::{InfoHash, PeerId}; use torrust_tracker::tracker::key::Auth; use torrust_tracker::tracker::peer::TorrentPeer; - use torrust_tracker::tracker::statistics::StatsTracker; + use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::tracker::TorrentTracker; use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; @@ -276,7 +276,7 @@ mod tracker_api { lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); // Initialize stats tracker - let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker let tracker = match TorrentTracker::new(configuration.clone(), Some(stats_event_sender), stats_repository) { diff --git a/tests/udp.rs b/tests/udp.rs index b365c4fc6..fabca137a 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -20,7 +20,7 @@ mod udp_tracker_server { use tokio::task::JoinHandle; use torrust_tracker::config::Configuration; use torrust_tracker::jobs::udp_tracker; - use torrust_tracker::tracker::statistics::StatsTracker; + use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::tracker::TorrentTracker; use 
torrust_tracker::udp::MAX_PACKET_SIZE; use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; @@ -58,7 +58,7 @@ mod udp_tracker_server { lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); // Initialize stats tracker - let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker let tracker = match TorrentTracker::new(configuration.clone(), Some(stats_event_sender), stats_repository) { From 81e72da07fe4359384c058540c3c33c6353b1ad2 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 22:26:32 +0100 Subject: [PATCH 0241/1003] clippy: fix src/tracker/torrent.rs --- src/http/handlers.rs | 19 ++++++++--------- src/tracker/mod.rs | 21 +++++++++---------- src/tracker/torrent.rs | 47 +++++++++++++++++++++--------------------- src/udp/handlers.rs | 15 +++++++------- 4 files changed, 50 insertions(+), 52 deletions(-) diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 5256ef291..ace20ada9 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -13,26 +13,25 @@ use super::{request, WebResult}; use crate::http::response::Error; use crate::protocol::common::InfoHash; use crate::tracker::key::Auth; -use crate::tracker::torrent::{TorrentError, TorrentStats}; -use crate::tracker::{peer, statistics, TorrentTracker}; +use crate::tracker::{peer, statistics, torrent, TorrentTracker}; /// Authenticate `InfoHash` using optional `AuthKey` /// /// # Errors /// -/// Will return `ServerError` that wraps the `TorrentError` if unable to `authenticate_request`. +/// Will return `ServerError` that wraps the `Error` if unable to `authenticate_request`. 
pub async fn authenticate( info_hash: &InfoHash, auth_key: &Option, tracker: Arc, ) -> Result<(), ServerError> { tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { - TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, - TorrentError::NoPeersFound => ServerError::NoPeersFound, - TorrentError::CouldNotSendResponse => ServerError::InternalServerError, - TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, + torrent::Error::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, + torrent::Error::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, + torrent::Error::PeerKeyNotValid => ServerError::PeerKeyNotValid, + torrent::Error::NoPeersFound => ServerError::NoPeersFound, + torrent::Error::CouldNotSendResponse => ServerError::InternalServerError, + torrent::Error::InvalidInfoHash => ServerError::InvalidInfoHash, }) } @@ -140,7 +139,7 @@ pub async fn handle_scrape( #[allow(clippy::ptr_arg)] fn send_announce_response( announce_request: &request::Announce, - torrent_stats: &TorrentStats, + torrent_stats: &torrent::Stats, peers: &Vec, interval: u32, interval_min: u32, diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index d0ab3e514..b3a7ab6d6 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -18,14 +18,13 @@ use crate::databases::database; use crate::databases::database::Database; use crate::protocol::common::InfoHash; use crate::tracker::key::Auth; -use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; pub struct TorrentTracker { pub config: Arc, mode: mode::Tracker, keys: RwLock>, whitelist: RwLock>, - torrents: RwLock>, + torrents: RwLock>, stats_event_sender: Option>, stats_repository: statistics::Repo, database: Box, @@ -142,7 +141,7 @@ impl TorrentTracker { Ok(()) } - pub async fn authenticate_request(&self, info_hash: 
&InfoHash, key: &Option) -> Result<(), TorrentError> { + pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), torrent::Error> { // no authentication needed in public mode if self.is_public() { return Ok(()); @@ -153,18 +152,18 @@ impl TorrentTracker { match key { Some(key) => { if self.verify_auth_key(key).await.is_err() { - return Err(TorrentError::PeerKeyNotValid); + return Err(torrent::Error::PeerKeyNotValid); } } None => { - return Err(TorrentError::PeerNotAuthenticated); + return Err(torrent::Error::PeerNotAuthenticated); } } } // check if info_hash is whitelisted if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await { - return Err(TorrentError::TorrentNotWhitelisted); + return Err(torrent::Error::TorrentNotWhitelisted); } Ok(()) @@ -181,7 +180,7 @@ impl TorrentTracker { continue; } - let torrent_entry = TorrentEntry { + let torrent_entry = torrent::Entry { peers: Default::default(), completed, }; @@ -212,11 +211,11 @@ impl TorrentTracker { } } - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::TorrentPeer) -> TorrentStats { + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::TorrentPeer) -> torrent::Stats { let mut torrents = self.torrents.write().await; let torrent_entry = match torrents.entry(*info_hash) { - Entry::Vacant(vacant) => vacant.insert(TorrentEntry::new()), + Entry::Vacant(vacant) => vacant.insert(torrent::Entry::new()), Entry::Occupied(entry) => entry.into_mut(), }; @@ -232,14 +231,14 @@ impl TorrentTracker { let (seeders, completed, leechers) = torrent_entry.get_stats(); - TorrentStats { + torrent::Stats { completed, seeders, leechers, } } - pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { + pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { self.torrents.read().await } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 734e7a66c..21bcfc513 100644 
--- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -9,16 +9,16 @@ use crate::protocol::clock::{Current, TimeNow}; use crate::protocol::common::{PeerId, MAX_SCRAPE_TORRENTS}; #[derive(Serialize, Deserialize, Clone, Debug)] -pub struct TorrentEntry { +pub struct Entry { #[serde(skip)] pub peers: std::collections::BTreeMap, pub completed: u32, } -impl TorrentEntry { +impl Entry { #[must_use] - pub fn new() -> TorrentEntry { - TorrentEntry { + pub fn new() -> Entry { + Entry { peers: std::collections::BTreeMap::new(), completed: 0, } @@ -72,6 +72,7 @@ impl TorrentEntry { .collect() } + #[allow(clippy::cast_possible_truncation)] #[must_use] pub fn get_stats(&self) -> (u32, u32, u32) { let seeders: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; @@ -85,21 +86,21 @@ impl TorrentEntry { } } -impl Default for TorrentEntry { +impl Default for Entry { fn default() -> Self { Self::new() } } #[derive(Debug)] -pub struct TorrentStats { +pub struct Stats { pub completed: u32, pub seeders: u32, pub leechers: u32, } #[derive(Debug)] -pub enum TorrentError { +pub enum Error { TorrentNotWhitelisted, PeerNotAuthenticated, PeerKeyNotValid, @@ -119,7 +120,7 @@ mod tests { use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; use crate::protocol::common::PeerId; use crate::tracker::peer; - use crate::tracker::torrent::TorrentEntry; + use crate::tracker::torrent::Entry; struct TorrentPeerBuilder { peer: peer::TorrentPeer, @@ -189,14 +190,14 @@ mod tests { #[test] fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = TorrentEntry::new(); + let torrent_entry = Entry::new(); assert_eq!(torrent_entry.get_peers(None).len(), 0); } #[test] fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // 
Add the peer @@ -207,7 +208,7 @@ mod tests { #[test] fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // Add the peer @@ -217,7 +218,7 @@ mod tests { #[test] fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // Add the peer @@ -229,7 +230,7 @@ mod tests { #[test] fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // Add the peer @@ -241,7 +242,7 @@ mod tests { #[test] fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // Add the peer @@ -255,7 +256,7 @@ mod tests { #[test] fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( ) { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Add a peer that did not exist before in the entry @@ -266,7 +267,7 @@ mod tests { #[test] fn a_torrent_entry_could_filter_out_peers_with_a_given_socket_address() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let peer_socket_address = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); torrent_entry.update_peer(&torrent_peer); // Add peer @@ -287,7 +288,7 @@ mod tests { #[test] fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { @@ -299,12 +300,12 @@ mod tests { let peers = torrent_entry.get_peers(None); - assert_eq!(peers.len(), 74) + assert_eq!(peers.len(), 74); } #[test] fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_seeder = a_torrent_seeder(); torrent_entry.update_peer(&torrent_seeder); // Add seeder @@ -314,7 +315,7 @@ mod tests { #[test] fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_leecher = a_torrent_leecher(); torrent_entry.update_peer(&torrent_leecher); // Add leecher @@ -325,7 +326,7 @@ mod tests { #[test] fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( ) { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // Add the peer @@ -340,7 +341,7 @@ mod tests { #[test] fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Announce "Completed" torrent download event. 
@@ -354,7 +355,7 @@ mod tests { #[test] fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let timeout = 120u32; diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index f460c1b7e..632180a92 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -8,8 +8,7 @@ use aquatic_udp_protocol::{ use super::connection_cookie::{check_connection_cookie, from_connection_id, into_connection_id, make_connection_cookie}; use crate::protocol::common::{InfoHash, MAX_SCRAPE_TORRENTS}; -use crate::tracker::torrent::TorrentError; -use crate::tracker::{peer, statistics, TorrentTracker}; +use crate::tracker::{peer, statistics, torrent, TorrentTracker}; use crate::udp::errors::ServerError; use crate::udp::request::AnnounceRequestWrapper; @@ -18,12 +17,12 @@ pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Ok(_) => Ok(()), Err(e) => { let err = match e { - TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, - TorrentError::NoPeersFound => ServerError::NoPeersFound, - TorrentError::CouldNotSendResponse => ServerError::InternalServerError, - TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, + torrent::Error::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, + torrent::Error::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, + torrent::Error::PeerKeyNotValid => ServerError::PeerKeyNotValid, + torrent::Error::NoPeersFound => ServerError::NoPeersFound, + torrent::Error::CouldNotSendResponse => ServerError::InternalServerError, + torrent::Error::InvalidInfoHash => ServerError::InvalidInfoHash, }; Err(err) From 78221b6174a435ea815f9aefa1f84aa6d9ea4f8e Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 22:41:46 +0100 Subject: [PATCH 0242/1003] 
clippy: fix src/tracker/mod.rs --- src/api/server.rs | 22 ++++++------- src/http/filters.rs | 10 +++--- src/http/handlers.rs | 8 ++--- src/http/routes.rs | 8 ++--- src/http/server.rs | 8 ++--- src/jobs/http_tracker.rs | 4 +-- src/jobs/torrent_cleanup.rs | 4 +-- src/jobs/tracker_api.rs | 4 +-- src/jobs/udp_tracker.rs | 4 +-- src/main.rs | 5 ++- src/setup.rs | 4 +-- src/tracker/mod.rs | 64 +++++++++++++++++++++++++++-------- src/udp/handlers.rs | 66 ++++++++++++++++++------------------- src/udp/server.rs | 6 ++-- tests/api.rs | 7 ++-- tests/udp.rs | 5 ++- 16 files changed, 133 insertions(+), 96 deletions(-) diff --git a/src/api/server.rs b/src/api/server.rs index f9e5bc368..fac25e297 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -11,7 +11,7 @@ use super::resources::auth_key_resource::AuthKeyResource; use super::resources::stats_resource::StatsResource; use super::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; use crate::protocol::common::InfoHash; -use crate::tracker::TorrentTracker; +use crate::tracker; #[derive(Deserialize, Debug)] struct TorrentInfoQuery { @@ -60,7 +60,7 @@ fn authenticate(tokens: HashMap) -> impl Filter) -> impl warp::Future { +pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl warp::Future { // GET /api/torrents?offset=:u32&limit=:u32 // View torrent list let api_torrents = tracker.clone(); @@ -72,7 +72,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war let tracker = api_torrents.clone(); (limits, tracker) }) - .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| async move { + .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| async move { let offset = limits.offset.unwrap_or(0); let limit = min(limits.limit.unwrap_or(1000), 4000); @@ -103,7 +103,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war .and(filters::path::path("stats")) .and(filters::path::end()) .map(move || api_stats.clone()) - .and_then(|tracker: Arc| async move { + 
.and_then(|tracker: Arc| async move { let mut results = StatsResource { torrents: 0, seeders: 0, @@ -165,7 +165,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war let tracker = t2.clone(); (info_hash, tracker) }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { let db = tracker.get_torrents().await; let torrent_entry_option = db.get(&info_hash); @@ -201,7 +201,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war let tracker = t3.clone(); (info_hash, tracker) }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { match tracker.remove_torrent_from_whitelist(&info_hash).await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(_) => Err(warp::reject::custom(ActionStatus::Err { @@ -221,7 +221,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war let tracker = t4.clone(); (info_hash, tracker) }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { match tracker.add_torrent_to_whitelist(&info_hash).await { Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(..) => Err(warp::reject::custom(ActionStatus::Err { @@ -241,7 +241,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war let tracker = t5.clone(); (seconds_valid, tracker) }) - .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { + .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { Ok(auth_key) => Ok(warp::reply::json(&AuthKeyResource::from(auth_key))), Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { @@ -261,7 +261,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war let tracker = t6.clone(); (key, tracker) }) - .and_then(|(key, tracker): (String, Arc)| async move { + .and_then(|(key, tracker): (String, Arc)| async move { match tracker.remove_auth_key(&key).await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(_) => Err(warp::reject::custom(ActionStatus::Err { @@ -278,7 +278,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war .and(filters::path::path("reload")) .and(filters::path::end()) .map(move || t7.clone()) - .and_then(|tracker: Arc| async move { + .and_then(|tracker: Arc| async move { match tracker.load_whitelist().await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(_) => Err(warp::reject::custom(ActionStatus::Err { @@ -295,7 +295,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war .and(filters::path::path("reload")) .and(filters::path::end()) .map(move || t8.clone()) - .and_then(|tracker: Arc| async move { + .and_then(|tracker: Arc| async move { match tracker.load_keys().await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(_) => Err(warp::reject::custom(ActionStatus::Err { diff --git a/src/http/filters.rs b/src/http/filters.rs index 3375c781f..2c3ab626d 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -9,12 +9,14 @@ use super::errors::ServerError; use super::request::{Announce, AnnounceRequestQuery, Scrape}; use super::WebResult; use crate::protocol::common::{InfoHash, PeerId, MAX_SCRAPE_TORRENTS}; +use crate::tracker; use crate::tracker::key::Auth; -use crate::tracker::TorrentTracker; -/// Pass Arc along +/// Pass Arc along #[must_use] -pub fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { +pub fn with_tracker( + tracker: Arc, +) -> impl Filter,), Error = Infallible> + Clone { warp::any().map(move || tracker.clone()) } @@ -30,7 +32,7 @@ pub fn with_peer_id() -> impl Filter + C 
warp::filters::query::raw().and_then(|q| async move { peer_id(&q) }) } -/// Pass Arc along +/// Pass Arc along #[must_use] pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() diff --git a/src/http/handlers.rs b/src/http/handlers.rs index ace20ada9..d4ae76e65 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -13,7 +13,7 @@ use super::{request, WebResult}; use crate::http::response::Error; use crate::protocol::common::InfoHash; use crate::tracker::key::Auth; -use crate::tracker::{peer, statistics, torrent, TorrentTracker}; +use crate::tracker::{self, peer, statistics, torrent}; /// Authenticate `InfoHash` using optional `AuthKey` /// @@ -23,7 +23,7 @@ use crate::tracker::{peer, statistics, torrent, TorrentTracker}; pub async fn authenticate( info_hash: &InfoHash, auth_key: &Option, - tracker: Arc, + tracker: Arc, ) -> Result<(), ServerError> { tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { torrent::Error::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, @@ -43,7 +43,7 @@ pub async fn authenticate( pub async fn handle_announce( announce_request: request::Announce, auth_key: Option, - tracker: Arc, + tracker: Arc, ) -> WebResult { authenticate(&announce_request.info_hash, &auth_key, tracker.clone()) .await @@ -89,7 +89,7 @@ pub async fn handle_announce( pub async fn handle_scrape( scrape_request: request::Scrape, auth_key: Option, - tracker: Arc, + tracker: Arc, ) -> WebResult { let mut files: HashMap = HashMap::new(); let db = tracker.get_torrents().await; diff --git a/src/http/routes.rs b/src/http/routes.rs index 992febc2c..c46c502e4 100644 --- a/src/http/routes.rs +++ b/src/http/routes.rs @@ -5,18 +5,18 @@ use warp::{Filter, Rejection}; use super::filters::{with_announce_request, with_auth_key, with_scrape_request, with_tracker}; use super::handlers::{handle_announce, handle_scrape, send_error}; -use crate::tracker::TorrentTracker; +use crate::tracker; /// All routes 
#[must_use] -pub fn routes(tracker: Arc) -> impl Filter + Clone { +pub fn routes(tracker: Arc) -> impl Filter + Clone { announce(tracker.clone()) .or(scrape(tracker)) .recover(|q| async move { send_error(&q) }) } /// GET /announce or /announce/ -fn announce(tracker: Arc) -> impl Filter + Clone { +fn announce(tracker: Arc) -> impl Filter + Clone { warp::path::path("announce") .and(warp::filters::method::get()) .and(with_announce_request(tracker.config.on_reverse_proxy)) @@ -26,7 +26,7 @@ fn announce(tracker: Arc) -> impl Filter -fn scrape(tracker: Arc) -> impl Filter + Clone { +fn scrape(tracker: Arc) -> impl Filter + Clone { warp::path::path("scrape") .and(warp::filters::method::get()) .and(with_scrape_request(tracker.config.on_reverse_proxy)) diff --git a/src/http/server.rs b/src/http/server.rs index 755fdc73a..894d3e911 100644 --- a/src/http/server.rs +++ b/src/http/server.rs @@ -2,17 +2,17 @@ use std::net::SocketAddr; use std::sync::Arc; use super::routes; -use crate::tracker::TorrentTracker; +use crate::tracker; -/// Server that listens on HTTP, needs a `TorrentTracker` +/// Server that listens on HTTP, needs a `tracker::TorrentTracker` #[derive(Clone)] pub struct Http { - tracker: Arc, + tracker: Arc, } impl Http { #[must_use] - pub fn new(tracker: Arc) -> Http { + pub fn new(tracker: Arc) -> Http { Http { tracker } } diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 276da8099..b8f031f5a 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -6,13 +6,13 @@ use tokio::task::JoinHandle; use crate::config::HttpTracker; use crate::http::server::Http; -use crate::tracker::TorrentTracker; +use crate::tracker; /// # Panics /// /// It would panic if the `config::HttpTracker` struct would contain an inappropriate values. 
#[must_use] -pub fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { +pub fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.parse::().unwrap(); let ssl_enabled = config.ssl_enabled; let ssl_cert_path = config.ssl_cert_path.clone(); diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 7bdfc1677..073ceda61 100644 --- a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -5,10 +5,10 @@ use log::info; use tokio::task::JoinHandle; use crate::config::Configuration; -use crate::tracker::TorrentTracker; +use crate::tracker; #[must_use] -pub fn start_job(config: &Configuration, tracker: &Arc) -> JoinHandle<()> { +pub fn start_job(config: &Configuration, tracker: &Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(tracker); let interval = config.inactive_peer_cleanup_interval; diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index 4e2dcd0c9..7787ea3f4 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -6,12 +6,12 @@ use tokio::task::JoinHandle; use crate::api::server; use crate::config::Configuration; -use crate::tracker::TorrentTracker; +use crate::tracker; #[derive(Debug)] pub struct ApiServerJobStarted(); -pub async fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { +pub async fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { let bind_addr = config .http_api .bind_address diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 1b4bc745c..d5fdae4c1 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -4,11 +4,11 @@ use log::{error, info, warn}; use tokio::task::JoinHandle; use crate::config::UdpTracker; -use crate::tracker::TorrentTracker; +use crate::tracker; use crate::udp::server::UdpServer; #[must_use] -pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { +pub fn start_job(config: &UdpTracker, tracker: Arc) -> 
JoinHandle<()> { let bind_addr = config.bind_address.clone(); tokio::spawn(async move { diff --git a/src/main.rs b/src/main.rs index baffc6fa5..a7316cef2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,8 +3,7 @@ use std::sync::Arc; use log::info; use torrust_tracker::config::Configuration; use torrust_tracker::stats::setup_statistics; -use torrust_tracker::tracker::TorrentTracker; -use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time}; +use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, tracker}; #[tokio::main] async fn main() { @@ -28,7 +27,7 @@ async fn main() { let (stats_event_sender, stats_repository) = setup_statistics(config.tracker_usage_statistics); // Initialize Torrust tracker - let tracker = match TorrentTracker::new(config.clone(), stats_event_sender, stats_repository) { + let tracker = match tracker::Tracker::new(&config.clone(), stats_event_sender, stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) diff --git a/src/setup.rs b/src/setup.rs index cfca5eb9e..a7b7c5a82 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -5,9 +5,9 @@ use tokio::task::JoinHandle; use crate::config::Configuration; use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, udp_tracker}; -use crate::tracker::TorrentTracker; +use crate::tracker; -pub async fn setup(config: &Configuration, tracker: Arc) -> Vec> { +pub async fn setup(config: &Configuration, tracker: Arc) -> Vec> { let mut jobs: Vec> = Vec::new(); // Load peer keys diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index b3a7ab6d6..fcd9ebe2d 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -19,7 +19,7 @@ use crate::databases::database::Database; use crate::protocol::common::InfoHash; use crate::tracker::key::Auth; -pub struct TorrentTracker { +pub struct Tracker { pub config: Arc, mode: mode::Tracker, keys: RwLock>, @@ -30,15 +30,18 @@ pub struct TorrentTracker { database: Box, } -impl TorrentTracker { +impl Tracker 
{ + /// # Errors + /// + /// Will return a `r2d2::Error` if unable to connect to database. pub fn new( - config: Arc, + config: &Arc, stats_event_sender: Option>, stats_repository: statistics::Repo, - ) -> Result { + ) -> Result { let database = database::connect(&config.db_driver, &config.db_path)?; - Ok(TorrentTracker { + Ok(Tracker { config: config.clone(), mode: config.mode, keys: RwLock::new(std::collections::HashMap::new()), @@ -62,6 +65,9 @@ impl TorrentTracker { self.mode == mode::Tracker::Listed || self.mode == mode::Tracker::PrivateListed } + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the database. pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { let auth_key = key::generate(lifetime); self.database.add_key_to_keys(&auth_key).await?; @@ -69,12 +75,18 @@ impl TorrentTracker { Ok(auth_key) } + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `key` to the database. pub async fn remove_auth_key(&self, key: &str) -> Result<(), database::Error> { self.database.remove_key_from_keys(key).await?; self.keys.write().await.remove(key); Ok(()) } + /// # Errors + /// + /// Will return a `key::Error` if unable to get any `auth_key`. pub async fn verify_auth_key(&self, auth_key: &Auth) -> Result<(), key::Error> { match self.keys.read().await.get(&auth_key.key) { None => Err(key::Error::KeyInvalid), @@ -82,6 +94,9 @@ impl TorrentTracker { } } + /// # Errors + /// + /// Will return a `database::Error` if unable to `load_keys` from the database. pub async fn load_keys(&self) -> Result<(), database::Error> { let keys_from_database = self.database.load_keys().await?; let mut keys = self.keys.write().await; @@ -89,13 +104,17 @@ impl TorrentTracker { keys.clear(); for key in keys_from_database { - let _ = keys.insert(key.key.clone(), key); + keys.insert(key.key.clone(), key); } Ok(()) } - // Adding torrents is not relevant to public trackers. 
+ /// Adding torrents is not relevant to public trackers. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { self.add_torrent_to_database_whitelist(info_hash).await?; self.add_torrent_to_memory_whitelist(info_hash).await; @@ -117,7 +136,11 @@ impl TorrentTracker { self.whitelist.write().await.insert(*info_hash) } - // Removing torrents is not relevant to public trackers. + /// Removing torrents is not relevant to public trackers. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { self.database.remove_info_hash_from_whitelist(*info_hash).await?; self.whitelist.write().await.remove(info_hash); @@ -128,6 +151,9 @@ impl TorrentTracker { self.whitelist.read().await.contains(info_hash) } + /// # Errors + /// + /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. pub async fn load_whitelist(&self) -> Result<(), database::Error> { let whitelisted_torrents_from_database = self.database.load_whitelist().await?; let mut whitelist = self.whitelist.write().await; @@ -141,6 +167,13 @@ impl TorrentTracker { Ok(()) } + /// # Errors + /// + /// Will return a `torrent::Error::PeerKeyNotValid` if the `key` is not valid. + /// + /// Will return a `torrent::Error::PeerNotAuthenticated` if the `key` is `None`. + /// + /// Will return a `torrent::Error::TorrentNotWhitelisted` if the the Tracker is in listed mode and the `info_hash` is not whitelisted. 
pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), torrent::Error> { // no authentication needed in public mode if self.is_public() { @@ -169,7 +202,11 @@ impl TorrentTracker { Ok(()) } - // Loading the torrents from database into memory + /// Loading the torrents from database into memory + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; let mut torrents = self.torrents.write().await; @@ -181,7 +218,7 @@ impl TorrentTracker { } let torrent_entry = torrent::Entry { - peers: Default::default(), + peers: BTreeMap::default(), completed, }; @@ -262,9 +299,10 @@ impl TorrentTracker { torrents_lock.retain(|_, torrent_entry| { torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); - match self.config.persistent_torrent_completed_stat { - true => torrent_entry.completed > 0 || !torrent_entry.peers.is_empty(), - false => !torrent_entry.peers.is_empty(), + if self.config.persistent_torrent_completed_stat { + torrent_entry.completed > 0 || !torrent_entry.peers.is_empty() + } else { + !torrent_entry.peers.is_empty() } }); } else { diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 632180a92..bf34326c6 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -8,11 +8,11 @@ use aquatic_udp_protocol::{ use super::connection_cookie::{check_connection_cookie, from_connection_id, into_connection_id, make_connection_cookie}; use crate::protocol::common::{InfoHash, MAX_SCRAPE_TORRENTS}; -use crate::tracker::{peer, statistics, torrent, TorrentTracker}; +use crate::tracker::{self, peer, statistics, torrent}; use crate::udp::errors::ServerError; use crate::udp::request::AnnounceRequestWrapper; -pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { 
+pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { match tracker.authenticate_request(info_hash, &None).await { Ok(_) => Ok(()), Err(e) => { @@ -30,7 +30,7 @@ pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> } } -pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { +pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| ServerError::InternalServerError) { Ok(request) => { let transaction_id = match &request { @@ -52,7 +52,7 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: A pub async fn handle_request( request: Request, remote_addr: SocketAddr, - tracker: Arc, + tracker: Arc, ) -> Result { match request { Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker).await, @@ -64,7 +64,7 @@ pub async fn handle_request( pub async fn handle_connect( remote_addr: SocketAddr, request: &ConnectRequest, - tracker: Arc, + tracker: Arc, ) -> Result { let connection_cookie = make_connection_cookie(&remote_addr); let connection_id = into_connection_id(&connection_cookie); @@ -90,7 +90,7 @@ pub async fn handle_connect( pub async fn handle_announce( remote_addr: SocketAddr, announce_request: &AnnounceRequest, - tracker: Arc, + tracker: Arc, ) -> Result { match check_connection_cookie(&remote_addr, &from_connection_id(&announce_request.connection_id)) { Ok(_) => {} @@ -179,7 +179,7 @@ pub async fn handle_announce( pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, - tracker: Arc, + tracker: Arc, ) -> Result { let db = tracker.get_torrents().await; @@ -252,18 +252,18 @@ mod tests { use crate::config::Configuration; use crate::protocol::clock::{Current, Time}; use crate::protocol::common::PeerId; - use crate::tracker::{mode, peer, statistics, TorrentTracker}; + use 
crate::tracker::{self, mode, peer, statistics}; fn default_tracker_config() -> Arc { Arc::new(Configuration::default()) } - fn initialized_public_tracker() -> Arc { + fn initialized_public_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Tracker::Public).into()); initialized_tracker(configuration) } - fn initialized_private_tracker() -> Arc { + fn initialized_private_tracker() -> Arc { let configuration = Arc::new( TrackerConfigurationBuilder::default() .with_mode(mode::Tracker::Private) @@ -272,14 +272,14 @@ mod tests { initialized_tracker(configuration) } - fn initialized_whitelisted_tracker() -> Arc { + fn initialized_whitelisted_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Tracker::Listed).into()); initialized_tracker(configuration) } - fn initialized_tracker(configuration: Arc) -> Arc { + fn initialized_tracker(configuration: Arc) -> Arc { let (stats_event_sender, stats_repository) = statistics::Keeper::new_active_instance(); - Arc::new(TorrentTracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()) + Arc::new(tracker::Tracker::new(&configuration, Some(stats_event_sender), stats_repository).unwrap()) } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -372,7 +372,7 @@ mod tests { use mockall::predicate::eq; use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr}; - use crate::tracker::{statistics, TorrentTracker}; + use crate::tracker::{self, statistics}; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_connect; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -434,7 +434,7 @@ mod tests { let client_socket_address = sample_ipv4_socket_address(); let torrent_tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + 
tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) .await @@ -452,7 +452,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let torrent_tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) .await @@ -544,7 +544,7 @@ mod tests { use mockall::predicate::eq; use crate::protocol::common::PeerId; - use crate::tracker::{statistics, TorrentTracker}; + use crate::tracker::{self, statistics}; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -641,7 +641,7 @@ mod tests { assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } - async fn add_a_torrent_peer_using_ipv6(tracker: Arc) { + async fn add_a_torrent_peer_using_ipv6(tracker: Arc) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -659,7 +659,7 @@ mod tests { .await; } - async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { + async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) @@ -696,7 +696,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + 
tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_announce( @@ -768,7 +768,7 @@ mod tests { use mockall::predicate::eq; use crate::protocol::common::PeerId; - use crate::tracker::{statistics, TorrentTracker}; + use crate::tracker::{self, statistics}; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -870,7 +870,7 @@ mod tests { assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); } - async fn add_a_torrent_peer_using_ipv4(tracker: Arc) { + async fn add_a_torrent_peer_using_ipv4(tracker: Arc) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -887,7 +887,7 @@ mod tests { .await; } - async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { + async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); let client_port = 8080; @@ -927,7 +927,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); let remote_addr = sample_ipv6_remote_addr(); @@ -947,8 +947,8 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use crate::tracker; use crate::tracker::statistics::Keeper; - use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -959,7 +959,7 @@ mod tests { let 
configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); let tracker = - Arc::new(TorrentTracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()); + Arc::new(tracker::Tracker::new(&configuration, Some(stats_event_sender), stats_repository).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -1010,7 +1010,7 @@ mod tests { use super::TorrentPeerBuilder; use crate::protocol::common::PeerId; - use crate::tracker::TorrentTracker; + use crate::tracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -1051,7 +1051,7 @@ mod tests { ); } - async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { + async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { let peer_id = PeerId([255u8; 20]); let peer = TorrentPeerBuilder::default() @@ -1075,7 +1075,7 @@ mod tests { } } - async fn add_a_sample_seeder_and_scrape(tracker: Arc) -> Response { + async fn add_a_sample_seeder_and_scrape(tracker: Arc) -> Response { let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -1227,7 +1227,7 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::tracker::{statistics, TorrentTracker}; + use crate::tracker::{self, statistics}; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr}; @@ -1243,7 +1243,7 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + 
tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) @@ -1259,7 +1259,7 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::tracker::{statistics, TorrentTracker}; + use crate::tracker::{self, statistics}; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr}; @@ -1275,7 +1275,7 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) diff --git a/src/udp/server.rs b/src/udp/server.rs index 5c215f9ec..705a6c263 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -6,17 +6,17 @@ use aquatic_udp_protocol::Response; use log::{debug, info}; use tokio::net::UdpSocket; -use crate::tracker::TorrentTracker; +use crate::tracker; use crate::udp::handlers::handle_packet; use crate::udp::MAX_PACKET_SIZE; pub struct UdpServer { socket: Arc, - tracker: Arc, + tracker: Arc, } impl UdpServer { - pub async fn new(tracker: Arc, bind_address: &str) -> tokio::io::Result { + pub async fn new(tracker: Arc, bind_address: &str) -> tokio::io::Result { let socket = UdpSocket::bind(bind_address).await?; Ok(UdpServer { diff --git a/tests/api.rs b/tests/api.rs index a4043fe7c..72c3c65c7 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -26,8 +26,7 @@ mod tracker_api { use torrust_tracker::tracker::key::Auth; use torrust_tracker::tracker::peer::TorrentPeer; use torrust_tracker::tracker::statistics::Keeper; - use torrust_tracker::tracker::TorrentTracker; - use torrust_tracker::{ephemeral_instance_keys, logging, 
static_time}; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use crate::common::ephemeral_random_port; @@ -237,7 +236,7 @@ mod tracker_api { struct ApiServer { pub started: AtomicBool, pub job: Option>, - pub tracker: Option>, + pub tracker: Option>, pub connection_info: Option, } @@ -279,7 +278,7 @@ mod tracker_api { let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker - let tracker = match TorrentTracker::new(configuration.clone(), Some(stats_event_sender), stats_repository) { + let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) diff --git a/tests/udp.rs b/tests/udp.rs index fabca137a..e93894843 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -21,9 +21,8 @@ mod udp_tracker_server { use torrust_tracker::config::Configuration; use torrust_tracker::jobs::udp_tracker; use torrust_tracker::tracker::statistics::Keeper; - use torrust_tracker::tracker::TorrentTracker; use torrust_tracker::udp::MAX_PACKET_SIZE; - use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use crate::common::ephemeral_random_port; @@ -61,7 +60,7 @@ mod udp_tracker_server { let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker - let tracker = match TorrentTracker::new(configuration.clone(), Some(stats_event_sender), stats_repository) { + let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) From 4a34f685fc3c88e01269ab68b805f07a25ab4a03 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 02:10:57 +0100 Subject: [PATCH 0243/1003] clippy: fix src/udp/connection_cookie.rs --- src/tracker/peer.rs | 
5 ++-- src/udp/connection_cookie.rs | 54 +++++++++++++++++++----------------- src/udp/handlers.rs | 54 ++++++++++++++++++------------------ 3 files changed, 59 insertions(+), 54 deletions(-) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index d590b590d..dd49ffaa7 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -131,7 +131,8 @@ mod test { }; use crate::tracker::peer::TorrentPeer; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; + // todo: duplicate functions is PR 82. Remove duplication once both PR are merged. fn sample_ipv4_remote_addr() -> SocketAddr { @@ -153,7 +154,7 @@ mod test { let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); let default_request = AnnounceRequest { - connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), transaction_id: TransactionId(0i32), info_hash: info_hash_aquatic, peer_id: AquaticPeerId(*b"-qB00000000000000000"), diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index 5a1e564dd..8a544fa6a 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -22,17 +22,21 @@ pub fn into_connection_id(connection_cookie: &Cookie) -> ConnectionId { } #[must_use] -pub fn make_connection_cookie(remote_address: &SocketAddr) -> Cookie { +pub fn make(remote_address: &SocketAddr) -> Cookie { let time_extent = cookie_builder::get_last_time_extent(); //println!("remote_address: {remote_address:?}, time_extent: {time_extent:?}, cookie: {cookie:?}"); cookie_builder::build(remote_address, &time_extent) } -pub fn check_connection_cookie( - remote_address: &SocketAddr, - connection_cookie: &Cookie, -) -> Result { +/// # Panics +/// +/// It would panic if the `COOKIE_LIFETIME` constant would be an unreasonably large number. 
+/// +/// # Errors +/// +/// Will return a `ServerError::InvalidConnectionId` if the supplied `connection_cookie` fails to verify. +pub fn check(remote_address: &SocketAddr, connection_cookie: &Cookie) -> Result { // we loop backwards testing each time_extent until we find one that matches. // (or the lifetime of time_extents is exhausted) for offset in 0..=COOKIE_LIFETIME.amount { @@ -85,19 +89,19 @@ mod tests { use super::cookie_builder::{self}; use crate::protocol::clock::time_extent::{self, Extent}; use crate::protocol::clock::{Stopped, StoppedTime}; - use crate::udp::connection_cookie::{check_connection_cookie, make_connection_cookie, Cookie, COOKIE_LIFETIME}; + use crate::udp::connection_cookie::{check, make, Cookie, COOKIE_LIFETIME}; // #![feature(const_socketaddr)] // const REMOTE_ADDRESS_IPV4_ZERO: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); #[test] fn it_should_make_a_connection_cookie() { - let cookie = make_connection_cookie(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); - // Note: This constant may need to be updated in the future as the hash is not guaranteed to to be stable between versions. 
const ID_COOKIE: Cookie = [23, 204, 198, 29, 48, 180, 62, 19]; - assert_eq!(cookie, ID_COOKIE) + let cookie = make(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); + + assert_eq!(cookie, ID_COOKIE); } #[test] @@ -114,7 +118,7 @@ mod tests { //remote_address: 127.0.0.1:8080, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [212, 9, 204, 223, 176, 190, 150, 153] //remote_address: 127.0.0.1:8080, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [212, 9, 204, 223, 176, 190, 150, 153] - assert_eq!(cookie, cookie_2) + assert_eq!(cookie, cookie_2); } #[test] @@ -132,7 +136,7 @@ mod tests { //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] //remote_address: 255.255.255.255:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [217, 87, 239, 178, 182, 126, 66, 166] - assert_ne!(cookie, cookie_2) + assert_ne!(cookie, cookie_2); } #[test] @@ -150,7 +154,7 @@ mod tests { //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] //remote_address: [::]:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [99, 119, 230, 177, 20, 220, 163, 187] - assert_ne!(cookie, cookie_2) + assert_ne!(cookie, cookie_2); } #[test] @@ -168,7 +172,7 @@ mod tests { //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] //remote_address: 0.0.0.0:1, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [38, 8, 0, 102, 92, 170, 220, 11] - assert_ne!(cookie, cookie_2) + assert_ne!(cookie, cookie_2); } #[test] @@ -186,51 +190,51 @@ mod tests { //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 18446744073709551615.999999999s, amount: 18446744073709551615 }, cookie: 
[87, 111, 109, 125, 182, 206, 3, 201] - assert_ne!(cookie, cookie_2) + assert_ne!(cookie, cookie_2); } #[test] fn it_should_make_different_cookies_for_the_next_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let cookie = make_connection_cookie(&remote_address); + let cookie = make(&remote_address); Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); - let cookie_next = make_connection_cookie(&remote_address); + let cookie_next = make(&remote_address); - assert_ne!(cookie, cookie_next) + assert_ne!(cookie, cookie_next); } #[test] fn it_should_be_valid_for_this_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let cookie = make_connection_cookie(&remote_address); + let cookie = make(&remote_address); - check_connection_cookie(&remote_address, &cookie).unwrap(); + check(&remote_address, &cookie).unwrap(); } #[test] fn it_should_be_valid_for_the_next_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let cookie = make_connection_cookie(&remote_address); + let cookie = make(&remote_address); Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); - check_connection_cookie(&remote_address, &cookie).unwrap(); + check(&remote_address, &cookie).unwrap(); } #[test] fn it_should_be_valid_for_the_last_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let cookie = make_connection_cookie(&remote_address); + let cookie = make(&remote_address); Stopped::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); - check_connection_cookie(&remote_address, &cookie).unwrap(); + check(&remote_address, &cookie).unwrap(); } #[test] @@ -238,10 +242,10 @@ mod tests { fn it_should_be_not_valid_after_their_last_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let cookie = make_connection_cookie(&remote_address); + let cookie = make(&remote_address); 
Stopped::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); - check_connection_cookie(&remote_address, &cookie).unwrap(); + check(&remote_address, &cookie).unwrap(); } } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index bf34326c6..cdf12ed6b 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -6,7 +6,7 @@ use aquatic_udp_protocol::{ NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; -use super::connection_cookie::{check_connection_cookie, from_connection_id, into_connection_id, make_connection_cookie}; +use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use crate::protocol::common::{InfoHash, MAX_SCRAPE_TORRENTS}; use crate::tracker::{self, peer, statistics, torrent}; use crate::udp::errors::ServerError; @@ -66,7 +66,7 @@ pub async fn handle_connect( request: &ConnectRequest, tracker: Arc, ) -> Result { - let connection_cookie = make_connection_cookie(&remote_addr); + let connection_cookie = make(&remote_addr); let connection_id = into_connection_id(&connection_cookie); let response = Response::from(ConnectResponse { @@ -92,7 +92,7 @@ pub async fn handle_announce( announce_request: &AnnounceRequest, tracker: Arc, ) -> Result { - match check_connection_cookie(&remote_addr, &from_connection_id(&announce_request.connection_id)) { + match check(&remote_addr, &from_connection_id(&announce_request.connection_id)) { Ok(_) => {} Err(e) => { return Err(e); @@ -373,7 +373,7 @@ mod tests { use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr}; use crate::tracker::{self, statistics}; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_connect; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -396,7 +396,7 @@ mod tests { assert_eq!( 
response, Response::Connect(ConnectResponse { - connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), transaction_id: request.transaction_id }) ); @@ -415,7 +415,7 @@ mod tests { assert_eq!( response, Response::Connect(ConnectResponse { - connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), transaction_id: request.transaction_id }) ); @@ -469,7 +469,7 @@ mod tests { TransactionId, }; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::tests::sample_ipv4_remote_addr; struct AnnounceRequestBuilder { @@ -483,7 +483,7 @@ mod tests { let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); let default_request = AnnounceRequest { - connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), transaction_id: TransactionId(0i32), info_hash: info_hash_aquatic, peer_id: AquaticPeerId([255u8; 20]), @@ -545,7 +545,7 @@ mod tests { use crate::protocol::common::PeerId; use crate::tracker::{self, statistics}; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ @@ -564,7 +564,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) 
.with_ip_address(client_ip) @@ -588,7 +588,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); let response = handle_announce(remote_addr, &request, initialized_public_tracker()) @@ -627,7 +627,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) @@ -662,7 +662,7 @@ mod tests { async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap() @@ -714,7 +714,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use crate::protocol::common::PeerId; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{initialized_public_tracker, TorrentPeerBuilder}; @@ -731,7 +731,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let request = AnnounceRequestBuilder::default() - 
.with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) @@ -769,7 +769,7 @@ mod tests { use crate::protocol::common::PeerId; use crate::tracker::{self, statistics}; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ @@ -789,7 +789,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) @@ -816,7 +816,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); let response = handle_announce(remote_addr, &request, initialized_public_tracker()) @@ -855,7 +855,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) @@ -893,7 +893,7 @@ mod tests { let client_port = 8080; let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - 
.with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap() @@ -933,7 +933,7 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let announce_request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); handle_announce(remote_addr, &announce_request, tracker.clone()) @@ -949,7 +949,7 @@ mod tests { use crate::tracker; use crate::tracker::statistics::Keeper; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::TrackerConfigurationBuilder; @@ -974,7 +974,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) @@ -1011,7 +1011,7 @@ mod tests { use super::TorrentPeerBuilder; use crate::protocol::common::PeerId; use crate::tracker; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -1031,7 +1031,7 @@ mod tests { let info_hashes = vec![info_hash]; let request = ScrapeRequest { - connection_id: into_connection_id(&make_connection_cookie(&remote_addr)), + connection_id: 
into_connection_id(&make(&remote_addr)), transaction_id: TransactionId(0i32), info_hashes, }; @@ -1069,7 +1069,7 @@ mod tests { let info_hashes = vec![*info_hash]; ScrapeRequest { - connection_id: into_connection_id(&make_connection_cookie(remote_addr)), + connection_id: into_connection_id(&make(remote_addr)), transaction_id: TransactionId(0i32), info_hashes, } @@ -1214,7 +1214,7 @@ mod tests { let info_hashes = vec![info_hash]; ScrapeRequest { - connection_id: into_connection_id(&make_connection_cookie(remote_addr)), + connection_id: into_connection_id(&make(remote_addr)), transaction_id: TransactionId(0i32), info_hashes, } From 6e2a34226b1dc17f17e9420dd25e47255fd45fe2 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 19:35:53 +0100 Subject: [PATCH 0244/1003] clippy: fix src/databases/database.rs --- src/databases/database.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/databases/database.rs b/src/databases/database.rs index 5186f96b3..5e4a7c1f9 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -61,14 +61,15 @@ pub trait Database: Sync + Send { async fn remove_key_from_keys(&self, key: &str) -> Result; async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { - if let Err(e) = self.get_info_hash_from_whitelist(&info_hash.clone().to_string()).await { - if let Error::QueryReturnedNoRows = e { - return Ok(false); - } else { - return Err(e); - } - } - Ok(true) + self.get_info_hash_from_whitelist(&info_hash.clone().to_string()) + .await + .map_or_else( + |e| match e { + Error::QueryReturnedNoRows => Ok(false), + e => Err(e), + }, + |_| Ok(true), + ) } } From 0a7d9276b4958f56a5e4099c24cee08ed2c8084a Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 19:44:59 +0100 Subject: [PATCH 0245/1003] clippy: fix src/jobs/tracker_api.rs --- src/jobs/tracker_api.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/jobs/tracker_api.rs 
b/src/jobs/tracker_api.rs index 7787ea3f4..2c00aa453 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -11,6 +11,9 @@ use crate::tracker; #[derive(Debug)] pub struct ApiServerJobStarted(); +/// # Panics +/// +/// It would panic if unable to send the `ApiServerJobStarted` notice. pub async fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { let bind_addr = config .http_api @@ -26,7 +29,7 @@ pub async fn start_job(config: &Configuration, tracker: Arc) - let join_handle = tokio::spawn(async move { let handel = server::start(bind_addr, &tracker); - assert!(tx.send(ApiServerJobStarted()).is_ok(), "the start job dropped"); + tx.send(ApiServerJobStarted()).expect("the start job dropped"); handel.await; }); @@ -34,7 +37,7 @@ pub async fn start_job(config: &Configuration, tracker: Arc) - // Wait until the API server job is running match rx.await { Ok(_msg) => info!("Torrust API server started"), - Err(_) => panic!("the api server dropped"), + Err(e) => panic!("the api server dropped: {e}"), } join_handle From 220f83af3d6d98bd8fd181b14601b249fcc00772 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 20:14:12 +0100 Subject: [PATCH 0246/1003] clippy: fix src/udp/handlers.rs --- src/udp/errors.rs | 15 ++++++++ src/udp/handlers.rs | 84 +++++++++++++++++++++------------------------ 2 files changed, 54 insertions(+), 45 deletions(-) diff --git a/src/udp/errors.rs b/src/udp/errors.rs index 8d7b04b4f..f90149a99 100644 --- a/src/udp/errors.rs +++ b/src/udp/errors.rs @@ -1,5 +1,7 @@ use thiserror::Error; +use crate::tracker::torrent; + #[derive(Error, Debug)] pub enum ServerError { #[error("internal server error")] @@ -32,3 +34,16 @@ pub enum ServerError { #[error("bad request")] BadRequest, } + +impl From for ServerError { + fn from(e: torrent::Error) -> Self { + match e { + torrent::Error::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, + torrent::Error::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, 
+ torrent::Error::PeerKeyNotValid => ServerError::PeerKeyNotValid, + torrent::Error::NoPeersFound => ServerError::NoPeersFound, + torrent::Error::CouldNotSendResponse => ServerError::InternalServerError, + torrent::Error::InvalidInfoHash => ServerError::InvalidInfoHash, + } + } +} diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index cdf12ed6b..dde8d14ae 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -8,28 +8,10 @@ use aquatic_udp_protocol::{ use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use crate::protocol::common::{InfoHash, MAX_SCRAPE_TORRENTS}; -use crate::tracker::{self, peer, statistics, torrent}; +use crate::tracker::{self, peer, statistics}; use crate::udp::errors::ServerError; use crate::udp::request::AnnounceRequestWrapper; -pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { - match tracker.authenticate_request(info_hash, &None).await { - Ok(_) => Ok(()), - Err(e) => { - let err = match e { - torrent::Error::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - torrent::Error::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - torrent::Error::PeerKeyNotValid => ServerError::PeerKeyNotValid, - torrent::Error::NoPeersFound => ServerError::NoPeersFound, - torrent::Error::CouldNotSendResponse => ServerError::InternalServerError, - torrent::Error::InvalidInfoHash => ServerError::InvalidInfoHash, - }; - - Err(err) - } - } -} - pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| ServerError::InternalServerError) { Ok(request) => { @@ -41,14 +23,17 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: A match handle_request(request, remote_addr, tracker).await { Ok(response) => response, - Err(e) => handle_error(e, transaction_id), + Err(e) => handle_error(&e, transaction_id), } } // bad request 
- Err(_) => handle_error(ServerError::BadRequest, TransactionId(0)), + Err(_) => handle_error(&ServerError::BadRequest, TransactionId(0)), } } +/// # Errors +/// +/// If a error happens in the `handle_request` function, it will just return the `ServerError`. pub async fn handle_request( request: Request, remote_addr: SocketAddr, @@ -61,6 +46,9 @@ pub async fn handle_request( } } +/// # Errors +/// +/// This function dose not ever return an error. pub async fn handle_connect( remote_addr: SocketAddr, request: &ConnectRequest, @@ -87,21 +75,21 @@ pub async fn handle_connect( Ok(response) } +/// # Errors +/// +/// If a error happens in the `handle_announce` function, it will just return the `ServerError`. pub async fn handle_announce( remote_addr: SocketAddr, announce_request: &AnnounceRequest, tracker: Arc, ) -> Result { - match check(&remote_addr, &from_connection_id(&announce_request.connection_id)) { - Ok(_) => {} - Err(e) => { - return Err(e); - } - } + check(&remote_addr, &from_connection_id(&announce_request.connection_id))?; let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request.clone()); - authenticate(&wrapped_announce_request.info_hash, tracker.clone()).await?; + tracker + .authenticate_request(&wrapped_announce_request.info_hash, &None) + .await?; let peer = peer::TorrentPeer::from_udp_announce_request( &wrapped_announce_request.announce_request, @@ -120,12 +108,13 @@ pub async fn handle_announce( .get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr) .await; + #[allow(clippy::cast_possible_truncation)] let announce_response = if remote_addr.is_ipv4() { Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, - announce_interval: AnnounceInterval(tracker.config.announce_interval as i32), - leechers: NumberOfPeers(torrent_stats.leechers as i32), - seeders: NumberOfPeers(torrent_stats.seeders as i32), + announce_interval: 
AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), + leechers: NumberOfPeers(i64::from(torrent_stats.leechers) as i32), + seeders: NumberOfPeers(i64::from(torrent_stats.seeders) as i32), peers: peers .iter() .filter_map(|peer| { @@ -143,9 +132,9 @@ pub async fn handle_announce( } else { Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, - announce_interval: AnnounceInterval(tracker.config.announce_interval as i32), - leechers: NumberOfPeers(torrent_stats.leechers as i32), - seeders: NumberOfPeers(torrent_stats.seeders as i32), + announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), + leechers: NumberOfPeers(i64::from(torrent_stats.leechers) as i32), + seeders: NumberOfPeers(i64::from(torrent_stats.seeders) as i32), peers: peers .iter() .filter_map(|peer| { @@ -175,7 +164,11 @@ pub async fn handle_announce( Ok(announce_response) } -// todo: refactor this, db lock can be a lot shorter +/// # Errors +/// +/// This function dose not ever return an error. 
+/// +/// TODO: refactor this, db lock can be a lot shorter pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, @@ -190,13 +183,14 @@ pub async fn handle_scrape( let scrape_entry = match db.get(&info_hash) { Some(torrent_info) => { - if authenticate(&info_hash, tracker.clone()).await.is_ok() { + if tracker.authenticate_request(&info_hash, &None).await.is_ok() { let (seeders, completed, leechers) = torrent_info.get_stats(); + #[allow(clippy::cast_possible_truncation)] TorrentScrapeStatistics { - seeders: NumberOfPeers(seeders as i32), - completed: NumberOfDownloads(completed as i32), - leechers: NumberOfPeers(leechers as i32), + seeders: NumberOfPeers(i64::from(seeders) as i32), + completed: NumberOfDownloads(i64::from(completed) as i32), + leechers: NumberOfPeers(i64::from(leechers) as i32), } } else { TorrentScrapeStatistics { @@ -234,7 +228,7 @@ pub async fn handle_scrape( })) } -fn handle_error(e: ServerError, transaction_id: TransactionId) -> Response { +fn handle_error(e: &ServerError, transaction_id: TransactionId) -> Response { let message = e.to_string(); Response::from(ErrorResponse { transaction_id, @@ -260,7 +254,7 @@ mod tests { fn initialized_public_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Tracker::Public).into()); - initialized_tracker(configuration) + initialized_tracker(&configuration) } fn initialized_private_tracker() -> Arc { @@ -269,17 +263,17 @@ mod tests { .with_mode(mode::Tracker::Private) .into(), ); - initialized_tracker(configuration) + initialized_tracker(&configuration) } fn initialized_whitelisted_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Tracker::Listed).into()); - initialized_tracker(configuration) + initialized_tracker(&configuration) } - fn initialized_tracker(configuration: Arc) -> Arc { + fn initialized_tracker(configuration: &Arc) -> Arc { let (stats_event_sender, stats_repository) = 
statistics::Keeper::new_active_instance(); - Arc::new(tracker::Tracker::new(&configuration, Some(stats_event_sender), stats_repository).unwrap()) + Arc::new(tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()) } fn sample_ipv4_remote_addr() -> SocketAddr { From aa30bb1c933b6091d9eae1fe790b00a349a52a6d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 20:15:56 +0100 Subject: [PATCH 0247/1003] clippy: fix src/udp/request.rs --- src/udp/handlers.rs | 2 +- src/udp/request.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index dde8d14ae..274af1e2c 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -85,7 +85,7 @@ pub async fn handle_announce( ) -> Result { check(&remote_addr, &from_connection_id(&announce_request.connection_id))?; - let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request.clone()); + let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request); tracker .authenticate_request(&wrapped_announce_request.info_hash, &None) diff --git a/src/udp/request.rs b/src/udp/request.rs index 53d646f1a..34139384b 100644 --- a/src/udp/request.rs +++ b/src/udp/request.rs @@ -24,7 +24,7 @@ pub struct AnnounceRequestWrapper { impl AnnounceRequestWrapper { #[must_use] - pub fn new(announce_request: AnnounceRequest) -> Self { + pub fn new(announce_request: &AnnounceRequest) -> Self { AnnounceRequestWrapper { announce_request: announce_request.clone(), info_hash: InfoHash(announce_request.info_hash.0), From 436a0c1e03b09878bbb16102f83fd3350f95e054 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 20:25:56 +0100 Subject: [PATCH 0248/1003] clippy: fix src/udp/server.rs --- src/jobs/udp_tracker.rs | 4 ++-- src/udp/server.rs | 21 ++++++++++++++------- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index d5fdae4c1..57369f660 100644 
--- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -5,14 +5,14 @@ use tokio::task::JoinHandle; use crate::config::UdpTracker; use crate::tracker; -use crate::udp::server::UdpServer; +use crate::udp::server::Udp; #[must_use] pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.clone(); tokio::spawn(async move { - match UdpServer::new(tracker, &bind_addr).await { + match Udp::new(tracker, &bind_addr).await { Ok(udp_server) => { info!("Starting UDP server on: {}", bind_addr); udp_server.start().await; diff --git a/src/udp/server.rs b/src/udp/server.rs index 705a6c263..5bd835365 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -10,21 +10,27 @@ use crate::tracker; use crate::udp::handlers::handle_packet; use crate::udp::MAX_PACKET_SIZE; -pub struct UdpServer { +pub struct Udp { socket: Arc, tracker: Arc, } -impl UdpServer { - pub async fn new(tracker: Arc, bind_address: &str) -> tokio::io::Result { +impl Udp { + /// # Errors + /// + /// Will return `Err` unable to bind to the supplied `bind_address`. + pub async fn new(tracker: Arc, bind_address: &str) -> tokio::io::Result { let socket = UdpSocket::bind(bind_address).await?; - Ok(UdpServer { + Ok(Udp { socket: Arc::new(socket), tracker, }) } + /// # Panics + /// + /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. 
pub async fn start(&self) { loop { let mut data = [0; MAX_PACKET_SIZE]; @@ -43,7 +49,7 @@ impl UdpServer { debug!("{:?}", payload); let response = handle_packet(remote_addr, payload, tracker).await; - UdpServer::send_response(socket, remote_addr, response).await; + Udp::send_response(socket, remote_addr, response).await; } } } @@ -57,11 +63,12 @@ impl UdpServer { match response.write(&mut cursor) { Ok(_) => { + #[allow(clippy::cast_possible_truncation)] let position = cursor.position() as usize; let inner = cursor.get_ref(); debug!("{:?}", &inner[..position]); - UdpServer::send_packet(socket, &remote_addr, &inner[..position]).await; + Udp::send_packet(socket, &remote_addr, &inner[..position]).await; } Err(_) => { debug!("could not write response to bytes."); @@ -71,6 +78,6 @@ impl UdpServer { async fn send_packet(socket: Arc, remote_addr: &SocketAddr, payload: &[u8]) { // doesn't matter if it reaches or not - let _ = socket.send_to(payload, remote_addr).await; + drop(socket.send_to(payload, remote_addr).await); } } From 6564c10de70ebd1fda24443537205bf5186be44c Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 20:30:19 +0100 Subject: [PATCH 0249/1003] clippy: fix src/protocol/crypto.rs --- src/protocol/crypto.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/protocol/crypto.rs b/src/protocol/crypto.rs index 6e1517ef8..2d3f8f6fa 100644 --- a/src/protocol/crypto.rs +++ b/src/protocol/crypto.rs @@ -48,12 +48,12 @@ pub mod keys { #[test] fn the_default_seed_and_the_zeroed_seed_should_be_the_same_when_testing() { - assert_eq!(DefaultSeed::get_seed(), ZeroedTestSeed::get_seed()) + assert_eq!(DefaultSeed::get_seed(), ZeroedTestSeed::get_seed()); } #[test] fn the_default_seed_and_the_instance_seed_should_be_different_when_testing() { - assert_ne!(DefaultSeed::get_seed(), InstanceSeed::get_seed()) + assert_ne!(DefaultSeed::get_seed(), InstanceSeed::get_seed()); } } @@ -79,12 +79,12 @@ pub mod keys { #[test] fn 
it_should_have_a_zero_test_seed() { - assert_eq!(*ZEROED_TEST_SEED, [0u8; 32]) + assert_eq!(*ZEROED_TEST_SEED, [0u8; 32]); } #[test] fn it_should_default_to_zeroed_seed_when_testing() { - assert_eq!(*DEFAULT_SEED, *ZEROED_TEST_SEED) + assert_eq!(*DEFAULT_SEED, *ZEROED_TEST_SEED); } #[test] From baba21b31f669904c2528b87d91dc07edbcaa60b Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 20:37:27 +0100 Subject: [PATCH 0250/1003] clippy: fix tests/udp.rs --- tests/udp.rs | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/tests/udp.rs b/tests/udp.rs index e93894843..8bad37dbe 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -48,7 +48,7 @@ mod udp_tracker_server { } } - pub async fn start(&mut self, configuration: Arc) { + pub fn start(&mut self, configuration: &Arc) { if !self.started.load(Ordering::Relaxed) { // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); @@ -68,7 +68,7 @@ mod udp_tracker_server { }; // Initialize logging - logging::setup(&configuration); + logging::setup(configuration); let udp_tracker_config = &configuration.udp_trackers[0]; @@ -82,9 +82,9 @@ mod udp_tracker_server { } } - async fn new_running_udp_server(configuration: Arc) -> UdpServer { + fn new_running_udp_server(configuration: &Arc) -> UdpServer { let mut udp_server = UdpServer::new(); - udp_server.start(configuration).await; + udp_server.start(configuration); udp_server } @@ -101,7 +101,7 @@ mod udp_tracker_server { } async fn connect(&self, remote_address: &str) { - self.socket.connect(remote_address).await.unwrap() + self.socket.connect(remote_address).await.unwrap(); } async fn send(&self, bytes: &[u8]) -> usize { @@ -134,12 +134,13 @@ mod udp_tracker_server { let request_data = match request.write(&mut cursor) { Ok(_) => { + #[allow(clippy::cast_possible_truncation)] let position = cursor.position() as usize; let inner_request_buffer = cursor.get_ref(); // Return slice which contains 
written request data &inner_request_buffer[..position] } - Err(_) => panic!("could not write request to bytes."), + Err(e) => panic!("could not write request to bytes: {e}."), }; self.udp_client.send(request_data).await @@ -199,7 +200,7 @@ mod udp_tracker_server { async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { let configuration = tracker_configuration(); - let udp_server = new_running_udp_server(configuration).await; + let udp_server = new_running_udp_server(&configuration); let client = new_connected_udp_client(&udp_server.bind_address.unwrap()).await; @@ -216,7 +217,7 @@ mod udp_tracker_server { async fn should_return_a_connect_response_when_the_client_sends_a_connection_request() { let configuration = tracker_configuration(); - let udp_server = new_running_udp_server(configuration).await; + let udp_server = new_running_udp_server(&configuration); let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; @@ -248,7 +249,7 @@ mod udp_tracker_server { async fn should_return_an_announce_response_when_the_client_sends_an_announce_request() { let configuration = tracker_configuration(); - let udp_server = new_running_udp_server(configuration).await; + let udp_server = new_running_udp_server(&configuration); let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; @@ -282,7 +283,7 @@ mod udp_tracker_server { async fn should_return_a_scrape_response_when_the_client_sends_a_scrape_request() { let configuration = tracker_configuration(); - let udp_server = new_running_udp_server(configuration).await; + let udp_server = new_running_udp_server(&configuration); let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; From 0d162a12880754412a989cda09ce91c03dceb6e5 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 28 Nov 2022 18:51:58 +0100 Subject: [PATCH 0251/1003] refactor: correct naming of structs and enums --- 
src/api/resources/auth_key_resource.rs | 20 +- src/api/resources/torrent_resource.rs | 7 +- src/api/server.rs | 6 +- src/config.rs | 34 +-- src/databases/database.rs | 2 +- src/databases/mysql.rs | 3 +- src/databases/sqlite.rs | 2 +- src/http/{errors.rs => error.rs} | 8 +- src/http/filters.rs | 37 +-- src/http/handlers.rs | 47 ++-- src/http/mod.rs | 2 +- src/http/request.rs | 7 +- src/http/response.rs | 6 +- src/protocol/common.rs | 318 ------------------------- src/protocol/crypto.rs | 30 +-- src/protocol/info_hash.rs | 190 +++++++++++++++ src/protocol/mod.rs | 1 + src/tracker/mod.rs | 10 +- src/tracker/mode.rs | 2 +- src/tracker/peer.rs | 144 ++++++++++- src/tracker/torrent.rs | 13 +- src/udp/connection_cookie.rs | 10 +- src/udp/{errors.rs => error.rs} | 18 +- src/udp/handlers.rs | 65 +++-- src/udp/mod.rs | 2 +- src/udp/request.rs | 8 +- tests/api.rs | 10 +- 27 files changed, 495 insertions(+), 507 deletions(-) rename src/http/{errors.rs => error.rs} (86%) create mode 100644 src/protocol/info_hash.rs rename src/udp/{errors.rs => error.rs} (57%) diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resources/auth_key_resource.rs index 9b3cc9646..b575984db 100644 --- a/src/api/resources/auth_key_resource.rs +++ b/src/api/resources/auth_key_resource.rs @@ -6,13 +6,13 @@ use crate::protocol::clock::DurationSinceUnixEpoch; use crate::tracker::key::Auth; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct AuthKeyResource { +pub struct AuthKey { pub key: String, pub valid_until: Option, } -impl From for Auth { - fn from(auth_key_resource: AuthKeyResource) -> Self { +impl From for Auth { + fn from(auth_key_resource: AuthKey) -> Self { Auth { key: auth_key_resource.key, valid_until: auth_key_resource @@ -22,9 +22,9 @@ impl From for Auth { } } -impl From for AuthKeyResource { +impl From for AuthKey { fn from(auth_key: Auth) -> Self { - AuthKeyResource { + AuthKey { key: auth_key.key, valid_until: auth_key.valid_until.map(|valid_until| 
valid_until.as_secs()), } @@ -35,7 +35,7 @@ impl From for AuthKeyResource { mod tests { use std::time::Duration; - use super::AuthKeyResource; + use super::AuthKey; use crate::protocol::clock::{Current, TimeNow}; use crate::tracker::key::Auth; @@ -43,7 +43,7 @@ mod tests { fn it_should_be_convertible_into_an_auth_key() { let duration_in_secs = 60; - let auth_key_resource = AuthKeyResource { + let auth_key_resource = AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(duration_in_secs), }; @@ -67,8 +67,8 @@ mod tests { }; assert_eq!( - AuthKeyResource::from(auth_key), - AuthKeyResource { + AuthKey::from(auth_key), + AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(duration_in_secs) } @@ -78,7 +78,7 @@ mod tests { #[test] fn it_should_be_convertible_into_json() { assert_eq!( - serde_json::to_string(&AuthKeyResource { + serde_json::to_string(&AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(60) }) diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs index eb9620d23..4063b95f5 100644 --- a/src/api/resources/torrent_resource.rs +++ b/src/api/resources/torrent_resource.rs @@ -1,7 +1,6 @@ use serde::{Deserialize, Serialize}; -use crate::protocol::common::PeerId; -use crate::tracker::peer::TorrentPeer; +use crate::tracker::peer::{self, TorrentPeer}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct TorrentResource { @@ -42,8 +41,8 @@ pub struct PeerIdResource { pub client: Option, } -impl From for PeerIdResource { - fn from(peer_id: PeerId) -> Self { +impl From for PeerIdResource { + fn from(peer_id: peer::Id) -> Self { PeerIdResource { id: peer_id.get_id(), client: peer_id.get_client_name().map(std::string::ToString::to_string), diff --git a/src/api/server.rs b/src/api/server.rs index fac25e297..61fd8ed3d 100644 --- a/src/api/server.rs +++ b/src/api/server.rs 
@@ -7,10 +7,10 @@ use std::time::Duration; use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; -use super::resources::auth_key_resource::AuthKeyResource; +use super::resources::auth_key_resource::AuthKey; use super::resources::stats_resource::StatsResource; use super::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; -use crate::protocol::common::InfoHash; +use crate::protocol::info_hash::InfoHash; use crate::tracker; #[derive(Deserialize, Debug)] @@ -243,7 +243,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl w }) .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => Ok(warp::reply::json(&AuthKeyResource::from(auth_key))), + Ok(auth_key) => Ok(warp::reply::json(&AuthKey::from(auth_key))), Err(..) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to generate key".into(), })), diff --git a/src/config.rs b/src/config.rs index 9f6ca7092..67177aca1 100644 --- a/src/config.rs +++ b/src/config.rs @@ -41,7 +41,7 @@ pub struct HttpApi { #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { pub log_level: Option, - pub mode: mode::Tracker, + pub mode: mode::Mode, pub db_driver: Drivers, pub db_path: String, pub announce_interval: u32, @@ -59,7 +59,7 @@ pub struct Configuration { } #[derive(Debug)] -pub enum ConfigurationError { +pub enum Error { Message(String), ConfigError(ConfigError), IOError(std::io::Error), @@ -67,19 +67,19 @@ pub enum ConfigurationError { TrackerModeIncompatible, } -impl std::fmt::Display for ConfigurationError { +impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { - ConfigurationError::Message(e) => e.fmt(f), - ConfigurationError::ConfigError(e) => e.fmt(f), - ConfigurationError::IOError(e) => e.fmt(f), - ConfigurationError::ParseError(e) => e.fmt(f), - 
ConfigurationError::TrackerModeIncompatible => write!(f, "{:?}", self), + Error::Message(e) => e.fmt(f), + Error::ConfigError(e) => e.fmt(f), + Error::IOError(e) => e.fmt(f), + Error::ParseError(e) => e.fmt(f), + Error::TrackerModeIncompatible => write!(f, "{:?}", self), } } } -impl std::error::Error for ConfigurationError {} +impl std::error::Error for Error {} impl Configuration { #[must_use] @@ -97,7 +97,7 @@ impl Configuration { pub fn default() -> Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), - mode: mode::Tracker::Public, + mode: mode::Mode::Public, db_driver: Drivers::Sqlite3, db_path: String::from("data.db"), announce_interval: 120, @@ -137,7 +137,7 @@ impl Configuration { /// # Errors /// /// Will return `Err` if `path` does not exist or has a bad configuration. - pub fn load_from_file(path: &str) -> Result { + pub fn load_from_file(path: &str) -> Result { let config_builder = Config::builder(); #[allow(unused_assignments)] @@ -147,18 +147,18 @@ impl Configuration { config = config_builder .add_source(File::with_name(path)) .build() - .map_err(ConfigurationError::ConfigError)?; + .map_err(Error::ConfigError)?; } else { eprintln!("No config file found."); eprintln!("Creating config file.."); let config = Configuration::default(); config.save_to_file(path)?; - return Err(ConfigurationError::Message( + return Err(Error::Message( "Please edit the config.TOML in the root folder and restart the tracker.".to_string(), )); } - let torrust_config: Configuration = config.try_deserialize().map_err(ConfigurationError::ConfigError)?; + let torrust_config: Configuration = config.try_deserialize().map_err(Error::ConfigError)?; Ok(torrust_config) } @@ -167,7 +167,7 @@ impl Configuration { /// /// Will return `Err` if `filename` does not exist or the user does not have /// permission to read it. 
- pub fn save_to_file(&self, path: &str) -> Result<(), ConfigurationError> { + pub fn save_to_file(&self, path: &str) -> Result<(), Error> { let toml_string = toml::to_string(self).expect("Could not encode TOML value"); fs::write(path, toml_string).expect("Could not write to file!"); Ok(()) @@ -176,7 +176,7 @@ impl Configuration { #[cfg(test)] mod tests { - use crate::config::{Configuration, ConfigurationError}; + use crate::config::{Configuration, Error}; #[cfg(test)] fn default_config_toml() -> String { @@ -294,7 +294,7 @@ mod tests { #[test] fn configuration_error_could_be_displayed() { - let error = ConfigurationError::TrackerModeIncompatible; + let error = Error::TrackerModeIncompatible; assert_eq!(format!("{}", error), "TrackerModeIncompatible"); } diff --git a/src/databases/database.rs b/src/databases/database.rs index 5e4a7c1f9..7055d2a09 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize}; use crate::databases::mysql::Mysql; use crate::databases::sqlite::Sqlite; -use crate::protocol::common::InfoHash; +use crate::protocol::info_hash::InfoHash; use crate::tracker::key::Auth; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 4fd00e31e..0dafc3a60 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -10,7 +10,8 @@ use r2d2_mysql::MysqlConnectionManager; use crate::databases::database; use crate::databases::database::{Database, Error}; -use crate::protocol::common::{InfoHash, AUTH_KEY_LENGTH}; +use crate::protocol::common::AUTH_KEY_LENGTH; +use crate::protocol::info_hash::InfoHash; use crate::tracker::key::Auth; pub struct Mysql { diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 159da9922..39dea8502 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -8,7 +8,7 @@ use r2d2_sqlite::SqliteConnectionManager; use crate::databases::database; use 
crate::databases::database::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; -use crate::protocol::common::InfoHash; +use crate::protocol::info_hash::InfoHash; use crate::tracker::key::Auth; pub struct Sqlite { diff --git a/src/http/errors.rs b/src/http/error.rs similarity index 86% rename from src/http/errors.rs rename to src/http/error.rs index fe0cf26e6..b6c08a8ba 100644 --- a/src/http/errors.rs +++ b/src/http/error.rs @@ -2,12 +2,12 @@ use thiserror::Error; use warp::reject::Reject; #[derive(Error, Debug)] -pub enum ServerError { +pub enum Error { #[error("internal server error")] - InternalServerError, + InternalServer, #[error("info_hash is either missing or invalid")] - InvalidInfoHash, + InvalidInfo, #[error("peer_id is either missing or invalid")] InvalidPeerId, @@ -31,4 +31,4 @@ pub enum ServerError { ExceededInfoHashLimit, } -impl Reject for ServerError {} +impl Reject for Error {} diff --git a/src/http/filters.rs b/src/http/filters.rs index 2c3ab626d..484ae2311 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -5,12 +5,13 @@ use std::sync::Arc; use warp::{reject, Filter, Rejection}; -use super::errors::ServerError; -use super::request::{Announce, AnnounceRequestQuery, Scrape}; +use super::error::Error; +use super::request::{Announce, AnnounceQuery, Scrape}; use super::WebResult; -use crate::protocol::common::{InfoHash, PeerId, MAX_SCRAPE_TORRENTS}; -use crate::tracker; +use crate::protocol::common::MAX_SCRAPE_TORRENTS; +use crate::protocol::info_hash::InfoHash; use crate::tracker::key::Auth; +use crate::tracker::{self, peer}; /// Pass Arc along #[must_use] @@ -28,7 +29,7 @@ pub fn with_info_hash() -> impl Filter,), Error = Rejec /// Check for `PeerId` #[must_use] -pub fn with_peer_id() -> impl Filter + Clone { +pub fn with_peer_id() -> impl Filter + Clone { warp::filters::query::raw().and_then(|q| async move { peer_id(&q) }) } @@ -54,7 +55,7 @@ pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter impl Filter + 
Clone { - warp::filters::query::query::() + warp::filters::query::query::() .and(with_info_hash()) .and(with_peer_id()) .and(with_peer_addr(on_reverse_proxy)) @@ -88,9 +89,9 @@ fn info_hashes(raw_query: &String) -> WebResult> { } if info_hashes.len() > MAX_SCRAPE_TORRENTS as usize { - Err(reject::custom(ServerError::ExceededInfoHashLimit)) + Err(reject::custom(Error::ExceededInfoHashLimit)) } else if info_hashes.is_empty() { - Err(reject::custom(ServerError::InvalidInfoHash)) + Err(reject::custom(Error::InvalidInfo)) } else { Ok(info_hashes) } @@ -98,11 +99,11 @@ fn info_hashes(raw_query: &String) -> WebResult> { /// Parse `PeerId` from raw query string #[allow(clippy::ptr_arg)] -fn peer_id(raw_query: &String) -> WebResult { +fn peer_id(raw_query: &String) -> WebResult { // put all query params in a vec let split_raw_query: Vec<&str> = raw_query.split('&').collect(); - let mut peer_id: Option = None; + let mut peer_id: Option = None; for v in split_raw_query { // look for the peer_id param @@ -115,32 +116,32 @@ fn peer_id(raw_query: &String) -> WebResult { // peer_id must be 20 bytes if peer_id_bytes.len() != 20 { - return Err(reject::custom(ServerError::InvalidPeerId)); + return Err(reject::custom(Error::InvalidPeerId)); } // clone peer_id_bytes into fixed length array let mut byte_arr: [u8; 20] = Default::default(); byte_arr.clone_from_slice(peer_id_bytes.as_slice()); - peer_id = Some(PeerId(byte_arr)); + peer_id = Some(peer::Id(byte_arr)); break; } } match peer_id { Some(id) => Ok(id), - None => Err(reject::custom(ServerError::InvalidPeerId)), + None => Err(reject::custom(Error::InvalidPeerId)), } } /// Get `PeerAddress` from `RemoteAddress` or Forwarded fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option)) -> WebResult { if !on_reverse_proxy && remote_addr.is_none() { - return Err(reject::custom(ServerError::AddressNotFound)); + return Err(reject::custom(Error::AddressNotFound)); } if on_reverse_proxy && 
x_forwarded_for.is_none() { - return Err(reject::custom(ServerError::AddressNotFound)); + return Err(reject::custom(Error::AddressNotFound)); } if on_reverse_proxy { @@ -152,7 +153,7 @@ fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, - peer_id: PeerId, + peer_id: peer::Id, peer_addr: IpAddr, ) -> WebResult { Ok(Announce { diff --git a/src/http/handlers.rs b/src/http/handlers.rs index d4ae76e65..ff5469168 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -7,11 +7,10 @@ use log::debug; use warp::http::Response; use warp::{reject, Rejection, Reply}; -use super::errors::ServerError; -use super::response::{self, Peer, ScrapeResponseEntry}; +use super::error::Error; +use super::response::{self, Peer, ScrapeEntry}; use super::{request, WebResult}; -use crate::http::response::Error; -use crate::protocol::common::InfoHash; +use crate::protocol::info_hash::InfoHash; use crate::tracker::key::Auth; use crate::tracker::{self, peer, statistics, torrent}; @@ -20,18 +19,14 @@ use crate::tracker::{self, peer, statistics, torrent}; /// # Errors /// /// Will return `ServerError` that wraps the `Error` if unable to `authenticate_request`. 
-pub async fn authenticate( - info_hash: &InfoHash, - auth_key: &Option, - tracker: Arc, -) -> Result<(), ServerError> { +pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), Error> { tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { - torrent::Error::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - torrent::Error::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - torrent::Error::PeerKeyNotValid => ServerError::PeerKeyNotValid, - torrent::Error::NoPeersFound => ServerError::NoPeersFound, - torrent::Error::CouldNotSendResponse => ServerError::InternalServerError, - torrent::Error::InvalidInfoHash => ServerError::InvalidInfoHash, + torrent::Error::TorrentNotWhitelisted => Error::TorrentNotWhitelisted, + torrent::Error::PeerNotAuthenticated => Error::PeerNotAuthenticated, + torrent::Error::PeerKeyNotValid => Error::PeerKeyNotValid, + torrent::Error::NoPeersFound => Error::NoPeersFound, + torrent::Error::CouldNotSendResponse => Error::InternalServer, + torrent::Error::InvalidInfoHash => Error::InvalidInfo, }) } @@ -91,7 +86,7 @@ pub async fn handle_scrape( auth_key: Option, tracker: Arc, ) -> WebResult { - let mut files: HashMap = HashMap::new(); + let mut files: HashMap = HashMap::new(); let db = tracker.get_torrents().await; for info_hash in &scrape_request.info_hashes { @@ -99,20 +94,20 @@ pub async fn handle_scrape( Some(torrent_info) => { if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { let (seeders, completed, leechers) = torrent_info.get_stats(); - ScrapeResponseEntry { + ScrapeEntry { complete: seeders, downloaded: completed, incomplete: leechers, } } else { - ScrapeResponseEntry { + ScrapeEntry { complete: 0, downloaded: 0, incomplete: 0, } } } - None => ScrapeResponseEntry { + None => ScrapeEntry { complete: 0, downloaded: 0, incomplete: 0, @@ -165,7 +160,7 @@ fn send_announce_response( if let Some(1) = announce_request.compact { match 
res.write_compact() { Ok(body) => Ok(Response::new(body)), - Err(_) => Err(reject::custom(ServerError::InternalServerError)), + Err(_) => Err(reject::custom(Error::InternalServer)), } } else { Ok(Response::new(res.write().into())) @@ -173,12 +168,12 @@ fn send_announce_response( } /// Send scrape response -fn send_scrape_response(files: HashMap) -> WebResult { +fn send_scrape_response(files: HashMap) -> WebResult { let res = response::Scrape { files }; match res.write() { Ok(body) => Ok(Response::new(body)), - Err(_) => Err(reject::custom(ServerError::InternalServerError)), + Err(_) => Err(reject::custom(Error::InternalServer)), } } @@ -188,15 +183,15 @@ fn send_scrape_response(files: HashMap) -> WebRes /// /// Will not return a error, `Infallible`, but instead convert the `ServerError` into a `Response`. pub fn send_error(r: &Rejection) -> std::result::Result { - let body = if let Some(server_error) = r.find::() { + let body = if let Some(server_error) = r.find::() { debug!("{:?}", server_error); - Error { + response::Error { failure_reason: server_error.to_string(), } .write() } else { - Error { - failure_reason: ServerError::InternalServerError.to_string(), + response::Error { + failure_reason: Error::InternalServer.to_string(), } .write() }; diff --git a/src/http/mod.rs b/src/http/mod.rs index 6e3ce7111..701dba407 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -1,4 +1,4 @@ -pub mod errors; +pub mod error; pub mod filters; pub mod handlers; pub mod request; diff --git a/src/http/request.rs b/src/http/request.rs index b812e1173..bc549b698 100644 --- a/src/http/request.rs +++ b/src/http/request.rs @@ -3,10 +3,11 @@ use std::net::IpAddr; use serde::Deserialize; use crate::http::Bytes; -use crate::protocol::common::{InfoHash, PeerId}; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::peer; #[derive(Deserialize)] -pub struct AnnounceRequestQuery { +pub struct AnnounceQuery { pub downloaded: Option, pub uploaded: Option, pub key: Option, @@ -22,7 
+23,7 @@ pub struct Announce { pub peer_addr: IpAddr, pub downloaded: Bytes, pub uploaded: Bytes, - pub peer_id: PeerId, + pub peer_id: peer::Id, pub port: u16, pub left: Bytes, pub event: Option, diff --git a/src/http/response.rs b/src/http/response.rs index 98ea6fe73..962e72fac 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -5,7 +5,7 @@ use std::net::IpAddr; use serde; use serde::Serialize; -use crate::protocol::common::InfoHash; +use crate::protocol::info_hash::InfoHash; #[derive(Serialize)] pub struct Peer { @@ -78,7 +78,7 @@ impl Announce { } #[derive(Serialize)] -pub struct ScrapeResponseEntry { +pub struct ScrapeEntry { pub complete: u32, pub downloaded: u32, pub incomplete: u32, @@ -86,7 +86,7 @@ pub struct ScrapeResponseEntry { #[derive(Serialize)] pub struct Scrape { - pub files: HashMap, + pub files: HashMap, } impl Scrape { diff --git a/src/protocol/common.rs b/src/protocol/common.rs index d6a98cf03..527ae9ebc 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -25,321 +25,3 @@ pub enum AnnounceEventDef { #[derive(Serialize, Deserialize)] #[serde(remote = "NumberOfBytes")] pub struct NumberOfBytesDef(pub i64); - -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct InfoHash(pub [u8; 20]); - -impl std::fmt::Display for InfoHash { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let mut chars = [0u8; 40]; - binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); - write!(f, "{}", std::str::from_utf8(&chars).unwrap()) - } -} - -impl std::str::FromStr for InfoHash { - type Err = binascii::ConvertError; - - fn from_str(s: &str) -> Result { - let mut i = Self([0u8; 20]); - if s.len() != 40 { - return Err(binascii::ConvertError::InvalidInputLength); - } - binascii::hex2bin(s.as_bytes(), &mut i.0)?; - Ok(i) - } -} - -impl Ord for InfoHash { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.0.cmp(&other.0) - } -} - -impl std::cmp::PartialOrd for InfoHash { - fn 
partial_cmp(&self, other: &InfoHash) -> Option { - self.0.partial_cmp(&other.0) - } -} - -impl std::convert::From<&[u8]> for InfoHash { - fn from(data: &[u8]) -> InfoHash { - assert_eq!(data.len(), 20); - let mut ret = InfoHash([0u8; 20]); - ret.0.clone_from_slice(data); - ret - } -} - -impl std::convert::From<[u8; 20]> for InfoHash { - fn from(val: [u8; 20]) -> Self { - InfoHash(val) - } -} - -impl serde::ser::Serialize for InfoHash { - fn serialize(&self, serializer: S) -> Result { - let mut buffer = [0u8; 40]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); - let str_out = std::str::from_utf8(bytes_out).unwrap(); - serializer.serialize_str(str_out) - } -} - -impl<'de> serde::de::Deserialize<'de> for InfoHash { - fn deserialize>(des: D) -> Result { - des.deserialize_str(InfoHashVisitor) - } -} - -#[cfg(test)] -mod tests { - - use std::str::FromStr; - - use serde::{Deserialize, Serialize}; - use serde_json::json; - - use super::InfoHash; - - #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] - struct ContainingInfoHash { - pub info_hash: InfoHash, - } - - #[test] - fn an_info_hash_can_be_created_from_a_valid_40_utf8_char_string_representing_an_hexadecimal_value() { - let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"); - assert!(info_hash.is_ok()); - } - - #[test] - fn an_info_hash_can_not_be_created_from_a_utf8_string_representing_a_not_valid_hexadecimal_value() { - let info_hash = InfoHash::from_str("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"); - assert!(info_hash.is_err()); - } - - #[test] - fn an_info_hash_can_only_be_created_from_a_40_utf8_char_string() { - let info_hash = InfoHash::from_str(&"F".repeat(39)); - assert!(info_hash.is_err()); - - let info_hash = InfoHash::from_str(&"F".repeat(41)); - assert!(info_hash.is_err()); - } - - #[test] - fn an_info_hash_should_by_displayed_like_a_40_utf8_lowercased_char_hex_string() { - let info_hash = 
InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - - let output = format!("{}", info_hash); - - assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff"); - } - - #[test] - fn an_info_hash_can_be_created_from_a_valid_20_byte_array_slice() { - let info_hash: InfoHash = [255u8; 20].as_slice().into(); - - assert_eq!( - info_hash, - InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() - ); - } - - #[test] - fn an_info_hash_can_be_created_from_a_valid_20_byte_array() { - let info_hash: InfoHash = [255u8; 20].into(); - - assert_eq!( - info_hash, - InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() - ); - } - - #[test] - fn an_info_hash_can_be_serialized() { - let s = ContainingInfoHash { - info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), - }; - - let json_serialized_value = serde_json::to_string(&s).unwrap(); - - assert_eq!( - json_serialized_value, - r#"{"info_hash":"ffffffffffffffffffffffffffffffffffffffff"}"# - ); - } - - #[test] - fn an_info_hash_can_be_deserialized() { - let json = json!({ - "info_hash": "ffffffffffffffffffffffffffffffffffffffff", - }); - - let s: ContainingInfoHash = serde_json::from_value(json).unwrap(); - - assert_eq!( - s, - ContainingInfoHash { - info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() - } - ); - } -} - -struct InfoHashVisitor; - -impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { - type Value = InfoHash; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a 40 character long hash") - } - - fn visit_str(self, v: &str) -> Result { - if v.len() != 40 { - return Err(serde::de::Error::invalid_value( - serde::de::Unexpected::Str(v), - &"expected a 40 character long string", - )); - } - - let mut res = InfoHash([0u8; 20]); - - if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { - return Err(serde::de::Error::invalid_value( - 
serde::de::Unexpected::Str(v), - &"expected a hexadecimal string", - )); - }; - Ok(res) - } -} - -#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] -pub struct PeerId(pub [u8; 20]); - -impl std::fmt::Display for PeerId { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut buffer = [0u8; 20]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok(); - match bytes_out { - Some(bytes) => write!(f, "{}", std::str::from_utf8(bytes).unwrap()), - None => write!(f, ""), - } - } -} - -impl PeerId { - #[must_use] - pub fn get_id(&self) -> Option { - let buff_size = self.0.len() * 2; - let mut tmp: Vec = vec![0; buff_size]; - binascii::bin2hex(&self.0, &mut tmp).unwrap(); - - std::str::from_utf8(&tmp).ok().map(std::string::ToString::to_string) - } - - #[must_use] - pub fn get_client_name(&self) -> Option<&'static str> { - if self.0[0] == b'M' { - return Some("BitTorrent"); - } - if self.0[0] == b'-' { - let name = match &self.0[1..3] { - b"AG" | b"A~" => "Ares", - b"AR" => "Arctic", - b"AV" => "Avicora", - b"AX" => "BitPump", - b"AZ" => "Azureus", - b"BB" => "BitBuddy", - b"BC" => "BitComet", - b"BF" => "Bitflu", - b"BG" => "BTG (uses Rasterbar libtorrent)", - b"BR" => "BitRocket", - b"BS" => "BTSlave", - b"BX" => "~Bittorrent X", - b"CD" => "Enhanced CTorrent", - b"CT" => "CTorrent", - b"DE" => "DelugeTorrent", - b"DP" => "Propagate Data Client", - b"EB" => "EBit", - b"ES" => "electric sheep", - b"FT" => "FoxTorrent", - b"FW" => "FrostWire", - b"FX" => "Freebox BitTorrent", - b"GS" => "GSTorrent", - b"HL" => "Halite", - b"HN" => "Hydranode", - b"KG" => "KGet", - b"KT" => "KTorrent", - b"LH" => "LH-ABC", - b"LP" => "Lphant", - b"LT" => "libtorrent", - b"lt" => "libTorrent", - b"LW" => "LimeWire", - b"MO" => "MonoTorrent", - b"MP" => "MooPolice", - b"MR" => "Miro", - b"MT" => "MoonlightTorrent", - b"NX" => "Net Transport", - b"PD" => "Pando", - b"qB" => "qBittorrent", - b"QD" => "QQDownload", - b"QT" => "Qt 4 Torrent 
example", - b"RT" => "Retriever", - b"S~" => "Shareaza alpha/beta", - b"SB" => "~Swiftbit", - b"SS" => "SwarmScope", - b"ST" => "SymTorrent", - b"st" => "sharktorrent", - b"SZ" => "Shareaza", - b"TN" => "TorrentDotNET", - b"TR" => "Transmission", - b"TS" => "Torrentstorm", - b"TT" => "TuoTu", - b"UL" => "uLeecher!", - b"UT" => "µTorrent", - b"UW" => "µTorrent Web", - b"VG" => "Vagaa", - b"WD" => "WebTorrent Desktop", - b"WT" => "BitLet", - b"WW" => "WebTorrent", - b"WY" => "FireTorrent", - b"XL" => "Xunlei", - b"XT" => "XanTorrent", - b"XX" => "Xtorrent", - b"ZT" => "ZipTorrent", - _ => return None, - }; - Some(name) - } else { - None - } - } -} - -impl Serialize for PeerId { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - #[derive(Serialize)] - struct PeerIdInfo<'a> { - id: Option, - client: Option<&'a str>, - } - - let buff_size = self.0.len() * 2; - let mut tmp: Vec = vec![0; buff_size]; - binascii::bin2hex(&self.0, &mut tmp).unwrap(); - let id = std::str::from_utf8(&tmp).ok(); - - let obj = PeerIdInfo { - id: self.get_id(), - client: self.get_client_name(), - }; - obj.serialize(serializer) - } -} diff --git a/src/protocol/crypto.rs b/src/protocol/crypto.rs index 2d3f8f6fa..a335e2dba 100644 --- a/src/protocol/crypto.rs +++ b/src/protocol/crypto.rs @@ -1,18 +1,18 @@ pub mod keys { pub mod seeds { - use self::detail::DEFAULT_SEED; + use self::detail::CURRENT_SEED; use crate::ephemeral_instance_keys::{Seed, RANDOM_SEED}; - pub trait SeedKeeper { + pub trait Keeper { type Seed: Sized + Default + AsMut<[u8]>; fn get_seed() -> &'static Self::Seed; } - pub struct InstanceSeed; - pub struct DefaultSeed; + pub struct Instance; + pub struct Current; - impl SeedKeeper for InstanceSeed { + impl Keeper for Instance { type Seed = Seed; fn get_seed() -> &'static Self::Seed { @@ -20,24 +20,24 @@ pub mod keys { } } - impl SeedKeeper for DefaultSeed { + impl Keeper for Current { type Seed = Seed; #[allow(clippy::needless_borrow)] fn get_seed() 
-> &'static Self::Seed { - &DEFAULT_SEED + &CURRENT_SEED } } #[cfg(test)] mod tests { use super::detail::ZEROED_TEST_SEED; - use super::{DefaultSeed, InstanceSeed, SeedKeeper}; + use super::{Current, Instance, Keeper}; use crate::ephemeral_instance_keys::Seed; pub struct ZeroedTestSeed; - impl SeedKeeper for ZeroedTestSeed { + impl Keeper for ZeroedTestSeed { type Seed = Seed; #[allow(clippy::needless_borrow)] @@ -48,12 +48,12 @@ pub mod keys { #[test] fn the_default_seed_and_the_zeroed_seed_should_be_the_same_when_testing() { - assert_eq!(DefaultSeed::get_seed(), ZeroedTestSeed::get_seed()); + assert_eq!(Current::get_seed(), ZeroedTestSeed::get_seed()); } #[test] fn the_default_seed_and_the_instance_seed_should_be_different_when_testing() { - assert_ne!(DefaultSeed::get_seed(), InstanceSeed::get_seed()); + assert_ne!(Current::get_seed(), Instance::get_seed()); } } @@ -64,10 +64,10 @@ pub mod keys { pub const ZEROED_TEST_SEED: &Seed = &[0u8; 32]; #[cfg(test)] - pub use ZEROED_TEST_SEED as DEFAULT_SEED; + pub use ZEROED_TEST_SEED as CURRENT_SEED; #[cfg(not(test))] - pub use crate::ephemeral_instance_keys::RANDOM_SEED as DEFAULT_SEED; + pub use crate::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; #[cfg(test)] mod tests { @@ -75,7 +75,7 @@ pub mod keys { use crate::ephemeral_instance_keys::RANDOM_SEED; use crate::protocol::crypto::keys::seeds::detail::ZEROED_TEST_SEED; - use crate::protocol::crypto::keys::seeds::DEFAULT_SEED; + use crate::protocol::crypto::keys::seeds::CURRENT_SEED; #[test] fn it_should_have_a_zero_test_seed() { @@ -84,7 +84,7 @@ pub mod keys { #[test] fn it_should_default_to_zeroed_seed_when_testing() { - assert_eq!(*DEFAULT_SEED, *ZEROED_TEST_SEED); + assert_eq!(*CURRENT_SEED, *ZEROED_TEST_SEED); } #[test] diff --git a/src/protocol/info_hash.rs b/src/protocol/info_hash.rs new file mode 100644 index 000000000..3b9b2fa35 --- /dev/null +++ b/src/protocol/info_hash.rs @@ -0,0 +1,190 @@ +#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] +pub 
struct InfoHash(pub [u8; 20]); + +impl std::fmt::Display for InfoHash { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let mut chars = [0u8; 40]; + binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); + write!(f, "{}", std::str::from_utf8(&chars).unwrap()) + } +} + +impl std::str::FromStr for InfoHash { + type Err = binascii::ConvertError; + + fn from_str(s: &str) -> Result { + let mut i = Self([0u8; 20]); + if s.len() != 40 { + return Err(binascii::ConvertError::InvalidInputLength); + } + binascii::hex2bin(s.as_bytes(), &mut i.0)?; + Ok(i) + } +} + +impl Ord for InfoHash { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl std::cmp::PartialOrd for InfoHash { + fn partial_cmp(&self, other: &InfoHash) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl std::convert::From<&[u8]> for InfoHash { + fn from(data: &[u8]) -> InfoHash { + assert_eq!(data.len(), 20); + let mut ret = InfoHash([0u8; 20]); + ret.0.clone_from_slice(data); + ret + } +} + +impl std::convert::From<[u8; 20]> for InfoHash { + fn from(val: [u8; 20]) -> Self { + InfoHash(val) + } +} + +impl serde::ser::Serialize for InfoHash { + fn serialize(&self, serializer: S) -> Result { + let mut buffer = [0u8; 40]; + let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); + let str_out = std::str::from_utf8(bytes_out).unwrap(); + serializer.serialize_str(str_out) + } +} + +impl<'de> serde::de::Deserialize<'de> for InfoHash { + fn deserialize>(des: D) -> Result { + des.deserialize_str(InfoHashVisitor) + } +} + +struct InfoHashVisitor; + +impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { + type Value = InfoHash; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(formatter, "a 40 character long hash") + } + + fn visit_str(self, v: &str) -> Result { + if v.len() != 40 { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"expected a 40 character 
long string", + )); + } + + let mut res = InfoHash([0u8; 20]); + + if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"expected a hexadecimal string", + )); + }; + Ok(res) + } +} + +#[cfg(test)] +mod tests { + + use std::str::FromStr; + + use serde::{Deserialize, Serialize}; + use serde_json::json; + + use super::InfoHash; + + #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] + struct ContainingInfoHash { + pub info_hash: InfoHash, + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_40_utf8_char_string_representing_an_hexadecimal_value() { + let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"); + assert!(info_hash.is_ok()); + } + + #[test] + fn an_info_hash_can_not_be_created_from_a_utf8_string_representing_a_not_valid_hexadecimal_value() { + let info_hash = InfoHash::from_str("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"); + assert!(info_hash.is_err()); + } + + #[test] + fn an_info_hash_can_only_be_created_from_a_40_utf8_char_string() { + let info_hash = InfoHash::from_str(&"F".repeat(39)); + assert!(info_hash.is_err()); + + let info_hash = InfoHash::from_str(&"F".repeat(41)); + assert!(info_hash.is_err()); + } + + #[test] + fn an_info_hash_should_by_displayed_like_a_40_utf8_lowercased_char_hex_string() { + let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + + let output = format!("{}", info_hash); + + assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff"); + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_20_byte_array_slice() { + let info_hash: InfoHash = [255u8; 20].as_slice().into(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_20_byte_array() { + let info_hash: InfoHash = [255u8; 20].into(); + + assert_eq!( + info_hash, + 
InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn an_info_hash_can_be_serialized() { + let s = ContainingInfoHash { + info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), + }; + + let json_serialized_value = serde_json::to_string(&s).unwrap(); + + assert_eq!( + json_serialized_value, + r#"{"info_hash":"ffffffffffffffffffffffffffffffffffffffff"}"# + ); + } + + #[test] + fn an_info_hash_can_be_deserialized() { + let json = json!({ + "info_hash": "ffffffffffffffffffffffffffffffffffffffff", + }); + + let s: ContainingInfoHash = serde_json::from_value(json).unwrap(); + + assert_eq!( + s, + ContainingInfoHash { + info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + } + ); + } +} diff --git a/src/protocol/mod.rs b/src/protocol/mod.rs index 85e4f90ad..bd4310dcf 100644 --- a/src/protocol/mod.rs +++ b/src/protocol/mod.rs @@ -1,4 +1,5 @@ pub mod clock; pub mod common; pub mod crypto; +pub mod info_hash; pub mod utils; diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index fcd9ebe2d..508280b1a 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -16,12 +16,12 @@ use tokio::sync::{RwLock, RwLockReadGuard}; use crate::config::Configuration; use crate::databases::database; use crate::databases::database::Database; -use crate::protocol::common::InfoHash; +use crate::protocol::info_hash::InfoHash; use crate::tracker::key::Auth; pub struct Tracker { pub config: Arc, - mode: mode::Tracker, + mode: mode::Mode, keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, @@ -54,15 +54,15 @@ impl Tracker { } pub fn is_public(&self) -> bool { - self.mode == mode::Tracker::Public + self.mode == mode::Mode::Public } pub fn is_private(&self) -> bool { - self.mode == mode::Tracker::Private || self.mode == mode::Tracker::PrivateListed + self.mode == mode::Mode::Private || self.mode == mode::Mode::PrivateListed } pub fn is_whitelisted(&self) -> bool { - self.mode == 
mode::Tracker::Listed || self.mode == mode::Tracker::PrivateListed + self.mode == mode::Mode::Listed || self.mode == mode::Mode::PrivateListed } /// # Errors diff --git a/src/tracker/mode.rs b/src/tracker/mode.rs index f1fff169e..a0dba6e67 100644 --- a/src/tracker/mode.rs +++ b/src/tracker/mode.rs @@ -2,7 +2,7 @@ use serde; use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] -pub enum Tracker { +pub enum Mode { // Will track every new info hash and serve every peer. #[serde(rename = "public")] Public, diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index dd49ffaa7..16aada0ed 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -6,12 +6,12 @@ use serde::Serialize; use crate::http::request::Announce; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time}; -use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef, PeerId}; +use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; use crate::protocol::utils::ser_unix_time_value; #[derive(PartialEq, Eq, Debug, Clone, Serialize, Copy)] pub struct TorrentPeer { - pub peer_id: PeerId, + pub peer_id: Id, pub peer_addr: SocketAddr, #[serde(serialize_with = "ser_unix_time_value")] pub updated: DurationSinceUnixEpoch, @@ -35,7 +35,7 @@ impl TorrentPeer { let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); TorrentPeer { - peer_id: PeerId(announce_request.peer_id.0), + peer_id: Id(announce_request.peer_id.0), peer_addr, updated: Current::now(), uploaded: announce_request.bytes_uploaded, @@ -88,6 +88,133 @@ impl TorrentPeer { } } +#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] +pub struct Id(pub [u8; 20]); + +impl std::fmt::Display for Id { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut buffer = [0u8; 20]; + let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok(); + match bytes_out { + Some(bytes) => 
write!(f, "{}", std::str::from_utf8(bytes).unwrap()), + None => write!(f, ""), + } + } +} + +impl Id { + #[must_use] + pub fn get_id(&self) -> Option { + let buff_size = self.0.len() * 2; + let mut tmp: Vec = vec![0; buff_size]; + binascii::bin2hex(&self.0, &mut tmp).unwrap(); + + std::str::from_utf8(&tmp).ok().map(std::string::ToString::to_string) + } + + #[must_use] + pub fn get_client_name(&self) -> Option<&'static str> { + if self.0[0] == b'M' { + return Some("BitTorrent"); + } + if self.0[0] == b'-' { + let name = match &self.0[1..3] { + b"AG" | b"A~" => "Ares", + b"AR" => "Arctic", + b"AV" => "Avicora", + b"AX" => "BitPump", + b"AZ" => "Azureus", + b"BB" => "BitBuddy", + b"BC" => "BitComet", + b"BF" => "Bitflu", + b"BG" => "BTG (uses Rasterbar libtorrent)", + b"BR" => "BitRocket", + b"BS" => "BTSlave", + b"BX" => "~Bittorrent X", + b"CD" => "Enhanced CTorrent", + b"CT" => "CTorrent", + b"DE" => "DelugeTorrent", + b"DP" => "Propagate Data Client", + b"EB" => "EBit", + b"ES" => "electric sheep", + b"FT" => "FoxTorrent", + b"FW" => "FrostWire", + b"FX" => "Freebox BitTorrent", + b"GS" => "GSTorrent", + b"HL" => "Halite", + b"HN" => "Hydranode", + b"KG" => "KGet", + b"KT" => "KTorrent", + b"LH" => "LH-ABC", + b"LP" => "Lphant", + b"LT" => "libtorrent", + b"lt" => "libTorrent", + b"LW" => "LimeWire", + b"MO" => "MonoTorrent", + b"MP" => "MooPolice", + b"MR" => "Miro", + b"MT" => "MoonlightTorrent", + b"NX" => "Net Transport", + b"PD" => "Pando", + b"qB" => "qBittorrent", + b"QD" => "QQDownload", + b"QT" => "Qt 4 Torrent example", + b"RT" => "Retriever", + b"S~" => "Shareaza alpha/beta", + b"SB" => "~Swiftbit", + b"SS" => "SwarmScope", + b"ST" => "SymTorrent", + b"st" => "sharktorrent", + b"SZ" => "Shareaza", + b"TN" => "TorrentDotNET", + b"TR" => "Transmission", + b"TS" => "Torrentstorm", + b"TT" => "TuoTu", + b"UL" => "uLeecher!", + b"UT" => "µTorrent", + b"UW" => "µTorrent Web", + b"VG" => "Vagaa", + b"WD" => "WebTorrent Desktop", + b"WT" => "BitLet", + b"WW" => 
"WebTorrent", + b"WY" => "FireTorrent", + b"XL" => "Xunlei", + b"XT" => "XanTorrent", + b"XX" => "Xtorrent", + b"ZT" => "ZipTorrent", + _ => return None, + }; + Some(name) + } else { + None + } + } +} + +impl Serialize for Id { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + #[derive(Serialize)] + struct PeerIdInfo<'a> { + id: Option, + client: Option<&'a str>, + } + + let buff_size = self.0.len() * 2; + let mut tmp: Vec = vec![0; buff_size]; + binascii::bin2hex(&self.0, &mut tmp).unwrap(); + let id = std::str::from_utf8(&tmp).ok(); + + let obj = PeerIdInfo { + id: self.get_id(), + client: self.get_client_name(), + }; + obj.serialize(serializer) + } +} + #[cfg(test)] mod test { mod torrent_peer { @@ -97,13 +224,12 @@ mod test { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use crate::protocol::clock::{Current, Time}; - use crate::protocol::common::PeerId; - use crate::tracker::peer::TorrentPeer; + use crate::tracker::peer::{self, TorrentPeer}; #[test] fn it_should_be_serializable() { let torrent_peer = TorrentPeer { - peer_id: PeerId(*b"-qB00000000000000000"), + peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: Current::now(), uploaded: NumberOfBytes(0), @@ -284,8 +410,8 @@ mod test { use std::net::{IpAddr, Ipv4Addr}; use crate::http::request::Announce; - use crate::protocol::common::{InfoHash, PeerId}; - use crate::tracker::peer::TorrentPeer; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer::{self, TorrentPeer}; fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> Announce { Announce { @@ -293,7 +419,7 @@ mod test { peer_addr, downloaded: 0u64, uploaded: 0u64, - peer_id: PeerId(*b"-qB00000000000000000"), + peer_id: peer::Id(*b"-qB00000000000000000"), port, left: 0u64, event: None, diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 21bcfc513..3e38d2340 100644 --- a/src/tracker/torrent.rs 
+++ b/src/tracker/torrent.rs @@ -6,12 +6,12 @@ use serde::{Deserialize, Serialize}; use super::peer; use crate::protocol::clock::{Current, TimeNow}; -use crate::protocol::common::{PeerId, MAX_SCRAPE_TORRENTS}; +use crate::protocol::common::MAX_SCRAPE_TORRENTS; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct Entry { #[serde(skip)] - pub peers: std::collections::BTreeMap, + pub peers: std::collections::BTreeMap, pub completed: u32, } @@ -118,7 +118,6 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; - use crate::protocol::common::PeerId; use crate::tracker::peer; use crate::tracker::torrent::Entry; @@ -129,7 +128,7 @@ mod tests { impl TorrentPeerBuilder { pub fn default() -> TorrentPeerBuilder { let default_peer = peer::TorrentPeer { - peer_id: PeerId([0u8; 20]), + peer_id: peer::Id([0u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), updated: Current::now(), uploaded: NumberOfBytes(0), @@ -150,7 +149,7 @@ mod tests { self } - pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { + pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { self.peer.peer_id = peer_id; self } @@ -278,9 +277,9 @@ mod tests { assert_eq!(peers.len(), 0); } - fn peer_id_from_i32(number: i32) -> PeerId { + fn peer_id_from_i32(number: i32) -> peer::Id { let peer_id = number.to_le_bytes(); - PeerId([ + peer::Id([ 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], peer_id[2], peer_id[3], ]) diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index 8a544fa6a..3daa3e0f6 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -2,7 +2,7 @@ use std::net::SocketAddr; use aquatic_udp_protocol::ConnectionId; -use super::errors::ServerError; +use super::error::Error; use crate::protocol::clock::time_extent::{Extent, TimeExtent}; pub type 
Cookie = [u8; 8]; @@ -36,7 +36,7 @@ pub fn make(remote_address: &SocketAddr) -> Cookie { /// # Errors /// /// Will return a `ServerError::InvalidConnectionId` if the supplied `connection_cookie` fails to verify. -pub fn check(remote_address: &SocketAddr, connection_cookie: &Cookie) -> Result { +pub fn check(remote_address: &SocketAddr, connection_cookie: &Cookie) -> Result { // we loop backwards testing each time_extent until we find one that matches. // (or the lifetime of time_extents is exhausted) for offset in 0..=COOKIE_LIFETIME.amount { @@ -49,7 +49,7 @@ pub fn check(remote_address: &SocketAddr, connection_cookie: &Cookie) -> Result< return Ok(checking_time_extent); } } - Err(ServerError::InvalidConnectionId) + Err(Error::InvalidConnectionId) } mod cookie_builder { @@ -59,7 +59,7 @@ mod cookie_builder { use super::{Cookie, SinceUnixEpochTimeExtent, COOKIE_LIFETIME}; use crate::protocol::clock::time_extent::{DefaultTimeExtentMaker, Extent, Make, TimeExtent}; - use crate::protocol::crypto::keys::seeds::{DefaultSeed, SeedKeeper}; + use crate::protocol::crypto::keys::seeds::{Current, Keeper}; pub(super) fn get_last_time_extent() -> SinceUnixEpochTimeExtent { DefaultTimeExtentMaker::now(&COOKIE_LIFETIME.increment) @@ -70,7 +70,7 @@ mod cookie_builder { } pub(super) fn build(remote_address: &SocketAddr, time_extent: &TimeExtent) -> Cookie { - let seed = DefaultSeed::get_seed(); + let seed = Current::get_seed(); let mut hasher = DefaultHasher::new(); diff --git a/src/udp/errors.rs b/src/udp/error.rs similarity index 57% rename from src/udp/errors.rs rename to src/udp/error.rs index f90149a99..c5fbb3929 100644 --- a/src/udp/errors.rs +++ b/src/udp/error.rs @@ -3,9 +3,9 @@ use thiserror::Error; use crate::tracker::torrent; #[derive(Error, Debug)] -pub enum ServerError { +pub enum Error { #[error("internal server error")] - InternalServerError, + InternalServer, #[error("info_hash is either missing or invalid")] InvalidInfoHash, @@ -35,15 +35,15 @@ pub enum ServerError 
{ BadRequest, } -impl From for ServerError { +impl From for Error { fn from(e: torrent::Error) -> Self { match e { - torrent::Error::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - torrent::Error::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - torrent::Error::PeerKeyNotValid => ServerError::PeerKeyNotValid, - torrent::Error::NoPeersFound => ServerError::NoPeersFound, - torrent::Error::CouldNotSendResponse => ServerError::InternalServerError, - torrent::Error::InvalidInfoHash => ServerError::InvalidInfoHash, + torrent::Error::TorrentNotWhitelisted => Error::TorrentNotWhitelisted, + torrent::Error::PeerNotAuthenticated => Error::PeerNotAuthenticated, + torrent::Error::PeerKeyNotValid => Error::PeerKeyNotValid, + torrent::Error::NoPeersFound => Error::NoPeersFound, + torrent::Error::CouldNotSendResponse => Error::InternalServer, + torrent::Error::InvalidInfoHash => Error::InvalidInfoHash, } } } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 274af1e2c..da4bdbf35 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -7,13 +7,14 @@ use aquatic_udp_protocol::{ }; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; -use crate::protocol::common::{InfoHash, MAX_SCRAPE_TORRENTS}; +use crate::protocol::common::MAX_SCRAPE_TORRENTS; +use crate::protocol::info_hash::InfoHash; use crate::tracker::{self, peer, statistics}; -use crate::udp::errors::ServerError; -use crate::udp::request::AnnounceRequestWrapper; +use crate::udp::error::Error; +use crate::udp::request::AnnounceWrapper; pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { - match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| ServerError::InternalServerError) { + match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| Error::InternalServer) { Ok(request) => { let transaction_id = match &request { Request::Connect(connect_request) => 
connect_request.transaction_id, @@ -27,7 +28,7 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: A } } // bad request - Err(_) => handle_error(&ServerError::BadRequest, TransactionId(0)), + Err(_) => handle_error(&Error::BadRequest, TransactionId(0)), } } @@ -38,7 +39,7 @@ pub async fn handle_request( request: Request, remote_addr: SocketAddr, tracker: Arc, -) -> Result { +) -> Result { match request { Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker).await, Request::Announce(announce_request) => handle_announce(remote_addr, &announce_request, tracker).await, @@ -53,7 +54,7 @@ pub async fn handle_connect( remote_addr: SocketAddr, request: &ConnectRequest, tracker: Arc, -) -> Result { +) -> Result { let connection_cookie = make(&remote_addr); let connection_id = into_connection_id(&connection_cookie); @@ -82,10 +83,10 @@ pub async fn handle_announce( remote_addr: SocketAddr, announce_request: &AnnounceRequest, tracker: Arc, -) -> Result { +) -> Result { check(&remote_addr, &from_connection_id(&announce_request.connection_id))?; - let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request); + let wrapped_announce_request = AnnounceWrapper::new(announce_request); tracker .authenticate_request(&wrapped_announce_request.info_hash, &None) @@ -173,7 +174,7 @@ pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, tracker: Arc, -) -> Result { +) -> Result { let db = tracker.get_torrents().await; let mut torrent_stats: Vec = Vec::new(); @@ -228,7 +229,7 @@ pub async fn handle_scrape( })) } -fn handle_error(e: &ServerError, transaction_id: TransactionId) -> Response { +fn handle_error(e: &Error, transaction_id: TransactionId) -> Response { let message = e.to_string(); Response::from(ErrorResponse { transaction_id, @@ -245,7 +246,6 @@ mod tests { use crate::config::Configuration; use crate::protocol::clock::{Current, Time}; - use crate::protocol::common::PeerId; use 
crate::tracker::{self, mode, peer, statistics}; fn default_tracker_config() -> Arc { @@ -253,21 +253,17 @@ mod tests { } fn initialized_public_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Tracker::Public).into()); + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Mode::Public).into()); initialized_tracker(&configuration) } fn initialized_private_tracker() -> Arc { - let configuration = Arc::new( - TrackerConfigurationBuilder::default() - .with_mode(mode::Tracker::Private) - .into(), - ); + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Mode::Private).into()); initialized_tracker(&configuration) } fn initialized_whitelisted_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Tracker::Listed).into()); + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Mode::Listed).into()); initialized_tracker(&configuration) } @@ -299,7 +295,7 @@ mod tests { impl TorrentPeerBuilder { pub fn default() -> TorrentPeerBuilder { let default_peer = peer::TorrentPeer { - peer_id: PeerId([255u8; 20]), + peer_id: peer::Id([255u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: Current::now(), uploaded: NumberOfBytes(0), @@ -310,7 +306,7 @@ mod tests { TorrentPeerBuilder { peer: default_peer } } - pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { + pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { self.peer.peer_id = peer_id; self } @@ -347,7 +343,7 @@ mod tests { self } - pub fn with_mode(mut self, mode: mode::Tracker) -> Self { + pub fn with_mode(mut self, mode: mode::Mode) -> Self { self.configuration.mode = mode; self } @@ -537,8 +533,7 @@ mod tests { }; use mockall::predicate::eq; - use crate::protocol::common::PeerId; - use crate::tracker::{self, statistics}; + use crate::tracker::{self, peer, statistics}; use 
crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -570,7 +565,7 @@ mod tests { let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; let expected_peer = TorrentPeerBuilder::default() - .with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip), client_port)) .into(); @@ -644,7 +639,7 @@ mod tests { let peer_id = AquaticPeerId([255u8; 20]); let peer_using_ipv6 = TorrentPeerBuilder::default() - .with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); @@ -707,7 +702,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::protocol::common::PeerId; + use crate::tracker::peer; use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -740,7 +735,7 @@ mod tests { tracker.config.external_ip.clone().unwrap().parse::().unwrap(); let expected_peer = TorrentPeerBuilder::default() - .with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(SocketAddr::new(IpAddr::V4(external_ip_in_tracker_configuration), client_port)) .into(); @@ -761,8 +756,7 @@ mod tests { }; use mockall::predicate::eq; - use crate::protocol::common::PeerId; - use crate::tracker::{self, statistics}; + use crate::tracker::{self, peer, statistics}; use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -795,7 +789,7 @@ mod tests { let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; let expected_peer = TorrentPeerBuilder::default() - 
.with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); @@ -872,7 +866,7 @@ mod tests { let peer_id = AquaticPeerId([255u8; 20]); let peer_using_ipv4 = TorrentPeerBuilder::default() - .with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); @@ -1003,8 +997,7 @@ mod tests { }; use super::TorrentPeerBuilder; - use crate::protocol::common::PeerId; - use crate::tracker; + use crate::tracker::{self, peer}; use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -1046,10 +1039,10 @@ mod tests { } async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { - let peer_id = PeerId([255u8; 20]); + let peer_id = peer::Id([255u8; 20]); let peer = TorrentPeerBuilder::default() - .with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(*remote_addr) .with_bytes_left(0) .into(); diff --git a/src/udp/mod.rs b/src/udp/mod.rs index 2a8d42d9f..8b8c8c4f8 100644 --- a/src/udp/mod.rs +++ b/src/udp/mod.rs @@ -1,5 +1,5 @@ pub mod connection_cookie; -pub mod errors; +pub mod error; pub mod handlers; pub mod request; pub mod server; diff --git a/src/udp/request.rs b/src/udp/request.rs index 34139384b..c4326b291 100644 --- a/src/udp/request.rs +++ b/src/udp/request.rs @@ -1,6 +1,6 @@ use aquatic_udp_protocol::AnnounceRequest; -use crate::protocol::common::InfoHash; +use crate::protocol::info_hash::InfoHash; // struct AnnounceRequest { // pub connection_id: i64, @@ -17,15 +17,15 @@ use crate::protocol::common::InfoHash; // pub port: Port // } -pub struct AnnounceRequestWrapper { +pub struct AnnounceWrapper { pub announce_request: AnnounceRequest, pub info_hash: InfoHash, } -impl AnnounceRequestWrapper { +impl 
AnnounceWrapper { #[must_use] pub fn new(announce_request: &AnnounceRequest) -> Self { - AnnounceRequestWrapper { + AnnounceWrapper { announce_request: announce_request.clone(), info_hash: InfoHash(announce_request.info_hash.0), } diff --git a/tests/api.rs b/tests/api.rs index 72c3c65c7..4f119e6d0 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -16,15 +16,15 @@ mod tracker_api { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use reqwest::Response; use tokio::task::JoinHandle; - use torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; + use torrust_tracker::api::resources::auth_key_resource::AuthKey; use torrust_tracker::api::resources::stats_resource::StatsResource; use torrust_tracker::api::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; use torrust_tracker::config::Configuration; use torrust_tracker::jobs::tracker_api; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; - use torrust_tracker::protocol::common::{InfoHash, PeerId}; + use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::key::Auth; - use torrust_tracker::tracker::peer::TorrentPeer; + use torrust_tracker::tracker::peer::{self, TorrentPeer}; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; @@ -189,7 +189,7 @@ mod tracker_api { fn sample_torrent_peer() -> (TorrentPeer, TorrentPeerResource) { let torrent_peer = TorrentPeer { - peer_id: PeerId(*b"-qB00000000000000000"), + peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes(0), @@ -310,7 +310,7 @@ mod tracker_api { Self { connection_info } } - pub async fn generate_auth_key(&self, seconds_valid: i32) -> AuthKeyResource { + pub async fn generate_auth_key(&self, seconds_valid: i32) -> AuthKey { let url = format!( 
"http://{}/api/key/{}?token={}", &self.connection_info.bind_address, &seconds_valid, &self.connection_info.api_token From ec21df90b3f6bb949f150ad13dcafadba6a7d18d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 28 Nov 2022 19:38:50 +0100 Subject: [PATCH 0252/1003] rename: Key::Auth to auth::Key --- .../{auth_key_resource.rs => auth_key.rs} | 18 +++++----- src/api/resources/mod.rs | 3 +- src/api/server.rs | 2 +- src/databases/database.rs | 8 ++--- src/databases/mysql.rs | 16 ++++----- src/databases/sqlite.rs | 14 ++++---- src/http/filters.rs | 9 +++-- src/http/handlers.rs | 15 ++++---- src/tracker/{key.rs => auth.rs} | 34 +++++++++---------- src/tracker/mod.rs | 17 +++++----- tests/api.rs | 6 ++-- 11 files changed, 72 insertions(+), 70 deletions(-) rename src/api/resources/{auth_key_resource.rs => auth_key.rs} (88%) rename src/tracker/{key.rs => auth.rs} (83%) diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resources/auth_key.rs similarity index 88% rename from src/api/resources/auth_key_resource.rs rename to src/api/resources/auth_key.rs index b575984db..d5c08f496 100644 --- a/src/api/resources/auth_key_resource.rs +++ b/src/api/resources/auth_key.rs @@ -3,7 +3,7 @@ use std::convert::From; use serde::{Deserialize, Serialize}; use crate::protocol::clock::DurationSinceUnixEpoch; -use crate::tracker::key::Auth; +use crate::tracker::auth; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct AuthKey { @@ -11,9 +11,9 @@ pub struct AuthKey { pub valid_until: Option, } -impl From for Auth { +impl From for auth::Key { fn from(auth_key_resource: AuthKey) -> Self { - Auth { + auth::Key { key: auth_key_resource.key, valid_until: auth_key_resource .valid_until @@ -22,8 +22,8 @@ impl From for Auth { } } -impl From for AuthKey { - fn from(auth_key: Auth) -> Self { +impl From for AuthKey { + fn from(auth_key: auth::Key) -> Self { AuthKey { key: auth_key.key, valid_until: auth_key.valid_until.map(|valid_until| valid_until.as_secs()), @@ -37,7 
+37,7 @@ mod tests { use super::AuthKey; use crate::protocol::clock::{Current, TimeNow}; - use crate::tracker::key::Auth; + use crate::tracker::auth; #[test] fn it_should_be_convertible_into_an_auth_key() { @@ -49,8 +49,8 @@ mod tests { }; assert_eq!( - Auth::from(auth_key_resource), - Auth { + auth::Key::from(auth_key_resource), + auth::Key { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()) } @@ -61,7 +61,7 @@ mod tests { fn it_should_be_convertible_from_an_auth_key() { let duration_in_secs = 60; - let auth_key = Auth { + let auth_key = auth::Key { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()), }; diff --git a/src/api/resources/mod.rs b/src/api/resources/mod.rs index 2b3e4b886..f708fc2e4 100644 --- a/src/api/resources/mod.rs +++ b/src/api/resources/mod.rs @@ -6,6 +6,7 @@ //! - [ ] `TorrentResource`, `TorrentListItemResource`, `TorrentPeerResource`, `PeerIdResource` //! - [ ] `StatsResource` //! - [ ] ... 
-pub mod auth_key_resource; + +pub mod auth_key; pub mod stats_resource; pub mod torrent_resource; diff --git a/src/api/server.rs b/src/api/server.rs index 61fd8ed3d..af2d66458 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -7,7 +7,7 @@ use std::time::Duration; use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; -use super::resources::auth_key_resource::AuthKey; +use super::resources::auth_key::AuthKey; use super::resources::stats_resource::StatsResource; use super::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; use crate::protocol::info_hash::InfoHash; diff --git a/src/databases/database.rs b/src/databases/database.rs index 7055d2a09..a4dae57ee 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::databases::mysql::Mysql; use crate::databases::sqlite::Sqlite; use crate::protocol::info_hash::InfoHash; -use crate::tracker::key::Auth; +use crate::tracker::auth; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub enum Drivers { @@ -42,7 +42,7 @@ pub trait Database: Sync + Send { async fn load_persistent_torrents(&self) -> Result, Error>; - async fn load_keys(&self) -> Result, Error>; + async fn load_keys(&self) -> Result, Error>; async fn load_whitelist(&self) -> Result, Error>; @@ -54,9 +54,9 @@ pub trait Database: Sync + Send { async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; - async fn get_key_from_keys(&self, key: &str) -> Result; + async fn get_key_from_keys(&self, key: &str) -> Result; - async fn add_key_to_keys(&self, auth_key: &Auth) -> Result; + async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result; async fn remove_key_from_keys(&self, key: &str) -> Result; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 0dafc3a60..0d79315c6 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -12,7 +12,7 @@ use 
crate::databases::database; use crate::databases::database::{Database, Error}; use crate::protocol::common::AUTH_KEY_LENGTH; use crate::protocol::info_hash::InfoHash; -use crate::tracker::key::Auth; +use crate::tracker::auth; pub struct Mysql { pool: Pool, @@ -61,7 +61,7 @@ impl Database for Mysql { PRIMARY KEY (`id`), UNIQUE (`key`) );", - i8::try_from(AUTH_KEY_LENGTH).expect("Auth Key Length Should fit within a i8!") + i8::try_from(AUTH_KEY_LENGTH).expect("auth::Auth Key Length Should fit within a i8!") ); let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; @@ -91,13 +91,13 @@ impl Database for Mysql { Ok(torrents) } - async fn load_keys(&self) -> Result, Error> { + async fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - let keys: Vec = conn + let keys: Vec = conn .query_map( "SELECT `key`, valid_until FROM `keys`", - |(key, valid_until): (String, i64)| Auth { + |(key, valid_until): (String, i64)| auth::Key { key, valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }, @@ -183,14 +183,14 @@ impl Database for Mysql { } } - async fn get_key_from_keys(&self, key: &str) -> Result { + async fn get_key_from_keys(&self, key: &str) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn .exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) .map_err(|_| database::Error::QueryReturnedNoRows)? 
{ - Some((key, valid_until)) => Ok(Auth { + Some((key, valid_until)) => Ok(auth::Key { key, valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }), @@ -198,7 +198,7 @@ impl Database for Mysql { } } - async fn add_key_to_keys(&self, auth_key: &Auth) -> Result { + async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let key = auth_key.key.to_string(); diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 39dea8502..c42e9382d 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -9,7 +9,7 @@ use crate::databases::database; use crate::databases::database::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; -use crate::tracker::key::Auth; +use crate::tracker::auth; pub struct Sqlite { pool: Pool, @@ -78,7 +78,7 @@ impl Database for Sqlite { Ok(torrents) } - async fn load_keys(&self) -> Result, Error> { + async fn load_keys(&self) -> Result, Error> { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; @@ -87,13 +87,13 @@ impl Database for Sqlite { let key = row.get(0)?; let valid_until: i64 = row.get(1)?; - Ok(Auth { + Ok(auth::Key { key, valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) })?; - let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); + let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); Ok(keys) } @@ -186,7 +186,7 @@ impl Database for Sqlite { } } - async fn get_key_from_keys(&self, key: &str) -> Result { + async fn get_key_from_keys(&self, key: &str) -> Result { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; @@ -196,7 +196,7 @@ impl Database for Sqlite { let key: String = row.get(0).unwrap(); let 
valid_until: i64 = row.get(1).unwrap(); - Ok(Auth { + Ok(auth::Key { key, valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) @@ -205,7 +205,7 @@ impl Database for Sqlite { } } - async fn add_key_to_keys(&self, auth_key: &Auth) -> Result { + async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.execute( diff --git a/src/http/filters.rs b/src/http/filters.rs index 484ae2311..e9432e191 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -10,8 +10,7 @@ use super::request::{Announce, AnnounceQuery, Scrape}; use super::WebResult; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; -use crate::tracker::key::Auth; -use crate::tracker::{self, peer}; +use crate::tracker::{self, auth, peer}; /// Pass Arc along #[must_use] @@ -35,10 +34,10 @@ pub fn with_peer_id() -> impl Filter + /// Pass Arc along #[must_use] -pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { +pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() - .map(|key: String| Auth::from_string(&key)) - .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) + .map(|key: String| auth::Key::from_string(&key)) + .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) } /// Check for `PeerAddress` diff --git a/src/http/handlers.rs b/src/http/handlers.rs index ff5469168..8d8816885 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -11,15 +11,18 @@ use super::error::Error; use super::response::{self, Peer, ScrapeEntry}; use super::{request, WebResult}; use crate::protocol::info_hash::InfoHash; -use crate::tracker::key::Auth; -use crate::tracker::{self, peer, statistics, torrent}; +use crate::tracker::{self, auth, peer, statistics, torrent}; -/// Authenticate `InfoHash` using optional `AuthKey` +/// Authenticate `InfoHash` using optional `auth::Key` /// /// # Errors 
/// /// Will return `ServerError` that wraps the `Error` if unable to `authenticate_request`. -pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), Error> { +pub async fn authenticate( + info_hash: &InfoHash, + auth_key: &Option, + tracker: Arc, +) -> Result<(), Error> { tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { torrent::Error::TorrentNotWhitelisted => Error::TorrentNotWhitelisted, torrent::Error::PeerNotAuthenticated => Error::PeerNotAuthenticated, @@ -37,7 +40,7 @@ pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. pub async fn handle_announce( announce_request: request::Announce, - auth_key: Option, + auth_key: Option, tracker: Arc, ) -> WebResult { authenticate(&announce_request.info_hash, &auth_key, tracker.clone()) @@ -83,7 +86,7 @@ pub async fn handle_announce( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. pub async fn handle_scrape( scrape_request: request::Scrape, - auth_key: Option, + auth_key: Option, tracker: Arc, ) -> WebResult { let mut files: HashMap = HashMap::new(); diff --git a/src/tracker/key.rs b/src/tracker/auth.rs similarity index 83% rename from src/tracker/key.rs rename to src/tracker/auth.rs index 673780ad0..7ac6d6939 100644 --- a/src/tracker/key.rs +++ b/src/tracker/auth.rs @@ -13,7 +13,7 @@ use crate::protocol::common::AUTH_KEY_LENGTH; /// # Panics /// /// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. 
-pub fn generate(lifetime: Duration) -> Auth { +pub fn generate(lifetime: Duration) -> Key { let key: String = thread_rng() .sample_iter(&Alphanumeric) .take(AUTH_KEY_LENGTH) @@ -22,7 +22,7 @@ pub fn generate(lifetime: Duration) -> Auth { debug!("Generated key: {}, valid for: {:?} seconds", key, lifetime); - Auth { + Key { key, valid_until: Some(Current::add(&lifetime).unwrap()), } @@ -33,7 +33,7 @@ pub fn generate(lifetime: Duration) -> Auth { /// Will return `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. /// /// Will return `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. -pub fn verify(auth_key: &Auth) -> Result<(), Error> { +pub fn verify(auth_key: &Key) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = Current::now(); match auth_key.valid_until { @@ -49,25 +49,25 @@ pub fn verify(auth_key: &Auth) -> Result<(), Error> { } #[derive(Serialize, Debug, Eq, PartialEq, Clone)] -pub struct Auth { +pub struct Key { pub key: String, pub valid_until: Option, } -impl Auth { +impl Key { #[must_use] - pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { + pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { - Some(Auth { key, valid_until: None }) + Some(Key { key, valid_until: None }) } else { None } } #[must_use] - pub fn from_string(key: &str) -> Option { + pub fn from_string(key: &str) -> Option { if key.len() == AUTH_KEY_LENGTH { - Some(Auth { + Some(Key { key: key.to_string(), valid_until: None, }) @@ -100,11 +100,11 @@ mod tests { use std::time::Duration; use crate::protocol::clock::{Current, StoppedTime}; - use crate::tracker::key; + use crate::tracker::auth; #[test] fn auth_key_from_buffer() { - let auth_key = key::Auth::from_buffer([ + let auth_key = auth::Key::from_buffer([ 89, 90, 83, 108, 52, 108, 77, 90, 117, 112, 82, 117, 79, 112, 83, 82, 67, 51, 107, 114, 73, 75, 82, 53, 66, 80, 66, 49, 52, 110, 114, 74, ]); @@ 
-116,7 +116,7 @@ mod tests { #[test] fn auth_key_from_string() { let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = key::Auth::from_string(key_string); + let auth_key = auth::Key::from_string(key_string); assert!(auth_key.is_some()); assert_eq!(auth_key.unwrap().key, key_string); @@ -124,9 +124,9 @@ mod tests { #[test] fn generate_valid_auth_key() { - let auth_key = key::generate(Duration::new(9999, 0)); + let auth_key = auth::generate(Duration::new(9999, 0)); - assert!(key::verify(&auth_key).is_ok()); + assert!(auth::verify(&auth_key).is_ok()); } #[test] @@ -135,16 +135,16 @@ mod tests { Current::local_set_to_system_time_now(); // Make key that is valid for 19 seconds. - let auth_key = key::generate(Duration::from_secs(19)); + let auth_key = auth::generate(Duration::from_secs(19)); // Mock the time has passed 10 sec. Current::local_add(&Duration::from_secs(10)).unwrap(); - assert!(key::verify(&auth_key).is_ok()); + assert!(auth::verify(&auth_key).is_ok()); // Mock the time has passed another 10 sec. Current::local_add(&Duration::from_secs(10)).unwrap(); - assert!(key::verify(&auth_key).is_err()); + assert!(auth::verify(&auth_key).is_err()); } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 508280b1a..806efee54 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1,4 +1,4 @@ -pub mod key; +pub mod auth; pub mod mode; pub mod peer; pub mod statistics; @@ -17,12 +17,11 @@ use crate::config::Configuration; use crate::databases::database; use crate::databases::database::Database; use crate::protocol::info_hash::InfoHash; -use crate::tracker::key::Auth; pub struct Tracker { pub config: Arc, mode: mode::Mode, - keys: RwLock>, + keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, stats_event_sender: Option>, @@ -68,8 +67,8 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. 
- pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { - let auth_key = key::generate(lifetime); + pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { + let auth_key = auth::generate(lifetime); self.database.add_key_to_keys(&auth_key).await?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); Ok(auth_key) @@ -87,10 +86,10 @@ impl Tracker { /// # Errors /// /// Will return a `key::Error` if unable to get any `auth_key`. - pub async fn verify_auth_key(&self, auth_key: &Auth) -> Result<(), key::Error> { + pub async fn verify_auth_key(&self, auth_key: &auth::Key) -> Result<(), auth::Error> { match self.keys.read().await.get(&auth_key.key) { - None => Err(key::Error::KeyInvalid), - Some(key) => key::verify(key), + None => Err(auth::Error::KeyInvalid), + Some(key) => auth::verify(key), } } @@ -174,7 +173,7 @@ impl Tracker { /// Will return a `torrent::Error::PeerNotAuthenticated` if the `key` is `None`. /// /// Will return a `torrent::Error::TorrentNotWhitelisted` if the the Tracker is in listed mode and the `info_hash` is not whitelisted. 
- pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), torrent::Error> { + pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), torrent::Error> { // no authentication needed in public mode if self.is_public() { return Ok(()); diff --git a/tests/api.rs b/tests/api.rs index 4f119e6d0..22a222698 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -16,14 +16,14 @@ mod tracker_api { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use reqwest::Response; use tokio::task::JoinHandle; - use torrust_tracker::api::resources::auth_key_resource::AuthKey; + use torrust_tracker::api::resources::auth_key::AuthKey; use torrust_tracker::api::resources::stats_resource::StatsResource; use torrust_tracker::api::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; use torrust_tracker::config::Configuration; use torrust_tracker::jobs::tracker_api; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::key::Auth; + use torrust_tracker::tracker::auth; use torrust_tracker::tracker::peer::{self, TorrentPeer}; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; @@ -44,7 +44,7 @@ mod tracker_api { assert!(api_server .tracker .unwrap() - .verify_auth_key(&Auth::from(auth_key)) + .verify_auth_key(&auth::Key::from(auth_key)) .await .is_ok()); } From 36452717a0b21e850adcc7d00282500359cc4a26 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 28 Nov 2022 19:57:03 +0100 Subject: [PATCH 0253/1003] refactor: rename inside databases --- src/config.rs | 6 +-- src/databases/database.rs | 94 --------------------------------------- src/databases/driver.rs | 7 +++ src/databases/error.rs | 21 +++++++++ src/databases/mod.rs | 72 +++++++++++++++++++++++++++++- src/databases/mysql.rs | 67 ++++++++++++++-------------- 
src/databases/sqlite.rs | 71 +++++++++++++++-------------- src/tracker/mod.rs | 21 +++++---- 8 files changed, 180 insertions(+), 179 deletions(-) delete mode 100644 src/databases/database.rs create mode 100644 src/databases/driver.rs create mode 100644 src/databases/error.rs diff --git a/src/config.rs b/src/config.rs index 67177aca1..a7e7e9df6 100644 --- a/src/config.rs +++ b/src/config.rs @@ -9,7 +9,7 @@ use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; use {std, toml}; -use crate::databases::database::Drivers; +use crate::databases::driver::Driver; use crate::tracker::mode; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] @@ -42,7 +42,7 @@ pub struct HttpApi { pub struct Configuration { pub log_level: Option, pub mode: mode::Mode, - pub db_driver: Drivers, + pub db_driver: Driver, pub db_path: String, pub announce_interval: u32, pub min_announce_interval: u32, @@ -98,7 +98,7 @@ impl Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), mode: mode::Mode::Public, - db_driver: Drivers::Sqlite3, + db_driver: Driver::Sqlite3, db_path: String::from("data.db"), announce_interval: 120, min_announce_interval: 120, diff --git a/src/databases/database.rs b/src/databases/database.rs deleted file mode 100644 index a4dae57ee..000000000 --- a/src/databases/database.rs +++ /dev/null @@ -1,94 +0,0 @@ -use async_trait::async_trait; -use derive_more::{Display, Error}; -use serde::{Deserialize, Serialize}; - -use crate::databases::mysql::Mysql; -use crate::databases::sqlite::Sqlite; -use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth; - -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub enum Drivers { - Sqlite3, - MySQL, -} - -/// # Errors -/// -/// Will return `r2d2::Error` if `db_path` is not able to create a database. 
-pub fn connect(db_driver: &Drivers, db_path: &str) -> Result, r2d2::Error> { - let database: Box = match db_driver { - Drivers::Sqlite3 => { - let db = Sqlite::new(db_path)?; - Box::new(db) - } - Drivers::MySQL => { - let db = Mysql::new(db_path)?; - Box::new(db) - } - }; - - database.create_database_tables().expect("Could not create database tables."); - - Ok(database) -} - -#[async_trait] -pub trait Database: Sync + Send { - /// # Errors - /// - /// Will return `Error` if unable to create own tables. - fn create_database_tables(&self) -> Result<(), Error>; - - async fn load_persistent_torrents(&self) -> Result, Error>; - - async fn load_keys(&self) -> Result, Error>; - - async fn load_whitelist(&self) -> Result, Error>; - - async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; - - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result; - - async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; - - async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; - - async fn get_key_from_keys(&self, key: &str) -> Result; - - async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result; - - async fn remove_key_from_keys(&self, key: &str) -> Result; - - async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { - self.get_info_hash_from_whitelist(&info_hash.clone().to_string()) - .await - .map_or_else( - |e| match e { - Error::QueryReturnedNoRows => Ok(false), - e => Err(e), - }, - |_| Ok(true), - ) - } -} - -#[derive(Debug, Display, PartialEq, Eq, Error)] -#[allow(dead_code)] -pub enum Error { - #[display(fmt = "Query returned no rows.")] - QueryReturnedNoRows, - #[display(fmt = "Invalid query.")] - InvalidQuery, - #[display(fmt = "Database error.")] - DatabaseError, -} - -impl From for Error { - fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { - match e { - r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows, - _ => 
Error::InvalidQuery, - } - } -} diff --git a/src/databases/driver.rs b/src/databases/driver.rs new file mode 100644 index 000000000..7eaa9064e --- /dev/null +++ b/src/databases/driver.rs @@ -0,0 +1,7 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] +pub enum Driver { + Sqlite3, + MySQL, +} diff --git a/src/databases/error.rs b/src/databases/error.rs new file mode 100644 index 000000000..467db407f --- /dev/null +++ b/src/databases/error.rs @@ -0,0 +1,21 @@ +use derive_more::{Display, Error}; + +#[derive(Debug, Display, PartialEq, Eq, Error)] +#[allow(dead_code)] +pub enum Error { + #[display(fmt = "Query returned no rows.")] + QueryReturnedNoRows, + #[display(fmt = "Invalid query.")] + InvalidQuery, + #[display(fmt = "Database error.")] + DatabaseError, +} + +impl From for Error { + fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { + match e { + r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows, + _ => Error::InvalidQuery, + } + } +} diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 169d99f4d..c1d265b56 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -1,3 +1,73 @@ -pub mod database; +pub mod driver; +pub mod error; pub mod mysql; pub mod sqlite; + +use async_trait::async_trait; + +use self::driver::Driver; +use self::error::Error; +use crate::databases::mysql::Mysql; +use crate::databases::sqlite::Sqlite; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::auth; + +/// # Errors +/// +/// Will return `r2d2::Error` if `db_path` is not able to create a database. 
+pub fn connect(db_driver: &Driver, db_path: &str) -> Result, r2d2::Error> { + let database: Box = match db_driver { + Driver::Sqlite3 => { + let db = Sqlite::new(db_path)?; + Box::new(db) + } + Driver::MySQL => { + let db = Mysql::new(db_path)?; + Box::new(db) + } + }; + + database.create_database_tables().expect("Could not create database tables."); + + Ok(database) +} + +#[async_trait] +pub trait Database: Sync + Send { + /// # Errors + /// + /// Will return `Error` if unable to create own tables. + fn create_database_tables(&self) -> Result<(), Error>; + + async fn load_persistent_torrents(&self) -> Result, Error>; + + async fn load_keys(&self) -> Result, Error>; + + async fn load_whitelist(&self) -> Result, Error>; + + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; + + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result; + + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; + + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; + + async fn get_key_from_keys(&self, key: &str) -> Result; + + async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result; + + async fn remove_key_from_keys(&self, key: &str) -> Result; + + async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { + self.get_info_hash_from_whitelist(&info_hash.clone().to_string()) + .await + .map_or_else( + |e| match e { + Error::QueryReturnedNoRows => Ok(false), + e => Err(e), + }, + |_| Ok(true), + ) + } +} diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 0d79315c6..8322b2273 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -8,8 +8,7 @@ use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MysqlConnectionManager; -use crate::databases::database; -use crate::databases::database::{Database, Error}; +use crate::databases::{Database, Error}; use 
crate::protocol::common::AUTH_KEY_LENGTH; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth; @@ -36,7 +35,7 @@ impl Mysql { #[async_trait] impl Database for Mysql { - fn create_database_tables(&self) -> Result<(), database::Error> { + fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( id integer PRIMARY KEY AUTO_INCREMENT, @@ -64,7 +63,7 @@ impl Database for Mysql { i8::try_from(AUTH_KEY_LENGTH).expect("auth::Auth Key Length Should fit within a i8!") ); - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; conn.query_drop(&create_torrents_table) .expect("Could not create torrents table."); @@ -75,8 +74,8 @@ impl Database for Mysql { Ok(()) } - async fn load_persistent_torrents(&self) -> Result, database::Error> { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn load_persistent_torrents(&self) -> Result, Error> { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let torrents: Vec<(InfoHash, u32)> = conn .query_map( @@ -86,13 +85,13 @@ impl Database for Mysql { (info_hash, completed) }, ) - .map_err(|_| database::Error::QueryReturnedNoRows)?; + .map_err(|_| Error::QueryReturnedNoRows)?; Ok(torrents) } async fn load_keys(&self) -> Result, Error> { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let keys: Vec = conn .query_map( @@ -102,25 +101,25 @@ impl Database for Mysql { valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }, ) - .map_err(|_| database::Error::QueryReturnedNoRows)?; + .map_err(|_| Error::QueryReturnedNoRows)?; Ok(keys) } async fn load_whitelist(&self) -> Result, Error> { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|_| 
Error::DatabaseError)?; let info_hashes: Vec = conn .query_map("SELECT info_hash FROM whitelist", |info_hash: String| { InfoHash::from_str(&info_hash).unwrap() }) - .map_err(|_| database::Error::QueryReturnedNoRows)?; + .map_err(|_| Error::QueryReturnedNoRows)?; Ok(info_hashes) } - async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let info_hash_str = info_hash.to_string(); @@ -132,28 +131,28 @@ impl Database for Mysql { } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn .exec_first::( "SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", params! { info_hash }, ) - .map_err(|_| database::Error::DatabaseError)? + .map_err(|_| Error::DatabaseError)? 
{ Some(info_hash) => Ok(InfoHash::from_str(&info_hash).unwrap()), - None => Err(database::Error::QueryReturnedNoRows), + None => Err(Error::QueryReturnedNoRows), } } - async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let info_hash_str = info_hash.to_string(); @@ -164,13 +163,13 @@ impl Database for Mysql { Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let info_hash = info_hash.to_string(); @@ -178,28 +177,28 @@ impl Database for Mysql { Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn get_key_from_keys(&self, key: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn get_key_from_keys(&self, key: &str) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn .exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) - .map_err(|_| database::Error::QueryReturnedNoRows)? + .map_err(|_| Error::QueryReturnedNoRows)? 
{ Some((key, valid_until)) => Ok(auth::Key { key, valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }), - None => Err(database::Error::InvalidQuery), + None => Err(Error::InvalidQuery), } } - async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let key = auth_key.key.to_string(); let valid_until = auth_key.valid_until.unwrap_or(Duration::ZERO).as_secs().to_string(); @@ -211,19 +210,19 @@ impl Database for Mysql { Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn remove_key_from_keys(&self, key: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn remove_key_from_keys(&self, key: &str) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! 
{ key }) { Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index c42e9382d..c5401aacf 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -5,8 +5,7 @@ use log::debug; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; -use crate::databases::database; -use crate::databases::database::{Database, Error}; +use crate::databases::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth; @@ -28,7 +27,7 @@ impl Sqlite { #[async_trait] impl Database for Sqlite { - fn create_database_tables(&self) -> Result<(), database::Error> { + fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( id INTEGER PRIMARY KEY AUTOINCREMENT, @@ -52,17 +51,17 @@ impl Database for Sqlite { );" .to_string(); - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; conn.execute(&create_whitelist_table, []) .and_then(|_| conn.execute(&create_keys_table, [])) .and_then(|_| conn.execute(&create_torrents_table, [])) - .map_err(|_| database::Error::InvalidQuery) + .map_err(|_| Error::InvalidQuery) .map(|_| ()) } - async fn load_persistent_torrents(&self) -> Result, database::Error> { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn load_persistent_torrents(&self) -> Result, Error> { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; @@ -79,7 +78,7 @@ impl Database for Sqlite { } async fn load_keys(&self) -> Result, Error> { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let mut stmt = 
conn.prepare("SELECT key, valid_until FROM keys")?; @@ -99,7 +98,7 @@ impl Database for Sqlite { } async fn load_whitelist(&self) -> Result, Error> { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist")?; @@ -114,8 +113,8 @@ impl Database for Sqlite { Ok(info_hashes) } - async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.execute( "INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", @@ -125,17 +124,17 @@ impl Database for Sqlite { if updated > 0 { return Ok(()); } - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; let mut rows = stmt.query([info_hash])?; @@ -143,51 +142,51 @@ impl Database for Sqlite { match rows.next() { Ok(row) => match row { Some(row) => Ok(InfoHash::from_str(&row.get_unwrap::<_, String>(0)).unwrap()), - None => Err(database::Error::QueryReturnedNoRows), + None => Err(Error::QueryReturnedNoRows), }, Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn 
add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", [info_hash.to_string()]) { Ok(updated) => { if updated > 0 { return Ok(updated); } - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", [info_hash.to_string()]) { Ok(updated) => { if updated > 0 { return Ok(updated); } - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn get_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn get_key_from_keys(&self, key: &str) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; let mut rows = stmt.query([key.to_string()])?; @@ -201,12 +200,12 @@ impl Database for Sqlite { valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) } else { - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } } - async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { - let conn = self.pool.get().map_err(|_| 
database::Error::DatabaseError)?; + async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.execute( "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", @@ -216,28 +215,28 @@ impl Database for Sqlite { if updated > 0 { return Ok(updated); } - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn remove_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn remove_key_from_keys(&self, key: &str) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.execute("DELETE FROM keys WHERE key = ?", [key]) { Ok(updated) => { if updated > 0 { return Ok(updated); } - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 806efee54..bd2da93f0 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -14,8 +14,7 @@ use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; use crate::config::Configuration; -use crate::databases::database; -use crate::databases::database::Database; +use crate::databases::{self, Database}; use crate::protocol::info_hash::InfoHash; pub struct Tracker { @@ -38,7 +37,7 @@ impl Tracker { stats_event_sender: Option>, stats_repository: statistics::Repo, ) -> Result { - let database = database::connect(&config.db_driver, &config.db_path)?; + let database = databases::connect(&config.db_driver, &config.db_path)?; Ok(Tracker { config: config.clone(), @@ -67,7 +66,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. 
- pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { + pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { let auth_key = auth::generate(lifetime); self.database.add_key_to_keys(&auth_key).await?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); @@ -77,7 +76,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to remove the `key` to the database. - pub async fn remove_auth_key(&self, key: &str) -> Result<(), database::Error> { + pub async fn remove_auth_key(&self, key: &str) -> Result<(), databases::error::Error> { self.database.remove_key_from_keys(key).await?; self.keys.write().await.remove(key); Ok(()) @@ -96,7 +95,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to `load_keys` from the database. - pub async fn load_keys(&self) -> Result<(), database::Error> { + pub async fn load_keys(&self) -> Result<(), databases::error::Error> { let keys_from_database = self.database.load_keys().await?; let mut keys = self.keys.write().await; @@ -114,14 +113,14 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. 
- pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { self.add_torrent_to_database_whitelist(info_hash).await?; self.add_torrent_to_memory_whitelist(info_hash).await; Ok(()) } /// It adds a torrent to the whitelist if it has not been whitelisted previously - async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { if self.database.is_info_hash_whitelisted(info_hash).await.unwrap() { return Ok(()); } @@ -140,7 +139,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. - pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { self.database.remove_info_hash_from_whitelist(*info_hash).await?; self.whitelist.write().await.remove(info_hash); Ok(()) @@ -153,7 +152,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. - pub async fn load_whitelist(&self) -> Result<(), database::Error> { + pub async fn load_whitelist(&self) -> Result<(), databases::error::Error> { let whitelisted_torrents_from_database = self.database.load_whitelist().await?; let mut whitelist = self.whitelist.write().await; @@ -206,7 +205,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. 
- pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { + pub async fn load_persistent_torrents(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; let mut torrents = self.torrents.write().await; From 32eb44b318ddc57b574bae940ee384984bf7a24e Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 28 Nov 2022 20:08:37 +0100 Subject: [PATCH 0254/1003] refactor: rename inside http --- src/http/filters.rs | 21 ++++++++++----------- src/http/handlers.rs | 17 ++++++++--------- 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/src/http/filters.rs b/src/http/filters.rs index e9432e191..0fe369eba 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -6,8 +6,7 @@ use std::sync::Arc; use warp::{reject, Filter, Rejection}; use super::error::Error; -use super::request::{Announce, AnnounceQuery, Scrape}; -use super::WebResult; +use super::{request, WebResult}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; use crate::tracker::{self, auth, peer}; @@ -51,10 +50,10 @@ pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { - warp::filters::query::query::() +pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { + warp::filters::query::query::() .and(with_info_hash()) .and(with_peer_id()) .and(with_peer_addr(on_reverse_proxy)) @@ -63,7 +62,7 @@ pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { +pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::any() .and(with_info_hash()) .and(with_peer_addr(on_reverse_proxy)) @@ -162,12 +161,12 @@ fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, peer_id: peer::Id, peer_addr: IpAddr, -) -> WebResult { - Ok(Announce { +) -> WebResult { + Ok(request::Announce { info_hash: info_hashes[0], peer_addr, downloaded: 
announce_request_query.downloaded.unwrap_or(0), @@ -182,6 +181,6 @@ fn announce_request( /// Parse `ScrapeRequest` from `InfoHash` #[allow(clippy::unnecessary_wraps)] -fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { - Ok(Scrape { info_hashes, peer_addr }) +fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { + Ok(request::Scrape { info_hashes, peer_addr }) } diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 8d8816885..0e230e785 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -8,8 +8,7 @@ use warp::http::Response; use warp::{reject, Rejection, Reply}; use super::error::Error; -use super::response::{self, Peer, ScrapeEntry}; -use super::{request, WebResult}; +use super::{request, response, WebResult}; use crate::protocol::info_hash::InfoHash; use crate::tracker::{self, auth, peer, statistics, torrent}; @@ -89,7 +88,7 @@ pub async fn handle_scrape( auth_key: Option, tracker: Arc, ) -> WebResult { - let mut files: HashMap = HashMap::new(); + let mut files: HashMap = HashMap::new(); let db = tracker.get_torrents().await; for info_hash in &scrape_request.info_hashes { @@ -97,20 +96,20 @@ pub async fn handle_scrape( Some(torrent_info) => { if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { let (seeders, completed, leechers) = torrent_info.get_stats(); - ScrapeEntry { + response::ScrapeEntry { complete: seeders, downloaded: completed, incomplete: leechers, } } else { - ScrapeEntry { + response::ScrapeEntry { complete: 0, downloaded: 0, incomplete: 0, } } } - None => ScrapeEntry { + None => response::ScrapeEntry { complete: 0, downloaded: 0, incomplete: 0, @@ -142,9 +141,9 @@ fn send_announce_response( interval: u32, interval_min: u32, ) -> WebResult { - let http_peers: Vec = peers + let http_peers: Vec = peers .iter() - .map(|peer| Peer { + .map(|peer| response::Peer { peer_id: peer.peer_id.to_string(), ip: peer.peer_addr.ip(), port: peer.peer_addr.port(), @@ -171,7 +170,7 @@ fn 
send_announce_response( } /// Send scrape response -fn send_scrape_response(files: HashMap) -> WebResult { +fn send_scrape_response(files: HashMap) -> WebResult { let res = response::Scrape { files }; match res.write() { From 49a6acbcb922d24016a4d33a91bf4e89f0c09cf8 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 17:21:10 +0100 Subject: [PATCH 0255/1003] ci: clippy warning as errors --- .github/workflows/test_build_release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 4acf14277..3924eea4b 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -45,7 +45,7 @@ jobs: uses: actions-rs/cargo@v1 with: command: clippy - args: --all-targets + args: --all-targets -- -D clippy::pedantic - uses: taiki-e/install-action@cargo-llvm-cov - uses: taiki-e/install-action@nextest - name: Run Tests From 01e71bfe5c0a0c7cd0e54af8096cd6adf8d67efe Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 30 Nov 2022 15:31:52 +0100 Subject: [PATCH 0256/1003] clippy: fix src/tracker/peer.rs --- src/api/resources/torrent_resource.rs | 6 +-- src/http/handlers.rs | 5 +-- src/tracker/mod.rs | 6 +-- src/tracker/peer.rs | 54 +++++++++++++-------------- src/tracker/torrent.rs | 16 ++++---- src/udp/handlers.rs | 8 ++-- tests/api.rs | 6 +-- 7 files changed, 49 insertions(+), 52 deletions(-) diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs index 4063b95f5..bc1a9acf5 100644 --- a/src/api/resources/torrent_resource.rs +++ b/src/api/resources/torrent_resource.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -use crate::tracker::peer::{self, TorrentPeer}; +use crate::tracker::peer::{self, Peer}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct TorrentResource { @@ -50,9 +50,9 @@ impl From for PeerIdResource { } } -impl From for TorrentPeerResource { +impl 
From for TorrentPeerResource { #[allow(deprecated)] - fn from(peer: TorrentPeer) -> Self { + fn from(peer: Peer) -> Self { TorrentPeerResource { peer_id: PeerIdResource::from(peer.peer_id), peer_addr: peer.peer_addr.to_string(), diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 0e230e785..2fc878354 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -48,8 +48,7 @@ pub async fn handle_announce( debug!("{:?}", announce_request); - let peer = - peer::TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); + let peer = peer::Peer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); let torrent_stats = tracker .update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer) .await; @@ -137,7 +136,7 @@ pub async fn handle_scrape( fn send_announce_response( announce_request: &request::Announce, torrent_stats: &torrent::Stats, - peers: &Vec, + peers: &Vec, interval: u32, interval_min: u32, ) -> WebResult { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index bd2da93f0..4b2dabebb 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -227,7 +227,7 @@ impl Tracker { } /// Get all torrent peers for a given torrent filtering out the peer with the client address - pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { + pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { @@ -237,7 +237,7 @@ impl Tracker { } /// Get all torrent peers for a given torrent - pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { + pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { @@ -246,7 +246,7 @@ impl Tracker { } } - pub async fn 
update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::TorrentPeer) -> torrent::Stats { + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::Stats { let mut torrents = self.torrents.write().await; let torrent_entry = match torrents.entry(*info_hash) { diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 16aada0ed..2da257d3e 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -10,7 +10,7 @@ use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; use crate::protocol::utils::ser_unix_time_value; #[derive(PartialEq, Eq, Debug, Clone, Serialize, Copy)] -pub struct TorrentPeer { +pub struct Peer { pub peer_id: Id, pub peer_addr: SocketAddr, #[serde(serialize_with = "ser_unix_time_value")] @@ -25,16 +25,16 @@ pub struct TorrentPeer { pub event: AnnounceEvent, } -impl TorrentPeer { +impl Peer { #[must_use] pub fn from_udp_announce_request( announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option, ) -> Self { - let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); + let peer_addr = Peer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); - TorrentPeer { + Peer { peer_id: Id(announce_request.peer_id.0), peer_addr, updated: Current::now(), @@ -47,7 +47,7 @@ impl TorrentPeer { #[must_use] pub fn from_http_announce_request(announce_request: &Announce, remote_ip: IpAddr, host_opt_ip: Option) -> Self { - let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); + let peer_addr = Peer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); let event: AnnounceEvent = if let Some(event) = &announce_request.event { match event.as_ref() { @@ -61,8 +61,8 @@ impl TorrentPeer { }; #[allow(clippy::cast_possible_truncation)] - 
TorrentPeer { - peer_id: announce_request.peer_id.clone(), + Peer { + peer_id: announce_request.peer_id, peer_addr, updated: Current::now(), uploaded: NumberOfBytes(i128::from(announce_request.uploaded) as i64), @@ -104,6 +104,9 @@ impl std::fmt::Display for Id { impl Id { #[must_use] + /// # Panics + /// + /// It will panic if the `binascii::bin2hex` from a too-small output buffer. pub fn get_id(&self) -> Option { let buff_size = self.0.len() * 2; let mut tmp: Vec = vec![0; buff_size]; @@ -202,11 +205,6 @@ impl Serialize for Id { client: Option<&'a str>, } - let buff_size = self.0.len() * 2; - let mut tmp: Vec = vec![0; buff_size]; - binascii::bin2hex(&self.0, &mut tmp).unwrap(); - let id = std::str::from_utf8(&tmp).ok(); - let obj = PeerIdInfo { id: self.get_id(), client: self.get_client_name(), @@ -224,11 +222,11 @@ mod test { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use crate::protocol::clock::{Current, Time}; - use crate::tracker::peer::{self, TorrentPeer}; + use crate::tracker::peer::{self, Peer}; #[test] fn it_should_be_serializable() { - let torrent_peer = TorrentPeer { + let torrent_peer = Peer { peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: Current::now(), @@ -256,7 +254,7 @@ mod test { AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, }; - use crate::tracker::peer::TorrentPeer; + use crate::tracker::peer::Peer; use crate::udp::connection_cookie::{into_connection_id, make}; // todo: duplicate functions is PR 82. Remove duplication once both PR are merged. 
@@ -308,7 +306,7 @@ mod test { let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); let announce_request = AnnounceRequestBuilder::default().into(); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); } @@ -318,7 +316,7 @@ mod test { let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); let announce_request = AnnounceRequestBuilder::default().into(); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); } @@ -329,14 +327,14 @@ mod test { use std::str::FromStr; use crate::tracker::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; - use crate::tracker::peer::TorrentPeer; + use crate::tracker::peer::Peer; #[test] fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); let announce_request = AnnounceRequestBuilder::default().into(); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); } @@ -347,7 +345,7 @@ mod test { let announce_request = AnnounceRequestBuilder::default().into(); let host_opt_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); 
assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); } @@ -358,7 +356,7 @@ mod test { let announce_request = AnnounceRequestBuilder::default().into(); let host_opt_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); } @@ -370,14 +368,14 @@ mod test { use std::str::FromStr; use crate::tracker::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; - use crate::tracker::peer::TorrentPeer; + use crate::tracker::peer::Peer; #[test] fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); let announce_request = AnnounceRequestBuilder::default().into(); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); } @@ -388,7 +386,7 @@ mod test { let announce_request = AnnounceRequestBuilder::default().into(); let host_opt_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); } @@ -399,7 +397,7 @@ mod test { let announce_request = AnnounceRequestBuilder::default().into(); let host_opt_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); - 
let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); } @@ -411,7 +409,7 @@ mod test { use crate::http::request::Announce; use crate::protocol::info_hash::InfoHash; - use crate::tracker::peer::{self, TorrentPeer}; + use crate::tracker::peer::{self, Peer}; fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> Announce { Announce { @@ -434,7 +432,7 @@ mod test { let ip_in_announce_request = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); let announce_request = sample_http_announce_request(ip_in_announce_request, 8080); - let torrent_peer = TorrentPeer::from_http_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_http_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr.ip(), remote_ip); assert_ne!(torrent_peer.peer_addr.ip(), ip_in_announce_request); @@ -449,7 +447,7 @@ mod test { let announce_request = sample_http_announce_request(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), port_in_announce_request); - let torrent_peer = TorrentPeer::from_http_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_http_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr.port(), announce_request.port); assert_ne!(torrent_peer.peer_addr.port(), remote_port); diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 3e38d2340..8058ab891 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -11,7 +11,7 @@ use crate::protocol::common::MAX_SCRAPE_TORRENTS; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct Entry { #[serde(skip)] - pub peers: std::collections::BTreeMap, + pub peers: std::collections::BTreeMap, pub completed: u32, } @@ -25,7 +25,7 @@ impl Entry { } // Update peer 
and return completed (times torrent has been downloaded) - pub fn update_peer(&mut self, peer: &peer::TorrentPeer) -> bool { + pub fn update_peer(&mut self, peer: &peer::Peer) -> bool { let mut did_torrent_stats_change: bool = false; match peer.event { @@ -49,7 +49,7 @@ impl Entry { } #[must_use] - pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&peer::TorrentPeer> { + pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&peer::Peer> { self.peers .values() .filter(|peer| match client_addr { @@ -122,12 +122,12 @@ mod tests { use crate::tracker::torrent::Entry; struct TorrentPeerBuilder { - peer: peer::TorrentPeer, + peer: peer::Peer, } impl TorrentPeerBuilder { pub fn default() -> TorrentPeerBuilder { - let default_peer = peer::TorrentPeer { + let default_peer = peer::Peer { peer_id: peer::Id([0u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), updated: Current::now(), @@ -164,14 +164,14 @@ mod tests { self } - pub fn into(self) -> peer::TorrentPeer { + pub fn into(self) -> peer::Peer { self.peer } } /// A torrent seeder is a peer with 0 bytes left to download which /// has not announced it has stopped - fn a_torrent_seeder() -> peer::TorrentPeer { + fn a_torrent_seeder() -> peer::Peer { TorrentPeerBuilder::default() .with_number_of_bytes_left(0) .with_event_completed() @@ -180,7 +180,7 @@ mod tests { /// A torrent leecher is a peer that is not a seeder. 
/// Leecher: left > 0 OR event = Stopped - fn a_torrent_leecher() -> peer::TorrentPeer { + fn a_torrent_leecher() -> peer::Peer { TorrentPeerBuilder::default() .with_number_of_bytes_left(1) .with_event_completed() diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index da4bdbf35..625f42d40 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -92,7 +92,7 @@ pub async fn handle_announce( .authenticate_request(&wrapped_announce_request.info_hash, &None) .await?; - let peer = peer::TorrentPeer::from_udp_announce_request( + let peer = peer::Peer::from_udp_announce_request( &wrapped_announce_request.announce_request, remote_addr.ip(), tracker.config.get_ext_ip(), @@ -289,12 +289,12 @@ mod tests { } struct TorrentPeerBuilder { - peer: peer::TorrentPeer, + peer: peer::Peer, } impl TorrentPeerBuilder { pub fn default() -> TorrentPeerBuilder { - let default_peer = peer::TorrentPeer { + let default_peer = peer::Peer { peer_id: peer::Id([255u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: Current::now(), @@ -321,7 +321,7 @@ mod tests { self } - pub fn into(self) -> peer::TorrentPeer { + pub fn into(self) -> peer::Peer { self.peer } } diff --git a/tests/api.rs b/tests/api.rs index 22a222698..824c198e2 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -24,7 +24,7 @@ mod tracker_api { use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth; - use torrust_tracker::tracker::peer::{self, TorrentPeer}; + use torrust_tracker::tracker::peer::{self, Peer}; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; @@ -187,8 +187,8 @@ mod tracker_api { ); } - fn sample_torrent_peer() -> (TorrentPeer, TorrentPeerResource) { - let torrent_peer = TorrentPeer { + fn sample_torrent_peer() -> (Peer, TorrentPeerResource) { + let torrent_peer = Peer { peer_id: 
peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), From 0f075e4daee476db840a4dbf98e3639a84ecd1bc Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 30 Nov 2022 17:57:13 +0100 Subject: [PATCH 0257/1003] refactor: src/api/resource(s) --- src/api/mod.rs | 2 +- src/api/{resources => resource}/auth_key.rs | 0 src/api/{resources => resource}/mod.rs | 7 +- src/api/resource/peer.rs | 47 +++++++++++++ .../stats_resource.rs => resource/stats.rs} | 2 +- src/api/resource/torrent.rs | 21 ++++++ src/api/resources/torrent_resource.rs | 67 ------------------- src/api/server.rs | 15 +++-- src/http/handlers.rs | 2 +- src/tracker/mod.rs | 4 +- src/tracker/torrent.rs | 2 +- tests/api.rs | 34 +++++----- 12 files changed, 103 insertions(+), 100 deletions(-) rename src/api/{resources => resource}/auth_key.rs (100%) rename src/api/{resources => resource}/mod.rs (55%) create mode 100644 src/api/resource/peer.rs rename src/api/{resources/stats_resource.rs => resource/stats.rs} (95%) create mode 100644 src/api/resource/torrent.rs delete mode 100644 src/api/resources/torrent_resource.rs diff --git a/src/api/mod.rs b/src/api/mod.rs index 46ad24218..16abb8e27 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,2 +1,2 @@ -pub mod resources; +pub mod resource; pub mod server; diff --git a/src/api/resources/auth_key.rs b/src/api/resource/auth_key.rs similarity index 100% rename from src/api/resources/auth_key.rs rename to src/api/resource/auth_key.rs diff --git a/src/api/resources/mod.rs b/src/api/resource/mod.rs similarity index 55% rename from src/api/resources/mod.rs rename to src/api/resource/mod.rs index f708fc2e4..e86c550ca 100644 --- a/src/api/resources/mod.rs +++ b/src/api/resource/mod.rs @@ -3,10 +3,11 @@ //! WIP. Not all endpoints have their resource structs. //! //! - [x] `AuthKeys` -//! 
- [ ] `TorrentResource`, `TorrentListItemResource`, `TorrentPeerResource`, `PeerIdResource` +//! - [ ] `Torrent`, `ListItem`, `Peer`, `PeerId` //! - [ ] `StatsResource` //! - [ ] ... pub mod auth_key; -pub mod stats_resource; -pub mod torrent_resource; +pub mod peer; +pub mod stats; +pub mod torrent; diff --git a/src/api/resource/peer.rs b/src/api/resource/peer.rs new file mode 100644 index 000000000..ff84be197 --- /dev/null +++ b/src/api/resource/peer.rs @@ -0,0 +1,47 @@ +use serde::{Deserialize, Serialize}; + +use crate::tracker; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Peer { + pub peer_id: Id, + pub peer_addr: String, + #[deprecated(since = "2.0.0", note = "please use `updated_milliseconds_ago` instead")] + pub updated: u128, + pub updated_milliseconds_ago: u128, + pub uploaded: i64, + pub downloaded: i64, + pub left: i64, + pub event: String, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Id { + pub id: Option, + pub client: Option, +} + +impl From for Id { + fn from(peer_id: tracker::peer::Id) -> Self { + Id { + id: peer_id.get_id(), + client: peer_id.get_client_name().map(std::string::ToString::to_string), + } + } +} + +impl From for Peer { + #[allow(deprecated)] + fn from(peer: tracker::peer::Peer) -> Self { + Peer { + peer_id: Id::from(peer.peer_id), + peer_addr: peer.peer_addr.to_string(), + updated: peer.updated.as_millis(), + updated_milliseconds_ago: peer.updated.as_millis(), + uploaded: peer.uploaded.0, + downloaded: peer.downloaded.0, + left: peer.left.0, + event: format!("{:?}", peer.event), + } + } +} diff --git a/src/api/resources/stats_resource.rs b/src/api/resource/stats.rs similarity index 95% rename from src/api/resources/stats_resource.rs rename to src/api/resource/stats.rs index e6f184897..e87f08f63 100644 --- a/src/api/resources/stats_resource.rs +++ b/src/api/resource/stats.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] 
-pub struct StatsResource { +pub struct Stats { pub torrents: u32, pub seeders: u32, pub completed: u32, diff --git a/src/api/resource/torrent.rs b/src/api/resource/torrent.rs new file mode 100644 index 000000000..924b61b8c --- /dev/null +++ b/src/api/resource/torrent.rs @@ -0,0 +1,21 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Torrent { + pub info_hash: String, + pub seeders: u32, + pub completed: u32, + pub leechers: u32, + #[serde(skip_serializing_if = "Option::is_none")] + pub peers: Option>, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct ListItem { + pub info_hash: String, + pub seeders: u32, + pub completed: u32, + pub leechers: u32, + // todo: this is always None. Remove field from endpoint? + pub peers: Option>, +} diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs deleted file mode 100644 index bc1a9acf5..000000000 --- a/src/api/resources/torrent_resource.rs +++ /dev/null @@ -1,67 +0,0 @@ -use serde::{Deserialize, Serialize}; - -use crate::tracker::peer::{self, Peer}; - -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct TorrentResource { - pub info_hash: String, - pub seeders: u32, - pub completed: u32, - pub leechers: u32, - #[serde(skip_serializing_if = "Option::is_none")] - pub peers: Option>, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct TorrentListItemResource { - pub info_hash: String, - pub seeders: u32, - pub completed: u32, - pub leechers: u32, - // todo: this is always None. Remove field from endpoint? 
- pub peers: Option>, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct TorrentPeerResource { - pub peer_id: PeerIdResource, - pub peer_addr: String, - #[deprecated(since = "2.0.0", note = "please use `updated_milliseconds_ago` instead")] - pub updated: u128, - pub updated_milliseconds_ago: u128, - pub uploaded: i64, - pub downloaded: i64, - pub left: i64, - pub event: String, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct PeerIdResource { - pub id: Option, - pub client: Option, -} - -impl From for PeerIdResource { - fn from(peer_id: peer::Id) -> Self { - PeerIdResource { - id: peer_id.get_id(), - client: peer_id.get_client_name().map(std::string::ToString::to_string), - } - } -} - -impl From for TorrentPeerResource { - #[allow(deprecated)] - fn from(peer: Peer) -> Self { - TorrentPeerResource { - peer_id: PeerIdResource::from(peer.peer_id), - peer_addr: peer.peer_addr.to_string(), - updated: peer.updated.as_millis(), - updated_milliseconds_ago: peer.updated.as_millis(), - uploaded: peer.uploaded.0, - downloaded: peer.downloaded.0, - left: peer.left.0, - event: format!("{:?}", peer.event), - } - } -} diff --git a/src/api/server.rs b/src/api/server.rs index af2d66458..5967a8be4 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -7,9 +7,10 @@ use std::time::Duration; use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; -use super::resources::auth_key::AuthKey; -use super::resources::stats_resource::StatsResource; -use super::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; +use super::resource::auth_key::AuthKey; +use super::resource::peer; +use super::resource::stats::Stats; +use super::resource::torrent::{ListItem, Torrent}; use crate::protocol::info_hash::InfoHash; use crate::tracker; @@ -81,7 +82,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl w .iter() .map(|(info_hash, torrent_entry)| { let (seeders, completed, 
leechers) = torrent_entry.get_stats(); - TorrentListItemResource { + ListItem { info_hash: info_hash.to_string(), seeders, completed, @@ -104,7 +105,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl w .and(filters::path::end()) .map(move || api_stats.clone()) .and_then(|tracker: Arc| async move { - let mut results = StatsResource { + let mut results = Stats { torrents: 0, seeders: 0, completed: 0, @@ -179,9 +180,9 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl w let peers = torrent_entry.get_peers(None); - let peer_resources = peers.iter().map(|peer| TorrentPeerResource::from(**peer)).collect(); + let peer_resources = peers.iter().map(|peer| peer::Peer::from(**peer)).collect(); - Ok(reply::json(&TorrentResource { + Ok(reply::json(&Torrent { info_hash: info_hash.to_string(), seeders, completed, diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 2fc878354..1170b7188 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -135,7 +135,7 @@ pub async fn handle_scrape( #[allow(clippy::ptr_arg)] fn send_announce_response( announce_request: &request::Announce, - torrent_stats: &torrent::Stats, + torrent_stats: &torrent::SwamStats, peers: &Vec, interval: u32, interval_min: u32, diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 4b2dabebb..4469d682b 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -246,7 +246,7 @@ impl Tracker { } } - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::Stats { + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwamStats { let mut torrents = self.torrents.write().await; let torrent_entry = match torrents.entry(*info_hash) { @@ -266,7 +266,7 @@ impl Tracker { let (seeders, completed, leechers) = torrent_entry.get_stats(); - torrent::Stats { + torrent::SwamStats { completed, seeders, leechers, diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs 
index 8058ab891..e292dff54 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -93,7 +93,7 @@ impl Default for Entry { } #[derive(Debug)] -pub struct Stats { +pub struct SwamStats { pub completed: u32, pub seeders: u32, pub leechers: u32, diff --git a/tests/api.rs b/tests/api.rs index 824c198e2..706cd0b8d 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -16,16 +16,16 @@ mod tracker_api { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use reqwest::Response; use tokio::task::JoinHandle; - use torrust_tracker::api::resources::auth_key::AuthKey; - use torrust_tracker::api::resources::stats_resource::StatsResource; - use torrust_tracker::api::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; + use torrust_tracker::api::resource; + use torrust_tracker::api::resource::auth_key::AuthKey; + use torrust_tracker::api::resource::stats::Stats; + use torrust_tracker::api::resource::torrent::{self, Torrent}; use torrust_tracker::config::Configuration; use torrust_tracker::jobs::tracker_api; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth; - use torrust_tracker::tracker::peer::{self, Peer}; use torrust_tracker::tracker::statistics::Keeper; + use torrust_tracker::tracker::{auth, peer}; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use crate::common::ephemeral_random_port; @@ -104,7 +104,7 @@ mod tracker_api { assert_eq!( torrent_resource, - TorrentResource { + Torrent { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, @@ -135,7 +135,7 @@ mod tracker_api { assert_eq!( torrent_resources, - vec![TorrentListItemResource { + vec![torrent::ListItem { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, @@ -166,7 +166,7 @@ mod tracker_api { assert_eq!( stats_resource, - StatsResource { + Stats { 
torrents: 1, seeders: 1, completed: 0, @@ -187,8 +187,8 @@ mod tracker_api { ); } - fn sample_torrent_peer() -> (Peer, TorrentPeerResource) { - let torrent_peer = Peer { + fn sample_torrent_peer() -> (peer::Peer, resource::peer::Peer) { + let torrent_peer = peer::Peer { peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), @@ -197,7 +197,7 @@ mod tracker_api { left: NumberOfBytes(0), event: AnnounceEvent::Started, }; - let torrent_peer_resource = TorrentPeerResource::from(torrent_peer); + let torrent_peer_resource = resource::peer::Peer::from(torrent_peer); (torrent_peer, torrent_peer_resource) } @@ -326,7 +326,7 @@ mod tracker_api { reqwest::Client::new().post(url.clone()).send().await.unwrap() } - pub async fn get_torrent(&self, info_hash: &str) -> TorrentResource { + pub async fn get_torrent(&self, info_hash: &str) -> Torrent { let url = format!( "http://{}/api/torrent/{}?token={}", &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token @@ -338,12 +338,12 @@ mod tracker_api { .send() .await .unwrap() - .json::() + .json::() .await .unwrap() } - pub async fn get_torrents(&self) -> Vec { + pub async fn get_torrents(&self) -> Vec { let url = format!( "http://{}/api/torrents?token={}", &self.connection_info.bind_address, &self.connection_info.api_token @@ -355,12 +355,12 @@ mod tracker_api { .send() .await .unwrap() - .json::>() + .json::>() .await .unwrap() } - pub async fn get_tracker_statistics(&self) -> StatsResource { + pub async fn get_tracker_statistics(&self) -> Stats { let url = format!( "http://{}/api/stats?token={}", &self.connection_info.bind_address, &self.connection_info.api_token @@ -372,7 +372,7 @@ mod tracker_api { .send() .await .unwrap() - .json::() + .json::() .await .unwrap() } From dca755000d3f6b45435e9771c0cbcaeea4fa3680 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 12 Dec 2022 11:56:10 
+0000 Subject: [PATCH 0258/1003] fix: [#125] using default sqlite db for tests I was using the deafult DB configuration for some tests. Every test has to use its own database and the database should be located in a different location than the production default location. --- src/udp/handlers.rs | 53 +++++++++++++++++++++++++++++++++------------ tests/udp.rs | 12 +++++++++- 2 files changed, 50 insertions(+), 15 deletions(-) diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 625f42d40..d167b3e6d 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -239,17 +239,42 @@ fn handle_error(e: &Error, transaction_id: TransactionId) -> Response { #[cfg(test)] mod tests { + use std::env; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use rand::{thread_rng, Rng}; use crate::config::Configuration; use crate::protocol::clock::{Current, Time}; use crate::tracker::{self, mode, peer, statistics}; - fn default_tracker_config() -> Arc { - Arc::new(Configuration::default()) + fn tracker_configuration() -> Arc { + Arc::new(default_testing_tracker_configuration()) + } + + fn default_testing_tracker_configuration() -> Configuration { + let mut config = Configuration::default(); + config.log_level = Some("off".to_owned()); + + // Ephemeral socket address + let port = ephemeral_random_port(); + config.http_api.bind_address = format!("127.0.0.1:{}", &port); + + // Ephemeral database + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("data_{}.db", &port)); + config.db_path = temp_file.to_str().unwrap().to_owned(); + + config + } + + fn ephemeral_random_port() -> u16 { + // todo: this may produce random test failures because two tests can try to bind the same port. 
+ // We could create a pool of available ports (with read/write lock) + let mut rng = thread_rng(); + rng.gen_range(49152..65535) } fn initialized_public_tracker() -> Arc { @@ -332,7 +357,7 @@ mod tests { impl TrackerConfigurationBuilder { pub fn default() -> TrackerConfigurationBuilder { - let default_configuration = Configuration::default(); + let default_configuration = default_testing_tracker_configuration(); TrackerConfigurationBuilder { configuration: default_configuration, } @@ -361,7 +386,7 @@ mod tests { use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; use mockall::predicate::eq; - use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr}; + use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr, tracker_configuration}; use crate::tracker::{self, statistics}; use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_connect; @@ -424,7 +449,7 @@ mod tests { let client_socket_address = sample_ipv4_socket_address(); let torrent_tracker = Arc::new( - tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) .await @@ -442,7 +467,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let torrent_tracker = Arc::new( - tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) .await @@ -538,7 +563,7 @@ mod tests { use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use 
crate::udp::handlers::tests::{ - default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, TorrentPeerBuilder, + initialized_public_tracker, sample_ipv4_socket_address, tracker_configuration, TorrentPeerBuilder, }; #[tokio::test] @@ -685,7 +710,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_announce( @@ -761,7 +786,7 @@ mod tests { use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ - default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, TorrentPeerBuilder, + initialized_public_tracker, sample_ipv6_remote_addr, tracker_configuration, TorrentPeerBuilder, }; #[tokio::test] @@ -915,7 +940,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); let remote_addr = sample_ipv6_remote_addr(); @@ -1216,7 +1241,7 @@ mod tests { use super::sample_scrape_request; use crate::tracker::{self, statistics}; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr}; + use crate::udp::handlers::tests::{sample_ipv4_remote_addr, tracker_configuration}; #[tokio::test] async fn should_send_the_upd4_scrape_event() { @@ -1230,7 +1255,7 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( - tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + 
tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) @@ -1248,7 +1273,7 @@ mod tests { use super::sample_scrape_request; use crate::tracker::{self, statistics}; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr}; + use crate::udp::handlers::tests::{sample_ipv6_remote_addr, tracker_configuration}; #[tokio::test] async fn should_send_the_upd6_scrape_event() { @@ -1262,7 +1287,7 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( - tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) diff --git a/tests/udp.rs b/tests/udp.rs index 8bad37dbe..5f7a66856 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -7,6 +7,7 @@ mod common; mod udp_tracker_server { use core::panic; + use std::env; use std::io::Cursor; use std::net::Ipv4Addr; use std::sync::atomic::{AtomicBool, Ordering}; @@ -29,7 +30,16 @@ mod udp_tracker_server { fn tracker_configuration() -> Arc { let mut config = Configuration::default(); config.log_level = Some("off".to_owned()); - config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", ephemeral_random_port()); + + // Ephemeral socket address + let port = ephemeral_random_port(); + config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &port); + + // Ephemeral database + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("data_{}.db", &port)); + config.db_path = temp_file.to_str().unwrap().to_owned(); + Arc::new(config) } From 5af28a291c6f1d9b6197649d8ec6394d1582f3fe Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 
2022 09:39:51 +0000 Subject: [PATCH 0259/1003] fix: the pedantic clippy warnings --- src/config.rs | 4 ++-- src/protocol/info_hash.rs | 2 +- src/tracker/auth.rs | 2 +- src/udp/handlers.rs | 6 ++++-- tests/api.rs | 6 ++++-- tests/udp.rs | 8 +++++--- 6 files changed, 17 insertions(+), 11 deletions(-) diff --git a/src/config.rs b/src/config.rs index a7e7e9df6..ba99e0f45 100644 --- a/src/config.rs +++ b/src/config.rs @@ -74,7 +74,7 @@ impl std::fmt::Display for Error { Error::ConfigError(e) => e.fmt(f), Error::IOError(e) => e.fmt(f), Error::ParseError(e) => e.fmt(f), - Error::TrackerModeIncompatible => write!(f, "{:?}", self), + Error::TrackerModeIncompatible => write!(f, "{self:?}"), } } } @@ -296,6 +296,6 @@ mod tests { fn configuration_error_could_be_displayed() { let error = Error::TrackerModeIncompatible; - assert_eq!(format!("{}", error), "TrackerModeIncompatible"); + assert_eq!(format!("{error}"), "TrackerModeIncompatible"); } } diff --git a/src/protocol/info_hash.rs b/src/protocol/info_hash.rs index 3b9b2fa35..9a0900063 100644 --- a/src/protocol/info_hash.rs +++ b/src/protocol/info_hash.rs @@ -133,7 +133,7 @@ mod tests { fn an_info_hash_should_by_displayed_like_a_40_utf8_lowercased_char_hex_string() { let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - let output = format!("{}", info_hash); + let output = format!("{info_hash}"); assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff"); } diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 7ac6d6939..02450dc82 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -90,7 +90,7 @@ pub enum Error { impl From for Error { fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { - eprintln!("{}", e); + eprintln!("{e}"); Error::KeyVerificationError } } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index d167b3e6d..001fb2380 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -255,8 +255,10 @@ mod tests { } fn 
default_testing_tracker_configuration() -> Configuration { - let mut config = Configuration::default(); - config.log_level = Some("off".to_owned()); + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; // Ephemeral socket address let port = ephemeral_random_port(); diff --git a/tests/api.rs b/tests/api.rs index 706cd0b8d..84ddac573 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -203,8 +203,10 @@ mod tracker_api { } fn tracker_configuration() -> Arc { - let mut config = Configuration::default(); - config.log_level = Some("off".to_owned()); + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; // Ephemeral socket address let port = ephemeral_random_port(); diff --git a/tests/udp.rs b/tests/udp.rs index 5f7a66856..55384db05 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -28,8 +28,10 @@ mod udp_tracker_server { use crate::common::ephemeral_random_port; fn tracker_configuration() -> Arc { - let mut config = Configuration::default(); - config.log_level = Some("off".to_owned()); + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; // Ephemeral socket address let port = ephemeral_random_port(); @@ -181,7 +183,7 @@ mod udp_tracker_server { /// Generates the source address for the UDP client fn source_address(port: u16) -> String { - format!("127.0.0.1:{}", port) + format!("127.0.0.1:{port}") } fn is_error_response(response: &Response, error_message: &str) -> bool { From b23d64b9c3d58a6f4f7dab8a60775fc234aaadbd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 08:49:56 +0000 Subject: [PATCH 0260/1003] feat: add ssl support for the API New config options have been added to support HTTPs conenctionto the API: ``` [http_api] ssl_enabled = false ssl_cert_path = "./storage/ssl_certificates/localhost.crt" ssl_key_path = "./storage/ssl_certificates/localhost.key" ``` --- src/api/mod.rs | 18 +++ src/api/routes.rs | 307 
++++++++++++++++++++++++++++++++++++ src/api/server.rs | 333 +++------------------------------------ src/config.rs | 41 +++-- src/jobs/http_tracker.rs | 4 +- src/jobs/tracker_api.rs | 26 +-- src/jobs/udp_tracker.rs | 4 +- src/setup.rs | 2 +- tests/api.rs | 2 +- 9 files changed, 393 insertions(+), 344 deletions(-) create mode 100644 src/api/routes.rs diff --git a/src/api/mod.rs b/src/api/mod.rs index 16abb8e27..d254c91ac 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,2 +1,20 @@ pub mod resource; +pub mod routes; pub mod server; + +use serde::{Deserialize, Serialize}; + +#[derive(Deserialize, Debug)] +pub struct TorrentInfoQuery { + offset: Option, + limit: Option, +} + +#[derive(Serialize, Debug)] +#[serde(tag = "status", rename_all = "snake_case")] +enum ActionStatus<'a> { + Ok, + Err { reason: std::borrow::Cow<'a, str> }, +} + +impl warp::reject::Reject for ActionStatus<'static> {} diff --git a/src/api/routes.rs b/src/api/routes.rs new file mode 100644 index 000000000..76b449e9b --- /dev/null +++ b/src/api/routes.rs @@ -0,0 +1,307 @@ +use std::cmp::min; +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; +use std::time::Duration; + +use serde::Deserialize; +use warp::{filters, reply, Filter}; + +use super::resource::auth_key::AuthKey; +use super::resource::peer; +use super::resource::stats::Stats; +use super::resource::torrent::{ListItem, Torrent}; +use super::{ActionStatus, TorrentInfoQuery}; +use crate::protocol::info_hash::InfoHash; +use crate::tracker; + +fn authenticate(tokens: HashMap) -> impl Filter + Clone { + #[derive(Deserialize)] + struct AuthToken { + token: Option, + } + + let tokens: HashSet = tokens.into_values().collect(); + + let tokens = Arc::new(tokens); + warp::filters::any::any() + .map(move || tokens.clone()) + .and(filters::query::query::()) + .and_then(|tokens: Arc>, token: AuthToken| async move { + match token.token { + Some(token) => { + if !tokens.contains(&token) { + return Err(warp::reject::custom(ActionStatus::Err 
{ + reason: "token not valid".into(), + })); + } + + Ok(()) + } + None => Err(warp::reject::custom(ActionStatus::Err { + reason: "unauthorized".into(), + })), + } + }) + .untuple_one() +} + +#[allow(clippy::too_many_lines)] +#[must_use] +pub fn routes(tracker: &Arc) -> impl Filter + Clone { + // GET /api/torrents?offset=:u32&limit=:u32 + // View torrent list + let api_torrents = tracker.clone(); + let view_torrent_list = filters::method::get() + .and(filters::path::path("torrents")) + .and(filters::path::end()) + .and(filters::query::query()) + .map(move |limits| { + let tracker = api_torrents.clone(); + (limits, tracker) + }) + .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| async move { + let offset = limits.offset.unwrap_or(0); + let limit = min(limits.limit.unwrap_or(1000), 4000); + + let db = tracker.get_torrents().await; + let results: Vec<_> = db + .iter() + .map(|(info_hash, torrent_entry)| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + ListItem { + info_hash: info_hash.to_string(), + seeders, + completed, + leechers, + peers: None, + } + }) + .skip(offset as usize) + .take(limit as usize) + .collect(); + + Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) + }); + + // GET /api/stats + // View tracker status + let api_stats = tracker.clone(); + let view_stats_list = filters::method::get() + .and(filters::path::path("stats")) + .and(filters::path::end()) + .map(move || api_stats.clone()) + .and_then(|tracker: Arc| async move { + let mut results = Stats { + torrents: 0, + seeders: 0, + completed: 0, + leechers: 0, + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + }; + + let db = tracker.get_torrents().await; + + 
db.values().for_each(|torrent_entry| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + results.seeders += seeders; + results.completed += completed; + results.leechers += leechers; + results.torrents += 1; + }); + + let stats = tracker.get_stats().await; + + #[allow(clippy::cast_possible_truncation)] + { + results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; + results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; + results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; + results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; + results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; + results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; + results.udp4_connections_handled = stats.udp4_connections_handled as u32; + results.udp4_announces_handled = stats.udp4_announces_handled as u32; + results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; + results.udp6_connections_handled = stats.udp6_connections_handled as u32; + results.udp6_announces_handled = stats.udp6_announces_handled as u32; + results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; + } + + Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) + }); + + // GET /api/torrent/:info_hash + // View torrent info + let t2 = tracker.clone(); + let view_torrent_info = filters::method::get() + .and(filters::path::path("torrent")) + .and(filters::path::param()) + .and(filters::path::end()) + .map(move |info_hash: InfoHash| { + let tracker = t2.clone(); + (info_hash, tracker) + }) + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + let db = tracker.get_torrents().await; + let torrent_entry_option = db.get(&info_hash); + + let torrent_entry = match torrent_entry_option { + Some(torrent_entry) => torrent_entry, + None => { + return Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")); + } + }; + let (seeders, completed, leechers) = 
torrent_entry.get_stats(); + + let peers = torrent_entry.get_peers(None); + + let peer_resources = peers.iter().map(|peer| peer::Peer::from(**peer)).collect(); + + Ok(reply::json(&Torrent { + info_hash: info_hash.to_string(), + seeders, + completed, + leechers, + peers: Some(peer_resources), + })) + }); + + // DELETE /api/whitelist/:info_hash + // Delete info hash from whitelist + let t3 = tracker.clone(); + let delete_torrent = filters::method::delete() + .and(filters::path::path("whitelist")) + .and(filters::path::param()) + .and(filters::path::end()) + .map(move |info_hash: InfoHash| { + let tracker = t3.clone(); + (info_hash, tracker) + }) + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + match tracker.remove_torrent_from_whitelist(&info_hash).await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to remove torrent from whitelist".into(), + })), + } + }); + + // POST /api/whitelist/:info_hash + // Add info hash to whitelist + let t4 = tracker.clone(); + let add_torrent = filters::method::post() + .and(filters::path::path("whitelist")) + .and(filters::path::param()) + .and(filters::path::end()) + .map(move |info_hash: InfoHash| { + let tracker = t4.clone(); + (info_hash, tracker) + }) + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + match tracker.add_torrent_to_whitelist(&info_hash).await { + Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to whitelist torrent".into(), + })), + } + }); + + // POST /api/key/:seconds_valid + // Generate new key + let t5 = tracker.clone(); + let create_key = filters::method::post() + .and(filters::path::path("key")) + .and(filters::path::param()) + .and(filters::path::end()) + .map(move |seconds_valid: u64| { + let tracker = t5.clone(); + (seconds_valid, tracker) + }) + .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { + match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { + Ok(auth_key) => Ok(warp::reply::json(&AuthKey::from(auth_key))), + Err(..) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to generate key".into(), + })), + } + }); + + // DELETE /api/key/:key + // Delete key + let t6 = tracker.clone(); + let delete_key = filters::method::delete() + .and(filters::path::path("key")) + .and(filters::path::param()) + .and(filters::path::end()) + .map(move |key: String| { + let tracker = t6.clone(); + (key, tracker) + }) + .and_then(|(key, tracker): (String, Arc)| async move { + match tracker.remove_auth_key(&key).await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to delete key".into(), + })), + } + }); + + // GET /api/whitelist/reload + // Reload whitelist + let t7 = tracker.clone(); + let reload_whitelist = filters::method::get() + .and(filters::path::path("whitelist")) + .and(filters::path::path("reload")) + .and(filters::path::end()) + .map(move || t7.clone()) + .and_then(|tracker: Arc| async move { + match tracker.load_whitelist().await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to reload whitelist".into(), + })), + } + }); + + // GET /api/keys/reload + // Reload whitelist + let t8 = tracker.clone(); + let reload_keys = filters::method::get() + .and(filters::path::path("keys")) + 
.and(filters::path::path("reload")) + .and(filters::path::end()) + .map(move || t8.clone()) + .and_then(|tracker: Arc| async move { + match tracker.load_keys().await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to reload keys".into(), + })), + } + }); + + let api_routes = filters::path::path("api").and( + view_torrent_list + .or(delete_torrent) + .or(view_torrent_info) + .or(view_stats_list) + .or(add_torrent) + .or(create_key) + .or(delete_key) + .or(reload_whitelist) + .or(reload_keys), + ); + + api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())) +} diff --git a/src/api/server.rs b/src/api/server.rs index 5967a8be4..5d6a3cdfd 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -1,327 +1,32 @@ -use std::cmp::min; -use std::collections::{HashMap, HashSet}; use std::net::SocketAddr; use std::sync::Arc; -use std::time::Duration; -use serde::{Deserialize, Serialize}; -use warp::{filters, reply, serve, Filter}; +use warp::serve; -use super::resource::auth_key::AuthKey; -use super::resource::peer; -use super::resource::stats::Stats; -use super::resource::torrent::{ListItem, Torrent}; -use crate::protocol::info_hash::InfoHash; +use super::routes::routes; use crate::tracker; -#[derive(Deserialize, Debug)] -struct TorrentInfoQuery { - offset: Option, - limit: Option, -} - -#[derive(Serialize, Debug)] -#[serde(tag = "status", rename_all = "snake_case")] -enum ActionStatus<'a> { - Ok, - Err { reason: std::borrow::Cow<'a, str> }, -} - -impl warp::reject::Reject for ActionStatus<'static> {} - -fn authenticate(tokens: HashMap) -> impl Filter + Clone { - #[derive(Deserialize)] - struct AuthToken { - token: Option, - } - - let tokens: HashSet = tokens.into_iter().map(|(_, v)| v).collect(); - - let tokens = Arc::new(tokens); - warp::filters::any::any() - .map(move || tokens.clone()) - .and(filters::query::query::()) - .and_then(|tokens: Arc>, token: AuthToken| async 
move { - match token.token { - Some(token) => { - if !tokens.contains(&token) { - return Err(warp::reject::custom(ActionStatus::Err { - reason: "token not valid".into(), - })); - } - - Ok(()) - } - None => Err(warp::reject::custom(ActionStatus::Err { - reason: "unauthorized".into(), - })), - } - }) - .untuple_one() -} - -#[allow(clippy::too_many_lines)] pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl warp::Future { - // GET /api/torrents?offset=:u32&limit=:u32 - // View torrent list - let api_torrents = tracker.clone(); - let view_torrent_list = filters::method::get() - .and(filters::path::path("torrents")) - .and(filters::path::end()) - .and(filters::query::query()) - .map(move |limits| { - let tracker = api_torrents.clone(); - (limits, tracker) - }) - .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| async move { - let offset = limits.offset.unwrap_or(0); - let limit = min(limits.limit.unwrap_or(1000), 4000); - - let db = tracker.get_torrents().await; - let results: Vec<_> = db - .iter() - .map(|(info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - ListItem { - info_hash: info_hash.to_string(), - seeders, - completed, - leechers, - peers: None, - } - }) - .skip(offset as usize) - .take(limit as usize) - .collect(); - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) - }); - - // GET /api/stats - // View tracker status - let api_stats = tracker.clone(); - let view_stats_list = filters::method::get() - .and(filters::path::path("stats")) - .and(filters::path::end()) - .map(move || api_stats.clone()) - .and_then(|tracker: Arc| async move { - let mut results = Stats { - torrents: 0, - seeders: 0, - completed: 0, - leechers: 0, - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - 
udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - }; - - let db = tracker.get_torrents().await; - - db.values().for_each(|torrent_entry| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - results.seeders += seeders; - results.completed += completed; - results.leechers += leechers; - results.torrents += 1; - }); - - let stats = tracker.get_stats().await; - - #[allow(clippy::cast_possible_truncation)] - { - results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; - results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; - results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; - results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; - results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; - results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; - results.udp4_connections_handled = stats.udp4_connections_handled as u32; - results.udp4_announces_handled = stats.udp4_announces_handled as u32; - results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; - results.udp6_connections_handled = stats.udp6_connections_handled as u32; - results.udp6_announces_handled = stats.udp6_announces_handled as u32; - results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; - } - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) - }); - - // GET /api/torrent/:info_hash - // View torrent info - let t2 = tracker.clone(); - let view_torrent_info = filters::method::get() - .and(filters::path::path("torrent")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t2.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { - let db = tracker.get_torrents().await; - let torrent_entry_option = db.get(&info_hash); - - let torrent_entry = match torrent_entry_option { - Some(torrent_entry) => torrent_entry, - None => { - return 
Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")); - } - }; - let (seeders, completed, leechers) = torrent_entry.get_stats(); - - let peers = torrent_entry.get_peers(None); - - let peer_resources = peers.iter().map(|peer| peer::Peer::from(**peer)).collect(); - - Ok(reply::json(&Torrent { - info_hash: info_hash.to_string(), - seeders, - completed, - leechers, - peers: Some(peer_resources), - })) - }); - - // DELETE /api/whitelist/:info_hash - // Delete info hash from whitelist - let t3 = tracker.clone(); - let delete_torrent = filters::method::delete() - .and(filters::path::path("whitelist")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t3.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { - match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to remove torrent from whitelist".into(), - })), - } - }); - - // POST /api/whitelist/:info_hash - // Add info hash to whitelist - let t4 = tracker.clone(); - let add_torrent = filters::method::post() - .and(filters::path::path("whitelist")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t4.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { - match tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to whitelist torrent".into(), - })), - } - }); - - // POST /api/key/:seconds_valid - // Generate new key - let t5 = tracker.clone(); - let create_key = filters::method::post() - .and(filters::path::path("key")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |seconds_valid: u64| { - let tracker = t5.clone(); - (seconds_valid, tracker) - }) - .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { - match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => Ok(warp::reply::json(&AuthKey::from(auth_key))), - Err(..) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to generate key".into(), - })), - } - }); - - // DELETE /api/key/:key - // Delete key - let t6 = tracker.clone(); - let delete_key = filters::method::delete() - .and(filters::path::path("key")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |key: String| { - let tracker = t6.clone(); - (key, tracker) - }) - .and_then(|(key, tracker): (String, Arc)| async move { - match tracker.remove_auth_key(&key).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to delete key".into(), - })), - } - }); + let (_addr, api_server) = serve(routes(tracker)).bind_with_graceful_shutdown(socket_addr, async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + }); - // GET /api/whitelist/reload - // Reload whitelist - let t7 = tracker.clone(); - let reload_whitelist = filters::method::get() - .and(filters::path::path("whitelist")) - .and(filters::path::path("reload")) - .and(filters::path::end()) - .map(move || t7.clone()) - .and_then(|tracker: Arc| async move { - match tracker.load_whitelist().await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to reload 
whitelist".into(), - })), - } - }); + api_server +} - // GET /api/keys/reload - // Reload whitelist - let t8 = tracker.clone(); - let reload_keys = filters::method::get() - .and(filters::path::path("keys")) - .and(filters::path::path("reload")) - .and(filters::path::end()) - .map(move || t8.clone()) - .and_then(|tracker: Arc| async move { - match tracker.load_keys().await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to reload keys".into(), - })), - } +pub fn start_tls( + socket_addr: SocketAddr, + ssl_cert_path: String, + ssl_key_path: String, + tracker: &Arc, +) -> impl warp::Future { + let (_addr, api_server) = serve(routes(tracker)) + .tls() + .cert_path(ssl_cert_path) + .key_path(ssl_key_path) + .bind_with_graceful_shutdown(socket_addr, async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); }); - let api_routes = filters::path::path("api").and( - view_torrent_list - .or(delete_torrent) - .or(view_torrent_info) - .or(view_stats_list) - .or(add_torrent) - .or(create_key) - .or(delete_key) - .or(reload_whitelist) - .or(reload_keys), - ); - - let server = api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())); - - let (_addr, api_server) = serve(server).bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - }); - api_server } diff --git a/src/config.rs b/src/config.rs index ba99e0f45..66def17cd 100644 --- a/src/config.rs +++ b/src/config.rs @@ -30,10 +30,16 @@ pub struct HttpTracker { pub ssl_key_path: Option, } +#[serde_as] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct HttpApi { pub enabled: bool, pub bind_address: String, + pub ssl_enabled: bool, + #[serde_as(as = "NoneAsEmptyString")] + pub ssl_cert_path: Option, + #[serde_as(as = "NoneAsEmptyString")] + pub ssl_key_path: Option, pub access_tokens: HashMap, } @@ -81,20 
+87,8 @@ impl std::fmt::Display for Error { impl std::error::Error for Error {} -impl Configuration { - #[must_use] - pub fn get_ext_ip(&self) -> Option { - match &self.external_ip { - None => None, - Some(external_ip) => match IpAddr::from_str(external_ip) { - Ok(external_ip) => Some(external_ip), - Err(_) => None, - }, - } - } - - #[must_use] - pub fn default() -> Configuration { +impl Default for Configuration { + fn default() -> Self { let mut configuration = Configuration { log_level: Option::from(String::from("info")), mode: mode::Mode::Public, @@ -114,6 +108,9 @@ impl Configuration { http_api: HttpApi { enabled: true, bind_address: String::from("127.0.0.1:1212"), + ssl_enabled: false, + ssl_cert_path: None, + ssl_key_path: None, access_tokens: [(String::from("admin"), String::from("MyAccessToken"))] .iter() .cloned() @@ -133,6 +130,19 @@ impl Configuration { }); configuration } +} + +impl Configuration { + #[must_use] + pub fn get_ext_ip(&self) -> Option { + match &self.external_ip { + None => None, + Some(external_ip) => match IpAddr::from_str(external_ip) { + Ok(external_ip) => Some(external_ip), + Err(_) => None, + }, + } + } /// # Errors /// @@ -208,6 +218,9 @@ mod tests { [http_api] enabled = true bind_address = "127.0.0.1:1212" + ssl_enabled = false + ssl_cert_path = "" + ssl_key_path = "" [http_api.access_tokens] admin = "MyAccessToken" diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index b8f031f5a..c62bc5cc9 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -22,10 +22,10 @@ pub fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHa let http_tracker = Http::new(tracker); if !ssl_enabled { - info!("Starting HTTP server on: {}", bind_addr); + info!("Starting HTTP server on: http://{}", bind_addr); http_tracker.start(bind_addr).await; } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting HTTPS server on: {} (TLS)", bind_addr); + info!("Starting HTTPS server on: 
https://{} (TLS)", bind_addr); http_tracker .start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap()) .await; diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index 2c00aa453..211174f35 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -5,7 +5,7 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use crate::api::server; -use crate::config::Configuration; +use crate::config::HttpApi; use crate::tracker; #[derive(Debug)] @@ -14,24 +14,30 @@ pub struct ApiServerJobStarted(); /// # Panics /// /// It would panic if unable to send the `ApiServerJobStarted` notice. -pub async fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { +pub async fn start_job(config: &HttpApi, tracker: Arc) -> JoinHandle<()> { let bind_addr = config - .http_api .bind_address .parse::() .expect("Tracker API bind_address invalid."); - - info!("Starting Torrust API server on: {}", bind_addr); + let ssl_enabled = config.ssl_enabled; + let ssl_cert_path = config.ssl_cert_path.clone(); + let ssl_key_path = config.ssl_key_path.clone(); let (tx, rx) = oneshot::channel::(); // Run the API server let join_handle = tokio::spawn(async move { - let handel = server::start(bind_addr, &tracker); - - tx.send(ApiServerJobStarted()).expect("the start job dropped"); - - handel.await; + if !ssl_enabled { + info!("Starting Torrust API server on: http://{}", bind_addr); + let handle = server::start(bind_addr, &tracker); + tx.send(ApiServerJobStarted()).expect("the start job dropped"); + handle.await; + } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { + info!("Starting Torrust API server on: https://{}", bind_addr); + let handle = server::start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap(), &tracker); + tx.send(ApiServerJobStarted()).expect("the start job dropped"); + handle.await; + } }); // Wait until the API server job is running diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 
57369f660..d0907c976 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -14,11 +14,11 @@ pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHan tokio::spawn(async move { match Udp::new(tracker, &bind_addr).await { Ok(udp_server) => { - info!("Starting UDP server on: {}", bind_addr); + info!("Starting UDP server on: udp://{}", bind_addr); udp_server.start().await; } Err(e) => { - warn!("Could not start UDP tracker on: {}", bind_addr); + warn!("Could not start UDP tracker on: udp://{}", bind_addr); error!("{}", e); } } diff --git a/src/setup.rs b/src/setup.rs index a7b7c5a82..c045310bb 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -49,7 +49,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve // Start HTTP API server if config.http_api.enabled { - jobs.push(tracker_api::start_job(config, tracker.clone()).await); + jobs.push(tracker_api::start_job(&config.http_api, tracker.clone()).await); } // Remove torrents without peers, every interval diff --git a/tests/api.rs b/tests/api.rs index 84ddac573..dfb8d81b3 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -292,7 +292,7 @@ mod tracker_api { logging::setup(&configuration); // Start the HTTP API job - self.job = Some(tracker_api::start_job(&configuration, tracker).await); + self.job = Some(tracker_api::start_job(&configuration.http_api, tracker).await); self.started.store(true, Ordering::Relaxed); } From 19abf0f31df8fcc0c9332f60ae6ab74c181df776 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 11:06:10 +0000 Subject: [PATCH 0261/1003] fix: error when udp response can't be written Instead of using a "debug" log level. 
--- src/udp/server.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/udp/server.rs b/src/udp/server.rs index 5bd835365..a868cbd10 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -3,7 +3,7 @@ use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::Response; -use log::{debug, info}; +use log::{debug, error, info}; use tokio::net::UdpSocket; use crate::tracker; @@ -71,7 +71,7 @@ impl Udp { Udp::send_packet(socket, &remote_addr, &inner[..position]).await; } Err(_) => { - debug!("could not write response to bytes."); + error!("could not write response to bytes."); } } } From b1ec9dfc86643be3463a3b4d7b7cd1ed2bf2a4b4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 11:15:50 +0000 Subject: [PATCH 0262/1003] feat: change udp tracker console output Using "debug" for sensitive data like IP address and info for generic info we can log even on production. --- src/udp/server.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/udp/server.rs b/src/udp/server.rs index a868cbd10..e85c81e9d 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -45,10 +45,12 @@ impl Udp { Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { let payload = data[..valid_bytes].to_vec(); - debug!("Received {} bytes from {}", payload.len(), remote_addr); - debug!("{:?}", payload); + info!("Received {} bytes", payload.len()); + debug!("From: {}", &remote_addr); + debug!("Payload: {:?}", payload); let response = handle_packet(remote_addr, payload, tracker).await; + Udp::send_response(socket, remote_addr, response).await; } } @@ -56,8 +58,6 @@ impl Udp { } async fn send_response(socket: Arc, remote_addr: SocketAddr, response: Response) { - debug!("sending response to: {:?}", &remote_addr); - let buffer = vec![0u8; MAX_PACKET_SIZE]; let mut cursor = Cursor::new(buffer); @@ -67,8 +67,13 @@ impl Udp { let position = cursor.position() as usize; let inner = cursor.get_ref(); - debug!("{:?}", 
&inner[..position]); + info!("Sending {} bytes ...", &inner[..position].len()); + debug!("To: {:?}", &remote_addr); + debug!("Payload: {:?}", &inner[..position]); + + Udp::send_packet(socket, &remote_addr, &inner[..position]).await; + + info!("{} bytes sent", &inner[..position].len()); } Err(_) => { error!("could not write response to bytes."); From ca0e8afce4a4b5430631020648894215865fe838 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 11:20:13 +0000 Subject: [PATCH 0263/1003] feat: change default http tracker port to 7070 Azure Container Instances do not allow you to open the same port as UDP and TCP. --- README.md | 2 +- src/config.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index beb2591ea..4e464dd68 100644 --- a/README.md +++ b/README.md @@ -70,7 +70,7 @@ bind_address = "0.0.0.0:6969" [[http_trackers]] enabled = true -bind_address = "0.0.0.0:6969" +bind_address = "0.0.0.0:7070" ssl_enabled = false ssl_cert_path = "" ssl_key_path = "" diff --git a/src/config.rs b/src/config.rs index 66def17cd..d56c2d34d 100644 --- a/src/config.rs +++ b/src/config.rs @@ -123,7 +123,7 @@ impl Default for Configuration { }); configuration.http_trackers.push(HttpTracker { enabled: false, - bind_address: String::from("0.0.0.0:6969"), + bind_address: String::from("0.0.0.0:7070"), ssl_enabled: false, ssl_cert_path: None, ssl_key_path: None, @@ -210,7 +210,7 @@ mod tests { [[http_trackers]] enabled = false - bind_address = "0.0.0.0:6969" + bind_address = "0.0.0.0:7070" ssl_enabled = false ssl_cert_path = "" ssl_key_path = "" From 269e5f5bb085d08f00edb98305313f7b86471719 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 12:00:26 +0000 Subject: [PATCH 0264/1003] feat: move default db_path to storage folder Azure Container Instances do not allow you to mount a single file. I've created a storage folder where we can put all the things we want to persist. 
--- .gitignore | 2 ++ src/config.rs | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index ba9ceeb53..b80e2038c 100644 --- a/.gitignore +++ b/.gitignore @@ -6,4 +6,6 @@ /config.toml /data.db /.vscode/launch.json +/storage/ + diff --git a/src/config.rs b/src/config.rs index d56c2d34d..fdfcb3d09 100644 --- a/src/config.rs +++ b/src/config.rs @@ -93,7 +93,7 @@ impl Default for Configuration { log_level: Option::from(String::from("info")), mode: mode::Mode::Public, db_driver: Driver::Sqlite3, - db_path: String::from("data.db"), + db_path: String::from("./storage/database/data.db"), announce_interval: 120, min_announce_interval: 120, max_peer_timeout: 900, @@ -193,7 +193,7 @@ mod tests { let config = r#"log_level = "info" mode = "public" db_driver = "Sqlite3" - db_path = "data.db" + db_path = "./storage/database/data.db" announce_interval = 120 min_announce_interval = 120 max_peer_timeout = 900 From 3098ed2c59c420167797b3a6f697c697d440c0f2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 13:26:09 +0000 Subject: [PATCH 0265/1003] feat: remove strip from Cargo.toml The option "strip = true" in the Cargo.toml file prevents docker from using the cache for the cargo dependencies. ``` [profile.release] ... 
strip = true ``` More info: https://github.com/LukeMathWalker/cargo-chef/issues/172 --- Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 80e9009f1..6e835bcb5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,6 @@ lto = "thin" debug = 1 opt-level = 3 lto = "fat" -strip = true [dependencies] tokio = { version = "1", features = [ From f8700aacaeaf1fb9d0201377aa35414012e298e4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 13:30:59 +0000 Subject: [PATCH 0266/1003] feat: allow to inject configuration from env var You can use an env var to pass the configuration instead of using the configuration file in the root folder `config.toml` ``` TORRUST_TRACKER_CONFIG=$(cat config.toml) TORRUST_TRACKER_CONFIG=`cat config.toml` cargo run ``` This allows the application to be executed in dockerized environments without needing to mount a file or volume for the configuration. --- src/config.rs | 26 +++++++++++++++++++++++--- src/main.rs | 15 +++++++++------ 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/src/config.rs b/src/config.rs index fdfcb3d09..48e28b358 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,10 +1,10 @@ use std::collections::HashMap; -use std::fs; use std::net::IpAddr; use std::path::Path; use std::str::FromStr; +use std::{env, fs}; -use config::{Config, ConfigError, File}; +use config::{Config, ConfigError, File, FileFormat}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; use {std, toml}; @@ -164,7 +164,7 @@ impl Configuration { let config = Configuration::default(); config.save_to_file(path)?; return Err(Error::Message( - "Please edit the config.TOML in the root folder and restart the tracker.".to_string(), + "Please edit the config.TOML and restart the tracker.".to_string(), )); } @@ -173,6 +173,26 @@ impl Configuration { Ok(torrust_config) } + /// # Errors + /// + /// Will return `Err` if the environment variable does not exist or has a bad 
configuration. + pub fn load_from_env_var(config_env_var_name: &str) -> Result { + match env::var(config_env_var_name) { + Ok(config_toml) => { + let config_builder = Config::builder() + .add_source(File::from_str(&config_toml, FileFormat::Toml)) + .build() + .map_err(Error::ConfigError)?; + let config = config_builder.try_deserialize().map_err(Error::ConfigError)?; + Ok(config) + } + Err(_) => Err(Error::Message(format!( + "No environment variable for configuration found: {}", + &config_env_var_name + ))), + } + } + /// # Errors /// /// Will return `Err` if `filename` does not exist or the user does not have diff --git a/src/main.rs b/src/main.rs index a7316cef2..199e8f5c5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,3 +1,4 @@ +use std::env; use std::sync::Arc; use log::info; @@ -7,7 +8,8 @@ use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, trac #[tokio::main] async fn main() { - const CONFIG_PATH: &str = "config.toml"; + const CONFIG_PATH: &str = "./config.toml"; + const CONFIG_ENV_VAR_NAME: &str = "TORRUST_TRACKER_CONFIG"; // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); @@ -16,11 +18,12 @@ async fn main() { lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); // Initialize Torrust config - let config = match Configuration::load_from_file(CONFIG_PATH) { - Ok(config) => Arc::new(config), - Err(error) => { - panic!("{}", error) - } + let config = if env::var(CONFIG_ENV_VAR_NAME).is_ok() { + println!("Loading configuration from env var {CONFIG_ENV_VAR_NAME}"); + Arc::new(Configuration::load_from_env_var(CONFIG_ENV_VAR_NAME).unwrap()) + } else { + println!("Loading configuration from config file {CONFIG_PATH}"); + Arc::new(Configuration::load_from_file(CONFIG_PATH).unwrap()) }; // Initialize statistics From 46e1a37ec08d6ebaee294348e1fa64245e7d5046 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 13:34:57 +0000 Subject: [PATCH 0267/1003] feat: docker support --- 
.dockerignore | 16 ++ .env.local | 1 + .github/workflows/publish_docker_image.yml | 73 ++++++ .github/workflows/test_docker.yml | 26 +++ .gitignore | 1 + Dockerfile | 80 +++++++ bin/install.sh | 13 ++ cSpell.json | 9 + compose.yaml | 48 ++++ config.toml.local | 34 +++ docker/README.md | 250 +++++++++++++++++++++ docker/bin/build.sh | 13 ++ docker/bin/install.sh | 4 + docker/bin/run.sh | 13 ++ 14 files changed, 581 insertions(+) create mode 100644 .dockerignore create mode 100644 .env.local create mode 100644 .github/workflows/publish_docker_image.yml create mode 100644 .github/workflows/test_docker.yml create mode 100644 Dockerfile create mode 100755 bin/install.sh create mode 100644 compose.yaml create mode 100644 config.toml.local create mode 100644 docker/README.md create mode 100755 docker/bin/build.sh create mode 100755 docker/bin/install.sh create mode 100755 docker/bin/run.sh diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..3d8a25cce --- /dev/null +++ b/.dockerignore @@ -0,0 +1,16 @@ +.git +.git-blame-ignore +.github +.gitignore +.vscode +bin/ +config.toml +config.toml.local +cSpell.json +data.db +docker/ +NOTICE +README.md +rustfmt.toml +storage/ +target/ diff --git a/.env.local b/.env.local new file mode 100644 index 000000000..fefed56c4 --- /dev/null +++ b/.env.local @@ -0,0 +1 @@ +TORRUST_TRACKER_USER_UID=1000 \ No newline at end of file diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml new file mode 100644 index 000000000..b8e3791ed --- /dev/null +++ b/.github/workflows/publish_docker_image.yml @@ -0,0 +1,73 @@ +name: Publish docker image + +on: + push: + branches: + - 'develop' + # todo: only during development of issue 11 + - 'docker' + - 'docker-reorganized-pr' + tags: + - "v*" + +env: + # Azure file share volume mount requires the Linux container run as root + # 
https://learn.microsoft.com/en-us/azure/container-instances/container-instances-volume-azure-files#limitations + TORRUST_TRACKER_RUN_AS_USER: root + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + components: llvm-tools-preview + - uses: Swatinem/rust-cache@v1 + - name: Run Tests + run: cargo test + + dockerhub: + needs: test + runs-on: ubuntu-latest + environment: dockerhub-torrust + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v4 + with: + images: | + # For example: torrust/tracker + "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" + tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_HUB_USERNAME }} + password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build and push + uses: docker/build-push-action@v3 + with: + context: . + file: ./Dockerfile + build-args: | + RUN_AS_USER=${{ env.TORRUST_TRACKER_RUN_AS_USER }} + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/test_docker.yml b/.github/workflows/test_docker.yml new file mode 100644 index 000000000..2cfa4de5c --- /dev/null +++ b/.github/workflows/test_docker.yml @@ -0,0 +1,26 @@ +name: Test docker build + +on: + push: + pull_request: + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build docker image + uses: docker/build-push-action@v3 + with: + context: . 
+ file: ./Dockerfile + push: false + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Build docker-compose images + run: docker compose build diff --git a/.gitignore b/.gitignore index b80e2038c..d574298da 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +.env /target **/*.rs.bk /database.json.bz2 diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..96d21fa84 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,80 @@ +FROM clux/muslrust:stable AS chef +WORKDIR /app +RUN cargo install cargo-chef + + +FROM chef AS planner +WORKDIR /app +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + + +FROM chef as development +WORKDIR /app +ARG UID=1000 +ARG RUN_AS_USER=appuser +ARG TRACKER_UDP_PORT=6969 +ARG TRACKER_HTTP_PORT=7070 +ARG TRACKER_API_PORT=1212 +# Add the app user for development +ENV USER=appuser +ENV UID=$UID +RUN adduser --uid "${UID}" "${USER}" +# Build dependencies +COPY --from=planner /app/recipe.json recipe.json +RUN cargo chef cook --recipe-path recipe.json +# Build the application +COPY . . +RUN cargo build --bin torrust-tracker +USER $RUN_AS_USER:$RUN_AS_USER +EXPOSE $TRACKER_UDP_PORT/udp +EXPOSE $TRACKER_HTTP_PORT/tcp +EXPOSE $TRACKER_API_PORT/tcp +CMD ["cargo", "run"] + + +FROM chef AS builder +WORKDIR /app +ARG UID=1000 +# Add the app user for production +ENV USER=appuser +ENV UID=$UID +RUN adduser \ + --disabled-password \ + --gecos "" \ + --home "/nonexistent" \ + --shell "/sbin/nologin" \ + --no-create-home \ + --uid "${UID}" \ + "${USER}" +# Build dependencies +COPY --from=planner /app/recipe.json recipe.json +RUN cargo chef cook --release --target x86_64-unknown-linux-musl --recipe-path recipe.json +# Build the application +COPY . . 
+RUN cargo build --release --target x86_64-unknown-linux-musl --bin torrust-tracker +# Strip the binary +# More info: https://github.com/LukeMathWalker/cargo-chef/issues/149 +RUN strip /app/target/x86_64-unknown-linux-musl/release/torrust-tracker + + +FROM alpine:latest +WORKDIR /app +ARG RUN_AS_USER=appuser +ARG TRACKER_UDP_PORT=6969 +ARG TRACKER_HTTP_PORT=7070 +ARG TRACKER_API_PORT=1212 +RUN apk --no-cache add ca-certificates +ENV TZ=Etc/UTC +ENV RUN_AS_USER=$RUN_AS_USER +COPY --from=builder /etc/passwd /etc/passwd +COPY --from=builder /etc/group /etc/group +COPY --from=builder --chown=$RUN_AS_USER \ + /app/target/x86_64-unknown-linux-musl/release/torrust-tracker \ + /app/torrust-tracker +RUN chown -R $RUN_AS_USER:$RUN_AS_USER /app +USER $RUN_AS_USER:$RUN_AS_USER +EXPOSE $TRACKER_UDP_PORT/udp +EXPOSE $TRACKER_HTTP_PORT/tcp +EXPOSE $TRACKER_API_PORT/tcp +ENTRYPOINT ["/app/torrust-tracker"] \ No newline at end of file diff --git a/bin/install.sh b/bin/install.sh new file mode 100755 index 000000000..d4314ce93 --- /dev/null +++ b/bin/install.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +# Generate the default settings file if it does not exist +if ! [ -f "./config.toml" ]; then + cp ./config.toml.local ./config.toml +fi + +# Generate the sqlite database if it does not exist +if ! 
[ -f "./storage/database/data.db" ]; then + # todo: it should get the path from config.toml and only do it when we use sqlite + touch ./storage/database/data.db + echo ";" | sqlite3 ./storage/database/data.db +fi diff --git a/cSpell.json b/cSpell.json index cc3359d58..5bc67a0c8 100644 --- a/cSpell.json +++ b/cSpell.json @@ -9,31 +9,38 @@ "Bitflu", "bools", "bufs", + "Buildx", "byteorder", "canonicalize", "canonicalized", "chrono", "clippy", "completei", + "dockerhub", "downloadedi", "filesd", "Freebox", "hasher", "hexlify", + "hlocalhost", "Hydranode", "incompletei", + "infoschema", "intervali", "leecher", "leechers", "libtorrent", "Lphant", "mockall", + "myacicontext", "nanos", "nextest", "nocapture", "oneshot", "ostr", "Pando", + "proot", + "Quickstart", "Rasterbar", "repr", "reqwest", @@ -50,9 +57,11 @@ "thiserror", "Torrentstorm", "torrust", + "torrustracker", "typenum", "Unamed", "untuple", + "uroot", "Vagaa", "Xtorrent", "Xunlei" diff --git a/compose.yaml b/compose.yaml new file mode 100644 index 000000000..d11f9c8ae --- /dev/null +++ b/compose.yaml @@ -0,0 +1,48 @@ +name: torrust +services: + + tracker: + build: + context: . 
+ target: development + user: ${TORRUST_TRACKER_USER_UID:-1000}:${TORRUST_TRACKER_USER_UID:-1000} + tty: true + networks: + - server_side + ports: + - 6969:6969/udp + - 7070:7070 + - 1212:1212 + volumes: + - ./:/app + - ~/.cargo:/home/appuser/.cargo + depends_on: + - mysql + + mysql: + image: mysql:8.0 + command: '--default-authentication-plugin=mysql_native_password' + restart: always + healthcheck: + test: ['CMD-SHELL', 'mysqladmin ping -h 127.0.0.1 --password="$$(cat /run/secrets/db-password)" --silent'] + interval: 3s + retries: 5 + start_period: 30s + environment: + - MYSQL_ROOT_HOST=% + - MYSQL_ROOT_PASSWORD=root_secret_password + - MYSQL_DATABASE=torrust_tracker + - MYSQL_USER=db_user + - MYSQL_PASSWORD=db_user_secret_password + networks: + - server_side + ports: + - 3306:3306 + volumes: + - mysql_data:/var/lib/mysql + +networks: + server_side: {} + +volumes: + mysql_data: {} \ No newline at end of file diff --git a/config.toml.local b/config.toml.local new file mode 100644 index 000000000..baf272d5a --- /dev/null +++ b/config.toml.local @@ -0,0 +1,34 @@ +log_level = "info" +mode = "public" +db_driver = "Sqlite3" +db_path = "./storage/database/data.db" +announce_interval = 120 +min_announce_interval = 120 +max_peer_timeout = 900 +on_reverse_proxy = false +external_ip = "0.0.0.0" +tracker_usage_statistics = true +persistent_torrent_completed_stat = false +inactive_peer_cleanup_interval = 600 +remove_peerless_torrents = true + +[[udp_trackers]] +enabled = false +bind_address = "0.0.0.0:6969" + +[[http_trackers]] +enabled = false +bind_address = "0.0.0.0:7070" +ssl_enabled = false +ssl_cert_path = "" +ssl_key_path = "" + +[http_api] +enabled = true +bind_address = "127.0.0.1:1212" +ssl_enabled = false +ssl_cert_path = "" +ssl_key_path = "" + +[http_api.access_tokens] +admin = "MyAccessToken" diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 000000000..8646b952e --- /dev/null +++ b/docker/README.md @@ -0,0 +1,250 @@ +# Docker + +## 
Requirements + +- Docker version 20.10.21 +- You need to create the `storage` directory with this structure and files: + +```s +$ tree storage/ +storage/ +├── database +│   └── data.db +└── ssl_certificates + ├── localhost.crt + └── localhost.key +``` + +> NOTE: you only need the `ssl_certificates` directory and certificates in case you have enabled SSL for the one HTTP tracker or the API. + +## Dev environment + +### With docker + +Build and run locally: + +```s +docker context use default +export TORRUST_TRACKER_USER_UID=1000 +./docker/bin/build.sh $TORRUST_TRACKER_USER_UID +./bin/install.sh +./docker/bin/run.sh $TORRUST_TRACKER_USER_UID +``` + +Run using the pre-built public docker image: + +```s +export TORRUST_TRACKER_USER_UID=1000 +docker run -it \ + --user="$TORRUST_TRACKER_USER_UID" \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --volume "$(pwd)/storage":"/app/storage" \ + torrust/torrust-tracker +``` + +> NOTES: +> +> - You have to create the SQLite DB (`data.db`) and configuration (`config.toml`) before running the tracker. See `bin/install.sh`. +> - You have to replace the user UID (`1000`) with yours. +> - Remember to switch to your default docker context `docker context use default`. + +### With docker-compose + +The docker-compose configuration includes the MySQL service configuration. If you want to use MySQL instead of SQLite you have to change your `config.toml` configuration: + +```toml +db_driver = "MySQL" +db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" +``` + +If you want to inject an environment variable into docker-compose you can use the file `.env`. There is a template `.env.local`. 
+ +Build and run it locally: + +```s +docker compose up --build +``` + +After running the "up" command you will have two running containers: + +```s +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +06feacb91a9e torrust-tracker "cargo run" 18 minutes ago Up 4 seconds 0.0.0.0:1212->1212/tcp, :::1212->1212/tcp, 0.0.0.0:7070->7070/tcp, :::7070->7070/tcp, 0.0.0.0:6969->6969/udp, :::6969->6969/udp torrust-tracker-1 +34d29e792ee2 mysql:8.0 "docker-entrypoint.s…" 18 minutes ago Up 5 seconds (healthy) 0.0.0.0:3306->3306/tcp, :::3306->3306/tcp, 33060/tcp torrust-mysql-1 +``` + +And you should be able to use the application, for example making a request to the API: + + + +You can stop the containers with: + +```s +docker compose down +``` + +Additionally, you can delete all resources (containers, volumes, networks) with: + +```s +docker compose down -v +``` + +### Access Mysql with docker + +These are some useful commands for MySQL. + +Open a shell in the MySQL container using docker or docker-compose. + +```s +docker exec -it torrust-mysql-1 /bin/bash +docker compose exec mysql /bin/bash +``` + +Connect to MySQL from inside the MySQL container or from the host: + +```s +mysql -h127.0.0.1 -uroot -proot_secret_password +``` + +The when MySQL container is started the first time, it creates the database, user, and permissions needed. +If you see the error "Host is not allowed to connect to this MySQL server" you can check that users have the right permissions in the database. Make sure the user `root` and `db_user` can connect from any host (`%`). 
+ +```s +mysql> SELECT host, user FROM mysql.user; ++-----------+------------------+ +| host | user | ++-----------+------------------+ +| % | db_user | +| % | root | +| localhost | mysql.infoschema | +| localhost | mysql.session | +| localhost | mysql.sys | +| localhost | root | ++-----------+------------------+ +6 rows in set (0.00 sec) +``` + +If the database, user or permissions are not created the reason could be the MySQL container volume can be corrupted. Delete it and start again the containers. + +### SSL Certificates + +You can use a certificate for localhost. You can create your [localhost certificate](https://letsencrypt.org/docs/certificates-for-localhost/#making-and-trusting-your-own-certificates) and use it in the `storage` folder and the configuration file (`config.toml`). For example: + +The storage folder must contain your certificates: + +```s +$ tree storage/ +storage/ +├── database +│   └── data.db +└── ssl_certificates + ├── localhost.crt + └── localhost.key +``` + +You have not enabled it in your `config.toml` file: + +```toml +... +[[http_trackers]] +enabled = true +bind_address = "0.0.0.0:7070" +ssl_enabled = true +ssl_cert_path = "./storage/ssl_certificates/localhost.crt" +ssl_key_path = "./storage/ssl_certificates/localhost.key" + +[http_api] +enabled = true +bind_address = "0.0.0.0:1212" +ssl_enabled = true +ssl_cert_path = "./storage/ssl_certificates/localhost.crt" +ssl_key_path = "./storage/ssl_certificates/localhost.key" +... +``` + +> NOTE: you can enable it independently for each HTTP tracker or the API. + +If you enable the SSL certificate for the API, for example, you can load the API with this URL: + + + +## Prod environment + +In this section, you will learn how to deploy the tracker to a single docker container in Azure Container Instances. + +> NOTE: Azure Container Instances is a solution when you want to run an isolated container. 
If you need full container orchestration, including service discovery across multiple containers, automatic scaling, and coordinated application upgrades, we recommend [Kubernetes](https://kubernetes.io/). + +Deploy to Azure Container Instance following [docker documentation](https://docs.docker.com/cloud/aci-integration/). + +You have to create the ACI context and the storage: + +```s +docker context create aci myacicontext +docker context use myacicontext +docker volume create test-volume --storage-account torrustracker +``` + +You need to create all the files needed by the application in the storage dir `storage/database`. + +And finally, you can run the container: + +```s +docker run \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --volume torrustracker/test-volume:/app/storage \ + registry.hub.docker.com/torrust/torrust-tracker:latest +``` + +Detach from container logs when the container starts. By default, the command line stays attached and follows container logs. + +```s +docker run \ + --detach + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \latest + --volume torrustracker/test-volume:/app/storage \ + registry.hub.docker.com/torrust/torrust-tracker:latest +``` + +You should see something like this: + +```s +[+] Running 2/2 + â ¿ Group intelligent-hawking Created 5.0s + â ¿ intelligent-hawking Created 41.7s +2022-12-08T18:39:19.697869300+00:00 [torrust_tracker::logging][INFO] logging initialized. 
+2022-12-08T18:39:19.712651100+00:00 [torrust_tracker::jobs::udp_tracker][INFO] Starting UDP server on: 0.0.0.0:6969 +2022-12-08T18:39:19.712792700+00:00 [torrust_tracker::jobs::tracker_api][INFO] Starting Torrust API server on: 0.0.0.0:1212 +2022-12-08T18:39:19.725124+00:00 [torrust_tracker::jobs::tracker_api][INFO] Torrust API server started +``` + +You can see the container with: + +```s +$ docker ps +CONTAINER ID IMAGE COMMAND STATUS PORTS +intelligent-hawking registry.hub.docker.com/torrust/torrust-tracker:latest Running 4.236.213.57:6969->6969/udp, 4.236.213.57:1212->1212/tcp +``` + +After a while, you can use the tracker API `http://4.236.213.57:1212/api/stats?token=MyAccessToken` and the UDP tracker with your BitTorrent client using this tracker announce URL `udp://4.236.213.57:6969`. + +> NOTES: +> +> - [There is no support for mounting a single file](https://docs.docker.com/cloud/aci-container-features/#persistent-volumes), or mounting a subfolder from an `Azure File Share`. +> - [ACI does not allow port mapping](https://docs.docker.com/cloud/aci-integration/#exposing-ports). +> - [Azure file share volume mount requires the Linux container run as root](https://learn.microsoft.com/en-us/azure/container-instances/container-instances-volume-azure-files#limitations). +> - It can take some minutes until the public IP for the ACI container is available. +> - You can use the Azure web UI to download files from the storage. For example, the SQLite database. +> - [It seems you can only expose web interfaces on port 80 on Azure Container Instances](https://stackoverflow.com/a/56768087/3012842). Not official documentation! + +## Links + +- [Deploying Docker containers on Azure](https://docs.docker.com/cloud/aci-integration/). +- [Docker run options for ACI containers](https://docs.docker.com/cloud/aci-container-features/). 
+- [Quickstart: Deploy a container instance in Azure using the Docker CLI](https://learn.microsoft.com/en-us/azure/container-instances/quickstart-docker-cli). diff --git a/docker/bin/build.sh b/docker/bin/build.sh new file mode 100755 index 000000000..d77d1ad34 --- /dev/null +++ b/docker/bin/build.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +TORRUST_TRACKER_USER_UID=${TORRUST_TRACKER_USER_UID:-1000} +TORRUST_TRACKER_RUN_AS_USER=${TORRUST_TRACKER_RUN_AS_USER:-appuser} + +echo "Building docker image ..." +echo "TORRUST_TRACKER_USER_UID: $TORRUST_TRACKER_USER_UID" +echo "TORRUST_TRACKER_RUN_AS_USER: $TORRUST_TRACKER_RUN_AS_USER" + +docker build \ + --build-arg UID="$TORRUST_TRACKER_USER_UID" \ + --build-arg RUN_AS_USER="$TORRUST_TRACKER_RUN_AS_USER" \ + -t torrust-tracker . diff --git a/docker/bin/install.sh b/docker/bin/install.sh new file mode 100755 index 000000000..a58969378 --- /dev/null +++ b/docker/bin/install.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +./docker/bin/build.sh +./bin/install.sh diff --git a/docker/bin/run.sh b/docker/bin/run.sh new file mode 100755 index 000000000..86465baeb --- /dev/null +++ b/docker/bin/run.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +TORRUST_TRACKER_USER_UID=${TORRUST_TRACKER_USER_UID:-1000} +TORRUST_TRACKER_CONFIG=$(cat config.toml) + +docker run -it \ + --user="$TORRUST_TRACKER_USER_UID" \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --env TORRUST_TRACKER_CONFIG="$TORRUST_TRACKER_CONFIG" \ + --volume "$(pwd)/storage":"/app/storage" \ + torrust-tracker From 032f6a63af2c7ad95f1426e7fdba409569170b89 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 15:58:06 +0000 Subject: [PATCH 0268/1003] fix: docker repo name in README --- docker/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/README.md b/docker/README.md index 8646b952e..e5b4dfe74 100644 --- a/docker/README.md +++ b/docker/README.md @@ -41,7 +41,7 @@ docker run -it \ --publish 7070:7070/tcp \ --publish 
1212:1212/tcp \ --volume "$(pwd)/storage":"/app/storage" \ - torrust/torrust-tracker + torrust/tracker ``` > NOTES: @@ -197,7 +197,7 @@ docker run \ --publish 7070:7070/tcp \ --publish 1212:1212/tcp \ --volume torrustracker/test-volume:/app/storage \ - registry.hub.docker.com/torrust/torrust-tracker:latest + registry.hub.docker.com/torrust/tracker:latest ``` Detach from container logs when the container starts. By default, the command line stays attached and follows container logs. @@ -209,7 +209,7 @@ docker run \ --publish 7070:7070/tcp \ --publish 1212:1212/tcp \latest --volume torrustracker/test-volume:/app/storage \ - registry.hub.docker.com/torrust/torrust-tracker:latest + registry.hub.docker.com/torrust/tracker:latest ``` You should see something like this: @@ -229,7 +229,7 @@ You can see the container with: ```s $ docker ps CONTAINER ID IMAGE COMMAND STATUS PORTS -intelligent-hawking registry.hub.docker.com/torrust/torrust-tracker:latest Running 4.236.213.57:6969->6969/udp, 4.236.213.57:1212->1212/tcp +intelligent-hawking registry.hub.docker.com/torrust/tracker:latest Running 4.236.213.57:6969->6969/udp, 4.236.213.57:1212->1212/tcp ``` After a while, you can use the tracker API `http://4.236.213.57:1212/api/stats?token=MyAccessToken` and the UDP tracker with your BitTorrent client using this tracker announce URL `udp://4.236.213.57:6969`. 
From 171a37d3c48cd365b987eaf280b73a5f35855e20 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 15:58:53 +0000 Subject: [PATCH 0269/1003] feat: publish docker image for tags, develop aand main branches --- .github/workflows/publish_docker_image.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml index b8e3791ed..c6a103931 100644 --- a/.github/workflows/publish_docker_image.yml +++ b/.github/workflows/publish_docker_image.yml @@ -3,10 +3,8 @@ name: Publish docker image on: push: branches: + - 'main' - 'develop' - # todo: only during development of issue 11 - - 'docker' - - 'docker-reorganized-pr' tags: - "v*" From 6851ec5fc1adf206c0baaa20d6201e119c85a4af Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 21 Dec 2022 11:52:20 +0000 Subject: [PATCH 0270/1003] fix: docker image run as non root The published docker image runs by deafult as non root user. Some services like ACI could require run the container as root but for those cases they can run their customs builds or change the user while launching the container. 
--- .github/workflows/publish_docker_image.yml | 7 ++++--- cSpell.json | 1 + 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml index c6a103931..7593fb680 100644 --- a/.github/workflows/publish_docker_image.yml +++ b/.github/workflows/publish_docker_image.yml @@ -3,15 +3,16 @@ name: Publish docker image on: push: branches: - - 'main' - - 'develop' + - "main" + - "develop" tags: - "v*" env: # Azure file share volume mount requires the Linux container run as root # https://learn.microsoft.com/en-us/azure/container-instances/container-instances-volume-azure-files#limitations - TORRUST_TRACKER_RUN_AS_USER: root + # TORRUST_TRACKER_RUN_AS_USER: root + TORRUST_TRACKER_RUN_AS_USER: appuser jobs: test: diff --git a/cSpell.json b/cSpell.json index 5bc67a0c8..57b9f3b67 100644 --- a/cSpell.json +++ b/cSpell.json @@ -1,5 +1,6 @@ { "words": [ + "appuser", "AUTOINCREMENT", "automock", "Avicora", From e4b2a8eb7a24c763c19207a64d273d634abf3626 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 23 Dec 2022 09:48:47 +0000 Subject: [PATCH 0271/1003] feat: publish docker image only when secrets are set Since dockerhun does not allow scoepd token, we are going to use forks to publish docker images. The "publisher" can set their token on their forks. 
The workflow is executed only if the secret "DOCKER_HUB_USERNAME" is set in the environment "dockerhub-torrust" --- .github/workflows/publish_docker_image.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml index 7593fb680..1587a0bd6 100644 --- a/.github/workflows/publish_docker_image.yml +++ b/.github/workflows/publish_docker_image.yml @@ -15,7 +15,21 @@ env: TORRUST_TRACKER_RUN_AS_USER: appuser jobs: + check-secret: + runs-on: ubuntu-latest + environment: dockerhub-torrust + outputs: + publish: ${{ steps.check.outputs.publish }} + steps: + - id: check + env: + DOCKER_HUB_USERNAME: "${{ secrets.DOCKER_HUB_USERNAME }}" + if: "${{ env.DOCKER_HUB_USERNAME != '' }}" + run: echo "publish=true" >> $GITHUB_OUTPUT + test: + needs: check-secret + if: needs.check-secret.outputs.publish == 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 @@ -30,6 +44,7 @@ jobs: dockerhub: needs: test + if: needs.check-secret.outputs.publish == 'true' runs-on: ubuntu-latest environment: dockerhub-torrust steps: From 96f386c77f3e503b645e2890884407897559a038 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 23 Dec 2022 16:51:31 +0000 Subject: [PATCH 0272/1003] refactor: [#142] reorganize tests for the API --- tests/api.rs | 382 ------------------------------------------- tests/api/mod.rs | 214 ++++++++++++++++++++++++ tests/tracker_api.rs | 246 ++++++++++++++++++++++++++++ 3 files changed, 460 insertions(+), 382 deletions(-) delete mode 100644 tests/api.rs create mode 100644 tests/api/mod.rs create mode 100644 tests/tracker_api.rs diff --git a/tests/api.rs b/tests/api.rs deleted file mode 100644 index dfb8d81b3..000000000 --- a/tests/api.rs +++ /dev/null @@ -1,382 +0,0 @@ -/// Integration tests for the tracker API -/// -/// cargo test `tracker_api` -- --nocapture -extern crate rand; - -mod common; - -mod tracker_api { - use core::panic; - use std::env; - use 
std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::str::FromStr; - use std::sync::atomic::{AtomicBool, Ordering}; - use std::sync::Arc; - - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use reqwest::Response; - use tokio::task::JoinHandle; - use torrust_tracker::api::resource; - use torrust_tracker::api::resource::auth_key::AuthKey; - use torrust_tracker::api::resource::stats::Stats; - use torrust_tracker::api::resource::torrent::{self, Torrent}; - use torrust_tracker::config::Configuration; - use torrust_tracker::jobs::tracker_api; - use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::statistics::Keeper; - use torrust_tracker::tracker::{auth, peer}; - use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; - - use crate::common::ephemeral_random_port; - - #[tokio::test] - async fn should_allow_generating_a_new_auth_key() { - let api_server = ApiServer::new_running_instance().await; - - let seconds_valid = 60; - - let auth_key = ApiClient::new(api_server.get_connection_info().unwrap()) - .generate_auth_key(seconds_valid) - .await; - - // Verify the key with the tracker - assert!(api_server - .tracker - .unwrap() - .verify_auth_key(&auth::Key::from(auth_key)) - .await - .is_ok()); - } - - #[tokio::test] - async fn should_allow_whitelisting_a_torrent() { - let api_server = ApiServer::new_running_instance().await; - - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let res = ApiClient::new(api_server.get_connection_info().unwrap()) - .whitelist_a_torrent(&info_hash) - .await; - - assert_eq!(res.status(), 200); - assert!( - api_server - .tracker - .unwrap() - .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) - .await - ); - } - - #[tokio::test] - async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let api_server = ApiServer::new_running_instance().await; - - 
let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let api_client = ApiClient::new(api_server.get_connection_info().unwrap()); - - let res = api_client.whitelist_a_torrent(&info_hash).await; - assert_eq!(res.status(), 200); - - let res = api_client.whitelist_a_torrent(&info_hash).await; - assert_eq!(res.status(), 200); - } - - #[tokio::test] - async fn should_allow_getting_a_torrent_info() { - let api_server = ApiServer::new_running_instance().await; - let api_connection_info = api_server.get_connection_info().unwrap(); - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - let (peer, peer_resource) = sample_torrent_peer(); - - // Add a torrent to the tracker - api_server - .tracker - .unwrap() - .update_torrent_with_peer_and_get_stats(&info_hash, &peer) - .await; - - let torrent_resource = ApiClient::new(api_connection_info).get_torrent(&info_hash.to_string()).await; - - assert_eq!( - torrent_resource, - Torrent { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: Some(vec![peer_resource]) - } - ); - } - - #[tokio::test] - async fn should_allow_getting_torrents() { - let api_server = ApiServer::new_running_instance().await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - let (peer, _peer_resource) = sample_torrent_peer(); - - let api_connection_info = api_server.get_connection_info().unwrap(); - - // Add a torrent to the tracker - api_server - .tracker - .unwrap() - .update_torrent_with_peer_and_get_stats(&info_hash, &peer) - .await; - - let torrent_resources = ApiClient::new(api_connection_info).get_torrents().await; - - assert_eq!( - torrent_resources, - vec![torrent::ListItem { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: None // Torrent list does not include peer list - }] - ); - } - - #[tokio::test] 
- async fn should_allow_getting_tracker_statistics() { - let api_server = ApiServer::new_running_instance().await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - let (peer, _peer_resource) = sample_torrent_peer(); - - let api_connection_info = api_server.get_connection_info().unwrap(); - - // Add a torrent to the tracker - api_server - .tracker - .unwrap() - .update_torrent_with_peer_and_get_stats(&info_hash, &peer) - .await; - - let stats_resource = ApiClient::new(api_connection_info).get_tracker_statistics().await; - - assert_eq!( - stats_resource, - Stats { - torrents: 1, - seeders: 1, - completed: 0, - leechers: 0, - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - } - ); - } - - fn sample_torrent_peer() -> (peer::Peer, resource::peer::Peer) { - let torrent_peer = peer::Peer { - peer_id: peer::Id(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - let torrent_peer_resource = resource::peer::Peer::from(torrent_peer); - - (torrent_peer, torrent_peer_resource) - } - - fn tracker_configuration() -> Arc { - let mut config = Configuration { - log_level: Some("off".to_owned()), - ..Default::default() - }; - - // Ephemeral socket address - let port = ephemeral_random_port(); - config.http_api.bind_address = format!("127.0.0.1:{}", &port); - - // Ephemeral database - let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("data_{}.db", &port)); - config.db_path = 
temp_file.to_str().unwrap().to_owned(); - - Arc::new(config) - } - - #[derive(Clone)] - struct ApiConnectionInfo { - pub bind_address: String, - pub api_token: String, - } - - impl ApiConnectionInfo { - pub fn new(bind_address: &str, api_token: &str) -> Self { - Self { - bind_address: bind_address.to_string(), - api_token: api_token.to_string(), - } - } - } - - struct ApiServer { - pub started: AtomicBool, - pub job: Option>, - pub tracker: Option>, - pub connection_info: Option, - } - - impl ApiServer { - pub fn new() -> Self { - Self { - started: AtomicBool::new(false), - job: None, - tracker: None, - connection_info: None, - } - } - - pub async fn new_running_instance() -> ApiServer { - let configuration = tracker_configuration(); - ApiServer::new_running_custom_instance(configuration.clone()).await - } - - async fn new_running_custom_instance(configuration: Arc) -> ApiServer { - let mut api_server = ApiServer::new(); - api_server.start(configuration).await; - api_server - } - - pub async fn start(&mut self, configuration: Arc) { - if !self.started.load(Ordering::Relaxed) { - self.connection_info = Some(ApiConnectionInfo::new( - &configuration.http_api.bind_address.clone(), - &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), - )); - - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - self.tracker = Some(tracker.clone()); - - // Initialize logging - logging::setup(&configuration); - - // Start the HTTP API job - self.job = 
Some(tracker_api::start_job(&configuration.http_api, tracker).await); - - self.started.store(true, Ordering::Relaxed); - } - } - - pub fn get_connection_info(&self) -> Option { - self.connection_info.clone() - } - } - - struct ApiClient { - connection_info: ApiConnectionInfo, - } - - impl ApiClient { - pub fn new(connection_info: ApiConnectionInfo) -> Self { - Self { connection_info } - } - - pub async fn generate_auth_key(&self, seconds_valid: i32) -> AuthKey { - let url = format!( - "http://{}/api/key/{}?token={}", - &self.connection_info.bind_address, &seconds_valid, &self.connection_info.api_token - ); - reqwest::Client::new().post(url).send().await.unwrap().json().await.unwrap() - } - - pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { - let url = format!( - "http://{}/api/whitelist/{}?token={}", - &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token - ); - reqwest::Client::new().post(url.clone()).send().await.unwrap() - } - - pub async fn get_torrent(&self, info_hash: &str) -> Torrent { - let url = format!( - "http://{}/api/torrent/{}?token={}", - &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token - ); - reqwest::Client::builder() - .build() - .unwrap() - .get(url) - .send() - .await - .unwrap() - .json::() - .await - .unwrap() - } - - pub async fn get_torrents(&self) -> Vec { - let url = format!( - "http://{}/api/torrents?token={}", - &self.connection_info.bind_address, &self.connection_info.api_token - ); - reqwest::Client::builder() - .build() - .unwrap() - .get(url) - .send() - .await - .unwrap() - .json::>() - .await - .unwrap() - } - - pub async fn get_tracker_statistics(&self) -> Stats { - let url = format!( - "http://{}/api/stats?token={}", - &self.connection_info.bind_address, &self.connection_info.api_token - ); - reqwest::Client::builder() - .build() - .unwrap() - .get(url) - .send() - .await - .unwrap() - .json::() - .await - .unwrap() - } - } -} diff --git 
a/tests/api/mod.rs b/tests/api/mod.rs new file mode 100644 index 000000000..9e2750122 --- /dev/null +++ b/tests/api/mod.rs @@ -0,0 +1,214 @@ +use core::panic; +use std::env; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use reqwest::Response; +use tokio::task::JoinHandle; +use torrust_tracker::api::resource; +use torrust_tracker::api::resource::auth_key::AuthKey; +use torrust_tracker::api::resource::stats::Stats; +use torrust_tracker::api::resource::torrent::{self, Torrent}; +use torrust_tracker::config::Configuration; +use torrust_tracker::jobs::tracker_api; +use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; +use torrust_tracker::tracker::peer; +use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; + +use crate::common::ephemeral_random_port; + +pub fn sample_torrent_peer() -> (peer::Peer, resource::peer::Peer) { + let torrent_peer = peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + let torrent_peer_resource = resource::peer::Peer::from(torrent_peer); + + (torrent_peer, torrent_peer_resource) +} + +pub fn tracker_configuration() -> Arc { + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; + + // Ephemeral socket address + let port = ephemeral_random_port(); + config.http_api.bind_address = format!("127.0.0.1:{}", &port); + + // Ephemeral database + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("data_{}.db", &port)); + config.db_path = temp_file.to_str().unwrap().to_owned(); + + 
Arc::new(config) +} + +#[derive(Clone)] +pub struct ConnectionInfo { + pub bind_address: String, + pub api_token: String, +} + +impl ConnectionInfo { + pub fn new(bind_address: &str, api_token: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + api_token: api_token.to_string(), + } + } +} + +pub struct Server { + pub started: AtomicBool, + pub job: Option>, + pub tracker: Option>, + pub connection_info: Option, +} + +impl Server { + pub fn new() -> Self { + Self { + started: AtomicBool::new(false), + job: None, + tracker: None, + connection_info: None, + } + } + + pub async fn new_running_instance() -> Self { + let configuration = tracker_configuration(); + Self::new_running_custom_instance(configuration.clone()).await + } + + async fn new_running_custom_instance(configuration: Arc) -> Self { + let mut api_server = Self::new(); + api_server.start(configuration).await; + api_server + } + + pub async fn start(&mut self, configuration: Arc) { + if !self.started.load(Ordering::Relaxed) { + self.connection_info = Some(ConnectionInfo::new( + &configuration.http_api.bind_address.clone(), + &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), + )); + + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + self.tracker = Some(tracker.clone()); + + // Initialize logging + logging::setup(&configuration); + + // Start the HTTP API job + self.job = Some(tracker_api::start_job(&configuration.http_api, tracker).await); + + 
self.started.store(true, Ordering::Relaxed); + } + } + + pub fn get_connection_info(&self) -> Option { + self.connection_info.clone() + } +} + +pub struct Client { + connection_info: ConnectionInfo, +} + +impl Client { + pub fn new(connection_info: ConnectionInfo) -> Self { + Self { connection_info } + } + + pub async fn generate_auth_key(&self, seconds_valid: i32) -> AuthKey { + let url = format!( + "http://{}/api/key/{}?token={}", + &self.connection_info.bind_address, &seconds_valid, &self.connection_info.api_token + ); + reqwest::Client::new().post(url).send().await.unwrap().json().await.unwrap() + } + + pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { + let url = format!( + "http://{}/api/whitelist/{}?token={}", + &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token + ); + reqwest::Client::new().post(url.clone()).send().await.unwrap() + } + + pub async fn get_torrent(&self, info_hash: &str) -> Torrent { + let url = format!( + "http://{}/api/torrent/{}?token={}", + &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token + ); + reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::() + .await + .unwrap() + } + + pub async fn get_torrents(&self) -> Vec { + let url = format!( + "http://{}/api/torrents?token={}", + &self.connection_info.bind_address, &self.connection_info.api_token + ); + reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::>() + .await + .unwrap() + } + + pub async fn get_tracker_statistics(&self) -> Stats { + let url = format!( + "http://{}/api/stats?token={}", + &self.connection_info.bind_address, &self.connection_info.api_token + ); + reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::() + .await + .unwrap() + } +} diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs new file mode 100644 index 000000000..070126d0a --- 
/dev/null +++ b/tests/tracker_api.rs @@ -0,0 +1,246 @@ +/// Integration tests for the tracker API +/// +/// ```text +/// cargo test tracker_api -- --nocapture +/// ``` +extern crate rand; + +mod api; +mod common; + +mod tracker_api { + + /* + + Endpoints: + + Stats: + GET /api/stats + + Torrents: + GET /api/torrents?offset=:u32&limit=:u32 + GET /api/torrent/:info_hash + + Whitelisted torrents: + POST /api/whitelist/:info_hash + DELETE /api/whitelist/:info_hash + + Whitelist command: + GET /api/whitelist/reload + + Keys: + POST /api/key/:seconds_valid + GET /api/keys/reload + DELETE /api/key/:key + + */ + + mod for_stats_resources { + use std::str::FromStr; + + use torrust_tracker::api::resource::stats::Stats; + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::api::{sample_torrent_peer, Client, Server}; + + #[tokio::test] + async fn should_allow_getting_tracker_statistics() { + let api_server = Server::new_running_instance().await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let (peer, _peer_resource) = sample_torrent_peer(); + + let api_connection_info = api_server.get_connection_info().unwrap(); + + // Add a torrent to the tracker + api_server + .tracker + .unwrap() + .update_torrent_with_peer_and_get_stats(&info_hash, &peer) + .await; + + let stats_resource = Client::new(api_connection_info).get_tracker_statistics().await; + + assert_eq!( + stats_resource, + Stats { + torrents: 1, + seeders: 1, + completed: 0, + leechers: 0, + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + } + ); + } + } + + mod for_torrent_resources { + #[tokio::test] + async fn should_allow_getting_torrents() { + let api_server = 
Server::new_running_instance().await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let (peer, _peer_resource) = sample_torrent_peer(); + + let api_connection_info = api_server.get_connection_info().unwrap(); + + // Add a torrent to the tracker + api_server + .tracker + .unwrap() + .update_torrent_with_peer_and_get_stats(&info_hash, &peer) + .await; + + let torrent_resources = Client::new(api_connection_info).get_torrents().await; + + assert_eq!( + torrent_resources, + vec![torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None // Torrent list does not include peer list + }] + ); + } + + #[tokio::test] + async fn should_allow_getting_a_torrent_info() { + let api_server = Server::new_running_instance().await; + let api_connection_info = api_server.get_connection_info().unwrap(); + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let (peer, peer_resource) = sample_torrent_peer(); + + // Add a torrent to the tracker + api_server + .tracker + .unwrap() + .update_torrent_with_peer_and_get_stats(&info_hash, &peer) + .await; + + let torrent_resource = Client::new(api_connection_info).get_torrent(&info_hash.to_string()).await; + + assert_eq!( + torrent_resource, + Torrent { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: Some(vec![peer_resource]) + } + ); + } + + use std::str::FromStr; + + use torrust_tracker::api::resource::torrent::{self, Torrent}; + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::api::{sample_torrent_peer, Client, Server}; + + #[tokio::test] + async fn should_allow_whitelisting_a_torrent() { + let api_server = Server::new_running_instance().await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let res = 
Client::new(api_server.get_connection_info().unwrap()) + .whitelist_a_torrent(&info_hash) + .await; + + assert_eq!(res.status(), 200); + assert!( + api_server + .tracker + .unwrap() + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await + ); + } + } + + mod for_whitelisted_torrent_resources { + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::api::{Client, Server}; + + #[tokio::test] + async fn should_allow_whitelisting_a_torrent() { + let api_server = Server::new_running_instance().await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let res = Client::new(api_server.get_connection_info().unwrap()) + .whitelist_a_torrent(&info_hash) + .await; + + assert_eq!(res.status(), 200); + assert!( + api_server + .tracker + .unwrap() + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await + ); + } + + #[tokio::test] + async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { + let api_server = Server::new_running_instance().await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let api_client = Client::new(api_server.get_connection_info().unwrap()); + + let res = api_client.whitelist_a_torrent(&info_hash).await; + assert_eq!(res.status(), 200); + + let res = api_client.whitelist_a_torrent(&info_hash).await; + assert_eq!(res.status(), 200); + } + } + + mod for_key_resources { + use torrust_tracker::tracker::auth; + + use crate::api::{Client, Server}; + + #[tokio::test] + async fn should_allow_generating_a_new_auth_key() { + let api_server = Server::new_running_instance().await; + + let seconds_valid = 60; + + let auth_key = Client::new(api_server.get_connection_info().unwrap()) + .generate_auth_key(seconds_valid) + .await; + + // Verify the key with the tracker + assert!(api_server + .tracker + .unwrap() + .verify_auth_key(&auth::Key::from(auth_key)) + .await + .is_ok()); + } + } +} From 
68d521e3615b7952aa42a0a122944cbadba93048 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 23 Dec 2022 17:29:35 +0000 Subject: [PATCH 0273/1003] refactor: [#142] simplify test api server --- tests/api/mod.rs | 99 ++++++++++++++++++++------------------------ tests/tracker_api.rs | 66 ++++++++++------------------- 2 files changed, 67 insertions(+), 98 deletions(-) diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 9e2750122..14365af9c 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -1,12 +1,10 @@ use core::panic; use std::env; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use reqwest::Response; -use tokio::task::JoinHandle; use torrust_tracker::api::resource; use torrust_tracker::api::resource::auth_key::AuthKey; use torrust_tracker::api::resource::stats::Stats; @@ -14,7 +12,8 @@ use torrust_tracker::api::resource::torrent::{self, Torrent}; use torrust_tracker::config::Configuration; use torrust_tracker::jobs::tracker_api; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; -use torrust_tracker::tracker::peer; +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::tracker::peer::{self, Peer}; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; @@ -68,71 +67,63 @@ impl ConnectionInfo { } } -pub struct Server { - pub started: AtomicBool, - pub job: Option>, - pub tracker: Option>, - pub connection_info: Option, +pub async fn start_default_api_server() -> Server { + let configuration = tracker_configuration(); + start_custom_api_server(configuration.clone()).await } -impl Server { - pub fn new() -> Self { - Self { - started: AtomicBool::new(false), - job: None, - tracker: None, - connection_info: None, - } - } +pub async fn start_custom_api_server(configuration: Arc) -> Server { + start(configuration).await +} - pub async fn 
new_running_instance() -> Self { - let configuration = tracker_configuration(); - Self::new_running_custom_instance(configuration.clone()).await - } +async fn start(configuration: Arc) -> Server { + let connection_info = ConnectionInfo::new( + &configuration.http_api.bind_address.clone(), + &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), + ); - async fn new_running_custom_instance(configuration: Arc) -> Self { - let mut api_server = Self::new(); - api_server.start(configuration).await; - api_server - } + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); - pub async fn start(&mut self, configuration: Arc) { - if !self.started.load(Ordering::Relaxed) { - self.connection_info = Some(ConnectionInfo::new( - &configuration.http_api.bind_address.clone(), - &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), - )); + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + // Initialize Torrust tracker + let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + // Initialize logging + logging::setup(&configuration); - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - 
}; - self.tracker = Some(tracker.clone()); + // Start the HTTP API job + tracker_api::start_job(&configuration.http_api, tracker.clone()).await; - // Initialize logging - logging::setup(&configuration); + Server { + tracker, + connection_info, + } +} - // Start the HTTP API job - self.job = Some(tracker_api::start_job(&configuration.http_api, tracker).await); +pub struct Server { + pub tracker: Arc, + pub connection_info: ConnectionInfo, +} - self.started.store(true, Ordering::Relaxed); - } +impl Server { + pub fn get_connection_info(&self) -> ConnectionInfo { + self.connection_info.clone() } - pub fn get_connection_info(&self) -> Option { - self.connection_info.clone() + /// Add a torrent to the tracker + pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; } } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 070126d0a..3a835204f 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -41,24 +41,19 @@ mod tracker_api { use torrust_tracker::api::resource::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::{sample_torrent_peer, Client, Server}; + use crate::api::{sample_torrent_peer, start_default_api_server, Client}; #[tokio::test] async fn should_allow_getting_tracker_statistics() { - let api_server = Server::new_running_instance().await; + let api_server = start_default_api_server().await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let (peer, _peer_resource) = sample_torrent_peer(); - let api_connection_info = api_server.get_connection_info().unwrap(); + let api_connection_info = api_server.get_connection_info(); - // Add a torrent to the tracker - api_server - .tracker - .unwrap() - .update_torrent_with_peer_and_get_stats(&info_hash, &peer) - .await; + api_server.add_torrent(&info_hash, &peer).await; let stats_resource = 
Client::new(api_connection_info).get_tracker_statistics().await; @@ -89,20 +84,15 @@ mod tracker_api { mod for_torrent_resources { #[tokio::test] async fn should_allow_getting_torrents() { - let api_server = Server::new_running_instance().await; + let api_server = start_default_api_server().await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let (peer, _peer_resource) = sample_torrent_peer(); - let api_connection_info = api_server.get_connection_info().unwrap(); + let api_connection_info = api_server.get_connection_info(); - // Add a torrent to the tracker - api_server - .tracker - .unwrap() - .update_torrent_with_peer_and_get_stats(&info_hash, &peer) - .await; + api_server.add_torrent(&info_hash, &peer).await; let torrent_resources = Client::new(api_connection_info).get_torrents().await; @@ -120,19 +110,14 @@ mod tracker_api { #[tokio::test] async fn should_allow_getting_a_torrent_info() { - let api_server = Server::new_running_instance().await; - let api_connection_info = api_server.get_connection_info().unwrap(); + let api_server = start_default_api_server().await; + let api_connection_info = api_server.get_connection_info(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let (peer, peer_resource) = sample_torrent_peer(); - // Add a torrent to the tracker - api_server - .tracker - .unwrap() - .update_torrent_with_peer_and_get_stats(&info_hash, &peer) - .await; + api_server.add_torrent(&info_hash, &peer).await; let torrent_resource = Client::new(api_connection_info).get_torrent(&info_hash.to_string()).await; @@ -153,15 +138,15 @@ mod tracker_api { use torrust_tracker::api::resource::torrent::{self, Torrent}; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::{sample_torrent_peer, Client, Server}; + use crate::api::{sample_torrent_peer, start_default_api_server, Client}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let api_server = 
Server::new_running_instance().await; + let api_server = start_default_api_server().await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let res = Client::new(api_server.get_connection_info().unwrap()) + let res = Client::new(api_server.get_connection_info()) .whitelist_a_torrent(&info_hash) .await; @@ -169,7 +154,6 @@ mod tracker_api { assert!( api_server .tracker - .unwrap() .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) .await ); @@ -181,15 +165,15 @@ mod tracker_api { use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::{Client, Server}; + use crate::api::{start_default_api_server, Client}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let api_server = Server::new_running_instance().await; + let api_server = start_default_api_server().await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let res = Client::new(api_server.get_connection_info().unwrap()) + let res = Client::new(api_server.get_connection_info()) .whitelist_a_torrent(&info_hash) .await; @@ -197,7 +181,6 @@ mod tracker_api { assert!( api_server .tracker - .unwrap() .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) .await ); @@ -205,11 +188,11 @@ mod tracker_api { #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let api_server = Server::new_running_instance().await; + let api_server = start_default_api_server().await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let api_client = Client::new(api_server.get_connection_info().unwrap()); + let api_client = Client::new(api_server.get_connection_info()); let res = api_client.whitelist_a_torrent(&info_hash).await; assert_eq!(res.status(), 200); @@ -222,25 +205,20 @@ mod tracker_api { mod for_key_resources { use torrust_tracker::tracker::auth; - use crate::api::{Client, Server}; + use crate::api::{start_default_api_server, Client}; #[tokio::test] async fn 
should_allow_generating_a_new_auth_key() { - let api_server = Server::new_running_instance().await; + let api_server = start_default_api_server().await; let seconds_valid = 60; - let auth_key = Client::new(api_server.get_connection_info().unwrap()) + let auth_key = Client::new(api_server.get_connection_info()) .generate_auth_key(seconds_valid) .await; // Verify the key with the tracker - assert!(api_server - .tracker - .unwrap() - .verify_auth_key(&auth::Key::from(auth_key)) - .await - .is_ok()); + assert!(api_server.tracker.verify_auth_key(&auth::Key::from(auth_key)).await.is_ok()); } } } From 3422e93a729bd30b89dbc7dacf3cd993463fd1bf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 23 Dec 2022 17:48:56 +0000 Subject: [PATCH 0274/1003] refactor: [#142] clean api tests --- tests/api/mod.rs | 10 +++------- tests/tracker_api.rs | 39 ++++++++++++++++++--------------------- 2 files changed, 21 insertions(+), 28 deletions(-) diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 14365af9c..78c5d9d96 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -5,7 +5,6 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use reqwest::Response; -use torrust_tracker::api::resource; use torrust_tracker::api::resource::auth_key::AuthKey; use torrust_tracker::api::resource::stats::Stats; use torrust_tracker::api::resource::torrent::{self, Torrent}; @@ -19,8 +18,8 @@ use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use crate::common::ephemeral_random_port; -pub fn sample_torrent_peer() -> (peer::Peer, resource::peer::Peer) { - let torrent_peer = peer::Peer { +pub fn sample_peer() -> peer::Peer { + peer::Peer { peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), @@ -28,10 +27,7 @@ pub fn sample_torrent_peer() -> (peer::Peer, resource::peer::Peer) { downloaded: NumberOfBytes(0), left: 
NumberOfBytes(0), event: AnnounceEvent::Started, - }; - let torrent_peer_resource = resource::peer::Peer::from(torrent_peer); - - (torrent_peer, torrent_peer_resource) + } } pub fn tracker_configuration() -> Arc { diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 3a835204f..757494691 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -41,21 +41,20 @@ mod tracker_api { use torrust_tracker::api::resource::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::{sample_torrent_peer, start_default_api_server, Client}; + use crate::api::{sample_peer, start_default_api_server, Client}; #[tokio::test] async fn should_allow_getting_tracker_statistics() { let api_server = start_default_api_server().await; - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - let (peer, _peer_resource) = sample_torrent_peer(); - - let api_connection_info = api_server.get_connection_info(); - - api_server.add_torrent(&info_hash, &peer).await; + api_server + .add_torrent( + &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + &sample_peer(), + ) + .await; - let stats_resource = Client::new(api_connection_info).get_tracker_statistics().await; + let stats_resource = Client::new(api_server.get_connection_info()).get_tracker_statistics().await; assert_eq!( stats_resource, @@ -88,13 +87,9 @@ mod tracker_api { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let (peer, _peer_resource) = sample_torrent_peer(); - - let api_connection_info = api_server.get_connection_info(); + api_server.add_torrent(&info_hash, &sample_peer()).await; - api_server.add_torrent(&info_hash, &peer).await; - - let torrent_resources = Client::new(api_connection_info).get_torrents().await; + let torrent_resources = Client::new(api_server.get_connection_info()).get_torrents().await; assert_eq!( torrent_resources, @@ -103,7 +98,7 @@ mod tracker_api { seeders: 1, 
completed: 0, leechers: 0, - peers: None // Torrent list does not include peer list + peers: None // Torrent list does not include the peer list for each torrent }] ); } @@ -111,15 +106,16 @@ mod tracker_api { #[tokio::test] async fn should_allow_getting_a_torrent_info() { let api_server = start_default_api_server().await; - let api_connection_info = api_server.get_connection_info(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let (peer, peer_resource) = sample_torrent_peer(); + let peer = sample_peer(); api_server.add_torrent(&info_hash, &peer).await; - let torrent_resource = Client::new(api_connection_info).get_torrent(&info_hash.to_string()).await; + let torrent_resource = Client::new(api_server.get_connection_info()) + .get_torrent(&info_hash.to_string()) + .await; assert_eq!( torrent_resource, @@ -128,17 +124,18 @@ mod tracker_api { seeders: 1, completed: 0, leechers: 0, - peers: Some(vec![peer_resource]) + peers: Some(vec![resource::peer::Peer::from(peer)]) } ); } use std::str::FromStr; + use torrust_tracker::api::resource; use torrust_tracker::api::resource::torrent::{self, Torrent}; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::{sample_torrent_peer, start_default_api_server, Client}; + use crate::api::{sample_peer, start_default_api_server, Client}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { From 07364f43e23b62d68366b72dbee3fbb2897da5f5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 23 Dec 2022 18:14:02 +0000 Subject: [PATCH 0275/1003] refactor: [#142] extract functions in test api Client --- tests/api/mod.rs | 62 ++++++++++++++++-------------------------------- 1 file changed, 20 insertions(+), 42 deletions(-) diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 78c5d9d96..1a2061d04 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -133,69 +133,47 @@ impl Client { } pub async fn generate_auth_key(&self, seconds_valid: i32) -> AuthKey { - let url = 
format!( - "http://{}/api/key/{}?token={}", - &self.connection_info.bind_address, &seconds_valid, &self.connection_info.api_token - ); - reqwest::Client::new().post(url).send().await.unwrap().json().await.unwrap() + self.post(&format!("key/{}", &seconds_valid)).await.json().await.unwrap() } pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { - let url = format!( - "http://{}/api/whitelist/{}?token={}", - &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token - ); - reqwest::Client::new().post(url.clone()).send().await.unwrap() + self.post(&format!("whitelist/{}", &info_hash)).await } pub async fn get_torrent(&self, info_hash: &str) -> Torrent { - let url = format!( - "http://{}/api/torrent/{}?token={}", - &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token - ); - reqwest::Client::builder() - .build() - .unwrap() - .get(url) - .send() + self.get(&format!("torrent/{}", &info_hash)) .await - .unwrap() .json::() .await .unwrap() } pub async fn get_torrents(&self) -> Vec { - let url = format!( - "http://{}/api/torrents?token={}", - &self.connection_info.bind_address, &self.connection_info.api_token - ); - reqwest::Client::builder() - .build() - .unwrap() - .get(url) - .send() - .await - .unwrap() - .json::>() - .await - .unwrap() + self.get("torrents").await.json::>().await.unwrap() } pub async fn get_tracker_statistics(&self) -> Stats { - let url = format!( - "http://{}/api/stats?token={}", - &self.connection_info.bind_address, &self.connection_info.api_token - ); + self.get("stats").await.json::().await.unwrap() + } + + async fn get(&self, path: &str) -> Response { reqwest::Client::builder() .build() .unwrap() - .get(url) + .get(self.url(path)) .send() .await .unwrap() - .json::() - .await - .unwrap() + } + + async fn post(&self, path: &str) -> Response { + reqwest::Client::new().post(self.url(path).clone()).send().await.unwrap() + } + + fn url(&self, path: &str) -> String { + format!( + 
"http://{}/api/{path}?token={}", + &self.connection_info.bind_address, &self.connection_info.api_token + ) } } From 11259e8a33dbfa0f3d141058c84f13bbe8547f7e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 30 Dec 2022 17:25:00 +0000 Subject: [PATCH 0276/1003] test(api): [#142] improved api test coverage --- src/tracker/auth.rs | 4 +- tests/api/mod.rs | 158 +++++++++++++++--- tests/tracker_api.rs | 385 ++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 491 insertions(+), 56 deletions(-) diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 02450dc82..406ef7033 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -4,7 +4,7 @@ use derive_more::{Display, Error}; use log::debug; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; -use serde::Serialize; +use serde::{Deserialize, Serialize}; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time, TimeNow}; use crate::protocol::common::AUTH_KEY_LENGTH; @@ -48,7 +48,7 @@ pub fn verify(auth_key: &Key) -> Result<(), Error> { } } -#[derive(Serialize, Debug, Eq, PartialEq, Clone)] +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct Key { pub key: String, pub valid_until: Option, diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 1a2061d04..1528888bf 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -5,9 +5,6 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use reqwest::Response; -use torrust_tracker::api::resource::auth_key::AuthKey; -use torrust_tracker::api::resource::stats::Stats; -use torrust_tracker::api::resource::torrent::{self, Torrent}; use torrust_tracker::config::Configuration; use torrust_tracker::jobs::tracker_api; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; @@ -51,14 +48,21 @@ pub fn tracker_configuration() -> Arc { #[derive(Clone)] pub struct ConnectionInfo { pub bind_address: String, - pub api_token: String, + pub api_token: Option, } impl ConnectionInfo { - pub fn 
new(bind_address: &str, api_token: &str) -> Self { + pub fn authenticated(bind_address: &str, api_token: &str) -> Self { Self { bind_address: bind_address.to_string(), - api_token: api_token.to_string(), + api_token: Some(api_token.to_string()), + } + } + + pub fn anonymous(bind_address: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + api_token: None, } } } @@ -73,7 +77,7 @@ pub async fn start_custom_api_server(configuration: Arc) -> Serve } async fn start(configuration: Arc) -> Server { - let connection_info = ConnectionInfo::new( + let connection_info = ConnectionInfo::authenticated( &configuration.http_api.bind_address.clone(), &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), ); @@ -117,6 +121,10 @@ impl Server { self.connection_info.clone() } + pub fn get_bind_address(&self) -> String { + self.connection_info.bind_address.clone() + } + /// Add a torrent to the tracker pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; @@ -127,53 +135,149 @@ pub struct Client { connection_info: ConnectionInfo, } +type ReqwestQuery = Vec; +type ReqwestQueryParam = (String, String); + +#[derive(Default, Debug)] +pub struct Query { + params: Vec, +} + +impl Query { + pub fn empty() -> Self { + Self { params: vec![] } + } + + pub fn params(params: Vec) -> Self { + Self { params } + } + + pub fn add_param(&mut self, param: QueryParam) { + self.params.push(param); + } + + fn with_token(token: &str) -> Self { + Self { + params: vec![QueryParam::new("token", token)], + } + } +} + +impl From for ReqwestQuery { + fn from(url_search_params: Query) -> Self { + url_search_params + .params + .iter() + .map(|param| ReqwestQueryParam::from((*param).clone())) + .collect() + } +} + +#[derive(Clone, Debug)] +pub struct QueryParam { + name: String, + value: String, +} + +impl QueryParam { + pub fn new(name: &str, value: &str) -> Self { + Self { + name: 
name.to_string(), + value: value.to_string(), + } + } +} + +impl From for ReqwestQueryParam { + fn from(param: QueryParam) -> Self { + (param.name, param.value) + } +} + impl Client { pub fn new(connection_info: ConnectionInfo) -> Self { Self { connection_info } } - pub async fn generate_auth_key(&self, seconds_valid: i32) -> AuthKey { - self.post(&format!("key/{}", &seconds_valid)).await.json().await.unwrap() + pub async fn generate_auth_key(&self, seconds_valid: i32) -> Response { + self.post(&format!("key/{}", &seconds_valid)).await + } + + pub async fn delete_auth_key(&self, key: &str) -> Response { + self.delete(&format!("key/{}", &key)).await + } + + pub async fn reload_keys(&self) -> Response { + self.get("keys/reload", Query::default()).await } pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { self.post(&format!("whitelist/{}", &info_hash)).await } - pub async fn get_torrent(&self, info_hash: &str) -> Torrent { - self.get(&format!("torrent/{}", &info_hash)) - .await - .json::() - .await - .unwrap() + pub async fn remove_torrent_from_whitelist(&self, info_hash: &str) -> Response { + self.delete(&format!("whitelist/{}", &info_hash)).await } - pub async fn get_torrents(&self) -> Vec { - self.get("torrents").await.json::>().await.unwrap() + pub async fn reload_whitelist(&self) -> Response { + self.get("whitelist/reload", Query::default()).await } - pub async fn get_tracker_statistics(&self) -> Stats { - self.get("stats").await.json::().await.unwrap() + pub async fn get_torrent(&self, info_hash: &str) -> Response { + self.get(&format!("torrent/{}", &info_hash), Query::default()).await } - async fn get(&self, path: &str) -> Response { + pub async fn get_torrents(&self, params: Query) -> Response { + self.get("torrents", params).await + } + + pub async fn get_tracker_statistics(&self) -> Response { + self.get("stats", Query::default()).await + } + + async fn get(&self, path: &str, params: Query) -> Response { + let mut query: Query = params; + 
+ if let Some(token) = &self.connection_info.api_token { + query.add_param(QueryParam::new("token", token)); + }; + reqwest::Client::builder() .build() .unwrap() - .get(self.url(path)) + .get(self.base_url(path)) + .query(&ReqwestQuery::from(query)) .send() .await .unwrap() } async fn post(&self, path: &str) -> Response { - reqwest::Client::new().post(self.url(path).clone()).send().await.unwrap() + reqwest::Client::new() + .post(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())) + .send() + .await + .unwrap() + } + + async fn delete(&self, path: &str) -> Response { + reqwest::Client::new() + .delete(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())) + .send() + .await + .unwrap() } - fn url(&self, path: &str) -> String { - format!( - "http://{}/api/{path}?token={}", - &self.connection_info.bind_address, &self.connection_info.api_token - ) + fn base_url(&self, path: &str) -> String { + format!("http://{}/api/{path}", &self.connection_info.bind_address) + } + + fn query_with_token(&self) -> Query { + match &self.connection_info.api_token { + Some(token) => Query::with_token(token), + None => Query::default(), + } } } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 757494691..d02f29374 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -35,13 +35,43 @@ mod tracker_api { */ + use reqwest::Response; + + use crate::api::ConnectionInfo; + + async fn assert_token_not_valid(response: Response) { + assert_eq!(response.status(), 500); + assert_eq!( + response.text().await.unwrap(), + "Unhandled rejection: Err { reason: \"token not valid\" }" + ); + } + + async fn assert_unauthorized(response: Response) { + assert_eq!(response.status(), 500); + assert_eq!( + response.text().await.unwrap(), + "Unhandled rejection: Err { reason: \"unauthorized\" }" + ); + } + + fn connection_with_invalid_token(bind_address: &str) -> ConnectionInfo { + ConnectionInfo::authenticated(bind_address, "invalid 
token") + } + + fn connection_with_no_token(bind_address: &str) -> ConnectionInfo { + ConnectionInfo::anonymous(bind_address) + } + mod for_stats_resources { use std::str::FromStr; use torrust_tracker::api::resource::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; + use super::{connection_with_invalid_token, connection_with_no_token}; use crate::api::{sample_peer, start_default_api_server, Client}; + use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; #[tokio::test] async fn should_allow_getting_tracker_statistics() { @@ -54,10 +84,11 @@ mod tracker_api { ) .await; - let stats_resource = Client::new(api_server.get_connection_info()).get_tracker_statistics().await; + let response = Client::new(api_server.get_connection_info()).get_tracker_statistics().await; + assert_eq!(response.status(), 200); assert_eq!( - stats_resource, + response.json::().await.unwrap(), Stats { torrents: 1, seeders: 1, @@ -78,9 +109,36 @@ mod tracker_api { } ); } + + #[tokio::test] + async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { + let api_server = start_default_api_server().await; + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + .get_tracker_statistics() + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + .get_tracker_statistics() + .await; + + assert_unauthorized(response).await; + } } mod for_torrent_resources { + use std::str::FromStr; + + use torrust_tracker::api::resource; + use torrust_tracker::api::resource::torrent::{self, Torrent}; + use torrust_tracker::protocol::info_hash::InfoHash; + + use super::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::{sample_peer, start_default_api_server, Client, Query, QueryParam}; + use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; + #[tokio::test] async fn should_allow_getting_torrents() { 
let api_server = start_default_api_server().await; @@ -89,10 +147,13 @@ mod tracker_api { api_server.add_torrent(&info_hash, &sample_peer()).await; - let torrent_resources = Client::new(api_server.get_connection_info()).get_torrents().await; + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::empty()) + .await; + assert_eq!(response.status(), 200); assert_eq!( - torrent_resources, + response.json::>().await.unwrap(), vec![torrent::ListItem { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, @@ -103,6 +164,79 @@ mod tracker_api { ); } + #[tokio::test] + async fn should_allow_limiting_the_torrents_in_the_result() { + let api_server = start_default_api_server().await; + + // torrents are ordered alphabetically by infohashes + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + + api_server.add_torrent(&info_hash_1, &sample_peer()).await; + api_server.add_torrent(&info_hash_2, &sample_peer()).await; + + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) + .await; + + assert_eq!(response.status(), 200); + assert_eq!( + response.json::>().await.unwrap(), + vec![torrent::ListItem { + info_hash: "0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None // Torrent list does not include the peer list for each torrent + }] + ); + } + + #[tokio::test] + async fn should_allow_the_torrents_result_pagination() { + let api_server = start_default_api_server().await; + + // torrents are ordered alphabetically by infohashes + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + + api_server.add_torrent(&info_hash_1, 
&sample_peer()).await; + api_server.add_torrent(&info_hash_2, &sample_peer()).await; + + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) + .await; + + assert_eq!(response.status(), 200); + assert_eq!( + response.json::>().await.unwrap(), + vec![torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None // Torrent list does not include the peer list for each torrent + }] + ); + } + + #[tokio::test] + async fn should_not_allow_getting_torrents_for_unauthenticated_users() { + let api_server = start_default_api_server().await; + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + .get_torrents(Query::empty()) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + .get_torrents(Query::default()) + .await; + + assert_unauthorized(response).await; + } + #[tokio::test] async fn should_allow_getting_a_torrent_info() { let api_server = start_default_api_server().await; @@ -113,12 +247,13 @@ mod tracker_api { api_server.add_torrent(&info_hash, &peer).await; - let torrent_resource = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; + assert_eq!(response.status(), 200); assert_eq!( - torrent_resource, + response.json::().await.unwrap(), Torrent { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, @@ -129,31 +264,25 @@ mod tracker_api { ); } - use std::str::FromStr; + #[tokio::test] + async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { + let api_server = start_default_api_server().await; - use torrust_tracker::api::resource; - use torrust_tracker::api::resource::torrent::{self, Torrent}; - use 
torrust_tracker::protocol::info_hash::InfoHash; + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - use crate::api::{sample_peer, start_default_api_server, Client}; + api_server.add_torrent(&info_hash, &sample_peer()).await; - #[tokio::test] - async fn should_allow_whitelisting_a_torrent() { - let api_server = start_default_api_server().await; + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + .get_torrent(&info_hash.to_string()) + .await; - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + assert_token_not_valid(response).await; - let res = Client::new(api_server.get_connection_info()) - .whitelist_a_torrent(&info_hash) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + .get_torrent(&info_hash.to_string()) .await; - assert_eq!(res.status(), 200); - assert!( - api_server - .tracker - .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) - .await - ); + assert_unauthorized(response).await; } } @@ -162,7 +291,9 @@ mod tracker_api { use torrust_tracker::protocol::info_hash::InfoHash; + use super::{assert_token_not_valid, connection_with_invalid_token, connection_with_no_token}; use crate::api::{start_default_api_server, Client}; + use crate::tracker_api::assert_unauthorized; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { @@ -197,12 +328,97 @@ mod tracker_api { let res = api_client.whitelist_a_torrent(&info_hash).await; assert_eq!(res.status(), 200); } + + #[tokio::test] + async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { + let api_server = start_default_api_server().await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + .whitelist_a_torrent(&info_hash) + .await; + + assert_token_not_valid(response).await; + + let response = 
Client::new(connection_with_no_token(&api_server.get_bind_address())) + .whitelist_a_torrent(&info_hash) + .await; + + assert_unauthorized(response).await; + } + + #[tokio::test] + async fn should_allow_removing_a_torrent_from_the_whitelist() { + let api_server = start_default_api_server().await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let response = Client::new(api_server.get_connection_info()) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_eq!(response.status(), 200); + assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); + } + + #[tokio::test] + async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { + let api_server = start_default_api_server().await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_token_not_valid(response).await; + + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_unauthorized(response).await; + } + + #[tokio::test] + async fn should_allow_reload_the_whitelist_from_the_database() { + let api_server = start_default_api_server().await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; + + 
assert_eq!(response.status(), 200); + /* This assert fails because the whitelist has not been reloaded yet. + We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent + is whitelisted and use that endpoint to check if the torrent is still there after reloading. + assert!( + !(api_server + .tracker + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await) + ); + */ + } } mod for_key_resources { - use torrust_tracker::tracker::auth; + use std::time::Duration; + use torrust_tracker::api::resource::auth_key::AuthKey; + use torrust_tracker::tracker::auth::Key; + + use super::{connection_with_invalid_token, connection_with_no_token}; use crate::api::{start_default_api_server, Client}; + use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { @@ -210,12 +426,127 @@ mod tracker_api { let seconds_valid = 60; - let auth_key = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info()) .generate_auth_key(seconds_valid) .await; // Verify the key with the tracker - assert!(api_server.tracker.verify_auth_key(&auth::Key::from(auth_key)).await.is_ok()); + assert!(api_server + .tracker + .verify_auth_key(&Key::from(response.json::().await.unwrap())) + .await + .is_ok()); + } + + #[tokio::test] + async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { + let api_server = start_default_api_server().await; + + let seconds_valid = 60; + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + .generate_auth_key(seconds_valid) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + .generate_auth_key(seconds_valid) + .await; + + assert_unauthorized(response).await; + } + + #[tokio::test] + async fn should_allow_deleting_an_auth_key() { + let api_server = 
start_default_api_server().await; + + let seconds_valid = 60; + let auth_key = api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(api_server.get_connection_info()) + .delete_auth_key(&auth_key.key) + .await; + + assert_eq!(response.status(), 200); + assert_eq!(response.text().await.unwrap(), "{\"status\":\"ok\"}"); + } + + #[tokio::test] + async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { + let api_server = start_default_api_server().await; + + let seconds_valid = 60; + + // Generate new auth key + let auth_key = api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + .delete_auth_key(&auth_key.key) + .await; + + assert_token_not_valid(response).await; + + // Generate new auth key + let auth_key = api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + .delete_auth_key(&auth_key.key) + .await; + + assert_unauthorized(response).await; + } + + #[tokio::test] + async fn should_allow_reloading_keys() { + let api_server = start_default_api_server().await; + + let seconds_valid = 60; + api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(api_server.get_connection_info()).reload_keys().await; + + assert_eq!(response.status(), 200); + } + + #[tokio::test] + async fn should_not_allow_reloading_keys_for_unauthenticated_users() { + let api_server = start_default_api_server().await; + + let seconds_valid = 60; + api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + .reload_keys() + 
.await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + .reload_keys() + .await; + + assert_unauthorized(response).await; } } } From 901bc342721d137c9ca2b571a7e5632c33142b4b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jan 2023 16:36:38 +0000 Subject: [PATCH 0277/1003] feat: [#143] add axum dependency We are going to reimplement the API with Axum. --- Cargo.lock | 125 +++++++++++++++++++++++++++++++++++++++++++++++++++++ Cargo.toml | 1 + 2 files changed, 126 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index e3a6d9c09..8e40508dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -96,6 +96,56 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "axum" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08b108ad2665fa3f6e6a517c3d80ec3e77d224c47d605167aefaa5d7ef97fa48" +dependencies = [ + "async-trait", + "axum-core", + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-http", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79b8558f5a0581152dc94dcd289132a1d377494bdeafcd41869b3258e3e2ad92" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + [[package]] name = "base-x" version = "0.2.11" @@ -930,6 +980,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + [[package]] name = "httparse" version = "1.8.0" @@ -1214,6 +1270,12 @@ dependencies = [ "hashbrown 0.11.2", ] +[[package]] +name = "matchit" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" + [[package]] name = "memchr" version = "2.5.0" @@ -2028,6 +2090,12 @@ dependencies = [ "base64", ] +[[package]] +name = "rustversion" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" + [[package]] name = "ryu" version = "1.0.11" @@ -2187,6 +2255,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b04f22b563c91331a10074bda3dd5492e3cc39d56bd557e91c0af42b6c7341" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -2420,6 +2497,12 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" + [[package]] name = "tap" version = "1.0.1" @@ -2680,6 +2763,7 @@ version = "2.3.0" dependencies = [ "aquatic_udp_protocol", "async-trait", + "axum", "binascii", "chrono", "config", @@ -2708,6 +2792,47 @@ dependencies = [ "warp", ] +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name 
= "tower-http" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" +dependencies = [ + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + [[package]] name = "tower-service" version = "0.3.2" diff --git a/Cargo.toml b/Cargo.toml index 6e835bcb5..8ddefe78e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,6 +58,7 @@ async-trait = "0.1" aquatic_udp_protocol = "0.2" uuid = { version = "1", features = ["v4"] } +axum = "0.6.1" [dev-dependencies] mockall = "0.11" From cbf88377950d0208cf3f4d8641978cbd1a1a823f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jan 2023 16:38:09 +0000 Subject: [PATCH 0278/1003] feat(api): [#143] scaffolding for new API using Axum - Test scaffolding - Dummy entrypoint --- src/apis/mod.rs | 2 + src/apis/routes.rs | 7 ++ src/apis/server.rs | 35 +++++++++ src/config.rs | 2 +- src/jobs/mod.rs | 1 + src/jobs/tracker_apis.rs | 54 +++++++++++++ src/lib.rs | 1 + src/setup.rs | 19 ++++- tests/api/mod.rs | 61 ++++++++++---- tests/tracker_api.rs | 166 +++++++++++++++++++++++++++------------ 10 files changed, 279 insertions(+), 69 deletions(-) create mode 100644 src/apis/mod.rs create mode 100644 src/apis/routes.rs create mode 100644 src/apis/server.rs create mode 100644 src/jobs/tracker_apis.rs diff --git a/src/apis/mod.rs b/src/apis/mod.rs new file mode 100644 index 000000000..c2ee0fc38 --- /dev/null +++ b/src/apis/mod.rs @@ -0,0 +1,2 @@ +pub mod server; +pub mod routes; diff --git a/src/apis/routes.rs b/src/apis/routes.rs new file mode 100644 index 000000000..2db23c35f --- /dev/null +++ b/src/apis/routes.rs @@ 
-0,0 +1,7 @@ +use axum::response::Json; +use serde_json::{json, Value}; + +#[allow(clippy::unused_async)] +pub async fn root() -> Json { + Json(json!({ "data": 42 })) +} diff --git a/src/apis/server.rs b/src/apis/server.rs new file mode 100644 index 000000000..3bef75367 --- /dev/null +++ b/src/apis/server.rs @@ -0,0 +1,35 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use axum::routing::get; +use axum::Router; +use futures::Future; +use warp::hyper; + +use super::routes::root; +use crate::tracker; + +pub fn start(socket_addr: SocketAddr, _tracker: &Arc) -> impl Future> { + let app = Router::new().route("/", get(root)); + + let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); + + server.with_graceful_shutdown(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + }) +} + +pub fn start_tls( + socket_addr: SocketAddr, + _ssl_cert_path: &str, + _ssl_key_path: &str, + _tracker: &Arc, +) -> impl Future> { + let app = Router::new().route("/", get(root)); + + let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); + + server.with_graceful_shutdown(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + }) +} diff --git a/src/config.rs b/src/config.rs index 48e28b358..820af77d8 100644 --- a/src/config.rs +++ b/src/config.rs @@ -31,7 +31,7 @@ pub struct HttpTracker { } #[serde_as] -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct HttpApi { pub enabled: bool, pub bind_address: String, diff --git a/src/jobs/mod.rs b/src/jobs/mod.rs index 8b8f0662b..a06e7d53c 100644 --- a/src/jobs/mod.rs +++ b/src/jobs/mod.rs @@ -2,3 +2,4 @@ pub mod http_tracker; pub mod torrent_cleanup; pub mod tracker_api; pub mod udp_tracker; +pub mod tracker_apis; diff --git a/src/jobs/tracker_apis.rs b/src/jobs/tracker_apis.rs new file mode 100644 index 000000000..b696c923d --- /dev/null +++ 
b/src/jobs/tracker_apis.rs @@ -0,0 +1,54 @@ +use std::sync::Arc; + +use log::info; +use tokio::sync::oneshot; +use tokio::task::JoinHandle; + +use crate::apis::server; +use crate::config::HttpApi; +use crate::tracker; + +#[derive(Debug)] +pub struct ApiServerJobStarted(); + +/// # Panics +/// +/// It would panic if unable to send the `ApiServerJobStarted` notice. +pub async fn start_job(config: &HttpApi, tracker: Arc) -> JoinHandle<()> { + let bind_addr = config + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); + let ssl_enabled = config.ssl_enabled; + let ssl_cert_path = config.ssl_cert_path.clone(); + let ssl_key_path = config.ssl_key_path.clone(); + + let (tx, rx) = oneshot::channel::(); + + // Run the API server + let join_handle = tokio::spawn(async move { + if !ssl_enabled { + info!("Starting Torrust APIs server on: http://{}", bind_addr); + let handle = server::start(bind_addr, &tracker); + tx.send(ApiServerJobStarted()).expect("the start job dropped"); + if let Ok(()) = handle.await { + info!("Stopping Torrust APIs server on {} ...", bind_addr); + } + } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { + info!("Starting Torrust APIs server on: https://{}", bind_addr); + let handle = server::start_tls(bind_addr, &ssl_cert_path.unwrap(), &ssl_key_path.unwrap(), &tracker); + tx.send(ApiServerJobStarted()).expect("the start job dropped"); + if let Ok(()) = handle.await { + info!("Stopping Torrust APIs server on {} ...", bind_addr); + } + } + }); + + // Wait until the APIs server job is running + match rx.await { + Ok(_msg) => info!("Torrust APIs server started"), + Err(e) => panic!("the apis server was dropped: {e}"), + } + + join_handle +} diff --git a/src/lib.rs b/src/lib.rs index 7e4fe13a7..6edb96dfd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,6 +9,7 @@ pub mod setup; pub mod stats; pub mod tracker; pub mod udp; +pub mod apis; #[macro_use] extern crate lazy_static; diff --git a/src/setup.rs 
b/src/setup.rs index c045310bb..84a1d1c3c 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -1,12 +1,16 @@ +use std::net::SocketAddr; use std::sync::Arc; use log::warn; use tokio::task::JoinHandle; use crate::config::Configuration; -use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, udp_tracker}; +use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, tracker_apis, udp_tracker}; use crate::tracker; +/// # Panics +/// +/// Will panic if the socket address for API can't be parsed. pub async fn setup(config: &Configuration, tracker: Arc) -> Vec> { let mut jobs: Vec> = Vec::new(); @@ -52,6 +56,19 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve jobs.push(tracker_api::start_job(&config.http_api, tracker.clone()).await); } + // Start HTTP APIs server (multiple API versions) + if config.http_api.enabled { + // Temporarily running the new API in the 1313 port + let bind_address = config.http_api.bind_address.clone(); + let mut bind_socket: SocketAddr = bind_address.parse().unwrap(); + bind_socket.set_port(1313); + + let mut http_apis_config = config.http_api.clone(); + http_apis_config.bind_address = bind_socket.to_string(); + + jobs.push(tracker_apis::start_job(&http_apis_config, tracker.clone()).await); + } + // Remove torrents without peers, every interval if config.inactive_peer_cleanup_interval > 0 { jobs.push(torrent_cleanup::start_job(config, &tracker)); diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 1528888bf..49fde7a81 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use reqwest::Response; use torrust_tracker::config::Configuration; -use torrust_tracker::jobs::tracker_api; +use torrust_tracker::jobs::{tracker_api, tracker_apis}; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::{self, Peer}; @@ -67,16 +67,38 @@ impl 
ConnectionInfo { } } -pub async fn start_default_api_server() -> Server { +pub async fn start_default_api_server(version: &Version) -> Server { let configuration = tracker_configuration(); - start_custom_api_server(configuration.clone()).await + start_custom_api_server(configuration.clone(), version).await } -pub async fn start_custom_api_server(configuration: Arc) -> Server { - start(configuration).await +pub async fn start_custom_api_server(configuration: Arc, version: &Version) -> Server { + match &version { + Version::Warp => start_warp_api(configuration).await, + Version::Axum => start_axum_api(configuration).await, + } +} + +async fn start_warp_api(configuration: Arc) -> Server { + let server = start(&configuration); + + // Start the HTTP API job + tracker_api::start_job(&configuration.http_api, server.tracker.clone()).await; + + server +} + +async fn start_axum_api(configuration: Arc) -> Server { + let server = start(&configuration); + + // Start HTTP APIs server (multiple API versions) + // Temporarily run the new API on a port number after the current API port + tracker_apis::start_job(&configuration.http_api, server.tracker.clone()).await; + + server } -async fn start(configuration: Arc) -> Server { +fn start(configuration: &Arc) -> Server { let connection_info = ConnectionInfo::authenticated( &configuration.http_api.bind_address.clone(), &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), @@ -92,7 +114,7 @@ async fn start(configuration: Arc) -> Server { let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { + let tracker = match tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) @@ -100,10 +122,7 @@ async fn start(configuration: Arc) -> Server { }; // Initialize 
logging - logging::setup(&configuration); - - // Start the HTTP API job - tracker_api::start_job(&configuration.http_api, tracker.clone()).await; + logging::setup(configuration); Server { tracker, @@ -133,6 +152,7 @@ impl Server { pub struct Client { connection_info: ConnectionInfo, + base_path: String, } type ReqwestQuery = Vec; @@ -194,9 +214,20 @@ impl From for ReqwestQueryParam { } } +pub enum Version { + Warp, + Axum, +} + impl Client { - pub fn new(connection_info: ConnectionInfo) -> Self { - Self { connection_info } + pub fn new(connection_info: ConnectionInfo, version: &Version) -> Self { + Self { + connection_info, + base_path: match version { + Version::Warp => "/api/".to_string(), + Version::Axum => String::new(), + }, + } } pub async fn generate_auth_key(&self, seconds_valid: i32) -> Response { @@ -235,7 +266,7 @@ impl Client { self.get("stats", Query::default()).await } - async fn get(&self, path: &str, params: Query) -> Response { + pub async fn get(&self, path: &str, params: Query) -> Response { let mut query: Query = params; if let Some(token) = &self.connection_info.api_token { @@ -271,7 +302,7 @@ impl Client { } fn base_url(&self, path: &str) -> String { - format!("http://{}/api/{path}", &self.connection_info.bind_address) + format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) } fn query_with_token(&self) -> Query { diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index d02f29374..68a295ac3 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -3,6 +3,14 @@ /// ```text /// cargo test tracker_api -- --nocapture /// ``` +/// +/// WIP. We are implementing a new API replacing Warp with Axum. +/// The new API runs in parallel until we finish all endpoints. 
+/// You can test the new API with: +/// +/// ```text +/// cargo test tracker_apis -- --nocapture +/// ``` extern crate rand; mod api; @@ -70,12 +78,12 @@ mod tracker_api { use torrust_tracker::protocol::info_hash::InfoHash; use super::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::{sample_peer, start_default_api_server, Client}; + use crate::api::{sample_peer, start_default_api_server, Client, Version}; use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; #[tokio::test] async fn should_allow_getting_tracker_statistics() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; api_server .add_torrent( @@ -84,7 +92,9 @@ mod tracker_api { ) .await; - let response = Client::new(api_server.get_connection_info()).get_tracker_statistics().await; + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .get_tracker_statistics() + .await; assert_eq!(response.status(), 200); assert_eq!( @@ -112,15 +122,15 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .get_tracker_statistics() .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) .get_tracker_statistics() .await; @@ -136,18 +146,18 @@ mod tracker_api { use torrust_tracker::protocol::info_hash::InfoHash; use super::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::{sample_peer, 
start_default_api_server, Client, Query, QueryParam}; + use crate::api::{sample_peer, start_default_api_server, Client, Query, QueryParam, Version}; use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; #[tokio::test] async fn should_allow_getting_torrents() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); api_server.add_torrent(&info_hash, &sample_peer()).await; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info(), &Version::Warp) .get_torrents(Query::empty()) .await; @@ -166,7 +176,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_limiting_the_torrents_in_the_result() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -175,7 +185,7 @@ mod tracker_api { api_server.add_torrent(&info_hash_1, &sample_peer()).await; api_server.add_torrent(&info_hash_2, &sample_peer()).await; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info(), &Version::Warp) .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) .await; @@ -194,7 +204,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_the_torrents_result_pagination() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -203,7 +213,7 @@ mod tracker_api { api_server.add_torrent(&info_hash_1, &sample_peer()).await; api_server.add_torrent(&info_hash_2, 
&sample_peer()).await; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info(), &Version::Warp) .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) .await; @@ -222,15 +232,15 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .get_torrents(Query::empty()) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) .get_torrents(Query::default()) .await; @@ -239,7 +249,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_getting_a_torrent_info() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -247,7 +257,7 @@ mod tracker_api { api_server.add_torrent(&info_hash, &peer).await; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info(), &Version::Warp) .get_torrent(&info_hash.to_string()) .await; @@ -266,19 +276,19 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); api_server.add_torrent(&info_hash, &sample_peer()).await; - let 
response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .get_torrent(&info_hash.to_string()) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) .get_torrent(&info_hash.to_string()) .await; @@ -292,16 +302,16 @@ mod tracker_api { use torrust_tracker::protocol::info_hash::InfoHash; use super::{assert_token_not_valid, connection_with_invalid_token, connection_with_no_token}; - use crate::api::{start_default_api_server, Client}; + use crate::api::{start_default_api_server, Client, Version}; use crate::tracker_api::assert_unauthorized; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let res = Client::new(api_server.get_connection_info()) + let res = Client::new(api_server.get_connection_info(), &Version::Warp) .whitelist_a_torrent(&info_hash) .await; @@ -316,11 +326,11 @@ mod tracker_api { #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let api_client = Client::new(api_server.get_connection_info()); + let api_client = Client::new(api_server.get_connection_info(), &Version::Warp); let res = api_client.whitelist_a_torrent(&info_hash).await; assert_eq!(res.status(), 200); @@ -331,17 +341,17 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - let 
api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .whitelist_a_torrent(&info_hash) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) .whitelist_a_torrent(&info_hash) .await; @@ -350,13 +360,13 @@ mod tracker_api { #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info(), &Version::Warp) .remove_torrent_from_whitelist(&hash) .await; @@ -366,20 +376,20 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) 
.remove_torrent_from_whitelist(&hash) .await; assert_token_not_valid(response).await; api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) .remove_torrent_from_whitelist(&hash) .await; @@ -388,13 +398,15 @@ mod tracker_api { #[tokio::test] async fn should_allow_reload_the_whitelist_from_the_database() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .reload_whitelist() + .await; assert_eq!(response.status(), 200); /* This assert fails because the whitelist has not been reloaded yet. 
@@ -417,16 +429,16 @@ mod tracker_api { use torrust_tracker::tracker::auth::Key; use super::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::{start_default_api_server, Client}; + use crate::api::{start_default_api_server, Client, Version}; use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let seconds_valid = 60; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info(), &Version::Warp) .generate_auth_key(seconds_valid) .await; @@ -440,17 +452,17 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let seconds_valid = 60; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .generate_auth_key(seconds_valid) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) .generate_auth_key(seconds_valid) .await; @@ -459,7 +471,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_deleting_an_auth_key() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let seconds_valid = 60; let auth_key = api_server @@ -468,7 +480,7 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info(), &Version::Warp) 
.delete_auth_key(&auth_key.key) .await; @@ -478,7 +490,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let seconds_valid = 60; @@ -489,7 +501,7 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .delete_auth_key(&auth_key.key) .await; @@ -502,7 +514,7 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) .delete_auth_key(&auth_key.key) .await; @@ -511,7 +523,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_reloading_keys() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let seconds_valid = 60; api_server @@ -520,14 +532,16 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(api_server.get_connection_info()).reload_keys().await; + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .reload_keys() + .await; assert_eq!(response.status(), 200); } #[tokio::test] async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let seconds_valid = 60; api_server @@ -536,13 +550,13 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .reload_keys() .await; assert_token_not_valid(response).await; - let 
response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) .reload_keys() .await; @@ -550,3 +564,51 @@ mod tracker_api { } } } + +mod tracker_apis { + + /* + + Endpoints: + + Root: + - [x] GET / + + Stats: + - [ ] GET /api/stats + + Torrents: + - [ ] GET /api/torrents?offset=:u32&limit=:u32 + - [ ] GET /api/torrent/:info_hash + + Whitelisted torrents: + - [ ] POST /api/whitelist/:info_hash + - [ ] DELETE /api/whitelist/:info_hash + + Whitelist commands: + - [ ] GET /api/whitelist/reload + + Keys: + - [ ] POST /api/key/:seconds_valid + - [ ] DELETE /api/key/:key + + Key commands + - [ ] GET /api/keys/reload + + */ + + mod for_entrypoint { + use crate::api::{start_default_api_server, Client, Query, Version}; + + #[tokio::test] + async fn test_entrypoint() { + let api_server = start_default_api_server(&Version::Axum).await; + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .get("/", Query::default()) + .await; + + assert_eq!(response.status(), 200); + } + } +} From 5ee3f93fb515cc26a43a5cab45d84a2011c0cbb7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jan 2023 17:30:23 +0000 Subject: [PATCH 0279/1003] refactor(api): [#143] extract mods for API testing Code for API testing have been reorganized. 
--- src/apis/mod.rs | 2 +- src/apis/server.rs | 2 + src/jobs/mod.rs | 2 +- src/lib.rs | 2 +- tests/api/asserts.rs | 17 ++ tests/api/client.rs | 162 ++++++++++++++++++ tests/api/connection_info.rs | 29 ++++ tests/api/fixtures.rs | 17 ++ tests/api/mod.rs | 314 +---------------------------------- tests/api/server.rs | 115 +++++++++++++ tests/tracker_api.rs | 109 ++++++------ 11 files changed, 397 insertions(+), 374 deletions(-) create mode 100644 tests/api/asserts.rs create mode 100644 tests/api/client.rs create mode 100644 tests/api/connection_info.rs create mode 100644 tests/api/fixtures.rs create mode 100644 tests/api/server.rs diff --git a/src/apis/mod.rs b/src/apis/mod.rs index c2ee0fc38..f2ec6ffbd 100644 --- a/src/apis/mod.rs +++ b/src/apis/mod.rs @@ -1,2 +1,2 @@ -pub mod server; pub mod routes; +pub mod server; diff --git a/src/apis/server.rs b/src/apis/server.rs index 3bef75367..d42ae8950 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -25,6 +25,8 @@ pub fn start_tls( _ssl_key_path: &str, _tracker: &Arc, ) -> impl Future> { + // todo: for the time being, it's just a copy & paste from start(...). 
+ let app = Router::new().route("/", get(root)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); diff --git a/src/jobs/mod.rs b/src/jobs/mod.rs index a06e7d53c..6f9b12bac 100644 --- a/src/jobs/mod.rs +++ b/src/jobs/mod.rs @@ -1,5 +1,5 @@ pub mod http_tracker; pub mod torrent_cleanup; pub mod tracker_api; -pub mod udp_tracker; pub mod tracker_apis; +pub mod udp_tracker; diff --git a/src/lib.rs b/src/lib.rs index 6edb96dfd..ebf589aa9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,5 @@ pub mod api; +pub mod apis; pub mod config; pub mod databases; pub mod http; @@ -9,7 +10,6 @@ pub mod setup; pub mod stats; pub mod tracker; pub mod udp; -pub mod apis; #[macro_use] extern crate lazy_static; diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs new file mode 100644 index 000000000..5d664d5c4 --- /dev/null +++ b/tests/api/asserts.rs @@ -0,0 +1,17 @@ +use reqwest::Response; + +pub async fn assert_token_not_valid(response: Response) { + assert_eq!(response.status(), 500); + assert_eq!( + response.text().await.unwrap(), + "Unhandled rejection: Err { reason: \"token not valid\" }" + ); +} + +pub async fn assert_unauthorized(response: Response) { + assert_eq!(response.status(), 500); + assert_eq!( + response.text().await.unwrap(), + "Unhandled rejection: Err { reason: \"unauthorized\" }" + ); +} diff --git a/tests/api/client.rs b/tests/api/client.rs new file mode 100644 index 000000000..e507d817f --- /dev/null +++ b/tests/api/client.rs @@ -0,0 +1,162 @@ +use reqwest::Response; + +use super::connection_info::ConnectionInfo; +use super::Version; + +pub struct Client { + connection_info: ConnectionInfo, + base_path: String, +} + +type ReqwestQuery = Vec; +type ReqwestQueryParam = (String, String); + +#[derive(Default, Debug)] +pub struct Query { + params: Vec, +} + +impl Query { + pub fn empty() -> Self { + Self { params: vec![] } + } + + pub fn params(params: Vec) -> Self { + Self { params } + } + + pub fn add_param(&mut self, param: 
QueryParam) { + self.params.push(param); + } + + fn with_token(token: &str) -> Self { + Self { + params: vec![QueryParam::new("token", token)], + } + } +} + +impl From for ReqwestQuery { + fn from(url_search_params: Query) -> Self { + url_search_params + .params + .iter() + .map(|param| ReqwestQueryParam::from((*param).clone())) + .collect() + } +} + +#[derive(Clone, Debug)] +pub struct QueryParam { + name: String, + value: String, +} + +impl QueryParam { + pub fn new(name: &str, value: &str) -> Self { + Self { + name: name.to_string(), + value: value.to_string(), + } + } +} + +impl From for ReqwestQueryParam { + fn from(param: QueryParam) -> Self { + (param.name, param.value) + } +} + +impl Client { + pub fn new(connection_info: ConnectionInfo, version: &Version) -> Self { + Self { + connection_info, + base_path: match version { + Version::Warp => "/api/".to_string(), + Version::Axum => "/".to_string(), + }, + } + } + + pub async fn generate_auth_key(&self, seconds_valid: i32) -> Response { + self.post(&format!("key/{}", &seconds_valid)).await + } + + pub async fn delete_auth_key(&self, key: &str) -> Response { + self.delete(&format!("key/{}", &key)).await + } + + pub async fn reload_keys(&self) -> Response { + self.get("keys/reload", Query::default()).await + } + + pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { + self.post(&format!("whitelist/{}", &info_hash)).await + } + + pub async fn remove_torrent_from_whitelist(&self, info_hash: &str) -> Response { + self.delete(&format!("whitelist/{}", &info_hash)).await + } + + pub async fn reload_whitelist(&self) -> Response { + self.get("whitelist/reload", Query::default()).await + } + + pub async fn get_torrent(&self, info_hash: &str) -> Response { + self.get(&format!("torrent/{}", &info_hash), Query::default()).await + } + + pub async fn get_torrents(&self, params: Query) -> Response { + self.get("torrents", params).await + } + + pub async fn get_tracker_statistics(&self) -> Response { + 
self.get("stats", Query::default()).await + } + + pub async fn get(&self, path: &str, params: Query) -> Response { + let mut query: Query = params; + + if let Some(token) = &self.connection_info.api_token { + query.add_param(QueryParam::new("token", token)); + }; + + reqwest::Client::builder() + .build() + .unwrap() + .get(self.base_url(path)) + .query(&ReqwestQuery::from(query)) + .send() + .await + .unwrap() + } + + async fn post(&self, path: &str) -> Response { + reqwest::Client::new() + .post(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())) + .send() + .await + .unwrap() + } + + async fn delete(&self, path: &str) -> Response { + reqwest::Client::new() + .delete(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())) + .send() + .await + .unwrap() + } + + fn base_url(&self, path: &str) -> String { + format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) + } + + fn query_with_token(&self) -> Query { + match &self.connection_info.api_token { + Some(token) => Query::with_token(token), + None => Query::default(), + } + } +} diff --git a/tests/api/connection_info.rs b/tests/api/connection_info.rs new file mode 100644 index 000000000..35314a2fd --- /dev/null +++ b/tests/api/connection_info.rs @@ -0,0 +1,29 @@ +pub fn connection_with_invalid_token(bind_address: &str) -> ConnectionInfo { + ConnectionInfo::authenticated(bind_address, "invalid token") +} + +pub fn connection_with_no_token(bind_address: &str) -> ConnectionInfo { + ConnectionInfo::anonymous(bind_address) +} + +#[derive(Clone)] +pub struct ConnectionInfo { + pub bind_address: String, + pub api_token: Option, +} + +impl ConnectionInfo { + pub fn authenticated(bind_address: &str, api_token: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + api_token: Some(api_token.to_string()), + } + } + + pub fn anonymous(bind_address: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + api_token: 
None, + } + } +} diff --git a/tests/api/fixtures.rs b/tests/api/fixtures.rs new file mode 100644 index 000000000..fa6099309 --- /dev/null +++ b/tests/api/fixtures.rs @@ -0,0 +1,17 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; +use torrust_tracker::tracker::peer; + +pub fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } +} diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 49fde7a81..52980581f 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -1,314 +1,10 @@ -use core::panic; -use std::env; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::sync::Arc; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use reqwest::Response; -use torrust_tracker::config::Configuration; -use torrust_tracker::jobs::{tracker_api, tracker_apis}; -use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; -use torrust_tracker::protocol::info_hash::InfoHash; -use torrust_tracker::tracker::peer::{self, Peer}; -use torrust_tracker::tracker::statistics::Keeper; -use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; - -use crate::common::ephemeral_random_port; - -pub fn sample_peer() -> peer::Peer { - peer::Peer { - peer_id: peer::Id(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - } -} - -pub fn tracker_configuration() -> Arc { - let mut config = Configuration { - log_level: 
Some("off".to_owned()), - ..Default::default() - }; - - // Ephemeral socket address - let port = ephemeral_random_port(); - config.http_api.bind_address = format!("127.0.0.1:{}", &port); - - // Ephemeral database - let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("data_{}.db", &port)); - config.db_path = temp_file.to_str().unwrap().to_owned(); - - Arc::new(config) -} - -#[derive(Clone)] -pub struct ConnectionInfo { - pub bind_address: String, - pub api_token: Option, -} - -impl ConnectionInfo { - pub fn authenticated(bind_address: &str, api_token: &str) -> Self { - Self { - bind_address: bind_address.to_string(), - api_token: Some(api_token.to_string()), - } - } - - pub fn anonymous(bind_address: &str) -> Self { - Self { - bind_address: bind_address.to_string(), - api_token: None, - } - } -} - -pub async fn start_default_api_server(version: &Version) -> Server { - let configuration = tracker_configuration(); - start_custom_api_server(configuration.clone(), version).await -} - -pub async fn start_custom_api_server(configuration: Arc, version: &Version) -> Server { - match &version { - Version::Warp => start_warp_api(configuration).await, - Version::Axum => start_axum_api(configuration).await, - } -} - -async fn start_warp_api(configuration: Arc) -> Server { - let server = start(&configuration); - - // Start the HTTP API job - tracker_api::start_job(&configuration.http_api, server.tracker.clone()).await; - - server -} - -async fn start_axum_api(configuration: Arc) -> Server { - let server = start(&configuration); - - // Start HTTP APIs server (multiple API versions) - // Temporarily run the new API on a port number after the current API port - tracker_apis::start_job(&configuration.http_api, server.tracker.clone()).await; - - server -} - -fn start(configuration: &Arc) -> Server { - let connection_info = ConnectionInfo::authenticated( - &configuration.http_api.bind_address.clone(), - 
&configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), - ); - - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - - // Initialize logging - logging::setup(configuration); - - Server { - tracker, - connection_info, - } -} - -pub struct Server { - pub tracker: Arc, - pub connection_info: ConnectionInfo, -} - -impl Server { - pub fn get_connection_info(&self) -> ConnectionInfo { - self.connection_info.clone() - } - - pub fn get_bind_address(&self) -> String { - self.connection_info.bind_address.clone() - } - - /// Add a torrent to the tracker - pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { - self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - } -} - -pub struct Client { - connection_info: ConnectionInfo, - base_path: String, -} - -type ReqwestQuery = Vec; -type ReqwestQueryParam = (String, String); - -#[derive(Default, Debug)] -pub struct Query { - params: Vec, -} - -impl Query { - pub fn empty() -> Self { - Self { params: vec![] } - } - - pub fn params(params: Vec) -> Self { - Self { params } - } - - pub fn add_param(&mut self, param: QueryParam) { - self.params.push(param); - } - - fn with_token(token: &str) -> Self { - Self { - params: vec![QueryParam::new("token", token)], - } - } -} - -impl From for ReqwestQuery { - fn from(url_search_params: Query) -> Self { - url_search_params - .params - .iter() - .map(|param| ReqwestQueryParam::from((*param).clone())) - .collect() - } -} - -#[derive(Clone, 
Debug)] -pub struct QueryParam { - name: String, - value: String, -} - -impl QueryParam { - pub fn new(name: &str, value: &str) -> Self { - Self { - name: name.to_string(), - value: value.to_string(), - } - } -} - -impl From for ReqwestQueryParam { - fn from(param: QueryParam) -> Self { - (param.name, param.value) - } -} +pub mod asserts; +pub mod client; +pub mod connection_info; +pub mod fixtures; +pub mod server; pub enum Version { Warp, Axum, } - -impl Client { - pub fn new(connection_info: ConnectionInfo, version: &Version) -> Self { - Self { - connection_info, - base_path: match version { - Version::Warp => "/api/".to_string(), - Version::Axum => String::new(), - }, - } - } - - pub async fn generate_auth_key(&self, seconds_valid: i32) -> Response { - self.post(&format!("key/{}", &seconds_valid)).await - } - - pub async fn delete_auth_key(&self, key: &str) -> Response { - self.delete(&format!("key/{}", &key)).await - } - - pub async fn reload_keys(&self) -> Response { - self.get("keys/reload", Query::default()).await - } - - pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { - self.post(&format!("whitelist/{}", &info_hash)).await - } - - pub async fn remove_torrent_from_whitelist(&self, info_hash: &str) -> Response { - self.delete(&format!("whitelist/{}", &info_hash)).await - } - - pub async fn reload_whitelist(&self) -> Response { - self.get("whitelist/reload", Query::default()).await - } - - pub async fn get_torrent(&self, info_hash: &str) -> Response { - self.get(&format!("torrent/{}", &info_hash), Query::default()).await - } - - pub async fn get_torrents(&self, params: Query) -> Response { - self.get("torrents", params).await - } - - pub async fn get_tracker_statistics(&self) -> Response { - self.get("stats", Query::default()).await - } - - pub async fn get(&self, path: &str, params: Query) -> Response { - let mut query: Query = params; - - if let Some(token) = &self.connection_info.api_token { - query.add_param(QueryParam::new("token", 
token)); - }; - - reqwest::Client::builder() - .build() - .unwrap() - .get(self.base_url(path)) - .query(&ReqwestQuery::from(query)) - .send() - .await - .unwrap() - } - - async fn post(&self, path: &str) -> Response { - reqwest::Client::new() - .post(self.base_url(path).clone()) - .query(&ReqwestQuery::from(self.query_with_token())) - .send() - .await - .unwrap() - } - - async fn delete(&self, path: &str) -> Response { - reqwest::Client::new() - .delete(self.base_url(path).clone()) - .query(&ReqwestQuery::from(self.query_with_token())) - .send() - .await - .unwrap() - } - - fn base_url(&self, path: &str) -> String { - format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) - } - - fn query_with_token(&self) -> Query { - match &self.connection_info.api_token { - Some(token) => Query::with_token(token), - None => Query::default(), - } - } -} diff --git a/tests/api/server.rs b/tests/api/server.rs new file mode 100644 index 000000000..338b068c8 --- /dev/null +++ b/tests/api/server.rs @@ -0,0 +1,115 @@ +use core::panic; +use std::env; +use std::sync::Arc; + +use torrust_tracker::config::Configuration; +use torrust_tracker::jobs::{tracker_api, tracker_apis}; +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::tracker::peer::Peer; +use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; + +use super::connection_info::ConnectionInfo; +use super::Version; +use crate::common::ephemeral_random_port; + +pub fn tracker_configuration() -> Arc { + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; + + // Ephemeral socket address + let port = ephemeral_random_port(); + config.http_api.bind_address = format!("127.0.0.1:{}", &port); + + // Ephemeral database + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("data_{}.db", &port)); + config.db_path = 
temp_file.to_str().unwrap().to_owned(); + + Arc::new(config) +} + +pub async fn start_default_api(version: &Version) -> Server { + let configuration = tracker_configuration(); + start_custom_api(configuration.clone(), version).await +} + +pub async fn start_custom_api(configuration: Arc, version: &Version) -> Server { + match &version { + Version::Warp => start_warp_api(configuration).await, + Version::Axum => start_axum_api(configuration).await, + } +} + +async fn start_warp_api(configuration: Arc) -> Server { + let server = start(&configuration); + + // Start the HTTP API job + tracker_api::start_job(&configuration.http_api, server.tracker.clone()).await; + + server +} + +async fn start_axum_api(configuration: Arc) -> Server { + let server = start(&configuration); + + // Start HTTP APIs server (multiple API versions) + // Temporarily run the new API on a port number after the current API port + tracker_apis::start_job(&configuration.http_api, server.tracker.clone()).await; + + server +} + +fn start(configuration: &Arc) -> Server { + let connection_info = ConnectionInfo::authenticated( + &configuration.http_api.bind_address.clone(), + &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), + ); + + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + let tracker = match tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + + // Initialize logging + logging::setup(configuration); + + Server { + tracker, + connection_info, + } +} + +pub struct Server { + pub tracker: Arc, + pub connection_info: ConnectionInfo, +} + +impl 
Server { + pub fn get_connection_info(&self) -> ConnectionInfo { + self.connection_info.clone() + } + + pub fn get_bind_address(&self) -> String { + self.connection_info.bind_address.clone() + } + + /// Add a torrent to the tracker + pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + } +} diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 68a295ac3..5f022167b 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -43,47 +43,22 @@ mod tracker_api { */ - use reqwest::Response; - - use crate::api::ConnectionInfo; - - async fn assert_token_not_valid(response: Response) { - assert_eq!(response.status(), 500); - assert_eq!( - response.text().await.unwrap(), - "Unhandled rejection: Err { reason: \"token not valid\" }" - ); - } - - async fn assert_unauthorized(response: Response) { - assert_eq!(response.status(), 500); - assert_eq!( - response.text().await.unwrap(), - "Unhandled rejection: Err { reason: \"unauthorized\" }" - ); - } - - fn connection_with_invalid_token(bind_address: &str) -> ConnectionInfo { - ConnectionInfo::authenticated(bind_address, "invalid token") - } - - fn connection_with_no_token(bind_address: &str) -> ConnectionInfo { - ConnectionInfo::anonymous(bind_address) - } - mod for_stats_resources { use std::str::FromStr; use torrust_tracker::api::resource::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; - use super::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::{sample_peer, start_default_api_server, Client, Version}; - use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::client::Client; + use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::fixtures::sample_peer; + use crate::api::server::start_default_api; + use 
crate::api::Version; #[tokio::test] async fn should_allow_getting_tracker_statistics() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; api_server .add_torrent( @@ -122,7 +97,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .get_tracker_statistics() @@ -145,13 +120,16 @@ mod tracker_api { use torrust_tracker::api::resource::torrent::{self, Torrent}; use torrust_tracker::protocol::info_hash::InfoHash; - use super::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::{sample_peer, start_default_api_server, Client, Query, QueryParam, Version}; - use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::client::{Client, Query, QueryParam}; + use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::fixtures::sample_peer; + use crate::api::server::start_default_api; + use crate::api::Version; #[tokio::test] async fn should_allow_getting_torrents() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -176,7 +154,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_limiting_the_torrents_in_the_result() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; // torrents are ordered alphabetically by infohashes let info_hash_1 = 
InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -204,7 +182,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_the_torrents_result_pagination() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -232,7 +210,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .get_torrents(Query::empty()) @@ -249,7 +227,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_getting_a_torrent_info() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -276,7 +254,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -301,13 +279,15 @@ mod tracker_api { use torrust_tracker::protocol::info_hash::InfoHash; - use super::{assert_token_not_valid, connection_with_invalid_token, connection_with_no_token}; - use crate::api::{start_default_api_server, Client, Version}; - use crate::tracker_api::assert_unauthorized; + use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::client::Client; + use crate::api::connection_info::{connection_with_invalid_token, 
connection_with_no_token}; + use crate::api::server::start_default_api; + use crate::api::Version; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -326,7 +306,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -341,7 +321,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -360,7 +340,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -376,7 +356,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -398,7 +378,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_reload_the_whitelist_from_the_database() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = 
start_default_api(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -428,13 +408,15 @@ mod tracker_api { use torrust_tracker::api::resource::auth_key::AuthKey; use torrust_tracker::tracker::auth::Key; - use super::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::{start_default_api_server, Client, Version}; - use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::client::Client; + use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::server::start_default_api; + use crate::api::Version; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let seconds_valid = 60; @@ -452,7 +434,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let seconds_valid = 60; @@ -471,7 +453,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_deleting_an_auth_key() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let seconds_valid = 60; let auth_key = api_server @@ -490,7 +472,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let seconds_valid = 60; @@ -523,7 +505,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_reloading_keys() { - let api_server = start_default_api_server(&Version::Warp).await; 
+ let api_server = start_default_api(&Version::Warp).await; let seconds_valid = 60; api_server @@ -541,7 +523,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let seconds_valid = 60; api_server @@ -565,13 +547,14 @@ mod tracker_api { } } +/// The new API implementation using Axum mod tracker_apis { /* Endpoints: - Root: + Root (dummy endpoint to test Axum configuration. To be removed): - [x] GET / Stats: @@ -598,7 +581,9 @@ mod tracker_apis { */ mod for_entrypoint { - use crate::api::{start_default_api_server, Client, Query, Version}; + use crate::api::client::{Client, Query}; + use crate::api::server::start_default_api_server; + use crate::api::Version; #[tokio::test] async fn test_entrypoint() { From 6a9e2d5a9f1396e4486b95d2e187740f368549bf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jan 2023 18:06:38 +0000 Subject: [PATCH 0280/1003] feat(api): [#143] axum api, GET /stats endpoint --- src/apis/routes.rs | 58 ++++++++++++++++++++++++++++++++++++++++++++ src/apis/server.rs | 8 +++--- tests/tracker_api.rs | 57 ++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 117 insertions(+), 6 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 2db23c35f..1b40ac47e 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -1,7 +1,65 @@ +use std::sync::Arc; + +use axum::extract::State; use axum::response::Json; use serde_json::{json, Value}; +use crate::api::resource::stats::Stats; +use crate::tracker::Tracker; + #[allow(clippy::unused_async)] pub async fn root() -> Json { Json(json!({ "data": 42 })) } + +#[allow(clippy::unused_async)] +pub async fn get_stats(State(tracker): State>) -> Json { + let mut results = Stats { + torrents: 0, + seeders: 0, + completed: 0, + leechers: 0, + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + 
tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + }; + + let db = tracker.get_torrents().await; + + db.values().for_each(|torrent_entry| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + results.seeders += seeders; + results.completed += completed; + results.leechers += leechers; + results.torrents += 1; + }); + + let stats = tracker.get_stats().await; + + #[allow(clippy::cast_possible_truncation)] + { + results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; + results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; + results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; + results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; + results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; + results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; + results.udp4_connections_handled = stats.udp4_connections_handled as u32; + results.udp4_announces_handled = stats.udp4_announces_handled as u32; + results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; + results.udp6_connections_handled = stats.udp6_connections_handled as u32; + results.udp6_announces_handled = stats.udp6_announces_handled as u32; + results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; + } + + Json(json!(results)) +} diff --git a/src/apis/server.rs b/src/apis/server.rs index d42ae8950..fb532519f 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -6,11 +6,13 @@ use axum::Router; use futures::Future; use warp::hyper; -use super::routes::root; +use super::routes::{get_stats, root}; use crate::tracker; -pub fn start(socket_addr: SocketAddr, _tracker: &Arc) -> impl Future> { - let app = Router::new().route("/", get(root)); +pub fn 
start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { + let app = Router::new() + .route("/", get(root)) + .route("/stats", get(get_stats).with_state(tracker.clone())); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 5f022167b..bac9d1324 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -582,18 +582,69 @@ mod tracker_apis { mod for_entrypoint { use crate::api::client::{Client, Query}; - use crate::api::server::start_default_api_server; + use crate::api::server::start_default_api; use crate::api::Version; #[tokio::test] async fn test_entrypoint() { - let api_server = start_default_api_server(&Version::Axum).await; + let api_server = start_default_api(&Version::Axum).await; let response = Client::new(api_server.get_connection_info(), &Version::Axum) - .get("/", Query::default()) + .get("", Query::default()) .await; assert_eq!(response.status(), 200); } } + + mod for_stats_resources { + use std::str::FromStr; + + use torrust_tracker::api::resource::stats::Stats; + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::api::client::Client; + use crate::api::fixtures::sample_peer; + use crate::api::server::start_default_api; + use crate::api::Version; + + #[tokio::test] + async fn should_allow_getting_tracker_statistics() { + let api_server = start_default_api(&Version::Axum).await; + + api_server + .add_torrent( + &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + &sample_peer(), + ) + .await; + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .get_tracker_statistics() + .await; + + assert_eq!(response.status(), 200); + assert_eq!( + response.json::().await.unwrap(), + Stats { + torrents: 1, + seeders: 1, + completed: 0, + leechers: 0, + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + 
tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + } + ); + } + } } From 7331c82d13f7ef01028c5600ecaa966ac73ef955 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Jan 2023 09:25:10 +0000 Subject: [PATCH 0281/1003] refactor: [#143] replace unwrap with expect --- src/setup.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/setup.rs b/src/setup.rs index 84a1d1c3c..daee7eea8 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -60,7 +60,9 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve if config.http_api.enabled { // Temporarily running the new API in the 1313 port let bind_address = config.http_api.bind_address.clone(); - let mut bind_socket: SocketAddr = bind_address.parse().unwrap(); + let mut bind_socket: SocketAddr = bind_address + .parse() + .expect("bind address should be a valid socket address, for example 127.0.0.1:8080"); bind_socket.set_port(1313); let mut http_apis_config = config.http_api.clone(); From 0615c9f028853285fbc6aabac604a623c4d278bc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Jan 2023 14:10:54 +0000 Subject: [PATCH 0282/1003] refactor(api): [#143] remove duplicate code - Extract domain logic: `Tracker::get_torrents_metrics`. - Move domain logic from web framework controllers to domain services: `get_metrics`. - Remove duplicate code in current Warp API and new Axum API. 
--- src/api/resource/stats.rs | 57 +++++++++++++++++++++--------- src/api/routes.rs | 50 ++------------------------ src/apis/routes.rs | 50 ++------------------------ src/tracker/mod.rs | 29 +++++++++++++++ src/tracker/services/mod.rs | 1 + src/tracker/services/statistics.rs | 32 +++++++++++++++++ 6 files changed, 107 insertions(+), 112 deletions(-) create mode 100644 src/tracker/services/mod.rs create mode 100644 src/tracker/services/statistics.rs diff --git a/src/api/resource/stats.rs b/src/api/resource/stats.rs index e87f08f63..c861876fa 100644 --- a/src/api/resource/stats.rs +++ b/src/api/resource/stats.rs @@ -1,21 +1,46 @@ use serde::{Deserialize, Serialize}; +use crate::tracker::services::statistics::TrackerMetrics; + #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Stats { - pub torrents: u32, - pub seeders: u32, - pub completed: u32, - pub leechers: u32, - pub tcp4_connections_handled: u32, - pub tcp4_announces_handled: u32, - pub tcp4_scrapes_handled: u32, - pub tcp6_connections_handled: u32, - pub tcp6_announces_handled: u32, - pub tcp6_scrapes_handled: u32, - pub udp4_connections_handled: u32, - pub udp4_announces_handled: u32, - pub udp4_scrapes_handled: u32, - pub udp6_connections_handled: u32, - pub udp6_announces_handled: u32, - pub udp6_scrapes_handled: u32, + pub torrents: u64, + pub seeders: u64, + pub completed: u64, + pub leechers: u64, + pub tcp4_connections_handled: u64, + pub tcp4_announces_handled: u64, + pub tcp4_scrapes_handled: u64, + pub tcp6_connections_handled: u64, + pub tcp6_announces_handled: u64, + pub tcp6_scrapes_handled: u64, + pub udp4_connections_handled: u64, + pub udp4_announces_handled: u64, + pub udp4_scrapes_handled: u64, + pub udp6_connections_handled: u64, + pub udp6_announces_handled: u64, + pub udp6_scrapes_handled: u64, +} + +impl From for Stats { + fn from(metrics: TrackerMetrics) -> Self { + Self { + torrents: metrics.torrents_metrics.torrents, + seeders: metrics.torrents_metrics.seeders, + 
completed: metrics.torrents_metrics.completed, + leechers: metrics.torrents_metrics.leechers, + tcp4_connections_handled: metrics.protocol_metrics.tcp4_connections_handled, + tcp4_announces_handled: metrics.protocol_metrics.tcp4_announces_handled, + tcp4_scrapes_handled: metrics.protocol_metrics.tcp4_scrapes_handled, + tcp6_connections_handled: metrics.protocol_metrics.tcp6_connections_handled, + tcp6_announces_handled: metrics.protocol_metrics.tcp6_announces_handled, + tcp6_scrapes_handled: metrics.protocol_metrics.tcp6_scrapes_handled, + udp4_connections_handled: metrics.protocol_metrics.udp4_connections_handled, + udp4_announces_handled: metrics.protocol_metrics.udp4_announces_handled, + udp4_scrapes_handled: metrics.protocol_metrics.udp4_scrapes_handled, + udp6_connections_handled: metrics.protocol_metrics.udp6_connections_handled, + udp6_announces_handled: metrics.protocol_metrics.udp6_announces_handled, + udp6_scrapes_handled: metrics.protocol_metrics.udp6_scrapes_handled, + } + } } diff --git a/src/api/routes.rs b/src/api/routes.rs index 76b449e9b..73f1269ef 100644 --- a/src/api/routes.rs +++ b/src/api/routes.rs @@ -13,6 +13,7 @@ use super::resource::torrent::{ListItem, Torrent}; use super::{ActionStatus, TorrentInfoQuery}; use crate::protocol::info_hash::InfoHash; use crate::tracker; +use crate::tracker::services::statistics::get_metrics; fn authenticate(tokens: HashMap) -> impl Filter + Clone { #[derive(Deserialize)] @@ -91,54 +92,7 @@ pub fn routes(tracker: &Arc) -> impl Filter| async move { - let mut results = Stats { - torrents: 0, - seeders: 0, - completed: 0, - leechers: 0, - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - }; - - let db = 
tracker.get_torrents().await; - - db.values().for_each(|torrent_entry| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - results.seeders += seeders; - results.completed += completed; - results.leechers += leechers; - results.torrents += 1; - }); - - let stats = tracker.get_stats().await; - - #[allow(clippy::cast_possible_truncation)] - { - results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; - results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; - results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; - results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; - results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; - results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; - results.udp4_connections_handled = stats.udp4_connections_handled as u32; - results.udp4_announces_handled = stats.udp4_announces_handled as u32; - results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; - results.udp6_connections_handled = stats.udp6_connections_handled as u32; - results.udp6_announces_handled = stats.udp6_announces_handled as u32; - results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; - } - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) + Result::<_, warp::reject::Rejection>::Ok(reply::json(&Stats::from(get_metrics(tracker.clone()).await))) }); // GET /api/torrent/:info_hash diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 1b40ac47e..58eefa8b0 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -5,6 +5,7 @@ use axum::response::Json; use serde_json::{json, Value}; use crate::api::resource::stats::Stats; +use crate::tracker::services::statistics::get_metrics; use crate::tracker::Tracker; #[allow(clippy::unused_async)] @@ -14,52 +15,5 @@ pub async fn root() -> Json { #[allow(clippy::unused_async)] pub async fn get_stats(State(tracker): State>) -> Json { - let mut results = Stats { - torrents: 0, - seeders: 0, - 
completed: 0, - leechers: 0, - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - }; - - let db = tracker.get_torrents().await; - - db.values().for_each(|torrent_entry| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - results.seeders += seeders; - results.completed += completed; - results.leechers += leechers; - results.torrents += 1; - }); - - let stats = tracker.get_stats().await; - - #[allow(clippy::cast_possible_truncation)] - { - results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; - results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; - results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; - results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; - results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; - results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; - results.udp4_connections_handled = stats.udp4_connections_handled as u32; - results.udp4_announces_handled = stats.udp4_announces_handled as u32; - results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; - results.udp6_connections_handled = stats.udp6_connections_handled as u32; - results.udp6_announces_handled = stats.udp6_announces_handled as u32; - results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; - } - - Json(json!(results)) + Json(json!(Stats::from(get_metrics(tracker.clone()).await))) } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 4469d682b..e0ea41123 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1,6 +1,7 @@ pub mod auth; pub mod mode; pub mod peer; +pub mod services; pub mod statistics; pub mod torrent; @@ -28,6 +29,13 @@ pub struct 
Tracker { database: Box, } +pub struct TorrentsMetrics { + pub seeders: u64, + pub completed: u64, + pub leechers: u64, + pub torrents: u64, +} + impl Tracker { /// # Errors /// @@ -277,6 +285,27 @@ impl Tracker { self.torrents.read().await } + pub async fn get_torrents_metrics(&self) -> TorrentsMetrics { + let mut torrents_metrics = TorrentsMetrics { + seeders: 0, + completed: 0, + leechers: 0, + torrents: 0, + }; + + let db = self.get_torrents().await; + + db.values().for_each(|torrent_entry| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + torrents_metrics.seeders += u64::from(seeders); + torrents_metrics.completed += u64::from(completed); + torrents_metrics.leechers += u64::from(leechers); + torrents_metrics.torrents += 1; + }); + + torrents_metrics + } + pub async fn get_stats(&self) -> RwLockReadGuard<'_, statistics::Metrics> { self.stats_repository.get_stats().await } diff --git a/src/tracker/services/mod.rs b/src/tracker/services/mod.rs new file mode 100644 index 000000000..3449ec7b4 --- /dev/null +++ b/src/tracker/services/mod.rs @@ -0,0 +1 @@ +pub mod statistics; diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs new file mode 100644 index 000000000..5f8f39856 --- /dev/null +++ b/src/tracker/services/statistics.rs @@ -0,0 +1,32 @@ +use std::sync::Arc; + +use crate::tracker::statistics::Metrics; +use crate::tracker::{TorrentsMetrics, Tracker}; + +pub struct TrackerMetrics { + pub torrents_metrics: TorrentsMetrics, + pub protocol_metrics: Metrics, +} + +pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { + let torrents_metrics = tracker.get_torrents_metrics().await; + let stats = tracker.get_stats().await; + + TrackerMetrics { + torrents_metrics, + protocol_metrics: Metrics { + tcp4_connections_handled: stats.tcp4_connections_handled, + tcp4_announces_handled: stats.tcp4_announces_handled, + tcp4_scrapes_handled: stats.tcp4_scrapes_handled, + tcp6_connections_handled: 
stats.tcp6_connections_handled, + tcp6_announces_handled: stats.tcp6_announces_handled, + tcp6_scrapes_handled: stats.tcp6_scrapes_handled, + udp4_connections_handled: stats.udp4_connections_handled, + udp4_announces_handled: stats.udp4_announces_handled, + udp4_scrapes_handled: stats.udp4_scrapes_handled, + udp6_connections_handled: stats.udp6_connections_handled, + udp6_announces_handled: stats.udp6_announces_handled, + udp6_scrapes_handled: stats.udp6_scrapes_handled, + }, + } +} From 0f99f7bc077d6b15494a910c930e4045f3db5613 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Jan 2023 17:24:15 +0000 Subject: [PATCH 0283/1003] refactor: [#143] remove duplicate or unneeded code --- src/config.rs | 35 ++++++++++++++++++++++++++++++ src/tracker/mod.rs | 1 + src/tracker/services/statistics.rs | 1 + src/tracker/statistics.rs | 32 +++------------------------ src/udp/handlers.rs | 28 +++--------------------- tests/api/server.rs | 20 ++--------------- tests/common/mod.rs | 8 ------- tests/tracker_api.rs | 1 - tests/udp.rs | 31 +++++++++----------------- 9 files changed, 55 insertions(+), 102 deletions(-) delete mode 100644 tests/common/mod.rs diff --git a/src/config.rs b/src/config.rs index 820af77d8..05a446454 100644 --- a/src/config.rs +++ b/src/config.rs @@ -5,6 +5,7 @@ use std::str::FromStr; use std::{env, fs}; use config::{Config, ConfigError, File, FileFormat}; +use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; use {std, toml}; @@ -73,6 +74,40 @@ pub enum Error { TrackerModeIncompatible, } +/// This configuration is used for testing. It generates random config values so they do not collide +/// if you run more than one tracker at the same time. 
+/// +/// # Panics +/// +/// Will panic if it can't convert the temp file path to string +#[must_use] +pub fn ephemeral_configuration() -> Configuration { + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; + + // Ephemeral socket addresses + let api_port = random_port(); + config.http_api.bind_address = format!("127.0.0.1:{}", &api_port); + let upd_port = random_port(); + config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &upd_port); + + // Ephemeral sqlite database + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("data_{}_{}.db", &api_port, &upd_port)); + config.db_path = temp_file.to_str().unwrap().to_owned(); + + config +} + +fn random_port() -> u16 { + // todo: this may produce random test failures because two tests can try to bind the same port. + // We could create a pool of available ports (with read/write lock) + let mut rng = thread_rng(); + rng.gen_range(49152..65535) +} + impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index e0ea41123..4de168908 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -29,6 +29,7 @@ pub struct Tracker { database: Box, } +#[derive(Debug, PartialEq, Default)] pub struct TorrentsMetrics { pub seeders: u64, pub completed: u64, diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs index 5f8f39856..696ca2ea1 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use crate::tracker::statistics::Metrics; use crate::tracker::{TorrentsMetrics, Tracker}; +#[derive(Debug, PartialEq)] pub struct TrackerMetrics { pub torrents_metrics: TorrentsMetrics, pub protocol_metrics: Metrics, diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index b787e1267..f9f6253fd 100644 --- a/src/tracker/statistics.rs +++ 
b/src/tracker/statistics.rs @@ -23,7 +23,7 @@ pub enum Event { Udp6Scrape, } -#[derive(Debug)] +#[derive(Debug, PartialEq, Default)] pub struct Metrics { pub tcp4_connections_handled: u64, pub tcp4_announces_handled: u64, @@ -39,32 +39,6 @@ pub struct Metrics { pub udp6_scrapes_handled: u64, } -impl Default for Metrics { - fn default() -> Self { - Self::new() - } -} - -impl Metrics { - #[must_use] - pub fn new() -> Self { - Self { - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - } - } -} - pub struct Keeper { pub repository: Repo, } @@ -187,7 +161,7 @@ impl Repo { #[must_use] pub fn new() -> Self { Self { - stats: Arc::new(RwLock::new(Metrics::new())), + stats: Arc::new(RwLock::new(Metrics::default())), } } @@ -280,7 +254,7 @@ mod tests { let stats = stats_tracker.repository.get_stats().await; - assert_eq!(stats.tcp4_announces_handled, Metrics::new().tcp4_announces_handled); + assert_eq!(stats.tcp4_announces_handled, Metrics::default().tcp4_announces_handled); } #[tokio::test] diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 001fb2380..076710fb6 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -239,14 +239,13 @@ fn handle_error(e: &Error, transaction_id: TransactionId) -> Response { #[cfg(test)] mod tests { - use std::env; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use rand::{thread_rng, Rng}; - use crate::config::Configuration; + use crate::config::{ephemeral_configuration, Configuration}; use crate::protocol::clock::{Current, Time}; use crate::tracker::{self, mode, peer, statistics}; @@ -255,28 +254,7 @@ mod tests { } fn 
default_testing_tracker_configuration() -> Configuration { - let mut config = Configuration { - log_level: Some("off".to_owned()), - ..Default::default() - }; - - // Ephemeral socket address - let port = ephemeral_random_port(); - config.http_api.bind_address = format!("127.0.0.1:{}", &port); - - // Ephemeral database - let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("data_{}.db", &port)); - config.db_path = temp_file.to_str().unwrap().to_owned(); - - config - } - - fn ephemeral_random_port() -> u16 { - // todo: this may produce random test failures because two tests can try to bind the same port. - // We could create a pool of available ports (with read/write lock) - let mut rng = thread_rng(); - rng.gen_range(49152..65535) + ephemeral_configuration() } fn initialized_public_tracker() -> Arc { diff --git a/tests/api/server.rs b/tests/api/server.rs index 338b068c8..9819a0847 100644 --- a/tests/api/server.rs +++ b/tests/api/server.rs @@ -1,8 +1,7 @@ use core::panic; -use std::env; use std::sync::Arc; -use torrust_tracker::config::Configuration; +use torrust_tracker::config::{ephemeral_configuration, Configuration}; use torrust_tracker::jobs::{tracker_api, tracker_apis}; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; @@ -11,24 +10,9 @@ use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use super::connection_info::ConnectionInfo; use super::Version; -use crate::common::ephemeral_random_port; pub fn tracker_configuration() -> Arc { - let mut config = Configuration { - log_level: Some("off".to_owned()), - ..Default::default() - }; - - // Ephemeral socket address - let port = ephemeral_random_port(); - config.http_api.bind_address = format!("127.0.0.1:{}", &port); - - // Ephemeral database - let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("data_{}.db", &port)); - config.db_path = temp_file.to_str().unwrap().to_owned(); - - 
Arc::new(config) + Arc::new(ephemeral_configuration()) } pub async fn start_default_api(version: &Version) -> Server { diff --git a/tests/common/mod.rs b/tests/common/mod.rs deleted file mode 100644 index 5fd484cf5..000000000 --- a/tests/common/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -use rand::{thread_rng, Rng}; - -pub fn ephemeral_random_port() -> u16 { - // todo: this may produce random test failures because two tests can try to bind the same port. - // We could create a pool of available ports (with read/write lock) - let mut rng = thread_rng(); - rng.gen_range(49152..65535) -} diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index bac9d1324..301dd5890 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -14,7 +14,6 @@ extern crate rand; mod api; -mod common; mod tracker_api { diff --git a/tests/udp.rs b/tests/udp.rs index 55384db05..408f4f795 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -3,11 +3,8 @@ /// cargo test `udp_tracker_server` -- --nocapture extern crate rand; -mod common; - mod udp_tracker_server { use core::panic; - use std::env; use std::io::Cursor; use std::net::Ipv4Addr; use std::sync::atomic::{AtomicBool, Ordering}; @@ -17,32 +14,24 @@ mod udp_tracker_server { AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, Request, Response, ScrapeRequest, TransactionId, }; + use rand::{thread_rng, Rng}; use tokio::net::UdpSocket; use tokio::task::JoinHandle; - use torrust_tracker::config::Configuration; + use torrust_tracker::config::{ephemeral_configuration, Configuration}; use torrust_tracker::jobs::udp_tracker; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::udp::MAX_PACKET_SIZE; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; - use crate::common::ephemeral_random_port; - fn tracker_configuration() -> Arc { - let mut config = Configuration { - log_level: Some("off".to_owned()), - ..Default::default() - }; - - 
// Ephemeral socket address - let port = ephemeral_random_port(); - config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &port); - - // Ephemeral database - let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("data_{}.db", &port)); - config.db_path = temp_file.to_str().unwrap().to_owned(); + Arc::new(ephemeral_configuration()) + } - Arc::new(config) + pub fn ephemeral_random_client_port() -> u16 { + // todo: this may produce random test failures because two tests can try to bind the same port. + // We could create a pool of available ports (with read/write lock) + let mut rng = thread_rng(); + rng.gen_range(49152..65535) } pub struct UdpServer { @@ -129,7 +118,7 @@ mod udp_tracker_server { /// Creates a new `UdpClient` connected to a Udp server async fn new_connected_udp_client(remote_address: &str) -> UdpClient { - let client = UdpClient::bind(&source_address(ephemeral_random_port())).await; + let client = UdpClient::bind(&source_address(ephemeral_random_client_port())).await; client.connect(remote_address).await; client } From 1c6db6e6c47c8ed5ebd5efba8c1a0a541eb93a3f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Jan 2023 17:28:59 +0000 Subject: [PATCH 0284/1003] test: [#143] add tests for extracted functions --- src/api/resource/stats.rs | 54 ++++++++++++++++++++++++++++++ src/tracker/mod.rs | 48 ++++++++++++++++++++++++++ src/tracker/services/statistics.rs | 48 ++++++++++++++++++++++++++ 3 files changed, 150 insertions(+) diff --git a/src/api/resource/stats.rs b/src/api/resource/stats.rs index c861876fa..44ac814dc 100644 --- a/src/api/resource/stats.rs +++ b/src/api/resource/stats.rs @@ -44,3 +44,57 @@ impl From for Stats { } } } + +#[cfg(test)] +mod tests { + use super::Stats; + use crate::tracker::services::statistics::TrackerMetrics; + use crate::tracker::statistics::Metrics; + use crate::tracker::TorrentsMetrics; + + #[test] + fn stats_resource_should_be_converted_from_tracker_metrics() { + assert_eq!( + 
Stats::from(TrackerMetrics { + torrents_metrics: TorrentsMetrics { + seeders: 1, + completed: 2, + leechers: 3, + torrents: 4 + }, + protocol_metrics: Metrics { + tcp4_connections_handled: 5, + tcp4_announces_handled: 6, + tcp4_scrapes_handled: 7, + tcp6_connections_handled: 8, + tcp6_announces_handled: 9, + tcp6_scrapes_handled: 10, + udp4_connections_handled: 11, + udp4_announces_handled: 12, + udp4_scrapes_handled: 13, + udp6_connections_handled: 14, + udp6_announces_handled: 15, + udp6_scrapes_handled: 16 + } + }), + Stats { + torrents: 4, + seeders: 1, + completed: 2, + leechers: 3, + tcp4_connections_handled: 5, + tcp4_announces_handled: 6, + tcp4_scrapes_handled: 7, + tcp6_connections_handled: 8, + tcp6_announces_handled: 9, + tcp6_scrapes_handled: 10, + udp4_connections_handled: 11, + udp4_announces_handled: 12, + udp4_scrapes_handled: 13, + udp6_connections_handled: 14, + udp6_announces_handled: 15, + udp6_scrapes_handled: 16 + } + ); + } +} diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 4de168908..f33628355 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -340,3 +340,51 @@ impl Tracker { } } } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use super::statistics::Keeper; + use super::{TorrentsMetrics, Tracker}; + use crate::config::{ephemeral_configuration, Configuration}; + + pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral_configuration()) + } + + pub fn tracker_factory() -> Tracker { + // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
+ + // Configuration + let configuration = tracker_configuration(); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + match Tracker::new(&configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } + } + } + + #[tokio::test] + async fn the_tracker_should_collect_torrent_metrics() { + let tracker = tracker_factory(); + + let torrents_metrics = tracker.get_torrents_metrics().await; + + assert_eq!( + torrents_metrics, + TorrentsMetrics { + seeders: 0, + completed: 0, + leechers: 0, + torrents: 0 + } + ); + } +} diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs index 696ca2ea1..bbc069dd3 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics.rs @@ -31,3 +31,51 @@ pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { }, } } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use super::Tracker; + use crate::config::{ephemeral_configuration, Configuration}; + use crate::tracker; + use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; + use crate::tracker::statistics::Keeper; + + pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral_configuration()) + } + + pub fn tracker_factory() -> Tracker { + // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
+ + // Configuration + let configuration = tracker_configuration(); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + match Tracker::new(&configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } + } + } + + #[tokio::test] + async fn the_statistics_service_should_return_the_tracker_metrics() { + let tracker = Arc::new(tracker_factory()); + + let tracker_metrics = get_metrics(tracker.clone()).await; + + assert_eq!( + tracker_metrics, + TrackerMetrics { + torrents_metrics: tracker::TorrentsMetrics::default(), + protocol_metrics: tracker::statistics::Metrics::default(), + } + ); + } +} From 43dbed933c5a7560fc36aa2fde981d89020d39bc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 4 Jan 2023 14:12:06 +0000 Subject: [PATCH 0285/1003] feat(api): [#143] authentication with GET param for Axum API It keeps the same contract of the API. It returns 500 status code with error message in "debug" format. 
--- cSpell.json | 1 + src/apis/middlewares/auth.rs | 62 ++++++++++++++++++++++++++++++++++++ src/apis/middlewares/mod.rs | 1 + src/apis/mod.rs | 1 + src/apis/server.rs | 13 +++++--- src/config.rs | 19 ++++++++++- tests/tracker_api.rs | 19 +++++++++++ 7 files changed, 111 insertions(+), 5 deletions(-) create mode 100644 src/apis/middlewares/auth.rs create mode 100644 src/apis/middlewares/mod.rs diff --git a/cSpell.json b/cSpell.json index 57b9f3b67..801d35dbb 100644 --- a/cSpell.json +++ b/cSpell.json @@ -32,6 +32,7 @@ "leechers", "libtorrent", "Lphant", + "middlewares", "mockall", "myacicontext", "nanos", diff --git a/src/apis/middlewares/auth.rs b/src/apis/middlewares/auth.rs new file mode 100644 index 000000000..e04d5f2c5 --- /dev/null +++ b/src/apis/middlewares/auth.rs @@ -0,0 +1,62 @@ +use std::sync::Arc; + +use axum::extract::{Query, State}; +use axum::http::{header, Request, StatusCode}; +use axum::middleware::Next; +use axum::response::{IntoResponse, Response}; +use serde::Deserialize; + +use crate::config::{Configuration, HttpApi}; + +#[derive(Deserialize, Debug)] +pub struct QueryParams { + pub token: Option, +} + +/// Middleware for authentication using a "token" GET param. +/// The token must be one of the tokens in the tracker HTTP API configuration. 
+pub async fn auth( + State(config): State>, + Query(params): Query, + request: Request, + next: Next, +) -> Response +where + B: Send, +{ + let token = match params.token { + None => return AuthError::Unauthorized.into_response(), + Some(token) => token, + }; + + if !authenticate(&token, &config.http_api) { + return AuthError::TokenNotValid.into_response(); + } + + next.run(request).await +} + +enum AuthError { + Unauthorized, + TokenNotValid, +} + +impl IntoResponse for AuthError { + fn into_response(self) -> Response { + let body = match self { + AuthError::Unauthorized => "Unhandled rejection: Err { reason: \"unauthorized\" }", + AuthError::TokenNotValid => "Unhandled rejection: Err { reason: \"token not valid\" }", + }; + + ( + StatusCode::INTERNAL_SERVER_ERROR, + [(header::CONTENT_TYPE, "text/plain")], + body, + ) + .into_response() + } +} + +fn authenticate(token: &str, http_api_config: &HttpApi) -> bool { + http_api_config.contains_token(token) +} diff --git a/src/apis/middlewares/mod.rs b/src/apis/middlewares/mod.rs new file mode 100644 index 000000000..0e4a05d59 --- /dev/null +++ b/src/apis/middlewares/mod.rs @@ -0,0 +1 @@ +pub mod auth; diff --git a/src/apis/mod.rs b/src/apis/mod.rs index f2ec6ffbd..ea1615d6b 100644 --- a/src/apis/mod.rs +++ b/src/apis/mod.rs @@ -1,2 +1,3 @@ +pub mod middlewares; pub mod routes; pub mod server; diff --git a/src/apis/server.rs b/src/apis/server.rs index fb532519f..db7224cde 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -2,17 +2,19 @@ use std::net::SocketAddr; use std::sync::Arc; use axum::routing::get; -use axum::Router; +use axum::{middleware, Router}; use futures::Future; use warp::hyper; +use super::middlewares::auth::auth; use super::routes::{get_stats, root}; use crate::tracker; pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { let app = Router::new() .route("/", get(root)) - .route("/stats", get(get_stats).with_state(tracker.clone())); + .route("/stats", 
get(get_stats).with_state(tracker.clone())) + .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -25,11 +27,14 @@ pub fn start_tls( socket_addr: SocketAddr, _ssl_cert_path: &str, _ssl_key_path: &str, - _tracker: &Arc, + tracker: &Arc, ) -> impl Future> { // todo: for the time being, it's just a copy & paste from start(...). - let app = Router::new().route("/", get(root)); + let app = Router::new() + .route("/", get(root)) + .route("/stats", get(get_stats).with_state(tracker.clone())) + .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); diff --git a/src/config.rs b/src/config.rs index 05a446454..275339aa0 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::net::IpAddr; use std::path::Path; use std::str::FromStr; @@ -44,6 +44,15 @@ pub struct HttpApi { pub access_tokens: HashMap, } +impl HttpApi { + #[must_use] + pub fn contains_token(&self, token: &str) -> bool { + let tokens: HashMap = self.access_tokens.clone(); + let tokens: HashSet = tokens.into_values().collect(); + tokens.contains(token) + } +} + #[allow(clippy::struct_excessive_bools)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { @@ -366,4 +375,12 @@ mod tests { assert_eq!(format!("{error}"), "TrackerModeIncompatible"); } + + #[test] + fn http_api_configuration_should_check_if_it_contains_a_token() { + let configuration = Configuration::default(); + + assert!(configuration.http_api.contains_token("MyAccessToken")); + assert!(!configuration.http_api.contains_token("NonExistingToken")); + } } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 301dd5890..bc69a1c93 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -602,7 +602,9 @@ mod tracker_apis { use 
torrust_tracker::api::resource::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; + use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; use crate::api::client::Client; + use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; use crate::api::server::start_default_api; use crate::api::Version; @@ -645,5 +647,22 @@ mod tracker_apis { } ); } + + #[tokio::test] + async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { + let api_server = start_default_api(&Version::Axum).await; + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + .get_tracker_statistics() + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + .get_tracker_statistics() + .await; + + assert_unauthorized(response).await; + } } } From 13959452f59949b91789050b8eda597fb82728b7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 4 Jan 2023 14:22:06 +0000 Subject: [PATCH 0286/1003] refactor(api): [#143] remove dummy api endpoint It was added to test Axum configuration. 
--- src/apis/routes.rs | 5 ----- src/apis/server.rs | 4 +--- tests/tracker_api.rs | 20 -------------------- 3 files changed, 1 insertion(+), 28 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 58eefa8b0..93474c4c2 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -8,11 +8,6 @@ use crate::api::resource::stats::Stats; use crate::tracker::services::statistics::get_metrics; use crate::tracker::Tracker; -#[allow(clippy::unused_async)] -pub async fn root() -> Json { - Json(json!({ "data": 42 })) -} - #[allow(clippy::unused_async)] pub async fn get_stats(State(tracker): State>) -> Json { Json(json!(Stats::from(get_metrics(tracker.clone()).await))) diff --git a/src/apis/server.rs b/src/apis/server.rs index db7224cde..9ddf4a8d3 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -7,12 +7,11 @@ use futures::Future; use warp::hyper; use super::middlewares::auth::auth; -use super::routes::{get_stats, root}; +use super::routes::get_stats; use crate::tracker; pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { let app = Router::new() - .route("/", get(root)) .route("/stats", get(get_stats).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); @@ -32,7 +31,6 @@ pub fn start_tls( // todo: for the time being, it's just a copy & paste from start(...). let app = Router::new() - .route("/", get(root)) .route("/stats", get(get_stats).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index bc69a1c93..25d747f27 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -553,9 +553,6 @@ mod tracker_apis { Endpoints: - Root (dummy endpoint to test Axum configuration. 
To be removed): - - [x] GET / - Stats: - [ ] GET /api/stats @@ -579,23 +576,6 @@ mod tracker_apis { */ - mod for_entrypoint { - use crate::api::client::{Client, Query}; - use crate::api::server::start_default_api; - use crate::api::Version; - - #[tokio::test] - async fn test_entrypoint() { - let api_server = start_default_api(&Version::Axum).await; - - let response = Client::new(api_server.get_connection_info(), &Version::Axum) - .get("", Query::default()) - .await; - - assert_eq!(response.status(), 200); - } - } - mod for_stats_resources { use std::str::FromStr; From af51f77743e58c3f0fb9121a97447e90a7b9ba09 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 4 Jan 2023 17:19:30 +0000 Subject: [PATCH 0287/1003] feat(api): [#143] add new cargo dependency: axum-server The new API implementation uses Axum. Axum does not support SSL configuration. The "axum-server" crate provides it. --- Cargo.lock | 38 +++++++++++++++++++++++++++++++++++++- Cargo.toml | 1 + 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 8e40508dc..8f8d753b3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -56,6 +56,12 @@ dependencies = [ "either", ] +[[package]] +name = "arc-swap" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" + [[package]] name = "arrayvec" version = "0.5.2" @@ -146,6 +152,26 @@ dependencies = [ "tower-service", ] +[[package]] +name = "axum-server" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8456dab8f11484979a86651da8e619b355ede5d61a160755155f6c344bd18c47" +dependencies = [ + "arc-swap", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "pin-project-lite", + "rustls", + "rustls-pemfile 1.0.1", + "tokio", + "tokio-rustls", + "tower-service", +] + [[package]] name = "base-x" version = "0.2.11" @@ -2090,6 +2116,15 @@ dependencies = [ "base64", ] +[[package]] 
+name = "rustls-pemfile" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" +dependencies = [ + "base64", +] + [[package]] name = "rustversion" version = "1.0.11" @@ -2764,6 +2799,7 @@ dependencies = [ "aquatic_udp_protocol", "async-trait", "axum", + "axum-server", "binascii", "chrono", "config", @@ -3037,7 +3073,7 @@ dependencies = [ "multipart", "percent-encoding", "pin-project", - "rustls-pemfile", + "rustls-pemfile 0.2.1", "scoped-tls", "serde", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index 8ddefe78e..434b4cace 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,6 +59,7 @@ async-trait = "0.1" aquatic_udp_protocol = "0.2" uuid = { version = "1", features = ["v4"] } axum = "0.6.1" +axum-server = { version = "0.4.4", features = ["tls-rustls"] } [dev-dependencies] mockall = "0.11" From fe4303c0af047add8b413e8cb8b4f980e58b7e8c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 4 Jan 2023 17:21:18 +0000 Subject: [PATCH 0288/1003] feat(api): [#143] SSL support for the new Axum API --- cSpell.json | 1 + src/apis/server.rs | 24 ++++++++++++++++-------- src/jobs/tracker_apis.rs | 23 +++++++++++++++++------ 3 files changed, 34 insertions(+), 14 deletions(-) diff --git a/cSpell.json b/cSpell.json index 801d35dbb..bb15b6d91 100644 --- a/cSpell.json +++ b/cSpell.json @@ -49,6 +49,7 @@ "rngs", "rusqlite", "rustfmt", + "Rustls", "Seedable", "Shareaza", "sharktorrent", diff --git a/src/apis/server.rs b/src/apis/server.rs index 9ddf4a8d3..668959cd6 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -3,7 +3,10 @@ use std::sync::Arc; use axum::routing::get; use axum::{middleware, Router}; +use axum_server::tls_rustls::RustlsConfig; +use axum_server::Handle; use futures::Future; +use log::info; use warp::hyper; use super::middlewares::auth::auth; @@ -19,24 +22,29 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl F 
server.with_graceful_shutdown(async move { tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + info!("Stopping Torrust APIs server on http://{} ...", socket_addr); }) } pub fn start_tls( socket_addr: SocketAddr, - _ssl_cert_path: &str, - _ssl_key_path: &str, + ssl_config: RustlsConfig, tracker: &Arc, -) -> impl Future> { - // todo: for the time being, it's just a copy & paste from start(...). - +) -> impl Future> { let app = Router::new() .route("/stats", get(get_stats).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); - let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); + let handle = Handle::new(); + let shutdown_handle = handle.clone(); - server.with_graceful_shutdown(async move { + tokio::spawn(async move { tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - }) + info!("Stopping Torrust APIs server on https://{} ...", socket_addr); + shutdown_handle.shutdown(); + }); + + axum_server::bind_rustls(socket_addr, ssl_config) + .handle(handle) + .serve(app.into_make_service()) } diff --git a/src/jobs/tracker_apis.rs b/src/jobs/tracker_apis.rs index b696c923d..00e39eeba 100644 --- a/src/jobs/tracker_apis.rs +++ b/src/jobs/tracker_apis.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use axum_server::tls_rustls::RustlsConfig; use log::info; use tokio::sync::oneshot; use tokio::task::JoinHandle; @@ -29,17 +30,27 @@ pub async fn start_job(config: &HttpApi, tracker: Arc) -> Join let join_handle = tokio::spawn(async move { if !ssl_enabled { info!("Starting Torrust APIs server on: http://{}", bind_addr); + let handle = server::start(bind_addr, &tracker); - tx.send(ApiServerJobStarted()).expect("the start job dropped"); + + tx.send(ApiServerJobStarted()).expect("the API server should not be dropped"); + if let Ok(()) = handle.await { - info!("Stopping Torrust APIs server on {} ...", bind_addr); + info!("Torrust APIs server on http://{} stopped", 
bind_addr); } } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { info!("Starting Torrust APIs server on: https://{}", bind_addr); - let handle = server::start_tls(bind_addr, &ssl_cert_path.unwrap(), &ssl_key_path.unwrap(), &tracker); - tx.send(ApiServerJobStarted()).expect("the start job dropped"); + + let ssl_config = RustlsConfig::from_pem_file(ssl_cert_path.unwrap(), ssl_key_path.unwrap()) + .await + .unwrap(); + + let handle = server::start_tls(bind_addr, ssl_config, &tracker); + + tx.send(ApiServerJobStarted()).expect("the API server should not be dropped"); + if let Ok(()) = handle.await { - info!("Stopping Torrust APIs server on {} ...", bind_addr); + info!("Torrust APIs server on https://{} stopped", bind_addr); } } }); @@ -47,7 +58,7 @@ pub async fn start_job(config: &HttpApi, tracker: Arc) -> Join // Wait until the APIs server job is running match rx.await { Ok(_msg) => info!("Torrust APIs server started"), - Err(e) => panic!("the apis server was dropped: {e}"), + Err(e) => panic!("the API server was dropped: {e}"), } join_handle From 16d438dced06f481744160c9199e20fd893822ab Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 5 Jan 2023 11:42:24 +0000 Subject: [PATCH 0289/1003] feat(api): [#143] axum api, WIP. GET /api/torrent/:info_hash endpoint Not all cases finished yet. Not found case is pending. 
--- src/api/resource/torrent.rs | 70 ++++++++++++++++- src/api/routes.rs | 6 +- src/apis/routes.rs | 23 ++++-- src/apis/server.rs | 3 +- src/tracker/services/common.rs | 24 ++++++ src/tracker/services/mod.rs | 2 + src/tracker/services/statistics.rs | 23 +----- src/tracker/services/torrent.rs | 116 +++++++++++++++++++++++++++++ tests/tracker_api.rs | 63 ++++++++++++++++ 9 files changed, 297 insertions(+), 33 deletions(-) create mode 100644 src/tracker/services/common.rs create mode 100644 src/tracker/services/torrent.rs diff --git a/src/api/resource/torrent.rs b/src/api/resource/torrent.rs index 924b61b8c..bec82a132 100644 --- a/src/api/resource/torrent.rs +++ b/src/api/resource/torrent.rs @@ -1,11 +1,14 @@ use serde::{Deserialize, Serialize}; +use super::peer; +use crate::tracker::services::torrent::Info; + #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Torrent { pub info_hash: String, - pub seeders: u32, - pub completed: u32, - pub leechers: u32, + pub seeders: u64, + pub completed: u64, + pub leechers: u64, #[serde(skip_serializing_if = "Option::is_none")] pub peers: Option>, } @@ -19,3 +22,64 @@ pub struct ListItem { // todo: this is always None. Remove field from endpoint? 
pub peers: Option>, } + +impl From for Torrent { + fn from(info: Info) -> Self { + Self { + info_hash: info.info_hash.to_string(), + seeders: info.seeders, + completed: info.completed, + leechers: info.leechers, + peers: info + .peers + .map(|peers| peers.iter().map(|peer| peer::Peer::from(*peer)).collect()), + } + } +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::str::FromStr; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use crate::api::resource::peer::Peer; + use crate::api::resource::torrent::Torrent; + use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer; + use crate::tracker::services::torrent::Info; + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } + } + + #[test] + fn torrent_resource_should_be_converted_from_torrent_info() { + assert_eq!( + Torrent::from(Info { + info_hash: InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + seeders: 1, + completed: 2, + leechers: 3, + peers: Some(vec![sample_peer()]), + }), + Torrent { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 2, + leechers: 3, + peers: Some(vec![Peer::from(sample_peer())]), + } + ); + } +} diff --git a/src/api/routes.rs b/src/api/routes.rs index 73f1269ef..b29023f2f 100644 --- a/src/api/routes.rs +++ b/src/api/routes.rs @@ -124,9 +124,9 @@ pub fn routes(tracker: &Arc) -> impl Filter>) -> Json { - Json(json!(Stats::from(get_metrics(tracker.clone()).await))) +pub async fn get_stats(State(tracker): State>) -> Json { + Json(Stats::from(get_metrics(tracker.clone()).await)) +} + +/// # Panics +/// 
+/// Will panic if the torrent does not exist. +pub async fn get_torrent(State(tracker): State>, Path(info_hash): Path) -> Json { + let info = get_torrent_info(tracker.clone(), &InfoHash::from_str(&info_hash).unwrap()) + .await + .unwrap(); + // todo: return "not found" if the torrent does not exist + Json(Torrent::from(info)) } diff --git a/src/apis/server.rs b/src/apis/server.rs index 668959cd6..dcd0924c1 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -10,12 +10,13 @@ use log::info; use warp::hyper; use super::middlewares::auth::auth; -use super::routes::get_stats; +use super::routes::{get_stats, get_torrent}; use crate::tracker; pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { let app = Router::new() .route("/stats", get(get_stats).with_state(tracker.clone())) + .route("/torrent/:info_hash", get(get_torrent).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); diff --git a/src/tracker/services/common.rs b/src/tracker/services/common.rs new file mode 100644 index 000000000..8757e6a21 --- /dev/null +++ b/src/tracker/services/common.rs @@ -0,0 +1,24 @@ +use std::sync::Arc; + +use crate::config::Configuration; +use crate::tracker::statistics::Keeper; +use crate::tracker::Tracker; + +/// # Panics +/// +/// Will panic if tracker cannot be instantiated. +#[must_use] +pub fn tracker_factory(configuration: &Arc) -> Tracker { + // todo: the tracker initialization is duplicated in many places. 
+ + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } + } +} diff --git a/src/tracker/services/mod.rs b/src/tracker/services/mod.rs index 3449ec7b4..ffa5bb253 100644 --- a/src/tracker/services/mod.rs +++ b/src/tracker/services/mod.rs @@ -1 +1,3 @@ pub mod statistics; +pub mod torrent; +pub mod common; diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs index bbc069dd3..745f5563c 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics.rs @@ -36,37 +36,18 @@ pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { mod tests { use std::sync::Arc; - use super::Tracker; use crate::config::{ephemeral_configuration, Configuration}; use crate::tracker; + use crate::tracker::services::common::tracker_factory; use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; - use crate::tracker::statistics::Keeper; pub fn tracker_configuration() -> Arc { Arc::new(ephemeral_configuration()) } - pub fn tracker_factory() -> Tracker { - // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
- - // Configuration - let configuration = tracker_configuration(); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - match Tracker::new(&configuration, Some(stats_event_sender), stats_repository) { - Ok(tracker) => tracker, - Err(error) => { - panic!("{}", error) - } - } - } - #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { - let tracker = Arc::new(tracker_factory()); + let tracker = Arc::new(tracker_factory(&tracker_configuration())); let tracker_metrics = get_metrics(tracker.clone()).await; diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs new file mode 100644 index 000000000..da7d24ce5 --- /dev/null +++ b/src/tracker/services/torrent.rs @@ -0,0 +1,116 @@ +use std::sync::Arc; + +use crate::protocol::info_hash::InfoHash; +use crate::tracker::peer::Peer; +use crate::tracker::Tracker; + +#[derive(Debug, PartialEq)] +pub struct Info { + pub info_hash: InfoHash, + pub seeders: u64, + pub completed: u64, + pub leechers: u64, + pub peers: Option>, +} + +pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { + let db = tracker.get_torrents().await; + + let torrent_entry_option = db.get(info_hash); + + let torrent_entry = match torrent_entry_option { + Some(torrent_entry) => torrent_entry, + None => { + return None; + } + }; + + let (seeders, completed, leechers) = torrent_entry.get_stats(); + + let peers = torrent_entry.get_peers(None); + + let peers = Some(peers.iter().map(|peer| (**peer)).collect()); + + Some(Info { + info_hash: *info_hash, + seeders: u64::from(seeders), + completed: u64::from(completed), + leechers: u64::from(leechers), + peers, + }) +} + +#[cfg(test)] +mod tests { + + mod getting_a_torrent_info { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::str::FromStr; + use std::sync::Arc; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use 
crate::config::{ephemeral_configuration, Configuration}; + use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer; + use crate::tracker::services::common::tracker_factory; + use crate::tracker::services::torrent::{get_torrent_info, Info}; + + pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral_configuration()) + } + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } + } + + #[tokio::test] + async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let torrent_info = get_torrent_info( + tracker.clone(), + &InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(), + ) + .await; + + assert!(torrent_info.is_none()); + } + + #[tokio::test] + async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer()) + .await; + + let torrent_info = get_torrent_info(tracker.clone(), &InfoHash::from_str(&hash).unwrap()) + .await + .unwrap(); + + assert_eq!( + torrent_info, + Info { + info_hash: InfoHash::from_str(&hash).unwrap(), + seeders: 1, + completed: 0, + leechers: 0, + peers: Some(vec![sample_peer()]), + } + ); + } + } +} diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 25d747f27..78f8efbb1 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -645,4 +645,67 @@ mod tracker_apis { 
assert_unauthorized(response).await; } } + + mod for_torrent_resources { + use std::str::FromStr; + + use torrust_tracker::api::resource; + use torrust_tracker::api::resource::torrent::Torrent; + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::client::Client; + use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::fixtures::sample_peer; + use crate::api::server::start_default_api; + use crate::api::Version; + + #[tokio::test] + async fn should_allow_getting_a_torrent_info() { + let api_server = start_default_api(&Version::Axum).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let peer = sample_peer(); + + api_server.add_torrent(&info_hash, &peer).await; + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .get_torrent(&info_hash.to_string()) + .await; + + assert_eq!(response.status(), 200); + assert_eq!( + response.json::().await.unwrap(), + Torrent { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: Some(vec![resource::peer::Peer::from(peer)]) + } + ); + } + + #[tokio::test] + async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { + let api_server = start_default_api(&Version::Axum).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + api_server.add_torrent(&info_hash, &sample_peer()).await; + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + .get_torrent(&info_hash.to_string()) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + .get_torrent(&info_hash.to_string()) + .await; + + assert_unauthorized(response).await; + 
} + } } From 2aebf9ad396298d41454bfda75528395fab0d085 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jan 2023 16:53:05 +0000 Subject: [PATCH 0290/1003] test(api): [#143] add test for torrent not known response in GET /api/torrent/:info_hash endpoint --- src/apis/middlewares/auth.rs | 2 +- tests/api/asserts.rs | 8 ++++++++ tests/tracker_api.rs | 15 ++++++++++++++- 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/src/apis/middlewares/auth.rs b/src/apis/middlewares/auth.rs index e04d5f2c5..905160a06 100644 --- a/src/apis/middlewares/auth.rs +++ b/src/apis/middlewares/auth.rs @@ -50,7 +50,7 @@ impl IntoResponse for AuthError { ( StatusCode::INTERNAL_SERVER_ERROR, - [(header::CONTENT_TYPE, "text/plain")], + [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], body, ) .into_response() diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 5d664d5c4..5e03c2573 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -2,6 +2,7 @@ use reqwest::Response; pub async fn assert_token_not_valid(response: Response) { assert_eq!(response.status(), 500); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); assert_eq!( response.text().await.unwrap(), "Unhandled rejection: Err { reason: \"token not valid\" }" @@ -10,8 +11,15 @@ pub async fn assert_token_not_valid(response: Response) { pub async fn assert_unauthorized(response: Response) { assert_eq!(response.status(), 500); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); assert_eq!( response.text().await.unwrap(), "Unhandled rejection: Err { reason: \"unauthorized\" }" ); } + +pub async fn assert_torrent_not_known(response: Response) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.text().await.unwrap(), "\"torrent not known\""); +} diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 78f8efbb1..0a942ea45 
100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -119,7 +119,7 @@ mod tracker_api { use torrust_tracker::api::resource::torrent::{self, Torrent}; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{assert_token_not_valid, assert_torrent_not_known, assert_unauthorized}; use crate::api::client::{Client, Query, QueryParam}; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; @@ -251,6 +251,19 @@ mod tracker_api { ); } + #[tokio::test] + async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { + let api_server = start_default_api(&Version::Warp).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .get_torrent(&info_hash.to_string()) + .await; + + assert_torrent_not_known(response).await; + } + #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let api_server = start_default_api(&Version::Warp).await; From a649fe861a5f7508621c4c7630c676dc28c68d8d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jan 2023 16:54:21 +0000 Subject: [PATCH 0291/1003] feat(api): [#143] axum api. GET /api/torrent/:info_hash endpoint. 
Not found case --- cSpell.json | 1 + src/apis/routes.rs | 18 ++++++++++-------- tests/tracker_api.rs | 15 ++++++++++++++- 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/cSpell.json b/cSpell.json index bb15b6d91..537ea65a5 100644 --- a/cSpell.json +++ b/cSpell.json @@ -26,6 +26,7 @@ "hlocalhost", "Hydranode", "incompletei", + "infohash", "infoschema", "intervali", "leecher", diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 72be81ab0..9fedbc822 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -2,7 +2,8 @@ use std::str::FromStr; use std::sync::Arc; use axum::extract::{Path, State}; -use axum::response::Json; +use axum::response::{IntoResponse, Json, Response}; +use serde_json::json; use crate::api::resource::stats::Stats; use crate::api::resource::torrent::Torrent; @@ -17,11 +18,12 @@ pub async fn get_stats(State(tracker): State>) -> Json { /// # Panics /// -/// Will panic if the torrent does not exist. -pub async fn get_torrent(State(tracker): State>, Path(info_hash): Path) -> Json { - let info = get_torrent_info(tracker.clone(), &InfoHash::from_str(&info_hash).unwrap()) - .await - .unwrap(); - // todo: return "not found" if the torrent does not exist - Json(Torrent::from(info)) +/// Will panic if it can't parse the infohash in the request +pub async fn get_torrent(State(tracker): State>, Path(info_hash): Path) -> Response { + let optional_torrent_info = get_torrent_info(tracker.clone(), &InfoHash::from_str(&info_hash).unwrap()).await; + + match optional_torrent_info { + Some(info) => Json(Torrent::from(info)).into_response(), + None => Json(json!("torrent not known")).into_response(), + } } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 0a942ea45..bc5271c21 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -666,7 +666,7 @@ mod tracker_apis { use torrust_tracker::api::resource::torrent::Torrent; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::asserts::{assert_token_not_valid, 
assert_unauthorized}; + use crate::api::asserts::{assert_token_not_valid, assert_torrent_not_known, assert_unauthorized}; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; @@ -700,6 +700,19 @@ mod tracker_apis { ); } + #[tokio::test] + async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { + let api_server = start_default_api(&Version::Axum).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .get_torrent(&info_hash.to_string()) + .await; + + assert_torrent_not_known(response).await; + } + #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let api_server = start_default_api(&Version::Axum).await; From ded4d110dc04b71d6448a6dc3c51a6049acf37e3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jan 2023 16:59:49 +0000 Subject: [PATCH 0292/1003] fix: clippy errors --- src/tracker/services/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tracker/services/mod.rs b/src/tracker/services/mod.rs index ffa5bb253..2fd557d54 100644 --- a/src/tracker/services/mod.rs +++ b/src/tracker/services/mod.rs @@ -1,3 +1,3 @@ +pub mod common; pub mod statistics; pub mod torrent; -pub mod common; From a8061792f4ae1b9fcb95f778d42f706f05206ab5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jan 2023 17:11:43 +0000 Subject: [PATCH 0293/1003] refactor(api): [#143] use extracted service in the Warp handler --- src/api/routes.rs | 28 ++++++---------------------- 1 file changed, 6 insertions(+), 22 deletions(-) diff --git a/src/api/routes.rs b/src/api/routes.rs index b29023f2f..f3ff990ff 100644 --- a/src/api/routes.rs +++ b/src/api/routes.rs @@ -7,13 +7,13 @@ use serde::Deserialize; use warp::{filters, reply, Filter}; use 
super::resource::auth_key::AuthKey; -use super::resource::peer; use super::resource::stats::Stats; use super::resource::torrent::{ListItem, Torrent}; use super::{ActionStatus, TorrentInfoQuery}; use crate::protocol::info_hash::InfoHash; use crate::tracker; use crate::tracker::services::statistics::get_metrics; +use crate::tracker::services::torrent::get_torrent_info; fn authenticate(tokens: HashMap) -> impl Filter + Clone { #[derive(Deserialize)] @@ -107,28 +107,12 @@ pub fn routes(tracker: &Arc) -> impl Filter)| async move { - let db = tracker.get_torrents().await; - let torrent_entry_option = db.get(&info_hash); - - let torrent_entry = match torrent_entry_option { - Some(torrent_entry) => torrent_entry, - None => { - return Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")); - } - }; - let (seeders, completed, leechers) = torrent_entry.get_stats(); + let optional_torrent_info = get_torrent_info(tracker.clone(), &info_hash).await; - let peers = torrent_entry.get_peers(None); - - let peer_resources = peers.iter().map(|peer| peer::Peer::from(**peer)).collect(); - - Ok(reply::json(&Torrent { - info_hash: info_hash.to_string(), - seeders: u64::from(seeders), - completed: u64::from(completed), - leechers: u64::from(leechers), - peers: Some(peer_resources), - })) + match optional_torrent_info { + Some(info) => Ok(reply::json(&Torrent::from(info))), + None => Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")), + } }); // DELETE /api/whitelist/:info_hash From c36b121dce4674d43011920c56189facf5972bc5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jan 2023 20:01:09 +0000 Subject: [PATCH 0294/1003] refactor(api): [#143] extract service tracker::services::torrent::get_torrents It will be used in the new Axum implementaion for the API. 
In the API enpoint: ``` GET /api/torrents?offset=:u32&limit=:u32 ``` --- src/api/resource/torrent.rs | 61 ++++++++- src/api/routes.rs | 23 +--- src/tracker/services/torrent.rs | 218 +++++++++++++++++++++++++++++--- 3 files changed, 256 insertions(+), 46 deletions(-) diff --git a/src/api/resource/torrent.rs b/src/api/resource/torrent.rs index bec82a132..56fead37a 100644 --- a/src/api/resource/torrent.rs +++ b/src/api/resource/torrent.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use super::peer; -use crate::tracker::services::torrent::Info; +use crate::tracker::services::torrent::{BasicInfo, Info}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Torrent { @@ -16,13 +16,31 @@ pub struct Torrent { #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct ListItem { pub info_hash: String, - pub seeders: u32, - pub completed: u32, - pub leechers: u32, + pub seeders: u64, + pub completed: u64, + pub leechers: u64, // todo: this is always None. Remove field from endpoint? 
pub peers: Option>, } +impl ListItem { + #[must_use] + pub fn new_vec(basic_info_vec: &[BasicInfo]) -> Vec { + basic_info_vec + .iter() + .map(|basic_info| ListItem::from((*basic_info).clone())) + .collect() + } +} + +#[must_use] +pub fn to_resource(basic_info_vec: &[BasicInfo]) -> Vec { + basic_info_vec + .iter() + .map(|basic_info| ListItem::from((*basic_info).clone())) + .collect() +} + impl From for Torrent { fn from(info: Info) -> Self { Self { @@ -37,6 +55,18 @@ impl From for Torrent { } } +impl From for ListItem { + fn from(basic_info: BasicInfo) -> Self { + Self { + info_hash: basic_info.info_hash.to_string(), + seeders: basic_info.seeders, + completed: basic_info.completed, + leechers: basic_info.leechers, + peers: None, + } + } +} + #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; @@ -45,11 +75,11 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use crate::api::resource::peer::Peer; - use crate::api::resource::torrent::Torrent; + use crate::api::resource::torrent::{ListItem, Torrent}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; - use crate::tracker::services::torrent::Info; + use crate::tracker::services::torrent::{BasicInfo, Info}; fn sample_peer() -> peer::Peer { peer::Peer { @@ -82,4 +112,23 @@ mod tests { } ); } + + #[test] + fn torrent_resource_list_item_should_be_converted_from_the_basic_torrent_info() { + assert_eq!( + ListItem::from(BasicInfo { + info_hash: InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + seeders: 1, + completed: 2, + leechers: 3, + }), + ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 2, + leechers: 3, + peers: None, + } + ); + } } diff --git a/src/api/routes.rs b/src/api/routes.rs index f3ff990ff..bb459ee95 100644 --- a/src/api/routes.rs +++ b/src/api/routes.rs @@ -13,7 +13,7 @@ use super::{ActionStatus, TorrentInfoQuery}; 
use crate::protocol::info_hash::InfoHash; use crate::tracker; use crate::tracker::services::statistics::get_metrics; -use crate::tracker::services::torrent::get_torrent_info; +use crate::tracker::services::torrent::{get_torrent_info, get_torrents}; fn authenticate(tokens: HashMap) -> impl Filter + Clone { #[derive(Deserialize)] @@ -64,24 +64,9 @@ pub fn routes(tracker: &Arc) -> impl Filter = db - .iter() - .map(|(info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - ListItem { - info_hash: info_hash.to_string(), - seeders, - completed, - leechers, - peers: None, - } - }) - .skip(offset as usize) - .take(limit as usize) - .collect(); - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) + Result::<_, warp::reject::Rejection>::Ok(reply::json(&ListItem::new_vec( + &get_torrents(tracker.clone(), offset, limit).await, + ))) }); // GET /api/stats diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index da7d24ce5..00cdfe136 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -13,6 +13,14 @@ pub struct Info { pub peers: Option>, } +#[derive(Debug, PartialEq, Clone)] +pub struct BasicInfo { + pub info_hash: InfoHash, + pub seeders: u64, + pub completed: u64, + pub leechers: u64, +} + pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { let db = tracker.get_torrents().await; @@ -40,39 +48,60 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op }) } +pub async fn get_torrents(tracker: Arc, offset: u32, limit: u32) -> Vec { + let db = tracker.get_torrents().await; + + db.iter() + .map(|(info_hash, torrent_entry)| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + BasicInfo { + info_hash: *info_hash, + seeders: u64::from(seeders), + completed: u64::from(completed), + leechers: u64::from(leechers), + } + }) + .skip(offset as usize) + .take(limit as usize) + .collect() +} + #[cfg(test)] mod tests { 
+ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::tracker::peer; + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } + } mod getting_a_torrent_info { - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::str::FromStr; use std::sync::Arc; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::config::{ephemeral_configuration, Configuration}; - use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; - use crate::tracker::peer; use crate::tracker::services::common::tracker_factory; + use crate::tracker::services::torrent::tests::sample_peer; use crate::tracker::services::torrent::{get_torrent_info, Info}; pub fn tracker_configuration() -> Arc { Arc::new(ephemeral_configuration()) } - fn sample_peer() -> peer::Peer { - peer::Peer { - peer_id: peer::Id(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - } - } - #[tokio::test] async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { let tracker = Arc::new(tracker_factory(&tracker_configuration())); @@ -92,14 +121,11 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - tracker .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer()) .await; - let torrent_info = 
get_torrent_info(tracker.clone(), &InfoHash::from_str(&hash).unwrap()) - .await - .unwrap(); + let torrent_info = get_torrent_info(tracker.clone(), &info_hash).await.unwrap(); assert_eq!( torrent_info, @@ -113,4 +139,154 @@ mod tests { ); } } + + mod searching_for_torrents { + + use std::str::FromStr; + use std::sync::Arc; + + use crate::config::{ephemeral_configuration, Configuration}; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::services::common::tracker_factory; + use crate::tracker::services::torrent::tests::sample_peer; + use crate::tracker::services::torrent::{get_torrents, BasicInfo}; + + pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral_configuration()) + } + + #[tokio::test] + async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let offset = 0; + let limit = 4000; + + let torrents = get_torrents(tracker.clone(), offset, limit).await; + + assert_eq!(torrents, vec![]); + } + + #[tokio::test] + async fn should_return_a_summarized_info_for_all_torrents() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let offset = 0; + let limit = 4000; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer()) + .await; + + let torrents = get_torrents(tracker.clone(), offset, limit).await; + + assert_eq!( + torrents, + vec![BasicInfo { + info_hash: InfoHash::from_str(&hash).unwrap(), + seeders: 1, + completed: 0, + leechers: 0, + }] + ); + } + + #[tokio::test] + async fn should_allow_limiting_the_number_of_torrents_in_the_result() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash1 = InfoHash::from_str(&hash1).unwrap(); + let hash2 = 
"03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); + let info_hash2 = InfoHash::from_str(&hash2).unwrap(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash1, &sample_peer()) + .await; + tracker + .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) + .await; + + let offset = 0; + let limit = 1; + + let torrents = get_torrents(tracker.clone(), offset, limit).await; + + assert_eq!(torrents.len(), 1); + } + + #[tokio::test] + async fn should_allow_using_pagination_in_the_result() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash1 = InfoHash::from_str(&hash1).unwrap(); + let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); + let info_hash2 = InfoHash::from_str(&hash2).unwrap(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash1, &sample_peer()) + .await; + tracker + .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) + .await; + + let offset = 1; + let limit = 4000; + + let torrents = get_torrents(tracker.clone(), offset, limit).await; + + assert_eq!(torrents.len(), 1); + assert_eq!( + torrents, + vec![BasicInfo { + info_hash: InfoHash::from_str(&hash1).unwrap(), + seeders: 1, + completed: 0, + leechers: 0, + }] + ); + } + + #[tokio::test] + async fn should_return_torrents_ordered_by_info_hash() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash1 = InfoHash::from_str(&hash1).unwrap(); + tracker + .update_torrent_with_peer_and_get_stats(&info_hash1, &sample_peer()) + .await; + + let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); + let info_hash2 = InfoHash::from_str(&hash2).unwrap(); + tracker + .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) + .await; + + let offset = 0; + let limit = 4000; + + let torrents = get_torrents(tracker.clone(), 
offset, limit).await; + + assert_eq!( + torrents, + vec![ + BasicInfo { + info_hash: InfoHash::from_str(&hash2).unwrap(), + seeders: 1, + completed: 0, + leechers: 0, + }, + BasicInfo { + info_hash: InfoHash::from_str(&hash1).unwrap(), + seeders: 1, + completed: 0, + leechers: 0, + } + ] + ); + } + } } From 1515753b3f2365aad5d646e3efeced1b608b15e9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Jan 2023 15:37:02 +0000 Subject: [PATCH 0295/1003] feat(api): [#143] axum api. GET /api/torrents endpoint --- src/api/routes.rs | 4 +- src/apis/routes.rs | 46 ++++++++++++-- src/apis/server.rs | 11 ++-- src/tracker/services/torrent.rs | 73 ++++++++++++++++++----- tests/tracker_api.rs | 102 +++++++++++++++++++++++++++++++- 5 files changed, 207 insertions(+), 29 deletions(-) diff --git a/src/api/routes.rs b/src/api/routes.rs index bb459ee95..4280cdb35 100644 --- a/src/api/routes.rs +++ b/src/api/routes.rs @@ -13,7 +13,7 @@ use super::{ActionStatus, TorrentInfoQuery}; use crate::protocol::info_hash::InfoHash; use crate::tracker; use crate::tracker::services::statistics::get_metrics; -use crate::tracker::services::torrent::{get_torrent_info, get_torrents}; +use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; fn authenticate(tokens: HashMap) -> impl Filter + Clone { #[derive(Deserialize)] @@ -65,7 +65,7 @@ pub fn routes(tracker: &Arc) -> impl Filter::Ok(reply::json(&ListItem::new_vec( - &get_torrents(tracker.clone(), offset, limit).await, + &get_torrents(tracker.clone(), &Pagination::new(offset, limit)).await, ))) }); diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 9fedbc822..b86a468e2 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -1,25 +1,27 @@ +use std::fmt; use std::str::FromStr; use std::sync::Arc; -use axum::extract::{Path, State}; +use axum::extract::{Path, Query, State}; use axum::response::{IntoResponse, Json, Response}; +use serde::{de, Deserialize, Deserializer}; use serde_json::json; use 
crate::api::resource::stats::Stats; -use crate::api::resource::torrent::Torrent; +use crate::api::resource::torrent::{ListItem, Torrent}; use crate::protocol::info_hash::InfoHash; use crate::tracker::services::statistics::get_metrics; -use crate::tracker::services::torrent::get_torrent_info; +use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; -pub async fn get_stats(State(tracker): State>) -> Json { +pub async fn get_stats_handler(State(tracker): State>) -> Json { Json(Stats::from(get_metrics(tracker.clone()).await)) } /// # Panics /// /// Will panic if it can't parse the infohash in the request -pub async fn get_torrent(State(tracker): State>, Path(info_hash): Path) -> Response { +pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { let optional_torrent_info = get_torrent_info(tracker.clone(), &InfoHash::from_str(&info_hash).unwrap()).await; match optional_torrent_info { @@ -27,3 +29,37 @@ pub async fn get_torrent(State(tracker): State>, Path(info_hash): P None => Json(json!("torrent not known")).into_response(), } } + +#[derive(Deserialize)] +pub struct PaginationParams { + #[serde(default, deserialize_with = "empty_string_as_none")] + pub offset: Option, + pub limit: Option, +} + +pub async fn get_torrents_handler( + State(tracker): State>, + pagination: Query, +) -> Json> { + Json(ListItem::new_vec( + &get_torrents( + tracker.clone(), + &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), + ) + .await, + )) +} + +/// Serde deserialization decorator to map empty Strings to None, +fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> +where + D: Deserializer<'de>, + T: FromStr, + T::Err: fmt::Display, +{ + let opt = Option::::deserialize(de)?; + match opt.as_deref() { + None | Some("") => Ok(None), + Some(s) => FromStr::from_str(s).map_err(de::Error::custom).map(Some), + } +} diff --git a/src/apis/server.rs b/src/apis/server.rs index 
dcd0924c1..879160136 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -10,13 +10,14 @@ use log::info; use warp::hyper; use super::middlewares::auth::auth; -use super::routes::{get_stats, get_torrent}; +use super::routes::{get_stats_handler, get_torrent_handler, get_torrents_handler}; use crate::tracker; pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { let app = Router::new() - .route("/stats", get(get_stats).with_state(tracker.clone())) - .route("/torrent/:info_hash", get(get_torrent).with_state(tracker.clone())) + .route("/stats", get(get_stats_handler).with_state(tracker.clone())) + .route("/torrent/:info_hash", get(get_torrent_handler).with_state(tracker.clone())) + .route("/torrents", get(get_torrents_handler).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -33,7 +34,9 @@ pub fn start_tls( tracker: &Arc, ) -> impl Future> { let app = Router::new() - .route("/stats", get(get_stats).with_state(tracker.clone())) + .route("/stats", get(get_stats_handler).with_state(tracker.clone())) + .route("/torrent/:info_hash", get(get_torrent_handler).with_state(tracker.clone())) + .route("/torrents", get(get_torrents_handler).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let handle = Handle::new(); diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index 00cdfe136..a08fd54d1 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -1,5 +1,7 @@ use std::sync::Arc; +use serde::Deserialize; + use crate::protocol::info_hash::InfoHash; use crate::tracker::peer::Peer; use crate::tracker::Tracker; @@ -21,6 +23,52 @@ pub struct BasicInfo { pub leechers: u64, } +#[derive(Deserialize)] +pub struct Pagination { + pub offset: u32, + pub limit: u32, +} + +impl Pagination { + #[must_use] + pub fn new(offset: u32, limit: 
u32) -> Self { + Self { offset, limit } + } + + #[must_use] + pub fn new_with_options(offset_option: Option, limit_option: Option) -> Self { + let offset = match offset_option { + Some(offset) => offset, + None => Pagination::default_offset(), + }; + let limit = match limit_option { + Some(offset) => offset, + None => Pagination::default_limit(), + }; + + Self { offset, limit } + } + + #[must_use] + pub fn default_offset() -> u32 { + 0 + } + + #[must_use] + pub fn default_limit() -> u32 { + 4000 + } +} + +impl Default for Pagination { + fn default() -> Self { + Self { + offset: Self::default_offset(), + limit: Self::default_limit(), + } + } +} + pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { let db = tracker.get_torrents().await; @@ -48,7 +96,7 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op }) } -pub async fn get_torrents(tracker: Arc, offset: u32, limit: u32) -> Vec { +pub async fn get_torrents(tracker: Arc, pagination: &Pagination) -> Vec { let db = tracker.get_torrents().await; db.iter() @@ -61,8 +109,8 @@ pub async fn get_torrents(tracker: Arc, offset: u32, limit: u32) -> Vec leechers: u64::from(leechers), } }) - .skip(offset as usize) - .take(limit as usize) + .skip(pagination.offset as usize) + .take(pagination.limit as usize) .collect() } @@ -149,7 +197,7 @@ mod tests { use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; use crate::tracker::services::torrent::tests::sample_peer; - use crate::tracker::services::torrent::{get_torrents, BasicInfo}; + use crate::tracker::services::torrent::{get_torrents, BasicInfo, Pagination}; pub fn tracker_configuration() -> Arc { Arc::new(ephemeral_configuration()) @@ -158,10 +206,8 @@ mod tests { #[tokio::test] async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let tracker = Arc::new(tracker_factory(&tracker_configuration())); - let offset = 0; - let limit = 4000; - let torrents = 
get_torrents(tracker.clone(), offset, limit).await; + let torrents = get_torrents(tracker.clone(), &Pagination::default()).await; assert_eq!(torrents, vec![]); } @@ -169,8 +215,6 @@ mod tests { #[tokio::test] async fn should_return_a_summarized_info_for_all_torrents() { let tracker = Arc::new(tracker_factory(&tracker_configuration())); - let offset = 0; - let limit = 4000; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -179,7 +223,7 @@ mod tests { .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer()) .await; - let torrents = get_torrents(tracker.clone(), offset, limit).await; + let torrents = get_torrents(tracker.clone(), &Pagination::default()).await; assert_eq!( torrents, @@ -211,7 +255,7 @@ mod tests { let offset = 0; let limit = 1; - let torrents = get_torrents(tracker.clone(), offset, limit).await; + let torrents = get_torrents(tracker.clone(), &Pagination::new(offset, limit)).await; assert_eq!(torrents.len(), 1); } @@ -235,7 +279,7 @@ mod tests { let offset = 1; let limit = 4000; - let torrents = get_torrents(tracker.clone(), offset, limit).await; + let torrents = get_torrents(tracker.clone(), &Pagination::new(offset, limit)).await; assert_eq!(torrents.len(), 1); assert_eq!( @@ -265,10 +309,7 @@ mod tests { .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) .await; - let offset = 0; - let limit = 4000; - - let torrents = get_torrents(tracker.clone(), offset, limit).await; + let torrents = get_torrents(tracker.clone(), &Pagination::default()).await; assert_eq!( torrents, diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index bc5271c21..e8d1e71eb 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -662,17 +662,115 @@ mod tracker_apis { mod for_torrent_resources { use std::str::FromStr; - use torrust_tracker::api::resource; use torrust_tracker::api::resource::torrent::Torrent; + use torrust_tracker::api::resource::{self, torrent}; use 
torrust_tracker::protocol::info_hash::InfoHash; use crate::api::asserts::{assert_token_not_valid, assert_torrent_not_known, assert_unauthorized}; - use crate::api::client::Client; + use crate::api::client::{Client, Query, QueryParam}; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; use crate::api::server::start_default_api; use crate::api::Version; + #[tokio::test] + async fn should_allow_getting_torrents() { + let api_server = start_default_api(&Version::Axum).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + api_server.add_torrent(&info_hash, &sample_peer()).await; + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .get_torrents(Query::empty()) + .await; + + assert_eq!(response.status(), 200); + assert_eq!( + response.json::>().await.unwrap(), + vec![torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None // Torrent list does not include the peer list for each torrent + }] + ); + } + + #[tokio::test] + async fn should_allow_limiting_the_torrents_in_the_result() { + let api_server = start_default_api(&Version::Axum).await; + + // torrents are ordered alphabetically by infohashes + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + + api_server.add_torrent(&info_hash_1, &sample_peer()).await; + api_server.add_torrent(&info_hash_2, &sample_peer()).await; + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) + .await; + + assert_eq!(response.status(), 200); + assert_eq!( + response.json::>().await.unwrap(), + vec![torrent::ListItem { + info_hash: 
"0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None // Torrent list does not include the peer list for each torrent + }] + ); + } + + #[tokio::test] + async fn should_allow_the_torrents_result_pagination() { + let api_server = start_default_api(&Version::Axum).await; + + // torrents are ordered alphabetically by infohashes + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + + api_server.add_torrent(&info_hash_1, &sample_peer()).await; + api_server.add_torrent(&info_hash_2, &sample_peer()).await; + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) + .await; + + assert_eq!(response.status(), 200); + assert_eq!( + response.json::>().await.unwrap(), + vec![torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None // Torrent list does not include the peer list for each torrent + }] + ); + } + + #[tokio::test] + async fn should_not_allow_getting_torrents_for_unauthenticated_users() { + let api_server = start_default_api(&Version::Axum).await; + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + .get_torrents(Query::empty()) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + .get_torrents(Query::default()) + .await; + + assert_unauthorized(response).await; + } + #[tokio::test] async fn should_allow_getting_a_torrent_info() { let api_server = start_default_api(&Version::Axum).await; From e1ed929f1cf0abf450e246413536530accc4d904 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Jan 2023 18:01:15 +0000 Subject: [PATCH 
0296/1003] test(api): [#143] add tests for database failure --- src/databases/mod.rs | 5 ++ src/databases/mysql.rs | 24 ++++++++ src/databases/sqlite.rs | 22 ++++++++ src/tracker/mod.rs | 6 +- tests/api/asserts.rs | 49 +++++++++++----- tests/api/mod.rs | 11 ++++ tests/tracker_api.rs | 121 ++++++++++++++++++++++++++++++++++++++-- 7 files changed, 218 insertions(+), 20 deletions(-) diff --git a/src/databases/mod.rs b/src/databases/mod.rs index c1d265b56..873dd70eb 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -39,6 +39,11 @@ pub trait Database: Sync + Send { /// Will return `Error` if unable to create own tables. fn create_database_tables(&self) -> Result<(), Error>; + /// # Errors + /// + /// Will return `Err` if unable to drop tables. + fn drop_database_tables(&self) -> Result<(), Error>; + async fn load_persistent_torrents(&self) -> Result, Error>; async fn load_keys(&self) -> Result, Error>; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 8322b2273..71b06378c 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -74,6 +74,30 @@ impl Database for Mysql { Ok(()) } + fn drop_database_tables(&self) -> Result<(), Error> { + let drop_whitelist_table = " + DROP TABLE `whitelist`;" + .to_string(); + + let drop_torrents_table = " + DROP TABLE `torrents`;" + .to_string(); + + let drop_keys_table = " + DROP TABLE `keys`;" + .to_string(); + + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + + conn.query_drop(&drop_whitelist_table) + .expect("Could not drop `whitelist` table."); + conn.query_drop(&drop_torrents_table) + .expect("Could not drop `torrents` table."); + conn.query_drop(&drop_keys_table).expect("Could not drop `keys` table."); + + Ok(()) + } + async fn load_persistent_torrents(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index c5401aacf..1d7caf052 100644 --- a/src/databases/sqlite.rs 
+++ b/src/databases/sqlite.rs @@ -60,6 +60,28 @@ impl Database for Sqlite { .map(|_| ()) } + fn drop_database_tables(&self) -> Result<(), Error> { + let drop_whitelist_table = " + DROP TABLE whitelist;" + .to_string(); + + let drop_torrents_table = " + DROP TABLE torrents;" + .to_string(); + + let drop_keys_table = " + DROP TABLE keys;" + .to_string(); + + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + + conn.execute(&drop_whitelist_table, []) + .and_then(|_| conn.execute(&drop_torrents_table, [])) + .and_then(|_| conn.execute(&drop_keys_table, [])) + .map_err(|_| Error::InvalidQuery) + .map(|_| ()) + } + async fn load_persistent_torrents(&self) -> Result, Error> { let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index f33628355..50d006a3f 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -26,7 +26,7 @@ pub struct Tracker { torrents: RwLock>, stats_event_sender: Option>, stats_repository: statistics::Repo, - database: Box, + pub database: Box, } #[derive(Debug, PartialEq, Default)] @@ -130,7 +130,9 @@ impl Tracker { /// It adds a torrent to the whitelist if it has not been whitelisted previously async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - if self.database.is_info_hash_whitelisted(info_hash).await.unwrap() { + let is_whitelisted = self.database.is_info_hash_whitelisted(info_hash).await?; + + if is_whitelisted { return Ok(()); } diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 5e03c2573..6bf493bc6 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -1,25 +1,48 @@ use reqwest::Response; +pub async fn assert_torrent_not_known(response: Response) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.text().await.unwrap(), "\"torrent not known\""); +} + pub async fn 
assert_token_not_valid(response: Response) { - assert_eq!(response.status(), 500); - assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); - assert_eq!( - response.text().await.unwrap(), - "Unhandled rejection: Err { reason: \"token not valid\" }" - ); + assert_unhandled_rejection(response, "token not valid").await; } pub async fn assert_unauthorized(response: Response) { + assert_unhandled_rejection(response, "unauthorized").await; +} + +pub async fn assert_failed_to_remove_torrent_from_whitelist(response: Response) { + assert_unhandled_rejection(response, "failed to remove torrent from whitelist").await; +} + +pub async fn assert_failed_to_whitelist_torrent(response: Response) { + assert_unhandled_rejection(response, "failed to whitelist torrent").await; +} + +pub async fn assert_failed_to_generate_key(response: Response) { + assert_unhandled_rejection(response, "failed to generate key").await; +} + +pub async fn assert_failed_to_delete_key(response: Response) { + assert_unhandled_rejection(response, "failed to delete key").await; +} + +pub async fn assert_failed_to_reload_whitelist(response: Response) { + assert_unhandled_rejection(response, "failed to reload whitelist").await; +} + +pub async fn assert_failed_to_reload_keys(response: Response) { + assert_unhandled_rejection(response, "failed to reload keys").await; +} + +async fn assert_unhandled_rejection(response: Response, reason: &str) { assert_eq!(response.status(), 500); assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); assert_eq!( response.text().await.unwrap(), - "Unhandled rejection: Err { reason: \"unauthorized\" }" + format!("Unhandled rejection: Err {{ reason: \"{reason}\" }}") ); } - -pub async fn assert_torrent_not_known(response: Response) { - assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - assert_eq!(response.text().await.unwrap(), "\"torrent not 
known\""); -} diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 52980581f..1311a2356 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -1,3 +1,7 @@ +use std::sync::Arc; + +use torrust_tracker::tracker::Tracker; + pub mod asserts; pub mod client; pub mod connection_info; @@ -8,3 +12,10 @@ pub enum Version { Warp, Axum, } + +/// It forces a database error by dropping all tables. +/// That makes any query fail. +/// code-review: alternatively we could inject a database mock in the future. +pub fn force_database_error(tracker: &Arc) { + tracker.database.drop_database_tables().unwrap(); +} diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index e8d1e71eb..dc667a896 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -37,9 +37,11 @@ mod tracker_api { Keys: POST /api/key/:seconds_valid - GET /api/keys/reload DELETE /api/key/:key + Key command: + GET /api/keys/reload + */ mod for_stats_resources { @@ -291,11 +293,14 @@ mod tracker_api { use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{ + assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, + assert_failed_to_whitelist_torrent, assert_token_not_valid, assert_unauthorized, + }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::server::start_default_api; - use crate::api::Version; + use crate::api::{force_database_error, Version}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { @@ -350,6 +355,38 @@ mod tracker_api { assert_unauthorized(response).await; } + #[tokio::test] + async fn should_return_an_error_when_the_torrent_cannot_be_whitelisted() { + let api_server = start_default_api(&Version::Warp).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + force_database_error(&api_server.tracker); + + let response = 
Client::new(api_server.get_connection_info(), &Version::Warp) + .whitelist_a_torrent(&info_hash) + .await; + + assert_failed_to_whitelist_torrent(response).await; + } + + #[tokio::test] + async fn should_return_an_error_when_the_torrent_cannot_be_removed_from_the_whitelist() { + let api_server = start_default_api(&Version::Warp).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_failed_to_remove_torrent_from_whitelist(response).await; + } + #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { let api_server = start_default_api(&Version::Warp).await; @@ -412,6 +449,23 @@ mod tracker_api { ); */ } + + #[tokio::test] + async fn should_return_an_error_when_the_whitelist_cannot_be_reloaded_from_the_database() { + let api_server = start_default_api(&Version::Warp).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .reload_whitelist() + .await; + + assert_failed_to_reload_whitelist(response).await; + } } mod for_key_resources { @@ -420,11 +474,14 @@ mod tracker_api { use torrust_tracker::api::resource::auth_key::AuthKey; use torrust_tracker::tracker::auth::Key; - use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{ + assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_token_not_valid, + assert_unauthorized, + }; use 
crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::server::start_default_api; - use crate::api::Version; + use crate::api::{force_database_error, Version}; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { @@ -463,6 +520,20 @@ mod tracker_api { assert_unauthorized(response).await; } + #[tokio::test] + async fn should_return_an_error_when_the_auth_key_cannot_be_generated() { + let api_server = start_default_api(&Version::Warp).await; + + force_database_error(&api_server.tracker); + + let seconds_valid = 60; + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .generate_auth_key(seconds_valid) + .await; + + assert_failed_to_generate_key(response).await; + } + #[tokio::test] async fn should_allow_deleting_an_auth_key() { let api_server = start_default_api(&Version::Warp).await; @@ -482,6 +553,26 @@ mod tracker_api { assert_eq!(response.text().await.unwrap(), "{\"status\":\"ok\"}"); } + #[tokio::test] + async fn should_return_an_error_when_the_auth_key_cannot_be_deleted() { + let api_server = start_default_api(&Version::Warp).await; + + let seconds_valid = 60; + let auth_key = api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .delete_auth_key(&auth_key.key) + .await; + + assert_failed_to_delete_key(response).await; + } + #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { let api_server = start_default_api(&Version::Warp).await; @@ -533,6 +624,26 @@ mod tracker_api { assert_eq!(response.status(), 200); } + #[tokio::test] + async fn should_return_an_error_when_keys_cannot_be_reloaded() { + let api_server = start_default_api(&Version::Warp).await; + + let seconds_valid = 60; + api_server + .tracker + 
.generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .reload_keys() + .await; + + assert_failed_to_reload_keys(response).await; + } + #[tokio::test] async fn should_not_allow_reloading_keys_for_unauthenticated_users() { let api_server = start_default_api(&Version::Warp).await; From 5c5fcbd26f7e5f03d02d80105b85eaf6eb0d4b4d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 12:51:23 +0000 Subject: [PATCH 0297/1003] feat(api): [#143] axum api. POST /api/whitelist/:info_hash endpoint --- src/apis/routes.rs | 41 ++++++++++++++++++- src/apis/server.rs | 18 ++++++++- tests/tracker_api.rs | 94 ++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 143 insertions(+), 10 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index b86a468e2..1315c181a 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -3,8 +3,9 @@ use std::str::FromStr; use std::sync::Arc; use axum::extract::{Path, Query, State}; +use axum::http::{header, StatusCode}; use axum::response::{IntoResponse, Json, Response}; -use serde::{de, Deserialize, Deserializer}; +use serde::{de, Deserialize, Deserializer, Serialize}; use serde_json::json; use crate::api::resource::stats::Stats; @@ -14,6 +15,31 @@ use crate::tracker::services::statistics::get_metrics; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; +#[derive(Serialize, Debug)] +#[serde(tag = "status", rename_all = "snake_case")] +pub enum ActionStatus<'a> { + Ok, + Err { reason: std::borrow::Cow<'a, str> }, +} + +fn response_ok() -> Response { + ( + StatusCode::OK, + [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], + format!("{:?}", ActionStatus::Ok), + ) + .into_response() +} + +fn response_err(reason: String) -> Response { + ( + StatusCode::INTERNAL_SERVER_ERROR, + [(header::CONTENT_TYPE, 
"text/plain; charset=utf-8")], + format!("Unhandled rejection: {:?}", ActionStatus::Err { reason: reason.into() }), + ) + .into_response() +} + pub async fn get_stats_handler(State(tracker): State>) -> Json { Json(Stats::from(get_metrics(tracker.clone()).await)) } @@ -50,6 +76,19 @@ pub async fn get_torrents_handler( )) } +/// # Panics +/// +/// Will panic if it can't parse the infohash in the request +pub async fn add_torrent_to_whitelist_handler(State(tracker): State>, Path(info_hash): Path) -> Response { + match tracker + .add_torrent_to_whitelist(&InfoHash::from_str(&info_hash).unwrap()) + .await + { + Ok(..) => response_ok(), + Err(..) => response_err("failed to whitelist torrent".to_string()), + } +} + /// Serde deserialization decorator to map empty Strings to None, fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> where diff --git a/src/apis/server.rs b/src/apis/server.rs index 879160136..1c296cf56 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -1,7 +1,7 @@ use std::net::SocketAddr; use std::sync::Arc; -use axum::routing::get; +use axum::routing::{get, post}; use axum::{middleware, Router}; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; @@ -10,14 +10,21 @@ use log::info; use warp::hyper; use super::middlewares::auth::auth; -use super::routes::{get_stats_handler, get_torrent_handler, get_torrents_handler}; +use super::routes::{add_torrent_to_whitelist_handler, get_stats_handler, get_torrent_handler, get_torrents_handler}; use crate::tracker; pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { let app = Router::new() + // Stats .route("/stats", get(get_stats_handler).with_state(tracker.clone())) + // Torrents .route("/torrent/:info_hash", get(get_torrent_handler).with_state(tracker.clone())) .route("/torrents", get(get_torrents_handler).with_state(tracker.clone())) + // Whitelisted torrents + .route( + "/whitelist/:info_hash", + 
post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), + ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -34,9 +41,16 @@ pub fn start_tls( tracker: &Arc, ) -> impl Future> { let app = Router::new() + // Stats .route("/stats", get(get_stats_handler).with_state(tracker.clone())) + // Torrents .route("/torrent/:info_hash", get(get_torrent_handler).with_state(tracker.clone())) .route("/torrents", get(get_torrents_handler).with_state(tracker.clone())) + // Whitelisted torrents + .route( + "/whitelist/:info_hash", + post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), + ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let handle = Handle::new(); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index dc667a896..c85068521 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -371,36 +371,36 @@ mod tracker_api { } #[tokio::test] - async fn should_return_an_error_when_the_torrent_cannot_be_removed_from_the_whitelist() { + async fn should_allow_removing_a_torrent_from_the_whitelist() { let api_server = start_default_api(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) .remove_torrent_from_whitelist(&hash) .await; - assert_failed_to_remove_torrent_from_whitelist(response).await; + assert_eq!(response.status(), 200); + assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); } #[tokio::test] - async fn should_allow_removing_a_torrent_from_the_whitelist() { + async fn should_return_an_error_when_the_torrent_cannot_be_removed_from_the_whitelist() { let api_server = start_default_api(&Version::Warp).await; let 
hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + force_database_error(&api_server.tracker); + let response = Client::new(api_server.get_connection_info(), &Version::Warp) .remove_torrent_from_whitelist(&hash) .await; - assert_eq!(response.status(), 200); - assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); + assert_failed_to_remove_torrent_from_whitelist(response).await; } #[tokio::test] @@ -943,4 +943,84 @@ mod tracker_apis { assert_unauthorized(response).await; } } + + mod for_whitelisted_torrent_resources { + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::api::asserts::{assert_failed_to_whitelist_torrent, assert_token_not_valid, assert_unauthorized}; + use crate::api::client::Client; + use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::server::start_default_api; + use crate::api::{force_database_error, Version}; + + #[tokio::test] + async fn should_allow_whitelisting_a_torrent() { + let api_server = start_default_api(&Version::Axum).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let res = Client::new(api_server.get_connection_info(), &Version::Axum) + .whitelist_a_torrent(&info_hash) + .await; + + assert_eq!(res.status(), 200); + assert!( + api_server + .tracker + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await + ); + } + + #[tokio::test] + async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { + let api_server = start_default_api(&Version::Axum).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let api_client = Client::new(api_server.get_connection_info(), &Version::Axum); + + let res = api_client.whitelist_a_torrent(&info_hash).await; + assert_eq!(res.status(), 200); + 
+ let res = api_client.whitelist_a_torrent(&info_hash).await; + assert_eq!(res.status(), 200); + } + + #[tokio::test] + async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { + let api_server = start_default_api(&Version::Axum).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + .whitelist_a_torrent(&info_hash) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + .whitelist_a_torrent(&info_hash) + .await; + + assert_unauthorized(response).await; + } + + #[tokio::test] + async fn should_return_an_error_when_the_torrent_cannot_be_whitelisted() { + let api_server = start_default_api(&Version::Axum).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .whitelist_a_torrent(&info_hash) + .await; + + assert_failed_to_whitelist_torrent(response).await; + } + } } From 2ddf2684b439d28ba9fd4743e13607472e688ee1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 13:32:58 +0000 Subject: [PATCH 0298/1003] feat(api): [#143] axum api. 
DELETE /api/whitelist/:info_hash endpoint --- src/apis/routes.rs | 16 ++++++++++++ src/apis/server.rs | 15 +++++++++-- tests/tracker_api.rs | 60 +++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 88 insertions(+), 3 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 1315c181a..9b909abed 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -89,6 +89,22 @@ pub async fn add_torrent_to_whitelist_handler(State(tracker): State } } +/// # Panics +/// +/// Will panic if it can't parse the infohash in the request +pub async fn delete_torrent_from_whitelist_handler( + State(tracker): State>, + Path(info_hash): Path, +) -> Response { + match tracker + .remove_torrent_from_whitelist(&InfoHash::from_str(&info_hash).unwrap()) + .await + { + Ok(..) => response_ok(), + Err(..) => response_err("failed to remove torrent from whitelist".to_string()), + } +} + /// Serde deserialization decorator to map empty Strings to None, fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> where diff --git a/src/apis/server.rs b/src/apis/server.rs index 1c296cf56..e03eae55a 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -1,7 +1,7 @@ use std::net::SocketAddr; use std::sync::Arc; -use axum::routing::{get, post}; +use axum::routing::{delete, get, post}; use axum::{middleware, Router}; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; @@ -10,7 +10,10 @@ use log::info; use warp::hyper; use super::middlewares::auth::auth; -use super::routes::{add_torrent_to_whitelist_handler, get_stats_handler, get_torrent_handler, get_torrents_handler}; +use super::routes::{ + add_torrent_to_whitelist_handler, delete_torrent_from_whitelist_handler, get_stats_handler, get_torrent_handler, + get_torrents_handler, +}; use crate::tracker; pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { @@ -25,6 +28,10 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl F "/whitelist/:info_hash", 
post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), ) + .route( + "/whitelist/:info_hash", + delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), + ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -51,6 +58,10 @@ pub fn start_tls( "/whitelist/:info_hash", post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), ) + .route( + "/whitelist/:info_hash", + delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), + ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let handle = Handle::new(); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index c85068521..0acf7e428 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -949,7 +949,10 @@ mod tracker_apis { use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::asserts::{assert_failed_to_whitelist_torrent, assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{ + assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, assert_token_not_valid, + assert_unauthorized, + }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::server::start_default_api; @@ -1022,5 +1025,60 @@ mod tracker_apis { assert_failed_to_whitelist_torrent(response).await; } + + #[tokio::test] + async fn should_allow_removing_a_torrent_from_the_whitelist() { + let api_server = start_default_api(&Version::Axum).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_eq!(response.status(), 200); + 
assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); + } + + #[tokio::test] + async fn should_return_an_error_when_the_torrent_cannot_be_removed_from_the_whitelist() { + let api_server = start_default_api(&Version::Axum).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_failed_to_remove_torrent_from_whitelist(response).await; + } + + #[tokio::test] + async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { + let api_server = start_default_api(&Version::Axum).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_token_not_valid(response).await; + + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_unauthorized(response).await; + } } } From a58d8310fd36a99e60b05ce61ccff85f5632cc71 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 14:01:42 +0000 Subject: [PATCH 0299/1003] feat(api): [#143] axum api. 
GET /api/whitelist/reload endpoint --- src/apis/routes.rs | 7 +++++++ src/apis/server.rs | 14 +++++++++++++- tests/api/asserts.rs | 2 ++ tests/tracker_api.rs | 46 ++++++++++++++++++++++++++++++++++++++++++-- 4 files changed, 66 insertions(+), 3 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 9b909abed..93209c285 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -105,6 +105,13 @@ pub async fn delete_torrent_from_whitelist_handler( } } +pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { + match tracker.load_whitelist().await { + Ok(..) => response_ok(), + Err(..) => response_err("failed to reload whitelist".to_string()), + } +} + /// Serde deserialization decorator to map empty Strings to None, fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> where diff --git a/src/apis/server.rs b/src/apis/server.rs index e03eae55a..fb0e4b376 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -12,11 +12,12 @@ use warp::hyper; use super::middlewares::auth::auth; use super::routes::{ add_torrent_to_whitelist_handler, delete_torrent_from_whitelist_handler, get_stats_handler, get_torrent_handler, - get_torrents_handler, + get_torrents_handler, reload_whitelist_handler, }; use crate::tracker; pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { + // todo: duplicate routes definition. See `start_tls` function. 
let app = Router::new() // Stats .route("/stats", get(get_stats_handler).with_state(tracker.clone())) @@ -32,6 +33,11 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl F "/whitelist/:info_hash", delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), ) + // Whitelist command + .route( + "/whitelist/:info_hash", + get(reload_whitelist_handler).with_state(tracker.clone()), + ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -47,6 +53,7 @@ pub fn start_tls( ssl_config: RustlsConfig, tracker: &Arc, ) -> impl Future> { + // todo: duplicate routes definition. See `start` function. let app = Router::new() // Stats .route("/stats", get(get_stats_handler).with_state(tracker.clone())) @@ -62,6 +69,11 @@ pub fn start_tls( "/whitelist/:info_hash", delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), ) + // Whitelist command + .route( + "/whitelist/:info_hash", + get(reload_whitelist_handler).with_state(tracker.clone()), + ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let handle = Handle::new(); diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 6bf493bc6..0a2b3fad6 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -1,3 +1,5 @@ +// code-review: should we use macros to return the exact line where the assert fails? 
+ use reqwest::Response; pub async fn assert_torrent_not_known(response: Response) { diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 0acf7e428..37a6033c7 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -950,8 +950,8 @@ mod tracker_apis { use torrust_tracker::protocol::info_hash::InfoHash; use crate::api::asserts::{ - assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, assert_token_not_valid, - assert_unauthorized, + assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, + assert_failed_to_whitelist_torrent, assert_token_not_valid, assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -1080,5 +1080,47 @@ mod tracker_apis { assert_unauthorized(response).await; } + + #[tokio::test] + async fn should_allow_reload_the_whitelist_from_the_database() { + let api_server = start_default_api(&Version::Axum).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .reload_whitelist() + .await; + + assert_eq!(response.status(), 200); + /* This assert fails because the whitelist has not been reloaded yet. + We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent + is whitelisted and use that endpoint to check if the torrent is still there after reloading. 
+ assert!( + !(api_server + .tracker + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await) + ); + */ + } + + #[tokio::test] + async fn should_return_an_error_when_the_whitelist_cannot_be_reloaded_from_the_database() { + let api_server = start_default_api(&Version::Axum).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .reload_whitelist() + .await; + + assert_failed_to_reload_whitelist(response).await; + } } } From 0282e33931718e71ab4f2b05b9948cc50ccc91b1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 14:31:23 +0000 Subject: [PATCH 0300/1003] feat(api): [#143] axum api. POST /api/key/:seconds_valid endpoint --- src/apis/routes.rs | 18 ++++++++++++ src/apis/server.rs | 14 +++++++-- tests/tracker_api.rs | 68 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 98 insertions(+), 2 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 93209c285..d9b9c2691 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -1,6 +1,7 @@ use std::fmt; use std::str::FromStr; use std::sync::Arc; +use std::time::Duration; use axum::extract::{Path, Query, State}; use axum::http::{header, StatusCode}; @@ -8,6 +9,7 @@ use axum::response::{IntoResponse, Json, Response}; use serde::{de, Deserialize, Deserializer, Serialize}; use serde_json::json; +use crate::api::resource::auth_key::AuthKey; use crate::api::resource::stats::Stats; use crate::api::resource::torrent::{ListItem, Torrent}; use crate::protocol::info_hash::InfoHash; @@ -40,6 +42,15 @@ fn response_err(reason: String) -> Response { .into_response() } +fn response_auth_key(auth_key: &AuthKey) -> Response { + ( + StatusCode::OK, + [(header::CONTENT_TYPE, "application/json; 
charset=utf-8")], + serde_json::to_string(auth_key).unwrap(), + ) + .into_response() +} + pub async fn get_stats_handler(State(tracker): State>) -> Json { Json(Stats::from(get_metrics(tracker.clone()).await)) } @@ -112,6 +123,13 @@ pub async fn reload_whitelist_handler(State(tracker): State>) -> Re } } +pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid): Path) -> Response { + match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { + Ok(auth_key) => response_auth_key(&AuthKey::from(auth_key)), + Err(_) => response_err("failed to generate key".to_string()), + } +} + /// Serde deserialization decorator to map empty Strings to None, fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> where diff --git a/src/apis/server.rs b/src/apis/server.rs index fb0e4b376..ecf2a54cc 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -11,8 +11,8 @@ use warp::hyper; use super::middlewares::auth::auth; use super::routes::{ - add_torrent_to_whitelist_handler, delete_torrent_from_whitelist_handler, get_stats_handler, get_torrent_handler, - get_torrents_handler, reload_whitelist_handler, + add_torrent_to_whitelist_handler, delete_torrent_from_whitelist_handler, generate_auth_key_handler, get_stats_handler, + get_torrent_handler, get_torrents_handler, reload_whitelist_handler, }; use crate::tracker; @@ -38,6 +38,11 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl F "/whitelist/:info_hash", get(reload_whitelist_handler).with_state(tracker.clone()), ) + // Keys + .route( + "/key/:seconds_valid", + post(generate_auth_key_handler).with_state(tracker.clone()), + ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -74,6 +79,11 @@ pub fn start_tls( "/whitelist/:info_hash", get(reload_whitelist_handler).with_state(tracker.clone()), ) + // Keys + .route( + "/key/:seconds_valid", + 
post(generate_auth_key_handler).with_state(tracker.clone()), + ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let handle = Handle::new(); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 37a6033c7..7ec789b22 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -1123,4 +1123,72 @@ mod tracker_apis { assert_failed_to_reload_whitelist(response).await; } } + + mod for_key_resources { + //use std::time::Duration; + + use torrust_tracker::api::resource::auth_key::AuthKey; + use torrust_tracker::tracker::auth::Key; + + use crate::api::asserts::{assert_failed_to_generate_key, assert_token_not_valid, assert_unauthorized}; + /*use crate::api::asserts::{ + assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_token_not_valid, + assert_unauthorized, + };*/ + use crate::api::client::Client; + use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::server::start_default_api; + use crate::api::{force_database_error, Version}; + + #[tokio::test] + async fn should_allow_generating_a_new_auth_key() { + let api_server = start_default_api(&Version::Axum).await; + + let seconds_valid = 60; + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .generate_auth_key(seconds_valid) + .await; + + // Verify the key with the tracker + assert!(api_server + .tracker + .verify_auth_key(&Key::from(response.json::().await.unwrap())) + .await + .is_ok()); + } + + #[tokio::test] + async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { + let api_server = start_default_api(&Version::Axum).await; + + let seconds_valid = 60; + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + .generate_auth_key(seconds_valid) + .await; + + assert_token_not_valid(response).await; + + let response = 
Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + .generate_auth_key(seconds_valid) + .await; + + assert_unauthorized(response).await; + } + + #[tokio::test] + async fn should_return_an_error_when_the_auth_key_cannot_be_generated() { + let api_server = start_default_api(&Version::Axum).await; + + force_database_error(&api_server.tracker); + + let seconds_valid = 60; + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .generate_auth_key(seconds_valid) + .await; + + assert_failed_to_generate_key(response).await; + } + } } From 6b2e3bcfb55b75de20af4505f0f45d680b64da25 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 16:10:10 +0000 Subject: [PATCH 0301/1003] feat(api): [#143] axum api. DELETE /api/key/:key endpoint --- src/apis/routes.rs | 11 ++++-- src/apis/server.rs | 18 ++++++---- tests/api/asserts.rs | 6 ++++ tests/tracker_api.rs | 81 ++++++++++++++++++++++++++++++++++++++++---- 4 files changed, 102 insertions(+), 14 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index d9b9c2691..305ecefcc 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -123,13 +123,20 @@ pub async fn reload_whitelist_handler(State(tracker): State>) -> Re } } -pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid): Path) -> Response { - match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { +pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { + match tracker.generate_auth_key(Duration::from_secs(seconds_valid_or_key)).await { Ok(auth_key) => response_auth_key(&AuthKey::from(auth_key)), Err(_) => response_err("failed to generate key".to_string()), } } +pub async fn delete_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { + match tracker.remove_auth_key(&seconds_valid_or_key).await { + Ok(_) => response_ok(), + Err(_) => response_err("failed 
to delete key".to_string()), + } +} + /// Serde deserialization decorator to map empty Strings to None, fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> where diff --git a/src/apis/server.rs b/src/apis/server.rs index ecf2a54cc..1184908de 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -11,8 +11,8 @@ use warp::hyper; use super::middlewares::auth::auth; use super::routes::{ - add_torrent_to_whitelist_handler, delete_torrent_from_whitelist_handler, generate_auth_key_handler, get_stats_handler, - get_torrent_handler, get_torrents_handler, reload_whitelist_handler, + add_torrent_to_whitelist_handler, delete_auth_key_handler, delete_torrent_from_whitelist_handler, generate_auth_key_handler, + get_stats_handler, get_torrent_handler, get_torrents_handler, reload_whitelist_handler, }; use crate::tracker; @@ -40,8 +40,11 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl F ) // Keys .route( - "/key/:seconds_valid", - post(generate_auth_key_handler).with_state(tracker.clone()), + "/key/:seconds_valid_or_key", + post(generate_auth_key_handler) + .with_state(tracker.clone()) + .delete(delete_auth_key_handler) + .with_state(tracker.clone()), ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); @@ -81,8 +84,11 @@ pub fn start_tls( ) // Keys .route( - "/key/:seconds_valid", - post(generate_auth_key_handler).with_state(tracker.clone()), + "/key/:seconds_valid_or_key", + post(generate_auth_key_handler) + .with_state(tracker.clone()) + .delete(delete_auth_key_handler) + .with_state(tracker.clone()), ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 0a2b3fad6..e502292f3 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -2,6 +2,12 @@ use reqwest::Response; +pub async fn assert_ok(response: Response) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + 
assert_eq!(response.text().await.unwrap(), "Ok"); +} + pub async fn assert_torrent_not_known(response: Response) { assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 7ec789b22..30c4fa9db 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -1125,16 +1125,14 @@ mod tracker_apis { } mod for_key_resources { - //use std::time::Duration; + use std::time::Duration; use torrust_tracker::api::resource::auth_key::AuthKey; use torrust_tracker::tracker::auth::Key; - use crate::api::asserts::{assert_failed_to_generate_key, assert_token_not_valid, assert_unauthorized}; - /*use crate::api::asserts::{ - assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_token_not_valid, - assert_unauthorized, - };*/ + use crate::api::asserts::{ + assert_failed_to_delete_key, assert_failed_to_generate_key, assert_ok, assert_token_not_valid, assert_unauthorized, + }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::server::start_default_api; @@ -1190,5 +1188,76 @@ mod tracker_apis { assert_failed_to_generate_key(response).await; } + + #[tokio::test] + async fn should_allow_deleting_an_auth_key() { + let api_server = start_default_api(&Version::Axum).await; + + let seconds_valid = 60; + let auth_key = api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .delete_auth_key(&auth_key.key) + .await; + + assert_ok(response).await; + } + + #[tokio::test] + async fn should_return_an_error_when_the_auth_key_cannot_be_deleted() { + let api_server = start_default_api(&Version::Warp).await; + + let seconds_valid = 60; + let auth_key = api_server + .tracker + 
.generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .delete_auth_key(&auth_key.key) + .await; + + assert_failed_to_delete_key(response).await; + } + + #[tokio::test] + async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { + let api_server = start_default_api(&Version::Warp).await; + + let seconds_valid = 60; + + // Generate new auth key + let auth_key = api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + .delete_auth_key(&auth_key.key) + .await; + + assert_token_not_valid(response).await; + + // Generate new auth key + let auth_key = api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + .delete_auth_key(&auth_key.key) + .await; + + assert_unauthorized(response).await; + } } } From 03ba166bdc8ebed49305613a175525cab324aea3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 16:18:31 +0000 Subject: [PATCH 0302/1003] feat(api): [#143] axum api. GET /api/keys/reload endpoint --- src/apis/routes.rs | 7 +++++ src/apis/server.rs | 6 +++- tests/tracker_api.rs | 75 ++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 81 insertions(+), 7 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 305ecefcc..b9d0603b5 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -137,6 +137,13 @@ pub async fn delete_auth_key_handler(State(tracker): State>, Path(s } } +pub async fn reload_keys_handler(State(tracker): State>) -> Response { + match tracker.load_keys().await { + Ok(..) => response_ok(), + Err(..) 
=> response_err("failed to reload keys".to_string()), + } +} + /// Serde deserialization decorator to map empty Strings to None, fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> where diff --git a/src/apis/server.rs b/src/apis/server.rs index 1184908de..d046f1714 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -12,7 +12,7 @@ use warp::hyper; use super::middlewares::auth::auth; use super::routes::{ add_torrent_to_whitelist_handler, delete_auth_key_handler, delete_torrent_from_whitelist_handler, generate_auth_key_handler, - get_stats_handler, get_torrent_handler, get_torrents_handler, reload_whitelist_handler, + get_stats_handler, get_torrent_handler, get_torrents_handler, reload_keys_handler, reload_whitelist_handler, }; use crate::tracker; @@ -46,6 +46,8 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl F .delete(delete_auth_key_handler) .with_state(tracker.clone()), ) + // Key command + .route("/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -90,6 +92,8 @@ pub fn start_tls( .delete(delete_auth_key_handler) .with_state(tracker.clone()), ) + // Key command + .route("/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let handle = Handle::new(); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 30c4fa9db..46a11b482 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -1131,7 +1131,8 @@ mod tracker_apis { use torrust_tracker::tracker::auth::Key; use crate::api::asserts::{ - assert_failed_to_delete_key, assert_failed_to_generate_key, assert_ok, assert_token_not_valid, assert_unauthorized, + assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_ok, + assert_token_not_valid, assert_unauthorized, }; use 
crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -1209,7 +1210,7 @@ mod tracker_apis { #[tokio::test] async fn should_return_an_error_when_the_auth_key_cannot_be_deleted() { - let api_server = start_default_api(&Version::Warp).await; + let api_server = start_default_api(&Version::Axum).await; let seconds_valid = 60; let auth_key = api_server @@ -1220,7 +1221,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info(), &Version::Axum) .delete_auth_key(&auth_key.key) .await; @@ -1229,7 +1230,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; + let api_server = start_default_api(&Version::Axum).await; let seconds_valid = 60; @@ -1240,7 +1241,7 @@ mod tracker_apis { .await .unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) .delete_auth_key(&auth_key.key) .await; @@ -1253,11 +1254,73 @@ mod tracker_apis { .await .unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) .delete_auth_key(&auth_key.key) .await; assert_unauthorized(response).await; } + + #[tokio::test] + async fn should_allow_reloading_keys() { + let api_server = start_default_api(&Version::Axum).await; + + let seconds_valid = 60; + api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .reload_keys() + .await; 
+ + assert_eq!(response.status(), 200); + } + + #[tokio::test] + async fn should_return_an_error_when_keys_cannot_be_reloaded() { + let api_server = start_default_api(&Version::Axum).await; + + let seconds_valid = 60; + api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .reload_keys() + .await; + + assert_failed_to_reload_keys(response).await; + } + + #[tokio::test] + async fn should_not_allow_reloading_keys_for_unauthenticated_users() { + let api_server = start_default_api(&Version::Axum).await; + + let seconds_valid = 60; + api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + .reload_keys() + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + .reload_keys() + .await; + + assert_unauthorized(response).await; + } } } From 5d9dd9d90164e59915a200d66b5f2a293cbd38d1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 17:50:18 +0000 Subject: [PATCH 0303/1003] refactor(api): extract asserts in tests --- src/apis/routes.rs | 4 +- tests/api/asserts.rs | 46 ++++++++++- tests/tracker_api.rs | 193 ++++++++++++++++++++++--------------------- 3 files changed, 145 insertions(+), 98 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index b9d0603b5..7b1dc53f9 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -27,8 +27,8 @@ pub enum ActionStatus<'a> { fn response_ok() -> Response { ( StatusCode::OK, - [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], - format!("{:?}", ActionStatus::Ok), + [(header::CONTENT_TYPE, "application/json")], + serde_json::to_string(&ActionStatus::Ok).unwrap(), ) 
.into_response() } diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index e502292f3..1bc067490 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -1,13 +1,55 @@ // code-review: should we use macros to return the exact line where the assert fails? use reqwest::Response; +use torrust_tracker::api::resource::auth_key::AuthKey; +use torrust_tracker::api::resource::stats::Stats; +use torrust_tracker::api::resource::torrent::{ListItem, Torrent}; + +// Resource responses + +pub async fn assert_stats(response: Response, stats: Stats) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::().await.unwrap(), stats); +} + +pub async fn assert_torrent_list(response: Response, torrents: Vec) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::>().await.unwrap(), torrents); +} + +pub async fn assert_torrent_info(response: Response, torrent: Torrent) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::().await.unwrap(), torrent); +} + +pub async fn assert_auth_key(response: Response) -> AuthKey { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + response.json::().await.unwrap() +} + +pub async fn assert_auth_key_utf8(response: Response) -> AuthKey { + assert_eq!(response.status(), 200); + assert_eq!( + response.headers().get("content-type").unwrap(), + "application/json; charset=utf-8" + ); + response.json::().await.unwrap() +} + +// OK response pub async fn assert_ok(response: Response) { assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); - assert_eq!(response.text().await.unwrap(), "Ok"); + 
assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.text().await.unwrap(), "{\"status\":\"ok\"}"); } +// Error responses + pub async fn assert_torrent_not_known(response: Response) { assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 46a11b482..ca1d2332d 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -50,7 +50,7 @@ mod tracker_api { use torrust_tracker::api::resource::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; @@ -72,9 +72,8 @@ mod tracker_api { .get_tracker_statistics() .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::().await.unwrap(), + assert_stats( + response, Stats { torrents: 1, seeders: 1, @@ -92,8 +91,9 @@ mod tracker_api { udp6_connections_handled: 0, udp6_announces_handled: 0, udp6_scrapes_handled: 0, - } - ); + }, + ) + .await; } #[tokio::test] @@ -121,7 +121,9 @@ mod tracker_api { use torrust_tracker::api::resource::torrent::{self, Torrent}; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::asserts::{assert_token_not_valid, assert_torrent_not_known, assert_unauthorized}; + use crate::api::asserts::{ + assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, + }; use crate::api::client::{Client, Query, QueryParam}; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; @@ -140,17 +142,17 @@ mod tracker_api { .get_torrents(Query::empty()) 
.await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::>().await.unwrap(), + assert_torrent_list( + response, vec![torrent::ListItem { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, leechers: 0, - peers: None // Torrent list does not include the peer list for each torrent - }] - ); + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; } #[tokio::test] @@ -168,17 +170,17 @@ mod tracker_api { .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::>().await.unwrap(), + assert_torrent_list( + response, vec![torrent::ListItem { info_hash: "0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), seeders: 1, completed: 0, leechers: 0, - peers: None // Torrent list does not include the peer list for each torrent - }] - ); + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; } #[tokio::test] @@ -196,17 +198,17 @@ mod tracker_api { .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::>().await.unwrap(), + assert_torrent_list( + response, vec![torrent::ListItem { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, leechers: 0, - peers: None // Torrent list does not include the peer list for each torrent - }] - ); + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; } #[tokio::test] @@ -240,17 +242,17 @@ mod tracker_api { .get_torrent(&info_hash.to_string()) .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::().await.unwrap(), + assert_torrent_info( + response, Torrent { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, leechers: 0, - peers: 
Some(vec![resource::peer::Peer::from(peer)]) - } - ); + peers: Some(vec![resource::peer::Peer::from(peer)]), + }, + ) + .await; } #[tokio::test] @@ -295,7 +297,7 @@ mod tracker_api { use crate::api::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, - assert_failed_to_whitelist_torrent, assert_token_not_valid, assert_unauthorized, + assert_failed_to_whitelist_torrent, assert_ok, assert_token_not_valid, assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -308,11 +310,11 @@ mod tracker_api { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let res = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info(), &Version::Warp) .whitelist_a_torrent(&info_hash) .await; - assert_eq!(res.status(), 200); + assert_ok(response).await; assert!( api_server .tracker @@ -329,11 +331,11 @@ mod tracker_api { let api_client = Client::new(api_server.get_connection_info(), &Version::Warp); - let res = api_client.whitelist_a_torrent(&info_hash).await; - assert_eq!(res.status(), 200); + let response = api_client.whitelist_a_torrent(&info_hash).await; + assert_ok(response).await; - let res = api_client.whitelist_a_torrent(&info_hash).await; - assert_eq!(res.status(), 200); + let response = api_client.whitelist_a_torrent(&info_hash).await; + assert_ok(response).await; } #[tokio::test] @@ -382,7 +384,7 @@ mod tracker_api { .remove_torrent_from_whitelist(&hash) .await; - assert_eq!(response.status(), 200); + assert_ok(response).await; assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); } @@ -437,8 +439,8 @@ mod tracker_api { .reload_whitelist() .await; - assert_eq!(response.status(), 200); - /* This assert fails because the whitelist has not been reloaded yet. 
+ assert_ok(response).await; + /* todo: this assert fails because the whitelist has not been reloaded yet. We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent is whitelisted and use that endpoint to check if the torrent is still there after reloading. assert!( @@ -471,12 +473,11 @@ mod tracker_api { mod for_key_resources { use std::time::Duration; - use torrust_tracker::api::resource::auth_key::AuthKey; use torrust_tracker::tracker::auth::Key; use crate::api::asserts::{ - assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_token_not_valid, - assert_unauthorized, + assert_auth_key, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_ok, + assert_token_not_valid, assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -493,10 +494,12 @@ mod tracker_api { .generate_auth_key(seconds_valid) .await; + let auth_key_resource = assert_auth_key(response).await; + // Verify the key with the tracker assert!(api_server .tracker - .verify_auth_key(&Key::from(response.json::().await.unwrap())) + .verify_auth_key(&Key::from(auth_key_resource)) .await .is_ok()); } @@ -549,8 +552,7 @@ mod tracker_api { .delete_auth_key(&auth_key.key) .await; - assert_eq!(response.status(), 200); - assert_eq!(response.text().await.unwrap(), "{\"status\":\"ok\"}"); + assert_ok(response).await; } #[tokio::test] @@ -621,7 +623,7 @@ mod tracker_api { .reload_keys() .await; - assert_eq!(response.status(), 200); + assert_ok(response).await; } #[tokio::test] @@ -706,7 +708,7 @@ mod tracker_apis { use torrust_tracker::api::resource::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; use crate::api::client::Client; use 
crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; @@ -728,9 +730,8 @@ mod tracker_apis { .get_tracker_statistics() .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::().await.unwrap(), + assert_stats( + response, Stats { torrents: 1, seeders: 1, @@ -748,8 +749,9 @@ mod tracker_apis { udp6_connections_handled: 0, udp6_announces_handled: 0, udp6_scrapes_handled: 0, - } - ); + }, + ) + .await; } #[tokio::test] @@ -777,7 +779,9 @@ mod tracker_apis { use torrust_tracker::api::resource::{self, torrent}; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::asserts::{assert_token_not_valid, assert_torrent_not_known, assert_unauthorized}; + use crate::api::asserts::{ + assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, + }; use crate::api::client::{Client, Query, QueryParam}; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; @@ -796,17 +800,17 @@ mod tracker_apis { .get_torrents(Query::empty()) .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::>().await.unwrap(), + assert_torrent_list( + response, vec![torrent::ListItem { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, leechers: 0, - peers: None // Torrent list does not include the peer list for each torrent - }] - ); + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; } #[tokio::test] @@ -824,17 +828,17 @@ mod tracker_apis { .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::>().await.unwrap(), + assert_torrent_list( + response, vec![torrent::ListItem { info_hash: "0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), seeders: 1, completed: 0, 
leechers: 0, - peers: None // Torrent list does not include the peer list for each torrent - }] - ); + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; } #[tokio::test] @@ -852,17 +856,17 @@ mod tracker_apis { .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::>().await.unwrap(), + assert_torrent_list( + response, vec![torrent::ListItem { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, leechers: 0, - peers: None // Torrent list does not include the peer list for each torrent - }] - ); + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; } #[tokio::test] @@ -896,17 +900,17 @@ mod tracker_apis { .get_torrent(&info_hash.to_string()) .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::().await.unwrap(), + assert_torrent_info( + response, Torrent { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, leechers: 0, - peers: Some(vec![resource::peer::Peer::from(peer)]) - } - ); + peers: Some(vec![resource::peer::Peer::from(peer)]), + }, + ) + .await; } #[tokio::test] @@ -951,7 +955,7 @@ mod tracker_apis { use crate::api::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, - assert_failed_to_whitelist_torrent, assert_token_not_valid, assert_unauthorized, + assert_failed_to_whitelist_torrent, assert_ok, assert_token_not_valid, assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -964,11 +968,11 @@ mod tracker_apis { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let res = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info(), &Version::Axum) 
.whitelist_a_torrent(&info_hash) .await; - assert_eq!(res.status(), 200); + assert_ok(response).await; assert!( api_server .tracker @@ -985,11 +989,11 @@ mod tracker_apis { let api_client = Client::new(api_server.get_connection_info(), &Version::Axum); - let res = api_client.whitelist_a_torrent(&info_hash).await; - assert_eq!(res.status(), 200); + let response = api_client.whitelist_a_torrent(&info_hash).await; + assert_ok(response).await; - let res = api_client.whitelist_a_torrent(&info_hash).await; - assert_eq!(res.status(), 200); + let response = api_client.whitelist_a_torrent(&info_hash).await; + assert_ok(response).await; } #[tokio::test] @@ -1038,7 +1042,7 @@ mod tracker_apis { .remove_torrent_from_whitelist(&hash) .await; - assert_eq!(response.status(), 200); + assert_ok(response).await; assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); } @@ -1093,8 +1097,8 @@ mod tracker_apis { .reload_whitelist() .await; - assert_eq!(response.status(), 200); - /* This assert fails because the whitelist has not been reloaded yet. + assert_ok(response).await; + /* todo: this assert fails because the whitelist has not been reloaded yet. We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent is whitelisted and use that endpoint to check if the torrent is still there after reloading. 
assert!( @@ -1127,12 +1131,11 @@ mod tracker_apis { mod for_key_resources { use std::time::Duration; - use torrust_tracker::api::resource::auth_key::AuthKey; use torrust_tracker::tracker::auth::Key; use crate::api::asserts::{ - assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_ok, - assert_token_not_valid, assert_unauthorized, + assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, + assert_ok, assert_token_not_valid, assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -1149,10 +1152,12 @@ mod tracker_apis { .generate_auth_key(seconds_valid) .await; + let auth_key_resource = assert_auth_key_utf8(response).await; + // Verify the key with the tracker assert!(api_server .tracker - .verify_auth_key(&Key::from(response.json::().await.unwrap())) + .verify_auth_key(&Key::from(auth_key_resource)) .await .is_ok()); } @@ -1276,7 +1281,7 @@ mod tracker_apis { .reload_keys() .await; - assert_eq!(response.status(), 200); + assert_ok(response).await; } #[tokio::test] From 504cb9e8d6ce24eed5e7e0e5dc24b673ced3541d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 18:09:16 +0000 Subject: [PATCH 0304/1003] feat(api): the new Axum api uses the URL prefix /api too. Initially we were using a different URL to avoid conflicts with the Warp implementation but since we are using different ports that is not a problem anymore. This change simplifies switching to the new Axum API, since we only have to start using the new implementation in the port set in the configuration (1212), instead of the temporarily created port for the Axum implementation (1313). 
--- src/apis/server.rs | 42 ++++++----- tests/api/client.rs | 8 +-- tests/tracker_api.rs | 164 +++++++++++++++++++------------------------ 3 files changed, 98 insertions(+), 116 deletions(-) diff --git a/src/apis/server.rs b/src/apis/server.rs index d046f1714..55f71f9cc 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -20,34 +20,37 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl F // todo: duplicate routes definition. See `start_tls` function. let app = Router::new() // Stats - .route("/stats", get(get_stats_handler).with_state(tracker.clone())) + .route("/api/stats", get(get_stats_handler).with_state(tracker.clone())) // Torrents - .route("/torrent/:info_hash", get(get_torrent_handler).with_state(tracker.clone())) - .route("/torrents", get(get_torrents_handler).with_state(tracker.clone())) + .route( + "/api/torrent/:info_hash", + get(get_torrent_handler).with_state(tracker.clone()), + ) + .route("/api/torrents", get(get_torrents_handler).with_state(tracker.clone())) // Whitelisted torrents .route( - "/whitelist/:info_hash", + "/api/whitelist/:info_hash", post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), ) .route( - "/whitelist/:info_hash", + "/api/whitelist/:info_hash", delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), ) // Whitelist command .route( - "/whitelist/:info_hash", + "/api/whitelist/:info_hash", get(reload_whitelist_handler).with_state(tracker.clone()), ) // Keys .route( - "/key/:seconds_valid_or_key", + "/api/key/:seconds_valid_or_key", post(generate_auth_key_handler) .with_state(tracker.clone()) .delete(delete_auth_key_handler) .with_state(tracker.clone()), ) - // Key command - .route("/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) + // Keys command + .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = 
axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -66,34 +69,37 @@ pub fn start_tls( // todo: duplicate routes definition. See `start` function. let app = Router::new() // Stats - .route("/stats", get(get_stats_handler).with_state(tracker.clone())) + .route("/api/stats", get(get_stats_handler).with_state(tracker.clone())) // Torrents - .route("/torrent/:info_hash", get(get_torrent_handler).with_state(tracker.clone())) - .route("/torrents", get(get_torrents_handler).with_state(tracker.clone())) + .route( + "/api/torrent/:info_hash", + get(get_torrent_handler).with_state(tracker.clone()), + ) + .route("/api/torrents", get(get_torrents_handler).with_state(tracker.clone())) // Whitelisted torrents .route( - "/whitelist/:info_hash", + "/api/whitelist/:info_hash", post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), ) .route( - "/whitelist/:info_hash", + "/api/whitelist/:info_hash", delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), ) // Whitelist command .route( - "/whitelist/:info_hash", + "/api/whitelist/:info_hash", get(reload_whitelist_handler).with_state(tracker.clone()), ) // Keys .route( - "/key/:seconds_valid_or_key", + "/api/key/:seconds_valid_or_key", post(generate_auth_key_handler) .with_state(tracker.clone()) .delete(delete_auth_key_handler) .with_state(tracker.clone()), ) - // Key command - .route("/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) + // Keys command + .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let handle = Handle::new(); diff --git a/tests/api/client.rs b/tests/api/client.rs index e507d817f..b073adefd 100644 --- a/tests/api/client.rs +++ b/tests/api/client.rs @@ -1,7 +1,6 @@ use reqwest::Response; use super::connection_info::ConnectionInfo; -use super::Version; pub struct Client { connection_info: ConnectionInfo, @@ -68,13 +67,10 @@ impl From for 
ReqwestQueryParam { } impl Client { - pub fn new(connection_info: ConnectionInfo, version: &Version) -> Self { + pub fn new(connection_info: ConnectionInfo) -> Self { Self { connection_info, - base_path: match version { - Version::Warp => "/api/".to_string(), - Version::Axum => "/".to_string(), - }, + base_path: "/api/".to_string(), } } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index ca1d2332d..72311e71c 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -39,7 +39,7 @@ mod tracker_api { POST /api/key/:seconds_valid DELETE /api/key/:key - Key command: + Keys command: GET /api/keys/reload */ @@ -68,9 +68,7 @@ mod tracker_api { ) .await; - let response = Client::new(api_server.get_connection_info(), &Version::Warp) - .get_tracker_statistics() - .await; + let response = Client::new(api_server.get_connection_info()).get_tracker_statistics().await; assert_stats( response, @@ -100,13 +98,13 @@ mod tracker_api { async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { let api_server = start_default_api(&Version::Warp).await; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_tracker_statistics() .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .get_tracker_statistics() .await; @@ -138,7 +136,7 @@ mod tracker_api { api_server.add_torrent(&info_hash, &sample_peer()).await; - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::empty()) .await; @@ -166,7 +164,7 @@ mod tracker_api { api_server.add_torrent(&info_hash_1, &sample_peer()).await; api_server.add_torrent(&info_hash_2, 
&sample_peer()).await; - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) .await; @@ -194,7 +192,7 @@ mod tracker_api { api_server.add_torrent(&info_hash_1, &sample_peer()).await; api_server.add_torrent(&info_hash_2, &sample_peer()).await; - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) .await; @@ -215,13 +213,13 @@ mod tracker_api { async fn should_not_allow_getting_torrents_for_unauthenticated_users() { let api_server = start_default_api(&Version::Warp).await; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_torrents(Query::empty()) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .get_torrents(Query::default()) .await; @@ -238,7 +236,7 @@ mod tracker_api { api_server.add_torrent(&info_hash, &peer).await; - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; @@ -261,7 +259,7 @@ mod tracker_api { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; @@ -276,13 +274,13 @@ mod tracker_api { api_server.add_torrent(&info_hash, &sample_peer()).await; - let 
response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_torrent(&info_hash.to_string()) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .get_torrent(&info_hash.to_string()) .await; @@ -310,7 +308,7 @@ mod tracker_api { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .whitelist_a_torrent(&info_hash) .await; @@ -329,7 +327,7 @@ mod tracker_api { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let api_client = Client::new(api_server.get_connection_info(), &Version::Warp); + let api_client = Client::new(api_server.get_connection_info()); let response = api_client.whitelist_a_torrent(&info_hash).await; assert_ok(response).await; @@ -344,13 +342,13 @@ mod tracker_api { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .whitelist_a_torrent(&info_hash) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .whitelist_a_torrent(&info_hash) .await; @@ -365,7 +363,7 @@ mod tracker_api { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = 
Client::new(api_server.get_connection_info()) .whitelist_a_torrent(&info_hash) .await; @@ -380,7 +378,7 @@ mod tracker_api { let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .remove_torrent_from_whitelist(&hash) .await; @@ -398,7 +396,7 @@ mod tracker_api { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .remove_torrent_from_whitelist(&hash) .await; @@ -413,14 +411,14 @@ mod tracker_api { let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .remove_torrent_from_whitelist(&hash) .await; assert_token_not_valid(response).await; api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .remove_torrent_from_whitelist(&hash) .await; @@ -435,9 +433,7 @@ mod tracker_api { let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) - .reload_whitelist() - .await; + let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; assert_ok(response).await; /* todo: this assert fails because the whitelist has not been reloaded yet. 
@@ -462,9 +458,7 @@ mod tracker_api { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) - .reload_whitelist() - .await; + let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; assert_failed_to_reload_whitelist(response).await; } @@ -490,7 +484,7 @@ mod tracker_api { let seconds_valid = 60; - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .generate_auth_key(seconds_valid) .await; @@ -510,13 +504,13 @@ mod tracker_api { let seconds_valid = 60; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .generate_auth_key(seconds_valid) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .generate_auth_key(seconds_valid) .await; @@ -530,7 +524,7 @@ mod tracker_api { force_database_error(&api_server.tracker); let seconds_valid = 60; - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .generate_auth_key(seconds_valid) .await; @@ -548,7 +542,7 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .delete_auth_key(&auth_key.key) .await; @@ -568,7 +562,7 @@ mod tracker_api { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .delete_auth_key(&auth_key.key) .await; @@ -588,7 +582,7 @@ mod tracker_api { .await 
.unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .delete_auth_key(&auth_key.key) .await; @@ -601,7 +595,7 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .delete_auth_key(&auth_key.key) .await; @@ -619,9 +613,7 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) - .reload_keys() - .await; + let response = Client::new(api_server.get_connection_info()).reload_keys().await; assert_ok(response).await; } @@ -639,9 +631,7 @@ mod tracker_api { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) - .reload_keys() - .await; + let response = Client::new(api_server.get_connection_info()).reload_keys().await; assert_failed_to_reload_keys(response).await; } @@ -657,13 +647,13 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .reload_keys() .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .reload_keys() .await; @@ -697,7 +687,7 @@ mod tracker_apis { - [ ] POST /api/key/:seconds_valid - [ ] DELETE /api/key/:key - Key commands + Keys commands - [ ] GET /api/keys/reload */ @@ -726,9 +716,7 @@ mod tracker_apis { ) .await; - let response = Client::new(api_server.get_connection_info(), &Version::Axum) - .get_tracker_statistics() - .await; + let 
response = Client::new(api_server.get_connection_info()).get_tracker_statistics().await; assert_stats( response, @@ -758,13 +746,13 @@ mod tracker_apis { async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { let api_server = start_default_api(&Version::Axum).await; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_tracker_statistics() .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .get_tracker_statistics() .await; @@ -796,7 +784,7 @@ mod tracker_apis { api_server.add_torrent(&info_hash, &sample_peer()).await; - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::empty()) .await; @@ -824,7 +812,7 @@ mod tracker_apis { api_server.add_torrent(&info_hash_1, &sample_peer()).await; api_server.add_torrent(&info_hash_2, &sample_peer()).await; - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) .await; @@ -852,7 +840,7 @@ mod tracker_apis { api_server.add_torrent(&info_hash_1, &sample_peer()).await; api_server.add_torrent(&info_hash_2, &sample_peer()).await; - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) .await; @@ -873,13 +861,13 @@ mod tracker_apis { async fn should_not_allow_getting_torrents_for_unauthenticated_users() { let api_server = 
start_default_api(&Version::Axum).await; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_torrents(Query::empty()) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .get_torrents(Query::default()) .await; @@ -896,7 +884,7 @@ mod tracker_apis { api_server.add_torrent(&info_hash, &peer).await; - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; @@ -919,7 +907,7 @@ mod tracker_apis { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; @@ -934,13 +922,13 @@ mod tracker_apis { api_server.add_torrent(&info_hash, &sample_peer()).await; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_torrent(&info_hash.to_string()) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .get_torrent(&info_hash.to_string()) .await; @@ -968,7 +956,7 @@ mod tracker_apis { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = 
Client::new(api_server.get_connection_info()) .whitelist_a_torrent(&info_hash) .await; @@ -987,7 +975,7 @@ mod tracker_apis { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let api_client = Client::new(api_server.get_connection_info(), &Version::Axum); + let api_client = Client::new(api_server.get_connection_info()); let response = api_client.whitelist_a_torrent(&info_hash).await; assert_ok(response).await; @@ -1002,13 +990,13 @@ mod tracker_apis { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .whitelist_a_torrent(&info_hash) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .whitelist_a_torrent(&info_hash) .await; @@ -1023,7 +1011,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .whitelist_a_torrent(&info_hash) .await; @@ -1038,7 +1026,7 @@ mod tracker_apis { let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .remove_torrent_from_whitelist(&hash) .await; @@ -1056,7 +1044,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .remove_torrent_from_whitelist(&hash) .await; @@ -1071,14 +1059,14 @@ mod tracker_apis { 
let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .remove_torrent_from_whitelist(&hash) .await; assert_token_not_valid(response).await; api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .remove_torrent_from_whitelist(&hash) .await; @@ -1093,9 +1081,7 @@ mod tracker_apis { let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) - .reload_whitelist() - .await; + let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; assert_ok(response).await; /* todo: this assert fails because the whitelist has not been reloaded yet. 
@@ -1120,9 +1106,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) - .reload_whitelist() - .await; + let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; assert_failed_to_reload_whitelist(response).await; } @@ -1148,7 +1132,7 @@ mod tracker_apis { let seconds_valid = 60; - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .generate_auth_key(seconds_valid) .await; @@ -1168,13 +1152,13 @@ mod tracker_apis { let seconds_valid = 60; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .generate_auth_key(seconds_valid) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .generate_auth_key(seconds_valid) .await; @@ -1188,7 +1172,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); let seconds_valid = 60; - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .generate_auth_key(seconds_valid) .await; @@ -1206,7 +1190,7 @@ mod tracker_apis { .await .unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .delete_auth_key(&auth_key.key) .await; @@ -1226,7 +1210,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .delete_auth_key(&auth_key.key) .await; @@ -1246,7 +1230,7 @@ mod 
tracker_apis { .await .unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .delete_auth_key(&auth_key.key) .await; @@ -1259,7 +1243,7 @@ mod tracker_apis { .await .unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .delete_auth_key(&auth_key.key) .await; @@ -1277,9 +1261,7 @@ mod tracker_apis { .await .unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) - .reload_keys() - .await; + let response = Client::new(api_server.get_connection_info()).reload_keys().await; assert_ok(response).await; } @@ -1297,9 +1279,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) - .reload_keys() - .await; + let response = Client::new(api_server.get_connection_info()).reload_keys().await; assert_failed_to_reload_keys(response).await; } @@ -1315,13 +1295,13 @@ mod tracker_apis { .await .unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .reload_keys() .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .reload_keys() .await; From c502c1d939ef9d51c0457c5b3ebb73d56a04d9b0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 18:32:15 +0000 Subject: [PATCH 0305/1003] refactor(api): [#143] remove duplicate definition of axum router --- src/apis/routes.rs | 44 ++++++++++++++++++++++++ src/apis/server.rs | 86 
++++------------------------------------------ 2 files changed, 50 insertions(+), 80 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 7b1dc53f9..e46e3653d 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -6,9 +6,12 @@ use std::time::Duration; use axum::extract::{Path, Query, State}; use axum::http::{header, StatusCode}; use axum::response::{IntoResponse, Json, Response}; +use axum::routing::{delete, get, post}; +use axum::{middleware, Router}; use serde::{de, Deserialize, Deserializer, Serialize}; use serde_json::json; +use super::middlewares::auth::auth; use crate::api::resource::auth_key::AuthKey; use crate::api::resource::stats::Stats; use crate::api::resource::torrent::{ListItem, Torrent}; @@ -17,6 +20,47 @@ use crate::tracker::services::statistics::get_metrics; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; +pub fn router(tracker: &Arc) -> Router { + Router::new() + // Stats + .route("/api/stats", get(get_stats_handler).with_state(tracker.clone())) + // Torrents + .route( + "/api/torrent/:info_hash", + get(get_torrent_handler).with_state(tracker.clone()), + ) + .route("/api/torrents", get(get_torrents_handler).with_state(tracker.clone())) + // Whitelisted torrents + .route( + "/api/whitelist/:info_hash", + post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), + ) + .route( + "/api/whitelist/:info_hash", + delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), + ) + // Whitelist command + .route( + "/api/whitelist/:info_hash", + get(reload_whitelist_handler).with_state(tracker.clone()), + ) + // Keys + .route( + // code-review: Axum does not allow two routes with the same path but different path variable name. 
+ // In the new major API version, `seconds_valid` should be a POST form field so that we will have two paths: + // POST /api/key + // DELETE /api/key/:key + "/api/key/:seconds_valid_or_key", + post(generate_auth_key_handler) + .with_state(tracker.clone()) + .delete(delete_auth_key_handler) + .with_state(tracker.clone()), + ) + // Keys command + .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) + .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)) +} + #[derive(Serialize, Debug)] #[serde(tag = "status", rename_all = "snake_case")] pub enum ActionStatus<'a> { diff --git a/src/apis/server.rs b/src/apis/server.rs index 55f71f9cc..bbb3e5852 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -1,57 +1,17 @@ use std::net::SocketAddr; use std::sync::Arc; -use axum::routing::{delete, get, post}; -use axum::{middleware, Router}; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; use futures::Future; use log::info; use warp::hyper; -use super::middlewares::auth::auth; -use super::routes::{ - add_torrent_to_whitelist_handler, delete_auth_key_handler, delete_torrent_from_whitelist_handler, generate_auth_key_handler, - get_stats_handler, get_torrent_handler, get_torrents_handler, reload_keys_handler, reload_whitelist_handler, -}; -use crate::tracker; +use super::routes::router; +use crate::tracker::Tracker; -pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { - // todo: duplicate routes definition. See `start_tls` function. 
- let app = Router::new() - // Stats - .route("/api/stats", get(get_stats_handler).with_state(tracker.clone())) - // Torrents - .route( - "/api/torrent/:info_hash", - get(get_torrent_handler).with_state(tracker.clone()), - ) - .route("/api/torrents", get(get_torrents_handler).with_state(tracker.clone())) - // Whitelisted torrents - .route( - "/api/whitelist/:info_hash", - post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), - ) - .route( - "/api/whitelist/:info_hash", - delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), - ) - // Whitelist command - .route( - "/api/whitelist/:info_hash", - get(reload_whitelist_handler).with_state(tracker.clone()), - ) - // Keys - .route( - "/api/key/:seconds_valid_or_key", - post(generate_auth_key_handler) - .with_state(tracker.clone()) - .delete(delete_auth_key_handler) - .with_state(tracker.clone()), - ) - // Keys command - .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) - .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); +pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { + let app = router(tracker); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -64,43 +24,9 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl F pub fn start_tls( socket_addr: SocketAddr, ssl_config: RustlsConfig, - tracker: &Arc, + tracker: &Arc, ) -> impl Future> { - // todo: duplicate routes definition. See `start` function. 
- let app = Router::new() - // Stats - .route("/api/stats", get(get_stats_handler).with_state(tracker.clone())) - // Torrents - .route( - "/api/torrent/:info_hash", - get(get_torrent_handler).with_state(tracker.clone()), - ) - .route("/api/torrents", get(get_torrents_handler).with_state(tracker.clone())) - // Whitelisted torrents - .route( - "/api/whitelist/:info_hash", - post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), - ) - .route( - "/api/whitelist/:info_hash", - delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), - ) - // Whitelist command - .route( - "/api/whitelist/:info_hash", - get(reload_whitelist_handler).with_state(tracker.clone()), - ) - // Keys - .route( - "/api/key/:seconds_valid_or_key", - post(generate_auth_key_handler) - .with_state(tracker.clone()) - .delete(delete_auth_key_handler) - .with_state(tracker.clone()), - ) - // Keys command - .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) - .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); + let app = router(tracker); let handle = Handle::new(); let shutdown_handle = handle.clone(); From 517ffde147ebbc4040967803af61f387a7c85722 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 12 Jan 2023 12:21:58 +0000 Subject: [PATCH 0306/1003] fix(api): [#143] fix new Axum API enpoint when URL params are invalid --- src/apis/routes.rs | 127 ++++++++++++++++++++++++++------------- src/tracker/auth.rs | 31 ++++++++++ src/tracker/mod.rs | 1 + tests/api/asserts.rs | 12 ++++ tests/tracker_api.rs | 138 +++++++++++++++++++++++++++++++++++++++++-- 5 files changed, 261 insertions(+), 48 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index e46e3653d..60f5f9da0 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -16,6 +16,7 @@ use crate::api::resource::auth_key::AuthKey; use crate::api::resource::stats::Stats; use crate::api::resource::torrent::{ListItem, Torrent}; use 
crate::protocol::info_hash::InfoHash; +use crate::tracker::auth::KeyId; use crate::tracker::services::statistics::get_metrics; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; @@ -37,7 +38,7 @@ pub fn router(tracker: &Arc) -> Router { ) .route( "/api/whitelist/:info_hash", - delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), + delete(remove_torrent_from_whitelist_handler).with_state(tracker.clone()), ) // Whitelist command .route( @@ -68,6 +69,19 @@ pub enum ActionStatus<'a> { Err { reason: std::borrow::Cow<'a, str> }, } +// Resource responses + +fn response_auth_key(auth_key: &AuthKey) -> Response { + ( + StatusCode::OK, + [(header::CONTENT_TYPE, "application/json; charset=utf-8")], + serde_json::to_string(auth_key).unwrap(), + ) + .into_response() +} + +// OK response + fn response_ok() -> Response { ( StatusCode::OK, @@ -77,20 +91,29 @@ fn response_ok() -> Response { .into_response() } -fn response_err(reason: String) -> Response { +// Error responses + +fn response_invalid_info_hash_param(info_hash: &str) -> Response { + response_bad_request(&format!( + "Invalid URL: invalid infohash param: string \"{}\", expected expected a 40 character long string", + info_hash + )) +} + +fn response_bad_request(body: &str) -> Response { ( - StatusCode::INTERNAL_SERVER_ERROR, + StatusCode::BAD_REQUEST, [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], - format!("Unhandled rejection: {:?}", ActionStatus::Err { reason: reason.into() }), + body.to_owned(), ) .into_response() } -fn response_auth_key(auth_key: &AuthKey) -> Response { +fn response_err(reason: String) -> Response { ( - StatusCode::OK, - [(header::CONTENT_TYPE, "application/json; charset=utf-8")], - serde_json::to_string(auth_key).unwrap(), + StatusCode::INTERNAL_SERVER_ERROR, + [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], + format!("Unhandled rejection: {:?}", ActionStatus::Err { reason: reason.into() }), ) 
.into_response() } @@ -99,15 +122,22 @@ pub async fn get_stats_handler(State(tracker): State>) -> Json>, Path(info_hash): Path) -> Response { - let optional_torrent_info = get_torrent_info(tracker.clone(), &InfoHash::from_str(&info_hash).unwrap()).await; +#[derive(Deserialize)] +pub struct InfoHashParam(String); + +pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { + let parsing_info_hash_result = InfoHash::from_str(&info_hash.0); - match optional_torrent_info { - Some(info) => Json(Torrent::from(info)).into_response(), - None => Json(json!("torrent not known")).into_response(), + match parsing_info_hash_result { + Err(_) => response_invalid_info_hash_param(&info_hash.0), + Ok(info_hash) => { + let optional_torrent_info = get_torrent_info(tracker.clone(), &info_hash).await; + + match optional_torrent_info { + Some(info) => Json(Torrent::from(info)).into_response(), + None => Json(json!("torrent not known")).into_response(), + } + } } } @@ -131,32 +161,33 @@ pub async fn get_torrents_handler( )) } -/// # Panics -/// -/// Will panic if it can't parse the infohash in the request -pub async fn add_torrent_to_whitelist_handler(State(tracker): State>, Path(info_hash): Path) -> Response { - match tracker - .add_torrent_to_whitelist(&InfoHash::from_str(&info_hash).unwrap()) - .await - { - Ok(..) => response_ok(), - Err(..) => response_err("failed to whitelist torrent".to_string()), +pub async fn add_torrent_to_whitelist_handler( + State(tracker): State>, + Path(info_hash): Path, +) -> Response { + let parsing_info_hash_result = InfoHash::from_str(&info_hash.0); + + match parsing_info_hash_result { + Err(_) => response_invalid_info_hash_param(&info_hash.0), + Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { + Ok(..) => response_ok(), + Err(..) 
=> response_err("failed to whitelist torrent".to_string()), + }, } } -/// # Panics -/// -/// Will panic if it can't parse the infohash in the request -pub async fn delete_torrent_from_whitelist_handler( +pub async fn remove_torrent_from_whitelist_handler( State(tracker): State>, - Path(info_hash): Path, + Path(info_hash): Path, ) -> Response { - match tracker - .remove_torrent_from_whitelist(&InfoHash::from_str(&info_hash).unwrap()) - .await - { - Ok(..) => response_ok(), - Err(..) => response_err("failed to remove torrent from whitelist".to_string()), + let parsing_info_hash_result = InfoHash::from_str(&info_hash.0); + + match parsing_info_hash_result { + Err(_) => response_invalid_info_hash_param(&info_hash.0), + Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { + Ok(..) => response_ok(), + Err(..) => response_err("failed to remove torrent from whitelist".to_string()), + }, } } @@ -168,16 +199,28 @@ pub async fn reload_whitelist_handler(State(tracker): State>) -> Re } pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { - match tracker.generate_auth_key(Duration::from_secs(seconds_valid_or_key)).await { + let seconds_valid = seconds_valid_or_key; + match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { Ok(auth_key) => response_auth_key(&AuthKey::from(auth_key)), Err(_) => response_err("failed to generate key".to_string()), } } -pub async fn delete_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { - match tracker.remove_auth_key(&seconds_valid_or_key).await { - Ok(_) => response_ok(), - Err(_) => response_err("failed to delete key".to_string()), +#[derive(Deserialize)] +pub struct KeyIdParam(String); + +pub async fn delete_auth_key_handler( + State(tracker): State>, + Path(seconds_valid_or_key): Path, +) -> Response { + let key_id = KeyId::from_str(&seconds_valid_or_key.0); + + match key_id { + Err(_) => 
response_bad_request(&format!("Invalid auth key id param \"{}\"", seconds_valid_or_key.0)), + Ok(key_id) => match tracker.remove_auth_key(&key_id.to_string()).await { + Ok(_) => response_ok(), + Err(_) => response_err("failed to delete key".to_string()), + }, } } diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 406ef7033..c4062bc68 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -1,3 +1,4 @@ +use std::str::FromStr; use std::time::Duration; use derive_more::{Display, Error}; @@ -50,6 +51,8 @@ pub fn verify(auth_key: &Key) -> Result<(), Error> { #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct Key { + // todo: replace key field definition with: + // pub key: KeyId, pub key: String, pub valid_until: Option, } @@ -77,6 +80,24 @@ impl Key { } } +#[derive(Debug, Display, PartialEq, Clone)] +pub struct KeyId(String); + +#[derive(Debug, PartialEq, Eq)] +pub struct ParseKeyIdError; + +impl FromStr for KeyId { + type Err = ParseKeyIdError; + + fn from_str(s: &str) -> Result { + if s.len() != AUTH_KEY_LENGTH { + return Err(ParseKeyIdError); + } + + Ok(Self(s.to_string())) + } +} + #[derive(Debug, Display, PartialEq, Eq, Error)] #[allow(dead_code)] pub enum Error { @@ -97,6 +118,7 @@ impl From for Error { #[cfg(test)] mod tests { + use std::str::FromStr; use std::time::Duration; use crate::protocol::clock::{Current, StoppedTime}; @@ -122,6 +144,15 @@ mod tests { assert_eq!(auth_key.unwrap().key, key_string); } + #[test] + fn auth_key_id_from_string() { + let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; + let auth_key_id = auth::KeyId::from_str(key_string); + + assert!(auth_key_id.is_ok()); + assert_eq!(auth_key_id.unwrap().to_string(), key_string); + } + #[test] fn generate_valid_auth_key() { let auth_key = auth::generate(Duration::new(9999, 0)); diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 50d006a3f..bbf49e237 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -95,6 +95,7 @@ impl Tracker { /// /// 
Will return a `key::Error` if unable to get any `auth_key`. pub async fn verify_auth_key(&self, auth_key: &auth::Key) -> Result<(), auth::Error> { + // todo: use auth::KeyId for the function argument `auth_key` match self.keys.read().await.get(&auth_key.key) { None => Err(auth::Error::KeyInvalid), Some(key) => auth::verify(key), diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 1bc067490..d708df58e 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -50,6 +50,18 @@ pub async fn assert_ok(response: Response) { // Error responses +pub async fn assert_bad_request(response: Response, body: &str) { + assert_eq!(response.status(), 400); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + assert_eq!(response.text().await.unwrap(), body); +} + +pub async fn assert_method_not_allowed(response: Response) { + assert_eq!(response.status(), 405); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + assert_eq!(response.text().await.unwrap(), "HTTP method not allowed"); +} + pub async fn assert_torrent_not_known(response: Response) { assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 72311e71c..f43db6255 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -120,7 +120,8 @@ mod tracker_api { use torrust_tracker::protocol::info_hash::InfoHash; use crate::api::asserts::{ - assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, + assert_bad_request, assert_method_not_allowed, assert_token_not_valid, assert_torrent_info, assert_torrent_list, + assert_torrent_not_known, assert_unauthorized, }; use crate::api::client::{Client, Query, QueryParam}; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -209,6 +210,27 @@ mod tracker_api { .await; } + 
#[tokio::test] + async fn should_fail_getting_torrents_when_query_parameters_cannot_be_parsed() { + let api_server = start_default_api(&Version::Warp).await; + + let invalid_offset = "INVALID OFFSET"; + + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) + .await; + + assert_bad_request(response, "Invalid query string").await; + + let invalid_limit = "INVALID LIMIT"; + + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) + .await; + + assert_bad_request(response, "Invalid query string").await; + } + #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { let api_server = start_default_api(&Version::Warp).await; @@ -266,6 +288,19 @@ mod tracker_api { assert_torrent_not_known(response).await; } + #[tokio::test] + async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_cannot_be_parsed() { + let api_server = start_default_api(&Version::Warp).await; + + let invalid_infohash = "INVALID INFOHASH"; + + let response = Client::new(api_server.get_connection_info()) + .get_torrent(invalid_infohash) + .await; + + assert_method_not_allowed(response).await; + } + #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let api_server = start_default_api(&Version::Warp).await; @@ -470,8 +505,8 @@ mod tracker_api { use torrust_tracker::tracker::auth::Key; use crate::api::asserts::{ - assert_auth_key, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_ok, - assert_token_not_valid, assert_unauthorized, + assert_auth_key, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, + assert_method_not_allowed, assert_ok, assert_token_not_valid, assert_unauthorized, }; use crate::api::client::Client; use 
crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -517,6 +552,19 @@ mod tracker_api { assert_unauthorized(response).await; } + #[tokio::test] + async fn should_fail_generating_a_new_auth_key_when_the_key_duration_cannot_be_parsed() { + let api_server = start_default_api(&Version::Warp).await; + + let invalid_key_duration = -1; + + let response = Client::new(api_server.get_connection_info()) + .generate_auth_key(invalid_key_duration) + .await; + + assert_method_not_allowed(response).await; + } + #[tokio::test] async fn should_return_an_error_when_the_auth_key_cannot_be_generated() { let api_server = start_default_api(&Version::Warp).await; @@ -549,6 +597,19 @@ mod tracker_api { assert_ok(response).await; } + #[tokio::test] + async fn should_fail_deleting_an_auth_key_when_the_key_id_cannot_be_parsed() { + let api_server = start_default_api(&Version::Warp).await; + + let invalid_auth_key_id = "INVALID AUTH KEY ID"; + + let response = Client::new(api_server.get_connection_info()) + .delete_auth_key(invalid_auth_key_id) + .await; + + assert_failed_to_delete_key(response).await; + } + #[tokio::test] async fn should_return_an_error_when_the_auth_key_cannot_be_deleted() { let api_server = start_default_api(&Version::Warp).await; @@ -768,7 +829,8 @@ mod tracker_apis { use torrust_tracker::protocol::info_hash::InfoHash; use crate::api::asserts::{ - assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, + assert_bad_request, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, + assert_unauthorized, }; use crate::api::client::{Client, Query, QueryParam}; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -857,6 +919,27 @@ mod tracker_apis { .await; } + #[tokio::test] + async fn should_fail_getting_torrents_when_query_parameters_cannot_be_parsed() { + let api_server = 
start_default_api(&Version::Axum).await; + + let invalid_offset = "INVALID OFFSET"; + + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) + .await; + + assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + + let invalid_limit = "INVALID LIMIT"; + + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) + .await; + + assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + } + #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { let api_server = start_default_api(&Version::Axum).await; @@ -914,6 +997,23 @@ mod tracker_apis { assert_torrent_not_known(response).await; } + #[tokio::test] + async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_cannot_be_parsed() { + let api_server = start_default_api(&Version::Axum).await; + + let invalid_infohash = "INVALID INFOHASH"; + + let response = Client::new(api_server.get_connection_info()) + .get_torrent(invalid_infohash) + .await; + + assert_bad_request( + response, + "Invalid URL: invalid infohash param: string \"INVALID INFOHASH\", expected expected a 40 character long string", + ) + .await; + } + #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let api_server = start_default_api(&Version::Axum).await; @@ -1118,8 +1218,8 @@ mod tracker_apis { use torrust_tracker::tracker::auth::Key; use crate::api::asserts::{ - assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, - assert_ok, assert_token_not_valid, assert_unauthorized, + assert_auth_key_utf8, assert_bad_request, assert_failed_to_delete_key, assert_failed_to_generate_key, + assert_failed_to_reload_keys, assert_ok, assert_token_not_valid, 
assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -1165,6 +1265,19 @@ mod tracker_apis { assert_unauthorized(response).await; } + #[tokio::test] + async fn should_fail_generating_a_new_auth_key_when_the_key_duration_cannot_be_parsed() { + let api_server = start_default_api(&Version::Axum).await; + + let invalid_key_duration = -1; + + let response = Client::new(api_server.get_connection_info()) + .generate_auth_key(invalid_key_duration) + .await; + + assert_bad_request(response, "Invalid URL: Cannot parse `\"-1\"` to a `u64`").await; + } + #[tokio::test] async fn should_return_an_error_when_the_auth_key_cannot_be_generated() { let api_server = start_default_api(&Version::Axum).await; @@ -1197,6 +1310,19 @@ mod tracker_apis { assert_ok(response).await; } + #[tokio::test] + async fn should_fail_deleting_an_auth_key_when_the_key_id_cannot_be_parsed() { + let api_server = start_default_api(&Version::Axum).await; + + let invalid_auth_key_id = "INVALID AUTH KEY ID"; + + let response = Client::new(api_server.get_connection_info()) + .delete_auth_key(invalid_auth_key_id) + .await; + + assert_bad_request(response, "Invalid auth key id param \"INVALID AUTH KEY ID\"").await; + } + #[tokio::test] async fn should_return_an_error_when_the_auth_key_cannot_be_deleted() { let api_server = start_default_api(&Version::Axum).await; From 2da0719966e4c8e42ce277698c9daad95a159423 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 12 Jan 2023 13:37:04 +0000 Subject: [PATCH 0307/1003] test(api): [#143] add tests for authenticaation --- tests/api/client.rs | 30 +++++++++++++----- tests/tracker_api.rs | 74 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+), 8 deletions(-) diff --git a/tests/api/client.rs b/tests/api/client.rs index b073adefd..5b2072cec 100644 --- a/tests/api/client.rs +++ b/tests/api/client.rs @@ -117,14 +117,7 @@ impl Client { 
query.add_param(QueryParam::new("token", token)); }; - reqwest::Client::builder() - .build() - .unwrap() - .get(self.base_url(path)) - .query(&ReqwestQuery::from(query)) - .send() - .await - .unwrap() + self.get_request_with_query(path, query).await } async fn post(&self, path: &str) -> Response { @@ -149,6 +142,27 @@ impl Client { format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) } + pub async fn get_request_with_query(&self, path: &str, params: Query) -> Response { + reqwest::Client::builder() + .build() + .unwrap() + .get(self.base_url(path)) + .query(&ReqwestQuery::from(params)) + .send() + .await + .unwrap() + } + + pub async fn get_request(&self, path: &str) -> Response { + reqwest::Client::builder() + .build() + .unwrap() + .get(self.base_url(path)) + .send() + .await + .unwrap() + } + fn query_with_token(&self) -> Query { match &self.connection_info.api_token { Some(token) => Query::with_token(token), diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index f43db6255..56dcdb240 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -753,6 +753,80 @@ mod tracker_apis { */ + mod authentication { + use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::client::{Client, Query, QueryParam}; + use crate::api::server::start_default_api; + use crate::api::Version; + + #[tokio::test] + async fn should_authenticate_requests_by_using_a_token_query_param() { + let api_server = start_default_api(&Version::Axum).await; + + let token = api_server.get_connection_info().api_token.unwrap(); + + let response = Client::new(api_server.get_connection_info()) + .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec())) + .await; + + assert_eq!(response.status(), 200); + } + + #[tokio::test] + async fn should_not_authenticate_requests_when_the_token_is_missing() { + let api_server = start_default_api(&Version::Axum).await; + + let response = 
Client::new(api_server.get_connection_info()) + .get_request_with_query("stats", Query::default()) + .await; + + assert_unauthorized(response).await; + } + + #[tokio::test] + async fn should_not_authenticate_requests_when_the_token_is_empty() { + let api_server = start_default_api(&Version::Axum).await; + + let response = Client::new(api_server.get_connection_info()) + .get_request_with_query("stats", Query::params([QueryParam::new("token", "")].to_vec())) + .await; + + assert_token_not_valid(response).await; + } + + #[tokio::test] + async fn should_not_authenticate_requests_when_the_token_is_invalid() { + let api_server = start_default_api(&Version::Axum).await; + + let response = Client::new(api_server.get_connection_info()) + .get_request_with_query("stats", Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) + .await; + + assert_token_not_valid(response).await; + } + + #[tokio::test] + async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { + let api_server = start_default_api(&Version::Axum).await; + + let token = api_server.get_connection_info().api_token.unwrap(); + + // At the beginning of the query component + let response = Client::new(api_server.get_connection_info()) + .get_request(&format!("torrents?token={}&limit=1", &token)) + .await; + + assert_eq!(response.status(), 200); + + // At the end of the query component + let response = Client::new(api_server.get_connection_info()) + .get_request(&format!("torrents?limit=1&token={}", &token)) + .await; + + assert_eq!(response.status(), 200); + } + } + mod for_stats_resources { use std::str::FromStr; From 3bcbbc9a1c784276f6e6dad8dc3b27ca9f7adee7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 12 Jan 2023 13:43:30 +0000 Subject: [PATCH 0308/1003] refactor(api): [#143] normalize test names for errrors --- tests/tracker_api.rs | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/tracker_api.rs 
b/tests/tracker_api.rs index 56dcdb240..ec4d1f2eb 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -391,7 +391,7 @@ mod tracker_api { } #[tokio::test] - async fn should_return_an_error_when_the_torrent_cannot_be_whitelisted() { + async fn should_fail_when_the_torrent_cannot_be_whitelisted() { let api_server = start_default_api(&Version::Warp).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -422,7 +422,7 @@ mod tracker_api { } #[tokio::test] - async fn should_return_an_error_when_the_torrent_cannot_be_removed_from_the_whitelist() { + async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { let api_server = start_default_api(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -484,7 +484,7 @@ mod tracker_api { } #[tokio::test] - async fn should_return_an_error_when_the_whitelist_cannot_be_reloaded_from_the_database() { + async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { let api_server = start_default_api(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -566,7 +566,7 @@ mod tracker_api { } #[tokio::test] - async fn should_return_an_error_when_the_auth_key_cannot_be_generated() { + async fn should_fail_when_the_auth_key_cannot_be_generated() { let api_server = start_default_api(&Version::Warp).await; force_database_error(&api_server.tracker); @@ -611,7 +611,7 @@ mod tracker_api { } #[tokio::test] - async fn should_return_an_error_when_the_auth_key_cannot_be_deleted() { + async fn should_fail_when_the_auth_key_cannot_be_deleted() { let api_server = start_default_api(&Version::Warp).await; let seconds_valid = 60; @@ -680,7 +680,7 @@ mod tracker_api { } #[tokio::test] - async fn should_return_an_error_when_keys_cannot_be_reloaded() { + async fn should_fail_when_keys_cannot_be_reloaded() { let api_server = start_default_api(&Version::Warp).await; let seconds_valid = 60; @@ -1178,7 +1178,7 @@ 
mod tracker_apis { } #[tokio::test] - async fn should_return_an_error_when_the_torrent_cannot_be_whitelisted() { + async fn should_fail_when_the_torrent_cannot_be_whitelisted() { let api_server = start_default_api(&Version::Axum).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -1209,7 +1209,7 @@ mod tracker_apis { } #[tokio::test] - async fn should_return_an_error_when_the_torrent_cannot_be_removed_from_the_whitelist() { + async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { let api_server = start_default_api(&Version::Axum).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -1271,7 +1271,7 @@ mod tracker_apis { } #[tokio::test] - async fn should_return_an_error_when_the_whitelist_cannot_be_reloaded_from_the_database() { + async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { let api_server = start_default_api(&Version::Axum).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -1353,7 +1353,7 @@ mod tracker_apis { } #[tokio::test] - async fn should_return_an_error_when_the_auth_key_cannot_be_generated() { + async fn should_fail_when_the_auth_key_cannot_be_generated() { let api_server = start_default_api(&Version::Axum).await; force_database_error(&api_server.tracker); @@ -1398,7 +1398,7 @@ mod tracker_apis { } #[tokio::test] - async fn should_return_an_error_when_the_auth_key_cannot_be_deleted() { + async fn should_fail_when_the_auth_key_cannot_be_deleted() { let api_server = start_default_api(&Version::Axum).await; let seconds_valid = 60; @@ -1467,7 +1467,7 @@ mod tracker_apis { } #[tokio::test] - async fn should_return_an_error_when_keys_cannot_be_reloaded() { + async fn should_fail_when_keys_cannot_be_reloaded() { let api_server = start_default_api(&Version::Axum).await; let seconds_valid = 60; From aa2a2ef4346044aabaf2c8bf0f7966b2b3f94bf8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 12 Jan 2023 16:06:20 +0000 Subject: 
[PATCH 0309/1003] fix(api): [#143] do not fail trying to remove a whitelisted torrent twice Previous behavior: When you try to remove a non-existing whitelisted torrent the response is a 500 error. New behavior: The endpoint checks if the torrent is included in the whitelist. If it is not, then it ignores the request, returning a 200 code. It should return a 204 or 404 but the current API only uses these codes: 200, 400, 405, 500. In the new API version we are planning to refactor all endpoints. --- src/tracker/mod.rs | 21 ++++++++++++++++++++- tests/tracker_api.rs | 26 ++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index bbf49e237..4f1dab49b 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -152,11 +152,30 @@ impl Tracker { /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + self.remove_torrent_from_database_whitelist(info_hash).await?; + self.remove_torrent_from_memory_whitelist(info_hash).await; + Ok(()) + } + + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database.
+ pub async fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(info_hash).await?; + + if !is_whitelisted { + return Ok(()); + } + self.database.remove_info_hash_from_whitelist(*info_hash).await?; - self.whitelist.write().await.remove(info_hash); + Ok(()) } + pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.remove(info_hash) + } + pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { self.whitelist.read().await.contains(info_hash) } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index ec4d1f2eb..f959f67b9 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -421,6 +421,19 @@ mod tracker_api { assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); } + #[tokio::test] + async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { + let api_server = start_default_api(&Version::Warp).await; + + let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(api_server.get_connection_info()) + .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) + .await; + + assert_ok(response).await; + } + #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { let api_server = start_default_api(&Version::Warp).await; @@ -1208,6 +1221,19 @@ mod tracker_apis { assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); } + #[tokio::test] + async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { + let api_server = start_default_api(&Version::Axum).await; + + let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(api_server.get_connection_info()) + 
.remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) + .await; + + assert_ok(response).await; + } + #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { let api_server = start_default_api(&Version::Axum).await; From 39c15c6bcf4690f16617cb92a3fd1094a9e71192 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 12 Jan 2023 19:36:05 +0000 Subject: [PATCH 0310/1003] test(api): [#143] add test for invalid infohash URL path param --- src/apis/routes.rs | 4 +- src/protocol/info_hash.rs | 4 +- tests/api/asserts.rs | 18 ++++++ tests/tracker_api.rs | 126 ++++++++++++++++++++++++++++++-------- 4 files changed, 124 insertions(+), 28 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 60f5f9da0..378aca929 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -42,7 +42,7 @@ pub fn router(tracker: &Arc) -> Router { ) // Whitelist command .route( - "/api/whitelist/:info_hash", + "/api/whitelist/reload", get(reload_whitelist_handler).with_state(tracker.clone()), ) // Keys @@ -95,7 +95,7 @@ fn response_ok() -> Response { fn response_invalid_info_hash_param(info_hash: &str) -> Response { response_bad_request(&format!( - "Invalid URL: invalid infohash param: string \"{}\", expected expected a 40 character long string", + "Invalid URL: invalid infohash param: string \"{}\", expected a 40 character long string", info_hash )) } diff --git a/src/protocol/info_hash.rs b/src/protocol/info_hash.rs index 9a0900063..3d2fad1a5 100644 --- a/src/protocol/info_hash.rs +++ b/src/protocol/info_hash.rs @@ -77,7 +77,7 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { if v.len() != 40 { return Err(serde::de::Error::invalid_value( serde::de::Unexpected::Str(v), - &"expected a 40 character long string", + &"a 40 character long string", )); } @@ -86,7 +86,7 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { return Err(serde::de::Error::invalid_value( 
serde::de::Unexpected::Str(v), - &"expected a hexadecimal string", + &"a hexadecimal string", )); }; Ok(res) diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index d708df58e..23d76f159 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -56,6 +56,13 @@ pub async fn assert_bad_request(response: Response, body: &str) { assert_eq!(response.text().await.unwrap(), body); } +pub async fn assert_not_found(response: Response) { + assert_eq!(response.status(), 404); + // todo: missing header + //assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + assert_eq!(response.text().await.unwrap(), ""); +} + pub async fn assert_method_not_allowed(response: Response) { assert_eq!(response.status(), 405); assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); @@ -68,6 +75,17 @@ pub async fn assert_torrent_not_known(response: Response) { assert_eq!(response.text().await.unwrap(), "\"torrent not known\""); } +pub async fn assert_invalid_infohash(response: Response, invalid_infohash: &str) { + assert_bad_request( + response, + &format!( + "Invalid URL: invalid infohash param: string \"{}\", expected a 40 character long string", + invalid_infohash + ), + ) + .await; +} + pub async fn assert_token_not_valid(response: Response) { assert_unhandled_rejection(response, "token not valid").await; } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index f959f67b9..a5cb0cc4d 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -766,6 +766,26 @@ mod tracker_apis { */ + // When these infohashes are used in URL path params + // the response is a custom response returned in the handler + fn invalid_infohashes_returning_bad_request() -> Vec { + [ + "0".to_string(), + "-1".to_string(), + "1.1".to_string(), + "INVALID INFOHASH".to_string(), + "9c38422213e30bff212b30c360d26f9a0213642".to_string(), // 39-char length instead of 40 + "9c38422213e30bff212b30c360d26f9a0213642&".to_string(), // 
Invalid char + ] + .to_vec() + } + + // When these infohashes are used in URL path params + // the response is an Axum response returned in the handler + fn invalid_infohashes_returning_not_found() -> Vec { + [String::new(), " ".to_string()].to_vec() + } + mod authentication { use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; use crate::api::client::{Client, Query, QueryParam}; @@ -915,9 +935,10 @@ mod tracker_apis { use torrust_tracker::api::resource::{self, torrent}; use torrust_tracker::protocol::info_hash::InfoHash; + use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; use crate::api::asserts::{ - assert_bad_request, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, - assert_unauthorized, + assert_bad_request, assert_invalid_infohash, assert_not_found, assert_token_not_valid, assert_torrent_info, + assert_torrent_list, assert_torrent_not_known, assert_unauthorized, }; use crate::api::client::{Client, Query, QueryParam}; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -1007,24 +1028,33 @@ mod tracker_apis { } #[tokio::test] - async fn should_fail_getting_torrents_when_query_parameters_cannot_be_parsed() { + async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { let api_server = start_default_api(&Version::Axum).await; - let invalid_offset = "INVALID OFFSET"; + let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; - let response = Client::new(api_server.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) - .await; + for invalid_offset in &invalid_offsets { + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) + .await; - assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; 
+ assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + } + } - let invalid_limit = "INVALID LIMIT"; + #[tokio::test] + async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { + let api_server = start_default_api(&Version::Axum).await; - let response = Client::new(api_server.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) - .await; + let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; + + for invalid_limit in &invalid_limits { + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) + .await; - assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + } } #[tokio::test] @@ -1085,20 +1115,24 @@ mod tracker_apis { } #[tokio::test] - async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_cannot_be_parsed() { + async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { let api_server = start_default_api(&Version::Axum).await; - let invalid_infohash = "INVALID INFOHASH"; + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = Client::new(api_server.get_connection_info()) + .get_torrent(invalid_infohash) + .await; - let response = Client::new(api_server.get_connection_info()) - .get_torrent(invalid_infohash) - .await; + assert_invalid_infohash(response, invalid_infohash).await; + } - assert_bad_request( - response, - "Invalid URL: invalid infohash param: string \"INVALID INFOHASH\", expected expected a 40 character long string", - ) - .await; + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(api_server.get_connection_info()) + .get_torrent(invalid_infohash) + 
.await; + + assert_not_found(response).await; + } } #[tokio::test] @@ -1128,9 +1162,11 @@ mod tracker_apis { use torrust_tracker::protocol::info_hash::InfoHash; + use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; use crate::api::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, - assert_failed_to_whitelist_torrent, assert_ok, assert_token_not_valid, assert_unauthorized, + assert_failed_to_whitelist_torrent, assert_invalid_infohash, assert_not_found, assert_ok, assert_token_not_valid, + assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -1205,6 +1241,27 @@ mod tracker_apis { assert_failed_to_whitelist_torrent(response).await; } + #[tokio::test] + async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { + let api_server = start_default_api(&Version::Axum).await; + + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = Client::new(api_server.get_connection_info()) + .whitelist_a_torrent(invalid_infohash) + .await; + + assert_invalid_infohash(response, invalid_infohash).await; + } + + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(api_server.get_connection_info()) + .whitelist_a_torrent(invalid_infohash) + .await; + + assert_not_found(response).await; + } + } + #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { let api_server = start_default_api(&Version::Axum).await; @@ -1234,6 +1291,27 @@ mod tracker_apis { assert_ok(response).await; } + #[tokio::test] + async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { + let api_server = start_default_api(&Version::Axum).await; + + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = 
Client::new(api_server.get_connection_info()) + .remove_torrent_from_whitelist(invalid_infohash) + .await; + + assert_invalid_infohash(response, invalid_infohash).await; + } + + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(api_server.get_connection_info()) + .remove_torrent_from_whitelist(invalid_infohash) + .await; + + assert_not_found(response).await; + } + } + #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { let api_server = start_default_api(&Version::Axum).await; From 2c222ee906e7e52dd01a41d75f105a67233172c9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 14:17:07 +0000 Subject: [PATCH 0311/1003] test(api): [#143] add test for invalid key duration URL path param --- src/apis/routes.rs | 20 ++++++++++++++++++++ tests/api/client.rs | 4 +++- tests/tracker_api.rs | 22 ++++++++++++++++------ 3 files changed, 39 insertions(+), 7 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 378aca929..5b4fb7e26 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -21,6 +21,26 @@ use crate::tracker::services::statistics::get_metrics; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; +/* code-review: + When Axum cannot parse a path or query param it shows a message like this: + + For the "seconds_valid_or_key" path param: + + "Invalid URL: Cannot parse "-1" to a `u64`" + + That message is not an informative message, specially if you have more than one param. + We should show a message similar to the one we use when we parse the value in the handler. + For example: + + "Invalid URL: invalid infohash param: string \"INVALID VALUE\", expected a 40 character long string" + + We can customize the error message by using a custom type with custom serde deserialization. + The same we are using for the "InfoHashVisitor". 
+ + Input data from HTTP requests should use struts with primitive types (first level of validation). + We can put the second level of validation in the application and domain services. +*/ + pub fn router(tracker: &Arc) -> Router { Router::new() // Stats diff --git a/tests/api/client.rs b/tests/api/client.rs index 5b2072cec..b0b864ff5 100644 --- a/tests/api/client.rs +++ b/tests/api/client.rs @@ -120,7 +120,7 @@ impl Client { self.get_request_with_query(path, query).await } - async fn post(&self, path: &str) -> Response { + pub async fn post(&self, path: &str) -> Response { reqwest::Client::new() .post(self.base_url(path).clone()) .query(&ReqwestQuery::from(self.query_with_token())) @@ -142,6 +142,7 @@ impl Client { format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) } + // Unauthenticated GET request with query component pub async fn get_request_with_query(&self, path: &str, params: Query) -> Response { reqwest::Client::builder() .build() @@ -153,6 +154,7 @@ impl Client { .unwrap() } + // Unauthenticated GET request pub async fn get_request(&self, path: &str) -> Response { reqwest::Client::builder() .build() diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index a5cb0cc4d..6c7de84f0 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -1444,16 +1444,26 @@ mod tracker_apis { } #[tokio::test] - async fn should_fail_generating_a_new_auth_key_when_the_key_duration_cannot_be_parsed() { + async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { let api_server = start_default_api(&Version::Axum).await; - let invalid_key_duration = -1; + let invalid_key_durations = [ + // "", it returns 404 + // " ", it returns 404 + "-1", "text", + ]; - let response = Client::new(api_server.get_connection_info()) - .generate_auth_key(invalid_key_duration) - .await; + for invalid_key_duration in &invalid_key_durations { + let response = Client::new(api_server.get_connection_info()) + .post(&format!("key/{}", 
&invalid_key_duration)) + .await; - assert_bad_request(response, "Invalid URL: Cannot parse `\"-1\"` to a `u64`").await; + assert_bad_request( + response, + &format!("Invalid URL: Cannot parse `\"{invalid_key_duration}\"` to a `u64`"), + ) + .await; + } } #[tokio::test] From 072f3d7d42350ae157817843779499d7bf09587a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 14:25:59 +0000 Subject: [PATCH 0312/1003] test(api): [#143] add more tests for invalid key id URL path param --- tests/tracker_api.rs | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 6c7de84f0..695c9dc9e 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -1499,16 +1499,26 @@ mod tracker_apis { } #[tokio::test] - async fn should_fail_deleting_an_auth_key_when_the_key_id_cannot_be_parsed() { + async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { let api_server = start_default_api(&Version::Axum).await; - let invalid_auth_key_id = "INVALID AUTH KEY ID"; + let invalid_auth_key_ids = [ + // "", it returns a 404 + // " ", it returns a 404 + "0", + "-1", + "INVALID AUTH KEY ID", + "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8", // 32 char key cspell:disable-line + "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8zs", // 34 char key cspell:disable-line + ]; - let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(invalid_auth_key_id) - .await; + for invalid_auth_key_id in &invalid_auth_key_ids { + let response = Client::new(api_server.get_connection_info()) + .delete_auth_key(invalid_auth_key_id) + .await; - assert_bad_request(response, "Invalid auth key id param \"INVALID AUTH KEY ID\"").await; + assert_bad_request(response, &format!("Invalid auth key id param \"{}\"", &invalid_auth_key_id)).await; + } } #[tokio::test] From 8d32628e6e80bd5429917ea12e6e815577b0f203 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 16:27:33 +0000 Subject: [PATCH 0313/1003] 
refactor(api): [#143] extract and rename functions --- src/apis/routes.rs | 98 +++++++++++++++++++++++++++++--------------- tests/api/asserts.rs | 22 +++++++--- tests/tracker_api.rs | 25 +++++------ 3 files changed, 93 insertions(+), 52 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 5b4fb7e26..f0585b225 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -17,8 +17,8 @@ use crate::api::resource::stats::Stats; use crate::api::resource::torrent::{ListItem, Torrent}; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth::KeyId; -use crate::tracker::services::statistics::get_metrics; -use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; +use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; +use crate::tracker::services::torrent::{get_torrent_info, get_torrents, BasicInfo, Info, Pagination}; use crate::tracker::Tracker; /* code-review: @@ -91,6 +91,18 @@ pub enum ActionStatus<'a> { // Resource responses +fn response_stats(tracker_metrics: TrackerMetrics) -> Json { + Json(Stats::from(tracker_metrics)) +} + +fn response_torrent_list(basic_infos: &[BasicInfo]) -> Json> { + Json(ListItem::new_vec(basic_infos)) +} + +fn response_torrent_info(info: Info) -> Response { + Json(Torrent::from(info)).into_response() +} + fn response_auth_key(auth_key: &AuthKey) -> Response { ( StatusCode::OK, @@ -120,6 +132,10 @@ fn response_invalid_info_hash_param(info_hash: &str) -> Response { )) } +fn response_invalid_auth_key_param(invalid_key: &str) -> Response { + response_bad_request(&format!("Invalid auth key id param \"{invalid_key}\"")) +} + fn response_bad_request(body: &str) -> Response { ( StatusCode::BAD_REQUEST, @@ -129,7 +145,35 @@ fn response_bad_request(body: &str) -> Response { .into_response() } -fn response_err(reason: String) -> Response { +fn response_torrent_not_known() -> Response { + Json(json!("torrent not known")).into_response() +} + +fn 
response_failed_to_remove_torrent_from_whitelist() -> Response { + response_unhandled_rejection("failed to remove torrent from whitelist".to_string()) +} + +fn response_failed_to_whitelist_torrent() -> Response { + response_unhandled_rejection("failed to whitelist torrent".to_string()) +} + +fn response_failed_to_reload_whitelist() -> Response { + response_unhandled_rejection("failed to reload whitelist".to_string()) +} + +fn response_failed_to_generate_key() -> Response { + response_unhandled_rejection("failed to generate key".to_string()) +} + +fn response_failed_to_delete_key() -> Response { + response_unhandled_rejection("failed to delete key".to_string()) +} + +fn response_failed_to_reload_keys() -> Response { + response_unhandled_rejection("failed to reload keys".to_string()) +} + +fn response_unhandled_rejection(reason: String) -> Response { ( StatusCode::INTERNAL_SERVER_ERROR, [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], @@ -139,25 +183,19 @@ fn response_err(reason: String) -> Response { } pub async fn get_stats_handler(State(tracker): State>) -> Json { - Json(Stats::from(get_metrics(tracker.clone()).await)) + response_stats(get_metrics(tracker.clone()).await) } #[derive(Deserialize)] pub struct InfoHashParam(String); pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { - let parsing_info_hash_result = InfoHash::from_str(&info_hash.0); - - match parsing_info_hash_result { + match InfoHash::from_str(&info_hash.0) { Err(_) => response_invalid_info_hash_param(&info_hash.0), - Ok(info_hash) => { - let optional_torrent_info = get_torrent_info(tracker.clone(), &info_hash).await; - - match optional_torrent_info { - Some(info) => Json(Torrent::from(info)).into_response(), - None => Json(json!("torrent not known")).into_response(), - } - } + Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { + Some(info) => response_torrent_info(info), + None => response_torrent_not_known(), + }, } } @@ 
-172,26 +210,24 @@ pub async fn get_torrents_handler( State(tracker): State>, pagination: Query, ) -> Json> { - Json(ListItem::new_vec( + response_torrent_list( &get_torrents( tracker.clone(), &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), ) .await, - )) + ) } pub async fn add_torrent_to_whitelist_handler( State(tracker): State>, Path(info_hash): Path, ) -> Response { - let parsing_info_hash_result = InfoHash::from_str(&info_hash.0); - - match parsing_info_hash_result { + match InfoHash::from_str(&info_hash.0) { Err(_) => response_invalid_info_hash_param(&info_hash.0), Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { Ok(..) => response_ok(), - Err(..) => response_err("failed to whitelist torrent".to_string()), + Err(..) => response_failed_to_whitelist_torrent(), }, } } @@ -200,13 +236,11 @@ pub async fn remove_torrent_from_whitelist_handler( State(tracker): State>, Path(info_hash): Path, ) -> Response { - let parsing_info_hash_result = InfoHash::from_str(&info_hash.0); - - match parsing_info_hash_result { + match InfoHash::from_str(&info_hash.0) { Err(_) => response_invalid_info_hash_param(&info_hash.0), Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { Ok(..) => response_ok(), - Err(..) => response_err("failed to remove torrent from whitelist".to_string()), + Err(..) => response_failed_to_remove_torrent_from_whitelist(), }, } } @@ -214,7 +248,7 @@ pub async fn remove_torrent_from_whitelist_handler( pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { match tracker.load_whitelist().await { Ok(..) => response_ok(), - Err(..) => response_err("failed to reload whitelist".to_string()), + Err(..) 
=> response_failed_to_reload_whitelist(), } } @@ -222,7 +256,7 @@ pub async fn generate_auth_key_handler(State(tracker): State>, Path let seconds_valid = seconds_valid_or_key; match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { Ok(auth_key) => response_auth_key(&AuthKey::from(auth_key)), - Err(_) => response_err("failed to generate key".to_string()), + Err(_) => response_failed_to_generate_key(), } } @@ -233,13 +267,11 @@ pub async fn delete_auth_key_handler( State(tracker): State>, Path(seconds_valid_or_key): Path, ) -> Response { - let key_id = KeyId::from_str(&seconds_valid_or_key.0); - - match key_id { - Err(_) => response_bad_request(&format!("Invalid auth key id param \"{}\"", seconds_valid_or_key.0)), + match KeyId::from_str(&seconds_valid_or_key.0) { + Err(_) => response_invalid_auth_key_param(&seconds_valid_or_key.0), Ok(key_id) => match tracker.remove_auth_key(&key_id.to_string()).await { Ok(_) => response_ok(), - Err(_) => response_err("failed to delete key".to_string()), + Err(_) => response_failed_to_delete_key(), }, } } @@ -247,7 +279,7 @@ pub async fn delete_auth_key_handler( pub async fn reload_keys_handler(State(tracker): State>) -> Response { match tracker.load_keys().await { Ok(..) => response_ok(), - Err(..) => response_err("failed to reload keys".to_string()), + Err(..) 
=> response_failed_to_reload_keys(), } } diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 23d76f159..d1730fd9b 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -75,7 +75,7 @@ pub async fn assert_torrent_not_known(response: Response) { assert_eq!(response.text().await.unwrap(), "\"torrent not known\""); } -pub async fn assert_invalid_infohash(response: Response, invalid_infohash: &str) { +pub async fn assert_invalid_infohash_param(response: Response, invalid_infohash: &str) { assert_bad_request( response, &format!( @@ -86,6 +86,18 @@ pub async fn assert_invalid_infohash(response: Response, invalid_infohash: &str) .await; } +pub async fn assert_invalid_auth_key_param(response: Response, invalid_auth_key: &str) { + assert_bad_request(response, &format!("Invalid auth key id param \"{}\"", &invalid_auth_key)).await; +} + +pub async fn assert_invalid_key_duration_param(response: Response, invalid_key_duration: &str) { + assert_bad_request( + response, + &format!("Invalid URL: Cannot parse `\"{invalid_key_duration}\"` to a `u64`"), + ) + .await; +} + pub async fn assert_token_not_valid(response: Response) { assert_unhandled_rejection(response, "token not valid").await; } @@ -102,6 +114,10 @@ pub async fn assert_failed_to_whitelist_torrent(response: Response) { assert_unhandled_rejection(response, "failed to whitelist torrent").await; } +pub async fn assert_failed_to_reload_whitelist(response: Response) { + assert_unhandled_rejection(response, "failed to reload whitelist").await; +} + pub async fn assert_failed_to_generate_key(response: Response) { assert_unhandled_rejection(response, "failed to generate key").await; } @@ -110,10 +126,6 @@ pub async fn assert_failed_to_delete_key(response: Response) { assert_unhandled_rejection(response, "failed to delete key").await; } -pub async fn assert_failed_to_reload_whitelist(response: Response) { - assert_unhandled_rejection(response, "failed to reload whitelist").await; -} - pub async fn 
assert_failed_to_reload_keys(response: Response) { assert_unhandled_rejection(response, "failed to reload keys").await; } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 695c9dc9e..b41eb5c29 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -937,7 +937,7 @@ mod tracker_apis { use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; use crate::api::asserts::{ - assert_bad_request, assert_invalid_infohash, assert_not_found, assert_token_not_valid, assert_torrent_info, + assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, }; use crate::api::client::{Client, Query, QueryParam}; @@ -1123,7 +1123,7 @@ mod tracker_apis { .get_torrent(invalid_infohash) .await; - assert_invalid_infohash(response, invalid_infohash).await; + assert_invalid_infohash_param(response, invalid_infohash).await; } for invalid_infohash in &invalid_infohashes_returning_not_found() { @@ -1165,8 +1165,8 @@ mod tracker_apis { use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; use crate::api::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, - assert_failed_to_whitelist_torrent, assert_invalid_infohash, assert_not_found, assert_ok, assert_token_not_valid, - assert_unauthorized, + assert_failed_to_whitelist_torrent, assert_invalid_infohash_param, assert_not_found, assert_ok, + assert_token_not_valid, assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -1250,7 +1250,7 @@ mod tracker_apis { .whitelist_a_torrent(invalid_infohash) .await; - assert_invalid_infohash(response, invalid_infohash).await; + assert_invalid_infohash_param(response, invalid_infohash).await; } for invalid_infohash in &invalid_infohashes_returning_not_found() { 
@@ -1300,7 +1300,7 @@ mod tracker_apis { .remove_torrent_from_whitelist(invalid_infohash) .await; - assert_invalid_infohash(response, invalid_infohash).await; + assert_invalid_infohash_param(response, invalid_infohash).await; } for invalid_infohash in &invalid_infohashes_returning_not_found() { @@ -1396,8 +1396,9 @@ mod tracker_apis { use torrust_tracker::tracker::auth::Key; use crate::api::asserts::{ - assert_auth_key_utf8, assert_bad_request, assert_failed_to_delete_key, assert_failed_to_generate_key, - assert_failed_to_reload_keys, assert_ok, assert_token_not_valid, assert_unauthorized, + assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, + assert_invalid_auth_key_param, assert_invalid_key_duration_param, assert_ok, assert_token_not_valid, + assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -1458,11 +1459,7 @@ mod tracker_apis { .post(&format!("key/{}", &invalid_key_duration)) .await; - assert_bad_request( - response, - &format!("Invalid URL: Cannot parse `\"{invalid_key_duration}\"` to a `u64`"), - ) - .await; + assert_invalid_key_duration_param(response, invalid_key_duration).await; } } @@ -1517,7 +1514,7 @@ mod tracker_apis { .delete_auth_key(invalid_auth_key_id) .await; - assert_bad_request(response, &format!("Invalid auth key id param \"{}\"", &invalid_auth_key_id)).await; + assert_invalid_auth_key_param(response, invalid_auth_key_id).await; } } From 337e12eef5a205bb772dbda24b0f5bb4c42df45a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 16:36:15 +0000 Subject: [PATCH 0314/1003] feat(api): [#143] replace Warp API with Axum implementation --- src/setup.rs | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/src/setup.rs b/src/setup.rs index daee7eea8..e7535e67d 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -1,11 +1,10 @@ -use 
std::net::SocketAddr; use std::sync::Arc; use log::warn; use tokio::task::JoinHandle; use crate::config::Configuration; -use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, tracker_apis, udp_tracker}; +use crate::jobs::{http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::tracker; /// # Panics @@ -51,24 +50,9 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone())); } - // Start HTTP API server + // Start HTTP API if config.http_api.enabled { - jobs.push(tracker_api::start_job(&config.http_api, tracker.clone()).await); - } - - // Start HTTP APIs server (multiple API versions) - if config.http_api.enabled { - // Temporarily running the new API in the 1313 port - let bind_address = config.http_api.bind_address.clone(); - let mut bind_socket: SocketAddr = bind_address - .parse() - .expect("bind address should be a valid socket address, for example 127.0.0.1:8080"); - bind_socket.set_port(1313); - - let mut http_apis_config = config.http_api.clone(); - http_apis_config.bind_address = bind_socket.to_string(); - - jobs.push(tracker_apis::start_job(&http_apis_config, tracker.clone()).await); + jobs.push(tracker_apis::start_job(&config.http_api, tracker.clone()).await); } // Remove torrents without peers, every interval From 77ec52184d4ad2eaaf22dfc0838e1568013b40ee Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 16:55:09 +0000 Subject: [PATCH 0315/1003] refactor(api): [#143] remove Warp API implementation --- src/api/mod.rs | 19 - src/api/routes.rs | 230 ----------- src/api/server.rs | 32 -- src/jobs/mod.rs | 1 - src/jobs/tracker_api.rs | 50 --- tests/api/asserts.rs | 14 +- tests/api/mod.rs | 5 - tests/api/server.rs | 29 +- tests/tracker_api.rs | 817 +++------------------------------------- 9 files changed, 47 insertions(+), 1150 deletions(-) delete mode 100644 src/api/routes.rs delete mode 100644 src/api/server.rs delete mode 100644 
src/jobs/tracker_api.rs diff --git a/src/api/mod.rs b/src/api/mod.rs index d254c91ac..c6bee0532 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,20 +1 @@ pub mod resource; -pub mod routes; -pub mod server; - -use serde::{Deserialize, Serialize}; - -#[derive(Deserialize, Debug)] -pub struct TorrentInfoQuery { - offset: Option, - limit: Option, -} - -#[derive(Serialize, Debug)] -#[serde(tag = "status", rename_all = "snake_case")] -enum ActionStatus<'a> { - Ok, - Err { reason: std::borrow::Cow<'a, str> }, -} - -impl warp::reject::Reject for ActionStatus<'static> {} diff --git a/src/api/routes.rs b/src/api/routes.rs deleted file mode 100644 index 4280cdb35..000000000 --- a/src/api/routes.rs +++ /dev/null @@ -1,230 +0,0 @@ -use std::cmp::min; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use std::time::Duration; - -use serde::Deserialize; -use warp::{filters, reply, Filter}; - -use super::resource::auth_key::AuthKey; -use super::resource::stats::Stats; -use super::resource::torrent::{ListItem, Torrent}; -use super::{ActionStatus, TorrentInfoQuery}; -use crate::protocol::info_hash::InfoHash; -use crate::tracker; -use crate::tracker::services::statistics::get_metrics; -use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; - -fn authenticate(tokens: HashMap) -> impl Filter + Clone { - #[derive(Deserialize)] - struct AuthToken { - token: Option, - } - - let tokens: HashSet = tokens.into_values().collect(); - - let tokens = Arc::new(tokens); - warp::filters::any::any() - .map(move || tokens.clone()) - .and(filters::query::query::()) - .and_then(|tokens: Arc>, token: AuthToken| async move { - match token.token { - Some(token) => { - if !tokens.contains(&token) { - return Err(warp::reject::custom(ActionStatus::Err { - reason: "token not valid".into(), - })); - } - - Ok(()) - } - None => Err(warp::reject::custom(ActionStatus::Err { - reason: "unauthorized".into(), - })), - } - }) - .untuple_one() -} - 
-#[allow(clippy::too_many_lines)] -#[must_use] -pub fn routes(tracker: &Arc) -> impl Filter + Clone { - // GET /api/torrents?offset=:u32&limit=:u32 - // View torrent list - let api_torrents = tracker.clone(); - let view_torrent_list = filters::method::get() - .and(filters::path::path("torrents")) - .and(filters::path::end()) - .and(filters::query::query()) - .map(move |limits| { - let tracker = api_torrents.clone(); - (limits, tracker) - }) - .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| async move { - let offset = limits.offset.unwrap_or(0); - let limit = min(limits.limit.unwrap_or(1000), 4000); - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&ListItem::new_vec( - &get_torrents(tracker.clone(), &Pagination::new(offset, limit)).await, - ))) - }); - - // GET /api/stats - // View tracker status - let api_stats = tracker.clone(); - let view_stats_list = filters::method::get() - .and(filters::path::path("stats")) - .and(filters::path::end()) - .map(move || api_stats.clone()) - .and_then(|tracker: Arc| async move { - Result::<_, warp::reject::Rejection>::Ok(reply::json(&Stats::from(get_metrics(tracker.clone()).await))) - }); - - // GET /api/torrent/:info_hash - // View torrent info - let t2 = tracker.clone(); - let view_torrent_info = filters::method::get() - .and(filters::path::path("torrent")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t2.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { - let optional_torrent_info = get_torrent_info(tracker.clone(), &info_hash).await; - - match optional_torrent_info { - Some(info) => Ok(reply::json(&Torrent::from(info))), - None => Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")), - } - }); - - // DELETE /api/whitelist/:info_hash - // Delete info hash from whitelist - let t3 = tracker.clone(); - let delete_torrent = filters::method::delete() - 
.and(filters::path::path("whitelist")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t3.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { - match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to remove torrent from whitelist".into(), - })), - } - }); - - // POST /api/whitelist/:info_hash - // Add info hash to whitelist - let t4 = tracker.clone(); - let add_torrent = filters::method::post() - .and(filters::path::path("whitelist")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t4.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { - match tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(..) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to whitelist torrent".into(), - })), - } - }); - - // POST /api/key/:seconds_valid - // Generate new key - let t5 = tracker.clone(); - let create_key = filters::method::post() - .and(filters::path::path("key")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |seconds_valid: u64| { - let tracker = t5.clone(); - (seconds_valid, tracker) - }) - .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { - match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => Ok(warp::reply::json(&AuthKey::from(auth_key))), - Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to generate key".into(), - })), - } - }); - - // DELETE /api/key/:key - // Delete key - let t6 = tracker.clone(); - let delete_key = filters::method::delete() - .and(filters::path::path("key")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |key: String| { - let tracker = t6.clone(); - (key, tracker) - }) - .and_then(|(key, tracker): (String, Arc)| async move { - match tracker.remove_auth_key(&key).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to delete key".into(), - })), - } - }); - - // GET /api/whitelist/reload - // Reload whitelist - let t7 = tracker.clone(); - let reload_whitelist = filters::method::get() - .and(filters::path::path("whitelist")) - .and(filters::path::path("reload")) - .and(filters::path::end()) - .map(move || t7.clone()) - .and_then(|tracker: Arc| async move { - match tracker.load_whitelist().await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to reload whitelist".into(), - })), - } - }); - - // GET /api/keys/reload - // Reload whitelist - let t8 = tracker.clone(); - let reload_keys = filters::method::get() - .and(filters::path::path("keys")) - .and(filters::path::path("reload")) - .and(filters::path::end()) - .map(move || t8.clone()) - .and_then(|tracker: Arc| async move { - match tracker.load_keys().await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to reload keys".into(), - })), - } - }); - - let api_routes = filters::path::path("api").and( - view_torrent_list - .or(delete_torrent) - .or(view_torrent_info) - .or(view_stats_list) - .or(add_torrent) - .or(create_key) - .or(delete_key) - .or(reload_whitelist) - .or(reload_keys), - ); - - 
api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())) -} diff --git a/src/api/server.rs b/src/api/server.rs deleted file mode 100644 index 5d6a3cdfd..000000000 --- a/src/api/server.rs +++ /dev/null @@ -1,32 +0,0 @@ -use std::net::SocketAddr; -use std::sync::Arc; - -use warp::serve; - -use super::routes::routes; -use crate::tracker; - -pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl warp::Future { - let (_addr, api_server) = serve(routes(tracker)).bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - }); - - api_server -} - -pub fn start_tls( - socket_addr: SocketAddr, - ssl_cert_path: String, - ssl_key_path: String, - tracker: &Arc, -) -> impl warp::Future { - let (_addr, api_server) = serve(routes(tracker)) - .tls() - .cert_path(ssl_cert_path) - .key_path(ssl_key_path) - .bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - }); - - api_server -} diff --git a/src/jobs/mod.rs b/src/jobs/mod.rs index 6f9b12bac..ba44a56ad 100644 --- a/src/jobs/mod.rs +++ b/src/jobs/mod.rs @@ -1,5 +1,4 @@ pub mod http_tracker; pub mod torrent_cleanup; -pub mod tracker_api; pub mod tracker_apis; pub mod udp_tracker; diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs deleted file mode 100644 index 211174f35..000000000 --- a/src/jobs/tracker_api.rs +++ /dev/null @@ -1,50 +0,0 @@ -use std::sync::Arc; - -use log::info; -use tokio::sync::oneshot; -use tokio::task::JoinHandle; - -use crate::api::server; -use crate::config::HttpApi; -use crate::tracker; - -#[derive(Debug)] -pub struct ApiServerJobStarted(); - -/// # Panics -/// -/// It would panic if unable to send the `ApiServerJobStarted` notice. 
-pub async fn start_job(config: &HttpApi, tracker: Arc) -> JoinHandle<()> { - let bind_addr = config - .bind_address - .parse::() - .expect("Tracker API bind_address invalid."); - let ssl_enabled = config.ssl_enabled; - let ssl_cert_path = config.ssl_cert_path.clone(); - let ssl_key_path = config.ssl_key_path.clone(); - - let (tx, rx) = oneshot::channel::(); - - // Run the API server - let join_handle = tokio::spawn(async move { - if !ssl_enabled { - info!("Starting Torrust API server on: http://{}", bind_addr); - let handle = server::start(bind_addr, &tracker); - tx.send(ApiServerJobStarted()).expect("the start job dropped"); - handle.await; - } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting Torrust API server on: https://{}", bind_addr); - let handle = server::start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap(), &tracker); - tx.send(ApiServerJobStarted()).expect("the start job dropped"); - handle.await; - } - }); - - // Wait until the API server job is running - match rx.await { - Ok(_msg) => info!("Torrust API server started"), - Err(e) => panic!("the api server dropped: {e}"), - } - - join_handle -} diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index d1730fd9b..11aac64d1 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -25,12 +25,6 @@ pub async fn assert_torrent_info(response: Response, torrent: Torrent) { assert_eq!(response.json::().await.unwrap(), torrent); } -pub async fn assert_auth_key(response: Response) -> AuthKey { - assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - response.json::().await.unwrap() -} - pub async fn assert_auth_key_utf8(response: Response) -> AuthKey { assert_eq!(response.status(), 200); assert_eq!( @@ -58,17 +52,11 @@ pub async fn assert_bad_request(response: Response, body: &str) { pub async fn assert_not_found(response: Response) { assert_eq!(response.status(), 404); - // todo: 
missing header + // todo: missing header in the response //assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); assert_eq!(response.text().await.unwrap(), ""); } -pub async fn assert_method_not_allowed(response: Response) { - assert_eq!(response.status(), 405); - assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); - assert_eq!(response.text().await.unwrap(), "HTTP method not allowed"); -} - pub async fn assert_torrent_not_known(response: Response) { assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 1311a2356..bc4187375 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -8,11 +8,6 @@ pub mod connection_info; pub mod fixtures; pub mod server; -pub enum Version { - Warp, - Axum, -} - /// It forces a database error by dropping all tables. /// That makes any query fail. /// code-review: alternatively we could inject a database mock in the future. 
diff --git a/tests/api/server.rs b/tests/api/server.rs index 9819a0847..c1cd0630a 100644 --- a/tests/api/server.rs +++ b/tests/api/server.rs @@ -2,47 +2,26 @@ use core::panic; use std::sync::Arc; use torrust_tracker::config::{ephemeral_configuration, Configuration}; -use torrust_tracker::jobs::{tracker_api, tracker_apis}; +use torrust_tracker::jobs::tracker_apis; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use super::connection_info::ConnectionInfo; -use super::Version; pub fn tracker_configuration() -> Arc { Arc::new(ephemeral_configuration()) } -pub async fn start_default_api(version: &Version) -> Server { +pub async fn start_default_api() -> Server { let configuration = tracker_configuration(); - start_custom_api(configuration.clone(), version).await + start_custom_api(configuration.clone()).await } -pub async fn start_custom_api(configuration: Arc, version: &Version) -> Server { - match &version { - Version::Warp => start_warp_api(configuration).await, - Version::Axum => start_axum_api(configuration).await, - } -} - -async fn start_warp_api(configuration: Arc) -> Server { +pub async fn start_custom_api(configuration: Arc) -> Server { let server = start(&configuration); - - // Start the HTTP API job - tracker_api::start_job(&configuration.http_api, server.tracker.clone()).await; - - server -} - -async fn start_axum_api(configuration: Arc) -> Server { - let server = start(&configuration); - - // Start HTTP APIs server (multiple API versions) - // Temporarily run the new API on a port number after the current API port tracker_apis::start_job(&configuration.http_api, server.tracker.clone()).await; - server } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index b41eb5c29..d07d2fe2d 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -1,742 +1,12 @@ /// Integration 
tests for the tracker API /// /// ```text -/// cargo test tracker_api -- --nocapture -/// ``` -/// -/// WIP. We are implementing a new API replacing Warp with Axum. -/// The new API runs in parallel until we finish all endpoints. -/// You can test the new API with: -/// -/// ```text /// cargo test tracker_apis -- --nocapture /// ``` extern crate rand; mod api; -mod tracker_api { - - /* - - Endpoints: - - Stats: - GET /api/stats - - Torrents: - GET /api/torrents?offset=:u32&limit=:u32 - GET /api/torrent/:info_hash - - Whitelisted torrents: - POST /api/whitelist/:info_hash - DELETE /api/whitelist/:info_hash - - Whitelist command: - GET /api/whitelist/reload - - Keys: - POST /api/key/:seconds_valid - DELETE /api/key/:key - - Keys command: - GET /api/keys/reload - - */ - - mod for_stats_resources { - use std::str::FromStr; - - use torrust_tracker::api::resource::stats::Stats; - use torrust_tracker::protocol::info_hash::InfoHash; - - use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; - use crate::api::client::Client; - use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::fixtures::sample_peer; - use crate::api::server::start_default_api; - use crate::api::Version; - - #[tokio::test] - async fn should_allow_getting_tracker_statistics() { - let api_server = start_default_api(&Version::Warp).await; - - api_server - .add_torrent( - &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), - &sample_peer(), - ) - .await; - - let response = Client::new(api_server.get_connection_info()).get_tracker_statistics().await; - - assert_stats( - response, - Stats { - torrents: 1, - seeders: 1, - completed: 0, - leechers: 0, - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 
0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - }, - ) - .await; - } - - #[tokio::test] - async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; - - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .get_tracker_statistics() - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .get_tracker_statistics() - .await; - - assert_unauthorized(response).await; - } - } - - mod for_torrent_resources { - use std::str::FromStr; - - use torrust_tracker::api::resource; - use torrust_tracker::api::resource::torrent::{self, Torrent}; - use torrust_tracker::protocol::info_hash::InfoHash; - - use crate::api::asserts::{ - assert_bad_request, assert_method_not_allowed, assert_token_not_valid, assert_torrent_info, assert_torrent_list, - assert_torrent_not_known, assert_unauthorized, - }; - use crate::api::client::{Client, Query, QueryParam}; - use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::fixtures::sample_peer; - use crate::api::server::start_default_api; - use crate::api::Version; - - #[tokio::test] - async fn should_allow_getting_torrents() { - let api_server = start_default_api(&Version::Warp).await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - api_server.add_torrent(&info_hash, &sample_peer()).await; - - let response = Client::new(api_server.get_connection_info()) - .get_torrents(Query::empty()) - .await; - - assert_torrent_list( - response, - vec![torrent::ListItem { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent - }], - ) - .await; - } - - #[tokio::test] - async 
fn should_allow_limiting_the_torrents_in_the_result() { - let api_server = start_default_api(&Version::Warp).await; - - // torrents are ordered alphabetically by infohashes - let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - - api_server.add_torrent(&info_hash_1, &sample_peer()).await; - api_server.add_torrent(&info_hash_2, &sample_peer()).await; - - let response = Client::new(api_server.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) - .await; - - assert_torrent_list( - response, - vec![torrent::ListItem { - info_hash: "0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent - }], - ) - .await; - } - - #[tokio::test] - async fn should_allow_the_torrents_result_pagination() { - let api_server = start_default_api(&Version::Warp).await; - - // torrents are ordered alphabetically by infohashes - let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - - api_server.add_torrent(&info_hash_1, &sample_peer()).await; - api_server.add_torrent(&info_hash_2, &sample_peer()).await; - - let response = Client::new(api_server.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) - .await; - - assert_torrent_list( - response, - vec![torrent::ListItem { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent - }], - ) - .await; - } - - #[tokio::test] - async fn should_fail_getting_torrents_when_query_parameters_cannot_be_parsed() { - let api_server = 
start_default_api(&Version::Warp).await; - - let invalid_offset = "INVALID OFFSET"; - - let response = Client::new(api_server.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) - .await; - - assert_bad_request(response, "Invalid query string").await; - - let invalid_limit = "INVALID LIMIT"; - - let response = Client::new(api_server.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) - .await; - - assert_bad_request(response, "Invalid query string").await; - } - - #[tokio::test] - async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; - - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .get_torrents(Query::empty()) - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .get_torrents(Query::default()) - .await; - - assert_unauthorized(response).await; - } - - #[tokio::test] - async fn should_allow_getting_a_torrent_info() { - let api_server = start_default_api(&Version::Warp).await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - let peer = sample_peer(); - - api_server.add_torrent(&info_hash, &peer).await; - - let response = Client::new(api_server.get_connection_info()) - .get_torrent(&info_hash.to_string()) - .await; - - assert_torrent_info( - response, - Torrent { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: Some(vec![resource::peer::Peer::from(peer)]), - }, - ) - .await; - } - - #[tokio::test] - async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { - let api_server = start_default_api(&Version::Warp).await; - - let info_hash = 
InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - let response = Client::new(api_server.get_connection_info()) - .get_torrent(&info_hash.to_string()) - .await; - - assert_torrent_not_known(response).await; - } - - #[tokio::test] - async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_cannot_be_parsed() { - let api_server = start_default_api(&Version::Warp).await; - - let invalid_infohash = "INVALID INFOHASH"; - - let response = Client::new(api_server.get_connection_info()) - .get_torrent(invalid_infohash) - .await; - - assert_method_not_allowed(response).await; - } - - #[tokio::test] - async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - api_server.add_torrent(&info_hash, &sample_peer()).await; - - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .get_torrent(&info_hash.to_string()) - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .get_torrent(&info_hash.to_string()) - .await; - - assert_unauthorized(response).await; - } - } - - mod for_whitelisted_torrent_resources { - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - - use crate::api::asserts::{ - assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, - assert_failed_to_whitelist_torrent, assert_ok, assert_token_not_valid, assert_unauthorized, - }; - use crate::api::client::Client; - use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::server::start_default_api; - use crate::api::{force_database_error, Version}; - - #[tokio::test] - async fn should_allow_whitelisting_a_torrent() { - let api_server = start_default_api(&Version::Warp).await; 
- - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let response = Client::new(api_server.get_connection_info()) - .whitelist_a_torrent(&info_hash) - .await; - - assert_ok(response).await; - assert!( - api_server - .tracker - .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) - .await - ); - } - - #[tokio::test] - async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let api_server = start_default_api(&Version::Warp).await; - - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let api_client = Client::new(api_server.get_connection_info()); - - let response = api_client.whitelist_a_torrent(&info_hash).await; - assert_ok(response).await; - - let response = api_client.whitelist_a_torrent(&info_hash).await; - assert_ok(response).await; - } - - #[tokio::test] - async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; - - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .whitelist_a_torrent(&info_hash) - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .whitelist_a_torrent(&info_hash) - .await; - - assert_unauthorized(response).await; - } - - #[tokio::test] - async fn should_fail_when_the_torrent_cannot_be_whitelisted() { - let api_server = start_default_api(&Version::Warp).await; - - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - force_database_error(&api_server.tracker); - - let response = Client::new(api_server.get_connection_info()) - .whitelist_a_torrent(&info_hash) - .await; - - assert_failed_to_whitelist_torrent(response).await; - } - - #[tokio::test] - async fn should_allow_removing_a_torrent_from_the_whitelist() { - let api_server = 
start_default_api(&Version::Warp).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - let response = Client::new(api_server.get_connection_info()) - .remove_torrent_from_whitelist(&hash) - .await; - - assert_ok(response).await; - assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); - } - - #[tokio::test] - async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { - let api_server = start_default_api(&Version::Warp).await; - - let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let response = Client::new(api_server.get_connection_info()) - .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) - .await; - - assert_ok(response).await; - } - - #[tokio::test] - async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { - let api_server = start_default_api(&Version::Warp).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - force_database_error(&api_server.tracker); - - let response = Client::new(api_server.get_connection_info()) - .remove_torrent_from_whitelist(&hash) - .await; - - assert_failed_to_remove_torrent_from_whitelist(response).await; - } - - #[tokio::test] - async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - 
.remove_torrent_from_whitelist(&hash) - .await; - - assert_token_not_valid(response).await; - - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .remove_torrent_from_whitelist(&hash) - .await; - - assert_unauthorized(response).await; - } - - #[tokio::test] - async fn should_allow_reload_the_whitelist_from_the_database() { - let api_server = start_default_api(&Version::Warp).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; - - assert_ok(response).await; - /* todo: this assert fails because the whitelist has not been reloaded yet. - We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent - is whitelisted and use that endpoint to check if the torrent is still there after reloading. 
- assert!( - !(api_server - .tracker - .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) - .await) - ); - */ - } - - #[tokio::test] - async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { - let api_server = start_default_api(&Version::Warp).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - force_database_error(&api_server.tracker); - - let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; - - assert_failed_to_reload_whitelist(response).await; - } - } - - mod for_key_resources { - use std::time::Duration; - - use torrust_tracker::tracker::auth::Key; - - use crate::api::asserts::{ - assert_auth_key, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, - assert_method_not_allowed, assert_ok, assert_token_not_valid, assert_unauthorized, - }; - use crate::api::client::Client; - use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::server::start_default_api; - use crate::api::{force_database_error, Version}; - - #[tokio::test] - async fn should_allow_generating_a_new_auth_key() { - let api_server = start_default_api(&Version::Warp).await; - - let seconds_valid = 60; - - let response = Client::new(api_server.get_connection_info()) - .generate_auth_key(seconds_valid) - .await; - - let auth_key_resource = assert_auth_key(response).await; - - // Verify the key with the tracker - assert!(api_server - .tracker - .verify_auth_key(&Key::from(auth_key_resource)) - .await - .is_ok()); - } - - #[tokio::test] - async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; - - let seconds_valid = 60; - - let response = 
Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .generate_auth_key(seconds_valid) - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .generate_auth_key(seconds_valid) - .await; - - assert_unauthorized(response).await; - } - - #[tokio::test] - async fn should_fail_generating_a_new_auth_key_when_the_key_duration_cannot_be_parsed() { - let api_server = start_default_api(&Version::Warp).await; - - let invalid_key_duration = -1; - - let response = Client::new(api_server.get_connection_info()) - .generate_auth_key(invalid_key_duration) - .await; - - assert_method_not_allowed(response).await; - } - - #[tokio::test] - async fn should_fail_when_the_auth_key_cannot_be_generated() { - let api_server = start_default_api(&Version::Warp).await; - - force_database_error(&api_server.tracker); - - let seconds_valid = 60; - let response = Client::new(api_server.get_connection_info()) - .generate_auth_key(seconds_valid) - .await; - - assert_failed_to_generate_key(response).await; - } - - #[tokio::test] - async fn should_allow_deleting_an_auth_key() { - let api_server = start_default_api(&Version::Warp).await; - - let seconds_valid = 60; - let auth_key = api_server - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(&auth_key.key) - .await; - - assert_ok(response).await; - } - - #[tokio::test] - async fn should_fail_deleting_an_auth_key_when_the_key_id_cannot_be_parsed() { - let api_server = start_default_api(&Version::Warp).await; - - let invalid_auth_key_id = "INVALID AUTH KEY ID"; - - let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(invalid_auth_key_id) - .await; - - assert_failed_to_delete_key(response).await; - } - - #[tokio::test] - async fn should_fail_when_the_auth_key_cannot_be_deleted() { - let 
api_server = start_default_api(&Version::Warp).await; - - let seconds_valid = 60; - let auth_key = api_server - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - force_database_error(&api_server.tracker); - - let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(&auth_key.key) - .await; - - assert_failed_to_delete_key(response).await; - } - - #[tokio::test] - async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; - - let seconds_valid = 60; - - // Generate new auth key - let auth_key = api_server - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .delete_auth_key(&auth_key.key) - .await; - - assert_token_not_valid(response).await; - - // Generate new auth key - let auth_key = api_server - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .delete_auth_key(&auth_key.key) - .await; - - assert_unauthorized(response).await; - } - - #[tokio::test] - async fn should_allow_reloading_keys() { - let api_server = start_default_api(&Version::Warp).await; - - let seconds_valid = 60; - api_server - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(api_server.get_connection_info()).reload_keys().await; - - assert_ok(response).await; - } - - #[tokio::test] - async fn should_fail_when_keys_cannot_be_reloaded() { - let api_server = start_default_api(&Version::Warp).await; - - let seconds_valid = 60; - api_server - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - force_database_error(&api_server.tracker); - - let response = 
Client::new(api_server.get_connection_info()).reload_keys().await; - - assert_failed_to_reload_keys(response).await; - } - - #[tokio::test] - async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; - - let seconds_valid = 60; - api_server - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .reload_keys() - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .reload_keys() - .await; - - assert_unauthorized(response).await; - } - } -} - -/// The new API implementation using Axum mod tracker_apis { /* @@ -790,11 +60,10 @@ mod tracker_apis { use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; use crate::api::client::{Client, Query, QueryParam}; use crate::api::server::start_default_api; - use crate::api::Version; #[tokio::test] async fn should_authenticate_requests_by_using_a_token_query_param() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let token = api_server.get_connection_info().api_token.unwrap(); @@ -807,7 +76,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_missing() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let response = Client::new(api_server.get_connection_info()) .get_request_with_query("stats", Query::default()) @@ -818,7 +87,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_empty() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let response = Client::new(api_server.get_connection_info()) .get_request_with_query("stats", 
Query::params([QueryParam::new("token", "")].to_vec())) @@ -829,7 +98,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_invalid() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let response = Client::new(api_server.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) @@ -840,7 +109,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let token = api_server.get_connection_info().api_token.unwrap(); @@ -871,11 +140,10 @@ mod tracker_apis { use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; use crate::api::server::start_default_api; - use crate::api::Version; #[tokio::test] async fn should_allow_getting_tracker_statistics() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; api_server .add_torrent( @@ -912,7 +180,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_tracker_statistics() @@ -944,11 +212,10 @@ mod tracker_apis { use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; use crate::api::server::start_default_api; - use crate::api::Version; #[tokio::test] async fn should_allow_getting_torrents() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let info_hash = 
InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -973,7 +240,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_limiting_the_torrents_in_the_result() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -1001,7 +268,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_the_torrents_result_pagination() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -1029,7 +296,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; @@ -1044,7 +311,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; @@ -1059,7 +326,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_torrents(Query::empty()) @@ -1076,7 +343,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_getting_a_torrent_info() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let info_hash 
= InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -1103,7 +370,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -1116,7 +383,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { let response = Client::new(api_server.get_connection_info()) @@ -1137,7 +404,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -1170,12 +437,12 @@ mod tracker_apis { }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::force_database_error; use crate::api::server::start_default_api; - use crate::api::{force_database_error, Version}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -1194,7 +461,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -1209,7 +476,7 @@ 
mod tracker_apis { #[tokio::test] async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -1228,7 +495,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_whitelisted() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -1243,7 +510,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { let response = Client::new(api_server.get_connection_info()) @@ -1264,7 +531,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -1280,7 +547,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -1293,7 +560,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { let response = 
Client::new(api_server.get_connection_info()) @@ -1314,7 +581,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -1331,7 +598,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -1353,7 +620,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_reload_the_whitelist_from_the_database() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -1376,7 +643,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -1402,12 +669,12 @@ mod tracker_apis { }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::force_database_error; use crate::api::server::start_default_api; - use crate::api::{force_database_error, Version}; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let seconds_valid = 60; @@ -1427,7 +694,7 
@@ mod tracker_apis { #[tokio::test] async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let seconds_valid = 60; @@ -1446,7 +713,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let invalid_key_durations = [ // "", it returns 404 @@ -1465,7 +732,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_generated() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; force_database_error(&api_server.tracker); @@ -1479,7 +746,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_deleting_an_auth_key() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let seconds_valid = 60; let auth_key = api_server @@ -1497,7 +764,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let invalid_auth_key_ids = [ // "", it returns a 404 @@ -1520,7 +787,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_deleted() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let seconds_valid = 60; let auth_key = api_server @@ -1540,7 +807,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let seconds_valid = 60; @@ -1573,7 +840,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_reloading_keys() { - let 
api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let seconds_valid = 60; api_server @@ -1589,7 +856,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_keys_cannot_be_reloaded() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let seconds_valid = 60; api_server @@ -1607,7 +874,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let seconds_valid = 60; api_server From 6dd3c482868dbee638e71b4bee86cce5c751d476 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 17:04:38 +0000 Subject: [PATCH 0316/1003] refactor(api): [#143] move API resources mod --- src/api/mod.rs | 1 - src/apis/mod.rs | 1 + src/{api/resource => apis/resources}/auth_key.rs | 0 src/{api/resource => apis/resources}/mod.rs | 0 src/{api/resource => apis/resources}/peer.rs | 0 src/{api/resource => apis/resources}/stats.rs | 0 src/{api/resource => apis/resources}/torrent.rs | 4 ++-- src/apis/routes.rs | 6 +++--- src/lib.rs | 1 - tests/api/asserts.rs | 6 +++--- tests/tracker_api.rs | 8 ++++---- 11 files changed, 13 insertions(+), 14 deletions(-) delete mode 100644 src/api/mod.rs rename src/{api/resource => apis/resources}/auth_key.rs (100%) rename src/{api/resource => apis/resources}/mod.rs (100%) rename src/{api/resource => apis/resources}/peer.rs (100%) rename src/{api/resource => apis/resources}/stats.rs (100%) rename src/{api/resource => apis/resources}/torrent.rs (97%) diff --git a/src/api/mod.rs b/src/api/mod.rs deleted file mode 100644 index c6bee0532..000000000 --- a/src/api/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod resource; diff --git a/src/apis/mod.rs b/src/apis/mod.rs index ea1615d6b..7ed3ecb76 100644 --- a/src/apis/mod.rs +++ b/src/apis/mod.rs @@ -1,3 +1,4 @@ pub mod middlewares; +pub mod 
resources; pub mod routes; pub mod server; diff --git a/src/api/resource/auth_key.rs b/src/apis/resources/auth_key.rs similarity index 100% rename from src/api/resource/auth_key.rs rename to src/apis/resources/auth_key.rs diff --git a/src/api/resource/mod.rs b/src/apis/resources/mod.rs similarity index 100% rename from src/api/resource/mod.rs rename to src/apis/resources/mod.rs diff --git a/src/api/resource/peer.rs b/src/apis/resources/peer.rs similarity index 100% rename from src/api/resource/peer.rs rename to src/apis/resources/peer.rs diff --git a/src/api/resource/stats.rs b/src/apis/resources/stats.rs similarity index 100% rename from src/api/resource/stats.rs rename to src/apis/resources/stats.rs diff --git a/src/api/resource/torrent.rs b/src/apis/resources/torrent.rs similarity index 97% rename from src/api/resource/torrent.rs rename to src/apis/resources/torrent.rs index 56fead37a..3d8b2f427 100644 --- a/src/api/resource/torrent.rs +++ b/src/apis/resources/torrent.rs @@ -74,8 +74,8 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::api::resource::peer::Peer; - use crate::api::resource::torrent::{ListItem, Torrent}; + use crate::apis::resources::peer::Peer; + use crate::apis::resources::torrent::{ListItem, Torrent}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; diff --git a/src/apis/routes.rs b/src/apis/routes.rs index f0585b225..e11e7d4c8 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -12,9 +12,9 @@ use serde::{de, Deserialize, Deserializer, Serialize}; use serde_json::json; use super::middlewares::auth::auth; -use crate::api::resource::auth_key::AuthKey; -use crate::api::resource::stats::Stats; -use crate::api::resource::torrent::{ListItem, Torrent}; +use crate::apis::resources::auth_key::AuthKey; +use crate::apis::resources::stats::Stats; +use crate::apis::resources::torrent::{ListItem, Torrent}; use 
crate::protocol::info_hash::InfoHash; use crate::tracker::auth::KeyId; use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; diff --git a/src/lib.rs b/src/lib.rs index ebf589aa9..e8cf53045 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,3 @@ -pub mod api; pub mod apis; pub mod config; pub mod databases; diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 11aac64d1..07383f795 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -1,9 +1,9 @@ // code-review: should we use macros to return the exact line where the assert fails? use reqwest::Response; -use torrust_tracker::api::resource::auth_key::AuthKey; -use torrust_tracker::api::resource::stats::Stats; -use torrust_tracker::api::resource::torrent::{ListItem, Torrent}; +use torrust_tracker::apis::resources::auth_key::AuthKey; +use torrust_tracker::apis::resources::stats::Stats; +use torrust_tracker::apis::resources::torrent::{ListItem, Torrent}; // Resource responses diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index d07d2fe2d..9609e80ab 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -132,7 +132,7 @@ mod tracker_apis { mod for_stats_resources { use std::str::FromStr; - use torrust_tracker::api::resource::stats::Stats; + use torrust_tracker::apis::resources::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; @@ -199,8 +199,8 @@ mod tracker_apis { mod for_torrent_resources { use std::str::FromStr; - use torrust_tracker::api::resource::torrent::Torrent; - use torrust_tracker::api::resource::{self, torrent}; + use torrust_tracker::apis::resources::torrent::Torrent; + use torrust_tracker::apis::resources::{self, torrent}; use torrust_tracker::protocol::info_hash::InfoHash; use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; @@ -362,7 +362,7 @@ mod tracker_apis { seeders: 1, completed: 0, leechers: 0, - 
peers: Some(vec![resource::peer::Peer::from(peer)]), + peers: Some(vec![resources::peer::Peer::from(peer)]), }, ) .await; From 2a92b0a01a1f199dab23bfef3ffbd3ee0a9cb272 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 17:18:41 +0000 Subject: [PATCH 0317/1003] refactor(api): extract mod for API responses --- src/apis/mod.rs | 1 + src/apis/responses.rs | 130 ++++++++++++++++++++++++++++++++++++++++++ src/apis/routes.rs | 118 ++++---------------------------------- 3 files changed, 142 insertions(+), 107 deletions(-) create mode 100644 src/apis/responses.rs diff --git a/src/apis/mod.rs b/src/apis/mod.rs index 7ed3ecb76..5a2aca52a 100644 --- a/src/apis/mod.rs +++ b/src/apis/mod.rs @@ -1,4 +1,5 @@ pub mod middlewares; pub mod resources; +pub mod responses; pub mod routes; pub mod server; diff --git a/src/apis/responses.rs b/src/apis/responses.rs new file mode 100644 index 000000000..179c5f8d1 --- /dev/null +++ b/src/apis/responses.rs @@ -0,0 +1,130 @@ +use axum::http::{header, StatusCode}; +use axum::response::{IntoResponse, Json, Response}; +use serde::Serialize; +use serde_json::json; + +use crate::apis::resources::auth_key::AuthKey; +use crate::apis::resources::stats::Stats; +use crate::apis::resources::torrent::{ListItem, Torrent}; +use crate::tracker::services::statistics::TrackerMetrics; +use crate::tracker::services::torrent::{BasicInfo, Info}; + +#[derive(Serialize, Debug)] +#[serde(tag = "status", rename_all = "snake_case")] +pub enum ActionStatus<'a> { + Ok, + Err { reason: std::borrow::Cow<'a, str> }, +} + +// Resource responses + +#[must_use] +pub fn response_stats(tracker_metrics: TrackerMetrics) -> Json { + Json(Stats::from(tracker_metrics)) +} + +#[must_use] +pub fn response_torrent_list(basic_infos: &[BasicInfo]) -> Json> { + Json(ListItem::new_vec(basic_infos)) +} + +#[must_use] +pub fn response_torrent_info(info: Info) -> Response { + Json(Torrent::from(info)).into_response() +} + +/// # Panics +/// +/// Will panic if it can't convert 
the `AuthKey` resource to json +#[must_use] +pub fn response_auth_key(auth_key: &AuthKey) -> Response { + ( + StatusCode::OK, + [(header::CONTENT_TYPE, "application/json; charset=utf-8")], + serde_json::to_string(auth_key).unwrap(), + ) + .into_response() +} + +// OK response + +/// # Panics +/// +/// Will panic if it can't convert the `ActionStatus` to json +#[must_use] +pub fn response_ok() -> Response { + ( + StatusCode::OK, + [(header::CONTENT_TYPE, "application/json")], + serde_json::to_string(&ActionStatus::Ok).unwrap(), + ) + .into_response() +} + +// Error responses + +#[must_use] +pub fn response_invalid_info_hash_param(info_hash: &str) -> Response { + response_bad_request(&format!( + "Invalid URL: invalid infohash param: string \"{}\", expected a 40 character long string", + info_hash + )) +} + +#[must_use] +pub fn response_invalid_auth_key_param(invalid_key: &str) -> Response { + response_bad_request(&format!("Invalid auth key id param \"{invalid_key}\"")) +} + +fn response_bad_request(body: &str) -> Response { + ( + StatusCode::BAD_REQUEST, + [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], + body.to_owned(), + ) + .into_response() +} + +#[must_use] +pub fn response_torrent_not_known() -> Response { + Json(json!("torrent not known")).into_response() +} + +#[must_use] +pub fn response_failed_to_remove_torrent_from_whitelist() -> Response { + response_unhandled_rejection("failed to remove torrent from whitelist".to_string()) +} + +#[must_use] +pub fn response_failed_to_whitelist_torrent() -> Response { + response_unhandled_rejection("failed to whitelist torrent".to_string()) +} + +#[must_use] +pub fn response_failed_to_reload_whitelist() -> Response { + response_unhandled_rejection("failed to reload whitelist".to_string()) +} + +#[must_use] +pub fn response_failed_to_generate_key() -> Response { + response_unhandled_rejection("failed to generate key".to_string()) +} + +#[must_use] +pub fn response_failed_to_delete_key() -> Response { + 
response_unhandled_rejection("failed to delete key".to_string()) +} + +#[must_use] +pub fn response_failed_to_reload_keys() -> Response { + response_unhandled_rejection("failed to reload keys".to_string()) +} + +fn response_unhandled_rejection(reason: String) -> Response { + ( + StatusCode::INTERNAL_SERVER_ERROR, + [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], + format!("Unhandled rejection: {:?}", ActionStatus::Err { reason: reason.into() }), + ) + .into_response() +} diff --git a/src/apis/routes.rs b/src/apis/routes.rs index e11e7d4c8..2f96569e3 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -4,21 +4,25 @@ use std::sync::Arc; use std::time::Duration; use axum::extract::{Path, Query, State}; -use axum::http::{header, StatusCode}; -use axum::response::{IntoResponse, Json, Response}; +use axum::response::{Json, Response}; use axum::routing::{delete, get, post}; use axum::{middleware, Router}; -use serde::{de, Deserialize, Deserializer, Serialize}; -use serde_json::json; +use serde::{de, Deserialize, Deserializer}; use super::middlewares::auth::auth; +use super::responses::{ + response_auth_key, response_failed_to_delete_key, response_failed_to_generate_key, response_failed_to_reload_keys, + response_failed_to_reload_whitelist, response_failed_to_remove_torrent_from_whitelist, response_failed_to_whitelist_torrent, + response_invalid_auth_key_param, response_invalid_info_hash_param, response_ok, response_stats, response_torrent_info, + response_torrent_list, response_torrent_not_known, +}; use crate::apis::resources::auth_key::AuthKey; use crate::apis::resources::stats::Stats; -use crate::apis::resources::torrent::{ListItem, Torrent}; +use crate::apis::resources::torrent::ListItem; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth::KeyId; -use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; -use crate::tracker::services::torrent::{get_torrent_info, get_torrents, BasicInfo, Info, Pagination}; +use 
crate::tracker::services::statistics::get_metrics; +use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; /* code-review: @@ -82,106 +86,6 @@ pub fn router(tracker: &Arc) -> Router { .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)) } -#[derive(Serialize, Debug)] -#[serde(tag = "status", rename_all = "snake_case")] -pub enum ActionStatus<'a> { - Ok, - Err { reason: std::borrow::Cow<'a, str> }, -} - -// Resource responses - -fn response_stats(tracker_metrics: TrackerMetrics) -> Json { - Json(Stats::from(tracker_metrics)) -} - -fn response_torrent_list(basic_infos: &[BasicInfo]) -> Json> { - Json(ListItem::new_vec(basic_infos)) -} - -fn response_torrent_info(info: Info) -> Response { - Json(Torrent::from(info)).into_response() -} - -fn response_auth_key(auth_key: &AuthKey) -> Response { - ( - StatusCode::OK, - [(header::CONTENT_TYPE, "application/json; charset=utf-8")], - serde_json::to_string(auth_key).unwrap(), - ) - .into_response() -} - -// OK response - -fn response_ok() -> Response { - ( - StatusCode::OK, - [(header::CONTENT_TYPE, "application/json")], - serde_json::to_string(&ActionStatus::Ok).unwrap(), - ) - .into_response() -} - -// Error responses - -fn response_invalid_info_hash_param(info_hash: &str) -> Response { - response_bad_request(&format!( - "Invalid URL: invalid infohash param: string \"{}\", expected a 40 character long string", - info_hash - )) -} - -fn response_invalid_auth_key_param(invalid_key: &str) -> Response { - response_bad_request(&format!("Invalid auth key id param \"{invalid_key}\"")) -} - -fn response_bad_request(body: &str) -> Response { - ( - StatusCode::BAD_REQUEST, - [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], - body.to_owned(), - ) - .into_response() -} - -fn response_torrent_not_known() -> Response { - Json(json!("torrent not known")).into_response() -} - -fn response_failed_to_remove_torrent_from_whitelist() -> Response { - 
response_unhandled_rejection("failed to remove torrent from whitelist".to_string()) -} - -fn response_failed_to_whitelist_torrent() -> Response { - response_unhandled_rejection("failed to whitelist torrent".to_string()) -} - -fn response_failed_to_reload_whitelist() -> Response { - response_unhandled_rejection("failed to reload whitelist".to_string()) -} - -fn response_failed_to_generate_key() -> Response { - response_unhandled_rejection("failed to generate key".to_string()) -} - -fn response_failed_to_delete_key() -> Response { - response_unhandled_rejection("failed to delete key".to_string()) -} - -fn response_failed_to_reload_keys() -> Response { - response_unhandled_rejection("failed to reload keys".to_string()) -} - -fn response_unhandled_rejection(reason: String) -> Response { - ( - StatusCode::INTERNAL_SERVER_ERROR, - [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], - format!("Unhandled rejection: {:?}", ActionStatus::Err { reason: reason.into() }), - ) - .into_response() -} - pub async fn get_stats_handler(State(tracker): State>) -> Json { response_stats(get_metrics(tracker.clone()).await) } From 0940463449a314bd35eec17caf0d31f0ae9283a9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 17:32:50 +0000 Subject: [PATCH 0318/1003] refactor(api): [#143] extract api mods --- src/apis/handlers.rs | 138 +++++++++++++++++++++++++++++++++++++++++++ src/apis/mod.rs | 1 + src/apis/routes.rs | 138 ++----------------------------------------- 3 files changed, 143 insertions(+), 134 deletions(-) create mode 100644 src/apis/handlers.rs diff --git a/src/apis/handlers.rs b/src/apis/handlers.rs new file mode 100644 index 000000000..d625b761a --- /dev/null +++ b/src/apis/handlers.rs @@ -0,0 +1,138 @@ +use std::fmt; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +use axum::extract::{Path, Query, State}; +use axum::response::{Json, Response}; +use serde::{de, Deserialize, Deserializer}; + +use super::responses::{ + 
response_auth_key, response_failed_to_delete_key, response_failed_to_generate_key, response_failed_to_reload_keys, + response_failed_to_reload_whitelist, response_failed_to_remove_torrent_from_whitelist, response_failed_to_whitelist_torrent, + response_invalid_auth_key_param, response_invalid_info_hash_param, response_ok, response_stats, response_torrent_info, + response_torrent_list, response_torrent_not_known, +}; +use crate::apis::resources::auth_key::AuthKey; +use crate::apis::resources::stats::Stats; +use crate::apis::resources::torrent::ListItem; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::auth::KeyId; +use crate::tracker::services::statistics::get_metrics; +use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; +use crate::tracker::Tracker; + +pub async fn get_stats_handler(State(tracker): State>) -> Json { + response_stats(get_metrics(tracker.clone()).await) +} + +#[derive(Deserialize)] +pub struct InfoHashParam(String); + +pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => response_invalid_info_hash_param(&info_hash.0), + Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { + Some(info) => response_torrent_info(info), + None => response_torrent_not_known(), + }, + } +} + +#[derive(Deserialize)] +pub struct PaginationParams { + #[serde(default, deserialize_with = "empty_string_as_none")] + pub offset: Option, + pub limit: Option, +} + +pub async fn get_torrents_handler( + State(tracker): State>, + pagination: Query, +) -> Json> { + response_torrent_list( + &get_torrents( + tracker.clone(), + &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), + ) + .await, + ) +} + +pub async fn add_torrent_to_whitelist_handler( + State(tracker): State>, + Path(info_hash): Path, +) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => 
response_invalid_info_hash_param(&info_hash.0), + Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { + Ok(..) => response_ok(), + Err(..) => response_failed_to_whitelist_torrent(), + }, + } +} + +pub async fn remove_torrent_from_whitelist_handler( + State(tracker): State>, + Path(info_hash): Path, +) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => response_invalid_info_hash_param(&info_hash.0), + Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { + Ok(..) => response_ok(), + Err(..) => response_failed_to_remove_torrent_from_whitelist(), + }, + } +} + +pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { + match tracker.load_whitelist().await { + Ok(..) => response_ok(), + Err(..) => response_failed_to_reload_whitelist(), + } +} + +pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { + let seconds_valid = seconds_valid_or_key; + match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { + Ok(auth_key) => response_auth_key(&AuthKey::from(auth_key)), + Err(_) => response_failed_to_generate_key(), + } +} + +#[derive(Deserialize)] +pub struct KeyIdParam(String); + +pub async fn delete_auth_key_handler( + State(tracker): State>, + Path(seconds_valid_or_key): Path, +) -> Response { + match KeyId::from_str(&seconds_valid_or_key.0) { + Err(_) => response_invalid_auth_key_param(&seconds_valid_or_key.0), + Ok(key_id) => match tracker.remove_auth_key(&key_id.to_string()).await { + Ok(_) => response_ok(), + Err(_) => response_failed_to_delete_key(), + }, + } +} + +pub async fn reload_keys_handler(State(tracker): State>) -> Response { + match tracker.load_keys().await { + Ok(..) => response_ok(), + Err(..) 
=> response_failed_to_reload_keys(), + } +} + +/// Serde deserialization decorator to map empty Strings to None, +fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> +where + D: Deserializer<'de>, + T: FromStr, + T::Err: fmt::Display, +{ + let opt = Option::::deserialize(de)?; + match opt.as_deref() { + None | Some("") => Ok(None), + Some(s) => FromStr::from_str(s).map_err(de::Error::custom).map(Some), + } +} diff --git a/src/apis/mod.rs b/src/apis/mod.rs index 5a2aca52a..a646d5543 100644 --- a/src/apis/mod.rs +++ b/src/apis/mod.rs @@ -1,3 +1,4 @@ +pub mod handlers; pub mod middlewares; pub mod resources; pub mod responses; diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 2f96569e3..a37d79b4d 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -1,28 +1,13 @@ -use std::fmt; -use std::str::FromStr; use std::sync::Arc; -use std::time::Duration; -use axum::extract::{Path, Query, State}; -use axum::response::{Json, Response}; use axum::routing::{delete, get, post}; use axum::{middleware, Router}; -use serde::{de, Deserialize, Deserializer}; -use super::middlewares::auth::auth; -use super::responses::{ - response_auth_key, response_failed_to_delete_key, response_failed_to_generate_key, response_failed_to_reload_keys, - response_failed_to_reload_whitelist, response_failed_to_remove_torrent_from_whitelist, response_failed_to_whitelist_torrent, - response_invalid_auth_key_param, response_invalid_info_hash_param, response_ok, response_stats, response_torrent_info, - response_torrent_list, response_torrent_not_known, +use super::handlers::{ + add_torrent_to_whitelist_handler, delete_auth_key_handler, generate_auth_key_handler, get_stats_handler, get_torrent_handler, + get_torrents_handler, reload_keys_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler, }; -use crate::apis::resources::auth_key::AuthKey; -use crate::apis::resources::stats::Stats; -use crate::apis::resources::torrent::ListItem; -use 
crate::protocol::info_hash::InfoHash; -use crate::tracker::auth::KeyId; -use crate::tracker::services::statistics::get_metrics; -use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; +use super::middlewares::auth::auth; use crate::tracker::Tracker; /* code-review: @@ -85,118 +70,3 @@ pub fn router(tracker: &Arc) -> Router { .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)) } - -pub async fn get_stats_handler(State(tracker): State>) -> Json { - response_stats(get_metrics(tracker.clone()).await) -} - -#[derive(Deserialize)] -pub struct InfoHashParam(String); - -pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { - match InfoHash::from_str(&info_hash.0) { - Err(_) => response_invalid_info_hash_param(&info_hash.0), - Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { - Some(info) => response_torrent_info(info), - None => response_torrent_not_known(), - }, - } -} - -#[derive(Deserialize)] -pub struct PaginationParams { - #[serde(default, deserialize_with = "empty_string_as_none")] - pub offset: Option, - pub limit: Option, -} - -pub async fn get_torrents_handler( - State(tracker): State>, - pagination: Query, -) -> Json> { - response_torrent_list( - &get_torrents( - tracker.clone(), - &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), - ) - .await, - ) -} - -pub async fn add_torrent_to_whitelist_handler( - State(tracker): State>, - Path(info_hash): Path, -) -> Response { - match InfoHash::from_str(&info_hash.0) { - Err(_) => response_invalid_info_hash_param(&info_hash.0), - Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(..) => response_ok(), - Err(..) 
=> response_failed_to_whitelist_torrent(), - }, - } -} - -pub async fn remove_torrent_from_whitelist_handler( - State(tracker): State>, - Path(info_hash): Path, -) -> Response { - match InfoHash::from_str(&info_hash.0) { - Err(_) => response_invalid_info_hash_param(&info_hash.0), - Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(..) => response_ok(), - Err(..) => response_failed_to_remove_torrent_from_whitelist(), - }, - } -} - -pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { - match tracker.load_whitelist().await { - Ok(..) => response_ok(), - Err(..) => response_failed_to_reload_whitelist(), - } -} - -pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { - let seconds_valid = seconds_valid_or_key; - match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => response_auth_key(&AuthKey::from(auth_key)), - Err(_) => response_failed_to_generate_key(), - } -} - -#[derive(Deserialize)] -pub struct KeyIdParam(String); - -pub async fn delete_auth_key_handler( - State(tracker): State>, - Path(seconds_valid_or_key): Path, -) -> Response { - match KeyId::from_str(&seconds_valid_or_key.0) { - Err(_) => response_invalid_auth_key_param(&seconds_valid_or_key.0), - Ok(key_id) => match tracker.remove_auth_key(&key_id.to_string()).await { - Ok(_) => response_ok(), - Err(_) => response_failed_to_delete_key(), - }, - } -} - -pub async fn reload_keys_handler(State(tracker): State>) -> Response { - match tracker.load_keys().await { - Ok(..) => response_ok(), - Err(..) 
=> response_failed_to_reload_keys(), - } -} - -/// Serde deserialization decorator to map empty Strings to None, -fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> -where - D: Deserializer<'de>, - T: FromStr, - T::Err: fmt::Display, -{ - let opt = Option::::deserialize(de)?; - match opt.as_deref() { - None | Some("") => Ok(None), - Some(s) => FromStr::from_str(s).map_err(de::Error::custom).map(Some), - } -} From 6ddbdd95c8fe2f96e306e753d5b3fa3bb1461838 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 17:36:18 +0000 Subject: [PATCH 0319/1003] docs(api): [#143] move comment --- src/apis/responses.rs | 20 ++++++++++++++++++++ src/apis/routes.rs | 20 -------------------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/apis/responses.rs b/src/apis/responses.rs index 179c5f8d1..e5314d410 100644 --- a/src/apis/responses.rs +++ b/src/apis/responses.rs @@ -9,6 +9,26 @@ use crate::apis::resources::torrent::{ListItem, Torrent}; use crate::tracker::services::statistics::TrackerMetrics; use crate::tracker::services::torrent::{BasicInfo, Info}; +/* code-review: + When Axum cannot parse a path or query param it shows a message like this: + + For the "seconds_valid_or_key" path param: + + "Invalid URL: Cannot parse "-1" to a `u64`" + + That message is not an informative message, specially if you have more than one param. + We should show a message similar to the one we use when we parse the value in the handler. + For example: + + "Invalid URL: invalid infohash param: string \"INVALID VALUE\", expected a 40 character long string" + + We can customize the error message by using a custom type with custom serde deserialization. + The same we are using for the "InfoHashVisitor". + + Input data from HTTP requests should use struts with primitive types (first level of validation). + We can put the second level of validation in the application and domain services. 
+*/ + #[derive(Serialize, Debug)] #[serde(tag = "status", rename_all = "snake_case")] pub enum ActionStatus<'a> { diff --git a/src/apis/routes.rs b/src/apis/routes.rs index a37d79b4d..281979aa5 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -10,26 +10,6 @@ use super::handlers::{ use super::middlewares::auth::auth; use crate::tracker::Tracker; -/* code-review: - When Axum cannot parse a path or query param it shows a message like this: - - For the "seconds_valid_or_key" path param: - - "Invalid URL: Cannot parse "-1" to a `u64`" - - That message is not an informative message, specially if you have more than one param. - We should show a message similar to the one we use when we parse the value in the handler. - For example: - - "Invalid URL: invalid infohash param: string \"INVALID VALUE\", expected a 40 character long string" - - We can customize the error message by using a custom type with custom serde deserialization. - The same we are using for the "InfoHashVisitor". - - Input data from HTTP requests should use struts with primitive types (first level of validation). - We can put the second level of validation in the application and domain services. 
-*/ - pub fn router(tracker: &Arc) -> Router { Router::new() // Stats From 6955666bf0d7be54d9bae8936237f7d9608ac8ba Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 18:10:54 +0000 Subject: [PATCH 0320/1003] docs(api): [#143] remove comment --- tests/tracker_api.rs | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 9609e80ab..e4fff7ca4 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -9,33 +9,6 @@ mod api; mod tracker_apis { - /* - - Endpoints: - - Stats: - - [ ] GET /api/stats - - Torrents: - - [ ] GET /api/torrents?offset=:u32&limit=:u32 - - [ ] GET /api/torrent/:info_hash - - Whitelisted torrents: - - [ ] POST /api/whitelist/:info_hash - - [ ] DELETE /api/whitelist/:info_hash - - Whitelist commands: - - [ ] GET /api/whitelist/reload - - Keys: - - [ ] POST /api/key/:seconds_valid - - [ ] DELETE /api/key/:key - - Keys commands - - [ ] GET /api/keys/reload - - */ - // When these infohashes are used in URL path params // the response is a custom response returned in the handler fn invalid_infohashes_returning_bad_request() -> Vec { From 02dfe3ee87acaa20b4ed4b4432dab2ca9a5e8b34 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 18:22:12 +0000 Subject: [PATCH 0321/1003] refactor(api): [#143] rename response functions --- src/apis/handlers.rs | 48 +++++++++++++++++++++---------------------- src/apis/responses.rs | 48 +++++++++++++++++++++---------------------- 2 files changed, 48 insertions(+), 48 deletions(-) diff --git a/src/apis/handlers.rs b/src/apis/handlers.rs index d625b761a..8a66b4d76 100644 --- a/src/apis/handlers.rs +++ b/src/apis/handlers.rs @@ -8,10 +8,10 @@ use axum::response::{Json, Response}; use serde::{de, Deserialize, Deserializer}; use super::responses::{ - response_auth_key, response_failed_to_delete_key, response_failed_to_generate_key, response_failed_to_reload_keys, - response_failed_to_reload_whitelist, 
response_failed_to_remove_torrent_from_whitelist, response_failed_to_whitelist_torrent, - response_invalid_auth_key_param, response_invalid_info_hash_param, response_ok, response_stats, response_torrent_info, - response_torrent_list, response_torrent_not_known, + auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, + failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, + invalid_auth_key_param_response, invalid_info_hash_param_response, ok_response, stats_response, torrent_info_response, + torrent_list_response, torrent_not_known_response, }; use crate::apis::resources::auth_key::AuthKey; use crate::apis::resources::stats::Stats; @@ -23,7 +23,7 @@ use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Paginati use crate::tracker::Tracker; pub async fn get_stats_handler(State(tracker): State>) -> Json { - response_stats(get_metrics(tracker.clone()).await) + stats_response(get_metrics(tracker.clone()).await) } #[derive(Deserialize)] @@ -31,10 +31,10 @@ pub struct InfoHashParam(String); pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { match InfoHash::from_str(&info_hash.0) { - Err(_) => response_invalid_info_hash_param(&info_hash.0), + Err(_) => invalid_info_hash_param_response(&info_hash.0), Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { - Some(info) => response_torrent_info(info), - None => response_torrent_not_known(), + Some(info) => torrent_info_response(info), + None => torrent_not_known_response(), }, } } @@ -50,7 +50,7 @@ pub async fn get_torrents_handler( State(tracker): State>, pagination: Query, ) -> Json> { - response_torrent_list( + torrent_list_response( &get_torrents( tracker.clone(), &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), @@ -64,10 +64,10 @@ pub async fn add_torrent_to_whitelist_handler( 
Path(info_hash): Path, ) -> Response { match InfoHash::from_str(&info_hash.0) { - Err(_) => response_invalid_info_hash_param(&info_hash.0), + Err(_) => invalid_info_hash_param_response(&info_hash.0), Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(..) => response_ok(), - Err(..) => response_failed_to_whitelist_torrent(), + Ok(..) => ok_response(), + Err(..) => failed_to_whitelist_torrent_response(), }, } } @@ -77,26 +77,26 @@ pub async fn remove_torrent_from_whitelist_handler( Path(info_hash): Path, ) -> Response { match InfoHash::from_str(&info_hash.0) { - Err(_) => response_invalid_info_hash_param(&info_hash.0), + Err(_) => invalid_info_hash_param_response(&info_hash.0), Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(..) => response_ok(), - Err(..) => response_failed_to_remove_torrent_from_whitelist(), + Ok(..) => ok_response(), + Err(..) => failed_to_remove_torrent_from_whitelist_response(), }, } } pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { match tracker.load_whitelist().await { - Ok(..) => response_ok(), - Err(..) => response_failed_to_reload_whitelist(), + Ok(..) => ok_response(), + Err(..) 
=> failed_to_reload_whitelist_response(), } } pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { let seconds_valid = seconds_valid_or_key; match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => response_auth_key(&AuthKey::from(auth_key)), - Err(_) => response_failed_to_generate_key(), + Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), + Err(_) => failed_to_generate_key_response(), } } @@ -108,18 +108,18 @@ pub async fn delete_auth_key_handler( Path(seconds_valid_or_key): Path, ) -> Response { match KeyId::from_str(&seconds_valid_or_key.0) { - Err(_) => response_invalid_auth_key_param(&seconds_valid_or_key.0), + Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), Ok(key_id) => match tracker.remove_auth_key(&key_id.to_string()).await { - Ok(_) => response_ok(), - Err(_) => response_failed_to_delete_key(), + Ok(_) => ok_response(), + Err(_) => failed_to_delete_key_response(), }, } } pub async fn reload_keys_handler(State(tracker): State>) -> Response { match tracker.load_keys().await { - Ok(..) => response_ok(), - Err(..) => response_failed_to_reload_keys(), + Ok(..) => ok_response(), + Err(..) 
=> failed_to_reload_keys_response(), } } diff --git a/src/apis/responses.rs b/src/apis/responses.rs index e5314d410..0b2a14c70 100644 --- a/src/apis/responses.rs +++ b/src/apis/responses.rs @@ -39,17 +39,17 @@ pub enum ActionStatus<'a> { // Resource responses #[must_use] -pub fn response_stats(tracker_metrics: TrackerMetrics) -> Json { +pub fn stats_response(tracker_metrics: TrackerMetrics) -> Json { Json(Stats::from(tracker_metrics)) } #[must_use] -pub fn response_torrent_list(basic_infos: &[BasicInfo]) -> Json> { +pub fn torrent_list_response(basic_infos: &[BasicInfo]) -> Json> { Json(ListItem::new_vec(basic_infos)) } #[must_use] -pub fn response_torrent_info(info: Info) -> Response { +pub fn torrent_info_response(info: Info) -> Response { Json(Torrent::from(info)).into_response() } @@ -57,7 +57,7 @@ pub fn response_torrent_info(info: Info) -> Response { /// /// Will panic if it can't convert the `AuthKey` resource to json #[must_use] -pub fn response_auth_key(auth_key: &AuthKey) -> Response { +pub fn auth_key_response(auth_key: &AuthKey) -> Response { ( StatusCode::OK, [(header::CONTENT_TYPE, "application/json; charset=utf-8")], @@ -72,7 +72,7 @@ pub fn response_auth_key(auth_key: &AuthKey) -> Response { /// /// Will panic if it can't convert the `ActionStatus` to json #[must_use] -pub fn response_ok() -> Response { +pub fn ok_response() -> Response { ( StatusCode::OK, [(header::CONTENT_TYPE, "application/json")], @@ -84,19 +84,19 @@ pub fn response_ok() -> Response { // Error responses #[must_use] -pub fn response_invalid_info_hash_param(info_hash: &str) -> Response { - response_bad_request(&format!( +pub fn invalid_info_hash_param_response(info_hash: &str) -> Response { + bad_request_response(&format!( "Invalid URL: invalid infohash param: string \"{}\", expected a 40 character long string", info_hash )) } #[must_use] -pub fn response_invalid_auth_key_param(invalid_key: &str) -> Response { - response_bad_request(&format!("Invalid auth key id param 
\"{invalid_key}\"")) +pub fn invalid_auth_key_param_response(invalid_key: &str) -> Response { + bad_request_response(&format!("Invalid auth key id param \"{invalid_key}\"")) } -fn response_bad_request(body: &str) -> Response { +fn bad_request_response(body: &str) -> Response { ( StatusCode::BAD_REQUEST, [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], @@ -106,41 +106,41 @@ fn response_bad_request(body: &str) -> Response { } #[must_use] -pub fn response_torrent_not_known() -> Response { +pub fn torrent_not_known_response() -> Response { Json(json!("torrent not known")).into_response() } #[must_use] -pub fn response_failed_to_remove_torrent_from_whitelist() -> Response { - response_unhandled_rejection("failed to remove torrent from whitelist".to_string()) +pub fn failed_to_remove_torrent_from_whitelist_response() -> Response { + unhandled_rejection_response("failed to remove torrent from whitelist".to_string()) } #[must_use] -pub fn response_failed_to_whitelist_torrent() -> Response { - response_unhandled_rejection("failed to whitelist torrent".to_string()) +pub fn failed_to_whitelist_torrent_response() -> Response { + unhandled_rejection_response("failed to whitelist torrent".to_string()) } #[must_use] -pub fn response_failed_to_reload_whitelist() -> Response { - response_unhandled_rejection("failed to reload whitelist".to_string()) +pub fn failed_to_reload_whitelist_response() -> Response { + unhandled_rejection_response("failed to reload whitelist".to_string()) } #[must_use] -pub fn response_failed_to_generate_key() -> Response { - response_unhandled_rejection("failed to generate key".to_string()) +pub fn failed_to_generate_key_response() -> Response { + unhandled_rejection_response("failed to generate key".to_string()) } #[must_use] -pub fn response_failed_to_delete_key() -> Response { - response_unhandled_rejection("failed to delete key".to_string()) +pub fn failed_to_delete_key_response() -> Response { + unhandled_rejection_response("failed to delete 
key".to_string()) } #[must_use] -pub fn response_failed_to_reload_keys() -> Response { - response_unhandled_rejection("failed to reload keys".to_string()) +pub fn failed_to_reload_keys_response() -> Response { + unhandled_rejection_response("failed to reload keys".to_string()) } -fn response_unhandled_rejection(reason: String) -> Response { +fn unhandled_rejection_response(reason: String) -> Response { ( StatusCode::INTERNAL_SERVER_ERROR, [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], From b7c514431d822dfa494fe4fd6925f5241d92de44 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 18:33:11 +0000 Subject: [PATCH 0322/1003] refactor(api): [#143] change fn return type --- src/apis/handlers.rs | 4 ++-- src/apis/responses.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/apis/handlers.rs b/src/apis/handlers.rs index 8a66b4d76..8d9689025 100644 --- a/src/apis/handlers.rs +++ b/src/apis/handlers.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use std::time::Duration; use axum::extract::{Path, Query, State}; -use axum::response::{Json, Response}; +use axum::response::{IntoResponse, Json, Response}; use serde::{de, Deserialize, Deserializer}; use super::responses::{ @@ -33,7 +33,7 @@ pub async fn get_torrent_handler(State(tracker): State>, Path(info_ match InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { - Some(info) => torrent_info_response(info), + Some(info) => torrent_info_response(info).into_response(), None => torrent_not_known_response(), }, } diff --git a/src/apis/responses.rs b/src/apis/responses.rs index 0b2a14c70..7f194ab16 100644 --- a/src/apis/responses.rs +++ b/src/apis/responses.rs @@ -49,8 +49,8 @@ pub fn torrent_list_response(basic_infos: &[BasicInfo]) -> Json> { } #[must_use] -pub fn torrent_info_response(info: Info) -> Response { - Json(Torrent::from(info)).into_response() +pub fn 
torrent_info_response(info: Info) -> Json { + Json(Torrent::from(info)) } /// # Panics From 1c72ac078bca8e5750d35f82184504ee891304d5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 18:39:02 +0000 Subject: [PATCH 0323/1003] docs(api): [#143] remove deprecated comment --- src/apis/resources/mod.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/apis/resources/mod.rs b/src/apis/resources/mod.rs index e86c550ca..bf3ce273b 100644 --- a/src/apis/resources/mod.rs +++ b/src/apis/resources/mod.rs @@ -1,12 +1,3 @@ -//! These are the Rest API resources. -//! -//! WIP. Not all endpoints have their resource structs. -//! -//! - [x] `AuthKeys` -//! - [ ] `Torrent`, `ListItem`, `Peer`, `PeerId` -//! - [ ] `StatsResource` -//! - [ ] ... - pub mod auth_key; pub mod peer; pub mod stats; From 0c3ca8798c143828d64b2bbbb13bba7c44fc9313 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 18:59:48 +0000 Subject: [PATCH 0324/1003] refactor(api): [#143] remove duplicate code --- src/apis/middlewares/auth.rs | 28 ++++++++++++++++------------ src/apis/responses.rs | 5 ++++- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/src/apis/middlewares/auth.rs b/src/apis/middlewares/auth.rs index 905160a06..e54311d33 100644 --- a/src/apis/middlewares/auth.rs +++ b/src/apis/middlewares/auth.rs @@ -1,11 +1,12 @@ use std::sync::Arc; use axum::extract::{Query, State}; -use axum::http::{header, Request, StatusCode}; +use axum::http::Request; use axum::middleware::Next; use axum::response::{IntoResponse, Response}; use serde::Deserialize; +use crate::apis::responses::unhandled_rejection_response; use crate::config::{Configuration, HttpApi}; #[derive(Deserialize, Debug)] @@ -43,20 +44,23 @@ enum AuthError { impl IntoResponse for AuthError { fn into_response(self) -> Response { - let body = match self { - AuthError::Unauthorized => "Unhandled rejection: Err { reason: \"unauthorized\" }", - AuthError::TokenNotValid => "Unhandled rejection: Err { 
reason: \"token not valid\" }", - }; - - ( - StatusCode::INTERNAL_SERVER_ERROR, - [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], - body, - ) - .into_response() + match self { + AuthError::Unauthorized => unauthorized_response(), + AuthError::TokenNotValid => token_not_valid_response(), + } } } fn authenticate(token: &str, http_api_config: &HttpApi) -> bool { http_api_config.contains_token(token) } + +#[must_use] +pub fn unauthorized_response() -> Response { + unhandled_rejection_response("unauthorized".to_string()) +} + +#[must_use] +pub fn token_not_valid_response() -> Response { + unhandled_rejection_response("token not valid".to_string()) +} diff --git a/src/apis/responses.rs b/src/apis/responses.rs index 7f194ab16..b3d4cbd59 100644 --- a/src/apis/responses.rs +++ b/src/apis/responses.rs @@ -140,7 +140,10 @@ pub fn failed_to_reload_keys_response() -> Response { unhandled_rejection_response("failed to reload keys".to_string()) } -fn unhandled_rejection_response(reason: String) -> Response { +/// This error response is to keep backward compatibility with the old Warp API. +/// It should be a plain text or json. +#[must_use] +pub fn unhandled_rejection_response(reason: String) -> Response { ( StatusCode::INTERNAL_SERVER_ERROR, [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], From ecb6f2d5a90795e9d4dfc2bc50b42e77ad825e6b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 23 Dec 2022 15:47:49 +0000 Subject: [PATCH 0325/1003] feat: workflow to publish crate on crates.io Workflow to publish the crate on [crates.io](https://crates.io/). It only works if the secret "CRATES_TOKEN" exists in the "crates-io-torrust" environment. Since crates.io does not support scoped tokens, we can publish the crate using a fork where the crate owners can set up their crates.io tokens without sharing them with other maintainers. 
--- .github/workflows/publish_crate.yml | 54 +++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 .github/workflows/publish_crate.yml diff --git a/.github/workflows/publish_crate.yml b/.github/workflows/publish_crate.yml new file mode 100644 index 000000000..0352064eb --- /dev/null +++ b/.github/workflows/publish_crate.yml @@ -0,0 +1,54 @@ +name: Publish crate + +on: + push: + tags: + - "v*" + +jobs: + check-secret: + runs-on: ubuntu-latest + environment: crates-io-torrust + outputs: + publish: ${{ steps.check.outputs.publish }} + steps: + - id: check + env: + CRATES_TOKEN: "${{ secrets.CRATES_TOKEN }}" + if: "${{ env.CRATES_TOKEN != '' }}" + run: echo "publish=true" >> $GITHUB_OUTPUT + + test: + needs: check-secret + if: needs.check-secret.outputs.publish == 'true' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + components: llvm-tools-preview + - uses: Swatinem/rust-cache@v1 + - name: Run Tests + run: cargo test + + publish: + needs: test + if: needs.check-secret.outputs.publish == 'true' + runs-on: ubuntu-latest + environment: crates-io-torrust + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Install stable toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - run: cargo publish + env: + CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} From c26b35641b8baeaeaeb3eea68667db52991447cf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jan 2023 13:30:39 +0000 Subject: [PATCH 0326/1003] feat(http): [#159] during HTTP tracker setup wait until job is running Add a communication channel to wait until the new job is running. This is especially useful for testing, because tests need the HTTP server up and running before making requests. 
--- src/jobs/http_tracker.rs | 58 +++++++++++++++++++++++++++++++--------- src/setup.rs | 2 +- 2 files changed, 47 insertions(+), 13 deletions(-) diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index c62bc5cc9..8e38039b7 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -2,35 +2,69 @@ use std::net::SocketAddr; use std::sync::Arc; use log::{info, warn}; +use tokio::sync::oneshot; use tokio::task::JoinHandle; use crate::config::HttpTracker; use crate::http::server::Http; use crate::tracker; +#[derive(Debug)] +pub struct ServerJobStarted(); + /// # Panics /// /// It would panic if the `config::HttpTracker` struct would contain an inappropriate values. -#[must_use] -pub fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { - let bind_addr = config.bind_address.parse::().unwrap(); +pub async fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { + let bind_addr = config + .bind_address + .parse::() + .expect("HTTP tracker server bind_address invalid."); let ssl_enabled = config.ssl_enabled; let ssl_cert_path = config.ssl_cert_path.clone(); let ssl_key_path = config.ssl_key_path.clone(); - tokio::spawn(async move { + let (tx, rx) = oneshot::channel::(); + + // Run the HTTP tracker server + let join_handle = tokio::spawn(async move { let http_tracker = Http::new(tracker); if !ssl_enabled { - info!("Starting HTTP server on: http://{}", bind_addr); - http_tracker.start(bind_addr).await; + info!("Starting HTTP tracker server on: http://{}", bind_addr); + + let handle = http_tracker.start(bind_addr); + + tx.send(ServerJobStarted()) + .expect("HTTP tracker server should not be dropped"); + + handle.await; + + info!("HTTP tracker server on http://{} stopped", bind_addr); } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting HTTPS server on: https://{} (TLS)", bind_addr); - http_tracker - .start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap()) - .await; + 
info!("Starting HTTPS server on: https://{}", bind_addr); + + let handle = http_tracker.start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap()); + + tx.send(ServerJobStarted()) + .expect("HTTP tracker server should not be dropped"); + + handle.await; + + info!("HTTP tracker server on https://{} stopped", bind_addr); } else { - warn!("Could not start HTTP tracker on: {}, missing SSL Cert or Key!", bind_addr); + warn!( + "Could not start HTTPS tracker server on: {}, missing SSL Cert or Key!", + bind_addr + ); } - }) + }); + + // Wait until the HTTPS tracker server job is running + match rx.await { + Ok(_msg) => info!("HTTP tracker server started"), + Err(e) => panic!("HTTP tracker server was dropped: {e}"), + } + + join_handle } diff --git a/src/setup.rs b/src/setup.rs index e7535e67d..31be3baac 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -47,7 +47,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve if !http_tracker_config.enabled { continue; } - jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone())); + jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone()).await); } // Start HTTP API From 344920295ff4b013d0bd302e31ec34f66f82c109 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jan 2023 16:24:40 +0000 Subject: [PATCH 0327/1003] test(http): [#159] HTTP tracker tests scaffolding --- src/config.rs | 20 ++++++-- tests/api/client.rs | 90 ++++------------------------------- tests/common/http.rs | 75 +++++++++++++++++++++++++++++ tests/common/mod.rs | 1 + tests/http/asserts.rs | 7 +++ tests/http/client.rs | 35 ++++++++++++++ tests/http/connection_info.rs | 16 +++++++ tests/http/mod.rs | 4 ++ tests/http/server.rs | 64 +++++++++++++++++++++++++ tests/http_tracker.rs | 44 +++++++++++++++++ tests/tracker_api.rs | 7 ++- 11 files changed, 276 insertions(+), 87 deletions(-) create mode 100644 tests/common/http.rs create mode 100644 tests/common/mod.rs create mode 100644 tests/http/asserts.rs create mode 
100644 tests/http/client.rs create mode 100644 tests/http/connection_info.rs create mode 100644 tests/http/mod.rs create mode 100644 tests/http/server.rs create mode 100644 tests/http_tracker.rs diff --git a/src/config.rs b/src/config.rs index 275339aa0..3ca4b37d8 100644 --- a/src/config.rs +++ b/src/config.rs @@ -84,27 +84,39 @@ pub enum Error { } /// This configuration is used for testing. It generates random config values so they do not collide -/// if you run more than one tracker at the same time. +/// if you run more than one tracker at the same time. /// /// # Panics /// /// Will panic if it can't convert the temp file path to string #[must_use] pub fn ephemeral_configuration() -> Configuration { + // todo: disable services that are not needed. + // For example: a test for the UDP tracker should disable the API and HTTP tracker. + let mut config = Configuration { - log_level: Some("off".to_owned()), + log_level: Some("off".to_owned()), // Change to `debug` for tests debugging ..Default::default() }; - // Ephemeral socket addresses + // Ephemeral socket address for API let api_port = random_port(); + config.http_api.enabled = true; config.http_api.bind_address = format!("127.0.0.1:{}", &api_port); + + // Ephemeral socket address for UDP tracker let upd_port = random_port(); + config.udp_trackers[0].enabled = true; config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &upd_port); + // Ephemeral socket address for HTTP tracker + let http_port = random_port(); + config.http_trackers[0].enabled = true; + config.http_trackers[0].bind_address = format!("127.0.0.1:{}", &http_port); + // Ephemeral sqlite database let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("data_{}_{}.db", &api_port, &upd_port)); + let temp_file = temp_directory.join(format!("data_{}_{}_{}.db", &api_port, &upd_port, &http_port)); config.db_path = temp_file.to_str().unwrap().to_owned(); config diff --git a/tests/api/client.rs b/tests/api/client.rs index 
b0b864ff5..4dea732be 100644 --- a/tests/api/client.rs +++ b/tests/api/client.rs @@ -1,71 +1,14 @@ use reqwest::Response; use super::connection_info::ConnectionInfo; +use crate::common::http::{get, Query, QueryParam, ReqwestQuery}; +/// API Client pub struct Client { connection_info: ConnectionInfo, base_path: String, } -type ReqwestQuery = Vec; -type ReqwestQueryParam = (String, String); - -#[derive(Default, Debug)] -pub struct Query { - params: Vec, -} - -impl Query { - pub fn empty() -> Self { - Self { params: vec![] } - } - - pub fn params(params: Vec) -> Self { - Self { params } - } - - pub fn add_param(&mut self, param: QueryParam) { - self.params.push(param); - } - - fn with_token(token: &str) -> Self { - Self { - params: vec![QueryParam::new("token", token)], - } - } -} - -impl From for ReqwestQuery { - fn from(url_search_params: Query) -> Self { - url_search_params - .params - .iter() - .map(|param| ReqwestQueryParam::from((*param).clone())) - .collect() - } -} - -#[derive(Clone, Debug)] -pub struct QueryParam { - name: String, - value: String, -} - -impl QueryParam { - pub fn new(name: &str, value: &str) -> Self { - Self { - name: name.to_string(), - value: value.to_string(), - } - } -} - -impl From for ReqwestQueryParam { - fn from(param: QueryParam) -> Self { - (param.name, param.value) - } -} - impl Client { pub fn new(connection_info: ConnectionInfo) -> Self { Self { @@ -138,37 +81,22 @@ impl Client { .unwrap() } - fn base_url(&self, path: &str) -> String { - format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) - } - - // Unauthenticated GET request with query component pub async fn get_request_with_query(&self, path: &str, params: Query) -> Response { - reqwest::Client::builder() - .build() - .unwrap() - .get(self.base_url(path)) - .query(&ReqwestQuery::from(params)) - .send() - .await - .unwrap() + get(&self.base_url(path), Some(params)).await } - // Unauthenticated GET request pub async fn get_request(&self, path: &str) 
-> Response { - reqwest::Client::builder() - .build() - .unwrap() - .get(self.base_url(path)) - .send() - .await - .unwrap() + get(&self.base_url(path), None).await } fn query_with_token(&self) -> Query { match &self.connection_info.api_token { - Some(token) => Query::with_token(token), + Some(token) => Query::params([QueryParam::new("token", token)].to_vec()), None => Query::default(), } } + + fn base_url(&self, path: &str) -> String { + format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) + } } diff --git a/tests/common/http.rs b/tests/common/http.rs new file mode 100644 index 000000000..1c2e95671 --- /dev/null +++ b/tests/common/http.rs @@ -0,0 +1,75 @@ +use reqwest::Response; + +pub type ReqwestQuery = Vec; +pub type ReqwestQueryParam = (String, String); + +pub async fn get(path: &str, query: Option) -> Response { + match query { + Some(params) => reqwest::Client::builder() + .build() + .unwrap() + .get(path) + .query(&ReqwestQuery::from(params)) + .send() + .await + .unwrap(), + None => reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap(), + } +} + +#[derive(Clone, Debug)] +pub struct ConnectionInfo { + pub bind_address: String, +} + +/// URL Query component +#[derive(Default, Debug)] +pub struct Query { + params: Vec, +} + +impl Query { + pub fn empty() -> Self { + Self { params: vec![] } + } + + pub fn params(params: Vec) -> Self { + Self { params } + } + + pub fn add_param(&mut self, param: QueryParam) { + self.params.push(param); + } +} + +impl From for ReqwestQuery { + fn from(url_search_params: Query) -> Self { + url_search_params + .params + .iter() + .map(|param| ReqwestQueryParam::from((*param).clone())) + .collect() + } +} + +/// URL query param +#[derive(Clone, Debug)] +pub struct QueryParam { + name: String, + value: String, +} + +impl QueryParam { + pub fn new(name: &str, value: &str) -> Self { + Self { + name: name.to_string(), + value: value.to_string(), + } + } +} + +impl From for 
ReqwestQueryParam { + fn from(param: QueryParam) -> Self { + (param.name, param.value) + } +} diff --git a/tests/common/mod.rs b/tests/common/mod.rs new file mode 100644 index 000000000..3883215fc --- /dev/null +++ b/tests/common/mod.rs @@ -0,0 +1 @@ +pub mod http; diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs new file mode 100644 index 000000000..b82c681a0 --- /dev/null +++ b/tests/http/asserts.rs @@ -0,0 +1,7 @@ +use reqwest::Response; + +pub async fn assert_internal_server_error(response: Response) { + assert_eq!(response.status(), 200); + /* cspell:disable-next-line */ + assert_eq!(response.text().await.unwrap(), "d14:failure reason21:internal server errore"); +} diff --git a/tests/http/client.rs b/tests/http/client.rs new file mode 100644 index 000000000..8bf691474 --- /dev/null +++ b/tests/http/client.rs @@ -0,0 +1,35 @@ +use reqwest::Response; + +use super::connection_info::ConnectionInfo; +use crate::common::http::{get, Query}; + +/// HTTP Tracker Client +pub struct Client { + connection_info: ConnectionInfo, + base_path: String, +} + +impl Client { + pub fn new(connection_info: ConnectionInfo) -> Self { + Self { + connection_info, + base_path: "/".to_string(), + } + } + + pub async fn announce(&self, params: Query) -> Response { + self.get("announce", params).await + } + + pub async fn scrape(&self, params: Query) -> Response { + self.get("scrape", params).await + } + + async fn get(&self, path: &str, params: Query) -> Response { + get(&self.base_url(path), Some(params)).await + } + + fn base_url(&self, path: &str) -> String { + format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) + } +} diff --git a/tests/http/connection_info.rs b/tests/http/connection_info.rs new file mode 100644 index 000000000..debf931e4 --- /dev/null +++ b/tests/http/connection_info.rs @@ -0,0 +1,16 @@ +use torrust_tracker::tracker::auth::Key; + +#[derive(Clone, Debug)] +pub struct ConnectionInfo { + pub bind_address: String, + pub aut_key: 
Option, +} + +impl ConnectionInfo { + pub fn anonymous(bind_address: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + aut_key: None, + } + } +} diff --git a/tests/http/mod.rs b/tests/http/mod.rs new file mode 100644 index 000000000..9e79fcd27 --- /dev/null +++ b/tests/http/mod.rs @@ -0,0 +1,4 @@ +pub mod asserts; +pub mod client; +pub mod connection_info; +pub mod server; diff --git a/tests/http/server.rs b/tests/http/server.rs new file mode 100644 index 000000000..ff2b40987 --- /dev/null +++ b/tests/http/server.rs @@ -0,0 +1,64 @@ +use core::panic; +use std::sync::Arc; + +use torrust_tracker::config::{ephemeral_configuration, Configuration}; +use torrust_tracker::jobs::http_tracker; +use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; + +use super::connection_info::ConnectionInfo; + +pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral_configuration()) +} + +pub async fn start_default_http_tracker() -> Server { + let configuration = tracker_configuration(); + start_custom_http_tracker(configuration.clone()).await +} + +pub async fn start_custom_http_tracker(configuration: Arc) -> Server { + let server = start(&configuration); + http_tracker::start_job(&configuration.http_trackers[0], server.tracker.clone()).await; + server +} + +fn start(configuration: &Arc) -> Server { + let connection_info = ConnectionInfo::anonymous(&configuration.http_trackers[0].bind_address.clone()); + + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + let tracker = match tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => 
Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + + // Initialize logging + logging::setup(configuration); + + Server { + tracker, + connection_info, + } +} + +pub struct Server { + pub tracker: Arc, + pub connection_info: ConnectionInfo, +} + +impl Server { + pub fn get_connection_info(&self) -> ConnectionInfo { + self.connection_info.clone() + } +} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs new file mode 100644 index 000000000..a1e429bb8 --- /dev/null +++ b/tests/http_tracker.rs @@ -0,0 +1,44 @@ +/// Integration tests for HTTP tracker server +/// +/// cargo test `http_tracker_server` -- --nocapture +mod common; +mod http; + +mod http_tracker_server { + + mod receiving_an_announce_request { + use crate::common::http::Query; + use crate::http::asserts::assert_internal_server_error; + use crate::http::client::Client; + use crate::http::server::start_default_http_tracker; + + #[tokio::test] + async fn should_fail_when_the_request_is_empty() { + let http_tracker_server = start_default_http_tracker().await; + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(Query::default()) + .await; + + assert_internal_server_error(response).await; + } + } + + mod receiving_an_scrape_request { + use crate::common::http::Query; + use crate::http::asserts::assert_internal_server_error; + use crate::http::client::Client; + use crate::http::server::start_default_http_tracker; + + #[tokio::test] + async fn should_fail_when_the_request_is_empty() { + let http_tracker_server = start_default_http_tracker().await; + + let response = Client::new(http_tracker_server.get_connection_info()) + .scrape(Query::default()) + .await; + + assert_internal_server_error(response).await; + } + } +} diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index e4fff7ca4..456b37f7b 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -6,6 +6,7 @@ extern crate rand; mod api; +mod common; mod tracker_apis { @@ -31,8 +32,9 @@ mod 
tracker_apis { mod authentication { use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; - use crate::api::client::{Client, Query, QueryParam}; + use crate::api::client::Client; use crate::api::server::start_default_api; + use crate::common::http::{Query, QueryParam}; #[tokio::test] async fn should_authenticate_requests_by_using_a_token_query_param() { @@ -181,10 +183,11 @@ mod tracker_apis { assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, }; - use crate::api::client::{Client, Query, QueryParam}; + use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; use crate::api::server::start_default_api; + use crate::common::http::{Query, QueryParam}; #[tokio::test] async fn should_allow_getting_torrents() { From 41ad07f2281ebfb70b1041bf1811af01ac6df4f3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jan 2023 18:58:50 +0000 Subject: [PATCH 0328/1003] refactor(http): [#159] add dependencies: serde_urlencoded and serde_repr --- Cargo.lock | 25 +++++++++++++++++++------ Cargo.toml | 3 +++ 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8f8d753b3..bc82c64f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2242,9 +2242,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.147" +version = "1.0.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" +checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" dependencies = [ "serde_derive", ] @@ -2270,9 +2270,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.147" +version = "1.0.152" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" +checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" dependencies = [ "proc-macro2", "quote", @@ -2299,6 +2299,17 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_repr" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a5ec9fa74a20ebbe5d9ac23dac1fc96ba0ecfe9f50f2843b52e537b10fbcb4e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -2523,9 +2534,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.103" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" +checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" dependencies = [ "proc-macro2", "quote", @@ -2820,6 +2831,8 @@ dependencies = [ "serde", "serde_bencode", "serde_json", + "serde_repr", + "serde_urlencoded", "serde_with", "thiserror", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 434b4cace..0e67c65ae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,6 +61,9 @@ uuid = { version = "1", features = ["v4"] } axum = "0.6.1" axum-server = { version = "0.4.4", features = ["tls-rustls"] } + [dev-dependencies] mockall = "0.11" reqwest = { version = "0.11.13", features = ["json"] } +serde_urlencoded = "0.7.1" +serde_repr = "0.1.10" From 1a558d2c6c9341654e55357993442dabddf56253 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jan 2023 19:30:51 +0000 Subject: [PATCH 0329/1003] test(http): [#159] add tests for public http tracker --- src/http/mod.rs | 4 ++ src/http/response.rs | 7 +- tests/api/client.rs | 16 ++++- tests/api/mod.rs | 1 - tests/{api => common}/fixtures.rs | 0 tests/common/http.rs | 16 ----- tests/common/mod.rs | 1 + tests/http/asserts.rs | 8 +++ tests/http/client.rs | 
29 ++++----- tests/http/mod.rs | 2 + tests/http/requests.rs | 104 ++++++++++++++++++++++++++++++ tests/http/responses.rs | 18 ++++++ tests/http/server.rs | 6 ++ tests/http_tracker.rs | 102 +++++++++++++++++++++++++++-- tests/tracker_api.rs | 4 +- 15 files changed, 272 insertions(+), 46 deletions(-) rename tests/{api => common}/fixtures.rs (100%) create mode 100644 tests/http/requests.rs create mode 100644 tests/http/responses.rs diff --git a/src/http/mod.rs b/src/http/mod.rs index 701dba407..0b5a02a0e 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -1,3 +1,7 @@ +//! Tracker HTTP/HTTPS Protocol: +//! +//! +//! pub mod error; pub mod filters; pub mod handlers; diff --git a/src/http/response.rs b/src/http/response.rs index 962e72fac..1e9f7fa09 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -2,19 +2,18 @@ use std::collections::HashMap; use std::io::Write; use std::net::IpAddr; -use serde; -use serde::Serialize; +use serde::{self, Deserialize, Serialize}; use crate::protocol::info_hash::InfoHash; -#[derive(Serialize)] +#[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Peer { pub peer_id: String, pub ip: IpAddr, pub port: u16, } -#[derive(Serialize)] +#[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Announce { pub interval: u32, #[serde(rename = "min interval")] diff --git a/tests/api/client.rs b/tests/api/client.rs index 4dea732be..f99805570 100644 --- a/tests/api/client.rs +++ b/tests/api/client.rs @@ -1,7 +1,7 @@ use reqwest::Response; use super::connection_info::ConnectionInfo; -use crate::common::http::{get, Query, QueryParam, ReqwestQuery}; +use crate::common::http::{Query, QueryParam, ReqwestQuery}; /// API Client pub struct Client { @@ -100,3 +100,17 @@ impl Client { format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) } } + +async fn get(path: &str, query: Option) -> Response { + match query { + Some(params) => reqwest::Client::builder() + .build() + .unwrap() + .get(path) + 
.query(&ReqwestQuery::from(params)) + .send() + .await + .unwrap(), + None => reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap(), + } +} diff --git a/tests/api/mod.rs b/tests/api/mod.rs index bc4187375..8dd6f4c53 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -5,7 +5,6 @@ use torrust_tracker::tracker::Tracker; pub mod asserts; pub mod client; pub mod connection_info; -pub mod fixtures; pub mod server; /// It forces a database error by dropping all tables. diff --git a/tests/api/fixtures.rs b/tests/common/fixtures.rs similarity index 100% rename from tests/api/fixtures.rs rename to tests/common/fixtures.rs diff --git a/tests/common/http.rs b/tests/common/http.rs index 1c2e95671..902752674 100644 --- a/tests/common/http.rs +++ b/tests/common/http.rs @@ -1,22 +1,6 @@ -use reqwest::Response; - pub type ReqwestQuery = Vec; pub type ReqwestQueryParam = (String, String); -pub async fn get(path: &str, query: Option) -> Response { - match query { - Some(params) => reqwest::Client::builder() - .build() - .unwrap() - .get(path) - .query(&ReqwestQuery::from(params)) - .send() - .await - .unwrap(), - None => reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap(), - } -} - #[derive(Clone, Debug)] pub struct ConnectionInfo { pub bind_address: String, diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 3883215fc..810620359 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1 +1,2 @@ +pub mod fixtures; pub mod http; diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index b82c681a0..9a1f353c6 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -1,7 +1,15 @@ use reqwest::Response; +use super::responses::Announce; + pub async fn assert_internal_server_error(response: Response) { assert_eq!(response.status(), 200); /* cspell:disable-next-line */ assert_eq!(response.text().await.unwrap(), "d14:failure reason21:internal server errore"); } + +pub async fn assert_announce_response(response: 
Response, expected_announce_response: &Announce) { + assert_eq!(response.status(), 200); + let announce_response: Announce = serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); + assert_eq!(announce_response, *expected_announce_response); +} diff --git a/tests/http/client.rs b/tests/http/client.rs index 8bf691474..ae51bc02e 100644 --- a/tests/http/client.rs +++ b/tests/http/client.rs @@ -1,35 +1,34 @@ use reqwest::Response; use super::connection_info::ConnectionInfo; -use crate::common::http::{get, Query}; +use super::requests::AnnounceQuery; /// HTTP Tracker Client pub struct Client { connection_info: ConnectionInfo, - base_path: String, } impl Client { pub fn new(connection_info: ConnectionInfo) -> Self { - Self { - connection_info, - base_path: "/".to_string(), - } + Self { connection_info } } - pub async fn announce(&self, params: Query) -> Response { - self.get("announce", params).await + pub async fn announce(&self, query: &AnnounceQuery) -> Response { + let path_with_query = format!("announce?{query}"); + self.get(&path_with_query).await } - pub async fn scrape(&self, params: Query) -> Response { - self.get("scrape", params).await - } - - async fn get(&self, path: &str, params: Query) -> Response { - get(&self.base_url(path), Some(params)).await + pub async fn get(&self, path: &str) -> Response { + reqwest::Client::builder() + .build() + .unwrap() + .get(self.base_url(path)) + .send() + .await + .unwrap() } fn base_url(&self, path: &str) -> String { - format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) + format!("http://{}/{path}", &self.connection_info.bind_address) } } diff --git a/tests/http/mod.rs b/tests/http/mod.rs index 9e79fcd27..2ab8b2c1c 100644 --- a/tests/http/mod.rs +++ b/tests/http/mod.rs @@ -1,4 +1,6 @@ pub mod asserts; pub mod client; pub mod connection_info; +pub mod requests; +pub mod responses; pub mod server; diff --git a/tests/http/requests.rs b/tests/http/requests.rs new file mode 100644 index 
000000000..170dc52a9 --- /dev/null +++ b/tests/http/requests.rs @@ -0,0 +1,104 @@ +use std::fmt; +use std::net::IpAddr; + +use percent_encoding::NON_ALPHANUMERIC; +use serde_repr::Serialize_repr; + +pub struct AnnounceQuery { + pub info_hash: ByteArray20, + pub peer_addr: IpAddr, + pub downloaded: BaseTenASCII, + pub uploaded: BaseTenASCII, + pub peer_id: ByteArray20, + pub port: PortNumber, + pub left: BaseTenASCII, + pub event: Option, + pub compact: Option, +} + +impl fmt::Display for AnnounceQuery { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.build()) + } +} + +/// HTTP Tracker Announce Request: +/// +/// +/// +/// Some parameters are not implemented yet. +impl AnnounceQuery { + /// It builds the URL query component for the announce request. + /// + /// This custom URL query params encoding is needed because `reqwest` does not allow + /// bytes arrays in query parameters. More info on this issue: + /// + /// + pub fn build(&self) -> String { + let mut params = vec![ + ( + "info_hash", + percent_encoding::percent_encode(&self.info_hash, NON_ALPHANUMERIC).to_string(), + ), + ("peer_addr", self.peer_addr.to_string()), + ("downloaded", self.downloaded.to_string()), + ("uploaded", self.uploaded.to_string()), + ( + "peer_id", + percent_encoding::percent_encode(&self.peer_id, NON_ALPHANUMERIC).to_string(), + ), + ("port", self.port.to_string()), + ("left", self.left.to_string()), + ]; + + if let Some(event) = &self.event { + params.push(("event", event.to_string())); + } + + if let Some(compact) = &self.compact { + params.push(("compact", compact.to_string())); + } + + params + .iter() + .map(|param| format!("{}={}", param.0, param.1)) + .collect::>() + .join("&") + } +} + +pub type BaseTenASCII = u64; +pub type ByteArray20 = [u8; 20]; +pub type PortNumber = u16; + +pub enum Event { + //tarted, + //Stopped, + Completed, +} + +impl fmt::Display for Event { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + 
//Event::Started => write!(f, "started"), + //Event::Stopped => write!(f, "stopped"), + Event::Completed => write!(f, "completed"), + } + } +} + +#[derive(Serialize_repr, PartialEq, Debug)] +#[repr(u8)] +pub enum Compact { + //Accepted = 1, + NotAccepted = 0, +} + +impl fmt::Display for Compact { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + //Compact::Accepted => write!(f, "1"), + Compact::NotAccepted => write!(f, "0"), + } + } +} diff --git a/tests/http/responses.rs b/tests/http/responses.rs new file mode 100644 index 000000000..e82197b03 --- /dev/null +++ b/tests/http/responses.rs @@ -0,0 +1,18 @@ +use serde::{self, Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Announce { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub min_interval: u32, + pub peers: Vec, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct DictionaryPeer { + pub ip: String, + pub peer_id: String, + pub port: u16, +} diff --git a/tests/http/server.rs b/tests/http/server.rs index ff2b40987..130c68b46 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -3,6 +3,8 @@ use std::sync::Arc; use torrust_tracker::config::{ephemeral_configuration, Configuration}; use torrust_tracker::jobs::http_tracker; +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; @@ -61,4 +63,8 @@ impl Server { pub fn get_connection_info(&self) -> ConnectionInfo { self.connection_info.clone() } + + pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + } } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index a1e429bb8..05e2281bd 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -7,7 
+7,6 @@ mod http; mod http_tracker_server { mod receiving_an_announce_request { - use crate::common::http::Query; use crate::http::asserts::assert_internal_server_error; use crate::http::client::Client; use crate::http::server::start_default_http_tracker; @@ -16,16 +15,13 @@ mod http_tracker_server { async fn should_fail_when_the_request_is_empty() { let http_tracker_server = start_default_http_tracker().await; - let response = Client::new(http_tracker_server.get_connection_info()) - .announce(Query::default()) - .await; + let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; assert_internal_server_error(response).await; } } mod receiving_an_scrape_request { - use crate::common::http::Query; use crate::http::asserts::assert_internal_server_error; use crate::http::client::Client; use crate::http::server::start_default_http_tracker; @@ -34,11 +30,103 @@ mod http_tracker_server { async fn should_fail_when_the_request_is_empty() { let http_tracker_server = start_default_http_tracker().await; + let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; + + assert_internal_server_error(response).await; + } + } +} + +mod public_http_tracker_server { + + mod receiving_an_announce_request { + use std::net::{IpAddr, Ipv4Addr}; + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::sample_peer; + use crate::http::asserts::assert_announce_response; + use crate::http::client::Client; + use crate::http::requests::{AnnounceQuery, Compact, Event}; + use crate::http::responses::{Announce, DictionaryPeer}; + use crate::http::server::start_default_http_tracker; + + fn sample_announce_query(info_hash: &InfoHash) -> AnnounceQuery { + AnnounceQuery { + info_hash: info_hash.0, + peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), + downloaded: 0, + uploaded: 0, + peer_id: peer::Id(*b"-qB00000000000000001").0, + port: 
17548, + left: 0, + event: Some(Event::Completed), + compact: Some(Compact::NotAccepted), + } + } + + #[tokio::test] + async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { + let http_tracker_server = start_default_http_tracker().await; + let response = Client::new(http_tracker_server.get_connection_info()) - .scrape(Query::default()) + .announce(&sample_announce_query( + &InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(), + )) .await; - assert_internal_server_error(response).await; + assert_announce_response( + response, + &Announce { + complete: 1, // the peer for this test + incomplete: 0, + interval: http_tracker_server.tracker.config.announce_interval, + min_interval: http_tracker_server.tracker.config.min_announce_interval, + peers: vec![], + }, + ) + .await; + } + + #[tokio::test] + async fn should_return_the_list_of_previously_announced_peers() { + let http_tracker_server = start_default_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let peer = sample_peer(); + + // Add a peer + http_tracker_server.add_torrent(&info_hash, &peer).await; + + let announce_query = sample_announce_query(&info_hash); + + assert_ne!( + announce_query.peer_id, peer.peer_id.0, + "the new peer id must be different from the previously announced peer otherwise the peer previously added peer in not included in the list" + ); + + // Announce the new peer. 
This new peer is non included the response peers list + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&announce_query) + .await; + + assert_announce_response( + response, + &Announce { + complete: 2, + incomplete: 0, + interval: 120, + min_interval: 120, + peers: vec![DictionaryPeer { + ip: peer.peer_addr.ip().to_string(), + peer_id: String::new(), + port: peer.peer_addr.port(), + }], + }, + ) + .await; } } } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 456b37f7b..47fda3af9 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -113,8 +113,8 @@ mod tracker_apis { use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::fixtures::sample_peer; use crate::api::server::start_default_api; + use crate::common::fixtures::sample_peer; #[tokio::test] async fn should_allow_getting_tracker_statistics() { @@ -185,8 +185,8 @@ mod tracker_apis { }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::fixtures::sample_peer; use crate::api::server::start_default_api; + use crate::common::fixtures::sample_peer; use crate::common::http::{Query, QueryParam}; #[tokio::test] From dd38045c7d06d9900db52bb44a673288baf5e75e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 23 Jan 2023 13:40:37 +0000 Subject: [PATCH 0330/1003] refactor(http): [#159] extract test builders --- src/http/mod.rs | 1 + tests/common/fixtures.rs | 28 +++++- tests/http/asserts.rs | 6 ++ tests/http/requests.rs | 42 +++++++- tests/http/server.rs | 8 +- tests/http_tracker.rs | 209 +++++++++++++++++++-------------------- tests/tracker_api.rs | 20 ++-- 7 files changed, 191 insertions(+), 123 deletions(-) diff --git a/src/http/mod.rs b/src/http/mod.rs index 0b5a02a0e..2fcb056d8 100644 --- 
a/src/http/mod.rs +++ b/src/http/mod.rs @@ -1,6 +1,7 @@ //! Tracker HTTP/HTTPS Protocol: //! //! +//! //! pub mod error; pub mod filters; diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index fa6099309..78f7d381f 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -2,10 +2,32 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; -use torrust_tracker::tracker::peer; +use torrust_tracker::tracker::peer::{self, Id, Peer}; -pub fn sample_peer() -> peer::Peer { - peer::Peer { +pub struct PeerBuilder { + peer: Peer, +} + +impl PeerBuilder { + pub fn default() -> PeerBuilder { + Self { + peer: default_peer_for_testing(), + } + } + + #[allow(dead_code)] + pub fn with_peer_id(mut self, peer_id: &Id) -> Self { + self.peer.peer_id = *peer_id; + self + } + + pub fn into(self) -> Peer { + self.peer + } +} + +fn default_peer_for_testing() -> Peer { + Peer { peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 9a1f353c6..32aaf4d69 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -8,6 +8,12 @@ pub async fn assert_internal_server_error(response: Response) { assert_eq!(response.text().await.unwrap(), "d14:failure reason21:internal server errore"); } +pub async fn assert_empty_announce_response(response: Response) { + assert_eq!(response.status(), 200); + let announce_response: Announce = serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); + assert!(announce_response.peers.is_empty()); +} + pub async fn assert_announce_response(response: Response, expected_announce_response: &Announce) { assert_eq!(response.status(), 200); let announce_response: Announce = 
serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); diff --git a/tests/http/requests.rs b/tests/http/requests.rs index 170dc52a9..7e59494c5 100644 --- a/tests/http/requests.rs +++ b/tests/http/requests.rs @@ -1,8 +1,11 @@ use std::fmt; -use std::net::IpAddr; +use std::net::{IpAddr, Ipv4Addr}; +use std::str::FromStr; use percent_encoding::NON_ALPHANUMERIC; use serde_repr::Serialize_repr; +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::tracker::peer::Id; pub struct AnnounceQuery { pub info_hash: ByteArray20, @@ -102,3 +105,40 @@ impl fmt::Display for Compact { } } } + +pub struct AnnounceQueryBuilder { + announce_query: AnnounceQuery, +} + +impl AnnounceQueryBuilder { + pub fn default() -> AnnounceQueryBuilder { + let default_announce_query = AnnounceQuery { + info_hash: InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0, + peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), + downloaded: 0, + uploaded: 0, + peer_id: Id(*b"-qB00000000000000001").0, + port: 17548, + left: 0, + event: Some(Event::Completed), + compact: Some(Compact::NotAccepted), + }; + Self { + announce_query: default_announce_query, + } + } + + pub fn with_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.announce_query.info_hash = info_hash.0; + self + } + + pub fn with_peer_id(mut self, peer_id: &Id) -> Self { + self.announce_query.peer_id = peer_id.0; + self + } + + pub fn into(self) -> AnnounceQuery { + self.announce_query + } +} diff --git a/tests/http/server.rs b/tests/http/server.rs index 130c68b46..32d02b060 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -10,8 +10,8 @@ use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use super::connection_info::ConnectionInfo; -pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) +pub async fn start_public_http_tracker() -> Server { + start_default_http_tracker().await } pub async fn start_default_http_tracker() 
-> Server { @@ -19,6 +19,10 @@ pub async fn start_default_http_tracker() -> Server { start_custom_http_tracker(configuration.clone()).await } +pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral_configuration()) +} + pub async fn start_custom_http_tracker(configuration: Arc) -> Server { let server = start(&configuration); http_tracker::start_job(&configuration.http_trackers[0], server.tracker.clone()).await; diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 05e2281bd..bf75dfc26 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -6,127 +6,122 @@ mod http; mod http_tracker_server { - mod receiving_an_announce_request { - use crate::http::asserts::assert_internal_server_error; - use crate::http::client::Client; - use crate::http::server::start_default_http_tracker; + mod for_all_config_modes { - #[tokio::test] - async fn should_fail_when_the_request_is_empty() { - let http_tracker_server = start_default_http_tracker().await; + mod receiving_an_announce_request { + use crate::http::asserts::assert_internal_server_error; + use crate::http::client::Client; + use crate::http::server::start_default_http_tracker; - let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; + #[tokio::test] + async fn should_fail_when_the_request_is_empty() { + let http_tracker_server = start_default_http_tracker().await; - assert_internal_server_error(response).await; - } - } + let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; - mod receiving_an_scrape_request { - use crate::http::asserts::assert_internal_server_error; - use crate::http::client::Client; - use crate::http::server::start_default_http_tracker; + assert_internal_server_error(response).await; + } + } - #[tokio::test] - async fn should_fail_when_the_request_is_empty() { - let http_tracker_server = start_default_http_tracker().await; + mod receiving_an_scrape_request { + use 
crate::http::asserts::assert_internal_server_error; + use crate::http::client::Client; + use crate::http::server::start_default_http_tracker; - let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; + #[tokio::test] + async fn should_fail_when_the_request_is_empty() { + let http_tracker_server = start_default_http_tracker().await; - assert_internal_server_error(response).await; - } - } -} + let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; -mod public_http_tracker_server { - - mod receiving_an_announce_request { - use std::net::{IpAddr, Ipv4Addr}; - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - - use crate::common::fixtures::sample_peer; - use crate::http::asserts::assert_announce_response; - use crate::http::client::Client; - use crate::http::requests::{AnnounceQuery, Compact, Event}; - use crate::http::responses::{Announce, DictionaryPeer}; - use crate::http::server::start_default_http_tracker; - - fn sample_announce_query(info_hash: &InfoHash) -> AnnounceQuery { - AnnounceQuery { - info_hash: info_hash.0, - peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), - downloaded: 0, - uploaded: 0, - peer_id: peer::Id(*b"-qB00000000000000001").0, - port: 17548, - left: 0, - event: Some(Event::Completed), - compact: Some(Compact::NotAccepted), + assert_internal_server_error(response).await; } } + } - #[tokio::test] - async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let http_tracker_server = start_default_http_tracker().await; - - let response = Client::new(http_tracker_server.get_connection_info()) - .announce(&sample_announce_query( - &InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(), - )) + mod configured_as_public { + + mod receiving_an_announce_request { + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + use 
torrust_tracker::tracker::peer; + + use crate::common::fixtures::PeerBuilder; + use crate::http::asserts::{assert_announce_response, assert_empty_announce_response}; + use crate::http::client::Client; + use crate::http::requests::AnnounceQueryBuilder; + use crate::http::responses::{Announce, DictionaryPeer}; + use crate::http::server::start_public_http_tracker; + + #[tokio::test] + async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { + let http_tracker_server = start_public_http_tracker().await; + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &AnnounceQueryBuilder::default() + .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) + .into(), + ) + .await; + + assert_announce_response( + response, + &Announce { + complete: 1, // the peer for this test + incomplete: 0, + interval: http_tracker_server.tracker.config.announce_interval, + min_interval: http_tracker_server.tracker.config.min_announce_interval, + peers: vec![], + }, + ) .await; + } - assert_announce_response( - response, - &Announce { - complete: 1, // the peer for this test - incomplete: 0, - interval: http_tracker_server.tracker.config.announce_interval, - min_interval: http_tracker_server.tracker.config.min_announce_interval, - peers: vec![], - }, - ) - .await; - } - - #[tokio::test] - async fn should_return_the_list_of_previously_announced_peers() { - let http_tracker_server = start_default_http_tracker().await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let peer = sample_peer(); - - // Add a peer - http_tracker_server.add_torrent(&info_hash, &peer).await; - - let announce_query = sample_announce_query(&info_hash); - - assert_ne!( - announce_query.peer_id, peer.peer_id.0, - "the new peer id must be different from the previously announced peer otherwise the peer previously added peer in not included in the list" - ); - - // Announce the new peer. 
This new peer is non included the response peers list - let response = Client::new(http_tracker_server.get_connection_info()) - .announce(&announce_query) + #[tokio::test] + async fn should_return_the_list_of_previously_announced_peers() { + let http_tracker_server = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .into(); + + // Add the Peer 1 + http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &AnnounceQueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .into(), + ) + .await; + + let expected_peer = DictionaryPeer { + peer_id: previously_announced_peer.peer_id.to_string(), + ip: previously_announced_peer.peer_addr.ip().to_string(), + port: previously_announced_peer.peer_addr.port(), + }; + + // This new peer is non included on the response peer list + assert_announce_response( + response, + &Announce { + complete: 2, + incomplete: 0, + interval: http_tracker_server.tracker.config.announce_interval, + min_interval: http_tracker_server.tracker.config.min_announce_interval, + peers: vec![expected_peer], + }, + ) .await; - - assert_announce_response( - response, - &Announce { - complete: 2, - incomplete: 0, - interval: 120, - min_interval: 120, - peers: vec![DictionaryPeer { - ip: peer.peer_addr.ip().to_string(), - peer_id: String::new(), - port: peer.peer_addr.port(), - }], - }, - ) - .await; + } } } } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 47fda3af9..5710db6a6 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -114,7 +114,7 @@ mod tracker_apis { use crate::api::client::Client; use 
crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::server::start_default_api; - use crate::common::fixtures::sample_peer; + use crate::common::fixtures::PeerBuilder; #[tokio::test] async fn should_allow_getting_tracker_statistics() { @@ -123,7 +123,7 @@ mod tracker_apis { api_server .add_torrent( &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), - &sample_peer(), + &PeerBuilder::default().into(), ) .await; @@ -186,7 +186,7 @@ mod tracker_apis { use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::server::start_default_api; - use crate::common::fixtures::sample_peer; + use crate::common::fixtures::PeerBuilder; use crate::common::http::{Query, QueryParam}; #[tokio::test] @@ -195,7 +195,7 @@ mod tracker_apis { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - api_server.add_torrent(&info_hash, &sample_peer()).await; + api_server.add_torrent(&info_hash, &PeerBuilder::default().into()).await; let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::empty()) @@ -222,8 +222,8 @@ mod tracker_apis { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - api_server.add_torrent(&info_hash_1, &sample_peer()).await; - api_server.add_torrent(&info_hash_2, &sample_peer()).await; + api_server.add_torrent(&info_hash_1, &PeerBuilder::default().into()).await; + api_server.add_torrent(&info_hash_2, &PeerBuilder::default().into()).await; let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) @@ -250,8 +250,8 @@ mod tracker_apis { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = 
InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - api_server.add_torrent(&info_hash_1, &sample_peer()).await; - api_server.add_torrent(&info_hash_2, &sample_peer()).await; + api_server.add_torrent(&info_hash_1, &PeerBuilder::default().into()).await; + api_server.add_torrent(&info_hash_2, &PeerBuilder::default().into()).await; let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) @@ -323,7 +323,7 @@ mod tracker_apis { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let peer = sample_peer(); + let peer = PeerBuilder::default().into(); api_server.add_torrent(&info_hash, &peer).await; @@ -384,7 +384,7 @@ mod tracker_apis { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - api_server.add_torrent(&info_hash, &sample_peer()).await; + api_server.add_torrent(&info_hash, &PeerBuilder::default().into()).await; let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_torrent(&info_hash.to_string()) From ca8fc22562647649ce05244eddb0d78714d880ea Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 23 Jan 2023 13:41:31 +0000 Subject: [PATCH 0331/1003] test(http): [#159] add tests for public http tracker --- tests/http_tracker.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index bf75dfc26..c58990616 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -122,6 +122,30 @@ mod http_tracker_server { ) .await; } + + #[tokio::test] + async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { + let http_tracker_server = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let peer = PeerBuilder::default().into(); + + // Add a peer + 
http_tracker_server.add_torrent(&info_hash, &peer).await; + + let announce_query = AnnounceQueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer.peer_id) + .into(); + + assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&announce_query) + .await; + + assert_empty_announce_response(response).await; + } } } } From 7fa8ec84f9a35e01feaf872db36023c8dea45ed1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 23 Jan 2023 13:48:30 +0000 Subject: [PATCH 0332/1003] test(http): add test for failing convertion of peer Id into String If you try to convert a peer Id into a String it returns an empty String. ``` let id = peer::Id(*b"-qB00000000000000000"); assert_eq!(id.to_string(), ""); ``` --- src/tracker/peer.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 2da257d3e..dc362c5bd 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -215,6 +215,21 @@ impl Serialize for Id { #[cfg(test)] mod test { + + mod torrent_peer_id { + use crate::tracker::peer; + + #[test] + fn should_be_converted_into_string() { + // todo: it seems it's not working + let id = peer::Id(*b"-qB00000000000000000"); + assert_eq!(id.to_string(), ""); + + let id = peer::Id(*b"-qB00000000000000001"); + assert_eq!(id.to_string(), ""); + } + } + mod torrent_peer { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; From 62dbffacb79503e9921aeb9b4f154bb43bc65f36 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 23 Jan 2023 19:34:55 +0000 Subject: [PATCH 0333/1003] test(http): [#159] add test for missing announce req params in http tracker --- cSpell.json | 3 + src/http/mod.rs | 10 ++- tests/http/asserts.rs | 61 +++++++++++++++-- tests/http/requests.rs | 146 +++++++++++++++++++++++++++++++--------- tests/http/responses.rs | 6 ++ tests/http_tracker.rs | 74 ++++++++++++++++++-- 6 files changed, 255 insertions(+), 45 deletions(-) 
diff --git a/cSpell.json b/cSpell.json index 537ea65a5..0d0b73701 100644 --- a/cSpell.json +++ b/cSpell.json @@ -6,6 +6,7 @@ "Avicora", "Azureus", "bencode", + "bencoded", "binascii", "Bitflu", "bools", @@ -39,6 +40,7 @@ "nanos", "nextest", "nocapture", + "numwant", "oneshot", "ostr", "Pando", @@ -62,6 +64,7 @@ "Torrentstorm", "torrust", "torrustracker", + "trackerid", "typenum", "Unamed", "untuple", diff --git a/src/http/mod.rs b/src/http/mod.rs index 2fcb056d8..fa4c263b5 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -1,7 +1,13 @@ //! Tracker HTTP/HTTPS Protocol: //! -//! -//! +//! Original specification in BEP 3 (section "Trackers"): +//! +//! +//! +//! Other resources: +//! +//! - +//! - //! pub mod error; pub mod filters; diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 32aaf4d69..27270f7f2 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -1,12 +1,7 @@ use reqwest::Response; use super::responses::Announce; - -pub async fn assert_internal_server_error(response: Response) { - assert_eq!(response.status(), 200); - /* cspell:disable-next-line */ - assert_eq!(response.text().await.unwrap(), "d14:failure reason21:internal server errore"); -} +use crate::http::responses::Error; pub async fn assert_empty_announce_response(response: Response) { assert_eq!(response.status(), 200); @@ -19,3 +14,57 @@ pub async fn assert_announce_response(response: Response, expected_announce_resp let announce_response: Announce = serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); assert_eq!(announce_response, *expected_announce_response); } + +pub async fn assert_is_announce_response(response: Response) { + assert_eq!(response.status(), 200); + let body = response.text().await.unwrap(); + let _announce_response: Announce = serde_bencode::from_str(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{}\"", &body)); +} + +// Error responses + +pub async fn 
assert_internal_server_error_response(response: Response) { + assert_eq!(response.status(), 200); + let body = response.text().await.unwrap(); + let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { + panic!( + "response body should be a valid bencoded string for the 'internal server' error, got \"{}\"", + &body + ) + }); + let expected_error_response = Error { + failure_reason: "internal server error".to_string(), + }; + assert_eq!(error_response, expected_error_response); +} + +pub async fn assert_invalid_info_hash_error_response(response: Response) { + assert_eq!(response.status(), 200); + let body = response.text().await.unwrap(); + let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { + panic!( + "response body should be a valid bencoded string for the 'invalid info_hash' error, got \"{}\"", + &body + ) + }); + let expected_error_response = Error { + failure_reason: "info_hash is either missing or invalid".to_string(), + }; + assert_eq!(error_response, expected_error_response); +} + +pub async fn assert_invalid_peer_id_error_response(response: Response) { + assert_eq!(response.status(), 200); + let body = response.text().await.unwrap(); + let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { + panic!( + "response body should be a valid bencoded string for the 'invalid peer id' error, got \"{}\"", + &body + ) + }); + let expected_error_response = Error { + failure_reason: "peer_id is either missing or invalid".to_string(), + }; + assert_eq!(error_response, expected_error_response); +} diff --git a/tests/http/requests.rs b/tests/http/requests.rs index 7e59494c5..ceff2bd77 100644 --- a/tests/http/requests.rs +++ b/tests/http/requests.rs @@ -29,7 +29,7 @@ impl fmt::Display for AnnounceQuery { /// /// /// -/// Some parameters are not implemented yet. +/// Some parameters in the specification are not implemented in this tracker yet. 
impl AnnounceQuery { /// It builds the URL query component for the announce request. /// @@ -38,35 +38,11 @@ impl AnnounceQuery { /// /// pub fn build(&self) -> String { - let mut params = vec![ - ( - "info_hash", - percent_encoding::percent_encode(&self.info_hash, NON_ALPHANUMERIC).to_string(), - ), - ("peer_addr", self.peer_addr.to_string()), - ("downloaded", self.downloaded.to_string()), - ("uploaded", self.uploaded.to_string()), - ( - "peer_id", - percent_encoding::percent_encode(&self.peer_id, NON_ALPHANUMERIC).to_string(), - ), - ("port", self.port.to_string()), - ("left", self.left.to_string()), - ]; - - if let Some(event) = &self.event { - params.push(("event", event.to_string())); - } - - if let Some(compact) = &self.compact { - params.push(("compact", compact.to_string())); - } + self.params().to_string() + } - params - .iter() - .map(|param| format!("{}={}", param.0, param.1)) - .collect::>() - .join("&") + pub fn params(&self) -> AnnounceQueryParams { + AnnounceQueryParams::from(self) } } @@ -138,7 +114,117 @@ impl AnnounceQueryBuilder { self } - pub fn into(self) -> AnnounceQuery { + pub fn query(self) -> AnnounceQuery { self.announce_query } } + +/// It contains all the GET parameters that can be used in a HTTP Announce request. +/// +/// Sample Announce URL with all the GET parameters (mandatory and optional): +/// +/// ```text +/// http://127.0.0.1:7070/announce? 
+/// info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 (mandatory) +/// peer_addr=192.168.1.88 +/// downloaded=0 +/// uploaded=0 +/// peer_id=%2DqB00000000000000000 (mandatory) +/// port=17548 (mandatory) +/// left=0 +/// event=completed +/// compact=0 +/// ``` +pub struct AnnounceQueryParams { + pub info_hash: Option, + pub peer_addr: Option, + pub downloaded: Option, + pub uploaded: Option, + pub peer_id: Option, + pub port: Option, + pub left: Option, + pub event: Option, + pub compact: Option, +} + +impl std::fmt::Display for AnnounceQueryParams { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let mut params = vec![]; + + if let Some(info_hash) = &self.info_hash { + params.push(("info_hash", info_hash)); + } + if let Some(peer_addr) = &self.peer_addr { + params.push(("peer_addr", peer_addr)); + } + if let Some(downloaded) = &self.downloaded { + params.push(("downloaded", downloaded)); + } + if let Some(uploaded) = &self.uploaded { + params.push(("uploaded", uploaded)); + } + if let Some(peer_id) = &self.peer_id { + params.push(("peer_id", peer_id)); + } + if let Some(port) = &self.port { + params.push(("port", port)); + } + if let Some(left) = &self.left { + params.push(("left", left)); + } + if let Some(event) = &self.event { + params.push(("event", event)); + } + if let Some(compact) = &self.compact { + params.push(("compact", compact)); + } + + let query = params + .iter() + .map(|param| format!("{}={}", param.0, param.1)) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl AnnounceQueryParams { + pub fn from(announce_query: &AnnounceQuery) -> Self { + let event = announce_query.event.as_ref().map(std::string::ToString::to_string); + let compact = announce_query.compact.as_ref().map(std::string::ToString::to_string); + + Self { + info_hash: Some(percent_encoding::percent_encode(&announce_query.info_hash, NON_ALPHANUMERIC).to_string()), + peer_addr: Some(announce_query.peer_addr.to_string()), + downloaded: 
Some(announce_query.downloaded.to_string()), + uploaded: Some(announce_query.uploaded.to_string()), + peer_id: Some(percent_encoding::percent_encode(&announce_query.peer_id, NON_ALPHANUMERIC).to_string()), + port: Some(announce_query.port.to_string()), + left: Some(announce_query.left.to_string()), + event, + compact, + } + } + + pub fn remove_optional_params(&mut self) { + // todo: make them optional with the Option<...> in the AnnounceQuery struct + // if they are really optional. SO that we can crete a minimal AnnounceQuery + // instead of removing the optional params afterwards. + // + // The original specification on: + // + // says only `ip` and `event` are optional. + // + // On + // says only `ip`, `numwant`, `key` and `trackerid` are optional. + // + // but the server is responding if all these params are not included. + self.peer_addr = None; + self.downloaded = None; + self.uploaded = None; + self.left = None; + self.event = None; + self.compact = None; + } +} diff --git a/tests/http/responses.rs b/tests/http/responses.rs index e82197b03..bb0fda5d8 100644 --- a/tests/http/responses.rs +++ b/tests/http/responses.rs @@ -16,3 +16,9 @@ pub struct DictionaryPeer { pub peer_id: String, pub port: u16, } + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Error { + #[serde(rename = "failure reason")] + pub failure_reason: String, +} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index c58990616..44ec6454c 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -9,22 +9,82 @@ mod http_tracker_server { mod for_all_config_modes { mod receiving_an_announce_request { - use crate::http::asserts::assert_internal_server_error; + use crate::http::asserts::{ + assert_internal_server_error_response, assert_invalid_info_hash_error_response, + assert_invalid_peer_id_error_response, assert_is_announce_response, + }; use crate::http::client::Client; + use crate::http::requests::AnnounceQueryBuilder; use 
crate::http::server::start_default_http_tracker; + #[tokio::test] + async fn should_respond_when_only_the_mandatory_fields_are_provided() { + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + params.remove_optional_params(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_is_announce_response(response).await; + } + #[tokio::test] async fn should_fail_when_the_request_is_empty() { let http_tracker_server = start_default_http_tracker().await; let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; - assert_internal_server_error(response).await; + assert_internal_server_error_response(response).await; + } + + #[tokio::test] + async fn should_fail_when_a_mandatory_field_is_missing() { + let http_tracker_server = start_default_http_tracker().await; + + // Without `info_hash` param + + let mut params = AnnounceQueryBuilder::default().query().params(); + + params.info_hash = None; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_info_hash_error_response(response).await; + + // Without `peer_id` param + + let mut params = AnnounceQueryBuilder::default().query().params(); + + params.peer_id = None; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_peer_id_error_response(response).await; + + // Without `port` param + + let mut params = AnnounceQueryBuilder::default().query().params(); + + params.port = None; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; } } mod receiving_an_scrape_request { - use crate::http::asserts::assert_internal_server_error; + use 
crate::http::asserts::assert_internal_server_error_response; use crate::http::client::Client; use crate::http::server::start_default_http_tracker; @@ -34,7 +94,7 @@ mod http_tracker_server { let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; - assert_internal_server_error(response).await; + assert_internal_server_error_response(response).await; } } } @@ -62,7 +122,7 @@ mod http_tracker_server { .announce( &AnnounceQueryBuilder::default() .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) - .into(), + .query(), ) .await; @@ -99,7 +159,7 @@ mod http_tracker_server { &AnnounceQueryBuilder::default() .with_info_hash(&info_hash) .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .into(), + .query(), ) .await; @@ -136,7 +196,7 @@ mod http_tracker_server { let announce_query = AnnounceQueryBuilder::default() .with_info_hash(&info_hash) .with_peer_id(&peer.peer_id) - .into(); + .query(); assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); From 8ae4928e4d957e07c89a79b81827727ad30972d4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 24 Jan 2023 11:31:43 +0000 Subject: [PATCH 0334/1003] test(http): [#159] add test for invalid announce request params --- tests/common/fixtures.rs | 12 +++ tests/http/requests.rs | 17 +++- tests/http_tracker.rs | 188 +++++++++++++++++++++++++++++++++++++++ tests/tracker_api.rs | 12 +-- 4 files changed, 219 insertions(+), 10 deletions(-) diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index 78f7d381f..0ff6798f6 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -37,3 +37,15 @@ fn default_peer_for_testing() -> Peer { event: AnnounceEvent::Started, } } + +pub fn invalid_info_hashes() -> Vec { + [ + "0".to_string(), + "-1".to_string(), + "1.1".to_string(), + "INVALID INFOHASH".to_string(), + "9c38422213e30bff212b30c360d26f9a0213642".to_string(), // 39-char length instead of 40 + 
"9c38422213e30bff212b30c360d26f9a0213642&".to_string(), // Invalid char + ] + .to_vec() +} diff --git a/tests/http/requests.rs b/tests/http/requests.rs index ceff2bd77..885c48939 100644 --- a/tests/http/requests.rs +++ b/tests/http/requests.rs @@ -51,7 +51,7 @@ pub type ByteArray20 = [u8; 20]; pub type PortNumber = u16; pub enum Event { - //tarted, + //Started, //Stopped, Completed, } @@ -227,4 +227,19 @@ impl AnnounceQueryParams { self.event = None; self.compact = None; } + + pub fn set(&mut self, param_name: &str, param_value: &str) { + match param_name { + "info_hash" => self.info_hash = Some(param_value.to_string()), + "peer_addr" => self.peer_addr = Some(param_value.to_string()), + "downloaded" => self.downloaded = Some(param_value.to_string()), + "uploaded" => self.uploaded = Some(param_value.to_string()), + "peer_id" => self.peer_id = Some(param_value.to_string()), + "port" => self.port = Some(param_value.to_string()), + "left" => self.left = Some(param_value.to_string()), + "event" => self.event = Some(param_value.to_string()), + "compact" => self.compact = Some(param_value.to_string()), + &_ => panic!("Invalid param name for announce query"), + } + } } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 44ec6454c..a28a9efb1 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -9,6 +9,7 @@ mod http_tracker_server { mod for_all_config_modes { mod receiving_an_announce_request { + use crate::common::fixtures::invalid_info_hashes; use crate::http::asserts::{ assert_internal_server_error_response, assert_invalid_info_hash_error_response, assert_invalid_peer_id_error_response, assert_is_announce_response, @@ -81,6 +82,193 @@ mod http_tracker_server { assert_internal_server_error_response(response).await; } + + #[tokio::test] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + for 
invalid_value in &invalid_info_hashes() { + params.set("info_hash", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_info_hash_error_response(response).await; + } + } + + #[tokio::test] + async fn should_not_fail_when_the_peer_address_param_is_invalid() { + // AnnounceQuery does not even contain the `peer_addr` + // The peer IP is obtained in two ways: + // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP if there. + // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request header is tracker is running `on_reverse_proxy`. + + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_is_announce_response(response).await; + } + + #[tokio::test] + async fn should_fail_when_the_downloaded_param_is_invalid() { + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("downloaded", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + #[tokio::test] + async fn should_fail_when_the_uploaded_param_is_invalid() { + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("uploaded", invalid_value); + + let response = 
Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + #[tokio::test] + async fn should_fail_when_the_peer_id_param_is_invalid() { + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "-qB0000000000000000", // 19 bytes + "-qB000000000000000000", // 21 bytes + ]; + + for invalid_value in invalid_values { + params.set("peer_id", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_peer_id_error_response(response).await; + } + } + + #[tokio::test] + async fn should_fail_when_the_port_param_is_invalid() { + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("port", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + #[tokio::test] + async fn should_fail_when_the_left_param_is_invalid() { + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("left", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + #[tokio::test] + async fn should_not_fail_when_the_event_param_is_invalid() { + // All invalid values are ignored as if the `event` param was empty + 
+ let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "Started", // It should be lowercase + "Stopped", // It should be lowercase + "Completed", // It should be lowercase + ]; + + for invalid_value in invalid_values { + params.set("event", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_is_announce_response(response).await; + } + } + + #[tokio::test] + async fn should_not_fail_when_the_compact_param_is_invalid() { + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("compact", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } } mod receiving_an_scrape_request { diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 5710db6a6..b79e8a8af 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -10,18 +10,12 @@ mod common; mod tracker_apis { + use crate::common::fixtures::invalid_info_hashes; + // When these infohashes are used in URL path params // the response is a custom response returned in the handler fn invalid_infohashes_returning_bad_request() -> Vec { - [ - "0".to_string(), - "-1".to_string(), - "1.1".to_string(), - "INVALID INFOHASH".to_string(), - "9c38422213e30bff212b30c360d26f9a0213642".to_string(), // 39-char length instead of 40 - "9c38422213e30bff212b30c360d26f9a0213642&".to_string(), // Invalid char - ] - .to_vec() + invalid_info_hashes() } // When these infohashes are used in URL path params From 8e5c99238cce09fa8308845ebb502aa912d1a32f Mon Sep 17 00:00:00 2001 From: 
Jose Celano Date: Tue, 24 Jan 2023 20:57:21 +0000 Subject: [PATCH 0335/1003] refactor(http): [#159] add dependency: serde_bytes It will be used to deserialize bytes from HTTP tracker announce compact responses. For example: ``` pub peers: Vec, ``` --- Cargo.lock | 5 +++-- Cargo.toml | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bc82c64f3..e27eace74 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2261,9 +2261,9 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.7" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfc50e8183eeeb6178dcb167ae34a8051d63535023ae38b5d8d12beae193d37b" +checksum = "718dc5fff5b36f99093fc49b280cfc96ce6fc824317783bff5a1fed0c7a64819" dependencies = [ "serde", ] @@ -2830,6 +2830,7 @@ dependencies = [ "reqwest", "serde", "serde_bencode", + "serde_bytes", "serde_json", "serde_repr", "serde_urlencoded", diff --git a/Cargo.toml b/Cargo.toml index 0e67c65ae..9afbc16a8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -67,3 +67,4 @@ mockall = "0.11" reqwest = { version = "0.11.13", features = ["json"] } serde_urlencoded = "0.7.1" serde_repr = "0.1.10" +serde_bytes = "0.11.8" From 96fb56cde69387118e36f5f0b9e20da6bdde200b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 24 Jan 2023 21:00:10 +0000 Subject: [PATCH 0336/1003] test(http): [#159] add test for compact announce response --- tests/http/asserts.rs | 29 +++++++++++++-- tests/http/requests.rs | 9 +++-- tests/http/responses.rs | 80 ++++++++++++++++++++++++++++++++++++++++- tests/http_tracker.rs | 47 ++++++++++++++++++++++-- 4 files changed, 156 insertions(+), 9 deletions(-) diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 27270f7f2..b5d84b0a1 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -1,7 +1,7 @@ use reqwest::Response; -use super::responses::Announce; -use crate::http::responses::Error; +use super::responses::{Announce, DecodedCompactAnnounce};
+use crate::http::responses::{CompactAnnounce, Error}; pub async fn assert_empty_announce_response(response: Response) { assert_eq!(response.status(), 200); @@ -11,10 +11,33 @@ pub async fn assert_empty_announce_response(response: Response) { pub async fn assert_announce_response(response: Response, expected_announce_response: &Announce) { assert_eq!(response.status(), 200); - let announce_response: Announce = serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); + let body = response.text().await.unwrap(); + let announce_response: Announce = serde_bencode::from_str(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{}\"", &body)); assert_eq!(announce_response, *expected_announce_response); } +/// Sample bencoded response as byte array: +/// +/// ```text +/// b"d8:intervali120e12:min intervali120e8:completei2e10:incompletei0e5:peers6:~\0\0\x01\x1f\x90e6:peers60:e" +/// ``` +pub async fn assert_compact_announce_response(response: Response, expected_response: &DecodedCompactAnnounce) { + assert_eq!(response.status(), 200); + + let bytes = response.bytes().await.unwrap(); + + let compact_announce: CompactAnnounce = serde_bencode::from_bytes(&bytes).unwrap_or_else(|_| { + panic!( + "response body should be a valid compact announce response, got \"{:?}\"", + &bytes + ) + }); + let actual_response = DecodedCompactAnnounce::from(compact_announce); + + assert_eq!(actual_response, *expected_response); +} + pub async fn assert_is_announce_response(response: Response) { assert_eq!(response.status(), 200); let body = response.text().await.unwrap(); diff --git a/tests/http/requests.rs b/tests/http/requests.rs index 885c48939..5453d9261 100644 --- a/tests/http/requests.rs +++ b/tests/http/requests.rs @@ -69,14 +69,14 @@ impl fmt::Display for Event { #[derive(Serialize_repr, PartialEq, Debug)] #[repr(u8)] pub enum Compact { - //Accepted = 1, + Accepted = 1, NotAccepted = 0, } impl fmt::Display for Compact { fn fmt(&self, f: 
&mut fmt::Formatter) -> fmt::Result { match self { - //Compact::Accepted => write!(f, "1"), + Compact::Accepted => write!(f, "1"), Compact::NotAccepted => write!(f, "0"), } } @@ -114,6 +114,11 @@ impl AnnounceQueryBuilder { self } + pub fn with_compact(mut self, compact: Compact) -> Self { + self.announce_query.compact = Some(compact); + self + } + pub fn query(self) -> AnnounceQuery { self.announce_query } diff --git a/tests/http/responses.rs b/tests/http/responses.rs index bb0fda5d8..0bef39b09 100644 --- a/tests/http/responses.rs +++ b/tests/http/responses.rs @@ -1,3 +1,5 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use serde::{self, Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, PartialEq)] @@ -7,7 +9,7 @@ pub struct Announce { pub interval: u32, #[serde(rename = "min interval")] pub min_interval: u32, - pub peers: Vec, + pub peers: Vec, // Peers with IPV4 } #[derive(Serialize, Deserialize, Debug, PartialEq)] @@ -17,6 +19,82 @@ pub struct DictionaryPeer { pub port: u16, } +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct CompactAnnounce { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub min_interval: u32, + #[serde(with = "serde_bytes")] + pub peers: Vec, +} + +#[derive(Debug, PartialEq)] +pub struct DecodedCompactAnnounce { + // code-review: there could be a way to deserialize this struct directly + // by using serde instead of doing it manually. Or at least using a custom deserializer. 
+ pub complete: u32, + pub incomplete: u32, + pub interval: u32, + pub min_interval: u32, + pub peers: CompactPeerList, +} + +#[derive(Debug, PartialEq)] +pub struct CompactPeerList { + peers: Vec, +} + +impl CompactPeerList { + pub fn new(peers: Vec) -> Self { + Self { peers } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub struct CompactPeer { + ip: Ipv4Addr, + port: u16, +} + +impl CompactPeer { + pub fn new(socket_addr: &SocketAddr) -> Self { + match socket_addr.ip() { + IpAddr::V4(ip) => Self { + ip, + port: socket_addr.port(), + }, + IpAddr::V6(_ip) => panic!("IPV6 is not supported for compact peer"), + } + } + + pub fn new_from_bytes(bytes: &[u8]) -> Self { + Self { + ip: Ipv4Addr::new(bytes[0], bytes[1], bytes[2], bytes[3]), + port: u16::from_be_bytes([bytes[4], bytes[5]]), + } + } +} + +impl From for DecodedCompactAnnounce { + fn from(compact_announce: CompactAnnounce) -> Self { + let mut peers = vec![]; + + for peer_bytes in compact_announce.peers.chunks_exact(6) { + peers.push(CompactPeer::new_from_bytes(peer_bytes)); + } + + Self { + complete: compact_announce.complete, + incomplete: compact_announce.incomplete, + interval: compact_announce.interval, + min_interval: compact_announce.min_interval, + peers: CompactPeerList::new(peers), + } + } +} + #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Error { #[serde(rename = "failure reason")] diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index a28a9efb1..5b492cbc1 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -296,10 +296,12 @@ mod http_tracker_server { use torrust_tracker::tracker::peer; use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::{assert_announce_response, assert_empty_announce_response}; + use crate::http::asserts::{ + assert_announce_response, assert_compact_announce_response, assert_empty_announce_response, + }; use crate::http::client::Client; - use crate::http::requests::AnnounceQueryBuilder; - use 
crate::http::responses::{Announce, DictionaryPeer}; + use crate::http::requests::{AnnounceQueryBuilder, Compact}; + use crate::http::responses::{Announce, CompactPeer, CompactPeerList, DecodedCompactAnnounce, DictionaryPeer}; use crate::http::server::start_public_http_tracker; #[tokio::test] @@ -394,6 +396,45 @@ mod http_tracker_server { assert_empty_announce_response(response).await; } + + #[tokio::test] + async fn should_return_the_compact_response() { + // Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + + let http_tracker_server = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .into(); + + // Add the Peer 1 + http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 accepting compact responses + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &AnnounceQueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_compact(Compact::Accepted) + .query(), + ) + .await; + + let expected_response = DecodedCompactAnnounce { + complete: 2, + incomplete: 0, + interval: 120, + min_interval: 120, + peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), + }; + + assert_compact_announce_response(response, &expected_response).await; + } } } } From 3fce688466034f85ccee6f4128f65b8856780d9f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 25 Jan 2023 10:30:16 +0000 Subject: [PATCH 0337/1003] refactor(http): extract converter --- tests/http/responses.rs | 11 +++++++++++ tests/http_tracker.rs | 12 +++--------- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/tests/http/responses.rs b/tests/http/responses.rs index 0bef39b09..7cf283916 100644 
--- a/tests/http/responses.rs +++ b/tests/http/responses.rs @@ -1,6 +1,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use serde::{self, Deserialize, Serialize}; +use torrust_tracker::tracker::peer::Peer; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Announce { @@ -19,6 +20,16 @@ pub struct DictionaryPeer { pub port: u16, } +impl From for DictionaryPeer { + fn from(peer: Peer) -> Self { + DictionaryPeer { + peer_id: peer.peer_id.to_string(), + ip: peer.peer_addr.ip().to_string(), + port: peer.peer_addr.port(), + } + } +} + #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct CompactAnnounce { pub complete: u32, diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 5b492cbc1..a6f44acec 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -343,7 +343,7 @@ mod http_tracker_server { // Add the Peer 1 http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; - // Announce the new Peer 2 + // Announce the new Peer 2. This new peer is non included on the response peer list let response = Client::new(http_tracker_server.get_connection_info()) .announce( &AnnounceQueryBuilder::default() @@ -353,13 +353,7 @@ mod http_tracker_server { ) .await; - let expected_peer = DictionaryPeer { - peer_id: previously_announced_peer.peer_id.to_string(), - ip: previously_announced_peer.peer_addr.ip().to_string(), - port: previously_announced_peer.peer_addr.port(), - }; - - // This new peer is non included on the response peer list + // It should only contain teh previously announced peer assert_announce_response( response, &Announce { @@ -367,7 +361,7 @@ mod http_tracker_server { incomplete: 0, interval: http_tracker_server.tracker.config.announce_interval, min_interval: http_tracker_server.tracker.config.min_announce_interval, - peers: vec![expected_peer], + peers: vec![DictionaryPeer::from(previously_announced_peer)], }, ) .await; From 85a489426c0562627d23728d4ab691ec8949cd89 Mon Sep 17 00:00:00 2001 From: Jose 
Celano Date: Wed, 25 Jan 2023 11:08:10 +0000 Subject: [PATCH 0338/1003] test(http): [#159] add test for default announce response format --- tests/http/requests.rs | 5 +++++ tests/http_tracker.rs | 44 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/tests/http/requests.rs b/tests/http/requests.rs index 5453d9261..e24103b79 100644 --- a/tests/http/requests.rs +++ b/tests/http/requests.rs @@ -119,6 +119,11 @@ impl AnnounceQueryBuilder { self } + pub fn without_compact(mut self) -> Self { + self.announce_query.compact = None; + self + } + pub fn query(self) -> AnnounceQuery { self.announce_query } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index a6f44acec..26c378b44 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -292,6 +292,7 @@ mod http_tracker_server { mod receiving_an_announce_request { use std::str::FromStr; + use reqwest::Response; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -301,7 +302,9 @@ mod http_tracker_server { }; use crate::http::client::Client; use crate::http::requests::{AnnounceQueryBuilder, Compact}; - use crate::http::responses::{Announce, CompactPeer, CompactPeerList, DecodedCompactAnnounce, DictionaryPeer}; + use crate::http::responses::{ + Announce, CompactAnnounce, CompactPeer, CompactPeerList, DecodedCompactAnnounce, DictionaryPeer, + }; use crate::http::server::start_public_http_tracker; #[tokio::test] @@ -429,6 +432,45 @@ mod http_tracker_server { assert_compact_announce_response(response, &expected_response).await; } + + #[tokio::test] + async fn should_not_return_the_compact_response_by_default() { + // code-review: the HTTP tracker does not return the compact response by default if the "compact" + // param is not provided in the announce URL. The BEP 23 suggest to do so. 
+ + let http_tracker_server = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .into(); + + // Add the Peer 1 + http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 without passing the "compact" param + // By default it should respond with the compact peer list + // https://www.bittorrent.org/beps/bep_0023.html + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &AnnounceQueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .without_compact() + .query(), + ) + .await; + + assert!(!is_a_compact_announce_response(response).await); + } + + async fn is_a_compact_announce_response(response: Response) -> bool { + let bytes = response.bytes().await.unwrap(); + let compact_announce = serde_bencode::from_bytes::(&bytes); + compact_announce.is_ok() + } } } } From 080f3c43ad55a1a44ae1507c31b21a82e2a944c3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 25 Jan 2023 13:03:55 +0000 Subject: [PATCH 0339/1003] test(http): [#159] add tests for updating statistics after announce request --- tests/http/client.rs | 29 ++++++++----- tests/http/requests.rs | 5 +++ tests/http/server.rs | 21 ++++++++++ tests/http_tracker.rs | 93 +++++++++++++++++++++++++++++++++++++++++- 4 files changed, 136 insertions(+), 12 deletions(-) diff --git a/tests/http/client.rs b/tests/http/client.rs index ae51bc02e..df9e012f0 100644 --- a/tests/http/client.rs +++ b/tests/http/client.rs @@ -1,4 +1,6 @@ -use reqwest::Response; +use std::net::IpAddr; + +use reqwest::{Client as ReqwestClient, Response}; use super::connection_info::ConnectionInfo; use super::requests::AnnounceQuery; @@ -6,26 +8,31 @@ use super::requests::AnnounceQuery; /// HTTP Tracker Client pub
struct Client { connection_info: ConnectionInfo, + reqwest_client: ReqwestClient, } impl Client { pub fn new(connection_info: ConnectionInfo) -> Self { - Self { connection_info } + Self { + connection_info, + reqwest_client: reqwest::Client::builder().build().unwrap(), + } + } + + /// Creates the new client binding it to an specific local address + pub fn bind(connection_info: ConnectionInfo, local_address: IpAddr) -> Self { + Self { + connection_info, + reqwest_client: reqwest::Client::builder().local_address(local_address).build().unwrap(), + } } pub async fn announce(&self, query: &AnnounceQuery) -> Response { - let path_with_query = format!("announce?{query}"); - self.get(&path_with_query).await + self.get(&format!("announce?{query}")).await } pub async fn get(&self, path: &str) -> Response { - reqwest::Client::builder() - .build() - .unwrap() - .get(self.base_url(path)) - .send() - .await - .unwrap() + self.reqwest_client.get(self.base_url(path)).send().await.unwrap() } fn base_url(&self, path: &str) -> String { diff --git a/tests/http/requests.rs b/tests/http/requests.rs index e24103b79..9135020e9 100644 --- a/tests/http/requests.rs +++ b/tests/http/requests.rs @@ -119,6 +119,11 @@ impl AnnounceQueryBuilder { self } + pub fn with_peer_addr(mut self, peer_addr: &IpAddr) -> Self { + self.announce_query.peer_addr = *peer_addr; + self + } + pub fn without_compact(mut self) -> Self { self.announce_query.compact = None; self diff --git a/tests/http/server.rs b/tests/http/server.rs index 32d02b060..0eb672e58 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -1,4 +1,5 @@ use core::panic; +use std::net::SocketAddr; use std::sync::Arc; use torrust_tracker::config::{ephemeral_configuration, Configuration}; @@ -10,10 +11,30 @@ use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use super::connection_info::ConnectionInfo; +/// Starts a HTTP tracker with mode "public" pub async fn start_public_http_tracker() -> Server { 
start_default_http_tracker().await } +/// Starts a HTTP tracker with a wildcard IPV6 address. +/// The configuration in the `config.toml` file would be like this: +/// +/// ```text +/// [[http_trackers]] +/// bind_address = "[::]:7070" +/// ``` +pub async fn start_ipv6_http_tracker() -> Server { + let mut configuration = ephemeral_configuration(); + + // Change socket address to "wildcard address" (unspecified address which means any IP address) + // but keeping the random port generated with the ephemeral configuration. + let socket_addr: SocketAddr = configuration.http_trackers[0].bind_address.parse().unwrap(); + let new_ipv6_socket_address = format!("[::]:{}", socket_addr.port()); + configuration.http_trackers[0].bind_address = new_ipv6_socket_address; + + start_custom_http_tracker(Arc::new(configuration)).await +} + pub async fn start_default_http_tracker() -> Server { let configuration = tracker_configuration(); start_custom_http_tracker(configuration.clone()).await diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 26c378b44..c29c6af6f 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -290,6 +290,7 @@ mod http_tracker_server { mod configured_as_public { mod receiving_an_announce_request { + use std::net::{IpAddr, Ipv6Addr}; use std::str::FromStr; use reqwest::Response; @@ -305,7 +306,7 @@ mod http_tracker_server { use crate::http::responses::{ Announce, CompactAnnounce, CompactPeer, CompactPeerList, DecodedCompactAnnounce, DictionaryPeer, }; - use crate::http::server::start_public_http_tracker; + use crate::http::server::{start_ipv6_http_tracker, start_public_http_tracker}; #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { @@ -471,6 +472,96 @@ mod http_tracker_server { let compact_announce = serde_bencode::from_bytes::(&bytes); compact_announce.is_ok() } + + #[tokio::test] + async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { + let http_tracker_server = 
start_public_http_tracker().await; + + Client::new(http_tracker_server.get_connection_info()) + .announce(&AnnounceQueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { + let http_tracker_server = start_ipv6_http_tracker().await; + + Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) + .announce(&AnnounceQueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. + + let http_tracker_server = start_public_http_tracker().await; + + Client::new(http_tracker_server.get_connection_info()) + .announce( + &AnnounceQueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 0); + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { + let http_tracker_server = start_public_http_tracker().await; + + Client::new(http_tracker_server.get_connection_info()) + .announce(&AnnounceQueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { + let http_tracker_server = start_ipv6_http_tracker().await; + + Client::bind(http_tracker_server.get_connection_info(), 
IpAddr::from_str("::1").unwrap()) + .announce(&AnnounceQueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 1); + } + + #[tokio::test] + async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. + + let http_tracker_server = start_public_http_tracker().await; + + Client::new(http_tracker_server.get_connection_info()) + .announce( + &AnnounceQueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 0); + } } } } From 5cc2ac1595c802e9f25d2d98085cedc097f69be4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 26 Jan 2023 12:00:36 +0000 Subject: [PATCH 0340/1003] refactor(http): [#159] add dependency local-ip-address We need it to get the address of the HTTP client we use in tests. 
--- Cargo.lock | 23 +++++++++++++++++++++++ Cargo.toml | 1 + 2 files changed, 24 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index e27eace74..8347362ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1268,6 +1268,18 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "local-ip-address" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faa9d02443a1741e9f51dafdfcbffb3863b2a89c457d762b40337d6c5153ef81" +dependencies = [ + "libc", + "neli", + "thiserror", + "windows-sys 0.42.0", +] + [[package]] name = "lock_api" version = "0.4.9" @@ -1495,6 +1507,16 @@ dependencies = [ "tempfile", ] +[[package]] +name = "neli" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9053554eb5dcb7e10d9cdab1206965bde870eed5d0d341532ca035e3ba221508" +dependencies = [ + "byteorder", + "libc", +] + [[package]] name = "nix" version = "0.23.1" @@ -2819,6 +2841,7 @@ dependencies = [ "futures", "hex", "lazy_static", + "local-ip-address", "log", "mockall", "openssl", diff --git a/Cargo.toml b/Cargo.toml index 9afbc16a8..cf90da8f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -68,3 +68,4 @@ reqwest = { version = "0.11.13", features = ["json"] } serde_urlencoded = "0.7.1" serde_repr = "0.1.10" serde_bytes = "0.11.8" +local-ip-address = "0.5.1" From 452b81abb546b7cbf53ab643c936b8a0a365e27e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 26 Jan 2023 12:03:09 +0000 Subject: [PATCH 0341/1003] test(http): [#159] add tests for assigning IP to peers in announce request --- tests/http/client.rs | 13 +++ tests/http/server.rs | 26 ++++- tests/http_tracker.rs | 216 ++++++++++++++++++++++++++++++++++-------- 3 files changed, 213 insertions(+), 42 deletions(-) diff --git a/tests/http/client.rs b/tests/http/client.rs index df9e012f0..d66ec2a00 100644 --- a/tests/http/client.rs +++ 
b/tests/http/client.rs @@ -31,10 +31,23 @@ impl Client { self.get(&format!("announce?{query}")).await } + pub async fn announce_with_header(&self, query: &AnnounceQuery, key: &str, value: &str) -> Response { + self.get_with_header(&format!("announce?{query}"), key, value).await + } + pub async fn get(&self, path: &str) -> Response { self.reqwest_client.get(self.base_url(path)).send().await.unwrap() } + pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Response { + self.reqwest_client + .get(self.base_url(path)) + .header(key, value) + .send() + .await + .unwrap() + } + fn base_url(&self, path: &str) -> String { format!("http://{}/{path}", &self.connection_info.bind_address) } diff --git a/tests/http/server.rs b/tests/http/server.rs index 0eb672e58..506bf75e7 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -1,5 +1,5 @@ use core::panic; -use std::net::SocketAddr; +use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; use torrust_tracker::config::{ephemeral_configuration, Configuration}; @@ -35,6 +35,30 @@ pub async fn start_ipv6_http_tracker() -> Server { start_custom_http_tracker(Arc::new(configuration)).await } +/// Starts a HTTP tracker with an specific `external_ip`. +/// The configuration in the `config.toml` file would be like this: +/// +/// ```text +/// external_ip = "2.137.87.41" +/// ``` +pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr) -> Server { + let mut configuration = ephemeral_configuration(); + configuration.external_ip = Some(external_ip.to_string()); + start_custom_http_tracker(Arc::new(configuration)).await +} + +/// Starts a HTTP tracker `on_reverse_proxy`. 
+/// The configuration in the `config.toml` file would be like this: +/// +/// ```text +/// on_reverse_proxy = true +/// ``` +pub async fn start_http_tracker_on_reverse_proxy() -> Server { + let mut configuration = ephemeral_configuration(); + configuration.on_reverse_proxy = true; + start_custom_http_tracker(Arc::new(configuration)).await +} + pub async fn start_default_http_tracker() -> Server { let configuration = tracker_configuration(); start_custom_http_tracker(configuration.clone()).await diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index c29c6af6f..2a1a6004b 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -9,14 +9,29 @@ mod http_tracker_server { mod for_all_config_modes { mod receiving_an_announce_request { - use crate::common::fixtures::invalid_info_hashes; + use std::net::{IpAddr, Ipv6Addr}; + use std::str::FromStr; + + use local_ip_address::local_ip; + use reqwest::Response; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; use crate::http::asserts::{ + assert_announce_response, assert_compact_announce_response, assert_empty_announce_response, assert_internal_server_error_response, assert_invalid_info_hash_error_response, assert_invalid_peer_id_error_response, assert_is_announce_response, }; use crate::http::client::Client; - use crate::http::requests::AnnounceQueryBuilder; - use crate::http::server::start_default_http_tracker; + use crate::http::requests::{AnnounceQueryBuilder, Compact}; + use crate::http::responses::{ + Announce, CompactAnnounce, CompactPeer, CompactPeerList, DecodedCompactAnnounce, DictionaryPeer, + }; + use crate::http::server::{ + start_default_http_tracker, start_http_tracker_on_reverse_proxy, start_http_tracker_with_external_ip, + start_ipv6_http_tracker, start_public_http_tracker, + }; #[tokio::test] async fn should_respond_when_only_the_mandatory_fields_are_provided() { @@ -269,44 +284,6 
@@ mod http_tracker_server { assert_internal_server_error_response(response).await; } } - } - - mod receiving_an_scrape_request { - use crate::http::asserts::assert_internal_server_error_response; - use crate::http::client::Client; - use crate::http::server::start_default_http_tracker; - - #[tokio::test] - async fn should_fail_when_the_request_is_empty() { - let http_tracker_server = start_default_http_tracker().await; - - let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; - - assert_internal_server_error_response(response).await; - } - } - } - - mod configured_as_public { - - mod receiving_an_announce_request { - use std::net::{IpAddr, Ipv6Addr}; - use std::str::FromStr; - - use reqwest::Response; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - - use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::{ - assert_announce_response, assert_compact_announce_response, assert_empty_announce_response, - }; - use crate::http::client::Client; - use crate::http::requests::{AnnounceQueryBuilder, Compact}; - use crate::http::responses::{ - Announce, CompactAnnounce, CompactPeer, CompactPeerList, DecodedCompactAnnounce, DictionaryPeer, - }; - use crate::http::server::{start_ipv6_http_tracker, start_public_http_tracker}; #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { @@ -562,6 +539,163 @@ mod http_tracker_server { assert_eq!(stats.tcp6_announces_handled, 0); } + + #[tokio::test] + async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { + let http_tracker_server = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let client_ip = local_ip().unwrap(); + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let announce_query = AnnounceQueryBuilder::default() + 
.with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), client_ip); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + } + + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. + + client <-> tracker <-> Internet + 127.0.0.1 external_ip = "2.137.87.41" + */ + + let http_tracker_server = start_http_tracker_with_external_ip(&IpAddr::from_str("2.137.87.41").unwrap()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let announce_query = AnnounceQueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + } + + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. 
+ + client <-> tracker <-> Internet + ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" + */ + + let http_tracker_server = + start_http_tracker_with_external_ip(&IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()) + .await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let announce_query = AnnounceQueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + } + + #[tokio::test] + async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( + ) { + /* + client <-> http proxy <-> tracker <-> Internet + ip: header: config: peer addr: + 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 + */ + + let http_tracker_server = start_http_tracker_on_reverse_proxy().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let client = Client::new(http_tracker_server.get_connection_info()); + + let announce_query = AnnounceQueryBuilder::default().with_info_hash(&info_hash).query(); + + // todo: shouldn't be the the leftmost IP address? + // THe application is taken the the rightmost IP address. 
See function http::filters::peer_addr + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For + client + .announce_with_header( + &announce_query, + "X-Forwarded-For", + "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", + ) + .await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); + } } + + mod receiving_an_scrape_request { + use crate::http::asserts::assert_internal_server_error_response; + use crate::http::client::Client; + use crate::http::server::start_default_http_tracker; + + #[tokio::test] + async fn should_fail_when_the_request_is_empty() { + let http_tracker_server = start_default_http_tracker().await; + + let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; + + assert_internal_server_error_response(response).await; + } + } + } + + mod configured_as_whitelisted { + + mod and_receiving_an_announce_request {} + + mod receiving_an_scrape_request {} + } + + mod configured_as_private { + + mod and_receiving_an_announce_request {} + + mod receiving_an_scrape_request {} + } + + mod configured_as_private_and_whitelisted { + + mod and_receiving_an_announce_request {} + + mod receiving_an_scrape_request {} } } From 86155d6b337c8421e1b6d439fe4befc0a5cc6a63 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 26 Jan 2023 12:29:58 +0000 Subject: [PATCH 0342/1003] refactor(http): improve readability --- tests/http_tracker.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 2a1a6004b..9cd43a155 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -25,9 +25,7 @@ mod http_tracker_server { }; use crate::http::client::Client; use crate::http::requests::{AnnounceQueryBuilder, Compact}; - use crate::http::responses::{ - Announce, CompactAnnounce, CompactPeer, 
CompactPeerList, DecodedCompactAnnounce, DictionaryPeer, - }; + use crate::http::responses::{self, Announce, CompactAnnounce, CompactPeer, CompactPeerList, DictionaryPeer}; use crate::http::server::{ start_default_http_tracker, start_http_tracker_on_reverse_proxy, start_http_tracker_with_external_ip, start_ipv6_http_tracker, start_public_http_tracker, @@ -400,7 +398,7 @@ mod http_tracker_server { ) .await; - let expected_response = DecodedCompactAnnounce { + let expected_response = responses::DecodedCompactAnnounce { complete: 2, incomplete: 0, interval: 120, From 11492a32b666747194153f74e7ecc646d429953a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 26 Jan 2023 13:12:09 +0000 Subject: [PATCH 0343/1003] test(http): [#159] add tests for announce request in whitelisted mode --- tests/http/asserts.rs | 15 +++++++++++++++ tests/http/server.rs | 12 +++++++++++- tests/http_tracker.rs | 43 ++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 68 insertions(+), 2 deletions(-) diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index b5d84b0a1..60a6a2013 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -91,3 +91,18 @@ pub async fn assert_invalid_peer_id_error_response(response: Response) { }; assert_eq!(error_response, expected_error_response); } + +pub async fn assert_torrent_not_in_whitelist_error_response(response: Response) { + assert_eq!(response.status(), 200); + let body = response.text().await.unwrap(); + let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { + panic!( + "response body should be a valid bencoded string for the 'torrent not on whitelist' error, got \"{}\"", + &body + ) + }); + let expected_error_response = Error { + failure_reason: "torrent not on whitelist".to_string(), + }; + assert_eq!(error_response, expected_error_response); +} diff --git a/tests/http/server.rs b/tests/http/server.rs index 506bf75e7..5cd1fec19 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -5,6 +5,7 
@@ use std::sync::Arc; use torrust_tracker::config::{ephemeral_configuration, Configuration}; use torrust_tracker::jobs::http_tracker; use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::tracker::mode::Mode; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; @@ -13,7 +14,16 @@ use super::connection_info::ConnectionInfo; /// Starts a HTTP tracker with mode "public" pub async fn start_public_http_tracker() -> Server { - start_default_http_tracker().await + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Public; + start_custom_http_tracker(Arc::new(configuration)).await +} + +/// Starts a HTTP tracker with mode "listed" +pub async fn start_whitelisted_http_tracker() -> Server { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Listed; + start_custom_http_tracker(Arc::new(configuration)).await } /// Starts a HTTP tracker with a wildcard IPV6 address. 
diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 9cd43a155..05a2dfba1 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -678,7 +678,48 @@ mod http_tracker_server { mod configured_as_whitelisted { - mod and_receiving_an_announce_request {} + mod and_receiving_an_announce_request { + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; + use crate::http::client::Client; + use crate::http::requests::AnnounceQueryBuilder; + use crate::http::server::start_whitelisted_http_tracker; + + #[tokio::test] + async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { + let http_tracker_server = start_whitelisted_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&AnnounceQueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_torrent_not_in_whitelist_error_response(response).await; + } + + #[tokio::test] + async fn should_allow_announcing_a_whitelisted_torrent() { + let http_tracker_server = start_whitelisted_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker_server + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&AnnounceQueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_is_announce_response(response).await; + } + } mod receiving_an_scrape_request {} } From badb7912091efde29da415c5527b6571c960e29f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 26 Jan 2023 18:05:48 +0000 Subject: [PATCH 0344/1003] test(http): [#159] add tests for announce request in private mode --- 
src/tracker/auth.rs | 10 ++++++ tests/http/asserts.rs | 30 ++++++++++++++++++ tests/http/client.rs | 51 +++++++++++++++++++++++++---- tests/http/connection_info.rs | 6 ++-- tests/http/server.rs | 7 ++++ tests/http_tracker.rs | 60 ++++++++++++++++++++++++++++++++++- 6 files changed, 153 insertions(+), 11 deletions(-) diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index c4062bc68..3b8af96a1 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -78,6 +78,16 @@ impl Key { None } } + + /// # Panics + /// + /// Will fail if the key id is not a valid key id. + #[must_use] + pub fn id(&self) -> KeyId { + // todo: replace the type of field `key` with type `KeyId`. + // The constructor should fail if an invalid KeyId is provided. + KeyId::from_str(&self.key).unwrap() + } } #[derive(Debug, Display, PartialEq, Clone)] diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 60a6a2013..cf4683a7b 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -106,3 +106,33 @@ pub async fn assert_torrent_not_in_whitelist_error_response(response: Response) }; assert_eq!(error_response, expected_error_response); } + +pub async fn assert_peer_not_authenticated_error_response(response: Response) { + assert_eq!(response.status(), 200); + let body = response.text().await.unwrap(); + let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { + panic!( + "response body should be a valid bencoded string for the 'peer not authenticated' error, got \"{}\"", + &body + ) + }); + let expected_error_response = Error { + failure_reason: "peer not authenticated".to_string(), + }; + assert_eq!(error_response, expected_error_response); +} + +pub async fn assert_invalid_authentication_key_error_response(response: Response) { + assert_eq!(response.status(), 200); + let body = response.text().await.unwrap(); + let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { + panic!( + "response body should be a valid bencoded string for 
the 'invalid authentication key' error, got \"{}\"", + &body + ) + }); + let expected_error_response = Error { + failure_reason: "invalid authentication key".to_string(), + }; + assert_eq!(error_response, expected_error_response); +} diff --git a/tests/http/client.rs b/tests/http/client.rs index d66ec2a00..062484e83 100644 --- a/tests/http/client.rs +++ b/tests/http/client.rs @@ -1,6 +1,7 @@ use std::net::IpAddr; use reqwest::{Client as ReqwestClient, Response}; +use torrust_tracker::tracker::auth::KeyId; use super::connection_info::ConnectionInfo; use super::requests::AnnounceQuery; @@ -9,13 +10,23 @@ use super::requests::AnnounceQuery; pub struct Client { connection_info: ConnectionInfo, reqwest_client: ReqwestClient, + key_id: Option, } +/// URL components in this context: +/// +/// ```text +/// http://127.0.0.1:62304/announce/YZ....rJ?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// \_____________________/\_______________/ \__________________________________________________________/ +/// | | | +/// base url path query +/// ``` impl Client { pub fn new(connection_info: ConnectionInfo) -> Self { Self { connection_info, reqwest_client: reqwest::Client::builder().build().unwrap(), + key_id: None, } } @@ -24,31 +35,57 @@ impl Client { Self { connection_info, reqwest_client: reqwest::Client::builder().local_address(local_address).build().unwrap(), + key_id: None, + } + } + + pub fn authenticated(connection_info: ConnectionInfo, key_id: KeyId) -> Self { + Self { + connection_info, + reqwest_client: reqwest::Client::builder().build().unwrap(), + key_id: Some(key_id), } } pub async fn announce(&self, query: &AnnounceQuery) -> Response { - self.get(&format!("announce?{query}")).await + self.get(&self.build_announce_path_and_query(query)).await } - pub async fn announce_with_header(&self, query: &AnnounceQuery, key: &str, value: &str) -> Response { - self.get_with_header(&format!("announce?{query}"), key, value).await + pub async fn 
announce_with_header(&self, query: &AnnounceQuery, key_id: &str, value: &str) -> Response { + self.get_with_header(&self.build_announce_path_and_query(query), key_id, value) + .await } pub async fn get(&self, path: &str) -> Response { - self.reqwest_client.get(self.base_url(path)).send().await.unwrap() + self.reqwest_client.get(self.build_url(path)).send().await.unwrap() } pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Response { self.reqwest_client - .get(self.base_url(path)) + .get(self.build_url(path)) .header(key, value) .send() .await .unwrap() } - fn base_url(&self, path: &str) -> String { - format!("http://{}/{path}", &self.connection_info.bind_address) + fn build_announce_path_and_query(&self, query: &AnnounceQuery) -> String { + format!("{}?{query}", self.build_path("announce")) + } + + fn build_path(&self, path: &str) -> String { + match &self.key_id { + Some(key_id) => format!("{path}/{key_id}"), + None => path.to_string(), + } + } + + fn build_url(&self, path: &str) -> String { + let base_url = self.base_url(); + format!("{base_url}{path}") + } + + fn base_url(&self) -> String { + format!("http://{}/", &self.connection_info.bind_address) } } diff --git a/tests/http/connection_info.rs b/tests/http/connection_info.rs index debf931e4..fb1dbf64e 100644 --- a/tests/http/connection_info.rs +++ b/tests/http/connection_info.rs @@ -1,16 +1,16 @@ -use torrust_tracker::tracker::auth::Key; +use torrust_tracker::tracker::auth::KeyId; #[derive(Clone, Debug)] pub struct ConnectionInfo { pub bind_address: String, - pub aut_key: Option, + pub key_id: Option, } impl ConnectionInfo { pub fn anonymous(bind_address: &str) -> Self { Self { bind_address: bind_address.to_string(), - aut_key: None, + key_id: None, } } } diff --git a/tests/http/server.rs b/tests/http/server.rs index 5cd1fec19..6741d8e97 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -26,6 +26,13 @@ pub async fn start_whitelisted_http_tracker() -> Server { 
start_custom_http_tracker(Arc::new(configuration)).await } +/// Starts a HTTP tracker with mode "listed" +pub async fn start_private_http_tracker() -> Server { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Private; + start_custom_http_tracker(Arc::new(configuration)).await +} + /// Starts a HTTP tracker with a wildcard IPV6 address. /// The configuration in the `config.toml` file would be like this: /// diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 05a2dfba1..65f42f415 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -726,7 +726,65 @@ mod http_tracker_server { mod configured_as_private { - mod and_receiving_an_announce_request {} + mod and_receiving_an_announce_request { + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::KeyId; + + use crate::http::asserts::{ + assert_invalid_authentication_key_error_response, assert_is_announce_response, + assert_peer_not_authenticated_error_response, + }; + use crate::http::client::Client; + use crate::http::requests::AnnounceQueryBuilder; + use crate::http::server::start_private_http_tracker; + + #[tokio::test] + async fn should_respond_to_peers_providing_a_valid_authentication_key() { + let http_tracker_server = start_private_http_tracker().await; + + let key = http_tracker_server + .tracker + .generate_auth_key(Duration::from_secs(60)) + .await + .unwrap(); + + let response = Client::authenticated(http_tracker_server.get_connection_info(), key.id()) + .announce(&AnnounceQueryBuilder::default().query()) + .await; + + assert_is_announce_response(response).await; + } + + #[tokio::test] + async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { + let http_tracker_server = start_private_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = 
Client::new(http_tracker_server.get_connection_info()) + .announce(&AnnounceQueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_peer_not_authenticated_error_response(response).await; + } + + #[tokio::test] + async fn should_fail_if_the_peer_authentication_key_is_not_valid() { + let http_tracker_server = start_private_http_tracker().await; + + // The tracker does not have this key + let unregistered_key_id = KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key_id) + .announce(&AnnounceQueryBuilder::default().query()) + .await; + + assert_invalid_authentication_key_error_response(response).await; + } + } mod receiving_an_scrape_request {} } From e1765f315b5e2f4d3f30ed3678865e3dd4b6ceda Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 27 Jan 2023 16:25:52 +0000 Subject: [PATCH 0345/1003] fix: new clippy errors --- src/apis/middlewares/auth.rs | 5 +---- src/apis/responses.rs | 3 +-- src/tracker/services/torrent.rs | 9 +++------ tests/api/asserts.rs | 5 +---- 4 files changed, 6 insertions(+), 16 deletions(-) diff --git a/src/apis/middlewares/auth.rs b/src/apis/middlewares/auth.rs index e54311d33..758ba1cda 100644 --- a/src/apis/middlewares/auth.rs +++ b/src/apis/middlewares/auth.rs @@ -25,10 +25,7 @@ pub async fn auth( where B: Send, { - let token = match params.token { - None => return AuthError::Unauthorized.into_response(), - Some(token) => token, - }; + let Some(token) = params.token else { return AuthError::Unauthorized.into_response() }; if !authenticate(&token, &config.http_api) { return AuthError::TokenNotValid.into_response(); diff --git a/src/apis/responses.rs b/src/apis/responses.rs index b3d4cbd59..b150b4bff 100644 --- a/src/apis/responses.rs +++ b/src/apis/responses.rs @@ -86,8 +86,7 @@ pub fn ok_response() -> Response { #[must_use] pub fn invalid_info_hash_param_response(info_hash: &str) -> Response { 
bad_request_response(&format!( - "Invalid URL: invalid infohash param: string \"{}\", expected a 40 character long string", - info_hash + "Invalid URL: invalid infohash param: string \"{info_hash}\", expected a 40 character long string" )) } diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index a08fd54d1..ba66d15f4 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -74,12 +74,9 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op let torrent_entry_option = db.get(info_hash); - let torrent_entry = match torrent_entry_option { - Some(torrent_entry) => torrent_entry, - None => { - return None; - } - }; + let Some(torrent_entry) = torrent_entry_option else { + return None; + }; let (seeders, completed, leechers) = torrent_entry.get_stats(); diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 07383f795..5f9d39705 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -66,10 +66,7 @@ pub async fn assert_torrent_not_known(response: Response) { pub async fn assert_invalid_infohash_param(response: Response, invalid_infohash: &str) { assert_bad_request( response, - &format!( - "Invalid URL: invalid infohash param: string \"{}\", expected a 40 character long string", - invalid_infohash - ), + &format!("Invalid URL: invalid infohash param: string \"{invalid_infohash}\", expected a 40 character long string"), ) .await; } From fcd60e221e87b2d43f3422227154b6e5825c0c73 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 27 Jan 2023 18:50:41 +0000 Subject: [PATCH 0346/1003] fix(http): Display impl for tracker::peer:ID ``` peer::Id(*b"-qB00000000000000000").to_string() ``` was always returning an empty string. It has been changed to return the Hex representations of the byte array. 
--- src/apis/resources/peer.rs | 2 +- src/tracker/peer.rs | 56 +++++++++++++++++++++++------- tests/http_tracker.rs | 70 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 115 insertions(+), 13 deletions(-) diff --git a/src/apis/resources/peer.rs b/src/apis/resources/peer.rs index ff84be197..5284d26f6 100644 --- a/src/apis/resources/peer.rs +++ b/src/apis/resources/peer.rs @@ -24,7 +24,7 @@ pub struct Id { impl From for Id { fn from(peer_id: tracker::peer::Id) -> Self { Id { - id: peer_id.get_id(), + id: peer_id.to_hex_string(), client: peer_id.get_client_name().map(std::string::ToString::to_string), } } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index dc362c5bd..5da894f54 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -93,10 +93,8 @@ pub struct Id(pub [u8; 20]); impl std::fmt::Display for Id { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut buffer = [0u8; 20]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok(); - match bytes_out { - Some(bytes) => write!(f, "{}", std::str::from_utf8(bytes).unwrap()), + match self.to_hex_string() { + Some(hex) => write!(f, "{hex}"), None => write!(f, ""), } } @@ -104,14 +102,36 @@ impl std::fmt::Display for Id { impl Id { #[must_use] + /// Converts to hex string. + /// + /// For the Id `-qB00000000000000000` ti returns `2d71423030303030303030303030303030303030` + /// + /// For example: + /// + ///```text + /// Bytes = Hex + /// -qB00000000000000000 = 2d71423030303030303030303030303030303030 + /// -qB00000000000000000 = 2d 71 42 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 + /// + /// ------------- + /// |Char | Hex | + /// ------------- + /// | - | 2D | + /// | q | 71 | + /// | B | 42 | + /// | 0 | 30 | + /// ------------- + /// ``` + /// + /// Return `None` is some of the bytes are invalid UTF8 values. + /// /// # Panics /// /// It will panic if the `binascii::bin2hex` from a too-small output buffer. 
- pub fn get_id(&self) -> Option { + pub fn to_hex_string(&self) -> Option { let buff_size = self.0.len() * 2; let mut tmp: Vec = vec![0; buff_size]; binascii::bin2hex(&self.0, &mut tmp).unwrap(); - std::str::from_utf8(&tmp).ok().map(std::string::ToString::to_string) } @@ -206,7 +226,7 @@ impl Serialize for Id { } let obj = PeerIdInfo { - id: self.get_id(), + id: self.to_hex_string(), client: self.get_client_name(), }; obj.serialize(serializer) @@ -220,13 +240,25 @@ mod test { use crate::tracker::peer; #[test] - fn should_be_converted_into_string() { - // todo: it seems it's not working + fn should_be_converted_to_hex_string() { + let id = peer::Id(*b"-qB00000000000000000"); + assert_eq!(id.to_hex_string().unwrap(), "2d71423030303030303030303030303030303030"); + + let id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + assert_eq!(id.to_hex_string().unwrap(), "009f9296009f9296009f9296009f9296009f9296"); + } + + #[test] + fn should_be_converted_into_string_type_using_the_hex_string_format() { let id = peer::Id(*b"-qB00000000000000000"); - assert_eq!(id.to_string(), ""); + assert_eq!(id.to_string(), "2d71423030303030303030303030303030303030"); - let id = peer::Id(*b"-qB00000000000000001"); - assert_eq!(id.to_string(), ""); + let id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + assert_eq!(id.to_string(), "009f9296009f9296009f9296009f9296009f9296"); } } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 65f42f415..cec0e4f88 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -5,6 +5,19 @@ mod common; mod http; mod http_tracker_server { + use std::str::FromStr; + + use percent_encoding::NON_ALPHANUMERIC; + use torrust_tracker::protocol::info_hash::InfoHash; + + #[test] + fn calculate_info_hash_param() { + let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + + let param = 
percent_encoding::percent_encode(&info_hash.0, NON_ALPHANUMERIC).to_string(); + + assert_eq!(param, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"); + } mod for_all_config_modes { @@ -796,3 +809,60 @@ mod http_tracker_server { mod receiving_an_scrape_request {} } } + +mod percent_encoding { + // todo: these operations are used in the HTTP tracker but they have not been extracted into independent functions. + // These tests document the operations. This behavior could be move to some functions int he future if they are extracted. + + use std::str::FromStr; + + use percent_encoding::NON_ALPHANUMERIC; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + #[test] + fn how_to_encode_an_info_hash() { + let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + + let encoded_info_hash = percent_encoding::percent_encode(&info_hash.0, NON_ALPHANUMERIC).to_string(); + + assert_eq!(encoded_info_hash, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"); + } + + #[test] + fn how_to_decode_an_info_hash() { + let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; + + let info_hash_bytes = percent_encoding::percent_decode_str(encoded_infohash).collect::>(); + let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)).unwrap(); + + assert_eq!( + info_hash, + InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap() + ); + } + + #[test] + fn how_to_encode_a_peer_id() { + let peer_id = peer::Id(*b"-qB00000000000000000"); + + let encoded_peer_id = percent_encoding::percent_encode(&peer_id.0, NON_ALPHANUMERIC).to_string(); + + assert_eq!(encoded_peer_id, "%2DqB00000000000000000"); + } + + #[test] + fn how_to_decode_a_peer_id() { + let encoded_peer_id = "%2DqB00000000000000000"; + + let bytes_vec = percent_encoding::percent_decode_str(encoded_peer_id).collect::>(); + + // Clone peer_id_bytes into fixed length array + let mut peer_id_bytes: [u8; 20] = 
Default::default(); + peer_id_bytes.clone_from_slice(bytes_vec.as_slice()); + + let peer_id = peer::Id(peer_id_bytes); + + assert_eq!(peer_id, peer::Id(*b"-qB00000000000000000")); + } +} From 953a1000d176ba8d28d7b6e3cd26b1dc723ca5a7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 30 Jan 2023 11:08:36 +0000 Subject: [PATCH 0347/1003] docs(http): [#159] add links to info about scrape requests --- cSpell.json | 1 + tests/http_tracker.rs | 14 +++++++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/cSpell.json b/cSpell.json index 0d0b73701..5d0a6e1f1 100644 --- a/cSpell.json +++ b/cSpell.json @@ -70,6 +70,7 @@ "untuple", "uroot", "Vagaa", + "Vuze", "Xtorrent", "Xunlei" ] diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index cec0e4f88..91c48c09c 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -674,14 +674,22 @@ mod http_tracker_server { } mod receiving_an_scrape_request { + + // Scrape specification: + // + // BEP 48. Tracker Protocol Extension: Scrape + // https://www.bittorrent.org/beps/bep_0048.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Scrape + use crate::http::asserts::assert_internal_server_error_response; use crate::http::client::Client; - use crate::http::server::start_default_http_tracker; + use crate::http::server::start_public_http_tracker; #[tokio::test] async fn should_fail_when_the_request_is_empty() { - let http_tracker_server = start_default_http_tracker().await; - + let http_tracker_server = start_public_http_tracker().await; let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; assert_internal_server_error_response(response).await; From d7610eff410fae2ee2d715fc281e0c36cb92886a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 30 Jan 2023 11:52:30 +0000 Subject: [PATCH 0348/1003] refactor(http): [#159] move mods to folders We will use one mod per type of request and response. 
--- tests/http/asserts.rs | 9 +-- tests/http/client.rs | 8 +- .../{requests.rs => requests/announce.rs} | 30 +++---- tests/http/requests/mod.rs | 1 + .../{responses.rs => responses/announce.rs} | 8 +- tests/http/responses/mod.rs | 1 + tests/http_tracker.rs | 79 ++++++++++--------- 7 files changed, 69 insertions(+), 67 deletions(-) rename tests/http/{requests.rs => requests/announce.rs} (93%) create mode 100644 tests/http/requests/mod.rs rename tests/http/{responses.rs => responses/announce.rs} (94%) create mode 100644 tests/http/responses/mod.rs diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index cf4683a7b..ec31b1ee4 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -1,7 +1,6 @@ use reqwest::Response; -use super::responses::{Announce, DecodedCompactAnnounce}; -use crate::http::responses::{CompactAnnounce, Error}; +use super::responses::announce::{Announce, Compact, DecodedCompact, Error}; pub async fn assert_empty_announce_response(response: Response) { assert_eq!(response.status(), 200); @@ -22,18 +21,18 @@ pub async fn assert_announce_response(response: Response, expected_announce_resp /// ```text /// b"d8:intervali120e12:min intervali120e8:completei2e10:incompletei0e5:peers6:~\0\0\x01\x1f\x90e6:peers60:e" /// ``` -pub async fn assert_compact_announce_response(response: Response, expected_response: &DecodedCompactAnnounce) { +pub async fn assert_compact_announce_response(response: Response, expected_response: &DecodedCompact) { assert_eq!(response.status(), 200); let bytes = response.bytes().await.unwrap(); - let compact_announce: CompactAnnounce = serde_bencode::from_bytes(&bytes).unwrap_or_else(|_| { + let compact_announce: Compact = serde_bencode::from_bytes(&bytes).unwrap_or_else(|_| { panic!( "response body should be a valid compact announce response, got \"{:?}\"", &bytes ) }); - let actual_response = DecodedCompactAnnounce::from(compact_announce); + let actual_response = DecodedCompact::from(compact_announce); 
assert_eq!(actual_response, *expected_response); } diff --git a/tests/http/client.rs b/tests/http/client.rs index 062484e83..2d53463dd 100644 --- a/tests/http/client.rs +++ b/tests/http/client.rs @@ -4,7 +4,7 @@ use reqwest::{Client as ReqwestClient, Response}; use torrust_tracker::tracker::auth::KeyId; use super::connection_info::ConnectionInfo; -use super::requests::AnnounceQuery; +use super::requests::announce::Query; /// HTTP Tracker Client pub struct Client { @@ -47,11 +47,11 @@ impl Client { } } - pub async fn announce(&self, query: &AnnounceQuery) -> Response { + pub async fn announce(&self, query: &Query) -> Response { self.get(&self.build_announce_path_and_query(query)).await } - pub async fn announce_with_header(&self, query: &AnnounceQuery, key_id: &str, value: &str) -> Response { + pub async fn announce_with_header(&self, query: &Query, key_id: &str, value: &str) -> Response { self.get_with_header(&self.build_announce_path_and_query(query), key_id, value) .await } @@ -69,7 +69,7 @@ impl Client { .unwrap() } - fn build_announce_path_and_query(&self, query: &AnnounceQuery) -> String { + fn build_announce_path_and_query(&self, query: &Query) -> String { format!("{}?{query}", self.build_path("announce")) } diff --git a/tests/http/requests.rs b/tests/http/requests/announce.rs similarity index 93% rename from tests/http/requests.rs rename to tests/http/requests/announce.rs index 9135020e9..8fe43348f 100644 --- a/tests/http/requests.rs +++ b/tests/http/requests/announce.rs @@ -7,7 +7,7 @@ use serde_repr::Serialize_repr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Id; -pub struct AnnounceQuery { +pub struct Query { pub info_hash: ByteArray20, pub peer_addr: IpAddr, pub downloaded: BaseTenASCII, @@ -19,7 +19,7 @@ pub struct AnnounceQuery { pub compact: Option, } -impl fmt::Display for AnnounceQuery { +impl fmt::Display for Query { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.build()) } @@ 
-30,7 +30,7 @@ impl fmt::Display for AnnounceQuery { /// /// /// Some parameters in the specification are not implemented in this tracker yet. -impl AnnounceQuery { +impl Query { /// It builds the URL query component for the announce request. /// /// This custom URL query params encoding is needed because `reqwest` does not allow @@ -41,8 +41,8 @@ impl AnnounceQuery { self.params().to_string() } - pub fn params(&self) -> AnnounceQueryParams { - AnnounceQueryParams::from(self) + pub fn params(&self) -> QueryParams { + QueryParams::from(self) } } @@ -82,13 +82,13 @@ impl fmt::Display for Compact { } } -pub struct AnnounceQueryBuilder { - announce_query: AnnounceQuery, +pub struct QueryBuilder { + announce_query: Query, } -impl AnnounceQueryBuilder { - pub fn default() -> AnnounceQueryBuilder { - let default_announce_query = AnnounceQuery { +impl QueryBuilder { + pub fn default() -> QueryBuilder { + let default_announce_query = Query { info_hash: InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0, peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), downloaded: 0, @@ -129,7 +129,7 @@ impl AnnounceQueryBuilder { self } - pub fn query(self) -> AnnounceQuery { + pub fn query(self) -> Query { self.announce_query } } @@ -150,7 +150,7 @@ impl AnnounceQueryBuilder { /// event=completed /// compact=0 /// ``` -pub struct AnnounceQueryParams { +pub struct QueryParams { pub info_hash: Option, pub peer_addr: Option, pub downloaded: Option, @@ -162,7 +162,7 @@ pub struct AnnounceQueryParams { pub compact: Option, } -impl std::fmt::Display for AnnounceQueryParams { +impl std::fmt::Display for QueryParams { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let mut params = vec![]; @@ -204,8 +204,8 @@ impl std::fmt::Display for AnnounceQueryParams { } } -impl AnnounceQueryParams { - pub fn from(announce_query: &AnnounceQuery) -> Self { +impl QueryParams { + pub fn from(announce_query: &Query) -> Self { let event = 
announce_query.event.as_ref().map(std::string::ToString::to_string); let compact = announce_query.compact.as_ref().map(std::string::ToString::to_string); diff --git a/tests/http/requests/mod.rs b/tests/http/requests/mod.rs new file mode 100644 index 000000000..74894de33 --- /dev/null +++ b/tests/http/requests/mod.rs @@ -0,0 +1 @@ +pub mod announce; diff --git a/tests/http/responses.rs b/tests/http/responses/announce.rs similarity index 94% rename from tests/http/responses.rs rename to tests/http/responses/announce.rs index 7cf283916..6bdc82cdd 100644 --- a/tests/http/responses.rs +++ b/tests/http/responses/announce.rs @@ -31,7 +31,7 @@ impl From for DictionaryPeer { } #[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct CompactAnnounce { +pub struct Compact { pub complete: u32, pub incomplete: u32, pub interval: u32, @@ -42,7 +42,7 @@ pub struct CompactAnnounce { } #[derive(Debug, PartialEq)] -pub struct DecodedCompactAnnounce { +pub struct DecodedCompact { // code-review: there could be a way to deserialize this struct directly // by using serde instead of doing it manually. Or at least using a custom deserializer. 
pub complete: u32, @@ -88,8 +88,8 @@ impl CompactPeer { } } -impl From for DecodedCompactAnnounce { - fn from(compact_announce: CompactAnnounce) -> Self { +impl From for DecodedCompact { + fn from(compact_announce: Compact) -> Self { let mut peers = vec![]; for peer_bytes in compact_announce.peers.chunks_exact(6) { diff --git a/tests/http/responses/mod.rs b/tests/http/responses/mod.rs new file mode 100644 index 000000000..74894de33 --- /dev/null +++ b/tests/http/responses/mod.rs @@ -0,0 +1 @@ +pub mod announce; diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 91c48c09c..b315f82c2 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -37,8 +37,9 @@ mod http_tracker_server { assert_invalid_peer_id_error_response, assert_is_announce_response, }; use crate::http::client::Client; - use crate::http::requests::{AnnounceQueryBuilder, Compact}; - use crate::http::responses::{self, Announce, CompactAnnounce, CompactPeer, CompactPeerList, DictionaryPeer}; + use crate::http::requests::announce::{Compact, QueryBuilder}; + use crate::http::responses; + use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; use crate::http::server::{ start_default_http_tracker, start_http_tracker_on_reverse_proxy, start_http_tracker_with_external_ip, start_ipv6_http_tracker, start_public_http_tracker, @@ -48,7 +49,7 @@ mod http_tracker_server { async fn should_respond_when_only_the_mandatory_fields_are_provided() { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); params.remove_optional_params(); @@ -74,7 +75,7 @@ mod http_tracker_server { // Without `info_hash` param - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); params.info_hash = None; @@ -86,7 +87,7 @@ mod http_tracker_server { // Without `peer_id` param 
- let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); params.peer_id = None; @@ -98,7 +99,7 @@ mod http_tracker_server { // Without `port` param - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); params.port = None; @@ -113,7 +114,7 @@ mod http_tracker_server { async fn should_fail_when_the_info_hash_param_is_invalid() { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); for invalid_value in &invalid_info_hashes() { params.set("info_hash", invalid_value); @@ -135,7 +136,7 @@ mod http_tracker_server { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); @@ -150,7 +151,7 @@ mod http_tracker_server { async fn should_fail_when_the_downloaded_param_is_invalid() { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); let invalid_values = ["-1", "1.1", "a"]; @@ -169,7 +170,7 @@ mod http_tracker_server { async fn should_fail_when_the_uploaded_param_is_invalid() { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); let invalid_values = ["-1", "1.1", "a"]; @@ -188,7 +189,7 @@ mod http_tracker_server { async fn should_fail_when_the_peer_id_param_is_invalid() { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = 
QueryBuilder::default().query().params(); let invalid_values = [ "0", @@ -214,7 +215,7 @@ mod http_tracker_server { async fn should_fail_when_the_port_param_is_invalid() { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); let invalid_values = ["-1", "1.1", "a"]; @@ -233,7 +234,7 @@ mod http_tracker_server { async fn should_fail_when_the_left_param_is_invalid() { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); let invalid_values = ["-1", "1.1", "a"]; @@ -254,7 +255,7 @@ mod http_tracker_server { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); let invalid_values = [ "0", @@ -281,7 +282,7 @@ mod http_tracker_server { async fn should_not_fail_when_the_compact_param_is_invalid() { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); let invalid_values = ["-1", "1.1", "a"]; @@ -302,7 +303,7 @@ mod http_tracker_server { let response = Client::new(http_tracker_server.get_connection_info()) .announce( - &AnnounceQueryBuilder::default() + &QueryBuilder::default() .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) .query(), ) @@ -338,7 +339,7 @@ mod http_tracker_server { // Announce the new Peer 2. 
This new peer is non included on the response peer list let response = Client::new(http_tracker_server.get_connection_info()) .announce( - &AnnounceQueryBuilder::default() + &QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_id(&peer::Id(*b"-qB00000000000000002")) .query(), @@ -369,7 +370,7 @@ mod http_tracker_server { // Add a peer http_tracker_server.add_torrent(&info_hash, &peer).await; - let announce_query = AnnounceQueryBuilder::default() + let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_id(&peer.peer_id) .query(); @@ -403,7 +404,7 @@ mod http_tracker_server { // Announce the new Peer 2 accepting compact responses let response = Client::new(http_tracker_server.get_connection_info()) .announce( - &AnnounceQueryBuilder::default() + &QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_id(&peer::Id(*b"-qB00000000000000002")) .with_compact(Compact::Accepted) @@ -411,7 +412,7 @@ mod http_tracker_server { ) .await; - let expected_response = responses::DecodedCompactAnnounce { + let expected_response = responses::announce::DecodedCompact { complete: 2, incomplete: 0, interval: 120, @@ -444,7 +445,7 @@ mod http_tracker_server { // https://www.bittorrent.org/beps/bep_0023.html let response = Client::new(http_tracker_server.get_connection_info()) .announce( - &AnnounceQueryBuilder::default() + &QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_id(&peer::Id(*b"-qB00000000000000002")) .without_compact() @@ -457,7 +458,7 @@ mod http_tracker_server { async fn is_a_compact_announce_response(response: Response) -> bool { let bytes = response.bytes().await.unwrap(); - let compact_announce = serde_bencode::from_bytes::(&bytes); + let compact_announce = serde_bencode::from_bytes::(&bytes); compact_announce.is_ok() } @@ -466,7 +467,7 @@ mod http_tracker_server { let http_tracker_server = start_public_http_tracker().await; Client::new(http_tracker_server.get_connection_info()) - 
.announce(&AnnounceQueryBuilder::default().query()) + .announce(&QueryBuilder::default().query()) .await; let stats = http_tracker_server.tracker.get_stats().await; @@ -479,7 +480,7 @@ mod http_tracker_server { let http_tracker_server = start_ipv6_http_tracker().await; Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) - .announce(&AnnounceQueryBuilder::default().query()) + .announce(&QueryBuilder::default().query()) .await; let stats = http_tracker_server.tracker.get_stats().await; @@ -495,7 +496,7 @@ mod http_tracker_server { Client::new(http_tracker_server.get_connection_info()) .announce( - &AnnounceQueryBuilder::default() + &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) .query(), ) @@ -511,7 +512,7 @@ mod http_tracker_server { let http_tracker_server = start_public_http_tracker().await; Client::new(http_tracker_server.get_connection_info()) - .announce(&AnnounceQueryBuilder::default().query()) + .announce(&QueryBuilder::default().query()) .await; let stats = http_tracker_server.tracker.get_stats().await; @@ -524,7 +525,7 @@ mod http_tracker_server { let http_tracker_server = start_ipv6_http_tracker().await; Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) - .announce(&AnnounceQueryBuilder::default().query()) + .announce(&QueryBuilder::default().query()) .await; let stats = http_tracker_server.tracker.get_stats().await; @@ -540,7 +541,7 @@ mod http_tracker_server { Client::new(http_tracker_server.get_connection_info()) .announce( - &AnnounceQueryBuilder::default() + &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) .query(), ) @@ -560,7 +561,7 @@ mod http_tracker_server { let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); - let announce_query = AnnounceQueryBuilder::default() + let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) 
.with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) .query(); @@ -591,7 +592,7 @@ mod http_tracker_server { let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); - let announce_query = AnnounceQueryBuilder::default() + let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) .query(); @@ -624,7 +625,7 @@ mod http_tracker_server { let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); - let announce_query = AnnounceQueryBuilder::default() + let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) .query(); @@ -653,7 +654,7 @@ mod http_tracker_server { let client = Client::new(http_tracker_server.get_connection_info()); - let announce_query = AnnounceQueryBuilder::default().with_info_hash(&info_hash).query(); + let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); // todo: shouldn't be the the leftmost IP address? // THe application is taken the the rightmost IP address. 
See function http::filters::peer_addr @@ -706,7 +707,7 @@ mod http_tracker_server { use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; use crate::http::client::Client; - use crate::http::requests::AnnounceQueryBuilder; + use crate::http::requests::announce::QueryBuilder; use crate::http::server::start_whitelisted_http_tracker; #[tokio::test] @@ -716,7 +717,7 @@ mod http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let response = Client::new(http_tracker_server.get_connection_info()) - .announce(&AnnounceQueryBuilder::default().with_info_hash(&info_hash).query()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; assert_torrent_not_in_whitelist_error_response(response).await; @@ -735,7 +736,7 @@ mod http_tracker_server { .expect("should add the torrent to the whitelist"); let response = Client::new(http_tracker_server.get_connection_info()) - .announce(&AnnounceQueryBuilder::default().with_info_hash(&info_hash).query()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; assert_is_announce_response(response).await; @@ -759,7 +760,7 @@ mod http_tracker_server { assert_peer_not_authenticated_error_response, }; use crate::http::client::Client; - use crate::http::requests::AnnounceQueryBuilder; + use crate::http::requests::announce::QueryBuilder; use crate::http::server::start_private_http_tracker; #[tokio::test] @@ -773,7 +774,7 @@ mod http_tracker_server { .unwrap(); let response = Client::authenticated(http_tracker_server.get_connection_info(), key.id()) - .announce(&AnnounceQueryBuilder::default().query()) + .announce(&QueryBuilder::default().query()) .await; assert_is_announce_response(response).await; @@ -786,7 +787,7 @@ mod http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let response = 
Client::new(http_tracker_server.get_connection_info()) - .announce(&AnnounceQueryBuilder::default().with_info_hash(&info_hash).query()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; assert_peer_not_authenticated_error_response(response).await; @@ -800,7 +801,7 @@ mod http_tracker_server { let unregistered_key_id = KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key_id) - .announce(&AnnounceQueryBuilder::default().query()) + .announce(&QueryBuilder::default().query()) .await; assert_invalid_authentication_key_error_response(response).await; From dc304e74542e2237c42f5241754ed272399910a4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 30 Jan 2023 19:55:18 +0000 Subject: [PATCH 0349/1003] test(http): [#159] scaffolding to test scrape responses in http tracker --- tests/http/asserts.rs | 3 +- tests/http/bencode.rs | 1 + tests/http/client.rs | 15 ++++- tests/http/mod.rs | 1 + tests/http/requests/announce.rs | 3 +- tests/http/requests/mod.rs | 1 + tests/http/requests/scrape.rs | 108 +++++++++++++++++++++++++++++++ tests/http/responses/announce.rs | 6 -- tests/http/responses/error.rs | 7 ++ tests/http/responses/mod.rs | 2 + tests/http/responses/scrape.rs | 91 ++++++++++++++++++++++++++ tests/http_tracker.rs | 54 +++++++++++++++- 12 files changed, 280 insertions(+), 12 deletions(-) create mode 100644 tests/http/bencode.rs create mode 100644 tests/http/requests/scrape.rs create mode 100644 tests/http/responses/error.rs create mode 100644 tests/http/responses/scrape.rs diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index ec31b1ee4..c75af1f74 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -1,6 +1,7 @@ use reqwest::Response; -use super::responses::announce::{Announce, Compact, DecodedCompact, Error}; +use super::responses::announce::{Announce, Compact, DecodedCompact}; +use 
crate::http::responses::error::Error; pub async fn assert_empty_announce_response(response: Response) { assert_eq!(response.status(), 200); diff --git a/tests/http/bencode.rs b/tests/http/bencode.rs new file mode 100644 index 000000000..b67b278d7 --- /dev/null +++ b/tests/http/bencode.rs @@ -0,0 +1 @@ +pub type ByteArray20 = [u8; 20]; diff --git a/tests/http/client.rs b/tests/http/client.rs index 2d53463dd..b59cf2ac6 100644 --- a/tests/http/client.rs +++ b/tests/http/client.rs @@ -4,7 +4,8 @@ use reqwest::{Client as ReqwestClient, Response}; use torrust_tracker::tracker::auth::KeyId; use super::connection_info::ConnectionInfo; -use super::requests::announce::Query; +use super::requests::announce::{self, Query}; +use super::requests::scrape; /// HTTP Tracker Client pub struct Client { @@ -47,10 +48,14 @@ impl Client { } } - pub async fn announce(&self, query: &Query) -> Response { + pub async fn announce(&self, query: &announce::Query) -> Response { self.get(&self.build_announce_path_and_query(query)).await } + pub async fn scrape(&self, query: &scrape::Query) -> Response { + self.get(&self.build_scrape_path_and_query(query)).await + } + pub async fn announce_with_header(&self, query: &Query, key_id: &str, value: &str) -> Response { self.get_with_header(&self.build_announce_path_and_query(query), key_id, value) .await @@ -69,10 +74,14 @@ impl Client { .unwrap() } - fn build_announce_path_and_query(&self, query: &Query) -> String { + fn build_announce_path_and_query(&self, query: &announce::Query) -> String { format!("{}?{query}", self.build_path("announce")) } + fn build_scrape_path_and_query(&self, query: &scrape::Query) -> String { + format!("{}?{query}", self.build_path("scrape")) + } + fn build_path(&self, path: &str) -> String { match &self.key_id { Some(key_id) => format!("{path}/{key_id}"), diff --git a/tests/http/mod.rs b/tests/http/mod.rs index 2ab8b2c1c..87087026f 100644 --- a/tests/http/mod.rs +++ b/tests/http/mod.rs @@ -1,4 +1,5 @@ pub mod asserts; +pub 
mod bencode; pub mod client; pub mod connection_info; pub mod requests; diff --git a/tests/http/requests/announce.rs b/tests/http/requests/announce.rs index 8fe43348f..5656d8f1d 100644 --- a/tests/http/requests/announce.rs +++ b/tests/http/requests/announce.rs @@ -7,6 +7,8 @@ use serde_repr::Serialize_repr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Id; +use crate::http::bencode::ByteArray20; + pub struct Query { pub info_hash: ByteArray20, pub peer_addr: IpAddr, @@ -47,7 +49,6 @@ impl Query { } pub type BaseTenASCII = u64; -pub type ByteArray20 = [u8; 20]; pub type PortNumber = u16; pub enum Event { diff --git a/tests/http/requests/mod.rs b/tests/http/requests/mod.rs index 74894de33..776d2dfbf 100644 --- a/tests/http/requests/mod.rs +++ b/tests/http/requests/mod.rs @@ -1 +1,2 @@ pub mod announce; +pub mod scrape; diff --git a/tests/http/requests/scrape.rs b/tests/http/requests/scrape.rs new file mode 100644 index 000000000..6198f1680 --- /dev/null +++ b/tests/http/requests/scrape.rs @@ -0,0 +1,108 @@ +use std::fmt; +use std::str::FromStr; + +use percent_encoding::NON_ALPHANUMERIC; +use torrust_tracker::protocol::info_hash::InfoHash; + +use crate::http::bencode::ByteArray20; + +pub struct Query { + pub info_hash: Vec, +} + +impl fmt::Display for Query { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.build()) + } +} + +/// HTTP Tracker Scrape Request: +/// +/// +impl Query { + /// It builds the URL query component for the scrape request. + /// + /// This custom URL query params encoding is needed because `reqwest` does not allow + /// bytes arrays in query parameters. 
More info on this issue: + /// + /// + pub fn build(&self) -> String { + self.params().to_string() + } + + pub fn params(&self) -> QueryParams { + QueryParams::from(self) + } +} + +pub struct QueryBuilder { + scrape_query: Query, +} + +impl QueryBuilder { + pub fn default() -> QueryBuilder { + let default_scrape_query = Query { + info_hash: [InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0].to_vec(), + }; + Self { + scrape_query: default_scrape_query, + } + } + + pub fn with_one_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.scrape_query.info_hash = [info_hash.0].to_vec(); + self + } + + pub fn query(self) -> Query { + self.scrape_query + } +} + +/// It contains all the GET parameters that can be used in a HTTP Scrape request. +/// +/// The `info_hash` param is the percent encoded of the the 20-byte array info hash. +/// +/// Sample Scrape URL with all the GET parameters: +/// +/// For `IpV4`: +/// +/// ```text +/// http://127.0.0.1:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// ``` +/// +/// For `IpV6`: +/// +/// ```text +/// http://[::1]:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// ``` +/// +/// You can add as many info hashes as you want, just adding the same param again. 
+pub struct QueryParams { + pub info_hash: Vec, +} + +impl std::fmt::Display for QueryParams { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let query = self + .info_hash + .iter() + .map(|info_hash| format!("info_hash={}", &info_hash)) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl QueryParams { + pub fn from(scrape_query: &Query) -> Self { + let info_hashes = scrape_query + .info_hash + .iter() + .map(|info_hash_bytes| percent_encoding::percent_encode(info_hash_bytes, NON_ALPHANUMERIC).to_string()) + .collect::>(); + + Self { info_hash: info_hashes } + } +} diff --git a/tests/http/responses/announce.rs b/tests/http/responses/announce.rs index 6bdc82cdd..838a0b41c 100644 --- a/tests/http/responses/announce.rs +++ b/tests/http/responses/announce.rs @@ -105,9 +105,3 @@ impl From for DecodedCompact { } } } - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct Error { - #[serde(rename = "failure reason")] - pub failure_reason: String, -} diff --git a/tests/http/responses/error.rs b/tests/http/responses/error.rs new file mode 100644 index 000000000..12c53a0cf --- /dev/null +++ b/tests/http/responses/error.rs @@ -0,0 +1,7 @@ +use serde::{self, Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Error { + #[serde(rename = "failure reason")] + pub failure_reason: String, +} diff --git a/tests/http/responses/mod.rs b/tests/http/responses/mod.rs index 74894de33..bdc689056 100644 --- a/tests/http/responses/mod.rs +++ b/tests/http/responses/mod.rs @@ -1 +1,3 @@ pub mod announce; +pub mod error; +pub mod scrape; diff --git a/tests/http/responses/scrape.rs b/tests/http/responses/scrape.rs new file mode 100644 index 000000000..450006815 --- /dev/null +++ b/tests/http/responses/scrape.rs @@ -0,0 +1,91 @@ +use std::collections::HashMap; +use std::str; + +use serde::{self, Deserialize, Serialize}; +use serde_bencode::value::Value; + +use crate::http::bencode::ByteArray20; + 
+#[derive(Debug, PartialEq)] +pub struct Response { + pub files: HashMap, +} + +impl Response { + pub fn from_bytes(bytes: &[u8]) -> Self { + let scrape_response: DeserializedResponse = serde_bencode::from_bytes(bytes).unwrap(); + Self::from(scrape_response) + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct File { + pub complete: i64, + pub downloaded: i64, + pub incomplete: i64, +} + +impl From for Response { + fn from(scrape_response: DeserializedResponse) -> Self { + // todo: + // - Use `try_from` trait instead of `from`. + // - Improve error messages. + // - Extract parser function out of the trait. + // - Extract parser for each nested element. + // - Extract function to instantiate [u8; 20] from Vec. + let mut files: HashMap = HashMap::new(); + + match scrape_response.files { + Value::Dict(dict) => { + for file_element in dict { + let info_hash_byte_vec = file_element.0; + let file_value = file_element.1; + + let file = match &file_value { + Value::Dict(dict) => { + let mut file = File { + complete: 0, + downloaded: 0, + incomplete: 0, + }; + + for file_field in dict { + let value = match file_field.1 { + Value::Int(number) => *number, + _ => panic!("Error parsing bencoded scrape response. Invalid value. Expected "), + }; + + if file_field.0 == b"complete" { + file.complete = value; + } else if file_field.0 == b"downloaded" { + file.downloaded = value; + } else if file_field.0 == b"incomplete" { + file.incomplete = value; + } else { + panic!("Error parsing bencoded scrape response. Invalid field"); + } + } + + file + } + _ => panic!("Error parsing bencoded scrape response. Invalid value. Expected "), + }; + + // Clone Vec into [u8; 20] + let mut info_hash_byte_array: [u8; 20] = Default::default(); + info_hash_byte_array.clone_from_slice(info_hash_byte_vec.as_slice()); + + files.insert(info_hash_byte_array, file); + } + } + _ => panic!("Error parsing bencoded scrape response. Invalid value. 
Expected "), + } + + Self { files } + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +struct DeserializedResponse { + pub files: Value, +} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index b315f82c2..d2272fc31 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -676,7 +676,7 @@ mod http_tracker_server { mod receiving_an_scrape_request { - // Scrape specification: + // Scrape documentation: // // BEP 48. Tracker Protocol Extension: Scrape // https://www.bittorrent.org/beps/bep_0048.html @@ -684,8 +684,17 @@ mod http_tracker_server { // Vuze (bittorrent client) docs: // https://wiki.vuze.com/w/Scrape + use std::collections::HashMap; + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::PeerBuilder; use crate::http::asserts::assert_internal_server_error_response; use crate::http::client::Client; + use crate::http::requests; + use crate::http::responses::scrape::{File, Response}; use crate::http::server::start_public_http_tracker; #[tokio::test] @@ -695,6 +704,49 @@ mod http_tracker_server { assert_internal_server_error_response(response).await; } + + #[tokio::test] + async fn should_return_the_scrape_response() { + let http_tracker_server = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .into(); + + // Add the Peer + http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Scrape the tracker + let response = Client::new(http_tracker_server.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + // todo: extract scrape response builder or named constructor. 
+ // A builder with an "add_file(info_hash_bytes: &[u8], file: File)" method could be a good solution. + let mut files = HashMap::new(); + files.insert( + info_hash.0, + File { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ); + let expected_scrape_response = Response { files }; + + // todo: extract assert + assert_eq!(response.status(), 200); + let bytes = response.bytes().await.unwrap(); + let scrape_response = Response::from_bytes(&bytes); + assert_eq!(scrape_response, expected_scrape_response); + } } } From 2754189ed63dc715712279b4442f4922ce403271 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 30 Jan 2023 20:27:58 +0000 Subject: [PATCH 0350/1003] refactor(http): [#159] rename struct in announce responses to follow new scrape conventions Deserialization from bencoded bytes for announce and scrape request is done in two phases. First using `serde_bencode::from_bytes` and later with a custom parser. The reason is the `serde_bencode` crate does not allow direct deserialization for the structs we need. The struct resulting from the first deserialization done by `serde_bencode` is the `DeserializedCompact` and the second one just `Compact`. So the prefix `Deserialized` is used when the bytes in the response body are converted into a struct.
--- src/protocol/info_hash.rs | 8 ++++++++ tests/http/asserts.rs | 10 ++++++---- tests/http/responses/announce.rs | 8 ++++---- tests/http_tracker.rs | 6 +++--- 4 files changed, 21 insertions(+), 11 deletions(-) diff --git a/src/protocol/info_hash.rs b/src/protocol/info_hash.rs index 3d2fad1a5..83a595c1f 100644 --- a/src/protocol/info_hash.rs +++ b/src/protocol/info_hash.rs @@ -1,6 +1,14 @@ #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] pub struct InfoHash(pub [u8; 20]); +impl InfoHash { + /// For readability, when accessing the bytes array + #[must_use] + pub fn bytes(&self) -> [u8; 20] { + self.0 + } +} + impl std::fmt::Display for InfoHash { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let mut chars = [0u8; 40]; diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index c75af1f74..4e2214317 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -1,6 +1,6 @@ use reqwest::Response; -use super::responses::announce::{Announce, Compact, DecodedCompact}; +use super::responses::announce::{Announce, Compact, DeserializedCompact}; use crate::http::responses::error::Error; pub async fn assert_empty_announce_response(response: Response) { @@ -22,18 +22,20 @@ pub async fn assert_announce_response(response: Response, expected_announce_resp /// ```text /// b"d8:intervali120e12:min intervali120e8:completei2e10:incompletei0e5:peers6:~\0\0\x01\x1f\x90e6:peers60:e" /// ``` -pub async fn assert_compact_announce_response(response: Response, expected_response: &DecodedCompact) { +pub async fn assert_compact_announce_response(response: Response, expected_response: &Compact) { assert_eq!(response.status(), 200); let bytes = response.bytes().await.unwrap(); - let compact_announce: Compact = serde_bencode::from_bytes(&bytes).unwrap_or_else(|_| { + // todo: move to DeserializedCompact constructor and make DeserializedCompact struct private + let compact_announce: DeserializedCompact = serde_bencode::from_bytes(&bytes).unwrap_or_else(|_| { panic!( 
"response body should be a valid compact announce response, got \"{:?}\"", &bytes ) }); - let actual_response = DecodedCompact::from(compact_announce); + + let actual_response = Compact::from(compact_announce); assert_eq!(actual_response, *expected_response); } diff --git a/tests/http/responses/announce.rs b/tests/http/responses/announce.rs index 838a0b41c..85f0347cc 100644 --- a/tests/http/responses/announce.rs +++ b/tests/http/responses/announce.rs @@ -31,7 +31,7 @@ impl From for DictionaryPeer { } #[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct Compact { +pub struct DeserializedCompact { pub complete: u32, pub incomplete: u32, pub interval: u32, @@ -42,7 +42,7 @@ pub struct Compact { } #[derive(Debug, PartialEq)] -pub struct DecodedCompact { +pub struct Compact { // code-review: there could be a way to deserialize this struct directly // by using serde instead of doing it manually. Or at least using a custom deserializer. pub complete: u32, @@ -88,8 +88,8 @@ impl CompactPeer { } } -impl From for DecodedCompact { - fn from(compact_announce: Compact) -> Self { +impl From for Compact { + fn from(compact_announce: DeserializedCompact) -> Self { let mut peers = vec![]; for peer_bytes in compact_announce.peers.chunks_exact(6) { diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index d2272fc31..3e1391c63 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -412,7 +412,7 @@ mod http_tracker_server { ) .await; - let expected_response = responses::announce::DecodedCompact { + let expected_response = responses::announce::Compact { complete: 2, incomplete: 0, interval: 120, @@ -458,7 +458,7 @@ mod http_tracker_server { async fn is_a_compact_announce_response(response: Response) -> bool { let bytes = response.bytes().await.unwrap(); - let compact_announce = serde_bencode::from_bytes::(&bytes); + let compact_announce = serde_bencode::from_bytes::(&bytes); compact_announce.is_ok() } @@ -732,7 +732,7 @@ mod http_tracker_server { // A 
builder with an "add_file(info_hash_bytes: &[u8], file: File)" method could be a good solution. let mut files = HashMap::new(); files.insert( - info_hash.0, + info_hash.bytes(), File { complete: 1, downloaded: 0, From c89a1f38ff96bacc4830c71824f2c7e2871ac838 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 30 Jan 2023 20:51:04 +0000 Subject: [PATCH 0351/1003] fix(http): [#159] minor text fixes --- src/tracker/peer.rs | 2 +- tests/http/server.rs | 6 +++--- tests/http_tracker.rs | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 5da894f54..22889381f 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -104,7 +104,7 @@ impl Id { #[must_use] /// Converts to hex string. /// - /// For the Id `-qB00000000000000000` ti returns `2d71423030303030303030303030303030303030` + /// For the Id `-qB00000000000000000` it returns `2d71423030303030303030303030303030303030` /// /// For example: /// diff --git a/tests/http/server.rs b/tests/http/server.rs index 6741d8e97..e48ecd88d 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -12,21 +12,21 @@ use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use super::connection_info::ConnectionInfo; -/// Starts a HTTP tracker with mode "public" +/// Starts a HTTP tracker with mode "public" in settings pub async fn start_public_http_tracker() -> Server { let mut configuration = ephemeral_configuration(); configuration.mode = Mode::Public; start_custom_http_tracker(Arc::new(configuration)).await } -/// Starts a HTTP tracker with mode "listed" +/// Starts a HTTP tracker with mode "listed" in settings pub async fn start_whitelisted_http_tracker() -> Server { let mut configuration = ephemeral_configuration(); configuration.mode = Mode::Listed; start_custom_http_tracker(Arc::new(configuration)).await } -/// Starts a HTTP tracker with mode "listed" +/// Starts a HTTP tracker with mode "private" in settings pub async fn 
start_private_http_tracker() -> Server { let mut configuration = ephemeral_configuration(); configuration.mode = Mode::Private; diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 3e1391c63..099e1d360 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -46,7 +46,7 @@ mod http_tracker_server { }; #[tokio::test] - async fn should_respond_when_only_the_mandatory_fields_are_provided() { + async fn should_respond_if_only_the_mandatory_fields_are_provided() { let http_tracker_server = start_default_http_tracker().await; let mut params = QueryBuilder::default().query().params(); @@ -251,7 +251,7 @@ mod http_tracker_server { #[tokio::test] async fn should_not_fail_when_the_event_param_is_invalid() { - // All invalid values are ignored as if the `event` param was empty + // All invalid values are ignored as if the `event` param were empty let http_tracker_server = start_default_http_tracker().await; From 7ee588a565e8746f6195adfea890ee78a7e96dd7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 31 Jan 2023 18:20:27 +0000 Subject: [PATCH 0352/1003] refactor(test): [#159] refactor tests for scrape request --- cSpell.json | 4 +- src/tracker/peer.rs | 2 +- tests/common/fixtures.rs | 12 ++ tests/http/asserts.rs | 16 ++- tests/http/bencode.rs | 14 ++ tests/http/requests/announce.rs | 2 +- tests/http/responses/scrape.rs | 223 +++++++++++++++++++++++--------- tests/http_tracker.rs | 76 +++++------ 8 files changed, 235 insertions(+), 114 deletions(-) diff --git a/cSpell.json b/cSpell.json index 5d0a6e1f1..dc51c87c5 100644 --- a/cSpell.json +++ b/cSpell.json @@ -72,6 +72,8 @@ "Vagaa", "Vuze", "Xtorrent", - "Xunlei" + "Xunlei", + "xxxxxxxxxxxxxxxxxxxxd", + "yyyyyyyyyyyyyyyyyyyyd" ] } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 22889381f..3f639f970 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -20,7 +20,7 @@ pub struct Peer { #[serde(with = "NumberOfBytesDef")] pub downloaded: NumberOfBytes, #[serde(with = 
"NumberOfBytesDef")] - pub left: NumberOfBytes, + pub left: NumberOfBytes, // The number of bytes this peer still has to download #[serde(with = "AnnounceEventDef")] pub event: AnnounceEvent, } diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index 0ff6798f6..5e644c45f 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -21,6 +21,18 @@ impl PeerBuilder { self } + #[allow(dead_code)] + pub fn with_bytes_pending_to_download(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } + + #[allow(dead_code)] + pub fn build(self) -> Peer { + self.into() + } + + #[allow(dead_code)] pub fn into(self) -> Peer { self.peer } diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 4e2214317..b8ccfee22 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -1,6 +1,7 @@ use reqwest::Response; use super::responses::announce::{Announce, Compact, DeserializedCompact}; +use super::responses::scrape; use crate::http::responses::error::Error; pub async fn assert_empty_announce_response(response: Response) { @@ -17,7 +18,7 @@ pub async fn assert_announce_response(response: Response, expected_announce_resp assert_eq!(announce_response, *expected_announce_response); } -/// Sample bencoded response as byte array: +/// Sample bencoded announce response as byte array: /// /// ```text /// b"d8:intervali120e12:min intervali120e8:completei2e10:incompletei0e5:peers6:~\0\0\x01\x1f\x90e6:peers60:e" @@ -40,6 +41,19 @@ pub async fn assert_compact_announce_response(response: Response, expected_respo assert_eq!(actual_response, *expected_response); } +/// Sample bencoded scrape response as byte array: +/// +/// ```text +/// b"d5:filesd20:\x9c8B\"\x13\xe3\x0b\xff!+0\xc3`\xd2o\x9a\x02\x13d\"d8:completei1e10:downloadedi0e10:incompletei0eeee" +/// ``` +pub async fn assert_scrape_response(response: Response, expected_response: &scrape::Response) { + assert_eq!(response.status(), 200); + + let scrape_response = 
scrape::Response::try_from_bytes(&response.bytes().await.unwrap()).unwrap(); + + assert_eq!(scrape_response, *expected_response); +} + pub async fn assert_is_announce_response(response: Response) { assert_eq!(response.status(), 200); let body = response.text().await.unwrap(); diff --git a/tests/http/bencode.rs b/tests/http/bencode.rs index b67b278d7..d107089cf 100644 --- a/tests/http/bencode.rs +++ b/tests/http/bencode.rs @@ -1 +1,15 @@ pub type ByteArray20 = [u8; 20]; + +pub struct InfoHash(ByteArray20); + +impl InfoHash { + pub fn new(vec: &[u8]) -> Self { + let mut byte_array_20: ByteArray20 = Default::default(); + byte_array_20.clone_from_slice(vec); + Self(byte_array_20) + } + + pub fn bytes(&self) -> ByteArray20 { + self.0 + } +} diff --git a/tests/http/requests/announce.rs b/tests/http/requests/announce.rs index 5656d8f1d..a8ebc95f8 100644 --- a/tests/http/requests/announce.rs +++ b/tests/http/requests/announce.rs @@ -225,7 +225,7 @@ impl QueryParams { pub fn remove_optional_params(&mut self) { // todo: make them optional with the Option<...> in the AnnounceQuery struct - // if they are really optional. SO that we can crete a minimal AnnounceQuery + // if they are really optional. So that we can crete a minimal AnnounceQuery // instead of removing the optional params afterwards. 
// // The original specification on: diff --git a/tests/http/responses/scrape.rs b/tests/http/responses/scrape.rs index 450006815..c9081a10f 100644 --- a/tests/http/responses/scrape.rs +++ b/tests/http/responses/scrape.rs @@ -4,84 +4,36 @@ use std::str; use serde::{self, Deserialize, Serialize}; use serde_bencode::value::Value; -use crate::http::bencode::ByteArray20; +use crate::http::bencode::{ByteArray20, InfoHash}; -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Default)] pub struct Response { pub files: HashMap, } impl Response { - pub fn from_bytes(bytes: &[u8]) -> Self { + pub fn try_from_bytes(bytes: &[u8]) -> Result { let scrape_response: DeserializedResponse = serde_bencode::from_bytes(bytes).unwrap(); - Self::from(scrape_response) + Self::try_from(scrape_response) + } + + pub fn empty() -> Self { + Self::default() } } #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct File { - pub complete: i64, - pub downloaded: i64, - pub incomplete: i64, + pub complete: i64, // The number of active peers that have completed downloading + pub downloaded: i64, // The number of peers that have ever completed downloading + pub incomplete: i64, // The number of active peers that have not completed downloading } -impl From for Response { - fn from(scrape_response: DeserializedResponse) -> Self { - // todo: - // - Use `try_from` trait instead of `from`. - // - Improve error messages. - // - Extract parser function out of the trait. - // - Extract parser for each nested element. - // - Extract function to instantiate [u8; 20] from Vec. 
- let mut files: HashMap = HashMap::new(); - - match scrape_response.files { - Value::Dict(dict) => { - for file_element in dict { - let info_hash_byte_vec = file_element.0; - let file_value = file_element.1; - - let file = match &file_value { - Value::Dict(dict) => { - let mut file = File { - complete: 0, - downloaded: 0, - incomplete: 0, - }; - - for file_field in dict { - let value = match file_field.1 { - Value::Int(number) => *number, - _ => panic!("Error parsing bencoded scrape response. Invalid value. Expected "), - }; - - if file_field.0 == b"complete" { - file.complete = value; - } else if file_field.0 == b"downloaded" { - file.downloaded = value; - } else if file_field.0 == b"incomplete" { - file.incomplete = value; - } else { - panic!("Error parsing bencoded scrape response. Invalid field"); - } - } - - file - } - _ => panic!("Error parsing bencoded scrape response. Invalid value. Expected "), - }; - - // Clone Vec into [u8; 20] - let mut info_hash_byte_array: [u8; 20] = Default::default(); - info_hash_byte_array.clone_from_slice(info_hash_byte_vec.as_slice()); - - files.insert(info_hash_byte_array, file); - } - } - _ => panic!("Error parsing bencoded scrape response. Invalid value. 
Expected "), - } +impl TryFrom for Response { + type Error = BencodeParseError; - Self { files } + fn try_from(scrape_response: DeserializedResponse) -> Result { + parse_bencoded_response(&scrape_response.files) } } @@ -89,3 +41,148 @@ impl From for Response { struct DeserializedResponse { pub files: Value, } + +pub struct ResponseBuilder { + response: Response, +} + +impl ResponseBuilder { + pub fn default() -> Self { + Self { + response: Response::empty(), + } + } + + pub fn add_file(mut self, info_hash_bytes: ByteArray20, file: File) -> Self { + self.response.files.insert(info_hash_bytes, file); + self + } + + pub fn build(self) -> Response { + self.response + } +} + +#[derive(Debug)] +pub enum BencodeParseError { + InvalidValueExpectedDict { value: Value }, + InvalidValueExpectedInt { value: Value }, + InvalidFileField { value: Value }, + MissingFileField { field_name: String }, +} + +/// It parses a bencoded scrape response into a `Response` struct. +/// +/// For example: +/// +/// ```text +/// d5:filesd20:xxxxxxxxxxxxxxxxxxxxd8:completei11e10:downloadedi13772e10:incompletei19e +/// 20:yyyyyyyyyyyyyyyyyyyyd8:completei21e10:downloadedi206e10:incompletei20eee +/// ``` +/// +/// Response (JSON encoded for readability): +/// +/// ```text +/// { +/// 'files': { +/// 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, +/// 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} +/// } +/// } +fn parse_bencoded_response(value: &Value) -> Result { + let mut files: HashMap = HashMap::new(); + + match value { + Value::Dict(dict) => { + for file_element in dict { + let info_hash_byte_vec = file_element.0; + let file_value = file_element.1; + + let file = parse_bencoded_file(file_value).unwrap(); + + files.insert(InfoHash::new(info_hash_byte_vec).bytes(), file); + } + } + _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), + } + + Ok(Response { files }) +} + +/// It parses a bencoded 
dictionary into a `File` struct. +/// +/// For example: +/// +/// +/// ```text +/// d8:completei11e10:downloadedi13772e10:incompletei19ee +/// ``` +/// +/// into: +/// +/// ```text +/// File { +/// complete: 11, +/// downloaded: 13772, +/// incomplete: 19, +/// } +/// ``` +fn parse_bencoded_file(value: &Value) -> Result { + let file = match &value { + Value::Dict(dict) => { + let mut complete = None; + let mut downloaded = None; + let mut incomplete = None; + + for file_field in dict { + let field_name = file_field.0; + + let field_value = match file_field.1 { + Value::Int(number) => Ok(*number), + _ => Err(BencodeParseError::InvalidValueExpectedInt { + value: file_field.1.clone(), + }), + }?; + + if field_name == b"complete" { + complete = Some(field_value); + } else if field_name == b"downloaded" { + downloaded = Some(field_value); + } else if field_name == b"incomplete" { + incomplete = Some(field_value); + } else { + return Err(BencodeParseError::InvalidFileField { + value: file_field.1.clone(), + }); + } + } + + if complete.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "complete".to_string(), + }); + } + + if downloaded.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "downloaded".to_string(), + }); + } + + if incomplete.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "incomplete".to_string(), + }); + } + + File { + complete: complete.unwrap(), + downloaded: downloaded.unwrap(), + incomplete: incomplete.unwrap(), + } + } + _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), + }; + + Ok(file) +} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 099e1d360..888da393a 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -5,19 +5,6 @@ mod common; mod http; mod http_tracker_server { - use std::str::FromStr; - - use percent_encoding::NON_ALPHANUMERIC; - use torrust_tracker::protocol::info_hash::InfoHash; - - #[test] - 
fn calculate_info_hash_param() { - let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - - let param = percent_encoding::percent_encode(&info_hash.0, NON_ALPHANUMERIC).to_string(); - - assert_eq!(param, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"); - } mod for_all_config_modes { @@ -331,7 +318,7 @@ mod http_tracker_server { // Peer 1 let previously_announced_peer = PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .into(); + .build(); // Add the Peer 1 http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; @@ -365,7 +352,7 @@ mod http_tracker_server { let http_tracker_server = start_public_http_tracker().await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let peer = PeerBuilder::default().into(); + let peer = PeerBuilder::default().build(); // Add a peer http_tracker_server.add_torrent(&info_hash, &peer).await; @@ -396,7 +383,7 @@ mod http_tracker_server { // Peer 1 let previously_announced_peer = PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .into(); + .build(); // Add the Peer 1 http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; @@ -435,7 +422,7 @@ mod http_tracker_server { // Peer 1 let previously_announced_peer = PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .into(); + .build(); // Add the Peer 1 http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; @@ -684,17 +671,16 @@ mod http_tracker_server { // Vuze (bittorrent client) docs: // https://wiki.vuze.com/w/Scrape - use std::collections::HashMap; use std::str::FromStr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::assert_internal_server_error_response; + use crate::http::asserts::{assert_internal_server_error_response, assert_scrape_response}; use 
crate::http::client::Client; use crate::http::requests; - use crate::http::responses::scrape::{File, Response}; + use crate::http::responses::scrape::{File, ResponseBuilder}; use crate::http::server::start_public_http_tracker; #[tokio::test] @@ -707,20 +693,21 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_scrape_response() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker = start_public_http_tracker().await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - // Peer - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .into(); - - // Add the Peer - http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; - // Scrape the tracker - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(http_tracker.get_connection_info()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -728,24 +715,18 @@ mod http_tracker_server { ) .await; - // todo: extract scrape response builder or named constructor. - // A builder with an "add_file(info_hash_bytes: &[u8], file: File)" method could be a good solution. 
- let mut files = HashMap::new(); - files.insert( - info_hash.bytes(), - File { - complete: 1, - downloaded: 0, - incomplete: 0, - }, - ); - let expected_scrape_response = Response { files }; + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); - // todo: extract assert - assert_eq!(response.status(), 200); - let bytes = response.bytes().await.unwrap(); - let scrape_response = Response::from_bytes(&bytes); - assert_eq!(scrape_response, expected_scrape_response); + assert_scrape_response(response, &expected_scrape_response).await; } } } @@ -776,6 +757,7 @@ mod http_tracker_server { } #[tokio::test] + async fn should_allow_announcing_a_whitelisted_torrent() { let http_tracker_server = start_whitelisted_http_tracker().await; From c24d744270f3fc6e7872e3c183e19392270b0435 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 31 Jan 2023 18:54:19 +0000 Subject: [PATCH 0353/1003] refactor(test): [#159] refactor tests for announce request --- tests/http/asserts.rs | 3 +-- tests/http/responses/announce.rs | 6 ++++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index b8ccfee22..a58558bc0 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -28,8 +28,7 @@ pub async fn assert_compact_announce_response(response: Response, expected_respo let bytes = response.bytes().await.unwrap(); - // todo: move to DeserializedCompact constructor and make DeserializedCompact struct private - let compact_announce: DeserializedCompact = serde_bencode::from_bytes(&bytes).unwrap_or_else(|_| { + let compact_announce = DeserializedCompact::from_bytes(&bytes).unwrap_or_else(|_| { panic!( "response body should be a valid compact announce response, got \"{:?}\"", &bytes diff --git a/tests/http/responses/announce.rs b/tests/http/responses/announce.rs index 85f0347cc..e976ba9ae 100644 --- 
a/tests/http/responses/announce.rs +++ b/tests/http/responses/announce.rs @@ -41,6 +41,12 @@ pub struct DeserializedCompact { pub peers: Vec, } +impl DeserializedCompact { + pub fn from_bytes(bytes: &[u8]) -> Result { + serde_bencode::from_bytes::(bytes) + } +} + #[derive(Debug, PartialEq)] pub struct Compact { // code-review: there could be a way to deserialize this struct directly From c46e4171e5b086e021f5c803e524c9334433464b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Feb 2023 13:49:42 +0000 Subject: [PATCH 0354/1003] test(http): [#159] add more tests for scrape request --- tests/common/fixtures.rs | 6 + tests/http/asserts.rs | 2 +- tests/http/requests/scrape.rs | 11 + tests/http/responses/scrape.rs | 22 +- tests/http_tracker.rs | 365 ++++++++++++++++++++++++++++++++- 5 files changed, 391 insertions(+), 15 deletions(-) diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index 5e644c45f..2abaca244 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -27,6 +27,12 @@ impl PeerBuilder { self } + #[allow(dead_code)] + pub fn with_no_bytes_pending_to_download(mut self) -> Self { + self.peer.left = NumberOfBytes(0); + self + } + #[allow(dead_code)] pub fn build(self) -> Peer { self.into() diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index a58558bc0..59f4ed42a 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -48,7 +48,7 @@ pub async fn assert_compact_announce_response(response: Response, expected_respo pub async fn assert_scrape_response(response: Response, expected_response: &scrape::Response) { assert_eq!(response.status(), 200); - let scrape_response = scrape::Response::try_from_bytes(&response.bytes().await.unwrap()).unwrap(); + let scrape_response = scrape::Response::try_from_bencoded(&response.bytes().await.unwrap()).unwrap(); assert_eq!(scrape_response, *expected_response); } diff --git a/tests/http/requests/scrape.rs b/tests/http/requests/scrape.rs index 6198f1680..6ab46974b 100644 --- 
a/tests/http/requests/scrape.rs +++ b/tests/http/requests/scrape.rs @@ -54,6 +54,11 @@ impl QueryBuilder { self } + pub fn add_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.scrape_query.info_hash.push(info_hash.0); + self + } + pub fn query(self) -> Query { self.scrape_query } @@ -82,6 +87,12 @@ pub struct QueryParams { pub info_hash: Vec, } +impl QueryParams { + pub fn set_one_info_hash_param(&mut self, info_hash: &str) { + self.info_hash = vec![info_hash.to_string()]; + } +} + impl std::fmt::Display for QueryParams { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let query = self diff --git a/tests/http/responses/scrape.rs b/tests/http/responses/scrape.rs index c9081a10f..5bf938ebe 100644 --- a/tests/http/responses/scrape.rs +++ b/tests/http/responses/scrape.rs @@ -12,23 +12,31 @@ pub struct Response { } impl Response { - pub fn try_from_bytes(bytes: &[u8]) -> Result { - let scrape_response: DeserializedResponse = serde_bencode::from_bytes(bytes).unwrap(); - Self::try_from(scrape_response) + pub fn with_one_file(info_hash_bytes: ByteArray20, file: File) -> Self { + let mut files: HashMap = HashMap::new(); + files.insert(info_hash_bytes, file); + Self { files } } - pub fn empty() -> Self { - Self::default() + pub fn try_from_bencoded(bytes: &[u8]) -> Result { + let scrape_response: DeserializedResponse = serde_bencode::from_bytes(bytes).unwrap(); + Self::try_from(scrape_response) } } -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Default)] pub struct File { pub complete: i64, // The number of active peers that have completed downloading pub downloaded: i64, // The number of peers that have ever completed downloading pub incomplete: i64, // The number of active peers that have not completed downloading } +impl File { + pub fn zeroed() -> Self { + Self::default() + } +} + impl TryFrom for Response { type Error = BencodeParseError; @@ -49,7 +57,7 @@ pub struct ResponseBuilder { 
impl ResponseBuilder { pub fn default() -> Self { Self { - response: Response::empty(), + response: Response::default(), } } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 888da393a..44bb8609d 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -9,6 +9,18 @@ mod http_tracker_server { mod for_all_config_modes { mod receiving_an_announce_request { + + // Announce request documentation: + // + // BEP 03. The BitTorrent Protocol Specification + // https://www.bittorrent.org/beps/bep_0003.html + // + // BEP 23. Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Announce + use std::net::{IpAddr, Ipv6Addr}; use std::str::FromStr; @@ -671,17 +683,19 @@ mod http_tracker_server { // Vuze (bittorrent client) docs: // https://wiki.vuze.com/w/Scrape + use std::net::IpAddr; use std::str::FromStr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; - use crate::common::fixtures::PeerBuilder; + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; use crate::http::asserts::{assert_internal_server_error_response, assert_scrape_response}; use crate::http::client::Client; use crate::http::requests; - use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::server::start_public_http_tracker; + use crate::http::requests::scrape::QueryBuilder; + use crate::http::responses::scrape::{self, File, ResponseBuilder}; + use crate::http::server::{start_ipv6_http_tracker, start_public_http_tracker}; #[tokio::test] async fn should_fail_when_the_request_is_empty() { @@ -692,7 +706,25 @@ mod http_tracker_server { } #[tokio::test] - async fn should_return_the_scrape_response() { + async fn should_fail_when_the_info_hash_param_is_invalid() { + let http_tracker_server = start_public_http_tracker().await; + + let mut params = QueryBuilder::default().query().params(); + + for invalid_value in 
&invalid_info_hashes() { + params.set_one_info_hash_param(invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + // code-review: it's not returning the invalid info hash error + assert_internal_server_error_response(response).await; + } + } + + #[tokio::test] + async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { let http_tracker = start_public_http_tracker().await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -728,6 +760,123 @@ mod http_tracker_server { assert_scrape_response(response, &expected_scrape_response).await; } + + #[tokio::test] + async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { + let http_tracker = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_no_bytes_pending_to_download() + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + #[tokio::test] + async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { + let http_tracker = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + 
&requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; + } + + #[tokio::test] + async fn should_accept_multiple_infohashes() { + let http_tracker = start_public_http_tracker().await; + + let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .add_info_hash(&info_hash1) + .add_info_hash(&info_hash2) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file(info_hash1.bytes(), File::zeroed()) + .add_file(info_hash2.bytes(), File::zeroed()) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + #[tokio::test] + async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { + let http_tracker = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = http_tracker.tracker.get_stats().await; + + assert_eq!(stats.tcp4_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { + let http_tracker = start_ipv6_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::bind(http_tracker.get_connection_info(), IpAddr::from_str("::1").unwrap()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = 
http_tracker.tracker.get_stats().await; + + assert_eq!(stats.tcp6_scrapes_handled, 1); + } } } @@ -777,7 +926,92 @@ mod http_tracker_server { } } - mod receiving_an_scrape_request {} + mod receiving_an_scrape_request { + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::PeerBuilder; + use crate::http::asserts::assert_scrape_response; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::responses::scrape::{File, ResponseBuilder}; + use crate::http::server::start_whitelisted_http_tracker; + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { + let http_tracker = start_whitelisted_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + #[tokio::test] + async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { + let http_tracker = start_whitelisted_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + http_tracker + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the 
torrent to the whitelist"); + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + } } mod configured_as_private { @@ -798,7 +1032,7 @@ mod http_tracker_server { use crate::http::server::start_private_http_tracker; #[tokio::test] - async fn should_respond_to_peers_providing_a_valid_authentication_key() { + async fn should_respond_to_authenticated_peers() { let http_tracker_server = start_private_http_tracker().await; let key = http_tracker_server @@ -842,7 +1076,124 @@ mod http_tracker_server { } } - mod receiving_an_scrape_request {} + mod receiving_an_scrape_request { + + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::KeyId; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::PeerBuilder; + use crate::http::asserts::assert_scrape_response; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::responses::scrape::{File, ResponseBuilder}; + use crate::http::server::start_private_http_tracker; + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { + let http_tracker = start_private_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + 
.with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + #[tokio::test] + async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { + let http_tracker = start_private_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let key = http_tracker.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + + let response = Client::authenticated(http_tracker.get_connection_info(), key.id()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { + // There is not authentication error + + let http_tracker = start_private_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let false_key_id: KeyId = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + + let response = Client::authenticated(http_tracker.get_connection_info(), false_key_id) + .scrape( + &requests::scrape::QueryBuilder::default() + 
.with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + } } mod configured_as_private_and_whitelisted { From b8793e751c59b35945ffe2e02409a80bea44f428 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Feb 2023 18:01:05 +0000 Subject: [PATCH 0355/1003] feat: [#165] upgrade workflow action: actions/checkout@v3 --- .github/workflows/publish_crate.yml | 2 +- .github/workflows/publish_docker_image.yml | 2 +- .github/workflows/test_build_release.yml | 6 +++--- .github/workflows/test_docker.yml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/publish_crate.yml b/.github/workflows/publish_crate.yml index 0352064eb..40f332a8c 100644 --- a/.github/workflows/publish_crate.yml +++ b/.github/workflows/publish_crate.yml @@ -23,7 +23,7 @@ jobs: if: needs.check-secret.outputs.publish == 'true' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 with: profile: minimal diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml index 1587a0bd6..5983bf6a2 100644 --- a/.github/workflows/publish_docker_image.yml +++ b/.github/workflows/publish_docker_image.yml @@ -32,7 +32,7 @@ jobs: if: needs.check-secret.outputs.publish == 'true' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 with: profile: minimal diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 3924eea4b..c86cf9994 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -9,7 +9,7 @@ jobs: env: CARGO_TERM_COLOR: always steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 
with: profile: minimal @@ -29,7 +29,7 @@ jobs: env: CARGO_TERM_COLOR: always steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 with: profile: minimal @@ -61,7 +61,7 @@ jobs: env: CARGO_TERM_COLOR: always steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 with: profile: minimal diff --git a/.github/workflows/test_docker.yml b/.github/workflows/test_docker.yml index 2cfa4de5c..0c3fc36d8 100644 --- a/.github/workflows/test_docker.yml +++ b/.github/workflows/test_docker.yml @@ -8,7 +8,7 @@ jobs: test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 From 3d1a12b46e7c2326495865ff5d19ed405970abd7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Feb 2023 18:05:41 +0000 Subject: [PATCH 0356/1003] feat: [#165] upgrade workflow action: Swatinem/rust-cache@v2 --- .github/workflows/publish_crate.yml | 2 +- .github/workflows/publish_docker_image.yml | 2 +- .github/workflows/test_build_release.yml | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/publish_crate.yml b/.github/workflows/publish_crate.yml index 40f332a8c..644d8af6c 100644 --- a/.github/workflows/publish_crate.yml +++ b/.github/workflows/publish_crate.yml @@ -29,7 +29,7 @@ jobs: profile: minimal toolchain: stable components: llvm-tools-preview - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Run Tests run: cargo test diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml index 5983bf6a2..fd82a499e 100644 --- a/.github/workflows/publish_docker_image.yml +++ b/.github/workflows/publish_docker_image.yml @@ -38,7 +38,7 @@ jobs: profile: minimal toolchain: stable components: llvm-tools-preview - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Run Tests run: cargo test diff --git 
a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index c86cf9994..38760c747 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -16,7 +16,7 @@ jobs: toolchain: nightly override: true components: rustfmt, clippy - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Check Rust Formatting uses: actions-rs/cargo@v1 with: @@ -35,7 +35,7 @@ jobs: profile: minimal toolchain: stable components: llvm-tools-preview - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Check Rust Code uses: actions-rs/cargo@v1 with: @@ -66,7 +66,7 @@ jobs: with: profile: minimal toolchain: stable - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Build Torrust Tracker run: cargo build --release - name: Upload Build Artifact From b14270b5d9666cd4b7260a882c91e007f85cc788 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Feb 2023 18:39:52 +0000 Subject: [PATCH 0357/1003] feat: [#165] replace workflow action actions-rs/toolchain@v1 with dtolnay/rust-toolchain@stable --- .github/workflows/publish_crate.yml | 7 ++----- .github/workflows/publish_docker_image.yml | 3 +-- .github/workflows/test_build_release.yml | 10 +++------- 3 files changed, 6 insertions(+), 14 deletions(-) diff --git a/.github/workflows/publish_crate.yml b/.github/workflows/publish_crate.yml index 644d8af6c..c120a0fc5 100644 --- a/.github/workflows/publish_crate.yml +++ b/.github/workflows/publish_crate.yml @@ -24,9 +24,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@stable with: - profile: minimal toolchain: stable components: llvm-tools-preview - uses: Swatinem/rust-cache@v2 @@ -43,11 +42,9 @@ jobs: uses: actions/checkout@v3 - name: Install stable toolchain - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@stable with: - profile: minimal toolchain: stable - override: true - 
run: cargo publish env: diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml index fd82a499e..20152a727 100644 --- a/.github/workflows/publish_docker_image.yml +++ b/.github/workflows/publish_docker_image.yml @@ -33,9 +33,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@stable with: - profile: minimal toolchain: stable components: llvm-tools-preview - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 38760c747..6153ff77b 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -10,11 +10,9 @@ jobs: CARGO_TERM_COLOR: always steps: - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@stable with: - profile: minimal toolchain: nightly - override: true components: rustfmt, clippy - uses: Swatinem/rust-cache@v2 - name: Check Rust Formatting @@ -30,9 +28,8 @@ jobs: CARGO_TERM_COLOR: always steps: - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@stable with: - profile: minimal toolchain: stable components: llvm-tools-preview - uses: Swatinem/rust-cache@v2 @@ -62,9 +59,8 @@ jobs: CARGO_TERM_COLOR: always steps: - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@stable with: - profile: minimal toolchain: stable - uses: Swatinem/rust-cache@v2 - name: Build Torrust Tracker From 42e7e64542a6c570333405c227ca89ba371e6c33 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Feb 2023 18:45:01 +0000 Subject: [PATCH 0358/1003] refactor: [#165] remove unmantained workflow action to run cargo commmands Replaced with using the `cargo` command directly. 
--- .github/workflows/test_build_release.yml | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 6153ff77b..3b9a9a44a 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -16,10 +16,7 @@ jobs: components: rustfmt, clippy - uses: Swatinem/rust-cache@v2 - name: Check Rust Formatting - uses: actions-rs/cargo@v1 - with: - command: fmt - args: --check + run: cargo fmt --check test: needs: format @@ -34,15 +31,9 @@ jobs: components: llvm-tools-preview - uses: Swatinem/rust-cache@v2 - name: Check Rust Code - uses: actions-rs/cargo@v1 - with: - command: check - args: --all-targets + run: cargo check --all-targets - name: Clippy Rust Code - uses: actions-rs/cargo@v1 - with: - command: clippy - args: --all-targets -- -D clippy::pedantic + run: cargo clippy --all-targets -- -D clippy::pedantic - uses: taiki-e/install-action@cargo-llvm-cov - uses: taiki-e/install-action@nextest - name: Run Tests From e0761c49e1d0d6c8ff2ff5caf1e569e769c67cfd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 2 Feb 2023 10:20:54 +0000 Subject: [PATCH 0359/1003] feat: upload code coverage to codecov.io --- .github/workflows/codecov.yml | 40 +++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 .github/workflows/codecov.yml diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml new file mode 100644 index 000000000..05551bafc --- /dev/null +++ b/.github/workflows/codecov.yml @@ -0,0 +1,40 @@ +name: Upload code coverage + +on: + push: + pull_request: + +env: + CARGO_TERM_COLOR: always + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - uses: dtolnay/rust-toolchain@stable + with: + toolchain: nightly + components: rustfmt, llvm-tools-preview + - name: Build + run: cargo build --release + env: + CARGO_INCREMENTAL: "0" + RUSTFLAGS: 
"-Cinstrument-coverage" + RUSTDOCFLAGS: "-Cinstrument-coverage" + - name: Test + run: cargo test --all-features --no-fail-fast + env: + CARGO_INCREMENTAL: "0" + RUSTFLAGS: "-Cinstrument-coverage" + RUSTDOCFLAGS: "-Cinstrument-coverage" + - name: Install grcov + run: if [[ ! -e ~/.cargo/bin/grcov ]]; then cargo install grcov; fi + - name: Run grcov + run: grcov . --binary-path target/debug/deps/ -s . -t lcov --branch --ignore-not-existing --ignore '../**' --ignore '/*' -o coverage.lcov + - uses: codecov/codecov-action@v3 + with: + files: ./coverage.lcov + flags: rust + fail_ci_if_error: true # optional (default = false) From 95a2cd19ab26fae1e503a4f756d95c441769cbcc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 2 Feb 2023 13:34:58 +0000 Subject: [PATCH 0360/1003] refactor(udp): refactor tests to follow mod structure conventions Use the same dir/mod structure as in API and HTTP tracker integration tests. --- tests/common/fixtures.rs | 3 + tests/common/mod.rs | 1 + tests/common/udp.rs | 41 ++++++ tests/udp.rs | 310 --------------------------------------- tests/udp/asserts.rs | 23 +++ tests/udp/client.rs | 65 ++++++++ tests/udp/mod.rs | 3 + tests/udp/server.rs | 67 +++++++++ tests/udp_tracker.rs | 175 ++++++++++++++++++++++ 9 files changed, 378 insertions(+), 310 deletions(-) create mode 100644 tests/common/udp.rs delete mode 100644 tests/udp.rs create mode 100644 tests/udp/asserts.rs create mode 100644 tests/udp/client.rs create mode 100644 tests/udp/mod.rs create mode 100644 tests/udp/server.rs create mode 100644 tests/udp_tracker.rs diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index 2abaca244..1ead0db0c 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -9,6 +9,7 @@ pub struct PeerBuilder { } impl PeerBuilder { + #[allow(dead_code)] pub fn default() -> PeerBuilder { Self { peer: default_peer_for_testing(), @@ -44,6 +45,7 @@ impl PeerBuilder { } } +#[allow(dead_code)] fn default_peer_for_testing() -> Peer { Peer { 
peer_id: peer::Id(*b"-qB00000000000000000"), @@ -56,6 +58,7 @@ fn default_peer_for_testing() -> Peer { } } +#[allow(dead_code)] pub fn invalid_info_hashes() -> Vec { [ "0".to_string(), diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 810620359..b57996292 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,2 +1,3 @@ pub mod fixtures; pub mod http; +pub mod udp; diff --git a/tests/common/udp.rs b/tests/common/udp.rs new file mode 100644 index 000000000..3d84e2b97 --- /dev/null +++ b/tests/common/udp.rs @@ -0,0 +1,41 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use tokio::net::UdpSocket; + +/// A generic UDP client +pub struct Client { + pub socket: Arc, +} + +impl Client { + #[allow(dead_code)] + pub async fn connected(remote_socket_addr: &SocketAddr, local_socket_addr: &SocketAddr) -> Client { + let client = Client::bind(local_socket_addr).await; + client.connect(remote_socket_addr).await; + client + } + + pub async fn bind(local_socket_addr: &SocketAddr) -> Self { + let socket = UdpSocket::bind(local_socket_addr).await.unwrap(); + Self { + socket: Arc::new(socket), + } + } + + pub async fn connect(&self, remote_address: &SocketAddr) { + self.socket.connect(remote_address).await.unwrap(); + } + + #[allow(dead_code)] + pub async fn send(&self, bytes: &[u8]) -> usize { + self.socket.writable().await.unwrap(); + self.socket.send(bytes).await.unwrap() + } + + #[allow(dead_code)] + pub async fn receive(&self, bytes: &mut [u8]) -> usize { + self.socket.readable().await.unwrap(); + self.socket.recv(bytes).await.unwrap() + } +} diff --git a/tests/udp.rs b/tests/udp.rs deleted file mode 100644 index 408f4f795..000000000 --- a/tests/udp.rs +++ /dev/null @@ -1,310 +0,0 @@ -/// Integration tests for UDP tracker server -/// -/// cargo test `udp_tracker_server` -- --nocapture -extern crate rand; - -mod udp_tracker_server { - use core::panic; - use std::io::Cursor; - use std::net::Ipv4Addr; - use std::sync::atomic::{AtomicBool, Ordering}; - use 
std::sync::Arc; - - use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, - Port, Request, Response, ScrapeRequest, TransactionId, - }; - use rand::{thread_rng, Rng}; - use tokio::net::UdpSocket; - use tokio::task::JoinHandle; - use torrust_tracker::config::{ephemeral_configuration, Configuration}; - use torrust_tracker::jobs::udp_tracker; - use torrust_tracker::tracker::statistics::Keeper; - use torrust_tracker::udp::MAX_PACKET_SIZE; - use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; - - fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) - } - - pub fn ephemeral_random_client_port() -> u16 { - // todo: this may produce random test failures because two tests can try to bind the same port. - // We could create a pool of available ports (with read/write lock) - let mut rng = thread_rng(); - rng.gen_range(49152..65535) - } - - pub struct UdpServer { - pub started: AtomicBool, - pub job: Option>, - pub bind_address: Option, - } - - impl UdpServer { - pub fn new() -> Self { - Self { - started: AtomicBool::new(false), - job: None, - bind_address: None, - } - } - - pub fn start(&mut self, configuration: &Arc) { - if !self.started.load(Ordering::Relaxed) { - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - - // Initialize logging - logging::setup(configuration); - - let udp_tracker_config = &configuration.udp_trackers[0]; 
- - // Start the UDP tracker job - self.job = Some(udp_tracker::start_job(udp_tracker_config, tracker)); - - self.bind_address = Some(udp_tracker_config.bind_address.clone()); - - self.started.store(true, Ordering::Relaxed); - } - } - } - - fn new_running_udp_server(configuration: &Arc) -> UdpServer { - let mut udp_server = UdpServer::new(); - udp_server.start(configuration); - udp_server - } - - struct UdpClient { - socket: Arc, - } - - impl UdpClient { - async fn bind(local_address: &str) -> Self { - let socket = UdpSocket::bind(local_address).await.unwrap(); - Self { - socket: Arc::new(socket), - } - } - - async fn connect(&self, remote_address: &str) { - self.socket.connect(remote_address).await.unwrap(); - } - - async fn send(&self, bytes: &[u8]) -> usize { - self.socket.writable().await.unwrap(); - self.socket.send(bytes).await.unwrap() - } - - async fn receive(&self, bytes: &mut [u8]) -> usize { - self.socket.readable().await.unwrap(); - self.socket.recv(bytes).await.unwrap() - } - } - - /// Creates a new `UdpClient` connected to a Udp server - async fn new_connected_udp_client(remote_address: &str) -> UdpClient { - let client = UdpClient::bind(&source_address(ephemeral_random_client_port())).await; - client.connect(remote_address).await; - client - } - - struct UdpTrackerClient { - pub udp_client: UdpClient, - } - - impl UdpTrackerClient { - async fn send(&self, request: Request) -> usize { - // Write request into a buffer - let request_buffer = vec![0u8; MAX_PACKET_SIZE]; - let mut cursor = Cursor::new(request_buffer); - - let request_data = match request.write(&mut cursor) { - Ok(_) => { - #[allow(clippy::cast_possible_truncation)] - let position = cursor.position() as usize; - let inner_request_buffer = cursor.get_ref(); - // Return slice which contains written request data - &inner_request_buffer[..position] - } - Err(e) => panic!("could not write request to bytes: {e}."), - }; - - self.udp_client.send(request_data).await - } - - async fn receive(&self) 
-> Response { - let mut response_buffer = [0u8; MAX_PACKET_SIZE]; - - let payload_size = self.udp_client.receive(&mut response_buffer).await; - - Response::from_bytes(&response_buffer[..payload_size], true).unwrap() - } - } - - /// Creates a new `UdpTrackerClient` connected to a Udp Tracker server - async fn new_connected_udp_tracker_client(remote_address: &str) -> UdpTrackerClient { - let udp_client = new_connected_udp_client(remote_address).await; - UdpTrackerClient { udp_client } - } - - fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { - [0; MAX_PACKET_SIZE] - } - - fn empty_buffer() -> [u8; MAX_PACKET_SIZE] { - [0; MAX_PACKET_SIZE] - } - - /// Generates the source address for the UDP client - fn source_address(port: u16) -> String { - format!("127.0.0.1:{port}") - } - - fn is_error_response(response: &Response, error_message: &str) -> bool { - match response { - Response::Error(error_response) => error_response.message.starts_with(error_message), - _ => false, - } - } - - fn is_connect_response(response: &Response, transaction_id: TransactionId) -> bool { - match response { - Response::Connect(connect_response) => connect_response.transaction_id == transaction_id, - _ => false, - } - } - - fn is_ipv4_announce_response(response: &Response) -> bool { - matches!(response, Response::AnnounceIpv4(_)) - } - - fn is_scrape_response(response: &Response) -> bool { - matches!(response, Response::Scrape(_)) - } - - #[tokio::test] - async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { - let configuration = tracker_configuration(); - - let udp_server = new_running_udp_server(&configuration); - - let client = new_connected_udp_client(&udp_server.bind_address.unwrap()).await; - - client.send(&empty_udp_request()).await; - - let mut buffer = empty_buffer(); - client.receive(&mut buffer).await; - let response = Response::from_bytes(&buffer, true).unwrap(); - - assert!(is_error_response(&response, "bad request")); - } - - #[tokio::test] - 
async fn should_return_a_connect_response_when_the_client_sends_a_connection_request() { - let configuration = tracker_configuration(); - - let udp_server = new_running_udp_server(&configuration); - - let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; - - let connect_request = ConnectRequest { - transaction_id: TransactionId(123), - }; - - client.send(connect_request.into()).await; - - let response = client.receive().await; - - assert!(is_connect_response(&response, TransactionId(123))); - } - - async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { - let connect_request = ConnectRequest { transaction_id }; - - client.send(connect_request.into()).await; - - let response = client.receive().await; - - match response { - Response::Connect(connect_response) => connect_response.connection_id, - _ => panic!("error connecting to udp server {:?}", response), - } - } - - #[tokio::test] - async fn should_return_an_announce_response_when_the_client_sends_an_announce_request() { - let configuration = tracker_configuration(); - - let udp_server = new_running_udp_server(&configuration); - - let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; - - let connection_id = send_connection_request(TransactionId(123), &client).await; - - // Send announce request - - let announce_request = AnnounceRequest { - connection_id: ConnectionId(connection_id.0), - transaction_id: TransactionId(123i32), - info_hash: InfoHash([0u8; 20]), - peer_id: PeerId([255u8; 20]), - bytes_downloaded: NumberOfBytes(0i64), - bytes_uploaded: NumberOfBytes(0i64), - bytes_left: NumberOfBytes(0i64), - event: AnnounceEvent::Started, - ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), - key: PeerKey(0u32), - peers_wanted: NumberOfPeers(1i32), - port: Port(client.udp_client.socket.local_addr().unwrap().port()), - }; - - client.send(announce_request.into()).await; - - let response = 
client.receive().await; - - assert!(is_ipv4_announce_response(&response)); - } - - #[tokio::test] - async fn should_return_a_scrape_response_when_the_client_sends_a_scrape_request() { - let configuration = tracker_configuration(); - - let udp_server = new_running_udp_server(&configuration); - - let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; - - let connection_id = send_connection_request(TransactionId(123), &client).await; - - // Send scrape request - - // Full scrapes are not allowed so it will return "bad request" error with empty vector - let info_hashes = vec![InfoHash([0u8; 20])]; - - let scrape_request = ScrapeRequest { - connection_id: ConnectionId(connection_id.0), - transaction_id: TransactionId(123i32), - info_hashes, - }; - - client.send(scrape_request.into()).await; - - let response = client.receive().await; - - assert!(is_scrape_response(&response)); - } -} diff --git a/tests/udp/asserts.rs b/tests/udp/asserts.rs new file mode 100644 index 000000000..bf8fb6728 --- /dev/null +++ b/tests/udp/asserts.rs @@ -0,0 +1,23 @@ +use aquatic_udp_protocol::{Response, TransactionId}; + +pub fn is_error_response(response: &Response, error_message: &str) -> bool { + match response { + Response::Error(error_response) => error_response.message.starts_with(error_message), + _ => false, + } +} + +pub fn is_connect_response(response: &Response, transaction_id: TransactionId) -> bool { + match response { + Response::Connect(connect_response) => connect_response.transaction_id == transaction_id, + _ => false, + } +} + +pub fn is_ipv4_announce_response(response: &Response) -> bool { + matches!(response, Response::AnnounceIpv4(_)) +} + +pub fn is_scrape_response(response: &Response) -> bool { + matches!(response, Response::Scrape(_)) +} diff --git a/tests/udp/client.rs b/tests/udp/client.rs new file mode 100644 index 000000000..3cb4d6134 --- /dev/null +++ b/tests/udp/client.rs @@ -0,0 +1,65 @@ +use std::io::Cursor; +use 
std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use aquatic_udp_protocol::{Request, Response}; +use rand::{thread_rng, Rng}; +use torrust_tracker::udp::MAX_PACKET_SIZE; + +use crate::common::udp::Client as UdpClient; + +/// Creates a new generic UDP client connected to a generic UDP server +pub async fn new_udp_client_connected(remote_address: &SocketAddr) -> UdpClient { + let local_address = loopback_socket_address(ephemeral_random_client_port()); + UdpClient::connected(remote_address, &local_address).await +} + +/// Creates a new UDP tracker client connected to a UDP tracker server +pub async fn new_udp_tracker_client_connected(remote_address: &SocketAddr) -> Client { + let udp_client = new_udp_client_connected(remote_address).await; + Client { udp_client } +} + +pub fn ephemeral_random_client_port() -> u16 { + // todo: this may produce random test failures because two tests can try to bind the same port. + // We could create a pool of available ports (with read/write lock) + let mut rng = thread_rng(); + rng.gen_range(49152..65535) +} + +fn loopback_socket_address(port: u16) -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port) +} + +/// A UDP tracker client +pub struct Client { + pub udp_client: UdpClient, // A generic UDP client +} + +impl Client { + pub async fn send(&self, request: Request) -> usize { + // Write request into a buffer + let request_buffer = vec![0u8; MAX_PACKET_SIZE]; + let mut cursor = Cursor::new(request_buffer); + + let request_data = match request.write(&mut cursor) { + Ok(_) => { + #[allow(clippy::cast_possible_truncation)] + let position = cursor.position() as usize; + let inner_request_buffer = cursor.get_ref(); + // Return slice which contains written request data + &inner_request_buffer[..position] + } + Err(e) => panic!("could not write request to bytes: {e}."), + }; + + self.udp_client.send(request_data).await + } + + pub async fn receive(&self) -> Response { + let mut response_buffer = [0u8; 
MAX_PACKET_SIZE]; + + let payload_size = self.udp_client.receive(&mut response_buffer).await; + + Response::from_bytes(&response_buffer[..payload_size], true).unwrap() + } +} diff --git a/tests/udp/mod.rs b/tests/udp/mod.rs new file mode 100644 index 000000000..16a77bb99 --- /dev/null +++ b/tests/udp/mod.rs @@ -0,0 +1,3 @@ +pub mod asserts; +pub mod client; +pub mod server; diff --git a/tests/udp/server.rs b/tests/udp/server.rs new file mode 100644 index 000000000..401d4cf92 --- /dev/null +++ b/tests/udp/server.rs @@ -0,0 +1,67 @@ +use std::net::SocketAddr; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; + +use tokio::task::JoinHandle; +use torrust_tracker::config::{ephemeral_configuration, Configuration}; +use torrust_tracker::jobs::udp_tracker; +use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; + +pub fn start_udp_tracker(configuration: &Arc) -> Server { + let mut udp_server = Server::new(); + udp_server.start(configuration); + udp_server +} + +pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral_configuration()) +} +pub struct Server { + pub started: AtomicBool, + pub job: Option>, + pub bind_address: Option, +} + +impl Server { + pub fn new() -> Self { + Self { + started: AtomicBool::new(false), + job: None, + bind_address: None, + } + } + + pub fn start(&mut self, configuration: &Arc) { + if !self.started.load(Ordering::Relaxed) { + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { 
+ panic!("{}", error) + } + }; + + // Initialize logging + logging::setup(configuration); + + let udp_tracker_config = &configuration.udp_trackers[0]; + + // Start the UDP tracker job + self.job = Some(udp_tracker::start_job(udp_tracker_config, tracker)); + + self.bind_address = Some(udp_tracker_config.bind_address.parse().unwrap()); + + self.started.store(true, Ordering::Relaxed); + } + } +} diff --git a/tests/udp_tracker.rs b/tests/udp_tracker.rs new file mode 100644 index 000000000..0287d01b7 --- /dev/null +++ b/tests/udp_tracker.rs @@ -0,0 +1,175 @@ +/// Integration tests for UDP tracker server +/// +/// cargo test `udp_tracker_server` -- --nocapture +extern crate rand; + +mod common; +mod udp; + +mod udp_tracker_server { + + // UDP tracker documentation: + // + // BEP 15. UDP Tracker Protocol for BitTorrent + // https://www.bittorrent.org/beps/bep_0015.html + + use core::panic; + + use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; + use torrust_tracker::udp::MAX_PACKET_SIZE; + + use crate::udp::asserts::is_error_response; + use crate::udp::client::{new_udp_client_connected, Client}; + use crate::udp::server::{start_udp_tracker, tracker_configuration}; + + fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { + [0; MAX_PACKET_SIZE] + } + + fn empty_buffer() -> [u8; MAX_PACKET_SIZE] { + [0; MAX_PACKET_SIZE] + } + + async fn send_connection_request(transaction_id: TransactionId, client: &Client) -> ConnectionId { + let connect_request = ConnectRequest { transaction_id }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + match response { + Response::Connect(connect_response) => connect_response.connection_id, + _ => panic!("error connecting to udp server {:?}", response), + } + } + + #[tokio::test] + async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { + let configuration = tracker_configuration(); + + let udp_server = start_udp_tracker(&configuration); + + 
let client = new_udp_client_connected(&udp_server.bind_address.unwrap()).await; + + client.send(&empty_udp_request()).await; + + let mut buffer = empty_buffer(); + client.receive(&mut buffer).await; + let response = Response::from_bytes(&buffer, true).unwrap(); + + assert!(is_error_response(&response, "bad request")); + } + + mod receiving_a_connection_request { + use aquatic_udp_protocol::{ConnectRequest, TransactionId}; + + use crate::udp::asserts::is_connect_response; + use crate::udp::client::new_udp_tracker_client_connected; + use crate::udp::server::{start_udp_tracker, tracker_configuration}; + + #[tokio::test] + async fn should_return_a_connect_response() { + let configuration = tracker_configuration(); + + let udp_server = start_udp_tracker(&configuration); + + let client = new_udp_tracker_client_connected(&udp_server.bind_address.unwrap()).await; + + let connect_request = ConnectRequest { + transaction_id: TransactionId(123), + }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + assert!(is_connect_response(&response, TransactionId(123))); + } + } + + mod receiving_an_announce_request { + use std::net::Ipv4Addr; + + use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, + TransactionId, + }; + + use crate::udp::asserts::is_ipv4_announce_response; + use crate::udp::client::new_udp_tracker_client_connected; + use crate::udp::server::{start_udp_tracker, tracker_configuration}; + use crate::udp_tracker_server::send_connection_request; + + #[tokio::test] + async fn should_return_an_announce_response() { + let configuration = tracker_configuration(); + + let udp_server = start_udp_tracker(&configuration); + + let client = new_udp_tracker_client_connected(&udp_server.bind_address.unwrap()).await; + + let connection_id = send_connection_request(TransactionId(123), &client).await; + + // Send announce request + + let announce_request = 
AnnounceRequest { + connection_id: ConnectionId(connection_id.0), + transaction_id: TransactionId(123i32), + info_hash: InfoHash([0u8; 20]), + peer_id: PeerId([255u8; 20]), + bytes_downloaded: NumberOfBytes(0i64), + bytes_uploaded: NumberOfBytes(0i64), + bytes_left: NumberOfBytes(0i64), + event: AnnounceEvent::Started, + ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), + key: PeerKey(0u32), + peers_wanted: NumberOfPeers(1i32), + port: Port(client.udp_client.socket.local_addr().unwrap().port()), + }; + + client.send(announce_request.into()).await; + + let response = client.receive().await; + + assert!(is_ipv4_announce_response(&response)); + } + } + + mod receiving_an_scrape_request { + use aquatic_udp_protocol::{ConnectionId, InfoHash, ScrapeRequest, TransactionId}; + + use crate::udp::asserts::is_scrape_response; + use crate::udp::client::new_udp_tracker_client_connected; + use crate::udp::server::{start_udp_tracker, tracker_configuration}; + use crate::udp_tracker_server::send_connection_request; + + #[tokio::test] + async fn should_return_a_scrape_response() { + let configuration = tracker_configuration(); + + let udp_server = start_udp_tracker(&configuration); + + let client = new_udp_tracker_client_connected(&udp_server.bind_address.unwrap()).await; + + let connection_id = send_connection_request(TransactionId(123), &client).await; + + // Send scrape request + + // Full scrapes are not allowed you need to pass an array of info hashes otherwise + // it will return "bad request" error with empty vector + let info_hashes = vec![InfoHash([0u8; 20])]; + + let scrape_request = ScrapeRequest { + connection_id: ConnectionId(connection_id.0), + transaction_id: TransactionId(123i32), + info_hashes, + }; + + client.send(scrape_request.into()).await; + + let response = client.receive().await; + + assert!(is_scrape_response(&response)); + } + } +} From 895592795766a6151e45b6b80f29aa3293d36f59 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Feb 2023 17:39:37 +0100 
Subject: [PATCH 0361/1003] dev: check errors to contain response --- cSpell.json | 2 + tests/api/asserts.rs | 26 +++++++--- tests/http/asserts.rs | 112 +++++++++++++++++------------------------- 3 files changed, 68 insertions(+), 72 deletions(-) diff --git a/cSpell.json b/cSpell.json index dc51c87c5..9f10d99e4 100644 --- a/cSpell.json +++ b/cSpell.json @@ -28,6 +28,7 @@ "Hydranode", "incompletei", "infohash", + "infohashes", "infoschema", "intervali", "leecher", @@ -58,6 +59,7 @@ "sharktorrent", "socketaddr", "sqllite", + "subsec", "Swatinem", "Swiftbit", "thiserror", diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 5f9d39705..5a4abfb62 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -37,9 +37,20 @@ pub async fn assert_auth_key_utf8(response: Response) -> AuthKey { // OK response pub async fn assert_ok(response: Response) { - assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - assert_eq!(response.text().await.unwrap(), "{\"status\":\"ok\"}"); + let response_status = response.status(); + let response_headers = response.headers().get("content-type").cloned().unwrap(); + let response_text = response.text().await.unwrap(); + + let details = format!( + r#" + status: ´{response_status}´ + headers: ´{response_headers:?}´ + text: ´"{response_text}"´"# + ); + + assert_eq!(response_status, 200, "details:{details}."); + assert_eq!(response_headers, "application/json", "\ndetails:{details}."); + assert_eq!(response_text, "{\"status\":\"ok\"}", "\ndetails:{details}."); } // Error responses @@ -118,8 +129,11 @@ pub async fn assert_failed_to_reload_keys(response: Response) { async fn assert_unhandled_rejection(response: Response, reason: &str) { assert_eq!(response.status(), 500); assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); - assert_eq!( - response.text().await.unwrap(), - format!("Unhandled rejection: Err {{ reason: \"{reason}\" }}")
+ let reason_text = format!("Unhandled rejection: Err {{ reason: \"{reason}"); + let response_text = response.text().await.unwrap(); + assert!( + response_text.contains(&reason_text), + ":\n response: `\"{response_text}\"`\n does not contain: `\"{reason_text}\"`." ); } diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 59f4ed42a..cd6bcb499 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -1,9 +1,27 @@ +use std::panic::Location; + use reqwest::Response; use super::responses::announce::{Announce, Compact, DeserializedCompact}; use super::responses::scrape; use crate::http::responses::error::Error; +pub fn assert_error_bencoded(response_text: &String, expected_failure_reason: &str, location: &'static Location<'static>) { + let error_failure_reason = serde_bencode::from_str::(response_text) + .unwrap_or_else(|_| panic!( + "response body should be a valid bencoded string for the '{expected_failure_reason}' error, got \"{response_text}\"" + ) + ) + .failure_reason; + + assert!( + error_failure_reason.contains(expected_failure_reason), + r#": + response: `"{error_failure_reason}"` + does not contain: `"{expected_failure_reason}"`, {location}"# + ); +} + pub async fn assert_empty_announce_response(response: Response) { assert_eq!(response.status(), 200); let announce_response: Announce = serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); @@ -64,90 +82,52 @@ pub async fn assert_is_announce_response(response: Response) { pub async fn assert_internal_server_error_response(response: Response) { assert_eq!(response.status(), 200); - let body = response.text().await.unwrap(); - let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { - panic!( - "response body should be a valid bencoded string for the 'internal server' error, got \"{}\"", - &body - ) - }); - let expected_error_response = Error { - failure_reason: "internal server error".to_string(), - }; - assert_eq!(error_response, expected_error_response); +
+ assert_error_bencoded(&response.text().await.unwrap(), "internal server", Location::caller()); } pub async fn assert_invalid_info_hash_error_response(response: Response) { assert_eq!(response.status(), 200); - let body = response.text().await.unwrap(); - let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { - panic!( - "response body should be a valid bencoded string for the 'invalid info_hash' error, got \"{}\"", - &body - ) - }); - let expected_error_response = Error { - failure_reason: "info_hash is either missing or invalid".to_string(), - }; - assert_eq!(error_response, expected_error_response); + + assert_error_bencoded( + &response.text().await.unwrap(), + "info_hash is either missing or invalid", + Location::caller(), + ); } pub async fn assert_invalid_peer_id_error_response(response: Response) { assert_eq!(response.status(), 200); - let body = response.text().await.unwrap(); - let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { - panic!( - "response body should be a valid bencoded string for the 'invalid peer id' error, got \"{}\"", - &body - ) - }); - let expected_error_response = Error { - failure_reason: "peer_id is either missing or invalid".to_string(), - }; - assert_eq!(error_response, expected_error_response); + + assert_error_bencoded( + &response.text().await.unwrap(), + "peer_id is either missing or invalid", + Location::caller(), + ); } pub async fn assert_torrent_not_in_whitelist_error_response(response: Response) { assert_eq!(response.status(), 200); - let body = response.text().await.unwrap(); - let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { - panic!( - "response body should be a valid bencoded string for the 'torrent not on whitelist' error, got \"{}\"", - &body - ) - }); - let expected_error_response = Error { - failure_reason: "torrent not on whitelist".to_string(), - }; - assert_eq!(error_response, expected_error_response); + + assert_error_bencoded( 
+ &response.text().await.unwrap(), + "torrent not on whitelist", + Location::caller(), + ); } pub async fn assert_peer_not_authenticated_error_response(response: Response) { assert_eq!(response.status(), 200); - let body = response.text().await.unwrap(); - let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { - panic!( - "response body should be a valid bencoded string for the 'peer not authenticated' error, got \"{}\"", - &body - ) - }); - let expected_error_response = Error { - failure_reason: "peer not authenticated".to_string(), - }; - assert_eq!(error_response, expected_error_response); + + assert_error_bencoded(&response.text().await.unwrap(), "peer not authenticated", Location::caller()); } pub async fn assert_invalid_authentication_key_error_response(response: Response) { assert_eq!(response.status(), 200); - let body = response.text().await.unwrap(); - let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { - panic!( - "response body should be a valid bencoded string for the 'invalid authentication key' error, got \"{}\"", - &body - ) - }); - let expected_error_response = Error { - failure_reason: "invalid authentication key".to_string(), - }; - assert_eq!(error_response, expected_error_response); + + assert_error_bencoded( + &response.text().await.unwrap(), + "invalid authentication key", + Location::caller(), + ); } From ff9d379d80f90d9778deca8daae91c177b4158da Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Feb 2023 20:28:34 +0100 Subject: [PATCH 0362/1003] dev: edit api to pass-through underlying error --- src/apis/handlers.rs | 20 ++++++++++---------- src/apis/responses.rs | 26 ++++++++++++++------------ 2 files changed, 24 insertions(+), 22 deletions(-) diff --git a/src/apis/handlers.rs b/src/apis/handlers.rs index 8d9689025..38959edbe 100644 --- a/src/apis/handlers.rs +++ b/src/apis/handlers.rs @@ -66,8 +66,8 @@ pub async fn add_torrent_to_whitelist_handler( match 
InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(..) => ok_response(), - Err(..) => failed_to_whitelist_torrent_response(), + Ok(_) => ok_response(), + Err(e) => failed_to_whitelist_torrent_response(e), }, } } @@ -79,16 +79,16 @@ pub async fn remove_torrent_from_whitelist_handler( match InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(..) => ok_response(), - Err(..) => failed_to_remove_torrent_from_whitelist_response(), + Ok(_) => ok_response(), + Err(e) => failed_to_remove_torrent_from_whitelist_response(e), }, } } pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { match tracker.load_whitelist().await { - Ok(..) => ok_response(), - Err(..) => failed_to_reload_whitelist_response(), + Ok(_) => ok_response(), + Err(e) => failed_to_reload_whitelist_response(e), } } @@ -96,7 +96,7 @@ pub async fn generate_auth_key_handler(State(tracker): State>, Path let seconds_valid = seconds_valid_or_key; match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), - Err(_) => failed_to_generate_key_response(), + Err(e) => failed_to_generate_key_response(e), } } @@ -111,15 +111,15 @@ pub async fn delete_auth_key_handler( Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), Ok(key_id) => match tracker.remove_auth_key(&key_id.to_string()).await { Ok(_) => ok_response(), - Err(_) => failed_to_delete_key_response(), + Err(e) => failed_to_delete_key_response(e), }, } } pub async fn reload_keys_handler(State(tracker): State>) -> Response { match tracker.load_keys().await { - Ok(..) => ok_response(), - Err(..) 
=> failed_to_reload_keys_response(), + Ok(_) => ok_response(), + Err(e) => failed_to_reload_keys_response(e), } } diff --git a/src/apis/responses.rs b/src/apis/responses.rs index b150b4bff..3b0946396 100644 --- a/src/apis/responses.rs +++ b/src/apis/responses.rs @@ -1,3 +1,5 @@ +use std::error::Error; + use axum::http::{header, StatusCode}; use axum::response::{IntoResponse, Json, Response}; use serde::Serialize; @@ -110,33 +112,33 @@ pub fn torrent_not_known_response() -> Response { } #[must_use] -pub fn failed_to_remove_torrent_from_whitelist_response() -> Response { - unhandled_rejection_response("failed to remove torrent from whitelist".to_string()) +pub fn failed_to_remove_torrent_from_whitelist_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to remove torrent from whitelist: {e}")) } #[must_use] -pub fn failed_to_whitelist_torrent_response() -> Response { - unhandled_rejection_response("failed to whitelist torrent".to_string()) +pub fn failed_to_whitelist_torrent_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to whitelist torrent: {e}")) } #[must_use] -pub fn failed_to_reload_whitelist_response() -> Response { - unhandled_rejection_response("failed to reload whitelist".to_string()) +pub fn failed_to_reload_whitelist_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to reload whitelist: {e}")) } #[must_use] -pub fn failed_to_generate_key_response() -> Response { - unhandled_rejection_response("failed to generate key".to_string()) +pub fn failed_to_generate_key_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to generate key: {e}")) } #[must_use] -pub fn failed_to_delete_key_response() -> Response { - unhandled_rejection_response("failed to delete key".to_string()) +pub fn failed_to_delete_key_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to delete key: {e}")) } #[must_use] -pub fn failed_to_reload_keys_response() -> 
Response { - unhandled_rejection_response("failed to reload keys".to_string()) +pub fn failed_to_reload_keys_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to reload keys: {e}")) } /// This error response is to keep backward compatibility with the old Warp API. From aa0f371d5b1b91770ce4c75634a2087abb82327f Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Feb 2023 20:12:50 +0100 Subject: [PATCH 0363/1003] dev: add located error module --- src/lib.rs | 1 + src/located_error.rs | 103 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 104 insertions(+) create mode 100644 src/located_error.rs diff --git a/src/lib.rs b/src/lib.rs index e8cf53045..cbda2854c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,6 +3,7 @@ pub mod config; pub mod databases; pub mod http; pub mod jobs; +pub mod located_error; pub mod logging; pub mod protocol; pub mod setup; diff --git a/src/located_error.rs b/src/located_error.rs new file mode 100644 index 000000000..d45517e5a --- /dev/null +++ b/src/located_error.rs @@ -0,0 +1,103 @@ +// https://stackoverflow.com/questions/74336993/getting-line-numbers-with-when-using-boxdyn-stderrorerror + +use std::error::Error; +use std::panic::Location; +use std::sync::Arc; + +pub struct Located(pub E); + +#[derive(Debug)] +pub struct LocatedError<'a, E> +where + E: Error + ?Sized + Send + Sync, +{ + source: Arc, + location: Box>, +} + +impl<'a, E> std::fmt::Display for LocatedError<'a, E> +where + E: Error + ?Sized + Send + Sync, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}, {}", self.source, self.location) + } +} + +impl<'a, E> Error for LocatedError<'a, E> +where + E: Error + ?Sized + Send + Sync + 'static, +{ + fn source(&self) -> Option<&(dyn Error + 'static)> { + Some(&self.source) + } +} + +impl<'a, E> Clone for LocatedError<'a, E> +where + E: Error + ?Sized + Send + Sync, +{ + fn clone(&self) -> Self { + LocatedError { + source: self.source.clone(), + 
location: self.location.clone(), + } + } +} + +#[allow(clippy::from_over_into)] +impl<'a, E> Into> for Located +where + E: Error + Send + Sync, + Arc: Clone, +{ + #[track_caller] + fn into(self) -> LocatedError<'a, E> { + let e = LocatedError { + source: Arc::new(self.0), + location: Box::new(*std::panic::Location::caller()), + }; + log::debug!("{e}"); + e + } +} + +#[allow(clippy::from_over_into)] +impl<'a> Into> for Arc { + #[track_caller] + fn into(self) -> LocatedError<'a, dyn std::error::Error + Send + Sync> { + LocatedError { + source: self, + location: Box::new(*std::panic::Location::caller()), + } + } +} + +#[cfg(test)] +mod tests { + use std::panic::Location; + + use super::LocatedError; + use crate::located_error::Located; + + #[derive(thiserror::Error, Debug)] + enum TestError { + #[error("Test")] + Test, + } + + #[track_caller] + fn get_caller_location() -> Location<'static> { + *Location::caller() + } + + #[test] + fn error_should_include_location() { + let e = TestError::Test; + + let b: LocatedError = Located(e).into(); + let l = get_caller_location(); + + assert_eq!(b.location.file(), l.file()); + } +} From 41501397b7c05a24ad03f6a7ac7191292388cd51 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Feb 2023 20:16:04 +0100 Subject: [PATCH 0364/1003] dev: located error for database --- src/databases/driver.rs | 25 ++++- src/databases/error.rs | 100 ++++++++++++++++--- src/databases/mod.rs | 70 ++++++++------ src/databases/mysql.rs | 186 +++++++++++++++-------------------- src/databases/sqlite.rs | 210 +++++++++++++++++++--------------------- src/tracker/mod.rs | 5 +- 6 files changed, 329 insertions(+), 267 deletions(-) diff --git a/src/databases/driver.rs b/src/databases/driver.rs index 7eaa9064e..c601f1866 100644 --- a/src/databases/driver.rs +++ b/src/databases/driver.rs @@ -1,7 +1,30 @@ use serde::{Deserialize, Serialize}; -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] +use super::error::Error; +use super::mysql::Mysql; +use 
super::sqlite::Sqlite; +use super::{Builder, Database}; + +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, derive_more::Display, Clone)] pub enum Driver { Sqlite3, MySQL, } + +impl Driver { + /// . + /// + /// # Errors + /// + /// This function will return an error if unable to connect to the database. + pub fn build(&self, db_path: &str) -> Result, Error> { + let database = match self { + Driver::Sqlite3 => Builder::::build(db_path), + Driver::MySQL => Builder::::build(db_path), + }?; + + database.create_database_tables().expect("Could not create database tables."); + + Ok(database) + } +} diff --git a/src/databases/error.rs b/src/databases/error.rs index 467db407f..4bee82f19 100644 --- a/src/databases/error.rs +++ b/src/databases/error.rs @@ -1,21 +1,95 @@ -use derive_more::{Display, Error}; +use std::panic::Location; +use std::sync::Arc; -#[derive(Debug, Display, PartialEq, Eq, Error)] -#[allow(dead_code)] +use r2d2_mysql::mysql::UrlError; + +use super::driver::Driver; +use crate::located_error::{Located, LocatedError}; + +#[derive(thiserror::Error, Debug, Clone)] pub enum Error { - #[display(fmt = "Query returned no rows.")] - QueryReturnedNoRows, - #[display(fmt = "Invalid query.")] - InvalidQuery, - #[display(fmt = "Database error.")] - DatabaseError, + #[error("The {driver} query unexpectedly returned nothing: {source}")] + QueryReturnedNoRows { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + driver: Driver, + }, + + #[error("The {driver} query was malformed: {source}")] + InvalidQuery { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + driver: Driver, + }, + + #[error("Unable to insert record into {driver} database, {location}")] + InsertFailed { + location: &'static Location<'static>, + driver: Driver, + }, + + #[error("Failed to remove record from {driver} database, error-code: {error_code}, {location}")] + DeleteFailed { + location: &'static Location<'static>, + error_code: usize, + driver: Driver, + 
}, + + #[error("Failed to connect to {driver} database: {source}")] + ConnectionError { + source: LocatedError<'static, UrlError>, + driver: Driver, + }, + + #[error("Failed to create r2d2 {driver} connection pool: {source}")] + ConnectionPool { + source: LocatedError<'static, r2d2::Error>, + driver: Driver, + }, } impl From for Error { - fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { - match e { - r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows, - _ => Error::InvalidQuery, + #[track_caller] + fn from(err: r2d2_sqlite::rusqlite::Error) -> Self { + match err { + r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows { + source: (Arc::new(err) as Arc).into(), + driver: Driver::Sqlite3, + }, + _ => Error::InvalidQuery { + source: (Arc::new(err) as Arc).into(), + driver: Driver::Sqlite3, + }, + } + } +} + +impl From for Error { + #[track_caller] + fn from(err: r2d2_mysql::mysql::Error) -> Self { + let e: Arc = Arc::new(err); + Error::InvalidQuery { + source: e.into(), + driver: Driver::MySQL, + } + } +} + +impl From for Error { + #[track_caller] + fn from(err: UrlError) -> Self { + Self::ConnectionError { + source: Located(err).into(), + driver: Driver::MySQL, + } + } +} + +impl From<(r2d2::Error, Driver)> for Error { + #[track_caller] + fn from(e: (r2d2::Error, Driver)) -> Self { + let (err, driver) = e; + Self::ConnectionPool { + source: Located(err).into(), + driver, } } } diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 873dd70eb..809decc2c 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -3,37 +3,48 @@ pub mod error; pub mod mysql; pub mod sqlite; +use std::marker::PhantomData; + use async_trait::async_trait; -use self::driver::Driver; use self::error::Error; -use crate::databases::mysql::Mysql; -use crate::databases::sqlite::Sqlite; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth; -/// # Errors -/// -/// Will return `r2d2::Error` if `db_path` is not able 
to create a database. -pub fn connect(db_driver: &Driver, db_path: &str) -> Result, r2d2::Error> { - let database: Box = match db_driver { - Driver::Sqlite3 => { - let db = Sqlite::new(db_path)?; - Box::new(db) - } - Driver::MySQL => { - let db = Mysql::new(db_path)?; - Box::new(db) - } - }; - - database.create_database_tables().expect("Could not create database tables."); - - Ok(database) +pub(self) struct Builder +where + T: Database, +{ + phantom: PhantomData, +} + +impl Builder +where + T: Database + 'static, +{ + /// . + /// + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create a database. + pub(self) fn build(db_path: &str) -> Result, Error> { + Ok(Box::new(T::new(db_path)?)) + } } #[async_trait] pub trait Database: Sync + Send { + /// . + /// + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create a database. + fn new(db_path: &str) -> Result + where + Self: std::marker::Sized; + + /// . + /// /// # Errors /// /// Will return `Error` if unable to create own tables. 
@@ -52,27 +63,22 @@ pub trait Database: Sync + Send { async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result; + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result, Error>; async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; - async fn get_key_from_keys(&self, key: &str) -> Result; + async fn get_key_from_keys(&self, key: &str) -> Result, Error>; async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result; async fn remove_key_from_keys(&self, key: &str) -> Result; async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { - self.get_info_hash_from_whitelist(&info_hash.clone().to_string()) - .await - .map_or_else( - |e| match e { - Error::QueryReturnedNoRows => Ok(false), - e => Err(e), - }, - |_| Ok(true), - ) + Ok(self + .get_info_hash_from_whitelist(&info_hash.clone().to_string()) + .await? + .is_some()) } } diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 71b06378c..ac54ebb82 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -8,33 +8,32 @@ use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MysqlConnectionManager; +use super::driver::Driver; use crate::databases::{Database, Error}; use crate::protocol::common::AUTH_KEY_LENGTH; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth; +const DRIVER: Driver = Driver::MySQL; + pub struct Mysql { pool: Pool, } -impl Mysql { +#[async_trait] +impl Database for Mysql { /// # Errors /// /// Will return `r2d2::Error` if `db_path` is not able to create `MySQL` database. 
- pub fn new(db_path: &str) -> Result { - let opts = Opts::from_url(db_path).expect("Failed to connect to MySQL database."); + fn new(db_path: &str) -> Result { + let opts = Opts::from_url(db_path)?; let builder = OptsBuilder::from_opts(opts); let manager = MysqlConnectionManager::new(builder); - let pool = r2d2::Pool::builder() - .build(manager) - .expect("Failed to create r2d2 MySQL connection pool."); + let pool = r2d2::Pool::builder().build(manager).map_err(|e| (e, DRIVER))?; Ok(Self { pool }) } -} -#[async_trait] -impl Database for Mysql { fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( @@ -63,7 +62,7 @@ impl Database for Mysql { i8::try_from(AUTH_KEY_LENGTH).expect("auth::Auth Key Length Should fit within a i8!") ); - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; conn.query_drop(&create_torrents_table) .expect("Could not create torrents table."); @@ -87,7 +86,7 @@ impl Database for Mysql { DROP TABLE `keys`;" .to_string(); - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; conn.query_drop(&drop_whitelist_table) .expect("Could not drop `whitelist` table."); @@ -99,155 +98,124 @@ impl Database for Mysql { } async fn load_persistent_torrents(&self) -> Result, Error> { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; - - let torrents: Vec<(InfoHash, u32)> = conn - .query_map( - "SELECT info_hash, completed FROM torrents", - |(info_hash_string, completed): (String, u32)| { - let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); - (info_hash, completed) - }, - ) - .map_err(|_| Error::QueryReturnedNoRows)?; + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let torrents = conn.query_map( + "SELECT info_hash, completed FROM torrents", + |(info_hash_string, completed): (String, u32)| { + let 
info_hash = InfoHash::from_str(&info_hash_string).unwrap(); + (info_hash, completed) + }, + )?; Ok(torrents) } async fn load_keys(&self) -> Result, Error> { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; - - let keys: Vec = conn - .query_map( - "SELECT `key`, valid_until FROM `keys`", - |(key, valid_until): (String, i64)| auth::Key { - key, - valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), - }, - ) - .map_err(|_| Error::QueryReturnedNoRows)?; + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let keys = conn.query_map( + "SELECT `key`, valid_until FROM `keys`", + |(key, valid_until): (String, i64)| auth::Key { + key, + valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), + }, + )?; Ok(keys) } async fn load_whitelist(&self) -> Result, Error> { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - let info_hashes: Vec = conn - .query_map("SELECT info_hash FROM whitelist", |info_hash: String| { - InfoHash::from_str(&info_hash).unwrap() - }) - .map_err(|_| Error::QueryReturnedNoRows)?; + let info_hashes = conn.query_map("SELECT info_hash FROM whitelist", |info_hash: String| { + InfoHash::from_str(&info_hash).unwrap() + })?; Ok(info_hashes) } async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + const COMMAND : &str = "INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)"; + + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let info_hash_str = info_hash.to_string(); debug!("{}", info_hash_str); - match conn.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)", params! 
{ info_hash_str, completed }) { - Ok(_) => { - Ok(()) - } - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } - } + Ok(conn.exec_drop(COMMAND, params! { info_hash_str, completed })?) } - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; - - match conn - .exec_first::( - "SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", - params! { info_hash }, - ) - .map_err(|_| Error::DatabaseError)? - { - Some(info_hash) => Ok(InfoHash::from_str(&info_hash).unwrap()), - None => Err(Error::QueryReturnedNoRows), - } + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let select = conn.exec_first::( + "SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", + params! { info_hash }, + )?; + + let info_hash = select.map(|f| InfoHash::from_str(&f).expect("Failed to decode InfoHash String from DB!")); + + Ok(info_hash) } async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let info_hash_str = info_hash.to_string(); - match conn.exec_drop( + conn.exec_drop( "INSERT INTO whitelist (info_hash) VALUES (:info_hash_str)", params! { info_hash_str }, - ) { - Ok(_) => Ok(1), - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } - } + )?; + + Ok(1) } async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let info_hash = info_hash.to_string(); - match conn.exec_drop("DELETE FROM whitelist WHERE info_hash = :info_hash", params! 
{ info_hash }) { - Ok(_) => Ok(1), - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } - } + conn.exec_drop("DELETE FROM whitelist WHERE info_hash = :info_hash", params! { info_hash })?; + + Ok(1) } - async fn get_key_from_keys(&self, key: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + async fn get_key_from_keys(&self, key: &str) -> Result, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - match conn - .exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) - .map_err(|_| Error::QueryReturnedNoRows)? - { - Some((key, valid_until)) => Ok(auth::Key { - key, - valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), - }), - None => Err(Error::InvalidQuery), - } + let query = + conn.exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }); + + let key = query?; + + Ok(key.map(|(key, expiry)| auth::Key { + key, + valid_until: Some(Duration::from_secs(expiry.unsigned_abs())), + })) } async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let key = auth_key.key.to_string(); let valid_until = auth_key.valid_until.unwrap_or(Duration::ZERO).as_secs().to_string(); - match conn.exec_drop( + conn.exec_drop( "INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", params! { key, valid_until }, - ) { - Ok(_) => Ok(1), - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } - } + )?; + + Ok(1) } async fn remove_key_from_keys(&self, key: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; - - match conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! 
{ key }) { - Ok(_) => Ok(1), - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } - } + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! { key })?; + + Ok(1) } } diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 1d7caf052..3425b15c8 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -1,32 +1,32 @@ +use std::panic::Location; use std::str::FromStr; use async_trait::async_trait; -use log::debug; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; +use super::driver::Driver; use crate::databases::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth; +const DRIVER: Driver = Driver::Sqlite3; + pub struct Sqlite { pool: Pool, } -impl Sqlite { +#[async_trait] +impl Database for Sqlite { /// # Errors /// /// Will return `r2d2::Error` if `db_path` is not able to create `SqLite` database. 
- pub fn new(db_path: &str) -> Result { + fn new(db_path: &str) -> Result { let cm = SqliteConnectionManager::file(db_path); - let pool = Pool::new(cm).expect("Failed to create r2d2 SQLite connection pool."); - Ok(Sqlite { pool }) + Pool::new(cm).map_or_else(|err| Err((err, Driver::Sqlite3).into()), |pool| Ok(Sqlite { pool })) } -} -#[async_trait] -impl Database for Sqlite { fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( @@ -51,13 +51,13 @@ impl Database for Sqlite { );" .to_string(); - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + conn.execute(&create_whitelist_table, [])?; + conn.execute(&create_keys_table, [])?; + conn.execute(&create_torrents_table, [])?; - conn.execute(&create_whitelist_table, []) - .and_then(|_| conn.execute(&create_keys_table, [])) - .and_then(|_| conn.execute(&create_torrents_table, [])) - .map_err(|_| Error::InvalidQuery) - .map(|_| ()) + Ok(()) } fn drop_database_tables(&self) -> Result<(), Error> { @@ -73,17 +73,17 @@ impl Database for Sqlite { DROP TABLE keys;" .to_string(); - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; conn.execute(&drop_whitelist_table, []) .and_then(|_| conn.execute(&drop_torrents_table, [])) - .and_then(|_| conn.execute(&drop_keys_table, [])) - .map_err(|_| Error::InvalidQuery) - .map(|_| ()) + .and_then(|_| conn.execute(&drop_keys_table, []))?; + + Ok(()) } async fn load_persistent_torrents(&self) -> Result, Error> { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; @@ -94,13 +94,16 @@ impl Database for Sqlite { Ok((info_hash, completed)) })?; + //torrent_iter?; + //let torrent_iter = torrent_iter.unwrap(); + let torrents: Vec<(InfoHash, u32)> = 
torrent_iter.filter_map(std::result::Result::ok).collect(); Ok(torrents) } async fn load_keys(&self) -> Result, Error> { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; @@ -120,7 +123,7 @@ impl Database for Sqlite { } async fn load_whitelist(&self) -> Result, Error> { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist")?; @@ -136,130 +139,117 @@ impl Database for Sqlite { } async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; - match conn.execute( + let insert = conn.execute( "INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", [info_hash.to_string(), completed.to_string()], - ) { - Ok(updated) => { - if updated > 0 { - return Ok(()); - } - Err(Error::QueryReturnedNoRows) - } - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } + )?; + + if insert == 0 { + Err(Error::InsertFailed { + location: Location::caller(), + driver: DRIVER, + }) + } else { + Ok(()) } } - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result, Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; + let mut rows = stmt.query([info_hash])?; - match rows.next() { - Ok(row) => match row { - Some(row) => Ok(InfoHash::from_str(&row.get_unwrap::<_, String>(0)).unwrap()), - None => Err(Error::QueryReturnedNoRows), - }, - Err(e) => { - 
debug!("{:?}", e); - Err(Error::InvalidQuery) - } - } + let query = rows.next()?; + + Ok(query.map(|f| InfoHash::from_str(&f.get_unwrap::<_, String>(0)).unwrap())) } async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; - - match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", [info_hash.to_string()]) { - Ok(updated) => { - if updated > 0 { - return Ok(updated); - } - Err(Error::QueryReturnedNoRows) - } - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let insert = conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", [info_hash.to_string()])?; + + if insert == 0 { + Err(Error::InsertFailed { + location: Location::caller(), + driver: DRIVER, + }) + } else { + Ok(insert) } } async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; - - match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", [info_hash.to_string()]) { - Ok(updated) => { - if updated > 0 { - return Ok(updated); - } - Err(Error::QueryReturnedNoRows) - } - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let deleted = conn.execute("DELETE FROM whitelist WHERE info_hash = ?", [info_hash.to_string()])?; + + if deleted == 1 { + // should only remove a single record. 
+ Ok(deleted) + } else { + Err(Error::DeleteFailed { + location: Location::caller(), + error_code: deleted, + driver: DRIVER, + }) } } - async fn get_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + async fn get_key_from_keys(&self, key: &str) -> Result, Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; + let mut rows = stmt.query([key.to_string()])?; - if let Some(row) = rows.next()? { - let key: String = row.get(0).unwrap(); - let valid_until: i64 = row.get(1).unwrap(); + let key = rows.next()?; - Ok(auth::Key { - key, - valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), - }) - } else { - Err(Error::QueryReturnedNoRows) - } + Ok(key.map(|f| { + let expiry: i64 = f.get(1).unwrap(); + auth::Key { + key: f.get(0).unwrap(), + valid_until: Some(DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs())), + } + })) } async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; - match conn.execute( + let insert = conn.execute( "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", [auth_key.key.to_string(), auth_key.valid_until.unwrap().as_secs().to_string()], - ) { - Ok(updated) => { - if updated > 0 { - return Ok(updated); - } - Err(Error::QueryReturnedNoRows) - } - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } + )?; + + if insert == 0 { + Err(Error::InsertFailed { + location: Location::caller(), + driver: DRIVER, + }) + } else { + Ok(insert) } } async fn remove_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; - - match conn.execute("DELETE FROM keys WHERE key = ?", [key]) { - Ok(updated) => { - if updated > 0 { - return Ok(updated); - } - Err(Error::QueryReturnedNoRows) - } - Err(e) 
=> { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let deleted = conn.execute("DELETE FROM keys WHERE key = ?", [key])?; + + if deleted == 1 { + // should only remove a single record. + Ok(deleted) + } else { + Err(Error::DeleteFailed { + location: Location::caller(), + error_code: deleted, + driver: DRIVER, + }) } } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 4f1dab49b..e3b3cf88b 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -15,6 +15,7 @@ use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; use crate::config::Configuration; +use crate::databases::driver::Driver; use crate::databases::{self, Database}; use crate::protocol::info_hash::InfoHash; @@ -45,8 +46,8 @@ impl Tracker { config: &Arc, stats_event_sender: Option>, stats_repository: statistics::Repo, - ) -> Result { - let database = databases::connect(&config.db_driver, &config.db_path)?; + ) -> Result { + let database = Driver::build(&config.db_driver, &config.db_path)?; Ok(Tracker { config: config.clone(), From 14e1c8f8de737321b116b70c6777952597c3c133 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Feb 2023 20:35:02 +0100 Subject: [PATCH 0365/1003] dev: located config errors --- src/config.rs | 87 +++++++++++++++++++++++++-------------------------- 1 file changed, 42 insertions(+), 45 deletions(-) diff --git a/src/config.rs b/src/config.rs index 3ca4b37d8..7ed0f9fa7 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,16 +1,21 @@ use std::collections::{HashMap, HashSet}; use std::net::IpAddr; +use std::panic::Location; use std::path::Path; use std::str::FromStr; +use std::sync::Arc; use std::{env, fs}; use config::{Config, ConfigError, File, FileFormat}; +use log::warn; use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; +use thiserror::Error; use {std, toml}; use crate::databases::driver::Driver; +use 
crate::located_error::{Located, LocatedError}; use crate::tracker::mode; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] @@ -74,13 +79,30 @@ pub struct Configuration { pub http_api: HttpApi, } -#[derive(Debug)] +#[derive(Error, Debug)] pub enum Error { - Message(String), - ConfigError(ConfigError), - IOError(std::io::Error), - ParseError(toml::de::Error), - TrackerModeIncompatible, + #[error("Unable to load from Environmental Variable: {source}")] + UnableToLoadFromEnvironmentVariable { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + + #[error("Default configuration created at: `{path}`, please review and reload tracker, {location}")] + CreatedNewConfigHalt { + location: &'static Location<'static>, + path: String, + }, + + #[error("Failed processing the configuration: {source}")] + ConfigError { source: LocatedError<'static, ConfigError> }, +} + +impl From for Error { + #[track_caller] + fn from(err: ConfigError) -> Self { + Self::ConfigError { + source: Located(err).into(), + } + } } /// This configuration is used for testing. 
It generates random config values so they do not collide @@ -129,20 +151,6 @@ fn random_port() -> u16 { rng.gen_range(49152..65535) } -impl std::fmt::Display for Error { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - Error::Message(e) => e.fmt(f), - Error::ConfigError(e) => e.fmt(f), - Error::IOError(e) => e.fmt(f), - Error::ParseError(e) => e.fmt(f), - Error::TrackerModeIncompatible => write!(f, "{self:?}"), - } - } -} - -impl std::error::Error for Error {} - impl Default for Configuration { fn default() -> Self { let mut configuration = Configuration { @@ -210,21 +218,19 @@ impl Configuration { let mut config = Config::default(); if Path::new(path).exists() { - config = config_builder - .add_source(File::with_name(path)) - .build() - .map_err(Error::ConfigError)?; + config = config_builder.add_source(File::with_name(path)).build()?; } else { - eprintln!("No config file found."); - eprintln!("Creating config file.."); + warn!("No config file found."); + warn!("Creating config file.."); let config = Configuration::default(); config.save_to_file(path)?; - return Err(Error::Message( - "Please edit the config.TOML and restart the tracker.".to_string(), - )); + return Err(Error::CreatedNewConfigHalt { + location: Location::caller(), + path: path.to_string(), + }); } - let torrust_config: Configuration = config.try_deserialize().map_err(Error::ConfigError)?; + let torrust_config: Configuration = config.try_deserialize()?; Ok(torrust_config) } @@ -237,15 +243,13 @@ impl Configuration { Ok(config_toml) => { let config_builder = Config::builder() .add_source(File::from_str(&config_toml, FileFormat::Toml)) - .build() - .map_err(Error::ConfigError)?; - let config = config_builder.try_deserialize().map_err(Error::ConfigError)?; + .build()?; + let config = config_builder.try_deserialize()?; Ok(config) } - Err(_) => Err(Error::Message(format!( - "No environment variable for configuration found: {}", - &config_env_var_name - ))), + Err(e) => 
Err(Error::UnableToLoadFromEnvironmentVariable { + source: (Arc::new(e) as Arc).into(), + }), } } @@ -262,7 +266,7 @@ impl Configuration { #[cfg(test)] mod tests { - use crate::config::{Configuration, Error}; + use crate::config::Configuration; #[cfg(test)] fn default_config_toml() -> String { @@ -381,13 +385,6 @@ mod tests { assert_eq!(configuration, Configuration::default()); } - #[test] - fn configuration_error_could_be_displayed() { - let error = Error::TrackerModeIncompatible; - - assert_eq!(format!("{error}"), "TrackerModeIncompatible"); - } - #[test] fn http_api_configuration_should_check_if_it_contains_a_token() { let configuration = Configuration::default(); From 4e0c99314a619ccf67b0993b2ef78079016b48dc Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Feb 2023 20:35:54 +0100 Subject: [PATCH 0366/1003] dev: located tracker and auth errors --- src/http/handlers.rs | 9 ++---- src/tracker/auth.rs | 67 +++++++++++++++++++++++++++++++++--------- src/tracker/error.rs | 20 +++++++++++++ src/tracker/mod.rs | 28 +++++++++++++----- src/tracker/torrent.rs | 10 ------- src/udp/error.rs | 18 ++++++------ 6 files changed, 106 insertions(+), 46 deletions(-) create mode 100644 src/tracker/error.rs diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 1170b7188..129e0d9ea 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -23,12 +23,9 @@ pub async fn authenticate( tracker: Arc, ) -> Result<(), Error> { tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { - torrent::Error::TorrentNotWhitelisted => Error::TorrentNotWhitelisted, - torrent::Error::PeerNotAuthenticated => Error::PeerNotAuthenticated, - torrent::Error::PeerKeyNotValid => Error::PeerKeyNotValid, - torrent::Error::NoPeersFound => Error::NoPeersFound, - torrent::Error::CouldNotSendResponse => Error::InternalServer, - torrent::Error::InvalidInfoHash => Error::InvalidInfo, + tracker::error::Error::TorrentNotWhitelisted { info_hash, location } => 
Error::TorrentNotWhitelisted, + tracker::error::Error::PeerNotAuthenticated { location } => Error::PeerNotAuthenticated, + tracker::error::Error::PeerKeyNotValid { key, source } => Error::PeerKeyNotValid, }) } diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 3b8af96a1..197e0dc37 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -1,12 +1,17 @@ +use std::panic::Location; use std::str::FromStr; +use std::sync::Arc; use std::time::Duration; -use derive_more::{Display, Error}; +use chrono::{DateTime, NaiveDateTime, Utc}; +use derive_more::Display; use log::debug; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; +use thiserror::Error; +use crate::located_error::LocatedError; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time, TimeNow}; use crate::protocol::common::AUTH_KEY_LENGTH; @@ -38,14 +43,19 @@ pub fn verify(auth_key: &Key) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = Current::now(); match auth_key.valid_until { - Some(valid_untill) => { - if valid_untill < current_time { - Err(Error::KeyExpired) + Some(valid_until) => { + if valid_until < current_time { + Err(Error::KeyExpired { + location: Location::caller(), + }) } else { Ok(()) } } - None => Err(Error::KeyInvalid), + None => Err(Error::UnableToReadKey { + location: Location::caller(), + key: Box::new(auth_key.clone()), + }), } } @@ -57,6 +67,29 @@ pub struct Key { pub valid_until: Option, } +impl std::fmt::Display for Key { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "key: `{}`, valid until `{}`", + self.key, + match self.valid_until { + Some(duration) => format!( + "{}", + DateTime::::from_utc( + NaiveDateTime::from_timestamp( + i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), + duration.subsec_nanos(), + ), + Utc + ) + ), + None => "Empty!?".to_string(), + } + ) + } +} + impl Key { #[must_use] pub fn 
from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { @@ -108,21 +141,27 @@ impl FromStr for KeyId { } } -#[derive(Debug, Display, PartialEq, Eq, Error)] +#[derive(Debug, Error)] #[allow(dead_code)] pub enum Error { - #[display(fmt = "Key could not be verified.")] - KeyVerificationError, - #[display(fmt = "Key is invalid.")] - KeyInvalid, - #[display(fmt = "Key has expired.")] - KeyExpired, + #[error("Key could not be verified: {source}")] + KeyVerificationError { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + #[error("Failed to read key: {key}, {location}")] + UnableToReadKey { + location: &'static Location<'static>, + key: Box, + }, + #[error("Key has expired, {location}")] + KeyExpired { location: &'static Location<'static> }, } impl From for Error { fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { - eprintln!("{e}"); - Error::KeyVerificationError + Error::KeyVerificationError { + source: (Arc::new(e) as Arc).into(), + } } } diff --git a/src/tracker/error.rs b/src/tracker/error.rs new file mode 100644 index 000000000..51bcbf3bb --- /dev/null +++ b/src/tracker/error.rs @@ -0,0 +1,20 @@ +use std::panic::Location; + +use crate::located_error::LocatedError; + +#[derive(thiserror::Error, Debug, Clone)] +pub enum Error { + #[error("The supplied key: {key:?}, is not valid: {source}")] + PeerKeyNotValid { + key: super::auth::Key, + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + #[error("The peer is not authenticated, {location}")] + PeerNotAuthenticated { location: &'static Location<'static> }, + + #[error("The torrent: {info_hash}, is not whitelisted, {location}")] + TorrentNotWhitelisted { + info_hash: crate::protocol::info_hash::InfoHash, + location: &'static Location<'static>, + }, +} diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index e3b3cf88b..acbf7d536 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1,4 +1,5 @@ pub mod auth; +pub mod error; pub mod mode; pub mod peer; pub mod 
services; @@ -8,12 +9,14 @@ pub mod torrent; use std::collections::btree_map::Entry; use std::collections::BTreeMap; use std::net::SocketAddr; +use std::panic::Location; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; +use self::error::Error; use crate::config::Configuration; use crate::databases::driver::Driver; use crate::databases::{self, Database}; @@ -41,7 +44,7 @@ pub struct TorrentsMetrics { impl Tracker { /// # Errors /// - /// Will return a `r2d2::Error` if unable to connect to database. + /// Will return a `databases::error::Error` if unable to connect to database. pub fn new( config: &Arc, stats_event_sender: Option>, @@ -98,7 +101,10 @@ impl Tracker { pub async fn verify_auth_key(&self, auth_key: &auth::Key) -> Result<(), auth::Error> { // todo: use auth::KeyId for the function argument `auth_key` match self.keys.read().await.get(&auth_key.key) { - None => Err(auth::Error::KeyInvalid), + None => Err(auth::Error::UnableToReadKey { + location: Location::caller(), + key: Box::new(auth_key.clone()), + }), Some(key) => auth::verify(key), } } @@ -204,7 +210,7 @@ impl Tracker { /// Will return a `torrent::Error::PeerNotAuthenticated` if the `key` is `None`. /// /// Will return a `torrent::Error::TorrentNotWhitelisted` if the the Tracker is in listed mode and the `info_hash` is not whitelisted. 
- pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), torrent::Error> { + pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), Error> { // no authentication needed in public mode if self.is_public() { return Ok(()); @@ -214,19 +220,27 @@ impl Tracker { if self.is_private() { match key { Some(key) => { - if self.verify_auth_key(key).await.is_err() { - return Err(torrent::Error::PeerKeyNotValid); + if let Err(e) = self.verify_auth_key(key).await { + return Err(Error::PeerKeyNotValid { + key: key.clone(), + source: (Arc::new(e) as Arc).into(), + }); } } None => { - return Err(torrent::Error::PeerNotAuthenticated); + return Err(Error::PeerNotAuthenticated { + location: Location::caller(), + }); } } } // check if info_hash is whitelisted if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await { - return Err(torrent::Error::TorrentNotWhitelisted); + return Err(Error::TorrentNotWhitelisted { + info_hash: *info_hash, + location: Location::caller(), + }); } Ok(()) diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index e292dff54..b5535a932 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -99,16 +99,6 @@ pub struct SwamStats { pub leechers: u32, } -#[derive(Debug)] -pub enum Error { - TorrentNotWhitelisted, - PeerNotAuthenticated, - PeerKeyNotValid, - NoPeersFound, - CouldNotSendResponse, - InvalidInfoHash, -} - #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; diff --git a/src/udp/error.rs b/src/udp/error.rs index c5fbb3929..2fbaaf984 100644 --- a/src/udp/error.rs +++ b/src/udp/error.rs @@ -1,6 +1,6 @@ use thiserror::Error; -use crate::tracker::torrent; +use crate::tracker; #[derive(Error, Debug)] pub enum Error { @@ -35,15 +35,15 @@ pub enum Error { BadRequest, } -impl From for Error { - fn from(e: torrent::Error) -> Self { +impl From for Error { + fn from(e: tracker::error::Error) -> Self { match e { - 
torrent::Error::TorrentNotWhitelisted => Error::TorrentNotWhitelisted, - torrent::Error::PeerNotAuthenticated => Error::PeerNotAuthenticated, - torrent::Error::PeerKeyNotValid => Error::PeerKeyNotValid, - torrent::Error::NoPeersFound => Error::NoPeersFound, - torrent::Error::CouldNotSendResponse => Error::InternalServer, - torrent::Error::InvalidInfoHash => Error::InvalidInfoHash, + tracker::error::Error::TorrentNotWhitelisted { + info_hash: _, + location: _, + } => Error::TorrentNotWhitelisted, + tracker::error::Error::PeerNotAuthenticated { location: _ } => Error::PeerNotAuthenticated, + tracker::error::Error::PeerKeyNotValid { key: _, source: _ } => Error::PeerKeyNotValid, } } } From 662123bbf82a5107729d26539e1f050852d8e59e Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Feb 2023 21:12:58 +0100 Subject: [PATCH 0367/1003] dev: located udp errors --- src/udp/connection_cookie.rs | 5 +++- src/udp/error.rs | 56 +++++++++++------------------------- src/udp/handlers.rs | 18 ++++++++++-- 3 files changed, 36 insertions(+), 43 deletions(-) diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index 3daa3e0f6..ef2a8b219 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -1,4 +1,5 @@ use std::net::SocketAddr; +use std::panic::Location; use aquatic_udp_protocol::ConnectionId; @@ -49,7 +50,9 @@ pub fn check(remote_address: &SocketAddr, connection_cookie: &Cookie) -> Result< return Ok(checking_time_extent); } } - Err(Error::InvalidConnectionId) + Err(Error::InvalidConnectionId { + location: Location::caller(), + }) } mod cookie_builder { diff --git a/src/udp/error.rs b/src/udp/error.rs index 2fbaaf984..de66eb2bf 100644 --- a/src/udp/error.rs +++ b/src/udp/error.rs @@ -1,49 +1,27 @@ +use std::panic::Location; + use thiserror::Error; -use crate::tracker; +use crate::located_error::LocatedError; #[derive(Error, Debug)] pub enum Error { - #[error("internal server error")] - InternalServer, + #[error("tracker server 
error: {source}")] + TrackerError { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, - #[error("info_hash is either missing or invalid")] - InvalidInfoHash, + #[error("internal server error: {message}, {location}")] + InternalServer { + location: &'static Location<'static>, + message: String, + }, #[error("connection id could not be verified")] - InvalidConnectionId, - - #[error("could not find remote address")] - AddressNotFound, - - #[error("torrent has no peers")] - NoPeersFound, - - #[error("torrent not on whitelist")] - TorrentNotWhitelisted, - - #[error("peer not authenticated")] - PeerNotAuthenticated, - - #[error("invalid authentication key")] - PeerKeyNotValid, - - #[error("exceeded info_hash limit")] - ExceededInfoHashLimit, - - #[error("bad request")] - BadRequest, -} + InvalidConnectionId { location: &'static Location<'static> }, -impl From for Error { - fn from(e: tracker::error::Error) -> Self { - match e { - tracker::error::Error::TorrentNotWhitelisted { - info_hash: _, - location: _, - } => Error::TorrentNotWhitelisted, - tracker::error::Error::PeerNotAuthenticated { location: _ } => Error::PeerNotAuthenticated, - tracker::error::Error::PeerKeyNotValid { key: _, source: _ } => Error::PeerKeyNotValid, - } - } + #[error("bad request: {source}")] + BadRequest { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 076710fb6..b36399f89 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -1,4 +1,5 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::panic::Location; use std::sync::Arc; use aquatic_udp_protocol::{ @@ -14,7 +15,10 @@ use crate::udp::error::Error; use crate::udp::request::AnnounceWrapper; pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { - match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| Error::InternalServer) { + match 
Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|e| Error::InternalServer { + message: format!("{e:?}"), + location: Location::caller(), + }) { Ok(request) => { let transaction_id = match &request { Request::Connect(connect_request) => connect_request.transaction_id, @@ -28,7 +32,12 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: A } } // bad request - Err(_) => handle_error(&Error::BadRequest, TransactionId(0)), + Err(e) => handle_error( + &Error::BadRequest { + source: (Arc::new(e) as Arc).into(), + }, + TransactionId(0), + ), } } @@ -90,7 +99,10 @@ pub async fn handle_announce( tracker .authenticate_request(&wrapped_announce_request.info_hash, &None) - .await?; + .await + .map_err(|e| Error::TrackerError { + source: (Arc::new(e) as Arc).into(), + })?; let peer = peer::Peer::from_udp_announce_request( &wrapped_announce_request.announce_request, From 08a712dfbf5c86afc8ec49df0ed5c4e80c35b149 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Feb 2023 21:22:41 +0100 Subject: [PATCH 0368/1003] dev: located error for http --- src/http/error.rs | 58 ++++++++++++++++++++++++------------------- src/http/filters.rs | 37 +++++++++++++++++++++------ src/http/handlers.rs | 42 ++++++++++++++++++++----------- tests/http/asserts.rs | 20 ++++++--------- 4 files changed, 97 insertions(+), 60 deletions(-) diff --git a/src/http/error.rs b/src/http/error.rs index b6c08a8ba..f07c32f6d 100644 --- a/src/http/error.rs +++ b/src/http/error.rs @@ -1,34 +1,40 @@ +use std::panic::Location; + use thiserror::Error; use warp::reject::Reject; +use crate::located_error::LocatedError; + #[derive(Error, Debug)] pub enum Error { - #[error("internal server error")] - InternalServer, - - #[error("info_hash is either missing or invalid")] - InvalidInfo, - - #[error("peer_id is either missing or invalid")] - InvalidPeerId, - - #[error("could not find remote address")] - AddressNotFound, - - #[error("torrent has no peers")] - 
NoPeersFound, - - #[error("torrent not on whitelist")] - TorrentNotWhitelisted, - - #[error("peer not authenticated")] - PeerNotAuthenticated, - - #[error("invalid authentication key")] - PeerKeyNotValid, - - #[error("exceeded info_hash limit")] - ExceededInfoHashLimit, + #[error("tracker server error: {source}")] + TrackerError { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + + #[error("internal server error: {message}, {location}")] + InternalServer { + location: &'static Location<'static>, + message: String, + }, + + #[error("no valid infohashes found, {location}")] + EmptyInfoHash { location: &'static Location<'static> }, + + #[error("peer_id is either missing or invalid, {location}")] + InvalidPeerId { location: &'static Location<'static> }, + + #[error("could not find remote address: {message}, {location}")] + AddressNotFound { + location: &'static Location<'static>, + message: String, + }, + + #[error("too many infohashes: {message}, {location}")] + TwoManyInfoHashes { + location: &'static Location<'static>, + message: String, + }, } impl Reject for Error {} diff --git a/src/http/filters.rs b/src/http/filters.rs index 0fe369eba..2760c995c 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -1,5 +1,6 @@ use std::convert::Infallible; use std::net::{IpAddr, SocketAddr}; +use std::panic::Location; use std::str::FromStr; use std::sync::Arc; @@ -87,9 +88,14 @@ fn info_hashes(raw_query: &String) -> WebResult> { } if info_hashes.len() > MAX_SCRAPE_TORRENTS as usize { - Err(reject::custom(Error::ExceededInfoHashLimit)) + Err(reject::custom(Error::TwoManyInfoHashes { + location: Location::caller(), + message: format! 
{"found: {}, but limit is: {}",info_hashes.len(), MAX_SCRAPE_TORRENTS}, + })) } else if info_hashes.is_empty() { - Err(reject::custom(Error::InvalidInfo)) + Err(reject::custom(Error::EmptyInfoHash { + location: Location::caller(), + })) } else { Ok(info_hashes) } @@ -114,7 +120,9 @@ fn peer_id(raw_query: &String) -> WebResult { // peer_id must be 20 bytes if peer_id_bytes.len() != 20 { - return Err(reject::custom(Error::InvalidPeerId)); + return Err(reject::custom(Error::InvalidPeerId { + location: Location::caller(), + })); } // clone peer_id_bytes into fixed length array @@ -128,18 +136,26 @@ fn peer_id(raw_query: &String) -> WebResult { match peer_id { Some(id) => Ok(id), - None => Err(reject::custom(Error::InvalidPeerId)), + None => Err(reject::custom(Error::InvalidPeerId { + location: Location::caller(), + })), } } /// Get `PeerAddress` from `RemoteAddress` or Forwarded fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option)) -> WebResult { if !on_reverse_proxy && remote_addr.is_none() { - return Err(reject::custom(Error::AddressNotFound)); + return Err(reject::custom(Error::AddressNotFound { + location: Location::caller(), + message: "neither on have remote address or on a reverse proxy".to_string(), + })); } if on_reverse_proxy && x_forwarded_for.is_none() { - return Err(reject::custom(Error::AddressNotFound)); + return Err(reject::custom(Error::AddressNotFound { + location: Location::caller(), + message: "must have a x-forwarded-for when using a reverse proxy".to_string(), + })); } if on_reverse_proxy { @@ -151,7 +167,14 @@ fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, tracker: Arc, ) -> Result<(), Error> { - tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { - tracker::error::Error::TorrentNotWhitelisted { info_hash, location } => Error::TorrentNotWhitelisted, - tracker::error::Error::PeerNotAuthenticated { location } => Error::PeerNotAuthenticated, - 
tracker::error::Error::PeerKeyNotValid { key, source } => Error::PeerKeyNotValid, - }) + tracker + .authenticate_request(info_hash, auth_key) + .await + .map_err(|e| Error::TrackerError { + source: (Arc::new(e) as Arc).into(), + }) } /// Handle announce request @@ -39,9 +41,7 @@ pub async fn handle_announce( auth_key: Option, tracker: Arc, ) -> WebResult { - authenticate(&announce_request.info_hash, &auth_key, tracker.clone()) - .await - .map_err(reject::custom)?; + authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await?; debug!("{:?}", announce_request); @@ -158,7 +158,10 @@ fn send_announce_response( if let Some(1) = announce_request.compact { match res.write_compact() { Ok(body) => Ok(Response::new(body)), - Err(_) => Err(reject::custom(Error::InternalServer)), + Err(e) => Err(reject::custom(Error::InternalServer { + message: e.to_string(), + location: Location::caller(), + })), } } else { Ok(Response::new(res.write().into())) @@ -171,7 +174,10 @@ fn send_scrape_response(files: HashMap) -> WebR match res.write() { Ok(body) => Ok(Response::new(body)), - Err(_) => Err(reject::custom(Error::InternalServer)), + Err(e) => Err(reject::custom(Error::InternalServer { + message: e.to_string(), + location: Location::caller(), + })), } } @@ -181,15 +187,21 @@ fn send_scrape_response(files: HashMap) -> WebR /// /// Will not return a error, `Infallible`, but instead convert the `ServerError` into a `Response`. 
pub fn send_error(r: &Rejection) -> std::result::Result { - let body = if let Some(server_error) = r.find::() { - debug!("{:?}", server_error); + let warp_reject_error = r.find::(); + + let body = if let Some(error) = warp_reject_error { + debug!("{:?}", error); response::Error { - failure_reason: server_error.to_string(), + failure_reason: error.to_string(), } .write() } else { response::Error { - failure_reason: Error::InternalServer.to_string(), + failure_reason: Error::InternalServer { + message: "Undefined".to_string(), + location: Location::caller(), + } + .to_string(), } .write() }; diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index cd6bcb499..211a7bb33 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -91,7 +91,7 @@ pub async fn assert_invalid_info_hash_error_response(response: Response) { assert_error_bencoded( &response.text().await.unwrap(), - "info_hash is either missing or invalid", + "no valid infohashes found", Location::caller(), ); } @@ -109,25 +109,21 @@ pub async fn assert_invalid_peer_id_error_response(response: Response) { pub async fn assert_torrent_not_in_whitelist_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded( - &response.text().await.unwrap(), - "torrent not on whitelist", - Location::caller(), - ); + assert_error_bencoded(&response.text().await.unwrap(), "is not whitelisted", Location::caller()); } pub async fn assert_peer_not_authenticated_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded(&response.text().await.unwrap(), "peer not authenticated", Location::caller()); + assert_error_bencoded( + &response.text().await.unwrap(), + "The peer is not authenticated", + Location::caller(), + ); } pub async fn assert_invalid_authentication_key_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded( - &response.text().await.unwrap(), - "invalid authentication key", - 
Location::caller(), - ); + assert_error_bencoded(&response.text().await.unwrap(), "is not valid", Location::caller()); } From 0dc305023772ef5d65d1bf1ae6c41daff44f797e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 6 Feb 2023 15:37:38 +0000 Subject: [PATCH 0369/1003] feat(http): [#160] scaffolding for HTTP tracker using Axum We are going to migrate the HTTP tracker from Warp to Axum. This is the basic scaffolding for Axum. Tests have been duplicated to test the new Axum implementation. The setup allows executing both versions: the Warp version on production and both versions (Warp and Axum) on testing env. --- src/http/axum/handlers.rs | 9 + src/http/axum/mod.rs | 5 + src/http/axum/resources/mod.rs | 1 + src/http/axum/resources/ok.rs | 4 + src/http/axum/responses.rs | 10 + src/http/axum/routes.rs | 13 + src/http/axum/server.rs | 43 + src/http/mod.rs | 9 + src/jobs/http_tracker.rs | 68 +- src/setup.rs | 3 +- tests/http/server.rs | 33 +- tests/http_tracker.rs | 1376 ++++++++++++++++++++++++++++++-- 12 files changed, 1507 insertions(+), 67 deletions(-) create mode 100644 src/http/axum/handlers.rs create mode 100644 src/http/axum/mod.rs create mode 100644 src/http/axum/resources/mod.rs create mode 100644 src/http/axum/resources/ok.rs create mode 100644 src/http/axum/responses.rs create mode 100644 src/http/axum/routes.rs create mode 100644 src/http/axum/server.rs diff --git a/src/http/axum/handlers.rs b/src/http/axum/handlers.rs new file mode 100644 index 000000000..b2f20786b --- /dev/null +++ b/src/http/axum/handlers.rs @@ -0,0 +1,9 @@ +use axum::response::Json; + +use super::resources::ok::Ok; +use super::responses::ok_response; + +#[allow(clippy::unused_async)] +pub async fn get_status_handler() -> Json { + ok_response() +} diff --git a/src/http/axum/mod.rs b/src/http/axum/mod.rs new file mode 100644 index 000000000..57773d810 --- /dev/null +++ b/src/http/axum/mod.rs @@ -0,0 +1,5 @@ +pub mod handlers; +pub mod resources; +pub mod responses; +pub mod routes; 
+pub mod server; diff --git a/src/http/axum/resources/mod.rs b/src/http/axum/resources/mod.rs new file mode 100644 index 000000000..a493c2ac2 --- /dev/null +++ b/src/http/axum/resources/mod.rs @@ -0,0 +1 @@ +pub mod ok; diff --git a/src/http/axum/resources/ok.rs b/src/http/axum/resources/ok.rs new file mode 100644 index 000000000..adc56e6ea --- /dev/null +++ b/src/http/axum/resources/ok.rs @@ -0,0 +1,4 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Ok {} diff --git a/src/http/axum/responses.rs b/src/http/axum/responses.rs new file mode 100644 index 000000000..9c5896b35 --- /dev/null +++ b/src/http/axum/responses.rs @@ -0,0 +1,10 @@ +// Resource responses + +use axum::Json; + +use super::resources::ok::Ok; + +#[must_use] +pub fn ok_response() -> Json { + Json(Ok {}) +} diff --git a/src/http/axum/routes.rs b/src/http/axum/routes.rs new file mode 100644 index 000000000..9ab58938f --- /dev/null +++ b/src/http/axum/routes.rs @@ -0,0 +1,13 @@ +use std::sync::Arc; + +use axum::routing::get; +use axum::Router; + +use super::handlers::get_status_handler; +use crate::tracker::Tracker; + +pub fn router(_tracker: &Arc) -> Router { + Router::new() + // Status + .route("/status", get(get_status_handler)) +} diff --git a/src/http/axum/server.rs b/src/http/axum/server.rs new file mode 100644 index 000000000..541dda33e --- /dev/null +++ b/src/http/axum/server.rs @@ -0,0 +1,43 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use axum_server::tls_rustls::RustlsConfig; +use axum_server::Handle; +use futures::Future; +use log::info; +use warp::hyper; + +use super::routes::router; +use crate::tracker::Tracker; + +pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { + let app = router(tracker); + + let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); + + server.with_graceful_shutdown(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + 
info!("Stopping Torrust HTTP tracker server on http://{} ...", socket_addr); + }) +} + +pub fn start_tls( + socket_addr: SocketAddr, + ssl_config: RustlsConfig, + tracker: &Arc, +) -> impl Future> { + let app = router(tracker); + + let handle = Handle::new(); + let shutdown_handle = handle.clone(); + + tokio::spawn(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + info!("Stopping Torrust HTTP tracker server on https://{} ...", socket_addr); + shutdown_handle.shutdown(); + }); + + axum_server::bind_rustls(socket_addr, ssl_config) + .handle(handle) + .serve(app.into_make_service()) +} diff --git a/src/http/mod.rs b/src/http/mod.rs index fa4c263b5..9cd21aab5 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -9,6 +9,9 @@ //! - //! - //! + +use serde::{Deserialize, Serialize}; +pub mod axum; pub mod error; pub mod filters; pub mod handlers; @@ -19,3 +22,9 @@ pub mod server; pub type Bytes = u64; pub type WebResult = std::result::Result; + +#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] +pub enum Version { + Warp, + Axum, +} diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 8e38039b7..6b069301d 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -1,21 +1,31 @@ use std::net::SocketAddr; use std::sync::Arc; +use axum_server::tls_rustls::RustlsConfig; use log::{info, warn}; use tokio::sync::oneshot; use tokio::task::JoinHandle; use crate::config::HttpTracker; +use crate::http::axum::server; use crate::http::server::Http; +use crate::http::Version; use crate::tracker; #[derive(Debug)] pub struct ServerJobStarted(); +pub async fn start_job(config: &HttpTracker, tracker: Arc, version: Version) -> JoinHandle<()> { + match version { + Version::Warp => start_warp(config, tracker.clone()).await, + Version::Axum => start_axum(config, tracker.clone()).await, + } +} + /// # Panics /// -/// It would panic if the `config::HttpTracker` struct would contain an inappropriate 
values. -pub async fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { +/// It would panic if the `config::HttpTracker` struct would contain inappropriate values. +async fn start_warp(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config .bind_address .parse::() @@ -68,3 +78,57 @@ pub async fn start_job(config: &HttpTracker, tracker: Arc) -> join_handle } + +/// # Panics +/// +/// It would panic if the `config::HttpTracker` struct would contain inappropriate values. +async fn start_axum(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { + let bind_addr = config + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); + let ssl_enabled = config.ssl_enabled; + let ssl_cert_path = config.ssl_cert_path.clone(); + let ssl_key_path = config.ssl_key_path.clone(); + + let (tx, rx) = oneshot::channel::(); + + // Run the API server + let join_handle = tokio::spawn(async move { + if !ssl_enabled { + info!("Starting Torrust HTTP tracker server on: http://{}", bind_addr); + + let handle = server::start(bind_addr, &tracker); + + tx.send(ServerJobStarted()) + .expect("the HTTP tracker server should not be dropped"); + + if let Ok(()) = handle.await { + info!("Torrust HTTP tracker server on http://{} stopped", bind_addr); + } + } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { + info!("Starting Torrust HTTP tracker server on: https://{}", bind_addr); + + let ssl_config = RustlsConfig::from_pem_file(ssl_cert_path.unwrap(), ssl_key_path.unwrap()) + .await + .unwrap(); + + let handle = server::start_tls(bind_addr, ssl_config, &tracker); + + tx.send(ServerJobStarted()) + .expect("the HTTP tracker server should not be dropped"); + + if let Ok(()) = handle.await { + info!("Torrust HTTP tracker server on https://{} stopped", bind_addr); + } + } + }); + + // Wait until the HTTP tracker server job is running + match rx.await { + Ok(_msg) => info!("Torrust HTTP tracker server started"), + Err(e) => 
panic!("the HTTP tracker server was dropped: {e}"), + } + + join_handle +} diff --git a/src/setup.rs b/src/setup.rs index 31be3baac..3461667cc 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -4,6 +4,7 @@ use log::warn; use tokio::task::JoinHandle; use crate::config::Configuration; +use crate::http::Version; use crate::jobs::{http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::tracker; @@ -47,7 +48,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve if !http_tracker_config.enabled { continue; } - jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone()).await); + jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::Warp).await); } // Start HTTP API diff --git a/tests/http/server.rs b/tests/http/server.rs index e48ecd88d..e5266eee5 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -3,6 +3,7 @@ use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; use torrust_tracker::config::{ephemeral_configuration, Configuration}; +use torrust_tracker::http::Version; use torrust_tracker::jobs::http_tracker; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::mode::Mode; @@ -13,24 +14,24 @@ use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use super::connection_info::ConnectionInfo; /// Starts a HTTP tracker with mode "public" in settings -pub async fn start_public_http_tracker() -> Server { +pub async fn start_public_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); configuration.mode = Mode::Public; - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "listed" in settings -pub async fn start_whitelisted_http_tracker() -> Server { +pub async fn start_whitelisted_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); configuration.mode = 
Mode::Listed; - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "private" in settings -pub async fn start_private_http_tracker() -> Server { +pub async fn start_private_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); configuration.mode = Mode::Private; - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with a wildcard IPV6 address. @@ -40,7 +41,7 @@ pub async fn start_private_http_tracker() -> Server { /// [[http_trackers]] /// bind_address = "[::]:7070" /// ``` -pub async fn start_ipv6_http_tracker() -> Server { +pub async fn start_ipv6_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); // Change socket address to "wildcard address" (unspecified address which means any IP address) @@ -49,7 +50,7 @@ pub async fn start_ipv6_http_tracker() -> Server { let new_ipv6_socket_address = format!("[::]:{}", socket_addr.port()); configuration.http_trackers[0].bind_address = new_ipv6_socket_address; - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with an specific `external_ip`. @@ -58,10 +59,10 @@ pub async fn start_ipv6_http_tracker() -> Server { /// ```text /// external_ip = "2.137.87.41" /// ``` -pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr) -> Server { +pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr, version: Version) -> Server { let mut configuration = ephemeral_configuration(); configuration.external_ip = Some(external_ip.to_string()); - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker `on_reverse_proxy`. 
@@ -70,24 +71,24 @@ pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr) -> Server /// ```text /// on_reverse_proxy = true /// ``` -pub async fn start_http_tracker_on_reverse_proxy() -> Server { +pub async fn start_http_tracker_on_reverse_proxy(version: Version) -> Server { let mut configuration = ephemeral_configuration(); configuration.on_reverse_proxy = true; - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } -pub async fn start_default_http_tracker() -> Server { +pub async fn start_default_http_tracker(version: Version) -> Server { let configuration = tracker_configuration(); - start_custom_http_tracker(configuration.clone()).await + start_custom_http_tracker(configuration.clone(), version).await } pub fn tracker_configuration() -> Arc { Arc::new(ephemeral_configuration()) } -pub async fn start_custom_http_tracker(configuration: Arc) -> Server { +pub async fn start_custom_http_tracker(configuration: Arc, version: Version) -> Server { let server = start(&configuration); - http_tracker::start_job(&configuration.http_trackers[0], server.tracker.clone()).await; + http_tracker::start_job(&configuration.http_trackers[0], server.tracker.clone(), version).await; server } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 44bb8609d..201f8e705 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -4,7 +4,7 @@ mod common; mod http; -mod http_tracker_server { +mod warp_http_tracker_server { mod for_all_config_modes { @@ -26,6 +26,7 @@ mod http_tracker_server { use local_ip_address::local_ip; use reqwest::Response; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -46,7 +47,7 @@ mod http_tracker_server { #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server 
= start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -61,7 +62,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_request_is_empty() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; @@ -70,7 +71,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; // Without `info_hash` param @@ -111,7 +112,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -133,7 +134,7 @@ mod http_tracker_server { // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP if there. // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request header is tracker is running `on_reverse_proxy`. 
- let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -148,7 +149,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -167,7 +168,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -186,7 +187,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -212,7 +213,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -231,7 +232,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -252,7 +253,7 @@ mod http_tracker_server { async fn should_not_fail_when_the_event_param_is_invalid() { // All invalid values are ignored as if the `event` param were empty - let http_tracker_server 
= start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -279,7 +280,7 @@ mod http_tracker_server { #[tokio::test] async fn should_not_fail_when_the_compact_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -298,7 +299,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let response = Client::new(http_tracker_server.get_connection_info()) .announce( @@ -323,7 +324,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -361,7 +362,7 @@ mod http_tracker_server { #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let peer = PeerBuilder::default().build(); @@ -388,7 +389,7 @@ mod http_tracker_server { // Tracker Returns Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -427,7 +428,7 @@ mod http_tracker_server { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -463,7 +464,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; Client::new(http_tracker_server.get_connection_info()) .announce(&QueryBuilder::default().query()) @@ -476,7 +477,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let http_tracker_server = start_ipv6_http_tracker().await; + let http_tracker_server = start_ipv6_http_tracker(Version::Warp).await; Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) @@ -491,7 +492,7 @@ mod http_tracker_server { async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; Client::new(http_tracker_server.get_connection_info()) .announce( @@ -508,7 +509,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; Client::new(http_tracker_server.get_connection_info()) .announce(&QueryBuilder::default().query()) @@ -521,7 +522,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let http_tracker_server = start_ipv6_http_tracker().await; + let http_tracker_server = start_ipv6_http_tracker(Version::Warp).await; Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) @@ -536,7 +537,7 @@ mod http_tracker_server { async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; Client::new(http_tracker_server.get_connection_info()) .announce( @@ -553,7 +554,7 @@ mod http_tracker_server { #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); @@ -583,7 +584,8 @@ mod http_tracker_server { 127.0.0.1 external_ip = "2.137.87.41" */ - let http_tracker_server = start_http_tracker_with_external_ip(&IpAddr::from_str("2.137.87.41").unwrap()).await; + let http_tracker_server = + start_http_tracker_with_external_ip(&IpAddr::from_str("2.137.87.41").unwrap(), Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); @@ -614,9 +616,11 @@ mod http_tracker_server { ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" */ - let http_tracker_server = - start_http_tracker_with_external_ip(&IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()) - .await; + let http_tracker_server = start_http_tracker_with_external_ip( + &IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + Version::Warp, + ) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); @@ -647,7 +651,7 @@ mod http_tracker_server { 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 */ - let http_tracker_server = start_http_tracker_on_reverse_proxy().await; + let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Warp).await; let 
info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -686,6 +690,7 @@ mod http_tracker_server { use std::net::IpAddr; use std::str::FromStr; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -699,7 +704,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_request_is_empty() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; assert_internal_server_error_response(response).await; @@ -707,7 +712,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -725,7 +730,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let http_tracker = start_public_http_tracker().await; + let http_tracker = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -763,7 +768,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let http_tracker = start_public_http_tracker().await; + let http_tracker = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -801,7 +806,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let http_tracker = start_public_http_tracker().await; + let 
http_tracker = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -818,7 +823,7 @@ mod http_tracker_server { #[tokio::test] async fn should_accept_multiple_infohashes() { - let http_tracker = start_public_http_tracker().await; + let http_tracker = start_public_http_tracker(Version::Warp).await; let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); @@ -842,7 +847,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let http_tracker = start_public_http_tracker().await; + let http_tracker = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -861,7 +866,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let http_tracker = start_ipv6_http_tracker().await; + let http_tracker = start_ipv6_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -885,6 +890,7 @@ mod http_tracker_server { mod and_receiving_an_announce_request { use std::str::FromStr; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; @@ -894,7 +900,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let http_tracker_server = start_whitelisted_http_tracker().await; + let http_tracker_server = start_whitelisted_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -908,7 +914,7 @@ mod http_tracker_server { 
#[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { - let http_tracker_server = start_whitelisted_http_tracker().await; + let http_tracker_server = start_whitelisted_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -929,6 +935,7 @@ mod http_tracker_server { mod receiving_an_scrape_request { use std::str::FromStr; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -941,7 +948,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let http_tracker = start_whitelisted_http_tracker().await; + let http_tracker = start_whitelisted_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -970,7 +977,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let http_tracker = start_whitelisted_http_tracker().await; + let http_tracker = start_whitelisted_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1020,6 +1027,7 @@ mod http_tracker_server { use std::str::FromStr; use std::time::Duration; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::KeyId; @@ -1033,7 +1041,7 @@ mod http_tracker_server { #[tokio::test] async fn should_respond_to_authenticated_peers() { - let http_tracker_server = start_private_http_tracker().await; + let http_tracker_server = start_private_http_tracker(Version::Warp).await; let key = http_tracker_server .tracker @@ -1050,7 +1058,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let http_tracker_server = 
start_private_http_tracker().await; + let http_tracker_server = start_private_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1063,7 +1071,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_if_the_peer_authentication_key_is_not_valid() { - let http_tracker_server = start_private_http_tracker().await; + let http_tracker_server = start_private_http_tracker(Version::Warp).await; // The tracker does not have this key let unregistered_key_id = KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); @@ -1081,6 +1089,7 @@ mod http_tracker_server { use std::str::FromStr; use std::time::Duration; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::KeyId; use torrust_tracker::tracker::peer; @@ -1094,7 +1103,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let http_tracker = start_private_http_tracker().await; + let http_tracker = start_private_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1123,7 +1132,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let http_tracker = start_private_http_tracker().await; + let http_tracker = start_private_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1165,7 +1174,1278 @@ mod http_tracker_server { async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { // There is not authentication error - let http_tracker = start_private_http_tracker().await; + let http_tracker = start_private_http_tracker(Version::Warp).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + 
+ http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let false_key_id: KeyId = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + + let response = Client::authenticated(http_tracker.get_connection_info(), false_key_id) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + } + } + + mod configured_as_private_and_whitelisted { + + mod and_receiving_an_announce_request {} + + mod receiving_an_scrape_request {} + } +} + +mod axum_http_tracker_server { + + // WIP: migration HTTP from Warp to Axum + + use torrust_tracker::http::Version; + + use crate::http::client::Client; + use crate::http::server::start_default_http_tracker; + + #[tokio::test] + async fn should_return_the_status() { + // This is a temporary test to test the new Axum HTTP tracker server scaffolding + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let response = Client::new(http_tracker_server.get_connection_info()).get("status").await; + + assert_eq!(response.status(), 200); + assert_eq!(response.text().await.unwrap(), "{}"); + } + + mod for_all_config_modes { + + mod receiving_an_announce_request { + + // Announce request documentation: + // + // BEP 03. The BitTorrent Protocol Specification + // https://www.bittorrent.org/beps/bep_0003.html + // + // BEP 23. 
Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Announce + + use std::net::{IpAddr, Ipv6Addr}; + use std::str::FromStr; + + use local_ip_address::local_ip; + use reqwest::Response; + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::http::asserts::{ + assert_announce_response, assert_compact_announce_response, assert_empty_announce_response, + assert_internal_server_error_response, assert_invalid_info_hash_error_response, + assert_invalid_peer_id_error_response, assert_is_announce_response, + }; + use crate::http::client::Client; + use crate::http::requests::announce::{Compact, QueryBuilder}; + use crate::http::responses; + use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; + use crate::http::server::{ + start_default_http_tracker, start_http_tracker_on_reverse_proxy, start_http_tracker_with_external_ip, + start_ipv6_http_tracker, start_public_http_tracker, + }; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_respond_if_only_the_mandatory_fields_are_provided() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + params.remove_optional_params(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_is_announce_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_request_is_empty() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; + + assert_internal_server_error_response(response).await; + } + + 
//#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_a_mandatory_field_is_missing() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + // Without `info_hash` param + + let mut params = QueryBuilder::default().query().params(); + + params.info_hash = None; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_info_hash_error_response(response).await; + + // Without `peer_id` param + + let mut params = QueryBuilder::default().query().params(); + + params.peer_id = None; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_peer_id_error_response(response).await; + + // Without `port` param + + let mut params = QueryBuilder::default().query().params(); + + params.port = None; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + for invalid_value in &invalid_info_hashes() { + params.set("info_hash", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_info_hash_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_fail_when_the_peer_address_param_is_invalid() { + // AnnounceQuery does not even contain the `peer_addr` + // The peer IP is obtained in two ways: + // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP if there. + // 2. 
If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request header is tracker is running `on_reverse_proxy`. + + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_is_announce_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_downloaded_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("downloaded", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_uploaded_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("uploaded", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_peer_id_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "-qB0000000000000000", // 19 bytes + "-qB000000000000000000", // 
21 bytes + ]; + + for invalid_value in invalid_values { + params.set("peer_id", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_peer_id_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_port_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("port", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_left_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("left", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_fail_when_the_event_param_is_invalid() { + // All invalid values are ignored as if the `event` param were empty + + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "Started", // It should be lowercase + "Stopped", // It should be lowercase + "Completed", // It should be lowercase + ]; + + for invalid_value in invalid_values { + params.set("event", invalid_value); + + let response = 
Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_is_announce_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_fail_when_the_compact_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("compact", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) + .query(), + ) + .await; + + assert_announce_response( + response, + &Announce { + complete: 1, // the peer for this test + incomplete: 0, + interval: http_tracker_server.tracker.config.announce_interval, + min_interval: http_tracker_server.tracker.config.min_announce_interval, + peers: vec![], + }, + ) + .await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_list_of_previously_announced_peers() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2. 
This new peer is non included on the response peer list + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .query(), + ) + .await; + + // It should only contain teh previously announced peer + assert_announce_response( + response, + &Announce { + complete: 2, + incomplete: 0, + interval: http_tracker_server.tracker.config.announce_interval, + min_interval: http_tracker_server.tracker.config.min_announce_interval, + peers: vec![DictionaryPeer::from(previously_announced_peer)], + }, + ) + .await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let peer = PeerBuilder::default().build(); + + // Add a peer + http_tracker_server.add_torrent(&info_hash, &peer).await; + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer.peer_id) + .query(); + + assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&announce_query) + .await; + + assert_empty_announce_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_compact_response() { + // Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + 
http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 accepting compact responses + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_compact(Compact::Accepted) + .query(), + ) + .await; + + let expected_response = responses::announce::Compact { + complete: 2, + incomplete: 0, + interval: 120, + min_interval: 120, + peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), + }; + + assert_compact_announce_response(response, &expected_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_return_the_compact_response_by_default() { + // code-review: the HTTP tracker does not return the compact response by default if the "compact" + // param is not provided in the announce URL. The BEP 23 suggest to do so. + + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 without passing the "compact" param + // By default it should respond with the compact peer list + // https://www.bittorrent.org/beps/bep_0023.html + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .without_compact() + .query(), + ) + .await; + + assert!(!is_a_compact_announce_response(response).await); + } + + async fn is_a_compact_announce_response(response: Response) -> bool { + let bytes = 
response.bytes().await.unwrap(); + let compact_announce = serde_bencode::from_bytes::(&bytes); + compact_announce.is_ok() + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + Client::new(http_tracker_server.get_connection_info()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { + let http_tracker_server = start_ipv6_http_tracker(Version::Axum).await; + + Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
+ + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 0); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + Client::new(http_tracker_server.get_connection_info()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, 1); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { + let http_tracker_server = start_ipv6_http_tracker(Version::Axum).await; + + Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 1); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
+ + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 0); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let client_ip = local_ip().unwrap(); + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), client_ip); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. 
+ + client <-> tracker <-> Internet + 127.0.0.1 external_ip = "2.137.87.41" + */ + + let http_tracker_server = + start_http_tracker_with_external_ip(&IpAddr::from_str("2.137.87.41").unwrap(), Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. 
+ + client <-> tracker <-> Internet + ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" + */ + + let http_tracker_server = start_http_tracker_with_external_ip( + &IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + Version::Axum, + ) + .await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( + ) { + /* + client <-> http proxy <-> tracker <-> Internet + ip: header: config: peer addr: + 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 + */ + + let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let client = Client::new(http_tracker_server.get_connection_info()); + + let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); + + // todo: shouldn't it be the leftmost IP address? + // The application is taking the rightmost IP address. 
See function http::filters::peer_addr + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For + client + .announce_with_header( + &announce_query, + "X-Forwarded-For", + "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", + ) + .await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); + } + } + + mod receiving_an_scrape_request { + + // Scrape documentation: + // + // BEP 48. Tracker Protocol Extension: Scrape + // https://www.bittorrent.org/beps/bep_0048.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Scrape + + use std::net::IpAddr; + use std::str::FromStr; + + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::http::asserts::{assert_internal_server_error_response, assert_scrape_response}; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::requests::scrape::QueryBuilder; + use crate::http::responses::scrape::{self, File, ResponseBuilder}; + use crate::http::server::{start_ipv6_http_tracker, start_public_http_tracker}; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_request_is_empty() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; + + assert_internal_server_error_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + for invalid_value in &invalid_info_hashes() { + 
params.set_one_info_hash_param(invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + // code-review: it's not returning the invalid info hash error + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { + let http_tracker = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { + let http_tracker = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_no_bytes_pending_to_download() + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = 
ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { + let http_tracker = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_accept_multiple_infohashes() { + let http_tracker = start_public_http_tracker(Version::Axum).await; + + let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .add_info_hash(&info_hash1) + .add_info_hash(&info_hash2) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file(info_hash1.bytes(), File::zeroed()) + .add_file(info_hash2.bytes(), File::zeroed()) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { + let http_tracker = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::new(http_tracker.get_connection_info()) + .scrape( + 
&requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = http_tracker.tracker.get_stats().await; + + assert_eq!(stats.tcp4_scrapes_handled, 1); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { + let http_tracker = start_ipv6_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::bind(http_tracker.get_connection_info(), IpAddr::from_str("::1").unwrap()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = http_tracker.tracker.get_stats().await; + + assert_eq!(stats.tcp6_scrapes_handled, 1); + } + } + } + + mod configured_as_whitelisted { + + mod and_receiving_an_announce_request { + use std::str::FromStr; + + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; + use crate::http::client::Client; + use crate::http::requests::announce::QueryBuilder; + use crate::http::server::start_whitelisted_http_tracker; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { + let http_tracker_server = start_whitelisted_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_torrent_not_in_whitelist_error_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_allow_announcing_a_whitelisted_torrent() { + let http_tracker_server = start_whitelisted_http_tracker(Version::Axum).await; + + let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker_server + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_is_announce_response(response).await; + } + } + + mod receiving_an_scrape_request { + use std::str::FromStr; + + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::PeerBuilder; + use crate::http::asserts::assert_scrape_response; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::responses::scrape::{File, ResponseBuilder}; + use crate::http::server::start_whitelisted_http_tracker; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { + let http_tracker = start_whitelisted_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { + let http_tracker = start_whitelisted_http_tracker(Version::Axum).await; + + let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + http_tracker + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + } + } + + mod configured_as_private { + + mod and_receiving_an_announce_request { + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::KeyId; + + use crate::http::asserts::{ + assert_invalid_authentication_key_error_response, assert_is_announce_response, + assert_peer_not_authenticated_error_response, + }; + use crate::http::client::Client; + use crate::http::requests::announce::QueryBuilder; + use crate::http::server::start_private_http_tracker; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_respond_to_authenticated_peers() { + let http_tracker_server = start_private_http_tracker(Version::Axum).await; + + let key = http_tracker_server + .tracker + .generate_auth_key(Duration::from_secs(60)) + .await + .unwrap(); + + let response = Client::authenticated(http_tracker_server.get_connection_info(), key.id()) + .announce(&QueryBuilder::default().query()) + .await; + + assert_is_announce_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn 
should_fail_if_the_peer_has_not_provided_the_authentication_key() { + let http_tracker_server = start_private_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_peer_not_authenticated_error_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_if_the_peer_authentication_key_is_not_valid() { + let http_tracker_server = start_private_http_tracker(Version::Axum).await; + + // The tracker does not have this key + let unregistered_key_id = KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key_id) + .announce(&QueryBuilder::default().query()) + .await; + + assert_invalid_authentication_key_error_response(response).await; + } + } + + mod receiving_an_scrape_request { + + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::KeyId; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::PeerBuilder; + use crate::http::asserts::assert_scrape_response; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::responses::scrape::{File, ResponseBuilder}; + use crate::http::server::start_private_http_tracker; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { + let http_tracker = start_private_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + 
.with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { + let http_tracker = start_private_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let key = http_tracker.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + + let response = Client::authenticated(http_tracker.get_connection_info(), key.id()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { + // There is not authentication error + + let http_tracker = start_private_http_tracker(Version::Axum).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); From 7dc48387920510427f63e964e4dbf29b56d3cf87 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Feb 2023 10:08:28 +0000 Subject: [PATCH 0370/1003] refactor(http): [#160] extract 
functions for percent decoding --- src/http/filters.rs | 22 +++--- src/http/mod.rs | 1 + src/http/percent_encoding.rs | 66 ++++++++++++++++ src/protocol/info_hash.rs | 71 +++++++++++++++++ src/tracker/peer.rs | 134 ++++++++++++++++++++++++++++++++ tests/http/bencode.rs | 15 ---- tests/http/mod.rs | 23 +++++- tests/http/requests/announce.rs | 7 +- tests/http/requests/scrape.rs | 5 +- tests/http/responses/scrape.rs | 2 +- tests/http_tracker.rs | 67 +++------------- 11 files changed, 318 insertions(+), 95 deletions(-) create mode 100644 src/http/percent_encoding.rs delete mode 100644 tests/http/bencode.rs diff --git a/src/http/filters.rs b/src/http/filters.rs index 2760c995c..e02eac523 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -7,6 +7,7 @@ use std::sync::Arc; use warp::{reject, Filter, Rejection}; use super::error::Error; +use super::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use super::{request, WebResult}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; @@ -78,9 +79,11 @@ fn info_hashes(raw_query: &String) -> WebResult> { for v in split_raw_query { if v.contains("info_hash") { + // get raw percent encoded infohash let raw_info_hash = v.split('=').collect::>()[1]; - let info_hash_bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); - let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)); + + let info_hash = percent_decode_info_hash(raw_info_hash); + if let Ok(ih) = info_hash { info_hashes.push(ih); } @@ -112,24 +115,17 @@ fn peer_id(raw_query: &String) -> WebResult { for v in split_raw_query { // look for the peer_id param if v.contains("peer_id") { - // get raw percent_encoded peer_id + // get raw percent encoded peer id let raw_peer_id = v.split('=').collect::>()[1]; - // decode peer_id - let peer_id_bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); - - // peer_id must be 20 bytes - if peer_id_bytes.len() != 20 { + if let 
Ok(id) = percent_decode_peer_id(raw_peer_id) { + peer_id = Some(id); + } else { return Err(reject::custom(Error::InvalidPeerId { location: Location::caller(), })); } - // clone peer_id_bytes into fixed length array - let mut byte_arr: [u8; 20] = Default::default(); - byte_arr.clone_from_slice(peer_id_bytes.as_slice()); - - peer_id = Some(peer::Id(byte_arr)); break; } } diff --git a/src/http/mod.rs b/src/http/mod.rs index 9cd21aab5..15f7abb52 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -15,6 +15,7 @@ pub mod axum; pub mod error; pub mod filters; pub mod handlers; +pub mod percent_encoding; pub mod request; pub mod response; pub mod routes; diff --git a/src/http/percent_encoding.rs b/src/http/percent_encoding.rs new file mode 100644 index 000000000..9b5b79ed7 --- /dev/null +++ b/src/http/percent_encoding.rs @@ -0,0 +1,66 @@ +use crate::protocol::info_hash::{ConversionError, InfoHash}; +use crate::tracker::peer::{self, IdConversionError}; + +/// # Errors +/// +/// Will return `Err` if the decoded bytes do not represent a valid `InfoHash`. +pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { + let bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); + InfoHash::try_from(bytes) +} + +/// # Errors +/// +/// Will return `Err` if the decoded bytes do not represent a valid `peer::Id`. 
+pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result { + let bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); + peer::Id::try_from(bytes) +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer; + + #[test] + fn it_should_decode_a_percent_encoded_info_hash() { + let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; + + let info_hash = percent_decode_info_hash(encoded_infohash).unwrap(); + + assert_eq!( + info_hash, + InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap() + ); + } + + #[test] + fn it_should_fail_decoding_an_invalid_percent_encoded_info_hash() { + let invalid_encoded_infohash = "invalid percent-encoded infohash"; + + let info_hash = percent_decode_info_hash(invalid_encoded_infohash); + + assert!(info_hash.is_err()); + } + + #[test] + fn it_should_decode_a_percent_encoded_peer_id() { + let encoded_peer_id = "%2DqB00000000000000000"; + + let peer_id = percent_decode_peer_id(encoded_peer_id).unwrap(); + + assert_eq!(peer_id, peer::Id(*b"-qB00000000000000000")); + } + + #[test] + fn it_should_fail_decoding_an_invalid_percent_encoded_peer_id() { + let invalid_encoded_peer_id = "invalid percent-encoded peer id"; + + let peer_id = percent_decode_peer_id(invalid_encoded_peer_id); + + assert!(peer_id.is_err()); + } +} diff --git a/src/protocol/info_hash.rs b/src/protocol/info_hash.rs index 83a595c1f..320636725 100644 --- a/src/protocol/info_hash.rs +++ b/src/protocol/info_hash.rs @@ -1,7 +1,24 @@ +use std::panic::Location; + +use thiserror::Error; + #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] pub struct InfoHash(pub [u8; 20]); +const INFO_HASH_BYTES_LEN: usize = 20; + impl InfoHash { + /// # Panics + /// + /// Will panic if the byte slice does not contain the exact amount of bytes needed for the `InfoHash`. 
+ #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + assert_eq!(bytes.len(), INFO_HASH_BYTES_LEN); + let mut ret = Self([0u8; INFO_HASH_BYTES_LEN]); + ret.0.clone_from_slice(bytes); + ret + } + /// For readability, when accessing the bytes array #[must_use] pub fn bytes(&self) -> [u8; 20] { @@ -57,6 +74,40 @@ impl std::convert::From<[u8; 20]> for InfoHash { } } +#[derive(Error, Debug)] +pub enum ConversionError { + #[error("not enough bytes for infohash: {message} {location}")] + NotEnoughBytes { + location: &'static Location<'static>, + message: String, + }, + #[error("too many bytes for infohash: {message} {location}")] + TooManyBytes { + location: &'static Location<'static>, + message: String, + }, +} + +impl TryFrom> for InfoHash { + type Error = ConversionError; + + fn try_from(bytes: Vec) -> Result { + if bytes.len() < INFO_HASH_BYTES_LEN { + return Err(ConversionError::NotEnoughBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + if bytes.len() > INFO_HASH_BYTES_LEN { + return Err(ConversionError::TooManyBytes { + location: Location::caller(), + message: format! 
{"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + Ok(Self::from_bytes(&bytes)) + } +} + impl serde::ser::Serialize for InfoHash { fn serialize(&self, serializer: S) -> Result { let mut buffer = [0u8; 40]; @@ -166,6 +217,26 @@ mod tests { ); } + #[test] + fn an_info_hash_can_be_created_from_a_byte_vector() { + let info_hash: InfoHash = [255u8; 20].to_vec().try_into().unwrap(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn it_should_fail_trying_to_create_an_info_hash_from_a_byte_vector_with_less_than_20_bytes() { + assert!(InfoHash::try_from([255u8; 19].to_vec()).is_err()); + } + + #[test] + fn it_should_fail_trying_to_create_an_info_hash_from_a_byte_vector_with_more_than_20_bytes() { + assert!(InfoHash::try_from([255u8; 21].to_vec()).is_err()); + } + #[test] fn an_info_hash_can_be_serialized() { let s = ContainingInfoHash { diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 3f639f970..16c96e04b 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -1,8 +1,10 @@ use std::net::{IpAddr, SocketAddr}; +use std::panic::Location; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde; use serde::Serialize; +use thiserror::Error; use crate::http::request::Announce; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time}; @@ -91,6 +93,69 @@ impl Peer { #[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] pub struct Id(pub [u8; 20]); +const PEER_ID_BYTES_LEN: usize = 20; + +#[derive(Error, Debug)] +pub enum IdConversionError { + #[error("not enough bytes for peer id: {message} {location}")] + NotEnoughBytes { + location: &'static Location<'static>, + message: String, + }, + #[error("too many bytes for peer id: {message} {location}")] + TooManyBytes { + location: &'static Location<'static>, + message: String, + }, +} + +impl Id { + /// # Panics + /// + /// Will panic if byte slice does not contains the exact 
amount of bytes need for the `Id`. + #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + assert_eq!(bytes.len(), PEER_ID_BYTES_LEN); + let mut ret = Self([0u8; PEER_ID_BYTES_LEN]); + ret.0.clone_from_slice(bytes); + ret + } +} + +impl From<[u8; 20]> for Id { + fn from(bytes: [u8; 20]) -> Self { + Id(bytes) + } +} + +impl TryFrom> for Id { + type Error = IdConversionError; + + fn try_from(bytes: Vec) -> Result { + if bytes.len() < PEER_ID_BYTES_LEN { + return Err(IdConversionError::NotEnoughBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), PEER_ID_BYTES_LEN}, + }); + } + if bytes.len() > PEER_ID_BYTES_LEN { + return Err(IdConversionError::TooManyBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), PEER_ID_BYTES_LEN}, + }); + } + Ok(Self::from_bytes(&bytes)) + } +} + +impl std::str::FromStr for Id { + type Err = IdConversionError; + + fn from_str(s: &str) -> Result { + Self::try_from(s.as_bytes().to_vec()) + } +} + impl std::fmt::Display for Id { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.to_hex_string() { @@ -239,6 +304,75 @@ mod test { mod torrent_peer_id { use crate::tracker::peer; + #[test] + fn should_be_instantiated_from_a_byte_slice() { + let id = peer::Id::from_bytes(&[ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + let expected_id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + assert_eq!(id, expected_id); + } + + #[test] + #[should_panic] + fn should_fail_trying_to_instantiate_from_a_byte_slice_with_less_than_20_bytes() { + let less_than_20_bytes = [0; 19]; + let _ = peer::Id::from_bytes(&less_than_20_bytes); + } + + #[test] + #[should_panic] + fn should_fail_trying_to_instantiate_from_a_byte_slice_with_more_than_20_bytes() { + let more_than_20_bytes = [0; 21]; + let _ = 
peer::Id::from_bytes(&more_than_20_bytes); + } + + #[test] + fn should_be_converted_from_a_20_byte_array() { + let id = peer::Id::from([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + let expected_id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + assert_eq!(id, expected_id); + } + + #[test] + fn should_be_converted_from_a_byte_vector() { + let id = peer::Id::try_from( + [ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ] + .to_vec(), + ) + .unwrap(); + + let expected_id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + assert_eq!(id, expected_id); + } + + #[test] + #[should_panic] + fn should_fail_trying_to_convert_from_a_byte_vector_with_less_than_20_bytes() { + let _ = peer::Id::try_from([0; 19].to_vec()).unwrap(); + } + + #[test] + #[should_panic] + fn should_fail_trying_to_convert_from_a_byte_vector_with_more_than_20_bytes() { + let _ = peer::Id::try_from([0; 21].to_vec()).unwrap(); + } + #[test] fn should_be_converted_to_hex_string() { let id = peer::Id(*b"-qB00000000000000000"); diff --git a/tests/http/bencode.rs b/tests/http/bencode.rs deleted file mode 100644 index d107089cf..000000000 --- a/tests/http/bencode.rs +++ /dev/null @@ -1,15 +0,0 @@ -pub type ByteArray20 = [u8; 20]; - -pub struct InfoHash(ByteArray20); - -impl InfoHash { - pub fn new(vec: &[u8]) -> Self { - let mut byte_array_20: ByteArray20 = Default::default(); - byte_array_20.clone_from_slice(vec); - Self(byte_array_20) - } - - pub fn bytes(&self) -> ByteArray20 { - self.0 - } -} diff --git a/tests/http/mod.rs b/tests/http/mod.rs index 87087026f..8c1e3c995 100644 --- a/tests/http/mod.rs +++ b/tests/http/mod.rs @@ -1,7 +1,28 @@ pub mod asserts; -pub mod bencode; pub mod client; pub mod connection_info; pub mod requests; pub mod responses; pub mod server; + +use 
percent_encoding::NON_ALPHANUMERIC; + +pub type ByteArray20 = [u8; 20]; + +pub fn percent_encode_byte_array(bytes: &ByteArray20) -> String { + percent_encoding::percent_encode(bytes, NON_ALPHANUMERIC).to_string() +} + +pub struct InfoHash(ByteArray20); + +impl InfoHash { + pub fn new(vec: &[u8]) -> Self { + let mut byte_array_20: ByteArray20 = Default::default(); + byte_array_20.clone_from_slice(vec); + Self(byte_array_20) + } + + pub fn bytes(&self) -> ByteArray20 { + self.0 + } +} diff --git a/tests/http/requests/announce.rs b/tests/http/requests/announce.rs index a8ebc95f8..87aa3425f 100644 --- a/tests/http/requests/announce.rs +++ b/tests/http/requests/announce.rs @@ -2,12 +2,11 @@ use std::fmt; use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; -use percent_encoding::NON_ALPHANUMERIC; use serde_repr::Serialize_repr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Id; -use crate::http::bencode::ByteArray20; +use crate::http::{percent_encode_byte_array, ByteArray20}; pub struct Query { pub info_hash: ByteArray20, @@ -211,11 +210,11 @@ impl QueryParams { let compact = announce_query.compact.as_ref().map(std::string::ToString::to_string); Self { - info_hash: Some(percent_encoding::percent_encode(&announce_query.info_hash, NON_ALPHANUMERIC).to_string()), + info_hash: Some(percent_encode_byte_array(&announce_query.info_hash)), peer_addr: Some(announce_query.peer_addr.to_string()), downloaded: Some(announce_query.downloaded.to_string()), uploaded: Some(announce_query.uploaded.to_string()), - peer_id: Some(percent_encoding::percent_encode(&announce_query.peer_id, NON_ALPHANUMERIC).to_string()), + peer_id: Some(percent_encode_byte_array(&announce_query.peer_id)), port: Some(announce_query.port.to_string()), left: Some(announce_query.left.to_string()), event, diff --git a/tests/http/requests/scrape.rs b/tests/http/requests/scrape.rs index 6ab46974b..979dad540 100644 --- a/tests/http/requests/scrape.rs +++ 
b/tests/http/requests/scrape.rs @@ -1,10 +1,9 @@ use std::fmt; use std::str::FromStr; -use percent_encoding::NON_ALPHANUMERIC; use torrust_tracker::protocol::info_hash::InfoHash; -use crate::http::bencode::ByteArray20; +use crate::http::{percent_encode_byte_array, ByteArray20}; pub struct Query { pub info_hash: Vec, @@ -111,7 +110,7 @@ impl QueryParams { let info_hashes = scrape_query .info_hash .iter() - .map(|info_hash_bytes| percent_encoding::percent_encode(info_hash_bytes, NON_ALPHANUMERIC).to_string()) + .map(percent_encode_byte_array) .collect::>(); Self { info_hash: info_hashes } diff --git a/tests/http/responses/scrape.rs b/tests/http/responses/scrape.rs index 5bf938ebe..1aea517cf 100644 --- a/tests/http/responses/scrape.rs +++ b/tests/http/responses/scrape.rs @@ -4,7 +4,7 @@ use std::str; use serde::{self, Deserialize, Serialize}; use serde_bencode::value::Value; -use crate::http::bencode::{ByteArray20, InfoHash}; +use crate::http::{ByteArray20, InfoHash}; #[derive(Debug, PartialEq, Default)] pub struct Response { diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 201f8e705..60219d9fe 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1,6 +1,14 @@ /// Integration tests for HTTP tracker server /// -/// cargo test `http_tracker_server` -- --nocapture +/// Warp version: +/// ```text +/// cargo test `warp_http_tracker_server` -- --nocapture +/// ``` +/// +/// Axum version ()WIP): +/// ```text +/// cargo test `warp_http_tracker_server` -- --nocapture +/// ``` mod common; mod http; @@ -2483,60 +2491,3 @@ mod axum_http_tracker_server { mod receiving_an_scrape_request {} } } - -mod percent_encoding { - // todo: these operations are used in the HTTP tracker but they have not been extracted into independent functions. - // These tests document the operations. This behavior could be move to some functions int he future if they are extracted. 
- - use std::str::FromStr; - - use percent_encoding::NON_ALPHANUMERIC; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - - #[test] - fn how_to_encode_an_info_hash() { - let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - - let encoded_info_hash = percent_encoding::percent_encode(&info_hash.0, NON_ALPHANUMERIC).to_string(); - - assert_eq!(encoded_info_hash, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"); - } - - #[test] - fn how_to_decode_an_info_hash() { - let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; - - let info_hash_bytes = percent_encoding::percent_decode_str(encoded_infohash).collect::>(); - let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)).unwrap(); - - assert_eq!( - info_hash, - InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap() - ); - } - - #[test] - fn how_to_encode_a_peer_id() { - let peer_id = peer::Id(*b"-qB00000000000000000"); - - let encoded_peer_id = percent_encoding::percent_encode(&peer_id.0, NON_ALPHANUMERIC).to_string(); - - assert_eq!(encoded_peer_id, "%2DqB00000000000000000"); - } - - #[test] - fn how_to_decode_a_peer_id() { - let encoded_peer_id = "%2DqB00000000000000000"; - - let bytes_vec = percent_encoding::percent_decode_str(encoded_peer_id).collect::>(); - - // Clone peer_id_bytes into fixed length array - let mut peer_id_bytes: [u8; 20] = Default::default(); - peer_id_bytes.clone_from_slice(bytes_vec.as_slice()); - - let peer_id = peer::Id(peer_id_bytes); - - assert_eq!(peer_id, peer::Id(*b"-qB00000000000000000")); - } -} From 9c25febf41652f23fc21144369e1e8b0b0f1d40a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 Feb 2023 09:33:32 +0000 Subject: [PATCH 0371/1003] feat(http): [#160] Axum extractor to extract announce request params from url query WIP: only for mandatory params. 
--- cSpell.json | 1 + src/http/axum/extractors.rs | 159 ++++++++++++++++++++++++++++++++++++ src/http/axum/handlers.rs | 16 ++++ src/http/axum/mod.rs | 2 + src/http/axum/query.rs | 138 +++++++++++++++++++++++++++++++ src/http/axum/routes.rs | 6 +- src/tracker/peer.rs | 11 +++ 7 files changed, 331 insertions(+), 2 deletions(-) create mode 100644 src/http/axum/extractors.rs create mode 100644 src/http/axum/query.rs diff --git a/cSpell.json b/cSpell.json index 9f10d99e4..a451d18dc 100644 --- a/cSpell.json +++ b/cSpell.json @@ -73,6 +73,7 @@ "uroot", "Vagaa", "Vuze", + "whitespaces", "Xtorrent", "Xunlei", "xxxxxxxxxxxxxxxxxxxxd", diff --git a/src/http/axum/extractors.rs b/src/http/axum/extractors.rs new file mode 100644 index 000000000..260c3e705 --- /dev/null +++ b/src/http/axum/extractors.rs @@ -0,0 +1,159 @@ +use std::panic::Location; +use std::str::FromStr; + +use axum::async_trait; +use axum::extract::FromRequestParts; +use axum::http::request::Parts; +use axum::http::StatusCode; +use thiserror::Error; + +use super::query::Query; +use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; +use crate::protocol::info_hash::{ConversionError, InfoHash}; +use crate::tracker::peer::{self, IdConversionError}; + +pub struct ExtractAnnounceParams(pub AnnounceParams); + +#[derive(Debug, PartialEq)] +pub struct AnnounceParams { + pub info_hash: InfoHash, + pub peer_id: peer::Id, + pub port: u16, +} + +#[derive(Error, Debug)] +pub enum ParseAnnounceQueryError { + #[error("missing infohash {location}")] + MissingInfoHash { location: &'static Location<'static> }, + #[error("invalid infohash {location}")] + InvalidInfoHash { location: &'static Location<'static> }, + #[error("missing peer id {location}")] + MissingPeerId { location: &'static Location<'static> }, + #[error("invalid peer id {location}")] + InvalidPeerId { location: &'static Location<'static> }, + #[error("missing port {location}")] + MissingPort { location: &'static Location<'static> }, 
+ #[error("invalid port {location}")] + InvalidPort { location: &'static Location<'static> }, +} + +impl From for ParseAnnounceQueryError { + #[track_caller] + fn from(_err: IdConversionError) -> Self { + Self::InvalidPeerId { + location: Location::caller(), + } + } +} + +impl From for ParseAnnounceQueryError { + #[track_caller] + fn from(_err: ConversionError) -> Self { + Self::InvalidPeerId { + location: Location::caller(), + } + } +} + +impl TryFrom for AnnounceParams { + type Error = ParseAnnounceQueryError; + + fn try_from(query: Query) -> Result { + Ok(Self { + info_hash: extract_info_hash(&query)?, + peer_id: extract_peer_id(&query)?, + port: extract_port(&query)?, + }) + } +} + +fn extract_info_hash(query: &Query) -> Result { + match query.get_param("info_hash") { + Some(raw_info_hash) => Ok(percent_decode_info_hash(&raw_info_hash)?), + None => { + return Err(ParseAnnounceQueryError::MissingInfoHash { + location: Location::caller(), + }) + } + } +} + +fn extract_peer_id(query: &Query) -> Result { + match query.get_param("peer_id") { + Some(raw_peer_id) => Ok(percent_decode_peer_id(&raw_peer_id)?), + None => { + return Err(ParseAnnounceQueryError::MissingPeerId { + location: Location::caller(), + }) + } + } +} + +fn extract_port(query: &Query) -> Result { + match query.get_param("port") { + Some(raw_port) => Ok(u16::from_str(&raw_port).map_err(|_e| ParseAnnounceQueryError::InvalidPort { + location: Location::caller(), + })?), + None => { + return Err(ParseAnnounceQueryError::MissingPort { + location: Location::caller(), + }) + } + } +} + +#[async_trait] +impl FromRequestParts for ExtractAnnounceParams +where + S: Send + Sync, +{ + type Rejection = (StatusCode, &'static str); + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + let raw_query = parts.uri.query(); + + if raw_query.is_none() { + return Err((StatusCode::BAD_REQUEST, "missing query params")); + } + + let query = raw_query.unwrap().parse::(); + + if query.is_err() { + return 
Err((StatusCode::BAD_REQUEST, "can't parse query params")); + } + + let announce_params = AnnounceParams::try_from(query.unwrap()); + + if announce_params.is_err() { + return Err((StatusCode::BAD_REQUEST, "can't parse query params for announce request")); + } + + Ok(ExtractAnnounceParams(announce_params.unwrap())) + } +} + +#[cfg(test)] +mod tests { + use super::AnnounceParams; + use crate::http::axum::query::Query; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer; + + #[test] + fn announce_request_params_should_be_extracted_from_url_query_params() { + let raw_query = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548"; + + let query = raw_query.parse::().unwrap(); + + let announce_params = AnnounceParams::try_from(query).unwrap(); + + assert_eq!( + announce_params, + AnnounceParams { + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + peer_id: "-qB00000000000000001".parse::().unwrap(), + port: 17548, + } + ); + } +} diff --git a/src/http/axum/handlers.rs b/src/http/axum/handlers.rs index b2f20786b..050fa8e69 100644 --- a/src/http/axum/handlers.rs +++ b/src/http/axum/handlers.rs @@ -1,9 +1,25 @@ +use std::sync::Arc; + +use axum::extract::State; use axum::response::Json; +use super::extractors::ExtractAnnounceParams; use super::resources::ok::Ok; use super::responses::ok_response; +use crate::tracker::Tracker; #[allow(clippy::unused_async)] pub async fn get_status_handler() -> Json { ok_response() } + +/// # Panics +/// +/// todo +#[allow(clippy::unused_async)] +pub async fn announce_handler( + State(_tracker): State>, + ExtractAnnounceParams(_announce_params): ExtractAnnounceParams, +) -> Json { + todo!() +} diff --git a/src/http/axum/mod.rs b/src/http/axum/mod.rs index 57773d810..9d96362df 100644 --- a/src/http/axum/mod.rs +++ b/src/http/axum/mod.rs @@ -1,4 +1,6 @@ +pub mod extractors; pub mod handlers; +pub mod query; pub mod resources; pub mod responses; pub mod 
routes; diff --git a/src/http/axum/query.rs b/src/http/axum/query.rs new file mode 100644 index 000000000..5037d5d0e --- /dev/null +++ b/src/http/axum/query.rs @@ -0,0 +1,138 @@ +use std::collections::HashMap; +use std::panic::Location; +use std::str::FromStr; + +use thiserror::Error; +pub struct Query { + params: HashMap, +} + +#[derive(Error, Debug)] +pub enum ParseQueryError { + #[error("invalid param {raw_param} in {location}")] + InvalidParam { + location: &'static Location<'static>, + raw_param: String, + }, +} + +impl FromStr for Query { + type Err = ParseQueryError; + + fn from_str(raw_query: &str) -> Result { + let mut params: HashMap = HashMap::new(); + + let raw_params = raw_query.trim().trim_start_matches('?').split('&').collect::>(); + + for raw_param in raw_params { + let param: Param = raw_param.parse()?; + params.insert(param.name, param.value); + } + + Ok(Self { params }) + } +} + +#[derive(Debug, PartialEq)] +struct Param { + name: String, + value: String, +} + +impl FromStr for Param { + type Err = ParseQueryError; + + fn from_str(raw_param: &str) -> Result { + let pair = raw_param.split('=').collect::>(); + + if pair.len() > 2 { + return Err(ParseQueryError::InvalidParam { + location: Location::caller(), + raw_param: raw_param.to_owned(), + }); + } + + Ok(Self { + name: pair[0].to_owned(), + value: pair[1].to_owned(), + }) + } +} + +impl Query { + #[must_use] + pub fn get_param(&self, name: &str) -> Option { + self.params.get(name).map(std::string::ToString::to_string) + } +} + +#[cfg(test)] +mod tests { + use super::Query; + use crate::http::axum::query::Param; + + #[test] + fn it_should_parse_the_query_params_from_an_url_query_string() { + let raw_query = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548"; + + let query = raw_query.parse::().unwrap(); + + assert_eq!( + query.get_param("info_hash").unwrap(), + "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + ); + 
assert_eq!(query.get_param("peer_id").unwrap(), "-qB00000000000000001"); + assert_eq!(query.get_param("port").unwrap(), "17548"); + } + + #[test] + fn it_should_fail_parsing_an_invalid_query_string() { + let invalid_raw_query = "name=value=value"; + + let query = invalid_raw_query.parse::(); + + assert!(query.is_err()); + } + + #[test] + fn it_should_ignore_the_preceding_question_mark_if_it_exists() { + let raw_query = "?name=value"; + + let query = raw_query.parse::().unwrap(); + + assert_eq!(query.get_param("name").unwrap(), "value"); + } + + #[test] + fn it_should_trim_whitespaces() { + let raw_query = " name=value "; + + let query = raw_query.parse::().unwrap(); + + assert_eq!(query.get_param("name").unwrap(), "value"); + } + + #[test] + fn it_should_parse_a_single_query_param() { + let raw_param = "name=value"; + + let param = raw_param.parse::().unwrap(); + + assert_eq!( + param, + Param { + name: "name".to_string(), + value: "value".to_string(), + } + ); + } + + #[test] + fn it_should_fail_parsing_an_invalid_query_param() { + let invalid_raw_param = "name=value=value"; + + let query = invalid_raw_param.parse::(); + + assert!(query.is_err()); + } +} diff --git a/src/http/axum/routes.rs b/src/http/axum/routes.rs index 9ab58938f..8e4980682 100644 --- a/src/http/axum/routes.rs +++ b/src/http/axum/routes.rs @@ -3,11 +3,13 @@ use std::sync::Arc; use axum::routing::get; use axum::Router; -use super::handlers::get_status_handler; +use super::handlers::{announce_handler, get_status_handler}; use crate::tracker::Tracker; -pub fn router(_tracker: &Arc) -> Router { +pub fn router(tracker: &Arc) -> Router { Router::new() // Status .route("/status", get(get_status_handler)) + // Announce request + .route("/announce", get(announce_handler).with_state(tracker.clone())) } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 16c96e04b..c132d1e2c 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -331,6 +331,17 @@ mod test { let _ = 
peer::Id::from_bytes(&more_than_20_bytes); } + #[test] + fn should_be_instantiated_from_a_string() { + let id = "-qB00000000000000001".parse::().unwrap(); + + let expected_id = peer::Id([ + 45, 113, 66, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 49, + ]); + + assert_eq!(id, expected_id); + } + #[test] fn should_be_converted_from_a_20_byte_array() { let id = peer::Id::from([ From 995397eae4a4b96820f52bcd203b2e0bd67be745 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 Feb 2023 12:56:42 +0000 Subject: [PATCH 0372/1003] refactor(http): [#160] reorganize dirs for Axum and Warp HTTP tracker implementations We are going to start sharing code bettween both implementation (Warp and Axum). We need to keep common code separated because Warp implementation will be removed when Axum implementation is finished. --- .../{axum => axum_implementation}/extractors.rs | 2 +- src/http/{axum => axum_implementation}/handlers.rs | 0 src/http/{axum => axum_implementation}/mod.rs | 0 src/http/{axum => axum_implementation}/query.rs | 2 +- .../{axum => axum_implementation}/resources/mod.rs | 0 .../{axum => axum_implementation}/resources/ok.rs | 0 .../{axum => axum_implementation}/responses.rs | 0 src/http/{axum => axum_implementation}/routes.rs | 0 src/http/{axum => axum_implementation}/server.rs | 0 src/http/mod.rs | 14 +++----------- src/http/{ => warp_implementation}/error.rs | 0 src/http/{ => warp_implementation}/filters.rs | 2 +- src/http/{ => warp_implementation}/handlers.rs | 0 src/http/warp_implementation/mod.rs | 12 ++++++++++++ src/http/{ => warp_implementation}/request.rs | 2 +- src/http/{ => warp_implementation}/response.rs | 0 src/http/{ => warp_implementation}/routes.rs | 0 src/http/{ => warp_implementation}/server.rs | 0 src/jobs/http_tracker.rs | 4 ++-- src/tracker/peer.rs | 4 ++-- 20 files changed, 23 insertions(+), 19 deletions(-) rename src/http/{axum => axum_implementation}/extractors.rs (98%) rename src/http/{axum => 
axum_implementation}/handlers.rs (100%) rename src/http/{axum => axum_implementation}/mod.rs (100%) rename src/http/{axum => axum_implementation}/query.rs (98%) rename src/http/{axum => axum_implementation}/resources/mod.rs (100%) rename src/http/{axum => axum_implementation}/resources/ok.rs (100%) rename src/http/{axum => axum_implementation}/responses.rs (100%) rename src/http/{axum => axum_implementation}/routes.rs (100%) rename src/http/{axum => axum_implementation}/server.rs (100%) rename src/http/{ => warp_implementation}/error.rs (100%) rename src/http/{ => warp_implementation}/filters.rs (98%) rename src/http/{ => warp_implementation}/handlers.rs (100%) create mode 100644 src/http/warp_implementation/mod.rs rename src/http/{ => warp_implementation}/request.rs (94%) rename src/http/{ => warp_implementation}/response.rs (100%) rename src/http/{ => warp_implementation}/routes.rs (100%) rename src/http/{ => warp_implementation}/server.rs (100%) diff --git a/src/http/axum/extractors.rs b/src/http/axum_implementation/extractors.rs similarity index 98% rename from src/http/axum/extractors.rs rename to src/http/axum_implementation/extractors.rs index 260c3e705..a1f3fad1e 100644 --- a/src/http/axum/extractors.rs +++ b/src/http/axum_implementation/extractors.rs @@ -135,7 +135,7 @@ where #[cfg(test)] mod tests { use super::AnnounceParams; - use crate::http::axum::query::Query; + use crate::http::axum_implementation::query::Query; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; diff --git a/src/http/axum/handlers.rs b/src/http/axum_implementation/handlers.rs similarity index 100% rename from src/http/axum/handlers.rs rename to src/http/axum_implementation/handlers.rs diff --git a/src/http/axum/mod.rs b/src/http/axum_implementation/mod.rs similarity index 100% rename from src/http/axum/mod.rs rename to src/http/axum_implementation/mod.rs diff --git a/src/http/axum/query.rs b/src/http/axum_implementation/query.rs similarity index 98% rename from 
src/http/axum/query.rs rename to src/http/axum_implementation/query.rs index 5037d5d0e..c7c20b22d 100644 --- a/src/http/axum/query.rs +++ b/src/http/axum_implementation/query.rs @@ -69,7 +69,7 @@ impl Query { #[cfg(test)] mod tests { use super::Query; - use crate::http::axum::query::Param; + use crate::http::axum_implementation::query::Param; #[test] fn it_should_parse_the_query_params_from_an_url_query_string() { diff --git a/src/http/axum/resources/mod.rs b/src/http/axum_implementation/resources/mod.rs similarity index 100% rename from src/http/axum/resources/mod.rs rename to src/http/axum_implementation/resources/mod.rs diff --git a/src/http/axum/resources/ok.rs b/src/http/axum_implementation/resources/ok.rs similarity index 100% rename from src/http/axum/resources/ok.rs rename to src/http/axum_implementation/resources/ok.rs diff --git a/src/http/axum/responses.rs b/src/http/axum_implementation/responses.rs similarity index 100% rename from src/http/axum/responses.rs rename to src/http/axum_implementation/responses.rs diff --git a/src/http/axum/routes.rs b/src/http/axum_implementation/routes.rs similarity index 100% rename from src/http/axum/routes.rs rename to src/http/axum_implementation/routes.rs diff --git a/src/http/axum/server.rs b/src/http/axum_implementation/server.rs similarity index 100% rename from src/http/axum/server.rs rename to src/http/axum_implementation/server.rs diff --git a/src/http/mod.rs b/src/http/mod.rs index 15f7abb52..039a2067b 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -11,18 +11,10 @@ //! 
use serde::{Deserialize, Serialize}; -pub mod axum; -pub mod error; -pub mod filters; -pub mod handlers; -pub mod percent_encoding; -pub mod request; -pub mod response; -pub mod routes; -pub mod server; -pub type Bytes = u64; -pub type WebResult = std::result::Result; +pub mod axum_implementation; +pub mod percent_encoding; +pub mod warp_implementation; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] pub enum Version { diff --git a/src/http/error.rs b/src/http/warp_implementation/error.rs similarity index 100% rename from src/http/error.rs rename to src/http/warp_implementation/error.rs diff --git a/src/http/filters.rs b/src/http/warp_implementation/filters.rs similarity index 98% rename from src/http/filters.rs rename to src/http/warp_implementation/filters.rs index e02eac523..176170330 100644 --- a/src/http/filters.rs +++ b/src/http/warp_implementation/filters.rs @@ -7,8 +7,8 @@ use std::sync::Arc; use warp::{reject, Filter, Rejection}; use super::error::Error; -use super::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use super::{request, WebResult}; +use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; use crate::tracker::{self, auth, peer}; diff --git a/src/http/handlers.rs b/src/http/warp_implementation/handlers.rs similarity index 100% rename from src/http/handlers.rs rename to src/http/warp_implementation/handlers.rs diff --git a/src/http/warp_implementation/mod.rs b/src/http/warp_implementation/mod.rs new file mode 100644 index 000000000..4fbfb48fb --- /dev/null +++ b/src/http/warp_implementation/mod.rs @@ -0,0 +1,12 @@ +use warp::Rejection; + +pub mod error; +pub mod filters; +pub mod handlers; +pub mod request; +pub mod response; +pub mod routes; +pub mod server; + +pub type Bytes = u64; +pub type WebResult = std::result::Result; diff --git a/src/http/request.rs 
b/src/http/warp_implementation/request.rs similarity index 94% rename from src/http/request.rs rename to src/http/warp_implementation/request.rs index bc549b698..f666b48c5 100644 --- a/src/http/request.rs +++ b/src/http/warp_implementation/request.rs @@ -2,7 +2,7 @@ use std::net::IpAddr; use serde::Deserialize; -use crate::http::Bytes; +use crate::http::warp_implementation::Bytes; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; diff --git a/src/http/response.rs b/src/http/warp_implementation/response.rs similarity index 100% rename from src/http/response.rs rename to src/http/warp_implementation/response.rs diff --git a/src/http/routes.rs b/src/http/warp_implementation/routes.rs similarity index 100% rename from src/http/routes.rs rename to src/http/warp_implementation/routes.rs diff --git a/src/http/server.rs b/src/http/warp_implementation/server.rs similarity index 100% rename from src/http/server.rs rename to src/http/warp_implementation/server.rs diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 6b069301d..aa96af884 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -7,8 +7,8 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use crate::config::HttpTracker; -use crate::http::axum::server; -use crate::http::server::Http; +use crate::http::axum_implementation::server; +use crate::http::warp_implementation::server::Http; use crate::http::Version; use crate::tracker; diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index c132d1e2c..04e4cdb45 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -6,7 +6,7 @@ use serde; use serde::Serialize; use thiserror::Error; -use crate::http::request::Announce; +use crate::http::warp_implementation::request::Announce; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time}; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; use crate::protocol::utils::ser_unix_time_value; @@ -599,7 +599,7 @@ mod test { mod 
torrent_peer_constructor_from_for_http_requests { use std::net::{IpAddr, Ipv4Addr}; - use crate::http::request::Announce; + use crate::http::warp_implementation::request::Announce; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer::{self, Peer}; From 4b8fbfbbc9dc33e72c7e592dbd74d3f1f206e36d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Feb 2023 14:23:17 +0000 Subject: [PATCH 0373/1003] refactor: the tracker is responsible for assigning the IP to peers --- src/http/warp_implementation/handlers.rs | 10 +- src/tracker/mod.rs | 113 +++++++++++++++++- src/tracker/peer.rs | 144 ++++------------------- src/udp/handlers.rs | 14 +-- 4 files changed, 151 insertions(+), 130 deletions(-) diff --git a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs index 229cb4587..0fd332cae 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; use std::convert::Infallible; -use std::net::IpAddr; +use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use std::sync::Arc; @@ -41,11 +41,15 @@ pub async fn handle_announce( auth_key: Option, tracker: Arc, ) -> WebResult { + debug!("http announce request: {:#?}", announce_request); + authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await?; - debug!("{:?}", announce_request); + // build the peer + let peer_ip = tracker.assign_ip_address_to_peer(&announce_request.peer_addr); + let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port); + let peer = peer::Peer::from_http_announce_request(&announce_request, &peer_socket_address); - let peer = peer::Peer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); let torrent_stats = tracker .update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer) .await; diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index acbf7d536..f31a71fbb 100644 --- 
a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -8,7 +8,7 @@ pub mod torrent; use std::collections::btree_map::Entry; use std::collections::BTreeMap; -use std::net::SocketAddr; +use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use std::sync::Arc; use std::time::Duration; @@ -76,6 +76,12 @@ impl Tracker { self.mode == mode::Mode::Listed || self.mode == mode::Mode::PrivateListed } + /// It assigns a socket address to the peer + #[must_use] + pub fn assign_ip_address_to_peer(&self, remote_client_ip: &IpAddr) -> IpAddr { + assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip()) + } + /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. @@ -378,6 +384,15 @@ impl Tracker { } } +#[must_use] +pub fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Option) -> IpAddr { + if let Some(host_ip) = tracker_external_ip.filter(|_| remote_client_ip.is_loopback()) { + host_ip + } else { + *remote_client_ip + } +} + #[cfg(test)] mod tests { use std::sync::Arc; @@ -424,4 +439,100 @@ mod tests { } ); } + + mod the_tracker_assigning_the_ip_to_the_peer { + + use std::net::{IpAddr, Ipv4Addr}; + + use crate::tracker::assign_ip_address_to_peer; + + #[test] + fn should_use_the_source_ip_instead_of_the_ip_in_the_announce_request() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + mod when_the_client_ip_is_a_ipv4_loopback_ip { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; + + use crate::tracker::assign_ip_address_to_peer; + + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + #[test] + fn 
it_should_use_the_external_tracker_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv6_ip() + { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + } + + mod when_client_ip_is_a_ipv6_loopback_ip { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; + + use crate::tracker::assign_ip_address_to_peer; + + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv4_ip() + { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, 
tracker_external_ip); + } + } + } } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 04e4cdb45..e824a0cbc 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -1,4 +1,4 @@ -use std::net::{IpAddr, SocketAddr}; +use std::net::SocketAddr; use std::panic::Location; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; @@ -29,16 +29,10 @@ pub struct Peer { impl Peer { #[must_use] - pub fn from_udp_announce_request( - announce_request: &aquatic_udp_protocol::AnnounceRequest, - remote_ip: IpAddr, - host_opt_ip: Option, - ) -> Self { - let peer_addr = Peer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); - + pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, peer_addr: &SocketAddr) -> Self { Peer { peer_id: Id(announce_request.peer_id.0), - peer_addr, + peer_addr: *peer_addr, updated: Current::now(), uploaded: announce_request.bytes_uploaded, downloaded: announce_request.bytes_downloaded, @@ -48,9 +42,7 @@ impl Peer { } #[must_use] - pub fn from_http_announce_request(announce_request: &Announce, remote_ip: IpAddr, host_opt_ip: Option) -> Self { - let peer_addr = Peer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); - + pub fn from_http_announce_request(announce_request: &Announce, peer_addr: &SocketAddr) -> Self { let event: AnnounceEvent = if let Some(event) = &announce_request.event { match event.as_ref() { "started" => AnnounceEvent::Started, @@ -65,7 +57,7 @@ impl Peer { #[allow(clippy::cast_possible_truncation)] Peer { peer_id: announce_request.peer_id, - peer_addr, + peer_addr: *peer_addr, updated: Current::now(), uploaded: NumberOfBytes(i128::from(announce_request.uploaded) as i64), downloaded: NumberOfBytes(i128::from(announce_request.downloaded) as i64), @@ -74,16 +66,6 @@ impl Peer { } } - // potentially substitute localhost ip with external ip - #[must_use] - pub fn 
peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: Option, port: u16) -> SocketAddr { - if let Some(host_ip) = host_opt_ip.filter(|_| remote_ip.is_loopback()) { - SocketAddr::new(host_ip, port) - } else { - SocketAddr::new(remote_ip, port) - } - } - #[must_use] pub fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped @@ -446,6 +428,7 @@ mod test { AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, }; + use crate::tracker::assign_ip_address_to_peer; use crate::tracker::peer::Peer; use crate::udp::connection_cookie::{into_connection_id, make}; @@ -498,7 +481,10 @@ mod test { let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); let announce_request = AnnounceRequestBuilder::default().into(); - let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port.0); + + let torrent_peer = Peer::from_udp_announce_request(&announce_request, &peer_socket_address); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); } @@ -508,99 +494,21 @@ mod test { let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); let announce_request = AnnounceRequestBuilder::default().into(); - let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); - - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); - } - - mod when_source_udp_ip_is_a_ipv_4_loopback_ip { - - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::str::FromStr; - - use crate::tracker::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; - use crate::tracker::peer::Peer; - - #[test] - fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { - let remote_ip = 
IpAddr::V4(Ipv4Addr::LOCALHOST); - let announce_request = AnnounceRequestBuilder::default().into(); - - let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); - - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); - } - - #[test] - fn it_should_use_the_external_host_ip_in_tracker_configuration_if_defined() { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); - let announce_request = AnnounceRequestBuilder::default().into(); - - let host_opt_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); - let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); - - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); - } - - #[test] - fn it_should_use_the_external_ip_in_tracker_configuration_if_defined_even_if_the_external_ip_is_an_ipv6_ip() { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); - let announce_request = AnnounceRequestBuilder::default().into(); + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port.0); - let host_opt_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); - let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, &peer_socket_address); - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); - } - } - - mod when_source_udp_ip_is_a_ipv6_loopback_ip { - - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::str::FromStr; - - use crate::tracker::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; - use crate::tracker::peer::Peer; - - #[test] - fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); - 
let announce_request = AnnounceRequestBuilder::default().into(); - - let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); - - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); - } - - #[test] - fn it_should_use_the_external_host_ip_in_tracker_configuration_if_defined() { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); - let announce_request = AnnounceRequestBuilder::default().into(); - - let host_opt_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); - let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); - - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); - } - - #[test] - fn it_should_use_the_external_ip_in_tracker_configuration_if_defined_even_if_the_external_ip_is_an_ipv4_ip() { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); - let announce_request = AnnounceRequestBuilder::default().into(); - - let host_opt_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); - let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); - - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); - } + assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); } } mod torrent_peer_constructor_from_for_http_requests { - use std::net::{IpAddr, Ipv4Addr}; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use crate::http::warp_implementation::request::Announce; use crate::protocol::info_hash::InfoHash; + use crate::tracker::assign_ip_address_to_peer; use crate::tracker::peer::{self, Peer}; fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> Announce { @@ -618,13 +526,16 @@ mod test { } #[test] - fn it_should_use_the_source_ip_in_the_udp_heder_as_the_peer_ip_address_ignoring_the_peer_ip_in_the_announce_request() { + fn 
it_should_use_the_source_ip_in_the_udp_header_as_the_peer_ip_address_ignoring_the_peer_ip_in_the_announce_request() { let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); let ip_in_announce_request = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); let announce_request = sample_http_announce_request(ip_in_announce_request, 8080); - let torrent_peer = Peer::from_http_announce_request(&announce_request, remote_ip, None); + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port); + + let torrent_peer = Peer::from_http_announce_request(&announce_request, &peer_socket_address); assert_eq!(torrent_peer.peer_addr.ip(), remote_ip); assert_ne!(torrent_peer.peer_addr.ip(), ip_in_announce_request); @@ -639,18 +550,13 @@ mod test { let announce_request = sample_http_announce_request(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), port_in_announce_request); - let torrent_peer = Peer::from_http_announce_request(&announce_request, remote_ip, None); + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port); + + let torrent_peer = Peer::from_http_announce_request(&announce_request, &peer_socket_address); assert_eq!(torrent_peer.peer_addr.port(), announce_request.port); assert_ne!(torrent_peer.peer_addr.port(), remote_port); } - - // todo: other cases are already covered by UDP cases. - // Code review: - // We should extract the method "peer_addr_from_ip_and_port_and_opt_host_ip" from TorrentPeer. - // It could be another service responsible for assigning the IP to the peer. - // So we can test that behavior independently from where you use it. - // We could also build the peer with the IP in the announce request and let the tracker decide - // wether it has to change it or not depending on tracker configuration. 
} } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index b36399f89..b6d4bed7b 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -6,6 +6,7 @@ use aquatic_udp_protocol::{ AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; +use log::debug; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; @@ -93,6 +94,8 @@ pub async fn handle_announce( announce_request: &AnnounceRequest, tracker: Arc, ) -> Result { + debug!("udp announce request: {:#?}", announce_request); + check(&remote_addr, &from_connection_id(&announce_request.connection_id))?; let wrapped_announce_request = AnnounceWrapper::new(announce_request); @@ -104,13 +107,10 @@ pub async fn handle_announce( source: (Arc::new(e) as Arc).into(), })?; - let peer = peer::Peer::from_udp_announce_request( - &wrapped_announce_request.announce_request, - remote_addr.ip(), - tracker.config.get_ext_ip(), - ); - - //let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer).await; + // build the peer + let peer_ip = tracker.assign_ip_address_to_peer(&remote_addr.ip()); + let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port.0); + let peer = peer::Peer::from_udp_announce_request(&wrapped_announce_request.announce_request, &peer_socket_address); let torrent_stats = tracker .update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer) From 05ea74177dd033aed5704131869b8ae60a223432 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Feb 2023 16:38:46 +0000 Subject: [PATCH 0374/1003] refactor: move code from domain to delivery layer --- src/http/warp_implementation/handlers.rs | 19 +- src/http/warp_implementation/mod.rs | 1 + 
src/http/warp_implementation/peer_builder.rs | 32 ++++ src/tracker/peer.rs | 181 +------------------ src/udp/handlers.rs | 9 +- src/udp/mod.rs | 1 + src/udp/peer_builder.rs | 18 ++ src/udp/request.rs | 15 -- 8 files changed, 63 insertions(+), 213 deletions(-) create mode 100644 src/http/warp_implementation/peer_builder.rs create mode 100644 src/udp/peer_builder.rs diff --git a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs index 0fd332cae..f914e7555 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; use std::convert::Infallible; -use std::net::{IpAddr, SocketAddr}; +use std::net::IpAddr; use std::panic::Location; use std::sync::Arc; @@ -10,6 +10,7 @@ use warp::{reject, Rejection, Reply}; use super::error::Error; use super::{request, response, WebResult}; +use crate::http::warp_implementation::peer_builder; use crate::protocol::info_hash::InfoHash; use crate::tracker::{self, auth, peer, statistics, torrent}; @@ -31,11 +32,9 @@ pub async fn authenticate( }) } -/// Handle announce request -/// /// # Errors /// -/// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. +/// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_announce_response`. 
pub async fn handle_announce( announce_request: request::Announce, auth_key: Option, @@ -45,10 +44,9 @@ pub async fn handle_announce( authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await?; - // build the peer let peer_ip = tracker.assign_ip_address_to_peer(&announce_request.peer_addr); - let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port); - let peer = peer::Peer::from_http_announce_request(&announce_request, &peer_socket_address); + + let peer = peer_builder::from_request(&announce_request, &peer_ip); let torrent_stats = tracker .update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer) @@ -57,9 +55,6 @@ pub async fn handle_announce( // get all torrent peers excluding the peer_addr let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; - let announce_interval = tracker.config.announce_interval; - - // send stats event match announce_request.peer_addr { IpAddr::V4(_) => { tracker.send_stats_event(statistics::Event::Tcp4Announce).await; @@ -73,13 +68,11 @@ pub async fn handle_announce( &announce_request, &torrent_stats, &peers, - announce_interval, + tracker.config.announce_interval, tracker.config.min_announce_interval, ) } -/// Handle scrape request -/// /// # Errors /// /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. 
diff --git a/src/http/warp_implementation/mod.rs b/src/http/warp_implementation/mod.rs index 4fbfb48fb..1dec73b29 100644 --- a/src/http/warp_implementation/mod.rs +++ b/src/http/warp_implementation/mod.rs @@ -3,6 +3,7 @@ use warp::Rejection; pub mod error; pub mod filters; pub mod handlers; +pub mod peer_builder; pub mod request; pub mod response; pub mod routes; diff --git a/src/http/warp_implementation/peer_builder.rs b/src/http/warp_implementation/peer_builder.rs new file mode 100644 index 000000000..70cf7b508 --- /dev/null +++ b/src/http/warp_implementation/peer_builder.rs @@ -0,0 +1,32 @@ +use std::net::{IpAddr, SocketAddr}; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + +use super::request::Announce; +use crate::protocol::clock::{Current, Time}; +use crate::tracker::peer::Peer; + +#[must_use] +pub fn from_request(announce_request: &Announce, peer_ip: &IpAddr) -> Peer { + let event: AnnounceEvent = if let Some(event) = &announce_request.event { + match event.as_ref() { + "started" => AnnounceEvent::Started, + "stopped" => AnnounceEvent::Stopped, + "completed" => AnnounceEvent::Completed, + _ => AnnounceEvent::None, + } + } else { + AnnounceEvent::None + }; + + #[allow(clippy::cast_possible_truncation)] + Peer { + peer_id: announce_request.peer_id, + peer_addr: SocketAddr::new(*peer_ip, announce_request.port), + updated: Current::now(), + uploaded: NumberOfBytes(i128::from(announce_request.uploaded) as i64), + downloaded: NumberOfBytes(i128::from(announce_request.downloaded) as i64), + left: NumberOfBytes(i128::from(announce_request.left) as i64), + event, + } +} diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index e824a0cbc..24cc99f9b 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -6,8 +6,7 @@ use serde; use serde::Serialize; use thiserror::Error; -use crate::http::warp_implementation::request::Announce; -use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time}; +use 
crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; use crate::protocol::utils::ser_unix_time_value; @@ -28,44 +27,6 @@ pub struct Peer { } impl Peer { - #[must_use] - pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, peer_addr: &SocketAddr) -> Self { - Peer { - peer_id: Id(announce_request.peer_id.0), - peer_addr: *peer_addr, - updated: Current::now(), - uploaded: announce_request.bytes_uploaded, - downloaded: announce_request.bytes_downloaded, - left: announce_request.bytes_left, - event: announce_request.event, - } - } - - #[must_use] - pub fn from_http_announce_request(announce_request: &Announce, peer_addr: &SocketAddr) -> Self { - let event: AnnounceEvent = if let Some(event) = &announce_request.event { - match event.as_ref() { - "started" => AnnounceEvent::Started, - "stopped" => AnnounceEvent::Stopped, - "completed" => AnnounceEvent::Completed, - _ => AnnounceEvent::None, - } - } else { - AnnounceEvent::None - }; - - #[allow(clippy::cast_possible_truncation)] - Peer { - peer_id: announce_request.peer_id, - peer_addr: *peer_addr, - updated: Current::now(), - uploaded: NumberOfBytes(i128::from(announce_request.uploaded) as i64), - downloaded: NumberOfBytes(i128::from(announce_request.downloaded) as i64), - left: NumberOfBytes(i128::from(announce_request.left) as i64), - event, - } - } - #[must_use] pub fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped @@ -419,144 +380,4 @@ mod test { ); } } - - mod torrent_peer_constructor_from_udp_requests { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - - use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, - }; - - use crate::tracker::assign_ip_address_to_peer; - use crate::tracker::peer::Peer; - use crate::udp::connection_cookie::{into_connection_id, make}; - - // todo: duplicate 
functions is PR 82. Remove duplication once both PR are merged. - - fn sample_ipv4_remote_addr() -> SocketAddr { - sample_ipv4_socket_address() - } - - fn sample_ipv4_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) - } - - struct AnnounceRequestBuilder { - request: AnnounceRequest, - } - - impl AnnounceRequestBuilder { - pub fn default() -> AnnounceRequestBuilder { - let client_ip = Ipv4Addr::new(126, 0, 0, 1); - let client_port = 8080; - let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); - - let default_request = AnnounceRequest { - connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), - transaction_id: TransactionId(0i32), - info_hash: info_hash_aquatic, - peer_id: AquaticPeerId(*b"-qB00000000000000000"), - bytes_downloaded: NumberOfBytes(0i64), - bytes_uploaded: NumberOfBytes(0i64), - bytes_left: NumberOfBytes(0i64), - event: AnnounceEvent::Started, - ip_address: Some(client_ip), - key: PeerKey(0u32), - peers_wanted: NumberOfPeers(1i32), - port: Port(client_port), - }; - AnnounceRequestBuilder { - request: default_request, - } - } - - pub fn into(self) -> AnnounceRequest { - self.request - } - } - - #[test] - fn it_should_use_the_udp_source_ip_as_the_peer_ip_address_instead_of_the_ip_in_the_announce_request() { - let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); - let announce_request = AnnounceRequestBuilder::default().into(); - - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); - let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port.0); - - let torrent_peer = Peer::from_udp_announce_request(&announce_request, &peer_socket_address); - - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); - } - - #[test] - fn it_should_always_use_the_port_in_the_announce_request_for_the_peer_port() { - let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); - let announce_request = AnnounceRequestBuilder::default().into(); - 
- let peer_ip = assign_ip_address_to_peer(&remote_ip, None); - let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port.0); - - let torrent_peer = Peer::from_udp_announce_request(&announce_request, &peer_socket_address); - - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); - } - } - - mod torrent_peer_constructor_from_for_http_requests { - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - - use crate::http::warp_implementation::request::Announce; - use crate::protocol::info_hash::InfoHash; - use crate::tracker::assign_ip_address_to_peer; - use crate::tracker::peer::{self, Peer}; - - fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> Announce { - Announce { - info_hash: InfoHash([0u8; 20]), - peer_addr, - downloaded: 0u64, - uploaded: 0u64, - peer_id: peer::Id(*b"-qB00000000000000000"), - port, - left: 0u64, - event: None, - compact: None, - } - } - - #[test] - fn it_should_use_the_source_ip_in_the_udp_header_as_the_peer_ip_address_ignoring_the_peer_ip_in_the_announce_request() { - let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); - - let ip_in_announce_request = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); - let announce_request = sample_http_announce_request(ip_in_announce_request, 8080); - - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); - let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port); - - let torrent_peer = Peer::from_http_announce_request(&announce_request, &peer_socket_address); - - assert_eq!(torrent_peer.peer_addr.ip(), remote_ip); - assert_ne!(torrent_peer.peer_addr.ip(), ip_in_announce_request); - } - - #[test] - fn it_should_always_use_the_port_in_the_announce_request_for_the_peer_port_ignoring_the_port_in_the_udp_header() { - let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); - let remote_port = 8080; - - let port_in_announce_request = 8081; - let announce_request = - sample_http_announce_request(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 
port_in_announce_request); - - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); - let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port); - - let torrent_peer = Peer::from_http_announce_request(&announce_request, &peer_socket_address); - - assert_eq!(torrent_peer.peer_addr.port(), announce_request.port); - assert_ne!(torrent_peer.peer_addr.port(), remote_port); - } - } } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index b6d4bed7b..53efa7ecc 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -11,8 +11,9 @@ use log::debug; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; -use crate::tracker::{self, peer, statistics}; +use crate::tracker::{self, statistics}; use crate::udp::error::Error; +use crate::udp::peer_builder; use crate::udp::request::AnnounceWrapper; pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { @@ -107,10 +108,9 @@ pub async fn handle_announce( source: (Arc::new(e) as Arc).into(), })?; - // build the peer let peer_ip = tracker.assign_ip_address_to_peer(&remote_addr.ip()); - let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port.0); - let peer = peer::Peer::from_udp_announce_request(&wrapped_announce_request.announce_request, &peer_socket_address); + + let peer = peer_builder::from_request(&wrapped_announce_request, &peer_ip); let torrent_stats = tracker .update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer) @@ -164,7 +164,6 @@ pub async fn handle_announce( }) }; - // send stats event match remote_addr { SocketAddr::V4(_) => { tracker.send_stats_event(statistics::Event::Udp4Announce).await; diff --git a/src/udp/mod.rs b/src/udp/mod.rs index 8b8c8c4f8..b6431f752 100644 --- a/src/udp/mod.rs +++ b/src/udp/mod.rs @@ -3,6 +3,7 @@ pub mod error; pub mod handlers; pub mod request; pub mod server; 
+pub mod peer_builder; pub type Bytes = u64; pub type Port = u16; diff --git a/src/udp/peer_builder.rs b/src/udp/peer_builder.rs new file mode 100644 index 000000000..84eae64f9 --- /dev/null +++ b/src/udp/peer_builder.rs @@ -0,0 +1,18 @@ +use std::net::{IpAddr, SocketAddr}; + +use super::request::AnnounceWrapper; +use crate::protocol::clock::{Current, Time}; +use crate::tracker::peer::{Id, Peer}; + +#[must_use] +pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> Peer { + Peer { + peer_id: Id(announce_wrapper.announce_request.peer_id.0), + peer_addr: SocketAddr::new(*peer_ip, announce_wrapper.announce_request.port.0), + updated: Current::now(), + uploaded: announce_wrapper.announce_request.bytes_uploaded, + downloaded: announce_wrapper.announce_request.bytes_downloaded, + left: announce_wrapper.announce_request.bytes_left, + event: announce_wrapper.announce_request.event, + } +} diff --git a/src/udp/request.rs b/src/udp/request.rs index c4326b291..28d75f860 100644 --- a/src/udp/request.rs +++ b/src/udp/request.rs @@ -2,21 +2,6 @@ use aquatic_udp_protocol::AnnounceRequest; use crate::protocol::info_hash::InfoHash; -// struct AnnounceRequest { -// pub connection_id: i64, -// pub transaction_id: i32, -// pub info_hash: InfoHash, -// pub peer_id: PeerId, -// pub bytes_downloaded: Bytes, -// pub bytes_uploaded: Bytes, -// pub bytes_left: Bytes, -// pub event: AnnounceEvent, -// pub ip_address: Option, -// pub key: u32, -// pub peers_wanted: u32, -// pub port: Port -// } - pub struct AnnounceWrapper { pub announce_request: AnnounceRequest, pub info_hash: InfoHash, From 156ac4d0c9bb9a734d586564de4eb24bac60f399 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Feb 2023 17:17:23 +0000 Subject: [PATCH 0375/1003] refactor: clean announce request handlers There is duplicate code in announce handlers for UDP and HTTP tracker. This change makes them more similar in order to extract the common part later. 
--- src/http/warp_implementation/handlers.rs | 16 ++++---- src/tracker/mod.rs | 2 +- src/udp/handlers.rs | 51 +++++++++++++----------- 3 files changed, 37 insertions(+), 32 deletions(-) diff --git a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs index f914e7555..2a0aa005c 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -42,20 +42,20 @@ pub async fn handle_announce( ) -> WebResult { debug!("http announce request: {:#?}", announce_request); - authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await?; + let info_hash = announce_request.info_hash; + let remote_client_ip = announce_request.peer_addr; - let peer_ip = tracker.assign_ip_address_to_peer(&announce_request.peer_addr); + authenticate(&info_hash, &auth_key, tracker.clone()).await?; + + let peer_ip = tracker.assign_ip_address_to_peer(&remote_client_ip); let peer = peer_builder::from_request(&announce_request, &peer_ip); - let torrent_stats = tracker - .update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer) - .await; + let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - // get all torrent peers excluding the peer_addr - let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; + let peers = tracker.get_other_peers(&info_hash, &peer.peer_addr).await; - match announce_request.peer_addr { + match remote_client_ip { IpAddr::V4(_) => { tracker.send_stats_event(statistics::Event::Tcp4Announce).await; } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index f31a71fbb..a6ea6d3b0 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -279,7 +279,7 @@ impl Tracker { } /// Get all torrent peers for a given torrent filtering out the peer with the client address - pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { + pub async fn get_other_peers(&self, info_hash: 
&InfoHash, client_addr: &SocketAddr) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 53efa7ecc..283041333 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -87,6 +87,18 @@ pub async fn handle_connect( Ok(response) } +/// # Errors +/// +/// Will return `Error` if unable to `authenticate_request`. +pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), Error> { + tracker + .authenticate_request(info_hash, &None) + .await + .map_err(|e| Error::TrackerError { + source: (Arc::new(e) as Arc).into(), + }) +} + /// # Errors /// /// If a error happens in the `handle_announce` function, it will just return the `ServerError`. @@ -101,25 +113,27 @@ pub async fn handle_announce( let wrapped_announce_request = AnnounceWrapper::new(announce_request); - tracker - .authenticate_request(&wrapped_announce_request.info_hash, &None) - .await - .map_err(|e| Error::TrackerError { - source: (Arc::new(e) as Arc).into(), - })?; + let info_hash = wrapped_announce_request.info_hash; + let remote_client_ip = remote_addr.ip(); + + authenticate(&info_hash, tracker.clone()).await?; - let peer_ip = tracker.assign_ip_address_to_peer(&remote_addr.ip()); + let peer_ip = tracker.assign_ip_address_to_peer(&remote_client_ip); let peer = peer_builder::from_request(&wrapped_announce_request, &peer_ip); - let torrent_stats = tracker - .update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer) - .await; + let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - // get all peers excluding the client_addr - let peers = tracker - .get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr) - .await; + let peers = tracker.get_other_peers(&info_hash, &peer.peer_addr).await; + + match remote_client_ip { + IpAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Udp4Announce).await; + } + IpAddr::V6(_) 
=> { + tracker.send_stats_event(statistics::Event::Udp6Announce).await; + } + } #[allow(clippy::cast_possible_truncation)] let announce_response = if remote_addr.is_ipv4() { @@ -164,15 +178,6 @@ pub async fn handle_announce( }) }; - match remote_addr { - SocketAddr::V4(_) => { - tracker.send_stats_event(statistics::Event::Udp4Announce).await; - } - SocketAddr::V6(_) => { - tracker.send_stats_event(statistics::Event::Udp6Announce).await; - } - } - Ok(announce_response) } From cecbc17352af2d61ba6c6aa6ebcfbb62283004f4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Feb 2023 18:16:49 +0000 Subject: [PATCH 0376/1003] refactor: extract duplicate code from announce request handlers --- src/http/warp_implementation/handlers.rs | 12 ++++-------- src/tracker/mod.rs | 20 +++++++++++++++++++- src/tracker/peer.rs | 6 +++++- src/udp/handlers.rs | 22 ++++++++++------------ src/udp/mod.rs | 2 +- 5 files changed, 39 insertions(+), 23 deletions(-) diff --git a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs index 2a0aa005c..fd927150f 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -47,13 +47,9 @@ pub async fn handle_announce( authenticate(&info_hash, &auth_key, tracker.clone()).await?; - let peer_ip = tracker.assign_ip_address_to_peer(&remote_client_ip); + let mut peer = peer_builder::from_request(&announce_request, &remote_client_ip); - let peer = peer_builder::from_request(&announce_request, &peer_ip); - - let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - - let peers = tracker.get_other_peers(&info_hash, &peer.peer_addr).await; + let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; match remote_client_ip { IpAddr::V4(_) => { @@ -66,8 +62,8 @@ pub async fn handle_announce( send_announce_response( &announce_request, - &torrent_stats, - &peers, + &response.swam_stats, + &response.peers, 
tracker.config.announce_interval, tracker.config.min_announce_interval, ) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index a6ea6d3b0..42dbec17c 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -17,6 +17,8 @@ use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; use self::error::Error; +use self::peer::Peer; +use self::torrent::SwamStats; use crate::config::Configuration; use crate::databases::driver::Driver; use crate::databases::{self, Database}; @@ -41,6 +43,11 @@ pub struct TorrentsMetrics { pub torrents: u64, } +pub struct AnnounceResponse { + pub peers: Vec, + pub swam_stats: SwamStats, +} + impl Tracker { /// # Errors /// @@ -76,7 +83,18 @@ impl Tracker { self.mode == mode::Mode::Listed || self.mode == mode::Mode::PrivateListed } - /// It assigns a socket address to the peer + /// It handles an announce request + pub async fn announce(&self, info_hash: &InfoHash, peer: &mut Peer, remote_client_ip: &IpAddr) -> AnnounceResponse { + peer.change_ip(&self.assign_ip_address_to_peer(remote_client_ip)); + + let swam_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + + let peers = self.get_other_peers(info_hash, &peer.peer_addr).await; + + AnnounceResponse { peers, swam_stats } + } + + /// It assigns an IP address to the peer #[must_use] pub fn assign_ip_address_to_peer(&self, remote_client_ip: &IpAddr) -> IpAddr { assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip()) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 24cc99f9b..7559463db 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -1,4 +1,4 @@ -use std::net::SocketAddr; +use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; @@ -31,6 +31,10 @@ impl Peer { pub fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } + + pub fn change_ip(&mut self, new_ip: &IpAddr) { + self.peer_addr = 
SocketAddr::new(*new_ip, self.peer_addr.port()); + } } #[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 283041333..8978beb70 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -118,13 +118,9 @@ pub async fn handle_announce( authenticate(&info_hash, tracker.clone()).await?; - let peer_ip = tracker.assign_ip_address_to_peer(&remote_client_ip); + let mut peer = peer_builder::from_request(&wrapped_announce_request, &remote_client_ip); - let peer = peer_builder::from_request(&wrapped_announce_request, &peer_ip); - - let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - - let peers = tracker.get_other_peers(&info_hash, &peer.peer_addr).await; + let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; match remote_client_ip { IpAddr::V4(_) => { @@ -140,9 +136,10 @@ pub async fn handle_announce( Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), - leechers: NumberOfPeers(i64::from(torrent_stats.leechers) as i32), - seeders: NumberOfPeers(i64::from(torrent_stats.seeders) as i32), - peers: peers + leechers: NumberOfPeers(i64::from(response.swam_stats.leechers) as i32), + seeders: NumberOfPeers(i64::from(response.swam_stats.seeders) as i32), + peers: response + .peers .iter() .filter_map(|peer| { if let IpAddr::V4(ip) = peer.peer_addr.ip() { @@ -160,9 +157,10 @@ pub async fn handle_announce( Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), - leechers: NumberOfPeers(i64::from(torrent_stats.leechers) as i32), - seeders: NumberOfPeers(i64::from(torrent_stats.seeders) as i32), - peers: peers + leechers: 
NumberOfPeers(i64::from(response.swam_stats.leechers) as i32), + seeders: NumberOfPeers(i64::from(response.swam_stats.seeders) as i32), + peers: response + .peers .iter() .filter_map(|peer| { if let IpAddr::V6(ip) = peer.peer_addr.ip() { diff --git a/src/udp/mod.rs b/src/udp/mod.rs index b6431f752..7b755a20b 100644 --- a/src/udp/mod.rs +++ b/src/udp/mod.rs @@ -1,9 +1,9 @@ pub mod connection_cookie; pub mod error; pub mod handlers; +pub mod peer_builder; pub mod request; pub mod server; -pub mod peer_builder; pub type Bytes = u64; pub type Port = u16; From 3b207954ce787a1cec1a0c442146adeea7f0b623 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Feb 2023 19:17:50 +0000 Subject: [PATCH 0377/1003] refactor: function does not need to be pub anymore --- src/tracker/mod.rs | 12 ++++++++---- src/tracker/torrent.rs | 12 ++++++------ 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 42dbec17c..989980828 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -89,7 +89,8 @@ impl Tracker { let swam_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - let peers = self.get_other_peers(info_hash, &peer.peer_addr).await; + // todo: remove peer by using its `Id` instead of its socket address: `get_peers_excluding_peer(peer_id: peer::Id)` + let peers = self.get_peers_excluding_peers_with_address(info_hash, &peer.peer_addr).await; AnnounceResponse { peers, swam_stats } } @@ -296,13 +297,16 @@ impl Tracker { Ok(()) } - /// Get all torrent peers for a given torrent filtering out the peer with the client address - pub async fn get_other_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { + async fn get_peers_excluding_peers_with_address( + &self, + info_hash: &InfoHash, + excluded_address: &SocketAddr, + ) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { None => vec![], - Some(entry) => 
entry.get_peers(Some(client_addr)).into_iter().copied().collect(), + Some(entry) => entry.get_peers(Some(excluded_address)).into_iter().copied().collect(), } } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index b5535a932..b7b79f0f5 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -49,22 +49,22 @@ impl Entry { } #[must_use] - pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&peer::Peer> { + pub fn get_peers(&self, optional_excluded_address: Option<&SocketAddr>) -> Vec<&peer::Peer> { self.peers .values() - .filter(|peer| match client_addr { + .filter(|peer| match optional_excluded_address { // Don't filter on ip_version None => true, // Filter out different ip_version from remote_addr - Some(remote_addr) => { + Some(excluded_address) => { // Skip ip address of client - if peer.peer_addr.ip() == remote_addr.ip() { + if peer.peer_addr.ip() == excluded_address.ip() { return false; } match peer.peer_addr.ip() { - IpAddr::V4(_) => remote_addr.is_ipv4(), - IpAddr::V6(_) => remote_addr.is_ipv6(), + IpAddr::V4(_) => excluded_address.is_ipv4(), + IpAddr::V6(_) => excluded_address.is_ipv6(), } } }) From 7fcc19d33a532c6ce8a8f06085a62cdea033b787 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Feb 2023 19:41:46 +0000 Subject: [PATCH 0378/1003] refactor: remove unneeded method and make another function private. 
--- src/tracker/mod.rs | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 989980828..48bd76128 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -85,7 +85,7 @@ impl Tracker { /// It handles an announce request pub async fn announce(&self, info_hash: &InfoHash, peer: &mut Peer, remote_client_ip: &IpAddr) -> AnnounceResponse { - peer.change_ip(&self.assign_ip_address_to_peer(remote_client_ip)); + peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip())); let swam_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; @@ -95,12 +95,6 @@ impl Tracker { AnnounceResponse { peers, swam_stats } } - /// It assigns an IP address to the peer - #[must_use] - pub fn assign_ip_address_to_peer(&self, remote_client_ip: &IpAddr) -> IpAddr { - assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip()) - } - /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. 
@@ -407,7 +401,7 @@ impl Tracker { } #[must_use] -pub fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Option) -> IpAddr { +fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Option) -> IpAddr { if let Some(host_ip) = tracker_external_ip.filter(|_| remote_client_ip.is_loopback()) { host_ip } else { From 03024e27971803ede2e96400ccfd532a80d53256 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 12 Feb 2023 19:24:16 +0000 Subject: [PATCH 0379/1003] refactor(http): extract function to get client IP on reverse proxy --- src/http/axum_implementation/handlers.rs | 29 ++++-- src/http/axum_implementation/mod.rs | 2 +- .../{extractors.rs => requests/announce.rs} | 28 +++--- src/http/axum_implementation/requests/mod.rs | 1 + src/http/handlers/announce.rs | 1 + src/http/handlers/mod.rs | 88 +++++++++++++++++++ src/http/mod.rs | 1 + src/http/warp_implementation/filters.rs | 50 +++++------ tests/http/asserts.rs | 20 +++++ tests/http_tracker.rs | 84 ++++++++++++++++++ 10 files changed, 255 insertions(+), 49 deletions(-) rename src/http/axum_implementation/{extractors.rs => requests/announce.rs} (86%) create mode 100644 src/http/axum_implementation/requests/mod.rs create mode 100644 src/http/handlers/announce.rs create mode 100644 src/http/handlers/mod.rs diff --git a/src/http/axum_implementation/handlers.rs b/src/http/axum_implementation/handlers.rs index 050fa8e69..f7c6ba8f9 100644 --- a/src/http/axum_implementation/handlers.rs +++ b/src/http/axum_implementation/handlers.rs @@ -2,8 +2,9 @@ use std::sync::Arc; use axum::extract::State; use axum::response::Json; +use log::debug; -use super::extractors::ExtractAnnounceParams; +use super::requests::announce::ExtractAnnounceRequest; use super::resources::ok::Ok; use super::responses::ok_response; use crate::tracker::Tracker; @@ -13,13 +14,29 @@ pub async fn get_status_handler() -> Json { ok_response() } -/// # Panics -/// -/// todo +/// WIP #[allow(clippy::unused_async)] pub 
async fn announce_handler( State(_tracker): State>, - ExtractAnnounceParams(_announce_params): ExtractAnnounceParams, + ExtractAnnounceRequest(announce_request): ExtractAnnounceRequest, ) -> Json { - todo!() + /* todo: + - Extract remote client ip from request + - Build the `Peer` + - Call the `tracker.announce` method + - Send event for stats + - Move response from Warp to shared mod + - Send response + */ + + // Sample announce URL used for debugging: + // http://0.0.0.0:7070/announce?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548 + + debug!("http announce request: {:#?}", announce_request); + + let info_hash = announce_request.info_hash; + + debug!("info_hash: {:#?}", &info_hash); + + ok_response() } diff --git a/src/http/axum_implementation/mod.rs b/src/http/axum_implementation/mod.rs index 9d96362df..9e5e07979 100644 --- a/src/http/axum_implementation/mod.rs +++ b/src/http/axum_implementation/mod.rs @@ -1,6 +1,6 @@ -pub mod extractors; pub mod handlers; pub mod query; +pub mod requests; pub mod resources; pub mod responses; pub mod routes; diff --git a/src/http/axum_implementation/extractors.rs b/src/http/axum_implementation/requests/announce.rs similarity index 86% rename from src/http/axum_implementation/extractors.rs rename to src/http/axum_implementation/requests/announce.rs index a1f3fad1e..004301744 100644 --- a/src/http/axum_implementation/extractors.rs +++ b/src/http/axum_implementation/requests/announce.rs @@ -7,15 +7,15 @@ use axum::http::request::Parts; use axum::http::StatusCode; use thiserror::Error; -use super::query::Query; +use crate::http::axum_implementation::query::Query; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::protocol::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; -pub struct ExtractAnnounceParams(pub AnnounceParams); +pub struct ExtractAnnounceRequest(pub Announce); 
#[derive(Debug, PartialEq)] -pub struct AnnounceParams { +pub struct Announce { pub info_hash: InfoHash, pub peer_id: peer::Id, pub port: u16, @@ -55,7 +55,7 @@ impl From for ParseAnnounceQueryError { } } -impl TryFrom for AnnounceParams { +impl TryFrom for Announce { type Error = ParseAnnounceQueryError; fn try_from(query: Query) -> Result { @@ -103,13 +103,15 @@ fn extract_port(query: &Query) -> Result { } #[async_trait] -impl FromRequestParts for ExtractAnnounceParams +impl FromRequestParts for ExtractAnnounceRequest where S: Send + Sync, { type Rejection = (StatusCode, &'static str); async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + // todo: error responses body should be bencoded + let raw_query = parts.uri.query(); if raw_query.is_none() { @@ -122,34 +124,34 @@ where return Err((StatusCode::BAD_REQUEST, "can't parse query params")); } - let announce_params = AnnounceParams::try_from(query.unwrap()); + let announce_request = Announce::try_from(query.unwrap()); - if announce_params.is_err() { + if announce_request.is_err() { return Err((StatusCode::BAD_REQUEST, "can't parse query params for announce request")); } - Ok(ExtractAnnounceParams(announce_params.unwrap())) + Ok(ExtractAnnounceRequest(announce_request.unwrap())) } } #[cfg(test)] mod tests { - use super::AnnounceParams; + use super::Announce; use crate::http::axum_implementation::query::Query; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; #[test] - fn announce_request_params_should_be_extracted_from_url_query_params() { + fn announce_request_should_be_extracted_from_url_query_params() { let raw_query = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548"; let query = raw_query.parse::().unwrap(); - let announce_params = AnnounceParams::try_from(query).unwrap(); + let announce_request = Announce::try_from(query).unwrap(); assert_eq!( - announce_params, - AnnounceParams { + announce_request, + Announce { 
info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), peer_id: "-qB00000000000000001".parse::().unwrap(), port: 17548, diff --git a/src/http/axum_implementation/requests/mod.rs b/src/http/axum_implementation/requests/mod.rs new file mode 100644 index 000000000..74894de33 --- /dev/null +++ b/src/http/axum_implementation/requests/mod.rs @@ -0,0 +1 @@ +pub mod announce; diff --git a/src/http/handlers/announce.rs b/src/http/handlers/announce.rs new file mode 100644 index 000000000..1f77cb921 --- /dev/null +++ b/src/http/handlers/announce.rs @@ -0,0 +1 @@ +pub fn handler() {} diff --git a/src/http/handlers/mod.rs b/src/http/handlers/mod.rs new file mode 100644 index 000000000..4481ddffd --- /dev/null +++ b/src/http/handlers/mod.rs @@ -0,0 +1,88 @@ +use std::net::{AddrParseError, IpAddr}; +use std::panic::Location; +use std::str::FromStr; + +use thiserror::Error; + +use crate::located_error::{Located, LocatedError}; + +pub mod announce; + +#[derive(Error, Debug)] +pub enum XForwardedForParseError { + #[error("Empty X-Forwarded-For header value, {location}")] + EmptyValue { location: &'static Location<'static> }, + + #[error("Invalid IP in X-Forwarded-For header: {source}")] + InvalidIp { source: LocatedError<'static, AddrParseError> }, +} + +impl From for XForwardedForParseError { + #[track_caller] + fn from(err: AddrParseError) -> Self { + Self::InvalidIp { + source: Located(err).into(), + } + } +} + +/// It extracts the last IP address from the `X-Forwarded-For` http header value. 
+/// +/// # Errors +/// +/// Will return and error if the last IP in the `X-Forwarded-For` header is not a valid IP +pub fn maybe_rightmost_forwarded_ip(x_forwarded_for_value: &str) -> Result { + let mut x_forwarded_for_raw = x_forwarded_for_value.to_string(); + + // Remove whitespace chars + x_forwarded_for_raw.retain(|c| !c.is_whitespace()); + + // Get all forwarded IP's in a vec + let x_forwarded_ips: Vec<&str> = x_forwarded_for_raw.split(',').collect(); + + match x_forwarded_ips.last() { + Some(last_ip) => match IpAddr::from_str(last_ip) { + Ok(ip) => Ok(ip), + Err(err) => Err(err.into()), + }, + None => Err(XForwardedForParseError::EmptyValue { + location: Location::caller(), + }), + } +} + +#[cfg(test)] +mod tests { + + use std::net::IpAddr; + use std::str::FromStr; + + use crate::http::handlers::maybe_rightmost_forwarded_ip; + + #[test] + fn the_last_forwarded_ip_can_be_parsed_from_the_the_corresponding_http_header() { + assert!(maybe_rightmost_forwarded_ip("").is_err()); + + assert!(maybe_rightmost_forwarded_ip("INVALID IP").is_err()); + + assert_eq!( + maybe_rightmost_forwarded_ip("2001:db8:85a3:8d3:1319:8a2e:370:7348").unwrap(), + IpAddr::from_str("2001:db8:85a3:8d3:1319:8a2e:370:7348").unwrap() + ); + + assert_eq!( + maybe_rightmost_forwarded_ip("203.0.113.195").unwrap(), + IpAddr::from_str("203.0.113.195").unwrap() + ); + + assert_eq!( + maybe_rightmost_forwarded_ip("203.0.113.195, 2001:db8:85a3:8d3:1319:8a2e:370:7348").unwrap(), + IpAddr::from_str("2001:db8:85a3:8d3:1319:8a2e:370:7348").unwrap() + ); + + assert_eq!( + maybe_rightmost_forwarded_ip("203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178").unwrap(), + IpAddr::from_str("150.172.238.178").unwrap() + ); + } +} diff --git a/src/http/mod.rs b/src/http/mod.rs index 039a2067b..1425afe07 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -13,6 +13,7 @@ use serde::{Deserialize, Serialize}; pub mod axum_implementation; +pub mod handlers; pub mod percent_encoding; pub mod 
warp_implementation; diff --git a/src/http/warp_implementation/filters.rs b/src/http/warp_implementation/filters.rs index 176170330..2a218491b 100644 --- a/src/http/warp_implementation/filters.rs +++ b/src/http/warp_implementation/filters.rs @@ -1,13 +1,13 @@ use std::convert::Infallible; use std::net::{IpAddr, SocketAddr}; use std::panic::Location; -use std::str::FromStr; use std::sync::Arc; use warp::{reject, Filter, Rejection}; use super::error::Error; use super::{request, WebResult}; +use crate::http::handlers::maybe_rightmost_forwarded_ip; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; @@ -138,41 +138,33 @@ fn peer_id(raw_query: &String) -> WebResult { } } -/// Get `PeerAddress` from `RemoteAddress` or Forwarded -fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option)) -> WebResult { - if !on_reverse_proxy && remote_addr.is_none() { - return Err(reject::custom(Error::AddressNotFound { - location: Location::caller(), - message: "neither on have remote address or on a reverse proxy".to_string(), - })); - } +/// Get peer IP from HTTP client IP or X-Forwarded-For HTTP header +fn peer_addr( + (on_reverse_proxy, remote_client_ip, maybe_x_forwarded_for): (bool, Option, Option), +) -> WebResult { + if on_reverse_proxy { + if maybe_x_forwarded_for.is_none() { + return Err(reject::custom(Error::AddressNotFound { + location: Location::caller(), + message: "must have a x-forwarded-for when using a reverse proxy".to_string(), + })); + } - if on_reverse_proxy && x_forwarded_for.is_none() { - return Err(reject::custom(Error::AddressNotFound { - location: Location::caller(), - message: "must have a x-forwarded-for when using a reverse proxy".to_string(), - })); - } + let x_forwarded_for = maybe_x_forwarded_for.unwrap(); - if on_reverse_proxy { - let mut x_forwarded_for_raw = x_forwarded_for.unwrap(); - // remove 
whitespace chars - x_forwarded_for_raw.retain(|c| !c.is_whitespace()); - // get all forwarded ip's in a vec - let x_forwarded_ips: Vec<&str> = x_forwarded_for_raw.split(',').collect(); - // set client ip to last forwarded ip - let x_forwarded_ip = *x_forwarded_ips.last().unwrap(); - - IpAddr::from_str(x_forwarded_ip).map_err(|e| { + maybe_rightmost_forwarded_ip(&x_forwarded_for).map_err(|e| { reject::custom(Error::AddressNotFound { location: Location::caller(), - message: format!( - "on remote proxy and unable to parse the last x-forwarded-ip: `{e}`, from `{x_forwarded_for_raw}`" - ), + message: format!("on remote proxy and unable to parse the last x-forwarded-ip: `{e}`, from `{x_forwarded_for}`"), }) }) + } else if remote_client_ip.is_none() { + return Err(reject::custom(Error::AddressNotFound { + location: Location::caller(), + message: "neither on have remote address or on a reverse proxy".to_string(), + })); } else { - Ok(remote_addr.unwrap().ip()) + return Ok(remote_client_ip.unwrap().ip()); } } diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 211a7bb33..8a1e2b554 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -127,3 +127,23 @@ pub async fn assert_invalid_authentication_key_error_response(response: Response assert_error_bencoded(&response.text().await.unwrap(), "is not valid", Location::caller()); } + +pub async fn assert_could_not_find_remote_address_on_xff_header_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_error_bencoded( + &response.text().await.unwrap(), + "could not find remote address: must have a x-forwarded-for when using a reverse proxy", + Location::caller(), + ); +} + +pub async fn assert_invalid_remote_address_on_xff_header_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_error_bencoded( + &response.text().await.unwrap(), + "could not find remote address: on remote proxy and unable to parse the last x-forwarded-ip", + 
Location::caller(), + ); +} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 60219d9fe..409c5d343 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -16,6 +16,47 @@ mod warp_http_tracker_server { mod for_all_config_modes { + mod running_on_reverse_proxy { + use torrust_tracker::http::Version; + + use crate::http::asserts::{ + assert_could_not_find_remote_address_on_xff_header_error_response, + assert_invalid_remote_address_on_xff_header_error_response, + }; + use crate::http::client::Client; + use crate::http::requests::announce::QueryBuilder; + use crate::http::server::start_http_tracker_on_reverse_proxy; + + #[tokio::test] + async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { + // If the tracker is running behind a reverse proxy, the peer IP is the + // last IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy client. + + let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Warp).await; + + let params = QueryBuilder::default().query().params(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_could_not_find_remote_address_on_xff_header_error_response(response).await; + } + + #[tokio::test] + async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { + let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Warp).await; + + let params = QueryBuilder::default().query().params(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") + .await; + + assert_invalid_remote_address_on_xff_header_error_response(response).await; + } + } + mod receiving_an_announce_request { // Announce request documentation: @@ -1243,6 +1284,49 @@ mod axum_http_tracker_server { mod for_all_config_modes { + mod and_running_on_reverse_proxy { + use 
torrust_tracker::http::Version; + + use crate::http::asserts::{ + assert_could_not_find_remote_address_on_xff_header_error_response, + assert_invalid_remote_address_on_xff_header_error_response, + }; + use crate::http::client::Client; + use crate::http::requests::announce::QueryBuilder; + use crate::http::server::start_http_tracker_on_reverse_proxy; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { + // If the tracker is running behind a reverse proxy, the peer IP is the + // last IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy client. + + let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; + + let params = QueryBuilder::default().query().params(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_could_not_find_remote_address_on_xff_header_error_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { + let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; + + let params = QueryBuilder::default().query().params(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") + .await; + + assert_invalid_remote_address_on_xff_header_error_response(response).await; + } + } + mod receiving_an_announce_request { // Announce request documentation: From d0c8eb07246431850a8c6bcfafb69d3fc3a1b83c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 13 Feb 2023 15:58:08 +0000 Subject: [PATCH 0380/1003] refactor(http): reorganize mods --- .../{handlers.rs => handlers/announce.rs} | 15 +++++---------- src/http/axum_implementation/handlers/mod.rs | 2 ++ src/http/axum_implementation/handlers/status.rs | 11 +++++++++++ 
src/http/axum_implementation/responses.rs | 10 ---------- src/http/axum_implementation/responses/mod.rs | 1 + src/http/axum_implementation/responses/ok.rs | 8 ++++++++ src/http/axum_implementation/routes.rs | 5 +++-- src/http/handlers/announce.rs | 1 - src/http/mod.rs | 1 - .../filter_helpers.rs} | 4 +--- src/http/warp_implementation/filters.rs | 2 +- src/http/warp_implementation/mod.rs | 5 +++-- 12 files changed, 35 insertions(+), 30 deletions(-) rename src/http/axum_implementation/{handlers.rs => handlers/announce.rs} (76%) create mode 100644 src/http/axum_implementation/handlers/mod.rs create mode 100644 src/http/axum_implementation/handlers/status.rs delete mode 100644 src/http/axum_implementation/responses.rs create mode 100644 src/http/axum_implementation/responses/mod.rs create mode 100644 src/http/axum_implementation/responses/ok.rs delete mode 100644 src/http/handlers/announce.rs rename src/http/{handlers/mod.rs => warp_implementation/filter_helpers.rs} (97%) diff --git a/src/http/axum_implementation/handlers.rs b/src/http/axum_implementation/handlers/announce.rs similarity index 76% rename from src/http/axum_implementation/handlers.rs rename to src/http/axum_implementation/handlers/announce.rs index f7c6ba8f9..71bd0a0e2 100644 --- a/src/http/axum_implementation/handlers.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -4,19 +4,14 @@ use axum::extract::State; use axum::response::Json; use log::debug; -use super::requests::announce::ExtractAnnounceRequest; -use super::resources::ok::Ok; -use super::responses::ok_response; +use crate::http::axum_implementation::requests::announce::ExtractAnnounceRequest; +use crate::http::axum_implementation::resources::ok::Ok; +use crate::http::axum_implementation::responses::ok; use crate::tracker::Tracker; -#[allow(clippy::unused_async)] -pub async fn get_status_handler() -> Json { - ok_response() -} - /// WIP #[allow(clippy::unused_async)] -pub async fn announce_handler( +pub async fn handle( 
State(_tracker): State>, ExtractAnnounceRequest(announce_request): ExtractAnnounceRequest, ) -> Json { @@ -38,5 +33,5 @@ pub async fn announce_handler( debug!("info_hash: {:#?}", &info_hash); - ok_response() + ok::response() } diff --git a/src/http/axum_implementation/handlers/mod.rs b/src/http/axum_implementation/handlers/mod.rs new file mode 100644 index 000000000..bff05984c --- /dev/null +++ b/src/http/axum_implementation/handlers/mod.rs @@ -0,0 +1,2 @@ +pub mod announce; +pub mod status; diff --git a/src/http/axum_implementation/handlers/status.rs b/src/http/axum_implementation/handlers/status.rs new file mode 100644 index 000000000..3e9c98466 --- /dev/null +++ b/src/http/axum_implementation/handlers/status.rs @@ -0,0 +1,11 @@ +/// Temporary handler for testing and debugging the new Axum implementation +/// It should be removed once the migration to Axum is finished. +use axum::response::Json; + +use crate::http::axum_implementation::resources::ok::Ok; +use crate::http::axum_implementation::responses::ok; + +#[allow(clippy::unused_async)] +pub async fn get_status_handler() -> Json { + ok::response() +} diff --git a/src/http/axum_implementation/responses.rs b/src/http/axum_implementation/responses.rs deleted file mode 100644 index 9c5896b35..000000000 --- a/src/http/axum_implementation/responses.rs +++ /dev/null @@ -1,10 +0,0 @@ -// Resource responses - -use axum::Json; - -use super::resources::ok::Ok; - -#[must_use] -pub fn ok_response() -> Json { - Json(Ok {}) -} diff --git a/src/http/axum_implementation/responses/mod.rs b/src/http/axum_implementation/responses/mod.rs new file mode 100644 index 000000000..a493c2ac2 --- /dev/null +++ b/src/http/axum_implementation/responses/mod.rs @@ -0,0 +1 @@ +pub mod ok; diff --git a/src/http/axum_implementation/responses/ok.rs b/src/http/axum_implementation/responses/ok.rs new file mode 100644 index 000000000..b08ea032f --- /dev/null +++ b/src/http/axum_implementation/responses/ok.rs @@ -0,0 +1,8 @@ +use axum::Json; + +use 
crate::http::axum_implementation::resources::ok::Ok; + +#[must_use] +pub fn response() -> Json { + Json(Ok {}) +} diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index 8e4980682..625d4656f 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -3,7 +3,8 @@ use std::sync::Arc; use axum::routing::get; use axum::Router; -use super::handlers::{announce_handler, get_status_handler}; +use super::handlers::announce::handle; +use super::handlers::status::get_status_handler; use crate::tracker::Tracker; pub fn router(tracker: &Arc) -> Router { @@ -11,5 +12,5 @@ pub fn router(tracker: &Arc) -> Router { // Status .route("/status", get(get_status_handler)) // Announce request - .route("/announce", get(announce_handler).with_state(tracker.clone())) + .route("/announce", get(handle).with_state(tracker.clone())) } diff --git a/src/http/handlers/announce.rs b/src/http/handlers/announce.rs deleted file mode 100644 index 1f77cb921..000000000 --- a/src/http/handlers/announce.rs +++ /dev/null @@ -1 +0,0 @@ -pub fn handler() {} diff --git a/src/http/mod.rs b/src/http/mod.rs index 1425afe07..039a2067b 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -13,7 +13,6 @@ use serde::{Deserialize, Serialize}; pub mod axum_implementation; -pub mod handlers; pub mod percent_encoding; pub mod warp_implementation; diff --git a/src/http/handlers/mod.rs b/src/http/warp_implementation/filter_helpers.rs similarity index 97% rename from src/http/handlers/mod.rs rename to src/http/warp_implementation/filter_helpers.rs index 4481ddffd..89188d868 100644 --- a/src/http/handlers/mod.rs +++ b/src/http/warp_implementation/filter_helpers.rs @@ -6,8 +6,6 @@ use thiserror::Error; use crate::located_error::{Located, LocatedError}; -pub mod announce; - #[derive(Error, Debug)] pub enum XForwardedForParseError { #[error("Empty X-Forwarded-For header value, {location}")] @@ -57,7 +55,7 @@ mod tests { use std::net::IpAddr; use 
std::str::FromStr; - use crate::http::handlers::maybe_rightmost_forwarded_ip; + use super::maybe_rightmost_forwarded_ip; #[test] fn the_last_forwarded_ip_can_be_parsed_from_the_the_corresponding_http_header() { diff --git a/src/http/warp_implementation/filters.rs b/src/http/warp_implementation/filters.rs index 2a218491b..fc8ef20bc 100644 --- a/src/http/warp_implementation/filters.rs +++ b/src/http/warp_implementation/filters.rs @@ -6,8 +6,8 @@ use std::sync::Arc; use warp::{reject, Filter, Rejection}; use super::error::Error; +use super::filter_helpers::maybe_rightmost_forwarded_ip; use super::{request, WebResult}; -use crate::http::handlers::maybe_rightmost_forwarded_ip; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; diff --git a/src/http/warp_implementation/mod.rs b/src/http/warp_implementation/mod.rs index 1dec73b29..2ceda2e68 100644 --- a/src/http/warp_implementation/mod.rs +++ b/src/http/warp_implementation/mod.rs @@ -1,6 +1,5 @@ -use warp::Rejection; - pub mod error; +pub mod filter_helpers; pub mod filters; pub mod handlers; pub mod peer_builder; @@ -9,5 +8,7 @@ pub mod response; pub mod routes; pub mod server; +use warp::Rejection; + pub type Bytes = u64; pub type WebResult = std::result::Result; From f327dcfa9139ff88b8582b79f0e07489187a7349 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 13 Feb 2023 19:45:17 +0000 Subject: [PATCH 0381/1003] fix(http): [#184] bencoded error responses for announce request HTTP tracker error responser must be bencoded. Fixed in the new Axum implementation. 
--- src/http/axum_implementation/query.rs | 2 +- .../axum_implementation/requests/announce.rs | 55 +++++++++++++------ .../axum_implementation/responses/error.rs | 40 ++++++++++++++ src/http/axum_implementation/responses/mod.rs | 1 + tests/http/asserts.rs | 50 +++++++++++++---- tests/http_tracker.rs | 53 ++++++++++-------- 6 files changed, 151 insertions(+), 50 deletions(-) create mode 100644 src/http/axum_implementation/responses/error.rs diff --git a/src/http/axum_implementation/query.rs b/src/http/axum_implementation/query.rs index c7c20b22d..3c9c676f1 100644 --- a/src/http/axum_implementation/query.rs +++ b/src/http/axum_implementation/query.rs @@ -45,7 +45,7 @@ impl FromStr for Param { fn from_str(raw_param: &str) -> Result { let pair = raw_param.split('=').collect::>(); - if pair.len() > 2 { + if pair.len() != 2 { return Err(ParseQueryError::InvalidParam { location: Location::caller(), raw_param: raw_param.to_owned(), diff --git a/src/http/axum_implementation/requests/announce.rs b/src/http/axum_implementation/requests/announce.rs index 004301744..34a9ad98a 100644 --- a/src/http/axum_implementation/requests/announce.rs +++ b/src/http/axum_implementation/requests/announce.rs @@ -4,10 +4,11 @@ use std::str::FromStr; use axum::async_trait; use axum::extract::FromRequestParts; use axum::http::request::Parts; -use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; use thiserror::Error; -use crate::http::axum_implementation::query::Query; +use crate::http::axum_implementation::query::{ParseQueryError, Query}; +use crate::http::axum_implementation::responses; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::protocol::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; @@ -23,17 +24,17 @@ pub struct Announce { #[derive(Error, Debug)] pub enum ParseAnnounceQueryError { - #[error("missing infohash {location}")] + #[error("missing info_hash param: {location}")] 
MissingInfoHash { location: &'static Location<'static> }, - #[error("invalid infohash {location}")] + #[error("invalid info_hash param: {location}")] InvalidInfoHash { location: &'static Location<'static> }, - #[error("missing peer id {location}")] + #[error("missing peer_id param: {location}")] MissingPeerId { location: &'static Location<'static> }, - #[error("invalid peer id {location}")] + #[error("invalid peer_id param: {location}")] InvalidPeerId { location: &'static Location<'static> }, - #[error("missing port {location}")] + #[error("missing port param: {location}")] MissingPort { location: &'static Location<'static> }, - #[error("invalid port {location}")] + #[error("invalid port param: {location}")] InvalidPort { location: &'static Location<'static> }, } @@ -49,12 +50,31 @@ impl From for ParseAnnounceQueryError { impl From for ParseAnnounceQueryError { #[track_caller] fn from(_err: ConversionError) -> Self { - Self::InvalidPeerId { + Self::InvalidInfoHash { location: Location::caller(), } } } +impl From for responses::error::Error { + fn from(err: ParseQueryError) -> Self { + responses::error::Error { + // code-review: should we expose error location in public HTTP tracker API? + // Error message example: "Cannot parse query params: invalid param a=b=c in src/http/axum_implementation/query.rs:50:27" + failure_reason: format!("Cannot parse query params: {err}"), + } + } +} + +impl From for responses::error::Error { + fn from(err: ParseAnnounceQueryError) -> Self { + responses::error::Error { + // code-review: should we expose error location in public HTTP tracker API? 
+ failure_reason: format!("Cannot parse query params for announce request: {err}"), + } + } +} + impl TryFrom for Announce { type Error = ParseAnnounceQueryError; @@ -107,27 +127,28 @@ impl FromRequestParts for ExtractAnnounceRequest where S: Send + Sync, { - type Rejection = (StatusCode, &'static str); + type Rejection = Response; async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { - // todo: error responses body should be bencoded - let raw_query = parts.uri.query(); if raw_query.is_none() { - return Err((StatusCode::BAD_REQUEST, "missing query params")); + return Err(responses::error::Error { + failure_reason: "missing query params for announce request".to_string(), + } + .into_response()); } let query = raw_query.unwrap().parse::(); - if query.is_err() { - return Err((StatusCode::BAD_REQUEST, "can't parse query params")); + if let Err(error) = query { + return Err(responses::error::Error::from(error).into_response()); } let announce_request = Announce::try_from(query.unwrap()); - if announce_request.is_err() { - return Err((StatusCode::BAD_REQUEST, "can't parse query params for announce request")); + if let Err(error) = announce_request { + return Err(responses::error::Error::from(error).into_response()); } Ok(ExtractAnnounceRequest(announce_request.unwrap())) diff --git a/src/http/axum_implementation/responses/error.rs b/src/http/axum_implementation/responses/error.rs new file mode 100644 index 000000000..bcf2aaa57 --- /dev/null +++ b/src/http/axum_implementation/responses/error.rs @@ -0,0 +1,40 @@ +use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; +use serde::{self, Serialize}; + +#[derive(Serialize)] +pub struct Error { + #[serde(rename = "failure reason")] + pub failure_reason: String, +} + +impl Error { + /// # Panics + /// + /// It would panic if the `Error` struct contained an inappropriate type. 
+ #[must_use] + pub fn write(&self) -> String { + serde_bencode::to_string(&self).unwrap() + } +} + +impl IntoResponse for Error { + fn into_response(self) -> Response { + (StatusCode::OK, self.write()).into_response() + } +} + +#[cfg(test)] +mod tests { + + use super::Error; + + #[test] + fn http_tracker_errors_can_be_bencoded() { + let err = Error { + failure_reason: "error message".to_owned(), + }; + + assert_eq!(err.write(), "d14:failure reason13:error messagee"); // cspell:disable-line + } +} diff --git a/src/http/axum_implementation/responses/mod.rs b/src/http/axum_implementation/responses/mod.rs index a493c2ac2..d55a66679 100644 --- a/src/http/axum_implementation/responses/mod.rs +++ b/src/http/axum_implementation/responses/mod.rs @@ -1 +1,2 @@ +pub mod error; pub mod ok; diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 8a1e2b554..e146f252d 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -6,7 +6,7 @@ use super::responses::announce::{Announce, Compact, DeserializedCompact}; use super::responses::scrape; use crate::http::responses::error::Error; -pub fn assert_error_bencoded(response_text: &String, expected_failure_reason: &str, location: &'static Location<'static>) { +pub fn assert_bencoded_error(response_text: &String, expected_failure_reason: &str, location: &'static Location<'static>) { let error_failure_reason = serde_bencode::from_str::(response_text) .unwrap_or_else(|_| panic!( "response body should be a valid bencoded string for the '{expected_failure_reason}' error, got \"{response_text}\"" @@ -18,7 +18,7 @@ pub fn assert_error_bencoded(response_text: &String, expected_failure_reason: &s error_failure_reason.contains(expected_failure_reason), r#": response: `"{error_failure_reason}"` - dose not contain: `"{expected_failure_reason}"`, {location}"# + does not contain: `"{expected_failure_reason}"`, {location}"# ); } @@ -83,13 +83,13 @@ pub async fn assert_is_announce_response(response: Response) { pub async fn 
assert_internal_server_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded(&response.text().await.unwrap(), "internal server", Location::caller()); + assert_bencoded_error(&response.text().await.unwrap(), "internal server", Location::caller()); } pub async fn assert_invalid_info_hash_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded( + assert_bencoded_error( &response.text().await.unwrap(), "no valid infohashes found", Location::caller(), @@ -99,7 +99,7 @@ pub async fn assert_invalid_info_hash_error_response(response: Response) { pub async fn assert_invalid_peer_id_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded( + assert_bencoded_error( &response.text().await.unwrap(), "peer_id is either missing or invalid", Location::caller(), @@ -109,13 +109,13 @@ pub async fn assert_invalid_peer_id_error_response(response: Response) { pub async fn assert_torrent_not_in_whitelist_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded(&response.text().await.unwrap(), "is not whitelisted", Location::caller()); + assert_bencoded_error(&response.text().await.unwrap(), "is not whitelisted", Location::caller()); } pub async fn assert_peer_not_authenticated_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded( + assert_bencoded_error( &response.text().await.unwrap(), "The peer is not authenticated", Location::caller(), @@ -125,13 +125,13 @@ pub async fn assert_peer_not_authenticated_error_response(response: Response) { pub async fn assert_invalid_authentication_key_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded(&response.text().await.unwrap(), "is not valid", Location::caller()); + assert_bencoded_error(&response.text().await.unwrap(), "is not valid", Location::caller()); } pub async fn 
assert_could_not_find_remote_address_on_xff_header_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded( + assert_bencoded_error( &response.text().await.unwrap(), "could not find remote address: must have a x-forwarded-for when using a reverse proxy", Location::caller(), @@ -141,9 +141,39 @@ pub async fn assert_could_not_find_remote_address_on_xff_header_error_response(r pub async fn assert_invalid_remote_address_on_xff_header_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded( + assert_bencoded_error( &response.text().await.unwrap(), "could not find remote address: on remote proxy and unable to parse the last x-forwarded-ip", Location::caller(), ); } + +// Specific errors for announce request + +pub async fn assert_missing_query_params_for_announce_request_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "missing query params for announce request", + Location::caller(), + ); +} + +pub async fn assert_bad_announce_request_error_response(response: Response, failure: &str) { + assert_cannot_parse_query_params_error_response(response, &format!(" for announce request: {failure}")).await; +} + +pub async fn assert_cannot_parse_query_param_error_response(response: Response, failure: &str) { + assert_cannot_parse_query_params_error_response(response, &format!(": {failure}")).await; +} + +pub async fn assert_cannot_parse_query_params_error_response(response: Response, failure: &str) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + &format!("Cannot parse query params{failure}"), + Location::caller(), + ); +} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 409c5d343..85494c301 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -110,7 +110,7 @@ mod warp_http_tracker_server { } #[tokio::test] - async fn 
should_fail_when_the_request_is_empty() { + async fn should_fail_when_the_url_query_component_is_empty() { let http_tracker_server = start_default_http_tracker(Version::Warp).await; let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; @@ -1351,9 +1351,10 @@ mod axum_http_tracker_server { use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; use crate::http::asserts::{ - assert_announce_response, assert_compact_announce_response, assert_empty_announce_response, - assert_internal_server_error_response, assert_invalid_info_hash_error_response, - assert_invalid_peer_id_error_response, assert_is_announce_response, + assert_announce_response, assert_bad_announce_request_error_response, + assert_cannot_parse_query_param_error_response, assert_cannot_parse_query_params_error_response, + assert_compact_announce_response, assert_empty_announce_response, assert_internal_server_error_response, + assert_is_announce_response, assert_missing_query_params_for_announce_request_error_response, }; use crate::http::client::Client; use crate::http::requests::announce::{Compact, QueryBuilder}; @@ -1380,18 +1381,29 @@ mod axum_http_tracker_server { assert_is_announce_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] - async fn should_fail_when_the_request_is_empty() { + #[tokio::test] + async fn should_fail_when_the_url_query_component_is_empty() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; - assert_internal_server_error_response(response).await; + assert_missing_query_params_for_announce_request_error_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] + async fn should_fail_when_url_query_parameters_are_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let invalid_query_param = "a=b=c"; + + let response = 
Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{invalid_query_param}")) + .await; + + assert_cannot_parse_query_param_error_response(response, "invalid param a=b=c").await; + } + + #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1405,7 +1417,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_invalid_info_hash_error_response(response).await; + assert_bad_announce_request_error_response(response, "missing info_hash param").await; // Without `peer_id` param @@ -1417,7 +1429,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_invalid_peer_id_error_response(response).await; + assert_bad_announce_request_error_response(response, "missing peer_id param").await; // Without `port` param @@ -1429,11 +1441,10 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_internal_server_error_response(response).await; + assert_bad_announce_request_error_response(response, "missing port param").await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1446,7 +1457,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_invalid_info_hash_error_response(response).await; + assert_cannot_parse_query_params_error_response(response, "").await; } } @@ -1511,8 +1522,7 @@ mod axum_http_tracker_server { } } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1534,12 +1544,11 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_invalid_peer_id_error_response(response).await; + 
assert_cannot_parse_query_params_error_response(response, "").await; } } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1554,7 +1563,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_internal_server_error_response(response).await; + assert_cannot_parse_query_params_error_response(response, "").await; } } From 831805743a4841903a65c55072e157d0cee78a39 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 14 Feb 2023 14:07:02 +0000 Subject: [PATCH 0382/1003] feat: [#184] add dependency: axum-client-ip It will be used to extract the right most IP in the X-Forwarded-For header when the tracer is running on reverse proxy. --- Cargo.lock | 28 ++++++++++++++++++++++++++++ Cargo.toml | 1 + 2 files changed, 29 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 8347362ab..6f9d9231b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -135,6 +135,17 @@ dependencies = [ "tower-service", ] +[[package]] +name = "axum-client-ip" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d719fabd6813392bbc10e1fe67f2977fad52791a836e51236f7e02f2482e017" +dependencies = [ + "axum", + "forwarded-header-value", + "serde", +] + [[package]] name = "axum-core" version = "0.3.0" @@ -706,6 +717,16 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "forwarded-header-value" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" +dependencies = [ + "nonempty", + "thiserror", +] + [[package]] name = "fragile" version = "2.0.0" @@ -1550,6 +1571,12 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "nonempty" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" + [[package]] name = "normalize-line-endings" version = "0.3.0" @@ -2832,6 +2859,7 @@ dependencies = [ "aquatic_udp_protocol", "async-trait", "axum", + "axum-client-ip", "axum-server", "binascii", "chrono", diff --git a/Cargo.toml b/Cargo.toml index cf90da8f1..75ffa7935 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -60,6 +60,7 @@ aquatic_udp_protocol = "0.2" uuid = { version = "1", features = ["v4"] } axum = "0.6.1" axum-server = { version = "0.4.4", features = ["tls-rustls"] } +axum-client-ip = "0.4.0" [dev-dependencies] From 42bd313c3ff7e1bf9351cb89aa7c9e4e70dcb170 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 14 Feb 2023 14:12:23 +0000 Subject: [PATCH 0383/1003] feat: [#184] calculate remote client ip depending on whether the tracker is running on reverse proxy or not Obtaining the remote peer client IP could be a complex task. See: https://adam-p.ca/blog/2022/03/x-forwarded-for/#multiple-headers We were using a custom function to extract the rigth most IP in the X-Forwarded-For HTTP header. This commit starts using an external crate for that. 
--- .../axum_implementation/handlers/announce.rs | 10 +- .../axum_implementation/handlers/status.rs | 5 +- src/http/axum_implementation/resources/ok.rs | 7 +- src/http/axum_implementation/responses/ok.rs | 9 +- src/http/axum_implementation/routes.rs | 8 ++ src/http/axum_implementation/server.rs | 4 +- tests/http_tracker.rs | 120 ++++++++++++++++-- 7 files changed, 144 insertions(+), 19 deletions(-) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 71bd0a0e2..9b373495d 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use axum::extract::State; use axum::response::Json; +use axum_client_ip::{InsecureClientIp, SecureClientIp}; use log::debug; use crate::http::axum_implementation::requests::announce::ExtractAnnounceRequest; @@ -14,6 +15,8 @@ use crate::tracker::Tracker; pub async fn handle( State(_tracker): State>, ExtractAnnounceRequest(announce_request): ExtractAnnounceRequest, + insecure_ip: InsecureClientIp, + secure_ip: SecureClientIp, ) -> Json { /* todo: - Extract remote client ip from request @@ -27,11 +30,12 @@ pub async fn handle( // Sample announce URL used for debugging: // http://0.0.0.0:7070/announce?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548 - debug!("http announce request: {:#?}", announce_request); - let info_hash = announce_request.info_hash; + debug!("http announce request: {:#?}", announce_request); debug!("info_hash: {:#?}", &info_hash); + debug!("remote client ip, insecure_ip: {:#?}", &insecure_ip); + debug!("remote client ip, secure_ip: {:#?}", &secure_ip); - ok::response() + ok::response(&insecure_ip.0, &secure_ip.0) } diff --git a/src/http/axum_implementation/handlers/status.rs b/src/http/axum_implementation/handlers/status.rs index 3e9c98466..37d88321c 100644 --- a/src/http/axum_implementation/handlers/status.rs 
+++ b/src/http/axum_implementation/handlers/status.rs @@ -1,11 +1,12 @@ /// Temporary handler for testing and debugging the new Axum implementation /// It should be removed once the migration to Axum is finished. use axum::response::Json; +use axum_client_ip::{InsecureClientIp, SecureClientIp}; use crate::http::axum_implementation::resources::ok::Ok; use crate::http::axum_implementation::responses::ok; #[allow(clippy::unused_async)] -pub async fn get_status_handler() -> Json { - ok::response() +pub async fn get_status_handler(insecure_ip: InsecureClientIp, secure_ip: SecureClientIp) -> Json { + ok::response(&insecure_ip.0, &secure_ip.0) } diff --git a/src/http/axum_implementation/resources/ok.rs b/src/http/axum_implementation/resources/ok.rs index adc56e6ea..4a3495d0f 100644 --- a/src/http/axum_implementation/resources/ok.rs +++ b/src/http/axum_implementation/resources/ok.rs @@ -1,4 +1,9 @@ +use std::net::IpAddr; + use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct Ok {} +pub struct Ok { + pub remote_client_insecure_ip: IpAddr, + pub remote_client_secure_ip: IpAddr, +} diff --git a/src/http/axum_implementation/responses/ok.rs b/src/http/axum_implementation/responses/ok.rs index b08ea032f..a2d61749d 100644 --- a/src/http/axum_implementation/responses/ok.rs +++ b/src/http/axum_implementation/responses/ok.rs @@ -1,8 +1,13 @@ +use std::net::IpAddr; + use axum::Json; use crate::http::axum_implementation::resources::ok::Ok; #[must_use] -pub fn response() -> Json { - Json(Ok {}) +pub fn response(remote_client_insecure_ip: &IpAddr, remote_client_secure_ip: &IpAddr) -> Json { + Json(Ok { + remote_client_insecure_ip: *remote_client_insecure_ip, + remote_client_secure_ip: *remote_client_secure_ip, + }) } diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index 625d4656f..a32a60ec0 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ 
-2,15 +2,23 @@ use std::sync::Arc; use axum::routing::get; use axum::Router; +use axum_client_ip::SecureClientIpSource; use super::handlers::announce::handle; use super::handlers::status::get_status_handler; use crate::tracker::Tracker; pub fn router(tracker: &Arc) -> Router { + let secure_client_ip_source = if tracker.config.on_reverse_proxy { + SecureClientIpSource::RightmostXForwardedFor + } else { + SecureClientIpSource::ConnectInfo + }; + Router::new() // Status .route("/status", get(get_status_handler)) // Announce request .route("/announce", get(handle).with_state(tracker.clone())) + .layer(secure_client_ip_source.into_extension()) } diff --git a/src/http/axum_implementation/server.rs b/src/http/axum_implementation/server.rs index 541dda33e..30c580af6 100644 --- a/src/http/axum_implementation/server.rs +++ b/src/http/axum_implementation/server.rs @@ -13,7 +13,7 @@ use crate::tracker::Tracker; pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { let app = router(tracker); - let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); + let server = axum::Server::bind(&socket_addr).serve(app.into_make_service_with_connect_info::()); server.with_graceful_shutdown(async move { tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); @@ -39,5 +39,5 @@ pub fn start_tls( axum_server::bind_rustls(socket_addr, ssl_config) .handle(handle) - .serve(app.into_make_service()) + .serve(app.into_make_service_with_connect_info::()) } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 85494c301..b1b7735dc 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -708,9 +708,6 @@ mod warp_http_tracker_server { let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); - // todo: shouldn't be the the leftmost IP address? - // THe application is taken the the rightmost IP address. 
See function http::filters::peer_addr - // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For client .announce_with_header( &announce_query, @@ -1266,6 +1263,8 @@ mod axum_http_tracker_server { // WIP: migration HTTP from Warp to Axum + use local_ip_address::local_ip; + use torrust_tracker::http::axum_implementation::resources::ok::Ok; use torrust_tracker::http::Version; use crate::http::client::Client; @@ -1274,12 +1273,118 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_the_status() { // This is a temporary test to test the new Axum HTTP tracker server scaffolding + let http_tracker_server = start_default_http_tracker(Version::Axum).await; - let response = Client::new(http_tracker_server.get_connection_info()).get("status").await; + let client_ip = local_ip().unwrap(); + + let response = Client::bind(http_tracker_server.get_connection_info(), client_ip) + .get("status") + .await; + + let ok: Ok = serde_json::from_str(&response.text().await.unwrap()).unwrap(); + + assert_eq!( + ok, + Ok { + remote_client_insecure_ip: client_ip, + remote_client_secure_ip: client_ip + } + ); + } + + mod should_get_the_remote_client_ip_from_the_http_request { + + // Temporary tests to test that the new Axum HTTP tracker gets the right remote client IP. + // Once the implementation is finished, test for announce request will cover these cases. 
+ + use std::net::IpAddr; + use std::str::FromStr; + + use local_ip_address::local_ip; + use torrust_tracker::http::axum_implementation::resources::ok::Ok; + use torrust_tracker::http::Version; + + use crate::http::client::Client; + use crate::http::server::{start_http_tracker_on_reverse_proxy, start_public_http_tracker}; + + #[tokio::test] + async fn when_the_client_ip_is_a_local_ip_it_should_assign_that_ip() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let client_ip = local_ip().unwrap(); + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let response = client.get("status").await; + + let ok: Ok = serde_json::from_str(&response.text().await.unwrap()).unwrap(); + + assert_eq!( + ok, + Ok { + remote_client_insecure_ip: client_ip, + remote_client_secure_ip: client_ip + } + ); + } - assert_eq!(response.status(), 200); - assert_eq!(response.text().await.unwrap(), "{}"); + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_that_ip() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let response = client.get("status").await; + + let ok: Ok = serde_json::from_str(&response.text().await.unwrap()).unwrap(); + + assert_eq!( + ok, + Ok { + remote_client_insecure_ip: client_ip, + remote_client_secure_ip: client_ip + } + ); + } + + #[tokio::test] + async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_as_secure_ip_the_right_most_ip_in_the_x_forwarded_for_http_header( + ) { + /* + client <-> http proxy <-> tracker <-> Internet + ip: header: config: remote client ip: + 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 + */ + + let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; + + 
let client = Client::new(http_tracker_server.get_connection_info()); + + let left_most_ip = IpAddr::from_str("203.0.113.195").unwrap(); + let right_most_ip = IpAddr::from_str("150.172.238.178").unwrap(); + + let response = client + .get_with_header( + "status", + "X-Forwarded-For", + &format!("{left_most_ip},2001:db8:85a3:8d3:1319:8a2e:370:7348,{right_most_ip}"), + ) + .await; + + let ok: Ok = serde_json::from_str(&response.text().await.unwrap()).unwrap(); + + assert_eq!( + ok, + Ok { + remote_client_insecure_ip: left_most_ip, + remote_client_secure_ip: right_most_ip + } + ); + } } mod for_all_config_modes { @@ -2014,9 +2119,6 @@ mod axum_http_tracker_server { let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); - // todo: shouldn't be the the leftmost IP address? - // THe application is taken the the rightmost IP address. See function http::filters::peer_addr - // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For client .announce_with_header( &announce_query, From 3eb7475100b2dc4bc99a8badb8ac225617e6cdd0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 14 Feb 2023 18:50:10 +0000 Subject: [PATCH 0384/1003] feat(http): [#184] normal (non-compact) announce response in axum tracker Implemeneted the normal (non-compact) announce response in the new Axum implementation for the HTTP tracker. Only for the tracker public mode and with only the mandatory announce request params. 
--- .../axum_implementation/handlers/announce.rs | 67 ++++++++------ .../axum_implementation/responses/announce.rs | 91 +++++++++++++++++++ src/http/axum_implementation/responses/mod.rs | 1 + src/tracker/mod.rs | 9 +- tests/http_tracker.rs | 38 +++----- 5 files changed, 154 insertions(+), 52 deletions(-) create mode 100644 src/http/axum_implementation/responses/announce.rs diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 9b373495d..3ae0b7334 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -1,41 +1,56 @@ +use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use axum::extract::State; -use axum::response::Json; -use axum_client_ip::{InsecureClientIp, SecureClientIp}; -use log::debug; +use axum::response::{IntoResponse, Response}; +use axum_client_ip::SecureClientIp; -use crate::http::axum_implementation::requests::announce::ExtractAnnounceRequest; -use crate::http::axum_implementation::resources::ok::Ok; -use crate::http::axum_implementation::responses::ok; -use crate::tracker::Tracker; +use crate::http::axum_implementation::requests::announce::{Announce, ExtractAnnounceRequest}; +use crate::http::axum_implementation::responses; +use crate::protocol::clock::{Current, Time}; +use crate::tracker::peer::Peer; +use crate::tracker::{statistics, Tracker}; /// WIP #[allow(clippy::unused_async)] pub async fn handle( - State(_tracker): State>, + State(tracker): State>, ExtractAnnounceRequest(announce_request): ExtractAnnounceRequest, - insecure_ip: InsecureClientIp, secure_ip: SecureClientIp, -) -> Json { - /* todo: - - Extract remote client ip from request - - Build the `Peer` - - Call the `tracker.announce` method - - Send event for stats - - Move response from Warp to shared mod - - Send response - */ - - // Sample announce URL used for debugging: - // 
http://0.0.0.0:7070/announce?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548 +) -> Response { + // todo: compact response and optional params let info_hash = announce_request.info_hash; + let remote_client_ip = secure_ip.0; - debug!("http announce request: {:#?}", announce_request); - debug!("info_hash: {:#?}", &info_hash); - debug!("remote client ip, insecure_ip: {:#?}", &insecure_ip); - debug!("remote client ip, secure_ip: {:#?}", &secure_ip); + let mut peer = peer_from_request(&announce_request, &remote_client_ip); - ok::response(&insecure_ip.0, &secure_ip.0) + let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; + + match remote_client_ip { + IpAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Tcp4Announce).await; + } + IpAddr::V6(_) => { + tracker.send_stats_event(statistics::Event::Tcp6Announce).await; + } + } + + responses::announce::Announce::from(response).into_response() +} + +#[must_use] +fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> Peer { + #[allow(clippy::cast_possible_truncation)] + Peer { + peer_id: announce_request.peer_id, + peer_addr: SocketAddr::new(*peer_ip, announce_request.port), + updated: Current::now(), + // todo: optional parameters not included in the announce request yet + uploaded: NumberOfBytes(i128::from(0) as i64), + downloaded: NumberOfBytes(i128::from(0) as i64), + left: NumberOfBytes(i128::from(0) as i64), + event: AnnounceEvent::None, + } } diff --git a/src/http/axum_implementation/responses/announce.rs b/src/http/axum_implementation/responses/announce.rs new file mode 100644 index 000000000..63ec74ac2 --- /dev/null +++ b/src/http/axum_implementation/responses/announce.rs @@ -0,0 +1,91 @@ +use std::net::IpAddr; + +use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; +use serde::{self, Deserialize, Serialize}; + +use crate::tracker::{self, AnnounceResponse}; + +#[derive(Serialize, 
Deserialize, Debug, PartialEq)] +pub struct Announce { + pub interval: u32, + #[serde(rename = "min interval")] + pub interval_min: u32, + pub complete: u32, + pub incomplete: u32, + pub peers: Vec, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Peer { + pub peer_id: String, + pub ip: IpAddr, + pub port: u16, +} + +impl From for Peer { + fn from(peer: tracker::peer::Peer) -> Self { + Peer { + peer_id: peer.peer_id.to_string(), + ip: peer.peer_addr.ip(), + port: peer.peer_addr.port(), + } + } +} + +impl Announce { + /// # Panics + /// + /// It would panic if the `Announce` struct contained an inappropriate type. + #[must_use] + pub fn write(&self) -> String { + serde_bencode::to_string(&self).unwrap() + } +} + +impl IntoResponse for Announce { + fn into_response(self) -> Response { + (StatusCode::OK, self.write()).into_response() + } +} + +impl From for Announce { + fn from(domain_announce_response: AnnounceResponse) -> Self { + let peers: Vec = domain_announce_response.peers.iter().map(|peer| Peer::from(*peer)).collect(); + + Self { + interval: domain_announce_response.interval, + interval_min: domain_announce_response.interval_min, + complete: domain_announce_response.swam_stats.seeders, + incomplete: domain_announce_response.swam_stats.leechers, + peers, + } + } +} + +#[cfg(test)] +mod tests { + + use std::net::IpAddr; + use std::str::FromStr; + + use super::{Announce, Peer}; + + #[test] + fn announce_response_can_be_bencoded() { + let response = Announce { + interval: 1, + interval_min: 2, + complete: 3, + incomplete: 4, + peers: vec![Peer { + peer_id: "-qB00000000000000001".to_string(), + ip: IpAddr::from_str("127.0.0.1").unwrap(), + port: 8080, + }], + }; + + // cspell:disable-next-line + assert_eq!(response.write(), "d8:completei3e10:incompletei4e8:intervali1e12:min intervali2e5:peersld2:ip9:127.0.0.17:peer_id20:-qB000000000000000014:porti8080eeee"); + } +} diff --git a/src/http/axum_implementation/responses/mod.rs 
b/src/http/axum_implementation/responses/mod.rs index d55a66679..ad7d0a78c 100644 --- a/src/http/axum_implementation/responses/mod.rs +++ b/src/http/axum_implementation/responses/mod.rs @@ -1,2 +1,3 @@ +pub mod announce; pub mod error; pub mod ok; diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 48bd76128..cb3bd0e96 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -46,6 +46,8 @@ pub struct TorrentsMetrics { pub struct AnnounceResponse { pub peers: Vec, pub swam_stats: SwamStats, + pub interval: u32, + pub interval_min: u32, } impl Tracker { @@ -92,7 +94,12 @@ impl Tracker { // todo: remove peer by using its `Id` instead of its socket address: `get_peers_excluding_peer(peer_id: peer::Id)` let peers = self.get_peers_excluding_peers_with_address(info_hash, &peer.peer_addr).await; - AnnounceResponse { peers, swam_stats } + AnnounceResponse { + peers, + swam_stats, + interval: self.config.announce_interval, + interval_min: self.config.min_announce_interval, + } } /// # Errors diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index b1b7735dc..c01e0c4ee 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1470,8 +1470,7 @@ mod axum_http_tracker_server { start_ipv6_http_tracker, start_public_http_tracker, }; - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1742,8 +1741,7 @@ mod axum_http_tracker_server { } } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; @@ -1768,8 +1766,7 @@ mod axum_http_tracker_server { .await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; @@ 
-1793,7 +1790,7 @@ mod axum_http_tracker_server { ) .await; - // It should only contain teh previously announced peer + // It should only contain the previously announced peer assert_announce_response( response, &Announce { @@ -1807,8 +1804,7 @@ mod axum_http_tracker_server { .await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; @@ -1872,8 +1868,7 @@ mod axum_http_tracker_server { assert_compact_announce_response(response, &expected_response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_not_return_the_compact_response_by_default() { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. @@ -1912,8 +1907,7 @@ mod axum_http_tracker_server { compact_announce.is_ok() } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; @@ -1926,8 +1920,7 @@ mod axum_http_tracker_server { assert_eq!(stats.tcp4_connections_handled, 1); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { let http_tracker_server = start_ipv6_http_tracker(Version::Axum).await; @@ -1960,8 +1953,7 @@ mod axum_http_tracker_server { assert_eq!(stats.tcp6_connections_handled, 0); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; @@ -1974,8 +1966,7 @@ mod axum_http_tracker_server { assert_eq!(stats.tcp4_announces_handled, 1); } - 
//#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { let http_tracker_server = start_ipv6_http_tracker(Version::Axum).await; @@ -2032,8 +2023,7 @@ mod axum_http_tracker_server { assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( ) { /* We assume that both the client and tracker share the same public IP. @@ -2065,8 +2055,7 @@ mod axum_http_tracker_server { assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( ) { /* We assume that both the client and tracker share the same public IP. @@ -2101,8 +2090,7 @@ mod axum_http_tracker_server { assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( ) { /* From b1612f6acef4ee717e77e19a6f72f9f539af620a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 Feb 2023 14:12:03 +0000 Subject: [PATCH 0385/1003] test(http): improve tests --- src/http/axum_implementation/query.rs | 184 +++++++++++++----- .../axum_implementation/requests/announce.rs | 62 ++++-- 2 files changed, 178 insertions(+), 68 deletions(-) diff --git a/src/http/axum_implementation/query.rs b/src/http/axum_implementation/query.rs index 3c9c676f1..cad58c17b 100644 --- a/src/http/axum_implementation/query.rs +++ b/src/http/axum_implementation/query.rs @@ -3,7 +3,16 @@ use std::panic::Location; use std::str::FromStr; use thiserror::Error; + +/// Represent a URL query component with some 
restrictions. +/// It does not allow duplicate param names like this: `param1=value1¶m1=value2` +/// It would take the second value for `param1`. pub struct Query { + /* code-review: + - Consider using `HashMap`, because it does not allow you to add a second value for the same param name. + - Consider using a third-party crate. + - Conversion from/to string is not deterministic. Params can be in a different order in the query string. + */ params: HashMap, } @@ -33,6 +42,38 @@ impl FromStr for Query { } } +impl From> for Query { + fn from(raw_params: Vec<(&str, &str)>) -> Self { + let mut params: HashMap = HashMap::new(); + + for raw_param in raw_params { + params.insert(raw_param.0.to_owned(), raw_param.1.to_owned()); + } + + Self { params } + } +} + +impl std::fmt::Display for Query { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let query = self + .params + .iter() + .map(|param| format!("{}", Param::new(param.0, param.1))) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl Query { + #[must_use] + pub fn get_param(&self, name: &str) -> Option { + self.params.get(name).map(std::string::ToString::to_string) + } +} + #[derive(Debug, PartialEq)] struct Param { name: String, @@ -59,80 +100,121 @@ impl FromStr for Param { } } -impl Query { - #[must_use] - pub fn get_param(&self, name: &str) -> Option { - self.params.get(name).map(std::string::ToString::to_string) +impl std::fmt::Display for Param { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}={}", self.name, self.value) + } +} + +impl Param { + pub fn new(name: &str, value: &str) -> Self { + Self { + name: name.to_owned(), + value: value.to_owned(), + } } } #[cfg(test)] mod tests { - use super::Query; - use crate::http::axum_implementation::query::Param; - #[test] - fn it_should_parse_the_query_params_from_an_url_query_string() { - let raw_query = 
"info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548"; + mod url_query { + use crate::http::axum_implementation::query::Query; - let query = raw_query.parse::().unwrap(); + #[test] + fn should_parse_the_query_params_from_an_url_query_string() { + let raw_query = + "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548"; - assert_eq!( - query.get_param("info_hash").unwrap(), - "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" - ); - assert_eq!(query.get_param("peer_id").unwrap(), "-qB00000000000000001"); - assert_eq!(query.get_param("port").unwrap(), "17548"); - } + let query = raw_query.parse::().unwrap(); - #[test] - fn it_should_fail_parsing_an_invalid_query_string() { - let invalid_raw_query = "name=value=value"; + assert_eq!( + query.get_param("info_hash").unwrap(), + "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + ); + assert_eq!(query.get_param("peer_id").unwrap(), "-qB00000000000000001"); + assert_eq!(query.get_param("port").unwrap(), "17548"); + } - let query = invalid_raw_query.parse::(); + #[test] + fn should_fail_parsing_an_invalid_query_string() { + let invalid_raw_query = "name=value=value"; - assert!(query.is_err()); - } + let query = invalid_raw_query.parse::(); - #[test] - fn it_should_ignore_the_preceding_question_mark_if_it_exists() { - let raw_query = "?name=value"; + assert!(query.is_err()); + } - let query = raw_query.parse::().unwrap(); + #[test] + fn should_ignore_the_preceding_question_mark_if_it_exists() { + let raw_query = "?name=value"; - assert_eq!(query.get_param("name").unwrap(), "value"); - } + let query = raw_query.parse::().unwrap(); - #[test] - fn it_should_trim_whitespaces() { - let raw_query = " name=value "; + assert_eq!(query.get_param("name").unwrap(), "value"); + } - let query = raw_query.parse::().unwrap(); + #[test] + fn should_trim_whitespaces() { + let raw_query = " name=value "; - 
assert_eq!(query.get_param("name").unwrap(), "value"); - } + let query = raw_query.parse::().unwrap(); + + assert_eq!(query.get_param("name").unwrap(), "value"); + } + + #[test] + fn should_be_instantiated_from_a_string_pair_vector() { + let query = Query::from(vec![("param1", "value1"), ("param2", "value2")]).to_string(); - #[test] - fn it_should_parse_a_single_query_param() { - let raw_param = "name=value"; + assert!(query == "param1=value1¶m2=value2" || query == "param2=value2¶m1=value1"); + } - let param = raw_param.parse::().unwrap(); + #[test] + fn should_not_allow_more_than_one_value_for_the_same_param() { + let query = Query::from(vec![("param1", "value1"), ("param1", "value2"), ("param1", "value3")]).to_string(); - assert_eq!( - param, - Param { - name: "name".to_string(), - value: "value".to_string(), - } - ); + assert_eq!(query, "param1=value3"); + } + + #[test] + fn should_be_displayed() { + let query = "param1=value1¶m2=value2".parse::().unwrap().to_string(); + + assert!(query == "param1=value1¶m2=value2" || query == "param2=value2¶m1=value1"); + } } - #[test] - fn it_should_fail_parsing_an_invalid_query_param() { - let invalid_raw_param = "name=value=value"; + mod url_query_param { + use crate::http::axum_implementation::query::Param; + + #[test] + fn should_parse_a_single_query_param() { + let raw_param = "name=value"; + + let param = raw_param.parse::().unwrap(); - let query = invalid_raw_param.parse::(); + assert_eq!( + param, + Param { + name: "name".to_string(), + value: "value".to_string(), + } + ); + } + + #[test] + fn should_fail_parsing_an_invalid_query_param() { + let invalid_raw_param = "name=value=value"; + + let query = invalid_raw_param.parse::(); - assert!(query.is_err()); + assert!(query.is_err()); + } + + #[test] + fn should_be_displayed() { + assert_eq!("name=value".parse::().unwrap().to_string(), "name=value"); + } } } diff --git a/src/http/axum_implementation/requests/announce.rs b/src/http/axum_implementation/requests/announce.rs 
index 34a9ad98a..b91945d0a 100644 --- a/src/http/axum_implementation/requests/announce.rs +++ b/src/http/axum_implementation/requests/announce.rs @@ -13,10 +13,13 @@ use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_pee use crate::protocol::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; +pub type Bytes = u64; + pub struct ExtractAnnounceRequest(pub Announce); #[derive(Debug, PartialEq)] pub struct Announce { + // Mandatory params pub info_hash: InfoHash, pub peer_id: peer::Id, pub port: u16, @@ -157,26 +160,51 @@ where #[cfg(test)] mod tests { - use super::Announce; - use crate::http::axum_implementation::query::Query; - use crate::protocol::info_hash::InfoHash; - use crate::tracker::peer; - #[test] - fn announce_request_should_be_extracted_from_url_query_params() { - let raw_query = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548"; + mod announce_request { + + use crate::http::axum_implementation::query::Query; + use crate::http::axum_implementation::requests::announce::Announce; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer; + + #[test] + fn should_be_instantiated_from_url_query_params() { + let raw_query = Query::from(vec![ + ("info_hash", "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + ("peer_id", "-qB00000000000000001"), + ("port", "17548"), + ]) + .to_string(); + + let query = raw_query.parse::().unwrap(); + + let announce_request = Announce::try_from(query).unwrap(); + + assert_eq!( + announce_request, + Announce { + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + peer_id: "-qB00000000000000001".parse::().unwrap(), + port: 17548, + } + ); + } + + #[test] + fn should_fail_instantiating_from_url_query_params_if_the_query_does_not_include_all_the_mandatory_params() { + let raw_query_without_info_hash = "peer_id=-qB00000000000000001&port=17548"; - let query = 
raw_query.parse::().unwrap(); + assert!(Announce::try_from(raw_query_without_info_hash.parse::().unwrap()).is_err()); - let announce_request = Announce::try_from(query).unwrap(); + let raw_query_without_peer_id = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&port=17548"; - assert_eq!( - announce_request, - Announce { - info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), - peer_id: "-qB00000000000000001".parse::().unwrap(), - port: 17548, - } - ); + assert!(Announce::try_from(raw_query_without_peer_id.parse::().unwrap()).is_err()); + + let raw_query_without_port = + "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001"; + + assert!(Announce::try_from(raw_query_without_port.parse::().unwrap()).is_err()); + } } } From 74ed59221ac22c3cd8fb0accf2a48375201d75d1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 Feb 2023 19:31:21 +0000 Subject: [PATCH 0386/1003] feat(http): [#184] added optional params to announce req in Axum implementation --- .../axum_implementation/handlers/announce.rs | 26 +- .../axum_implementation/requests/announce.rs | 407 +++++++++++++++--- tests/http_tracker.rs | 67 ++- 3 files changed, 401 insertions(+), 99 deletions(-) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 3ae0b7334..92bce5a4f 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -5,8 +5,9 @@ use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use axum::extract::State; use axum::response::{IntoResponse, Response}; use axum_client_ip::SecureClientIp; +use log::debug; -use crate::http::axum_implementation::requests::announce::{Announce, ExtractAnnounceRequest}; +use crate::http::axum_implementation::requests::announce::{Announce, Event, ExtractAnnounceRequest}; use crate::http::axum_implementation::responses; use crate::protocol::clock::{Current, Time}; use 
crate::tracker::peer::Peer; @@ -19,7 +20,7 @@ pub async fn handle( ExtractAnnounceRequest(announce_request): ExtractAnnounceRequest, secure_ip: SecureClientIp, ) -> Response { - // todo: compact response and optional params + debug!("http announce request: {:#?}", announce_request); let info_hash = announce_request.info_hash; let remote_client_ip = secure_ip.0; @@ -42,15 +43,24 @@ pub async fn handle( #[must_use] fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> Peer { - #[allow(clippy::cast_possible_truncation)] Peer { peer_id: announce_request.peer_id, peer_addr: SocketAddr::new(*peer_ip, announce_request.port), updated: Current::now(), - // todo: optional parameters not included in the announce request yet - uploaded: NumberOfBytes(i128::from(0) as i64), - downloaded: NumberOfBytes(i128::from(0) as i64), - left: NumberOfBytes(i128::from(0) as i64), - event: AnnounceEvent::None, + uploaded: NumberOfBytes(announce_request.uploaded.unwrap_or(0)), + downloaded: NumberOfBytes(announce_request.downloaded.unwrap_or(0)), + left: NumberOfBytes(announce_request.left.unwrap_or(0)), + event: map_to_aquatic_event(&announce_request.event), + } +} + +fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { + match event { + Some(event) => match &event { + Event::Started => aquatic_udp_protocol::AnnounceEvent::Started, + Event::Stopped => aquatic_udp_protocol::AnnounceEvent::Stopped, + Event::Completed => aquatic_udp_protocol::AnnounceEvent::Completed, + }, + None => aquatic_udp_protocol::AnnounceEvent::None, } } diff --git a/src/http/axum_implementation/requests/announce.rs b/src/http/axum_implementation/requests/announce.rs index b91945d0a..36e94a3fd 100644 --- a/src/http/axum_implementation/requests/announce.rs +++ b/src/http/axum_implementation/requests/announce.rs @@ -1,3 +1,4 @@ +use std::fmt; use std::panic::Location; use std::str::FromStr; @@ -10,60 +11,139 @@ use thiserror::Error; use crate::http::axum_implementation::query::{ParseQueryError, 
Query}; use crate::http::axum_implementation::responses; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; +use crate::located_error::{Located, LocatedError}; use crate::protocol::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; -pub type Bytes = u64; +pub type NumberOfBytes = i64; pub struct ExtractAnnounceRequest(pub Announce); +// Param names in the URL query +const INFO_HASH: &str = "info_hash"; +const PEER_ID: &str = "peer_id"; +const PORT: &str = "port"; +const DOWNLOADED: &str = "downloaded"; +const UPLOADED: &str = "uploaded"; +const LEFT: &str = "left"; +const EVENT: &str = "event"; +const COMPACT: &str = "compact"; + #[derive(Debug, PartialEq)] pub struct Announce { // Mandatory params pub info_hash: InfoHash, pub peer_id: peer::Id, pub port: u16, + // Optional params + pub downloaded: Option, + pub uploaded: Option, + pub left: Option, + pub event: Option, + pub compact: Option, } -#[derive(Error, Debug)] -pub enum ParseAnnounceQueryError { - #[error("missing info_hash param: {location}")] - MissingInfoHash { location: &'static Location<'static> }, - #[error("invalid info_hash param: {location}")] - InvalidInfoHash { location: &'static Location<'static> }, - #[error("missing peer_id param: {location}")] - MissingPeerId { location: &'static Location<'static> }, - #[error("invalid peer_id param: {location}")] - InvalidPeerId { location: &'static Location<'static> }, - #[error("missing port param: {location}")] - MissingPort { location: &'static Location<'static> }, - #[error("invalid port param: {location}")] - InvalidPort { location: &'static Location<'static> }, -} - -impl From for ParseAnnounceQueryError { - #[track_caller] - fn from(_err: IdConversionError) -> Self { - Self::InvalidPeerId { - location: Location::caller(), +#[derive(PartialEq, Debug)] +pub enum Event { + Started, + Stopped, + Completed, +} + +impl FromStr for Event { + type Err = ParseAnnounceQueryError; + 
+ fn from_str(raw_param: &str) -> Result { + match raw_param { + "started" => Ok(Self::Started), + "stopped" => Ok(Self::Stopped), + "completed" => Ok(Self::Completed), + _ => Err(ParseAnnounceQueryError::InvalidParam { + param_name: EVENT.to_owned(), + param_value: raw_param.to_owned(), + location: Location::caller(), + }), } } } -impl From for ParseAnnounceQueryError { - #[track_caller] - fn from(_err: ConversionError) -> Self { - Self::InvalidInfoHash { - location: Location::caller(), +impl fmt::Display for Event { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Event::Started => write!(f, "started"), + Event::Stopped => write!(f, "stopped"), + Event::Completed => write!(f, "completed"), + } + } +} + +#[derive(PartialEq, Debug)] +pub enum Compact { + Accepted = 1, + NotAccepted = 0, +} + +impl fmt::Display for Compact { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Compact::Accepted => write!(f, "1"), + Compact::NotAccepted => write!(f, "0"), } } } +impl FromStr for Compact { + type Err = ParseAnnounceQueryError; + + fn from_str(raw_param: &str) -> Result { + match raw_param { + "1" => Ok(Self::Accepted), + "0" => Ok(Self::NotAccepted), + _ => Err(ParseAnnounceQueryError::InvalidParam { + param_name: COMPACT.to_owned(), + param_value: raw_param.to_owned(), + location: Location::caller(), + }), + } + } +} + +#[derive(Error, Debug)] +pub enum ParseAnnounceQueryError { + #[error("missing param {param_name} in {location}")] + MissingParam { + location: &'static Location<'static>, + param_name: String, + }, + #[error("invalid param value {param_value} for {param_name} in {location}")] + InvalidParam { + param_name: String, + param_value: String, + location: &'static Location<'static>, + }, + #[error("param value overflow {param_value} for {param_name} in {location}")] + NumberOfBytesOverflow { + param_name: String, + param_value: String, + location: &'static Location<'static>, + }, + #[error("invalid param value 
{param_value} for {param_name} in {source}")] + InvalidInfoHashParam { + param_name: String, + param_value: String, + source: LocatedError<'static, ConversionError>, + }, + #[error("invalid param value {param_value} for {param_name} in {source}")] + InvalidPeerIdParam { + param_name: String, + param_value: String, + source: LocatedError<'static, IdConversionError>, + }, +} + impl From for responses::error::Error { fn from(err: ParseQueryError) -> Self { responses::error::Error { - // code-review: should we expose error location in public HTTP tracker API? - // Error message example: "Cannot parse query params: invalid param a=b=c in src/http/axum_implementation/query.rs:50:27" failure_reason: format!("Cannot parse query params: {err}"), } } @@ -72,7 +152,6 @@ impl From for responses::error::Error { impl From for responses::error::Error { fn from(err: ParseAnnounceQueryError) -> Self { responses::error::Error { - // code-review: should we expose error location in public HTTP tracker API? 
failure_reason: format!("Cannot parse query params for announce request: {err}"), } } @@ -86,45 +165,120 @@ impl TryFrom for Announce { info_hash: extract_info_hash(&query)?, peer_id: extract_peer_id(&query)?, port: extract_port(&query)?, + downloaded: extract_downloaded(&query)?, + uploaded: extract_uploaded(&query)?, + left: extract_left(&query)?, + event: extract_event(&query)?, + compact: extract_compact(&query)?, }) } } +// Mandatory params + fn extract_info_hash(query: &Query) -> Result { - match query.get_param("info_hash") { - Some(raw_info_hash) => Ok(percent_decode_info_hash(&raw_info_hash)?), + match query.get_param(INFO_HASH) { + Some(raw_param) => { + Ok( + percent_decode_info_hash(&raw_param).map_err(|err| ParseAnnounceQueryError::InvalidInfoHashParam { + param_name: INFO_HASH.to_owned(), + param_value: raw_param.clone(), + source: Located(err).into(), + })?, + ) + } None => { - return Err(ParseAnnounceQueryError::MissingInfoHash { + return Err(ParseAnnounceQueryError::MissingParam { location: Location::caller(), + param_name: INFO_HASH.to_owned(), }) } } } fn extract_peer_id(query: &Query) -> Result { - match query.get_param("peer_id") { - Some(raw_peer_id) => Ok(percent_decode_peer_id(&raw_peer_id)?), + match query.get_param(PEER_ID) { + Some(raw_param) => Ok( + percent_decode_peer_id(&raw_param).map_err(|err| ParseAnnounceQueryError::InvalidPeerIdParam { + param_name: PEER_ID.to_owned(), + param_value: raw_param.clone(), + source: Located(err).into(), + })?, + ), None => { - return Err(ParseAnnounceQueryError::MissingPeerId { + return Err(ParseAnnounceQueryError::MissingParam { location: Location::caller(), + param_name: PEER_ID.to_owned(), }) } } } fn extract_port(query: &Query) -> Result { - match query.get_param("port") { - Some(raw_port) => Ok(u16::from_str(&raw_port).map_err(|_e| ParseAnnounceQueryError::InvalidPort { + match query.get_param(PORT) { + Some(raw_param) => Ok(u16::from_str(&raw_param).map_err(|_e| 
ParseAnnounceQueryError::InvalidParam { + param_name: PORT.to_owned(), + param_value: raw_param.clone(), location: Location::caller(), })?), None => { - return Err(ParseAnnounceQueryError::MissingPort { + return Err(ParseAnnounceQueryError::MissingParam { location: Location::caller(), + param_name: PORT.to_owned(), }) } } } +// Optional params + +fn extract_downloaded(query: &Query) -> Result, ParseAnnounceQueryError> { + extract_number_of_bytes_from_param(DOWNLOADED, query) +} + +fn extract_uploaded(query: &Query) -> Result, ParseAnnounceQueryError> { + extract_number_of_bytes_from_param(UPLOADED, query) +} + +fn extract_left(query: &Query) -> Result, ParseAnnounceQueryError> { + extract_number_of_bytes_from_param(LEFT, query) +} + +fn extract_number_of_bytes_from_param(param_name: &str, query: &Query) -> Result, ParseAnnounceQueryError> { + match query.get_param(param_name) { + Some(raw_param) => { + let number_of_bytes = u64::from_str(&raw_param).map_err(|_e| ParseAnnounceQueryError::InvalidParam { + param_name: param_name.to_owned(), + param_value: raw_param.clone(), + location: Location::caller(), + })?; + + Ok(Some(i64::try_from(number_of_bytes).map_err(|_e| { + ParseAnnounceQueryError::NumberOfBytesOverflow { + param_name: param_name.to_owned(), + param_value: raw_param.clone(), + location: Location::caller(), + } + })?)) + } + None => Ok(None), + } +} + +fn extract_event(query: &Query) -> Result, ParseAnnounceQueryError> { + match query.get_param(EVENT) { + Some(raw_param) => Ok(Some(Event::from_str(&raw_param)?)), + None => Ok(None), + } +} + +fn extract_compact(query: &Query) -> Result, ParseAnnounceQueryError> { + match query.get_param(COMPACT) { + Some(raw_param) => Ok(Some(Compact::from_str(&raw_param)?)), + None => Ok(None), + } +} + #[async_trait] impl FromRequestParts for ExtractAnnounceRequest where @@ -164,16 +318,18 @@ mod tests { mod announce_request { use crate::http::axum_implementation::query::Query; - use 
crate::http::axum_implementation::requests::announce::Announce; + use crate::http::axum_implementation::requests::announce::{ + Announce, Compact, Event, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, + }; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; #[test] - fn should_be_instantiated_from_url_query_params() { + fn should_be_instantiated_from_the_url_query_with_only_the_mandatory_params() { let raw_query = Query::from(vec![ - ("info_hash", "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), - ("peer_id", "-qB00000000000000001"), - ("port", "17548"), + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), ]) .to_string(); @@ -187,24 +343,171 @@ mod tests { info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), peer_id: "-qB00000000000000001".parse::().unwrap(), port: 17548, + downloaded: None, + uploaded: None, + left: None, + event: None, + compact: None, } ); } #[test] - fn should_fail_instantiating_from_url_query_params_if_the_query_does_not_include_all_the_mandatory_params() { - let raw_query_without_info_hash = "peer_id=-qB00000000000000001&port=17548"; + fn should_be_instantiated_from_the_url_query_params() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (DOWNLOADED, "1"), + (UPLOADED, "2"), + (LEFT, "3"), + (EVENT, "started"), + (COMPACT, "0"), + ]) + .to_string(); + + let query = raw_query.parse::().unwrap(); + + let announce_request = Announce::try_from(query).unwrap(); + + assert_eq!( + announce_request, + Announce { + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + peer_id: "-qB00000000000000001".parse::().unwrap(), + port: 17548, + downloaded: Some(1), + uploaded: Some(2), + left: Some(3), + event: Some(Event::Started), + compact: Some(Compact::NotAccepted), + } + ); + } 
+ + mod when_it_is_instantiated_from_the_url_query_params { + + use crate::http::axum_implementation::query::Query; + use crate::http::axum_implementation::requests::announce::{ + Announce, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, + }; - assert!(Announce::try_from(raw_query_without_info_hash.parse::().unwrap()).is_err()); + #[test] + fn it_should_fail_if_the_query_does_not_include_all_the_mandatory_params() { + let raw_query_without_info_hash = "peer_id=-qB00000000000000001&port=17548"; - let raw_query_without_peer_id = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&port=17548"; + assert!(Announce::try_from(raw_query_without_info_hash.parse::().unwrap()).is_err()); - assert!(Announce::try_from(raw_query_without_peer_id.parse::().unwrap()).is_err()); + let raw_query_without_peer_id = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&port=17548"; - let raw_query_without_port = - "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001"; + assert!(Announce::try_from(raw_query_without_peer_id.parse::().unwrap()).is_err()); - assert!(Announce::try_from(raw_query_without_port.parse::().unwrap()).is_err()); + let raw_query_without_port = + "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001"; + + assert!(Announce::try_from(raw_query_without_port.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_info_hash_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "INVALID_INFO_HASH_VALUE"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_peer_id_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "INVALID_PEER_ID_VALUE"), + (PORT, "17548"), + ]) + .to_string(); + + 
assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_port_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "INVALID_PORT_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_downloaded_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (DOWNLOADED, "INVALID_DOWNLOADED_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_uploaded_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (UPLOADED, "INVALID_UPLOADED_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_left_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (LEFT, "INVALID_LEFT_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_event_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (EVENT, "INVALID_EVENT_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_compact_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, 
"%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (COMPACT, "INVALID_COMPACT_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } } } } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index c01e0c4ee..650bc447e 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1458,8 +1458,8 @@ mod axum_http_tracker_server { use crate::http::asserts::{ assert_announce_response, assert_bad_announce_request_error_response, assert_cannot_parse_query_param_error_response, assert_cannot_parse_query_params_error_response, - assert_compact_announce_response, assert_empty_announce_response, assert_internal_server_error_response, - assert_is_announce_response, assert_missing_query_params_for_announce_request_error_response, + assert_compact_announce_response, assert_empty_announce_response, assert_is_announce_response, + assert_missing_query_params_for_announce_request_error_response, }; use crate::http::client::Client; use crate::http::requests::announce::{Compact, QueryBuilder}; @@ -1521,7 +1521,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_bad_announce_request_error_response(response, "missing info_hash param").await; + assert_bad_announce_request_error_response(response, "missing param info_hash").await; // Without `peer_id` param @@ -1533,7 +1533,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_bad_announce_request_error_response(response, "missing peer_id param").await; + assert_bad_announce_request_error_response(response, "missing param peer_id").await; // Without `port` param @@ -1545,7 +1545,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_bad_announce_request_error_response(response, "missing port param").await; + assert_bad_announce_request_error_response(response, "missing param port").await; } #[tokio::test] @@ 
-1565,13 +1565,12 @@ mod axum_http_tracker_server { } } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_not_fail_when_the_peer_address_param_is_invalid() { // AnnounceQuery does not even contain the `peer_addr` // The peer IP is obtained in two ways: - // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP if there. - // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request header is tracker is running `on_reverse_proxy`. + // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. + // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1586,8 +1585,7 @@ mod axum_http_tracker_server { assert_is_announce_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1602,12 +1600,11 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_internal_server_error_response(response).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; } } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1622,7 +1619,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_internal_server_error_response(response).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; } } @@ -1648,7 +1645,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_cannot_parse_query_params_error_response(response, "").await; + assert_bad_announce_request_error_response(response, "invalid param value").await; } } @@ -1667,12 +1664,11 @@ 
mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_cannot_parse_query_params_error_response(response, "").await; + assert_bad_announce_request_error_response(response, "invalid param value").await; } } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1687,15 +1683,12 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_internal_server_error_response(response).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; } } - //#[tokio::test] - #[allow(dead_code)] - async fn should_not_fail_when_the_event_param_is_invalid() { - // All invalid values are ignored as if the `event` param were empty - + #[tokio::test] + async fn should_fail_when_the_event_param_is_invalid() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; let mut params = QueryBuilder::default().query().params(); @@ -1705,9 +1698,9 @@ mod axum_http_tracker_server { "-1", "1.1", "a", - "Started", // It should be lowercase - "Stopped", // It should be lowercase - "Completed", // It should be lowercase + "Started", // It should be lowercase to be valid: `started` + "Stopped", // It should be lowercase to be valid: `stopped` + "Completed", // It should be lowercase to be valid: `completed` ]; for invalid_value in invalid_values { @@ -1717,13 +1710,12 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_is_announce_response(response).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; } } - //#[tokio::test] - #[allow(dead_code)] - async fn should_not_fail_when_the_compact_param_is_invalid() { + #[tokio::test] + async fn should_fail_when_the_compact_param_is_invalid() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; let mut params = 
QueryBuilder::default().query().params(); @@ -1737,7 +1729,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_internal_server_error_response(response).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; } } @@ -1933,8 +1925,7 @@ mod axum_http_tracker_server { assert_eq!(stats.tcp6_connections_handled, 1); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. @@ -1979,8 +1970,7 @@ mod axum_http_tracker_server { assert_eq!(stats.tcp6_announces_handled, 1); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. @@ -1999,8 +1989,7 @@ mod axum_http_tracker_server { assert_eq!(stats.tcp6_announces_handled, 0); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; From 02e25168c77edf1e6ad4bfa17803ee18883c2c0d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 16 Feb 2023 20:54:40 +0000 Subject: [PATCH 0387/1003] feat(http): [#184] Axum extractor for peer IP It uses a wrapper for another extractor becuase that extractor cannot be optional. We need to get the rigth most IP in the X-Forwarded-For header only when the tracker is runnin gon reverse proxy. 
More info: https://github.com/imbolc/axum-client-ip/issues/9#issuecomment-1433039362 --- .../axum_implementation/extractors/mod.rs | 2 + .../axum_implementation/extractors/peer_ip.rs | 52 +++++++++++++++++++ .../extractors/remote_client_ip.rs | 51 ++++++++++++++++++ .../axum_implementation/handlers/announce.rs | 20 ++++--- .../axum_implementation/handlers/status.rs | 6 +-- src/http/axum_implementation/mod.rs | 1 + .../axum_implementation/requests/announce.rs | 8 +-- src/http/axum_implementation/resources/ok.rs | 7 ++- src/http/axum_implementation/responses/ok.rs | 8 ++- src/http/axum_implementation/routes.rs | 8 +-- tests/http_tracker.rs | 31 +++++++---- 11 files changed, 156 insertions(+), 38 deletions(-) create mode 100644 src/http/axum_implementation/extractors/mod.rs create mode 100644 src/http/axum_implementation/extractors/peer_ip.rs create mode 100644 src/http/axum_implementation/extractors/remote_client_ip.rs diff --git a/src/http/axum_implementation/extractors/mod.rs b/src/http/axum_implementation/extractors/mod.rs new file mode 100644 index 000000000..71ceea999 --- /dev/null +++ b/src/http/axum_implementation/extractors/mod.rs @@ -0,0 +1,2 @@ +pub mod peer_ip; +pub mod remote_client_ip; diff --git a/src/http/axum_implementation/extractors/peer_ip.rs b/src/http/axum_implementation/extractors/peer_ip.rs new file mode 100644 index 000000000..3f76dc67c --- /dev/null +++ b/src/http/axum_implementation/extractors/peer_ip.rs @@ -0,0 +1,52 @@ +use std::net::IpAddr; +use std::panic::Location; + +use axum::response::{IntoResponse, Response}; +use thiserror::Error; + +use super::remote_client_ip::RemoteClientIp; +use crate::http::axum_implementation::responses; + +#[derive(Error, Debug)] +pub enum ResolutionError { + #[error("missing the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration) in {location}")] + MissingRightMostXForwardedForIp { location: &'static Location<'static> }, + #[error("cannot get the client IP from the connection 
info in {location}")] + MissingClientIp { location: &'static Location<'static> }, +} + +impl From for responses::error::Error { + fn from(err: ResolutionError) -> Self { + responses::error::Error { + failure_reason: format!("{err}"), + } + } +} + +/// It resolves the peer IP. +/// +/// # Errors +/// +/// Will return an error if the peer IP cannot be obtained according to the configuration. +/// For example, if the IP is extracted from an HTTP header which is missing in the request. +pub fn peer_ip(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { + if on_reverse_proxy { + if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { + Ok(ip) + } else { + Err( + responses::error::Error::from(ResolutionError::MissingRightMostXForwardedForIp { + location: Location::caller(), + }) + .into_response(), + ) + } + } else if let Some(ip) = remote_client_ip.connection_info_ip { + Ok(ip) + } else { + Err(responses::error::Error::from(ResolutionError::MissingClientIp { + location: Location::caller(), + }) + .into_response()) + } +} diff --git a/src/http/axum_implementation/extractors/remote_client_ip.rs b/src/http/axum_implementation/extractors/remote_client_ip.rs new file mode 100644 index 000000000..7b6f3fed2 --- /dev/null +++ b/src/http/axum_implementation/extractors/remote_client_ip.rs @@ -0,0 +1,51 @@ +use std::net::{IpAddr, SocketAddr}; + +use axum::async_trait; +use axum::extract::{ConnectInfo, FromRequestParts}; +use axum::http::request::Parts; +use axum::response::Response; +use axum_client_ip::RightmostXForwardedFor; +use serde::{Deserialize, Serialize}; + +/// Given this request chain: +/// +/// client <-> http proxy 1 <-> http proxy 2 <-> server +/// ip: 126.0.0.1 ip: 126.0.0.2 ip: 126.0.0.3 ip: 126.0.0.4 +/// X-Forwarded-For: 126.0.0.1 X-Forwarded-For: 126.0.0.1,126.0.0.2 +/// +/// This extractor extracts these values from the HTTP headers and connection info. 
+/// +/// `right_most_x_forwarded_for` = 126.0.0.2 +/// `connection_info_ip` = 126.0.0.1 +/// +/// More info about inner extractors : +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] +pub struct RemoteClientIp { + pub right_most_x_forwarded_for: Option, + pub connection_info_ip: Option, +} + +#[async_trait] +impl FromRequestParts for RemoteClientIp +where + S: Send + Sync, +{ + type Rejection = Response; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + let right_most_x_forwarded_for = match RightmostXForwardedFor::from_request_parts(parts, state).await { + Ok(right_most_x_forwarded_for) => Some(right_most_x_forwarded_for.0), + Err(_) => None, + }; + + let connection_info_ip = match ConnectInfo::::from_request_parts(parts, state).await { + Ok(connection_info_socket_addr) => Some(connection_info_socket_addr.0.ip()), + Err(_) => None, + }; + + Ok(RemoteClientIp { + right_most_x_forwarded_for, + connection_info_ip, + }) + } +} diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 92bce5a4f..af70b87e7 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -4,32 +4,38 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use axum::extract::State; use axum::response::{IntoResponse, Response}; -use axum_client_ip::SecureClientIp; use log::debug; +use crate::http::axum_implementation::extractors::peer_ip::peer_ip; +use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::requests::announce::{Announce, Event, ExtractAnnounceRequest}; use crate::http::axum_implementation::responses; use crate::protocol::clock::{Current, Time}; use crate::tracker::peer::Peer; use crate::tracker::{statistics, Tracker}; -/// WIP #[allow(clippy::unused_async)] pub async fn handle( State(tracker): State>, 
ExtractAnnounceRequest(announce_request): ExtractAnnounceRequest, - secure_ip: SecureClientIp, + remote_client_ip: RemoteClientIp, ) -> Response { debug!("http announce request: {:#?}", announce_request); let info_hash = announce_request.info_hash; - let remote_client_ip = secure_ip.0; - let mut peer = peer_from_request(&announce_request, &remote_client_ip); + let peer_ip = peer_ip(tracker.config.on_reverse_proxy, &remote_client_ip); - let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; + let peer_ip = match peer_ip { + Ok(peer_ip) => peer_ip, + Err(err) => return err, + }; - match remote_client_ip { + let mut peer = peer_from_request(&announce_request, &peer_ip); + + let response = tracker.announce(&info_hash, &mut peer, &peer_ip).await; + + match peer_ip { IpAddr::V4(_) => { tracker.send_stats_event(statistics::Event::Tcp4Announce).await; } diff --git a/src/http/axum_implementation/handlers/status.rs b/src/http/axum_implementation/handlers/status.rs index 37d88321c..d4031aef5 100644 --- a/src/http/axum_implementation/handlers/status.rs +++ b/src/http/axum_implementation/handlers/status.rs @@ -1,12 +1,12 @@ /// Temporary handler for testing and debugging the new Axum implementation /// It should be removed once the migration to Axum is finished. 
use axum::response::Json; -use axum_client_ip::{InsecureClientIp, SecureClientIp}; +use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::resources::ok::Ok; use crate::http::axum_implementation::responses::ok; #[allow(clippy::unused_async)] -pub async fn get_status_handler(insecure_ip: InsecureClientIp, secure_ip: SecureClientIp) -> Json { - ok::response(&insecure_ip.0, &secure_ip.0) +pub async fn get_status_handler(remote_client_ip: RemoteClientIp) -> Json { + ok::response(&remote_client_ip) } diff --git a/src/http/axum_implementation/mod.rs b/src/http/axum_implementation/mod.rs index 9e5e07979..4b7d90e60 100644 --- a/src/http/axum_implementation/mod.rs +++ b/src/http/axum_implementation/mod.rs @@ -1,3 +1,4 @@ +pub mod extractors; pub mod handlers; pub mod query; pub mod requests; diff --git a/src/http/axum_implementation/requests/announce.rs b/src/http/axum_implementation/requests/announce.rs index 36e94a3fd..463df4fbe 100644 --- a/src/http/axum_implementation/requests/announce.rs +++ b/src/http/axum_implementation/requests/announce.rs @@ -110,6 +110,8 @@ impl FromStr for Compact { #[derive(Error, Debug)] pub enum ParseAnnounceQueryError { + #[error("missing query params for announce request in {location}")] + MissingParams { location: &'static Location<'static> }, #[error("missing param {param_name} in {location}")] MissingParam { location: &'static Location<'static>, @@ -290,9 +292,9 @@ where let raw_query = parts.uri.query(); if raw_query.is_none() { - return Err(responses::error::Error { - failure_reason: "missing query params for announce request".to_string(), - } + return Err(responses::error::Error::from(ParseAnnounceQueryError::MissingParams { + location: Location::caller(), + }) .into_response()); } diff --git a/src/http/axum_implementation/resources/ok.rs b/src/http/axum_implementation/resources/ok.rs index 4a3495d0f..f941b9fb3 100644 --- a/src/http/axum_implementation/resources/ok.rs 
+++ b/src/http/axum_implementation/resources/ok.rs @@ -1,9 +1,8 @@ -use std::net::IpAddr; - use serde::{Deserialize, Serialize}; +use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; + #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Ok { - pub remote_client_insecure_ip: IpAddr, - pub remote_client_secure_ip: IpAddr, + pub remote_client_ip: RemoteClientIp, } diff --git a/src/http/axum_implementation/responses/ok.rs b/src/http/axum_implementation/responses/ok.rs index a2d61749d..dfd062b51 100644 --- a/src/http/axum_implementation/responses/ok.rs +++ b/src/http/axum_implementation/responses/ok.rs @@ -1,13 +1,11 @@ -use std::net::IpAddr; - use axum::Json; +use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::resources::ok::Ok; #[must_use] -pub fn response(remote_client_insecure_ip: &IpAddr, remote_client_secure_ip: &IpAddr) -> Json { +pub fn response(remote_client_ip: &RemoteClientIp) -> Json { Json(Ok { - remote_client_insecure_ip: *remote_client_insecure_ip, - remote_client_secure_ip: *remote_client_secure_ip, + remote_client_ip: remote_client_ip.clone(), }) } diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index a32a60ec0..6138f5acf 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -9,16 +9,10 @@ use super::handlers::status::get_status_handler; use crate::tracker::Tracker; pub fn router(tracker: &Arc) -> Router { - let secure_client_ip_source = if tracker.config.on_reverse_proxy { - SecureClientIpSource::RightmostXForwardedFor - } else { - SecureClientIpSource::ConnectInfo - }; - Router::new() // Status .route("/status", get(get_status_handler)) // Announce request .route("/announce", get(handle).with_state(tracker.clone())) - .layer(secure_client_ip_source.into_extension()) + .layer(SecureClientIpSource::ConnectInfo.into_extension()) } diff --git 
a/tests/http_tracker.rs b/tests/http_tracker.rs index 650bc447e..ded30a0b4 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1264,6 +1264,7 @@ mod axum_http_tracker_server { // WIP: migration HTTP from Warp to Axum use local_ip_address::local_ip; + use torrust_tracker::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use torrust_tracker::http::axum_implementation::resources::ok::Ok; use torrust_tracker::http::Version; @@ -1287,8 +1288,10 @@ mod axum_http_tracker_server { assert_eq!( ok, Ok { - remote_client_insecure_ip: client_ip, - remote_client_secure_ip: client_ip + remote_client_ip: RemoteClientIp { + right_most_x_forwarded_for: None, + connection_info_ip: Some(client_ip) + } } ); } @@ -1302,6 +1305,7 @@ mod axum_http_tracker_server { use std::str::FromStr; use local_ip_address::local_ip; + use torrust_tracker::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use torrust_tracker::http::axum_implementation::resources::ok::Ok; use torrust_tracker::http::Version; @@ -1323,8 +1327,10 @@ mod axum_http_tracker_server { assert_eq!( ok, Ok { - remote_client_insecure_ip: client_ip, - remote_client_secure_ip: client_ip + remote_client_ip: RemoteClientIp { + right_most_x_forwarded_for: None, + connection_info_ip: Some(client_ip) + } } ); } @@ -1345,8 +1351,10 @@ mod axum_http_tracker_server { assert_eq!( ok, Ok { - remote_client_insecure_ip: client_ip, - remote_client_secure_ip: client_ip + remote_client_ip: RemoteClientIp { + right_most_x_forwarded_for: None, + connection_info_ip: Some(client_ip) + } } ); } @@ -1362,7 +1370,10 @@ mod axum_http_tracker_server { let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; - let client = Client::new(http_tracker_server.get_connection_info()); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); let left_most_ip = 
IpAddr::from_str("203.0.113.195").unwrap(); let right_most_ip = IpAddr::from_str("150.172.238.178").unwrap(); @@ -1380,8 +1391,10 @@ mod axum_http_tracker_server { assert_eq!( ok, Ok { - remote_client_insecure_ip: left_most_ip, - remote_client_secure_ip: right_most_ip + remote_client_ip: RemoteClientIp { + right_most_x_forwarded_for: Some(right_most_ip), + connection_info_ip: Some(client_ip) + } } ); } From 99dbbe41f6576e9f075ba1bfa82c4361da5c0c38 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 16 Feb 2023 21:43:33 +0000 Subject: [PATCH 0388/1003] refactor(http): [#184] extract announce service in Axum tracker --- .../axum_implementation/extractors/peer_ip.rs | 2 +- .../axum_implementation/handlers/announce.rs | 24 +++++-------------- src/http/axum_implementation/mod.rs | 1 + .../axum_implementation/services/announce.rs | 24 +++++++++++++++++++ src/http/axum_implementation/services/mod.rs | 1 + src/http/warp_implementation/handlers.rs | 3 +++ 6 files changed, 36 insertions(+), 19 deletions(-) create mode 100644 src/http/axum_implementation/services/announce.rs create mode 100644 src/http/axum_implementation/services/mod.rs diff --git a/src/http/axum_implementation/extractors/peer_ip.rs b/src/http/axum_implementation/extractors/peer_ip.rs index 3f76dc67c..7d615d0dc 100644 --- a/src/http/axum_implementation/extractors/peer_ip.rs +++ b/src/http/axum_implementation/extractors/peer_ip.rs @@ -29,7 +29,7 @@ impl From for responses::error::Error { /// /// Will return an error if the peer IP cannot be obtained according to the configuration. /// For example, if the IP is extracted from an HTTP header which is missing in the request. 
-pub fn peer_ip(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { +pub fn assign_ip_address_to_peer(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { if on_reverse_proxy { if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { Ok(ip) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index af70b87e7..1fb111b8b 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -6,13 +6,13 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use crate::http::axum_implementation::extractors::peer_ip::peer_ip; +use crate::http::axum_implementation::extractors::peer_ip::assign_ip_address_to_peer; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::requests::announce::{Announce, Event, ExtractAnnounceRequest}; -use crate::http::axum_implementation::responses; +use crate::http::axum_implementation::{responses, services}; use crate::protocol::clock::{Current, Time}; use crate::tracker::peer::Peer; -use crate::tracker::{statistics, Tracker}; +use crate::tracker::Tracker; #[allow(clippy::unused_async)] pub async fn handle( @@ -22,31 +22,19 @@ pub async fn handle( ) -> Response { debug!("http announce request: {:#?}", announce_request); - let info_hash = announce_request.info_hash; - - let peer_ip = peer_ip(tracker.config.on_reverse_proxy, &remote_client_ip); - - let peer_ip = match peer_ip { + let peer_ip = match assign_ip_address_to_peer(tracker.config.on_reverse_proxy, &remote_client_ip) { Ok(peer_ip) => peer_ip, Err(err) => return err, }; let mut peer = peer_from_request(&announce_request, &peer_ip); - let response = tracker.announce(&info_hash, &mut peer, &peer_ip).await; - - match peer_ip { - IpAddr::V4(_) => { - tracker.send_stats_event(statistics::Event::Tcp4Announce).await; - } - IpAddr::V6(_) 
=> { - tracker.send_stats_event(statistics::Event::Tcp6Announce).await; - } - } + let response = services::announce::invoke(tracker.clone(), announce_request.info_hash, &mut peer).await; responses::announce::Announce::from(response).into_response() } +/// It ignores the peer address in the announce request params. #[must_use] fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> Peer { Peer { diff --git a/src/http/axum_implementation/mod.rs b/src/http/axum_implementation/mod.rs index 4b7d90e60..d8431457a 100644 --- a/src/http/axum_implementation/mod.rs +++ b/src/http/axum_implementation/mod.rs @@ -6,3 +6,4 @@ pub mod resources; pub mod responses; pub mod routes; pub mod server; +pub mod services; diff --git a/src/http/axum_implementation/services/announce.rs b/src/http/axum_implementation/services/announce.rs new file mode 100644 index 000000000..9481354ba --- /dev/null +++ b/src/http/axum_implementation/services/announce.rs @@ -0,0 +1,24 @@ +use std::net::IpAddr; +use std::sync::Arc; + +use crate::protocol::info_hash::InfoHash; +use crate::tracker::peer::Peer; +use crate::tracker::{statistics, AnnounceResponse, Tracker}; + +pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) -> AnnounceResponse { + let original_peer_ip = peer.peer_addr.ip(); + + // The tracker could change the original peer ip + let response = tracker.announce(&info_hash, peer, &original_peer_ip).await; + + match original_peer_ip { + IpAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Tcp4Announce).await; + } + IpAddr::V6(_) => { + tracker.send_stats_event(statistics::Event::Tcp6Announce).await; + } + } + + response +} diff --git a/src/http/axum_implementation/services/mod.rs b/src/http/axum_implementation/services/mod.rs new file mode 100644 index 000000000..74894de33 --- /dev/null +++ b/src/http/axum_implementation/services/mod.rs @@ -0,0 +1 @@ +pub mod announce; diff --git a/src/http/warp_implementation/handlers.rs 
b/src/http/warp_implementation/handlers.rs index fd927150f..400cc5762 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -49,6 +49,9 @@ pub async fn handle_announce( let mut peer = peer_builder::from_request(&announce_request, &remote_client_ip); + // todo: we should use the http::axum_implementation::services::announce::announce service, + // but this Warp implementation is going to be removed. + let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; match remote_client_ip { From 30918daf1aa147ceaa48ee95ef87427de754ff4f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 16 Feb 2023 22:41:41 +0000 Subject: [PATCH 0389/1003] refactor(http): [#184] move extractor to extractor mod --- .../extractors/announce_request.rs | 45 +++++++ .../axum_implementation/extractors/mod.rs | 1 + .../axum_implementation/handlers/announce.rs | 5 +- .../axum_implementation/requests/announce.rs | 111 ++++++------------ src/tracker/peer.rs | 2 + 5 files changed, 87 insertions(+), 77 deletions(-) create mode 100644 src/http/axum_implementation/extractors/announce_request.rs diff --git a/src/http/axum_implementation/extractors/announce_request.rs new file mode 100644 index 000000000..0371be9a4 --- /dev/null +++ b/src/http/axum_implementation/extractors/announce_request.rs @@ -0,0 +1,45 @@ +use std::panic::Location; + +use axum::async_trait; +use axum::extract::FromRequestParts; +use axum::http::request::Parts; +use axum::response::{IntoResponse, Response}; + +use crate::http::axum_implementation::query::Query; +use crate::http::axum_implementation::requests::announce::{Announce, ParseAnnounceQueryError}; +use crate::http::axum_implementation::responses; + +pub struct ExtractRequest(pub Announce); + +#[async_trait] +impl FromRequestParts for ExtractRequest +where + S: Send + Sync, +{ + type Rejection = Response; + + async fn
from_request_parts(parts: &mut Parts, _state: &S) -> Result { + let raw_query = parts.uri.query(); + + if raw_query.is_none() { + return Err(responses::error::Error::from(ParseAnnounceQueryError::MissingParams { + location: Location::caller(), + }) + .into_response()); + } + + let query = raw_query.unwrap().parse::(); + + if let Err(error) = query { + return Err(responses::error::Error::from(error).into_response()); + } + + let announce_request = Announce::try_from(query.unwrap()); + + if let Err(error) = announce_request { + return Err(responses::error::Error::from(error).into_response()); + } + + Ok(ExtractRequest(announce_request.unwrap())) + } +} diff --git a/src/http/axum_implementation/extractors/mod.rs b/src/http/axum_implementation/extractors/mod.rs index 71ceea999..65b2775a9 100644 --- a/src/http/axum_implementation/extractors/mod.rs +++ b/src/http/axum_implementation/extractors/mod.rs @@ -1,2 +1,3 @@ +pub mod announce_request; pub mod peer_ip; pub mod remote_client_ip; diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 1fb111b8b..0960510ba 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -6,9 +6,10 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; +use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; use crate::http::axum_implementation::extractors::peer_ip::assign_ip_address_to_peer; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; -use crate::http::axum_implementation::requests::announce::{Announce, Event, ExtractAnnounceRequest}; +use crate::http::axum_implementation::requests::announce::{Announce, Event}; use crate::http::axum_implementation::{responses, services}; use crate::protocol::clock::{Current, Time}; use crate::tracker::peer::Peer; @@ -17,7 +18,7 @@ use crate::tracker::Tracker; 
#[allow(clippy::unused_async)] pub async fn handle( State(tracker): State>, - ExtractAnnounceRequest(announce_request): ExtractAnnounceRequest, + ExtractRequest(announce_request): ExtractRequest, remote_client_ip: RemoteClientIp, ) -> Response { debug!("http announce request: {:#?}", announce_request); diff --git a/src/http/axum_implementation/requests/announce.rs b/src/http/axum_implementation/requests/announce.rs index 463df4fbe..0f9a6fbfe 100644 --- a/src/http/axum_implementation/requests/announce.rs +++ b/src/http/axum_implementation/requests/announce.rs @@ -2,10 +2,6 @@ use std::fmt; use std::panic::Location; use std::str::FromStr; -use axum::async_trait; -use axum::extract::FromRequestParts; -use axum::http::request::Parts; -use axum::response::{IntoResponse, Response}; use thiserror::Error; use crate::http::axum_implementation::query::{ParseQueryError, Query}; @@ -17,9 +13,7 @@ use crate::tracker::peer::{self, IdConversionError}; pub type NumberOfBytes = i64; -pub struct ExtractAnnounceRequest(pub Announce); - -// Param names in the URL query +// Query param names const INFO_HASH: &str = "info_hash"; const PEER_ID: &str = "peer_id"; const PORT: &str = "port"; @@ -43,6 +37,41 @@ pub struct Announce { pub compact: Option, } +#[derive(Error, Debug)] +pub enum ParseAnnounceQueryError { + #[error("missing query params for announce request in {location}")] + MissingParams { location: &'static Location<'static> }, + #[error("missing param {param_name} in {location}")] + MissingParam { + location: &'static Location<'static>, + param_name: String, + }, + #[error("invalid param value {param_value} for {param_name} in {location}")] + InvalidParam { + param_name: String, + param_value: String, + location: &'static Location<'static>, + }, + #[error("param value overflow {param_value} for {param_name} in {location}")] + NumberOfBytesOverflow { + param_name: String, + param_value: String, + location: &'static Location<'static>, + }, + #[error("invalid param value 
{param_value} for {param_name} in {source}")] + InvalidInfoHashParam { + param_name: String, + param_value: String, + source: LocatedError<'static, ConversionError>, + }, + #[error("invalid param value {param_value} for {param_name} in {source}")] + InvalidPeerIdParam { + param_name: String, + param_value: String, + source: LocatedError<'static, IdConversionError>, + }, +} + #[derive(PartialEq, Debug)] pub enum Event { Started, @@ -108,41 +137,6 @@ impl FromStr for Compact { } } -#[derive(Error, Debug)] -pub enum ParseAnnounceQueryError { - #[error("missing query params for announce request in {location}")] - MissingParams { location: &'static Location<'static> }, - #[error("missing param {param_name} in {location}")] - MissingParam { - location: &'static Location<'static>, - param_name: String, - }, - #[error("invalid param value {param_value} for {param_name} in {location}")] - InvalidParam { - param_name: String, - param_value: String, - location: &'static Location<'static>, - }, - #[error("param value overflow {param_value} for {param_name} in {location}")] - NumberOfBytesOverflow { - param_name: String, - param_value: String, - location: &'static Location<'static>, - }, - #[error("invalid param value {param_value} for {param_name} in {source}")] - InvalidInfoHashParam { - param_name: String, - param_value: String, - source: LocatedError<'static, ConversionError>, - }, - #[error("invalid param value {param_value} for {param_name} in {source}")] - InvalidPeerIdParam { - param_name: String, - param_value: String, - source: LocatedError<'static, IdConversionError>, - }, -} - impl From for responses::error::Error { fn from(err: ParseQueryError) -> Self { responses::error::Error { @@ -281,39 +275,6 @@ fn extract_compact(query: &Query) -> Result, ParseAnnounceQueryE } } -#[async_trait] -impl FromRequestParts for ExtractAnnounceRequest -where - S: Send + Sync, -{ - type Rejection = Response; - - async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { - 
let raw_query = parts.uri.query(); - - if raw_query.is_none() { - return Err(responses::error::Error::from(ParseAnnounceQueryError::MissingParams { - location: Location::caller(), - }) - .into_response()); - } - - let query = raw_query.unwrap().parse::(); - - if let Err(error) = query { - return Err(responses::error::Error::from(error).into_response()); - } - - let announce_request = Announce::try_from(query.unwrap()); - - if let Err(error) = announce_request { - return Err(responses::error::Error::from(error).into_response()); - } - - Ok(ExtractAnnounceRequest(announce_request.unwrap())) - } -} - #[cfg(test)] mod tests { diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 7559463db..735754529 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -22,6 +22,8 @@ pub struct Peer { pub downloaded: NumberOfBytes, #[serde(with = "NumberOfBytesDef")] pub left: NumberOfBytes, // The number of bytes this peer still has to download + // code-review: aquatic_udp_protocol::request::AnnounceEvent is used also for the HTTP tracker. + // Maybe we should use our own enum and use this one only for the UDP tracker. #[serde(with = "AnnounceEventDef")] pub event: AnnounceEvent, } From da638d603ac8d43365361cfd8a101ba581d67325 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 16 Feb 2023 22:47:39 +0000 Subject: [PATCH 0390/1003] docs(http): fix extractor docs --- src/http/axum_implementation/extractors/remote_client_ip.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/http/axum_implementation/extractors/remote_client_ip.rs b/src/http/axum_implementation/extractors/remote_client_ip.rs index 7b6f3fed2..e852a1b6f 100644 --- a/src/http/axum_implementation/extractors/remote_client_ip.rs +++ b/src/http/axum_implementation/extractors/remote_client_ip.rs @@ -16,7 +16,7 @@ use serde::{Deserialize, Serialize}; /// This extractor extracts these values from the HTTP headers and connection info.
/// /// `right_most_x_forwarded_for` = 126.0.0.2 -/// `connection_info_ip` = 126.0.0.1 +/// `connection_info_ip` = 126.0.0.3 /// /// More info about inner extractors : #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] From db21e6d08f1943b7ec614a93f42a116c747fc2ac Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Feb 2023 19:14:39 +0000 Subject: [PATCH 0391/1003] feat(http): [#187] compact announce response in public mode in Axum --- .../axum_implementation/handlers/announce.rs | 16 +- .../axum_implementation/responses/announce.rs | 293 ++++++++++++++++-- .../axum_implementation/services/announce.rs | 4 +- src/tracker/mod.rs | 6 +- tests/http_tracker.rs | 3 +- 5 files changed, 290 insertions(+), 32 deletions(-) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 0960510ba..81f57e810 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -9,8 +9,9 @@ use log::debug; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; use crate::http::axum_implementation::extractors::peer_ip::assign_ip_address_to_peer; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; -use crate::http::axum_implementation::requests::announce::{Announce, Event}; -use crate::http::axum_implementation::{responses, services}; +use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; +use crate::http::axum_implementation::responses::announce; +use crate::http::axum_implementation::services; use crate::protocol::clock::{Current, Time}; use crate::tracker::peer::Peer; use crate::tracker::Tracker; @@ -30,9 +31,16 @@ pub async fn handle( let mut peer = peer_from_request(&announce_request, &peer_ip); - let response = services::announce::invoke(tracker.clone(), announce_request.info_hash, &mut peer).await; + let announce_data = 
services::announce::invoke(tracker.clone(), announce_request.info_hash, &mut peer).await; - responses::announce::Announce::from(response).into_response() + match announce_request.compact { + Some(compact) => match compact { + Compact::Accepted => announce::Compact::from(announce_data).into_response(), + Compact::NotAccepted => announce::NonCompact::from(announce_data).into_response(), + }, + // Default response format non compact + None => announce::NonCompact::from(announce_data).into_response(), + } } /// It ignores the peer address in the announce request params. diff --git a/src/http/axum_implementation/responses/announce.rs b/src/http/axum_implementation/responses/announce.rs index 63ec74ac2..303adcad9 100644 --- a/src/http/axum_implementation/responses/announce.rs +++ b/src/http/axum_implementation/responses/announce.rs @@ -1,13 +1,22 @@ +use std::io::Write; use std::net::IpAddr; +use std::panic::Location; use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; use serde::{self, Deserialize, Serialize}; +use thiserror::Error; -use crate::tracker::{self, AnnounceResponse}; +use crate::http::axum_implementation::responses; +use crate::tracker::{self, AnnounceData}; +/// Normal (non compact) "announce" response +/// +/// BEP 03: The ``BitTorrent`` Protocol Specification +/// +/// #[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct Announce { +pub struct NonCompact { pub interval: u32, #[serde(rename = "min interval")] pub interval_min: u32, @@ -33,7 +42,7 @@ impl From for Peer { } } -impl Announce { +impl NonCompact { /// # Panics /// /// It would panic if the `Announce` struct contained an inappropriate type. 
@@ -43,14 +52,14 @@ impl Announce { } } -impl IntoResponse for Announce { +impl IntoResponse for NonCompact { fn into_response(self) -> Response { (StatusCode::OK, self.write()).into_response() } } -impl From for Announce { - fn from(domain_announce_response: AnnounceResponse) -> Self { +impl From for NonCompact { + fn from(domain_announce_response: AnnounceData) -> Self { let peers: Vec = domain_announce_response.peers.iter().map(|peer| Peer::from(*peer)).collect(); Self { @@ -63,29 +72,271 @@ impl From for Announce { } } +/// Compact "announce" response +/// +/// BEP 23: Tracker Returns Compact Peer Lists +/// +/// +/// BEP 07: IPv6 Tracker Extension +/// +/// +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Compact { + pub interval: u32, + #[serde(rename = "min interval")] + pub interval_min: u32, + pub complete: u32, + pub incomplete: u32, + pub peers: Vec, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct CompactPeer { + pub ip: IpAddr, + pub port: u16, +} + +impl CompactPeer { + /// # Errors + /// + /// Will return `Err` if internally interrupted. + pub fn write(&self) -> Result, Box> { + let mut bytes: Vec = Vec::new(); + match self.ip { + IpAddr::V4(ip) => { + bytes.write_all(&u32::from(ip).to_be_bytes())?; + } + IpAddr::V6(ip) => { + bytes.write_all(&u128::from(ip).to_be_bytes())?; + } + } + bytes.write_all(&self.port.to_be_bytes())?; + Ok(bytes) + } +} + +impl From for CompactPeer { + fn from(peer: tracker::peer::Peer) -> Self { + CompactPeer { + ip: peer.peer_addr.ip(), + port: peer.peer_addr.port(), + } + } +} + +impl Compact { + /// # Errors + /// + /// Will return `Err` if internally interrupted. 
+ pub fn write(&self) -> Result, Box> { + let mut bytes: Vec = Vec::new(); + + // Begin dictionary + bytes.write_all(b"d")?; + + // Write `interval` + // Dictionary key + bytes.write_all(b"8:interval")?; + // Dictionary key value + bytes.write_all(b"i")?; // Begin integer + bytes.write_all(self.interval.to_string().as_bytes())?; + bytes.write_all(b"e")?; // End integer + + // Write `interval_min` + // Dictionary key + bytes.write_all(b"12:min interval")?; + // Dictionary key value + bytes.write_all(b"i")?; // Begin integer + bytes.write_all(self.interval_min.to_string().as_bytes())?; + bytes.write_all(b"e")?; // End integer + + // Write `complete` + // Dictionary key + bytes.write_all(b"8:complete")?; + // Dictionary key value + bytes.write_all(b"i")?; // Begin integer + bytes.write_all(self.complete.to_string().as_bytes())?; + bytes.write_all(b"e")?; // End integer + + // Write `incomplete` + // Dictionary key + bytes.write_all(b"10:incomplete")?; + // Dictionary key value + bytes.write_all(b"i")?; // Begin integer + bytes.write_all(self.incomplete.to_string().as_bytes())?; + bytes.write_all(b"e")?; // End integer + + // Write peers with IPV4 IPs (BEP 23) + + // Dictionary key + bytes.write_all(b"5:peers")?; + // Dictionary key value + let mut peers_v4: Vec = Vec::new(); + for compact_peer in &self.peers { + match compact_peer.ip { + IpAddr::V4(_ip) => { + let peer_bytes = compact_peer.write()?; + peers_v4.write_all(&peer_bytes)?; + } + IpAddr::V6(_) => {} + } + } + bytes.write_all(peers_v4.len().to_string().as_bytes())?; // Begin byte string + bytes.write_all(b":")?; + bytes.write_all(peers_v4.as_slice())?; + + // todo: why is this `e` here? 
+ bytes.write_all(b"e")?; + + // Write peers with IPV6 IPs (BEP 07) + + // Dictionary key + bytes.write_all(b"6:peers6")?; + // Dictionary key value + let mut peers_v6: Vec = Vec::new(); + for compact_peer in &self.peers { + match compact_peer.ip { + IpAddr::V6(_ip) => { + let peer_bytes = compact_peer.write()?; + peers_v6.write_all(&peer_bytes)?; + } + IpAddr::V4(_) => {} + } + } + bytes.write_all(peers_v6.len().to_string().as_bytes())?; // Begin byte string + bytes.write_all(b":")?; + bytes.write_all(peers_v6.as_slice())?; // End byte string + + // End dictionary + bytes.write_all(b"e")?; + + Ok(bytes) + } +} + +#[derive(Error, Debug)] +pub enum CompactSerializationError { + #[error("cannot write bytes: {inner_error} in {location}")] + CannotWriteBytes { + location: &'static Location<'static>, + inner_error: String, + }, +} + +impl From for responses::error::Error { + fn from(err: CompactSerializationError) -> Self { + responses::error::Error { + failure_reason: format!("{err}"), + } + } +} + +impl IntoResponse for Compact { + fn into_response(self) -> Response { + match self.write() { + Ok(bytes) => (StatusCode::OK, bytes).into_response(), + Err(err) => responses::error::Error::from(CompactSerializationError::CannotWriteBytes { + location: Location::caller(), + inner_error: format!("{err}"), + }) + .into_response(), + } + } +} + +impl From for Compact { + fn from(domain_announce_response: AnnounceData) -> Self { + let peers: Vec = domain_announce_response + .peers + .iter() + .map(|peer| CompactPeer::from(*peer)) + .collect(); + + Self { + interval: domain_announce_response.interval, + interval_min: domain_announce_response.interval_min, + complete: domain_announce_response.swam_stats.seeders, + incomplete: domain_announce_response.swam_stats.leechers, + peers, + } + } +} + #[cfg(test)] mod tests { - use std::net::IpAddr; - use std::str::FromStr; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + + use super::{NonCompact, Peer}; + use 
crate::http::axum_implementation::responses::announce::{Compact, CompactPeer}; - use super::{Announce, Peer}; + // Some ascii values used in tests: + // + // +-----------------+ + // | Dec | Hex | Chr | + // +-----------------+ + // | 105 | 69 | i | + // | 112 | 70 | p | + // +-----------------+ + // + // IP addresses and port numbers used in tests are chosen so that their bencoded representation + // is also a valid string which makes asserts more readable. #[test] - fn announce_response_can_be_bencoded() { - let response = Announce { - interval: 1, - interval_min: 2, - complete: 3, - incomplete: 4, - peers: vec![Peer { - peer_id: "-qB00000000000000001".to_string(), - ip: IpAddr::from_str("127.0.0.1").unwrap(), - port: 8080, - }], + fn non_compact_announce_response_can_be_bencoded() { + let response = NonCompact { + interval: 111, + interval_min: 222, + complete: 333, + incomplete: 444, + peers: vec![ + // IPV4 + Peer { + peer_id: "-qB00000000000000001".to_string(), + ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 + port: 0x7070, // 28784 + }, + // IPV6 + Peer { + peer_id: "-qB00000000000000002".to_string(), + ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + port: 0x7070, // 28784 + }, + ], }; // cspell:disable-next-line - assert_eq!(response.write(), "d8:completei3e10:incompletei4e8:intervali1e12:min intervali2e5:peersld2:ip9:127.0.0.17:peer_id20:-qB000000000000000014:porti8080eeee"); + assert_eq!(response.write(), "d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peersld2:ip15:105.105.105.1057:peer_id20:-qB000000000000000014:porti28784eed2:ip39:6969:6969:6969:6969:6969:6969:6969:69697:peer_id20:-qB000000000000000024:porti28784eeee"); + } + + #[test] + fn compact_announce_response_can_be_bencoded() { + let response = Compact { + interval: 111, + interval_min: 222, + complete: 333, + incomplete: 444, + peers: vec![ + // IPV4 + CompactPeer { + ip: 
IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 + port: 0x7070, // 28784 + }, + // IPV6 + CompactPeer { + ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + port: 0x7070, // 28784 + }, + ], + }; + + let bytes = response.write().unwrap(); + + // cspell:disable-next-line + assert_eq!( + bytes, + // cspell:disable-next-line + b"d8:intervali111e12:min intervali222e8:completei333e10:incompletei444e5:peers6:iiiippe6:peers618:iiiiiiiiiiiiiiiippe" + ); } } diff --git a/src/http/axum_implementation/services/announce.rs b/src/http/axum_implementation/services/announce.rs index 9481354ba..6378c3008 100644 --- a/src/http/axum_implementation/services/announce.rs +++ b/src/http/axum_implementation/services/announce.rs @@ -3,9 +3,9 @@ use std::sync::Arc; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer::Peer; -use crate::tracker::{statistics, AnnounceResponse, Tracker}; +use crate::tracker::{statistics, AnnounceData, Tracker}; -pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) -> AnnounceResponse { +pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) -> AnnounceData { let original_peer_ip = peer.peer_addr.ip(); // The tracker could change the original peer ip diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index cb3bd0e96..d406446ec 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -43,7 +43,7 @@ pub struct TorrentsMetrics { pub torrents: u64, } -pub struct AnnounceResponse { +pub struct AnnounceData { pub peers: Vec, pub swam_stats: SwamStats, pub interval: u32, @@ -86,7 +86,7 @@ impl Tracker { } /// It handles an announce request - pub async fn announce(&self, info_hash: &InfoHash, peer: &mut Peer, remote_client_ip: &IpAddr) -> AnnounceResponse { + pub async fn announce(&self, info_hash: &InfoHash, peer: &mut Peer, remote_client_ip: &IpAddr) -> AnnounceData { peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, 
self.config.get_ext_ip())); let swam_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; @@ -94,7 +94,7 @@ impl Tracker { // todo: remove peer by using its `Id` instead of its socket address: `get_peers_excluding_peer(peer_id: peer::Id)` let peers = self.get_peers_excluding_peers_with_address(info_hash, &peer.peer_addr).await; - AnnounceResponse { + AnnounceData { peers, swam_stats, interval: self.config.announce_interval, diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index ded30a0b4..3f0f4fbe3 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1833,8 +1833,7 @@ mod axum_http_tracker_server { assert_empty_announce_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_the_compact_response() { // Tracker Returns Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html From ce95d7ae9ae3c09d833fa81f9a8d0e69a37842ab Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Feb 2023 13:15:56 +0000 Subject: [PATCH 0392/1003] feat: [#187] add cargo dependency: bip_bencode It will be used to build the bencoded compact announce response in the HTTP tracker. We are currently writing bytes directly into a byte buffer, but the Bencode specification imposes some restrictions like: The keys in a dictionary must be alphabetically ordered. We are not doing that in the current implementation. --- Cargo.lock | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++++- Cargo.toml | 1 + 2 files changed, 75 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 6f9d9231b..05b439353 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,15 @@ # It is not intended for manual editing.
version = 3 +[[package]] +name = "addr2line" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" +dependencies = [ + "gimli", +] + [[package]] name = "adler" version = "1.0.2" @@ -183,6 +192,21 @@ dependencies = [ "tower-service", ] +[[package]] +name = "backtrace" +version = "0.3.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide 0.6.2", + "object", + "rustc-demangle", +] + [[package]] name = "base-x" version = "0.2.11" @@ -236,6 +260,15 @@ dependencies = [ "which", ] +[[package]] +name = "bip_bencode" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6048cc5d9680544a5098a290d2845df7dae292c97687b9896b70365bad0ea416" +dependencies = [ + "error-chain", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -637,6 +670,15 @@ dependencies = [ "termcolor", ] +[[package]] +name = "error-chain" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3" +dependencies = [ + "backtrace", +] + [[package]] name = "fallible-iterator" version = "0.2.0" @@ -675,7 +717,7 @@ checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" dependencies = [ "crc32fast", "libz-sys", - "miniz_oxide", + "miniz_oxide 0.5.4", ] [[package]] @@ -913,6 +955,12 @@ dependencies = [ "wasi 0.11.0+wasi-snapshot-preview1", ] +[[package]] +name = "gimli" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" + [[package]] name = "glob" version = "0.3.0" @@ -1381,6 +1429,15 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" 
+version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +dependencies = [ + "adler", +] + [[package]] name = "mio" version = "0.8.4" @@ -1643,6 +1700,15 @@ dependencies = [ "libc", ] +[[package]] +name = "object" +version = "0.30.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" +dependencies = [ + "memchr", +] + [[package]] name = "once_cell" version = "1.15.0" @@ -2120,6 +2186,12 @@ dependencies = [ "serde", ] +[[package]] +name = "rustc-demangle" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" + [[package]] name = "rustc-hash" version = "1.1.0" @@ -2862,6 +2934,7 @@ dependencies = [ "axum-client-ip", "axum-server", "binascii", + "bip_bencode", "chrono", "config", "derive_more", diff --git a/Cargo.toml b/Cargo.toml index 75ffa7935..917bc9e31 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,6 +61,7 @@ uuid = { version = "1", features = ["v4"] } axum = "0.6.1" axum-server = { version = "0.4.4", features = ["tls-rustls"] } axum-client-ip = "0.4.0" +bip_bencode = "0.4.4" [dev-dependencies] From ad81009778ccfe0680ce9f91a22d03d97d686c5e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Feb 2023 13:18:35 +0000 Subject: [PATCH 0393/1003] fix(http): [#187] compact announce response - Remove extra byte "e" between "peer" and "peer6" dictionary keys. - Alphabetically order dictionary keys. 
--- .../axum_implementation/responses/announce.rs | 70 +++---------------- 1 file changed, 11 insertions(+), 59 deletions(-) diff --git a/src/http/axum_implementation/responses/announce.rs b/src/http/axum_implementation/responses/announce.rs index 303adcad9..16bb51e4c 100644 --- a/src/http/axum_implementation/responses/announce.rs +++ b/src/http/axum_implementation/responses/announce.rs @@ -4,6 +4,7 @@ use std::panic::Location; use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; +use bip_bencode::{ben_bytes, ben_int, ben_map}; use serde::{self, Deserialize, Serialize}; use thiserror::Error; @@ -129,48 +130,6 @@ impl Compact { /// /// Will return `Err` if internally interrupted. pub fn write(&self) -> Result, Box> { - let mut bytes: Vec = Vec::new(); - - // Begin dictionary - bytes.write_all(b"d")?; - - // Write `interval` - // Dictionary key - bytes.write_all(b"8:interval")?; - // Dictionary key value - bytes.write_all(b"i")?; // Begin integer - bytes.write_all(self.interval.to_string().as_bytes())?; - bytes.write_all(b"e")?; // End integer - - // Write `interval_min` - // Dictionary key - bytes.write_all(b"12:min interval")?; - // Dictionary key value - bytes.write_all(b"i")?; // Begin integer - bytes.write_all(self.interval_min.to_string().as_bytes())?; - bytes.write_all(b"e")?; // End integer - - // Write `complete` - // Dictionary key - bytes.write_all(b"8:complete")?; - // Dictionary key value - bytes.write_all(b"i")?; // Begin integer - bytes.write_all(self.complete.to_string().as_bytes())?; - bytes.write_all(b"e")?; // End integer - - // Write `incomplete` - // Dictionary key - bytes.write_all(b"10:incomplete")?; - // Dictionary key value - bytes.write_all(b"i")?; // Begin integer - bytes.write_all(self.incomplete.to_string().as_bytes())?; - bytes.write_all(b"e")?; // End integer - - // Write peers with IPV4 IPs (BEP 23) - - // Dictionary key - bytes.write_all(b"5:peers")?; - // Dictionary key value let mut peers_v4: Vec = Vec::new(); 
for compact_peer in &self.peers { match compact_peer.ip { @@ -181,18 +140,7 @@ impl Compact { IpAddr::V6(_) => {} } } - bytes.write_all(peers_v4.len().to_string().as_bytes())?; // Begin byte string - bytes.write_all(b":")?; - bytes.write_all(peers_v4.as_slice())?; - - // todo: why is this `e` here? - bytes.write_all(b"e")?; - - // Write peers with IPV6 IPs (BEP 07) - // Dictionary key - bytes.write_all(b"6:peers6")?; - // Dictionary key value let mut peers_v6: Vec = Vec::new(); for compact_peer in &self.peers { match compact_peer.ip { @@ -203,12 +151,16 @@ impl Compact { IpAddr::V4(_) => {} } } - bytes.write_all(peers_v6.len().to_string().as_bytes())?; // Begin byte string - bytes.write_all(b":")?; - bytes.write_all(peers_v6.as_slice())?; // End byte string - // End dictionary - bytes.write_all(b"e")?; + let bytes = (ben_map! { + "complete" => ben_int!(i64::from(self.complete)), + "incomplete" => ben_int!(i64::from(self.incomplete)), + "interval" => ben_int!(i64::from(self.interval)), + "min interval" => ben_int!(i64::from(self.interval_min)), + "peers" => ben_bytes!(peers_v4), + "peers6" => ben_bytes!(peers_v6) + }) + .encode(); Ok(bytes) } @@ -336,7 +288,7 @@ mod tests { assert_eq!( bytes, // cspell:disable-next-line - b"d8:intervali111e12:min intervali222e8:completei333e10:incompletei444e5:peers6:iiiippe6:peers618:iiiiiiiiiiiiiiiippe" + b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peers6:iiiipp6:peers618:iiiiiiiiiiiiiiiippe" ); } } From b1fa1e5669da3986c56367a6171d5faba7783d61 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Feb 2023 13:49:49 +0000 Subject: [PATCH 0394/1003] test(http): enable axum http tracker tests Those test should have been enabled when the implementation was done. 
--- .../axum_implementation/extractors/peer_ip.rs | 4 +++- tests/http/asserts.rs | 10 ++++++++++ tests/http_tracker.rs | 17 ++++++----------- 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/src/http/axum_implementation/extractors/peer_ip.rs b/src/http/axum_implementation/extractors/peer_ip.rs index 7d615d0dc..9f7e92a9b 100644 --- a/src/http/axum_implementation/extractors/peer_ip.rs +++ b/src/http/axum_implementation/extractors/peer_ip.rs @@ -9,7 +9,9 @@ use crate::http::axum_implementation::responses; #[derive(Error, Debug)] pub enum ResolutionError { - #[error("missing the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration) in {location}")] + #[error( + "missing or invalid the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration) in {location}" + )] MissingRightMostXForwardedForIp { location: &'static Location<'static> }, #[error("cannot get the client IP from the connection info in {location}")] MissingClientIp { location: &'static Location<'static> }, diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index e146f252d..ffb857951 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -138,6 +138,16 @@ pub async fn assert_could_not_find_remote_address_on_xff_header_error_response(r ); } +pub async fn assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "missing or invalid the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration)", + Location::caller(), + ); +} + pub async fn assert_invalid_remote_address_on_xff_header_error_response(response: Response) { assert_eq!(response.status(), 200); diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 3f0f4fbe3..413d28bcf 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1405,19 +1405,15 @@ mod axum_http_tracker_server { mod 
and_running_on_reverse_proxy { use torrust_tracker::http::Version; - use crate::http::asserts::{ - assert_could_not_find_remote_address_on_xff_header_error_response, - assert_invalid_remote_address_on_xff_header_error_response, - }; + use crate::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; use crate::http::server::start_http_tracker_on_reverse_proxy; - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { // If the tracker is running behind a reverse proxy, the peer IP is the - // last IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy client. + // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; @@ -1427,11 +1423,10 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_could_not_find_remote_address_on_xff_header_error_response(response).await; + assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; @@ -1441,7 +1436,7 @@ mod axum_http_tracker_server { .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") .await; - assert_invalid_remote_address_on_xff_header_error_response(response).await; + assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; } } From 502072250b74d369036636144ee95e733b6107d4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Feb 2023 17:47:42 +0000 Subject: [PATCH 0395/1003] feat(http): consider peers to be different 
if they have the same IP but a different port Other refactores were made to improve funtions names. BREAKING CHANGE: before this a peer with the same IP as the client that is making the announce request was removed from the announce response regardless whether they have the same IP or not. --- src/tracker/mod.rs | 18 +- src/tracker/peer.rs | 15 + src/tracker/services/torrent.rs | 2 +- src/tracker/torrent.rs | 470 ++++++++++++++++++-------------- 4 files changed, 284 insertions(+), 221 deletions(-) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index d406446ec..e01fe6a19 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -8,7 +8,7 @@ pub mod torrent; use std::collections::btree_map::Entry; use std::collections::BTreeMap; -use std::net::{IpAddr, SocketAddr}; +use std::net::IpAddr; use std::panic::Location; use std::sync::Arc; use std::time::Duration; @@ -91,8 +91,7 @@ impl Tracker { let swam_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - // todo: remove peer by using its `Id` instead of its socket address: `get_peers_excluding_peer(peer_id: peer::Id)` - let peers = self.get_peers_excluding_peers_with_address(info_hash, &peer.peer_addr).await; + let peers = self.get_peers_for_peer(info_hash, peer).await; AnnounceData { peers, @@ -298,16 +297,12 @@ impl Tracker { Ok(()) } - async fn get_peers_excluding_peers_with_address( - &self, - info_hash: &InfoHash, - excluded_address: &SocketAddr, - ) -> Vec { + async fn get_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers(Some(excluded_address)).into_iter().copied().collect(), + Some(entry) => entry.get_peers_for_peer(peer).into_iter().copied().collect(), } } @@ -317,11 +312,14 @@ impl Tracker { match read_lock.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers(None).into_iter().copied().collect(), + Some(entry) => 
entry.get_all_peers().into_iter().copied().collect(), } } pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwamStats { + // code-review: consider splitting the function in two (command and query segregation). + // `update_torrent_with_peer` and `get_stats` + let mut torrents = self.torrents.write().await; let torrent_entry = match torrents.entry(*info_hash) { diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 735754529..18ce1b75f 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -10,6 +10,12 @@ use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; use crate::protocol::utils::ser_unix_time_value; +#[derive(PartialEq, Eq, Debug)] +pub enum IPVersion { + IPv4, + IPv6, +} + #[derive(PartialEq, Eq, Debug, Clone, Serialize, Copy)] pub struct Peer { pub peer_id: Id, @@ -37,6 +43,15 @@ impl Peer { pub fn change_ip(&mut self, new_ip: &IpAddr) { self.peer_addr = SocketAddr::new(*new_ip, self.peer_addr.port()); } + + /// The IP version used by the peer: IPV4 or IPV6 + #[must_use] + pub fn ip_version(&self) -> IPVersion { + if self.peer_addr.is_ipv4() { + return IPVersion::IPv4; + } + IPVersion::IPv6 + } } #[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index ba66d15f4..e2353876e 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -80,7 +80,7 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op let (seeders, completed, leechers) = torrent_entry.get_stats(); - let peers = torrent_entry.get_peers(None); + let peers = torrent_entry.get_all_peers(); let peers = Some(peers.iter().map(|peer| (**peer)).collect()); diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index b7b79f0f5..c2db6b027 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -1,10 +1,9 @@ 
-use std::net::{IpAddr, SocketAddr}; use std::time::Duration; use aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; -use super::peer; +use super::peer::{self, Peer}; use crate::protocol::clock::{Current, TimeNow}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; @@ -48,26 +47,24 @@ impl Entry { did_torrent_stats_change } + /// Get all peers, limiting the result to the maximum number of scrape torrents. #[must_use] - pub fn get_peers(&self, optional_excluded_address: Option<&SocketAddr>) -> Vec<&peer::Peer> { + pub fn get_all_peers(&self) -> Vec<&peer::Peer> { + self.peers.values().take(MAX_SCRAPE_TORRENTS as usize).collect() + } + + /// Returns the list of peers for a given client. The list filters out: + /// - The client peer that is making the request to the tracker + /// - Other peers that are not using the same IP version as the client peer. + #[must_use] + pub fn get_peers_for_peer(&self, client: &Peer) -> Vec<&peer::Peer> { self.peers .values() - .filter(|peer| match optional_excluded_address { - // Don't filter on ip_version - None => true, - // Filter out different ip_version from remote_addr - Some(excluded_address) => { - // Skip ip address of client - if peer.peer_addr.ip() == excluded_address.ip() { - return false; - } - - match peer.peer_addr.ip() { - IpAddr::V4(_) => excluded_address.is_ipv4(), - IpAddr::V6(_) => excluded_address.is_ipv6(), - } - } - }) + // Take peers which are not the client peer + .filter(|peer| peer.peer_addr != client.peer_addr) + // Take only peers with the same IP version as the client peer + .filter(|peer| peer.ip_version() == client.ip_version()) + // Limit the number of peers on the result .take(MAX_SCRAPE_TORRENTS as usize) .collect() } @@ -101,264 +98,317 @@ pub struct SwamStats { #[cfg(test)] mod tests { - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::ops::Sub; - use std::time::Duration; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + mod torrent_entry { - use 
crate::protocol::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; - use crate::tracker::peer; - use crate::tracker::torrent::Entry; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::ops::Sub; + use std::time::Duration; - struct TorrentPeerBuilder { - peer: peer::Peer, - } + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - impl TorrentPeerBuilder { - pub fn default() -> TorrentPeerBuilder { - let default_peer = peer::Peer { - peer_id: peer::Id([0u8; 20]), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), - updated: Current::now(), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - TorrentPeerBuilder { peer: default_peer } - } + use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; + use crate::tracker::peer; + use crate::tracker::torrent::Entry; - pub fn with_event_completed(mut self) -> Self { - self.peer.event = AnnounceEvent::Completed; - self + struct TorrentPeerBuilder { + peer: peer::Peer, } - pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } + impl TorrentPeerBuilder { + pub fn default() -> TorrentPeerBuilder { + let default_peer = peer::Peer { + peer_id: peer::Id([0u8; 20]), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: Current::now(), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + TorrentPeerBuilder { peer: default_peer } + } + + pub fn with_event_completed(mut self) -> Self { + self.peer.event = AnnounceEvent::Completed; + self + } + + pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { + self.peer.peer_id = peer_id; + self + } + + pub fn 
with_number_of_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } - pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { - self.peer.peer_id = peer_id; - self + pub fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + pub fn into(self) -> peer::Peer { + self.peer + } } - pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes(left); - self + /// A torrent seeder is a peer with 0 bytes left to download which + /// has not announced it has stopped + fn a_torrent_seeder() -> peer::Peer { + TorrentPeerBuilder::default() + .with_number_of_bytes_left(0) + .with_event_completed() + .into() } - pub fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { - self.peer.updated = updated; - self + /// A torrent leecher is a peer that is not a seeder. + /// Leecher: left > 0 OR event = Stopped + fn a_torrent_leecher() -> peer::Peer { + TorrentPeerBuilder::default() + .with_number_of_bytes_left(1) + .with_event_completed() + .into() } - pub fn into(self) -> peer::Peer { - self.peer + #[test] + fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { + let torrent_entry = Entry::new(); + + assert_eq!(torrent_entry.get_all_peers().len(), 0); } - } - /// A torrent seeder is a peer with 0 bytes left to download which - /// has not announced it has stopped - fn a_torrent_seeder() -> peer::Peer { - TorrentPeerBuilder::default() - .with_number_of_bytes_left(0) - .with_event_completed() - .into() - } + #[test] + fn a_new_peer_can_be_added_to_a_torrent_entry() { + let mut torrent_entry = Entry::new(); + let torrent_peer = TorrentPeerBuilder::default().into(); - /// A torrent leecher is a peer that is not a seeder. 
- /// Leecher: left > 0 OR event = Stopped - fn a_torrent_leecher() -> peer::Peer { - TorrentPeerBuilder::default() - .with_number_of_bytes_left(1) - .with_event_completed() - .into() - } + torrent_entry.update_peer(&torrent_peer); // Add the peer - #[test] - fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = Entry::new(); + assert_eq!(*torrent_entry.get_all_peers()[0], torrent_peer); + assert_eq!(torrent_entry.get_all_peers().len(), 1); + } - assert_eq!(torrent_entry.get_peers(None).len(), 0); - } + #[test] + fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { + let mut torrent_entry = Entry::new(); + let torrent_peer = TorrentPeerBuilder::default().into(); - #[test] - fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = Entry::new(); - let torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.update_peer(&torrent_peer); // Add the peer - torrent_entry.update_peer(&torrent_peer); // Add the peer + assert_eq!(torrent_entry.get_all_peers(), vec![&torrent_peer]); + } - assert_eq!(*torrent_entry.get_peers(None)[0], torrent_peer); - assert_eq!(torrent_entry.get_peers(None).len(), 1); - } + #[test] + fn a_peer_can_be_updated_in_a_torrent_entry() { + let mut torrent_entry = Entry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.update_peer(&torrent_peer); // Add the peer - #[test] - fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = Entry::new(); - let torrent_peer = TorrentPeerBuilder::default().into(); + torrent_peer.event = AnnounceEvent::Completed; // Update the peer + torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry - torrent_entry.update_peer(&torrent_peer); // Add the peer + assert_eq!(torrent_entry.get_all_peers()[0].event, AnnounceEvent::Completed); + } - assert_eq!(torrent_entry.get_peers(None), vec![&torrent_peer]); 
- } + #[test] + fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { + let mut torrent_entry = Entry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.update_peer(&torrent_peer); // Add the peer - #[test] - fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = Entry::new(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.update_peer(&torrent_peer); // Add the peer + torrent_peer.event = AnnounceEvent::Stopped; // Update the peer + torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry - torrent_peer.event = AnnounceEvent::Completed; // Update the peer - torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + assert_eq!(torrent_entry.get_all_peers().len(), 0); + } - assert_eq!(torrent_entry.get_peers(None)[0].event, AnnounceEvent::Completed); - } + #[test] + fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { + let mut torrent_entry = Entry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); - #[test] - fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = Entry::new(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.update_peer(&torrent_peer); // Add the peer + torrent_entry.update_peer(&torrent_peer); // Add the peer - torrent_peer.event = AnnounceEvent::Stopped; // Update the peer - torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + torrent_peer.event = AnnounceEvent::Completed; // Update the peer + let stats_have_changed = torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry - assert_eq!(torrent_entry.get_peers(None).len(), 0); - } + assert!(stats_have_changed); + } - #[test] - fn 
torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = Entry::new(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); + #[test] + fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( + ) { + let mut torrent_entry = Entry::new(); + let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); - torrent_entry.update_peer(&torrent_peer); // Add the peer + // Add a peer that did not exist before in the entry + let torrent_stats_have_not_changed = !torrent_entry.update_peer(&torrent_peer_announcing_complete_event); - torrent_peer.event = AnnounceEvent::Completed; // Update the peer - let stats_have_changed = torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + assert!(torrent_stats_have_not_changed); + } - assert!(stats_have_changed); - } + #[test] + fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() + { + let mut torrent_entry = Entry::new(); + let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); + torrent_entry.update_peer(&torrent_peer); // Add peer - #[test] - fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( - ) { - let mut torrent_entry = Entry::new(); - let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); + // Get peers excluding the one we have just added + let peers = torrent_entry.get_peers_for_peer(&torrent_peer); - // Add a peer that did not exist before in the entry - let torrent_stats_have_not_changed = !torrent_entry.update_peer(&torrent_peer_announcing_complete_event); + assert_eq!(peers.len(), 0); 
+ } - assert!(torrent_stats_have_not_changed); - } + #[test] + fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_peers_that_do_not_use_the_same_ip_version( + ) { + let mut torrent_entry = Entry::new(); - #[test] - fn a_torrent_entry_could_filter_out_peers_with_a_given_socket_address() { - let mut torrent_entry = Entry::new(); - let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); - torrent_entry.update_peer(&torrent_peer); // Add peer + // Add peer 1 using IPV4 + let peer1_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + let torrent_peer_1 = TorrentPeerBuilder::default().with_peer_address(peer1_socket_address).into(); + torrent_entry.update_peer(&torrent_peer_1); - // Get peers excluding the one we have just added - let peers = torrent_entry.get_peers(Some(&peer_socket_address)); + // Add peer 2 using IPV6 + let peer2_socket_address = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff)), 8080); + let torrent_peer_2 = TorrentPeerBuilder::default().with_peer_address(peer2_socket_address).into(); + torrent_entry.update_peer(&torrent_peer_2); - assert_eq!(peers.len(), 0); - } + // Get peers for peer 1 + let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1); - fn peer_id_from_i32(number: i32) -> peer::Id { - let peer_id = number.to_le_bytes(); - peer::Id([ - 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], peer_id[2], - peer_id[3], - ]) - } + // Peer using IPV6 should not be included + assert_eq!(peers.len(), 0); + } + + #[test] + fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { + let mut torrent_entry = Entry::new(); - #[test] - fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = 
Entry::new(); + let peer_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + + // Add peer 1 + let torrent_peer_1 = TorrentPeerBuilder::default() + .with_peer_address(SocketAddr::new(peer_ip, 8080)) + .into(); + torrent_entry.update_peer(&torrent_peer_1); - // We add one more peer than the scrape limit - for peer_number in 1..=74 + 1 { - let torrent_peer = TorrentPeerBuilder::default() - .with_peer_id(peer_id_from_i32(peer_number)) + // Add peer 2 + let torrent_peer_2 = TorrentPeerBuilder::default() + .with_peer_address(SocketAddr::new(peer_ip, 8081)) .into(); - torrent_entry.update_peer(&torrent_peer); + torrent_entry.update_peer(&torrent_peer_2); + + // Get peers for peer 1 + let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1); + + // The peer 2 using the same IP but different port should be included + assert_eq!(peers[0].peer_addr.ip(), Ipv4Addr::new(127, 0, 0, 1)); + assert_eq!(peers[0].peer_addr.port(), 8081); } - let peers = torrent_entry.get_peers(None); + fn peer_id_from_i32(number: i32) -> peer::Id { + let peer_id = number.to_le_bytes(); + peer::Id([ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], + peer_id[2], peer_id[3], + ]) + } - assert_eq!(peers.len(), 74); - } + #[test] + fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { + let mut torrent_entry = Entry::new(); - #[test] - fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = Entry::new(); - let torrent_seeder = a_torrent_seeder(); + // We add one more peer than the scrape limit + for peer_number in 1..=74 + 1 { + let torrent_peer = TorrentPeerBuilder::default() + .with_peer_id(peer_id_from_i32(peer_number)) + .into(); + torrent_entry.update_peer(&torrent_peer); + } - torrent_entry.update_peer(&torrent_seeder); // Add seeder + let peers = torrent_entry.get_all_peers(); - assert_eq!(torrent_entry.get_stats().0, 1); - } + assert_eq!(peers.len(), 74); + } - #[test] - 
fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut torrent_entry = Entry::new(); - let torrent_leecher = a_torrent_leecher(); + #[test] + fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { + let mut torrent_entry = Entry::new(); + let torrent_seeder = a_torrent_seeder(); - torrent_entry.update_peer(&torrent_leecher); // Add leecher + torrent_entry.update_peer(&torrent_seeder); // Add seeder - assert_eq!(torrent_entry.get_stats().2, 1); - } + assert_eq!(torrent_entry.get_stats().0, 1); + } - #[test] - fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( - ) { - let mut torrent_entry = Entry::new(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.update_peer(&torrent_peer); // Add the peer + #[test] + fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { + let mut torrent_entry = Entry::new(); + let torrent_leecher = a_torrent_leecher(); - // Announce "Completed" torrent download event. 
- torrent_peer.event = AnnounceEvent::Completed; - torrent_entry.update_peer(&torrent_peer); // Update the peer + torrent_entry.update_peer(&torrent_leecher); // Add leecher - let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().1; + assert_eq!(torrent_entry.get_stats().2, 1); + } - assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); - } + #[test] + fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( + ) { + let mut torrent_entry = Entry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.update_peer(&torrent_peer); // Add the peer - #[test] - fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = Entry::new(); - let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); + // Announce "Completed" torrent download event. + torrent_peer.event = AnnounceEvent::Completed; + torrent_entry.update_peer(&torrent_peer); // Update the peer - // Announce "Completed" torrent download event. - // It's the first event announced from this peer. - torrent_entry.update_peer(&torrent_peer_announcing_complete_event); // Add the peer + let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().1; - let number_of_peers_with_completed_torrent = torrent_entry.get_stats().1; + assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); + } - assert_eq!(number_of_peers_with_completed_torrent, 0); - } + #[test] + fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { + let mut torrent_entry = Entry::new(); + let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); + + // Announce "Completed" torrent download event. 
+ // It's the first event announced from this peer. + torrent_entry.update_peer(&torrent_peer_announcing_complete_event); // Add the peer - #[test] - fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = Entry::new(); + let number_of_peers_with_completed_torrent = torrent_entry.get_stats().1; - let timeout = 120u32; + assert_eq!(number_of_peers_with_completed_torrent, 0); + } - let now = Working::now(); - Stopped::local_set(&now); + #[test] + fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { + let mut torrent_entry = Entry::new(); - let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); - let inactive_peer = TorrentPeerBuilder::default() - .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) - .into(); - torrent_entry.update_peer(&inactive_peer); // Add the peer + let timeout = 120u32; - torrent_entry.remove_inactive_peers(timeout); + let now = Working::now(); + Stopped::local_set(&now); - assert_eq!(torrent_entry.peers.len(), 0); + let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); + let inactive_peer = TorrentPeerBuilder::default() + .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) + .into(); + torrent_entry.update_peer(&inactive_peer); // Add the peer + + torrent_entry.remove_inactive_peers(timeout); + + assert_eq!(torrent_entry.peers.len(), 0); + } } } From 7b9131ec9255299e6685565488c5b6b386725414 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Feb 2023 18:36:09 +0000 Subject: [PATCH 0396/1003] fix(http): [#187] fix non-compact announce response Fixed some errors in the normal (non-compact) announce response in the HTTP tracker. Only fo rthe new Aux, implementation: - The peer id should be a 20-byte string not the hex value representation as an string. - Response body (bencode) should be binary (bytes). 
- The "peer id" key in the peer dictionary should have a white space "peer id" intead of "peer_id" (with underscore). --- .../axum_implementation/responses/announce.rs | 109 ++++++++++++------ src/tracker/peer.rs | 10 ++ tests/http/asserts.rs | 14 +-- tests/http/asserts_warp.rs | 15 +++ tests/http/mod.rs | 1 + tests/http/responses/announce.rs | 8 +- tests/http/responses/announce_warp.rs | 30 +++++ tests/http/responses/mod.rs | 1 + tests/http_tracker.rs | 12 +- 9 files changed, 149 insertions(+), 51 deletions(-) create mode 100644 tests/http/asserts_warp.rs create mode 100644 tests/http/responses/announce_warp.rs diff --git a/src/http/axum_implementation/responses/announce.rs b/src/http/axum_implementation/responses/announce.rs index 16bb51e4c..a91266490 100644 --- a/src/http/axum_implementation/responses/announce.rs +++ b/src/http/axum_implementation/responses/announce.rs @@ -4,7 +4,7 @@ use std::panic::Location; use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; -use bip_bencode::{ben_bytes, ben_int, ben_map}; +use bip_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; use serde::{self, Deserialize, Serialize}; use thiserror::Error; @@ -28,15 +28,26 @@ pub struct NonCompact { #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Peer { - pub peer_id: String, + pub peer_id: [u8; 20], pub ip: IpAddr, pub port: u16, } +impl Peer { + #[must_use] + pub fn ben_map(&self) -> BencodeMut { + ben_map! { + "peer id" => ben_bytes!(self.peer_id.clone().to_vec()), + "ip" => ben_bytes!(self.ip.to_string()), + "port" => ben_int!(i64::from(self.port)) + } + } +} + impl From for Peer { fn from(peer: tracker::peer::Peer) -> Self { Peer { - peer_id: peer.peer_id.to_string(), + peer_id: peer.peer_id.to_bytes(), ip: peer.peer_addr.ip(), port: peer.peer_addr.port(), } @@ -46,16 +57,29 @@ impl From for Peer { impl NonCompact { /// # Panics /// - /// It would panic if the `Announce` struct contained an inappropriate type. 
+ /// Will return an error if it can't access the bencode as a mutable `BListAccess`. #[must_use] - pub fn write(&self) -> String { - serde_bencode::to_string(&self).unwrap() + pub fn body(&self) -> Vec { + let mut peers_list = ben_list!(); + let peers_list_mut = peers_list.list_mut().unwrap(); + for peer in &self.peers { + peers_list_mut.push(peer.ben_map()); + } + + (ben_map! { + "complete" => ben_int!(i64::from(self.complete)), + "incomplete" => ben_int!(i64::from(self.incomplete)), + "interval" => ben_int!(i64::from(self.interval)), + "min interval" => ben_int!(i64::from(self.interval_min)), + "peers" => peers_list.clone() + }) + .encode() } } impl IntoResponse for NonCompact { fn into_response(self) -> Response { - (StatusCode::OK, self.write()).into_response() + (StatusCode::OK, self.body()).into_response() } } @@ -101,7 +125,7 @@ impl CompactPeer { /// # Errors /// /// Will return `Err` if internally interrupted. - pub fn write(&self) -> Result, Box> { + pub fn bytes(&self) -> Result, Box> { let mut bytes: Vec = Vec::new(); match self.ip { IpAddr::V4(ip) => { @@ -129,39 +153,45 @@ impl Compact { /// # Errors /// /// Will return `Err` if internally interrupted. - pub fn write(&self) -> Result, Box> { - let mut peers_v4: Vec = Vec::new(); + pub fn body(&self) -> Result, Box> { + let bytes = (ben_map! { + "complete" => ben_int!(i64::from(self.complete)), + "incomplete" => ben_int!(i64::from(self.incomplete)), + "interval" => ben_int!(i64::from(self.interval)), + "min interval" => ben_int!(i64::from(self.interval_min)), + "peers" => ben_bytes!(self.peers_v4_bytes()?), + "peers6" => ben_bytes!(self.peers_v6_bytes()?) 
+ }) + .encode(); + + Ok(bytes) + } + + fn peers_v4_bytes(&self) -> Result, Box> { + let mut bytes: Vec = Vec::new(); for compact_peer in &self.peers { match compact_peer.ip { IpAddr::V4(_ip) => { - let peer_bytes = compact_peer.write()?; - peers_v4.write_all(&peer_bytes)?; + let peer_bytes = compact_peer.bytes()?; + bytes.write_all(&peer_bytes)?; } IpAddr::V6(_) => {} } } + Ok(bytes) + } - let mut peers_v6: Vec = Vec::new(); + fn peers_v6_bytes(&self) -> Result, Box> { + let mut bytes: Vec = Vec::new(); for compact_peer in &self.peers { match compact_peer.ip { IpAddr::V6(_ip) => { - let peer_bytes = compact_peer.write()?; - peers_v6.write_all(&peer_bytes)?; + let peer_bytes = compact_peer.bytes()?; + bytes.write_all(&peer_bytes)?; } IpAddr::V4(_) => {} } } - - let bytes = (ben_map! { - "complete" => ben_int!(i64::from(self.complete)), - "incomplete" => ben_int!(i64::from(self.incomplete)), - "interval" => ben_int!(i64::from(self.interval)), - "min interval" => ben_int!(i64::from(self.interval_min)), - "peers" => ben_bytes!(peers_v4), - "peers6" => ben_bytes!(peers_v6) - }) - .encode(); - Ok(bytes) } } @@ -185,7 +215,7 @@ impl From for responses::error::Error { impl IntoResponse for Compact { fn into_response(self) -> Response { - match self.write() { + match self.body() { Ok(bytes) => (StatusCode::OK, bytes).into_response(), Err(err) => responses::error::Error::from(CompactSerializationError::CannotWriteBytes { location: Location::caller(), @@ -244,21 +274,28 @@ mod tests { peers: vec![ // IPV4 Peer { - peer_id: "-qB00000000000000001".to_string(), + peer_id: *b"-qB00000000000000001", ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 port: 0x7070, // 28784 }, // IPV6 Peer { - peer_id: "-qB00000000000000002".to_string(), + peer_id: *b"-qB00000000000000002", ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), port: 0x7070, // 28784 }, ], }; + let bytes = response.body(); + // cspell:disable-next-line 
- assert_eq!(response.write(), "d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peersld2:ip15:105.105.105.1057:peer_id20:-qB000000000000000014:porti28784eed2:ip39:6969:6969:6969:6969:6969:6969:6969:69697:peer_id20:-qB000000000000000024:porti28784eeee"); + let expected_bytes = b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peersld2:ip15:105.105.105.1057:peer id20:-qB000000000000000014:porti28784eed2:ip39:6969:6969:6969:6969:6969:6969:6969:69697:peer id20:-qB000000000000000024:porti28784eeee"; + + assert_eq!( + String::from_utf8(bytes).unwrap(), + String::from_utf8(expected_bytes.to_vec()).unwrap() + ); } #[test] @@ -282,13 +319,15 @@ mod tests { ], }; - let bytes = response.write().unwrap(); + let bytes = response.body().unwrap(); - // cspell:disable-next-line - assert_eq!( - bytes, + let expected_bytes = // cspell:disable-next-line - b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peers6:iiiipp6:peers618:iiiiiiiiiiiiiiiippe" + b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peers6:iiiipp6:peers618:iiiiiiiiiiiiiiiippe"; + + assert_eq!( + String::from_utf8(bytes).unwrap(), + String::from_utf8(expected_bytes.to_vec()).unwrap() ); } } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 18ce1b75f..c6d87f036 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -84,6 +84,11 @@ impl Id { ret.0.clone_from_slice(bytes); ret } + + #[must_use] + pub fn to_bytes(&self) -> [u8; 20] { + self.0 + } } impl From<[u8; 20]> for Id { @@ -369,6 +374,11 @@ mod test { ]); assert_eq!(id.to_string(), "009f9296009f9296009f9296009f9296009f9296"); } + + #[test] + fn should_return_the_inner_bytes() { + assert_eq!(peer::Id(*b"-qB00000000000000000").to_bytes(), *b"-qB00000000000000000"); + } } mod torrent_peer { diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index ffb857951..a10edc9e6 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -30,17 +30,15 @@ pub 
async fn assert_empty_announce_response(response: Response) { pub async fn assert_announce_response(response: Response, expected_announce_response: &Announce) { assert_eq!(response.status(), 200); - let body = response.text().await.unwrap(); - let announce_response: Announce = serde_bencode::from_str(&body) - .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{}\"", &body)); + + let body = response.bytes().await.unwrap(); + + let announce_response: Announce = serde_bencode::from_bytes(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{:#?}\"", &body)); + assert_eq!(announce_response, *expected_announce_response); } -/// Sample bencoded announce response as byte array: -/// -/// ```text -/// b"d8:intervali120e12:min intervali120e8:completei2e10:incompletei0e5:peers6:~\0\0\x01\x1f\x90e6:peers60:e" -/// ``` pub async fn assert_compact_announce_response(response: Response, expected_response: &Compact) { assert_eq!(response.status(), 200); diff --git a/tests/http/asserts_warp.rs b/tests/http/asserts_warp.rs new file mode 100644 index 000000000..6bda82f6c --- /dev/null +++ b/tests/http/asserts_warp.rs @@ -0,0 +1,15 @@ +/// todo: this mod should be removed when we remove the Warp implementation for the HTTP tracker. 
+use reqwest::Response; + +use super::responses::announce_warp::WarpAnnounce; + +pub async fn assert_warp_announce_response(response: Response, expected_announce_response: &WarpAnnounce) { + assert_eq!(response.status(), 200); + + let body = response.text().await.unwrap(); + + let announce_response: WarpAnnounce = serde_bencode::from_str(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{:#?}\"", &body)); + + assert_eq!(announce_response, *expected_announce_response); +} diff --git a/tests/http/mod.rs b/tests/http/mod.rs index 8c1e3c995..40616025b 100644 --- a/tests/http/mod.rs +++ b/tests/http/mod.rs @@ -1,4 +1,5 @@ pub mod asserts; +pub mod asserts_warp; pub mod client; pub mod connection_info; pub mod requests; diff --git a/tests/http/responses/announce.rs b/tests/http/responses/announce.rs index e976ba9ae..8a07ebd5e 100644 --- a/tests/http/responses/announce.rs +++ b/tests/http/responses/announce.rs @@ -10,20 +10,22 @@ pub struct Announce { pub interval: u32, #[serde(rename = "min interval")] pub min_interval: u32, - pub peers: Vec, // Peers with IPV4 + pub peers: Vec, // Peers using IPV4 and IPV6 } #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct DictionaryPeer { pub ip: String, - pub peer_id: String, + #[serde(rename = "peer id")] + #[serde(with = "serde_bytes")] + pub peer_id: Vec, pub port: u16, } impl From for DictionaryPeer { fn from(peer: Peer) -> Self { DictionaryPeer { - peer_id: peer.peer_id.to_string(), + peer_id: peer.peer_id.to_bytes().to_vec(), ip: peer.peer_addr.ip().to_string(), port: peer.peer_addr.port(), } diff --git a/tests/http/responses/announce_warp.rs b/tests/http/responses/announce_warp.rs new file mode 100644 index 000000000..0fcf05eb8 --- /dev/null +++ b/tests/http/responses/announce_warp.rs @@ -0,0 +1,30 @@ +/// todo: this mod should be removed when we remove the Warp implementation for the HTTP tracker. 
+use serde::{self, Deserialize, Serialize}; +use torrust_tracker::tracker::peer::Peer; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct WarpAnnounce { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub min_interval: u32, + pub peers: Vec, // Peers using IPV4 +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct WarpDictionaryPeer { + pub ip: String, + pub peer_id: String, + pub port: u16, +} + +impl From for WarpDictionaryPeer { + fn from(peer: Peer) -> Self { + Self { + peer_id: peer.peer_id.to_string(), + ip: peer.peer_addr.ip().to_string(), + port: peer.peer_addr.port(), + } + } +} diff --git a/tests/http/responses/mod.rs b/tests/http/responses/mod.rs index bdc689056..aecb53fed 100644 --- a/tests/http/responses/mod.rs +++ b/tests/http/responses/mod.rs @@ -1,3 +1,4 @@ pub mod announce; +pub mod announce_warp; pub mod error; pub mod scrape; diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 413d28bcf..9e62b94a2 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -85,10 +85,12 @@ mod warp_http_tracker_server { assert_internal_server_error_response, assert_invalid_info_hash_error_response, assert_invalid_peer_id_error_response, assert_is_announce_response, }; + use crate::http::asserts_warp::assert_warp_announce_response; use crate::http::client::Client; use crate::http::requests::announce::{Compact, QueryBuilder}; use crate::http::responses; - use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; + use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList}; + use crate::http::responses::announce_warp::{WarpAnnounce, WarpDictionaryPeer}; use crate::http::server::{ start_default_http_tracker, start_http_tracker_on_reverse_proxy, start_http_tracker_with_external_ip, start_ipv6_http_tracker, start_public_http_tracker, @@ -395,15 +397,15 @@ mod warp_http_tracker_server { ) .await; - // It 
should only contain teh previously announced peer - assert_announce_response( + // It should only contain the previously announced peer + assert_warp_announce_response( response, - &Announce { + &WarpAnnounce { complete: 2, incomplete: 0, interval: http_tracker_server.tracker.config.announce_interval, min_interval: http_tracker_server.tracker.config.min_announce_interval, - peers: vec![DictionaryPeer::from(previously_announced_peer)], + peers: vec![WarpDictionaryPeer::from(previously_announced_peer)], }, ) .await; From 0df1a79f31030c30d2dd903c2b27e1bd049c4a45 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Feb 2023 19:21:43 +0000 Subject: [PATCH 0397/1003] fix(http): [#187] add peers using both IPv4 an IPv6 in announce response The normal (non-compact) announce response should included all peers except the peer making the request. We were excluding peers that are nos using the same IP version as the peer making the request. Peers are included in the bencoded response in the "peers" key of the main bencoded dictionary. --- src/tracker/torrent.rs | 31 ++---------- tests/api/server.rs | 2 +- tests/common/fixtures.rs | 6 +++ tests/http/server.rs | 2 +- tests/http_tracker.rs | 106 ++++++++++++++++++++++++++++++--------- tests/tracker_api.rs | 24 ++++++--- 6 files changed, 110 insertions(+), 61 deletions(-) diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index c2db6b027..3161cd36b 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -53,17 +53,14 @@ impl Entry { self.peers.values().take(MAX_SCRAPE_TORRENTS as usize).collect() } - /// Returns the list of peers for a given client. The list filters out: - /// - The client peer that is making the request to the tracker - /// - Other peers that are not using the same IP version as the client peer. + /// Returns the list of peers for a given client. + /// It filters out the input peer. 
#[must_use] pub fn get_peers_for_peer(&self, client: &Peer) -> Vec<&peer::Peer> { self.peers .values() // Take peers which are not the client peer .filter(|peer| peer.peer_addr != client.peer_addr) - // Take only peers with the same IP version as the client peer - .filter(|peer| peer.ip_version() == client.ip_version()) // Limit the number of peers on the result .take(MAX_SCRAPE_TORRENTS as usize) .collect() @@ -101,7 +98,7 @@ mod tests { mod torrent_entry { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::ops::Sub; use std::time::Duration; @@ -268,28 +265,6 @@ mod tests { assert_eq!(peers.len(), 0); } - #[test] - fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_peers_that_do_not_use_the_same_ip_version( - ) { - let mut torrent_entry = Entry::new(); - - // Add peer 1 using IPV4 - let peer1_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - let torrent_peer_1 = TorrentPeerBuilder::default().with_peer_address(peer1_socket_address).into(); - torrent_entry.update_peer(&torrent_peer_1); - - // Add peer 2 using IPV6 - let peer2_socket_address = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff)), 8080); - let torrent_peer_2 = TorrentPeerBuilder::default().with_peer_address(peer2_socket_address).into(); - torrent_entry.update_peer(&torrent_peer_2); - - // Get peers for peer 1 - let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1); - - // Peer using IPV6 should not be included - assert_eq!(peers.len(), 0); - } - #[test] fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { let mut torrent_entry = Entry::new(); diff --git a/tests/api/server.rs b/tests/api/server.rs index c1cd0630a..0e23a4320 100644 --- a/tests/api/server.rs +++ b/tests/api/server.rs @@ -72,7 +72,7 @@ impl Server { } /// Add a torrent to the tracker - pub async fn add_torrent(&self, info_hash: &InfoHash, 
peer: &Peer) { + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; } } diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index 1ead0db0c..d4b3e9812 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -22,6 +22,12 @@ impl PeerBuilder { self } + #[allow(dead_code)] + pub fn with_peer_addr(mut self, peer_addr: &SocketAddr) -> Self { + self.peer.peer_addr = *peer_addr; + self + } + #[allow(dead_code)] pub fn with_bytes_pending_to_download(mut self, left: i64) -> Self { self.peer.left = NumberOfBytes(left); diff --git a/tests/http/server.rs b/tests/http/server.rs index e5266eee5..1c8d1cb77 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -131,7 +131,7 @@ impl Server { self.connection_info.clone() } - pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; } } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 9e62b94a2..a09802724 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -385,7 +385,9 @@ mod warp_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + http_tracker_server + .add_torrent_peer(&info_hash, &previously_announced_peer) + .await; // Announce the new Peer 2. 
This new peer is non included on the response peer list let response = Client::new(http_tracker_server.get_connection_info()) @@ -419,7 +421,7 @@ mod warp_http_tracker_server { let peer = PeerBuilder::default().build(); // Add a peer - http_tracker_server.add_torrent(&info_hash, &peer).await; + http_tracker_server.add_torrent_peer(&info_hash, &peer).await; let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -450,7 +452,9 @@ mod warp_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + http_tracker_server + .add_torrent_peer(&info_hash, &previously_announced_peer) + .await; // Announce the new Peer 2 accepting compact responses let response = Client::new(http_tracker_server.get_connection_info()) @@ -489,7 +493,9 @@ mod warp_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + http_tracker_server + .add_torrent_peer(&info_hash, &previously_announced_peer) + .await; // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list @@ -783,7 +789,7 @@ mod warp_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -821,7 +827,7 @@ mod warp_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -1001,7 +1007,7 @@ mod warp_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() 
.with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -1030,7 +1036,7 @@ mod warp_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -1156,7 +1162,7 @@ mod warp_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -1185,7 +1191,7 @@ mod warp_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -1227,7 +1233,7 @@ mod warp_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -1455,7 +1461,7 @@ mod axum_http_tracker_server { // Vuze (bittorrent client) docs: // https://wiki.vuze.com/w/Announce - use std::net::{IpAddr, Ipv6Addr}; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::str::FromStr; use local_ip_address::local_ip; @@ -1780,7 +1786,9 @@ mod axum_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + http_tracker_server + .add_torrent_peer(&info_hash, &previously_announced_peer) + .await; // Announce the new Peer 2. 
This new peer is non included on the response peer list let response = Client::new(http_tracker_server.get_connection_info()) @@ -1806,6 +1814,54 @@ mod axum_http_tracker_server { .await; } + #[tokio::test] + async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Announce a peer using IPV4 + let peer_using_ipv4 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) + .build(); + http_tracker_server.add_torrent_peer(&info_hash, &peer_using_ipv4).await; + + // Announce a peer using IPV6 + let peer_using_ipv6 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + 8080, + )) + .build(); + http_tracker_server.add_torrent_peer(&info_hash, &peer_using_ipv6).await; + + // Announce the new Peer. + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000003")) + .query(), + ) + .await; + + // The newly announced peer is not included on the response peer list, + // but all the previously announced peers should be included regardless the IP version they are using. 
+ assert_announce_response( + response, + &Announce { + complete: 3, + incomplete: 0, + interval: http_tracker_server.tracker.config.announce_interval, + min_interval: http_tracker_server.tracker.config.min_announce_interval, + peers: vec![DictionaryPeer::from(peer_using_ipv4), DictionaryPeer::from(peer_using_ipv6)], + }, + ) + .await; + } + #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; @@ -1814,7 +1870,7 @@ mod axum_http_tracker_server { let peer = PeerBuilder::default().build(); // Add a peer - http_tracker_server.add_torrent(&info_hash, &peer).await; + http_tracker_server.add_torrent_peer(&info_hash, &peer).await; let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -1845,7 +1901,9 @@ mod axum_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + http_tracker_server + .add_torrent_peer(&info_hash, &previously_announced_peer) + .await; // Announce the new Peer 2 accepting compact responses let response = Client::new(http_tracker_server.get_connection_info()) @@ -1884,7 +1942,9 @@ mod axum_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + http_tracker_server + .add_torrent_peer(&info_hash, &previously_announced_peer) + .await; // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list @@ -2181,7 +2241,7 @@ mod axum_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -2220,7 +2280,7 @@ mod axum_http_tracker_server { let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -2406,7 +2466,7 @@ mod axum_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -2436,7 +2496,7 @@ mod axum_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -2566,7 +2626,7 @@ mod axum_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -2596,7 +2656,7 @@ mod axum_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -2639,7 +2699,7 @@ mod axum_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index b79e8a8af..193c6487c 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -115,7 +115,7 @@ mod tracker_apis { let api_server = start_default_api().await; api_server - .add_torrent( + .add_torrent_peer( &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), &PeerBuilder::default().into(), ) @@ -189,7 
+189,7 @@ mod tracker_apis { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - api_server.add_torrent(&info_hash, &PeerBuilder::default().into()).await; + api_server.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::empty()) @@ -216,8 +216,12 @@ mod tracker_apis { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - api_server.add_torrent(&info_hash_1, &PeerBuilder::default().into()).await; - api_server.add_torrent(&info_hash_2, &PeerBuilder::default().into()).await; + api_server + .add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()) + .await; + api_server + .add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()) + .await; let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) @@ -244,8 +248,12 @@ mod tracker_apis { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - api_server.add_torrent(&info_hash_1, &PeerBuilder::default().into()).await; - api_server.add_torrent(&info_hash_2, &PeerBuilder::default().into()).await; + api_server + .add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()) + .await; + api_server + .add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()) + .await; let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) @@ -319,7 +327,7 @@ mod tracker_apis { let peer = PeerBuilder::default().into(); - api_server.add_torrent(&info_hash, &peer).await; + api_server.add_torrent_peer(&info_hash, &peer).await; let response = Client::new(api_server.get_connection_info()) 
.get_torrent(&info_hash.to_string()) @@ -378,7 +386,7 @@ mod tracker_apis { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - api_server.add_torrent(&info_hash, &PeerBuilder::default().into()).await; + api_server.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_torrent(&info_hash.to_string()) From ea8d4d8432f20a93850a90f6aee0e325e55c7dc6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 22 Feb 2023 13:13:55 +0000 Subject: [PATCH 0398/1003] feat(http): [#191] add route and extractor for scrape req in Axum HTTP tracker with only one infohash in the URL: http://localhost:7070/scrape?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0 It does not allow more than one infohas yet. --- .../axum_implementation/extractors/mod.rs | 1 + .../extractors/scrape_request.rs | 45 ++++++ src/http/axum_implementation/handlers/mod.rs | 1 + .../axum_implementation/handlers/scrape.rs | 19 +++ .../axum_implementation/handlers/status.rs | 2 +- src/http/axum_implementation/requests/mod.rs | 1 + .../axum_implementation/requests/scrape.rs | 137 ++++++++++++++++++ src/http/axum_implementation/routes.rs | 10 +- 8 files changed, 211 insertions(+), 5 deletions(-) create mode 100644 src/http/axum_implementation/extractors/scrape_request.rs create mode 100644 src/http/axum_implementation/handlers/scrape.rs create mode 100644 src/http/axum_implementation/requests/scrape.rs diff --git a/src/http/axum_implementation/extractors/mod.rs b/src/http/axum_implementation/extractors/mod.rs index 65b2775a9..380eeda6d 100644 --- a/src/http/axum_implementation/extractors/mod.rs +++ b/src/http/axum_implementation/extractors/mod.rs @@ -1,3 +1,4 @@ pub mod announce_request; pub mod peer_ip; pub mod remote_client_ip; +pub mod scrape_request; diff --git a/src/http/axum_implementation/extractors/scrape_request.rs 
b/src/http/axum_implementation/extractors/scrape_request.rs new file mode 100644 index 000000000..4212abfcb --- /dev/null +++ b/src/http/axum_implementation/extractors/scrape_request.rs @@ -0,0 +1,45 @@ +use std::panic::Location; + +use axum::async_trait; +use axum::extract::FromRequestParts; +use axum::http::request::Parts; +use axum::response::{IntoResponse, Response}; + +use crate::http::axum_implementation::query::Query; +use crate::http::axum_implementation::requests::scrape::{ParseScrapeQueryError, Scrape}; +use crate::http::axum_implementation::responses; + +pub struct ExtractRequest(pub Scrape); + +#[async_trait] +impl FromRequestParts for ExtractRequest +where + S: Send + Sync, +{ + type Rejection = Response; + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + let raw_query = parts.uri.query(); + + if raw_query.is_none() { + return Err(responses::error::Error::from(ParseScrapeQueryError::MissingParams { + location: Location::caller(), + }) + .into_response()); + } + + let query = raw_query.unwrap().parse::(); + + if let Err(error) = query { + return Err(responses::error::Error::from(error).into_response()); + } + + let scrape_request = Scrape::try_from(query.unwrap()); + + if let Err(error) = scrape_request { + return Err(responses::error::Error::from(error).into_response()); + } + + Ok(ExtractRequest(scrape_request.unwrap())) + } +} diff --git a/src/http/axum_implementation/handlers/mod.rs b/src/http/axum_implementation/handlers/mod.rs index bff05984c..4e6849534 100644 --- a/src/http/axum_implementation/handlers/mod.rs +++ b/src/http/axum_implementation/handlers/mod.rs @@ -1,2 +1,3 @@ pub mod announce; +pub mod scrape; pub mod status; diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs new file mode 100644 index 000000000..094bf844b --- /dev/null +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -0,0 +1,19 @@ +use std::sync::Arc; + +use axum::extract::State; +use 
log::debug; + +use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; +use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; +use crate::tracker::Tracker; + +#[allow(clippy::unused_async)] +pub async fn handle( + State(_tracker): State>, + ExtractRequest(scrape_request): ExtractRequest, + _remote_client_ip: RemoteClientIp, +) -> String { + debug!("http scrape request: {:#?}", &scrape_request); + + format!("{:#?}", &scrape_request) +} diff --git a/src/http/axum_implementation/handlers/status.rs b/src/http/axum_implementation/handlers/status.rs index d4031aef5..8a058b456 100644 --- a/src/http/axum_implementation/handlers/status.rs +++ b/src/http/axum_implementation/handlers/status.rs @@ -7,6 +7,6 @@ use crate::http::axum_implementation::resources::ok::Ok; use crate::http::axum_implementation::responses::ok; #[allow(clippy::unused_async)] -pub async fn get_status_handler(remote_client_ip: RemoteClientIp) -> Json { +pub async fn handle(remote_client_ip: RemoteClientIp) -> Json { ok::response(&remote_client_ip) } diff --git a/src/http/axum_implementation/requests/mod.rs b/src/http/axum_implementation/requests/mod.rs index 74894de33..776d2dfbf 100644 --- a/src/http/axum_implementation/requests/mod.rs +++ b/src/http/axum_implementation/requests/mod.rs @@ -1 +1,2 @@ pub mod announce; +pub mod scrape; diff --git a/src/http/axum_implementation/requests/scrape.rs b/src/http/axum_implementation/requests/scrape.rs new file mode 100644 index 000000000..483738a03 --- /dev/null +++ b/src/http/axum_implementation/requests/scrape.rs @@ -0,0 +1,137 @@ +use std::panic::Location; + +use thiserror::Error; + +use crate::http::axum_implementation::query::Query; +use crate::http::axum_implementation::responses; +use crate::http::percent_encoding::percent_decode_info_hash; +use crate::located_error::{Located, LocatedError}; +use crate::protocol::info_hash::{ConversionError, InfoHash}; + +pub type NumberOfBytes = i64; + +// Query param 
name +const INFO_HASH_SCRAPE_PARAM: &str = "info_hash"; + +#[derive(Debug, PartialEq)] +pub struct Scrape { + pub info_hashes: Vec, +} + +#[derive(Error, Debug)] +pub enum ParseScrapeQueryError { + #[error("missing query params for scrape request in {location}")] + MissingParams { location: &'static Location<'static> }, + #[error("missing param {param_name} in {location}")] + MissingParam { + location: &'static Location<'static>, + param_name: String, + }, + #[error("invalid param value {param_value} for {param_name} in {location}")] + InvalidParam { + param_name: String, + param_value: String, + location: &'static Location<'static>, + }, + #[error("invalid param value {param_value} for {param_name} in {source}")] + InvalidInfoHashParam { + param_name: String, + param_value: String, + source: LocatedError<'static, ConversionError>, + }, +} + +impl From for responses::error::Error { + fn from(err: ParseScrapeQueryError) -> Self { + responses::error::Error { + failure_reason: format!("Cannot parse query params for scrape request: {err}"), + } + } +} + +impl TryFrom for Scrape { + type Error = ParseScrapeQueryError; + + fn try_from(query: Query) -> Result { + Ok(Self { + info_hashes: extract_info_hashes(&query)?, + }) + } +} + +fn extract_info_hashes(query: &Query) -> Result, ParseScrapeQueryError> { + match query.get_param(INFO_HASH_SCRAPE_PARAM) { + Some(raw_param) => { + let mut info_hashes = vec![]; + + // todo: multiple infohashes + + let info_hash = percent_decode_info_hash(&raw_param).map_err(|err| ParseScrapeQueryError::InvalidInfoHashParam { + param_name: INFO_HASH_SCRAPE_PARAM.to_owned(), + param_value: raw_param.clone(), + source: Located(err).into(), + })?; + + info_hashes.push(info_hash); + + Ok(info_hashes) + } + None => { + return Err(ParseScrapeQueryError::MissingParam { + location: Location::caller(), + param_name: INFO_HASH_SCRAPE_PARAM.to_owned(), + }) + } + } +} + +#[cfg(test)] +mod tests { + + mod scrape_request { + + use 
crate::http::axum_implementation::query::Query; + use crate::http::axum_implementation::requests::scrape::{Scrape, INFO_HASH_SCRAPE_PARAM}; + use crate::protocol::info_hash::InfoHash; + + #[test] + fn should_be_instantiated_from_the_url_query_with_only_one_infohash() { + let raw_query = Query::from(vec![( + INFO_HASH_SCRAPE_PARAM, + "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0", + )]) + .to_string(); + + let query = raw_query.parse::().unwrap(); + + let scrape_request = Scrape::try_from(query).unwrap(); + + assert_eq!( + scrape_request, + Scrape { + info_hashes: vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()], + } + ); + } + + mod when_it_is_instantiated_from_the_url_query_params { + + use crate::http::axum_implementation::query::Query; + use crate::http::axum_implementation::requests::scrape::{Scrape, INFO_HASH_SCRAPE_PARAM}; + + #[test] + fn it_should_fail_if_the_query_does_not_include_the_info_hash_param() { + let raw_query_without_info_hash = "another_param=NOT_RELEVANT"; + + assert!(Scrape::try_from(raw_query_without_info_hash.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_info_hash_param_is_invalid() { + let raw_query = Query::from(vec![(INFO_HASH_SCRAPE_PARAM, "INVALID_INFO_HASH_VALUE")]).to_string(); + + assert!(Scrape::try_from(raw_query.parse::().unwrap()).is_err()); + } + } + } +} diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index 6138f5acf..1d4d67e73 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -4,15 +4,17 @@ use axum::routing::get; use axum::Router; use axum_client_ip::SecureClientIpSource; -use super::handlers::announce::handle; -use super::handlers::status::get_status_handler; +use super::handlers::{announce, scrape, status}; use crate::tracker::Tracker; pub fn router(tracker: &Arc) -> Router { Router::new() // Status - .route("/status", get(get_status_handler)) + .route("/status", 
get(status::handle)) // Announce request - .route("/announce", get(handle).with_state(tracker.clone())) + .route("/announce", get(announce::handle).with_state(tracker.clone())) + // Scrape request + .route("/scrape", get(scrape::handle).with_state(tracker.clone())) + // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) } From 0cab696061eade23a0080c4a802e67df2c8c939a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Feb 2023 18:28:31 +0000 Subject: [PATCH 0399/1003] feat(http): [#191] add cargo dependency: multimap That dependency will be used to store URL query params in a MultiMap struct, because query params can have multiple values like this: ``` param1=value1&param1=value2 ``` The multimap allows adding multiple values to a HashMap. --- Cargo.lock | 10 ++++++++++ Cargo.toml | 1 + cSpell.json | 1 + 3 files changed, 12 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 05b439353..cfd8aaba8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1477,6 +1477,15 @@ dependencies = [ "syn", ] +[[package]] +name = "multimap" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" +dependencies = [ + "serde", +] + [[package]] name = "multipart" version = "0.18.0" @@ -2945,6 +2954,7 @@ dependencies = [ "local-ip-address", "log", "mockall", + "multimap", "openssl", "percent-encoding", "r2d2", diff --git a/Cargo.toml b/Cargo.toml index 917bc9e31..fa126a152 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,6 +62,7 @@ axum = "0.6.1" axum-server = { version = "0.4.4", features = ["tls-rustls"] } axum-client-ip = "0.4.0" bip_bencode = "0.4.4" +multimap = "0.8.3" [dev-dependencies] diff --git a/cSpell.json b/cSpell.json index a451d18dc..b8aceb568 100644 --- a/cSpell.json +++ b/cSpell.json @@ -37,6 +37,7 @@ "Lphant", "middlewares", "mockall", + "multimap", "myacicontext", "nanos", "nextest", From 
30cf3b9d66c4452d74719b0164b0258bd106bd50 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Feb 2023 18:30:58 +0000 Subject: [PATCH 0400/1003] feat(http): [#192] Query struct allows multiple values for the same param The `torrust_tracker::http::axum_implementation::query` allow mutiple values for the same URL query param, for example: ``` param1=value1¶m2=value2 ``` It's needed in the `scrape` request: http://localhost:7070/scrape?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0 --- src/http/axum_implementation/query.rs | 229 ++++++++++++++++++-------- 1 file changed, 157 insertions(+), 72 deletions(-) diff --git a/src/http/axum_implementation/query.rs b/src/http/axum_implementation/query.rs index cad58c17b..8b01e9db7 100644 --- a/src/http/axum_implementation/query.rs +++ b/src/http/axum_implementation/query.rs @@ -1,19 +1,50 @@ -use std::collections::HashMap; use std::panic::Location; use std::str::FromStr; +use multimap::MultiMap; use thiserror::Error; -/// Represent a URL query component with some restrictions. -/// It does not allow duplicate param names like this: `param1=value1¶m1=value2` -/// It would take the second value for `param1`. +type ParamName = String; +type ParamValue = String; + +/// Represent a URL query component: +/// +/// ```text +/// URI = scheme ":" ["//" authority] path ["?" query] ["#" fragment] +/// ``` +#[derive(Debug)] pub struct Query { /* code-review: - - Consider using `HashMap`, because it does not allow you to add a second value for the same param name. - Consider using a third-party crate. - Conversion from/to string is not deterministic. Params can be in a different order in the query string. 
*/ - params: HashMap, + params: MultiMap, +} + +impl Query { + /// Returns only the first param value even if it has multiple values like this: + /// + /// ```text + /// param1=value1¶m1=value2 + /// ``` + /// + /// In that case `get_param("param1")` will return `value1`. + #[must_use] + pub fn get_param(&self, name: &str) -> Option { + self.params.get(name).map(|pair| pair.value.clone()) + } + + /// Returns all the param values as a vector even if it has only one value. + #[must_use] + pub fn get_param_vec(&self, name: &str) -> Option> { + self.params.get_vec(name).map(|pairs| { + let mut param_values = vec![]; + for pair in pairs { + param_values.push(pair.value.to_string()); + } + param_values + }) + } } #[derive(Error, Debug)] @@ -29,13 +60,14 @@ impl FromStr for Query { type Err = ParseQueryError; fn from_str(raw_query: &str) -> Result { - let mut params: HashMap = HashMap::new(); + let mut params: MultiMap = MultiMap::new(); let raw_params = raw_query.trim().trim_start_matches('?').split('&').collect::>(); for raw_param in raw_params { - let param: Param = raw_param.parse()?; - params.insert(param.name, param.value); + let pair: NameValuePair = raw_param.parse()?; + let param_name = pair.name.clone(); + params.insert(param_name, pair); } Ok(Self { params }) @@ -44,10 +76,10 @@ impl FromStr for Query { impl From> for Query { fn from(raw_params: Vec<(&str, &str)>) -> Self { - let mut params: HashMap = HashMap::new(); + let mut params: MultiMap = MultiMap::new(); for raw_param in raw_params { - params.insert(raw_param.0.to_owned(), raw_param.1.to_owned()); + params.insert(raw_param.0.to_owned(), NameValuePair::new(raw_param.0, raw_param.1)); } Self { params } @@ -58,8 +90,8 @@ impl std::fmt::Display for Query { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let query = self .params - .iter() - .map(|param| format!("{}", Param::new(param.0, param.1))) + .iter_all() + .map(|param| format!("{}", FieldValuePairSet::from_vec(param.1))) .collect::>() 
.join("&"); @@ -67,20 +99,22 @@ impl std::fmt::Display for Query { } } -impl Query { - #[must_use] - pub fn get_param(&self, name: &str) -> Option { - self.params.get(name).map(std::string::ToString::to_string) - } +#[derive(Debug, PartialEq, Clone)] +struct NameValuePair { + name: ParamName, + value: ParamValue, } -#[derive(Debug, PartialEq)] -struct Param { - name: String, - value: String, +impl NameValuePair { + pub fn new(name: &str, value: &str) -> Self { + Self { + name: name.to_owned(), + value: value.to_owned(), + } + } } -impl FromStr for Param { +impl FromStr for NameValuePair { type Err = ParseQueryError; fn from_str(raw_param: &str) -> Result { @@ -100,18 +134,39 @@ impl FromStr for Param { } } -impl std::fmt::Display for Param { +impl std::fmt::Display for NameValuePair { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{}={}", self.name, self.value) } } -impl Param { - pub fn new(name: &str, value: &str) -> Self { - Self { - name: name.to_owned(), - value: value.to_owned(), +#[derive(Debug, PartialEq)] +struct FieldValuePairSet { + pairs: Vec, +} + +impl FieldValuePairSet { + fn from_vec(pair_vec: &Vec) -> Self { + let mut pairs: Vec = vec![]; + + for pair in pair_vec { + pairs.push(pair.clone()); } + + Self { pairs } + } +} + +impl std::fmt::Display for FieldValuePairSet { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let query = self + .pairs + .iter() + .map(|pair| format!("{pair}")) + .collect::>() + .join("&"); + + write!(f, "{query}") } } @@ -136,6 +191,14 @@ mod tests { assert_eq!(query.get_param("port").unwrap(), "17548"); } + #[test] + fn should_be_instantiated_from_a_string_pair_vector() { + let query = Query::from(vec![("param1", "value1"), ("param2", "value2")]); + + assert_eq!(query.get_param("param1"), Some("value1".to_string())); + assert_eq!(query.get_param("param2"), Some("value2".to_string())); + } + #[test] fn should_fail_parsing_an_invalid_query_string() { let invalid_raw_query = 
"name=value=value"; @@ -151,7 +214,7 @@ mod tests { let query = raw_query.parse::().unwrap(); - assert_eq!(query.get_param("name").unwrap(), "value"); + assert_eq!(query.get_param("name"), Some("value".to_string())); } #[test] @@ -160,61 +223,83 @@ mod tests { let query = raw_query.parse::().unwrap(); - assert_eq!(query.get_param("name").unwrap(), "value"); - } - - #[test] - fn should_be_instantiated_from_a_string_pair_vector() { - let query = Query::from(vec![("param1", "value1"), ("param2", "value2")]).to_string(); - - assert!(query == "param1=value1¶m2=value2" || query == "param2=value2¶m1=value1"); + assert_eq!(query.get_param("name"), Some("value".to_string())); } - #[test] - fn should_not_allow_more_than_one_value_for_the_same_param() { - let query = Query::from(vec![("param1", "value1"), ("param1", "value2"), ("param1", "value3")]).to_string(); - - assert_eq!(query, "param1=value3"); + mod should_allow_more_than_one_value_for_the_same_param { + use crate::http::axum_implementation::query::Query; + + #[test] + fn instantiated_from_a_vector() { + let query1 = Query::from(vec![("param1", "value1"), ("param1", "value2")]); + assert_eq!( + query1.get_param_vec("param1"), + Some(vec!["value1".to_string(), "value2".to_string()]) + ); + } + + #[test] + fn parsed_from_an_string() { + let query2 = "param1=value1¶m1=value2".parse::().unwrap(); + assert_eq!( + query2.get_param_vec("param1"), + Some(vec!["value1".to_string(), "value2".to_string()]) + ); + } } - #[test] - fn should_be_displayed() { - let query = "param1=value1¶m2=value2".parse::().unwrap().to_string(); - - assert!(query == "param1=value1¶m2=value2" || query == "param2=value2¶m1=value1"); + mod should_be_displayed { + use crate::http::axum_implementation::query::Query; + + #[test] + fn with_one_param() { + assert_eq!("param1=value1".parse::().unwrap().to_string(), "param1=value1"); + } + + #[test] + fn with_multiple_params() { + let query = "param1=value1¶m2=value2".parse::().unwrap().to_string(); + 
assert!(query == "param1=value1¶m2=value2" || query == "param2=value2¶m1=value1"); + } + + #[test] + fn with_multiple_values_for_the_same_param() { + let query = "param1=value1¶m1=value2".parse::().unwrap().to_string(); + assert!(query == "param1=value1¶m1=value2" || query == "param1=value2¶m1=value1"); + } } - } - mod url_query_param { - use crate::http::axum_implementation::query::Param; + mod param_name_value_pair { + use crate::http::axum_implementation::query::NameValuePair; - #[test] - fn should_parse_a_single_query_param() { - let raw_param = "name=value"; + #[test] + fn should_parse_a_single_query_param() { + let raw_param = "name=value"; - let param = raw_param.parse::().unwrap(); + let param = raw_param.parse::().unwrap(); - assert_eq!( - param, - Param { - name: "name".to_string(), - value: "value".to_string(), - } - ); - } + assert_eq!( + param, + NameValuePair { + name: "name".to_string(), + value: "value".to_string(), + } + ); + } - #[test] - fn should_fail_parsing_an_invalid_query_param() { - let invalid_raw_param = "name=value=value"; + #[test] + fn should_fail_parsing_an_invalid_query_param() { + let invalid_raw_param = "name=value=value"; - let query = invalid_raw_param.parse::(); + let query = invalid_raw_param.parse::(); - assert!(query.is_err()); - } + assert!(query.is_err()); + } - #[test] - fn should_be_displayed() { - assert_eq!("name=value".parse::().unwrap().to_string(), "name=value"); + #[test] + fn should_be_displayed() { + assert_eq!("name=value".parse::().unwrap().to_string(), "name=value"); + } } } } From 2de8265eaba098f5c69cd8cbfcbc37f05d958044 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Feb 2023 18:33:47 +0000 Subject: [PATCH 0401/1003] feat(http): [#191] parse scrape req with multiple infohashes --- .../axum_implementation/requests/scrape.rs | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/http/axum_implementation/requests/scrape.rs 
b/src/http/axum_implementation/requests/scrape.rs index 483738a03..0f23039bb 100644 --- a/src/http/axum_implementation/requests/scrape.rs +++ b/src/http/axum_implementation/requests/scrape.rs @@ -60,19 +60,20 @@ impl TryFrom for Scrape { } fn extract_info_hashes(query: &Query) -> Result, ParseScrapeQueryError> { - match query.get_param(INFO_HASH_SCRAPE_PARAM) { - Some(raw_param) => { + match query.get_param_vec(INFO_HASH_SCRAPE_PARAM) { + Some(raw_params) => { let mut info_hashes = vec![]; - // todo: multiple infohashes + for raw_param in raw_params { + let info_hash = + percent_decode_info_hash(&raw_param).map_err(|err| ParseScrapeQueryError::InvalidInfoHashParam { + param_name: INFO_HASH_SCRAPE_PARAM.to_owned(), + param_value: raw_param.clone(), + source: Located(err).into(), + })?; - let info_hash = percent_decode_info_hash(&raw_param).map_err(|err| ParseScrapeQueryError::InvalidInfoHashParam { - param_name: INFO_HASH_SCRAPE_PARAM.to_owned(), - param_value: raw_param.clone(), - source: Located(err).into(), - })?; - - info_hashes.push(info_hash); + info_hashes.push(info_hash); + } Ok(info_hashes) } From 0c7735a0b14a03f1268daa41b232d0918cbfe37f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Feb 2023 18:34:13 +0000 Subject: [PATCH 0402/1003] fix(http): typo in comment --- src/http/percent_encoding.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/http/percent_encoding.rs b/src/http/percent_encoding.rs index 9b5b79ed7..3774519fb 100644 --- a/src/http/percent_encoding.rs +++ b/src/http/percent_encoding.rs @@ -3,7 +3,7 @@ use crate::tracker::peer::{self, IdConversionError}; /// # Errors /// -/// Will return `Err` if if the decoded bytes do not represent a valid `InfoHash`. +/// Will return `Err` if the decoded bytes do not represent a valid `InfoHash`. 
pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { let bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); InfoHash::try_from(bytes) From c4bee79c7ad15018bffbf39e69e663022aac16b6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 24 Feb 2023 16:43:06 +0000 Subject: [PATCH 0403/1003] feat(http): [#191] add Tracker::scrape function This function returns the data we need for a scrape response regardless of the method that the client is using to communicate with the tracker (UDP or HTTP). --- .../axum_implementation/handlers/scrape.rs | 14 +- src/tracker/mod.rs | 348 +++++++++++++----- src/tracker/torrent.rs | 18 + 3 files changed, 286 insertions(+), 94 deletions(-) diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 094bf844b..2246ea7db 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -9,11 +9,21 @@ use crate::tracker::Tracker; #[allow(clippy::unused_async)] pub async fn handle( - State(_tracker): State>, + State(tracker): State>, ExtractRequest(scrape_request): ExtractRequest, _remote_client_ip: RemoteClientIp, ) -> String { debug!("http scrape request: {:#?}", &scrape_request); - format!("{:#?}", &scrape_request) + /* + todo: + - Add the service that sends the event for statistics. + - Build the HTTP bencoded response. 
+ */ + + let scrape_data = tracker.scrape(&scrape_request.info_hashes).await; + + debug!("scrape data: {:#?}", &scrape_data); + + "todo".to_string() } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index e01fe6a19..0a3bd7c0b 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -7,7 +7,7 @@ pub mod statistics; pub mod torrent; use std::collections::btree_map::Entry; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use std::net::IpAddr; use std::panic::Location; use std::sync::Arc; @@ -18,7 +18,7 @@ use tokio::sync::{RwLock, RwLockReadGuard}; use self::error::Error; use self::peer::Peer; -use self::torrent::SwamStats; +use self::torrent::{SwamStats, SwarmMetadata}; use crate::config::Configuration; use crate::databases::driver::Driver; use crate::databases::{self, Database}; @@ -50,6 +50,27 @@ pub struct AnnounceData { pub interval_min: u32, } +#[derive(Debug, PartialEq, Default)] +pub struct ScrapeData { + files: HashMap, +} + +impl ScrapeData { + #[must_use] + pub fn empty() -> Self { + let files: HashMap = HashMap::new(); + Self { files } + } + + pub fn add_file(&mut self, info_hash: &InfoHash, swarm_metadata: SwarmMetadata) { + self.files.insert(*info_hash, swarm_metadata); + } + + pub fn add_file_with_no_metadata(&mut self, info_hash: &InfoHash) { + self.files.insert(*info_hash, SwarmMetadata::default()); + } +} + impl Tracker { /// # Errors /// @@ -85,8 +106,14 @@ impl Tracker { self.mode == mode::Mode::Listed || self.mode == mode::Mode::PrivateListed } - /// It handles an announce request + /// It handles an announce request. + /// + /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). pub async fn announce(&self, info_hash: &InfoHash, peer: &mut Peer, remote_client_ip: &IpAddr) -> AnnounceData { + // code-review: maybe instead of mutating the peer we could just return + // a tuple with the new peer and the announce data: (Peer, AnnounceData). 
+ // It could even be a different struct: `StoredPeer` or `PublicPeer`. + peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip())); let swam_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; @@ -101,6 +128,27 @@ impl Tracker { } } + /// It handles a scrape request. + /// + /// BEP 48: [Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). + pub async fn scrape(&self, info_hashes: &Vec) -> ScrapeData { + let mut scrape_data = ScrapeData::empty(); + + for info_hash in info_hashes { + scrape_data.add_file(info_hash, self.get_swarm_metadata(info_hash).await); + } + + scrape_data + } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { + let torrents = self.get_torrents().await; + match torrents.get(info_hash) { + Some(torrent_entry) => torrent_entry.get_swarm_metadata(), + None => SwarmMetadata::default(), + } + } + /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. 
@@ -416,143 +464,259 @@ fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Opt #[cfg(test)] mod tests { - use std::sync::Arc; - use super::statistics::Keeper; - use super::{TorrentsMetrics, Tracker}; - use crate::config::{ephemeral_configuration, Configuration}; + mod the_tracker { - pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) - } + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use crate::config::{ephemeral_configuration, Configuration}; + use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::tracker::peer::{self, Peer}; + use crate::tracker::statistics::Keeper; + use crate::tracker::{TorrentsMetrics, Tracker}; + + pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral_configuration()) + } - pub fn tracker_factory() -> Tracker { - // code-review: the tracker initialization is duplicated in many places. Consider make this function public. + pub fn tracker_factory() -> Tracker { + // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
- // Configuration - let configuration = tracker_configuration(); + // Configuration + let configuration = tracker_configuration(); - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - // Initialize Torrust tracker - match Tracker::new(&configuration, Some(stats_event_sender), stats_repository) { - Ok(tracker) => tracker, - Err(error) => { - panic!("{}", error) + // Initialize Torrust tracker + match Tracker::new(&configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } } } - } - #[tokio::test] - async fn the_tracker_should_collect_torrent_metrics() { - let tracker = tracker_factory(); - - let torrents_metrics = tracker.get_torrents_metrics().await; + /// A peer that has completed downloading. + fn complete_peer() -> Peer { + Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } - assert_eq!( - torrents_metrics, - TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 0, - torrents: 0 + /// A peer that has NOT completed downloading. 
+ fn incomplete_peer() -> Peer { + Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(1000), // Still bytes to download + event: AnnounceEvent::Started, } - ); - } + } - mod the_tracker_assigning_the_ip_to_the_peer { + #[tokio::test] + async fn should_collect_torrent_metrics() { + let tracker = tracker_factory(); - use std::net::{IpAddr, Ipv4Addr}; + let torrents_metrics = tracker.get_torrents_metrics().await; - use crate::tracker::assign_ip_address_to_peer; + assert_eq!( + torrents_metrics, + TorrentsMetrics { + seeders: 0, + completed: 0, + leechers: 0, + torrents: 0 + } + ); + } - #[test] - fn should_use_the_source_ip_instead_of_the_ip_in_the_announce_request() { - let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + mod handling_an_announce_request { + mod should_assign_the_ip_to_the_peer { - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + use std::net::{IpAddr, Ipv4Addr}; - assert_eq!(peer_ip, remote_ip); - } + use crate::tracker::assign_ip_address_to_peer; - mod when_the_client_ip_is_a_ipv4_loopback_ip { + #[test] + fn using_the_source_ip_instead_of_the_ip_in_the_announce_request() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - use std::str::FromStr; + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); - use crate::tracker::assign_ip_address_to_peer; + assert_eq!(peer_ip, remote_ip); + } - #[test] - fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + mod and_when_the_client_ip_is_a_ipv4_loopback_ip { - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; - assert_eq!(peer_ip, 
remote_ip); - } + use crate::tracker::assign_ip_address_to_peer; - #[test] - fn it_should_use_the_external_tracker_ip_in_tracker_configuration_if_it_is_defined() { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); - let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + assert_eq!(peer_ip, remote_ip); + } - assert_eq!(peer_ip, tracker_external_ip); - } + #[test] + fn it_should_use_the_external_tracker_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv6_ip( + ) { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let tracker_external_ip = + IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + } + + mod and_when_client_ip_is_a_ipv6_loopback_ip { - #[test] - fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv6_ip() - { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; - let tracker_external_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + use crate::tracker::assign_ip_address_to_peer; - let peer_ip = 
assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); - assert_eq!(peer_ip, tracker_external_ip); + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let tracker_external_ip = + IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv4_ip( + ) { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + } } } - mod when_client_ip_is_a_ipv6_loopback_ip { + mod handling_a_scrape_request { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - use std::str::FromStr; + use std::net::{IpAddr, Ipv4Addr}; - use crate::tracker::assign_ip_address_to_peer; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::tests::the_tracker::{complete_peer, incomplete_peer, tracker_factory}; + use crate::tracker::{ScrapeData, SwarmMetadata}; - #[test] - fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + #[tokio::test] + async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent() { + let tracker = tracker_factory(); - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + 
let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; - assert_eq!(peer_ip, remote_ip); - } + let scrape_data = tracker.scrape(&info_hashes).await; - #[test] - fn it_should_use_the_external_ip_in_tracker_configuration_if_it_is_defined() { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + let mut expected_scrape_data = ScrapeData::empty(); - let tracker_external_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + expected_scrape_data.add_file_with_no_metadata(&info_hashes[0]); - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + assert_eq!(scrape_data, expected_scrape_data); + } - assert_eq!(peer_ip, tracker_external_ip); + #[tokio::test] + async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { + let tracker = tracker_factory(); + + let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); + + // Announce a "complete" peer for the torrent + let mut complete_peer = complete_peer(); + tracker + .announce(&info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10))) + .await; + + // Announce an "incomplete" peer for the torrent + let mut incomplete_peer = incomplete_peer(); + tracker + .announce(&info_hash, &mut incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11))) + .await; + + // Scrape + let scrape_data = tracker.scrape(&vec![info_hash]).await; + + // The expected swarm metadata for the file + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file( + &info_hash, + SwarmMetadata { + complete: 0, // the "complete" peer does not count because it was not previously known + downloaded: 0, + incomplete: 1, // the "incomplete" peer we have just announced + }, + ); + + assert_eq!(scrape_data, expected_scrape_data); } - #[test] - fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv4_ip() - { - 
let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + #[tokio::test] + async fn it_should_allow_scraping_for_multiple_torrents() { + let tracker = tracker_factory(); + + let info_hashes = vec![ + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::().unwrap(), + ]; - let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + let scrape_data = tracker.scrape(&info_hashes).await; - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file_with_no_metadata(&info_hashes[0]); + expected_scrape_data.add_file_with_no_metadata(&info_hashes[1]); - assert_eq!(peer_ip, tracker_external_ip); + assert_eq!(scrape_data, expected_scrape_data); } } } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 3161cd36b..34017599d 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -14,6 +14,13 @@ pub struct Entry { pub completed: u32, } +#[derive(Debug, PartialEq, Default)] +pub struct SwarmMetadata { + pub complete: u32, // The number of active peers that have completed downloading + pub downloaded: u32, // The number of peers that have ever completed downloading + pub incomplete: u32, // The number of active peers that have not completed downloading +} + impl Entry { #[must_use] pub fn new() -> Entry { @@ -74,6 +81,17 @@ impl Entry { (seeders, self.completed, leechers) } + #[must_use] + pub fn get_swarm_metadata(&self) -> SwarmMetadata { + // code-review: consider using always this function instead of `get_stats`. 
+ let (seeders, completed, leechers) = self.get_stats(); + SwarmMetadata { + complete: seeders, + downloaded: completed, + incomplete: leechers, + } + } + pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); self.peers.retain(|_, peer| peer.updated > current_cutoff); From ae1a076c57bc74fbd73dc42e54df373513c642d3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 24 Feb 2023 17:23:47 +0000 Subject: [PATCH 0404/1003] feat(http): [#191] add scrape app service --- .../axum_implementation/extractors/peer_ip.rs | 2 +- .../axum_implementation/handlers/announce.rs | 4 ++-- .../axum_implementation/handlers/scrape.rs | 21 +++++++++++++------ .../axum_implementation/services/announce.rs | 4 ++-- src/http/axum_implementation/services/mod.rs | 1 + .../axum_implementation/services/scrape.rs | 20 ++++++++++++++++++ src/tracker/statistics.rs | 2 ++ tests/http_tracker.rs | 6 ++---- 8 files changed, 45 insertions(+), 15 deletions(-) create mode 100644 src/http/axum_implementation/services/scrape.rs diff --git a/src/http/axum_implementation/extractors/peer_ip.rs b/src/http/axum_implementation/extractors/peer_ip.rs index 9f7e92a9b..aae348d99 100644 --- a/src/http/axum_implementation/extractors/peer_ip.rs +++ b/src/http/axum_implementation/extractors/peer_ip.rs @@ -31,7 +31,7 @@ impl From for responses::error::Error { /// /// Will return an error if the peer IP cannot be obtained according to the configuration. /// For example, if the IP is extracted from an HTTP header which is missing in the request. 
-pub fn assign_ip_address_to_peer(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { +pub fn resolve(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { if on_reverse_proxy { if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { Ok(ip) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 81f57e810..d5fa7f3a4 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -7,7 +7,7 @@ use axum::response::{IntoResponse, Response}; use log::debug; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; -use crate::http::axum_implementation::extractors::peer_ip::assign_ip_address_to_peer; +use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; use crate::http::axum_implementation::responses::announce; @@ -24,7 +24,7 @@ pub async fn handle( ) -> Response { debug!("http announce request: {:#?}", announce_request); - let peer_ip = match assign_ip_address_to_peer(tracker.config.on_reverse_proxy, &remote_client_ip) { + let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, &remote_client_ip) { Ok(peer_ip) => peer_ip, Err(err) => return err, }; diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 2246ea7db..1f1d3ece9 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -1,29 +1,38 @@ use std::sync::Arc; use axum::extract::State; +use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; use log::debug; +use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use 
crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; +use crate::http::axum_implementation::services; use crate::tracker::Tracker; #[allow(clippy::unused_async)] pub async fn handle( State(tracker): State>, ExtractRequest(scrape_request): ExtractRequest, - _remote_client_ip: RemoteClientIp, -) -> String { + remote_client_ip: RemoteClientIp, +) -> Response { debug!("http scrape request: {:#?}", &scrape_request); /* todo: - - Add the service that sends the event for statistics. - - Build the HTTP bencoded response. + - [x] Add the service that sends the event for statistics. + - [ ] Build the HTTP bencoded response. */ - let scrape_data = tracker.scrape(&scrape_request.info_hashes).await; + let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, &remote_client_ip) { + Ok(peer_ip) => peer_ip, + Err(err) => return err, + }; + + let scrape_data = services::scrape::invoke(tracker.clone(), &scrape_request.info_hashes, &peer_ip).await; debug!("scrape data: {:#?}", &scrape_data); - "todo".to_string() + (StatusCode::OK, "todo").into_response() } diff --git a/src/http/axum_implementation/services/announce.rs b/src/http/axum_implementation/services/announce.rs index 6378c3008..356dbaeb9 100644 --- a/src/http/axum_implementation/services/announce.rs +++ b/src/http/axum_implementation/services/announce.rs @@ -9,7 +9,7 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) let original_peer_ip = peer.peer_addr.ip(); // The tracker could change the original peer ip - let response = tracker.announce(&info_hash, peer, &original_peer_ip).await; + let announce_data = tracker.announce(&info_hash, peer, &original_peer_ip).await; match original_peer_ip { IpAddr::V4(_) => { @@ -20,5 +20,5 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) } } - response + announce_data } diff --git a/src/http/axum_implementation/services/mod.rs b/src/http/axum_implementation/services/mod.rs index 74894de33..776d2dfbf 100644 
--- a/src/http/axum_implementation/services/mod.rs +++ b/src/http/axum_implementation/services/mod.rs @@ -1 +1,2 @@ pub mod announce; +pub mod scrape; diff --git a/src/http/axum_implementation/services/scrape.rs b/src/http/axum_implementation/services/scrape.rs new file mode 100644 index 000000000..f40b8f999 --- /dev/null +++ b/src/http/axum_implementation/services/scrape.rs @@ -0,0 +1,20 @@ +use std::net::IpAddr; +use std::sync::Arc; + +use crate::protocol::info_hash::InfoHash; +use crate::tracker::{statistics, ScrapeData, Tracker}; + +pub async fn invoke(tracker: Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { + let scrape_data = tracker.scrape(info_hashes).await; + + match original_peer_ip { + IpAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Tcp4Scrape).await; + } + IpAddr::V6(_) => { + tracker.send_stats_event(statistics::Event::Tcp6Scrape).await; + } + } + + scrape_data +} diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index f9f6253fd..f9079962c 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -11,6 +11,8 @@ const CHANNEL_BUFFER_SIZE: usize = 65_535; #[derive(Debug, PartialEq, Eq)] pub enum Event { + // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } + // Attributes are enums too. 
Tcp4Announce, Tcp4Scrape, Tcp6Announce, diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index a09802724..d324e560b 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -2354,8 +2354,7 @@ mod axum_http_tracker_server { assert_scrape_response(response, &expected_scrape_response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { let http_tracker = start_public_http_tracker(Version::Axum).await; @@ -2374,8 +2373,7 @@ mod axum_http_tracker_server { assert_eq!(stats.tcp4_scrapes_handled, 1); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { let http_tracker = start_ipv6_http_tracker(Version::Axum).await; From 86ce93cb9e0d5113bccaeba9c16abe6ffdeafcad Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Feb 2023 13:07:00 +0000 Subject: [PATCH 0405/1003] feat(http): [#192] scrape request for Axum HTTP tracker --- .../axum_implementation/handlers/scrape.rs | 13 +-- .../axum_implementation/requests/scrape.rs | 28 ++--- src/http/axum_implementation/responses/mod.rs | 1 + .../axum_implementation/responses/scrape.rs | 106 ++++++++++++++++++ src/tracker/mod.rs | 2 +- tests/http/asserts.rs | 46 +++++--- tests/http_tracker.rs | 30 +++-- 7 files changed, 162 insertions(+), 64 deletions(-) create mode 100644 src/http/axum_implementation/responses/scrape.rs diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 1f1d3ece9..51b6fa84d 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -1,14 +1,13 @@ use std::sync::Arc; use axum::extract::State; -use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; use log::debug; use crate::http::axum_implementation::extractors::peer_ip; use 
crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; -use crate::http::axum_implementation::services; +use crate::http::axum_implementation::{responses, services}; use crate::tracker::Tracker; #[allow(clippy::unused_async)] @@ -19,12 +18,6 @@ pub async fn handle( ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); - /* - todo: - - [x] Add the service that sends the event for statistics. - - [ ] Build the HTTP bencoded response. - */ - let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, &remote_client_ip) { Ok(peer_ip) => peer_ip, Err(err) => return err, @@ -32,7 +25,5 @@ pub async fn handle( let scrape_data = services::scrape::invoke(tracker.clone(), &scrape_request.info_hashes, &peer_ip).await; - debug!("scrape data: {:#?}", &scrape_data); - - (StatusCode::OK, "todo").into_response() + responses::scrape::Bencoded::from(scrape_data).into_response() } diff --git a/src/http/axum_implementation/requests/scrape.rs b/src/http/axum_implementation/requests/scrape.rs index 0f23039bb..da50d4be5 100644 --- a/src/http/axum_implementation/requests/scrape.rs +++ b/src/http/axum_implementation/requests/scrape.rs @@ -10,8 +10,8 @@ use crate::protocol::info_hash::{ConversionError, InfoHash}; pub type NumberOfBytes = i64; -// Query param name -const INFO_HASH_SCRAPE_PARAM: &str = "info_hash"; +// Query param names +const INFO_HASH: &str = "info_hash"; #[derive(Debug, PartialEq)] pub struct Scrape { @@ -27,12 +27,6 @@ pub enum ParseScrapeQueryError { location: &'static Location<'static>, param_name: String, }, - #[error("invalid param value {param_value} for {param_name} in {location}")] - InvalidParam { - param_name: String, - param_value: String, - location: &'static Location<'static>, - }, #[error("invalid param value {param_value} for {param_name} in {source}")] InvalidInfoHashParam { param_name: String, @@ -60,14 +54,14 @@ impl 
TryFrom for Scrape { } fn extract_info_hashes(query: &Query) -> Result, ParseScrapeQueryError> { - match query.get_param_vec(INFO_HASH_SCRAPE_PARAM) { + match query.get_param_vec(INFO_HASH) { Some(raw_params) => { let mut info_hashes = vec![]; for raw_param in raw_params { let info_hash = percent_decode_info_hash(&raw_param).map_err(|err| ParseScrapeQueryError::InvalidInfoHashParam { - param_name: INFO_HASH_SCRAPE_PARAM.to_owned(), + param_name: INFO_HASH.to_owned(), param_value: raw_param.clone(), source: Located(err).into(), })?; @@ -80,7 +74,7 @@ fn extract_info_hashes(query: &Query) -> Result, ParseScrapeQueryE None => { return Err(ParseScrapeQueryError::MissingParam { location: Location::caller(), - param_name: INFO_HASH_SCRAPE_PARAM.to_owned(), + param_name: INFO_HASH.to_owned(), }) } } @@ -92,16 +86,12 @@ mod tests { mod scrape_request { use crate::http::axum_implementation::query::Query; - use crate::http::axum_implementation::requests::scrape::{Scrape, INFO_HASH_SCRAPE_PARAM}; + use crate::http::axum_implementation::requests::scrape::{Scrape, INFO_HASH}; use crate::protocol::info_hash::InfoHash; #[test] fn should_be_instantiated_from_the_url_query_with_only_one_infohash() { - let raw_query = Query::from(vec![( - INFO_HASH_SCRAPE_PARAM, - "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0", - )]) - .to_string(); + let raw_query = Query::from(vec![(INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0")]).to_string(); let query = raw_query.parse::().unwrap(); @@ -118,7 +108,7 @@ mod tests { mod when_it_is_instantiated_from_the_url_query_params { use crate::http::axum_implementation::query::Query; - use crate::http::axum_implementation::requests::scrape::{Scrape, INFO_HASH_SCRAPE_PARAM}; + use crate::http::axum_implementation::requests::scrape::{Scrape, INFO_HASH}; #[test] fn it_should_fail_if_the_query_does_not_include_the_info_hash_param() { @@ -129,7 +119,7 @@ mod tests { #[test] fn it_should_fail_if_the_info_hash_param_is_invalid() { - let 
raw_query = Query::from(vec![(INFO_HASH_SCRAPE_PARAM, "INVALID_INFO_HASH_VALUE")]).to_string(); + let raw_query = Query::from(vec![(INFO_HASH, "INVALID_INFO_HASH_VALUE")]).to_string(); assert!(Scrape::try_from(raw_query.parse::().unwrap()).is_err()); } diff --git a/src/http/axum_implementation/responses/mod.rs b/src/http/axum_implementation/responses/mod.rs index ad7d0a78c..7e8666934 100644 --- a/src/http/axum_implementation/responses/mod.rs +++ b/src/http/axum_implementation/responses/mod.rs @@ -1,3 +1,4 @@ pub mod announce; pub mod error; pub mod ok; +pub mod scrape; diff --git a/src/http/axum_implementation/responses/scrape.rs b/src/http/axum_implementation/responses/scrape.rs new file mode 100644 index 000000000..3fc34a0e5 --- /dev/null +++ b/src/http/axum_implementation/responses/scrape.rs @@ -0,0 +1,106 @@ +use std::borrow::Cow; + +use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; +use bip_bencode::{ben_int, ben_map, BMutAccess}; + +use crate::tracker::ScrapeData; + +#[derive(Debug, PartialEq, Default)] +pub struct Bencoded { + scrape_data: ScrapeData, +} + +impl Bencoded { + /// # Panics + /// + /// Will return an error if it can't access the bencode as a mutable `BDictAccess`. + #[must_use] + pub fn body(&self) -> Vec { + let mut scrape_list = ben_map!(); + + let scrape_list_mut = scrape_list.dict_mut().unwrap(); + + for (info_hash, value) in &self.scrape_data.files { + scrape_list_mut.insert( + Cow::from(info_hash.bytes().to_vec()), + ben_map! { + "complete" => ben_int!(i64::from(value.complete)), + "downloaded" => ben_int!(i64::from(value.downloaded)), + "incomplete" => ben_int!(i64::from(value.incomplete)) + }, + ); + } + + (ben_map! 
{ + "files" => scrape_list + }) + .encode() + } +} + +impl From for Bencoded { + fn from(scrape_data: ScrapeData) -> Self { + Self { scrape_data } + } +} + +impl IntoResponse for Bencoded { + fn into_response(self) -> Response { + (StatusCode::OK, self.body()).into_response() + } +} + +#[cfg(test)] +mod tests { + + mod scrape_response { + use crate::http::axum_implementation::responses::scrape::Bencoded; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::torrent::SwarmMetadata; + use crate::tracker::ScrapeData; + + fn sample_scrape_data() -> ScrapeData { + let info_hash = InfoHash([0x69; 20]); + let mut scrape_data = ScrapeData::empty(); + scrape_data.add_file( + &info_hash, + SwarmMetadata { + complete: 1, + downloaded: 2, + incomplete: 3, + }, + ); + scrape_data + } + + #[test] + fn should_be_converted_from_scrape_data() { + let response = Bencoded::from(sample_scrape_data()); + + assert_eq!( + response, + Bencoded { + scrape_data: sample_scrape_data() + } + ); + } + + #[test] + fn should_be_bencoded() { + let response = Bencoded { + scrape_data: sample_scrape_data(), + }; + + let bytes = response.body(); + + // cspell:disable-next-line + let expected_bytes = b"d5:filesd20:iiiiiiiiiiiiiiiiiiiid8:completei1e10:downloadedi2e10:incompletei3eeee"; + + assert_eq!( + String::from_utf8(bytes).unwrap(), + String::from_utf8(expected_bytes.to_vec()).unwrap() + ); + } + } +} diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 0a3bd7c0b..3e5e97439 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -52,7 +52,7 @@ pub struct AnnounceData { #[derive(Debug, PartialEq, Default)] pub struct ScrapeData { - files: HashMap, + pub files: HashMap, } impl ScrapeData { diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index a10edc9e6..cd45571da 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -78,6 +78,36 @@ pub async fn assert_is_announce_response(response: Response) { // Error responses +// Specific errors for announce request + 
+pub async fn assert_missing_query_params_for_announce_request_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "missing query params for announce request", + Location::caller(), + ); +} + +pub async fn assert_bad_announce_request_error_response(response: Response, failure: &str) { + assert_cannot_parse_query_params_error_response(response, &format!(" for announce request: {failure}")).await; +} + +// Specific errors for scrape request + +pub async fn assert_missing_query_params_for_scrape_request_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "missing query params for scrape request", + Location::caller(), + ); +} + +// Other errors + pub async fn assert_internal_server_error_response(response: Response) { assert_eq!(response.status(), 200); @@ -156,22 +186,6 @@ pub async fn assert_invalid_remote_address_on_xff_header_error_response(response ); } -// Specific errors for announce request - -pub async fn assert_missing_query_params_for_announce_request_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error( - &response.text().await.unwrap(), - "missing query params for announce request", - Location::caller(), - ); -} - -pub async fn assert_bad_announce_request_error_response(response: Response, failure: &str) { - assert_cannot_parse_query_params_error_response(response, &format!(" for announce request: {failure}")).await; -} - pub async fn assert_cannot_parse_query_param_error_response(response: Response, failure: &str) { assert_cannot_parse_query_params_error_response(response, &format!(": {failure}")).await; } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index d324e560b..a341e13ed 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -2198,24 +2198,25 @@ mod axum_http_tracker_server { use 
torrust_tracker::tracker::peer; use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; - use crate::http::asserts::{assert_internal_server_error_response, assert_scrape_response}; + use crate::http::asserts::{ + assert_cannot_parse_query_params_error_response, assert_missing_query_params_for_scrape_request_error_response, + assert_scrape_response, + }; use crate::http::client::Client; use crate::http::requests; use crate::http::requests::scrape::QueryBuilder; use crate::http::responses::scrape::{self, File, ResponseBuilder}; use crate::http::server::{start_ipv6_http_tracker, start_public_http_tracker}; - //#[tokio::test] - #[allow(dead_code)] - async fn should_fail_when_the_request_is_empty() { + #[tokio::test] + async fn should_fail_when_the_url_query_component_is_empty() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; - assert_internal_server_error_response(response).await; + assert_missing_query_params_for_scrape_request_error_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; @@ -2228,13 +2229,11 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - // code-review: it's not returning the invalid info hash error - assert_internal_server_error_response(response).await; + assert_cannot_parse_query_params_error_response(response, "").await; } } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { let http_tracker = start_public_http_tracker(Version::Axum).await; @@ -2272,8 +2271,7 @@ mod axum_http_tracker_server { assert_scrape_response(response, &expected_scrape_response).await; } - //#[tokio::test] - #[allow(dead_code)] + 
#[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { let http_tracker = start_public_http_tracker(Version::Axum).await; @@ -2311,8 +2309,7 @@ mod axum_http_tracker_server { assert_scrape_response(response, &expected_scrape_response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { let http_tracker = start_public_http_tracker(Version::Axum).await; @@ -2329,8 +2326,7 @@ mod axum_http_tracker_server { assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_accept_multiple_infohashes() { let http_tracker = start_public_http_tracker(Version::Axum).await; From 4b3f9793970b9724d9757952cb09b9e0f95101fe Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Feb 2023 13:46:17 +0000 Subject: [PATCH 0406/1003] refactor(udp): [#192] use new tracker::scrape method in UDP tracker --- src/tracker/torrent.rs | 4 ++-- src/udp/handlers.rs | 48 ++++++++++++++++++------------------------ 2 files changed, 22 insertions(+), 30 deletions(-) diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 34017599d..dc41b083e 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -16,9 +16,9 @@ pub struct Entry { #[derive(Debug, PartialEq, Default)] pub struct SwarmMetadata { - pub complete: u32, // The number of active peers that have completed downloading + pub complete: u32, // The number of active peers that have completed downloading (seeders) pub downloaded: u32, // The number of peers that have ever completed downloading - pub incomplete: u32, // The number of active peers that have not completed downloading + pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) } impl Entry { diff --git a/src/udp/handlers.rs 
b/src/udp/handlers.rs index 8978beb70..6c54a6106 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -182,51 +182,43 @@ pub async fn handle_announce( /// # Errors /// /// This function dose not ever return an error. -/// -/// TODO: refactor this, db lock can be a lot shorter pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, tracker: Arc, ) -> Result { - let db = tracker.get_torrents().await; + // Convert from aquatic infohashes + let mut info_hashes = vec![]; + for info_hash in &request.info_hashes { + info_hashes.push(InfoHash(info_hash.0)); + } + + let scrape_data = tracker.scrape(&info_hashes).await; let mut torrent_stats: Vec = Vec::new(); - for info_hash in &request.info_hashes { - let info_hash = InfoHash(info_hash.0); - - let scrape_entry = match db.get(&info_hash) { - Some(torrent_info) => { - if tracker.authenticate_request(&info_hash, &None).await.is_ok() { - let (seeders, completed, leechers) = torrent_info.get_stats(); - - #[allow(clippy::cast_possible_truncation)] - TorrentScrapeStatistics { - seeders: NumberOfPeers(i64::from(seeders) as i32), - completed: NumberOfDownloads(i64::from(completed) as i32), - leechers: NumberOfPeers(i64::from(leechers) as i32), - } - } else { - TorrentScrapeStatistics { - seeders: NumberOfPeers(0), - completed: NumberOfDownloads(0), - leechers: NumberOfPeers(0), - } - } + for file in &scrape_data.files { + let info_hash = file.0; + let swarm_metadata = file.1; + + let scrape_entry = if tracker.authenticate_request(info_hash, &None).await.is_ok() { + #[allow(clippy::cast_possible_truncation)] + TorrentScrapeStatistics { + seeders: NumberOfPeers(i64::from(swarm_metadata.complete) as i32), + completed: NumberOfDownloads(i64::from(swarm_metadata.downloaded) as i32), + leechers: NumberOfPeers(i64::from(swarm_metadata.incomplete) as i32), } - None => TorrentScrapeStatistics { + } else { + TorrentScrapeStatistics { seeders: NumberOfPeers(0), completed: NumberOfDownloads(0), leechers: 
NumberOfPeers(0), - }, + } }; torrent_stats.push(scrape_entry); } - drop(db); - // send stats event match remote_addr { SocketAddr::V4(_) => { From 7cdd63ee42b4868734038280c6a4f83e07c511ad Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Feb 2023 18:52:25 +0000 Subject: [PATCH 0407/1003] refactor: [#171] use KeyId in auth:Key The struct `KeyId` was extracted to wrap the primitive type but it was not being used in the `auth::Key` struct. --- src/apis/resources/auth_key.rs | 14 +++---- src/databases/mod.rs | 3 ++ src/databases/mysql.rs | 8 ++-- src/databases/sqlite.rs | 11 +++--- src/http/warp_implementation/filters.rs | 16 ++++++-- src/http/warp_implementation/handlers.rs | 13 ++++--- src/http/warp_implementation/routes.rs | 6 +-- src/tracker/auth.rs | 49 +++++++++++++----------- src/tracker/error.rs | 4 +- src/tracker/mod.rs | 24 +++++++----- tests/tracker_api.rs | 12 +++--- 11 files changed, 91 insertions(+), 69 deletions(-) diff --git a/src/apis/resources/auth_key.rs b/src/apis/resources/auth_key.rs index d5c08f496..207a0c482 100644 --- a/src/apis/resources/auth_key.rs +++ b/src/apis/resources/auth_key.rs @@ -3,18 +3,18 @@ use std::convert::From; use serde::{Deserialize, Serialize}; use crate::protocol::clock::DurationSinceUnixEpoch; -use crate::tracker::auth; +use crate::tracker::auth::{self, KeyId}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct AuthKey { - pub key: String, + pub key: String, // todo: rename to `id` pub valid_until: Option, } impl From for auth::Key { fn from(auth_key_resource: AuthKey) -> Self { auth::Key { - key: auth_key_resource.key, + id: auth_key_resource.key.parse::().unwrap(), valid_until: auth_key_resource .valid_until .map(|valid_until| DurationSinceUnixEpoch::new(valid_until, 0)), @@ -25,7 +25,7 @@ impl From for auth::Key { impl From for AuthKey { fn from(auth_key: auth::Key) -> Self { AuthKey { - key: auth_key.key, + key: auth_key.id.to_string(), valid_until: auth_key.valid_until.map(|valid_until| 
valid_until.as_secs()), } } @@ -37,7 +37,7 @@ mod tests { use super::AuthKey; use crate::protocol::clock::{Current, TimeNow}; - use crate::tracker::auth; + use crate::tracker::auth::{self, KeyId}; #[test] fn it_should_be_convertible_into_an_auth_key() { @@ -51,7 +51,7 @@ mod tests { assert_eq!( auth::Key::from(auth_key_resource), auth::Key { - key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()) } ); @@ -62,7 +62,7 @@ mod tests { let duration_in_secs = 60; let auth_key = auth::Key { - key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()), }; diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 809decc2c..70cc9eb75 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -63,16 +63,19 @@ pub trait Database: Sync + Send { async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; + // todo: replace type `&str` with `&InfoHash` async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result, Error>; async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; + // todo: replace type `&str` with `&KeyId` async fn get_key_from_keys(&self, key: &str) -> Result, Error>; async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result; + // todo: replace type `&str` with `&KeyId` async fn remove_key_from_keys(&self, key: &str) -> Result; async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index ac54ebb82..532ba1dcb 100644 --- a/src/databases/mysql.rs +++ 
b/src/databases/mysql.rs @@ -12,7 +12,7 @@ use super::driver::Driver; use crate::databases::{Database, Error}; use crate::protocol::common::AUTH_KEY_LENGTH; use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth; +use crate::tracker::auth::{self, KeyId}; const DRIVER: Driver = Driver::MySQL; @@ -117,7 +117,7 @@ impl Database for Mysql { let keys = conn.query_map( "SELECT `key`, valid_until FROM `keys`", |(key, valid_until): (String, i64)| auth::Key { - key, + id: key.parse::().unwrap(), valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }, )?; @@ -192,7 +192,7 @@ impl Database for Mysql { let key = query?; Ok(key.map(|(key, expiry)| auth::Key { - key, + id: key.parse::().unwrap(), valid_until: Some(Duration::from_secs(expiry.unsigned_abs())), })) } @@ -200,7 +200,7 @@ impl Database for Mysql { async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - let key = auth_key.key.to_string(); + let key = auth_key.id.to_string(); let valid_until = auth_key.valid_until.unwrap_or(Duration::ZERO).as_secs().to_string(); conn.exec_drop( diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 3425b15c8..d6915c850 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -9,7 +9,7 @@ use super::driver::Driver; use crate::databases::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth; +use crate::tracker::auth::{self, KeyId}; const DRIVER: Driver = Driver::Sqlite3; @@ -108,11 +108,11 @@ impl Database for Sqlite { let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; let keys_iter = stmt.query_map([], |row| { - let key = row.get(0)?; + let key: String = row.get(0)?; let valid_until: i64 = row.get(1)?; Ok(auth::Key { - key, + id: key.parse::().unwrap(), valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) })?; @@ -211,8 +211,9 @@ 
impl Database for Sqlite { Ok(key.map(|f| { let expiry: i64 = f.get(1).unwrap(); + let id: String = f.get(0).unwrap(); auth::Key { - key: f.get(0).unwrap(), + id: id.parse::().unwrap(), valid_until: Some(DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs())), } })) @@ -223,7 +224,7 @@ impl Database for Sqlite { let insert = conn.execute( "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - [auth_key.key.to_string(), auth_key.valid_until.unwrap().as_secs().to_string()], + [auth_key.id.to_string(), auth_key.valid_until.unwrap().as_secs().to_string()], )?; if insert == 0 { diff --git a/src/http/warp_implementation/filters.rs b/src/http/warp_implementation/filters.rs index fc8ef20bc..eb7abcd4d 100644 --- a/src/http/warp_implementation/filters.rs +++ b/src/http/warp_implementation/filters.rs @@ -1,6 +1,7 @@ use std::convert::Infallible; use std::net::{IpAddr, SocketAddr}; use std::panic::Location; +use std::str::FromStr; use std::sync::Arc; use warp::{reject, Filter, Rejection}; @@ -11,7 +12,8 @@ use super::{request, WebResult}; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; -use crate::tracker::{self, auth, peer}; +use crate::tracker::auth::KeyId; +use crate::tracker::{self, peer}; /// Pass Arc along #[must_use] @@ -35,10 +37,16 @@ pub fn with_peer_id() -> impl Filter + /// Pass Arc along #[must_use] -pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { +pub fn with_auth_key_id() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() - .map(|key: String| auth::Key::from_string(&key)) - .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) + .map(|key: String| { + let key_id = KeyId::from_str(&key); + match key_id { + Ok(id) => Some(id), + Err(_) => None, + } + }) + .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) } /// Check for `PeerAddress` diff --git 
a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs index 400cc5762..6019bf016 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -12,6 +12,7 @@ use super::error::Error; use super::{request, response, WebResult}; use crate::http::warp_implementation::peer_builder; use crate::protocol::info_hash::InfoHash; +use crate::tracker::auth::KeyId; use crate::tracker::{self, auth, peer, statistics, torrent}; /// Authenticate `InfoHash` using optional `auth::Key` @@ -21,11 +22,11 @@ use crate::tracker::{self, auth, peer, statistics, torrent}; /// Will return `ServerError` that wraps the `tracker::error::Error` if unable to `authenticate_request`. pub async fn authenticate( info_hash: &InfoHash, - auth_key: &Option, + auth_key_id: &Option, tracker: Arc, ) -> Result<(), Error> { tracker - .authenticate_request(info_hash, auth_key) + .authenticate_request(info_hash, auth_key_id) .await .map_err(|e| Error::TrackerError { source: (Arc::new(e) as Arc).into(), @@ -37,7 +38,7 @@ pub async fn authenticate( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_announce_response`. pub async fn handle_announce( announce_request: request::Announce, - auth_key: Option, + auth_key_id: Option, tracker: Arc, ) -> WebResult { debug!("http announce request: {:#?}", announce_request); @@ -45,7 +46,7 @@ pub async fn handle_announce( let info_hash = announce_request.info_hash; let remote_client_ip = announce_request.peer_addr; - authenticate(&info_hash, &auth_key, tracker.clone()).await?; + authenticate(&info_hash, &auth_key_id, tracker.clone()).await?; let mut peer = peer_builder::from_request(&announce_request, &remote_client_ip); @@ -77,7 +78,7 @@ pub async fn handle_announce( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. 
pub async fn handle_scrape( scrape_request: request::Scrape, - auth_key: Option, + auth_key_id: Option, tracker: Arc, ) -> WebResult { let mut files: HashMap = HashMap::new(); @@ -86,7 +87,7 @@ pub async fn handle_scrape( for info_hash in &scrape_request.info_hashes { let scrape_entry = match db.get(info_hash) { Some(torrent_info) => { - if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { + if authenticate(info_hash, &auth_key_id, tracker.clone()).await.is_ok() { let (seeders, completed, leechers) = torrent_info.get_stats(); response::ScrapeEntry { complete: seeders, diff --git a/src/http/warp_implementation/routes.rs b/src/http/warp_implementation/routes.rs index c46c502e4..2ee60e8c9 100644 --- a/src/http/warp_implementation/routes.rs +++ b/src/http/warp_implementation/routes.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use warp::{Filter, Rejection}; -use super::filters::{with_announce_request, with_auth_key, with_scrape_request, with_tracker}; +use super::filters::{with_announce_request, with_auth_key_id, with_scrape_request, with_tracker}; use super::handlers::{handle_announce, handle_scrape, send_error}; use crate::tracker; @@ -20,7 +20,7 @@ fn announce(tracker: Arc) -> impl Filter) -> impl Filter Key { - let key: String = thread_rng() + let random_id: String = thread_rng() .sample_iter(&Alphanumeric) .take(AUTH_KEY_LENGTH) .map(char::from) .collect(); - debug!("Generated key: {}, valid for: {:?} seconds", key, lifetime); + debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); Key { - key, + id: random_id.parse::().unwrap(), valid_until: Some(Current::add(&lifetime).unwrap()), } } @@ -54,16 +54,14 @@ pub fn verify(auth_key: &Key) -> Result<(), Error> { } None => Err(Error::UnableToReadKey { location: Location::caller(), - key: Box::new(auth_key.clone()), + key_id: Box::new(auth_key.id.clone()), }), } } #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct Key { - // todo: replace key field definition with: - 
// pub key: KeyId, - pub key: String, + pub id: KeyId, pub valid_until: Option, } @@ -72,7 +70,7 @@ impl std::fmt::Display for Key { write!( f, "key: `{}`, valid until `{}`", - self.key, + self.id, match self.valid_until { Some(duration) => format!( "{}", @@ -91,20 +89,29 @@ impl std::fmt::Display for Key { } impl Key { + /// # Panics + /// + /// Will panic if bytes cannot be converted into a valid `KeyId`. #[must_use] pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { - Some(Key { key, valid_until: None }) + Some(Key { + id: key.parse::().unwrap(), + valid_until: None, + }) } else { None } } + /// # Panics + /// + /// Will panic if string cannot be converted into a valid `KeyId`. #[must_use] pub fn from_string(key: &str) -> Option { if key.len() == AUTH_KEY_LENGTH { Some(Key { - key: key.to_string(), + id: key.parse::().unwrap(), valid_until: None, }) } else { @@ -112,18 +119,13 @@ impl Key { } } - /// # Panics - /// - /// Will fail if the key id is not a valid key id. #[must_use] pub fn id(&self) -> KeyId { - // todo: replace the type of field `key` with type `KeyId`. - // The constructor should fail if an invalid KeyId is provided. 
- KeyId::from_str(&self.key).unwrap() + self.id.clone() } } -#[derive(Debug, Display, PartialEq, Clone)] +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] pub struct KeyId(String); #[derive(Debug, PartialEq, Eq)] @@ -148,10 +150,10 @@ pub enum Error { KeyVerificationError { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, - #[error("Failed to read key: {key}, {location}")] + #[error("Failed to read key: {key_id}, {location}")] UnableToReadKey { location: &'static Location<'static>, - key: Box, + key_id: Box, }, #[error("Key has expired, {location}")] KeyExpired { location: &'static Location<'static> }, @@ -171,7 +173,7 @@ mod tests { use std::time::Duration; use crate::protocol::clock::{Current, StoppedTime}; - use crate::tracker::auth; + use crate::tracker::auth::{self, KeyId}; #[test] fn auth_key_from_buffer() { @@ -181,7 +183,10 @@ mod tests { ]); assert!(auth_key.is_some()); - assert_eq!(auth_key.unwrap().key, "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"); + assert_eq!( + auth_key.unwrap().id, + "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse::().unwrap() + ); } #[test] @@ -190,7 +195,7 @@ mod tests { let auth_key = auth::Key::from_string(key_string); assert!(auth_key.is_some()); - assert_eq!(auth_key.unwrap().key, key_string); + assert_eq!(auth_key.unwrap().id, key_string.parse::().unwrap()); } #[test] diff --git a/src/tracker/error.rs b/src/tracker/error.rs index 51bcbf3bb..acc85a1c2 100644 --- a/src/tracker/error.rs +++ b/src/tracker/error.rs @@ -4,9 +4,9 @@ use crate::located_error::LocatedError; #[derive(thiserror::Error, Debug, Clone)] pub enum Error { - #[error("The supplied key: {key:?}, is not valid: {source}")] + #[error("The supplied key: {key_id:?}, is not valid: {source}")] PeerKeyNotValid { - key: super::auth::Key, + key_id: super::auth::KeyId, source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, #[error("The peer is not authenticated, {location}")] diff --git a/src/tracker/mod.rs 
b/src/tracker/mod.rs index 3e5e97439..147c889ac 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -16,6 +16,7 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; +use self::auth::KeyId; use self::error::Error; use self::peer::Peer; use self::torrent::{SwamStats, SwarmMetadata}; @@ -27,7 +28,7 @@ use crate::protocol::info_hash::InfoHash; pub struct Tracker { pub config: Arc, mode: mode::Mode, - keys: RwLock>, + keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, stats_event_sender: Option>, @@ -155,28 +156,31 @@ impl Tracker { pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { let auth_key = auth::generate(lifetime); self.database.add_key_to_keys(&auth_key).await?; - self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); + self.keys.write().await.insert(auth_key.id.clone(), auth_key.clone()); Ok(auth_key) } /// # Errors /// /// Will return a `database::Error` if unable to remove the `key` to the database. + /// + /// # Panics + /// + /// Will panic if key cannot be converted into a valid `KeyId`. pub async fn remove_auth_key(&self, key: &str) -> Result<(), databases::error::Error> { self.database.remove_key_from_keys(key).await?; - self.keys.write().await.remove(key); + self.keys.write().await.remove(&key.parse::().unwrap()); Ok(()) } /// # Errors /// /// Will return a `key::Error` if unable to get any `auth_key`. 
- pub async fn verify_auth_key(&self, auth_key: &auth::Key) -> Result<(), auth::Error> { - // todo: use auth::KeyId for the function argument `auth_key` - match self.keys.read().await.get(&auth_key.key) { + pub async fn verify_auth_key(&self, key_id: &KeyId) -> Result<(), auth::Error> { + match self.keys.read().await.get(key_id) { None => Err(auth::Error::UnableToReadKey { location: Location::caller(), - key: Box::new(auth_key.clone()), + key_id: Box::new(key_id.clone()), }), Some(key) => auth::verify(key), } @@ -192,7 +196,7 @@ impl Tracker { keys.clear(); for key in keys_from_database { - keys.insert(key.key.clone(), key); + keys.insert(key.id.clone(), key); } Ok(()) @@ -283,7 +287,7 @@ impl Tracker { /// Will return a `torrent::Error::PeerNotAuthenticated` if the `key` is `None`. /// /// Will return a `torrent::Error::TorrentNotWhitelisted` if the the Tracker is in listed mode and the `info_hash` is not whitelisted. - pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), Error> { + pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), Error> { // no authentication needed in public mode if self.is_public() { return Ok(()); @@ -295,7 +299,7 @@ impl Tracker { Some(key) => { if let Err(e) = self.verify_auth_key(key).await { return Err(Error::PeerKeyNotValid { - key: key.clone(), + key_id: key.clone(), source: (Arc::new(e) as Arc).into(), }); } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 193c6487c..bec22e2b4 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -638,7 +638,7 @@ mod tracker_apis { mod for_key_resources { use std::time::Duration; - use torrust_tracker::tracker::auth::Key; + use torrust_tracker::tracker::auth::KeyId; use crate::api::asserts::{ assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, @@ -665,7 +665,7 @@ mod tracker_apis { // Verify the key with the tracker assert!(api_server 
.tracker - .verify_auth_key(&Key::from(auth_key_resource)) + .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); } @@ -734,7 +734,7 @@ mod tracker_apis { .unwrap(); let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(&auth_key.key) + .delete_auth_key(&auth_key.id.to_string()) .await; assert_ok(response).await; @@ -777,7 +777,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(&auth_key.key) + .delete_auth_key(&auth_key.id.to_string()) .await; assert_failed_to_delete_key(response).await; @@ -797,7 +797,7 @@ mod tracker_apis { .unwrap(); let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .delete_auth_key(&auth_key.key) + .delete_auth_key(&auth_key.id.to_string()) .await; assert_token_not_valid(response).await; @@ -810,7 +810,7 @@ mod tracker_apis { .unwrap(); let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .delete_auth_key(&auth_key.key) + .delete_auth_key(&auth_key.id.to_string()) .await; assert_unauthorized(response).await; From 28e655fbd698f64b71c16d78bd3dbd211419d47d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Feb 2023 19:03:16 +0000 Subject: [PATCH 0408/1003] refactor: [#171] rename auth::Key to auth::ExpiringKey --- src/apis/resources/auth_key.rs | 14 +++++++------- src/databases/mod.rs | 6 +++--- src/databases/mysql.rs | 10 +++++----- src/databases/sqlite.rs | 12 ++++++------ src/tracker/auth.rs | 24 ++++++++++++------------ src/tracker/mod.rs | 4 ++-- 6 files changed, 35 insertions(+), 35 deletions(-) diff --git a/src/apis/resources/auth_key.rs b/src/apis/resources/auth_key.rs index 207a0c482..e9989ca75 100644 --- a/src/apis/resources/auth_key.rs +++ b/src/apis/resources/auth_key.rs @@ -11,9 +11,9 @@ pub struct AuthKey { pub valid_until: Option, } -impl From for auth::Key { +impl From for auth::ExpiringKey { fn 
from(auth_key_resource: AuthKey) -> Self { - auth::Key { + auth::ExpiringKey { id: auth_key_resource.key.parse::().unwrap(), valid_until: auth_key_resource .valid_until @@ -22,8 +22,8 @@ impl From for auth::Key { } } -impl From for AuthKey { - fn from(auth_key: auth::Key) -> Self { +impl From for AuthKey { + fn from(auth_key: auth::ExpiringKey) -> Self { AuthKey { key: auth_key.id.to_string(), valid_until: auth_key.valid_until.map(|valid_until| valid_until.as_secs()), @@ -49,8 +49,8 @@ mod tests { }; assert_eq!( - auth::Key::from(auth_key_resource), - auth::Key { + auth::ExpiringKey::from(auth_key_resource), + auth::ExpiringKey { id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()) } @@ -61,7 +61,7 @@ mod tests { fn it_should_be_convertible_from_an_auth_key() { let duration_in_secs = 60; - let auth_key = auth::Key { + let auth_key = auth::ExpiringKey { id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()), }; diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 70cc9eb75..038be0ea3 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -57,7 +57,7 @@ pub trait Database: Sync + Send { async fn load_persistent_torrents(&self) -> Result, Error>; - async fn load_keys(&self) -> Result, Error>; + async fn load_keys(&self) -> Result, Error>; async fn load_whitelist(&self) -> Result, Error>; @@ -71,9 +71,9 @@ pub trait Database: Sync + Send { async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; // todo: replace type `&str` with `&KeyId` - async fn get_key_from_keys(&self, key: &str) -> Result, Error>; + async fn get_key_from_keys(&self, key: &str) -> Result, Error>; - async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result; + async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result; // todo: 
replace type `&str` with `&KeyId` async fn remove_key_from_keys(&self, key: &str) -> Result; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 532ba1dcb..0d545aaa9 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -111,12 +111,12 @@ impl Database for Mysql { Ok(torrents) } - async fn load_keys(&self) -> Result, Error> { + async fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let keys = conn.query_map( "SELECT `key`, valid_until FROM `keys`", - |(key, valid_until): (String, i64)| auth::Key { + |(key, valid_until): (String, i64)| auth::ExpiringKey { id: key.parse::().unwrap(), valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }, @@ -183,7 +183,7 @@ impl Database for Mysql { Ok(1) } - async fn get_key_from_keys(&self, key: &str) -> Result, Error> { + async fn get_key_from_keys(&self, key: &str) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let query = @@ -191,13 +191,13 @@ impl Database for Mysql { let key = query?; - Ok(key.map(|(key, expiry)| auth::Key { + Ok(key.map(|(key, expiry)| auth::ExpiringKey { id: key.parse::().unwrap(), valid_until: Some(Duration::from_secs(expiry.unsigned_abs())), })) } - async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { + async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let key = auth_key.id.to_string(); diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index d6915c850..ab0addf4b 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -102,7 +102,7 @@ impl Database for Sqlite { Ok(torrents) } - async fn load_keys(&self) -> Result, Error> { + async fn load_keys(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; @@ -111,13 +111,13 @@ impl Database for Sqlite { let key: String = 
row.get(0)?; let valid_until: i64 = row.get(1)?; - Ok(auth::Key { + Ok(auth::ExpiringKey { id: key.parse::().unwrap(), valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) })?; - let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); + let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); Ok(keys) } @@ -200,7 +200,7 @@ impl Database for Sqlite { } } - async fn get_key_from_keys(&self, key: &str) -> Result, Error> { + async fn get_key_from_keys(&self, key: &str) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; @@ -212,14 +212,14 @@ impl Database for Sqlite { Ok(key.map(|f| { let expiry: i64 = f.get(1).unwrap(); let id: String = f.get(0).unwrap(); - auth::Key { + auth::ExpiringKey { id: id.parse::().unwrap(), valid_until: Some(DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs())), } })) } - async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { + async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let insert = conn.execute( diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 53304657a..22f734e48 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -19,7 +19,7 @@ use crate::protocol::common::AUTH_KEY_LENGTH; /// # Panics /// /// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. 
-pub fn generate(lifetime: Duration) -> Key { +pub fn generate(lifetime: Duration) -> ExpiringKey { let random_id: String = thread_rng() .sample_iter(&Alphanumeric) .take(AUTH_KEY_LENGTH) @@ -28,7 +28,7 @@ pub fn generate(lifetime: Duration) -> Key { debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); - Key { + ExpiringKey { id: random_id.parse::().unwrap(), valid_until: Some(Current::add(&lifetime).unwrap()), } @@ -39,7 +39,7 @@ pub fn generate(lifetime: Duration) -> Key { /// Will return `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. /// /// Will return `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. -pub fn verify(auth_key: &Key) -> Result<(), Error> { +pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = Current::now(); match auth_key.valid_until { @@ -60,12 +60,12 @@ pub fn verify(auth_key: &Key) -> Result<(), Error> { } #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] -pub struct Key { +pub struct ExpiringKey { pub id: KeyId, pub valid_until: Option, } -impl std::fmt::Display for Key { +impl std::fmt::Display for ExpiringKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, @@ -88,14 +88,14 @@ impl std::fmt::Display for Key { } } -impl Key { +impl ExpiringKey { /// # Panics /// /// Will panic if bytes cannot be converted into a valid `KeyId`. #[must_use] - pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { + pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { - Some(Key { + Some(ExpiringKey { id: key.parse::().unwrap(), valid_until: None, }) @@ -108,9 +108,9 @@ impl Key { /// /// Will panic if string cannot be converted into a valid `KeyId`. 
 #[must_use] - pub fn from_string(key: &str) -> Option { + pub fn from_string(key: &str) -> Option { if key.len() == AUTH_KEY_LENGTH { - Some(Key { + Some(ExpiringKey { id: key.parse::().unwrap(), valid_until: None, }) } else { @@ -177,7 +177,7 @@ mod tests { #[test] fn auth_key_from_buffer() { - let auth_key = auth::Key::from_buffer([ + let auth_key = auth::ExpiringKey::from_buffer([ 89, 90, 83, 108, 52, 108, 77, 90, 117, 112, 82, 117, 79, 112, 83, 82, 67, 51, 107, 114, 73, 75, 82, 53, 66, 80, 66, 49, 52, 110, 114, 74, ]); @@ -192,7 +192,7 @@ mod tests { #[test] fn auth_key_from_string() { let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = auth::Key::from_string(key_string); + let auth_key = auth::ExpiringKey::from_string(key_string); assert!(auth_key.is_some()); assert_eq!(auth_key.unwrap().id, key_string.parse::().unwrap()); diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 147c889ac..0fb434aea 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -28,7 +28,7 @@ use crate::protocol::info_hash::InfoHash; pub struct Tracker { pub config: Arc, mode: mode::Mode, - keys: RwLock>, + keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, stats_event_sender: Option>, @@ -153,7 +153,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. - pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { + pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { let auth_key = auth::generate(lifetime); self.database.add_key_to_keys(&auth_key).await?; self.keys.write().await.insert(auth_key.id.clone(), auth_key.clone()); From f3afab1af86f6e4afd786b1e8d7f8ca721001d6d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Feb 2023 15:26:27 +0000 Subject: [PATCH 0409/1003] feat(http): [#195] announce request in private mode for the new Axum HTTP tracker implementation.
--- .../axum_implementation/handlers/announce.rs | 43 ++++++++++++++++--- src/http/axum_implementation/handlers/auth.rs | 41 ++++++++++++++++++ src/http/axum_implementation/handlers/mod.rs | 1 + src/http/axum_implementation/routes.rs | 3 +- tests/http/asserts.rs | 22 +++------- tests/http/asserts_warp.rs | 19 ++++++++ tests/http_tracker.rs | 28 +++++------- 7 files changed, 117 insertions(+), 40 deletions(-) create mode 100644 src/http/axum_implementation/handlers/auth.rs diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index d5fa7f3a4..3ad11df51 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -1,39 +1,70 @@ use std::net::{IpAddr, SocketAddr}; +use std::panic::Location; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use axum::extract::State; +use axum::extract::{Path, State}; use axum::response::{IntoResponse, Response}; use log::debug; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; +use crate::http::axum_implementation::handlers::auth; use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; -use crate::http::axum_implementation::responses::announce; +use crate::http::axum_implementation::responses::{self, announce}; use crate::http::axum_implementation::services; use crate::protocol::clock::{Current, Time}; +use crate::tracker::auth::KeyId; use crate::tracker::peer::Peer; use crate::tracker::Tracker; #[allow(clippy::unused_async)] -pub async fn handle( +pub async fn handle_without_key( State(tracker): State>, ExtractRequest(announce_request): ExtractRequest, remote_client_ip: RemoteClientIp, ) -> Response { debug!("http announce request: {:#?}", announce_request); - let peer_ip = match 
peer_ip::resolve(tracker.config.on_reverse_proxy, &remote_client_ip) { + if tracker.is_private() { + return responses::error::Error::from(auth::Error::MissingAuthKey { + location: Location::caller(), + }) + .into_response(); + } + + handle(&tracker, &announce_request, &remote_client_ip).await +} + +#[allow(clippy::unused_async)] +pub async fn handle_with_key( + State(tracker): State>, + ExtractRequest(announce_request): ExtractRequest, + Path(key_id): Path, + remote_client_ip: RemoteClientIp, +) -> Response { + debug!("http announce request: {:#?}", announce_request); + + match auth::authenticate(&key_id, &tracker).await { + Ok(_) => (), + Err(error) => return responses::error::Error::from(error).into_response(), + } + + handle(&tracker, &announce_request, &remote_client_ip).await +} + +async fn handle(tracker: &Arc, announce_request: &Announce, remote_client_ip: &RemoteClientIp) -> Response { + let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, remote_client_ip) { Ok(peer_ip) => peer_ip, Err(err) => return err, }; - let mut peer = peer_from_request(&announce_request, &peer_ip); + let mut peer = peer_from_request(announce_request, &peer_ip); let announce_data = services::announce::invoke(tracker.clone(), announce_request.info_hash, &mut peer).await; - match announce_request.compact { + match &announce_request.compact { Some(compact) => match compact { Compact::Accepted => announce::Compact::from(announce_data).into_response(), Compact::NotAccepted => announce::NonCompact::from(announce_data).into_response(), diff --git a/src/http/axum_implementation/handlers/auth.rs b/src/http/axum_implementation/handlers/auth.rs new file mode 100644 index 000000000..13f5b27e6 --- /dev/null +++ b/src/http/axum_implementation/handlers/auth.rs @@ -0,0 +1,41 @@ +use std::panic::Location; +use std::sync::Arc; + +use thiserror::Error; + +use crate::http::axum_implementation::responses; +use crate::tracker::auth::{self, KeyId}; +use crate::tracker::Tracker; + 
+#[derive(Debug, Error)] +pub enum Error { + #[error("Missing authentication key for private tracker. Error in {location}")] + MissingAuthKey { location: &'static Location<'static> }, +} + +/// # Errors +/// +/// Will return an error if the the authentication key cannot be verified. +pub async fn authenticate(key_id: &KeyId, tracker: &Arc) -> Result<(), auth::Error> { + if tracker.is_private() { + tracker.verify_auth_key(key_id).await + } else { + Ok(()) + } +} + +impl From for responses::error::Error { + fn from(err: Error) -> Self { + responses::error::Error { + failure_reason: format!("Authentication error: {err}"), + } + } +} + +impl From for responses::error::Error { + fn from(err: auth::Error) -> Self { + responses::error::Error { + failure_reason: format!("Authentication error: {err}"), + } + } +} diff --git a/src/http/axum_implementation/handlers/mod.rs b/src/http/axum_implementation/handlers/mod.rs index 4e6849534..0d8aa7f52 100644 --- a/src/http/axum_implementation/handlers/mod.rs +++ b/src/http/axum_implementation/handlers/mod.rs @@ -1,3 +1,4 @@ pub mod announce; +pub mod auth; pub mod scrape; pub mod status; diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index 1d4d67e73..646dd0aa3 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -12,7 +12,8 @@ pub fn router(tracker: &Arc) -> Router { // Status .route("/status", get(status::handle)) // Announce request - .route("/announce", get(announce::handle).with_state(tracker.clone())) + .route("/announce", get(announce::handle_without_key).with_state(tracker.clone())) + .route("/announce/:key", get(announce::handle_with_key).with_state(tracker.clone())) // Scrape request .route("/scrape", get(scrape::handle).with_state(tracker.clone())) // Add extension to get the client IP from the connection info diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index cd45571da..0d5441f89 100644 --- a/tests/http/asserts.rs 
+++ b/tests/http/asserts.rs @@ -140,22 +140,6 @@ pub async fn assert_torrent_not_in_whitelist_error_response(response: Response) assert_bencoded_error(&response.text().await.unwrap(), "is not whitelisted", Location::caller()); } -pub async fn assert_peer_not_authenticated_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error( - &response.text().await.unwrap(), - "The peer is not authenticated", - Location::caller(), - ); -} - -pub async fn assert_invalid_authentication_key_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error(&response.text().await.unwrap(), "is not valid", Location::caller()); -} - pub async fn assert_could_not_find_remote_address_on_xff_header_error_response(response: Response) { assert_eq!(response.status(), 200); @@ -199,3 +183,9 @@ pub async fn assert_cannot_parse_query_params_error_response(response: Response, Location::caller(), ); } + +pub async fn assert_authentication_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error(&response.text().await.unwrap(), "Authentication error", Location::caller()); +} diff --git a/tests/http/asserts_warp.rs b/tests/http/asserts_warp.rs index 6bda82f6c..d1a936efa 100644 --- a/tests/http/asserts_warp.rs +++ b/tests/http/asserts_warp.rs @@ -1,7 +1,10 @@ +use std::panic::Location; + /// todo: this mod should be removed when we remove the Warp implementation for the HTTP tracker. 
use reqwest::Response; use super::responses::announce_warp::WarpAnnounce; +use crate::http::asserts::assert_bencoded_error; pub async fn assert_warp_announce_response(response: Response, expected_announce_response: &WarpAnnounce) { assert_eq!(response.status(), 200); @@ -13,3 +16,19 @@ pub async fn assert_warp_announce_response(response: Response, expected_announce assert_eq!(announce_response, *expected_announce_response); } + +pub async fn assert_warp_peer_not_authenticated_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "The peer is not authenticated", + Location::caller(), + ); +} + +pub async fn assert_warp_invalid_authentication_key_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error(&response.text().await.unwrap(), "is not valid", Location::caller()); +} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index a341e13ed..28ed252e9 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1085,9 +1085,9 @@ mod warp_http_tracker_server { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::KeyId; - use crate::http::asserts::{ - assert_invalid_authentication_key_error_response, assert_is_announce_response, - assert_peer_not_authenticated_error_response, + use crate::http::asserts::assert_is_announce_response; + use crate::http::asserts_warp::{ + assert_warp_invalid_authentication_key_error_response, assert_warp_peer_not_authenticated_error_response, }; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; @@ -1120,7 +1120,7 @@ mod warp_http_tracker_server { .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; - assert_peer_not_authenticated_error_response(response).await; + assert_warp_peer_not_authenticated_error_response(response).await; } #[tokio::test] @@ -1134,7 +1134,7 @@ mod warp_http_tracker_server { 
.announce(&QueryBuilder::default().query()) .await; - assert_invalid_authentication_key_error_response(response).await; + assert_warp_invalid_authentication_key_error_response(response).await; } } @@ -2539,16 +2539,12 @@ mod axum_http_tracker_server { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::KeyId; - use crate::http::asserts::{ - assert_invalid_authentication_key_error_response, assert_is_announce_response, - assert_peer_not_authenticated_error_response, - }; + use crate::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; use crate::http::server::start_private_http_tracker; - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_respond_to_authenticated_peers() { let http_tracker_server = start_private_http_tracker(Version::Axum).await; @@ -2565,8 +2561,7 @@ mod axum_http_tracker_server { assert_is_announce_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { let http_tracker_server = start_private_http_tracker(Version::Axum).await; @@ -2576,11 +2571,10 @@ mod axum_http_tracker_server { .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; - assert_peer_not_authenticated_error_response(response).await; + assert_authentication_error_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_if_the_peer_authentication_key_is_not_valid() { let http_tracker_server = start_private_http_tracker(Version::Axum).await; @@ -2591,7 +2585,7 @@ mod axum_http_tracker_server { .announce(&QueryBuilder::default().query()) .await; - assert_invalid_authentication_key_error_response(response).await; + assert_authentication_error_response(response).await; } } From 468009da1f22fecc00b83b6408e1c010e1a15fdb Mon Sep 17 00:00:00 
Returns a specific authentication error when the peer uses a key with an invalid format.
+ Path(key_id): Path, + remote_client_ip: RemoteClientIp, +) -> Response { + debug!("http scrape request: {:#?}", &scrape_request); + + match auth::authenticate(&key_id, &tracker).await { + Ok(_) => (), + Err(_) => return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await, + } + + handle_real_scrape(&tracker, &scrape_request, &remote_client_ip).await +} + +async fn handle_real_scrape(tracker: &Arc, scrape_request: &Scrape, remote_client_ip: &RemoteClientIp) -> Response { + let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, remote_client_ip) { + Ok(peer_ip) => peer_ip, + Err(err) => return err, + }; + + let scrape_data = services::scrape::invoke(tracker, &scrape_request.info_hashes, &peer_ip).await; + + responses::scrape::Bencoded::from(scrape_data).into_response() +} + +/// When authentication fails in `private` mode the tracker returns empty swarm metadata for all the requested infohashes. +async fn handle_fake_scrape(tracker: &Arc, scrape_request: &Scrape, remote_client_ip: &RemoteClientIp) -> Response { + let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, remote_client_ip) { Ok(peer_ip) => peer_ip, Err(err) => return err, }; - let scrape_data = services::scrape::invoke(tracker.clone(), &scrape_request.info_hashes, &peer_ip).await; + let scrape_data = services::scrape::fake_invoke(tracker, &scrape_request.info_hashes, &peer_ip).await; responses::scrape::Bencoded::from(scrape_data).into_response() } diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index 646dd0aa3..21b7260ae 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -15,7 +15,8 @@ pub fn router(tracker: &Arc) -> Router { .route("/announce", get(announce::handle_without_key).with_state(tracker.clone())) .route("/announce/:key", get(announce::handle_with_key).with_state(tracker.clone())) // Scrape request - .route("/scrape", 
get(scrape::handle).with_state(tracker.clone())) + .route("/scrape", get(scrape::handle_without_key).with_state(tracker.clone())) + .route("/scrape/:key", get(scrape::handle_with_key).with_state(tracker.clone())) // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) } diff --git a/src/http/axum_implementation/services/scrape.rs b/src/http/axum_implementation/services/scrape.rs index f40b8f999..30f00a47b 100644 --- a/src/http/axum_implementation/services/scrape.rs +++ b/src/http/axum_implementation/services/scrape.rs @@ -4,9 +4,25 @@ use std::sync::Arc; use crate::protocol::info_hash::InfoHash; use crate::tracker::{statistics, ScrapeData, Tracker}; -pub async fn invoke(tracker: Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { +pub async fn invoke(tracker: &Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { let scrape_data = tracker.scrape(info_hashes).await; + send_scrape_event(original_peer_ip, tracker).await; + + scrape_data +} + +/// When the peer is not authenticated and the tracker is running in `private` mode, +/// the tracker returns empty stats for all the torrents. 
+    // It returns empty swarm metadata for all the infohashes.
let http_tracker = start_private_http_tracker(Version::Axum).await; @@ -2677,10 +2675,10 @@ mod axum_http_tracker_server { assert_scrape_response(response, &expected_scrape_response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { // There is not authentication error + // code-review: should this really be this way? let http_tracker = start_private_http_tracker(Version::Axum).await; From 95a69e5d9bd616c0507b90d80d5817fcdde36068 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Feb 2023 17:51:55 +0000 Subject: [PATCH 0411/1003] feat(http): [#196] return auth error when the key query param cannot be parsed Returns an specific authentication error when the peer uses a key with an invalid format. --- .../axum_implementation/handlers/announce.rs | 11 +++++- src/http/axum_implementation/handlers/auth.rs | 13 +++++++ .../axum_implementation/handlers/scrape.rs | 12 ++++++- tests/http_tracker.rs | 34 +++++++++++++++++-- 4 files changed, 66 insertions(+), 4 deletions(-) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 3ad11df51..b9b964605 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -7,6 +7,7 @@ use axum::extract::{Path, State}; use axum::response::{IntoResponse, Response}; use log::debug; +use super::auth::KeyIdParam; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; @@ -41,11 +42,19 @@ pub async fn handle_without_key( pub async fn handle_with_key( State(tracker): State>, ExtractRequest(announce_request): ExtractRequest, - Path(key_id): Path, + Path(key_id_param): Path, remote_client_ip: RemoteClientIp, ) -> Response { 
debug!("http announce request: {:#?}", announce_request); + let Ok(key_id) = key_id_param.value().parse::() else { + return responses::error::Error::from( + auth::Error::InvalidKeyFormat { + location: Location::caller() + }) + .into_response() + }; + match auth::authenticate(&key_id, &tracker).await { Ok(_) => (), Err(error) => return responses::error::Error::from(error).into_response(), diff --git a/src/http/axum_implementation/handlers/auth.rs b/src/http/axum_implementation/handlers/auth.rs index 13f5b27e6..3b9aebc23 100644 --- a/src/http/axum_implementation/handlers/auth.rs +++ b/src/http/axum_implementation/handlers/auth.rs @@ -1,16 +1,29 @@ use std::panic::Location; use std::sync::Arc; +use serde::Deserialize; use thiserror::Error; use crate::http::axum_implementation::responses; use crate::tracker::auth::{self, KeyId}; use crate::tracker::Tracker; +#[derive(Deserialize)] +pub struct KeyIdParam(String); + +impl KeyIdParam { + #[must_use] + pub fn value(&self) -> String { + self.0.clone() + } +} + #[derive(Debug, Error)] pub enum Error { #[error("Missing authentication key for private tracker. Error in {location}")] MissingAuthKey { location: &'static Location<'static> }, + #[error("Invalid format authentication key. 
Error in {location}")] + InvalidKeyFormat { location: &'static Location<'static> }, } /// # Errors diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 37e9a9062..814cdbfa4 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -1,9 +1,11 @@ +use std::panic::Location; use std::sync::Arc; use axum::extract::{Path, State}; use axum::response::{IntoResponse, Response}; use log::debug; +use super::auth::KeyIdParam; use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; @@ -32,11 +34,19 @@ pub async fn handle_without_key( pub async fn handle_with_key( State(tracker): State>, ExtractRequest(scrape_request): ExtractRequest, - Path(key_id): Path, + Path(key_id_param): Path, remote_client_ip: RemoteClientIp, ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); + let Ok(key_id) = key_id_param.value().parse::() else { + return responses::error::Error::from( + auth::Error::InvalidKeyFormat { + location: Location::caller() + }) + .into_response() + }; + match auth::authenticate(&key_id, &tracker).await { Ok(_) => (), Err(_) => return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await, diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index f60c755e0..0536ab0b7 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -2575,7 +2575,22 @@ mod axum_http_tracker_server { } #[tokio::test] - async fn should_fail_if_the_peer_authentication_key_is_not_valid() { + async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + let http_tracker_server = start_private_http_tracker(Version::Axum).await; + + let invalid_key_id = "INVALID_KEY_ID"; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!( + 
"announce/{invalid_key_id}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" + )) + .await; + + assert_authentication_error_response(response).await; + } + + #[tokio::test] + async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { let http_tracker_server = start_private_http_tracker(Version::Axum).await; // The tracker does not have this key @@ -2600,12 +2615,27 @@ mod axum_http_tracker_server { use torrust_tracker::tracker::peer; use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::assert_scrape_response; + use crate::http::asserts::{assert_authentication_error_response, assert_scrape_response}; use crate::http::client::Client; use crate::http::requests; use crate::http::responses::scrape::{File, ResponseBuilder}; use crate::http::server::start_private_http_tracker; + #[tokio::test] + async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + let http_tracker_server = start_private_http_tracker(Version::Axum).await; + + let invalid_key_id = "INVALID_KEY_ID"; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!( + "scrape/{invalid_key_id}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + )) + .await; + + assert_authentication_error_response(response).await; + } + #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { let http_tracker = start_private_http_tracker(Version::Axum).await; From ea249733f4f74ae1c35e47fda965d97b83681f11 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Mar 2023 11:20:20 +0000 Subject: [PATCH 0412/1003] refactor(http): [#200] move function to Tracker --- src/http/axum_implementation/handlers/announce.rs | 2 +- src/http/axum_implementation/handlers/auth.rs | 15 +-------------- src/http/axum_implementation/handlers/scrape.rs | 2 +- src/tracker/mod.rs | 11 
+++++++++++ 4 files changed, 14 insertions(+), 16 deletions(-) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index b9b964605..9f39a5bdf 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -55,7 +55,7 @@ pub async fn handle_with_key( .into_response() }; - match auth::authenticate(&key_id, &tracker).await { + match tracker.authenticate(&key_id).await { Ok(_) => (), Err(error) => return responses::error::Error::from(error).into_response(), } diff --git a/src/http/axum_implementation/handlers/auth.rs b/src/http/axum_implementation/handlers/auth.rs index 3b9aebc23..366526664 100644 --- a/src/http/axum_implementation/handlers/auth.rs +++ b/src/http/axum_implementation/handlers/auth.rs @@ -1,12 +1,10 @@ use std::panic::Location; -use std::sync::Arc; use serde::Deserialize; use thiserror::Error; use crate::http::axum_implementation::responses; -use crate::tracker::auth::{self, KeyId}; -use crate::tracker::Tracker; +use crate::tracker::auth; #[derive(Deserialize)] pub struct KeyIdParam(String); @@ -26,17 +24,6 @@ pub enum Error { InvalidKeyFormat { location: &'static Location<'static> }, } -/// # Errors -/// -/// Will return an error if the the authentication key cannot be verified. 
-pub async fn authenticate(key_id: &KeyId, tracker: &Arc) -> Result<(), auth::Error> { - if tracker.is_private() { - tracker.verify_auth_key(key_id).await - } else { - Ok(()) - } -} - impl From for responses::error::Error { fn from(err: Error) -> Self { responses::error::Error { diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 814cdbfa4..6edf2fdb8 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -47,7 +47,7 @@ pub async fn handle_with_key( .into_response() }; - match auth::authenticate(&key_id, &tracker).await { + match tracker.authenticate(&key_id).await { Ok(_) => (), Err(_) => return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await, } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 2604c5045..31eeef6dc 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -334,6 +334,17 @@ impl Tracker { Ok(()) } + /// # Errors + /// + /// Will return an error if the the authentication key cannot be verified. 
+    /// Will return an error if the authentication key cannot be verified.
let Ok(key_id) = key_id_param.value().parse::() else { return responses::error::Error::from( auth::Error::InvalidKeyFormat { @@ -64,6 +65,11 @@ pub async fn handle_with_key( } async fn handle(tracker: &Arc, announce_request: &Announce, remote_client_ip: &RemoteClientIp) -> Response { + match tracker.authorize(&announce_request.info_hash).await { + Ok(_) => (), + Err(error) => return responses::error::Error::from(error).into_response(), + } + let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, remote_client_ip) { Ok(peer_ip) => peer_ip, Err(err) => return err, diff --git a/src/http/axum_implementation/handlers/mod.rs b/src/http/axum_implementation/handlers/mod.rs index 0d8aa7f52..7cc5022e6 100644 --- a/src/http/axum_implementation/handlers/mod.rs +++ b/src/http/axum_implementation/handlers/mod.rs @@ -1,4 +1,15 @@ +use super::responses; +use crate::tracker::error::Error; + pub mod announce; pub mod auth; pub mod scrape; pub mod status; + +impl From for responses::error::Error { + fn from(err: Error) -> Self { + responses::error::Error { + failure_reason: format!("Tracker error: {err}"), + } + } +} diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 6edf2fdb8..649d630b0 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -23,7 +23,7 @@ pub async fn handle_without_key( ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); - if tracker.is_private() { + if tracker.requires_authentication() { return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await; } @@ -39,6 +39,7 @@ pub async fn handle_with_key( ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); + // todo: extract to Axum extractor. Duplicate code in `announce` handler. 
let Ok(key_id) = key_id_param.value().parse::() else { return responses::error::Error::from( auth::Error::InvalidKeyFormat { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 31eeef6dc..7733940c9 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -107,6 +107,10 @@ impl Tracker { self.mode == mode::Mode::Listed || self.mode == mode::Mode::PrivateListed } + pub fn requires_authentication(&self) -> bool { + self.is_private() + } + /// It handles an announce request. /// /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). @@ -345,6 +349,27 @@ impl Tracker { } } + /// The only authorization process is the whitelist. + /// + /// # Errors + /// + /// Will return an error if the tracker is running in `listed` mode + /// and the infohash is not whitelisted. + pub async fn authorize(&self, info_hash: &InfoHash) -> Result<(), Error> { + if !self.is_whitelisted() { + return Ok(()); + } + + if self.is_info_hash_whitelisted(info_hash).await { + return Ok(()); + } + + return Err(Error::TorrentNotWhitelisted { + info_hash: *info_hash, + location: Location::caller(), + }); + } + /// Loading the torrents from database into memory /// /// # Errors diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 0536ab0b7..60ccae06b 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -2403,8 +2403,7 @@ mod axum_http_tracker_server { use crate::http::requests::announce::QueryBuilder; use crate::http::server::start_whitelisted_http_tracker; - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { let http_tracker_server = start_whitelisted_http_tracker(Version::Axum).await; @@ -2417,8 +2416,7 @@ mod axum_http_tracker_server { assert_torrent_not_in_whitelist_error_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { let http_tracker_server = 
start_whitelisted_http_tracker(Version::Axum).await; From a8e02b35985921284f121f63182526df45017106 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Mar 2023 12:31:35 +0000 Subject: [PATCH 0414/1003] feat(http): [#201] scrape request in listed mode --- src/tracker/mod.rs | 6 +++++- src/tracker/torrent.rs | 7 +++++++ tests/http_tracker.rs | 6 ++---- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 7733940c9..a3e0ed4fc 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -140,7 +140,11 @@ impl Tracker { let mut scrape_data = ScrapeData::empty(); for info_hash in info_hashes { - scrape_data.add_file(info_hash, self.get_swarm_metadata(info_hash).await); + let swarm_metadata = match self.authorize(info_hash).await { + Ok(_) => self.get_swarm_metadata(info_hash).await, + Err(_) => SwarmMetadata::zeroed(), + }; + scrape_data.add_file(info_hash, swarm_metadata); } scrape_data diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index dc41b083e..aa155dfac 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -21,6 +21,13 @@ pub struct SwarmMetadata { pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) } +impl SwarmMetadata { + #[must_use] + pub fn zeroed() -> Self { + Self::default() + } +} + impl Entry { #[must_use] pub fn new() -> Entry { diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 60ccae06b..96062b46e 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -2450,8 +2450,7 @@ mod axum_http_tracker_server { use crate::http::responses::scrape::{File, ResponseBuilder}; use crate::http::server::start_whitelisted_http_tracker; - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { let http_tracker = start_whitelisted_http_tracker(Version::Axum).await; @@ -2480,8 +2479,7 @@ mod axum_http_tracker_server { 
It was only added to test the initial HTTP scaffolding for the Axum implementation.
+0,0 @@ -/// Temporary handler for testing and debugging the new Axum implementation -/// It should be removed once the migration to Axum is finished. -use axum::response::Json; - -use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; -use crate::http::axum_implementation::resources::ok::Ok; -use crate::http::axum_implementation::responses::ok; - -#[allow(clippy::unused_async)] -pub async fn handle(remote_client_ip: RemoteClientIp) -> Json { - ok::response(&remote_client_ip) -} diff --git a/src/http/axum_implementation/mod.rs b/src/http/axum_implementation/mod.rs index d8431457a..ecc60e1f8 100644 --- a/src/http/axum_implementation/mod.rs +++ b/src/http/axum_implementation/mod.rs @@ -2,7 +2,6 @@ pub mod extractors; pub mod handlers; pub mod query; pub mod requests; -pub mod resources; pub mod responses; pub mod routes; pub mod server; diff --git a/src/http/axum_implementation/resources/mod.rs b/src/http/axum_implementation/resources/mod.rs deleted file mode 100644 index a493c2ac2..000000000 --- a/src/http/axum_implementation/resources/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod ok; diff --git a/src/http/axum_implementation/resources/ok.rs b/src/http/axum_implementation/resources/ok.rs deleted file mode 100644 index f941b9fb3..000000000 --- a/src/http/axum_implementation/resources/ok.rs +++ /dev/null @@ -1,8 +0,0 @@ -use serde::{Deserialize, Serialize}; - -use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; - -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct Ok { - pub remote_client_ip: RemoteClientIp, -} diff --git a/src/http/axum_implementation/responses/mod.rs b/src/http/axum_implementation/responses/mod.rs index 7e8666934..bdc689056 100644 --- a/src/http/axum_implementation/responses/mod.rs +++ b/src/http/axum_implementation/responses/mod.rs @@ -1,4 +1,3 @@ pub mod announce; pub mod error; -pub mod ok; pub mod scrape; diff --git a/src/http/axum_implementation/responses/ok.rs 
b/src/http/axum_implementation/responses/ok.rs deleted file mode 100644 index dfd062b51..000000000 --- a/src/http/axum_implementation/responses/ok.rs +++ /dev/null @@ -1,11 +0,0 @@ -use axum::Json; - -use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; -use crate::http::axum_implementation::resources::ok::Ok; - -#[must_use] -pub fn response(remote_client_ip: &RemoteClientIp) -> Json { - Json(Ok { - remote_client_ip: remote_client_ip.clone(), - }) -} diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index 21b7260ae..af987ece2 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -4,13 +4,11 @@ use axum::routing::get; use axum::Router; use axum_client_ip::SecureClientIpSource; -use super::handlers::{announce, scrape, status}; +use super::handlers::{announce, scrape}; use crate::tracker::Tracker; pub fn router(tracker: &Arc) -> Router { Router::new() - // Status - .route("/status", get(status::handle)) // Announce request .route("/announce", get(announce::handle_without_key).with_state(tracker.clone())) .route("/announce/:key", get(announce::handle_with_key).with_state(tracker.clone())) diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 96062b46e..2360df9ab 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -5,7 +5,7 @@ /// cargo test `warp_http_tracker_server` -- --nocapture /// ``` /// -/// Axum version ()WIP): +/// Axum version (WIP): /// ```text /// cargo test `warp_http_tracker_server` -- --nocapture /// ``` @@ -1271,143 +1271,6 @@ mod axum_http_tracker_server { // WIP: migration HTTP from Warp to Axum - use local_ip_address::local_ip; - use torrust_tracker::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; - use torrust_tracker::http::axum_implementation::resources::ok::Ok; - use torrust_tracker::http::Version; - - use crate::http::client::Client; - use 
crate::http::server::start_default_http_tracker; - - #[tokio::test] - async fn should_return_the_status() { - // This is a temporary test to test the new Axum HTTP tracker server scaffolding - - let http_tracker_server = start_default_http_tracker(Version::Axum).await; - - let client_ip = local_ip().unwrap(); - - let response = Client::bind(http_tracker_server.get_connection_info(), client_ip) - .get("status") - .await; - - let ok: Ok = serde_json::from_str(&response.text().await.unwrap()).unwrap(); - - assert_eq!( - ok, - Ok { - remote_client_ip: RemoteClientIp { - right_most_x_forwarded_for: None, - connection_info_ip: Some(client_ip) - } - } - ); - } - - mod should_get_the_remote_client_ip_from_the_http_request { - - // Temporary tests to test that the new Axum HTTP tracker gets the right remote client IP. - // Once the implementation is finished, test for announce request will cover these cases. - - use std::net::IpAddr; - use std::str::FromStr; - - use local_ip_address::local_ip; - use torrust_tracker::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; - use torrust_tracker::http::axum_implementation::resources::ok::Ok; - use torrust_tracker::http::Version; - - use crate::http::client::Client; - use crate::http::server::{start_http_tracker_on_reverse_proxy, start_public_http_tracker}; - - #[tokio::test] - async fn when_the_client_ip_is_a_local_ip_it_should_assign_that_ip() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; - - let client_ip = local_ip().unwrap(); - - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); - - let response = client.get("status").await; - - let ok: Ok = serde_json::from_str(&response.text().await.unwrap()).unwrap(); - - assert_eq!( - ok, - Ok { - remote_client_ip: RemoteClientIp { - right_most_x_forwarded_for: None, - connection_info_ip: Some(client_ip) - } - } - ); - } - - #[tokio::test] - async fn 
when_the_client_ip_is_a_loopback_ipv4_it_should_assign_that_ip() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; - - let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); - let client_ip = loopback_ip; - - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); - - let response = client.get("status").await; - - let ok: Ok = serde_json::from_str(&response.text().await.unwrap()).unwrap(); - - assert_eq!( - ok, - Ok { - remote_client_ip: RemoteClientIp { - right_most_x_forwarded_for: None, - connection_info_ip: Some(client_ip) - } - } - ); - } - - #[tokio::test] - async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_as_secure_ip_the_right_most_ip_in_the_x_forwarded_for_http_header( - ) { - /* - client <-> http proxy <-> tracker <-> Internet - ip: header: config: remote client ip: - 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 - */ - - let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; - - let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); - let client_ip = loopback_ip; - - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); - - let left_most_ip = IpAddr::from_str("203.0.113.195").unwrap(); - let right_most_ip = IpAddr::from_str("150.172.238.178").unwrap(); - - let response = client - .get_with_header( - "status", - "X-Forwarded-For", - &format!("{left_most_ip},2001:db8:85a3:8d3:1319:8a2e:370:7348,{right_most_ip}"), - ) - .await; - - let ok: Ok = serde_json::from_str(&response.text().await.unwrap()).unwrap(); - - assert_eq!( - ok, - Ok { - remote_client_ip: RemoteClientIp { - right_most_x_forwarded_for: Some(right_most_ip), - connection_info_ip: Some(client_ip) - } - } - ); - } - } - mod for_all_config_modes { mod and_running_on_reverse_proxy { From a9e3a33c0592f642f77faca84d84c46d25ca527a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Mar 2023 18:10:49 +0000 Subject: [PATCH 
0416/1003] refactor(http): extract Axum extractor for the URL path param key --- .../axum_implementation/extractors/key.rs | 55 +++++++++++++++++++ .../axum_implementation/extractors/mod.rs | 1 + .../axum_implementation/handlers/announce.rs | 16 +----- src/http/axum_implementation/handlers/auth.rs | 6 +- .../axum_implementation/handlers/scrape.rs | 18 +----- 5 files changed, 66 insertions(+), 30 deletions(-) create mode 100644 src/http/axum_implementation/extractors/key.rs diff --git a/src/http/axum_implementation/extractors/key.rs b/src/http/axum_implementation/extractors/key.rs new file mode 100644 index 000000000..6cc2f13e8 --- /dev/null +++ b/src/http/axum_implementation/extractors/key.rs @@ -0,0 +1,55 @@ +use std::panic::Location; + +use axum::async_trait; +use axum::extract::{FromRequestParts, Path}; +use axum::http::request::Parts; +use axum::response::{IntoResponse, Response}; + +use crate::http::axum_implementation::handlers::auth::{self, KeyIdParam}; +use crate::http::axum_implementation::responses; +use crate::tracker::auth::KeyId; + +pub struct ExtractKeyId(pub KeyId); + +#[async_trait] +impl FromRequestParts for ExtractKeyId +where + S: Send + Sync, +{ + type Rejection = Response; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + match Path::::from_request_parts(parts, state).await { + Ok(key_id_param) => { + let Ok(key_id) = key_id_param.0.value().parse::() else { + return Err(responses::error::Error::from( + auth::Error::InvalidKeyFormat { + location: Location::caller() + }) + .into_response()) + }; + Ok(ExtractKeyId(key_id)) + } + Err(rejection) => match rejection { + axum::extract::rejection::PathRejection::FailedToDeserializePathParams(_) => { + return Err(responses::error::Error::from(auth::Error::InvalidKeyFormat { + location: Location::caller(), + }) + .into_response()) + } + axum::extract::rejection::PathRejection::MissingPathParams(_) => { + return Err(responses::error::Error::from(auth::Error::MissingAuthKey { + 
location: Location::caller(), + }) + .into_response()) + } + _ => { + return Err(responses::error::Error::from(auth::Error::CannotExtractKeyParam { + location: Location::caller(), + }) + .into_response()) + } + }, + } + } +} diff --git a/src/http/axum_implementation/extractors/mod.rs b/src/http/axum_implementation/extractors/mod.rs index 380eeda6d..e6d9e8c67 100644 --- a/src/http/axum_implementation/extractors/mod.rs +++ b/src/http/axum_implementation/extractors/mod.rs @@ -1,4 +1,5 @@ pub mod announce_request; +pub mod key; pub mod peer_ip; pub mod remote_client_ip; pub mod scrape_request; diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index e4cd476fa..93dbc8115 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -3,12 +3,12 @@ use std::panic::Location; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use axum::extract::{Path, State}; +use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use super::auth::KeyIdParam; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; +use crate::http::axum_implementation::extractors::key::ExtractKeyId; use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::handlers::auth; @@ -16,7 +16,6 @@ use crate::http::axum_implementation::requests::announce::{Announce, Compact, Ev use crate::http::axum_implementation::responses::{self, announce}; use crate::http::axum_implementation::services; use crate::protocol::clock::{Current, Time}; -use crate::tracker::auth::KeyId; use crate::tracker::peer::Peer; use crate::tracker::Tracker; @@ -42,20 +41,11 @@ pub async fn handle_without_key( pub async fn handle_with_key( State(tracker): State>, ExtractRequest(announce_request): ExtractRequest, - 
Path(key_id_param): Path, + ExtractKeyId(key_id): ExtractKeyId, remote_client_ip: RemoteClientIp, ) -> Response { debug!("http announce request: {:#?}", announce_request); - // todo: extract to Axum extractor. Duplicate code in `scrape` handler. - let Ok(key_id) = key_id_param.value().parse::() else { - return responses::error::Error::from( - auth::Error::InvalidKeyFormat { - location: Location::caller() - }) - .into_response() - }; - match tracker.authenticate(&key_id).await { Ok(_) => (), Err(error) => return responses::error::Error::from(error).into_response(), diff --git a/src/http/axum_implementation/handlers/auth.rs b/src/http/axum_implementation/handlers/auth.rs index 366526664..5673ea851 100644 --- a/src/http/axum_implementation/handlers/auth.rs +++ b/src/http/axum_implementation/handlers/auth.rs @@ -18,10 +18,12 @@ impl KeyIdParam { #[derive(Debug, Error)] pub enum Error { - #[error("Missing authentication key for private tracker. Error in {location}")] + #[error("Missing authentication key param for private tracker. Error in {location}")] MissingAuthKey { location: &'static Location<'static> }, - #[error("Invalid format authentication key. Error in {location}")] + #[error("Invalid format for authentication key param. Error in {location}")] InvalidKeyFormat { location: &'static Location<'static> }, + #[error("Cannot extract authentication key param from URL path. 
Error in {location}")] + CannotExtractKeyParam { location: &'static Location<'static> }, } impl From for responses::error::Error { diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 649d630b0..19d902f8e 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -1,18 +1,15 @@ -use std::panic::Location; use std::sync::Arc; -use axum::extract::{Path, State}; +use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use super::auth::KeyIdParam; +use crate::http::axum_implementation::extractors::key::ExtractKeyId; use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; -use crate::http::axum_implementation::handlers::auth; use crate::http::axum_implementation::requests::scrape::Scrape; use crate::http::axum_implementation::{responses, services}; -use crate::tracker::auth::KeyId; use crate::tracker::Tracker; #[allow(clippy::unused_async)] @@ -34,20 +31,11 @@ pub async fn handle_without_key( pub async fn handle_with_key( State(tracker): State>, ExtractRequest(scrape_request): ExtractRequest, - Path(key_id_param): Path, + ExtractKeyId(key_id): ExtractKeyId, remote_client_ip: RemoteClientIp, ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); - // todo: extract to Axum extractor. Duplicate code in `announce` handler. 
- let Ok(key_id) = key_id_param.value().parse::() else { - return responses::error::Error::from( - auth::Error::InvalidKeyFormat { - location: Location::caller() - }) - .into_response() - }; - match tracker.authenticate(&key_id).await { Ok(_) => (), Err(_) => return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await, From 2ddefa87fa65db43a3747a328a28510e2ca6798f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 2 Mar 2023 11:45:54 +0000 Subject: [PATCH 0417/1003] feat: add .coverage dir to .gitignore See: https://github.com/torrust/torrust-tracker/discussions/209 --- .gitignore | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index d574298da..6b58dcb45 100644 --- a/.gitignore +++ b/.gitignore @@ -1,12 +1,11 @@ .env -/target **/*.rs.bk -/database.json.bz2 -/database.db +/.coverage/ /.idea/ +/.vscode/launch.json /config.toml /data.db -/.vscode/launch.json +/database.db +/database.json.bz2 /storage/ - - +/target From ee5b0886ec510b1c775a7664c58ca8070c2e8074 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 2 Mar 2023 11:49:40 +0000 Subject: [PATCH 0418/1003] docs(http): add comment for duplicate struct info --- src/tracker/torrent.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index aa155dfac..23ca6886e 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -14,6 +14,8 @@ pub struct Entry { pub completed: u32, } +/// Swarm metadata dictionary in the scrape response. +/// BEP 48: #[derive(Debug, PartialEq, Default)] pub struct SwarmMetadata { pub complete: u32, // The number of active peers that have completed downloading (seeders) @@ -28,6 +30,14 @@ impl SwarmMetadata { } } +/// Swarm statistics. Alternative struct for swarm metadata in scrape response. 
+#[derive(Debug)] +pub struct SwamStats { + pub completed: u32, // The number of peers that have ever completed downloading + pub seeders: u32, // The number of active peers that have completed downloading (seeders) + pub leechers: u32, // The number of active peers that have not completed downloading (leechers) +} + impl Entry { #[must_use] pub fn new() -> Entry { @@ -111,13 +121,6 @@ impl Default for Entry { } } -#[derive(Debug)] -pub struct SwamStats { - pub completed: u32, - pub seeders: u32, - pub leechers: u32, -} - #[cfg(test)] mod tests { From fca5353fb825736bc2d86ed477fd25c306245a8b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 2 Mar 2023 16:43:56 +0000 Subject: [PATCH 0419/1003] test(tracker): [#207] add test for Tracker::announce --- src/tracker/mod.rs | 388 +++++++++++++++++++++++++++++------------ src/tracker/torrent.rs | 2 +- 2 files changed, 273 insertions(+), 117 deletions(-) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index a3e0ed4fc..c2d66244a 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -44,6 +44,7 @@ pub struct TorrentsMetrics { pub torrents: u64, } +#[derive(Debug, PartialEq, Default)] pub struct AnnounceData { pub peers: Vec, pub swam_stats: SwamStats, @@ -523,12 +524,14 @@ mod tests { mod the_tracker { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::str::FromStr; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use crate::config::{ephemeral_configuration, Configuration}; use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::protocol::info_hash::InfoHash; use crate::tracker::peer::{self, Peer}; use crate::tracker::statistics::Keeper; use crate::tracker::{TorrentsMetrics, Tracker}; @@ -555,6 +558,62 @@ mod tests { } } + fn info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + // The client peer IP + fn peer_ip() -> IpAddr { + IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) + } + + /// Sample peer whose state is not 
relevant for the tests + fn sample_peer() -> Peer { + complete_peer() + } + + /// Sample peer when for tests that need more than one peer + fn peer1() -> Peer { + Peer { + peer_id: peer::Id(*b"-qB00000000000000001"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Completed, + } + } + + /// Sample peer when for tests that need more than one peer + fn peer2() -> Peer { + Peer { + peer_id: peer::Id(*b"-qB00000000000000002"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Completed, + } + } + + fn seeder() -> Peer { + complete_peer() + } + + fn leecher() -> Peer { + incomplete_peer() + } + + fn started_peer() -> Peer { + incomplete_peer() + } + + fn completed_peer() -> Peer { + complete_peer() + } + /// A peer that has completed downloading. 
fn complete_peer() -> Peer { Peer { @@ -598,181 +657,278 @@ mod tests { ); } - mod handling_an_announce_request { - mod should_assign_the_ip_to_the_peer { - - use std::net::{IpAddr, Ipv4Addr}; - - use crate::tracker::assign_ip_address_to_peer; - - #[test] - fn using_the_source_ip_instead_of_the_ip_in_the_announce_request() { - let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + mod for_all_config_modes { - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + mod handling_an_announce_request { - assert_eq!(peer_ip, remote_ip); - } + use crate::tracker::tests::the_tracker::{info_hash, peer1, peer2, peer_ip, sample_peer, tracker_factory}; - mod and_when_the_client_ip_is_a_ipv4_loopback_ip { + mod should_assign_the_ip_to_the_peer { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - use std::str::FromStr; + use std::net::{IpAddr, Ipv4Addr}; use crate::tracker::assign_ip_address_to_peer; #[test] - fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + fn using_the_source_ip_instead_of_the_ip_in_the_announce_request() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); let peer_ip = assign_ip_address_to_peer(&remote_ip, None); assert_eq!(peer_ip, remote_ip); } - #[test] - fn it_should_use_the_external_tracker_ip_in_tracker_configuration_if_it_is_defined() { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + mod and_when_the_client_ip_is_a_ipv4_loopback_ip { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; + + use crate::tracker::assign_ip_address_to_peer; + + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + #[test] + fn it_should_use_the_external_tracker_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = 
IpAddr::V4(Ipv4Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv6_ip( + ) { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); - let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + let tracker_external_ip = + IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); - assert_eq!(peer_ip, tracker_external_ip); + assert_eq!(peer_ip, tracker_external_ip); + } } - #[test] - fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv6_ip( - ) { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + mod and_when_client_ip_is_a_ipv6_loopback_ip { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; + + use crate::tracker::assign_ip_address_to_peer; + + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } - let tracker_external_ip = - IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + #[test] + fn it_should_use_the_external_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + let tracker_external_ip = + IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); - assert_eq!(peer_ip, 
tracker_external_ip); + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv4_ip( + ) { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } } } - mod and_when_client_ip_is_a_ipv6_loopback_ip { + #[tokio::test] + async fn it_should_return_the_announce_data_with_an_empty_peer_list_when_it_is_the_first_announced_peer() { + let tracker = tracker_factory(); - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - use std::str::FromStr; + let mut peer = sample_peer(); - use crate::tracker::assign_ip_address_to_peer; + let announce_data = tracker.announce(&info_hash(), &mut peer, &peer_ip()).await; - #[test] - fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + assert_eq!(announce_data.peers, vec![]); + } - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + #[tokio::test] + async fn it_should_return_the_announce_data_with_the_previously_announced_peers() { + let tracker = tracker_factory(); - assert_eq!(peer_ip, remote_ip); + let mut previously_announced_peer = peer1(); + tracker + .announce(&info_hash(), &mut previously_announced_peer, &peer_ip()) + .await; + + let mut peer = peer2(); + let announce_data = tracker.announce(&info_hash(), &mut peer, &peer_ip()).await; + + assert_eq!(announce_data.peers, vec![previously_announced_peer]); + } + + mod it_should_update_the_swarm_stats_for_the_torrent { + + use crate::tracker::tests::the_tracker::{ + completed_peer, info_hash, leecher, peer_ip, seeder, started_peer, tracker_factory, + }; + + #[tokio::test] + async fn 
when_the_peer_is_a_seeder() { + let tracker = tracker_factory(); + + let mut peer = seeder(); + + let announce_data = tracker.announce(&info_hash(), &mut peer, &peer_ip()).await; + + assert_eq!(announce_data.swam_stats.seeders, 1); } - #[test] - fn it_should_use_the_external_ip_in_tracker_configuration_if_it_is_defined() { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + #[tokio::test] + async fn when_the_peer_is_a_leecher() { + let tracker = tracker_factory(); - let tracker_external_ip = - IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + let mut peer = leecher(); - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + let announce_data = tracker.announce(&info_hash(), &mut peer, &peer_ip()).await; - assert_eq!(peer_ip, tracker_external_ip); + assert_eq!(announce_data.swam_stats.leechers, 1); } - #[test] - fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv4_ip( - ) { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + #[tokio::test] + async fn when_a_previously_announced_started_peer_has_completed_downloading() { + let tracker = tracker_factory(); - let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + // We have to announce with "started" event because peer does not count if peer was not previously known + let mut started_peer = started_peer(); + tracker.announce(&info_hash(), &mut started_peer, &peer_ip()).await; - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + let mut completed_peer = completed_peer(); + let announce_data = tracker.announce(&info_hash(), &mut completed_peer, &peer_ip()).await; - assert_eq!(peer_ip, tracker_external_ip); + assert_eq!(announce_data.swam_stats.completed, 1); } } } - } - mod handling_a_scrape_request { + mod handling_a_scrape_request { - use std::net::{IpAddr, Ipv4Addr}; + use std::net::{IpAddr, Ipv4Addr}; - use 
crate::protocol::info_hash::InfoHash; - use crate::tracker::tests::the_tracker::{complete_peer, incomplete_peer, tracker_factory}; - use crate::tracker::{ScrapeData, SwarmMetadata}; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::tests::the_tracker::{complete_peer, incomplete_peer, tracker_factory}; + use crate::tracker::{ScrapeData, SwarmMetadata}; - #[tokio::test] - async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent() { - let tracker = tracker_factory(); + #[tokio::test] + async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent( + ) { + let tracker = tracker_factory(); - let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; + let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; - let scrape_data = tracker.scrape(&info_hashes).await; + let scrape_data = tracker.scrape(&info_hashes).await; - let mut expected_scrape_data = ScrapeData::empty(); + let mut expected_scrape_data = ScrapeData::empty(); - expected_scrape_data.add_file_with_no_metadata(&info_hashes[0]); + expected_scrape_data.add_file_with_no_metadata(&info_hashes[0]); - assert_eq!(scrape_data, expected_scrape_data); - } + assert_eq!(scrape_data, expected_scrape_data); + } - #[tokio::test] - async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { - let tracker = tracker_factory(); - - let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); - - // Announce a "complete" peer for the torrent - let mut complete_peer = complete_peer(); - tracker - .announce(&info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10))) - .await; - - // Announce an "incomplete" peer for the torrent - let mut incomplete_peer = incomplete_peer(); - tracker - .announce(&info_hash, &mut incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11))) - 
.await; - - // Scrape - let scrape_data = tracker.scrape(&vec![info_hash]).await; - - // The expected swarm metadata for the file - let mut expected_scrape_data = ScrapeData::empty(); - expected_scrape_data.add_file( - &info_hash, - SwarmMetadata { - complete: 0, // the "complete" peer does not count because it was not previously known - downloaded: 0, - incomplete: 1, // the "incomplete" peer we have just announced - }, - ); - - assert_eq!(scrape_data, expected_scrape_data); - } + #[tokio::test] + async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { + let tracker = tracker_factory(); + + let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); + + // Announce a "complete" peer for the torrent + let mut complete_peer = complete_peer(); + tracker + .announce(&info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10))) + .await; + + // Announce an "incomplete" peer for the torrent + let mut incomplete_peer = incomplete_peer(); + tracker + .announce(&info_hash, &mut incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11))) + .await; + + // Scrape + let scrape_data = tracker.scrape(&vec![info_hash]).await; + + // The expected swarm metadata for the file + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file( + &info_hash, + SwarmMetadata { + complete: 0, // the "complete" peer does not count because it was not previously known + downloaded: 0, + incomplete: 1, // the "incomplete" peer we have just announced + }, + ); + + assert_eq!(scrape_data, expected_scrape_data); + } - #[tokio::test] - async fn it_should_allow_scraping_for_multiple_torrents() { - let tracker = tracker_factory(); + #[tokio::test] + async fn it_should_allow_scraping_for_multiple_torrents() { + let tracker = tracker_factory(); - let info_hashes = vec![ - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), - "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::().unwrap(), - 
]; + let info_hashes = vec![ + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::().unwrap(), + ]; - let scrape_data = tracker.scrape(&info_hashes).await; + let scrape_data = tracker.scrape(&info_hashes).await; - let mut expected_scrape_data = ScrapeData::empty(); - expected_scrape_data.add_file_with_no_metadata(&info_hashes[0]); - expected_scrape_data.add_file_with_no_metadata(&info_hashes[1]); + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file_with_no_metadata(&info_hashes[0]); + expected_scrape_data.add_file_with_no_metadata(&info_hashes[1]); - assert_eq!(scrape_data, expected_scrape_data); + assert_eq!(scrape_data, expected_scrape_data); + } } } + + mod configured_as_whitelisted { + + mod handling_an_announce_request {} + + mod handling_an_scrape_request {} + } + + mod configured_as_private { + + mod handling_an_announce_request {} + + mod handling_an_scrape_request {} + } + + mod configured_as_private_and_whitelisted { + + mod handling_an_announce_request {} + + mod handling_an_scrape_request {} + } } } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 23ca6886e..3362234f0 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -31,7 +31,7 @@ impl SwarmMetadata { } /// Swarm statistics. Alternative struct for swarm metadata in scrape response. 
-#[derive(Debug)] +#[derive(Debug, PartialEq, Default)] pub struct SwamStats { pub completed: u32, // The number of peers that have ever completed downloading pub seeders: u32, // The number of active peers that have completed downloading (seeders) From d50372fbf064413e2d05aac9a5be2d4b13136083 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 2 Mar 2023 16:44:55 +0000 Subject: [PATCH 0420/1003] feat(cargo): add cargo alias to generate coverage reports --- .cargo/config.toml | 3 +++ cSpell.json | 1 + 2 files changed, 4 insertions(+) create mode 100644 .cargo/config.toml diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 000000000..2fb2fe92d --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,3 @@ +[alias] +cov = "llvm-cov --lcov --output-path=./coverage/lcov.info" +cov-html = "llvm-cov --html" diff --git a/cSpell.json b/cSpell.json index b8aceb568..4a9b11ce9 100644 --- a/cSpell.json +++ b/cSpell.json @@ -31,6 +31,7 @@ "infohashes", "infoschema", "intervali", + "lcov", "leecher", "leechers", "libtorrent", From 1e7eff59d35e50cea6d110bdf7528f02af3f011d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 2 Mar 2023 17:14:26 +0000 Subject: [PATCH 0421/1003] docs(tracker): add code-review --- src/tracker/mod.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index c2d66244a..d638f6601 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -120,6 +120,17 @@ impl Tracker { // a tuple with the new peer and the announce data: (Peer, AnnounceData). // It could even be a different struct: `StoredPeer` or `PublicPeer`. + // code-review: in the `scrape` function we perform an authorization check. + // We check if the torrent is whitelisted. Should we also check authorization here? + // I think so because the `Tracker` has the responsibility for checking authentication and authorization. 
+ // The `Tracker` has delegated that responsibility to the handlers + // (because we want to return a friendly error response) but that does not mean we should + // double-check authorization at this domain level too. + // I would propose to return a `Result` here. + // Besides, regarding authentication the `Tracker` is also responsible for authentication but + // we are actually handling authentication at the handlers level. So I would extract that + // responsibility into another authentication service. + peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip())); let swam_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; @@ -614,7 +625,9 @@ mod tests { complete_peer() } - /// A peer that has completed downloading. + /// A peer that counts as `complete` is swarm metadata + /// IMPORTANT!: it only counts if the it has been announce at least once before + /// announcing the `AnnounceEvent::Completed` event. fn complete_peer() -> Peer { Peer { peer_id: peer::Id(*b"-qB00000000000000000"), @@ -627,7 +640,7 @@ mod tests { } } - /// A peer that has NOT completed downloading. 
+ /// A peer that counts as `incomplete` in swarm metadata
add_file_with_zeroed_metadata(&mut self, info_hash: &InfoHash) { + self.files.insert(*info_hash, SwarmMetadata::zeroed()); } } @@ -162,17 +173,6 @@ impl Tracker { scrape_data } - // It return empty swarm metadata for all the infohashes. - pub fn empty_scrape_for(&self, info_hashes: &Vec) -> ScrapeData { - let mut scrape_data = ScrapeData::empty(); - - for info_hash in info_hashes { - scrape_data.add_file(info_hash, SwarmMetadata::default()); - } - - scrape_data - } - async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { let torrents = self.get_torrents().await; match torrents.get(info_hash) { @@ -543,25 +543,31 @@ mod tests { use crate::config::{ephemeral_configuration, Configuration}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; + use crate::tracker::mode::Mode; use crate::tracker::peer::{self, Peer}; use crate::tracker::statistics::Keeper; use crate::tracker::{TorrentsMetrics, Tracker}; - pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) + pub fn public_tracker() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Public; + tracker_factory(configuration) } - pub fn tracker_factory() -> Tracker { - // code-review: the tracker initialization is duplicated in many places. Consider make this function public. + pub fn whitelisted_tracker() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Listed; + tracker_factory(configuration) + } - // Configuration - let configuration = tracker_configuration(); + pub fn tracker_factory(configuration: Configuration) -> Tracker { + // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
// Initialize stats tracker let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker - match Tracker::new(&configuration, Some(stats_event_sender), stats_repository) { + match Tracker::new(&Arc::new(configuration), Some(stats_event_sender), stats_repository) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) @@ -569,7 +575,7 @@ mod tests { } } - fn info_hash() -> InfoHash { + fn sample_info_hash() -> InfoHash { "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() } @@ -584,7 +590,7 @@ mod tests { } /// Sample peer when for tests that need more than one peer - fn peer1() -> Peer { + fn sample_peer_1() -> Peer { Peer { peer_id: peer::Id(*b"-qB00000000000000001"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), @@ -597,7 +603,7 @@ mod tests { } /// Sample peer when for tests that need more than one peer - fn peer2() -> Peer { + fn sample_peer_2() -> Peer { Peer { peer_id: peer::Id(*b"-qB00000000000000002"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), @@ -655,7 +661,7 @@ mod tests { #[tokio::test] async fn should_collect_torrent_metrics() { - let tracker = tracker_factory(); + let tracker = public_tracker(); let torrents_metrics = tracker.get_torrents_metrics().await; @@ -674,7 +680,9 @@ mod tests { mod handling_an_announce_request { - use crate::tracker::tests::the_tracker::{info_hash, peer1, peer2, peer_ip, sample_peer, tracker_factory}; + use crate::tracker::tests::the_tracker::{ + peer_ip, public_tracker, sample_info_hash, sample_peer, sample_peer_1, sample_peer_2, + }; mod should_assign_the_ip_to_the_peer { @@ -776,26 +784,26 @@ mod tests { #[tokio::test] async fn it_should_return_the_announce_data_with_an_empty_peer_list_when_it_is_the_first_announced_peer() { - let tracker = tracker_factory(); + let tracker = public_tracker(); let mut peer = sample_peer(); - let announce_data = tracker.announce(&info_hash(), &mut peer, &peer_ip()).await; 
+ let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; assert_eq!(announce_data.peers, vec![]); } #[tokio::test] async fn it_should_return_the_announce_data_with_the_previously_announced_peers() { - let tracker = tracker_factory(); + let tracker = public_tracker(); - let mut previously_announced_peer = peer1(); + let mut previously_announced_peer = sample_peer_1(); tracker - .announce(&info_hash(), &mut previously_announced_peer, &peer_ip()) + .announce(&sample_info_hash(), &mut previously_announced_peer, &peer_ip()) .await; - let mut peer = peer2(); - let announce_data = tracker.announce(&info_hash(), &mut peer, &peer_ip()).await; + let mut peer = sample_peer_2(); + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; assert_eq!(announce_data.peers, vec![previously_announced_peer]); } @@ -803,41 +811,41 @@ mod tests { mod it_should_update_the_swarm_stats_for_the_torrent { use crate::tracker::tests::the_tracker::{ - completed_peer, info_hash, leecher, peer_ip, seeder, started_peer, tracker_factory, + completed_peer, leecher, peer_ip, public_tracker, sample_info_hash, seeder, started_peer, }; #[tokio::test] async fn when_the_peer_is_a_seeder() { - let tracker = tracker_factory(); + let tracker = public_tracker(); let mut peer = seeder(); - let announce_data = tracker.announce(&info_hash(), &mut peer, &peer_ip()).await; + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; assert_eq!(announce_data.swam_stats.seeders, 1); } #[tokio::test] async fn when_the_peer_is_a_leecher() { - let tracker = tracker_factory(); + let tracker = public_tracker(); let mut peer = leecher(); - let announce_data = tracker.announce(&info_hash(), &mut peer, &peer_ip()).await; + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; assert_eq!(announce_data.swam_stats.leechers, 1); } #[tokio::test] async fn 
when_a_previously_announced_started_peer_has_completed_downloading() { - let tracker = tracker_factory(); + let tracker = public_tracker(); // We have to announce with "started" event because peer does not count if peer was not previously known let mut started_peer = started_peer(); - tracker.announce(&info_hash(), &mut started_peer, &peer_ip()).await; + tracker.announce(&sample_info_hash(), &mut started_peer, &peer_ip()).await; let mut completed_peer = completed_peer(); - let announce_data = tracker.announce(&info_hash(), &mut completed_peer, &peer_ip()).await; + let announce_data = tracker.announce(&sample_info_hash(), &mut completed_peer, &peer_ip()).await; assert_eq!(announce_data.swam_stats.completed, 1); } @@ -849,13 +857,13 @@ mod tests { use std::net::{IpAddr, Ipv4Addr}; use crate::protocol::info_hash::InfoHash; - use crate::tracker::tests::the_tracker::{complete_peer, incomplete_peer, tracker_factory}; + use crate::tracker::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; use crate::tracker::{ScrapeData, SwarmMetadata}; #[tokio::test] async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent( ) { - let tracker = tracker_factory(); + let tracker = public_tracker(); let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; @@ -863,14 +871,14 @@ mod tests { let mut expected_scrape_data = ScrapeData::empty(); - expected_scrape_data.add_file_with_no_metadata(&info_hashes[0]); + expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]); assert_eq!(scrape_data, expected_scrape_data); } #[tokio::test] async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { - let tracker = tracker_factory(); + let tracker = public_tracker(); let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); @@ -905,7 +913,7 @@ mod tests { #[tokio::test] async fn 
it_should_allow_scraping_for_multiple_torrents() { - let tracker = tracker_factory(); + let tracker = public_tracker(); let info_hashes = vec![ "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), @@ -915,8 +923,8 @@ mod tests { let scrape_data = tracker.scrape(&info_hashes).await; let mut expected_scrape_data = ScrapeData::empty(); - expected_scrape_data.add_file_with_no_metadata(&info_hashes[0]); - expected_scrape_data.add_file_with_no_metadata(&info_hashes[1]); + expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]); + expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[1]); assert_eq!(scrape_data, expected_scrape_data); } @@ -927,7 +935,49 @@ mod tests { mod handling_an_announce_request {} - mod handling_an_scrape_request {} + mod handling_an_scrape_request { + + use crate::protocol::info_hash::InfoHash; + use crate::tracker::tests::the_tracker::{ + complete_peer, incomplete_peer, peer_ip, sample_info_hash, whitelisted_tracker, + }; + use crate::tracker::torrent::SwarmMetadata; + use crate::tracker::ScrapeData; + + #[test] + fn it_should_be_able_to_build_a_zeroed_scrape_data_for_a_list_of_info_hashes() { + // Zeroed scrape data is used when the authentication for the scrape request fails. 
+ + let sample_info_hash = sample_info_hash(); + + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file_with_zeroed_metadata(&sample_info_hash); + + assert_eq!(ScrapeData::zeroed(&vec![sample_info_hash]), expected_scrape_data); + } + + #[tokio::test] + async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() { + let tracker = whitelisted_tracker(); + + let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); + + let mut peer = incomplete_peer(); + tracker.announce(&info_hash, &mut peer, &peer_ip()).await; + + // Announce twice to force non zeroed swarm metadata + let mut peer = complete_peer(); + tracker.announce(&info_hash, &mut peer, &peer_ip()).await; + + let scrape_data = tracker.scrape(&vec![info_hash]).await; + + // The expected zeroed swarm metadata for the file + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file(&info_hash, SwarmMetadata::zeroed()); + + assert_eq!(scrape_data, expected_scrape_data); + } + } } mod configured_as_private { From d1a7b7fb2493877cb6db814d54fb5709b8f7d5be Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 2 Mar 2023 21:39:05 +0000 Subject: [PATCH 0423/1003] fix(cargo): fix output path in cargo allias --- .cargo/config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 2fb2fe92d..e3d31cf7f 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,3 +1,3 @@ [alias] -cov = "llvm-cov --lcov --output-path=./coverage/lcov.info" +cov = "llvm-cov --lcov --output-path=./.coverage/lcov.info" cov-html = "llvm-cov --html" From fad683425c284a9c29ee6fa7f597e30767668950 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 3 Mar 2023 13:29:45 +0000 Subject: [PATCH 0424/1003] test(tracker): [#207] add tests for HTTP authentication in Tracker The `Tracker` is responsible for the authentication in the HTTP protocol. 
--- src/tracker/auth.rs | 3 ++ src/tracker/mod.rs | 103 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 106 insertions(+) diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 22f734e48..01de7a619 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -62,6 +62,9 @@ pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> { #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct ExpiringKey { pub id: KeyId, + // todo: we can remove the `Option`. An `ExpiringKey` that does not expire + // is a `KeyId`. In other words, all `ExpiringKeys` must have an + // expiration time. pub valid_until: Option, } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 6fa1d38fc..b8dadab8f 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -199,6 +199,7 @@ impl Tracker { /// /// Will panic if key cannot be converted into a valid `KeyId`. pub async fn remove_auth_key(&self, key: &str) -> Result<(), databases::error::Error> { + // todo: change argument `key: &str` to `key_id: &KeyId` self.database.remove_key_from_keys(key).await?; self.keys.write().await.remove(&key.parse::().unwrap()); Ok(()) @@ -208,6 +209,8 @@ impl Tracker { /// /// Will return a `key::Error` if unable to get any `auth_key`. pub async fn verify_auth_key(&self, key_id: &KeyId) -> Result<(), auth::Error> { + // code-review: this function is public only because it's used in a test. + // We should change the test and make it private. match self.keys.read().await.get(key_id) { None => Err(auth::Error::UnableToReadKey { location: Location::caller(), @@ -319,6 +322,12 @@ impl Tracker { /// /// Will return a `torrent::Error::TorrentNotWhitelisted` if the the Tracker is in listed mode and the `info_hash` is not whitelisted. pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), Error> { + // todo: this is a deprecated method. + // We're splitting authentication and authorization responsibilities. 
+ // Use `authenticate` and `authorize` instead. + + // Authentication + // no authentication needed in public mode if self.is_public() { return Ok(()); @@ -343,6 +352,8 @@ impl Tracker { } } + // Authorization + // check if info_hash is whitelisted if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await { return Err(Error::TorrentNotWhitelisted { @@ -554,6 +565,12 @@ mod tests { tracker_factory(configuration) } + pub fn private_tracker() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Private; + tracker_factory(configuration) + } + pub fn whitelisted_tracker() -> Tracker { let mut configuration = ephemeral_configuration(); configuration.mode = Mode::Listed; @@ -982,6 +999,92 @@ mod tests { mod configured_as_private { + mod handling_authentication { + use std::str::FromStr; + use std::time::Duration; + + use crate::tracker::auth; + use crate::tracker::tests::the_tracker::private_tracker; + + #[tokio::test] + async fn it_should_generate_the_expiring_authentication_keys() { + let tracker = private_tracker(); + + let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + + assert_eq!(key.valid_until.unwrap(), Duration::from_secs(100)); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_by_using_a_key() { + let tracker = private_tracker(); + + let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + + let result = tracker.authenticate(&key.id()).await; + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() { + let tracker = private_tracker(); + + let unregistered_key_id = auth::KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let result = tracker.authenticate(&unregistered_key_id).await; + + assert!(result.is_err()); + } + + #[tokio::test] + async fn it_should_verify_a_valid_authentication_key() { + // todo: this should not be tested directly because + 
// `verify_auth_key` should be a private method. + let tracker = private_tracker(); + + let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + + assert!(tracker.verify_auth_key(&key.id()).await.is_ok()); + } + + #[tokio::test] + async fn it_should_fail_verifying_an_unregistered_authentication_key() { + let tracker = private_tracker(); + + let unregistered_key_id = auth::KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + assert!(tracker.verify_auth_key(&unregistered_key_id).await.is_err()); + } + + #[tokio::test] + async fn it_should_remove_an_authentication_key() { + let tracker = private_tracker(); + + let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + + let result = tracker.remove_auth_key(&key.id().to_string()).await; + + assert!(result.is_ok()); + assert!(tracker.verify_auth_key(&key.id()).await.is_err()); + } + + #[tokio::test] + async fn it_should_load_authentication_keys_from_the_database() { + let tracker = private_tracker(); + + let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + + // Remove the newly generated key in memory + tracker.keys.write().await.remove(&key.id()); + + let result = tracker.load_keys().await; + + assert!(result.is_ok()); + assert!(tracker.verify_auth_key(&key.id()).await.is_ok()); + } + } + mod handling_an_announce_request {} mod handling_an_scrape_request {} From 40ff2498cf3045512daa308021fffd15b2729299 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 3 Mar 2023 13:53:23 +0000 Subject: [PATCH 0425/1003] test(tracker): [#207] add test for authorization (whitelist) in Tracker --- src/tracker/mod.rs | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index b8dadab8f..e16fefa4f 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -950,6 +950,33 @@ mod tests { mod configured_as_whitelisted { + mod handling_authorization { + use 
crate::tracker::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + + #[tokio::test] + async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + let result = tracker.add_torrent_to_whitelist(&info_hash).await; + assert!(result.is_ok()); + + let result = tracker.authorize(&info_hash).await; + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + let result = tracker.authorize(&info_hash).await; + assert!(result.is_err()); + } + } + mod handling_an_announce_request {} mod handling_an_scrape_request { From af949af4b8d11826836a3e1153cb542359bcd404 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 3 Mar 2023 16:07:47 +0000 Subject: [PATCH 0426/1003] test(tracker): [#207] add tests for torrent persistence in Tracker --- src/tracker/mod.rs | 49 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index e16fefa4f..5de9d6f3c 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -404,6 +404,7 @@ impl Tracker { /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. 
pub async fn load_persistent_torrents(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; + let mut torrents = self.torrents.write().await; for (info_hash, completed) in persistent_torrents { @@ -577,6 +578,12 @@ mod tests { tracker_factory(configuration) } + pub fn tracker_persisting_torrents_in_database() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.persistent_torrent_completed_stat = true; + tracker_factory(configuration) + } + pub fn tracker_factory(configuration: Configuration) -> Tracker { // code-review: the tracker initialization is duplicated in many places. Consider make this function public. @@ -1123,5 +1130,47 @@ mod tests { mod handling_an_scrape_request {} } + + mod handling_torrent_persistence { + use aquatic_udp_protocol::AnnounceEvent; + + use crate::tracker::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; + + #[tokio::test] + async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { + let tracker = tracker_persisting_torrents_in_database(); + + let info_hash = sample_info_hash(); + + let mut peer = sample_peer(); + + peer.event = AnnounceEvent::Started; + let swarm_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + assert_eq!(swarm_stats.completed, 0); + + peer.event = AnnounceEvent::Completed; + let swarm_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + assert_eq!(swarm_stats.completed, 1); + + let torrents = tracker.get_all_torrent_peers(&info_hash).await; + assert_eq!(torrents.len(), 1); + + // Remove the newly updated torrent from memory + tracker.torrents.write().await.remove(&info_hash); + + tracker.load_persistent_torrents().await.unwrap(); + + let torrents = tracker.get_torrents().await; + assert!(torrents.contains_key(&info_hash)); + + let torrent_entry = 
torrents.get(&info_hash).unwrap(); + + // It persists the number of completed peers. + assert_eq!(torrent_entry.completed, 1); + + // It does not persist the peers + assert!(torrent_entry.peers.is_empty()); + } + } } } From 7fb92b5c0ac390465159e1567dc1ff92b9e15ced Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 3 Mar 2023 17:25:11 +0000 Subject: [PATCH 0427/1003] test(tracker): [#207] add tests for whitelist in Tracker --- src/apis/handlers.rs | 4 +- src/setup.rs | 7 ++- src/tracker/mod.rs | 114 ++++++++++++++++++++++++++++++++++++++--- src/tracker/torrent.rs | 4 +- 4 files changed, 116 insertions(+), 13 deletions(-) diff --git a/src/apis/handlers.rs b/src/apis/handlers.rs index 38959edbe..f7b5e562c 100644 --- a/src/apis/handlers.rs +++ b/src/apis/handlers.rs @@ -86,7 +86,7 @@ pub async fn remove_torrent_from_whitelist_handler( } pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { - match tracker.load_whitelist().await { + match tracker.load_whitelist_from_database().await { Ok(_) => ok_response(), Err(e) => failed_to_reload_whitelist_response(e), } @@ -117,7 +117,7 @@ pub async fn delete_auth_key_handler( } pub async fn reload_keys_handler(State(tracker): State>) -> Response { - match tracker.load_keys().await { + match tracker.load_keys_from_database().await { Ok(_) => ok_response(), Err(e) => failed_to_reload_keys_response(e), } diff --git a/src/setup.rs b/src/setup.rs index 3461667cc..98d311178 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -16,13 +16,16 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve // Load peer keys if tracker.is_private() { - tracker.load_keys().await.expect("Could not retrieve keys from database."); + tracker + .load_keys_from_database() + .await + .expect("Could not retrieve keys from database."); } // Load whitelisted torrents if tracker.is_whitelisted() { tracker - .load_whitelist() + .load_whitelist_from_database() .await .expect("Could not load whitelist from database."); } diff --git 
a/src/tracker/mod.rs b/src/tracker/mod.rs index 5de9d6f3c..3048ded35 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -38,6 +38,9 @@ pub struct Tracker { #[derive(Debug, PartialEq, Default)] pub struct TorrentsMetrics { + // code-review: consider using `SwamStats` for + // `seeders`, `completed`, and `leechers` attributes. + // pub swam_stats: SwamStats; pub seeders: u64, pub completed: u64, pub leechers: u64, @@ -223,7 +226,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to `load_keys` from the database. - pub async fn load_keys(&self) -> Result<(), databases::error::Error> { + pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> { let keys_from_database = self.database.load_keys().await?; let mut keys = self.keys.write().await; @@ -301,7 +304,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. - pub async fn load_whitelist(&self) -> Result<(), databases::error::Error> { + pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { let whitelisted_torrents_from_database = self.database.load_whitelist().await?; let mut whitelist = self.whitelist.write().await; @@ -402,7 +405,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. 
- pub async fn load_persistent_torrents(&self) -> Result<(), databases::error::Error> { + pub async fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; let mut torrents = self.torrents.write().await; @@ -700,6 +703,55 @@ mod tests { ); } + #[tokio::test] + async fn it_should_return_all_the_peers_for_a_given_torrent() { + let tracker = public_tracker(); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + + let peers = tracker.get_all_torrent_peers(&info_hash).await; + + assert_eq!(peers, vec![peer]); + } + + #[tokio::test] + async fn it_should_return_all_the_peers_for_a_given_torrent_excluding_a_given_peer() { + let tracker = public_tracker(); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + + let peers = tracker.get_peers_for_peer(&info_hash, &peer).await; + + assert_eq!(peers, vec![]); + } + + #[tokio::test] + async fn it_should_return_the_torrent_metrics() { + let tracker = public_tracker(); + + tracker + .update_torrent_with_peer_and_get_stats(&sample_info_hash(), &leecher()) + .await; + + let torrent_metrics = tracker.get_torrents_metrics().await; + + assert_eq!( + torrent_metrics, + TorrentsMetrics { + seeders: 0, + completed: 0, + leechers: 1, + torrents: 1, + } + ); + } + mod for_all_config_modes { mod handling_an_announce_request { @@ -984,6 +1036,55 @@ mod tests { } } + mod handling_the_torrent_whitelist { + use crate::tracker::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + + #[tokio::test] + async fn it_should_add_a_torrent_to_the_whitelist() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + assert!(tracker.is_info_hash_whitelisted(&info_hash).await); 
+ } + + #[tokio::test] + async fn it_should_remove_a_torrent_from_the_whitelist() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + tracker.remove_torrent_from_whitelist(&info_hash).await.unwrap(); + + assert!(!tracker.is_info_hash_whitelisted(&info_hash).await); + } + + mod persistence { + use crate::tracker::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + + #[tokio::test] + async fn it_should_load_the_whitelist_from_the_database() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + // Remove torrent from the in-memory whitelist + tracker.whitelist.write().await.remove(&info_hash); + assert!(!tracker.is_info_hash_whitelisted(&info_hash).await); + + tracker.load_whitelist_from_database().await.unwrap(); + + assert!(tracker.is_info_hash_whitelisted(&info_hash).await); + } + } + } + mod handling_an_announce_request {} mod handling_an_scrape_request { @@ -1112,7 +1213,7 @@ mod tests { // Remove the newly generated key in memory tracker.keys.write().await.remove(&key.id()); - let result = tracker.load_keys().await; + let result = tracker.load_keys_from_database().await; assert!(result.is_ok()); assert!(tracker.verify_auth_key(&key.id()).await.is_ok()); @@ -1152,13 +1253,10 @@ mod tests { let swarm_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; assert_eq!(swarm_stats.completed, 1); - let torrents = tracker.get_all_torrent_peers(&info_hash).await; - assert_eq!(torrents.len(), 1); - // Remove the newly updated torrent from memory tracker.torrents.write().await.remove(&info_hash); - tracker.load_persistent_torrents().await.unwrap(); + tracker.load_torrents_from_database().await.unwrap(); let torrents = tracker.get_torrents().await; assert!(torrents.contains_key(&info_hash)); diff --git a/src/tracker/torrent.rs 
b/src/tracker/torrent.rs index 3362234f0..8675490e2 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -14,6 +14,7 @@ pub struct Entry { pub completed: u32, } +/// Swarm statistics for one torrent. /// Swarm metadata dictionary in the scrape response. /// BEP 48: #[derive(Debug, PartialEq, Default)] @@ -30,7 +31,8 @@ impl SwarmMetadata { } } -/// Swarm statistics. Alternative struct for swarm metadata in scrape response. +/// Swarm statistics for one torrent. +/// Alternative struct for swarm metadata in scrape response. #[derive(Debug, PartialEq, Default)] pub struct SwamStats { pub completed: u32, // The number of peers that have ever completed downloading From d0c30540e11a6dd71cefb8d4736b23adf9b87fe0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 3 Mar 2023 17:50:27 +0000 Subject: [PATCH 0428/1003] fix(tracker): typo, rename SwamStats to SwarmStats --- .../axum_implementation/responses/announce.rs | 8 +++---- src/http/warp_implementation/handlers.rs | 4 ++-- src/tracker/mod.rs | 22 +++++++++---------- src/tracker/torrent.rs | 2 +- src/udp/handlers.rs | 8 +++---- 5 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/http/axum_implementation/responses/announce.rs b/src/http/axum_implementation/responses/announce.rs index a91266490..81651767b 100644 --- a/src/http/axum_implementation/responses/announce.rs +++ b/src/http/axum_implementation/responses/announce.rs @@ -90,8 +90,8 @@ impl From for NonCompact { Self { interval: domain_announce_response.interval, interval_min: domain_announce_response.interval_min, - complete: domain_announce_response.swam_stats.seeders, - incomplete: domain_announce_response.swam_stats.leechers, + complete: domain_announce_response.swarm_stats.seeders, + incomplete: domain_announce_response.swarm_stats.leechers, peers, } } @@ -237,8 +237,8 @@ impl From for Compact { Self { interval: domain_announce_response.interval, interval_min: domain_announce_response.interval_min, - complete: 
domain_announce_response.swam_stats.seeders, - incomplete: domain_announce_response.swam_stats.leechers, + complete: domain_announce_response.swarm_stats.seeders, + incomplete: domain_announce_response.swarm_stats.leechers, peers, } } diff --git a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs index 6019bf016..b803a594f 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -66,7 +66,7 @@ pub async fn handle_announce( send_announce_response( &announce_request, - &response.swam_stats, + &response.swarm_stats, &response.peers, tracker.config.announce_interval, tracker.config.min_announce_interval, @@ -129,7 +129,7 @@ pub async fn handle_scrape( #[allow(clippy::ptr_arg)] fn send_announce_response( announce_request: &request::Announce, - torrent_stats: &torrent::SwamStats, + torrent_stats: &torrent::SwarmStats, peers: &Vec, interval: u32, interval_min: u32, diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 3048ded35..fbda95354 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -19,7 +19,7 @@ use tokio::sync::{RwLock, RwLockReadGuard}; use self::auth::KeyId; use self::error::Error; use self::peer::Peer; -use self::torrent::{SwamStats, SwarmMetadata}; +use self::torrent::{SwarmMetadata, SwarmStats}; use crate::config::Configuration; use crate::databases::driver::Driver; use crate::databases::{self, Database}; @@ -38,9 +38,9 @@ pub struct Tracker { #[derive(Debug, PartialEq, Default)] pub struct TorrentsMetrics { - // code-review: consider using `SwamStats` for + // code-review: consider using `SwarmStats` for // `seeders`, `completed`, and `leechers` attributes. 
- // pub swam_stats: SwamStats; + // pub swarm_stats: SwarmStats; pub seeders: u64, pub completed: u64, pub leechers: u64, @@ -50,7 +50,7 @@ pub struct TorrentsMetrics { #[derive(Debug, PartialEq, Default)] pub struct AnnounceData { pub peers: Vec, - pub swam_stats: SwamStats, + pub swarm_stats: SwarmStats, pub interval: u32, pub interval_min: u32, } @@ -147,13 +147,13 @@ impl Tracker { peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip())); - let swam_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + let swarm_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; let peers = self.get_peers_for_peer(info_hash, peer).await; AnnounceData { peers, - swam_stats, + swarm_stats, interval: self.config.announce_interval, interval_min: self.config.min_announce_interval, } @@ -446,7 +446,7 @@ impl Tracker { } } - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwamStats { + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwarmStats { // code-review: consider splitting the function in two (command and query segregation). 
// `update_torrent_with_peer` and `get_stats` @@ -469,7 +469,7 @@ impl Tracker { let (seeders, completed, leechers) = torrent_entry.get_stats(); - torrent::SwamStats { + torrent::SwarmStats { completed, seeders, leechers, @@ -898,7 +898,7 @@ mod tests { let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; - assert_eq!(announce_data.swam_stats.seeders, 1); + assert_eq!(announce_data.swarm_stats.seeders, 1); } #[tokio::test] @@ -909,7 +909,7 @@ mod tests { let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; - assert_eq!(announce_data.swam_stats.leechers, 1); + assert_eq!(announce_data.swarm_stats.leechers, 1); } #[tokio::test] @@ -923,7 +923,7 @@ mod tests { let mut completed_peer = completed_peer(); let announce_data = tracker.announce(&sample_info_hash(), &mut completed_peer, &peer_ip()).await; - assert_eq!(announce_data.swam_stats.completed, 1); + assert_eq!(announce_data.swarm_stats.completed, 1); } } } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 8675490e2..4a871aa89 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -34,7 +34,7 @@ impl SwarmMetadata { /// Swarm statistics for one torrent. /// Alternative struct for swarm metadata in scrape response. 
#[derive(Debug, PartialEq, Default)] -pub struct SwamStats { +pub struct SwarmStats { pub completed: u32, // The number of peers that have ever completed downloading pub seeders: u32, // The number of active peers that have completed downloading (seeders) pub leechers: u32, // The number of active peers that have not completed downloading (leechers) diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 6c54a6106..8fda77fb4 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -136,8 +136,8 @@ pub async fn handle_announce( Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), - leechers: NumberOfPeers(i64::from(response.swam_stats.leechers) as i32), - seeders: NumberOfPeers(i64::from(response.swam_stats.seeders) as i32), + leechers: NumberOfPeers(i64::from(response.swarm_stats.leechers) as i32), + seeders: NumberOfPeers(i64::from(response.swarm_stats.seeders) as i32), peers: response .peers .iter() @@ -157,8 +157,8 @@ Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), - leechers: NumberOfPeers(i64::from(response.swam_stats.leechers) as i32), - seeders: NumberOfPeers(i64::from(response.swam_stats.seeders) as i32), + leechers: NumberOfPeers(i64::from(response.swarm_stats.leechers) as i32), + seeders: NumberOfPeers(i64::from(response.swarm_stats.seeders) as i32), peers: response .peers .iter() From 12c8cf9916492962e1be19f6aae7fc8aa826ece9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 5 Mar 2023 11:39:49 +0000 Subject: [PATCH 0429/1003] refactor: ExpiringKey has always an expiration date After introducing the `KeyId` we no longer needed to have keys with no expiration date. A key without an expiration date is a `KeyId`.
So all `ExpiringKeys` have an expiration date. --- src/apis/resources/auth_key.rs | 19 ++++--- src/databases/mysql.rs | 6 +-- src/databases/sqlite.rs | 6 +-- src/tracker/auth.rs | 98 +++++----------------------- src/tracker/mod.rs | 2 +- 5 files changed, 32 insertions(+), 99 deletions(-) diff --git a/src/apis/resources/auth_key.rs b/src/apis/resources/auth_key.rs index e9989ca75..289e704b6 100644 --- a/src/apis/resources/auth_key.rs +++ b/src/apis/resources/auth_key.rs @@ -7,17 +7,20 @@ use crate::tracker::auth::{self, KeyId}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct AuthKey { - pub key: String, // todo: rename to `id` - pub valid_until: Option, + pub key: String, // todo: rename to `id` (API breaking change!) + pub valid_until: Option, // todo: `auth::ExpiringKey` has now always a value (API breaking change!) } impl From for auth::ExpiringKey { fn from(auth_key_resource: AuthKey) -> Self { + let valid_until = match auth_key_resource.valid_until { + Some(valid_until) => DurationSinceUnixEpoch::from_secs(valid_until), + None => DurationSinceUnixEpoch::from_secs(0), + }; + auth::ExpiringKey { id: auth_key_resource.key.parse::().unwrap(), - valid_until: auth_key_resource - .valid_until - .map(|valid_until| DurationSinceUnixEpoch::new(valid_until, 0)), + valid_until, } } } @@ -26,7 +29,7 @@ impl From for AuthKey { fn from(auth_key: auth::ExpiringKey) -> Self { AuthKey { key: auth_key.id.to_string(), - valid_until: auth_key.valid_until.map(|valid_until| valid_until.as_secs()), + valid_until: Some(auth_key.valid_until.as_secs()), } } } @@ -52,7 +55,7 @@ mod tests { auth::ExpiringKey::from(auth_key_resource), auth::ExpiringKey { id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()) + valid_until: Current::add(&Duration::new(duration_in_secs, 0)).unwrap() } ); } @@ -63,7 +66,7 @@ mod tests { let duration_in_secs = 60; let auth_key = auth::ExpiringKey { id:
"IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()), + valid_until: Current::add(&Duration::new(duration_in_secs, 0)).unwrap(), }; assert_eq!( diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 0d545aaa9..cbd5f3df9 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -118,7 +118,7 @@ impl Database for Mysql { "SELECT `key`, valid_until FROM `keys`", |(key, valid_until): (String, i64)| auth::ExpiringKey { id: key.parse::().unwrap(), - valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), + valid_until: Duration::from_secs(valid_until.unsigned_abs()), }, )?; @@ -193,7 +193,7 @@ impl Database for Mysql { Ok(key.map(|(key, expiry)| auth::ExpiringKey { id: key.parse::().unwrap(), - valid_until: Some(Duration::from_secs(expiry.unsigned_abs())), + valid_until: Duration::from_secs(expiry.unsigned_abs()), })) } @@ -201,7 +201,7 @@ impl Database for Mysql { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let key = auth_key.id.to_string(); - let valid_until = auth_key.valid_until.unwrap_or(Duration::ZERO).as_secs().to_string(); + let valid_until = auth_key.valid_until.as_secs().to_string(); conn.exec_drop( "INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index ab0addf4b..974f172e0 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -113,7 +113,7 @@ impl Database for Sqlite { Ok(auth::ExpiringKey { id: key.parse::().unwrap(), - valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), + valid_until: DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs()), }) })?; @@ -214,7 +214,7 @@ impl Database for Sqlite { let id: String = f.get(0).unwrap(); auth::ExpiringKey { id: id.parse::().unwrap(), - valid_until: Some(DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs())), + valid_until: 
DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs()), } })) } @@ -224,7 +224,7 @@ impl Database for Sqlite { let insert = conn.execute( "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - [auth_key.id.to_string(), auth_key.valid_until.unwrap().as_secs().to_string()], + [auth_key.id.to_string(), auth_key.valid_until.as_secs().to_string()], )?; if insert == 0 { diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 01de7a619..f8e1b3440 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -30,7 +30,7 @@ pub fn generate(lifetime: Duration) -> ExpiringKey { ExpiringKey { id: random_id.parse::().unwrap(), - valid_until: Some(Current::add(&lifetime).unwrap()), + valid_until: Current::add(&lifetime).unwrap(), } } @@ -42,30 +42,19 @@ pub fn generate(lifetime: Duration) -> ExpiringKey { pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = Current::now(); - match auth_key.valid_until { - Some(valid_until) => { - if valid_until < current_time { - Err(Error::KeyExpired { - location: Location::caller(), - }) - } else { - Ok(()) - } - } - None => Err(Error::UnableToReadKey { + if auth_key.valid_until < current_time { + Err(Error::KeyExpired { location: Location::caller(), - key_id: Box::new(auth_key.id.clone()), - }), + }) + } else { + Ok(()) } } #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct ExpiringKey { pub id: KeyId, - // todo: we can remove the `Option`. An `ExpiringKey` that does not expire - // is a `KeyId`. In other words, all `ExpiringKeys` must have an - // expiration time. 
- pub valid_until: Option, + pub valid_until: DurationSinceUnixEpoch, } impl std::fmt::Display for ExpiringKey { @@ -74,54 +63,18 @@ impl std::fmt::Display for ExpiringKey { f, "key: `{}`, valid until `{}`", self.id, - match self.valid_until { - Some(duration) => format!( - "{}", - DateTime::::from_utc( - NaiveDateTime::from_timestamp( - i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), - duration.subsec_nanos(), - ), - Utc - ) + DateTime::::from_utc( + NaiveDateTime::from_timestamp( + i64::try_from(self.valid_until.as_secs()).expect("Overflow of i64 seconds, very future!"), + self.valid_until.subsec_nanos(), ), - None => "Empty!?".to_string(), - } + Utc + ) ) } } impl ExpiringKey { - /// # Panics - /// - /// Will panic if bytes cannot be converted into a valid `KeyId`. - #[must_use] - pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { - if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { - Some(ExpiringKey { - id: key.parse::().unwrap(), - valid_until: None, - }) - } else { - None - } - } - - /// # Panics - /// - /// Will panic if string cannot be converted into a valid `KeyId`. 
- #[must_use] - pub fn from_string(key: &str) -> Option { - if key.len() == AUTH_KEY_LENGTH { - Some(ExpiringKey { - id: key.parse::().unwrap(), - valid_until: None, - }) - } else { - None - } - } - #[must_use] pub fn id(&self) -> KeyId { self.id.clone() } @@ -176,30 +129,7 @@ mod tests { use std::time::Duration; use crate::protocol::clock::{Current, StoppedTime}; - use crate::tracker::auth::{self, KeyId}; - - #[test] - fn auth_key_from_buffer() { - let auth_key = auth::ExpiringKey::from_buffer([ - 89, 90, 83, 108, 52, 108, 77, 90, 117, 112, 82, 117, 79, 112, 83, 82, 67, 51, 107, 114, 73, 75, 82, 53, 66, 80, 66, - 49, 52, 110, 114, 74, - ]); - - assert!(auth_key.is_some()); - assert_eq!( - auth_key.unwrap().id, - "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse::().unwrap() - ); - } - - #[test] - fn auth_key_from_string() { - let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = auth::ExpiringKey::from_string(key_string); - - assert!(auth_key.is_some()); - assert_eq!(auth_key.unwrap().id, key_string.parse::().unwrap()); - } + use crate::tracker::auth; #[test] fn auth_key_id_from_string() { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index fbda95354..0d04868a8 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1147,7 +1147,7 @@ mod tests { let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); - assert_eq!(key.valid_until.unwrap(), Duration::from_secs(100)); + assert_eq!(key.valid_until, Duration::from_secs(100)); } #[tokio::test] From c7015fad532983bb4661d510c3dd5cdb46c45acd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 5 Mar 2023 11:46:26 +0000 Subject: [PATCH 0430/1003] refactor: rename struct KeyId to Key There is no longer a conflict with the `ExpiringKey` struct that was also called `Key`.
--- src/apis/handlers.rs | 4 ++-- src/apis/resources/auth_key.rs | 12 ++++++------ src/databases/mysql.rs | 6 +++--- src/databases/sqlite.rs | 6 +++--- src/http/axum_implementation/extractors/key.rs | 6 +++--- src/http/warp_implementation/filters.rs | 8 ++++---- src/http/warp_implementation/handlers.rs | 8 ++++---- src/tracker/auth.rs | 14 +++++++------- src/tracker/error.rs | 2 +- src/tracker/mod.rs | 16 ++++++++-------- tests/http/client.rs | 6 +++--- tests/http/connection_info.rs | 4 ++-- tests/http_tracker.rs | 16 ++++++++-------- tests/tracker_api.rs | 4 ++-- 14 files changed, 56 insertions(+), 56 deletions(-) diff --git a/src/apis/handlers.rs b/src/apis/handlers.rs index f7b5e562c..652f491e5 100644 --- a/src/apis/handlers.rs +++ b/src/apis/handlers.rs @@ -17,7 +17,7 @@ use crate::apis::resources::auth_key::AuthKey; use crate::apis::resources::stats::Stats; use crate::apis::resources::torrent::ListItem; use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth::KeyId; +use crate::tracker::auth::Key; use crate::tracker::services::statistics::get_metrics; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; @@ -107,7 +107,7 @@ pub async fn delete_auth_key_handler( State(tracker): State>, Path(seconds_valid_or_key): Path, ) -> Response { - match KeyId::from_str(&seconds_valid_or_key.0) { + match Key::from_str(&seconds_valid_or_key.0) { Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), Ok(key_id) => match tracker.remove_auth_key(&key_id.to_string()).await { Ok(_) => ok_response(), diff --git a/src/apis/resources/auth_key.rs b/src/apis/resources/auth_key.rs index 289e704b6..954e633d0 100644 --- a/src/apis/resources/auth_key.rs +++ b/src/apis/resources/auth_key.rs @@ -3,11 +3,11 @@ use std::convert::From; use serde::{Deserialize, Serialize}; use crate::protocol::clock::DurationSinceUnixEpoch; -use crate::tracker::auth::{self, KeyId}; +use crate::tracker::auth::{self, Key}; 
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct AuthKey { - pub key: String, // todo: rename to `id` (API breaking change!) + pub key: String, // todo: rename to `id` (API breaking change!) pub valid_until: Option, // todo: `auth::ExpiringKey` has now always a value (API breaking change!) } @@ -19,7 +19,7 @@ impl From for auth::ExpiringKey { }; auth::ExpiringKey { - id: auth_key_resource.key.parse::().unwrap(), + id: auth_key_resource.key.parse::().unwrap(), valid_until, } } @@ -40,7 +40,7 @@ mod tests { use super::AuthKey; use crate::protocol::clock::{Current, TimeNow}; - use crate::tracker::auth::{self, KeyId}; + use crate::tracker::auth::{self, Key}; #[test] fn it_should_be_convertible_into_an_auth_key() { @@ -54,7 +54,7 @@ mod tests { assert_eq!( auth::ExpiringKey::from(auth_key_resource), auth::ExpiringKey { - id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line + id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Current::add(&Duration::new(duration_in_secs, 0)).unwrap() } ); @@ -65,7 +65,7 @@ mod tests { let duration_in_secs = 60; let auth_key = auth::ExpiringKey { - id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line + id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Current::add(&Duration::new(duration_in_secs, 0)).unwrap(), }; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index cbd5f3df9..00865d026 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -12,7 +12,7 @@ use super::driver::Driver; use crate::databases::{Database, Error}; use crate::protocol::common::AUTH_KEY_LENGTH; use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth::{self, KeyId}; +use crate::tracker::auth::{self, Key}; const DRIVER: Driver = Driver::MySQL; @@ -117,7 +117,7 @@ impl Database for Mysql { let keys = conn.query_map( "SELECT `key`, valid_until FROM `keys`", |(key, 
valid_until): (String, i64)| auth::ExpiringKey { - id: key.parse::().unwrap(), + id: key.parse::().unwrap(), valid_until: Duration::from_secs(valid_until.unsigned_abs()), }, )?; @@ -192,7 +192,7 @@ impl Database for Mysql { let key = query?; Ok(key.map(|(key, expiry)| auth::ExpiringKey { - id: key.parse::().unwrap(), + id: key.parse::().unwrap(), valid_until: Duration::from_secs(expiry.unsigned_abs()), })) } diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 974f172e0..6c5b9f600 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -9,7 +9,7 @@ use super::driver::Driver; use crate::databases::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth::{self, KeyId}; +use crate::tracker::auth::{self, Key}; const DRIVER: Driver = Driver::Sqlite3; @@ -112,7 +112,7 @@ impl Database for Sqlite { let valid_until: i64 = row.get(1)?; Ok(auth::ExpiringKey { - id: key.parse::().unwrap(), + id: key.parse::().unwrap(), valid_until: DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs()), }) })?; @@ -213,7 +213,7 @@ impl Database for Sqlite { let expiry: i64 = f.get(1).unwrap(); let id: String = f.get(0).unwrap(); auth::ExpiringKey { - id: id.parse::().unwrap(), + id: id.parse::().unwrap(), valid_until: DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs()), } })) diff --git a/src/http/axum_implementation/extractors/key.rs b/src/http/axum_implementation/extractors/key.rs index 6cc2f13e8..ecdc9d801 100644 --- a/src/http/axum_implementation/extractors/key.rs +++ b/src/http/axum_implementation/extractors/key.rs @@ -7,9 +7,9 @@ use axum::response::{IntoResponse, Response}; use crate::http::axum_implementation::handlers::auth::{self, KeyIdParam}; use crate::http::axum_implementation::responses; -use crate::tracker::auth::KeyId; +use crate::tracker::auth::Key; -pub struct ExtractKeyId(pub KeyId); +pub struct ExtractKeyId(pub Key); #[async_trait] impl 
FromRequestParts for ExtractKeyId @@ -21,7 +21,7 @@ where async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { match Path::::from_request_parts(parts, state).await { Ok(key_id_param) => { - let Ok(key_id) = key_id_param.0.value().parse::() else { + let Ok(key_id) = key_id_param.0.value().parse::() else { return Err(responses::error::Error::from( auth::Error::InvalidKeyFormat { location: Location::caller() diff --git a/src/http/warp_implementation/filters.rs b/src/http/warp_implementation/filters.rs index eb7abcd4d..a3000bfaa 100644 --- a/src/http/warp_implementation/filters.rs +++ b/src/http/warp_implementation/filters.rs @@ -12,7 +12,7 @@ use super::{request, WebResult}; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth::KeyId; +use crate::tracker::auth::Key; use crate::tracker::{self, peer}; /// Pass Arc along @@ -37,16 +37,16 @@ pub fn with_peer_id() -> impl Filter + /// Pass Arc along #[must_use] -pub fn with_auth_key_id() -> impl Filter,), Error = Infallible> + Clone { +pub fn with_auth_key_id() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() .map(|key: String| { - let key_id = KeyId::from_str(&key); + let key_id = Key::from_str(&key); match key_id { Ok(id) => Some(id), Err(_) => None, } }) - .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) + .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) } /// Check for `PeerAddress` diff --git a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs index b803a594f..4a64259bb 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -12,7 +12,7 @@ use super::error::Error; use super::{request, response, WebResult}; use crate::http::warp_implementation::peer_builder; use crate::protocol::info_hash::InfoHash; -use 
crate::tracker::auth::KeyId; +use crate::tracker::auth::Key; use crate::tracker::{self, auth, peer, statistics, torrent}; /// Authenticate `InfoHash` using optional `auth::Key` @@ -22,7 +22,7 @@ use crate::tracker::{self, auth, peer, statistics, torrent}; /// Will return `ServerError` that wraps the `tracker::error::Error` if unable to `authenticate_request`. pub async fn authenticate( info_hash: &InfoHash, - auth_key_id: &Option, + auth_key_id: &Option, tracker: Arc, ) -> Result<(), Error> { tracker @@ -38,7 +38,7 @@ pub async fn authenticate( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_announce_response`. pub async fn handle_announce( announce_request: request::Announce, - auth_key_id: Option, + auth_key_id: Option, tracker: Arc, ) -> WebResult { debug!("http announce request: {:#?}", announce_request); @@ -78,7 +78,7 @@ pub async fn handle_announce( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. 
pub async fn handle_scrape( scrape_request: request::Scrape, - auth_key_id: Option, + auth_key_id: Option, tracker: Arc, ) -> WebResult { let mut files: HashMap = HashMap::new(); diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index f8e1b3440..2f65b2bcb 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -29,7 +29,7 @@ pub fn generate(lifetime: Duration) -> ExpiringKey { debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); ExpiringKey { - id: random_id.parse::().unwrap(), + id: random_id.parse::().unwrap(), valid_until: Current::add(&lifetime).unwrap(), } } @@ -53,7 +53,7 @@ pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> { #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct ExpiringKey { - pub id: KeyId, + pub id: Key, pub valid_until: DurationSinceUnixEpoch, } @@ -76,18 +76,18 @@ impl std::fmt::Display for ExpiringKey { impl ExpiringKey { #[must_use] - pub fn id(&self) -> KeyId { + pub fn id(&self) -> Key { self.id.clone() } } #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] -pub struct KeyId(String); +pub struct Key(String); #[derive(Debug, PartialEq, Eq)] pub struct ParseKeyIdError; -impl FromStr for KeyId { +impl FromStr for Key { type Err = ParseKeyIdError; fn from_str(s: &str) -> Result { @@ -109,7 +109,7 @@ pub enum Error { #[error("Failed to read key: {key_id}, {location}")] UnableToReadKey { location: &'static Location<'static>, - key_id: Box, + key_id: Box, }, #[error("Key has expired, {location}")] KeyExpired { location: &'static Location<'static> }, @@ -134,7 +134,7 @@ mod tests { #[test] fn auth_key_id_from_string() { let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key_id = auth::KeyId::from_str(key_string); + let auth_key_id = auth::Key::from_str(key_string); assert!(auth_key_id.is_ok()); assert_eq!(auth_key_id.unwrap().to_string(), key_string); diff --git a/src/tracker/error.rs b/src/tracker/error.rs index acc85a1c2..f03f4b3e5 100644 
--- a/src/tracker/error.rs +++ b/src/tracker/error.rs @@ -6,7 +6,7 @@ use crate::located_error::LocatedError; pub enum Error { #[error("The supplied key: {key_id:?}, is not valid: {source}")] PeerKeyNotValid { - key_id: super::auth::KeyId, + key_id: super::auth::Key, source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, #[error("The peer is not authenticated, {location}")] diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 0d04868a8..448905ef7 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -16,7 +16,7 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; -use self::auth::KeyId; +use self::auth::Key; use self::error::Error; use self::peer::Peer; use self::torrent::{SwarmMetadata, SwarmStats}; @@ -28,7 +28,7 @@ use crate::protocol::info_hash::InfoHash; pub struct Tracker { pub config: Arc, mode: mode::Mode, - keys: RwLock>, + keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, stats_event_sender: Option>, @@ -204,14 +204,14 @@ impl Tracker { pub async fn remove_auth_key(&self, key: &str) -> Result<(), databases::error::Error> { // todo: change argument `key: &str` to `key_id: &KeyId` self.database.remove_key_from_keys(key).await?; - self.keys.write().await.remove(&key.parse::().unwrap()); + self.keys.write().await.remove(&key.parse::().unwrap()); Ok(()) } /// # Errors /// /// Will return a `key::Error` if unable to get any `auth_key`. - pub async fn verify_auth_key(&self, key_id: &KeyId) -> Result<(), auth::Error> { + pub async fn verify_auth_key(&self, key_id: &Key) -> Result<(), auth::Error> { // code-review: this function is public only because it's used in a test. // We should change the test and make it private. match self.keys.read().await.get(key_id) { @@ -324,7 +324,7 @@ impl Tracker { /// Will return a `torrent::Error::PeerNotAuthenticated` if the `key` is `None`. 
/// /// Will return a `torrent::Error::TorrentNotWhitelisted` if the the Tracker is in listed mode and the `info_hash` is not whitelisted. - pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), Error> { + pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), Error> { // todo: this is a deprecated method. // We're splitting authentication and authorization responsibilities. // Use `authenticate` and `authorize` instead. @@ -371,7 +371,7 @@ impl Tracker { /// # Errors /// /// Will return an error if the the authentication key cannot be verified. - pub async fn authenticate(&self, key_id: &KeyId) -> Result<(), auth::Error> { + pub async fn authenticate(&self, key_id: &Key) -> Result<(), auth::Error> { if self.is_private() { self.verify_auth_key(key_id).await } else { @@ -1165,7 +1165,7 @@ mod tests { async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() { let tracker = private_tracker(); - let unregistered_key_id = auth::KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key_id = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let result = tracker.authenticate(&unregistered_key_id).await; @@ -1187,7 +1187,7 @@ mod tests { async fn it_should_fail_verifying_an_unregistered_authentication_key() { let tracker = private_tracker(); - let unregistered_key_id = auth::KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key_id = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); assert!(tracker.verify_auth_key(&unregistered_key_id).await.is_err()); } diff --git a/tests/http/client.rs b/tests/http/client.rs index b59cf2ac6..fa5fd5d16 100644 --- a/tests/http/client.rs +++ b/tests/http/client.rs @@ -1,7 +1,7 @@ use std::net::IpAddr; use reqwest::{Client as ReqwestClient, Response}; -use torrust_tracker::tracker::auth::KeyId; +use torrust_tracker::tracker::auth::Key; use 
super::connection_info::ConnectionInfo; use super::requests::announce::{self, Query}; @@ -11,7 +11,7 @@ use super::requests::scrape; pub struct Client { connection_info: ConnectionInfo, reqwest_client: ReqwestClient, - key_id: Option, + key_id: Option, } /// URL components in this context: @@ -40,7 +40,7 @@ impl Client { } } - pub fn authenticated(connection_info: ConnectionInfo, key_id: KeyId) -> Self { + pub fn authenticated(connection_info: ConnectionInfo, key_id: Key) -> Self { Self { connection_info, reqwest_client: reqwest::Client::builder().build().unwrap(), diff --git a/tests/http/connection_info.rs b/tests/http/connection_info.rs index fb1dbf64e..eedaa73f0 100644 --- a/tests/http/connection_info.rs +++ b/tests/http/connection_info.rs @@ -1,9 +1,9 @@ -use torrust_tracker::tracker::auth::KeyId; +use torrust_tracker::tracker::auth::Key; #[derive(Clone, Debug)] pub struct ConnectionInfo { pub bind_address: String, - pub key_id: Option, + pub key_id: Option, } impl ConnectionInfo { diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 2360df9ab..b1b90f923 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1083,7 +1083,7 @@ mod warp_http_tracker_server { use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::KeyId; + use torrust_tracker::tracker::auth::Key; use crate::http::asserts::assert_is_announce_response; use crate::http::asserts_warp::{ @@ -1128,7 +1128,7 @@ mod warp_http_tracker_server { let http_tracker_server = start_private_http_tracker(Version::Warp).await; // The tracker does not have this key - let unregistered_key_id = KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key_id = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key_id) .announce(&QueryBuilder::default().query()) @@ -1145,7 +1145,7 @@ mod 
warp_http_tracker_server { use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::KeyId; + use torrust_tracker::tracker::auth::Key; use torrust_tracker::tracker::peer; use crate::common::fixtures::PeerBuilder; @@ -1242,7 +1242,7 @@ mod warp_http_tracker_server { ) .await; - let false_key_id: KeyId = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + let false_key_id: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); let response = Client::authenticated(http_tracker.get_connection_info(), false_key_id) .scrape( @@ -2396,7 +2396,7 @@ mod axum_http_tracker_server { use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::KeyId; + use torrust_tracker::tracker::auth::Key; use crate::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; use crate::http::client::Client; @@ -2453,7 +2453,7 @@ mod axum_http_tracker_server { let http_tracker_server = start_private_http_tracker(Version::Axum).await; // The tracker does not have this key - let unregistered_key_id = KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key_id = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key_id) .announce(&QueryBuilder::default().query()) @@ -2470,7 +2470,7 @@ mod axum_http_tracker_server { use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::KeyId; + use torrust_tracker::tracker::auth::Key; use torrust_tracker::tracker::peer; use crate::common::fixtures::PeerBuilder; @@ -2583,7 +2583,7 @@ mod axum_http_tracker_server { ) .await; - let false_key_id: KeyId = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + let false_key_id: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); let response = 
Client::authenticated(http_tracker.get_connection_info(), false_key_id) .scrape( diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index bec22e2b4..2c59cd8fb 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -638,7 +638,7 @@ mod tracker_apis { mod for_key_resources { use std::time::Duration; - use torrust_tracker::tracker::auth::KeyId; + use torrust_tracker::tracker::auth::Key; use crate::api::asserts::{ assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, @@ -665,7 +665,7 @@ mod tracker_apis { // Verify the key with the tracker assert!(api_server .tracker - .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) + .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); } From 7b690a4c6a63872ce411adbaf198b43381dcb4c4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 5 Mar 2023 11:59:45 +0000 Subject: [PATCH 0431/1003] refactor: rename ExpiringKey::id to ExpiringKey::key --- src/apis/resources/auth_key.rs | 8 ++++---- src/databases/mysql.rs | 6 +++--- src/databases/sqlite.rs | 6 +++--- src/tracker/auth.rs | 8 ++++---- src/tracker/mod.rs | 4 ++-- tests/tracker_api.rs | 8 ++++---- 6 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/apis/resources/auth_key.rs b/src/apis/resources/auth_key.rs index 954e633d0..72ef32a95 100644 --- a/src/apis/resources/auth_key.rs +++ b/src/apis/resources/auth_key.rs @@ -19,7 +19,7 @@ impl From for auth::ExpiringKey { }; auth::ExpiringKey { - id: auth_key_resource.key.parse::().unwrap(), + key: auth_key_resource.key.parse::().unwrap(), valid_until, } } @@ -28,7 +28,7 @@ impl From for AuthKey { fn from(auth_key: auth::ExpiringKey) -> Self { AuthKey { - key: auth_key.id.to_string(), + key: auth_key.key.to_string(), valid_until: Some(auth_key.valid_until.as_secs()), } } @@ -54,7 +54,7 @@ mod tests { assert_eq!( auth::ExpiringKey::from(auth_key_resource), auth::ExpiringKey { - id:
"IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Current::add(&Duration::new(duration_in_secs, 0)).unwrap() } ); @@ -65,7 +65,7 @@ mod tests { let duration_in_secs = 60; let auth_key = auth::ExpiringKey { - id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Current::add(&Duration::new(duration_in_secs, 0)).unwrap(), }; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 00865d026..4bb28f050 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -117,7 +117,7 @@ impl Database for Mysql { let keys = conn.query_map( "SELECT `key`, valid_until FROM `keys`", |(key, valid_until): (String, i64)| auth::ExpiringKey { - id: key.parse::().unwrap(), + key: key.parse::().unwrap(), valid_until: Duration::from_secs(valid_until.unsigned_abs()), }, )?; @@ -192,7 +192,7 @@ impl Database for Mysql { let key = query?; Ok(key.map(|(key, expiry)| auth::ExpiringKey { - id: key.parse::().unwrap(), + key: key.parse::().unwrap(), valid_until: Duration::from_secs(expiry.unsigned_abs()), })) } @@ -200,7 +200,7 @@ impl Database for Mysql { async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - let key = auth_key.id.to_string(); + let key = auth_key.key.to_string(); let valid_until = auth_key.valid_until.as_secs().to_string(); conn.exec_drop( diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 6c5b9f600..8fac09e47 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -112,7 +112,7 @@ impl Database for Sqlite { let valid_until: i64 = row.get(1)?; Ok(auth::ExpiringKey { - id: key.parse::().unwrap(), + key: key.parse::().unwrap(), valid_until: DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs()), }) 
})?; @@ -213,7 +213,7 @@ impl Database for Sqlite { let expiry: i64 = f.get(1).unwrap(); let id: String = f.get(0).unwrap(); auth::ExpiringKey { - id: id.parse::().unwrap(), + key: id.parse::().unwrap(), valid_until: DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs()), } })) @@ -224,7 +224,7 @@ impl Database for Sqlite { let insert = conn.execute( "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - [auth_key.id.to_string(), auth_key.valid_until.as_secs().to_string()], + [auth_key.key.to_string(), auth_key.valid_until.as_secs().to_string()], )?; if insert == 0 { diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 2f65b2bcb..09f324e2b 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -29,7 +29,7 @@ pub fn generate(lifetime: Duration) -> ExpiringKey { debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); ExpiringKey { - id: random_id.parse::().unwrap(), + key: random_id.parse::().unwrap(), valid_until: Current::add(&lifetime).unwrap(), } } @@ -53,7 +53,7 @@ pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> { #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct ExpiringKey { - pub id: Key, + pub key: Key, pub valid_until: DurationSinceUnixEpoch, } @@ -62,7 +62,7 @@ impl std::fmt::Display for ExpiringKey { write!( f, "key: `{}`, valid until `{}`", - self.id, + self.key, DateTime::::from_utc( NaiveDateTime::from_timestamp( i64::try_from(self.valid_until.as_secs()).expect("Overflow of i64 seconds, very future!"), @@ -77,7 +77,7 @@ impl std::fmt::Display for ExpiringKey { impl ExpiringKey { #[must_use] pub fn id(&self) -> Key { - self.id.clone() + self.key.clone() } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 448905ef7..34977b4de 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -190,7 +190,7 @@ impl Tracker { pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { let auth_key = auth::generate(lifetime); 
self.database.add_key_to_keys(&auth_key).await?; - self.keys.write().await.insert(auth_key.id.clone(), auth_key.clone()); + self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); Ok(auth_key) } @@ -233,7 +233,7 @@ impl Tracker { keys.clear(); for key in keys_from_database { - keys.insert(key.id.clone(), key); + keys.insert(key.key.clone(), key); } Ok(()) diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 2c59cd8fb..600d26f2f 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -734,7 +734,7 @@ mod tracker_apis { .unwrap(); let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(&auth_key.id.to_string()) + .delete_auth_key(&auth_key.key.to_string()) .await; assert_ok(response).await; @@ -777,7 +777,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(&auth_key.id.to_string()) + .delete_auth_key(&auth_key.key.to_string()) .await; assert_failed_to_delete_key(response).await; @@ -797,7 +797,7 @@ mod tracker_apis { .unwrap(); let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .delete_auth_key(&auth_key.id.to_string()) + .delete_auth_key(&auth_key.key.to_string()) .await; assert_token_not_valid(response).await; @@ -810,7 +810,7 @@ mod tracker_apis { .unwrap(); let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .delete_auth_key(&auth_key.id.to_string()) + .delete_auth_key(&auth_key.key.to_string()) .await; assert_unauthorized(response).await; From af038dedc0876bda2809505998502e2067ebdb89 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 5 Mar 2023 12:17:39 +0000 Subject: [PATCH 0432/1003] refactor: rename structs, attributes and variables with sufix KeyId or key_id to `Key` and `key`. removing the `Id` and `id`, since the `KeyId` struct was renamed to `Key`. 
--- src/apis/handlers.rs | 6 ++--- src/databases/mod.rs | 4 ++-- .../axum_implementation/extractors/key.rs | 14 +++++------ .../axum_implementation/handlers/announce.rs | 6 ++--- src/http/axum_implementation/handlers/auth.rs | 4 ++-- .../axum_implementation/handlers/scrape.rs | 6 ++--- src/http/warp_implementation/filters.rs | 6 ++--- src/http/warp_implementation/handlers.rs | 12 +++++----- src/http/warp_implementation/routes.rs | 6 ++--- src/tracker/auth.rs | 18 +++++++------- src/tracker/error.rs | 4 ++-- src/tracker/mod.rs | 24 +++++++++---------- tests/http/client.rs | 18 +++++++------- tests/http/connection_info.rs | 4 ++-- tests/http_tracker.rs | 24 +++++++++---------- tests/tracker_api.rs | 10 ++++---- 16 files changed, 83 insertions(+), 83 deletions(-) diff --git a/src/apis/handlers.rs b/src/apis/handlers.rs index 652f491e5..410def39b 100644 --- a/src/apis/handlers.rs +++ b/src/apis/handlers.rs @@ -101,15 +101,15 @@ pub async fn generate_auth_key_handler(State(tracker): State>, Path } #[derive(Deserialize)] -pub struct KeyIdParam(String); +pub struct KeyParam(String); pub async fn delete_auth_key_handler( State(tracker): State>, - Path(seconds_valid_or_key): Path, + Path(seconds_valid_or_key): Path, ) -> Response { match Key::from_str(&seconds_valid_or_key.0) { Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), - Ok(key_id) => match tracker.remove_auth_key(&key_id.to_string()).await { + Ok(key) => match tracker.remove_auth_key(&key.to_string()).await { Ok(_) => ok_response(), Err(e) => failed_to_delete_key_response(e), }, diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 038be0ea3..247f571d7 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -70,12 +70,12 @@ pub trait Database: Sync + Send { async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; - // todo: replace type `&str` with `&KeyId` + // todo: replace type `&str` with `&Key` async fn get_key_from_keys(&self, key: &str) -> Result, 
Error>; async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result; - // todo: replace type `&str` with `&KeyId` + // todo: replace type `&str` with `&Key` async fn remove_key_from_keys(&self, key: &str) -> Result; async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { diff --git a/src/http/axum_implementation/extractors/key.rs b/src/http/axum_implementation/extractors/key.rs index ecdc9d801..50aef4a7c 100644 --- a/src/http/axum_implementation/extractors/key.rs +++ b/src/http/axum_implementation/extractors/key.rs @@ -5,30 +5,30 @@ use axum::extract::{FromRequestParts, Path}; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use crate::http::axum_implementation::handlers::auth::{self, KeyIdParam}; +use crate::http::axum_implementation::handlers::auth::{self, KeyParam}; use crate::http::axum_implementation::responses; use crate::tracker::auth::Key; -pub struct ExtractKeyId(pub Key); +pub struct Extract(pub Key); #[async_trait] -impl FromRequestParts for ExtractKeyId +impl FromRequestParts for Extract where S: Send + Sync, { type Rejection = Response; async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { - match Path::::from_request_parts(parts, state).await { - Ok(key_id_param) => { - let Ok(key_id) = key_id_param.0.value().parse::() else { + match Path::::from_request_parts(parts, state).await { + Ok(key_param) => { + let Ok(key) = key_param.0.value().parse::() else { return Err(responses::error::Error::from( auth::Error::InvalidKeyFormat { location: Location::caller() }) .into_response()) }; - Ok(ExtractKeyId(key_id)) + Ok(Extract(key)) } Err(rejection) => match rejection { axum::extract::rejection::PathRejection::FailedToDeserializePathParams(_) => { diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 93dbc8115..4bb06da73 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ 
b/src/http/axum_implementation/handlers/announce.rs @@ -8,7 +8,7 @@ use axum::response::{IntoResponse, Response}; use log::debug; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; -use crate::http::axum_implementation::extractors::key::ExtractKeyId; +use crate::http::axum_implementation::extractors::key::Extract; use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::handlers::auth; @@ -41,12 +41,12 @@ pub async fn handle_without_key( pub async fn handle_with_key( State(tracker): State>, ExtractRequest(announce_request): ExtractRequest, - ExtractKeyId(key_id): ExtractKeyId, + Extract(key): Extract, remote_client_ip: RemoteClientIp, ) -> Response { debug!("http announce request: {:#?}", announce_request); - match tracker.authenticate(&key_id).await { + match tracker.authenticate(&key).await { Ok(_) => (), Err(error) => return responses::error::Error::from(error).into_response(), } diff --git a/src/http/axum_implementation/handlers/auth.rs b/src/http/axum_implementation/handlers/auth.rs index 5673ea851..b1b73e60e 100644 --- a/src/http/axum_implementation/handlers/auth.rs +++ b/src/http/axum_implementation/handlers/auth.rs @@ -7,9 +7,9 @@ use crate::http::axum_implementation::responses; use crate::tracker::auth; #[derive(Deserialize)] -pub struct KeyIdParam(String); +pub struct KeyParam(String); -impl KeyIdParam { +impl KeyParam { #[must_use] pub fn value(&self) -> String { self.0.clone() diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 19d902f8e..41d6bf3dc 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -4,7 +4,7 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use crate::http::axum_implementation::extractors::key::ExtractKeyId; 
+use crate::http::axum_implementation::extractors::key::Extract; use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; @@ -31,12 +31,12 @@ pub async fn handle_without_key( pub async fn handle_with_key( State(tracker): State>, ExtractRequest(scrape_request): ExtractRequest, - ExtractKeyId(key_id): ExtractKeyId, + Extract(key): Extract, remote_client_ip: RemoteClientIp, ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); - match tracker.authenticate(&key_id).await { + match tracker.authenticate(&key).await { Ok(_) => (), Err(_) => return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await, } diff --git a/src/http/warp_implementation/filters.rs b/src/http/warp_implementation/filters.rs index a3000bfaa..06168d82a 100644 --- a/src/http/warp_implementation/filters.rs +++ b/src/http/warp_implementation/filters.rs @@ -37,11 +37,11 @@ pub fn with_peer_id() -> impl Filter + /// Pass Arc along #[must_use] -pub fn with_auth_key_id() -> impl Filter,), Error = Infallible> + Clone { +pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() .map(|key: String| { - let key_id = Key::from_str(&key); - match key_id { + let key = Key::from_str(&key); + match key { Ok(id) => Some(id), Err(_) => None, } diff --git a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs index 4a64259bb..f9aedeb8f 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -22,11 +22,11 @@ use crate::tracker::{self, auth, peer, statistics, torrent}; /// Will return `ServerError` that wraps the `tracker::error::Error` if unable to `authenticate_request`. 
pub async fn authenticate( info_hash: &InfoHash, - auth_key_id: &Option, + auth_key: &Option, tracker: Arc, ) -> Result<(), Error> { tracker - .authenticate_request(info_hash, auth_key_id) + .authenticate_request(info_hash, auth_key) .await .map_err(|e| Error::TrackerError { source: (Arc::new(e) as Arc).into(), @@ -38,7 +38,7 @@ pub async fn authenticate( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_announce_response`. pub async fn handle_announce( announce_request: request::Announce, - auth_key_id: Option, + auth_key: Option, tracker: Arc, ) -> WebResult { debug!("http announce request: {:#?}", announce_request); @@ -46,7 +46,7 @@ pub async fn handle_announce( let info_hash = announce_request.info_hash; let remote_client_ip = announce_request.peer_addr; - authenticate(&info_hash, &auth_key_id, tracker.clone()).await?; + authenticate(&info_hash, &auth_key, tracker.clone()).await?; let mut peer = peer_builder::from_request(&announce_request, &remote_client_ip); @@ -78,7 +78,7 @@ pub async fn handle_announce( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. 
pub async fn handle_scrape( scrape_request: request::Scrape, - auth_key_id: Option, + auth_key: Option, tracker: Arc, ) -> WebResult { let mut files: HashMap = HashMap::new(); @@ -87,7 +87,7 @@ pub async fn handle_scrape( for info_hash in &scrape_request.info_hashes { let scrape_entry = match db.get(info_hash) { Some(torrent_info) => { - if authenticate(info_hash, &auth_key_id, tracker.clone()).await.is_ok() { + if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { let (seeders, completed, leechers) = torrent_info.get_stats(); response::ScrapeEntry { complete: seeders, diff --git a/src/http/warp_implementation/routes.rs b/src/http/warp_implementation/routes.rs index 2ee60e8c9..c46c502e4 100644 --- a/src/http/warp_implementation/routes.rs +++ b/src/http/warp_implementation/routes.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use warp::{Filter, Rejection}; -use super::filters::{with_announce_request, with_auth_key_id, with_scrape_request, with_tracker}; +use super::filters::{with_announce_request, with_auth_key, with_scrape_request, with_tracker}; use super::handlers::{handle_announce, handle_scrape, send_error}; use crate::tracker; @@ -20,7 +20,7 @@ fn announce(tracker: Arc) -> impl Filter) -> impl Filter Result { if s.len() != AUTH_KEY_LENGTH { - return Err(ParseKeyIdError); + return Err(ParseKeyError); } Ok(Self(s.to_string())) @@ -106,10 +106,10 @@ pub enum Error { KeyVerificationError { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, - #[error("Failed to read key: {key_id}, {location}")] + #[error("Failed to read key: {key}, {location}")] UnableToReadKey { location: &'static Location<'static>, - key_id: Box, + key: Box, }, #[error("Key has expired, {location}")] KeyExpired { location: &'static Location<'static> }, @@ -132,12 +132,12 @@ mod tests { use crate::tracker::auth; #[test] - fn auth_key_id_from_string() { + fn auth_key_from_string() { let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key_id = 
auth::Key::from_str(key_string); + let auth_key = auth::Key::from_str(key_string); - assert!(auth_key_id.is_ok()); - assert_eq!(auth_key_id.unwrap().to_string(), key_string); + assert!(auth_key.is_ok()); + assert_eq!(auth_key.unwrap().to_string(), key_string); } #[test] diff --git a/src/tracker/error.rs b/src/tracker/error.rs index f03f4b3e5..51bcbf3bb 100644 --- a/src/tracker/error.rs +++ b/src/tracker/error.rs @@ -4,9 +4,9 @@ use crate::located_error::LocatedError; #[derive(thiserror::Error, Debug, Clone)] pub enum Error { - #[error("The supplied key: {key_id:?}, is not valid: {source}")] + #[error("The supplied key: {key:?}, is not valid: {source}")] PeerKeyNotValid { - key_id: super::auth::Key, + key: super::auth::Key, source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, #[error("The peer is not authenticated, {location}")] diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 34977b4de..2ebc4bfc3 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -200,9 +200,9 @@ impl Tracker { /// /// # Panics /// - /// Will panic if key cannot be converted into a valid `KeyId`. + /// Will panic if key cannot be converted into a valid `Key`. pub async fn remove_auth_key(&self, key: &str) -> Result<(), databases::error::Error> { - // todo: change argument `key: &str` to `key_id: &KeyId` + // todo: change argument `key: &str` to `key: &Key` self.database.remove_key_from_keys(key).await?; self.keys.write().await.remove(&key.parse::().unwrap()); Ok(()) @@ -211,13 +211,13 @@ impl Tracker { /// # Errors /// /// Will return a `key::Error` if unable to get any `auth_key`. - pub async fn verify_auth_key(&self, key_id: &Key) -> Result<(), auth::Error> { + pub async fn verify_auth_key(&self, key: &Key) -> Result<(), auth::Error> { // code-review: this function is public only because it's used in a test. // We should change the test and make it private. 
- match self.keys.read().await.get(key_id) { + match self.keys.read().await.get(key) { None => Err(auth::Error::UnableToReadKey { location: Location::caller(), - key_id: Box::new(key_id.clone()), + key: Box::new(key.clone()), }), Some(key) => auth::verify(key), } @@ -342,7 +342,7 @@ impl Tracker { Some(key) => { if let Err(e) = self.verify_auth_key(key).await { return Err(Error::PeerKeyNotValid { - key_id: key.clone(), + key: key.clone(), source: (Arc::new(e) as Arc).into(), }); } @@ -371,9 +371,9 @@ impl Tracker { /// # Errors /// /// Will return an error if the the authentication key cannot be verified. - pub async fn authenticate(&self, key_id: &Key) -> Result<(), auth::Error> { + pub async fn authenticate(&self, key: &Key) -> Result<(), auth::Error> { if self.is_private() { - self.verify_auth_key(key_id).await + self.verify_auth_key(key).await } else { Ok(()) } @@ -1165,9 +1165,9 @@ mod tests { async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() { let tracker = private_tracker(); - let unregistered_key_id = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let result = tracker.authenticate(&unregistered_key_id).await; + let result = tracker.authenticate(&unregistered_key).await; assert!(result.is_err()); } @@ -1187,9 +1187,9 @@ mod tests { async fn it_should_fail_verifying_an_unregistered_authentication_key() { let tracker = private_tracker(); - let unregistered_key_id = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - assert!(tracker.verify_auth_key(&unregistered_key_id).await.is_err()); + assert!(tracker.verify_auth_key(&unregistered_key).await.is_err()); } #[tokio::test] diff --git a/tests/http/client.rs b/tests/http/client.rs index fa5fd5d16..762401078 100644 --- a/tests/http/client.rs +++ b/tests/http/client.rs 
@@ -11,7 +11,7 @@ use super::requests::scrape; pub struct Client { connection_info: ConnectionInfo, reqwest_client: ReqwestClient, - key_id: Option, + key: Option, } /// URL components in this context: @@ -27,7 +27,7 @@ impl Client { Self { connection_info, reqwest_client: reqwest::Client::builder().build().unwrap(), - key_id: None, + key: None, } } @@ -36,15 +36,15 @@ impl Client { Self { connection_info, reqwest_client: reqwest::Client::builder().local_address(local_address).build().unwrap(), - key_id: None, + key: None, } } - pub fn authenticated(connection_info: ConnectionInfo, key_id: Key) -> Self { + pub fn authenticated(connection_info: ConnectionInfo, key: Key) -> Self { Self { connection_info, reqwest_client: reqwest::Client::builder().build().unwrap(), - key_id: Some(key_id), + key: Some(key), } } @@ -56,8 +56,8 @@ impl Client { self.get(&self.build_scrape_path_and_query(query)).await } - pub async fn announce_with_header(&self, query: &Query, key_id: &str, value: &str) -> Response { - self.get_with_header(&self.build_announce_path_and_query(query), key_id, value) + pub async fn announce_with_header(&self, query: &Query, key: &str, value: &str) -> Response { + self.get_with_header(&self.build_announce_path_and_query(query), key, value) .await } @@ -83,8 +83,8 @@ impl Client { } fn build_path(&self, path: &str) -> String { - match &self.key_id { - Some(key_id) => format!("{path}/{key_id}"), + match &self.key { + Some(key) => format!("{path}/{key}"), None => path.to_string(), } } diff --git a/tests/http/connection_info.rs b/tests/http/connection_info.rs index eedaa73f0..5736271fd 100644 --- a/tests/http/connection_info.rs +++ b/tests/http/connection_info.rs @@ -3,14 +3,14 @@ use torrust_tracker::tracker::auth::Key; #[derive(Clone, Debug)] pub struct ConnectionInfo { pub bind_address: String, - pub key_id: Option, + pub key: Option, } impl ConnectionInfo { pub fn anonymous(bind_address: &str) -> Self { Self { bind_address: bind_address.to_string(), - key_id: 
None, + key: None, } } } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index b1b90f923..4219be30a 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1128,9 +1128,9 @@ mod warp_http_tracker_server { let http_tracker_server = start_private_http_tracker(Version::Warp).await; // The tracker does not have this key - let unregistered_key_id = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key_id) + let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; @@ -1242,9 +1242,9 @@ mod warp_http_tracker_server { ) .await; - let false_key_id: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - let response = Client::authenticated(http_tracker.get_connection_info(), false_key_id) + let response = Client::authenticated(http_tracker.get_connection_info(), false_key) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2437,11 +2437,11 @@ mod axum_http_tracker_server { async fn should_fail_if_the_key_query_param_cannot_be_parsed() { let http_tracker_server = start_private_http_tracker(Version::Axum).await; - let invalid_key_id = "INVALID_KEY_ID"; + let invalid_key = "INVALID_KEY"; let response = Client::new(http_tracker_server.get_connection_info()) .get(&format!( - "announce/{invalid_key_id}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" + 
"announce/{invalid_key}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" )) .await; @@ -2453,9 +2453,9 @@ mod axum_http_tracker_server { let http_tracker_server = start_private_http_tracker(Version::Axum).await; // The tracker does not have this key - let unregistered_key_id = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key_id) + let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; @@ -2484,11 +2484,11 @@ mod axum_http_tracker_server { async fn should_fail_if_the_key_query_param_cannot_be_parsed() { let http_tracker_server = start_private_http_tracker(Version::Axum).await; - let invalid_key_id = "INVALID_KEY_ID"; + let invalid_key = "INVALID_KEY"; let response = Client::new(http_tracker_server.get_connection_info()) .get(&format!( - "scrape/{invalid_key_id}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" )) .await; @@ -2583,9 +2583,9 @@ mod axum_http_tracker_server { ) .await; - let false_key_id: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - let response = Client::authenticated(http_tracker.get_connection_info(), false_key_id) + let response = Client::authenticated(http_tracker.get_connection_info(), false_key) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 600d26f2f..35d9af248 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -741,10 
+741,10 @@ mod tracker_apis { } #[tokio::test] - async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { + async fn should_fail_deleting_an_auth_key_when_the_key_is_invalid() { let api_server = start_default_api().await; - let invalid_auth_key_ids = [ + let invalid_auth_keys = [ // "", it returns a 404 // " ", it returns a 404 "0", @@ -754,12 +754,12 @@ mod tracker_apis { "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8zs", // 34 char key cspell:disable-line ]; - for invalid_auth_key_id in &invalid_auth_key_ids { + for invalid_auth_key in &invalid_auth_keys { let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(invalid_auth_key_id) + .delete_auth_key(invalid_auth_key) .await; - assert_invalid_auth_key_param(response, invalid_auth_key_id).await; + assert_invalid_auth_key_param(response, invalid_auth_key).await; } } From 6fc6c14ba613aed0c102845b71315779926b2131 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 6 Mar 2023 11:09:29 +0000 Subject: [PATCH 0433/1003] test(http): add tests to axum extractor for announce request --- .../extractors/announce_request.rs | 100 +++++++++++++++--- .../axum_implementation/responses/error.rs | 2 +- 2 files changed, 85 insertions(+), 17 deletions(-) diff --git a/src/http/axum_implementation/extractors/announce_request.rs b/src/http/axum_implementation/extractors/announce_request.rs index 0371be9a4..1680cd15c 100644 --- a/src/http/axum_implementation/extractors/announce_request.rs +++ b/src/http/axum_implementation/extractors/announce_request.rs @@ -19,27 +19,95 @@ where type Rejection = Response; async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { - let raw_query = parts.uri.query(); - - if raw_query.is_none() { - return Err(responses::error::Error::from(ParseAnnounceQueryError::MissingParams { - location: Location::caller(), - }) - .into_response()); + match extract_announce_from(parts.uri.query()) { + Ok(announce_request) => Ok(ExtractRequest(announce_request)), + Err(error) => 
Err(error.into_response()), } + } +} - let query = raw_query.unwrap().parse::(); +fn extract_announce_from(maybe_raw_query: Option<&str>) -> Result { + if maybe_raw_query.is_none() { + return Err(responses::error::Error::from(ParseAnnounceQueryError::MissingParams { + location: Location::caller(), + })); + } - if let Err(error) = query { - return Err(responses::error::Error::from(error).into_response()); - } + let query = maybe_raw_query.unwrap().parse::(); - let announce_request = Announce::try_from(query.unwrap()); + if let Err(error) = query { + return Err(responses::error::Error::from(error)); + } - if let Err(error) = announce_request { - return Err(responses::error::Error::from(error).into_response()); - } + let announce_request = Announce::try_from(query.unwrap()); + + if let Err(error) = announce_request { + return Err(responses::error::Error::from(error)); + } + + Ok(announce_request.unwrap()) +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use super::extract_announce_from; + use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; + use crate::http::axum_implementation::responses::error::Error; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer; + + fn assert_error_response(error: &Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. 
Error: {error:?}" + ); + } + + #[test] + fn it_should_extract_the_announce_request_from_the_url_query_params() { + let raw_query = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0"; + + let announce = extract_announce_from(Some(raw_query)).unwrap(); + + assert_eq!( + announce, + Announce { + info_hash: InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(), + peer_id: peer::Id(*b"-qB00000000000000001"), + port: 17548, + downloaded: Some(0), + uploaded: Some(0), + left: Some(0), + event: Some(Event::Completed), + compact: Some(Compact::NotAccepted), + } + ); + } + + #[test] + fn it_should_reject_a_request_without_query_params() { + let response = extract_announce_from(None).unwrap_err(); + + assert_error_response( + &response, + "Cannot parse query params for announce request: missing query params for announce request", + ); + } + + #[test] + fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed() { + let invalid_query = "param1=value1=value2"; + let response = extract_announce_from(Some(invalid_query)).unwrap_err(); + + assert_error_response(&response, "Cannot parse query params"); + } + + #[test] + fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed_into_an_announce_request() { + let response = extract_announce_from(Some("param1=value1")).unwrap_err(); - Ok(ExtractRequest(announce_request.unwrap())) + assert_error_response(&response, "Cannot parse query params for announce request"); } } diff --git a/src/http/axum_implementation/responses/error.rs b/src/http/axum_implementation/responses/error.rs index bcf2aaa57..0bcdbd9fb 100644 --- a/src/http/axum_implementation/responses/error.rs +++ b/src/http/axum_implementation/responses/error.rs @@ -2,7 +2,7 @@ use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; use serde::{self, Serialize}; -#[derive(Serialize)] 
+#[derive(Serialize, Debug, PartialEq)] pub struct Error { #[serde(rename = "failure reason")] pub failure_reason: String, From 7b3162267bc493a40b24f46f153857c93649b6ed Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 6 Mar 2023 16:02:19 +0000 Subject: [PATCH 0434/1003] test(http): add tests to axum extractor for scrape request --- .../extractors/scrape_request.rs | 120 +++++++++++++++--- 1 file changed, 105 insertions(+), 15 deletions(-) diff --git a/src/http/axum_implementation/extractors/scrape_request.rs b/src/http/axum_implementation/extractors/scrape_request.rs index 4212abfcb..998728f59 100644 --- a/src/http/axum_implementation/extractors/scrape_request.rs +++ b/src/http/axum_implementation/extractors/scrape_request.rs @@ -19,27 +19,117 @@ where type Rejection = Response; async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { - let raw_query = parts.uri.query(); - - if raw_query.is_none() { - return Err(responses::error::Error::from(ParseScrapeQueryError::MissingParams { - location: Location::caller(), - }) - .into_response()); + match extract_scrape_from(parts.uri.query()) { + Ok(scrape_request) => Ok(ExtractRequest(scrape_request)), + Err(error) => Err(error.into_response()), } + } +} - let query = raw_query.unwrap().parse::(); +fn extract_scrape_from(maybe_raw_query: Option<&str>) -> Result { + if maybe_raw_query.is_none() { + return Err(responses::error::Error::from(ParseScrapeQueryError::MissingParams { + location: Location::caller(), + })); + } - if let Err(error) = query { - return Err(responses::error::Error::from(error).into_response()); - } + let query = maybe_raw_query.unwrap().parse::(); + + if let Err(error) = query { + return Err(responses::error::Error::from(error)); + } + + let scrape_request = Scrape::try_from(query.unwrap()); + + if let Err(error) = scrape_request { + return Err(responses::error::Error::from(error)); + } + + Ok(scrape_request.unwrap()) +} - let scrape_request = Scrape::try_from(query.unwrap()); 
+#[cfg(test)] +mod tests { + use std::str::FromStr; - if let Err(error) = scrape_request { - return Err(responses::error::Error::from(error).into_response()); + use super::extract_scrape_from; + use crate::http::axum_implementation::requests::scrape::Scrape; + use crate::http::axum_implementation::responses::error::Error; + use crate::protocol::info_hash::InfoHash; + + struct TestInfoHash { + pub bencoded: String, + pub value: InfoHash, + } + + fn test_info_hash() -> TestInfoHash { + TestInfoHash { + bencoded: "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0".to_owned(), + value: InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(), } + } + + fn assert_error_response(error: &Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. Error: {error:?}" + ); + } + + #[test] + fn it_should_extract_the_scrape_request_from_the_url_query_params() { + let info_hash = test_info_hash(); + + let raw_query = format!("info_hash={}", info_hash.bencoded); + + let scrape = extract_scrape_from(Some(&raw_query)).unwrap(); + + assert_eq!( + scrape, + Scrape { + info_hashes: vec![info_hash.value], + } + ); + } + + #[test] + fn it_should_extract_the_scrape_request_from_the_url_query_params_with_more_than_one_info_hash() { + let info_hash = test_info_hash(); + + let raw_query = format!("info_hash={}&info_hash={}", info_hash.bencoded, info_hash.bencoded); + + let scrape = extract_scrape_from(Some(&raw_query)).unwrap(); + + assert_eq!( + scrape, + Scrape { + info_hashes: vec![info_hash.value, info_hash.value], + } + ); + } + + #[test] + fn it_should_reject_a_request_without_query_params() { + let response = extract_scrape_from(None).unwrap_err(); + + assert_error_response( + &response, + "Cannot parse query params for scrape request: missing query params for scrape request", + ); + } + + #[test] + fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed() { + let 
invalid_query = "param1=value1=value2"; + let response = extract_scrape_from(Some(invalid_query)).unwrap_err(); + + assert_error_response(&response, "Cannot parse query params"); + } + + #[test] + fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed_into_a_scrape_request() { + let response = extract_scrape_from(Some("param1=value1")).unwrap_err(); - Ok(ExtractRequest(scrape_request.unwrap())) + assert_error_response(&response, "Cannot parse query params for scrape request"); } } From 828065b38dab9164778eea575b3b9b241ad1ec80 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 6 Mar 2023 17:07:01 +0000 Subject: [PATCH 0435/1003] test(http): add tests to Axum extractor for auth key --- .../axum_implementation/extractors/key.rs | 101 ++++++++++++------ 1 file changed, 70 insertions(+), 31 deletions(-) diff --git a/src/http/axum_implementation/extractors/key.rs b/src/http/axum_implementation/extractors/key.rs index 50aef4a7c..2a3f2a991 100644 --- a/src/http/axum_implementation/extractors/key.rs +++ b/src/http/axum_implementation/extractors/key.rs @@ -1,6 +1,8 @@ +//! Wrapper for Axum `Path` extractor to return custom errors. 
use std::panic::Location; use axum::async_trait; +use axum::extract::rejection::PathRejection; use axum::extract::{FromRequestParts, Path}; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; @@ -19,37 +21,74 @@ where type Rejection = Response; async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { - match Path::::from_request_parts(parts, state).await { - Ok(key_param) => { - let Ok(key) = key_param.0.value().parse::() else { - return Err(responses::error::Error::from( - auth::Error::InvalidKeyFormat { - location: Location::caller() - }) - .into_response()) - }; - Ok(Extract(key)) - } - Err(rejection) => match rejection { - axum::extract::rejection::PathRejection::FailedToDeserializePathParams(_) => { - return Err(responses::error::Error::from(auth::Error::InvalidKeyFormat { - location: Location::caller(), - }) - .into_response()) - } - axum::extract::rejection::PathRejection::MissingPathParams(_) => { - return Err(responses::error::Error::from(auth::Error::MissingAuthKey { - location: Location::caller(), - }) - .into_response()) - } - _ => { - return Err(responses::error::Error::from(auth::Error::CannotExtractKeyParam { - location: Location::caller(), - }) - .into_response()) - } - }, + // Extract `key` from URL path with Axum `Path` extractor + let maybe_path_with_key = Path::::from_request_parts(parts, state).await; + + match extract_key(maybe_path_with_key) { + Ok(key) => Ok(Extract(key)), + Err(error) => Err(error.into_response()), + } + } +} + +fn extract_key(path_extractor_result: Result, PathRejection>) -> Result { + match path_extractor_result { + Ok(key_param) => match parse_key(&key_param.0.value()) { + Ok(key) => Ok(key), + Err(error) => Err(error), + }, + Err(path_rejection) => Err(custom_error(&path_rejection)), + } +} + +fn parse_key(key: &str) -> Result { + let key = key.parse::(); + + match key { + Ok(key) => Ok(key), + Err(_parse_key_error) => Err(responses::error::Error::from(auth::Error::InvalidKeyFormat 
{ + location: Location::caller(), + })), + } +} + +fn custom_error(rejection: &PathRejection) -> responses::error::Error { + match rejection { + axum::extract::rejection::PathRejection::FailedToDeserializePathParams(_) => { + responses::error::Error::from(auth::Error::InvalidKeyFormat { + location: Location::caller(), + }) + } + axum::extract::rejection::PathRejection::MissingPathParams(_) => { + responses::error::Error::from(auth::Error::MissingAuthKey { + location: Location::caller(), + }) } + _ => responses::error::Error::from(auth::Error::CannotExtractKeyParam { + location: Location::caller(), + }), + } +} + +#[cfg(test)] +mod tests { + + use super::parse_key; + use crate::http::axum_implementation::responses::error::Error; + + fn assert_error_response(error: &Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. Error: {error:?}" + ); + } + + #[test] + fn it_should_return_an_authentication_error_if_the_key_cannot_be_parsed() { + let invalid_key = "invalid_key"; + + let response = parse_key(invalid_key).unwrap_err(); + + assert_error_response(&response, "Authentication error: Invalid format for authentication key param"); } } From 3420576edbffaae54f74bdc118de3ee31544847e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 6 Mar 2023 18:41:37 +0000 Subject: [PATCH 0436/1003] test(http): add tests for peer IP resolution --- .../axum_implementation/extractors/peer_ip.rs | 143 ++++++++++++++++-- 1 file changed, 129 insertions(+), 14 deletions(-) diff --git a/src/http/axum_implementation/extractors/peer_ip.rs b/src/http/axum_implementation/extractors/peer_ip.rs index aae348d99..10f590e70 100644 --- a/src/http/axum_implementation/extractors/peer_ip.rs +++ b/src/http/axum_implementation/extractors/peer_ip.rs @@ -20,7 +20,7 @@ pub enum ResolutionError { impl From for responses::error::Error { fn from(err: ResolutionError) -> Self { responses::error::Error { - 
failure_reason: format!("{err}"), + failure_reason: format!("Error resolving peer IP: {err}"), } } } @@ -32,23 +32,138 @@ impl From for responses::error::Error { /// Will return an error if the peer IP cannot be obtained according to the configuration. /// For example, if the IP is extracted from an HTTP header which is missing in the request. pub fn resolve(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { + match resolve_peer_ip(on_reverse_proxy, remote_client_ip) { + Ok(ip) => Ok(ip), + Err(error) => Err(error.into_response()), + } +} + +fn resolve_peer_ip(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { if on_reverse_proxy { - if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { - Ok(ip) - } else { - Err( - responses::error::Error::from(ResolutionError::MissingRightMostXForwardedForIp { - location: Location::caller(), - }) - .into_response(), - ) - } - } else if let Some(ip) = remote_client_ip.connection_info_ip { + resolve_peer_ip_on_reverse_proxy(remote_client_ip) + } else { + resolve_peer_ip_without_reverse_proxy(remote_client_ip) + } +} + +fn resolve_peer_ip_without_reverse_proxy(remote_client_ip: &RemoteClientIp) -> Result { + if let Some(ip) = remote_client_ip.connection_info_ip { Ok(ip) } else { Err(responses::error::Error::from(ResolutionError::MissingClientIp { location: Location::caller(), - }) - .into_response()) + })) + } +} + +fn resolve_peer_ip_on_reverse_proxy(remote_client_ip: &RemoteClientIp) -> Result { + if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { + Ok(ip) + } else { + Err(responses::error::Error::from( + ResolutionError::MissingRightMostXForwardedForIp { + location: Location::caller(), + }, + )) + } +} + +#[cfg(test)] +mod tests { + use super::resolve_peer_ip; + use crate::http::axum_implementation::responses::error::Error; + + fn assert_error_response(error: &Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response 
does not contain message: '{error_message}'. Error: {error:?}" + ); + } + + mod working_without_reverse_proxy { + use std::net::IpAddr; + use std::str::FromStr; + + use super::{assert_error_response, resolve_peer_ip}; + use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; + + #[test] + fn it_should_get_the_peer_ip_from_the_connection_info() { + let on_reverse_proxy = false; + + let ip = resolve_peer_ip( + on_reverse_proxy, + &RemoteClientIp { + right_most_x_forwarded_for: None, + connection_info_ip: Some(IpAddr::from_str("203.0.113.195").unwrap()), + }, + ) + .unwrap(); + + assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); + } + + #[test] + fn it_should_return_an_error_if_it_cannot_get_the_peer_ip_from_the_connection_info() { + let on_reverse_proxy = false; + + let response = resolve_peer_ip( + on_reverse_proxy, + &RemoteClientIp { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }, + ) + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: cannot get the client IP from the connection info", + ); + } + } + + mod working_on_reverse_proxy { + use std::net::IpAddr; + use std::str::FromStr; + + use super::assert_error_response; + use crate::http::axum_implementation::extractors::peer_ip::resolve_peer_ip; + use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; + + #[test] + fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { + let on_reverse_proxy = true; + + let ip = resolve_peer_ip( + on_reverse_proxy, + &RemoteClientIp { + right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), + connection_info_ip: None, + }, + ) + .unwrap(); + + assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); + } + + #[test] + fn it_should_return_an_error_if_it_cannot_get_the_right_most_ip_from_the_x_forwarded_for_header() { + let on_reverse_proxy = true; + + let response = resolve_peer_ip( + on_reverse_proxy, + 
&RemoteClientIp { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }, + ) + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: missing or invalid the right most X-Forwarded-For IP", + ); + } } } From 743f86908cd7c1e936a86cc2f1b04ce940face24 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 6 Mar 2023 18:56:07 +0000 Subject: [PATCH 0437/1003] refactor(http): move peer IP resolver to handlers mod It's not an Axum extractor. It's a wrapper of another custom Axum extractor which is used in handlers. Since it does not implements the trait `FromRequestParts` the `handlers` dir seems to be a better location. --- src/http/axum_implementation/extractors/mod.rs | 1 - .../axum_implementation/extractors/remote_client_ip.rs | 2 +- src/http/axum_implementation/handlers/announce.rs | 2 +- src/http/axum_implementation/handlers/common/mod.rs | 1 + .../{extractors => handlers/common}/peer_ip.rs | 7 ++++--- src/http/axum_implementation/handlers/mod.rs | 1 + src/http/axum_implementation/handlers/scrape.rs | 2 +- 7 files changed, 9 insertions(+), 7 deletions(-) create mode 100644 src/http/axum_implementation/handlers/common/mod.rs rename src/http/axum_implementation/{extractors => handlers/common}/peer_ip.rs (93%) diff --git a/src/http/axum_implementation/extractors/mod.rs b/src/http/axum_implementation/extractors/mod.rs index e6d9e8c67..04e9e306b 100644 --- a/src/http/axum_implementation/extractors/mod.rs +++ b/src/http/axum_implementation/extractors/mod.rs @@ -1,5 +1,4 @@ pub mod announce_request; pub mod key; -pub mod peer_ip; pub mod remote_client_ip; pub mod scrape_request; diff --git a/src/http/axum_implementation/extractors/remote_client_ip.rs b/src/http/axum_implementation/extractors/remote_client_ip.rs index e852a1b6f..cfc3532de 100644 --- a/src/http/axum_implementation/extractors/remote_client_ip.rs +++ b/src/http/axum_implementation/extractors/remote_client_ip.rs @@ -18,7 +18,7 @@ use serde::{Deserialize, Serialize}; /// 
`right_most_x_forwarded_for` = 126.0.0.2 /// `connection_info_ip` = 126.0.0.3 /// -/// More info about inner extractors : +/// More info about inner extractors: #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] pub struct RemoteClientIp { pub right_most_x_forwarded_for: Option, diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 4bb06da73..e4b5ece80 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -7,9 +7,9 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; +use super::common::peer_ip; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; use crate::http::axum_implementation::extractors::key::Extract; -use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::handlers::auth; use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; diff --git a/src/http/axum_implementation/handlers/common/mod.rs b/src/http/axum_implementation/handlers/common/mod.rs new file mode 100644 index 000000000..ed159a32b --- /dev/null +++ b/src/http/axum_implementation/handlers/common/mod.rs @@ -0,0 +1 @@ +pub mod peer_ip; diff --git a/src/http/axum_implementation/extractors/peer_ip.rs b/src/http/axum_implementation/handlers/common/peer_ip.rs similarity index 93% rename from src/http/axum_implementation/extractors/peer_ip.rs rename to src/http/axum_implementation/handlers/common/peer_ip.rs index 10f590e70..1c3b6c815 100644 --- a/src/http/axum_implementation/extractors/peer_ip.rs +++ b/src/http/axum_implementation/handlers/common/peer_ip.rs @@ -1,10 +1,11 @@ +//! Helper handler function to resolve the peer IP from the `RemoteClientIp` extractor. 
use std::net::IpAddr; use std::panic::Location; use axum::response::{IntoResponse, Response}; use thiserror::Error; -use super::remote_client_ip::RemoteClientIp; +use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::responses; #[derive(Error, Debug)] @@ -29,7 +30,7 @@ impl From for responses::error::Error { /// /// # Errors /// -/// Will return an error if the peer IP cannot be obtained according to the configuration. +/// Will return an error response if the peer IP cannot be obtained according to the configuration. /// For example, if the IP is extracted from an HTTP header which is missing in the request. pub fn resolve(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { match resolve_peer_ip(on_reverse_proxy, remote_client_ip) { @@ -128,8 +129,8 @@ mod tests { use std::str::FromStr; use super::assert_error_response; - use crate::http::axum_implementation::extractors::peer_ip::resolve_peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; + use crate::http::axum_implementation::handlers::common::peer_ip::resolve_peer_ip; #[test] fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { diff --git a/src/http/axum_implementation/handlers/mod.rs b/src/http/axum_implementation/handlers/mod.rs index e6b13ae91..36a810d95 100644 --- a/src/http/axum_implementation/handlers/mod.rs +++ b/src/http/axum_implementation/handlers/mod.rs @@ -3,6 +3,7 @@ use crate::tracker::error::Error; pub mod announce; pub mod auth; +pub mod common; pub mod scrape; impl From for responses::error::Error { diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 41d6bf3dc..d8d68a4c3 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -4,8 +4,8 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; 
use log::debug; +use super::common::peer_ip; use crate::http::axum_implementation::extractors::key::Extract; -use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; use crate::http::axum_implementation::requests::scrape::Scrape; From b4ae67d2cc4114c009268e53fd5c0192a3c52194 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 6 Mar 2023 19:07:07 +0000 Subject: [PATCH 0438/1003] docs(http): add mod description --- src/http/axum_implementation/extractors/remote_client_ip.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/http/axum_implementation/extractors/remote_client_ip.rs b/src/http/axum_implementation/extractors/remote_client_ip.rs index cfc3532de..0f6789261 100644 --- a/src/http/axum_implementation/extractors/remote_client_ip.rs +++ b/src/http/axum_implementation/extractors/remote_client_ip.rs @@ -1,3 +1,5 @@ +//! Wrapper for two Axum extractors to get the relevant information +//! to resolve the remote client IP. use std::net::{IpAddr, SocketAddr}; use axum::async_trait; From 637f25f25b8449fd7b5bc7a968da7d92bcef1484 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Mar 2023 14:02:51 +0000 Subject: [PATCH 0439/1003] refactor(http): move auth mod to handlers::common The `auth` mod does not contains a handler. It only contains auth error and funtion to ma the error into responses. 
--- src/http/axum_implementation/extractors/key.rs | 2 +- src/http/axum_implementation/handlers/announce.rs | 2 +- src/http/axum_implementation/handlers/{ => common}/auth.rs | 0 src/http/axum_implementation/handlers/common/mod.rs | 1 + src/http/axum_implementation/handlers/mod.rs | 1 - 5 files changed, 3 insertions(+), 3 deletions(-) rename src/http/axum_implementation/handlers/{ => common}/auth.rs (100%) diff --git a/src/http/axum_implementation/extractors/key.rs b/src/http/axum_implementation/extractors/key.rs index 2a3f2a991..e32c4c76a 100644 --- a/src/http/axum_implementation/extractors/key.rs +++ b/src/http/axum_implementation/extractors/key.rs @@ -7,7 +7,7 @@ use axum::extract::{FromRequestParts, Path}; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use crate::http::axum_implementation::handlers::auth::{self, KeyParam}; +use crate::http::axum_implementation::handlers::common::auth::{self, KeyParam}; use crate::http::axum_implementation::responses; use crate::tracker::auth::Key; diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index e4b5ece80..33f78814f 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -11,7 +11,7 @@ use super::common::peer_ip; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; use crate::http::axum_implementation::extractors::key::Extract; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; -use crate::http::axum_implementation::handlers::auth; +use crate::http::axum_implementation::handlers::common::auth; use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; use crate::http::axum_implementation::responses::{self, announce}; use crate::http::axum_implementation::services; diff --git a/src/http/axum_implementation/handlers/auth.rs 
b/src/http/axum_implementation/handlers/common/auth.rs similarity index 100% rename from src/http/axum_implementation/handlers/auth.rs rename to src/http/axum_implementation/handlers/common/auth.rs diff --git a/src/http/axum_implementation/handlers/common/mod.rs b/src/http/axum_implementation/handlers/common/mod.rs index ed159a32b..41bf1369f 100644 --- a/src/http/axum_implementation/handlers/common/mod.rs +++ b/src/http/axum_implementation/handlers/common/mod.rs @@ -1 +1,2 @@ pub mod peer_ip; +pub mod auth; diff --git a/src/http/axum_implementation/handlers/mod.rs b/src/http/axum_implementation/handlers/mod.rs index 36a810d95..69b69127e 100644 --- a/src/http/axum_implementation/handlers/mod.rs +++ b/src/http/axum_implementation/handlers/mod.rs @@ -2,7 +2,6 @@ use super::responses; use crate::tracker::error::Error; pub mod announce; -pub mod auth; pub mod common; pub mod scrape; From 49bb0db5b2958bcf19cc4b3eab40f0ee4e317d0b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Mar 2023 14:33:33 +0000 Subject: [PATCH 0440/1003] refactor(http): rename mod and move struct --- .../extractors/{key.rs => authentication_key.rs} | 13 ++++++++++++- src/http/axum_implementation/extractors/mod.rs | 2 +- src/http/axum_implementation/handlers/announce.rs | 2 +- .../axum_implementation/handlers/common/auth.rs | 11 ----------- src/http/axum_implementation/handlers/scrape.rs | 2 +- 5 files changed, 15 insertions(+), 15 deletions(-) rename src/http/axum_implementation/extractors/{key.rs => authentication_key.rs} (92%) diff --git a/src/http/axum_implementation/extractors/key.rs b/src/http/axum_implementation/extractors/authentication_key.rs similarity index 92% rename from src/http/axum_implementation/extractors/key.rs rename to src/http/axum_implementation/extractors/authentication_key.rs index e32c4c76a..8ffc4ff12 100644 --- a/src/http/axum_implementation/extractors/key.rs +++ b/src/http/axum_implementation/extractors/authentication_key.rs @@ -6,13 +6,24 @@ use 
axum::extract::rejection::PathRejection; use axum::extract::{FromRequestParts, Path}; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; +use serde::Deserialize; -use crate::http::axum_implementation::handlers::common::auth::{self, KeyParam}; +use crate::http::axum_implementation::handlers::common::auth; use crate::http::axum_implementation::responses; use crate::tracker::auth::Key; pub struct Extract(pub Key); +#[derive(Deserialize)] +pub struct KeyParam(String); + +impl KeyParam { + #[must_use] + pub fn value(&self) -> String { + self.0.clone() + } +} + #[async_trait] impl FromRequestParts for Extract where diff --git a/src/http/axum_implementation/extractors/mod.rs b/src/http/axum_implementation/extractors/mod.rs index 04e9e306b..97aae63a5 100644 --- a/src/http/axum_implementation/extractors/mod.rs +++ b/src/http/axum_implementation/extractors/mod.rs @@ -1,4 +1,4 @@ pub mod announce_request; -pub mod key; +pub mod authentication_key; pub mod remote_client_ip; pub mod scrape_request; diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 33f78814f..18787737f 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -9,7 +9,7 @@ use log::debug; use super::common::peer_ip; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; -use crate::http::axum_implementation::extractors::key::Extract; +use crate::http::axum_implementation::extractors::authentication_key::Extract; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::handlers::common::auth; use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; diff --git a/src/http/axum_implementation/handlers/common/auth.rs b/src/http/axum_implementation/handlers/common/auth.rs index b1b73e60e..30971725a 100644 --- 
a/src/http/axum_implementation/handlers/common/auth.rs +++ b/src/http/axum_implementation/handlers/common/auth.rs @@ -1,21 +1,10 @@ use std::panic::Location; -use serde::Deserialize; use thiserror::Error; use crate::http::axum_implementation::responses; use crate::tracker::auth; -#[derive(Deserialize)] -pub struct KeyParam(String); - -impl KeyParam { - #[must_use] - pub fn value(&self) -> String { - self.0.clone() - } -} - #[derive(Debug, Error)] pub enum Error { #[error("Missing authentication key param for private tracker. Error in {location}")] diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index d8d68a4c3..b65fa5592 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -5,7 +5,7 @@ use axum::response::{IntoResponse, Response}; use log::debug; use super::common::peer_ip; -use crate::http::axum_implementation::extractors::key::Extract; +use crate::http::axum_implementation::extractors::authentication_key::Extract; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; use crate::http::axum_implementation::requests::scrape::Scrape; From 6ebcfcdcd78b4011a823e613362748420eead674 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Mar 2023 15:36:31 +0000 Subject: [PATCH 0441/1003] refactor(http): push logic from Axum to App layer Some app logic was coupled to Axum and it could be potencially used with any other web library. Besides, it's easier to test. 
--- ...mote_client_ip.rs => client_ip_sources.rs} | 29 +--- .../axum_implementation/extractors/mod.rs | 2 +- .../axum_implementation/handlers/announce.rs | 29 ++-- .../handlers/common/peer_ip.rs | 164 ++---------------- .../axum_implementation/handlers/scrape.rs | 37 ++-- src/http/axum_implementation/services/mod.rs | 1 + .../services/peer_ip_resolver.rs | 149 ++++++++++++++++ src/tracker/error.rs | 2 + 8 files changed, 212 insertions(+), 201 deletions(-) rename src/http/axum_implementation/extractors/{remote_client_ip.rs => client_ip_sources.rs} (51%) create mode 100644 src/http/axum_implementation/services/peer_ip_resolver.rs diff --git a/src/http/axum_implementation/extractors/remote_client_ip.rs b/src/http/axum_implementation/extractors/client_ip_sources.rs similarity index 51% rename from src/http/axum_implementation/extractors/remote_client_ip.rs rename to src/http/axum_implementation/extractors/client_ip_sources.rs index 0f6789261..b41478c22 100644 --- a/src/http/axum_implementation/extractors/remote_client_ip.rs +++ b/src/http/axum_implementation/extractors/client_ip_sources.rs @@ -1,34 +1,19 @@ //! Wrapper for two Axum extractors to get the relevant information //! to resolve the remote client IP. -use std::net::{IpAddr, SocketAddr}; +use std::net::SocketAddr; use axum::async_trait; use axum::extract::{ConnectInfo, FromRequestParts}; use axum::http::request::Parts; use axum::response::Response; use axum_client_ip::RightmostXForwardedFor; -use serde::{Deserialize, Serialize}; -/// Given this request chain: -/// -/// client <-> http proxy 1 <-> http proxy 2 <-> server -/// ip: 126.0.0.1 ip: 126.0.0.2 ip: 126.0.0.3 ip: 126.0.0.4 -/// X-Forwarded-For: 126.0.0.1 X-Forwarded-For: 126.0.0.1,126.0.0.2 -/// -/// This extractor extracts these values from the HTTP headers and connection info. 
-/// -/// `right_most_x_forwarded_for` = 126.0.0.2 -/// `connection_info_ip` = 126.0.0.3 -/// -/// More info about inner extractors: -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] -pub struct RemoteClientIp { - pub right_most_x_forwarded_for: Option, - pub connection_info_ip: Option, -} +use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + +pub struct Extract(pub ClientIpSources); #[async_trait] -impl FromRequestParts for RemoteClientIp +impl FromRequestParts for Extract where S: Send + Sync, { @@ -45,9 +30,9 @@ where Err(_) => None, }; - Ok(RemoteClientIp { + Ok(Extract(ClientIpSources { right_most_x_forwarded_for, connection_info_ip, - }) + })) } } diff --git a/src/http/axum_implementation/extractors/mod.rs b/src/http/axum_implementation/extractors/mod.rs index 97aae63a5..557330257 100644 --- a/src/http/axum_implementation/extractors/mod.rs +++ b/src/http/axum_implementation/extractors/mod.rs @@ -1,4 +1,4 @@ pub mod announce_request; pub mod authentication_key; -pub mod remote_client_ip; +pub mod client_ip_sources; pub mod scrape_request; diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 18787737f..05216ce28 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -7,23 +7,28 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use super::common::peer_ip; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; -use crate::http::axum_implementation::extractors::authentication_key::Extract; -use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; +use crate::http::axum_implementation::extractors::authentication_key::Extract as ExtractKey; +use crate::http::axum_implementation::extractors::client_ip_sources::Extract as ExtractClientIpSources; use 
crate::http::axum_implementation::handlers::common::auth; use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; use crate::http::axum_implementation::responses::{self, announce}; -use crate::http::axum_implementation::services; +use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; +use crate::http::axum_implementation::services::{self, peer_ip_resolver}; use crate::protocol::clock::{Current, Time}; use crate::tracker::peer::Peer; use crate::tracker::Tracker; +/* code-review: authentication, authorization and peer IP resolution could be moved + from the handler (Axum) layer into the app layer `services::announce::invoke`. + That would make the handler even simpler and the code more reusable and decoupled from Axum. +*/ + #[allow(clippy::unused_async)] pub async fn handle_without_key( State(tracker): State>, ExtractRequest(announce_request): ExtractRequest, - remote_client_ip: RemoteClientIp, + ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { debug!("http announce request: {:#?}", announce_request); @@ -34,15 +39,15 @@ pub async fn handle_without_key( .into_response(); } - handle(&tracker, &announce_request, &remote_client_ip).await + handle(&tracker, &announce_request, &client_ip_sources).await } #[allow(clippy::unused_async)] pub async fn handle_with_key( State(tracker): State>, ExtractRequest(announce_request): ExtractRequest, - Extract(key): Extract, - remote_client_ip: RemoteClientIp, + ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, + ExtractKey(key): ExtractKey, ) -> Response { debug!("http announce request: {:#?}", announce_request); @@ -51,18 +56,18 @@ pub async fn handle_with_key( Err(error) => return responses::error::Error::from(error).into_response(), } - handle(&tracker, &announce_request, &remote_client_ip).await + handle(&tracker, &announce_request, &client_ip_sources).await } -async fn handle(tracker: &Arc, announce_request: &Announce, 
remote_client_ip: &RemoteClientIp) -> Response { +async fn handle(tracker: &Arc, announce_request: &Announce, client_ip_sources: &ClientIpSources) -> Response { match tracker.authorize(&announce_request.info_hash).await { Ok(_) => (), Err(error) => return responses::error::Error::from(error).into_response(), } - let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, remote_client_ip) { + let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, client_ip_sources) { Ok(peer_ip) => peer_ip, - Err(err) => return err, + Err(error) => return responses::error::Error::from(error).into_response(), }; let mut peer = peer_from_request(announce_request, &peer_ip); diff --git a/src/http/axum_implementation/handlers/common/peer_ip.rs b/src/http/axum_implementation/handlers/common/peer_ip.rs index 1c3b6c815..df10e5eb1 100644 --- a/src/http/axum_implementation/handlers/common/peer_ip.rs +++ b/src/http/axum_implementation/handlers/common/peer_ip.rs @@ -1,170 +1,34 @@ -//! Helper handler function to resolve the peer IP from the `RemoteClientIp` extractor. 
-use std::net::IpAddr; -use std::panic::Location; - -use axum::response::{IntoResponse, Response}; -use thiserror::Error; - -use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::responses; +use crate::http::axum_implementation::services::peer_ip_resolver::PeerIpResolutionError; -#[derive(Error, Debug)] -pub enum ResolutionError { - #[error( - "missing or invalid the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration) in {location}" - )] - MissingRightMostXForwardedForIp { location: &'static Location<'static> }, - #[error("cannot get the client IP from the connection info in {location}")] - MissingClientIp { location: &'static Location<'static> }, -} - -impl From for responses::error::Error { - fn from(err: ResolutionError) -> Self { +impl From for responses::error::Error { + fn from(err: PeerIpResolutionError) -> Self { responses::error::Error { failure_reason: format!("Error resolving peer IP: {err}"), } } } -/// It resolves the peer IP. -/// -/// # Errors -/// -/// Will return an error response if the peer IP cannot be obtained according to the configuration. -/// For example, if the IP is extracted from an HTTP header which is missing in the request. 
-pub fn resolve(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { - match resolve_peer_ip(on_reverse_proxy, remote_client_ip) { - Ok(ip) => Ok(ip), - Err(error) => Err(error.into_response()), - } -} - -fn resolve_peer_ip(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { - if on_reverse_proxy { - resolve_peer_ip_on_reverse_proxy(remote_client_ip) - } else { - resolve_peer_ip_without_reverse_proxy(remote_client_ip) - } -} - -fn resolve_peer_ip_without_reverse_proxy(remote_client_ip: &RemoteClientIp) -> Result { - if let Some(ip) = remote_client_ip.connection_info_ip { - Ok(ip) - } else { - Err(responses::error::Error::from(ResolutionError::MissingClientIp { - location: Location::caller(), - })) - } -} - -fn resolve_peer_ip_on_reverse_proxy(remote_client_ip: &RemoteClientIp) -> Result { - if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { - Ok(ip) - } else { - Err(responses::error::Error::from( - ResolutionError::MissingRightMostXForwardedForIp { - location: Location::caller(), - }, - )) - } -} - #[cfg(test)] mod tests { - use super::resolve_peer_ip; - use crate::http::axum_implementation::responses::error::Error; + use std::panic::Location; - fn assert_error_response(error: &Error, error_message: &str) { + use crate::http::axum_implementation::responses; + use crate::http::axum_implementation::services::peer_ip_resolver::PeerIpResolutionError; + + fn assert_error_response(error: &responses::error::Error, error_message: &str) { assert!( error.failure_reason.contains(error_message), "Error response does not contain message: '{error_message}'. 
Error: {error:?}" ); } - mod working_without_reverse_proxy { - use std::net::IpAddr; - use std::str::FromStr; - - use super::{assert_error_response, resolve_peer_ip}; - use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; - - #[test] - fn it_should_get_the_peer_ip_from_the_connection_info() { - let on_reverse_proxy = false; - - let ip = resolve_peer_ip( - on_reverse_proxy, - &RemoteClientIp { - right_most_x_forwarded_for: None, - connection_info_ip: Some(IpAddr::from_str("203.0.113.195").unwrap()), - }, - ) - .unwrap(); - - assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); - } - - #[test] - fn it_should_return_an_error_if_it_cannot_get_the_peer_ip_from_the_connection_info() { - let on_reverse_proxy = false; - - let response = resolve_peer_ip( - on_reverse_proxy, - &RemoteClientIp { - right_most_x_forwarded_for: None, - connection_info_ip: None, - }, - ) - .unwrap_err(); - - assert_error_response( - &response, - "Error resolving peer IP: cannot get the client IP from the connection info", - ); - } - } - - mod working_on_reverse_proxy { - use std::net::IpAddr; - use std::str::FromStr; - - use super::assert_error_response; - use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; - use crate::http::axum_implementation::handlers::common::peer_ip::resolve_peer_ip; - - #[test] - fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { - let on_reverse_proxy = true; - - let ip = resolve_peer_ip( - on_reverse_proxy, - &RemoteClientIp { - right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), - connection_info_ip: None, - }, - ) - .unwrap(); - - assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); - } - - #[test] - fn it_should_return_an_error_if_it_cannot_get_the_right_most_ip_from_the_x_forwarded_for_header() { - let on_reverse_proxy = true; - - let response = resolve_peer_ip( - on_reverse_proxy, - &RemoteClientIp { - right_most_x_forwarded_for: 
None, - connection_info_ip: None, - }, - ) - .unwrap_err(); + #[test] + fn it_should_map_a_peer_ip_resolution_error_into_an_error_response() { + let response = responses::error::Error::from(PeerIpResolutionError::MissingRightMostXForwardedForIp { + location: Location::caller(), + }); - assert_error_response( - &response, - "Error resolving peer IP: missing or invalid the right most X-Forwarded-For IP", - ); - } + assert_error_response(&response, "Error resolving peer IP"); } } diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index b65fa5592..2027b8604 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -4,50 +4,55 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use super::common::peer_ip; -use crate::http::axum_implementation::extractors::authentication_key::Extract; -use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; +use crate::http::axum_implementation::extractors::authentication_key::Extract as ExtractKey; +use crate::http::axum_implementation::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; use crate::http::axum_implementation::requests::scrape::Scrape; +use crate::http::axum_implementation::services::peer_ip_resolver::{self, ClientIpSources}; use crate::http::axum_implementation::{responses, services}; use crate::tracker::Tracker; +/* code-review: authentication, authorization and peer IP resolution could be moved + from the handler (Axum) layer into the app layer `services::announce::invoke`. + That would make the handler even simpler and the code more reusable and decoupled from Axum. 
+*/ + #[allow(clippy::unused_async)] pub async fn handle_without_key( State(tracker): State>, ExtractRequest(scrape_request): ExtractRequest, - remote_client_ip: RemoteClientIp, + ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); if tracker.requires_authentication() { - return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await; + return handle_fake_scrape(&tracker, &scrape_request, &client_ip_sources).await; } - handle_real_scrape(&tracker, &scrape_request, &remote_client_ip).await + handle_real_scrape(&tracker, &scrape_request, &client_ip_sources).await } #[allow(clippy::unused_async)] pub async fn handle_with_key( State(tracker): State>, ExtractRequest(scrape_request): ExtractRequest, - Extract(key): Extract, - remote_client_ip: RemoteClientIp, + ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, + ExtractKey(key): ExtractKey, ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); match tracker.authenticate(&key).await { Ok(_) => (), - Err(_) => return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await, + Err(_) => return handle_fake_scrape(&tracker, &scrape_request, &client_ip_sources).await, } - handle_real_scrape(&tracker, &scrape_request, &remote_client_ip).await + handle_real_scrape(&tracker, &scrape_request, &client_ip_sources).await } -async fn handle_real_scrape(tracker: &Arc, scrape_request: &Scrape, remote_client_ip: &RemoteClientIp) -> Response { - let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, remote_client_ip) { +async fn handle_real_scrape(tracker: &Arc, scrape_request: &Scrape, client_ip_sources: &ClientIpSources) -> Response { + let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, client_ip_sources) { Ok(peer_ip) => peer_ip, - Err(err) => return err, + Err(error) => return responses::error::Error::from(error).into_response(), }; let 
scrape_data = services::scrape::invoke(tracker, &scrape_request.info_hashes, &peer_ip).await; @@ -56,10 +61,10 @@ async fn handle_real_scrape(tracker: &Arc, scrape_request: &Scrape, rem } /// When authentication fails in `private` mode the tracker returns empty swarm metadata for all the requested infohashes. -async fn handle_fake_scrape(tracker: &Arc, scrape_request: &Scrape, remote_client_ip: &RemoteClientIp) -> Response { - let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, remote_client_ip) { +async fn handle_fake_scrape(tracker: &Arc, scrape_request: &Scrape, remote_client_ip: &ClientIpSources) -> Response { + let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, remote_client_ip) { Ok(peer_ip) => peer_ip, - Err(err) => return err, + Err(error) => return responses::error::Error::from(error).into_response(), }; let scrape_data = services::scrape::fake_invoke(tracker, &scrape_request.info_hashes, &peer_ip).await; diff --git a/src/http/axum_implementation/services/mod.rs b/src/http/axum_implementation/services/mod.rs index 776d2dfbf..5d1acd67d 100644 --- a/src/http/axum_implementation/services/mod.rs +++ b/src/http/axum_implementation/services/mod.rs @@ -1,2 +1,3 @@ pub mod announce; +pub mod peer_ip_resolver; pub mod scrape; diff --git a/src/http/axum_implementation/services/peer_ip_resolver.rs b/src/http/axum_implementation/services/peer_ip_resolver.rs new file mode 100644 index 000000000..fae1e4ec0 --- /dev/null +++ b/src/http/axum_implementation/services/peer_ip_resolver.rs @@ -0,0 +1,149 @@ +//! Given this request chain: +//! +//! client <-> http proxy 1 <-> http proxy 2 <-> server +//! ip: 126.0.0.1 ip: 126.0.0.2 ip: 126.0.0.3 ip: 126.0.0.4 +//! X-Forwarded-For: 126.0.0.1 X-Forwarded-For: 126.0.0.1,126.0.0.2 +//! +//! This service resolves the peer IP from these values: +//! +//! `right_most_x_forwarded_for` = 126.0.0.2 +//! `connection_info_ip` = 126.0.0.3 +//! +//! Depending on the tracker configuration. 
+use std::net::IpAddr; +use std::panic::Location; + +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] +pub struct ClientIpSources { + pub right_most_x_forwarded_for: Option, + pub connection_info_ip: Option, +} + +#[derive(Error, Debug)] +pub enum PeerIpResolutionError { + #[error( + "missing or invalid the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration) in {location}" + )] + MissingRightMostXForwardedForIp { location: &'static Location<'static> }, + #[error("cannot get the client IP from the connection info in {location}")] + MissingClientIp { location: &'static Location<'static> }, +} + +/// # Errors +/// +/// Will return an error if the peer IP cannot be obtained according to the configuration. +/// For example, if the IP is extracted from an HTTP header which is missing in the request. +pub fn invoke(on_reverse_proxy: bool, client_ip_sources: &ClientIpSources) -> Result { + if on_reverse_proxy { + resolve_peer_ip_on_reverse_proxy(client_ip_sources) + } else { + resolve_peer_ip_without_reverse_proxy(client_ip_sources) + } +} + +fn resolve_peer_ip_without_reverse_proxy(remote_client_ip: &ClientIpSources) -> Result { + if let Some(ip) = remote_client_ip.connection_info_ip { + Ok(ip) + } else { + Err(PeerIpResolutionError::MissingClientIp { + location: Location::caller(), + }) + } +} + +fn resolve_peer_ip_on_reverse_proxy(remote_client_ip: &ClientIpSources) -> Result { + if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { + Ok(ip) + } else { + Err(PeerIpResolutionError::MissingRightMostXForwardedForIp { + location: Location::caller(), + }) + } +} + +#[cfg(test)] +mod tests { + use super::invoke; + + mod working_without_reverse_proxy { + use std::net::IpAddr; + use std::str::FromStr; + + use super::invoke; + use crate::http::axum_implementation::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; + + #[test] + fn 
it_should_get_the_peer_ip_from_the_connection_info() { + let on_reverse_proxy = false; + + let ip = invoke( + on_reverse_proxy, + &ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: Some(IpAddr::from_str("203.0.113.195").unwrap()), + }, + ) + .unwrap(); + + assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); + } + + #[test] + fn it_should_return_an_error_if_it_cannot_get_the_peer_ip_from_the_connection_info() { + let on_reverse_proxy = false; + + let error = invoke( + on_reverse_proxy, + &ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }, + ) + .unwrap_err(); + + assert!(matches!(error, PeerIpResolutionError::MissingClientIp { .. })); + } + } + + mod working_on_reverse_proxy { + use std::net::IpAddr; + use std::str::FromStr; + + use crate::http::axum_implementation::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; + + #[test] + fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { + let on_reverse_proxy = true; + + let ip = invoke( + on_reverse_proxy, + &ClientIpSources { + right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), + connection_info_ip: None, + }, + ) + .unwrap(); + + assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); + } + + #[test] + fn it_should_return_an_error_if_it_cannot_get_the_right_most_ip_from_the_x_forwarded_for_header() { + let on_reverse_proxy = true; + + let error = invoke( + on_reverse_proxy, + &ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }, + ) + .unwrap_err(); + + assert!(matches!(error, PeerIpResolutionError::MissingRightMostXForwardedForIp { .. 
})); + } + } +} diff --git a/src/tracker/error.rs b/src/tracker/error.rs index 51bcbf3bb..080903da6 100644 --- a/src/tracker/error.rs +++ b/src/tracker/error.rs @@ -4,6 +4,7 @@ use crate::located_error::LocatedError; #[derive(thiserror::Error, Debug, Clone)] pub enum Error { + // Authentication errors #[error("The supplied key: {key:?}, is not valid: {source}")] PeerKeyNotValid { key: super::auth::Key, @@ -12,6 +13,7 @@ pub enum Error { #[error("The peer is not authenticated, {location}")] PeerNotAuthenticated { location: &'static Location<'static> }, + // Authorization errors #[error("The torrent: {info_hash}, is not whitelisted, {location}")] TorrentNotWhitelisted { info_hash: crate::protocol::info_hash::InfoHash, From fa609949d407f3ab36f114f65741ce0beaace137 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Mar 2023 17:58:21 +0000 Subject: [PATCH 0442/1003] test(http): [#128] unit test for announce handler --- .../axum_implementation/handlers/announce.rs | 279 ++++++++++++++++-- 1 file changed, 257 insertions(+), 22 deletions(-) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 05216ce28..9a92b243d 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -16,13 +16,9 @@ use crate::http::axum_implementation::responses::{self, announce}; use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; use crate::http::axum_implementation::services::{self, peer_ip_resolver}; use crate::protocol::clock::{Current, Time}; +use crate::tracker::auth::Key; use crate::tracker::peer::Peer; -use crate::tracker::Tracker; - -/* code-review: authentication, authorization and peer IP resolution could be moved - from the handler (Axum) layer into the app layer `services::announce::invoke`. - That would make the handler even simpler and the code more reusable and decoupled from Axum. 
-*/ +use crate::tracker::{AnnounceData, Tracker}; #[allow(clippy::unused_async)] pub async fn handle_without_key( @@ -32,14 +28,7 @@ pub async fn handle_without_key( ) -> Response { debug!("http announce request: {:#?}", announce_request); - if tracker.requires_authentication() { - return responses::error::Error::from(auth::Error::MissingAuthKey { - location: Location::caller(), - }) - .into_response(); - } - - handle(&tracker, &announce_request, &client_ip_sources).await + handle(&tracker, &announce_request, &client_ip_sources, None).await } #[allow(clippy::unused_async)] @@ -51,29 +40,67 @@ pub async fn handle_with_key( ) -> Response { debug!("http announce request: {:#?}", announce_request); - match tracker.authenticate(&key).await { - Ok(_) => (), - Err(error) => return responses::error::Error::from(error).into_response(), - } + handle(&tracker, &announce_request, &client_ip_sources, Some(key)).await +} - handle(&tracker, &announce_request, &client_ip_sources).await +async fn handle( + tracker: &Arc, + announce_request: &Announce, + client_ip_sources: &ClientIpSources, + maybe_key: Option, +) -> Response { + let announce_data = match handle_announce(tracker, announce_request, client_ip_sources, maybe_key).await { + Ok(announce_data) => announce_data, + Err(error) => return error.into_response(), + }; + build_response(announce_request, announce_data) } -async fn handle(tracker: &Arc, announce_request: &Announce, client_ip_sources: &ClientIpSources) -> Response { +/* code-review: authentication, authorization and peer IP resolution could be moved + from the handler (Axum) layer into the app layer `services::announce::invoke`. + That would make the handler even simpler and the code more reusable and decoupled from Axum. 
+*/ + +async fn handle_announce( + tracker: &Arc, + announce_request: &Announce, + client_ip_sources: &ClientIpSources, + maybe_key: Option, +) -> Result { + // Authentication + if tracker.requires_authentication() { + match maybe_key { + Some(key) => match tracker.authenticate(&key).await { + Ok(_) => (), + Err(error) => return Err(responses::error::Error::from(error)), + }, + None => { + return Err(responses::error::Error::from(auth::Error::MissingAuthKey { + location: Location::caller(), + })) + } + } + } + + // Authorization match tracker.authorize(&announce_request.info_hash).await { Ok(_) => (), - Err(error) => return responses::error::Error::from(error).into_response(), + Err(error) => return Err(responses::error::Error::from(error)), } let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, client_ip_sources) { Ok(peer_ip) => peer_ip, - Err(error) => return responses::error::Error::from(error).into_response(), + Err(error) => return Err(responses::error::Error::from(error)), }; let mut peer = peer_from_request(announce_request, &peer_ip); let announce_data = services::announce::invoke(tracker.clone(), announce_request.info_hash, &mut peer).await; + Ok(announce_data) +} + +fn build_response(announce_request: &Announce, announce_data: AnnounceData) -> Response { match &announce_request.compact { Some(compact) => match compact { Compact::Accepted => announce::Compact::from(announce_data).into_response(), @@ -108,3 +135,211 @@ fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { None => aquatic_udp_protocol::AnnounceEvent::None, } } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use crate::config::{ephemeral_configuration, Configuration}; + use crate::http::axum_implementation::requests::announce::Announce; + use crate::http::axum_implementation::responses; + use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::mode::Mode; + use 
crate::tracker::statistics::Keeper; + use crate::tracker::{peer, Tracker}; + + fn private_tracker() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Private; + tracker_factory(configuration) + } + + fn listed_tracker() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Listed; + tracker_factory(configuration) + } + + fn tracker_on_reverse_proxy() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.on_reverse_proxy = true; + tracker_factory(configuration) + } + + fn tracker_not_on_reverse_proxy() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.on_reverse_proxy = false; + tracker_factory(configuration) + } + + fn tracker_factory(configuration: Configuration) -> Tracker { + // code-review: the tracker initialization is duplicated in many places. Consider make this function public. + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + match Tracker::new(&Arc::new(configuration), Some(stats_event_sender), stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } + } + } + + fn sample_announce_request() -> Announce { + Announce { + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + peer_id: "-qB00000000000000001".parse::().unwrap(), + port: 17548, + downloaded: None, + uploaded: None, + left: None, + event: None, + compact: None, + } + } + + fn sample_client_ip_sources() -> ClientIpSources { + ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + } + } + + fn assert_error_response(error: &responses::error::Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. 
Error: {error:?}" + ); + } + + mod with_tracker_in_private_mode { + + use std::str::FromStr; + use std::sync::Arc; + + use super::{private_tracker, sample_announce_request, sample_client_ip_sources}; + use crate::http::axum_implementation::handlers::announce::handle_announce; + use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; + use crate::tracker::auth; + + #[tokio::test] + async fn it_should_fail_when_the_authentication_key_is_missing() { + let tracker = Arc::new(private_tracker()); + + let maybe_key = None; + + let response = handle_announce(&tracker, &sample_announce_request(), &sample_client_ip_sources(), maybe_key) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Authentication error: Missing authentication key param for private tracker", + ); + } + + #[tokio::test] + async fn it_should_fail_when_the_authentication_key_is_invalid() { + let tracker = Arc::new(private_tracker()); + + let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let maybe_key = Some(unregistered_key); + + let response = handle_announce(&tracker, &sample_announce_request(), &sample_client_ip_sources(), maybe_key) + .await + .unwrap_err(); + + assert_error_response(&response, "Authentication error: Failed to read key"); + } + } + + mod with_tracker_in_listed_mode { + + use std::sync::Arc; + + use super::{listed_tracker, sample_announce_request, sample_client_ip_sources}; + use crate::http::axum_implementation::handlers::announce::handle_announce; + use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; + + #[tokio::test] + async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { + let tracker = Arc::new(listed_tracker()); + + let announce_request = sample_announce_request(); + + let response = handle_announce(&tracker, &announce_request, &sample_client_ip_sources(), None) + .await + .unwrap_err(); + + assert_error_response( + &response, + 
&format!( + "Tracker error: The torrent: {}, is not whitelisted", + announce_request.info_hash + ), + ); + } + } + + mod with_tracker_on_reverse_proxy { + + use std::sync::Arc; + + use super::{sample_announce_request, tracker_on_reverse_proxy}; + use crate::http::axum_implementation::handlers::announce::handle_announce; + use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; + use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + + #[tokio::test] + async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { + let tracker = Arc::new(tracker_on_reverse_proxy()); + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }; + + let response = handle_announce(&tracker, &sample_announce_request(), &client_ip_sources, None) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: missing or invalid the right most X-Forwarded-For IP", + ); + } + } + + mod with_tracker_not_on_reverse_proxy { + + use std::sync::Arc; + + use super::{sample_announce_request, tracker_not_on_reverse_proxy}; + use crate::http::axum_implementation::handlers::announce::handle_announce; + use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; + use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + + #[tokio::test] + async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { + let tracker = Arc::new(tracker_not_on_reverse_proxy()); + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }; + + let response = handle_announce(&tracker, &sample_announce_request(), &client_ip_sources, None) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: cannot get the client IP from the connection info", + ); + } + } +} From 
3860fc867b664e47186260996f3b434cfa57e6c8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Mar 2023 18:08:48 +0000 Subject: [PATCH 0443/1003] fix: format --- src/http/axum_implementation/handlers/common/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/http/axum_implementation/handlers/common/mod.rs b/src/http/axum_implementation/handlers/common/mod.rs index 41bf1369f..dc028cabf 100644 --- a/src/http/axum_implementation/handlers/common/mod.rs +++ b/src/http/axum_implementation/handlers/common/mod.rs @@ -1,2 +1,2 @@ -pub mod peer_ip; pub mod auth; +pub mod peer_ip; From 930a424d372ca1d80e0f85d118d121ac34e82a2f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Mar 2023 16:35:42 +0000 Subject: [PATCH 0444/1003] test(http): [#220] unit tests for scrape handler --- .../axum_implementation/handlers/scrape.rs | 271 ++++++++++++++++-- .../axum_implementation/services/scrape.rs | 2 +- 2 files changed, 245 insertions(+), 28 deletions(-) diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 2027b8604..ee59b80a3 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -10,12 +10,8 @@ use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest use crate::http::axum_implementation::requests::scrape::Scrape; use crate::http::axum_implementation::services::peer_ip_resolver::{self, ClientIpSources}; use crate::http::axum_implementation::{responses, services}; -use crate::tracker::Tracker; - -/* code-review: authentication, authorization and peer IP resolution could be moved - from the handler (Axum) layer into the app layer `services::announce::invoke`. - That would make the handler even simpler and the code more reusable and decoupled from Axum. 
-*/ +use crate::tracker::auth::Key; +use crate::tracker::{ScrapeData, Tracker}; #[allow(clippy::unused_async)] pub async fn handle_without_key( @@ -25,11 +21,7 @@ pub async fn handle_without_key( ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); - if tracker.requires_authentication() { - return handle_fake_scrape(&tracker, &scrape_request, &client_ip_sources).await; - } - - handle_real_scrape(&tracker, &scrape_request, &client_ip_sources).await + handle(&tracker, &scrape_request, &client_ip_sources, None).await } #[allow(clippy::unused_async)] @@ -41,33 +33,258 @@ pub async fn handle_with_key( ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); - match tracker.authenticate(&key).await { - Ok(_) => (), - Err(_) => return handle_fake_scrape(&tracker, &scrape_request, &client_ip_sources).await, - } + handle(&tracker, &scrape_request, &client_ip_sources, Some(key)).await +} - handle_real_scrape(&tracker, &scrape_request, &client_ip_sources).await +async fn handle( + tracker: &Arc, + scrape_request: &Scrape, + client_ip_sources: &ClientIpSources, + maybe_key: Option, +) -> Response { + let scrape_data = match handle_scrape(tracker, scrape_request, client_ip_sources, maybe_key).await { + Ok(scrape_data) => scrape_data, + Err(error) => return error.into_response(), + }; + build_response(scrape_data) } -async fn handle_real_scrape(tracker: &Arc, scrape_request: &Scrape, client_ip_sources: &ClientIpSources) -> Response { +/* code-review: authentication, authorization and peer IP resolution could be moved + from the handler (Axum) layer into the app layer `services::announce::invoke`. + That would make the handler even simpler and the code more reusable and decoupled from Axum. 
+*/ + +async fn handle_scrape( + tracker: &Arc, + scrape_request: &Scrape, + client_ip_sources: &ClientIpSources, + maybe_key: Option, +) -> Result { + // Authentication + let return_real_scrape_data = if tracker.requires_authentication() { + match maybe_key { + Some(key) => match tracker.authenticate(&key).await { + Ok(_) => true, + Err(_error) => false, + }, + None => false, + } + } else { + true + }; + + // Authorization for scrape requests is handled at the `Tracker` level + // for each torrent. + let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, client_ip_sources) { Ok(peer_ip) => peer_ip, - Err(error) => return responses::error::Error::from(error).into_response(), + Err(error) => return Err(responses::error::Error::from(error)), }; - let scrape_data = services::scrape::invoke(tracker, &scrape_request.info_hashes, &peer_ip).await; + if return_real_scrape_data { + Ok(services::scrape::invoke(tracker, &scrape_request.info_hashes, &peer_ip).await) + } else { + Ok(services::scrape::fake(tracker, &scrape_request.info_hashes, &peer_ip).await) + } +} +fn build_response(scrape_data: ScrapeData) -> Response { responses::scrape::Bencoded::from(scrape_data).into_response() } -/// When authentication fails in `private` mode the tracker returns empty swarm metadata for all the requested infohashes. 
-async fn handle_fake_scrape(tracker: &Arc, scrape_request: &Scrape, remote_client_ip: &ClientIpSources) -> Response { - let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, remote_client_ip) { - Ok(peer_ip) => peer_ip, - Err(error) => return responses::error::Error::from(error).into_response(), - }; +#[cfg(test)] +mod tests { + use std::net::IpAddr; + use std::str::FromStr; + use std::sync::Arc; - let scrape_data = services::scrape::fake_invoke(tracker, &scrape_request.info_hashes, &peer_ip).await; + use crate::config::{ephemeral_configuration, Configuration}; + use crate::http::axum_implementation::requests::scrape::Scrape; + use crate::http::axum_implementation::responses; + use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::mode::Mode; + use crate::tracker::statistics::Keeper; + use crate::tracker::Tracker; - responses::scrape::Bencoded::from(scrape_data).into_response() + fn private_tracker() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Private; + tracker_factory(configuration) + } + + fn listed_tracker() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Listed; + tracker_factory(configuration) + } + + fn tracker_on_reverse_proxy() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.on_reverse_proxy = true; + tracker_factory(configuration) + } + + fn tracker_not_on_reverse_proxy() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.on_reverse_proxy = false; + tracker_factory(configuration) + } + + fn tracker_factory(configuration: Configuration) -> Tracker { + // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
+ + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + match Tracker::new(&Arc::new(configuration), Some(stats_event_sender), stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } + } + } + + fn sample_scrape_request() -> Scrape { + Scrape { + info_hashes: vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()], + } + } + + fn sample_client_ip_sources() -> ClientIpSources { + ClientIpSources { + right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), + connection_info_ip: Some(IpAddr::from_str("203.0.113.196").unwrap()), + } + } + + fn assert_error_response(error: &responses::error::Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. Error: {error:?}" + ); + } + + mod with_tracker_in_private_mode { + use std::str::FromStr; + use std::sync::Arc; + + use super::{private_tracker, sample_client_ip_sources, sample_scrape_request}; + use crate::http::axum_implementation::handlers::scrape::handle_scrape; + use crate::tracker::{auth, ScrapeData}; + + #[tokio::test] + async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { + let tracker = Arc::new(private_tracker()); + + let scrape_request = sample_scrape_request(); + let maybe_key = None; + + let scrape_data = handle_scrape(&tracker, &scrape_request, &sample_client_ip_sources(), maybe_key) + .await + .unwrap(); + + let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_invalid() { + let tracker = Arc::new(private_tracker()); + + let scrape_request = sample_scrape_request(); + let unregistered_key = 
auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let maybe_key = Some(unregistered_key); + + let scrape_data = handle_scrape(&tracker, &scrape_request, &sample_client_ip_sources(), maybe_key) + .await + .unwrap(); + + let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); + + assert_eq!(scrape_data, expected_scrape_data); + } + } + + mod with_tracker_in_listed_mode { + + use std::sync::Arc; + + use super::{listed_tracker, sample_client_ip_sources, sample_scrape_request}; + use crate::http::axum_implementation::handlers::scrape::handle_scrape; + use crate::tracker::ScrapeData; + + #[tokio::test] + async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { + let tracker = Arc::new(listed_tracker()); + + let scrape_request = sample_scrape_request(); + + let scrape_data = handle_scrape(&tracker, &scrape_request, &sample_client_ip_sources(), None) + .await + .unwrap(); + + let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); + + assert_eq!(scrape_data, expected_scrape_data); + } + } + + mod with_tracker_on_reverse_proxy { + use std::sync::Arc; + + use super::{sample_scrape_request, tracker_on_reverse_proxy}; + use crate::http::axum_implementation::handlers::scrape::handle_scrape; + use crate::http::axum_implementation::handlers::scrape::tests::assert_error_response; + use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + + #[tokio::test] + async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { + let tracker = Arc::new(tracker_on_reverse_proxy()); + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }; + + let response = handle_scrape(&tracker, &sample_scrape_request(), &client_ip_sources, None) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: missing or invalid the right most X-Forwarded-For IP", + ); + } + } + 
+ mod with_tracker_not_on_reverse_proxy { + use std::sync::Arc; + + use super::{sample_scrape_request, tracker_not_on_reverse_proxy}; + use crate::http::axum_implementation::handlers::scrape::handle_scrape; + use crate::http::axum_implementation::handlers::scrape::tests::assert_error_response; + use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + + #[tokio::test] + async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { + let tracker = Arc::new(tracker_not_on_reverse_proxy()); + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }; + + let response = handle_scrape(&tracker, &sample_scrape_request(), &client_ip_sources, None) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: cannot get the client IP from the connection info", + ); + } + } } diff --git a/src/http/axum_implementation/services/scrape.rs b/src/http/axum_implementation/services/scrape.rs index 923acf3c4..cfcba09f9 100644 --- a/src/http/axum_implementation/services/scrape.rs +++ b/src/http/axum_implementation/services/scrape.rs @@ -14,7 +14,7 @@ pub async fn invoke(tracker: &Arc, info_hashes: &Vec, origina /// When the peer is not authenticated and the tracker is running in `private` mode, /// the tracker returns empty stats for all the torrents. 
-pub async fn fake_invoke(tracker: &Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { +pub async fn fake(tracker: &Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { send_scrape_event(original_peer_ip, tracker).await; ScrapeData::zeroed(info_hashes) From 65bb1c3d28a78ec435c554512db27b04d4345b10 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Mar 2023 18:16:20 +0000 Subject: [PATCH 0445/1003] test(http): [#222] unit tests for announce service --- .../axum_implementation/services/announce.rs | 191 ++++++++++++++++++ 1 file changed, 191 insertions(+) diff --git a/src/http/axum_implementation/services/announce.rs b/src/http/axum_implementation/services/announce.rs index 356dbaeb9..5ce0fb1d5 100644 --- a/src/http/axum_implementation/services/announce.rs +++ b/src/http/axum_implementation/services/announce.rs @@ -22,3 +22,194 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) announce_data } + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use crate::config::{ephemeral_configuration, Configuration}; + use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::mode::Mode; + use crate::tracker::statistics::Keeper; + use crate::tracker::{peer, Tracker}; + + fn public_tracker() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Public; + tracker_factory(configuration) + } + + fn tracker_factory(configuration: Configuration) -> Tracker { + // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
+ + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + match Tracker::new(&Arc::new(configuration), Some(stats_event_sender), stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } + } + } + + fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + fn sample_peer_using_ipv4() -> peer::Peer { + sample_peer() + } + + fn sample_peer_using_ipv6() -> peer::Peer { + let mut peer = sample_peer(); + peer.peer_addr = SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + 8080, + ); + peer + } + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } + } + + mod with_tracker_in_any_mode { + use std::future; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use mockall::predicate::eq; + + use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; + use crate::config::ephemeral_configuration; + use crate::http::axum_implementation::services::announce::invoke; + use crate::http::axum_implementation::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; + use crate::tracker::peer::Peer; + use crate::tracker::torrent::SwarmStats; + use crate::tracker::{statistics, AnnounceData, Tracker}; + + #[tokio::test] + async fn it_should_return_the_announce_data() { + let tracker = Arc::new(public_tracker()); + + let mut peer = sample_peer(); + + let announce_data = invoke(tracker.clone(), sample_info_hash(), &mut peer).await; + + let expected_announce_data = AnnounceData { + peers: vec![], + 
swarm_stats: SwarmStats { + completed: 0, + seeders: 1, + leechers: 0, + }, + interval: tracker.config.announce_interval, + interval_min: tracker.config.min_announce_interval, + }; + + assert_eq!(announce_data, expected_announce_data); + } + + #[tokio::test] + async fn it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp4Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + &Arc::new(ephemeral_configuration()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let mut peer = sample_peer_using_ipv4(); + + let _announce_data = invoke(tracker, sample_info_hash(), &mut peer).await; + } + + fn tracker_with_an_ipv6_external_ip(stats_event_sender: Box) -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.external_ip = + Some(IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)).to_string()); + + Tracker::new(&Arc::new(configuration), Some(stats_event_sender), statistics::Repo::new()).unwrap() + } + + fn peer_with_the_ipv4_loopback_ip() -> Peer { + let loopback_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + let mut peer = sample_peer(); + peer.peer_addr = SocketAddr::new(loopback_ip, 8080); + peer + } + + #[tokio::test] + async fn it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4_even_if_the_tracker_changes_the_peer_ip_to_ipv6() + { + // Tracker changes the peer IP to the tracker external IP when the peer is using the loopback IP. 
+ + // Assert that the event sent is a TCP4 event + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp4Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let mut peer = peer_with_the_ipv4_loopback_ip(); + + let _announce_data = invoke( + tracker_with_an_ipv6_external_ip(stats_event_sender).into(), + sample_info_hash(), + &mut peer, + ) + .await; + } + + #[tokio::test] + async fn it_should_send_the_tcp_6_announce_event_when_the_peer_uses_ipv6_even_if_the_tracker_changes_the_peer_ip_to_ipv4() + { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + &Arc::new(ephemeral_configuration()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let mut peer = sample_peer_using_ipv6(); + + let _announce_data = invoke(tracker, sample_info_hash(), &mut peer).await; + } + } +} From 164f29aacdd6d47926d41516d4b306f360f92ac9 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 23 Feb 2023 16:03:59 +0100 Subject: [PATCH 0446/1003] isolated configuration, test-helpers and primitives crates --- Cargo.lock | 45 ++ Cargo.toml | 62 +-- packages/configuration/Cargo.toml | 16 + .../configuration/src/lib.rs | 39 +- packages/located-error/Cargo.toml | 9 + .../located-error/src/lib.rs | 0 packages/primitives/Cargo.toml | 9 + .../mode.rs => packages/primitives/src/lib.rs | 10 +- packages/test-helpers/Cargo.toml | 11 + packages/test-helpers/src/configuration.rs | 34 ++ packages/test-helpers/src/lib.rs | 2 + packages/test-helpers/src/random.rs | 7 + src/apis/middlewares/auth.rs | 2 +- 
src/apis/server.rs | 176 +++++++- src/databases/driver.rs | 34 +- src/databases/error.rs | 29 +- src/databases/mysql.rs | 4 +- src/databases/sqlite.rs | 6 +- .../axum_implementation/handlers/announce.rs | 18 +- .../axum_implementation/handlers/scrape.rs | 18 +- .../axum_implementation/requests/announce.rs | 2 +- .../axum_implementation/requests/scrape.rs | 2 +- .../axum_implementation/services/announce.rs | 17 +- src/http/warp_implementation/error.rs | 3 +- .../warp_implementation/filter_helpers.rs | 3 +- src/jobs/http_tracker.rs | 2 +- src/jobs/torrent_cleanup.rs | 2 +- src/jobs/tracker_apis.rs | 2 +- src/jobs/udp_tracker.rs | 2 +- src/lib.rs | 46 +- src/logging.rs | 3 +- src/main.rs | 2 +- src/setup.rs | 2 +- src/tracker/auth.rs | 2 +- src/tracker/error.rs | 2 +- src/tracker/mod.rs | 34 +- src/tracker/services/common.rs | 3 +- src/tracker/services/statistics.rs | 6 +- src/tracker/services/torrent.rs | 12 +- src/udp/error.rs | 3 +- src/udp/handlers.rs | 16 +- src/udp/server.rs | 129 +++++- tests/api/mod.rs | 2 +- tests/api/server.rs | 78 ---- tests/api/test_environment.rs | 134 ++++++ tests/http/server.rs | 10 +- tests/tracker_api.rs | 404 +++++++++++------- tests/udp/client.rs | 65 ++- tests/udp/mod.rs | 7 +- tests/udp/server.rs | 67 --- tests/udp/test_environment.rs | 131 ++++++ tests/udp_tracker.rs | 36 +- 52 files changed, 1226 insertions(+), 534 deletions(-) create mode 100644 packages/configuration/Cargo.toml rename src/config.rs => packages/configuration/src/lib.rs (93%) create mode 100644 packages/located-error/Cargo.toml rename src/located_error.rs => packages/located-error/src/lib.rs (100%) create mode 100644 packages/primitives/Cargo.toml rename src/tracker/mode.rs => packages/primitives/src/lib.rs (70%) create mode 100644 packages/test-helpers/Cargo.toml create mode 100644 packages/test-helpers/src/configuration.rs create mode 100644 packages/test-helpers/src/lib.rs create mode 100644 packages/test-helpers/src/random.rs delete mode 100644 
tests/api/server.rs create mode 100644 tests/api/test_environment.rs delete mode 100644 tests/udp/server.rs create mode 100644 tests/udp/test_environment.rs diff --git a/Cargo.lock b/Cargo.lock index cfd8aaba8..9045b7c47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2972,10 +2972,55 @@ dependencies = [ "thiserror", "tokio", "toml", + "torrust-tracker-configuration", + "torrust-tracker-located-error", + "torrust-tracker-primitives", + "torrust-tracker-test-helpers", "uuid 1.2.1", "warp", ] +[[package]] +name = "torrust-tracker-configuration" +version = "2.3.0" +dependencies = [ + "config", + "log", + "serde", + "serde_with", + "thiserror", + "toml", + "torrust-tracker-located-error", + "torrust-tracker-primitives", + "uuid 1.2.1", +] + +[[package]] +name = "torrust-tracker-located-error" +version = "2.3.0" +dependencies = [ + "log", + "thiserror", +] + +[[package]] +name = "torrust-tracker-primitives" +version = "2.3.0" +dependencies = [ + "derive_more", + "serde", +] + +[[package]] +name = "torrust-tracker-test-helpers" +version = "2.3.0" +dependencies = [ + "lazy_static", + "rand", + "tokio", + "torrust-tracker-configuration", +] + [[package]] name = "tower" version = "0.4.13" diff --git a/Cargo.toml b/Cargo.toml index fa126a152..740a5805e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,31 +1,19 @@ [package] -edition = "2021" name = "torrust-tracker" -version = "2.3.0" -license = "AGPL-3.0" -authors = ["Mick van Dijke "] description = "A feature rich BitTorrent tracker." 
-repository = "https://github.com/torrust/torrust-tracker" - -[profile.dev] -debug = 1 -opt-level = 1 -lto = "thin" +license = "AGPL-3.0" +authors.workspace = true +edition.workspace = true +version.workspace = true -[profile.release] -debug = 1 -opt-level = 3 -lto = "fat" +[workspace.package] +authors = ["Nautilus Cyberneering , Mick van Dijke "] +edition = "2021" +repository = "https://github.com/torrust/torrust-tracker" +version = "2.3.0" [dependencies] -tokio = { version = "1", features = [ - "rt-multi-thread", - "net", - "sync", - "macros", - "signal", -] } - +tokio = { version = "1", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2.3" serde_json = "1.0" @@ -34,37 +22,32 @@ hex = "0.4.3" percent-encoding = "2" binascii = "0.1" lazy_static = "1.4" - openssl = { version = "0.10", features = ["vendored"] } - warp = { version = "0.3", features = ["tls"] } - config = "0.13" toml = "0.5" - log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = "0.4" - r2d2 = "0.8" r2d2_mysql = "21" r2d2_sqlite = { version = "0.21", features = ["bundled"] } - rand = "0.8" derive_more = "0.99" thiserror = "1.0" futures = "0.3" async-trait = "0.1" - aquatic_udp_protocol = "0.2" uuid = { version = "1", features = ["v4"] } axum = "0.6.1" axum-server = { version = "0.4.4", features = ["tls-rustls"] } axum-client-ip = "0.4.0" bip_bencode = "0.4.4" +torrust-tracker-primitives = { path = "packages/primitives" } +torrust-tracker-configuration = { path = "packages/configuration" } +torrust-tracker-located-error = { path = "packages/located-error" } multimap = "0.8.3" - [dev-dependencies] mockall = "0.11" reqwest = { version = "0.11.13", features = ["json"] } @@ -72,3 +55,22 @@ serde_urlencoded = "0.7.1" serde_repr = "0.1.10" serde_bytes = "0.11.8" local-ip-address = "0.5.1" +torrust-tracker-test-helpers = { path = "packages/test-helpers" } + +[workspace] +members = [ + 
"packages/configuration", + "packages/primitives", + "packages/test-helpers", + "packages/located-error", +] + +[profile.dev] +debug = 1 +opt-level = 1 +lto = "thin" + +[profile.release] +debug = 1 +opt-level = 3 +lto = "fat" diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml new file mode 100644 index 000000000..a6f1740a0 --- /dev/null +++ b/packages/configuration/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "torrust-tracker-configuration" +version.workspace = true +authors.workspace = true +edition.workspace = true + +[dependencies] +serde = { version = "1.0", features = ["derive"] } +serde_with = "2.0" +config = "0.13" +toml = "0.5" +log = { version = "0.4", features = ["release_max_level_info"] } +thiserror = "1.0" +torrust-tracker-primitives = { path = "../primitives" } +torrust-tracker-located-error = { path = "../located-error" } +uuid = { version = "1", features = ["v4"] } diff --git a/src/config.rs b/packages/configuration/src/lib.rs similarity index 93% rename from src/config.rs rename to packages/configuration/src/lib.rs index 7ed0f9fa7..2121752c5 100644 --- a/src/config.rs +++ b/packages/configuration/src/lib.rs @@ -8,17 +8,13 @@ use std::{env, fs}; use config::{Config, ConfigError, File, FileFormat}; use log::warn; -use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; use thiserror::Error; -use {std, toml}; +use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; -use crate::databases::driver::Driver; -use crate::located_error::{Located, LocatedError}; -use crate::tracker::mode; - -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct UdpTracker { pub enabled: bool, pub bind_address: String, @@ -62,8 +58,8 @@ impl HttpApi { #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { pub 
log_level: Option, - pub mode: mode::Mode, - pub db_driver: Driver, + pub mode: TrackerMode, + pub db_driver: DatabaseDriver, pub db_path: String, pub announce_interval: u32, pub min_announce_interval: u32, @@ -122,41 +118,34 @@ pub fn ephemeral_configuration() -> Configuration { }; // Ephemeral socket address for API - let api_port = random_port(); + let api_port = 0u16; config.http_api.enabled = true; config.http_api.bind_address = format!("127.0.0.1:{}", &api_port); // Ephemeral socket address for UDP tracker - let upd_port = random_port(); + let udp_port = 0u16; config.udp_trackers[0].enabled = true; - config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &upd_port); + config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &udp_port); // Ephemeral socket address for HTTP tracker - let http_port = random_port(); + let http_port = 0u16; config.http_trackers[0].enabled = true; config.http_trackers[0].bind_address = format!("127.0.0.1:{}", &http_port); // Ephemeral sqlite database let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("data_{}_{}_{}.db", &api_port, &upd_port, &http_port)); + let temp_file = temp_directory.join(format!("data_{}_{}_{}.db", &api_port, &udp_port, &http_port)); config.db_path = temp_file.to_str().unwrap().to_owned(); config } -fn random_port() -> u16 { - // todo: this may produce random test failures because two tests can try to bind the same port. 
- // We could create a pool of available ports (with read/write lock) - let mut rng = thread_rng(); - rng.gen_range(49152..65535) -} - impl Default for Configuration { fn default() -> Self { let mut configuration = Configuration { log_level: Option::from(String::from("info")), - mode: mode::Mode::Public, - db_driver: Driver::Sqlite3, + mode: TrackerMode::Public, + db_driver: DatabaseDriver::Sqlite3, db_path: String::from("./storage/database/data.db"), announce_interval: 120, min_announce_interval: 120, @@ -266,7 +255,7 @@ impl Configuration { #[cfg(test)] mod tests { - use crate::config::Configuration; + use crate::Configuration; #[cfg(test)] fn default_config_toml() -> String { @@ -325,7 +314,7 @@ mod tests { fn configuration_should_contain_the_external_ip() { let configuration = Configuration::default(); - assert_eq!(configuration.external_ip, Option::Some(String::from("0.0.0.0"))); + assert_eq!(configuration.external_ip, Some(String::from("0.0.0.0"))); } #[test] diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml new file mode 100644 index 000000000..c4b2ef726 --- /dev/null +++ b/packages/located-error/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "torrust-tracker-located-error" +version.workspace = true +authors.workspace = true +edition.workspace = true + +[dependencies] +log = { version = "0.4", features = ["release_max_level_info"] } +thiserror = "1.0" diff --git a/src/located_error.rs b/packages/located-error/src/lib.rs similarity index 100% rename from src/located_error.rs rename to packages/located-error/src/lib.rs diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml new file mode 100644 index 000000000..9aec28384 --- /dev/null +++ b/packages/primitives/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "torrust-tracker-primitives" +version.workspace = true +authors.workspace = true +edition.workspace = true + +[dependencies] +serde = { version = "1.0", features = ["derive"] } +derive_more = "0.99" diff 
--git a/src/tracker/mode.rs b/packages/primitives/src/lib.rs similarity index 70% rename from src/tracker/mode.rs rename to packages/primitives/src/lib.rs index a0dba6e67..bcd48145f 100644 --- a/src/tracker/mode.rs +++ b/packages/primitives/src/lib.rs @@ -1,8 +1,14 @@ -use serde; use serde::{Deserialize, Serialize}; +// TODO: Move to the database crate once that gets its own crate. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, derive_more::Display, Clone)] +pub enum DatabaseDriver { + Sqlite3, + MySQL, +} + #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] -pub enum Mode { +pub enum TrackerMode { // Will track every new info hash and serve every peer. #[serde(rename = "public")] Public, diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml new file mode 100644 index 000000000..5be0e8aba --- /dev/null +++ b/packages/test-helpers/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "torrust-tracker-test-helpers" +version.workspace = true +authors.workspace = true +edition.workspace = true + +[dependencies] +tokio = { version = "1", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } +lazy_static = "1.4" +rand = "0.8.5" +torrust-tracker-configuration = { path = "../configuration"} diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs new file mode 100644 index 000000000..f7c584d55 --- /dev/null +++ b/packages/test-helpers/src/configuration.rs @@ -0,0 +1,34 @@ +use std::env; + +use torrust_tracker_configuration::Configuration; + +use crate::random; + +/// This configuration is used for testing. It generates random config values so they do not collide +/// if you run more than one tracker at the same time. 
+/// +/// # Panics +/// +/// Will panic if it can't convert the temp file path to string +#[must_use] +pub fn ephemeral() -> Configuration { + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; + + // Ephemeral socket addresses + let bind_addr = "127.0.0.1:0".to_string(); + + config.http_api.bind_address = bind_addr.to_string(); + config.udp_trackers[0].bind_address = bind_addr; + + // Ephemeral sqlite database + let temp_directory = env::temp_dir(); + let random_db_id = random::string(16); + let temp_file = temp_directory.join(format!("data_{random_db_id}.db")); + + config.db_path = temp_file.to_str().unwrap().to_owned(); + + config +} diff --git a/packages/test-helpers/src/lib.rs b/packages/test-helpers/src/lib.rs new file mode 100644 index 000000000..e0f350131 --- /dev/null +++ b/packages/test-helpers/src/lib.rs @@ -0,0 +1,2 @@ +pub mod configuration; +pub mod random; diff --git a/packages/test-helpers/src/random.rs b/packages/test-helpers/src/random.rs new file mode 100644 index 000000000..ffb2ccd6f --- /dev/null +++ b/packages/test-helpers/src/random.rs @@ -0,0 +1,7 @@ +use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; + +/// Returns a random alphanumeric string of a certain size. 
+pub fn string(size: usize) -> String { + thread_rng().sample_iter(&Alphanumeric).take(size).map(char::from).collect() +} diff --git a/src/apis/middlewares/auth.rs b/src/apis/middlewares/auth.rs index 758ba1cda..f2745d42e 100644 --- a/src/apis/middlewares/auth.rs +++ b/src/apis/middlewares/auth.rs @@ -5,9 +5,9 @@ use axum::http::Request; use axum::middleware::Next; use axum::response::{IntoResponse, Response}; use serde::Deserialize; +use torrust_tracker_configuration::{Configuration, HttpApi}; use crate::apis::responses::unhandled_rejection_response; -use crate::config::{Configuration, HttpApi}; #[derive(Deserialize, Debug)] pub struct QueryParams { diff --git a/src/apis/server.rs b/src/apis/server.rs index bbb3e5852..5ec22f253 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -1,15 +1,156 @@ -use std::net::SocketAddr; +use std::net::{SocketAddr, TcpListener}; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; use futures::Future; use log::info; +use tokio::task::JoinHandle; use warp::hyper; use super::routes::router; +use crate::signals::shutdown_signal_with_message; use crate::tracker::Tracker; +#[derive(Debug)] +pub enum Error { + Error(String), +} + +#[allow(clippy::module_name_repetitions)] +pub type StoppedApiServer = ApiServer; +#[allow(clippy::module_name_repetitions)] +pub type RunningApiServer = ApiServer; + +#[allow(clippy::module_name_repetitions)] +pub struct ApiServer { + pub cfg: torrust_tracker_configuration::HttpApi, + pub tracker: Arc, + pub state: S, +} + +pub struct Stopped; + +pub struct Running { + pub bind_address: SocketAddr, + stop_job_sender: tokio::sync::oneshot::Sender, + job: JoinHandle<()>, +} + +impl ApiServer { + pub fn new(cfg: torrust_tracker_configuration::HttpApi, tracker: Arc) -> Self { + Self { + cfg, + tracker, + state: Stopped {}, + } + } + + /// # Errors + /// + /// Will return `Err` if `TcpListener` can not bind to `bind_address`. 
+ pub fn start(self) -> Result, Error> { + let listener = TcpListener::bind(&self.cfg.bind_address).map_err(|e| Error::Error(e.to_string()))?; + + let bind_address = listener.local_addr().map_err(|e| Error::Error(e.to_string()))?; + + let cfg = self.cfg.clone(); + let tracker = self.tracker.clone(); + + let (sender, receiver) = tokio::sync::oneshot::channel::(); + + let job = tokio::spawn(async move { + if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (cfg.ssl_enabled, cfg.ssl_cert_path, cfg.ssl_key_path) { + let tls_config = RustlsConfig::from_pem_file(ssl_cert_path, ssl_key_path) + .await + .expect("Could not read ssl cert and/or key."); + + start_tls_from_tcp_listener_with_graceful_shutdown(listener, tls_config, &tracker, receiver) + .await + .expect("Could not start from tcp listener with tls."); + } else { + start_from_tcp_listener_with_graceful_shutdown(listener, &tracker, receiver) + .await + .expect("Could not start from tcp listener."); + } + }); + + let running_api_server: ApiServer = ApiServer { + cfg: self.cfg, + tracker: self.tracker, + state: Running { + bind_address, + stop_job_sender: sender, + job, + }, + }; + + Ok(running_api_server) + } +} + +impl ApiServer { + /// # Errors + /// + /// Will return `Err` if the oneshot channel to send the stop signal + /// has already been called once. 
+ pub async fn stop(self) -> Result, Error> { + self.state.stop_job_sender.send(1).map_err(|e| Error::Error(e.to_string()))?; + + let _ = self.state.job.await; + + let stopped_api_server: ApiServer = ApiServer { + cfg: self.cfg, + tracker: self.tracker, + state: Stopped {}, + }; + + Ok(stopped_api_server) + } +} + +pub fn start_from_tcp_listener_with_graceful_shutdown( + tcp_listener: TcpListener, + tracker: &Arc, + shutdown_signal: tokio::sync::oneshot::Receiver, +) -> impl Future> { + let app = router(tracker); + + let context = tcp_listener.local_addr().expect("Could not get context."); + + axum::Server::from_tcp(tcp_listener) + .expect("Could not bind to tcp listener.") + .serve(app.into_make_service()) + .with_graceful_shutdown(shutdown_signal_with_message( + shutdown_signal, + format!("Shutting down {context}.."), + )) +} + +pub fn start_tls_from_tcp_listener_with_graceful_shutdown( + tcp_listener: TcpListener, + tls_config: RustlsConfig, + tracker: &Arc, + shutdown_signal: tokio::sync::oneshot::Receiver, +) -> impl Future> { + let app = router(tracker); + + let context = tcp_listener.local_addr().expect("Could not get context."); + + let handle = Handle::new(); + + let cloned_handle = handle.clone(); + + tokio::spawn(async move { + shutdown_signal_with_message(shutdown_signal, format!("Shutting down {context}..")).await; + cloned_handle.shutdown(); + }); + + axum_server::from_tcp_rustls(tcp_listener, tls_config) + .handle(handle) + .serve(app.into_make_service()) +} + pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { let app = router(tracker); @@ -41,3 +182,36 @@ pub fn start_tls( .handle(handle) .serve(app.into_make_service()) } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration::ephemeral; + + use crate::apis::server::ApiServer; + use crate::tracker; + use crate::tracker::statistics; + + fn tracker_configuration() -> Arc { + 
Arc::new(ephemeral()) + } + + #[tokio::test] + async fn it_should_be_able_to_start_from_stopped_state_and_then_stop_again() { + let cfg = tracker_configuration(); + + let tracker = Arc::new(tracker::Tracker::new(&cfg, None, statistics::Repo::new()).unwrap()); + + let stopped_api_server = ApiServer::new(cfg.http_api.clone(), tracker); + + let running_api_server_result = stopped_api_server.start(); + + assert!(running_api_server_result.is_ok()); + + let running_api_server = running_api_server_result.unwrap(); + + assert!(running_api_server.stop().await.is_ok()); + } +} diff --git a/src/databases/driver.rs b/src/databases/driver.rs index c601f1866..4ce6ea515 100644 --- a/src/databases/driver.rs +++ b/src/databases/driver.rs @@ -1,30 +1,22 @@ -use serde::{Deserialize, Serialize}; +use torrust_tracker_primitives::DatabaseDriver; use super::error::Error; use super::mysql::Mysql; use super::sqlite::Sqlite; use super::{Builder, Database}; -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, derive_more::Display, Clone)] -pub enum Driver { - Sqlite3, - MySQL, -} - -impl Driver { - /// . - /// - /// # Errors - /// - /// This function will return an error if unable to connect to the database. - pub fn build(&self, db_path: &str) -> Result, Error> { - let database = match self { - Driver::Sqlite3 => Builder::::build(db_path), - Driver::MySQL => Builder::::build(db_path), - }?; +/// . +/// +/// # Errors +/// +/// This function will return an error if unable to connect to the database. 
+pub fn build(driver: &DatabaseDriver, db_path: &str) -> Result, Error> { + let database = match driver { + DatabaseDriver::Sqlite3 => Builder::::build(db_path), + DatabaseDriver::MySQL => Builder::::build(db_path), + }?; - database.create_database_tables().expect("Could not create database tables."); + database.create_database_tables().expect("Could not create database tables."); - Ok(database) - } + Ok(database) } diff --git a/src/databases/error.rs b/src/databases/error.rs index 4bee82f19..68b732190 100644 --- a/src/databases/error.rs +++ b/src/databases/error.rs @@ -2,47 +2,46 @@ use std::panic::Location; use std::sync::Arc; use r2d2_mysql::mysql::UrlError; - -use super::driver::Driver; -use crate::located_error::{Located, LocatedError}; +use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_primitives::DatabaseDriver; #[derive(thiserror::Error, Debug, Clone)] pub enum Error { #[error("The {driver} query unexpectedly returned nothing: {source}")] QueryReturnedNoRows { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - driver: Driver, + driver: DatabaseDriver, }, #[error("The {driver} query was malformed: {source}")] InvalidQuery { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - driver: Driver, + driver: DatabaseDriver, }, #[error("Unable to insert record into {driver} database, {location}")] InsertFailed { location: &'static Location<'static>, - driver: Driver, + driver: DatabaseDriver, }, #[error("Failed to remove record from {driver} database, error-code: {error_code}, {location}")] DeleteFailed { location: &'static Location<'static>, error_code: usize, - driver: Driver, + driver: DatabaseDriver, }, #[error("Failed to connect to {driver} database: {source}")] ConnectionError { source: LocatedError<'static, UrlError>, - driver: Driver, + driver: DatabaseDriver, }, #[error("Failed to create r2d2 {driver} connection pool: {source}")] ConnectionPool { source: LocatedError<'static, r2d2::Error>, - 
driver: Driver, + driver: DatabaseDriver, }, } @@ -52,11 +51,11 @@ impl From for Error { match err { r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows { source: (Arc::new(err) as Arc).into(), - driver: Driver::Sqlite3, + driver: DatabaseDriver::Sqlite3, }, _ => Error::InvalidQuery { source: (Arc::new(err) as Arc).into(), - driver: Driver::Sqlite3, + driver: DatabaseDriver::Sqlite3, }, } } @@ -68,7 +67,7 @@ impl From for Error { let e: Arc = Arc::new(err); Error::InvalidQuery { source: e.into(), - driver: Driver::MySQL, + driver: DatabaseDriver::MySQL, } } } @@ -78,14 +77,14 @@ impl From for Error { fn from(err: UrlError) -> Self { Self::ConnectionError { source: Located(err).into(), - driver: Driver::MySQL, + driver: DatabaseDriver::MySQL, } } } -impl From<(r2d2::Error, Driver)> for Error { +impl From<(r2d2::Error, DatabaseDriver)> for Error { #[track_caller] - fn from(e: (r2d2::Error, Driver)) -> Self { + fn from(e: (r2d2::Error, DatabaseDriver)) -> Self { let (err, driver) = e; Self::ConnectionPool { source: Located(err).into(), diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 4bb28f050..c8117a45c 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -7,14 +7,14 @@ use r2d2::Pool; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MysqlConnectionManager; +use torrust_tracker_primitives::DatabaseDriver; -use super::driver::Driver; use crate::databases::{Database, Error}; use crate::protocol::common::AUTH_KEY_LENGTH; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth::{self, Key}; -const DRIVER: Driver = Driver::MySQL; +const DRIVER: DatabaseDriver = DatabaseDriver::MySQL; pub struct Mysql { pool: Pool, diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 8fac09e47..4bf2931de 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -4,14 +4,14 @@ use std::str::FromStr; use async_trait::async_trait; 
use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; +use torrust_tracker_primitives::DatabaseDriver; -use super::driver::Driver; use crate::databases::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth::{self, Key}; -const DRIVER: Driver = Driver::Sqlite3; +const DRIVER: DatabaseDriver = DatabaseDriver::Sqlite3; pub struct Sqlite { pool: Pool, @@ -24,7 +24,7 @@ impl Database for Sqlite { /// Will return `r2d2::Error` if `db_path` is not able to create `SqLite` database. fn new(db_path: &str) -> Result { let cm = SqliteConnectionManager::file(db_path); - Pool::new(cm).map_or_else(|err| Err((err, Driver::Sqlite3).into()), |pool| Ok(Sqlite { pool })) + Pool::new(cm).map_or_else(|err| Err((err, DatabaseDriver::Sqlite3).into()), |pool| Ok(Sqlite { pool })) } fn create_database_tables(&self) -> Result<(), Error> { diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 9a92b243d..6458e2c2f 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -140,35 +140,37 @@ fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { mod tests { use std::sync::Arc; - use crate::config::{ephemeral_configuration, Configuration}; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::TrackerMode; + use torrust_tracker_test_helpers::configuration; + use crate::http::axum_implementation::requests::announce::Announce; use crate::http::axum_implementation::responses; use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; use crate::protocol::info_hash::InfoHash; - use crate::tracker::mode::Mode; use crate::tracker::statistics::Keeper; use crate::tracker::{peer, Tracker}; fn private_tracker() -> Tracker { - let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Private; + let mut 
configuration = configuration::ephemeral(); + configuration.mode = TrackerMode::Private; tracker_factory(configuration) } fn listed_tracker() -> Tracker { - let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Listed; + let mut configuration = configuration::ephemeral(); + configuration.mode = TrackerMode::Listed; tracker_factory(configuration) } fn tracker_on_reverse_proxy() -> Tracker { - let mut configuration = ephemeral_configuration(); + let mut configuration = configuration::ephemeral(); configuration.on_reverse_proxy = true; tracker_factory(configuration) } fn tracker_not_on_reverse_proxy() -> Tracker { - let mut configuration = ephemeral_configuration(); + let mut configuration = configuration::ephemeral(); configuration.on_reverse_proxy = false; tracker_factory(configuration) } diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index ee59b80a3..43bf6c99f 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -98,35 +98,37 @@ mod tests { use std::str::FromStr; use std::sync::Arc; - use crate::config::{ephemeral_configuration, Configuration}; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::TrackerMode; + use torrust_tracker_test_helpers::configuration; + use crate::http::axum_implementation::requests::scrape::Scrape; use crate::http::axum_implementation::responses; use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; use crate::protocol::info_hash::InfoHash; - use crate::tracker::mode::Mode; use crate::tracker::statistics::Keeper; use crate::tracker::Tracker; fn private_tracker() -> Tracker { - let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Private; + let mut configuration = configuration::ephemeral(); + configuration.mode = TrackerMode::Private; tracker_factory(configuration) } fn listed_tracker() -> Tracker { 
- let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Listed; + let mut configuration = configuration::ephemeral(); + configuration.mode = TrackerMode::Listed; tracker_factory(configuration) } fn tracker_on_reverse_proxy() -> Tracker { - let mut configuration = ephemeral_configuration(); + let mut configuration = configuration::ephemeral(); configuration.on_reverse_proxy = true; tracker_factory(configuration) } fn tracker_not_on_reverse_proxy() -> Tracker { - let mut configuration = ephemeral_configuration(); + let mut configuration = configuration::ephemeral(); configuration.on_reverse_proxy = false; tracker_factory(configuration) } diff --git a/src/http/axum_implementation/requests/announce.rs b/src/http/axum_implementation/requests/announce.rs index 0f9a6fbfe..6e357ea6d 100644 --- a/src/http/axum_implementation/requests/announce.rs +++ b/src/http/axum_implementation/requests/announce.rs @@ -3,11 +3,11 @@ use std::panic::Location; use std::str::FromStr; use thiserror::Error; +use torrust_tracker_located_error::{Located, LocatedError}; use crate::http::axum_implementation::query::{ParseQueryError, Query}; use crate::http::axum_implementation::responses; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; -use crate::located_error::{Located, LocatedError}; use crate::protocol::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; diff --git a/src/http/axum_implementation/requests/scrape.rs b/src/http/axum_implementation/requests/scrape.rs index da50d4be5..505be566e 100644 --- a/src/http/axum_implementation/requests/scrape.rs +++ b/src/http/axum_implementation/requests/scrape.rs @@ -1,11 +1,11 @@ use std::panic::Location; use thiserror::Error; +use torrust_tracker_located_error::{Located, LocatedError}; use crate::http::axum_implementation::query::Query; use crate::http::axum_implementation::responses; use crate::http::percent_encoding::percent_decode_info_hash; 
-use crate::located_error::{Located, LocatedError}; use crate::protocol::info_hash::{ConversionError, InfoHash}; pub type NumberOfBytes = i64; diff --git a/src/http/axum_implementation/services/announce.rs b/src/http/axum_implementation/services/announce.rs index 5ce0fb1d5..255a73c8f 100644 --- a/src/http/axum_implementation/services/announce.rs +++ b/src/http/axum_implementation/services/announce.rs @@ -29,17 +29,18 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::TrackerMode; + use torrust_tracker_test_helpers::configuration; - use crate::config::{ephemeral_configuration, Configuration}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; - use crate::tracker::mode::Mode; use crate::tracker::statistics::Keeper; use crate::tracker::{peer, Tracker}; fn public_tracker() -> Tracker { - let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Public; + let mut configuration = configuration::ephemeral(); + configuration.mode = TrackerMode::Public; tracker_factory(configuration) } @@ -93,9 +94,9 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::config::ephemeral_configuration; use crate::http::axum_implementation::services::announce::invoke; use crate::http::axum_implementation::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; use crate::tracker::peer::Peer; @@ -136,7 +137,7 @@ mod tests { let tracker = Arc::new( Tracker::new( - &Arc::new(ephemeral_configuration()), + &Arc::new(configuration::ephemeral()), Some(stats_event_sender), statistics::Repo::new(), ) @@ -149,7 +150,7 @@ mod tests { } fn tracker_with_an_ipv6_external_ip(stats_event_sender: Box) -> Tracker { - let mut configuration = ephemeral_configuration(); 
+ let mut configuration = configuration::ephemeral(); configuration.external_ip = Some(IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)).to_string()); @@ -200,7 +201,7 @@ mod tests { let tracker = Arc::new( Tracker::new( - &Arc::new(ephemeral_configuration()), + &Arc::new(configuration::ephemeral()), Some(stats_event_sender), statistics::Repo::new(), ) diff --git a/src/http/warp_implementation/error.rs b/src/http/warp_implementation/error.rs index f07c32f6d..55b22c27a 100644 --- a/src/http/warp_implementation/error.rs +++ b/src/http/warp_implementation/error.rs @@ -1,10 +1,9 @@ use std::panic::Location; use thiserror::Error; +use torrust_tracker_located_error::LocatedError; use warp::reject::Reject; -use crate::located_error::LocatedError; - #[derive(Error, Debug)] pub enum Error { #[error("tracker server error: {source}")] diff --git a/src/http/warp_implementation/filter_helpers.rs b/src/http/warp_implementation/filter_helpers.rs index 89188d868..583d38352 100644 --- a/src/http/warp_implementation/filter_helpers.rs +++ b/src/http/warp_implementation/filter_helpers.rs @@ -3,8 +3,7 @@ use std::panic::Location; use std::str::FromStr; use thiserror::Error; - -use crate::located_error::{Located, LocatedError}; +use torrust_tracker_located_error::{Located, LocatedError}; #[derive(Error, Debug)] pub enum XForwardedForParseError { diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index aa96af884..ce546f608 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -5,8 +5,8 @@ use axum_server::tls_rustls::RustlsConfig; use log::{info, warn}; use tokio::sync::oneshot; use tokio::task::JoinHandle; +use torrust_tracker_configuration::HttpTracker; -use crate::config::HttpTracker; use crate::http::axum_implementation::server; use crate::http::warp_implementation::server::Http; use crate::http::Version; diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 073ceda61..4c4ed1f53 100644 --- 
a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -3,8 +3,8 @@ use std::sync::Arc; use chrono::Utc; use log::info; use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; -use crate::config::Configuration; use crate::tracker; #[must_use] diff --git a/src/jobs/tracker_apis.rs b/src/jobs/tracker_apis.rs index 00e39eeba..85bb1b59f 100644 --- a/src/jobs/tracker_apis.rs +++ b/src/jobs/tracker_apis.rs @@ -4,9 +4,9 @@ use axum_server::tls_rustls::RustlsConfig; use log::info; use tokio::sync::oneshot; use tokio::task::JoinHandle; +use torrust_tracker_configuration::HttpApi; use crate::apis::server; -use crate::config::HttpApi; use crate::tracker; #[derive(Debug)] diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index d0907c976..468f6dbbd 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -2,8 +2,8 @@ use std::sync::Arc; use log::{error, info, warn}; use tokio::task::JoinHandle; +use torrust_tracker_configuration::UdpTracker; -use crate::config::UdpTracker; use crate::tracker; use crate::udp::server::Udp; diff --git a/src/lib.rs b/src/lib.rs index cbda2854c..f80bcfb6c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,9 +1,7 @@ pub mod apis; -pub mod config; pub mod databases; pub mod http; pub mod jobs; -pub mod located_error; pub mod logging; pub mod protocol; pub mod setup; @@ -32,3 +30,47 @@ pub mod ephemeral_instance_keys { pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); } } + +pub mod signals { + use log::info; + + /// Resolves on `ctrl_c` or the `terminate` signal. 
+ pub async fn global_shutdown_signal() { + let ctrl_c = async { + tokio::signal::ctrl_c().await.expect("failed to install Ctrl+C handler"); + }; + + #[cfg(unix)] + let terminate = async { + tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + }; + + #[cfg(not(unix))] + let terminate = std::future::pending::<()>(); + + tokio::select! { + _ = ctrl_c => {}, + _ = terminate => {} + } + } + + /// Resolves when the `stop_receiver` or the `global_shutdown_signal()` resolves. + pub async fn shutdown_signal(stop_receiver: tokio::sync::oneshot::Receiver) { + let stop = async { stop_receiver.await.expect("Failed to install stop signal.") }; + + tokio::select! { + _ = stop => {}, + _ = global_shutdown_signal() => {} + } + } + + /// Same as `shutdown_signal()`, but shows a message when it resolves. + pub async fn shutdown_signal_with_message(stop_receiver: tokio::sync::oneshot::Receiver, message: String) { + shutdown_signal(stop_receiver).await; + + info!("{message}"); + } +} diff --git a/src/logging.rs b/src/logging.rs index 4d16f7670..83e2c9360 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -2,8 +2,7 @@ use std::str::FromStr; use std::sync::Once; use log::{info, LevelFilter}; - -use crate::config::Configuration; +use torrust_tracker_configuration::Configuration; static INIT: Once = Once::new(); diff --git a/src/main.rs b/src/main.rs index 199e8f5c5..fcb8331a4 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,9 +2,9 @@ use std::env; use std::sync::Arc; use log::info; -use torrust_tracker::config::Configuration; use torrust_tracker::stats::setup_statistics; use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, tracker}; +use torrust_tracker_configuration::Configuration; #[tokio::main] async fn main() { diff --git a/src/setup.rs b/src/setup.rs index 98d311178..5b51632a7 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -2,8 +2,8 @@ use std::sync::Arc; use log::warn; 
use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; -use crate::config::Configuration; use crate::http::Version; use crate::jobs::{http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::tracker; diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 84252f667..00663c383 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -10,8 +10,8 @@ use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; use thiserror::Error; +use torrust_tracker_located_error::LocatedError; -use crate::located_error::LocatedError; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time, TimeNow}; use crate::protocol::common::AUTH_KEY_LENGTH; diff --git a/src/tracker/error.rs b/src/tracker/error.rs index 080903da6..10ca5ec19 100644 --- a/src/tracker/error.rs +++ b/src/tracker/error.rs @@ -1,6 +1,6 @@ use std::panic::Location; -use crate::located_error::LocatedError; +use torrust_tracker_located_error::LocatedError; #[derive(thiserror::Error, Debug, Clone)] pub enum Error { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 2ebc4bfc3..18ada69e0 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1,6 +1,5 @@ pub mod auth; pub mod error; -pub mod mode; pub mod peer; pub mod services; pub mod statistics; @@ -15,19 +14,19 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; +use torrust_tracker_configuration::Configuration; +use torrust_tracker_primitives::TrackerMode; use self::auth::Key; use self::error::Error; use self::peer::Peer; use self::torrent::{SwarmMetadata, SwarmStats}; -use crate::config::Configuration; -use crate::databases::driver::Driver; use crate::databases::{self, Database}; use crate::protocol::info_hash::InfoHash; pub struct Tracker { pub config: Arc, - mode: mode::Mode, + mode: TrackerMode, keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, @@ -96,7 +95,7 @@ impl Tracker { 
stats_event_sender: Option>, stats_repository: statistics::Repo, ) -> Result { - let database = Driver::build(&config.db_driver, &config.db_path)?; + let database = databases::driver::build(&config.db_driver, &config.db_path)?; Ok(Tracker { config: config.clone(), @@ -111,15 +110,15 @@ impl Tracker { } pub fn is_public(&self) -> bool { - self.mode == mode::Mode::Public + self.mode == TrackerMode::Public } pub fn is_private(&self) -> bool { - self.mode == mode::Mode::Private || self.mode == mode::Mode::PrivateListed + self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed } pub fn is_whitelisted(&self) -> bool { - self.mode == mode::Mode::Listed || self.mode == mode::Mode::PrivateListed + self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed } pub fn requires_authentication(&self) -> bool { @@ -554,35 +553,36 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::TrackerMode; + use torrust_tracker_test_helpers::configuration::{self}; - use crate::config::{ephemeral_configuration, Configuration}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; - use crate::tracker::mode::Mode; use crate::tracker::peer::{self, Peer}; use crate::tracker::statistics::Keeper; use crate::tracker::{TorrentsMetrics, Tracker}; pub fn public_tracker() -> Tracker { - let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Public; + let mut configuration = configuration::ephemeral(); + configuration.mode = TrackerMode::Public; tracker_factory(configuration) } pub fn private_tracker() -> Tracker { - let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Private; + let mut configuration = configuration::ephemeral(); + configuration.mode = TrackerMode::Private; tracker_factory(configuration) } pub fn whitelisted_tracker() -> Tracker { - 
let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Listed; + let mut configuration = configuration::ephemeral(); + configuration.mode = TrackerMode::Listed; tracker_factory(configuration) } pub fn tracker_persisting_torrents_in_database() -> Tracker { - let mut configuration = ephemeral_configuration(); + let mut configuration = configuration::ephemeral(); configuration.persistent_torrent_completed_stat = true; tracker_factory(configuration) } diff --git a/src/tracker/services/common.rs b/src/tracker/services/common.rs index 8757e6a21..39aa3cc0b 100644 --- a/src/tracker/services/common.rs +++ b/src/tracker/services/common.rs @@ -1,6 +1,7 @@ use std::sync::Arc; -use crate::config::Configuration; +use torrust_tracker_configuration::Configuration; + use crate::tracker::statistics::Keeper; use crate::tracker::Tracker; diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs index 745f5563c..c0aaf9c64 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics.rs @@ -36,13 +36,15 @@ pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { mod tests { use std::sync::Arc; - use crate::config::{ephemeral_configuration, Configuration}; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration::ephemeral; + use crate::tracker; use crate::tracker::services::common::tracker_factory; use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) + Arc::new(ephemeral()) } #[tokio::test] diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index e2353876e..ce652a091 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -137,14 +137,16 @@ mod tests { use std::str::FromStr; use std::sync::Arc; - use crate::config::{ephemeral_configuration, Configuration}; + use torrust_tracker_configuration::Configuration; + use 
torrust_tracker_test_helpers::configuration::ephemeral; + use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; use crate::tracker::services::torrent::tests::sample_peer; use crate::tracker::services::torrent::{get_torrent_info, Info}; pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) + Arc::new(ephemeral()) } #[tokio::test] @@ -190,14 +192,16 @@ mod tests { use std::str::FromStr; use std::sync::Arc; - use crate::config::{ephemeral_configuration, Configuration}; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration::ephemeral; + use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; use crate::tracker::services::torrent::tests::sample_peer; use crate::tracker::services::torrent::{get_torrents, BasicInfo, Pagination}; pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) + Arc::new(ephemeral()) } #[tokio::test] diff --git a/src/udp/error.rs b/src/udp/error.rs index de66eb2bf..a6381cc78 100644 --- a/src/udp/error.rs +++ b/src/udp/error.rs @@ -1,8 +1,7 @@ use std::panic::Location; use thiserror::Error; - -use crate::located_error::LocatedError; +use torrust_tracker_located_error::LocatedError; #[derive(Error, Debug)] pub enum Error { diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 8fda77fb4..4a0874c72 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -250,31 +250,33 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::TrackerMode; + use torrust_tracker_test_helpers::configuration::ephemeral; - use crate::config::{ephemeral_configuration, Configuration}; use crate::protocol::clock::{Current, Time}; - use crate::tracker::{self, mode, peer, statistics}; + use crate::tracker::{self, peer, statistics}; fn tracker_configuration() -> Arc { 
Arc::new(default_testing_tracker_configuration()) } fn default_testing_tracker_configuration() -> Configuration { - ephemeral_configuration() + ephemeral() } fn initialized_public_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Mode::Public).into()); + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); initialized_tracker(&configuration) } fn initialized_private_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Mode::Private).into()); + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); initialized_tracker(&configuration) } fn initialized_whitelisted_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Mode::Listed).into()); + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); initialized_tracker(&configuration) } @@ -354,7 +356,7 @@ mod tests { self } - pub fn with_mode(mut self, mode: mode::Mode) -> Self { + pub fn with_mode(mut self, mode: TrackerMode) -> Self { self.configuration.mode = mode; self } diff --git a/src/udp/server.rs b/src/udp/server.rs index e85c81e9d..f74468189 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -1,25 +1,113 @@ +use std::future::Future; use std::io::Cursor; use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::Response; +use futures::pin_mut; use log::{debug, error, info}; use tokio::net::UdpSocket; +use tokio::task::JoinHandle; -use crate::tracker; +use crate::signals::shutdown_signal; +use crate::tracker::Tracker; use crate::udp::handlers::handle_packet; use crate::udp::MAX_PACKET_SIZE; +#[derive(Debug)] +pub enum Error { + Error(String), +} + +#[allow(clippy::module_name_repetitions)] +pub type StoppedUdpServer = UdpServer; +#[allow(clippy::module_name_repetitions)] +pub 
type RunningUdpServer = UdpServer; + +#[allow(clippy::module_name_repetitions)] +pub struct UdpServer { + pub cfg: torrust_tracker_configuration::UdpTracker, + pub tracker: Arc, + pub state: S, +} + +pub struct Stopped; + +pub struct Running { + pub bind_address: SocketAddr, + stop_job_sender: tokio::sync::oneshot::Sender, + job: JoinHandle<()>, +} + +impl UdpServer { + pub fn new(cfg: torrust_tracker_configuration::UdpTracker, tracker: Arc) -> Self { + Self { + cfg, + tracker, + state: Stopped {}, + } + } + + /// # Errors + /// + /// Will return `Err` if UDP can't bind to given bind address. + pub async fn start(self) -> Result, Error> { + let udp = Udp::new(self.tracker.clone(), &self.cfg.bind_address) + .await + .map_err(|e| Error::Error(e.to_string()))?; + + let bind_address = udp.socket.local_addr().map_err(|e| Error::Error(e.to_string()))?; + + let (sender, receiver) = tokio::sync::oneshot::channel::(); + + let job = tokio::spawn(async move { + udp.start_with_graceful_shutdown(shutdown_signal(receiver)).await; + }); + + let running_udp_server: UdpServer = UdpServer { + cfg: self.cfg, + tracker: self.tracker, + state: Running { + bind_address, + stop_job_sender: sender, + job, + }, + }; + + Ok(running_udp_server) + } +} + +impl UdpServer { + /// # Errors + /// + /// Will return `Err` if the oneshot channel to send the stop signal + /// has already been called once. + pub async fn stop(self) -> Result, Error> { + self.state.stop_job_sender.send(1).map_err(|e| Error::Error(e.to_string()))?; + + let _ = self.state.job.await; + + let stopped_api_server: UdpServer = UdpServer { + cfg: self.cfg, + tracker: self.tracker, + state: Stopped {}, + }; + + Ok(stopped_api_server) + } +} + pub struct Udp { socket: Arc, - tracker: Arc, + tracker: Arc, } impl Udp { /// # Errors /// /// Will return `Err` unable to bind to the supplied `bind_address`. 
- pub async fn new(tracker: Arc, bind_address: &str) -> tokio::io::Result { + pub async fn new(tracker: Arc, bind_address: &str) -> tokio::io::Result { let socket = UdpSocket::bind(bind_address).await?; Ok(Udp { @@ -57,6 +145,41 @@ impl Udp { } } + /// # Panics + /// + /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. + async fn start_with_graceful_shutdown(&self, shutdown_signal: F) + where + F: Future, + { + // Pin the future so that it doesn't move to the first loop iteration. + pin_mut!(shutdown_signal); + + loop { + let mut data = [0; MAX_PACKET_SIZE]; + let socket = self.socket.clone(); + let tracker = self.tracker.clone(); + + tokio::select! { + _ = &mut shutdown_signal => { + info!("Stopping UDP server: {}..", self.socket.local_addr().unwrap()); + break; + } + Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { + let payload = data[..valid_bytes].to_vec(); + + info!("Received {} bytes", payload.len()); + debug!("From: {}", &remote_addr); + debug!("Payload: {:?}", payload); + + let response = handle_packet(remote_addr, payload, tracker).await; + + Udp::send_response(socket, remote_addr, response).await; + } + } + } + } + async fn send_response(socket: Arc, remote_addr: SocketAddr, response: Response) { let buffer = vec![0u8; MAX_PACKET_SIZE]; let mut cursor = Cursor::new(buffer); diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 8dd6f4c53..fcb24e491 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -5,7 +5,7 @@ use torrust_tracker::tracker::Tracker; pub mod asserts; pub mod client; pub mod connection_info; -pub mod server; +pub mod test_environment; /// It forces a database error by dropping all tables. /// That makes any query fail. 
diff --git a/tests/api/server.rs b/tests/api/server.rs deleted file mode 100644 index 0e23a4320..000000000 --- a/tests/api/server.rs +++ /dev/null @@ -1,78 +0,0 @@ -use core::panic; -use std::sync::Arc; - -use torrust_tracker::config::{ephemeral_configuration, Configuration}; -use torrust_tracker::jobs::tracker_apis; -use torrust_tracker::protocol::info_hash::InfoHash; -use torrust_tracker::tracker::peer::Peer; -use torrust_tracker::tracker::statistics::Keeper; -use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; - -use super::connection_info::ConnectionInfo; - -pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) -} - -pub async fn start_default_api() -> Server { - let configuration = tracker_configuration(); - start_custom_api(configuration.clone()).await -} - -pub async fn start_custom_api(configuration: Arc) -> Server { - let server = start(&configuration); - tracker_apis::start_job(&configuration.http_api, server.tracker.clone()).await; - server -} - -fn start(configuration: &Arc) -> Server { - let connection_info = ConnectionInfo::authenticated( - &configuration.http_api.bind_address.clone(), - &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), - ); - - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - - // Initialize logging - logging::setup(configuration); - - Server { - tracker, - connection_info, - } -} - -pub struct Server { - pub tracker: Arc, - pub connection_info: 
ConnectionInfo, -} - -impl Server { - pub fn get_connection_info(&self) -> ConnectionInfo { - self.connection_info.clone() - } - - pub fn get_bind_address(&self) -> String { - self.connection_info.bind_address.clone() - } - - /// Add a torrent to the tracker - pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { - self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - } -} diff --git a/tests/api/test_environment.rs b/tests/api/test_environment.rs new file mode 100644 index 000000000..ff143ec7a --- /dev/null +++ b/tests/api/test_environment.rs @@ -0,0 +1,134 @@ +use core::panic; +use std::sync::Arc; + +use torrust_tracker::apis::server::{ApiServer, RunningApiServer, StoppedApiServer}; +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::tracker::peer::Peer; +use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::tracker::Tracker; +use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; +use torrust_tracker_configuration::Configuration; +use torrust_tracker_test_helpers::configuration::ephemeral; + +use super::connection_info::ConnectionInfo; + +#[allow(clippy::module_name_repetitions, dead_code)] +pub type StoppedTestEnvironment = TestEnvironment; +#[allow(clippy::module_name_repetitions)] +pub type RunningTestEnvironment = TestEnvironment; + +pub struct TestEnvironment { + pub tracker: Arc, + pub state: S, +} + +#[allow(dead_code)] +pub struct Stopped { + api_server: StoppedApiServer, +} + +pub struct Running { + api_server: RunningApiServer, +} + +impl TestEnvironment { + /// Add a torrent to the tracker + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + } +} + +impl TestEnvironment { + #[allow(dead_code)] + pub fn new_stopped() -> Self { + let api_server = api_server(); + + Self { + tracker: api_server.tracker.clone(), + state: Stopped { api_server }, + } + } + + 
#[allow(dead_code)] + pub fn start(self) -> TestEnvironment { + TestEnvironment { + tracker: self.tracker, + state: Running { + api_server: self.state.api_server.start().unwrap(), + }, + } + } +} + +impl TestEnvironment { + pub fn new_running() -> Self { + let api_server = running_api_server(); + + Self { + tracker: api_server.tracker.clone(), + state: Running { api_server }, + } + } + + pub async fn stop(self) -> TestEnvironment { + TestEnvironment { + tracker: self.tracker, + state: Stopped { + api_server: self.state.api_server.stop().await.unwrap(), + }, + } + } + + pub fn get_connection_info(&self) -> ConnectionInfo { + ConnectionInfo { + bind_address: self.state.api_server.state.bind_address.to_string(), + api_token: self.state.api_server.cfg.access_tokens.get("admin").cloned(), + } + } +} + +#[allow(clippy::module_name_repetitions)] +pub fn running_test_environment() -> RunningTestEnvironment { + TestEnvironment::new_running() +} + +pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral()) +} + +// TODO: Move to test-helpers crate once `Tracker` is isolated. 
+pub fn tracker_instance(configuration: &Arc) -> Arc { + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + let tracker = match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + + // Initialize logging + logging::setup(configuration); + + tracker +} + +pub fn api_server() -> StoppedApiServer { + let config = tracker_configuration(); + + let tracker = tracker_instance(&config); + + ApiServer::new(config.http_api.clone(), tracker) +} + +pub fn running_api_server() -> RunningApiServer { + api_server().start().unwrap() +} diff --git a/tests/http/server.rs b/tests/http/server.rs index 1c8d1cb77..147ad93c1 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -2,35 +2,35 @@ use core::panic; use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; -use torrust_tracker::config::{ephemeral_configuration, Configuration}; use torrust_tracker::http::Version; use torrust_tracker::jobs::http_tracker; use torrust_tracker::protocol::info_hash::InfoHash; -use torrust_tracker::tracker::mode::Mode; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; +use torrust_tracker_configuration::{ephemeral_configuration, Configuration}; +use torrust_tracker_primitives::TrackerMode; use super::connection_info::ConnectionInfo; /// Starts a HTTP tracker with mode "public" in settings pub async fn start_public_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Public; + 
configuration.mode = TrackerMode::Public; start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "listed" in settings pub async fn start_whitelisted_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Listed; + configuration.mode = TrackerMode::Listed; start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "private" in settings pub async fn start_private_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Private; + configuration.mode = TrackerMode::Private; start_custom_http_tracker(Arc::new(configuration), version).await } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 35d9af248..ccdcded5e 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -27,74 +27,84 @@ mod tracker_apis { mod authentication { use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; use crate::api::client::Client; - use crate::api::server::start_default_api; + use crate::api::test_environment::running_test_environment; use crate::common::http::{Query, QueryParam}; #[tokio::test] async fn should_authenticate_requests_by_using_a_token_query_param() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); - let token = api_server.get_connection_info().api_token.unwrap(); + let token = test_env.get_connection_info().api_token.unwrap(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec())) .await; assert_eq!(response.status(), 200); + + test_env.stop().await; } #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_missing() { - let api_server = start_default_api().await; + let test_env = 
running_test_environment(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_request_with_query("stats", Query::default()) .await; assert_unauthorized(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_empty() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", "")].to_vec())) .await; assert_token_not_valid(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_invalid() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) .await; assert_token_not_valid(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); - let token = api_server.get_connection_info().api_token.unwrap(); + let token = test_env.get_connection_info().api_token.unwrap(); // At the beginning of the query component - let response = Client::new(api_server.get_connection_info()) - .get_request(&format!("torrents?token={}&limit=1", &token)) + let response = Client::new(test_env.get_connection_info()) + .get_request(&format!("torrents?token={token}&limit=1")) .await; assert_eq!(response.status(), 200); // At the end of the query component - let response = Client::new(api_server.get_connection_info()) - 
.get_request(&format!("torrents?limit=1&token={}", &token)) + let response = Client::new(test_env.get_connection_info()) + .get_request(&format!("torrents?limit=1&token={token}")) .await; assert_eq!(response.status(), 200); + + test_env.stop().await; } } @@ -107,21 +117,21 @@ mod tracker_apis { use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::server::start_default_api; + use crate::api::test_environment::running_test_environment; use crate::common::fixtures::PeerBuilder; #[tokio::test] async fn should_allow_getting_tracker_statistics() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); - api_server + test_env .add_torrent_peer( &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), &PeerBuilder::default().into(), ) .await; - let response = Client::new(api_server.get_connection_info()).get_tracker_statistics().await; + let response = Client::new(test_env.get_connection_info()).get_tracker_statistics().await; assert_stats( response, @@ -145,23 +155,29 @@ mod tracker_apis { }, ) .await; + + test_env.stop().await; } #[tokio::test] async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .get_tracker_statistics() - .await; + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .get_tracker_statistics() + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) 
.get_tracker_statistics() .await; assert_unauthorized(response).await; + + test_env.stop().await; } } @@ -179,21 +195,19 @@ mod tracker_apis { }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::server::start_default_api; + use crate::api::test_environment::running_test_environment; use crate::common::fixtures::PeerBuilder; use crate::common::http::{Query, QueryParam}; #[tokio::test] async fn should_allow_getting_torrents() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - api_server.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; + test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; - let response = Client::new(api_server.get_connection_info()) - .get_torrents(Query::empty()) - .await; + let response = Client::new(test_env.get_connection_info()).get_torrents(Query::empty()).await; assert_torrent_list( response, @@ -206,24 +220,22 @@ mod tracker_apis { }], ) .await; + + test_env.stop().await; } #[tokio::test] async fn should_allow_limiting_the_torrents_in_the_result() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - api_server - .add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()) - .await; - api_server - .add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()) - .await; + test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; - let response = Client::new(api_server.get_connection_info()) + let 
response = Client::new(test_env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) .await; @@ -238,24 +250,22 @@ mod tracker_apis { }], ) .await; + + test_env.stop().await; } #[tokio::test] async fn should_allow_the_torrents_result_pagination() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - api_server - .add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()) - .await; - api_server - .add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()) - .await; + test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) .await; @@ -270,66 +280,76 @@ mod tracker_apis { }], ) .await; + + test_env.stop().await; } #[tokio::test] async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; for invalid_offset in &invalid_offsets { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) .await; assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; } + + test_env.stop().await; } #[tokio::test] async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { - 
let api_server = start_default_api().await; + let test_env = running_test_environment(); let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; for invalid_limit in &invalid_limits { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) .await; assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; } + + test_env.stop().await; } #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .get_torrents(Query::empty()) - .await; + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .get_torrents(Query::empty()) + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) .get_torrents(Query::default()) .await; assert_unauthorized(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_allow_getting_a_torrent_info() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let peer = PeerBuilder::default().into(); - api_server.add_torrent_peer(&info_hash, &peer).await; + test_env.add_torrent_peer(&info_hash, &peer).await; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; @@ -344,27 +364,31 @@ mod tracker_apis { }, ) .await; + + test_env.stop().await; 
} #[tokio::test] async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; assert_torrent_not_known(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_torrent(invalid_infohash) .await; @@ -372,33 +396,39 @@ mod tracker_apis { } for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_torrent(invalid_infohash) .await; assert_not_found(response).await; } + + test_env.stop().await; } #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - api_server.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; + test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .get_torrent(&info_hash.to_string()) - .await; + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + 
.get_torrent(&info_hash.to_string()) + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) .get_torrent(&info_hash.to_string()) .await; assert_unauthorized(response).await; + + test_env.stop().await; } } @@ -416,82 +446,92 @@ mod tracker_apis { use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::force_database_error; - use crate::api::server::start_default_api; + use crate::api::test_environment::running_test_environment; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .whitelist_a_torrent(&info_hash) .await; assert_ok(response).await; assert!( - api_server + test_env .tracker .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) .await ); + + test_env.stop().await; } #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let api_client = Client::new(api_server.get_connection_info()); + let api_client = Client::new(test_env.get_connection_info()); let response = api_client.whitelist_a_torrent(&info_hash).await; assert_ok(response).await; let response = api_client.whitelist_a_torrent(&info_hash).await; assert_ok(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - let api_server = 
start_default_api().await; + let test_env = running_test_environment(); let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .whitelist_a_torrent(&info_hash) - .await; + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .whitelist_a_torrent(&info_hash) + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) .whitelist_a_torrent(&info_hash) .await; assert_unauthorized(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_whitelisted() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - force_database_error(&api_server.tracker); + force_database_error(&test_env.tracker); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .whitelist_a_torrent(&info_hash) .await; assert_failed_to_whitelist_torrent(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .whitelist_a_torrent(invalid_infohash) .await; @@ -499,49 +539,55 @@ mod tracker_apis { } for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(api_server.get_connection_info()) + let response = 
Client::new(test_env.get_connection_info()) .whitelist_a_torrent(invalid_infohash) .await; assert_not_found(response).await; } + + test_env.stop().await; } #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .remove_torrent_from_whitelist(&hash) .await; assert_ok(response).await; - assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); + assert!(!test_env.tracker.is_info_hash_whitelisted(&info_hash).await); + + test_env.stop().await; } #[tokio::test] async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) .await; assert_ok(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .remove_torrent_from_whitelist(invalid_infohash) .await; @@ -549,89 +595,101 @@ mod tracker_apis 
{ } for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .remove_torrent_from_whitelist(invalid_infohash) .await; assert_not_found(response).await; } + + test_env.stop().await; } #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - force_database_error(&api_server.tracker); + force_database_error(&test_env.tracker); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .remove_torrent_from_whitelist(&hash) .await; assert_failed_to_remove_torrent_from_whitelist(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .remove_torrent_from_whitelist(&hash) - .await; + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .remove_torrent_from_whitelist(&hash) + .await; assert_token_not_valid(response).await; - 
api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) .remove_torrent_from_whitelist(&hash) .await; assert_unauthorized(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_allow_reload_the_whitelist_from_the_database() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; + let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; assert_ok(response).await; /* todo: this assert fails because the whitelist has not been reloaded yet. We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent is whitelisted and use that endpoint to check if the torrent is still there after reloading. 
assert!( - !(api_server + !(test_env .tracker .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) .await) ); */ + + test_env.stop().await; } #[tokio::test] async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - force_database_error(&api_server.tracker); + force_database_error(&test_env.tracker); - let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; + let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; assert_failed_to_reload_whitelist(response).await; + + test_env.stop().await; } } @@ -648,50 +706,56 @@ mod tracker_apis { use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::force_database_error; - use crate::api::server::start_default_api; + use crate::api::test_environment::running_test_environment; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let seconds_valid = 60; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .generate_auth_key(seconds_valid) .await; let auth_key_resource = assert_auth_key_utf8(response).await; // Verify the key with the tracker - assert!(api_server + assert!(test_env .tracker .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); + + test_env.stop().await; } #[tokio::test] async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - let api_server = 
start_default_api().await; + let test_env = running_test_environment(); let seconds_valid = 60; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .generate_auth_key(seconds_valid) - .await; + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .generate_auth_key(seconds_valid) + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) .generate_auth_key(seconds_valid) .await; assert_unauthorized(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let invalid_key_durations = [ // "", it returns 404 @@ -700,49 +764,55 @@ mod tracker_apis { ]; for invalid_key_duration in &invalid_key_durations { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .post(&format!("key/{}", &invalid_key_duration)) .await; assert_invalid_key_duration_param(response, invalid_key_duration).await; } + + test_env.stop().await; } #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_generated() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); - force_database_error(&api_server.tracker); + force_database_error(&test_env.tracker); let seconds_valid = 60; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .generate_auth_key(seconds_valid) .await; assert_failed_to_generate_key(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_allow_deleting_an_auth_key() { - let api_server = start_default_api().await; 
+ let test_env = running_test_environment(); let seconds_valid = 60; - let auth_key = api_server + let auth_key = test_env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .delete_auth_key(&auth_key.key.to_string()) .await; assert_ok(response).await; + + test_env.stop().await; } #[tokio::test] - async fn should_fail_deleting_an_auth_key_when_the_key_is_invalid() { - let api_server = start_default_api().await; + async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { + let test_env = running_test_environment(); let invalid_auth_keys = [ // "", it returns a 404 @@ -755,123 +825,139 @@ mod tracker_apis { ]; for invalid_auth_key in &invalid_auth_keys { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .delete_auth_key(invalid_auth_key) .await; assert_invalid_auth_key_param(response, invalid_auth_key).await; } + + test_env.stop().await; } #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_deleted() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let seconds_valid = 60; - let auth_key = api_server + let auth_key = test_env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - force_database_error(&api_server.tracker); + force_database_error(&test_env.tracker); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .delete_auth_key(&auth_key.key.to_string()) .await; assert_failed_to_delete_key(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let seconds_valid = 60; // Generate new auth key - let 
auth_key = api_server + let auth_key = test_env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .delete_auth_key(&auth_key.key.to_string()) - .await; + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .delete_auth_key(&auth_key.key.to_string()) + .await; assert_token_not_valid(response).await; // Generate new auth key - let auth_key = api_server + let auth_key = test_env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) .delete_auth_key(&auth_key.key.to_string()) .await; assert_unauthorized(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_allow_reloading_keys() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let seconds_valid = 60; - api_server + test_env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - let response = Client::new(api_server.get_connection_info()).reload_keys().await; + let response = Client::new(test_env.get_connection_info()).reload_keys().await; assert_ok(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_fail_when_keys_cannot_be_reloaded() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let seconds_valid = 60; - api_server + test_env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - force_database_error(&api_server.tracker); + force_database_error(&test_env.tracker); - let response = Client::new(api_server.get_connection_info()).reload_keys().await; + let response = 
Client::new(test_env.get_connection_info()).reload_keys().await; assert_failed_to_reload_keys(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let seconds_valid = 60; - api_server + test_env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .reload_keys() - .await; + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .reload_keys() + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) .reload_keys() .await; assert_unauthorized(response).await; + + test_env.stop().await; } } } diff --git a/tests/udp/client.rs b/tests/udp/client.rs index 3cb4d6134..0bec03d03 100644 --- a/tests/udp/client.rs +++ b/tests/udp/client.rs @@ -1,41 +1,54 @@ use std::io::Cursor; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::sync::Arc; use aquatic_udp_protocol::{Request, Response}; -use rand::{thread_rng, Rng}; +use tokio::net::UdpSocket; use torrust_tracker::udp::MAX_PACKET_SIZE; -use crate::common::udp::Client as UdpClient; +use crate::udp::source_address; -/// Creates a new generic UDP client connected to a generic UDP server -pub async fn new_udp_client_connected(remote_address: &SocketAddr) -> UdpClient { - let local_address = loopback_socket_address(ephemeral_random_client_port()); - UdpClient::connected(remote_address, &local_address).await +#[allow(clippy::module_name_repetitions)] +pub struct UdpClient { + pub socket: Arc, } -/// Creates a new UDP tracker client connected to a UDP tracker server -pub async fn 
new_udp_tracker_client_connected(remote_address: &SocketAddr) -> Client { - let udp_client = new_udp_client_connected(remote_address).await; - Client { udp_client } -} +impl UdpClient { + pub async fn bind(local_address: &str) -> Self { + let socket = UdpSocket::bind(local_address).await.unwrap(); + Self { + socket: Arc::new(socket), + } + } -pub fn ephemeral_random_client_port() -> u16 { - // todo: this may produce random test failures because two tests can try to bind the same port. - // We could create a pool of available ports (with read/write lock) - let mut rng = thread_rng(); - rng.gen_range(49152..65535) + pub async fn connect(&self, remote_address: &str) { + self.socket.connect(remote_address).await.unwrap(); + } + + pub async fn send(&self, bytes: &[u8]) -> usize { + self.socket.writable().await.unwrap(); + self.socket.send(bytes).await.unwrap() + } + + pub async fn receive(&self, bytes: &mut [u8]) -> usize { + self.socket.readable().await.unwrap(); + self.socket.recv(bytes).await.unwrap() + } } -fn loopback_socket_address(port: u16) -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port) +/// Creates a new `UdpClient` connected to a Udp server +pub async fn new_udp_client_connected(remote_address: &str) -> UdpClient { + let port = 0; // Let OS choose an unused port. 
+ let client = UdpClient::bind(&source_address(port)).await; + client.connect(remote_address).await; + client } -/// A UDP tracker client -pub struct Client { - pub udp_client: UdpClient, // A generic UDP client +#[allow(clippy::module_name_repetitions)] +pub struct UdpTrackerClient { + pub udp_client: UdpClient, } -impl Client { +impl UdpTrackerClient { pub async fn send(&self, request: Request) -> usize { // Write request into a buffer let request_buffer = vec![0u8; MAX_PACKET_SIZE]; @@ -63,3 +76,9 @@ impl Client { Response::from_bytes(&response_buffer[..payload_size], true).unwrap() } } + +/// Creates a new `UdpTrackerClient` connected to a Udp Tracker server +pub async fn new_udp_tracker_client_connected(remote_address: &str) -> UdpTrackerClient { + let udp_client = new_udp_client_connected(remote_address).await; + UdpTrackerClient { udp_client } +} diff --git a/tests/udp/mod.rs b/tests/udp/mod.rs index 16a77bb99..f45a4a4f9 100644 --- a/tests/udp/mod.rs +++ b/tests/udp/mod.rs @@ -1,3 +1,8 @@ pub mod asserts; pub mod client; -pub mod server; +pub mod test_environment; + +/// Generates the source address for the UDP client +fn source_address(port: u16) -> String { + format!("127.0.0.1:{port}") +} diff --git a/tests/udp/server.rs b/tests/udp/server.rs deleted file mode 100644 index 401d4cf92..000000000 --- a/tests/udp/server.rs +++ /dev/null @@ -1,67 +0,0 @@ -use std::net::SocketAddr; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; - -use tokio::task::JoinHandle; -use torrust_tracker::config::{ephemeral_configuration, Configuration}; -use torrust_tracker::jobs::udp_tracker; -use torrust_tracker::tracker::statistics::Keeper; -use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; - -pub fn start_udp_tracker(configuration: &Arc) -> Server { - let mut udp_server = Server::new(); - udp_server.start(configuration); - udp_server -} - -pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) -} -pub struct 
Server { - pub started: AtomicBool, - pub job: Option>, - pub bind_address: Option, -} - -impl Server { - pub fn new() -> Self { - Self { - started: AtomicBool::new(false), - job: None, - bind_address: None, - } - } - - pub fn start(&mut self, configuration: &Arc) { - if !self.started.load(Ordering::Relaxed) { - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - - // Initialize logging - logging::setup(configuration); - - let udp_tracker_config = &configuration.udp_trackers[0]; - - // Start the UDP tracker job - self.job = Some(udp_tracker::start_job(udp_tracker_config, tracker)); - - self.bind_address = Some(udp_tracker_config.bind_address.parse().unwrap()); - - self.started.store(true, Ordering::Relaxed); - } - } -} diff --git a/tests/udp/test_environment.rs b/tests/udp/test_environment.rs new file mode 100644 index 000000000..e53a7a580 --- /dev/null +++ b/tests/udp/test_environment.rs @@ -0,0 +1,131 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::tracker::peer::Peer; +use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::tracker::Tracker; +use torrust_tracker::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; +use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; +use torrust_tracker_configuration::Configuration; +use torrust_tracker_test_helpers::configuration::ephemeral; + +fn tracker_configuration() -> Arc { + Arc::new(ephemeral()) 
+} + +#[allow(clippy::module_name_repetitions, dead_code)] +pub type StoppedTestEnvironment = TestEnvironment; +#[allow(clippy::module_name_repetitions)] +pub type RunningTestEnvironment = TestEnvironment; + +pub struct TestEnvironment { + pub tracker: Arc, + pub state: S, +} + +#[allow(dead_code)] +pub struct Stopped { + api_server: StoppedUdpServer, +} + +pub struct Running { + api_server: RunningUdpServer, +} + +impl TestEnvironment { + /// Add a torrent to the tracker + #[allow(dead_code)] + pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + } +} + +impl TestEnvironment { + #[allow(dead_code)] + pub fn new_stopped() -> Self { + let udp_server = udp_server(); + + Self { + tracker: udp_server.tracker.clone(), + state: Stopped { api_server: udp_server }, + } + } + + #[allow(dead_code)] + pub async fn start(self) -> TestEnvironment { + TestEnvironment { + tracker: self.tracker, + state: Running { + api_server: self.state.api_server.start().await.unwrap(), + }, + } + } +} + +impl TestEnvironment { + pub async fn new_running() -> Self { + let udp_server = running_udp_server().await; + + Self { + tracker: udp_server.tracker.clone(), + state: Running { api_server: udp_server }, + } + } + + #[allow(dead_code)] + pub async fn stop(self) -> TestEnvironment { + TestEnvironment { + tracker: self.tracker, + state: Stopped { + api_server: self.state.api_server.stop().await.unwrap(), + }, + } + } + + pub fn bind_address(&self) -> SocketAddr { + self.state.api_server.state.bind_address + } +} + +#[allow(clippy::module_name_repetitions)] +pub async fn running_test_environment() -> RunningTestEnvironment { + TestEnvironment::new_running().await +} + +// TODO: Move to test-helpers crate once `Tracker` is isolated. 
+pub fn tracker_instance(configuration: &Arc) -> Arc { + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + let tracker = match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + + // Initialize logging + logging::setup(configuration); + + tracker +} + +pub fn udp_server() -> StoppedUdpServer { + let config = tracker_configuration(); + + let tracker = tracker_instance(&config); + + UdpServer::new(config.udp_trackers[0].clone(), tracker) +} + +pub async fn running_udp_server() -> RunningUdpServer { + udp_server().start().await.unwrap() +} diff --git a/tests/udp_tracker.rs b/tests/udp_tracker.rs index 0287d01b7..b7cc3bd6f 100644 --- a/tests/udp_tracker.rs +++ b/tests/udp_tracker.rs @@ -19,8 +19,8 @@ mod udp_tracker_server { use torrust_tracker::udp::MAX_PACKET_SIZE; use crate::udp::asserts::is_error_response; - use crate::udp::client::{new_udp_client_connected, Client}; - use crate::udp::server::{start_udp_tracker, tracker_configuration}; + use crate::udp::client::{new_udp_client_connected, UdpTrackerClient}; + use crate::udp::test_environment::running_test_environment; fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { [0; MAX_PACKET_SIZE] @@ -30,7 +30,7 @@ mod udp_tracker_server { [0; MAX_PACKET_SIZE] } - async fn send_connection_request(transaction_id: TransactionId, client: &Client) -> ConnectionId { + async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { let connect_request = ConnectRequest { transaction_id }; client.send(connect_request.into()).await; @@ -45,11 +45,9 @@ mod udp_tracker_server 
{ #[tokio::test] async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { - let configuration = tracker_configuration(); + let test_env = running_test_environment().await; - let udp_server = start_udp_tracker(&configuration); - - let client = new_udp_client_connected(&udp_server.bind_address.unwrap()).await; + let client = new_udp_client_connected(&test_env.bind_address().to_string()).await; client.send(&empty_udp_request()).await; @@ -65,15 +63,13 @@ mod udp_tracker_server { use crate::udp::asserts::is_connect_response; use crate::udp::client::new_udp_tracker_client_connected; - use crate::udp::server::{start_udp_tracker, tracker_configuration}; + use crate::udp::test_environment::running_test_environment; #[tokio::test] async fn should_return_a_connect_response() { - let configuration = tracker_configuration(); - - let udp_server = start_udp_tracker(&configuration); + let test_env = running_test_environment().await; - let client = new_udp_tracker_client_connected(&udp_server.bind_address.unwrap()).await; + let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; let connect_request = ConnectRequest { transaction_id: TransactionId(123), @@ -97,16 +93,14 @@ mod udp_tracker_server { use crate::udp::asserts::is_ipv4_announce_response; use crate::udp::client::new_udp_tracker_client_connected; - use crate::udp::server::{start_udp_tracker, tracker_configuration}; + use crate::udp::test_environment::running_test_environment; use crate::udp_tracker_server::send_connection_request; #[tokio::test] async fn should_return_an_announce_response() { - let configuration = tracker_configuration(); + let test_env = running_test_environment().await; - let udp_server = start_udp_tracker(&configuration); - - let client = new_udp_tracker_client_connected(&udp_server.bind_address.unwrap()).await; + let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; let connection_id = 
send_connection_request(TransactionId(123), &client).await; @@ -140,16 +134,14 @@ mod udp_tracker_server { use crate::udp::asserts::is_scrape_response; use crate::udp::client::new_udp_tracker_client_connected; - use crate::udp::server::{start_udp_tracker, tracker_configuration}; + use crate::udp::test_environment::running_test_environment; use crate::udp_tracker_server::send_connection_request; #[tokio::test] async fn should_return_a_scrape_response() { - let configuration = tracker_configuration(); - - let udp_server = start_udp_tracker(&configuration); + let test_env = running_test_environment().await; - let client = new_udp_tracker_client_connected(&udp_server.bind_address.unwrap()).await; + let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; let connection_id = send_connection_request(TransactionId(123), &client).await; From a72c12349b852734b8f81478381cf2b50037aeac Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 23 Feb 2023 16:43:08 +0100 Subject: [PATCH 0447/1003] refactor: removed duplicate ephemeral configuration fn --- packages/configuration/src/lib.rs | 39 ---------------------- packages/test-helpers/src/configuration.rs | 25 ++++++++++---- tests/http/server.rs | 17 +++++----- 3 files changed, 27 insertions(+), 54 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 2121752c5..1003a92db 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -101,45 +101,6 @@ impl From for Error { } } -/// This configuration is used for testing. It generates random config values so they do not collide -/// if you run more than one tracker at the same time. -/// -/// # Panics -/// -/// Will panic if it can't convert the temp file path to string -#[must_use] -pub fn ephemeral_configuration() -> Configuration { - // todo: disable services that are not needed. - // For example: a test for the UDP tracker should disable the API and HTTP tracker. 
- - let mut config = Configuration { - log_level: Some("off".to_owned()), // Change to `debug` for tests debugging - ..Default::default() - }; - - // Ephemeral socket address for API - let api_port = 0u16; - config.http_api.enabled = true; - config.http_api.bind_address = format!("127.0.0.1:{}", &api_port); - - // Ephemeral socket address for UDP tracker - let udp_port = 0u16; - config.udp_trackers[0].enabled = true; - config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &udp_port); - - // Ephemeral socket address for HTTP tracker - let http_port = 0u16; - config.http_trackers[0].enabled = true; - config.http_trackers[0].bind_address = format!("127.0.0.1:{}", &http_port); - - // Ephemeral sqlite database - let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("data_{}_{}_{}.db", &api_port, &udp_port, &http_port)); - config.db_path = temp_file.to_str().unwrap().to_owned(); - - config -} - impl Default for Configuration { fn default() -> Self { let mut configuration = Configuration { diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index f7c584d55..a978a050b 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -5,29 +5,40 @@ use torrust_tracker_configuration::Configuration; use crate::random; /// This configuration is used for testing. It generates random config values so they do not collide -/// if you run more than one tracker at the same time. +/// if you run more than one tracker at the same time. /// /// # Panics /// /// Will panic if it can't convert the temp file path to string #[must_use] pub fn ephemeral() -> Configuration { + // todo: disable services that are not needed. + // For example: a test for the UDP tracker should disable the API and HTTP tracker. 
+ let mut config = Configuration { - log_level: Some("off".to_owned()), + log_level: Some("off".to_owned()), // Change to `debug` for tests debugging ..Default::default() }; - // Ephemeral socket addresses - let bind_addr = "127.0.0.1:0".to_string(); + // Ephemeral socket address for API + let api_port = 0u16; + config.http_api.enabled = true; + config.http_api.bind_address = format!("127.0.0.1:{}", &api_port); + + // Ephemeral socket address for UDP tracker + let udp_port = 0u16; + config.udp_trackers[0].enabled = true; + config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &udp_port); - config.http_api.bind_address = bind_addr.to_string(); - config.udp_trackers[0].bind_address = bind_addr; + // Ephemeral socket address for HTTP tracker + let http_port = 0u16; + config.http_trackers[0].enabled = true; + config.http_trackers[0].bind_address = format!("127.0.0.1:{}", &http_port); // Ephemeral sqlite database let temp_directory = env::temp_dir(); let random_db_id = random::string(16); let temp_file = temp_directory.join(format!("data_{random_db_id}.db")); - config.db_path = temp_file.to_str().unwrap().to_owned(); config diff --git a/tests/http/server.rs b/tests/http/server.rs index 147ad93c1..920c01f07 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -8,28 +8,29 @@ use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; -use torrust_tracker_configuration::{ephemeral_configuration, Configuration}; +use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::TrackerMode; +use torrust_tracker_test_helpers::configuration::ephemeral; use super::connection_info::ConnectionInfo; /// Starts a HTTP tracker with mode "public" in settings pub async fn start_public_http_tracker(version: Version) -> Server { - let mut configuration = ephemeral_configuration(); + let mut 
configuration = ephemeral(); configuration.mode = TrackerMode::Public; start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "listed" in settings pub async fn start_whitelisted_http_tracker(version: Version) -> Server { - let mut configuration = ephemeral_configuration(); + let mut configuration = ephemeral(); configuration.mode = TrackerMode::Listed; start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "private" in settings pub async fn start_private_http_tracker(version: Version) -> Server { - let mut configuration = ephemeral_configuration(); + let mut configuration = ephemeral(); configuration.mode = TrackerMode::Private; start_custom_http_tracker(Arc::new(configuration), version).await } @@ -42,7 +43,7 @@ pub async fn start_private_http_tracker(version: Version) -> Server { /// bind_address = "[::]:7070" /// ``` pub async fn start_ipv6_http_tracker(version: Version) -> Server { - let mut configuration = ephemeral_configuration(); + let mut configuration = ephemeral(); // Change socket address to "wildcard address" (unspecified address which means any IP address) // but keeping the random port generated with the ephemeral configuration. 
@@ -60,7 +61,7 @@ pub async fn start_ipv6_http_tracker(version: Version) -> Server { /// external_ip = "2.137.87.41" /// ``` pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr, version: Version) -> Server { - let mut configuration = ephemeral_configuration(); + let mut configuration = ephemeral(); configuration.external_ip = Some(external_ip.to_string()); start_custom_http_tracker(Arc::new(configuration), version).await } @@ -72,7 +73,7 @@ pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr, version: /// on_reverse_proxy = true /// ``` pub async fn start_http_tracker_on_reverse_proxy(version: Version) -> Server { - let mut configuration = ephemeral_configuration(); + let mut configuration = ephemeral(); configuration.on_reverse_proxy = true; start_custom_http_tracker(Arc::new(configuration), version).await } @@ -83,7 +84,7 @@ pub async fn start_default_http_tracker(version: Version) -> Server { } pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) + Arc::new(ephemeral()) } pub async fn start_custom_http_tracker(configuration: Arc, version: Version) -> Server { From 504bad32eb7ff7d0aca7fafdc2fb18ab3020f005 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 1 Mar 2023 16:22:06 +0100 Subject: [PATCH 0448/1003] refactor: abstract away http server implementations --- packages/configuration/src/lib.rs | 2 +- src/http/axum_implementation/server.rs | 114 +++++++++++++++++++++++-- src/http/mod.rs | 1 + src/http/tracker_interface.rs | 100 ++++++++++++++++++++++ src/http/warp_implementation/server.rs | 70 +++++++++++++++ tests/api/test_environment.rs | 30 +------ tests/common/mod.rs | 1 + tests/common/tracker.rs | 34 ++++++++ tests/http/mod.rs | 1 + tests/http/test_environment.rs | 102 ++++++++++++++++++++++ tests/http_tracker.rs | 24 ++++++ tests/udp/test_environment.rs | 43 ++-------- 12 files changed, 452 insertions(+), 70 deletions(-) create mode 100644 src/http/tracker_interface.rs create mode 100644 
tests/common/tracker.rs create mode 100644 tests/http/test_environment.rs diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 1003a92db..d42c82df9 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -21,7 +21,7 @@ pub struct UdpTracker { } #[serde_as] -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct HttpTracker { pub enabled: bool, pub bind_address: String, diff --git a/src/http/axum_implementation/server.rs b/src/http/axum_implementation/server.rs index 30c580af6..f2a7371be 100644 --- a/src/http/axum_implementation/server.rs +++ b/src/http/axum_implementation/server.rs @@ -1,19 +1,123 @@ +use std::future::Future; use std::net::SocketAddr; +use std::str::FromStr; use std::sync::Arc; +use async_trait::async_trait; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; -use futures::Future; +use futures::future::BoxFuture; use log::info; use warp::hyper; use super::routes::router; +use crate::http::tracker_interface::TrackerInterfaceTrait; use crate::tracker::Tracker; -pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { +#[derive(Debug)] +pub enum Error { + Error(String), +} + +pub struct Server; + +impl Server { + pub fn start_from_tcp_listener_with_graceful_shutdown( + tcp_listener: std::net::TcpListener, + tracker: Arc, + shutdown_signal: F, + ) -> BoxFuture<'static, ()> + where + F: Future + Send + 'static, + { + let app = router(&tracker); + + Box::pin(async { + axum::Server::from_tcp(tcp_listener) + .expect("Could not bind to tcp listener.") + .serve(app.into_make_service()) + .with_graceful_shutdown(shutdown_signal) + .await + .expect("Axum server crashed."); + }) + } + + pub fn start_tls_from_tcp_listener_with_graceful_shutdown( + tcp_listener: std::net::TcpListener, + (ssl_cert_path, ssl_key_path): (String, String), + tracker: Arc, + shutdown_signal: F, + ) -> 
BoxFuture<'static, ()> + where + F: Future + Send + 'static, + { + let app = router(&tracker); + + let handle = Handle::new(); + + let cloned_handle = handle.clone(); + + tokio::task::spawn_local(async move { + shutdown_signal.await; + cloned_handle.shutdown(); + }); + + Box::pin(async { + let tls_config = RustlsConfig::from_pem_file(ssl_cert_path, ssl_key_path) + .await + .expect("Could not read tls cert."); + + axum_server::from_tcp_rustls(tcp_listener, tls_config) + .handle(handle) + .serve(app.into_make_service()) + .await + .expect("Axum server crashed."); + }) + } +} + +#[async_trait] +impl TrackerInterfaceTrait for Server { + fn new() -> Self { + Self {} + } + + fn start_with_graceful_shutdown( + &self, + cfg: torrust_tracker_configuration::HttpTracker, + tracker: Arc, + shutdown_signal: F, + ) -> (SocketAddr, BoxFuture<'static, ()>) + where + F: Future + Send + 'static, + { + let addr = SocketAddr::from_str(&cfg.bind_address).expect("bind_address is not a valid SocketAddr."); + let tcp_listener = std::net::TcpListener::bind(addr).expect("Could not bind tcp_listener to address."); + let bind_addr = tcp_listener + .local_addr() + .expect("Could not get local_addr from tcp_listener."); + + if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (cfg.ssl_enabled, &cfg.ssl_cert_path, &cfg.ssl_key_path) { + let server = Self::start_tls_from_tcp_listener_with_graceful_shutdown( + tcp_listener, + (ssl_cert_path.to_string(), ssl_key_path.to_string()), + tracker, + shutdown_signal, + ); + + (bind_addr, server) + } else { + let server = Self::start_from_tcp_listener_with_graceful_shutdown(tcp_listener, tracker, shutdown_signal); + + (bind_addr, server) + } + } +} + +pub fn start(socket_addr: std::net::SocketAddr, tracker: &Arc) -> impl Future> { let app = router(tracker); - let server = axum::Server::bind(&socket_addr).serve(app.into_make_service_with_connect_info::()); + let server = 
axum::Server::bind(&socket_addr).serve(app.into_make_service_with_connect_info::()); server.with_graceful_shutdown(async move { tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); @@ -22,7 +126,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future, ) -> impl Future> { @@ -39,5 +143,5 @@ pub fn start_tls( axum_server::bind_rustls(socket_addr, ssl_config) .handle(handle) - .serve(app.into_make_service_with_connect_info::()) + .serve(app.into_make_service_with_connect_info::()) } diff --git a/src/http/mod.rs b/src/http/mod.rs index 039a2067b..c2cbb43df 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -14,6 +14,7 @@ use serde::{Deserialize, Serialize}; pub mod axum_implementation; pub mod percent_encoding; +pub mod tracker_interface; pub mod warp_implementation; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] diff --git a/src/http/tracker_interface.rs b/src/http/tracker_interface.rs new file mode 100644 index 000000000..033d5a75d --- /dev/null +++ b/src/http/tracker_interface.rs @@ -0,0 +1,100 @@ +use std::future::Future; +use std::net::SocketAddr; +use std::sync::Arc; + +use futures::future::BoxFuture; + +use crate::signals::shutdown_signal; +use crate::tracker::Tracker; + +/// Trait to be implemented by a http interface for the tracker. 
+#[allow(clippy::module_name_repetitions)] +pub trait TrackerInterfaceTrait: Sync + Send { + fn new() -> Self; + + fn start_with_graceful_shutdown( + &self, + cfg: torrust_tracker_configuration::HttpTracker, + tracker: Arc, + shutdown_signal: F, + ) -> (SocketAddr, BoxFuture<'static, ()>) + where + F: Future + Send + 'static; +} + +#[derive(Debug)] +pub enum Error { + Error(String), +} + +#[allow(clippy::module_name_repetitions)] +pub type StoppedHttpServer = TrackerInterface>; +#[allow(clippy::module_name_repetitions)] +pub type RunningHttpServer = TrackerInterface>; + +pub struct TrackerInterface { + cfg: torrust_tracker_configuration::HttpTracker, + state: S, +} + +pub struct Stopped { + interface: I, +} + +pub struct Running { + bind_addr: SocketAddr, + task_killer: tokio::sync::oneshot::Sender, + task: tokio::task::JoinHandle, +} + +impl TrackerInterface> { + pub fn new(cfg: torrust_tracker_configuration::HttpTracker, interface: I) -> Self { + Self { + cfg, + state: Stopped { interface }, + } + } + + pub async fn start(self, tracker: Arc) -> Result>, Error> { + let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); + let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); + + let configuration = self.cfg.clone(); + let interface = self.state.interface; + + let task = tokio::spawn(async move { + let (bind_addr, server) = + interface.start_with_graceful_shutdown(configuration, tracker, shutdown_signal(shutdown_receiver)); + + addr_sender.send(bind_addr).unwrap(); + + server.await; + + interface + }); + + let bind_address = addr_receiver.await.expect("Could not receive bind_address."); + + Ok(TrackerInterface { + cfg: self.cfg, + state: Running { + bind_addr: bind_address, + task_killer: shutdown_sender, + task, + }, + }) + } +} + +impl TrackerInterface> { + pub async fn stop(self) -> Result>, Error> { + self.state.task_killer.send(0).unwrap(); + + let interface = self.state.task.await.map_err(|e| 
Error::Error(e.to_string()))?; + + Ok(TrackerInterface { + cfg: self.cfg, + state: Stopped { interface }, + }) + } +} diff --git a/src/http/warp_implementation/server.rs b/src/http/warp_implementation/server.rs index 894d3e911..6b0665fce 100644 --- a/src/http/warp_implementation/server.rs +++ b/src/http/warp_implementation/server.rs @@ -1,8 +1,78 @@ +use std::future::Future; use std::net::SocketAddr; +use std::str::FromStr; use std::sync::Arc; +use futures::future::BoxFuture; + use super::routes; +use crate::http::tracker_interface::TrackerInterfaceTrait; use crate::tracker; +use crate::tracker::Tracker; + +#[derive(Debug)] +pub enum Error { + Error(String), +} + +pub struct Server; + +impl Server { + pub fn start_with_graceful_shutdown( + addr: SocketAddr, + tracker: Arc, + shutdown_signal: F, + ) -> (SocketAddr, BoxFuture<'static, ()>) + where + F: Future + Send + 'static, + { + let (bind_addr, server) = warp::serve(routes::routes(tracker)).bind_with_graceful_shutdown(addr, shutdown_signal); + + (bind_addr, Box::pin(server)) + } + + pub fn start_tls_with_graceful_shutdown( + addr: SocketAddr, + (ssl_cert_path, ssl_key_path): (&str, &str), + tracker: Arc, + shutdown_signal: F, + ) -> (SocketAddr, BoxFuture<'static, ()>) + where + F: Future + Send + 'static, + { + let (bind_addr, server) = warp::serve(routes::routes(tracker)) + .tls() + .cert_path(ssl_cert_path) + .key_path(ssl_key_path) + .bind_with_graceful_shutdown(addr, shutdown_signal); + + (bind_addr, Box::pin(server)) + } +} + +impl TrackerInterfaceTrait for Server { + fn new() -> Self { + Self {} + } + + fn start_with_graceful_shutdown( + &self, + cfg: torrust_tracker_configuration::HttpTracker, + tracker: Arc, + shutdown_signal: F, + ) -> (SocketAddr, BoxFuture<'static, ()>) + where + F: Future + Send + 'static, + { + let addr = SocketAddr::from_str(&cfg.bind_address).expect("bind_address is not a valid SocketAddr."); + + if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (cfg.ssl_enabled, 
&cfg.ssl_cert_path, &cfg.ssl_key_path) { + Self::start_tls_with_graceful_shutdown(addr, (ssl_cert_path, ssl_key_path), tracker, shutdown_signal) + } else { + Self::start_with_graceful_shutdown(addr, tracker, shutdown_signal) + } + } +} /// Server that listens on HTTP, needs a `tracker::TorrentTracker` #[derive(Clone)] diff --git a/tests/api/test_environment.rs b/tests/api/test_environment.rs index ff143ec7a..78ff7d259 100644 --- a/tests/api/test_environment.rs +++ b/tests/api/test_environment.rs @@ -11,6 +11,7 @@ use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration::ephemeral; use super::connection_info::ConnectionInfo; +use crate::common::tracker::{tracker_configuration, tracker_instance}; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment; @@ -92,35 +93,6 @@ pub fn running_test_environment() -> RunningTestEnvironment { TestEnvironment::new_running() } -pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral()) -} - -// TODO: Move to test-helpers crate once `Tracker` is isolated. 
-pub fn tracker_instance(configuration: &Arc) -> Arc { - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - let tracker = match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - - // Initialize logging - logging::setup(configuration); - - tracker -} - pub fn api_server() -> StoppedApiServer { let config = tracker_configuration(); diff --git a/tests/common/mod.rs b/tests/common/mod.rs index b57996292..9452cc111 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,3 +1,4 @@ pub mod fixtures; pub mod http; +pub mod tracker; pub mod udp; diff --git a/tests/common/tracker.rs b/tests/common/tracker.rs new file mode 100644 index 000000000..c0e44749b --- /dev/null +++ b/tests/common/tracker.rs @@ -0,0 +1,34 @@ +use std::sync::Arc; + +use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::tracker::Tracker; +use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; + +pub fn tracker_configuration() -> Arc { + Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) +} + +// TODO: Move to test-helpers crate once `Tracker` is isolated. 
+pub fn tracker_instance(configuration: &Arc) -> Arc { + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + let tracker = match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + + // Initialize logging + logging::setup(configuration); + + tracker +} diff --git a/tests/http/mod.rs b/tests/http/mod.rs index 40616025b..a335723e3 100644 --- a/tests/http/mod.rs +++ b/tests/http/mod.rs @@ -5,6 +5,7 @@ pub mod connection_info; pub mod requests; pub mod responses; pub mod server; +pub mod test_environment; use percent_encoding::NON_ALPHANUMERIC; diff --git a/tests/http/test_environment.rs b/tests/http/test_environment.rs new file mode 100644 index 000000000..acf0224ef --- /dev/null +++ b/tests/http/test_environment.rs @@ -0,0 +1,102 @@ +use core::panic; +use std::sync::Arc; + +use torrust_tracker::http::tracker_interface::{RunningHttpServer, StoppedHttpServer, TrackerInterface, TrackerInterfaceTrait}; +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::tracker::peer::Peer; +use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::tracker::Tracker; +use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; +use torrust_tracker_configuration::Configuration; +use torrust_tracker_test_helpers::configuration::ephemeral; + +use crate::common::tracker::{tracker_configuration, tracker_instance}; + +#[allow(clippy::module_name_repetitions, dead_code)] +pub type StoppedTestEnvironment = TestEnvironment>; +#[allow(clippy::module_name_repetitions)] +pub type RunningTestEnvironment = TestEnvironment>; + +pub struct 
TestEnvironment { + pub tracker: Arc, + pub state: S, +} + +#[allow(dead_code)] +pub struct Stopped { + http_server: StoppedHttpServer, +} + +pub struct Running { + http_server: RunningHttpServer, +} + +impl TestEnvironment { + /// Add a torrent to the tracker + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + } +} + +impl TestEnvironment> { + #[allow(dead_code)] + pub fn new_stopped() -> Self { + let cfg = tracker_configuration(); + + let tracker = tracker_instance(&cfg); + + let http_server = stopped_http_server(cfg.http_trackers[0].clone()); + + Self { + tracker, + state: Stopped { http_server }, + } + } + + #[allow(dead_code)] + pub async fn start(self) -> TestEnvironment> { + TestEnvironment { + tracker: self.tracker.clone(), + state: Running { + http_server: self.state.http_server.start(self.tracker).await.unwrap(), + }, + } + } +} + +impl TestEnvironment> { + pub async fn new_running() -> Self { + let test_env = StoppedTestEnvironment::new_stopped(); + + test_env.start().await + } + + pub async fn stop(self) -> TestEnvironment> { + TestEnvironment { + tracker: self.tracker, + state: Stopped { + http_server: self.state.http_server.stop().await.unwrap(), + }, + } + } +} + +#[allow(clippy::module_name_repetitions)] +pub async fn running_test_environment() -> RunningTestEnvironment { + TestEnvironment::new_running().await +} + +pub fn stopped_http_server( + cfg: torrust_tracker_configuration::HttpTracker, +) -> StoppedHttpServer { + let http_server = I::new(); + + TrackerInterface::new(cfg, http_server) +} + +pub async fn running_http_server( + cfg: torrust_tracker_configuration::HttpTracker, + tracker: Arc, +) -> RunningHttpServer { + stopped_http_server(cfg).start(tracker).await.unwrap() +} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 4219be30a..fd9adee34 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -12,6 +12,30 @@ 
mod common; mod http; +pub type Axum = torrust_tracker::http::axum_implementation::server::Server; +pub type Warp = torrust_tracker::http::warp_implementation::server::Server; + +mod http_tracker_test_environment { + use torrust_tracker::http; + + use crate::http::test_environment::running_test_environment; + use crate::{Axum, Warp}; + + #[tokio::test] + async fn should_be_able_to_start_and_stop_a_test_environment_using_axum() { + let test_env = running_test_environment::().await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_be_able_to_start_and_stop_a_test_environment_using_warp() { + let test_env = running_test_environment::().await; + + test_env.stop().await; + } +} + mod warp_http_tracker_server { mod for_all_config_modes { diff --git a/tests/udp/test_environment.rs b/tests/udp/test_environment.rs index e53a7a580..dffe458dd 100644 --- a/tests/udp/test_environment.rs +++ b/tests/udp/test_environment.rs @@ -10,9 +10,7 @@ use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration::ephemeral; -fn tracker_configuration() -> Arc { - Arc::new(ephemeral()) -} +use crate::common::tracker::{tracker_configuration, tracker_instance}; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment; @@ -26,11 +24,11 @@ pub struct TestEnvironment { #[allow(dead_code)] pub struct Stopped { - api_server: StoppedUdpServer, + udp_server: StoppedUdpServer, } pub struct Running { - api_server: RunningUdpServer, + udp_server: RunningUdpServer, } impl TestEnvironment { @@ -48,7 +46,7 @@ impl TestEnvironment { Self { tracker: udp_server.tracker.clone(), - state: Stopped { api_server: udp_server }, + state: Stopped { udp_server: udp_server }, } } @@ -57,7 +55,7 @@ impl TestEnvironment { TestEnvironment { tracker: self.tracker, state: Running { - api_server: self.state.api_server.start().await.unwrap(), + udp_server: 
self.state.udp_server.start().await.unwrap(), }, } } @@ -69,7 +67,7 @@ impl TestEnvironment { Self { tracker: udp_server.tracker.clone(), - state: Running { api_server: udp_server }, + state: Running { udp_server: udp_server }, } } @@ -78,13 +76,13 @@ impl TestEnvironment { TestEnvironment { tracker: self.tracker, state: Stopped { - api_server: self.state.api_server.stop().await.unwrap(), + udp_server: self.state.udp_server.stop().await.unwrap(), }, } } pub fn bind_address(&self) -> SocketAddr { - self.state.api_server.state.bind_address + self.state.udp_server.state.bind_address } } @@ -93,31 +91,6 @@ pub async fn running_test_environment() -> RunningTestEnvironment { TestEnvironment::new_running().await } -// TODO: Move to test-helpers crate once `Tracker` is isolated. -pub fn tracker_instance(configuration: &Arc) -> Arc { - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - let tracker = match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - - // Initialize logging - logging::setup(configuration); - - tracker -} - pub fn udp_server() -> StoppedUdpServer { let config = tracker_configuration(); From 191fbac1353cb64bb39b5bb7239334410ba4d6e0 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 1 Mar 2023 16:43:18 +0100 Subject: [PATCH 0449/1003] refactor: moved signals mod to own file --- src/lib.rs | 45 +-------------------------------------------- src/signals.rs | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 44 deletions(-) create mode 100644 src/signals.rs diff --git a/src/lib.rs b/src/lib.rs index 
f80bcfb6c..f01ff0468 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -5,6 +5,7 @@ pub mod jobs; pub mod logging; pub mod protocol; pub mod setup; +pub mod signals; pub mod stats; pub mod tracker; pub mod udp; @@ -30,47 +31,3 @@ pub mod ephemeral_instance_keys { pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); } } - -pub mod signals { - use log::info; - - /// Resolves on `ctrl_c` or the `terminate` signal. - pub async fn global_shutdown_signal() { - let ctrl_c = async { - tokio::signal::ctrl_c().await.expect("failed to install Ctrl+C handler"); - }; - - #[cfg(unix)] - let terminate = async { - tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate()) - .expect("failed to install signal handler") - .recv() - .await; - }; - - #[cfg(not(unix))] - let terminate = std::future::pending::<()>(); - - tokio::select! { - _ = ctrl_c => {}, - _ = terminate => {} - } - } - - /// Resolves when the `stop_receiver` or the `global_shutdown_signal()` resolves. - pub async fn shutdown_signal(stop_receiver: tokio::sync::oneshot::Receiver) { - let stop = async { stop_receiver.await.expect("Failed to install stop signal.") }; - - tokio::select! { - _ = stop => {}, - _ = global_shutdown_signal() => {} - } - } - - /// Same as `shutdown_signal()`, but shows a message when it resolves. - pub async fn shutdown_signal_with_message(stop_receiver: tokio::sync::oneshot::Receiver, message: String) { - shutdown_signal(stop_receiver).await; - - info!("{message}"); - } -} diff --git a/src/signals.rs b/src/signals.rs new file mode 100644 index 000000000..b5a25ded7 --- /dev/null +++ b/src/signals.rs @@ -0,0 +1,41 @@ +use log::info; + +/// Resolves on `ctrl_c` or the `terminate` signal. 
+pub async fn global_shutdown_signal() { + let ctrl_c = async { + tokio::signal::ctrl_c().await.expect("failed to install Ctrl+C handler"); + }; + + #[cfg(unix)] + let terminate = async { + tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + }; + + #[cfg(not(unix))] + let terminate = std::future::pending::<()>(); + + tokio::select! { + _ = ctrl_c => {}, + _ = terminate => {} + } +} + +/// Resolves when the `stop_receiver` or the `global_shutdown_signal()` resolves. +pub async fn shutdown_signal(stop_receiver: tokio::sync::oneshot::Receiver) { + let stop = async { stop_receiver.await.expect("Failed to install stop signal.") }; + + tokio::select! { + _ = stop => {}, + _ = global_shutdown_signal() => {} + } +} + +/// Same as `shutdown_signal()`, but shows a message when it resolves. +pub async fn shutdown_signal_with_message(stop_receiver: tokio::sync::oneshot::Receiver, message: String) { + shutdown_signal(stop_receiver).await; + + info!("{message}"); +} From f40e43f8cb0433aa81e6e27cf056d4df5c4fe39d Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 1 Mar 2023 17:16:38 +0100 Subject: [PATCH 0450/1003] refactor: prepend paths to ephemeral calls --- src/apis/server.rs | 3 +-- src/tracker/services/statistics.rs | 3 +-- src/tracker/services/torrent.rs | 6 ++---- src/udp/handlers.rs | 3 +-- tests/api/test_environment.rs | 5 ----- tests/http/server.rs | 15 +++++++-------- tests/http/test_environment.rs | 5 ----- tests/http_tracker.rs | 2 -- tests/udp/test_environment.rs | 4 ---- 9 files changed, 12 insertions(+), 34 deletions(-) diff --git a/src/apis/server.rs b/src/apis/server.rs index 5ec22f253..4594456fb 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -188,14 +188,13 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; - use torrust_tracker_test_helpers::configuration::ephemeral; use crate::apis::server::ApiServer; use crate::tracker; 
use crate::tracker::statistics; fn tracker_configuration() -> Arc { - Arc::new(ephemeral()) + Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) } #[tokio::test] diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs index c0aaf9c64..94a9b1bd5 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics.rs @@ -37,14 +37,13 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; - use torrust_tracker_test_helpers::configuration::ephemeral; use crate::tracker; use crate::tracker::services::common::tracker_factory; use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral()) + Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) } #[tokio::test] diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index ce652a091..fc5686e23 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -138,7 +138,6 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; - use torrust_tracker_test_helpers::configuration::ephemeral; use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; @@ -146,7 +145,7 @@ mod tests { use crate::tracker::services::torrent::{get_torrent_info, Info}; pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral()) + Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) } #[tokio::test] @@ -193,7 +192,6 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; - use torrust_tracker_test_helpers::configuration::ephemeral; use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; @@ -201,7 +199,7 @@ mod tests { use crate::tracker::services::torrent::{get_torrents, BasicInfo, Pagination}; pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral()) + 
Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) } #[tokio::test] diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 4a0874c72..411590d2f 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -252,7 +252,6 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::TrackerMode; - use torrust_tracker_test_helpers::configuration::ephemeral; use crate::protocol::clock::{Current, Time}; use crate::tracker::{self, peer, statistics}; @@ -262,7 +261,7 @@ mod tests { } fn default_testing_tracker_configuration() -> Configuration { - ephemeral() + torrust_tracker_test_helpers::configuration::ephemeral() } fn initialized_public_tracker() -> Arc { diff --git a/tests/api/test_environment.rs b/tests/api/test_environment.rs index 78ff7d259..4f119fd64 100644 --- a/tests/api/test_environment.rs +++ b/tests/api/test_environment.rs @@ -1,14 +1,9 @@ -use core::panic; use std::sync::Arc; use torrust_tracker::apis::server::{ApiServer, RunningApiServer, StoppedApiServer}; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; -use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::tracker::Tracker; -use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; -use torrust_tracker_configuration::Configuration; -use torrust_tracker_test_helpers::configuration::ephemeral; use super::connection_info::ConnectionInfo; use crate::common::tracker::{tracker_configuration, tracker_instance}; diff --git a/tests/http/server.rs b/tests/http/server.rs index 920c01f07..4753ee4dc 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -10,27 +10,26 @@ use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::TrackerMode; -use 
torrust_tracker_test_helpers::configuration::ephemeral; use super::connection_info::ConnectionInfo; /// Starts a HTTP tracker with mode "public" in settings pub async fn start_public_http_tracker(version: Version) -> Server { - let mut configuration = ephemeral(); + let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); configuration.mode = TrackerMode::Public; start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "listed" in settings pub async fn start_whitelisted_http_tracker(version: Version) -> Server { - let mut configuration = ephemeral(); + let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); configuration.mode = TrackerMode::Listed; start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "private" in settings pub async fn start_private_http_tracker(version: Version) -> Server { - let mut configuration = ephemeral(); + let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); configuration.mode = TrackerMode::Private; start_custom_http_tracker(Arc::new(configuration), version).await } @@ -43,7 +42,7 @@ pub async fn start_private_http_tracker(version: Version) -> Server { /// bind_address = "[::]:7070" /// ``` pub async fn start_ipv6_http_tracker(version: Version) -> Server { - let mut configuration = ephemeral(); + let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); // Change socket address to "wildcard address" (unspecified address which means any IP address) // but keeping the random port generated with the ephemeral configuration. 
@@ -61,7 +60,7 @@ pub async fn start_ipv6_http_tracker(version: Version) -> Server { /// external_ip = "2.137.87.41" /// ``` pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr, version: Version) -> Server { - let mut configuration = ephemeral(); + let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); configuration.external_ip = Some(external_ip.to_string()); start_custom_http_tracker(Arc::new(configuration), version).await } @@ -73,7 +72,7 @@ pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr, version: /// on_reverse_proxy = true /// ``` pub async fn start_http_tracker_on_reverse_proxy(version: Version) -> Server { - let mut configuration = ephemeral(); + let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); configuration.on_reverse_proxy = true; start_custom_http_tracker(Arc::new(configuration), version).await } @@ -84,7 +83,7 @@ pub async fn start_default_http_tracker(version: Version) -> Server { } pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral()) + Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) } pub async fn start_custom_http_tracker(configuration: Arc, version: Version) -> Server { diff --git a/tests/http/test_environment.rs b/tests/http/test_environment.rs index acf0224ef..f6770b2ad 100644 --- a/tests/http/test_environment.rs +++ b/tests/http/test_environment.rs @@ -1,14 +1,9 @@ -use core::panic; use std::sync::Arc; use torrust_tracker::http::tracker_interface::{RunningHttpServer, StoppedHttpServer, TrackerInterface, TrackerInterfaceTrait}; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; -use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::tracker::Tracker; -use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; -use torrust_tracker_configuration::Configuration; -use torrust_tracker_test_helpers::configuration::ephemeral; use 
crate::common::tracker::{tracker_configuration, tracker_instance}; diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index fd9adee34..c0d0bdd23 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -16,8 +16,6 @@ pub type Axum = torrust_tracker::http::axum_implementation::server::Server; pub type Warp = torrust_tracker::http::warp_implementation::server::Server; mod http_tracker_test_environment { - use torrust_tracker::http; - use crate::http::test_environment::running_test_environment; use crate::{Axum, Warp}; diff --git a/tests/udp/test_environment.rs b/tests/udp/test_environment.rs index dffe458dd..585a8f934 100644 --- a/tests/udp/test_environment.rs +++ b/tests/udp/test_environment.rs @@ -3,12 +3,8 @@ use std::sync::Arc; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; -use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::tracker::Tracker; use torrust_tracker::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; -use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; -use torrust_tracker_configuration::Configuration; -use torrust_tracker_test_helpers::configuration::ephemeral; use crate::common::tracker::{tracker_configuration, tracker_instance}; From 4f2b035a36901fa3f8ec52537923bb0739080e19 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 2 Mar 2023 14:45:07 +0100 Subject: [PATCH 0451/1003] refactor: renamed `TrackerInterface` struct and relevant trait --- src/http/axum_implementation/server.rs | 4 +-- src/http/tracker_interface.rs | 42 +++++++++++++------------- src/http/warp_implementation/server.rs | 4 +-- tests/http/test_environment.rs | 23 ++++++++------ 4 files changed, 39 insertions(+), 34 deletions(-) diff --git a/src/http/axum_implementation/server.rs b/src/http/axum_implementation/server.rs index f2a7371be..a12d60332 100644 --- a/src/http/axum_implementation/server.rs +++ b/src/http/axum_implementation/server.rs @@ -11,7 +11,7 @@ use 
log::info; use warp::hyper; use super::routes::router; -use crate::http::tracker_interface::TrackerInterfaceTrait; +use crate::http::tracker_interface::HttpServerLauncher; use crate::tracker::Tracker; #[derive(Debug)] @@ -77,7 +77,7 @@ impl Server { } #[async_trait] -impl TrackerInterfaceTrait for Server { +impl HttpServerLauncher for Server { fn new() -> Self { Self {} } diff --git a/src/http/tracker_interface.rs b/src/http/tracker_interface.rs index 033d5a75d..a8bb057d7 100644 --- a/src/http/tracker_interface.rs +++ b/src/http/tracker_interface.rs @@ -7,9 +7,9 @@ use futures::future::BoxFuture; use crate::signals::shutdown_signal; use crate::tracker::Tracker; -/// Trait to be implemented by a http interface for the tracker. +/// Trait to be implemented by a http server launcher for the tracker. #[allow(clippy::module_name_repetitions)] -pub trait TrackerInterfaceTrait: Sync + Send { +pub trait HttpServerLauncher: Sync + Send { fn new() -> Self; fn start_with_graceful_shutdown( @@ -28,54 +28,54 @@ pub enum Error { } #[allow(clippy::module_name_repetitions)] -pub type StoppedHttpServer = TrackerInterface>; +pub type StoppedHttpServer = HttpServer>; #[allow(clippy::module_name_repetitions)] -pub type RunningHttpServer = TrackerInterface>; +pub type RunningHttpServer = HttpServer>; -pub struct TrackerInterface { +pub struct HttpServer { cfg: torrust_tracker_configuration::HttpTracker, state: S, } -pub struct Stopped { - interface: I, +pub struct Stopped { + launcher: I, } -pub struct Running { +pub struct Running { bind_addr: SocketAddr, task_killer: tokio::sync::oneshot::Sender, task: tokio::task::JoinHandle, } -impl TrackerInterface> { - pub fn new(cfg: torrust_tracker_configuration::HttpTracker, interface: I) -> Self { +impl HttpServer> { + pub fn new(cfg: torrust_tracker_configuration::HttpTracker, launcher: I) -> Self { Self { cfg, - state: Stopped { interface }, + state: Stopped { launcher }, } } - pub async fn start(self, tracker: Arc) -> Result>, Error> { + 
pub async fn start(self, tracker: Arc) -> Result>, Error> { let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); let configuration = self.cfg.clone(); - let interface = self.state.interface; + let launcher = self.state.launcher; let task = tokio::spawn(async move { let (bind_addr, server) = - interface.start_with_graceful_shutdown(configuration, tracker, shutdown_signal(shutdown_receiver)); + launcher.start_with_graceful_shutdown(configuration, tracker, shutdown_signal(shutdown_receiver)); addr_sender.send(bind_addr).unwrap(); server.await; - interface + launcher }); let bind_address = addr_receiver.await.expect("Could not receive bind_address."); - Ok(TrackerInterface { + Ok(HttpServer { cfg: self.cfg, state: Running { bind_addr: bind_address, @@ -86,15 +86,15 @@ impl TrackerInterface> { } } -impl TrackerInterface> { - pub async fn stop(self) -> Result>, Error> { +impl HttpServer> { + pub async fn stop(self) -> Result>, Error> { self.state.task_killer.send(0).unwrap(); - let interface = self.state.task.await.map_err(|e| Error::Error(e.to_string()))?; + let launcher = self.state.task.await.map_err(|e| Error::Error(e.to_string()))?; - Ok(TrackerInterface { + Ok(HttpServer { cfg: self.cfg, - state: Stopped { interface }, + state: Stopped { launcher }, }) } } diff --git a/src/http/warp_implementation/server.rs b/src/http/warp_implementation/server.rs index 6b0665fce..8d01559f3 100644 --- a/src/http/warp_implementation/server.rs +++ b/src/http/warp_implementation/server.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use futures::future::BoxFuture; use super::routes; -use crate::http::tracker_interface::TrackerInterfaceTrait; +use crate::http::tracker_interface::HttpServerLauncher; use crate::tracker; use crate::tracker::Tracker; @@ -50,7 +50,7 @@ impl Server { } } -impl TrackerInterfaceTrait for Server { +impl HttpServerLauncher for Server { fn new() -> Self { Self {} } diff --git 
a/tests/http/test_environment.rs b/tests/http/test_environment.rs index f6770b2ad..87232f79b 100644 --- a/tests/http/test_environment.rs +++ b/tests/http/test_environment.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use torrust_tracker::http::tracker_interface::{RunningHttpServer, StoppedHttpServer, TrackerInterface, TrackerInterfaceTrait}; +use torrust_tracker::http::tracker_interface::{HttpServer, HttpServerLauncher, RunningHttpServer, StoppedHttpServer}; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; @@ -18,11 +18,11 @@ pub struct TestEnvironment { } #[allow(dead_code)] -pub struct Stopped { +pub struct Stopped { http_server: StoppedHttpServer, } -pub struct Running { +pub struct Running { http_server: RunningHttpServer, } @@ -33,7 +33,7 @@ impl TestEnvironment { } } -impl TestEnvironment> { +impl TestEnvironment> { #[allow(dead_code)] pub fn new_stopped() -> Self { let cfg = tracker_configuration(); @@ -59,7 +59,7 @@ impl TestEnvironment> { } } -impl TestEnvironment> { +impl TestEnvironment> { pub async fn new_running() -> Self { let test_env = StoppedTestEnvironment::new_stopped(); @@ -77,19 +77,24 @@ impl TestEnvironment> { } #[allow(clippy::module_name_repetitions)] -pub async fn running_test_environment() -> RunningTestEnvironment { +pub async fn stopped_test_environment() -> StoppedTestEnvironment { + TestEnvironment::new_stopped().await +} + +#[allow(clippy::module_name_repetitions)] +pub async fn running_test_environment() -> RunningTestEnvironment { TestEnvironment::new_running().await } -pub fn stopped_http_server( +pub fn stopped_http_server( cfg: torrust_tracker_configuration::HttpTracker, ) -> StoppedHttpServer { let http_server = I::new(); - TrackerInterface::new(cfg, http_server) + HttpServer::new(cfg, http_server) } -pub async fn running_http_server( +pub async fn running_http_server( cfg: torrust_tracker_configuration::HttpTracker, tracker: Arc, ) -> 
RunningHttpServer { From d914e5c0fe263b433550d7d1d75ed79d26afb843 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 2 Mar 2023 20:07:16 +0100 Subject: [PATCH 0452/1003] refactor: replaced test http servers with the new test environments --- Cargo.lock | 1 + packages/test-helpers/Cargo.toml | 1 + packages/test-helpers/src/configuration.rs | 69 ++ src/http/tracker_interface.rs | 6 +- tests/api/test_environment.rs | 6 +- tests/common/http.rs | 5 - tests/common/tracker.rs | 10 +- tests/http/client.rs | 17 +- tests/http/mod.rs | 2 - tests/http/server.rs | 137 ---- tests/http/test_environment.rs | 44 +- tests/http_tracker.rs | 754 +++++++++++---------- tests/udp/test_environment.rs | 6 +- 13 files changed, 532 insertions(+), 526 deletions(-) delete mode 100644 tests/http/server.rs diff --git a/Cargo.lock b/Cargo.lock index 9045b7c47..ce179501f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3019,6 +3019,7 @@ dependencies = [ "rand", "tokio", "torrust-tracker-configuration", + "torrust-tracker-primitives", ] [[package]] diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 5be0e8aba..2f942bac7 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -9,3 +9,4 @@ tokio = { version = "1", features = ["rt-multi-thread", "net", "sync", "macros", lazy_static = "1.4" rand = "0.8.5" torrust-tracker-configuration = { path = "../configuration"} +torrust-tracker-primitives = { path = "../primitives"} diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index a978a050b..ec29fdbe1 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -1,6 +1,8 @@ use std::env; +use std::net::IpAddr; use torrust_tracker_configuration::Configuration; +use torrust_tracker_primitives::TrackerMode; use crate::random; @@ -43,3 +45,70 @@ pub fn ephemeral() -> Configuration { config } + +#[must_use] +pub fn ephemeral_with_reverse_proxy() -> 
Configuration { + let mut cfg = ephemeral(); + + cfg.on_reverse_proxy = true; + + cfg +} + +#[must_use] +pub fn ephemeral_mode_public() -> Configuration { + let mut cfg = ephemeral(); + + cfg.mode = TrackerMode::Public; + + cfg +} + +#[must_use] +pub fn ephemeral_mode_private() -> Configuration { + let mut cfg = ephemeral(); + + cfg.mode = TrackerMode::Private; + + cfg +} + +#[must_use] +pub fn ephemeral_mode_whitelisted() -> Configuration { + let mut cfg = ephemeral(); + + cfg.mode = TrackerMode::Listed; + + cfg +} + +#[must_use] +pub fn ephemeral_mode_private_whitelisted() -> Configuration { + let mut cfg = ephemeral(); + + cfg.mode = TrackerMode::PrivateListed; + + cfg +} + +#[must_use] +pub fn ephemeral_with_external_ip(ip: IpAddr) -> Configuration { + let mut cfg = ephemeral(); + + cfg.external_ip = Some(ip.to_string()); + + cfg +} + +#[must_use] +pub fn ephemeral_ipv6() -> Configuration { + let mut cfg = ephemeral(); + + let ipv6 = format!("[::]:{}", 0); + + cfg.http_api.bind_address = ipv6.clone(); + cfg.http_trackers[0].bind_address = ipv6.clone(); + cfg.udp_trackers[0].bind_address = ipv6; + + cfg +} diff --git a/src/http/tracker_interface.rs b/src/http/tracker_interface.rs index a8bb057d7..fc4ba9c95 100644 --- a/src/http/tracker_interface.rs +++ b/src/http/tracker_interface.rs @@ -33,8 +33,8 @@ pub type StoppedHttpServer = HttpServer>; pub type RunningHttpServer = HttpServer>; pub struct HttpServer { - cfg: torrust_tracker_configuration::HttpTracker, - state: S, + pub cfg: torrust_tracker_configuration::HttpTracker, + pub state: S, } pub struct Stopped { @@ -42,7 +42,7 @@ pub struct Stopped { } pub struct Running { - bind_addr: SocketAddr, + pub bind_addr: SocketAddr, task_killer: tokio::sync::oneshot::Sender, task: tokio::task::JoinHandle, } diff --git a/tests/api/test_environment.rs b/tests/api/test_environment.rs index 4f119fd64..1565530c1 100644 --- a/tests/api/test_environment.rs +++ b/tests/api/test_environment.rs @@ -6,7 +6,7 @@ use 
torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; use super::connection_info::ConnectionInfo; -use crate::common::tracker::{tracker_configuration, tracker_instance}; +use crate::common::tracker::new_tracker; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment; @@ -89,9 +89,9 @@ pub fn running_test_environment() -> RunningTestEnvironment { } pub fn api_server() -> StoppedApiServer { - let config = tracker_configuration(); + let config = Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()); - let tracker = tracker_instance(&config); + let tracker = new_tracker(config.clone()); ApiServer::new(config.http_api.clone(), tracker) } diff --git a/tests/common/http.rs b/tests/common/http.rs index 902752674..d682027fd 100644 --- a/tests/common/http.rs +++ b/tests/common/http.rs @@ -1,11 +1,6 @@ pub type ReqwestQuery = Vec; pub type ReqwestQueryParam = (String, String); -#[derive(Clone, Debug)] -pub struct ConnectionInfo { - pub bind_address: String, -} - /// URL Query component #[derive(Default, Debug)] pub struct Query { diff --git a/tests/common/tracker.rs b/tests/common/tracker.rs index c0e44749b..7451bbc36 100644 --- a/tests/common/tracker.rs +++ b/tests/common/tracker.rs @@ -4,12 +4,8 @@ use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::tracker::Tracker; use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; -pub fn tracker_configuration() -> Arc { - Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) -} - // TODO: Move to test-helpers crate once `Tracker` is isolated. 
-pub fn tracker_instance(configuration: &Arc) -> Arc { +pub fn new_tracker(configuration: Arc) -> Arc { // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); @@ -20,7 +16,7 @@ pub fn tracker_instance(configuration: &Arc Arc::new(tracker), Err(error) => { panic!("{}", error) @@ -28,7 +24,7 @@ pub fn tracker_instance(configuration: &Arc, } @@ -23,26 +22,26 @@ pub struct Client { /// base url path query /// ``` impl Client { - pub fn new(connection_info: ConnectionInfo) -> Self { + pub fn new(server_addr: std::net::SocketAddr) -> Self { Self { - connection_info, + server_addr, reqwest_client: reqwest::Client::builder().build().unwrap(), key: None, } } /// Creates the new client binding it to an specific local address - pub fn bind(connection_info: ConnectionInfo, local_address: IpAddr) -> Self { + pub fn bind(server_addr: std::net::SocketAddr, local_address: IpAddr) -> Self { Self { - connection_info, + server_addr, reqwest_client: reqwest::Client::builder().local_address(local_address).build().unwrap(), key: None, } } - pub fn authenticated(connection_info: ConnectionInfo, key: Key) -> Self { + pub fn authenticated(server_addr: std::net::SocketAddr, key: Key) -> Self { Self { - connection_info, + server_addr, reqwest_client: reqwest::Client::builder().build().unwrap(), key: Some(key), } @@ -95,6 +94,6 @@ impl Client { } fn base_url(&self) -> String { - format!("http://{}/", &self.connection_info.bind_address) + format!("http://{}/", &self.server_addr) } } diff --git a/tests/http/mod.rs b/tests/http/mod.rs index a335723e3..771145f46 100644 --- a/tests/http/mod.rs +++ b/tests/http/mod.rs @@ -1,10 +1,8 @@ pub mod asserts; pub mod asserts_warp; pub mod client; -pub mod connection_info; pub mod requests; pub mod responses; -pub mod server; pub mod test_environment; use percent_encoding::NON_ALPHANUMERIC; diff --git a/tests/http/server.rs b/tests/http/server.rs deleted file mode 100644 index 4753ee4dc..000000000 --- 
a/tests/http/server.rs +++ /dev/null @@ -1,137 +0,0 @@ -use core::panic; -use std::net::{IpAddr, SocketAddr}; -use std::sync::Arc; - -use torrust_tracker::http::Version; -use torrust_tracker::jobs::http_tracker; -use torrust_tracker::protocol::info_hash::InfoHash; -use torrust_tracker::tracker::peer::Peer; -use torrust_tracker::tracker::statistics::Keeper; -use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; -use torrust_tracker_configuration::Configuration; -use torrust_tracker_primitives::TrackerMode; - -use super::connection_info::ConnectionInfo; - -/// Starts a HTTP tracker with mode "public" in settings -pub async fn start_public_http_tracker(version: Version) -> Server { - let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); - configuration.mode = TrackerMode::Public; - start_custom_http_tracker(Arc::new(configuration), version).await -} - -/// Starts a HTTP tracker with mode "listed" in settings -pub async fn start_whitelisted_http_tracker(version: Version) -> Server { - let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); - configuration.mode = TrackerMode::Listed; - start_custom_http_tracker(Arc::new(configuration), version).await -} - -/// Starts a HTTP tracker with mode "private" in settings -pub async fn start_private_http_tracker(version: Version) -> Server { - let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); - configuration.mode = TrackerMode::Private; - start_custom_http_tracker(Arc::new(configuration), version).await -} - -/// Starts a HTTP tracker with a wildcard IPV6 address. 
-/// The configuration in the `config.toml` file would be like this: -/// -/// ```text -/// [[http_trackers]] -/// bind_address = "[::]:7070" -/// ``` -pub async fn start_ipv6_http_tracker(version: Version) -> Server { - let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); - - // Change socket address to "wildcard address" (unspecified address which means any IP address) - // but keeping the random port generated with the ephemeral configuration. - let socket_addr: SocketAddr = configuration.http_trackers[0].bind_address.parse().unwrap(); - let new_ipv6_socket_address = format!("[::]:{}", socket_addr.port()); - configuration.http_trackers[0].bind_address = new_ipv6_socket_address; - - start_custom_http_tracker(Arc::new(configuration), version).await -} - -/// Starts a HTTP tracker with an specific `external_ip`. -/// The configuration in the `config.toml` file would be like this: -/// -/// ```text -/// external_ip = "2.137.87.41" -/// ``` -pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr, version: Version) -> Server { - let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); - configuration.external_ip = Some(external_ip.to_string()); - start_custom_http_tracker(Arc::new(configuration), version).await -} - -/// Starts a HTTP tracker `on_reverse_proxy`. 
-/// The configuration in the `config.toml` file would be like this: -/// -/// ```text -/// on_reverse_proxy = true -/// ``` -pub async fn start_http_tracker_on_reverse_proxy(version: Version) -> Server { - let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); - configuration.on_reverse_proxy = true; - start_custom_http_tracker(Arc::new(configuration), version).await -} - -pub async fn start_default_http_tracker(version: Version) -> Server { - let configuration = tracker_configuration(); - start_custom_http_tracker(configuration.clone(), version).await -} - -pub fn tracker_configuration() -> Arc { - Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) -} - -pub async fn start_custom_http_tracker(configuration: Arc, version: Version) -> Server { - let server = start(&configuration); - http_tracker::start_job(&configuration.http_trackers[0], server.tracker.clone(), version).await; - server -} - -fn start(configuration: &Arc) -> Server { - let connection_info = ConnectionInfo::anonymous(&configuration.http_trackers[0].bind_address.clone()); - - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - - // Initialize logging - logging::setup(configuration); - - Server { - tracker, - connection_info, - } -} - -pub struct Server { - pub tracker: Arc, - pub connection_info: ConnectionInfo, -} - -impl Server { - pub fn get_connection_info(&self) -> ConnectionInfo { - self.connection_info.clone() - } - - pub async fn add_torrent_peer(&self, 
info_hash: &InfoHash, peer: &Peer) { - self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - } -} diff --git a/tests/http/test_environment.rs b/tests/http/test_environment.rs index 87232f79b..a2cb4619c 100644 --- a/tests/http/test_environment.rs +++ b/tests/http/test_environment.rs @@ -5,7 +5,7 @@ use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; -use crate::common::tracker::{tracker_configuration, tracker_instance}; +use crate::common::tracker::new_tracker; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment>; @@ -13,6 +13,7 @@ pub type StoppedTestEnvironment = TestEnvironment>; pub type RunningTestEnvironment = TestEnvironment>; pub struct TestEnvironment { + pub cfg: Arc, pub tracker: Arc, pub state: S, } @@ -35,14 +36,15 @@ impl TestEnvironment { impl TestEnvironment> { #[allow(dead_code)] - pub fn new_stopped() -> Self { - let cfg = tracker_configuration(); + pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { + let cfg = Arc::new(cfg); - let tracker = tracker_instance(&cfg); + let tracker = new_tracker(cfg.clone()); let http_server = stopped_http_server(cfg.http_trackers[0].clone()); Self { + cfg, tracker, state: Stopped { http_server }, } @@ -51,39 +53,61 @@ impl TestEnvironment> { #[allow(dead_code)] pub async fn start(self) -> TestEnvironment> { TestEnvironment { + cfg: self.cfg, tracker: self.tracker.clone(), state: Running { http_server: self.state.http_server.start(self.tracker).await.unwrap(), }, } } + + pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { + &self.state.http_server.cfg + } + + pub fn config_mut(&mut self) -> &mut torrust_tracker_configuration::HttpTracker { + &mut self.state.http_server.cfg + } } impl TestEnvironment> { - pub async fn new_running() -> Self { - let test_env = StoppedTestEnvironment::new_stopped(); + pub async fn 
new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { + let test_env = StoppedTestEnvironment::new_stopped(cfg); test_env.start().await } pub async fn stop(self) -> TestEnvironment> { TestEnvironment { + cfg: self.cfg, tracker: self.tracker, state: Stopped { http_server: self.state.http_server.stop().await.unwrap(), }, } } + + pub fn bind_address(&self) -> &std::net::SocketAddr { + &self.state.http_server.state.bind_addr + } + + pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { + &self.state.http_server.cfg + } } #[allow(clippy::module_name_repetitions)] -pub async fn stopped_test_environment() -> StoppedTestEnvironment { - TestEnvironment::new_stopped().await +pub fn stopped_test_environment( + cfg: torrust_tracker_configuration::Configuration, +) -> StoppedTestEnvironment { + TestEnvironment::new_stopped(cfg) } #[allow(clippy::module_name_repetitions)] -pub async fn running_test_environment() -> RunningTestEnvironment { - TestEnvironment::new_running().await +pub async fn running_test_environment( + cfg: torrust_tracker_configuration::Configuration, +) -> RunningTestEnvironment { + TestEnvironment::new_running(cfg).await } pub fn stopped_http_server( diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index c0d0bdd23..22a6c44ff 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -2,12 +2,12 @@ /// /// Warp version: /// ```text -/// cargo test `warp_http_tracker_server` -- --nocapture +/// cargo test `warp_test_env` -- --nocapture /// ``` /// /// Axum version (WIP): /// ```text -/// cargo test `warp_http_tracker_server` -- --nocapture +/// cargo test `warp_test_env` -- --nocapture /// ``` mod common; mod http; @@ -15,31 +15,30 @@ mod http; pub type Axum = torrust_tracker::http::axum_implementation::server::Server; pub type Warp = torrust_tracker::http::warp_implementation::server::Server; -mod http_tracker_test_environment { +mod test_env_test_environment { use 
crate::http::test_environment::running_test_environment; use crate::{Axum, Warp}; #[tokio::test] async fn should_be_able_to_start_and_stop_a_test_environment_using_axum() { - let test_env = running_test_environment::().await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; test_env.stop().await; } #[tokio::test] async fn should_be_able_to_start_and_stop_a_test_environment_using_warp() { - let test_env = running_test_environment::().await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; test_env.stop().await; } } -mod warp_http_tracker_server { +mod warp_test_env { mod for_all_config_modes { mod running_on_reverse_proxy { - use torrust_tracker::http::Version; use crate::http::asserts::{ assert_could_not_find_remote_address_on_xff_header_error_response, @@ -47,18 +46,21 @@ mod warp_http_tracker_server { }; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; - use crate::http::server::start_http_tracker_on_reverse_proxy; + use crate::http::test_environment::{running_test_environment, stopped_test_environment}; + use crate::Warp; #[tokio::test] async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { // If the tracker is running behind a reverse proxy, the peer IP is the // last IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy client. 
- let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) + .await; let params = QueryBuilder::default().query().params(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -67,11 +69,13 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) + .await; let params = QueryBuilder::default().query().params(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") .await; @@ -97,7 +101,6 @@ mod warp_http_tracker_server { use local_ip_address::local_ip; use reqwest::Response; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -113,20 +116,18 @@ mod warp_http_tracker_server { use crate::http::responses; use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList}; use crate::http::responses::announce_warp::{WarpAnnounce, WarpDictionaryPeer}; - use crate::http::server::{ - start_default_http_tracker, start_http_tracker_on_reverse_proxy, start_http_tracker_with_external_ip, - start_ipv6_http_tracker, start_public_http_tracker, - }; + use crate::http::test_environment::running_test_environment; + use crate::Warp; #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let http_tracker_server = 
start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); params.remove_optional_params(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -135,16 +136,16 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_when_the_url_query_component_is_empty() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; - let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; + let response = Client::new(test_env.bind_address().clone()).get("announce").await; assert_internal_server_error_response(response).await; } #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; // Without `info_hash` param @@ -152,7 +153,7 @@ mod warp_http_tracker_server { params.info_hash = None; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -164,7 +165,7 @@ mod warp_http_tracker_server { params.peer_id = None; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -176,7 +177,7 @@ mod warp_http_tracker_server { params.port = None; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) 
.get(&format!("announce?{params}")) .await; @@ -185,14 +186,14 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); for invalid_value in &invalid_info_hashes() { params.set("info_hash", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -207,13 +208,13 @@ mod warp_http_tracker_server { // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP if there. // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request header is tracker is running `on_reverse_proxy`. - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -222,7 +223,7 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -231,7 +232,7 @@ mod warp_http_tracker_server { for invalid_value in invalid_values { params.set("downloaded", invalid_value); - let response = 
Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -241,7 +242,7 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -250,7 +251,7 @@ mod warp_http_tracker_server { for invalid_value in invalid_values { params.set("uploaded", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -260,7 +261,7 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -276,7 +277,7 @@ mod warp_http_tracker_server { for invalid_value in invalid_values { params.set("peer_id", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -286,7 +287,7 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -295,7 +296,7 @@ mod warp_http_tracker_server { for invalid_value in invalid_values { params.set("port", 
invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -305,7 +306,7 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -314,7 +315,7 @@ mod warp_http_tracker_server { for invalid_value in invalid_values { params.set("left", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -326,7 +327,7 @@ mod warp_http_tracker_server { async fn should_not_fail_when_the_event_param_is_invalid() { // All invalid values are ignored as if the `event` param were empty - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -343,7 +344,7 @@ mod warp_http_tracker_server { for invalid_value in invalid_values { params.set("event", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -353,7 +354,7 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_not_fail_when_the_compact_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -362,7 +363,7 @@ mod 
warp_http_tracker_server { for invalid_value in invalid_values { params.set("compact", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -372,9 +373,10 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) @@ -387,8 +389,8 @@ mod warp_http_tracker_server { &Announce { complete: 1, // the peer for this test incomplete: 0, - interval: http_tracker_server.tracker.config.announce_interval, - min_interval: http_tracker_server.tracker.config.min_announce_interval, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, peers: vec![], }, ) @@ -397,7 +399,8 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -407,12 +410,10 @@ mod warp_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server - .add_torrent_peer(&info_hash, &previously_announced_peer) - .await; + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2. 
This new peer is non included on the response peer list - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -427,8 +428,8 @@ mod warp_http_tracker_server { &WarpAnnounce { complete: 2, incomplete: 0, - interval: http_tracker_server.tracker.config.announce_interval, - min_interval: http_tracker_server.tracker.config.min_announce_interval, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, peers: vec![WarpDictionaryPeer::from(previously_announced_peer)], }, ) @@ -437,13 +438,14 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let peer = PeerBuilder::default().build(); // Add a peer - http_tracker_server.add_torrent_peer(&info_hash, &peer).await; + test_env.add_torrent_peer(&info_hash, &peer).await; let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -452,9 +454,7 @@ mod warp_http_tracker_server { assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); - let response = Client::new(http_tracker_server.get_connection_info()) - .announce(&announce_query) - .await; + let response = Client::new(test_env.bind_address().clone()).announce(&announce_query).await; assert_empty_announce_response(response).await; } @@ -464,7 +464,8 @@ mod warp_http_tracker_server { // Tracker Returns Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + 
running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -474,12 +475,10 @@ mod warp_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server - .add_torrent_peer(&info_hash, &previously_announced_peer) - .await; + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 accepting compact responses - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -505,7 +504,8 @@ mod warp_http_tracker_server { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -515,14 +515,12 @@ mod warp_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server - .add_torrent_peer(&info_hash, &previously_announced_peer) - .await; + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list // https://www.bittorrent.org/beps/bep_0023.html - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -543,26 +541,28 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let http_tracker_server 
= start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(http_tracker_server.get_connection_info()) + Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_connections_handled, 1); } #[tokio::test] async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let http_tracker_server = start_ipv6_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; - Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) + Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 1); } @@ -571,9 +571,10 @@ mod warp_http_tracker_server { async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(http_tracker_server.get_connection_info()) + Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -581,33 +582,35 @@ mod warp_http_tracker_server { ) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 0); } #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(http_tracker_server.get_connection_info()) + Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_announces_handled, 1); } #[tokio::test] async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let http_tracker_server = start_ipv6_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; - Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) + Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 1); } @@ -616,9 +619,10 @@ mod warp_http_tracker_server { async fn 
should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(http_tracker_server.get_connection_info()) + Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -626,19 +630,20 @@ mod warp_http_tracker_server { ) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 0); } #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + let client = Client::bind(test_env.bind_address().clone(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -647,7 +652,7 @@ mod warp_http_tracker_server { client.announce(&announce_query).await; - let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), client_ip); @@ -663,14 +668,17 @@ mod warp_http_tracker_server { 127.0.0.1 external_ip = "2.137.87.41" */ - let http_tracker_server = - 
start_http_tracker_with_external_ip(&IpAddr::from_str("2.137.87.41").unwrap(), Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_external_ip( + IpAddr::from_str("2.137.87.41").unwrap(), + )) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + let client = Client::bind(test_env.bind_address().clone(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -679,10 +687,10 @@ mod warp_http_tracker_server { client.announce(&announce_query).await; - let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); } @@ -695,17 +703,17 @@ mod warp_http_tracker_server { ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" */ - let http_tracker_server = start_http_tracker_with_external_ip( - &IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), - Version::Warp, - ) - .await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_external_ip( + IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + )) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + let client = 
Client::bind(test_env.bind_address().clone(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -714,10 +722,10 @@ mod warp_http_tracker_server { client.announce(&announce_query).await; - let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); } @@ -730,11 +738,13 @@ mod warp_http_tracker_server { 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 */ - let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let client = Client::new(http_tracker_server.get_connection_info()); + let client = Client::new(test_env.bind_address().clone()); let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); @@ -746,7 +756,7 @@ mod warp_http_tracker_server { ) .await; - let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); @@ -766,7 +776,6 @@ mod warp_http_tracker_server { use std::net::IpAddr; use std::str::FromStr; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -776,26 +785,29 @@ mod warp_http_tracker_server { use crate::http::requests; use crate::http::requests::scrape::QueryBuilder; use 
crate::http::responses::scrape::{self, File, ResponseBuilder}; - use crate::http::server::{start_ipv6_http_tracker, start_public_http_tracker}; + use crate::http::test_environment::running_test_environment; + use crate::Warp; #[tokio::test] async fn should_fail_when_the_request_is_empty() { - let http_tracker_server = start_public_http_tracker(Version::Warp).await; - let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let response = Client::new(test_env.bind_address().clone()).get("scrape").await; assert_internal_server_error_response(response).await; } #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let mut params = QueryBuilder::default().query().params(); for invalid_value in &invalid_info_hashes() { params.set_one_info_hash_param(invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -806,11 +818,12 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let http_tracker = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -820,7 +833,7 @@ mod warp_http_tracker_server { ) .await; - let response = Client::new(http_tracker.get_connection_info()) 
+ let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -844,11 +857,12 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let http_tracker = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -858,7 +872,7 @@ mod warp_http_tracker_server { ) .await; - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -882,11 +896,12 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let http_tracker = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -899,12 +914,13 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_accept_multiple_infohashes() { - let http_tracker = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); 
let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .add_info_hash(&info_hash1) @@ -923,11 +939,12 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let http_tracker = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::new(http_tracker.get_connection_info()) + Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -935,18 +952,19 @@ mod warp_http_tracker_server { ) .await; - let stats = http_tracker.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_scrapes_handled, 1); } #[tokio::test] async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let http_tracker = start_ipv6_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::bind(http_tracker.get_connection_info(), IpAddr::from_str("::1").unwrap()) + Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -954,7 +972,7 @@ mod warp_http_tracker_server { ) .await; - let stats = http_tracker.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_scrapes_handled, 1); } @@ -966,21 +984,23 @@ mod 
warp_http_tracker_server { mod and_receiving_an_announce_request { use std::str::FromStr; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; - use crate::http::server::start_whitelisted_http_tracker; + use crate::http::test_environment::running_test_environment; + use crate::Warp; #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let http_tracker_server = start_whitelisted_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -990,17 +1010,19 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { - let http_tracker_server = start_whitelisted_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker_server + test_env .tracker .add_torrent_to_whitelist(&info_hash) .await .expect("should add the torrent to the whitelist"); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -1011,7 +1033,6 @@ mod warp_http_tracker_server { mod receiving_an_scrape_request { use std::str::FromStr; - use 
torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -1020,15 +1041,18 @@ mod warp_http_tracker_server { use crate::http::client::Client; use crate::http::requests; use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::server::start_whitelisted_http_tracker; + use crate::http::test_environment::running_test_environment; + use crate::Warp; #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let http_tracker = start_whitelisted_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -1038,7 +1062,7 @@ mod warp_http_tracker_server { ) .await; - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1053,11 +1077,13 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let http_tracker = start_whitelisted_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -1067,13 +1093,13 @@ mod warp_http_tracker_server { ) .await; - http_tracker + test_env .tracker .add_torrent_to_whitelist(&info_hash) .await .expect("should add the torrent to the whitelist"); - let response = Client::new(http_tracker.get_connection_info()) + let response = 
Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1103,7 +1129,6 @@ mod warp_http_tracker_server { use std::str::FromStr; use std::time::Duration; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; @@ -1113,19 +1138,17 @@ mod warp_http_tracker_server { }; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; - use crate::http::server::start_private_http_tracker; + use crate::http::test_environment::running_test_environment; + use crate::Warp; #[tokio::test] async fn should_respond_to_authenticated_peers() { - let http_tracker_server = start_private_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; - let key = http_tracker_server - .tracker - .generate_auth_key(Duration::from_secs(60)) - .await - .unwrap(); + let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(http_tracker_server.get_connection_info(), key.id()) + let response = Client::authenticated(test_env.bind_address().clone(), key.id()) .announce(&QueryBuilder::default().query()) .await; @@ -1134,11 +1157,12 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let http_tracker_server = start_private_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -1147,12 
+1171,13 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_if_the_peer_authentication_key_is_not_valid() { - let http_tracker_server = start_private_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; // The tracker does not have this key let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key) + let response = Client::authenticated(test_env.bind_address().clone(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; @@ -1165,7 +1190,6 @@ mod warp_http_tracker_server { use std::str::FromStr; use std::time::Duration; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; use torrust_tracker::tracker::peer; @@ -1175,15 +1199,17 @@ mod warp_http_tracker_server { use crate::http::client::Client; use crate::http::requests; use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::server::start_private_http_tracker; + use crate::http::test_environment::running_test_environment; + use crate::Warp; #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let http_tracker = start_private_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -1193,7 +1219,7 @@ mod warp_http_tracker_server { ) .await; - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() 
.with_one_info_hash(&info_hash) @@ -1208,11 +1234,12 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let http_tracker = start_private_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -1222,9 +1249,9 @@ mod warp_http_tracker_server { ) .await; - let key = http_tracker.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(http_tracker.get_connection_info(), key.id()) + let response = Client::authenticated(test_env.bind_address().clone(), key.id()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1250,11 +1277,12 @@ mod warp_http_tracker_server { async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { // There is not authentication error - let http_tracker = start_private_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -1266,7 +1294,7 @@ mod warp_http_tracker_server { let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - let response = Client::authenticated(http_tracker.get_connection_info(), false_key) + let response = Client::authenticated(test_env.bind_address().clone(), false_key) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1289,30 
+1317,32 @@ mod warp_http_tracker_server { } } -mod axum_http_tracker_server { +mod axum_test_env { // WIP: migration HTTP from Warp to Axum mod for_all_config_modes { mod and_running_on_reverse_proxy { - use torrust_tracker::http::Version; use crate::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; - use crate::http::server::start_http_tracker_on_reverse_proxy; + use crate::http::test_environment::running_test_environment; + use crate::Axum; #[tokio::test] async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { // If the tracker is running behind a reverse proxy, the peer IP is the // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. - let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) + .await; let params = QueryBuilder::default().query().params(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1321,11 +1351,13 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) + .await; let params = QueryBuilder::default().query().params(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") .await; @@ -1351,7 +1383,6 @@ mod axum_http_tracker_server 
{ use local_ip_address::local_ip; use reqwest::Response; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -1366,20 +1397,18 @@ mod axum_http_tracker_server { use crate::http::requests::announce::{Compact, QueryBuilder}; use crate::http::responses; use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; - use crate::http::server::{ - start_default_http_tracker, start_http_tracker_on_reverse_proxy, start_http_tracker_with_external_ip, - start_ipv6_http_tracker, start_public_http_tracker, - }; + use crate::http::test_environment::running_test_environment; + use crate::Axum; #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); params.remove_optional_params(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1388,20 +1417,20 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_url_query_component_is_empty() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; - let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; + let response = Client::new(test_env.bind_address().clone()).get("announce").await; assert_missing_query_params_for_announce_request_error_response(response).await; } #[tokio::test] async fn should_fail_when_url_query_parameters_are_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = 
running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let invalid_query_param = "a=b=c"; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{invalid_query_param}")) .await; @@ -1410,7 +1439,7 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; // Without `info_hash` param @@ -1418,7 +1447,7 @@ mod axum_http_tracker_server { params.info_hash = None; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1430,7 +1459,7 @@ mod axum_http_tracker_server { params.peer_id = None; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1442,7 +1471,7 @@ mod axum_http_tracker_server { params.port = None; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1451,14 +1480,14 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); for invalid_value in &invalid_info_hashes() { params.set("info_hash", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = 
Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1473,13 +1502,13 @@ mod axum_http_tracker_server { // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1488,7 +1517,7 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1497,7 +1526,7 @@ mod axum_http_tracker_server { for invalid_value in invalid_values { params.set("downloaded", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1507,7 +1536,7 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1516,7 +1545,7 @@ mod axum_http_tracker_server { for invalid_value in invalid_values { params.set("uploaded", invalid_value); - let response = 
Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1526,7 +1555,7 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1542,7 +1571,7 @@ mod axum_http_tracker_server { for invalid_value in invalid_values { params.set("peer_id", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1552,7 +1581,7 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1561,7 +1590,7 @@ mod axum_http_tracker_server { for invalid_value in invalid_values { params.set("port", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1571,7 +1600,7 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1580,7 +1609,7 @@ mod axum_http_tracker_server { for invalid_value in invalid_values { params.set("left", 
invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1590,7 +1619,7 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_event_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1607,7 +1636,7 @@ mod axum_http_tracker_server { for invalid_value in invalid_values { params.set("event", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1617,7 +1646,7 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_compact_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1626,7 +1655,7 @@ mod axum_http_tracker_server { for invalid_value in invalid_values { params.set("compact", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1636,9 +1665,10 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = 
Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) @@ -1651,8 +1681,8 @@ mod axum_http_tracker_server { &Announce { complete: 1, // the peer for this test incomplete: 0, - interval: http_tracker_server.tracker.config.announce_interval, - min_interval: http_tracker_server.tracker.config.min_announce_interval, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, peers: vec![], }, ) @@ -1661,7 +1691,8 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1671,12 +1702,10 @@ mod axum_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server - .add_torrent_peer(&info_hash, &previously_announced_peer) - .await; + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2. 
This new peer is non included on the response peer list - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -1691,8 +1720,8 @@ mod axum_http_tracker_server { &Announce { complete: 2, incomplete: 0, - interval: http_tracker_server.tracker.config.announce_interval, - min_interval: http_tracker_server.tracker.config.min_announce_interval, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, peers: vec![DictionaryPeer::from(previously_announced_peer)], }, ) @@ -1701,7 +1730,8 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1710,7 +1740,7 @@ mod axum_http_tracker_server { .with_peer_id(&peer::Id(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) .build(); - http_tracker_server.add_torrent_peer(&info_hash, &peer_using_ipv4).await; + test_env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; // Announce a peer using IPV6 let peer_using_ipv6 = PeerBuilder::default() @@ -1720,10 +1750,10 @@ mod axum_http_tracker_server { 8080, )) .build(); - http_tracker_server.add_torrent_peer(&info_hash, &peer_using_ipv6).await; + test_env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; // Announce the new Peer. 
- let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -1739,8 +1769,8 @@ mod axum_http_tracker_server { &Announce { complete: 3, incomplete: 0, - interval: http_tracker_server.tracker.config.announce_interval, - min_interval: http_tracker_server.tracker.config.min_announce_interval, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, peers: vec![DictionaryPeer::from(peer_using_ipv4), DictionaryPeer::from(peer_using_ipv6)], }, ) @@ -1749,13 +1779,14 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let peer = PeerBuilder::default().build(); // Add a peer - http_tracker_server.add_torrent_peer(&info_hash, &peer).await; + test_env.add_torrent_peer(&info_hash, &peer).await; let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -1764,9 +1795,7 @@ mod axum_http_tracker_server { assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); - let response = Client::new(http_tracker_server.get_connection_info()) - .announce(&announce_query) - .await; + let response = Client::new(test_env.bind_address().clone()).announce(&announce_query).await; assert_empty_announce_response(response).await; } @@ -1776,7 +1805,8 @@ mod axum_http_tracker_server { // Tracker Returns Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + 
running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1786,12 +1816,10 @@ mod axum_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server - .add_torrent_peer(&info_hash, &previously_announced_peer) - .await; + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 accepting compact responses - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -1817,7 +1845,8 @@ mod axum_http_tracker_server { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1827,14 +1856,12 @@ mod axum_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server - .add_torrent_peer(&info_hash, &previously_announced_peer) - .await; + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list // https://www.bittorrent.org/beps/bep_0023.html - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -1855,26 +1882,28 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let 
http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(http_tracker_server.get_connection_info()) + Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_connections_handled, 1); } #[tokio::test] async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let http_tracker_server = start_ipv6_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; - Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) + Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 1); } @@ -1883,9 +1912,10 @@ mod axum_http_tracker_server { async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(http_tracker_server.get_connection_info()) + Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -1893,33 +1923,35 @@ mod axum_http_tracker_server { ) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 0); } #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(http_tracker_server.get_connection_info()) + Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_announces_handled, 1); } #[tokio::test] async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let http_tracker_server = start_ipv6_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; - Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) + Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 1); } @@ -1928,9 +1960,10 @@ mod axum_http_tracker_server { async fn 
should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(http_tracker_server.get_connection_info()) + Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -1938,19 +1971,20 @@ mod axum_http_tracker_server { ) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 0); } #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + let client = Client::bind(test_env.bind_address().clone(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -1959,7 +1993,7 @@ mod axum_http_tracker_server { client.announce(&announce_query).await; - let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), client_ip); @@ -1975,14 +2009,17 @@ mod axum_http_tracker_server { 127.0.0.1 external_ip = "2.137.87.41" */ - let http_tracker_server = - 
start_http_tracker_with_external_ip(&IpAddr::from_str("2.137.87.41").unwrap(), Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_external_ip( + IpAddr::from_str("2.137.87.41").unwrap(), + )) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + let client = Client::bind(test_env.bind_address().clone(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -1991,10 +2028,10 @@ mod axum_http_tracker_server { client.announce(&announce_query).await; - let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); } @@ -2007,17 +2044,17 @@ mod axum_http_tracker_server { ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" */ - let http_tracker_server = start_http_tracker_with_external_ip( - &IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), - Version::Axum, - ) - .await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_external_ip( + IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + )) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + let client = 
Client::bind(test_env.bind_address().clone(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -2026,10 +2063,10 @@ mod axum_http_tracker_server { client.announce(&announce_query).await; - let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); } @@ -2042,11 +2079,13 @@ mod axum_http_tracker_server { 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 */ - let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let client = Client::new(http_tracker_server.get_connection_info()); + let client = Client::new(test_env.bind_address().clone()); let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); @@ -2058,7 +2097,7 @@ mod axum_http_tracker_server { ) .await; - let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); @@ -2078,7 +2117,6 @@ mod axum_http_tracker_server { use std::net::IpAddr; use std::str::FromStr; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -2091,26 +2129,30 @@ mod axum_http_tracker_server { use crate::http::requests; use 
crate::http::requests::scrape::QueryBuilder; use crate::http::responses::scrape::{self, File, ResponseBuilder}; - use crate::http::server::{start_ipv6_http_tracker, start_public_http_tracker}; + use crate::http::test_environment::running_test_environment; + use crate::Axum; - #[tokio::test] - async fn should_fail_when_the_url_query_component_is_empty() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; - let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_request_is_empty() { + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let response = Client::new(test_env.bind_address().clone()).get("scrape").await; assert_missing_query_params_for_scrape_request_error_response(response).await; } #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let mut params = QueryBuilder::default().query().params(); for invalid_value in &invalid_info_hashes() { params.set_one_info_hash_param(invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -2120,11 +2162,12 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let http_tracker = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + 
test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -2134,7 +2177,7 @@ mod axum_http_tracker_server { ) .await; - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2158,11 +2201,12 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let http_tracker = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -2172,7 +2216,7 @@ mod axum_http_tracker_server { ) .await; - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2196,11 +2240,12 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let http_tracker = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2213,12 +2258,13 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_accept_multiple_infohashes() { - let http_tracker = start_public_http_tracker(Version::Axum).await; + let test_env 
= + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .add_info_hash(&info_hash1) @@ -2237,11 +2283,12 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let http_tracker = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::new(http_tracker.get_connection_info()) + Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2249,18 +2296,19 @@ mod axum_http_tracker_server { ) .await; - let stats = http_tracker.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_scrapes_handled, 1); } #[tokio::test] async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let http_tracker = start_ipv6_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::bind(http_tracker.get_connection_info(), IpAddr::from_str("::1").unwrap()) + Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2268,7 +2316,7 @@ mod 
axum_http_tracker_server { ) .await; - let stats = http_tracker.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_scrapes_handled, 1); } @@ -2280,21 +2328,23 @@ mod axum_http_tracker_server { mod and_receiving_an_announce_request { use std::str::FromStr; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; - use crate::http::server::start_whitelisted_http_tracker; + use crate::http::test_environment::running_test_environment; + use crate::Axum; #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let http_tracker_server = start_whitelisted_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -2303,17 +2353,19 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { - let http_tracker_server = start_whitelisted_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker_server + test_env .tracker .add_torrent_to_whitelist(&info_hash) .await .expect("should add the torrent to the whitelist"); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = 
Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -2324,7 +2376,6 @@ mod axum_http_tracker_server { mod receiving_an_scrape_request { use std::str::FromStr; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -2333,15 +2384,18 @@ mod axum_http_tracker_server { use crate::http::client::Client; use crate::http::requests; use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::server::start_whitelisted_http_tracker; + use crate::http::test_environment::running_test_environment; + use crate::Axum; #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let http_tracker = start_whitelisted_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -2351,7 +2405,7 @@ mod axum_http_tracker_server { ) .await; - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2366,11 +2420,13 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let http_tracker = start_whitelisted_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -2380,13 +2436,13 @@ mod 
axum_http_tracker_server { ) .await; - http_tracker + test_env .tracker .add_torrent_to_whitelist(&info_hash) .await .expect("should add the torrent to the whitelist"); - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2416,26 +2472,23 @@ mod axum_http_tracker_server { use std::str::FromStr; use std::time::Duration; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; use crate::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; - use crate::http::server::start_private_http_tracker; + use crate::http::test_environment::running_test_environment; + use crate::Axum; #[tokio::test] async fn should_respond_to_authenticated_peers() { - let http_tracker_server = start_private_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; - let key = http_tracker_server - .tracker - .generate_auth_key(Duration::from_secs(60)) - .await - .unwrap(); + let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(http_tracker_server.get_connection_info(), key.id()) + let response = Client::authenticated(test_env.bind_address().clone(), key.id()) .announce(&QueryBuilder::default().query()) .await; @@ -2444,11 +2497,12 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let http_tracker_server = start_private_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -2457,11 +2511,12 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let http_tracker_server = start_private_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let invalid_key = "INVALID_KEY"; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!( "announce/{invalid_key}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" )) @@ -2472,12 +2527,13 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { - let http_tracker_server = start_private_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; // The tracker does not have this key let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key) + let response = Client::authenticated(test_env.bind_address().clone(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; @@ -2490,7 +2546,6 @@ mod axum_http_tracker_server { use std::str::FromStr; use std::time::Duration; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; use 
torrust_tracker::tracker::peer; @@ -2500,15 +2555,17 @@ mod axum_http_tracker_server { use crate::http::client::Client; use crate::http::requests; use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::server::start_private_http_tracker; + use crate::http::test_environment::running_test_environment; + use crate::Axum; #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let http_tracker_server = start_private_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let invalid_key = "INVALID_KEY"; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!( "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" )) @@ -2519,11 +2576,12 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let http_tracker = start_private_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -2533,7 +2591,7 @@ mod axum_http_tracker_server { ) .await; - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2548,11 +2606,12 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let http_tracker = start_private_http_tracker(Version::Axum).await; + let test_env = + 
running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -2562,9 +2621,9 @@ mod axum_http_tracker_server { ) .await; - let key = http_tracker.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(http_tracker.get_connection_info(), key.id()) + let response = Client::authenticated(test_env.bind_address().clone(), key.id()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2591,11 +2650,12 @@ mod axum_http_tracker_server { // There is not authentication error // code-review: should this really be this way? - let http_tracker = start_private_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -2607,7 +2667,7 @@ mod axum_http_tracker_server { let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - let response = Client::authenticated(http_tracker.get_connection_info(), false_key) + let response = Client::authenticated(test_env.bind_address().clone(), false_key) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) diff --git a/tests/udp/test_environment.rs b/tests/udp/test_environment.rs index 585a8f934..f805d9a05 100644 --- a/tests/udp/test_environment.rs +++ b/tests/udp/test_environment.rs @@ -6,7 +6,7 @@ use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; use torrust_tracker::udp::server::{RunningUdpServer, 
StoppedUdpServer, UdpServer}; -use crate::common::tracker::{tracker_configuration, tracker_instance}; +use crate::common::tracker::new_tracker; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment; @@ -88,9 +88,9 @@ pub async fn running_test_environment() -> RunningTestEnvironment { } pub fn udp_server() -> StoppedUdpServer { - let config = tracker_configuration(); + let config = Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()); - let tracker = tracker_instance(&config); + let tracker = new_tracker(config.clone()); UdpServer::new(config.udp_trackers[0].clone(), tracker) } From fac2be86cedc8e4df650766653d963dee046aae0 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 3 Mar 2023 01:35:03 +0100 Subject: [PATCH 0453/1003] fix: all http tracker tests --- src/apis/server.rs | 2 +- .../{server.rs => launcher.rs} | 18 +- src/http/axum_implementation/mod.rs | 2 +- src/http/axum_implementation/routes.rs | 2 +- .../{server.rs => launcher.rs} | 6 +- src/http/warp_implementation/mod.rs | 2 +- src/jobs/http_tracker.rs | 8 +- src/main.rs | 2 +- src/tracker/mod.rs | 29 +-- src/tracker/services/common.rs | 2 +- src/tracker/services/statistics.rs | 2 +- src/tracker/services/torrent.rs | 14 +- src/udp/handlers.rs | 22 +- tests/common/tracker.rs | 15 +- tests/http/test_environment.rs | 18 +- tests/http_tracker.rs | 232 +++++++++++++++++- 16 files changed, 283 insertions(+), 93 deletions(-) rename src/http/axum_implementation/{server.rs => launcher.rs} (90%) rename src/http/warp_implementation/{server.rs => launcher.rs} (97%) diff --git a/src/apis/server.rs b/src/apis/server.rs index 4594456fb..8d4c703b7 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -201,7 +201,7 @@ mod tests { async fn it_should_be_able_to_start_from_stopped_state_and_then_stop_again() { let cfg = tracker_configuration(); - let tracker = Arc::new(tracker::Tracker::new(&cfg, None, statistics::Repo::new()).unwrap()); + let tracker = 
Arc::new(tracker::Tracker::new(cfg.clone(), None, statistics::Repo::new()).unwrap()); let stopped_api_server = ApiServer::new(cfg.http_api.clone(), tracker); diff --git a/src/http/axum_implementation/server.rs b/src/http/axum_implementation/launcher.rs similarity index 90% rename from src/http/axum_implementation/server.rs rename to src/http/axum_implementation/launcher.rs index a12d60332..95fa9f2b7 100644 --- a/src/http/axum_implementation/server.rs +++ b/src/http/axum_implementation/launcher.rs @@ -19,9 +19,9 @@ pub enum Error { Error(String), } -pub struct Server; +pub struct Launcher; -impl Server { +impl Launcher { pub fn start_from_tcp_listener_with_graceful_shutdown( tcp_listener: std::net::TcpListener, tracker: Arc, @@ -30,12 +30,12 @@ impl Server { where F: Future + Send + 'static, { - let app = router(&tracker); + let app = router(tracker); Box::pin(async { axum::Server::from_tcp(tcp_listener) .expect("Could not bind to tcp listener.") - .serve(app.into_make_service()) + .serve(app.into_make_service_with_connect_info::()) .with_graceful_shutdown(shutdown_signal) .await .expect("Axum server crashed."); @@ -51,7 +51,7 @@ impl Server { where F: Future + Send + 'static, { - let app = router(&tracker); + let app = router(tracker); let handle = Handle::new(); @@ -69,7 +69,7 @@ impl Server { axum_server::from_tcp_rustls(tcp_listener, tls_config) .handle(handle) - .serve(app.into_make_service()) + .serve(app.into_make_service_with_connect_info::()) .await .expect("Axum server crashed."); }) @@ -77,7 +77,7 @@ impl Server { } #[async_trait] -impl HttpServerLauncher for Server { +impl HttpServerLauncher for Launcher { fn new() -> Self { Self {} } @@ -114,7 +114,7 @@ impl HttpServerLauncher for Server { } } -pub fn start(socket_addr: std::net::SocketAddr, tracker: &Arc) -> impl Future> { +pub fn start(socket_addr: std::net::SocketAddr, tracker: Arc) -> impl Future> { let app = router(tracker); let server = 
axum::Server::bind(&socket_addr).serve(app.into_make_service_with_connect_info::()); @@ -128,7 +128,7 @@ pub fn start(socket_addr: std::net::SocketAddr, tracker: &Arc) -> impl pub fn start_tls( socket_addr: std::net::SocketAddr, ssl_config: RustlsConfig, - tracker: &Arc, + tracker: Arc, ) -> impl Future> { let app = router(tracker); diff --git a/src/http/axum_implementation/mod.rs b/src/http/axum_implementation/mod.rs index ecc60e1f8..79d230255 100644 --- a/src/http/axum_implementation/mod.rs +++ b/src/http/axum_implementation/mod.rs @@ -1,8 +1,8 @@ pub mod extractors; pub mod handlers; +pub mod launcher; pub mod query; pub mod requests; pub mod responses; pub mod routes; -pub mod server; pub mod services; diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index af987ece2..b0f30453d 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -7,7 +7,7 @@ use axum_client_ip::SecureClientIpSource; use super::handlers::{announce, scrape}; use crate::tracker::Tracker; -pub fn router(tracker: &Arc) -> Router { +pub fn router(tracker: Arc) -> Router { Router::new() // Announce request .route("/announce", get(announce::handle_without_key).with_state(tracker.clone())) diff --git a/src/http/warp_implementation/server.rs b/src/http/warp_implementation/launcher.rs similarity index 97% rename from src/http/warp_implementation/server.rs rename to src/http/warp_implementation/launcher.rs index 8d01559f3..777bd930b 100644 --- a/src/http/warp_implementation/server.rs +++ b/src/http/warp_implementation/launcher.rs @@ -15,9 +15,9 @@ pub enum Error { Error(String), } -pub struct Server; +pub struct Launcher; -impl Server { +impl Launcher { pub fn start_with_graceful_shutdown( addr: SocketAddr, tracker: Arc, @@ -50,7 +50,7 @@ impl Server { } } -impl HttpServerLauncher for Server { +impl HttpServerLauncher for Launcher { fn new() -> Self { Self {} } diff --git a/src/http/warp_implementation/mod.rs 
b/src/http/warp_implementation/mod.rs index 2ceda2e68..c0e046f4f 100644 --- a/src/http/warp_implementation/mod.rs +++ b/src/http/warp_implementation/mod.rs @@ -2,11 +2,11 @@ pub mod error; pub mod filter_helpers; pub mod filters; pub mod handlers; +pub mod launcher; pub mod peer_builder; pub mod request; pub mod response; pub mod routes; -pub mod server; use warp::Rejection; diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index ce546f608..40caa8e88 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -7,8 +7,8 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpTracker; -use crate::http::axum_implementation::server; -use crate::http::warp_implementation::server::Http; +use crate::http::axum_implementation::launcher; +use crate::http::warp_implementation::launcher::Http; use crate::http::Version; use crate::tracker; @@ -98,7 +98,7 @@ async fn start_axum(config: &HttpTracker, tracker: Arc) -> Joi if !ssl_enabled { info!("Starting Torrust HTTP tracker server on: http://{}", bind_addr); - let handle = server::start(bind_addr, &tracker); + let handle = launcher::start(bind_addr, tracker); tx.send(ServerJobStarted()) .expect("the HTTP tracker server should not be dropped"); @@ -113,7 +113,7 @@ async fn start_axum(config: &HttpTracker, tracker: Arc) -> Joi .await .unwrap(); - let handle = server::start_tls(bind_addr, ssl_config, &tracker); + let handle = launcher::start_tls(bind_addr, ssl_config, tracker); tx.send(ServerJobStarted()) .expect("the HTTP tracker server should not be dropped"); diff --git a/src/main.rs b/src/main.rs index fcb8331a4..b0cc68b12 100644 --- a/src/main.rs +++ b/src/main.rs @@ -30,7 +30,7 @@ async fn main() { let (stats_event_sender, stats_repository) = setup_statistics(config.tracker_usage_statistics); // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(&config.clone(), stats_event_sender, stats_repository) { + let tracker = match 
tracker::Tracker::new(config.clone(), stats_event_sender, stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 18ada69e0..874233d91 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -91,15 +91,17 @@ impl Tracker { /// /// Will return a `databases::error::Error` if unable to connect to database. pub fn new( - config: &Arc, + config: Arc, stats_event_sender: Option>, stats_repository: statistics::Repo, ) -> Result { let database = databases::driver::build(&config.db_driver, &config.db_path)?; + let mode = config.mode; + Ok(Tracker { - config: config.clone(), - mode: config.mode, + config, + mode, keys: RwLock::new(std::collections::HashMap::new()), whitelist: RwLock::new(std::collections::HashSet::new()), torrents: RwLock::new(std::collections::BTreeMap::new()), @@ -550,17 +552,15 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; - use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::TrackerMode; - use torrust_tracker_test_helpers::configuration::{self}; + use torrust_tracker_test_helpers::configuration; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer::{self, Peer}; - use crate::tracker::statistics::Keeper; + use crate::tracker::services::common::tracker_factory; use crate::tracker::{TorrentsMetrics, Tracker}; pub fn public_tracker() -> Tracker { @@ -587,21 +587,6 @@ mod tests { tracker_factory(configuration) } - pub fn tracker_factory(configuration: Configuration) -> Tracker { - // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
- - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - match Tracker::new(&Arc::new(configuration), Some(stats_event_sender), stats_repository) { - Ok(tracker) => tracker, - Err(error) => { - panic!("{}", error) - } - } - } - fn sample_info_hash() -> InfoHash { "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() } diff --git a/src/tracker/services/common.rs b/src/tracker/services/common.rs index 39aa3cc0b..757725263 100644 --- a/src/tracker/services/common.rs +++ b/src/tracker/services/common.rs @@ -9,7 +9,7 @@ use crate::tracker::Tracker; /// /// Will panic if tracker cannot be instantiated. #[must_use] -pub fn tracker_factory(configuration: &Arc) -> Tracker { +pub fn tracker_factory(configuration: Arc) -> Tracker { // todo: the tracker initialization is duplicated in many places. // Initialize stats tracker diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs index 94a9b1bd5..35fd49db5 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics.rs @@ -48,7 +48,7 @@ mod tests { #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let tracker = Arc::new(tracker_factory(tracker_configuration())); let tracker_metrics = get_metrics(tracker.clone()).await; diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index fc5686e23..50b17744e 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -150,7 +150,7 @@ mod tests { #[tokio::test] async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let tracker = Arc::new(tracker_factory(tracker_configuration())); let torrent_info = get_torrent_info( tracker.clone(), @@ -163,7 +163,7 @@ mod tests { #[tokio::test] async fn 
should_return_the_torrent_info_if_the_tracker_has_the_torrent() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let tracker = Arc::new(tracker_factory(tracker_configuration())); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -204,7 +204,7 @@ mod tests { #[tokio::test] async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let tracker = Arc::new(tracker_factory(tracker_configuration())); let torrents = get_torrents(tracker.clone(), &Pagination::default()).await; @@ -213,7 +213,7 @@ mod tests { #[tokio::test] async fn should_return_a_summarized_info_for_all_torrents() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let tracker = Arc::new(tracker_factory(tracker_configuration())); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -237,7 +237,7 @@ mod tests { #[tokio::test] async fn should_allow_limiting_the_number_of_torrents_in_the_result() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let tracker = Arc::new(tracker_factory(tracker_configuration())); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -261,7 +261,7 @@ mod tests { #[tokio::test] async fn should_allow_using_pagination_in_the_result() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let tracker = Arc::new(tracker_factory(tracker_configuration())); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -294,7 +294,7 @@ mod tests { #[tokio::test] async fn should_return_torrents_ordered_by_info_hash() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let tracker = 
Arc::new(tracker_factory(tracker_configuration())); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 411590d2f..074f362f4 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -266,20 +266,20 @@ mod tests { fn initialized_public_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); - initialized_tracker(&configuration) + initialized_tracker(configuration) } fn initialized_private_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); - initialized_tracker(&configuration) + initialized_tracker(configuration) } fn initialized_whitelisted_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); - initialized_tracker(&configuration) + initialized_tracker(configuration) } - fn initialized_tracker(configuration: &Arc) -> Arc { + fn initialized_tracker(configuration: Arc) -> Arc { let (stats_event_sender, stats_repository) = statistics::Keeper::new_active_instance(); Arc::new(tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()) } @@ -436,7 +436,7 @@ mod tests { let client_socket_address = sample_ipv4_socket_address(); let torrent_tracker = Arc::new( - tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) .await @@ -454,7 +454,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let torrent_tracker = Arc::new( - tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), 
statistics::Repo::new()).unwrap(), + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) .await @@ -697,7 +697,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_announce( @@ -927,7 +927,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); let remote_addr = sample_ipv6_remote_addr(); @@ -959,7 +959,7 @@ mod tests { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); let tracker = - Arc::new(tracker::Tracker::new(&configuration, Some(stats_event_sender), stats_repository).unwrap()); + Arc::new(tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -1242,7 +1242,7 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( - tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) @@ -1274,7 +1274,7 @@ mod tests { let remote_addr = 
sample_ipv6_remote_addr(); let tracker = Arc::new( - tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) diff --git a/tests/common/tracker.rs b/tests/common/tracker.rs index 7451bbc36..ed2d8392b 100644 --- a/tests/common/tracker.rs +++ b/tests/common/tracker.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::tracker::services::common::tracker_factory; use torrust_tracker::tracker::Tracker; use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; @@ -12,19 +12,8 @@ pub fn new_tracker(configuration: Arc Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - // Initialize logging logging::setup(&configuration); - tracker + Arc::new(tracker_factory(configuration)) } diff --git a/tests/http/test_environment.rs b/tests/http/test_environment.rs index a2cb4619c..40e504b08 100644 --- a/tests/http/test_environment.rs +++ b/tests/http/test_environment.rs @@ -41,7 +41,7 @@ impl TestEnvironment> { let tracker = new_tracker(cfg.clone()); - let http_server = stopped_http_server(cfg.http_trackers[0].clone()); + let http_server = http_server(cfg.http_trackers[0].clone()); Self { cfg, @@ -61,10 +61,12 @@ impl TestEnvironment> { } } + #[allow(dead_code)] pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { &self.state.http_server.cfg } + #[allow(dead_code)] pub fn config_mut(&mut self) -> &mut torrust_tracker_configuration::HttpTracker { &mut self.state.http_server.cfg } @@ -91,12 +93,13 @@ impl TestEnvironment> { &self.state.http_server.state.bind_addr } + #[allow(dead_code)] pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { &self.state.http_server.cfg } } -#[allow(clippy::module_name_repetitions)] 
+#[allow(clippy::module_name_repetitions, dead_code)] pub fn stopped_test_environment( cfg: torrust_tracker_configuration::Configuration, ) -> StoppedTestEnvironment { @@ -110,17 +113,8 @@ pub async fn running_test_environment( TestEnvironment::new_running(cfg).await } -pub fn stopped_http_server( - cfg: torrust_tracker_configuration::HttpTracker, -) -> StoppedHttpServer { +pub fn http_server(cfg: torrust_tracker_configuration::HttpTracker) -> StoppedHttpServer { let http_server = I::new(); HttpServer::new(cfg, http_server) } - -pub async fn running_http_server( - cfg: torrust_tracker_configuration::HttpTracker, - tracker: Arc, -) -> RunningHttpServer { - stopped_http_server(cfg).start(tracker).await.unwrap() -} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 22a6c44ff..d29f674e6 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -12,8 +12,8 @@ mod common; mod http; -pub type Axum = torrust_tracker::http::axum_implementation::server::Server; -pub type Warp = torrust_tracker::http::warp_implementation::server::Server; +pub type Axum = torrust_tracker::http::axum_implementation::launcher::Launcher; +pub type Warp = torrust_tracker::http::warp_implementation::launcher::Launcher; mod test_env_test_environment { use crate::http::test_environment::running_test_environment; @@ -46,7 +46,7 @@ mod warp_test_env { }; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::{running_test_environment, stopped_test_environment}; + use crate::http::test_environment::running_test_environment; use crate::Warp; #[tokio::test] @@ -65,6 +65,8 @@ mod warp_test_env { .await; assert_could_not_find_remote_address_on_xff_header_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -80,6 +82,8 @@ mod warp_test_env { .await; assert_invalid_remote_address_on_xff_header_error_response(response).await; + + test_env.stop().await; } } @@ -132,6 +136,8 @@ mod warp_test_env { 
.await; assert_is_announce_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -182,6 +188,8 @@ mod warp_test_env { .await; assert_internal_server_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -199,6 +207,8 @@ mod warp_test_env { assert_invalid_info_hash_error_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -219,6 +229,8 @@ mod warp_test_env { .await; assert_is_announce_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -238,6 +250,8 @@ mod warp_test_env { assert_internal_server_error_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -257,6 +271,8 @@ mod warp_test_env { assert_internal_server_error_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -283,6 +299,8 @@ mod warp_test_env { assert_invalid_peer_id_error_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -302,6 +320,8 @@ mod warp_test_env { assert_internal_server_error_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -321,6 +341,8 @@ mod warp_test_env { assert_internal_server_error_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -350,6 +372,8 @@ mod warp_test_env { assert_is_announce_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -369,6 +393,8 @@ mod warp_test_env { assert_internal_server_error_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -395,6 +421,8 @@ mod warp_test_env { }, ) .await; + + test_env.stop().await; } #[tokio::test] @@ -434,6 +462,8 @@ mod warp_test_env { }, ) .await; + + test_env.stop().await; } #[tokio::test] @@ -457,6 +487,8 @@ mod warp_test_env { let response = Client::new(test_env.bind_address().clone()).announce(&announce_query).await; assert_empty_announce_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -497,6 +529,8 @@ mod warp_test_env { }; 
assert_compact_announce_response(response, &expected_response).await; + + test_env.stop().await; } #[tokio::test] @@ -531,6 +565,8 @@ mod warp_test_env { .await; assert!(!is_a_compact_announce_response(response).await); + + test_env.stop().await; } async fn is_a_compact_announce_response(response: Response) -> bool { @@ -551,6 +587,10 @@ mod warp_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_connections_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -565,6 +605,10 @@ mod warp_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -585,6 +629,10 @@ mod warp_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 0); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -599,6 +647,10 @@ mod warp_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_announces_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -613,6 +665,10 @@ mod warp_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -633,6 +689,10 @@ mod warp_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 0); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -657,6 +717,8 @@ mod warp_test_env { assert_eq!(peer_addr.ip(), client_ip); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; } #[tokio::test] @@ -692,6 +754,8 @@ mod warp_test_env { assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; } #[tokio::test] @@ -727,6 +791,8 @@ mod warp_test_env { assert_eq!(peer_addr.ip(), 
test_env.tracker.config.get_ext_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; } #[tokio::test] @@ -760,6 +826,8 @@ mod warp_test_env { let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); + + test_env.stop().await; } } @@ -795,6 +863,8 @@ mod warp_test_env { let response = Client::new(test_env.bind_address().clone()).get("scrape").await; assert_internal_server_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -814,6 +884,8 @@ mod warp_test_env { // code-review: it's not returning the invalid info hash error assert_internal_server_error_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -853,6 +925,8 @@ mod warp_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -892,6 +966,8 @@ mod warp_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -910,6 +986,8 @@ mod warp_test_env { .await; assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; + + test_env.stop().await; } #[tokio::test] @@ -935,6 +1013,8 @@ mod warp_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -955,6 +1035,10 @@ mod warp_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_scrapes_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -975,6 +1059,10 @@ mod warp_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_scrapes_handled, 1); + + drop(stats); + + test_env.stop().await; } } } @@ -1005,6 +1093,8 @@ mod warp_test_env { .await; assert_torrent_not_in_whitelist_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -1027,6 
+1117,8 @@ mod warp_test_env { .await; assert_is_announce_response(response).await; + + test_env.stop().await; } } @@ -1073,6 +1165,8 @@ mod warp_test_env { let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -1119,6 +1213,8 @@ mod warp_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } } } @@ -1153,6 +1249,8 @@ mod warp_test_env { .await; assert_is_announce_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -1182,6 +1280,8 @@ mod warp_test_env { .await; assert_warp_invalid_authentication_key_error_response(response).await; + + test_env.stop().await; } } @@ -1230,6 +1330,8 @@ mod warp_test_env { let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -1271,6 +1373,8 @@ mod warp_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -1305,6 +1409,8 @@ mod warp_test_env { let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } } } @@ -1347,6 +1453,8 @@ mod axum_test_env { .await; assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -1362,6 +1470,8 @@ mod axum_test_env { .await; assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; + + test_env.stop().await; } } @@ -1413,6 +1523,8 @@ mod axum_test_env { .await; assert_is_announce_response(response).await; + + test_env.stop().await; } 
#[tokio::test] @@ -1422,6 +1534,8 @@ mod axum_test_env { let response = Client::new(test_env.bind_address().clone()).get("announce").await; assert_missing_query_params_for_announce_request_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -1435,6 +1549,8 @@ mod axum_test_env { .await; assert_cannot_parse_query_param_error_response(response, "invalid param a=b=c").await; + + test_env.stop().await; } #[tokio::test] @@ -1476,6 +1592,8 @@ mod axum_test_env { .await; assert_bad_announce_request_error_response(response, "missing param port").await; + + test_env.stop().await; } #[tokio::test] @@ -1493,6 +1611,8 @@ mod axum_test_env { assert_cannot_parse_query_params_error_response(response, "").await; } + + test_env.stop().await; } #[tokio::test] @@ -1513,6 +1633,8 @@ mod axum_test_env { .await; assert_is_announce_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -1532,6 +1654,8 @@ mod axum_test_env { assert_bad_announce_request_error_response(response, "invalid param value").await; } + + test_env.stop().await; } #[tokio::test] @@ -1551,6 +1675,8 @@ mod axum_test_env { assert_bad_announce_request_error_response(response, "invalid param value").await; } + + test_env.stop().await; } #[tokio::test] @@ -1577,6 +1703,8 @@ mod axum_test_env { assert_bad_announce_request_error_response(response, "invalid param value").await; } + + test_env.stop().await; } #[tokio::test] @@ -1596,6 +1724,8 @@ mod axum_test_env { assert_bad_announce_request_error_response(response, "invalid param value").await; } + + test_env.stop().await; } #[tokio::test] @@ -1615,6 +1745,8 @@ mod axum_test_env { assert_bad_announce_request_error_response(response, "invalid param value").await; } + + test_env.stop().await; } #[tokio::test] @@ -1642,6 +1774,8 @@ mod axum_test_env { assert_bad_announce_request_error_response(response, "invalid param value").await; } + + test_env.stop().await; } #[tokio::test] @@ -1661,6 +1795,8 @@ mod axum_test_env { 
assert_bad_announce_request_error_response(response, "invalid param value").await; } + + test_env.stop().await; } #[tokio::test] @@ -1687,6 +1823,8 @@ mod axum_test_env { }, ) .await; + + test_env.stop().await; } #[tokio::test] @@ -1726,6 +1864,8 @@ mod axum_test_env { }, ) .await; + + test_env.stop().await; } #[tokio::test] @@ -1775,6 +1915,8 @@ mod axum_test_env { }, ) .await; + + test_env.stop().await; } #[tokio::test] @@ -1798,6 +1940,8 @@ mod axum_test_env { let response = Client::new(test_env.bind_address().clone()).announce(&announce_query).await; assert_empty_announce_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -1838,6 +1982,8 @@ mod axum_test_env { }; assert_compact_announce_response(response, &expected_response).await; + + test_env.stop().await; } #[tokio::test] @@ -1872,6 +2018,8 @@ mod axum_test_env { .await; assert!(!is_a_compact_announce_response(response).await); + + test_env.stop().await; } async fn is_a_compact_announce_response(response: Response) -> bool { @@ -1892,6 +2040,10 @@ mod axum_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_connections_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -1906,6 +2058,10 @@ mod axum_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -1926,6 +2082,10 @@ mod axum_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 0); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -1933,13 +2093,19 @@ mod axum_test_env { let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + let res = Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) .await; + println!("{:?}", res.text().await.unwrap()); + let stats = 
test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_announces_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -1954,6 +2120,10 @@ mod axum_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -1974,6 +2144,10 @@ mod axum_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 0); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -1998,6 +2172,8 @@ mod axum_test_env { assert_eq!(peer_addr.ip(), client_ip); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; } #[tokio::test] @@ -2033,6 +2209,8 @@ mod axum_test_env { assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; } #[tokio::test] @@ -2068,6 +2246,8 @@ mod axum_test_env { assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; } #[tokio::test] @@ -2101,6 +2281,8 @@ mod axum_test_env { let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); + + test_env.stop().await; } } @@ -2140,6 +2322,8 @@ mod axum_test_env { let response = Client::new(test_env.bind_address().clone()).get("scrape").await; assert_missing_query_params_for_scrape_request_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -2158,6 +2342,8 @@ mod axum_test_env { assert_cannot_parse_query_params_error_response(response, "").await; } + + test_env.stop().await; } #[tokio::test] @@ -2197,6 +2383,8 @@ mod axum_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -2236,6 +2424,8 @@ mod axum_test_env { .build(); 
assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -2254,6 +2444,8 @@ mod axum_test_env { .await; assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; + + test_env.stop().await; } #[tokio::test] @@ -2279,6 +2471,8 @@ mod axum_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -2299,6 +2493,10 @@ mod axum_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_scrapes_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -2319,6 +2517,10 @@ mod axum_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_scrapes_handled, 1); + + drop(stats); + + test_env.stop().await; } } } @@ -2349,6 +2551,8 @@ mod axum_test_env { .await; assert_torrent_not_in_whitelist_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -2370,6 +2574,8 @@ mod axum_test_env { .await; assert_is_announce_response(response).await; + + test_env.stop().await; } } @@ -2416,6 +2622,8 @@ mod axum_test_env { let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -2462,6 +2670,8 @@ mod axum_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } } } @@ -2493,6 +2703,8 @@ mod axum_test_env { .await; assert_is_announce_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -2507,6 +2719,8 @@ mod axum_test_env { .await; assert_authentication_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -2533,11 +2747,13 @@ mod axum_test_env { // The tracker does not have this key let unregistered_key = 
Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), unregistered_key) + let response = Client::authenticated(test_env.bind_address(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; assert_authentication_error_response(response).await; + + test_env.stop().await; } } @@ -2602,6 +2818,8 @@ mod axum_test_env { let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -2643,6 +2861,8 @@ mod axum_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -2678,6 +2898,8 @@ mod axum_test_env { let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } } } From 5b95b5d596278db48bda925a3414aec764de2ea5 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 3 Mar 2023 11:59:52 +0100 Subject: [PATCH 0454/1003] refactor: renamed `tracker_interface` to `server` and shortened `configuration::ephemeral` calls --- src/apis/server.rs | 3 +- src/http/axum_implementation/launcher.rs | 2 +- src/http/mod.rs | 2 +- src/http/{tracker_interface.rs => server.rs} | 1 + src/http/warp_implementation/launcher.rs | 2 +- src/tracker/services/statistics.rs | 3 +- src/tracker/services/torrent.rs | 6 +- src/udp/handlers.rs | 3 +- tests/api/test_environment.rs | 3 +- tests/http/test_environment.rs | 2 +- tests/http_tracker.rs | 322 +++++++------------ tests/udp/test_environment.rs | 3 +- 12 files changed, 144 insertions(+), 208 deletions(-) rename src/http/{tracker_interface.rs => server.rs} (98%) diff --git a/src/apis/server.rs b/src/apis/server.rs index 8d4c703b7..4c8fbaada 100644 --- a/src/apis/server.rs +++ 
b/src/apis/server.rs @@ -188,13 +188,14 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration; use crate::apis::server::ApiServer; use crate::tracker; use crate::tracker::statistics; fn tracker_configuration() -> Arc { - Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) + Arc::new(configuration::ephemeral()) } #[tokio::test] diff --git a/src/http/axum_implementation/launcher.rs b/src/http/axum_implementation/launcher.rs index 95fa9f2b7..a49efd11d 100644 --- a/src/http/axum_implementation/launcher.rs +++ b/src/http/axum_implementation/launcher.rs @@ -11,7 +11,7 @@ use log::info; use warp::hyper; use super::routes::router; -use crate::http::tracker_interface::HttpServerLauncher; +use crate::http::server::HttpServerLauncher; use crate::tracker::Tracker; #[derive(Debug)] diff --git a/src/http/mod.rs b/src/http/mod.rs index c2cbb43df..b4841c0af 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -14,7 +14,7 @@ use serde::{Deserialize, Serialize}; pub mod axum_implementation; pub mod percent_encoding; -pub mod tracker_interface; +pub mod server; pub mod warp_implementation; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] diff --git a/src/http/tracker_interface.rs b/src/http/server.rs similarity index 98% rename from src/http/tracker_interface.rs rename to src/http/server.rs index fc4ba9c95..e7b6c1888 100644 --- a/src/http/tracker_interface.rs +++ b/src/http/server.rs @@ -32,6 +32,7 @@ pub type StoppedHttpServer = HttpServer>; #[allow(clippy::module_name_repetitions)] pub type RunningHttpServer = HttpServer>; +#[allow(clippy::module_name_repetitions)] pub struct HttpServer { pub cfg: torrust_tracker_configuration::HttpTracker, pub state: S, diff --git a/src/http/warp_implementation/launcher.rs b/src/http/warp_implementation/launcher.rs index 777bd930b..46ec2bf3c 100644 --- a/src/http/warp_implementation/launcher.rs +++ 
b/src/http/warp_implementation/launcher.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use futures::future::BoxFuture; use super::routes; -use crate::http::tracker_interface::HttpServerLauncher; +use crate::http::server::HttpServerLauncher; use crate::tracker; use crate::tracker::Tracker; diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs index 35fd49db5..28cd0b962 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics.rs @@ -37,13 +37,14 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration; use crate::tracker; use crate::tracker::services::common::tracker_factory; use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; pub fn tracker_configuration() -> Arc { - Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) + Arc::new(configuration::ephemeral()) } #[tokio::test] diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index 50b17744e..b04b4e1dc 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -138,6 +138,7 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration; use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; @@ -145,7 +146,7 @@ mod tests { use crate::tracker::services::torrent::{get_torrent_info, Info}; pub fn tracker_configuration() -> Arc { - Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) + Arc::new(configuration::ephemeral()) } #[tokio::test] @@ -192,6 +193,7 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration; use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; @@ -199,7 +201,7 @@ mod tests { use crate::tracker::services::torrent::{get_torrents, BasicInfo, 
Pagination}; pub fn tracker_configuration() -> Arc { - Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) + Arc::new(configuration::ephemeral()) } #[tokio::test] diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 074f362f4..211a0d1ba 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -252,6 +252,7 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::TrackerMode; + use torrust_tracker_test_helpers::configuration; use crate::protocol::clock::{Current, Time}; use crate::tracker::{self, peer, statistics}; @@ -261,7 +262,7 @@ mod tests { } fn default_testing_tracker_configuration() -> Configuration { - torrust_tracker_test_helpers::configuration::ephemeral() + configuration::ephemeral() } fn initialized_public_tracker() -> Arc { diff --git a/tests/api/test_environment.rs b/tests/api/test_environment.rs index 1565530c1..1f8708650 100644 --- a/tests/api/test_environment.rs +++ b/tests/api/test_environment.rs @@ -4,6 +4,7 @@ use torrust_tracker::apis::server::{ApiServer, RunningApiServer, StoppedApiServe use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; +use torrust_tracker_test_helpers::configuration; use super::connection_info::ConnectionInfo; use crate::common::tracker::new_tracker; @@ -89,7 +90,7 @@ pub fn running_test_environment() -> RunningTestEnvironment { } pub fn api_server() -> StoppedApiServer { - let config = Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()); + let config = Arc::new(configuration::ephemeral()); let tracker = new_tracker(config.clone()); diff --git a/tests/http/test_environment.rs b/tests/http/test_environment.rs index 40e504b08..459c2fbe6 100644 --- a/tests/http/test_environment.rs +++ b/tests/http/test_environment.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use 
torrust_tracker::http::tracker_interface::{HttpServer, HttpServerLauncher, RunningHttpServer, StoppedHttpServer}; +use torrust_tracker::http::server::{HttpServer, HttpServerLauncher, RunningHttpServer, StoppedHttpServer}; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index d29f674e6..a4e87115a 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -16,19 +16,21 @@ pub type Axum = torrust_tracker::http::axum_implementation::launcher::Launcher; pub type Warp = torrust_tracker::http::warp_implementation::launcher::Launcher; mod test_env_test_environment { + use torrust_tracker_test_helpers::configuration; + use crate::http::test_environment::running_test_environment; use crate::{Axum, Warp}; #[tokio::test] async fn should_be_able_to_start_and_stop_a_test_environment_using_axum() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; test_env.stop().await; } #[tokio::test] async fn should_be_able_to_start_and_stop_a_test_environment_using_warp() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; test_env.stop().await; } @@ -39,6 +41,7 @@ mod warp_test_env { mod for_all_config_modes { mod running_on_reverse_proxy { + use torrust_tracker_test_helpers::configuration; use crate::http::asserts::{ assert_could_not_find_remote_address_on_xff_header_error_response, @@ -54,9 +57,7 @@ mod warp_test_env { // If the tracker is running behind a reverse proxy, the peer IP is the // last IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy client. 
- let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; let params = QueryBuilder::default().query().params(); @@ -71,9 +72,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; let params = QueryBuilder::default().query().params(); @@ -107,6 +106,7 @@ mod warp_test_env { use reqwest::Response; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; use crate::http::asserts::{ @@ -125,7 +125,7 @@ mod warp_test_env { #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -142,7 +142,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_url_query_component_is_empty() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let response = Client::new(test_env.bind_address().clone()).get("announce").await; @@ -151,7 +151,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env 
= running_test_environment::(configuration::ephemeral()).await; // Without `info_hash` param @@ -194,7 +194,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -218,7 +218,7 @@ mod warp_test_env { // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP if there. // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request header is tracker is running `on_reverse_proxy`. - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -235,7 +235,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -256,7 +256,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -277,7 +277,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params 
= QueryBuilder::default().query().params(); @@ -305,7 +305,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -326,7 +326,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -349,7 +349,7 @@ mod warp_test_env { async fn should_not_fail_when_the_event_param_is_invalid() { // All invalid values are ignored as if the `event` param were empty - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -378,7 +378,7 @@ mod warp_test_env { #[tokio::test] async fn should_not_fail_when_the_compact_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -399,8 +399,7 @@ mod warp_test_env { #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let response = Client::new(test_env.bind_address().clone()) .announce( @@ -427,8 +426,7 @@ mod warp_test_env { 
#[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -468,8 +466,7 @@ mod warp_test_env { #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let peer = PeerBuilder::default().build(); @@ -496,8 +493,7 @@ mod warp_test_env { // Tracker Returns Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -538,8 +534,7 @@ mod warp_test_env { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. 
- let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -577,8 +572,7 @@ mod warp_test_env { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) @@ -595,8 +589,7 @@ mod warp_test_env { #[tokio::test] async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) @@ -615,8 +608,7 @@ mod warp_test_env { async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; Client::new(test_env.bind_address().clone()) .announce( @@ -637,8 +629,7 @@ mod warp_test_env { #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) @@ -655,8 +646,7 @@ mod warp_test_env { #[tokio::test] async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) @@ -675,8 +665,7 @@ mod warp_test_env { async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; Client::new(test_env.bind_address().clone()) .announce( @@ -697,8 +686,7 @@ mod warp_test_env { #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); @@ -730,11 +718,10 @@ mod warp_test_env { 127.0.0.1 external_ip = "2.137.87.41" */ - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_external_ip( - IpAddr::from_str("2.137.87.41").unwrap(), - )) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2.137.87.41").unwrap(), + )) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); @@ -767,11 +754,10 @@ mod warp_test_env { ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" */ - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_external_ip( - IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), - )) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + )) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); @@ -804,9 +790,7 @@ mod 
warp_test_env { 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 */ - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -846,6 +830,7 @@ mod warp_test_env { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; use crate::http::asserts::{assert_internal_server_error_response, assert_scrape_response}; @@ -858,8 +843,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_request_is_empty() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let response = Client::new(test_env.bind_address().clone()).get("scrape").await; assert_internal_server_error_response(response).await; @@ -869,8 +853,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let mut params = QueryBuilder::default().query().params(); @@ -890,8 +873,7 @@ mod warp_test_env { #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let 
info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -931,8 +913,7 @@ mod warp_test_env { #[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -972,8 +953,7 @@ mod warp_test_env { #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -992,8 +972,7 @@ mod warp_test_env { #[tokio::test] async fn should_accept_multiple_infohashes() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); @@ -1019,8 +998,7 @@ mod warp_test_env { #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1043,8 +1021,7 @@ mod warp_test_env { #[tokio::test] async 
fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1073,6 +1050,7 @@ mod warp_test_env { use std::str::FromStr; use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; use crate::http::client::Client; @@ -1082,9 +1060,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1100,9 +1076,7 @@ mod warp_test_env { #[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1127,6 +1101,7 @@ mod warp_test_env { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; use crate::http::asserts::assert_scrape_response; @@ -1138,9 +1113,7 @@ mod warp_test_env { #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let test_env = - 
running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1171,9 +1144,7 @@ mod warp_test_env { #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1239,8 +1210,7 @@ mod warp_test_env { #[tokio::test] async fn should_respond_to_authenticated_peers() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); @@ -1255,8 +1225,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1269,8 +1238,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_if_the_peer_authentication_key_is_not_valid() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; // The tracker does not have this key let 
unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); @@ -1293,6 +1261,7 @@ mod warp_test_env { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; use crate::http::asserts::assert_scrape_response; @@ -1304,8 +1273,7 @@ mod warp_test_env { #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1336,8 +1304,7 @@ mod warp_test_env { #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1381,8 +1348,7 @@ mod warp_test_env { async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { // There is not authentication error - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1430,6 +1396,7 @@ mod axum_test_env { mod for_all_config_modes { mod and_running_on_reverse_proxy { + use torrust_tracker_test_helpers::configuration; use 
crate::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; use crate::http::client::Client; @@ -1442,9 +1409,7 @@ mod axum_test_env { // If the tracker is running behind a reverse proxy, the peer IP is the // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; let params = QueryBuilder::default().query().params(); @@ -1459,9 +1424,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; let params = QueryBuilder::default().query().params(); @@ -1495,6 +1458,7 @@ mod axum_test_env { use reqwest::Response; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; use crate::http::asserts::{ @@ -1512,7 +1476,7 @@ mod axum_test_env { #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1529,7 +1493,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_url_query_component_is_empty() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = 
running_test_environment::(configuration::ephemeral()).await; let response = Client::new(test_env.bind_address().clone()).get("announce").await; @@ -1540,7 +1504,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_url_query_parameters_are_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let invalid_query_param = "a=b=c"; @@ -1555,7 +1519,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; // Without `info_hash` param @@ -1598,7 +1562,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1622,7 +1586,7 @@ mod axum_test_env { // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. 
- let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1639,7 +1603,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1660,7 +1624,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1681,7 +1645,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1709,7 +1673,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1730,7 +1694,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = 
running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1751,7 +1715,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_event_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1780,7 +1744,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_compact_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1801,8 +1765,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let response = Client::new(test_env.bind_address().clone()) .announce( @@ -1829,8 +1792,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1870,8 +1832,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = 
running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1921,8 +1882,7 @@ mod axum_test_env { #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let peer = PeerBuilder::default().build(); @@ -1949,8 +1909,7 @@ mod axum_test_env { // Tracker Returns Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1991,8 +1950,7 @@ mod axum_test_env { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. 
- let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2030,8 +1988,7 @@ mod axum_test_env { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) @@ -2048,8 +2005,7 @@ mod axum_test_env { #[tokio::test] async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) @@ -2068,8 +2024,7 @@ mod axum_test_env { async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; Client::new(test_env.bind_address().clone()) .announce( @@ -2090,15 +2045,12 @@ mod axum_test_env { #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let res = Client::new(test_env.bind_address().clone()) + Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) .await; - println!("{:?}", res.text().await.unwrap()); - let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_announces_handled, 1); @@ -2110,8 +2062,7 @@ mod axum_test_env { #[tokio::test] async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) @@ -2130,8 +2081,7 @@ mod axum_test_env { async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; Client::new(test_env.bind_address().clone()) .announce( @@ -2152,8 +2102,7 @@ mod axum_test_env { #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); @@ -2185,11 +2134,10 @@ mod axum_test_env { 127.0.0.1 external_ip = "2.137.87.41" */ - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_external_ip( - IpAddr::from_str("2.137.87.41").unwrap(), - )) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2.137.87.41").unwrap(), + )) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); @@ -2222,11 +2170,10 @@ mod axum_test_env { ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" */ - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_external_ip( - IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), - )) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + )) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); @@ -2259,9 +2206,7 @@ mod 
axum_test_env { 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 */ - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2301,6 +2246,7 @@ mod axum_test_env { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; use crate::http::asserts::{ @@ -2317,8 +2263,7 @@ mod axum_test_env { //#[tokio::test] #[allow(dead_code)] async fn should_fail_when_the_request_is_empty() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let response = Client::new(test_env.bind_address().clone()).get("scrape").await; assert_missing_query_params_for_scrape_request_error_response(response).await; @@ -2328,8 +2273,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let mut params = QueryBuilder::default().query().params(); @@ -2348,8 +2292,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2389,8 +2332,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2430,8 +2372,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2450,8 +2391,7 @@ mod axum_test_env { #[tokio::test] async fn should_accept_multiple_infohashes() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); @@ -2477,8 +2417,7 @@ mod axum_test_env { #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2501,8 +2440,7 @@ mod axum_test_env { #[tokio::test] async fn 
should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2531,6 +2469,7 @@ mod axum_test_env { use std::str::FromStr; use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; use crate::http::client::Client; @@ -2540,9 +2479,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2557,9 +2494,7 @@ mod axum_test_env { #[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2584,6 +2519,7 @@ mod axum_test_env { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; use crate::http::asserts::assert_scrape_response; @@ -2595,9 +2531,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let test_env = - 
running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2628,9 +2562,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2693,8 +2625,7 @@ mod axum_test_env { #[tokio::test] async fn should_respond_to_authenticated_peers() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); @@ -2709,8 +2640,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2747,7 +2677,7 @@ mod axum_test_env { // The tracker does not have this key let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(test_env.bind_address(), unregistered_key) + let response = Client::authenticated(test_env.bind_address().clone(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; @@ -2765,6 
+2695,7 @@ mod axum_test_env { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; use crate::http::asserts::{assert_authentication_error_response, assert_scrape_response}; @@ -2792,8 +2723,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2824,8 +2754,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2870,8 +2799,7 @@ mod axum_test_env { // There is not authentication error // code-review: should this really be this way? 
- let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); diff --git a/tests/udp/test_environment.rs b/tests/udp/test_environment.rs index f805d9a05..02d51c4bf 100644 --- a/tests/udp/test_environment.rs +++ b/tests/udp/test_environment.rs @@ -5,6 +5,7 @@ use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; use torrust_tracker::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; +use torrust_tracker_test_helpers::configuration; use crate::common::tracker::new_tracker; @@ -88,7 +89,7 @@ pub async fn running_test_environment() -> RunningTestEnvironment { } pub fn udp_server() -> StoppedUdpServer { - let config = Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()); + let config = Arc::new(configuration::ephemeral()); let tracker = new_tracker(config.clone()); From d020c5a514d4f09669f99c956ee8d47521752872 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Tue, 7 Mar 2023 18:05:35 +0100 Subject: [PATCH 0455/1003] refactor: `tracker_api` launching and testing --- src/apis/routes.rs | 2 +- src/apis/server.rs | 212 ++++++++++++++++++---------------- src/jobs/tracker_apis.rs | 4 +- src/jobs/udp_tracker.rs | 4 +- src/udp/handlers.rs | 83 ++++++------- src/udp/server.rs | 31 ++--- tests/api/test_environment.rs | 55 ++++----- tests/tracker_api.rs | 111 +++++++++++------- tests/udp/test_environment.rs | 49 ++++---- tests/udp_tracker.rs | 12 +- 10 files changed, 292 insertions(+), 271 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 281979aa5..6e3218605 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -10,7 +10,7 @@ use super::handlers::{ use super::middlewares::auth::auth; use crate::tracker::Tracker; 
-pub fn router(tracker: &Arc) -> Router { +pub fn router(tracker: Arc) -> Router { Router::new() // Stats .route("/api/stats", get(get_stats_handler).with_state(tracker.clone())) diff --git a/src/apis/server.rs b/src/apis/server.rs index 4c8fbaada..0a5013161 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -1,15 +1,16 @@ -use std::net::{SocketAddr, TcpListener}; +use std::net::SocketAddr; +use std::str::FromStr; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; +use futures::future::BoxFuture; use futures::Future; use log::info; -use tokio::task::JoinHandle; use warp::hyper; use super::routes::router; -use crate::signals::shutdown_signal_with_message; +use crate::signals::shutdown_signal; use crate::tracker::Tracker; #[derive(Debug)] @@ -25,133 +26,150 @@ pub type RunningApiServer = ApiServer; #[allow(clippy::module_name_repetitions)] pub struct ApiServer { pub cfg: torrust_tracker_configuration::HttpApi, - pub tracker: Arc, pub state: S, } pub struct Stopped; pub struct Running { - pub bind_address: SocketAddr, - stop_job_sender: tokio::sync::oneshot::Sender, - job: JoinHandle<()>, + pub bind_addr: SocketAddr, + task_killer: tokio::sync::oneshot::Sender, + task: tokio::task::JoinHandle<()>, } impl ApiServer { - pub fn new(cfg: torrust_tracker_configuration::HttpApi, tracker: Arc) -> Self { - Self { - cfg, - tracker, - state: Stopped {}, - } + pub fn new(cfg: torrust_tracker_configuration::HttpApi) -> Self { + Self { cfg, state: Stopped {} } } - /// # Errors - /// - /// Will return `Err` if `TcpListener` can not bind to `bind_address`. 
- pub fn start(self) -> Result, Error> { - let listener = TcpListener::bind(&self.cfg.bind_address).map_err(|e| Error::Error(e.to_string()))?; - - let bind_address = listener.local_addr().map_err(|e| Error::Error(e.to_string()))?; - - let cfg = self.cfg.clone(); - let tracker = self.tracker.clone(); - - let (sender, receiver) = tokio::sync::oneshot::channel::(); - - let job = tokio::spawn(async move { - if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (cfg.ssl_enabled, cfg.ssl_cert_path, cfg.ssl_key_path) { - let tls_config = RustlsConfig::from_pem_file(ssl_cert_path, ssl_key_path) - .await - .expect("Could not read ssl cert and/or key."); - - start_tls_from_tcp_listener_with_graceful_shutdown(listener, tls_config, &tracker, receiver) - .await - .expect("Could not start from tcp listener with tls."); - } else { - start_from_tcp_listener_with_graceful_shutdown(listener, &tracker, receiver) - .await - .expect("Could not start from tcp listener."); - } + pub async fn start(self, tracker: Arc) -> Result, Error> { + let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); + let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); + + let configuration = self.cfg.clone(); + + let task = tokio::spawn(async move { + let (bind_addr, server) = Launcher::start(&configuration, tracker, shutdown_signal(shutdown_receiver)); + + addr_sender.send(bind_addr).unwrap(); + + server.await; }); - let running_api_server: ApiServer = ApiServer { + let bind_address = addr_receiver.await.expect("Could not receive bind_address."); + + Ok(ApiServer { cfg: self.cfg, - tracker: self.tracker, state: Running { - bind_address, - stop_job_sender: sender, - job, + bind_addr: bind_address, + task_killer: shutdown_sender, + task, }, - }; - - Ok(running_api_server) + }) } } impl ApiServer { - /// # Errors - /// - /// Will return `Err` if the oneshot channel to send the stop signal - /// has already been called once. 
pub async fn stop(self) -> Result, Error> { - self.state.stop_job_sender.send(1).map_err(|e| Error::Error(e.to_string()))?; + self.state.task_killer.send(0).unwrap(); - let _ = self.state.job.await; + let _ = self.state.task.await; - let stopped_api_server: ApiServer = ApiServer { + Ok(ApiServer { cfg: self.cfg, - tracker: self.tracker, state: Stopped {}, - }; - - Ok(stopped_api_server) + }) } } -pub fn start_from_tcp_listener_with_graceful_shutdown( - tcp_listener: TcpListener, - tracker: &Arc, - shutdown_signal: tokio::sync::oneshot::Receiver, -) -> impl Future> { - let app = router(tracker); - - let context = tcp_listener.local_addr().expect("Could not get context."); - - axum::Server::from_tcp(tcp_listener) - .expect("Could not bind to tcp listener.") - .serve(app.into_make_service()) - .with_graceful_shutdown(shutdown_signal_with_message( - shutdown_signal, - format!("Shutting down {context}.."), - )) -} +struct Launcher; + +impl Launcher { + pub fn start( + cfg: &torrust_tracker_configuration::HttpApi, + tracker: Arc, + shutdown_signal: F, + ) -> (SocketAddr, BoxFuture<'static, ()>) + where + F: Future + Send + 'static, + { + let addr = SocketAddr::from_str(&cfg.bind_address).expect("bind_address is not a valid SocketAddr."); + let tcp_listener = std::net::TcpListener::bind(addr).expect("Could not bind tcp_listener to address."); + let bind_addr = tcp_listener + .local_addr() + .expect("Could not get local_addr from tcp_listener."); + + if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (&cfg.ssl_enabled, &cfg.ssl_cert_path, &cfg.ssl_key_path) { + let server = Self::start_tls_with_graceful_shutdown( + tcp_listener, + (ssl_cert_path.to_string(), ssl_key_path.to_string()), + tracker, + shutdown_signal, + ); + + (bind_addr, server) + } else { + let server = Self::start_with_graceful_shutdown(tcp_listener, tracker, shutdown_signal); + + (bind_addr, server) + } + } -pub fn start_tls_from_tcp_listener_with_graceful_shutdown( - tcp_listener: TcpListener, - 
tls_config: RustlsConfig, - tracker: &Arc, - shutdown_signal: tokio::sync::oneshot::Receiver, -) -> impl Future> { - let app = router(tracker); + pub fn start_with_graceful_shutdown( + tcp_listener: std::net::TcpListener, + tracker: Arc, + shutdown_signal: F, + ) -> BoxFuture<'static, ()> + where + F: Future + Send + 'static, + { + let app = router(tracker); + + Box::pin(async { + axum::Server::from_tcp(tcp_listener) + .expect("Could not bind to tcp listener.") + .serve(app.into_make_service_with_connect_info::()) + .with_graceful_shutdown(shutdown_signal) + .await + .expect("Axum server crashed."); + }) + } - let context = tcp_listener.local_addr().expect("Could not get context."); + pub fn start_tls_with_graceful_shutdown( + tcp_listener: std::net::TcpListener, + (ssl_cert_path, ssl_key_path): (String, String), + tracker: Arc, + shutdown_signal: F, + ) -> BoxFuture<'static, ()> + where + F: Future + Send + 'static, + { + let app = router(tracker); - let handle = Handle::new(); + let handle = Handle::new(); - let cloned_handle = handle.clone(); + let cloned_handle = handle.clone(); - tokio::spawn(async move { - shutdown_signal_with_message(shutdown_signal, format!("Shutting down {context}..")).await; - cloned_handle.shutdown(); - }); + tokio::task::spawn_local(async move { + shutdown_signal.await; + cloned_handle.shutdown(); + }); - axum_server::from_tcp_rustls(tcp_listener, tls_config) - .handle(handle) - .serve(app.into_make_service()) + Box::pin(async { + let tls_config = RustlsConfig::from_pem_file(ssl_cert_path, ssl_key_path) + .await + .expect("Could not read tls cert."); + + axum_server::from_tcp_rustls(tcp_listener, tls_config) + .handle(handle) + .serve(app.into_make_service_with_connect_info::()) + .await + .expect("Axum server crashed."); + }) + } } -pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { +pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl Future> { let app = router(tracker); let server = 
axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -165,7 +183,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future, + tracker: Arc, ) -> impl Future> { let app = router(tracker); @@ -204,9 +222,9 @@ mod tests { let tracker = Arc::new(tracker::Tracker::new(cfg.clone(), None, statistics::Repo::new()).unwrap()); - let stopped_api_server = ApiServer::new(cfg.http_api.clone(), tracker); + let stopped_api_server = ApiServer::new(cfg.http_api.clone()); - let running_api_server_result = stopped_api_server.start(); + let running_api_server_result = stopped_api_server.start(tracker).await; assert!(running_api_server_result.is_ok()); diff --git a/src/jobs/tracker_apis.rs b/src/jobs/tracker_apis.rs index 85bb1b59f..939b58638 100644 --- a/src/jobs/tracker_apis.rs +++ b/src/jobs/tracker_apis.rs @@ -31,7 +31,7 @@ pub async fn start_job(config: &HttpApi, tracker: Arc) -> Join if !ssl_enabled { info!("Starting Torrust APIs server on: http://{}", bind_addr); - let handle = server::start(bind_addr, &tracker); + let handle = server::start(bind_addr, tracker); tx.send(ApiServerJobStarted()).expect("the API server should not be dropped"); @@ -45,7 +45,7 @@ pub async fn start_job(config: &HttpApi, tracker: Arc) -> Join .await .unwrap(); - let handle = server::start_tls(bind_addr, ssl_config, &tracker); + let handle = server::start_tls(bind_addr, ssl_config, tracker); tx.send(ApiServerJobStarted()).expect("the API server should not be dropped"); diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 468f6dbbd..57232855b 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -12,10 +12,10 @@ pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHan let bind_addr = config.bind_address.clone(); tokio::spawn(async move { - match Udp::new(tracker, &bind_addr).await { + match Udp::new(&bind_addr).await { Ok(udp_server) => { info!("Starting UDP server on: udp://{}", bind_addr); - udp_server.start().await; + 
udp_server.start(tracker).await; } Err(e) => { warn!("Could not start UDP tracker on: udp://{}", bind_addr); diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 211a0d1ba..e47a89dd4 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -11,12 +11,12 @@ use log::debug; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; -use crate::tracker::{self, statistics}; +use crate::tracker::{statistics, Tracker}; use crate::udp::error::Error; use crate::udp::peer_builder; use crate::udp::request::AnnounceWrapper; -pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { +pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: &Tracker) -> Response { match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|e| Error::InternalServer { message: format!("{e:?}"), location: Location::caller(), @@ -46,11 +46,7 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: A /// # Errors /// /// If a error happens in the `handle_request` function, it will just return the `ServerError`. -pub async fn handle_request( - request: Request, - remote_addr: SocketAddr, - tracker: Arc, -) -> Result { +pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: &Tracker) -> Result { match request { Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker).await, Request::Announce(announce_request) => handle_announce(remote_addr, &announce_request, tracker).await, @@ -61,11 +57,7 @@ pub async fn handle_request( /// # Errors /// /// This function dose not ever return an error. 
-pub async fn handle_connect( - remote_addr: SocketAddr, - request: &ConnectRequest, - tracker: Arc, -) -> Result { +pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, tracker: &Tracker) -> Result { let connection_cookie = make(&remote_addr); let connection_id = into_connection_id(&connection_cookie); @@ -90,7 +82,7 @@ pub async fn handle_connect( /// # Errors /// /// Will return `Error` if unable to `authenticate_request`. -pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), Error> { +pub async fn authenticate(info_hash: &InfoHash, tracker: &Tracker) -> Result<(), Error> { tracker .authenticate_request(info_hash, &None) .await @@ -105,7 +97,7 @@ pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) pub async fn handle_announce( remote_addr: SocketAddr, announce_request: &AnnounceRequest, - tracker: Arc, + tracker: &Tracker, ) -> Result { debug!("udp announce request: {:#?}", announce_request); @@ -116,7 +108,7 @@ pub async fn handle_announce( let info_hash = wrapped_announce_request.info_hash; let remote_client_ip = remote_addr.ip(); - authenticate(&info_hash, tracker.clone()).await?; + authenticate(&info_hash, tracker).await?; let mut peer = peer_builder::from_request(&wrapped_announce_request, &remote_client_ip); @@ -182,11 +174,7 @@ pub async fn handle_announce( /// # Errors /// /// This function dose not ever return an error. 
-pub async fn handle_scrape( - remote_addr: SocketAddr, - request: &ScrapeRequest, - tracker: Arc, -) -> Result { +pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tracker: &Tracker) -> Result { // Convert from aquatic infohashes let mut info_hashes = vec![]; for info_hash in &request.info_hashes { @@ -392,7 +380,7 @@ mod tests { transaction_id: TransactionId(0i32), }; - let response = handle_connect(sample_ipv4_remote_addr(), &request, initialized_public_tracker()) + let response = handle_connect(sample_ipv4_remote_addr(), &request, &initialized_public_tracker()) .await .unwrap(); @@ -411,7 +399,7 @@ mod tests { transaction_id: TransactionId(0i32), }; - let response = handle_connect(sample_ipv4_remote_addr(), &request, initialized_public_tracker()) + let response = handle_connect(sample_ipv4_remote_addr(), &request, &initialized_public_tracker()) .await .unwrap(); @@ -439,7 +427,7 @@ mod tests { let torrent_tracker = Arc::new( tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); - handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) + handle_connect(client_socket_address, &sample_connect_request(), &torrent_tracker) .await .unwrap(); } @@ -457,7 +445,7 @@ mod tests { let torrent_tracker = Arc::new( tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); - handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) + handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), &torrent_tracker) .await .unwrap(); } @@ -573,7 +561,7 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + handle_announce(remote_addr, &request, &tracker).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -593,11 +581,11 @@ mod tests { 
.with_connection_id(into_connection_id(&make(&remote_addr))) .into(); - let response = handle_announce(remote_addr, &request, initialized_public_tracker()) + let response = handle_announce(remote_addr, &request, &initialized_public_tracker()) .await .unwrap(); - let empty_peer_vector: Vec> = vec![]; + let empty_peer_vector: Vec> = vec![]; assert_eq!( response, Response::from(AnnounceResponse { @@ -636,7 +624,7 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + handle_announce(remote_addr, &request, &tracker).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -667,7 +655,7 @@ mod tests { .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); - handle_announce(remote_addr, &request, tracker.clone()).await.unwrap() + handle_announce(remote_addr, &request, &tracker).await.unwrap() } #[tokio::test] @@ -704,7 +692,7 @@ mod tests { handle_announce( sample_ipv4_socket_address(), &AnnounceRequestBuilder::default().into(), - tracker.clone(), + &tracker, ) .await .unwrap(); @@ -740,7 +728,7 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + handle_announce(remote_addr, &request, &tracker).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -797,7 +785,7 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + handle_announce(remote_addr, &request, &tracker).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -820,11 +808,11 @@ mod tests { .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); - let response = handle_announce(remote_addr, &request, initialized_public_tracker()) + let response = handle_announce(remote_addr, &request, &initialized_public_tracker()) .await .unwrap(); - let empty_peer_vector: Vec> = vec![]; + let 
empty_peer_vector: Vec> = vec![]; assert_eq!( response, Response::from(AnnounceResponse { @@ -863,7 +851,7 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + handle_announce(remote_addr, &request, &tracker).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -897,7 +885,7 @@ mod tests { .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); - handle_announce(remote_addr, &request, tracker.clone()).await.unwrap() + handle_announce(remote_addr, &request, &tracker).await.unwrap() } #[tokio::test] @@ -937,9 +925,7 @@ mod tests { .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); - handle_announce(remote_addr, &announce_request, tracker.clone()) - .await - .unwrap(); + handle_announce(remote_addr, &announce_request, &tracker).await.unwrap(); } mod from_a_loopback_ip { @@ -982,7 +968,7 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + handle_announce(remote_addr, &request, &tracker).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -1036,7 +1022,7 @@ mod tests { info_hashes, }; - let response = handle_scrape(remote_addr, &request, initialized_public_tracker()) + let response = handle_scrape(remote_addr, &request, &initialized_public_tracker()) .await .unwrap(); @@ -1083,7 +1069,7 @@ mod tests { let request = build_scrape_request(&remote_addr, &info_hash); - handle_scrape(remote_addr, &request, tracker.clone()).await.unwrap() + handle_scrape(remote_addr, &request, &tracker).await.unwrap() } fn match_scrape_response(response: Response) -> Option { @@ -1134,8 +1120,7 @@ mod tests { let request = build_scrape_request(&remote_addr, &non_existing_info_hash); - let torrent_stats = - match_scrape_response(handle_scrape(remote_addr, &request, tracker.clone()).await.unwrap()).unwrap(); + let torrent_stats = 
match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); let expected_torrent_stats = vec![zeroed_torrent_statistics()]; @@ -1177,8 +1162,7 @@ mod tests { let request = build_scrape_request(&remote_addr, &info_hash); - let torrent_stats = - match_scrape_response(handle_scrape(remote_addr, &request, tracker.clone()).await.unwrap()).unwrap(); + let torrent_stats = match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); let expected_torrent_stats = vec![TorrentScrapeStatistics { seeders: NumberOfPeers(1), @@ -1200,8 +1184,7 @@ mod tests { let request = build_scrape_request(&remote_addr, &info_hash); - let torrent_stats = - match_scrape_response(handle_scrape(remote_addr, &request, tracker.clone()).await.unwrap()).unwrap(); + let torrent_stats = match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); let expected_torrent_stats = vec![zeroed_torrent_statistics()]; @@ -1246,7 +1229,7 @@ mod tests { tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); - handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) + handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) .await .unwrap(); } @@ -1278,7 +1261,7 @@ mod tests { tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); - handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) + handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) .await .unwrap(); } diff --git a/src/udp/server.rs b/src/udp/server.rs index f74468189..f3f90362d 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -27,7 +27,6 @@ pub type RunningUdpServer = UdpServer; #[allow(clippy::module_name_repetitions)] pub struct UdpServer { pub cfg: torrust_tracker_configuration::UdpTracker, - pub tracker: Arc, pub state: S, } @@ -40,19 +39,15 
@@ pub struct Running { } impl UdpServer { - pub fn new(cfg: torrust_tracker_configuration::UdpTracker, tracker: Arc) -> Self { - Self { - cfg, - tracker, - state: Stopped {}, - } + pub fn new(cfg: torrust_tracker_configuration::UdpTracker) -> Self { + Self { cfg, state: Stopped {} } } /// # Errors /// /// Will return `Err` if UDP can't bind to given bind address. - pub async fn start(self) -> Result, Error> { - let udp = Udp::new(self.tracker.clone(), &self.cfg.bind_address) + pub async fn start(self, tracker: Arc) -> Result, Error> { + let udp = Udp::new(&self.cfg.bind_address) .await .map_err(|e| Error::Error(e.to_string()))?; @@ -61,12 +56,11 @@ impl UdpServer { let (sender, receiver) = tokio::sync::oneshot::channel::(); let job = tokio::spawn(async move { - udp.start_with_graceful_shutdown(shutdown_signal(receiver)).await; + udp.start_with_graceful_shutdown(tracker, shutdown_signal(receiver)).await; }); let running_udp_server: UdpServer = UdpServer { cfg: self.cfg, - tracker: self.tracker, state: Running { bind_address, stop_job_sender: sender, @@ -90,7 +84,6 @@ impl UdpServer { let stopped_api_server: UdpServer = UdpServer { cfg: self.cfg, - tracker: self.tracker, state: Stopped {}, }; @@ -100,30 +93,27 @@ impl UdpServer { pub struct Udp { socket: Arc, - tracker: Arc, } impl Udp { /// # Errors /// /// Will return `Err` unable to bind to the supplied `bind_address`. - pub async fn new(tracker: Arc, bind_address: &str) -> tokio::io::Result { + pub async fn new(bind_address: &str) -> tokio::io::Result { let socket = UdpSocket::bind(bind_address).await?; Ok(Udp { socket: Arc::new(socket), - tracker, }) } /// # Panics /// /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. - pub async fn start(&self) { + pub async fn start(&self, tracker: Arc) { loop { let mut data = [0; MAX_PACKET_SIZE]; let socket = self.socket.clone(); - let tracker = self.tracker.clone(); tokio::select! 
{ _ = tokio::signal::ctrl_c() => { @@ -137,7 +127,7 @@ impl Udp { debug!("From: {}", &remote_addr); debug!("Payload: {:?}", payload); - let response = handle_packet(remote_addr, payload, tracker).await; + let response = handle_packet(remote_addr, payload, &tracker).await; Udp::send_response(socket, remote_addr, response).await; } @@ -148,7 +138,7 @@ impl Udp { /// # Panics /// /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. - async fn start_with_graceful_shutdown(&self, shutdown_signal: F) + async fn start_with_graceful_shutdown(&self, tracker: Arc, shutdown_signal: F) where F: Future, { @@ -158,7 +148,6 @@ impl Udp { loop { let mut data = [0; MAX_PACKET_SIZE]; let socket = self.socket.clone(); - let tracker = self.tracker.clone(); tokio::select! { _ = &mut shutdown_signal => { @@ -172,7 +161,7 @@ impl Udp { debug!("From: {}", &remote_addr); debug!("Payload: {:?}", payload); - let response = handle_packet(remote_addr, payload, tracker).await; + let response = handle_packet(remote_addr, payload, &tracker).await; Udp::send_response(socket, remote_addr, response).await; } diff --git a/tests/api/test_environment.rs b/tests/api/test_environment.rs index 1f8708650..b6f5ca990 100644 --- a/tests/api/test_environment.rs +++ b/tests/api/test_environment.rs @@ -4,7 +4,6 @@ use torrust_tracker::apis::server::{ApiServer, RunningApiServer, StoppedApiServe use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; -use torrust_tracker_test_helpers::configuration; use super::connection_info::ConnectionInfo; use crate::common::tracker::new_tracker; @@ -15,6 +14,7 @@ pub type StoppedTestEnvironment = TestEnvironment; pub type RunningTestEnvironment = TestEnvironment; pub struct TestEnvironment { + pub cfg: Arc, pub tracker: Arc, pub state: S, } @@ -36,39 +36,45 @@ impl TestEnvironment { } impl TestEnvironment { - #[allow(dead_code)] - pub fn new_stopped() -> Self { - let 
api_server = api_server(); + pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { + let cfg = Arc::new(cfg); + + let tracker = new_tracker(cfg.clone()); + + let api_server = api_server(cfg.http_api.clone()); Self { - tracker: api_server.tracker.clone(), + cfg, + tracker, state: Stopped { api_server }, } } - #[allow(dead_code)] - pub fn start(self) -> TestEnvironment { + pub async fn start(self) -> TestEnvironment { TestEnvironment { - tracker: self.tracker, + cfg: self.cfg, + tracker: self.tracker.clone(), state: Running { - api_server: self.state.api_server.start().unwrap(), + api_server: self.state.api_server.start(self.tracker).await.unwrap(), }, } } + + pub fn config_mut(&mut self) -> &mut torrust_tracker_configuration::HttpApi { + &mut self.state.api_server.cfg + } } impl TestEnvironment { - pub fn new_running() -> Self { - let api_server = running_api_server(); + pub async fn new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { + let test_env = StoppedTestEnvironment::new_stopped(cfg); - Self { - tracker: api_server.tracker.clone(), - state: Running { api_server }, - } + test_env.start().await } pub async fn stop(self) -> TestEnvironment { TestEnvironment { + cfg: self.cfg, tracker: self.tracker, state: Stopped { api_server: self.state.api_server.stop().await.unwrap(), @@ -78,25 +84,22 @@ impl TestEnvironment { pub fn get_connection_info(&self) -> ConnectionInfo { ConnectionInfo { - bind_address: self.state.api_server.state.bind_address.to_string(), + bind_address: self.state.api_server.state.bind_addr.to_string(), api_token: self.state.api_server.cfg.access_tokens.get("admin").cloned(), } } } #[allow(clippy::module_name_repetitions)] -pub fn running_test_environment() -> RunningTestEnvironment { - TestEnvironment::new_running() +pub fn stopped_test_environment(cfg: torrust_tracker_configuration::Configuration) -> StoppedTestEnvironment { + TestEnvironment::new_stopped(cfg) } -pub fn api_server() -> 
StoppedApiServer { - let config = Arc::new(configuration::ephemeral()); - - let tracker = new_tracker(config.clone()); - - ApiServer::new(config.http_api.clone(), tracker) +#[allow(clippy::module_name_repetitions)] +pub async fn running_test_environment(cfg: torrust_tracker_configuration::Configuration) -> RunningTestEnvironment { + TestEnvironment::new_running(cfg).await } -pub fn running_api_server() -> RunningApiServer { - api_server().start().unwrap() +pub fn api_server(cfg: torrust_tracker_configuration::HttpApi) -> StoppedApiServer { + ApiServer::new(cfg) } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index ccdcded5e..d00c7d68c 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -9,7 +9,6 @@ mod api; mod common; mod tracker_apis { - use crate::common::fixtures::invalid_info_hashes; // When these infohashes are used in URL path params @@ -24,7 +23,29 @@ mod tracker_apis { [String::new(), " ".to_string()].to_vec() } + mod configuration { + use torrust_tracker_test_helpers::configuration; + + use crate::api::test_environment::stopped_test_environment; + + #[tokio::test] + #[should_panic] + async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { + let mut test_env = stopped_test_environment(configuration::ephemeral()); + + let cfg = test_env.config_mut(); + + cfg.ssl_enabled = true; + cfg.ssl_key_path = Some("bad key path".to_string()); + cfg.ssl_cert_path = Some("bad cert path".to_string()); + + test_env.start().await; + } + } + mod authentication { + use torrust_tracker_test_helpers::configuration; + use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; use crate::api::client::Client; use crate::api::test_environment::running_test_environment; @@ -32,7 +53,7 @@ mod tracker_apis { #[tokio::test] async fn should_authenticate_requests_by_using_a_token_query_param() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let token = 
test_env.get_connection_info().api_token.unwrap(); @@ -47,7 +68,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_missing() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let response = Client::new(test_env.get_connection_info()) .get_request_with_query("stats", Query::default()) @@ -60,7 +81,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_empty() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let response = Client::new(test_env.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", "")].to_vec())) @@ -73,7 +94,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_invalid() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let response = Client::new(test_env.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) @@ -86,7 +107,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let token = test_env.get_connection_info().api_token.unwrap(); @@ -113,6 +134,7 @@ mod tracker_apis { use torrust_tracker::apis::resources::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; use crate::api::client::Client; @@ -122,7 +144,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_getting_tracker_statistics() { - let test_env = running_test_environment(); + 
let test_env = running_test_environment(configuration::ephemeral()).await; test_env .add_torrent_peer( @@ -161,7 +183,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let response = Client::new(connection_with_invalid_token( test_env.get_connection_info().bind_address.as_str(), @@ -187,6 +209,7 @@ mod tracker_apis { use torrust_tracker::apis::resources::torrent::Torrent; use torrust_tracker::apis::resources::{self, torrent}; use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; use crate::api::asserts::{ @@ -201,7 +224,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_getting_torrents() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -226,7 +249,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_limiting_the_torrents_in_the_result() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -256,7 +279,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_the_torrents_result_pagination() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -286,7 +309,7 @@ mod tracker_apis { #[tokio::test] async fn 
should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; @@ -303,7 +326,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; @@ -320,7 +343,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let response = Client::new(connection_with_invalid_token( test_env.get_connection_info().bind_address.as_str(), @@ -341,7 +364,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_getting_a_torrent_info() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -370,7 +393,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -385,7 +408,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { let response = 
Client::new(test_env.get_connection_info()) @@ -408,7 +431,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -436,6 +459,7 @@ mod tracker_apis { use std::str::FromStr; use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; use crate::api::asserts::{ @@ -450,7 +474,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -471,7 +495,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -488,7 +512,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -511,7 +535,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_whitelisted() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -528,7 +552,7 @@ mod tracker_apis { #[tokio::test] async 
fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { let response = Client::new(test_env.get_connection_info()) @@ -551,7 +575,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -569,7 +593,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -584,7 +608,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { let response = Client::new(test_env.get_connection_info()) @@ -607,7 +631,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -626,7 +650,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - let test_env = 
running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -652,7 +676,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_reload_the_whitelist_from_the_database() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -677,7 +701,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -697,6 +721,7 @@ mod tracker_apis { use std::time::Duration; use torrust_tracker::tracker::auth::Key; + use torrust_tracker_test_helpers::configuration; use crate::api::asserts::{ assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, @@ -710,7 +735,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_generating_a_new_auth_key() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let seconds_valid = 60; @@ -732,7 +757,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let seconds_valid = 60; @@ -755,7 +780,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { - let test_env = running_test_environment(); + let test_env = 
running_test_environment(configuration::ephemeral()).await; let invalid_key_durations = [ // "", it returns 404 @@ -763,9 +788,9 @@ mod tracker_apis { "-1", "text", ]; - for invalid_key_duration in &invalid_key_durations { + for invalid_key_duration in invalid_key_durations { let response = Client::new(test_env.get_connection_info()) - .post(&format!("key/{}", &invalid_key_duration)) + .post(&format!("key/{}", invalid_key_duration)) .await; assert_invalid_key_duration_param(response, invalid_key_duration).await; @@ -776,7 +801,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_generated() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; force_database_error(&test_env.tracker); @@ -792,7 +817,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_deleting_an_auth_key() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let seconds_valid = 60; let auth_key = test_env @@ -812,7 +837,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let invalid_auth_keys = [ // "", it returns a 404 @@ -837,7 +862,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_deleted() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let seconds_valid = 60; let auth_key = test_env @@ -859,7 +884,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let seconds_valid = 60; @@ -896,7 +921,7 @@ mod tracker_apis { #[tokio::test] async fn 
should_allow_reloading_keys() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let seconds_valid = 60; test_env @@ -914,7 +939,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_keys_cannot_be_reloaded() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let seconds_valid = 60; test_env @@ -934,7 +959,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let seconds_valid = 60; test_env diff --git a/tests/udp/test_environment.rs b/tests/udp/test_environment.rs index 02d51c4bf..f729777a1 100644 --- a/tests/udp/test_environment.rs +++ b/tests/udp/test_environment.rs @@ -5,7 +5,6 @@ use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; use torrust_tracker::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; -use torrust_tracker_test_helpers::configuration; use crate::common::tracker::new_tracker; @@ -15,6 +14,7 @@ pub type StoppedTestEnvironment = TestEnvironment; pub type RunningTestEnvironment = TestEnvironment; pub struct TestEnvironment { + pub cfg: Arc, pub tracker: Arc, pub state: S, } @@ -38,39 +38,41 @@ impl TestEnvironment { impl TestEnvironment { #[allow(dead_code)] - pub fn new_stopped() -> Self { - let udp_server = udp_server(); + pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { + let cfg = Arc::new(cfg); + + let tracker = new_tracker(cfg.clone()); + + let udp_server = udp_server(cfg.udp_trackers[0].clone()); Self { - tracker: udp_server.tracker.clone(), - state: Stopped { udp_server: udp_server }, + cfg, + tracker, + state: Stopped { udp_server }, } } #[allow(dead_code)] pub async fn start(self) -> 
TestEnvironment { TestEnvironment { - tracker: self.tracker, + cfg: self.cfg, + tracker: self.tracker.clone(), state: Running { - udp_server: self.state.udp_server.start().await.unwrap(), + udp_server: self.state.udp_server.start(self.tracker).await.unwrap(), }, } } } impl TestEnvironment { - pub async fn new_running() -> Self { - let udp_server = running_udp_server().await; - - Self { - tracker: udp_server.tracker.clone(), - state: Running { udp_server: udp_server }, - } + pub async fn new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { + StoppedTestEnvironment::new_stopped(cfg).start().await } #[allow(dead_code)] pub async fn stop(self) -> TestEnvironment { TestEnvironment { + cfg: self.cfg, tracker: self.tracker, state: Stopped { udp_server: self.state.udp_server.stop().await.unwrap(), @@ -83,19 +85,16 @@ impl TestEnvironment { } } -#[allow(clippy::module_name_repetitions)] -pub async fn running_test_environment() -> RunningTestEnvironment { - TestEnvironment::new_running().await +#[allow(clippy::module_name_repetitions, dead_code)] +pub fn stopped_test_environment(cfg: torrust_tracker_configuration::Configuration) -> StoppedTestEnvironment { + TestEnvironment::new_stopped(cfg) } -pub fn udp_server() -> StoppedUdpServer { - let config = Arc::new(configuration::ephemeral()); - - let tracker = new_tracker(config.clone()); - - UdpServer::new(config.udp_trackers[0].clone(), tracker) +#[allow(clippy::module_name_repetitions)] +pub async fn running_test_environment(cfg: torrust_tracker_configuration::Configuration) -> RunningTestEnvironment { + TestEnvironment::new_running(cfg).await } -pub async fn running_udp_server() -> RunningUdpServer { - udp_server().start().await.unwrap() +pub fn udp_server(cfg: torrust_tracker_configuration::UdpTracker) -> StoppedUdpServer { + UdpServer::new(cfg) } diff --git a/tests/udp_tracker.rs b/tests/udp_tracker.rs index b7cc3bd6f..0f9283a8b 100644 --- a/tests/udp_tracker.rs +++ b/tests/udp_tracker.rs @@ -17,6 +17,7 
@@ mod udp_tracker_server { use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; use torrust_tracker::udp::MAX_PACKET_SIZE; + use torrust_tracker_test_helpers::configuration; use crate::udp::asserts::is_error_response; use crate::udp::client::{new_udp_client_connected, UdpTrackerClient}; @@ -45,7 +46,7 @@ mod udp_tracker_server { #[tokio::test] async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { - let test_env = running_test_environment().await; + let test_env = running_test_environment(configuration::ephemeral()).await; let client = new_udp_client_connected(&test_env.bind_address().to_string()).await; @@ -60,6 +61,7 @@ mod udp_tracker_server { mod receiving_a_connection_request { use aquatic_udp_protocol::{ConnectRequest, TransactionId}; + use torrust_tracker_test_helpers::configuration; use crate::udp::asserts::is_connect_response; use crate::udp::client::new_udp_tracker_client_connected; @@ -67,7 +69,7 @@ mod udp_tracker_server { #[tokio::test] async fn should_return_a_connect_response() { - let test_env = running_test_environment().await; + let test_env = running_test_environment(configuration::ephemeral()).await; let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; @@ -90,6 +92,7 @@ mod udp_tracker_server { AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, TransactionId, }; + use torrust_tracker_test_helpers::configuration; use crate::udp::asserts::is_ipv4_announce_response; use crate::udp::client::new_udp_tracker_client_connected; @@ -98,7 +101,7 @@ mod udp_tracker_server { #[tokio::test] async fn should_return_an_announce_response() { - let test_env = running_test_environment().await; + let test_env = running_test_environment(configuration::ephemeral()).await; let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; @@ -131,6 +134,7 @@ mod udp_tracker_server { mod 
receiving_an_scrape_request { use aquatic_udp_protocol::{ConnectionId, InfoHash, ScrapeRequest, TransactionId}; + use torrust_tracker_test_helpers::configuration; use crate::udp::asserts::is_scrape_response; use crate::udp::client::new_udp_tracker_client_connected; @@ -139,7 +143,7 @@ mod udp_tracker_server { #[tokio::test] async fn should_return_a_scrape_response() { - let test_env = running_test_environment().await; + let test_env = running_test_environment(configuration::ephemeral()).await; let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; From a611fbd26acc13d67f24fa51767646207044de4a Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 9 Mar 2023 11:31:17 +0100 Subject: [PATCH 0456/1003] chore: fix clippy errors --- src/apis/routes.rs | 1 + src/apis/server.rs | 18 +++++++++++++++--- src/http/axum_implementation/routes.rs | 1 + src/http/server.rs | 17 ++++++++++++++--- src/udp/server.rs | 1 + tests/common/tracker.rs | 1 + tests/tracker_api.rs | 2 +- 7 files changed, 34 insertions(+), 7 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 6e3218605..ecc51090c 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -10,6 +10,7 @@ use super::handlers::{ use super::middlewares::auth::auth; use crate::tracker::Tracker; +#[allow(clippy::needless_pass_by_value)] pub fn router(tracker: Arc) -> Router { Router::new() // Stats diff --git a/src/apis/server.rs b/src/apis/server.rs index 0a5013161..a283bbc54 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -38,10 +38,14 @@ pub struct Running { } impl ApiServer { + #[must_use] pub fn new(cfg: torrust_tracker_configuration::HttpApi) -> Self { Self { cfg, state: Stopped {} } } + /// # Errors + /// + /// It would return an error if no `SocketAddr` is returned after launching the server. 
pub async fn start(self, tracker: Arc) -> Result, Error> { let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); @@ -51,12 +55,14 @@ impl ApiServer { let task = tokio::spawn(async move { let (bind_addr, server) = Launcher::start(&configuration, tracker, shutdown_signal(shutdown_receiver)); - addr_sender.send(bind_addr).unwrap(); + addr_sender.send(bind_addr).expect("Could not return SocketAddr."); server.await; }); - let bind_address = addr_receiver.await.expect("Could not receive bind_address."); + let bind_address = addr_receiver + .await + .map_err(|_| Error::Error("Could not receive bind_address.".to_string()))?; Ok(ApiServer { cfg: self.cfg, @@ -70,8 +76,14 @@ impl ApiServer { } impl ApiServer { + /// # Errors + /// + /// It would return an error if the channel for the task killer signal was closed. pub async fn stop(self) -> Result, Error> { - self.state.task_killer.send(0).unwrap(); + self.state + .task_killer + .send(0) + .map_err(|_| Error::Error("Task killer channel was closed.".to_string()))?; let _ = self.state.task.await; diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index b0f30453d..acde5f662 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -7,6 +7,7 @@ use axum_client_ip::SecureClientIpSource; use super::handlers::{announce, scrape}; use crate::tracker::Tracker; +#[allow(clippy::needless_pass_by_value)] pub fn router(tracker: Arc) -> Router { Router::new() // Announce request diff --git a/src/http/server.rs b/src/http/server.rs index e7b6c1888..98160777c 100644 --- a/src/http/server.rs +++ b/src/http/server.rs @@ -56,6 +56,9 @@ impl HttpServer> { } } + /// # Errors + /// + /// It would return an error if no `SocketAddr` is returned after launching the server. 
pub async fn start(self, tracker: Arc) -> Result>, Error> { let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); @@ -67,14 +70,16 @@ impl HttpServer> { let (bind_addr, server) = launcher.start_with_graceful_shutdown(configuration, tracker, shutdown_signal(shutdown_receiver)); - addr_sender.send(bind_addr).unwrap(); + addr_sender.send(bind_addr).expect("Could not return SocketAddr."); server.await; launcher }); - let bind_address = addr_receiver.await.expect("Could not receive bind_address."); + let bind_address = addr_receiver + .await + .map_err(|_| Error::Error("Could not receive bind_address.".to_string()))?; Ok(HttpServer { cfg: self.cfg, @@ -88,8 +93,14 @@ impl HttpServer> { } impl HttpServer> { + /// # Errors + /// + /// It would return an error if the channel for the task killer signal was closed. pub async fn stop(self) -> Result>, Error> { - self.state.task_killer.send(0).unwrap(); + self.state + .task_killer + .send(0) + .map_err(|_| Error::Error("Task killer channel was closed.".to_string()))?; let launcher = self.state.task.await.map_err(|e| Error::Error(e.to_string()))?; diff --git a/src/udp/server.rs b/src/udp/server.rs index f3f90362d..e52b8fd52 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -39,6 +39,7 @@ pub struct Running { } impl UdpServer { + #[must_use] pub fn new(cfg: torrust_tracker_configuration::UdpTracker) -> Self { Self { cfg, state: Stopped {} } } diff --git a/tests/common/tracker.rs b/tests/common/tracker.rs index ed2d8392b..127cfefc4 100644 --- a/tests/common/tracker.rs +++ b/tests/common/tracker.rs @@ -5,6 +5,7 @@ use torrust_tracker::tracker::Tracker; use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; // TODO: Move to test-helpers crate once `Tracker` is isolated. 
+#[allow(clippy::module_name_repetitions)] pub fn new_tracker(configuration: Arc) -> Arc { // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index d00c7d68c..dac5907c2 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -790,7 +790,7 @@ mod tracker_apis { for invalid_key_duration in invalid_key_durations { let response = Client::new(test_env.get_connection_info()) - .post(&format!("key/{}", invalid_key_duration)) + .post(&format!("key/{invalid_key_duration}")) .await; assert_invalid_key_duration_param(response, invalid_key_duration).await; From cf9e9a9be268606b4b466127c583f3023167c10a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 Mar 2023 17:30:59 +0000 Subject: [PATCH 0457/1003] fix: merge conflicts --- src/http/axum_implementation/handlers/announce.rs | 2 +- src/http/axum_implementation/handlers/scrape.rs | 2 +- src/http/axum_implementation/routes.rs | 2 +- src/http/axum_implementation/services/announce.rs | 8 ++++---- src/tracker/mod.rs | 9 +++++---- tests/http_tracker.rs | 2 ++ 6 files changed, 14 insertions(+), 11 deletions(-) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 6458e2c2f..1f1d7e176 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -182,7 +182,7 @@ mod tests { let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker - match Tracker::new(&Arc::new(configuration), Some(stats_event_sender), stats_repository) { + match Tracker::new(Arc::new(configuration), Some(stats_event_sender), stats_repository) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 43bf6c99f..99bde0087 100644 --- 
a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -140,7 +140,7 @@ mod tests { let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker - match Tracker::new(&Arc::new(configuration), Some(stats_event_sender), stats_repository) { + match Tracker::new(Arc::new(configuration), Some(stats_event_sender), stats_repository) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index acde5f662..a8e740f69 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -15,7 +15,7 @@ pub fn router(tracker: Arc) -> Router { .route("/announce/:key", get(announce::handle_with_key).with_state(tracker.clone())) // Scrape request .route("/scrape", get(scrape::handle_without_key).with_state(tracker.clone())) - .route("/scrape/:key", get(scrape::handle_with_key).with_state(tracker.clone())) + .route("/scrape/:key", get(scrape::handle_with_key).with_state(tracker)) // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) } diff --git a/src/http/axum_implementation/services/announce.rs b/src/http/axum_implementation/services/announce.rs index 255a73c8f..479fb9d2b 100644 --- a/src/http/axum_implementation/services/announce.rs +++ b/src/http/axum_implementation/services/announce.rs @@ -51,7 +51,7 @@ mod tests { let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker - match Tracker::new(&Arc::new(configuration), Some(stats_event_sender), stats_repository) { + match Tracker::new(Arc::new(configuration), Some(stats_event_sender), stats_repository) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) @@ -137,7 +137,7 @@ mod tests { let tracker = Arc::new( Tracker::new( - &Arc::new(configuration::ephemeral()), + 
Arc::new(configuration::ephemeral()), Some(stats_event_sender), statistics::Repo::new(), ) @@ -154,7 +154,7 @@ mod tests { configuration.external_ip = Some(IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)).to_string()); - Tracker::new(&Arc::new(configuration), Some(stats_event_sender), statistics::Repo::new()).unwrap() + Tracker::new(Arc::new(configuration), Some(stats_event_sender), statistics::Repo::new()).unwrap() } fn peer_with_the_ipv4_loopback_ip() -> Peer { @@ -201,7 +201,7 @@ mod tests { let tracker = Arc::new( Tracker::new( - &Arc::new(configuration::ephemeral()), + Arc::new(configuration::ephemeral()), Some(stats_event_sender), statistics::Repo::new(), ) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 874233d91..aae22f9b7 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -552,6 +552,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; + use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_primitives::TrackerMode; @@ -566,25 +567,25 @@ mod tests { pub fn public_tracker() -> Tracker { let mut configuration = configuration::ephemeral(); configuration.mode = TrackerMode::Public; - tracker_factory(configuration) + tracker_factory(Arc::new(configuration)) } pub fn private_tracker() -> Tracker { let mut configuration = configuration::ephemeral(); configuration.mode = TrackerMode::Private; - tracker_factory(configuration) + tracker_factory(Arc::new(configuration)) } pub fn whitelisted_tracker() -> Tracker { let mut configuration = configuration::ephemeral(); configuration.mode = TrackerMode::Listed; - tracker_factory(configuration) + tracker_factory(Arc::new(configuration)) } pub fn tracker_persisting_torrents_in_database() -> Tracker { let mut configuration = configuration::ephemeral(); configuration.persistent_torrent_completed_stat = true; - tracker_factory(configuration) + tracker_factory(Arc::new(configuration)) } fn 
sample_info_hash() -> InfoHash { diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index a4e87115a..d83ccbd0c 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1198,6 +1198,7 @@ mod warp_test_env { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; + use torrust_tracker_test_helpers::configuration; use crate::http::asserts::assert_is_announce_response; use crate::http::asserts_warp::{ @@ -2616,6 +2617,7 @@ mod axum_test_env { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; + use torrust_tracker_test_helpers::configuration; use crate::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; use crate::http::client::Client; From 0097c85c2f896d0686d7cfdcf21304b728bf4b61 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 Mar 2023 17:35:59 +0000 Subject: [PATCH 0458/1003] fix: clippy warnings --- tests/http_tracker.rs | 264 ++++++++++++++++-------------------------- 1 file changed, 102 insertions(+), 162 deletions(-) diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index d83ccbd0c..3a5e84525 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -61,9 +61,7 @@ mod warp_test_env { let params = QueryBuilder::default().query().params(); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_could_not_find_remote_address_on_xff_header_error_response(response).await; @@ -76,7 +74,7 @@ mod warp_test_env { let params = QueryBuilder::default().query().params(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") .await; @@ -131,9 +129,7 @@ mod warp_test_env { params.remove_optional_params(); - let response = 
Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_is_announce_response(response).await; @@ -144,7 +140,7 @@ mod warp_test_env { async fn should_fail_when_the_url_query_component_is_empty() { let test_env = running_test_environment::(configuration::ephemeral()).await; - let response = Client::new(test_env.bind_address().clone()).get("announce").await; + let response = Client::new(*test_env.bind_address()).get("announce").await; assert_internal_server_error_response(response).await; } @@ -159,9 +155,7 @@ mod warp_test_env { params.info_hash = None; - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_invalid_info_hash_error_response(response).await; @@ -171,9 +165,7 @@ mod warp_test_env { params.peer_id = None; - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_invalid_peer_id_error_response(response).await; @@ -183,9 +175,7 @@ mod warp_test_env { params.port = None; - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_internal_server_error_response(response).await; @@ -201,9 +191,7 @@ mod warp_test_env { for invalid_value in &invalid_info_hashes() { params.set("info_hash", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; 
assert_invalid_info_hash_error_response(response).await; } @@ -224,9 +212,7 @@ mod warp_test_env { params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_is_announce_response(response).await; @@ -244,9 +230,7 @@ mod warp_test_env { for invalid_value in invalid_values { params.set("downloaded", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_internal_server_error_response(response).await; } @@ -265,9 +249,7 @@ mod warp_test_env { for invalid_value in invalid_values { params.set("uploaded", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_internal_server_error_response(response).await; } @@ -293,9 +275,7 @@ mod warp_test_env { for invalid_value in invalid_values { params.set("peer_id", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_invalid_peer_id_error_response(response).await; } @@ -314,9 +294,7 @@ mod warp_test_env { for invalid_value in invalid_values { params.set("port", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_internal_server_error_response(response).await; } @@ -335,9 +313,7 @@ mod warp_test_env { for invalid_value in 
invalid_values { params.set("left", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_internal_server_error_response(response).await; } @@ -366,9 +342,7 @@ mod warp_test_env { for invalid_value in invalid_values { params.set("event", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_is_announce_response(response).await; } @@ -387,9 +361,7 @@ mod warp_test_env { for invalid_value in invalid_values { params.set("compact", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_internal_server_error_response(response).await; } @@ -401,7 +373,7 @@ mod warp_test_env { async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) @@ -439,7 +411,7 @@ mod warp_test_env { test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2. 
This new peer is non included on the response peer list - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -481,7 +453,7 @@ mod warp_test_env { assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); - let response = Client::new(test_env.bind_address().clone()).announce(&announce_query).await; + let response = Client::new(*test_env.bind_address()).announce(&announce_query).await; assert_empty_announce_response(response).await; @@ -506,7 +478,7 @@ mod warp_test_env { test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 accepting compact responses - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -549,7 +521,7 @@ mod warp_test_env { // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list // https://www.bittorrent.org/beps/bep_0023.html - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -574,7 +546,7 @@ mod warp_test_env { async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().query()) .await; @@ -591,7 +563,7 @@ mod warp_test_env { async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) + Client::bind(*test_env.bind_address(), 
IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; @@ -610,7 +582,7 @@ mod warp_test_env { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -631,7 +603,7 @@ mod warp_test_env { async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().query()) .await; @@ -648,7 +620,7 @@ mod warp_test_env { async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; @@ -667,7 +639,7 @@ mod warp_test_env { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -691,7 +663,7 @@ mod warp_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); - let client = Client::bind(test_env.bind_address().clone(), client_ip); + let client = Client::bind(*test_env.bind_address(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -727,7 +699,7 @@ mod warp_test_env { let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = 
loopback_ip; - let client = Client::bind(test_env.bind_address().clone(), client_ip); + let client = Client::bind(*test_env.bind_address(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -763,7 +735,7 @@ mod warp_test_env { let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(test_env.bind_address().clone(), client_ip); + let client = Client::bind(*test_env.bind_address(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -794,7 +766,7 @@ mod warp_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let client = Client::new(test_env.bind_address().clone()); + let client = Client::new(*test_env.bind_address()); let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); @@ -844,7 +816,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_request_is_empty() { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let response = Client::new(test_env.bind_address().clone()).get("scrape").await; + let response = Client::new(*test_env.bind_address()).get("scrape").await; assert_internal_server_error_response(response).await; @@ -860,9 +832,7 @@ mod warp_test_env { for invalid_value in &invalid_info_hashes() { params.set_one_info_hash_param(invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; // code-review: it's not returning the invalid info hash error assert_internal_server_error_response(response).await; @@ -887,7 +857,7 @@ mod warp_test_env { ) .await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() 
.with_one_info_hash(&info_hash) @@ -927,7 +897,7 @@ mod warp_test_env { ) .await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -957,7 +927,7 @@ mod warp_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -977,7 +947,7 @@ mod warp_test_env { let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .add_info_hash(&info_hash1) @@ -1002,7 +972,7 @@ mod warp_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1025,7 +995,7 @@ mod warp_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1064,7 +1034,7 @@ mod warp_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) 
.announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -1086,7 +1056,7 @@ mod warp_test_env { .await .expect("should add the torrent to the whitelist"); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -1127,7 +1097,7 @@ mod warp_test_env { ) .await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1164,7 +1134,7 @@ mod warp_test_env { .await .expect("should add the torrent to the whitelist"); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1215,7 +1185,7 @@ mod warp_test_env { let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), key.id()) + let response = Client::authenticated(*test_env.bind_address(), key.id()) .announce(&QueryBuilder::default().query()) .await; @@ -1230,7 +1200,7 @@ mod warp_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -1244,7 +1214,7 @@ mod warp_test_env { // The tracker does not have this key let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), unregistered_key) + let response = Client::authenticated(*test_env.bind_address(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; @@ -1288,7 
+1258,7 @@ mod warp_test_env { ) .await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1321,7 +1291,7 @@ mod warp_test_env { let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), key.id()) + let response = Client::authenticated(*test_env.bind_address(), key.id()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1365,7 +1335,7 @@ mod warp_test_env { let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), false_key) + let response = Client::authenticated(*test_env.bind_address(), false_key) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1414,9 +1384,7 @@ mod axum_test_env { let params = QueryBuilder::default().query().params(); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; @@ -1429,7 +1397,7 @@ mod axum_test_env { let params = QueryBuilder::default().query().params(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") .await; @@ -1483,9 +1451,7 @@ mod axum_test_env { params.remove_optional_params(); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_is_announce_response(response).await; 
@@ -1496,7 +1462,7 @@ mod axum_test_env { async fn should_fail_when_the_url_query_component_is_empty() { let test_env = running_test_environment::(configuration::ephemeral()).await; - let response = Client::new(test_env.bind_address().clone()).get("announce").await; + let response = Client::new(*test_env.bind_address()).get("announce").await; assert_missing_query_params_for_announce_request_error_response(response).await; @@ -1509,7 +1475,7 @@ mod axum_test_env { let invalid_query_param = "a=b=c"; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .get(&format!("announce?{invalid_query_param}")) .await; @@ -1528,9 +1494,7 @@ mod axum_test_env { params.info_hash = None; - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "missing param info_hash").await; @@ -1540,9 +1504,7 @@ mod axum_test_env { params.peer_id = None; - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "missing param peer_id").await; @@ -1552,9 +1514,7 @@ mod axum_test_env { params.port = None; - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "missing param port").await; @@ -1570,9 +1530,7 @@ mod axum_test_env { for invalid_value in &invalid_info_hashes() { params.set("info_hash", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response 
= Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_cannot_parse_query_params_error_response(response, "").await; } @@ -1593,9 +1551,7 @@ mod axum_test_env { params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_is_announce_response(response).await; @@ -1613,9 +1569,7 @@ mod axum_test_env { for invalid_value in invalid_values { params.set("downloaded", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } @@ -1634,9 +1588,7 @@ mod axum_test_env { for invalid_value in invalid_values { params.set("uploaded", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } @@ -1662,9 +1614,7 @@ mod axum_test_env { for invalid_value in invalid_values { params.set("peer_id", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } @@ -1683,9 +1633,7 @@ mod axum_test_env { for invalid_value in invalid_values { params.set("port", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = 
Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } @@ -1704,9 +1652,7 @@ mod axum_test_env { for invalid_value in invalid_values { params.set("left", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } @@ -1733,9 +1679,7 @@ mod axum_test_env { for invalid_value in invalid_values { params.set("event", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } @@ -1754,9 +1698,7 @@ mod axum_test_env { for invalid_value in invalid_values { params.set("compact", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } @@ -1768,7 +1710,7 @@ mod axum_test_env { async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) @@ -1806,7 +1748,7 @@ mod axum_test_env { test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2. 
This new peer is non included on the response peer list - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -1855,7 +1797,7 @@ mod axum_test_env { test_env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; // Announce the new Peer. - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -1898,7 +1840,7 @@ mod axum_test_env { assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); - let response = Client::new(test_env.bind_address().clone()).announce(&announce_query).await; + let response = Client::new(*test_env.bind_address()).announce(&announce_query).await; assert_empty_announce_response(response).await; @@ -1923,7 +1865,7 @@ mod axum_test_env { test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 accepting compact responses - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -1966,7 +1908,7 @@ mod axum_test_env { // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list // https://www.bittorrent.org/beps/bep_0023.html - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -1991,7 +1933,7 @@ mod axum_test_env { async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().query()) .await; @@ -2008,7 +1950,7 @@ 
mod axum_test_env { async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; @@ -2027,7 +1969,7 @@ mod axum_test_env { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -2048,7 +1990,7 @@ mod axum_test_env { async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().query()) .await; @@ -2065,7 +2007,7 @@ mod axum_test_env { async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; @@ -2084,7 +2026,7 @@ mod axum_test_env { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -2108,7 +2050,7 @@ mod axum_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); - let client = 
Client::bind(test_env.bind_address().clone(), client_ip); + let client = Client::bind(*test_env.bind_address(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -2144,7 +2086,7 @@ mod axum_test_env { let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(test_env.bind_address().clone(), client_ip); + let client = Client::bind(*test_env.bind_address(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -2180,7 +2122,7 @@ mod axum_test_env { let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(test_env.bind_address().clone(), client_ip); + let client = Client::bind(*test_env.bind_address(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -2211,7 +2153,7 @@ mod axum_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let client = Client::new(test_env.bind_address().clone()); + let client = Client::new(*test_env.bind_address()); let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); @@ -2265,7 +2207,7 @@ mod axum_test_env { #[allow(dead_code)] async fn should_fail_when_the_request_is_empty() { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let response = Client::new(test_env.bind_address().clone()).get("scrape").await; + let response = Client::new(*test_env.bind_address()).get("scrape").await; assert_missing_query_params_for_scrape_request_error_response(response).await; @@ -2281,9 +2223,7 @@ mod axum_test_env { for invalid_value in &invalid_info_hashes() { params.set_one_info_hash_param(invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; 
assert_cannot_parse_query_params_error_response(response, "").await; } @@ -2307,7 +2247,7 @@ mod axum_test_env { ) .await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2347,7 +2287,7 @@ mod axum_test_env { ) .await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2377,7 +2317,7 @@ mod axum_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2397,7 +2337,7 @@ mod axum_test_env { let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .add_info_hash(&info_hash1) @@ -2422,7 +2362,7 @@ mod axum_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2445,7 +2385,7 @@ mod axum_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) .scrape( &requests::scrape::QueryBuilder::default() 
.with_one_info_hash(&info_hash) @@ -2484,7 +2424,7 @@ mod axum_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -2505,7 +2445,7 @@ mod axum_test_env { .await .expect("should add the torrent to the whitelist"); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -2546,7 +2486,7 @@ mod axum_test_env { ) .await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2583,7 +2523,7 @@ mod axum_test_env { .await .expect("should add the torrent to the whitelist"); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2631,7 +2571,7 @@ mod axum_test_env { let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), key.id()) + let response = Client::authenticated(*test_env.bind_address(), key.id()) .announce(&QueryBuilder::default().query()) .await; @@ -2646,7 +2586,7 @@ mod axum_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -2662,7 +2602,7 @@ mod axum_test_env { let invalid_key = "INVALID_KEY"; - let response = 
Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .get(&format!( "announce/{invalid_key}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" )) @@ -2679,7 +2619,7 @@ mod axum_test_env { // The tracker does not have this key let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), unregistered_key) + let response = Client::authenticated(*test_env.bind_address(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; @@ -2714,7 +2654,7 @@ mod axum_test_env { let invalid_key = "INVALID_KEY"; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .get(&format!( "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" )) @@ -2739,7 +2679,7 @@ mod axum_test_env { ) .await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2772,7 +2712,7 @@ mod axum_test_env { let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), key.id()) + let response = Client::authenticated(*test_env.bind_address(), key.id()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2817,7 +2757,7 @@ mod axum_test_env { let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), false_key) + let response = Client::authenticated(*test_env.bind_address(), false_key) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) 
From 4ad981574ac319ccd9fcd86c425caf0a0ae82168 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 Mar 2023 18:26:10 +0000 Subject: [PATCH 0459/1003] refactor: remove duplicate code --- packages/test-helpers/src/configuration.rs | 9 ++ .../axum_implementation/handlers/announce.rs | 42 ++-------- .../axum_implementation/handlers/scrape.rs | 42 ++-------- .../axum_implementation/services/announce.rs | 24 +----- src/tracker/mod.rs | 19 ++--- src/udp/handlers.rs | 83 ++++++++----------- tests/http_tracker.rs | 9 +- 7 files changed, 70 insertions(+), 158 deletions(-) diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index ec29fdbe1..0b7a269ff 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -55,6 +55,15 @@ pub fn ephemeral_with_reverse_proxy() -> Configuration { cfg } +#[must_use] +pub fn ephemeral_without_reverse_proxy() -> Configuration { + let mut cfg = ephemeral(); + + cfg.on_reverse_proxy = false; + + cfg +} + #[must_use] pub fn ephemeral_mode_public() -> Configuration { let mut cfg = ephemeral(); diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 1f1d7e176..ebb8c8586 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -138,56 +138,30 @@ fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { #[cfg(test)] mod tests { - use std::sync::Arc; - use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::TrackerMode; use torrust_tracker_test_helpers::configuration; use crate::http::axum_implementation::requests::announce::Announce; use crate::http::axum_implementation::responses; use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; use crate::protocol::info_hash::InfoHash; - use crate::tracker::statistics::Keeper; + use 
crate::tracker::services::common::tracker_factory; use crate::tracker::{peer, Tracker}; fn private_tracker() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.mode = TrackerMode::Private; - tracker_factory(configuration) + tracker_factory(configuration::ephemeral_mode_private().into()) } - fn listed_tracker() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.mode = TrackerMode::Listed; - tracker_factory(configuration) + fn whitelisted_tracker() -> Tracker { + tracker_factory(configuration::ephemeral_mode_whitelisted().into()) } fn tracker_on_reverse_proxy() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.on_reverse_proxy = true; - tracker_factory(configuration) + tracker_factory(configuration::ephemeral_with_reverse_proxy().into()) } fn tracker_not_on_reverse_proxy() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.on_reverse_proxy = false; - tracker_factory(configuration) - } - - fn tracker_factory(configuration: Configuration) -> Tracker { - // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
- - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - match Tracker::new(Arc::new(configuration), Some(stats_event_sender), stats_repository) { - Ok(tracker) => tracker, - Err(error) => { - panic!("{}", error) - } - } + tracker_factory(configuration::ephemeral_without_reverse_proxy().into()) } fn sample_announce_request() -> Announce { @@ -263,13 +237,13 @@ mod tests { use std::sync::Arc; - use super::{listed_tracker, sample_announce_request, sample_client_ip_sources}; + use super::{sample_announce_request, sample_client_ip_sources, whitelisted_tracker}; use crate::http::axum_implementation::handlers::announce::handle_announce; use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { - let tracker = Arc::new(listed_tracker()); + let tracker = Arc::new(whitelisted_tracker()); let announce_request = sample_announce_request(); diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 99bde0087..fd316882d 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -96,56 +96,30 @@ fn build_response(scrape_data: ScrapeData) -> Response { mod tests { use std::net::IpAddr; use std::str::FromStr; - use std::sync::Arc; - use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::TrackerMode; use torrust_tracker_test_helpers::configuration; use crate::http::axum_implementation::requests::scrape::Scrape; use crate::http::axum_implementation::responses; use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; use crate::protocol::info_hash::InfoHash; - use crate::tracker::statistics::Keeper; + use crate::tracker::services::common::tracker_factory; use crate::tracker::Tracker; fn private_tracker() -> 
Tracker { - let mut configuration = configuration::ephemeral(); - configuration.mode = TrackerMode::Private; - tracker_factory(configuration) + tracker_factory(configuration::ephemeral_mode_private().into()) } - fn listed_tracker() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.mode = TrackerMode::Listed; - tracker_factory(configuration) + fn whitelisted_tracker() -> Tracker { + tracker_factory(configuration::ephemeral_mode_whitelisted().into()) } fn tracker_on_reverse_proxy() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.on_reverse_proxy = true; - tracker_factory(configuration) + tracker_factory(configuration::ephemeral_with_reverse_proxy().into()) } fn tracker_not_on_reverse_proxy() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.on_reverse_proxy = false; - tracker_factory(configuration) - } - - fn tracker_factory(configuration: Configuration) -> Tracker { - // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
- - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - match Tracker::new(Arc::new(configuration), Some(stats_event_sender), stats_repository) { - Ok(tracker) => tracker, - Err(error) => { - panic!("{}", error) - } - } + tracker_factory(configuration::ephemeral_without_reverse_proxy().into()) } fn sample_scrape_request() -> Scrape { @@ -214,13 +188,13 @@ mod tests { use std::sync::Arc; - use super::{listed_tracker, sample_client_ip_sources, sample_scrape_request}; + use super::{sample_client_ip_sources, sample_scrape_request, whitelisted_tracker}; use crate::http::axum_implementation::handlers::scrape::handle_scrape; use crate::tracker::ScrapeData; #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { - let tracker = Arc::new(listed_tracker()); + let tracker = Arc::new(whitelisted_tracker()); let scrape_request = sample_scrape_request(); diff --git a/src/http/axum_implementation/services/announce.rs b/src/http/axum_implementation/services/announce.rs index 479fb9d2b..73d6ed468 100644 --- a/src/http/axum_implementation/services/announce.rs +++ b/src/http/axum_implementation/services/announce.rs @@ -26,37 +26,17 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::TrackerMode; use torrust_tracker_test_helpers::configuration; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; - use crate::tracker::statistics::Keeper; + use crate::tracker::services::common::tracker_factory; use crate::tracker::{peer, Tracker}; fn public_tracker() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.mode = 
TrackerMode::Public; - tracker_factory(configuration) - } - - fn tracker_factory(configuration: Configuration) -> Tracker { - // code-review: the tracker initialization is duplicated in many places. Consider make this function public. - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - match Tracker::new(Arc::new(configuration), Some(stats_event_sender), stats_repository) { - Ok(tracker) => tracker, - Err(error) => { - panic!("{}", error) - } - } + tracker_factory(configuration::ephemeral_mode_public().into()) } fn sample_info_hash() -> InfoHash { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index aae22f9b7..326afbf00 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -555,7 +555,6 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use torrust_tracker_primitives::TrackerMode; use torrust_tracker_test_helpers::configuration; use crate::protocol::clock::DurationSinceUnixEpoch; @@ -564,22 +563,16 @@ mod tests { use crate::tracker::services::common::tracker_factory; use crate::tracker::{TorrentsMetrics, Tracker}; - pub fn public_tracker() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.mode = TrackerMode::Public; - tracker_factory(Arc::new(configuration)) + fn public_tracker() -> Tracker { + tracker_factory(configuration::ephemeral_mode_public().into()) } - pub fn private_tracker() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.mode = TrackerMode::Private; - tracker_factory(Arc::new(configuration)) + fn private_tracker() -> Tracker { + tracker_factory(configuration::ephemeral_mode_private().into()) } - pub fn whitelisted_tracker() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.mode = TrackerMode::Listed; - tracker_factory(Arc::new(configuration)) + fn whitelisted_tracker() -> Tracker { + 
tracker_factory(configuration::ephemeral_mode_whitelisted().into()) } pub fn tracker_persisting_torrents_in_database() -> Tracker { diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index e47a89dd4..41b1184dc 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -239,11 +239,11 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::TrackerMode; use torrust_tracker_test_helpers::configuration; use crate::protocol::clock::{Current, Time}; - use crate::tracker::{self, peer, statistics}; + use crate::tracker::services::common::tracker_factory; + use crate::tracker::{self, peer}; fn tracker_configuration() -> Arc { Arc::new(default_testing_tracker_configuration()) @@ -253,24 +253,20 @@ mod tests { configuration::ephemeral() } - fn initialized_public_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); - initialized_tracker(configuration) + fn public_tracker() -> Arc { + initialized_tracker(configuration::ephemeral_mode_public().into()) } - fn initialized_private_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); - initialized_tracker(configuration) + fn private_tracker() -> Arc { + initialized_tracker(configuration::ephemeral_mode_private().into()) } - fn initialized_whitelisted_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); - initialized_tracker(configuration) + fn whitelisted_tracker() -> Arc { + initialized_tracker(configuration::ephemeral_mode_whitelisted().into()) } fn initialized_tracker(configuration: Arc) -> Arc { - let (stats_event_sender, stats_repository) = statistics::Keeper::new_active_instance(); - Arc::new(tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()) + 
tracker_factory(configuration).into() } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -344,11 +340,6 @@ mod tests { self } - pub fn with_mode(mut self, mode: TrackerMode) -> Self { - self.configuration.mode = mode; - self - } - pub fn into(self) -> Configuration { self.configuration } @@ -366,7 +357,7 @@ mod tests { use crate::tracker::{self, statistics}; use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_connect; - use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; + use crate::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; fn sample_connect_request() -> ConnectRequest { ConnectRequest { @@ -380,7 +371,7 @@ mod tests { transaction_id: TransactionId(0i32), }; - let response = handle_connect(sample_ipv4_remote_addr(), &request, &initialized_public_tracker()) + let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker()) .await .unwrap(); @@ -399,7 +390,7 @@ mod tests { transaction_id: TransactionId(0i32), }; - let response = handle_connect(sample_ipv4_remote_addr(), &request, &initialized_public_tracker()) + let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker()) .await .unwrap(); @@ -539,12 +530,12 @@ mod tests { use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ - initialized_public_tracker, sample_ipv4_socket_address, tracker_configuration, TorrentPeerBuilder, + public_tracker, sample_ipv4_socket_address, tracker_configuration, TorrentPeerBuilder, }; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { - let tracker = initialized_public_tracker(); + let tracker = public_tracker(); let client_ip = Ipv4Addr::new(126, 0, 0, 1); let client_port = 8080; @@ -581,9 +572,7 @@ mod tests { .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); - let response = 
handle_announce(remote_addr, &request, &initialized_public_tracker()) - .await - .unwrap(); + let response = handle_announce(remote_addr, &request, &public_tracker()).await.unwrap(); let empty_peer_vector: Vec> = vec![]; assert_eq!( @@ -604,7 +593,7 @@ mod tests { // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): // "Do note that most trackers will only honor the IP address field under limited circumstances." - let tracker = initialized_public_tracker(); + let tracker = public_tracker(); let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -660,7 +649,7 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { - let tracker = initialized_public_tracker(); + let tracker = public_tracker(); add_a_torrent_peer_using_ipv6(tracker.clone()).await; @@ -707,11 +696,11 @@ mod tests { use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::udp::handlers::tests::{initialized_public_tracker, TorrentPeerBuilder}; + use crate::udp::handlers::tests::{public_tracker, TorrentPeerBuilder}; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { - let tracker = initialized_public_tracker(); + let tracker = public_tracker(); let client_ip = Ipv4Addr::new(127, 0, 0, 1); let client_port = 8080; @@ -762,12 +751,12 @@ mod tests { use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ - initialized_public_tracker, sample_ipv6_remote_addr, tracker_configuration, TorrentPeerBuilder, + public_tracker, sample_ipv6_remote_addr, tracker_configuration, TorrentPeerBuilder, }; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { - let 
tracker = initialized_public_tracker(); + let tracker = public_tracker(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); @@ -808,9 +797,7 @@ mod tests { .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); - let response = handle_announce(remote_addr, &request, &initialized_public_tracker()) - .await - .unwrap(); + let response = handle_announce(remote_addr, &request, &public_tracker()).await.unwrap(); let empty_peer_vector: Vec> = vec![]; assert_eq!( @@ -831,7 +818,7 @@ mod tests { // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): // "Do note that most trackers will only honor the IP address field under limited circumstances." - let tracker = initialized_public_tracker(); + let tracker = public_tracker(); let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -890,7 +877,7 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { - let tracker = initialized_public_tracker(); + let tracker = public_tracker(); add_a_torrent_peer_using_ipv4(tracker.clone()).await; @@ -999,7 +986,7 @@ mod tests { use crate::tracker::{self, peer}; use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; + use crate::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { @@ -1022,9 +1009,7 @@ mod tests { info_hashes, }; - let response = handle_scrape(remote_addr, &request, &initialized_public_tracker()) - .await - .unwrap(); + let response = handle_scrape(remote_addr, &request, &public_tracker()).await.unwrap(); let expected_torrent_stats = vec![zeroed_torrent_statistics()]; @@ -1082,12 +1067,12 @@ mod tests { mod with_a_public_tracker { use 
aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - use crate::udp::handlers::tests::initialized_public_tracker; + use crate::udp::handlers::tests::public_tracker; use crate::udp::handlers::tests::scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}; #[tokio::test] async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { - let tracker = initialized_public_tracker(); + let tracker = public_tracker(); let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await); @@ -1109,11 +1094,11 @@ mod tests { use crate::udp::handlers::tests::scrape_request::{ add_a_sample_seeder_and_scrape, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; - use crate::udp::handlers::tests::{initialized_private_tracker, sample_ipv4_remote_addr}; + use crate::udp::handlers::tests::{private_tracker, sample_ipv4_remote_addr}; #[tokio::test] async fn should_return_zeroed_statistics_when_the_tracker_does_not_have_the_requested_torrent() { - let tracker = initialized_private_tracker(); + let tracker = private_tracker(); let remote_addr = sample_ipv4_remote_addr(); let non_existing_info_hash = InfoHash([0u8; 20]); @@ -1130,7 +1115,7 @@ mod tests { #[tokio::test] async fn should_return_zeroed_statistics_when_the_tracker_has_the_requested_torrent_because_authenticated_requests_are_not_supported_in_udp_tracker( ) { - let tracker = initialized_private_tracker(); + let tracker = private_tracker(); let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await).unwrap(); @@ -1147,11 +1132,11 @@ mod tests { use crate::udp::handlers::tests::scrape_request::{ add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; - use crate::udp::handlers::tests::{initialized_whitelisted_tracker, sample_ipv4_remote_addr}; + use crate::udp::handlers::tests::{sample_ipv4_remote_addr, whitelisted_tracker}; #[tokio::test] 
async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { - let tracker = initialized_whitelisted_tracker(); + let tracker = whitelisted_tracker(); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -1175,7 +1160,7 @@ mod tests { #[tokio::test] async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { - let tracker = initialized_whitelisted_tracker(); + let tracker = whitelisted_tracker(); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 3a5e84525..aea8fac37 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -2597,8 +2597,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let invalid_key = "INVALID_KEY"; @@ -2613,8 +2612,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; // The tracker does not have this key let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); @@ -2649,8 +2647,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let invalid_key = "INVALID_KEY"; From e548f686a435b7a024df7d3eab66a6c4be2a83b9 Mon Sep 
17 00:00:00 2001 From: Jose Celano Date: Thu, 9 Mar 2023 14:11:50 +0000 Subject: [PATCH 0460/1003] test(http): [#224] unit tests for scrape service --- .../axum_implementation/services/scrape.rs | 213 ++++++++++++++++++ src/tracker/peer.rs | 4 + 2 files changed, 217 insertions(+) diff --git a/src/http/axum_implementation/services/scrape.rs b/src/http/axum_implementation/services/scrape.rs index cfcba09f9..b48bab642 100644 --- a/src/http/axum_implementation/services/scrape.rs +++ b/src/http/axum_implementation/services/scrape.rs @@ -30,3 +30,216 @@ async fn send_scrape_event(original_peer_ip: &IpAddr, tracker: &Arc) { } } } + +#[cfg(test)] +mod tests { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_test_helpers::configuration; + + use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::services::common::tracker_factory; + use crate::tracker::{peer, Tracker}; + + fn public_tracker() -> Tracker { + tracker_factory(configuration::ephemeral_mode_public().into()) + } + + fn sample_info_hashes() -> Vec { + vec![sample_info_hash()] + } + + fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } + } + + mod with_real_data { + + use std::future; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::sync::Arc; + + use mockall::predicate::eq; + use torrust_tracker_test_helpers::configuration; + + use crate::http::axum_implementation::services::scrape::invoke; + use crate::http::axum_implementation::services::scrape::tests::{ 
+ public_tracker, sample_info_hash, sample_info_hashes, sample_peer, + }; + use crate::tracker::torrent::SwarmMetadata; + use crate::tracker::{statistics, ScrapeData, Tracker}; + + #[tokio::test] + async fn it_should_return_the_scrape_data_for_a_torrent() { + let tracker = Arc::new(public_tracker()); + + let info_hash = sample_info_hash(); + let info_hashes = vec![info_hash]; + + // Announce a new peer to force scrape data to contain not zeroed data + let mut peer = sample_peer(); + let original_peer_ip = peer.ip(); + tracker.announce(&info_hash, &mut peer, &original_peer_ip).await; + + let scrape_data = invoke(&tracker, &info_hashes, &original_peer_ip).await; + + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file( + &info_hash, + SwarmMetadata { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_send_the_tcp_4_scrape_event_when_the_peer_uses_ipv4() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + Arc::new(configuration::ephemeral()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); + + invoke(&tracker, &sample_info_hashes(), &peer_ip).await; + } + + #[tokio::test] + async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( 
+ Tracker::new( + Arc::new(configuration::ephemeral()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); + + invoke(&tracker, &sample_info_hashes(), &peer_ip).await; + } + } + + mod with_zeroed_data { + + use std::future; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::sync::Arc; + + use mockall::predicate::eq; + use torrust_tracker_test_helpers::configuration; + + use crate::http::axum_implementation::services::scrape::fake; + use crate::http::axum_implementation::services::scrape::tests::{ + public_tracker, sample_info_hash, sample_info_hashes, sample_peer, + }; + use crate::tracker::{statistics, ScrapeData, Tracker}; + + #[tokio::test] + async fn it_should_always_return_the_zeroed_scrape_data_for_a_torrent() { + let tracker = Arc::new(public_tracker()); + + let info_hash = sample_info_hash(); + let info_hashes = vec![info_hash]; + + // Announce a new peer to force scrape data to contain not zeroed data + let mut peer = sample_peer(); + let original_peer_ip = peer.ip(); + tracker.announce(&info_hash, &mut peer, &original_peer_ip).await; + + let scrape_data = fake(&tracker, &info_hashes, &original_peer_ip).await; + + let expected_scrape_data = ScrapeData::zeroed(&info_hashes); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_send_the_tcp_4_scrape_event_when_the_peer_uses_ipv4() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + Arc::new(configuration::ephemeral()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 
0, 0, 1)); + + fake(&tracker, &sample_info_hashes(), &peer_ip).await; + } + + #[tokio::test] + async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + Arc::new(configuration::ephemeral()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); + + fake(&tracker, &sample_info_hashes(), &peer_ip).await; + } + } +} diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index c6d87f036..015af12a3 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -40,6 +40,10 @@ impl Peer { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } + pub fn ip(&mut self) -> IpAddr { + self.peer_addr.ip() + } + pub fn change_ip(&mut self, new_ip: &IpAddr) { self.peer_addr = SocketAddr::new(*new_ip, self.peer_addr.port()); } From 4355c2a8a62a568814cdaa3f4576d22d70b7ba56 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 Mar 2023 19:28:10 +0000 Subject: [PATCH 0461/1003] feat: [#227] enable Axum HTTP tracker --- src/setup.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/setup.rs b/src/setup.rs index 5b51632a7..ee32f5a81 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -51,7 +51,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve if !http_tracker_config.enabled { continue; } - jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::Warp).await); + jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::Axum).await); } // Start HTTP API From 467d43f607afb1f4821adb85a2f60c8f114314ed Mon Sep 17 00:00:00 2001 
From: Jose Celano Date: Fri, 10 Mar 2023 08:48:57 +0000 Subject: [PATCH 0462/1003] refactor: [#229] remove Warp HTTP Tracker --- src/apis/responses.rs | 2 +- src/http/mod.rs | 2 - src/http/warp_implementation/error.rs | 39 - .../warp_implementation/filter_helpers.rs | 85 -- src/http/warp_implementation/filters.rs | 205 --- src/http/warp_implementation/handlers.rs | 207 --- src/http/warp_implementation/launcher.rs | 116 -- src/http/warp_implementation/mod.rs | 14 - src/http/warp_implementation/peer_builder.rs | 32 - src/http/warp_implementation/request.rs | 36 - src/http/warp_implementation/response.rs | 132 -- src/http/warp_implementation/routes.rs | 36 - src/jobs/http_tracker.rs | 62 +- tests/http/asserts.rs | 46 - tests/http/asserts_warp.rs | 34 - tests/http/mod.rs | 1 - tests/http/responses/announce_warp.rs | 30 - tests/http/responses/mod.rs | 1 - tests/http_tracker.rs | 1350 +---------------- tests/udp_tracker.rs | 2 + 20 files changed, 9 insertions(+), 2423 deletions(-) delete mode 100644 src/http/warp_implementation/error.rs delete mode 100644 src/http/warp_implementation/filter_helpers.rs delete mode 100644 src/http/warp_implementation/filters.rs delete mode 100644 src/http/warp_implementation/handlers.rs delete mode 100644 src/http/warp_implementation/launcher.rs delete mode 100644 src/http/warp_implementation/mod.rs delete mode 100644 src/http/warp_implementation/peer_builder.rs delete mode 100644 src/http/warp_implementation/request.rs delete mode 100644 src/http/warp_implementation/response.rs delete mode 100644 src/http/warp_implementation/routes.rs delete mode 100644 tests/http/asserts_warp.rs delete mode 100644 tests/http/responses/announce_warp.rs diff --git a/src/apis/responses.rs b/src/apis/responses.rs index 3b0946396..c0a6cbcf8 100644 --- a/src/apis/responses.rs +++ b/src/apis/responses.rs @@ -141,7 +141,7 @@ pub fn failed_to_reload_keys_response(e: E) -> Response { unhandled_rejection_response(format!("failed to reload keys: {e}")) } -/// This 
error response is to keep backward compatibility with the old Warp API. +/// This error response is to keep backward compatibility with the old API. /// It should be a plain text or json. #[must_use] pub fn unhandled_rejection_response(reason: String) -> Response { diff --git a/src/http/mod.rs b/src/http/mod.rs index b4841c0af..2309ee146 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -15,10 +15,8 @@ use serde::{Deserialize, Serialize}; pub mod axum_implementation; pub mod percent_encoding; pub mod server; -pub mod warp_implementation; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] pub enum Version { - Warp, Axum, } diff --git a/src/http/warp_implementation/error.rs b/src/http/warp_implementation/error.rs deleted file mode 100644 index 55b22c27a..000000000 --- a/src/http/warp_implementation/error.rs +++ /dev/null @@ -1,39 +0,0 @@ -use std::panic::Location; - -use thiserror::Error; -use torrust_tracker_located_error::LocatedError; -use warp::reject::Reject; - -#[derive(Error, Debug)] -pub enum Error { - #[error("tracker server error: {source}")] - TrackerError { - source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - }, - - #[error("internal server error: {message}, {location}")] - InternalServer { - location: &'static Location<'static>, - message: String, - }, - - #[error("no valid infohashes found, {location}")] - EmptyInfoHash { location: &'static Location<'static> }, - - #[error("peer_id is either missing or invalid, {location}")] - InvalidPeerId { location: &'static Location<'static> }, - - #[error("could not find remote address: {message}, {location}")] - AddressNotFound { - location: &'static Location<'static>, - message: String, - }, - - #[error("too many infohashes: {message}, {location}")] - TwoManyInfoHashes { - location: &'static Location<'static>, - message: String, - }, -} - -impl Reject for Error {} diff --git a/src/http/warp_implementation/filter_helpers.rs b/src/http/warp_implementation/filter_helpers.rs 
deleted file mode 100644 index 583d38352..000000000 --- a/src/http/warp_implementation/filter_helpers.rs +++ /dev/null @@ -1,85 +0,0 @@ -use std::net::{AddrParseError, IpAddr}; -use std::panic::Location; -use std::str::FromStr; - -use thiserror::Error; -use torrust_tracker_located_error::{Located, LocatedError}; - -#[derive(Error, Debug)] -pub enum XForwardedForParseError { - #[error("Empty X-Forwarded-For header value, {location}")] - EmptyValue { location: &'static Location<'static> }, - - #[error("Invalid IP in X-Forwarded-For header: {source}")] - InvalidIp { source: LocatedError<'static, AddrParseError> }, -} - -impl From for XForwardedForParseError { - #[track_caller] - fn from(err: AddrParseError) -> Self { - Self::InvalidIp { - source: Located(err).into(), - } - } -} - -/// It extracts the last IP address from the `X-Forwarded-For` http header value. -/// -/// # Errors -/// -/// Will return and error if the last IP in the `X-Forwarded-For` header is not a valid IP -pub fn maybe_rightmost_forwarded_ip(x_forwarded_for_value: &str) -> Result { - let mut x_forwarded_for_raw = x_forwarded_for_value.to_string(); - - // Remove whitespace chars - x_forwarded_for_raw.retain(|c| !c.is_whitespace()); - - // Get all forwarded IP's in a vec - let x_forwarded_ips: Vec<&str> = x_forwarded_for_raw.split(',').collect(); - - match x_forwarded_ips.last() { - Some(last_ip) => match IpAddr::from_str(last_ip) { - Ok(ip) => Ok(ip), - Err(err) => Err(err.into()), - }, - None => Err(XForwardedForParseError::EmptyValue { - location: Location::caller(), - }), - } -} - -#[cfg(test)] -mod tests { - - use std::net::IpAddr; - use std::str::FromStr; - - use super::maybe_rightmost_forwarded_ip; - - #[test] - fn the_last_forwarded_ip_can_be_parsed_from_the_the_corresponding_http_header() { - assert!(maybe_rightmost_forwarded_ip("").is_err()); - - assert!(maybe_rightmost_forwarded_ip("INVALID IP").is_err()); - - assert_eq!( - 
maybe_rightmost_forwarded_ip("2001:db8:85a3:8d3:1319:8a2e:370:7348").unwrap(), - IpAddr::from_str("2001:db8:85a3:8d3:1319:8a2e:370:7348").unwrap() - ); - - assert_eq!( - maybe_rightmost_forwarded_ip("203.0.113.195").unwrap(), - IpAddr::from_str("203.0.113.195").unwrap() - ); - - assert_eq!( - maybe_rightmost_forwarded_ip("203.0.113.195, 2001:db8:85a3:8d3:1319:8a2e:370:7348").unwrap(), - IpAddr::from_str("2001:db8:85a3:8d3:1319:8a2e:370:7348").unwrap() - ); - - assert_eq!( - maybe_rightmost_forwarded_ip("203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178").unwrap(), - IpAddr::from_str("150.172.238.178").unwrap() - ); - } -} diff --git a/src/http/warp_implementation/filters.rs b/src/http/warp_implementation/filters.rs deleted file mode 100644 index 06168d82a..000000000 --- a/src/http/warp_implementation/filters.rs +++ /dev/null @@ -1,205 +0,0 @@ -use std::convert::Infallible; -use std::net::{IpAddr, SocketAddr}; -use std::panic::Location; -use std::str::FromStr; -use std::sync::Arc; - -use warp::{reject, Filter, Rejection}; - -use super::error::Error; -use super::filter_helpers::maybe_rightmost_forwarded_ip; -use super::{request, WebResult}; -use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; -use crate::protocol::common::MAX_SCRAPE_TORRENTS; -use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth::Key; -use crate::tracker::{self, peer}; - -/// Pass Arc along -#[must_use] -pub fn with_tracker( - tracker: Arc, -) -> impl Filter,), Error = Infallible> + Clone { - warp::any().map(move || tracker.clone()) -} - -/// Check for infoHash -#[must_use] -pub fn with_info_hash() -> impl Filter,), Error = Rejection> + Clone { - warp::filters::query::raw().and_then(|q| async move { info_hashes(&q) }) -} - -/// Check for `PeerId` -#[must_use] -pub fn with_peer_id() -> impl Filter + Clone { - warp::filters::query::raw().and_then(|q| async move { peer_id(&q) }) -} - -/// Pass Arc along -#[must_use] -pub fn 
with_auth_key() -> impl Filter,), Error = Infallible> + Clone { - warp::path::param::() - .map(|key: String| { - let key = Key::from_str(&key); - match key { - Ok(id) => Some(id), - Err(_) => None, - } - }) - .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) -} - -/// Check for `PeerAddress` -#[must_use] -pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { - warp::addr::remote() - .and(warp::header::optional::("X-Forwarded-For")) - .map(move |remote_addr: Option, x_forwarded_for: Option| { - (on_reverse_proxy, remote_addr, x_forwarded_for) - }) - .and_then(|q| async move { peer_addr(q) }) -} - -/// Check for `request::Announce` -#[must_use] -pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { - warp::filters::query::query::() - .and(with_info_hash()) - .and(with_peer_id()) - .and(with_peer_addr(on_reverse_proxy)) - .and_then(|q, r, s, t| async move { announce_request(q, &r, s, t) }) -} - -/// Check for `ScrapeRequest` -#[must_use] -pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { - warp::any() - .and(with_info_hash()) - .and(with_peer_addr(on_reverse_proxy)) - .and_then(|q, r| async move { scrape_request(q, r) }) -} - -/// Parse `InfoHash` from raw query string -#[allow(clippy::ptr_arg)] -fn info_hashes(raw_query: &String) -> WebResult> { - let split_raw_query: Vec<&str> = raw_query.split('&').collect(); - let mut info_hashes: Vec = Vec::new(); - - for v in split_raw_query { - if v.contains("info_hash") { - // get raw percent encoded infohash - let raw_info_hash = v.split('=').collect::>()[1]; - - let info_hash = percent_decode_info_hash(raw_info_hash); - - if let Ok(ih) = info_hash { - info_hashes.push(ih); - } - } - } - - if info_hashes.len() > MAX_SCRAPE_TORRENTS as usize { - Err(reject::custom(Error::TwoManyInfoHashes { - location: Location::caller(), - message: format! 
{"found: {}, but limit is: {}",info_hashes.len(), MAX_SCRAPE_TORRENTS}, - })) - } else if info_hashes.is_empty() { - Err(reject::custom(Error::EmptyInfoHash { - location: Location::caller(), - })) - } else { - Ok(info_hashes) - } -} - -/// Parse `PeerId` from raw query string -#[allow(clippy::ptr_arg)] -fn peer_id(raw_query: &String) -> WebResult { - // put all query params in a vec - let split_raw_query: Vec<&str> = raw_query.split('&').collect(); - - let mut peer_id: Option = None; - - for v in split_raw_query { - // look for the peer_id param - if v.contains("peer_id") { - // get raw percent encoded peer id - let raw_peer_id = v.split('=').collect::>()[1]; - - if let Ok(id) = percent_decode_peer_id(raw_peer_id) { - peer_id = Some(id); - } else { - return Err(reject::custom(Error::InvalidPeerId { - location: Location::caller(), - })); - } - - break; - } - } - - match peer_id { - Some(id) => Ok(id), - None => Err(reject::custom(Error::InvalidPeerId { - location: Location::caller(), - })), - } -} - -/// Get peer IP from HTTP client IP or X-Forwarded-For HTTP header -fn peer_addr( - (on_reverse_proxy, remote_client_ip, maybe_x_forwarded_for): (bool, Option, Option), -) -> WebResult { - if on_reverse_proxy { - if maybe_x_forwarded_for.is_none() { - return Err(reject::custom(Error::AddressNotFound { - location: Location::caller(), - message: "must have a x-forwarded-for when using a reverse proxy".to_string(), - })); - } - - let x_forwarded_for = maybe_x_forwarded_for.unwrap(); - - maybe_rightmost_forwarded_ip(&x_forwarded_for).map_err(|e| { - reject::custom(Error::AddressNotFound { - location: Location::caller(), - message: format!("on remote proxy and unable to parse the last x-forwarded-ip: `{e}`, from `{x_forwarded_for}`"), - }) - }) - } else if remote_client_ip.is_none() { - return Err(reject::custom(Error::AddressNotFound { - location: Location::caller(), - message: "neither on have remote address or on a reverse proxy".to_string(), - })); - } else { - return 
Ok(remote_client_ip.unwrap().ip()); - } -} - -/// Parse `AnnounceRequest` from raw `AnnounceRequestQuery`, `InfoHash` and Option -#[allow(clippy::unnecessary_wraps)] -#[allow(clippy::ptr_arg)] -fn announce_request( - announce_request_query: request::AnnounceQuery, - info_hashes: &Vec, - peer_id: peer::Id, - peer_addr: IpAddr, -) -> WebResult { - Ok(request::Announce { - info_hash: info_hashes[0], - peer_addr, - downloaded: announce_request_query.downloaded.unwrap_or(0), - uploaded: announce_request_query.uploaded.unwrap_or(0), - peer_id, - port: announce_request_query.port, - left: announce_request_query.left.unwrap_or(0), - event: announce_request_query.event, - compact: announce_request_query.compact, - }) -} - -/// Parse `ScrapeRequest` from `InfoHash` -#[allow(clippy::unnecessary_wraps)] -fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { - Ok(request::Scrape { info_hashes, peer_addr }) -} diff --git a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs deleted file mode 100644 index f9aedeb8f..000000000 --- a/src/http/warp_implementation/handlers.rs +++ /dev/null @@ -1,207 +0,0 @@ -use std::collections::HashMap; -use std::convert::Infallible; -use std::net::IpAddr; -use std::panic::Location; -use std::sync::Arc; - -use log::debug; -use warp::http::Response; -use warp::{reject, Rejection, Reply}; - -use super::error::Error; -use super::{request, response, WebResult}; -use crate::http::warp_implementation::peer_builder; -use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth::Key; -use crate::tracker::{self, auth, peer, statistics, torrent}; - -/// Authenticate `InfoHash` using optional `auth::Key` -/// -/// # Errors -/// -/// Will return `ServerError` that wraps the `tracker::error::Error` if unable to `authenticate_request`. 
-pub async fn authenticate( - info_hash: &InfoHash, - auth_key: &Option, - tracker: Arc, -) -> Result<(), Error> { - tracker - .authenticate_request(info_hash, auth_key) - .await - .map_err(|e| Error::TrackerError { - source: (Arc::new(e) as Arc).into(), - }) -} - -/// # Errors -/// -/// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_announce_response`. -pub async fn handle_announce( - announce_request: request::Announce, - auth_key: Option, - tracker: Arc, -) -> WebResult { - debug!("http announce request: {:#?}", announce_request); - - let info_hash = announce_request.info_hash; - let remote_client_ip = announce_request.peer_addr; - - authenticate(&info_hash, &auth_key, tracker.clone()).await?; - - let mut peer = peer_builder::from_request(&announce_request, &remote_client_ip); - - // todo: we should be use the http::axum_implementation::services::announce::announce service, - // but this Warp implementation is going to be removed. - - let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; - - match remote_client_ip { - IpAddr::V4(_) => { - tracker.send_stats_event(statistics::Event::Tcp4Announce).await; - } - IpAddr::V6(_) => { - tracker.send_stats_event(statistics::Event::Tcp6Announce).await; - } - } - - send_announce_response( - &announce_request, - &response.swarm_stats, - &response.peers, - tracker.config.announce_interval, - tracker.config.min_announce_interval, - ) -} - -/// # Errors -/// -/// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. 
-pub async fn handle_scrape( - scrape_request: request::Scrape, - auth_key: Option, - tracker: Arc, -) -> WebResult { - let mut files: HashMap = HashMap::new(); - let db = tracker.get_torrents().await; - - for info_hash in &scrape_request.info_hashes { - let scrape_entry = match db.get(info_hash) { - Some(torrent_info) => { - if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { - let (seeders, completed, leechers) = torrent_info.get_stats(); - response::ScrapeEntry { - complete: seeders, - downloaded: completed, - incomplete: leechers, - } - } else { - response::ScrapeEntry { - complete: 0, - downloaded: 0, - incomplete: 0, - } - } - } - None => response::ScrapeEntry { - complete: 0, - downloaded: 0, - incomplete: 0, - }, - }; - - files.insert(*info_hash, scrape_entry); - } - - // send stats event - match scrape_request.peer_addr { - IpAddr::V4(_) => { - tracker.send_stats_event(statistics::Event::Tcp4Scrape).await; - } - IpAddr::V6(_) => { - tracker.send_stats_event(statistics::Event::Tcp6Scrape).await; - } - } - - send_scrape_response(files) -} - -/// Send announce response -#[allow(clippy::ptr_arg)] -fn send_announce_response( - announce_request: &request::Announce, - torrent_stats: &torrent::SwarmStats, - peers: &Vec, - interval: u32, - interval_min: u32, -) -> WebResult { - let http_peers: Vec = peers - .iter() - .map(|peer| response::Peer { - peer_id: peer.peer_id.to_string(), - ip: peer.peer_addr.ip(), - port: peer.peer_addr.port(), - }) - .collect(); - - let res = response::Announce { - interval, - interval_min, - complete: torrent_stats.seeders, - incomplete: torrent_stats.leechers, - peers: http_peers, - }; - - // check for compact response request - if let Some(1) = announce_request.compact { - match res.write_compact() { - Ok(body) => Ok(Response::new(body)), - Err(e) => Err(reject::custom(Error::InternalServer { - message: e.to_string(), - location: Location::caller(), - })), - } - } else { - Ok(Response::new(res.write().into())) - } 
-} - -/// Send scrape response -fn send_scrape_response(files: HashMap) -> WebResult { - let res = response::Scrape { files }; - - match res.write() { - Ok(body) => Ok(Response::new(body)), - Err(e) => Err(reject::custom(Error::InternalServer { - message: e.to_string(), - location: Location::caller(), - })), - } -} - -/// Handle all server errors and send error reply -/// -/// # Errors -/// -/// Will not return a error, `Infallible`, but instead convert the `ServerError` into a `Response`. -pub fn send_error(r: &Rejection) -> std::result::Result { - let warp_reject_error = r.find::(); - - let body = if let Some(error) = warp_reject_error { - debug!("{:?}", error); - response::Error { - failure_reason: error.to_string(), - } - .write() - } else { - response::Error { - failure_reason: Error::InternalServer { - message: "Undefined".to_string(), - location: Location::caller(), - } - .to_string(), - } - .write() - }; - - Ok(Response::new(body)) -} diff --git a/src/http/warp_implementation/launcher.rs b/src/http/warp_implementation/launcher.rs deleted file mode 100644 index 46ec2bf3c..000000000 --- a/src/http/warp_implementation/launcher.rs +++ /dev/null @@ -1,116 +0,0 @@ -use std::future::Future; -use std::net::SocketAddr; -use std::str::FromStr; -use std::sync::Arc; - -use futures::future::BoxFuture; - -use super::routes; -use crate::http::server::HttpServerLauncher; -use crate::tracker; -use crate::tracker::Tracker; - -#[derive(Debug)] -pub enum Error { - Error(String), -} - -pub struct Launcher; - -impl Launcher { - pub fn start_with_graceful_shutdown( - addr: SocketAddr, - tracker: Arc, - shutdown_signal: F, - ) -> (SocketAddr, BoxFuture<'static, ()>) - where - F: Future + Send + 'static, - { - let (bind_addr, server) = warp::serve(routes::routes(tracker)).bind_with_graceful_shutdown(addr, shutdown_signal); - - (bind_addr, Box::pin(server)) - } - - pub fn start_tls_with_graceful_shutdown( - addr: SocketAddr, - (ssl_cert_path, ssl_key_path): (&str, &str), - tracker: 
Arc, - shutdown_signal: F, - ) -> (SocketAddr, BoxFuture<'static, ()>) - where - F: Future + Send + 'static, - { - let (bind_addr, server) = warp::serve(routes::routes(tracker)) - .tls() - .cert_path(ssl_cert_path) - .key_path(ssl_key_path) - .bind_with_graceful_shutdown(addr, shutdown_signal); - - (bind_addr, Box::pin(server)) - } -} - -impl HttpServerLauncher for Launcher { - fn new() -> Self { - Self {} - } - - fn start_with_graceful_shutdown( - &self, - cfg: torrust_tracker_configuration::HttpTracker, - tracker: Arc, - shutdown_signal: F, - ) -> (SocketAddr, BoxFuture<'static, ()>) - where - F: Future + Send + 'static, - { - let addr = SocketAddr::from_str(&cfg.bind_address).expect("bind_address is not a valid SocketAddr."); - - if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (cfg.ssl_enabled, &cfg.ssl_cert_path, &cfg.ssl_key_path) { - Self::start_tls_with_graceful_shutdown(addr, (ssl_cert_path, ssl_key_path), tracker, shutdown_signal) - } else { - Self::start_with_graceful_shutdown(addr, tracker, shutdown_signal) - } - } -} - -/// Server that listens on HTTP, needs a `tracker::TorrentTracker` -#[derive(Clone)] -pub struct Http { - tracker: Arc, -} - -impl Http { - #[must_use] - pub fn new(tracker: Arc) -> Http { - Http { tracker } - } - - /// Start the `HttpServer` - pub fn start(&self, socket_addr: SocketAddr) -> impl warp::Future { - let (_addr, server) = - warp::serve(routes::routes(self.tracker.clone())).bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - }); - - server - } - - /// Start the `HttpServer` in TLS mode - pub fn start_tls( - &self, - socket_addr: SocketAddr, - ssl_cert_path: String, - ssl_key_path: String, - ) -> impl warp::Future { - let (_addr, server) = warp::serve(routes::routes(self.tracker.clone())) - .tls() - .cert_path(ssl_cert_path) - .key_path(ssl_key_path) - .bind_with_graceful_shutdown(socket_addr, async move { - 
tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - }); - - server - } -} diff --git a/src/http/warp_implementation/mod.rs b/src/http/warp_implementation/mod.rs deleted file mode 100644 index c0e046f4f..000000000 --- a/src/http/warp_implementation/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -pub mod error; -pub mod filter_helpers; -pub mod filters; -pub mod handlers; -pub mod launcher; -pub mod peer_builder; -pub mod request; -pub mod response; -pub mod routes; - -use warp::Rejection; - -pub type Bytes = u64; -pub type WebResult = std::result::Result; diff --git a/src/http/warp_implementation/peer_builder.rs b/src/http/warp_implementation/peer_builder.rs deleted file mode 100644 index 70cf7b508..000000000 --- a/src/http/warp_implementation/peer_builder.rs +++ /dev/null @@ -1,32 +0,0 @@ -use std::net::{IpAddr, SocketAddr}; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - -use super::request::Announce; -use crate::protocol::clock::{Current, Time}; -use crate::tracker::peer::Peer; - -#[must_use] -pub fn from_request(announce_request: &Announce, peer_ip: &IpAddr) -> Peer { - let event: AnnounceEvent = if let Some(event) = &announce_request.event { - match event.as_ref() { - "started" => AnnounceEvent::Started, - "stopped" => AnnounceEvent::Stopped, - "completed" => AnnounceEvent::Completed, - _ => AnnounceEvent::None, - } - } else { - AnnounceEvent::None - }; - - #[allow(clippy::cast_possible_truncation)] - Peer { - peer_id: announce_request.peer_id, - peer_addr: SocketAddr::new(*peer_ip, announce_request.port), - updated: Current::now(), - uploaded: NumberOfBytes(i128::from(announce_request.uploaded) as i64), - downloaded: NumberOfBytes(i128::from(announce_request.downloaded) as i64), - left: NumberOfBytes(i128::from(announce_request.left) as i64), - event, - } -} diff --git a/src/http/warp_implementation/request.rs b/src/http/warp_implementation/request.rs deleted file mode 100644 index f666b48c5..000000000 --- 
a/src/http/warp_implementation/request.rs +++ /dev/null @@ -1,36 +0,0 @@ -use std::net::IpAddr; - -use serde::Deserialize; - -use crate::http::warp_implementation::Bytes; -use crate::protocol::info_hash::InfoHash; -use crate::tracker::peer; - -#[derive(Deserialize)] -pub struct AnnounceQuery { - pub downloaded: Option, - pub uploaded: Option, - pub key: Option, - pub port: u16, - pub left: Option, - pub event: Option, - pub compact: Option, -} - -#[derive(Debug)] -pub struct Announce { - pub info_hash: InfoHash, - pub peer_addr: IpAddr, - pub downloaded: Bytes, - pub uploaded: Bytes, - pub peer_id: peer::Id, - pub port: u16, - pub left: Bytes, - pub event: Option, - pub compact: Option, -} - -pub struct Scrape { - pub info_hashes: Vec, - pub peer_addr: IpAddr, -} diff --git a/src/http/warp_implementation/response.rs b/src/http/warp_implementation/response.rs deleted file mode 100644 index 1e9f7fa09..000000000 --- a/src/http/warp_implementation/response.rs +++ /dev/null @@ -1,132 +0,0 @@ -use std::collections::HashMap; -use std::io::Write; -use std::net::IpAddr; - -use serde::{self, Deserialize, Serialize}; - -use crate::protocol::info_hash::InfoHash; - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct Peer { - pub peer_id: String, - pub ip: IpAddr, - pub port: u16, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct Announce { - pub interval: u32, - #[serde(rename = "min interval")] - pub interval_min: u32, - //pub tracker_id: String, - pub complete: u32, - pub incomplete: u32, - pub peers: Vec, -} - -impl Announce { - /// # Panics - /// - /// It would panic if the `Announce` struct would contain an inappropriate type. - #[must_use] - pub fn write(&self) -> String { - serde_bencode::to_string(&self).unwrap() - } - - /// # Errors - /// - /// Will return `Err` if internally interrupted. 
- pub fn write_compact(&self) -> Result, Box> { - let mut peers_v4: Vec = Vec::new(); - let mut peers_v6: Vec = Vec::new(); - - for peer in &self.peers { - match peer.ip { - IpAddr::V4(ip) => { - peers_v4.write_all(&u32::from(ip).to_be_bytes())?; - peers_v4.write_all(&peer.port.to_be_bytes())?; - } - IpAddr::V6(ip) => { - peers_v6.write_all(&u128::from(ip).to_be_bytes())?; - peers_v6.write_all(&peer.port.to_be_bytes())?; - } - } - } - - let mut bytes: Vec = Vec::new(); - bytes.write_all(b"d8:intervali")?; - bytes.write_all(self.interval.to_string().as_bytes())?; - bytes.write_all(b"e12:min intervali")?; - bytes.write_all(self.interval_min.to_string().as_bytes())?; - bytes.write_all(b"e8:completei")?; - bytes.write_all(self.complete.to_string().as_bytes())?; - bytes.write_all(b"e10:incompletei")?; - bytes.write_all(self.incomplete.to_string().as_bytes())?; - bytes.write_all(b"e5:peers")?; - bytes.write_all(peers_v4.len().to_string().as_bytes())?; - bytes.write_all(b":")?; - bytes.write_all(peers_v4.as_slice())?; - bytes.write_all(b"e6:peers6")?; - bytes.write_all(peers_v6.len().to_string().as_bytes())?; - bytes.write_all(b":")?; - bytes.write_all(peers_v6.as_slice())?; - bytes.write_all(b"e")?; - - Ok(bytes) - } -} - -#[derive(Serialize)] -pub struct ScrapeEntry { - pub complete: u32, - pub downloaded: u32, - pub incomplete: u32, -} - -#[derive(Serialize)] -pub struct Scrape { - pub files: HashMap, -} - -impl Scrape { - /// # Errors - /// - /// Will return `Err` if internally interrupted. 
- pub fn write(&self) -> Result, Box> { - let mut bytes: Vec = Vec::new(); - - bytes.write_all(b"d5:filesd")?; - - for (info_hash, scrape_response_entry) in &self.files { - bytes.write_all(b"20:")?; - bytes.write_all(&info_hash.0)?; - bytes.write_all(b"d8:completei")?; - bytes.write_all(scrape_response_entry.complete.to_string().as_bytes())?; - bytes.write_all(b"e10:downloadedi")?; - bytes.write_all(scrape_response_entry.downloaded.to_string().as_bytes())?; - bytes.write_all(b"e10:incompletei")?; - bytes.write_all(scrape_response_entry.incomplete.to_string().as_bytes())?; - bytes.write_all(b"ee")?; - } - - bytes.write_all(b"ee")?; - - Ok(bytes) - } -} - -#[derive(Serialize)] -pub struct Error { - #[serde(rename = "failure reason")] - pub failure_reason: String, -} - -impl Error { - /// # Panics - /// - /// It would panic if the `Error` struct would contain an inappropriate type. - #[must_use] - pub fn write(&self) -> String { - serde_bencode::to_string(&self).unwrap() - } -} diff --git a/src/http/warp_implementation/routes.rs b/src/http/warp_implementation/routes.rs deleted file mode 100644 index c46c502e4..000000000 --- a/src/http/warp_implementation/routes.rs +++ /dev/null @@ -1,36 +0,0 @@ -use std::convert::Infallible; -use std::sync::Arc; - -use warp::{Filter, Rejection}; - -use super::filters::{with_announce_request, with_auth_key, with_scrape_request, with_tracker}; -use super::handlers::{handle_announce, handle_scrape, send_error}; -use crate::tracker; - -/// All routes -#[must_use] -pub fn routes(tracker: Arc) -> impl Filter + Clone { - announce(tracker.clone()) - .or(scrape(tracker)) - .recover(|q| async move { send_error(&q) }) -} - -/// GET /announce or /announce/ -fn announce(tracker: Arc) -> impl Filter + Clone { - warp::path::path("announce") - .and(warp::filters::method::get()) - .and(with_announce_request(tracker.config.on_reverse_proxy)) - .and(with_auth_key()) - .and(with_tracker(tracker)) - .and_then(handle_announce) -} - -/// GET /scrape/ -fn 
scrape(tracker: Arc) -> impl Filter + Clone { - warp::path::path("scrape") - .and(warp::filters::method::get()) - .and(with_scrape_request(tracker.config.on_reverse_proxy)) - .and(with_auth_key()) - .and(with_tracker(tracker)) - .and_then(handle_scrape) -} diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 40caa8e88..70f512a39 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -1,14 +1,12 @@ -use std::net::SocketAddr; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; -use log::{info, warn}; +use log::info; use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpTracker; use crate::http::axum_implementation::launcher; -use crate::http::warp_implementation::launcher::Http; use crate::http::Version; use crate::tracker; @@ -17,68 +15,10 @@ pub struct ServerJobStarted(); pub async fn start_job(config: &HttpTracker, tracker: Arc, version: Version) -> JoinHandle<()> { match version { - Version::Warp => start_warp(config, tracker.clone()).await, Version::Axum => start_axum(config, tracker.clone()).await, } } -/// # Panics -/// -/// It would panic if the `config::HttpTracker` struct would contain inappropriate values. 
-async fn start_warp(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { - let bind_addr = config - .bind_address - .parse::() - .expect("HTTP tracker server bind_address invalid."); - let ssl_enabled = config.ssl_enabled; - let ssl_cert_path = config.ssl_cert_path.clone(); - let ssl_key_path = config.ssl_key_path.clone(); - - let (tx, rx) = oneshot::channel::(); - - // Run the HTTP tracker server - let join_handle = tokio::spawn(async move { - let http_tracker = Http::new(tracker); - - if !ssl_enabled { - info!("Starting HTTP tracker server on: http://{}", bind_addr); - - let handle = http_tracker.start(bind_addr); - - tx.send(ServerJobStarted()) - .expect("HTTP tracker server should not be dropped"); - - handle.await; - - info!("HTTP tracker server on http://{} stopped", bind_addr); - } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting HTTPS server on: https://{}", bind_addr); - - let handle = http_tracker.start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap()); - - tx.send(ServerJobStarted()) - .expect("HTTP tracker server should not be dropped"); - - handle.await; - - info!("HTTP tracker server on https://{} stopped", bind_addr); - } else { - warn!( - "Could not start HTTPS tracker server on: {}, missing SSL Cert or Key!", - bind_addr - ); - } - }); - - // Wait until the HTTPS tracker server job is running - match rx.await { - Ok(_msg) => info!("HTTP tracker server started"), - Err(e) => panic!("HTTP tracker server was dropped: {e}"), - } - - join_handle -} - /// # Panics /// /// It would panic if the `config::HttpTracker` struct would contain inappropriate values. 
diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 0d5441f89..932b48be4 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -108,48 +108,12 @@ pub async fn assert_missing_query_params_for_scrape_request_error_response(respo // Other errors -pub async fn assert_internal_server_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error(&response.text().await.unwrap(), "internal server", Location::caller()); -} - -pub async fn assert_invalid_info_hash_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error( - &response.text().await.unwrap(), - "no valid infohashes found", - Location::caller(), - ); -} - -pub async fn assert_invalid_peer_id_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error( - &response.text().await.unwrap(), - "peer_id is either missing or invalid", - Location::caller(), - ); -} - pub async fn assert_torrent_not_in_whitelist_error_response(response: Response) { assert_eq!(response.status(), 200); assert_bencoded_error(&response.text().await.unwrap(), "is not whitelisted", Location::caller()); } -pub async fn assert_could_not_find_remote_address_on_xff_header_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error( - &response.text().await.unwrap(), - "could not find remote address: must have a x-forwarded-for when using a reverse proxy", - Location::caller(), - ); -} - pub async fn assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response: Response) { assert_eq!(response.status(), 200); @@ -160,16 +124,6 @@ pub async fn assert_could_not_find_remote_address_on_x_forwarded_for_header_erro ); } -pub async fn assert_invalid_remote_address_on_xff_header_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error( - &response.text().await.unwrap(), - "could not find remote address: 
on remote proxy and unable to parse the last x-forwarded-ip", - Location::caller(), - ); -} - pub async fn assert_cannot_parse_query_param_error_response(response: Response, failure: &str) { assert_cannot_parse_query_params_error_response(response, &format!(": {failure}")).await; } diff --git a/tests/http/asserts_warp.rs b/tests/http/asserts_warp.rs deleted file mode 100644 index d1a936efa..000000000 --- a/tests/http/asserts_warp.rs +++ /dev/null @@ -1,34 +0,0 @@ -use std::panic::Location; - -/// todo: this mod should be removed when we remove the Warp implementation for the HTTP tracker. -use reqwest::Response; - -use super::responses::announce_warp::WarpAnnounce; -use crate::http::asserts::assert_bencoded_error; - -pub async fn assert_warp_announce_response(response: Response, expected_announce_response: &WarpAnnounce) { - assert_eq!(response.status(), 200); - - let body = response.text().await.unwrap(); - - let announce_response: WarpAnnounce = serde_bencode::from_str(&body) - .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{:#?}\"", &body)); - - assert_eq!(announce_response, *expected_announce_response); -} - -pub async fn assert_warp_peer_not_authenticated_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error( - &response.text().await.unwrap(), - "The peer is not authenticated", - Location::caller(), - ); -} - -pub async fn assert_warp_invalid_authentication_key_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error(&response.text().await.unwrap(), "is not valid", Location::caller()); -} diff --git a/tests/http/mod.rs b/tests/http/mod.rs index 771145f46..b0d896c99 100644 --- a/tests/http/mod.rs +++ b/tests/http/mod.rs @@ -1,5 +1,4 @@ pub mod asserts; -pub mod asserts_warp; pub mod client; pub mod requests; pub mod responses; diff --git a/tests/http/responses/announce_warp.rs b/tests/http/responses/announce_warp.rs deleted file mode 
100644 index 0fcf05eb8..000000000 --- a/tests/http/responses/announce_warp.rs +++ /dev/null @@ -1,30 +0,0 @@ -/// todo: this mod should be removed when we remove the Warp implementation for the HTTP tracker. -use serde::{self, Deserialize, Serialize}; -use torrust_tracker::tracker::peer::Peer; - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct WarpAnnounce { - pub complete: u32, - pub incomplete: u32, - pub interval: u32, - #[serde(rename = "min interval")] - pub min_interval: u32, - pub peers: Vec, // Peers using IPV4 -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct WarpDictionaryPeer { - pub ip: String, - pub peer_id: String, - pub port: u16, -} - -impl From for WarpDictionaryPeer { - fn from(peer: Peer) -> Self { - Self { - peer_id: peer.peer_id.to_string(), - ip: peer.peer_addr.ip().to_string(), - port: peer.peer_addr.port(), - } - } -} diff --git a/tests/http/responses/mod.rs b/tests/http/responses/mod.rs index aecb53fed..bdc689056 100644 --- a/tests/http/responses/mod.rs +++ b/tests/http/responses/mod.rs @@ -1,4 +1,3 @@ pub mod announce; -pub mod announce_warp; pub mod error; pub mod scrape; diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index aea8fac37..1b07b987a 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1,1368 +1,28 @@ /// Integration tests for HTTP tracker server /// -/// Warp version: /// ```text -/// cargo test `warp_test_env` -- --nocapture -/// ``` -/// -/// Axum version (WIP): -/// ```text -/// cargo test `warp_test_env` -- --nocapture +/// cargo test `http_tracker_server` -- --nocapture /// ``` mod common; mod http; pub type Axum = torrust_tracker::http::axum_implementation::launcher::Launcher; -pub type Warp = torrust_tracker::http::warp_implementation::launcher::Launcher; -mod test_env_test_environment { +mod test_environment_for_http_tracker { use torrust_tracker_test_helpers::configuration; use crate::http::test_environment::running_test_environment; - use crate::{Axum, 
Warp}; + use crate::Axum; #[tokio::test] - async fn should_be_able_to_start_and_stop_a_test_environment_using_axum() { + async fn should_be_started_and_stopped() { let test_env = running_test_environment::(configuration::ephemeral()).await; test_env.stop().await; } - - #[tokio::test] - async fn should_be_able_to_start_and_stop_a_test_environment_using_warp() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - test_env.stop().await; - } } -mod warp_test_env { - - mod for_all_config_modes { - - mod running_on_reverse_proxy { - use torrust_tracker_test_helpers::configuration; - - use crate::http::asserts::{ - assert_could_not_find_remote_address_on_xff_header_error_response, - assert_invalid_remote_address_on_xff_header_error_response, - }; - use crate::http::client::Client; - use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::Warp; - - #[tokio::test] - async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { - // If the tracker is running behind a reverse proxy, the peer IP is the - // last IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy client. 
- - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - - let params = QueryBuilder::default().query().params(); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_could_not_find_remote_address_on_xff_header_error_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - - let params = QueryBuilder::default().query().params(); - - let response = Client::new(*test_env.bind_address()) - .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") - .await; - - assert_invalid_remote_address_on_xff_header_error_response(response).await; - - test_env.stop().await; - } - } - - mod receiving_an_announce_request { - - // Announce request documentation: - // - // BEP 03. The BitTorrent Protocol Specification - // https://www.bittorrent.org/beps/bep_0003.html - // - // BEP 23. 
Tracker Returns Compact Peer Lists - // https://www.bittorrent.org/beps/bep_0023.html - // - // Vuze (bittorrent client) docs: - // https://wiki.vuze.com/w/Announce - - use std::net::{IpAddr, Ipv6Addr}; - use std::str::FromStr; - - use local_ip_address::local_ip; - use reqwest::Response; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; - use crate::http::asserts::{ - assert_announce_response, assert_compact_announce_response, assert_empty_announce_response, - assert_internal_server_error_response, assert_invalid_info_hash_error_response, - assert_invalid_peer_id_error_response, assert_is_announce_response, - }; - use crate::http::asserts_warp::assert_warp_announce_response; - use crate::http::client::Client; - use crate::http::requests::announce::{Compact, QueryBuilder}; - use crate::http::responses; - use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList}; - use crate::http::responses::announce_warp::{WarpAnnounce, WarpDictionaryPeer}; - use crate::http::test_environment::running_test_environment; - use crate::Warp; - - #[tokio::test] - async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - params.remove_optional_params(); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_is_announce_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_url_query_component_is_empty() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let response = Client::new(*test_env.bind_address()).get("announce").await; - - assert_internal_server_error_response(response).await; - } - - #[tokio::test] - 
async fn should_fail_when_a_mandatory_field_is_missing() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - // Without `info_hash` param - - let mut params = QueryBuilder::default().query().params(); - - params.info_hash = None; - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_invalid_info_hash_error_response(response).await; - - // Without `peer_id` param - - let mut params = QueryBuilder::default().query().params(); - - params.peer_id = None; - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_invalid_peer_id_error_response(response).await; - - // Without `port` param - - let mut params = QueryBuilder::default().query().params(); - - params.port = None; - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_internal_server_error_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - for invalid_value in &invalid_info_hashes() { - params.set("info_hash", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_invalid_info_hash_error_response(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_fail_when_the_peer_address_param_is_invalid() { - // AnnounceQuery does not even contain the `peer_addr` - // The peer IP is obtained in two ways: - // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP if there. - // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request header is tracker is running `on_reverse_proxy`. 
- - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_is_announce_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_downloaded_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("downloaded", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_internal_server_error_response(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_uploaded_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("uploaded", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_internal_server_error_response(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_peer_id_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = [ - "0", - "-1", - "1.1", - "a", - "-qB0000000000000000", // 19 bytes - "-qB000000000000000000", // 21 bytes - ]; - - for invalid_value in invalid_values { - params.set("peer_id", invalid_value); - - let response = 
Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_invalid_peer_id_error_response(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_port_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("port", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_internal_server_error_response(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_left_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("left", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_internal_server_error_response(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_fail_when_the_event_param_is_invalid() { - // All invalid values are ignored as if the `event` param were empty - - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = [ - "0", - "-1", - "1.1", - "a", - "Started", // It should be lowercase - "Stopped", // It should be lowercase - "Completed", // It should be lowercase - ]; - - for invalid_value in invalid_values { - params.set("event", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_is_announce_response(response).await; - } - - test_env.stop().await; - } - - 
#[tokio::test] - async fn should_not_fail_when_the_compact_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("compact", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_internal_server_error_response(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) - .query(), - ) - .await; - - assert_announce_response( - response, - &Announce { - complete: 1, // the peer for this test - incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, - peers: vec![], - }, - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_list_of_previously_announced_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); - - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; - - // Announce the new Peer 2. 
This new peer is non included on the response peer list - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .query(), - ) - .await; - - // It should only contain the previously announced peer - assert_warp_announce_response( - response, - &WarpAnnounce { - complete: 2, - incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, - peers: vec![WarpDictionaryPeer::from(previously_announced_peer)], - }, - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let peer = PeerBuilder::default().build(); - - // Add a peer - test_env.add_torrent_peer(&info_hash, &peer).await; - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer.peer_id) - .query(); - - assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); - - let response = Client::new(*test_env.bind_address()).announce(&announce_query).await; - - assert_empty_announce_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_compact_response() { - // Tracker Returns Compact Peer Lists - // https://www.bittorrent.org/beps/bep_0023.html - - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); - - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, 
&previously_announced_peer).await; - - // Announce the new Peer 2 accepting compact responses - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .with_compact(Compact::Accepted) - .query(), - ) - .await; - - let expected_response = responses::announce::Compact { - complete: 2, - incomplete: 0, - interval: 120, - min_interval: 120, - peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), - }; - - assert_compact_announce_response(response, &expected_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_return_the_compact_response_by_default() { - // code-review: the HTTP tracker does not return the compact response by default if the "compact" - // param is not provided in the announce URL. The BEP 23 suggest to do so. - - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); - - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; - - // Announce the new Peer 2 without passing the "compact" param - // By default it should respond with the compact peer list - // https://www.bittorrent.org/beps/bep_0023.html - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .without_compact() - .query(), - ) - .await; - - assert!(!is_a_compact_announce_response(response).await); - - test_env.stop().await; - } - - async fn is_a_compact_announce_response(response: Response) -> bool { - let bytes = response.bytes().await.unwrap(); - let compact_announce = 
serde_bencode::from_bytes::(&bytes); - compact_announce.is_ok() - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp4_connections_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_connections_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { - // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) - .query(), - ) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_connections_handled, 0); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp4_announces_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_announces_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { - // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) - .query(), - ) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_announces_handled, 0); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let client_ip = local_ip().unwrap(); - - let client = Client::bind(*test_env.bind_address(), client_ip); - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); - - client.announce(&announce_query).await; - - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; - - assert_eq!(peer_addr.ip(), client_ip); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - - test_env.stop().await; - } - - #[tokio::test] - async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( - ) { - /* We assume that both the client and tracker share the same public IP. 
- - client <-> tracker <-> Internet - 127.0.0.1 external_ip = "2.137.87.41" - */ - - let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( - IpAddr::from_str("2.137.87.41").unwrap(), - )) - .await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); - let client_ip = loopback_ip; - - let client = Client::bind(*test_env.bind_address(), client_ip); - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); - - client.announce(&announce_query).await; - - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; - - assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - - test_env.stop().await; - } - - #[tokio::test] - async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( - ) { - /* We assume that both the client and tracker share the same public IP. 
- - client <-> tracker <-> Internet - ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" - */ - - let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( - IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), - )) - .await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); - let client_ip = loopback_ip; - - let client = Client::bind(*test_env.bind_address(), client_ip); - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); - - client.announce(&announce_query).await; - - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; - - assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - - test_env.stop().await; - } - - #[tokio::test] - async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( - ) { - /* - client <-> http proxy <-> tracker <-> Internet - ip: header: config: peer addr: - 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 - */ - - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - let client = Client::new(*test_env.bind_address()); - - let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); - - client - .announce_with_header( - &announce_query, - "X-Forwarded-For", - "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", - ) - .await; - - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; - - assert_eq!(peer_addr.ip(), 
IpAddr::from_str("150.172.238.178").unwrap()); - - test_env.stop().await; - } - } - - mod receiving_an_scrape_request { - - // Scrape documentation: - // - // BEP 48. Tracker Protocol Extension: Scrape - // https://www.bittorrent.org/beps/bep_0048.html - // - // Vuze (bittorrent client) docs: - // https://wiki.vuze.com/w/Scrape - - use std::net::IpAddr; - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; - use crate::http::asserts::{assert_internal_server_error_response, assert_scrape_response}; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::requests::scrape::QueryBuilder; - use crate::http::responses::scrape::{self, File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::Warp; - - #[tokio::test] - async fn should_fail_when_the_request_is_empty() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let response = Client::new(*test_env.bind_address()).get("scrape").await; - - assert_internal_server_error_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let mut params = QueryBuilder::default().query().params(); - - for invalid_value in &invalid_info_hashes() { - params.set_one_info_hash_param(invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - // code-review: it's not returning the invalid info hash error - assert_internal_server_error_response(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn 
should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_no_bytes_pending_to_download() - .build(), - ) - .await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 1, - downloaded: 0, - incomplete: 0, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let 
test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_accept_multiple_infohashes() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .add_info_hash(&info_hash1) - .add_info_hash(&info_hash2) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file(info_hash1.bytes(), File::zeroed()) - .add_file(info_hash2.bytes(), File::zeroed()) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp4_scrapes_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn 
should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_scrapes_handled, 1); - - drop(stats); - - test_env.stop().await; - } - } - } - - mod configured_as_whitelisted { - - mod and_receiving_an_announce_request { - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker_test_helpers::configuration; - - use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; - use crate::http::client::Client; - use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::Warp; - - #[tokio::test] - async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - .await; - - assert_torrent_not_in_whitelist_error_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - - async fn should_allow_announcing_a_whitelisted_torrent() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .tracker - .add_torrent_to_whitelist(&info_hash) - .await - .expect("should add the torrent to the 
whitelist"); - - let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - .await; - - assert_is_announce_response(response).await; - - test_env.stop().await; - } - } - - mod receiving_an_scrape_request { - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::assert_scrape_response; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::Warp; - - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - 
.with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - test_env - .tracker - .add_torrent_to_whitelist(&info_hash) - .await - .expect("should add the torrent to the whitelist"); - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - } - } - - mod configured_as_private { - - mod and_receiving_an_announce_request { - use std::str::FromStr; - use std::time::Duration; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::Key; - use torrust_tracker_test_helpers::configuration; - - use crate::http::asserts::assert_is_announce_response; - use crate::http::asserts_warp::{ - assert_warp_invalid_authentication_key_error_response, assert_warp_peer_not_authenticated_error_response, - }; - use crate::http::client::Client; - use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::Warp; - - #[tokio::test] - async fn should_respond_to_authenticated_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), key.id()) - .announce(&QueryBuilder::default().query()) - .await; - - assert_is_announce_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let test_env = 
running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - .await; - - assert_warp_peer_not_authenticated_error_response(response).await; - } - - #[tokio::test] - async fn should_fail_if_the_peer_authentication_key_is_not_valid() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - // The tracker does not have this key - let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), unregistered_key) - .announce(&QueryBuilder::default().query()) - .await; - - assert_warp_invalid_authentication_key_error_response(response).await; - - test_env.stop().await; - } - } - - mod receiving_an_scrape_request { - - use std::str::FromStr; - use std::time::Duration; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::Key; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::assert_scrape_response; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::Warp; - - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - 
.await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), key.id()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { - // There is not authentication error - - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let false_key: Key 
= "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), false_key) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - } - } - - mod configured_as_private_and_whitelisted { - - mod and_receiving_an_announce_request {} - - mod receiving_an_scrape_request {} - } -} - -mod axum_test_env { - - // WIP: migration HTTP from Warp to Axum +mod http_tracker { mod for_all_config_modes { diff --git a/tests/udp_tracker.rs b/tests/udp_tracker.rs index 0f9283a8b..3fe78c03d 100644 --- a/tests/udp_tracker.rs +++ b/tests/udp_tracker.rs @@ -1,6 +1,8 @@ /// Integration tests for UDP tracker server /// +/// ```text /// cargo test `udp_tracker_server` -- --nocapture +/// ``` extern crate rand; mod common; From 96d5333021f33938c7faf9e2c9f7d09f314aaaae Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Mar 2023 09:04:06 +0000 Subject: [PATCH 0463/1003] refactor: [#229] rename Axum HTTP tracker to v1 (version1) --- src/http/mod.rs | 4 +- .../extractors/announce_request.rs | 10 +- .../extractors/authentication_key.rs | 6 +- .../extractors/client_ip_sources.rs | 2 +- .../extractors/mod.rs | 0 .../extractors/scrape_request.rs | 10 +- .../handlers/announce.rs | 42 +- .../handlers/common/auth.rs | 2 +- .../handlers/common/mod.rs | 0 .../handlers/common/peer_ip.rs | 8 +- .../handlers/mod.rs | 0 .../handlers/scrape.rs | 34 +- .../{axum_implementation => v1}/launcher.rs | 0 src/http/{axum_implementation => v1}/mod.rs | 0 src/http/{axum_implementation => v1}/query.rs | 8 +- .../requests/announce.rs | 12 +- .../requests/mod.rs | 0 .../requests/scrape.rs | 12 +- .../responses/announce.rs | 4 +- .../responses/error.rs | 0 .../responses/mod.rs | 0 
.../responses/scrape.rs | 2 +- .../{axum_implementation => v1}/routes.rs | 0 .../services/announce.rs | 4 +- .../services/mod.rs | 0 .../services/peer_ip_resolver.rs | 4 +- .../services/scrape.rs | 12 +- src/jobs/http_tracker.rs | 6 +- src/setup.rs | 2 +- tests/http_tracker.rs | 2155 +++++++++-------- 30 files changed, 1169 insertions(+), 1170 deletions(-) rename src/http/{axum_implementation => v1}/extractors/announce_request.rs (90%) rename src/http/{axum_implementation => v1}/extractors/authentication_key.rs (94%) rename src/http/{axum_implementation => v1}/extractors/client_ip_sources.rs (93%) rename src/http/{axum_implementation => v1}/extractors/mod.rs (100%) rename src/http/{axum_implementation => v1}/extractors/scrape_request.rs (92%) rename src/http/{axum_implementation => v1}/handlers/announce.rs (85%) rename src/http/{axum_implementation => v1}/handlers/common/auth.rs (95%) rename src/http/{axum_implementation => v1}/handlers/common/mod.rs (100%) rename src/http/{axum_implementation => v1}/handlers/common/peer_ip.rs (75%) rename src/http/{axum_implementation => v1}/handlers/mod.rs (100%) rename src/http/{axum_implementation => v1}/handlers/scrape.rs (85%) rename src/http/{axum_implementation => v1}/launcher.rs (100%) rename src/http/{axum_implementation => v1}/mod.rs (100%) rename src/http/{axum_implementation => v1}/query.rs (97%) rename src/http/{axum_implementation => v1}/requests/announce.rs (97%) rename src/http/{axum_implementation => v1}/requests/mod.rs (100%) rename src/http/{axum_implementation => v1}/requests/scrape.rs (90%) rename src/http/{axum_implementation => v1}/responses/announce.rs (98%) rename src/http/{axum_implementation => v1}/responses/error.rs (100%) rename src/http/{axum_implementation => v1}/responses/mod.rs (100%) rename src/http/{axum_implementation => v1}/responses/scrape.rs (97%) rename src/http/{axum_implementation => v1}/routes.rs (100%) rename src/http/{axum_implementation => v1}/services/announce.rs (97%) rename 
src/http/{axum_implementation => v1}/services/mod.rs (100%) rename src/http/{axum_implementation => v1}/services/peer_ip_resolver.rs (95%) rename src/http/{axum_implementation => v1}/services/scrape.rs (94%) diff --git a/src/http/mod.rs b/src/http/mod.rs index 2309ee146..b8aa6b19f 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -12,11 +12,11 @@ use serde::{Deserialize, Serialize}; -pub mod axum_implementation; pub mod percent_encoding; pub mod server; +pub mod v1; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] pub enum Version { - Axum, + V1, } diff --git a/src/http/axum_implementation/extractors/announce_request.rs b/src/http/v1/extractors/announce_request.rs similarity index 90% rename from src/http/axum_implementation/extractors/announce_request.rs rename to src/http/v1/extractors/announce_request.rs index 1680cd15c..c0b0451b3 100644 --- a/src/http/axum_implementation/extractors/announce_request.rs +++ b/src/http/v1/extractors/announce_request.rs @@ -5,9 +5,9 @@ use axum::extract::FromRequestParts; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use crate::http::axum_implementation::query::Query; -use crate::http::axum_implementation::requests::announce::{Announce, ParseAnnounceQueryError}; -use crate::http::axum_implementation::responses; +use crate::http::v1::query::Query; +use crate::http::v1::requests::announce::{Announce, ParseAnnounceQueryError}; +use crate::http::v1::responses; pub struct ExtractRequest(pub Announce); @@ -53,8 +53,8 @@ mod tests { use std::str::FromStr; use super::extract_announce_from; - use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; - use crate::http::axum_implementation::responses::error::Error; + use crate::http::v1::requests::announce::{Announce, Compact, Event}; + use crate::http::v1::responses::error::Error; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; diff --git 
a/src/http/axum_implementation/extractors/authentication_key.rs b/src/http/v1/extractors/authentication_key.rs similarity index 94% rename from src/http/axum_implementation/extractors/authentication_key.rs rename to src/http/v1/extractors/authentication_key.rs index 8ffc4ff12..3b2680a5f 100644 --- a/src/http/axum_implementation/extractors/authentication_key.rs +++ b/src/http/v1/extractors/authentication_key.rs @@ -8,8 +8,8 @@ use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; use serde::Deserialize; -use crate::http::axum_implementation::handlers::common::auth; -use crate::http::axum_implementation::responses; +use crate::http::v1::handlers::common::auth; +use crate::http::v1::responses; use crate::tracker::auth::Key; pub struct Extract(pub Key); @@ -85,7 +85,7 @@ fn custom_error(rejection: &PathRejection) -> responses::error::Error { mod tests { use super::parse_key; - use crate::http::axum_implementation::responses::error::Error; + use crate::http::v1::responses::error::Error; fn assert_error_response(error: &Error, error_message: &str) { assert!( diff --git a/src/http/axum_implementation/extractors/client_ip_sources.rs b/src/http/v1/extractors/client_ip_sources.rs similarity index 93% rename from src/http/axum_implementation/extractors/client_ip_sources.rs rename to src/http/v1/extractors/client_ip_sources.rs index b41478c22..c8b3659f3 100644 --- a/src/http/axum_implementation/extractors/client_ip_sources.rs +++ b/src/http/v1/extractors/client_ip_sources.rs @@ -8,7 +8,7 @@ use axum::http::request::Parts; use axum::response::Response; use axum_client_ip::RightmostXForwardedFor; -use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; +use crate::http::v1::services::peer_ip_resolver::ClientIpSources; pub struct Extract(pub ClientIpSources); diff --git a/src/http/axum_implementation/extractors/mod.rs b/src/http/v1/extractors/mod.rs similarity index 100% rename from src/http/axum_implementation/extractors/mod.rs 
rename to src/http/v1/extractors/mod.rs diff --git a/src/http/axum_implementation/extractors/scrape_request.rs b/src/http/v1/extractors/scrape_request.rs similarity index 92% rename from src/http/axum_implementation/extractors/scrape_request.rs rename to src/http/v1/extractors/scrape_request.rs index 998728f59..d63470897 100644 --- a/src/http/axum_implementation/extractors/scrape_request.rs +++ b/src/http/v1/extractors/scrape_request.rs @@ -5,9 +5,9 @@ use axum::extract::FromRequestParts; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use crate::http::axum_implementation::query::Query; -use crate::http::axum_implementation::requests::scrape::{ParseScrapeQueryError, Scrape}; -use crate::http::axum_implementation::responses; +use crate::http::v1::query::Query; +use crate::http::v1::requests::scrape::{ParseScrapeQueryError, Scrape}; +use crate::http::v1::responses; pub struct ExtractRequest(pub Scrape); @@ -53,8 +53,8 @@ mod tests { use std::str::FromStr; use super::extract_scrape_from; - use crate::http::axum_implementation::requests::scrape::Scrape; - use crate::http::axum_implementation::responses::error::Error; + use crate::http::v1::requests::scrape::Scrape; + use crate::http::v1::responses::error::Error; use crate::protocol::info_hash::InfoHash; struct TestInfoHash { diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/v1/handlers/announce.rs similarity index 85% rename from src/http/axum_implementation/handlers/announce.rs rename to src/http/v1/handlers/announce.rs index ebb8c8586..1f10c3fa4 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/v1/handlers/announce.rs @@ -7,14 +7,14 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; -use crate::http::axum_implementation::extractors::authentication_key::Extract as ExtractKey; -use 
crate::http::axum_implementation::extractors::client_ip_sources::Extract as ExtractClientIpSources; -use crate::http::axum_implementation::handlers::common::auth; -use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; -use crate::http::axum_implementation::responses::{self, announce}; -use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; -use crate::http::axum_implementation::services::{self, peer_ip_resolver}; +use crate::http::v1::extractors::announce_request::ExtractRequest; +use crate::http::v1::extractors::authentication_key::Extract as ExtractKey; +use crate::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; +use crate::http::v1::handlers::common::auth; +use crate::http::v1::requests::announce::{Announce, Compact, Event}; +use crate::http::v1::responses::{self, announce}; +use crate::http::v1::services::peer_ip_resolver::ClientIpSources; +use crate::http::v1::services::{self, peer_ip_resolver}; use crate::protocol::clock::{Current, Time}; use crate::tracker::auth::Key; use crate::tracker::peer::Peer; @@ -141,9 +141,9 @@ mod tests { use torrust_tracker_test_helpers::configuration; - use crate::http::axum_implementation::requests::announce::Announce; - use crate::http::axum_implementation::responses; - use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + use crate::http::v1::requests::announce::Announce; + use crate::http::v1::responses; + use crate::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; use crate::tracker::{peer, Tracker}; @@ -197,8 +197,8 @@ mod tests { use std::sync::Arc; use super::{private_tracker, sample_announce_request, sample_client_ip_sources}; - use crate::http::axum_implementation::handlers::announce::handle_announce; - use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; + use 
crate::http::v1::handlers::announce::handle_announce; + use crate::http::v1::handlers::announce::tests::assert_error_response; use crate::tracker::auth; #[tokio::test] @@ -238,8 +238,8 @@ mod tests { use std::sync::Arc; use super::{sample_announce_request, sample_client_ip_sources, whitelisted_tracker}; - use crate::http::axum_implementation::handlers::announce::handle_announce; - use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; + use crate::http::v1::handlers::announce::handle_announce; + use crate::http::v1::handlers::announce::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { @@ -266,9 +266,9 @@ mod tests { use std::sync::Arc; use super::{sample_announce_request, tracker_on_reverse_proxy}; - use crate::http::axum_implementation::handlers::announce::handle_announce; - use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; - use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + use crate::http::v1::handlers::announce::handle_announce; + use crate::http::v1::handlers::announce::tests::assert_error_response; + use crate::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { @@ -295,9 +295,9 @@ mod tests { use std::sync::Arc; use super::{sample_announce_request, tracker_not_on_reverse_proxy}; - use crate::http::axum_implementation::handlers::announce::handle_announce; - use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; - use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + use crate::http::v1::handlers::announce::handle_announce; + use crate::http::v1::handlers::announce::tests::assert_error_response; + use crate::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn 
it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { diff --git a/src/http/axum_implementation/handlers/common/auth.rs b/src/http/v1/handlers/common/auth.rs similarity index 95% rename from src/http/axum_implementation/handlers/common/auth.rs rename to src/http/v1/handlers/common/auth.rs index 30971725a..938fc3f01 100644 --- a/src/http/axum_implementation/handlers/common/auth.rs +++ b/src/http/v1/handlers/common/auth.rs @@ -2,7 +2,7 @@ use std::panic::Location; use thiserror::Error; -use crate::http::axum_implementation::responses; +use crate::http::v1::responses; use crate::tracker::auth; #[derive(Debug, Error)] diff --git a/src/http/axum_implementation/handlers/common/mod.rs b/src/http/v1/handlers/common/mod.rs similarity index 100% rename from src/http/axum_implementation/handlers/common/mod.rs rename to src/http/v1/handlers/common/mod.rs diff --git a/src/http/axum_implementation/handlers/common/peer_ip.rs b/src/http/v1/handlers/common/peer_ip.rs similarity index 75% rename from src/http/axum_implementation/handlers/common/peer_ip.rs rename to src/http/v1/handlers/common/peer_ip.rs index df10e5eb1..e182c716b 100644 --- a/src/http/axum_implementation/handlers/common/peer_ip.rs +++ b/src/http/v1/handlers/common/peer_ip.rs @@ -1,5 +1,5 @@ -use crate::http::axum_implementation::responses; -use crate::http::axum_implementation::services::peer_ip_resolver::PeerIpResolutionError; +use crate::http::v1::responses; +use crate::http::v1::services::peer_ip_resolver::PeerIpResolutionError; impl From for responses::error::Error { fn from(err: PeerIpResolutionError) -> Self { @@ -13,8 +13,8 @@ impl From for responses::error::Error { mod tests { use std::panic::Location; - use crate::http::axum_implementation::responses; - use crate::http::axum_implementation::services::peer_ip_resolver::PeerIpResolutionError; + use crate::http::v1::responses; + use crate::http::v1::services::peer_ip_resolver::PeerIpResolutionError; fn assert_error_response(error: 
&responses::error::Error, error_message: &str) { assert!( diff --git a/src/http/axum_implementation/handlers/mod.rs b/src/http/v1/handlers/mod.rs similarity index 100% rename from src/http/axum_implementation/handlers/mod.rs rename to src/http/v1/handlers/mod.rs diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/v1/handlers/scrape.rs similarity index 85% rename from src/http/axum_implementation/handlers/scrape.rs rename to src/http/v1/handlers/scrape.rs index fd316882d..50f92cd36 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/v1/handlers/scrape.rs @@ -4,12 +4,12 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use crate::http::axum_implementation::extractors::authentication_key::Extract as ExtractKey; -use crate::http::axum_implementation::extractors::client_ip_sources::Extract as ExtractClientIpSources; -use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; -use crate::http::axum_implementation::requests::scrape::Scrape; -use crate::http::axum_implementation::services::peer_ip_resolver::{self, ClientIpSources}; -use crate::http::axum_implementation::{responses, services}; +use crate::http::v1::extractors::authentication_key::Extract as ExtractKey; +use crate::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; +use crate::http::v1::extractors::scrape_request::ExtractRequest; +use crate::http::v1::requests::scrape::Scrape; +use crate::http::v1::services::peer_ip_resolver::{self, ClientIpSources}; +use crate::http::v1::{responses, services}; use crate::tracker::auth::Key; use crate::tracker::{ScrapeData, Tracker}; @@ -99,9 +99,9 @@ mod tests { use torrust_tracker_test_helpers::configuration; - use crate::http::axum_implementation::requests::scrape::Scrape; - use crate::http::axum_implementation::responses; - use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + use 
crate::http::v1::requests::scrape::Scrape; + use crate::http::v1::responses; + use crate::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; use crate::tracker::Tracker; @@ -147,7 +147,7 @@ mod tests { use std::sync::Arc; use super::{private_tracker, sample_client_ip_sources, sample_scrape_request}; - use crate::http::axum_implementation::handlers::scrape::handle_scrape; + use crate::http::v1::handlers::scrape::handle_scrape; use crate::tracker::{auth, ScrapeData}; #[tokio::test] @@ -189,7 +189,7 @@ mod tests { use std::sync::Arc; use super::{sample_client_ip_sources, sample_scrape_request, whitelisted_tracker}; - use crate::http::axum_implementation::handlers::scrape::handle_scrape; + use crate::http::v1::handlers::scrape::handle_scrape; use crate::tracker::ScrapeData; #[tokio::test] @@ -212,9 +212,9 @@ mod tests { use std::sync::Arc; use super::{sample_scrape_request, tracker_on_reverse_proxy}; - use crate::http::axum_implementation::handlers::scrape::handle_scrape; - use crate::http::axum_implementation::handlers::scrape::tests::assert_error_response; - use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + use crate::http::v1::handlers::scrape::handle_scrape; + use crate::http::v1::handlers::scrape::tests::assert_error_response; + use crate::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { @@ -240,9 +240,9 @@ mod tests { use std::sync::Arc; use super::{sample_scrape_request, tracker_not_on_reverse_proxy}; - use crate::http::axum_implementation::handlers::scrape::handle_scrape; - use crate::http::axum_implementation::handlers::scrape::tests::assert_error_response; - use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + use crate::http::v1::handlers::scrape::handle_scrape; + use 
crate::http::v1::handlers::scrape::tests::assert_error_response; + use crate::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { diff --git a/src/http/axum_implementation/launcher.rs b/src/http/v1/launcher.rs similarity index 100% rename from src/http/axum_implementation/launcher.rs rename to src/http/v1/launcher.rs diff --git a/src/http/axum_implementation/mod.rs b/src/http/v1/mod.rs similarity index 100% rename from src/http/axum_implementation/mod.rs rename to src/http/v1/mod.rs diff --git a/src/http/axum_implementation/query.rs b/src/http/v1/query.rs similarity index 97% rename from src/http/axum_implementation/query.rs rename to src/http/v1/query.rs index 8b01e9db7..45484ea38 100644 --- a/src/http/axum_implementation/query.rs +++ b/src/http/v1/query.rs @@ -174,7 +174,7 @@ impl std::fmt::Display for FieldValuePairSet { mod tests { mod url_query { - use crate::http::axum_implementation::query::Query; + use crate::http::v1::query::Query; #[test] fn should_parse_the_query_params_from_an_url_query_string() { @@ -227,7 +227,7 @@ mod tests { } mod should_allow_more_than_one_value_for_the_same_param { - use crate::http::axum_implementation::query::Query; + use crate::http::v1::query::Query; #[test] fn instantiated_from_a_vector() { @@ -249,7 +249,7 @@ mod tests { } mod should_be_displayed { - use crate::http::axum_implementation::query::Query; + use crate::http::v1::query::Query; #[test] fn with_one_param() { @@ -270,7 +270,7 @@ mod tests { } mod param_name_value_pair { - use crate::http::axum_implementation::query::NameValuePair; + use crate::http::v1::query::NameValuePair; #[test] fn should_parse_a_single_query_param() { diff --git a/src/http/axum_implementation/requests/announce.rs b/src/http/v1/requests/announce.rs similarity index 97% rename from src/http/axum_implementation/requests/announce.rs rename to src/http/v1/requests/announce.rs index 
6e357ea6d..eeab97d5f 100644 --- a/src/http/axum_implementation/requests/announce.rs +++ b/src/http/v1/requests/announce.rs @@ -5,9 +5,9 @@ use std::str::FromStr; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; -use crate::http::axum_implementation::query::{ParseQueryError, Query}; -use crate::http::axum_implementation::responses; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; +use crate::http::v1::query::{ParseQueryError, Query}; +use crate::http::v1::responses; use crate::protocol::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; @@ -280,8 +280,8 @@ mod tests { mod announce_request { - use crate::http::axum_implementation::query::Query; - use crate::http::axum_implementation::requests::announce::{ + use crate::http::v1::query::Query; + use crate::http::v1::requests::announce::{ Announce, Compact, Event, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, }; use crate::protocol::info_hash::InfoHash; @@ -350,8 +350,8 @@ mod tests { mod when_it_is_instantiated_from_the_url_query_params { - use crate::http::axum_implementation::query::Query; - use crate::http::axum_implementation::requests::announce::{ + use crate::http::v1::query::Query; + use crate::http::v1::requests::announce::{ Announce, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, }; diff --git a/src/http/axum_implementation/requests/mod.rs b/src/http/v1/requests/mod.rs similarity index 100% rename from src/http/axum_implementation/requests/mod.rs rename to src/http/v1/requests/mod.rs diff --git a/src/http/axum_implementation/requests/scrape.rs b/src/http/v1/requests/scrape.rs similarity index 90% rename from src/http/axum_implementation/requests/scrape.rs rename to src/http/v1/requests/scrape.rs index 505be566e..6257f0733 100644 --- a/src/http/axum_implementation/requests/scrape.rs +++ b/src/http/v1/requests/scrape.rs @@ -3,9 +3,9 @@ use 
std::panic::Location; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; -use crate::http::axum_implementation::query::Query; -use crate::http::axum_implementation::responses; use crate::http::percent_encoding::percent_decode_info_hash; +use crate::http::v1::query::Query; +use crate::http::v1::responses; use crate::protocol::info_hash::{ConversionError, InfoHash}; pub type NumberOfBytes = i64; @@ -85,8 +85,8 @@ mod tests { mod scrape_request { - use crate::http::axum_implementation::query::Query; - use crate::http::axum_implementation::requests::scrape::{Scrape, INFO_HASH}; + use crate::http::v1::query::Query; + use crate::http::v1::requests::scrape::{Scrape, INFO_HASH}; use crate::protocol::info_hash::InfoHash; #[test] @@ -107,8 +107,8 @@ mod tests { mod when_it_is_instantiated_from_the_url_query_params { - use crate::http::axum_implementation::query::Query; - use crate::http::axum_implementation::requests::scrape::{Scrape, INFO_HASH}; + use crate::http::v1::query::Query; + use crate::http::v1::requests::scrape::{Scrape, INFO_HASH}; #[test] fn it_should_fail_if_the_query_does_not_include_the_info_hash_param() { diff --git a/src/http/axum_implementation/responses/announce.rs b/src/http/v1/responses/announce.rs similarity index 98% rename from src/http/axum_implementation/responses/announce.rs rename to src/http/v1/responses/announce.rs index 81651767b..8b178ff7e 100644 --- a/src/http/axum_implementation/responses/announce.rs +++ b/src/http/v1/responses/announce.rs @@ -8,7 +8,7 @@ use bip_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut} use serde::{self, Deserialize, Serialize}; use thiserror::Error; -use crate::http::axum_implementation::responses; +use crate::http::v1::responses; use crate::tracker::{self, AnnounceData}; /// Normal (non compact) "announce" response @@ -250,7 +250,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use super::{NonCompact, Peer}; - use 
crate::http::axum_implementation::responses::announce::{Compact, CompactPeer}; + use crate::http::v1::responses::announce::{Compact, CompactPeer}; // Some ascii values used in tests: // diff --git a/src/http/axum_implementation/responses/error.rs b/src/http/v1/responses/error.rs similarity index 100% rename from src/http/axum_implementation/responses/error.rs rename to src/http/v1/responses/error.rs diff --git a/src/http/axum_implementation/responses/mod.rs b/src/http/v1/responses/mod.rs similarity index 100% rename from src/http/axum_implementation/responses/mod.rs rename to src/http/v1/responses/mod.rs diff --git a/src/http/axum_implementation/responses/scrape.rs b/src/http/v1/responses/scrape.rs similarity index 97% rename from src/http/axum_implementation/responses/scrape.rs rename to src/http/v1/responses/scrape.rs index 3fc34a0e5..5cbe6502e 100644 --- a/src/http/axum_implementation/responses/scrape.rs +++ b/src/http/v1/responses/scrape.rs @@ -55,7 +55,7 @@ impl IntoResponse for Bencoded { mod tests { mod scrape_response { - use crate::http::axum_implementation::responses::scrape::Bencoded; + use crate::http::v1::responses::scrape::Bencoded; use crate::protocol::info_hash::InfoHash; use crate::tracker::torrent::SwarmMetadata; use crate::tracker::ScrapeData; diff --git a/src/http/axum_implementation/routes.rs b/src/http/v1/routes.rs similarity index 100% rename from src/http/axum_implementation/routes.rs rename to src/http/v1/routes.rs diff --git a/src/http/axum_implementation/services/announce.rs b/src/http/v1/services/announce.rs similarity index 97% rename from src/http/axum_implementation/services/announce.rs rename to src/http/v1/services/announce.rs index 73d6ed468..a8b9f0d06 100644 --- a/src/http/axum_implementation/services/announce.rs +++ b/src/http/v1/services/announce.rs @@ -77,8 +77,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use 
crate::http::axum_implementation::services::announce::invoke; - use crate::http::axum_implementation::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; + use crate::http::v1::services::announce::invoke; + use crate::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; use crate::tracker::peer::Peer; use crate::tracker::torrent::SwarmStats; use crate::tracker::{statistics, AnnounceData, Tracker}; diff --git a/src/http/axum_implementation/services/mod.rs b/src/http/v1/services/mod.rs similarity index 100% rename from src/http/axum_implementation/services/mod.rs rename to src/http/v1/services/mod.rs diff --git a/src/http/axum_implementation/services/peer_ip_resolver.rs b/src/http/v1/services/peer_ip_resolver.rs similarity index 95% rename from src/http/axum_implementation/services/peer_ip_resolver.rs rename to src/http/v1/services/peer_ip_resolver.rs index fae1e4ec0..c7bc183b4 100644 --- a/src/http/axum_implementation/services/peer_ip_resolver.rs +++ b/src/http/v1/services/peer_ip_resolver.rs @@ -73,7 +73,7 @@ mod tests { use std::str::FromStr; use super::invoke; - use crate::http::axum_implementation::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; + use crate::http::v1::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; #[test] fn it_should_get_the_peer_ip_from_the_connection_info() { @@ -112,7 +112,7 @@ mod tests { use std::net::IpAddr; use std::str::FromStr; - use crate::http::axum_implementation::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; + use crate::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; #[test] fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { diff --git a/src/http/axum_implementation/services/scrape.rs b/src/http/v1/services/scrape.rs similarity index 94% rename from src/http/axum_implementation/services/scrape.rs rename to 
src/http/v1/services/scrape.rs index b48bab642..b6f319375 100644 --- a/src/http/axum_implementation/services/scrape.rs +++ b/src/http/v1/services/scrape.rs @@ -77,10 +77,8 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_test_helpers::configuration; - use crate::http::axum_implementation::services::scrape::invoke; - use crate::http::axum_implementation::services::scrape::tests::{ - public_tracker, sample_info_hash, sample_info_hashes, sample_peer, - }; + use crate::http::v1::services::scrape::invoke; + use crate::http::v1::services::scrape::tests::{public_tracker, sample_info_hash, sample_info_hashes, sample_peer}; use crate::tracker::torrent::SwarmMetadata; use crate::tracker::{statistics, ScrapeData, Tracker}; @@ -169,10 +167,8 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_test_helpers::configuration; - use crate::http::axum_implementation::services::scrape::fake; - use crate::http::axum_implementation::services::scrape::tests::{ - public_tracker, sample_info_hash, sample_info_hashes, sample_peer, - }; + use crate::http::v1::services::scrape::fake; + use crate::http::v1::services::scrape::tests::{public_tracker, sample_info_hash, sample_info_hashes, sample_peer}; use crate::tracker::{statistics, ScrapeData, Tracker}; #[tokio::test] diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 70f512a39..e0091958b 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -6,7 +6,7 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpTracker; -use crate::http::axum_implementation::launcher; +use crate::http::v1::launcher; use crate::http::Version; use crate::tracker; @@ -15,14 +15,14 @@ pub struct ServerJobStarted(); pub async fn start_job(config: &HttpTracker, tracker: Arc, version: Version) -> JoinHandle<()> { match version { - Version::Axum => start_axum(config, tracker.clone()).await, + Version::V1 => start_v1(config, tracker.clone()).await, } } /// # Panics /// 
/// It would panic if the `config::HttpTracker` struct would contain inappropriate values. -async fn start_axum(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { +async fn start_v1(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config .bind_address .parse::() diff --git a/src/setup.rs b/src/setup.rs index ee32f5a81..86de0723c 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -51,7 +51,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve if !http_tracker_config.enabled { continue; } - jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::Axum).await); + jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::V1).await); } // Start HTTP API diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 1b07b987a..730da93d5 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -6,1435 +6,1438 @@ mod common; mod http; -pub type Axum = torrust_tracker::http::axum_implementation::launcher::Launcher; +pub type V1 = torrust_tracker::http::v1::launcher::Launcher; -mod test_environment_for_http_tracker { - use torrust_tracker_test_helpers::configuration; +mod http_tracker { - use crate::http::test_environment::running_test_environment; - use crate::Axum; + mod v1 { - #[tokio::test] - async fn should_be_started_and_stopped() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + use torrust_tracker_test_helpers::configuration; - test_env.stop().await; - } -} + use crate::http::test_environment::running_test_environment; + use crate::V1; -mod http_tracker { + #[tokio::test] + async fn test_environment_should_be_started_and_stopped() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - mod for_all_config_modes { + test_env.stop().await; + } - mod and_running_on_reverse_proxy { - use torrust_tracker_test_helpers::configuration; + mod for_all_config_modes { - use 
crate::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; - use crate::http::client::Client; - use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::Axum; + mod and_running_on_reverse_proxy { + use torrust_tracker_test_helpers::configuration; - #[tokio::test] - async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { - // If the tracker is running behind a reverse proxy, the peer IP is the - // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. + use crate::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; + use crate::http::client::Client; + use crate::http::requests::announce::QueryBuilder; + use crate::http::test_environment::running_test_environment; + use crate::V1; - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + #[tokio::test] + async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { + // If the tracker is running behind a reverse proxy, the peer IP is the + // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. 
- let params = QueryBuilder::default().query().params(); + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let params = QueryBuilder::default().query().params(); - assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - test_env.stop().await; - } + assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; - #[tokio::test] - async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + test_env.stop().await; + } - let params = QueryBuilder::default().query().params(); + #[tokio::test] + async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - let response = Client::new(*test_env.bind_address()) - .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") - .await; + let params = QueryBuilder::default().query().params(); - assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; + let response = Client::new(*test_env.bind_address()) + .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") + .await; - test_env.stop().await; - } - } + assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; - mod receiving_an_announce_request { - - // Announce request documentation: - // - // BEP 03. The BitTorrent Protocol Specification - // https://www.bittorrent.org/beps/bep_0003.html - // - // BEP 23. 
Tracker Returns Compact Peer Lists - // https://www.bittorrent.org/beps/bep_0023.html - // - // Vuze (bittorrent client) docs: - // https://wiki.vuze.com/w/Announce - - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::str::FromStr; - - use local_ip_address::local_ip; - use reqwest::Response; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; - use crate::http::asserts::{ - assert_announce_response, assert_bad_announce_request_error_response, - assert_cannot_parse_query_param_error_response, assert_cannot_parse_query_params_error_response, - assert_compact_announce_response, assert_empty_announce_response, assert_is_announce_response, - assert_missing_query_params_for_announce_request_error_response, - }; - use crate::http::client::Client; - use crate::http::requests::announce::{Compact, QueryBuilder}; - use crate::http::responses; - use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; - use crate::http::test_environment::running_test_environment; - use crate::Axum; - - #[tokio::test] - async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - params.remove_optional_params(); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_is_announce_response(response).await; - - test_env.stop().await; + test_env.stop().await; + } } - #[tokio::test] - async fn should_fail_when_the_url_query_component_is_empty() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + mod receiving_an_announce_request { - let response = Client::new(*test_env.bind_address()).get("announce").await; + // Announce request documentation: + // + 
// BEP 03. The BitTorrent Protocol Specification + // https://www.bittorrent.org/beps/bep_0003.html + // + // BEP 23. Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Announce + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::str::FromStr; + + use local_ip_address::local_ip; + use reqwest::Response; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; + + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::http::asserts::{ + assert_announce_response, assert_bad_announce_request_error_response, + assert_cannot_parse_query_param_error_response, assert_cannot_parse_query_params_error_response, + assert_compact_announce_response, assert_empty_announce_response, assert_is_announce_response, + assert_missing_query_params_for_announce_request_error_response, + }; + use crate::http::client::Client; + use crate::http::requests::announce::{Compact, QueryBuilder}; + use crate::http::responses; + use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; + use crate::http::test_environment::running_test_environment; + use crate::V1; - assert_missing_query_params_for_announce_request_error_response(response).await; + #[tokio::test] + async fn should_respond_if_only_the_mandatory_fields_are_provided() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - test_env.stop().await; - } + let mut params = QueryBuilder::default().query().params(); - #[tokio::test] - async fn should_fail_when_url_query_parameters_are_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + params.remove_optional_params(); - let invalid_query_param = "a=b=c"; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let 
response = Client::new(*test_env.bind_address()) - .get(&format!("announce?{invalid_query_param}")) - .await; + assert_is_announce_response(response).await; - assert_cannot_parse_query_param_error_response(response, "invalid param a=b=c").await; + test_env.stop().await; + } - test_env.stop().await; - } + #[tokio::test] + async fn should_fail_when_the_url_query_component_is_empty() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - #[tokio::test] - async fn should_fail_when_a_mandatory_field_is_missing() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let response = Client::new(*test_env.bind_address()).get("announce").await; - // Without `info_hash` param + assert_missing_query_params_for_announce_request_error_response(response).await; - let mut params = QueryBuilder::default().query().params(); + test_env.stop().await; + } - params.info_hash = None; + #[tokio::test] + async fn should_fail_when_url_query_parameters_are_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let invalid_query_param = "a=b=c"; - assert_bad_announce_request_error_response(response, "missing param info_hash").await; + let response = Client::new(*test_env.bind_address()) + .get(&format!("announce?{invalid_query_param}")) + .await; - // Without `peer_id` param + assert_cannot_parse_query_param_error_response(response, "invalid param a=b=c").await; - let mut params = QueryBuilder::default().query().params(); + test_env.stop().await; + } - params.peer_id = None; + #[tokio::test] + async fn should_fail_when_a_mandatory_field_is_missing() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + // Without `info_hash` param - 
assert_bad_announce_request_error_response(response, "missing param peer_id").await; + let mut params = QueryBuilder::default().query().params(); - // Without `port` param + params.info_hash = None; - let mut params = QueryBuilder::default().query().params(); + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - params.port = None; + assert_bad_announce_request_error_response(response, "missing param info_hash").await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + // Without `peer_id` param - assert_bad_announce_request_error_response(response, "missing param port").await; + let mut params = QueryBuilder::default().query().params(); - test_env.stop().await; - } + params.peer_id = None; - #[tokio::test] - async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let mut params = QueryBuilder::default().query().params(); + assert_bad_announce_request_error_response(response, "missing param peer_id").await; - for invalid_value in &invalid_info_hashes() { - params.set("info_hash", invalid_value); + // Without `port` param - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let mut params = QueryBuilder::default().query().params(); - assert_cannot_parse_query_params_error_response(response, "").await; - } + params.port = None; - test_env.stop().await; - } + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - #[tokio::test] - async fn should_not_fail_when_the_peer_address_param_is_invalid() { - // AnnounceQuery does not even contain the `peer_addr` - // The peer IP is obtained in two ways: - // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. - // 2. 
If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. + assert_bad_announce_request_error_response(response, "missing param port").await; - let test_env = running_test_environment::(configuration::ephemeral()).await; + test_env.stop().await; + } - let mut params = QueryBuilder::default().query().params(); + #[tokio::test] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); + let mut params = QueryBuilder::default().query().params(); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + for invalid_value in &invalid_info_hashes() { + params.set("info_hash", invalid_value); - assert_is_announce_response(response).await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - test_env.stop().await; - } + assert_cannot_parse_query_params_error_response(response, "").await; + } - #[tokio::test] - async fn should_fail_when_the_downloaded_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + test_env.stop().await; + } + + #[tokio::test] + async fn should_not_fail_when_the_peer_address_param_is_invalid() { + // AnnounceQuery does not even contain the `peer_addr` + // The peer IP is obtained in two ways: + // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. + // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. 
- let mut params = QueryBuilder::default().query().params(); + let test_env = running_test_environment::(configuration::ephemeral()).await; - let invalid_values = ["-1", "1.1", "a"]; + let mut params = QueryBuilder::default().query().params(); - for invalid_value in invalid_values { - params.set("downloaded", invalid_value); + params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - assert_bad_announce_request_error_response(response, "invalid param value").await; + assert_is_announce_response(response).await; + + test_env.stop().await; } - test_env.stop().await; - } + #[tokio::test] + async fn should_fail_when_the_downloaded_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - #[tokio::test] - async fn should_fail_when_the_uploaded_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let mut params = QueryBuilder::default().query().params(); - let mut params = QueryBuilder::default().query().params(); + let invalid_values = ["-1", "1.1", "a"]; - let invalid_values = ["-1", "1.1", "a"]; + for invalid_value in invalid_values { + params.set("downloaded", invalid_value); - for invalid_value in invalid_values { - params.set("uploaded", invalid_value); + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; + } - assert_bad_announce_request_error_response(response, "invalid param value").await; + test_env.stop().await; } - test_env.stop().await; - } + #[tokio::test] + async fn should_fail_when_the_uploaded_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - #[tokio::test] - async fn 
should_fail_when_the_peer_id_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let mut params = QueryBuilder::default().query().params(); - let mut params = QueryBuilder::default().query().params(); + let invalid_values = ["-1", "1.1", "a"]; - let invalid_values = [ - "0", - "-1", - "1.1", - "a", - "-qB0000000000000000", // 19 bytes - "-qB000000000000000000", // 21 bytes - ]; + for invalid_value in invalid_values { + params.set("uploaded", invalid_value); - for invalid_value in invalid_values { - params.set("peer_id", invalid_value); + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; + } - assert_bad_announce_request_error_response(response, "invalid param value").await; + test_env.stop().await; } - test_env.stop().await; - } + #[tokio::test] + async fn should_fail_when_the_peer_id_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - #[tokio::test] - async fn should_fail_when_the_port_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let mut params = QueryBuilder::default().query().params(); - let mut params = QueryBuilder::default().query().params(); + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "-qB0000000000000000", // 19 bytes + "-qB000000000000000000", // 21 bytes + ]; - let invalid_values = ["-1", "1.1", "a"]; + for invalid_value in invalid_values { + params.set("peer_id", invalid_value); - for invalid_value in invalid_values { - params.set("port", invalid_value); + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + 
assert_bad_announce_request_error_response(response, "invalid param value").await; + } - assert_bad_announce_request_error_response(response, "invalid param value").await; + test_env.stop().await; } - test_env.stop().await; - } + #[tokio::test] + async fn should_fail_when_the_port_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - #[tokio::test] - async fn should_fail_when_the_left_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let mut params = QueryBuilder::default().query().params(); - let mut params = QueryBuilder::default().query().params(); + let invalid_values = ["-1", "1.1", "a"]; - let invalid_values = ["-1", "1.1", "a"]; + for invalid_value in invalid_values { + params.set("port", invalid_value); - for invalid_value in invalid_values { - params.set("left", invalid_value); + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; + } - assert_bad_announce_request_error_response(response, "invalid param value").await; + test_env.stop().await; } - test_env.stop().await; - } + #[tokio::test] + async fn should_fail_when_the_left_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - #[tokio::test] - async fn should_fail_when_the_event_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let mut params = QueryBuilder::default().query().params(); - let mut params = QueryBuilder::default().query().params(); + let invalid_values = ["-1", "1.1", "a"]; - let invalid_values = [ - "0", - "-1", - "1.1", - "a", - "Started", // It should be lowercase to be valid: `started` - "Stopped", // It should be lowercase to be valid: `stopped` - "Completed", // It 
should be lowercase to be valid: `completed` - ]; + for invalid_value in invalid_values { + params.set("left", invalid_value); - for invalid_value in invalid_values { - params.set("event", invalid_value); + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; + } - assert_bad_announce_request_error_response(response, "invalid param value").await; + test_env.stop().await; } - test_env.stop().await; - } + #[tokio::test] + async fn should_fail_when_the_event_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - #[tokio::test] - async fn should_fail_when_the_compact_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let mut params = QueryBuilder::default().query().params(); - let mut params = QueryBuilder::default().query().params(); + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "Started", // It should be lowercase to be valid: `started` + "Stopped", // It should be lowercase to be valid: `stopped` + "Completed", // It should be lowercase to be valid: `completed` + ]; - let invalid_values = ["-1", "1.1", "a"]; + for invalid_value in invalid_values { + params.set("event", invalid_value); - for invalid_value in invalid_values { - params.set("compact", invalid_value); + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; + } - assert_bad_announce_request_error_response(response, "invalid param value").await; + test_env.stop().await; } - test_env.stop().await; - } - - #[tokio::test] - async fn 
should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + #[tokio::test] + async fn should_fail_when_the_compact_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) - .query(), - ) - .await; + let mut params = QueryBuilder::default().query().params(); - assert_announce_response( - response, - &Announce { - complete: 1, // the peer for this test - incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, - peers: vec![], - }, - ) - .await; - - test_env.stop().await; - } + let invalid_values = ["-1", "1.1", "a"]; - #[tokio::test] - async fn should_return_the_list_of_previously_announced_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + for invalid_value in invalid_values { + params.set("compact", invalid_value); - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); + assert_bad_announce_request_error_response(response, "invalid param value").await; + } - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + test_env.stop().await; + } - // Announce the new Peer 2. 
This new peer is non included on the response peer list - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .query(), + #[tokio::test] + async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) + .query(), + ) + .await; + + assert_announce_response( + response, + &Announce { + complete: 1, // the peer for this test + incomplete: 0, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, + peers: vec![], + }, ) .await; - // It should only contain the previously announced peer - assert_announce_response( - response, - &Announce { - complete: 2, - incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, - peers: vec![DictionaryPeer::from(previously_announced_peer)], - }, - ) - .await; - - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - // Announce a peer using IPV4 - let peer_using_ipv4 = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) - .build(); - test_env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; - - // Announce a peer using IPV6 - let peer_using_ipv6 = 
PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .with_peer_addr(&SocketAddr::new( - IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - 8080, - )) - .build(); - test_env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; - - // Announce the new Peer. - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000003")) - .query(), + #[tokio::test] + async fn should_return_the_list_of_previously_announced_peers() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2. This new peer is non included on the response peer list + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .query(), + ) + .await; + + // It should only contain the previously announced peer + assert_announce_response( + response, + &Announce { + complete: 2, + incomplete: 0, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, + peers: vec![DictionaryPeer::from(previously_announced_peer)], + }, ) .await; - // The newly announced peer is not included on the response peer list, - // but all the previously announced peers should be included regardless the IP version they are using. 
- assert_announce_response( - response, - &Announce { - complete: 3, - incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, - peers: vec![DictionaryPeer::from(peer_using_ipv4), DictionaryPeer::from(peer_using_ipv6)], - }, - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let peer = PeerBuilder::default().build(); - - // Add a peer - test_env.add_torrent_peer(&info_hash, &peer).await; - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer.peer_id) - .query(); + test_env.stop().await; + } - assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); + #[tokio::test] + async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Announce a peer using IPV4 + let peer_using_ipv4 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) + .build(); + test_env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; + + // Announce a peer using IPV6 + let peer_using_ipv6 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + 8080, + )) + .build(); + test_env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; + + // Announce the new Peer. 
+ let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000003")) + .query(), + ) + .await; + + // The newly announced peer is not included on the response peer list, + // but all the previously announced peers should be included regardless the IP version they are using. + assert_announce_response( + response, + &Announce { + complete: 3, + incomplete: 0, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, + peers: vec![DictionaryPeer::from(peer_using_ipv4), DictionaryPeer::from(peer_using_ipv6)], + }, + ) + .await; - let response = Client::new(*test_env.bind_address()).announce(&announce_query).await; + test_env.stop().await; + } - assert_empty_announce_response(response).await; + #[tokio::test] + async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - test_env.stop().await; - } + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let peer = PeerBuilder::default().build(); - #[tokio::test] - async fn should_return_the_compact_response() { - // Tracker Returns Compact Peer Lists - // https://www.bittorrent.org/beps/bep_0023.html + // Add a peer + test_env.add_torrent_peer(&info_hash, &peer).await; - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer.peer_id) + .query(); - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); - // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); + let response = 
Client::new(*test_env.bind_address()).announce(&announce_query).await; - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + assert_empty_announce_response(response).await; - // Announce the new Peer 2 accepting compact responses - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .with_compact(Compact::Accepted) - .query(), - ) - .await; + test_env.stop().await; + } - let expected_response = responses::announce::Compact { - complete: 2, - incomplete: 0, - interval: 120, - min_interval: 120, - peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), - }; + #[tokio::test] + async fn should_return_the_compact_response() { + // Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html - assert_compact_announce_response(response, &expected_response).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - test_env.stop().await; - } + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - #[tokio::test] - async fn should_not_return_the_compact_response_by_default() { - // code-review: the HTTP tracker does not return the compact response by default if the "compact" - // param is not provided in the announce URL. The BEP 23 suggest to do so. 
+ // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + // Add the Peer 1 + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + // Announce the new Peer 2 accepting compact responses + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_compact(Compact::Accepted) + .query(), + ) + .await; - // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); + let expected_response = responses::announce::Compact { + complete: 2, + incomplete: 0, + interval: 120, + min_interval: 120, + peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), + }; - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + assert_compact_announce_response(response, &expected_response).await; - // Announce the new Peer 2 without passing the "compact" param - // By default it should respond with the compact peer list - // https://www.bittorrent.org/beps/bep_0023.html - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .without_compact() - .query(), - ) - .await; + test_env.stop().await; + } - assert!(!is_a_compact_announce_response(response).await); + #[tokio::test] + async fn should_not_return_the_compact_response_by_default() { + // code-review: the HTTP tracker does not return the compact response by default if the "compact" + // param is not provided in the announce URL. The BEP 23 suggest to do so. 
- test_env.stop().await; - } + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - async fn is_a_compact_announce_response(response: Response) -> bool { - let bytes = response.bytes().await.unwrap(); - let compact_announce = serde_bencode::from_bytes::(&bytes); - compact_announce.is_ok() - } + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - #[tokio::test] - async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); - Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().query()) - .await; + // Add the Peer 1 + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; - let stats = test_env.tracker.get_stats().await; + // Announce the new Peer 2 without passing the "compact" param + // By default it should respond with the compact peer list + // https://www.bittorrent.org/beps/bep_0023.html + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .without_compact() + .query(), + ) + .await; - assert_eq!(stats.tcp4_connections_handled, 1); + assert!(!is_a_compact_announce_response(response).await); - drop(stats); + test_env.stop().await; + } - test_env.stop().await; - } + async fn is_a_compact_announce_response(response: Response) -> bool { + let bytes = response.bytes().await.unwrap(); + let compact_announce = serde_bencode::from_bytes::(&bytes); + compact_announce.is_ok() + } - #[tokio::test] - async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + #[tokio::test] + async fn 
should_increase_the_number_of_tcp4_connections_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .announce(&QueryBuilder::default().query()) - .await; + Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().query()) + .await; - let stats = test_env.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; - assert_eq!(stats.tcp6_connections_handled, 1); + assert_eq!(stats.tcp4_connections_handled, 1); - drop(stats); + drop(stats); - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { - // The tracker ignores the peer address in the request param. It uses the client remote ip address. + #[tokio::test] + async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; - Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) - .query(), - ) - .await; + let stats = test_env.tracker.get_stats().await; - let stats = test_env.tracker.get_stats().await; + assert_eq!(stats.tcp6_connections_handled, 1); - assert_eq!(stats.tcp6_connections_handled, 0); + drop(stats); - drop(stats); + test_env.stop().await; + } - test_env.stop().await; - } + #[tokio::test] + async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. 
It uses the client remote ip address. - #[tokio::test] - async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().query()) - .await; + Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; - let stats = test_env.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; - assert_eq!(stats.tcp4_announces_handled, 1); + assert_eq!(stats.tcp6_connections_handled, 0); - drop(stats); + drop(stats); - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + #[tokio::test] + async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .announce(&QueryBuilder::default().query()) - .await; + Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().query()) + .await; - let stats = test_env.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; - assert_eq!(stats.tcp6_announces_handled, 1); + assert_eq!(stats.tcp4_announces_handled, 1); - drop(stats); + drop(stats); - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { - // The tracker ignores the peer address in the request param. 
It uses the client remote ip address. + #[tokio::test] + async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; - Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) - .query(), - ) - .await; + let stats = test_env.tracker.get_stats().await; - let stats = test_env.tracker.get_stats().await; + assert_eq!(stats.tcp6_announces_handled, 1); - assert_eq!(stats.tcp6_announces_handled, 0); + drop(stats); - drop(stats); + test_env.stop().await; + } - test_env.stop().await; - } + #[tokio::test] + async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() + { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- #[tokio::test] - async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let client_ip = local_ip().unwrap(); + Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; - let client = Client::bind(*test_env.bind_address(), client_ip); + let stats = test_env.tracker.get_stats().await; - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); + assert_eq!(stats.tcp6_announces_handled, 0); - client.announce(&announce_query).await; + drop(stats); - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; + test_env.stop().await; + } - assert_eq!(peer_addr.ip(), client_ip); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + #[tokio::test] + async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - test_env.stop().await; - } + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let client_ip = local_ip().unwrap(); - #[tokio::test] - async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( - ) { - /* We assume that both the client and tracker share the same public IP. 
+ let client = Client::bind(*test_env.bind_address(), client_ip); - client <-> tracker <-> Internet - 127.0.0.1 external_ip = "2.137.87.41" - */ + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); - let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( - IpAddr::from_str("2.137.87.41").unwrap(), - )) - .await; + client.announce(&announce_query).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); - let client_ip = loopback_ip; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; - let client = Client::bind(*test_env.bind_address(), client_ip); + assert_eq!(peer_addr.ip(), client_ip); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); + test_env.stop().await; + } - client.announce(&announce_query).await; + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. 
- let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; + client <-> tracker <-> Internet + 127.0.0.1 external_ip = "2.137.87.41" + */ - assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2.137.87.41").unwrap(), + )) + .await; - test_env.stop().await; - } + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; - #[tokio::test] - async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( - ) { - /* We assume that both the client and tracker share the same public IP. + let client = Client::bind(*test_env.bind_address(), client_ip); - client <-> tracker <-> Internet - ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" - */ + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); - let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( - IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), - )) - .await; + client.announce(&announce_query).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); - let client_ip = loopback_ip; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; - let client = Client::bind(*test_env.bind_address(), client_ip); + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - let announce_query = QueryBuilder::default() - 
.with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); + test_env.stop().await; + } - client.announce(&announce_query).await; + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; + client <-> tracker <-> Internet + ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" + */ - assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + )) + .await; - test_env.stop().await; - } + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; - #[tokio::test] - async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( - ) { - /* - client <-> http proxy <-> tracker <-> Internet - ip: header: config: peer addr: - 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 - */ + let client = Client::bind(*test_env.bind_address(), client_ip); - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + client.announce(&announce_query).await; - let client = Client::new(*test_env.bind_address()); + let peers = 
test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; - let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - client - .announce_with_header( - &announce_query, - "X-Forwarded-For", - "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", - ) - .await; + test_env.stop().await; + } - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; + #[tokio::test] + async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( + ) { + /* + client <-> http proxy <-> tracker <-> Internet + ip: header: config: peer addr: + 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 + */ - assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - test_env.stop().await; - } - } + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - mod receiving_an_scrape_request { - - // Scrape documentation: - // - // BEP 48. 
Tracker Protocol Extension: Scrape - // https://www.bittorrent.org/beps/bep_0048.html - // - // Vuze (bittorrent client) docs: - // https://wiki.vuze.com/w/Scrape - - use std::net::IpAddr; - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; - use crate::http::asserts::{ - assert_cannot_parse_query_params_error_response, assert_missing_query_params_for_scrape_request_error_response, - assert_scrape_response, - }; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::requests::scrape::QueryBuilder; - use crate::http::responses::scrape::{self, File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::Axum; - - //#[tokio::test] - #[allow(dead_code)] - async fn should_fail_when_the_request_is_empty() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let response = Client::new(*test_env.bind_address()).get("scrape").await; - - assert_missing_query_params_for_scrape_request_error_response(response).await; - - test_env.stop().await; - } + let client = Client::new(*test_env.bind_address()); - #[tokio::test] - async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); - let mut params = QueryBuilder::default().query().params(); + client + .announce_with_header( + &announce_query, + "X-Forwarded-For", + "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", + ) + .await; - for invalid_value in &invalid_info_hashes() { - params.set_one_info_hash_param(invalid_value); + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; - let response = 
Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); - assert_cannot_parse_query_params_error_response(response, "").await; + test_env.stop().await; } - - test_env.stop().await; } - #[tokio::test] - async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + mod receiving_an_scrape_request { - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + // Scrape documentation: + // + // BEP 48. Tracker Protocol Extension: Scrape + // https://www.bittorrent.org/beps/bep_0048.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Scrape - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + use std::net::IpAddr; + use std::str::FromStr; - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; - assert_scrape_response(response, &expected_scrape_response).await; + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::http::asserts::{ + assert_cannot_parse_query_params_error_response, + assert_missing_query_params_for_scrape_request_error_response, assert_scrape_response, + }; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::requests::scrape::QueryBuilder; + 
use crate::http::responses::scrape::{self, File, ResponseBuilder}; + use crate::http::test_environment::running_test_environment; + use crate::V1; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_request_is_empty() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let response = Client::new(*test_env.bind_address()).get("scrape").await; + + assert_missing_query_params_for_scrape_request_error_response(response).await; + + test_env.stop().await; + } - test_env.stop().await; - } + #[tokio::test] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - #[tokio::test] - async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let mut params = QueryBuilder::default().query().params(); - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + for invalid_value in &invalid_info_hashes() { + params.set_one_info_hash_param(invalid_value); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_no_bytes_pending_to_download() - .build(), - ) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; + assert_cannot_parse_query_params_error_response(response, "").await; + } - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 1, - downloaded: 0, - incomplete: 0, - }, - ) - .build(); + test_env.stop().await; + } - assert_scrape_response(response, 
&expected_scrape_response).await; + #[tokio::test] + async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } - test_env.stop().await; - } + #[tokio::test] + async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() + { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_no_bytes_pending_to_download() + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } - 
#[tokio::test] - async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + #[tokio::test] + async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; - assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; + assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn should_accept_multiple_infohashes() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + #[tokio::test] + async fn should_accept_multiple_infohashes() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .add_info_hash(&info_hash1) 
- .add_info_hash(&info_hash2) - .query(), - ) - .await; + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .add_info_hash(&info_hash1) + .add_info_hash(&info_hash2) + .query(), + ) + .await; - let expected_scrape_response = ResponseBuilder::default() - .add_file(info_hash1.bytes(), File::zeroed()) - .add_file(info_hash2.bytes(), File::zeroed()) - .build(); + let expected_scrape_response = ResponseBuilder::default() + .add_file(info_hash1.bytes(), File::zeroed()) + .add_file(info_hash2.bytes(), File::zeroed()) + .build(); - assert_scrape_response(response, &expected_scrape_response).await; + assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + #[tokio::test] + async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; + Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; - let stats = test_env.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; - assert_eq!(stats.tcp4_scrapes_handled, 1); + assert_eq!(stats.tcp4_scrapes_handled, 1); - drop(stats); + drop(stats); - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn 
should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + #[tokio::test] + async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; - let stats = test_env.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; - assert_eq!(stats.tcp6_scrapes_handled, 1); + assert_eq!(stats.tcp6_scrapes_handled, 1); - drop(stats); + drop(stats); - test_env.stop().await; + test_env.stop().await; + } } } - } - mod configured_as_whitelisted { + mod configured_as_whitelisted { - mod and_receiving_an_announce_request { - use std::str::FromStr; + mod and_receiving_an_announce_request { + use std::str::FromStr; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; - use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; - use crate::http::client::Client; - use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::Axum; + use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; + use 
crate::http::client::Client; + use crate::http::requests::announce::QueryBuilder; + use crate::http::test_environment::running_test_environment; + use crate::V1; - #[tokio::test] - async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + #[tokio::test] + async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - .await; - - assert_torrent_not_in_whitelist_error_response(response).await; - - test_env.stop().await; - } + let response = Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; - #[tokio::test] - async fn should_allow_announcing_a_whitelisted_torrent() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .tracker - .add_torrent_to_whitelist(&info_hash) - .await - .expect("should add the torrent to the whitelist"); - - let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - .await; + assert_torrent_not_in_whitelist_error_response(response).await; - assert_is_announce_response(response).await; + test_env.stop().await; + } - test_env.stop().await; - } - } + #[tokio::test] + async fn should_allow_announcing_a_whitelisted_torrent() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - mod receiving_an_scrape_request { - 
use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::assert_scrape_response; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::Axum; - - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; + test_env + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); - let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + let response = Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; - assert_scrape_response(response, &expected_scrape_response).await; + assert_is_announce_response(response).await; - test_env.stop().await; + test_env.stop().await; + } } - #[tokio::test] - async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - test_env - .tracker - .add_torrent_to_whitelist(&info_hash) - .await - .expect("should add the torrent to the whitelist"); - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; + mod receiving_an_scrape_request { + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; + + use crate::common::fixtures::PeerBuilder; + use crate::http::asserts::assert_scrape_response; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::responses::scrape::{File, ResponseBuilder}; + use crate::http::test_environment::running_test_environment; + use crate::V1; + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let 
expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } - test_env.stop().await; + #[tokio::test] + async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + test_env + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } } } - } - mod configured_as_private { + mod configured_as_private { - mod and_receiving_an_announce_request { - use std::str::FromStr; - use std::time::Duration; + mod and_receiving_an_announce_request { + use std::str::FromStr; + use std::time::Duration; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::Key; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::Key; + use torrust_tracker_test_helpers::configuration; - use crate::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; - use crate::http::client::Client; - use 
crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::Axum; + use crate::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; + use crate::http::client::Client; + use crate::http::requests::announce::QueryBuilder; + use crate::http::test_environment::running_test_environment; + use crate::V1; - #[tokio::test] - async fn should_respond_to_authenticated_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + #[tokio::test] + async fn should_respond_to_authenticated_peers() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(*test_env.bind_address(), key.id()) - .announce(&QueryBuilder::default().query()) - .await; + let response = Client::authenticated(*test_env.bind_address(), key.id()) + .announce(&QueryBuilder::default().query()) + .await; - assert_is_announce_response(response).await; + assert_is_announce_response(response).await; - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + #[tokio::test] + async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - 
.await; + let response = Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; - assert_authentication_error_response(response).await; + assert_authentication_error_response(response).await; - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + #[tokio::test] + async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - let invalid_key = "INVALID_KEY"; + let invalid_key = "INVALID_KEY"; - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*test_env.bind_address()) .get(&format!( "announce/{invalid_key}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" )) .await; - assert_authentication_error_response(response).await; - } + assert_authentication_error_response(response).await; + } - #[tokio::test] - async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + #[tokio::test] + async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - // The tracker does not have this key - let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + // The tracker does not have this key + let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(*test_env.bind_address(), unregistered_key) - .announce(&QueryBuilder::default().query()) - .await; + let 
response = Client::authenticated(*test_env.bind_address(), unregistered_key) + .announce(&QueryBuilder::default().query()) + .await; - assert_authentication_error_response(response).await; + assert_authentication_error_response(response).await; - test_env.stop().await; + test_env.stop().await; + } } - } - mod receiving_an_scrape_request { + mod receiving_an_scrape_request { - use std::str::FromStr; - use std::time::Duration; + use std::str::FromStr; + use std::time::Duration; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::Key; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::Key; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; - use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::{assert_authentication_error_response, assert_scrape_response}; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::Axum; + use crate::common::fixtures::PeerBuilder; + use crate::http::asserts::{assert_authentication_error_response, assert_scrape_response}; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::responses::scrape::{File, ResponseBuilder}; + use crate::http::test_environment::running_test_environment; + use crate::V1; - #[tokio::test] - async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + #[tokio::test] + async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - let invalid_key = "INVALID_KEY"; + let invalid_key = "INVALID_KEY"; - let response = 
Client::new(*test_env.bind_address()) - .get(&format!( - "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" - )) - .await; + let response = Client::new(*test_env.bind_address()) + .get(&format!( + "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + )) + .await; - assert_authentication_error_response(response).await; - } - - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; + assert_authentication_error_response(response).await; + } - let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - assert_scrape_response(response, &expected_scrape_response).await; + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env.stop().await; - } + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; - #[tokio::test] - async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let response = 
Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + assert_scrape_response(response, &expected_scrape_response).await; - let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), key.id()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; + test_env.stop().await; + } - test_env.stop().await; - } + #[tokio::test] + async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + + let response = Client::authenticated(*test_env.bind_address(), key.id()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = 
ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { - // There is not authentication error - // code-review: should this really be this way? + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { + // There is not authentication error + // code-review: should this really be this way? - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; - let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - let response = Client::authenticated(*test_env.bind_address(), false_key) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; + let response = Client::authenticated(*test_env.bind_address(), false_key) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; - let expected_scrape_response = 
ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); - assert_scrape_response(response, &expected_scrape_response).await; + assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + test_env.stop().await; + } } } - } - mod configured_as_private_and_whitelisted { + mod configured_as_private_and_whitelisted { - mod and_receiving_an_announce_request {} + mod and_receiving_an_announce_request {} - mod receiving_an_scrape_request {} + mod receiving_an_scrape_request {} + } } } From 034295bd39b10047279ded3a8171eba13a55be00 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Mar 2023 09:57:48 +0000 Subject: [PATCH 0464/1003] feat: update cargo dep r2d2_mysql from 21 to 23 --- Cargo.lock | 1451 ++++++++++++++++++++-------------------- Cargo.toml | 2 +- src/databases/mysql.rs | 6 +- src/tracker/auth.rs | 5 +- 4 files changed, 716 insertions(+), 748 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ce179501f..1fbd61c19 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -28,11 +28,22 @@ dependencies = [ "version_check", ] +[[package]] +name = "ahash" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", +] + [[package]] name = "aho-corasick" -version = "0.7.19" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] @@ -46,15 +57,6 @@ dependencies = [ "libc", ] -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - [[package]] name = "aquatic_udp_protocol" version = "0.2.0" @@ -71,12 +73,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - [[package]] name = "arrayvec" version = "0.7.2" @@ -85,26 +81,15 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "async-trait" -version = "0.1.58" +version = "0.1.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" +checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" dependencies = [ "proc-macro2", "quote", "syn", ] -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - [[package]] name = "autocfg" version = "1.1.0" @@ -113,9 +98,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.1" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08b108ad2665fa3f6e6a517c3d80ec3e77d224c47d605167aefaa5d7ef97fa48" +checksum = "2fb79c228270dcf2426e74864cabc94babb5dbab01a4314e702d2f16540e1591" dependencies = [ "async-trait", "axum-core", @@ -157,9 +142,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.3.0" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"79b8558f5a0581152dc94dcd289132a1d377494bdeafcd41869b3258e3e2ad92" +checksum = "b2f958c80c248b34b9a877a643811be8dbca03ca5ba827f2b63baf3a81e5fc4e" dependencies = [ "async-trait", "bytes", @@ -174,9 +159,9 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8456dab8f11484979a86651da8e619b355ede5d61a160755155f6c344bd18c47" +checksum = "25e4a990e1593e286b1b96e6df76da9dbcb84945a810287ca8101f1a4f000f61" dependencies = [ "arc-swap", "bytes", @@ -186,7 +171,7 @@ dependencies = [ "hyper", "pin-project-lite", "rustls", - "rustls-pemfile 1.0.1", + "rustls-pemfile 1.0.2", "tokio", "tokio-rustls", "tower-service", @@ -202,33 +187,32 @@ dependencies = [ "cc", "cfg-if", "libc", - "miniz_oxide 0.6.2", + "miniz_oxide", "object", "rustc-demangle", ] [[package]] -name = "base-x" -version = "0.2.11" +name = "base64" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.13.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "bigdecimal" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1e50562e37200edf7c6c43e54a08e64a5553bfb59d9c297d5572512aa517256" +checksum = "6aaf33151a6429fe9211d1b276eafdf70cdff28b071e76c0b0e1503221ea3744" dependencies = [ - "num-bigint 0.3.3", + "num-bigint", "num-integer", "num-traits", - "serde", ] [[package]] @@ -239,25 +223,21 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bindgen" -version 
= "0.58.1" +version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f8523b410d7187a43085e7e064416ea32ded16bd0a4e6fc025e21616d01258f" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ "bitflags", "cexpr", "clang-sys", - "clap", - "env_logger", "lazy_static", "lazycell", - "log", "peeking_take_while", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "which", ] [[package]] @@ -277,9 +257,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitvec" -version = "0.22.3" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5237f00a8c86130a0cc317830e558b966dd7850d48a953d998c813f01a41b527" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", "radium", @@ -289,20 +269,56 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.9.0" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] -name = "block-buffer" -version = "0.10.3" +name = "borsh" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "40f9ca3698b2e4cb7c15571db0abc5551dca417a21ae8140460b50309bb2cc62" dependencies = [ - "generic-array", + "borsh-derive", + "hashbrown 0.13.2", +] + +[[package]] +name = "borsh-derive" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598b3eacc6db9c3ee57b22707ad8f6a8d2f6d442bfe24ffeb8cbb70ca59e6a35" +dependencies = [ + "borsh-derive-internal", + "borsh-schema-derive-internal", + "proc-macro-crate", + "proc-macro2", + "syn", +] + 
+[[package]] +name = "borsh-derive-internal" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "186b734fa1c9f6743e90c95d7233c9faab6360d1a96d4ffa19d9cfd1e9350f8a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "borsh-schema-derive-internal" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99b7ff1008316626f485991b960ade129253d4034014616b94f309a15366cc49" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -323,9 +339,31 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.11.1" +version = "3.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" + +[[package]] +name = "bytecheck" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +checksum = "13fe11640a23eb24562225322cd3e452b93a3d4091d62fab69c70542fcd17d1f" +dependencies = [ + "bytecheck_derive", + "ptr_meta", + "simdutf8", +] + +[[package]] +name = "bytecheck_derive" +version = "0.6.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e31225543cb46f81a7e224762764f4a6a0f097b1db0b175f69e8065efaa42de5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] [[package]] name = "byteorder" @@ -335,23 +373,23 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.2.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] name = "cc" -version = "1.0.73" +version = "1.0.79" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "cexpr" -version = "0.4.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "nom 5.1.2", + "nom", ] [[package]] @@ -362,51 +400,36 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1" +checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" dependencies = [ "iana-time-zone", "js-sys", "num-integer", "num-traits", "serde", - "time 0.1.44", + "time 0.1.45", "wasm-bindgen", "winapi", ] [[package]] name = "clang-sys" -version = "1.4.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" +checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a" dependencies = [ "glob", "libc", "libloading", ] -[[package]] -name = "clap" -version = "2.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" -dependencies = [ - "ansi_term", - "atty", - "bitflags", - "strsim 0.8.0", - "textwrap", - "unicode-width", - "vec_map", -] - [[package]] name = "cmake" -version = "0.1.48" +version = "0.1.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ad8cef104ac57b68b89df3208164d228503abbdce70f6880ffa3d970e7443a" +checksum = 
"db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c" dependencies = [ "cc", ] @@ -423,14 +446,14 @@ dependencies = [ [[package]] name = "config" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11f1667b8320afa80d69d8bbe40830df2c8a06003d86f73d8e003b2c48df416d" +checksum = "d379af7f68bfc21714c6c7dea883544201741d2ce8274bb12fa54f89507f52a7" dependencies = [ "async-trait", "json5", "lazy_static", - "nom 7.1.1", + "nom", "pathdiff", "ron", "rust-ini", @@ -440,12 +463,6 @@ dependencies = [ "yaml-rust", ] -[[package]] -name = "const_fn" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" - [[package]] name = "convert_case" version = "0.4.0" @@ -486,6 +503,73 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "crossbeam" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" +dependencies = [ + "cfg-if", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +dependencies = [ + "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +dependencies = [ + 
"autocfg", + "cfg-if", + "crossbeam-utils", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-queue" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +dependencies = [ + "cfg-if", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -498,9 +582,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.80" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b7d4e43b25d3c994662706a1d4fcfc32aaa6afd287502c111b237093bb23f3a" +checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" dependencies = [ "cc", "cxxbridge-flags", @@ -510,9 +594,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.80" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84f8829ddc213e2c1368e51a2564c552b65a8cb6a28f31e576270ac81d5e5827" +checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" dependencies = [ "cc", "codespan-reporting", @@ -525,15 +609,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.80" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e72537424b474af1460806647c41d4b6d35d09ef7fe031c5c2fa5766047cc56a" +checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" [[package]] name = "cxxbridge-macro" -version = "1.0.80" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "309e4fb93eed90e1e14bea0da16b209f81813ba9fc7830c20ed151dd7bc0a4d7" +checksum = 
"0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" dependencies = [ "proc-macro2", "quote", @@ -542,9 +626,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.14.1" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4529658bdda7fd6769b8614be250cdcfc3aeb0ee72fe66f9e41e5e5eb73eac02" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" dependencies = [ "darling_core", "darling_macro", @@ -552,23 +636,23 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.14.1" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "649c91bc01e8b1eac09fb91e8dbc7d517684ca6be8ebc75bb9cafc894f9fdb6f" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", + "strsim", "syn", ] [[package]] name = "darling_macro" -version = "0.14.1" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc69c5bfcbd2fc09a0f38451d2daf0e372e367986a83906d1b0dbc88134fb5" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ "darling_core", "quote", @@ -584,15 +668,15 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "rustc_version 0.4.0", + "rustc_version", "syn", ] [[package]] name = "derive_utils" -version = "0.11.2" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532b4c15dccee12c7044f1fcad956e98410860b22231e44a3b827464797ca7bf" +checksum = "7590f99468735a318c254ca9158d0c065aa9b5312896b5a043b5e39bc96f5fa2" dependencies = [ "proc-macro2", "quote", @@ -607,29 +691,14 @@ checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" [[package]] name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - -[[package]] -name = "digest" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.3", + "block-buffer", "crypto-common", ] -[[package]] -name = "discard" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" - [[package]] name = "dlv-list" version = "0.3.0" @@ -644,30 +713,38 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "either" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "encoding_rs" -version = "0.8.31" +version = "0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ "cfg-if", ] [[package]] -name = "env_logger" -version = "0.8.4" +name = "errno" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", + "errno-dragonfly", + "libc", + "winapi", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", ] [[package]] @@ -693,9 +770,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] @@ -711,13 +788,13 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" +checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" dependencies = [ "crc32fast", "libz-sys", - "miniz_oxide 0.5.4", + "miniz_oxide", ] [[package]] @@ -777,9 +854,9 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frunk" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cd67cf7d54b7e72d0ea76f3985c3747d74aee43e0218ad993b7903ba7a5395e" +checksum = "a89c703bf50009f383a0873845357cc400a95fc535f836feddfe015d7df6e1e0" dependencies = [ "frunk_core", "frunk_derives", @@ -788,15 +865,15 @@ dependencies = [ [[package]] name = "frunk_core" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1246cf43ec80bf8b2505b5c360b8fb999c97dabd17dbb604d85558d5cbc25482" +checksum = "2a446d01a558301dca28ef43222864a9fa2bd9a2e71370f769d5d5d5ec9f3537" [[package]] name = "frunk_derives" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dbc4f084ec5a3f031d24ccedeb87ab2c3189a2f33b8d070889073837d5ea09e" +checksum = 
"b83164912bb4c97cfe0772913c7af7387ee2e00cb6d4636fb65a35b3d0c8f173" dependencies = [ "frunk_proc_macro_helpers", "quote", @@ -805,9 +882,9 @@ dependencies = [ [[package]] name = "frunk_proc_macro_helpers" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99f11257f106c6753f5ffcb8e601fb39c390a088017aaa55b70c526bff15f63e" +checksum = "015425591bbeb0f5b8a75593340f1789af428e9f887a4f1e36c0c471f067ef50" dependencies = [ "frunk_core", "proc-macro2", @@ -817,9 +894,9 @@ dependencies = [ [[package]] name = "frunk_proc_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a078bd8459eccbb85e0b007b8f756585762a72a9efc53f359b371c3b6351dbcc" +checksum = "ea01524f285deab48affffb342b97f186e657b119c3f1821ac531780e0fbfae0" dependencies = [ "frunk_core", "frunk_proc_macros_impl", @@ -828,9 +905,9 @@ dependencies = [ [[package]] name = "frunk_proc_macros_impl" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ffba99f0fa4f57e42f57388fbb9a0ca863bc2b4261f3c5570fed579d5df6c32" +checksum = "0a802d974cc18ee7fe1a7868fc9ce31086294fd96ba62f8da64ecb44e92a2653" dependencies = [ "frunk_core", "frunk_proc_macro_helpers", @@ -841,15 +918,15 @@ dependencies = [ [[package]] name = "funty" -version = "1.2.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1847abb9cb65d566acd5942e94aea9c8f547ad02c98e1649326fc0e8910b8b1e" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" dependencies = [ "futures-channel", "futures-core", @@ -862,9 +939,9 @@ 
dependencies = [ [[package]] name = "futures-channel" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" +checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" dependencies = [ "futures-core", "futures-sink", @@ -872,15 +949,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" +checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" [[package]] name = "futures-executor" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" +checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" dependencies = [ "futures-core", "futures-task", @@ -889,15 +966,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" +checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" [[package]] name = "futures-macro" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" +checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" dependencies = [ "proc-macro2", "quote", @@ -906,21 +983,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" +checksum = 
"f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" [[package]] name = "futures-task" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" +checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" [[package]] name = "futures-util" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" +checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" dependencies = [ "futures-channel", "futures-core", @@ -963,15 +1040,15 @@ checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "glob" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "h2" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" dependencies = [ "bytes", "fnv", @@ -988,20 +1065,20 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.11.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.6", ] [[package]] name = "hashbrown" -version = "0.12.3" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash", + "ahash 0.8.3", ] [[package]] @@ -1019,14 +1096,14 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ - "base64", + "base64 0.13.1", "bitflags", "bytes", "headers-core", "http", "httpdate", "mime", - "sha1 0.10.5", + "sha1", ] [[package]] @@ -1040,9 +1117,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.19" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" dependencies = [ "libc", ] @@ -1055,9 +1132,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", @@ -1093,17 +1170,11 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" -[[package]] -name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - [[package]] name = "hyper" -version = "0.14.23" +version = "0.14.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" +checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" 
dependencies = [ "bytes", "futures-channel", @@ -1138,9 +1209,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.51" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5a6ef98976b22b3b7f2f3a806f858cb862044cfa66805aa3ad84cb3d3b785ed" +checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1178,9 +1249,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.1" +version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg", "hashbrown 0.12.3", @@ -1198,21 +1269,30 @@ dependencies = [ [[package]] name = "io-enum" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03e3306b0f260aad2872563eb0d5d1a59f2420fad270a661dce59a01e92d806b" +checksum = "e4b0d47a958cb166282b4dc4840a35783e861c2b39080af846e6481ebe145eee" dependencies = [ - "autocfg", "derive_utils", "quote", "syn", ] +[[package]] +name = "io-lifetimes" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" +dependencies = [ + "libc", + "windows-sys 0.45.0", +] + [[package]] name = "ipnet" -version = "2.5.1" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f88c5561171189e69df9d98bcf18fd5f9558300f7ea7b801eb8a0fd748bd8745" +checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" [[package]] name = "itertools" @@ -1225,15 +1305,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "js-sys" -version = "0.3.60" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" +checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" dependencies = [ "wasm-bindgen", ] @@ -1263,38 +1343,88 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lexical" -version = "5.2.2" +version = "6.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f404a90a744e32e8be729034fc33b90cf2a56418fbf594d69aa3c0214ad414e5" +checksum = "c7aefb36fd43fef7003334742cbf77b243fcd36418a1d1bdd480d613a67968f6" dependencies = [ - "cfg-if", "lexical-core", ] [[package]] name = "lexical-core" -version = "0.7.6" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe" +checksum = "2cde5de06e8d4c2faabc400238f9ae1c74d5412d03a7bd067645ccbc47070e46" +dependencies = [ + "lexical-parse-float", + "lexical-parse-integer", + "lexical-util", + "lexical-write-float", + "lexical-write-integer", +] + +[[package]] +name = "lexical-parse-float" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683b3a5ebd0130b8fb52ba0bdc718cc56815b6a097e28ae5a6997d0ad17dc05f" +dependencies = [ + "lexical-parse-integer", + "lexical-util", + "static_assertions", +] + +[[package]] +name = "lexical-parse-integer" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d0994485ed0c312f6d965766754ea177d07f9c00c9b82a5ee62ed5b47945ee9" +dependencies = [ + "lexical-util", + "static_assertions", +] + +[[package]] +name = "lexical-util" +version = "0.8.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5255b9ff16ff898710eb9eb63cb39248ea8a5bb036bea8085b1a767ff6c4e3fc" dependencies = [ - "arrayvec 0.5.2", - "bitflags", - "cfg-if", - "ryu", + "static_assertions", +] + +[[package]] +name = "lexical-write-float" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accabaa1c4581f05a3923d1b4cfd124c329352288b7b9da09e766b0668116862" +dependencies = [ + "lexical-util", + "lexical-write-integer", + "static_assertions", +] + +[[package]] +name = "lexical-write-integer" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1b6f3d1f4422866b68192d62f77bc5c700bee84f3069f2469d7bc8c77852446" +dependencies = [ + "lexical-util", "static_assertions", ] [[package]] name = "libc" -version = "0.2.136" +version = "0.2.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55edcf6c0bb319052dea84732cf99db461780fd5e8d3eb46ab6ff312ab31f197" +checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" [[package]] name = "libloading" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ "cfg-if", "winapi", @@ -1302,9 +1432,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.25.1" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f0455f2c1bc9a7caa792907026e469c1d91761fb0ea37cbb16427c77280cf35" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" dependencies = [ "cc", "pkg-config", @@ -1324,9 +1454,9 @@ dependencies = [ [[package]] name = "link-cplusplus" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369" +checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" dependencies = [ "cc", ] @@ -1337,6 +1467,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linux-raw-sys" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" + [[package]] name = "local-ip-address" version = "0.5.1" @@ -1370,11 +1506,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.6.6" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ea2d928b485416e8908cff2d97d621db22b27f7b3b6729e438bcf42c671ba91" +checksum = "b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" dependencies = [ - "hashbrown 0.11.2", + "hashbrown 0.12.3", ] [[package]] @@ -1391,9 +1527,9 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.6.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg", ] @@ -1420,15 +1556,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.6.2" @@ -1440,14 +1567,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.4" +version 
= "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.36.1", + "windows-sys 0.45.0", ] [[package]] @@ -1506,19 +1633,20 @@ dependencies = [ [[package]] name = "mysql" -version = "21.0.2" +version = "23.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06f5abe1c0f91831afd4d35298c08d958e80144869757b913891e5b0d00c2c96" +checksum = "05f11339ca5c251941805d51362a07823605a80586ced92914ab7de84fba813f" dependencies = [ "bufstream", "bytes", + "crossbeam", + "flate2", "io-enum", "libc", "lru", "mysql_common", "named_pipe", "native-tls", - "nix", "once_cell", "pem", "percent-encoding", @@ -1531,11 +1659,11 @@ dependencies = [ [[package]] name = "mysql_common" -version = "0.27.5" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02fa08ec695a40ed899b1239e81d0d74de5b40802d4fc8b513e2c541717c434e" +checksum = "9006c95034ccf7b903d955f210469119f6c3477fc9c9e7a7845ce38a3e665c2a" dependencies = [ - "base64", + "base64 0.13.1", "bigdecimal", "bindgen", "bitflags", @@ -1543,14 +1671,13 @@ dependencies = [ "byteorder", "bytes", "cc", - "chrono", "cmake", "crc32fast", "flate2", "frunk", "lazy_static", "lexical", - "num-bigint 0.4.3", + "num-bigint", "num-traits", "rand", "regex", @@ -1558,13 +1685,13 @@ dependencies = [ "saturating", "serde", "serde_json", - "sha1 0.6.1", + "sha1", "sha2", "smallvec", "subprocess", "thiserror", - "time 0.2.27", - "uuid 0.8.2", + "time 0.3.20", + "uuid", ] [[package]] @@ -1578,9 +1705,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fd7e2f3618557f980e0b17e8856252eee3c97fa12c54dff0ca290fb6266ca4a9" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", @@ -1604,34 +1731,11 @@ dependencies = [ "libc", ] -[[package]] -name = "nix" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" -dependencies = [ - "bitflags", - "cc", - "cfg-if", - "libc", - "memoffset", -] - [[package]] name = "nom" -version = "5.1.2" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" -dependencies = [ - "memchr", - "version_check", -] - -[[package]] -name = "nom" -version = "7.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", @@ -1649,17 +1753,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" -[[package]] -name = "num-bigint" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - [[package]] name = "num-bigint" version = "0.4.3" @@ -1692,23 +1785,14 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.1" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" dependencies = [ "hermit-abi", "libc", ] -[[package]] -name = "num_threads" 
-version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" -dependencies = [ - "libc", -] - [[package]] name = "object" version = "0.30.3" @@ -1720,21 +1804,15 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.15.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" - -[[package]] -name = "opaque-debug" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "openssl" -version = "0.10.42" +version = "0.10.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13" +checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" dependencies = [ "bitflags", "cfg-if", @@ -1764,18 +1842,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.22.0+1.1.1q" +version = "111.25.1+1.1.1t" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f31f0d509d1c1ae9cada2f9539ff8f37933831fd5098879e482aa687d659853" +checksum = "1ef9a9cc6ea7d9d5e7c4a913dc4b48d0e359eddf01af1dfec96ba7064b4aba10" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.77" +version = "0.9.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03b84c3b2d099b81f0953422b4d4ad58761589d0229b5506356afca05a3670a" +checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" dependencies = [ "autocfg", "cc", @@ -1807,15 +1885,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.4" +version = 
"0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -1832,13 +1910,11 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "0.8.3" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" dependencies = [ - "base64", - "once_cell", - "regex", + "base64 0.13.1", ] [[package]] @@ -1849,9 +1925,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.4.0" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbc7bc69c062e492337d74d59b120c274fd3d261b6bf6d3207d499b4b379c41a" +checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" dependencies = [ "thiserror", "ucd-trie", @@ -1859,9 +1935,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.4.0" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b75706b9642ebcb34dab3bc7750f811609a0eb1dd8b88c2d15bf628c1c65b2" +checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7" dependencies = [ "pest", "pest_generator", @@ -1869,9 +1945,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.4.0" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f9272122f5979a6511a749af9db9bfc810393f63119970d7085fed1c4ea0db" +checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" dependencies = [ "pest", 
"pest_meta", @@ -1882,13 +1958,13 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.4.0" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8717927f9b79515e565a64fe46c38b8cd0427e64c40680b14a7365ab09ac8d" +checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" dependencies = [ "once_cell", "pest", - "sha1 0.10.5", + "sha2", ] [[package]] @@ -1925,21 +2001,21 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "ppv-lite86" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "predicates" -version = "2.1.1" +version = "2.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5aab5be6e4732b473071984b3164dbbfb7a3674d30ea5ff44410b6bcd960c3c" +checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" dependencies = [ "difflib", "float-cmp", @@ -1951,33 +2027,62 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da1c2388b1513e1b605fcec39a95e0a9e8ef088f71443ef37099fa9ae6673fcb" +checksum = "72f883590242d3c6fc5bf50299011695fa6590c2c70eac95ee1bdb9a733ad1a2" [[package]] name = "predicates-tree" -version = "1.0.5" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4d86de6de25020a36c6d3643a86d9a6a9f552107c0559c60ea03551b5e16c032" +checksum = "54ff541861505aabf6ea722d2131ee980b8276e10a1297b94e896dd8b621850d" dependencies = [ "predicates-core", "termtree", ] [[package]] -name = "proc-macro-hack" -version = "0.5.19" +name = "proc-macro-crate" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml", +] + +[[package]] +name = "proc-macro-hack" +version = "0.5.20+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" + +[[package]] +name = "proc-macro2" +version = "1.0.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "ptr_meta" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" +checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" +dependencies = [ + "ptr_meta_derive", +] [[package]] -name = "proc-macro2" -version = "1.0.47" +name = "ptr_meta_derive" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" +checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ - "unicode-ident", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1988,9 +2093,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.21" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +checksum = 
"8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" dependencies = [ "proc-macro2", ] @@ -2008,9 +2113,9 @@ dependencies = [ [[package]] name = "r2d2_mysql" -version = "21.0.0" +version = "23.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d05145690b395f5515feff202b8f4b9429c500f423ef7129175155c3c3a9e2" +checksum = "9733d738ce65959a744f387bae69aa690a867e18d48e5486b171c47bc7b0c575" dependencies = [ "mysql", "r2d2", @@ -2028,9 +2133,9 @@ dependencies = [ [[package]] name = "radium" -version = "0.6.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" [[package]] name = "rand" @@ -2073,9 +2178,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" +checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" dependencies = [ "aho-corasick", "memchr", @@ -2084,26 +2189,26 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.27" +version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" [[package]] -name = "remove_dir_all" -version = "0.5.3" +name = "rend" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +checksum = "581008d2099240d37fb08d77ad713bcaec2c4d89d50b5b21a8bb1996bbab68ab" dependencies = [ - "winapi", + "bytecheck", ] [[package]] name = "reqwest" -version = "0.11.13" +version = "0.11.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c" +checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" dependencies = [ - "base64", + "base64 0.21.0", "bytes", "encoding_rs", "futures-core", @@ -2149,13 +2254,38 @@ dependencies = [ "winapi", ] +[[package]] +name = "rkyv" +version = "0.7.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c30f1d45d9aa61cbc8cd1eb87705470892289bb2d01943e7803b873a57404dc3" +dependencies = [ + "bytecheck", + "hashbrown 0.12.3", + "ptr_meta", + "rend", + "rkyv_derive", + "seahash", +] + +[[package]] +name = "rkyv_derive" +version = "0.7.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff26ed6c7c4dfc2aa9480b86a60e3c7233543a270a680e10758a507c5a4ce476" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "ron" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" dependencies = [ - "base64", + "base64 0.13.1", "bitflags", "serde", ] @@ -2186,13 +2316,20 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.26.1" +version = "1.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee9164faf726e4f3ece4978b25ca877ddc6802fa77f38cdccb32c7f805ecd70c" +checksum = "e13cf35f7140155d02ba4ec3294373d513a3c7baa8364c162b030e33c61520a8" dependencies = [ - "arrayvec 0.7.2", + "arrayvec", + "borsh", + "bytecheck", + "byteorder", + "bytes", "num-traits", + "rand", + "rkyv", "serde", + "serde_json", ] [[package]] @@ -2209,27 +2346,32 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc_version" -version = "0.2.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 0.9.0", + "semver", ] [[package]] -name = "rustc_version" -version = "0.4.0" +name = "rustix" +version = "0.36.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" dependencies = [ - "semver 1.0.14", + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.45.0", ] [[package]] name = "rustls" -version = "0.20.7" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring", @@ -2243,29 +2385,29 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" dependencies = [ - "base64", + "base64 0.13.1", ] [[package]] name = "rustls-pemfile" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64", + "base64 0.21.0", ] [[package]] name = "rustversion" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "ryu" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "safemem" @@ -2281,28 +2423,27 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] name = "schannel" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" +checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ - "lazy_static", - "windows-sys 0.36.1", + "windows-sys 0.42.0", ] [[package]] name = "scheduled-thread-pool" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ "parking_lot", ] [[package]] name = "scoped-tls" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" [[package]] name = "scopeguard" @@ -2312,9 +2453,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.2" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" [[package]] name = "sct" @@ -2326,11 +2467,17 @@ dependencies = [ "untrusted", ] +[[package]] +name = "seahash" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" + [[package]] 
name = "security-framework" -version = "2.7.0" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" +checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ "bitflags", "core-foundation", @@ -2341,9 +2488,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.6.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" dependencies = [ "core-foundation-sys", "libc", @@ -2351,30 +2498,15 @@ dependencies = [ [[package]] name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver" -version = "1.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" - -[[package]] -name = "semver-parser" -version = "0.7.0" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" [[package]] name = "serde" -version = "1.0.152" +version = "1.0.154" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "8cdd151213925e7f1ab45a9bbfb129316bd00799784b174b7cc7bcd16961c49e" dependencies = [ "serde_derive", ] @@ -2391,18 +2523,18 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.8" +version = "0.11.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "718dc5fff5b36f99093fc49b280cfc96ce6fc824317783bff5a1fed0c7a64819" +checksum = "416bda436f9aab92e02c8e10d49a15ddd339cea90b6e340fe51ed97abb548294" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.154" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "4fc80d722935453bcafdc2c9a73cd6fac4dc1938f0346035d84bf99fa9e33217" dependencies = [ "proc-macro2", "quote", @@ -2411,9 +2543,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.87" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" +checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" dependencies = [ "itoa", "ryu", @@ -2422,18 +2554,18 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b04f22b563c91331a10074bda3dd5492e3cc39d56bd557e91c0af42b6c7341" +checksum = "db0969fff533976baadd92e08b1d102c5a3d8a8049eadfd69d4d1e3c5b2ed189" dependencies = [ "serde", ] [[package]] name = "serde_repr" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a5ec9fa74a20ebbe5d9ac23dac1fc96ba0ecfe9f50f2843b52e537b10fbcb4e" +checksum = "395627de918015623b32e7669714206363a7fc00382bf477e72c1f7533e8eafc" dependencies = [ "proc-macro2", "quote", @@ -2454,25 +2586,25 @@ dependencies = [ [[package]] name = "serde_with" -version = "2.0.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368f2d60d049ea019a84dcd6687b0d1e0030fe663ae105039bdf967ed5e6a9a7" +checksum = "7ea48c9627169d206b35905699f513f513c303ab9d964a59b44fdcf66c1d1ab7" 
dependencies = [ - "base64", + "base64 0.13.1", "chrono", "hex", "indexmap", "serde", "serde_json", "serde_with_macros", - "time 0.3.16", + "time 0.3.20", ] [[package]] name = "serde_with_macros" -version = "2.0.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ccadfacf6cf10faad22bbadf55986bdd0856edfb5d9210aa1dcf1f516e84e93" +checksum = "9e6b7e52858f9f06c25e1c566bbb4ab428200cb3b30053ea09dc50837de7538b" dependencies = [ "darling", "proc-macro2", @@ -2482,22 +2614,13 @@ dependencies = [ [[package]] name = "sha-1" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.5", -] - -[[package]] -name = "sha1" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" -dependencies = [ - "sha1_smol", + "digest", ] [[package]] @@ -2508,26 +2631,18 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.5", + "digest", ] -[[package]] -name = "sha1_smol" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" - [[package]] name = "sha2" -version = "0.9.9" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ - "block-buffer 0.9.0", "cfg-if", "cpufeatures", - "digest 0.9.0", - "opaque-debug", + "digest", ] [[package]] @@ -2538,18 +2653,24 @@ checksum = 
"43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] +[[package]] +name = "simdutf8" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" + [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg", ] @@ -2562,9 +2683,9 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", @@ -2576,76 +2697,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -[[package]] -name = "standback" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" -dependencies = [ - "version_check", -] - [[package]] name = "static_assertions" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" -[[package]] -name = "stdweb" 
-version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" -dependencies = [ - "discard", - "rustc_version 0.2.3", - "stdweb-derive", - "stdweb-internal-macros", - "stdweb-internal-runtime", - "wasm-bindgen", -] - -[[package]] -name = "stdweb-derive" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" -dependencies = [ - "proc-macro2", - "quote", - "serde", - "serde_derive", - "syn", -] - -[[package]] -name = "stdweb-internal-macros" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" -dependencies = [ - "base-x", - "proc-macro2", - "quote", - "serde", - "serde_derive", - "serde_json", - "sha1 0.6.1", - "syn", -] - -[[package]] -name = "stdweb-internal-runtime" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" - -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - [[package]] name = "strsim" version = "0.10.0" @@ -2664,9 +2721,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.107" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", @@ -2675,9 +2732,9 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "tap" @@ -2687,56 +2744,46 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" dependencies = [ "cfg-if", "fastrand", - "libc", "redox_syscall", - "remove_dir_all", - "winapi", + "rustix", + "windows-sys 0.42.0", ] [[package]] name = "termcolor" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ "winapi-util", ] [[package]] name = "termtree" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "507e9898683b6c43a9aa55b64259b721b52ba226e0f3779137e50ad114a4c90b" - -[[package]] -name = "textwrap" -version = "0.11.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] +checksum = "95059e91184749cb66be6dc994f67f182b6d897cb3df74a5bf66b5e709295fd8" [[package]] name = "thiserror" -version = "1.0.37" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.37" +version = "1.0.39" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" dependencies = [ "proc-macro2", "quote", @@ -2745,9 +2792,9 @@ dependencies = [ [[package]] name = "time" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" dependencies = [ "libc", "wasi 0.10.0+wasi-snapshot-preview1", @@ -2756,31 +2803,14 @@ dependencies = [ [[package]] name = "time" -version = "0.2.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4752a97f8eebd6854ff91f1c1824cd6160626ac4bd44287f7f4ea2035a02a242" -dependencies = [ - "const_fn", - "libc", - "standback", - "stdweb", - "time-macros 0.1.1", - "version_check", - "winapi", -] - -[[package]] -name = "time" -version = "0.3.16" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fab5c8b9980850e06d92ddbe3ab839c062c801f3927c0fb8abd6fc8e918fbca" +checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" dependencies = [ "itoa", - "libc", - "num_threads", "serde", "time-core", - "time-macros 0.2.5", + "time-macros", ] [[package]] @@ -2791,36 +2821,13 @@ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" -dependencies = [ - "proc-macro-hack", - "time-macros-impl", -] - -[[package]] -name = "time-macros" -version = "0.2.5" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"65bb801831d812c562ae7d2bfb531f26e66e4e1f6b17307ba4149c5064710e5b" +checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" dependencies = [ "time-core", ] -[[package]] -name = "time-macros-impl" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3c141a1b43194f3f56a1411225df8646c55781d5f26db825b3d98507eb482f" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "standback", - "syn", -] - [[package]] name = "tinyvec" version = "1.6.0" @@ -2832,15 +2839,15 @@ dependencies = [ [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.21.2" +version = "1.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" +checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" dependencies = [ "autocfg", "bytes", @@ -2852,14 +2859,14 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "winapi", + "windows-sys 0.45.0", ] [[package]] name = "tokio-macros" -version = "1.8.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" +checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", @@ -2868,9 +2875,9 @@ dependencies = [ [[package]] name = "tokio-native-tls" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" 
dependencies = [ "native-tls", "tokio", @@ -2889,9 +2896,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" dependencies = [ "futures-core", "pin-project-lite", @@ -2912,9 +2919,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.4" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" +checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" dependencies = [ "bytes", "futures-core", @@ -2926,9 +2933,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] @@ -2976,7 +2983,7 @@ dependencies = [ "torrust-tracker-located-error", "torrust-tracker-primitives", "torrust-tracker-test-helpers", - "uuid 1.2.1", + "uuid", "warp", ] @@ -2992,7 +2999,7 @@ dependencies = [ "toml", "torrust-tracker-located-error", "torrust-tracker-primitives", - "uuid 1.2.1", + "uuid", ] [[package]] @@ -3092,9 +3099,9 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "tungstenite" @@ -3102,7 +3109,7 @@ version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ - "base64", + "base64 0.13.1", "byteorder", "bytes", "http", @@ -3137,9 +3144,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "ucd-trie" @@ -3158,15 +3165,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.8" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" [[package]] name = "unicode-ident" -version = "1.0.5" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -3208,15 +3215,9 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "uuid" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" - -[[package]] -name = "uuid" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb41e78f93363bb2df8b0e86a2ca30eed7806ea16ea0c790d757cf93f79be83" +checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" dependencies = [ "getrandom", ] @@ -3227,12 +3228,6 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" -[[package]] -name 
= "vec_map" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - [[package]] name = "version_check" version = "0.9.4" @@ -3295,9 +3290,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3305,9 +3300,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" dependencies = [ "bumpalo", "log", @@ -3320,9 +3315,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.33" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" +checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" dependencies = [ "cfg-if", "js-sys", @@ -3332,9 +3327,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" +checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3342,9 +3337,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.83" +version = "0.2.84" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", @@ -3355,15 +3350,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" +checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" [[package]] name = "web-sys" -version = "0.3.60" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" dependencies = [ "js-sys", "wasm-bindgen", @@ -3379,15 +3374,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "which" -version = "3.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" -dependencies = [ - "libc", -] - [[package]] name = "winapi" version = "0.3.9" @@ -3419,19 +3405,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows-sys" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" -dependencies = [ - "windows_aarch64_msvc 0.36.1", - "windows_i686_gnu 0.36.1", - "windows_i686_msvc 0.36.1", - "windows_x86_64_gnu 0.36.1", - "windows_x86_64_msvc 0.36.1", -] - [[package]] name = "windows-sys" version = "0.42.0" @@ -3439,85 +3412,79 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.0", - "windows_i686_gnu 0.42.0", - "windows_i686_msvc 0.42.0", - "windows_x86_64_gnu 0.42.0", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.0", + "windows_x86_64_msvc", ] [[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.0" +name = "windows-sys" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets", +] [[package]] -name = "windows_aarch64_msvc" -version = "0.36.1" +name = "windows-targets" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" +checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] [[package]] -name = "windows_aarch64_msvc" -version = "0.42.0" +name = "windows_aarch64_gnullvm" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" [[package]] -name = "windows_i686_gnu" -version = "0.36.1" +name = "windows_aarch64_msvc" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" +checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" 
[[package]] name = "windows_i686_gnu" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" - -[[package]] -name = "windows_i686_msvc" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" +checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" [[package]] name = "windows_i686_msvc" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" +checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" [[package]] name = "windows_x86_64_gnu" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" +checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" [[package]] name = "windows_x86_64_msvc" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" [[package]] name = "winreg" @@ -3530,9 +3497,9 @@ dependencies = [ [[package]] name = "wyz" -version = "0.4.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "129e027ad65ce1453680623c3fb5163cbf7107bfe1aa32257e7d0e63f9ced188" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" dependencies = [ "tap", ] diff --git a/Cargo.toml b/Cargo.toml index 740a5805e..dc51d8dca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,7 +30,7 @@ log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = "0.4" r2d2 = "0.8" -r2d2_mysql = "21" +r2d2_mysql = "23" r2d2_sqlite = { version = "0.21", features = ["bundled"] } rand = "0.8" derive_more = "0.99" diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index c8117a45c..f0c7ec1dd 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -6,7 +6,7 @@ use log::debug; use r2d2::Pool; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; -use r2d2_mysql::MysqlConnectionManager; +use r2d2_mysql::MySqlConnectionManager; use torrust_tracker_primitives::DatabaseDriver; use crate::databases::{Database, Error}; @@ -17,7 +17,7 @@ use crate::tracker::auth::{self, Key}; const DRIVER: DatabaseDriver = DatabaseDriver::MySQL; pub struct Mysql { - pool: Pool, + pool: Pool, } #[async_trait] @@ -28,7 +28,7 @@ impl Database for Mysql { fn new(db_path: &str) -> Result { let opts = Opts::from_url(db_path)?; let builder = OptsBuilder::from_opts(opts); - let manager = MysqlConnectionManager::new(builder); + let manager = MySqlConnectionManager::new(builder); let pool = r2d2::Pool::builder().build(manager).map_err(|e| (e, DRIVER))?; Ok(Self { pool }) diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 00663c383..e3c12a828 100644 --- 
a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -64,10 +64,11 @@ impl std::fmt::Display for ExpiringKey { "key: `{}`, valid until `{}`", self.key, DateTime::::from_utc( - NaiveDateTime::from_timestamp( + NaiveDateTime::from_timestamp_opt( i64::try_from(self.valid_until.as_secs()).expect("Overflow of i64 seconds, very future!"), self.valid_until.subsec_nanos(), - ), + ) + .unwrap(), Utc ) ) From 6854081d2e5e187b72bb3d34407df20feefced90 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Mar 2023 10:29:31 +0000 Subject: [PATCH 0465/1003] refactor: remove cargo dep warp and add dep hyper We were only using `hyper` from `warp` dependency. I've removed the `warp` dependency and added `hyper`. --- Cargo.lock | 203 +--------------------------------------- Cargo.toml | 2 +- src/apis/server.rs | 1 - src/http/v1/launcher.rs | 1 - 4 files changed, 3 insertions(+), 204 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1fbd61c19..42c4f8084 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -171,7 +171,7 @@ dependencies = [ "hyper", "pin-project-lite", "rustls", - "rustls-pemfile 1.0.2", + "rustls-pemfile", "tokio", "tokio-rustls", "tower-service", @@ -321,16 +321,6 @@ dependencies = [ "syn", ] -[[package]] -name = "buf_redux" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" -dependencies = [ - "memchr", - "safemem", -] - [[package]] name = "bufstream" version = "0.1.4" @@ -1090,31 +1080,6 @@ dependencies = [ "hashbrown 0.12.3", ] -[[package]] -name = "headers" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" -dependencies = [ - "base64 0.13.1", - "bitflags", - "bytes", - "headers-core", - "http", - "httpdate", - "mime", - "sha1", -] - -[[package]] -name = "headers-core" -version = "0.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" -dependencies = [ - "http", -] - [[package]] name = "hermit-abi" version = "0.2.6" @@ -1540,16 +1505,6 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -1613,24 +1568,6 @@ dependencies = [ "serde", ] -[[package]] -name = "multipart" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" -dependencies = [ - "buf_redux", - "httparse", - "log", - "mime", - "mime_guess", - "quick-error", - "rand", - "safemem", - "tempfile", - "twoway", -] - [[package]] name = "mysql" version = "23.0.1" @@ -2085,12 +2022,6 @@ dependencies = [ "syn", ] -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - [[package]] name = "quote" version = "1.0.23" @@ -2379,15 +2310,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "rustls-pemfile" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" -dependencies = [ - "base64 0.13.1", -] - [[package]] name = "rustls-pemfile" version = "1.0.2" @@ -2409,12 +2331,6 @@ version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" -[[package]] -name 
= "safemem" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" - [[package]] name = "saturating" version = "0.1.0" @@ -2439,12 +2355,6 @@ dependencies = [ "parking_lot", ] -[[package]] -name = "scoped-tls" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" - [[package]] name = "scopeguard" version = "1.1.0" @@ -2612,17 +2522,6 @@ dependencies = [ "syn", ] -[[package]] -name = "sha-1" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest", -] - [[package]] name = "sha1" version = "0.10.5" @@ -2894,29 +2793,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "tokio-stream" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" -dependencies = [ - "futures-core", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tokio-tungstenite" -version = "0.17.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" -dependencies = [ - "futures-util", - "log", - "tokio", - "tungstenite", -] - [[package]] name = "tokio-util" version = "0.7.7" @@ -2957,6 +2833,7 @@ dependencies = [ "fern", "futures", "hex", + "hyper", "lazy_static", "local-ip-address", "log", @@ -2984,7 +2861,6 @@ dependencies = [ "torrust-tracker-primitives", "torrust-tracker-test-helpers", "uuid", - "warp", ] [[package]] @@ -3103,34 +2979,6 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" -[[package]] 
-name = "tungstenite" -version = "0.17.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" -dependencies = [ - "base64 0.13.1", - "byteorder", - "bytes", - "http", - "httparse", - "log", - "rand", - "sha-1", - "thiserror", - "url", - "utf-8", -] - -[[package]] -name = "twoway" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" -dependencies = [ - "memchr", -] - [[package]] name = "twox-hash" version = "1.6.3" @@ -3154,15 +3002,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.11" @@ -3207,12 +3046,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - [[package]] name = "uuid" version = "1.3.0" @@ -3244,38 +3077,6 @@ dependencies = [ "try-lock", ] -[[package]] -name = "warp" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed7b8be92646fc3d18b06147664ebc5f48d222686cb11a8755e561a735aacc6d" -dependencies = [ - "bytes", - "futures-channel", - "futures-util", - "headers", - "http", - "hyper", - "log", - "mime", - "mime_guess", - "multipart", - "percent-encoding", - "pin-project", - "rustls-pemfile 0.2.1", - "scoped-tls", - "serde", - "serde_json", - "serde_urlencoded", - "tokio", - "tokio-rustls", - "tokio-stream", - "tokio-tungstenite", - 
"tokio-util", - "tower-service", - "tracing", -] - [[package]] name = "wasi" version = "0.10.0+wasi-snapshot-preview1" diff --git a/Cargo.toml b/Cargo.toml index dc51d8dca..064c18b0e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,7 +23,6 @@ percent-encoding = "2" binascii = "0.1" lazy_static = "1.4" openssl = { version = "0.10", features = ["vendored"] } -warp = { version = "0.3", features = ["tls"] } config = "0.13" toml = "0.5" log = { version = "0.4", features = ["release_max_level_info"] } @@ -47,6 +46,7 @@ torrust-tracker-primitives = { path = "packages/primitives" } torrust-tracker-configuration = { path = "packages/configuration" } torrust-tracker-located-error = { path = "packages/located-error" } multimap = "0.8.3" +hyper = "0.14.24" [dev-dependencies] mockall = "0.11" diff --git a/src/apis/server.rs b/src/apis/server.rs index a283bbc54..daac35999 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -7,7 +7,6 @@ use axum_server::Handle; use futures::future::BoxFuture; use futures::Future; use log::info; -use warp::hyper; use super::routes::router; use crate::signals::shutdown_signal; diff --git a/src/http/v1/launcher.rs b/src/http/v1/launcher.rs index a49efd11d..45bc54664 100644 --- a/src/http/v1/launcher.rs +++ b/src/http/v1/launcher.rs @@ -8,7 +8,6 @@ use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; use futures::future::BoxFuture; use log::info; -use warp::hyper; use super::routes::router; use crate::http::server::HttpServerLauncher; From f29aaebfaec0b828d37c2c6875d8cf8bd2483cd7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Mar 2023 13:06:20 +0000 Subject: [PATCH 0466/1003] feat: bump cargo dep: toml from 0.5.11 to 0.7.2 --- Cargo.lock | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++---- Cargo.toml | 2 +- 2 files changed, 57 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 42c4f8084..96fec0b2c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -449,7 +449,7 @@ dependencies = [ "rust-ini", 
"serde", "serde_json", - "toml", + "toml 0.5.11", "yaml-rust", ] @@ -1984,7 +1984,7 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" dependencies = [ - "toml", + "toml 0.5.11", ] [[package]] @@ -2482,6 +2482,15 @@ dependencies = [ "syn", ] +[[package]] +name = "serde_spanned" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -2816,6 +2825,40 @@ dependencies = [ "serde", ] +[[package]] +name = "toml" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7afcae9e3f0fe2c370fd4657108972cbb2fa9db1b9f84849cefd80741b01cb6" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a1eb0622d28f4b9c90adc4ea4b2b46b47663fde9ac5fafcb14a1369d5508825" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "torrust-tracker" version = "2.3.0" @@ -2855,7 +2898,7 @@ dependencies = [ "serde_with", "thiserror", "tokio", - "toml", + "toml 0.7.2", "torrust-tracker-configuration", "torrust-tracker-located-error", "torrust-tracker-primitives", @@ -2872,7 +2915,7 @@ dependencies = [ "serde", "serde_with", "thiserror", - "toml", + "toml 0.5.11", "torrust-tracker-located-error", "torrust-tracker-primitives", "uuid", @@ -3287,6 +3330,15 @@ version = "0.42.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +[[package]] +name = "winnow" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee7b2c67f962bf5042bfd8b6a916178df33a26eec343ae064cb8e069f638fa6f" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.10.1" diff --git a/Cargo.toml b/Cargo.toml index 064c18b0e..977ec57c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,7 +24,7 @@ binascii = "0.1" lazy_static = "1.4" openssl = { version = "0.10", features = ["vendored"] } config = "0.13" -toml = "0.5" +toml = "0.7" log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = "0.4" From ad488c4c756aad13184854bcbcddf42e946eae04 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Mar 2023 13:19:02 +0000 Subject: [PATCH 0467/1003] feat: bump cargo dep: axum from 0.6.1 to 0.6.10 --- Cargo.lock | 12 ++++++------ Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 96fec0b2c..519ea50f0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -98,9 +98,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.7" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fb79c228270dcf2426e74864cabc94babb5dbab01a4314e702d2f16540e1591" +checksum = "8582122b8edba2af43eaf6b80dbfd33f421b5a0eb3a3113d21bc096ac5b44faf" dependencies = [ "async-trait", "axum-core", @@ -1270,9 +1270,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.6" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" +checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" [[package]] name = "js-sys" @@ -2966,9 +2966,9 @@ dependencies = [ 
[[package]] name = "tower-http" -version = "0.3.5" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" +checksum = "5d1d42a9b3f3ec46ba828e8d376aec14592ea199f70a06a548587ecd1c4ab658" dependencies = [ "bitflags", "bytes", diff --git a/Cargo.toml b/Cargo.toml index 977ec57c8..1c11ce0a5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,7 +38,7 @@ futures = "0.3" async-trait = "0.1" aquatic_udp_protocol = "0.2" uuid = { version = "1", features = ["v4"] } -axum = "0.6.1" +axum = "0.6.10" axum-server = { version = "0.4.4", features = ["tls-rustls"] } axum-client-ip = "0.4.0" bip_bencode = "0.4.4" From ff9985e6cc3895af6f159f871608458ac22d4c74 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Mar 2023 13:47:56 +0000 Subject: [PATCH 0468/1003] feat: use only major and minor version in cargo deps `axum` is set to `0.6.10` becuase setting it to `0.6` makes cargo to downgrade it to `0.6.7`. 
--- Cargo.toml | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 1c11ce0a5..6f213995f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,13 +13,13 @@ repository = "https://github.com/torrust/torrust-tracker" version = "2.3.0" [dependencies] -tokio = { version = "1", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } +tokio = { version = "1.26", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } serde = { version = "1.0", features = ["derive"] } -serde_bencode = "^0.2.3" +serde_bencode = "^0.2" serde_json = "1.0" serde_with = "2.0" -hex = "0.4.3" -percent-encoding = "2" +hex = "0.4" +percent-encoding = "2.2" binascii = "0.1" lazy_static = "1.4" openssl = { version = "0.10", features = ["vendored"] } @@ -29,7 +29,7 @@ log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = "0.4" r2d2 = "0.8" -r2d2_mysql = "23" +r2d2_mysql = "23.0" r2d2_sqlite = { version = "0.21", features = ["bundled"] } rand = "0.8" derive_more = "0.99" @@ -39,22 +39,22 @@ async-trait = "0.1" aquatic_udp_protocol = "0.2" uuid = { version = "1", features = ["v4"] } axum = "0.6.10" -axum-server = { version = "0.4.4", features = ["tls-rustls"] } -axum-client-ip = "0.4.0" -bip_bencode = "0.4.4" +axum-server = { version = "0.4", features = ["tls-rustls"] } +axum-client-ip = "0.4" +bip_bencode = "0.4" torrust-tracker-primitives = { path = "packages/primitives" } torrust-tracker-configuration = { path = "packages/configuration" } torrust-tracker-located-error = { path = "packages/located-error" } -multimap = "0.8.3" -hyper = "0.14.24" +multimap = "0.8" +hyper = "0.14" [dev-dependencies] mockall = "0.11" -reqwest = { version = "0.11.13", features = ["json"] } -serde_urlencoded = "0.7.1" -serde_repr = "0.1.10" -serde_bytes = "0.11.8" -local-ip-address = "0.5.1" +reqwest = { version = "0.11", features = ["json"] } +serde_urlencoded = "0.7" +serde_repr = "0.1" 
+serde_bytes = "0.11" +local-ip-address = "0.5" torrust-tracker-test-helpers = { path = "packages/test-helpers" } [workspace] From 8e387cd63464e91283d0784c43e991c5196ec90a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Mar 2023 14:10:20 +0000 Subject: [PATCH 0469/1003] feat: update cargo aliases --- .cargo/config.toml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index e3d31cf7f..71480e92d 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,3 +1,5 @@ [alias] -cov = "llvm-cov --lcov --output-path=./.coverage/lcov.info" +cov = "llvm-cov" +cov-lcov = "llvm-cov --lcov --output-path=./.coverage/lcov.info" cov-html = "llvm-cov --html" +time = "build --timings --all-targets" From 19d33b403b930b24925d3f0b42344b5e71bcdcbb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Mar 2023 16:48:38 +0000 Subject: [PATCH 0470/1003] refactor: [#157] extract API contexts --- src/apis/context/auth_key/handlers.rs | 46 ++++++ src/apis/context/auth_key/mod.rs | 4 + .../auth_key/resources.rs} | 0 src/apis/context/auth_key/responses.rs | 35 +++++ src/apis/context/auth_key/routes.rs | 25 ++++ src/apis/{resources => context}/mod.rs | 2 +- src/apis/context/stats/handlers.rs | 13 ++ src/apis/context/stats/mod.rs | 4 + .../stats.rs => context/stats/resources.rs} | 0 src/apis/context/stats/responses.rs | 8 + src/apis/context/stats/routes.rs | 11 ++ src/apis/context/torrent/handlers.rs | 59 ++++++++ src/apis/context/torrent/mod.rs | 4 + src/apis/context/torrent/resources/mod.rs | 2 + .../{ => context/torrent}/resources/peer.rs | 0 .../torrent}/resources/torrent.rs | 5 +- src/apis/context/torrent/responses.rs | 18 +++ src/apis/context/torrent/routes.rs | 17 +++ src/apis/context/whitelist/handlers.rs | 46 ++++++ src/apis/context/whitelist/mod.rs | 3 + src/apis/context/whitelist/responses.rs | 20 +++ src/apis/context/whitelist/routes.rs | 22 +++ src/apis/handlers.rs | 138 ------------------ src/apis/mod.rs | 8 
+- src/apis/responses.rs | 76 +--------- src/apis/routes.rs | 52 ++----- tests/api/asserts.rs | 6 +- tests/tracker_api.rs | 8 +- 28 files changed, 364 insertions(+), 268 deletions(-) create mode 100644 src/apis/context/auth_key/handlers.rs create mode 100644 src/apis/context/auth_key/mod.rs rename src/apis/{resources/auth_key.rs => context/auth_key/resources.rs} (100%) create mode 100644 src/apis/context/auth_key/responses.rs create mode 100644 src/apis/context/auth_key/routes.rs rename src/apis/{resources => context}/mod.rs (72%) create mode 100644 src/apis/context/stats/handlers.rs create mode 100644 src/apis/context/stats/mod.rs rename src/apis/{resources/stats.rs => context/stats/resources.rs} (100%) create mode 100644 src/apis/context/stats/responses.rs create mode 100644 src/apis/context/stats/routes.rs create mode 100644 src/apis/context/torrent/handlers.rs create mode 100644 src/apis/context/torrent/mod.rs create mode 100644 src/apis/context/torrent/resources/mod.rs rename src/apis/{ => context/torrent}/resources/peer.rs (100%) rename src/apis/{ => context/torrent}/resources/torrent.rs (96%) create mode 100644 src/apis/context/torrent/responses.rs create mode 100644 src/apis/context/torrent/routes.rs create mode 100644 src/apis/context/whitelist/handlers.rs create mode 100644 src/apis/context/whitelist/mod.rs create mode 100644 src/apis/context/whitelist/responses.rs create mode 100644 src/apis/context/whitelist/routes.rs delete mode 100644 src/apis/handlers.rs diff --git a/src/apis/context/auth_key/handlers.rs b/src/apis/context/auth_key/handlers.rs new file mode 100644 index 000000000..af78b3f4c --- /dev/null +++ b/src/apis/context/auth_key/handlers.rs @@ -0,0 +1,46 @@ +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +use axum::extract::{Path, State}; +use axum::response::Response; +use serde::Deserialize; + +use super::responses::{ + auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, 
failed_to_reload_keys_response, +}; +use crate::apis::context::auth_key::resources::AuthKey; +use crate::apis::responses::{invalid_auth_key_param_response, ok_response}; +use crate::tracker::auth::Key; +use crate::tracker::Tracker; + +pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { + let seconds_valid = seconds_valid_or_key; + match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { + Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), + Err(e) => failed_to_generate_key_response(e), + } +} + +#[derive(Deserialize)] +pub struct KeyParam(String); + +pub async fn delete_auth_key_handler( + State(tracker): State>, + Path(seconds_valid_or_key): Path, +) -> Response { + match Key::from_str(&seconds_valid_or_key.0) { + Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), + Ok(key) => match tracker.remove_auth_key(&key.to_string()).await { + Ok(_) => ok_response(), + Err(e) => failed_to_delete_key_response(e), + }, + } +} + +pub async fn reload_keys_handler(State(tracker): State>) -> Response { + match tracker.load_keys_from_database().await { + Ok(_) => ok_response(), + Err(e) => failed_to_reload_keys_response(e), + } +} diff --git a/src/apis/context/auth_key/mod.rs b/src/apis/context/auth_key/mod.rs new file mode 100644 index 000000000..746a2f064 --- /dev/null +++ b/src/apis/context/auth_key/mod.rs @@ -0,0 +1,4 @@ +pub mod handlers; +pub mod resources; +pub mod responses; +pub mod routes; diff --git a/src/apis/resources/auth_key.rs b/src/apis/context/auth_key/resources.rs similarity index 100% rename from src/apis/resources/auth_key.rs rename to src/apis/context/auth_key/resources.rs diff --git a/src/apis/context/auth_key/responses.rs b/src/apis/context/auth_key/responses.rs new file mode 100644 index 000000000..8c1bf58dc --- /dev/null +++ b/src/apis/context/auth_key/responses.rs @@ -0,0 +1,35 @@ +use std::error::Error; + +use axum::http::{header, StatusCode}; +use 
axum::response::{IntoResponse, Response}; + +use crate::apis::context::auth_key::resources::AuthKey; +use crate::apis::responses::unhandled_rejection_response; + +/// # Panics +/// +/// Will panic if it can't convert the `AuthKey` resource to json +#[must_use] +pub fn auth_key_response(auth_key: &AuthKey) -> Response { + ( + StatusCode::OK, + [(header::CONTENT_TYPE, "application/json; charset=utf-8")], + serde_json::to_string(auth_key).unwrap(), + ) + .into_response() +} + +#[must_use] +pub fn failed_to_generate_key_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to generate key: {e}")) +} + +#[must_use] +pub fn failed_to_delete_key_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to delete key: {e}")) +} + +#[must_use] +pub fn failed_to_reload_keys_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to reload keys: {e}")) +} diff --git a/src/apis/context/auth_key/routes.rs b/src/apis/context/auth_key/routes.rs new file mode 100644 index 000000000..2a4f5b9dd --- /dev/null +++ b/src/apis/context/auth_key/routes.rs @@ -0,0 +1,25 @@ +use std::sync::Arc; + +use axum::routing::{get, post}; +use axum::Router; + +use super::handlers::{delete_auth_key_handler, generate_auth_key_handler, reload_keys_handler}; +use crate::tracker::Tracker; + +pub fn add(router: Router, tracker: Arc) -> Router { + // Keys + router + .route( + // code-review: Axum does not allow two routes with the same path but different path variable name. 
+ // In the new major API version, `seconds_valid` should be a POST form field so that we will have two paths: + // POST /api/key + // DELETE /api/key/:key + "/api/key/:seconds_valid_or_key", + post(generate_auth_key_handler) + .with_state(tracker.clone()) + .delete(delete_auth_key_handler) + .with_state(tracker.clone()), + ) + // Keys command + .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker)) +} diff --git a/src/apis/resources/mod.rs b/src/apis/context/mod.rs similarity index 72% rename from src/apis/resources/mod.rs rename to src/apis/context/mod.rs index bf3ce273b..6d3fb7566 100644 --- a/src/apis/resources/mod.rs +++ b/src/apis/context/mod.rs @@ -1,4 +1,4 @@ pub mod auth_key; -pub mod peer; pub mod stats; pub mod torrent; +pub mod whitelist; diff --git a/src/apis/context/stats/handlers.rs b/src/apis/context/stats/handlers.rs new file mode 100644 index 000000000..e93e65996 --- /dev/null +++ b/src/apis/context/stats/handlers.rs @@ -0,0 +1,13 @@ +use std::sync::Arc; + +use axum::extract::State; +use axum::response::Json; + +use super::resources::Stats; +use super::responses::stats_response; +use crate::tracker::services::statistics::get_metrics; +use crate::tracker::Tracker; + +pub async fn get_stats_handler(State(tracker): State>) -> Json { + stats_response(get_metrics(tracker.clone()).await) +} diff --git a/src/apis/context/stats/mod.rs b/src/apis/context/stats/mod.rs new file mode 100644 index 000000000..746a2f064 --- /dev/null +++ b/src/apis/context/stats/mod.rs @@ -0,0 +1,4 @@ +pub mod handlers; +pub mod resources; +pub mod responses; +pub mod routes; diff --git a/src/apis/resources/stats.rs b/src/apis/context/stats/resources.rs similarity index 100% rename from src/apis/resources/stats.rs rename to src/apis/context/stats/resources.rs diff --git a/src/apis/context/stats/responses.rs b/src/apis/context/stats/responses.rs new file mode 100644 index 000000000..ea9a2480a --- /dev/null +++ b/src/apis/context/stats/responses.rs @@ -0,0 +1,8 
@@ +use axum::response::Json; + +use super::resources::Stats; +use crate::tracker::services::statistics::TrackerMetrics; + +pub fn stats_response(tracker_metrics: TrackerMetrics) -> Json { + Json(Stats::from(tracker_metrics)) +} diff --git a/src/apis/context/stats/routes.rs b/src/apis/context/stats/routes.rs new file mode 100644 index 000000000..8791ed25a --- /dev/null +++ b/src/apis/context/stats/routes.rs @@ -0,0 +1,11 @@ +use std::sync::Arc; + +use axum::routing::get; +use axum::Router; + +use super::handlers::get_stats_handler; +use crate::tracker::Tracker; + +pub fn add(router: Router, tracker: Arc) -> Router { + router.route("/api/stats", get(get_stats_handler).with_state(tracker)) +} diff --git a/src/apis/context/torrent/handlers.rs b/src/apis/context/torrent/handlers.rs new file mode 100644 index 000000000..1a8280e75 --- /dev/null +++ b/src/apis/context/torrent/handlers.rs @@ -0,0 +1,59 @@ +use std::fmt; +use std::str::FromStr; +use std::sync::Arc; + +use axum::extract::{Path, Query, State}; +use axum::response::{IntoResponse, Json, Response}; +use serde::{de, Deserialize, Deserializer}; + +use super::resources::torrent::ListItem; +use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; +use crate::apis::responses::invalid_info_hash_param_response; +use crate::apis::InfoHashParam; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; +use crate::tracker::Tracker; + +pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => invalid_info_hash_param_response(&info_hash.0), + Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { + Some(info) => torrent_info_response(info).into_response(), + None => torrent_not_known_response(), + }, + } +} + +#[derive(Deserialize)] +pub struct PaginationParams { + #[serde(default, deserialize_with = 
"empty_string_as_none")] + pub offset: Option, + pub limit: Option, +} + +pub async fn get_torrents_handler( + State(tracker): State>, + pagination: Query, +) -> Json> { + torrent_list_response( + &get_torrents( + tracker.clone(), + &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), + ) + .await, + ) +} + +/// Serde deserialization decorator to map empty Strings to None, +fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> +where + D: Deserializer<'de>, + T: FromStr, + T::Err: fmt::Display, +{ + let opt = Option::::deserialize(de)?; + match opt.as_deref() { + None | Some("") => Ok(None), + Some(s) => FromStr::from_str(s).map_err(de::Error::custom).map(Some), + } +} diff --git a/src/apis/context/torrent/mod.rs b/src/apis/context/torrent/mod.rs new file mode 100644 index 000000000..746a2f064 --- /dev/null +++ b/src/apis/context/torrent/mod.rs @@ -0,0 +1,4 @@ +pub mod handlers; +pub mod resources; +pub mod responses; +pub mod routes; diff --git a/src/apis/context/torrent/resources/mod.rs b/src/apis/context/torrent/resources/mod.rs new file mode 100644 index 000000000..46d62aac5 --- /dev/null +++ b/src/apis/context/torrent/resources/mod.rs @@ -0,0 +1,2 @@ +pub mod peer; +pub mod torrent; diff --git a/src/apis/resources/peer.rs b/src/apis/context/torrent/resources/peer.rs similarity index 100% rename from src/apis/resources/peer.rs rename to src/apis/context/torrent/resources/peer.rs diff --git a/src/apis/resources/torrent.rs b/src/apis/context/torrent/resources/torrent.rs similarity index 96% rename from src/apis/resources/torrent.rs rename to src/apis/context/torrent/resources/torrent.rs index 3d8b2f427..1099dc923 100644 --- a/src/apis/resources/torrent.rs +++ b/src/apis/context/torrent/resources/torrent.rs @@ -74,8 +74,9 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::apis::resources::peer::Peer; - use crate::apis::resources::torrent::{ListItem, Torrent}; + use super::Torrent; + use 
crate::apis::context::torrent::resources::peer::Peer; + use crate::apis::context::torrent::resources::torrent::ListItem; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; diff --git a/src/apis/context/torrent/responses.rs b/src/apis/context/torrent/responses.rs new file mode 100644 index 000000000..48e3c6e7f --- /dev/null +++ b/src/apis/context/torrent/responses.rs @@ -0,0 +1,18 @@ +use axum::response::{IntoResponse, Json, Response}; +use serde_json::json; + +use super::resources::torrent::{ListItem, Torrent}; +use crate::tracker::services::torrent::{BasicInfo, Info}; + +pub fn torrent_list_response(basic_infos: &[BasicInfo]) -> Json> { + Json(ListItem::new_vec(basic_infos)) +} + +pub fn torrent_info_response(info: Info) -> Json { + Json(Torrent::from(info)) +} + +#[must_use] +pub fn torrent_not_known_response() -> Response { + Json(json!("torrent not known")).into_response() +} diff --git a/src/apis/context/torrent/routes.rs b/src/apis/context/torrent/routes.rs new file mode 100644 index 000000000..234f17223 --- /dev/null +++ b/src/apis/context/torrent/routes.rs @@ -0,0 +1,17 @@ +use std::sync::Arc; + +use axum::routing::get; +use axum::Router; + +use super::handlers::{get_torrent_handler, get_torrents_handler}; +use crate::tracker::Tracker; + +pub fn add(router: Router, tracker: Arc) -> Router { + // Torrents + router + .route( + "/api/torrent/:info_hash", + get(get_torrent_handler).with_state(tracker.clone()), + ) + .route("/api/torrents", get(get_torrents_handler).with_state(tracker)) +} diff --git a/src/apis/context/whitelist/handlers.rs b/src/apis/context/whitelist/handlers.rs new file mode 100644 index 000000000..c1e90a509 --- /dev/null +++ b/src/apis/context/whitelist/handlers.rs @@ -0,0 +1,46 @@ +use std::str::FromStr; +use std::sync::Arc; + +use axum::extract::{Path, State}; +use axum::response::Response; + +use super::responses::{ + failed_to_reload_whitelist_response, 
failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, +}; +use crate::apis::responses::{invalid_info_hash_param_response, ok_response}; +use crate::apis::InfoHashParam; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::Tracker; + +pub async fn add_torrent_to_whitelist_handler( + State(tracker): State>, + Path(info_hash): Path, +) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => invalid_info_hash_param_response(&info_hash.0), + Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { + Ok(_) => ok_response(), + Err(e) => failed_to_whitelist_torrent_response(e), + }, + } +} + +pub async fn remove_torrent_from_whitelist_handler( + State(tracker): State>, + Path(info_hash): Path, +) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => invalid_info_hash_param_response(&info_hash.0), + Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { + Ok(_) => ok_response(), + Err(e) => failed_to_remove_torrent_from_whitelist_response(e), + }, + } +} + +pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { + match tracker.load_whitelist_from_database().await { + Ok(_) => ok_response(), + Err(e) => failed_to_reload_whitelist_response(e), + } +} diff --git a/src/apis/context/whitelist/mod.rs b/src/apis/context/whitelist/mod.rs new file mode 100644 index 000000000..f6f000f34 --- /dev/null +++ b/src/apis/context/whitelist/mod.rs @@ -0,0 +1,3 @@ +pub mod handlers; +pub mod responses; +pub mod routes; diff --git a/src/apis/context/whitelist/responses.rs b/src/apis/context/whitelist/responses.rs new file mode 100644 index 000000000..dd2727898 --- /dev/null +++ b/src/apis/context/whitelist/responses.rs @@ -0,0 +1,20 @@ +use std::error::Error; + +use axum::response::Response; + +use crate::apis::responses::unhandled_rejection_response; + +#[must_use] +pub fn failed_to_remove_torrent_from_whitelist_response(e: E) -> Response { + 
unhandled_rejection_response(format!("failed to remove torrent from whitelist: {e}")) +} + +#[must_use] +pub fn failed_to_whitelist_torrent_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to whitelist torrent: {e}")) +} + +#[must_use] +pub fn failed_to_reload_whitelist_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to reload whitelist: {e}")) +} diff --git a/src/apis/context/whitelist/routes.rs b/src/apis/context/whitelist/routes.rs new file mode 100644 index 000000000..1349f8bc1 --- /dev/null +++ b/src/apis/context/whitelist/routes.rs @@ -0,0 +1,22 @@ +use std::sync::Arc; + +use axum::routing::{delete, get, post}; +use axum::Router; + +use super::handlers::{add_torrent_to_whitelist_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler}; +use crate::tracker::Tracker; + +pub fn add(router: Router, tracker: Arc) -> Router { + router + // Whitelisted torrents + .route( + "/api/whitelist/:info_hash", + post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), + ) + .route( + "/api/whitelist/:info_hash", + delete(remove_torrent_from_whitelist_handler).with_state(tracker.clone()), + ) + // Whitelist commands + .route("/api/whitelist/reload", get(reload_whitelist_handler).with_state(tracker)) +} diff --git a/src/apis/handlers.rs b/src/apis/handlers.rs deleted file mode 100644 index 410def39b..000000000 --- a/src/apis/handlers.rs +++ /dev/null @@ -1,138 +0,0 @@ -use std::fmt; -use std::str::FromStr; -use std::sync::Arc; -use std::time::Duration; - -use axum::extract::{Path, Query, State}; -use axum::response::{IntoResponse, Json, Response}; -use serde::{de, Deserialize, Deserializer}; - -use super::responses::{ - auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, - failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, - invalid_auth_key_param_response, 
invalid_info_hash_param_response, ok_response, stats_response, torrent_info_response, - torrent_list_response, torrent_not_known_response, -}; -use crate::apis::resources::auth_key::AuthKey; -use crate::apis::resources::stats::Stats; -use crate::apis::resources::torrent::ListItem; -use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth::Key; -use crate::tracker::services::statistics::get_metrics; -use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; -use crate::tracker::Tracker; - -pub async fn get_stats_handler(State(tracker): State>) -> Json { - stats_response(get_metrics(tracker.clone()).await) -} - -#[derive(Deserialize)] -pub struct InfoHashParam(String); - -pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { - match InfoHash::from_str(&info_hash.0) { - Err(_) => invalid_info_hash_param_response(&info_hash.0), - Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { - Some(info) => torrent_info_response(info).into_response(), - None => torrent_not_known_response(), - }, - } -} - -#[derive(Deserialize)] -pub struct PaginationParams { - #[serde(default, deserialize_with = "empty_string_as_none")] - pub offset: Option, - pub limit: Option, -} - -pub async fn get_torrents_handler( - State(tracker): State>, - pagination: Query, -) -> Json> { - torrent_list_response( - &get_torrents( - tracker.clone(), - &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), - ) - .await, - ) -} - -pub async fn add_torrent_to_whitelist_handler( - State(tracker): State>, - Path(info_hash): Path, -) -> Response { - match InfoHash::from_str(&info_hash.0) { - Err(_) => invalid_info_hash_param_response(&info_hash.0), - Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(_) => ok_response(), - Err(e) => failed_to_whitelist_torrent_response(e), - }, - } -} - -pub async fn remove_torrent_from_whitelist_handler( - State(tracker): State>, - 
Path(info_hash): Path, -) -> Response { - match InfoHash::from_str(&info_hash.0) { - Err(_) => invalid_info_hash_param_response(&info_hash.0), - Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(_) => ok_response(), - Err(e) => failed_to_remove_torrent_from_whitelist_response(e), - }, - } -} - -pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { - match tracker.load_whitelist_from_database().await { - Ok(_) => ok_response(), - Err(e) => failed_to_reload_whitelist_response(e), - } -} - -pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { - let seconds_valid = seconds_valid_or_key; - match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), - Err(e) => failed_to_generate_key_response(e), - } -} - -#[derive(Deserialize)] -pub struct KeyParam(String); - -pub async fn delete_auth_key_handler( - State(tracker): State>, - Path(seconds_valid_or_key): Path, -) -> Response { - match Key::from_str(&seconds_valid_or_key.0) { - Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), - Ok(key) => match tracker.remove_auth_key(&key.to_string()).await { - Ok(_) => ok_response(), - Err(e) => failed_to_delete_key_response(e), - }, - } -} - -pub async fn reload_keys_handler(State(tracker): State>) -> Response { - match tracker.load_keys_from_database().await { - Ok(_) => ok_response(), - Err(e) => failed_to_reload_keys_response(e), - } -} - -/// Serde deserialization decorator to map empty Strings to None, -fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> -where - D: Deserializer<'de>, - T: FromStr, - T::Err: fmt::Display, -{ - let opt = Option::::deserialize(de)?; - match opt.as_deref() { - None | Some("") => Ok(None), - Some(s) => FromStr::from_str(s).map_err(de::Error::custom).map(Some), - } -} diff --git a/src/apis/mod.rs b/src/apis/mod.rs index 
a646d5543..fd7fdb6e5 100644 --- a/src/apis/mod.rs +++ b/src/apis/mod.rs @@ -1,6 +1,10 @@ -pub mod handlers; +pub mod context; pub mod middlewares; -pub mod resources; pub mod responses; pub mod routes; pub mod server; + +use serde::Deserialize; + +#[derive(Deserialize)] +pub struct InfoHashParam(pub String); diff --git a/src/apis/responses.rs b/src/apis/responses.rs index c0a6cbcf8..4a9c39bf9 100644 --- a/src/apis/responses.rs +++ b/src/apis/responses.rs @@ -1,15 +1,6 @@ -use std::error::Error; - use axum::http::{header, StatusCode}; -use axum::response::{IntoResponse, Json, Response}; +use axum::response::{IntoResponse, Response}; use serde::Serialize; -use serde_json::json; - -use crate::apis::resources::auth_key::AuthKey; -use crate::apis::resources::stats::Stats; -use crate::apis::resources::torrent::{ListItem, Torrent}; -use crate::tracker::services::statistics::TrackerMetrics; -use crate::tracker::services::torrent::{BasicInfo, Info}; /* code-review: When Axum cannot parse a path or query param it shows a message like this: @@ -38,36 +29,6 @@ pub enum ActionStatus<'a> { Err { reason: std::borrow::Cow<'a, str> }, } -// Resource responses - -#[must_use] -pub fn stats_response(tracker_metrics: TrackerMetrics) -> Json { - Json(Stats::from(tracker_metrics)) -} - -#[must_use] -pub fn torrent_list_response(basic_infos: &[BasicInfo]) -> Json> { - Json(ListItem::new_vec(basic_infos)) -} - -#[must_use] -pub fn torrent_info_response(info: Info) -> Json { - Json(Torrent::from(info)) -} - -/// # Panics -/// -/// Will panic if it can't convert the `AuthKey` resource to json -#[must_use] -pub fn auth_key_response(auth_key: &AuthKey) -> Response { - ( - StatusCode::OK, - [(header::CONTENT_TYPE, "application/json; charset=utf-8")], - serde_json::to_string(auth_key).unwrap(), - ) - .into_response() -} - // OK response /// # Panics @@ -106,41 +67,6 @@ fn bad_request_response(body: &str) -> Response { .into_response() } -#[must_use] -pub fn torrent_not_known_response() -> 
Response { - Json(json!("torrent not known")).into_response() -} - -#[must_use] -pub fn failed_to_remove_torrent_from_whitelist_response(e: E) -> Response { - unhandled_rejection_response(format!("failed to remove torrent from whitelist: {e}")) -} - -#[must_use] -pub fn failed_to_whitelist_torrent_response(e: E) -> Response { - unhandled_rejection_response(format!("failed to whitelist torrent: {e}")) -} - -#[must_use] -pub fn failed_to_reload_whitelist_response(e: E) -> Response { - unhandled_rejection_response(format!("failed to reload whitelist: {e}")) -} - -#[must_use] -pub fn failed_to_generate_key_response(e: E) -> Response { - unhandled_rejection_response(format!("failed to generate key: {e}")) -} - -#[must_use] -pub fn failed_to_delete_key_response(e: E) -> Response { - unhandled_rejection_response(format!("failed to delete key: {e}")) -} - -#[must_use] -pub fn failed_to_reload_keys_response(e: E) -> Response { - unhandled_rejection_response(format!("failed to reload keys: {e}")) -} - /// This error response is to keep backward compatibility with the old API. /// It should be a plain text or json. 
#[must_use] diff --git a/src/apis/routes.rs b/src/apis/routes.rs index ecc51090c..c567e50da 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -1,53 +1,19 @@ use std::sync::Arc; -use axum::routing::{delete, get, post}; use axum::{middleware, Router}; -use super::handlers::{ - add_torrent_to_whitelist_handler, delete_auth_key_handler, generate_auth_key_handler, get_stats_handler, get_torrent_handler, - get_torrents_handler, reload_keys_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler, -}; +use super::context::{auth_key, stats, torrent, whitelist}; use super::middlewares::auth::auth; use crate::tracker::Tracker; #[allow(clippy::needless_pass_by_value)] pub fn router(tracker: Arc) -> Router { - Router::new() - // Stats - .route("/api/stats", get(get_stats_handler).with_state(tracker.clone())) - // Torrents - .route( - "/api/torrent/:info_hash", - get(get_torrent_handler).with_state(tracker.clone()), - ) - .route("/api/torrents", get(get_torrents_handler).with_state(tracker.clone())) - // Whitelisted torrents - .route( - "/api/whitelist/:info_hash", - post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), - ) - .route( - "/api/whitelist/:info_hash", - delete(remove_torrent_from_whitelist_handler).with_state(tracker.clone()), - ) - // Whitelist command - .route( - "/api/whitelist/reload", - get(reload_whitelist_handler).with_state(tracker.clone()), - ) - // Keys - .route( - // code-review: Axum does not allow two routes with the same path but different path variable name. 
- // In the new major API version, `seconds_valid` should be a POST form field so that we will have two paths: - // POST /api/key - // DELETE /api/key/:key - "/api/key/:seconds_valid_or_key", - post(generate_auth_key_handler) - .with_state(tracker.clone()) - .delete(delete_auth_key_handler) - .with_state(tracker.clone()), - ) - // Keys command - .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) - .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)) + let router = Router::new(); + + let router = auth_key::routes::add(router, tracker.clone()); + let router = stats::routes::add(router, tracker.clone()); + let router = whitelist::routes::add(router, tracker.clone()); + let router = torrent::routes::add(router, tracker.clone()); + + router.layer(middleware::from_fn_with_state(tracker.config.clone(), auth)) } diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 5a4abfb62..c7567e6fe 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -1,9 +1,9 @@ // code-review: should we use macros to return the exact line where the assert fails? 
use reqwest::Response; -use torrust_tracker::apis::resources::auth_key::AuthKey; -use torrust_tracker::apis::resources::stats::Stats; -use torrust_tracker::apis::resources::torrent::{ListItem, Torrent}; +use torrust_tracker::apis::context::auth_key::resources::AuthKey; +use torrust_tracker::apis::context::stats::resources::Stats; +use torrust_tracker::apis::context::torrent::resources::torrent::{ListItem, Torrent}; // Resource responses diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index dac5907c2..ff4eb295b 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -132,7 +132,7 @@ mod tracker_apis { mod for_stats_resources { use std::str::FromStr; - use torrust_tracker::apis::resources::stats::Stats; + use torrust_tracker::apis::context::stats::resources::Stats; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; @@ -206,8 +206,8 @@ mod tracker_apis { mod for_torrent_resources { use std::str::FromStr; - use torrust_tracker::apis::resources::torrent::Torrent; - use torrust_tracker::apis::resources::{self, torrent}; + use torrust_tracker::apis::context::torrent::resources::peer::Peer; + use torrust_tracker::apis::context::torrent::resources::torrent::{self, Torrent}; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; @@ -383,7 +383,7 @@ mod tracker_apis { seeders: 1, completed: 0, leechers: 0, - peers: Some(vec![resources::peer::Peer::from(peer)]), + peers: Some(vec![Peer::from(peer)]), }, ) .await; From 8fe52c321371100a2bbae40f0df888adc1fa9d8c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 12 Mar 2023 09:43:13 +0000 Subject: [PATCH 0471/1003] refactor(api): reorganize api tests in contexts After changing production code we follow the API contexts structure in tests too. 
--- tests/api/mod.rs | 1 + tests/api/tests/authentication.rs | 83 +++ tests/api/tests/configuration.rs | 17 + tests/api/tests/context/auth_key.rs | 265 ++++++++ tests/api/tests/context/mod.rs | 4 + tests/api/tests/context/stats.rs | 71 ++ tests/api/tests/context/torrent.rs | 249 +++++++ tests/api/tests/context/whitelist.rs | 258 +++++++ tests/api/tests/fixtures.rs | 13 + tests/api/tests/mod.rs | 4 + tests/tracker_api.rs | 983 +-------------------------- 11 files changed, 966 insertions(+), 982 deletions(-) create mode 100644 tests/api/tests/authentication.rs create mode 100644 tests/api/tests/configuration.rs create mode 100644 tests/api/tests/context/auth_key.rs create mode 100644 tests/api/tests/context/mod.rs create mode 100644 tests/api/tests/context/stats.rs create mode 100644 tests/api/tests/context/torrent.rs create mode 100644 tests/api/tests/context/whitelist.rs create mode 100644 tests/api/tests/fixtures.rs create mode 100644 tests/api/tests/mod.rs diff --git a/tests/api/mod.rs b/tests/api/mod.rs index fcb24e491..f59210b22 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -6,6 +6,7 @@ pub mod asserts; pub mod client; pub mod connection_info; pub mod test_environment; +pub mod tests; /// It forces a database error by dropping all tables. /// That makes any query fail. 
diff --git a/tests/api/tests/authentication.rs b/tests/api/tests/authentication.rs new file mode 100644 index 000000000..5183c8909 --- /dev/null +++ b/tests/api/tests/authentication.rs @@ -0,0 +1,83 @@ +use torrust_tracker_test_helpers::configuration; + +use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; +use crate::api::client::Client; +use crate::api::test_environment::running_test_environment; +use crate::common::http::{Query, QueryParam}; + +#[tokio::test] +async fn should_authenticate_requests_by_using_a_token_query_param() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let token = test_env.get_connection_info().api_token.unwrap(); + + let response = Client::new(test_env.get_connection_info()) + .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec())) + .await; + + assert_eq!(response.status(), 200); + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_authenticate_requests_when_the_token_is_missing() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let response = Client::new(test_env.get_connection_info()) + .get_request_with_query("stats", Query::default()) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_authenticate_requests_when_the_token_is_empty() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let response = Client::new(test_env.get_connection_info()) + .get_request_with_query("stats", Query::params([QueryParam::new("token", "")].to_vec())) + .await; + + assert_token_not_valid(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_authenticate_requests_when_the_token_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let response = Client::new(test_env.get_connection_info()) + .get_request_with_query("stats", 
Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) + .await; + + assert_token_not_valid(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let token = test_env.get_connection_info().api_token.unwrap(); + + // At the beginning of the query component + let response = Client::new(test_env.get_connection_info()) + .get_request(&format!("torrents?token={token}&limit=1")) + .await; + + assert_eq!(response.status(), 200); + + // At the end of the query component + let response = Client::new(test_env.get_connection_info()) + .get_request(&format!("torrents?limit=1&token={token}")) + .await; + + assert_eq!(response.status(), 200); + + test_env.stop().await; +} diff --git a/tests/api/tests/configuration.rs b/tests/api/tests/configuration.rs new file mode 100644 index 000000000..f81201191 --- /dev/null +++ b/tests/api/tests/configuration.rs @@ -0,0 +1,17 @@ +use torrust_tracker_test_helpers::configuration; + +use crate::api::test_environment::stopped_test_environment; + +#[tokio::test] +#[should_panic] +async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { + let mut test_env = stopped_test_environment(configuration::ephemeral()); + + let cfg = test_env.config_mut(); + + cfg.ssl_enabled = true; + cfg.ssl_key_path = Some("bad key path".to_string()); + cfg.ssl_cert_path = Some("bad cert path".to_string()); + + test_env.start().await; +} diff --git a/tests/api/tests/context/auth_key.rs b/tests/api/tests/context/auth_key.rs new file mode 100644 index 000000000..ee7121615 --- /dev/null +++ b/tests/api/tests/context/auth_key.rs @@ -0,0 +1,265 @@ +use std::time::Duration; + +use torrust_tracker::tracker::auth::Key; +use torrust_tracker_test_helpers::configuration; + +use crate::api::asserts::{ + assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, 
assert_failed_to_reload_keys, + assert_invalid_auth_key_param, assert_invalid_key_duration_param, assert_ok, assert_token_not_valid, assert_unauthorized, +}; +use crate::api::client::Client; +use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::api::force_database_error; +use crate::api::test_environment::running_test_environment; + +#[tokio::test] +async fn should_allow_generating_a_new_auth_key() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + + let response = Client::new(test_env.get_connection_info()) + .generate_auth_key(seconds_valid) + .await; + + let auth_key_resource = assert_auth_key_utf8(response).await; + + // Verify the key with the tracker + assert!(test_env + .tracker + .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) + .await + .is_ok()); + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .generate_auth_key(seconds_valid) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .generate_auth_key(seconds_valid) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let invalid_key_durations = [ + // "", it returns 404 + // " ", it returns 404 + "-1", "text", + ]; + + for invalid_key_duration in invalid_key_durations { + let response = Client::new(test_env.get_connection_info()) + 
.post(&format!("key/{invalid_key_duration}")) + .await; + + assert_invalid_key_duration_param(response, invalid_key_duration).await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_auth_key_cannot_be_generated() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + force_database_error(&test_env.tracker); + + let seconds_valid = 60; + let response = Client::new(test_env.get_connection_info()) + .generate_auth_key(seconds_valid) + .await; + + assert_failed_to_generate_key(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_deleting_an_auth_key() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + let auth_key = test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(test_env.get_connection_info()) + .delete_auth_key(&auth_key.key.to_string()) + .await; + + assert_ok(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let invalid_auth_keys = [ + // "", it returns a 404 + // " ", it returns a 404 + "0", + "-1", + "INVALID AUTH KEY ID", + "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8", // 32 char key cspell:disable-line + "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8zs", // 34 char key cspell:disable-line + ]; + + for invalid_auth_key in &invalid_auth_keys { + let response = Client::new(test_env.get_connection_info()) + .delete_auth_key(invalid_auth_key) + .await; + + assert_invalid_auth_key_param(response, invalid_auth_key).await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_auth_key_cannot_be_deleted() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + let auth_key = test_env + .tracker + 
.generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + force_database_error(&test_env.tracker); + + let response = Client::new(test_env.get_connection_info()) + .delete_auth_key(&auth_key.key.to_string()) + .await; + + assert_failed_to_delete_key(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + + // Generate new auth key + let auth_key = test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .delete_auth_key(&auth_key.key.to_string()) + .await; + + assert_token_not_valid(response).await; + + // Generate new auth key + let auth_key = test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .delete_auth_key(&auth_key.key.to_string()) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_reloading_keys() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(test_env.get_connection_info()).reload_keys().await; + + assert_ok(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_keys_cannot_be_reloaded() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + 
force_database_error(&test_env.tracker); + + let response = Client::new(test_env.get_connection_info()).reload_keys().await; + + assert_failed_to_reload_keys(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_reloading_keys_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .reload_keys() + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .reload_keys() + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} diff --git a/tests/api/tests/context/mod.rs b/tests/api/tests/context/mod.rs new file mode 100644 index 000000000..6d3fb7566 --- /dev/null +++ b/tests/api/tests/context/mod.rs @@ -0,0 +1,4 @@ +pub mod auth_key; +pub mod stats; +pub mod torrent; +pub mod whitelist; diff --git a/tests/api/tests/context/stats.rs b/tests/api/tests/context/stats.rs new file mode 100644 index 000000000..99ae405b7 --- /dev/null +++ b/tests/api/tests/context/stats.rs @@ -0,0 +1,71 @@ +use std::str::FromStr; + +use torrust_tracker::apis::context::stats::resources::Stats; +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker_test_helpers::configuration; + +use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; +use crate::api::client::Client; +use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::api::test_environment::running_test_environment; +use crate::common::fixtures::PeerBuilder; + +#[tokio::test] +async fn should_allow_getting_tracker_statistics() { + let test_env = 
running_test_environment(configuration::ephemeral()).await; + + test_env + .add_torrent_peer( + &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + &PeerBuilder::default().into(), + ) + .await; + + let response = Client::new(test_env.get_connection_info()).get_tracker_statistics().await; + + assert_stats( + response, + Stats { + torrents: 1, + seeders: 1, + completed: 0, + leechers: 0, + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + }, + ) + .await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .get_tracker_statistics() + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .get_tracker_statistics() + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} diff --git a/tests/api/tests/context/torrent.rs b/tests/api/tests/context/torrent.rs new file mode 100644 index 000000000..998c2afaf --- /dev/null +++ b/tests/api/tests/context/torrent.rs @@ -0,0 +1,249 @@ +use std::str::FromStr; + +use torrust_tracker::apis::context::torrent::resources::peer::Peer; +use torrust_tracker::apis::context::torrent::resources::torrent::{self, Torrent}; +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker_test_helpers::configuration; + +use crate::api::asserts::{ + assert_bad_request, assert_invalid_infohash_param, 
assert_not_found, assert_token_not_valid, assert_torrent_info, + assert_torrent_list, assert_torrent_not_known, assert_unauthorized, +}; +use crate::api::client::Client; +use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::api::test_environment::running_test_environment; +use crate::api::tests::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; +use crate::common::fixtures::PeerBuilder; +use crate::common::http::{Query, QueryParam}; + +#[tokio::test] +async fn should_allow_getting_torrents() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; + + let response = Client::new(test_env.get_connection_info()).get_torrents(Query::empty()).await; + + assert_torrent_list( + response, + vec![torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_limiting_the_torrents_in_the_result() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + // torrents are ordered alphabetically by infohashes + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + + test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; + + let response = Client::new(test_env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) + .await; + + assert_torrent_list( + response, 
+ vec![torrent::ListItem { + info_hash: "0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_the_torrents_result_pagination() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + // torrents are ordered alphabetically by infohashes + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + + test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; + + let response = Client::new(test_env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) + .await; + + assert_torrent_list( + response, + vec![torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; + + for invalid_offset in &invalid_offsets { + let response = Client::new(test_env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) + .await; + + assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { + let 
test_env = running_test_environment(configuration::ephemeral()).await; + + let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; + + for invalid_limit in &invalid_limits { + let response = Client::new(test_env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) + .await; + + assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_getting_torrents_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .get_torrents(Query::empty()) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .get_torrents(Query::default()) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_getting_a_torrent_info() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let peer = PeerBuilder::default().into(); + + test_env.add_torrent_peer(&info_hash, &peer).await; + + let response = Client::new(test_env.get_connection_info()) + .get_torrent(&info_hash.to_string()) + .await; + + assert_torrent_info( + response, + Torrent { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: Some(vec![Peer::from(peer)]), + }, + ) + .await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = 
InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let response = Client::new(test_env.get_connection_info()) + .get_torrent(&info_hash.to_string()) + .await; + + assert_torrent_not_known(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = Client::new(test_env.get_connection_info()) + .get_torrent(invalid_infohash) + .await; + + assert_invalid_infohash_param(response, invalid_infohash).await; + } + + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(test_env.get_connection_info()) + .get_torrent(invalid_infohash) + .await; + + assert_not_found(response).await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .get_torrent(&info_hash.to_string()) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .get_torrent(&info_hash.to_string()) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} diff --git a/tests/api/tests/context/whitelist.rs b/tests/api/tests/context/whitelist.rs new file mode 100644 index 000000000..29ea573c0 --- /dev/null +++ b/tests/api/tests/context/whitelist.rs @@ -0,0 +1,258 @@ +use std::str::FromStr; + +use 
torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker_test_helpers::configuration; + +use crate::api::asserts::{ + assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, + assert_invalid_infohash_param, assert_not_found, assert_ok, assert_token_not_valid, assert_unauthorized, +}; +use crate::api::client::Client; +use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::api::force_database_error; +use crate::api::test_environment::running_test_environment; +use crate::api::tests::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; + +#[tokio::test] +async fn should_allow_whitelisting_a_torrent() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(test_env.get_connection_info()) + .whitelist_a_torrent(&info_hash) + .await; + + assert_ok(response).await; + assert!( + test_env + .tracker + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await + ); + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let api_client = Client::new(test_env.get_connection_info()); + + let response = api_client.whitelist_a_torrent(&info_hash).await; + assert_ok(response).await; + + let response = api_client.whitelist_a_torrent(&info_hash).await; + assert_ok(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = 
"9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .whitelist_a_torrent(&info_hash) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .whitelist_a_torrent(&info_hash) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_torrent_cannot_be_whitelisted() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + force_database_error(&test_env.tracker); + + let response = Client::new(test_env.get_connection_info()) + .whitelist_a_torrent(&info_hash) + .await; + + assert_failed_to_whitelist_torrent(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = Client::new(test_env.get_connection_info()) + .whitelist_a_torrent(invalid_infohash) + .await; + + assert_invalid_infohash_param(response, invalid_infohash).await; + } + + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(test_env.get_connection_info()) + .whitelist_a_torrent(invalid_infohash) + .await; + + assert_not_found(response).await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_removing_a_torrent_from_the_whitelist() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + 
test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let response = Client::new(test_env.get_connection_info()) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_ok(response).await; + assert!(!test_env.tracker.is_info_hash_whitelisted(&info_hash).await); + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(test_env.get_connection_info()) + .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) + .await; + + assert_ok(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = Client::new(test_env.get_connection_info()) + .remove_torrent_from_whitelist(invalid_infohash) + .await; + + assert_invalid_infohash_param(response, invalid_infohash).await; + } + + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(test_env.get_connection_info()) + .remove_torrent_from_whitelist(invalid_infohash) + .await; + + assert_not_found(response).await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + force_database_error(&test_env.tracker); + + let response = 
Client::new(test_env.get_connection_info()) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_failed_to_remove_torrent_from_whitelist(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_token_not_valid(response).await; + + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_reload_the_whitelist_from_the_database() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; + + assert_ok(response).await; + /* todo: this assert fails because the whitelist has not been reloaded yet. + We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent + is whitelisted and use that endpoint to check if the torrent is still there after reloading. 
+ assert!( + !(test_env + .tracker + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await) + ); + */ + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + force_database_error(&test_env.tracker); + + let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; + + assert_failed_to_reload_whitelist(response).await; + + test_env.stop().await; +} diff --git a/tests/api/tests/fixtures.rs b/tests/api/tests/fixtures.rs new file mode 100644 index 000000000..6d147f190 --- /dev/null +++ b/tests/api/tests/fixtures.rs @@ -0,0 +1,13 @@ +use crate::common::fixtures::invalid_info_hashes; + +// When these infohashes are used in URL path params +// the response is a custom response returned in the handler +pub fn invalid_infohashes_returning_bad_request() -> Vec { + invalid_info_hashes() +} + +// When these infohashes are used in URL path params +// the response is an Axum response returned in the handler +pub fn invalid_infohashes_returning_not_found() -> Vec { + [String::new(), " ".to_string()].to_vec() +} diff --git a/tests/api/tests/mod.rs b/tests/api/tests/mod.rs new file mode 100644 index 000000000..38b4a2b37 --- /dev/null +++ b/tests/api/tests/mod.rs @@ -0,0 +1,4 @@ +pub mod authentication; +pub mod configuration; +pub mod context; +pub mod fixtures; diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index ff4eb295b..3219bc987 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -1,988 +1,7 @@ /// Integration tests for the tracker API /// /// ```text -/// cargo test tracker_apis -- --nocapture +/// cargo test --test tracker_api /// ``` -extern crate rand; - mod 
api; mod common; - -mod tracker_apis { - use crate::common::fixtures::invalid_info_hashes; - - // When these infohashes are used in URL path params - // the response is a custom response returned in the handler - fn invalid_infohashes_returning_bad_request() -> Vec { - invalid_info_hashes() - } - - // When these infohashes are used in URL path params - // the response is an Axum response returned in the handler - fn invalid_infohashes_returning_not_found() -> Vec { - [String::new(), " ".to_string()].to_vec() - } - - mod configuration { - use torrust_tracker_test_helpers::configuration; - - use crate::api::test_environment::stopped_test_environment; - - #[tokio::test] - #[should_panic] - async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { - let mut test_env = stopped_test_environment(configuration::ephemeral()); - - let cfg = test_env.config_mut(); - - cfg.ssl_enabled = true; - cfg.ssl_key_path = Some("bad key path".to_string()); - cfg.ssl_cert_path = Some("bad cert path".to_string()); - - test_env.start().await; - } - } - - mod authentication { - use torrust_tracker_test_helpers::configuration; - - use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; - use crate::api::client::Client; - use crate::api::test_environment::running_test_environment; - use crate::common::http::{Query, QueryParam}; - - #[tokio::test] - async fn should_authenticate_requests_by_using_a_token_query_param() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let token = test_env.get_connection_info().api_token.unwrap(); - - let response = Client::new(test_env.get_connection_info()) - .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec())) - .await; - - assert_eq!(response.status(), 200); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_authenticate_requests_when_the_token_is_missing() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let 
response = Client::new(test_env.get_connection_info()) - .get_request_with_query("stats", Query::default()) - .await; - - assert_unauthorized(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_authenticate_requests_when_the_token_is_empty() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let response = Client::new(test_env.get_connection_info()) - .get_request_with_query("stats", Query::params([QueryParam::new("token", "")].to_vec())) - .await; - - assert_token_not_valid(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_authenticate_requests_when_the_token_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let response = Client::new(test_env.get_connection_info()) - .get_request_with_query("stats", Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) - .await; - - assert_token_not_valid(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let token = test_env.get_connection_info().api_token.unwrap(); - - // At the beginning of the query component - let response = Client::new(test_env.get_connection_info()) - .get_request(&format!("torrents?token={token}&limit=1")) - .await; - - assert_eq!(response.status(), 200); - - // At the end of the query component - let response = Client::new(test_env.get_connection_info()) - .get_request(&format!("torrents?limit=1&token={token}")) - .await; - - assert_eq!(response.status(), 200); - - test_env.stop().await; - } - } - - mod for_stats_resources { - use std::str::FromStr; - - use torrust_tracker::apis::context::stats::resources::Stats; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker_test_helpers::configuration; - - use 
crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; - use crate::api::client::Client; - use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::test_environment::running_test_environment; - use crate::common::fixtures::PeerBuilder; - - #[tokio::test] - async fn should_allow_getting_tracker_statistics() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - test_env - .add_torrent_peer( - &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), - &PeerBuilder::default().into(), - ) - .await; - - let response = Client::new(test_env.get_connection_info()).get_tracker_statistics().await; - - assert_stats( - response, - Stats { - torrents: 1, - seeders: 1, - completed: 0, - leechers: 0, - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - }, - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .get_tracker_statistics() - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .get_tracker_statistics() - .await; - - assert_unauthorized(response).await; - - test_env.stop().await; - } - } - - mod for_torrent_resources { - use std::str::FromStr; - - use torrust_tracker::apis::context::torrent::resources::peer::Peer; - use 
torrust_tracker::apis::context::torrent::resources::torrent::{self, Torrent}; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker_test_helpers::configuration; - - use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; - use crate::api::asserts::{ - assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, - assert_torrent_list, assert_torrent_not_known, assert_unauthorized, - }; - use crate::api::client::Client; - use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::test_environment::running_test_environment; - use crate::common::fixtures::PeerBuilder; - use crate::common::http::{Query, QueryParam}; - - #[tokio::test] - async fn should_allow_getting_torrents() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; - - let response = Client::new(test_env.get_connection_info()).get_torrents(Query::empty()).await; - - assert_torrent_list( - response, - vec![torrent::ListItem { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent - }], - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_limiting_the_torrents_in_the_result() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - // torrents are ordered alphabetically by infohashes - let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - - test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; - 
test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; - - let response = Client::new(test_env.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) - .await; - - assert_torrent_list( - response, - vec![torrent::ListItem { - info_hash: "0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent - }], - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_the_torrents_result_pagination() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - // torrents are ordered alphabetically by infohashes - let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - - test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; - test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; - - let response = Client::new(test_env.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) - .await; - - assert_torrent_list( - response, - vec![torrent::ListItem { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent - }], - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; - - for invalid_offset in &invalid_offsets { - let response = Client::new(test_env.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) - 
.await; - - assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; - - for invalid_limit in &invalid_limits { - let response = Client::new(test_env.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) - .await; - - assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .get_torrents(Query::empty()) - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .get_torrents(Query::default()) - .await; - - assert_unauthorized(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_getting_a_torrent_info() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - let peer = PeerBuilder::default().into(); - - test_env.add_torrent_peer(&info_hash, &peer).await; - - let response = Client::new(test_env.get_connection_info()) - .get_torrent(&info_hash.to_string()) - .await; - - assert_torrent_info( - response, - Torrent { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: 
Some(vec![Peer::from(peer)]), - }, - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - let response = Client::new(test_env.get_connection_info()) - .get_torrent(&info_hash.to_string()) - .await; - - assert_torrent_not_known(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(test_env.get_connection_info()) - .get_torrent(invalid_infohash) - .await; - - assert_invalid_infohash_param(response, invalid_infohash).await; - } - - for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(test_env.get_connection_info()) - .get_torrent(invalid_infohash) - .await; - - assert_not_found(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; - - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .get_torrent(&info_hash.to_string()) - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .get_torrent(&info_hash.to_string()) - .await; - - assert_unauthorized(response).await; - 
- test_env.stop().await; - } - } - - mod for_whitelisted_torrent_resources { - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker_test_helpers::configuration; - - use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; - use crate::api::asserts::{ - assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, - assert_failed_to_whitelist_torrent, assert_invalid_infohash_param, assert_not_found, assert_ok, - assert_token_not_valid, assert_unauthorized, - }; - use crate::api::client::Client; - use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::force_database_error; - use crate::api::test_environment::running_test_environment; - - #[tokio::test] - async fn should_allow_whitelisting_a_torrent() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let response = Client::new(test_env.get_connection_info()) - .whitelist_a_torrent(&info_hash) - .await; - - assert_ok(response).await; - assert!( - test_env - .tracker - .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) - .await - ); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let api_client = Client::new(test_env.get_connection_info()); - - let response = api_client.whitelist_a_torrent(&info_hash).await; - assert_ok(response).await; - - let response = api_client.whitelist_a_torrent(&info_hash).await; - assert_ok(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - let test_env = 
running_test_environment(configuration::ephemeral()).await; - - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .whitelist_a_torrent(&info_hash) - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .whitelist_a_torrent(&info_hash) - .await; - - assert_unauthorized(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_torrent_cannot_be_whitelisted() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - force_database_error(&test_env.tracker); - - let response = Client::new(test_env.get_connection_info()) - .whitelist_a_torrent(&info_hash) - .await; - - assert_failed_to_whitelist_torrent(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(test_env.get_connection_info()) - .whitelist_a_torrent(invalid_infohash) - .await; - - assert_invalid_infohash_param(response, invalid_infohash).await; - } - - for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(test_env.get_connection_info()) - .whitelist_a_torrent(invalid_infohash) - .await; - - assert_not_found(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_removing_a_torrent_from_the_whitelist() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash 
= InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - let response = Client::new(test_env.get_connection_info()) - .remove_torrent_from_whitelist(&hash) - .await; - - assert_ok(response).await; - assert!(!test_env.tracker.is_info_hash_whitelisted(&info_hash).await); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let response = Client::new(test_env.get_connection_info()) - .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) - .await; - - assert_ok(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(test_env.get_connection_info()) - .remove_torrent_from_whitelist(invalid_infohash) - .await; - - assert_invalid_infohash_param(response, invalid_infohash).await; - } - - for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(test_env.get_connection_info()) - .remove_torrent_from_whitelist(invalid_infohash) - .await; - - assert_not_found(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - force_database_error(&test_env.tracker); - - 
let response = Client::new(test_env.get_connection_info()) - .remove_torrent_from_whitelist(&hash) - .await; - - assert_failed_to_remove_torrent_from_whitelist(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .remove_torrent_from_whitelist(&hash) - .await; - - assert_token_not_valid(response).await; - - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .remove_torrent_from_whitelist(&hash) - .await; - - assert_unauthorized(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_reload_the_whitelist_from_the_database() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; - - assert_ok(response).await; - /* todo: this assert fails because the whitelist has not been reloaded yet. - We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent - is whitelisted and use that endpoint to check if the torrent is still there after reloading. 
- assert!( - !(test_env - .tracker - .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) - .await) - ); - */ - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - force_database_error(&test_env.tracker); - - let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; - - assert_failed_to_reload_whitelist(response).await; - - test_env.stop().await; - } - } - - mod for_key_resources { - use std::time::Duration; - - use torrust_tracker::tracker::auth::Key; - use torrust_tracker_test_helpers::configuration; - - use crate::api::asserts::{ - assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, - assert_invalid_auth_key_param, assert_invalid_key_duration_param, assert_ok, assert_token_not_valid, - assert_unauthorized, - }; - use crate::api::client::Client; - use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::force_database_error; - use crate::api::test_environment::running_test_environment; - - #[tokio::test] - async fn should_allow_generating_a_new_auth_key() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let seconds_valid = 60; - - let response = Client::new(test_env.get_connection_info()) - .generate_auth_key(seconds_valid) - .await; - - let auth_key_resource = assert_auth_key_utf8(response).await; - - // Verify the key with the tracker - assert!(test_env - .tracker - .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) - .await - .is_ok()); - - test_env.stop().await; - } - - #[tokio::test] - async fn 
should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let seconds_valid = 60; - - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .generate_auth_key(seconds_valid) - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .generate_auth_key(seconds_valid) - .await; - - assert_unauthorized(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let invalid_key_durations = [ - // "", it returns 404 - // " ", it returns 404 - "-1", "text", - ]; - - for invalid_key_duration in invalid_key_durations { - let response = Client::new(test_env.get_connection_info()) - .post(&format!("key/{invalid_key_duration}")) - .await; - - assert_invalid_key_duration_param(response, invalid_key_duration).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_auth_key_cannot_be_generated() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - force_database_error(&test_env.tracker); - - let seconds_valid = 60; - let response = Client::new(test_env.get_connection_info()) - .generate_auth_key(seconds_valid) - .await; - - assert_failed_to_generate_key(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_deleting_an_auth_key() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let seconds_valid = 60; - let auth_key = test_env - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(test_env.get_connection_info()) - 
.delete_auth_key(&auth_key.key.to_string()) - .await; - - assert_ok(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let invalid_auth_keys = [ - // "", it returns a 404 - // " ", it returns a 404 - "0", - "-1", - "INVALID AUTH KEY ID", - "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8", // 32 char key cspell:disable-line - "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8zs", // 34 char key cspell:disable-line - ]; - - for invalid_auth_key in &invalid_auth_keys { - let response = Client::new(test_env.get_connection_info()) - .delete_auth_key(invalid_auth_key) - .await; - - assert_invalid_auth_key_param(response, invalid_auth_key).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_auth_key_cannot_be_deleted() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let seconds_valid = 60; - let auth_key = test_env - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - force_database_error(&test_env.tracker); - - let response = Client::new(test_env.get_connection_info()) - .delete_auth_key(&auth_key.key.to_string()) - .await; - - assert_failed_to_delete_key(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let seconds_valid = 60; - - // Generate new auth key - let auth_key = test_env - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .delete_auth_key(&auth_key.key.to_string()) - .await; - - assert_token_not_valid(response).await; - - // Generate new auth key - let auth_key = test_env - 
.tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .delete_auth_key(&auth_key.key.to_string()) - .await; - - assert_unauthorized(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_reloading_keys() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let seconds_valid = 60; - test_env - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(test_env.get_connection_info()).reload_keys().await; - - assert_ok(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_keys_cannot_be_reloaded() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let seconds_valid = 60; - test_env - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - force_database_error(&test_env.tracker); - - let response = Client::new(test_env.get_connection_info()).reload_keys().await; - - assert_failed_to_reload_keys(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let seconds_valid = 60; - test_env - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .reload_keys() - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .reload_keys() - .await; - - assert_unauthorized(response).await; - - test_env.stop().await; - } - } -} From 70d94ad180bab0a2b48b627ac44878c2c5bd7958 Mon Sep 17 
00:00:00 2001 From: Jose Celano Date: Sun, 12 Mar 2023 11:03:58 +0000 Subject: [PATCH 0472/1003] refactor(api): add namespace v1 to API --- src/apis/mod.rs | 4 +--- src/apis/routes.rs | 9 +++------ src/apis/{ => v1}/context/auth_key/handlers.rs | 4 ++-- src/apis/{ => v1}/context/auth_key/mod.rs | 0 src/apis/{ => v1}/context/auth_key/resources.rs | 0 src/apis/{ => v1}/context/auth_key/responses.rs | 4 ++-- src/apis/{ => v1}/context/auth_key/routes.rs | 0 src/apis/{ => v1}/context/mod.rs | 0 src/apis/{ => v1}/context/stats/handlers.rs | 0 src/apis/{ => v1}/context/stats/mod.rs | 0 src/apis/{ => v1}/context/stats/resources.rs | 0 src/apis/{ => v1}/context/stats/responses.rs | 0 src/apis/{ => v1}/context/stats/routes.rs | 0 src/apis/{ => v1}/context/torrent/handlers.rs | 2 +- src/apis/{ => v1}/context/torrent/mod.rs | 0 src/apis/{ => v1}/context/torrent/resources/mod.rs | 0 src/apis/{ => v1}/context/torrent/resources/peer.rs | 0 .../{ => v1}/context/torrent/resources/torrent.rs | 4 ++-- src/apis/{ => v1}/context/torrent/responses.rs | 0 src/apis/{ => v1}/context/torrent/routes.rs | 0 src/apis/{ => v1}/context/whitelist/handlers.rs | 2 +- src/apis/{ => v1}/context/whitelist/mod.rs | 0 src/apis/{ => v1}/context/whitelist/responses.rs | 2 +- src/apis/{ => v1}/context/whitelist/routes.rs | 0 src/apis/{ => v1}/middlewares/auth.rs | 2 +- src/apis/{ => v1}/middlewares/mod.rs | 0 src/apis/v1/mod.rs | 4 ++++ src/apis/{ => v1}/responses.rs | 0 src/apis/v1/routes.rs | 13 +++++++++++++ tests/api/asserts.rs | 6 +++--- tests/api/tests/mod.rs | 3 +-- tests/api/tests/{ => v1}/authentication.rs | 0 tests/api/tests/{ => v1}/context/auth_key.rs | 0 tests/api/tests/{ => v1}/context/mod.rs | 0 tests/api/tests/{ => v1}/context/stats.rs | 2 +- tests/api/tests/{ => v1}/context/torrent.rs | 4 ++-- tests/api/tests/{ => v1}/context/whitelist.rs | 0 tests/api/tests/v1/mod.rs | 2 ++ 38 files changed, 40 insertions(+), 27 deletions(-) rename src/apis/{ => v1}/context/auth_key/handlers.rs (91%) 
rename src/apis/{ => v1}/context/auth_key/mod.rs (100%) rename src/apis/{ => v1}/context/auth_key/resources.rs (100%) rename src/apis/{ => v1}/context/auth_key/responses.rs (88%) rename src/apis/{ => v1}/context/auth_key/routes.rs (100%) rename src/apis/{ => v1}/context/mod.rs (100%) rename src/apis/{ => v1}/context/stats/handlers.rs (100%) rename src/apis/{ => v1}/context/stats/mod.rs (100%) rename src/apis/{ => v1}/context/stats/resources.rs (100%) rename src/apis/{ => v1}/context/stats/responses.rs (100%) rename src/apis/{ => v1}/context/stats/routes.rs (100%) rename src/apis/{ => v1}/context/torrent/handlers.rs (96%) rename src/apis/{ => v1}/context/torrent/mod.rs (100%) rename src/apis/{ => v1}/context/torrent/resources/mod.rs (100%) rename src/apis/{ => v1}/context/torrent/resources/peer.rs (100%) rename src/apis/{ => v1}/context/torrent/resources/torrent.rs (96%) rename src/apis/{ => v1}/context/torrent/responses.rs (100%) rename src/apis/{ => v1}/context/torrent/routes.rs (100%) rename src/apis/{ => v1}/context/whitelist/handlers.rs (95%) rename src/apis/{ => v1}/context/whitelist/mod.rs (100%) rename src/apis/{ => v1}/context/whitelist/responses.rs (90%) rename src/apis/{ => v1}/context/whitelist/routes.rs (100%) rename src/apis/{ => v1}/middlewares/auth.rs (96%) rename src/apis/{ => v1}/middlewares/mod.rs (100%) create mode 100644 src/apis/v1/mod.rs rename src/apis/{ => v1}/responses.rs (100%) create mode 100644 src/apis/v1/routes.rs rename tests/api/tests/{ => v1}/authentication.rs (100%) rename tests/api/tests/{ => v1}/context/auth_key.rs (100%) rename tests/api/tests/{ => v1}/context/mod.rs (100%) rename tests/api/tests/{ => v1}/context/stats.rs (97%) rename tests/api/tests/{ => v1}/context/torrent.rs (98%) rename tests/api/tests/{ => v1}/context/whitelist.rs (100%) create mode 100644 tests/api/tests/v1/mod.rs diff --git a/src/apis/mod.rs b/src/apis/mod.rs index fd7fdb6e5..1bc257916 100644 --- a/src/apis/mod.rs +++ b/src/apis/mod.rs @@ -1,8 +1,6 @@ 
-pub mod context; -pub mod middlewares; -pub mod responses; pub mod routes; pub mod server; +pub mod v1; use serde::Deserialize; diff --git a/src/apis/routes.rs b/src/apis/routes.rs index c567e50da..9e33ca77e 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -2,18 +2,15 @@ use std::sync::Arc; use axum::{middleware, Router}; -use super::context::{auth_key, stats, torrent, whitelist}; -use super::middlewares::auth::auth; +use super::v1; +use super::v1::middlewares::auth::auth; use crate::tracker::Tracker; #[allow(clippy::needless_pass_by_value)] pub fn router(tracker: Arc) -> Router { let router = Router::new(); - let router = auth_key::routes::add(router, tracker.clone()); - let router = stats::routes::add(router, tracker.clone()); - let router = whitelist::routes::add(router, tracker.clone()); - let router = torrent::routes::add(router, tracker.clone()); + let router = v1::routes::add(router, tracker.clone()); router.layer(middleware::from_fn_with_state(tracker.config.clone(), auth)) } diff --git a/src/apis/context/auth_key/handlers.rs b/src/apis/v1/context/auth_key/handlers.rs similarity index 91% rename from src/apis/context/auth_key/handlers.rs rename to src/apis/v1/context/auth_key/handlers.rs index af78b3f4c..d21f08299 100644 --- a/src/apis/context/auth_key/handlers.rs +++ b/src/apis/v1/context/auth_key/handlers.rs @@ -9,8 +9,8 @@ use serde::Deserialize; use super::responses::{ auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, }; -use crate::apis::context::auth_key::resources::AuthKey; -use crate::apis::responses::{invalid_auth_key_param_response, ok_response}; +use crate::apis::v1::context::auth_key::resources::AuthKey; +use crate::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; use crate::tracker::auth::Key; use crate::tracker::Tracker; diff --git a/src/apis/context/auth_key/mod.rs b/src/apis/v1/context/auth_key/mod.rs similarity index 100% rename from 
src/apis/context/auth_key/mod.rs rename to src/apis/v1/context/auth_key/mod.rs diff --git a/src/apis/context/auth_key/resources.rs b/src/apis/v1/context/auth_key/resources.rs similarity index 100% rename from src/apis/context/auth_key/resources.rs rename to src/apis/v1/context/auth_key/resources.rs diff --git a/src/apis/context/auth_key/responses.rs b/src/apis/v1/context/auth_key/responses.rs similarity index 88% rename from src/apis/context/auth_key/responses.rs rename to src/apis/v1/context/auth_key/responses.rs index 8c1bf58dc..9b8fcebe2 100644 --- a/src/apis/context/auth_key/responses.rs +++ b/src/apis/v1/context/auth_key/responses.rs @@ -3,8 +3,8 @@ use std::error::Error; use axum::http::{header, StatusCode}; use axum::response::{IntoResponse, Response}; -use crate::apis::context::auth_key::resources::AuthKey; -use crate::apis::responses::unhandled_rejection_response; +use crate::apis::v1::context::auth_key::resources::AuthKey; +use crate::apis::v1::responses::unhandled_rejection_response; /// # Panics /// diff --git a/src/apis/context/auth_key/routes.rs b/src/apis/v1/context/auth_key/routes.rs similarity index 100% rename from src/apis/context/auth_key/routes.rs rename to src/apis/v1/context/auth_key/routes.rs diff --git a/src/apis/context/mod.rs b/src/apis/v1/context/mod.rs similarity index 100% rename from src/apis/context/mod.rs rename to src/apis/v1/context/mod.rs diff --git a/src/apis/context/stats/handlers.rs b/src/apis/v1/context/stats/handlers.rs similarity index 100% rename from src/apis/context/stats/handlers.rs rename to src/apis/v1/context/stats/handlers.rs diff --git a/src/apis/context/stats/mod.rs b/src/apis/v1/context/stats/mod.rs similarity index 100% rename from src/apis/context/stats/mod.rs rename to src/apis/v1/context/stats/mod.rs diff --git a/src/apis/context/stats/resources.rs b/src/apis/v1/context/stats/resources.rs similarity index 100% rename from src/apis/context/stats/resources.rs rename to src/apis/v1/context/stats/resources.rs 
diff --git a/src/apis/context/stats/responses.rs b/src/apis/v1/context/stats/responses.rs similarity index 100% rename from src/apis/context/stats/responses.rs rename to src/apis/v1/context/stats/responses.rs diff --git a/src/apis/context/stats/routes.rs b/src/apis/v1/context/stats/routes.rs similarity index 100% rename from src/apis/context/stats/routes.rs rename to src/apis/v1/context/stats/routes.rs diff --git a/src/apis/context/torrent/handlers.rs b/src/apis/v1/context/torrent/handlers.rs similarity index 96% rename from src/apis/context/torrent/handlers.rs rename to src/apis/v1/context/torrent/handlers.rs index 1a8280e75..fc816cdbf 100644 --- a/src/apis/context/torrent/handlers.rs +++ b/src/apis/v1/context/torrent/handlers.rs @@ -8,7 +8,7 @@ use serde::{de, Deserialize, Deserializer}; use super::resources::torrent::ListItem; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; -use crate::apis::responses::invalid_info_hash_param_response; +use crate::apis::v1::responses::invalid_info_hash_param_response; use crate::apis::InfoHashParam; use crate::protocol::info_hash::InfoHash; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; diff --git a/src/apis/context/torrent/mod.rs b/src/apis/v1/context/torrent/mod.rs similarity index 100% rename from src/apis/context/torrent/mod.rs rename to src/apis/v1/context/torrent/mod.rs diff --git a/src/apis/context/torrent/resources/mod.rs b/src/apis/v1/context/torrent/resources/mod.rs similarity index 100% rename from src/apis/context/torrent/resources/mod.rs rename to src/apis/v1/context/torrent/resources/mod.rs diff --git a/src/apis/context/torrent/resources/peer.rs b/src/apis/v1/context/torrent/resources/peer.rs similarity index 100% rename from src/apis/context/torrent/resources/peer.rs rename to src/apis/v1/context/torrent/resources/peer.rs diff --git a/src/apis/context/torrent/resources/torrent.rs 
b/src/apis/v1/context/torrent/resources/torrent.rs similarity index 96% rename from src/apis/context/torrent/resources/torrent.rs rename to src/apis/v1/context/torrent/resources/torrent.rs index 1099dc923..48f4c58f0 100644 --- a/src/apis/context/torrent/resources/torrent.rs +++ b/src/apis/v1/context/torrent/resources/torrent.rs @@ -75,8 +75,8 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use super::Torrent; - use crate::apis::context::torrent::resources::peer::Peer; - use crate::apis::context::torrent::resources::torrent::ListItem; + use crate::apis::v1::context::torrent::resources::peer::Peer; + use crate::apis::v1::context::torrent::resources::torrent::ListItem; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; diff --git a/src/apis/context/torrent/responses.rs b/src/apis/v1/context/torrent/responses.rs similarity index 100% rename from src/apis/context/torrent/responses.rs rename to src/apis/v1/context/torrent/responses.rs diff --git a/src/apis/context/torrent/routes.rs b/src/apis/v1/context/torrent/routes.rs similarity index 100% rename from src/apis/context/torrent/routes.rs rename to src/apis/v1/context/torrent/routes.rs diff --git a/src/apis/context/whitelist/handlers.rs b/src/apis/v1/context/whitelist/handlers.rs similarity index 95% rename from src/apis/context/whitelist/handlers.rs rename to src/apis/v1/context/whitelist/handlers.rs index c1e90a509..325f20e26 100644 --- a/src/apis/context/whitelist/handlers.rs +++ b/src/apis/v1/context/whitelist/handlers.rs @@ -7,7 +7,7 @@ use axum::response::Response; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, }; -use crate::apis::responses::{invalid_info_hash_param_response, ok_response}; +use crate::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; use crate::apis::InfoHashParam; use 
crate::protocol::info_hash::InfoHash; use crate::tracker::Tracker; diff --git a/src/apis/context/whitelist/mod.rs b/src/apis/v1/context/whitelist/mod.rs similarity index 100% rename from src/apis/context/whitelist/mod.rs rename to src/apis/v1/context/whitelist/mod.rs diff --git a/src/apis/context/whitelist/responses.rs b/src/apis/v1/context/whitelist/responses.rs similarity index 90% rename from src/apis/context/whitelist/responses.rs rename to src/apis/v1/context/whitelist/responses.rs index dd2727898..197d4c90b 100644 --- a/src/apis/context/whitelist/responses.rs +++ b/src/apis/v1/context/whitelist/responses.rs @@ -2,7 +2,7 @@ use std::error::Error; use axum::response::Response; -use crate::apis::responses::unhandled_rejection_response; +use crate::apis::v1::responses::unhandled_rejection_response; #[must_use] pub fn failed_to_remove_torrent_from_whitelist_response(e: E) -> Response { diff --git a/src/apis/context/whitelist/routes.rs b/src/apis/v1/context/whitelist/routes.rs similarity index 100% rename from src/apis/context/whitelist/routes.rs rename to src/apis/v1/context/whitelist/routes.rs diff --git a/src/apis/middlewares/auth.rs b/src/apis/v1/middlewares/auth.rs similarity index 96% rename from src/apis/middlewares/auth.rs rename to src/apis/v1/middlewares/auth.rs index f2745d42e..e729072b6 100644 --- a/src/apis/middlewares/auth.rs +++ b/src/apis/v1/middlewares/auth.rs @@ -7,7 +7,7 @@ use axum::response::{IntoResponse, Response}; use serde::Deserialize; use torrust_tracker_configuration::{Configuration, HttpApi}; -use crate::apis::responses::unhandled_rejection_response; +use crate::apis::v1::responses::unhandled_rejection_response; #[derive(Deserialize, Debug)] pub struct QueryParams { diff --git a/src/apis/middlewares/mod.rs b/src/apis/v1/middlewares/mod.rs similarity index 100% rename from src/apis/middlewares/mod.rs rename to src/apis/v1/middlewares/mod.rs diff --git a/src/apis/v1/mod.rs b/src/apis/v1/mod.rs new file mode 100644 index 
000000000..e87984b8e --- /dev/null +++ b/src/apis/v1/mod.rs @@ -0,0 +1,4 @@ +pub mod context; +pub mod middlewares; +pub mod responses; +pub mod routes; diff --git a/src/apis/responses.rs b/src/apis/v1/responses.rs similarity index 100% rename from src/apis/responses.rs rename to src/apis/v1/responses.rs diff --git a/src/apis/v1/routes.rs b/src/apis/v1/routes.rs new file mode 100644 index 000000000..9bac47937 --- /dev/null +++ b/src/apis/v1/routes.rs @@ -0,0 +1,13 @@ +use std::sync::Arc; + +use axum::Router; + +use super::context::{auth_key, stats, torrent, whitelist}; +use crate::tracker::Tracker; + +pub fn add(router: Router, tracker: Arc) -> Router { + let router = auth_key::routes::add(router, tracker.clone()); + let router = stats::routes::add(router, tracker.clone()); + let router = whitelist::routes::add(router, tracker.clone()); + torrent::routes::add(router, tracker) +} diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index c7567e6fe..d37bcdbb4 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -1,9 +1,9 @@ // code-review: should we use macros to return the exact line where the assert fails? 
use reqwest::Response; -use torrust_tracker::apis::context::auth_key::resources::AuthKey; -use torrust_tracker::apis::context::stats::resources::Stats; -use torrust_tracker::apis::context::torrent::resources::torrent::{ListItem, Torrent}; +use torrust_tracker::apis::v1::context::auth_key::resources::AuthKey; +use torrust_tracker::apis::v1::context::stats::resources::Stats; +use torrust_tracker::apis::v1::context::torrent::resources::torrent::{ListItem, Torrent}; // Resource responses diff --git a/tests/api/tests/mod.rs b/tests/api/tests/mod.rs index 38b4a2b37..c795e0032 100644 --- a/tests/api/tests/mod.rs +++ b/tests/api/tests/mod.rs @@ -1,4 +1,3 @@ -pub mod authentication; pub mod configuration; -pub mod context; pub mod fixtures; +pub mod v1; diff --git a/tests/api/tests/authentication.rs b/tests/api/tests/v1/authentication.rs similarity index 100% rename from tests/api/tests/authentication.rs rename to tests/api/tests/v1/authentication.rs diff --git a/tests/api/tests/context/auth_key.rs b/tests/api/tests/v1/context/auth_key.rs similarity index 100% rename from tests/api/tests/context/auth_key.rs rename to tests/api/tests/v1/context/auth_key.rs diff --git a/tests/api/tests/context/mod.rs b/tests/api/tests/v1/context/mod.rs similarity index 100% rename from tests/api/tests/context/mod.rs rename to tests/api/tests/v1/context/mod.rs diff --git a/tests/api/tests/context/stats.rs b/tests/api/tests/v1/context/stats.rs similarity index 97% rename from tests/api/tests/context/stats.rs rename to tests/api/tests/v1/context/stats.rs index 99ae405b7..2d9423deb 100644 --- a/tests/api/tests/context/stats.rs +++ b/tests/api/tests/v1/context/stats.rs @@ -1,6 +1,6 @@ use std::str::FromStr; -use torrust_tracker::apis::context::stats::resources::Stats; +use torrust_tracker::apis::v1::context::stats::resources::Stats; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; diff --git a/tests/api/tests/context/torrent.rs 
b/tests/api/tests/v1/context/torrent.rs similarity index 98% rename from tests/api/tests/context/torrent.rs rename to tests/api/tests/v1/context/torrent.rs index 998c2afaf..cbe216d6c 100644 --- a/tests/api/tests/context/torrent.rs +++ b/tests/api/tests/v1/context/torrent.rs @@ -1,7 +1,7 @@ use std::str::FromStr; -use torrust_tracker::apis::context::torrent::resources::peer::Peer; -use torrust_tracker::apis::context::torrent::resources::torrent::{self, Torrent}; +use torrust_tracker::apis::v1::context::torrent::resources::peer::Peer; +use torrust_tracker::apis::v1::context::torrent::resources::torrent::{self, Torrent}; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; diff --git a/tests/api/tests/context/whitelist.rs b/tests/api/tests/v1/context/whitelist.rs similarity index 100% rename from tests/api/tests/context/whitelist.rs rename to tests/api/tests/v1/context/whitelist.rs diff --git a/tests/api/tests/v1/mod.rs b/tests/api/tests/v1/mod.rs new file mode 100644 index 000000000..6a8d9709d --- /dev/null +++ b/tests/api/tests/v1/mod.rs @@ -0,0 +1,2 @@ +pub mod authentication; +pub mod context; From d9a4266a8344db4078d189ecf069be49d125171e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 12 Mar 2023 11:58:09 +0000 Subject: [PATCH 0473/1003] feat(api): [#238] API also served with version prefix /api/v1/ For the time being we accept both versions `/api/` and `/api/v1`: http://0.0.0.0:1212/api/stats?token=MyAccessToken http://0.0.0.0:1212/api/v1/stats?token=MyAccessToken --- src/apis/routes.rs | 4 +++- src/apis/v1/context/auth_key/routes.rs | 10 +++++----- src/apis/v1/context/stats/routes.rs | 4 ++-- src/apis/v1/context/torrent/routes.rs | 6 +++--- src/apis/v1/context/whitelist/routes.rs | 10 ++++++---- src/apis/v1/routes.rs | 20 ++++++++++++++----- tests/api/mod.rs | 4 +--- tests/api/tests/v1/mod.rs | 2 -- tests/api/{ => v1}/asserts.rs | 0 tests/api/{ => v1}/client.rs | 4 ++-- tests/api/v1/mod.rs | 3 +++ 
.../{tests/v1 => v1/tests}/authentication.rs | 4 ++-- tests/api/{ => v1}/tests/configuration.rs | 0 .../v1 => v1/tests}/context/auth_key.rs | 10 +++++----- .../api/{tests/v1 => v1/tests}/context/mod.rs | 0 .../{tests/v1 => v1/tests}/context/stats.rs | 4 ++-- .../{tests/v1 => v1/tests}/context/torrent.rs | 10 +++++----- .../v1 => v1/tests}/context/whitelist.rs | 12 +++++------ tests/api/{ => v1}/tests/fixtures.rs | 0 tests/api/{ => v1}/tests/mod.rs | 3 ++- 20 files changed, 62 insertions(+), 48 deletions(-) delete mode 100644 tests/api/tests/v1/mod.rs rename tests/api/{ => v1}/asserts.rs (100%) rename tests/api/{ => v1}/client.rs (97%) create mode 100644 tests/api/v1/mod.rs rename tests/api/{tests/v1 => v1/tests}/authentication.rs (95%) rename tests/api/{ => v1}/tests/configuration.rs (100%) rename tests/api/{tests/v1 => v1/tests}/context/auth_key.rs (99%) rename tests/api/{tests/v1 => v1/tests}/context/mod.rs (100%) rename tests/api/{tests/v1 => v1/tests}/context/stats.rs (94%) rename tests/api/{tests/v1 => v1/tests}/context/torrent.rs (97%) rename tests/api/{tests/v1 => v1/tests}/context/whitelist.rs (97%) rename tests/api/{ => v1}/tests/fixtures.rs (100%) rename tests/api/{ => v1}/tests/mod.rs (50%) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 9e33ca77e..2545d6b88 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -10,7 +10,9 @@ use crate::tracker::Tracker; pub fn router(tracker: Arc) -> Router { let router = Router::new(); - let router = v1::routes::add(router, tracker.clone()); + let prefix = "/api"; + + let router = v1::routes::add(prefix, router, tracker.clone()); router.layer(middleware::from_fn_with_state(tracker.config.clone(), auth)) } diff --git a/src/apis/v1/context/auth_key/routes.rs b/src/apis/v1/context/auth_key/routes.rs index 2a4f5b9dd..9b155c2a5 100644 --- a/src/apis/v1/context/auth_key/routes.rs +++ b/src/apis/v1/context/auth_key/routes.rs @@ -6,20 +6,20 @@ use axum::Router; use super::handlers::{delete_auth_key_handler, 
generate_auth_key_handler, reload_keys_handler}; use crate::tracker::Tracker; -pub fn add(router: Router, tracker: Arc) -> Router { +pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { // Keys router .route( // code-review: Axum does not allow two routes with the same path but different path variable name. // In the new major API version, `seconds_valid` should be a POST form field so that we will have two paths: - // POST /api/key - // DELETE /api/key/:key - "/api/key/:seconds_valid_or_key", + // POST /key + // DELETE /key/:key + &format!("{prefix}/key/:seconds_valid_or_key"), post(generate_auth_key_handler) .with_state(tracker.clone()) .delete(delete_auth_key_handler) .with_state(tracker.clone()), ) // Keys command - .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker)) + .route(&format!("{prefix}/keys/reload"), get(reload_keys_handler).with_state(tracker)) } diff --git a/src/apis/v1/context/stats/routes.rs b/src/apis/v1/context/stats/routes.rs index 8791ed25a..07f88aa70 100644 --- a/src/apis/v1/context/stats/routes.rs +++ b/src/apis/v1/context/stats/routes.rs @@ -6,6 +6,6 @@ use axum::Router; use super::handlers::get_stats_handler; use crate::tracker::Tracker; -pub fn add(router: Router, tracker: Arc) -> Router { - router.route("/api/stats", get(get_stats_handler).with_state(tracker)) +pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { + router.route(&format!("{prefix}/stats"), get(get_stats_handler).with_state(tracker)) } diff --git a/src/apis/v1/context/torrent/routes.rs b/src/apis/v1/context/torrent/routes.rs index 234f17223..00faa9665 100644 --- a/src/apis/v1/context/torrent/routes.rs +++ b/src/apis/v1/context/torrent/routes.rs @@ -6,12 +6,12 @@ use axum::Router; use super::handlers::{get_torrent_handler, get_torrents_handler}; use crate::tracker::Tracker; -pub fn add(router: Router, tracker: Arc) -> Router { +pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { // Torrents router .route( - 
"/api/torrent/:info_hash", + &format!("{prefix}/torrent/:info_hash"), get(get_torrent_handler).with_state(tracker.clone()), ) - .route("/api/torrents", get(get_torrents_handler).with_state(tracker)) + .route(&format!("{prefix}/torrents"), get(get_torrents_handler).with_state(tracker)) } diff --git a/src/apis/v1/context/whitelist/routes.rs b/src/apis/v1/context/whitelist/routes.rs index 1349f8bc1..06011b462 100644 --- a/src/apis/v1/context/whitelist/routes.rs +++ b/src/apis/v1/context/whitelist/routes.rs @@ -6,17 +6,19 @@ use axum::Router; use super::handlers::{add_torrent_to_whitelist_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler}; use crate::tracker::Tracker; -pub fn add(router: Router, tracker: Arc) -> Router { +pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { + let prefix = format!("{prefix}/whitelist"); + router // Whitelisted torrents .route( - "/api/whitelist/:info_hash", + &format!("{prefix}/:info_hash"), post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), ) .route( - "/api/whitelist/:info_hash", + &format!("{prefix}/:info_hash"), delete(remove_torrent_from_whitelist_handler).with_state(tracker.clone()), ) // Whitelist commands - .route("/api/whitelist/reload", get(reload_whitelist_handler).with_state(tracker)) + .route(&format!("{prefix}/reload"), get(reload_whitelist_handler).with_state(tracker)) } diff --git a/src/apis/v1/routes.rs b/src/apis/v1/routes.rs index 9bac47937..d45319c4b 100644 --- a/src/apis/v1/routes.rs +++ b/src/apis/v1/routes.rs @@ -5,9 +5,19 @@ use axum::Router; use super::context::{auth_key, stats, torrent, whitelist}; use crate::tracker::Tracker; -pub fn add(router: Router, tracker: Arc) -> Router { - let router = auth_key::routes::add(router, tracker.clone()); - let router = stats::routes::add(router, tracker.clone()); - let router = whitelist::routes::add(router, tracker.clone()); - torrent::routes::add(router, tracker) +pub fn add(prefix: &str, router: Router, tracker: Arc) 
-> Router { + // Without `v1` prefix. + // We keep the old API endpoints without `v1` prefix for backward compatibility. + // todo: remove when the torrust index backend is using the `v1` prefix. + let router = auth_key::routes::add(prefix, router, tracker.clone()); + let router = stats::routes::add(prefix, router, tracker.clone()); + let router = whitelist::routes::add(prefix, router, tracker.clone()); + let router = torrent::routes::add(prefix, router, tracker.clone()); + + // With `v1` prefix + let v1_prefix = format!("{prefix}/v1"); + let router = auth_key::routes::add(&v1_prefix, router, tracker.clone()); + let router = stats::routes::add(&v1_prefix, router, tracker.clone()); + let router = whitelist::routes::add(&v1_prefix, router, tracker.clone()); + torrent::routes::add(&v1_prefix, router, tracker) } diff --git a/tests/api/mod.rs b/tests/api/mod.rs index f59210b22..7022da9b4 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -2,11 +2,9 @@ use std::sync::Arc; use torrust_tracker::tracker::Tracker; -pub mod asserts; -pub mod client; pub mod connection_info; pub mod test_environment; -pub mod tests; +pub mod v1; /// It forces a database error by dropping all tables. /// That makes any query fail. 
diff --git a/tests/api/tests/v1/mod.rs b/tests/api/tests/v1/mod.rs deleted file mode 100644 index 6a8d9709d..000000000 --- a/tests/api/tests/v1/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod authentication; -pub mod context; diff --git a/tests/api/asserts.rs b/tests/api/v1/asserts.rs similarity index 100% rename from tests/api/asserts.rs rename to tests/api/v1/asserts.rs diff --git a/tests/api/client.rs b/tests/api/v1/client.rs similarity index 97% rename from tests/api/client.rs rename to tests/api/v1/client.rs index f99805570..d5cdf69f6 100644 --- a/tests/api/client.rs +++ b/tests/api/v1/client.rs @@ -1,6 +1,6 @@ use reqwest::Response; -use super::connection_info::ConnectionInfo; +use crate::api::connection_info::ConnectionInfo; use crate::common::http::{Query, QueryParam, ReqwestQuery}; /// API Client @@ -13,7 +13,7 @@ impl Client { pub fn new(connection_info: ConnectionInfo) -> Self { Self { connection_info, - base_path: "/api/".to_string(), + base_path: "/api/v1/".to_string(), } } diff --git a/tests/api/v1/mod.rs b/tests/api/v1/mod.rs new file mode 100644 index 000000000..b31e5fe49 --- /dev/null +++ b/tests/api/v1/mod.rs @@ -0,0 +1,3 @@ +pub mod asserts; +pub mod client; +pub mod tests; diff --git a/tests/api/tests/v1/authentication.rs b/tests/api/v1/tests/authentication.rs similarity index 95% rename from tests/api/tests/v1/authentication.rs rename to tests/api/v1/tests/authentication.rs index 5183c8909..5be96e078 100644 --- a/tests/api/tests/v1/authentication.rs +++ b/tests/api/v1/tests/authentication.rs @@ -1,8 +1,8 @@ use torrust_tracker_test_helpers::configuration; -use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; -use crate::api::client::Client; use crate::api::test_environment::running_test_environment; +use crate::api::v1::asserts::{assert_token_not_valid, assert_unauthorized}; +use crate::api::v1::client::Client; use crate::common::http::{Query, QueryParam}; #[tokio::test] diff --git a/tests/api/tests/configuration.rs 
b/tests/api/v1/tests/configuration.rs similarity index 100% rename from tests/api/tests/configuration.rs rename to tests/api/v1/tests/configuration.rs diff --git a/tests/api/tests/v1/context/auth_key.rs b/tests/api/v1/tests/context/auth_key.rs similarity index 99% rename from tests/api/tests/v1/context/auth_key.rs rename to tests/api/v1/tests/context/auth_key.rs index ee7121615..814afeacf 100644 --- a/tests/api/tests/v1/context/auth_key.rs +++ b/tests/api/v1/tests/context/auth_key.rs @@ -3,14 +3,14 @@ use std::time::Duration; use torrust_tracker::tracker::auth::Key; use torrust_tracker_test_helpers::configuration; -use crate::api::asserts::{ - assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, - assert_invalid_auth_key_param, assert_invalid_key_duration_param, assert_ok, assert_token_not_valid, assert_unauthorized, -}; -use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::force_database_error; use crate::api::test_environment::running_test_environment; +use crate::api::v1::asserts::{ + assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, + assert_invalid_auth_key_param, assert_invalid_key_duration_param, assert_ok, assert_token_not_valid, assert_unauthorized, +}; +use crate::api::v1::client::Client; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { diff --git a/tests/api/tests/v1/context/mod.rs b/tests/api/v1/tests/context/mod.rs similarity index 100% rename from tests/api/tests/v1/context/mod.rs rename to tests/api/v1/tests/context/mod.rs diff --git a/tests/api/tests/v1/context/stats.rs b/tests/api/v1/tests/context/stats.rs similarity index 94% rename from tests/api/tests/v1/context/stats.rs rename to tests/api/v1/tests/context/stats.rs index 2d9423deb..6f850a62c 100644 --- a/tests/api/tests/v1/context/stats.rs +++ 
b/tests/api/v1/tests/context/stats.rs @@ -4,10 +4,10 @@ use torrust_tracker::apis::v1::context::stats::resources::Stats; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; -use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; -use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::test_environment::running_test_environment; +use crate::api::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; +use crate::api::v1::client::Client; use crate::common::fixtures::PeerBuilder; #[tokio::test] diff --git a/tests/api/tests/v1/context/torrent.rs b/tests/api/v1/tests/context/torrent.rs similarity index 97% rename from tests/api/tests/v1/context/torrent.rs rename to tests/api/v1/tests/context/torrent.rs index cbe216d6c..8c7031f0e 100644 --- a/tests/api/tests/v1/context/torrent.rs +++ b/tests/api/v1/tests/context/torrent.rs @@ -5,14 +5,14 @@ use torrust_tracker::apis::v1::context::torrent::resources::torrent::{self, Torr use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; -use crate::api::asserts::{ +use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::api::test_environment::running_test_environment; +use crate::api::v1::asserts::{ assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, }; -use crate::api::client::Client; -use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::api::test_environment::running_test_environment; -use crate::api::tests::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; +use crate::api::v1::client::Client; +use 
crate::api::v1::tests::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; use crate::common::fixtures::PeerBuilder; use crate::common::http::{Query, QueryParam}; diff --git a/tests/api/tests/v1/context/whitelist.rs b/tests/api/v1/tests/context/whitelist.rs similarity index 97% rename from tests/api/tests/v1/context/whitelist.rs rename to tests/api/v1/tests/context/whitelist.rs index 29ea573c0..05cc8c4a7 100644 --- a/tests/api/tests/v1/context/whitelist.rs +++ b/tests/api/v1/tests/context/whitelist.rs @@ -3,15 +3,15 @@ use std::str::FromStr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; -use crate::api::asserts::{ - assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, - assert_invalid_infohash_param, assert_not_found, assert_ok, assert_token_not_valid, assert_unauthorized, -}; -use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::force_database_error; use crate::api::test_environment::running_test_environment; -use crate::api::tests::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; +use crate::api::v1::asserts::{ + assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, + assert_invalid_infohash_param, assert_not_found, assert_ok, assert_token_not_valid, assert_unauthorized, +}; +use crate::api::v1::client::Client; +use crate::api::v1::tests::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { diff --git a/tests/api/tests/fixtures.rs b/tests/api/v1/tests/fixtures.rs similarity index 100% rename from tests/api/tests/fixtures.rs rename to tests/api/v1/tests/fixtures.rs diff --git a/tests/api/tests/mod.rs 
b/tests/api/v1/tests/mod.rs similarity index 50% rename from tests/api/tests/mod.rs rename to tests/api/v1/tests/mod.rs index c795e0032..38b4a2b37 100644 --- a/tests/api/tests/mod.rs +++ b/tests/api/v1/tests/mod.rs @@ -1,3 +1,4 @@ +pub mod authentication; pub mod configuration; +pub mod context; pub mod fixtures; -pub mod v1; From 7f4479ac9ad3a0b08df32718cdabb4f118aff5ac Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 12 Mar 2023 12:34:03 +0000 Subject: [PATCH 0474/1003] refactor(tests): one binary --- tests/http_tracker.rs | 1443 ----------------- tests/integration.rs | 7 + tests/{ => servers}/api/connection_info.rs | 0 tests/{ => servers}/api/mod.rs | 0 tests/{ => servers}/api/test_environment.rs | 0 tests/{ => servers}/api/v1/asserts.rs | 0 tests/{ => servers}/api/v1/client.rs | 2 +- .../api/v1/contract}/authentication.rs | 6 +- .../api/v1/contract}/configuration.rs | 2 +- .../api/v1/contract}/context/auth_key.rs | 10 +- .../api/v1/contract}/context/mod.rs | 0 .../api/v1/contract}/context/stats.rs | 8 +- .../api/v1/contract}/context/torrent.rs | 16 +- .../api/v1/contract}/context/whitelist.rs | 14 +- .../api/v1/contract}/fixtures.rs | 0 .../tests => servers/api/v1/contract}/mod.rs | 0 tests/{ => servers}/api/v1/mod.rs | 2 +- tests/{ => servers}/http/asserts.rs | 2 +- tests/{ => servers}/http/client.rs | 0 tests/{ => servers}/http/connection_info.rs | 0 tests/{ => servers}/http/mod.rs | 1 + tests/{ => servers}/http/requests/announce.rs | 2 +- tests/{ => servers}/http/requests/mod.rs | 0 tests/{ => servers}/http/requests/scrape.rs | 2 +- .../{ => servers}/http/responses/announce.rs | 0 tests/{ => servers}/http/responses/error.rs | 0 tests/{ => servers}/http/responses/mod.rs | 0 tests/{ => servers}/http/responses/scrape.rs | 2 +- tests/{ => servers}/http/test_environment.rs | 0 tests/servers/http/v1/contract.rs | 1425 ++++++++++++++++ tests/servers/http/v1/mod.rs | 1 + tests/servers/mod.rs | 5 + tests/{ => servers}/udp/asserts.rs | 0 tests/{ => 
servers}/udp/client.rs | 2 +- tests/servers/udp/contract.rs | 160 ++ tests/{ => servers}/udp/mod.rs | 1 + tests/{ => servers}/udp/test_environment.rs | 0 tests/tracker_api.rs | 7 - tests/udp_tracker.rs | 173 -- 39 files changed, 1637 insertions(+), 1656 deletions(-) delete mode 100644 tests/http_tracker.rs create mode 100644 tests/integration.rs rename tests/{ => servers}/api/connection_info.rs (100%) rename tests/{ => servers}/api/mod.rs (100%) rename tests/{ => servers}/api/test_environment.rs (100%) rename tests/{ => servers}/api/v1/asserts.rs (100%) rename tests/{ => servers}/api/v1/client.rs (98%) rename tests/{api/v1/tests => servers/api/v1/contract}/authentication.rs (92%) rename tests/{api/v1/tests => servers/api/v1/contract}/configuration.rs (86%) rename tests/{api/v1/tests => servers/api/v1/contract}/context/auth_key.rs (96%) rename tests/{api/v1/tests => servers/api/v1/contract}/context/mod.rs (100%) rename tests/{api/v1/tests => servers/api/v1/contract}/context/stats.rs (86%) rename tests/{api/v1/tests => servers/api/v1/contract}/context/torrent.rs (95%) rename tests/{api/v1/tests => servers/api/v1/contract}/context/whitelist.rs (95%) rename tests/{api/v1/tests => servers/api/v1/contract}/fixtures.rs (100%) rename tests/{api/v1/tests => servers/api/v1/contract}/mod.rs (100%) rename tests/{ => servers}/api/v1/mod.rs (64%) rename tests/{ => servers}/http/asserts.rs (99%) rename tests/{ => servers}/http/client.rs (100%) rename tests/{ => servers}/http/connection_info.rs (100%) rename tests/{ => servers}/http/mod.rs (98%) rename tests/{ => servers}/http/requests/announce.rs (99%) rename tests/{ => servers}/http/requests/mod.rs (100%) rename tests/{ => servers}/http/requests/scrape.rs (97%) rename tests/{ => servers}/http/responses/announce.rs (100%) rename tests/{ => servers}/http/responses/error.rs (100%) rename tests/{ => servers}/http/responses/mod.rs (100%) rename tests/{ => servers}/http/responses/scrape.rs (99%) rename tests/{ => 
servers}/http/test_environment.rs (100%) create mode 100644 tests/servers/http/v1/contract.rs create mode 100644 tests/servers/http/v1/mod.rs create mode 100644 tests/servers/mod.rs rename tests/{ => servers}/udp/asserts.rs (100%) rename tests/{ => servers}/udp/client.rs (98%) create mode 100644 tests/servers/udp/contract.rs rename tests/{ => servers}/udp/mod.rs (91%) rename tests/{ => servers}/udp/test_environment.rs (100%) delete mode 100644 tests/tracker_api.rs delete mode 100644 tests/udp_tracker.rs diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs deleted file mode 100644 index 730da93d5..000000000 --- a/tests/http_tracker.rs +++ /dev/null @@ -1,1443 +0,0 @@ -/// Integration tests for HTTP tracker server -/// -/// ```text -/// cargo test `http_tracker_server` -- --nocapture -/// ``` -mod common; -mod http; - -pub type V1 = torrust_tracker::http::v1::launcher::Launcher; - -mod http_tracker { - - mod v1 { - - use torrust_tracker_test_helpers::configuration; - - use crate::http::test_environment::running_test_environment; - use crate::V1; - - #[tokio::test] - async fn test_environment_should_be_started_and_stopped() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - test_env.stop().await; - } - - mod for_all_config_modes { - - mod and_running_on_reverse_proxy { - use torrust_tracker_test_helpers::configuration; - - use crate::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; - use crate::http::client::Client; - use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::V1; - - #[tokio::test] - async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { - // If the tracker is running behind a reverse proxy, the peer IP is the - // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. 
- - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - - let params = QueryBuilder::default().query().params(); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - - let params = QueryBuilder::default().query().params(); - - let response = Client::new(*test_env.bind_address()) - .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") - .await; - - assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; - - test_env.stop().await; - } - } - - mod receiving_an_announce_request { - - // Announce request documentation: - // - // BEP 03. The BitTorrent Protocol Specification - // https://www.bittorrent.org/beps/bep_0003.html - // - // BEP 23. 
Tracker Returns Compact Peer Lists - // https://www.bittorrent.org/beps/bep_0023.html - // - // Vuze (bittorrent client) docs: - // https://wiki.vuze.com/w/Announce - - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::str::FromStr; - - use local_ip_address::local_ip; - use reqwest::Response; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; - use crate::http::asserts::{ - assert_announce_response, assert_bad_announce_request_error_response, - assert_cannot_parse_query_param_error_response, assert_cannot_parse_query_params_error_response, - assert_compact_announce_response, assert_empty_announce_response, assert_is_announce_response, - assert_missing_query_params_for_announce_request_error_response, - }; - use crate::http::client::Client; - use crate::http::requests::announce::{Compact, QueryBuilder}; - use crate::http::responses; - use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; - use crate::http::test_environment::running_test_environment; - use crate::V1; - - #[tokio::test] - async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - params.remove_optional_params(); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_is_announce_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_url_query_component_is_empty() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let response = Client::new(*test_env.bind_address()).get("announce").await; - - assert_missing_query_params_for_announce_request_error_response(response).await; - - 
test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_url_query_parameters_are_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let invalid_query_param = "a=b=c"; - - let response = Client::new(*test_env.bind_address()) - .get(&format!("announce?{invalid_query_param}")) - .await; - - assert_cannot_parse_query_param_error_response(response, "invalid param a=b=c").await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_a_mandatory_field_is_missing() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - // Without `info_hash` param - - let mut params = QueryBuilder::default().query().params(); - - params.info_hash = None; - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "missing param info_hash").await; - - // Without `peer_id` param - - let mut params = QueryBuilder::default().query().params(); - - params.peer_id = None; - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "missing param peer_id").await; - - // Without `port` param - - let mut params = QueryBuilder::default().query().params(); - - params.port = None; - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "missing param port").await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - for invalid_value in &invalid_info_hashes() { - params.set("info_hash", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - 
- assert_cannot_parse_query_params_error_response(response, "").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_fail_when_the_peer_address_param_is_invalid() { - // AnnounceQuery does not even contain the `peer_addr` - // The peer IP is obtained in two ways: - // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. - // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. - - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_is_announce_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_downloaded_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("downloaded", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "invalid param value").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_uploaded_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("uploaded", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "invalid param value").await; - } - - test_env.stop().await; - 
} - - #[tokio::test] - async fn should_fail_when_the_peer_id_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = [ - "0", - "-1", - "1.1", - "a", - "-qB0000000000000000", // 19 bytes - "-qB000000000000000000", // 21 bytes - ]; - - for invalid_value in invalid_values { - params.set("peer_id", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "invalid param value").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_port_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("port", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "invalid param value").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_left_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("left", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "invalid param value").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_event_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = 
QueryBuilder::default().query().params(); - - let invalid_values = [ - "0", - "-1", - "1.1", - "a", - "Started", // It should be lowercase to be valid: `started` - "Stopped", // It should be lowercase to be valid: `stopped` - "Completed", // It should be lowercase to be valid: `completed` - ]; - - for invalid_value in invalid_values { - params.set("event", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "invalid param value").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_compact_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("compact", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "invalid param value").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) - .query(), - ) - .await; - - assert_announce_response( - response, - &Announce { - complete: 1, // the peer for this test - incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, - peers: vec![], - }, - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_list_of_previously_announced_peers() { - let test_env = 
running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); - - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; - - // Announce the new Peer 2. This new peer is non included on the response peer list - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .query(), - ) - .await; - - // It should only contain the previously announced peer - assert_announce_response( - response, - &Announce { - complete: 2, - incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, - peers: vec![DictionaryPeer::from(previously_announced_peer)], - }, - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - // Announce a peer using IPV4 - let peer_using_ipv4 = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) - .build(); - test_env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; - - // Announce a peer using IPV6 - let peer_using_ipv6 = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .with_peer_addr(&SocketAddr::new( - IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - 8080, - )) - .build(); - 
test_env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; - - // Announce the new Peer. - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000003")) - .query(), - ) - .await; - - // The newly announced peer is not included on the response peer list, - // but all the previously announced peers should be included regardless the IP version they are using. - assert_announce_response( - response, - &Announce { - complete: 3, - incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, - peers: vec![DictionaryPeer::from(peer_using_ipv4), DictionaryPeer::from(peer_using_ipv6)], - }, - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let peer = PeerBuilder::default().build(); - - // Add a peer - test_env.add_torrent_peer(&info_hash, &peer).await; - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer.peer_id) - .query(); - - assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); - - let response = Client::new(*test_env.bind_address()).announce(&announce_query).await; - - assert_empty_announce_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_compact_response() { - // Tracker Returns Compact Peer Lists - // https://www.bittorrent.org/beps/bep_0023.html - - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - // Peer 1 - let 
previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); - - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; - - // Announce the new Peer 2 accepting compact responses - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .with_compact(Compact::Accepted) - .query(), - ) - .await; - - let expected_response = responses::announce::Compact { - complete: 2, - incomplete: 0, - interval: 120, - min_interval: 120, - peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), - }; - - assert_compact_announce_response(response, &expected_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_return_the_compact_response_by_default() { - // code-review: the HTTP tracker does not return the compact response by default if the "compact" - // param is not provided in the announce URL. The BEP 23 suggest to do so. 
- - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); - - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; - - // Announce the new Peer 2 without passing the "compact" param - // By default it should respond with the compact peer list - // https://www.bittorrent.org/beps/bep_0023.html - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .without_compact() - .query(), - ) - .await; - - assert!(!is_a_compact_announce_response(response).await); - - test_env.stop().await; - } - - async fn is_a_compact_announce_response(response: Response) -> bool { - let bytes = response.bytes().await.unwrap(); - let compact_announce = serde_bencode::from_bytes::(&bytes); - compact_announce.is_ok() - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp4_connections_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_connections_handled, 1); - - 
drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { - // The tracker ignores the peer address in the request param. It uses the client remote ip address. - - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) - .query(), - ) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_connections_handled, 0); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp4_announces_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_announces_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() - { - // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) - .query(), - ) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_announces_handled, 0); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let client_ip = local_ip().unwrap(); - - let client = Client::bind(*test_env.bind_address(), client_ip); - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); - - client.announce(&announce_query).await; - - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; - - assert_eq!(peer_addr.ip(), client_ip); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - - test_env.stop().await; - } - - #[tokio::test] - async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( - ) { - /* We assume that both the client and tracker share the same public IP. 
- - client <-> tracker <-> Internet - 127.0.0.1 external_ip = "2.137.87.41" - */ - - let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( - IpAddr::from_str("2.137.87.41").unwrap(), - )) - .await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); - let client_ip = loopback_ip; - - let client = Client::bind(*test_env.bind_address(), client_ip); - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); - - client.announce(&announce_query).await; - - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; - - assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - - test_env.stop().await; - } - - #[tokio::test] - async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( - ) { - /* We assume that both the client and tracker share the same public IP. 
- - client <-> tracker <-> Internet - ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" - */ - - let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( - IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), - )) - .await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); - let client_ip = loopback_ip; - - let client = Client::bind(*test_env.bind_address(), client_ip); - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); - - client.announce(&announce_query).await; - - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; - - assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - - test_env.stop().await; - } - - #[tokio::test] - async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( - ) { - /* - client <-> http proxy <-> tracker <-> Internet - ip: header: config: peer addr: - 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 - */ - - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - let client = Client::new(*test_env.bind_address()); - - let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); - - client - .announce_with_header( - &announce_query, - "X-Forwarded-For", - "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", - ) - .await; - - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; - - assert_eq!(peer_addr.ip(), 
IpAddr::from_str("150.172.238.178").unwrap()); - - test_env.stop().await; - } - } - - mod receiving_an_scrape_request { - - // Scrape documentation: - // - // BEP 48. Tracker Protocol Extension: Scrape - // https://www.bittorrent.org/beps/bep_0048.html - // - // Vuze (bittorrent client) docs: - // https://wiki.vuze.com/w/Scrape - - use std::net::IpAddr; - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; - use crate::http::asserts::{ - assert_cannot_parse_query_params_error_response, - assert_missing_query_params_for_scrape_request_error_response, assert_scrape_response, - }; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::requests::scrape::QueryBuilder; - use crate::http::responses::scrape::{self, File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::V1; - - //#[tokio::test] - #[allow(dead_code)] - async fn should_fail_when_the_request_is_empty() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let response = Client::new(*test_env.bind_address()).get("scrape").await; - - assert_missing_query_params_for_scrape_request_error_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let mut params = QueryBuilder::default().query().params(); - - for invalid_value in &invalid_info_hashes() { - params.set_one_info_hash_param(invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_cannot_parse_query_params_error_response(response, "").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn 
should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() - { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_no_bytes_pending_to_download() - .build(), - ) - .await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 1, - downloaded: 0, - incomplete: 0, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let 
test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_accept_multiple_infohashes() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .add_info_hash(&info_hash1) - .add_info_hash(&info_hash2) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file(info_hash1.bytes(), File::zeroed()) - .add_file(info_hash2.bytes(), File::zeroed()) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp4_scrapes_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn 
should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_scrapes_handled, 1); - - drop(stats); - - test_env.stop().await; - } - } - } - - mod configured_as_whitelisted { - - mod and_receiving_an_announce_request { - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker_test_helpers::configuration; - - use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; - use crate::http::client::Client; - use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::V1; - - #[tokio::test] - async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - .await; - - assert_torrent_not_in_whitelist_error_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_announcing_a_whitelisted_torrent() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .tracker - .add_torrent_to_whitelist(&info_hash) - .await - .expect("should add the torrent to the whitelist"); 
- - let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - .await; - - assert_is_announce_response(response).await; - - test_env.stop().await; - } - } - - mod receiving_an_scrape_request { - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::assert_scrape_response; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::V1; - - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - 
.with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - test_env - .tracker - .add_torrent_to_whitelist(&info_hash) - .await - .expect("should add the torrent to the whitelist"); - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - } - } - - mod configured_as_private { - - mod and_receiving_an_announce_request { - use std::str::FromStr; - use std::time::Duration; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::Key; - use torrust_tracker_test_helpers::configuration; - - use crate::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; - use crate::http::client::Client; - use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::V1; - - #[tokio::test] - async fn should_respond_to_authenticated_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), key.id()) - .announce(&QueryBuilder::default().query()) - .await; - - assert_is_announce_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - 
let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - .await; - - assert_authentication_error_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let invalid_key = "INVALID_KEY"; - - let response = Client::new(*test_env.bind_address()) - .get(&format!( - "announce/{invalid_key}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" - )) - .await; - - assert_authentication_error_response(response).await; - } - - #[tokio::test] - async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - // The tracker does not have this key - let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), unregistered_key) - .announce(&QueryBuilder::default().query()) - .await; - - assert_authentication_error_response(response).await; - - test_env.stop().await; - } - } - - mod receiving_an_scrape_request { - - use std::str::FromStr; - use std::time::Duration; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::Key; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::{assert_authentication_error_response, assert_scrape_response}; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::V1; - - 
#[tokio::test] - async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let invalid_key = "INVALID_KEY"; - - let response = Client::new(*test_env.bind_address()) - .get(&format!( - "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" - )) - .await; - - assert_authentication_error_response(response).await; - } - - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), key.id()) - .scrape( - 
&requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { - // There is not authentication error - // code-review: should this really be this way? - - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), false_key) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - } - } - - mod configured_as_private_and_whitelisted { - - mod and_receiving_an_announce_request {} - - mod receiving_an_scrape_request {} - } - } -} diff --git a/tests/integration.rs b/tests/integration.rs new file mode 100644 index 000000000..5d66d9074 --- /dev/null +++ b/tests/integration.rs @@ -0,0 +1,7 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! 
``` +mod common; +mod servers; diff --git a/tests/api/connection_info.rs b/tests/servers/api/connection_info.rs similarity index 100% rename from tests/api/connection_info.rs rename to tests/servers/api/connection_info.rs diff --git a/tests/api/mod.rs b/tests/servers/api/mod.rs similarity index 100% rename from tests/api/mod.rs rename to tests/servers/api/mod.rs diff --git a/tests/api/test_environment.rs b/tests/servers/api/test_environment.rs similarity index 100% rename from tests/api/test_environment.rs rename to tests/servers/api/test_environment.rs diff --git a/tests/api/v1/asserts.rs b/tests/servers/api/v1/asserts.rs similarity index 100% rename from tests/api/v1/asserts.rs rename to tests/servers/api/v1/asserts.rs diff --git a/tests/api/v1/client.rs b/tests/servers/api/v1/client.rs similarity index 98% rename from tests/api/v1/client.rs rename to tests/servers/api/v1/client.rs index d5cdf69f6..2b6db2e77 100644 --- a/tests/api/v1/client.rs +++ b/tests/servers/api/v1/client.rs @@ -1,7 +1,7 @@ use reqwest::Response; -use crate::api::connection_info::ConnectionInfo; use crate::common::http::{Query, QueryParam, ReqwestQuery}; +use crate::servers::api::connection_info::ConnectionInfo; /// API Client pub struct Client { diff --git a/tests/api/v1/tests/authentication.rs b/tests/servers/api/v1/contract/authentication.rs similarity index 92% rename from tests/api/v1/tests/authentication.rs rename to tests/servers/api/v1/contract/authentication.rs index 5be96e078..fb8de1810 100644 --- a/tests/api/v1/tests/authentication.rs +++ b/tests/servers/api/v1/contract/authentication.rs @@ -1,9 +1,9 @@ use torrust_tracker_test_helpers::configuration; -use crate::api::test_environment::running_test_environment; -use crate::api::v1::asserts::{assert_token_not_valid, assert_unauthorized}; -use crate::api::v1::client::Client; use crate::common::http::{Query, QueryParam}; +use crate::servers::api::test_environment::running_test_environment; +use 
crate::servers::api::v1::asserts::{assert_token_not_valid, assert_unauthorized}; +use crate::servers::api::v1::client::Client; #[tokio::test] async fn should_authenticate_requests_by_using_a_token_query_param() { diff --git a/tests/api/v1/tests/configuration.rs b/tests/servers/api/v1/contract/configuration.rs similarity index 86% rename from tests/api/v1/tests/configuration.rs rename to tests/servers/api/v1/contract/configuration.rs index f81201191..e4b608607 100644 --- a/tests/api/v1/tests/configuration.rs +++ b/tests/servers/api/v1/contract/configuration.rs @@ -1,6 +1,6 @@ use torrust_tracker_test_helpers::configuration; -use crate::api::test_environment::stopped_test_environment; +use crate::servers::api::test_environment::stopped_test_environment; #[tokio::test] #[should_panic] diff --git a/tests/api/v1/tests/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs similarity index 96% rename from tests/api/v1/tests/context/auth_key.rs rename to tests/servers/api/v1/contract/context/auth_key.rs index 814afeacf..a99272e84 100644 --- a/tests/api/v1/tests/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -3,14 +3,14 @@ use std::time::Duration; use torrust_tracker::tracker::auth::Key; use torrust_tracker_test_helpers::configuration; -use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::api::force_database_error; -use crate::api::test_environment::running_test_environment; -use crate::api::v1::asserts::{ +use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::servers::api::force_database_error; +use crate::servers::api::test_environment::running_test_environment; +use crate::servers::api::v1::asserts::{ assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_invalid_auth_key_param, assert_invalid_key_duration_param, assert_ok, assert_token_not_valid, 
assert_unauthorized, }; -use crate::api::v1::client::Client; +use crate::servers::api::v1::client::Client; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { diff --git a/tests/api/v1/tests/context/mod.rs b/tests/servers/api/v1/contract/context/mod.rs similarity index 100% rename from tests/api/v1/tests/context/mod.rs rename to tests/servers/api/v1/contract/context/mod.rs diff --git a/tests/api/v1/tests/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs similarity index 86% rename from tests/api/v1/tests/context/stats.rs rename to tests/servers/api/v1/contract/context/stats.rs index 6f850a62c..3929a4270 100644 --- a/tests/api/v1/tests/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -4,11 +4,11 @@ use torrust_tracker::apis::v1::context::stats::resources::Stats; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; -use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::api::test_environment::running_test_environment; -use crate::api::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; -use crate::api::v1::client::Client; use crate::common::fixtures::PeerBuilder; +use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::servers::api::test_environment::running_test_environment; +use crate::servers::api::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; +use crate::servers::api::v1::client::Client; #[tokio::test] async fn should_allow_getting_tracker_statistics() { diff --git a/tests/api/v1/tests/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs similarity index 95% rename from tests/api/v1/tests/context/torrent.rs rename to tests/servers/api/v1/contract/context/torrent.rs index 8c7031f0e..702a8bcd4 100644 --- a/tests/api/v1/tests/context/torrent.rs +++ 
b/tests/servers/api/v1/contract/context/torrent.rs @@ -5,16 +5,18 @@ use torrust_tracker::apis::v1::context::torrent::resources::torrent::{self, Torr use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; -use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::api::test_environment::running_test_environment; -use crate::api::v1::asserts::{ +use crate::common::fixtures::PeerBuilder; +use crate::common::http::{Query, QueryParam}; +use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::servers::api::test_environment::running_test_environment; +use crate::servers::api::v1::asserts::{ assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, }; -use crate::api::v1::client::Client; -use crate::api::v1::tests::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; -use crate::common::fixtures::PeerBuilder; -use crate::common::http::{Query, QueryParam}; +use crate::servers::api::v1::client::Client; +use crate::servers::api::v1::contract::fixtures::{ + invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, +}; #[tokio::test] async fn should_allow_getting_torrents() { diff --git a/tests/api/v1/tests/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs similarity index 95% rename from tests/api/v1/tests/context/whitelist.rs rename to tests/servers/api/v1/contract/context/whitelist.rs index 05cc8c4a7..67992642f 100644 --- a/tests/api/v1/tests/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -3,15 +3,17 @@ use std::str::FromStr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; -use 
crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::api::force_database_error; -use crate::api::test_environment::running_test_environment; -use crate::api::v1::asserts::{ +use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::servers::api::force_database_error; +use crate::servers::api::test_environment::running_test_environment; +use crate::servers::api::v1::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, assert_invalid_infohash_param, assert_not_found, assert_ok, assert_token_not_valid, assert_unauthorized, }; -use crate::api::v1::client::Client; -use crate::api::v1::tests::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; +use crate::servers::api::v1::client::Client; +use crate::servers::api::v1::contract::fixtures::{ + invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, +}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { diff --git a/tests/api/v1/tests/fixtures.rs b/tests/servers/api/v1/contract/fixtures.rs similarity index 100% rename from tests/api/v1/tests/fixtures.rs rename to tests/servers/api/v1/contract/fixtures.rs diff --git a/tests/api/v1/tests/mod.rs b/tests/servers/api/v1/contract/mod.rs similarity index 100% rename from tests/api/v1/tests/mod.rs rename to tests/servers/api/v1/contract/mod.rs diff --git a/tests/api/v1/mod.rs b/tests/servers/api/v1/mod.rs similarity index 64% rename from tests/api/v1/mod.rs rename to tests/servers/api/v1/mod.rs index b31e5fe49..37298b377 100644 --- a/tests/api/v1/mod.rs +++ b/tests/servers/api/v1/mod.rs @@ -1,3 +1,3 @@ pub mod asserts; pub mod client; -pub mod tests; +pub mod contract; diff --git a/tests/http/asserts.rs b/tests/servers/http/asserts.rs similarity index 99% rename from tests/http/asserts.rs rename to 
tests/servers/http/asserts.rs index 932b48be4..3a2e67bf0 100644 --- a/tests/http/asserts.rs +++ b/tests/servers/http/asserts.rs @@ -4,7 +4,7 @@ use reqwest::Response; use super::responses::announce::{Announce, Compact, DeserializedCompact}; use super::responses::scrape; -use crate::http::responses::error::Error; +use crate::servers::http::responses::error::Error; pub fn assert_bencoded_error(response_text: &String, expected_failure_reason: &str, location: &'static Location<'static>) { let error_failure_reason = serde_bencode::from_str::(response_text) diff --git a/tests/http/client.rs b/tests/servers/http/client.rs similarity index 100% rename from tests/http/client.rs rename to tests/servers/http/client.rs diff --git a/tests/http/connection_info.rs b/tests/servers/http/connection_info.rs similarity index 100% rename from tests/http/connection_info.rs rename to tests/servers/http/connection_info.rs diff --git a/tests/http/mod.rs b/tests/servers/http/mod.rs similarity index 98% rename from tests/http/mod.rs rename to tests/servers/http/mod.rs index b0d896c99..cb2885df0 100644 --- a/tests/http/mod.rs +++ b/tests/servers/http/mod.rs @@ -3,6 +3,7 @@ pub mod client; pub mod requests; pub mod responses; pub mod test_environment; +pub mod v1; use percent_encoding::NON_ALPHANUMERIC; diff --git a/tests/http/requests/announce.rs b/tests/servers/http/requests/announce.rs similarity index 99% rename from tests/http/requests/announce.rs rename to tests/servers/http/requests/announce.rs index 87aa3425f..414c118ef 100644 --- a/tests/http/requests/announce.rs +++ b/tests/servers/http/requests/announce.rs @@ -6,7 +6,7 @@ use serde_repr::Serialize_repr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Id; -use crate::http::{percent_encode_byte_array, ByteArray20}; +use crate::servers::http::{percent_encode_byte_array, ByteArray20}; pub struct Query { pub info_hash: ByteArray20, diff --git a/tests/http/requests/mod.rs 
b/tests/servers/http/requests/mod.rs similarity index 100% rename from tests/http/requests/mod.rs rename to tests/servers/http/requests/mod.rs diff --git a/tests/http/requests/scrape.rs b/tests/servers/http/requests/scrape.rs similarity index 97% rename from tests/http/requests/scrape.rs rename to tests/servers/http/requests/scrape.rs index 979dad540..d7f7cd581 100644 --- a/tests/http/requests/scrape.rs +++ b/tests/servers/http/requests/scrape.rs @@ -3,7 +3,7 @@ use std::str::FromStr; use torrust_tracker::protocol::info_hash::InfoHash; -use crate::http::{percent_encode_byte_array, ByteArray20}; +use crate::servers::http::{percent_encode_byte_array, ByteArray20}; pub struct Query { pub info_hash: Vec, diff --git a/tests/http/responses/announce.rs b/tests/servers/http/responses/announce.rs similarity index 100% rename from tests/http/responses/announce.rs rename to tests/servers/http/responses/announce.rs diff --git a/tests/http/responses/error.rs b/tests/servers/http/responses/error.rs similarity index 100% rename from tests/http/responses/error.rs rename to tests/servers/http/responses/error.rs diff --git a/tests/http/responses/mod.rs b/tests/servers/http/responses/mod.rs similarity index 100% rename from tests/http/responses/mod.rs rename to tests/servers/http/responses/mod.rs diff --git a/tests/http/responses/scrape.rs b/tests/servers/http/responses/scrape.rs similarity index 99% rename from tests/http/responses/scrape.rs rename to tests/servers/http/responses/scrape.rs index 1aea517cf..221ff0a38 100644 --- a/tests/http/responses/scrape.rs +++ b/tests/servers/http/responses/scrape.rs @@ -4,7 +4,7 @@ use std::str; use serde::{self, Deserialize, Serialize}; use serde_bencode::value::Value; -use crate::http::{ByteArray20, InfoHash}; +use crate::servers::http::{ByteArray20, InfoHash}; #[derive(Debug, PartialEq, Default)] pub struct Response { diff --git a/tests/http/test_environment.rs b/tests/servers/http/test_environment.rs similarity index 100% rename from 
tests/http/test_environment.rs rename to tests/servers/http/test_environment.rs diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs new file mode 100644 index 000000000..eda42f1ee --- /dev/null +++ b/tests/servers/http/v1/contract.rs @@ -0,0 +1,1425 @@ +use torrust_tracker_test_helpers::configuration; + +use crate::servers::http::test_environment::running_test_environment; + +pub type V1 = torrust_tracker::http::v1::launcher::Launcher; + +#[tokio::test] +async fn test_environment_should_be_started_and_stopped() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + test_env.stop().await; +} + +mod for_all_config_modes { + + mod and_running_on_reverse_proxy { + use torrust_tracker_test_helpers::configuration; + + use crate::servers::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; + use crate::servers::http::client::Client; + use crate::servers::http::requests::announce::QueryBuilder; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { + // If the tracker is running behind a reverse proxy, the peer IP is the + // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. 
+ + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + + let params = QueryBuilder::default().query().params(); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + + let params = QueryBuilder::default().query().params(); + + let response = Client::new(*test_env.bind_address()) + .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") + .await; + + assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; + + test_env.stop().await; + } + } + + mod receiving_an_announce_request { + + // Announce request documentation: + // + // BEP 03. The BitTorrent Protocol Specification + // https://www.bittorrent.org/beps/bep_0003.html + // + // BEP 23. 
Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Announce + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::str::FromStr; + + use local_ip_address::local_ip; + use reqwest::Response; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; + + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::servers::http::asserts::{ + assert_announce_response, assert_bad_announce_request_error_response, assert_cannot_parse_query_param_error_response, + assert_cannot_parse_query_params_error_response, assert_compact_announce_response, assert_empty_announce_response, + assert_is_announce_response, assert_missing_query_params_for_announce_request_error_response, + }; + use crate::servers::http::client::Client; + use crate::servers::http::requests::announce::{Compact, QueryBuilder}; + use crate::servers::http::responses; + use crate::servers::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_respond_if_only_the_mandatory_fields_are_provided() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + params.remove_optional_params(); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_is_announce_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_url_query_component_is_empty() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let response = Client::new(*test_env.bind_address()).get("announce").await; + + 
assert_missing_query_params_for_announce_request_error_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_url_query_parameters_are_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let invalid_query_param = "a=b=c"; + + let response = Client::new(*test_env.bind_address()) + .get(&format!("announce?{invalid_query_param}")) + .await; + + assert_cannot_parse_query_param_error_response(response, "invalid param a=b=c").await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_a_mandatory_field_is_missing() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + // Without `info_hash` param + + let mut params = QueryBuilder::default().query().params(); + + params.info_hash = None; + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "missing param info_hash").await; + + // Without `peer_id` param + + let mut params = QueryBuilder::default().query().params(); + + params.peer_id = None; + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "missing param peer_id").await; + + // Without `port` param + + let mut params = QueryBuilder::default().query().params(); + + params.port = None; + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "missing param port").await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + for invalid_value in &invalid_info_hashes() { + params.set("info_hash", invalid_value); + + let 
response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_cannot_parse_query_params_error_response(response, "").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_not_fail_when_the_peer_address_param_is_invalid() { + // AnnounceQuery does not even contain the `peer_addr` + // The peer IP is obtained in two ways: + // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. + // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. + + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_is_announce_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_downloaded_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("downloaded", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_uploaded_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("uploaded", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + 
assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_peer_id_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "-qB0000000000000000", // 19 bytes + "-qB000000000000000000", // 21 bytes + ]; + + for invalid_value in invalid_values { + params.set("peer_id", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_port_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("port", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_left_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("left", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_event_param_is_invalid() { + let 
test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "Started", // It should be lowercase to be valid: `started` + "Stopped", // It should be lowercase to be valid: `stopped` + "Completed", // It should be lowercase to be valid: `completed` + ]; + + for invalid_value in invalid_values { + params.set("event", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_compact_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("compact", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) + .query(), + ) + .await; + + assert_announce_response( + response, + &Announce { + complete: 1, // the peer for this test + incomplete: 0, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, + peers: vec![], + }, + ) + .await; + + test_env.stop().await; + } + + #[tokio::test] + async fn 
should_return_the_list_of_previously_announced_peers() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2. This new peer is non included on the response peer list + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .query(), + ) + .await; + + // It should only contain the previously announced peer + assert_announce_response( + response, + &Announce { + complete: 2, + incomplete: 0, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, + peers: vec![DictionaryPeer::from(previously_announced_peer)], + }, + ) + .await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Announce a peer using IPV4 + let peer_using_ipv4 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) + .build(); + test_env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; + + // Announce a peer using IPV6 + let peer_using_ipv6 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 
0x6969, 0x6969)), + 8080, + )) + .build(); + test_env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; + + // Announce the new Peer. + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000003")) + .query(), + ) + .await; + + // The newly announced peer is not included on the response peer list, + // but all the previously announced peers should be included regardless the IP version they are using. + assert_announce_response( + response, + &Announce { + complete: 3, + incomplete: 0, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, + peers: vec![DictionaryPeer::from(peer_using_ipv4), DictionaryPeer::from(peer_using_ipv6)], + }, + ) + .await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let peer = PeerBuilder::default().build(); + + // Add a peer + test_env.add_torrent_peer(&info_hash, &peer).await; + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer.peer_id) + .query(); + + assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); + + let response = Client::new(*test_env.bind_address()).announce(&announce_query).await; + + assert_empty_announce_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_compact_response() { + // Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 accepting compact responses + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_compact(Compact::Accepted) + .query(), + ) + .await; + + let expected_response = responses::announce::Compact { + complete: 2, + incomplete: 0, + interval: 120, + min_interval: 120, + peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), + }; + + assert_compact_announce_response(response, &expected_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_not_return_the_compact_response_by_default() { + // code-review: the HTTP tracker does not return the compact response by default if the "compact" + // param is not provided in the announce URL. The BEP 23 suggest to do so. 
+ + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 without passing the "compact" param + // By default it should respond with the compact peer list + // https://www.bittorrent.org/beps/bep_0023.html + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .without_compact() + .query(), + ) + .await; + + assert!(!is_a_compact_announce_response(response).await); + + test_env.stop().await; + } + + async fn is_a_compact_announce_response(response: Response) -> bool { + let bytes = response.bytes().await.unwrap(); + let compact_announce = serde_bencode::from_bytes::(&bytes); + compact_announce.is_ok() + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + + 
drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. + + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 0); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, 1); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 1); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
+ + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 0); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let client_ip = local_ip().unwrap(); + + let client = Client::bind(*test_env.bind_address(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), client_ip); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; + } + + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. 
+ + client <-> tracker <-> Internet + 127.0.0.1 external_ip = "2.137.87.41" + */ + + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2.137.87.41").unwrap(), + )) + .await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(*test_env.bind_address(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; + } + + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. 
+ + client <-> tracker <-> Internet + ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" + */ + + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + )) + .await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(*test_env.bind_address(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; + } + + #[tokio::test] + async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( + ) { + /* + client <-> http proxy <-> tracker <-> Internet + ip: header: config: peer addr: + 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 + */ + + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let client = Client::new(*test_env.bind_address()); + + let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); + + client + .announce_with_header( + &announce_query, + "X-Forwarded-For", + "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", + ) + .await; + + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), 
IpAddr::from_str("150.172.238.178").unwrap()); + + test_env.stop().await; + } + } + + mod receiving_an_scrape_request { + + // Scrape documentation: + // + // BEP 48. Tracker Protocol Extension: Scrape + // https://www.bittorrent.org/beps/bep_0048.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Scrape + + use std::net::IpAddr; + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; + + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::servers::http::asserts::{ + assert_cannot_parse_query_params_error_response, assert_missing_query_params_for_scrape_request_error_response, + assert_scrape_response, + }; + use crate::servers::http::client::Client; + use crate::servers::http::requests; + use crate::servers::http::requests::scrape::QueryBuilder; + use crate::servers::http::responses::scrape::{self, File, ResponseBuilder}; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_request_is_empty() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let response = Client::new(*test_env.bind_address()).get("scrape").await; + + assert_missing_query_params_for_scrape_request_error_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let mut params = QueryBuilder::default().query().params(); + + for invalid_value in &invalid_info_hashes() { + params.set_one_info_hash_param(invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_cannot_parse_query_params_error_response(response, 
"").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_no_bytes_pending_to_download() + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn 
should_return_a_file_with_zeroed_values_when_there_are_no_peers() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_accept_multiple_infohashes() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .add_info_hash(&info_hash1) + .add_info_hash(&info_hash2) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file(info_hash1.bytes(), File::zeroed()) + .add_file(info_hash2.bytes(), File::zeroed()) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp4_scrapes_handled, 1); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] 
+ async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_scrapes_handled, 1); + + drop(stats); + + test_env.stop().await; + } + } +} + +mod configured_as_whitelisted { + + mod and_receiving_an_announce_request { + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; + use crate::servers::http::client::Client; + use crate::servers::http::requests::announce::QueryBuilder; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_torrent_not_in_whitelist_error_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_allow_announcing_a_whitelisted_torrent() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .tracker + 
.add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_is_announce_response(response).await; + + test_env.stop().await; + } + } + + mod receiving_an_scrape_request { + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; + + use crate::common::fixtures::PeerBuilder; + use crate::servers::http::asserts::assert_scrape_response; + use crate::servers::http::client::Client; + use crate::servers::http::requests; + use crate::servers::http::responses::scrape::{File, ResponseBuilder}; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + + let 
info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + test_env + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + } +} + +mod configured_as_private { + + mod and_receiving_an_announce_request { + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::Key; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; + use crate::servers::http::client::Client; + use crate::servers::http::requests::announce::QueryBuilder; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_respond_to_authenticated_peers() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + + let response = Client::authenticated(*test_env.bind_address(), key.id()) + .announce(&QueryBuilder::default().query()) + .await; + + assert_is_announce_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn 
should_fail_if_the_peer_has_not_provided_the_authentication_key() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_authentication_error_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let invalid_key = "INVALID_KEY"; + + let response = Client::new(*test_env.bind_address()) + .get(&format!( + "announce/{invalid_key}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" + )) + .await; + + assert_authentication_error_response(response).await; + } + + #[tokio::test] + async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + // The tracker does not have this key + let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let response = Client::authenticated(*test_env.bind_address(), unregistered_key) + .announce(&QueryBuilder::default().query()) + .await; + + assert_authentication_error_response(response).await; + + test_env.stop().await; + } + } + + mod receiving_an_scrape_request { + + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::Key; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; + + use crate::common::fixtures::PeerBuilder; + use 
crate::servers::http::asserts::{assert_authentication_error_response, assert_scrape_response}; + use crate::servers::http::client::Client; + use crate::servers::http::requests; + use crate::servers::http::responses::scrape::{File, ResponseBuilder}; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let invalid_key = "INVALID_KEY"; + + let response = Client::new(*test_env.bind_address()) + .get(&format!( + "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + )) + .await; + + assert_authentication_error_response(response).await; + } + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + 
.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + + let response = Client::authenticated(*test_env.bind_address(), key.id()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { + // There is not authentication error + // code-review: should this really be this way? + + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + + let response = Client::authenticated(*test_env.bind_address(), false_key) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + } +} + +mod configured_as_private_and_whitelisted { + + mod and_receiving_an_announce_request {} + + mod receiving_an_scrape_request {} +} diff --git a/tests/servers/http/v1/mod.rs 
b/tests/servers/http/v1/mod.rs new file mode 100644 index 000000000..2943dbb50 --- /dev/null +++ b/tests/servers/http/v1/mod.rs @@ -0,0 +1 @@ +pub mod contract; diff --git a/tests/servers/mod.rs b/tests/servers/mod.rs new file mode 100644 index 000000000..c19f72020 --- /dev/null +++ b/tests/servers/mod.rs @@ -0,0 +1,5 @@ +extern crate rand; + +mod api; +mod http; +mod udp; diff --git a/tests/udp/asserts.rs b/tests/servers/udp/asserts.rs similarity index 100% rename from tests/udp/asserts.rs rename to tests/servers/udp/asserts.rs diff --git a/tests/udp/client.rs b/tests/servers/udp/client.rs similarity index 98% rename from tests/udp/client.rs rename to tests/servers/udp/client.rs index 0bec03d03..a13845b97 100644 --- a/tests/udp/client.rs +++ b/tests/servers/udp/client.rs @@ -5,7 +5,7 @@ use aquatic_udp_protocol::{Request, Response}; use tokio::net::UdpSocket; use torrust_tracker::udp::MAX_PACKET_SIZE; -use crate::udp::source_address; +use crate::servers::udp::source_address; #[allow(clippy::module_name_repetitions)] pub struct UdpClient { diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs new file mode 100644 index 000000000..311cf5e49 --- /dev/null +++ b/tests/servers/udp/contract.rs @@ -0,0 +1,160 @@ +// UDP tracker documentation: +// +// BEP 15. 
UDP Tracker Protocol for BitTorrent +// https://www.bittorrent.org/beps/bep_0015.html + +use core::panic; + +use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; +use torrust_tracker::udp::MAX_PACKET_SIZE; +use torrust_tracker_test_helpers::configuration; + +use crate::servers::udp::asserts::is_error_response; +use crate::servers::udp::client::{new_udp_client_connected, UdpTrackerClient}; +use crate::servers::udp::test_environment::running_test_environment; + +fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { + [0; MAX_PACKET_SIZE] +} + +fn empty_buffer() -> [u8; MAX_PACKET_SIZE] { + [0; MAX_PACKET_SIZE] +} + +async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { + let connect_request = ConnectRequest { transaction_id }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + match response { + Response::Connect(connect_response) => connect_response.connection_id, + _ => panic!("error connecting to udp server {:?}", response), + } +} + +#[tokio::test] +async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let client = new_udp_client_connected(&test_env.bind_address().to_string()).await; + + client.send(&empty_udp_request()).await; + + let mut buffer = empty_buffer(); + client.receive(&mut buffer).await; + let response = Response::from_bytes(&buffer, true).unwrap(); + + assert!(is_error_response(&response, "bad request")); +} + +mod receiving_a_connection_request { + use aquatic_udp_protocol::{ConnectRequest, TransactionId}; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::udp::asserts::is_connect_response; + use crate::servers::udp::client::new_udp_tracker_client_connected; + use crate::servers::udp::test_environment::running_test_environment; + + #[tokio::test] + async fn 
should_return_a_connect_response() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; + + let connect_request = ConnectRequest { + transaction_id: TransactionId(123), + }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + assert!(is_connect_response(&response, TransactionId(123))); + } +} + +mod receiving_an_announce_request { + use std::net::Ipv4Addr; + + use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, + TransactionId, + }; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::udp::asserts::is_ipv4_announce_response; + use crate::servers::udp::client::new_udp_tracker_client_connected; + use crate::servers::udp::contract::send_connection_request; + use crate::servers::udp::test_environment::running_test_environment; + + #[tokio::test] + async fn should_return_an_announce_response() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; + + let connection_id = send_connection_request(TransactionId(123), &client).await; + + // Send announce request + + let announce_request = AnnounceRequest { + connection_id: ConnectionId(connection_id.0), + transaction_id: TransactionId(123i32), + info_hash: InfoHash([0u8; 20]), + peer_id: PeerId([255u8; 20]), + bytes_downloaded: NumberOfBytes(0i64), + bytes_uploaded: NumberOfBytes(0i64), + bytes_left: NumberOfBytes(0i64), + event: AnnounceEvent::Started, + ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), + key: PeerKey(0u32), + peers_wanted: NumberOfPeers(1i32), + port: Port(client.udp_client.socket.local_addr().unwrap().port()), + }; + + client.send(announce_request.into()).await; + + let response = client.receive().await; + + 
assert!(is_ipv4_announce_response(&response)); + } +} + +mod receiving_an_scrape_request { + use aquatic_udp_protocol::{ConnectionId, InfoHash, ScrapeRequest, TransactionId}; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::udp::asserts::is_scrape_response; + use crate::servers::udp::client::new_udp_tracker_client_connected; + use crate::servers::udp::contract::send_connection_request; + use crate::servers::udp::test_environment::running_test_environment; + + #[tokio::test] + async fn should_return_a_scrape_response() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; + + let connection_id = send_connection_request(TransactionId(123), &client).await; + + // Send scrape request + + // Full scrapes are not allowed you need to pass an array of info hashes otherwise + // it will return "bad request" error with empty vector + let info_hashes = vec![InfoHash([0u8; 20])]; + + let scrape_request = ScrapeRequest { + connection_id: ConnectionId(connection_id.0), + transaction_id: TransactionId(123i32), + info_hashes, + }; + + client.send(scrape_request.into()).await; + + let response = client.receive().await; + + assert!(is_scrape_response(&response)); + } +} diff --git a/tests/udp/mod.rs b/tests/servers/udp/mod.rs similarity index 91% rename from tests/udp/mod.rs rename to tests/servers/udp/mod.rs index f45a4a4f9..d39c37153 100644 --- a/tests/udp/mod.rs +++ b/tests/servers/udp/mod.rs @@ -1,5 +1,6 @@ pub mod asserts; pub mod client; +pub mod contract; pub mod test_environment; /// Generates the source address for the UDP client diff --git a/tests/udp/test_environment.rs b/tests/servers/udp/test_environment.rs similarity index 100% rename from tests/udp/test_environment.rs rename to tests/servers/udp/test_environment.rs diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs deleted file mode 100644 index 3219bc987..000000000 
--- a/tests/tracker_api.rs +++ /dev/null @@ -1,7 +0,0 @@ -/// Integration tests for the tracker API -/// -/// ```text -/// cargo test --test tracker_api -/// ``` -mod api; -mod common; diff --git a/tests/udp_tracker.rs b/tests/udp_tracker.rs deleted file mode 100644 index 3fe78c03d..000000000 --- a/tests/udp_tracker.rs +++ /dev/null @@ -1,173 +0,0 @@ -/// Integration tests for UDP tracker server -/// -/// ```text -/// cargo test `udp_tracker_server` -- --nocapture -/// ``` -extern crate rand; - -mod common; -mod udp; - -mod udp_tracker_server { - - // UDP tracker documentation: - // - // BEP 15. UDP Tracker Protocol for BitTorrent - // https://www.bittorrent.org/beps/bep_0015.html - - use core::panic; - - use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; - use torrust_tracker::udp::MAX_PACKET_SIZE; - use torrust_tracker_test_helpers::configuration; - - use crate::udp::asserts::is_error_response; - use crate::udp::client::{new_udp_client_connected, UdpTrackerClient}; - use crate::udp::test_environment::running_test_environment; - - fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { - [0; MAX_PACKET_SIZE] - } - - fn empty_buffer() -> [u8; MAX_PACKET_SIZE] { - [0; MAX_PACKET_SIZE] - } - - async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { - let connect_request = ConnectRequest { transaction_id }; - - client.send(connect_request.into()).await; - - let response = client.receive().await; - - match response { - Response::Connect(connect_response) => connect_response.connection_id, - _ => panic!("error connecting to udp server {:?}", response), - } - } - - #[tokio::test] - async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let client = new_udp_client_connected(&test_env.bind_address().to_string()).await; - - client.send(&empty_udp_request()).await; - - let mut buffer 
= empty_buffer(); - client.receive(&mut buffer).await; - let response = Response::from_bytes(&buffer, true).unwrap(); - - assert!(is_error_response(&response, "bad request")); - } - - mod receiving_a_connection_request { - use aquatic_udp_protocol::{ConnectRequest, TransactionId}; - use torrust_tracker_test_helpers::configuration; - - use crate::udp::asserts::is_connect_response; - use crate::udp::client::new_udp_tracker_client_connected; - use crate::udp::test_environment::running_test_environment; - - #[tokio::test] - async fn should_return_a_connect_response() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; - - let connect_request = ConnectRequest { - transaction_id: TransactionId(123), - }; - - client.send(connect_request.into()).await; - - let response = client.receive().await; - - assert!(is_connect_response(&response, TransactionId(123))); - } - } - - mod receiving_an_announce_request { - use std::net::Ipv4Addr; - - use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, - TransactionId, - }; - use torrust_tracker_test_helpers::configuration; - - use crate::udp::asserts::is_ipv4_announce_response; - use crate::udp::client::new_udp_tracker_client_connected; - use crate::udp::test_environment::running_test_environment; - use crate::udp_tracker_server::send_connection_request; - - #[tokio::test] - async fn should_return_an_announce_response() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; - - let connection_id = send_connection_request(TransactionId(123), &client).await; - - // Send announce request - - let announce_request = AnnounceRequest { - connection_id: ConnectionId(connection_id.0), - transaction_id: TransactionId(123i32), 
- info_hash: InfoHash([0u8; 20]), - peer_id: PeerId([255u8; 20]), - bytes_downloaded: NumberOfBytes(0i64), - bytes_uploaded: NumberOfBytes(0i64), - bytes_left: NumberOfBytes(0i64), - event: AnnounceEvent::Started, - ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), - key: PeerKey(0u32), - peers_wanted: NumberOfPeers(1i32), - port: Port(client.udp_client.socket.local_addr().unwrap().port()), - }; - - client.send(announce_request.into()).await; - - let response = client.receive().await; - - assert!(is_ipv4_announce_response(&response)); - } - } - - mod receiving_an_scrape_request { - use aquatic_udp_protocol::{ConnectionId, InfoHash, ScrapeRequest, TransactionId}; - use torrust_tracker_test_helpers::configuration; - - use crate::udp::asserts::is_scrape_response; - use crate::udp::client::new_udp_tracker_client_connected; - use crate::udp::test_environment::running_test_environment; - use crate::udp_tracker_server::send_connection_request; - - #[tokio::test] - async fn should_return_a_scrape_response() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; - - let connection_id = send_connection_request(TransactionId(123), &client).await; - - // Send scrape request - - // Full scrapes are not allowed you need to pass an array of info hashes otherwise - // it will return "bad request" error with empty vector - let info_hashes = vec![InfoHash([0u8; 20])]; - - let scrape_request = ScrapeRequest { - connection_id: ConnectionId(connection_id.0), - transaction_id: TransactionId(123i32), - info_hashes, - }; - - client.send(scrape_request.into()).await; - - let response = client.receive().await; - - assert!(is_scrape_response(&response)); - } - } -} From fd50bb000451864b36400f34e6625d7feaab6053 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 13 Mar 2023 19:11:56 +0000 Subject: [PATCH 0475/1003] refactor(tracker): use domain struts in DB trait instead of 
primitive types. --- src/apis/v1/context/auth_key/handlers.rs | 2 +- src/databases/mod.rs | 16 +++++----------- src/databases/mysql.rs | 16 +++++++++------- src/databases/sqlite.rs | 14 +++++++------- src/protocol/info_hash.rs | 12 ++++++++++++ src/tracker/mod.rs | 11 +++++------ 6 files changed, 39 insertions(+), 32 deletions(-) diff --git a/src/apis/v1/context/auth_key/handlers.rs b/src/apis/v1/context/auth_key/handlers.rs index d21f08299..cb1cd1113 100644 --- a/src/apis/v1/context/auth_key/handlers.rs +++ b/src/apis/v1/context/auth_key/handlers.rs @@ -31,7 +31,7 @@ pub async fn delete_auth_key_handler( ) -> Response { match Key::from_str(&seconds_valid_or_key.0) { Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), - Ok(key) => match tracker.remove_auth_key(&key.to_string()).await { + Ok(key) => match tracker.remove_auth_key(&key).await { Ok(_) => ok_response(), Err(e) => failed_to_delete_key_response(e), }, diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 247f571d7..0af6f5723 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -9,7 +9,7 @@ use async_trait::async_trait; use self::error::Error; use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth; +use crate::tracker::auth::{self, Key}; pub(self) struct Builder where @@ -63,25 +63,19 @@ pub trait Database: Sync + Send { async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; - // todo: replace type `&str` with `&InfoHash` - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result, Error>; + async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error>; async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; - // todo: replace type `&str` with `&Key` - async fn get_key_from_keys(&self, key: &str) -> Result, Error>; + async fn get_key_from_keys(&self, key: &Key) -> Result, Error>; 
async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result; - // todo: replace type `&str` with `&Key` - async fn remove_key_from_keys(&self, key: &str) -> Result; + async fn remove_key_from_keys(&self, key: &Key) -> Result; async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { - Ok(self - .get_info_hash_from_whitelist(&info_hash.clone().to_string()) - .await? - .is_some()) + Ok(self.get_info_hash_from_whitelist(info_hash).await?.is_some()) } } diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index f0c7ec1dd..f6918974f 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -147,12 +147,12 @@ impl Database for Mysql { Ok(conn.exec_drop(COMMAND, params! { info_hash_str, completed })?) } - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result, Error> { + async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let select = conn.exec_first::( "SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", - params! { info_hash }, + params! { "info_hash" => info_hash.to_hex_string() }, )?; let info_hash = select.map(|f| InfoHash::from_str(&f).expect("Failed to decode InfoHash String from DB!")); @@ -183,11 +183,13 @@ impl Database for Mysql { Ok(1) } - async fn get_key_from_keys(&self, key: &str) -> Result, Error> { + async fn get_key_from_keys(&self, key: &Key) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - let query = - conn.exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }); + let query = conn.exec_first::<(String, i64), _, _>( + "SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", + params! 
{ "key" => key.to_string() }, + ); let key = query?; @@ -211,10 +213,10 @@ impl Database for Mysql { Ok(1) } - async fn remove_key_from_keys(&self, key: &str) -> Result { + async fn remove_key_from_keys(&self, key: &Key) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! { key })?; + conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! { "key" => key.to_string() })?; Ok(1) } diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 4bf2931de..adb201def 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -156,12 +156,12 @@ impl Database for Sqlite { } } - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result, Error> { + async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; - let mut rows = stmt.query([info_hash])?; + let mut rows = stmt.query([info_hash.to_hex_string()])?; let query = rows.next()?; @@ -200,7 +200,7 @@ impl Database for Sqlite { } } - async fn get_key_from_keys(&self, key: &str) -> Result, Error> { + async fn get_key_from_keys(&self, key: &Key) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; @@ -211,9 +211,9 @@ impl Database for Sqlite { Ok(key.map(|f| { let expiry: i64 = f.get(1).unwrap(); - let id: String = f.get(0).unwrap(); + let key: String = f.get(0).unwrap(); auth::ExpiringKey { - key: id.parse::().unwrap(), + key: key.parse::().unwrap(), valid_until: DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs()), } })) @@ -237,10 +237,10 @@ impl Database for Sqlite { } } - async fn remove_key_from_keys(&self, key: &str) -> Result { + async fn remove_key_from_keys(&self, key: &Key) -> Result { let conn = self.pool.get().map_err(|e| (e, 
DRIVER))?; - let deleted = conn.execute("DELETE FROM keys WHERE key = ?", [key])?; + let deleted = conn.execute("DELETE FROM keys WHERE key = ?", [key.to_string()])?; if deleted == 1 { // should only remove a single record. diff --git a/src/protocol/info_hash.rs b/src/protocol/info_hash.rs index 320636725..fd7602cdd 100644 --- a/src/protocol/info_hash.rs +++ b/src/protocol/info_hash.rs @@ -24,6 +24,11 @@ impl InfoHash { pub fn bytes(&self) -> [u8; 20] { self.0 } + + #[must_use] + pub fn to_hex_string(&self) -> String { + self.to_string() + } } impl std::fmt::Display for InfoHash { @@ -197,6 +202,13 @@ mod tests { assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff"); } + #[test] + fn an_info_hash_should_return_its_a_40_utf8_lowercased_char_hex_representations_as_string() { + let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + + assert_eq!(info_hash.to_hex_string(), "ffffffffffffffffffffffffffffffffffffffff"); + } + #[test] fn an_info_hash_can_be_created_from_a_valid_20_byte_array_slice() { let info_hash: InfoHash = [255u8; 20].as_slice().into(); diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 326afbf00..8a9739793 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -202,10 +202,9 @@ impl Tracker { /// # Panics /// /// Will panic if key cannot be converted into a valid `Key`. 
- pub async fn remove_auth_key(&self, key: &str) -> Result<(), databases::error::Error> { - // todo: change argument `key: &str` to `key: &Key` + pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> { self.database.remove_key_from_keys(key).await?; - self.keys.write().await.remove(&key.parse::().unwrap()); + self.keys.write().await.remove(key); Ok(()) } @@ -1175,12 +1174,12 @@ mod tests { async fn it_should_remove_an_authentication_key() { let tracker = private_tracker(); - let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); - let result = tracker.remove_auth_key(&key.id().to_string()).await; + let result = tracker.remove_auth_key(&expiring_key.id()).await; assert!(result.is_ok()); - assert!(tracker.verify_auth_key(&key.id()).await.is_err()); + assert!(tracker.verify_auth_key(&expiring_key.id()).await.is_err()); } #[tokio::test] From 084b2acfb378c5df795644ea20a7adf30dd0c2d6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 14 Mar 2023 13:28:27 +0000 Subject: [PATCH 0476/1003] feat(api): [#120] use datetime ISO 8601 in auth key enpoint instead of timestamp. 
--- src/apis/v1/context/auth_key/resources.rs | 53 ++++++---- src/protocol/clock/mod.rs | 70 +++++++++++++ src/tracker/auth.rs | 117 ++++++++++++++-------- src/tracker/mod.rs | 18 ++-- tests/servers/http/v1/contract.rs | 8 +- 5 files changed, 190 insertions(+), 76 deletions(-) diff --git a/src/apis/v1/context/auth_key/resources.rs b/src/apis/v1/context/auth_key/resources.rs index 72ef32a95..cf43a6f3d 100644 --- a/src/apis/v1/context/auth_key/resources.rs +++ b/src/apis/v1/context/auth_key/resources.rs @@ -2,25 +2,21 @@ use std::convert::From; use serde::{Deserialize, Serialize}; -use crate::protocol::clock::DurationSinceUnixEpoch; +use crate::protocol::clock::convert_from_iso_8601_to_timestamp; use crate::tracker::auth::{self, Key}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct AuthKey { - pub key: String, // todo: rename to `id` (API breaking change!) - pub valid_until: Option, // todo: `auth::ExpiringKey` has now always a value (API breaking change!) + pub key: String, + pub valid_until: u64, // todo: remove when the torrust-index-backend starts using the `expiry_time` attribute. 
+ pub expiry_time: String, } impl From for auth::ExpiringKey { fn from(auth_key_resource: AuthKey) -> Self { - let valid_until = match auth_key_resource.valid_until { - Some(valid_until) => DurationSinceUnixEpoch::from_secs(valid_until), - None => DurationSinceUnixEpoch::from_secs(0), - }; - auth::ExpiringKey { key: auth_key_resource.key.parse::().unwrap(), - valid_until, + valid_until: convert_from_iso_8601_to_timestamp(&auth_key_resource.expiry_time), } } } @@ -29,7 +25,8 @@ impl From for AuthKey { fn from(auth_key: auth::ExpiringKey) -> Self { AuthKey { key: auth_key.key.to_string(), - valid_until: Some(auth_key.valid_until.as_secs()), + valid_until: auth_key.valid_until.as_secs(), + expiry_time: auth_key.expiry_time().to_string(), } } } @@ -42,38 +39,53 @@ mod tests { use crate::protocol::clock::{Current, TimeNow}; use crate::tracker::auth::{self, Key}; + struct TestTime { + pub timestamp: u64, + pub iso_8601_v1: String, + pub iso_8601_v2: String, + } + + fn one_hour_after_unix_epoch() -> TestTime { + let timestamp = 60_u64; + let iso_8601_v1 = "1970-01-01T00:01:00.000Z".to_string(); + let iso_8601_v2 = "1970-01-01 00:01:00 UTC".to_string(); + TestTime { + timestamp, + iso_8601_v1, + iso_8601_v2, + } + } + #[test] fn it_should_be_convertible_into_an_auth_key() { - let duration_in_secs = 60; - let auth_key_resource = AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: Some(duration_in_secs), + valid_until: one_hour_after_unix_epoch().timestamp, + expiry_time: one_hour_after_unix_epoch().iso_8601_v1, }; assert_eq!( auth::ExpiringKey::from(auth_key_resource), auth::ExpiringKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: Current::add(&Duration::new(duration_in_secs, 0)).unwrap() + valid_until: Current::add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap() } ); } #[test] fn it_should_be_convertible_from_an_auth_key() { - let duration_in_secs = 
60; - let auth_key = auth::ExpiringKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: Current::add(&Duration::new(duration_in_secs, 0)).unwrap(), + valid_until: Current::add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap(), }; assert_eq!( AuthKey::from(auth_key), AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: Some(duration_in_secs) + valid_until: one_hour_after_unix_epoch().timestamp, + expiry_time: one_hour_after_unix_epoch().iso_8601_v2, } ); } @@ -83,10 +95,11 @@ mod tests { assert_eq!( serde_json::to_string(&AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: Some(60) + valid_until: one_hour_after_unix_epoch().timestamp, + expiry_time: one_hour_after_unix_epoch().iso_8601_v1, }) .unwrap(), - "{\"key\":\"IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM\",\"valid_until\":60}" // cspell:disable-line + "{\"key\":\"IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM\",\"valid_until\":60,\"expiry_time\":\"1970-01-01T00:01:00.000Z\"}" // cspell:disable-line ); } } diff --git a/src/protocol/clock/mod.rs b/src/protocol/clock/mod.rs index 7868d4c5e..73df37b58 100644 --- a/src/protocol/clock/mod.rs +++ b/src/protocol/clock/mod.rs @@ -1,6 +1,9 @@ use std::num::IntErrorKind; +use std::str::FromStr; use std::time::Duration; +use chrono::{DateTime, NaiveDateTime, Utc}; + pub type DurationSinceUnixEpoch = Duration; #[derive(Debug)] @@ -36,6 +39,40 @@ pub trait TimeNow: Time { } } +/// # Panics +/// +/// Will panic if the input time cannot be converted to `DateTime::`. +/// +#[must_use] +pub fn convert_from_iso_8601_to_timestamp(iso_8601: &str) -> DurationSinceUnixEpoch { + convert_from_datetime_utc_to_timestamp(&DateTime::::from_str(iso_8601).unwrap()) +} + +/// # Panics +/// +/// Will panic if the input time overflows the u64 type. 
+/// +#[must_use] +pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime) -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::from_secs(u64::try_from(datetime_utc.timestamp()).expect("Overflow of u64 seconds, very future!")) +} + +/// # Panics +/// +/// Will panic if the input time overflows the i64 type. +/// +#[must_use] +pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) -> DateTime { + DateTime::::from_utc( + NaiveDateTime::from_timestamp_opt( + i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), + duration.subsec_nanos(), + ) + .unwrap(), + Utc, + ) +} + #[cfg(test)] mod tests { use std::any::TypeId; @@ -54,6 +91,39 @@ mod tests { assert_ne!(TypeId::of::(), TypeId::of::()); assert_ne!(Stopped::now(), Working::now()); } + + mod timestamp { + use chrono::{DateTime, NaiveDateTime, Utc}; + + use crate::protocol::clock::{ + convert_from_datetime_utc_to_timestamp, convert_from_iso_8601_to_timestamp, convert_from_timestamp_to_datetime_utc, + DurationSinceUnixEpoch, + }; + + #[test] + fn should_be_converted_to_datetime_utc() { + let timestamp = DurationSinceUnixEpoch::ZERO; + assert_eq!( + convert_from_timestamp_to_datetime_utc(timestamp), + DateTime::::from_utc(NaiveDateTime::from_timestamp_opt(0, 0).unwrap(), Utc) + ); + } + + #[test] + fn should_be_converted_from_datetime_utc() { + let datetime = DateTime::::from_utc(NaiveDateTime::from_timestamp_opt(0, 0).unwrap(), Utc); + assert_eq!( + convert_from_datetime_utc_to_timestamp(&datetime), + DurationSinceUnixEpoch::ZERO + ); + } + + #[test] + fn should_be_converted_from_datetime_utc_in_iso_8601() { + let iso_8601 = "1970-01-01T00:00:00.000Z".to_string(); + assert_eq!(convert_from_iso_8601_to_timestamp(&iso_8601), DurationSinceUnixEpoch::ZERO); + } + } } mod working_clock { diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index e3c12a828..75bc543a8 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -3,7 +3,6 @@ use 
std::str::FromStr; use std::sync::Arc; use std::time::Duration; -use chrono::{DateTime, NaiveDateTime, Utc}; use derive_more::Display; use log::debug; use rand::distributions::Alphanumeric; @@ -12,7 +11,7 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use torrust_tracker_located_error::LocatedError; -use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time, TimeNow}; +use crate::protocol::clock::{convert_from_timestamp_to_datetime_utc, Current, DurationSinceUnixEpoch, Time, TimeNow}; use crate::protocol::common::AUTH_KEY_LENGTH; #[must_use] @@ -59,27 +58,28 @@ pub struct ExpiringKey { impl std::fmt::Display for ExpiringKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "key: `{}`, valid until `{}`", - self.key, - DateTime::::from_utc( - NaiveDateTime::from_timestamp_opt( - i64::try_from(self.valid_until.as_secs()).expect("Overflow of i64 seconds, very future!"), - self.valid_until.subsec_nanos(), - ) - .unwrap(), - Utc - ) - ) + write!(f, "key: `{}`, valid until `{}`", self.key, self.expiry_time()) } } impl ExpiringKey { #[must_use] - pub fn id(&self) -> Key { + pub fn key(&self) -> Key { self.key.clone() } + + /// It returns the expiry time. For example, for the starting time for Unix Epoch + /// (timestamp 0) it will return a `DateTime` whose string representation is + /// `1970-01-01 00:00:00 UTC`. + /// + /// # Panics + /// + /// Will panic when the key timestamp overflows the ui64 type. 
+ /// + #[must_use] + pub fn expiry_time(&self) -> chrono::DateTime { + convert_from_timestamp_to_datetime_utc(self.valid_until) + } } #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] @@ -126,44 +126,75 @@ impl From for Error { #[cfg(test)] mod tests { - use std::str::FromStr; - use std::time::Duration; - use crate::protocol::clock::{Current, StoppedTime}; - use crate::tracker::auth; + mod key { + use std::str::FromStr; - #[test] - fn auth_key_from_string() { - let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = auth::Key::from_str(key_string); + use crate::tracker::auth::Key; - assert!(auth_key.is_ok()); - assert_eq!(auth_key.unwrap().to_string(), key_string); + #[test] + fn should_be_parsed_from_an_string() { + let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; + let key = Key::from_str(key_string); + + assert!(key.is_ok()); + assert_eq!(key.unwrap().to_string(), key_string); + } } - #[test] - fn generate_valid_auth_key() { - let auth_key = auth::generate(Duration::new(9999, 0)); + mod expiring_auth_key { + use std::str::FromStr; + use std::time::Duration; - assert!(auth::verify(&auth_key).is_ok()); - } + use crate::protocol::clock::{Current, StoppedTime}; + use crate::tracker::auth; + + #[test] + fn should_be_parsed_from_an_string() { + let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; + let auth_key = auth::Key::from_str(key_string); - #[test] - fn generate_and_check_expired_auth_key() { - // Set the time to the current time. - Current::local_set_to_system_time_now(); + assert!(auth_key.is_ok()); + assert_eq!(auth_key.unwrap().to_string(), key_string); + } - // Make key that is valid for 19 seconds. - let auth_key = auth::generate(Duration::from_secs(19)); + #[test] + fn should_be_displayed() { + // Set the time to the current time. + Current::local_set_to_unix_epoch(); - // Mock the time has passed 10 sec. 
- Current::local_add(&Duration::from_secs(10)).unwrap(); + let expiring_key = auth::generate(Duration::from_secs(0)); - assert!(auth::verify(&auth_key).is_ok()); + assert_eq!( + expiring_key.to_string(), + format!("key: `{}`, valid until `1970-01-01 00:00:00 UTC`", expiring_key.key) // cspell:disable-line + ); + } - // Mock the time has passed another 10 sec. - Current::local_add(&Duration::from_secs(10)).unwrap(); + #[test] + fn should_be_generated_with_a_expiration_time() { + let expiring_key = auth::generate(Duration::new(9999, 0)); - assert!(auth::verify(&auth_key).is_err()); + assert!(auth::verify(&expiring_key).is_ok()); + } + + #[test] + fn should_be_generate_and_verified() { + // Set the time to the current time. + Current::local_set_to_system_time_now(); + + // Make key that is valid for 19 seconds. + let expiring_key = auth::generate(Duration::from_secs(19)); + + // Mock the time has passed 10 sec. + Current::local_add(&Duration::from_secs(10)).unwrap(); + + assert!(auth::verify(&expiring_key).is_ok()); + + // Mock the time has passed another 10 sec. + Current::local_add(&Duration::from_secs(10)).unwrap(); + + assert!(auth::verify(&expiring_key).is_err()); + } } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 8a9739793..71bb41f90 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1132,9 +1132,9 @@ mod tests { async fn it_should_authenticate_a_peer_by_using_a_key() { let tracker = private_tracker(); - let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); - let result = tracker.authenticate(&key.id()).await; + let result = tracker.authenticate(&expiring_key.key()).await; assert!(result.is_ok()); } @@ -1156,9 +1156,9 @@ mod tests { // `verify_auth_key` should be a private method. 
let tracker = private_tracker(); - let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); - assert!(tracker.verify_auth_key(&key.id()).await.is_ok()); + assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_ok()); } #[tokio::test] @@ -1176,25 +1176,25 @@ mod tests { let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); - let result = tracker.remove_auth_key(&expiring_key.id()).await; + let result = tracker.remove_auth_key(&expiring_key.key()).await; assert!(result.is_ok()); - assert!(tracker.verify_auth_key(&expiring_key.id()).await.is_err()); + assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_err()); } #[tokio::test] async fn it_should_load_authentication_keys_from_the_database() { let tracker = private_tracker(); - let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); // Remove the newly generated key in memory - tracker.keys.write().await.remove(&key.id()); + tracker.keys.write().await.remove(&expiring_key.key()); let result = tracker.load_keys_from_database().await; assert!(result.is_ok()); - assert!(tracker.verify_auth_key(&key.id()).await.is_ok()); + assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_ok()); } } diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index eda42f1ee..501c0f6fa 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -1215,9 +1215,9 @@ mod configured_as_private { async fn should_respond_to_authenticated_peers() { let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + let expiring_key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - 
let response = Client::authenticated(*test_env.bind_address(), key.id()) + let response = Client::authenticated(*test_env.bind_address(), expiring_key.key()) .announce(&QueryBuilder::default().query()) .await; @@ -1353,9 +1353,9 @@ mod configured_as_private { ) .await; - let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + let expiring_key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(*test_env.bind_address(), key.id()) + let response = Client::authenticated(*test_env.bind_address(), expiring_key.key()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) From d51aae049260af54e1a5253a5f8dc53a277e2a31 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 16 Mar 2023 15:45:21 +0000 Subject: [PATCH 0477/1003] feat(tracker): [#164] add prefix 0x to peer ID hex string --- src/tracker/peer.rs | 38 +++++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 015af12a3..3012770bb 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -169,8 +169,13 @@ impl Id { pub fn to_hex_string(&self) -> Option { let buff_size = self.0.len() * 2; let mut tmp: Vec = vec![0; buff_size]; + binascii::bin2hex(&self.0, &mut tmp).unwrap(); - std::str::from_utf8(&tmp).ok().map(std::string::ToString::to_string) + + match std::str::from_utf8(&tmp) { + Ok(hex) => Some(format!("0x{hex}")), + Err(_) => None, + } } #[must_use] @@ -360,23 +365,23 @@ mod test { #[test] fn should_be_converted_to_hex_string() { let id = peer::Id(*b"-qB00000000000000000"); - assert_eq!(id.to_hex_string().unwrap(), "2d71423030303030303030303030303030303030"); + assert_eq!(id.to_hex_string().unwrap(), "0x2d71423030303030303030303030303030303030"); let id = peer::Id([ 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, ]); - 
assert_eq!(id.to_hex_string().unwrap(), "009f9296009f9296009f9296009f9296009f9296"); + assert_eq!(id.to_hex_string().unwrap(), "0x009f9296009f9296009f9296009f9296009f9296"); } #[test] fn should_be_converted_into_string_type_using_the_hex_string_format() { let id = peer::Id(*b"-qB00000000000000000"); - assert_eq!(id.to_string(), "2d71423030303030303030303030303030303030"); + assert_eq!(id.to_string(), "0x2d71423030303030303030303030303030303030"); let id = peer::Id([ 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, ]); - assert_eq!(id.to_string(), "009f9296009f9296009f9296009f9296009f9296"); + assert_eq!(id.to_string(), "0x009f9296009f9296009f9296009f9296009f9296"); } #[test] @@ -390,6 +395,7 @@ mod test { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use serde_json::Value; use crate::protocol::clock::{Current, Time}; use crate::tracker::peer::{self, Peer}; @@ -406,12 +412,26 @@ mod test { event: AnnounceEvent::Started, }; - let json_serialized_value = serde_json::to_string(&torrent_peer).unwrap(); + let raw_json = serde_json::to_string(&torrent_peer).unwrap(); + + let expected_raw_json = r#" + { + "peer_id": { + "id": "0x2d71423030303030303030303030303030303030", + "client": "qBittorrent" + }, + "peer_addr":"126.0.0.1:8080", + "updated":0, + "uploaded":0, + "downloaded":0, + "left":0, + "event":"Started" + } + "#; assert_eq!( - json_serialized_value, - // todo: compare using pretty json format to improve readability - r#"{"peer_id":{"id":"2d71423030303030303030303030303030303030","client":"qBittorrent"},"peer_addr":"126.0.0.1:8080","updated":0,"uploaded":0,"downloaded":0,"left":0,"event":"Started"}"# + serde_json::from_str::(&raw_json).unwrap(), + serde_json::from_str::(expected_raw_json).unwrap() ); } } From 46f49005f722c98401689cec5160131acba2eaa4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 07:45:14 +0000 Subject: [PATCH 0478/1003] refactor: 
add servers modules Move apps into a "servers" module. --- src/jobs/http_tracker.rs | 4 +- src/jobs/tracker_apis.rs | 2 +- src/jobs/udp_tracker.rs | 2 +- src/lib.rs | 4 +- src/{ => servers}/apis/mod.rs | 0 src/{ => servers}/apis/routes.rs | 0 src/{ => servers}/apis/server.rs | 2 +- .../apis/v1/context/auth_key/handlers.rs | 4 +- .../apis/v1/context/auth_key/mod.rs | 0 .../apis/v1/context/auth_key/resources.rs | 0 .../apis/v1/context/auth_key/responses.rs | 4 +- .../apis/v1/context/auth_key/routes.rs | 0 src/{ => servers}/apis/v1/context/mod.rs | 0 .../apis/v1/context/stats/handlers.rs | 0 .../apis/v1/context/stats/mod.rs | 0 .../apis/v1/context/stats/resources.rs | 0 .../apis/v1/context/stats/responses.rs | 0 .../apis/v1/context/stats/routes.rs | 0 .../apis/v1/context/torrent/handlers.rs | 4 +- .../apis/v1/context/torrent/mod.rs | 0 .../apis/v1/context/torrent/resources/mod.rs | 0 .../apis/v1/context/torrent/resources/peer.rs | 0 .../v1/context/torrent/resources/torrent.rs | 4 +- .../apis/v1/context/torrent/responses.rs | 0 .../apis/v1/context/torrent/routes.rs | 0 .../apis/v1/context/whitelist/handlers.rs | 4 +- .../apis/v1/context/whitelist/mod.rs | 0 .../apis/v1/context/whitelist/responses.rs | 2 +- .../apis/v1/context/whitelist/routes.rs | 0 src/{ => servers}/apis/v1/middlewares/auth.rs | 2 +- src/{ => servers}/apis/v1/middlewares/mod.rs | 0 src/{ => servers}/apis/v1/mod.rs | 0 src/{ => servers}/apis/v1/responses.rs | 0 src/{ => servers}/apis/v1/routes.rs | 0 src/{ => servers}/http/mod.rs | 0 src/{ => servers}/http/percent_encoding.rs | 2 +- src/{ => servers}/http/server.rs | 0 .../http/v1/extractors/announce_request.rs | 10 +-- .../http/v1/extractors/authentication_key.rs | 6 +- .../http/v1/extractors/client_ip_sources.rs | 2 +- src/{ => servers}/http/v1/extractors/mod.rs | 0 .../http/v1/extractors/scrape_request.rs | 10 +-- .../http/v1/handlers/announce.rs | 42 +++++----- .../http/v1/handlers/common/auth.rs | 2 +- .../http/v1/handlers/common/mod.rs | 0 
.../http/v1/handlers/common/peer_ip.rs | 8 +- src/{ => servers}/http/v1/handlers/mod.rs | 0 src/{ => servers}/http/v1/handlers/scrape.rs | 34 ++++---- src/{ => servers}/http/v1/launcher.rs | 2 +- src/{ => servers}/http/v1/mod.rs | 0 src/{ => servers}/http/v1/query.rs | 8 +- .../http/v1/requests/announce.rs | 16 ++-- src/{ => servers}/http/v1/requests/mod.rs | 0 src/{ => servers}/http/v1/requests/scrape.rs | 14 ++-- .../http/v1/responses/announce.rs | 4 +- src/{ => servers}/http/v1/responses/error.rs | 0 src/{ => servers}/http/v1/responses/mod.rs | 0 src/{ => servers}/http/v1/responses/scrape.rs | 2 +- src/{ => servers}/http/v1/routes.rs | 0 .../http/v1/services/announce.rs | 4 +- src/{ => servers}/http/v1/services/mod.rs | 0 .../http/v1/services/peer_ip_resolver.rs | 4 +- src/{ => servers}/http/v1/services/scrape.rs | 12 ++- src/servers/mod.rs | 3 + src/{ => servers}/udp/connection_cookie.rs | 2 +- src/{ => servers}/udp/error.rs | 0 src/{ => servers}/udp/handlers.rs | 82 +++++++++---------- src/{ => servers}/udp/mod.rs | 0 src/{ => servers}/udp/peer_builder.rs | 0 src/{ => servers}/udp/request.rs | 0 src/{ => servers}/udp/server.rs | 4 +- src/setup.rs | 2 +- tests/servers/api/test_environment.rs | 2 +- tests/servers/api/v1/asserts.rs | 6 +- .../servers/api/v1/contract/context/stats.rs | 2 +- .../api/v1/contract/context/torrent.rs | 4 +- tests/servers/http/test_environment.rs | 2 +- tests/servers/http/v1/contract.rs | 2 +- tests/servers/udp/client.rs | 2 +- tests/servers/udp/contract.rs | 2 +- tests/servers/udp/test_environment.rs | 2 +- 81 files changed, 171 insertions(+), 166 deletions(-) rename src/{ => servers}/apis/mod.rs (100%) rename src/{ => servers}/apis/routes.rs (100%) rename src/{ => servers}/apis/server.rs (99%) rename src/{ => servers}/apis/v1/context/auth_key/handlers.rs (90%) rename src/{ => servers}/apis/v1/context/auth_key/mod.rs (100%) rename src/{ => servers}/apis/v1/context/auth_key/resources.rs (100%) rename src/{ => 
servers}/apis/v1/context/auth_key/responses.rs (86%) rename src/{ => servers}/apis/v1/context/auth_key/routes.rs (100%) rename src/{ => servers}/apis/v1/context/mod.rs (100%) rename src/{ => servers}/apis/v1/context/stats/handlers.rs (100%) rename src/{ => servers}/apis/v1/context/stats/mod.rs (100%) rename src/{ => servers}/apis/v1/context/stats/resources.rs (100%) rename src/{ => servers}/apis/v1/context/stats/responses.rs (100%) rename src/{ => servers}/apis/v1/context/stats/routes.rs (100%) rename src/{ => servers}/apis/v1/context/torrent/handlers.rs (94%) rename src/{ => servers}/apis/v1/context/torrent/mod.rs (100%) rename src/{ => servers}/apis/v1/context/torrent/resources/mod.rs (100%) rename src/{ => servers}/apis/v1/context/torrent/resources/peer.rs (100%) rename src/{ => servers}/apis/v1/context/torrent/resources/torrent.rs (96%) rename src/{ => servers}/apis/v1/context/torrent/responses.rs (100%) rename src/{ => servers}/apis/v1/context/torrent/routes.rs (100%) rename src/{ => servers}/apis/v1/context/whitelist/handlers.rs (92%) rename src/{ => servers}/apis/v1/context/whitelist/mod.rs (100%) rename src/{ => servers}/apis/v1/context/whitelist/responses.rs (89%) rename src/{ => servers}/apis/v1/context/whitelist/routes.rs (100%) rename src/{ => servers}/apis/v1/middlewares/auth.rs (95%) rename src/{ => servers}/apis/v1/middlewares/mod.rs (100%) rename src/{ => servers}/apis/v1/mod.rs (100%) rename src/{ => servers}/apis/v1/responses.rs (100%) rename src/{ => servers}/apis/v1/routes.rs (100%) rename src/{ => servers}/http/mod.rs (100%) rename src/{ => servers}/http/percent_encoding.rs (95%) rename src/{ => servers}/http/server.rs (100%) rename src/{ => servers}/http/v1/extractors/announce_request.rs (91%) rename src/{ => servers}/http/v1/extractors/authentication_key.rs (95%) rename src/{ => servers}/http/v1/extractors/client_ip_sources.rs (93%) rename src/{ => servers}/http/v1/extractors/mod.rs (100%) rename src/{ => 
servers}/http/v1/extractors/scrape_request.rs (93%) rename src/{ => servers}/http/v1/handlers/announce.rs (86%) rename src/{ => servers}/http/v1/handlers/common/auth.rs (96%) rename src/{ => servers}/http/v1/handlers/common/mod.rs (100%) rename src/{ => servers}/http/v1/handlers/common/peer_ip.rs (77%) rename src/{ => servers}/http/v1/handlers/mod.rs (100%) rename src/{ => servers}/http/v1/handlers/scrape.rs (86%) rename src/{ => servers}/http/v1/launcher.rs (98%) rename src/{ => servers}/http/v1/mod.rs (100%) rename src/{ => servers}/http/v1/query.rs (97%) rename src/{ => servers}/http/v1/requests/announce.rs (97%) rename src/{ => servers}/http/v1/requests/mod.rs (100%) rename src/{ => servers}/http/v1/requests/scrape.rs (89%) rename src/{ => servers}/http/v1/responses/announce.rs (98%) rename src/{ => servers}/http/v1/responses/error.rs (100%) rename src/{ => servers}/http/v1/responses/mod.rs (100%) rename src/{ => servers}/http/v1/responses/scrape.rs (97%) rename src/{ => servers}/http/v1/routes.rs (100%) rename src/{ => servers}/http/v1/services/announce.rs (97%) rename src/{ => servers}/http/v1/services/mod.rs (100%) rename src/{ => servers}/http/v1/services/peer_ip_resolver.rs (95%) rename src/{ => servers}/http/v1/services/scrape.rs (95%) create mode 100644 src/servers/mod.rs rename src/{ => servers}/udp/connection_cookie.rs (99%) rename src/{ => servers}/udp/error.rs (100%) rename src/{ => servers}/udp/handlers.rs (94%) rename src/{ => servers}/udp/mod.rs (100%) rename src/{ => servers}/udp/peer_builder.rs (100%) rename src/{ => servers}/udp/request.rs (100%) rename src/{ => servers}/udp/server.rs (98%) diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index e0091958b..43bd0076f 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -6,8 +6,8 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpTracker; -use crate::http::v1::launcher; -use crate::http::Version; +use 
crate::servers::http::v1::launcher; +use crate::servers::http::Version; use crate::tracker; #[derive(Debug)] diff --git a/src/jobs/tracker_apis.rs b/src/jobs/tracker_apis.rs index 939b58638..cdebc21a8 100644 --- a/src/jobs/tracker_apis.rs +++ b/src/jobs/tracker_apis.rs @@ -6,7 +6,7 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpApi; -use crate::apis::server; +use crate::servers::apis::server; use crate::tracker; #[derive(Debug)] diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 57232855b..138222daf 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -4,8 +4,8 @@ use log::{error, info, warn}; use tokio::task::JoinHandle; use torrust_tracker_configuration::UdpTracker; +use crate::servers::udp::server::Udp; use crate::tracker; -use crate::udp::server::Udp; #[must_use] pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { diff --git a/src/lib.rs b/src/lib.rs index f01ff0468..6c0ae464f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,14 +1,12 @@ -pub mod apis; pub mod databases; -pub mod http; pub mod jobs; pub mod logging; pub mod protocol; +pub mod servers; pub mod setup; pub mod signals; pub mod stats; pub mod tracker; -pub mod udp; #[macro_use] extern crate lazy_static; diff --git a/src/apis/mod.rs b/src/servers/apis/mod.rs similarity index 100% rename from src/apis/mod.rs rename to src/servers/apis/mod.rs diff --git a/src/apis/routes.rs b/src/servers/apis/routes.rs similarity index 100% rename from src/apis/routes.rs rename to src/servers/apis/routes.rs diff --git a/src/apis/server.rs b/src/servers/apis/server.rs similarity index 99% rename from src/apis/server.rs rename to src/servers/apis/server.rs index daac35999..002babbfb 100644 --- a/src/apis/server.rs +++ b/src/servers/apis/server.rs @@ -219,7 +219,7 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::apis::server::ApiServer; + use 
crate::servers::apis::server::ApiServer; use crate::tracker; use crate::tracker::statistics; diff --git a/src/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs similarity index 90% rename from src/apis/v1/context/auth_key/handlers.rs rename to src/servers/apis/v1/context/auth_key/handlers.rs index cb1cd1113..d2e633206 100644 --- a/src/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -9,8 +9,8 @@ use serde::Deserialize; use super::responses::{ auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, }; -use crate::apis::v1::context::auth_key::resources::AuthKey; -use crate::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; +use crate::servers::apis::v1::context::auth_key::resources::AuthKey; +use crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; use crate::tracker::auth::Key; use crate::tracker::Tracker; diff --git a/src/apis/v1/context/auth_key/mod.rs b/src/servers/apis/v1/context/auth_key/mod.rs similarity index 100% rename from src/apis/v1/context/auth_key/mod.rs rename to src/servers/apis/v1/context/auth_key/mod.rs diff --git a/src/apis/v1/context/auth_key/resources.rs b/src/servers/apis/v1/context/auth_key/resources.rs similarity index 100% rename from src/apis/v1/context/auth_key/resources.rs rename to src/servers/apis/v1/context/auth_key/resources.rs diff --git a/src/apis/v1/context/auth_key/responses.rs b/src/servers/apis/v1/context/auth_key/responses.rs similarity index 86% rename from src/apis/v1/context/auth_key/responses.rs rename to src/servers/apis/v1/context/auth_key/responses.rs index 9b8fcebe2..4e3b0c711 100644 --- a/src/apis/v1/context/auth_key/responses.rs +++ b/src/servers/apis/v1/context/auth_key/responses.rs @@ -3,8 +3,8 @@ use std::error::Error; use axum::http::{header, StatusCode}; use axum::response::{IntoResponse, Response}; -use 
crate::apis::v1::context::auth_key::resources::AuthKey; -use crate::apis::v1::responses::unhandled_rejection_response; +use crate::servers::apis::v1::context::auth_key::resources::AuthKey; +use crate::servers::apis::v1::responses::unhandled_rejection_response; /// # Panics /// diff --git a/src/apis/v1/context/auth_key/routes.rs b/src/servers/apis/v1/context/auth_key/routes.rs similarity index 100% rename from src/apis/v1/context/auth_key/routes.rs rename to src/servers/apis/v1/context/auth_key/routes.rs diff --git a/src/apis/v1/context/mod.rs b/src/servers/apis/v1/context/mod.rs similarity index 100% rename from src/apis/v1/context/mod.rs rename to src/servers/apis/v1/context/mod.rs diff --git a/src/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs similarity index 100% rename from src/apis/v1/context/stats/handlers.rs rename to src/servers/apis/v1/context/stats/handlers.rs diff --git a/src/apis/v1/context/stats/mod.rs b/src/servers/apis/v1/context/stats/mod.rs similarity index 100% rename from src/apis/v1/context/stats/mod.rs rename to src/servers/apis/v1/context/stats/mod.rs diff --git a/src/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs similarity index 100% rename from src/apis/v1/context/stats/resources.rs rename to src/servers/apis/v1/context/stats/resources.rs diff --git a/src/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs similarity index 100% rename from src/apis/v1/context/stats/responses.rs rename to src/servers/apis/v1/context/stats/responses.rs diff --git a/src/apis/v1/context/stats/routes.rs b/src/servers/apis/v1/context/stats/routes.rs similarity index 100% rename from src/apis/v1/context/stats/routes.rs rename to src/servers/apis/v1/context/stats/routes.rs diff --git a/src/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs similarity index 94% rename from src/apis/v1/context/torrent/handlers.rs rename to 
src/servers/apis/v1/context/torrent/handlers.rs index fc816cdbf..45ffbcf22 100644 --- a/src/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -8,9 +8,9 @@ use serde::{de, Deserialize, Deserializer}; use super::resources::torrent::ListItem; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; -use crate::apis::v1::responses::invalid_info_hash_param_response; -use crate::apis::InfoHashParam; use crate::protocol::info_hash::InfoHash; +use crate::servers::apis::v1::responses::invalid_info_hash_param_response; +use crate::servers::apis::InfoHashParam; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; diff --git a/src/apis/v1/context/torrent/mod.rs b/src/servers/apis/v1/context/torrent/mod.rs similarity index 100% rename from src/apis/v1/context/torrent/mod.rs rename to src/servers/apis/v1/context/torrent/mod.rs diff --git a/src/apis/v1/context/torrent/resources/mod.rs b/src/servers/apis/v1/context/torrent/resources/mod.rs similarity index 100% rename from src/apis/v1/context/torrent/resources/mod.rs rename to src/servers/apis/v1/context/torrent/resources/mod.rs diff --git a/src/apis/v1/context/torrent/resources/peer.rs b/src/servers/apis/v1/context/torrent/resources/peer.rs similarity index 100% rename from src/apis/v1/context/torrent/resources/peer.rs rename to src/servers/apis/v1/context/torrent/resources/peer.rs diff --git a/src/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs similarity index 96% rename from src/apis/v1/context/torrent/resources/torrent.rs rename to src/servers/apis/v1/context/torrent/resources/torrent.rs index 48f4c58f0..577ac279c 100644 --- a/src/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -75,10 +75,10 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use 
super::Torrent; - use crate::apis::v1::context::torrent::resources::peer::Peer; - use crate::apis::v1::context::torrent::resources::torrent::ListItem; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; + use crate::servers::apis::v1::context::torrent::resources::peer::Peer; + use crate::servers::apis::v1::context::torrent::resources::torrent::ListItem; use crate::tracker::peer; use crate::tracker::services::torrent::{BasicInfo, Info}; diff --git a/src/apis/v1/context/torrent/responses.rs b/src/servers/apis/v1/context/torrent/responses.rs similarity index 100% rename from src/apis/v1/context/torrent/responses.rs rename to src/servers/apis/v1/context/torrent/responses.rs diff --git a/src/apis/v1/context/torrent/routes.rs b/src/servers/apis/v1/context/torrent/routes.rs similarity index 100% rename from src/apis/v1/context/torrent/routes.rs rename to src/servers/apis/v1/context/torrent/routes.rs diff --git a/src/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs similarity index 92% rename from src/apis/v1/context/whitelist/handlers.rs rename to src/servers/apis/v1/context/whitelist/handlers.rs index 325f20e26..2ca70cba7 100644 --- a/src/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -7,9 +7,9 @@ use axum::response::Response; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, }; -use crate::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; -use crate::apis::InfoHashParam; use crate::protocol::info_hash::InfoHash; +use crate::servers::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; +use crate::servers::apis::InfoHashParam; use crate::tracker::Tracker; pub async fn add_torrent_to_whitelist_handler( diff --git a/src/apis/v1/context/whitelist/mod.rs b/src/servers/apis/v1/context/whitelist/mod.rs similarity 
index 100% rename from src/apis/v1/context/whitelist/mod.rs rename to src/servers/apis/v1/context/whitelist/mod.rs diff --git a/src/apis/v1/context/whitelist/responses.rs b/src/servers/apis/v1/context/whitelist/responses.rs similarity index 89% rename from src/apis/v1/context/whitelist/responses.rs rename to src/servers/apis/v1/context/whitelist/responses.rs index 197d4c90b..06d4a9448 100644 --- a/src/apis/v1/context/whitelist/responses.rs +++ b/src/servers/apis/v1/context/whitelist/responses.rs @@ -2,7 +2,7 @@ use std::error::Error; use axum::response::Response; -use crate::apis::v1::responses::unhandled_rejection_response; +use crate::servers::apis::v1::responses::unhandled_rejection_response; #[must_use] pub fn failed_to_remove_torrent_from_whitelist_response(e: E) -> Response { diff --git a/src/apis/v1/context/whitelist/routes.rs b/src/servers/apis/v1/context/whitelist/routes.rs similarity index 100% rename from src/apis/v1/context/whitelist/routes.rs rename to src/servers/apis/v1/context/whitelist/routes.rs diff --git a/src/apis/v1/middlewares/auth.rs b/src/servers/apis/v1/middlewares/auth.rs similarity index 95% rename from src/apis/v1/middlewares/auth.rs rename to src/servers/apis/v1/middlewares/auth.rs index e729072b6..f0c63250b 100644 --- a/src/apis/v1/middlewares/auth.rs +++ b/src/servers/apis/v1/middlewares/auth.rs @@ -7,7 +7,7 @@ use axum::response::{IntoResponse, Response}; use serde::Deserialize; use torrust_tracker_configuration::{Configuration, HttpApi}; -use crate::apis::v1::responses::unhandled_rejection_response; +use crate::servers::apis::v1::responses::unhandled_rejection_response; #[derive(Deserialize, Debug)] pub struct QueryParams { diff --git a/src/apis/v1/middlewares/mod.rs b/src/servers/apis/v1/middlewares/mod.rs similarity index 100% rename from src/apis/v1/middlewares/mod.rs rename to src/servers/apis/v1/middlewares/mod.rs diff --git a/src/apis/v1/mod.rs b/src/servers/apis/v1/mod.rs similarity index 100% rename from src/apis/v1/mod.rs 
rename to src/servers/apis/v1/mod.rs diff --git a/src/apis/v1/responses.rs b/src/servers/apis/v1/responses.rs similarity index 100% rename from src/apis/v1/responses.rs rename to src/servers/apis/v1/responses.rs diff --git a/src/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs similarity index 100% rename from src/apis/v1/routes.rs rename to src/servers/apis/v1/routes.rs diff --git a/src/http/mod.rs b/src/servers/http/mod.rs similarity index 100% rename from src/http/mod.rs rename to src/servers/http/mod.rs diff --git a/src/http/percent_encoding.rs b/src/servers/http/percent_encoding.rs similarity index 95% rename from src/http/percent_encoding.rs rename to src/servers/http/percent_encoding.rs index 3774519fb..c824c8df7 100644 --- a/src/http/percent_encoding.rs +++ b/src/servers/http/percent_encoding.rs @@ -21,8 +21,8 @@ pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result responses::error::Error { mod tests { use super::parse_key; - use crate::http::v1::responses::error::Error; + use crate::servers::http::v1::responses::error::Error; fn assert_error_response(error: &Error, error_message: &str) { assert!( diff --git a/src/http/v1/extractors/client_ip_sources.rs b/src/servers/http/v1/extractors/client_ip_sources.rs similarity index 93% rename from src/http/v1/extractors/client_ip_sources.rs rename to src/servers/http/v1/extractors/client_ip_sources.rs index c8b3659f3..b291eba12 100644 --- a/src/http/v1/extractors/client_ip_sources.rs +++ b/src/servers/http/v1/extractors/client_ip_sources.rs @@ -8,7 +8,7 @@ use axum::http::request::Parts; use axum::response::Response; use axum_client_ip::RightmostXForwardedFor; -use crate::http::v1::services::peer_ip_resolver::ClientIpSources; +use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; pub struct Extract(pub ClientIpSources); diff --git a/src/http/v1/extractors/mod.rs b/src/servers/http/v1/extractors/mod.rs similarity index 100% rename from src/http/v1/extractors/mod.rs rename to 
src/servers/http/v1/extractors/mod.rs diff --git a/src/http/v1/extractors/scrape_request.rs b/src/servers/http/v1/extractors/scrape_request.rs similarity index 93% rename from src/http/v1/extractors/scrape_request.rs rename to src/servers/http/v1/extractors/scrape_request.rs index d63470897..65a40bff2 100644 --- a/src/http/v1/extractors/scrape_request.rs +++ b/src/servers/http/v1/extractors/scrape_request.rs @@ -5,9 +5,9 @@ use axum::extract::FromRequestParts; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use crate::http::v1::query::Query; -use crate::http::v1::requests::scrape::{ParseScrapeQueryError, Scrape}; -use crate::http::v1::responses; +use crate::servers::http::v1::query::Query; +use crate::servers::http::v1::requests::scrape::{ParseScrapeQueryError, Scrape}; +use crate::servers::http::v1::responses; pub struct ExtractRequest(pub Scrape); @@ -53,9 +53,9 @@ mod tests { use std::str::FromStr; use super::extract_scrape_from; - use crate::http::v1::requests::scrape::Scrape; - use crate::http::v1::responses::error::Error; use crate::protocol::info_hash::InfoHash; + use crate::servers::http::v1::requests::scrape::Scrape; + use crate::servers::http::v1::responses::error::Error; struct TestInfoHash { pub bencoded: String, diff --git a/src/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs similarity index 86% rename from src/http/v1/handlers/announce.rs rename to src/servers/http/v1/handlers/announce.rs index 1f10c3fa4..af8a4115e 100644 --- a/src/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -7,15 +7,15 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use crate::http::v1::extractors::announce_request::ExtractRequest; -use crate::http::v1::extractors::authentication_key::Extract as ExtractKey; -use crate::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; -use crate::http::v1::handlers::common::auth; -use 
crate::http::v1::requests::announce::{Announce, Compact, Event}; -use crate::http::v1::responses::{self, announce}; -use crate::http::v1::services::peer_ip_resolver::ClientIpSources; -use crate::http::v1::services::{self, peer_ip_resolver}; use crate::protocol::clock::{Current, Time}; +use crate::servers::http::v1::extractors::announce_request::ExtractRequest; +use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; +use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; +use crate::servers::http::v1::handlers::common::auth; +use crate::servers::http::v1::requests::announce::{Announce, Compact, Event}; +use crate::servers::http::v1::responses::{self, announce}; +use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; +use crate::servers::http::v1::services::{self, peer_ip_resolver}; use crate::tracker::auth::Key; use crate::tracker::peer::Peer; use crate::tracker::{AnnounceData, Tracker}; @@ -141,10 +141,10 @@ mod tests { use torrust_tracker_test_helpers::configuration; - use crate::http::v1::requests::announce::Announce; - use crate::http::v1::responses; - use crate::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::protocol::info_hash::InfoHash; + use crate::servers::http::v1::requests::announce::Announce; + use crate::servers::http::v1::responses; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::tracker::services::common::tracker_factory; use crate::tracker::{peer, Tracker}; @@ -197,8 +197,8 @@ mod tests { use std::sync::Arc; use super::{private_tracker, sample_announce_request, sample_client_ip_sources}; - use crate::http::v1::handlers::announce::handle_announce; - use crate::http::v1::handlers::announce::tests::assert_error_response; + use crate::servers::http::v1::handlers::announce::handle_announce; + use crate::servers::http::v1::handlers::announce::tests::assert_error_response; use crate::tracker::auth; 
#[tokio::test] @@ -238,8 +238,8 @@ mod tests { use std::sync::Arc; use super::{sample_announce_request, sample_client_ip_sources, whitelisted_tracker}; - use crate::http::v1::handlers::announce::handle_announce; - use crate::http::v1::handlers::announce::tests::assert_error_response; + use crate::servers::http::v1::handlers::announce::handle_announce; + use crate::servers::http::v1::handlers::announce::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { @@ -266,9 +266,9 @@ mod tests { use std::sync::Arc; use super::{sample_announce_request, tracker_on_reverse_proxy}; - use crate::http::v1::handlers::announce::handle_announce; - use crate::http::v1::handlers::announce::tests::assert_error_response; - use crate::http::v1::services::peer_ip_resolver::ClientIpSources; + use crate::servers::http::v1::handlers::announce::handle_announce; + use crate::servers::http::v1::handlers::announce::tests::assert_error_response; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { @@ -295,9 +295,9 @@ mod tests { use std::sync::Arc; use super::{sample_announce_request, tracker_not_on_reverse_proxy}; - use crate::http::v1::handlers::announce::handle_announce; - use crate::http::v1::handlers::announce::tests::assert_error_response; - use crate::http::v1::services::peer_ip_resolver::ClientIpSources; + use crate::servers::http::v1::handlers::announce::handle_announce; + use crate::servers::http::v1::handlers::announce::tests::assert_error_response; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { diff --git a/src/http/v1/handlers/common/auth.rs b/src/servers/http/v1/handlers/common/auth.rs similarity index 96% rename from src/http/v1/handlers/common/auth.rs rename 
to src/servers/http/v1/handlers/common/auth.rs index 938fc3f01..644556e95 100644 --- a/src/http/v1/handlers/common/auth.rs +++ b/src/servers/http/v1/handlers/common/auth.rs @@ -2,7 +2,7 @@ use std::panic::Location; use thiserror::Error; -use crate::http::v1::responses; +use crate::servers::http::v1::responses; use crate::tracker::auth; #[derive(Debug, Error)] diff --git a/src/http/v1/handlers/common/mod.rs b/src/servers/http/v1/handlers/common/mod.rs similarity index 100% rename from src/http/v1/handlers/common/mod.rs rename to src/servers/http/v1/handlers/common/mod.rs diff --git a/src/http/v1/handlers/common/peer_ip.rs b/src/servers/http/v1/handlers/common/peer_ip.rs similarity index 77% rename from src/http/v1/handlers/common/peer_ip.rs rename to src/servers/http/v1/handlers/common/peer_ip.rs index e182c716b..685324b4a 100644 --- a/src/http/v1/handlers/common/peer_ip.rs +++ b/src/servers/http/v1/handlers/common/peer_ip.rs @@ -1,5 +1,5 @@ -use crate::http::v1::responses; -use crate::http::v1::services::peer_ip_resolver::PeerIpResolutionError; +use crate::servers::http::v1::responses; +use crate::servers::http::v1::services::peer_ip_resolver::PeerIpResolutionError; impl From for responses::error::Error { fn from(err: PeerIpResolutionError) -> Self { @@ -13,8 +13,8 @@ impl From for responses::error::Error { mod tests { use std::panic::Location; - use crate::http::v1::responses; - use crate::http::v1::services::peer_ip_resolver::PeerIpResolutionError; + use crate::servers::http::v1::responses; + use crate::servers::http::v1::services::peer_ip_resolver::PeerIpResolutionError; fn assert_error_response(error: &responses::error::Error, error_message: &str) { assert!( diff --git a/src/http/v1/handlers/mod.rs b/src/servers/http/v1/handlers/mod.rs similarity index 100% rename from src/http/v1/handlers/mod.rs rename to src/servers/http/v1/handlers/mod.rs diff --git a/src/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs similarity index 86% rename from 
src/http/v1/handlers/scrape.rs rename to src/servers/http/v1/handlers/scrape.rs index 50f92cd36..75c5717de 100644 --- a/src/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -4,12 +4,12 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use crate::http::v1::extractors::authentication_key::Extract as ExtractKey; -use crate::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; -use crate::http::v1::extractors::scrape_request::ExtractRequest; -use crate::http::v1::requests::scrape::Scrape; -use crate::http::v1::services::peer_ip_resolver::{self, ClientIpSources}; -use crate::http::v1::{responses, services}; +use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; +use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; +use crate::servers::http::v1::extractors::scrape_request::ExtractRequest; +use crate::servers::http::v1::requests::scrape::Scrape; +use crate::servers::http::v1::services::peer_ip_resolver::{self, ClientIpSources}; +use crate::servers::http::v1::{responses, services}; use crate::tracker::auth::Key; use crate::tracker::{ScrapeData, Tracker}; @@ -99,10 +99,10 @@ mod tests { use torrust_tracker_test_helpers::configuration; - use crate::http::v1::requests::scrape::Scrape; - use crate::http::v1::responses; - use crate::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::protocol::info_hash::InfoHash; + use crate::servers::http::v1::requests::scrape::Scrape; + use crate::servers::http::v1::responses; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::tracker::services::common::tracker_factory; use crate::tracker::Tracker; @@ -147,7 +147,7 @@ mod tests { use std::sync::Arc; use super::{private_tracker, sample_client_ip_sources, sample_scrape_request}; - use crate::http::v1::handlers::scrape::handle_scrape; + use 
crate::servers::http::v1::handlers::scrape::handle_scrape; use crate::tracker::{auth, ScrapeData}; #[tokio::test] @@ -189,7 +189,7 @@ mod tests { use std::sync::Arc; use super::{sample_client_ip_sources, sample_scrape_request, whitelisted_tracker}; - use crate::http::v1::handlers::scrape::handle_scrape; + use crate::servers::http::v1::handlers::scrape::handle_scrape; use crate::tracker::ScrapeData; #[tokio::test] @@ -212,9 +212,9 @@ mod tests { use std::sync::Arc; use super::{sample_scrape_request, tracker_on_reverse_proxy}; - use crate::http::v1::handlers::scrape::handle_scrape; - use crate::http::v1::handlers::scrape::tests::assert_error_response; - use crate::http::v1::services::peer_ip_resolver::ClientIpSources; + use crate::servers::http::v1::handlers::scrape::handle_scrape; + use crate::servers::http::v1::handlers::scrape::tests::assert_error_response; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { @@ -240,9 +240,9 @@ mod tests { use std::sync::Arc; use super::{sample_scrape_request, tracker_not_on_reverse_proxy}; - use crate::http::v1::handlers::scrape::handle_scrape; - use crate::http::v1::handlers::scrape::tests::assert_error_response; - use crate::http::v1::services::peer_ip_resolver::ClientIpSources; + use crate::servers::http::v1::handlers::scrape::handle_scrape; + use crate::servers::http::v1::handlers::scrape::tests::assert_error_response; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { diff --git a/src/http/v1/launcher.rs b/src/servers/http/v1/launcher.rs similarity index 98% rename from src/http/v1/launcher.rs rename to src/servers/http/v1/launcher.rs index 45bc54664..4cfa4295d 100644 --- a/src/http/v1/launcher.rs +++ b/src/servers/http/v1/launcher.rs @@ -10,7 +10,7 @@ use 
futures::future::BoxFuture; use log::info; use super::routes::router; -use crate::http::server::HttpServerLauncher; +use crate::servers::http::server::HttpServerLauncher; use crate::tracker::Tracker; #[derive(Debug)] diff --git a/src/http/v1/mod.rs b/src/servers/http/v1/mod.rs similarity index 100% rename from src/http/v1/mod.rs rename to src/servers/http/v1/mod.rs diff --git a/src/http/v1/query.rs b/src/servers/http/v1/query.rs similarity index 97% rename from src/http/v1/query.rs rename to src/servers/http/v1/query.rs index 45484ea38..c40e7949f 100644 --- a/src/http/v1/query.rs +++ b/src/servers/http/v1/query.rs @@ -174,7 +174,7 @@ impl std::fmt::Display for FieldValuePairSet { mod tests { mod url_query { - use crate::http::v1::query::Query; + use crate::servers::http::v1::query::Query; #[test] fn should_parse_the_query_params_from_an_url_query_string() { @@ -227,7 +227,7 @@ mod tests { } mod should_allow_more_than_one_value_for_the_same_param { - use crate::http::v1::query::Query; + use crate::servers::http::v1::query::Query; #[test] fn instantiated_from_a_vector() { @@ -249,7 +249,7 @@ mod tests { } mod should_be_displayed { - use crate::http::v1::query::Query; + use crate::servers::http::v1::query::Query; #[test] fn with_one_param() { @@ -270,7 +270,7 @@ mod tests { } mod param_name_value_pair { - use crate::http::v1::query::NameValuePair; + use crate::servers::http::v1::query::NameValuePair; #[test] fn should_parse_a_single_query_param() { diff --git a/src/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs similarity index 97% rename from src/http/v1/requests/announce.rs rename to src/servers/http/v1/requests/announce.rs index eeab97d5f..3b1e55cb9 100644 --- a/src/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -5,10 +5,10 @@ use std::str::FromStr; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; -use crate::http::percent_encoding::{percent_decode_info_hash, 
percent_decode_peer_id}; -use crate::http::v1::query::{ParseQueryError, Query}; -use crate::http::v1::responses; use crate::protocol::info_hash::{ConversionError, InfoHash}; +use crate::servers::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; +use crate::servers::http::v1::query::{ParseQueryError, Query}; +use crate::servers::http::v1::responses; use crate::tracker::peer::{self, IdConversionError}; pub type NumberOfBytes = i64; @@ -280,11 +280,11 @@ mod tests { mod announce_request { - use crate::http::v1::query::Query; - use crate::http::v1::requests::announce::{ + use crate::protocol::info_hash::InfoHash; + use crate::servers::http::v1::query::Query; + use crate::servers::http::v1::requests::announce::{ Announce, Compact, Event, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, }; - use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; #[test] @@ -350,8 +350,8 @@ mod tests { mod when_it_is_instantiated_from_the_url_query_params { - use crate::http::v1::query::Query; - use crate::http::v1::requests::announce::{ + use crate::servers::http::v1::query::Query; + use crate::servers::http::v1::requests::announce::{ Announce, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, }; diff --git a/src/http/v1/requests/mod.rs b/src/servers/http/v1/requests/mod.rs similarity index 100% rename from src/http/v1/requests/mod.rs rename to src/servers/http/v1/requests/mod.rs diff --git a/src/http/v1/requests/scrape.rs b/src/servers/http/v1/requests/scrape.rs similarity index 89% rename from src/http/v1/requests/scrape.rs rename to src/servers/http/v1/requests/scrape.rs index 6257f0733..e50895c20 100644 --- a/src/http/v1/requests/scrape.rs +++ b/src/servers/http/v1/requests/scrape.rs @@ -3,10 +3,10 @@ use std::panic::Location; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; -use crate::http::percent_encoding::percent_decode_info_hash; -use crate::http::v1::query::Query; 
-use crate::http::v1::responses; use crate::protocol::info_hash::{ConversionError, InfoHash}; +use crate::servers::http::percent_encoding::percent_decode_info_hash; +use crate::servers::http::v1::query::Query; +use crate::servers::http::v1::responses; pub type NumberOfBytes = i64; @@ -85,9 +85,9 @@ mod tests { mod scrape_request { - use crate::http::v1::query::Query; - use crate::http::v1::requests::scrape::{Scrape, INFO_HASH}; use crate::protocol::info_hash::InfoHash; + use crate::servers::http::v1::query::Query; + use crate::servers::http::v1::requests::scrape::{Scrape, INFO_HASH}; #[test] fn should_be_instantiated_from_the_url_query_with_only_one_infohash() { @@ -107,8 +107,8 @@ mod tests { mod when_it_is_instantiated_from_the_url_query_params { - use crate::http::v1::query::Query; - use crate::http::v1::requests::scrape::{Scrape, INFO_HASH}; + use crate::servers::http::v1::query::Query; + use crate::servers::http::v1::requests::scrape::{Scrape, INFO_HASH}; #[test] fn it_should_fail_if_the_query_does_not_include_the_info_hash_param() { diff --git a/src/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs similarity index 98% rename from src/http/v1/responses/announce.rs rename to src/servers/http/v1/responses/announce.rs index 8b178ff7e..4902e0d62 100644 --- a/src/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -8,7 +8,7 @@ use bip_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut} use serde::{self, Deserialize, Serialize}; use thiserror::Error; -use crate::http::v1::responses; +use crate::servers::http::v1::responses; use crate::tracker::{self, AnnounceData}; /// Normal (non compact) "announce" response @@ -250,7 +250,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use super::{NonCompact, Peer}; - use crate::http::v1::responses::announce::{Compact, CompactPeer}; + use crate::servers::http::v1::responses::announce::{Compact, CompactPeer}; // Some ascii values used in tests: 
// diff --git a/src/http/v1/responses/error.rs b/src/servers/http/v1/responses/error.rs similarity index 100% rename from src/http/v1/responses/error.rs rename to src/servers/http/v1/responses/error.rs diff --git a/src/http/v1/responses/mod.rs b/src/servers/http/v1/responses/mod.rs similarity index 100% rename from src/http/v1/responses/mod.rs rename to src/servers/http/v1/responses/mod.rs diff --git a/src/http/v1/responses/scrape.rs b/src/servers/http/v1/responses/scrape.rs similarity index 97% rename from src/http/v1/responses/scrape.rs rename to src/servers/http/v1/responses/scrape.rs index 5cbe6502e..7d9e169c8 100644 --- a/src/http/v1/responses/scrape.rs +++ b/src/servers/http/v1/responses/scrape.rs @@ -55,8 +55,8 @@ impl IntoResponse for Bencoded { mod tests { mod scrape_response { - use crate::http::v1::responses::scrape::Bencoded; use crate::protocol::info_hash::InfoHash; + use crate::servers::http::v1::responses::scrape::Bencoded; use crate::tracker::torrent::SwarmMetadata; use crate::tracker::ScrapeData; diff --git a/src/http/v1/routes.rs b/src/servers/http/v1/routes.rs similarity index 100% rename from src/http/v1/routes.rs rename to src/servers/http/v1/routes.rs diff --git a/src/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs similarity index 97% rename from src/http/v1/services/announce.rs rename to src/servers/http/v1/services/announce.rs index a8b9f0d06..3f8c5a839 100644 --- a/src/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -77,8 +77,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::http::v1::services::announce::invoke; - use crate::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; + use crate::servers::http::v1::services::announce::invoke; + use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; use 
crate::tracker::peer::Peer; use crate::tracker::torrent::SwarmStats; use crate::tracker::{statistics, AnnounceData, Tracker}; diff --git a/src/http/v1/services/mod.rs b/src/servers/http/v1/services/mod.rs similarity index 100% rename from src/http/v1/services/mod.rs rename to src/servers/http/v1/services/mod.rs diff --git a/src/http/v1/services/peer_ip_resolver.rs b/src/servers/http/v1/services/peer_ip_resolver.rs similarity index 95% rename from src/http/v1/services/peer_ip_resolver.rs rename to src/servers/http/v1/services/peer_ip_resolver.rs index c7bc183b4..ac5b8c79f 100644 --- a/src/http/v1/services/peer_ip_resolver.rs +++ b/src/servers/http/v1/services/peer_ip_resolver.rs @@ -73,7 +73,7 @@ mod tests { use std::str::FromStr; use super::invoke; - use crate::http::v1::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; + use crate::servers::http::v1::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; #[test] fn it_should_get_the_peer_ip_from_the_connection_info() { @@ -112,7 +112,7 @@ mod tests { use std::net::IpAddr; use std::str::FromStr; - use crate::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; + use crate::servers::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; #[test] fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { diff --git a/src/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs similarity index 95% rename from src/http/v1/services/scrape.rs rename to src/servers/http/v1/services/scrape.rs index b6f319375..1044634ad 100644 --- a/src/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -77,8 +77,10 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_test_helpers::configuration; - use crate::http::v1::services::scrape::invoke; - use crate::http::v1::services::scrape::tests::{public_tracker, sample_info_hash, sample_info_hashes, sample_peer}; + use 
crate::servers::http::v1::services::scrape::invoke; + use crate::servers::http::v1::services::scrape::tests::{ + public_tracker, sample_info_hash, sample_info_hashes, sample_peer, + }; use crate::tracker::torrent::SwarmMetadata; use crate::tracker::{statistics, ScrapeData, Tracker}; @@ -167,8 +169,10 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_test_helpers::configuration; - use crate::http::v1::services::scrape::fake; - use crate::http::v1::services::scrape::tests::{public_tracker, sample_info_hash, sample_info_hashes, sample_peer}; + use crate::servers::http::v1::services::scrape::fake; + use crate::servers::http::v1::services::scrape::tests::{ + public_tracker, sample_info_hash, sample_info_hashes, sample_peer, + }; use crate::tracker::{statistics, ScrapeData, Tracker}; #[tokio::test] diff --git a/src/servers/mod.rs b/src/servers/mod.rs new file mode 100644 index 000000000..17005b56d --- /dev/null +++ b/src/servers/mod.rs @@ -0,0 +1,3 @@ +pub mod apis; +pub mod http; +pub mod udp; diff --git a/src/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs similarity index 99% rename from src/udp/connection_cookie.rs rename to src/servers/udp/connection_cookie.rs index ef2a8b219..a55d40801 100644 --- a/src/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -92,7 +92,7 @@ mod tests { use super::cookie_builder::{self}; use crate::protocol::clock::time_extent::{self, Extent}; use crate::protocol::clock::{Stopped, StoppedTime}; - use crate::udp::connection_cookie::{check, make, Cookie, COOKIE_LIFETIME}; + use crate::servers::udp::connection_cookie::{check, make, Cookie, COOKIE_LIFETIME}; // #![feature(const_socketaddr)] // const REMOTE_ADDRESS_IPV4_ZERO: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); diff --git a/src/udp/error.rs b/src/servers/udp/error.rs similarity index 100% rename from src/udp/error.rs rename to src/servers/udp/error.rs diff --git a/src/udp/handlers.rs b/src/servers/udp/handlers.rs 
similarity index 94% rename from src/udp/handlers.rs rename to src/servers/udp/handlers.rs index 41b1184dc..1544e13cc 100644 --- a/src/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -11,10 +11,10 @@ use log::debug; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; +use crate::servers::udp::error::Error; +use crate::servers::udp::peer_builder; +use crate::servers::udp::request::AnnounceWrapper; use crate::tracker::{statistics, Tracker}; -use crate::udp::error::Error; -use crate::udp::peer_builder; -use crate::udp::request::AnnounceWrapper; pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: &Tracker) -> Response { match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|e| Error::InternalServer { @@ -354,10 +354,10 @@ mod tests { use mockall::predicate::eq; use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr, tracker_configuration}; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_connect; + use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; use crate::tracker::{self, statistics}; - use crate::udp::connection_cookie::{into_connection_id, make}; - use crate::udp::handlers::handle_connect; - use crate::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; fn sample_connect_request() -> ConnectRequest { ConnectRequest { @@ -451,8 +451,8 @@ mod tests { TransactionId, }; - use crate::udp::connection_cookie::{into_connection_id, make}; - use crate::udp::handlers::tests::sample_ipv4_remote_addr; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::tests::sample_ipv4_remote_addr; struct AnnounceRequestBuilder { request: AnnounceRequest, @@ -525,13 +525,13 @@ mod tests { }; use mockall::predicate::eq; - use 
crate::tracker::{self, peer, statistics}; - use crate::udp::connection_cookie::{into_connection_id, make}; - use crate::udp::handlers::handle_announce; - use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::udp::handlers::tests::{ + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_announce; + use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{ public_tracker, sample_ipv4_socket_address, tracker_configuration, TorrentPeerBuilder, }; + use crate::tracker::{self, peer, statistics}; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -692,11 +692,11 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_announce; + use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{public_tracker, TorrentPeerBuilder}; use crate::tracker::peer; - use crate::udp::connection_cookie::{into_connection_id, make}; - use crate::udp::handlers::handle_announce; - use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::udp::handlers::tests::{public_tracker, TorrentPeerBuilder}; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { @@ -746,13 +746,13 @@ mod tests { }; use mockall::predicate::eq; - use crate::tracker::{self, peer, statistics}; - use crate::udp::connection_cookie::{into_connection_id, make}; - use crate::udp::handlers::handle_announce; - use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::udp::handlers::tests::{ + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use 
crate::servers::udp::handlers::handle_announce; + use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{ public_tracker, sample_ipv6_remote_addr, tracker_configuration, TorrentPeerBuilder, }; + use crate::tracker::{self, peer, statistics}; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -921,12 +921,12 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_announce; + use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::TrackerConfigurationBuilder; use crate::tracker; use crate::tracker::statistics::Keeper; - use crate::udp::connection_cookie::{into_connection_id, make}; - use crate::udp::handlers::handle_announce; - use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::udp::handlers::tests::TrackerConfigurationBuilder; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { @@ -983,10 +983,10 @@ mod tests { }; use super::TorrentPeerBuilder; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; use crate::tracker::{self, peer}; - use crate::udp::connection_cookie::{into_connection_id, make}; - use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { @@ -1067,8 +1067,8 @@ mod tests { mod with_a_public_tracker { use aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - use crate::udp::handlers::tests::public_tracker; 
- use crate::udp::handlers::tests::scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}; + use crate::servers::udp::handlers::tests::public_tracker; + use crate::servers::udp::handlers::tests::scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}; #[tokio::test] async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { @@ -1090,11 +1090,11 @@ mod tests { use aquatic_udp_protocol::InfoHash; - use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::scrape_request::{ + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::scrape_request::{ add_a_sample_seeder_and_scrape, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; - use crate::udp::handlers::tests::{private_tracker, sample_ipv4_remote_addr}; + use crate::servers::udp::handlers::tests::{private_tracker, sample_ipv4_remote_addr}; #[tokio::test] async fn should_return_zeroed_statistics_when_the_tracker_does_not_have_the_requested_torrent() { @@ -1128,11 +1128,11 @@ mod tests { mod with_a_whitelisted_tracker { use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::scrape_request::{ + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::scrape_request::{ add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; - use crate::udp::handlers::tests::{sample_ipv4_remote_addr, whitelisted_tracker}; + use crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr, whitelisted_tracker}; #[tokio::test] async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { @@ -1195,9 +1195,9 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; + use crate::servers::udp::handlers::handle_scrape; + use 
crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr, tracker_configuration}; use crate::tracker::{self, statistics}; - use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{sample_ipv4_remote_addr, tracker_configuration}; #[tokio::test] async fn should_send_the_upd4_scrape_event() { @@ -1227,9 +1227,9 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::{sample_ipv6_remote_addr, tracker_configuration}; use crate::tracker::{self, statistics}; - use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{sample_ipv6_remote_addr, tracker_configuration}; #[tokio::test] async fn should_send_the_upd6_scrape_event() { diff --git a/src/udp/mod.rs b/src/servers/udp/mod.rs similarity index 100% rename from src/udp/mod.rs rename to src/servers/udp/mod.rs diff --git a/src/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs similarity index 100% rename from src/udp/peer_builder.rs rename to src/servers/udp/peer_builder.rs diff --git a/src/udp/request.rs b/src/servers/udp/request.rs similarity index 100% rename from src/udp/request.rs rename to src/servers/udp/request.rs diff --git a/src/udp/server.rs b/src/servers/udp/server.rs similarity index 98% rename from src/udp/server.rs rename to src/servers/udp/server.rs index e52b8fd52..f44f37f5f 100644 --- a/src/udp/server.rs +++ b/src/servers/udp/server.rs @@ -9,10 +9,10 @@ use log::{debug, error, info}; use tokio::net::UdpSocket; use tokio::task::JoinHandle; +use crate::servers::udp::handlers::handle_packet; +use crate::servers::udp::MAX_PACKET_SIZE; use crate::signals::shutdown_signal; use crate::tracker::Tracker; -use crate::udp::handlers::handle_packet; -use crate::udp::MAX_PACKET_SIZE; #[derive(Debug)] pub enum Error { diff --git a/src/setup.rs b/src/setup.rs index 86de0723c..c8e5e4113 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -4,8 +4,8 @@ use 
log::warn; use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; -use crate::http::Version; use crate::jobs::{http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; +use crate::servers::http::Version; use crate::tracker; /// # Panics diff --git a/tests/servers/api/test_environment.rs b/tests/servers/api/test_environment.rs index b6f5ca990..07ba46fff 100644 --- a/tests/servers/api/test_environment.rs +++ b/tests/servers/api/test_environment.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use torrust_tracker::apis::server::{ApiServer, RunningApiServer, StoppedApiServer}; use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::servers::apis::server::{ApiServer, RunningApiServer, StoppedApiServer}; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; diff --git a/tests/servers/api/v1/asserts.rs b/tests/servers/api/v1/asserts.rs index d37bcdbb4..1b1f204a2 100644 --- a/tests/servers/api/v1/asserts.rs +++ b/tests/servers/api/v1/asserts.rs @@ -1,9 +1,9 @@ // code-review: should we use macros to return the exact line where the assert fails? 
use reqwest::Response; -use torrust_tracker::apis::v1::context::auth_key::resources::AuthKey; -use torrust_tracker::apis::v1::context::stats::resources::Stats; -use torrust_tracker::apis::v1::context::torrent::resources::torrent::{ListItem, Torrent}; +use torrust_tracker::servers::apis::v1::context::auth_key::resources::AuthKey; +use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; +use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{ListItem, Torrent}; // Resource responses diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 3929a4270..038272963 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -1,7 +1,7 @@ use std::str::FromStr; -use torrust_tracker::apis::v1::context::stats::resources::Stats; use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index 702a8bcd4..69d600a8b 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -1,8 +1,8 @@ use std::str::FromStr; -use torrust_tracker::apis::v1::context::torrent::resources::peer::Peer; -use torrust_tracker::apis::v1::context::torrent::resources::torrent::{self, Torrent}; use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::servers::apis::v1::context::torrent::resources::peer::Peer; +use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; diff --git a/tests/servers/http/test_environment.rs b/tests/servers/http/test_environment.rs index 459c2fbe6..aa414d62e 
100644 --- a/tests/servers/http/test_environment.rs +++ b/tests/servers/http/test_environment.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use torrust_tracker::http::server::{HttpServer, HttpServerLauncher, RunningHttpServer, StoppedHttpServer}; use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::servers::http::server::{HttpServer, HttpServerLauncher, RunningHttpServer, StoppedHttpServer}; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 501c0f6fa..f67b0c5e5 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -2,7 +2,7 @@ use torrust_tracker_test_helpers::configuration; use crate::servers::http::test_environment::running_test_environment; -pub type V1 = torrust_tracker::http::v1::launcher::Launcher; +pub type V1 = torrust_tracker::servers::http::v1::launcher::Launcher; #[tokio::test] async fn test_environment_should_be_started_and_stopped() { diff --git a/tests/servers/udp/client.rs b/tests/servers/udp/client.rs index a13845b97..75467055e 100644 --- a/tests/servers/udp/client.rs +++ b/tests/servers/udp/client.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use aquatic_udp_protocol::{Request, Response}; use tokio::net::UdpSocket; -use torrust_tracker::udp::MAX_PACKET_SIZE; +use torrust_tracker::servers::udp::MAX_PACKET_SIZE; use crate::servers::udp::source_address; diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index 311cf5e49..3187d9871 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -6,7 +6,7 @@ use core::panic; use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; -use torrust_tracker::udp::MAX_PACKET_SIZE; +use torrust_tracker::servers::udp::MAX_PACKET_SIZE; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_error_response; diff --git a/tests/servers/udp/test_environment.rs 
b/tests/servers/udp/test_environment.rs index f729777a1..11a2cf6cd 100644 --- a/tests/servers/udp/test_environment.rs +++ b/tests/servers/udp/test_environment.rs @@ -2,9 +2,9 @@ use std::net::SocketAddr; use std::sync::Arc; use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::servers::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; -use torrust_tracker::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; use crate::common::tracker::new_tracker; From 389771b5b221a37908e7f0a584da5864d855f501 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 08:22:25 +0000 Subject: [PATCH 0479/1003] refactor: move databases mod to tracker mod Database is only use inisde the `tracker` mod. --- src/lib.rs | 1 - src/{ => tracker}/databases/driver.rs | 0 src/{ => tracker}/databases/error.rs | 0 src/{ => tracker}/databases/mod.rs | 0 src/{ => tracker}/databases/mysql.rs | 2 +- src/{ => tracker}/databases/sqlite.rs | 2 +- src/tracker/mod.rs | 3 ++- 7 files changed, 4 insertions(+), 4 deletions(-) rename src/{ => tracker}/databases/driver.rs (100%) rename src/{ => tracker}/databases/error.rs (100%) rename src/{ => tracker}/databases/mod.rs (100%) rename src/{ => tracker}/databases/mysql.rs (99%) rename src/{ => tracker}/databases/sqlite.rs (99%) diff --git a/src/lib.rs b/src/lib.rs index 6c0ae464f..442b687fa 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,3 @@ -pub mod databases; pub mod jobs; pub mod logging; pub mod protocol; diff --git a/src/databases/driver.rs b/src/tracker/databases/driver.rs similarity index 100% rename from src/databases/driver.rs rename to src/tracker/databases/driver.rs diff --git a/src/databases/error.rs b/src/tracker/databases/error.rs similarity index 100% rename from src/databases/error.rs rename to src/tracker/databases/error.rs diff --git a/src/databases/mod.rs b/src/tracker/databases/mod.rs similarity index 100% 
rename from src/databases/mod.rs rename to src/tracker/databases/mod.rs diff --git a/src/databases/mysql.rs b/src/tracker/databases/mysql.rs similarity index 99% rename from src/databases/mysql.rs rename to src/tracker/databases/mysql.rs index f6918974f..ded9e1617 100644 --- a/src/databases/mysql.rs +++ b/src/tracker/databases/mysql.rs @@ -9,7 +9,7 @@ use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; use torrust_tracker_primitives::DatabaseDriver; -use crate::databases::{Database, Error}; +use super::{Database, Error}; use crate::protocol::common::AUTH_KEY_LENGTH; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth::{self, Key}; diff --git a/src/databases/sqlite.rs b/src/tracker/databases/sqlite.rs similarity index 99% rename from src/databases/sqlite.rs rename to src/tracker/databases/sqlite.rs index adb201def..e230ac18f 100644 --- a/src/databases/sqlite.rs +++ b/src/tracker/databases/sqlite.rs @@ -6,7 +6,7 @@ use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; use torrust_tracker_primitives::DatabaseDriver; -use crate::databases::{Database, Error}; +use super::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth::{self, Key}; diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 71bb41f90..9ed28b684 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1,4 +1,5 @@ pub mod auth; +pub mod databases; pub mod error; pub mod peer; pub mod services; @@ -21,8 +22,8 @@ use self::auth::Key; use self::error::Error; use self::peer::Peer; use self::torrent::{SwarmMetadata, SwarmStats}; -use crate::databases::{self, Database}; use crate::protocol::info_hash::InfoHash; +use crate::tracker::databases::Database; pub struct Tracker { pub config: Arc, From 06ea911ef3a5da68d7d53624eb6b83b54ad5ab49 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 08:48:01 +0000 Subject: [PATCH 0480/1003] refactor: move 
signals to servers mod --- src/lib.rs | 1 - src/servers/apis/server.rs | 2 +- src/servers/http/server.rs | 2 +- src/servers/mod.rs | 1 + src/{ => servers}/signals.rs | 0 src/servers/udp/server.rs | 2 +- 6 files changed, 4 insertions(+), 4 deletions(-) rename src/{ => servers}/signals.rs (100%) diff --git a/src/lib.rs b/src/lib.rs index 442b687fa..1eb54edbd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,7 +3,6 @@ pub mod logging; pub mod protocol; pub mod servers; pub mod setup; -pub mod signals; pub mod stats; pub mod tracker; diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 002babbfb..e4714cd9a 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -9,7 +9,7 @@ use futures::Future; use log::info; use super::routes::router; -use crate::signals::shutdown_signal; +use crate::servers::signals::shutdown_signal; use crate::tracker::Tracker; #[derive(Debug)] diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 98160777c..510c685d4 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use futures::future::BoxFuture; -use crate::signals::shutdown_signal; +use crate::servers::signals::shutdown_signal; use crate::tracker::Tracker; /// Trait to be implemented by a http server launcher for the tracker. 
diff --git a/src/servers/mod.rs b/src/servers/mod.rs index 17005b56d..a71b3f029 100644 --- a/src/servers/mod.rs +++ b/src/servers/mod.rs @@ -1,3 +1,4 @@ pub mod apis; pub mod http; +pub mod signals; pub mod udp; diff --git a/src/signals.rs b/src/servers/signals.rs similarity index 100% rename from src/signals.rs rename to src/servers/signals.rs diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index f44f37f5f..9eb9836fe 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -9,9 +9,9 @@ use log::{debug, error, info}; use tokio::net::UdpSocket; use tokio::task::JoinHandle; +use crate::servers::signals::shutdown_signal; use crate::servers::udp::handlers::handle_packet; use crate::servers::udp::MAX_PACKET_SIZE; -use crate::signals::shutdown_signal; use crate::tracker::Tracker; #[derive(Debug)] From 5dab523174894e195e6e3905e0f2ed818baf952a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 09:40:48 +0000 Subject: [PATCH 0481/1003] refactor: extract bootstrap mod --- src/{ => bootstrap}/jobs/http_tracker.rs | 0 src/{setup.rs => bootstrap/jobs/mod.rs} | 6 +++++- src/{ => bootstrap}/jobs/torrent_cleanup.rs | 0 src/{ => bootstrap}/jobs/tracker_apis.rs | 0 src/{ => bootstrap}/jobs/udp_tracker.rs | 0 src/{ => bootstrap}/logging.rs | 0 src/bootstrap/mod.rs | 3 +++ src/{ => bootstrap}/stats.rs | 8 ++++---- src/jobs/mod.rs | 4 ---- src/lib.rs | 5 +---- src/main.rs | 10 +++++----- tests/common/tracker.rs | 4 ++-- 12 files changed, 20 insertions(+), 20 deletions(-) rename src/{ => bootstrap}/jobs/http_tracker.rs (100%) rename src/{setup.rs => bootstrap/jobs/mod.rs} (95%) rename src/{ => bootstrap}/jobs/torrent_cleanup.rs (100%) rename src/{ => bootstrap}/jobs/tracker_apis.rs (100%) rename src/{ => bootstrap}/jobs/udp_tracker.rs (100%) rename src/{ => bootstrap}/logging.rs (100%) create mode 100644 src/bootstrap/mod.rs rename src/{ => bootstrap}/stats.rs (66%) delete mode 100644 src/jobs/mod.rs diff --git 
a/src/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs similarity index 100% rename from src/jobs/http_tracker.rs rename to src/bootstrap/jobs/http_tracker.rs diff --git a/src/setup.rs b/src/bootstrap/jobs/mod.rs similarity index 95% rename from src/setup.rs rename to src/bootstrap/jobs/mod.rs index c8e5e4113..cbe3f509c 100644 --- a/src/setup.rs +++ b/src/bootstrap/jobs/mod.rs @@ -1,10 +1,14 @@ +pub mod http_tracker; +pub mod torrent_cleanup; +pub mod tracker_apis; +pub mod udp_tracker; + use std::sync::Arc; use log::warn; use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; -use crate::jobs::{http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::servers::http::Version; use crate::tracker; diff --git a/src/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs similarity index 100% rename from src/jobs/torrent_cleanup.rs rename to src/bootstrap/jobs/torrent_cleanup.rs diff --git a/src/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs similarity index 100% rename from src/jobs/tracker_apis.rs rename to src/bootstrap/jobs/tracker_apis.rs diff --git a/src/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs similarity index 100% rename from src/jobs/udp_tracker.rs rename to src/bootstrap/jobs/udp_tracker.rs diff --git a/src/logging.rs b/src/bootstrap/logging.rs similarity index 100% rename from src/logging.rs rename to src/bootstrap/logging.rs diff --git a/src/bootstrap/mod.rs b/src/bootstrap/mod.rs new file mode 100644 index 000000000..fd51d9b90 --- /dev/null +++ b/src/bootstrap/mod.rs @@ -0,0 +1,3 @@ +pub mod jobs; +pub mod logging; +pub mod stats; diff --git a/src/stats.rs b/src/bootstrap/stats.rs similarity index 66% rename from src/stats.rs rename to src/bootstrap/stats.rs index 8f87c01a3..3b109f297 100644 --- a/src/stats.rs +++ b/src/bootstrap/stats.rs @@ -1,7 +1,7 @@ use crate::tracker::statistics; #[must_use] -pub fn setup_statistics(tracker_usage_statistics: bool) -> (Option>, 
statistics::Repo) { +pub fn setup(tracker_usage_statistics: bool) -> (Option>, statistics::Repo) { let mut stats_event_sender = None; let mut stats_tracker = statistics::Keeper::new(); @@ -15,13 +15,13 @@ pub fn setup_statistics(tracker_usage_statistics: bool) -> (Option Date: Fri, 17 Mar 2023 10:18:35 +0000 Subject: [PATCH 0482/1003] refactor: extract app setup and app start mods --- src/app.rs | 68 +++++++++++++++++++++++++++ src/bootstrap/app.rs | 49 +++++++++++++++++++ src/bootstrap/jobs/mod.rs | 68 --------------------------- src/bootstrap/jobs/torrent_cleanup.rs | 2 +- src/bootstrap/mod.rs | 1 + src/lib.rs | 1 + src/main.rs | 44 ++--------------- 7 files changed, 124 insertions(+), 109 deletions(-) create mode 100644 src/app.rs create mode 100644 src/bootstrap/app.rs diff --git a/src/app.rs b/src/app.rs new file mode 100644 index 000000000..5f75449ca --- /dev/null +++ b/src/app.rs @@ -0,0 +1,68 @@ +use std::sync::Arc; + +use log::warn; +use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; + +use crate::bootstrap::jobs::{http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; +use crate::servers::http::Version; +use crate::tracker; + +/// # Panics +/// +/// Will panic if the socket address for API can't be parsed. +pub async fn start(config: Arc, tracker: Arc) -> Vec> { + let mut jobs: Vec> = Vec::new(); + + // Load peer keys + if tracker.is_private() { + tracker + .load_keys_from_database() + .await + .expect("Could not retrieve keys from database."); + } + + // Load whitelisted torrents + if tracker.is_whitelisted() { + tracker + .load_whitelist_from_database() + .await + .expect("Could not load whitelist from database."); + } + + // Start the UDP blocks + for udp_tracker_config in &config.udp_trackers { + if !udp_tracker_config.enabled { + continue; + } + + if tracker.is_private() { + warn!( + "Could not start UDP tracker on: {} while in {:?}. 
UDP is not safe for private trackers!", + udp_tracker_config.bind_address, config.mode + ); + } else { + jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone())); + } + } + + // Start the HTTP blocks + for http_tracker_config in &config.http_trackers { + if !http_tracker_config.enabled { + continue; + } + jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::V1).await); + } + + // Start HTTP API + if config.http_api.enabled { + jobs.push(tracker_apis::start_job(&config.http_api, tracker.clone()).await); + } + + // Remove torrents without peers, every interval + if config.inactive_peer_cleanup_interval > 0 { + jobs.push(torrent_cleanup::start_job(&config, &tracker)); + } + + jobs +} diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs new file mode 100644 index 000000000..557203b0e --- /dev/null +++ b/src/bootstrap/app.rs @@ -0,0 +1,49 @@ +use std::env; +use std::sync::Arc; + +use torrust_tracker_configuration::Configuration; + +use crate::bootstrap::stats; +use crate::tracker::Tracker; +use crate::{bootstrap, ephemeral_instance_keys, static_time, tracker}; + +/// # Panics +/// +/// Will panic if it can't load the configuration from either +/// `./config.toml` file or env var `TORRUST_TRACKER_CONFIG`. 
+#[must_use] +pub fn setup() -> (Arc, Arc) { + const CONFIG_PATH: &str = "./config.toml"; + const CONFIG_ENV_VAR_NAME: &str = "TORRUST_TRACKER_CONFIG"; + + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize Torrust config + let config = if env::var(CONFIG_ENV_VAR_NAME).is_ok() { + println!("Loading configuration from env var {CONFIG_ENV_VAR_NAME}"); + Arc::new(Configuration::load_from_env_var(CONFIG_ENV_VAR_NAME).unwrap()) + } else { + println!("Loading configuration from config file {CONFIG_PATH}"); + Arc::new(Configuration::load_from_file(CONFIG_PATH).unwrap()) + }; + + // Initialize statistics + let (stats_event_sender, stats_repository) = stats::setup(config.tracker_usage_statistics); + + // Initialize Torrust tracker + let tracker = match tracker::Tracker::new(config.clone(), stats_event_sender, stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + + // Initialize logging + bootstrap::logging::setup(&config); + + (config, tracker) +} diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index cbe3f509c..ba44a56ad 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -2,71 +2,3 @@ pub mod http_tracker; pub mod torrent_cleanup; pub mod tracker_apis; pub mod udp_tracker; - -use std::sync::Arc; - -use log::warn; -use tokio::task::JoinHandle; -use torrust_tracker_configuration::Configuration; - -use crate::servers::http::Version; -use crate::tracker; - -/// # Panics -/// -/// Will panic if the socket address for API can't be parsed. 
-pub async fn setup(config: &Configuration, tracker: Arc) -> Vec> { - let mut jobs: Vec> = Vec::new(); - - // Load peer keys - if tracker.is_private() { - tracker - .load_keys_from_database() - .await - .expect("Could not retrieve keys from database."); - } - - // Load whitelisted torrents - if tracker.is_whitelisted() { - tracker - .load_whitelist_from_database() - .await - .expect("Could not load whitelist from database."); - } - - // Start the UDP blocks - for udp_tracker_config in &config.udp_trackers { - if !udp_tracker_config.enabled { - continue; - } - - if tracker.is_private() { - warn!( - "Could not start UDP tracker on: {} while in {:?}. UDP is not safe for private trackers!", - udp_tracker_config.bind_address, config.mode - ); - } else { - jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone())); - } - } - - // Start the HTTP blocks - for http_tracker_config in &config.http_trackers { - if !http_tracker_config.enabled { - continue; - } - jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::V1).await); - } - - // Start HTTP API - if config.http_api.enabled { - jobs.push(tracker_apis::start_job(&config.http_api, tracker.clone()).await); - } - - // Remove torrents without peers, every interval - if config.inactive_peer_cleanup_interval > 0 { - jobs.push(torrent_cleanup::start_job(config, &tracker)); - } - - jobs -} diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index 4c4ed1f53..64240bffe 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -8,7 +8,7 @@ use torrust_tracker_configuration::Configuration; use crate::tracker; #[must_use] -pub fn start_job(config: &Configuration, tracker: &Arc) -> JoinHandle<()> { +pub fn start_job(config: &Arc, tracker: &Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(tracker); let interval = config.inactive_peer_cleanup_interval; diff --git a/src/bootstrap/mod.rs 
b/src/bootstrap/mod.rs index fd51d9b90..b4ee0558e 100644 --- a/src/bootstrap/mod.rs +++ b/src/bootstrap/mod.rs @@ -1,3 +1,4 @@ +pub mod app; pub mod jobs; pub mod logging; pub mod stats; diff --git a/src/lib.rs b/src/lib.rs index 5cf4ab8eb..70f0c4a4f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,4 @@ +pub mod app; pub mod bootstrap; pub mod protocol; pub mod servers; diff --git a/src/main.rs b/src/main.rs index f8f218fca..87c0fc367 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,49 +1,13 @@ -use std::env; -use std::sync::Arc; - use log::info; -use torrust_tracker::bootstrap::stats::setup; -use torrust_tracker::{bootstrap, ephemeral_instance_keys, static_time, tracker}; -use torrust_tracker_configuration::Configuration; +use torrust_tracker::{app, bootstrap}; #[tokio::main] async fn main() { - const CONFIG_PATH: &str = "./config.toml"; - const CONFIG_ENV_VAR_NAME: &str = "TORRUST_TRACKER_CONFIG"; - - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize Torrust config - let config = if env::var(CONFIG_ENV_VAR_NAME).is_ok() { - println!("Loading configuration from env var {CONFIG_ENV_VAR_NAME}"); - Arc::new(Configuration::load_from_env_var(CONFIG_ENV_VAR_NAME).unwrap()) - } else { - println!("Loading configuration from config file {CONFIG_PATH}"); - Arc::new(Configuration::load_from_file(CONFIG_PATH).unwrap()) - }; - - // Initialize statistics - let (stats_event_sender, stats_repository) = setup(config.tracker_usage_statistics); - - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(config.clone(), stats_event_sender, stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - - // Initialize logging - bootstrap::logging::setup(&config); + let (config, tracker) = bootstrap::app::setup(); - // Run jobs - let jobs 
= bootstrap::jobs::setup(&config, tracker.clone()).await; + let jobs = app::start(config.clone(), tracker.clone()).await; - // handle the signals here + // handle the signals tokio::select! { _ = tokio::signal::ctrl_c() => { info!("Torrust shutting down.."); From 9ffcd6bec0c111c896bb4ab3280c71df7b2d7190 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 11:21:49 +0000 Subject: [PATCH 0483/1003] refactor: rename mod and extract bit_torrent mod `protocol` nod contains logic not only for BitTorrent protocol. --- src/lib.rs | 2 +- src/protocol/mod.rs | 5 ----- src/servers/apis/v1/context/auth_key/resources.rs | 4 ++-- src/servers/apis/v1/context/torrent/handlers.rs | 2 +- .../apis/v1/context/torrent/resources/torrent.rs | 4 ++-- src/servers/apis/v1/context/whitelist/handlers.rs | 2 +- src/servers/http/percent_encoding.rs | 4 ++-- .../http/v1/extractors/announce_request.rs | 2 +- src/servers/http/v1/extractors/scrape_request.rs | 2 +- src/servers/http/v1/handlers/announce.rs | 4 ++-- src/servers/http/v1/handlers/scrape.rs | 2 +- src/servers/http/v1/requests/announce.rs | 4 ++-- src/servers/http/v1/requests/scrape.rs | 4 ++-- src/servers/http/v1/responses/scrape.rs | 2 +- src/servers/http/v1/services/announce.rs | 6 +++--- src/servers/http/v1/services/scrape.rs | 6 +++--- src/servers/udp/connection_cookie.rs | 10 +++++----- src/servers/udp/handlers.rs | 6 +++--- src/servers/udp/peer_builder.rs | 2 +- src/servers/udp/request.rs | 2 +- src/{protocol => shared/bit_torrent}/common.rs | 0 src/{protocol => shared/bit_torrent}/info_hash.rs | 0 src/shared/bit_torrent/mod.rs | 2 ++ src/{protocol => shared}/clock/mod.rs | 15 ++++++++------- src/{protocol => shared}/clock/time_extent.rs | 4 ++-- src/{protocol => shared/clock}/utils.rs | 2 +- src/{protocol => shared}/crypto.rs | 4 ++-- src/shared/mod.rs | 3 +++ src/tracker/auth.rs | 6 +++--- src/tracker/databases/mod.rs | 2 +- src/tracker/databases/mysql.rs | 4 ++-- src/tracker/databases/sqlite.rs | 4 ++-- 
src/tracker/error.rs | 2 +- src/tracker/mod.rs | 10 +++++----- src/tracker/peer.rs | 10 ++++------ src/tracker/services/torrent.rs | 8 ++++---- src/tracker/torrent.rs | 6 +++--- tests/common/fixtures.rs | 2 +- tests/servers/api/test_environment.rs | 2 +- tests/servers/api/v1/contract/context/stats.rs | 2 +- tests/servers/api/v1/contract/context/torrent.rs | 2 +- .../servers/api/v1/contract/context/whitelist.rs | 2 +- tests/servers/http/requests/announce.rs | 2 +- tests/servers/http/requests/scrape.rs | 2 +- tests/servers/http/test_environment.rs | 2 +- tests/servers/http/v1/contract.rs | 12 ++++++------ tests/servers/udp/test_environment.rs | 2 +- 47 files changed, 93 insertions(+), 94 deletions(-) delete mode 100644 src/protocol/mod.rs rename src/{protocol => shared/bit_torrent}/common.rs (100%) rename src/{protocol => shared/bit_torrent}/info_hash.rs (100%) create mode 100644 src/shared/bit_torrent/mod.rs rename src/{protocol => shared}/clock/mod.rs (96%) rename src/{protocol => shared}/clock/time_extent.rs (99%) rename src/{protocol => shared/clock}/utils.rs (89%) rename src/{protocol => shared}/crypto.rs (94%) create mode 100644 src/shared/mod.rs diff --git a/src/lib.rs b/src/lib.rs index 70f0c4a4f..cf4439c3e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,7 +1,7 @@ pub mod app; pub mod bootstrap; -pub mod protocol; pub mod servers; +pub mod shared; pub mod tracker; #[macro_use] diff --git a/src/protocol/mod.rs b/src/protocol/mod.rs deleted file mode 100644 index bd4310dcf..000000000 --- a/src/protocol/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub mod clock; -pub mod common; -pub mod crypto; -pub mod info_hash; -pub mod utils; diff --git a/src/servers/apis/v1/context/auth_key/resources.rs b/src/servers/apis/v1/context/auth_key/resources.rs index cf43a6f3d..400b34eb7 100644 --- a/src/servers/apis/v1/context/auth_key/resources.rs +++ b/src/servers/apis/v1/context/auth_key/resources.rs @@ -2,7 +2,7 @@ use std::convert::From; use serde::{Deserialize, Serialize}; -use 
crate::protocol::clock::convert_from_iso_8601_to_timestamp; +use crate::shared::clock::convert_from_iso_8601_to_timestamp; use crate::tracker::auth::{self, Key}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -36,7 +36,7 @@ mod tests { use std::time::Duration; use super::AuthKey; - use crate::protocol::clock::{Current, TimeNow}; + use crate::shared::clock::{Current, TimeNow}; use crate::tracker::auth::{self, Key}; struct TestTime { diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index 45ffbcf22..4032f2e9a 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -8,9 +8,9 @@ use serde::{de, Deserialize, Deserializer}; use super::resources::torrent::ListItem; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; -use crate::protocol::info_hash::InfoHash; use crate::servers::apis::v1::responses::invalid_info_hash_param_response; use crate::servers::apis::InfoHashParam; +use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index 577ac279c..e328f80c4 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -75,10 +75,10 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use super::Torrent; - use crate::protocol::clock::DurationSinceUnixEpoch; - use crate::protocol::info_hash::InfoHash; use crate::servers::apis::v1::context::torrent::resources::peer::Peer; use crate::servers::apis::v1::context::torrent::resources::torrent::ListItem; + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::shared::clock::DurationSinceUnixEpoch; use 
crate::tracker::peer; use crate::tracker::services::torrent::{BasicInfo, Info}; diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs index 2ca70cba7..25e285c0b 100644 --- a/src/servers/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -7,9 +7,9 @@ use axum::response::Response; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, }; -use crate::protocol::info_hash::InfoHash; use crate::servers::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; use crate::servers::apis::InfoHashParam; +use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::Tracker; pub async fn add_torrent_to_whitelist_handler( diff --git a/src/servers/http/percent_encoding.rs b/src/servers/http/percent_encoding.rs index c824c8df7..019735e0f 100644 --- a/src/servers/http/percent_encoding.rs +++ b/src/servers/http/percent_encoding.rs @@ -1,4 +1,4 @@ -use crate::protocol::info_hash::{ConversionError, InfoHash}; +use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; /// # Errors @@ -21,8 +21,8 @@ pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { @@ -39,8 +39,8 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; - use crate::protocol::clock::DurationSinceUnixEpoch; - use crate::protocol::info_hash::InfoHash; + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::shared::clock::DurationSinceUnixEpoch; use crate::tracker::services::common::tracker_factory; use crate::tracker::{peer, Tracker}; diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs index a55d40801..4a75145c1 100644 --- 
a/src/servers/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -4,7 +4,7 @@ use std::panic::Location; use aquatic_udp_protocol::ConnectionId; use super::error::Error; -use crate::protocol::clock::time_extent::{Extent, TimeExtent}; +use crate::shared::clock::time_extent::{Extent, TimeExtent}; pub type Cookie = [u8; 8]; @@ -61,8 +61,8 @@ mod cookie_builder { use std::net::SocketAddr; use super::{Cookie, SinceUnixEpochTimeExtent, COOKIE_LIFETIME}; - use crate::protocol::clock::time_extent::{DefaultTimeExtentMaker, Extent, Make, TimeExtent}; - use crate::protocol::crypto::keys::seeds::{Current, Keeper}; + use crate::shared::clock::time_extent::{DefaultTimeExtentMaker, Extent, Make, TimeExtent}; + use crate::shared::crypto::keys::seeds::{Current, Keeper}; pub(super) fn get_last_time_extent() -> SinceUnixEpochTimeExtent { DefaultTimeExtentMaker::now(&COOKIE_LIFETIME.increment) @@ -90,9 +90,9 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use super::cookie_builder::{self}; - use crate::protocol::clock::time_extent::{self, Extent}; - use crate::protocol::clock::{Stopped, StoppedTime}; use crate::servers::udp::connection_cookie::{check, make, Cookie, COOKIE_LIFETIME}; + use crate::shared::clock::time_extent::{self, Extent}; + use crate::shared::clock::{Stopped, StoppedTime}; // #![feature(const_socketaddr)] // const REMOTE_ADDRESS_IPV4_ZERO: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 1544e13cc..7eb971d05 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -9,11 +9,11 @@ use aquatic_udp_protocol::{ use log::debug; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; -use crate::protocol::common::MAX_SCRAPE_TORRENTS; -use crate::protocol::info_hash::InfoHash; use crate::servers::udp::error::Error; use crate::servers::udp::peer_builder; use 
crate::servers::udp::request::AnnounceWrapper; +use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; +use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::{statistics, Tracker}; pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: &Tracker) -> Response { @@ -241,7 +241,7 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::protocol::clock::{Current, Time}; + use crate::shared::clock::{Current, Time}; use crate::tracker::services::common::tracker_factory; use crate::tracker::{self, peer}; diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs index 84eae64f9..8d8852dc7 100644 --- a/src/servers/udp/peer_builder.rs +++ b/src/servers/udp/peer_builder.rs @@ -1,7 +1,7 @@ use std::net::{IpAddr, SocketAddr}; use super::request::AnnounceWrapper; -use crate::protocol::clock::{Current, Time}; +use crate::shared::clock::{Current, Time}; use crate::tracker::peer::{Id, Peer}; #[must_use] diff --git a/src/servers/udp/request.rs b/src/servers/udp/request.rs index 28d75f860..4be99e6d0 100644 --- a/src/servers/udp/request.rs +++ b/src/servers/udp/request.rs @@ -1,6 +1,6 @@ use aquatic_udp_protocol::AnnounceRequest; -use crate::protocol::info_hash::InfoHash; +use crate::shared::bit_torrent::info_hash::InfoHash; pub struct AnnounceWrapper { pub announce_request: AnnounceRequest, diff --git a/src/protocol/common.rs b/src/shared/bit_torrent/common.rs similarity index 100% rename from src/protocol/common.rs rename to src/shared/bit_torrent/common.rs diff --git a/src/protocol/info_hash.rs b/src/shared/bit_torrent/info_hash.rs similarity index 100% rename from src/protocol/info_hash.rs rename to src/shared/bit_torrent/info_hash.rs diff --git a/src/shared/bit_torrent/mod.rs b/src/shared/bit_torrent/mod.rs new file mode 100644 index 000000000..7579a0780 --- /dev/null +++ b/src/shared/bit_torrent/mod.rs @@ -0,0 +1,2 @@ +pub mod common; +pub mod 
info_hash; diff --git a/src/protocol/clock/mod.rs b/src/shared/clock/mod.rs similarity index 96% rename from src/protocol/clock/mod.rs rename to src/shared/clock/mod.rs index 73df37b58..399fb6b9b 100644 --- a/src/protocol/clock/mod.rs +++ b/src/shared/clock/mod.rs @@ -1,3 +1,6 @@ +pub mod time_extent; +pub mod utils; + use std::num::IntErrorKind; use std::str::FromStr; use std::time::Duration; @@ -77,7 +80,7 @@ pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) mod tests { use std::any::TypeId; - use crate::protocol::clock::{Current, Stopped, Time, Working}; + use crate::shared::clock::{Current, Stopped, Time, Working}; #[test] fn it_should_be_the_stopped_clock_as_default_when_testing() { @@ -95,7 +98,7 @@ mod tests { mod timestamp { use chrono::{DateTime, NaiveDateTime, Utc}; - use crate::protocol::clock::{ + use crate::shared::clock::{ convert_from_datetime_utc_to_timestamp, convert_from_iso_8601_to_timestamp, convert_from_timestamp_to_datetime_utc, DurationSinceUnixEpoch, }; @@ -227,7 +230,7 @@ mod stopped_clock { use std::thread; use std::time::Duration; - use crate::protocol::clock::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow, Working}; + use crate::shared::clock::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow, Working}; #[test] fn it_should_default_to_zero_when_testing() { @@ -286,7 +289,7 @@ mod stopped_clock { use std::cell::RefCell; use std::time::SystemTime; - use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::shared::clock::DurationSinceUnixEpoch; use crate::static_time; pub fn get_app_start_time() -> DurationSinceUnixEpoch { @@ -311,7 +314,7 @@ mod stopped_clock { mod tests { use std::time::Duration; - use crate::protocol::clock::stopped_clock::detail::{get_app_start_time, get_default_fixed_time}; + use crate::shared::clock::stopped_clock::detail::{get_app_start_time, get_default_fixed_time}; #[test] fn it_should_get_the_zero_start_time_when_testing() { @@ -326,5 +329,3 @@ mod 
stopped_clock { } } } - -pub mod time_extent; diff --git a/src/protocol/clock/time_extent.rs b/src/shared/clock/time_extent.rs similarity index 99% rename from src/protocol/clock/time_extent.rs rename to src/shared/clock/time_extent.rs index b4c20cd70..64142c404 100644 --- a/src/protocol/clock/time_extent.rs +++ b/src/shared/clock/time_extent.rs @@ -176,10 +176,10 @@ pub type DefaultTimeExtentMaker = StoppedTimeExtentMaker; #[cfg(test)] mod test { - use crate::protocol::clock::time_extent::{ + use crate::shared::clock::time_extent::{ checked_duration_from_nanos, Base, DefaultTimeExtentMaker, Extent, Make, Multiplier, Product, TimeExtent, MAX, ZERO, }; - use crate::protocol::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; + use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239_812_388_723); diff --git a/src/protocol/utils.rs b/src/shared/clock/utils.rs similarity index 89% rename from src/protocol/utils.rs rename to src/shared/clock/utils.rs index cec02ceaf..9127f97b1 100644 --- a/src/protocol/utils.rs +++ b/src/shared/clock/utils.rs @@ -1,4 +1,4 @@ -use super::clock::DurationSinceUnixEpoch; +use super::DurationSinceUnixEpoch; /// # Errors /// diff --git a/src/protocol/crypto.rs b/src/shared/crypto.rs similarity index 94% rename from src/protocol/crypto.rs rename to src/shared/crypto.rs index a335e2dba..848dcd36b 100644 --- a/src/protocol/crypto.rs +++ b/src/shared/crypto.rs @@ -74,8 +74,8 @@ pub mod keys { use std::convert::TryInto; use crate::ephemeral_instance_keys::RANDOM_SEED; - use crate::protocol::crypto::keys::seeds::detail::ZEROED_TEST_SEED; - use crate::protocol::crypto::keys::seeds::CURRENT_SEED; + use crate::shared::crypto::keys::seeds::detail::ZEROED_TEST_SEED; + use crate::shared::crypto::keys::seeds::CURRENT_SEED; #[test] fn it_should_have_a_zero_test_seed() { diff --git a/src/shared/mod.rs b/src/shared/mod.rs new file mode 100644 index 000000000..4b0d9138e --- 
/dev/null +++ b/src/shared/mod.rs @@ -0,0 +1,3 @@ +pub mod bit_torrent; +pub mod clock; +pub mod crypto; diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 75bc543a8..31e1f50e4 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -11,8 +11,8 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use torrust_tracker_located_error::LocatedError; -use crate::protocol::clock::{convert_from_timestamp_to_datetime_utc, Current, DurationSinceUnixEpoch, Time, TimeNow}; -use crate::protocol::common::AUTH_KEY_LENGTH; +use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; +use crate::shared::clock::{convert_from_timestamp_to_datetime_utc, Current, DurationSinceUnixEpoch, Time, TimeNow}; #[must_use] /// # Panics @@ -146,7 +146,7 @@ mod tests { use std::str::FromStr; use std::time::Duration; - use crate::protocol::clock::{Current, StoppedTime}; + use crate::shared::clock::{Current, StoppedTime}; use crate::tracker::auth; #[test] diff --git a/src/tracker/databases/mod.rs b/src/tracker/databases/mod.rs index 0af6f5723..f68288bbe 100644 --- a/src/tracker/databases/mod.rs +++ b/src/tracker/databases/mod.rs @@ -8,7 +8,7 @@ use std::marker::PhantomData; use async_trait::async_trait; use self::error::Error; -use crate::protocol::info_hash::InfoHash; +use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::auth::{self, Key}; pub(self) struct Builder diff --git a/src/tracker/databases/mysql.rs b/src/tracker/databases/mysql.rs index ded9e1617..7e4aab99e 100644 --- a/src/tracker/databases/mysql.rs +++ b/src/tracker/databases/mysql.rs @@ -10,8 +10,8 @@ use r2d2_mysql::MySqlConnectionManager; use torrust_tracker_primitives::DatabaseDriver; use super::{Database, Error}; -use crate::protocol::common::AUTH_KEY_LENGTH; -use crate::protocol::info_hash::InfoHash; +use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; +use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::auth::{self, Key}; const DRIVER: DatabaseDriver = 
DatabaseDriver::MySQL; diff --git a/src/tracker/databases/sqlite.rs b/src/tracker/databases/sqlite.rs index e230ac18f..931289183 100644 --- a/src/tracker/databases/sqlite.rs +++ b/src/tracker/databases/sqlite.rs @@ -7,8 +7,8 @@ use r2d2_sqlite::SqliteConnectionManager; use torrust_tracker_primitives::DatabaseDriver; use super::{Database, Error}; -use crate::protocol::clock::DurationSinceUnixEpoch; -use crate::protocol::info_hash::InfoHash; +use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::shared::clock::DurationSinceUnixEpoch; use crate::tracker::auth::{self, Key}; const DRIVER: DatabaseDriver = DatabaseDriver::Sqlite3; diff --git a/src/tracker/error.rs b/src/tracker/error.rs index 10ca5ec19..aaf755e0d 100644 --- a/src/tracker/error.rs +++ b/src/tracker/error.rs @@ -16,7 +16,7 @@ pub enum Error { // Authorization errors #[error("The torrent: {info_hash}, is not whitelisted, {location}")] TorrentNotWhitelisted { - info_hash: crate::protocol::info_hash::InfoHash, + info_hash: crate::shared::bit_torrent::info_hash::InfoHash, location: &'static Location<'static>, }, } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 9ed28b684..6b8c27076 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -22,7 +22,7 @@ use self::auth::Key; use self::error::Error; use self::peer::Peer; use self::torrent::{SwarmMetadata, SwarmStats}; -use crate::protocol::info_hash::InfoHash; +use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::databases::Database; pub struct Tracker { @@ -557,8 +557,8 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; - use crate::protocol::clock::DurationSinceUnixEpoch; - use crate::protocol::info_hash::InfoHash; + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::shared::clock::DurationSinceUnixEpoch; use crate::tracker::peer::{self, Peer}; use crate::tracker::services::common::tracker_factory; use 
crate::tracker::{TorrentsMetrics, Tracker}; @@ -911,7 +911,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr}; - use crate::protocol::info_hash::InfoHash; + use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; use crate::tracker::{ScrapeData, SwarmMetadata}; @@ -1068,7 +1068,7 @@ mod tests { mod handling_an_scrape_request { - use crate::protocol::info_hash::InfoHash; + use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::tests::the_tracker::{ complete_peer, incomplete_peer, peer_ip, sample_info_hash, whitelisted_tracker, }; diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 3012770bb..6a298c9df 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -6,9 +6,9 @@ use serde; use serde::Serialize; use thiserror::Error; -use crate::protocol::clock::DurationSinceUnixEpoch; -use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; -use crate::protocol::utils::ser_unix_time_value; +use crate::shared::bit_torrent::common::{AnnounceEventDef, NumberOfBytesDef}; +use crate::shared::clock::utils::ser_unix_time_value; +use crate::shared::clock::DurationSinceUnixEpoch; #[derive(PartialEq, Eq, Debug)] pub enum IPVersion { @@ -28,8 +28,6 @@ pub struct Peer { pub downloaded: NumberOfBytes, #[serde(with = "NumberOfBytesDef")] pub left: NumberOfBytes, // The number of bytes this peer still has to download - // code-review: aquatic_udp_protocol::request::AnnounceEvent is used also for the HTTP tracker. - // Maybe we should use our own enum and use the¡is one only for the UDP tracker. 
#[serde(with = "AnnounceEventDef")] pub event: AnnounceEvent, } @@ -397,7 +395,7 @@ mod test { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde_json::Value; - use crate::protocol::clock::{Current, Time}; + use crate::shared::clock::{Current, Time}; use crate::tracker::peer::{self, Peer}; #[test] diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index b04b4e1dc..e9e254582 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use serde::Deserialize; -use crate::protocol::info_hash::InfoHash; +use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::peer::Peer; use crate::tracker::Tracker; @@ -117,7 +117,7 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::shared::clock::DurationSinceUnixEpoch; use crate::tracker::peer; fn sample_peer() -> peer::Peer { @@ -140,7 +140,7 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::protocol::info_hash::InfoHash; + use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; use crate::tracker::services::torrent::tests::sample_peer; use crate::tracker::services::torrent::{get_torrent_info, Info}; @@ -195,7 +195,7 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::protocol::info_hash::InfoHash; + use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; use crate::tracker::services::torrent::tests::sample_peer; use crate::tracker::services::torrent::{get_torrents, BasicInfo, Pagination}; diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 4a871aa89..882e52ff1 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -4,8 +4,8 @@ use 
aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; use super::peer::{self, Peer}; -use crate::protocol::clock::{Current, TimeNow}; -use crate::protocol::common::MAX_SCRAPE_TORRENTS; +use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; +use crate::shared::clock::{Current, TimeNow}; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct Entry { @@ -134,7 +134,7 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; + use crate::shared::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; use crate::tracker::peer; use crate::tracker::torrent::Entry; diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index d4b3e9812..7062c8376 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -1,7 +1,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; +use torrust_tracker::shared::clock::DurationSinceUnixEpoch; use torrust_tracker::tracker::peer::{self, Id, Peer}; pub struct PeerBuilder { diff --git a/tests/servers/api/test_environment.rs b/tests/servers/api/test_environment.rs index 07ba46fff..c9b693320 100644 --- a/tests/servers/api/test_environment.rs +++ b/tests/servers/api/test_environment.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::servers::apis::server::{ApiServer, RunningApiServer, StoppedApiServer}; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 038272963..45f7e604a 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ 
b/tests/servers/api/v1/contract/context/stats.rs @@ -1,7 +1,7 @@ use std::str::FromStr; -use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index 69d600a8b..ab497787f 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -1,8 +1,8 @@ use std::str::FromStr; -use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::servers::apis::v1::context::torrent::resources::peer::Peer; use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index 67992642f..60ab4c901 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -1,6 +1,6 @@ use std::str::FromStr; -use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; diff --git a/tests/servers/http/requests/announce.rs b/tests/servers/http/requests/announce.rs index 414c118ef..20c5ddaa7 100644 --- a/tests/servers/http/requests/announce.rs +++ b/tests/servers/http/requests/announce.rs @@ -3,7 +3,7 @@ use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; use serde_repr::Serialize_repr; -use torrust_tracker::protocol::info_hash::InfoHash; 
+use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer::Id; use crate::servers::http::{percent_encode_byte_array, ByteArray20}; diff --git a/tests/servers/http/requests/scrape.rs b/tests/servers/http/requests/scrape.rs index d7f7cd581..9e4257d6c 100644 --- a/tests/servers/http/requests/scrape.rs +++ b/tests/servers/http/requests/scrape.rs @@ -1,7 +1,7 @@ use std::fmt; use std::str::FromStr; -use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::servers::http::{percent_encode_byte_array, ByteArray20}; diff --git a/tests/servers/http/test_environment.rs b/tests/servers/http/test_environment.rs index aa414d62e..e6013540d 100644 --- a/tests/servers/http/test_environment.rs +++ b/tests/servers/http/test_environment.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::servers::http::server::{HttpServer, HttpServerLauncher, RunningHttpServer, StoppedHttpServer}; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index f67b0c5e5..b508dfc39 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -72,7 +72,7 @@ mod for_all_config_modes { use local_ip_address::local_ip; use reqwest::Response; - use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer; use torrust_tracker_test_helpers::configuration; @@ -833,7 +833,7 @@ mod for_all_config_modes { use std::net::IpAddr; use std::str::FromStr; - use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer; use torrust_tracker_test_helpers::configuration; @@ -1055,7 
+1055,7 @@ mod configured_as_whitelisted { mod and_receiving_an_announce_request { use std::str::FromStr; - use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::servers::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; @@ -1104,7 +1104,7 @@ mod configured_as_whitelisted { mod receiving_an_scrape_request { use std::str::FromStr; - use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer; use torrust_tracker_test_helpers::configuration; @@ -1201,7 +1201,7 @@ mod configured_as_private { use std::str::FromStr; use std::time::Duration; - use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; use torrust_tracker_test_helpers::configuration; @@ -1278,7 +1278,7 @@ mod configured_as_private { use std::str::FromStr; use std::time::Duration; - use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; use torrust_tracker::tracker::peer; use torrust_tracker_test_helpers::configuration; diff --git a/tests/servers/udp/test_environment.rs b/tests/servers/udp/test_environment.rs index 11a2cf6cd..c9f67c987 100644 --- a/tests/servers/udp/test_environment.rs +++ b/tests/servers/udp/test_environment.rs @@ -1,8 +1,8 @@ use std::net::SocketAddr; use std::sync::Arc; -use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::servers::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; From 2d99866931a6dfc8bf3ad65e2d438145cafd2b1c Mon Sep 17 00:00:00 2001 From: Jose 
Celano Date: Fri, 17 Mar 2023 11:55:51 +0000 Subject: [PATCH 0484/1003] refactor: move static vars --- src/bootstrap/app.rs | 4 +++- src/lib.rs | 19 ------------------- src/shared/clock/mod.rs | 4 ++-- src/shared/clock/static_time.rs | 5 +++++ src/shared/crypto.rs | 21 ++++++++++++++++----- tests/common/tracker.rs | 4 +++- 6 files changed, 29 insertions(+), 28 deletions(-) create mode 100644 src/shared/clock/static_time.rs diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 557203b0e..7fb1bf7ca 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -4,8 +4,10 @@ use std::sync::Arc; use torrust_tracker_configuration::Configuration; use crate::bootstrap::stats; +use crate::shared::clock::static_time; +use crate::shared::crypto::ephemeral_instance_keys; use crate::tracker::Tracker; -use crate::{bootstrap, ephemeral_instance_keys, static_time, tracker}; +use crate::{bootstrap, tracker}; /// # Panics /// diff --git a/src/lib.rs b/src/lib.rs index cf4439c3e..bd775f8cf 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -6,22 +6,3 @@ pub mod tracker; #[macro_use] extern crate lazy_static; - -pub mod static_time { - use std::time::SystemTime; - - lazy_static! { - pub static ref TIME_AT_APP_START: SystemTime = SystemTime::now(); - } -} - -pub mod ephemeral_instance_keys { - use rand::rngs::ThreadRng; - use rand::Rng; - - pub type Seed = [u8; 32]; - - lazy_static! 
{ - pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); - } -} diff --git a/src/shared/clock/mod.rs b/src/shared/clock/mod.rs index 399fb6b9b..b5001e10e 100644 --- a/src/shared/clock/mod.rs +++ b/src/shared/clock/mod.rs @@ -1,3 +1,4 @@ +pub mod static_time; pub mod time_extent; pub mod utils; @@ -289,8 +290,7 @@ mod stopped_clock { use std::cell::RefCell; use std::time::SystemTime; - use crate::shared::clock::DurationSinceUnixEpoch; - use crate::static_time; + use crate::shared::clock::{static_time, DurationSinceUnixEpoch}; pub fn get_app_start_time() -> DurationSinceUnixEpoch { (*static_time::TIME_AT_APP_START) diff --git a/src/shared/clock/static_time.rs b/src/shared/clock/static_time.rs new file mode 100644 index 000000000..f916cec9c --- /dev/null +++ b/src/shared/clock/static_time.rs @@ -0,0 +1,5 @@ +use std::time::SystemTime; + +lazy_static! { + pub static ref TIME_AT_APP_START: SystemTime = SystemTime::now(); +} diff --git a/src/shared/crypto.rs b/src/shared/crypto.rs index 848dcd36b..c10a417c1 100644 --- a/src/shared/crypto.rs +++ b/src/shared/crypto.rs @@ -1,8 +1,19 @@ +pub mod ephemeral_instance_keys { + use rand::rngs::ThreadRng; + use rand::Rng; + + pub type Seed = [u8; 32]; + + lazy_static! 
{ + pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); + } +} + pub mod keys { pub mod seeds { use self::detail::CURRENT_SEED; - use crate::ephemeral_instance_keys::{Seed, RANDOM_SEED}; + use crate::shared::crypto::ephemeral_instance_keys::{Seed, RANDOM_SEED}; pub trait Keeper { type Seed: Sized + Default + AsMut<[u8]>; @@ -33,7 +44,7 @@ pub mod keys { mod tests { use super::detail::ZEROED_TEST_SEED; use super::{Current, Instance, Keeper}; - use crate::ephemeral_instance_keys::Seed; + use crate::shared::crypto::ephemeral_instance_keys::Seed; pub struct ZeroedTestSeed; @@ -58,7 +69,7 @@ pub mod keys { } mod detail { - use crate::ephemeral_instance_keys::Seed; + use crate::shared::crypto::ephemeral_instance_keys::Seed; #[allow(dead_code)] pub const ZEROED_TEST_SEED: &Seed = &[0u8; 32]; @@ -67,13 +78,13 @@ pub mod keys { pub use ZEROED_TEST_SEED as CURRENT_SEED; #[cfg(not(test))] - pub use crate::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; + pub use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; #[cfg(test)] mod tests { use std::convert::TryInto; - use crate::ephemeral_instance_keys::RANDOM_SEED; + use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED; use crate::shared::crypto::keys::seeds::detail::ZEROED_TEST_SEED; use crate::shared::crypto::keys::seeds::CURRENT_SEED; diff --git a/tests/common/tracker.rs b/tests/common/tracker.rs index 1d06009ba..92c1df7bf 100644 --- a/tests/common/tracker.rs +++ b/tests/common/tracker.rs @@ -1,8 +1,10 @@ use std::sync::Arc; +use torrust_tracker::bootstrap; +use torrust_tracker::shared::clock::static_time; +use torrust_tracker::shared::crypto::ephemeral_instance_keys; use torrust_tracker::tracker::services::common::tracker_factory; use torrust_tracker::tracker::Tracker; -use torrust_tracker::{bootstrap, ephemeral_instance_keys, static_time}; // TODO: Move to test-helpers crate once `Tracker` is isolated. 
#[allow(clippy::module_name_repetitions)] From e2553b85b4ea4e18ea44417410fb6bbfd90f5437 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 12:11:52 +0000 Subject: [PATCH 0485/1003] refactor: extract mod into file --- src/shared/crypto.rs | 109 ------------------- src/shared/crypto/ephemeral_instance_keys.rs | 8 ++ src/shared/crypto/keys.rs | 95 ++++++++++++++++ src/shared/crypto/mod.rs | 2 + 4 files changed, 105 insertions(+), 109 deletions(-) delete mode 100644 src/shared/crypto.rs create mode 100644 src/shared/crypto/ephemeral_instance_keys.rs create mode 100644 src/shared/crypto/keys.rs create mode 100644 src/shared/crypto/mod.rs diff --git a/src/shared/crypto.rs b/src/shared/crypto.rs deleted file mode 100644 index c10a417c1..000000000 --- a/src/shared/crypto.rs +++ /dev/null @@ -1,109 +0,0 @@ -pub mod ephemeral_instance_keys { - use rand::rngs::ThreadRng; - use rand::Rng; - - pub type Seed = [u8; 32]; - - lazy_static! { - pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); - } -} - -pub mod keys { - - pub mod seeds { - use self::detail::CURRENT_SEED; - use crate::shared::crypto::ephemeral_instance_keys::{Seed, RANDOM_SEED}; - - pub trait Keeper { - type Seed: Sized + Default + AsMut<[u8]>; - fn get_seed() -> &'static Self::Seed; - } - - pub struct Instance; - pub struct Current; - - impl Keeper for Instance { - type Seed = Seed; - - fn get_seed() -> &'static Self::Seed { - &RANDOM_SEED - } - } - - impl Keeper for Current { - type Seed = Seed; - - #[allow(clippy::needless_borrow)] - fn get_seed() -> &'static Self::Seed { - &CURRENT_SEED - } - } - - #[cfg(test)] - mod tests { - use super::detail::ZEROED_TEST_SEED; - use super::{Current, Instance, Keeper}; - use crate::shared::crypto::ephemeral_instance_keys::Seed; - - pub struct ZeroedTestSeed; - - impl Keeper for ZeroedTestSeed { - type Seed = Seed; - - #[allow(clippy::needless_borrow)] - fn get_seed() -> &'static Self::Seed { - &ZEROED_TEST_SEED - } - } - - #[test] - fn 
the_default_seed_and_the_zeroed_seed_should_be_the_same_when_testing() { - assert_eq!(Current::get_seed(), ZeroedTestSeed::get_seed()); - } - - #[test] - fn the_default_seed_and_the_instance_seed_should_be_different_when_testing() { - assert_ne!(Current::get_seed(), Instance::get_seed()); - } - } - - mod detail { - use crate::shared::crypto::ephemeral_instance_keys::Seed; - - #[allow(dead_code)] - pub const ZEROED_TEST_SEED: &Seed = &[0u8; 32]; - - #[cfg(test)] - pub use ZEROED_TEST_SEED as CURRENT_SEED; - - #[cfg(not(test))] - pub use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; - - #[cfg(test)] - mod tests { - use std::convert::TryInto; - - use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED; - use crate::shared::crypto::keys::seeds::detail::ZEROED_TEST_SEED; - use crate::shared::crypto::keys::seeds::CURRENT_SEED; - - #[test] - fn it_should_have_a_zero_test_seed() { - assert_eq!(*ZEROED_TEST_SEED, [0u8; 32]); - } - - #[test] - fn it_should_default_to_zeroed_seed_when_testing() { - assert_eq!(*CURRENT_SEED, *ZEROED_TEST_SEED); - } - - #[test] - fn it_should_have_a_large_random_seed() { - assert!(u128::from_ne_bytes((*RANDOM_SEED)[..16].try_into().unwrap()) > u128::from(u64::MAX)); - assert!(u128::from_ne_bytes((*RANDOM_SEED)[16..].try_into().unwrap()) > u128::from(u64::MAX)); - } - } - } - } -} diff --git a/src/shared/crypto/ephemeral_instance_keys.rs b/src/shared/crypto/ephemeral_instance_keys.rs new file mode 100644 index 000000000..635d10fbd --- /dev/null +++ b/src/shared/crypto/ephemeral_instance_keys.rs @@ -0,0 +1,8 @@ +use rand::rngs::ThreadRng; +use rand::Rng; + +pub type Seed = [u8; 32]; + +lazy_static! 
{ + pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); +} diff --git a/src/shared/crypto/keys.rs b/src/shared/crypto/keys.rs new file mode 100644 index 000000000..5e04eb551 --- /dev/null +++ b/src/shared/crypto/keys.rs @@ -0,0 +1,95 @@ +pub mod seeds { + use self::detail::CURRENT_SEED; + use crate::shared::crypto::ephemeral_instance_keys::{Seed, RANDOM_SEED}; + + pub trait Keeper { + type Seed: Sized + Default + AsMut<[u8]>; + fn get_seed() -> &'static Self::Seed; + } + + pub struct Instance; + pub struct Current; + + impl Keeper for Instance { + type Seed = Seed; + + fn get_seed() -> &'static Self::Seed { + &RANDOM_SEED + } + } + + impl Keeper for Current { + type Seed = Seed; + + #[allow(clippy::needless_borrow)] + fn get_seed() -> &'static Self::Seed { + &CURRENT_SEED + } + } + + #[cfg(test)] + mod tests { + use super::detail::ZEROED_TEST_SEED; + use super::{Current, Instance, Keeper}; + use crate::shared::crypto::ephemeral_instance_keys::Seed; + + pub struct ZeroedTestSeed; + + impl Keeper for ZeroedTestSeed { + type Seed = Seed; + + #[allow(clippy::needless_borrow)] + fn get_seed() -> &'static Self::Seed { + &ZEROED_TEST_SEED + } + } + + #[test] + fn the_default_seed_and_the_zeroed_seed_should_be_the_same_when_testing() { + assert_eq!(Current::get_seed(), ZeroedTestSeed::get_seed()); + } + + #[test] + fn the_default_seed_and_the_instance_seed_should_be_different_when_testing() { + assert_ne!(Current::get_seed(), Instance::get_seed()); + } + } + + mod detail { + use crate::shared::crypto::ephemeral_instance_keys::Seed; + + #[allow(dead_code)] + pub const ZEROED_TEST_SEED: &Seed = &[0u8; 32]; + + #[cfg(test)] + pub use ZEROED_TEST_SEED as CURRENT_SEED; + + #[cfg(not(test))] + pub use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; + + #[cfg(test)] + mod tests { + use std::convert::TryInto; + + use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED; + use 
crate::shared::crypto::keys::seeds::detail::ZEROED_TEST_SEED; + use crate::shared::crypto::keys::seeds::CURRENT_SEED; + + #[test] + fn it_should_have_a_zero_test_seed() { + assert_eq!(*ZEROED_TEST_SEED, [0u8; 32]); + } + + #[test] + fn it_should_default_to_zeroed_seed_when_testing() { + assert_eq!(*CURRENT_SEED, *ZEROED_TEST_SEED); + } + + #[test] + fn it_should_have_a_large_random_seed() { + assert!(u128::from_ne_bytes((*RANDOM_SEED)[..16].try_into().unwrap()) > u128::from(u64::MAX)); + assert!(u128::from_ne_bytes((*RANDOM_SEED)[16..].try_into().unwrap()) > u128::from(u64::MAX)); + } + } + } +} diff --git a/src/shared/crypto/mod.rs b/src/shared/crypto/mod.rs new file mode 100644 index 000000000..066eb0f46 --- /dev/null +++ b/src/shared/crypto/mod.rs @@ -0,0 +1,2 @@ +pub mod ephemeral_instance_keys; +pub mod keys; From 84c45827cb498cc962e91c6ec88ecd7a3eb732a5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 12:21:16 +0000 Subject: [PATCH 0486/1003] refactor: remove duplicate code for static vars initialization --- src/bootstrap/app.rs | 14 +++++++++----- tests/common/tracker.rs | 8 +------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 7fb1bf7ca..8fa6194f2 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -9,6 +9,14 @@ use crate::shared::crypto::ephemeral_instance_keys; use crate::tracker::Tracker; use crate::{bootstrap, tracker}; +pub fn initialize_static() { + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); +} + /// # Panics /// /// Will panic if it can't load the configuration from either @@ -18,11 +26,7 @@ pub fn setup() -> (Arc, Arc) { const CONFIG_PATH: &str = "./config.toml"; const CONFIG_ENV_VAR_NAME: &str = "TORRUST_TRACKER_CONFIG"; - // Set the time of Torrust app starting - 
lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + initialize_static(); // Initialize Torrust config let config = if env::var(CONFIG_ENV_VAR_NAME).is_ok() { diff --git a/tests/common/tracker.rs b/tests/common/tracker.rs index 92c1df7bf..8579609d9 100644 --- a/tests/common/tracker.rs +++ b/tests/common/tracker.rs @@ -1,19 +1,13 @@ use std::sync::Arc; use torrust_tracker::bootstrap; -use torrust_tracker::shared::clock::static_time; -use torrust_tracker::shared::crypto::ephemeral_instance_keys; use torrust_tracker::tracker::services::common::tracker_factory; use torrust_tracker::tracker::Tracker; // TODO: Move to test-helpers crate once `Tracker` is isolated. #[allow(clippy::module_name_repetitions)] pub fn new_tracker(configuration: Arc) -> Arc { - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + bootstrap::app::initialize_static(); // Initialize logging bootstrap::logging::setup(&configuration); From fe392c792e28728341a1d1410991bbc155db09e2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 12:51:46 +0000 Subject: [PATCH 0487/1003] refactor: reorganize tracker factory mod --- src/bootstrap/app.rs | 1 - src/servers/http/v1/handlers/announce.rs | 2 +- src/servers/http/v1/handlers/scrape.rs | 2 +- src/servers/http/v1/services/announce.rs | 2 +- src/servers/http/v1/services/scrape.rs | 2 +- src/servers/udp/handlers.rs | 12 +++++------ src/tracker/mod.rs | 2 +- src/tracker/services/common.rs | 25 ---------------------- src/tracker/services/mod.rs | 27 +++++++++++++++++++++++- src/tracker/services/statistics.rs | 2 +- src/tracker/services/torrent.rs | 4 ++-- tests/common/tracker.rs | 5 +---- 12 files changed, 41 insertions(+), 45 deletions(-) delete mode 100644 
src/tracker/services/common.rs diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 8fa6194f2..b76641ecd 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -48,7 +48,6 @@ pub fn setup() -> (Arc, Arc) { } }; - // Initialize logging bootstrap::logging::setup(&config); (config, tracker) diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index a93dccabb..db41388ab 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -145,7 +145,7 @@ mod tests { use crate::servers::http::v1::responses; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::services::common::tracker_factory; + use crate::tracker::services::tracker_factory; use crate::tracker::{peer, Tracker}; fn private_tracker() -> Tracker { diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 9912723b8..f55194810 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -103,7 +103,7 @@ mod tests { use crate::servers::http::v1::responses; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::services::common::tracker_factory; + use crate::tracker::services::tracker_factory; use crate::tracker::Tracker; fn private_tracker() -> Tracker { diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 58867634f..116dc1e95 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -32,7 +32,7 @@ mod tests { use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::clock::DurationSinceUnixEpoch; - use crate::tracker::services::common::tracker_factory; + use crate::tracker::services::tracker_factory; use crate::tracker::{peer, Tracker}; 
fn public_tracker() -> Tracker { diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 120a5068d..82ecc72e0 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -41,7 +41,7 @@ mod tests { use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::clock::DurationSinceUnixEpoch; - use crate::tracker::services::common::tracker_factory; + use crate::tracker::services::tracker_factory; use crate::tracker::{peer, Tracker}; fn public_tracker() -> Tracker { diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 7eb971d05..e00203cfc 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -242,8 +242,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::shared::clock::{Current, Time}; - use crate::tracker::services::common::tracker_factory; - use crate::tracker::{self, peer}; + use crate::tracker::services::tracker_factory; + use crate::tracker::{peer, Tracker}; fn tracker_configuration() -> Arc { Arc::new(default_testing_tracker_configuration()) @@ -253,19 +253,19 @@ mod tests { configuration::ephemeral() } - fn public_tracker() -> Arc { + fn public_tracker() -> Arc { initialized_tracker(configuration::ephemeral_mode_public().into()) } - fn private_tracker() -> Arc { + fn private_tracker() -> Arc { initialized_tracker(configuration::ephemeral_mode_private().into()) } - fn whitelisted_tracker() -> Arc { + fn whitelisted_tracker() -> Arc { initialized_tracker(configuration::ephemeral_mode_whitelisted().into()) } - fn initialized_tracker(configuration: Arc) -> Arc { + fn initialized_tracker(configuration: Arc) -> Arc { tracker_factory(configuration).into() } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 6b8c27076..a89d6df2c 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -560,7 +560,7 @@ mod tests { use crate::shared::bit_torrent::info_hash::InfoHash; use 
crate::shared::clock::DurationSinceUnixEpoch; use crate::tracker::peer::{self, Peer}; - use crate::tracker::services::common::tracker_factory; + use crate::tracker::services::tracker_factory; use crate::tracker::{TorrentsMetrics, Tracker}; fn public_tracker() -> Tracker { diff --git a/src/tracker/services/common.rs b/src/tracker/services/common.rs deleted file mode 100644 index 757725263..000000000 --- a/src/tracker/services/common.rs +++ /dev/null @@ -1,25 +0,0 @@ -use std::sync::Arc; - -use torrust_tracker_configuration::Configuration; - -use crate::tracker::statistics::Keeper; -use crate::tracker::Tracker; - -/// # Panics -/// -/// Will panic if tracker cannot be instantiated. -#[must_use] -pub fn tracker_factory(configuration: Arc) -> Tracker { - // todo: the tracker initialization is duplicated in many places. - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { - Ok(tracker) => tracker, - Err(error) => { - panic!("{}", error) - } - } -} diff --git a/src/tracker/services/mod.rs b/src/tracker/services/mod.rs index 2fd557d54..69b0320e8 100644 --- a/src/tracker/services/mod.rs +++ b/src/tracker/services/mod.rs @@ -1,3 +1,28 @@ -pub mod common; pub mod statistics; pub mod torrent; + +use std::sync::Arc; + +use torrust_tracker_configuration::Configuration; + +use crate::tracker::statistics::Keeper; +use crate::tracker::Tracker; + +/// # Panics +/// +/// Will panic if tracker cannot be instantiated. +#[must_use] +pub fn tracker_factory(configuration: Arc) -> Tracker { + // todo: the tracker initialization is duplicated in many places. 
+ + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } + } +} diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs index 28cd0b962..cf05fcf90 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics.rs @@ -40,8 +40,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::tracker; - use crate::tracker::services::common::tracker_factory; use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; + use crate::tracker::services::tracker_factory; pub fn tracker_configuration() -> Arc { Arc::new(configuration::ephemeral()) diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index e9e254582..30d24eb00 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -141,9 +141,9 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::services::common::tracker_factory; use crate::tracker::services::torrent::tests::sample_peer; use crate::tracker::services::torrent::{get_torrent_info, Info}; + use crate::tracker::services::tracker_factory; pub fn tracker_configuration() -> Arc { Arc::new(configuration::ephemeral()) @@ -196,9 +196,9 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::services::common::tracker_factory; use crate::tracker::services::torrent::tests::sample_peer; use crate::tracker::services::torrent::{get_torrents, BasicInfo, Pagination}; + use crate::tracker::services::tracker_factory; pub fn tracker_configuration() -> Arc { Arc::new(configuration::ephemeral()) diff --git a/tests/common/tracker.rs 
b/tests/common/tracker.rs index 8579609d9..d95573702 100644 --- a/tests/common/tracker.rs +++ b/tests/common/tracker.rs @@ -1,15 +1,12 @@ use std::sync::Arc; use torrust_tracker::bootstrap; -use torrust_tracker::tracker::services::common::tracker_factory; +use torrust_tracker::tracker::services::tracker_factory; use torrust_tracker::tracker::Tracker; -// TODO: Move to test-helpers crate once `Tracker` is isolated. -#[allow(clippy::module_name_repetitions)] pub fn new_tracker(configuration: Arc) -> Arc { bootstrap::app::initialize_static(); - // Initialize logging bootstrap::logging::setup(&configuration); Arc::new(tracker_factory(configuration)) From cbca065142781f47db827f24f94f9b4566094a19 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 12:56:36 +0000 Subject: [PATCH 0488/1003] refactor: rename mod and function To follow production code conventions. --- tests/common/{tracker.rs => app.rs} | 2 +- tests/common/mod.rs | 2 +- tests/servers/api/test_environment.rs | 4 ++-- tests/servers/http/test_environment.rs | 4 ++-- tests/servers/udp/test_environment.rs | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) rename tests/common/{tracker.rs => app.rs} (72%) diff --git a/tests/common/tracker.rs b/tests/common/app.rs similarity index 72% rename from tests/common/tracker.rs rename to tests/common/app.rs index d95573702..132faeb06 100644 --- a/tests/common/tracker.rs +++ b/tests/common/app.rs @@ -4,7 +4,7 @@ use torrust_tracker::bootstrap; use torrust_tracker::tracker::services::tracker_factory; use torrust_tracker::tracker::Tracker; -pub fn new_tracker(configuration: Arc) -> Arc { +pub fn setup_with_config(configuration: Arc) -> Arc { bootstrap::app::initialize_static(); bootstrap::logging::setup(&configuration); diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 9452cc111..51a8a5b03 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,4 +1,4 @@ +pub mod app; pub mod fixtures; pub mod http; -pub mod tracker; pub mod udp; diff 
--git a/tests/servers/api/test_environment.rs b/tests/servers/api/test_environment.rs index c9b693320..be767f05e 100644 --- a/tests/servers/api/test_environment.rs +++ b/tests/servers/api/test_environment.rs @@ -6,7 +6,7 @@ use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; use super::connection_info::ConnectionInfo; -use crate::common::tracker::new_tracker; +use crate::common::app::setup_with_config; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment; @@ -39,7 +39,7 @@ impl TestEnvironment { pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { let cfg = Arc::new(cfg); - let tracker = new_tracker(cfg.clone()); + let tracker = setup_with_config(cfg.clone()); let api_server = api_server(cfg.http_api.clone()); diff --git a/tests/servers/http/test_environment.rs b/tests/servers/http/test_environment.rs index e6013540d..a1b3444dd 100644 --- a/tests/servers/http/test_environment.rs +++ b/tests/servers/http/test_environment.rs @@ -5,7 +5,7 @@ use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; -use crate::common::tracker::new_tracker; +use crate::common::app::setup_with_config; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment>; @@ -39,7 +39,7 @@ impl TestEnvironment> { pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { let cfg = Arc::new(cfg); - let tracker = new_tracker(cfg.clone()); + let tracker = setup_with_config(cfg.clone()); let http_server = http_server(cfg.http_trackers[0].clone()); diff --git a/tests/servers/udp/test_environment.rs b/tests/servers/udp/test_environment.rs index c9f67c987..fdbb9036d 100644 --- a/tests/servers/udp/test_environment.rs +++ b/tests/servers/udp/test_environment.rs @@ -6,7 +6,7 @@ use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use 
torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; -use crate::common::tracker::new_tracker; +use crate::common::app::setup_with_config; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment; @@ -41,7 +41,7 @@ impl TestEnvironment { pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { let cfg = Arc::new(cfg); - let tracker = new_tracker(cfg.clone()); + let tracker = setup_with_config(cfg.clone()); let udp_server = udp_server(cfg.udp_trackers[0].clone()); From de5775759e0c5abdd615e5b4faceb5abd9031c28 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 13:48:27 +0000 Subject: [PATCH 0489/1003] refactor: remove duplicate app initialization code There was a lot of duplicate code for app initialization between prod and testing code. --- src/bootstrap/app.rs | 54 ++++++++++--------- src/bootstrap/mod.rs | 1 - src/tracker/services/mod.rs | 11 ++-- .../{statistics.rs => statistics/mod.rs} | 2 + .../services/statistics/setup.rs} | 8 +-- tests/common/app.rs | 9 +--- tests/servers/api/test_environment.rs | 4 +- tests/servers/http/test_environment.rs | 4 +- tests/servers/udp/test_environment.rs | 4 +- 9 files changed, 48 insertions(+), 49 deletions(-) rename src/tracker/services/{statistics.rs => statistics/mod.rs} (99%) rename src/{bootstrap/stats.rs => tracker/services/statistics/setup.rs} (69%) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index b76641ecd..e845feac0 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -3,11 +3,26 @@ use std::sync::Arc; use torrust_tracker_configuration::Configuration; -use crate::bootstrap::stats; +use crate::bootstrap; use crate::shared::clock::static_time; use crate::shared::crypto::ephemeral_instance_keys; +use crate::tracker::services::tracker_factory; use crate::tracker::Tracker; -use crate::{bootstrap, tracker}; + +#[must_use] +pub fn setup() -> (Arc, Arc) { + let configuration = 
Arc::new(initialize_configuration()); + let tracker = initialize_with_configuration(&configuration); + + (configuration, tracker) +} + +#[must_use] +pub fn initialize_with_configuration(configuration: &Arc) -> Arc { + initialize_static(); + initialize_logging(configuration); + Arc::new(initialize_tracker(configuration)) +} pub fn initialize_static() { // Set the time of Torrust app starting @@ -20,35 +35,26 @@ pub fn initialize_static() { /// # Panics /// /// Will panic if it can't load the configuration from either -/// `./config.toml` file or env var `TORRUST_TRACKER_CONFIG`. +/// `./config.toml` file or the env var `TORRUST_TRACKER_CONFIG`. #[must_use] -pub fn setup() -> (Arc, Arc) { +fn initialize_configuration() -> Configuration { const CONFIG_PATH: &str = "./config.toml"; const CONFIG_ENV_VAR_NAME: &str = "TORRUST_TRACKER_CONFIG"; - initialize_static(); - - // Initialize Torrust config - let config = if env::var(CONFIG_ENV_VAR_NAME).is_ok() { + if env::var(CONFIG_ENV_VAR_NAME).is_ok() { println!("Loading configuration from env var {CONFIG_ENV_VAR_NAME}"); - Arc::new(Configuration::load_from_env_var(CONFIG_ENV_VAR_NAME).unwrap()) + Configuration::load_from_env_var(CONFIG_ENV_VAR_NAME).unwrap() } else { println!("Loading configuration from config file {CONFIG_PATH}"); - Arc::new(Configuration::load_from_file(CONFIG_PATH).unwrap()) - }; - - // Initialize statistics - let (stats_event_sender, stats_repository) = stats::setup(config.tracker_usage_statistics); - - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(config.clone(), stats_event_sender, stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; + Configuration::load_from_file(CONFIG_PATH).unwrap() + } +} - bootstrap::logging::setup(&config); +#[must_use] +pub fn initialize_tracker(config: &Arc) -> Tracker { + tracker_factory(config.clone()) +} - (config, tracker) +pub fn initialize_logging(config: &Arc) { + 
bootstrap::logging::setup(config); } diff --git a/src/bootstrap/mod.rs b/src/bootstrap/mod.rs index b4ee0558e..e3b6467ee 100644 --- a/src/bootstrap/mod.rs +++ b/src/bootstrap/mod.rs @@ -1,4 +1,3 @@ pub mod app; pub mod jobs; pub mod logging; -pub mod stats; diff --git a/src/tracker/services/mod.rs b/src/tracker/services/mod.rs index 69b0320e8..8667f79a9 100644 --- a/src/tracker/services/mod.rs +++ b/src/tracker/services/mod.rs @@ -5,21 +5,18 @@ use std::sync::Arc; use torrust_tracker_configuration::Configuration; -use crate::tracker::statistics::Keeper; use crate::tracker::Tracker; /// # Panics /// /// Will panic if tracker cannot be instantiated. #[must_use] -pub fn tracker_factory(configuration: Arc) -> Tracker { - // todo: the tracker initialization is duplicated in many places. - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); +pub fn tracker_factory(config: Arc) -> Tracker { + // Initialize statistics + let (stats_event_sender, stats_repository) = statistics::setup::factory(config.tracker_usage_statistics); // Initialize Torrust tracker - match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { + match Tracker::new(config, stats_event_sender, stats_repository) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics/mod.rs similarity index 99% rename from src/tracker/services/statistics.rs rename to src/tracker/services/statistics/mod.rs index cf05fcf90..cae4d1d69 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics/mod.rs @@ -1,3 +1,5 @@ +pub mod setup; + use std::sync::Arc; use crate::tracker::statistics::Metrics; diff --git a/src/bootstrap/stats.rs b/src/tracker/services/statistics/setup.rs similarity index 69% rename from src/bootstrap/stats.rs rename to src/tracker/services/statistics/setup.rs index 3b109f297..b7cb831cb 100644 --- a/src/bootstrap/stats.rs 
+++ b/src/tracker/services/statistics/setup.rs @@ -1,7 +1,7 @@ use crate::tracker::statistics; #[must_use] -pub fn setup(tracker_usage_statistics: bool) -> (Option>, statistics::Repo) { +pub fn factory(tracker_usage_statistics: bool) -> (Option>, statistics::Repo) { let mut stats_event_sender = None; let mut stats_tracker = statistics::Keeper::new(); @@ -15,13 +15,13 @@ pub fn setup(tracker_usage_statistics: bool) -> (Option) -> Arc { - bootstrap::app::initialize_static(); - - bootstrap::logging::setup(&configuration); - - Arc::new(tracker_factory(configuration)) +pub fn setup_with_configuration(configuration: &Arc) -> Arc { + bootstrap::app::initialize_with_configuration(configuration) } diff --git a/tests/servers/api/test_environment.rs b/tests/servers/api/test_environment.rs index be767f05e..dbb23dcfa 100644 --- a/tests/servers/api/test_environment.rs +++ b/tests/servers/api/test_environment.rs @@ -6,7 +6,7 @@ use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; use super::connection_info::ConnectionInfo; -use crate::common::app::setup_with_config; +use crate::common::app::setup_with_configuration; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment; @@ -39,7 +39,7 @@ impl TestEnvironment { pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { let cfg = Arc::new(cfg); - let tracker = setup_with_config(cfg.clone()); + let tracker = setup_with_configuration(&cfg); let api_server = api_server(cfg.http_api.clone()); diff --git a/tests/servers/http/test_environment.rs b/tests/servers/http/test_environment.rs index a1b3444dd..8d0aaba02 100644 --- a/tests/servers/http/test_environment.rs +++ b/tests/servers/http/test_environment.rs @@ -5,7 +5,7 @@ use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; -use crate::common::app::setup_with_config; +use 
crate::common::app::setup_with_configuration; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment>; @@ -39,7 +39,7 @@ impl TestEnvironment> { pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { let cfg = Arc::new(cfg); - let tracker = setup_with_config(cfg.clone()); + let tracker = setup_with_configuration(&cfg); let http_server = http_server(cfg.http_trackers[0].clone()); diff --git a/tests/servers/udp/test_environment.rs b/tests/servers/udp/test_environment.rs index fdbb9036d..15266d881 100644 --- a/tests/servers/udp/test_environment.rs +++ b/tests/servers/udp/test_environment.rs @@ -6,7 +6,7 @@ use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; -use crate::common::app::setup_with_config; +use crate::common::app::setup_with_configuration; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment; @@ -41,7 +41,7 @@ impl TestEnvironment { pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { let cfg = Arc::new(cfg); - let tracker = setup_with_config(cfg.clone()); + let tracker = setup_with_configuration(&cfg); let udp_server = udp_server(cfg.udp_trackers[0].clone()); From 721227ecf35f4b0ae68834b3a5749c8c13ed5195 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 17:52:09 +0000 Subject: [PATCH 0490/1003] feat: release 3.0.0-alpha.1 --- Cargo.lock | 10 +++++----- Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 519ea50f0..3bc78bd67 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2861,7 +2861,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "2.3.0" +version = "3.0.0-alpha.1" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -2908,7 +2908,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "2.3.0" 
+version = "3.0.0-alpha.1" dependencies = [ "config", "log", @@ -2923,7 +2923,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "2.3.0" +version = "3.0.0-alpha.1" dependencies = [ "log", "thiserror", @@ -2931,7 +2931,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "2.3.0" +version = "3.0.0-alpha.1" dependencies = [ "derive_more", "serde", @@ -2939,7 +2939,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "2.3.0" +version = "3.0.0-alpha.1" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index 6f213995f..36006d7a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,7 +10,7 @@ version.workspace = true authors = ["Nautilus Cyberneering , Mick van Dijke "] edition = "2021" repository = "https://github.com/torrust/torrust-tracker" -version = "2.3.0" +version = "3.0.0-alpha.1" [dependencies] tokio = { version = "1.26", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } From 6e9ef79e801d9c9d6296ead2a2714de3a69fb3fa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 19:17:25 +0000 Subject: [PATCH 0491/1003] feat: add description and licence to all workspace packages --- packages/configuration/Cargo.toml | 2 ++ packages/located-error/Cargo.toml | 2 ++ packages/primitives/Cargo.toml | 2 ++ packages/test-helpers/Cargo.toml | 2 ++ 4 files changed, 8 insertions(+) diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index a6f1740a0..dccec59ea 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -1,5 +1,7 @@ [package] name = "torrust-tracker-configuration" +description = "A library to provide configuration to the Torrust Tracker." 
+license = "AGPL-3.0" version.workspace = true authors.workspace = true edition.workspace = true diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index c4b2ef726..f67ef340f 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -1,5 +1,7 @@ [package] name = "torrust-tracker-located-error" +description = "A library to provide error decorator with the location and the source of the original error." +license = "AGPL-3.0" version.workspace = true authors.workspace = true edition.workspace = true diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index 9aec28384..bba45cf5d 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -1,5 +1,7 @@ [package] name = "torrust-tracker-primitives" +description = "A library with the primitive types shared by the Torrust tracker packages." +license = "AGPL-3.0" version.workspace = true authors.workspace = true edition.workspace = true diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 2f942bac7..a4c6528ab 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -1,5 +1,7 @@ [package] name = "torrust-tracker-test-helpers" +description = "A library providing helpers for testing the Torrust tracker." +license = "AGPL-3.0" version.workspace = true authors.workspace = true edition.workspace = true From 67e7d51e8d8125429f0ecf18b8d48f2cdb9c5220 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 19:17:52 +0000 Subject: [PATCH 0492/1003] ci: fix workflow to publish crates Now we have a workspace with multiple packages and we have to publish one at the time followint the inverse dependency order. 
--- .github/workflows/publish_crate.yml | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/.github/workflows/publish_crate.yml b/.github/workflows/publish_crate.yml index c120a0fc5..d92b4e557 100644 --- a/.github/workflows/publish_crate.yml +++ b/.github/workflows/publish_crate.yml @@ -46,6 +46,22 @@ jobs: with: toolchain: stable - - run: cargo publish + - run: cargo publish -p torrust-tracker-located-error + env: + CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} + + - run: cargo publish -p torrust-tracker-primitives + env: + CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} + + - run: cargo publish -p torrust-tracker-configuration + env: + CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} + + - run: cargo publish -p torrust-tracker-test-helpers + env: + CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} + + - run: cargo publish -p torrust-tracker env: CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} From 354edec3cd17ce721fd5aa07a91abab86fbaf2e2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 19 Mar 2023 11:37:01 +0000 Subject: [PATCH 0493/1003] fix: all dependencies must have a version specified when publishing --- Cargo.toml | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/located-error/Cargo.toml | 2 +- packages/test-helpers/Cargo.toml | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 36006d7a4..4b6bcb323 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,9 +42,9 @@ axum = "0.6.10" axum-server = { version = "0.4", features = ["tls-rustls"] } axum-client-ip = "0.4" bip_bencode = "0.4" -torrust-tracker-primitives = { path = "packages/primitives" } -torrust-tracker-configuration = { path = "packages/configuration" } -torrust-tracker-located-error = { path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.1", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.1", path = 
"packages/configuration" } +torrust-tracker-located-error = { version = "3.0.0-alpha.1", path = "packages/located-error" } multimap = "0.8" hyper = "0.14" @@ -55,7 +55,7 @@ serde_urlencoded = "0.7" serde_repr = "0.1" serde_bytes = "0.11" local-ip-address = "0.5" -torrust-tracker-test-helpers = { path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.1", path = "packages/test-helpers" } [workspace] members = [ diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index dccec59ea..aade6272d 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -13,6 +13,6 @@ config = "0.13" toml = "0.5" log = { version = "0.4", features = ["release_max_level_info"] } thiserror = "1.0" -torrust-tracker-primitives = { path = "../primitives" } -torrust-tracker-located-error = { path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.1", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.1", path = "../located-error" } uuid = { version = "1", features = ["v4"] } diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index f67ef340f..7d66bba65 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -2,7 +2,7 @@ name = "torrust-tracker-located-error" description = "A library to provide error decorator with the location and the source of the original error." 
license = "AGPL-3.0" -version.workspace = true +version = "3.0.0-alpha.1" authors.workspace = true edition.workspace = true diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index a4c6528ab..4483f8f4d 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -10,5 +10,5 @@ edition.workspace = true tokio = { version = "1", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { path = "../configuration"} -torrust-tracker-primitives = { path = "../primitives"} +torrust-tracker-configuration = { version = "3.0.0-alpha.1", path = "../configuration"} +torrust-tracker-primitives = { version = "3.0.0-alpha.1", path = "../primitives"} From fcfd9f470092b2e54c3beee0615475ab46e3f958 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 19 Mar 2023 11:46:13 +0000 Subject: [PATCH 0494/1003] refactor: single step to publish on crates.io --- .github/workflows/publish_crate.yml | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/.github/workflows/publish_crate.yml b/.github/workflows/publish_crate.yml index d92b4e557..4d5d0772e 100644 --- a/.github/workflows/publish_crate.yml +++ b/.github/workflows/publish_crate.yml @@ -46,22 +46,12 @@ jobs: with: toolchain: stable - - run: cargo publish -p torrust-tracker-located-error - env: - CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} - - - run: cargo publish -p torrust-tracker-primitives - env: - CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} - - - run: cargo publish -p torrust-tracker-configuration - env: - CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} - - - run: cargo publish -p torrust-tracker-test-helpers - env: - CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} - - - run: cargo publish -p torrust-tracker + - name: Publish workspace packages + run: | + cargo publish -p torrust-tracker-located-error + cargo publish -p torrust-tracker-primitives 
+ cargo publish -p torrust-tracker-configuration + cargo publish -p torrust-tracker-test-helpers + cargo publish -p torrust-tracker env: CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} From 62cd78f12296ff227d7f42c18b099c6f0e8fb61e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 19 Mar 2023 11:54:48 +0000 Subject: [PATCH 0495/1003] fix: package version inherid from workspace --- packages/located-error/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index 7d66bba65..f67ef340f 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -2,7 +2,7 @@ name = "torrust-tracker-located-error" description = "A library to provide error decorator with the location and the source of the original error." license = "AGPL-3.0" -version = "3.0.0-alpha.1" +version.workspace = true authors.workspace = true edition.workspace = true From a58554a13ddfd674eb3c73ce66e8a2ad489f28fe Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Mar 2023 18:00:42 +0000 Subject: [PATCH 0496/1003] docs: [#253] crate docs for lib.rs Entrypoint for the crate documentation, the `lib.rs` file. 
--- cSpell.json | 1 + docs/media/torrust-tracker-components.png | Bin 0 -> 84935 bytes src/lib.rs | 423 ++++++++++++++++++++++ 3 files changed, 424 insertions(+) create mode 100644 docs/media/torrust-tracker-components.png diff --git a/cSpell.json b/cSpell.json index 4a9b11ce9..d8dee5c6b 100644 --- a/cSpell.json +++ b/cSpell.json @@ -7,6 +7,7 @@ "Azureus", "bencode", "bencoded", + "beps", "binascii", "Bitflu", "bools", diff --git a/docs/media/torrust-tracker-components.png b/docs/media/torrust-tracker-components.png new file mode 100644 index 0000000000000000000000000000000000000000..19fe3c0b897a2413a4a6fbf807a0dbdac19b6268 GIT binary patch literal 84935 zcmdSBcR1E@-#@IaB8o_oPDx}{MluU!k5saiy~*Ahib6(YWMn5rRyGxp?3EqaN;cVk zue1BPf6w!Kp5s2A;~D?l=W$(EUBY>OKi|*i{eG?Y_ldHi6#2fR`$$Mg$YrD@RY*v7 zd?q2;`G9mgzOv-_`3U~lYa^|0M?!L-lK9_kVeAL!Nk~qR$Vgtk;T%2L=W?meWnFCM zZ(Gs_W|bN;20`k0?*mddBwsj;s3l}uicjT`&wdg79uhz~ViI$WFwx1Mn-MDe?wFs} zHMYKvx`nl&jh@E8^D$}71y4OhH=};U{Vf?2X)O_(?!bTlu2rOEW}e2UzJH*xzLNbv`9ei8`B7fq#xaVcysrz{x09Urefo58e#Ko>?97=r zg}=9x%p4X!()Zf9F}C@*$;#ZwLmi7swQ zhlv)$fsgk$H#e<+lmx$RZ%&j-F6sT^M=RFoGykNw(5||@y}h!svZ=|Y?e(?U+1U`6 z4DI6YImSw&annD4&aRk~>gIm@c&S?jU#?O#*0_7^+Chb^>}#7OiQa<=I%0D7wNV%xl$Kqq2oux*_~}zJ5(L```53z`FRQkL0;3Ar0#AFmWtYN z?rMKJs^iCh_=oKzSspK_*S^pF11oh}FTG%DKFn#Y90ej@^S!GsgQP%}lW@cu5&$)AVvBbHA zoJWrzpJ8V=HZZX0E6U5ru)^E$7}YJ0zO?RiiSj(NoNZuW5H0EvMX6_QUTo2oJw84@ zFfdS8SNHz?S=XBJ)y3aBIy$(7V+W)x1Q)YR18`?a*SO?G6)<>bs`7n#Vqx)ycTI*d2nXDhjx zuIf!`XlTfi?6J0Ff80(_PftgsCUms#B6z$52AfBbl_ zq55_pQ|sQSlTy#9pGXQhO-=R{=ik47Uzyb)T>aX$yAv&^#J|q`tVorQJnuYnn~{-G zT-wufQ`c+b3h71nJ5IEG z`TY5Fd;86|+Z$s=G_wt4;zT{{9USWG>rX30g+@el4hTIB2xxC^{yD$WnyN%gx^tm$ zKMlwG_wSq9+i}{pu)8l`-uApaYkse$ygZr5`#Tkn%RJUHH1cHS>+cf$Gk^~>z=;lsr1sGBa|+Qh=+ zyFx-j78Vv(=Eri}Dsjs=wF=I1a=yb4y?OHsC1vm4y?Pa<-jwmmvWtqI{^iS;n%A#CkBf_oh!F5xU#7oc)s!mix$a!)dl2WSwzigvbmy*J zyJ~7`>X@xR;Gdn{-1a{b-@o6l)a|crVQ+7*ma*~J^;E@?u`%{DXUbd`&v0-|7EhP4 zv$5R~yEf;}q-rego1LAVm359FsiRYddh8wa_AN)e^6R7|YD&tpW^J#(e*MaC*tM 
zU?W&qa1adoiX5=dnY|5+jrZ)?LnVIo*)f^Sdv0!S)z#H#1THN}GIj00ii(QxYaOO~ zyMFvA!>Q3bY_$6$FCX8RkqG^|)Smv+=S{m06zWc&9gQi-o`Y)mli&O@`>U$$?~@o{#KVO2N#$A^fX4C8!DCS z!Oa|RPV7ovSy^eWMm_lc{X5zdnnaj1bI9}O`dqcdUf0|l{Ql}Ax;Jm$;DCxaP97mt zR#*E(ZZR-0C@|K=iN!VWM2fgMqPt$Xa>dnk_1?A}5r<2pz4cenr$mFa3|aXvn`2Xc`}S@6@0pPL%BrfzI!gp^0YO1C6BBA0 znlks55K0wo?U>L|Nz#et#Ohz|>88hVqq5FlVQOBOXqA(b!wG+OT%&5EL>>R zDRt#j9i1O*ykONUO?vJ6^+f&7Hq_qnho~@5M3UdX584<@l6ijAKYs6T6@i%U%W6Ar z1&Hh#;L&$%2tI%DvYcFPPft&4>wKy16_ma9b{xAaQc_}`>jl}_Ax|VHem$V&)@fon znLKhxoZqT=kgq^;=KJbrFxms}CuV%M@^xFM- z+^O~Z_u~RKC+3ayd$c_S(DJOUt+7S5wfiWv3j51E+?L{u8=~Xcm$0i**jrE(3knLN zqmQxd5PEM#7Nd2Ul#Pu|9alN8`Cy_075B^N!w;4{s9WVK7y_e+N zg4y0XyDF)39wjWi4~%@)Tk7VNB@rJVk1mYbE+{BCU7B?NkU2|PS(&|`D;3rA&5gD6 z*RQ?3y+gypTN4#b3%!G|ty}h>_1HT)@}E6>;_Zv{5P|Lw#G35pY$-w?cXNz_e7uJ#JYNBjoffARw9Qw77&sWMX%Bw>@nkE=-yYJKU%yRl7S4sEo-;a-a)82mJ3Ds#9g(%4jDMv0?MXPP1Hkfy0;Dbzc=b3lDzhghuI$ZNy zK|w)GGsbl2FzHUTuDrZFtUfn8yZ$qLdy8oo9UUD#zXiI0w5+T~j*)_a0dq0sty{Nn zXmxdU#lK>SO0D_?|MiiF4|folxwxj#ioLg5h;_UsH z^Ny2hMdAZFcWrD09Dd!<(TPRF@~QY~Bf!GKGASn`wz1HPg0XY^Cm@l?{){@#$WEh% z=*XZTn~`r}O-)UgAMPf{x}k$jOiX0zmg^MS;vd)}qk%MoM7yHuf%)p|IeB>(M(baq z6s0^t8=E|JiiAY>l>gPO(>UK8x@9G3MK}*Jf(~< z3rP!cb#+C*XW+Mpz>Q8w(8FE^!?7vfyFsxmb*?<& z(zmQqzR<9+#l^+b@)2lq$B!KOY9$c&30q2AOADnD5C{atWqIb#(zLv|Z(g2*fk9GY zVj?OGDwg&E6LWKRb!wlAt}OjM41u}@Hg|x^Q785E^+iNPfQ(ElI+~mBIyfZLi`Cw! 
z0Zs$^yK?#RWeJIjFJDeEF+J$&+(|NE{zx;2$f3bZ6Pt;DlAM<~LQeei1dsQB^@X*q zwX4-tRV_mMNj`>uZXv$rV!Cj)QJuvkJV*vaI(vEo>3C8K&9{+E4|!Ra+it8njAk^D zG<{A^N#Q^BI0a<~txD9C_|42LEc{M~Nd`7H;);)CsOO{<{@O-zo=(IySAL~Bka6pY z*>;j?@;kgAY02#pw-G;^!Q-<>iGN-)BK>dk1x{A~hK7dG`4#;tf77X_?((?l?xI`R z&GniA0|U23GEAt)>`kV5y(UMiON^S5r^+?cE7mUv8d?(Es|@9%nWunrwM)JYdB?7~!fNi@#u zPdDGvb;W3b**>TK$cTs|baVh-fBMUHS*bmrU7(1&1-1~^h^-Vq9N(UPYkp-N=f-1G z!{du2(>1giPjK zi;4tUS*1BPz%jt`fg;cI@~%sAdyLl6-M=(2IEWS$grt)=k#%2aj1;V%{9U6&>V(`hf%`n0p&)o-MC@#_L8`?qobpx zC1-IR-pIZC`6m*$mCKwD> zKGB=le*Xj$llYY@hQnWjm`}(GP3lo!)4DQ^yV`vC47#1F0-EfkU zR9Hl0XmIexjT;=CoQJ8Y&!0d4JyVzQ;KA9o>2h;3v!`Kn$BvbtYCwTNxkiUT@4S_% z^JGLuQE_Qz=Jo5>XZiUZZET!zHINs;lKCw<&w8vbP^{oYmrux%@4^k!(@S{w?%i zKX%a;@K{z>);kDamXG2!MZHI5#QpFNdd46WE9O=9X*(LWfb-0NRbSEKWY_Z)Z!DjW z4?P_ngNSQr_0U0a+y~F~*=nMAf#ND}a)41zUzdz6U_;*p#1J3x*|TTpm7T;-2@-)$ z3u(fuXrKFpf#7ez&u8=(rn+*B?MCaMMqRsk)ADSOb@>JjEo}#y0rVcnKivn#&CJb# zNRMFAOqvtGbl&q!_m}tM_3`6CxUS~}zE?l<_s9rtK^{>3blKW779%4gS7&E5(53Na ziEq{xB=>fEq&*kp(k{B8rp6knqp8`W7QK*VU}(6p@wXq>4NRvXw$tC&*O!9hC$={h zr?sQQ$<{W34Lt7W&z}GhePtd(0r7hY=n2Ft_VDnybm^l|E!n<(Z!$Cg&duqiQi=ml zUez}dhm!XVSCRsPNKJi1TYKaF^!7^Lr7tFLv%Dl#xS06(Wy#6Z8;fv?+*cRUz+Q3S z_0Q!>GYi@bkc8K6EOzLck|~gyw!aB0FE1}G_0ZEBT>RY;S%*`#u`(97;_itZrhkr? 
zFbs5J-CxEa<|#}iK_%|GG+AifFRGEAQof6Xfo5=!+Li1D|#-u?80tze;`1@XsHmq3n%0wkcmGcHRUH-bNO9XKFIOMJ0KJ3CqPA_fNsAp^7;0W(9Qo2lh42c+POxOnbdb6;N!{}ygW zAcGL@%)`27;0B!e;-aD!vfOtuB5ta#yN=+}P@YkP^)q!!p)qlCa2Pela=JV{=}o%x zL4^+me)>CfyI8>&T-vL{HB?y^04xw$mN(a?*OvaIV^7|>bIx^P9QAT+bo7vTQk=sn zr6dr0=%eE;$@|w%rzb%rOD!k>P*VfaoeWA%Jxv%!-I4$Hv%)7mJ^faOR?W^{@*;=d zGz0_iPQ3{*^$)qZsDO_ba(0s3OQN}uCV`Um<;w`zJjxuMsQWTRqU$Ou5ENQlTb=%N zUm-pCM3RPv#>~ttI5=3l*wK)M!*TkD2n2)Gg$Wd3uzpAk;AY-IfMM@z#q7tMQ0_T7 zIs1Bh%T`8Tg2SV*q8dK-_9mk{?_EQ4_%I3vU>KkE585r{XuGSc zFeRm`t<8RB;M1`Kry+YlP6Eaz>SpIg+}ykUK@;%V_V3&-_$R)kWF2%J^-aKOsuxNb zJ}Z3`Cliw%usZG^gi@ex?lWgxX9g}yOZ!5F1k)WE9yUpO2`)M?(1ZiF%7Hf#m$tP% zPktArdSr64S1qQ$#Kmr~axYH)63HD^RT@H))ASF$$cx9GJ$iISOH0ehXcV0BD7zhg zAHIplxWT|kxOMv%J(D*mG1%zkcyDNnN7*Sm0j`fRFras>F8wjTb7vMp3)(DR*q%x+ z`2#3hI%yLfet!?(b4-~51#5`D$iv6yfMbfa;pXCc=I^gn>}ZKjrlxjeo+51SPm$eN z1EhXS3kzm$CceA>Hb5xbb;$EVj*}+F#-|TGvFa}?#YdQ#nITH|6BAiHnOcQ&AWhzs zs}*?=tNHla>+1Y#DF3~h!2iL+4=@Nq$gS6Bb0errzhC_TNVrlt_o#|cKZ028#nQxwgOPpc~1 zpkIQG6N&Xiod3dk;_g{nvUKDoA(^qlp(CCA{4(L=fz?AWhsGg9%ey){ zeu$4J1XkD9)fpTb8X9sD{sFGdT#JJu5LoRtGjLyR6`$K=1vOqT^)xf{%*y(4bm$i^ z&Obp>7@Kv5zDC2&EGQUNJ;bT@{?eQSz3}kVRKw6Lmu^{IlJ}!Wv(6sC;y=TE(Sm=<_WCsDRN*J3f?Tw&?;n#^lE=|Q~3Ba(H}UhrlKM) zwc4e7PsD9$#<>JLQEs%{=I8dn>b2Qw#^f7wJV*G<+TLmAM}^F0CZ>Xu3{Q_#m*XJH6>OK!rp!=!=LIjD0U zYy8jE(5sb|Kdwi{#cg%ydhOl0on<6YY;$1N86wMMkxZ=Hh1 zUFAB!_J<}TXb)AKTBaA1HalEV=r8c#B0!KhK5nYmgTph5@Q|>AJ!106xE|s@k%b-#h5RgYDwJX=!Xi^x?Ijo~GZ7BtuTO zto@y)5(o3Idd9E@PcTcrx~QOlm5uF@w>QzDK{w=miC&B)LmdZ}`SNWYn6;){A(l;9 zTiXLZ7}f%uq6fORrKKf4DJlX4r7Gbp^cQc+nt=V&eZ^8^jy#6jNqDdM?N4lPYhz9& z9HpZ>C@%W#{7Dd_njEpxuaNL>vRr{t$na7HP@HJWJ)}C7d`z#lrUnjea(w(1b5o1| z%mNUKsUUg1U07U<6Lt}I+a)for^iGv_!i2h;BGD+|GuQ8BsVv-iansd*1-H*s-TU9BrNy&Hj(f{F3KAqQUyje(o>lpNV)gHIIUg>(S4qYeflVRcKf3fK@{iBp9n5%!CcE5fD zO&XoS)Rb+5&>d=Y>f+ygoFTwwc8IwBX2k2%^YGa4J$RD2pHJjx<(DtbVsKG`ap2ga zEi&`>m9Q}@B`y3N1HFSBhVKXR$Vc1u_TGv9S5hJj02ig8qw}MuN0a#wXg0)qS{fSI z@PO0>g@w&7opt_v8n?ByqG1JMdjo=%8r4UEsO_iaYT~QL5#l8}1Q6iyrT~?$uycS+ 
zfuV`phEXgwR_VvZ{Rc=c$>V-I*xNH#XrTp*e|3wlJOz*V^7$+jAnzckLULYHM3Lbo zE87TV4|>C10_u|G%xmmZg7?I-=_u-q4BPv%vQ4=4=<=v#pm6veK%wN5d5a1KTXNS$ z|Da>z3<9XjS+#v@i`=_`9cW}>ag2t>$kenDCk#%f)A#rZNR;TH0>>I~`&k`*QEI<` zmlgG6IbX4&2TA3cnwkKVStFqXPd!O-BLWUf^IW>lD-b0o_A$_wo1QH|!}e2j1l4K9 zpvPQHkaPiQ9)@03pwCqEGg4BvY671@;h7#E?@X4X1zCYwixVMuxPUDn%H!@6!uQvv z3NVwI?HrNiKrJTvcJrg;1RQ-sEHW%5SPf`RsAu|IBBhM3^PyM#XtGkzLwAIhgD!@) z4+X!q66fC8)fF!0Q?77cUfyG911~RFkMAl)4b0we0h$i}<@O{#Eo}m-z^~uGD=I2X zpGHM#K)`wStO4D?hl~y#gcfcNv{?@IZ11V*PoaBBnj|@`p4`#@*-+gwpv_~#cyeS^ ztl{T?1DQZGyMIW9v;XR)AM@&zJ0Ko5s$Yq`zp!Oqz{erj2niK~y0SAf4+HYz zZ@bDBtBdA1f$;U< zeiX?Z;IZ?k5wID!RKu_MP=AT*6dDltkqqp2^xpLJRw%mm_H9i~0(PTh#quQgSj_I; zSxEgpFnQtV5t->9=XrSSY;BKfHE@pv9VzlJ&B};L;fdMSw<#`r%m(R?@o^J<{UP`s zetnHfu6zber41ojWe*;F2I+)$92FVqg7}25FDaY}Bc;cGRZ;ye*uEfh*-Nm$^>%l4 zjY&eM)!sIc6Icmo0F6IR-%rqwgnu3GPNQJ){*nm|%#V;EUj@@PM#Ewg_*ST=yXk%3 zaz;^R!?b`)jts?3i}N+rr7M1K10ZmDz#u|(z~=5PcCv=C3RZ(&WKiL9nw&fUdJUhm zX|ShqT^*GyG(Pa!7;&dY1FPqfWBRtL2!sk(lYI;py;>&Cpsu6+` zKo$9Ywt+?)x&Rau(9N32?~M7^9={w(q`_61S@r{sD}o+U?f!w+(;LfmE2t38ai$Twjjzwpt9Gm^8jI`a767S zxxJ=RVIo-9G{EP?=0i$A-J$K#k1s2Wt@?xnkHH~iGjw#0&l>}->DR6#9}AJZB`V^& z;9EMu78VjRw>&$9gUj=r13*W9GoYEC`Op(mLP+XnpeLi0>#imIyWRwhC+Gx0&!0a( zZ(IQcqNFqpBvp5$F!UV{dK$eZhtl6HnjXVPcR-PnlI(we-j%iO15~q|SFc{79fa(^ z3eO4Kg;*X26tmDI&~shphM%Z;>E5^z8Whxo?z-B4Kk#)H_u~Z`1vT^KBVOyXQkO5^ z2kcwh6Wf{3e)8nW)2CBE1qehHwB1i2k88-Hk)Q%ZACChh>41nc7U#w6HGM7j3XqXs z=@fXI*p$aH9|}gU)2E&Ak!sj)={Z<6{q6TsBE8eU^G()PaOacI^|ia`R=xp@Vi19a zgfvmSR_mUA{P+=$E^UrRZaQx-N7r z`_ss}$^ky#AGZzB1tue(p)f^>d5IDXpy$d{{2Cl=iI?z6N=nl8TzS+t0-oyY=xP5> zMN)ESj=8roYcI@EsV=%8OVDsM_yeS*xBFdCaV@5L3&DA*SmbZ3@&RqjPb%Jq1g;b- z-2DB!5tn7_1SR}dy^V4FP+fif+QLM|RBRfyDg4r3Fkg?HOZapY*8pJ^q{b1}R~=t3 z6vL)RTM%%A-S-95BF=Jjf7m$rNg8gQI*ZRYuU$KbP6k=Vv^C|9)q+$08YE5&wzq<`H-&jn(Wp`vccPS~~ z2UyM!Stwxi%eXi(tj2Cyt?eYzf0G*m@PUE+3}8V+XMu^kbNhXG!+)0GqM#|!96J^r z7M7ftcwJf9X87yD(I>4b3Vu(Y{(|$2!g2G)4YUJBG0(TqnBgw!#EYl)pPMxy9%)9i 
zCam8J^lm^2D4Os)ZfR<+0VseMZohvU8O2GSvNvzcY;4}ArxTmnghnZjAXFbQw9`QKDv2a=FqP>-s z%pxM%Cn%wHHhUc_-p)I`xUgXH*6)-zhjtN=C7+<+DuOcreD-dzrq5b-^VTfN!#_q@ zX~&9Qu+q}s)!owr8op=Ou0;?<@D0S;@W#o>$xsWl3T>1983`qt|E!7_%7pm%JJ!}> zhTlRVnjATD1Ue0o%0RpUg`8y2t}N4rV&8o2R|<#2s{*YlfQ}S}mw;0rutcpZSMa$# zX9stIzwh09XSxFFunRy9x~!Yq8on3c6CnCWxmSNf0~a&1cg0T*wf6`&SmSs?#U7ep zA*zQErXX$o`SYji0@;=gz#f(gTNMyLFFV@|{tGZLq8}Q0W`}XJH>jb( z3fp@!I`L}2ua;y#_|76S0Q+=DkK*_!PY#9@7Op@>145Y?9MrS2`T-OHXb#wfXdIY_ zpNRD)TtTD(QhK(CJ_>>i6yden1vWn+8Hf!U?D3_S;*m6+rldEBiJt3_%=2+Dk>@fFzKrA3ER;UvBrBfTajUM$0QJ>S}7o03%WSw%@-bC1s5B3=sU;$|+79d%DG0|FkzGjP=#unOGaB<1p8l zg1>flI>XS!g5LJx`BJ8R_wHTP5lfqs&r-M@Zjb-^rBh%-K+*zyN=8OTJYM=Z(@23OKDTs3y1A3SQzB1x(rK3 zKj_skFCe2L`yBh?1>D7ycke_DzfmK*wWKHjzX#b?WM%;Ak)Oh?8t*O4Nli`7&%fXn z_o=F(Xp)LeLR(uzP|y`M4}uIfWAO9mJTOS_-i_q9jOt8bM`^eJ`4smL3K1+N@4Zw< zdn3NUMn@4tAOhO@*KgmhV~^!wAtApd*~=wF0~2B%(wi2IB~!BFU5_Whb@@%Y3*7x82L!eUy)cgx7CN>p#*M zGe7^!34+P}KV}qF(IfGy*WOREY?9QghS`UK>lG&dU#1WKUkoh#|Mw&QwVOlWN2VFB z#GD6-`{m6G{+`P-@x{e%JgU1$M1!Ggg0Ap6agw~+24rkGb%x}l+aRLU7n~SK2B^+( zaarL@_jpkw%!&WDcIiK3Ega`|5`Se7-DeZxpCpk4|J}qt|C@c`Ugq^1H>l$p$L3eC z?d!E^N$zD*el~H=M`mhvWgY*^=X8ZcRQmSq6XfhjcvdO?ot=%Jz+vG1^6lHxEpEtd zpzt_)Rh5-kx16|w|8-`=7HD#}ZXe?O{k|w%o(cKuF0cjf-9x+IO|&@G)Zm|k0-cw_ z(JR)kK7eWk-QwiQM2G}&LQWZU=KE*v#`~>v5Ai$g(#U|x3g44ToY)+RQI`!g0oY2+ z-pF??LOjFg!me?*K^ehnf3KAX*$5C3d!ELCWCz+T?3GX!WkhgIDS#M9S^|D4~oO+dY7EM=<<8>@MjZyKj=d9P#X{#f=mlW zid@y09h^oaMi5Gb+YCVgK&G&`I8MwfmQqDmx7?s6gzb8Yq?A;-^K8P04@&@gsQQ3p zY|u`oo>tuQYaEk8!pFx4I>(pFN^i;#gyp?9-D{%JIpO8bEH2(uRz5+zQ+aRR$Z{wt zD>tD#qtk6xP>3~!0?xLCV%gmX^XK*&8W=Pk>Hhv55zZF0x_>Ek8k(!5U=gD>0`LkU z#}Nnw^gRsaFnc43%w4Z(d5;_Uj+o z?9LwDh{p;Eq4OCz6qdp@9J+*I(_dtxd=70U5yPO)^_uV?B2>ki3?< zdd-OK1kc(4tFW@2)Z(>c248&Y`2i`#t9P1;_Nyy3lhW`Yc#1-@fyTrIIqPf?> zClhp*I3PkwK(c|ja_FsV_JHR%rZ7_w0k(|tT`{O-G25?wPii%cY4iF6>;apbn zuf(MF4(w={z%A7{@`yh1U$`LVG^KU>Hr{a$koQ!7Irl&2`iN-j4G)*NF3KV#{vXNC z$$i4W?k7&1U}H-I$wm{#e-`KFvh(x9-K7v{ME?+vZ~F9!01#R`BI>qe0W}d9f)EdQ 
zla0N-R<^;DloXbFF_WfO(5}XCTi`^&a|5=*N)kPc;yMJ&R&Q54Ca30;7Wm8=fn(3Y!rDQ} zabQ@KVmTrM8prnR-mQA$2KF(dkmFmpIP;_RNZ!Pif7TW8Ze}FLa&bwgyoKGDgNP$Y z!4pSoidPl+LC~YRdV8anf^k6gTm<3$$4l&4Ex?KWH*e%<8sOlDqbiD=G$;&|I^1f& zJm40jGe2^z16N`$p%BKWH1n^{tf3%+gz8&0nE4#4KA=S~Vd19d;Md{K!66}dA48@c z@9}rKf6PW8B_#zQso!&rv%X)={RB66W9a{B#z>qL2yAw75wU?Spz+-l#}Q>>0+GFz zc(vIWADSGA1j@Pb=96q&4EF-B$jF$2tK*k?6NPo(N6ln{ZL-8f>-6jy1h+jsJpp8y zydj1+TN5b;h_A_G6f4a3K|eU8jR@~S=lkh#!G#7TMXVPf$zqk)kT2yR5hPk%a|$2CfBdg8vQsB0dQB-wgp z&+Dv>K0vj?_>SO@#MiG)3=9^Ib6*2e#+F0XB91?>pY^5$<@NU`gE$2_L%Yb{6mVEP z{wxQF|Kny%XYxK?nwvW+;%bi-hNXyO*7XI0NZ?-=F62i=9mU2#%*`D!1n(g579>?* zMxl6P4rC0Ab3H}AroJAbDX}#{HD_l5p9<7RBq(!pbE(tqFdIhjE|tB1zz7xwl5~FW zbg*RQcZYTp4A4+*Z6j`y)+g?A48{Q^es$v$2VK;4G!&#fy(zI!cp=;sERBAA#aM;! z@m1<+2E}{!q>25=5BV$3t&u?qKsmTz(RpcW5>qT|VJEx(U>iJD2lKkeMdtDUJN}2lyum?#AT3oj{v`CiiBa($7t?KzikiLw<_>MlqPa$|w zh(Wzr;Zz_+?Te2&N2qLWz6MBuK>?VT|AzFEl9^F^QbwaS=lO>Hf)EMUsy z;L8)67;T}Tpa_;uN=qx$`?4=ml;&RGYpgoT5Q{hFB#DxYh3Ju!f`UaE8P6!sBVqzM z9Q~lb$iYuN=tLSmCQtw>%xOl-f6K2)ndMBh%^A=CzV)`p9O;<2I7W}R$Y@zwdZ1k3 z7(J0>!mTrM90DkYO|jSfMg(;4A~@jaUP+jo^8vNb(MSBcwE6=OG-O+?J=$o?-azE=E{$hGLajnY~M#XCLTPO|++nhn@!o`>$txyzbl~tTjST;ir*! 
z?p0l6a!o#_%jY%98PafNo*NoBZ!Y49bn5Sky0w3j33M1;nmM__{;Cg@V9RMBv{LYE z+M>edGM6LVgD*hFWPjvC9-T$17S}MJe7(BW3 zf1QeoJ&DNdM4XD?5L%ygZ$U6+0&-nC`jvGn$1x7jdA(9lKWq73fQ+%=&~B88uh@}@ zjF1u_uzqyeW+1+H_VYepHlt_C`u$2z8?K)4wx9l?J*kZ7A36qLBiyzLv0Mq4^M z!W%+(&z+ke)>=pq~&dgciL zSDD?|f#R&zY8(X2m8A@ijTILZ2)oQBK?p&!`ttDzZ{~}xD@BEcGHG_6o}zs-KQJDv z^H7h=<%*=F^aMG2C(+HaFyDAWoFRi#zqEZ<&F?}+KWO{;`N&3PXJkz6;VL`$dkKOS z(nk`c$VWz>HSFi3SRDS*Ht6K2t z)omyy$btdvBO4`t^L$RuWze1Ej#oe=ERH+aYR*PYwKc=C5RGfB3-VUl zcE^5yLRwl^=iC;BBsPci|6z>FG_C_b;PjlT%QA?1#h zo*WoJV3MAe)(K%xVCgfgtSAM8fY_L7%Un=rvVh#!jkF%zGN>uQ_x6~Qcg;C0crN$- zdmG|<9~XwP1P2mHLSlMmoC*kcyg=a6yxEF2^NQ0|sfWi!-6O)nzWg}k)0MVEl#e5H zQ|WewA<`MXk_nZSlCCno@SUU>A(eqLrKG0D3Ece*;XVdJvoSgo(bKSF59idpnn}%b z(vDsAZ2_>~r4y_i93{vEK*)ZnBZ1xQ`sp{sM^GeaWx&cOy|FAUa@Rm^?01DlMydnv zqi)cn^{+`pWjr@(%&l7%efG`+XztKe zP028Ts^;3aM4X>G;SI*o{K^h{K`*fogHu!t3w@<-`4F_AOrZ_vs;jf9w?Xoqc*NIT zNlHMp<~JZHRuLm6$PSQ^kzqbI7jcZ;9gF-`$T#Wu6DbgG^Tc)vR82uzE2J^(A3c|5 z?#A7+c!5m*-*sgY(Kj#puZViAh92Mq3?1H8i``+m+f`R$c^zcz-^5u(J(_Y$Z2|0R zn6UD@F`fr)8)y?(HF}_weCkx{n>R-(DKWaCqN+Oj=MMw0gZ1t#oKpy+ON|Re?gWv0 zB8=GZL5QH$<}@HQX3=&2@Zm6QBmlYQP~dIHEWpgpw>zS&Fuo0ku@g-SYfDw*a2H}f?^y3hJ2i8>8F;r@7@{L zM-jzLWK|GTg8aaVnKzUlAXg}{2&4UPS!DW>ybx;iLHtu6qPKvK7@a@{2#?6vQ?w?+ zlTS8K;V}JUBfsDCd907R0&5>c^>J_0$bzDx^CIVlF!R1PRXokilb)6)Q?|3u@gdk7 zgn2LHNZcZP>Wu$`JH%aN$xvHHK2F@+pR-19o~DM;?oNUw@8 zG<$E;2*m=OE%v{@CFYOeRPqA{te&0O_R&oP<_=z=WPoJ}PFdUT(U_oZwN#kID+|DU zYgA<<2^vJruTxN?+Y&n3m!d%L+R8>^vI6V2Ez|;F{S&vrJ({o8)v0V1dd)sWs1EX?ldF;=tn ziiY~C*JaSW#PoD?ef{g;J_`~j*clla%We?8=CRB-Z&pXbbsv+_>D|8F1pI|L5%@gs zd4N+SUGKY#ZUL_9wZG2D(0PA*8z$7Cv!C{Aoaus6g(NeyC)wi~nA(nO#B68VSZ4bz zdFDevFNir|(kv5<7vugI;0&*`Lv25&UljocOHWTUa9LSjCnP+G8B2^~9H@QpY>C*ui!y2|QHz$rkSXjWHMKbya za2tgO598kaLiU+W8ZIq0ZEXf>YUUSkO6=I#)A3Z;!v`U=tp3h%>(h6h(ZBH?_fb>(V{K_W7!v>XXoVQy?gz-z;naZ)^-kF{%B*I z(md;#GyJM22*@K8fB5i1n}sk8eGZBaK9c1xw}*dQI&URp8EqbfgA_spSCU!i>68Z# z;!V!m4u4hp`%70)y)hp&4GJbk4M04z5w*(BMx*~$TdO~IkaQ=09U2|vAQV-|ct9Rl 
zUT8bZsCi&PrXK=5J@5U!_iVYgz4BSaY{QSUcnS_&aS%O>%mV6nTY)(9q>F=sVkBLFdn;k?0D_}NepI^Jp0GB6-F3Jx*CqPp3- zf)q=zG@gh;9EEjZ6&2mQUcL&!X2p~G=1@yl3yga^*zbnI*vER1)ue;0VMcuw>*6VXNu+K*xd|&!QOJd8na{^M(9a4|Eu) zwCEKUs5!_NU~S2bpsw@-Izi~5J#?t$--8`s*P{PJ>L#KeSo=|but6`Wj{BBtDNOsO zr7S##W&IA*Q|QweelOJGA|xT{2o(_054IQsuSpPeg6AuMN3pS6fu%weui3VS#5PTX zIl}mQlT-+9+0@PQFS4E+l0D%#SEc52$N$vhb9F&v`i(R-Wsk&{EJHkMjvH}=J++u{ zoSqJxU;EL&(f6Y8bn=Y*`p}O?^{L|Dv35Ft?Zn28mBX`j|J&yj6`z&orr(w^*Nk-T|annou_=?&d2h11Z8A$~FHGsJ66{F|u2=LoN8bI<+ zHtfuV*xsW6RBnxY!g%}+fGJw6uE%0nTS!pQdw`Zhr(}2T+$mixP4IsA?himuyIG#- zUS~ilvph_~e2RL3Nqfxd;I0F1y~9sTtMgt%Quh@162({QJ%fSaZ+B$H zN>zZeAWEDKdad%9MjZKoh2I^G7)n2NY6Jv5ib#os?8j%|-XkJJ%>2QBkIZQ5?RAF- z2x!@4U_y|5wC}Lz$_Oo?5}Ryci)v2szF+g>$B!Y6Lg2$(eqm{8DdwxOvOuLTVq>}L zXK-z}y>em_lf8x4dC07Wssa!ivTAyDKll0K%uG->UmsbE+9og-!W6^@72tV3&poM* zoj1VaeEb#ZAVNFY*hGkVc~#H{aYyQ%Qd6<5P`TN^ZF%O1s^!wlr*hCwqH5C&}!}kpBHK;RP;okE21y$A6#HTEvwtx8W0WN{y4wal$ zX{~Zkk@R{kjrWLE{P^)hHktVJ0kG0{xctP#p-<_G2r|L z7X*Ucz>HZILM4*U*cTw!9+20H1H!^j!N#i#H z((!ySoa!N{Gq}eos_8j&<5~%ldp%Sm0{8X4I)DiuG*RfW&}mjtLOoX(OmTDZ7$|gY zbVpWi<+K|RFF>|Y$En_nNF9B4e$fx(o`v;sq81-X4F55!=R zdiy#a{(=pMg)w!a!ecS6kY3Bw5s8T;CnLjMhi3x#+lJW3+wX?c56 zM@wQA`7UGBbREhD9)G8D{W{)sU~B(?>po~Q;^mzt_+QP)dgk2H8QHIe35Lx8pA^7sa zxRMTLg2t)?Y7QH$}*NX2VUET6fzW9&r`_Jx)s_E@!i#Tgotki~E2(DbilW!g+AgTlLDryN*nV^6G zEA>-czUfgg{7hw5qRis|+Y~OWczl7w?-z<|*#60j+*$?7#T!TtAwQF6*6unx_$Y-q zMYHdb-rR7FAUi~#>#WqkI(5rMk&3_Y^gZ!-0oR4sDW?Pk)Zii`V;os?RgaSE;m z9A%?^iWX&l6rR@5=xB;ED~6TkM(gR*t|hBXtefK8cwr=cbOI-~7!QL`W<@DYpU>Y~ z>amWZQJ-D{d4)`foGpBk*QU7oxuqplR%&ARPr0_rz{$dLwfHmq8B8RY#KHi%8Ly0u zdUcH4AJ3L^9YD71RYNzvLcQ+`9y*uN$GYJKosVN^9q3Gjl^PA?n;G$$Y2kuLa>Yml z@i?qE%B*lgu$>8ErY<;~l16*o$6dM`#)3tbIi{65BL#xEjYG~+yGOFO0TVwyL zO{O)WOB{O>^T)|mI9}+D3h~OI9A;Yq&<1X@Qo~R~sqNrJ-Ur>qJQfcciV?Ugo_51b zgSarttiQg$OV`${Dt+39HKgHCd!=22y?l7z6~rm>Xc-0VUyH{dIeYuw(jHM*WvG&$ zK3&eybwvs$*Q+TOvC=<)tY^YxTSMA6`G>d`QsX?aJTNMs{6`2B%3E^lJj&kdjW|3j z!r_s4GxoZ}ZtSiWFV`xWqlIN&UxkQr!wXK$?6@)FgLTRdyZG#w^%175?N<7wYZrUm 
zcjP!ZK944z^9M->gPHQi(sFXxO)cSGBAhxU*gnz^vqhQlz=XE88!=VeFT9o@oyAic zx;i^eVqu)1icE?7`rl%z-3I;xmT!{Ji|Pz%2Joi`B4)~6G*zrroS?&-hQThf@HS)S zWZC4X3)ZoeL|L$HDvj8lctx!-YWr(T`;{?J5RL;R_bd!j7zxK=kiX$cqe2ufia?t% z@c|+W8+>bWbtdEURyZ0n=ofg7=+eSM8}AF&YlE5Pn`f>iAiL$epdUmz2#+3q{#(4o zk{q#OqFipN%b-VNDwvvq0b4nyT^QE^3~&=^LI=Sy2peqPU+r^~+xE|a1)#)Y3nAsW z^sAjc{ZrJ}b38mbh{*t;nQI`Vtd1jzh0`iru0cQI>#(F0r^XCDR2C}nw_1fNtrfxT zW3p*Ckn<2})nQKyUlprXmDPNWm|NTINj}UB2hv~204YZL8m%tq=*=@>3l$3wcI_wb zAaw~atlChvL>CP_8WXeu8Cz2gJY(d~dfH>5! zdCa{A(2Io70#V%M#1{7vz*-(r0s(dB&_U)ZES#`gdRsSinL-BPaTXkgOU6L7CY%^RtZ405HdK9;j3q+9N>uJ0~vmrkxI> zuc10(py&%Ivtbj?fyV$!weaW}q;N&cn>|Q&ab49^Q!_Z`4|j>x*7TS^#DgmRdkeK+ zK-2g*(K@?ddZShRgvJ?pAiT~f_8rz>9P(J3$qArVa;@`3@By*H z?2E=FlK1fNCLnmAVsv`mfJn~_w5UT=R6Tj-;det~_1TjHOG7#Br8|5{DJa5Ye?Xwa z(G0sPzd6Qu$^`=^va;Xsc!H#paia7$Z~`1OWYHG04Qhz-+LRPoP5?`@JmQEk-Ikf@ zEvkbD2_i(C8X4IIngDhZ^Dyo%&;MZVO~A2U+qQ4jTBTXDXwoQ+GDT^)Dn$c{L`7*b zG_0bOOluWQBt=#<5Q>BnS(?;JB{GI$70sqZq0r#_otL%l=YF5}`L_4{zVCgv@3r0A zy~5?X{{QoTp2xW#`>`MU`9Ts9sl)Q4IjowAfXp-nQ82tBNMH~SA*=uV$yO5u7wa_A zLIZo1NRVCs`E(kD9j(~OJRZpKS9cexHJEM*Q55XYEaP-o{| zVIFE6^$oTV#G<+S&%F!wxw-D-p_*MV8U4D@6u&CJ`LCB`1dIv1e3>0vvY#-qF6F(5 zL<-|iWDeiN9PKMHiLual9K~#u$Y#E@3NlwPIMXN5;->wR@w6f$ehjj<>|h0}cS zT;c2Zy5<{yQI@3rJi;I#@5ATEQRY_~U~kUzLU0-(nJw=6FE9UeFASL;VGJ}frkG9+ zQL*5tPRB{Q4KwZX>xY)hcI~=w-aOXK9!Y50J!7SZ30h{&9&cAoU7=UBF{{7f?~i*Z zD5y-T5{{~m9X77h48D?3RJk#uxShXR14$ntB?&JY?hA!D`WTwBj(WANx}t6WWuRU8 z#w_Cz{4itk2HeaH{FOff<$w?!m1uUjic||rVcgU16clWSB?uaPYMb7*UFp25K*+Z0 znwqr2Z6g80Yj=o`Q*R04(fsV}8kR*~#ZhccP!+j>IP(<)D@M;9x}|mP4Q`xq^77&h z(?>_Yo2n~URU^oY4&BBYhtES(8~g2Pv~d(w9G=@{q!L}^C%*V@_WPJl&qjOgx;f?X zw0#R^HQ%UwHgrtbmlT7D`kv+u=jK%h#rfuS_v@b$ZWmclNlgHO9rC`fjg36C0{SY` z(#pB9Jj1_Q^v^g6PlTfUu>t3sPEZ%p!M}nB0m9*W`HEc}cAvJP#JPLkJY@zj5uZ^i ztF|wl7jAWE()V{L1v~rQt4T%)!xHE#ae*_bu^Kc<ST= zwp^akiOm^BXKaxuynX$;4pJS^{28Uc#IsZ0^)Ih|r?Vuqz8VZ?X(C!oacMHHZ2P6Q zb~3yeSpho1@Sm?T;dRknmQ9TL6%p-NL!(z_-vNR{c0YOYg)ju* 
z(x3rv>sH(RNI%CrV^5vApdped?J<{@$2P9$o*HkL^%%-vJXSR{PQWx^2{QU^%BTPX zoCqR$ZdW%yXTK<_N%`}c@e6B5%M8aAr|Rw3eD078288Wg>ul_ z{mU!t5-qMRUV6^Y?3(7%b2rx+hh=H>8VXaPhugln_oEH)X1S9I6QdwXP(aW6;SUzdJWdTmS94;wh$x zIUs0gX@wikKB--&n}OLAAbTkxU+re;?~aBqxYvq==z>b}7e#sbwa)oMt@YE&+`(78 zgRhW!kQn5wamY$|q7rOb=^bBmGD0tN$Hy~&ZDW!W^;zG?w}x;3usN$J{~Kuxpy23; z`Rk{4f0&jQ2}Qvk`vY$ZaQCpTu}9`#ixnm5`}zZqIVWA~$Sueb)CQR_%UZ0qcc6Qa%uGfimYp!4uf=BM({WQKc(*PZ!Lax?g2-g(4G zq=#3oUM=k6*8J+wgw&QDT~8QCnE>vd+a|kxRjzP)%IK9R(9g%he~^1VsE+^m8iQ5f__w44q8SJ24-#(t#{Err(Ygbgi$1X=NIR9<| ziQjzLP}e61F8=U!+!6Z9eG9ftM;7gKgyn}@LFN>%5Tj&H8DdX|?x+cmRP34qmiCx7 zS?*Bx?L&^6I_M>DAM(Qxbr+#0kZL26=Sh}Px*gzcfPEAX$78_FZ2Rqa=)DJuVG$Gd zGrIj#YW0uBUu$^6JkawU6jOBKLO~_jApJ6-@GQknm!R9N0HzvG6a&5Hua^dcNh4Mc zb7ioKUk?P7OtGf&n~H-;NRa|d*%DF}{#qO7$bhhV5W#}6I)A{fHGhWcz~&BDBz^DL(V?I6KN zP`o9&KbbafJ}qs$Y9*T!)v~n~p_IU3`w?%<^-+juvYo7{xm4%7JNs(?etw6qGkArg zCMp@OYw$~3uNF9V`PBtq+h1XhDI$KA`!hS+(BXNK_nGL8&M~}Qc=l5ReM+--Phwrs zsX@=$W&K1;@EIiJFg-Y&(p(s>2fZ`;r7MDi;y^i2Q%ruwI_5H9m!Y$-`GadE)+%QX zJ~{^XJ$5Bd5)kDJ`%x9_hr&K1D5#sF<^sDWnm)f&|8?HMNwvayZSmZyi=UHpt{W-M zCK?EvnqIxsFrT;Ge^m~c-w!GEVwC`DOiym*ow+Imi}cPD$tjdjXXw8n?a=pj^>=<< z=5o8;nGsT{Ksj zGGoVHqzsO2q(MYH~cEY|Y?gxwDx5Q|clr`NvkbfRwOp zxUu^SZCgveT6EQsPRMXeJJg*6p)p0!R;Au70VlqFf`a*v46$WwUJr#C*WfsTmcyjdV%!72`xw?;USo<(Im8_ zzl9gaMSqXU8gDWQ%9q@Yx=1zYC@a@2%7hDcq7fNN9|j_^>^<>-q#Qj(#PiogBhIFP zwGXgO+8SFkIAm0}Q~~_mGj}--$+AQ<0Y8Aa=un?A=aOfI9!6WfVD9O)BZdrt?P>={ zN@9{1&|$m!rea5QuFY{1C%9ZxwUF&0Cl_L*EOz5i z^O&|#=&iLnD<%c&7h&w=kAwz3 zBm2}8jOI;Yr11`bf?jt+G3L;r`D@p{VJZ$CHq7vp&bo~oSH!N+CZ9l%p)?!rkLme? 
zeH{&vu%H4|q4QH@wVpb)GPZO$*`XSoF^i5q@sjIKaa8<{9a}<;Dg|q{>cQsJaRPy1 z4^mT4PI1b*eLIB0marbKt9$%xvx)3#JG*OiJM)M|^hM4CXJ=e&!}g0T0aaX9xu{Q* zu%c($)@>o*5M1Luzqc zv|qR}du&Vl{dCRkaXDVjQU>eIcy{*S(2$j*d37$B;dIvEzKc!-c&F!5UXKQAFmaaC8J)TVBqmsUL$O zq~#0=8JrmXEtdE)qaXoN&0B(gKL_;_xnqY8yqnH@;N{~<3Iu3+$^o<#C+3ZVnlV1a z&Z>SmnXOGFz>eJy_a5d??dj%xiN1(p#1=$B zbooI%%+}LeZ!wD+5kZ%$0;-2mMOz1@UGe_?MoOTsUQJ?UH#Y8c{b485_>A@P;!3Z+ zeMjob;}*W-Iy=ofIN=uRz3PUHqmX^T9}27pX<7iPy+v^!wr}r5Ev+Y(myALsc8SeI zB%tVNFeaUi+l+whW};T;++e4K^P#7o-(m}kVn}0L1;*HVk$#{_c!Ab}%es8&lAMI? zKaP(FsAv}?E!{DKwr01)_*m{N?qOJKbnMnmv>gHd zA_6uvn6S z@ol)D!J)&!r+`QSz7zKD_w{|^qE-9lOW>q>ZuRb;e^TU`0zBo7ZQ}}`KE=WRKJXKd z&5{x~N}F>I_U<8Ba7sr*RrNB%3Qzq97?o)fzHyw7_QT4!tUIMY%bbyM4r}AGBfO-L z4Pl+eT|icN2M{tdw3mc_I3fL_hk`;en`n?WkfLvo9;?y!=)!a!s9%NXlX8k=KSD$y zR%M&J_RGuPXbSFjLGUv0)5s+*+J5}}xp3+Syyf7?_sUV6?J2>LPSuBKK_LhxJYKtTo6gY3os_7^Bh9*5vuIzoB)+e4SMuI7l{cPf9lo0KNXHEDz zJh_0v@uMxv;@W#;_fl23z2QMtmT*MlE%^e~_TMm3GizI^^3Mx>M3H!UfLG_vN)!z2 zSFH-sjhcO+r*^d1NrN6hDzHx;YahY7o2;#Ub61_7g7&z@Q6|k7>)8DB_Q1kmtx{N0 zVtIWhPF1JmW7M|I`i~Dj&*Y+3-&Z0`F{Ihfr7KsC4PrEJlC?@vJjGTESwGha7ZID* zuOcrZ2TRL5>nSI}_H|);4lpin*>eO(K_Ie~wit3pgA82Nl!7&YESAm_dCFi)<$!c}Zyw7J?kd?hS-r$UmS!~j z2xF}CCvV6aP~9JGEv0r(IWmBgTq9|qaD|2B64le}cj$gN3I;-5DHWwHD`DhbWs3wG z9=IpEyB@+>USFRSwVJhle1a7gA@^294VIZUf-U~cDMFaYbf~GxUdBWQu)qv3JTYk< z;UmxU4Tu~pJ#6p(0|%N|`Us3r#&=YjO@iz=ephH1)pOufKZ$^#AlCc$Ih9O@(RhYN zCe~y#(yZLjn>|}r ziF-jlqMc~*9V=@lbtd8+CINifF64Q!`Q2sC^c2g(58DK;8enO=Y17I`BPCcj_7WyK z@{M_FU2u;lda<}jg3*KQK+cGZ0t-EBf0;LxtR$Bv+IpQ(`g4LD|Ib`tLY5g65lMOK z6#PchhUqtS4#RqxOD4Iu&1TTAvDPtCqKisiL4k_jS&;`SYGwVQ3E59n((ku-5HpaP zKk_U2L4!V&m2F@TiX?7%c4BvYh`5(Hmux&;#>JqEdYM0eCzmID5jeQd2&;_w3}sVX!?bxz-g{c%+<$x{Eq^=5)Lblm2nb?R)N>r%ZC0(SJ#Ak?zP`wt z^PncGkDbx7L=2_!%LNg}n<#d(QBm6h2_8w@NEwl$QC;Mvi(n?oe|AZT8ooojLf`B> zQ@}PqM^CR(z5P!=asJIW*QK4w9iEvQS=Zy_Aj&HQHt-)_=Qf69YG)8MBdyyJw!Ykf zRRjAEQORz}9>W)P>2aBBR;~JlLo*?gINOE(4$dSkXE7F!!6b7V`|yB)1EWo%C>}e! zzTdUF0c0bvGLew4xaCQ)XU>#Dk3~e$KRFn6s@z$!aWY?+={^$lxw+}-sN9wW1&}P? 
zd-5cSk)?D7ELeVhM@+sOLj^J9g64{!QpJOBL$%gZKrf`0)lhJVhVn{>jytQCwH!s+ zndvD6B$P!&UZ{dpZdZLt#3T;KL70lrY2%hHElAEm$~ya6R2k(o@2zhAYTaY3Qc7@I zBzh8*1tR2fsZpNK{*Cxy$q{=TSBT&i#%}c9OP4SEjns8%`~`{)hGGCjFcGEf(TT_4 z22SyViBw7@WTMgoG$X=0a{BZwc3eOTUJJ{XFQ0!14E7zWh4H>%d9#^|bZf3S4-Te% z{Mpl|e=w2Q$nk&x$~izdMD(eIv=YY@(Pg@?tKw2=-c| ziHCatTp~(+Ws^uFo^*Z7U;@(&%}%qu5BvH~r`L?d;VB2j5u~}v0yiJQDw&HyCgU%W z1u@VuT&>UED3sQCJk=+FK0w@u4;-L8drt08xi1{m^|je*%3UIk{SP`T64*uq}Dv4>2;*M8=%CksanE5O@&R zj(O*IF!^wilfOCMRyA@B1x$!o3UeIVwJrnZa|`@m%+i!HJ9d@GhdENH*-;4^NqMhG zL;z*wm_N_!0=@*FZ*!8&*6ZQQg8iRWqh>8bc#Jox0wHZG=o zSj(Dvq=8hA#{77){9=!uJ&!K*E*#~!UT3ZO?vH$^2F`5Jb4pqi9FPz8R|KUoK`;+1 zI&W*sdLpVuR5PNh%b zF}%IFSbu|v9j|F-OxMHrU-rX0>LHP~E=Yp0ej*3qg9N<(^#F8-~>UXBr2!+6MyoM zE-)E6{^trS9*ShNdHh;1s_xXOB&uHkh}+*kRmF`7S}?05qBO&FulgnJM6#M_uR)M{_+p{BIRhu6Sc7N2cy{|eHG|ddrv?{yx`>p$0cq?eJ zM>Q(7w@`IIY#AmqB9P4VKIQ_4yolQL$shWU#QLfAN|m#DBmto?$4YfN!@sT-JHJu3uBfZ3vGRy znMJ754v6&PjP|RLjdqDazqWp3ov?AmIpM07YE$&bf)4y+e{OuP6k7AEda)UK$d6N$ zW+p@Qup}0Q^Z&A!M470}npL93XJjjEHG*^QB+H_u()EVp`eaF9g#pas-qmYsGKN1=>X>XxqOt(V0{Q3jj$f&CgyHX(>3 z4d^I`S(vJ_VD5|=9*f#${YoXZGC?p0plp(J_iT-JGDb7xI;dEp0)qr`D`t^6<>V#F>^sh}tA3ow$C zuqfQ1ysPF`iiO(R9`}tsduE~X5=Eg?V?UIa%lGduYRXBrkMHC2$>Hp>xv(a{Hhnb$ z-h;}PvWkk)3k?=6B4%x5o)X#Y3NKdBD~+YvLV`OM(K!M_&!L1mbnh|_uH<4qM_b)= z=ERyO9JnlMEEb8Kx^^vyLZYydW+*O+jvEJTxW9baus~KOhnU4PEL3KK`}_| z&VZPRD2jEbC$5%&62E`@#*T!Y$5Vhe-5W3kc1zt*?Vyta*Y4cCTWq>z5i~5r5|63) z$fE<#DQega*A<7tP*0|IM1RpIwoD-yw@PtiN>Kwl!DkM3|JGyRz?;l3rM*ZC?oY4e zRqTu0btS-%{t!XHa4e4~XQB}Z+U15z6?`sbK(pp5GPydoDXLgYvcGf%N6hg)?j!Fr z0KKIlYCjxK<<0`OCFEE3AeY2Taj3)fd z7T^M;;gXoEIJW`3g$V>%c~8katBI6$c|u8GX8pR~gMv6RlXm`PvYaxw9Vo;e{xiLC z95iyi)diC>-VIY16mZ@N?QzX_Ymcvn|dMW3*v zL|RqFvW#}U=Sw=YE+90qngR(LTHKL&vkb@fR0fn%(FxR#p27pd0T1fU*z0(g0h1^4 z-czTpc>L<+%g|H5vnK;?B2u~$Jm;Q2%oU{xktFq6Om4JBkJh(qLQW*QY-r#8GWG4E z>q*=U$8zlDgzG*M`g4qAmA0?3e_B-3QHd1JK0i_^(Pq%vVYGb0hEf2 z8O*}n9A{!@cfmNSw*&!{Yx;4!$6Xfs`A@Ja$^LI5jjWx=28$TIZwQW!I>MsXV$hK?YQ3AN4xqyZ9Lz~qG?Sl`SZ_L 
z2=DU}@LIdTlZQa|?W>!0y-fLSwV@`yzri{VruxXC-st$Fjl3UCQ^|+V#8-T4;EuC{2pUO z0BUgPjLD zbHom0@B4rexkstu613fxTTLS$+aEQFu@J}6(dbfI>JDQ;3)Kl8E0(wQ7m0-~8$y;w zd&=Q4!z6@TZCaTNB)nuY!}qU|Z@o1}y!F-?>DF88 zux?3|iO>yrWDodX^E{Z5t5zN1o|4~Fbfy6zON@A1+CNc#pSJ4k|Nmu^bK6kC~$7bbSdC{{rFf=&;=6} zRzpy!=fAy!u6H@JYy96Yn#JV%*Ee@>^X6}cw!XQp0L?@CNhiR^3Z?(?H;EPtx2R83 z6&EPpgWl^!|NcFYdz$V)zx?mN0WZ)JBk~fN!M*;zxFZ~F!8svHsYE{tVpJ?omiB2Q zQ>*&-{dc9?qW;U58vGIv<1$u&9$mIZBI`S+l>qMn-gVbKe7oPwZB(bLGC$gkgN7X- z=;S0_22EWi%OdtG-Od|s4bc+w{NuwTXRqkNKz;xIy)LRRiOZx^n_Z+MEEe@Qu6NJR zs60|Q^%Nf5fU0SQ?PPAx-8)&i`^wzLR2!Y}4qX-Ayn1ysNxealntKnC7rWi>K{j*hbZ2kaO5{Ett~9%Du%Lzp(7`S-DqmyrgfZ z@kOw>zGIWfs2XR>g}tTPSg+^4SttcQd^lZdI;3AMxo+AVZ=d6HBaHt^c&17do*BIU ztTfuNocc*qAGuVUWa;epU_Q}~{fDO#YkJ<^PjVvW4B4N%O&Wau5C6yF+w{{tg$RGW zayrwcbI;JJsWvWnXedFmftw$1UP0vuc?z^iq9SzzTNGMr?ssurdL1T-;us6jzz||j zu}2TTE1g(|c-?Zy!>2P2)B`Z#c>A6;r~JEgTt40rWQr`CS;y-8sLiOboqF+=1Z7Ns z0h_+-QtHHfh3q0BFC6f|Mrkh_!{_37&XP%_YBwOIlZ?#IatDkCS*uU`9V!W)>EzU1 z#*b+f4TiB6Jz*qh$U_Pfgt=B~YB-cg^F^VJg^%R4-aq7q(!$8$*AM#8;ReCFlU=Dr zyNv3f!#AQy?*+ppyY}Zc9)VSMrfodHV1zF1zdP;hDglpl&Vpeyu3%>pPbC48FK-En z9Kinzy@##-ix~W|3Hz^KpZVe)8zBIDC6Yn~UF&u065*e0Pa_%x4^aTkfb-`ziyY0M zqi7eJ>wM8TijK?|FJE%<re${p1NeqGeGg zJR15k4I+$fAJ{|~H_=|u1$DGDC$?Bn)Thph+mvnGvw!E+>E{XhOh+Xr%njt+Iwp%` zf84F+N@5v`7=KG0MY{Ah6~#xqGo>^V-hqk^Y&<31;<&alw;vpyt=wHzVX!gqSpfZ0 z>*Ra&LNL@q)94CW8dx(pp3C-WJ_;SB|seRVlAWGyU@5d%3q~5*z znkPrqtZ2eA?vUXkR4$2GTGd2-&gBMM(%hJ(y%>mOz|Hcc@$^4Xk0HGwXYM0GYZXRC z7*xmG3CC%`tdF;MLf{3mxX(+_azTXZTB&Mk()k(Pe?qtK6w*m8)?N`KO3ou>QyhIG zq{JkMR6U$;DYLQ%va-n&Slsg%?S=aK$&|aeA^e6r_fZ+1Nc

!6@s*1qeD2(L%u>-m4}}2kuR|S5QGq9Y{PiT3_zRYJj-pTZi>x`4M$*~$ z5GM45YE&~H5Bm5#rxKHOyZnn8Kow&Th5nS0%CEp~6ab<*Yt5XOp?i62y_$un%kbNzDsvy2?~ntMBItL2$AmOUsk~+khVgTh;v-&yM1q_K`RMD14QOzyV$eu zh6@o%#ye9yC(3JTN>M?OVSrsy3;+;Ha`ol$YxONGf;ctu zoJ95newNzTpvvFy4H8y-A(P<5Ss(&#FXMM+Olktu^Tke}V?kLsm) zd>ffnllSL>QhYL*iw!F!wzA#2i9QJUj9Wa~kY_`Bqzf!7BY@4EfBpf&q2sAufJ&Kq zZ~1Y)G(RB3lO{PiYSsy2d>p0OmiO^d)2E-2J`g@9N8~=zclsB6dM<`h);eK@^z{ND z<^JEjNU3-8_>hyKid}E-Jjwa-r6XQ+twU$$)bVZ4l+JG#6JACA_pBjPU>~3OdMg4? zux&Yat2)@F_SakUmE)Miq8R6D)~3I>O)4)(-THFDe7bx91R;_u@%lF3rrd%$)I1R! z%wX9>vtdNkj(y}KePbEvy!&4^w~phw!q>k|uPzCq>^B579%3A2GpBV@t!tfK5@r8i zr8@48p+@aRnPcY?j{g*vLf{@`5zxEqVIB#~$2IXzXIU;T%q8QYSL5ug{_G^_ft-^j z**EK9AGQxV3OX5@NW{QSRnog_bdv!A|3LjUkN-)=`yNz=&@13sgeVOqEii5%oCh>a z&5gRla<58Tetco#tXo$Ew16+yw9B8D>i=9n0!awVtY!w zk8=mJOj{`mMq6g$P!uVf@*{KkxIPD4_FkrL9+iY=MTU+A4mg~6N=-A;L6_a$)XZ21 z88iLN26g>QCrd)Rq}s@<^mX{j!}v3<0L0v($LxV2P)eAIbhUN6M_DiYBP1bGlZ4IB zi=n9zYC)};h{i#(7xc>S-o9P<`^1^fX@uSJLV1b;A&%AsU=S7e0fPov8X1MSdIG2} zB_&}umowne!-re9Z$G8&aL(}ymOmZ4baABXHYel`yC!Vig%SrgY|5`|f4 zX$qAiHm$t0Y9`(fa)`31=25q|w^~*-Q~F$Iji^fPB(u|%D9s6*j|lHpB?-{#MDx)B z>GT`%1vO^-4Gjo%7M)Gae0&ib9ui#cYoYANtSL399_Pq@EZT5JzdMC}K_kJx;#vZ2 zC7?5-%pqzEmOcpQ2OQ)j8tDeWTHA1v8Vxg2Jt65|KMBem7hoRr!TZvb7Ct0@itt#W zkDVIURD_J8mxH3OynG{=x1$mkD5??nniB;+T8r3_HK7bE!ZITa8R8@mLr!N zW-;Gknj;RspwUR>)@jV!K{^d9nOD)%B zwQ&a-|9GGQ+wHBEA74R83_Pw!Q>o-$95#8%6rpJ`UCsj+M}Sk4X8PMYs5?1*Ij10w zIo?}d7!!;a&^0AgFRs+4qxzf*2xvgY1-WN5(1$9*@&uc$)W0+Dq75;1y-sHX+4tyk z@3!sRoFJX>Be%=UsECH7;|u`KrWi1Ia0)CQvt$zz5H+X1N7*H*NOCe^0|^EY+1%Wm z)>xN^KIQ04mT-0Dg8Yik>IYd1BjGj7fL zu1ORzm@>Hd%^4)pmC{nH2xHDju-d$uA2Ybb9lj8>S3f@QtNw$9wtLqu?p;4v-D)_% zRw)(9lH=~3<};!r=c09=Tpn+E?H6SIi|Y`AEWgC4ji`YoJlVt3uUL#L-u?h@OfAW3dE zqq3n{5B8ayGAgrsC}Ey5#i{A;ZqUnQ>J0IlAMy={<;S&CRq#3}?=uOP+cFm6oXu1s zPdu}~5s*NZLGEf%7!kbs(S@Zk#ays31tzB9Wk(a4NR)6wYgQRf_&q`U3CD8qSTV{3 z-onEB1qSw`5K0CQTO{;+JP-sHCWF=B;F*I24I*VR@!{F?=eU?*+JHC_Q_uo}9c=y5 z3nmq&BdDI(l~g0f(a|GeM-KG~0u0Gm&a?AORkZsEk@Pt?h$ll6_)7|_F`nmr5NaTb 
zq<%0_Svif2f9idkUKI|HL_;=*S7o|)x$z$UqvidT?5kT!V{0THS{pfqinmoxm@#@H zf-uTq6$Wb}CJ=Kt_VwCLGI(-&AuhZ2wQ%_s>`^KsE|_U^P%Y^nYR#A$iREa7O4-?Q ztgYtp;VZbBTy&ce`m)OXSd^F9@2#)ZY}3}!vApS=m{TTqEq79mk5}kfMW0FYu4pt+ zT*gA_IjzvX4ZY(oZhz3mzwO_fuC;o46v9#-V&cZ4JXCGqC0J3;l1g^2B0#_>P;FQ2 z+n40}U1g=#^z z_jp=?W6iN)jK|hNX>a|&i&VXtF7&M=vKgaZl6oO}27O8XM+*>faFE*q8G|mWV$sX^ z4`g<-CYN%$2O&%DEXb5BnEK80D;s0c`_Fq=egtN( zefy%v0D8veo@wuqH@6kK+zapKKNqHh0u)799LgTvxg*Sxg03?$VvMjRMVo9~cl4f+ z1M&zcBNYr2v3o`GC#HSeAWjzdpOi5BTI{Fjr7vS(YmXg!%&k&F-!RZp!DHL6M#{tm zne@Xn1lz5*Ie?pyb9#Ju+Wd>c>Zoor6<4{lJj?VJjF&j+HuvP9P2hwkIotTTZf-xK z(+OM0s0dO*rtdz9xOC&j)N0GV9^3Zl4`hO@jx5&;n<^5ScdpCHCQ=6a$Mz8~lZ*)} zxM;q_b@_*;(%f9ZwouC>Xo&7M%G9LUwA`#4_|pt?J?T)iLm`s9W((e~%?*-0@Y9tU zqwRr(&JtNle)n5o`#xwK670$B*{LvWC^R`RH1Ek&1&>Ld(wp=$aMd;s86X^sVXZXI z{~|Bz}=DPQDHH4CrvUgEA?ch(Y;`>vN}+h3!)Qar59AckVzlein|10Ne`nI zhXv<%LOI$l4j{#GW*a9yAR?fQIOdBH4;Td_ylGciSxS#dmoELmND}Yh0jQ)WX#W-% z$YE(Fh_ks7TmplGmt)++#(47NNxH-cHZ+O>fn1v6dd?_9*dVf$b#tK+DVXCErGSlo zYfCR(>LB`MH}kHHrRNJ^Bkal8uTXH}a90pItAlO)K24a$Nw#_6&_Yz4;W)-xB&6Yx zhqbY;?nxZfbNAx{$cQ^_zk56{DO4@aGG`Sfs9<#l4!*iS|tStNxC z?1ii_+M`XmdCH?mjTO|^FW|cCR|fe>QO4iEaBFMnHihG-aqOVed+E|bQX{AvJTD__ z^1Lbhv=1ert+}z77eg>xC#xy^!ANezLCkMMAXfOO| zv_7Y2O#FM3kV4_DJfAoe-h?`&=S$vngQjPkzn6y*tnC~j2ccpKfHUfX+tHi_{sfgK zVhYB7I*kZJkRow3SdO}wa|Q74!qV}mpC2VBVY-z0nl^Ss3TyD}puL9<(Gt;^wDMX4 zr=>LnI-|IxsI5vrB&0Y9DejaV0ECR& zBG+#Ovq#KaHmOQENWkyVb@(iTg!P9}U4!Qf=CsHIKL(`t7R&KLH?M($tN{wLChlHqbI~`oszL!zzvQ6PBBs+iu3nk=w}IWZR&Jem-*`(&XkH|HRmP12~%NzRs-+ z+YW)&22Mex>_mpf_9BjYAl*dmPj-rKk)tcn@rLRWA`6mQcI1{=JA39O^>L~$UPsEO zZHZR6p&HO=*V3~~I~K>a+=(VGvw!l~HusuE4uUJCy^&^dC9u~x#DZoOiX)ethk4*e zVJRx_u>VGFnQvAv&fWg=xP$8NisRZXZs&C&BxFYE2r8zC1-<1Y7N1gp5>#^ClNDnk znDn@;Ff*6v3Yixrj>sAimw>!9@P2a-JwTe}7`FO$<9=D0bwvbwq~Y&5a1?+FQv)!7 zaYZ7rKCfUI=(Hc|eIzaD;1bE1(BYCo-Y3}5ES#YIM`B_FxP;yv?15iYRgwA8S!zYR zz2JmeynE-)!8+UR5DccIq*R^T#hDf4d<~F z%7og9a72v8`8ImPRz%Ihx)m28k9sV4$XQD``<}dkjLukCJwd0ZHvNM7=b=ZfkK-H-`sykr%%a 
zMSN&86_XdEg#N=iVSr$WNSGyub@<>E1!7bdG(%Tl#z2@W_l8 zvY$V{Nlbx?Tla+d)UdK``p{PJO#~K0^)KM{&sB@7+1rXB{@WYtGwYw1v=T8MW4R-a zP~fKKEvOX21>TtD&R7i>tfCkZ9uNa4ct)`%1wVV_5brHzZ0C{uw&n?FcUJITM*f+S(0$<&%M?o{~^5$bUM2%8!Ti2wVN8T(4bD*HPX6^*wimYTNPlfmu8w znN>Ua{zQJ886E!qlUyG!KWMiZS|=y36;IO(GTw@?s2tcjv@_2tO7B3$^g{T5{-{5` zb&}qN^zZilTq9HN)oY&^?ju}G8stuh;@?Bd9Qu1y?4v_MJf?qZ(Q>kg zwM;1{&^9ruyv5bY*?$`SGYsGtaZ7EIjg~X>nAH>4ySNVU*qr8BJd_EwZvFag)g9&4 z7S+f&-aRmD`N<8dBLx!>7`A?AUR2(0rAS zZzP+T)VGIhb>6oER&Nj$chaBxWZXo8AK$+s?XM!B<`ufPd@W_a58Qs655b<-yv$jg zc@I{Em~E&josO9qs0~^=r^#lE2%`CF?b(hvF77?;tW`|aU z|J~0|72XSAi^B&@(|T%~Y~5yCgv}h8H`r&uz<~&Rv`HmIG5*-0Lm4FCo^ntnvK>42 zkYFUBELD*-++`DHA}j%!L2`BD_%H#dwg}S(<0uzlkTz~P`fqX&Q+VoI$@zPwr7c&or!jM)<^C)w`N2d*V?&AR4zvT4LUiJT zD{Y!q*PtFHfA%w#gYXuBEzUtaZXwv9%Ie9#N7**g?Kylm#P#>bOy#Acoj7?K` z>gcUIc4%s8q1Q!Y`otxTsgR#U^BJdy?c9k1@;l7XK0{DMEF>#fNI%)4MX>@~o&{5qX9(9r9X(F@u%>@a>QqAG zvEZ&Tz=J;rJ>#B@@E;_K$j;agM~z>coN!Gq=Ywck`#m_A2U7&&N={%fcg!}sgHWY3 zZF|ZQeaPkVR#9-lAf&|g%X2!t<_$yGPKLz1nZEVaUY)I*XwHSFMBdf@7LKPCwgIGCserD)33ZYgmMlGKXPwcQyNU+c{zn#S<7_H{uzOnAD0@ zG@(isJ_lU5yI?6t-N3v};0#JA!Qug#1qF#S_q3<>B(H8`#G_^HK#+$oL_qra<3~T_ zEcnb4m?(r9`9=Rff+wVn3ffVNY4+g3-1MBvQ^I*)6gnXP|Bb`hRGIGx?L+lP z_#5h4+u7-`ei5)_&Y2IQjLOQ9$kO!7GNs>p<+WgnBb+?snt1kSRg2vM!^K+5uDL&= zOK32O7@cshnNNZYL!{2?29g>-`oV*sd{B+ZkKaJxa;3_dKI4zl{0-5_kP$VSD2zot zk=Yf%fC>w0@)a<<=)UWc30H$;#@LRI4EkZ6>*w9{y*6ts&Wa4!vGnVl*}UHCIKF`w z%-W&}|8K~o@zsHb9P@fy?rhGTh-78K`uNe-+S_>*JPB)S{rYoqL+KpX(&DJ1(t=O= zrIWXXN@xCpN)OdFxi&WrlL~WQSPkg{ThrFyk~XiTA^hJ9E!YHIhi>Ex zoz&MYgNd{LSeOXCm##4x4bx52?ZCTx_MHbdap|lSuQl%+ej zOA;2K2a~>)!X}JapSnouAdy?nM?|n#gF_Ms1!&pcKq@SUP6!3(w?Tci-e9V99R9~?OSmMXy3jB{t2EE6uK|zuIbh5 z3WJJpld6L&+967WCWaA_SRlYpkh0PRL{{6tD&ItRKX``(l>873!t#nwOk|gi(tJxX zgcU1Vk62wjB%&mVYdvC05^F~Xhu!SIlx*Pi!)QdB{}svCTNHwr;2CSqmnHS%p)ZNmoOa(CH&*`bPiiLrI$N_ zsR{uB!I?>6ffxWkhla8i^I36d!22RKEiEUybC9(GcE0^(rB4S3F%X=?!-!-jFaEa7 zKvva4ibbu}z551E+`EFzc+=x^nNFpyP6Pp9Ii1ZAO#>(YGQ-u>4<>2>E-)iUu7^}4 
zezP|~_@`xLKuWsM1w=0wDto^1W=UW0MS!6SeFNMQZ_Bm7-Lsbvg|pMs>0z`Z#mVrkYxPuy_Tk?hfRH<5I zp|}yX`&EscZg?p?e z1TI{(XuSHMfdfw+KYq?~+t;q!935$QvJ365mSC1RM{oz5d0fYIJhqil5+Z=$xv}?F z=heph3I(=!efq;YtK+t2rTA0x}P8#WmJ zK2ZtX0F?{0uE}mGE0mS=><>X{d+pl}P{AFU?cT=KEOZ>}jsw5ul$w6){vXj$#!*kY zDSI7ECZ%ECWMpJK7U}4S)|kJF{cIdZKrv%05Wu|c2|!l~I4JU0vo5P2EsJXBk+na! zn97z*uwwfI`U5AfzerxUA=PFRIbECAH@k-2yLY=yxwm}GIFX!7w~&Z#GCIS!O4ySE z;=s-=cWp>s{`DHnFx10WCy2~b&%3bib9U5QVn=;u^RGYa6JsN|M_B8qe$f=8 zD)X=R5T7FwN^v1QG*(6ap5JDT9@(q7*t2Wew7Wldw*0uP>VH0g^cmWq+?xR|Uv}F_ z#43RdNs$Dtl0E|HM>6U0vBWF)0;?K%3JJQ$Htx5W%yia0-c2pQz%;aNOP{tmKbLjm zGyhJPr62guOGtek?DO37an*NaS3jLf1$;{D{Lb~*i7(~j@?Q>uV}|3;9E zdsWxK{+!zSA*b)+b~0VMkLxL;vnF8hrmYt|LthOW>fOI=ht0$1&sRcQj6w7g@p(n- zZ9`_=wc%!pCnNc|HGgLv9&;S>-gL(;mK=Wz z(cJY@M1TTw;cp7&TLe>2o^d7N{J3vA;|9&3GpwJTbjpnyGe#W8EEtRA*?-R~F(q({ zq7f_i-{zGGD*fFYz63#4zsF%p>%E<~kXPZ5fm=~b8tKFS`)&9A{edFGt^H-8jEv6f zXQAVG{PKgSPzX*!nufRYc)1$Q%PGIki9!H)cb2zTE_(P1cEX00bDCNwI-}p#HdoO6F+CnS42wR_%!j84|sm$pZ5}T$5N}GCI!4G6Kr2s=)%qP6(wC2 z0qdVk-FZSHKJ>1*@ZD-`dxka^g@?q(|&t z_P}fe5ukf;&n3uq;LQB)sOGdul1R^J9>k1UdiD=BWD0hW&^x)v;W~X;#1wQv93+M^ z{`0pC>36~80{NyOU!d=K^Qzpiea^?!pNsAu5CI7e`l`JBsaaWzWR*^(U=LG}gn|a? 
zeOSrNYktAOU)fXiou;5XiZULdw3ilW77le`k+3m8NIcKKIrm4a616zE<0)#|WN0Tj z43Bd_iT&Xn2T5cp^(*a7O8TWs-8)B(`P`Y*4bXy$Ebejl8=4^r3&W z03a0}xt_2R(Xgve>L89GVHYN!o{Z3wO04iF1bEHa5hOO}b?XA)flS!+bxYGSM)#C5 zw~;q5uXxI__DXx-d3SrrYHQzpkT^FOq&RBiNc>}LcHMUWUS-Ado(`@R;>+H>#erEv zogmv<2?(-OAV6GQ&x3PY?_(r9pJYF_MLT#J3}C*K?zKh?PK0uTge>{mwd2pOc$1}o zJmB3%eCh3Bx#Gsot!%<*hw_}1JO)v%8bBQ!M6&{y4V?+Y?oQ^cFpr`auzn7j-bnJ8=wW?!x>lc;b-6` zLuQZifztL@V1$`GmDdk`ig9rYqizW(IGMMrRWNQeru2Dv-t5*plkT{JR#y;v2n4rG zNbBy&`$T@vH0u&^XybhNw~M!ALVFw*t<8))_oy1-gk;LSd*!5cEO`pB)o&LDO?MvFKRY9X$!H!rFn!f2Z?&w+ zuUr4GO7jP&^%54Ipy^2)*hzJ7wEXeIWbK~&_F?T(ab503*Vigu6W6b}C#QEPTE4IC z`=^(AXLoJAe!Ke2dZ%YUKf+j5+}dpqK_z@Wbs*P$*zn2#=H*{}8*sSqa&@v&{p%*@bDP{CfmVMEf!B4c_BM`S&EB*F$4K$`<+Ccfkbh`-Cj z^txj5=o;AG-z#E%pwI;Zf+?(vqfzAkeDcJsbWXthd)vc$_3R1Pxx_BQezc^Rqjfhg4sikoLHj2}_XKuJfC~=0Htg6EO zRe9n1!cjCU>lVdpompzLEVLF>HYx&WpaFfGV|{!$&T0Rksx~Dd2!2?+HYKdnU-^N- zLI?--me1mt)!Vmc=oYr~S6IG$d8X5DFmbqHz>&RPUbu`HW{3x6*!*ndbB64({kYq~ z;U6A$lsaR)Q(rY4ZR_NuyOV7-7jj}{lj3!F?-kWfBlTKz8I7oR&)E#o7gbyz@M2*l z;^zEg!Rod>-E+MbFaGjI+~SXJ?bZxyFd{bJE`4gXZ+w-W=>9}tXX%?1m`$3 zdVep6s}yihXj`15+2);j1{hQJn0uPkIB;Dk7u{9P3cab}_G{m{%P*=yMUm0Nmc}r8DXv``tHVE2!tEnnK>NS|5 zrl#e>7SnVKEsM`81E_ssyik)9dY5y78=(WEAHa{5Wd@t2xVa{Gm|f)W=Z6Hh2trlb z(#N4}PTC_j^Aq?|x(;CQuExh3`*Kdt71$G~q=Ge@lsh6O)YjG}A9%~<(E0f4#fy-d zg9bvb7IyY5oN_$5E?fsPnt88FUFsI1(ZDvYe)*vE!yu43bLP;2*~pl;PZ?W)nIDV7 zgan+(1)&_ZWFdB1us!4x(WRe^dUBgUrW;6%>9N4*zc6T8(P}UKp}qc5 zXeBJ8Xsa~uF#AM=iAu1BJv#kJdYEQ&xW(s(_3WC{QDT6E=+NrCiR|wJ^u?BqcZYos z2CmYNxB)u~kjG=EIU}mb-QS~ZpmD#GPfA1Ka#GQj>PHq*_*P3kx zIq2P}`;)U;+Dx4=!7$XQcK7Nd;X8Y=qi_WWSbl=y=QmhqMAKGTIk%&4pvZ zDUB$Hq`_HbRe<4Zwn6HD!YC~0fnU;Qj6afGU0f{dMlZgF{KLF3X=z3nyJzO_WnO2Q z#XLx`+E0+1QRHZ%#Iyi@tF=aGkOMWdZuRP!8AUeeec!W1?#$eiwmfRslqtcci@g_8 z-EsT&aW2avo+AAREDdT@LbOx@(l3?i2~+dpVhf7CC&BJm7?2i@NJ~#QaESOxKG-ZK z`>wRH#AD+{Y4z3r;;W^VScg*kk3>L5IM#nIpq) zMJvw)2`t1B{?jMtUB?`o0)}ecS;gAvt?eH6(-R-UsbKN>*iL~~s)#!(PdSy4e~gI6 
zO^+Bp=99vh6>j(By%jVrtiJ0dMvx~;ni7>}fAN>5J~z@yV}!1=vL*!n7?_}2G2;?w#GKefK4&MN7ZhOGUc z{^0iZs7@(ve;YJd#>04|{c{^@b)#;eak@+pbz3^rvK%==91P~UgBZ)BzL}^kPCUdP zu8y>(0!7USp3wI3pK$4fLP;(C@Hr^E?JzpE)!#YF&(j+paOl7L)yST6Wy+7~syw88{`ondd)N!n}uYKQ!-}c*n zn*i?M0?v(%k?xaw|Gp78AMLbtlm7-}4_ob@9osu#dcyJV3uG;u)(8;*V~Tn*E^7HB zk>7s6C+PQDpPrMJEGEC0)Xlii9};B>*eERjle4V6j%p9LBP>H-ort6`}8@*3nM2bJ$hS#A#=RCdNFYS zo5avidOV+Tswu_MF|6b7-$po_;QRPUxiAqf>E6t58+Xf+kS(dIE+E?IkdEh?-J4kh>?4O zmH~H^y~E9sHcBk$s?ycd6B;x~s33ps3<-(+Cdl0qo1ha(kYPyk5H{_sLib+1gz@Ac z#pGOtWo2a*6`OblvggPvgPoEzb#*NTISFQ&)0q*A75B7D!_U58@+FL!z<>q2fzVC`^O7bOaL|e=@LidVBsC<{WOjI7eEGe5_ZASCLw@9(Jj@;jIUF3ig0~N&LP!7*nc_pC3OK~t)AyD4 z?Adz$n)?IBChM(vlr0}{7s)2#a$&IDo;_gv1C})cyHJ-A+!Ph+DaA2i>uglk*uj|i zKD=^c*IvEcmo0l!Q{#{jaQ^&2H0KE&6?LqrxY2odrcD8<&F@v)6yVafaM)3%9m8C$ zjgw_|e|tXK$dkQ`a>;WUgX3@HQj$k-(^Yr=iUr<&a z^P^tp%_?#TSk44fqc}kpg}(oz&^%Lt*w%y;dc=sU>=ugR zzoxYO{HgMJV9tt>G@fJe(9<77fqU6WEFF+Ma|Rb&agR1$4H=(Ej-!2%eyu|h^C)Vu zu^>x6)Zq+KhCok$x1K$crKQiGo1uRM_u;-gU>AeF5S37}K;Q%`KR{oFFmO>cbA4Gul6O4%EK z4^pUm2rvbY-NZRZ^fd$sg@P@S->}Lp6ru*~BgF@SIixlcAQWn=Fo^7jjhM2T&?iaV zV0~`@HNN49Fb#op#ouw=uaU=f!9}itu2Eg_XCAHbC2pQTT;<1rVeUVs;V04h$(KKPj}LS&F-0ZK-60}u(q+3BbS4-rj;5i|)~{B-+#V6a1M z8=N4j*>Q2T|JJfE@E1FG?OKK00ALoLa9H%kNo}qddKRDDrTy#kv9@x^&Ol~^iubm= z9Rm6Q+ImLoH~HITw^EB9AL-`ALvk>hDfb9G(~`4mf-VIOWVi{iK6#9RaqFjtZ!L(1 znSZkbK|%N$!YDe5nZlM_{aa!_m=u5JKr& zJ+$x8t08o&V8v^@j9EltMuhF;sxXBnq}9Ywyikq5(}_A=*wE!#KgyTDO0d)({8AVI z)$z`q^O)sJ@_ja0(m-7LePvsJVVk0v8v&X>+dZV+44Cn-V%#~l7b=cMXioXYjd4?>$gD{o{zW+ z!bdV#pu0>f5a$I)`kzz%Umb2&7vtq~D9jfr8X`2L{;%4Py?AiH%a3m#klwq?{&JhI zs55lvv>#Xe(7VRR(Ns1YG>5Q7w@)AZ^8^J0IoXpu9j=b<1wlG=SRu}f(d45}Im~+L zI&2V`3;8(sHVg8 z%~TLJvLOjT==~gvW+j+DHts>S8{{?cb92H3>X)d+SAhE?>;A&zDNT_ahWjZ^8ekDiGTn9C~CJoAHUAek5=}@WQl~B zI24Ye2^JJXha zI|gP`pt^AD)-5_@WMyQCHvvJ<99+<$>T4HHA>P@*|Em3kmbBDWuU`^Os8fx>15;Z6 zjn)fO%1))$HZ(y0N`|07ghKHbSHK{jsn=~cT-EfL)r?u@;*{aDuT}YzQs8GC)|_Np z0)q_J|7XbI+v(I22+JUPTqAGBt4Txg7_wL$DU!}^las#iuT)tE^FR)edZ3OF9h7eIEU54 
zb8pd2=fZOc2{}%PHUO&zJ=L99ePPmiT@y-|pl?~D8mg)y871i!=@ZvhOFhSQV@ndn z3<9ZPD%n4u?|0$R;BOLp21!6Kcq?yEk0uA+x8Quw-@SCP`cnr3+h~(myDXS<{)Da@fGUd+@&p=s=3lqH2uYM7$Mm)U`h$Kw zq*9SJ17T$QB}KD`A5#527TYhNSL7;R9V`zGC~#Z;G$}T98*@kLHzA=reDvt%S9jFD zo^pFcPR6V9qBn;VwOrO)xT}5w7X9gG(1{jRW@#)9{4AkTCg^)&NX8=c>Xz10N}>i+ zE*sN`vmtm*xwVO$r36Tibn3wL3O+}F!ug)_oG9dC06*jJsoede!wL37D2x-#-*#0| zNrR6;t^`(dt~W}w)eHtgCyl6<;NNB&ks+$cLJWZi$A*2s=ifSX#E9q#yZC&$c6&$e z5Qbd-`GG$V~sM68^Dn_j#YV(9#Sx`42WN9030E#ZJQL z<=Fyiz<*u(4+=7XMh*a$TXWeALVJQZ_YYoXK55Z^0Xu);Gs$20tUllm)Dp0#;J;9{ zJAYuGz`xjvd58Z3*#`c_X=}0N3`QNC~25Z)J58!}FDnlV}1& zg1t~MB(b^PID-M-&n{{9uGu(~C6otKCBcn~8I(_-!0NS@Qee7yKU%|8Z>P5TPu|bPgObjjrTW z^ipMJJ-UCNWk+8!$99!UjKbLm!`E*Yd_i z{rK^NAf{WlZd78VeU-DG6T@LnNJC6fH=r%i=lb`Vj*dLvjgK-~i(Oq$(&JlK2fcH1 z+i&HL^1<6A!VF*Hc_Dxs)MkgZB?dxJ=b-J9C(d?uyP$X-Ia2N`D}DWPYAWcB{(u3U zKTYbRtBcgc#GR_iD65PcV_*f~0ehUvYlVluin8)qu7m_hVZ4 zfXF={xukE~IBU(C6f&f%SHEMzv;5Z`vBnS6QJO$tcj3bR0|#EoJDhp=`0)&Y?Q3P? z^HfQ*vgS{o9B!!;E}uff9wS-kM&~2aQ^cgI6B&w%I$&n&*thldG!uOsGgB7c)=sHmf_q}9bxl?ZH+p(??; z*0)cSOM+1m=rwx%!;%t-Z=?q`bd<=?B_yb-t6xY+2r-kF^fB(iG}Coh@B;O~;biZq z-B3;7@?0t<`gJ`aK_3{yV_@8b+8fEHKS_$zE+kf}mS0==>(k*ZQrPb^I!v(l@@0%7- z5>Z_F#R2{MQvhsyB~>+tfuW8wW~|Q6@^9Ph;GpzklzD1WnfugPZ6(8Hw+V_^iNS{5 zrA1A@QnE@dCXhQ6FR8W5T`+pdCo&u9&uZqk9H&o*0D9`D*w%WRe_(+NiUc)wx$QwWWEgw8`$<@j%? 
zF8I>jMKAfe1dW_D9V@kpuj^LhL)aPR#kP3oSA*}j6N!ZrH+!E+YqNQa(QKHRm6eqU zD1uJF7Xd;Ep50zNw!@!GK_PZ8mZlo8A~5oTb}&h(n13P3$Yd| zA%_Q(k(AR>R9#reQwKFJHcspHW(+QQ%K$l1Wx_kh+ZFn!6B3|vZM?QSz-^&X^yqJS zVcJOetXd&Y-KL`G!OubJBF(oy`p1Doa>*#y9ow^SpKG#H`ZTe0^;koZg-c3VO_G9Y zW3M5%>jwvx4e2PV|8RIoTxRdZ#sry(&w70HBhyCAXyU}+$d}cXE%;hD;~veKd66A% z%8Y#huJ-k^;mhv_n!r?%?pi&kSUg@sRb@ZJMAt&1sV@2)Q;U>+#Ye?O_uADO%GYj+ zi0C<7ep8AEcU4{<==qDcQ=zc(=FwpPTA;_5a+wWi@#nW)G&zTD!a8r+msC`?Hx2+Ot(oH9h1F?->{ zhqSa?%JYyP0rwi?*QnW`DeAe~tU#ibO4!OfAzZDIm$tkE4Oi%6p>|(B&M)rxaiQfN zK5?OXdr{-OkhMEvQ&Yc;eKxsY1~6*i_g=k3Ss{kMl>-8>IOq>n+2syxg!Hl{)KhKH zn1WL+zfJ~NA73wOvxCG#km(s;rPko!QQi+BN3FcYsXKSd?JdfWv5*|!Dch~nDcXa4 zclgVm?a=s1$hKCnc)ttT;HXV5!OMjhh`Bhpk?{nj&WUv|mH{pGpTSqE%M+z;wEBQ7@P3qRO*HRd z!7wnw6Y0lEX&NdX4aaz{^zbO;aq?gHm03Vjyw2*2TPSK~{Tzg0I79>Sk001V?Z8zp zuJucBGfx<1V$y%W0FLp{_RocFXBW6Mt{^_W+p%NaHiqkG$sU66G&qj0fkwsYCRCg| zbLPa8pO57>qjq9Hb!u+R)}`VriX<%+G(@DR4NuIb!t2SrO55NEJJY6J~(yq z$NJ!@)25+JN+swd_MU+&U_mm<3gVO>Kh`@sI%2|3pE)CpTk%D8a6t&KZ*@>XwqD=9 zPo6#7VJ4F^>7}xdp{krwbb0eIlAc2(9<0k7?~%J7=WYF%>yoT0C!4z@4y3^|9=Ycr zaa1Ybg^R=7+eJk~qHRqMPu31?oV~Vjv}MDt{7REKw~C6Exx2sgG^HQQW7#sY%RNj# z%Bhixb(}dfJ~45ZnT&y=d!erAxS8VMfAg2`ngP0PJ0mH;KzqL0JV!^Ks=J297@C<$ z3#eIi8rV~^1R(R}?32ym&SrX2q6%RT&e(qn#>1D#-Ir?5v}*e>IGOxJPfriuP0s`x zsik>&7S3e>hH(yO@c-%48kP2yiX;cc25%+jZabXc>{N1?ecOz4?j}7s7XemrU#E@~lsc%x!BwZH{x)2qGiMZC*^cR9ue{=`qBAl^b_&{RCL;=1 zV41%209tOzPV^$AZxc)3u?icp+f42VACo-w{N>AzlO~-xaUzdI9yzx1Hi4!~cb~;Lk(8sB5jL6(qdw~^w!=aPZik%JFB~RG{p)(c;(ZLv# z9aGOdJaOU#nZwyLXZq>uQ>zmj^_0vWsC+IX1COHOD-k~F(Zh%NF=+i7oS27M=rw3x zsf*pqC2>FN(;^QY>Y<^rYSAJF)2syXhms?;w4DX#d+wAe3cj7@YtOG$PKPi@|;S>FqmbOyj4ZfNM7?1W9dlAJtFj7#2e zES=szJaV!=4^8H{t0 zd+OP<%QtNrjhQ`s;D9m?vAkcFBVpE)+x4U`G95akUA_#g$plS}C2`}Ts19J1w0+)c zzSuQMWUg>xbWdS*Ue?u3(si)03FAOF3>38+J^DK23*n=B5130^GTxAzrc?n5tJ<|w z$*JSVbA%kC)BLx!pLH2_Wx2s*_IZ;RXV~O!#<(C2?v)Ga=k#sAMw=X-H+%Mv%X2{UYrVkF zBXrlUS9cu@Pzs}y1`JMmN{7Iin>S~%sT;aon?850ZPHC0QGkYJ7X17P>S8U_FPyCV z-?cn-eG$qJgu5S`K%79Iy!-oOGI{v1#`tI4)mW2 
zs($y$fi$CNvcbtpp~ipvd47|1NZ*wWRMYy`ZR>}F?L}YoJ}pjdIj_21Sc3d#^<&nC zdYM@bH!;C+AkvqU=T;vSX|LT*>YZD(3=9m+ z%rM*8sKYE;xUh87OCVi;$VDzLCwLncZZK$p2b$=Q+N1R0cX2>4@9~za(e7QT?o9MG zR4p;?A`E@?i)gky=_x zN>8TyBnFy`nPAfOeV@|8w|XMc?vRaY7ZMW(kD1sBDNc6E6=6Zh*om$mQ6z z{Oi`RgJDFWFDglpuuN<=u&<#eCXesmSK2sAGnMgPgkdAun$+m?Up$1JG=2$9zqb&xjk-n}cIKGm;sHa|$bbRa6KtGmC@uARGC zTa=?B*ehi56B3+e&I~padyrrY6cCa;TfL#i#;JuS1Lw`FKuK|KnP~p(*|SOIZp550 z*tD-y0K{;k>_Q?H_yD@XH?Swr4v`m(JUzcFSN?Ws_7;h9B@0uo9XK$<$LDgwF>TuO z{a9=Q<$I)}m6gYA^_sI}g-!k|z7wYL9*N(IN_7fK)Ji25?k1UuiFObD7fM}(HN4w- zu{VQUE?*udY@2bCZ)5LNT09*7f?5u&hE}A$Nr5rcn4tydV-+o+@nIXKcXbFv?R}D^ zW&4J1qlZM_fnDiQHe)~W;iozqDNAW>Em#5YtA!AzNr*+Vwu@6rPt3O>!%PGZCw~;j zQkUS^UA}U~F52i+^Ugf#Xxcq|^axS=+FcL2`c8S2W*hDQ z<4dU7pe~{qJ0z$y=c`?JFEbKC_^sxFWo|?{yXU`kc5yKxi!?b*2Qh*M%G6)T9Wh^7 z)GC_`z&^-bwkmWqB-|Cm+-QJC*bO*=8L#i&y$ds&J_Tf`^68=DNl8f{^c#1zvmDP! z*|x16M&V&uSv1^c%JSvo{_AV8{%{Df{!choE0!-Ob|5B^SWq~Fp29r%w_ed(wv1!; z>U$}b0nD`mkO6>jHHyyVY{_wd<+1C@v*<^AKUxD_$?6*&5ThkH<4pDI-~a8KH=C0C zX-LM?iM?QBSx6!#gy4kVAWafU+H2pjdiUmyqVv{o0H$=LzkFQs$Cmd9fBe&E{P-K3 zQC!EtF1<_}aSikr0^8C6IQRPcVZKXx_$Id1hJ~+QW;`1thO!Kx2*f=Q!{LF1m@2yh z%vG-g1JFDA-|zE>d1RTJ^uAJz5GzH2QmiE(yoMy%;Ds{kJ(Q*{m6fvF)>kt8dZcBcm=NvJ4v-KVUW23mK+Z_de`-CKEynve}#lKnwpmS zacnz+0%Ag#3SLXUH&e4qfdq?hCPqb_mp^&I-?9iG2hy$hsczK;Jvrqtf-#6iTEgp$Chr_G%)0W-{Zvz>uH}Bq=A^xEC zE4}5i(GPqBC zE|qobzN3Ju8CdVVo{S&tX~Kj8-aX)6xz7<2;MBy#Rm1~H#S^8on3-6N3%qqJ9cjm@ zQ+IFQev?D=)AXHX4x+2^Ou5}a{E0>v0N+>a(GioChpM(o#TK|EQ;!kwBr~xA@)+YD zw#i9}S`};dmE!w(>sI6q+&(cO7Hb6~2db(hii*{s2tK)lsEoxVJ{v&0%{&|H5xwm2 z5Oe1Ad|_Qd4hw*qTK`QDQ{i6>BF7 zsFJu&4Mx3f;enCD>@4|=#Fxb@T>89ve#@dtFG7lWa!gjNV z{x~|~q@-p)T${W5z_0=me?j|#gt4rzbS-POm2PgL4`lT$rao5Azo!NTA3$?|fJnw%9oM zMj5@bVPTz{JLByAO1BOM9l$`eH@jn`r6o0Hu(akX@uh@zTCOfG4o*&>6bcXqo}!jV zD>R4<^+!stvG)3-6NeZZyQY+}N(-HrE@f;Hd3A%Pc+;t!gZuY~m9^T}_ZTDy#MvT( zCYj&4jszr1@obW(B$exrnROOvnppe^e)zlX;Iy1eypN`i&KiD>rZd`P0IAI?E-qWS zvY@+ycpFBGEU3JnWN*g+>OM3`;l;eqh4qrH7& 
zfywoN1fk!TzGlLH$ZZW{`*rJdxR_S&okx#q+=T7EjPmq#p@jeyY3xOztI@*#J-k(V z_Jn-u#AbaDhzazUK20QQA8x-`2#AFc22>}pM-Q#(bhs>7@R7;c^X8$G<+3w3plb3_ zp?9&UVj{hylXUms!V<0L@g`Zdhs>X_C%ZOsuegCw0(xf5=uX+Zxx2S{Ohrh^) z*;Go9#B2mAD_@czLy`+8QF zbjJMT6EqXZp&Eh#Zi znh;o3N%>|3Q+g~C$gdzD|MSms;yGkf{igAeUtWAbbj6T;N;Hf2#|x4D4lE0htVg1v zd4o!RBP39eb&>>8&Nh3qM_;~vMd6w~*zKKC^I*{ za~)X$bK<_FMJ1QL$MAOqoLx{CK0PH#q!<8n7!Tf4k8mPCJ)B?DT1%`|n)^9$v%;q~ z?ZpuUBj4&b*@=XLP-d};C?KkR@Qra|VA4R<_^N?J~enM5{Wf{YfGF!Q)le#sO* zM?;j<5QKcZaL~}zYUJ~5z!7>Ixr*;twbL9!P_=$vY1@Z~;|}WB0KfBZ3+BvOv2ta& zS!WN=wu_imrn~w2_zddTucKJ(oKl8`w4XWib!%5&D@`%LIqBZs$jE~P4JctW9d-TQ zP++JN`#;6hHOOIcE32ke8md-)BK5qO&c^s zURDX)edl7K@vMRU;;yg}1{KCB#Ze*0i)~d@iSxkJ2QNz91Ba zB}ojgrfoLhcaSLT%NK9tLo~KwL*YzVh_TSimM_nX?iTQl+6|!t$V?&nC--8WF|u^1 z^np^?jqqCXVzwF(UR*}=qT%!b-i$hS3U|!5r0kW4 z(YF5%b&0G7eY~wiiFgDMQ`kp`6iu8NIA?_Jg z;DB?=Tsvs`eV0=dgkh+vwj(_TrTCgP@t#XEU=HNct3=^24!bU?HEPkoVYI?#*=8ma zfp_(wW-mCtMjdCha7hm+RjXGUk&nd#>q$m}y~7CR)h;s@)IcbsLKzG)>mli76iwND z+Yobd*NC7}G`++E95OP7WN-;t!@+~97zbOaElF5bQkBDFHWBVzn?7%zdhgy-t}X5_ z3h>`@aB(W06HF$Niq&8^6HpS7s{>^uNF*^&A1{?Vl{&x!=A3^>mu8v1=*RIW^QKOP z;1nO&^CXP@>KHL!A_Lh?cip_P9oUZFHmO){4gk+j&Gifco+UoP(M zbBWX>kgtx>^1%3EO7mOHx=HjIgbqo2?!BYCcbCR>6&k`56B{6aGs+!HmPK?AZLDfX zhrssXR*b0BUal4nKxeKmNvc-2um-iXh;?1EglT%!ck?!o+p~iaZ|wm=$ihNUV5WWs ze~-HN78X##?m|@LRK*6454$2WsuU6unJ8|ZZN-IY*g8cfu!IZ&V(NsX!%9E~I83lp z0e%gX9`dhSV2RMTlI=g`meDgj6dGaY&m%76Mn#VdkbVIIZuVd<5vXyGF8@wj6MrJ} z*gy58eb54p`e<-w+#3*V9~2M)$l*t`S<0LUV9C&{;u z<&acJE1L9`s9g%Se(CF~g$EpjW~tFj&UL<-fK)6i7~T4 z@?DTcS-O|bi|OV$fEZr;OK~~ZJw82r@`N;}0j4?<)k=Gd0N8T&bP-2@6x=;LoDfVy z=Os{cPP3-F8nQ5E05=>24CEpX?53s~ndWyywdkM+1O!m6z>?GT4g7GeDZtrYzZS&4%|7P>B7w@1dOey& z-JMeO1}$VHzoVRpxh0XYu#k4oQCh<}c#-LAK*%9X{U#-=B~FkGZf_6=vwu6seX9OG zNT4HQ?pAw}aGgBq`Xr4<#T;VT)8kV`(l;)f{8&-pK|_wfKf&&ho_7jTQdQM&aW}VS zOBCO~zZwYxc7+28GkhjmOn8AJ^K{A8ohCzu5Z@3}2s%-oORI;_74>2pyl2jEmyR9D 
zQRJd+l{fa+G)%u;Z_sK&6)+oS8WboRpN@f5V0f!HhnVsL9qjET!gC1h(lxL1>qH6IMAQ}|4phETLa3ZqULD;Z@aBhS<|6IFeFw%aGFv89$3+{zoss!D{7n9uU#WB zOQxst&XEz*Y;4X&tN_g>m&T4T704jTStNQNa*gzn`covq*{4sd>18RO)nYMaN`zhf z12>5k-zquR(LvTG9agFWKUlL^2w*&e3etji3+_@WeW#$} zikN_aYsJKPEtT4;)ABiILsThb6mcwKo|=JA-7$_PiK`xq1UmU3rzCTsVken@;=vnE z0O--`%BV@!)}B+;KQ%WmphpVU%ggm^`^lgXXWiYEK#% zNeq$l2{Y>~>9tTlPSFCB55WAHK>EkaqOSOI)4sDtp?QlatUJgs>*QoN%b5+KNz(vuNYjr(QPLyIxVN-8SBW-<#fb>vza@a&|5EY4iK zsPb|sfeT&dUsP$vL;vO2(4}@jGVu5z^UA1o6o;Z~D-_--|HGkyl`^vBa7>)t&c zL^mM?tiTd%5D5gaBmJK#z%uv_8sRz`4it$LlJ(;X!+SpQ2{6f}m3g_F8)Kq2H)WPj z4i1Q9GijRn2)4y(AP%G92C7@Ynt|N*7-;oy$3GX`m?cUs$j>Jk?&?upPA3AH z!tcu;I*=3r(q!^debvJ zAYxly-fO;d%^UV|{yTXJqJpsi+1~hJE0|a-TN&_;gC+>Q(M|~xL-f*KO-U3G^Ad`@ z=Tb=^n~c@N@Ni6~OqftdO30?*9M0P&CtJ%YASWj*XTX6KB^25XK=f}O2}t7=0Lsv{ z(EnikhSpzKL2sTM=tm+8_JujX&_!CG1l(XpUve5qNi6rJltm4FZoTARW9@FL2Nt3^ ziHrH+^vGUGD-3b=Tmh1#bR$)N{1_pj zSPj}wy&)NfFq?&GNMivsxC27wnq3|JkYk)2IY(}+f9w02n){0yc|O8bAPb!ruDUJC zEG8?JGgC zoWx!suD7gcbM^4ZUS0Q3pgn}fw6x*TwnD4~`Gl{>DZ+z?gK!D)=JFv@24FOuF73RF zy!gbfl3`^HSa;lKZ8p=}pDYdJ2=Q9ux3Do)P81 zIco2==5MHn=E*BGZ6Gn6$ncmSV3drGW)l1wC7BoPq&9;J&# z^EPfAwf`RmGtY=59>qGf;(iHN0N}`_Sg^YC@6g({EEp4OwbNd zzF)!eRBUYN*RQNSQouNzVax!O57yDr+A>77szawBAj@;7P8EFYOI_c{#H3+}69x{j z3XG2+UZ4|pwmqNZbRK|DAgHq|<}<2BWo*~w%a$=yxxE%_{VY;PT0bVojNEUjO<&2b z=L33(G`HuwnLGo?E-(K@$iXkF1m7U!oQ2pAf>5Ld0Pjo3k5>T^NjErmN$T9OBcjmaCObQXRc9c1O%tg~Hab4AG$5M_*!$VDj3jm< z50L8HUL@{5S6#~eZ8BMu2i1pkReUHr~K!$p5DLjLgL`&*1~ua zs-4(olf!~(0R-S>QKQ<0`T^MrWdXZ*CJZ8}WWGXPlz-&u{!)Ghc=t`M=S*p_oZR~% zN)kyxpN>@nwy3*0IXK*}z2dWUX*}f*4(|E$@5wI-qC_qtEy^Luq(v7em+gQlq}MPa z2IdmxaWrT$OHcHN3g%I05lnM%bcEe{9&HWc1wcf#49j*R-TalGftd?wekzC`3qSY~ z*e;%$0gO!9(RIrCN0*swU`PmUzHIJFTcy6HX3ou@@t*FS#pgfJGz`H7r9 zy{puGC8u}wmo6DKgcdHiu_WbQj9K3RT~XttX28%?szb1QA+Tm)eauc=ya)vM4aRh@ zHxU2-d4JoD)E?jUV$;{q_>(7JQpUh3%ZtURFDrEF1ll5TjbE7Kv2~d`MafwMXUyze zVDQ6b<}!0Rcaxu#(P)JJlTuGO4w|G)tPOZYR@PvRF{wP(S4vc4#g_z@VD?cABZ;k~ 
zXC@;v)7NLb&TebaEc}DtG${?GJMP|CY|7!y?=Thw;d*<^Y7dcUJ}WD0GV7S*0#r$r z4YNRt9CN$#)q}I02F{=)!&h=J6)j+1(`)?g%NIH|YRCg|lgHE4VXJu!j4Rx!cEj0_ z4<^7fKy#rX-b7dv_$augMY=h2wnm48wm(F_b3L7|>B}DI_3nL4II&-!ERd6Jr}dC@ zt=3c7Tn?iOVVQuPe}qnvziLh3`j}}UlGzOW6BNhkzM&bz<5_A)Uvp7_r(z+ogfXT7 z4+Qgf!C!|3Od|}bB4TeQTcf!8qVTzN>4aGO_V!k6DE7vhY!hQ+Y-w^*5{)CV#6-Yk z$fXlkG4)!UIkV%L>(^r}wW&MPB1$*gq zH!kzoG@6#Hu?RGDY6mjabj#2_B&MM=E9(bC_2DMrr^#w5qKq6yrX=f4{M_P|X@Ttrq5jONQ^P3f6I5|N%1cEGfHlNtjbHnL(ebu2 zHP$lrZU26d*zN4oz(8hLe)?LrB?niycK;p-@o@gx$Y|7xwTz;f!gG~E8oGxIi*Ief z4xC_Sb7&aASw6Xgt7 zsw{I0j06S(!UPK3VHOeqEa~+zkJ`pOkiIi#I{IoiUZ;9l?>!Qg5P@r?q%?1su|-Jn zl8duakjknfV8QefO#a%iR@0e43Cx#}3UL&&1)3#)gHCZX>`wxwSK*~39}eUP2A3e+K(JsLEmzsNi= zK&BN9is+Cl zLCM>>=mj*oFZeJ7P3cXwr+F_^cBH<(&{_h{f}#Sl$Y+`A9if2GVaf28JK?a@7~w6^ z(AZ@bqWPEBBnaIB>b}+(AhJA!Wogr8d8**Y|6Ok}NDGG}WHHeG_+}_c#f|hQEd2==IZxEjrxF~h{r z(39R^0ajO+h&1;cr{W_pLa-4KIizU)`PA5$zM(lWKE7{xSYNr#_pFAh?o2cSUmY=Y z=-$kX)T>vcGjC~xjD(Pe(<5^^oa@YtWQ`H4bxlnV`uV=5;na-sJB~u2I%uU5r^2UU zSTm=e?L=_gUGS;aTaA8X6KZUP1JE$sxZ~*y7sCH`*2qiyg;vIl%s6HYz)W-a``542 zcre<~ZWMJpv8%{+b!8L!rBMC~Ij1^-G@n5W)$H3h*i1cwgq)e0N?N)G2EBXrnmcRO zRn!+bWb`k6F@2{&V(_0tv{QL^c{W+T+3BzW1_mF2UD?lIyz>FKh@M`py;l{C|6w{A z#lH(lNlYxrD=qZ`mZB97wn-1~oytDK=9iPRwYElD^9PEv%u|>(@BmgDVnws6r#5j^ zIPV&gs)aQsmN@ngZF-8(4`3_`5N|81Z}Wh#$O|WFrt!~!r-8ln2P2_o=J~5rwC#(e zh1*Di(@JycNEOO<`}XB$_m$boNEl-Tz)2ta_UXeWvY9q*C_`#ao^0G3 z+svClNpc@Vp}+LtZMpW-r@#E*pUs27yaDbb9pw^hnJ}CpMvz(mX=xsrZlKDNQ{QKx z0dXaqysu;mh@&N#ML>bzWcEZc>LLT+7JRI$li4&vB&t+c64!=TRUnUR(CAj(_Yv@ZrNa4FA^8y&Fb!HlP0f*=3<$Uru&B@0b%iaNwPSf?@GIu zHz+PMBGOuy_yU9Y`CDi}4oC~WpPZ3+@kLqLtZCCelPZ@x=J6`2-{z=CE?|1?dJGp^ zl-Ea}ebugZB2DRzriF>YFqU(2awsZFRNj{WgBWdG zU3D7*s^%~{ZY*SSEmsO3$as{OpO1^{s-{--yyKfTMssqAWo2bY{qm5053&aIOTI!$ z0^En5v(+{E9i95fi9~JP?pQUG=Z~c#gjcWSH31Ir)#hSQAyZS+^W>Nt(zgQugZ_g& z5?z9*UHz&iSwba2dvB3gpiom-4~&e6P`Ib!lX~Tf#Rkh7t=-ndzQQqO6-Z#tojuFc zfgNVuJfNgi0QBqWy;`BdJvCmQs4Fr*Z?5)R25%0!0;z@SYXn)-pGf7t8`bjLv`#`d 
z+<=FvxYu4buZZ-#urU5z?5J_KR5Eok&U#dH?(y|l(jj)=tpP$Znys;L!oBk-r*XfM=}Bf{PoI} zZyX7>?leb7?AJo-;w+)~>hT~yoHfDiz26xdCKZpjx9Vz3;0ECc^eIDus813XM-yr} z`Q)$Pzd2LSA3weV8q&}mTE-_}9XM%n*Lz5bu1T+m%f3x(KymR(jy~DD4HYZy_}|;N zd2O8SkSG0S^fFZ4DPTd2Dgj@mQaHJdS6+_);UajX_XC2Y?Gj=R&??$De)?x@{ynKp z2k@PXIE8D#0H#k2CFY`3O|nhU3=9@o!)|-FM*)i8ehrYPw3a<1_&+Ww*6%c;aP2)$#Z7ovmjH#(8g66DBBL zHh?QI&B^I0IhePP&s7}|9u99w;J5^uT`6A+7g#K15VEnyj|D06{r|+3zJ2$Oocrp8 zL1`I%AUzpHKfh}Was?2}^~#?;!ary3SRx$h6z}eju@L1BnV87y!WApA3)HCS_sRu7inbmeJgVuKlFF6f%MUsI(#@Bt4a!m+&O^;7dYU5$~g=SPR?`njDc7%%`wz44jgVuh?MD zcA^SM>2+a)nkdJ^-l{De^Fg2ayzI>}2`jTXc0{j;D+`jgtPHK6M2H^=;O7k(E%Nv~ z-DV`oBt9Or3~&Z&WeN)pmuC3z!ZDWzsTleDU&*N3@VslMbs8N<8}7& zl?;{sj_ik4;}#tvBt~>oS>~<1+(looFmVPhx!*P^kywGX^o1&`x0iHk2C20$nuw9O z_=H~aO96t(O0n5A))d5qxzRw_n_!sGi=sOVLM;aLyU7Ly zV1kSAjH4O`Zfs(~*bOPv$x-SR2EdN=c3ZcO)Ma;6RH1sfm^+}7Nh{@Z}S%b#UpXa1|YBiW@zv8#4^pR`!MWPs- z5^??8o3d-JOy3a_0*DzN7Pc2H;?=A3gcx=(YczvXPssZs&7(J|wji;Lk|fk;kW20n zNVeKe_iQWk-*^_v9u-fXsQ4-?(#zSaS1$n>;m=d2g3NY_-Q3;F=|d#uqNW>ad8geK z%((Cymlp^mA_AUrJI1SOTIX-vTNli3CmV*<3x{(vqE>Uzn5A$qYSbu*5DZRy#(G!D z_c_b!TSMXqK$sAly_d5IyI|nJfnaU82hT#qwd`Rm7$_0k?y*hF>tQv~W@mDEG!;so z#3!64q#NH~h(rUOO@8zSjU{={AYTuVYDdXeP(-m4m|CgJ%7bBVlPJ$faA9# zB4W6aQC>`tsmZEx7;qUh5tNS$oC#Tn$`$uG49R~iO@KM?*1mq-Ea8$GMOB>4`3dnl_26Z zfq?L;Ps3N=G$Q@P2^T8kHzTzYC!t%hciyl5-xhU~l9I@uUkN>c!h?n@D4;GtnuYU%G7P_(ipGS= zlLs;X?B_5b2)-*_X-E4-ixv@{3s}Xt5FMPCzP>f(7hg&BCzJ$_@y&b}emp&;v4E@y z(hV>SKc*=3fUrMiU0?4rZk$vD(k}s$eWjIXJpUBNSVav+`e7*#3qFpb$H0F5MnKmk zp5`IkVM@4VRaO=3BC&OivgQkid;b>!+TK9()0KCZx~O!*mtEU_(DlQHW32`=x#xMiu9UQZ1JzNOiOojSt z;<{S)sC5YKqfeUe z^M?2DC*(LX(x-p3fpW9?0RUHaxtB`NySHzTj##qwKu!KcL0HESRwc7tD*&di%JLGGU)&%EniMkLno&dc#i`8 z9ENw%ty|_@kPJ{V?D^Y+S~RsKSVIC!oNNRhUpi$-8c7=F3jIXiF_TD`0Pg~DLhI3( z_@3i}j^bh_0st}J=E_a6d`ZGAp)sF_*PMg0tv=QxybbkWL+s}FF@00 z&K#lj1TBSzf4kRvXB@m|b&Lfj6mkOa#@ zo;wE_ptxeP!uoqw{!_UXLKq@U&gZ&yS0Vff(K~p~p9{n8e7}?2wp`F?x;5^RrvrT7 
zy}QIvm0SyL29y%k(9`wvtIpAsQueB#ltk!AB9gtG>%H-J)>VVh9OmdG^2gl&#(aY!KI-JA{hs*w2PS^fe=4!o-Qk7(|aGH(TRv>%Z34 zG!T(8ca)chkJ_eUkPho*{vXL)R5l18_2aT_p#Fi~?Eh!9Z^IG7dXftgRB}on1UmH4 zB=PL*b(93m}gn_UqjyxesT8 ze3Jm2+7lB)jvl>xVKS9S&>l7`tRXHTN1HyKFj%-~@9f@PNXl#*o?JYJN*$>x@s97g z`s^Dn$+&e;X)aI|QXgz8J~GB`siA6@w((?kZ{H?NU%qnX1-QB}i&;Zfl6aGJnfMw0 zL)DUkG@D721TV#=2ZzZVvqJGXuM6SDM?3iTQ&{QwbU{(G%UPIeb* zS~^cHY=%3;ZWPB-Am%s8ERas35=P{MEobHE2;%!2DxxMxZ_5?S%k#P)KYVz^c-egp z{mGN9ug1?L8=;1N;)K9KEm+r9K78=omM54M?jFcB+^Tj)GN~9>`P(aOb;wZsiMQwN zgE7a&jeq6O3UZOGPSRBhwXCsw55-G9m=bK6`&yFqKlwVzCoR2Z7Whd)E`#--bBK)h zZ)VOmS|xLli2Ll>+k@U=a$3LneEJwtbYd6FNSvLu{_E|+!r;c0o}@Y8D;!bsC#d6o zA76|nN2c$O%o@@k&^MkyClF*9b`Pdvp$7B;cS61mUMuSPWgI1W{e=~Szma$7DxEn)cu-fd{goLL*du7pe0M17H&*)0L=i;1;9LT$A3PYGcr^}*2l7ujS`MT`iHJgOR7?!xI!by! z3pj$wV1bHU_o%mTy-$#ZKcaq3m-5rJ*EN1ohYvp|J`#!@*Ip8xDK$s!+Xwpymv-19 z7B8k$cv4FAQgSoeC!G4=wdb*?9F{>igS?Fy#~PzE2B^geoMhXwnKqUQ+EbBK1y5xC z$Q(wYqD;2=`*$3!eGRpuPC=g-)XP|xl!<$!zx=~v9>ZM{{sNs*uKGD8__V#g!x>|q zdg##4wA8&%XVlFaz1`qjY(<{QOxPp`DXSZPD}7%SrGJ z`IN7FKa0(Td#BC@Ql6TUVn&^jVdS(hF?%$pZZ+9ARTR(uTB0wVS}vXq<2=IHu+vq7 zfO4$B1!$ActEM9JkF70TQGLx~?ob9ea_G={A_XiZTu@a0LB8)Qtk~qKrM|2Ia-u@Y9;R*vQ`_JFT?ttx%=(Z$dmPNog^~2YHbN9bd~*TFm0&sl`$gBK z?M3z7?mR3lAio!CdT8JF-`_)7`M}}BJrxykExEN#*xL=Y$2x2=Z)D^*cGi~E3&#OA zzy|UjIq!71=hbq7R}ddBG~*DHTyJ_^vaVwEw25sGJ1luC5w{oijJ0)AO3GnUF#i?z z+J{2Gj)|sd zN=3$pdN~wIp~j@v%Us}G%_kKfGQ^9Nl8{)H!AE7Gx3nZ;nA5ONrW{#^$L-a<`(->} zk8t8U6Z%;%wUZXzk{yN@YCd4k0lwA!>Q|GNI=;w z!v$WapP%Wmdm#K(L^6w?;l3WhgQgZLw2{GLg>l4T<=l8r@f3Zc5G3$0?JM9>;nS!{a7^ zrxesjx;i`410-|lL6JM5bbj|U({ERMl2y>@sHqhCV$n2$Jf<7j+1f&Qg!6on^3c(J z`vfw{sdZAf07ttq#Fp|ec?pFBMm;exF?Fmjbe>B9g}67$5LC*{kf^6vlOII^5)(9O z73KKmZ9OXH@k8((aN?-x*Hu?1gUtZme)b!M$)eoIjor!1s{z{U)+xv;u49k|mH=u| z8iMl8vI2GN_isRah|k1#dQ>)h_2RJX$o)~W;n9;PBy>xdV#`U!Aku$Na)&6emYGTs zivi-!09ythA3kyfy5c2hfw-XNn{6GXL@$EIsaJ%kw$bG?l=z!!3gJAsC9UCrLy_u} zmk{1DKqG2M8uRR_p(2NaMwW;s77!UNg6?C-wTw^@E$>X!;jCb`YepI 
z<#=txlF+yY8^#pY+NJSDowt}vphRL$@UCNyLA9}pwMT^>J2hqO?`}l8Yt31<;2sqp zZeoj3BE-wq*QXQR6XCpQ>ARLsL|vC;<}DkFNa*U^RSOr!QV^XPGmsrw$Iy=2+8eb^ zbm?$5yYlLg*oU^p1<@Kxw@*=5DewJcSp+)NwPDVAlE@Qs!AEr(^V|R0=HIj7Hcy@E zg*O1X#fhx&@Ob_CJI$5{4Hrt6!O9vV*1PtuBSo0y6o9ffU#GHNHfv0`h(dy-nFL(u zG!U*br-V`H!$;EyOX8pa<8)tC6bvp`QglM>l`8>&BNXR+3>EW|2;DjL)Xt3s}zeu`>H(RaMpd_oHYj1{po7@L&#mV`Ec+3K$K1 zGbT_iKnEMzl;BJmNl0+PZ$sL>G*nu&QGa9W)A7)Je~fLW-;Z%u z`rx1_7*NnzfOB5xX#2Q7fle2*yN}nm{#wUdqWKf20AZlCyqs#sjmX$p-5n9sst4DC&f$jRl73 zL4ZW}t(w}KmoL4Ub>QtLs_$tZH>sAxyYR+#;#@Y<-k4J zv56+Bw5&YzLv**M@-s?AHBT#2w#iCJ{gA1seq~7tVG>G3^N=Dci=r5l`+aiX|J?uH zuh)J3V~g+gz0UJG&*MCf&+$1v2Mt}Kqwm7IrGoP`jw++%?BVoj+A-|sbL{*!*9HHb z7g04csSLy(PSgy57FMeBniL8~X&~~olhfZ6ui~1b#-ugxv&jWXibqk98ar}7>F;+x|9+V~I`KJ}2eZ^cP($9H zg)`H*I&tEdkqnP0goB*5$Q5WZhymlR$nHzZ%37^631=Gc@Zm;V?X{FC+t^SaLH(0a z!nw3G)B@1V+JEat@YfW6IO=$lwxhBg^FYO-d?=fMW0je~A%_)ouew@c?AU_e^_XSpP$M-UQ+yl30IMc$s6Vw#CS*mAX zdC7XfXgy-BOpmrLTWS`yERoC_52&on1I57o`m-+Qeof7>cAddP#m^vP1r84M)ceo5 zQY_Lw^oh@XFqGo6s&=E?E8t6=NpzEZy7&tur$k-8{wWV+QYR#A9Fl}{BiFDdy_$S&8mFttO%3y9yL_u*=eh4`&9GpY#hLR6Z z`CCZb$1J0qfcn5rmVk~B#Q|)Pn7qy)jS|$<)b4s-q`<#aag0=Xo#(1zpNTeb^svxC z2fdxjl3-GbUqN|rzY~TfqX@#qMJ=`z4|bOJA95ik;|(_00UppGuoo43E8`H zjfITMFXn>)x~M`ehn3H8myaBfc_@^aMGKt4^;WibfQna24Q4 zhV8)3wyUMj-Uz}DCYEp*MM6ACNI4Y3uGCV(NB|Z*ZfH1JcnRUXj|?`PL!*xHs2Dv; zykp$<*@*XcYhpP$yZHW&1`ZtvIySbCRwmn@NJt=x!EYkj$bB;-BE~%3D}{WJx~zH> zNgMn@qKQ-^Yq{jE>>&T@A0AzlU#fU-0p2DN!>--C2X>mzQ9F#zgg>o<9zXt#M6N=J zT*wE7@Nog7y}Vjkpl|>B8vz>HKpf+pR4#tAE0>(V?#6%L0pDFUc^w`YY9ZKJ1-W4< z5pweL(ZFg=k(FW;4Sst>Z8i=8l$JisDg|_2+0P`iL=B}#Rz8^O3*SBh3%mwwdE?TY zPG>mABn@y-@VJm5I7W%~_|#ES%;>bL>xZsf@h1aB$u3L@Y5CoNG{s-=sDQ1qr7A(6q(pSjkXQ2G_=Yp;3jd zIQ9^L@9=3kDaM$|ItlfMdEk`vtT>r)@}wObG{$u}%ER?%!5oZ;n8KoW4Vrzp_#jop zKm0)d`6^a2)DiAy^oxE~GEKrnYXi$F!t(K3eO;3XgQ#X>kgP}V$CuJCUHY+onWVm) z=no?bxc^IR>_w9604t^mcV0Vlz%glsIbjNN0to=G!nMU_bGi@?^UMik4h|QNmSWbX zRRx7(n~;+Ral$JGwskFc7$psijw>VAa18Dv+7TdUEN){&TkbBYymjjo%y~LyUIn+~ 
z+yj+IR0YQu@x9;DM#rlom7mAJwxm|*!)N~6fstSiS7e(QvqqPCTX3t*ri~b7$7kY{y zuETfsBfLiS%yd;%zV26Yw3(%hV)7T+Bh37>E?*v?YE^jQa1Cb6FeNIYZ~FJ`_EM!Y z6HU`tkygTFz-}w6uXp}eIQ|GpWHvmE8^T%8=EEz_2+7$`KRtI`c7)XJ$HdoR@hJ}t zVp)hkE-FGE`020eAtu^P1<@mX0oW0ZfYnr0H@ItpRuZRf){Cd81a_6kMCb0x0_xxe z#*LO6^6mOZk)QylgDt^li*MiF`@YtTnEBYBpCql%APBXy`#_aJye(~kTM>tH)OomP zdsMmS_^EoQ5&EPWgae3D1MW|dYDUwA-uY=&10{Iyz&o+S-C__ofr-grzoFjOqg7U_ z1ycK)Uc5{}V&L0S!YOp%8nzoNMQR}sMSR1;g_uPULO5Nh<&S9xhhXww-g_cRLGoLD zZ#=^pr&|`oW{dCHy*r4vN*cAjdAg%yb_%!Gm6`|kJYgM^h0nHm;glKKH_2g9P(;kG zIeT_-W232-mIu)WU>S%eC};zq%KBNOrMB3GYnR`6SXT!%nK3TNcKnqXk|;Q^B>a|H zzO8C~vS~&2Y8MyiOz*(|!G3H{4$1i7i*;N{2WbKde;NR}p7X}&Uq$3e>Y;FObfOYQ znj$M}(N8~2U5$j7VzgDyH^XEI64Rw4I&>xJXz0Suf~|1p_uH&`z3^9A2Oal025CwezjC}(IX=m1MCe<;8eh^kaK^A{AS`wD^`;+s-3f!T zQ*QR-?j0al!TUlqE}1a~*BH~eKdrdY_cDXH0}278wcEe{KF`45GW#<#Qx~9s z(&zGu3NO9*LR&9z8J2PxC|zkOTD_EYBRZ|wU-WLL)2EAv2t9vtT#yH0Dd71!XKx-B zw#Raz0ttEo1A_Sw1PHitnt93iEeO zf&=0}vOI9&0XHsRyomgXF7d~eE3^AIzgx-T#SzMbcf-lYF)wtrD7gGI_-7H0>%U-0 zg@%Ro_4gymaV;kY2=qQc1opBlQCUhlZxIz{`VS1YySvyt4tm%71+k5(^QtH-iI>>9RMX26_|KoNdJBNttx$p*cmJ-`jR7}14={@_0vlw zU<)&heCI4#a$?096TnzD0^==mgH{&QRlbKelU>d}AW7X{8LEtP zPC@C;)O3(Q7AJgnUw1k=LjpvW;BYP!(rT^D9*m8DxXVuc4O&V<0(lr3Xldb*7$Eon zS5NaXdgHWB6*nyq`L}9c2$+TCB)W*Wxq5qh!MTYx51aIr0N4_bu!RNPI6rB-&bwPJ zJ_WaKVRkZ7Q)Bh!jvY^)Fc|TR2RJnaSQaH7x>uGD@)Xvtpbxr&P-beY7SYkvb;e=i;MW zctwFGuE0W&HNnL!Ee{k34X5E>4oBdh9Y@9c4kZXWKU3*`;HG*{=twy^(3{nBlu5IY!MI*ITI`aC%9wLLFg z+bk@SFL0b;>qydNXCJM(OwdmPNRLWdP)0pHn00SDI~U@RS66EpFOZ~?vDI5*jG@s^ zsjvDge8T462F5QNT&Xi@VjMY0EhSQsS&o3_2o_!w|1$3=*BqulF&KKrQ9Rdc07u8A198_G{&qhcgPL@$& zc_BA9#TZ`FT(~gi>Zy#3X+WzAFV*MUHu^P}eC#$p)!j9s##LzufyvzdaiU0=K)z&z zN}Br(r$;kd79hh0)qB6uFdx2p$7lnUCkJ-qIJs@=W`Xf#tdT_@Z|_;{;GiW}Gr@9a z#7^VFO=>ub&o?42Q|`$;(TgQplg*zPsxq!K`S#%%Gp0<5_^2ZwAJV3cAKGyt!In{S zz3QaR$T5znMdwXQN?l#5$WI6o{T##MKS~q~RK!m+&$KG>Q_|y8;%Cg%t@`3;$Q}C> z8Sx(;VJf1hm8AY3)uMZqB*iEHf8IiwC$D@&)XMA2UfXRTyH7G*$79aNj#L&2Dp_k+ 
zsa@5h=I75Lp`U0+pG_LvH}pH(ctXu+wWTZkV)CeS^C1KQg4aL_M#>Ln9(=D8C2MPG zZEMU+Xjcz1-?|l%u2z^xgYV7yS{!?YaLnGmbV^ccBLifT?TJ1`{WCj^ z5RDeqAtGZ+w~HsiOswxU!I~muh0$e~ipi~oS%U0vyS+~;9Kl($S~&99#xAm%)(GfZ6sA9ATJX3s(unlkPuKGRXkJo!$#4fTMu2_l%5JI1VtUPeURH z=wjJ~sEjk)&r{(Lh;(XT@4^KORzX)_S)_=Bq8e+u|FLkaxYxy#0K-2 z5N@EFLPO^A_w}9r*|sUQGOQmDaaX-`2lPo|8JW^2T|h$EPqI7!jJV@$3S4kjD;osn zDSb5S;$RVP4D$rpr{Frm+5E)`?GG?|ZUxhk1GCYs>X=oft<4>^O z7$Ptm*?nf}6EIF(oqq+Y9Tt%d#W>hSXXi||G&Tc6GF0FQCb|XeeA~7|$?UJnIF?zr z_At=9%&Xtf`vZW^oi?74>AlYA(HVf=WsN|Cf#nmd2~L<48wG62Udot$TkYgr~$yL33( zbVKzeyLdo;tG-#I)}(`Ar4ed%Ab0xir)Kfx-1Ufcglljpx*;l67X{WV*eyfpe1Jc> z1Lp6?GRrXw{>NGrxIJO1|KH5U4>-IYUPHHO+pG-9!^j)oIC7aqN|m)j_j2n8yM2zs zs|_vpj8$#EQSxVne7cd1_u8iV2AiSZ$#=ZBvp9P<_78{PKJWE-%pH4Usg8GWXs_Ki zD5V>%GFi*Xz}kClyi%>CReYR(;^Vvd_}h3se%F$ZS2zT}^DeIWU&gI{Q0BWup356c SBJPMMwE4nr**S|%`~M5@Hz=$C literal 0 HcmV?d00001 diff --git a/src/lib.rs b/src/lib.rs index bd775f8cf..3e7225923 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,426 @@ +//! **Torrust Tracker** is a modern and feature-rich (private) [`BitTorrent`](https://www.bittorrent.org/) tracker. +//! +//! [`BitTorrent`](https://en.wikipedia.org/wiki/BitTorrent) is a protocol for distributing files using a peer-to-peer network. +//! +//! Peers in the networks need to know where they can find other peers with the files they are looking for. +//! +//! Tracker are services that allow peers to quickly find other peers. Client peers announce their existence to a tracker, +//! and the tracker responds to the peer with a list of other peers in the swarm. +//! +//! You can learn more about `BitTorrent` and `BitTorrent` Trackers on these sites: +//! +//! - +//! - +//! - +//! +//! Torrust Tracker is a `BitTorrent` tracker with a focus on: +//! +//! - Performance +//! - Robustness +//! - Extensibility +//! - Security +//! - Usability +//! - And with a community-driven development +//! +//! # Table of contents +//! +//! - [Features](#features) +//! - [Services](#services) +//! 
- [Installation](#installation) +//! - [Configuration](#configuration) +//! - [Usage](#usage) +//! - [Components](#components) +//! - [Implemented BEPs](#implemented-beps) +//! +//! # Features +//! +//! - Multiple UDP server and HTTP(S) server blocks for socket binding possible +//! - Full IPv4 and IPv6 support for both UDP and HTTP(S) +//! - Private and Whitelisted mode +//! - Built-in API +//! - Peer authentication using time-bound keys +//! - Database persistence for authentication keys, whitelist and completed peers counter +//! - DB Support for `SQLite` and `MySQl` +//! +//! # Services +//! +//! From the end-user perspective the Torrust Tracker exposes three different services. +//! +//! - A REST [`API`](crate::servers::apis) +//! - One or more [`UDP`](crate::servers::udp) trackers +//! - One or more [`HTTP`](crate::servers::http) trackers +//! +//! # Installation +//! +//! ## Minimum requirements +//! +//! - Rust Stable `1.68` +//! - You might have problems compiling with a machine with low resources. Or with low resources limits on docker containers. It has been tested with docker containers with 6 CPUs, 7.5 GM of memory and 2GB of swap. +//! +//! ## Prerequisites +//! +//! With the default configuration you will need to create the `storage` directory: +//! +//! ```text +//! storage/ +//! ├── database +//! │   └── data.db +//! └── ssl_certificates +//! ├── localhost.crt +//! └── localhost.key +//! ``` +//! +//! The default configuration expects a directory `./storage/database` to be writable by the tracker process. +//! +//! By default the tracker uses `SQLite` and the database file name `data.db`. +//! +//! You only need the `ssl_certificates` directory in case you are setting up SSL for the HTTP tracker or the tracker API. +//! Visit [`HTTP`](crate::servers::http) or [`API`](crate::servers::apis) if you want to know how you can use HTTPS. +//! +//! ## Install from sources +//! +//! ```text +//! 
git clone https://github.com/torrust/torrust-tracker.git \ +//! && cd torrust-tracker \ +//! && cargo build --release \ +//! && mkdir -p ./storage/database \ +//! && mkdir -p ./storage/ssl_certificates +//! ``` +//! +//! ## Run with docker +//! +//! You can run the tracker with a pre-built docker image: +//! +//! ```text +//! mkdir -p ./storage/database \ +//! && mkdir -p ./storage/ssl_certificates \ +//! && export TORRUST_TRACKER_USER_UID=1000 \ +//! && docker run -it \ +//! --user="$TORRUST_TRACKER_USER_UID" \ +//! --publish 6969:6969/udp \ +//! --publish 7070:7070/tcp \ +//! --publish 1212:1212/tcp \ +//! --volume "$(pwd)/storage":"/app/storage" \ +//! torrust/tracker:3.0.0-alpha.1 +//! ``` +//! +//! For more information about using docker visit the [tracker docker documentation](https://github.com/torrust/torrust-tracker/tree/develop/docker). +//! +//! # Configuration +//! +//! In order to run the tracker you need to provide the configuration. If you run the tracker without providing the configuration, +//! the tracker will generate the default configuration the first time you run it. It will generate a `config.toml` file with +//! in the root directory. +//! +//! The default configuration is: +//! +//! ```toml +//! log_level = "info" +//! mode = "public" +//! db_driver = "Sqlite3" +//! db_path = "./storage/database/data.db" +//! announce_interval = 120 +//! min_announce_interval = 120 +//! max_peer_timeout = 900 +//! on_reverse_proxy = false +//! external_ip = "0.0.0.0" +//! tracker_usage_statistics = true +//! persistent_torrent_completed_stat = false +//! inactive_peer_cleanup_interval = 600 +//! remove_peerless_torrents = true +//! +//! [[udp_trackers]] +//! enabled = false +//! bind_address = "0.0.0.0:6969" +//! +//! [[http_trackers]] +//! enabled = false +//! bind_address = "0.0.0.0:7070" +//! ssl_enabled = false +//! ssl_cert_path = "" +//! ssl_key_path = "" +//! +//! [http_api] +//! enabled = true +//! bind_address = "127.0.0.1:1212" +//! 
ssl_enabled = false +//! ssl_cert_path = "" +//! ssl_key_path = "" +//! +//! [http_api.access_tokens] +//! admin = "MyAccessToken" +//! ``` +//! +//! The default configuration includes one disabled UDP server, one disabled HTTP server and the enabled API. +//! +//! For more information about each service and options you can visit the documentation for the [torrust-tracker-configuration crate](https://docs.rs/torrust-tracker-configuration/3.0.0-alpha.1/torrust_tracker_configuration/). +//! +//! Alternatively to the `config.toml` file you can use one environment variable `TORRUST_TRACKER_CONFIG` to pass the configuration to the tracker: +//! +//! ```text +//! TORRUST_TRACKER_CONFIG=$(cat config.toml) +//! cargo run +//! ``` +//! +//! In the previous example you are just setting the env var with the contents of the `config.toml` file. +//! +//! The env var contains the same data as the `config.toml`. It's particularly useful in you are [running the tracker with docker](https://github.com/torrust/torrust-tracker/tree/develop/docker). +//! +//! > NOTE: The `TORRUST_TRACKER_CONFIG` env var has priority over the `config.toml` file. +//! +//! # Usage +//! +//! Running the tracker with the default configuration and enabling the UDP and HTTP trackers will expose the services on these URLs: +//! +//! - REST API: +//! - UDP tracker: +//! - HTTP tracker: +//! +//! ## API usage +//! +//! In order to use the tracker API you need to enable it in the configuration: +//! +//! ```toml +//! [http_api] +//! enabled = true +//! bind_address = "127.0.0.1:1212" +//! ssl_enabled = false +//! ssl_cert_path = "" +//! ssl_key_path = "" +//! ``` +//! +//! By default it's enabled on port `1212`. You also need to add access tokens in the configuration: +//! +//! ```toml +//! [http_api.access_tokens] +//! admin = "MyAccessToken" +//! LABEL = "YOUR_TOKEN" +//! ``` +//! +//! All tokens give full access the the API. 
Once you have defined you token you can make request adding the token as a `GET` parameter. For example: +//! +//! +//! +//! That endpoint will give you the tracker metrics: +//! +//! ```json +//! { +//! "torrents": 0, +//! "seeders": 0, +//! "completed": 0, +//! "leechers": 0, +//! "tcp4_connections_handled": 0, +//! "tcp4_announces_handled": 0, +//! "tcp4_scrapes_handled": 0, +//! "tcp6_connections_handled": 0, +//! "tcp6_announces_handled": 0, +//! "tcp6_scrapes_handled": 0, +//! "udp4_connections_handled": 0, +//! "udp4_announces_handled": 0, +//! "udp4_scrapes_handled": 0, +//! "udp6_connections_handled": 0, +//! "udp6_announces_handled": 0, +//! "udp6_scrapes_handled": 0 +//! } +//! ``` +//! +//! Refer to the [`API`](crate::servers::apis) documentation for more information about the [`API`](crate::servers::apis) endpoints. +//! +//! ## HTTP tracker usage +//! +//! The HTTP tracker implements two type of requests: +//! +//! - Announce: +//! - Scrape: +//! +//! In you are using the tracker in `private` or `private_listed` mode you will need to append the authentication key: +//! +//! - Announce: +//! - Scrape: +//! +//! In order to use the HTTP tracker you need to enable at least one server in the configuration: +//! +//! ```toml +//! [[http_trackers]] +//! enabled = true +//! bind_address = "0.0.0.0:7070" +//! ``` +//! +//! Refer to the [`HTTP`](crate::servers::http) documentation for more information about the [`HTTP`](crate::servers::http) tracker. +//! +//! ### Announce +//! +//! The `announce` request allows a peer to announce itself and obtain a list of peer for an specific torrent. +//! +//! A sample `announce` request: +//! +//! +//! +//! If you want to know more about the `announce` request: +//! +//! - [BEP 03. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! - [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +//! 
- [Vuze announce docs](https://wiki.vuze.com/w/Announce) +//! +//! ### Scrape +//! +//! The `scrape` request allows a peer to get swarm metadata for multiple torrents at the same time. +//! +//! A sample `scrape` request for only one torrent: +//! +//! +//! +//! The response contains the swarm metadata for that torrent: +//! +//! - `complete`: the number of active peers that have completed downloading, also known as seeders. Peers from which other peers can get a full copy of the torrent. +//! - `downloaded`: the number of peers that have ever completed downloading. +//! - `incomplete`: the number of active peers that have not completed downloading, also known as leechers. +//! +//! The `scrape` response is a bencoded byte array like the following: +//! +//! ```text +//! d5:filesd20:xxxxxxxxxxxxxxxxxxxxd8:completei11e10:downloadedi13772e10:incompletei19e20:yyyyyyyyyyyyyyyyyyyyd8:completei21e10:downloadedi206e10:incompletei20eee +//! ``` +//! +//! If you save the response as a file and you open it with a program that can handle binary data you would see: +//! +//! ```text +//! 00000000: 6435 3a66 696c 6573 6432 303a 8100 0000 d5:filesd20:.... +//! 00000010: 0000 0000 0000 0000 0000 0000 0000 0000 ................ +//! 00000020: 6438 3a63 6f6d 706c 6574 6569 3165 3130 d8:completei1e10 +//! 00000030: 3a64 6f77 6e6c 6f61 6465 6469 3065 3130 :downloadedi0e10 +//! 00000040: 3a69 6e63 6f6d 706c 6574 6569 3065 6565 :incompletei0eee +//! 00000050: 65 e +//! ``` +//! +//! `BitTorrent` uses a data formatting specification called [Bencode](https://en.wikipedia.org/wiki/Bencode). +//! +//! If you want to know more about the `scrape` request: +//! +//! - [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +//! - [Vuze scrape docs](https://wiki.vuze.com/w/Scrape) +//! +//! ### Authentication keys +//! +//! If the tracker is running in `private` or `private_listed` mode you will need to provide a valid authentication key. +//! +//! 
Right now the only way to add new keys is via the REST [`API`](crate::servers::apis). The endpoint `POST /api/v1/key/:duration_in_seconds` +//! will return an expiring key that will be valid for `duration_in_seconds` seconds. +//! +//! Using `curl` you can create a 2-minute valid auth key: +//! +//! ```text +//! $ curl -X POST http://127.0.0.1:1212/api/v1/key/120?token=MyAccessToken +//! ``` +//! +//! Response: +//! +//! ```json +//! { +//! "key": "nvCFlJCq7fz7Qx6KoKTDiMZvns8l5Kw7", +//! "valid_until": 1679334334, +//! "expiry_time": "2023-03-20 17:45:34.712077008 UTC" +//! } +//! ``` +//! +//! You can also use the Torrust Tracker together with the [Torrust Index](https://github.com/torrust/torrust-index). If that's the case, +//! the Index will create the keys by using the API. +//! +//! ## UDP tracker usage +//! +//! The UDP tracker also implements two types of requests: +//! +//! - Announce: +//! - Scrape: +//! +//! In order to use the UDP tracker you need to enable at least one server in the configuration: +//! +//! ```toml +//! [[udp_trackers]] +//! enabled = true +//! bind_address = "0.0.0.0:6969" +//! ``` +//! +//! Refer to the [`UDP`](crate::servers::udp) documentation for more information about the [`UDP`](crate::servers::udp) tracker. +//! +//! If you want to know more about the UDP tracker protocol: +//! +//! - [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html) +//! +//! # Components +//! +//! Torrust Tracker has four main components: +//! +//! - The core [`tracker`](crate::tracker) +//! - The tracker REST [`API`](crate::servers::apis) +//! - The [`UDP`](crate::servers::udp) tracker +//! - The [`HTTP`](crate::servers::http) tracker +//! +//! ![Torrust Tracker Components](https://github.com/torrust/torrust-tracker/blob/main/docs/media/torrust-tracker-components.png) +//! +//! ## Core tracker +//! +//! The core tracker is the main module containing the generic tracker logic. +//! +//! 
The core tracker handles: +//! +//! - Authentication with keys +//! - Authorization using a torrent whitelist +//! - Statistics +//! - Persistence +//! +//! See [`tracker`](crate::tracker) for more details on the [`tracker`](crate::tracker) module. +//! +//! ## Tracker API +//! +//! The tracker exposes a REST API. The API has four resource groups: +//! +//! - Authentication keys: to handle the keys for the HTTP tracker +//! - Statistics: to get the tracker metrics like requests counters +//! - Torrents: to get peers for a torrent +//! - Whitelist: to handle the torrent whitelist when the tracker runs on `listed` or `private_listed` mode +//! +//! See [`API`](crate::servers::apis) for more details on the REST API. +//! +//! ## UDP tracker +//! +//! UDP trackers are trackers with focus on performance. By using UDP instead of HTTP the tracker removes the overhead +//! of opening and closing TCP connections. It also reduces the response size. +//! +//! You can find more information about the UDP tracker on: +//! +//! - [Wikipedia: UDP tracker](https://en.wikipedia.org/wiki/UDP_tracker) +//! - [BEP 15: UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html) +//! +//! See [`UDP`](crate::servers::udp) for more details on the UDP tracker. +//! +//! ## HTTP tracker +//! +//! HTTP tracker was the original tracker specification defined in [BEP 3](https://www.bittorrent.org/beps/bep_0003.html). +//! +//! See [`HTTP`](crate::servers::http) for more details on the HTTP tracker. +//! +//! You can find more information about the HTTP tracker on: +//! +//! - [Wikipedia: `BitTorrent` tracker](https://en.wikipedia.org/wiki/BitTorrent_tracker) +//! - [BEP 3: The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! +//! # Implemented BEPs +//! +//! BEP stands for `BitTorrent` Enhancement Proposal. BEPs are documents providing information to the `BitTorrent` +//! 
community or describing a new feature for the `BitTorrent` protocols. +//! +//! You can find all BEPs on +//! +//! Torrust Tracker implements these BEPs: +//! +//! - [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The `BitTorrent` Protocol +//! - [BEP 7](https://www.bittorrent.org/beps/bep_0007.html): IPv6 Support +//! - [BEP 15](https://www.bittorrent.org/beps/bep_0015.html): UDP Tracker Protocol for `BitTorrent` +//! - [BEP 23](https://www.bittorrent.org/beps/bep_0023.html): Tracker Returns Compact Peer Lists +//! - [BEP 27](https://www.bittorrent.org/beps/bep_0027.html): Private Torrents +//! - [BEP 41](https://www.bittorrent.org/beps/bep_0041.html): UDP Tracker Protocol Extensions +//! - [BEP 48](https://www.bittorrent.org/beps/bep_0048.html): Tracker Protocol Extension: Scrape pub mod app; pub mod bootstrap; pub mod servers; From 78f295bebf9ffbe1c4617f080fc69d0f7665ca6e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Mar 2023 11:53:54 +0000 Subject: [PATCH 0497/1003] docs: [#253] crate docs for app.rs --- src/app.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/src/app.rs b/src/app.rs index 5f75449ca..3fc790a23 100644 --- a/src/app.rs +++ b/src/app.rs @@ -1,3 +1,22 @@ +//! Torrust Tracker application. +//! +//! The tracker application has a global configuration for multiple jobs. +//! It's basically a container for other services. +//! It also check constraint and dependencies between services. For example: +//! It's not safe to run a UDP tracker on top of a core public tracker, as UDP trackers +//! do not allow private access to the tracker data. +//! +//! The application is responsible for: +//! +//! - Loading data from the database when it's needed. +//! - Starting some jobs depending on the configuration. +//! +//! The started jobs may be: +//! +//! - Torrent cleaner: it removes inactive peers and (optionally) peerless torrents. +//! - UDP trackers: the user can enable multiple UDP tracker on several ports. +//! 
- HTTP trackers: the user can enable multiple HTTP tracker on several ports. +//! - Tracker REST API: the tracker API can be enabled/disabled. use std::sync::Arc; use log::warn; From fe28ef5cd18d0c9c36d89f034a1bc3c99ed65355 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Mar 2023 18:07:58 +0000 Subject: [PATCH 0498/1003] docs: [#256] crate docs for tracker and bootstrap mods --- cSpell.json | 1 + packages/configuration/src/lib.rs | 11 +- src/bootstrap/app.rs | 41 ++ src/bootstrap/jobs/http_tracker.rs | 20 + src/bootstrap/jobs/mod.rs | 8 + src/bootstrap/jobs/torrent_cleanup.rs | 17 + src/bootstrap/jobs/tracker_apis.rs | 34 + src/bootstrap/jobs/udp_tracker.rs | 11 + src/bootstrap/logging.rs | 13 + src/bootstrap/mod.rs | 7 + src/lib.rs | 11 +- src/tracker/auth.rs | 60 ++ src/tracker/databases/driver.rs | 27 +- src/tracker/databases/error.rs | 9 + src/tracker/databases/mod.rs | 167 ++++- src/tracker/databases/mysql.rs | 17 + src/tracker/databases/sqlite.rs | 17 + src/tracker/error.rs | 9 + src/tracker/mod.rs | 887 ++++++++++++++++++----- src/tracker/peer.rs | 55 +- src/tracker/services/mod.rs | 8 + src/tracker/services/statistics/mod.rs | 47 ++ src/tracker/services/statistics/setup.rs | 12 + src/tracker/services/torrent.rs | 24 + src/tracker/statistics.rs | 45 ++ src/tracker/torrent.rs | 78 +- 26 files changed, 1423 insertions(+), 213 deletions(-) diff --git a/cSpell.json b/cSpell.json index d8dee5c6b..88794b2ad 100644 --- a/cSpell.json +++ b/cSpell.json @@ -51,6 +51,7 @@ "proot", "Quickstart", "Rasterbar", + "reannounce", "repr", "reqwest", "rngs", diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index d42c82df9..8b4d9363d 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -59,17 +59,26 @@ impl HttpApi { pub struct Configuration { pub log_level: Option, pub mode: TrackerMode, + + // Database configuration pub db_driver: DatabaseDriver, pub db_path: String, + + /// Interval in seconds 
that the client should wait between sending regular announce requests to the tracker pub announce_interval: u32, + /// Minimum announce interval. Clients must not reannounce more frequently than this pub min_announce_interval: u32, - pub max_peer_timeout: u32, pub on_reverse_proxy: bool, pub external_ip: Option, pub tracker_usage_statistics: bool, pub persistent_torrent_completed_stat: bool, + + // Cleanup job configuration + pub max_peer_timeout: u32, pub inactive_peer_cleanup_interval: u64, pub remove_peerless_torrents: bool, + + // Server jobs configuration pub udp_trackers: Vec, pub http_trackers: Vec, pub http_api: HttpApi, diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index e845feac0..c0e688a0d 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -1,3 +1,16 @@ +//! Setup for the main tracker application. +//! +//! The [`setup`](bootstrap::app::setup) only builds the application and its dependencies but it does not start the application. +//! In fact, there is no such thing as the main application process. When the application starts, the only thing it does is +//! starting a bunch of independent jobs. If you are looking for how things are started you should read [`app::start`](crate::app::start) +//! function documentation. +//! +//! Setup steps: +//! +//! 1. Load the global application configuration. +//! 2. Initialize static variables. +//! 3. Initialize logging. +//! 4. Initialize the domain tracker. use std::env; use std::sync::Arc; @@ -9,6 +22,7 @@ use crate::shared::crypto::ephemeral_instance_keys; use crate::tracker::services::tracker_factory; use crate::tracker::Tracker; +/// It loads the configuration from the environment and builds the main domain [`tracker`](crate::tracker::Tracker) struct. 
#[must_use] pub fn setup() -> (Arc, Arc) { let configuration = Arc::new(initialize_configuration()); @@ -17,6 +31,9 @@ pub fn setup() -> (Arc, Arc) { (configuration, tracker) } +/// It initializes the application with the given configuration. +/// +/// The configuration may be obtained from the environment (via config file or env vars). #[must_use] pub fn initialize_with_configuration(configuration: &Arc) -> Arc { initialize_static(); @@ -24,6 +41,12 @@ pub fn initialize_with_configuration(configuration: &Arc) -> Arc< Arc::new(initialize_tracker(configuration)) } +/// It initializes the application static values. +/// +/// These values are accessible throughout the entire application: +/// +/// - The time when the application started. +/// - An ephemeral instance random seed. This seed is used for encryption and it's changed when the main application process is restarted. pub fn initialize_static() { // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); @@ -32,6 +55,17 @@ pub fn initialize_static() { lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); } +/// It loads the application configuration from the environment. +/// +/// There are two methods to inject the configuration: +/// +/// 1. By using a config file: `config.toml`. The file must be in the same folder where you are running the tracker. +/// 2. Environment variable: `TORRUST_TRACKER_CONFIG`. The variable contains the same contents as the `config.toml` file. +/// +/// Environment variable has priority over the config file. +/// +/// Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) for the configuration options. +/// /// # Panics /// /// Will panic if it can't load the configuration from either @@ -50,11 +84,18 @@ fn initialize_configuration() -> Configuration { } } +/// It builds the domain tracker +/// +/// The tracker is the domain layer service. It's the entrypoint to make requests to the domain layer. 
+/// It's used by other higher-level components like the UDP and HTTP trackers or the tracker API. #[must_use] pub fn initialize_tracker(config: &Arc) -> Tracker { tracker_factory(config.clone()) } +/// It initializes the log level, format and channel. +/// +/// See [the logging setup](crate::bootstrap::logging::setup) for more info about logging. pub fn initialize_logging(config: &Arc) { bootstrap::logging::setup(config); } diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 43bd0076f..ac0161640 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -1,3 +1,16 @@ +//! HTTP tracker job starter. +//! +//! The function [`http_tracker::start_job`](crate::bootstrap::jobs::http_tracker::start_job) starts a new HTTP tracker server. +//! +//! > **NOTICE**: the application can launch more than one HTTP tracker on different ports. +//! Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) for the configuration options. +//! +//! The [`http_tracker::start_job`](crate::bootstrap::jobs::http_tracker::start_job) function spawns a new asynchronous task, +//! that tasks is the "**launcher**". The "**launcher**" starts the actual server and sends a message back to the main application. +//! The main application waits until receives the message [`ServerJobStarted`](crate::bootstrap::jobs::http_tracker::ServerJobStarted) from the "**launcher**". +//! +//! The "**launcher**" is an intermediary thread that decouples the HTTP servers from the process that handles it. The HTTP could be used independently in the future. +//! In that case it would not need to notify a parent process. 
use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; @@ -10,9 +23,16 @@ use crate::servers::http::v1::launcher; use crate::servers::http::Version; use crate::tracker; +/// This is the message that the "**launcher**" spawned task sends to the main application process to notify that the HTTP server was successfully started. +/// +/// > **NOTICE**: it does not mean the HTTP server is ready to receive requests. It only means the new server started. It might take some time to the server to be ready to accept request. #[derive(Debug)] pub struct ServerJobStarted(); +/// It starts a new HTTP server with the provided configuration and version. +/// +/// Right now there is only one version but in the future we could support more than one HTTP tracker version at the same time. +/// This feature allows supporting breaking changes on `BitTorrent` BEPs. pub async fn start_job(config: &HttpTracker, tracker: Arc, version: Version) -> JoinHandle<()> { match version { Version::V1 => start_v1(config, tracker.clone()).await, diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index ba44a56ad..c519a9f4b 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -1,3 +1,11 @@ +//! Application jobs launchers. +//! +//! The main application setup has only two main stages: +//! +//! 1. Setup the domain layer: the core tracker. +//! 2. Launch all the application services as concurrent jobs. +//! +//! This modules contains all the functions needed to start those jobs. pub mod http_tracker; pub mod torrent_cleanup; pub mod tracker_apis; diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index 64240bffe..d48769139 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -1,3 +1,15 @@ +//! Job that runs a task on intervals to clean up torrents. +//! +//! It removes inactive peers and (optionally) peerless torrents. +//! +//! 
**Inactive peers** are peers that have not been updated for more than `max_peer_timeout` seconds. +//! `max_peer_timeout` is a customizable core tracker option. +//! +//! If the core tracker configuration option `remove_peerless_torrents` is true, the cleanup job will also +//! remove **peerless torrents** which are torrents with an empty peer list. +//! +//! Refer to [`torrust-tracker-configuration documentation`](https://docs.rs/torrust-tracker-configuration) for more info about those options. + use std::sync::Arc; use chrono::Utc; @@ -7,6 +19,11 @@ use torrust_tracker_configuration::Configuration; use crate::tracker; +/// It starts a job for cleaning up the torrent data in the tracker. +/// +/// The cleaning task is executed on an `inactive_peer_cleanup_interval`. +/// +/// Refer to [`torrust-tracker-configuration documentation`](https://docs.rs/torrust-tracker-configuration) for more info about that option. #[must_use] pub fn start_job(config: &Arc, tracker: &Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(tracker); diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index cdebc21a8..9afe4ab24 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -1,3 +1,25 @@ +//! Tracker API job starter. +//! +//! The [`tracker_apis::start_job`](crate::bootstrap::jobs::tracker_apis::start_job) +//! function starts the HTTP tracker REST API. +//! +//! > **NOTICE**: that even though there is only one job the API has different +//! versions. API consumers can choose which version to use. The API version is +//! part of the URL, for example: `http://localhost:1212/api/v1/stats`. +//! +//! The [`tracker_apis::start_job`](crate::bootstrap::jobs::tracker_apis::start_job) +//! function spawns a new asynchronous task, that task is the "**launcher**". +//! The "**launcher**" starts the actual server and sends a message back +//! to the main application. 
The main application waits until receives +//! the message [`ApiServerJobStarted`](crate::bootstrap::jobs::tracker_apis::ApiServerJobStarted) +//! from the "**launcher**". +//! +//! The "**launcher**" is an intermediary thread that decouples the API server +//! from the process that handles it. The API could be used independently +//! in the future. In that case it would not need to notify a parent process. +//! +//! Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) +//! for the API configuration options. use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; @@ -9,9 +31,21 @@ use torrust_tracker_configuration::HttpApi; use crate::servers::apis::server; use crate::tracker; +/// This is the message that the "launcher" spawned task sends to the main +/// application process to notify the API server was successfully started. +/// +/// > **NOTICE**: it does not mean the API server is ready to receive requests. +/// It only means the new server started. It might take some time to the server +/// to be ready to accept request. #[derive(Debug)] pub struct ApiServerJobStarted(); +/// This function starts a new API server with the provided configuration. +/// +/// The functions starts a new concurrent task that will run the API server. +/// This task will send a message to the main application process to notify +/// that the API server was successfully started. +/// /// # Panics /// /// It would panic if unable to send the `ApiServerJobStarted` notice. diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 138222daf..76c465a8d 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -1,3 +1,11 @@ +//! UDP tracker job starter. +//! +//! The [`udp_tracker::start_job`](crate::bootstrap::jobs::udp_tracker::start_job) +//! function starts a new UDP tracker server. +//! +//! > **NOTICE**: that the application can launch more than one UDP tracker +//! 
on different ports. Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) +//! for the configuration options. use std::sync::Arc; use log::{error, info, warn}; @@ -7,6 +15,9 @@ use torrust_tracker_configuration::UdpTracker; use crate::servers::udp::server::Udp; use crate::tracker; +/// It starts a new UDP server with the provided configuration. +/// +/// It spawns a new asynchronous task for the new UDP server. #[must_use] pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.clone(); diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs index 83e2c9360..97e26919d 100644 --- a/src/bootstrap/logging.rs +++ b/src/bootstrap/logging.rs @@ -1,3 +1,15 @@ +//! Setup for the application logging. +//! +//! It redirects the log info to the standard output with the log level defined in the configuration. +//! +//! - `Off` +//! - `Error` +//! - `Warn` +//! - `Info` +//! - `Debug` +//! - `Trace` +//! +//! Refer to the [configuration crate documentation](https://docs.rs/torrust-tracker-configuration) to know how to change log settings. use std::str::FromStr; use std::sync::Once; @@ -6,6 +18,7 @@ use torrust_tracker_configuration::Configuration; static INIT: Once = Once::new(); +/// It redirects the log info to the standard output with the log level defined in the configuration pub fn setup(cfg: &Configuration) { let level = config_level_or_default(&cfg.log_level); diff --git a/src/bootstrap/mod.rs b/src/bootstrap/mod.rs index e3b6467ee..e39cf3adc 100644 --- a/src/bootstrap/mod.rs +++ b/src/bootstrap/mod.rs @@ -1,3 +1,10 @@ +//! Tracker application bootstrapping. +//! +//! This module includes all the functions to build the application, its dependencies, and run the jobs. +//! +//! Jobs are tasks executed concurrently. Some of them are concurrent because of the asynchronous nature of the task, +//! 
like cleaning torrents, and other jobs because they can be enabled/disabled depending on the configuration. +//! For example, you can have more than one UDP and HTTP tracker, each server is executed like a independent job. pub mod app; pub mod jobs; pub mod logging; diff --git a/src/lib.rs b/src/lib.rs index 3e7225923..a460a28b8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -27,10 +27,15 @@ //! - [Features](#features) //! - [Services](#services) //! - [Installation](#installation) +//! - [Minimum requirements](#minimum-requirements) +//! - [Prerequisites](#prerequisites) +//! - [Install from sources](#install-from-sources) +//! - [Run with docker](#run-with-docker) //! - [Configuration](#configuration) //! - [Usage](#usage) //! - [Components](#components) //! - [Implemented BEPs](#implemented-beps) +//! - [Contributing](#contributing) //! //! # Features //! @@ -356,7 +361,7 @@ //! - The [`UDP`](crate::servers::udp) tracker //! - The [`HTTP`](crate::servers::http) tracker //! -//! ![Torrust Tracker Components](https://github.com/torrust/torrust-tracker/blob/main/docs/media/torrust-tracker-components.png) +//! ![Torrust Tracker Components](https://raw.githubusercontent.com/torrust/torrust-tracker/main/docs/media/torrust-tracker-components.png) //! //! ## Core tracker //! @@ -421,6 +426,10 @@ //! - [BEP 27](https://www.bittorrent.org/beps/bep_0027.html): Private Torrents //! - [BEP 41](https://www.bittorrent.org/beps/bep_0041.html): UDP Tracker Protocol Extensions //! - [BEP 48](https://www.bittorrent.org/beps/bep_0048.html): Tracker Protocol Extension: Scrape +//! +//! # Contributing +//! +//! If you want to contribute to this documentation you can [open a new pull request](https://github.com/torrust/torrust-tracker/pulls). pub mod app; pub mod bootstrap; pub mod servers; diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 31e1f50e4..9068a94f0 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -1,3 +1,38 @@ +//! 
Tracker authentication services and structs. +//! +//! This module contains functions to handle tracker keys. +//! Tracker keys are tokens used to authenticate the tracker clients when the tracker runs +//! in `private` or `private_listed` modes. +//! +//! There are services to [`generate`](crate::tracker::auth::generate) and [`verify`](crate::tracker::auth::verify) authentication keys. +//! +//! Authentication keys are used only by [`HTTP`](crate::servers::http) trackers. All keys have an expiration time, that means +//! they are only valid during a period of time. After that time the expiring key will no longer be valid. +//! +//! Keys are stored in this struct: +//! +//! ```rust,no_run +//! pub struct ExpiringKey { +//! /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` +//! pub key: Key, +//! /// Timestamp, the key will be no longer valid after this timestamp +//! pub valid_until: DurationSinceUnixEpoch, +//! } +//! ``` +//! +//! You can generate a new key valid for `9999` seconds and `0` nanoseconds from the current time with the following: +//! +//! ```rust,no_run +//! let expiring_key = auth::generate(Duration::new(9999, 0)); +//! +//! assert!(auth::verify(&expiring_key).is_ok()); +//! ``` +//! +//! And you can later verify it with: +//! +//! ```rust,no_run +//! assert!(auth::verify(&expiring_key).is_ok()); +//! ``` use std::panic::Location; use std::str::FromStr; use std::sync::Arc; @@ -15,6 +50,8 @@ use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; use crate::shared::clock::{convert_from_timestamp_to_datetime_utc, Current, DurationSinceUnixEpoch, Time, TimeNow}; #[must_use] +/// It generates a new random 32-char authentication [`ExpiringKey`](crate::tracker::auth::ExpiringKey) +/// /// # Panics /// /// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. 
@@ -33,6 +70,8 @@ pub fn generate(lifetime: Duration) -> ExpiringKey { } } +/// It verifies an [`ExpiringKey`](crate::tracker::auth::ExpiringKey). It checks if the expiration date has passed. +/// /// # Errors /// /// Will return `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. @@ -50,9 +89,13 @@ pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> { } } +/// An authentication key which has an expiration time. +/// After that time is will automatically become invalid. #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct ExpiringKey { + /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` pub key: Key, + /// Timestamp, the key will be no longer valid after this timestamp pub valid_until: DurationSinceUnixEpoch, } @@ -82,9 +125,24 @@ impl ExpiringKey { } } +/// A randomly generated token used for authentication. +/// +/// It contains lower and uppercase letters and numbers. +/// It's a 32-char string. #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] pub struct Key(String); +/// Error returned when a key cannot be parsed from a string. +/// +/// ```rust,no_run +/// let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; +/// let key = Key::from_str(key_string); +/// +/// assert!(key.is_ok()); +/// assert_eq!(key.unwrap().to_string(), key_string); +/// ``` +/// +/// If the string does not contains a valid key, the parser function will return this error. #[derive(Debug, PartialEq, Eq)] pub struct ParseKeyError; @@ -100,6 +158,8 @@ impl FromStr for Key { } } +/// Verification error. Error returned when an [`ExpiringKey`](crate::tracker::auth::ExpiringKey) cannot be verified with the [`verify(...)`](crate::tracker::auth::verify) function. 
+/// #[derive(Debug, Error)] #[allow(dead_code)] pub enum Error { diff --git a/src/tracker/databases/driver.rs b/src/tracker/databases/driver.rs index 4ce6ea515..ef9a4eb07 100644 --- a/src/tracker/databases/driver.rs +++ b/src/tracker/databases/driver.rs @@ -1,3 +1,7 @@ +//! Database driver factory. +//! +//! See [`databases::driver::build`](crate::tracker::databases::driver::build) +//! function for more information. use torrust_tracker_primitives::DatabaseDriver; use super::error::Error; @@ -5,7 +9,28 @@ use super::mysql::Mysql; use super::sqlite::Sqlite; use super::{Builder, Database}; -/// . +/// It builds a new database driver. +/// +/// Example for `SQLite3`: +/// +/// ```rust,no_run +/// let db_driver = "Sqlite3".to_string(); +/// let db_path = "./storage/database/data.db".to_string(); +/// let database = databases::driver::build(&db_driver, &db_path)?; +/// ``` +/// +/// Example for `MySQL`: +/// +/// ```rust,no_run +/// let db_driver = "MySQL".to_string(); +/// let db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker".to_string(); +/// let database = databases::driver::build(&db_driver, &db_path)?; +/// ``` +/// +/// Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) +/// for more information about the database configuration. +/// +/// > **WARNING**: The driver instantiation runs database migrations. /// /// # Errors /// diff --git a/src/tracker/databases/error.rs b/src/tracker/databases/error.rs index 68b732190..d89ec05de 100644 --- a/src/tracker/databases/error.rs +++ b/src/tracker/databases/error.rs @@ -1,3 +1,6 @@ +//! Database errors. +//! +//! This module contains the [Database errors](crate::tracker::databases::error::Error). use std::panic::Location; use std::sync::Arc; @@ -7,24 +10,28 @@ use torrust_tracker_primitives::DatabaseDriver; #[derive(thiserror::Error, Debug, Clone)] pub enum Error { + /// The query unexpectedly returned nothing. 
#[error("The {driver} query unexpectedly returned nothing: {source}")] QueryReturnedNoRows { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, driver: DatabaseDriver, }, + /// The query was malformed. #[error("The {driver} query was malformed: {source}")] InvalidQuery { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, driver: DatabaseDriver, }, + /// Unable to insert a record into the database #[error("Unable to insert record into {driver} database, {location}")] InsertFailed { location: &'static Location<'static>, driver: DatabaseDriver, }, + /// Unable to delete a record into the database #[error("Failed to remove record from {driver} database, error-code: {error_code}, {location}")] DeleteFailed { location: &'static Location<'static>, @@ -32,12 +39,14 @@ pub enum Error { driver: DatabaseDriver, }, + /// Unable to connect to the database #[error("Failed to connect to {driver} database: {source}")] ConnectionError { source: LocatedError<'static, UrlError>, driver: DatabaseDriver, }, + /// Unable to create a connection pool #[error("Failed to create r2d2 {driver} connection pool: {source}")] ConnectionPool { source: LocatedError<'static, r2d2::Error>, diff --git a/src/tracker/databases/mod.rs b/src/tracker/databases/mod.rs index f68288bbe..3b02415df 100644 --- a/src/tracker/databases/mod.rs +++ b/src/tracker/databases/mod.rs @@ -1,3 +1,48 @@ +//! The persistence module. +//! +//! Persistence is currently implemented with one [`Database`](crate::tracker::databases::Database) trait. +//! +//! There are two implementations of the trait (two drivers): +//! +//! - [`Mysql`](crate::tracker::databases::mysql::Mysql) +//! - [`Sqlite`](crate::tracker::databases::sqlite::Sqlite) +//! +//! > **NOTICE**: There are no database migrations. If there are any changes, +//! we will implemented them or provide a script to migrate to the new schema. +//! +//! The persistent objects are: +//! +//! - [Torrent metrics](#torrent-metrics) +//! 
- [Torrent whitelist](#torrent-whitelist) +//! - [Authentication keys](#authentication-keys) +//! +//! # Torrent metrics +//! +//! Field | Sample data | Description +//! ---|---|--- +//! `id` | 1 | Autoincrement id +//! `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1 +//! `completed` | 20 | The number of peers that have ever completed downloading the torrent associated to this entry. See [`Entry`](crate::tracker::torrent::Entry) for more information. +//! +//! > **NOTICE**: The peer list for a torrent is not persisted. Since peers have to re-announce themselves at intervals, the data will be +//! regenerated again after some minutes. +//! +//! # Torrent whitelist +//! +//! Field | Sample data | Description +//! ---|---|--- +//! `id` | 1 | Autoincrement id +//! `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1 +//! +//! # Authentication keys +//! +//! Field | Sample data | Description +//! ---|---|--- +//! `id` | 1 | Autoincrement id +//! `key` | `IrweYtVuQPGbG9Jzx1DihcPmJGGpVy82` | Token +//! `valid_until` | 1672419840 | Timestamp for the expiring date +//! +//! > **NOTICE**: All keys must have an expiration date. pub mod driver; pub mod error; pub mod mysql; @@ -32,9 +77,10 @@ where } } +/// The persistence trait. It contains all the methods to interact with the database. #[async_trait] pub trait Database: Sync + Send { - /// . + /// It instantiates a new database driver. /// /// # Errors /// @@ -43,39 +89,142 @@ pub trait Database: Sync + Send { where Self: std::marker::Sized; - /// . + // Schema + + /// It generates the database tables. SQL queries are hardcoded in the trait + /// implementation. + /// + /// # Context: Schema /// /// # Errors /// /// Will return `Error` if unable to create own tables. fn create_database_tables(&self) -> Result<(), Error>; + /// It drops the database tables. + /// + /// # Context: Schema + /// + /// # Errors + /// + /// Will return `Err` if unable to drop tables. 
fn drop_database_tables(&self) -> Result<(), Error>; + // Torrent Metrics + + /// It loads the torrent metrics data from the database. + /// + /// It returns an array of tuples with the torrent + /// [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) and the + /// [`completed`](crate::tracker::torrent::Entry::completed) counter + /// which is the number of times the torrent has been downloaded. + /// See [`Entry::completed`](crate::tracker::torrent::Entry::completed). + /// + /// # Context: Torrent Metrics + /// + /// # Errors + /// + /// Will return `Err` if unable to load. async fn load_persistent_torrents(&self) -> Result, Error>; - async fn load_keys(&self) -> Result, Error>; + /// It saves the torrent metrics data into the database. + /// + /// # Context: Torrent Metrics + /// + /// # Errors + /// + /// Will return `Err` if unable to save. + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; - async fn load_whitelist(&self) -> Result, Error>; + // Whitelist - async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; + /// It loads the whitelisted torrents from the database. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return `Err` if unable to load. + async fn load_whitelist(&self) -> Result, Error>; + /// It checks if the torrent is whitelisted. + /// + /// It returns `Some(InfoHash)` if the torrent is whitelisted, `None` otherwise. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return `Err` if unable to load. async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error>; + /// It adds the torrent to the whitelist. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return `Err` if unable to save. async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; + /// It checks if the torrent is whitelisted. 
+ /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return `Err` if unable to load. + async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { + Ok(self.get_info_hash_from_whitelist(info_hash).await?.is_some()) + } + + /// It removes the torrent from the whitelist. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return `Err` if unable to save. async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; + // Authentication keys + + /// It loads the expiring authentication keys from the database. + /// + /// # Context: Authentication Keys + /// + /// # Errors + /// + /// Will return `Err` if unable to load. + async fn load_keys(&self) -> Result, Error>; + + /// It gets an expiring authentication key from the database. + /// + /// It returns `Some(ExpiringKey)` if a [`ExpiringKey`](crate::tracker::auth::ExpiringKey) + /// with the input [`Key`](crate::tracker::auth::Key) exists, `None` otherwise. + /// + /// # Context: Authentication Keys + /// + /// # Errors + /// + /// Will return `Err` if unable to load. async fn get_key_from_keys(&self, key: &Key) -> Result, Error>; + /// It adds an expiring authentication key to the database. + /// + /// # Context: Authentication Keys + /// + /// # Errors + /// + /// Will return `Err` if unable to save. async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result; + /// It removes an expiring authentication key from the database. + /// + /// # Context: Authentication Keys + /// + /// # Errors + /// + /// Will return `Err` if unable to load. 
async fn remove_key_from_keys(&self, key: &Key) -> Result; - - async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { - Ok(self.get_info_hash_from_whitelist(info_hash).await?.is_some()) - } } diff --git a/src/tracker/databases/mysql.rs b/src/tracker/databases/mysql.rs index 7e4aab99e..4419666ab 100644 --- a/src/tracker/databases/mysql.rs +++ b/src/tracker/databases/mysql.rs @@ -1,3 +1,4 @@ +//! The `MySQL` database driver. use std::str::FromStr; use std::time::Duration; @@ -22,6 +23,10 @@ pub struct Mysql { #[async_trait] impl Database for Mysql { + /// It instantiates a new `MySQL` database driver. + /// + /// Refer to [`databases::Database::new`](crate::tracker::databases::Database::new). + /// /// # Errors /// /// Will return `r2d2::Error` if `db_path` is not able to create `MySQL` database. @@ -34,6 +39,7 @@ impl Database for Mysql { Ok(Self { pool }) } + /// Refer to [`databases::Database::create_database_tables`](crate::tracker::databases::Database::create_database_tables). fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( @@ -73,6 +79,7 @@ impl Database for Mysql { Ok(()) } + /// Refer to [`databases::Database::drop_database_tables`](crate::tracker::databases::Database::drop_database_tables). fn drop_database_tables(&self) -> Result<(), Error> { let drop_whitelist_table = " DROP TABLE `whitelist`;" @@ -97,6 +104,7 @@ impl Database for Mysql { Ok(()) } + /// Refer to [`databases::Database::load_persistent_torrents`](crate::tracker::databases::Database::load_persistent_torrents). async fn load_persistent_torrents(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -111,6 +119,7 @@ impl Database for Mysql { Ok(torrents) } + /// Refer to [`databases::Database::load_keys`](crate::tracker::databases::Database::load_keys). 
async fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -125,6 +134,7 @@ impl Database for Mysql { Ok(keys) } + /// Refer to [`databases::Database::load_whitelist`](crate::tracker::databases::Database::load_whitelist). async fn load_whitelist(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -135,6 +145,7 @@ impl Database for Mysql { Ok(info_hashes) } + /// Refer to [`databases::Database::save_persistent_torrent`](crate::tracker::databases::Database::save_persistent_torrent). async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { const COMMAND : &str = "INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)"; @@ -147,6 +158,7 @@ impl Database for Mysql { Ok(conn.exec_drop(COMMAND, params! { info_hash_str, completed })?) } + /// Refer to [`databases::Database::get_info_hash_from_whitelist`](crate::tracker::databases::Database::get_info_hash_from_whitelist). async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -160,6 +172,7 @@ impl Database for Mysql { Ok(info_hash) } + /// Refer to [`databases::Database::add_info_hash_to_whitelist`](crate::tracker::databases::Database::add_info_hash_to_whitelist). async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -173,6 +186,7 @@ impl Database for Mysql { Ok(1) } + /// Refer to [`databases::Database::remove_info_hash_from_whitelist`](crate::tracker::databases::Database::remove_info_hash_from_whitelist). 
async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -183,6 +197,7 @@ impl Database for Mysql { Ok(1) } + /// Refer to [`databases::Database::get_key_from_keys`](crate::tracker::databases::Database::get_key_from_keys). async fn get_key_from_keys(&self, key: &Key) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -199,6 +214,7 @@ impl Database for Mysql { })) } + /// Refer to [`databases::Database::add_key_to_keys`](crate::tracker::databases::Database::add_key_to_keys). async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -213,6 +229,7 @@ impl Database for Mysql { Ok(1) } + /// Refer to [`databases::Database::remove_key_from_keys`](crate::tracker::databases::Database::remove_key_from_keys). async fn remove_key_from_keys(&self, key: &Key) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; diff --git a/src/tracker/databases/sqlite.rs b/src/tracker/databases/sqlite.rs index 931289183..1968ee049 100644 --- a/src/tracker/databases/sqlite.rs +++ b/src/tracker/databases/sqlite.rs @@ -1,3 +1,4 @@ +//! The `SQLite3` database driver. use std::panic::Location; use std::str::FromStr; @@ -19,6 +20,10 @@ pub struct Sqlite { #[async_trait] impl Database for Sqlite { + /// It instantiates a new `SQLite3` database driver. + /// + /// Refer to [`databases::Database::new`](crate::tracker::databases::Database::new). + /// /// # Errors /// /// Will return `r2d2::Error` if `db_path` is not able to create `SqLite` database. @@ -27,6 +32,7 @@ impl Database for Sqlite { Pool::new(cm).map_or_else(|err| Err((err, DatabaseDriver::Sqlite3).into()), |pool| Ok(Sqlite { pool })) } + /// Refer to [`databases::Database::create_database_tables`](crate::tracker::databases::Database::create_database_tables). 
fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( @@ -60,6 +66,7 @@ impl Database for Sqlite { Ok(()) } + /// Refer to [`databases::Database::drop_database_tables`](crate::tracker::databases::Database::drop_database_tables). fn drop_database_tables(&self) -> Result<(), Error> { let drop_whitelist_table = " DROP TABLE whitelist;" @@ -82,6 +89,7 @@ impl Database for Sqlite { Ok(()) } + /// Refer to [`databases::Database::load_persistent_torrents`](crate::tracker::databases::Database::load_persistent_torrents). async fn load_persistent_torrents(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -102,6 +110,7 @@ impl Database for Sqlite { Ok(torrents) } + /// Refer to [`databases::Database::load_keys`](crate::tracker::databases::Database::load_keys). async fn load_keys(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -122,6 +131,7 @@ impl Database for Sqlite { Ok(keys) } + /// Refer to [`databases::Database::load_whitelist`](crate::tracker::databases::Database::load_whitelist). async fn load_whitelist(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -138,6 +148,7 @@ impl Database for Sqlite { Ok(info_hashes) } + /// Refer to [`databases::Database::save_persistent_torrent`](crate::tracker::databases::Database::save_persistent_torrent). async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -156,6 +167,7 @@ impl Database for Sqlite { } } + /// Refer to [`databases::Database::get_info_hash_from_whitelist`](crate::tracker::databases::Database::get_info_hash_from_whitelist). 
async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -168,6 +180,7 @@ impl Database for Sqlite { Ok(query.map(|f| InfoHash::from_str(&f.get_unwrap::<_, String>(0)).unwrap())) } + /// Refer to [`databases::Database::add_info_hash_to_whitelist`](crate::tracker::databases::Database::add_info_hash_to_whitelist). async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -183,6 +196,7 @@ impl Database for Sqlite { } } + /// Refer to [`databases::Database::remove_info_hash_from_whitelist`](crate::tracker::databases::Database::remove_info_hash_from_whitelist). async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -200,6 +214,7 @@ impl Database for Sqlite { } } + /// Refer to [`databases::Database::get_key_from_keys`](crate::tracker::databases::Database::get_key_from_keys). async fn get_key_from_keys(&self, key: &Key) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -219,6 +234,7 @@ impl Database for Sqlite { })) } + /// Refer to [`databases::Database::add_key_to_keys`](crate::tracker::databases::Database::add_key_to_keys). async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -237,6 +253,7 @@ impl Database for Sqlite { } } + /// Refer to [`databases::Database::remove_key_from_keys`](crate::tracker::databases::Database::remove_key_from_keys). async fn remove_key_from_keys(&self, key: &Key) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; diff --git a/src/tracker/error.rs b/src/tracker/error.rs index aaf755e0d..f1e622673 100644 --- a/src/tracker/error.rs +++ b/src/tracker/error.rs @@ -1,7 +1,16 @@ +//! Error returned by the core `Tracker`. +//! +//! Error | Context | Description +//! ---|---|--- +//! 
`PeerKeyNotValid` | Authentication | The supplied key is not valid. It may not be registered or expired. +//! `PeerNotAuthenticated` | Authentication | The peer did not provide the authentication key. +//! `TorrentNotWhitelisted` | Authorization | The action cannot be performed on a not-whitelisted torrent (it only applies for trackers running in `listed` or `private_listed` modes). +//! use std::panic::Location; use torrust_tracker_located_error::LocatedError; +/// Authentication or authorization error returned by the core `Tracker` #[derive(thiserror::Error, Debug, Clone)] pub enum Error { // Authentication errors diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index a89d6df2c..ce69b6125 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1,3 +1,412 @@ +//! The core `tracker` module contains the generic `BitTorrent` tracker logic which is independent of the delivery layer. +//! +//! It contains the tracker services and their dependencies. It's a domain layer which does not +//! specify how the end user should connect to the `Tracker`. +//! +//! Typically this module is intended to be used by higher modules like: +//! +//! - A UDP tracker +//! - A HTTP tracker +//! - A tracker REST API +//! +//! ```text +//! Delivery layer Domain layer +//! +//! HTTP tracker | +//! UDP tracker |> Core tracker +//! Tracker REST API | +//! ``` +//! +//! # Table of contents +//! +//! - [Tracker](#tracker) +//! - [Announce request](#announce-request) +//! - [Scrape request](#scrape-request) +//! - [Torrents](#torrents) +//! - [Peers](#peers) +//! - [Configuration](#configuration) +//! - [Services](#services) +//! - [Authentication](#authentication) +//! - [Statistics](#statistics) +//! - [Persistence](#persistence) +//! +//! # Tracker +//! +//! The `Tracker` is the main struct in this module. The `Tracker` has some groups of responsibilities: +//! +//! - **Core tracker**: it handles the information about torrents and peers. +//! 
- **Authentication**: it handles authentication keys which are used by HTTP trackers. +//! - **Authorization**: it handles the permission to perform requests. +//! - **Whitelist**: when the tracker runs in `listed` or `private_listed` mode all operations are restricted to whitelisted torrents. +//! - **Statistics**: it keeps and serves the tracker statistics. +//! +//! Refer to [torrust-tracker-configuration](https://docs.rs/torrust-tracker-configuration) crate docs to get more information about the tracker settings. +//! +//! ## Announce request +//! +//! Handling `announce` requests is the most important task for a `BitTorrent` tracker. +//! +//! A `BitTorrent` swarm is a network of peers that are all trying to download the same torrent. +//! When a peer wants to find other peers it announces itself to the swarm via the tracker. +//! The peer sends its data to the tracker so that the tracker can add it to the swarm. +//! The tracker responds to the peer with the list of other peers in the swarm so that +//! the peer can contact them to start downloading pieces of the file from them. +//! +//! Once you have instantiated the `Tracker` you can `announce` a new [`peer`](crate::tracker::peer::Peer) with: +//! +//! ```rust,no_run +//! let info_hash: InfoHash = +//! "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse() +//! .unwrap(); +//! +//! let mut peer = Peer { +//! peer_id: peer::Id(*b"-qB00000000000000001"), +//! peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), +//! updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), +//! uploaded: NumberOfBytes(0), +//! downloaded: NumberOfBytes(0), +//! left: NumberOfBytes(0), +//! event: AnnounceEvent::Completed, +//! }; +//! +//! let peer_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); +//! +//! let announce_data = tracker.announce(&info_hash, &mut peer, &peer_ip).await; +//! ``` +//! +//! 
The `Tracker` returns the list of peers for the torrent with the infohash `3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0`, +//! filtering out the peer that is making the `announce` request. +//! +//! > **NOTICE**: that the peer argument is mutable because the `Tracker` can change the peer IP if the peer is using a loopback IP. +//! +//! The `peer_ip` argument is the resolved peer ip. It's a common practice that trackers ignore the peer ip in the `announce` request params, +//! and resolve the peer ip using the IP of the client making the request. As the tracker is a domain service, the peer IP must be provided +//! by the `Tracker` user, which is usually a higher component with access to the request metadata, for example, connection data, proxy headers, +//! etcetera. +//! +//! The returned struct is: +//! +//! ```rust,no_run +//! pub struct AnnounceData { +//! pub peers: Vec<Peer>, +//! pub swarm_stats: SwarmStats, +//! pub interval: u32, // Option `announce_interval` from core tracker configuration +//! pub interval_min: u32, // Option `min_announce_interval` from core tracker configuration +//! } +//! +//! pub struct SwarmStats { +//! pub completed: u32, // The number of peers that have ever completed downloading +//! pub seeders: u32, // The number of active peers that have completed downloading (seeders) +//! pub leechers: u32, // The number of active peers that have not completed downloading (leechers) +//! } +//! +//! // Core tracker configuration +//! pub struct Configuration { +//! // ... +//! pub announce_interval: u32, // Interval in seconds that the client should wait between sending regular announce requests to the tracker +//! pub min_announce_interval: u32, // Minimum announce interval. Clients must not reannounce more frequently than this +//! // ... +//! } +//! ``` +//! +//! Refer to `BitTorrent` BEPs and other sites for more information about the `announce` request: +//! +//! - [BEP 3. 
The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! - [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +//! - [Vuze docs](https://wiki.vuze.com/w/Announce) +//! +//! ## Scrape request +//! +//! The `scrape` request allows clients to query metadata about the swarm in bulk. +//! +//! An `scrape` request includes a list of infohashes whose swarm metadata you want to collect. +//! +//! The returned struct is: +//! +//! ```rust,no_run +//! pub struct ScrapeData { +//! pub files: HashMap, +//! } +//! +//! pub struct SwarmMetadata { +//! pub complete: u32, // The number of active peers that have completed downloading (seeders) +//! pub downloaded: u32, // The number of peers that have ever completed downloading +//! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) +//! } +//! ``` +//! +//! The JSON representation of a sample `scrape` response would be like the following: +//! +//! ```json +//! { +//! 'files': { +//! 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, +//! 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} +//! } +//! } +//! ``` +//! +//! `xxxxxxxxxxxxxxxxxxxx` and `yyyyyyyyyyyyyyyyyyyy` are 20-byte infohash arrays. +//! There are two data structures for infohashes: byte arrays and hex strings: +//! +//! ```rust,no_run +//! let info_hash: InfoHash = [255u8; 20].into(); +//! +//! assert_eq!( +//! info_hash, +//! InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() +//! ); +//! ``` +//! Refer to `BitTorrent` BEPs and other sites for more information about the `scrape` request: +//! +//! - [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +//! - [BEP 15. UDP Tracker Protocol for `BitTorrent`. Scrape section](https://www.bittorrent.org/beps/bep_0015.html) +//! - [Vuze docs](https://wiki.vuze.com/w/Scrape) +//! +//! 
## Torrents +//! +//! The [`torrent`](crate::tracker::torrent) module contains all the data structures stored by the `Tracker` except for peers. +//! +//! We can represent the data stored in memory internally by the `Tracker` with this JSON object: +//! +//! ```json +//! { +//! "c1277613db1d28709b034a017ab2cae4be07ae10": { +//! "completed": 0, +//! "peers": { +//! "-qB00000000000000001": { +//! "peer_id": "-qB00000000000000001", +//! "peer_addr": "2.137.87.41:1754", +//! "updated": 1672419840, +//! "uploaded": 120, +//! "downloaded": 60, +//! "left": 60, +//! "event": "started" +//! }, +//! "-qB00000000000000002": { +//! "peer_id": "-qB00000000000000002", +//! "peer_addr": "23.17.287.141:2345", +//! "updated": 1679415984, +//! "uploaded": 80, +//! "downloaded": 20, +//! "left": 40, +//! "event": "started" +//! } +//! } +//! } +//! } +//! ``` +//! +//! The `Tracker` maintains an indexed-by-info-hash list of torrents. For each torrent, it stores a torrent `Entry`. +//! The torrent entry has two attributes: +//! +//! - `completed`: which is the number of peers that have completed downloading the torrent file/s. As they have completed downloading, +//! they have a full version of the torrent data, and they can provide the full data to other peers. That's why they are also known as "seeders". +//! - `peers`: an indexed and ordered list of peers for the torrent. Each peer contains the data received from the peer in the `announce` request. +//! +//! The [`torrent`](crate::tracker::torrent) module not only contains the original data obtained from peers via `announce` requests, it also contains +//! aggregate data that can be derived from the original data. For example: +//! +//! ```rust,no_run +//! pub struct SwarmMetadata { +//! pub complete: u32, // The number of active peers that have completed downloading (seeders) +//! pub downloaded: u32, // The number of peers that have ever completed downloading +//! 
pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) +//! } +//! +//! pub struct SwarmStats { +//! pub completed: u32, // The number of peers that have ever completed downloading +//! pub seeders: u32, // The number of active peers that have completed downloading (seeders) +//! pub leechers: u32, // The number of active peers that have not completed downloading (leechers) +//! } +//! ``` +//! +//! > **NOTICE**: that `complete` or `completed` peers are the peers that have completed downloading, but only the active ones are considered "seeders". +//! +//! `SwarmMetadata` struct follows name conventions for `scrape` responses. See [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmStats` +//! is used for the rest of cases. +//! +//! Refer to [`torrent`](crate::tracker::torrent) module for more details about these data structures. +//! +//! ## Peers +//! +//! A `Peer` is the struct used by the `Tracker` to keep peers data: +//! +//! ```rust,no_run +//! pub struct Peer { +//! pub peer_id: Id, // The peer ID +//! pub peer_addr: SocketAddr, // Peer socket address +//! pub updated: DurationSinceUnixEpoch, // Last time (timestamp) when the peer was updated +//! pub uploaded: NumberOfBytes, // Number of bytes the peer has uploaded so far +//! pub downloaded: NumberOfBytes, // Number of bytes the peer has downloaded so far +//! pub left: NumberOfBytes, // The number of bytes this peer still has to download +//! pub event: AnnounceEvent, // The event the peer has announced: `started`, `completed`, `stopped` +//! } +//! ``` +//! +//! Notice that most of the attributes are obtained from the `announce` request. +//! For example, an HTTP announce request would contain the following `GET` parameters: +//! +//! +//! +//! The `Tracker` keeps an in-memory ordered data structure with all the torrents and a list of peers for each torrent, together with some swarm metrics. +//! +//! 
We can represent the data stored in memory with this JSON object: +//! +//! ```json +//! { +//! "c1277613db1d28709b034a017ab2cae4be07ae10": { +//! "completed": 0, +//! "peers": { +//! "-qB00000000000000001": { +//! "peer_id": "-qB00000000000000001", +//! "peer_addr": "2.137.87.41:1754", +//! "updated": 1672419840, +//! "uploaded": 120, +//! "downloaded": 60, +//! "left": 60, +//! "event": "started" +//! }, +//! "-qB00000000000000002": { +//! "peer_id": "-qB00000000000000002", +//! "peer_addr": "23.17.287.141:2345", +//! "updated": 1679415984, +//! "uploaded": 80, +//! "downloaded": 20, +//! "left": 40, +//! "event": "started" +//! } +//! } +//! } +//! } +//! ``` +//! +//! That JSON object does not exist, it's only a representation of the `Tracker` torrents data. +//! +//! `c1277613db1d28709b034a017ab2cae4be07ae10` is the torrent infohash and `completed` contains the number of peers +//! that have a full version of the torrent data, also known as seeders. +//! +//! Refer to [`peer`](crate::tracker::peer) module for more information about peers. +//! +//! # Configuration +//! +//! You can control the behavior of this module with the module settings: +//! +//! ```toml +//! log_level = "debug" +//! mode = "public" +//! db_driver = "Sqlite3" +//! db_path = "./storage/database/data.db" +//! announce_interval = 120 +//! min_announce_interval = 120 +//! max_peer_timeout = 900 +//! on_reverse_proxy = false +//! external_ip = "2.137.87.41" +//! tracker_usage_statistics = true +//! persistent_torrent_completed_stat = true +//! inactive_peer_cleanup_interval = 600 +//! remove_peerless_torrents = false +//! ``` +//! +//! Refer to the [`configuration` module documentation](https://docs.rs/torrust-tracker-configuration) to get more information about all options. +//! +//! # Services +//! +//! Services are domain services on top of the core tracker. Right now there are two types of service: +//! +//! - For statistics +//! - For torrents +//! +//! 
Services usually format the data inside the tracker to make it easier to consume by other parts. +//! They also decouple the internal data structure, used by the tracker, from the way we deliver that data to the consumers. +//! The internal data structure is designed for performance or low memory consumption. And it should be changeable +//! without affecting the external consumers. +//! +//! Services can include extra features like pagination, for example. +//! +//! Refer to [`services`](crate::tracker::services) module for more information about services. +//! +//! # Authentication +//! +//! One of the core `Tracker` responsibilities is to create and keep authentication keys. Auth keys are used by HTTP trackers +//! when the tracker is running in `private` or `private_listed` mode. +//! +//! HTTP tracker's clients need to obtain an auth key before starting requesting the tracker. Once they get one they have to include +//! a `PATH` param with the key in all the HTTP requests. For example, when a peer wants to `announce` itself it has to use the +//! HTTP tracker endpoint `GET /announce/:key`. +//! +//! The common way to obtain the keys is by using the tracker API directly or via other applications like the [Torrust Index](https://github.com/torrust/torrust-index). +//! +//! To learn more about tracker authentication, refer to the following modules: +//! +//! - [`auth`](crate::tracker::auth) module. +//! - [`tracker`](crate::tracker) module. +//! - [`http`](crate::servers::http) module. +//! +//! # Statistics +//! +//! The `Tracker` keeps metrics for some events: +//! +//! ```rust,no_run +//! pub struct Metrics { +//! // IP version 4 +//! +//! // HTTP tracker +//! pub tcp4_connections_handled: u64, +//! pub tcp4_announces_handled: u64, +//! pub tcp4_scrapes_handled: u64, +//! +//! // UDP tracker +//! pub udp4_connections_handled: u64, +//! pub udp4_announces_handled: u64, +//! pub udp4_scrapes_handled: u64, +//! +//! // IP version 6 +//! +//! // HTTP tracker +//! 
pub tcp6_connections_handled: u64, +//! pub tcp6_announces_handled: u64, +//! pub tcp6_scrapes_handled: u64, +//! +//! // UDP tracker +//! pub udp6_connections_handled: u64, +//! pub udp6_announces_handled: u64, +//! pub udp6_scrapes_handled: u64, +//! } +//! ``` +//! +//! The metrics maintained by the `Tracker` are: +//! +//! - `connections_handled`: number of connections handled by the tracker +//! - `announces_handled`: number of `announce` requests handled by the tracker +//! - `scrapes_handled`: number of `scrape` requests handled by the tracker +//! +//! > **NOTICE**: as the HTTP tracker does not have a specific `connection` request like the UDP tracker, `connections_handled` is +//! increased on every `announce` and `scrape` request. +//! +//! The tracker exposes an event sender API that allows the tracker users to send events. When a higher application service handles a +//! `connection`, `announce` or `scrape` request, it notifies the `Tracker` by sending statistics events. +//! +//! For example, the HTTP tracker would send an event like the following when it handles an `announce` request received from a peer using IP version 4. +//! +//! ```rust,no_run +//! tracker.send_stats_event(statistics::Event::Tcp4Announce).await +//! ``` +//! +//! Refer to [`statistics`](crate::tracker::statistics) module for more information about statistics. +//! +//! # Persistence +//! +//! Right now the `Tracker` is responsible for storing and loading data into and +//! from the database, when persistence is enabled. +//! +//! There are three types of persistent objects: +//! +//! - Authentication keys (only expiring keys) +//! - Torrent whitelist +//! - Torrent metrics +//! +//! Refer to [`databases`](crate::tracker::databases) module for more information about persistence. 
pub mod auth; pub mod databases; pub mod error; @@ -25,28 +434,45 @@ use self::torrent::{SwarmMetadata, SwarmStats}; use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::databases::Database; +/// The domain layer tracker service. +/// +/// Its main responsibility is to handle the `announce` and `scrape` requests. +/// But it's also a container for the `Tracker` configuration, persistence, +/// authentication and other services. +/// +/// > **NOTICE**: the `Tracker` is not responsible for handling the network layer. +/// Typically, the `Tracker` is used by a higher application service that handles +/// the network layer. pub struct Tracker { + /// `Tracker` configuration. See pub config: Arc, + /// A database driver implementation: [`Sqlite3`](crate::tracker::databases::sqlite) + /// or [`MySQL`](crate::tracker::databases::mysql) + pub database: Box, mode: TrackerMode, keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, stats_event_sender: Option>, stats_repository: statistics::Repo, - pub database: Box, } +/// Structure that holds general `Tracker` torrents metrics. +/// +/// Metrics are aggregate values for all torrents. #[derive(Debug, PartialEq, Default)] pub struct TorrentsMetrics { - // code-review: consider using `SwarmStats` for - // `seeders`, `completed`, and `leechers` attributes. - // pub swarm_stats: SwarmStats; + /// Total number of seeders for all torrents pub seeders: u64, + /// Total number of peers that have ever completed downloading for all torrents. pub completed: u64, + /// Total number of leechers for all torrents. pub leechers: u64, + /// Total number of torrents. pub torrents: u64, } +/// Structure that holds the data returned by the `announce` request. #[derive(Debug, PartialEq, Default)] pub struct AnnounceData { pub peers: Vec, @@ -55,6 +481,7 @@ pub struct AnnounceData { pub interval_min: u32, } +/// Structure that holds the data returned by the `scrape` request. 
#[derive(Debug, PartialEq, Default)] pub struct ScrapeData { pub files: HashMap, @@ -88,9 +515,11 @@ impl ScrapeData { } impl Tracker { + /// `Tracker` constructor. + /// /// # Errors /// - /// Will return a `databases::error::Error` if unable to connect to database. + /// Will return a `databases::error::Error` if unable to connect to database. The `Tracker` is responsible for the persistence. pub fn new( config: Arc, stats_event_sender: Option>, @@ -130,6 +559,8 @@ impl Tracker { /// It handles an announce request. /// + /// # Context: Tracker + /// /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). pub async fn announce(&self, info_hash: &InfoHash, peer: &mut Peer, remote_client_ip: &IpAddr) -> AnnounceData { // code-review: maybe instead of mutating the peer we could just return @@ -163,6 +594,8 @@ impl Tracker { /// It handles a scrape request. /// + /// # Context: Tracker + /// /// BEP 48: [Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). pub async fn scrape(&self, info_hashes: &Vec) -> ScrapeData { let mut scrape_data = ScrapeData::empty(); @@ -178,6 +611,7 @@ impl Tracker { scrape_data } + /// It returns the data for a `scrape` response. async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { let torrents = self.get_torrents().await; match torrents.get(info_hash) { @@ -186,6 +620,168 @@ impl Tracker { } } + /// It loads the torrents from database into memory. It only loads the torrent entry list with the number of seeders for each torrent. + /// Peers data is not persisted. + /// + /// # Context: Tracker + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. 
+ pub async fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { + let persistent_torrents = self.database.load_persistent_torrents().await?; + + let mut torrents = self.torrents.write().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(&info_hash) { + continue; + } + + let torrent_entry = torrent::Entry { + peers: BTreeMap::default(), + completed, + }; + + torrents.insert(info_hash, torrent_entry); + } + + Ok(()) + } + + async fn get_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec { + let read_lock = self.torrents.read().await; + + match read_lock.get(info_hash) { + None => vec![], + Some(entry) => entry.get_peers_for_peer(peer).into_iter().copied().collect(), + } + } + + /// # Context: Tracker + /// + /// Get all torrent peers for a given torrent + pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { + let read_lock = self.torrents.read().await; + + match read_lock.get(info_hash) { + None => vec![], + Some(entry) => entry.get_all_peers().into_iter().copied().collect(), + } + } + + /// It updates the torrent entry in memory, it also stores in the database + /// the torrent info data which is persistent, and finally return the data + /// needed for a `announce` request response. + /// + /// # Context: Tracker + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwarmStats { + // code-review: consider splitting the function in two (command and query segregation). 
+ // `update_torrent_with_peer` and `get_stats` + + let mut torrents = self.torrents.write().await; + + let torrent_entry = match torrents.entry(*info_hash) { + Entry::Vacant(vacant) => vacant.insert(torrent::Entry::new()), + Entry::Occupied(entry) => entry.into_mut(), + }; + + let stats_updated = torrent_entry.update_peer(peer); + + // todo: move this action to a separate worker + if self.config.persistent_torrent_completed_stat && stats_updated { + let _ = self + .database + .save_persistent_torrent(info_hash, torrent_entry.completed) + .await; + } + + let (seeders, completed, leechers) = torrent_entry.get_stats(); + + torrent::SwarmStats { + completed, + seeders, + leechers, + } + } + + pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { + self.torrents.read().await + } + + /// It calculates and returns the general `Tracker` + /// [`TorrentsMetrics`](crate::tracker::TorrentsMetrics) + /// + /// # Context: Tracker + pub async fn get_torrents_metrics(&self) -> TorrentsMetrics { + let mut torrents_metrics = TorrentsMetrics { + seeders: 0, + completed: 0, + leechers: 0, + torrents: 0, + }; + + let db = self.get_torrents().await; + + db.values().for_each(|torrent_entry| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + torrents_metrics.seeders += u64::from(seeders); + torrents_metrics.completed += u64::from(completed); + torrents_metrics.leechers += u64::from(leechers); + torrents_metrics.torrents += 1; + }); + + torrents_metrics + } + + /// Remove inactive peers and (optionally) peerless torrents + /// + /// # Context: Tracker + pub async fn cleanup_torrents(&self) { + let mut torrents_lock = self.torrents.write().await; + + // If we don't need to remove torrents we will use the faster iter + if self.config.remove_peerless_torrents { + torrents_lock.retain(|_, torrent_entry| { + torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); + + if self.config.persistent_torrent_completed_stat { + torrent_entry.completed > 0 || 
!torrent_entry.peers.is_empty() + } else { + !torrent_entry.peers.is_empty() + } + }); + } else { + for (_, torrent_entry) in torrents_lock.iter_mut() { + torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); + } + } + } + + /// It authenticates the peer `key` against the `Tracker` authentication + /// key list. + /// + /// # Errors + /// + /// Will return an error if the the authentication key cannot be verified. + /// + /// # Context: Authentication + pub async fn authenticate(&self, key: &Key) -> Result<(), auth::Error> { + if self.is_private() { + self.verify_auth_key(key).await + } else { + Ok(()) + } + } + + /// It generates a new expiring authentication key. + /// `lifetime` param is the duration in seconds for the new key. + /// The key will be no longer valid after `lifetime` seconds. + /// Authentication keys are used by HTTP trackers. + /// + /// # Context: Authentication + /// /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. @@ -196,6 +792,10 @@ impl Tracker { Ok(auth_key) } + /// It removes an authentication key. + /// + /// # Context: Authentication + /// /// # Errors /// /// Will return a `database::Error` if unable to remove the `key` to the database. @@ -209,6 +809,10 @@ impl Tracker { Ok(()) } + /// It verifies an authentication key. + /// + /// # Context: Authentication + /// /// # Errors /// /// Will return a `key::Error` if unable to get any `auth_key`. @@ -224,6 +828,13 @@ impl Tracker { } } + /// The `Tracker` stores the authentication keys in memory and in the database. + /// In case you need to restart the `Tracker` you can load the keys from the database + /// into memory with this function. Keys are automatically stored in the database when they + /// are generated. + /// + /// # Context: Authentication + /// /// # Errors /// /// Will return a `database::Error` if unable to `load_keys` from the database. 
@@ -240,84 +851,10 @@ impl Tracker { Ok(()) } - /// Adding torrents is not relevant to public trackers. + /// It authenticates and authorizes a UDP tracker request. /// - /// # Errors + /// # Context: Authentication and Authorization /// - /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. - pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.add_torrent_to_database_whitelist(info_hash).await?; - self.add_torrent_to_memory_whitelist(info_hash).await; - Ok(()) - } - - /// It adds a torrent to the whitelist if it has not been whitelisted previously - async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - let is_whitelisted = self.database.is_info_hash_whitelisted(info_hash).await?; - - if is_whitelisted { - return Ok(()); - } - - self.database.add_info_hash_to_whitelist(*info_hash).await?; - - Ok(()) - } - - pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { - self.whitelist.write().await.insert(*info_hash) - } - - /// Removing torrents is not relevant to public trackers. - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. - pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.remove_torrent_from_database_whitelist(info_hash).await?; - self.remove_torrent_from_memory_whitelist(info_hash).await; - Ok(()) - } - - /// # Errors - /// - /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. 
- pub async fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - let is_whitelisted = self.database.is_info_hash_whitelisted(info_hash).await?; - - if !is_whitelisted { - return Ok(()); - } - - self.database.remove_info_hash_from_whitelist(*info_hash).await?; - - Ok(()) - } - - pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool { - self.whitelist.write().await.remove(info_hash) - } - - pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { - self.whitelist.read().await.contains(info_hash) - } - - /// # Errors - /// - /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. - pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { - let whitelisted_torrents_from_database = self.database.load_whitelist().await?; - let mut whitelist = self.whitelist.write().await; - - whitelist.clear(); - - for info_hash in whitelisted_torrents_from_database { - let _ = whitelist.insert(info_hash); - } - - Ok(()) - } - /// # Errors /// /// Will return a `torrent::Error::PeerKeyNotValid` if the `key` is not valid. @@ -325,6 +862,7 @@ impl Tracker { /// Will return a `torrent::Error::PeerNotAuthenticated` if the `key` is `None`. /// /// Will return a `torrent::Error::TorrentNotWhitelisted` if the the Tracker is in listed mode and the `info_hash` is not whitelisted. + #[deprecated(since = "3.0.0", note = "please use `authenticate` and `authorize` instead")] pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), Error> { // todo: this is a deprecated method. // We're splitting authentication and authorization responsibilities. @@ -369,18 +907,10 @@ impl Tracker { Ok(()) } - /// # Errors + /// Right now, there is only authorization when the `Tracker` runs in + /// `listed` or `private_listed` modes. 
/// - /// Will return an error if the the authentication key cannot be verified. - pub async fn authenticate(&self, key: &Key) -> Result<(), auth::Error> { - if self.is_private() { - self.verify_auth_key(key).await - } else { - Ok(()) - } - } - - /// The only authorization process is the whitelist. + /// # Context: Authorization /// /// # Errors /// @@ -401,139 +931,120 @@ impl Tracker { }); } - /// Loading the torrents from database into memory + /// It adds a torrent to the whitelist. + /// Adding torrents is not relevant to public trackers. + /// + /// # Context: Whitelist /// /// # Errors /// - /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. - pub async fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { - let persistent_torrents = self.database.load_persistent_torrents().await?; - - let mut torrents = self.torrents.write().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(&info_hash) { - continue; - } - - let torrent_entry = torrent::Entry { - peers: BTreeMap::default(), - completed, - }; - - torrents.insert(info_hash, torrent_entry); - } - + /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. 
+ pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + self.add_torrent_to_database_whitelist(info_hash).await?; + self.add_torrent_to_memory_whitelist(info_hash).await; Ok(()) } - async fn get_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec { - let read_lock = self.torrents.read().await; + /// It adds a torrent to the whitelist if it has not been whitelisted previously + async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(info_hash).await?; - match read_lock.get(info_hash) { - None => vec![], - Some(entry) => entry.get_peers_for_peer(peer).into_iter().copied().collect(), + if is_whitelisted { + return Ok(()); } - } - /// Get all torrent peers for a given torrent - pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { - let read_lock = self.torrents.read().await; + self.database.add_info_hash_to_whitelist(*info_hash).await?; - match read_lock.get(info_hash) { - None => vec![], - Some(entry) => entry.get_all_peers().into_iter().copied().collect(), - } + Ok(()) } - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwarmStats { - // code-review: consider splitting the function in two (command and query segregation). - // `update_torrent_with_peer` and `get_stats` - - let mut torrents = self.torrents.write().await; + pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.insert(*info_hash) + } - let torrent_entry = match torrents.entry(*info_hash) { - Entry::Vacant(vacant) => vacant.insert(torrent::Entry::new()), - Entry::Occupied(entry) => entry.into_mut(), - }; + /// It removes a torrent from the whitelist. + /// Removing torrents is not relevant to public trackers. 
+ /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. + pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + self.remove_torrent_from_database_whitelist(info_hash).await?; + self.remove_torrent_from_memory_whitelist(info_hash).await; + Ok(()) + } - let stats_updated = torrent_entry.update_peer(peer); + /// It removes a torrent from the whitelist in the database. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. + pub async fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(info_hash).await?; - // todo: move this action to a separate worker - if self.config.persistent_torrent_completed_stat && stats_updated { - let _ = self - .database - .save_persistent_torrent(info_hash, torrent_entry.completed) - .await; + if !is_whitelisted { + return Ok(()); } - let (seeders, completed, leechers) = torrent_entry.get_stats(); + self.database.remove_info_hash_from_whitelist(*info_hash).await?; - torrent::SwarmStats { - completed, - seeders, - leechers, - } + Ok(()) } - pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { - self.torrents.read().await + /// It removes a torrent from the whitelist in memory. + /// + /// # Context: Whitelist + pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.remove(info_hash) } - pub async fn get_torrents_metrics(&self) -> TorrentsMetrics { - let mut torrents_metrics = TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 0, - torrents: 0, - }; + /// It checks if a torrent is whitelisted. 
+ /// + /// # Context: Whitelist + pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { + self.whitelist.read().await.contains(info_hash) + } - let db = self.get_torrents().await; + /// It loads the whitelist from the database. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. + pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { + let whitelisted_torrents_from_database = self.database.load_whitelist().await?; + let mut whitelist = self.whitelist.write().await; - db.values().for_each(|torrent_entry| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - torrents_metrics.seeders += u64::from(seeders); - torrents_metrics.completed += u64::from(completed); - torrents_metrics.leechers += u64::from(leechers); - torrents_metrics.torrents += 1; - }); + whitelist.clear(); - torrents_metrics + for info_hash in whitelisted_torrents_from_database { + let _ = whitelist.insert(info_hash); + } + + Ok(()) } + /// It return the `Tracker` [`statistics::Metrics`]. + /// + /// # Context: Statistics pub async fn get_stats(&self) -> RwLockReadGuard<'_, statistics::Metrics> { self.stats_repository.get_stats().await } + /// It allows to send a statistic events which eventually will be used to update [`statistics::Metrics`]. 
+ /// + /// # Context: Statistics pub async fn send_stats_event(&self, event: statistics::Event) -> Option>> { match &self.stats_event_sender { None => None, Some(stats_event_sender) => stats_event_sender.send_event(event).await, } } - - // Remove inactive peers and (optionally) peerless torrents - pub async fn cleanup_torrents(&self) { - let mut torrents_lock = self.torrents.write().await; - - // If we don't need to remove torrents we will use the faster iter - if self.config.remove_peerless_torrents { - torrents_lock.retain(|_, torrent_entry| { - torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); - - if self.config.persistent_torrent_completed_stat { - torrent_entry.completed > 0 || !torrent_entry.peers.is_empty() - } else { - !torrent_entry.peers.is_empty() - } - }); - } else { - for (_, torrent_entry) in torrents_lock.iter_mut() { - torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); - } - } - } } #[must_use] diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 6a298c9df..3626db93d 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -1,3 +1,18 @@ +//! Peer struct used by the core `Tracker`. +//! +//! A sample peer: +//! +//! ```rust,no_run +//! peer::Peer { +//! peer_id: peer::Id(*b"-qB00000000000000000"), +//! peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), +//! updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), +//! uploaded: NumberOfBytes(0), +//! downloaded: NumberOfBytes(0), +//! left: NumberOfBytes(0), +//! event: AnnounceEvent::Started, +//! } +//! 
``` use std::net::{IpAddr, SocketAddr}; use std::panic::Location; @@ -10,24 +25,49 @@ use crate::shared::bit_torrent::common::{AnnounceEventDef, NumberOfBytesDef}; use crate::shared::clock::utils::ser_unix_time_value; use crate::shared::clock::DurationSinceUnixEpoch; +/// IP version used by the peer to connect to the tracker: IPv4 or IPv6 #[derive(PartialEq, Eq, Debug)] pub enum IPVersion { + /// IPv4, + /// IPv6, } +/// Peer struct used by the core `Tracker`. +/// +/// A sample peer: +/// +/// ```rust,no_run +/// peer::Peer { +/// peer_id: peer::Id(*b"-qB00000000000000000"), +/// peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), +/// updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), +/// uploaded: NumberOfBytes(0), +/// downloaded: NumberOfBytes(0), +/// left: NumberOfBytes(0), +/// event: AnnounceEvent::Started, +/// } +/// ``` #[derive(PartialEq, Eq, Debug, Clone, Serialize, Copy)] pub struct Peer { + /// ID used by the downloader peer pub peer_id: Id, + /// The IP and port this peer is listening on pub peer_addr: SocketAddr, + /// The last time the the tracker receive an announce request from this peer (timestamp) #[serde(serialize_with = "ser_unix_time_value")] pub updated: DurationSinceUnixEpoch, + /// The total amount of bytes uploaded by this peer so far #[serde(with = "NumberOfBytesDef")] pub uploaded: NumberOfBytes, + /// The total amount of bytes downloaded by this peer so far #[serde(with = "NumberOfBytesDef")] pub downloaded: NumberOfBytes, + /// The number of bytes this peer still has to download #[serde(with = "NumberOfBytesDef")] - pub left: NumberOfBytes, // The number of bytes this peer still has to download + pub left: NumberOfBytes, + /// This is an optional key which maps to started, completed, or stopped (or empty, which is the same as not being present). #[serde(with = "AnnounceEventDef")] pub event: AnnounceEvent, } @@ -56,11 +96,24 @@ impl Peer { } } +/// Peer ID. A 20-byte array. 
+/// +/// A string of length 20 which this downloader uses as its id. +/// Each downloader generates its own id at random at the start of a new download. +/// +/// A sample peer ID: +/// +/// ```rust,no_run +/// let peer_id = peer::Id(*b"-qB00000000000000000"); +/// ``` #[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] pub struct Id(pub [u8; 20]); const PEER_ID_BYTES_LEN: usize = 20; +/// Error returned when trying to convert an invalid peer id from another type. +/// +/// Usually because the source format does not contain 20 bytes. #[derive(Error, Debug)] pub enum IdConversionError { #[error("not enough bytes for peer id: {message} {location}")] diff --git a/src/tracker/services/mod.rs b/src/tracker/services/mod.rs index 8667f79a9..deb07a439 100644 --- a/src/tracker/services/mod.rs +++ b/src/tracker/services/mod.rs @@ -1,3 +1,9 @@ +//! Tracker domain services. Core and statistics services. +//! +//! There are two types of service: +//! +//! - [Core tracker services](crate::tracker::services::torrent): related to the tracker main functionalities like getting info about torrents. +//! - [Services for statistics](crate::tracker::services::statistics): related to tracker metrics. Aggregate data about the tracker server. pub mod statistics; pub mod torrent; @@ -7,6 +13,8 @@ use torrust_tracker_configuration::Configuration; use crate::tracker::Tracker; +/// It returns a new tracker building its dependencies. +/// /// # Panics /// /// Will panic if tracker cannot be instantiated. diff --git a/src/tracker/services/statistics/mod.rs b/src/tracker/services/statistics/mod.rs index cae4d1d69..143761420 100644 --- a/src/tracker/services/statistics/mod.rs +++ b/src/tracker/services/statistics/mod.rs @@ -1,3 +1,41 @@ +//! Statistics services. +//! +//! It includes: +//! +//! - A [`factory`](crate::tracker::services::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. +//! 
- A [`get_metrics`](crate::tracker::services::statistics::get_metrics) service to get the [`tracker metrics`](crate::tracker::statistics::Metrics). +//! +//! Tracker metrics are collected using a Publisher-Subscribe pattern. +//! +//! The factory function builds two structs: +//! +//! - An statistics [`EventSender`](crate::tracker::statistics::EventSender) +//! - An statistics [`Repo`](crate::tracker::statistics::Repo) +//! +//! ```rust,no_run +//! let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); +//! ``` +//! +//! The statistics repository is responsible for storing the metrics in memory. +//! The statistics event sender allows sending events related to metrics. +//! There is an event listener that is receiving all the events and processing them with an event handler. +//! Then, the event handler updates the metrics depending on the received event. +//! +//! For example, if you send the event [`Event::Udp4Connect`](crate::tracker::statistics::Event::Udp4Connect): +//! +//! ```rust,no_run +//! let result = event_sender.send_event(Event::Udp4Connect).await; +//! ``` +//! +//! Eventually the counter for UDP connections from IPv4 peers will be increased. +//! +//! ```rust,no_run +//! pub struct Metrics { +//! // ... +//! pub udp4_connections_handled: u64, // This will be incremented +//! // ... +//! } +//! ``` pub mod setup; use std::sync::Arc; @@ -5,12 +43,21 @@ use std::sync::Arc; use crate::tracker::statistics::Metrics; use crate::tracker::{TorrentsMetrics, Tracker}; +/// All the metrics collected by the tracker. #[derive(Debug, PartialEq)] pub struct TrackerMetrics { + /// Domain level metrics. + /// + /// General metrics for all torrents (number of seeders, leechers, etcetera) pub torrents_metrics: TorrentsMetrics, + + /// Application level metrics. Usage statistics/metrics. 
+ /// + /// Metrics about how the tracker is been used (number of udp announce requests, number of http scrape requests, etcetera) pub protocol_metrics: Metrics, } +/// It returns all the [`TrackerMetrics`](crate::tracker::services::statistics::TrackerMetrics) pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { let torrents_metrics = tracker.get_torrents_metrics().await; let stats = tracker.get_stats().await; diff --git a/src/tracker/services/statistics/setup.rs b/src/tracker/services/statistics/setup.rs index b7cb831cb..b8d325ab4 100644 --- a/src/tracker/services/statistics/setup.rs +++ b/src/tracker/services/statistics/setup.rs @@ -1,5 +1,17 @@ +//! Setup for the tracker statistics. +//! +//! The [`factory`](crate::tracker::services::statistics::setup::factory) function builds the structs needed for handling the tracker metrics. use crate::tracker::statistics; +/// It builds the structs needed for handling the tracker metrics. +/// +/// It returns: +/// +/// - An statistics [`EventSender`](crate::tracker::statistics::EventSender) that allows you to send events related to statistics. +/// - An statistics [`Repo`](crate::tracker::statistics::Repo) which is an in-memory repository for the tracker metrics. +/// +/// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics +/// events are sent are received but not dispatched to the handler. #[must_use] pub fn factory(tracker_usage_statistics: bool) -> (Option>, statistics::Repo) { let mut stats_event_sender = None; diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index 30d24eb00..3610d930c 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -1,3 +1,9 @@ +//! Core tracker domain services. +//! +//! There are two services: +//! +//! - [`get_torrent_info`](crate::tracker::services::torrent::get_torrent_info): it returns all the data about one torrent. +//! 
- [`get_torrents`](crate::tracker::services::torrent::get_torrents): it returns data about some torrent in bulk excluding the peer list. use std::sync::Arc; use serde::Deserialize; @@ -6,26 +12,42 @@ use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::peer::Peer; use crate::tracker::Tracker; +/// It contains all the information the tracker has about a torrent #[derive(Debug, PartialEq)] pub struct Info { + /// The infohash of the torrent this data is related to pub info_hash: InfoHash, + /// The total number of seeders for this torrent. Peer that actively serving a full copy of the torrent data pub seeders: u64, + /// The total number of peers that have ever complete downloading this torrent pub completed: u64, + /// The total number of leechers for this torrent. Peers that actively downloading this torrent pub leechers: u64, + /// The swarm: the list of peers that are actively trying to download or serving this torrent pub peers: Option>, } +/// It contains only part of the information the tracker has about a torrent +/// +/// It contains the same data as [Info](crate::tracker::services::torrent::Info) but without the list of peers in the swarm. #[derive(Debug, PartialEq, Clone)] pub struct BasicInfo { + /// The infohash of the torrent this data is related to pub info_hash: InfoHash, + /// The total number of seeders for this torrent. Peer that actively serving a full copy of the torrent data pub seeders: u64, + /// The total number of peers that have ever complete downloading this torrent pub completed: u64, + /// The total number of leechers for this torrent. Peers that actively downloading this torrent pub leechers: u64, } +/// A struct to keep information about the page when results are being paginated #[derive(Deserialize)] pub struct Pagination { + /// The page number, starting at 0 pub offset: u32, + /// Page size. 
The number of results per page pub limit: u32, } @@ -69,6 +91,7 @@ impl Default for Pagination { } } +/// It returns all the information the tracker has about one torrent in a [Info](crate::tracker::services::torrent::Info) struct. pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { let db = tracker.get_torrents().await; @@ -93,6 +116,7 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op }) } +/// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`](crate::tracker::services::torrent::BasicInfo) struct, excluding the peer list. pub async fn get_torrents(tracker: Arc, pagination: &Pagination) -> Vec { let db = tracker.get_torrents().await; diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index f9079962c..03f4fc081 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -1,3 +1,22 @@ +//! Structs to collect and keep tracker metrics. +//! +//! The tracker collects metrics such as: +//! +//! - Number of connections handled +//! - Number of `announce` requests handled +//! - Number of `scrape` request handled +//! +//! These metrics are collected for each connection type: UDP and HTTP and +//! also for each IP version used by the peers: IPv4 and IPv6. +//! +//! > Notice: that UDP tracker have an specific `connection` request. For the HTTP metrics the counter counts one connection for each `announce` or `scrape` request. +//! +//! The data is collected by using an `event-sender -> event listener` model. +//! +//! The tracker uses an [`statistics::EventSender`](crate::tracker::statistics::EventSender) instance to send an event. +//! The [`statistics::Keeper`](crate::tracker::statistics::Keeper) listens to new events and uses the [`statistics::Repo`](crate::tracker::statistics::Repo) to upgrade and store metrics. +//! +//! See the [`statistics::Event`](crate::tracker::statistics::Event) enum to check which events are available. 
use std::sync::Arc; use async_trait::async_trait; @@ -9,6 +28,14 @@ use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; const CHANNEL_BUFFER_SIZE: usize = 65_535; +/// An statistics event. It is used to collect tracker metrics. +/// +/// - `Tcp` prefix means the event was triggered by the HTTP tracker +/// - `Udp` prefix means the event was triggered by the UDP tracker +/// - `4` or `6` prefixes means the IP version used by the peer +/// - Finally the event suffix is the type of request: `announce`, `scrape` or `connection` +/// +/// > NOTE: HTTP trackers do not use `connection` requests. #[derive(Debug, PartialEq, Eq)] pub enum Event { // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } @@ -25,6 +52,14 @@ pub enum Event { Udp6Scrape, } +/// Metrics collected by the tracker. +/// +/// - Number of connections handled +/// - Number of `announce` requests handled +/// - Number of `scrape` request handled +/// +/// These metrics are collected for each connection type: UDP and HTTP +/// and also for each IP version used by the peers: IPv4 and IPv6. #[derive(Debug, PartialEq, Default)] pub struct Metrics { pub tcp4_connections_handled: u64, @@ -41,6 +76,10 @@ pub struct Metrics { pub udp6_scrapes_handled: u64, } +/// The service responsible for keeping tracker metrics (listening to statistics events and handle them). +/// +/// It actively listen to new statistics events. When it receives a new event +/// it accordingly increases the counters. 
pub struct Keeper { pub repository: Repo, } @@ -131,12 +170,17 @@ async fn event_handler(event: Event, stats_repository: &Repo) { debug!("stats: {:?}", stats_repository.get_stats().await); } +/// A trait to allow sending statistics events #[async_trait] #[cfg_attr(test, automock)] pub trait EventSender: Sync + Send { async fn send_event(&self, event: Event) -> Option>>; } +/// An [`statistics::EventSender`](crate::tracker::statistics::EventSender) implementation. +/// +/// It uses a channel sender to send the statistic events. The channel is created by a +/// [`statistics::Keeper`](crate::tracker::statistics::Keeper) pub struct Sender { sender: mpsc::Sender, } @@ -148,6 +192,7 @@ impl EventSender for Sender { } } +/// A repository for the tracker metrics. #[derive(Clone)] pub struct Repo { pub stats: Arc>, diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 882e52ff1..8eb557f1e 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -1,3 +1,33 @@ +//! Structs to store the swarm data. +//! +//! There are two main data structures: +//! +//! - A torrent [`Entry`](crate::tracker::torrent::Entry): it contains all the information stored by the tracker for one torrent. +//! - The [`SwarmMetadata`](crate::tracker::torrent::SwarmMetadata): it contains aggregate information that can be derived from the torrent entries. +//! +//! A "swarm" is a network of peers that are trying to download the same torrent. +//! +//! The torrent entry contains the "swarm" data, which is basically the list of peers in the swarm. +//! That's the most valuable information the peers want to get from the tracker, because it allows them to +//! start downloading the torrent from those peers. +//! +//! > **NOTICE**: that both swarm data (torrent entries) and swarm metadata (aggregate counters) are related to only one torrent. +//! +//! The "swarm metadata" contains aggregate data derived from the torrent entries. There are two types of data: +//! +//! 
- For **active peers**: metrics related to the current active peers in the swarm. +//! - **Historical data**: since the tracker started running. +//! +//! The tracker collects metrics for: +//! +//! - The number of peers that have completed downloading the torrent since the tracker started collecting metrics. +//! - The number of peers that have completed downloading the torrent and are still active, that means they are actively participating in the network, +//! by announcing themselves periodically to the tracker. Since they have completed downloading they have a full copy of the torrent data. Peers with a +//! full copy of the data are called "seeders". +//! - The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network. +//! Peers that do not have a full copy of the torrent data are called "leechers". +//! +//! > **NOTICE**: that both [`SwarmMetadata`](crate::tracker::torrent::SwarmMetadata) and [`SwarmStats`](crate::tracker::torrent::SwarmStats) contain the same information. [`SwarmMetadata`](crate::tracker::torrent::SwarmMetadata) is using the names used on [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). use std::time::Duration; use aquatic_udp_protocol::AnnounceEvent; @@ -7,21 +37,33 @@ use super::peer::{self, Peer}; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; use crate::shared::clock::{Current, TimeNow}; +/// A data structure containing all the information about a torrent in the tracker. +/// +/// This is the tracker entry for a given torrent and contains the swarm data, +/// that's the list of all the peers trying to download the same torrent. +/// The tracker keeps one entry like this for every torrent. 
#[derive(Serialize, Deserialize, Clone, Debug)] pub struct Entry { + /// The swarm: a network of peers that are all trying to download the torrent associated to this entry #[serde(skip)] pub peers: std::collections::BTreeMap, + /// The number of peers that have ever completed downloading the torrent associated to this entry pub completed: u32, } /// Swarm statistics for one torrent. /// Swarm metadata dictionary in the scrape response. -/// BEP 48: +/// +/// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) #[derive(Debug, PartialEq, Default)] pub struct SwarmMetadata { - pub complete: u32, // The number of active peers that have completed downloading (seeders) - pub downloaded: u32, // The number of peers that have ever completed downloading - pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) + /// The number of peers that have ever completed downloading + pub downloaded: u32, + + /// The number of active peers that have completed downloading (seeders) + pub complete: u32, + /// The number of active peers that have not completed downloading (leechers) + pub incomplete: u32, } impl SwarmMetadata { @@ -32,12 +74,17 @@ impl SwarmMetadata { } /// Swarm statistics for one torrent. -/// Alternative struct for swarm metadata in scrape response. 
+/// +/// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) #[derive(Debug, PartialEq, Default)] pub struct SwarmStats { - pub completed: u32, // The number of peers that have ever completed downloading - pub seeders: u32, // The number of active peers that have completed downloading (seeders) - pub leechers: u32, // The number of active peers that have not completed downloading (leechers) + /// The number of peers that have ever completed downloading + pub completed: u32, + + /// The number of active peers that have completed downloading (seeders) + pub seeders: u32, + /// The number of active peers that have not completed downloading (leechers) + pub leechers: u32, } impl Entry { @@ -49,7 +96,10 @@ impl Entry { } } - // Update peer and return completed (times torrent has been downloaded) + /// It updates a peer and returns true if the number of complete downloads have increased. + /// + /// The number of peers that have complete downloading is synchronously updated when peers are updated. + /// That's the total torrent downloads counter. pub fn update_peer(&mut self, peer: &peer::Peer) -> bool { let mut did_torrent_stats_change: bool = false; @@ -73,14 +123,15 @@ impl Entry { did_torrent_stats_change } - /// Get all peers, limiting the result to the maximum number of scrape torrents. + /// Get all swarm peers, limiting the result to the maximum number of scrape torrents. #[must_use] pub fn get_all_peers(&self) -> Vec<&peer::Peer> { self.peers.values().take(MAX_SCRAPE_TORRENTS as usize).collect() } - /// Returns the list of peers for a given client. - /// It filters out the input peer. + /// It returns the list of peers for a given peer client. + /// + /// It filters out the input peer, typically because we want to return this list of peers to that client peer. 
#[must_use] pub fn get_peers_for_peer(&self, client: &Peer) -> Vec<&peer::Peer> { self.peers @@ -92,6 +143,7 @@ impl Entry { .collect() } + /// It returns the swarm metadata (statistics) as a tuple `(seeders, completed, leechers)` #[allow(clippy::cast_possible_truncation)] #[must_use] pub fn get_stats(&self) -> (u32, u32, u32) { @@ -100,6 +152,7 @@ impl Entry { (seeders, self.completed, leechers) } + /// It returns the swarm metadata (statistics) as an struct #[must_use] pub fn get_swarm_metadata(&self) -> SwarmMetadata { // code-review: consider using always this function instead of `get_stats`. @@ -111,6 +164,7 @@ impl Entry { } } + /// It removes peer from the swarm that have not been updated for more than `max_peer_timeout` seconds pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); self.peers.retain(|_, peer| peer.updated > current_cutoff); From eab4ebeb6b1a251fddf89047c1ca072785359e1a Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 28 Mar 2023 11:56:07 +0200 Subject: [PATCH 0499/1003] feat: add lint for rust-documentation --- .github/workflows/test_build_release.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 3b9a9a44a..d8c25bd56 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -34,6 +34,8 @@ jobs: run: cargo check --all-targets - name: Clippy Rust Code run: cargo clippy --all-targets -- -D clippy::pedantic + - name: Test Documentation + run: cargo test --doc - uses: taiki-e/install-action@cargo-llvm-cov - uses: taiki-e/install-action@nextest - name: Run Tests From e33cb6df85f132930c7dd2cc6598a73defebb162 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 28 Mar 2023 12:08:07 +0200 Subject: [PATCH 0500/1003] doc: fix basic doc linting errors --- src/tracker/auth.rs | 16 
+++++++---- src/tracker/databases/driver.rs | 14 +++++++--- src/tracker/mod.rs | 37 +++++++++++++++++++++----- src/tracker/peer.rs | 20 ++++++++++++-- src/tracker/services/statistics/mod.rs | 4 +-- 5 files changed, 71 insertions(+), 20 deletions(-) diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 9068a94f0..9fe111e5e 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -12,6 +12,9 @@ //! Keys are stored in this struct: //! //! ```rust,no_run +//! use torrust_tracker::tracker::auth::Key; +//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; +//! //! pub struct ExpiringKey { //! /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` //! pub key: Key, @@ -23,16 +26,16 @@ //! You can generate a new key valid for `9999` seconds and `0` nanoseconds from the current time with the following: //! //! ```rust,no_run -//! let expiring_key = auth::generate(Duration::new(9999, 0)); +//! use torrust_tracker::tracker::auth; +//! use std::time::Duration; //! -//! assert!(auth::verify(&expiring_key).is_ok()); -//! ``` +//! let expiring_key = auth::generate(Duration::new(9999, 0)); //! -//! And you can later verify it with: +//! // And you can later verify it with: //! -//! ```rust,no_run //! assert!(auth::verify(&expiring_key).is_ok()); //! ``` + use std::panic::Location; use std::str::FromStr; use std::sync::Arc; @@ -135,6 +138,9 @@ pub struct Key(String); /// Error returned when a key cannot be parsed from a string. 
/// /// ```rust,no_run +/// use torrust_tracker::tracker::auth::Key; +/// use std::str::FromStr; +/// /// let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; /// let key = Key::from_str(key_string); /// diff --git a/src/tracker/databases/driver.rs b/src/tracker/databases/driver.rs index ef9a4eb07..7115bae8e 100644 --- a/src/tracker/databases/driver.rs +++ b/src/tracker/databases/driver.rs @@ -14,17 +14,23 @@ use super::{Builder, Database}; /// Example for `SQLite3`: /// /// ```rust,no_run -/// let db_driver = "Sqlite3".to_string(); +/// use torrust_tracker::tracker::databases; +/// use torrust_tracker_primitives::DatabaseDriver; +/// +/// let db_driver = DatabaseDriver::Sqlite3; /// let db_path = "./storage/database/data.db".to_string(); -/// let database = databases::driver::build(&db_driver, &db_path)?; +/// let database = databases::driver::build(&db_driver, &db_path); /// ``` /// /// Example for `MySQL`: /// /// ```rust,no_run -/// let db_driver = "MySQL".to_string(); +/// use torrust_tracker::tracker::databases; +/// use torrust_tracker_primitives::DatabaseDriver; +/// +/// let db_driver = DatabaseDriver::MySQL; /// let db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker".to_string(); -/// let database = databases::driver::build(&db_driver, &db_path)?; +/// let database = databases::driver::build(&db_driver, &db_path); /// ``` /// /// Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index ce69b6125..faabbe095 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -55,11 +55,19 @@ //! Once you have instantiated the `Tracker` you can `announce` a new [`peer`](crate::tracker::peer::Peer) with: //! //! ```rust,no_run -//! let info_hash = InfoHash { -//! "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() -//! }; +//! use torrust_tracker::tracker::peer; +//! use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +//! 
use torrust_tracker::shared::clock::DurationSinceUnixEpoch; +//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +//! use std::net::SocketAddr; +//! use std::net::IpAddr; +//! use std::net::Ipv4Addr; +//! use std::str::FromStr; +//! +//! +//! let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); //! -//! let peer = Peer { +//! let peer = peer::Peer { //! peer_id: peer::Id(*b"-qB00000000000000001"), //! peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), //! updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), @@ -67,10 +75,11 @@ //! downloaded: NumberOfBytes(0), //! left: NumberOfBytes(0), //! event: AnnounceEvent::Completed, -//! } +//! }; //! //! let peer_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); -//! +//! ``` +//! ```rust,ignore //! let announce_data = tracker.announce(&info_hash, &mut peer, &peer_ip).await; //! ``` //! @@ -87,6 +96,8 @@ //! The returned struct is: //! //! ```rust,no_run +//! use torrust_tracker::tracker::peer::Peer; +//! //! pub struct AnnounceData { //! pub peers: Vec, //! pub swarm_stats: SwarmStats, @@ -124,6 +135,9 @@ //! The returned struct is: //! //! ```rust,no_run +//! use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +//! use std::collections::HashMap; +//! //! pub struct ScrapeData { //! pub files: HashMap, //! } @@ -150,6 +164,9 @@ //! There are two data structures for infohashes: byte arrays and hex strings: //! //! ```rust,no_run +//! use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +//! use std::str::FromStr; +//! //! let info_hash: InfoHash = [255u8; 20].into(); //! //! assert_eq!( @@ -233,6 +250,12 @@ //! A `Peer` is the struct used by the `Tracker` to keep peers data: //! //! ```rust,no_run +//! use torrust_tracker::tracker::peer::Id; +//! use std::net::SocketAddr; +//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; +//! use aquatic_udp_protocol::NumberOfBytes; +//! 
use aquatic_udp_protocol::AnnounceEvent; +//! //! pub struct Peer { //! pub peer_id: Id, // The peer ID //! pub peer_addr: SocketAddr, // Peer socket address @@ -389,7 +412,7 @@ //! //! For example, the HTTP tracker would send an event like the following when it handles an `announce` request received from a peer using IP version 4. //! -//! ```rust,no_run +//! ```rust,ignore //! tracker.send_stats_event(statistics::Event::Tcp4Announce).await //! ``` //! diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 3626db93d..a54346280 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -3,6 +3,13 @@ //! A sample peer: //! //! ```rust,no_run +//! use torrust_tracker::tracker::peer; +//! use std::net::SocketAddr; +//! use std::net::IpAddr; +//! use std::net::Ipv4Addr; +//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; +//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +//! //! peer::Peer { //! peer_id: peer::Id(*b"-qB00000000000000000"), //! peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), @@ -11,7 +18,7 @@ //! downloaded: NumberOfBytes(0), //! left: NumberOfBytes(0), //! event: AnnounceEvent::Started, -//! } +//! }; //! 
``` use std::net::{IpAddr, SocketAddr}; use std::panic::Location; @@ -39,6 +46,13 @@ pub enum IPVersion { /// A sample peer: /// /// ```rust,no_run +/// use torrust_tracker::tracker::peer; +/// use std::net::SocketAddr; +/// use std::net::IpAddr; +/// use std::net::Ipv4Addr; +/// use torrust_tracker::shared::clock::DurationSinceUnixEpoch; +/// use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +/// /// peer::Peer { /// peer_id: peer::Id(*b"-qB00000000000000000"), /// peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), @@ -47,7 +61,7 @@ pub enum IPVersion { /// downloaded: NumberOfBytes(0), /// left: NumberOfBytes(0), /// event: AnnounceEvent::Started, -/// } +/// }; /// ``` #[derive(PartialEq, Eq, Debug, Clone, Serialize, Copy)] pub struct Peer { @@ -104,6 +118,8 @@ impl Peer { /// A sample peer ID: /// /// ```rust,no_run +/// use torrust_tracker::tracker::peer; +/// /// let peer_id = peer::Id(*b"-qB00000000000000000"); /// ``` #[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] diff --git a/src/tracker/services/statistics/mod.rs b/src/tracker/services/statistics/mod.rs index 143761420..ac3ba510e 100644 --- a/src/tracker/services/statistics/mod.rs +++ b/src/tracker/services/statistics/mod.rs @@ -12,7 +12,7 @@ //! - An statistics [`EventSender`](crate::tracker::statistics::EventSender) //! - An statistics [`Repo`](crate::tracker::statistics::Repo) //! -//! ```rust,no_run +//! ```rust,ignore //! let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); //! ``` //! @@ -23,7 +23,7 @@ //! //! For example, if you send the event [`Event::Udp4Connect`](crate::tracker::statistics::Event::Udp4Connect): //! -//! ```rust,no_run +//! ```rust,ignore //! let result = event_sender.send_event(Event::Udp4Connect).await; //! ``` //! 
From b7e78ab4eab5d7784c7d65aee8bbcb59f803f74e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 24 Mar 2023 17:25:30 +0000 Subject: [PATCH 0501/1003] docs: [#260] crate docs for shared mod --- cSpell.json | 1 + ...ndelbrot_2048x2048_infohash_v1.png.torrent | Bin 0 -> 375 bytes ...rot_2048x2048_infohash_v1.png.torrent.json | 10 ++ src/shared/bit_torrent/common.rs | 31 +++- src/shared/bit_torrent/info_hash.rs | 140 +++++++++++++++++- src/shared/bit_torrent/mod.rs | 1 + src/shared/clock/mod.rs | 67 +++++++++ src/shared/clock/static_time.rs | 3 + src/shared/clock/time_extent.rs | 110 ++++++++++++++ src/shared/clock/utils.rs | 2 + src/shared/crypto/ephemeral_instance_keys.rs | 5 + src/shared/crypto/keys.rs | 17 +++ src/shared/crypto/mod.rs | 1 + src/shared/mod.rs | 5 + src/tracker/torrent.rs | 13 +- 15 files changed, 400 insertions(+), 6 deletions(-) create mode 100644 docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent create mode 100644 docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent.json diff --git a/cSpell.json b/cSpell.json index 88794b2ad..b0ad4caf7 100644 --- a/cSpell.json +++ b/cSpell.json @@ -37,6 +37,7 @@ "leechers", "libtorrent", "Lphant", + "metainfo", "middlewares", "mockall", "multimap", diff --git a/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent b/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent new file mode 100644 index 0000000000000000000000000000000000000000..1a08a811bfc6bfe1f10e2362b951343b11c09dcf GIT binary patch literal 375 zcmYc>G_Xo8N=+V(0!*Kov4u&h8APxcXogko#6sS=zPmbRhbV+xRV(U^YO{Z+ zc)$Gk_uV<=(-hNGEsowiySkg>eUHlQO<#|k)w<7pfak;7`$ZF;Zgcce&T*gJH{Wd) z4{zVa>HptlbjO=4Zogi5blJ~7y=8_qdq4bqr2O+t!}<^gmY=PeGUc5I%lxjb+Ml@W zP{YjC+u{@S9agCBGg@prmz}3FW5$)a$)4*KBGT0IXJqFZnVhZV5RH?%z`z;4+Q3>N z;ou#i_G_P?yZRTTbD9~3ep2|Bvs5%+r*mqiSa4&l;m2)z&7IB&a&P*h?YXpd+n=%$ Im&vKA0ABB%Bme*a literal 0 HcmV?d00001 diff --git a/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent.json 
b/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent.json new file mode 100644 index 000000000..caaa1a417 --- /dev/null +++ b/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent.json @@ -0,0 +1,10 @@ +{ + "created by": "qBittorrent v4.4.1", + "creation date": 1679674628, + "info": { + "length": 172204, + "name": "mandelbrot_2048x2048.png", + "piece length": 16384, + "pieces": "7D 91 71 0D 9D 4D BA 88 9B 54 20 54 D5 26 72 8D 5A 86 3F E1 21 DF 77 C7 F7 BB 6C 77 96 21 66 25 38 C5 D9 CD AB 8B 08 EF 8C 24 9B B2 F5 C4 CD 2A DF 0B C0 0C F0 AD DF 72 90 E5 B6 41 4C 23 6C 47 9B 8E 9F 46 AA 0C 0D 8E D1 97 FF EE 68 8B 5F 34 A3 87 D7 71 C5 A6 F9 8E 2E A6 31 7C BD F0 F9 E2 23 F9 CC 80 AF 54 00 04 F9 85 69 1C 77 89 C1 76 4E D6 AA BF 61 A6 C2 80 99 AB B6 5F 60 2F 40 A8 25 BE 32 A3 3D 9D 07 0C 79 68 98 D4 9D 63 49 AF 20 58 66 26 6F 98 6B 6D 32 34 CD 7D 08 15 5E 1A D0 00 09 57 AB 30 3B 20 60 C1 DC 12 87 D6 F3 E7 45 4F 70 67 09 36 31 55 F2 20 F6 6C A5 15 6F 2C 89 95 69 16 53 81 7D 31 F1 B6 BD 37 42 CC 11 0B B2 FC 2B 49 A5 85 B6 FC 76 74 44 93" + } +} \ No newline at end of file diff --git a/src/shared/bit_torrent/common.rs b/src/shared/bit_torrent/common.rs index 527ae9ebc..fd52e098c 100644 --- a/src/shared/bit_torrent/common.rs +++ b/src/shared/bit_torrent/common.rs @@ -1,27 +1,56 @@ +//! `BitTorrent` protocol primitive types +//! +//! [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde::{Deserialize, Serialize}; +/// The maximum number of torrents that can be returned in an `scrape` response. +/// It's also the maximum number of peers returned in an `announce` response. +/// +/// The [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html) +/// defines this limit: +/// +/// "Up to about 74 torrents can be scraped at once. A full scrape can't be done +/// with this protocol." +/// +/// The [BEP 48. 
Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +/// does not specifically mention this limit, but the limit is being used for +/// both the UDP and HTTP trackers since it's applied at the domain level. pub const MAX_SCRAPE_TORRENTS: u8 = 74; + +/// HTTP tracker authentication key length. +/// +/// See function to [`generate`](crate::tracker::auth::generate) the +/// [`ExpiringKeys`](crate::tracker::auth::ExpiringKey) for more information. pub const AUTH_KEY_LENGTH: usize = 32; #[repr(u32)] #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] -pub enum Actions { +enum Actions { + // todo: it seems this enum is not used anywhere. Values match the ones in + // aquatic_udp_protocol::request::Request::from_bytes. Connect = 0, Announce = 1, Scrape = 2, Error = 3, } +/// Announce events. Described on the +/// [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) #[derive(Serialize, Deserialize)] #[serde(remote = "AnnounceEvent")] pub enum AnnounceEventDef { + /// The peer has started downloading the torrent. Started, + /// The peer has ceased downloading the torrent. Stopped, + /// The peer has completed downloading the torrent. Completed, + /// This is one of the announcements done at regular intervals. None, } +/// Number of bytes downloaded, uploaded or pending to download (left) by the peer. #[derive(Serialize, Deserialize)] #[serde(remote = "NumberOfBytes")] pub struct NumberOfBytesDef(pub i64); diff --git a/src/shared/bit_torrent/info_hash.rs b/src/shared/bit_torrent/info_hash.rs index fd7602cdd..7392c791d 100644 --- a/src/shared/bit_torrent/info_hash.rs +++ b/src/shared/bit_torrent/info_hash.rs @@ -1,13 +1,147 @@ +//! A `BitTorrent` `InfoHash`. It's a unique identifier for a `BitTorrent` torrent. +//! +//! "The 20-byte sha1 hash of the bencoded form of the info value +//! from the metainfo file." +//! +//! See [BEP 3. 
The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! for the official specification. +//! +//! This modules provides a type that can be used to represent infohashes. +//! +//! > **NOTICE**: It only supports Info Hash v1. +//! +//! Typically infohashes are represented as hex strings, but internally they are +//! a 20-byte array. +//! +//! # Calculating the info-hash of a torrent file +//! +//! A sample torrent: +//! +//! - Torrent file: `mandelbrot_2048x2048_infohash_v1.png.torrent` +//! - File: `mandelbrot_2048x2048.png` +//! - Info Hash v1: `5452869be36f9f3350ccee6b4544e7e76caaadab` +//! - Sha1 hash of the info dictionary: `5452869BE36F9F3350CCEE6B4544E7E76CAAADAB` +//! +//! A torrent file is a binary file encoded with [Bencode encoding](https://en.wikipedia.org/wiki/Bencode): +//! +//! ```text +//! 0000000: 6431 303a 6372 6561 7465 6420 6279 3138 d10:created by18 +//! 0000010: 3a71 4269 7474 6f72 7265 6e74 2076 342e :qBittorrent v4. +//! 0000020: 342e 3131 333a 6372 6561 7469 6f6e 2064 4.113:creation d +//! 0000030: 6174 6569 3136 3739 3637 3436 3238 6534 atei1679674628e4 +//! 0000040: 3a69 6e66 6f64 363a 6c65 6e67 7468 6931 :infod6:lengthi1 +//! 0000050: 3732 3230 3465 343a 6e61 6d65 3234 3a6d 72204e4:name24:m +//! 0000060: 616e 6465 6c62 726f 745f 3230 3438 7832 andelbrot_2048x2 +//! 0000070: 3034 382e 706e 6731 323a 7069 6563 6520 048.png12:piece +//! 0000080: 6c65 6e67 7468 6931 3633 3834 6536 3a70 lengthi16384e6:p +//! 0000090: 6965 6365 7332 3230 3a7d 9171 0d9d 4dba ieces220:}.q..M. +//! 00000a0: 889b 5420 54d5 2672 8d5a 863f e121 df77 ..T T.&r.Z.?.!.w +//! 00000b0: c7f7 bb6c 7796 2166 2538 c5d9 cdab 8b08 ...lw.!f%8...... +//! 00000c0: ef8c 249b b2f5 c4cd 2adf 0bc0 0cf0 addf ..$.....*....... +//! 00000d0: 7290 e5b6 414c 236c 479b 8e9f 46aa 0c0d r...AL#lG...F... +//! 00000e0: 8ed1 97ff ee68 8b5f 34a3 87d7 71c5 a6f9 .....h._4...q... +//! 00000f0: 8e2e a631 7cbd f0f9 e223 f9cc 80af 5400 ...1|....#....T. +//! 
0000100: 04f9 8569 1c77 89c1 764e d6aa bf61 a6c2 ...i.w..vN...a.. +//! 0000110: 8099 abb6 5f60 2f40 a825 be32 a33d 9d07 ...._`/@.%.2.=.. +//! 0000120: 0c79 6898 d49d 6349 af20 5866 266f 986b .yh...cI. Xf&o.k +//! 0000130: 6d32 34cd 7d08 155e 1ad0 0009 57ab 303b m24.}..^....W.0; +//! 0000140: 2060 c1dc 1287 d6f3 e745 4f70 6709 3631 `.......EOpg.61 +//! 0000150: 55f2 20f6 6ca5 156f 2c89 9569 1653 817d U. .l..o,..i.S.} +//! 0000160: 31f1 b6bd 3742 cc11 0bb2 fc2b 49a5 85b6 1...7B.....+I... +//! 0000170: fc76 7444 9365 65 .vtD.ee +//! ``` +//! +//! You can generate that output with the command: +//! +//! ```text +//! xxd mandelbrot_2048x2048_infohash_v1.png.torrent +//! ``` +//! +//! And you can show only the bytes (hexadecimal): +//! +//! ```text +//! 6431303a6372656174656420627931383a71426974746f7272656e742076 +//! 342e342e3131333a6372656174696f6e2064617465693136373936373436 +//! 323865343a696e666f64363a6c656e6774686931373232303465343a6e61 +//! 6d6532343a6d616e64656c62726f745f3230343878323034382e706e6731 +//! 323a7069656365206c656e67746869313633383465363a70696563657332 +//! 32303a7d91710d9d4dba889b542054d526728d5a863fe121df77c7f7bb6c +//! 779621662538c5d9cdab8b08ef8c249bb2f5c4cd2adf0bc00cf0addf7290 +//! e5b6414c236c479b8e9f46aa0c0d8ed197ffee688b5f34a387d771c5a6f9 +//! 8e2ea6317cbdf0f9e223f9cc80af540004f985691c7789c1764ed6aabf61 +//! a6c28099abb65f602f40a825be32a33d9d070c796898d49d6349af205866 +//! 266f986b6d3234cd7d08155e1ad0000957ab303b2060c1dc1287d6f3e745 +//! 4f706709363155f220f66ca5156f2c8995691653817d31f1b6bd3742cc11 +//! 0bb2fc2b49a585b6fc767444936565 +//! ``` +//! +//! You can generate that output with the command: +//! +//! ```text +//! `xxd -ps mandelbrot_2048x2048_infohash_v1.png.torrent`. +//! ``` +//! +//! The same data can be represented in a JSON format: +//! +//! ```json +//! { +//! "created by": "qBittorrent v4.4.1", +//! "creation date": 1679674628, +//! "info": { +//! "length": 172204, +//! "name": "mandelbrot_2048x2048.png", +//! 
"piece length": 16384, +//! "pieces": "7D 91 71 0D 9D 4D BA 88 9B 54 20 54 D5 26 72 8D 5A 86 3F E1 21 DF 77 C7 F7 BB 6C 77 96 21 66 25 38 C5 D9 CD AB 8B 08 EF 8C 24 9B B2 F5 C4 CD 2A DF 0B C0 0C F0 AD DF 72 90 E5 B6 41 4C 23 6C 47 9B 8E 9F 46 AA 0C 0D 8E D1 97 FF EE 68 8B 5F 34 A3 87 D7 71 C5 A6 F9 8E 2E A6 31 7C BD F0 F9 E2 23 F9 CC 80 AF 54 00 04 F9 85 69 1C 77 89 C1 76 4E D6 AA BF 61 A6 C2 80 99 AB B6 5F 60 2F 40 A8 25 BE 32 A3 3D 9D 07 0C 79 68 98 D4 9D 63 49 AF 20 58 66 26 6F 98 6B 6D 32 34 CD 7D 08 15 5E 1A D0 00 09 57 AB 30 3B 20 60 C1 DC 12 87 D6 F3 E7 45 4F 70 67 09 36 31 55 F2 20 F6 6C A5 15 6F 2C 89 95 69 16 53 81 7D 31 F1 B6 BD 37 42 CC 11 0B B2 FC 2B 49 A5 85 B6 FC 76 74 44 93" +//! } +//! } +//! ``` +//! +//! The JSON object was generated with: +//! +//! As you can see, there is a `info` attribute: +//! +//! ```json +//! { +//! "length": 172204, +//! "name": "mandelbrot_2048x2048.png", +//! "piece length": 16384, +//! "pieces": "7D 91 71 0D 9D 4D BA 88 9B 54 20 54 D5 26 72 8D 5A 86 3F E1 21 DF 77 C7 F7 BB 6C 77 96 21 66 25 38 C5 D9 CD AB 8B 08 EF 8C 24 9B B2 F5 C4 CD 2A DF 0B C0 0C F0 AD DF 72 90 E5 B6 41 4C 23 6C 47 9B 8E 9F 46 AA 0C 0D 8E D1 97 FF EE 68 8B 5F 34 A3 87 D7 71 C5 A6 F9 8E 2E A6 31 7C BD F0 F9 E2 23 F9 CC 80 AF 54 00 04 F9 85 69 1C 77 89 C1 76 4E D6 AA BF 61 A6 C2 80 99 AB B6 5F 60 2F 40 A8 25 BE 32 A3 3D 9D 07 0C 79 68 98 D4 9D 63 49 AF 20 58 66 26 6F 98 6B 6D 32 34 CD 7D 08 15 5E 1A D0 00 09 57 AB 30 3B 20 60 C1 DC 12 87 D6 F3 E7 45 4F 70 67 09 36 31 55 F2 20 F6 6C A5 15 6F 2C 89 95 69 16 53 81 7D 31 F1 B6 BD 37 42 CC 11 0B B2 FC 2B 49 A5 85 B6 FC 76 74 44 93" +//! } +//! ``` +//! +//! The infohash is the [SHA1](https://en.wikipedia.org/wiki/SHA-1) hash +//! of the `info` attribute. That is, the SHA1 hash of: +//! +//! ```text +//! 64363a6c656e6774686931373232303465343a6e61 +//! d6532343a6d616e64656c62726f745f3230343878323034382e706e6731 +//! 23a7069656365206c656e67746869313633383465363a70696563657332 +//! 
2303a7d91710d9d4dba889b542054d526728d5a863fe121df77c7f7bb6c +//! 79621662538c5d9cdab8b08ef8c249bb2f5c4cd2adf0bc00cf0addf7290 +//! 5b6414c236c479b8e9f46aa0c0d8ed197ffee688b5f34a387d771c5a6f9 +//! e2ea6317cbdf0f9e223f9cc80af540004f985691c7789c1764ed6aabf61 +//! 6c28099abb65f602f40a825be32a33d9d070c796898d49d6349af205866 +//! 66f986b6d3234cd7d08155e1ad0000957ab303b2060c1dc1287d6f3e745 +//! f706709363155f220f66ca5156f2c8995691653817d31f1b6bd3742cc11 +//! bb2fc2b49a585b6fc7674449365 +//! ``` +//! +//! You can hash that byte string with +//! +//! The result is a 20-char string: `5452869BE36F9F3350CCEE6B4544E7E76CAAADAB` use std::panic::Location; use thiserror::Error; +/// `BitTorrent` Info Hash v1 #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] pub struct InfoHash(pub [u8; 20]); const INFO_HASH_BYTES_LEN: usize = 20; impl InfoHash { + /// Create a new `InfoHash` from a byte slice. + /// /// # Panics /// /// Will panic if byte slice does not contains the exact amount of bytes need for the `InfoHash`. @@ -19,12 +153,13 @@ impl InfoHash { ret } - /// For readability, when accessing the bytes array + /// Returns the `InfoHash` internal byte array. #[must_use] pub fn bytes(&self) -> [u8; 20] { self.0 } + /// Returns the `InfoHash` as a hex string. #[must_use] pub fn to_hex_string(&self) -> String { self.to_string() @@ -79,13 +214,16 @@ impl std::convert::From<[u8; 20]> for InfoHash { } } +/// Errors that can occur when converting from a `Vec` to an `InfoHash`. #[derive(Error, Debug)] pub enum ConversionError { + /// Not enough bytes for infohash. An infohash is 20 bytes. #[error("not enough bytes for infohash: {message} {location}")] NotEnoughBytes { location: &'static Location<'static>, message: String, }, + /// Too many bytes for infohash. An infohash is 20 bytes. 
#[error("too many bytes for infohash: {message} {location}")] TooManyBytes { location: &'static Location<'static>, diff --git a/src/shared/bit_torrent/mod.rs b/src/shared/bit_torrent/mod.rs index 7579a0780..0e5d7e7f2 100644 --- a/src/shared/bit_torrent/mod.rs +++ b/src/shared/bit_torrent/mod.rs @@ -1,2 +1,3 @@ +//! Common code for the `BitTorrent` protocol. pub mod common; pub mod info_hash; diff --git a/src/shared/clock/mod.rs b/src/shared/clock/mod.rs index b5001e10e..df638f835 100644 --- a/src/shared/clock/mod.rs +++ b/src/shared/clock/mod.rs @@ -1,3 +1,27 @@ +//! Time related functions and types. +//! +//! It's usually a good idea to control where the time comes from +//! in an application so that it can be mocked for testing and it can be +//! controlled in production so we get the intended behavior without +//! relying on the specific time zone for the underlying system. +//! +//! Clocks use the type `DurationSinceUnixEpoch` which is a +//! `std::time::Duration` since the Unix Epoch (timestamp). +//! +//! ```text +//! Local time: lun 2023-03-27 16:12:00 WEST +//! Universal time: lun 2023-03-27 15:12:00 UTC +//! Time zone: Atlantic/Canary (WEST, +0100) +//! Timestamp: 1679929914 +//! Duration: 1679929914.10167426 +//! ``` +//! +//! > **NOTICE**: internally the `Duration` stores its main unit as seconds in a `u64` and it will +//! overflow in 584.9 billion years. +//! +//! > **NOTICE**: the timestamp does not depend on the time zone. That gives you +//! the ability to use the clock regardless of the underlying system time zone +//! configuration. See [Unix time Wikipedia entry](https://en.wikipedia.org/wiki/Unix_time). pub mod static_time; pub mod time_extent; pub mod utils; @@ -8,30 +32,47 @@ use std::time::Duration; use chrono::{DateTime, NaiveDateTime, Utc}; +/// Duration since the Unix Epoch. pub type DurationSinceUnixEpoch = Duration; +/// Clock types. #[derive(Debug)] pub enum Type { + /// Clock that returns the current time. 
WorkingClock, + /// Clock that returns always the same fixed time. StoppedClock, } +/// A generic structure that represents a clock. +/// +/// It can be either the working clock (production) or the stopped clock +/// (testing). It implements the `Time` trait, which gives you the current time. #[derive(Debug)] pub struct Clock; +/// The working clock. It returns the current time. pub type Working = Clock<{ Type::WorkingClock as usize }>; +/// The stopped clock. It returns always the same fixed time. pub type Stopped = Clock<{ Type::StoppedClock as usize }>; +/// The current clock. Defined at compilation time. +/// It can be either the working clock (production) or the stopped clock (testing). #[cfg(not(test))] pub type Current = Working; +/// The current clock. Defined at compilation time. +/// It can be either the working clock (production) or the stopped clock (testing). #[cfg(test)] pub type Current = Stopped; +/// Trait for types that can be used as a timestamp clock. pub trait Time: Sized { fn now() -> DurationSinceUnixEpoch; } +/// Trait for types that can manipulate the current time in order to +/// get time in the future or in the past after or before a duration of time. pub trait TimeNow: Time { #[must_use] fn add(add_time: &Duration) -> Option { @@ -43,6 +84,10 @@ pub trait TimeNow: Time { } } +/// It converts a string in ISO 8601 format to a timestamp. +/// For example, the string `1970-01-01T00:00:00.000Z` which is the Unix Epoch +/// will be converted to a timestamp of 0: `DurationSinceUnixEpoch::ZERO`. +/// /// # Panics /// /// Will panic if the input time cannot be converted to `DateTime::`. @@ -52,6 +97,10 @@ pub fn convert_from_iso_8601_to_timestamp(iso_8601: &str) -> DurationSinceUnixEp convert_from_datetime_utc_to_timestamp(&DateTime::::from_str(iso_8601).unwrap()) } +/// It converts a `DateTime::` to a timestamp. +/// For example, the `DateTime::` of the Unix Epoch will be converted to a +/// timestamp of 0: `DurationSinceUnixEpoch::ZERO`. 
+/// /// # Panics /// /// Will panic if the input time overflows the u64 type. @@ -61,6 +110,10 @@ pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime) -> D DurationSinceUnixEpoch::from_secs(u64::try_from(datetime_utc.timestamp()).expect("Overflow of u64 seconds, very future!")) } +/// It converts a timestamp to a `DateTime::`. +/// For example, the timestamp of 0: `DurationSinceUnixEpoch::ZERO` will be +/// converted to the `DateTime::` of the Unix Epoch. +/// /// # Panics /// /// Will panic if the input time overflows the i64 type. @@ -144,23 +197,37 @@ mod working_clock { impl TimeNow for Working {} } +/// Trait for types that can be used as a timestamp clock stopped +/// at a given time. pub trait StoppedTime: TimeNow { + /// It sets the clock to a given time. fn local_set(unix_time: &DurationSinceUnixEpoch); + + /// It sets the clock to the Unix Epoch. fn local_set_to_unix_epoch() { Self::local_set(&DurationSinceUnixEpoch::ZERO); } + + /// It sets the clock to the time the application started. fn local_set_to_app_start_time(); + + /// It sets the clock to the current system time. fn local_set_to_system_time_now(); + /// It adds a `Duration` to the clock. + /// /// # Errors /// /// Will return `IntErrorKind` if `duration` would overflow the internal `Duration`. fn local_add(duration: &Duration) -> Result<(), IntErrorKind>; + /// It subtracts a `Duration` from the clock. /// # Errors /// /// Will return `IntErrorKind` if `duration` would underflow the internal `Duration`. fn local_sub(duration: &Duration) -> Result<(), IntErrorKind>; + + /// It resets the clock to default fixed time that is application start time (or the unix epoch when testing). fn local_reset(); } diff --git a/src/shared/clock/static_time.rs b/src/shared/clock/static_time.rs index f916cec9c..79557b3c4 100644 --- a/src/shared/clock/static_time.rs +++ b/src/shared/clock/static_time.rs @@ -1,5 +1,8 @@ +//! It contains a static variable that is set to the time at which +//! 
the application started. use std::time::SystemTime; lazy_static! { + /// The time at which the application started. pub static ref TIME_AT_APP_START: SystemTime = SystemTime::now(); } diff --git a/src/shared/clock/time_extent.rs b/src/shared/clock/time_extent.rs index 64142c404..2f9e003be 100644 --- a/src/shared/clock/time_extent.rs +++ b/src/shared/clock/time_extent.rs @@ -1,43 +1,124 @@ +//! It includes functionality to handle time extents. +//! +//! Time extents are used to represent a duration of time which contains +//! N times intervals of the same duration. +//! +//! Given a duration of: 60 seconds. +//! +//! ```text +//! |------------------------------------------------------------| +//! ``` +//! +//! If we define a **base** duration of `10` seconds, we would have `6` intervals. +//! +//! ```text +//! |----------|----------|----------|----------|----------|----------| +//! ^--- 10 seconds +//! ``` +//! +//! Then, you can represent half of the duration (`30` seconds) as: +//! +//! ```text +//! |----------|----------|----------|----------|----------|----------| +//! ^--- 30 seconds +//! ``` +//! +//! `3` times (**multiplier**) the **base** interval (3*10 = 30 seconds): +//! +//! ```text +//! |----------|----------|----------|----------|----------|----------| +//! ^--- 30 seconds (3 units of 10 seconds) +//! ``` +//! +//! Time extents are a way to measure time duration using only one unit of time +//! (**base** duration) repeated `N` times (**multiplier**). +//! +//! Time extents are not clocks in the sense that they do not have a start time. +//! They are not synchronized with the real time. In order to measure time, +//! you need to define a start time for the intervals. +//! +//! For example, we could measure time in "lustrums" (5 years) since the start +//! of the 21st century. The time extent would contain a base 5-year duration +//! and the multiplier. The current "lustrum" (2023) would be the 5th one if we +//! start counting "lustrums" at 1. +//! 
```text +//! Lustrum 1: 2000-2004 +//! Lustrum 2: 2005-2009 +//! Lustrum 3: 2010-2014 +//! Lustrum 4: 2015-2019 +//! Lustrum 5: 2020-2024 +//! ``` +//! +//! More practically time extents are used to represent number of time intervals +//! since the Unix Epoch. Each interval is typically an amount of seconds. +//! It's especially useful to check expiring dates. For example, you can have an +//! authentication token that expires after 120 seconds. If you divide the +//! current timestamp by 120 you get the number of 2-minute intervals since the +//! Unix Epoch, you can hash that value with a secret key and send it to a +//! client. The client can authenticate by sending the hashed value back to the +//! server. The server can build the same hash and compare it with the one sent +//! by the client. The hash would be the same during the 2-minute interval, but +//! it would change after that. This method is one of the methods used by UDP +//! trackers to generate and verify a connection ID, which is a token sent to +//! the client to identify the connection. use std::num::{IntErrorKind, TryFromIntError}; use std::time::Duration; use super::{Stopped, TimeNow, Type, Working}; +/// This trait defines the operations that can be performed on a `TimeExtent`. pub trait Extent: Sized + Default { type Base; type Multiplier; type Product; + /// It creates a new `TimeExtent`. fn new(unit: &Self::Base, count: &Self::Multiplier) -> Self; + /// It increases the `TimeExtent` by a multiplier. + /// /// # Errors /// /// Will return `IntErrorKind` if `add` would overflow the internal `Duration`. fn increase(&self, add: Self::Multiplier) -> Result; + /// It decreases the `TimeExtent` by a multiplier. + /// /// # Errors /// /// Will return `IntErrorKind` if `sub` would underflow the internal `Duration`. fn decrease(&self, sub: Self::Multiplier) -> Result; + /// It returns the total `Duration` of the `TimeExtent`. 
fn total(&self) -> Option>; + + /// It returns the total `Duration` of the `TimeExtent` plus one increment. fn total_next(&self) -> Option>; } +/// The `TimeExtent` base `Duration`, which is the duration of a single interval. pub type Base = Duration; +/// The `TimeExtent` `Multiplier`, which is the number of `Base` duration intervals. pub type Multiplier = u64; +/// The `TimeExtent` product, which is the total duration of the `TimeExtent`. pub type Product = Base; +/// A `TimeExtent` is a duration of time which contains N times intervals +/// of the same duration. #[derive(Debug, Default, Hash, PartialEq, Eq)] pub struct TimeExtent { pub increment: Base, pub amount: Multiplier, } +/// A zero time extent. It's the additive identity for a `TimeExtent`. pub const ZERO: TimeExtent = TimeExtent { increment: Base::ZERO, amount: Multiplier::MIN, }; + +/// The maximum value for a `TimeExtent`. pub const MAX: TimeExtent = TimeExtent { increment: Base::MAX, amount: Multiplier::MAX, @@ -114,10 +195,23 @@ impl Extent for TimeExtent { } } +/// A `TimeExtent` maker. It's a clock based on time extents. +/// It gives you the time in time extents. pub trait Make: Sized where Clock: TimeNow, { + /// It gives you the current time extent (with a certain increment) for + /// the current time. It gets the current timestamp from the `Clock`. + /// + /// For example: + /// + /// - If the base increment is `1` second, it will return a time extent + /// whose duration is `1 second` and whose multiplier is the number + /// of seconds since the Unix Epoch (time extent). + /// - If the base increment is `1` minute, it will return a time extent + /// whose duration is `60 seconds` and whose multiplier is the number of + /// minutes since the Unix Epoch (time extent). 
#[must_use] fn now(increment: &Base) -> Option> { Clock::now() @@ -129,6 +223,9 @@ where }) } + /// Same as [`now`](crate::shared::clock::time_extent::Make::now), but it + /// will add an extra duration to the current time before calculating the + /// time extent. It gives you a time extent for a time in the future. #[must_use] fn now_after(increment: &Base, add_time: &Duration) -> Option> { match Clock::add(add_time) { @@ -143,6 +240,9 @@ where } } + /// Same as [`now`](crate::shared::clock::time_extent::Make::now), but it + /// will subtract a duration to the current time before calculating the + /// time extent. It gives you a time extent for a time in the past. #[must_use] fn now_before(increment: &Base, sub_time: &Duration) -> Option> { match Clock::sub(sub_time) { @@ -158,18 +258,28 @@ where } } +/// A `TimeExtent` maker which makes `TimeExtents`. +/// +/// It's a clock which measures time in `TimeExtents`. #[derive(Debug)] pub struct Maker {} +/// A `TimeExtent` maker which makes `TimeExtents` from the `Working` clock. pub type WorkingTimeExtentMaker = Maker<{ Type::WorkingClock as usize }>; + +/// A `TimeExtent` maker which makes `TimeExtents` from the `Stopped` clock. pub type StoppedTimeExtentMaker = Maker<{ Type::StoppedClock as usize }>; impl Make for WorkingTimeExtentMaker {} impl Make for StoppedTimeExtentMaker {} +/// The default `TimeExtent` maker. It is `WorkingTimeExtentMaker` in production +/// and `StoppedTimeExtentMaker` in tests. #[cfg(not(test))] pub type DefaultTimeExtentMaker = WorkingTimeExtentMaker; +/// The default `TimeExtent` maker. It is `WorkingTimeExtentMaker` in production +/// and `StoppedTimeExtentMaker` in tests. #[cfg(test)] pub type DefaultTimeExtentMaker = StoppedTimeExtentMaker; diff --git a/src/shared/clock/utils.rs b/src/shared/clock/utils.rs index 9127f97b1..94d88d288 100644 --- a/src/shared/clock/utils.rs +++ b/src/shared/clock/utils.rs @@ -1,5 +1,7 @@ +//! It contains helper functions related to time. 
use super::DurationSinceUnixEpoch; +/// Serializes a `DurationSinceUnixEpoch` as a Unix timestamp in milliseconds. /// # Errors /// /// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`. diff --git a/src/shared/crypto/ephemeral_instance_keys.rs b/src/shared/crypto/ephemeral_instance_keys.rs index 635d10fbd..44283365a 100644 --- a/src/shared/crypto/ephemeral_instance_keys.rs +++ b/src/shared/crypto/ephemeral_instance_keys.rs @@ -1,8 +1,13 @@ +//! This module contains the ephemeral instance keys used by the application. +//! +//! They are ephemeral because they are generated at runtime when the +//! application starts and are not persisted anywhere. use rand::rngs::ThreadRng; use rand::Rng; pub type Seed = [u8; 32]; lazy_static! { + /// The random static seed. pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); } diff --git a/src/shared/crypto/keys.rs b/src/shared/crypto/keys.rs index 5e04eb551..92e180996 100644 --- a/src/shared/crypto/keys.rs +++ b/src/shared/crypto/keys.rs @@ -1,13 +1,30 @@ +//! This module contains logic related to cryptographic keys. pub mod seeds { + //! This module contains logic related to cryptographic seeds. + //! + //! Specifically, it contains the logic for storing the seed and providing + //! it to other modules. + //! + //! A **seed** is a pseudo-random number that is used as a secret key for + //! cryptographic operations. use self::detail::CURRENT_SEED; use crate::shared::crypto::ephemeral_instance_keys::{Seed, RANDOM_SEED}; + /// This trait is for structures that can keep and provide a seed. pub trait Keeper { type Seed: Sized + Default + AsMut<[u8]>; + + /// It returns a reference to the seed that is keeping. fn get_seed() -> &'static Self::Seed; } + /// The seed keeper for the instance. When the application is running + /// in production, this will be the seed keeper that is used. pub struct Instance; + + /// The seed keeper for the current execution. 
It's a facade at compilation + /// time that will either be the instance seed keeper (with a randomly + /// generated key for production) or the zeroed seed keeper. pub struct Current; impl Keeper for Instance { diff --git a/src/shared/crypto/mod.rs b/src/shared/crypto/mod.rs index 066eb0f46..3c7c287b5 100644 --- a/src/shared/crypto/mod.rs +++ b/src/shared/crypto/mod.rs @@ -1,2 +1,3 @@ +//! Cryptographic primitives. pub mod ephemeral_instance_keys; pub mod keys; diff --git a/src/shared/mod.rs b/src/shared/mod.rs index 4b0d9138e..f016ba913 100644 --- a/src/shared/mod.rs +++ b/src/shared/mod.rs @@ -1,3 +1,8 @@ +//! Modules with generic logic used by several modules. +//! +//! - [`bit_torrent`]: `BitTorrent` protocol related logic. +//! - [`clock`]: Time services. +//! - [`crypto`]: Encryption related logic. pub mod bit_torrent; pub mod clock; pub mod crypto; diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 8eb557f1e..1e78cd909 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -123,15 +123,18 @@ impl Entry { did_torrent_stats_change } - /// Get all swarm peers, limiting the result to the maximum number of scrape torrents. + /// Get all swarm peers, limiting the result to the maximum number of scrape + /// torrents. #[must_use] pub fn get_all_peers(&self) -> Vec<&peer::Peer> { self.peers.values().take(MAX_SCRAPE_TORRENTS as usize).collect() } - /// It returns the list of peers for a given peer client. + /// It returns the list of peers for a given peer client, limiting the + /// result to the maximum number of scrape torrents. /// - /// It filters out the input peer, typically because we want to return this list of peers to that client peer. + /// It filters out the input peer, typically because we want to return this + /// list of peers to that client peer. 
#[must_use] pub fn get_peers_for_peer(&self, client: &Peer) -> Vec<&peer::Peer> { self.peers @@ -143,7 +146,9 @@ impl Entry { .collect() } - /// It returns the swarm metadata (statistics) as a tuple `(seeders, completed, leechers)` + /// It returns the swarm metadata (statistics) as a tuple: + /// + /// `(seeders, completed, leechers)` #[allow(clippy::cast_possible_truncation)] #[must_use] pub fn get_stats(&self) -> (u32, u32, u32) { From ddf4dc639bfd591e1862b43fb312b11240f50d51 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 28 Mar 2023 16:18:01 +0200 Subject: [PATCH 0502/1003] fix: fix doc for DateTime overflow panic --- src/shared/clock/mod.rs | 12 ++++++------ src/tracker/auth.rs | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/shared/clock/mod.rs b/src/shared/clock/mod.rs index df638f835..7a5290f49 100644 --- a/src/shared/clock/mod.rs +++ b/src/shared/clock/mod.rs @@ -90,8 +90,8 @@ pub trait TimeNow: Time { /// /// # Panics /// -/// Will panic if the input time cannot be converted to `DateTime::`. -/// +/// Will panic if the input time cannot be converted to `DateTime::`, internally using the `i64` type. +/// (this will naturally happen in 292.5 billion years) #[must_use] pub fn convert_from_iso_8601_to_timestamp(iso_8601: &str) -> DurationSinceUnixEpoch { convert_from_datetime_utc_to_timestamp(&DateTime::::from_str(iso_8601).unwrap()) @@ -103,8 +103,8 @@ pub fn convert_from_iso_8601_to_timestamp(iso_8601: &str) -> DurationSinceUnixEp /// /// # Panics /// -/// Will panic if the input time overflows the u64 type. -/// +/// Will panic if the input time overflows the `u64` type. 
+/// (this will naturally happen in 584.9 billion years) #[must_use] pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime) -> DurationSinceUnixEpoch { DurationSinceUnixEpoch::from_secs(u64::try_from(datetime_utc.timestamp()).expect("Overflow of u64 seconds, very future!")) @@ -116,8 +116,8 @@ pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime) -> D /// /// # Panics /// -/// Will panic if the input time overflows the i64 type. -/// +/// Will panic if the `u64` seconds value overflows the `i64` type. +/// (this will naturally happen in 292.5 billion years) #[must_use] pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) -> DateTime { DateTime::::from_utc( diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 9fe111e5e..466187af5 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -120,8 +120,8 @@ impl ExpiringKey { /// /// # Panics /// - /// Will panic when the key timestamp overflows the ui64 type. - /// + /// Will panic when the key timestamp overflows the internal i64 type. 
+ /// (this will naturally happen in 292.5 billion years) #[must_use] pub fn expiry_time(&self) -> chrono::DateTime { convert_from_timestamp_to_datetime_utc(self.valid_until) From 81c4d7072103144e5bd55bbf4147b47190e0951e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Mar 2023 11:26:44 +0100 Subject: [PATCH 0503/1003] docs: [#263] crate docs for apis mod --- cSpell.json | 1 + src/lib.rs | 4 +- src/servers/apis/mod.rs | 166 ++++++++++++++++++ src/servers/apis/routes.rs | 14 +- src/servers/apis/server.rs | 50 ++++++ .../apis/v1/context/auth_key/handlers.rs | 50 ++++++ src/servers/apis/v1/context/auth_key/mod.rs | 121 +++++++++++++ .../apis/v1/context/auth_key/resources.rs | 6 + .../apis/v1/context/auth_key/responses.rs | 7 + .../apis/v1/context/auth_key/routes.rs | 9 + src/servers/apis/v1/context/mod.rs | 4 + src/servers/apis/v1/context/stats/handlers.rs | 8 + src/servers/apis/v1/context/stats/mod.rs | 48 +++++ .../apis/v1/context/stats/resources.rs | 24 +++ .../apis/v1/context/stats/responses.rs | 3 + src/servers/apis/v1/context/stats/routes.rs | 6 + .../apis/v1/context/torrent/handlers.rs | 29 ++- src/servers/apis/v1/context/torrent/mod.rs | 109 ++++++++++++ .../apis/v1/context/torrent/resources/mod.rs | 2 + .../apis/v1/context/torrent/resources/peer.rs | 14 ++ .../v1/context/torrent/resources/torrent.rs | 32 +++- .../apis/v1/context/torrent/responses.rs | 9 + src/servers/apis/v1/context/torrent/routes.rs | 7 + .../apis/v1/context/whitelist/handlers.rs | 31 ++++ src/servers/apis/v1/context/whitelist/mod.rs | 95 ++++++++++ .../apis/v1/context/whitelist/responses.rs | 5 + .../apis/v1/context/whitelist/routes.rs | 8 + src/servers/apis/v1/middlewares/auth.rs | 30 +++- src/servers/apis/v1/middlewares/mod.rs | 1 + src/servers/apis/v1/mod.rs | 18 ++ src/servers/apis/v1/responses.rs | 3 + src/servers/apis/v1/routes.rs | 7 + src/servers/http/mod.rs | 2 +- src/tracker/mod.rs | 5 +- src/tracker/services/statistics/mod.rs | 4 +- src/tracker/statistics.rs | 14 ++ 36 
files changed, 933 insertions(+), 13 deletions(-) diff --git a/cSpell.json b/cSpell.json index b0ad4caf7..e7c0166f8 100644 --- a/cSpell.json +++ b/cSpell.json @@ -16,6 +16,7 @@ "byteorder", "canonicalize", "canonicalized", + "certbot", "chrono", "clippy", "completei", diff --git a/src/lib.rs b/src/lib.rs index a460a28b8..3b9777b36 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -315,7 +315,7 @@ //! Using `curl` you can create a 2-minute valid auth key: //! //! ```text -//! $ curl -X POST http://127.0.0.1:1212/api/v1/key/120?token=MyAccessToken +//! $ curl -X POST "http://127.0.0.1:1212/api/v1/key/120?token=MyAccessToken" //! ``` //! //! Response: @@ -329,7 +329,7 @@ //! ``` //! //! You can also use the Torrust Tracker together with the [Torrust Index](https://github.com/torrust/torrust-index). If that's the case, -//! the Index will create the keys by using the API. +//! the Index will create the keys by using the tracker [API](crate::servers::apis). //! //! ## UDP tracker usage //! diff --git a/src/servers/apis/mod.rs b/src/servers/apis/mod.rs index 1bc257916..203f1d146 100644 --- a/src/servers/apis/mod.rs +++ b/src/servers/apis/mod.rs @@ -1,8 +1,174 @@ +//! The tracker REST API with all its versions. +//! +//! > **NOTICE**: This API should not be exposed directly to the internet, it is +//! intended for internal use only. +//! +//! Endpoints for the latest API: [v1](crate::servers::apis::v1). +//! +//! All endpoints require an authorization token which must be set in the +//! configuration before running the tracker. The default configuration uses +//! `?token=MyAccessToken`. Refer to [Authentication](#authentication) for more +//! information. +//! +//! # Table of contents +//! +//! - [Configuration](#configuration) +//! - [Authentication](#authentication) +//! - [Versioning](#versioning) +//! - [Endpoints](#endpoints) +//! - [Documentation](#documentation) +//! +//! # Configuration +//! +//! 
The configuration file has a [`[http_api]`](torrust_tracker_configuration::HttpApi) +//! section that can be used to enable the API. +//! +//! ```toml +//! [http_api] +//! enabled = true +//! bind_address = "0.0.0.0:1212" +//! ssl_enabled = false +//! ssl_cert_path = "./storage/ssl_certificates/localhost.crt" +//! ssl_key_path = "./storage/ssl_certificates/localhost.key" +//! +//! [http_api.access_tokens] +//! admin = "MyAccessToken" +//! ``` +//! +//! Refer to [torrust-tracker-configuration](https://docs.rs/torrust-tracker-configuration>) +//! for more information about the API configuration. +//! +//! When you run the tracker with enabled API, you will see the following message: +//! +//! ```text +//! Loading configuration from config file ./config.toml +//! 023-03-28T12:19:24.963054069+01:00 [torrust_tracker::bootstrap::logging][INFO] logging initialized. +//! ... +//! 023-03-28T12:19:24.964138723+01:00 [torrust_tracker::bootstrap::jobs::tracker_apis][INFO] Starting Torrust APIs server on: http://0.0.0.0:1212 +//! ``` +//! +//! The API server will be available on the address specified in the configuration. +//! +//! You can test the API by loading the following URL on a browser: +//! +//! +//! +//! Or using `curl`: +//! +//! ```bash +//! $ curl -s "http://0.0.0.0:1212/api/v1/stats?token=MyAccessToken" +//! ``` +//! +//! The response will be a JSON object. For example, the [tracker statistics +//! endpoint](crate::servers::apis::v1::context::stats#get-tracker-statistics): +//! +//! ```json +//! { +//! "torrents": 0, +//! "seeders": 0, +//! "completed": 0, +//! "leechers": 0, +//! "tcp4_connections_handled": 0, +//! "tcp4_announces_handled": 0, +//! "tcp4_scrapes_handled": 0, +//! "tcp6_connections_handled": 0, +//! "tcp6_announces_handled": 0, +//! "tcp6_scrapes_handled": 0, +//! "udp4_connections_handled": 0, +//! "udp4_announces_handled": 0, +//! "udp4_scrapes_handled": 0, +//! "udp6_connections_handled": 0, +//! "udp6_announces_handled": 0, +//! 
"udp6_scrapes_handled": 0 +//! } +//! ``` +//! +//! # Authentication +//! +//! The API supports authentication using a GET parameter token. +//! +//! +//! +//! You can set as many tokens as you want in the configuration file: +//! +//! ```toml +//! [http_api.access_tokens] +//! admin = "MyAccessToken" +//! ``` +//! +//! The token label is used to identify the token. All tokens have full access +//! to the API. +//! +//! Refer to [torrust-tracker-configuration](https://docs.rs/torrust-tracker-configuration>) +//! for more information about the API configuration and to the +//! [`auth`](crate::servers::apis::v1::middlewares::auth) middleware for more +//! information about the authentication process. +//! +//! # Setup SSL (optional) +//! +//! The API server supports SSL. You can enable it by setting the +//! [`ssl_enabled`](torrust_tracker_configuration::HttpApi::ssl_enabled) option +//! to `true` in the configuration file +//! ([`http_api`](torrust_tracker_configuration::HttpApi) section). +//! +//! ```toml +//! [http_api] +//! enabled = true +//! bind_address = "0.0.0.0:1212" +//! ssl_enabled = true +//! ssl_cert_path = "./storage/ssl_certificates/localhost.crt" +//! ssl_key_path = "./storage/ssl_certificates/localhost.key" +//! +//! [http_api.access_tokens] +//! admin = "MyAccessToken" +//! ``` +//! +//! > **NOTICE**: If you are using a reverse proxy like NGINX, you can skip this +//! step and use NGINX for the SSL instead. See +//! [other alternatives to Nginx/certbot](https://github.com/torrust/torrust-tracker/discussions/131) +//! +//! > **NOTICE**: You can generate a self-signed certificate for localhost using +//! OpenSSL. See [Let's Encrypt](https://letsencrypt.org/docs/certificates-for-localhost/). +//! That's particularly useful for testing purposes. Once you have the certificate +//! you need to set the [`ssl_cert_path`](torrust_tracker_configuration::HttpApi::ssl_cert_path) +//! 
and [`ssl_key_path`](torrust_tracker_configuration::HttpApi::ssl_key_path) +//! options in the configuration file with the paths to the certificate +//! (`localhost.crt`) and key (`localhost.key`) files. +//! +//! # Versioning +//! +//! The API is versioned and each version has its own module. +//! The API server runs all the API versions on the same server using +//! the same port. Currently there is only one API version: [v1](crate::servers::apis::v1) +//! but a version [`v2`](https://github.com/torrust/torrust-tracker/issues/144) +//! is planned. +//! +//! # Endpoints +//! +//! Refer to the [v1](crate::servers::apis::v1) module for the list of available +//! API endpoints. +//! +//! # Documentation +//! +//! If you want to contribute to this documentation you can [open a new pull request](https://github.com/torrust/torrust-tracker/pulls). +//! +//! > **NOTICE**: we are using [curl](https://curl.se/) in the API examples. +//! And you have to use quotes around the URL in order to avoid unexpected +//! errors. For example: `curl "http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken"`. pub mod routes; pub mod server; pub mod v1; use serde::Deserialize; +/// The info hash URL path parameter. +/// +/// Some API endpoints require an info hash as a path parameter. +/// +/// For example: `http://localhost:1212/api/v1/torrent/{info_hash}`. +/// +/// The info hash represents teh value collected from the URL path parameter. +/// It does not include validation as this is done by the API endpoint handler, +/// in order to provide a more specific error message. #[derive(Deserialize)] pub struct InfoHashParam(pub String); diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index 2545d6b88..a4c4642c7 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -1,11 +1,18 @@ +//! API routes. +//! +//! It loads all the API routes for all API versions and adds the authentication +//! middleware to them. +//! +//! 
All the API routes have the `/api` prefix and the version number as the +//! first path segment. For example: `/api/v1/torrents`. use std::sync::Arc; use axum::{middleware, Router}; use super::v1; -use super::v1::middlewares::auth::auth; use crate::tracker::Tracker; +/// Add all API routes to the router. #[allow(clippy::needless_pass_by_value)] pub fn router(tracker: Arc) -> Router { let router = Router::new(); @@ -14,5 +21,8 @@ pub fn router(tracker: Arc) -> Router { let router = v1::routes::add(prefix, router, tracker.clone()); - router.layer(middleware::from_fn_with_state(tracker.config.clone(), auth)) + router.layer(middleware::from_fn_with_state( + tracker.config.clone(), + v1::middlewares::auth::auth, + )) } diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index e4714cd9a..76396cc51 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -1,3 +1,28 @@ +//! Logic to run the HTTP API server. +//! +//! It contains two main structs: `ApiServer` and `Launcher`, +//! and two main functions: `start` and `start_tls`. +//! +//! The `ApiServer` struct is responsible for: +//! - Starting and stopping the server. +//! - Storing the configuration. +//! +//! `ApiServer` relies on a launcher to start the actual server. +/// +/// 1. `ApiServer::start` -> spawns new asynchronous task. +/// 2. `Launcher::start` -> starts the server on the spawned task. +/// +/// The `Launcher` struct is responsible for: +/// +/// - Knowing how to start the server with graceful shutdown. +/// +/// For the time being the `ApiServer` and `Launcher` are only used in tests +/// where we need to start and stop the server multiple times. In production +/// code and the main application uses the `start` and `start_tls` functions +/// to start the servers directly since we do not need to control the server +/// when it's running. 
In the future we might need to control the server, +/// for example, to restart it to apply new configuration changes, to remotely +/// shutdown the server, etc. use std::net::SocketAddr; use std::str::FromStr; use std::sync::Arc; @@ -12,24 +37,35 @@ use super::routes::router; use crate::servers::signals::shutdown_signal; use crate::tracker::Tracker; +/// Errors that can occur when starting or stopping the API server. #[derive(Debug)] pub enum Error { Error(String), } +/// An alias for the `ApiServer` struct with the `Stopped` state. #[allow(clippy::module_name_repetitions)] pub type StoppedApiServer = ApiServer; + +/// An alias for the `ApiServer` struct with the `Running` state. #[allow(clippy::module_name_repetitions)] pub type RunningApiServer = ApiServer; +/// A struct responsible for starting and stopping an API server with a +/// specific configuration and keeping track of the started server. +/// +/// It's a state machine that can be in one of two +/// states: `Stopped` or `Running`. #[allow(clippy::module_name_repetitions)] pub struct ApiServer { pub cfg: torrust_tracker_configuration::HttpApi, pub state: S, } +/// The `Stopped` state of the `ApiServer` struct. pub struct Stopped; +/// The `Running` state of the `ApiServer` struct. pub struct Running { pub bind_addr: SocketAddr, task_killer: tokio::sync::oneshot::Sender, @@ -42,6 +78,8 @@ impl ApiServer { Self { cfg, state: Stopped {} } } + /// Starts the API server with the given configuration. + /// /// # Errors /// /// It would return an error if no `SocketAddr` is returned after launching the server. @@ -75,6 +113,8 @@ impl ApiServer { } impl ApiServer { + /// Stops the API server. + /// /// # Errors /// /// It would return an error if the channel for the task killer signal was closed. @@ -93,9 +133,15 @@ impl ApiServer { } } +/// A struct responsible for starting the API server. struct Launcher; impl Launcher { + /// Starts the API server with graceful shutdown. 
+ /// + /// If TLS is enabled in the configuration, it will start the server with + /// TLS. See [torrust-tracker-configuration](https://docs.rs/torrust-tracker-configuration>) + /// for more information about configuration. pub fn start( cfg: &torrust_tracker_configuration::HttpApi, tracker: Arc, @@ -126,6 +172,7 @@ impl Launcher { } } + /// Starts the API server with graceful shutdown. pub fn start_with_graceful_shutdown( tcp_listener: std::net::TcpListener, tracker: Arc, @@ -146,6 +193,7 @@ impl Launcher { }) } + /// Starts the API server with graceful shutdown and TLS. pub fn start_tls_with_graceful_shutdown( tcp_listener: std::net::TcpListener, (ssl_cert_path, ssl_key_path): (String, String), @@ -180,6 +228,7 @@ impl Launcher { } } +/// Starts the API server with graceful shutdown on the current thread. pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl Future> { let app = router(tracker); @@ -191,6 +240,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl Future>, Path(seconds_valid_or_key): Path) -> Response { let seconds_valid = seconds_valid_or_key; match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { @@ -22,9 +34,35 @@ pub async fn generate_auth_key_handler(State(tracker): State>, Path } } +/// A container for the `key` parameter extracted from the URL PATH. +/// +/// It does not perform any validation, it just stores the value. +/// +/// In the current API version, the `key` parameter can be either a valid key +/// like `xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6` or the number of seconds the +/// key will be valid, for example two minutes `120`. +/// +/// For example, the `key` is used in the following requests: +/// +/// - `POST /api/v1/key/120`. It will generate a new key valid for two minutes. +/// - `DELETE /api/v1/key/xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6`. It will delete the +/// key `xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6`. 
+/// +/// > **NOTICE**: this may change in the future, in the [API v2](https://github.com/torrust/torrust-tracker/issues/144). #[derive(Deserialize)] pub struct KeyParam(String); +/// It handles the request to delete an authentication key. +/// +/// It returns two types of responses: +/// +/// - `200` with a json [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) +/// response. If the key was deleted successfully. +/// - `500` with serialized error in debug format. If the key couldn't be +/// deleted. +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#delete-an-authentication-key) +/// for more information about this endpoint. pub async fn delete_auth_key_handler( State(tracker): State>, Path(seconds_valid_or_key): Path, @@ -38,6 +76,18 @@ pub async fn delete_auth_key_handler( } } +/// It handles the request to reload the authentication keys from the database +/// into memory. +/// +/// It returns two types of responses: +/// +/// - `200` with a json [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) +/// response. If the keys were successfully reloaded. +/// - `500` with serialized error in debug format. If they couldn't be +/// reloaded. +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#reload-authentication-keys) +/// for more information about this endpoint. pub async fn reload_keys_handler(State(tracker): State>) -> Response { match tracker.load_keys_from_database().await { Ok(_) => ok_response(), diff --git a/src/servers/apis/v1/context/auth_key/mod.rs b/src/servers/apis/v1/context/auth_key/mod.rs index 746a2f064..11bc8a43f 100644 --- a/src/servers/apis/v1/context/auth_key/mod.rs +++ b/src/servers/apis/v1/context/auth_key/mod.rs @@ -1,3 +1,124 @@ +//! Authentication keys API context. +//! +//! Authentication keys are used to authenticate HTTP tracker `announce` and +//! `scrape` requests. +//! +//! 
When the tracker is running in `private` or `private_listed` mode, the +//! authentication keys are required to announce and scrape torrents. +//! +//! A sample `announce` request **without** authentication key: +//! +//! +//! +//! A sample `announce` request **with** authentication key: +//! +//! +//! +//! # Endpoints +//! +//! - [Generate a new authentication key](#generate-a-new-authentication-key) +//! - [Delete an authentication key](#delete-an-authentication-key) +//! - [Reload authentication keys](#reload-authentication-keys) +//! +//! # Generate a new authentication key +//! +//! `POST /key/:seconds_valid` +//! +//! It generates a new authentication key. +//! +//! > **NOTICE**: keys expire after a certain amount of time. +//! +//! **Path parameters** +//! +//! Name | Type | Description | Required | Example +//! ---|---|---|---|--- +//! `seconds_valid` | positive integer | The number of seconds the key will be valid. | Yes | `3600` +//! +//! **Example request** +//! +//! ```bash +//! curl -X POST "http://127.0.0.1:1212/api/v1/key/120?token=MyAccessToken" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "key": "xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6", +//! "valid_until": 1680009900, +//! "expiry_time": "2023-03-28 13:25:00.058085050 UTC" +//! } +//! ``` +//! +//! > **NOTICE**: `valid_until` and `expiry_time` represent the same time. +//! `valid_until` is the number of seconds since the Unix epoch +//! ([timestamp](https://en.wikipedia.org/wiki/Timestamp)), while `expiry_time` +//! is the human-readable time ([ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html)). +//! +//! **Resource** +//! +//! Refer to the API [`AuthKey`](crate::servers::apis::v1::context::auth_key::resources::AuthKey) +//! resource for more information about the response attributes. +//! +//! # Delete an authentication key +//! +//! `DELETE /key/:key` +//! +//! It deletes a previously generated authentication key. +//! +//! **Path parameters** +//! +//! 
Name | Type | Description | Required | Example +//! ---|---|---|---|--- +//! `key` | 40-char string | The `key` to remove. | Yes | `xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6` +//! +//! **Example request** +//! +//! ```bash +//! curl -X DELETE "http://127.0.0.1:1212/api/v1/key/xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6?token=MyAccessToken" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "status": "ok" +//! } +//! ``` +//! +//! If you try to delete a non-existent key, the response will be an error with +//! a `500` status code. +//! +//! **Example error response** `500` +//! +//! ```text +//! Unhandled rejection: Err { reason: "failed to delete key: Failed to remove record from Sqlite3 database, error-code: 0, src/tracker/databases/sqlite.rs:267:27" } +//! ``` +//! +//! > **NOTICE**: a `500` status code will be returned and the body is not +//! valid JSON. It's a text body containing the serialized-to-display error +//! message. +//! +//! # Reload authentication keys +//! +//! `GET /keys/reload` +//! +//! The tracker persists the authentication keys in a database. This endpoint +//! reloads the keys from the database. +//! +//! **Example request** +//! +//! ```bash +//! curl "http://127.0.0.1:1212/api/v1/keys/reload?token=MyAccessToken" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "status": "ok" +//! } +//! ``` pub mod handlers; pub mod resources; pub mod responses; diff --git a/src/servers/apis/v1/context/auth_key/resources.rs b/src/servers/apis/v1/context/auth_key/resources.rs index 400b34eb7..3eeafbda0 100644 --- a/src/servers/apis/v1/context/auth_key/resources.rs +++ b/src/servers/apis/v1/context/auth_key/resources.rs @@ -1,3 +1,4 @@ +//! API resources for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. 
use std::convert::From; use serde::{Deserialize, Serialize}; @@ -5,10 +6,15 @@ use serde::{Deserialize, Serialize}; use crate::shared::clock::convert_from_iso_8601_to_timestamp; use crate::tracker::auth::{self, Key}; +/// A resource that represents an authentication key. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct AuthKey { + /// The authentication key. pub key: String, + /// The timestamp when the key will expire. + #[deprecated(since = "3.0.0", note = "please use `expiry_time` instead")] pub valid_until: u64, // todo: remove when the torrust-index-backend starts using the `expiry_time` attribute. + /// The ISO 8601 timestamp when the key will expire. pub expiry_time: String, } diff --git a/src/servers/apis/v1/context/auth_key/responses.rs b/src/servers/apis/v1/context/auth_key/responses.rs index 4e3b0c711..51be162c5 100644 --- a/src/servers/apis/v1/context/auth_key/responses.rs +++ b/src/servers/apis/v1/context/auth_key/responses.rs @@ -1,3 +1,4 @@ +//! API responses for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. use std::error::Error; use axum::http::{header, StatusCode}; @@ -6,6 +7,8 @@ use axum::response::{IntoResponse, Response}; use crate::servers::apis::v1::context::auth_key::resources::AuthKey; use crate::servers::apis::v1::responses::unhandled_rejection_response; +/// `200` response that contains the `AuthKey` resource as json. +/// /// # Panics /// /// Will panic if it can't convert the `AuthKey` resource to json @@ -19,16 +22,20 @@ pub fn auth_key_response(auth_key: &AuthKey) -> Response { .into_response() } +/// `500` error response when a new authentication key cannot be generated. #[must_use] pub fn failed_to_generate_key_response(e: E) -> Response { unhandled_rejection_response(format!("failed to generate key: {e}")) } +/// `500` error response when an authentication key cannot be deleted. 
#[must_use] pub fn failed_to_delete_key_response(e: E) -> Response { unhandled_rejection_response(format!("failed to delete key: {e}")) } +/// `500` error response when the authentication keys cannot be reloaded from +/// the database into memory. #[must_use] pub fn failed_to_reload_keys_response(e: E) -> Response { unhandled_rejection_response(format!("failed to reload keys: {e}")) diff --git a/src/servers/apis/v1/context/auth_key/routes.rs b/src/servers/apis/v1/context/auth_key/routes.rs index 9b155c2a5..76c634e21 100644 --- a/src/servers/apis/v1/context/auth_key/routes.rs +++ b/src/servers/apis/v1/context/auth_key/routes.rs @@ -1,3 +1,11 @@ +//! API routes for the [`auth_key`](crate::servers::apis::v1::context::auth_key) +//! API context. +//! +//! - `POST /key/:seconds_valid` +//! - `DELETE /key/:key` +//! - `GET /keys/reload` +//! +//! Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key). use std::sync::Arc; use axum::routing::{get, post}; @@ -6,6 +14,7 @@ use axum::Router; use super::handlers::{delete_auth_key_handler, generate_auth_key_handler, reload_keys_handler}; use crate::tracker::Tracker; +/// It adds the routes to the router for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { // Keys router diff --git a/src/servers/apis/v1/context/mod.rs b/src/servers/apis/v1/context/mod.rs index 6d3fb7566..5e268a429 100644 --- a/src/servers/apis/v1/context/mod.rs +++ b/src/servers/apis/v1/context/mod.rs @@ -1,3 +1,7 @@ +//! API is organized in resource groups called contexts. +//! +//! Each context is a module that contains the API endpoints related to a +//! specific resource group. 
pub mod auth_key; pub mod stats; pub mod torrent; diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs index e93e65996..dfb983f77 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -1,3 +1,5 @@ +//! API handlers for the [`stats`](crate::servers::apis::v1::context::stats) +//! API context. use std::sync::Arc; use axum::extract::State; @@ -8,6 +10,12 @@ use super::responses::stats_response; use crate::tracker::services::statistics::get_metrics; use crate::tracker::Tracker; +/// It handles the request to get the tracker statistics. +/// +/// It returns a `200` response with a json [`Stats`](crate::servers::apis::v1::context::stats::resources::Stats) +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::stats#get-tracker-statistics) +/// for more information about this endpoint. pub async fn get_stats_handler(State(tracker): State>) -> Json { stats_response(get_metrics(tracker.clone()).await) } diff --git a/src/servers/apis/v1/context/stats/mod.rs b/src/servers/apis/v1/context/stats/mod.rs index 746a2f064..80f37f73f 100644 --- a/src/servers/apis/v1/context/stats/mod.rs +++ b/src/servers/apis/v1/context/stats/mod.rs @@ -1,3 +1,51 @@ +//! Tracker statistics API context. +//! +//! The tracker collects statistics about the number of torrents, seeders, +//! leechers, completed downloads, and the number of requests handled. +//! +//! # Endpoints +//! +//! - [Get tracker statistics](#get-tracker-statistics) +//! +//! # Get tracker statistics +//! +//! `GET /stats` +//! +//! Returns the tracker statistics. +//! +//! **Example request** +//! +//! ```bash +//! curl "http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "torrents": 0, +//! "seeders": 0, +//! "completed": 0, +//! "leechers": 0, +//! "tcp4_connections_handled": 0, +//! 
"tcp4_announces_handled": 0, +//! "tcp4_scrapes_handled": 0, +//! "tcp6_connections_handled": 0, +//! "tcp6_announces_handled": 0, +//! "tcp6_scrapes_handled": 0, +//! "udp4_connections_handled": 0, +//! "udp4_announces_handled": 0, +//! "udp4_scrapes_handled": 0, +//! "udp6_connections_handled": 0, +//! "udp6_announces_handled": 0, +//! "udp6_scrapes_handled": 0 +//! } +//! ``` +//! +//! **Resource** +//! +//! Refer to the API [`Stats`](crate::servers::apis::v1::context::stats::resources::Stats) +//! resource for more information about the response attributes. pub mod handlers; pub mod resources; pub mod responses; diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index 44ac814dc..355a1e448 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -1,24 +1,48 @@ +//! API resources for the [`stats`](crate::servers::apis::v1::context::stats) +//! API context. use serde::{Deserialize, Serialize}; use crate::tracker::services::statistics::TrackerMetrics; +/// It contains all the statistics generated by the tracker. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Stats { + // Torrent metrics + /// Total number of torrents. pub torrents: u64, + /// Total number of seeders for all torrents. pub seeders: u64, + /// Total number of peers that have ever completed downloading for all torrents. pub completed: u64, + /// Total number of leechers for all torrents. pub leechers: u64, + + // Protocol metrics + /// Total number of TCP (HTTP tracker) connections from IPv4 peers. + /// Since the HTTP tracker spec does not require a handshake, this metric + /// increases for every HTTP request. pub tcp4_connections_handled: u64, + /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. pub tcp4_announces_handled: u64, + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. 
pub tcp4_scrapes_handled: u64, + /// Total number of TCP (HTTP tracker) connections from IPv6 peers. pub tcp6_connections_handled: u64, + /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. pub tcp6_announces_handled: u64, + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. pub tcp6_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) connections from IPv4 peers. pub udp4_connections_handled: u64, + /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. pub udp4_announces_handled: u64, + /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. pub udp4_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. pub udp6_connections_handled: u64, + /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. pub udp6_announces_handled: u64, + /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. pub udp6_scrapes_handled: u64, } diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs index ea9a2480a..a4dad77e4 100644 --- a/src/servers/apis/v1/context/stats/responses.rs +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -1,8 +1,11 @@ +//! API responses for the [`stats`](crate::servers::apis::v1::context::stats) +//! API context. use axum::response::Json; use super::resources::Stats; use crate::tracker::services::statistics::TrackerMetrics; +/// `200` response that contains the [`Stats`](crate::servers::apis::v1::context::stats::resources::Stats) resource as json. pub fn stats_response(tracker_metrics: TrackerMetrics) -> Json { Json(Stats::from(tracker_metrics)) } diff --git a/src/servers/apis/v1/context/stats/routes.rs b/src/servers/apis/v1/context/stats/routes.rs index 07f88aa70..9198562dd 100644 --- a/src/servers/apis/v1/context/stats/routes.rs +++ b/src/servers/apis/v1/context/stats/routes.rs @@ -1,3 +1,8 @@ +//! 
API routes for the [`stats`](crate::servers::apis::v1::context::stats) API context. +//! +//! - `GET /stats` +//! +//! Refer to the [API endpoint documentation](crate::servers::apis::v1::context::stats). use std::sync::Arc; use axum::routing::get; @@ -6,6 +11,7 @@ use axum::Router; use super::handlers::get_stats_handler; use crate::tracker::Tracker; +/// It adds the routes to the router for the [`stats`](crate::servers::apis::v1::context::stats) API context. pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { router.route(&format!("{prefix}/stats"), get(get_stats_handler).with_state(tracker)) } diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index 4032f2e9a..002d4356e 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -1,9 +1,12 @@ +//! API handlers for the [`torrent`](crate::servers::apis::v1::context::torrent) +//! API context. use std::fmt; use std::str::FromStr; use std::sync::Arc; use axum::extract::{Path, Query, State}; use axum::response::{IntoResponse, Json, Response}; +use log::debug; use serde::{de, Deserialize, Deserializer}; use super::resources::torrent::ListItem; @@ -14,6 +17,15 @@ use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; +/// It handles the request to get the torrent data. +/// +/// It returns: +/// +/// - `200` response with a json [`Torrent`](crate::servers::apis::v1::context::torrent::resources::torrent::Torrent). +/// - `500` with serialized error in debug format if the torrent is not known. +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent#get-a-torrent) +/// for more information about this endpoint. 
pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { match InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), @@ -24,17 +36,32 @@ pub async fn get_torrent_handler(State(tracker): State>, Path(info_ } } -#[derive(Deserialize)] +/// A container for the optional URL query pagination parameters: +/// `offset` and `limit`. +#[derive(Deserialize, Debug)] pub struct PaginationParams { + /// The offset of the first page to return. Starts at 0. #[serde(default, deserialize_with = "empty_string_as_none")] pub offset: Option, + /// The maximum number of items to return per page + #[serde(default, deserialize_with = "empty_string_as_none")] pub limit: Option, } +/// It handles the request to get a list of torrents. +/// +/// It returns a `200` response with a json array with +/// [`ListItem`](crate::servers::apis::v1::context::torrent::resources::torrent::ListItem) +/// resources. +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent#list-torrents) +/// for more information about this endpoint. pub async fn get_torrents_handler( State(tracker): State>, pagination: Query, ) -> Json> { + debug!("pagination: {:?}", pagination); + torrent_list_response( &get_torrents( tracker.clone(), diff --git a/src/servers/apis/v1/context/torrent/mod.rs b/src/servers/apis/v1/context/torrent/mod.rs index 746a2f064..1658e1748 100644 --- a/src/servers/apis/v1/context/torrent/mod.rs +++ b/src/servers/apis/v1/context/torrent/mod.rs @@ -1,3 +1,112 @@ +//! Torrents API context. +//! +//! This API context is responsible for handling all the requests related to +//! the torrents data stored by the tracker. +//! +//! # Endpoints +//! +//! - [Get a torrent](#get-a-torrent) +//! - [List torrents](#list-torrents) +//! +//! # Get a torrent +//! +//! `GET /torrent/:info_hash` +//! +//! Returns all the information about a torrent. +//! +//! **Path parameters** +//! +//! 
Name | Type | Description | Required | Example +//! ---|---|---|---|--- +//! `info_hash` | 40-char string | The Info Hash v1 | Yes | `5452869be36f9f3350ccee6b4544e7e76caaadab` +//! +//! **Example request** +//! +//! ```bash +//! curl "http://127.0.0.1:1212/api/v1/torrent/5452869be36f9f3350ccee6b4544e7e76caaadab?token=MyAccessToken" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "info_hash": "5452869be36f9f3350ccee6b4544e7e76caaadab", +//! "seeders": 1, +//! "completed": 0, +//! "leechers": 0, +//! "peers": [ +//! { +//! "peer_id": { +//! "id": "0x2d7142343431302d2a64465a3844484944704579", +//! "client": "qBittorrent" +//! }, +//! "peer_addr": "192.168.1.88:17548", +//! "updated": 1680082693001, +//! "updated_milliseconds_ago": 1680082693001, +//! "uploaded": 0, +//! "downloaded": 0, +//! "left": 0, +//! "event": "None" +//! } +//! ] +//! } +//! ``` +//! +//! **Not Found response** `200` +//! +//! This response is returned when the tracker does not have the torrent. +//! +//! ```json +//! "torrent not known" +//! ``` +//! +//! **Resource** +//! +//! Refer to the API [`Torrent`](crate::servers::apis::v1::context::torrent::resources::torrent::Torrent) +//! resource for more information about the response attributes. +//! +//! # List torrents +//! +//! `GET /torrents` +//! +//! Returns basic information (no peer list) for all torrents. +//! +//! **Query parameters** +//! +//! The endpoint supports pagination. +//! +//! Name | Type | Description | Required | Example +//! ---|---|---|---|--- +//! `offset` | positive integer | The page number, starting at 0 | No | `1` +//! `limit` | positive integer | Page size. The number of results per page | No | `10` +//! +//! **Example request** +//! +//! ```bash +//! curl "http://127.0.0.1:1212/api/v1/torrents?token=MyAccessToken&offset=1&limit=1" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! [ +//! { +//! "info_hash": "5452869be36f9f3350ccee6b4544e7e76caaadab", +//! 
"seeders": 1, +//! "completed": 0, +//! "leechers": 0, +//! "peers": null +//! } +//! ] +//! ``` +//! +//! **Resource** +//! +//! Refer to the API [`ListItem`](crate::servers::apis::v1::context::torrent::resources::torrent::ListItem) +//! resource for more information about the attributes for a single item in the +//! response. +//! +//! > **NOTICE**: this endpoint does not include the `peers` list. pub mod handlers; pub mod resources; pub mod responses; diff --git a/src/servers/apis/v1/context/torrent/resources/mod.rs b/src/servers/apis/v1/context/torrent/resources/mod.rs index 46d62aac5..a6dbff726 100644 --- a/src/servers/apis/v1/context/torrent/resources/mod.rs +++ b/src/servers/apis/v1/context/torrent/resources/mod.rs @@ -1,2 +1,4 @@ +//! API resources for the [`torrent`](crate::servers::apis::v1::context::torrent) +//! API context. pub mod peer; pub mod torrent; diff --git a/src/servers/apis/v1/context/torrent/resources/peer.rs b/src/servers/apis/v1/context/torrent/resources/peer.rs index 5284d26f6..539637b35 100644 --- a/src/servers/apis/v1/context/torrent/resources/peer.rs +++ b/src/servers/apis/v1/context/torrent/resources/peer.rs @@ -1,23 +1,37 @@ +//! `Peer` and Peer `Id` API resources. use serde::{Deserialize, Serialize}; use crate::tracker; +/// `Peer` API resource. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Peer { + /// The peer's ID. See [`Id`](crate::servers::apis::v1::context::torrent::resources::peer::Id). pub peer_id: Id, + /// The peer's socket address. For example: `192.168.1.88:17548`. pub peer_addr: String, + /// The peer's last update time in milliseconds. #[deprecated(since = "2.0.0", note = "please use `updated_milliseconds_ago` instead")] pub updated: u128, + /// The peer's last update time in milliseconds. pub updated_milliseconds_ago: u128, + /// The peer's uploaded bytes. pub uploaded: i64, + /// The peer's downloaded bytes. pub downloaded: i64, + /// The peer's left bytes (pending to download). 
pub left: i64, + /// The peer's event: `started`, `stopped`, `completed`. + /// See [`AnnounceEventDef`](crate::shared::bit_torrent::common::AnnounceEventDef). pub event: String, } +/// Peer `Id` API resource. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Id { + /// The peer's ID in hex format. For example: `0x2d7142343431302d2a64465a3844484944704579`. pub id: Option, + /// The peer's client name. For example: `qBittorrent`. pub client: Option, } diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index e328f80c4..c9dbd1c02 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -1,26 +1,52 @@ +//! `Torrent` and `ListItem` API resources. +//! +//! - `Torrent` is the full torrent resource. +//! - `ListItem` is a list item resource on a torrent list. `ListItem` does +//! include a `peers` field but it is always `None` in the struct and `null` in +//! the JSON response. use serde::{Deserialize, Serialize}; use super::peer; use crate::tracker::services::torrent::{BasicInfo, Info}; +/// `Torrent` API resource. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Torrent { + /// The torrent's info hash v1. pub info_hash: String, + /// The torrent's seeders counter. Active peers with a full copy of the + /// torrent. pub seeders: u64, + /// The torrent's completed counter. Peers that have ever completed the + /// download. pub completed: u64, + /// The torrent's leechers counter. Active peers that are downloading the + /// torrent. pub leechers: u64, + /// The torrent's peers. See [`Peer`](crate::servers::apis::v1::context::torrent::resources::peer::Peer). #[serde(skip_serializing_if = "Option::is_none")] pub peers: Option>, } +/// `ListItem` API resource. A list item on a torrent list. 
+/// `ListItem` does include a `peers` field but it is always `None` in the +/// struct and `null` in the JSON response. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct ListItem { + /// The torrent's info hash v1. pub info_hash: String, + /// The torrent's seeders counter. Active peers with a full copy of the + /// torrent. pub seeders: u64, + /// The torrent's completed counter. Peers that have ever completed the + /// download. pub completed: u64, + /// The torrent's leechers counter. Active peers that are downloading the + /// torrent. pub leechers: u64, - // todo: this is always None. Remove field from endpoint? - pub peers: Option>, + /// The torrent's peers. It's always `None` in the struct and `null` in the + /// JSON response. + pub peers: Option>, // todo: this is always None. Remove field from endpoint? } impl ListItem { @@ -33,6 +59,8 @@ impl ListItem { } } +/// Maps an array of the domain type [`BasicInfo`](crate::tracker::services::torrent::BasicInfo) +/// to the API resource type [`ListItem`](crate::servers::apis::v1::context::torrent::resources::torrent::ListItem). #[must_use] pub fn to_resource(basic_info_vec: &[BasicInfo]) -> Vec { basic_info_vec diff --git a/src/servers/apis/v1/context/torrent/responses.rs b/src/servers/apis/v1/context/torrent/responses.rs index 48e3c6e7f..d3be092eb 100644 --- a/src/servers/apis/v1/context/torrent/responses.rs +++ b/src/servers/apis/v1/context/torrent/responses.rs @@ -1,17 +1,26 @@ +//! API responses for the [`torrent`](crate::servers::apis::v1::context::torrent) +//! API context. use axum::response::{IntoResponse, Json, Response}; use serde_json::json; use super::resources::torrent::{ListItem, Torrent}; use crate::tracker::services::torrent::{BasicInfo, Info}; +/// `200` response that contains an array of +/// [`ListItem`](crate::servers::apis::v1::context::torrent::resources::torrent::ListItem) +/// resources as json. 
pub fn torrent_list_response(basic_infos: &[BasicInfo]) -> Json> { Json(ListItem::new_vec(basic_infos)) } +/// `200` response that contains a +/// [`Torrent`](crate::servers::apis::v1::context::torrent::resources::torrent::Torrent) +/// resources as json. pub fn torrent_info_response(info: Info) -> Json { Json(Torrent::from(info)) } +/// `500` error response in plain text returned when a torrent is not found. #[must_use] pub fn torrent_not_known_response() -> Response { Json(json!("torrent not known")).into_response() diff --git a/src/servers/apis/v1/context/torrent/routes.rs b/src/servers/apis/v1/context/torrent/routes.rs index 00faa9665..18295f2a2 100644 --- a/src/servers/apis/v1/context/torrent/routes.rs +++ b/src/servers/apis/v1/context/torrent/routes.rs @@ -1,3 +1,9 @@ +//! API routes for the [`torrent`](crate::servers::apis::v1::context::torrent) API context. +//! +//! - `GET /torrent/:info_hash` +//! - `GET /torrents` +//! +//! Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent). use std::sync::Arc; use axum::routing::get; @@ -6,6 +12,7 @@ use axum::Router; use super::handlers::{get_torrent_handler, get_torrents_handler}; use crate::tracker::Tracker; +/// It adds the routes to the router for the [`torrent`](crate::servers::apis::v1::context::torrent) API context. pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { // Torrents router diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs index 25e285c0b..8e8c20b50 100644 --- a/src/servers/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -1,3 +1,5 @@ +//! API handlers for the [`whitelist`](crate::servers::apis::v1::context::whitelist) +//! API context. 
use std::str::FromStr; use std::sync::Arc; @@ -12,6 +14,15 @@ use crate::servers::apis::InfoHashParam; use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::Tracker; +/// It handles the request to add a torrent to the whitelist. +/// +/// It returns: +/// +/// - `200` response with a [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) in json. +/// - `500` with serialized error in debug format if the torrent couldn't be whitelisted. +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#add-a-torrent-to-the-whitelist) +/// for more information about this endpoint. pub async fn add_torrent_to_whitelist_handler( State(tracker): State>, Path(info_hash): Path, @@ -25,6 +36,15 @@ } } +/// It handles the request to remove a torrent from the whitelist. +/// +/// It returns: +/// +/// - `200` response with a [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) in json. +/// - `500` with serialized error in debug format if the torrent couldn't be +/// removed from the whitelist. +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#remove-a-torrent-from-the-whitelist) +/// for more information about this endpoint. pub async fn remove_torrent_from_whitelist_handler( State(tracker): State>, Path(info_hash): Path, @@ -38,6 +59,16 @@ } } +/// It handles the request to reload the torrent whitelist from the database. +/// +/// It returns: +/// +/// - `200` response with a [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) in json. +/// - `500` with serialized error in debug format if the torrent whitelist +/// couldn't be reloaded from the database. 
+/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#reload-the-whitelist) +/// for more information about this endpoint. pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { match tracker.load_whitelist_from_database().await { Ok(_) => ok_response(), diff --git a/src/servers/apis/v1/context/whitelist/mod.rs b/src/servers/apis/v1/context/whitelist/mod.rs index f6f000f34..2bb35ef65 100644 --- a/src/servers/apis/v1/context/whitelist/mod.rs +++ b/src/servers/apis/v1/context/whitelist/mod.rs @@ -1,3 +1,98 @@ +//! Whitelist API context. +//! +//! This API context is responsible for handling all the requests related to +//! the torrent whitelist. +//! +//! A torrent whitelist is a list of Info Hashes that are allowed to be tracked +//! by the tracker. This is useful when you want to limit the torrents that are +//! tracked by the tracker. +//! +//! Common tracker requests like `announce` and `scrape` are limited to the +//! torrents in the whitelist. The whitelist can be updated using the API. +//! +//! > **NOTICE**: the whitelist is only used when the tracker is configured to run +//! in `listed` or `private_listed` modes. Refer to the +//! [configuration crate documentation](https://docs.rs/torrust-tracker-configuration) +//! to know how to enable those modes. +//! +//! > **NOTICE**: if the tracker is not running in `listed` or `private_listed` +//! modes the requests to the whitelist API will be ignored. +//! +//! # Endpoints +//! +//! - [Add a torrent to the whitelist](#add-a-torrent-to-the-whitelist) +//! - [Remove a torrent from the whitelist](#remove-a-torrent-from-the-whitelist) +//! - [Reload the whitelist](#reload-the-whitelist) +//! +//! # Add a torrent to the whitelist +//! +//! `POST /whitelist/:info_hash` +//! +//! It adds a torrent infohash to the whitelist. +//! +//! **Path parameters** +//! +//! Name | Type | Description | Required | Example +//! ---|---|---|---|--- +//! 
`info_hash` | 40-char string | The Info Hash v1 | Yes | `5452869be36f9f3350ccee6b4544e7e76caaadab` +//! +//! **Example request** +//! +//! ```bash +//! curl -X POST "http://127.0.0.1:1212/api/v1/whitelist/5452869be36f9f3350ccee6b4544e7e76caaadab?token=MyAccessToken" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "status": "ok" +//! } +//! ``` +//! +//! # Remove a torrent from the whitelist +//! +//! `DELETE /whitelist/:info_hash` +//! +//! It removes a torrent infohash from the whitelist. +//! +//! **Path parameters** +//! +//! Name | Type | Description | Required | Example +//! ---|---|---|---|--- +//! `info_hash` | 40-char string | The Info Hash v1 | Yes | `5452869be36f9f3350ccee6b4544e7e76caaadab` +//! +//! **Example request** +//! +//! ```bash +//! curl -X DELETE "http://127.0.0.1:1212/api/v1/whitelist/5452869be36f9f3350ccee6b4544e7e76caaadab?token=MyAccessToken" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "status": "ok" +//! } +//! ``` +//! +//! # Reload the whitelist +//! +//! It reloads the whitelist from the database. +//! +//! **Example request** +//! +//! ```bash +//! curl "http://127.0.0.1:1212/api/v1/whitelist/reload?token=MyAccessToken" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "status": "ok" +//! } +//! ``` pub mod handlers; pub mod responses; pub mod routes; diff --git a/src/servers/apis/v1/context/whitelist/responses.rs b/src/servers/apis/v1/context/whitelist/responses.rs index 06d4a9448..ce901c2f0 100644 --- a/src/servers/apis/v1/context/whitelist/responses.rs +++ b/src/servers/apis/v1/context/whitelist/responses.rs @@ -1,19 +1,24 @@ +//! API responses for the [`whitelist`](crate::servers::apis::v1::context::whitelist) +//! API context. use std::error::Error; use axum::response::Response; use crate::servers::apis::v1::responses::unhandled_rejection_response; +/// `500` error response when a torrent cannot be removed from the whitelist. 
#[must_use] pub fn failed_to_remove_torrent_from_whitelist_response(e: E) -> Response { unhandled_rejection_response(format!("failed to remove torrent from whitelist: {e}")) } +/// `500` error response when a torrent cannot be added to the whitelist. #[must_use] pub fn failed_to_whitelist_torrent_response(e: E) -> Response { unhandled_rejection_response(format!("failed to whitelist torrent: {e}")) } +/// `500` error response when the whitelist cannot be reloaded from the database. #[must_use] pub fn failed_to_reload_whitelist_response(e: E) -> Response { unhandled_rejection_response(format!("failed to reload whitelist: {e}")) diff --git a/src/servers/apis/v1/context/whitelist/routes.rs b/src/servers/apis/v1/context/whitelist/routes.rs index 06011b462..65d511341 100644 --- a/src/servers/apis/v1/context/whitelist/routes.rs +++ b/src/servers/apis/v1/context/whitelist/routes.rs @@ -1,3 +1,10 @@ +//! API routes for the [`whitelist`](crate::servers::apis::v1::context::whitelist) API context. +//! +//! - `POST /whitelist/:info_hash` +//! - `DELETE /whitelist/:info_hash` +//! - `GET /whitelist/reload` +//! +//! Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist). use std::sync::Arc; use axum::routing::{delete, get, post}; @@ -6,6 +13,7 @@ use axum::Router; use super::handlers::{add_torrent_to_whitelist_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler}; use crate::tracker::Tracker; +/// It adds the routes to the router for the [`whitelist`](crate::servers::apis::v1::context::whitelist) API context. pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { let prefix = format!("{prefix}/whitelist"); diff --git a/src/servers/apis/v1/middlewares/auth.rs b/src/servers/apis/v1/middlewares/auth.rs index f0c63250b..608a1b7d2 100644 --- a/src/servers/apis/v1/middlewares/auth.rs +++ b/src/servers/apis/v1/middlewares/auth.rs @@ -1,3 +1,26 @@ +//! Authentication middleware for the API. +//! +//! 
It uses a "token" GET param to authenticate the user. URLs must be of the +//! form: +//! +//! `http://:/api/v1/?token=`. +//! +//! > **NOTICE**: the token can be at any position in the URL, not just at the +//! > beginning or at the end. +//! +//! The token must be one of the `access_tokens` in the tracker +//! [HTTP API configuration](torrust_tracker_configuration::HttpApi). +//! +//! The configuration file `config.toml` contains a list of tokens: +//! +//! ```toml +//! [http_api.access_tokens] +//! admin = "MyAccessToken" +//! ``` +//! +//! All the tokens have the same permissions, so it is not possible to have +//! different permissions for different tokens. The label is only used to +//! identify the token. use std::sync::Arc; use axum::extract::{Query, State}; @@ -9,13 +32,14 @@ use torrust_tracker_configuration::{Configuration, HttpApi}; use crate::servers::apis::v1::responses::unhandled_rejection_response; +/// Container for the `token` extracted from the query params. #[derive(Deserialize, Debug)] pub struct QueryParams { pub token: Option, } /// Middleware for authentication using a "token" GET param. -/// The token must be one of the tokens in the tracker HTTP API configuration. +/// The token must be one of the tokens in the tracker [HTTP API configuration](torrust_tracker_configuration::HttpApi). pub async fn auth( State(config): State>, Query(params): Query, @@ -35,7 +59,9 @@ where } enum AuthError { + /// Missing token for authentication. Unauthorized, + /// Token was provided but it is not valid. TokenNotValid, } @@ -52,11 +78,13 @@ fn authenticate(token: &str, http_api_config: &HttpApi) -> bool { http_api_config.contains_token(token) } +/// `500` error response returned when the token is missing. #[must_use] pub fn unauthorized_response() -> Response { unhandled_rejection_response("unauthorized".to_string()) } +/// `500` error response when the provided token is not valid. 
#[must_use] pub fn token_not_valid_response() -> Response { unhandled_rejection_response("token not valid".to_string()) diff --git a/src/servers/apis/v1/middlewares/mod.rs b/src/servers/apis/v1/middlewares/mod.rs index 0e4a05d59..141e3038a 100644 --- a/src/servers/apis/v1/middlewares/mod.rs +++ b/src/servers/apis/v1/middlewares/mod.rs @@ -1 +1,2 @@ +//! API middlewares. See [Axum middlewares](axum::middleware). pub mod auth; diff --git a/src/servers/apis/v1/mod.rs b/src/servers/apis/v1/mod.rs index e87984b8e..213ee9335 100644 --- a/src/servers/apis/v1/mod.rs +++ b/src/servers/apis/v1/mod.rs @@ -1,3 +1,21 @@ +//! The API version `v1`. +//! +//! The API is organized in the following contexts: +//! +//! Context | Description | Version +//! ---|---|--- +//! `Stats` | Tracker statistics | [`v1`](crate::servers::apis::v1::context::stats) +//! `Torrents` | Torrents | [`v1`](crate::servers::apis::v1::context::torrent) +//! `Whitelist` | Torrents whitelist | [`v1`](crate::servers::apis::v1::context::whitelist) +//! `Authentication keys` | Authentication keys | [`v1`](crate::servers::apis::v1::context::auth_key) +//! +//! > **NOTICE**: +//! - The authentication keys are only used by the HTTP tracker. +//! - The whitelist is only used when the tracker is running in `listed` or +//! `private_listed` mode. +//! +//! Refer to the [authentication middleware](crate::servers::apis::v1::middlewares::auth) +//! for more information about the authentication process. pub mod context; pub mod middlewares; pub mod responses; diff --git a/src/servers/apis/v1/responses.rs b/src/servers/apis/v1/responses.rs index 4a9c39bf9..ecaf90098 100644 --- a/src/servers/apis/v1/responses.rs +++ b/src/servers/apis/v1/responses.rs @@ -1,3 +1,4 @@ +//! Common responses for the API v1 shared by all the contexts. 
use axum::http::{header, StatusCode}; use axum::response::{IntoResponse, Response}; use serde::Serialize; @@ -22,6 +23,8 @@ use serde::Serialize; We can put the second level of validation in the application and domain services. */ +/// Response status used when requests have only two possible results +/// `Ok` or `Error` and no data is returned. #[derive(Serialize, Debug)] #[serde(tag = "status", rename_all = "snake_case")] pub enum ActionStatus<'a> { diff --git a/src/servers/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs index d45319c4b..7b792f8a8 100644 --- a/src/servers/apis/v1/routes.rs +++ b/src/servers/apis/v1/routes.rs @@ -1,3 +1,4 @@ +//! Route initialization for the v1 API. use std::sync::Arc; use axum::Router; @@ -5,6 +6,12 @@ use axum::Router; use super::context::{auth_key, stats, torrent, whitelist}; use crate::tracker::Tracker; +/// Add the routes for the v1 API. +/// +/// > **NOTICE**: the old API endpoints without `v1` prefix are kept for +/// backward compatibility. For example, the `GET /api/stats` endpoint is +/// still available, but it is deprecated and will be removed in the future. +/// You should use the `GET /api/v1/stats` endpoint instead. pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { // Without `v1` prefix. // We keep the old API endpoints without `v1` prefix for backward compatibility. diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index b8aa6b19f..4212f86c4 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -1,4 +1,4 @@ -//! Tracker HTTP/HTTPS Protocol: +//! Tracker HTTP/HTTPS Protocol. //! //! Original specification in BEP 3 (section "Trackers"): //! diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index faabbe095..dd2e94660 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -79,7 +79,8 @@ //! //! let peer_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); //! ``` -//! ```rust,ignore +//! +//! ```text //! 
let announce_data = tracker.announce(&info_hash, &mut peer, &peer_ip).await; //! ``` //! @@ -412,7 +413,7 @@ //! //! For example, the HTTP tracker would send an event like the following when it handles an `announce` request received from a peer using IP version 4. //! -//! ```rust,ignore +//! ```text //! tracker.send_stats_event(statistics::Event::Tcp4Announce).await //! ``` //! diff --git a/src/tracker/services/statistics/mod.rs b/src/tracker/services/statistics/mod.rs index ac3ba510e..3761e38de 100644 --- a/src/tracker/services/statistics/mod.rs +++ b/src/tracker/services/statistics/mod.rs @@ -12,7 +12,7 @@ //! - An statistics [`EventSender`](crate::tracker::statistics::EventSender) //! - An statistics [`Repo`](crate::tracker::statistics::Repo) //! -//! ```rust,ignore +//! ```text //! let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); //! ``` //! @@ -23,7 +23,7 @@ //! //! For example, if you send the event [`Event::Udp4Connect`](crate::tracker::statistics::Event::Udp4Connect): //! -//! ```rust,ignore +//! ```text //! let result = event_sender.send_event(Event::Udp4Connect).await; //! ``` //! diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 03f4fc081..85cc4f255 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -62,17 +62,31 @@ pub enum Event { /// and also for each IP version used by the peers: IPv4 and IPv6. #[derive(Debug, PartialEq, Default)] pub struct Metrics { + /// Total number of TCP (HTTP tracker) connections from IPv4 peers. + /// Since the HTTP tracker spec does not require a handshake, this metric + /// increases for every HTTP request. pub tcp4_connections_handled: u64, + /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. pub tcp4_announces_handled: u64, + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. pub tcp4_scrapes_handled: u64, + /// Total number of TCP (HTTP tracker) connections from IPv6 peers. 
pub tcp6_connections_handled: u64, + /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. pub tcp6_announces_handled: u64, + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. pub tcp6_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) connections from IPv4 peers. pub udp4_connections_handled: u64, + /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. pub udp4_announces_handled: u64, + /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. pub udp4_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. pub udp6_connections_handled: u64, + /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. pub udp6_announces_handled: u64, + /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. pub udp6_scrapes_handled: u64, } From 11d87317b3362ce41102b1b11ee8de9943b50d22 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 30 Mar 2023 20:02:00 +0100 Subject: [PATCH 0504/1003] docs: [#266] crate docs for servers::http mod --- cSpell.json | 2 + packages/configuration/src/lib.rs | 24 +- src/lib.rs | 19 +- src/servers/http/mod.rs | 309 +++++++++++++++++- src/servers/http/percent_encoding.rs | 63 +++- src/servers/http/server.rs | 50 ++- .../http/v1/extractors/announce_request.rs | 31 ++ .../http/v1/extractors/authentication_key.rs | 46 ++- .../http/v1/extractors/client_ip_sources.rs | 41 ++- src/servers/http/v1/extractors/mod.rs | 4 + .../http/v1/extractors/scrape_request.rs | 31 ++ src/servers/http/v1/handlers/announce.rs | 18 + src/servers/http/v1/handlers/common/auth.rs | 8 + src/servers/http/v1/handlers/common/mod.rs | 1 + .../http/v1/handlers/common/peer_ip.rs | 6 + src/servers/http/v1/handlers/mod.rs | 4 + src/servers/http/v1/handlers/scrape.rs | 14 + src/servers/http/v1/launcher.rs | 1 + src/servers/http/v1/mod.rs | 4 + src/servers/http/v1/query.rs | 60 +++- 
src/servers/http/v1/requests/announce.rs | 81 +++++ src/servers/http/v1/requests/mod.rs | 4 + src/servers/http/v1/requests/scrape.rs | 3 + src/servers/http/v1/responses/announce.rs | 191 ++++++++++- src/servers/http/v1/responses/error.rs | 31 +- src/servers/http/v1/responses/mod.rs | 4 + src/servers/http/v1/responses/scrape.rs | 37 +++ src/servers/http/v1/routes.rs | 5 + src/servers/http/v1/services/announce.rs | 20 ++ src/servers/http/v1/services/mod.rs | 7 + .../http/v1/services/peer_ip_resolver.rs | 75 ++++- src/servers/http/v1/services/scrape.rs | 24 ++ src/servers/mod.rs | 1 + src/servers/signals.rs | 1 + src/shared/bit_torrent/mod.rs | 68 ++++ src/tracker/mod.rs | 17 + src/tracker/torrent.rs | 2 - 37 files changed, 1269 insertions(+), 38 deletions(-) diff --git a/cSpell.json b/cSpell.json index e7c0166f8..af0de7101 100644 --- a/cSpell.json +++ b/cSpell.json @@ -5,6 +5,7 @@ "automock", "Avicora", "Azureus", + "bdecode", "bencode", "bencoded", "beps", @@ -56,6 +57,7 @@ "reannounce", "repr", "reqwest", + "rerequests", "rngs", "rusqlite", "rustfmt", diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 8b4d9363d..d5beca236 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -64,9 +64,29 @@ pub struct Configuration { pub db_driver: DatabaseDriver, pub db_path: String, - /// Interval in seconds that the client should wait between sending regular announce requests to the tracker + /// Interval in seconds that the client should wait between sending regular + /// announce requests to the tracker. + /// + /// It's a **recommended** wait time between announcements. + /// + /// This is the standard amount of time that clients should wait between + /// sending consecutive announcements to the tracker. This value is set by + /// the tracker and is typically provided in the tracker's response to a + /// client's initial request. 
It serves as a guideline for clients to know + /// how often they should contact the tracker for updates on the peer list, + /// while ensuring that the tracker is not overwhelmed with requests. pub announce_interval: u32, - /// Minimum announce interval. Clients must not reannounce more frequently than this + /// Minimum announce interval. Clients must not reannounce more frequently + /// than this. + /// + /// It establishes the shortest allowed wait time. + /// + /// This is an optional parameter in the protocol that the tracker may + /// provide in its response. It sets a lower limit on the frequency at which + /// clients are allowed to send announcements. Clients should respect this + /// value to prevent sending too many requests in a short period, which + /// could lead to excessive load on the tracker or even getting banned by + /// the tracker for not adhering to the rules. pub min_announce_interval: u32, pub on_reverse_proxy: bool, pub external_ip: Option, diff --git a/src/lib.rs b/src/lib.rs index 3b9777b36..36d1792d3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -33,9 +33,13 @@ //! - [Run with docker](#run-with-docker) //! - [Configuration](#configuration) //! - [Usage](#usage) +//! - [API](#api) +//! - [HTTP Tracker](#http-tracker) +//! - [UDP Tracker](#udp-tracker) //! - [Components](#components) //! - [Implemented BEPs](#implemented-beps) //! - [Contributing](#contributing) +//! - [Documentation](#documentation) //! //! # Features //! @@ -181,7 +185,7 @@ //! - UDP tracker: //! - HTTP tracker: //! -//! ## API usage +//! ## API //! //! In order to use the tracker API you need to enable it in the configuration: //! @@ -231,7 +235,7 @@ //! //! Refer to the [`API`](crate::servers::apis) documentation for more information about the [`API`](crate::servers::apis) endpoints. //! -//! ## HTTP tracker usage +//! ## HTTP tracker //! //! The HTTP tracker implements two type of requests: //! @@ -331,7 +335,7 @@ //! 
You can also use the Torrust Tracker together with the [Torrust Index](https://github.com/torrust/torrust-index). If that's the case, //! the Index will create the keys by using the tracker [API](crate::servers::apis). //! -//! ## UDP tracker usage +//! ## UDP tracker //! //! The UDP tracker also implements two type of requests: //! @@ -430,6 +434,15 @@ //! # Contributing //! //! If you want to contribute to this documentation you can [open a new pull request](https://github.com/torrust/torrust-tracker/pulls). +//! +//! # Documentation +//! +//! You can find this documentation on [docs.rs](https://docs.rs/torrust-tracker/). +//! +//! If you want to contribute to this documentation you can [open a new pull request](https://github.com/torrust/torrust-tracker/pulls). +//! +//! In addition to the production code documentation you can find a lot of +//! examples on the integration and unit tests. pub mod app; pub mod bootstrap; pub mod servers; diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index 4212f86c4..78c086892 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -1,22 +1,317 @@ -//! Tracker HTTP/HTTPS Protocol. +//! HTTP Tracker. //! -//! Original specification in BEP 3 (section "Trackers"): +//! This module contains the HTTP tracker implementation. //! -//! +//! The HTTP tracker is a simple HTTP server that responds to two `GET` requests: //! -//! Other resources: +//! - `Announce`: used to announce the presence of a peer to the tracker. +//! - `Scrape`: used to get information about a torrent. //! -//! - -//! - +//! Refer to the [`bit_torrent`](crate::shared::bit_torrent) module for more +//! information about the `BitTorrent` protocol. //! - +//! ## Table of Contents +//! +//! - [Requests](#requests) +//! - [Announce](#announce) +//! - [Scrape](#scrape) +//! - [Versioning](#versioning) +//! - [Links](#links) +//! +//! ## Requests +//! +//! ### Announce +//! +//! 
`Announce` requests are used to announce the presence of a peer to the +//! tracker. The tracker responds with a list of peers that are also downloading +//! the same torrent. A "swarm" is a group of peers that are downloading the +//! same torrent. +//! +//! `Announce` responses are encoded in [bencoded](https://en.wikipedia.org/wiki/Bencode) +//! format. +//! +//! There are two types of `Announce` responses: `compact` and `non-compact`. In +//! a compact response, the peers are encoded in a single string. In a +//! non-compact response, the peers are encoded in a list of dictionaries. The +//! compact response is more efficient than the non-compact response and it does +//! not contain the peer's IDs. +//! +//! **Query parameters** +//! +//! > **NOTICE**: you can click on the parameter name to see a full description +//! after extracting and parsing the parameter from the URL query component. +//! +//! Parameter | Type | Description | Required | Default | Example +//! ---|---|---|---|---|--- +//! [`info_hash`](crate::servers::http::v1::requests::announce::Announce::info_hash) | percent encoded of 40-byte array | The `Info Hash` of the torrent. | Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` +//! `peer_addr` | string |The IP address of the peer. | No | No | `2.137.87.41` +//! [`downloaded`](crate::servers::http::v1::requests::announce::Announce::downloaded) | positive integer |The number of bytes downloaded by the peer. | No | `0` | `0` +//! [`uploaded`](crate::servers::http::v1::requests::announce::Announce::uploaded) | positive integer | The number of bytes uploaded by the peer. | No | `0` | `0` +//! [`peer_id`](crate::servers::http::v1::requests::announce::Announce::peer_id) | percent encoded of 20-byte array | The ID of the peer. | Yes | No | `-qB00000000000000001` +//! [`port`](crate::servers::http::v1::requests::announce::Announce::port) | positive integer | The port used by the peer. | Yes | No | `17548` +//! 
[`left`](crate::servers::http::v1::requests::announce::Announce::left) | positive integer | The number of bytes pending to download. | No | `0` | `0` +//! [`event`](crate::servers::http::v1::requests::announce::Announce::event) | positive integer | The event that triggered the `Announce` request: `started`, `completed`, `stopped` | No | `None` | `completed` +//! [`compact`](crate::servers::http::v1::requests::announce::Announce::compact) | `0` or `1` | Whether the tracker should return a compact peer list. | No | `None` | `0` +//! `numwant` | positive integer | **Not implemented**. The maximum number of peers you want in the reply. | No | `50` | `50` +//! +//! Refer to the [`Announce`](crate::servers::http::v1::requests::announce::Announce) +//! request for more information about the parameters. +//! +//! > **NOTICE**: the [BEP 03](https://www.bittorrent.org/beps/bep_0003.html) +//! defines only the `ip` and `event` parameters as optional. However, the +//! tracker assigns default values to the optional parameters if they are not +//! provided. +//! +//! > **NOTICE**: the `peer_addr` parameter is not part of the original +//! specification. But the peer IP was added in the +//! [UDP Tracker protocol](https://www.bittorrent.org/beps/bep_0015.html). It is +//! used to provide the peer's IP address to the tracker, but it is ignored by +//! the tracker. The tracker uses the IP address of the peer that sent the +//! request or the right-most-ip in the `X-Forwarded-For` header if the tracker +//! is behind a reverse proxy. +//! +//! > **NOTICE**: the maximum number of peers that the tracker can return is +//! `74`. Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). +//! Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) +//! for more information about this limitation. +//! +//! > **NOTICE**: the `info_hash` parameter is NOT a `URL` encoded string param. +//! 
It is percent encode of the raw `info_hash` bytes (40 bytes). URL `GET` params +//! can contain any bytes, not only well-formed UTF-8. The `info_hash` is a +//! 20-byte SHA1. Check the [`percent_encoding`](crate::servers::http::percent_encoding) +//! module to know more about the encoding. +//! +//! > **NOTICE**: the `peer_id` parameter is NOT a `URL` encoded string param. +//! It is percent encode of the raw peer ID bytes (20 bytes). URL `GET` params +//! can contain any bytes, not only well-formed UTF-8. The `info_hash` is a +//! 20-byte SHA1. Check the [`percent_encoding`](crate::servers::http::percent_encoding) +//! module to know more about the encoding. +//! +//! > **NOTICE**: by default, the tracker returns the non-compact peer list when +//! no `compact` parameter is provided or is empty. The +//! [BEP 23](https://www.bittorrent.org/beps/bep_0023.html) suggests to do the +//! opposite. The tracker should return the compact peer list by default and +//! return the non-compact peer list if the `compact` parameter is `0`. +//! +//! **Sample announce URL** +//! +//! A sample `GET` `announce` request: +//! +//! +//! +//! **Sample non-compact response** +//! +//! In [bencoded](https://en.wikipedia.org/wiki/Bencode) format: +//! +//! ```text +//! d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peersld2:ip15:105.105.105.1057:peer id20:-qB000000000000000014:porti28784eed2:ip39:6969:6969:6969:6969:6969:6969:6969:69697:peer id20:-qB000000000000000024:porti28784eeee +//! ``` +//! +//! And represented as a json: +//! +//! ```json +//! { +//! "complete": 333, +//! "incomplete": 444, +//! "interval": 111, +//! "min interval": 222, +//! "peers": [ +//! { +//! "ip": "105.105.105.105", +//! "peer id": "-qB00000000000000001", +//! "port": 28784 +//! }, +//! { +//! "ip": "6969:6969:6969:6969:6969:6969:6969:6969", +//! "peer id": "-qB00000000000000002", +//! "port": 28784 +//! } +//! ] +//! } +//! ``` +//! +//! 
If you save the response as a file and you open it with a program that can +//! handle binary data you would see: +//! +//! ```text +//! 00000000: 6438 3a63 6f6d 706c 6574 6569 3333 3365 d8:completei333e +//! 00000010: 3130 3a69 6e63 6f6d 706c 6574 6569 3434 10:incompletei44 +//! 00000020: 3465 383a 696e 7465 7276 616c 6931 3131 4e8:intervali111 +//! 00000030: 6531 323a 6d69 6e20 696e 7465 7276 616c e12:min interval +//! 00000040: 6932 3232 6535 3a70 6565 7273 6c64 323a i222e5:peersld2: +//! 00000050: 6970 3135 3a31 3035 2e31 3035 2e31 3035 ip15:105.105.105 +//! 00000060: 2e31 3035 373a 7065 6572 2069 6432 303a .1057:peer id20: +//! 00000070: 2d71 4230 3030 3030 3030 3030 3030 3030 -qB0000000000000 +//! 00000080: 3030 3031 343a 706f 7274 6932 3837 3834 00014:porti28784 +//! 00000090: 6565 6432 3a69 7033 393a 3639 3639 3a36 eed2:ip39:6969:6 +//! 000000a0: 3936 393a 3639 3639 3a36 3936 393a 3639 969:6969:6969:69 +//! 000000b0: 3639 3a36 3936 393a 3639 3639 3a36 3936 69:6969:6969:696 +//! 000000c0: 3937 3a70 6565 7220 6964 3230 3a2d 7142 97:peer id20:-qB +//! 000000d0: 3030 3030 3030 3030 3030 3030 3030 3030 0000000000000000 +//! 000000e0: 3234 3a70 6f72 7469 3238 3738 3465 6565 24:porti28784eee +//! 000000f0: 65 e +//! ``` +//! +//! Refer to the [`NonCompact`](crate::servers::http::v1::responses::announce::NonCompact) +//! response for more information about the response. +//! +//! **Sample compact response** +//! +//! In [bencoded](https://en.wikipedia.org/wiki/Bencode) format: +//! +//! ```text +//! d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peers6:iiiipp6:peers618:iiiiiiiiiiiiiiiippe +//! ``` +//! +//! And represented as a json: +//! +//! ```json +//! { +//! "complete": 333, +//! "incomplete": 444, +//! "interval": 111, +//! "min interval": 222, +//! "peers": "iiiipp", +//! "peers6": "iiiiiiiiiiiiiiiipp" +//! } +//! ``` +//! +//! If you save the response as a file and you open it with a program that can +//! 
handle binary data you would see: +//! +//! ```text +//! 0000000: 6438 3a63 6f6d 706c 6574 6569 3333 3365 d8:completei333e +//! 0000010: 3130 3a69 6e63 6f6d 706c 6574 6569 3434 10:incompletei44 +//! 0000020: 3465 383a 696e 7465 7276 616c 6931 3131 4e8:intervali111 +//! 0000030: 6531 323a 6d69 6e20 696e 7465 7276 616c e12:min interval +//! 0000040: 6932 3232 6535 3a70 6565 7273 363a 6969 i222e5:peers6:ii +//! 0000050: 6969 7070 363a 7065 6572 7336 3138 3a69 iipp6:peers618:i +//! 0000060: 6969 6969 6969 6969 6969 6969 6969 6970 iiiiiiiiiiiiiiip +//! 0000070: 7065 pe +//! ``` +//! +//! Refer to the [`Compact`](crate::servers::http::v1::responses::announce::Compact) +//! response for more information about the response. +//! +//! **Protocol** +//! +//! Original specification in [BEP 03. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). +//! +//! If you want to know more about the `announce` request: +//! +//! - [BEP 03. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! - [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +//! - [Vuze announce docs](https://wiki.vuze.com/w/Announce) +//! - [wiki.theory.org - Announce](https://wiki.theory.org/BitTorrent_Tracker_Protocol#Basic_Tracker_Announce_Request) +//! +//! ### Scrape +//! +//! The `scrape` request allows a peer to get [swarm metadata](crate::tracker::torrent::SwarmMetadata) +//! for multiple torrents at the same time. +//! +//! The response contains the [swarm metadata](crate::tracker::torrent::SwarmMetadata) +//! for that torrent: +//! +//! - [complete](crate::tracker::torrent::SwarmMetadata::complete) +//! - [downloaded](crate::tracker::torrent::SwarmMetadata::downloaded) +//! - [incomplete](crate::tracker::torrent::SwarmMetadata::incomplete) +//! +//! **Query parameters** +//! +//! Parameter | Type | Description | Required | Default | Example +//! ---|---|---|---|---|--- +//! 
[`info_hash`](crate::servers::http::v1::requests::scrape::Scrape::info_hashes) | percent encoded of 20-byte array | The `Info Hash` of the torrent. | Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` +//! +//! > **NOTICE**: you can scrape multiple torrents at the same time by passing +//! multiple `info_hash` parameters. +//! +//! Refer to the [`Scrape`](crate::servers::http::v1::requests::scrape::Scrape) +//! request for more information about the parameters. +//! +//! **Sample scrape URL** +//! +//! A sample `scrape` request for only one torrent: +//! +//! +//! +//! In order to scrape multiple torrents at the same time you can pass multiple +//! `info_hash` parameters: `info_hash=%81%00%0...00%00%00&info_hash=%82%00%0...00%00%00` +//! +//! > **NOTICE**: the maximum number of torrents you can scrape at the same time +//! is `74`. Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). +//! +//! **Sample response** +//! +//! The `scrape` response is a [bencoded](https://en.wikipedia.org/wiki/Bencode) +//! byte array like the following: +//! +//! ```text +//! d5:filesd20:iiiiiiiiiiiiiiiiiiiid8:completei1e10:downloadedi2e10:incompletei3eeee +//! ``` +//! +//! And represented as a json: +//! +//! ```json +//! { +//! "files": { +//! "iiiiiiiiiiiiiiiiiiii": { +//! "complete": 1, +//! "downloaded": 2, +//! "incomplete": 3 +//! } +//! } +//! } +//! ``` +//! +//! Where the `files` key contains a dictionary of dictionaries. The first +//! dictionary key is the `info_hash` of the torrent (`iiiiiiiiiiiiiiiiiiii` in +//! the example). The second level dictionary contains the +//! [swarm metadata](crate::tracker::torrent::SwarmMetadata) for that torrent. +//! +//! If you save the response as a file and you open it with a program that +//! can handle binary data you would see: +//! +//! ```text +//! 00000000: 6435 3a66 696c 6573 6432 303a 6969 6969 d5:filesd20:iiii +//! 
00000010: 6969 6969 6969 6969 6969 6969 6969 6969 iiiiiiiiiiiiiiii +//! 00000020: 6438 3a63 6f6d 706c 6574 6569 3165 3130 d8:completei1e10 +//! 00000030: 3a64 6f77 6e6c 6f61 6465 6469 3265 3130 :downloadedi2e10 +//! 00000040: 3a69 6e63 6f6d 706c 6574 6569 3365 6565 :incompletei3eee +//! 00000050: 65 e +//! ``` +//! +//! **Protocol** +//! +//! If you want to know more about the `scrape` request: +//! +//! - [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +//! - [Vuze scrape docs](https://wiki.vuze.com/w/Scrape) +//! +//! ## Versioning +//! +//! Right now there is only version `v1`. The HTTP tracker implements BEPS: +//! +//! - [BEP 03. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! - [BEP 07. IPv6 Tracker Extension](https://www.bittorrent.org/beps/bep_0007.html) +//! - [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +//! - [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +//! +//! In the future there could be a `v2` that implements new BEPS with breaking +//! changes. +//! +//! ## Links +//! +//! - [Bencode](https://en.wikipedia.org/wiki/Bencode). +//! - [Bencode to Json Online converter](https://chocobo1.github.io/bencode_online). use serde::{Deserialize, Serialize}; pub mod percent_encoding; pub mod server; pub mod v1; +/// The version of the HTTP tracker. #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] pub enum Version { + /// The `v1` version of the HTTP tracker. V1, } diff --git a/src/servers/http/percent_encoding.rs b/src/servers/http/percent_encoding.rs index 019735e0f..b807e74c9 100644 --- a/src/servers/http/percent_encoding.rs +++ b/src/servers/http/percent_encoding.rs @@ -1,17 +1,76 @@ +//! This module contains functions for percent decoding infohashes and peer IDs. +//! +//! Percent encoding is an encoding format used to encode arbitrary data in a +//! 
format that is safe to use in URLs. It is used by the HTTP tracker protocol +//! to encode infohashes and peer ids in the URLs of requests. +//! +//! `BitTorrent` infohashes and peer ids are percent encoded like any other +//! arbitrary URL parameter. But they are encoded from binary data (byte arrays) +//! which may not be valid UTF-8. That makes it hard to use the `percent_encoding` +//! crate to decode them because all of them expect a well-formed UTF-8 string. +//! However, percent encoding is not limited to UTF-8 strings. +//! +//! More information about "Percent Encoding" can be found here: +//! +//! - +//! - +//! - use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; +/// Percent decodes a percent encoded infohash. Internally an +/// [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) is a 20-byte array. +/// +/// For example, given the infohash `3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0`, +/// its percent encoded representation is `%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0`. +/// +/// ```rust +/// use std::str::FromStr; +/// use torrust_tracker::servers::http::percent_encoding::percent_decode_info_hash; +/// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +/// use torrust_tracker::tracker::peer; +/// +/// let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; +/// +/// let info_hash = percent_decode_info_hash(encoded_infohash).unwrap(); +/// +/// assert_eq!( +/// info_hash, +/// InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap() +/// ); +/// ``` +/// /// # Errors /// -/// Will return `Err` if the decoded bytes do not represent a valid `InfoHash`. +/// Will return `Err` if the decoded bytes do not represent a valid +/// [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash). 
pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { let bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); InfoHash::try_from(bytes) } +/// Percent decodes a percent encoded peer id. Internally a peer [`Id`](crate::tracker::peer::Id) +/// is a 20-byte array. +/// +/// For example, given the peer id `*b"-qB00000000000000000"`, +/// its percent encoded representation is `%2DqB00000000000000000`. +/// +/// ```rust +/// use std::str::FromStr; +/// use torrust_tracker::servers::http::percent_encoding::percent_decode_peer_id; +/// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +/// use torrust_tracker::tracker::peer; +/// +/// let encoded_peer_id = "%2DqB00000000000000000"; +/// +/// let peer_id = percent_decode_peer_id(encoded_peer_id).unwrap(); +/// +/// assert_eq!(peer_id, peer::Id(*b"-qB00000000000000000")); +/// ``` +/// /// # Errors /// -/// Will return `Err` if if the decoded bytes do not represent a valid `peer::Id`. +/// Will return `Err` if the decoded bytes do not represent a valid [`peer::Id`](crate::tracker::peer::Id). pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result { let bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); peer::Id::try_from(bytes) diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 510c685d4..3008771ee 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -1,3 +1,4 @@ +//! Module to handle the HTTP server instances. use std::future::Future; use std::net::SocketAddr; use std::sync::Arc; @@ -7,7 +8,10 @@ use futures::future::BoxFuture; use crate::servers::signals::shutdown_signal; use crate::tracker::Tracker; -/// Trait to be implemented by a http server launcher for the tracker. +/// Trait to be implemented by an HTTP server launcher for the tracker. +/// +/// A launcher is responsible for starting the server and returning the +/// `SocketAddr` it is bound to. 
#[allow(clippy::module_name_repetitions)] pub trait HttpServerLauncher: Sync + Send { fn new() -> Self; @@ -22,26 +26,62 @@ pub trait HttpServerLauncher: Sync + Send { F: Future + Send + 'static; } +/// Error that can occur when starting or stopping the HTTP server. +/// +/// Some errors triggered while starting the server are: +/// +/// - The spawned server cannot send its `SocketAddr` back to the main thread. +/// - The launcher cannot receive the `SocketAddr` from the spawned server. +/// +/// Some errors triggered while stopping the server are: +/// +/// - The channel to send the shutdown signal to the server is closed. +/// - The task to shutdown the server on the spawned server failed to execute to +/// completion. #[derive(Debug)] pub enum Error { - Error(String), + /// Any kind of error starting or stopping the server. + Error(String), // todo: refactor to use thiserror and add more variants for specific errors. } +/// A stopped HTTP server. #[allow(clippy::module_name_repetitions)] pub type StoppedHttpServer = HttpServer>; + +/// A running HTTP server. #[allow(clippy::module_name_repetitions)] pub type RunningHttpServer = HttpServer>; +/// An HTTP running server controller. +/// +/// It's responsible for: +/// +/// - Keeping the initial configuration of the server. +/// - Starting and stopping the server. +/// - Keeping the state of the server: `running` or `stopped`. +/// +/// It's a state machine. Configurations cannot be changed. This struct +/// represents concrete configuration and state. It allows to start and stop the +/// server but always keeping the same configuration. +/// +/// > **NOTICE**: if the configuration changes after running the server it will +/// reset to the initial value after stopping the server. This struct is not +/// intended to persist configurations between runs. #[allow(clippy::module_name_repetitions)] pub struct HttpServer { + /// The configuration of the server that will be used every time the server + /// is started. 
pub cfg: torrust_tracker_configuration::HttpTracker, + /// The state of the server: `running` or `stopped`. pub state: S, } +/// A stopped HTTP server state. pub struct Stopped { launcher: I, } +/// A running HTTP server state. pub struct Running { pub bind_addr: SocketAddr, task_killer: tokio::sync::oneshot::Sender, @@ -56,6 +96,9 @@ impl HttpServer> { } } + /// It starts the server and returns a `HttpServer` controller in `running` + /// state. + /// /// # Errors /// /// It would return an error if no `SocketAddr` is returned after launching the server. @@ -93,6 +136,9 @@ impl HttpServer> { } impl HttpServer> { + /// It stops the server and returns a `HttpServer` controller in `stopped` + /// state. + /// /// # Errors /// /// It would return an error if the channel for the task killer signal was closed. diff --git a/src/servers/http/v1/extractors/announce_request.rs b/src/servers/http/v1/extractors/announce_request.rs index 501181c8c..5d947ef91 100644 --- a/src/servers/http/v1/extractors/announce_request.rs +++ b/src/servers/http/v1/extractors/announce_request.rs @@ -1,3 +1,32 @@ +//! Axum [`extractor`](axum::extract) for the [`Announce`](crate::servers::http::v1::requests::announce::Announce) +//! request. +//! +//! It parses the query parameters returning an [`Announce`](crate::servers::http::v1::requests::announce::Announce) +//! request. +//! +//! Refer to [`Announce`](crate::servers::http::v1::requests::announce) for more +//! information about the returned structure. +//! +//! It returns a bencoded [`Error`](crate::servers::http::v1::responses::error) +//! response (`500`) if the query parameters are missing or invalid. +//! +//! **Sample announce request** +//! +//! +//! +//! **Sample error response** +//! +//! Missing query params for `announce` request: +//! +//! ```text +//! d14:failure reason149:Cannot parse query params for announce request: missing query params for announce request in src/servers/http/v1/extractors/announce_request.rs:54:23e +//! 
``` +//! +//! Invalid query param (`info_hash`): +//! +//! ```text +//! d14:failure reason240:Cannot parse query params for announce request: invalid param value invalid for info_hash in not enough bytes for infohash: got 7 bytes, expected 20 src/shared/bit_torrent/info_hash.rs:240:27, src/servers/http/v1/requests/announce.rs:182:42e +//! ``` use std::panic::Location; use axum::async_trait; @@ -9,6 +38,8 @@ use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::announce::{Announce, ParseAnnounceQueryError}; use crate::servers::http::v1::responses; +/// Extractor for the [`Announce`](crate::servers::http::v1::requests::announce::Announce) +/// request. pub struct ExtractRequest(pub Announce); #[async_trait] diff --git a/src/servers/http/v1/extractors/authentication_key.rs b/src/servers/http/v1/extractors/authentication_key.rs index 71e9b9d25..20dc1c90b 100644 --- a/src/servers/http/v1/extractors/authentication_key.rs +++ b/src/servers/http/v1/extractors/authentication_key.rs @@ -1,4 +1,47 @@ -//! Wrapper for Axum `Path` extractor to return custom errors. +//! Axum [`extractor`](axum::extract) to extract the authentication [`Key`](crate::tracker::auth::Key) +//! from the URL path. +//! +//! It's only used when the tracker is running in private mode. +//! +//! Given the following URL route with a path param: `/announce/:key`, +//! it extracts the `key` param from the URL path. +//! +//! It's a wrapper for Axum `Path` extractor in order to return custom +//! authentication errors. +//! +//! It returns a bencoded [`Error`](crate::servers::http::v1::responses::error) +//! response (`500`) if the `key` parameter are missing or invalid. +//! +//! **Sample authentication error responses** +//! +//! When the key param is **missing**: +//! +//! ```text +//! d14:failure reason131:Authentication error: Missing authentication key param for private tracker. Error in src/servers/http/v1/handlers/announce.rs:79:31e +//! ``` +//! +//! 
When the key param has an **invalid format**: +//! +//! ```text +//! d14:failure reason134:Authentication error: Invalid format for authentication key param. Error in src/servers/http/v1/extractors/authentication_key.rs:73:23e +//! ``` +//! +//! When the key is **not found** in the database: +//! +//! ```text +//! d14:failure reason101:Authentication error: Failed to read key: YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ, src/tracker/mod.rs:848:27e +//! ``` +//! +//! When the key is found in the database but it's **expired**: +//! +//! ```text +//! d14:failure reason64:Authentication error: Key has expired, src/tracker/auth.rs:88:23e +//! ``` +//! +//! > **NOTICE**: the returned HTTP status code is always `200` for authentication errors. +//! Neither [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! nor [The Private Torrents](https://www.bittorrent.org/beps/bep_0027.html) +//! specifications specify any HTTP status code for authentication errors. use std::panic::Location; use axum::async_trait; @@ -12,6 +55,7 @@ use crate::servers::http::v1::handlers::common::auth; use crate::servers::http::v1::responses; use crate::tracker::auth::Key; +/// Extractor for the [`Key`](crate::tracker::auth::Key) struct. pub struct Extract(pub Key); #[derive(Deserialize)] diff --git a/src/servers/http/v1/extractors/client_ip_sources.rs b/src/servers/http/v1/extractors/client_ip_sources.rs index b291eba12..f04300402 100644 --- a/src/servers/http/v1/extractors/client_ip_sources.rs +++ b/src/servers/http/v1/extractors/client_ip_sources.rs @@ -1,5 +1,40 @@ -//! Wrapper for two Axum extractors to get the relevant information -//! to resolve the remote client IP. +//! Axum [`extractor`](axum::extract) to get the relevant information to resolve the remote +//! client IP. +//! +//! It's a wrapper for two third-party Axum extractors. +//! +//! The first one is `RightmostXForwardedFor` from the `axum-client-ip` crate. +//! 
This extractor is used to get the right-most IP address from the +//! `X-Forwarded-For` header. +//! +//! The second one is `ConnectInfo` from the `axum` crate. This extractor is +//! used to get the IP address of the client from the connection info. +//! +//! The `ClientIpSources` struct is a wrapper for the two extractors. +//! +//! The tracker can be configured to run behind a reverse proxy. In this case, +//! the tracker will use the `X-Forwarded-For` header to get the client IP +//! address. +//! +//! See [`torrust_tracker_configuration::Configuration::on_reverse_proxy`]. +//! +//! The tracker can also be configured to run without a reverse proxy. In this +//! case, the tracker will use the IP address from the connection info. +//! +//! Given the following scenario: +//! +//! ```text +//! client <-> http proxy 1 <-> http proxy 2 <-> server +//! ip: 126.0.0.1 ip: 126.0.0.2 ip: 126.0.0.3 ip: 126.0.0.4 +//! X-Forwarded-For: 126.0.0.1 X-Forwarded-For: 126.0.0.1,126.0.0.2 +//! ``` +//! +//! This extractor returns these values: +//! +//! ```text +//! `right_most_x_forwarded_for` = 126.0.0.2 +//! `connection_info_ip` = 126.0.0.3 +//! ``` use std::net::SocketAddr; use axum::async_trait; @@ -10,6 +45,8 @@ use axum_client_ip::RightmostXForwardedFor; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; +/// Extractor for the [`ClientIpSources`](crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources) +/// struct. pub struct Extract(pub ClientIpSources); #[async_trait] diff --git a/src/servers/http/v1/extractors/mod.rs b/src/servers/http/v1/extractors/mod.rs index 557330257..beab3f2b8 100644 --- a/src/servers/http/v1/extractors/mod.rs +++ b/src/servers/http/v1/extractors/mod.rs @@ -1,3 +1,7 @@ +//! Axum [`extractors`](axum::extract) for the HTTP server. +//! +//! This module contains the extractors used by the HTTP server to parse the +//! incoming requests. 
pub mod announce_request; pub mod authentication_key; pub mod client_ip_sources; diff --git a/src/servers/http/v1/extractors/scrape_request.rs b/src/servers/http/v1/extractors/scrape_request.rs index ee2502066..63c4dba69 100644 --- a/src/servers/http/v1/extractors/scrape_request.rs +++ b/src/servers/http/v1/extractors/scrape_request.rs @@ -1,3 +1,32 @@ +//! Axum [`extractor`](axum::extract) for the [`Scrape`](crate::servers::http::v1::requests::scrape::Scrape) +//! request. +//! +//! It parses the query parameters returning an [`Scrape`](crate::servers::http::v1::requests::scrape::Scrape) +//! request. +//! +//! Refer to [`Scrape`](crate::servers::http::v1::requests::scrape) for more +//! information about the returned structure. +//! +//! It returns a bencoded [`Error`](crate::servers::http::v1::responses::error) +//! response (`500`) if the query parameters are missing or invalid. +//! +//! **Sample scrape request** +//! +//! +//! +//! **Sample error response** +//! +//! Missing query params for scrape request: +//! +//! ```text +//! d14:failure reason143:Cannot parse query params for scrape request: missing query params for scrape request in src/servers/http/v1/extractors/scrape_request.rs:52:23e +//! ``` +//! +//! Invalid query params for scrape request: +//! +//! ```text +//! d14:failure reason235:Cannot parse query params for scrape request: invalid param value invalid for info_hash in not enough bytes for infohash: got 7 bytes, expected 20 src/shared/bit_torrent/info_hash.rs:240:27, src/servers/http/v1/requests/scrape.rs:66:46e +//! ``` use std::panic::Location; use axum::async_trait; @@ -9,6 +38,8 @@ use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::scrape::{ParseScrapeQueryError, Scrape}; use crate::servers::http::v1::responses; +/// Extractor for the [`Scrape`](crate::servers::http::v1::requests::scrape::Scrape) +/// request. 
pub struct ExtractRequest(pub Scrape); #[async_trait] diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index db41388ab..5b26b3758 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -1,3 +1,10 @@ +//! Axum [`handlers`](axum#handlers) for the `announce` requests. +//! +//! Refer to [HTTP server](crate::servers::http) for more information about the +//! `announce` request. +//! +//! The handlers perform the authentication and authorization of the request, +//! and resolve the client IP address. use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use std::sync::Arc; @@ -20,6 +27,8 @@ use crate::tracker::auth::Key; use crate::tracker::peer::Peer; use crate::tracker::{AnnounceData, Tracker}; +/// It handles the `announce` request when the HTTP tracker does not require +/// authentication (no PATH `key` parameter required). #[allow(clippy::unused_async)] pub async fn handle_without_key( State(tracker): State>, @@ -31,6 +40,8 @@ pub async fn handle_without_key( handle(&tracker, &announce_request, &client_ip_sources, None).await } +/// It handles the `announce` request when the HTTP tracker requires +/// authentication (PATH `key` parameter required). #[allow(clippy::unused_async)] pub async fn handle_with_key( State(tracker): State>, @@ -43,6 +54,10 @@ pub async fn handle_with_key( handle(&tracker, &announce_request, &client_ip_sources, Some(key)).await } +/// It handles the `announce` request. +/// +/// Internal implementation that handles both the `authenticated` and +/// `unauthenticated` modes. async fn handle( tracker: &Arc, announce_request: &Announce, @@ -59,6 +74,7 @@ async fn handle( /* code-review: authentication, authorization and peer IP resolution could be moved from the handler (Axum) layer into the app layer `services::announce::invoke`. That would make the handler even simpler and the code more reusable and decoupled from Axum. 
+ See https://github.com/torrust/torrust-tracker/discussions/240. */ async fn handle_announce( @@ -111,6 +127,8 @@ fn build_response(announce_request: &Announce, announce_data: AnnounceData) -> R } } +/// It builds a `Peer` from the announce request. +/// /// It ignores the peer address in the announce request params. #[must_use] fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> Peer { diff --git a/src/servers/http/v1/handlers/common/auth.rs b/src/servers/http/v1/handlers/common/auth.rs index 644556e95..f41635d69 100644 --- a/src/servers/http/v1/handlers/common/auth.rs +++ b/src/servers/http/v1/handlers/common/auth.rs @@ -1,3 +1,6 @@ +//! HTTP server authentication error and conversion to +//! [`responses::error::Error`](crate::servers::http::v1::responses::error::Error) +//! response. use std::panic::Location; use thiserror::Error; @@ -5,6 +8,11 @@ use thiserror::Error; use crate::servers::http::v1::responses; use crate::tracker::auth; +/// Authentication error. +/// +/// When the tracker is private, the authentication key is required in the URL +/// path. These are the possible errors that can occur when extracting the key +/// from the URL path. #[derive(Debug, Error)] pub enum Error { #[error("Missing authentication key param for private tracker. Error in {location}")] diff --git a/src/servers/http/v1/handlers/common/mod.rs b/src/servers/http/v1/handlers/common/mod.rs index dc028cabf..30eaf37b7 100644 --- a/src/servers/http/v1/handlers/common/mod.rs +++ b/src/servers/http/v1/handlers/common/mod.rs @@ -1,2 +1,3 @@ +//! Common logic for HTTP handlers. pub mod auth; pub mod peer_ip; diff --git a/src/servers/http/v1/handlers/common/peer_ip.rs b/src/servers/http/v1/handlers/common/peer_ip.rs index 685324b4a..d65efbc79 100644 --- a/src/servers/http/v1/handlers/common/peer_ip.rs +++ b/src/servers/http/v1/handlers/common/peer_ip.rs @@ -1,3 +1,9 @@ +//! Logic to convert peer IP resolution errors into responses. +//! +//! 
The HTTP tracker may fail to resolve the peer IP address. This module +//! contains the logic to convert those +//! [`PeerIpResolutionError`](crate::servers::http::v1::services::peer_ip_resolver::PeerIpResolutionError) +//! errors into responses. use crate::servers::http::v1::responses; use crate::servers::http::v1::services::peer_ip_resolver::PeerIpResolutionError; diff --git a/src/servers/http/v1/handlers/mod.rs b/src/servers/http/v1/handlers/mod.rs index 69b69127e..d78dee7d5 100644 --- a/src/servers/http/v1/handlers/mod.rs +++ b/src/servers/http/v1/handlers/mod.rs @@ -1,3 +1,7 @@ +//! Axum [`handlers`](axum#handlers) for the HTTP server. +//! +//! Refer to the generic [HTTP server documentation](crate::servers::http) for +//! more information about the HTTP tracker. use super::responses; use crate::tracker::error::Error; diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index f55194810..b8c1cbea1 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -1,3 +1,10 @@ +//! Axum [`handlers`](axum#handlers) for the `scrape` requests. +//! +//! Refer to [HTTP server](crate::servers::http) for more information about the +//! `scrape` request. +//! +//! The handlers perform the authentication and authorization of the request, +//! and resolve the client IP address. use std::sync::Arc; use axum::extract::State; @@ -13,6 +20,8 @@ use crate::servers::http::v1::{responses, services}; use crate::tracker::auth::Key; use crate::tracker::{ScrapeData, Tracker}; +/// It handles the `scrape` request when the HTTP tracker is configured +/// to run in `public` mode. #[allow(clippy::unused_async)] pub async fn handle_without_key( State(tracker): State>, @@ -24,6 +33,10 @@ pub async fn handle_without_key( handle(&tracker, &scrape_request, &client_ip_sources, None).await } +/// It handles the `scrape` request when the HTTP tracker is configured +/// to run in `private` or `private_listed` mode. 
+/// +/// In this case, the authentication `key` parameter is required. #[allow(clippy::unused_async)] pub async fn handle_with_key( State(tracker): State>, @@ -52,6 +65,7 @@ async fn handle( /* code-review: authentication, authorization and peer IP resolution could be moved from the handler (Axum) layer into the app layer `services::announce::invoke`. That would make the handler even simpler and the code more reusable and decoupled from Axum. + See https://github.com/torrust/torrust-tracker/discussions/240. */ async fn handle_scrape( diff --git a/src/servers/http/v1/launcher.rs b/src/servers/http/v1/launcher.rs index 4cfa4295d..96dd1baac 100644 --- a/src/servers/http/v1/launcher.rs +++ b/src/servers/http/v1/launcher.rs @@ -1,3 +1,4 @@ +//! Logic to start new HTTP server instances. use std::future::Future; use std::net::SocketAddr; use std::str::FromStr; diff --git a/src/servers/http/v1/mod.rs b/src/servers/http/v1/mod.rs index 79d230255..464a7ee14 100644 --- a/src/servers/http/v1/mod.rs +++ b/src/servers/http/v1/mod.rs @@ -1,3 +1,7 @@ +//! HTTP server implementation for the `v1` API. +//! +//! Refer to the generic [HTTP server documentation](crate::servers::http) for +//! more information about the endpoints and their usage. pub mod extractors; pub mod handlers; pub mod launcher; diff --git a/src/servers/http/v1/query.rs b/src/servers/http/v1/query.rs index c40e7949f..6bbdc63e9 100644 --- a/src/servers/http/v1/query.rs +++ b/src/servers/http/v1/query.rs @@ -1,3 +1,8 @@ +//! The `Query` struct used to parse and store the URL query parameters. +//! +//! ```text +//! URI = scheme ":" ["//" authority] path ["?" query] ["#" fragment] +//! ``` use std::panic::Location; use std::str::FromStr; @@ -7,7 +12,7 @@ use thiserror::Error; type ParamName = String; type ParamValue = String; -/// Represent a URL query component: +/// It represents a URL query component. /// /// ```text /// URI = scheme ":" ["//" authority] path ["?" 
query] ["#" fragment] @@ -22,19 +27,60 @@ pub struct Query { } impl Query { - /// Returns only the first param value even if it has multiple values like this: + /// It returns `Some(value)` for a URL query param if the param with the + /// input `name` exists. For example: + /// + /// ```rust + /// use torrust_tracker::servers::http::v1::query::Query; + /// + /// let raw_query = "param1=value1&param2=value2"; /// - /// ```text - /// param1=value1&param1=value2 + /// let query = raw_query.parse::().unwrap(); + /// + /// assert_eq!(query.get_param("param1").unwrap(), "value1"); + /// assert_eq!(query.get_param("param2").unwrap(), "value2"); /// ``` /// - /// In that case `get_param("param1")` will return `value1`. + /// It returns only the first param value even if it has multiple values: + /// + /// ```rust + /// use torrust_tracker::servers::http::v1::query::Query; + /// + /// let raw_query = "param1=value1&param1=value2"; + /// + /// let query = raw_query.parse::().unwrap(); + /// + /// assert_eq!(query.get_param("param1").unwrap(), "value1"); + /// ``` #[must_use] pub fn get_param(&self, name: &str) -> Option { self.params.get(name).map(|pair| pair.value.clone()) } + /// Returns all the param values as a vector. + /// + /// ```rust + /// use torrust_tracker::servers::http::v1::query::Query; + /// + /// let query = "param1=value1&param1=value2".parse::().unwrap(); + /// + /// assert_eq!( + /// query.get_param_vec("param1"), + /// Some(vec!["value1".to_string(), "value2".to_string()]) + /// ); + /// ``` + /// /// Returns all the param values as a vector even if it has only one value. 
+ /// + /// ```rust + /// use torrust_tracker::servers::http::v1::query::Query; + /// + /// let query = "param1=value1".parse::().unwrap(); + /// + /// assert_eq!( + /// query.get_param_vec("param1"), Some(vec!["value1".to_string()]) + /// ); + /// ``` #[must_use] pub fn get_param_vec(&self, name: &str) -> Option> { self.params.get_vec(name).map(|pairs| { @@ -47,8 +93,12 @@ impl Query { } } +/// This error can be returned when parsing a [`Query`](crate::servers::http::v1::query::Query) +/// from a string. #[derive(Error, Debug)] pub enum ParseQueryError { + /// Invalid URL query param. For example: `"name=value=value"`. It contains + /// an unescaped `=` character. #[error("invalid param {raw_param} in {location}")] InvalidParam { location: &'static Location<'static>, diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index 7ab260d99..3725ee1df 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -1,3 +1,6 @@ +//! `Announce` request for the HTTP tracker. +//! +//! Data structures and logic for parsing the `announce` request. use std::fmt; use std::panic::Location; use std::str::FromStr; @@ -11,6 +14,8 @@ use crate::servers::http::v1::responses; use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; +/// The number of bytes `downloaded`, `uploaded` or `left`. It's used in the +/// `Announce` request for parameters that represent a number of bytes. pub type NumberOfBytes = i64; // Query param names @@ -23,22 +28,71 @@ const LEFT: &str = "left"; const EVENT: &str = "event"; const COMPACT: &str = "compact"; +/// The `Announce` request. Fields use the domain types after parsing the +/// query params of the request. 
+/// +/// ```rust +/// use torrust_tracker::servers::http::v1::requests::announce::{Announce, Compact, Event}; +/// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +/// use torrust_tracker::tracker::peer; +/// +/// let request = Announce { +/// // Mandatory params +/// info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), +/// peer_id: "-qB00000000000000001".parse::().unwrap(), +/// port: 17548, +/// // Optional params +/// downloaded: Some(1), +/// uploaded: Some(2), +/// left: Some(3), +/// event: Some(Event::Started), +/// compact: Some(Compact::NotAccepted) +/// }; +/// ``` +/// +/// > **NOTICE**: The [BEP 03. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +/// specifies that only the peer `IP` and `event` are optional. However, the +/// tracker defines default values for some of the mandatory params. +/// +/// > **NOTICE**: The struct does not contain the `IP` of the peer. It's not +/// mandatory and it's not used by the tracker. The `IP` is obtained from the +/// request itself. #[derive(Debug, PartialEq)] pub struct Announce { // Mandatory params + /// The `InfoHash` of the torrent. pub info_hash: InfoHash, + /// The `peer::Id` of the peer. pub peer_id: peer::Id, + /// The port of the peer. pub port: u16, + // Optional params + /// The number of bytes downloaded by the peer. pub downloaded: Option, + + /// The number of bytes uploaded by the peer. pub uploaded: Option, + + /// The number of bytes left to download by the peer. pub left: Option, + + /// The event that the peer is reporting. It can be `Started`, `Stopped` or + /// `Completed`. pub event: Option, + + /// Whether the response should be in compact mode or not. pub compact: Option, } +/// Errors that can occur when parsing the `Announce` request. +/// +/// The `info_hash` and `peer_id` query params are special because they contain +/// binary data. 
The `info_hash` is a 40-byte SHA1 hash and the `peer_id` is a +/// 20-byte array. #[derive(Error, Debug)] pub enum ParseAnnounceQueryError { + /// A mandatory param is missing. #[error("missing query params for announce request in {location}")] MissingParams { location: &'static Location<'static> }, #[error("missing param {param_name} in {location}")] @@ -46,24 +100,28 @@ pub enum ParseAnnounceQueryError { location: &'static Location<'static>, param_name: String, }, + /// The param cannot be parsed into the domain type. #[error("invalid param value {param_value} for {param_name} in {location}")] InvalidParam { param_name: String, param_value: String, location: &'static Location<'static>, }, + /// The param value is out of range. #[error("param value overflow {param_value} for {param_name} in {location}")] NumberOfBytesOverflow { param_name: String, param_value: String, location: &'static Location<'static>, }, + /// The `info_hash` is invalid. #[error("invalid param value {param_value} for {param_name} in {source}")] InvalidInfoHashParam { param_name: String, param_value: String, source: LocatedError<'static, ConversionError>, }, + /// The `peer_id` is invalid. #[error("invalid param value {param_value} for {param_name} in {source}")] InvalidPeerIdParam { param_name: String, @@ -72,10 +130,21 @@ pub enum ParseAnnounceQueryError { }, } +/// The event that the peer is reporting: `started`, `completed` or `stopped`. +/// +/// If the event is not present or empty that means that the peer is just +/// updating its status. It's one of the announcements done at regular intervals. +/// +/// Refer to [BEP 03. The `BitTorrent Protocol` Specification](https://www.bittorrent.org/beps/bep_0003.html) +/// for more information. #[derive(PartialEq, Debug)] pub enum Event { + /// Event sent when a download first begins. Started, + /// Event sent when the downloader cease downloading. Stopped, + /// Event sent when the download is complete. 
+ /// No `completed` is sent if the file was complete when started Completed, } @@ -106,9 +175,21 @@ impl fmt::Display for Event { } } +/// Whether the `announce` response should be in compact mode or not. +/// +/// Depending on the value of this param, the tracker will return a different +/// response: +/// +/// - [`NonCompact`](crate::servers::http::v1::responses::announce::NonCompact) response. +/// - [`Compact`](crate::servers::http::v1::responses::announce::Compact) response. +/// +/// Refer to [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) #[derive(PartialEq, Debug)] pub enum Compact { + /// The client advises the tracker that the client prefers compact format. Accepted = 1, + /// The client advises the tracker that is prefers the original format + /// described in [BEP 03. The BitTorrent Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) NotAccepted = 0, } diff --git a/src/servers/http/v1/requests/mod.rs b/src/servers/http/v1/requests/mod.rs index 776d2dfbf..ee34ca72a 100644 --- a/src/servers/http/v1/requests/mod.rs +++ b/src/servers/http/v1/requests/mod.rs @@ -1,2 +1,6 @@ +//! HTTP requests for the HTTP tracker. +//! +//! Refer to the generic [HTTP server documentation](crate::servers::http) for +//! more information about the HTTP tracker. pub mod announce; pub mod scrape; diff --git a/src/servers/http/v1/requests/scrape.rs b/src/servers/http/v1/requests/scrape.rs index a7ec962e2..227ea74ae 100644 --- a/src/servers/http/v1/requests/scrape.rs +++ b/src/servers/http/v1/requests/scrape.rs @@ -1,3 +1,6 @@ +//! `Scrape` request for the HTTP tracker. +//! +//! Data structures and logic for parsing the `scrape` request. 
use std::panic::Location; use thiserror::Error; diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index 4902e0d62..8fbe5df35 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -1,3 +1,6 @@ +//! `Announce` response for the HTTP tracker [`announce`](crate::servers::http::v1::requests::announce::Announce) request. +//! +//! Data structures and logic to build the `announce` response. use std::io::Write; use std::net::IpAddr; use std::panic::Location; @@ -11,25 +14,103 @@ use thiserror::Error; use crate::servers::http::v1::responses; use crate::tracker::{self, AnnounceData}; -/// Normal (non compact) "announce" response +/// Normal (non compact) `announce` response. /// -/// BEP 03: The ``BitTorrent`` Protocol Specification -/// +/// It's a bencoded dictionary. /// +/// ```rust +/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +/// use torrust_tracker::servers::http::v1::responses::announce::{NonCompact, Peer}; +/// +/// let response = NonCompact { +/// interval: 111, +/// interval_min: 222, +/// complete: 333, +/// incomplete: 444, +/// peers: vec![ +/// // IPV4 +/// Peer { +/// peer_id: *b"-qB00000000000000001", +/// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 +/// port: 0x7070, // 28784 +/// }, +/// // IPV6 +/// Peer { +/// peer_id: *b"-qB00000000000000002", +/// ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), +/// port: 0x7070, // 28784 +/// }, +/// ], +/// }; +/// +/// let bytes = response.body(); +/// +/// // The expected bencoded response. 
+/// let expected_bytes = b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peersld2:ip15:105.105.105.1057:peer id20:-qB000000000000000014:porti28784eed2:ip39:6969:6969:6969:6969:6969:6969:6969:69697:peer id20:-qB000000000000000024:porti28784eeee"; +/// +/// assert_eq!( +/// String::from_utf8(bytes).unwrap(), +/// String::from_utf8(expected_bytes.to_vec()).unwrap() +/// ); +/// ``` +/// +/// Refer to [BEP 03: The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +/// for more information. #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct NonCompact { + /// Interval in seconds that the client should wait between sending regular + /// announce requests to the tracker. + /// + /// It's a **recommended** wait time between announcements. + /// + /// This is the standard amount of time that clients should wait between + /// sending consecutive announcements to the tracker. This value is set by + /// the tracker and is typically provided in the tracker's response to a + /// client's initial request. It serves as a guideline for clients to know + /// how often they should contact the tracker for updates on the peer list, + /// while ensuring that the tracker is not overwhelmed with requests. pub interval: u32, + /// Minimum announce interval. Clients must not reannounce more frequently + /// than this. + /// + /// It establishes the shortest allowed wait time. + /// + /// This is an optional parameter in the protocol that the tracker may + /// provide in its response. It sets a lower limit on the frequency at which + /// clients are allowed to send announcements. Clients should respect this + /// value to prevent sending too many requests in a short period, which + /// could lead to excessive load on the tracker or even getting banned by + /// the tracker for not adhering to the rules. #[serde(rename = "min interval")] pub interval_min: u32, + /// Number of peers with the entire file, i.e. seeders. 
pub complete: u32, + /// Number of non-seeder peers, aka "leechers". pub incomplete: u32, + /// A list of peers. The value is a list of dictionaries. pub peers: Vec, } +/// Peer information in the [`NonCompact`](crate::servers::http::v1::responses::announce::NonCompact) +/// response. +/// +/// ```rust +/// use std::net::{IpAddr, Ipv4Addr}; +/// use torrust_tracker::servers::http::v1::responses::announce::{NonCompact, Peer}; +/// +/// let peer = Peer { +/// peer_id: *b"-qB00000000000000001", +/// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 +/// port: 0x7070, // 28784 +/// }; +/// ``` #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Peer { + /// The peer's ID. pub peer_id: [u8; 20], + /// The peer's IP address. pub ip: IpAddr, + /// The peer's port number. pub port: u16, } @@ -55,6 +136,8 @@ impl From for Peer { } impl NonCompact { + /// Returns the bencoded body of the non-compact response. + /// /// # Panics /// /// Will return an error if it can't access the bencode as a mutable `BListAccess`. @@ -97,31 +180,120 @@ impl From for NonCompact { } } -/// Compact "announce" response +/// Compact `announce` response. 
+/// +/// _"To reduce the size of tracker responses and to reduce memory and +/// computational requirements in trackers, trackers may return peers as a +/// packed string rather than as a bencoded list."_ +/// +/// ```rust +/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +/// use torrust_tracker::servers::http::v1::responses::announce::{Compact, CompactPeer}; /// -/// BEP 23: Tracker Returns Compact Peer Lists -/// +/// let response = Compact { +/// interval: 111, +/// interval_min: 222, +/// complete: 333, +/// incomplete: 444, +/// peers: vec![ +/// // IPV4 +/// CompactPeer { +/// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 +/// port: 0x7070, // 28784 +/// }, +/// // IPV6 +/// CompactPeer { +/// ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), +/// port: 0x7070, // 28784 +/// }, +/// ], +/// }; /// -/// BEP 07: IPv6 Tracker Extension -/// +/// let bytes = response.body().unwrap(); /// +/// // The expected bencoded response. +/// let expected_bytes = +/// // cspell:disable-next-line +/// b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peers6:iiiipp6:peers618:iiiiiiiiiiiiiiiippe"; +/// +/// assert_eq!( +/// String::from_utf8(bytes).unwrap(), +/// String::from_utf8(expected_bytes.to_vec()).unwrap() +/// ); +/// ``` +/// +/// Refer to the official BEPs for more information: +/// +/// - [BEP 23: Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +/// - [BEP 07: IPv6 Tracker Extension](https://www.bittorrent.org/beps/bep_0007.html) #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Compact { + /// Interval in seconds that the client should wait between sending regular + /// announce requests to the tracker. + /// + /// It's a **recommended** wait time between announcements. + /// + /// This is the standard amount of time that clients should wait between + /// sending consecutive announcements to the tracker. 
This value is set by + /// the tracker and is typically provided in the tracker's response to a + /// client's initial request. It serves as a guideline for clients to know + /// how often they should contact the tracker for updates on the peer list, + /// while ensuring that the tracker is not overwhelmed with requests. pub interval: u32, + /// Minimum announce interval. Clients must not reannounce more frequently + /// than this. + /// + /// It establishes the shortest allowed wait time. + /// + /// This is an optional parameter in the protocol that the tracker may + /// provide in its response. It sets a lower limit on the frequency at which + /// clients are allowed to send announcements. Clients should respect this + /// value to prevent sending too many requests in a short period, which + /// could lead to excessive load on the tracker or even getting banned by + /// the tracker for not adhering to the rules. #[serde(rename = "min interval")] pub interval_min: u32, + /// Number of seeders, aka "completed". pub complete: u32, + /// Number of non-seeder peers, aka "incomplete". pub incomplete: u32, + /// Compact peer list. pub peers: Vec, } +/// Compact peer. It's used in the [`Compact`](crate::servers::http::v1::responses::announce::Compact) +/// response. +/// +/// _"To reduce the size of tracker responses and to reduce memory and +/// computational requirements in trackers, trackers may return peers as a +/// packed string rather than as a bencoded list."_ +/// +/// A part from reducing the size of the response, this format does not contain +/// the peer's ID. 
+/// +/// ```rust +/// use std::net::{IpAddr, Ipv4Addr}; +/// use torrust_tracker::servers::http::v1::responses::announce::CompactPeer; +/// +/// let compact_peer = CompactPeer { +/// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 +/// port: 0x7070 // 28784 +/// }; +/// ``` +/// +/// Refer to [BEP 23: Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +/// for more information. #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct CompactPeer { + /// The peer's IP address. pub ip: IpAddr, + /// The peer's port number. pub port: u16, } impl CompactPeer { + /// Returns the compact peer as a byte vector. + /// /// # Errors /// /// Will return `Err` if internally interrupted. @@ -150,6 +322,8 @@ impl From for CompactPeer { } impl Compact { + /// Returns the bencoded compact response as a byte vector. + /// /// # Errors /// /// Will return `Err` if internally interrupted. @@ -196,6 +370,7 @@ impl Compact { } } +/// `Compact` response serialization error. #[derive(Error, Debug)] pub enum CompactSerializationError { #[error("cannot write bytes: {inner_error} in {location}")] diff --git a/src/servers/http/v1/responses/error.rs b/src/servers/http/v1/responses/error.rs index 0bcdbd9fb..606ead3b2 100644 --- a/src/servers/http/v1/responses/error.rs +++ b/src/servers/http/v1/responses/error.rs @@ -1,17 +1,46 @@ +//! `Error` response for the [`HTTP tracker`](crate::servers::http). +//! +//! Data structures and logic to build the error responses. +//! +//! From the [BEP 03. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html): +//! +//! _"Tracker responses are bencoded dictionaries. If a tracker response has a +//! key failure reason, then that maps to a human readable string which explains +//! why the query failed, and no other keys are required."_ +//! +//! > **NOTICE**: error responses are bencoded and always have a `200 OK` status +//! code. 
The official `BitTorrent` specification does not specify the status +//! code. use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; use serde::{self, Serialize}; +/// `Error` response for the [`HTTP tracker`](crate::servers::http). #[derive(Serialize, Debug, PartialEq)] pub struct Error { + /// Human readable string which explains why the request failed. #[serde(rename = "failure reason")] pub failure_reason: String, } impl Error { + /// Returns the bencoded representation of the `Error` struct. + /// + /// ```rust + /// use torrust_tracker::servers::http::v1::responses::error::Error; + /// + /// let err = Error { + /// failure_reason: "error message".to_owned(), + /// }; + /// + /// // cspell:disable-next-line + /// assert_eq!(err.write(), "d14:failure reason13:error messagee"); + /// ``` + /// /// # Panics /// - /// It would panic if the `Error` struct contained an inappropriate type. + /// It would panic if the `Error` struct contained an inappropriate field + /// type. #[must_use] pub fn write(&self) -> String { serde_bencode::to_string(&self).unwrap() diff --git a/src/servers/http/v1/responses/mod.rs b/src/servers/http/v1/responses/mod.rs index bdc689056..3c6632fed 100644 --- a/src/servers/http/v1/responses/mod.rs +++ b/src/servers/http/v1/responses/mod.rs @@ -1,3 +1,7 @@ +//! HTTP responses for the HTTP tracker. +//! +//! Refer to the generic [HTTP server documentation](crate::servers::http) for +//! more information about the HTTP tracker. pub mod announce; pub mod error; pub mod scrape; diff --git a/src/servers/http/v1/responses/scrape.rs b/src/servers/http/v1/responses/scrape.rs index 36e4f3282..6610f9dc4 100644 --- a/src/servers/http/v1/responses/scrape.rs +++ b/src/servers/http/v1/responses/scrape.rs @@ -1,3 +1,6 @@ +//! `Scrape` response for the HTTP tracker [`scrape`](crate::servers::http::v1::requests::scrape::Scrape) request. +//! +//! Data structures and logic to build the `scrape` response. 
use std::borrow::Cow; use axum::http::StatusCode; @@ -6,12 +9,46 @@ use bip_bencode::{ben_int, ben_map, BMutAccess}; use crate::tracker::ScrapeData; +/// The `Scrape` response for the HTTP tracker. +/// +/// ```rust +/// use torrust_tracker::servers::http::v1::responses::scrape::Bencoded; +/// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +/// use torrust_tracker::tracker::torrent::SwarmMetadata; +/// use torrust_tracker::tracker::ScrapeData; +/// +/// let info_hash = InfoHash([0x69; 20]); +/// let mut scrape_data = ScrapeData::empty(); +/// scrape_data.add_file( +/// &info_hash, +/// SwarmMetadata { +/// complete: 1, +/// downloaded: 2, +/// incomplete: 3, +/// }, +/// ); +/// +/// let response = Bencoded::from(scrape_data); +/// +/// let bytes = response.body(); +/// +/// // cspell:disable-next-line +/// let expected_bytes = b"d5:filesd20:iiiiiiiiiiiiiiiiiiiid8:completei1e10:downloadedi2e10:incompletei3eeee"; +/// +/// assert_eq!( +/// String::from_utf8(bytes).unwrap(), +/// String::from_utf8(expected_bytes.to_vec()).unwrap() +/// ); +/// ``` #[derive(Debug, PartialEq, Default)] pub struct Bencoded { + /// The scrape data to be bencoded. scrape_data: ScrapeData, } impl Bencoded { + /// Returns the bencoded representation of the `Scrape` struct. + /// /// # Panics /// /// Will return an error if it can't access the bencode as a mutable `BDictAccess`. diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index a8e740f69..86bdf480f 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -1,3 +1,4 @@ +//! HTTP server routes for version `v1`. use std::sync::Arc; use axum::routing::get; @@ -7,6 +8,10 @@ use axum_client_ip::SecureClientIpSource; use super::handlers::{announce, scrape}; use crate::tracker::Tracker; +/// It adds the routes to the router. +/// +/// > **NOTICE**: it's added a layer to get the client IP from the connection +/// info. 
The tracker could use the connection info to get the client IP. #[allow(clippy::needless_pass_by_value)] pub fn router(tracker: Arc) -> Router { Router::new() diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 116dc1e95..4c1b262ba 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -1,3 +1,13 @@ +//! The `announce` service. +//! +//! The service is responsible for handling the `announce` requests. +//! +//! It delegates the `announce` logic to the [`Tracker`](crate::tracker::Tracker::announce) +//! and it returns the [`AnnounceData`](crate::tracker::AnnounceData) returned +//! by the [`Tracker`](crate::tracker::Tracker). +//! +//! It also sends an [`statistics::Event`](crate::tracker::statistics::Event) +//! because events are specific for the HTTP tracker. use std::net::IpAddr; use std::sync::Arc; @@ -5,6 +15,16 @@ use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::peer::Peer; use crate::tracker::{statistics, AnnounceData, Tracker}; +/// The HTTP tracker `announce` service. +/// +/// The service sends an statistics event that increments: +/// +/// - The number of TCP connections handled by the HTTP tracker. +/// - The number of TCP `announce` requests handled by the HTTP tracker. +/// +/// > **NOTICE**: as the HTTP tracker does not requires a connection request +/// like the UDP tracker, the number of TCP connections is incremented for +/// each `announce` request. pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) -> AnnounceData { let original_peer_ip = peer.peer_addr.ip(); diff --git a/src/servers/http/v1/services/mod.rs b/src/servers/http/v1/services/mod.rs index 5d1acd67d..2e6285d1a 100644 --- a/src/servers/http/v1/services/mod.rs +++ b/src/servers/http/v1/services/mod.rs @@ -1,3 +1,10 @@ +//! Application services for the HTTP tracker. +//! +//! 
These modules contain logic that is specific for the HTTP tracker but it +//! does depend on the Axum web server. It could be reused for other web +//! servers. +//! +//! Refer to [`torrust_tracker`](crate) documentation. pub mod announce; pub mod peer_ip_resolver; pub mod scrape; diff --git a/src/servers/http/v1/services/peer_ip_resolver.rs b/src/servers/http/v1/services/peer_ip_resolver.rs index ac5b8c79f..b8987bb4d 100644 --- a/src/servers/http/v1/services/peer_ip_resolver.rs +++ b/src/servers/http/v1/services/peer_ip_resolver.rs @@ -1,13 +1,23 @@ +//! This service resolves the peer IP from the request. +//! +//! The peer IP is used to identify the peer in the tracker. It's the peer IP +//! that is used in the `announce` responses (peer list). And it's also used to +//! send statistics events. +//! //! Given this request chain: //! +//! ```text //! client <-> http proxy 1 <-> http proxy 2 <-> server //! ip: 126.0.0.1 ip: 126.0.0.2 ip: 126.0.0.3 ip: 126.0.0.4 //! X-Forwarded-For: 126.0.0.1 X-Forwarded-For: 126.0.0.1,126.0.0.2 +//! ``` //! -//! This service resolves the peer IP from these values: +//! This service returns two options for the peer IP: //! -//! `right_most_x_forwarded_for` = 126.0.0.2 -//! `connection_info_ip` = 126.0.0.3 +//! ```text +//! right_most_x_forwarded_for = 126.0.0.2 +//! connection_info_ip = 126.0.0.3 +//! ``` //! //! Depending on the tracker configuration. use std::net::IpAddr; @@ -16,22 +26,81 @@ use std::panic::Location; use serde::{Deserialize, Serialize}; use thiserror::Error; +/// This struct contains the sources from which the peer IP can be obtained. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] pub struct ClientIpSources { + /// The right most IP from the `X-Forwarded-For` HTTP header. pub right_most_x_forwarded_for: Option, + /// The IP from the connection info. pub connection_info_ip: Option, } +/// The error that can occur when resolving the peer IP. 
#[derive(Error, Debug)] pub enum PeerIpResolutionError { + /// The peer IP cannot be obtained because the tracker is configured as a + /// reverse proxy but the `X-Forwarded-For` HTTP header is missing or + /// invalid. #[error( "missing or invalid the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration) in {location}" )] MissingRightMostXForwardedForIp { location: &'static Location<'static> }, + /// The peer IP cannot be obtained because the tracker is not configured as + /// a reverse proxy but the connection info was not provided to the Axum + /// framework via a route extension. #[error("cannot get the client IP from the connection info in {location}")] MissingClientIp { location: &'static Location<'static> }, } +/// Resolves the peer IP from the request. +/// +/// Given the sources from which the peer IP can be obtained, this function +/// resolves the peer IP according to the tracker configuration. +/// +/// With the tracker running on reverse proxy mode: +/// +/// ```rust +/// use std::net::IpAddr; +/// use std::str::FromStr; +/// +/// use torrust_tracker::servers::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; +/// +/// let on_reverse_proxy = true; +/// +/// let ip = invoke( +/// on_reverse_proxy, +/// &ClientIpSources { +/// right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), +/// connection_info_ip: None, +/// }, +/// ) +/// .unwrap(); +/// +/// assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); +/// ``` +/// +/// With the tracker non running on reverse proxy mode: +/// +/// ```rust +/// use std::net::IpAddr; +/// use std::str::FromStr; +/// +/// use torrust_tracker::servers::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; +/// +/// let on_reverse_proxy = false; +/// +/// let ip = invoke( +/// on_reverse_proxy, +/// &ClientIpSources { +/// right_most_x_forwarded_for: None, +/// connection_info_ip: 
Some(IpAddr::from_str("203.0.113.195").unwrap()), +/// }, +/// ) +/// .unwrap(); +/// +/// assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); +/// ``` +/// /// # Errors /// /// Will return an error if the peer IP cannot be obtained according to the configuration. diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 82ecc72e0..240680ca3 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -1,9 +1,29 @@ +//! The `scrape` service. +//! +//! The service is responsible for handling the `scrape` requests. +//! +//! It delegates the `scrape` logic to the [`Tracker`](crate::tracker::Tracker::scrape) +//! and it returns the [`ScrapeData`](crate::tracker::ScrapeData) returned +//! by the [`Tracker`](crate::tracker::Tracker). +//! +//! It also sends an [`statistics::Event`](crate::tracker::statistics::Event) +//! because events are specific for the HTTP tracker. use std::net::IpAddr; use std::sync::Arc; use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::{statistics, ScrapeData, Tracker}; +/// The HTTP tracker `scrape` service. +/// +/// The service sends an statistics event that increments: +/// +/// - The number of TCP connections handled by the HTTP tracker. +/// - The number of TCP `scrape` requests handled by the HTTP tracker. +/// +/// > **NOTICE**: as the HTTP tracker does not requires a connection request +/// like the UDP tracker, the number of TCP connections is incremented for +/// each `scrape` request. pub async fn invoke(tracker: &Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { let scrape_data = tracker.scrape(info_hashes).await; @@ -12,8 +32,12 @@ pub async fn invoke(tracker: &Arc, info_hashes: &Vec, origina scrape_data } +/// The HTTP tracker fake `scrape` service. It returns zeroed stats. 
+/// /// When the peer is not authenticated and the tracker is running in `private` mode, /// the tracker returns empty stats for all the torrents. +/// +/// > **NOTICE**: tracker statistics are not updated in this case. pub async fn fake(tracker: &Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { send_scrape_event(original_peer_ip, tracker).await; diff --git a/src/servers/mod.rs b/src/servers/mod.rs index a71b3f029..38b4b70cd 100644 --- a/src/servers/mod.rs +++ b/src/servers/mod.rs @@ -1,3 +1,4 @@ +//! Servers. Services that can be started and stopped. pub mod apis; pub mod http; pub mod signals; diff --git a/src/servers/signals.rs b/src/servers/signals.rs index b5a25ded7..879a82d5e 100644 --- a/src/servers/signals.rs +++ b/src/servers/signals.rs @@ -1,3 +1,4 @@ +/// This module contains functions to handle signals. use log::info; /// Resolves on `ctrl_c` or the `terminate` signal. diff --git a/src/shared/bit_torrent/mod.rs b/src/shared/bit_torrent/mod.rs index 0e5d7e7f2..eba90b4ab 100644 --- a/src/shared/bit_torrent/mod.rs +++ b/src/shared/bit_torrent/mod.rs @@ -1,3 +1,71 @@ //! Common code for the `BitTorrent` protocol. +//! +//! # Glossary +//! +//! - [Announce](#announce) +//! - [Info Hash](#info-hash) +//! - [Leecher](#leechers) +//! - [Peer ID](#peer-id) +//! - [Peer List](#peer-list) +//! - [Peer](#peer) +//! - [Scrape](#scrape) +//! - [Seeders](#seeders) +//! - [Swarm](#swarm) +//! - [Tracker](#tracker) +//! +//! Glossary of `BitTorrent` terms. +//! +//! # Announce +//! +//! A request to the tracker to announce the presence of a peer. +//! +//! ## Info Hash +//! +//! A unique identifier for a torrent. +//! +//! ## Leecher +//! +//! Peers that are only downloading data. +//! +//! ## Peer ID +//! +//! A unique identifier for a peer. +//! +//! ## Peer List +//! +//! A list of peers that are downloading a torrent. +//! +//! ## Peer +//! +//! A client that is downloading or uploading a torrent. +//! +//! ## Scrape +//! +//! 
A request to the tracker to get information about a torrent. +//! +//! ## Seeder +//! +//! Peers that are only uploading data. +//! +//! ## Swarm +//! +//! A group of peers that are downloading the same torrent. +//! +//! ## Tracker +//! +//! A server that keeps track of peers that are downloading a torrent. +//! +//! # Links +//! +//! Description | Link +//! ---|--- +//! `BitTorrent.org`. A forum for developers to exchange ideas about the direction of the `BitTorrent` protocol | +//! Wikipedia entry for Glossary of `BitTorrent` term | +//! `BitTorrent` Specification Wiki | +//! Vuze Wiki. A `BitTorrent` client implementation | +//! `libtorrent`. Complete C++ bittorrent implementation| +//! UDP Tracker Protocol docs by `libtorrent` | +//! Percent Encoding spec | +//!Bencode & bdecode in your browser | pub mod common; pub mod info_hash; diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index dd2e94660..03853e1aa 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -499,25 +499,36 @@ pub struct TorrentsMetrics { /// Structure that holds the data returned by the `announce` request. #[derive(Debug, PartialEq, Default)] pub struct AnnounceData { + /// The list of peers that are downloading the same torrent. + /// It excludes the peer that made the request. pub peers: Vec, + /// Swarm statistics pub swarm_stats: SwarmStats, + /// The interval in seconds that the client should wait between sending + /// regular requests to the tracker. + /// Refer to [`announce_interval`](torrust_tracker_configuration::Configuration::announce_interval). pub interval: u32, + /// The minimum announce interval in seconds that the client should wait. + /// Refer to [`min_announce_interval`](torrust_tracker_configuration::Configuration::min_announce_interval). pub interval_min: u32, } /// Structure that holds the data returned by the `scrape` request. #[derive(Debug, PartialEq, Default)] pub struct ScrapeData { + /// A map of infohashes and swarm metadata for each torrent. 
pub files: HashMap, } impl ScrapeData { + /// Creates a new empty `ScrapeData` with no files (torrents). #[must_use] pub fn empty() -> Self { let files: HashMap = HashMap::new(); Self { files } } + /// Creates a new `ScrapeData` with zeroed metadata for each torrent. #[must_use] pub fn zeroed(info_hashes: &Vec) -> Self { let mut scrape_data = Self::empty(); @@ -529,10 +540,12 @@ impl ScrapeData { scrape_data } + /// Adds a torrent to the `ScrapeData`. pub fn add_file(&mut self, info_hash: &InfoHash, swarm_metadata: SwarmMetadata) { self.files.insert(*info_hash, swarm_metadata); } + /// Adds a torrent to the `ScrapeData` with zeroed metadata. pub fn add_file_with_zeroed_metadata(&mut self, info_hash: &InfoHash) { self.files.insert(*info_hash, SwarmMetadata::zeroed()); } @@ -565,18 +578,22 @@ impl Tracker { }) } + /// Returns `true` is the tracker is in public mode. pub fn is_public(&self) -> bool { self.mode == TrackerMode::Public } + /// Returns `true` is the tracker is in private mode. pub fn is_private(&self) -> bool { self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed } + /// Returns `true` is the tracker is in whitelisted mode. pub fn is_whitelisted(&self) -> bool { self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed } + /// Returns `true` if the tracker requires authentication. 
pub fn requires_authentication(&self) -> bool { self.is_private() } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 1e78cd909..22deed2b4 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -59,7 +59,6 @@ pub struct Entry { pub struct SwarmMetadata { /// The number of peers that have ever completed downloading pub downloaded: u32, - /// The number of active peers that have completed downloading (seeders) pub complete: u32, /// The number of active peers that have not completed downloading (leechers) @@ -80,7 +79,6 @@ impl SwarmMetadata { pub struct SwarmStats { /// The number of peers that have ever completed downloading pub completed: u32, - /// The number of active peers that have completed downloading (seeders) pub seeders: u32, /// The number of active peers that have not completed downloading (leechers) From 7014a4645d0c71e503e5d789373f6e6bfc04d310 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 5 Apr 2023 13:48:20 +0100 Subject: [PATCH 0505/1003] docs: [#271] crate docs for servers::udp mod --- cSpell.json | 5 + src/lib.rs | 1 - src/servers/http/mod.rs | 4 +- src/servers/http/percent_encoding.rs | 2 +- src/servers/http/server.rs | 8 +- src/servers/http/v1/requests/announce.rs | 2 +- src/servers/signals.rs | 2 +- src/servers/udp/connection_cookie.rs | 73 +++ src/servers/udp/error.rs | 6 + src/servers/udp/handlers.rs | 68 ++- src/servers/udp/mod.rs | 647 +++++++++++++++++++++++ src/servers/udp/peer_builder.rs | 9 + src/servers/udp/request.rs | 11 + src/servers/udp/server.rs | 71 ++- tests/servers/api/v1/asserts.rs | 2 +- 15 files changed, 886 insertions(+), 25 deletions(-) diff --git a/cSpell.json b/cSpell.json index af0de7101..f07e2bfb4 100644 --- a/cSpell.json +++ b/cSpell.json @@ -1,6 +1,7 @@ { "words": [ "appuser", + "Arvid", "AUTOINCREMENT", "automock", "Avicora", @@ -21,6 +22,7 @@ "chrono", "clippy", "completei", + "connectionless", "dockerhub", "downloadedi", "filesd", @@ -47,6 +49,7 @@ "nanos", "nextest", 
"nocapture", + "Norberg", "numwant", "oneshot", "ostr", @@ -59,6 +62,7 @@ "reqwest", "rerequests", "rngs", + "routable", "rusqlite", "rustfmt", "Rustls", @@ -82,6 +86,7 @@ "Vagaa", "Vuze", "whitespaces", + "XBTT", "Xtorrent", "Xunlei", "xxxxxxxxxxxxxxxxxxxxd", diff --git a/src/lib.rs b/src/lib.rs index 36d1792d3..adcf3d1f2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -428,7 +428,6 @@ //! - [BEP 15](https://www.bittorrent.org/beps/bep_0015.html): UDP Tracker Protocol for `BitTorrent` //! - [BEP 23](https://www.bittorrent.org/beps/bep_0023.html): Tracker Returns Compact Peer Lists //! - [BEP 27](https://www.bittorrent.org/beps/bep_0027.html): Private Torrents -//! - [BEP 41](https://www.bittorrent.org/beps/bep_0041.html): UDP Tracker Protocol Extensions //! - [BEP 48](https://www.bittorrent.org/beps/bep_0048.html): Tracker Protocol Extension: Scrape //! //! # Contributing diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index 78c086892..067e88fdd 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -43,7 +43,7 @@ //! //! Parameter | Type | Description | Required | Default | Example //! ---|---|---|---|---|--- -//! [`info_hash`](crate::servers::http::v1::requests::announce::Announce::info_hash) | percent encoded of 40-byte array | The `Info Hash` of the torrent. | Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` +//! [`info_hash`](crate::servers::http::v1::requests::announce::Announce::info_hash) | percent encoded of 20-byte array | The `Info Hash` of the torrent. | Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` //! `peer_addr` | string |The IP address of the peer. | No | No | `2.137.87.41` //! [`downloaded`](crate::servers::http::v1::requests::announce::Announce::downloaded) | positive integer |The number of bytes downloaded by the peer. | No | `0` | `0` //! 
[`uploaded`](crate::servers::http::v1::requests::announce::Announce::uploaded) | positive integer | The number of bytes uploaded by the peer. | No | `0` | `0` @@ -220,7 +220,7 @@ //! //! Parameter | Type | Description | Required | Default | Example //! ---|---|---|---|---|--- -//! [`info_hash`](crate::servers::http::v1::requests::scrape::Scrape::info_hashes) | percent encoded of 40-byte array | The `Info Hash` of the torrent. | Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` +//! [`info_hash`](crate::servers::http::v1::requests::scrape::Scrape::info_hashes) | percent encoded of 20-byte array | The `Info Hash` of the torrent. | Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` //! //! > **NOTICE**: you can scrape multiple torrents at the same time by passing //! multiple `info_hash` parameters. diff --git a/src/servers/http/percent_encoding.rs b/src/servers/http/percent_encoding.rs index b807e74c9..c8f0f7f12 100644 --- a/src/servers/http/percent_encoding.rs +++ b/src/servers/http/percent_encoding.rs @@ -19,7 +19,7 @@ use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; /// Percent decodes a percent encoded infohash. Internally an -/// [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) is a 40-byte array. +/// [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) is a 20-byte array. /// /// For example, given the infohash `3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0`, /// it's percent encoded representation is `%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0`. diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 3008771ee..6a46b81df 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -44,15 +44,15 @@ pub enum Error { Error(String), // todo: refactor to use thiserror and add more variants for specific errors. } -/// A stopped HTTP server. 
+/// A HTTP server instance controller with no HTTP instance running. #[allow(clippy::module_name_repetitions)] pub type StoppedHttpServer = HttpServer>; -/// A running HTTP server. +/// A HTTP server instance controller with a running HTTP instance. #[allow(clippy::module_name_repetitions)] pub type RunningHttpServer = HttpServer>; -/// A HTTP running server controller. +/// A HTTP server instance controller. /// /// It's responsible for: /// @@ -83,12 +83,14 @@ pub struct Stopped { /// A running HTTP server state. pub struct Running { + /// The address where the server is bound. pub bind_addr: SocketAddr, task_killer: tokio::sync::oneshot::Sender, task: tokio::task::JoinHandle, } impl HttpServer> { + /// It creates a new `HttpServer` controller in `stopped` state. pub fn new(cfg: torrust_tracker_configuration::HttpTracker, launcher: I) -> Self { Self { cfg, diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index 3725ee1df..1cf632eb5 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -88,7 +88,7 @@ pub struct Announce { /// Errors that can occur when parsing the `Announce` request. /// /// The `info_hash` and `peer_id` query params are special because they contain -/// binary data. The `info_hash` is a 40-byte SHA1 hash and the `peer_id` is a +/// binary data. The `info_hash` is a 20-byte SHA1 hash and the `peer_id` is a /// 20-byte array. #[derive(Error, Debug)] pub enum ParseAnnounceQueryError { diff --git a/src/servers/signals.rs b/src/servers/signals.rs index 879a82d5e..f0312b886 100644 --- a/src/servers/signals.rs +++ b/src/servers/signals.rs @@ -1,4 +1,4 @@ -/// This module contains functions to handle signals. +//! This module contains functions to handle signals. use log::info; /// Resolves on `ctrl_c` or the `terminate` signal. 
diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs index 4a75145c1..a389388a7 100644 --- a/src/servers/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -1,3 +1,71 @@ +//! Logic for generating and verifying connection IDs. +//! +//! The UDP tracker requires the client to connect to the server before it can +//! send any data. The server responds with a random 64-bit integer that the +//! client must use to identify itself. +//! +//! This connection ID is used to avoid spoofing attacks. The client must send +//! the connection ID in all requests to the server. The server will ignore any +//! requests that do not contain the correct connection ID. +//! +//! The simplest way to implement this would be to generate a random number when +//! the client connects and store it in a hash table. However, this would +//! require the server to store a large number of connection IDs, which would be +//! a waste of memory. Instead, the server generates a connection ID based on +//! the client's IP address and the current time. This allows the server to +//! verify the connection ID without storing it. +//! +//! This module implements this method of generating connection IDs. It's the +//! most common way to generate connection IDs. The connection ID is generated +//! using a time based algorithm and it is valid for a certain amount of time +//! (usually two minutes). The connection ID is generated using the following: +//! +//! ```text +//! connection ID = hash(client IP + current time slot + secret seed) +//! ``` +//! +//! Time slots are two minute intervals since the Unix epoch. The secret seed is +//! a random number that is generated when the server starts. And the client IP +//! is used in order to generate a unique connection ID for each client. +//! +//! The BEP-15 recommends a two-minute time slot. +//! +//! ```text +//! Timestamp (seconds from Unix epoch): +//! 
|------------|------------|------------|------------| +//! 0 120 240 360 480 +//! Time slots (two-minutes time extents from Unix epoch): +//! |------------|------------|------------|------------| +//! 0 1 2 3 4 +//! Peer connections: +//! Peer A |-------------------------| +//! Peer B |-------------------------| +//! Peer C |------------------| +//! Peer A connects at timestamp 120 slot 1 -> connection ID will be valid from timestamp 120 to 360 +//! Peer B connects at timestamp 240 slot 2 -> connection ID will be valid from timestamp 240 to 480 +//! Peer C connects at timestamp 180 slot 1 -> connection ID will be valid from timestamp 180 to 360 +//! ``` +//! > **NOTICE**: connection ID is always the same for a given peer +//! (socket address) and time slot. +//! +//! > **NOTICE**: connection ID will be valid for two time extents, **not two +//! minutes**. It'll be valid for the current time extent and the next one. +//! +//! Refer to [`Connect`](crate::servers::udp#connect) for more information about +//! the connection process. +//! +//! ## Advantages +//! +//! - It consumes less memory than storing a hash table of connection IDs. +//! - It's easy to implement. +//! - It's fast. +//! +//! ## Disadvantages +//! +//! - It's not very flexible. The connection ID is only valid for a certain +//! amount of time. +//! - It's not very accurate. The connection ID is valid for more than two +//! minutes. use std::net::SocketAddr; use std::panic::Location;
#[must_use] pub fn into_connection_id(connection_cookie: &Cookie) -> ConnectionId { ConnectionId(i64::from_le_bytes(*connection_cookie)) } +/// Generates a new connection cookie. #[must_use] pub fn make(remote_address: &SocketAddr) -> Cookie { let time_extent = cookie_builder::get_last_time_extent(); @@ -30,6 +101,8 @@ pub fn make(remote_address: &SocketAddr) -> Cookie { cookie_builder::build(remote_address, &time_extent) } +/// Checks if the supplied `connection_cookie` is valid. +/// /// # Panics /// /// It would panic if the `COOKIE_LIFETIME` constant would be an unreasonably large number. diff --git a/src/servers/udp/error.rs b/src/servers/udp/error.rs index a6381cc78..ce59cd015 100644 --- a/src/servers/udp/error.rs +++ b/src/servers/udp/error.rs @@ -1,24 +1,30 @@ +//! Error types for the UDP server. use std::panic::Location; use thiserror::Error; use torrust_tracker_located_error::LocatedError; +/// Error returned by the UDP server. #[derive(Error, Debug)] pub enum Error { + /// Error returned when the domain tracker returns an error. #[error("tracker server error: {source}")] TrackerError { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, + /// Error returned from a third-party library (aquatic_udp_protocol). #[error("internal server error: {message}, {location}")] InternalServer { location: &'static Location<'static>, message: String, }, + /// Error returned when the connection id could not be verified. #[error("connection id could not be verified")] InvalidConnectionId { location: &'static Location<'static> }, + /// Error returned when the request is invalid. #[error("bad request: {source}")] BadRequest { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index e00203cfc..e94e0292f 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -1,3 +1,4 @@ +//! Handlers for the UDP server. 
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::panic::Location; use std::sync::Arc; @@ -16,6 +17,15 @@ use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::{statistics, Tracker}; +/// It handles the incoming UDP packets. +/// +/// It's responsible for: +/// +/// - Parsing the incoming packet. +/// - Delegating the request to the correct handler depending on the request +/// type. +/// +/// It will return an `Error` response if the request is invalid. pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: &Tracker) -> Response { match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|e| Error::InternalServer { message: format!("{e:?}"), @@ -43,6 +53,8 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: & } } +/// It dispatches the request to the correct handler. +/// /// # Errors /// /// If a error happens in the `handle_request` function, it will just return the `ServerError`. @@ -54,17 +66,24 @@ pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: } } +/// It handles the `Connect` request. Refer to [`Connect`](crate::servers::udp#connect) +/// request for more information. +/// /// # Errors /// -/// This function dose not ever return an error. +/// This function does not ever return an error. 
pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, tracker: &Tracker) -> Result { + debug!("udp connect request: {:#?}", request); + let connection_cookie = make(&remote_addr); let connection_id = into_connection_id(&connection_cookie); - let response = Response::from(ConnectResponse { + let response = ConnectResponse { transaction_id: request.transaction_id, connection_id, - }); + }; + + debug!("udp connect response: {:#?}", response); // send stats event match remote_addr { @@ -76,9 +95,12 @@ pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, t } } - Ok(response) + Ok(Response::from(response)) } +/// It authenticates the request. It returns an error if the peer is not allowed +/// to make the request. +/// /// # Errors /// /// Will return `Error` if unable to `authenticate_request`. @@ -91,6 +113,9 @@ pub async fn authenticate(info_hash: &InfoHash, tracker: &Tracker) -> Result<(), }) } +/// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) +/// request for more information. +/// /// # Errors /// /// If a error happens in the `handle_announce` function, it will just return the `ServerError`. 
@@ -124,8 +149,8 @@ pub async fn handle_announce( } #[allow(clippy::cast_possible_truncation)] - let announce_response = if remote_addr.is_ipv4() { - Response::from(AnnounceResponse { + if remote_addr.is_ipv4() { + let announce_response = AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), leechers: NumberOfPeers(i64::from(response.swarm_stats.leechers) as i32), @@ -144,9 +169,13 @@ pub async fn handle_announce( } }) .collect(), - }) + }; + + debug!("udp announce response: {:#?}", announce_response); + + Ok(Response::from(announce_response)) } else { - Response::from(AnnounceResponse { + let announce_response = AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), leechers: NumberOfPeers(i64::from(response.swarm_stats.leechers) as i32), @@ -165,16 +194,23 @@ pub async fn handle_announce( } }) .collect(), - }) - }; + }; + + debug!("udp announce response: {:#?}", announce_response); - Ok(announce_response) + Ok(Response::from(announce_response)) + } } +/// It handles the `Scrape` request. Refer to [`Scrape`](crate::servers::udp#scrape) +/// request for more information. +/// /// # Errors /// -/// This function dose not ever return an error. +/// This function does not ever return an error. 
pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tracker: &Tracker) -> Result { + debug!("udp scrape request: {:#?}", request); + // Convert from aquatic infohashes let mut info_hashes = vec![]; for info_hash in &request.info_hashes { @@ -217,10 +253,14 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra } } - Ok(Response::from(ScrapeResponse { + let response = ScrapeResponse { transaction_id: request.transaction_id, torrent_stats, - })) + }; + + debug!("udp scrape response: {:#?}", response); + + Ok(Response::from(response)) } fn handle_error(e: &Error, transaction_id: TransactionId) -> Response { diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index 7b755a20b..edbfd77d2 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -1,3 +1,643 @@ +//! UDP Tracker. +//! +//! This module contains the UDP tracker implementation. +//! +//! The UDP tracker is a simple UDP server that responds to these requests: +//! +//! - `Connect`: used to get a connection ID which must be provided on each +//! request in order to avoid spoofing the source address of the UDP packets. +//! - `Announce`: used to announce the presence of a peer to the tracker. +//! - `Scrape`: used to get information about a torrent. +//! +//! It was introduced in [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html) +//! as an alternative to the [HTTP tracker](https://www.bittorrent.org/beps/bep_0003.html). +//! The UDP tracker is more efficient than the HTTP tracker because it uses UDP +//! instead of TCP. +//! +//! Refer to the [`bit_torrent`](crate::shared::bit_torrent) module for more +//! information about the `BitTorrent` protocol. +//! +//! Refer to [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html) +//! and to [BEP 41. UDP Tracker Protocol Extensions](https://www.bittorrent.org/beps/bep_0041.html) +//! 
for more information about the UDP tracker protocol. +//! +//! > **NOTICE**: [BEP-41](https://www.bittorrent.org/beps/bep_0041.html) is not +//! implemented yet. +//! +//! > **NOTICE**: we are using the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol) +//! crate so requests and responses are handled by it. +//! +//! > **NOTICE**: all values are sent in network byte order ([big endian](https://en.wikipedia.org/wiki/Endianness)). +//! +//! ## Table of Contents +//! +//! - [Actions](#actions) +//! - [Connect](#connect) +//! - [Connect Request](#connect-request) +//! - [Connect Response](#connect-response) +//! - [Announce](#announce) +//! - [Announce Request](#announce-request) +//! - [Announce Response](#announce-response) +//! - [Scrape](#scrape) +//! - [Scrape Request](#scrape-request) +//! - [Scrape Response](#scrape-response) +//! - [Errors](#errors) +//! - [Extensions](#extensions) +//! - [Links](#links) +//! - [Credits](#credits) +//! +//! ## Actions +//! +//! Requests are sent to the tracker using UDP packets. The UDP tracker protocol +//! is designed to be as simple as possible. It uses a single UDP port and +//! supports only three types of requests: `Connect`, `Announce` and `Scrape`. +//! +//! Requests are parsed from UDP packets using the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol) +//! crate and then handled by the [`Tracker`](crate::tracker::Tracker) struct. +//! And then the response is also built using the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol) +//! and converted to a UDP packet. +//! +//! ```text +//! UDP packet -> Aquatic Struct Request -> [Torrust Struct Request] -> Tracker -> Aquatic Struct Response -> UDP packet +//! ``` +//! +//! For the `Announce` request there is a wrapper struct [`AnnounceWrapper`](crate::servers::udp::request::AnnounceWrapper). +//! 
It was added to add an extra field with the internal [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) struct. +//! +//! ### Connect +//! +//! `Connect` requests are used to get a connection ID which must be provided on +//! each request in order to avoid spoofing the source address of the UDP. +//! +//! The connection ID is a random 64-bit integer that is used to identify the +//! client. It is used to prevent spoofing of the source address of the UDP +//! packets. Before announcing or scraping, you have to obtain a connection ID. +//! +//! The connection ID is generated by the tracker and sent back to the client's +//! IP address. Only the client using that IP can receive the response, so the +//! tracker can be sure that the client is the one who sent the request. If the +//! client's IP was spoofed the tracker will send the response to the wrong +//! client and the client will not receive it. +//! +//! The reason why the UDP tracker protocol needs a connection ID to avoid IP +//! spoofing can be explained as follows: +//! +//! 1. No connection state: Unlike TCP, UDP is a connectionless protocol, +//! meaning that it does not establish a connection between two endpoints before +//! exchanging data. As a result, it is more susceptible to IP spoofing, where +//! an attacker sends packets with a forged source IP address, tricking the +//! receiver into believing that they are coming from a legitimate source. +//! +//! 2. Mitigating IP spoofing: To mitigate IP spoofing in the UDP tracker +//! protocol, a connection ID is used. When a client wants to interact with a +//! tracker, it sends a "connect" request to the tracker, which, in turn, +//! responds with a unique connection ID. This connection ID must be included in +//! all subsequent requests from the client to the tracker. +//! +//! 3. Validating requests: By requiring the connection ID, the tracker can +//! verify that the requests are coming from the same client that initially sent +//! 
the "connect" request. If an attacker attempts to spoof the client's IP +//! address, they would also need to know the valid connection ID to be accepted +//! by the tracker. This makes it significantly more challenging for an attacker +//! to spoof IP addresses and disrupt the P2P network. +//! +//! There are different ways to generate a connection ID. The most common way is +//! to generate a time bound secret. The secret is generated using a time based +//! algorithm and it is valid for a certain amount of time. +//! +//! ```text +//! connection ID = hash(client IP + current time slot + secret seed) +//! ``` +//! +//! The BEP-15 recommends a two-minute time slot. Refer to [`connection_cookie`](crate::servers::udp::connection_cookie) +//! for more information about the connection ID generation with this method. +//! +//! #### Connect Request +//! +//! **Connect request (UDP packet)** +//! +//! Offset | Type/Size | Name | Description | Hex | Decimal +//! -------|-------------------|------------------|-------------------------------------------------|-----------------------------|----------------- +//! 0 | [`i64`](std::i64) | `protocol_id` | Magic constant that will identify the protocol. | `0x00_00_04_17_27_10_19_80` | `4497486125440` +//! 8 | [`i32`](std::i32) | `action` | Action identifying the connect request. | `0x00_00_00_00` | `0` +//! 12 | [`i32`](std::i32) | `transaction_id` | Randomly generated by the client. | `0xCB_05_5E_07` | `-888840697` +//! +//! **Sample connect request (UDP packet)** +//! +//! UDP packet bytes: +//! +//! ```text +//! Offset: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] +//! Decimal: [ 0, 0, 4, 23, 39, 16, 25, 128, 0, 0, 0, 0, 203, 5, 94, 7] +//! Hex: [0x00, 0x00, 0x04, 0x17, 0x27, 0x10, 0x19, 0x80, 0x00, 0x00, 0x00, 0x00, 0xCB, 0x05, 0x5E, 0x07] +//! Param: [<------------- protocol_id ------------------>,<------- action ------>,<--- transaction_id -->] +//! ``` +//! +//! UDP packet fields: +//! +//! 
Offset | Type/Size | Name | Bytes Dec (Big Endian) | Hex | Decimal +//! -------|-------------------|------------------|--------------------------------|-----------------------------|---------------- +//! 0 | [`i64`](std::i64) | `protocol_id` | [0, 0, 4, 23, 39, 16, 25, 128] | `0x00_00_04_17_27_10_19_80` | `4497486125440` +//! 8 | [`i32`](std::i32) | `action` | [0, 0, 0, 0] | `0x00_00_00_00` | `0` +//! 12 | [`i32`](std::i32) | `transaction_id` | [203, 5, 94, 7] | `0xCB_05_5E_07` | `-888840697` +//! +//! **Connect request (parsed struct)** +//! +//! After parsing the UDP packet, the [`ConnectRequest`](aquatic_udp_protocol::request::ConnectRequest) +//! request struct will look like this: +//! +//! Field | Type | Example +//! -----------------|----------------------------------------------------------------|------------- +//! `transaction_id` | [`TransactionId`](aquatic_udp_protocol::common::TransactionId) | `-888840697` +//! +//! #### Connect Response +//! +//! **Connect response (UDP packet)** +//! +//! Offset | Type/Size | Name | Description | Hex | Decimal +//! -------|-------------------|------------------|-------------------------------------------------------|-----------------------------|----------------------- +//! 0 | [`i32`](std::i32) | `action` | Action identifying the connect request | `0x00_00_00_00` | `0` +//! 4 | [`i32`](std::i32) | `transaction_id` | Must match the `transaction_id` sent from the client. | `0xCB_05_5E_07` | `-888840697` +//! 8 | [`i64`](std::i64) | `connection_id` | Generated by the tracker to authenticate the client. | `0xC5_58_7C_09_08_48_D8_37` | `-4226491872051668937` +//! +//! > **NOTICE**: the `connection_id` is used when further information is +//! exchanged with the tracker, to identify the client. This `connection_id` can +//! be reused for multiple requests, but if it's cached for too long, it will +//! not be valid anymore. +//! +//! > **NOTICE**: `Hex` column is a signed 2's complement. +//! +//! 
**Sample connect response (UDP packet)** +//! +//! UDP packet bytes: +//! +//! ```text +//! Offset: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] +//! Decimal: [ 0, 0, 0, 0, 203, 5, 94, 7, 197, 88, 124, 9, 8, 72, 216, 55] +//! Hex: [0x00, 0x00, 0x00, 0x00, 0xCB, 0x05, 0x5E, 0x07, 0xC5, 0x58, 0x7C, 0x09, 0x08, 0x48, 0xD8, 0x37] +//! Param: [<------ action ------>,<-- transaction_id --->,<--------------- connection_id --------------->] +//! ``` +//! +//! UDP packet fields: +//! +//! Offset | Type/Size | Name | Bytes (Big Endian) | Hex | Decimal +//! -------|-------------------|------------------|-----------------------------------|------------------------------|----------------------- +//! 0 | [`i32`](std::i32) | `action` | [0, 0, 0, 0] | `0x00_00_00_00` | `0` +//! 4 | [`i32`](std::i32) | `transaction_id` | [203, 5, 94, 7] | `0xCB_05_5E_07` | `-888840697` +//! 8 | [`i64`](std::i64) | `connection_id` | [197, 88, 124, 9, 8, 72, 216, 55] | `0xC5_58_7C_09_08_48_D8_37` | `-4226491872051668937` +//! +//! > **NOTICE**: `Hex` column is a signed 2's complement. +//! +//! **Connect response (struct)** +//! +//! Before building the UDP packet, the [`ConnectResponse`](aquatic_udp_protocol::response::ConnectResponse) +//! struct will look like this: +//! +//! Field | Type | Example +//! -----------------|----------------------------------------------------------------|------------------------- +//! `connection_id` | [`ConnectionId`](aquatic_udp_protocol::common::ConnectionId) | `-4226491872051668937` +//! `transaction_id` | [`TransactionId`](aquatic_udp_protocol::common::TransactionId) | `-888840697` +//! +//! **Connect specification** +//! +//! Original specification in [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html). +//! +//! ### Announce +//! +//! `Announce` requests are used to announce the presence of a peer to the +//! tracker. The tracker responds with a list of peers that are also downloading +//! the same torrent. 
A "swarm" is a group of peers that are downloading the +//! same torrent. +//! +//! #### Announce Request +//! +//! **Announce request (UDP packet)** +//! +//! Offset | Type/Size | Name | Description | Hex | Decimal +//! -------|-------------------|------------------|--------------------------------------------------------------|-----------------------------------------------------------------|---------------------------------------------------------- +//! 0 | [`i64`](std::i64) | `connection_id` | The connection id acquired from establishing the connection. | `0xC5_58_7C_09_08_48_D8_37` | `-4226491872051668937` +//! 8 | [`i32`](std::i32) | `action` | Action for announce request. | `0x00_00_00_01` | `1` +//! 12 | [`i32`](std::i32) | `transaction_id` | Randomly generated by the client. | `0xA2_F9_54_48` | `-1560718264` +//! 16 | 20-byte | `info_hash` | The infohash of the torrent being announced. | `0x03_84_05_48_64_3A_F2_A7_B6_3A_9F_5C_BC_A3_48_BC_71_50_CA_3A` | `20071130873666512363095721859061691407221705274` +//! 36 | 20-byte | `peer_id` | The ID of the peer announcing the torrent. | `0x2D_71_42_34_34_31_30_2D_29_53_64_7E_64_65_34_78_4D_70_36_44` | `259430336069436570531165609119312093997849130564` +//! 56 | [`i64`](std::i64) | `downloaded` | The number of bytes the peer has downloaded so far. | `0x00_00_00_00_00_00_00_00` | `0` +//! 64 | [`i64`](std::i64) | `left` | The number of bytes left to download by the peer. | `0x00_00_00_00_00_00_00_00` | `0` +//! 72 | [`i64`](std::i64) | `uploaded` | The number of bytes the peer has uploaded so far. | `0x00_00_00_00_00_00_00_00` | `0` +//! 80 | [`i32`](std::i32) | `event` | The event the peer is reporting to the tracker. | `0x0`, `0x1`, `0x2`, `0x3` | `0`: none; `1`: completed; `2`: started; `3`: stopped +//! 84 | [`i32`](std::i32) | `IP address` | The peer IP. Ignored by the tracker. It uses the Sender's IP.| `0x00_00_00_00` | `0` +//! 88 | [`i32`](std::i32) | `key` | A unique key that is randomized by the client. 
| `0xEF_34_95_D6` | `-281766442` +//! 92 | [`i32`](std::i32) | `num_want` | The maximum number of peers the peer wants in the response. | `0x00_00_00_C8` | `200` +//! 96 | [`i16`](std::i16) | `port` | The port the peer is listening on. | `0x44_8C` | `17548` +//! +//! **Peer IP address** +//! +//! The peer IP address is always ignored by the tracker. It uses the sender's +//! IP address. +//! +//! _"Do note that most trackers will only honor the IP address field under +//! limited circumstances."_ ([BEP 15](https://www.bittorrent.org/beps/bep_0015.html)). +//! +//! Although not supported by this tracker a UDP tracker can use the IP address +//! provided by the peer in the announce request under specific circumstances +//! when it cannot rely on the source IP address of the incoming request. These +//! circumstances might include: +//! +//! 1. Network Address Translation (NAT): In cases where a peer is behind a NAT, +//! the private IP address of the peer is not directly routable over the +//! internet. The NAT device translates the private IP address to a public one +//! when sending packets to the tracker. The public IP address is what the +//! tracker sees as the source IP of the incoming request. However, if the peer +//! provides its private IP address in the announce request, the tracker can use +//! this information to facilitate communication between peers in the same +//! private network. +//! +//! 2. Proxy or VPN usage: If a peer uses a proxy or VPN service to connect to +//! the tracker, the source IP address seen by the tracker will be the one +//! assigned by the proxy or VPN server. In this case, if the peer provides its +//! actual IP address in the announce request, the tracker can use it to +//! establish a direct connection with other peers, bypassing the proxy or VPN +//! server. This might improve performance or help in cases where some peers +//! cannot connect to the proxy or VPN server. +//! +//! 3. 
Tracker is behind a NAT, firewall, proxy, VPN, or load balancer: In cases +//! where the tracker is behind a NAT, firewall, proxy, VPN, or load balancer, +//! the source IP address of the incoming request will be the public IP address +//! of the NAT, firewall, proxy, VPN, or load balancer. If the peer provides its +//! private IP address in the announce request, the tracker can use this +//! information to establish a direct connection with the peer. +//! +//! It's important to note that using the provided IP address can pose security +//! risks, as malicious peers might spoof their IP addresses in the announce +//! request to perform various types of attacks. +//! +//! > **NOTICE**: The current tracker behavior is to ignore the IP address +//! provided by the peer, and use the source IP address of the incoming request, +//! when the tracker is not running behind a proxy, and to use the right-most IP +//! address in the `X-Forwarded-For` header when the tracker is running behind a +//! proxy. +//! +//! > **NOTICE**: The tracker also changes the peer IP address to the tracker +//! external IP when the peer is using a loopback IP address. +//! +//! **Sample announce request (UDP packet)** +//! +//! Some values used in the sample request: +//! +//! - Infohash: `0x03840548643AF2A7B63A9F5CBCA348BC7150CA3A` +//! - Peer ID: `0x2D7142343431302D2953647E646534784D703644` +//! +//! UDP packet bytes: +//! +//! ```text +//! Offset: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100] +//! 
Decimal: [ 197, 88, 124, 9, 8, 72, 216, 55, 0, 0, 0, 1, 162, 249, 84, 72, 3, 132, 5, 72, 100, 58, 242, 167, 182, 58, 159, 92, 188, 163, 72, 188, 113, 80, 202, 58, 45, 113, 66, 52, 52, 49, 48, 45, 41, 83, 100, 126, 100, 101, 52, 120, 77, 112, 54, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 239, 52, 149, 214, 0, 0, 0, 200, 68, 140, 2, 1, 47] +//! Hex: [ 0xC5, 0x58, 0x7C, 0x09, 0x08, 0x48, 0xD8, 0x37, 0x00, 0x00, 0x00, 0x01, 0xA2, 0xF9, 0x54, 0x48, 0x03, 0x84, 0x05, 0x48, 0x64, 0x3A, 0xF2, 0xA7, 0xB6, 0x3A, 0x9F, 0x5C, 0xBC, 0xA3, 0x48, 0xBC, 0x71, 0x50, 0xCA, 0x3A, 0x2D, 0x71, 0x42, 0x34, 0x34, 0x31, 0x30, 0x2D, 0x29, 0x53, 0x64, 0x7E, 0x64, 0x65, 0x34, 0x78, 0x4D, 0x70, 0x36, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x34, 0x95, 0xD6, 0x00, 0x00, 0x00, 0xC8, 0x44, 0x8C, 0x02, 0x01, 0x2F] +//! Param: [<--------------- connection_id --------------->,<--------- action ---->,<-- transaction_id --->,<--------------------------------------------------------- info_hash ------------------------------------------------->,<---------------------------------------------- peer_id -------------------------------------------------------------->,<------------------- downloaded -------------->,<-------------------- left ------------------->,<---------------- uploaded ------------------->,<-------- event ------>,<----- IP address ---->,<--------- key ------->,<------ num_want ----->,<-- port --><---- BEP 41 --->] +//! ``` +//! +//! UDP packet fields: +//! +//! Offset | Type/Size | Name | Bytes Dec (Big Endian) | Hex | Decimal +//! 
-------|-------------------|-------------------|--------------------------------------------------------------------------|-----------------------------------------------------------------|---------------------------------------------------- +//! 0 | [`i64`](std::i64) | `connection_id` | `[197,88,124,9,8,72,216,55]` | `0xC5_58_7C_09_08_48_D8_37` | `-4226491872051668937` +//! 8 | [`i32`](std::i32) | `action` | `[0,0,0,1]` | `0x00_00_00_01` | `1` +//! 12 | [`i32`](std::i32) | `transaction_id` | `[162,249,84,72]` | `0xA2_F9_54_48` | `-1560718264` +//! 16 | 20 bytes | `info_hash` | `[3,132,5,72,100,58,242,167,182,58,159,92,188,163,72,188,113,80,202,58]` | `0x03_84_05_48_64_3A_F2_A7_B6_3A_9F_5C_BC_A3_48_BC_71_50_CA_3A` | `20071130873666512363095721859061691407221705274` +//! 36 | 20 bytes | `peer_id` | `[45,113,66,52,52,49,48,45,41,83,100,126,100,101,52,120,77,112,54,68]` | `0x2D_71_42_34_34_31_30_2D_29_53_64_7E_64_65_34_78_4D_70_36_44` | `259430336069436570531165609119312093997849130564` +//! 56 | [`i64`](std::i64) | `downloaded` | `[0,0,0,0,0,0,0,0]` | `0x00_00_00_00_00_00_00_00` | `0` +//! 64 | [`i64`](std::i64) | `left` | `[0,0,0,0,0,0,0,0]` | `0x00_00_00_00_00_00_00_00` | `0` +//! 72 | [`i64`](std::i64) | `uploaded` | `[0,0,0,0,0,0,0,0]` | `0x00_00_00_00_00_00_00_00` | `0` +//! 80 | [`i32`](std::i32) | `event` | `[0,0,0,2]` | `0x00_00_00_02` | `2` (`Started`) +//! 84 | [`i32`](std::i32) | `IP address` | `[0,0,0,0]` | `0x00_00_00_00` | `0` +//! 88 | [`i32`](std::i32) | `key` | `[239,52,149,214]` | `0xEF_34_95_D6` | `-281766442` +//! 92 | [`i32`](std::i32) | `num_want` | `[0,0,0,200]` | `0x00_00_00_C8` | `200` +//! 96 | [`i16`](std::i16) | `port` | `[68,140]` | `0x44_8C` | `17548` +//! 98 | 1 byte | `Option-Type` | `[2]` | `0x02` | `2` +//! 99 | 2 bytes | `Length Byte` | `[1,47]` | `0x01_2F` | `303` +//! 101 | N bytes | | | | +//! +//! > **NOTICE**: bytes after offset 98 are part of the [BEP-41. 
UDP Tracker Protocol Extensions](https://www.bittorrent.org/beps/bep_0041.html). +//! There are three options defined for byte 98: `0x0` (`EndOfOptions`), `0x1` (`NOP`) and `0x2` (`URLData`). +//! +//! > **NOTICE**: `num_want` is being ignored by the tracker. Refer to +//! [issue 262](https://github.com/torrust/torrust-tracker/issues/262) for more +//! information. +//! +//! **Announce request (parsed struct)** +//! +//! After parsing the UDP packet, the [`AnnounceRequest`](aquatic_udp_protocol::request::AnnounceRequest) +//! struct will contain the following fields: +//! +//! Field | Type | Example +//! -------------------|---------------------------------------------------------------- |-------------- +//! `connection_id` | [`ConnectionId`](aquatic_udp_protocol::common::ConnectionId) | `-4226491872051668937` +//! `transaction_id` | [`TransactionId`](aquatic_udp_protocol::common::TransactionId) | `-1560718264` +//! `info_hash` | [`InfoHash`](aquatic_udp_protocol::common::InfoHash) | `[3,132,5,72,100,58,242,167,182,58,159,92,188,163,72,188,113,80,202,58]` +//! `peer_id` | [`PeerId`](aquatic_udp_protocol::common::PeerId) | `[45,113,66,52,52,49,48,45,41,83,100,126,100,101,52,120,77,112,54,68]` +//! `bytes_downloaded` | [`NumberOfBytes`](aquatic_udp_protocol::common::NumberOfBytes) | `0` +//! `bytes_uploaded` | [`NumberOfBytes`](aquatic_udp_protocol::common::NumberOfBytes) | `0` +//! `event` | [`AnnounceEvent`](aquatic_udp_protocol::request::AnnounceEvent) | `Started` +//! `ip_address` | [`Ipv4Addr`](std::net::Ipv4Addr) | `None` +//! `peers_wanted` | [`NumberOfPeers`](aquatic_udp_protocol::common::NumberOfPeers) | `200` +//! `port` | [`Port`](aquatic_udp_protocol::common::Port) | `17548` +//! +//! > **NOTICE**: the `peers_wanted` field is the `num_want` field in the UDP +//! packet. +//! +//! We are using a wrapper struct for the aquatic [`AnnounceRequest`](aquatic_udp_protocol::request::AnnounceRequest) +//! 
struct, because we have our internal [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) +//! struct. +//! +//! ```text +//! pub struct AnnounceWrapper { +//! pub announce_request: AnnounceRequest, // aquatic +//! pub info_hash: InfoHash, // our own +//! } +//! ``` +//! +//! #### Announce Response +//! +//! **Announce response (UDP packet)** +//! +//! Offset | Type/Size | Name | Description | Hex | Decimal +//! -----------|-------------------|------------------|---------------------------------------------------------------------------------|-----------------|---------------------------- +//! 0 | [`i32`](std::i32) | `action` | The action this is a reply to. | `0x00_00_00_01` | `1`: announce; `3`: error +//! 4 | [`i32`](std::i32) | `transaction_id` | Must match the `transaction_id` sent in the announce request. | `0x00_00_00_00` | `0` +//! 8 | [`i32`](std::i32) | `interval` | The number of seconds the peer should wait until re-announcing itself. | `0x00_00_00_00` | `0` +//! 12 | [`i32`](std::i32) | `leechers` | The number of peers in the swarm that has not finished downloading. | `0x00_00_00_00` | `0` +//! 16 | [`i32`](std::i32) | `seeders` | The number of peers in the swarm that has finished downloading and are seeding. | `0x00_00_00_00` | `0` +//! | | | | | +//! 20 + 6 * n | [`i32`](std::i32) | `IP address` | The IP of a peer in the swarm. | `0x69_69_69_69` | `1768515945` +//! 24 + 6 * n | [`i16`](std::i16) | `TCP port` | The peer's listen port. | `0x44_8C` | `17548` +//! 20 + 6 * N | | | | | +//! +//! > **NOTICE**: `Hex` column is a signed 2's complement. +//! +//! > **NOTICE**: `IP address` should always be set to 0 when the peer is using +//! `IPv6`. +//! +//! **Sample announce response (UDP packet)** +//! +//! UDP packet bytes (fixed part): +//! +//! ```text +//! Offset: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] +//! Decimal: [ 0, 0, 0, 1, 162, 249, 84, 72, 0, 0, 0, 120, 0, 0, 0, 0, 0, 0, 0, 1] +//! 
Hex: [ 0x00, 0x00, 0x00, 0x01, 0xA2, 0xF9, 0x54, 0x48, 0x00, 0x00, 0x00, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01] +//! Param: [<------- action ------>,<-- transaction_id --->,<----- interval ------>,<----- leechers ------>,<------ seeders ------>] +//! ``` +//! +//! UDP packet fields (fixed part): +//! +//! Offset | Type/Size | Name | Bytes (Big Endian) | Hex | Decimal +//! -----------|-------------------|------------------|---------------------|-----------------|---------------------------- +//! 0 | [`i32`](std::i32) | `action` | `[0, 0, 0, 1]` | `0x00_00_00_01` | `1`: announce; `3`: error +//! 4 | [`i32`](std::i32) | `transaction_id` | `[162,249,84,72]` | `0xA2_F9_54_48` | `-1560718264` +//! 8 | [`i32`](std::i32) | `interval` | `[0,0,0,120]` | `0x00_00_00_78` | `120` +//! 12 | [`i32`](std::i32) | `leechers` | `[0, 0, 0, 0]` | `0x00_00_00_00` | `0` +//! 16 | [`i32`](std::i32) | `seeders` | `[0, 0, 0, 1]` | `0x00_00_00_01` | `1` +//! +//! This is the fixed part of the packet. After the fixed part there is +//! dynamically generated data with the list of peers in the swarm. The list may +//! include `IPv4` or `IPv6` peers, depending on the address family of the +//! underlying UDP packet. I.e. packets from a v4 address use the v4 format, +//! those from a v6 address use the v6 format. +//! +//! UDP packet bytes (`IPv4` peer list): +//! +//! ```text +//! Offset: [ 20, 21, 22, 23, 24, 25] +//! Decimal: [ 105, 105, 105, 105, 68, 140] +//! Hex: [ 0x69, 0x69, 0x69, 0x69, 0x44, 0x8C] +//! Param: [<----- IP address ---->,<-TCP port>] +//! ``` +//! +//! > **NOTICE**: there are 6 bytes per peer (4 bytes for the `IPv4` address and +//! 2 bytes for the TCP port). +//! +//! UDP packet fields (`IPv4` peer list): +//! +//! Offset | Type/Size | Name | Bytes (Big Endian) | Hex | Decimal +//! ---------|-------------------|--------------|---------------------|-----------------|---------------------------- +//! 
20 + 6*n | [`i32`](std::i32) | `IP address` | `[105,105,105,105]` | `0x69_69_69_69` | `1768515945` +//! 24 + 6*n | [`i16`](std::i16) | `TCP port` | `[68,140]` | `0x44_8C` | `17548` +//! 20 + 6*N | | | | | +//! +//! UDP packet bytes (`IPv6` peer list): +//! +//! ```text +//! Offset: [ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37] +//! Decimal: [ 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 68, 140] +//! Hex: [ 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x44, 0x8C] +//! Param: [<-------------------------------------------- IP address ------------------------------------->,<-TCP port>] +//! ``` +//! +//! > **NOTICE**: there are 18 bytes per peer (16 bytes for the `IPv6` address and +//! 2 bytes for the TCP port). +//! +//! UDP packet fields (`IPv6` peer list): +//! +//! Offset | Type/Size | Name | Bytes (Big Endian) | Hex | Decimal +//! ----------|---------------------|--------------|---------------------------------------------------------------------|-----------------------------------------------------|------------------------------------------- +//! 20 + 18*n | [`i128`](std::i128) | `IP address` | `[105,105,105,105,105,105,105,105,105,105,105,105,105,105,105,105]` | `0x69_69_69_69_69_69_69_69_69_69_69_69_69_69_69_69` | `140116268732151132014330720707198675305` +//! 24 + 18*n | [`i16`](std::i16) | `TCP port` | `[68,140]` | `0x44_8C` | `17548` +//! 20 + 18*N | | | | | +//! +//! > **NOTICE**: `Hex` column is a signed 2's complement. +//! +//! > **NOTICE**: the peer list does not include the peer that sent the announce +//! request. +//! +//! **Announce response (struct)** +//! +//! The [`AnnounceResponse`](aquatic_udp_protocol::response::AnnounceResponse) +//! struct will have the following fields: +//! +//! Field | Type | Example +//! --------------------|------------------------------------------------------------------------|-------------- +//! 
`transaction_id` | [`TransactionId`](aquatic_udp_protocol::common::TransactionId) | `-1560718264` +//! `announce_interval` | [`AnnounceInterval`](aquatic_udp_protocol::common::AnnounceInterval) | `120` +//! `leechers` | [`NumberOfPeers`](aquatic_udp_protocol::common::NumberOfPeers) | `0` +//! `seeders` | [`NumberOfPeers`](aquatic_udp_protocol::common::NumberOfPeers) | `1` +//! `peers` | Vector of [`ResponsePeer`](aquatic_udp_protocol::common::ResponsePeer) | `[]` +//! +//! **Announce specification** +//! +//! Original specification in [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html). +//! +//! ### Scrape +//! +//! The `scrape` request allows a peer to get [swarm metadata](crate::tracker::torrent::SwarmMetadata) +//! for multiple torrents at the same time. +//! +//! The response contains the [swarm metadata](crate::tracker::torrent::SwarmMetadata) +//! for that torrent: +//! +//! - [complete](crate::tracker::torrent::SwarmMetadata::complete) +//! - [downloaded](crate::tracker::torrent::SwarmMetadata::downloaded) +//! - [incomplete](crate::tracker::torrent::SwarmMetadata::incomplete) +//! +//! > **NOTICE**: up to about 74 torrents can be scraped at once. A full scrape +//! can't be done with this protocol. This is a limitation of the UDP protocol. +//! Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). +//! Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) +//! for more information about this limitation. +//! +//! #### Scrape Request +//! +//! **Scrape request (UDP packet)** +//! +//! Offset | Type/Size | Name | Description | Hex | Decimal +//! ----------|-------------------|------------------|------------------------------------------------------------------------|-----------------------------------------------------------------|-------------------------------------------------- +//! 
0 | [`i64`](std::i64) | `connection_id` | The `connection_id` retrieved from the establishing of the connection. | `0xC5_58_7C_09_08_48_D8_37` | `-4226491872051668937` +//! 8 | [`i32`](std::i32) | `action` | Action identifying the scrape request | `0x00_00_00_02` | `2` (`Scrape`) +//! 12 | [`i32`](std::i32) | `transaction_id` | Randomly generated by the client. | `0xA2_F9_54_48` | `-1560718264` +//! 16 + 20*n | 20 bytes | `info_hash` | The infohash of the torrent being scraped. | `0x03_84_05_48_64_3A_F2_A7_B6_3A_9F_5C_BC_A3_48_BC_71_50_CA_3A` | `20071130873666512363095721859061691407221705274` +//! 16 + 20*N | | | | +//! +//! The last field (`info_hash`) is repeated for each torrent being scraped. +//! +//! Dynamic part of the UDP packet: +//! +//! Offset | Type/Size | Name | Description | Hex | Decimal +//! ----------|-------------------|-------------|--------------------------------------------|-----------------------------------------------------------------|--------------------------------------------------- +//! 16 + 20*n | 20 bytes | `info_hash` | The infohash of the torrent being scraped. | `0x03_84_05_48_64_3A_F2_A7_B6_3A_9F_5C_BC_A3_48_BC_71_50_CA_3A` | `20071130873666512363095721859061691407221705274` +//! +//! **Sample scrape request (UDP packet)** +//! +//! UDP packet bytes (fixed part): +//! +//! ```text +//! Offset: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35] +//! Decimal: [ 197, 88, 124, 9, 8, 72, 216, 55, 0, 0, 0, 2, 162, 249, 84, 72, 3, 132, 5, 72, 100, 58, 242, 167, 182, 58, 159, 92, 188, 163, 72, 188, 113, 80, 202, 58] +//! Hex: [ 0xC5, 0x58, 0x7C, 0x09, 0x08, 0x48, 0xD8, 0x37, 0x00, 0x00, 0x00, 0x02, 0xA2, 0xF9, 0x54, 0x48, 0x03, 0x84, 0x05, 0x48, 0x64, 0x3A, 0xF2, 0xA7, 0xB6, 0x3A, 0x9F, 0x5C, 0xBC, 0xA3, 0x48, 0xBC, 0x71, 0x50, 0xCA, 0x3A] +//! 
Param: [<--------------- connection_id --------------->,<--------- action ---->,<-- transaction_id --->,<--------------------------------------------------------- info_hash ------------------------------------------------->] +//! ``` +//! +//! UDP packet bytes (infohash list): +//! +//! ```text +//! Offset: [ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35] +//! Decimal: [ 3, 132, 5, 72, 100, 58, 242, 167, 182, 58, 159, 92, 188, 163, 72, 188, 113, 80, 202, 58] +//! Hex: [ 0x03, 0x84, 0x05, 0x48, 0x64, 0x3A, 0xF2, 0xA7, 0xB6, 0x3A, 0x9F, 0x5C, 0xBC, 0xA3, 0x48, 0xBC, 0x71, 0x50, 0xCA, 0x3A] +//! Param: [<--------------------------------------------------------- info_hash ------------------------------------------------->] +//! ``` +//! +//! UDP packet fields: +//! +//! Offset | Type/Size | Name | Bytes Dec (Big Endian) | Hex | Decimal +//! -------|-------------------|------------------|--------------------------------------------------------------------------|-----------------------------------------------------------------|-------------------------------------------------- +//! 0 | [`i64`](std::i64) | `connection_id` | `[197,88,124,9,8,72,216,55]` | `0xC5_58_7C_09_08_48_D8_37` | `-4226491872051668937` +//! 8 | [`i32`](std::i32) | `action` | `[0, 0, 0, 2]` | `0x00_00_00_02` | `2` (`Scrape`) +//! 12 | [`i32`](std::i32) | `transaction_id` | `[162,249,84,72]` | `0xA2_F9_54_48` | `-1560718264` +//! 16 | 20 bytes | `info_hash` | `[3,132,5,72,100,58,242,167,182,58,159,92,188,163,72,188,113,80,202,58]` | `0x03_84_05_48_64_3A_F2_A7_B6_3A_9F_5C_BC_A3_48_BC_71_50_CA_3A` | `20071130873666512363095721859061691407221705274` +//! +//! **Scrape request (parsed struct)** +//! +//! After parsing the UDP packet, the [`ScrapeRequest`](aquatic_udp_protocol::request::ScrapeRequest) +//! struct will look like this: +//! +//! Field | Type | Example +//! 
-----------------|----------------------------------------------------------------|---------------------------------------------------------------------------- +//! `connection_id` | [`ConnectionId`](aquatic_udp_protocol::common::ConnectionId) | `-4226491872051668937` +//! `transaction_id` | [`TransactionId`](aquatic_udp_protocol::common::TransactionId) | `-1560718264` +//! `info_hashes` | Vector of [`InfoHash`](aquatic_udp_protocol::common::InfoHash) | `[[3,132,5,72,100,58,242,167,182,58,159,92,188,163,72,188,113,80,202,58]]` +//! +//! #### Scrape Response +//! +//! **Scrape response (UDP packet)** +//! +//! Offset | Type/Size | Name (BEP15 or libtorrent) | Description | Hex | Decimal +//! ----------|-------------------|-----------------------------|-------------------------------------------------------|-----------------|----------------- +//! 0 | [`i32`](std::i32) | `action` | Action identifying the scrape request | `0x00_00_00_02` | `2` (`Scrape`) +//! 4 | [`i32`](std::i32) | `transaction_id` | Must match the `transaction_id` sent from the client. | `0xA2_F9_54_48` | `-1560718264` +//! 8 + 12*n | [`i32`](std::i32) | `seeders` or `complete` | The current number of connected seeds. | `0x00_00_00_00` | `0` +//! 12 + 12*n | [`i32`](std::i32) | `completed` or `downloaded` | The number of times this torrent has been downloaded. | `0x00_00_00_00` | `0` +//! 16 + 12*n | [`i32`](std::i32) | `leechers` or `incomplete` | The current number of connected leechers. | `0x00_00_00_00` | `0` +//! 8 + 12*N | | | | | +//! +//! > **NOTICE**: `Hex` column is a signed 2's complement. +//! +//! Dynamic part of the UDP packet: +//! +//! Offset | Type/Size | Name (BEP15 or libtorrent) | Description | Hex | Decimal +//! ----------|-------------------|-----------------------------|-------------------------------------------------------|-----------------|----------------- +//! 8 + 12*n | [`i32`](std::i32) | `seeders` or `complete` | The current number of connected seeds. 
| `0x00_00_00_00` | `0` +//! 12 + 12*n | [`i32`](std::i32) | `completed` or `downloaded` | The number of times this torrent has been downloaded. | `0x00_00_00_00` | `0` +//! 16 + 12*n | [`i32`](std::i32) | `leechers` or `incomplete` | The current number of connected leechers. | `0x00_00_00_00` | `0` +//! 8 + 12*N | | | | | +//! +//! For each info hash in the request there will be 3 32-bit integers (12 bytes) +//! in the response with the number of seeders, leechers and downloads. +//! +//! **Sample scrape response (UDP packet)** +//! +//! UDP packet bytes: +//! +//! ```text +//! Offset: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] +//! Decimal: [ 0, 0, 0, 2, 203, 5, 94, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] +//! Hex: [0x00, 0x00, 0x00, 0x02, 0xCB, 0x05, 0x5E, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] +//! Param: [<------ action ------>,<-- transaction_id --->,<------ seeders ------>,<----- completed ----->,<------ leechers ----->] +//! ``` +//! +//! UDP packet fields: +//! +//! Offset | Type/Size | Name | Bytes (Big Endian) | Hex | Decimal +//! -------|-------------------|------------------|--------------------|------------------|---------------- +//! 0 | [`i32`](std::i32) | `action` | [0, 0, 0, 2] | `0x00_00_00_02` | `2` (`Scrape`) +//! 4 | [`i32`](std::i32) | `transaction_id` | [203, 5, 94, 7] | `0xCB_05_5E_07` | `-888840697` +//! 8 | [`i32`](std::i32) | `seeders` | [0, 0, 0, 0] | `0x00_00_00_00` | `0` +//! 12 | [`i32`](std::i32) | `completed` | [0, 0, 0, 0] | `0x00_00_00_00` | `0` +//! 16 | [`i32`](std::i32) | `leechers` | [0, 0, 0, 0] | `0x00_00_00_00` | `0` +//! +//! > **NOTICE**: `Hex` column is a signed 2's complement. +//! +//! **Scrape response (struct)** +//! +//! Before building the UDP packet, the [`ScrapeResponse`](aquatic_udp_protocol::response::ScrapeResponse) +//! struct will look like this: +//! +//! Field | Type | Example +//! 
-----------------|-------------------------------------------------------------------------------------------------|--------------- +//! `transaction_id` | [`TransactionId`](aquatic_udp_protocol::common::TransactionId) | `-1560718264` +//! `torrent_stats` | Vector of [`TorrentScrapeStatistics`](aquatic_udp_protocol::response::TorrentScrapeStatistics) | `[]` +//! +//! **Scrape specification** +//! +//! Original specification in [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html). +//! +//! ## Errors +//! +//! ### Error Response +//! +//! **Error response (UDP packet)** +//! +//! Offset | Type/Size | Name | Description | Hex | Decimal +//! -------|-------------------|------------------|-------------------------------------------------------|-----------------------------|----------------------- +//! 0 | [`i32`](std::i32) | `action` | Action identifying the error response. | `0x00_00_00_03` | `3` +//! 4 | [`i32`](std::i32) | `transaction_id` | Must match the `transaction_id` sent from the client. | `0xCB_05_5E_07` | `-888840697` +//! 8 | N Bytes | `error_string` | Error description. | | +//! +//! ## Extensions +//! +//! Extensions described in [BEP 41. UDP Tracker Protocol Extensions](https://www.bittorrent.org/beps/bep_0041.html) +//! are not supported yet. +//! +//! ## Links +//! +//! - [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html). +//! - [BEP 41. UDP Tracker Protocol Extensions](https://www.bittorrent.org/beps/bep_0041.html). +//! - [libtorrent - Bittorrent UDP-tracker protocol extension](https://www.rasterbar.com/products/libtorrent/udp_tracker_protocol.html). +//! - [XBTT Tracker. UDP tracker protocol](https://xbtt.sourceforge.net/udp_tracker_protocol.html). +//! - [Wikipedia: UDP tracker](https://en.wikipedia.org/wiki/UDP_tracker). +//! +//! ## Credits +//! +//! 
[Bittorrent UDP-tracker protocol extension](https://www.rasterbar.com/products/libtorrent/udp_tracker_protocol.html) +//! documentation by [Arvid Norberg](https://github.com/arvidn) was very +//! supportive in the development of this documentation. Some descriptions were +//! taken from the [libtorrent](https://www.rasterbar.com/products/libtorrent/udp_tracker_protocol.html). pub mod connection_cookie; pub mod error; pub mod handlers; @@ -5,9 +645,16 @@ pub mod peer_builder; pub mod request; pub mod server; +/// Number of bytes. pub type Bytes = u64; +/// The port the peer is listening on. pub type Port = u16; +/// The transaction id. A random number generated byt the peer that is used to +/// match requests and responses. pub type TransactionId = i64; +/// The maximum number of bytes in a UDP packet. pub const MAX_PACKET_SIZE: usize = 1496; +/// A magic 64-bit integer constant defined in the protocol that is used to +/// identify the protocol. pub const PROTOCOL_ID: i64 = 0x0417_2710_1980; diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs index 8d8852dc7..ac62a7ecd 100644 --- a/src/servers/udp/peer_builder.rs +++ b/src/servers/udp/peer_builder.rs @@ -1,9 +1,18 @@ +//! Logic to extract the peer info from the announce request. use std::net::{IpAddr, SocketAddr}; use super::request::AnnounceWrapper; use crate::shared::clock::{Current, Time}; use crate::tracker::peer::{Id, Peer}; +/// Extracts the [`Peer`](crate::tracker::peer::Peer) info from the +/// announce request. +/// +/// # Arguments +/// +/// * `announce_wrapper` - The announce request to extract the peer info from. +/// * `peer_ip` - The real IP address of the peer, not the one in the announce +/// request. 
#[must_use] pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> Peer { Peer { diff --git a/src/servers/udp/request.rs b/src/servers/udp/request.rs index 4be99e6d0..0afa02806 100644 --- a/src/servers/udp/request.rs +++ b/src/servers/udp/request.rs @@ -1,13 +1,24 @@ +//! UDP request types. +//! +//! Torrust Tracker uses the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol) +//! crate to parse and serialize UDP requests. +//! +//! Some of the type in this module are wrappers around the types in the +//! `aquatic_udp_protocol` crate. use aquatic_udp_protocol::AnnounceRequest; use crate::shared::bit_torrent::info_hash::InfoHash; +/// Wrapper around [`AnnounceRequest`](aquatic_udp_protocol::request::AnnounceRequest). pub struct AnnounceWrapper { + /// [`AnnounceRequest`](aquatic_udp_protocol::request::AnnounceRequest) to wrap. pub announce_request: AnnounceRequest, + /// Info hash of the torrent. pub info_hash: InfoHash, } impl AnnounceWrapper { + /// Creates a new [`AnnounceWrapper`] from an [`AnnounceRequest`]. #[must_use] pub fn new(announce_request: &AnnounceRequest) -> Self { AnnounceWrapper { diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 9eb9836fe..a4f1faae8 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -1,3 +1,22 @@ +//! Module to handle the UDP server instances. +//! +//! There are two main types in this module: +//! +//! - [`UdpServer`](crate::servers::udp::server::UdpServer): a controller to +//! start and stop the server. +//! - [`Udp`](crate::servers::udp::server::Udp): the server launcher. +//! +//! The `UdpServer` is an state machine for a given configuration. This struct +//! represents concrete configuration and state. It allows to start and +//! stop the server but always keeping the same configuration. +//! +//! The `Udp` is the server launcher. It's responsible for launching the UDP +//! but without keeping any state. +//! +//! 
For the time being, the `UdpServer` is only used for testing purposes, +//! because we want to be able to start and stop the server multiple times, and +//! we want to know the bound address and the current state of the server. +//! In production, the `Udp` launcher is used directly. use std::future::Future; use std::io::Cursor; use std::net::SocketAddr; @@ -14,36 +33,76 @@ use crate::servers::udp::handlers::handle_packet; use crate::servers::udp::MAX_PACKET_SIZE; use crate::tracker::Tracker; +/// Error that can occur when starting or stopping the UDP server. +/// +/// Some errors triggered while starting the server are: +/// +/// - The server cannot bind to the given address. +/// - It cannot get the bound address. +/// +/// Some errors triggered while stopping the server are: +/// +/// - The [`UdpServer`](crate::servers::udp::server::UdpServer) cannot send the +/// shutdown signal to the spawned UDP service thread. #[derive(Debug)] pub enum Error { - Error(String), + /// Any kind of error starting or stopping the server. + Error(String), // todo: refactor to use thiserror and add more variants for specific errors. } +/// A UDP server instance controller with no UDP instance running. #[allow(clippy::module_name_repetitions)] pub type StoppedUdpServer = UdpServer; + +/// A UDP server instance controller with a running UDP instance. #[allow(clippy::module_name_repetitions)] pub type RunningUdpServer = UdpServer; +/// A UDP server instance controller. +/// +/// It's responsible for: +/// +/// - Keeping the initial configuration of the server. +/// - Starting and stopping the server. +/// - Keeping the state of the server: `running` or `stopped`. +/// +/// It's an state machine. Configurations cannot be changed. This struct +/// represents concrete configuration and state. It allows to start and stop the +/// server but always keeping the same configuration. 
+/// +/// > **NOTICE**: if the configurations changes after running the server it will +/// reset to the initial value after stopping the server. This struct is not +/// intended to persist configurations between runs. #[allow(clippy::module_name_repetitions)] pub struct UdpServer { + /// The configuration of the server that will be used every time the server + /// is started. pub cfg: torrust_tracker_configuration::UdpTracker, + /// The state of the server: `running` or `stopped`. pub state: S, } +/// A stopped UDP server state. pub struct Stopped; +/// A running UDP server state. pub struct Running { + /// The address where the server is bound. pub bind_address: SocketAddr, stop_job_sender: tokio::sync::oneshot::Sender, job: JoinHandle<()>, } impl UdpServer { + /// Creates a new `UdpServer` instance in `stopped`state. #[must_use] pub fn new(cfg: torrust_tracker_configuration::UdpTracker) -> Self { Self { cfg, state: Stopped {} } } + /// It starts the server and returns a `UdpServer` controller in `running` + /// state. + /// /// # Errors /// /// Will return `Err` if UDP can't bind to given bind address. @@ -74,6 +133,9 @@ impl UdpServer { } impl UdpServer { + /// It stops the server and returns a `UdpServer` controller in `stopped` + /// state. + /// /// # Errors /// /// Will return `Err` if the oneshot channel to send the stop signal @@ -92,11 +154,14 @@ impl UdpServer { } } +/// A UDP server instance launcher. pub struct Udp { socket: Arc, } impl Udp { + /// Creates a new `Udp` instance. + /// /// # Errors /// /// Will return `Err` unable to bind to the supplied `bind_address`. @@ -108,6 +173,8 @@ impl Udp { }) } + /// It starts the UDP server instance. + /// /// # Panics /// /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. @@ -136,6 +203,8 @@ impl Udp { } } + /// It starts the UDP server instance with graceful shutdown. 
+ /// /// # Panics /// /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. diff --git a/tests/servers/api/v1/asserts.rs b/tests/servers/api/v1/asserts.rs index 1b1f204a2..955293db1 100644 --- a/tests/servers/api/v1/asserts.rs +++ b/tests/servers/api/v1/asserts.rs @@ -134,6 +134,6 @@ async fn assert_unhandled_rejection(response: Response, reason: &str) { let response_text = response.text().await.unwrap(); assert!( response_text.contains(&reason_text), - ":\n response: `\"{response_text}\"`\n dose not contain: `\"{reason_text}\"`." + ":\n response: `\"{response_text}\"`\n does not contain: `\"{reason_text}\"`." ); } From ae5f6286a3486e9186ea27a54bee69503a5421a7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 11 Apr 2023 18:26:43 +0100 Subject: [PATCH 0506/1003] docs: [#275] crate docs for configuration package --- packages/configuration/src/lib.rs | 322 +++++++++++++++++++++++++++++- src/servers/apis/mod.rs | 4 +- src/servers/apis/server.rs | 2 +- src/tracker/mod.rs | 2 +- 4 files changed, 325 insertions(+), 5 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index d5beca236..6b051b572 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -1,3 +1,231 @@ +//! Configuration data structures for [Torrust Tracker](https://docs.rs/torrust-tracker). +//! +//! This module contains the configuration data structures for the +//! Torrust Tracker, which is a `BitTorrent` tracker server. +//! +//! The configuration is loaded from a [TOML](https://toml.io/en/) file +//! `config.toml` in the project root folder or from an environment variable +//! with the same content as the file. +//! +//! When you run the tracker without a configuration file, a new one will be +//! created with the default values, but the tracker immediately exits. You can +//! then edit the configuration file and run the tracker again. +//! +//! 
Configuration can not only be loaded from a file, but also from environment +//! variable `TORRUST_TRACKER_CONFIG`. This is useful when running the tracker +//! in a Docker container or environments where you do not have a persistent +//! storage or you cannot inject a configuration file. Refer to +//! [`Torrust Tracker documentation`](https://docs.rs/torrust-tracker) for more +//! information about how to pass configuration to the tracker. +//! +//! # Table of contents +//! +//! - [Sections](#sections) +//! - [Port binding](#port-binding) +//! - [TSL support](#tsl-support) +//! - [Generating self-signed certificates](#generating-self-signed-certificates) +//! - [Default configuration](#default-configuration) +//! +//! ## Sections +//! +//! Each section in the toml structure is mapped to a data structure. For +//! example, the `[http_api]` section (configuration for the tracker HTTP API) +//! is mapped to the [`HttpApi`](HttpApi) structure. +//! +//! > **NOTICE**: some sections are arrays of structures. For example, the +//! > `[[udp_trackers]]` section is an array of [`UdpTracker`](UdpTracker) since +//! > you can have multiple running UDP trackers bound to different ports. +//! +//! Please refer to the documentation of each structure for more information +//! about each section. +//! +//! - [`Core configuration`](crate::Configuration) +//! - [`HTTP API configuration`](crate::HttpApi) +//! - [`HTTP Tracker configuration`](crate::HttpTracker) +//! - [`UDP Tracker configuration`](crate::UdpTracker) +//! +//! ## Port binding +//! +//! For the API, HTTP and UDP trackers you can bind to a random port by using +//! port `0`. For example, if you want to bind to a random port on all +//! interfaces, use `0.0.0.0:0`. The OS will choose a random port but the +//! tracker will not print the port it is listening to when it starts. It just +//! says `Starting Torrust HTTP tracker server on: http://0.0.0.0:0`. It shows +//! 
the port used in the configuration file, and not the port the +//! tracker is actually listening to. This is a planned feature, see issue +//! [186](https://github.com/torrust/torrust-tracker/issues/186) for more +//! information. +//! +//! ## TSL support +//! +//! For the API and HTTP tracker you can enable TSL by setting `ssl_enabled` to +//! `true` and setting the paths to the certificate and key files. +//! +//! Typically, you will have a directory structure like this: +//! +//! ```text +//! storage/ +//! ├── database +//! │ └── data.db +//! └── ssl_certificates +//! ├── localhost.crt +//! └── localhost.key +//! ``` +//! +//! where you can store all the persistent data. +//! +//! Alternatively, you could setup a reverse proxy like Nginx or Apache to +//! handle the SSL/TLS part and forward the requests to the tracker. If you do +//! that, you should set [`on_reverse_proxy`](crate::Configuration::on_reverse_proxy) +//! to `true` in the configuration file. It's out of scope for this +//! documentation to explain in detail how to setup a reverse proxy, but the +//! configuration file should be something like this: +//! +//! For [NGINX](https://docs.nginx.com/nginx/admin-guide/web-server/reverse-proxy/): +//! +//! ```text +//! # HTTPS only (with SSL - force redirect to HTTPS) +//! +//! server { +//! listen 80; +//! server_name tracker.torrust.com; +//! +//! return 301 https://$host$request_uri; +//! } +//! +//! server { +//! listen 443; +//! server_name tracker.torrust.com; +//! +//! ssl_certificate CERT_PATH +//! ssl_certificate_key CERT_KEY_PATH; +//! +//! location / { +//! proxy_set_header X-Forwarded-For $remote_addr; +//! proxy_pass http://127.0.0.1:6969; +//! } +//! } +//! ``` +//! +//! For [Apache](https://httpd.apache.org/docs/2.4/howto/reverse_proxy.html): +//! +//! ```text +//! # HTTPS only (with SSL - force redirect to HTTPS) +//! +//! +//! ServerAdmin webmaster@tracker.torrust.com +//! ServerName tracker.torrust.com +//! +//! +//! RewriteEngine on +//! 
RewriteCond %{HTTPS} off +//! RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [END,NE,R=permanent] +//! +//! +//! +//! +//! +//! ServerAdmin webmaster@tracker.torrust.com +//! ServerName tracker.torrust.com +//! +//! +//! Order allow,deny +//! Allow from all +//! +//! +//! ProxyPreserveHost On +//! ProxyRequests Off +//! AllowEncodedSlashes NoDecode +//! +//! ProxyPass / http://localhost:3000/ +//! ProxyPassReverse / http://localhost:3000/ +//! ProxyPassReverse / http://tracker.torrust.com/ +//! +//! RequestHeader set X-Forwarded-Proto "https" +//! RequestHeader set X-Forwarded-Port "443" +//! +//! ErrorLog ${APACHE_LOG_DIR}/tracker.torrust.com-error.log +//! CustomLog ${APACHE_LOG_DIR}/tracker.torrust.com-access.log combined +//! +//! SSLCertificateFile CERT_PATH +//! SSLCertificateKeyFile CERT_KEY_PATH +//! +//! +//! ``` +//! +//! ## Generating self-signed certificates +//! +//! For testing purposes, you can use self-signed certificates. +//! +//! Refer to [Let's Encrypt - Certificates for localhost](https://letsencrypt.org/docs/certificates-for-localhost/) +//! for more information. +//! +//! Running the following command will generate a certificate (`localhost.crt`) +//! and key (`localhost.key`) file in your current directory: +//! +//! ```s +//! openssl req -x509 -out localhost.crt -keyout localhost.key \ +//! -newkey rsa:2048 -nodes -sha256 \ +//! -subj '/CN=localhost' -extensions EXT -config <( \ +//! printf "[dn]\nCN=localhost\n[req]\ndistinguished_name = dn\n[EXT]\nsubjectAltName=DNS:localhost\nkeyUsage=digitalSignature\nextendedKeyUsage=serverAuth") +//! ``` +//! +//! You can then use the generated files in the configuration file: +//! +//! ```s +//! [[http_trackers]] +//! enabled = true +//! ... +//! ssl_cert_path = "./storage/ssl_certificates/localhost.crt" +//! ssl_key_path = "./storage/ssl_certificates/localhost.key" +//! +//! [http_api] +//! enabled = true +//! ... +//! ssl_cert_path = "./storage/ssl_certificates/localhost.crt" +//! 
ssl_key_path = "./storage/ssl_certificates/localhost.key" +//! ``` +//! +//! ## Default configuration +//! +//! The default configuration is: +//! +//! ```toml +//! log_level = "info" +//! mode = "public" +//! db_driver = "Sqlite3" +//! db_path = "./storage/database/data.db" +//! announce_interval = 120 +//! min_announce_interval = 120 +//! max_peer_timeout = 900 +//! on_reverse_proxy = false +//! external_ip = "0.0.0.0" +//! tracker_usage_statistics = true +//! persistent_torrent_completed_stat = false +//! inactive_peer_cleanup_interval = 600 +//! remove_peerless_torrents = true +//! +//! [[udp_trackers]] +//! enabled = false +//! bind_address = "0.0.0.0:6969" +//! +//! [[http_trackers]] +//! enabled = false +//! bind_address = "0.0.0.0:7070" +//! ssl_enabled = false +//! ssl_cert_path = "" +//! ssl_key_path = "" +//! +//! [http_api] +//! enabled = true +//! bind_address = "127.0.0.1:1212" +//! ssl_enabled = false +//! ssl_cert_path = "" +//! ssl_key_path = "" +//! +//! [http_api.access_tokens] +//! admin = "MyAccessToken" +//!``` use std::collections::{HashMap, HashSet}; use std::net::IpAddr; use std::panic::Location; @@ -14,38 +242,67 @@ use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; +/// Configuration for each UDP tracker. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct UdpTracker { + /// Weather the UDP tracker is enabled or not. pub enabled: bool, + /// The address the tracker will bind to. + /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to + /// listen to all interfaces, use `0.0.0.0`. If you want the operating + /// system to choose a random port, use port `0`. pub bind_address: String, } +/// Configuration for each HTTP tracker. #[serde_as] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct HttpTracker { + /// Weather the HTTP tracker is enabled or not. 
pub enabled: bool, + /// The address the tracker will bind to. + /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to + /// listen to all interfaces, use `0.0.0.0`. If you want the operating + /// system to choose a random port, use port `0`. pub bind_address: String, + /// Weather the HTTP tracker will use SSL or not. pub ssl_enabled: bool, + /// Path to the SSL certificate file. Only used if `ssl_enabled` is `true`. #[serde_as(as = "NoneAsEmptyString")] pub ssl_cert_path: Option, + /// Path to the SSL key file. Only used if `ssl_enabled` is `true`. #[serde_as(as = "NoneAsEmptyString")] pub ssl_key_path: Option, } +/// Configuration for the HTTP API. #[serde_as] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct HttpApi { + /// Weather the HTTP API is enabled or not. pub enabled: bool, + /// The address the tracker will bind to. + /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to + /// listen to all interfaces, use `0.0.0.0`. If you want the operating + /// system to choose a random port, use port `0`. pub bind_address: String, + /// Weather the HTTP API will use SSL or not. pub ssl_enabled: bool, + /// Path to the SSL certificate file. Only used if `ssl_enabled` is `true`. #[serde_as(as = "NoneAsEmptyString")] pub ssl_cert_path: Option, + /// Path to the SSL key file. Only used if `ssl_enabled` is `true`. #[serde_as(as = "NoneAsEmptyString")] pub ssl_key_path: Option, + /// Access tokens for the HTTP API. The key is a label identifying the + /// token and the value is the token itself. The token is used to + /// authenticate the user. All tokens are valid for all endpoints and have + /// the all permissions. pub access_tokens: HashMap, } impl HttpApi { + /// Checks if the given token is one of the token in the configuration. 
#[must_use] pub fn contains_token(&self, token: &str) -> bool { let tokens: HashMap = self.access_tokens.clone(); @@ -54,14 +311,24 @@ impl HttpApi { } } +/// Core configuration for the tracker. #[allow(clippy::struct_excessive_bools)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { + /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, + /// `Debug` and `Trace`. Default is `Info`. pub log_level: Option, + /// Tracker mode. See [`TrackerMode`](torrust_tracker_primitives::TrackerMode) for more information. pub mode: TrackerMode, // Database configuration + /// Database driver. Possible values are: `Sqlite3`, and `MySQL`. pub db_driver: DatabaseDriver, + /// Database connection string. The format depends on the database driver. + /// For `Sqlite3`, the format is `path/to/database.db`, for example: + /// `./storage/database/data.db`. + /// For `Mysql`, the format is `mysql://db_user:db_user_password:port/db_name`, for + /// example: `root:password@localhost:3306/torrust`. pub db_path: String, /// Interval in seconds that the client should wait between sending regular @@ -88,35 +355,77 @@ pub struct Configuration { /// could lead to excessive load on the tracker or even getting banned by /// the tracker for not adhering to the rules. pub min_announce_interval: u32, + /// Weather the tracker is behind a reverse proxy or not. + /// If the tracker is behind a reverse proxy, the `X-Forwarded-For` header + /// sent from the proxy will be used to get the client's IP address. pub on_reverse_proxy: bool, + /// The external IP address of the tracker. If the client is using a + /// loopback IP address, this IP address will be used instead. If the peer + /// is using a loopback IP address, the tracker assumes that the peer is + /// in the same network as the tracker and will use the tracker's IP + /// address instead. pub external_ip: Option, + /// Weather the tracker should collect statistics about tracker usage. 
+ /// If enabled, the tracker will collect statistics like the number of + /// connections handled, the number of announce requests handled, etc. + /// Refer to the [`Tracker`](https://docs.rs/torrust-tracker) for more + /// information about the collected metrics. pub tracker_usage_statistics: bool, + /// If enabled the tracker will persist the number of completed downloads. + /// That's how many times a torrent has been downloaded completely. pub persistent_torrent_completed_stat: bool, // Cleanup job configuration + /// Maximum time in seconds that a peer can be inactive before being + /// considered an inactive peer. If a peer is inactive for more than this + /// time, it will be removed from the torrent peer list. pub max_peer_timeout: u32, + /// Interval in seconds that the cleanup job will run to remove inactive + /// peers from the torrent peer list. pub inactive_peer_cleanup_interval: u64, + /// If enabled, the tracker will remove torrents that have no peers. + /// THe clean up torrent job runs every `inactive_peer_cleanup_interval` + /// seconds and it removes inactive peers. Eventually, the peer list of a + /// torrent could be empty and the torrent will be removed if this option is + /// enabled. pub remove_peerless_torrents: bool, // Server jobs configuration + /// The list of UDP trackers the tracker is running. Each UDP tracker + /// represents a UDP server that the tracker is running and it has its own + /// configuration. pub udp_trackers: Vec, + /// The list of HTTP trackers the tracker is running. Each HTTP tracker + /// represents a HTTP server that the tracker is running and it has its own + /// configuration. pub http_trackers: Vec, + /// The HTTP API configuration. pub http_api: HttpApi, } +/// Errors that can occur when loading the configuration. #[derive(Error, Debug)] pub enum Error { + /// Unable to load the configuration from the environment variable. 
+ /// This error only occurs if there is no configuration file and the + /// `TORRUST_TRACKER_CONFIG` environment variable is not set. #[error("Unable to load from Environmental Variable: {source}")] UnableToLoadFromEnvironmentVariable { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, + /// If you run the tracker without providing the configuration (via the + /// `TORRUST_TRACKER_CONFIG` environment variable or configuration file), + /// the tracker will create a default configuration file but it will not + /// load it. It will return this error instead and you have to restart the + /// it. #[error("Default configuration created at: `{path}`, please review and reload tracker, {location}")] CreatedNewConfigHalt { location: &'static Location<'static>, path: String, }, + /// Unable to load the configuration from the configuration file. #[error("Failed processing the configuration: {source}")] ConfigError { source: LocatedError<'static, ConfigError> }, } @@ -176,6 +485,8 @@ impl Default for Configuration { } impl Configuration { + /// Returns the tracker public IP address id defined in the configuration, + /// and `None` otherwise. #[must_use] pub fn get_ext_ip(&self) -> Option { match &self.external_ip { @@ -187,6 +498,8 @@ impl Configuration { } } + /// Loads the configuration from the configuration file. + /// /// # Errors /// /// Will return `Err` if `path` does not exist or has a bad configuration. @@ -214,6 +527,10 @@ impl Configuration { Ok(torrust_config) } + /// Loads the configuration from the environment variable. The whole + /// configuration must be in the environment variable. It contains the same + /// configuration as the configuration file with the same format. + /// /// # Errors /// /// Will return `Err` if the environment variable does not exist or has a bad configuration. @@ -232,10 +549,13 @@ impl Configuration { } } + /// Saves the configuration to the configuration file. 
+ /// /// # Errors /// /// Will return `Err` if `filename` does not exist or the user does not have - /// permission to read it. + /// permission to read it. Will also return `Err` if the configuration is + /// not valid or cannot be encoded to TOML. pub fn save_to_file(&self, path: &str) -> Result<(), Error> { let toml_string = toml::to_string(self).expect("Could not encode TOML value"); fs::write(path, toml_string).expect("Could not write to file!"); diff --git a/src/servers/apis/mod.rs b/src/servers/apis/mod.rs index 203f1d146..eb278bf3c 100644 --- a/src/servers/apis/mod.rs +++ b/src/servers/apis/mod.rs @@ -35,7 +35,7 @@ //! admin = "MyAccessToken" //! ``` //! -//! Refer to [torrust-tracker-configuration](https://docs.rs/torrust-tracker-configuration>) +//! Refer to [`torrust-tracker-configuration`](torrust_tracker_configuration) //! for more information about the API configuration. //! //! When you run the tracker with enabled API, you will see the following message: @@ -99,7 +99,7 @@ //! The token label is used to identify the token. All tokens have full access //! to the API. //! -//! Refer to [torrust-tracker-configuration](https://docs.rs/torrust-tracker-configuration>) +//! Refer to [`torrust-tracker-configuration`](torrust_tracker_configuration) //! for more information about the API configuration and to the //! [`auth`](crate::servers::apis::v1::middlewares::auth) middleware for more //! information about the authentication process. diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 76396cc51..91821aa79 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -140,7 +140,7 @@ impl Launcher { /// Starts the API server with graceful shutdown. /// /// If TLS is enabled in the configuration, it will start the server with - /// TLS. See [torrust-tracker-configuration](https://docs.rs/torrust-tracker-configuration>) + /// TLS. 
See [`torrust-tracker-configuration`](torrust_tracker_configuration) /// for more information about configuration. pub fn start( cfg: &torrust_tracker_configuration::HttpApi, diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 03853e1aa..2cabd5a82 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -468,7 +468,7 @@ use crate::tracker::databases::Database; /// Typically, the `Tracker` is used by a higher application service that handles /// the network layer. pub struct Tracker { - /// `Tracker` configuration. See + /// `Tracker` configuration. See [`torrust-tracker-configuration`](torrust_tracker_configuration) pub config: Arc, /// A database driver implementation: [`Sqlite3`](crate::tracker::databases::sqlite) /// or [`MySQL`](crate::tracker::databases::mysql) From a109168cd30a05648bec5d6be643e9525d76032f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 12 Apr 2023 07:00:27 +0100 Subject: [PATCH 0507/1003] feat: dependabot workflow To open pull requests automatically to keep the dependencies up-to-date . 
--- .github/dependabot.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..8f36cb692 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,13 @@ +version: 2 +updates: + - package-ecosystem: github-actions + directory: / + schedule: + interval: daily + target-branch: "develop" + + - package-ecosystem: cargo + directory: / + schedule: + interval: daily + target-branch: "develop" From 117dc0bbfe59ecac7ecf94751dd01d962c785c99 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 11 Apr 2023 19:34:53 +0100 Subject: [PATCH 0508/1003] docs: [#275] crate docs for locate-error package --- packages/located-error/src/lib.rs | 39 ++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/packages/located-error/src/lib.rs b/packages/located-error/src/lib.rs index d45517e5a..67c432528 100644 --- a/packages/located-error/src/lib.rs +++ b/packages/located-error/src/lib.rs @@ -1,11 +1,44 @@ -// https://stackoverflow.com/questions/74336993/getting-line-numbers-with-when-using-boxdyn-stderrorerror - +//! This crate provides a wrapper around an error that includes the location of +//! the error. +//! +//! ```rust +//! use std::error::Error; +//! use std::panic::Location; +//! use std::sync::Arc; +//! use torrust_tracker_located_error::{Located, LocatedError}; +//! +//! #[derive(thiserror::Error, Debug)] +//! enum TestError { +//! #[error("Test")] +//! Test, +//! } +//! +//! #[track_caller] +//! fn get_caller_location() -> Location<'static> { +//! *Location::caller() +//! } +//! +//! let e = TestError::Test; +//! +//! let b: LocatedError = Located(e).into(); +//! let l = get_caller_location(); +//! +//! assert!(b.to_string().contains("Test, src/lib.rs")); +//! ``` +//! +//! # Credits +//! +//! use std::error::Error; use std::panic::Location; use std::sync::Arc; +/// A generic wrapper around an error. 
+/// +/// Where `E` is the inner error (source error). pub struct Located(pub E); +/// A wrapper around an error that includes the location of the error. #[derive(Debug)] pub struct LocatedError<'a, E> where @@ -78,7 +111,7 @@ mod tests { use std::panic::Location; use super::LocatedError; - use crate::located_error::Located; + use crate::Located; #[derive(thiserror::Error, Debug)] enum TestError { From cb6fc79e316528e2f1ffd807bdb3e46c255d78ee Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 12 Apr 2023 12:10:05 +0100 Subject: [PATCH 0509/1003] fix: update dependencies --- Cargo.lock | 601 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 337 insertions(+), 264 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3bc78bd67..d7053d618 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -81,13 +81,13 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "async-trait" -version = "0.1.66" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.14", ] [[package]] @@ -98,9 +98,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.10" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8582122b8edba2af43eaf6b80dbfd33f421b5a0eb3a3113d21bc096ac5b44faf" +checksum = "3b32c5ea3aabaf4deb5f5ced2d688ec0844c881c9e6c696a8b769a05fc691e62" dependencies = [ "async-trait", "axum-core", @@ -124,7 +124,6 @@ dependencies = [ "sync_wrapper", "tokio", "tower", - "tower-http", "tower-layer", "tower-service", ] @@ -142,9 +141,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.3.3" +version = "0.3.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2f958c80c248b34b9a877a643811be8dbca03ca5ba827f2b63baf3a81e5fc4e" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ "async-trait", "bytes", @@ -159,9 +158,9 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25e4a990e1593e286b1b96e6df76da9dbcb84945a810287ca8101f1a4f000f61" +checksum = "bace45b270e36e3c27a190c65883de6dfc9f1d18c829907c127464815dc67b24" dependencies = [ "arc-swap", "bytes", @@ -278,9 +277,9 @@ dependencies = [ [[package]] name = "borsh" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40f9ca3698b2e4cb7c15571db0abc5551dca417a21ae8140460b50309bb2cc62" +checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b" dependencies = [ "borsh-derive", "hashbrown 0.13.2", @@ -288,37 +287,37 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "598b3eacc6db9c3ee57b22707ad8f6a8d2f6d442bfe24ffeb8cbb70ca59e6a35" +checksum = "0754613691538d51f329cce9af41d7b7ca150bc973056f1156611489475f54f7" dependencies = [ "borsh-derive-internal", "borsh-schema-derive-internal", "proc-macro-crate", "proc-macro2", - "syn", + "syn 1.0.109", ] [[package]] name = "borsh-derive-internal" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "186b734fa1c9f6743e90c95d7233c9faab6360d1a96d4ffa19d9cfd1e9350f8a" +checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "borsh-schema-derive-internal" -version = "0.10.2" +version = "0.10.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "99b7ff1008316626f485991b960ade129253d4034014616b94f309a15366cc49" +checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -352,7 +351,7 @@ checksum = "e31225543cb46f81a7e224762764f4a6a0f097b1db0b175f69e8065efaa42de5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -390,9 +389,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "js-sys", @@ -406,9 +405,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", @@ -417,9 +416,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.49" +version = "0.1.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" dependencies = [ "cc", ] @@ -471,15 +470,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" 
-version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "280a9f2d8b3a38871a3c8a46fb80db65e5e5ed97da80c4d08bf27fb63e35e181" dependencies = [ "libc", ] @@ -509,9 +508,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if", "crossbeam-utils", @@ -572,9 +571,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" +checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" dependencies = [ "cc", "cxxbridge-flags", @@ -584,9 +583,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" +checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" dependencies = [ "cc", "codespan-reporting", @@ -594,24 +593,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn", + "syn 2.0.14", ] [[package]] name = "cxxbridge-flags" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" +checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" [[package]] name = "cxxbridge-macro" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" +checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.14", ] [[package]] @@ -635,7 +634,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn", + "syn 1.0.109", ] [[package]] @@ -646,7 +645,7 @@ checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ "darling_core", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -659,18 +658,18 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn", + "syn 1.0.109", ] [[package]] name = "derive_utils" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7590f99468735a318c254ca9158d0c065aa9b5312896b5a043b5e39bc96f5fa2" +checksum = "dff8f6a793f528719e1ad4425a52a213ac1214ac7158c5fb97a7f50a64bfc96d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.14", ] [[package]] @@ -718,13 +717,13 @@ dependencies = [ [[package]] name = "errno" -version = "0.2.8" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -769,9 +768,9 @@ dependencies = [ [[package]] name = "fern" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bdd7b0849075e79ee9a1836df22c717d1eba30451796fdc631b04565dd11e2a" +checksum = "d9f0c14694cbd524c8720dd69b0e3179344f04ebb5f90f2e4a440c6ea3b2f1ee" dependencies = [ "log", ] @@ -867,7 +866,7 @@ checksum = "b83164912bb4c97cfe0772913c7af7387ee2e00cb6d4636fb65a35b3d0c8f173" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -879,7 +878,7 @@ dependencies = 
[ "frunk_core", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -903,7 +902,7 @@ dependencies = [ "frunk_proc_macro_helpers", "proc-macro-hack", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -914,9 +913,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -929,9 +928,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -939,15 +938,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -956,38 +955,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" +checksum = 
"4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-macro" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.14", ] [[package]] name = "futures-sink" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-util" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ -1003,9 +1002,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -1013,9 +1012,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if", "libc", @@ -1089,6 +1088,12 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" + [[package]] name = "hex" version = "0.4.3" @@ -1117,12 +1122,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "http-range-header" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" - [[package]] name = "httparse" version = "1.8.0" @@ -1137,9 +1136,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.24" +version = "0.14.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" +checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" dependencies = [ "bytes", "futures-channel", @@ -1174,16 +1173,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi", + "windows", ] [[package]] @@ -1214,9 +1213,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = 
"bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", @@ -1234,30 +1233,30 @@ dependencies = [ [[package]] name = "io-enum" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4b0d47a958cb166282b4dc4840a35783e861c2b39080af846e6481ebe145eee" +checksum = "01c662c349c9c9f542e7bfd9134143beb27da4b20dfbc3b3ef5b2a5b507dafbd" dependencies = [ "derive_utils", - "quote", - "syn", + "syn 2.0.14", ] [[package]] name = "io-lifetimes" -version = "1.0.6" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" +checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" dependencies = [ + "hermit-abi 0.3.1", "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] name = "ipnet" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" +checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "itertools" @@ -1270,9 +1269,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "js-sys" @@ -1381,9 +1380,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.140" +version = "0.2.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" +checksum = "3304a64d199bb964be99741b7a14d26972741915b3649639149b2479bb46f4b5" [[package]] name = "libloading" @@ -1434,9 +1433,9 @@ checksum = 
"0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.1.4" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" +checksum = "d59d8c75012853d2e872fb56bc8a2e53718e2cafe1a4c823143141c6d90c322f" [[package]] name = "local-ip-address" @@ -1501,9 +1500,9 @@ dependencies = [ [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "minimal-lexical" @@ -1534,9 +1533,9 @@ dependencies = [ [[package]] name = "mockall" -version = "0.11.3" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50e4a1c770583dac7ab5e2f6c139153b783a53a1bbee9729613f193e59828326" +checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" dependencies = [ "cfg-if", "downcast", @@ -1549,14 +1548,14 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.11.3" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "832663583d5fa284ca8810bf7015e46c9fff9622d3cf34bd1eea5003fec06dd0" +checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1726,7 +1725,7 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" dependencies = [ - "hermit-abi", + "hermit-abi 0.2.6", "libc", ] @@ -1747,9 +1746,9 @@ checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "openssl" -version = "0.10.45" +version = "0.10.50" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" +checksum = "7e30d8bc91859781f0a943411186324d580f2bbeb71b452fe91ae344806af3f1" dependencies = [ "bitflags", "cfg-if", @@ -1762,13 +1761,13 @@ dependencies = [ [[package]] name = "openssl-macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.14", ] [[package]] @@ -1779,20 +1778,19 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.25.1+1.1.1t" +version = "111.25.2+1.1.1t" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ef9a9cc6ea7d9d5e7c4a913dc4b48d0e359eddf01af1dfec96ba7064b4aba10" +checksum = "320708a054ad9b3bf314688b5db87cf4d6683d64cfc835e2337924ae62bf4431" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.80" +version = "0.9.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" +checksum = "0d3d193fb1488ad46ffe3aaabc912cc931d02ee8518fe2959aea8ef52718b0c0" dependencies = [ - "autocfg", "cc", "libc", "openssl-src", @@ -1828,7 +1826,7 @@ checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "windows-sys 0.45.0", ] @@ -1862,9 +1860,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.6" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" 
+checksum = "7b1403e8401ad5dedea73c626b99758535b342502f8d1e361f4a2dd952749122" dependencies = [ "thiserror", "ucd-trie", @@ -1872,9 +1870,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.6" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7" +checksum = "be99c4c1d2fc2769b1d00239431d711d08f6efedcecb8b6e30707160aee99c15" dependencies = [ "pest", "pest_generator", @@ -1882,22 +1880,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.6" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" +checksum = "e56094789873daa36164de2e822b3888c6ae4b4f9da555a1103587658c805b1e" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn", + "syn 2.0.14", ] [[package]] name = "pest_meta" -version = "2.5.6" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" +checksum = "6733073c7cff3d8459fda0e42f13a047870242aed8b509fe98000928975f359e" dependencies = [ "once_cell", "pest", @@ -1921,7 +1919,7 @@ checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1964,15 +1962,15 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72f883590242d3c6fc5bf50299011695fa6590c2c70eac95ee1bdb9a733ad1a2" +checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174" [[package]] name = "predicates-tree" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54ff541861505aabf6ea722d2131ee980b8276e10a1297b94e896dd8b621850d" 
+checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf" dependencies = [ "predicates-core", "termtree", @@ -1995,9 +1993,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.51" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ "unicode-ident", ] @@ -2019,14 +2017,14 @@ checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "quote" -version = "1.0.23" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] @@ -2107,11 +2105,20 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags", +] + [[package]] name = "regex" -version = "1.7.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" dependencies = [ "aho-corasick", "memchr", @@ -2120,9 +2127,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = 
"f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "rend" @@ -2135,9 +2142,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.14" +version = "0.11.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" +checksum = "27b71749df584b7f4cac2c426c127a7c785a5106cc98f7a8feb044115f0fa254" dependencies = [ "base64 0.21.0", "bytes", @@ -2187,9 +2194,9 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.40" +version = "0.7.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c30f1d45d9aa61cbc8cd1eb87705470892289bb2d01943e7803b873a57404dc3" +checksum = "21499ed91807f07ae081880aabb2ccc0235e9d88011867d984525e9a4c3cfa3e" dependencies = [ "bytecheck", "hashbrown 0.12.3", @@ -2201,13 +2208,13 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.40" +version = "0.7.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff26ed6c7c4dfc2aa9480b86a60e3c7233543a270a680e10758a507c5a4ce476" +checksum = "ac1c672430eb41556291981f45ca900a0239ad007242d1cb4b4167af842db666" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2247,9 +2254,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.28.1" +version = "1.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13cf35f7140155d02ba4ec3294373d513a3c7baa8364c162b030e33c61520a8" +checksum = "26bd36b60561ee1fb5ec2817f198b6fd09fa571c897a5e86d1487cfc2b096dfc" dependencies = [ "arrayvec", "borsh", @@ -2265,9 +2272,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b" [[package]] name = 
"rustc-hash" @@ -2286,16 +2293,16 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.9" +version = "0.37.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" +checksum = "85597d61f83914ddeba6a47b3b8ffe7365107221c2e557ed94426489fefb5f77" dependencies = [ "bitflags", "errno", "io-lifetimes", "libc", "linux-raw-sys", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -2408,15 +2415,15 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "serde" -version = "1.0.154" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cdd151213925e7f1ab45a9bbfb129316bd00799784b174b7cc7bcd16961c49e" +checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" dependencies = [ "serde_derive", ] @@ -2442,20 +2449,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.154" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fc80d722935453bcafdc2c9a73cd6fac4dc1938f0346035d84bf99fa9e33217" +checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.14", ] [[package]] name = "serde_json" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" +checksum = "d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744" dependencies = [ "itoa", "ryu", @@ -2464,22 +2471,22 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.10" +version = 
"0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db0969fff533976baadd92e08b1d102c5a3d8a8049eadfd69d4d1e3c5b2ed189" +checksum = "f7f05c1d5476066defcdfacce1f52fc3cae3af1d3089727100c02ae92e5abbe0" dependencies = [ "serde", ] [[package]] name = "serde_repr" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395627de918015623b32e7669714206363a7fc00382bf477e72c1f7533e8eafc" +checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.14", ] [[package]] @@ -2505,9 +2512,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "2.3.0" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ea48c9627169d206b35905699f513f513c303ab9d964a59b44fdcf66c1d1ab7" +checksum = "331bb8c3bf9b92457ab7abecf07078c13f7d270ba490103e84e8b014490cd0b0" dependencies = [ "base64 0.13.1", "chrono", @@ -2521,14 +2528,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "2.3.0" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6b7e52858f9f06c25e1c566bbb4ab428200cb3b30053ea09dc50837de7538b" +checksum = "859011bddcc11f289f07f467cc1fe01c7a941daa4d8f6c40d4d1c92eb6d9319c" dependencies = [ "darling", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2638,6 +2645,17 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn" +version = "2.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcf316d5356ed6847742d036f8a39c3b8435cac10bd528a4bd461928a6ab34d5" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "sync_wrapper" version = "0.1.2" @@ -2652,15 +2670,15 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.4.0" +version = "3.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" +checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ "cfg-if", "fastrand", - "redox_syscall", + "redox_syscall 0.3.5", "rustix", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -2674,28 +2692,28 @@ dependencies = [ [[package]] name = "termtree" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059e91184749cb66be6dc994f67f182b6d897cb3df74a5bf66b5e709295fd8" +checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.14", ] [[package]] @@ -2753,14 +2771,13 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.26.0" +version = "1.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" +checksum = "d0de47a4eecbe11f498978a9b29d792f0d2692d1dd003650c24c76510e3bc001" dependencies = [ "autocfg", "bytes", "libc", - "memchr", "mio", "num_cpus", "pin-project-lite", @@ -2772,13 +2789,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.2" 
+version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.14", ] [[package]] @@ -2827,9 +2844,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7afcae9e3f0fe2c370fd4657108972cbb2fa9db1b9f84849cefd80741b01cb6" +checksum = "b403acf6f2bb0859c93c7f0d967cb4a75a7ac552100f9322faf64dc047669b21" dependencies = [ "serde", "serde_spanned", @@ -2848,9 +2865,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.4" +version = "0.19.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a1eb0622d28f4b9c90adc4ea4b2b46b47663fde9ac5fafcb14a1369d5508825" +checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13" dependencies = [ "indexmap", "serde", @@ -2898,7 +2915,7 @@ dependencies = [ "serde_with", "thiserror", "tokio", - "toml 0.7.2", + "toml 0.7.3", "torrust-tracker-configuration", "torrust-tracker-located-error", "torrust-tracker-primitives", @@ -2964,25 +2981,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tower-http" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1d42a9b3f3ec46ba828e8d376aec14592ea199f70a06a548587ecd1c4ab658" -dependencies = [ - "bitflags", - "bytes", - "futures-core", - "futures-util", - "http", - "http-body", - "http-range-header", - "pin-project-lite", - "tower", - "tower-layer", - "tower-service", -] - [[package]] name = "tower-layer" version = "0.3.2" @@ -3047,9 +3045,9 @@ checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" [[package]] name = "unicode-bidi" -version = "0.3.11" +version = "0.3.13" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" @@ -3091,9 +3089,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" +checksum = "5b55a3fef2a1e3b3a00ce878640918820d3c51081576ac657d23af9fc7928fdb" dependencies = [ "getrandom", ] @@ -3153,7 +3151,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-shared", ] @@ -3187,7 +3185,7 @@ checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3249,19 +3247,28 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +dependencies = [ + "windows-targets 0.48.0", +] + [[package]] name = "windows-sys" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", ] 
[[package]] @@ -3270,71 +3277,137 @@ version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets", + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", ] [[package]] name = "windows-targets" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", ] +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.48.0" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.42.1" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.42.1" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = 
"windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.3.5" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee7b2c67f962bf5042bfd8b6a916178df33a26eec343ae064cb8e069f638fa6f" +checksum = "ae8970b36c66498d8ff1d66685dc86b91b29db0c7739899012f63a63814b4b28" dependencies = [ "memchr", ] From 776d7d9801f4b57d4d0bac25f186134695594ca1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 12 Apr 2023 12:14:42 +0100 Subject: [PATCH 0510/1003] Create SECURITY.md --- SECURITY.md | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..b36d27978 --- 
/dev/null +++ b/SECURITY.md @@ -0,0 +1,27 @@ +# Security Policy + +Thanks for helping make Torrust Tracker safe for everyone. + +## Security + +[Torrust](https://github.com/torrust) takes the security of our software products and services seriously. + +## Reporting Security Issues + +If you believe you have found a security vulnerability in any of our repositories, please report it to us through coordinated disclosure. + +**Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.** + +Instead, please send an email to info[@]nautilus-cyberneering.de. + +Please include as much of the information listed below as you can to help us better understand and resolve the issue: + +- The type of issue (e.g., buffer overflow, SQL injection, or cross-site scripting) +- Full paths of source file(s) related to the manifestation of the issue +- The location of the affected source code (tag/branch/commit or direct URL) +- Any special configuration required to reproduce the issue +- Step-by-step instructions to reproduce the issue +- Proof-of-concept or exploit code (if possible) +- Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. From f78638aa93384ba6f1674574001e210e2874289e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 12 Apr 2023 13:20:07 +0100 Subject: [PATCH 0511/1003] docs: [#279] crate docs for locate-error package --- packages/primitives/src/lib.rs | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index bcd48145f..e6f8cb93b 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -1,27 +1,47 @@ +//! Primitive types for [Torrust Tracker](https://docs.rs/torrust-tracker). +//! +//! This module contains the basic data structures for the [Torrust Tracker](https://docs.rs/torrust-tracker), +//! 
which is a `BitTorrent` tracker server. These structures are used not only +//! by the tracker server crate, but also by other crates in the Torrust +//! ecosystem. use serde::{Deserialize, Serialize}; -// TODO: Move to the database crate once that gets its own crate. +/// The database management system used by the tracker. +/// +/// Refer to: +/// +/// - [Torrust Tracker Configuration](https://docs.rs/torrust-tracker-configuration). +/// - [Torrust Tracker](https://docs.rs/torrust-tracker). +/// +/// For more information about persistence. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, derive_more::Display, Clone)] pub enum DatabaseDriver { + // TODO: Move to the database crate once that gets its own crate. + /// The Sqlite3 database driver. Sqlite3, + /// The MySQL database driver. MySQL, } +/// The mode the tracker will run in. +/// +/// Refer to [Torrust Tracker Configuration](https://docs.rs/torrust-tracker-configuration) +/// to know how to configure the tracker to run in each mode. #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] pub enum TrackerMode { - // Will track every new info hash and serve every peer. + /// Will track every new info hash and serve every peer. #[serde(rename = "public")] Public, - // Will only track whitelisted info hashes. + /// Will only track whitelisted info hashes. 
#[serde(rename = "listed")] Listed, - // Will only serve authenticated peers + /// Will only serve authenticated peers #[serde(rename = "private")] Private, - // Will only track whitelisted info hashes and serve authenticated peers + /// Will only track whitelisted info hashes and serve authenticated peers #[serde(rename = "private_listed")] PrivateListed, } From 9e6e6085725e34762d176c08da07cff05cd58a60 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 12 Apr 2023 13:47:08 +0100 Subject: [PATCH 0512/1003] docs: [#280] crate docs for test-helpers package --- packages/test-helpers/src/configuration.rs | 23 ++++++++++++++++++++-- packages/test-helpers/src/lib.rs | 3 +++ packages/test-helpers/src/random.rs | 3 +++ 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 0b7a269ff..437475ee2 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -1,3 +1,4 @@ +//! Tracker configuration factories for testing. use std::env; use std::net::IpAddr; @@ -6,8 +7,17 @@ use torrust_tracker_primitives::TrackerMode; use crate::random; -/// This configuration is used for testing. It generates random config values so they do not collide -/// if you run more than one tracker at the same time. +/// This configuration is used for testing. It generates random config values +/// so they do not collide if you run more than one tracker at the same time. +/// +/// > **NOTICE**: This configuration is not meant to be used in production. +/// +/// > **NOTICE**: Port 0 is used for ephemeral ports, which means that the OS +/// will assign a random free port for the tracker to use. +/// +/// > **NOTICE**: You can change the log level to `debug` to see the logs of the +/// tracker while running the tests. That can be particularly useful when +/// debugging tests. 
/// /// # Panics /// @@ -46,6 +56,7 @@ pub fn ephemeral() -> Configuration { config } +/// Ephemeral configuration with reverse proxy enabled. #[must_use] pub fn ephemeral_with_reverse_proxy() -> Configuration { let mut cfg = ephemeral(); @@ -55,6 +66,7 @@ pub fn ephemeral_with_reverse_proxy() -> Configuration { cfg } +/// Ephemeral configuration with reverse proxy disabled. #[must_use] pub fn ephemeral_without_reverse_proxy() -> Configuration { let mut cfg = ephemeral(); @@ -64,6 +76,7 @@ pub fn ephemeral_without_reverse_proxy() -> Configuration { cfg } +/// Ephemeral configuration with `public` mode. #[must_use] pub fn ephemeral_mode_public() -> Configuration { let mut cfg = ephemeral(); @@ -73,6 +86,7 @@ pub fn ephemeral_mode_public() -> Configuration { cfg } +/// Ephemeral configuration with `private` mode. #[must_use] pub fn ephemeral_mode_private() -> Configuration { let mut cfg = ephemeral(); @@ -82,6 +96,7 @@ pub fn ephemeral_mode_private() -> Configuration { cfg } +/// Ephemeral configuration with `listed` mode. #[must_use] pub fn ephemeral_mode_whitelisted() -> Configuration { let mut cfg = ephemeral(); @@ -91,6 +106,7 @@ pub fn ephemeral_mode_whitelisted() -> Configuration { cfg } +/// Ephemeral configuration with `private_listed` mode. #[must_use] pub fn ephemeral_mode_private_whitelisted() -> Configuration { let mut cfg = ephemeral(); @@ -100,6 +116,7 @@ pub fn ephemeral_mode_private_whitelisted() -> Configuration { cfg } +/// Ephemeral configuration with a custom external (public) IP for the tracker. #[must_use] pub fn ephemeral_with_external_ip(ip: IpAddr) -> Configuration { let mut cfg = ephemeral(); @@ -109,6 +126,8 @@ pub fn ephemeral_with_external_ip(ip: IpAddr) -> Configuration { cfg } +/// Ephemeral configuration using a wildcard IPv6 for the UDP, HTTP and API +/// services. 
#[must_use] pub fn ephemeral_ipv6() -> Configuration { let mut cfg = ephemeral(); diff --git a/packages/test-helpers/src/lib.rs b/packages/test-helpers/src/lib.rs index e0f350131..e66ea2adc 100644 --- a/packages/test-helpers/src/lib.rs +++ b/packages/test-helpers/src/lib.rs @@ -1,2 +1,5 @@ +//! Testing helpers for [Torrust Tracker](https://docs.rs/torrust-tracker). +//! +//! A collection of functions and types to help with testing the tracker server. pub mod configuration; pub mod random; diff --git a/packages/test-helpers/src/random.rs b/packages/test-helpers/src/random.rs index ffb2ccd6f..2133dcd29 100644 --- a/packages/test-helpers/src/random.rs +++ b/packages/test-helpers/src/random.rs @@ -1,7 +1,10 @@ +//! Random data generators for testing. use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; /// Returns a random alphanumeric string of a certain size. +/// +/// It is useful for generating random names, IDs, etc for testing. pub fn string(size: usize) -> String { thread_rng().sample_iter(&Alphanumeric).take(size).map(char::from).collect() } From f555e6888082386068bfd5f8eeb8d214f1105bf7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 12 Apr 2023 16:10:05 +0100 Subject: [PATCH 0513/1003] docs: update README --- README.md | 154 ++++++++++++++++++++++------------------------------ cSpell.json | 2 + 2 files changed, 68 insertions(+), 88 deletions(-) diff --git a/README.md b/README.md index 4e464dd68..5d74ef130 100644 --- a/README.md +++ b/README.md @@ -1,102 +1,80 @@ # Torrust Tracker -![Test](https://github.com/torrust/torrust-tracker/actions/workflows/test_build_release.yml/badge.svg) - -## Project Description -Torrust Tracker is a lightweight but incredibly powerful and feature-rich BitTorrent tracker made using Rust. 
- - -### Features -* [X] Multiple UDP server and HTTP(S) server blocks for socket binding possible -* [X] Full IPv4 and IPv6 support for both UDP and HTTP(S) -* [X] Private & Whitelisted mode -* [X] Built-in API -* [X] Torrent whitelisting -* [X] Peer authentication using time-bound keys -* [X] newTrackon check supported for both HTTP, UDP, where IPv4 and IPv6 is properly handled -* [X] SQLite3 Persistent loading and saving of the torrent hashes and completed count -* [X] MySQL support added as engine option -* [X] Periodically saving added, interval can be configured - -### Implemented BEPs -* [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The BitTorrent Protocol -* [BEP 7](https://www.bittorrent.org/beps/bep_0007.html): IPv6 Support -* [BEP 15](http://www.bittorrent.org/beps/bep_0015.html): UDP Tracker Protocol for BitTorrent -* [BEP 23](http://bittorrent.org/beps/bep_0023.html): Tracker Returns Compact Peer Lists -* [BEP 27](http://bittorrent.org/beps/bep_0027.html): Private Torrents -* [BEP 41](http://bittorrent.org/beps/bep_0041.html): UDP Tracker Protocol Extensions -* [BEP 48](http://bittorrent.org/beps/bep_0048.html): Tracker Protocol Extension: Scrape + +[![Build & Release](https://github.com/torrust/torrust-tracker/actions/workflows/build_release.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/build_release.yml) [![CI](https://github.com/torrust/torrust-tracker/actions/workflows/test_build_release.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/test_build_release.yml) [![Publish crate](https://github.com/torrust/torrust-tracker/actions/workflows/publish_crate.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/publish_crate.yml) [![Publish docker image](https://github.com/torrust/torrust-tracker/actions/workflows/publish_docker_image.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/publish_docker_image.yml) 
[![Test](https://github.com/torrust/torrust-tracker/actions/workflows/test.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/test.yml) [![Test docker build](https://github.com/torrust/torrust-tracker/actions/workflows/test_docker.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/test_docker.yml) [![Upload code coverage](https://github.com/torrust/torrust-tracker/actions/workflows/codecov.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/codecov.yml) + +Torrust Tracker is a lightweight but incredibly high-performance and feature-rich BitTorrent tracker written in [Rust](https://www.rust-lang.org/). + +It aims to provide a reliable and efficient solution for serving torrents to a vast number of peers while maintaining a high level of performance, robustness, extensibility, security, usability and with community-driven development. + +## Key Features + +* [X] Multiple UDP server and HTTP(S) server blocks for socket binding are possible. +* [X] Full IPv4 and IPv6 support for both UDP and HTTP(S). +* [X] Private & Whitelisted mode. +* [X] Built-in API. +* [X] Torrent whitelisting. +* [X] Peer authentication using time-bound keys. +* [X] [newTrackon](https://newtrackon.com/) check is supported for both HTTP and UDP, where IPv4 and IPv6 are properly handled. +* [X] SQLite3 and MySQL persistence, loading and saving of the torrent hashes and downloads completed count. +* [X] Comprehensive documentation. +* [X] A complete suite of tests. See [code coverage](https://app.codecov.io/gh/torrust/torrust-tracker) report. + +## Implemented BEPs + +* [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The BitTorrent Protocol. +* [BEP 7](https://www.bittorrent.org/beps/bep_0007.html): IPv6 Support. +* [BEP 15](http://www.bittorrent.org/beps/bep_0015.html): UDP Tracker Protocol for BitTorrent. +* [BEP 23](http://bittorrent.org/beps/bep_0023.html): Tracker Returns Compact Peer Lists. 
+* [BEP 27](http://bittorrent.org/beps/bep_0027.html): Private Torrents. +* [BEP 48](http://bittorrent.org/beps/bep_0048.html): Tracker Protocol Extension: Scrape. ## Getting Started -You can get the latest binaries from [releases](https://github.com/torrust/torrust-tracker/releases) or follow the install from scratch instructions below. -### Install From Scratch -1. Clone the repo. -```bash -git clone https://github.com/torrust/torrust-tracker.git -cd torrust-tracker -``` + +Requirements: -2. Build the source code. -```bash -cargo build --release -``` + +* Rust Stable `1.68` +* You might have problems compiling with a machine or docker container with low resources. It has been tested with docker containers with 6 CPUs, 7.5 GB of memory and 2GB of swap. + +You can follow the [documentation](https://docs.rs/torrust-tracker/) to install and use Torrust Tracker in different ways, but if you want to give it a quick try, you can use the following commands: -### Usage -* Run the torrust-tracker once to create the `config.toml` file: -```bash -./target/release/torrust-tracker +```s +git clone https://github.com/torrust/torrust-tracker.git \ + && cd torrust-tracker \ + && cargo build --release \ + && mkdir -p ./storage/database \ + && mkdir -p ./storage/ssl_certificates ``` +And then run `cargo run` twice. The first time to generate the `config.toml` file and the second time to run the tracker with the default configuration. -* Edit the newly created config.toml file according to your liking, see [configuration documentation](https://torrust.github.io/torrust-documentation/torrust-tracker/config/). 
Eg: -```toml -log_level = "info" -mode = "public" -db_driver = "Sqlite3" -db_path = "data.db" -announce_interval = 120 -min_announce_interval = 120 -max_peer_timeout = 900 -on_reverse_proxy = false -external_ip = "0.0.0.0" -tracker_usage_statistics = true -persistent_torrent_completed_stat = false -inactive_peer_cleanup_interval = 600 -remove_peerless_torrents = true - -[[udp_trackers]] -enabled = false -bind_address = "0.0.0.0:6969" - -[[http_trackers]] -enabled = true -bind_address = "0.0.0.0:7070" -ssl_enabled = false -ssl_cert_path = "" -ssl_key_path = "" - -[http_api] -enabled = true -bind_address = "127.0.0.1:1212" - -[http_api.access_tokens] -admin = "MyAccessToken" -``` +After running the tracker these services will be available: +* UDP tracker: `udp://127.0.0.1:6969/announce`. +* HTTP tracker: `http://127.0.0.1:6969/announce`. +* API: `http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken`. -* Run the torrust-tracker again: -```bash -./target/release/torrust-tracker -``` +## Documentation + +* [Crate documentation](https://docs.rs/torrust-tracker/). +* [API documentation](https://torrust.github.io/torrust-documentation/torrust-tracker/api/). + +## Contributing + +We welcome contributions from the community! + +How can you contribute? + +* Bug reports and feature requests. +* Code contributions. You can start by looking at the issues labeled ["good first issues"](https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22). +* Documentation improvements. Check the [documentation](https://docs.rs/torrust-tracker/) and [API documentation](https://torrust.github.io/torrust-documentation/torrust-tracker/api/) for typos, errors, or missing information. +* Participation in the community. You can help by answering questions in the [discussions](https://github.com/torrust/torrust-tracker/discussions). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). 
-### Tracker URL -Your tracker announce URL will be **udp://{tracker-ip:port}** and/or **http://{tracker-ip:port}/announce** and/or **https://{tracker-ip:port}/announce** depending on your bindings. -In private & private_listed mode, tracker keys are added after the tracker URL like: **https://{tracker-ip:port}/announce/{key}**. +There is an ongoing discussion about the license of the project. You can follow the discussion [here](https://github.com/torrust/torrust-tracker/pull/251). -### Built-in API -Read the API documentation [here](https://torrust.github.io/torrust-documentation/torrust-tracker/api/). +## Acknowledgments -### Credits -This project was a joint effort by [Nautilus Cyberneering GmbH](https://nautilus-cyberneering.de/) and [Dutch Bits](https://dutchbits.nl). -Also thanks to [Naim A.](https://github.com/naim94a/udpt) and [greatest-ape](https://github.com/greatest-ape/aquatic) for some parts of the code. -Further added features and functions thanks to [Power2All](https://github.com/power2all). +This project was a joint effort by [Nautilus Cyberneering GmbH](https://nautilus-cyberneering.de/) and [Dutch Bits](https://dutchbits.nl). Also thanks to [Naim A.](https://github.com/naim94a/udpt) and [greatest-ape](https://github.com/greatest-ape/aquatic) for some parts of the code. Further added features and functions thanks to [Power2All](https://github.com/power2all). 
diff --git a/cSpell.json b/cSpell.json index f07e2bfb4..2fa80b58a 100644 --- a/cSpell.json +++ b/cSpell.json @@ -46,6 +46,7 @@ "mockall", "multimap", "myacicontext", + "Naim", "nanos", "nextest", "nocapture", @@ -79,6 +80,7 @@ "torrust", "torrustracker", "trackerid", + "Trackon", "typenum", "Unamed", "untuple", From 09da301d1612af8fff27d629fa59797bf182f937 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 13 Apr 2023 08:24:29 +0100 Subject: [PATCH 0514/1003] docs: [#295] add README to packages --- README.md | 6 +- packages/configuration/LICENSE | 661 +++++++++++++++++++++++++++++++ packages/configuration/README.md | 11 + packages/located-error/LICENSE | 661 +++++++++++++++++++++++++++++++ packages/located-error/README.md | 11 + packages/primitives/LICENSE | 661 +++++++++++++++++++++++++++++++ packages/primitives/README.md | 11 + packages/test-helpers/LICENSE | 661 +++++++++++++++++++++++++++++++ packages/test-helpers/README.md | 11 + 9 files changed, 2692 insertions(+), 2 deletions(-) create mode 100644 packages/configuration/LICENSE create mode 100644 packages/configuration/README.md create mode 100644 packages/located-error/LICENSE create mode 100644 packages/located-error/README.md create mode 100644 packages/primitives/LICENSE create mode 100644 packages/primitives/README.md create mode 100644 packages/test-helpers/LICENSE create mode 100644 packages/test-helpers/README.md diff --git a/README.md b/README.md index 5d74ef130..666868a87 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,9 @@ After running the tracker these services will be available: ## Documentation * [Crate documentation](https://docs.rs/torrust-tracker/). -* [API documentation](https://torrust.github.io/torrust-documentation/torrust-tracker/api/). +* [API `v1`](https://docs.rs/torrust-tracker/3.0.0-alpha.1/torrust_tracker/servers/apis/v1). +* [HTTP Tracker](https://docs.rs/torrust-tracker/3.0.0-alpha.1/torrust_tracker/servers/http). 
+* [UDP Tracker](https://docs.rs/torrust-tracker/3.0.0-alpha.1/torrust_tracker/servers/udp). ## Contributing @@ -66,7 +68,7 @@ How can you contribute? * Bug reports and feature requests. * Code contributions. You can start by looking at the issues labeled ["good first issues"](https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22). -* Documentation improvements. Check the [documentation](https://docs.rs/torrust-tracker/) and [API documentation](https://torrust.github.io/torrust-documentation/torrust-tracker/api/) for typos, errors, or missing information. +* Documentation improvements. Check the [documentation](https://docs.rs/torrust-tracker/) and [API documentation](https://docs.rs/torrust-tracker/3.0.0-alpha.1/torrust_tracker/servers/apis/v1) for typos, errors, or missing information. * Participation in the community. You can help by answering questions in the [discussions](https://github.com/torrust/torrust-tracker/discussions). ## License diff --git a/packages/configuration/LICENSE b/packages/configuration/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/configuration/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/configuration/README.md b/packages/configuration/README.md new file mode 100644 index 000000000..ccae51d70 --- /dev/null +++ b/packages/configuration/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker Configuration + +A library to provide configuration to the [Torrust Tracker](https://github.com/torrust/torrust-tracker). + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-configuration). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/located-error/LICENSE b/packages/located-error/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/located-error/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/located-error/README.md b/packages/located-error/README.md new file mode 100644 index 000000000..c3c18fa49 --- /dev/null +++ b/packages/located-error/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker Located Error + +A library to provide an error decorator with the location and the source of the original error. + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-located-error). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/primitives/LICENSE b/packages/primitives/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/primitives/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/primitives/README.md b/packages/primitives/README.md new file mode 100644 index 000000000..791955859 --- /dev/null +++ b/packages/primitives/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker Primitives + +A library with the primitive types shared by the [Torrust Tracker](https://github.com/torrust/torrust-tracker) packages. + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-primitives). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/test-helpers/LICENSE b/packages/test-helpers/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/test-helpers/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/test-helpers/README.md b/packages/test-helpers/README.md new file mode 100644 index 000000000..7389dce11 --- /dev/null +++ b/packages/test-helpers/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker Configuration + +A library providing helpers for testing the [Torrust Tracker](https://github.com/torrust/torrust-tracker). + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-test-helpers). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). From aec3a4ce4967082a79036e9b147c82091d89b925 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 13 Apr 2023 09:04:32 +0100 Subject: [PATCH 0515/1003] chore(api)!: remove API endpoint without version prefix BREAKING CHANGE: API endpoints without version prefixes are no longer available. For example, `/api/stats` use `/api/v1/stats` instead. --- src/servers/apis/v1/routes.rs | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/src/servers/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs index 7b792f8a8..74778ca14 100644 --- a/src/servers/apis/v1/routes.rs +++ b/src/servers/apis/v1/routes.rs @@ -7,21 +7,7 @@ use super::context::{auth_key, stats, torrent, whitelist}; use crate::tracker::Tracker; /// Add the routes for the v1 API. -/// -/// > **NOTICE**: the old API endpoints without `v1` prefix are kept for -/// backward compatibility. For example, the `GET /api/stats` endpoint is -/// still available, but it is deprecated and will be removed in the future. -/// You should use the `GET /api/v1/stats` endpoint instead. 
pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { - // Without `v1` prefix. - // We keep the old API endpoints without `v1` prefix for backward compatibility. - // todo: remove when the torrust index backend is using the `v1` prefix. - let router = auth_key::routes::add(prefix, router, tracker.clone()); - let router = stats::routes::add(prefix, router, tracker.clone()); - let router = whitelist::routes::add(prefix, router, tracker.clone()); - let router = torrent::routes::add(prefix, router, tracker.clone()); - - // With `v1` prefix let v1_prefix = format!("{prefix}/v1"); let router = auth_key::routes::add(&v1_prefix, router, tracker.clone()); let router = stats::routes::add(&v1_prefix, router, tracker.clone()); From feaa1737b29336fc8348cd5cc238d53c69d98ec6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 13 Apr 2023 10:27:33 +0100 Subject: [PATCH 0516/1003] fix: test for configuration Fields were reorganized and package test were not being executed on the CI. --- packages/configuration/src/lib.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 6b051b572..e48355757 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -557,10 +557,14 @@ impl Configuration { /// permission to read it. Will also return `Err` if the configuration is /// not valid or cannot be encoded to TOML. pub fn save_to_file(&self, path: &str) -> Result<(), Error> { - let toml_string = toml::to_string(self).expect("Could not encode TOML value"); - fs::write(path, toml_string).expect("Could not write to file!"); + fs::write(path, self.to_toml()).expect("Could not write to file!"); Ok(()) } + + /// Encodes the configuration to TOML. 
+ fn to_toml(&self) -> String { + toml::to_string(self).expect("Could not encode TOML value") + } } #[cfg(test)] @@ -575,11 +579,11 @@ mod tests { db_path = "./storage/database/data.db" announce_interval = 120 min_announce_interval = 120 - max_peer_timeout = 900 on_reverse_proxy = false external_ip = "0.0.0.0" tracker_usage_statistics = true persistent_torrent_completed_stat = false + max_peer_timeout = 900 inactive_peer_cleanup_interval = 600 remove_peerless_torrents = true From a933a6929c9ebb67bf1abd7372840be1216a2267 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 13 Apr 2023 10:28:45 +0100 Subject: [PATCH 0517/1003] ci: run tests for workspace packages Test for packages were not being executed. We need to add the `--workpspace` option: ``` cargo test --workspace ``` --- .github/workflows/test_build_release.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index d8c25bd56..f1bc295ce 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -36,9 +36,11 @@ jobs: run: cargo clippy --all-targets -- -D clippy::pedantic - name: Test Documentation run: cargo test --doc + - name: Run Tests + run: cargo test --workspace - uses: taiki-e/install-action@cargo-llvm-cov - uses: taiki-e/install-action@nextest - - name: Run Tests + - name: Show coverage run: cargo llvm-cov nextest build: From 823537e9c2e8d004de72fae208603e19f95a4ab8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 13 Apr 2023 10:45:40 +0100 Subject: [PATCH 0518/1003] fix: [#234] remove unused dependencies And move to `[dev-dependencies]` the ones that are only used for testing. 
--- Cargo.lock | 61 ++----------------------------- Cargo.toml | 2 - packages/configuration/Cargo.toml | 2 + packages/located-error/Cargo.toml | 3 ++ packages/test-helpers/Cargo.toml | 1 - 5 files changed, 8 insertions(+), 61 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d7053d618..ae256a5a9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -448,7 +448,7 @@ dependencies = [ "rust-ini", "serde", "serde_json", - "toml 0.5.11", + "toml", "yaml-rust", ] @@ -1982,7 +1982,7 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" dependencies = [ - "toml 0.5.11", + "toml", ] [[package]] @@ -2489,15 +2489,6 @@ dependencies = [ "syn 2.0.14", ] -[[package]] -name = "serde_spanned" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4" -dependencies = [ - "serde", -] - [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -2842,40 +2833,6 @@ dependencies = [ "serde", ] -[[package]] -name = "toml" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b403acf6f2bb0859c93c7f0d967cb4a75a7ac552100f9322faf64dc047669b21" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit", -] - -[[package]] -name = "toml_datetime" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" -dependencies = [ - "serde", -] - -[[package]] -name = "toml_edit" -version = "0.19.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13" -dependencies = [ - "indexmap", - "serde", - "serde_spanned", - "toml_datetime", - "winnow", -] - [[package]] name = "torrust-tracker" version = "3.0.0-alpha.1" @@ -2892,7 +2849,6 @@ 
dependencies = [ "derive_more", "fern", "futures", - "hex", "hyper", "lazy_static", "local-ip-address", @@ -2915,7 +2871,6 @@ dependencies = [ "serde_with", "thiserror", "tokio", - "toml 0.7.3", "torrust-tracker-configuration", "torrust-tracker-located-error", "torrust-tracker-primitives", @@ -2932,7 +2887,7 @@ dependencies = [ "serde", "serde_with", "thiserror", - "toml 0.5.11", + "toml", "torrust-tracker-located-error", "torrust-tracker-primitives", "uuid", @@ -2960,7 +2915,6 @@ version = "3.0.0-alpha.1" dependencies = [ "lazy_static", "rand", - "tokio", "torrust-tracker-configuration", "torrust-tracker-primitives", ] @@ -3403,15 +3357,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" -[[package]] -name = "winnow" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae8970b36c66498d8ff1d66685dc86b91b29db0c7739899012f63a63814b4b28" -dependencies = [ - "memchr", -] - [[package]] name = "winreg" version = "0.10.1" diff --git a/Cargo.toml b/Cargo.toml index 4b6bcb323..13dd8d572 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,13 +18,11 @@ serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2" serde_json = "1.0" serde_with = "2.0" -hex = "0.4" percent-encoding = "2.2" binascii = "0.1" lazy_static = "1.4" openssl = { version = "0.10", features = ["vendored"] } config = "0.13" -toml = "0.7" log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = "0.4" diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index aade6272d..36fb6d665 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -15,4 +15,6 @@ log = { version = "0.4", features = ["release_max_level_info"] } thiserror = "1.0" torrust-tracker-primitives = { version = "3.0.0-alpha.1", path = "../primitives" } torrust-tracker-located-error = { 
version = "3.0.0-alpha.1", path = "../located-error" } + +[dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index f67ef340f..acd13def3 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -8,4 +8,7 @@ edition.workspace = true [dependencies] log = { version = "0.4", features = ["release_max_level_info"] } + +[dev-dependencies] thiserror = "1.0" + diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 4483f8f4d..2f876470b 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -7,7 +7,6 @@ authors.workspace = true edition.workspace = true [dependencies] -tokio = { version = "1", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } lazy_static = "1.4" rand = "0.8.5" torrust-tracker-configuration = { version = "3.0.0-alpha.1", path = "../configuration"} From 09e97a1e9a7184c3bf2f10fde1ed47ec9861b6c4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 13 Apr 2023 11:30:33 +0100 Subject: [PATCH 0519/1003] feat: [#267] remove always restart configutation for MySQL from docker compose Since the configuration is intended for development, it does not make sense. 
--- compose.yaml | 1 - docker/README.md | 17 ++++++++++++++--- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/compose.yaml b/compose.yaml index d11f9c8ae..49f3055a8 100644 --- a/compose.yaml +++ b/compose.yaml @@ -22,7 +22,6 @@ services: mysql: image: mysql:8.0 command: '--default-authentication-plugin=mysql_native_password' - restart: always healthcheck: test: ['CMD-SHELL', 'mysqladmin ping -h 127.0.0.1 --password="$$(cat /run/secrets/db-password)" --silent'] interval: 3s diff --git a/docker/README.md b/docker/README.md index e5b4dfe74..e0fee61e7 100644 --- a/docker/README.md +++ b/docker/README.md @@ -19,6 +19,17 @@ storage/ ## Dev environment +When using docker you have to bind the exposed ports to the wildcard address `0.0.0.0`, so you can access the application from the host machine. + +The default API configuration uses `127.0.0.1`, so you have to change it to: + +```toml +[http_api] +bind_address = "0.0.0.0:1212" +``` + +Otherwise the API will be only accessible from inside the container. + ### With docker Build and run locally: @@ -78,7 +89,7 @@ CONTAINER ID IMAGE COMMAND CREATED STATU And you should be able to use the application, for example making a request to the API: - + You can stop the containers with: @@ -169,7 +180,7 @@ ssl_key_path = "./storage/ssl_certificates/localhost.key" If you enable the SSL certificate for the API, for example, you can load the API with this URL: - + ## Prod environment @@ -232,7 +243,7 @@ CONTAINER ID IMAGE intelligent-hawking registry.hub.docker.com/torrust/tracker:latest Running 4.236.213.57:6969->6969/udp, 4.236.213.57:1212->1212/tcp ``` -After a while, you can use the tracker API `http://4.236.213.57:1212/api/stats?token=MyAccessToken` and the UDP tracker with your BitTorrent client using this tracker announce URL `udp://4.236.213.57:6969`. 
+After a while, you can use the tracker API `http://4.236.213.57:1212/api/v1/stats?token=MyAccessToken` and the UDP tracker with your BitTorrent client using this tracker announce URL `udp://4.236.213.57:6969`. > NOTES: > From 77a5e233d3cc7c70afb7bb1114baa9b413fa252f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 13 Apr 2023 13:35:26 +0100 Subject: [PATCH 0520/1003] chore(deps): [#297] update workflow actions --- .github/workflows/publish_docker_image.yml | 2 +- .github/workflows/test_build_release.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml index 20152a727..1dd65e3a7 100644 --- a/.github/workflows/publish_docker_image.yml +++ b/.github/workflows/publish_docker_image.yml @@ -73,7 +73,7 @@ jobs: uses: docker/setup-buildx-action@v2 - name: Build and push - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 with: context: . file: ./Dockerfile diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index f1bc295ce..88234b97a 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -61,7 +61,7 @@ jobs: - name: Build Torrust Tracker run: cargo build --release - name: Upload Build Artifact - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: torrust-tracker path: ./target/release/torrust-tracker @@ -71,7 +71,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Download Build Artifact - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: torrust-tracker - name: Release From dfcc4b36356fc4dce87b74e35d8eabf3f1e5a741 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 13 Apr 2023 13:50:23 +0100 Subject: [PATCH 0521/1003] chore(deps): [#297] update cargo dependencies --- Cargo.lock | 70 +++++++++++++++++++++++++++---- Cargo.toml | 4 +- packages/configuration/Cargo.toml | 2 +- 3 files 
changed, 64 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ae256a5a9..995d051dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -59,9 +59,9 @@ dependencies = [ [[package]] name = "aquatic_udp_protocol" -version = "0.2.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16149f27924d42b337a637cd90a8ee2a8973bbccf32aabebce2b3c66913f947f" +checksum = "b2919b480121f7d20d247524da62bad1b6b7928bc3f50898f624b5c592727341" dependencies = [ "byteorder", "either", @@ -448,7 +448,7 @@ dependencies = [ "rust-ini", "serde", "serde_json", - "toml", + "toml 0.5.11", "yaml-rust", ] @@ -1560,9 +1560,9 @@ dependencies = [ [[package]] name = "multimap" -version = "0.8.3" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" +checksum = "70db9248a93dc36a36d9a47898caa007a32755c7ad140ec64eeeb50d5a730631" dependencies = [ "serde", ] @@ -1982,7 +1982,7 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" dependencies = [ - "toml", + "toml 0.5.11", ] [[package]] @@ -2460,9 +2460,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.95" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744" +checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" dependencies = [ "itoa", "ryu", @@ -2489,6 +2489,15 @@ dependencies = [ "syn 2.0.14", ] +[[package]] +name = "serde_spanned" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -2833,6 +2842,40 @@ dependencies = [ 
"serde", ] +[[package]] +name = "toml" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b403acf6f2bb0859c93c7f0d967cb4a75a7ac552100f9322faf64dc047669b21" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "torrust-tracker" version = "3.0.0-alpha.1" @@ -2887,7 +2930,7 @@ dependencies = [ "serde", "serde_with", "thiserror", - "toml", + "toml 0.7.3", "torrust-tracker-located-error", "torrust-tracker-primitives", "uuid", @@ -3357,6 +3400,15 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +[[package]] +name = "winnow" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae8970b36c66498d8ff1d66685dc86b91b29db0c7739899012f63a63814b4b28" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.10.1" diff --git a/Cargo.toml b/Cargo.toml index 13dd8d572..f8a59bf4c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,7 +34,7 @@ derive_more = "0.99" thiserror = "1.0" futures = "0.3" async-trait = "0.1" -aquatic_udp_protocol = "0.2" +aquatic_udp_protocol = "0.8" uuid = { version = "1", features = ["v4"] } axum = "0.6.10" axum-server = { version = "0.4", features = ["tls-rustls"] } @@ -43,7 +43,7 @@ bip_bencode = "0.4" torrust-tracker-primitives = { version = "3.0.0-alpha.1", path = 
"packages/primitives" } torrust-tracker-configuration = { version = "3.0.0-alpha.1", path = "packages/configuration" } torrust-tracker-located-error = { version = "3.0.0-alpha.1", path = "packages/located-error" } -multimap = "0.8" +multimap = "0.9" hyper = "0.14" [dev-dependencies] diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 36fb6d665..8ad5aa3fb 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -10,7 +10,7 @@ edition.workspace = true serde = { version = "1.0", features = ["derive"] } serde_with = "2.0" config = "0.13" -toml = "0.5" +toml = "0.7" log = { version = "0.4", features = ["release_max_level_info"] } thiserror = "1.0" torrust-tracker-primitives = { version = "3.0.0-alpha.1", path = "../primitives" } From 5e2356effba1b24fe8a640b6b57b63efcab41886 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 13 Apr 2023 15:28:36 +0100 Subject: [PATCH 0522/1003] feat: release 3.0.0-alpha.2 --- Cargo.lock | 10 +++++----- Cargo.toml | 10 +++++----- README.md | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/test-helpers/Cargo.toml | 4 ++-- src/lib.rs | 4 ++-- 6 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 995d051dd..899e8855f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2878,7 +2878,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.1" +version = "3.0.0-alpha.2" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -2923,7 +2923,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.1" +version = "3.0.0-alpha.2" dependencies = [ "config", "log", @@ -2938,7 +2938,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.1" +version = "3.0.0-alpha.2" dependencies = [ "log", "thiserror", @@ -2946,7 +2946,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.1" +version = 
"3.0.0-alpha.2" dependencies = [ "derive_more", "serde", @@ -2954,7 +2954,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.1" +version = "3.0.0-alpha.2" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index f8a59bf4c..51c191275 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,7 +10,7 @@ version.workspace = true authors = ["Nautilus Cyberneering , Mick van Dijke "] edition = "2021" repository = "https://github.com/torrust/torrust-tracker" -version = "3.0.0-alpha.1" +version = "3.0.0-alpha.2" [dependencies] tokio = { version = "1.26", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } @@ -40,9 +40,9 @@ axum = "0.6.10" axum-server = { version = "0.4", features = ["tls-rustls"] } axum-client-ip = "0.4" bip_bencode = "0.4" -torrust-tracker-primitives = { version = "3.0.0-alpha.1", path = "packages/primitives" } -torrust-tracker-configuration = { version = "3.0.0-alpha.1", path = "packages/configuration" } -torrust-tracker-located-error = { version = "3.0.0-alpha.1", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.2", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.2", path = "packages/configuration" } +torrust-tracker-located-error = { version = "3.0.0-alpha.2", path = "packages/located-error" } multimap = "0.9" hyper = "0.14" @@ -53,7 +53,7 @@ serde_urlencoded = "0.7" serde_repr = "0.1" serde_bytes = "0.11" local-ip-address = "0.5" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.1", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.2", path = "packages/test-helpers" } [workspace] members = [ diff --git a/README.md b/README.md index 666868a87..c3d0a127b 100644 --- a/README.md +++ b/README.md @@ -56,9 +56,9 @@ After running the tracker these services will be available: ## Documentation * [Crate documentation](https://docs.rs/torrust-tracker/). 
-* [API `v1`](https://docs.rs/torrust-tracker/3.0.0-alpha.1/torrust_tracker/servers/apis/v1). -* [HTTP Tracker](https://docs.rs/torrust-tracker/3.0.0-alpha.1/torrust_tracker/servers/http). -* [UDP Tracker](https://docs.rs/torrust-tracker/3.0.0-alpha.1/torrust_tracker/servers/udp). +* [API `v1`](https://docs.rs/torrust-tracker/3.0.0-alpha.2/torrust_tracker/servers/apis/v1). +* [HTTP Tracker](https://docs.rs/torrust-tracker/3.0.0-alpha.2/torrust_tracker/servers/http). +* [UDP Tracker](https://docs.rs/torrust-tracker/3.0.0-alpha.2/torrust_tracker/servers/udp). ## Contributing @@ -68,7 +68,7 @@ How can you contribute? * Bug reports and feature requests. * Code contributions. You can start by looking at the issues labeled ["good first issues"](https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22). -* Documentation improvements. Check the [documentation](https://docs.rs/torrust-tracker/) and [API documentation](https://docs.rs/torrust-tracker/3.0.0-alpha.1/torrust_tracker/servers/apis/v1) for typos, errors, or missing information. +* Documentation improvements. Check the [documentation](https://docs.rs/torrust-tracker/) and [API documentation](https://docs.rs/torrust-tracker/3.0.0-alpha.2/torrust_tracker/servers/apis/v1) for typos, errors, or missing information. * Participation in the community. You can help by answering questions in the [discussions](https://github.com/torrust/torrust-tracker/discussions). 
## License diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 8ad5aa3fb..6f9e5cbc5 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -13,8 +13,8 @@ config = "0.13" toml = "0.7" log = { version = "0.4", features = ["release_max_level_info"] } thiserror = "1.0" -torrust-tracker-primitives = { version = "3.0.0-alpha.1", path = "../primitives" } -torrust-tracker-located-error = { version = "3.0.0-alpha.1", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.2", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.2", path = "../located-error" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 2f876470b..5d360d101 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -9,5 +9,5 @@ edition.workspace = true [dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.1", path = "../configuration"} -torrust-tracker-primitives = { version = "3.0.0-alpha.1", path = "../primitives"} +torrust-tracker-configuration = { version = "3.0.0-alpha.2", path = "../configuration"} +torrust-tracker-primitives = { version = "3.0.0-alpha.2", path = "../primitives"} diff --git a/src/lib.rs b/src/lib.rs index adcf3d1f2..e219e59e3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -110,7 +110,7 @@ //! --publish 7070:7070/tcp \ //! --publish 1212:1212/tcp \ //! --volume "$(pwd)/storage":"/app/storage" \ -//! torrust/tracker:3.0.0-alpha.1 +//! torrust/tracker:3.0.0-alpha.2 //! ``` //! //! For more information about using docker visit the [tracker docker documentation](https://github.com/torrust/torrust-tracker/tree/develop/docker). @@ -162,7 +162,7 @@ //! //! The default configuration includes one disabled UDP server, one disabled HTTP server and the enabled API. //! -//! 
For more information about each service and options you can visit the documentation for the [torrust-tracker-configuration crate](https://docs.rs/torrust-tracker-configuration/3.0.0-alpha.1/torrust_tracker_configuration/). +//! For more information about each service and options you can visit the documentation for the [torrust-tracker-configuration crate](https://docs.rs/torrust-tracker-configuration). //! //! Alternatively to the `config.toml` file you can use one environment variable `TORRUST_TRACKER_CONFIG` to pass the configuration to the tracker: //! From a9042596bc3851f510efd39526c94e081ac65e4f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 13 Apr 2023 15:59:56 +0100 Subject: [PATCH 0523/1003] chore(deps): [#297] update workflow actions --- .github/workflows/test_docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test_docker.yml b/.github/workflows/test_docker.yml index 0c3fc36d8..a62965878 100644 --- a/.github/workflows/test_docker.yml +++ b/.github/workflows/test_docker.yml @@ -14,7 +14,7 @@ jobs: uses: docker/setup-buildx-action@v2 - name: Build docker image - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 with: context: . file: ./Dockerfile From 494074189122d0421dd8033fa5b5f3ecedeec8fc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 13 Apr 2023 17:08:16 +0000 Subject: [PATCH 0524/1003] chore(deps): bump h2 from 0.3.16 to 0.3.17 Bumps [h2](https://github.com/hyperium/h2) from 0.3.16 to 0.3.17. - [Release notes](https://github.com/hyperium/h2/releases) - [Changelog](https://github.com/hyperium/h2/blob/master/CHANGELOG.md) - [Commits](https://github.com/hyperium/h2/compare/v0.3.16...v0.3.17) --- updated-dependencies: - dependency-name: h2 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 899e8855f..aedc11565 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1035,9 +1035,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "h2" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" +checksum = "66b91535aa35fea1523ad1b86cb6b53c28e0ae566ba4a460f4457e936cad7c6f" dependencies = [ "bytes", "fnv", From 70a9a9e6806610a809e893793c65f12342299e3b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Apr 2023 17:29:32 +0100 Subject: [PATCH 0525/1003] fix: [#306] fix CVE-2020-26235 vulnerability By removing the `chrono` package feature that was using the `time` pacakage version with the vulnerability. --- Cargo.lock | 28 ++++------------------------ Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aedc11565..ef26f1a5b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -394,12 +394,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", - "js-sys", "num-integer", "num-traits", "serde", - "time 0.1.45", - "wasm-bindgen", "winapi", ] @@ -1018,7 +1015,7 @@ checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", ] [[package]] @@ -1527,7 +1524,7 @@ checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.45.0", ] @@ -1626,7 +1623,7 @@ dependencies = [ "smallvec", "subprocess", "thiserror", - "time 0.3.20", + "time", "uuid", ] @@ -2523,7 +2520,7 @@ 
dependencies = [ "serde", "serde_json", "serde_with_macros", - "time 0.3.20", + "time", ] [[package]] @@ -2716,17 +2713,6 @@ dependencies = [ "syn 2.0.14", ] -[[package]] -name = "time" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" -dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", -] - [[package]] name = "time" version = "0.3.20" @@ -3115,12 +3101,6 @@ dependencies = [ "try-lock", ] -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" diff --git a/Cargo.toml b/Cargo.toml index 51c191275..9dba4d515 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,7 @@ openssl = { version = "0.10", features = ["vendored"] } config = "0.13" log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" -chrono = "0.4" +chrono = { version = "0.4.24", default-features = false, features = ["clock"] } r2d2 = "0.8" r2d2_mysql = "23.0" r2d2_sqlite = { version = "0.21", features = ["bundled"] } From 66bf35340553e67a79f8a7e03e8b212f47d5345a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 14 Apr 2023 20:04:37 +0000 Subject: [PATCH 0526/1003] chore(deps): bump hyper from 0.14.25 to 0.14.26 Bumps [hyper](https://github.com/hyperium/hyper) from 0.14.25 to 0.14.26. - [Release notes](https://github.com/hyperium/hyper/releases) - [Changelog](https://github.com/hyperium/hyper/blob/v0.14.26/CHANGELOG.md) - [Commits](https://github.com/hyperium/hyper/compare/v0.14.25...v0.14.26) --- updated-dependencies: - dependency-name: hyper dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ef26f1a5b..8c3f917b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1133,9 +1133,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.25" +version = "0.14.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" +checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" dependencies = [ "bytes", "futures-channel", From fea35aec357f1b9cd6467122397feaf8f9fb4706 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 20 Apr 2023 23:19:11 +0200 Subject: [PATCH 0527/1003] clippy: add explict types to 'let_underscore_untyped' --- src/servers/apis/server.rs | 2 +- src/servers/udp/server.rs | 2 +- src/tracker/mod.rs | 4 ++-- src/tracker/peer.rs | 8 ++++---- src/tracker/torrent.rs | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 91821aa79..1e24e6655 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -124,7 +124,7 @@ impl ApiServer { .send(0) .map_err(|_| Error::Error("Task killer channel was closed.".to_string()))?; - let _ = self.state.task.await; + let _: Result<(), tokio::task::JoinError> = self.state.task.await; Ok(ApiServer { cfg: self.cfg, diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index a4f1faae8..6b4b18831 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -143,7 +143,7 @@ impl UdpServer { pub async fn stop(self) -> Result, Error> { self.state.stop_job_sender.send(1).map_err(|e| Error::Error(e.to_string()))?; - let _ = self.state.job.await; + let _: Result<(), tokio::task::JoinError> = self.state.job.await; let stopped_api_server: UdpServer = UdpServer { cfg: self.cfg, diff --git 
a/src/tracker/mod.rs b/src/tracker/mod.rs index 2cabd5a82..0d3263988 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -732,7 +732,7 @@ impl Tracker { // todo: move this action to a separate worker if self.config.persistent_torrent_completed_stat && stats_updated { - let _ = self + let _: Result<(), databases::error::Error> = self .database .save_persistent_torrent(info_hash, torrent_entry.completed) .await; @@ -1064,7 +1064,7 @@ impl Tracker { whitelist.clear(); for info_hash in whitelisted_torrents_from_database { - let _ = whitelist.insert(info_hash); + let _: bool = whitelist.insert(info_hash); } Ok(()) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index a54346280..76747fea2 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -366,14 +366,14 @@ mod test { #[should_panic] fn should_fail_trying_to_instantiate_from_a_byte_slice_with_less_than_20_bytes() { let less_than_20_bytes = [0; 19]; - let _ = peer::Id::from_bytes(&less_than_20_bytes); + let _: peer::Id = peer::Id::from_bytes(&less_than_20_bytes); } #[test] #[should_panic] fn should_fail_trying_to_instantiate_from_a_byte_slice_with_more_than_20_bytes() { let more_than_20_bytes = [0; 21]; - let _ = peer::Id::from_bytes(&more_than_20_bytes); + let _: peer::Id = peer::Id::from_bytes(&more_than_20_bytes); } #[test] @@ -420,13 +420,13 @@ mod test { #[test] #[should_panic] fn should_fail_trying_to_convert_from_a_byte_vector_with_less_than_20_bytes() { - let _ = peer::Id::try_from([0; 19].to_vec()).unwrap(); + let _: peer::Id = peer::Id::try_from([0; 19].to_vec()).unwrap(); } #[test] #[should_panic] fn should_fail_trying_to_convert_from_a_byte_vector_with_more_than_20_bytes() { - let _ = peer::Id::try_from([0; 21].to_vec()).unwrap(); + let _: peer::Id = peer::Id::try_from([0; 21].to_vec()).unwrap(); } #[test] diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 22deed2b4..4f7e28b6b 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -103,7 +103,7 @@ impl 
Entry { match peer.event { AnnounceEvent::Stopped => { - let _ = self.peers.remove(&peer.peer_id); + let _: Option = self.peers.remove(&peer.peer_id); } AnnounceEvent::Completed => { let peer_old = self.peers.insert(peer.peer_id, *peer); @@ -114,7 +114,7 @@ impl Entry { } } _ => { - let _ = self.peers.insert(peer.peer_id, *peer); + let _: Option = self.peers.insert(peer.peer_id, *peer); } } From 954f81d257e9e35c9947f6f9ebe7900b07c60968 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 19 Apr 2023 20:04:46 +0000 Subject: [PATCH 0528/1003] chore(deps): bump axum from 0.6.15 to 0.6.16 Bumps [axum](https://github.com/tokio-rs/axum) from 0.6.15 to 0.6.16. - [Release notes](https://github.com/tokio-rs/axum/releases) - [Changelog](https://github.com/tokio-rs/axum/blob/main/CHANGELOG.md) - [Commits](https://github.com/tokio-rs/axum/compare/axum-v0.6.15...axum-v0.6.16) --- updated-dependencies: - dependency-name: axum dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8c3f917b7..a9d8594d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -98,9 +98,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.15" +version = "0.6.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b32c5ea3aabaf4deb5f5ced2d688ec0844c881c9e6c696a8b769a05fc691e62" +checksum = "113713495a32dd0ab52baf5c10044725aa3aec00b31beda84218e469029b72a3" dependencies = [ "async-trait", "axum-core", diff --git a/Cargo.toml b/Cargo.toml index 9dba4d515..ffe97a8fd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,7 +36,7 @@ futures = "0.3" async-trait = "0.1" aquatic_udp_protocol = "0.8" uuid = { version = "1", features = ["v4"] } -axum = "0.6.10" +axum = "0.6.16" axum-server = { version = "0.4", features = ["tls-rustls"] } axum-client-ip = "0.4" bip_bencode = "0.4" From c65f039e0dcfd796775eaf4404743197d6916b53 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 2 May 2023 09:47:39 +0100 Subject: [PATCH 0529/1003] docs: [#319] add system dependencies --- cSpell.json | 1 + src/lib.rs | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/cSpell.json b/cSpell.json index 2fa80b58a..a20812bdd 100644 --- a/cSpell.json +++ b/cSpell.json @@ -39,6 +39,7 @@ "lcov", "leecher", "leechers", + "libsqlite", "libtorrent", "Lphant", "metainfo", diff --git a/src/lib.rs b/src/lib.rs index e219e59e3..74005e339 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -68,6 +68,26 @@ //! //! ## Prerequisites //! +//! The tracker has some system dependencies: +//! +//! Since we are using the `openssl` crate with the [vendored feature](https://docs.rs/openssl/latest/openssl/#vendored), +//! enabled, you will need to install the following dependencies: +//! +//! ```text +//! sudo apt-get install pkg-config libssl-dev make +//! ``` +//! 
+//! If you are using `SQLite3` as database driver, you will need to install the +//! following dependency: +//! +//! ```text +//! sudo apt-get install libsqlite3-dev +//! ``` +//! +//! > **NOTICE**: those are the commands for `Ubuntu`. If you are using a +//! different OS, you will need to install the equivalent packages. Please +//! refer to the documentation of your OS. +//! //! With the default configuration you will need to create the `storage` directory: //! //! ```text From 1616ab0531a1dbb4c8912c18bc2a1780c4d9600f Mon Sep 17 00:00:00 2001 From: nyacat Date: Tue, 2 May 2023 11:43:24 +0100 Subject: [PATCH 0530/1003] docs: [#321] increase swap to avoid build errors @nyacat proposed a solution if you have problems building in a machine with low resources. I've added the solution to the docs. --- cSpell.json | 2 ++ src/lib.rs | 20 +++++++++++++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/cSpell.json b/cSpell.json index a20812bdd..c935021c0 100644 --- a/cSpell.json +++ b/cSpell.json @@ -31,6 +31,7 @@ "hexlify", "hlocalhost", "Hydranode", + "Icelake", "incompletei", "infohash", "infohashes", @@ -90,6 +91,7 @@ "Vuze", "whitespaces", "XBTT", + "Xeon", "Xtorrent", "Xunlei", "xxxxxxxxxxxxxxxxxxxxd", diff --git a/src/lib.rs b/src/lib.rs index 74005e339..744642a3f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -64,7 +64,25 @@ //! ## Minimum requirements //! //! - Rust Stable `1.68` -//! - You might have problems compiling with a machine with low resources. Or with low resources limits on docker containers. It has been tested with docker containers with 6 CPUs, 7.5 GM of memory and 2GB of swap. +//! - You might have problems compiling with a machine with low resources. +//! +//! It has been tested with: +//! +//! Docker containers with: +//! +//! - 6 CPUs +//! - 7.5G of ram +//! - 2GB of swap +//! +//! [VM](https://github.com/torrust/torrust-tracker/issues/321) with: +//! +//! - 1 core of Intel Xeon Processor (Icelake) +//! - 1G of ram +//! 
- 25G of disk +//! - Debian 11 +//! - no swap by default +//! +//! Adding swap may help with compilation. See issue [#321](https://github.com/torrust/torrust-tracker/issues/321). //! //! ## Prerequisites //! From 7b0b9269890312e2dbcfcbafaa1cbae0ff91586f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 6 Jul 2023 15:56:24 +0100 Subject: [PATCH 0531/1003] fix: udpate dependencies Getting this error: ``` Compiling ident_case v1.0.1 Compiling ppv-lite86 v0.2.17 error[E0635]: unknown feature `proc_macro_span_shrink` --> /home/josecelano/.cargo/registry/src/index.crates.io-6f17d22bba15001f/proc-macro2-1.0.56/src/lib.rs:92:30 | 92 | feature(proc_macro_span, proc_macro_span_shrink) | ``` Compiling with: - rustup 1.26.0 (5af9b9484 2023-04-05) - rustc 1.72.0-nightly (839e9a6e1 2023-07-02) --- Cargo.lock | 863 ++++++++++++++++++++++------------------------------- Cargo.toml | 2 +- 2 files changed, 360 insertions(+), 505 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a9d8594d4..5e880459a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" +checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" dependencies = [ "gimli", ] @@ -41,13 +41,25 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "0.7.20" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56fc6cf8dc8c4158eed8649f9b8b0ea1518eb62b544fe9490d66fa0b349eafe9" + +[[package]] +name = 
"android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + [[package]] name = "android_system_properties" version = "0.1.5" @@ -75,19 +87,19 @@ checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] name = "arrayvec" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-trait" -version = "0.1.68" +version = "0.1.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" +checksum = "a564d521dd56509c4c47480d00b80ee55f7e385ae48db5744c67ad50c92d2ebf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.14", + "syn 2.0.23", ] [[package]] @@ -98,9 +110,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.16" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "113713495a32dd0ab52baf5c10044725aa3aec00b31beda84218e469029b72a3" +checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" dependencies = [ "async-trait", "axum-core", @@ -130,9 +142,9 @@ dependencies = [ [[package]] name = "axum-client-ip" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d719fabd6813392bbc10e1fe67f2977fad52791a836e51236f7e02f2482e017" +checksum = "df8e81eacc93f36480825da5f46a33b5fb2246ed024eacc9e8933425b80c5807" dependencies = [ "axum", "forwarded-header-value", @@ -178,9 +190,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.67" +version = "0.3.68" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" +checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" dependencies = [ "addr2line", "cc", @@ -199,15 +211,15 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "bigdecimal" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6aaf33151a6429fe9211d1b276eafdf70cdff28b071e76c0b0e1503221ea3744" +checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa" dependencies = [ "num-bigint", "num-integer", @@ -328,15 +340,15 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "bytecheck" -version = "0.6.10" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13fe11640a23eb24562225322cd3e452b93a3d4091d62fab69c70542fcd17d1f" +checksum = "8b6372023ac861f6e6dc89c8344a8f398fb42aaba2b5dbc649ca0c0e9dbcb627" dependencies = [ "bytecheck_derive", "ptr_meta", @@ -345,9 +357,9 @@ dependencies = [ [[package]] name = "bytecheck_derive" -version = "0.6.10" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e31225543cb46f81a7e224762764f4a6a0f097b1db0b175f69e8065efaa42de5" +checksum = 
"a7ec4c6f261935ad534c0c22dbef2201b45918860eb1c574b972bd213a76af61" dependencies = [ "proc-macro2", "quote", @@ -389,12 +401,12 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.24" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" +checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ + "android-tzdata", "iana-time-zone", - "num-integer", "num-traits", "serde", "winapi", @@ -420,16 +432,6 @@ dependencies = [ "cc", ] -[[package]] -name = "codespan-reporting" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" -dependencies = [ - "termcolor", - "unicode-width", -] - [[package]] name = "config" version = "0.13.3" @@ -473,9 +475,9 @@ checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "280a9f2d8b3a38871a3c8a46fb80db65e5e5ed97da80c4d08bf27fb63e35e181" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] @@ -526,9 +528,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.14" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg", "cfg-if", @@ -549,9 +551,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if", ] @@ -566,55 +568,11 @@ dependencies = [ "typenum", ] -[[package]] -name = "cxx" -version = "1.0.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" -dependencies = [ - "cc", - "cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", -] - -[[package]] -name = "cxx-build" -version = "1.0.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" -dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2", - "quote", - "scratch", - "syn 2.0.14", -] - -[[package]] -name = "cxxbridge-flags" -version = "1.0.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" - -[[package]] -name = "cxxbridge-macro" -version = "1.0.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.14", -] - [[package]] name = "darling" -version = "0.14.4" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" dependencies = [ "darling_core", "darling_macro", @@ -622,27 +580,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.14.4" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" +checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" 
dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim", - "syn 1.0.109", + "syn 2.0.23", ] [[package]] name = "darling_macro" -version = "0.14.4" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 1.0.109", + "syn 2.0.23", ] [[package]] @@ -660,13 +618,13 @@ dependencies = [ [[package]] name = "derive_utils" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff8f6a793f528719e1ad4425a52a213ac1214ac7158c5fb97a7f50a64bfc96d" +checksum = "20ce151e1b790e3e36d767ae57691240feafe8b605e1c2fe081183d64ac1bff3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.14", + "syn 2.0.23", ] [[package]] @@ -677,9 +635,9 @@ checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", "crypto-common", @@ -712,6 +670,12 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "equivalent" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1" + [[package]] name = "errno" version = "0.3.1" @@ -720,7 +684,7 @@ checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] @@ -774,9 +738,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.25" +version = "1.0.26" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" +checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", "libz-sys", @@ -815,9 +779,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -840,9 +804,9 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frunk" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a89c703bf50009f383a0873845357cc400a95fc535f836feddfe015d7df6e1e0" +checksum = "11a351b59e12f97b4176ee78497dff72e4276fb1ceb13e19056aca7fa0206287" dependencies = [ "frunk_core", "frunk_derives", @@ -851,55 +815,43 @@ dependencies = [ [[package]] name = "frunk_core" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a446d01a558301dca28ef43222864a9fa2bd9a2e71370f769d5d5d5ec9f3537" +checksum = "af2469fab0bd07e64ccf0ad57a1438f63160c69b2e57f04a439653d68eb558d6" [[package]] name = "frunk_derives" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b83164912bb4c97cfe0772913c7af7387ee2e00cb6d4636fb65a35b3d0c8f173" +checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 1.0.109", + "syn 2.0.23", ] [[package]] name = "frunk_proc_macro_helpers" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "015425591bbeb0f5b8a75593340f1789af428e9f887a4f1e36c0c471f067ef50" +checksum = "35b54add839292b743aeda6ebedbd8b11e93404f902c56223e51b9ec18a13d2c" dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.23", ] [[package]] name = "frunk_proc_macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea01524f285deab48affffb342b97f186e657b119c3f1821ac531780e0fbfae0" -dependencies = [ - "frunk_core", - "frunk_proc_macros_impl", - "proc-macro-hack", -] - -[[package]] -name = "frunk_proc_macros_impl" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a802d974cc18ee7fe1a7868fc9ce31086294fd96ba62f8da64ecb44e92a2653" +checksum = "71b85a1d4a9a6b300b41c05e8e13ef2feca03e0334127f29eca9506a7fe13a93" dependencies = [ "frunk_core", "frunk_proc_macro_helpers", - "proc-macro-hack", "quote", - "syn 1.0.109", + "syn 2.0.23", ] [[package]] @@ -964,7 +916,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.14", + "syn 2.0.23", ] [[package]] @@ -1009,9 +961,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", @@ -1020,9 +972,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" [[package]] name = "glob" @@ -1032,9 +984,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] 
name = "h2" -version = "0.3.17" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66b91535aa35fea1523ad1b86cb6b53c28e0ae566ba4a460f4457e936cad7c6f" +checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" dependencies = [ "bytes", "fnv", @@ -1042,7 +994,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap", + "indexmap 1.9.3", "slab", "tokio", "tokio-util", @@ -1068,28 +1020,29 @@ dependencies = [ ] [[package]] -name = "hashlink" -version = "0.8.1" +name = "hashbrown" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" +checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" dependencies = [ - "hashbrown 0.12.3", + "ahash 0.8.3", + "allocator-api2", ] [[package]] -name = "hermit-abi" -version = "0.2.6" +name = "hashlink" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" +checksum = "312f66718a2d7789ffef4f4b7b213138ed9f1eb3aa1d0d82fc99f88fb3ffd26f" dependencies = [ - "libc", + "hashbrown 0.14.0", ] [[package]] name = "hermit-abi" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" +checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" [[package]] name = "hex" @@ -1133,9 +1086,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.26" +version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes", 
"futures-channel", @@ -1170,9 +1123,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1184,12 +1137,11 @@ dependencies = [ [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -1200,9 +1152,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -1219,6 +1171,16 @@ dependencies = [ "serde", ] +[[package]] +name = "indexmap" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +dependencies = [ + "equivalent", + "hashbrown 0.14.0", +] + [[package]] name = "instant" version = "0.1.12" @@ -1230,30 +1192,30 @@ dependencies = [ [[package]] name = "io-enum" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01c662c349c9c9f542e7bfd9134143beb27da4b20dfbc3b3ef5b2a5b507dafbd" +checksum = "5305557fa27b460072ae15ce07617e999f5879f14d376c8449f0bfb9f9d8e91e" dependencies = [ "derive_utils", - 
"syn 2.0.14", + "syn 2.0.23", ] [[package]] name = "io-lifetimes" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.1", + "hermit-abi", "libc", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] name = "ipnet" -version = "2.7.2" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" +checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" [[package]] name = "itertools" @@ -1266,15 +1228,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" +checksum = "62b02a5381cc465bd3041d84623d0fa3b66738b52b8e2fc3bab8ad63ab032f4a" [[package]] name = "js-sys" -version = "0.3.61" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -1377,9 +1339,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.141" +version = "0.2.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3304a64d199bb964be99741b7a14d26972741915b3649639149b2479bb46f4b5" +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" [[package]] name = "libloading" @@ -1404,24 +1366,15 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" +checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" dependencies = [ "cc", "pkg-config", "vcpkg", ] -[[package]] -name = "link-cplusplus" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" -dependencies = [ - "cc", -] - [[package]] name = "linked-hash-map" version = "0.5.6" @@ -1430,27 +1383,27 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.3.1" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59d8c75012853d2e872fb56bc8a2e53718e2cafe1a4c823143141c6d90c322f" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "local-ip-address" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faa9d02443a1741e9f51dafdfcbffb3863b2a89c457d762b40337d6c5153ef81" +checksum = "2815836665de176ba66deaa449ada98fdf208d84730d1a84a22cbeed6151a6fa" dependencies = [ "libc", "neli", "thiserror", - "windows-sys 0.42.0", + "windows-sys", ] [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg", "scopeguard", @@ -1458,12 +1411,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "lru" @@ -1488,9 
+1438,9 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg", ] @@ -1509,23 +1459,22 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.6.2" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "log", "wasi", - "windows-sys 0.45.0", + "windows-sys", ] [[package]] @@ -1656,12 +1605,27 @@ dependencies = [ [[package]] name = "neli" -version = "0.5.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9053554eb5dcb7e10d9cdab1206965bde870eed5d0d341532ca035e3ba221508" +checksum = "1100229e06604150b3becd61a4965d5c70f3be1759544ea7274166f4be41ef43" dependencies = [ "byteorder", "libc", + "log", + "neli-proc-macros", +] + +[[package]] +name = "neli-proc-macros" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c168194d373b1e134786274020dae7fc5513d565ea2ebb9bc9ff17ffb69106d4" +dependencies = [ + "either", + "proc-macro2", + "quote", + "serde", + "syn 1.0.109", ] [[package]] @@ -1718,34 +1682,34 @@ dependencies = [ [[package]] name = "num_cpus" 
-version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.2.6", + "hermit-abi", "libc", ] [[package]] name = "object" -version = "0.30.3" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" +checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.17.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "openssl" -version = "0.10.50" +version = "0.10.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e30d8bc91859781f0a943411186324d580f2bbeb71b452fe91ae344806af3f1" +checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" dependencies = [ "bitflags", "cfg-if", @@ -1764,7 +1728,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.14", + "syn 2.0.23", ] [[package]] @@ -1775,18 +1739,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.25.2+1.1.1t" +version = "111.26.0+1.1.1u" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320708a054ad9b3bf314688b5db87cf4d6683d64cfc835e2337924ae62bf4431" +checksum = "efc62c9f12b22b8f5208c23a7200a442b2e5999f8bdf80233852122b5a4f6f37" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.85" +version = "0.9.90" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d3d193fb1488ad46ffe3aaabc912cc931d02ee8518fe2959aea8ef52718b0c0" +checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" dependencies = [ "cc", "libc", @@ -1817,15 +1781,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.7" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.2.16", + "redox_syscall", "smallvec", - "windows-sys 0.45.0", + "windows-targets", ] [[package]] @@ -1851,15 +1815,15 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.5.7" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1403e8401ad5dedea73c626b99758535b342502f8d1e361f4a2dd952749122" +checksum = "f73935e4d55e2abf7f130186537b19e7a4abc886a0252380b59248af473a3fc9" dependencies = [ "thiserror", "ucd-trie", @@ -1867,9 +1831,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.7" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be99c4c1d2fc2769b1d00239431d711d08f6efedcecb8b6e30707160aee99c15" +checksum = "aef623c9bbfa0eedf5a0efba11a5ee83209c326653ca31ff019bec3a95bfff2b" dependencies = [ "pest", "pest_generator", @@ -1877,22 +1841,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.7" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e56094789873daa36164de2e822b3888c6ae4b4f9da555a1103587658c805b1e" +checksum = "b3e8cba4ec22bada7fc55ffe51e2deb6a0e0db2d0b7ab0b103acc80d2510c190" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.14", + "syn 2.0.23", ] [[package]] name = "pest_meta" -version = "2.5.7" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6733073c7cff3d8459fda0e42f13a047870242aed8b509fe98000928975f359e" +checksum = "a01f71cb40bd8bb94232df14b946909e14660e33fc05db3e50ae2a82d7ea0ca0" dependencies = [ "once_cell", "pest", @@ -1901,29 +1865,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.23", ] [[package]] name = "pin-project-lite" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" [[package]] name = "pin-utils" @@ -1933,9 +1897,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" +checksum = 
"26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "ppv-lite86" @@ -1982,17 +1946,11 @@ dependencies = [ "toml 0.5.11", ] -[[package]] -name = "proc-macro-hack" -version = "0.5.20+deprecated" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" - [[package]] name = "proc-macro2" -version = "1.0.56" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" +checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb" dependencies = [ "unicode-ident", ] @@ -2019,9 +1977,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.26" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" dependencies = [ "proc-macro2", ] @@ -2095,27 +2053,30 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.16" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ "bitflags", ] [[package]] -name = "redox_syscall" -version = "0.3.5" +name = "regex" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "89089e897c013b3deb627116ae56a6955a72b8bed395c9526af31c9fe528b484" dependencies = [ - "bitflags", + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", ] [[package]] -name = "regex" -version = "1.7.3" +name = "regex-automata" +version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" +checksum = "fa250384981ea14565685dea16a9ccc4d1c541a13f82b9c168572264d1df8c56" dependencies = [ "aho-corasick", "memchr", @@ -2124,9 +2085,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.29" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" +checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846" [[package]] name = "rend" @@ -2139,11 +2100,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.16" +version = "0.11.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27b71749df584b7f4cac2c426c127a7c785a5106cc98f7a8feb044115f0fa254" +checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "bytes", "encoding_rs", "futures-core", @@ -2191,23 +2152,26 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.41" +version = "0.7.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21499ed91807f07ae081880aabb2ccc0235e9d88011867d984525e9a4c3cfa3e" +checksum = "0200c8230b013893c0b2d6213d6ec64ed2b9be2e0e016682b7224ff82cff5c58" dependencies = [ + "bitvec", "bytecheck", "hashbrown 0.12.3", "ptr_meta", "rend", "rkyv_derive", "seahash", + "tinyvec", + "uuid", ] [[package]] name = "rkyv_derive" -version = "0.7.41" +version = "0.7.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1c672430eb41556291981f45ca900a0239ad007242d1cb4b4167af842db666" +checksum = "b2e06b915b5c230a17d7a736d1e2e63ee753c256a8614ef3f5147b13a4f5541d" dependencies = [ "proc-macro2", "quote", @@ -2251,9 +2215,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.29.1" +version = "1.30.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "26bd36b60561ee1fb5ec2817f198b6fd09fa571c897a5e86d1487cfc2b096dfc" +checksum = "d0446843641c69436765a35a5a77088e28c2e6a12da93e84aa3ab1cd4aa5a042" dependencies = [ "arrayvec", "borsh", @@ -2269,9 +2233,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -2290,16 +2254,16 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.11" +version = "0.37.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85597d61f83914ddeba6a47b3b8ffe7365107221c2e557ed94426489fefb5f77" +checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" dependencies = [ "bitflags", "errno", "io-lifetimes", "libc", "linux-raw-sys", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] @@ -2316,24 +2280,24 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", ] [[package]] name = "rustversion" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" +checksum = "dc31bd9b61a32c31f9650d18add92aa83a49ba979c143eefd27fe7177b05bd5f" [[package]] name = "ryu" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" +checksum 
= "fe232bdf6be8c8de797b22184ee71118d63780ea42ac85b61d1baa6d3b782ae9" [[package]] name = "saturating" @@ -2343,11 +2307,11 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] name = "schannel" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys 0.42.0", + "windows-sys", ] [[package]] @@ -2365,12 +2329,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "scratch" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" - [[package]] name = "sct" version = "0.7.0" @@ -2389,9 +2347,9 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "security-framework" -version = "2.8.2" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ "bitflags", "core-foundation", @@ -2402,9 +2360,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", @@ -2418,9 +2376,9 @@ checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "serde" -version = "1.0.160" +version = 
"1.0.166" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" +checksum = "d01b7404f9d441d3ad40e6a636a7782c377d2abdbe4fa2440e2edcc2f4f10db8" dependencies = [ "serde_derive", ] @@ -2437,29 +2395,29 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.9" +version = "0.11.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "416bda436f9aab92e02c8e10d49a15ddd339cea90b6e340fe51ed97abb548294" +checksum = "5a16be4fe5320ade08736447e3198294a5ea9a6d44dde6f35f0a5e06859c427a" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.160" +version = "1.0.166" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" +checksum = "5dd83d6dde2b6b2d466e14d9d1acce8816dedee94f735eac6395808b3483c6d6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.14", + "syn 2.0.23", ] [[package]] name = "serde_json" -version = "1.0.96" +version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" +checksum = "0f1e14e89be7aa4c4b78bdbdc9eb5bf8517829a600ae8eaa39a6e1d960b5185c" dependencies = [ "itoa", "ryu", @@ -2468,29 +2426,30 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.11" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7f05c1d5476066defcdfacce1f52fc3cae3af1d3089727100c02ae92e5abbe0" +checksum = "8acc4422959dd87a76cb117c191dcbffc20467f06c9100b76721dab370f24d3a" dependencies = [ + "itoa", "serde", ] [[package]] name = "serde_repr" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab" +checksum = 
"1d89a8107374290037607734c0b73a85db7ed80cae314b3c5791f192a496e731" dependencies = [ "proc-macro2", "quote", - "syn 2.0.14", + "syn 2.0.23", ] [[package]] name = "serde_spanned" -version = "0.6.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4" +checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" dependencies = [ "serde", ] @@ -2509,14 +2468,14 @@ dependencies = [ [[package]] name = "serde_with" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "331bb8c3bf9b92457ab7abecf07078c13f7d270ba490103e84e8b014490cd0b0" +checksum = "07ff71d2c147a7b57362cead5e22f772cd52f6ab31cfcd9edcd7f6aeb2a0afbe" dependencies = [ "base64 0.13.1", "chrono", "hex", - "indexmap", + "indexmap 1.9.3", "serde", "serde_json", "serde_with_macros", @@ -2525,14 +2484,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859011bddcc11f289f07f467cc1fe01c7a941daa4d8f6c40d4d1c92eb6d9319c" +checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" dependencies = [ "darling", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.23", ] [[package]] @@ -2548,9 +2507,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if", "cpufeatures", @@ -2589,9 +2548,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" 
+checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" [[package]] name = "socket2" @@ -2644,9 +2603,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.14" +version = "2.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf316d5356ed6847742d036f8a39c3b8435cac10bd528a4bd461928a6ab34d5" +checksum = "59fb7d6d8281a51045d62b8eb3a7d1ce347b76f312af50cd3dc0af39c87c1737" dependencies = [ "proc-macro2", "quote", @@ -2667,24 +2626,16 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.5.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" +checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" dependencies = [ + "autocfg", "cfg-if", "fastrand", - "redox_syscall 0.3.5", + "redox_syscall", "rustix", - "windows-sys 0.45.0", -] - -[[package]] -name = "termcolor" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" -dependencies = [ - "winapi-util", + "windows-sys", ] [[package]] @@ -2695,29 +2646,29 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.40" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" +checksum = "c16a64ba9387ef3fdae4f9c1a7f07a0997fce91985c0336f1ddc1822b3b37802" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.40" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" +checksum = "d14928354b01c4d6a4f0e549069adef399a284e7995c7ccca94e8a07a5346c59" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.14", + "syn 2.0.23", ] [[package]] name = "time" -version = "0.3.20" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" +checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" dependencies = [ "itoa", "serde", @@ -2727,15 +2678,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" +checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" dependencies = [ "time-core", ] @@ -2757,11 +2708,12 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.27.0" +version = "1.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0de47a4eecbe11f498978a9b29d792f0d2692d1dd003650c24c76510e3bc001" +checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" dependencies = [ "autocfg", + "backtrace", "bytes", "libc", "mio", @@ -2770,18 +2722,18 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.45.0", + "windows-sys", ] [[package]] name = "tokio-macros" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.14", + "syn 2.0.23", ] [[package]] @@ -2807,9 +2759,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", @@ -2830,9 +2782,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.3" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b403acf6f2bb0859c93c7f0d967cb4a75a7ac552100f9322faf64dc047669b21" +checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" dependencies = [ "serde", "serde_spanned", @@ -2842,20 +2794,20 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.19.8" +version = "0.19.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13" +checksum = "c500344a19072298cd05a7224b3c0c629348b78692bf48466c5238656e315a78" dependencies = [ - "indexmap", + "indexmap 2.0.0", "serde", "serde_spanned", "toml_datetime", @@ -2916,7 +2868,7 @@ dependencies = [ "serde", "serde_with", "thiserror", - "toml 0.7.3", + "toml 0.7.6", "torrust-tracker-located-error", "torrust-tracker-primitives", "uuid", @@ -2990,9 +2942,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = 
"0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", ] @@ -3034,9 +2986,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "22049a19f4a68748a168c0fc439f9516686aa045927ff767eca0a85101fb6e73" [[package]] name = "unicode-normalization" @@ -3047,12 +2999,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-width" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" - [[package]] name = "untrusted" version = "0.7.1" @@ -3061,9 +3007,9 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", "idna", @@ -3072,9 +3018,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b55a3fef2a1e3b3a00ce878640918820d3c51081576ac657d23af9fc7928fdb" +checksum = "d023da39d1fde5a8a3fe1f3e01ca9632ada0a63e9797de55a879d6e2236277be" dependencies = [ "getrandom", ] @@ -3093,11 +3039,10 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = 
"bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -3109,9 +3054,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3119,24 +3064,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.23", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.34" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if", "js-sys", @@ -3146,9 +3091,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3156,28 +3101,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.23", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "web-sys" -version = "0.3.61" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -3209,15 +3154,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" @@ -3230,31 +3166,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets 0.48.0", -] - -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 
0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - -[[package]] -name = "windows-sys" -version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" -dependencies = [ - "windows-targets 0.42.2", + "windows-targets", ] [[package]] @@ -3263,117 +3175,60 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.0", + "windows-targets", ] [[package]] name = "windows-targets" -version = "0.42.2" +version = "0.48.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", ] -[[package]] -name = "windows-targets" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" -dependencies = [ - "windows_aarch64_gnullvm 0.48.0", - "windows_aarch64_msvc 0.48.0", - "windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm 0.48.0", - "windows_x86_64_msvc 0.48.0", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - [[package]] name = "windows_aarch64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" - [[package]] name = "windows_aarch64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" -[[package]] -name = "windows_i686_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" - [[package]] name = "windows_i686_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" -[[package]] -name = "windows_i686_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" - [[package]] name = "windows_i686_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" - [[package]] name = "windows_x86_64_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" - [[package]] name = "windows_x86_64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" - [[package]] name = "windows_x86_64_msvc" version = "0.48.0" @@ -3382,9 +3237,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.4.1" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae8970b36c66498d8ff1d66685dc86b91b29db0c7739899012f63a63814b4b28" +checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index ffe97a8fd..a8dae6105 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,7 +13,7 @@ repository = "https://github.com/torrust/torrust-tracker" version = "3.0.0-alpha.2" [dependencies] -tokio = { version = "1.26", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } +tokio = { version = "1.29", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2" serde_json = "1.0" From b8d521347943d39bf93562e71b2aace2902eec28 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 6 Jul 2023 16:45:14 +0100 Subject: [PATCH 0532/1003] fix: clippy warnings --- packages/configuration/src/lib.rs | 4 ++++ src/servers/apis/server.rs | 12 ++++++++++++ src/servers/apis/v1/middlewares/auth.rs | 4 +++- src/servers/http/server.rs | 5 +++++ src/servers/http/v1/launcher.rs | 26 +++++++++++++++++++++++++ src/servers/signals.rs | 8 ++++++++ 
src/tracker/databases/driver.rs | 4 ++++ src/tracker/databases/mod.rs | 2 +- src/tracker/mod.rs | 2 +- src/tracker/services/torrent.rs | 2 +- 10 files changed, 65 insertions(+), 4 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index e48355757..f785aa976 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -556,6 +556,10 @@ impl Configuration { /// Will return `Err` if `filename` does not exist or the user does not have /// permission to read it. Will also return `Err` if the configuration is /// not valid or cannot be encoded to TOML. + /// + /// # Panics + /// + /// Will panic if the configuration cannot be written into the file. pub fn save_to_file(&self, path: &str) -> Result<(), Error> { fs::write(path, self.to_toml()).expect("Could not write to file!"); Ok(()) diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 1e24e6655..716a36c11 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -83,6 +83,10 @@ impl ApiServer { /// # Errors /// /// It would return an error if no `SocketAddr` is returned after launching the server. + /// + /// # Panics + /// + /// It would panic if the bound socket address cannot be sent back to this starter. pub async fn start(self, tracker: Arc) -> Result, Error> { let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); @@ -229,6 +233,10 @@ impl Launcher { } /// Starts the API server with graceful shutdown on the current thread. +/// +/// # Panics +/// +/// It would panic if it fails to listen to shutdown signal. 
pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl Future> { let app = router(tracker); @@ -241,6 +249,10 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl Future( where B: Send, { - let Some(token) = params.token else { return AuthError::Unauthorized.into_response() }; + let Some(token) = params.token else { + return AuthError::Unauthorized.into_response(); + }; if !authenticate(&token, &config.http_api) { return AuthError::TokenNotValid.into_response(); diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 6a46b81df..6007d212d 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -104,6 +104,11 @@ impl HttpServer> { /// # Errors /// /// It would return an error if no `SocketAddr` is returned after launching the server. + /// + /// # Panics + /// + /// It would panic spawned HTTP server launcher cannot send the bound `SocketAddr` + /// back to the main thread. pub async fn start(self, tracker: Arc) -> Result>, Error> { let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); diff --git a/src/servers/http/v1/launcher.rs b/src/servers/http/v1/launcher.rs index 96dd1baac..b5faf8d46 100644 --- a/src/servers/http/v1/launcher.rs +++ b/src/servers/http/v1/launcher.rs @@ -22,6 +22,14 @@ pub enum Error { pub struct Launcher; impl Launcher { + /// It starts a new HTTP server instance from a TCP listener with graceful shutdown. + /// + /// # Panics + /// + /// Will panic if: + /// + /// - The TCP listener could not be bound. + /// - The Axum server crashes. pub fn start_from_tcp_listener_with_graceful_shutdown( tcp_listener: std::net::TcpListener, tracker: Arc, @@ -42,6 +50,14 @@ impl Launcher { }) } + /// It starts a new HTTPS server instance from a TCP listener with graceful shutdown. + /// + /// # Panics + /// + /// Will panic if: + /// + /// - The SSL certificate could not be read from the provided path or is invalid. 
+ /// - The Axum server crashes. pub fn start_tls_from_tcp_listener_with_graceful_shutdown( tcp_listener: std::net::TcpListener, (ssl_cert_path, ssl_key_path): (String, String), @@ -114,6 +130,11 @@ impl HttpServerLauncher for Launcher { } } +/// Starts a new HTTP server instance. +/// +/// # Panics +/// +/// Panics if the server could not listen to shutdown (ctrl+c) signal. pub fn start(socket_addr: std::net::SocketAddr, tracker: Arc) -> impl Future> { let app = router(tracker); @@ -125,6 +146,11 @@ pub fn start(socket_addr: std::net::SocketAddr, tracker: Arc) -> impl F }) } +/// Starts a new HTTPS server instance. +/// +/// # Panics +/// +/// Panics if the server could not listen to shutdown (ctrl+c) signal. pub fn start_tls( socket_addr: std::net::SocketAddr, ssl_config: RustlsConfig, diff --git a/src/servers/signals.rs b/src/servers/signals.rs index f0312b886..d34cca108 100644 --- a/src/servers/signals.rs +++ b/src/servers/signals.rs @@ -2,6 +2,10 @@ use log::info; /// Resolves on `ctrl_c` or the `terminate` signal. +/// +/// # Panics +/// +/// Will panic if the `ctrl_c` or `terminate` signal resolves with an error. pub async fn global_shutdown_signal() { let ctrl_c = async { tokio::signal::ctrl_c().await.expect("failed to install Ctrl+C handler"); @@ -25,6 +29,10 @@ pub async fn global_shutdown_signal() { } /// Resolves when the `stop_receiver` or the `global_shutdown_signal()` resolves. +/// +/// # Panics +/// +/// Will panic if the `stop_receiver` resolves with an error. pub async fn shutdown_signal(stop_receiver: tokio::sync::oneshot::Receiver) { let stop = async { stop_receiver.await.expect("Failed to install stop signal.") }; diff --git a/src/tracker/databases/driver.rs b/src/tracker/databases/driver.rs index 7115bae8e..4ff9314d2 100644 --- a/src/tracker/databases/driver.rs +++ b/src/tracker/databases/driver.rs @@ -41,6 +41,10 @@ use super::{Builder, Database}; /// # Errors /// /// This function will return an error if unable to connect to the database. 
+/// +/// # Panics +/// +/// This function will panic if unable to create database tables. pub fn build(driver: &DatabaseDriver, db_path: &str) -> Result, Error> { let database = match driver { DatabaseDriver::Sqlite3 => Builder::::build(db_path), diff --git a/src/tracker/databases/mod.rs b/src/tracker/databases/mod.rs index 3b02415df..e0a26be23 100644 --- a/src/tracker/databases/mod.rs +++ b/src/tracker/databases/mod.rs @@ -56,7 +56,7 @@ use self::error::Error; use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::auth::{self, Key}; -pub(self) struct Builder +struct Builder where T: Database, { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 0d3263988..4d7d8d37e 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -794,7 +794,7 @@ impl Tracker { } }); } else { - for (_, torrent_entry) in torrents_lock.iter_mut() { + for torrent_entry in (*torrents_lock).values_mut() { torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); } } diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index 3610d930c..0db044d07 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -99,7 +99,7 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op let Some(torrent_entry) = torrent_entry_option else { return None; - }; + }; let (seeders, completed, leechers) = torrent_entry.get_stats(); From 61c5551e576d7ea32f431fedae7d689d64e08fa6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 6 Jul 2023 17:19:07 +0100 Subject: [PATCH 0533/1003] chore: update dependencies - axum: from 0.6.16 to 0.6.18 - axum-client-ip: from 0.4.0 to 0.4.1 - reqwest: from 0.11.16 to 0.11.18 --- Cargo.lock | 4 ++-- Cargo.toml | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5e880459a..9cf3cb302 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3237,9 +3237,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
[[package]] name = "winnow" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448" +checksum = "a9482fe6ceabdf32f3966bfdd350ba69256a97c30253dc616fe0005af24f164e" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index a8dae6105..fec8784d4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,9 +36,9 @@ futures = "0.3" async-trait = "0.1" aquatic_udp_protocol = "0.8" uuid = { version = "1", features = ["v4"] } -axum = "0.6.16" +axum = "0.6.18" axum-server = { version = "0.4", features = ["tls-rustls"] } -axum-client-ip = "0.4" +axum-client-ip = "0.4.1" bip_bencode = "0.4" torrust-tracker-primitives = { version = "3.0.0-alpha.2", path = "packages/primitives" } torrust-tracker-configuration = { version = "3.0.0-alpha.2", path = "packages/configuration" } @@ -48,7 +48,7 @@ hyper = "0.14" [dev-dependencies] mockall = "0.11" -reqwest = { version = "0.11", features = ["json"] } +reqwest = { version = "0.11.18", features = ["json"] } serde_urlencoded = "0.7" serde_repr = "0.1" serde_bytes = "0.11" From 0da0d23aaa65a23b00481fe1ef36012603dbf527 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Jul 2023 10:19:47 +0100 Subject: [PATCH 0534/1003] chore(deps): update dependencies - bump serde_with from 2.3.3 to 3.0.0 - bump r2d2_sqlite from 0.21.0 to 0.22.0 - bump reqwest from 0.11.16 to 0.11.18 - bump axum from 0.6.16 to 0.6.18 - bump axum-client-ip from 0.4.0 to 0.4.1 --- Cargo.lock | 64 +++++++++++++++++-------------- Cargo.toml | 4 +- packages/configuration/Cargo.toml | 2 +- 3 files changed, 39 insertions(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9cf3cb302..5c72bef9e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -116,7 +116,7 @@ checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" dependencies = [ "async-trait", "axum-core", - "bitflags", + "bitflags 1.3.2", "bytes", "futures-util", "http",
@@ -238,7 +238,7 @@ version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cexpr", "clang-sys", "lazy_static", @@ -266,6 +266,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" + [[package]] name = "bitvec" version = "1.0.1" @@ -1355,9 +1361,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.25.2" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" +checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" dependencies = [ "cc", "pkg-config", @@ -1548,7 +1554,7 @@ dependencies = [ "base64 0.13.1", "bigdecimal", "bindgen", - "bitflags", + "bitflags 1.3.2", "bitvec", "byteorder", "bytes", @@ -1711,7 +1717,7 @@ version = "0.10.55" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "foreign-types", "libc", @@ -2007,12 +2013,13 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f5d0337e99cd5cacd91ffc326c6cc9d8078def459df560c4f9bf9ba4a51034" +checksum = "99f31323d6161385f385046738df520e0e8694fa74852d35891fc0be08348ddc" dependencies = [ "r2d2", "rusqlite", + "uuid", ] [[package]] @@ -2057,7 +2064,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -2185,17 +2192,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" dependencies = [ "base64 0.13.1", - "bitflags", + "bitflags 1.3.2", "serde", ] [[package]] name = "rusqlite" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" +checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" dependencies = [ - "bitflags", + "bitflags 2.3.3", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -2258,7 +2265,7 @@ version = "0.37.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" dependencies = [ - "bitflags", + "bitflags 1.3.2", "errno", "io-lifetimes", "libc", @@ -2351,7 +2358,7 @@ version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -2376,9 +2383,9 @@ checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "serde" -version = "1.0.166" +version = "1.0.167" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01b7404f9d441d3ad40e6a636a7782c377d2abdbe4fa2440e2edcc2f4f10db8" +checksum = "7daf513456463b42aa1d94cff7e0c24d682b429f020b9afa4f5ba5c40a22b237" dependencies = [ "serde_derive", ] @@ -2404,9 +2411,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.166" +version = "1.0.167" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5dd83d6dde2b6b2d466e14d9d1acce8816dedee94f735eac6395808b3483c6d6" +checksum = "b69b106b68bc8054f0e974e70d19984040f8a5cf9215ca82626ea4853f82c4b9" dependencies = [ "proc-macro2", "quote", @@ -2468,11 +2475,11 @@ dependencies = [ [[package]] name = "serde_with" -version = "2.3.3" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07ff71d2c147a7b57362cead5e22f772cd52f6ab31cfcd9edcd7f6aeb2a0afbe" +checksum = "9f02d8aa6e3c385bf084924f660ce2a3a6bd333ba55b35e8590b321f35d88513" dependencies = [ - "base64 0.13.1", + "base64 0.21.2", "chrono", "hex", "indexmap 1.9.3", @@ -2484,9 +2491,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "2.3.3" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" +checksum = "edc7d5d3932fb12ce722ee5e64dd38c504efba37567f0c402f6ca728c3b8b070" dependencies = [ "darling", "proc-macro2", @@ -2646,18 +2653,18 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.41" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c16a64ba9387ef3fdae4f9c1a7f07a0997fce91985c0336f1ddc1822b3b37802" +checksum = "a35fc5b8971143ca348fa6df4f024d4d55264f3468c71ad1c2f365b0a4d58c42" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.41" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d14928354b01c4d6a4f0e549069adef399a284e7995c7ccca94e8a07a5346c59" +checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" dependencies = [ "proc-macro2", "quote", @@ -3023,6 +3030,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d023da39d1fde5a8a3fe1f3e01ca9632ada0a63e9797de55a879d6e2236277be" dependencies = [ "getrandom", + "rand", ] [[package]] diff --git 
a/Cargo.toml b/Cargo.toml index fec8784d4..a25ead8f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,7 @@ tokio = { version = "1.29", features = ["rt-multi-thread", "net", "sync", "macro serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2" serde_json = "1.0" -serde_with = "2.0" +serde_with = "3.0" percent-encoding = "2.2" binascii = "0.1" lazy_static = "1.4" @@ -28,7 +28,7 @@ fern = "0.6" chrono = { version = "0.4.24", default-features = false, features = ["clock"] } r2d2 = "0.8" r2d2_mysql = "23.0" -r2d2_sqlite = { version = "0.21", features = ["bundled"] } +r2d2_sqlite = { version = "0.22", features = ["bundled"] } rand = "0.8" derive_more = "0.99" thiserror = "1.0" diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 6f9e5cbc5..dd4938de7 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -8,7 +8,7 @@ edition.workspace = true [dependencies] serde = { version = "1.0", features = ["derive"] } -serde_with = "2.0" +serde_with = "3.0" config = "0.13" toml = "0.7" log = { version = "0.4", features = ["release_max_level_info"] } From f256a6b201b5d5e007bfdd6666efda1c2709e452 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Jul 2023 11:22:45 +0100 Subject: [PATCH 0535/1003] chore(deps): bump axum-server from 0.4.7 to 0.5.1 --- Cargo.lock | 35 +++++++++++++++++------------------ Cargo.toml | 2 +- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5c72bef9e..e7febbe95 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -170,9 +170,9 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.4.7" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bace45b270e36e3c27a190c65883de6dfc9f1d18c829907c127464815dc67b24" +checksum = "447f28c85900215cc1bea282f32d4a2f22d55c5a300afdfbc661c8d6a632e063" dependencies = [ "arc-swap", "bytes", @@ -2275,14 +2275,14 @@ dependencies = [ [[package]] 
name = "rustls" -version = "0.20.8" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +checksum = "b19faa85ecb5197342b54f987b142fb3e30d0c90da40f80ef4fa9a726e6676ed" dependencies = [ "log", "ring", + "rustls-webpki", "sct", - "webpki", ] [[package]] @@ -2294,6 +2294,16 @@ dependencies = [ "base64 0.21.2", ] +[[package]] +name = "rustls-webpki" +version = "0.101.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15f36a6828982f422756984e47912a7a51dcbc2a197aa791158f8ca61cd8204e" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustversion" version = "1.0.13" @@ -2755,13 +2765,12 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.23.4" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ "rustls", "tokio", - "webpki", ] [[package]] @@ -3136,16 +3145,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "winapi" version = "0.3.9" diff --git a/Cargo.toml b/Cargo.toml index a25ead8f1..1a46af194 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,7 +37,7 @@ async-trait = "0.1" aquatic_udp_protocol = "0.8" uuid = { version = "1", features = ["v4"] } axum = "0.6.18" -axum-server = { version = "0.4", features = ["tls-rustls"] } +axum-server = { version = "0.5", features = ["tls-rustls"] } axum-client-ip = "0.4.1" bip_bencode = "0.4" torrust-tracker-primitives = { version = "3.0.0-alpha.2", path = "packages/primitives" } From 
402e6a15a1232e094d8443b4d68a5edf9f5ba6bb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Jul 2023 11:32:37 +0100 Subject: [PATCH 0536/1003] docs: normalize license We will use the same dual license in all Torrust repos for now. We will open a new discussion to reach a community consensus. --- COPYRIGHT | 11 + Cargo.toml | 3 +- LICENSE-AGPL_3_0 | 662 ++++++++++++++++++++++++++++++ LICENSE-MIT_0 | 14 + README.md | 4 +- packages/configuration/Cargo.toml | 2 +- packages/located-error/Cargo.toml | 2 +- packages/primitives/Cargo.toml | 2 +- packages/test-helpers/Cargo.toml | 2 +- 9 files changed, 694 insertions(+), 8 deletions(-) create mode 100644 COPYRIGHT create mode 100644 LICENSE-AGPL_3_0 create mode 100644 LICENSE-MIT_0 diff --git a/COPYRIGHT b/COPYRIGHT new file mode 100644 index 000000000..6eef820ec --- /dev/null +++ b/COPYRIGHT @@ -0,0 +1,11 @@ +Copyright 2023 in the Torrust-Tracker project are retained by their contributors. No +copyright assignment is required to contribute to the Torrust-Tracker project. + +Some files include explicit copyright notices and/or license notices. + +Except as otherwise noted (below and/or in individual files), Torrust-Tracker is +licensed under the GNU Affero General Public License, Version 3.0 . This license applies to all files in the Torrust-Tracker project, except as noted below. + +Except as otherwise noted (below and/or in individual files), Torrust-Tracker is licensed under the MIT-0 license for all commits made after 5 years of merging. This license applies to the version of the files merged into the Torrust-Tracker project at the time of merging, and does not apply to subsequent updates or revisions to those files. + +The contributors to the Torrust-Tracker project disclaim all liability for any damages or losses that may arise from the use of the project. 
diff --git a/Cargo.toml b/Cargo.toml index 1a46af194..4f396d5b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "torrust-tracker" description = "A feature rich BitTorrent tracker." -license = "AGPL-3.0" +license-file.workspace = true authors.workspace = true edition.workspace = true version.workspace = true [workspace.package] +license-file = "COPYRIGHT" authors = ["Nautilus Cyberneering , Mick van Dijke "] edition = "2021" repository = "https://github.com/torrust/torrust-tracker" diff --git a/LICENSE-AGPL_3_0 b/LICENSE-AGPL_3_0 new file mode 100644 index 000000000..2beb9e163 --- /dev/null +++ b/LICENSE-AGPL_3_0 @@ -0,0 +1,662 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". 
"Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. + diff --git a/LICENSE-MIT_0 b/LICENSE-MIT_0 new file mode 100644 index 000000000..fc06cc4fe --- /dev/null +++ b/LICENSE-MIT_0 @@ -0,0 +1,14 @@ +MIT No Attribution + +Permission is hereby granted, free of charge, to any person obtaining a copy of this +software and associated documentation files (the "Software"), to deal in the Software +without restriction, including without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/README.md b/README.md index c3d0a127b..6c20ee20f 100644 --- a/README.md +++ b/README.md @@ -73,9 +73,7 @@ How can you contribute? ## License -The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). - -There is an ongoing discussion about the license of the project. You can follow the discussion [here](https://github.com/torrust/torrust-tracker/pull/251). +The project is licensed under a dual license. See [COPYRIGHT](./COPYRIGHT). 
## Acknowledgments diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index dd4938de7..aca3808d2 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "torrust-tracker-configuration" description = "A library to provide configuration to the Torrust Tracker." -license = "AGPL-3.0" +license-file.workspace = true version.workspace = true authors.workspace = true edition.workspace = true diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index acd13def3..95cc1fd18 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "torrust-tracker-located-error" description = "A library to provide error decorator with the location and the source of the original error." -license = "AGPL-3.0" +license-file.workspace = true version.workspace = true authors.workspace = true edition.workspace = true diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index bba45cf5d..a2e17b63c 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "torrust-tracker-primitives" description = "A library with the primitive types shared by the Torrust tracker packages." -license = "AGPL-3.0" +license-file.workspace = true version.workspace = true authors.workspace = true edition.workspace = true diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 5d360d101..6812a90ae 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "torrust-tracker-test-helpers" description = "A library providing helpers for testing the Torrust tracker." 
-license = "AGPL-3.0" +license-file.workspace = true version.workspace = true authors.workspace = true edition.workspace = true From 38d1d494ebab90f8be53ec6966136f3eba57ed22 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Jul 2023 11:59:28 +0100 Subject: [PATCH 0537/1003] chore(release): 3.0.0-alpha.3 --- Cargo.lock | 10 +++++----- Cargo.toml | 10 +++++----- README.md | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/test-helpers/Cargo.toml | 4 ++-- src/lib.rs | 2 +- 6 files changed, 19 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e7febbe95..20557f7c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2832,7 +2832,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.2" +version = "3.0.0-alpha.3" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -2877,7 +2877,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.2" +version = "3.0.0-alpha.3" dependencies = [ "config", "log", @@ -2892,7 +2892,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.2" +version = "3.0.0-alpha.3" dependencies = [ "log", "thiserror", @@ -2900,7 +2900,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.2" +version = "3.0.0-alpha.3" dependencies = [ "derive_more", "serde", @@ -2908,7 +2908,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.2" +version = "3.0.0-alpha.3" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index 4f396d5b8..4b4a05a2e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,7 @@ license-file = "COPYRIGHT" authors = ["Nautilus Cyberneering , Mick van Dijke "] edition = "2021" repository = "https://github.com/torrust/torrust-tracker" -version = "3.0.0-alpha.2" +version = "3.0.0-alpha.3" [dependencies] tokio = { version = "1.29", features = ["rt-multi-thread", "net", "sync", "macros", 
"signal"] } @@ -41,9 +41,9 @@ axum = "0.6.18" axum-server = { version = "0.5", features = ["tls-rustls"] } axum-client-ip = "0.4.1" bip_bencode = "0.4" -torrust-tracker-primitives = { version = "3.0.0-alpha.2", path = "packages/primitives" } -torrust-tracker-configuration = { version = "3.0.0-alpha.2", path = "packages/configuration" } -torrust-tracker-located-error = { version = "3.0.0-alpha.2", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.3", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.3", path = "packages/configuration" } +torrust-tracker-located-error = { version = "3.0.0-alpha.3", path = "packages/located-error" } multimap = "0.9" hyper = "0.14" @@ -54,7 +54,7 @@ serde_urlencoded = "0.7" serde_repr = "0.1" serde_bytes = "0.11" local-ip-address = "0.5" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.2", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.3", path = "packages/test-helpers" } [workspace] members = [ diff --git a/README.md b/README.md index 6c20ee20f..9d2838a88 100644 --- a/README.md +++ b/README.md @@ -56,9 +56,9 @@ After running the tracker these services will be available: ## Documentation * [Crate documentation](https://docs.rs/torrust-tracker/). -* [API `v1`](https://docs.rs/torrust-tracker/3.0.0-alpha.2/torrust_tracker/servers/apis/v1). -* [HTTP Tracker](https://docs.rs/torrust-tracker/3.0.0-alpha.2/torrust_tracker/servers/http). -* [UDP Tracker](https://docs.rs/torrust-tracker/3.0.0-alpha.2/torrust_tracker/servers/udp). +* [API `v1`](https://docs.rs/torrust-tracker/3.0.0-alpha.3/torrust_tracker/servers/apis/v1). +* [HTTP Tracker](https://docs.rs/torrust-tracker/3.0.0-alpha.3/torrust_tracker/servers/http). +* [UDP Tracker](https://docs.rs/torrust-tracker/3.0.0-alpha.3/torrust_tracker/servers/udp). ## Contributing @@ -68,7 +68,7 @@ How can you contribute? * Bug reports and feature requests. 
* Code contributions. You can start by looking at the issues labeled ["good first issues"](https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22). -* Documentation improvements. Check the [documentation](https://docs.rs/torrust-tracker/) and [API documentation](https://docs.rs/torrust-tracker/3.0.0-alpha.2/torrust_tracker/servers/apis/v1) for typos, errors, or missing information. +* Documentation improvements. Check the [documentation](https://docs.rs/torrust-tracker/) and [API documentation](https://docs.rs/torrust-tracker/3.0.0-alpha.3/torrust_tracker/servers/apis/v1) for typos, errors, or missing information. * Participation in the community. You can help by answering questions in the [discussions](https://github.com/torrust/torrust-tracker/discussions). ## License diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index aca3808d2..d28df4798 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -13,8 +13,8 @@ config = "0.13" toml = "0.7" log = { version = "0.4", features = ["release_max_level_info"] } thiserror = "1.0" -torrust-tracker-primitives = { version = "3.0.0-alpha.2", path = "../primitives" } -torrust-tracker-located-error = { version = "3.0.0-alpha.2", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.3", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.3", path = "../located-error" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 6812a90ae..e38fcf6f6 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -9,5 +9,5 @@ edition.workspace = true [dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.2", path = "../configuration"} -torrust-tracker-primitives = { version = "3.0.0-alpha.2", 
path = "../primitives"} +torrust-tracker-configuration = { version = "3.0.0-alpha.3", path = "../configuration"} +torrust-tracker-primitives = { version = "3.0.0-alpha.3", path = "../primitives"} diff --git a/src/lib.rs b/src/lib.rs index 744642a3f..d14a3ada1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -148,7 +148,7 @@ //! --publish 7070:7070/tcp \ //! --publish 1212:1212/tcp \ //! --volume "$(pwd)/storage":"/app/storage" \ -//! torrust/tracker:3.0.0-alpha.2 +//! torrust/tracker:3.0.0-alpha.3 //! ``` //! //! For more information about using docker visit the [tracker docker documentation](https://github.com/torrust/torrust-tracker/tree/develop/docker). From 358c81ea5fa40a341f253ae5fdca9949a60c4f50 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Jul 2023 17:10:04 +0100 Subject: [PATCH 0538/1003] ci: fix missing doc attribute while publishing crates It fixes the warning: ``` warning: manifest has no documentation, homepage or repository. ``` --- Cargo.toml | 1 + packages/configuration/Cargo.toml | 3 ++- packages/located-error/Cargo.toml | 3 ++- packages/primitives/Cargo.toml | 3 ++- packages/test-helpers/Cargo.toml | 3 ++- 5 files changed, 9 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 4b4a05a2e..152495463 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ description = "A feature rich BitTorrent tracker." license-file.workspace = true authors.workspace = true edition.workspace = true +repository.workspace = true version.workspace = true [workspace.package] diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index d28df4798..a6cf57710 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -2,9 +2,10 @@ name = "torrust-tracker-configuration" description = "A library to provide configuration to the Torrust Tracker." 
license-file.workspace = true -version.workspace = true authors.workspace = true edition.workspace = true +repository.workspace = true +version.workspace = true [dependencies] serde = { version = "1.0", features = ["derive"] } diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index 95cc1fd18..19707e7e9 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -2,9 +2,10 @@ name = "torrust-tracker-located-error" description = "A library to provide error decorator with the location and the source of the original error." license-file.workspace = true -version.workspace = true authors.workspace = true edition.workspace = true +repository.workspace = true +version.workspace = true [dependencies] log = { version = "0.4", features = ["release_max_level_info"] } diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index a2e17b63c..7576e06d8 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -2,9 +2,10 @@ name = "torrust-tracker-primitives" description = "A library with the primitive types shared by the Torrust tracker packages." license-file.workspace = true -version.workspace = true authors.workspace = true edition.workspace = true +repository.workspace = true +version.workspace = true [dependencies] serde = { version = "1.0", features = ["derive"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index e38fcf6f6..e9d86a589 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -2,9 +2,10 @@ name = "torrust-tracker-test-helpers" description = "A library providing helpers for testing the Torrust tracker." 
license-file.workspace = true -version.workspace = true authors.workspace = true edition.workspace = true +repository.workspace = true +version.workspace = true [dependencies] lazy_static = "1.4" From 058f3ac5ba24e3f16f47d8c21c8e3a8f78a8621d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 11 Jul 2023 11:29:36 +0100 Subject: [PATCH 0539/1003] feat: create storage folder in install script --- bin/install.sh | 3 +++ config.toml.local | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/bin/install.sh b/bin/install.sh index d4314ce93..ef469a939 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -5,6 +5,9 @@ if ! [ -f "./config.toml" ]; then cp ./config.toml.local ./config.toml fi +# Generate storage directory if it does not exist +mkdir -p "./storage/database" + # Generate the sqlite database if it does not exist if ! [ -f "./storage/database/data.db" ]; then # todo: it should get the path from config.toml and only do it when we use sqlite diff --git a/config.toml.local b/config.toml.local index baf272d5a..be6a11a56 100644 --- a/config.toml.local +++ b/config.toml.local @@ -4,11 +4,11 @@ db_driver = "Sqlite3" db_path = "./storage/database/data.db" announce_interval = 120 min_announce_interval = 120 -max_peer_timeout = 900 on_reverse_proxy = false external_ip = "0.0.0.0" tracker_usage_statistics = true persistent_torrent_completed_stat = false +max_peer_timeout = 900 inactive_peer_cleanup_interval = 600 remove_peerless_torrents = true From d321ab0d039b739db2a8db73af1a00188150c80d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 10 Feb 2023 13:55:50 +0100 Subject: [PATCH 0540/1003] refactor: remove wildcard imports from time_extent.rs --- src/shared/clock/time_extent.rs | 38 ++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/src/shared/clock/time_extent.rs b/src/shared/clock/time_extent.rs index 2f9e003be..9c20de9c1 100644 --- a/src/shared/clock/time_extent.rs +++ 
b/src/shared/clock/time_extent.rs @@ -285,7 +285,6 @@ pub type DefaultTimeExtentMaker = StoppedTimeExtentMaker; #[cfg(test)] mod test { - use crate::shared::clock::time_extent::{ checked_duration_from_nanos, Base, DefaultTimeExtentMaker, Extent, Make, Multiplier, Product, TimeExtent, MAX, ZERO, }; @@ -296,7 +295,8 @@ mod test { mod fn_checked_duration_from_nanos { use std::time::Duration; - use super::*; + use crate::shared::clock::time_extent::checked_duration_from_nanos; + use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; const NANOS_PER_SEC: u32 = 1_000_000_000; @@ -335,11 +335,9 @@ mod test { } mod time_extent { - use super::*; mod fn_default { - - use super::*; + use crate::shared::clock::time_extent::{TimeExtent, ZERO}; #[test] fn it_should_default_initialize_to_zero() { @@ -348,7 +346,8 @@ mod test { } mod fn_from_sec { - use super::*; + use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; + use crate::shared::clock::time_extent::{Multiplier, TimeExtent, ZERO}; #[test] fn it_should_make_empty_for_zero() { @@ -364,7 +363,8 @@ mod test { } mod fn_new { - use super::*; + use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; + use crate::shared::clock::time_extent::{Base, Extent, Multiplier, TimeExtent, ZERO}; #[test] fn it_should_make_empty_for_zero() { @@ -386,7 +386,8 @@ mod test { mod fn_increase { use std::num::IntErrorKind; - use super::*; + use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; + use crate::shared::clock::time_extent::{Extent, TimeExtent, ZERO}; #[test] fn it_should_not_increase_for_zero() { @@ -413,7 +414,8 @@ mod test { mod fn_decrease { use std::num::IntErrorKind; - use super::*; + use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; + use crate::shared::clock::time_extent::{Extent, TimeExtent, ZERO}; #[test] fn it_should_not_decrease_for_zero() { @@ -438,7 +440,8 @@ mod test { } mod fn_total { - use super::*; + use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; + use 
crate::shared::clock::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; #[test] fn it_should_be_zero_for_zero() { @@ -485,7 +488,8 @@ mod test { } mod fn_total_next { - use super::*; + use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; + use crate::shared::clock::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; #[test] fn it_should_be_zero_for_zero() { @@ -539,10 +543,11 @@ mod test { } mod make_time_extent { - use super::*; mod fn_now { - use super::*; + use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; + use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make, TimeExtent}; + use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; #[test] fn it_should_give_a_time_extent() { @@ -580,7 +585,9 @@ mod test { mod fn_now_after { use std::time::Duration; - use super::*; + use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; + use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make}; + use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; #[test] fn it_should_give_a_time_extent() { @@ -617,7 +624,8 @@ mod test { mod fn_now_before { use std::time::Duration; - use super::*; + use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make, TimeExtent}; + use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; #[test] fn it_should_give_a_time_extent() { From eb838dd2abaff2cd7daac719b69b302513254157 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 2 Aug 2023 18:05:24 +0100 Subject: [PATCH 0541/1003] dev: tighten lint for build and clippy --- .cargo/config.toml | 20 +++++++++++ .vscode/settings.json | 21 ++++++++++-- packages/located-error/src/lib.rs | 2 +- src/servers/apis/server.rs | 2 +- .../apis/v1/context/auth_key/resources.rs | 4 +++ src/servers/http/v1/query.rs | 6 ++-- src/servers/http/v1/requests/announce.rs | 34 ++++++++----------- src/servers/http/v1/requests/scrape.rs | 10 +++--- 
src/servers/http/v1/responses/announce.rs | 2 +- src/servers/udp/handlers.rs | 2 ++ src/servers/udp/server.rs | 2 +- src/shared/bit_torrent/info_hash.rs | 6 ++-- src/shared/clock/time_extent.rs | 5 +-- src/tracker/mod.rs | 13 +++---- tests/servers/http/requests/announce.rs | 8 ++--- tests/servers/http/requests/scrape.rs | 4 +-- tests/servers/mod.rs | 2 -- 17 files changed, 86 insertions(+), 57 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 71480e92d..a88db5f38 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -3,3 +3,23 @@ cov = "llvm-cov" cov-lcov = "llvm-cov --lcov --output-path=./.coverage/lcov.info" cov-html = "llvm-cov --html" time = "build --timings --all-targets" + +[build] +rustflags = [ + "-D", + "warnings", + "-D", + "future-incompatible", + "-D", + "let-underscore", + "-D", + "nonstandard-style", + "-D", + "rust-2018-compatibility", + "-D", + "rust-2018-idioms", + "-D", + "rust-2021-compatibility", + "-D", + "unused", +] diff --git a/.vscode/settings.json b/.vscode/settings.json index 94f199bd6..78239b757 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -2,7 +2,22 @@ "[rust]": { "editor.formatOnSave": true }, - "rust-analyzer.checkOnSave.command": "clippy", - "rust-analyzer.checkOnSave.allTargets": true, - "rust-analyzer.checkOnSave.extraArgs": ["--","-W","clippy::pedantic"], + "rust-analyzer.checkOnSave": true, + "rust-analyzer.check.command": "clippy", + "rust-analyzer.check.allTargets": true, + "rust-analyzer.check.extraArgs": [ + "--", + "-D", + "clippy::correctness", + "-D", + "clippy::suspicious", + "-W", + "clippy::complexity", + "-W", + "clippy::perf", + "-W", + "clippy::style", + "-W", + "clippy::pedantic", + ], } \ No newline at end of file diff --git a/packages/located-error/src/lib.rs b/packages/located-error/src/lib.rs index 67c432528..bf8618686 100644 --- a/packages/located-error/src/lib.rs +++ b/packages/located-error/src/lib.rs @@ -128,7 +128,7 @@ mod tests { fn 
error_should_include_location() { let e = TestError::Test; - let b: LocatedError = Located(e).into(); + let b: LocatedError<'_, TestError> = Located(e).into(); let l = get_caller_location(); assert_eq!(b.location.file(), l.file()); diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 716a36c11..778a17d90 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -128,7 +128,7 @@ impl ApiServer { .send(0) .map_err(|_| Error::Error("Task killer channel was closed.".to_string()))?; - let _: Result<(), tokio::task::JoinError> = self.state.task.await; + drop(self.state.task.await); Ok(ApiServer { cfg: self.cfg, diff --git a/src/servers/apis/v1/context/auth_key/resources.rs b/src/servers/apis/v1/context/auth_key/resources.rs index 3eeafbda0..5099fad8b 100644 --- a/src/servers/apis/v1/context/auth_key/resources.rs +++ b/src/servers/apis/v1/context/auth_key/resources.rs @@ -27,6 +27,7 @@ impl From for auth::ExpiringKey { } } +#[allow(deprecated)] impl From for AuthKey { fn from(auth_key: auth::ExpiringKey) -> Self { AuthKey { @@ -63,6 +64,7 @@ mod tests { } #[test] + #[allow(deprecated)] fn it_should_be_convertible_into_an_auth_key() { let auth_key_resource = AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line @@ -80,6 +82,7 @@ mod tests { } #[test] + #[allow(deprecated)] fn it_should_be_convertible_from_an_auth_key() { let auth_key = auth::ExpiringKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line @@ -97,6 +100,7 @@ mod tests { } #[test] + #[allow(deprecated)] fn it_should_be_convertible_into_json() { assert_eq!( serde_json::to_string(&AuthKey { diff --git a/src/servers/http/v1/query.rs b/src/servers/http/v1/query.rs index 6bbdc63e9..745796b61 100644 --- a/src/servers/http/v1/query.rs +++ b/src/servers/http/v1/query.rs @@ -137,7 +137,7 @@ impl From> for Query { } impl std::fmt::Display for Query { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let query = self .params .iter_all() @@ -185,7 +185,7 @@ impl FromStr for NameValuePair { } impl std::fmt::Display for NameValuePair { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}={}", self.name, self.value) } } @@ -208,7 +208,7 @@ impl FieldValuePairSet { } impl std::fmt::Display for FieldValuePairSet { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let query = self .pairs .iter() diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index 1cf632eb5..c330ca3bd 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -166,7 +166,7 @@ impl FromStr for Event { } impl fmt::Display for Event { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Event::Started => write!(f, "started"), Event::Stopped => write!(f, "stopped"), @@ -194,7 +194,7 @@ pub enum Compact { } impl fmt::Display for Compact { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Compact::Accepted => write!(f, "1"), Compact::NotAccepted => write!(f, "0"), @@ -264,12 +264,10 @@ fn extract_info_hash(query: &Query) -> Result })?, ) } - None => { - return Err(ParseAnnounceQueryError::MissingParam { - location: Location::caller(), - param_name: INFO_HASH.to_owned(), - }) - } + None => Err(ParseAnnounceQueryError::MissingParam { + location: Location::caller(), + param_name: INFO_HASH.to_owned(), + }), } } @@ -282,12 +280,10 @@ fn extract_peer_id(query: &Query) -> Result { source: Located(err).into(), })?, ), - None => { - return Err(ParseAnnounceQueryError::MissingParam { - location: 
Location::caller(), - param_name: PEER_ID.to_owned(), - }) - } + None => Err(ParseAnnounceQueryError::MissingParam { + location: Location::caller(), + param_name: PEER_ID.to_owned(), + }), } } @@ -298,12 +294,10 @@ fn extract_port(query: &Query) -> Result { param_value: raw_param.clone(), location: Location::caller(), })?), - None => { - return Err(ParseAnnounceQueryError::MissingParam { - location: Location::caller(), - param_name: PORT.to_owned(), - }) - } + None => Err(ParseAnnounceQueryError::MissingParam { + location: Location::caller(), + param_name: PORT.to_owned(), + }), } } diff --git a/src/servers/http/v1/requests/scrape.rs b/src/servers/http/v1/requests/scrape.rs index 227ea74ae..7c52b9fc4 100644 --- a/src/servers/http/v1/requests/scrape.rs +++ b/src/servers/http/v1/requests/scrape.rs @@ -74,12 +74,10 @@ fn extract_info_hashes(query: &Query) -> Result, ParseScrapeQueryE Ok(info_hashes) } - None => { - return Err(ParseScrapeQueryError::MissingParam { - location: Location::caller(), - param_name: INFO_HASH.to_owned(), - }) - } + None => Err(ParseScrapeQueryError::MissingParam { + location: Location::caller(), + param_name: INFO_HASH.to_owned(), + }), } } diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index 8fbe5df35..0cd62578a 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -116,7 +116,7 @@ pub struct Peer { impl Peer { #[must_use] - pub fn ben_map(&self) -> BencodeMut { + pub fn ben_map(&self) -> BencodeMut<'_> { ben_map! 
{ "peer id" => ben_bytes!(self.peer_id.clone().to_vec()), "ip" => ben_bytes!(self.ip.to_string()), diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index e94e0292f..64d60e549 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -104,6 +104,7 @@ pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, t /// # Errors /// /// Will return `Error` if unable to `authenticate_request`. +#[allow(deprecated)] pub async fn authenticate(info_hash: &InfoHash, tracker: &Tracker) -> Result<(), Error> { tracker .authenticate_request(info_hash, &None) @@ -225,6 +226,7 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra let info_hash = file.0; let swarm_metadata = file.1; + #[allow(deprecated)] let scrape_entry = if tracker.authenticate_request(info_hash, &None).await.is_ok() { #[allow(clippy::cast_possible_truncation)] TorrentScrapeStatistics { diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 6b4b18831..3bb5bd013 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -143,7 +143,7 @@ impl UdpServer { pub async fn stop(self) -> Result, Error> { self.state.stop_job_sender.send(1).map_err(|e| Error::Error(e.to_string()))?; - let _: Result<(), tokio::task::JoinError> = self.state.job.await; + drop(self.state.job.await); let stopped_api_server: UdpServer = UdpServer { cfg: self.cfg, diff --git a/src/shared/bit_torrent/info_hash.rs b/src/shared/bit_torrent/info_hash.rs index 7392c791d..20c3cb38b 100644 --- a/src/shared/bit_torrent/info_hash.rs +++ b/src/shared/bit_torrent/info_hash.rs @@ -167,7 +167,7 @@ impl InfoHash { } impl std::fmt::Display for InfoHash { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut chars = [0u8; 40]; binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); write!(f, "{}", std::str::from_utf8(&chars).unwrap()) @@ 
-195,7 +195,7 @@ impl Ord for InfoHash { impl std::cmp::PartialOrd for InfoHash { fn partial_cmp(&self, other: &InfoHash) -> Option { - self.0.partial_cmp(&other.0) + Some(self.cmp(other)) } } @@ -271,7 +271,7 @@ struct InfoHashVisitor; impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { type Value = InfoHash; - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(formatter, "a 40 character long hash") } diff --git a/src/shared/clock/time_extent.rs b/src/shared/clock/time_extent.rs index 9c20de9c1..a5a359e52 100644 --- a/src/shared/clock/time_extent.rs +++ b/src/shared/clock/time_extent.rs @@ -285,10 +285,7 @@ pub type DefaultTimeExtentMaker = StoppedTimeExtentMaker; #[cfg(test)] mod test { - use crate::shared::clock::time_extent::{ - checked_duration_from_nanos, Base, DefaultTimeExtentMaker, Extent, Make, Multiplier, Product, TimeExtent, MAX, ZERO, - }; - use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; + use crate::shared::clock::time_extent::TimeExtent; const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239_812_388_723); diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 4d7d8d37e..63c8b96d6 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -732,10 +732,11 @@ impl Tracker { // todo: move this action to a separate worker if self.config.persistent_torrent_completed_stat && stats_updated { - let _: Result<(), databases::error::Error> = self - .database - .save_persistent_torrent(info_hash, torrent_entry.completed) - .await; + drop( + self.database + .save_persistent_torrent(info_hash, torrent_entry.completed) + .await, + ); } let (seeders, completed, leechers) = torrent_entry.get_stats(); @@ -966,10 +967,10 @@ impl Tracker { return Ok(()); } - return Err(Error::TorrentNotWhitelisted { + Err(Error::TorrentNotWhitelisted { info_hash: *info_hash, location: Location::caller(), - }); + }) } /// It 
adds a torrent to the whitelist. diff --git a/tests/servers/http/requests/announce.rs b/tests/servers/http/requests/announce.rs index 20c5ddaa7..f7f25da3e 100644 --- a/tests/servers/http/requests/announce.rs +++ b/tests/servers/http/requests/announce.rs @@ -21,7 +21,7 @@ pub struct Query { } impl fmt::Display for Query { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.build()) } } @@ -57,7 +57,7 @@ pub enum Event { } impl fmt::Display for Event { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { //Event::Started => write!(f, "started"), //Event::Stopped => write!(f, "stopped"), @@ -74,7 +74,7 @@ pub enum Compact { } impl fmt::Display for Compact { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Compact::Accepted => write!(f, "1"), Compact::NotAccepted => write!(f, "0"), @@ -163,7 +163,7 @@ pub struct QueryParams { } impl std::fmt::Display for QueryParams { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut params = vec![]; if let Some(info_hash) = &self.info_hash { diff --git a/tests/servers/http/requests/scrape.rs b/tests/servers/http/requests/scrape.rs index 9e4257d6c..264c72c33 100644 --- a/tests/servers/http/requests/scrape.rs +++ b/tests/servers/http/requests/scrape.rs @@ -10,7 +10,7 @@ pub struct Query { } impl fmt::Display for Query { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.build()) } } @@ -93,7 +93,7 @@ impl QueryParams { } impl std::fmt::Display for QueryParams { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let query = self 
.info_hash .iter() diff --git a/tests/servers/mod.rs b/tests/servers/mod.rs index c19f72020..7c30b6f40 100644 --- a/tests/servers/mod.rs +++ b/tests/servers/mod.rs @@ -1,5 +1,3 @@ -extern crate rand; - mod api; mod http; mod udp; From 9fac926b1fb739d8e033546b389d496dff7d97ee Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 2 Aug 2023 19:34:13 +0100 Subject: [PATCH 0542/1003] ci: overhall codecov workflow --- .github/workflows/codecov.yml | 40 ------------------------ .github/workflows/coverage.yaml | 54 +++++++++++++++++++++++++++++++++ cSpell.json | 7 +++++ 3 files changed, 61 insertions(+), 40 deletions(-) delete mode 100644 .github/workflows/codecov.yml create mode 100644 .github/workflows/coverage.yaml diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml deleted file mode 100644 index 05551bafc..000000000 --- a/.github/workflows/codecov.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: Upload code coverage - -on: - push: - pull_request: - -env: - CARGO_TERM_COLOR: always - -jobs: - build: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - uses: dtolnay/rust-toolchain@stable - with: - toolchain: nightly - components: rustfmt, llvm-tools-preview - - name: Build - run: cargo build --release - env: - CARGO_INCREMENTAL: "0" - RUSTFLAGS: "-Cinstrument-coverage" - RUSTDOCFLAGS: "-Cinstrument-coverage" - - name: Test - run: cargo test --all-features --no-fail-fast - env: - CARGO_INCREMENTAL: "0" - RUSTFLAGS: "-Cinstrument-coverage" - RUSTDOCFLAGS: "-Cinstrument-coverage" - - name: Install grcov - run: if [[ ! -e ~/.cargo/bin/grcov ]]; then cargo install grcov; fi - - name: Run grcov - run: grcov . --binary-path target/debug/deps/ -s . 
-t lcov --branch --ignore-not-existing --ignore '../**' --ignore '/*' -o coverage.lcov - - uses: codecov/codecov-action@v3 - with: - files: ./coverage.lcov - flags: rust - fail_ci_if_error: true # optional (default = false) diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml new file mode 100644 index 000000000..d2750000a --- /dev/null +++ b/.github/workflows/coverage.yaml @@ -0,0 +1,54 @@ +name: Coverage + +on: + push: + pull_request: + +env: + CARGO_TERM_COLOR: always + +jobs: + report: + name: Report + runs-on: ubuntu-latest + env: + CARGO_INCREMENTAL: "0" + RUSTFLAGS: "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests" + RUSTDOCFLAGS: "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests" + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v3 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: nightly + components: llvm-tools-preview + + - id: cache + name: Enable Workflow Cache + uses: Swatinem/rust-cache@v2 + + - id: check + name: Run Build-Checks + run: cargo check --workspace --all-targets --all-features + + - id: test + name: Run Unit Tests + run: cargo test --workspace --all-targets --all-features + + - id: coverage + name: Generate Coverage Report + uses: alekitto/grcov@v0.2 + + - id: upload + name: Upload Coverage Report + uses: codecov/codecov-action@v3 + with: + files: ${{ steps.coverage.outputs.report }} + flag: rust + verbose: true + fail_ci_if_error: true diff --git a/cSpell.json b/cSpell.json index c935021c0..ac83bf7f7 100644 --- a/cSpell.json +++ b/cSpell.json @@ -1,5 +1,6 @@ { "words": [ + "alekitto", "appuser", "Arvid", "AUTOINCREMENT", @@ -21,12 +22,16 @@ "certbot", "chrono", "clippy", + "codecov", + "codegen", "completei", "connectionless", "dockerhub", "downloadedi", + "dtolnay", 
"filesd", "Freebox", + "Grcov", "hasher", "hexlify", "hlocalhost", @@ -67,6 +72,8 @@ "rngs", "routable", "rusqlite", + "RUSTDOCFLAGS", + "RUSTFLAGS", "rustfmt", "Rustls", "Seedable", From 8f39f6e476e97bb3dfe0df7181e8d80cfa2e6df7 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 3 Aug 2023 12:19:36 +0100 Subject: [PATCH 0543/1003] ci: update coverage workflow make workflow nicer add 'codecov.yml' for configuration remove 'rust' flag (will move to automatic flags in the future) --- .github/workflows/coverage.yaml | 9 +++++++-- cSpell.json | 1 + codecov.yml | 6 ++++++ 3 files changed, 14 insertions(+), 2 deletions(-) create mode 100644 codecov.yml diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index d2750000a..08b869327 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -32,8 +32,14 @@ jobs: name: Enable Workflow Cache uses: Swatinem/rust-cache@v2 + - id: tools + name: Install Tools + uses: taiki-e/install-action@v2 + with: + tool: grcov + - id: check - name: Run Build-Checks + name: Run Build Checks run: cargo check --workspace --all-targets --all-features - id: test @@ -49,6 +55,5 @@ jobs: uses: codecov/codecov-action@v3 with: files: ${{ steps.coverage.outputs.report }} - flag: rust verbose: true fail_ci_if_error: true diff --git a/cSpell.json b/cSpell.json index ac83bf7f7..5cafa68ed 100644 --- a/cSpell.json +++ b/cSpell.json @@ -84,6 +84,7 @@ "subsec", "Swatinem", "Swiftbit", + "taiki", "thiserror", "Torrentstorm", "torrust", diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 000000000..f0878195b --- /dev/null +++ b/codecov.yml @@ -0,0 +1,6 @@ +coverage: + status: + project: + default: + target: auto + threshold: 0.5% From 8defabd531a40c09fafc73b7d57775eeb01633c1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 7 Aug 2023 17:18:28 +0100 Subject: [PATCH 0544/1003] doc: [#351] fix single-command to run demo env with docker --- bin/install-demo.sh | 27 ++++++++++ 
bin/install.sh | 2 + docker/README.md | 60 +++++++++++++++++------ docker/bin/{run.sh => run-local-image.sh} | 0 docker/bin/run-public-image.sh | 13 +++++ src/lib.rs | 18 +------ 6 files changed, 88 insertions(+), 32 deletions(-) create mode 100755 bin/install-demo.sh rename docker/bin/{run.sh => run-local-image.sh} (100%) create mode 100755 docker/bin/run-public-image.sh diff --git a/bin/install-demo.sh b/bin/install-demo.sh new file mode 100755 index 000000000..1b829ca1d --- /dev/null +++ b/bin/install-demo.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +# Single command to setup and run the tracker using the pre-built image. + +# Check if 'storage' directory exists +if [ -d "./storage" ]; then + echo "Warning: 'storage' directory already exists. Please remove or rename it before proceeding." + exit 1 +fi + +# Check if 'config.toml' file exists in the current directory +if [ -f "./config.toml" ]; then + echo "Warning: 'config.toml' file already exists in the root directory. Please remove or rename it before proceeding." + exit 1 +fi + +# Check if SQLite3 is installed +if ! command -v sqlite3 &> /dev/null; then + echo "Warning: SQLite3 is not installed on your system. Please install it and retry." + exit 1 +fi + +wget https://raw.githubusercontent.com/torrust/torrust-tracker/v3.0.0-alpha.3/config.toml.local -O config.toml \ + && mkdir -p ./storage/database \ + && mkdir -p ./storage/ssl_certificates \ + && touch ./storage/database/data.db \ + && echo ";" | sqlite3 ./storage/database/data.db diff --git a/bin/install.sh b/bin/install.sh index ef469a939..82ea940d0 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -1,5 +1,7 @@ #!/bin/bash +# This script is only intended to be used for local development or testing environments. + # Generate the default settings file if it does not exist if ! 
[ -f "./config.toml" ]; then cp ./config.toml.local ./config.toml diff --git a/docker/README.md b/docker/README.md index e0fee61e7..207dadbbc 100644 --- a/docker/README.md +++ b/docker/README.md @@ -17,6 +17,26 @@ storage/ > NOTE: you only need the `ssl_certificates` directory and certificates in case you have enabled SSL for the one HTTP tracker or the API. +## Demo environment + +You can run a single command to setup the tracker with the default +configuration and run it using the pre-built public docker image: + +```s +curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/torrust/torrust-tracker/v3.0.0-alpha.3/bin/install-demo.sh | bash +export TORRUST_TRACKER_USER_UID=1000 \ + && docker run -it \ + --user="$TORRUST_TRACKER_USER_UID" \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --volume "$(pwd)/storage":"/app/storage" \ + --volume "$(pwd)/config.toml":"/app/config.toml":ro \ + torrust/tracker:3.0.0-alpha.3 +``` + +This is intended to be used to run a quick demo of the application. + ## Dev environment When using docker you have to bind the exposed ports to the wildcard address `0.0.0.0`, so you can access the application from the host machine. @@ -28,38 +48,46 @@ The default API configuration uses `127.0.0.1`, so you have to change it to: bind_address = "0.0.0.0:1212" ``` -Otherwise the API will be only accessible from inside the container. +Otherwise, the API will be only accessible from inside the container. ### With docker -Build and run locally: +Build and run locally. 
You can build the docker image locally: ```s docker context use default export TORRUST_TRACKER_USER_UID=1000 ./docker/bin/build.sh $TORRUST_TRACKER_USER_UID ./bin/install.sh -./docker/bin/run.sh $TORRUST_TRACKER_USER_UID +./docker/bin/run-local-image.sh $TORRUST_TRACKER_USER_UID ``` -Run using the pre-built public docker image: +Or you can run locally using the pre-built docker image: ```s +docker context use default export TORRUST_TRACKER_USER_UID=1000 -docker run -it \ - --user="$TORRUST_TRACKER_USER_UID" \ - --publish 6969:6969/udp \ - --publish 7070:7070/tcp \ - --publish 1212:1212/tcp \ - --volume "$(pwd)/storage":"/app/storage" \ - torrust/tracker +./bin/install.sh +./docker/bin/run-public-image.sh $TORRUST_TRACKER_USER_UID ``` -> NOTES: -> -> - You have to create the SQLite DB (`data.db`) and configuration (`config.toml`) before running the tracker. See `bin/install.sh`. -> - You have to replace the user UID (`1000`) with yours. -> - Remember to switch to your default docker context `docker context use default`. +In both cases, you will need to: + +- Create the SQLite DB (`data.db`) if you are going to use SQLite. +- Create the configuration file (`config.toml`) before running the tracker. +- Replace the user UID (`1000`) with yours. + +> NOTICE: that the `./bin/install.sh` can setup the application for you. But it +uses a predefined configuration. 
+ +Remember to switch to your default docker context `docker context use default` +and to change the API default configuration in `config.toml` to make it +available from the host machine: + +```toml +[http_api] +bind_address = "0.0.0.0:1212" +``` ### With docker-compose diff --git a/docker/bin/run.sh b/docker/bin/run-local-image.sh similarity index 100% rename from docker/bin/run.sh rename to docker/bin/run-local-image.sh diff --git a/docker/bin/run-public-image.sh b/docker/bin/run-public-image.sh new file mode 100755 index 000000000..50407f91b --- /dev/null +++ b/docker/bin/run-public-image.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +TORRUST_TRACKER_USER_UID=${TORRUST_TRACKER_USER_UID:-1000} +TORRUST_TRACKER_CONFIG=$(cat config.toml) + +docker run -it \ + --user="$TORRUST_TRACKER_USER_UID" \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --env TORRUST_TRACKER_CONFIG="$TORRUST_TRACKER_CONFIG" \ + --volume "$(pwd)/storage":"/app/storage" \ + torrust/tracker \ No newline at end of file diff --git a/src/lib.rs b/src/lib.rs index d14a3ada1..28bac9244 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -136,22 +136,8 @@ //! //! ## Run with docker //! -//! You can run the tracker with a pre-built docker image: -//! -//! ```text -//! mkdir -p ./storage/database \ -//! && mkdir -p ./storage/ssl_certificates \ -//! && export TORRUST_TRACKER_USER_UID=1000 \ -//! && docker run -it \ -//! --user="$TORRUST_TRACKER_USER_UID" \ -//! --publish 6969:6969/udp \ -//! --publish 7070:7070/tcp \ -//! --publish 1212:1212/tcp \ -//! --volume "$(pwd)/storage":"/app/storage" \ -//! torrust/tracker:3.0.0-alpha.3 -//! ``` -//! -//! For more information about using docker visit the [tracker docker documentation](https://github.com/torrust/torrust-tracker/tree/develop/docker). +//! You can run the tracker with a pre-built docker image. Please refer to the +//! [tracker docker documentation](https://github.com/torrust/torrust-tracker/tree/develop/docker). //! //! 
# Configuration //! From 5a0441b75b77a02264be725650e25adbdd839602 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 8 Aug 2023 09:42:47 +0100 Subject: [PATCH 0545/1003] docs: fix coverage badge on README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9d2838a88..b419c12c1 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Torrust Tracker -[![Build & Release](https://github.com/torrust/torrust-tracker/actions/workflows/build_release.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/build_release.yml) [![CI](https://github.com/torrust/torrust-tracker/actions/workflows/test_build_release.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/test_build_release.yml) [![Publish crate](https://github.com/torrust/torrust-tracker/actions/workflows/publish_crate.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/publish_crate.yml) [![Publish docker image](https://github.com/torrust/torrust-tracker/actions/workflows/publish_docker_image.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/publish_docker_image.yml) [![Test](https://github.com/torrust/torrust-tracker/actions/workflows/test.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/test.yml) [![Test docker build](https://github.com/torrust/torrust-tracker/actions/workflows/test_docker.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/test_docker.yml) [![Upload code coverage](https://github.com/torrust/torrust-tracker/actions/workflows/codecov.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/codecov.yml) +[![Build & Release](https://github.com/torrust/torrust-tracker/actions/workflows/build_release.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/build_release.yml) 
[![CI](https://github.com/torrust/torrust-tracker/actions/workflows/test_build_release.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/test_build_release.yml) [![Publish crate](https://github.com/torrust/torrust-tracker/actions/workflows/publish_crate.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/publish_crate.yml) [![Publish docker image](https://github.com/torrust/torrust-tracker/actions/workflows/publish_docker_image.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/publish_docker_image.yml) [![Test](https://github.com/torrust/torrust-tracker/actions/workflows/test.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/test.yml) [![Test docker build](https://github.com/torrust/torrust-tracker/actions/workflows/test_docker.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/test_docker.yml) [![Coverage](https://github.com/torrust/torrust-tracker/actions/workflows/coverage.yaml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/coverage.yaml) Torrust Tracker is a lightweight but incredibly high-performance and feature-rich BitTorrent tracker written in [Rust](https://www.rust-lang.org/). From 8a79fbea235ef84039957c1500e3e12466bc6080 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 8 Aug 2023 10:22:12 +0100 Subject: [PATCH 0546/1003] refactor: [#361] extract mod for configuration --- src/bootstrap/app.rs | 31 +------------------------------ src/bootstrap/config.rs | 37 +++++++++++++++++++++++++++++++++++++ src/bootstrap/mod.rs | 1 + 3 files changed, 39 insertions(+), 30 deletions(-) create mode 100644 src/bootstrap/config.rs diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index c0e688a0d..6961e15f0 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -11,11 +11,11 @@ //! 2. Initialize static variables. //! 3. Initialize logging. //! 4. Initialize the domain tracker. 
-use std::env; use std::sync::Arc; use torrust_tracker_configuration::Configuration; +use super::config::initialize_configuration; use crate::bootstrap; use crate::shared::clock::static_time; use crate::shared::crypto::ephemeral_instance_keys; @@ -55,35 +55,6 @@ pub fn initialize_static() { lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); } -/// It loads the application configuration from the environment. -/// -/// There are two methods to inject the configuration: -/// -/// 1. By using a config file: `config.toml`. The file must be in the same folder where you are running the tracker. -/// 2. Environment variable: `TORRUST_TRACKER_CONFIG`. The variable contains the same contents as the `config.toml` file. -/// -/// Environment variable has priority over the config file. -/// -/// Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) for the configuration options. -/// -/// # Panics -/// -/// Will panic if it can't load the configuration from either -/// `./config.toml` file or the env var `TORRUST_TRACKER_CONFIG`. -#[must_use] -fn initialize_configuration() -> Configuration { - const CONFIG_PATH: &str = "./config.toml"; - const CONFIG_ENV_VAR_NAME: &str = "TORRUST_TRACKER_CONFIG"; - - if env::var(CONFIG_ENV_VAR_NAME).is_ok() { - println!("Loading configuration from env var {CONFIG_ENV_VAR_NAME}"); - Configuration::load_from_env_var(CONFIG_ENV_VAR_NAME).unwrap() - } else { - println!("Loading configuration from config file {CONFIG_PATH}"); - Configuration::load_from_file(CONFIG_PATH).unwrap() - } -} - /// It builds the domain tracker /// /// The tracker is the domain layer service. It's the entrypoint to make requests to the domain layer. diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs new file mode 100644 index 000000000..187fddd0b --- /dev/null +++ b/src/bootstrap/config.rs @@ -0,0 +1,37 @@ +//! Initialize configuration from file or env var. +//! +//! 
All environment variables are prefixed with `TORRUST_TRACKER_BACK_`. +use std::env; + +use torrust_tracker_configuration::Configuration; + +// Environment variables + +const CONFIG_PATH: &str = "./config.toml"; +const CONFIG_ENV_VAR_NAME: &str = "TORRUST_TRACKER_CONFIG"; + +/// It loads the application configuration from the environment. +/// +/// There are two methods to inject the configuration: +/// +/// 1. By using a config file: `config.toml`. The file must be in the same folder where you are running the tracker. +/// 2. Environment variable: `TORRUST_TRACKER_CONFIG`. The variable contains the same contents as the `config.toml` file. +/// +/// Environment variable has priority over the config file. +/// +/// Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) for the configuration options. +/// +/// # Panics +/// +/// Will panic if it can't load the configuration from either +/// `./config.toml` file or the env var `TORRUST_TRACKER_CONFIG`. +#[must_use] +pub fn initialize_configuration() -> Configuration { + if env::var(CONFIG_ENV_VAR_NAME).is_ok() { + println!("Loading configuration from env var {CONFIG_ENV_VAR_NAME}"); + Configuration::load_from_env_var(CONFIG_ENV_VAR_NAME).unwrap() + } else { + println!("Loading configuration from config file {CONFIG_PATH}"); + Configuration::load_from_file(CONFIG_PATH).unwrap() + } +} diff --git a/src/bootstrap/mod.rs b/src/bootstrap/mod.rs index e39cf3adc..22044aafd 100644 --- a/src/bootstrap/mod.rs +++ b/src/bootstrap/mod.rs @@ -6,5 +6,6 @@ //! like cleaning torrents, and other jobs because they can be enabled/disabled depending on the configuration. //! For example, you can have more than one UDP and HTTP tracker, each server is executed like a independent job. 
pub mod app; +pub mod config; pub mod jobs; pub mod logging; From 0adf373bca011c7985277066a7f20138785bc35f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 8 Aug 2023 10:38:43 +0100 Subject: [PATCH 0547/1003] refactor: [#361] rename constants representing env vars All of then have the ENV_VAR prefix now. Like in the Index Backend. So we can identify constants that are related to env vars. --- src/bootstrap/config.rs | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index 187fddd0b..2b0740f0e 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -7,14 +7,19 @@ use torrust_tracker_configuration::Configuration; // Environment variables -const CONFIG_PATH: &str = "./config.toml"; -const CONFIG_ENV_VAR_NAME: &str = "TORRUST_TRACKER_CONFIG"; +/// The whole `config.toml` file content. It has priority over the config file. +/// Even if the file is not on the default path. +const ENV_VAR_CONFIG: &str = "TORRUST_TRACKER_CONFIG"; + +// Default values + +const ENV_VAR_DEFAULT_CONFIG_PATH: &str = "./config.toml"; /// It loads the application configuration from the environment. /// /// There are two methods to inject the configuration: /// -/// 1. By using a config file: `config.toml`. The file must be in the same folder where you are running the tracker. +/// 1. By using a config file: `config.toml`. /// 2. Environment variable: `TORRUST_TRACKER_CONFIG`. The variable contains the same contents as the `config.toml` file. /// /// Environment variable has priority over the config file. @@ -27,11 +32,13 @@ const CONFIG_ENV_VAR_NAME: &str = "TORRUST_TRACKER_CONFIG"; /// `./config.toml` file or the env var `TORRUST_TRACKER_CONFIG`. 
#[must_use] pub fn initialize_configuration() -> Configuration { - if env::var(CONFIG_ENV_VAR_NAME).is_ok() { - println!("Loading configuration from env var {CONFIG_ENV_VAR_NAME}"); - Configuration::load_from_env_var(CONFIG_ENV_VAR_NAME).unwrap() + if env::var(ENV_VAR_CONFIG).is_ok() { + println!("Loading configuration from env var {ENV_VAR_CONFIG}"); + + Configuration::load_from_env_var(ENV_VAR_CONFIG).unwrap() } else { - println!("Loading configuration from config file {CONFIG_PATH}"); - Configuration::load_from_file(CONFIG_PATH).unwrap() + println!("Loading configuration from config file {ENV_VAR_DEFAULT_CONFIG_PATH}"); + + Configuration::load_from_file(ENV_VAR_DEFAULT_CONFIG_PATH).unwrap() } } From 702dd1433a850090d02417b82cfb7ebc45510898 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 8 Aug 2023 10:56:45 +0100 Subject: [PATCH 0548/1003] feat: allow to change the config.toml file path Now you can change the deafult location for the config file with an env var: ``` TORRUST_IDX_BACK_CONFIG_PATH="./storage/config.toml" cargo run ``` The default location is still `./config.toml` --- packages/configuration/src/lib.rs | 5 +++-- src/bootstrap/config.rs | 14 ++++++++++++-- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index f785aa976..ff604fa4e 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -512,10 +512,11 @@ impl Configuration { if Path::new(path).exists() { config = config_builder.add_source(File::with_name(path)).build()?; } else { - warn!("No config file found."); - warn!("Creating config file.."); + warn!("No config file found. 
Creating config file ..."); + let config = Configuration::default(); config.save_to_file(path)?; + return Err(Error::CreatedNewConfigHalt { location: Location::caller(), path: path.to_string(), diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index 2b0740f0e..398d98563 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -2,6 +2,7 @@ //! //! All environment variables are prefixed with `TORRUST_TRACKER_BACK_`. use std::env; +use std::path::Path; use torrust_tracker_configuration::Configuration; @@ -11,6 +12,9 @@ use torrust_tracker_configuration::Configuration; /// Even if the file is not on the default path. const ENV_VAR_CONFIG: &str = "TORRUST_TRACKER_CONFIG"; +/// The `config.toml` file location. +pub const ENV_VAR_CONFIG_PATH: &str = "TORRUST_IDX_BACK_CONFIG_PATH"; + // Default values const ENV_VAR_DEFAULT_CONFIG_PATH: &str = "./config.toml"; @@ -37,8 +41,14 @@ pub fn initialize_configuration() -> Configuration { Configuration::load_from_env_var(ENV_VAR_CONFIG).unwrap() } else { - println!("Loading configuration from config file {ENV_VAR_DEFAULT_CONFIG_PATH}"); + let config_path = env::var(ENV_VAR_CONFIG_PATH).unwrap_or_else(|_| ENV_VAR_DEFAULT_CONFIG_PATH.to_string()); + + if Path::new(&config_path).is_file(){ + println!("Loading configuration from config file: `{config_path}`"); + } else { + println!("Creating default config file: `{config_path}`"); + } - Configuration::load_from_file(ENV_VAR_DEFAULT_CONFIG_PATH).unwrap() + Configuration::load_from_file(&config_path).expect("Error loading configuration from file") } } From 8936c6e747b7060cd01df03b711d6133fda242c6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 8 Aug 2023 11:55:26 +0100 Subject: [PATCH 0549/1003] feat!: continue running the tracker after creating default config file Instead of halting the program, not the tracker continues the execution when no config.toml file is provided and the default one is created. 
It shows some messages: ``` Loading configuration from configuration file: `./config.toml` ... Missing configuration file. Creating a default configuration file: `./config.toml` ... Please review the config file: `./config.toml` and restart the tracker if needed. 2023-08-08T11:56:26.957162508+01:00 [torrust_tracker::bootstrap::logging][INFO] logging initialized. 2023-08-08T11:56:26.957931296+01:00 [torrust_tracker::bootstrap::jobs::tracker_apis][INFO] Starting Torrust APIs server on: http://127.0.0.1:1212 2023-08-08T11:56:26.958027355+01:00 [torrust_tracker::bootstrap::jobs::tracker_apis][INFO] Torrust APIs server started ``` --- packages/configuration/src/lib.rs | 42 +++++++++++-------------------- src/bootstrap/config.rs | 31 +++++++++++++++++------ 2 files changed, 37 insertions(+), 36 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index ff604fa4e..6de0e3ed7 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -228,14 +228,11 @@ //!``` use std::collections::{HashMap, HashSet}; use std::net::IpAddr; -use std::panic::Location; -use std::path::Path; use std::str::FromStr; use std::sync::Arc; use std::{env, fs}; use config::{Config, ConfigError, File, FileFormat}; -use log::warn; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; use thiserror::Error; @@ -414,17 +411,6 @@ pub enum Error { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, - /// If you run the tracker without providing the configuration (via the - /// `TORRUST_TRACKER_CONFIG` environment variable or configuration file), - /// the tracker will create a default configuration file but it will not - /// load it. It will return this error instead and you have to restart the - /// it. 
- #[error("Default configuration created at: `{path}`, please review and reload tracker, {location}")] - CreatedNewConfigHalt { - location: &'static Location<'static>, - path: String, - }, - /// Unable to load the configuration from the configuration file. #[error("Failed processing the configuration: {source}")] ConfigError { source: LocatedError<'static, ConfigError> }, @@ -502,32 +488,32 @@ impl Configuration { /// /// # Errors /// - /// Will return `Err` if `path` does not exist or has a bad configuration. + /// Will return `Err` if `path` does not exist or has a bad configuration. pub fn load_from_file(path: &str) -> Result { let config_builder = Config::builder(); #[allow(unused_assignments)] let mut config = Config::default(); - if Path::new(path).exists() { - config = config_builder.add_source(File::with_name(path)).build()?; - } else { - warn!("No config file found. Creating config file ..."); - - let config = Configuration::default(); - config.save_to_file(path)?; - - return Err(Error::CreatedNewConfigHalt { - location: Location::caller(), - path: path.to_string(), - }); - } + config = config_builder.add_source(File::with_name(path)).build()?; let torrust_config: Configuration = config.try_deserialize()?; Ok(torrust_config) } + /// Saves the default configuration at the given path. + /// + /// # Errors + /// + /// Will return `Err` if `path` is not a valid path or the configuration + /// file cannot be created. + pub fn create_default_configuration_file(path: &str) -> Result { + let config = Configuration::default(); + config.save_to_file(path)?; + Ok(config) + } + /// Loads the configuration from the environment variable. The whole /// configuration must be in the environment variable. It contains the same /// configuration as the configuration file with the same format. 
diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index 398d98563..eef3265f9 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -4,7 +4,7 @@ use std::env; use std::path::Path; -use torrust_tracker_configuration::Configuration; +use torrust_tracker_configuration::{Configuration, Error}; // Environment variables @@ -37,18 +37,33 @@ const ENV_VAR_DEFAULT_CONFIG_PATH: &str = "./config.toml"; #[must_use] pub fn initialize_configuration() -> Configuration { if env::var(ENV_VAR_CONFIG).is_ok() { - println!("Loading configuration from env var {ENV_VAR_CONFIG}"); + println!("Loading configuration from env var {ENV_VAR_CONFIG} ..."); Configuration::load_from_env_var(ENV_VAR_CONFIG).unwrap() } else { let config_path = env::var(ENV_VAR_CONFIG_PATH).unwrap_or_else(|_| ENV_VAR_DEFAULT_CONFIG_PATH.to_string()); - if Path::new(&config_path).is_file(){ - println!("Loading configuration from config file: `{config_path}`"); - } else { - println!("Creating default config file: `{config_path}`"); - } + println!("Loading configuration from configuration file: `{config_path}` ..."); - Configuration::load_from_file(&config_path).expect("Error loading configuration from file") + load_from_file_or_create_default(&config_path).unwrap() + } +} + +/// Loads the configuration from the configuration file. If the file does +/// not exist, it will create a default configuration file and return an +/// error. +/// +/// # Errors +/// +/// Will return `Err` if `path` does not exist or has a bad configuration. 
+fn load_from_file_or_create_default(path: &str) -> Result { + if Path::new(&path).is_file() { + Configuration::load_from_file(path) + } else { + println!("Missing configuration file."); + println!("Creating a default configuration file: `{path}` ..."); + let config = Configuration::create_default_configuration_file(path)?; + println!("Please review the config file: `{path}` and restart the tracker if needed."); + Ok(config) } } From ec7bd7d8a14e050d695941d93cd5691e28e4a4c3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 8 Aug 2023 12:25:56 +0100 Subject: [PATCH 0550/1003] fix: env var name. Wrong prefix --- src/bootstrap/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index eef3265f9..727bf59f7 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -13,7 +13,7 @@ use torrust_tracker_configuration::{Configuration, Error}; const ENV_VAR_CONFIG: &str = "TORRUST_TRACKER_CONFIG"; /// The `config.toml` file location. 
-pub const ENV_VAR_CONFIG_PATH: &str = "TORRUST_IDX_BACK_CONFIG_PATH"; +pub const ENV_VAR_CONFIG_PATH: &str = "TORRUST_TRACKER_CONFIG_PATH"; // Default values From 38eea4689ed0fd92ca0614a95c093a6981208357 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 9 Aug 2023 09:33:12 +0100 Subject: [PATCH 0551/1003] chore: update dependencies - Bump serde_with from 3.0 to 3.2 - Bump axum from 0.6.18 to 0.6.20 --- Cargo.lock | 329 +++++++++++++++--------------- Cargo.toml | 4 +- packages/configuration/Cargo.toml | 2 +- 3 files changed, 162 insertions(+), 173 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 20557f7c8..f1ae8cad0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -50,9 +50,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56fc6cf8dc8c4158eed8649f9b8b0ea1518eb62b544fe9490d66fa0b349eafe9" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" [[package]] name = "android-tzdata" @@ -93,13 +93,13 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-trait" -version = "0.1.71" +version = "0.1.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a564d521dd56509c4c47480d00b80ee55f7e385ae48db5744c67ad50c92d2ebf" +checksum = "cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.28", ] [[package]] @@ -110,9 +110,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", "axum-core", @@ -386,9 +386,12 @@ checksum = 
"89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] name = "cc" -version = "1.0.79" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = "305fe645edc1442a0fa8b6726ba61d422798d37a52e12eaecf4b022ebbb88f01" +dependencies = [ + "libc", +] [[package]] name = "cexpr" @@ -576,9 +579,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.1" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" +checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e" dependencies = [ "darling_core", "darling_macro", @@ -586,27 +589,36 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.1" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" +checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim", - "syn 2.0.23", + "syn 2.0.28", ] [[package]] name = "darling_macro" -version = "0.20.1" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" +checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core", "quote", - "syn 2.0.23", + "syn 2.0.28", +] + +[[package]] +name = "deranged" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7684a49fb1af197853ef7b2ee694bc1f5b4179556f1e5710e1760c5db6f5e929" +dependencies = [ + "serde", ] [[package]] @@ -624,13 +636,13 @@ dependencies = [ [[package]] name = "derive_utils" -version = "0.13.1" +version = "0.13.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "20ce151e1b790e3e36d767ae57691240feafe8b605e1c2fe081183d64ac1bff3" +checksum = "9abcad25e9720609ccb3dcdb795d845e37d8ce34183330a9f48b03a1a71c8e21" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.28", ] [[package]] @@ -663,9 +675,9 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "either" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "encoding_rs" @@ -678,15 +690,15 @@ dependencies = [ [[package]] name = "equivalent" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" dependencies = [ "errno-dragonfly", "libc", @@ -726,12 +738,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.9.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] +checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" [[package]] name = "fern" @@ -833,7 +842,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 
2.0.23", + "syn 2.0.28", ] [[package]] @@ -845,7 +854,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.28", ] [[package]] @@ -857,7 +866,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.23", + "syn 2.0.28", ] [[package]] @@ -922,7 +931,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.28", ] [[package]] @@ -1185,15 +1194,7 @@ checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" dependencies = [ "equivalent", "hashbrown 0.14.0", -] - -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", + "serde", ] [[package]] @@ -1203,18 +1204,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5305557fa27b460072ae15ce07617e999f5879f14d376c8449f0bfb9f9d8e91e" dependencies = [ "derive_utils", - "syn 2.0.23", -] - -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi", - "libc", - "windows-sys", + "syn 2.0.28", ] [[package]] @@ -1234,9 +1224,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b02a5381cc465bd3041d84623d0fa3b66738b52b8e2fc3bab8ad63ab032f4a" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "js-sys" @@ -1372,9 +1362,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.9" +version = "1.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" +checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" dependencies = [ "cc", "pkg-config", @@ -1389,15 +1379,15 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.3.8" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" +checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" [[package]] name = "local-ip-address" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2815836665de176ba66deaa449ada98fdf208d84730d1a84a22cbeed6151a6fa" +checksum = "885efb07efcd6ae1c6af70be7565544121424fa9e5b1c3e4b58bbbf141a58cef" dependencies = [ "libc", "neli", @@ -1432,9 +1422,9 @@ dependencies = [ [[package]] name = "matchit" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" +checksum = "ed1202b2a6f884ae56f04cff409ab315c5ce26b5e58d7412e484f01fd52f52ef" [[package]] name = "memchr" @@ -1679,9 +1669,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" dependencies = [ "autocfg", ] @@ -1713,9 +1703,9 @@ checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "openssl" -version = "0.10.55" +version = "0.10.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" +checksum = 
"729b745ad4a5575dd06a3e1af1414bd330ee561c01b3899eb584baeaa8def17e" dependencies = [ "bitflags 1.3.2", "cfg-if", @@ -1734,7 +1724,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.28", ] [[package]] @@ -1745,18 +1735,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.26.0+1.1.1u" +version = "111.27.0+1.1.1v" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efc62c9f12b22b8f5208c23a7200a442b2e5999f8bdf80233852122b5a4f6f37" +checksum = "06e8f197c82d7511c5b014030c9b1efeda40d7d5f99d23b4ceed3524a5e63f02" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.90" +version = "0.9.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" +checksum = "866b5f16f90776b9bb8dc1e1802ac6f0513de3a7a7465867bfbc563dc737faac" dependencies = [ "cc", "libc", @@ -1827,9 +1817,9 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.7.0" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73935e4d55e2abf7f130186537b19e7a4abc886a0252380b59248af473a3fc9" +checksum = "1acb4a4365a13f749a93f1a094a7805e5cfa0955373a9de860d962eaa3a5fe5a" dependencies = [ "thiserror", "ucd-trie", @@ -1837,9 +1827,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.0" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aef623c9bbfa0eedf5a0efba11a5ee83209c326653ca31ff019bec3a95bfff2b" +checksum = "666d00490d4ac815001da55838c500eafb0320019bbaa44444137c48b443a853" dependencies = [ "pest", "pest_generator", @@ -1847,22 +1837,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.0" +version = "2.7.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3e8cba4ec22bada7fc55ffe51e2deb6a0e0db2d0b7ab0b103acc80d2510c190" +checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.28", ] [[package]] name = "pest_meta" -version = "2.7.0" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01f71cb40bd8bb94232df14b946909e14660e33fc05db3e50ae2a82d7ea0ca0" +checksum = "56af0a30af74d0445c0bf6d9d051c979b516a1a5af790d251daee76005420a48" dependencies = [ "once_cell", "pest", @@ -1871,29 +1861,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.28", ] [[package]] name = "pin-project-lite" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" +checksum = "2c516611246607d0c04186886dbb3a754368ef82c79e9827a802c6d836dd111c" [[package]] name = "pin-utils" @@ -1954,9 +1944,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.63" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb" +checksum = 
"18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] @@ -1983,9 +1973,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.29" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" +checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" dependencies = [ "proc-macro2", ] @@ -2069,9 +2059,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.0" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89089e897c013b3deb627116ae56a6955a72b8bed395c9526af31c9fe528b484" +checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" dependencies = [ "aho-corasick", "memchr", @@ -2081,9 +2071,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa250384981ea14565685dea16a9ccc4d1c541a13f82b9c168572264d1df8c56" +checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" dependencies = [ "aho-corasick", "memchr", @@ -2092,9 +2082,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846" +checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" [[package]] name = "rend" @@ -2222,13 +2212,12 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.30.0" +version = "1.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0446843641c69436765a35a5a77088e28c2e6a12da93e84aa3ab1cd4aa5a042" +checksum = "4a2ab0025103a60ecaaf3abf24db1db240a4e1c15837090d2c32f625ac98abea" dependencies = [ "arrayvec", "borsh", - "bytecheck", "byteorder", "bytes", 
"num-traits", @@ -2261,13 +2250,12 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.23" +version = "0.38.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" +checksum = "172891ebdceb05aa0005f533a6cbfca599ddd7d966f6f5d4d9b2e70478e70399" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.3.3", "errno", - "io-lifetimes", "libc", "linux-raw-sys", "windows-sys", @@ -2275,9 +2263,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.3" +version = "0.21.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b19faa85ecb5197342b54f987b142fb3e30d0c90da40f80ef4fa9a726e6676ed" +checksum = "1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb" dependencies = [ "log", "ring", @@ -2296,9 +2284,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.101.1" +version = "0.101.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15f36a6828982f422756984e47912a7a51dcbc2a197aa791158f8ca61cd8204e" +checksum = "261e9e0888cba427c3316e6322805653c9425240b6fd96cee7cb671ab70ab8d0" dependencies = [ "ring", "untrusted", @@ -2306,15 +2294,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc31bd9b61a32c31f9650d18add92aa83a49ba979c143eefd27fe7177b05bd5f" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "ryu" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe232bdf6be8c8de797b22184ee71118d63780ea42ac85b61d1baa6d3b782ae9" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "saturating" @@ -2342,9 +2330,9 @@ dependencies = [ [[package]] name = "scopeguard" -version = "1.1.0" +version = "1.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" @@ -2364,9 +2352,9 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "security-framework" -version = "2.9.1" +version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -2377,9 +2365,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", @@ -2387,15 +2375,15 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" +checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" [[package]] name = "serde" -version = "1.0.167" +version = "1.0.183" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7daf513456463b42aa1d94cff7e0c24d682b429f020b9afa4f5ba5c40a22b237" +checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c" dependencies = [ "serde_derive", ] @@ -2412,29 +2400,29 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.11" +version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5a16be4fe5320ade08736447e3198294a5ea9a6d44dde6f35f0a5e06859c427a" +checksum = "ab33ec92f677585af6d88c65593ae2375adde54efdbf16d597f2cbc7a6d368ff" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.167" +version = "1.0.183" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b69b106b68bc8054f0e974e70d19984040f8a5cf9215ca82626ea4853f82c4b9" +checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.28", ] [[package]] name = "serde_json" -version = "1.0.100" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f1e14e89be7aa4c4b78bdbdc9eb5bf8517829a600ae8eaa39a6e1d960b5185c" +checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" dependencies = [ "itoa", "ryu", @@ -2443,9 +2431,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acc4422959dd87a76cb117c191dcbffc20467f06c9100b76721dab370f24d3a" +checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" dependencies = [ "itoa", "serde", @@ -2453,13 +2441,13 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.14" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d89a8107374290037607734c0b73a85db7ed80cae314b3c5791f192a496e731" +checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.28", ] [[package]] @@ -2485,14 +2473,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.0.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f02d8aa6e3c385bf084924f660ce2a3a6bd333ba55b35e8590b321f35d88513" +checksum = 
"1402f54f9a3b9e2efe71c1cea24e648acce55887983553eeb858cf3115acfd49" dependencies = [ "base64 0.21.2", "chrono", "hex", "indexmap 1.9.3", + "indexmap 2.0.0", "serde", "serde_json", "serde_with_macros", @@ -2501,14 +2490,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.0.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edc7d5d3932fb12ce722ee5e64dd38c504efba37567f0c402f6ca728c3b8b070" +checksum = "9197f1ad0e3c173a0222d3c4404fb04c3afe87e962bcb327af73e8301fa203c7" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.28", ] [[package]] @@ -2620,9 +2609,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.23" +version = "2.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fb7d6d8281a51045d62b8eb3a7d1ce347b76f312af50cd3dc0af39c87c1737" +checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" dependencies = [ "proc-macro2", "quote", @@ -2643,11 +2632,10 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.6.0" +version = "3.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" +checksum = "dc02fddf48964c42031a0b3fe0428320ecf3a73c401040fc0096f97794310651" dependencies = [ - "autocfg", "cfg-if", "fastrand", "redox_syscall", @@ -2663,30 +2651,31 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.43" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a35fc5b8971143ca348fa6df4f024d4d55264f3468c71ad1c2f365b0a4d58c42" +checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.43" +version = "1.0.44" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" +checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.28", ] [[package]] name = "time" -version = "0.3.22" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" +checksum = "b0fdd63d58b18d663fbdf70e049f00a22c8e42be082203be7f26589213cd75ea" dependencies = [ + "deranged", "itoa", "serde", "time-core", @@ -2701,9 +2690,9 @@ checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" +checksum = "eb71511c991639bb078fd5bf97757e03914361c48100d52878b8e52b46fb92cd" dependencies = [ "time-core", ] @@ -2750,7 +2739,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.28", ] [[package]] @@ -2819,9 +2808,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.12" +version = "0.19.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c500344a19072298cd05a7224b3c0c629348b78692bf48466c5238656e315a78" +checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" dependencies = [ "indexmap 2.0.0", "serde", @@ -2990,9 +2979,9 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "ucd-trie" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" +checksum = 
"ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" [[package]] name = "unicode-bidi" @@ -3002,9 +2991,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22049a19f4a68748a168c0fc439f9516686aa045927ff767eca0a85101fb6e73" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" [[package]] name = "unicode-normalization" @@ -3034,9 +3023,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d023da39d1fde5a8a3fe1f3e01ca9632ada0a63e9797de55a879d6e2236277be" +checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" dependencies = [ "getrandom", "rand", @@ -3090,7 +3079,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.28", "wasm-bindgen-shared", ] @@ -3124,7 +3113,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.28", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3244,9 +3233,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.4.8" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9482fe6ceabdf32f3966bfdd350ba69256a97c30253dc616fe0005af24f164e" +checksum = "acaaa1190073b2b101e15083c38ee8ec891b5e05cbee516521e94ec008f61e64" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index 152495463..e87f7b972 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ tokio = { version = "1.29", features = ["rt-multi-thread", "net", "sync", "macro serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2" serde_json = "1.0" -serde_with = "3.0" 
+serde_with = "3.2" percent-encoding = "2.2" binascii = "0.1" lazy_static = "1.4" @@ -38,7 +38,7 @@ futures = "0.3" async-trait = "0.1" aquatic_udp_protocol = "0.8" uuid = { version = "1", features = ["v4"] } -axum = "0.6.18" +axum = "0.6.20" axum-server = { version = "0.5", features = ["tls-rustls"] } axum-client-ip = "0.4.1" bip_bencode = "0.4" diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index a6cf57710..9b91534e9 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -9,7 +9,7 @@ version.workspace = true [dependencies] serde = { version = "1.0", features = ["derive"] } -serde_with = "3.0" +serde_with = "3.2" config = "0.13" toml = "0.7" log = { version = "0.4", features = ["release_max_level_info"] } From ba61af0f0a31268c03bbe4eab80be63006f74671 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 21 Aug 2023 16:32:47 +0200 Subject: [PATCH 0552/1003] chore: fix new clippy warning --- src/servers/apis/v1/context/auth_key/handlers.rs | 4 ++-- src/servers/apis/v1/context/whitelist/handlers.rs | 6 +++--- src/servers/http/v1/handlers/announce.rs | 4 ++-- src/servers/http/v1/handlers/scrape.rs | 2 +- src/servers/signals.rs | 6 +++--- src/servers/udp/server.rs | 4 ++-- src/tracker/mod.rs | 2 +- tests/servers/udp/client.rs | 2 +- 8 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs index 57d55d4c0..85158c698 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -70,7 +70,7 @@ pub async fn delete_auth_key_handler( match Key::from_str(&seconds_valid_or_key.0) { Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), Ok(key) => match tracker.remove_auth_key(&key).await { - Ok(_) => ok_response(), + Ok(()) => ok_response(), Err(e) => failed_to_delete_key_response(e), }, } @@ -90,7 +90,7 @@ pub async fn 
delete_auth_key_handler( /// for more information about this endpoint. pub async fn reload_keys_handler(State(tracker): State>) -> Response { match tracker.load_keys_from_database().await { - Ok(_) => ok_response(), + Ok(()) => ok_response(), Err(e) => failed_to_reload_keys_response(e), } } diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs index 8e8c20b50..bd1da735e 100644 --- a/src/servers/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -30,7 +30,7 @@ pub async fn add_torrent_to_whitelist_handler( match InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(_) => ok_response(), + Ok(()) => ok_response(), Err(e) => failed_to_whitelist_torrent_response(e), }, } @@ -53,7 +53,7 @@ pub async fn remove_torrent_from_whitelist_handler( match InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(_) => ok_response(), + Ok(()) => ok_response(), Err(e) => failed_to_remove_torrent_from_whitelist_response(e), }, } @@ -71,7 +71,7 @@ pub async fn remove_torrent_from_whitelist_handler( /// for more information about this endpoint. 
pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { match tracker.load_whitelist_from_database().await { - Ok(_) => ok_response(), + Ok(()) => ok_response(), Err(e) => failed_to_reload_whitelist_response(e), } } diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 5b26b3758..0e49bd422 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -87,7 +87,7 @@ async fn handle_announce( if tracker.requires_authentication() { match maybe_key { Some(key) => match tracker.authenticate(&key).await { - Ok(_) => (), + Ok(()) => (), Err(error) => return Err(responses::error::Error::from(error)), }, None => { @@ -100,7 +100,7 @@ async fn handle_announce( // Authorization match tracker.authorize(&announce_request.info_hash).await { - Ok(_) => (), + Ok(()) => (), Err(error) => return Err(responses::error::Error::from(error)), } diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index b8c1cbea1..58b8aa84c 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -78,7 +78,7 @@ async fn handle_scrape( let return_real_scrape_data = if tracker.requires_authentication() { match maybe_key { Some(key) => match tracker.authenticate(&key).await { - Ok(_) => true, + Ok(()) => true, Err(_error) => false, }, None => false, diff --git a/src/servers/signals.rs b/src/servers/signals.rs index d34cca108..51f53738d 100644 --- a/src/servers/signals.rs +++ b/src/servers/signals.rs @@ -23,8 +23,8 @@ pub async fn global_shutdown_signal() { let terminate = std::future::pending::<()>(); tokio::select! { - _ = ctrl_c => {}, - _ = terminate => {} + () = ctrl_c => {}, + () = terminate => {} } } @@ -38,7 +38,7 @@ pub async fn shutdown_signal(stop_receiver: tokio::sync::oneshot::Receiver) tokio::select! 
{ _ = stop => {}, - _ = global_shutdown_signal() => {} + () = global_shutdown_signal() => {} } } diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 3bb5bd013..5e5c98704 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -220,7 +220,7 @@ impl Udp { let socket = self.socket.clone(); tokio::select! { - _ = &mut shutdown_signal => { + () = &mut shutdown_signal => { info!("Stopping UDP server: {}..", self.socket.local_addr().unwrap()); break; } @@ -244,7 +244,7 @@ impl Udp { let mut cursor = Cursor::new(buffer); match response.write(&mut cursor) { - Ok(_) => { + Ok(()) => { #[allow(clippy::cast_possible_truncation)] let position = cursor.position() as usize; let inner = cursor.get_ref(); diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 63c8b96d6..6823e8fe8 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -643,7 +643,7 @@ impl Tracker { for info_hash in info_hashes { let swarm_metadata = match self.authorize(info_hash).await { - Ok(_) => self.get_swarm_metadata(info_hash).await, + Ok(()) => self.get_swarm_metadata(info_hash).await, Err(_) => SwarmMetadata::zeroed(), }; scrape_data.add_file(info_hash, swarm_metadata); diff --git a/tests/servers/udp/client.rs b/tests/servers/udp/client.rs index 75467055e..d267adaba 100644 --- a/tests/servers/udp/client.rs +++ b/tests/servers/udp/client.rs @@ -55,7 +55,7 @@ impl UdpTrackerClient { let mut cursor = Cursor::new(request_buffer); let request_data = match request.write(&mut cursor) { - Ok(_) => { + Ok(()) => { #[allow(clippy::cast_possible_truncation)] let position = cursor.position() as usize; let inner_request_buffer = cursor.get_ref(); From 3a1ac86b306a3dcf0581bac990b0f1d41c067be3 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 21 Aug 2023 17:27:24 +0200 Subject: [PATCH 0553/1003] chore: update cargo lockfile --- Cargo.lock | 187 ++++++++++++++++++++++++++++------------------------- 1 file changed, 98 insertions(+), 89 deletions(-) diff --git 
a/Cargo.lock b/Cargo.lock index f1ae8cad0..32d35cbe2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -41,9 +41,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" +checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" dependencies = [ "memchr", ] @@ -93,13 +93,13 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-trait" -version = "0.1.72" +version = "0.1.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09" +checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -268,9 +268,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" [[package]] name = "bitvec" @@ -386,9 +386,9 @@ checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] name = "cc" -version = "1.0.82" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "305fe645edc1442a0fa8b6726ba61d422798d37a52e12eaecf4b022ebbb88f01" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ "libc", ] @@ -598,7 +598,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -609,14 +609,14 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ 
"darling_core", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] name = "deranged" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7684a49fb1af197853ef7b2ee694bc1f5b4179556f1e5710e1760c5db6f5e929" +checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" dependencies = [ "serde", ] @@ -642,7 +642,7 @@ checksum = "9abcad25e9720609ccb3dcdb795d845e37d8ce34183330a9f48b03a1a71c8e21" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -753,9 +753,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" dependencies = [ "crc32fast", "libz-sys", @@ -842,7 +842,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -854,7 +854,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -866,7 +866,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -931,7 +931,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -1095,9 +1095,9 @@ checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" @@ -1116,7 
+1116,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2", + "socket2 0.4.9", "tokio", "tower-service", "tracing", @@ -1204,7 +1204,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5305557fa27b460072ae15ce07617e999f5879f14d376c8449f0bfb9f9d8e91e" dependencies = [ "derive_utils", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -1407,9 +1407,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "lru" @@ -1530,7 +1530,7 @@ dependencies = [ "percent-encoding", "serde", "serde_json", - "socket2", + "socket2 0.4.9", "twox-hash", "url", ] @@ -1724,7 +1724,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -1845,7 +1845,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -1876,14 +1876,14 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] name = "pin-project-lite" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c516611246607d0c04186886dbb3a754368ef82c79e9827a802c6d836dd111c" +checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" [[package]] name = "pin-utils" @@ -1973,9 +1973,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.32" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" +checksum = 
"5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] @@ -2192,7 +2192,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.0", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -2250,11 +2250,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.7" +version = "0.38.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "172891ebdceb05aa0005f533a6cbfca599ddd7d966f6f5d4d9b2e70478e70399" +checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.0", "errno", "libc", "linux-raw-sys", @@ -2381,9 +2381,9 @@ checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" [[package]] name = "serde" -version = "1.0.183" +version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c" +checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9" dependencies = [ "serde_derive", ] @@ -2409,20 +2409,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.183" +version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816" +checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] name = "serde_json" -version = "1.0.104" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" +checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" dependencies = [ "itoa", "ryu", 
@@ -2447,7 +2447,7 @@ checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -2473,9 +2473,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1402f54f9a3b9e2efe71c1cea24e648acce55887983553eeb858cf3115acfd49" +checksum = "1ca3b16a3d82c4088f343b7480a93550b3eabe1a358569c2dfe38bbcead07237" dependencies = [ "base64 0.21.2", "chrono", @@ -2490,14 +2490,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9197f1ad0e3c173a0222d3c4404fb04c3afe87e962bcb327af73e8301fa203c7" +checksum = "2e6be15c453eb305019bfa438b1593c731f36a289a7853f7707ee29e870b3b3c" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -2568,6 +2568,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "socket2" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +dependencies = [ + "libc", + "windows-sys", +] + [[package]] name = "spin" version = "0.5.2" @@ -2609,9 +2619,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.28" +version = "2.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" +checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" dependencies = [ "proc-macro2", "quote", @@ -2632,9 +2642,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.7.1" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dc02fddf48964c42031a0b3fe0428320ecf3a73c401040fc0096f97794310651" +checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ "cfg-if", "fastrand", @@ -2651,29 +2661,29 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.44" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" +checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.44" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" +checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] name = "time" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fdd63d58b18d663fbdf70e049f00a22c8e42be082203be7f26589213cd75ea" +checksum = "a79d09ac6b08c1ab3906a2f7cc2e81a0e27c7ae89c63812df75e52bef0751e07" dependencies = [ "deranged", "itoa", @@ -2690,9 +2700,9 @@ checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb71511c991639bb078fd5bf97757e03914361c48100d52878b8e52b46fb92cd" +checksum = "75c65469ed6b3a4809d987a41eb1dc918e9bc1d92211cbad7ae82931846f7451" dependencies = [ "time-core", ] @@ -2714,11 +2724,10 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.29.1" +version = "1.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" +checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" dependencies = [ - "autocfg", "backtrace", "bytes", "libc", @@ -2726,7 +2735,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.5.3", "tokio-macros", "windows-sys", ] @@ -2739,7 +2748,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -3079,7 +3088,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", "wasm-bindgen-shared", ] @@ -3113,7 +3122,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.29", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3176,9 +3185,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", @@ -3191,51 +3200,51 @@ dependencies = [ [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = 
"windows_i686_gnu" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.4" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acaaa1190073b2b101e15083c38ee8ec891b5e05cbee516521e94ec008f61e64" +checksum = "d09770118a7eb1ccaf4a594a221334119a44a814fcb0d31c5b85e83e97227a97" dependencies = [ "memchr", ] From a4ac6829a6a4cd35968231d14f09b91c8be47d6e Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 8 Aug 2023 10:58:52 +0200 Subject: [PATCH 0554/1003] dev: copy bencode into 
local contrib folder --- Cargo.lock | 241 ++++++++++- Cargo.toml | 3 +- contrib/bencode/Cargo.toml | 34 ++ contrib/bencode/README.md | 4 + contrib/bencode/benches/bencode_benchmark.rs | 27 ++ contrib/bencode/benches/multi_kb.bencode | 1 + contrib/bencode/src/access/bencode.rs | 120 ++++++ contrib/bencode/src/access/convert.rs | 230 +++++++++++ contrib/bencode/src/access/dict.rs | 64 +++ contrib/bencode/src/access/list.rs | 108 +++++ contrib/bencode/src/access/mod.rs | 4 + contrib/bencode/src/cow.rs | 44 ++ contrib/bencode/src/error.rs | 101 +++++ contrib/bencode/src/lib.rs | 143 +++++++ contrib/bencode/src/mutable/bencode_mut.rs | 229 +++++++++++ contrib/bencode/src/mutable/encode.rs | 67 ++++ contrib/bencode/src/mutable/mod.rs | 2 + contrib/bencode/src/reference/bencode_ref.rs | 265 ++++++++++++ contrib/bencode/src/reference/decode.rs | 398 +++++++++++++++++++ contrib/bencode/src/reference/decode_opt.rs | 55 +++ contrib/bencode/src/reference/mod.rs | 3 + contrib/bencode/test/mod.rs | 18 + src/servers/http/v1/responses/announce.rs | 2 +- src/servers/http/v1/responses/scrape.rs | 2 +- 24 files changed, 2150 insertions(+), 15 deletions(-) create mode 100644 contrib/bencode/Cargo.toml create mode 100644 contrib/bencode/README.md create mode 100644 contrib/bencode/benches/bencode_benchmark.rs create mode 100644 contrib/bencode/benches/multi_kb.bencode create mode 100644 contrib/bencode/src/access/bencode.rs create mode 100644 contrib/bencode/src/access/convert.rs create mode 100644 contrib/bencode/src/access/dict.rs create mode 100644 contrib/bencode/src/access/list.rs create mode 100644 contrib/bencode/src/access/mod.rs create mode 100644 contrib/bencode/src/cow.rs create mode 100644 contrib/bencode/src/error.rs create mode 100644 contrib/bencode/src/lib.rs create mode 100644 contrib/bencode/src/mutable/bencode_mut.rs create mode 100644 contrib/bencode/src/mutable/encode.rs create mode 100644 contrib/bencode/src/mutable/mod.rs create mode 100644 
contrib/bencode/src/reference/bencode_ref.rs create mode 100644 contrib/bencode/src/reference/decode.rs create mode 100644 contrib/bencode/src/reference/decode_opt.rs create mode 100644 contrib/bencode/src/reference/mod.rs create mode 100644 contrib/bencode/test/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 32d35cbe2..f2053b043 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -69,6 +69,18 @@ dependencies = [ "libc", ] +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "anstyle" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" + [[package]] name = "aquatic_udp_protocol" version = "0.8.0" @@ -215,6 +227,14 @@ version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +[[package]] +name = "bencode" +version = "1.0.0-alpha.1" +dependencies = [ + "criterion", + "error-chain", +] + [[package]] name = "bigdecimal" version = "0.3.1" @@ -251,15 +271,6 @@ dependencies = [ "shlex", ] -[[package]] -name = "bip_bencode" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6048cc5d9680544a5098a290d2845df7dae292c97687b9896b70365bad0ea416" -dependencies = [ - "error-chain", -] - [[package]] name = "bitflags" version = "1.3.2" @@ -384,6 +395,12 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + [[package]] name = "cc" version = "1.0.83" @@ -421,6 +438,33 @@ 
dependencies = [ "winapi", ] +[[package]] +name = "ciborium" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" + +[[package]] +name = "ciborium-ll" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" +dependencies = [ + "ciborium-io", + "half", +] + [[package]] name = "clang-sys" version = "1.6.1" @@ -432,6 +476,31 @@ dependencies = [ "libloading", ] +[[package]] +name = "clap" +version = "4.3.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03aef18ddf7d879c15ce20f04826ef8418101c7e528014c3eeea13321047dca3" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.3.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ce6fffb678c9b80a70b6b6de0aad31df727623a70fd9a842c30cd573e2fa98" +dependencies = [ + "anstyle", + "clap_lex", +] + +[[package]] +name = "clap_lex" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" + [[package]] name = "cmake" version = "0.1.50" @@ -500,6 +569,42 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "is-terminal", + "itertools", + "num-traits", + "once_cell", + "oorandom", + "plotters", + 
"rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools", +] + [[package]] name = "crossbeam" version = "0.8.2" @@ -717,11 +822,12 @@ dependencies = [ [[package]] name = "error-chain" -version = "0.11.0" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3" +checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" dependencies = [ "backtrace", + "version_check", ] [[package]] @@ -1016,6 +1122,12 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" + [[package]] name = "hashbrown" version = "0.12.3" @@ -1213,6 +1325,17 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" +[[package]] +name = "is-terminal" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +dependencies = [ + "hermit-abi", + "rustix", + "windows-sys", +] + [[package]] name = "itertools" version = "0.10.5" @@ -1701,6 +1824,12 @@ version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +[[package]] +name = "oorandom" +version = "11.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" + [[package]] name = 
"openssl" version = "0.10.56" @@ -1897,6 +2026,34 @@ version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +[[package]] +name = "plotters" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" + +[[package]] +name = "plotters-svg" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +dependencies = [ + "plotters-backend", +] + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -2048,6 +2205,28 @@ dependencies = [ "getrandom", ] +[[package]] +name = "rayon" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-utils", + "num_cpus", +] + [[package]] name = "redox_syscall" version = "0.3.5" @@ -2304,6 +2483,15 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + [[package]] name = "saturating" version = "0.1.0" @@ -2707,6 +2895,16 @@ dependencies = [ "time-core", ] +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tinyvec" version = "1.6.0" @@ -2837,8 +3035,8 @@ dependencies = [ "axum", "axum-client-ip", "axum-server", + "bencode", "binascii", - "bip_bencode", "chrono", "config", "derive_more", @@ -3052,6 +3250,16 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "walkdir" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "want" version = "0.3.1" @@ -3159,6 +3367,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index e87f7b972..a939318cd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,7 +41,7 @@ uuid = { version = "1", features = ["v4"] } axum = "0.6.20" axum-server = { version = "0.5", features = ["tls-rustls"] } axum-client-ip = "0.4.1" -bip_bencode = "0.4" +bencode = { version = "1.0.0-alpha.1", path = "contrib/bencode" } 
torrust-tracker-primitives = { version = "3.0.0-alpha.3", path = "packages/primitives" } torrust-tracker-configuration = { version = "3.0.0-alpha.3", path = "packages/configuration" } torrust-tracker-located-error = { version = "3.0.0-alpha.3", path = "packages/located-error" } @@ -59,6 +59,7 @@ torrust-tracker-test-helpers = { version = "3.0.0-alpha.3", path = "packages/tes [workspace] members = [ + "contrib/bencode", "packages/configuration", "packages/primitives", "packages/test-helpers", diff --git a/contrib/bencode/Cargo.toml b/contrib/bencode/Cargo.toml new file mode 100644 index 000000000..8334e270d --- /dev/null +++ b/contrib/bencode/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "bencode" +description = "Efficient decoding and encoding for bencode." +keywords = ["bencode"] +readme = "README.md" + + +authors = [ + "Nautilus Cyberneering , Andrew ", +] +categories = ["network-programming", "web-programming"] +documentation = "https://github.com/torrust/bittorrent-infrastructure-project" +edition = "2021" +homepage = "https://github.com/torrust/bittorrent-infrastructure-project" +license = "Apache-2.0" +publish = false # until we decide where to publish. +repository = "https://github.com/torrust/bittorrent-infrastructure-project" +rust-version = "1.71" +version = "1.0.0-alpha.1" + + +[dependencies] +error-chain = "0.12" + +[dev-dependencies] +criterion = "0.5" + +[[test]] +name = "test" +path = "test/mod.rs" + +[[bench]] +name = "bencode_benchmark" +harness = false \ No newline at end of file diff --git a/contrib/bencode/README.md b/contrib/bencode/README.md new file mode 100644 index 000000000..7a203082b --- /dev/null +++ b/contrib/bencode/README.md @@ -0,0 +1,4 @@ +# Bencode +This library allows for the creation and parsing of bencode encodings. + +Bencode is the binary encoding used throughout bittorrent technologies from metainfo files to DHT messages. 
Bencode types include integers, byte arrays, lists, and dictionaries, of which the last two can hold any bencode type (they could be recursively constructed). \ No newline at end of file diff --git a/contrib/bencode/benches/bencode_benchmark.rs b/contrib/bencode/benches/bencode_benchmark.rs new file mode 100644 index 000000000..729197d8a --- /dev/null +++ b/contrib/bencode/benches/bencode_benchmark.rs @@ -0,0 +1,27 @@ +use bencode::{BDecodeOpt, BencodeRef}; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; + +const B_NESTED_LISTS: &[u8; 100] = + b"lllllllllllllllllllllllllllllllllllllllllllllllllleeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"; // cspell:disable-line +const MULTI_KB_BENCODE: &[u8; 30004] = include_bytes!("multi_kb.bencode"); + +fn bench_nested_lists(bencode: &[u8]) { + BencodeRef::decode(bencode, BDecodeOpt::new(50, true, true)).unwrap(); +} + +fn bench_multi_kb_bencode(bencode: &[u8]) { + BencodeRef::decode(bencode, BDecodeOpt::default()).unwrap(); +} + +fn criterion_benchmark(c: &mut Criterion) { + c.bench_function("bencode nested lists", |b| { + b.iter(|| bench_nested_lists(black_box(B_NESTED_LISTS))); + }); + + c.bench_function("bencode multi kb", |b| { + b.iter(|| bench_multi_kb_bencode(black_box(MULTI_KB_BENCODE))); + }); +} + +criterion_group!(benches, criterion_benchmark); +criterion_main!(benches); diff --git a/contrib/bencode/benches/multi_kb.bencode b/contrib/bencode/benches/multi_kb.bencode new file mode 100644 index 000000000..b86f2846e --- /dev/null +++ b/contrib/bencode/benches/multi_kb.bencode @@ -0,0 +1 @@ +d7:comment17:Just Some Comment10:created by12:bip_metainfo13:creation 
datei1496618058e4:infod5:filesld6:lengthi1024e4:pathl1:b11:small_1.txteed6:lengthi1024e4:pathl1:b12:small_10.txteed6:lengthi1024e4:pathl1:b13:small_100.txteed6:lengthi1024e4:pathl1:b12:small_11.txteed6:lengthi1024e4:pathl1:b12:small_12.txteed6:lengthi1024e4:pathl1:b12:small_13.txteed6:lengthi1024e4:pathl1:b12:small_14.txteed6:lengthi1024e4:pathl1:b12:small_15.txteed6:lengthi1024e4:pathl1:b12:small_16.txteed6:lengthi1024e4:pathl1:b12:small_17.txteed6:lengthi1024e4:pathl1:b12:small_18.txteed6:lengthi1024e4:pathl1:b12:small_19.txteed6:lengthi1024e4:pathl1:b11:small_2.txteed6:lengthi1024e4:pathl1:b12:small_20.txteed6:lengthi1024e4:pathl1:b12:small_21.txteed6:lengthi1024e4:pathl1:b12:small_22.txteed6:lengthi1024e4:pathl1:b12:small_23.txteed6:lengthi1024e4:pathl1:b12:small_24.txteed6:lengthi1024e4:pathl1:b12:small_25.txteed6:lengthi1024e4:pathl1:b12:small_26.txteed6:lengthi1024e4:pathl1:b12:small_27.txteed6:lengthi1024e4:pathl1:b12:small_28.txteed6:lengthi1024e4:pathl1:b12:small_29.txteed6:lengthi1024e4:pathl1:b11:small_3.txteed6:lengthi1024e4:pathl1:b12:small_30.txteed6:lengthi1024e4:pathl1:b12:small_31.txteed6:lengthi1024e4:pathl1:b12:small_32.txteed6:lengthi1024e4:pathl1:b12:small_33.txteed6:lengthi1024e4:pathl1:b12:small_34.txteed6:lengthi1024e4:pathl1:b12:small_35.txteed6:lengthi1024e4:pathl1:b12:small_36.txteed6:lengthi1024e4:pathl1:b12:small_37.txteed6:lengthi1024e4:pathl1:b12:small_38.txteed6:lengthi1024e4:pathl1:b12:small_39.txteed6:lengthi1024e4:pathl1:b11:small_4.txteed6:lengthi1024e4:pathl1:b12:small_40.txteed6:lengthi1024e4:pathl1:b12:small_41.txteed6:lengthi1024e4:pathl1:b12:small_42.txteed6:lengthi1024e4:pathl1:b12:small_43.txteed6:lengthi1024e4:pathl1:b12:small_44.txteed6:lengthi1024e4:pathl1:b12:small_45.txteed6:lengthi1024e4:pathl1:b12:small_46.txteed6:lengthi1024e4:pathl1:b12:small_47.txteed6:lengthi1024e4:pathl1:b12:small_48.txteed6:lengthi1024e4:pathl1:b12:small_49.txteed6:lengthi1024e4:pathl1:b11:small_5.txteed6:lengthi1024e4:pathl1:b12:small_50.txte
ed6:lengthi1024e4:pathl1:b12:small_51.txteed6:lengthi1024e4:pathl1:b12:small_52.txteed6:lengthi1024e4:pathl1:b12:small_53.txteed6:lengthi1024e4:pathl1:b12:small_54.txteed6:lengthi1024e4:pathl1:b12:small_55.txteed6:lengthi1024e4:pathl1:b12:small_56.txteed6:lengthi1024e4:pathl1:b12:small_57.txteed6:lengthi1024e4:pathl1:b12:small_58.txteed6:lengthi1024e4:pathl1:b12:small_59.txteed6:lengthi1024e4:pathl1:b11:small_6.txteed6:lengthi1024e4:pathl1:b12:small_60.txteed6:lengthi1024e4:pathl1:b12:small_61.txteed6:lengthi1024e4:pathl1:b12:small_62.txteed6:lengthi1024e4:pathl1:b12:small_63.txteed6:lengthi1024e4:pathl1:b12:small_64.txteed6:lengthi1024e4:pathl1:b12:small_65.txteed6:lengthi1024e4:pathl1:b12:small_66.txteed6:lengthi1024e4:pathl1:b12:small_67.txteed6:lengthi1024e4:pathl1:b12:small_68.txteed6:lengthi1024e4:pathl1:b12:small_69.txteed6:lengthi1024e4:pathl1:b11:small_7.txteed6:lengthi1024e4:pathl1:b12:small_70.txteed6:lengthi1024e4:pathl1:b12:small_71.txteed6:lengthi1024e4:pathl1:b12:small_72.txteed6:lengthi1024e4:pathl1:b12:small_73.txteed6:lengthi1024e4:pathl1:b12:small_74.txteed6:lengthi1024e4:pathl1:b12:small_75.txteed6:lengthi1024e4:pathl1:b12:small_76.txteed6:lengthi1024e4:pathl1:b12:small_77.txteed6:lengthi1024e4:pathl1:b12:small_78.txteed6:lengthi1024e4:pathl1:b12:small_79.txteed6:lengthi1024e4:pathl1:b11:small_8.txteed6:lengthi1024e4:pathl1:b12:small_80.txteed6:lengthi1024e4:pathl1:b12:small_81.txteed6:lengthi1024e4:pathl1:b12:small_82.txteed6:lengthi1024e4:pathl1:b12:small_83.txteed6:lengthi1024e4:pathl1:b12:small_84.txteed6:lengthi1024e4:pathl1:b12:small_85.txteed6:lengthi1024e4:pathl1:b12:small_86.txteed6:lengthi1024e4:pathl1:b12:small_87.txteed6:lengthi1024e4:pathl1:b12:small_88.txteed6:lengthi1024e4:pathl1:b12:small_89.txteed6:lengthi1024e4:pathl1:b11:small_9.txteed6:lengthi1024e4:pathl1:b12:small_90.txteed6:lengthi1024e4:pathl1:b12:small_91.txteed6:lengthi1024e4:pathl1:b12:small_92.txteed6:lengthi1024e4:pathl1:b12:small_93.txteed6:lengthi1024e4:pathl1:b12:s
mall_94.txteed6:lengthi1024e4:pathl1:b12:small_95.txteed6:lengthi1024e4:pathl1:b12:small_96.txteed6:lengthi1024e4:pathl1:b12:small_97.txteed6:lengthi1024e4:pathl1:b12:small_98.txteed6:lengthi1024e4:pathl1:b12:small_99.txteed6:lengthi5368709120e4:pathl9:large.txteee4:name1:a12:piece lengthi4194304e6:pieces25620:+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ
\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;
Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰
ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8
ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â
;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z
‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/
8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•
â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}
Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽
/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ
•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;ÃZiî½c^c.>óN7’±µìee \ No newline at end of file diff --git a/contrib/bencode/src/access/bencode.rs b/contrib/bencode/src/access/bencode.rs new file mode 100644 index 000000000..ee90296e2 --- /dev/null +++ b/contrib/bencode/src/access/bencode.rs @@ -0,0 +1,120 @@ +use crate::access::dict::BDictAccess; +use crate::access::list::BListAccess; + +/// Abstract representation of a `BencodeRef` object. +pub enum RefKind<'a, K, V> { + /// Bencode Integer. + Int(i64), + /// Bencode Bytes. + Bytes(&'a [u8]), + /// Bencode List. + List(&'a dyn BListAccess), + /// Bencode Dictionary. + Dict(&'a dyn BDictAccess), +} + +/// Trait for read access to some bencode type. +pub trait BRefAccess: Sized { + type BKey; + type BType: BRefAccess; + + /// Access the bencode as a `BencodeRefKind`. + fn kind(&self) -> RefKind<'_, Self::BKey, Self::BType>; + + /// Attempt to access the bencode as a `str`. + fn str(&self) -> Option<&str>; + + /// Attempt to access the bencode as an `i64`. + fn int(&self) -> Option; + + /// Attempt to access the bencode as an `[u8]`. + fn bytes(&self) -> Option<&[u8]>; + + /// Attempt to access the bencode as an `BListAccess`. + fn list(&self) -> Option<&dyn BListAccess>; + + /// Attempt to access the bencode as an `BDictAccess`. + fn dict(&self) -> Option<&dyn BDictAccess>; +} + +/// Trait for extended read access to some bencode type. +/// +/// Use this trait when you want to make sure that the lifetime of +/// the underlying buffers is tied to the lifetime of the backing +/// bencode buffer. +pub trait BRefAccessExt<'a>: BRefAccess { + /// Attempt to access the bencode as a `str`. + fn str_ext(&self) -> Option<&'a str>; + + /// Attempt to access the bencode as an `[u8]`. 
+ fn bytes_ext(&self) -> Option<&'a [u8]>; +} + +impl<'a, T> BRefAccess for &'a T +where + T: BRefAccess, +{ + type BKey = T::BKey; + type BType = T::BType; + + fn kind(&self) -> RefKind<'_, Self::BKey, Self::BType> { + (*self).kind() + } + + fn str(&self) -> Option<&str> { + (*self).str() + } + + fn int(&self) -> Option { + (*self).int() + } + + fn bytes(&self) -> Option<&[u8]> { + (*self).bytes() + } + + fn list(&self) -> Option<&dyn BListAccess> { + (*self).list() + } + + fn dict(&self) -> Option<&dyn BDictAccess> { + (*self).dict() + } +} + +impl<'a: 'b, 'b, T> BRefAccessExt<'a> for &'b T +where + T: BRefAccessExt<'a>, +{ + fn str_ext(&self) -> Option<&'a str> { + (*self).str_ext() + } + + fn bytes_ext(&self) -> Option<&'a [u8]> { + (*self).bytes_ext() + } +} + +/// Abstract representation of a `BencodeMut` object. +pub enum MutKind<'a, K, V> { + /// Bencode Integer. + Int(i64), + /// Bencode Bytes. + Bytes(&'a [u8]), + /// Bencode List. + List(&'a mut dyn BListAccess), + /// Bencode Dictionary. + Dict(&'a mut dyn BDictAccess), +} + +/// Trait for write access to some bencode type. +pub trait BMutAccess: Sized + BRefAccess { + /// Access the bencode as a `BencodeMutKind`. + fn kind_mut(&mut self) -> MutKind<'_, Self::BKey, Self::BType>; + + /// Attempt to access the bencode as a mutable `BListAccess`. + fn list_mut(&mut self) -> Option<&mut dyn BListAccess>; + + /// Attempt to access the bencode as a mutable `BDictAccess`. 
+ fn dict_mut(&mut self) -> Option<&mut dyn BDictAccess>; +} diff --git a/contrib/bencode/src/access/convert.rs b/contrib/bencode/src/access/convert.rs new file mode 100644 index 000000000..42b04f267 --- /dev/null +++ b/contrib/bencode/src/access/convert.rs @@ -0,0 +1,230 @@ +#![allow(clippy::missing_errors_doc)] +use crate::access::bencode::{BRefAccess, BRefAccessExt}; +use crate::access::dict::BDictAccess; +use crate::access::list::BListAccess; +use crate::{BencodeConvertError, BencodeConvertErrorKind}; + +/// Trait for extended casting of bencode objects and converting conversion errors into application specific errors. +pub trait BConvertExt: BConvert { + /// See `BConvert::convert_bytes`. + fn convert_bytes_ext<'a, B, E>(&self, bencode: B, error_key: E) -> Result<&'a [u8], Self::Error> + where + B: BRefAccessExt<'a>, + E: AsRef<[u8]>, + { + bencode.bytes_ext().ok_or( + self.handle_error(BencodeConvertError::from_kind(BencodeConvertErrorKind::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "Bytes".to_owned(), + })), + ) + } + + /// See `BConvert::convert_str`. + fn convert_str_ext<'a, B, E>(&self, bencode: &B, error_key: E) -> Result<&'a str, Self::Error> + where + B: BRefAccessExt<'a>, + E: AsRef<[u8]>, + { + bencode.str_ext().ok_or( + self.handle_error(BencodeConvertError::from_kind(BencodeConvertErrorKind::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "UTF-8 Bytes".to_owned(), + })), + ) + } + + /// See `BConvert::lookup_and_convert_bytes`. + fn lookup_and_convert_bytes_ext<'a, B, K1, K2>( + &self, + dictionary: &dyn BDictAccess, + key: K2, + ) -> Result<&'a [u8], Self::Error> + where + B: BRefAccessExt<'a>, + K2: AsRef<[u8]>, + { + self.convert_bytes_ext(self.lookup(dictionary, &key)?, &key) + } + + /// See `BConvert::lookup_and_convert_str`. 
+ fn lookup_and_convert_str_ext<'a, B, K1, K2>( + &self, + dictionary: &dyn BDictAccess, + key: K2, + ) -> Result<&'a str, Self::Error> + where + B: BRefAccessExt<'a>, + K2: AsRef<[u8]>, + { + self.convert_str_ext(self.lookup(dictionary, &key)?, &key) + } +} + +/// Trait for casting bencode objects and converting conversion errors into application specific errors. +#[allow(clippy::module_name_repetitions)] +pub trait BConvert { + type Error; + + /// Convert the given conversion error into the appropriate error type. + fn handle_error(&self, error: BencodeConvertError) -> Self::Error; + + /// Attempt to convert the given bencode value into an integer. + /// + /// Error key is used to generate an appropriate error message should the operation return an error. + fn convert_int(&self, bencode: B, error_key: E) -> Result + where + B: BRefAccess, + E: AsRef<[u8]>, + { + bencode.int().ok_or( + self.handle_error(BencodeConvertError::from_kind(BencodeConvertErrorKind::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "Integer".to_owned(), + })), + ) + } + + /// Attempt to convert the given bencode value into bytes. + /// + /// Error key is used to generate an appropriate error message should the operation return an error. + fn convert_bytes<'a, B, E>(&self, bencode: &'a B, error_key: E) -> Result<&'a [u8], Self::Error> + where + B: BRefAccess, + E: AsRef<[u8]>, + { + bencode.bytes().ok_or( + self.handle_error(BencodeConvertError::from_kind(BencodeConvertErrorKind::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "Bytes".to_owned(), + })), + ) + } + + /// Attempt to convert the given bencode value into a UTF-8 string. + /// + /// Error key is used to generate an appropriate error message should the operation return an error. 
+ fn convert_str<'a, B, E>(&self, bencode: &'a B, error_key: E) -> Result<&'a str, Self::Error> + where + B: BRefAccess, + E: AsRef<[u8]>, + { + bencode.str().ok_or( + self.handle_error(BencodeConvertError::from_kind(BencodeConvertErrorKind::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "UTF-8 Bytes".to_owned(), + })), + ) + } + + /// Attempt to convert the given bencode value into a list. + /// + /// Error key is used to generate an appropriate error message should the operation return an error. + fn convert_list<'a, B, E>(&self, bencode: &'a B, error_key: E) -> Result<&'a dyn BListAccess, Self::Error> + where + B: BRefAccess, + E: AsRef<[u8]>, + { + bencode.list().ok_or( + self.handle_error(BencodeConvertError::from_kind(BencodeConvertErrorKind::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "List".to_owned(), + })), + ) + } + + /// Attempt to convert the given bencode value into a dictionary. + /// + /// Error key is used to generate an appropriate error message should the operation return an error. + fn convert_dict<'a, B, E>(&self, bencode: &'a B, error_key: E) -> Result<&'a dyn BDictAccess, Self::Error> + where + B: BRefAccess, + E: AsRef<[u8]>, + { + bencode.dict().ok_or( + self.handle_error(BencodeConvertError::from_kind(BencodeConvertErrorKind::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "Dictionary".to_owned(), + })), + ) + } + + /// Look up a value in a dictionary of bencoded values using the given key. 
+ fn lookup<'a, B, K1, K2>(&self, dictionary: &'a dyn BDictAccess, key: K2) -> Result<&'a B, Self::Error> + where + B: BRefAccess, + K2: AsRef<[u8]>, + { + let key_ref = key.as_ref(); + + match dictionary.lookup(key_ref) { + Some(n) => Ok(n), + None => Err( + self.handle_error(BencodeConvertError::from_kind(BencodeConvertErrorKind::MissingKey { + key: key_ref.to_owned(), + })), + ), + } + } + + /// Combines a lookup operation on the given key with a conversion of the value, if found, to an integer. + fn lookup_and_convert_int(&self, dictionary: &dyn BDictAccess, key: K2) -> Result + where + B: BRefAccess, + K2: AsRef<[u8]>, + { + self.convert_int(self.lookup(dictionary, &key)?, &key) + } + + /// Combines a lookup operation on the given key with a conversion of the value, if found, to a series of bytes. + fn lookup_and_convert_bytes<'a, B, K1, K2>( + &self, + dictionary: &'a dyn BDictAccess, + key: K2, + ) -> Result<&'a [u8], Self::Error> + where + B: BRefAccess, + K2: AsRef<[u8]>, + { + self.convert_bytes(self.lookup(dictionary, &key)?, &key) + } + + /// Combines a lookup operation on the given key with a conversion of the value, if found, to a UTF-8 string. + fn lookup_and_convert_str<'a, B, K1, K2>( + &self, + dictionary: &'a dyn BDictAccess, + key: K2, + ) -> Result<&'a str, Self::Error> + where + B: BRefAccess, + K2: AsRef<[u8]>, + { + self.convert_str(self.lookup(dictionary, &key)?, &key) + } + + /// Combines a lookup operation on the given key with a conversion of the value, if found, to a list. + fn lookup_and_convert_list<'a, B, K1, K2>( + &self, + dictionary: &'a dyn BDictAccess, + key: K2, + ) -> Result<&'a dyn BListAccess, Self::Error> + where + B: BRefAccess, + K2: AsRef<[u8]>, + { + self.convert_list(self.lookup(dictionary, &key)?, &key) + } + + /// Combines a lookup operation on the given key with a conversion of the value, if found, to a dictionary. 
+ fn lookup_and_convert_dict<'a, B, K1, K2>( + &self, + dictionary: &'a dyn BDictAccess, + key: K2, + ) -> Result<&'a dyn BDictAccess, Self::Error> + where + B: BRefAccess, + K2: AsRef<[u8]>, + { + self.convert_dict(self.lookup(dictionary, &key)?, &key) + } +} diff --git a/contrib/bencode/src/access/dict.rs b/contrib/bencode/src/access/dict.rs new file mode 100644 index 000000000..596d9535e --- /dev/null +++ b/contrib/bencode/src/access/dict.rs @@ -0,0 +1,64 @@ +use std::borrow::Cow; +use std::collections::BTreeMap; + +/// Trait for working with generic map data structures. +pub trait BDictAccess { + /// Convert the dictionary to an unordered list of key/value pairs. + fn to_list(&self) -> Vec<(&K, &V)>; + + /// Lookup a value in the dictionary. + fn lookup(&self, key: &[u8]) -> Option<&V>; + + /// Lookup a mutable value in the dictionary. + fn lookup_mut(&mut self, key: &[u8]) -> Option<&mut V>; + + /// Insert a key/value pair into the dictionary. + fn insert(&mut self, key: K, value: V) -> Option; + + /// Remove a value from the dictionary and return it. 
+ fn remove(&mut self, key: &[u8]) -> Option; +} + +impl<'a, V> BDictAccess<&'a [u8], V> for BTreeMap<&'a [u8], V> { + fn to_list(&self) -> Vec<(&&'a [u8], &V)> { + self.iter().map(|(k, v)| (k, v)).collect() + } + + fn lookup(&self, key: &[u8]) -> Option<&V> { + self.get(key) + } + + fn lookup_mut(&mut self, key: &[u8]) -> Option<&mut V> { + self.get_mut(key) + } + + fn insert(&mut self, key: &'a [u8], value: V) -> Option { + self.insert(key, value) + } + + fn remove(&mut self, key: &[u8]) -> Option { + self.remove(key) + } +} + +impl<'a, V> BDictAccess, V> for BTreeMap, V> { + fn to_list(&self) -> Vec<(&Cow<'a, [u8]>, &V)> { + self.iter().map(|(k, v)| (k, v)).collect() + } + + fn lookup(&self, key: &[u8]) -> Option<&V> { + self.get(key) + } + + fn lookup_mut(&mut self, key: &[u8]) -> Option<&mut V> { + self.get_mut(key) + } + + fn insert(&mut self, key: Cow<'a, [u8]>, value: V) -> Option { + self.insert(key, value) + } + + fn remove(&mut self, key: &[u8]) -> Option { + self.remove(key) + } +} diff --git a/contrib/bencode/src/access/list.rs b/contrib/bencode/src/access/list.rs new file mode 100644 index 000000000..840bffa1e --- /dev/null +++ b/contrib/bencode/src/access/list.rs @@ -0,0 +1,108 @@ +use std::ops::{Index, IndexMut}; + +/// Trait for working with generic list data structures. +pub trait BListAccess { + /// Get a list element at the given index. + fn get(&self, index: usize) -> Option<&V>; + + /// Get a mutable list element at the given index. + fn get_mut(&mut self, index: usize) -> Option<&mut V>; + + /// Remove a list element at the given index. + fn remove(&mut self, index: usize) -> Option; + + /// Insert a list element at the given index. + fn insert(&mut self, index: usize, item: V); + + /// Push an element to the back of the list. + fn push(&mut self, item: V); + + /// Get the length of the list. 
+ fn len(&self) -> usize; + + fn is_empty(&self) -> bool; +} + +impl<'a, V: 'a> Index for &'a dyn BListAccess { + type Output = V; + + fn index(&self, index: usize) -> &V { + self.get(index).unwrap() + } +} + +impl<'a, V: 'a> Index for &'a mut dyn BListAccess { + type Output = V; + + fn index(&self, index: usize) -> &V { + self.get(index).unwrap() + } +} + +impl<'a, V: 'a> IndexMut for &'a mut dyn BListAccess { + fn index_mut(&mut self, index: usize) -> &mut V { + self.get_mut(index).unwrap() + } +} + +impl<'a, V: 'a> IntoIterator for &'a dyn BListAccess { + type Item = &'a V; + type IntoIter = BListIter<'a, V>; + + fn into_iter(self) -> BListIter<'a, V> { + BListIter { index: 0, access: self } + } +} + +pub struct BListIter<'a, V> { + index: usize, + access: &'a dyn BListAccess, +} + +impl<'a, V> Iterator for BListIter<'a, V> { + type Item = &'a V; + + fn next(&mut self) -> Option<&'a V> { + let opt_next = self.access.get(self.index); + + if opt_next.is_some() { + self.index += 1; + } + + opt_next + } +} + +impl BListAccess for Vec { + fn get(&self, index: usize) -> Option<&V> { + self[..].get(index) + } + + fn get_mut(&mut self, index: usize) -> Option<&mut V> { + self[..].get_mut(index) + } + + fn remove(&mut self, index: usize) -> Option { + if index >= self[..].len() { + None + } else { + Some(Vec::remove(self, index)) + } + } + + fn insert(&mut self, index: usize, item: V) { + Vec::insert(self, index, item); + } + + fn push(&mut self, item: V) { + Vec::push(self, item); + } + + fn len(&self) -> usize { + Vec::len(self) + } + + fn is_empty(&self) -> bool { + Vec::is_empty(self) + } +} diff --git a/contrib/bencode/src/access/mod.rs b/contrib/bencode/src/access/mod.rs new file mode 100644 index 000000000..f14b032d4 --- /dev/null +++ b/contrib/bencode/src/access/mod.rs @@ -0,0 +1,4 @@ +pub mod bencode; +pub mod convert; +pub mod dict; +pub mod list; diff --git a/contrib/bencode/src/cow.rs b/contrib/bencode/src/cow.rs new file mode 100644 index 
000000000..0d38c751b --- /dev/null +++ b/contrib/bencode/src/cow.rs @@ -0,0 +1,44 @@ +use std::borrow::Cow; + +/// Trait for macros to convert owned/borrowed types to `Cow`. +/// +/// This is needed because `&str` and `String` do not have `From` +/// implements into `Cow<_, [u8]>`. One solution is to just call `AsRef<[u8]>` +/// before converting. However, then when a user specifies an owned type, +/// we will implicitly borrow that; this trait prevents that so that macro +/// behavior is intuitive, so that owned types stay owned. +pub trait BCowConvert<'a> { + fn convert(self) -> Cow<'a, [u8]>; +} + +// TODO: Enable when specialization lands. +/* +impl<'a, T> BCowConvert<'a> for T where T: AsRef<[u8]> + 'a { + fn convert(self) -> Cow<'a, [u8]> { + self.into() + } +}*/ + +impl<'a> BCowConvert<'a> for &'a [u8] { + fn convert(self) -> Cow<'a, [u8]> { + self.into() + } +} + +impl<'a> BCowConvert<'a> for &'a str { + fn convert(self) -> Cow<'a, [u8]> { + self.as_bytes().into() + } +} + +impl BCowConvert<'static> for String { + fn convert(self) -> Cow<'static, [u8]> { + self.into_bytes().into() + } +} + +impl BCowConvert<'static> for Vec { + fn convert(self) -> Cow<'static, [u8]> { + self.into() + } +} diff --git a/contrib/bencode/src/error.rs b/contrib/bencode/src/error.rs new file mode 100644 index 000000000..18ebe9605 --- /dev/null +++ b/contrib/bencode/src/error.rs @@ -0,0 +1,101 @@ +use error_chain::error_chain; + +error_chain! 
{ + types { + BencodeParseError, BencodeParseErrorKind, BencodeParseResultExt, BencodeParseResult; + } + + errors { + BytesEmpty { + pos: usize + } { + description("Incomplete Number Of Bytes") + display("Incomplete Number Of Bytes At {:?}", pos) + } + InvalidByte { + pos: usize + } { + description("Invalid Byte Found") + display("Invalid Byte Found At {:?}", pos) + } + InvalidIntNoDelimiter { + pos: usize + } { + description("Invalid Integer Found With No Delimiter") + display("Invalid Integer Found With No Delimiter At {:?}", pos) + } + InvalidIntNegativeZero { + pos: usize + } { + description("Invalid Integer Found As Negative Zero") + display("Invalid Integer Found As Negative Zero At {:?}", pos) + } + InvalidIntZeroPadding { + pos: usize + } { + description("Invalid Integer Found With Zero Padding") + display("Invalid Integer Found With Zero Padding At {:?}", pos) + } + InvalidIntParseError { + pos: usize + } { + description("Invalid Integer Found To Fail Parsing") + display("Invalid Integer Found To Fail Parsing At {:?}", pos) + } + InvalidKeyOrdering { + pos: usize, + key: Vec + } { + description("Invalid Dictionary Key Ordering Found") + display("Invalid Dictionary Key Ordering Found At {:?} For Key {:?}", pos, key) + } + InvalidKeyDuplicates { + pos: usize, + key: Vec + } { + description("Invalid Dictionary Duplicate Keys Found") + display("Invalid Dictionary Key Found At {:?} For Key {:?}", pos, key) + } + InvalidLengthNegative { + pos: usize + } { + description("Invalid Byte Length Found As Negative") + display("Invalid Byte Length Found As Negative At {:?}", pos) + } + InvalidLengthOverflow { + pos: usize + } { + description("Invalid Byte Length Found To Overflow Buffer Length") + display("Invalid Byte Length Found To Overflow Buffer Length At {:?}", pos) + } + InvalidRecursionExceeded { + pos: usize, + max: usize + } { + description("Invalid Recursion Limit Exceeded") + display("Invalid Recursion Limit Exceeded At {:?} For Limit {:?}", pos, max) + } + 
} +} + +error_chain! { + types { + BencodeConvertError, BencodeConvertErrorKind, BencodeConvertResultExt, BencodeConvertResult; + } + + errors { + MissingKey { + key: Vec + } { + description("Missing Key In Bencode") + display("Missing Key In Bencode For {:?}", key) + } + WrongType { + key: Vec, + expected_type: String + } { + description("Wrong Type In Bencode") + display("Wrong Type In Bencode For {:?} Expected Type {}", key, expected_type) + } + } +} diff --git a/contrib/bencode/src/lib.rs b/contrib/bencode/src/lib.rs new file mode 100644 index 000000000..103a3c371 --- /dev/null +++ b/contrib/bencode/src/lib.rs @@ -0,0 +1,143 @@ +//! Library for parsing and converting bencoded data. +//! +//! # Examples +//! +//! Decoding bencoded data: +//! +//! ```rust +//! extern crate bencode; +//! +//! use std::default::Default; +//! use bencode::{BencodeRef, BRefAccess, BDecodeOpt}; +//! +//! fn main() { +//! let data = b"d12:lucky_numberi7ee"; // cspell:disable-line +//! let bencode = BencodeRef::decode(data, BDecodeOpt::default()).unwrap(); +//! +//! assert_eq!(7, bencode.dict().unwrap().lookup("lucky_number".as_bytes()) +//! .unwrap().int().unwrap()); +//! } +//! ``` +//! +//! Encoding bencoded data: +//! +//! ```rust +//! #[macro_use] +//! extern crate bencode; +//! +//! fn main() { +//! let message = (ben_map!{ +//! "lucky_number" => ben_int!(7), +//! "lucky_string" => ben_bytes!("7") +//! }).encode(); +//! +//! let data = b"d12:lucky_numberi7e12:lucky_string1:7e"; // cspell:disable-line +//! assert_eq!(&data[..], &message[..]); +//! } +//! ``` + +mod access; +mod cow; +mod error; +mod mutable; +mod reference; + +/// Traits for implementation functionality. +pub mod inner { + pub use crate::cow::BCowConvert; +} + +/// Traits for extended functionality. 
+pub mod ext { + #[allow(clippy::module_name_repetitions)] + pub use crate::access::bencode::BRefAccessExt; + #[allow(clippy::module_name_repetitions)] + pub use crate::access::convert::BConvertExt; +} + +#[deprecated(since = "1.0.0", note = "use `MutKind` instead.")] +pub use crate::access::bencode::MutKind as BencodeMutKind; +#[deprecated(since = "1.0.0", note = "use `RefKind` instead.")] +pub use crate::access::bencode::RefKind as BencodeRefKind; +pub use crate::access::bencode::{BMutAccess, BRefAccess, MutKind, RefKind}; +pub use crate::access::convert::BConvert; +pub use crate::access::dict::BDictAccess; +pub use crate::access::list::BListAccess; +pub use crate::error::{ + BencodeConvertError, BencodeConvertErrorKind, BencodeConvertResult, BencodeParseError, BencodeParseErrorKind, + BencodeParseResult, +}; +pub use crate::mutable::bencode_mut::BencodeMut; +pub use crate::reference::bencode_ref::BencodeRef; +pub use crate::reference::decode_opt::BDecodeOpt; + +const BEN_END: u8 = b'e'; +const DICT_START: u8 = b'd'; +const LIST_START: u8 = b'l'; +const INT_START: u8 = b'i'; + +const BYTE_LEN_LOW: u8 = b'0'; +const BYTE_LEN_HIGH: u8 = b'9'; +const BYTE_LEN_END: u8 = b':'; + +/// Construct a `BencodeMut` map by supplying string references as keys and `BencodeMut` as values. +#[macro_export] +macro_rules! ben_map { +( $($key:expr => $val:expr),* ) => { + { + use bencode::{BMutAccess, BencodeMut}; + use bencode::inner::BCowConvert; + + let mut bencode_map = BencodeMut::new_dict(); + { + let map = bencode_map.dict_mut().unwrap(); + $( + map.insert(BCowConvert::convert($key), $val); + )* + } + + bencode_map + } + } +} + +/// Construct a `BencodeMut` list by supplying a list of `BencodeMut` values. +#[macro_export] +macro_rules! 
ben_list { + ( $($ben:expr),* ) => { + { + use bencode::{BencodeMut, BMutAccess}; + + let mut bencode_list = BencodeMut::new_list(); + { + let list = bencode_list.list_mut().unwrap(); + $( + list.push($ben); + )* + } + + bencode_list + } + } +} + +/// Construct `BencodeMut` bytes by supplying a type convertible to `Vec`. +#[macro_export] +macro_rules! ben_bytes { + ( $ben:expr ) => {{ + use bencode::inner::BCowConvert; + use bencode::BencodeMut; + + BencodeMut::new_bytes(BCowConvert::convert($ben)) + }}; +} + +/// Construct a `BencodeMut` integer by supplying an `i64`. +#[macro_export] +macro_rules! ben_int { + ( $ben:expr ) => {{ + use bencode::BencodeMut; + + BencodeMut::new_int($ben) + }}; +} diff --git a/contrib/bencode/src/mutable/bencode_mut.rs b/contrib/bencode/src/mutable/bencode_mut.rs new file mode 100644 index 000000000..a3f95dbbf --- /dev/null +++ b/contrib/bencode/src/mutable/bencode_mut.rs @@ -0,0 +1,229 @@ +use std::borrow::Cow; +use std::collections::BTreeMap; +use std::str; + +use crate::access::bencode::{BMutAccess, BRefAccess, MutKind, RefKind}; +use crate::access::dict::BDictAccess; +use crate::access::list::BListAccess; +use crate::mutable::encode; + +/// Bencode object that holds references to the underlying data. +#[derive(Debug, Eq, PartialEq, Clone, Hash)] +pub enum Inner<'a> { + /// Bencode Integer. + Int(i64), + /// Bencode Bytes. + Bytes(Cow<'a, [u8]>), + /// Bencode List. + List(Vec>), + /// Bencode Dictionary. + Dict(BTreeMap, BencodeMut<'a>>), +} + +/// `BencodeMut` object that stores references to some data. +#[derive(Debug, Eq, PartialEq, Clone, Hash)] +pub struct BencodeMut<'a> { + inner: Inner<'a>, +} + +impl<'a> BencodeMut<'a> { + fn new(inner: Inner<'a>) -> BencodeMut<'a> { + BencodeMut { inner } + } + + /// Create a new `BencodeMut` representing an `i64`. + #[must_use] + pub fn new_int(value: i64) -> BencodeMut<'a> { + BencodeMut::new(Inner::Int(value)) + } + + /// Create a new `BencodeMut` representing a `[u8]`. 
+ #[must_use] + pub fn new_bytes(value: Cow<'a, [u8]>) -> BencodeMut<'a> { + BencodeMut::new(Inner::Bytes(value)) + } + + /// Create a new `BencodeMut` representing a `BListAccess`. + #[must_use] + pub fn new_list() -> BencodeMut<'a> { + BencodeMut::new(Inner::List(Vec::new())) + } + + /// Create a new `BencodeMut` representing a `BDictAccess`. + #[must_use] + pub fn new_dict() -> BencodeMut<'a> { + BencodeMut::new(Inner::Dict(BTreeMap::new())) + } + + /// Encode the `BencodeMut` into a buffer representing the bencode. + #[must_use] + pub fn encode(&self) -> Vec { + let mut buffer = Vec::new(); + + encode::encode(self, &mut buffer); + + buffer + } +} + +impl<'a> BRefAccess for BencodeMut<'a> { + type BKey = Cow<'a, [u8]>; + type BType = BencodeMut<'a>; + + fn kind<'b>(&'b self) -> RefKind<'b, Cow<'a, [u8]>, BencodeMut<'a>> { + match self.inner { + Inner::Int(n) => RefKind::Int(n), + Inner::Bytes(ref n) => RefKind::Bytes(n), + Inner::List(ref n) => RefKind::List(n), + Inner::Dict(ref n) => RefKind::Dict(n), + } + } + + fn str(&self) -> Option<&str> { + let bytes = self.bytes()?; + + match str::from_utf8(bytes) { + Ok(n) => Some(n), + Err(_) => None, + } + } + + fn int(&self) -> Option { + match self.inner { + Inner::Int(n) => Some(n), + _ => None, + } + } + + fn bytes(&self) -> Option<&[u8]> { + match self.inner { + Inner::Bytes(ref n) => Some(n.as_ref()), + _ => None, + } + } + + fn list(&self) -> Option<&dyn BListAccess>> { + match self.inner { + Inner::List(ref n) => Some(n), + _ => None, + } + } + + fn dict(&self) -> Option<&dyn BDictAccess, BencodeMut<'a>>> { + match self.inner { + Inner::Dict(ref n) => Some(n), + _ => None, + } + } +} + +impl<'a> BMutAccess for BencodeMut<'a> { + fn kind_mut<'b>(&'b mut self) -> MutKind<'b, Cow<'a, [u8]>, BencodeMut<'a>> { + match self.inner { + Inner::Int(n) => MutKind::Int(n), + Inner::Bytes(ref mut n) => MutKind::Bytes((*n).as_ref()), + Inner::List(ref mut n) => MutKind::List(n), + Inner::Dict(ref mut n) => 
MutKind::Dict(n), + } + } + + fn list_mut(&mut self) -> Option<&mut dyn BListAccess>> { + match self.inner { + Inner::List(ref mut n) => Some(n), + _ => None, + } + } + + fn dict_mut(&mut self) -> Option<&mut dyn BDictAccess, BencodeMut<'a>>> { + match self.inner { + Inner::Dict(ref mut n) => Some(n), + _ => None, + } + } +} + +// impl<'a> From> for BencodeMut<'a> { +// fn from(value: BencodeRef<'a>) -> Self { +// let inner = match value.kind() { +// BencodeRefKind::Int(value) => InnerBencodeMut::Int(value), +// BencodeRefKind::Bytes(value) => InnerBencodeMut::Bytes(Cow::Owned(Vec::from(value))), +// BencodeRefKind::List(value) => { +// InnerBencodeMut::List(value.clone().into_iter().map(|b| BencodeMut::from(b.clone())).collect()) +// } +// BencodeRefKind::Dict(value) => InnerBencodeMut::Dict( +// value +// .to_list() +// .into_iter() +// .map(|(key, value)| (Cow::Owned(Vec::from(*key)), BencodeMut::from(value.clone()))) +// .collect(), +// ), +// }; +// BencodeMut { inner } +// } +// } + +#[cfg(test)] +mod test { + use crate::access::bencode::BMutAccess; + use crate::mutable::bencode_mut::BencodeMut; + + #[test] + fn positive_int_encode() { + let bencode_int = BencodeMut::new_int(-560); + + let int_bytes = b"i-560e"; // cspell:disable-line + assert_eq!(&int_bytes[..], &bencode_int.encode()[..]); + } + + #[test] + fn positive_bytes_encode() { + /* cspell:disable-next-line */ + let bencode_bytes = BencodeMut::new_bytes((&b"asdasd"[..]).into()); + + let bytes_bytes = b"6:asdasd"; // cspell:disable-line + assert_eq!(&bytes_bytes[..], &bencode_bytes.encode()[..]); + } + + #[test] + fn positive_empty_list_encode() { + let bencode_list = BencodeMut::new_list(); + + let list_bytes = b"le"; // cspell:disable-line + assert_eq!(&list_bytes[..], &bencode_list.encode()[..]); + } + + #[test] + fn positive_nonempty_list_encode() { + let mut bencode_list = BencodeMut::new_list(); + + { + let list_mut = bencode_list.list_mut().unwrap(); + list_mut.push(BencodeMut::new_int(56)); + 
} + + let list_bytes = b"li56ee"; // cspell:disable-line + assert_eq!(&list_bytes[..], &bencode_list.encode()[..]); + } + + #[test] + fn positive_empty_dict_encode() { + let bencode_dict = BencodeMut::new_dict(); + + let dict_bytes = b"de"; // cspell:disable-line + assert_eq!(&dict_bytes[..], &bencode_dict.encode()[..]); + } + + #[test] + fn positive_nonempty_dict_encode() { + let mut bencode_dict = BencodeMut::new_dict(); + + { + let dict_mut = bencode_dict.dict_mut().unwrap(); + /* cspell:disable-next-line */ + dict_mut.insert((&b"asd"[..]).into(), BencodeMut::new_bytes((&b"asdasd"[..]).into())); + } + + let dict_bytes = b"d3:asd6:asdasde"; // cspell:disable-line + assert_eq!(&dict_bytes[..], &bencode_dict.encode()[..]); + } +} diff --git a/contrib/bencode/src/mutable/encode.rs b/contrib/bencode/src/mutable/encode.rs new file mode 100644 index 000000000..811c35816 --- /dev/null +++ b/contrib/bencode/src/mutable/encode.rs @@ -0,0 +1,67 @@ +use std::iter::Extend; + +use crate::access::bencode::{BRefAccess, RefKind}; +use crate::access::dict::BDictAccess; +use crate::access::list::BListAccess; + +pub fn encode(val: T, bytes: &mut Vec) +where + T: BRefAccess, + T::BKey: AsRef<[u8]>, +{ + match val.kind() { + RefKind::Int(n) => encode_int(n, bytes), + RefKind::Bytes(n) => encode_bytes(n, bytes), + RefKind::List(n) => encode_list(n, bytes), + RefKind::Dict(n) => encode_dict(n, bytes), + } +} + +fn encode_int(val: i64, bytes: &mut Vec) { + bytes.push(crate::INT_START); + + bytes.extend(val.to_string().into_bytes()); + + bytes.push(crate::BEN_END); +} + +fn encode_bytes(list: &[u8], bytes: &mut Vec) { + bytes.extend(list.len().to_string().into_bytes()); + + bytes.push(crate::BYTE_LEN_END); + + bytes.extend(list.iter().copied()); +} + +fn encode_list(list: &dyn BListAccess, bytes: &mut Vec) +where + T: BRefAccess, + T::BKey: AsRef<[u8]>, +{ + bytes.push(crate::LIST_START); + + for i in list { + encode(i, bytes); + } + + bytes.push(crate::BEN_END); +} + +fn 
encode_dict(dict: &dyn BDictAccess, bytes: &mut Vec) +where + K: AsRef<[u8]>, + V: BRefAccess, + V::BKey: AsRef<[u8]>, +{ + // Need To Sort The Keys In The Map Before Encoding + let mut sort_dict = dict.to_list(); + sort_dict.sort_by(|&(a, _), &(b, _)| a.as_ref().cmp(b.as_ref())); + + bytes.push(crate::DICT_START); + // Iterate And Dictionary Encode The (String, Bencode) Pairs + for (key, value) in &sort_dict { + encode_bytes(key.as_ref(), bytes); + encode(value, bytes); + } + bytes.push(crate::BEN_END); +} diff --git a/contrib/bencode/src/mutable/mod.rs b/contrib/bencode/src/mutable/mod.rs new file mode 100644 index 000000000..329ee9f7a --- /dev/null +++ b/contrib/bencode/src/mutable/mod.rs @@ -0,0 +1,2 @@ +pub mod bencode_mut; +mod encode; diff --git a/contrib/bencode/src/reference/bencode_ref.rs b/contrib/bencode/src/reference/bencode_ref.rs new file mode 100644 index 000000000..760dd3016 --- /dev/null +++ b/contrib/bencode/src/reference/bencode_ref.rs @@ -0,0 +1,265 @@ +use std::collections::BTreeMap; +use std::str; + +use crate::access::bencode::{BRefAccess, BRefAccessExt, RefKind}; +use crate::access::dict::BDictAccess; +use crate::access::list::BListAccess; +use crate::error::{BencodeParseError, BencodeParseErrorKind, BencodeParseResult}; +use crate::reference::decode; +use crate::reference::decode_opt::BDecodeOpt; + +/// Bencode object that holds references to the underlying data. +#[derive(Debug, Eq, PartialEq, Clone, Hash)] +pub enum Inner<'a> { + /// Bencode Integer. + Int(i64, &'a [u8]), + /// Bencode Bytes. + Bytes(&'a [u8], &'a [u8]), + /// Bencode List. + List(Vec>, &'a [u8]), + /// Bencode Dictionary. + Dict(BTreeMap<&'a [u8], BencodeRef<'a>>, &'a [u8]), +} + +impl<'a> From> for BencodeRef<'a> { + fn from(val: Inner<'a>) -> Self { + BencodeRef { inner: val } + } +} + +/// `BencodeRef` object that stores references to some buffer. 
+#[derive(Debug, Eq, PartialEq, Clone, Hash)] +pub struct BencodeRef<'a> { + inner: Inner<'a>, +} + +impl<'a> BencodeRef<'a> { + /// Decode the given bytes into a `BencodeRef` using the given decode options. + #[allow(clippy::missing_errors_doc)] + pub fn decode(bytes: &'a [u8], opts: BDecodeOpt) -> BencodeParseResult> { + // Apply try so any errors return before the eof check + let (bencode, end_pos) = decode::decode(bytes, 0, opts, 0)?; + + if end_pos != bytes.len() && opts.enforce_full_decode() { + return Err(BencodeParseError::from_kind(BencodeParseErrorKind::BytesEmpty { + pos: end_pos, + })); + } + + Ok(bencode) + } + + /// Get a byte slice of the current bencode byte representation. + #[must_use] + pub fn buffer(&self) -> &'a [u8] { + #[allow(clippy::match_same_arms)] + match self.inner { + Inner::Int(_, buffer) => buffer, + Inner::Bytes(_, buffer) => buffer, + Inner::List(_, buffer) => buffer, + Inner::Dict(_, buffer) => buffer, + } + } +} + +impl<'a> BRefAccess for BencodeRef<'a> { + type BKey = &'a [u8]; + type BType = BencodeRef<'a>; + + fn kind<'b>(&'b self) -> RefKind<'b, &'a [u8], BencodeRef<'a>> { + match self.inner { + Inner::Int(n, _) => RefKind::Int(n), + Inner::Bytes(n, _) => RefKind::Bytes(n), + Inner::List(ref n, _) => RefKind::List(n), + Inner::Dict(ref n, _) => RefKind::Dict(n), + } + } + + fn str(&self) -> Option<&str> { + self.str_ext() + } + + fn int(&self) -> Option { + match self.inner { + Inner::Int(n, _) => Some(n), + _ => None, + } + } + + fn bytes(&self) -> Option<&[u8]> { + self.bytes_ext() + } + + fn list(&self) -> Option<&dyn BListAccess>> { + match self.inner { + Inner::List(ref n, _) => Some(n), + _ => None, + } + } + + fn dict(&self) -> Option<&dyn BDictAccess<&'a [u8], BencodeRef<'a>>> { + match self.inner { + Inner::Dict(ref n, _) => Some(n), + _ => None, + } + } +} + +impl<'a> BRefAccessExt<'a> for BencodeRef<'a> { + fn str_ext(&self) -> Option<&'a str> { + let bytes = self.bytes_ext()?; + + match str::from_utf8(bytes) { + 
Ok(n) => Some(n), + Err(_) => None, + } + } + + fn bytes_ext(&self) -> Option<&'a [u8]> { + match self.inner { + Inner::Bytes(n, _) => Some(&n[0..]), + _ => None, + } + } +} + +#[cfg(test)] +mod tests { + use std::default::Default; + + use crate::access::bencode::BRefAccess; + use crate::reference::bencode_ref::BencodeRef; + use crate::reference::decode_opt::BDecodeOpt; + + #[test] + fn positive_int_buffer() { + let int_bytes = b"i-500e"; // cspell:disable-line + let bencode = BencodeRef::decode(&int_bytes[..], BDecodeOpt::default()).unwrap(); + + assert_eq!(int_bytes, bencode.buffer()); + } + + #[test] + fn positive_bytes_buffer() { + let bytes_bytes = b"3:asd"; // cspell:disable-line + let bencode = BencodeRef::decode(&bytes_bytes[..], BDecodeOpt::default()).unwrap(); + + assert_eq!(bytes_bytes, bencode.buffer()); + } + + #[test] + fn positive_list_buffer() { + let list_bytes = b"l3:asde"; // cspell:disable-line + let bencode = BencodeRef::decode(&list_bytes[..], BDecodeOpt::default()).unwrap(); + + assert_eq!(list_bytes, bencode.buffer()); + } + + #[test] + fn positive_dict_buffer() { + let dict_bytes = b"d3:asd3:asde"; // cspell:disable-line + let bencode = BencodeRef::decode(&dict_bytes[..], BDecodeOpt::default()).unwrap(); + + assert_eq!(dict_bytes, bencode.buffer()); + } + + #[test] + fn positive_list_nested_int_buffer() { + let nested_int_bytes = b"li-500ee"; // cspell:disable-line + let bencode = BencodeRef::decode(&nested_int_bytes[..], BDecodeOpt::default()).unwrap(); + + let bencode_list = bencode.list().unwrap(); + let bencode_int = bencode_list.get(0).unwrap(); + + let int_bytes = b"i-500e"; // cspell:disable-line + assert_eq!(int_bytes, bencode_int.buffer()); + } + + #[test] + fn positive_dict_nested_int_buffer() { + let nested_int_bytes = b"d3:asdi-500ee"; // cspell:disable-line + let bencode = BencodeRef::decode(&nested_int_bytes[..], BDecodeOpt::default()).unwrap(); + + let bencode_dict = bencode.dict().unwrap(); + /* cspell:disable-next-line */ + 
let bencode_int = bencode_dict.lookup(&b"asd"[..]).unwrap(); + + let int_bytes = b"i-500e"; // cspell:disable-line + assert_eq!(int_bytes, bencode_int.buffer()); + } + + #[test] + fn positive_list_nested_bytes_buffer() { + let nested_bytes_bytes = b"l3:asde"; // cspell:disable-line + let bencode = BencodeRef::decode(&nested_bytes_bytes[..], BDecodeOpt::default()).unwrap(); + + let bencode_list = bencode.list().unwrap(); + let bencode_bytes = bencode_list.get(0).unwrap(); + + let bytes_bytes = b"3:asd"; // cspell:disable-line + assert_eq!(bytes_bytes, bencode_bytes.buffer()); + } + + #[test] + fn positive_dict_nested_bytes_buffer() { + let nested_bytes_bytes = b"d3:asd3:asde"; // cspell:disable-line + let bencode = BencodeRef::decode(&nested_bytes_bytes[..], BDecodeOpt::default()).unwrap(); + + let bencode_dict = bencode.dict().unwrap(); + /* cspell:disable-next-line */ + let bencode_bytes = bencode_dict.lookup(&b"asd"[..]).unwrap(); + + let bytes_bytes = b"3:asd"; // cspell:disable-line + assert_eq!(bytes_bytes, bencode_bytes.buffer()); + } + + #[test] + fn positive_list_nested_list_buffer() { + let nested_list_bytes = b"ll3:asdee"; // cspell:disable-line + let bencode = BencodeRef::decode(&nested_list_bytes[..], BDecodeOpt::default()).unwrap(); + + let bencode_list = bencode.list().unwrap(); + let bencode_list = bencode_list.get(0).unwrap(); + + let list_bytes = b"l3:asde"; // cspell:disable-line + assert_eq!(list_bytes, bencode_list.buffer()); + } + + #[test] + fn positive_dict_nested_list_buffer() { + let nested_list_bytes = b"d3:asdl3:asdee"; // cspell:disable-line + let bencode = BencodeRef::decode(&nested_list_bytes[..], BDecodeOpt::default()).unwrap(); + + let bencode_dict = bencode.dict().unwrap(); + /* cspell:disable-next-line */ + let bencode_list = bencode_dict.lookup(&b"asd"[..]).unwrap(); + + let list_bytes = b"l3:asde"; // cspell:disable-line + assert_eq!(list_bytes, bencode_list.buffer()); + } + + #[test] + fn positive_list_nested_dict_buffer() { + 
let nested_dict_bytes = b"ld3:asd3:asdee"; // cspell:disable-line + let bencode = BencodeRef::decode(&nested_dict_bytes[..], BDecodeOpt::default()).unwrap(); + + let bencode_list = bencode.list().unwrap(); + let bencode_dict = bencode_list.get(0).unwrap(); + + let dict_bytes = b"d3:asd3:asde"; // cspell:disable-line + assert_eq!(dict_bytes, bencode_dict.buffer()); + } + + #[test] + fn positive_dict_nested_dict_buffer() { + let nested_dict_bytes = b"d3:asdd3:asd3:asdee"; // cspell:disable-line + let bencode = BencodeRef::decode(&nested_dict_bytes[..], BDecodeOpt::default()).unwrap(); + + let bencode_dict = bencode.dict().unwrap(); + /* cspell:disable-next-line */ + let bencode_dict = bencode_dict.lookup(&b"asd"[..]).unwrap(); + + let dict_bytes = b"d3:asd3:asde"; // cspell:disable-line + assert_eq!(dict_bytes, bencode_dict.buffer()); + } +} diff --git a/contrib/bencode/src/reference/decode.rs b/contrib/bencode/src/reference/decode.rs new file mode 100644 index 000000000..96ab6dfbf --- /dev/null +++ b/contrib/bencode/src/reference/decode.rs @@ -0,0 +1,398 @@ +use std::collections::btree_map::Entry; +use std::collections::BTreeMap; +use std::str::{self}; + +use crate::error::{BencodeParseError, BencodeParseErrorKind, BencodeParseResult}; +use crate::reference::bencode_ref::{BencodeRef, Inner}; +use crate::reference::decode_opt::BDecodeOpt; + +pub fn decode(bytes: &[u8], pos: usize, opts: BDecodeOpt, depth: usize) -> BencodeParseResult<(BencodeRef<'_>, usize)> { + if depth >= opts.max_recursion() { + return Err(BencodeParseError::from_kind( + BencodeParseErrorKind::InvalidRecursionExceeded { pos, max: depth }, + )); + } + let curr_byte = peek_byte(bytes, pos)?; + + match curr_byte { + crate::INT_START => { + let (bencode, next_pos) = decode_int(bytes, pos + 1, crate::BEN_END)?; + Ok((Inner::Int(bencode, &bytes[pos..next_pos]).into(), next_pos)) + } + crate::LIST_START => { + let (bencode, next_pos) = decode_list(bytes, pos + 1, opts, depth)?; + Ok((Inner::List(bencode, 
&bytes[pos..next_pos]).into(), next_pos)) + } + crate::DICT_START => { + let (bencode, next_pos) = decode_dict(bytes, pos + 1, opts, depth)?; + Ok((Inner::Dict(bencode, &bytes[pos..next_pos]).into(), next_pos)) + } + crate::BYTE_LEN_LOW..=crate::BYTE_LEN_HIGH => { + let (bencode, next_pos) = decode_bytes(bytes, pos)?; + // Include the length digit, don't increment position + Ok((Inner::Bytes(bencode, &bytes[pos..next_pos]).into(), next_pos)) + } + _ => Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidByte { pos })), + } +} + +fn decode_int(bytes: &[u8], pos: usize, delim: u8) -> BencodeParseResult<(i64, usize)> { + let (_, begin_decode) = bytes.split_at(pos); + + let Some(relative_end_pos) = begin_decode.iter().position(|n| *n == delim) else { + return Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidIntNoDelimiter { + pos, + })); + }; + let int_byte_slice = &begin_decode[..relative_end_pos]; + + if int_byte_slice.len() > 1 { + // Negative zero is not allowed (this would not be caught when converting) + if int_byte_slice[0] == b'-' && int_byte_slice[1] == b'0' { + return Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidIntNegativeZero { + pos, + })); + } + + // Zero padding is illegal, and unspecified for key lengths (we disallow both) + if int_byte_slice[0] == b'0' { + return Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidIntZeroPadding { + pos, + })); + } + } + + let Ok(int_str) = str::from_utf8(int_byte_slice) else { + return Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidIntParseError { + pos, + })); + }; + + // Position of end of integer type, next byte is the start of the next value + let absolute_end_pos = pos + relative_end_pos; + let next_pos = absolute_end_pos + 1; + match int_str.parse::() { + Ok(n) => Ok((n, next_pos)), + Err(_) => Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidIntParseError { + pos, + })), + } +} + +fn decode_bytes(bytes: &[u8], pos: usize) -> 
BencodeParseResult<(&[u8], usize)> { + let (num_bytes, start_pos) = decode_int(bytes, pos, crate::BYTE_LEN_END)?; + + if num_bytes < 0 { + return Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidLengthNegative { + pos, + })); + } + + // Should be safe to cast to usize (TODO: Check if cast would overflow to provide + // a more helpful error message, otherwise, parsing will probably fail with an + // unrelated message). + let num_bytes = + usize::try_from(num_bytes).map_err(|_| BencodeParseErrorKind::Msg(format!("input length is too long: {num_bytes}")))?; + + if num_bytes > bytes[start_pos..].len() { + return Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidLengthOverflow { + pos, + })); + } + + let next_pos = start_pos + num_bytes; + Ok((&bytes[start_pos..next_pos], next_pos)) +} + +fn decode_list(bytes: &[u8], pos: usize, opts: BDecodeOpt, depth: usize) -> BencodeParseResult<(Vec>, usize)> { + let mut bencode_list = Vec::new(); + + let mut curr_pos = pos; + let mut curr_byte = peek_byte(bytes, curr_pos)?; + + while curr_byte != crate::BEN_END { + let (bencode, next_pos) = decode(bytes, curr_pos, opts, depth + 1)?; + + bencode_list.push(bencode); + + curr_pos = next_pos; + curr_byte = peek_byte(bytes, curr_pos)?; + } + + let next_pos = curr_pos + 1; + Ok((bencode_list, next_pos)) +} + +fn decode_dict( + bytes: &[u8], + pos: usize, + opts: BDecodeOpt, + depth: usize, +) -> BencodeParseResult<(BTreeMap<&[u8], BencodeRef<'_>>, usize)> { + let mut bencode_dict = BTreeMap::new(); + + let mut curr_pos = pos; + let mut curr_byte = peek_byte(bytes, curr_pos)?; + + while curr_byte != crate::BEN_END { + let (key_bytes, next_pos) = decode_bytes(bytes, curr_pos)?; + + // Spec says that the keys must be in alphabetical order + match (bencode_dict.keys().last(), opts.check_key_sort()) { + (Some(last_key), true) if key_bytes < *last_key => { + return Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidKeyOrdering { + pos: curr_pos, + key: 
key_bytes.to_vec(), + })) + } + _ => (), + }; + curr_pos = next_pos; + + let (value, next_pos) = decode(bytes, curr_pos, opts, depth + 1)?; + match bencode_dict.entry(key_bytes) { + Entry::Vacant(n) => n.insert(value), + Entry::Occupied(_) => { + return Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidKeyDuplicates { + pos: curr_pos, + key: key_bytes.to_vec(), + })) + } + }; + + curr_pos = next_pos; + curr_byte = peek_byte(bytes, curr_pos)?; + } + + let next_pos = curr_pos + 1; + Ok((bencode_dict, next_pos)) +} + +fn peek_byte(bytes: &[u8], pos: usize) -> BencodeParseResult { + bytes + .get(pos) + .copied() + .ok_or_else(|| BencodeParseError::from_kind(BencodeParseErrorKind::BytesEmpty { pos })) +} + +#[cfg(test)] +mod tests { + use std::default::Default; + + use crate::access::bencode::BRefAccess; + use crate::reference::bencode_ref::BencodeRef; + use crate::reference::decode_opt::BDecodeOpt; + + /* cSpell:disable */ + // Positive Cases + const GENERAL: &[u8] = b"d0:12:zero_len_key8:location17:udp://test.com:8011:nested dictd4:listli-500500eee6:numberi500500ee"; + const RECURSION: &[u8] = 
b"lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllleeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee
ee"; + const BYTES_UTF8: &[u8] = b"16:valid_utf8_bytes"; + const DICTIONARY: &[u8] = b"d9:test_dictd10:nested_key12:nested_value11:nested_listli500ei-500ei0eee8:test_key10:test_valuee"; + const LIST: &[u8] = b"l10:test_bytesi500ei0ei-500el12:nested_bytesed8:test_key10:test_valueee"; + const BYTES: &[u8] = b"5:\xC5\xE6\xBE\xE6\xF2"; + const BYTES_ZERO_LEN: &[u8] = b"0:"; + const INT: &[u8] = b"i500e"; + const INT_NEGATIVE: &[u8] = b"i-500e"; + const INT_ZERO: &[u8] = b"i0e"; + const PARTIAL: &[u8] = b"i0e_asd"; + + // Negative Cases + const BYTES_NEG_LEN: &[u8] = b"-4:test"; + const BYTES_EXTRA: &[u8] = b"l15:processed_bytese17:unprocessed_bytes"; + const BYTES_NOT_UTF8: &[u8] = b"5:\xC5\xE6\xBE\xE6\xF2"; + const INT_NAN: &[u8] = b"i500a500e"; + const INT_LEADING_ZERO: &[u8] = b"i0500e"; + const INT_DOUBLE_ZERO: &[u8] = b"i00e"; + const INT_NEGATIVE_ZERO: &[u8] = b"i-0e"; + const INT_DOUBLE_NEGATIVE: &[u8] = b"i--5e"; + const DICT_UNORDERED_KEYS: &[u8] = b"d5:z_key5:value5:a_key5:valuee"; + const DICT_DUP_KEYS_SAME_DATA: &[u8] = b"d5:a_keyi0e5:a_keyi0ee"; + const DICT_DUP_KEYS_DIFF_DATA: &[u8] = b"d5:a_keyi0e5:a_key7:a_valuee"; + /* cSpell:enable */ + + #[test] + fn positive_decode_general() { + let bencode = BencodeRef::decode(GENERAL, BDecodeOpt::default()).unwrap(); + + let ben_dict = bencode.dict().unwrap(); + assert_eq!(ben_dict.lookup("".as_bytes()).unwrap().str().unwrap(), "zero_len_key"); + assert_eq!( + ben_dict.lookup("location".as_bytes()).unwrap().str().unwrap(), + "udp://test.com:80" + ); + assert_eq!(ben_dict.lookup("number".as_bytes()).unwrap().int().unwrap(), 500_500_i64); + + let nested_dict = ben_dict.lookup("nested dict".as_bytes()).unwrap().dict().unwrap(); + let nested_list = nested_dict.lookup("list".as_bytes()).unwrap().list().unwrap(); + assert_eq!(nested_list[0].int().unwrap(), -500_500_i64); + } + + #[test] + fn positive_decode_recursion() { + BencodeRef::decode(RECURSION, BDecodeOpt::new(50, true, true)).unwrap_err(); + + // As long as we 
didn't overflow our call stack, we are good! + } + + #[test] + fn positive_decode_bytes_utf8() { + let bencode = BencodeRef::decode(BYTES_UTF8, BDecodeOpt::default()).unwrap(); + + assert_eq!(bencode.str().unwrap(), "valid_utf8_bytes"); + } + + #[test] + fn positive_decode_dict() { + let bencode = BencodeRef::decode(DICTIONARY, BDecodeOpt::default()).unwrap(); + let dict = bencode.dict().unwrap(); + assert_eq!(dict.lookup("test_key".as_bytes()).unwrap().str().unwrap(), "test_value"); + + let nested_dict = dict.lookup("test_dict".as_bytes()).unwrap().dict().unwrap(); + assert_eq!( + nested_dict.lookup("nested_key".as_bytes()).unwrap().str().unwrap(), + "nested_value" + ); + + let nested_list = nested_dict.lookup("nested_list".as_bytes()).unwrap().list().unwrap(); + assert_eq!(nested_list[0].int().unwrap(), 500i64); + assert_eq!(nested_list[1].int().unwrap(), -500i64); + assert_eq!(nested_list[2].int().unwrap(), 0i64); + } + + #[test] + fn positive_decode_list() { + let bencode = BencodeRef::decode(LIST, BDecodeOpt::default()).unwrap(); + let list = bencode.list().unwrap(); + + assert_eq!(list[0].str().unwrap(), "test_bytes"); + assert_eq!(list[1].int().unwrap(), 500i64); + assert_eq!(list[2].int().unwrap(), 0i64); + assert_eq!(list[3].int().unwrap(), -500i64); + + let nested_list = list[4].list().unwrap(); + assert_eq!(nested_list[0].str().unwrap(), "nested_bytes"); + + let nested_dict = list[5].dict().unwrap(); + assert_eq!( + nested_dict.lookup("test_key".as_bytes()).unwrap().str().unwrap(), + "test_value" + ); + } + + #[test] + fn positive_decode_bytes() { + let bytes = super::decode_bytes(BYTES, 0).unwrap().0; + assert_eq!(bytes.len(), 5); + assert_eq!(bytes[0] as char, 'Ã…'); + assert_eq!(bytes[1] as char, 'æ'); + assert_eq!(bytes[2] as char, '¾'); + assert_eq!(bytes[3] as char, 'æ'); + assert_eq!(bytes[4] as char, 'ò'); + } + + #[test] + fn positive_decode_bytes_zero_len() { + let bytes = super::decode_bytes(BYTES_ZERO_LEN, 0).unwrap().0; + 
assert_eq!(bytes.len(), 0); + } + + #[test] + fn positive_decode_int() { + let int_value = super::decode_int(INT, 1, crate::BEN_END).unwrap().0; + assert_eq!(int_value, 500i64); + } + + #[test] + fn positive_decode_int_negative() { + let int_value = super::decode_int(INT_NEGATIVE, 1, crate::BEN_END).unwrap().0; + assert_eq!(int_value, -500i64); + } + + #[test] + fn positive_decode_int_zero() { + let int_value = super::decode_int(INT_ZERO, 1, crate::BEN_END).unwrap().0; + assert_eq!(int_value, 0i64); + } + + #[test] + fn positive_decode_partial() { + let bencode = BencodeRef::decode(PARTIAL, BDecodeOpt::new(2, true, false)).unwrap(); + + assert_ne!(PARTIAL.len(), bencode.buffer().len()); + assert_eq!(3, bencode.buffer().len()); + } + + #[test] + fn positive_decode_dict_unordered_keys() { + BencodeRef::decode(DICT_UNORDERED_KEYS, BDecodeOpt::default()).unwrap(); + } + + #[test] + #[should_panic] + fn negative_decode_bytes_neg_len() { + BencodeRef::decode(BYTES_NEG_LEN, BDecodeOpt::default()).unwrap(); + } + + #[test] + #[should_panic] + fn negative_decode_bytes_extra() { + BencodeRef::decode(BYTES_EXTRA, BDecodeOpt::default()).unwrap(); + } + + #[test] + #[should_panic] + fn negative_decode_bytes_not_utf8() { + let bencode = BencodeRef::decode(BYTES_NOT_UTF8, BDecodeOpt::default()).unwrap(); + + bencode.str().unwrap(); + } + + #[test] + #[should_panic] + fn negative_decode_int_nan() { + super::decode_int(INT_NAN, 1, crate::BEN_END).unwrap(); + } + + #[test] + #[should_panic] + fn negative_decode_int_leading_zero() { + super::decode_int(INT_LEADING_ZERO, 1, crate::BEN_END).unwrap(); + } + + #[test] + #[should_panic] + fn negative_decode_int_double_zero() { + super::decode_int(INT_DOUBLE_ZERO, 1, crate::BEN_END).unwrap(); + } + + #[test] + #[should_panic] + fn negative_decode_int_negative_zero() { + super::decode_int(INT_NEGATIVE_ZERO, 1, crate::BEN_END).unwrap(); + } + + #[test] + #[should_panic] + fn negative_decode_int_double_negative() { + 
super::decode_int(INT_DOUBLE_NEGATIVE, 1, crate::BEN_END).unwrap(); + } + + #[test] + #[should_panic] + fn negative_decode_dict_unordered_keys() { + BencodeRef::decode(DICT_UNORDERED_KEYS, BDecodeOpt::new(5, true, true)).unwrap(); + } + + #[test] + #[should_panic] + fn negative_decode_dict_dup_keys_same_data() { + BencodeRef::decode(DICT_DUP_KEYS_SAME_DATA, BDecodeOpt::default()).unwrap(); + } + + #[test] + #[should_panic] + fn negative_decode_dict_dup_keys_diff_data() { + BencodeRef::decode(DICT_DUP_KEYS_DIFF_DATA, BDecodeOpt::default()).unwrap(); + } +} diff --git a/contrib/bencode/src/reference/decode_opt.rs b/contrib/bencode/src/reference/decode_opt.rs new file mode 100644 index 000000000..ac94d0311 --- /dev/null +++ b/contrib/bencode/src/reference/decode_opt.rs @@ -0,0 +1,55 @@ +use std::default::Default; + +const DEFAULT_MAX_RECURSION: usize = 50; +const DEFAULT_CHECK_KEY_SORT: bool = false; +const DEFAULT_ENFORCE_FULL_DECODE: bool = true; + +/// Stores decoding options for modifying decode behavior. +#[derive(Copy, Clone)] +#[allow(clippy::module_name_repetitions)] +pub struct BDecodeOpt { + max_recursion: usize, + check_key_sort: bool, + enforce_full_decode: bool, +} + +impl BDecodeOpt { + /// Create a new `BDecodeOpt` object. + #[must_use] + pub fn new(max_recursion: usize, check_key_sort: bool, enforce_full_decode: bool) -> BDecodeOpt { + BDecodeOpt { + max_recursion, + check_key_sort, + enforce_full_decode, + } + } + + /// Maximum limit allowed when decoding bencode. + #[must_use] + pub fn max_recursion(&self) -> usize { + self.max_recursion + } + + /// Whether or not an error should be thrown for out of order dictionary keys. + #[must_use] + pub fn check_key_sort(&self) -> bool { + self.check_key_sort + } + + /// Whether or not we enforce that the decoded bencode must make up all of the input + /// bytes or not. 
+ /// + /// It may be useful to disable this if for example, the input bencode is prepended to + /// some payload and you would like to disassociate it. In this case, to find where the + /// rest of the payload starts that wasn't decoded, get the bencode buffer, and call len(). + #[must_use] + pub fn enforce_full_decode(&self) -> bool { + self.enforce_full_decode + } +} + +impl Default for BDecodeOpt { + fn default() -> BDecodeOpt { + BDecodeOpt::new(DEFAULT_MAX_RECURSION, DEFAULT_CHECK_KEY_SORT, DEFAULT_ENFORCE_FULL_DECODE) + } +} diff --git a/contrib/bencode/src/reference/mod.rs b/contrib/bencode/src/reference/mod.rs new file mode 100644 index 000000000..6a0ae6e40 --- /dev/null +++ b/contrib/bencode/src/reference/mod.rs @@ -0,0 +1,3 @@ +pub mod bencode_ref; +pub mod decode; +pub mod decode_opt; diff --git a/contrib/bencode/test/mod.rs b/contrib/bencode/test/mod.rs new file mode 100644 index 000000000..c1454967d --- /dev/null +++ b/contrib/bencode/test/mod.rs @@ -0,0 +1,18 @@ +use bencode::{ben_bytes, ben_int, ben_list, ben_map}; + +#[test] +fn positive_ben_map_macro() { + let result = (ben_map! 
{ + "key" => ben_bytes!("value") + }) + .encode(); + + assert_eq!("d3:key5:valuee".as_bytes(), &result[..]); // cspell:disable-line +} + +#[test] +fn positive_ben_list_macro() { + let result = (ben_list!(ben_int!(5))).encode(); + + assert_eq!("li5ee".as_bytes(), &result[..]); // cspell:disable-line +} diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index 0cd62578a..e7b64522c 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -7,7 +7,7 @@ use std::panic::Location; use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; -use bip_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; +use bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; use serde::{self, Deserialize, Serialize}; use thiserror::Error; diff --git a/src/servers/http/v1/responses/scrape.rs b/src/servers/http/v1/responses/scrape.rs index 6610f9dc4..c2f099597 100644 --- a/src/servers/http/v1/responses/scrape.rs +++ b/src/servers/http/v1/responses/scrape.rs @@ -5,7 +5,7 @@ use std::borrow::Cow; use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; -use bip_bencode::{ben_int, ben_map, BMutAccess}; +use bencode::{ben_int, ben_map, BMutAccess}; use crate::tracker::ScrapeData; From adcbf351d04c1b8b87f36d3b77634d42b2ef2493 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 7 Aug 2023 15:51:30 +0200 Subject: [PATCH 0555/1003] ci: overhaul testing workflow --- .github/workflows/test_build_release.yml | 81 ----------------- .github/workflows/testing.yaml | 109 +++++++++++++++++++++++ cSpell.json | 2 + 3 files changed, 111 insertions(+), 81 deletions(-) delete mode 100644 .github/workflows/test_build_release.yml create mode 100644 .github/workflows/testing.yaml diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml deleted file mode 100644 index 88234b97a..000000000 --- 
a/.github/workflows/test_build_release.yml +++ /dev/null @@ -1,81 +0,0 @@ -name: CI - -# Only trigger, when the test workflow succeeded -on: [push, pull_request] - -jobs: - format: - runs-on: ubuntu-latest - env: - CARGO_TERM_COLOR: always - steps: - - uses: actions/checkout@v3 - - uses: dtolnay/rust-toolchain@stable - with: - toolchain: nightly - components: rustfmt, clippy - - uses: Swatinem/rust-cache@v2 - - name: Check Rust Formatting - run: cargo fmt --check - - test: - needs: format - runs-on: ubuntu-latest - env: - CARGO_TERM_COLOR: always - steps: - - uses: actions/checkout@v3 - - uses: dtolnay/rust-toolchain@stable - with: - toolchain: stable - components: llvm-tools-preview - - uses: Swatinem/rust-cache@v2 - - name: Check Rust Code - run: cargo check --all-targets - - name: Clippy Rust Code - run: cargo clippy --all-targets -- -D clippy::pedantic - - name: Test Documentation - run: cargo test --doc - - name: Run Tests - run: cargo test --workspace - - uses: taiki-e/install-action@cargo-llvm-cov - - uses: taiki-e/install-action@nextest - - name: Show coverage - run: cargo llvm-cov nextest - - build: - needs: test - if: | - github.event_name == 'push' && - github.event.base_ref == 'refs/heads/main' && - startsWith(github.ref, 'refs/tags/v') - runs-on: ubuntu-latest - env: - CARGO_TERM_COLOR: always - steps: - - uses: actions/checkout@v3 - - uses: dtolnay/rust-toolchain@stable - with: - toolchain: stable - - uses: Swatinem/rust-cache@v2 - - name: Build Torrust Tracker - run: cargo build --release - - name: Upload Build Artifact - uses: actions/upload-artifact@v3 - with: - name: torrust-tracker - path: ./target/release/torrust-tracker - - release: - needs: build - runs-on: ubuntu-latest - steps: - - name: Download Build Artifact - uses: actions/download-artifact@v3 - with: - name: torrust-tracker - - name: Release - uses: softprops/action-gh-release@v1 -# with: -# files: | -# torrust-tracker diff --git a/.github/workflows/testing.yaml 
b/.github/workflows/testing.yaml new file mode 100644 index 000000000..c1f85a90b --- /dev/null +++ b/.github/workflows/testing.yaml @@ -0,0 +1,109 @@ +name: Testing + +on: + push: + pull_request: + +env: + CARGO_TERM_COLOR: always + +jobs: + format: + name: Formatting + runs-on: ubuntu-latest + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v3 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: nightly + components: rustfmt + + - id: cache + name: Enable Workflow Cache + uses: Swatinem/rust-cache@v2 + + - id: format + name: Run Formatting-Checks + run: cargo fmt --check + + check: + name: Static Analysis + runs-on: ubuntu-latest + needs: format + + strategy: + matrix: + toolchain: [stable, nightly] + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v3 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: nightly + components: clippy + + - id: cache + name: Enable Workflow Cache + uses: Swatinem/rust-cache@v2 + + - id: check + name: Run Build Checks + run: cargo check --tests --benches --examples --workspace --all-targets --all-features + + - id: lint + name: Run Lint Checks + run: cargo clippy --tests --benches --examples --workspace --all-targets --all-features -- -D clippy::correctness -D clippy::suspicious -D clippy::complexity -D clippy::perf -D clippy::style -D clippy::pedantic + + - id: doc + name: Run Documentation Checks + run: cargo test --doc + + unit: + name: Units + runs-on: ubuntu-latest + needs: check + + strategy: + matrix: + toolchain: [stable, nightly] + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v3 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.toolchain }} + components: llvm-tools-preview + + - id: cache + name: Enable Job Cache + uses: Swatinem/rust-cache@v2 + + - id: tools + name: Install Tools + 
uses: taiki-e/install-action@v2 + with: + tool: cargo-llvm-cov, cargo-nextest + + - id: test + name: Run Unit Tests + run: cargo test --tests --benches --examples --workspace --all-targets --all-features + + - id: coverage + name: Generate Coverage Report + run: cargo llvm-cov nextest --tests --benches --examples --workspace --all-targets --all-features diff --git a/cSpell.json b/cSpell.json index 5cafa68ed..8a85739e2 100644 --- a/cSpell.json +++ b/cSpell.json @@ -37,10 +37,12 @@ "hlocalhost", "Hydranode", "Icelake", + "imdl", "incompletei", "infohash", "infohashes", "infoschema", + "Intermodal", "intervali", "lcov", "leecher", From 76b270b1d6e8068574d9c2916297b7bc07fef78f Mon Sep 17 00:00:00 2001 From: Alex Wellnitz Date: Sat, 26 Aug 2023 21:44:38 +0200 Subject: [PATCH 0556/1003] #326: Add tower-http compression as middleware --- Cargo.lock | 158 ++++++++++++++++++++++++++++++++++ Cargo.toml | 1 + src/servers/apis/routes.rs | 11 ++- src/servers/http/v1/routes.rs | 2 + 4 files changed, 168 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f2053b043..789e2caec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -48,6 +48,21 @@ dependencies = [ "memchr", ] +[[package]] +name = "alloc-no-stdlib" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" + +[[package]] +name = "alloc-stdlib" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" +dependencies = [ + "alloc-no-stdlib", +] + [[package]] name = "allocator-api2" version = "0.2.16" @@ -103,6 +118,22 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "async-compression" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "62b74f44609f0f91493e3082d3734d98497e094777144380ea4db9f9905dd5b6" +dependencies = [ + "brotli", + "flate2", + "futures-core", + "memchr", + "pin-project-lite", + "tokio", + "zstd", + "zstd-safe", +] + [[package]] name = "async-trait" version = "0.1.73" @@ -349,6 +380,27 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "brotli" +version = "3.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", +] + +[[package]] +name = "brotli-decompressor" +version = "2.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b6561fd3f895a11e8f72af2cb7d22e08366bebc2b6b57f7744c4bda27034744" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", +] + [[package]] name = "bufstream" version = "0.1.4" @@ -407,6 +459,7 @@ version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ + "jobserver", "libc", ] @@ -1199,6 +1252,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "http-range-header" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" + [[package]] name = "httparse" version = "1.8.0" @@ -1325,6 +1384,16 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" +[[package]] +name = "iri-string" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21859b667d66a4c1dacd9df0863b3efb65785474255face87f5bca39dd8407c0" +dependencies = [ + "memchr", + "serde", +] + [[package]] name = "is-terminal" version = "0.4.9" @@ -1351,6 +1420,15 @@ version = "1.0.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +[[package]] +name = "jobserver" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +dependencies = [ + "libc", +] + [[package]] name = "js-sys" version = "0.3.64" @@ -1570,6 +1648,16 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "mime_guess" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +dependencies = [ + "mime", + "unicase", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -3068,6 +3156,7 @@ dependencies = [ "torrust-tracker-located-error", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "tower-http", "uuid", ] @@ -3128,6 +3217,36 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower-http" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ae70283aba8d2a8b411c695c437fe25b8b5e44e23e780662002fc72fb47a82" +dependencies = [ + "async-compression", + "base64 0.21.2", + "bitflags 2.4.0", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "httpdate", + "iri-string", + "mime", + "mime_guess", + "percent-encoding", + "pin-project-lite", + "tokio", + "tokio-util", + "tower", + "tower-layer", + "tower-service", + "tracing", + "uuid", +] + [[package]] name = "tower-layer" version = "0.3.2" @@ -3190,6 +3309,15 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +[[package]] +name = "unicase" +version = "2.6.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] + [[package]] name = "unicode-bidi" version = "0.3.13" @@ -3492,3 +3620,33 @@ checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" dependencies = [ "linked-hash-map", ] + +[[package]] +name = "zstd" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a27595e173641171fc74a1232b7b1c7a7cb6e18222c11e9dfb9888fa424c53c" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "6.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee98ffd0b48ee95e6c5168188e44a54550b1564d9d530ee21d5f0eaed1069581" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.8+zstd.1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" +dependencies = [ + "cc", + "libc", + "pkg-config", +] diff --git a/Cargo.toml b/Cargo.toml index a939318cd..22d9b2b4f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,6 +41,7 @@ uuid = { version = "1", features = ["v4"] } axum = "0.6.20" axum-server = { version = "0.5", features = ["tls-rustls"] } axum-client-ip = "0.4.1" +tower-http = { version= "0.4.3", features = ["full"] } bencode = { version = "1.0.0-alpha.1", path = "contrib/bencode" } torrust-tracker-primitives = { version = "3.0.0-alpha.3", path = "packages/primitives" } torrust-tracker-configuration = { version = "3.0.0-alpha.3", path = "packages/configuration" } diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index a4c4642c7..7801389f3 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -8,6 +8,7 @@ use std::sync::Arc; use axum::{middleware, Router}; +use tower_http::compression::CompressionLayer; use super::v1; use 
crate::tracker::Tracker; @@ -21,8 +22,10 @@ pub fn router(tracker: Arc) -> Router { let router = v1::routes::add(prefix, router, tracker.clone()); - router.layer(middleware::from_fn_with_state( - tracker.config.clone(), - v1::middlewares::auth::auth, - )) + router + .layer(middleware::from_fn_with_state( + tracker.config.clone(), + v1::middlewares::auth::auth, + )) + .layer(CompressionLayer::new()) } diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 86bdf480f..6546dcbb8 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use axum::routing::get; use axum::Router; use axum_client_ip::SecureClientIpSource; +use tower_http::compression::CompressionLayer; use super::handlers::{announce, scrape}; use crate::tracker::Tracker; @@ -23,4 +24,5 @@ pub fn router(tracker: Arc) -> Router { .route("/scrape/:key", get(scrape::handle_with_key).with_state(tracker)) // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) + .layer(CompressionLayer::new()) } From 22bb98af388e8fe801b174786bfe0bbe49fc85c8 Mon Sep 17 00:00:00 2001 From: Alex Wellnitz Date: Sat, 26 Aug 2023 22:08:08 +0200 Subject: [PATCH 0557/1003] #326: Use only compression features of tower-http --- Cargo.lock | 38 -------------------------------------- Cargo.toml | 2 +- 2 files changed, 1 insertion(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 789e2caec..768d0ea60 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1384,16 +1384,6 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" -[[package]] -name = "iri-string" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21859b667d66a4c1dacd9df0863b3efb65785474255face87f5bca39dd8407c0" -dependencies = [ - "memchr", - "serde", -] - [[package]] name = 
"is-terminal" version = "0.4.9" @@ -1648,16 +1638,6 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" -[[package]] -name = "mime_guess" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -3224,7 +3204,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55ae70283aba8d2a8b411c695c437fe25b8b5e44e23e780662002fc72fb47a82" dependencies = [ "async-compression", - "base64 0.21.2", "bitflags 2.4.0", "bytes", "futures-core", @@ -3232,19 +3211,11 @@ dependencies = [ "http", "http-body", "http-range-header", - "httpdate", - "iri-string", - "mime", - "mime_guess", - "percent-encoding", "pin-project-lite", "tokio", "tokio-util", - "tower", "tower-layer", "tower-service", - "tracing", - "uuid", ] [[package]] @@ -3309,15 +3280,6 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.13" diff --git a/Cargo.toml b/Cargo.toml index 22d9b2b4f..a265f32ef 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,7 +41,7 @@ uuid = { version = "1", features = ["v4"] } axum = "0.6.20" axum-server = { version = "0.5", features = ["tls-rustls"] } axum-client-ip = "0.4.1" -tower-http = { version= "0.4.3", features = ["full"] } +tower-http = { version= "0.4.3", features = ["compression-full"] } bencode = { version = "1.0.0-alpha.1", path = "contrib/bencode" } 
torrust-tracker-primitives = { version = "3.0.0-alpha.3", path = "packages/primitives" } torrust-tracker-configuration = { version = "3.0.0-alpha.3", path = "packages/configuration" } From 2fa374286f90ef3c12b168889da2bd19eddb6768 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 5 Sep 2023 22:16:35 +0200 Subject: [PATCH 0558/1003] dev: add expected message to bencode panic tests --- contrib/bencode/src/reference/decode.rs | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/contrib/bencode/src/reference/decode.rs b/contrib/bencode/src/reference/decode.rs index 96ab6dfbf..d2aa180f8 100644 --- a/contrib/bencode/src/reference/decode.rs +++ b/contrib/bencode/src/reference/decode.rs @@ -329,69 +329,68 @@ mod tests { } #[test] - #[should_panic] + #[should_panic = "BencodeParseError(InvalidByte { pos: 0 }"] fn negative_decode_bytes_neg_len() { BencodeRef::decode(BYTES_NEG_LEN, BDecodeOpt::default()).unwrap(); } #[test] - #[should_panic] + #[should_panic = "BencodeParseError(BytesEmpty { pos: 20 }"] fn negative_decode_bytes_extra() { BencodeRef::decode(BYTES_EXTRA, BDecodeOpt::default()).unwrap(); } #[test] - #[should_panic] fn negative_decode_bytes_not_utf8() { let bencode = BencodeRef::decode(BYTES_NOT_UTF8, BDecodeOpt::default()).unwrap(); - bencode.str().unwrap(); + assert!(bencode.str().is_none()); } #[test] - #[should_panic] + #[should_panic = "BencodeParseError(InvalidIntParseError { pos: 1 }"] fn negative_decode_int_nan() { super::decode_int(INT_NAN, 1, crate::BEN_END).unwrap(); } #[test] - #[should_panic] + #[should_panic = "BencodeParseError(InvalidIntZeroPadding { pos: 1 }"] fn negative_decode_int_leading_zero() { super::decode_int(INT_LEADING_ZERO, 1, crate::BEN_END).unwrap(); } #[test] - #[should_panic] + #[should_panic = "BencodeParseError(InvalidIntZeroPadding { pos: 1 }"] fn negative_decode_int_double_zero() { super::decode_int(INT_DOUBLE_ZERO, 1, crate::BEN_END).unwrap(); } #[test] - #[should_panic] + 
#[should_panic = "BencodeParseError(InvalidIntNegativeZero { pos: 1 }"] fn negative_decode_int_negative_zero() { super::decode_int(INT_NEGATIVE_ZERO, 1, crate::BEN_END).unwrap(); } #[test] - #[should_panic] + #[should_panic = " BencodeParseError(InvalidIntParseError { pos: 1 }"] fn negative_decode_int_double_negative() { super::decode_int(INT_DOUBLE_NEGATIVE, 1, crate::BEN_END).unwrap(); } #[test] - #[should_panic] + #[should_panic = "BencodeParseError(InvalidKeyOrdering { pos: 15, key: [97, 95, 107, 101, 121] }"] fn negative_decode_dict_unordered_keys() { BencodeRef::decode(DICT_UNORDERED_KEYS, BDecodeOpt::new(5, true, true)).unwrap(); } #[test] - #[should_panic] + #[should_panic = "BencodeParseError(InvalidKeyDuplicates { pos: 18, key: [97, 95, 107, 101, 121] }"] fn negative_decode_dict_dup_keys_same_data() { BencodeRef::decode(DICT_DUP_KEYS_SAME_DATA, BDecodeOpt::default()).unwrap(); } #[test] - #[should_panic] + #[should_panic = "BencodeParseError(InvalidKeyDuplicates { pos: 18, key: [97, 95, 107, 101, 121] }"] fn negative_decode_dict_dup_keys_diff_data() { BencodeRef::decode(DICT_DUP_KEYS_DIFF_DATA, BDecodeOpt::default()).unwrap(); } From 089fb48a521f9d3439c9104b4a683c99330a9552 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 5 Sep 2023 22:24:10 +0200 Subject: [PATCH 0559/1003] dev: add should_panic expected message to tracker tests --- src/servers/udp/connection_cookie.rs | 2 +- src/tracker/peer.rs | 16 +++++++++++----- tests/servers/api/v1/contract/configuration.rs | 2 +- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs index a389388a7..4dc9896ab 100644 --- a/src/servers/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -314,7 +314,7 @@ mod tests { } #[test] - #[should_panic] + #[should_panic = "InvalidConnectionId"] fn it_should_be_not_valid_after_their_last_time_extent() { let remote_address = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 76747fea2..d6517f213 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -150,7 +150,13 @@ impl Id { /// Will panic if byte slice does not contains the exact amount of bytes need for the `Id`. #[must_use] pub fn from_bytes(bytes: &[u8]) -> Self { - assert_eq!(bytes.len(), PEER_ID_BYTES_LEN); + assert_eq!( + PEER_ID_BYTES_LEN, + bytes.len(), + "we are testing the equality of the constant: `PEER_ID_BYTES_LEN` ({}) and the supplied `bytes` length: {}", + PEER_ID_BYTES_LEN, + bytes.len(), + ); let mut ret = Self([0u8; PEER_ID_BYTES_LEN]); ret.0.clone_from_slice(bytes); ret @@ -363,14 +369,14 @@ mod test { } #[test] - #[should_panic] + #[should_panic = "we are testing the equality of the constant: `PEER_ID_BYTES_LEN` (20) and the supplied `bytes` length: 19"] fn should_fail_trying_to_instantiate_from_a_byte_slice_with_less_than_20_bytes() { let less_than_20_bytes = [0; 19]; let _: peer::Id = peer::Id::from_bytes(&less_than_20_bytes); } #[test] - #[should_panic] + #[should_panic = "we are testing the equality of the constant: `PEER_ID_BYTES_LEN` (20) and the supplied `bytes` length: 21"] fn should_fail_trying_to_instantiate_from_a_byte_slice_with_more_than_20_bytes() { let more_than_20_bytes = [0; 21]; let _: peer::Id = peer::Id::from_bytes(&more_than_20_bytes); @@ -418,13 +424,13 @@ mod test { } #[test] - #[should_panic] + #[should_panic = "NotEnoughBytes"] fn should_fail_trying_to_convert_from_a_byte_vector_with_less_than_20_bytes() { let _: peer::Id = peer::Id::try_from([0; 19].to_vec()).unwrap(); } #[test] - #[should_panic] + #[should_panic = "TooManyBytes"] fn should_fail_trying_to_convert_from_a_byte_vector_with_more_than_20_bytes() { let _: peer::Id = peer::Id::try_from([0; 21].to_vec()).unwrap(); } diff --git a/tests/servers/api/v1/contract/configuration.rs b/tests/servers/api/v1/contract/configuration.rs index e4b608607..9ed257c51 100644 
--- a/tests/servers/api/v1/contract/configuration.rs +++ b/tests/servers/api/v1/contract/configuration.rs @@ -3,7 +3,7 @@ use torrust_tracker_test_helpers::configuration; use crate::servers::api::test_environment::stopped_test_environment; #[tokio::test] -#[should_panic] +#[should_panic = "Could not receive bind_address."] async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { let mut test_env = stopped_test_environment(configuration::ephemeral()); From efd1c995becbbf7fccf730d9cfa02157ce0c022c Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 6 Sep 2023 00:28:04 +0200 Subject: [PATCH 0560/1003] test: ignore ssl config test --- tests/servers/api/v1/contract/configuration.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/servers/api/v1/contract/configuration.rs b/tests/servers/api/v1/contract/configuration.rs index 9ed257c51..cfdb59b0c 100644 --- a/tests/servers/api/v1/contract/configuration.rs +++ b/tests/servers/api/v1/contract/configuration.rs @@ -3,6 +3,7 @@ use torrust_tracker_test_helpers::configuration; use crate::servers::api::test_environment::stopped_test_environment; #[tokio::test] +#[ignore] #[should_panic = "Could not receive bind_address."] async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { let mut test_env = stopped_test_environment(configuration::ephemeral()); From ebd169791c0bb13ce87124e0c64c327924821b16 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 6 Sep 2023 00:25:35 +0200 Subject: [PATCH 0561/1003] chore: update lockfile --- Cargo.lock | 245 ++++++++++++++++++++-------------------- cSpell.json | 1 + src/shared/clock/mod.rs | 6 +- 3 files changed, 127 insertions(+), 125 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 768d0ea60..bc88e9bf5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] @@ -41,9 +41,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" +checksum = "0c378d78423fdad8089616f827526ee33c19f2fddbd5de1629152c9593ba4783" dependencies = [ "memchr", ] @@ -92,9 +92,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstyle" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" +checksum = "15c4c2c83f81532e5845a733998b6971faca23490340a418e9b72a3ec9de12ea" [[package]] name = "aquatic_udp_protocol" @@ -120,9 +120,9 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-compression" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b74f44609f0f91493e3082d3734d98497e094777144380ea4db9f9905dd5b6" +checksum = "d495b6dc0184693324491a5ac05f559acc97bf937ab31d7a1c33dd0016be6d2b" dependencies = [ "brotli", "flate2", @@ -142,7 +142,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -233,9 +233,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", @@ -254,9 +254,9 @@ checksum = 
"9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.2" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +checksum = "414dcefbc63d77c526a76b3afcf6fbb9b5e2791c19c3aa2297733208750c6e53" [[package]] name = "bencode" @@ -480,15 +480,15 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.26" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" +checksum = "d87d9d13be47a5b7c3907137f1290b0459a7f80efb26be8c52afb11963bccb02" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", "serde", - "winapi", + "windows-targets", ] [[package]] @@ -531,18 +531,18 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.23" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03aef18ddf7d879c15ce20f04826ef8418101c7e528014c3eeea13321047dca3" +checksum = "6a13b88d2c62ff462f88e4a121f17a82c1af05693a2f192b5c38d14de73c19f6" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" -version = "4.3.23" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ce6fffb678c9b80a70b6b6de0aad31df727623a70fd9a842c30cd573e2fa98" +checksum = "2bb9faaa7c2ef94b2743a21f5a29e6f0010dff4caa69ac8e9d6cf8b6fa74da08" dependencies = [ "anstyle", "clap_lex", @@ -550,9 +550,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" +checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" [[package]] name = "cmake" @@ -756,7 +756,7 @@ dependencies = [ 
"proc-macro2", "quote", "strsim", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -767,7 +767,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -800,7 +800,7 @@ checksum = "9abcad25e9720609ccb3dcdb795d845e37d8ce34183330a9f48b03a1a71c8e21" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -839,9 +839,9 @@ checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "encoding_rs" -version = "0.8.32" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" +checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" dependencies = [ "cfg-if", ] @@ -854,9 +854,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" +checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" dependencies = [ "errno-dragonfly", "libc", @@ -1001,7 +1001,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1013,7 +1013,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1025,7 +1025,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1090,7 +1090,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1146,9 +1146,9 @@ dependencies = [ 
[[package]] name = "gimli" -version = "0.27.3" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" +checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" [[package]] name = "glob" @@ -1158,9 +1158,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "h2" -version = "0.3.20" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" +checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" dependencies = [ "bytes", "fnv", @@ -1211,9 +1211,9 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312f66718a2d7789ffef4f4b7b213138ed9f1eb3aa1d0d82fc99f88fb3ffd26f" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ "hashbrown 0.14.0", ] @@ -1375,7 +1375,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5305557fa27b460072ae15ce07617e999f5879f14d376c8449f0bfb9f9d8e91e" dependencies = [ "derive_utils", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1619,9 +1619,9 @@ checksum = "ed1202b2a6f884ae56f04cff409ab315c5ce26b5e58d7412e484f01fd52f52ef" [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" [[package]] name = "memoffset" @@ -1839,9 +1839,9 @@ checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" [[package]] name = "num-bigint" -version = "0.4.3" +version = "0.4.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" dependencies = [ "autocfg", "num-integer", @@ -1879,9 +1879,9 @@ dependencies = [ [[package]] name = "object" -version = "0.31.1" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" dependencies = [ "memchr", ] @@ -1900,11 +1900,11 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "openssl" -version = "0.10.56" +version = "0.10.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "729b745ad4a5575dd06a3e1af1414bd330ee561c01b3899eb584baeaa8def17e" +checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.0", "cfg-if", "foreign-types", "libc", @@ -1921,7 +1921,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -1932,18 +1932,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.27.0+1.1.1v" +version = "300.1.3+3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06e8f197c82d7511c5b014030c9b1efeda40d7d5f99d23b4ceed3524a5e63f02" +checksum = "cd2c101a165fff9935e34def4669595ab1c7847943c42be86e21503e482be107" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.91" +version = "0.9.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "866b5f16f90776b9bb8dc1e1802ac6f0513de3a7a7465867bfbc563dc737faac" +checksum = 
"db4d56a4c0478783083cfafcc42493dd4a981d41669da64b4572a2a089b51b1d" dependencies = [ "cc", "libc", @@ -2014,19 +2014,20 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.7.2" +version = "2.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1acb4a4365a13f749a93f1a094a7805e5cfa0955373a9de860d962eaa3a5fe5a" +checksum = "d7a4d085fd991ac8d5b05a147b437791b4260b76326baf0fc60cf7c9c27ecd33" dependencies = [ + "memchr", "thiserror", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.7.2" +version = "2.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "666d00490d4ac815001da55838c500eafb0320019bbaa44444137c48b443a853" +checksum = "a2bee7be22ce7918f641a33f08e3f43388c7656772244e2bbb2477f44cc9021a" dependencies = [ "pest", "pest_generator", @@ -2034,22 +2035,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.2" +version = "2.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929" +checksum = "d1511785c5e98d79a05e8a6bc34b4ac2168a0e3e92161862030ad84daa223141" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] name = "pest_meta" -version = "2.7.2" +version = "2.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56af0a30af74d0445c0bf6d9d051c979b516a1a5af790d251daee76005420a48" +checksum = "b42f0394d3123e33353ca5e1e89092e533d2cc490389f2bd6131c43c634ebc5f" dependencies = [ "once_cell", "pest", @@ -2073,14 +2074,14 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] name = "pin-project-lite" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -2306,9 +2307,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.3" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" +checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" dependencies = [ "aho-corasick", "memchr", @@ -2318,9 +2319,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.6" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" +checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" dependencies = [ "aho-corasick", "memchr", @@ -2329,9 +2330,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "rend" @@ -2344,11 +2345,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.18" +version = "0.11.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" +checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" dependencies = [ - "base64 0.21.2", + "base64 0.21.3", "bytes", "encoding_rs", "futures-core", @@ -2459,13 +2460,12 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.31.0" +version = "1.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a2ab0025103a60ecaaf3abf24db1db240a4e1c15837090d2c32f625ac98abea" +checksum = 
"a4c4216490d5a413bc6d10fa4742bd7d4955941d062c0ef873141d6b0e7b30fd" dependencies = [ "arrayvec", "borsh", - "byteorder", "bytes", "num-traits", "rand", @@ -2497,9 +2497,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.8" +version = "0.38.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f" +checksum = "c0c3dde1fc030af041adc40e79c0e7fbcf431dd24870053d187d7c66e4b87453" dependencies = [ "bitflags 2.4.0", "errno", @@ -2510,9 +2510,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.6" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb" +checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", "ring", @@ -2526,14 +2526,14 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.2", + "base64 0.21.3", ] [[package]] name = "rustls-webpki" -version = "0.101.3" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "261e9e0888cba427c3316e6322805653c9425240b6fd96cee7cb671ab70ab8d0" +checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" dependencies = [ "ring", "untrusted", @@ -2637,9 +2637,9 @@ checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" [[package]] name = "serde" -version = "1.0.171" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9" +checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" dependencies = [ "serde_derive", ] @@ -2665,13 +2665,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.171" +version 
= "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" +checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -2703,7 +2703,7 @@ checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -2733,7 +2733,7 @@ version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ca3b16a3d82c4088f343b7480a93550b3eabe1a358569c2dfe38bbcead07237" dependencies = [ - "base64 0.21.2", + "base64 0.21.3", "chrono", "hex", "indexmap 1.9.3", @@ -2753,7 +2753,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -2780,9 +2780,9 @@ dependencies = [ [[package]] name = "shlex" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" +checksum = "a7cee0529a6d40f580e7a5e6c495c8fbfe21b7b52795ed4bb5e62cdf92bc6380" [[package]] name = "signal-hook-registry" @@ -2801,9 +2801,9 @@ checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" [[package]] name = "slab" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] @@ -2875,9 +2875,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.29" +version = "2.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" +checksum = 
"718fa2415bcb8d8bd775917a1bf12a7931b6dfa890753378538118181e0cb398" dependencies = [ "proc-macro2", "quote", @@ -2917,29 +2917,29 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.47" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" +checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.47" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" +checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] name = "time" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a79d09ac6b08c1ab3906a2f7cc2e81a0e27c7ae89c63812df75e52bef0751e07" +checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48" dependencies = [ "deranged", "itoa", @@ -2956,9 +2956,9 @@ checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75c65469ed6b3a4809d987a41eb1dc918e9bc1d92211cbad7ae82931846f7451" +checksum = "1a942f44339478ef67935ab2bbaec2fb0322496cf3cbe84b261e06ac3814c572" dependencies = [ "time-core", ] @@ -3014,7 +3014,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", ] [[package]] @@ -3199,9 +3199,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.3" +version = "0.4.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ae70283aba8d2a8b411c695c437fe25b8b5e44e23e780662002fc72fb47a82" +checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ "async-compression", "bitflags 2.4.0", @@ -3309,9 +3309,9 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" +checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" dependencies = [ "form_urlencoded", "idna", @@ -3342,9 +3342,9 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" dependencies = [ "same-file", "winapi-util", @@ -3386,7 +3386,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", "wasm-bindgen-shared", ] @@ -3420,7 +3420,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.29", + "syn 2.0.31", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3549,20 +3549,21 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.14" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d09770118a7eb1ccaf4a594a221334119a44a814fcb0d31c5b85e83e97227a97" +checksum = "7c2e3184b9c4e92ad5167ca73039d0c42476302ab603e2fec4487511f38ccefc" dependencies = [ "memchr", ] [[package]] name = "winreg" -version = "0.10.1" +version = "0.50.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "winapi", + "cfg-if", + "windows-sys", ] [[package]] diff --git a/cSpell.json b/cSpell.json index 8a85739e2..6a68e045d 100644 --- a/cSpell.json +++ b/cSpell.json @@ -26,6 +26,7 @@ "codegen", "completei", "connectionless", + "datetime", "dockerhub", "downloadedi", "dtolnay", diff --git a/src/shared/clock/mod.rs b/src/shared/clock/mod.rs index 7a5290f49..922ca3200 100644 --- a/src/shared/clock/mod.rs +++ b/src/shared/clock/mod.rs @@ -120,7 +120,7 @@ pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime) -> D /// (this will naturally happen in 292.5 billion years) #[must_use] pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) -> DateTime { - DateTime::::from_utc( + DateTime::::from_naive_utc_and_offset( NaiveDateTime::from_timestamp_opt( i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), duration.subsec_nanos(), @@ -162,13 +162,13 @@ mod tests { let timestamp = DurationSinceUnixEpoch::ZERO; assert_eq!( convert_from_timestamp_to_datetime_utc(timestamp), - DateTime::::from_utc(NaiveDateTime::from_timestamp_opt(0, 0).unwrap(), Utc) + DateTime::::from_naive_utc_and_offset(NaiveDateTime::from_timestamp_opt(0, 0).unwrap(), Utc) ); } #[test] fn should_be_converted_from_datetime_utc() { - let datetime = DateTime::::from_utc(NaiveDateTime::from_timestamp_opt(0, 0).unwrap(), Utc); + let datetime = DateTime::::from_naive_utc_and_offset(NaiveDateTime::from_timestamp_opt(0, 0).unwrap(), Utc); assert_eq!( convert_from_datetime_utc_to_timestamp(&datetime), DurationSinceUnixEpoch::ZERO From 5da5a25e7416408022aec56379c0e62be132d9c1 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 31 Aug 2023 12:04:10 +0200 Subject: [PATCH 0562/1003] dev: container overhaul --- 
.dockerignore | 32 +- .env.local | 1 - .github/workflows/container.yaml | 125 +++++ .github/workflows/publish_docker_image.yml | 86 ---- .github/workflows/test_docker.yml | 26 -- .gitignore | 3 +- .vscode/settings.json | 42 +- Cargo.lock | 10 +- Cargo.toml | 24 +- Containerfile | 140 ++++++ Dockerfile | 80 ---- README.md | 135 ++++-- bin/install-demo.sh | 27 -- bin/install.sh | 18 - cSpell.json | 27 +- compose.yaml | 26 +- contrib/bencode/src/reference/decode.rs | 2 + contrib/dev-tools/containers/docker-build.sh | 5 + .../dev-tools/containers/docker-install.sh | 3 + .../dev-tools/containers/docker-run-local.sh | 13 + .../dev-tools/containers/docker-run-public.sh | 13 + contrib/dev-tools/init/install-local.sh | 12 + contrib/dev-tools/su-exec/LICENSE | 22 + contrib/dev-tools/su-exec/Makefile | 17 + contrib/dev-tools/su-exec/README.md | 46 ++ contrib/dev-tools/su-exec/su-exec.c | 109 +++++ docker/README.md | 289 ------------ docker/bin/build.sh | 13 - docker/bin/install.sh | 4 - docker/bin/run-local-image.sh | 13 - docker/bin/run-public-image.sh | 13 - docs/containers.md | 428 ++++++++++++++++++ packages/configuration/Cargo.toml | 4 +- packages/configuration/src/lib.rs | 130 ++++-- packages/test-helpers/Cargo.toml | 4 +- rustfmt.toml | 1 - share/container/entry_script_sh | 81 ++++ share/container/message | 2 + .../config/tracker.container.mysql.toml | 39 ++ .../config/tracker.container.sqlite3.toml | 39 ++ .../config/tracker.development.sqlite3.toml | 2 +- src/bootstrap/config.rs | 62 +-- src/lib.rs | 24 +- src/servers/apis/mod.rs | 10 +- src/servers/apis/v1/middlewares/auth.rs | 2 +- src/tracker/databases/driver.rs | 2 +- src/tracker/mod.rs | 2 +- tests/servers/http/v1/contract.rs | 27 +- 48 files changed, 1479 insertions(+), 756 deletions(-) delete mode 100644 .env.local create mode 100644 .github/workflows/container.yaml delete mode 100644 .github/workflows/publish_docker_image.yml delete mode 100644 .github/workflows/test_docker.yml create mode 100644 
Containerfile delete mode 100644 Dockerfile delete mode 100755 bin/install-demo.sh delete mode 100755 bin/install.sh create mode 100755 contrib/dev-tools/containers/docker-build.sh create mode 100755 contrib/dev-tools/containers/docker-install.sh create mode 100755 contrib/dev-tools/containers/docker-run-local.sh create mode 100755 contrib/dev-tools/containers/docker-run-public.sh create mode 100755 contrib/dev-tools/init/install-local.sh create mode 100644 contrib/dev-tools/su-exec/LICENSE create mode 100644 contrib/dev-tools/su-exec/Makefile create mode 100644 contrib/dev-tools/su-exec/README.md create mode 100644 contrib/dev-tools/su-exec/su-exec.c delete mode 100644 docker/README.md delete mode 100755 docker/bin/build.sh delete mode 100755 docker/bin/install.sh delete mode 100755 docker/bin/run-local-image.sh delete mode 100755 docker/bin/run-public-image.sh create mode 100644 docs/containers.md create mode 100644 share/container/entry_script_sh create mode 100644 share/container/message create mode 100644 share/default/config/tracker.container.mysql.toml create mode 100644 share/default/config/tracker.container.sqlite3.toml rename config.toml.local => share/default/config/tracker.development.sqlite3.toml (92%) diff --git a/.dockerignore b/.dockerignore index 3d8a25cce..f42859922 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,16 +1,16 @@ -.git -.git-blame-ignore -.github -.gitignore -.vscode -bin/ -config.toml -config.toml.local -cSpell.json -data.db -docker/ -NOTICE -README.md -rustfmt.toml -storage/ -target/ +/.git +/.git-blame-ignore +/.github +/.gitignore +/.vscode +/bin/ +/tracker.* +/cSpell.json +/data.db +/docker/bin/ +/NOTICE +/README.md +/rustfmt.toml +/storage/ +/target/ +/etc/ diff --git a/.env.local b/.env.local deleted file mode 100644 index fefed56c4..000000000 --- a/.env.local +++ /dev/null @@ -1 +0,0 @@ -TORRUST_TRACKER_USER_UID=1000 \ No newline at end of file diff --git a/.github/workflows/container.yaml 
b/.github/workflows/container.yaml new file mode 100644 index 000000000..27a2dc93c --- /dev/null +++ b/.github/workflows/container.yaml @@ -0,0 +1,125 @@ +name: Container + +on: + push: + tags-ignore: + - "!v*" + pull_request: + branches: + - "develop" + - "main" + +env: + CARGO_TERM_COLOR: always + +jobs: + test: + name: Test (Docker) + runs-on: ubuntu-latest + + steps: + - id: setup + name: Setup Toolchain + uses: docker/setup-buildx-action@v2 + + - id: build + name: Build + uses: docker/build-push-action@v4 + with: + file: ./Containerfile + push: false + load: true + tags: torrust-tracker:local + cache-from: type=gha + cache-to: type=gha,mode=max + + - id: inspect + name: Inspect + run: docker image inspect torrust-tracker:local + + - id: checkout + name: Checkout Repository + uses: actions/checkout@v3 + + - id: compose + name: Compose + run: docker compose build + + context: + name: Context + needs: test + runs-on: ubuntu-latest + + outputs: + continue: ${{ steps.check.outputs.continue }} + + steps: + - id: check + name: Check Context + run: | + if [[ "${{ github.event_name }}" == "push" && ( "${{ github.ref }}" == "refs/heads/main" || "${{ github.ref }}" == "refs/heads/develop" || "${{ github.ref }}" == "refs/heads/docker" ) ]] || + [[ "${{ github.event_name }}" == "push" && "${{ github.ref }}" =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + if [[ "${{ github.repository }}" == "torrust/torrust-tracker" ]]; then + echo "Context is torrust/torrust-tracker, and push is: main, develop, docker, or a tag of v*.*.*" + echo "continue=true" >> $GITHUB_OUTPUT + fi + fi + + secrets: + name: Secrets + needs: context + environment: dockerhub-torrust + if: needs.context.outputs.continue == 'true' + runs-on: ubuntu-latest + + outputs: + continue: ${{ steps.check.outputs.continue }} + + steps: + - id: check + name: Check + env: + DOCKER_HUB_ACCESS_TOKEN: "${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}" + if: "${{ env.DOCKER_HUB_ACCESS_TOKEN != '' }}" + run: echo "continue=true" >> 
$GITHUB_OUTPUT + + publish: + name: Publish + environment: dockerhub-torrust + needs: secrets + if: needs.secrets.outputs.continue == 'true' + runs-on: ubuntu-latest + + steps: + - id: meta + name: Docker meta + uses: docker/metadata-action@v4 + with: + images: | + "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" + tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + + - id: login + name: Login to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_HUB_USERNAME }} + password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} + + - id: setup + name: Setup Toolchain + uses: docker/setup-buildx-action@v2 + + - name: Build and push + uses: docker/build-push-action@v4 + with: + file: ./Containerfile + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml deleted file mode 100644 index 1dd65e3a7..000000000 --- a/.github/workflows/publish_docker_image.yml +++ /dev/null @@ -1,86 +0,0 @@ -name: Publish docker image - -on: - push: - branches: - - "main" - - "develop" - tags: - - "v*" - -env: - # Azure file share volume mount requires the Linux container run as root - # https://learn.microsoft.com/en-us/azure/container-instances/container-instances-volume-azure-files#limitations - # TORRUST_TRACKER_RUN_AS_USER: root - TORRUST_TRACKER_RUN_AS_USER: appuser - -jobs: - check-secret: - runs-on: ubuntu-latest - environment: dockerhub-torrust - outputs: - publish: ${{ steps.check.outputs.publish }} - steps: - - id: check - env: - DOCKER_HUB_USERNAME: "${{ secrets.DOCKER_HUB_USERNAME }}" - if: "${{ env.DOCKER_HUB_USERNAME != '' }}" - run: echo "publish=true" >> $GITHUB_OUTPUT - - test: - needs: check-secret - if: 
needs.check-secret.outputs.publish == 'true' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: dtolnay/rust-toolchain@stable - with: - toolchain: stable - components: llvm-tools-preview - - uses: Swatinem/rust-cache@v2 - - name: Run Tests - run: cargo test - - dockerhub: - needs: test - if: needs.check-secret.outputs.publish == 'true' - runs-on: ubuntu-latest - environment: dockerhub-torrust - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Docker meta - id: meta - uses: docker/metadata-action@v4 - with: - images: | - # For example: torrust/tracker - "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" - tags: | - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern={{major}}.{{minor}} - - - name: Login to Docker Hub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_HUB_USERNAME }} - password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - name: Build and push - uses: docker/build-push-action@v4 - with: - context: . - file: ./Dockerfile - build-args: | - RUN_AS_USER=${{ env.TORRUST_TRACKER_RUN_AS_USER }} - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - cache-from: type=gha - cache-to: type=gha,mode=max diff --git a/.github/workflows/test_docker.yml b/.github/workflows/test_docker.yml deleted file mode 100644 index a62965878..000000000 --- a/.github/workflows/test_docker.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: Test docker build - -on: - push: - pull_request: - -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - name: Build docker image - uses: docker/build-push-action@v4 - with: - context: . 
- file: ./Dockerfile - push: false - cache-from: type=gha - cache-to: type=gha,mode=max - - - name: Build docker-compose images - run: docker compose build diff --git a/.gitignore b/.gitignore index 6b58dcb45..2d8d0b8bd 100644 --- a/.gitignore +++ b/.gitignore @@ -3,9 +3,10 @@ /.coverage/ /.idea/ /.vscode/launch.json -/config.toml +/tracker.toml /data.db /database.db /database.json.bz2 /storage/ /target +/tracker.* diff --git a/.vscode/settings.json b/.vscode/settings.json index 78239b757..3bf0969e9 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,23 +1,23 @@ { - "[rust]": { - "editor.formatOnSave": true - }, - "rust-analyzer.checkOnSave": true, - "rust-analyzer.check.command": "clippy", - "rust-analyzer.check.allTargets": true, - "rust-analyzer.check.extraArgs": [ - "--", - "-D", - "clippy::correctness", - "-D", - "clippy::suspicious", - "-W", - "clippy::complexity", - "-W", - "clippy::perf", - "-W", - "clippy::style", - "-W", - "clippy::pedantic", - ], + "[rust]": { + "editor.formatOnSave": true + }, + "rust-analyzer.checkOnSave": true, + "rust-analyzer.check.command": "clippy", + "rust-analyzer.check.allTargets": true, + "rust-analyzer.check.extraArgs": [ + "--", + "-D", + "clippy::correctness", + "-D", + "clippy::suspicious", + "-W", + "clippy::complexity", + "-W", + "clippy::perf", + "-W", + "clippy::style", + "-W", + "clippy::pedantic", + ], } \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index bc88e9bf5..0b7c9d0d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3096,7 +3096,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.3" +version = "3.0.0-alpha.4" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -3142,7 +3142,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.3" +version = "3.0.0-alpha.4" dependencies = [ "config", "log", @@ -3157,7 +3157,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = 
"3.0.0-alpha.3" +version = "3.0.0-alpha.4" dependencies = [ "log", "thiserror", @@ -3165,7 +3165,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.3" +version = "3.0.0-alpha.4" dependencies = [ "derive_more", "serde", @@ -3173,7 +3173,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.3" +version = "3.0.0-alpha.4" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index a265f32ef..9c94ea10d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,13 +9,21 @@ version.workspace = true [workspace.package] license-file = "COPYRIGHT" -authors = ["Nautilus Cyberneering , Mick van Dijke "] +authors = [ + "Nautilus Cyberneering , Mick van Dijke ", +] edition = "2021" repository = "https://github.com/torrust/torrust-tracker" -version = "3.0.0-alpha.3" +version = "3.0.0-alpha.4" [dependencies] -tokio = { version = "1.29", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } +tokio = { version = "1.29", features = [ + "rt-multi-thread", + "net", + "sync", + "macros", + "signal", +] } serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2" serde_json = "1.0" @@ -41,11 +49,11 @@ uuid = { version = "1", features = ["v4"] } axum = "0.6.20" axum-server = { version = "0.5", features = ["tls-rustls"] } axum-client-ip = "0.4.1" -tower-http = { version= "0.4.3", features = ["compression-full"] } +tower-http = { version = "0.4.3", features = ["compression-full"] } bencode = { version = "1.0.0-alpha.1", path = "contrib/bencode" } -torrust-tracker-primitives = { version = "3.0.0-alpha.3", path = "packages/primitives" } -torrust-tracker-configuration = { version = "3.0.0-alpha.3", path = "packages/configuration" } -torrust-tracker-located-error = { version = "3.0.0-alpha.3", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.4", path = "packages/primitives" } +torrust-tracker-configuration = { version = 
"3.0.0-alpha.4", path = "packages/configuration" } +torrust-tracker-located-error = { version = "3.0.0-alpha.4", path = "packages/located-error" } multimap = "0.9" hyper = "0.14" @@ -56,7 +64,7 @@ serde_urlencoded = "0.7" serde_repr = "0.1" serde_bytes = "0.11" local-ip-address = "0.5" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.3", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.4", path = "packages/test-helpers" } [workspace] members = [ diff --git a/Containerfile b/Containerfile new file mode 100644 index 000000000..229500cd8 --- /dev/null +++ b/Containerfile @@ -0,0 +1,140 @@ +# syntax=docker/dockerfile:latest + +# Torrust Tracker + +## Builder Image +FROM rust:bookworm as chef +WORKDIR /tmp +RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash +RUN cargo binstall --no-confirm cargo-chef cargo-nextest + +## Tester Image +FROM rust:slim-bookworm as tester +WORKDIR /tmp + +RUN apt-get update; apt-get install -y curl sqlite3; apt-get autoclean +RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash +RUN cargo binstall --no-confirm cargo-nextest + +COPY ./share/ /app/share/torrust +RUN mkdir -p /app/share/torrust/default/database/; \ + sqlite3 /app/share/torrust/default/database/tracker.sqlite3.db "VACUUM;" + +## Su Exe Compile +FROM docker.io/library/gcc:bookworm as gcc +COPY ./contrib/dev-tools/su-exec/ /usr/local/src/su-exec/ +RUN cc -Wall -Werror -g /usr/local/src/su-exec/su-exec.c -o /usr/local/bin/su-exec; chmod +x /usr/local/bin/su-exec + + +## Chef Prepare (look at project and see wat we need) +FROM chef AS recipe +WORKDIR /build/src +COPY . 
/build/src +RUN cargo chef prepare --recipe-path /build/recipe.json + + +## Cook (debug) +FROM chef AS dependencies_debug +WORKDIR /build/src +COPY --from=recipe /build/recipe.json /build/recipe.json +RUN cargo chef cook --tests --benches --examples --workspace --all-targets --all-features --recipe-path /build/recipe.json +RUN cargo nextest archive --tests --benches --examples --workspace --all-targets --all-features --archive-file /build/temp.tar.zst ; rm -f /build/temp.tar.zst + +## Cook (release) +FROM chef AS dependencies +WORKDIR /build/src +COPY --from=recipe /build/recipe.json /build/recipe.json +RUN cargo chef cook --tests --benches --examples --workspace --all-targets --all-features --recipe-path /build/recipe.json --release +RUN cargo nextest archive --tests --benches --examples --workspace --all-targets --all-features --archive-file /build/temp.tar.zst --release ; rm -f /build/temp.tar.zst + + +## Build Archive (debug) +FROM dependencies_debug AS build_debug +WORKDIR /build/src +COPY . /build/src +RUN cargo nextest archive --tests --benches --examples --workspace --all-targets --all-features --archive-file /build/torrust-tracker-debug.tar.zst + +## Build Archive (release) +FROM dependencies AS build +WORKDIR /build/src +COPY . /build/src +RUN cargo nextest archive --tests --benches --examples --workspace --all-targets --all-features --archive-file /build/torrust-tracker.tar.zst --release + + +# Extract and Test (debug) +FROM tester as test_debug +WORKDIR /test +COPY . 
/test/src/ +COPY --from=build_debug \ + /build/torrust-tracker-debug.tar.zst \ + /test/torrust-tracker-debug.tar.zst +RUN cargo nextest run --workspace-remap /test/src/ --extract-to /test/src/ --no-run --archive-file /test/torrust-tracker-debug.tar.zst +RUN cargo nextest run --workspace-remap /test/src/ --target-dir-remap /test/src/target/ --cargo-metadata /test/src/target/nextest/cargo-metadata.json --binaries-metadata /test/src/target/nextest/binaries-metadata.json + +RUN mkdir -p /app/bin/; cp -l /test/src/target/debug/torrust-tracker /app/bin/torrust-tracker +RUN mkdir /app/lib/; cp -l $(realpath $(ldd /app/bin/torrust-tracker | grep "libz\.so\.1" | awk '{print $3}')) /app/lib/libz.so.1 +RUN chown -R root:root /app; chmod -R u=rw,go=r,a+X /app; chmod -R a+x /app/bin + +# Extract and Test (release) +FROM tester as test +WORKDIR /test +COPY . /test/src +COPY --from=build \ + /build/torrust-tracker.tar.zst \ + /test/torrust-tracker.tar.zst +RUN cargo nextest run --workspace-remap /test/src/ --extract-to /test/src/ --no-run --archive-file /test/torrust-tracker.tar.zst +RUN cargo nextest run --workspace-remap /test/src/ --target-dir-remap /test/src/target/ --cargo-metadata /test/src/target/nextest/cargo-metadata.json --binaries-metadata /test/src/target/nextest/binaries-metadata.json + +RUN mkdir -p /app/bin/; cp -l /test/src/target/release/torrust-tracker /app/bin/torrust-tracker +RUN mkdir -p /app/lib/; cp -l $(realpath $(ldd /app/bin/torrust-tracker | grep "libz\.so\.1" | awk '{print $3}')) /app/lib/libz.so.1 +RUN chown -R root:root /app; chmod -R u=rw,go=r,a+X /app; chmod -R a+x /app/bin + + +## Runtime +FROM gcr.io/distroless/cc-debian12:debug as runtime +RUN ["/busybox/cp", "-sp", "/busybox/sh","/busybox/cat","/busybox/ls","/busybox/env", "/bin/"] +COPY --from=gcc --chmod=0555 /usr/local/bin/su-exec /bin/su-exec + +ARG TORRUST_TRACKER_PATH_CONFIG="/etc/torrust/tracker/tracker.toml" +ARG TORRUST_TRACKER_DATABASE="sqlite3" +ARG USER_ID=1000 +ARG UDP_PORT=6969 
+ARG HTTP_PORT=7070 +ARG API_PORT=1212 + +ENV TORRUST_TRACKER_PATH_CONFIG=${TORRUST_TRACKER_PATH_CONFIG} +ENV TORRUST_TRACKER_DATABASE=${TORRUST_TRACKER_DATABASE} +ENV USER_ID=${USER_ID} +ENV UDP_PORT=${UDP_PORT} +ENV HTTP_PORT=${HTTP_PORT} +ENV API_PORT=${API_PORT} +ENV TZ=Etc/UTC + +EXPOSE ${UDP_PORT}/udp +EXPOSE ${HTTP_PORT}/tcp +EXPOSE ${API_PORT}/tcp + +RUN mkdir -p /var/lib/torrust/tracker /var/log/torrust/tracker /etc/torrust/tracker + +ENV ENV=/etc/profile +COPY --chmod=0555 ./share/container/entry_script_sh /usr/local/bin/entry.sh + +VOLUME ["/var/lib/torrust/tracker","/var/log/torrust/tracker","/etc/torrust/tracker"] + +ENV RUNTIME="runtime" +ENTRYPOINT ["/usr/local/bin/entry.sh"] + + +## Torrust-Tracker (debug) +FROM runtime as debug +ENV RUNTIME="debug" +COPY --from=test_debug /app/ /usr/ +RUN env +CMD ["sh"] + +## Torrust-Tracker (release) (default) +FROM runtime as release +ENV RUNTIME="release" +COPY --from=test /app/ /usr/ +# HEALTHCHECK CMD ["/usr/bin/wget", "--no-verbose", "--tries=1", "--spider", "localhost:${API_PORT}/version"] +CMD ["/usr/bin/torrust-tracker"] diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 96d21fa84..000000000 --- a/Dockerfile +++ /dev/null @@ -1,80 +0,0 @@ -FROM clux/muslrust:stable AS chef -WORKDIR /app -RUN cargo install cargo-chef - - -FROM chef AS planner -WORKDIR /app -COPY . . -RUN cargo chef prepare --recipe-path recipe.json - - -FROM chef as development -WORKDIR /app -ARG UID=1000 -ARG RUN_AS_USER=appuser -ARG TRACKER_UDP_PORT=6969 -ARG TRACKER_HTTP_PORT=7070 -ARG TRACKER_API_PORT=1212 -# Add the app user for development -ENV USER=appuser -ENV UID=$UID -RUN adduser --uid "${UID}" "${USER}" -# Build dependencies -COPY --from=planner /app/recipe.json recipe.json -RUN cargo chef cook --recipe-path recipe.json -# Build the application -COPY . . 
-RUN cargo build --bin torrust-tracker -USER $RUN_AS_USER:$RUN_AS_USER -EXPOSE $TRACKER_UDP_PORT/udp -EXPOSE $TRACKER_HTTP_PORT/tcp -EXPOSE $TRACKER_API_PORT/tcp -CMD ["cargo", "run"] - - -FROM chef AS builder -WORKDIR /app -ARG UID=1000 -# Add the app user for production -ENV USER=appuser -ENV UID=$UID -RUN adduser \ - --disabled-password \ - --gecos "" \ - --home "/nonexistent" \ - --shell "/sbin/nologin" \ - --no-create-home \ - --uid "${UID}" \ - "${USER}" -# Build dependencies -COPY --from=planner /app/recipe.json recipe.json -RUN cargo chef cook --release --target x86_64-unknown-linux-musl --recipe-path recipe.json -# Build the application -COPY . . -RUN cargo build --release --target x86_64-unknown-linux-musl --bin torrust-tracker -# Strip the binary -# More info: https://github.com/LukeMathWalker/cargo-chef/issues/149 -RUN strip /app/target/x86_64-unknown-linux-musl/release/torrust-tracker - - -FROM alpine:latest -WORKDIR /app -ARG RUN_AS_USER=appuser -ARG TRACKER_UDP_PORT=6969 -ARG TRACKER_HTTP_PORT=7070 -ARG TRACKER_API_PORT=1212 -RUN apk --no-cache add ca-certificates -ENV TZ=Etc/UTC -ENV RUN_AS_USER=$RUN_AS_USER -COPY --from=builder /etc/passwd /etc/passwd -COPY --from=builder /etc/group /etc/group -COPY --from=builder --chown=$RUN_AS_USER \ - /app/target/x86_64-unknown-linux-musl/release/torrust-tracker \ - /app/torrust-tracker -RUN chown -R $RUN_AS_USER:$RUN_AS_USER /app -USER $RUN_AS_USER:$RUN_AS_USER -EXPOSE $TRACKER_UDP_PORT/udp -EXPOSE $TRACKER_HTTP_PORT/tcp -EXPOSE $TRACKER_API_PORT/tcp -ENTRYPOINT ["/app/torrust-tracker"] \ No newline at end of file diff --git a/README.md b/README.md index b419c12c1..832af0d85 100644 --- a/README.md +++ b/README.md @@ -1,32 +1,35 @@ # Torrust Tracker -[![Build & Release](https://github.com/torrust/torrust-tracker/actions/workflows/build_release.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/build_release.yml) 
[![CI](https://github.com/torrust/torrust-tracker/actions/workflows/test_build_release.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/test_build_release.yml) [![Publish crate](https://github.com/torrust/torrust-tracker/actions/workflows/publish_crate.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/publish_crate.yml) [![Publish docker image](https://github.com/torrust/torrust-tracker/actions/workflows/publish_docker_image.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/publish_docker_image.yml) [![Test](https://github.com/torrust/torrust-tracker/actions/workflows/test.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/test.yml) [![Test docker build](https://github.com/torrust/torrust-tracker/actions/workflows/test_docker.yml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/test_docker.yml) [![Coverage](https://github.com/torrust/torrust-tracker/actions/workflows/coverage.yaml/badge.svg)](https://github.com/torrust/torrust-tracker/actions/workflows/coverage.yaml) +[![container_wf_b]][container_wf] [![coverage_wf_b]][coverage_wf] [![testing_wf_b]][testing_wf] -Torrust Tracker is a lightweight but incredibly high-performance and feature-rich BitTorrent tracker written in [Rust](https://www.rust-lang.org/). +Torrust Tracker is a lightweight but incredibly high-performance and feature-rich BitTorrent tracker written in [Rust Language][rust]. It aims to provide a reliable and efficient solution for serving torrents to a vast number of peers while maintaining a high level of performance, robustness, extensibility, security, usability and with community-driven development. +_We have a [container guide][containers.md] to get started with Docker or Podman_ + ## Key Features -* [X] Multiple UDP server and HTTP(S) server blocks for socket binding are possible. -* [X] Full IPv4 and IPv6 support for both UDP and HTTP(S). 
-* [X] Private & Whitelisted mode. -* [X] Built-in API. -* [X] Torrent whitelisting. -* [X] Peer authentication using time-bound keys. -* [X] [newTrackon](https://newtrackon.com/) check is supported for both HTTP and UDP, where IPv4 and IPv6 are properly handled. -* [X] SQLite3 and MySQL persistence, loading and saving of the torrent hashes and downloads completed count. -* [X] Comprehensive documentation. -* [X] A complete suite of tests. See [code coverage](https://app.codecov.io/gh/torrust/torrust-tracker) report. +* [x] Multiple UDP server and HTTP(S) server blocks for socket binding are possible. +* [x] Full IPv4 and IPv6 support for both UDP and HTTP(S). +* [x] Private & Whitelisted mode. +* [x] Built-in API. +* [x] Torrent whitelisting. +* [x] Peer authentication using time-bound keys. +* [x] [newTrackon][newtrackon] check is supported for both HTTP and UDP, where IPv4 and IPv6 are properly handled. +* [x] SQLite3 and MySQL persistence, loading and saving of the torrent hashes and downloads completed count. +* [x] Comprehensive documentation. +* [x] A complete suite of tests. See our [code coverage report][coverage]. ## Implemented BEPs -* [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The BitTorrent Protocol. -* [BEP 7](https://www.bittorrent.org/beps/bep_0007.html): IPv6 Support. -* [BEP 15](http://www.bittorrent.org/beps/bep_0015.html): UDP Tracker Protocol for BitTorrent. -* [BEP 23](http://bittorrent.org/beps/bep_0023.html): Tracker Returns Compact Peer Lists. -* [BEP 27](http://bittorrent.org/beps/bep_0027.html): Private Torrents. -* [BEP 48](http://bittorrent.org/beps/bep_0048.html): Tracker Protocol Extension: Scrape. +* [BEP 03]: The BitTorrent Protocol. +* [BEP 07]: IPv6 Support. +* [BEP 15]: UDP Tracker Protocol for BitTorrent. +* [BEP 23]: Tracker Returns Compact Peer Lists. +* [BEP 27]: Private Torrents. +* [BEP 48]: Tracker Protocol Extension: Scrape. 
+ ## Getting Started @@ -35,19 +38,36 @@ Requirements: * Rust Stable `1.68` * You might have problems compiling with a machine or docker container with low resources. It has been tested with docker containers with 6 CPUs, 7.5 GM of memory and 2GB of swap. -You can follow the [documentation](https://docs.rs/torrust-tracker/) to install and use Torrust Tracker in different ways, but if you want to give it a quick try, you can use the following commands: +You can follow the [documentation] to install and use Torrust Tracker in different ways, but if you want to give it a quick try, you can use the following commands: ```s git clone https://github.com/torrust/torrust-tracker.git \ && cd torrust-tracker \ && cargo build --release \ - && mkdir -p ./storage/database \ - && mkdir -p ./storage/ssl_certificates + && mkdir -p ./storage/tracker/lib/database \ + && mkdir -p ./storage/tracker/lib/tls ``` -And then run `cargo run` twice. The first time to generate the `config.toml` file and the second time to run the tracker with the default configuration. +### Configuration + +The [default configuration folder: `/share/default/config`][share.default.config]: + +- Contains the [development default][src.bootstrap.config.default] i.e: [`tracker.development.sqlite3.toml`][tracker.development.sqlite3.toml]. + +- Also contains the container defaults: [`sqlite3`][tracker.container.sqlite3.toml] and [`mysql`][tracker.container.mysql.toml]. + +To override the default configuration there is two options: -After running the tracker these services will be available: +- Configure a different configuration path by setting the [`TORRUST_TRACKER_PATH_CONFIG`][src.bootstrap.config.path.config] environmental variable. + +- Supply the entire configuration via the [`TORRUST_TRACKER_CONFIG`][src.bootstrap.config.config] environmental variable. 
+ + +> NOTE: It is recommended for production you override the `api admin token` by placing your secret in the [`ENV_VAR_API_ADMIN_TOKEN`][src.bootstrap.config.admin.token] environmental variable. + + +### Services +After running the tracker these services will be available (as defined in the default configuration): * UDP tracker: `udp://127.0.0.1:6969/announce`. * HTTP tracker: `http://127.0.0.1:6969/announce`. @@ -55,10 +75,10 @@ After running the tracker these services will be available: ## Documentation -* [Crate documentation](https://docs.rs/torrust-tracker/). -* [API `v1`](https://docs.rs/torrust-tracker/3.0.0-alpha.3/torrust_tracker/servers/apis/v1). -* [HTTP Tracker](https://docs.rs/torrust-tracker/3.0.0-alpha.3/torrust_tracker/servers/http). -* [UDP Tracker](https://docs.rs/torrust-tracker/3.0.0-alpha.3/torrust_tracker/servers/udp). +* [Crate documentation] +* [API `v1`] +* [HTTP Tracker] +* [UDP Tracker] ## Contributing @@ -67,14 +87,65 @@ We welcome contributions from the community! How can you contribute? * Bug reports and feature requests. -* Code contributions. You can start by looking at the issues labeled ["good first issues"](https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22). -* Documentation improvements. Check the [documentation](https://docs.rs/torrust-tracker/) and [API documentation](https://docs.rs/torrust-tracker/3.0.0-alpha.3/torrust_tracker/servers/apis/v1) for typos, errors, or missing information. -* Participation in the community. You can help by answering questions in the [discussions](https://github.com/torrust/torrust-tracker/discussions). +* Code contributions. You can start by looking at the issues labeled "[good first issues]". +* Documentation improvements. Check the [documentation] and [API documentation] for typos, errors, or missing information. +* Participation in the community. You can help by answering questions in the [discussions]. 
## License -The project is licensed under a dual license. See [COPYRIGHT](./COPYRIGHT). +The project is licensed under a dual license. See [COPYRIGHT]. ## Acknowledgments -This project was a joint effort by [Nautilus Cyberneering GmbH](https://nautilus-cyberneering.de/) and [Dutch Bits](https://dutchbits.nl). Also thanks to [Naim A.](https://github.com/naim94a/udpt) and [greatest-ape](https://github.com/greatest-ape/aquatic) for some parts of the code. Further added features and functions thanks to [Power2All](https://github.com/power2all). +This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [Dutch Bits]. Also thanks to [Naim A.] and [greatest-ape] for some parts of the code. Further added features and functions thanks to [Power2All]. + + + +[container_wf]: https://github.com/torrust/torrust-tracker/actions/workflows/container.yaml +[container_wf_b]: https://github.com/torrust/torrust-tracker/actions/workflows/container.yaml/badge.svg +[coverage_wf]: https://github.com/torrust/torrust-tracker/actions/workflows/coverage.yaml +[coverage_wf_b]: https://github.com/torrust/torrust-tracker/actions/workflows/coverage.yaml/badge.svg +[testing_wf]: https://github.com/torrust/torrust-tracker/actions/workflows/testing.yaml +[testing_wf_b]: https://github.com/torrust/torrust-tracker/actions/workflows/testing.yaml/badge.svg + +[rust]: https://www.rust-lang.org/ +[newtrackon]: https://newtrackon.com/ +[coverage]: https://app.codecov.io/gh/torrust/torrust-tracker + +[BEP 03]: https://www.bittorrent.org/beps/bep_0003.html +[BEP 07]: https://www.bittorrent.org/beps/bep_0007.html +[BEP 15]: http://www.bittorrent.org/beps/bep_0015.html +[BEP 23]: http://bittorrent.org/beps/bep_0023.html +[BEP 27]: http://bittorrent.org/beps/bep_0027.html +[BEP 48]: http://bittorrent.org/beps/bep_0048.html + +[containers.md]: ./docs/containers.md + +[share.default.config]: ./share/default/config/ +[tracker.development.sqlite3.toml]: 
./share/default/config/tracker.development.sqlite3.toml +[src.bootstrap.config.default]: ./src/bootstrap/config.rs#L18 +[tracker.container.sqlite3.toml]: ./share/default/config/tracker.container.sqlite3.toml +[tracker.container.mysql.toml]: ./share/default/config/tracker.container.mysql.toml +[share.container.entry_script_sh.default]: ./share/container/entry_script_sh#L10 + +[src.bootstrap.config.path.config]: ./src/bootstrap/config.rs#L15 +[src.bootstrap.config.config]: ./src/bootstrap/config.rs#L11 +[src.bootstrap.config.admin.token]: ./src/bootstrap/config.rs#L12 + +[Crate documentation]: https://docs.rs/torrust-tracker/ +[API `v1`]: https://docs.rs/torrust-tracker/3.0.0-alpha.4/torrust_tracker/servers/apis/v1 +[HTTP Tracker]: https://docs.rs/torrust-tracker/3.0.0-alpha.4/torrust_tracker/servers/http +[UDP Tracker]: https://docs.rs/torrust-tracker/3.0.0-alpha.4/torrust_tracker/servers/udp + +[good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 +[documentation]: https://docs.rs/torrust-tracker/ +[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.4/torrust_tracker/servers/apis/v1 +[discussions]: https://github.com/torrust/torrust-tracker/discussions + +[COPYRIGHT]: ./COPYRIGHT + +[nautilus]: https://nautilus-cyberneering.de/ +[Dutch Bits]: https://dutchbits.nl +[Naim A.]: https://github.com/naim94a/udpt +[greatest-ape]: https://github.com/greatest-ape/aquatic +[Power2All]: https://github.com/power2all diff --git a/bin/install-demo.sh b/bin/install-demo.sh deleted file mode 100755 index 1b829ca1d..000000000 --- a/bin/install-demo.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -# Single command to setup and run the tracker using the pre-built image. - -# Check if 'storage' directory exists -if [ -d "./storage" ]; then - echo "Warning: 'storage' directory already exists. Please remove or rename it before proceeding." 
- exit 1 -fi - -# Check if 'config.toml' file exists in the current directory -if [ -f "./config.toml" ]; then - echo "Warning: 'config.toml' file already exists in the root directory. Please remove or rename it before proceeding." - exit 1 -fi - -# Check if SQLite3 is installed -if ! command -v sqlite3 &> /dev/null; then - echo "Warning: SQLite3 is not installed on your system. Please install it and retry." - exit 1 -fi - -wget https://raw.githubusercontent.com/torrust/torrust-tracker/v3.0.0-alpha.3/config.toml.local -O config.toml \ - && mkdir -p ./storage/database \ - && mkdir -p ./storage/ssl_certificates \ - && touch ./storage/database/data.db \ - && echo ";" | sqlite3 ./storage/database/data.db diff --git a/bin/install.sh b/bin/install.sh deleted file mode 100755 index 82ea940d0..000000000 --- a/bin/install.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -# This script is only intended to be used for local development or testing environments. - -# Generate the default settings file if it does not exist -if ! [ -f "./config.toml" ]; then - cp ./config.toml.local ./config.toml -fi - -# Generate storage directory if it does not exist -mkdir -p "./storage/database" - -# Generate the sqlite database if it does not exist -if ! 
[ -f "./storage/database/data.db" ]; then - # todo: it should get the path from config.toml and only do it when we use sqlite - touch ./storage/database/data.db - echo ";" | sqlite3 ./storage/database/data.db -fi diff --git a/cSpell.json b/cSpell.json index 6a68e045d..fc9db42b7 100644 --- a/cSpell.json +++ b/cSpell.json @@ -1,8 +1,10 @@ { "words": [ + "adduser", "alekitto", "appuser", "Arvid", + "autoclean", "AUTOINCREMENT", "automock", "Avicora", @@ -12,6 +14,7 @@ "bencoded", "beps", "binascii", + "binstall", "Bitflu", "bools", "bufs", @@ -26,12 +29,17 @@ "codegen", "completei", "connectionless", + "Containerfile", + "curr", + "Cyberneering", "datetime", + "distroless", "dockerhub", "downloadedi", "dtolnay", "filesd", "Freebox", + "gecos", "Grcov", "hasher", "hexlify", @@ -45,11 +53,14 @@ "infoschema", "Intermodal", "intervali", + "keyout", "lcov", "leecher", "leechers", "libsqlite", "libtorrent", + "libz", + "LOGNAME", "Lphant", "metainfo", "middlewares", @@ -58,16 +69,21 @@ "myacicontext", "Naim", "nanos", + "newkey", "nextest", "nocapture", + "nologin", + "nonroot", "Norberg", "numwant", "oneshot", "ostr", "Pando", "proot", + "proto", "Quickstart", "Rasterbar", + "realpath", "reannounce", "repr", "reqwest", @@ -80,8 +96,10 @@ "rustfmt", "Rustls", "Seedable", + "serde", "Shareaza", "sharktorrent", + "SHLVL", "socketaddr", "sqllite", "subsec", @@ -89,6 +107,7 @@ "Swiftbit", "taiki", "thiserror", + "tlsv", "Torrentstorm", "torrust", "torrustracker", @@ -100,6 +119,7 @@ "uroot", "Vagaa", "Vuze", + "Werror", "whitespaces", "XBTT", "Xeon", @@ -107,5 +127,10 @@ "Xunlei", "xxxxxxxxxxxxxxxxxxxxd", "yyyyyyyyyyyyyyyyyyyyd" + ], + "enableFiletypes": [ + "dockerfile", + "shellscript", + "toml" ] -} +} \ No newline at end of file diff --git a/compose.yaml b/compose.yaml index 49f3055a8..02f95bccc 100644 --- a/compose.yaml +++ b/compose.yaml @@ -1,12 +1,11 @@ name: torrust services: - tracker: - build: - context: . 
- target: development - user: ${TORRUST_TRACKER_USER_UID:-1000}:${TORRUST_TRACKER_USER_UID:-1000} + image: torrust-tracker:release tty: true + environment: + - TORRUST_TRACKER_DATABASE=${TORRUST_TRACKER_DATABASE:-mysql} + - TORRUST_TRACKER_API_ADMIN_TOKEN=${TORRUST_TRACKER_API_ADMIN_TOKEN:-MyAccessToken} networks: - server_side ports: @@ -14,19 +13,24 @@ services: - 7070:7070 - 1212:1212 volumes: - - ./:/app - - ~/.cargo:/home/appuser/.cargo + - ./storage/tracker/lib:/var/lib/torrust/tracker:Z + - ./storage/tracker/log:/var/log/torrust/tracker:Z + - ./storage/tracker/etc:/etc/torrust/tracker:Z depends_on: - mysql mysql: image: mysql:8.0 - command: '--default-authentication-plugin=mysql_native_password' + command: "--default-authentication-plugin=mysql_native_password" healthcheck: - test: ['CMD-SHELL', 'mysqladmin ping -h 127.0.0.1 --password="$$(cat /run/secrets/db-password)" --silent'] + test: + [ + "CMD-SHELL", + 'mysqladmin ping -h 127.0.0.1 --password="$$(cat /run/secrets/db-password)" --silent', + ] interval: 3s retries: 5 - start_period: 30s + start_period: 30s environment: - MYSQL_ROOT_HOST=% - MYSQL_ROOT_PASSWORD=root_secret_password @@ -44,4 +48,4 @@ networks: server_side: {} volumes: - mysql_data: {} \ No newline at end of file + mysql_data: {} diff --git a/contrib/bencode/src/reference/decode.rs b/contrib/bencode/src/reference/decode.rs index d2aa180f8..d18dffda0 100644 --- a/contrib/bencode/src/reference/decode.rs +++ b/contrib/bencode/src/reference/decode.rs @@ -1,3 +1,5 @@ +#![allow(clippy::should_panic_without_expect)] + use std::collections::btree_map::Entry; use std::collections::BTreeMap; use std::str::{self}; diff --git a/contrib/dev-tools/containers/docker-build.sh b/contrib/dev-tools/containers/docker-build.sh new file mode 100755 index 000000000..39143910f --- /dev/null +++ b/contrib/dev-tools/containers/docker-build.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +echo "Building docker image ..." 
+ +docker build --target release --tag torrust-tracker:release --file Containerfile . diff --git a/contrib/dev-tools/containers/docker-install.sh b/contrib/dev-tools/containers/docker-install.sh new file mode 100755 index 000000000..6034e8233 --- /dev/null +++ b/contrib/dev-tools/containers/docker-install.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +./contrib/dev-tools/containers/docker-build.sh diff --git a/contrib/dev-tools/containers/docker-run-local.sh b/contrib/dev-tools/containers/docker-run-local.sh new file mode 100755 index 000000000..05e23f4a0 --- /dev/null +++ b/contrib/dev-tools/containers/docker-run-local.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/ + +docker run -it \ + --env USER_ID"$(id -u)" \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --volume ./storage/tracker/lib:/var/lib/torrust/tracker:rw \ + --volume ./storage/tracker/log:/var/log/torrust/tracker:rw \ + --volume ./storage/tracker/etc:/etc/torrust/tracker:rw \ + torrust-tracker:release diff --git a/contrib/dev-tools/containers/docker-run-public.sh b/contrib/dev-tools/containers/docker-run-public.sh new file mode 100755 index 000000000..73bcf600a --- /dev/null +++ b/contrib/dev-tools/containers/docker-run-public.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/ + +docker run -it \ + --env USER_ID"$(id -u)" \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --volume ./storage/tracker/lib:/var/lib/torrust/tracker:rw \ + --volume ./storage/tracker/log:/var/log/torrust/tracker:rw \ + --volume ./storage/tracker/etc:/etc/torrust/tracker:rw \ + torrust/tracker:latest diff --git a/contrib/dev-tools/init/install-local.sh b/contrib/dev-tools/init/install-local.sh new file mode 100755 index 000000000..f9806a0b8 --- /dev/null +++ b/contrib/dev-tools/init/install-local.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# 
This script is only intended to be used for local development or testing environments. + +# Generate storage directory if it does not exist +mkdir -p ./storage/tracker/lib/database + +# Generate the sqlite database if it does not exist +if ! [ -f "./storage/tracker/lib/database/sqlite3.db" ]; then + # todo: it should get the path from tracker.toml and only do it when we use sqlite + sqlite3 ./storage/tracker/lib/database/sqlite3.db "VACUUM;" +fi diff --git a/contrib/dev-tools/su-exec/LICENSE b/contrib/dev-tools/su-exec/LICENSE new file mode 100644 index 000000000..f623b904e --- /dev/null +++ b/contrib/dev-tools/su-exec/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 ncopa + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/contrib/dev-tools/su-exec/Makefile b/contrib/dev-tools/su-exec/Makefile new file mode 100644 index 000000000..bda768957 --- /dev/null +++ b/contrib/dev-tools/su-exec/Makefile @@ -0,0 +1,17 @@ + +CFLAGS ?= -Wall -Werror -g +LDFLAGS ?= + +PROG := su-exec +SRCS := $(PROG).c + +all: $(PROG) + +$(PROG): $(SRCS) + $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) + +$(PROG)-static: $(SRCS) + $(CC) $(CFLAGS) -o $@ $^ -static $(LDFLAGS) + +clean: + rm -f $(PROG) $(PROG)-static diff --git a/contrib/dev-tools/su-exec/README.md b/contrib/dev-tools/su-exec/README.md new file mode 100644 index 000000000..2b0517377 --- /dev/null +++ b/contrib/dev-tools/su-exec/README.md @@ -0,0 +1,46 @@ +# su-exec +switch user and group id, setgroups and exec + +## Purpose + +This is a simple tool that will simply execute a program with different +privileges. The program will be executed directly and not run as a child, +like su and sudo does, which avoids TTY and signal issues (see below). + +Notice that su-exec depends on being run by the root user, non-root +users do not have permission to change uid/gid. + +## Usage + +```shell +su-exec user-spec command [ arguments... ] +``` + +`user-spec` is either a user name (e.g. `nobody`) or user name and group +name separated with colon (e.g. `nobody:ftp`). Numeric uid/gid values +can be used instead of names. Example: + +```shell +$ su-exec apache:1000 /usr/sbin/httpd -f /opt/www/httpd.conf +``` + +## TTY & parent/child handling + +Notice how `su` will make `ps` be a child of a shell while `su-exec` +just executes `ps` directly. + +```shell +$ docker run -it --rm alpine:edge su postgres -c 'ps aux' +PID USER TIME COMMAND + 1 postgres 0:00 ash -c ps aux + 12 postgres 0:00 ps aux +$ docker run -it --rm -v $PWD/su-exec:/sbin/su-exec:ro alpine:edge su-exec postgres ps aux +PID USER TIME COMMAND + 1 postgres 0:00 ps aux +``` + +## Why reinvent gosu? 
+ +This does more or less exactly the same thing as [gosu](https://github.com/tianon/gosu) +but it is only 10kb instead of 1.8MB. + diff --git a/contrib/dev-tools/su-exec/su-exec.c b/contrib/dev-tools/su-exec/su-exec.c new file mode 100644 index 000000000..499071c6e --- /dev/null +++ b/contrib/dev-tools/su-exec/su-exec.c @@ -0,0 +1,109 @@ +/* set user and group id and exec */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +static char *argv0; + +static void usage(int exitcode) +{ + printf("Usage: %s user-spec command [args]\n", argv0); + exit(exitcode); +} + +int main(int argc, char *argv[]) +{ + char *user, *group, **cmdargv; + char *end; + + uid_t uid = getuid(); + gid_t gid = getgid(); + + argv0 = argv[0]; + if (argc < 3) + usage(0); + + user = argv[1]; + group = strchr(user, ':'); + if (group) + *group++ = '\0'; + + cmdargv = &argv[2]; + + struct passwd *pw = NULL; + if (user[0] != '\0') { + uid_t nuid = strtol(user, &end, 10); + if (*end == '\0') + uid = nuid; + else { + pw = getpwnam(user); + if (pw == NULL) + err(1, "getpwnam(%s)", user); + } + } + if (pw == NULL) { + pw = getpwuid(uid); + } + if (pw != NULL) { + uid = pw->pw_uid; + gid = pw->pw_gid; + } + + setenv("HOME", pw != NULL ? 
pw->pw_dir : "/", 1); + + if (group && group[0] != '\0') { + /* group was specified, ignore grouplist for setgroups later */ + pw = NULL; + + gid_t ngid = strtol(group, &end, 10); + if (*end == '\0') + gid = ngid; + else { + struct group *gr = getgrnam(group); + if (gr == NULL) + err(1, "getgrnam(%s)", group); + gid = gr->gr_gid; + } + } + + if (pw == NULL) { + if (setgroups(1, &gid) < 0) + err(1, "setgroups(%i)", gid); + } else { + int ngroups = 0; + gid_t *glist = NULL; + + while (1) { + int r = getgrouplist(pw->pw_name, gid, glist, &ngroups); + + if (r >= 0) { + if (setgroups(ngroups, glist) < 0) + err(1, "setgroups"); + break; + } + + glist = realloc(glist, ngroups * sizeof(gid_t)); + if (glist == NULL) + err(1, "malloc"); + } + } + + if (setgid(gid) < 0) + err(1, "setgid(%i)", gid); + + if (setuid(uid) < 0) + err(1, "setuid(%i)", uid); + + execvp(cmdargv[0], cmdargv); + err(1, "%s", cmdargv[0]); + + return 1; +} diff --git a/docker/README.md b/docker/README.md deleted file mode 100644 index 207dadbbc..000000000 --- a/docker/README.md +++ /dev/null @@ -1,289 +0,0 @@ -# Docker - -## Requirements - -- Docker version 20.10.21 -- You need to create the `storage` directory with this structure and files: - -```s -$ tree storage/ -storage/ -├── database -│   └── data.db -└── ssl_certificates - ├── localhost.crt - └── localhost.key -``` - -> NOTE: you only need the `ssl_certificates` directory and certificates in case you have enabled SSL for the one HTTP tracker or the API. 
- -## Demo environment - -You can run a single command to setup the tracker with the default -configuration and run it using the pre-built public docker image: - -```s -curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/torrust/torrust-tracker/v3.0.0-alpha.3/bin/install-demo.sh | bash -export TORRUST_TRACKER_USER_UID=1000 \ - && docker run -it \ - --user="$TORRUST_TRACKER_USER_UID" \ - --publish 6969:6969/udp \ - --publish 7070:7070/tcp \ - --publish 1212:1212/tcp \ - --volume "$(pwd)/storage":"/app/storage" \ - --volume "$(pwd)/config.toml":"/app/config.toml":ro \ - torrust/tracker:3.0.0-alpha.3 -``` - -This is intended to be used to run a quick demo of the application. - -## Dev environment - -When using docker you have to bind the exposed ports to the wildcard address `0.0.0.0`, so you can access the application from the host machine. - -The default API configuration uses `127.0.0.1`, so you have to change it to: - -```toml -[http_api] -bind_address = "0.0.0.0:1212" -``` - -Otherwise, the API will be only accessible from inside the container. - -### With docker - -Build and run locally. You can build the docker image locally: - -```s -docker context use default -export TORRUST_TRACKER_USER_UID=1000 -./docker/bin/build.sh $TORRUST_TRACKER_USER_UID -./bin/install.sh -./docker/bin/run-local-image.sh $TORRUST_TRACKER_USER_UID -``` - -Or you can run locally using the pre-built docker image: - -```s -docker context use default -export TORRUST_TRACKER_USER_UID=1000 -./bin/install.sh -./docker/bin/run-public-image.sh $TORRUST_TRACKER_USER_UID -``` - -In both cases, you will need to: - -- Create the SQLite DB (`data.db`) if you are going to use SQLite. -- Create the configuration file (`config.toml`) before running the tracker. -- Replace the user UID (`1000`) with yours. - -> NOTICE: that the `./bin/install.sh` can setup the application for you. But it -uses a predefined configuration. 
- -Remember to switch to your default docker context `docker context use default` -and to change the API default configuration in `config.toml` to make it -available from the host machine: - -```toml -[http_api] -bind_address = "0.0.0.0:1212" -``` - -### With docker-compose - -The docker-compose configuration includes the MySQL service configuration. If you want to use MySQL instead of SQLite you have to change your `config.toml` configuration: - -```toml -db_driver = "MySQL" -db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" -``` - -If you want to inject an environment variable into docker-compose you can use the file `.env`. There is a template `.env.local`. - -Build and run it locally: - -```s -docker compose up --build -``` - -After running the "up" command you will have two running containers: - -```s -$ docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -06feacb91a9e torrust-tracker "cargo run" 18 minutes ago Up 4 seconds 0.0.0.0:1212->1212/tcp, :::1212->1212/tcp, 0.0.0.0:7070->7070/tcp, :::7070->7070/tcp, 0.0.0.0:6969->6969/udp, :::6969->6969/udp torrust-tracker-1 -34d29e792ee2 mysql:8.0 "docker-entrypoint.s…" 18 minutes ago Up 5 seconds (healthy) 0.0.0.0:3306->3306/tcp, :::3306->3306/tcp, 33060/tcp torrust-mysql-1 -``` - -And you should be able to use the application, for example making a request to the API: - - - -You can stop the containers with: - -```s -docker compose down -``` - -Additionally, you can delete all resources (containers, volumes, networks) with: - -```s -docker compose down -v -``` - -### Access Mysql with docker - -These are some useful commands for MySQL. - -Open a shell in the MySQL container using docker or docker-compose. 
- -```s -docker exec -it torrust-mysql-1 /bin/bash -docker compose exec mysql /bin/bash -``` - -Connect to MySQL from inside the MySQL container or from the host: - -```s -mysql -h127.0.0.1 -uroot -proot_secret_password -``` - -The when MySQL container is started the first time, it creates the database, user, and permissions needed. -If you see the error "Host is not allowed to connect to this MySQL server" you can check that users have the right permissions in the database. Make sure the user `root` and `db_user` can connect from any host (`%`). - -```s -mysql> SELECT host, user FROM mysql.user; -+-----------+------------------+ -| host | user | -+-----------+------------------+ -| % | db_user | -| % | root | -| localhost | mysql.infoschema | -| localhost | mysql.session | -| localhost | mysql.sys | -| localhost | root | -+-----------+------------------+ -6 rows in set (0.00 sec) -``` - -If the database, user or permissions are not created the reason could be the MySQL container volume can be corrupted. Delete it and start again the containers. - -### SSL Certificates - -You can use a certificate for localhost. You can create your [localhost certificate](https://letsencrypt.org/docs/certificates-for-localhost/#making-and-trusting-your-own-certificates) and use it in the `storage` folder and the configuration file (`config.toml`). For example: - -The storage folder must contain your certificates: - -```s -$ tree storage/ -storage/ -├── database -│   └── data.db -└── ssl_certificates - ├── localhost.crt - └── localhost.key -``` - -You have not enabled it in your `config.toml` file: - -```toml -... 
-[[http_trackers]] -enabled = true -bind_address = "0.0.0.0:7070" -ssl_enabled = true -ssl_cert_path = "./storage/ssl_certificates/localhost.crt" -ssl_key_path = "./storage/ssl_certificates/localhost.key" - -[http_api] -enabled = true -bind_address = "0.0.0.0:1212" -ssl_enabled = true -ssl_cert_path = "./storage/ssl_certificates/localhost.crt" -ssl_key_path = "./storage/ssl_certificates/localhost.key" -... -``` - -> NOTE: you can enable it independently for each HTTP tracker or the API. - -If you enable the SSL certificate for the API, for example, you can load the API with this URL: - - - -## Prod environment - -In this section, you will learn how to deploy the tracker to a single docker container in Azure Container Instances. - -> NOTE: Azure Container Instances is a solution when you want to run an isolated container. If you need full container orchestration, including service discovery across multiple containers, automatic scaling, and coordinated application upgrades, we recommend [Kubernetes](https://kubernetes.io/). - -Deploy to Azure Container Instance following [docker documentation](https://docs.docker.com/cloud/aci-integration/). - -You have to create the ACI context and the storage: - -```s -docker context create aci myacicontext -docker context use myacicontext -docker volume create test-volume --storage-account torrustracker -``` - -You need to create all the files needed by the application in the storage dir `storage/database`. - -And finally, you can run the container: - -```s -docker run \ - --publish 6969:6969/udp \ - --publish 7070:7070/tcp \ - --publish 1212:1212/tcp \ - --volume torrustracker/test-volume:/app/storage \ - registry.hub.docker.com/torrust/tracker:latest -``` - -Detach from container logs when the container starts. By default, the command line stays attached and follows container logs. 
- -```s -docker run \ - --detach - --publish 6969:6969/udp \ - --publish 7070:7070/tcp \ - --publish 1212:1212/tcp \latest - --volume torrustracker/test-volume:/app/storage \ - registry.hub.docker.com/torrust/tracker:latest -``` - -You should see something like this: - -```s -[+] Running 2/2 - ⠿ Group intelligent-hawking Created 5.0s - ⠿ intelligent-hawking Created 41.7s -2022-12-08T18:39:19.697869300+00:00 [torrust_tracker::logging][INFO] logging initialized. -2022-12-08T18:39:19.712651100+00:00 [torrust_tracker::jobs::udp_tracker][INFO] Starting UDP server on: 0.0.0.0:6969 -2022-12-08T18:39:19.712792700+00:00 [torrust_tracker::jobs::tracker_api][INFO] Starting Torrust API server on: 0.0.0.0:1212 -2022-12-08T18:39:19.725124+00:00 [torrust_tracker::jobs::tracker_api][INFO] Torrust API server started -``` - -You can see the container with: - -```s -$ docker ps -CONTAINER ID IMAGE COMMAND STATUS PORTS -intelligent-hawking registry.hub.docker.com/torrust/tracker:latest Running 4.236.213.57:6969->6969/udp, 4.236.213.57:1212->1212/tcp -``` - -After a while, you can use the tracker API `http://4.236.213.57:1212/api/v1/stats?token=MyAccessToken` and the UDP tracker with your BitTorrent client using this tracker announce URL `udp://4.236.213.57:6969`. - -> NOTES: -> -> - [There is no support for mounting a single file](https://docs.docker.com/cloud/aci-container-features/#persistent-volumes), or mounting a subfolder from an `Azure File Share`. -> - [ACI does not allow port mapping](https://docs.docker.com/cloud/aci-integration/#exposing-ports). -> - [Azure file share volume mount requires the Linux container run as root](https://learn.microsoft.com/en-us/azure/container-instances/container-instances-volume-azure-files#limitations). -> - It can take some minutes until the public IP for the ACI container is available. -> - You can use the Azure web UI to download files from the storage. For example, the SQLite database. 
-> - [It seems you can only expose web interfaces on port 80 on Azure Container Instances](https://stackoverflow.com/a/56768087/3012842). Not official documentation! - -## Links - -- [Deploying Docker containers on Azure](https://docs.docker.com/cloud/aci-integration/). -- [Docker run options for ACI containers](https://docs.docker.com/cloud/aci-container-features/). -- [Quickstart: Deploy a container instance in Azure using the Docker CLI](https://learn.microsoft.com/en-us/azure/container-instances/quickstart-docker-cli). diff --git a/docker/bin/build.sh b/docker/bin/build.sh deleted file mode 100755 index d77d1ad34..000000000 --- a/docker/bin/build.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -TORRUST_TRACKER_USER_UID=${TORRUST_TRACKER_USER_UID:-1000} -TORRUST_TRACKER_RUN_AS_USER=${TORRUST_TRACKER_RUN_AS_USER:-appuser} - -echo "Building docker image ..." -echo "TORRUST_TRACKER_USER_UID: $TORRUST_TRACKER_USER_UID" -echo "TORRUST_TRACKER_RUN_AS_USER: $TORRUST_TRACKER_RUN_AS_USER" - -docker build \ - --build-arg UID="$TORRUST_TRACKER_USER_UID" \ - --build-arg RUN_AS_USER="$TORRUST_TRACKER_RUN_AS_USER" \ - -t torrust-tracker . 
diff --git a/docker/bin/install.sh b/docker/bin/install.sh deleted file mode 100755 index a58969378..000000000 --- a/docker/bin/install.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -./docker/bin/build.sh -./bin/install.sh diff --git a/docker/bin/run-local-image.sh b/docker/bin/run-local-image.sh deleted file mode 100755 index 86465baeb..000000000 --- a/docker/bin/run-local-image.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -TORRUST_TRACKER_USER_UID=${TORRUST_TRACKER_USER_UID:-1000} -TORRUST_TRACKER_CONFIG=$(cat config.toml) - -docker run -it \ - --user="$TORRUST_TRACKER_USER_UID" \ - --publish 6969:6969/udp \ - --publish 7070:7070/tcp \ - --publish 1212:1212/tcp \ - --env TORRUST_TRACKER_CONFIG="$TORRUST_TRACKER_CONFIG" \ - --volume "$(pwd)/storage":"/app/storage" \ - torrust-tracker diff --git a/docker/bin/run-public-image.sh b/docker/bin/run-public-image.sh deleted file mode 100755 index 50407f91b..000000000 --- a/docker/bin/run-public-image.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -TORRUST_TRACKER_USER_UID=${TORRUST_TRACKER_USER_UID:-1000} -TORRUST_TRACKER_CONFIG=$(cat config.toml) - -docker run -it \ - --user="$TORRUST_TRACKER_USER_UID" \ - --publish 6969:6969/udp \ - --publish 7070:7070/tcp \ - --publish 1212:1212/tcp \ - --env TORRUST_TRACKER_CONFIG="$TORRUST_TRACKER_CONFIG" \ - --volume "$(pwd)/storage":"/app/storage" \ - torrust/tracker \ No newline at end of file diff --git a/docs/containers.md b/docs/containers.md new file mode 100644 index 000000000..b9aa05a7a --- /dev/null +++ b/docs/containers.md @@ -0,0 +1,428 @@ +# Containers (Docker or Podman) + +## Demo environment +It is simple to setup the tracker with the default +configuration and run it using the pre-built public docker image: + + +With Docker: + +```sh +docker run -it torrust/tracker:latest +``` + +or with Podman: + +```sh +podman run -it torrust/tracker:latest +``` + + +## Requirements +- Tested with recent versions of Docker or Podman. 
+
+## Volumes
+The [Containerfile](../Containerfile) (i.e. the Dockerfile) defines three volumes:
+
+```Dockerfile
+VOLUME ["/var/lib/torrust/tracker","/var/log/torrust/tracker","/etc/torrust/tracker"]
+```
+
+When instancing the container image with the `docker run` or `podman run` command, we map these volumes to the local storage:
+
+```s
+./storage/tracker/lib -> /var/lib/torrust/tracker
+./storage/tracker/log -> /var/log/torrust/tracker
+./storage/tracker/etc -> /etc/torrust/tracker
+```
+
+> NOTE: You can adjust this mapping for your preference, however this mapping is the default in our guides and scripts.
+
+### Pre-Create Host-Mapped Folders:
+Please run this command where you wish to run the container:
+
+```sh
+mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/
+```
+
+### Matching Ownership ID's of Host Storage and Container Volumes
+It is important that the `torrust` user has the same uid `$(id -u)` as the host mapped folders. Our [entry script](../share/container/entry_script_sh), installed to `/usr/local/bin/entry.sh` inside the container, switches to the `torrust` user created based upon the `USER_ID` environmental variable.
+
+When running the container, you may use the `--env USER_ID="$(id -u)"` argument that gets the current user-id and passes it to the container.
+
+### Mapped Tree Structure
+Using the standard mapping defined above produces the following mapped tree:
+
+```s
+storage/tracker/
+├── lib
+│   ├── database
+│   │   └── sqlite3.db => /var/lib/torrust/tracker/database/sqlite3.db [auto populated]
+│   └── tls
+│       ├── localhost.crt => /var/lib/torrust/tracker/tls/localhost.crt [user supplied]
+│       └── localhost.key => /var/lib/torrust/tracker/tls/localhost.key [user supplied]
+├── log => /var/log/torrust/tracker (future use)
+└── etc
+    └── tracker.toml => /etc/torrust/tracker/tracker.toml [auto populated]
+```
+
+> NOTE: you only need the `tls` directory and certificates in case you have enabled SSL.
+ +## Building the Container + +### Clone and Change into Repository + +```sh +# Inside your dev folder +git clone https://github.com/torrust/torrust-tracker.git; cd torrust-tracker +``` + +### (Docker) Setup Context +Before starting, if you are using docker, it is helpful to reset the context to the default: + +```sh +docker context use default +``` + +### (Docker) Build + +```sh +# Release Mode +docker build --target release --tag torrust-tracker:release --file Containerfile . + +# Debug Mode +docker build --target debug --tag torrust-tracker:debug --file Containerfile . +``` + +### (Podman) Build + +```sh +# Release Mode +podman build --target release --tag torrust-tracker:release --file Containerfile . + +# Debug Mode +podman build --target debug --tag torrust-tracker:debug --file Containerfile . +``` + +## Running the Container + +### Basic Run +No arguments are needed for simply checking the container image works: + +#### (Docker) Run Basic + +```sh +# Release Mode +docker run -it torrust-tracker:release + +# Debug Mode +docker run -it torrust-tracker:debug +``` +#### (Podman) Run Basic + +```sh +# Release Mode +podman run -it torrust-tracker:release + +# Debug Mode +podman run -it torrust-tracker:debug +``` + +### Arguments +The arguments need to be placed before the image tag. i.e. + +`run [arguments] torrust-tracker:release` + +#### Environmental Variables: +Environmental variables are loaded through the `--env`, in the format `--env VAR="value"`. + +The following environmental variables can be set: + +- `TORRUST_TRACKER_PATH_CONFIG` - The in-container path to the tracker configuration file, (default: `"/etc/torrust/tracker/tracker.toml"`). +- `TORRUST_TRACKER_API_ADMIN_TOKEN` - Override of the admin token. If set, this value overrides any value set in the config. +- `TORRUST_TRACKER_DATABASE` - The database type used for the container, (options: `sqlite3`, `mysql`, default `sqlite3`). 
Please Note: This does not override the database configuration within the `.toml` config file.
+- `TORRUST_TRACKER_CONFIG` - Load the config from this environmental variable instead of from a file, (i.e: `TORRUST_TRACKER_CONFIG=$(cat tracker-tracker.toml)`).
+- `USER_ID` - The user id for the runtime created `torrust` user. Please Note: This user id should match the ownership of the host-mapped volumes, (default `1000`).
+- `UDP_PORT` - The port for the UDP tracker. This should match the port used in the configuration, (default `6969`).
+- `HTTP_PORT` - The port for the HTTP tracker. This should match the port used in the configuration, (default `7070`).
+- `API_PORT` - The port for the tracker API. This should match the port used in the configuration, (default `1212`).
+
+
+### Sockets
+Socket ports used internally within the container can be mapped to with the `--publish` argument.
+
+The format is: `--publish [optional_host_ip]:[host_port]:[container_port]/[optional_protocol]`, for example: `--publish 127.0.0.1:8080:80/tcp`.
+
+The default ports can be mapped with the following:
+
+```s
+--publish 0.0.0.0:7070:7070/tcp \
+--publish 0.0.0.0:6969:6969/udp \
+--publish 0.0.0.0:1212:1212/tcp \
+```
+
+> NOTE: Inside the container it is necessary to expose a socket with the wildcard address `0.0.0.0` so that it may be accessible from the host. Verify in the configuration that the sockets bind to the wildcard address.
+
+### Volumes
+By default the container will use internal volumes for `/var/lib/torrust/tracker`, `/var/log/torrust/tracker`, and `/etc/torrust/tracker`, however for better administration it is good to make these volumes host-mapped.
+
+The argument to host-map volumes is `--volume`, with the format: `--volume=[host-src:]container-dest[:]`.
+
+The default mapping can be supplied with the following arguments:
+
+```s
+--volume ./storage/tracker/lib:/var/lib/torrust/tracker:Z \
+--volume ./storage/tracker/log:/var/log/torrust/tracker:Z \
+--volume ./storage/tracker/etc:/etc/torrust/tracker:Z \
+```
+
+
+Please note the `:Z` at the end of the podman `--volume` mapping arguments; this is to give read-write permission on SELinux enabled systems. If this doesn't work on your system, you can use `:rw` instead.
+
+## Complete Example:
+
+### With Docker
+
+```sh
+## Setup Docker Default Context
+docker context use default
+
+## Build Container Image
+docker build --target release --tag torrust-tracker:release --file Containerfile .
+
+## Setup Mapped Volumes
+mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/
+
+## Run Torrust Tracker Container Image
+docker run -it \
+    --env TORRUST_TRACKER_API_ADMIN_TOKEN="MySecretToken" \
+    --env USER_ID="$(id -u)" \
+    --publish 0.0.0.0:7070:7070/tcp \
+    --publish 0.0.0.0:6969:6969/udp \
+    --publish 0.0.0.0:1212:1212/tcp \
+    --volume ./storage/tracker/lib:/var/lib/torrust/tracker:Z \
+    --volume ./storage/tracker/log:/var/log/torrust/tracker:Z \
+    --volume ./storage/tracker/etc:/etc/torrust/tracker:Z \
+    torrust-tracker:release
+```
+
+### With Podman
+
+```sh
+## Build Container Image
+podman build --target release --tag torrust-tracker:release --file Containerfile .
+
+## Setup Mapped Volumes
+mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/
+
+## Run Torrust Tracker Container Image
+podman run -it \
+    --env TORRUST_TRACKER_API_ADMIN_TOKEN="MySecretToken" \
+    --env USER_ID="$(id -u)" \
+    --publish 0.0.0.0:7070:7070/tcp \
+    --publish 0.0.0.0:6969:6969/udp \
+    --publish 0.0.0.0:1212:1212/tcp \
+    --volume ./storage/tracker/lib:/var/lib/torrust/tracker:Z \
+    --volume ./storage/tracker/log:/var/log/torrust/tracker:Z \
+    --volume ./storage/tracker/etc:/etc/torrust/tracker:Z \
+    torrust-tracker:release
+```
+
+## Docker Compose
+
+The docker-compose configuration includes the MySQL service configuration. If you want to use MySQL instead of SQLite you should verify the `/etc/torrust/tracker/tracker.toml` (i.e. `./storage/tracker/etc/tracker.toml`) configuration:
+
+```toml
+db_driver = "MySQL"
+db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker"
+```
+
+### Build and Run:
+
+```sh
+docker build --target release --tag torrust-tracker:release --file Containerfile .
+ +mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/ + +USER_ID=$(id -u) \ + TORRUST_TRACKER_API_ADMIN_TOKEN="MySecretToken" \ + docker compose up --build +``` + +After running the `compose up` command you will have two running containers: + +```s +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +06feacb91a9e torrust-tracker "cargo run" 18 minutes ago Up 4 seconds 0.0.0.0:1212->1212/tcp, :::1212->1212/tcp, 0.0.0.0:7070->7070/tcp, :::7070->7070/tcp, 0.0.0.0:6969->6969/udp, :::6969->6969/udp torrust-tracker-1 +34d29e792ee2 mysql:8.0 "docker-entrypoint.s…" 18 minutes ago Up 5 seconds (healthy) 0.0.0.0:3306->3306/tcp, :::3306->3306/tcp, 33060/tcp torrust-mysql-1 +``` + +And you should be able to use the application, for example making a request to the API: + + + +You can stop the containers with: + +```s +docker compose down +``` + +Additionally, you can delete all resources (containers, volumes, networks) with: + +```s +docker compose down -v +``` + +### Access Mysql with docker + +These are some useful commands for MySQL. + +Open a shell in the MySQL container using docker or docker-compose. + +```s +docker exec -it torrust-mysql-1 /bin/bash +docker compose exec mysql /bin/bash +``` + +Connect to MySQL from inside the MySQL container or from the host: + +```s +mysql -h127.0.0.1 -uroot -proot_secret_password +``` + +The when MySQL container is started the first time, it creates the database, user, and permissions needed. +If you see the error "Host is not allowed to connect to this MySQL server" you can check that users have the right permissions in the database. Make sure the user `root` and `db_user` can connect from any host (`%`). 
+ +```s +mysql> SELECT host, user FROM mysql.user; ++-----------+------------------+ +| host | user | ++-----------+------------------+ +| % | db_user | +| % | root | +| localhost | mysql.infoschema | +| localhost | mysql.session | +| localhost | mysql.sys | +| localhost | root | ++-----------+------------------+ +6 rows in set (0.00 sec) +``` + +If the database, user or permissions are not created the reason could be the MySQL container volume can be corrupted. Delete it and start again the containers. + +### SSL Certificates + +You can use a certificate for localhost. You can create your [localhost certificate](https://letsencrypt.org/docs/certificates-for-localhost/#making-and-trusting-your-own-certificates) and use it in the `storage` folder and the configuration file (`tracker.toml`). For example: + +The storage folder must contain your certificates: + +```s +storage/tracker/lib/tls + ├── localhost.crt + └── localhost.key +``` + +You have not enabled it in your `tracker.toml` file: + +```toml + +[[http_trackers]] +# ... +ssl_enabled = true +# ... + +[http_api] +# ... +ssl_enabled = true +# ... + +``` + +> NOTE: you can enable it independently for each HTTP tracker or the API. + +If you enable the SSL certificate for the API, for example, you can load the API with this URL: + + + +## Prod environment + +In this section, you will learn how to deploy the tracker to a single docker container in Azure Container Instances. + +> NOTE: Azure Container Instances is a solution when you want to run an isolated container. If you need full container orchestration, including service discovery across multiple containers, automatic scaling, and coordinated application upgrades, we recommend [Kubernetes](https://kubernetes.io/). + +Deploy to Azure Container Instance following [docker documentation](https://docs.docker.com/cloud/aci-integration/). 
+
+You have to create the ACI context and the storage:
+
+```s
+docker context create aci myacicontext
+docker context use myacicontext
+docker volume create test-volume --storage-account torrustracker
+```
+
+You need to create all the files needed by the application in the storage dir `storage/lib/database`.
+
+And finally, you can run the container:
+
+```s
+docker run \
+    --publish 6969:6969/udp \
+    --publish 7070:7070/tcp \
+    --publish 1212:1212/tcp \
+    --volume torrustracker/lib:/var/lib/torrust/tracker:rw \
+    --volume torrustracker/log:/var/log/torrust/tracker:rw \
+    --volume torrustracker/etc:/etc/torrust/tracker:rw \
+    registry.hub.docker.com/torrust/tracker:latest
+```
+
+Detach from container logs when the container starts. By default, the command line stays attached and follows container logs.
+
+```s
+docker run \
+    --detach \
+    --publish 6969:6969/udp \
+    --publish 7070:7070/tcp \
+    --publish 1212:1212/tcp \
+    --volume torrustracker/lib:/var/lib/torrust/tracker:rw \
+    --volume torrustracker/log:/var/log/torrust/tracker:rw \
+    --volume torrustracker/etc:/etc/torrust/tracker:rw \
+    registry.hub.docker.com/torrust/tracker:latest
+```
+
+You should see something like this:
+
+```s
+[+] Running 2/2
+ ⠿ Group intelligent-hawking  Created  5.0s
+ ⠿ intelligent-hawking  Created  41.7s
+2022-12-08T18:39:19.697869300+00:00 [torrust_tracker::logging][INFO] logging initialized.
+2022-12-08T18:39:19.712651100+00:00 [torrust_tracker::jobs::udp_tracker][INFO] Starting UDP server on: 0.0.0.0:6969 +2022-12-08T18:39:19.712792700+00:00 [torrust_tracker::jobs::tracker_api][INFO] Starting Torrust API server on: 0.0.0.0:1212 +2022-12-08T18:39:19.725124+00:00 [torrust_tracker::jobs::tracker_api][INFO] Torrust API server started +``` + +You can see the container with: + +```s +$ docker ps +CONTAINER ID IMAGE COMMAND STATUS PORTS +intelligent-hawking registry.hub.docker.com/torrust/tracker:latest Running 4.236.213.57:6969->6969/udp, 4.236.213.57:1212->1212/tcp +``` + +After a while, you can use the tracker API `http://4.236.213.57:1212/api/v1/stats?token=MyAccessToken` and the UDP tracker with your BitTorrent client using this tracker announce URL `udp://4.236.213.57:6969`. + +> NOTES: +> +> - [There is no support for mounting a single file](https://docs.docker.com/cloud/aci-container-features/#persistent-volumes), or mounting a subfolder from an `Azure File Share`. +> - [ACI does not allow port mapping](https://docs.docker.com/cloud/aci-integration/#exposing-ports). +> - [Azure file share volume mount requires the Linux container run as root](https://learn.microsoft.com/en-us/azure/container-instances/container-instances-volume-azure-files#limitations). +> - It can take some minutes until the public IP for the ACI container is available. +> - You can use the Azure web UI to download files from the storage. For example, the SQLite database. +> - [It seems you can only expose web interfaces on port 80 on Azure Container Instances](https://stackoverflow.com/a/56768087/3012842). Not official documentation! + +## Links + +- [Deploying Docker containers on Azure](https://docs.docker.com/cloud/aci-integration/). +- [Docker run options for ACI containers](https://docs.docker.com/cloud/aci-container-features/). 
+- [Quickstart: Deploy a container instance in Azure using the Docker CLI](https://learn.microsoft.com/en-us/azure/container-instances/quickstart-docker-cli). diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 9b91534e9..e3ca1d932 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -14,8 +14,8 @@ config = "0.13" toml = "0.7" log = { version = "0.4", features = ["release_max_level_info"] } thiserror = "1.0" -torrust-tracker-primitives = { version = "3.0.0-alpha.3", path = "../primitives" } -torrust-tracker-located-error = { version = "3.0.0-alpha.3", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.4", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.4", path = "../located-error" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 6de0e3ed7..059316a26 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -4,7 +4,7 @@ //! Torrust Tracker, which is a `BitTorrent` tracker server. //! //! The configuration is loaded from a [TOML](https://toml.io/en/) file -//! `config.toml` in the project root folder or from an environment variable +//! `tracker.toml` in the project root folder or from an environment variable //! with the same content as the file. //! //! When you run the tracker without a configuration file, a new one will be @@ -67,7 +67,7 @@ //! storage/ //! ├── database //! │ └── data.db -//! └── ssl_certificates +//! └── tls //! ├── localhost.crt //! └── localhost.key //! ``` @@ -176,14 +176,14 @@ //! [[http_trackers]] //! enabled = true //! ... -//! ssl_cert_path = "./storage/ssl_certificates/localhost.crt" -//! ssl_key_path = "./storage/ssl_certificates/localhost.key" +//! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" +//! 
ssl_key_path = "./storage/tracker/lib/tls/localhost.key" //! //! [http_api] //! enabled = true //! ... -//! ssl_cert_path = "./storage/ssl_certificates/localhost.crt" -//! ssl_key_path = "./storage/ssl_certificates/localhost.key" +//! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" +//! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" //! ``` //! //! ## Default configuration @@ -194,7 +194,7 @@ //! log_level = "info" //! mode = "public" //! db_driver = "Sqlite3" -//! db_path = "./storage/database/data.db" +//! db_path = "./storage/tracker/lib/database/sqlite3.db" //! announce_interval = 120 //! min_announce_interval = 120 //! max_peer_timeout = 900 @@ -239,6 +239,67 @@ use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; +/// Information required for loading config +#[derive(Debug, Default, Clone)] +pub struct Info { + tracker_toml: String, + api_admin_token: Option, +} + +impl Info { + /// Build Configuration Info + /// + /// # Examples + /// + /// ``` + /// use torrust_tracker_configuration::Info; + /// + /// let result = Info::new(env_var_config, env_var_path_config, default_path_config, env_var_api_admin_token); + /// assert_eq!(result, ); + /// ``` + /// + /// # Errors + /// + /// Will return `Err` if unable to obtain a configuration. 
+ /// + #[allow(clippy::needless_pass_by_value)] + pub fn new( + env_var_config: String, + env_var_path_config: String, + default_path_config: String, + env_var_api_admin_token: String, + ) -> Result { + let tracker_toml = if let Ok(tracker_toml) = env::var(&env_var_config) { + println!("Loading configuration from env var {env_var_config} ..."); + + tracker_toml + } else { + let config_path = if let Ok(config_path) = env::var(env_var_path_config) { + println!("Loading configuration file: `{config_path}` ..."); + + config_path + } else { + println!("Loading default configuration file: `{default_path_config}` ..."); + + default_path_config + }; + + fs::read_to_string(config_path) + .map_err(|e| Error::UnableToLoadFromConfigFile { + source: (Arc::new(e) as Arc).into(), + })? + .parse() + .map_err(|_e: std::convert::Infallible| Error::Infallible)? + }; + let api_admin_token = env::var(env_var_api_admin_token).ok(); + + Ok(Self { + tracker_toml, + api_admin_token, + }) + } +} + /// Configuration for each UDP tracker. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct UdpTracker { @@ -298,6 +359,12 @@ pub struct HttpApi { pub access_tokens: HashMap, } +impl HttpApi { + fn override_admin_token(&mut self, api_admin_token: &str) { + self.access_tokens.insert("admin".to_string(), api_admin_token.to_string()); + } +} + impl HttpApi { /// Checks if the given token is one of the token in the configuration. #[must_use] @@ -323,7 +390,7 @@ pub struct Configuration { pub db_driver: DatabaseDriver, /// Database connection string. The format depends on the database driver. /// For `Sqlite3`, the format is `path/to/database.db`, for example: - /// `./storage/database/data.db`. + /// `./storage/tracker/lib/database/sqlite3.db`. /// For `Mysql`, the format is `mysql://db_user:db_user_password:port/db_name`, for /// example: `root:password@localhost:3306/torrust`. 
pub db_path: String, @@ -411,9 +478,17 @@ pub enum Error { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, + #[error("Unable to load from Config File: {source}")] + UnableToLoadFromConfigFile { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + /// Unable to load the configuration from the configuration file. #[error("Failed processing the configuration: {source}")] ConfigError { source: LocatedError<'static, ConfigError> }, + + #[error("The error for errors that can never happen.")] + Infallible, } impl From for Error { @@ -431,7 +506,7 @@ impl Default for Configuration { log_level: Option::from(String::from("info")), mode: TrackerMode::Public, db_driver: DatabaseDriver::Sqlite3, - db_path: String::from("./storage/database/data.db"), + db_path: String::from("./storage/tracker/lib/database/sqlite3.db"), announce_interval: 120, min_announce_interval: 120, max_peer_timeout: 900, @@ -471,6 +546,10 @@ impl Default for Configuration { } impl Configuration { + fn override_api_admin_token(&mut self, api_admin_token: &str) { + self.http_api.override_admin_token(api_admin_token); + } + /// Returns the tracker public IP address id defined in the configuration, /// and `None` otherwise. #[must_use] @@ -514,26 +593,25 @@ impl Configuration { Ok(config) } - /// Loads the configuration from the environment variable. The whole - /// configuration must be in the environment variable. It contains the same - /// configuration as the configuration file with the same format. + /// Loads the configuration from the `Info` struct. The whole + /// configuration in toml format is included in the `info.tracker_toml` string. + /// + /// Optionally will override the admin api token. /// /// # Errors /// /// Will return `Err` if the environment variable does not exist or has a bad configuration. 
- pub fn load_from_env_var(config_env_var_name: &str) -> Result { - match env::var(config_env_var_name) { - Ok(config_toml) => { - let config_builder = Config::builder() - .add_source(File::from_str(&config_toml, FileFormat::Toml)) - .build()?; - let config = config_builder.try_deserialize()?; - Ok(config) - } - Err(e) => Err(Error::UnableToLoadFromEnvironmentVariable { - source: (Arc::new(e) as Arc).into(), - }), - } + pub fn load(info: &Info) -> Result { + let config_builder = Config::builder() + .add_source(File::from_str(&info.tracker_toml, FileFormat::Toml)) + .build()?; + let mut config: Configuration = config_builder.try_deserialize()?; + + if let Some(ref token) = info.api_admin_token { + config.override_api_admin_token(token); + }; + + Ok(config) } /// Saves the configuration to the configuration file. @@ -567,7 +645,7 @@ mod tests { let config = r#"log_level = "info" mode = "public" db_driver = "Sqlite3" - db_path = "./storage/database/data.db" + db_path = "./storage/tracker/lib/database/sqlite3.db" announce_interval = 120 min_announce_interval = 120 on_reverse_proxy = false diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index e9d86a589..4e6c70e66 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -10,5 +10,5 @@ version.workspace = true [dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.3", path = "../configuration"} -torrust-tracker-primitives = { version = "3.0.0-alpha.3", path = "../primitives"} +torrust-tracker-configuration = { version = "3.0.0-alpha.4", path = "../configuration"} +torrust-tracker-primitives = { version = "3.0.0-alpha.4", path = "../primitives"} diff --git a/rustfmt.toml b/rustfmt.toml index 3e878b271..abbed5eda 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,4 +1,3 @@ max_width = 130 imports_granularity = "Module" group_imports = "StdExternalCrate" - diff --git a/share/container/entry_script_sh 
b/share/container/entry_script_sh new file mode 100644 index 000000000..88a832b40 --- /dev/null +++ b/share/container/entry_script_sh @@ -0,0 +1,81 @@ +#!/bin/sh +set -x + +to_lc() { echo "$1" | tr '[:upper:]' '[:lower:]'; } +clean() { echo "$1" | tr -d -c 'a-zA-Z0-9-' ; } +cmp_lc() { [ "$(to_lc "$(clean "$1")")" = "$(to_lc "$(clean "$2")")" ]; } + + +inst() { + if [ -n "$1" ] && [ -n "$2" ] && [ -e "$1" ] && [ ! -e "$2" ]; then + install -D -m 0640 -o torrust -g torrust "$1" "$2"; fi; } + + +# Add torrust user, based upon supplied user-id. +if [ -z "$USER_ID" ] && [ "$USER_ID" -lt 1000 ]; then + echo "ERROR: USER_ID is not set, or less than 1000" + exit 1 +fi + +adduser --disabled-password --shell "/bin/sh" --uid "$USER_ID" "torrust" + +# Configure Permissions for Torrust Folders +mkdir -p /var/lib/torrust/tracker/database/ /etc/torrust/tracker/ +chown -R "${USER_ID}":"${USER_ID}" /var/lib/torrust /var/log/torrust /etc/torrust +chmod -R 2770 /var/lib/torrust /var/log/torrust /etc/torrust + + +# Install the database and config: +if [ -n "$TORRUST_TRACKER_DATABASE" ]; then + if cmp_lc "$TORRUST_TRACKER_DATABASE" "sqlite3"; then + + # Select sqlite3 empty database + default_database="/usr/share/torrust/default/database/tracker.sqlite3.db" + + # Select sqlite3 default configuration + default_config="/usr/share/torrust/default/config/tracker.container.sqlite3.toml" + + elif cmp_lc "$TORRUST_TRACKER_DATABASE" "mysql"; then + + # (no database file needed for mysql) + + # Select default mysql configuration + default_config="/usr/share/torrust/default/config/tracker.container.mysql.toml" + + else + echo "Error: Unsupported Database Type: \"$TORRUST_TRACKER_DATABASE\"." + echo "Please Note: Supported Database Types: \"sqlite3\", \"mysql\"." 
+ exit 1 + fi +else + echo "Error: \"\$TORRUST_TRACKER_DATABASE\" was not set!"; exit 1; +fi + +install_config="/etc/torrust/tracker/tracker.toml" +install_database="/var/lib/torrust/tracker/database/sqlite3.db" + +inst "$default_config" "$install_config" +inst "$default_database" "$install_database" + +# Make Minimal Message of the Day +if cmp_lc "$RUNTIME" "runtime"; then + printf '\n in runtime \n' >> /etc/motd; +elif cmp_lc "$RUNTIME" "debug"; then + printf '\n in debug mode \n' >> /etc/motd; +elif cmp_lc "$RUNTIME" "release"; then + printf '\n in release mode \n' >> /etc/motd; +else + echo "ERROR: running in unknown mode: \"$RUNTIME\""; exit 1 +fi + +if [ -e "/usr/share/torrust/container/message" ]; then + cat "/usr/share/torrust/container/message" >> /etc/motd; chmod 0644 /etc/motd +fi + +# Load message of the day from Profile +echo '[ ! -z "$TERM" -a -r /etc/motd ] && cat /etc/motd' >> /etc/profile + +cd /home/torrust || exit 1 + +# Switch to torrust user +exec /bin/su-exec torrust "$@" diff --git a/share/container/message b/share/container/message new file mode 100644 index 000000000..cd88b44ae --- /dev/null +++ b/share/container/message @@ -0,0 +1,2 @@ + +run 'torrust-tracker' to start tracker diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml new file mode 100644 index 000000000..60da25db2 --- /dev/null +++ b/share/default/config/tracker.container.mysql.toml @@ -0,0 +1,39 @@ +log_level = "info" +mode = "public" +db_driver = "MySQL" +db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" +announce_interval = 120 +min_announce_interval = 120 +on_reverse_proxy = false +external_ip = "0.0.0.0" +tracker_usage_statistics = true +persistent_torrent_completed_stat = false +max_peer_timeout = 900 +inactive_peer_cleanup_interval = 600 +remove_peerless_torrents = true + +[[udp_trackers]] +enabled = false +bind_address = "0.0.0.0:6969" + +[[http_trackers]] +enabled = false 
+bind_address = "0.0.0.0:7070" +ssl_enabled = false +ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" +ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" + +[http_api] +enabled = true +bind_address = "0.0.0.0:1212" +ssl_enabled = false +ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" +ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" + + +# Please override the admin token setting the +# `TORRUST_TRACKER_API_ADMIN_TOKEN` +# environmental variable! + +[http_api.access_tokens] +admin = "MyAccessToken" diff --git a/share/default/config/tracker.container.sqlite3.toml b/share/default/config/tracker.container.sqlite3.toml new file mode 100644 index 000000000..64cf75518 --- /dev/null +++ b/share/default/config/tracker.container.sqlite3.toml @@ -0,0 +1,39 @@ +log_level = "info" +mode = "public" +db_driver = "Sqlite3" +db_path = "/var/lib/torrust/tracker/database/sqlite3.db" +announce_interval = 120 +min_announce_interval = 120 +on_reverse_proxy = false +external_ip = "0.0.0.0" +tracker_usage_statistics = true +persistent_torrent_completed_stat = false +max_peer_timeout = 900 +inactive_peer_cleanup_interval = 600 +remove_peerless_torrents = true + +[[udp_trackers]] +enabled = false +bind_address = "0.0.0.0:6969" + +[[http_trackers]] +enabled = false +bind_address = "0.0.0.0:7070" +ssl_enabled = false +ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" +ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" + +[http_api] +enabled = true +bind_address = "0.0.0.0:1212" +ssl_enabled = false +ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" +ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" + + +# Please override the admin token setting the +# `TORRUST_TRACKER_API_ADMIN_TOKEN` +# environmental variable! 
+ +[http_api.access_tokens] +admin = "MyAccessToken" diff --git a/config.toml.local b/share/default/config/tracker.development.sqlite3.toml similarity index 92% rename from config.toml.local rename to share/default/config/tracker.development.sqlite3.toml index be6a11a56..be1877469 100644 --- a/config.toml.local +++ b/share/default/config/tracker.development.sqlite3.toml @@ -1,7 +1,7 @@ log_level = "info" mode = "public" db_driver = "Sqlite3" -db_path = "./storage/database/data.db" +db_path = "./storage/tracker/lib/database/sqlite3.db" announce_interval = 120 min_announce_interval = 120 on_reverse_proxy = false diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index 727bf59f7..858fd59fc 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -1,30 +1,28 @@ //! Initialize configuration from file or env var. //! //! All environment variables are prefixed with `TORRUST_TRACKER_BACK_`. -use std::env; -use std::path::Path; -use torrust_tracker_configuration::{Configuration, Error}; +use torrust_tracker_configuration::{Configuration, Info}; // Environment variables -/// The whole `config.toml` file content. It has priority over the config file. +/// The whole `tracker.toml` file content. It has priority over the config file. /// Even if the file is not on the default path. const ENV_VAR_CONFIG: &str = "TORRUST_TRACKER_CONFIG"; +const ENV_VAR_API_ADMIN_TOKEN: &str = "TORRUST_TRACKER_API_ADMIN_TOKEN"; -/// The `config.toml` file location. -pub const ENV_VAR_CONFIG_PATH: &str = "TORRUST_TRACKER_CONFIG_PATH"; +/// The `tracker.toml` file location. +pub const ENV_VAR_PATH_CONFIG: &str = "TORRUST_TRACKER_PATH_CONFIG"; // Default values - -const ENV_VAR_DEFAULT_CONFIG_PATH: &str = "./config.toml"; +pub const DEFAULT_PATH_CONFIG: &str = "./share/default/config/tracker.development.sqlite3.toml"; /// It loads the application configuration from the environment. /// /// There are two methods to inject the configuration: /// -/// 1. 
By using a config file: `config.toml`. -/// 2. Environment variable: `TORRUST_TRACKER_CONFIG`. The variable contains the same contents as the `config.toml` file. +/// 1. By using a config file: `tracker.toml`. +/// 2. Environment variable: `TORRUST_TRACKER_CONFIG`. The variable contains the same contents as the `tracker.toml` file. /// /// Environment variable has priority over the config file. /// @@ -33,37 +31,27 @@ const ENV_VAR_DEFAULT_CONFIG_PATH: &str = "./config.toml"; /// # Panics /// /// Will panic if it can't load the configuration from either -/// `./config.toml` file or the env var `TORRUST_TRACKER_CONFIG`. +/// `./tracker.toml` file or the env var `TORRUST_TRACKER_CONFIG`. #[must_use] pub fn initialize_configuration() -> Configuration { - if env::var(ENV_VAR_CONFIG).is_ok() { - println!("Loading configuration from env var {ENV_VAR_CONFIG} ..."); + let info = Info::new( + ENV_VAR_CONFIG.to_string(), + ENV_VAR_PATH_CONFIG.to_string(), + DEFAULT_PATH_CONFIG.to_string(), + ENV_VAR_API_ADMIN_TOKEN.to_string(), + ) + .unwrap(); + + Configuration::load(&info).unwrap() +} - Configuration::load_from_env_var(ENV_VAR_CONFIG).unwrap() - } else { - let config_path = env::var(ENV_VAR_CONFIG_PATH).unwrap_or_else(|_| ENV_VAR_DEFAULT_CONFIG_PATH.to_string()); +#[cfg(test)] +mod tests { - println!("Loading configuration from configuration file: `{config_path}` ..."); + #[test] + fn it_should_load_with_default_config() { + use crate::bootstrap::config::initialize_configuration; - load_from_file_or_create_default(&config_path).unwrap() - } -} - -/// Loads the configuration from the configuration file. If the file does -/// not exist, it will create a default configuration file and return an -/// error. -/// -/// # Errors -/// -/// Will return `Err` if `path` does not exist or has a bad configuration. 
-fn load_from_file_or_create_default(path: &str) -> Result { - if Path::new(&path).is_file() { - Configuration::load_from_file(path) - } else { - println!("Missing configuration file."); - println!("Creating a default configuration file: `{path}` ..."); - let config = Configuration::create_default_configuration_file(path)?; - println!("Please review the config file: `{path}` and restart the tracker if needed."); - Ok(config) + drop(initialize_configuration()); } } diff --git a/src/lib.rs b/src/lib.rs index 28bac9244..c862d373a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -112,16 +112,16 @@ //! storage/ //! ├── database //! │   └── data.db -//! └── ssl_certificates +//! └── tls //! ├── localhost.crt //! └── localhost.key //! ``` //! -//! The default configuration expects a directory `./storage/database` to be writable by the tracker process. +//! The default configuration expects a directory `./storage/tracker/lib/database` to be writable by the tracker process. //! //! By default the tracker uses `SQLite` and the database file name `data.db`. //! -//! You only need the `ssl_certificates` directory in case you are setting up SSL for the HTTP tracker or the tracker API. +//! You only need the `tls` directory in case you are setting up SSL for the HTTP tracker or the tracker API. //! Visit [`HTTP`](crate::servers::http) or [`API`](crate::servers::apis) if you want to know how you can use HTTPS. //! //! ## Install from sources @@ -130,8 +130,8 @@ //! git clone https://github.com/torrust/torrust-tracker.git \ //! && cd torrust-tracker \ //! && cargo build --release \ -//! && mkdir -p ./storage/database \ -//! && mkdir -p ./storage/ssl_certificates +//! && mkdir -p ./storage/tracker/lib/database \ +//! && mkdir -p ./storage/tracker/lib/tls //! ``` //! //! ## Run with docker @@ -142,7 +142,7 @@ //! # Configuration //! //! In order to run the tracker you need to provide the configuration. If you run the tracker without providing the configuration, -//! 
the tracker will generate the default configuration the first time you run it. It will generate a `config.toml` file with +//! the tracker will generate the default configuration the first time you run it. It will generate a `tracker.toml` file with //! in the root directory. //! //! The default configuration is: @@ -151,7 +151,7 @@ //! log_level = "info" //! mode = "public" //! db_driver = "Sqlite3" -//! db_path = "./storage/database/data.db" +//! db_path = "./storage/tracker/lib/database/sqlite3.db" //! announce_interval = 120 //! min_announce_interval = 120 //! max_peer_timeout = 900 @@ -188,18 +188,18 @@ //! //! For more information about each service and options you can visit the documentation for the [torrust-tracker-configuration crate](https://docs.rs/torrust-tracker-configuration). //! -//! Alternatively to the `config.toml` file you can use one environment variable `TORRUST_TRACKER_CONFIG` to pass the configuration to the tracker: +//! Alternatively to the `tracker.toml` file you can use one environment variable `TORRUST_TRACKER_CONFIG` to pass the configuration to the tracker: //! //! ```text -//! TORRUST_TRACKER_CONFIG=$(cat config.toml) +//! TORRUST_TRACKER_CONFIG=$(cat tracker.toml) //! cargo run //! ``` //! -//! In the previous example you are just setting the env var with the contents of the `config.toml` file. +//! In the previous example you are just setting the env var with the contents of the `tracker.toml` file. //! -//! The env var contains the same data as the `config.toml`. It's particularly useful in you are [running the tracker with docker](https://github.com/torrust/torrust-tracker/tree/develop/docker). +//! The env var contains the same data as the `tracker.toml`. It's particularly useful in you are [running the tracker with docker](https://github.com/torrust/torrust-tracker/tree/develop/docker). //! -//! > NOTE: The `TORRUST_TRACKER_CONFIG` env var has priority over the `config.toml` file. +//! 
> NOTE: The `TORRUST_TRACKER_CONFIG` env var has priority over the `tracker.toml` file. //! //! # Usage //! diff --git a/src/servers/apis/mod.rs b/src/servers/apis/mod.rs index eb278bf3c..afed9ff12 100644 --- a/src/servers/apis/mod.rs +++ b/src/servers/apis/mod.rs @@ -28,8 +28,8 @@ //! enabled = true //! bind_address = "0.0.0.0:1212" //! ssl_enabled = false -//! ssl_cert_path = "./storage/ssl_certificates/localhost.crt" -//! ssl_key_path = "./storage/ssl_certificates/localhost.key" +//! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" +//! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" //! //! [http_api.access_tokens] //! admin = "MyAccessToken" @@ -41,7 +41,7 @@ //! When you run the tracker with enabled API, you will see the following message: //! //! ```text -//! Loading configuration from config file ./config.toml +//! Loading configuration from config file ./tracker.toml //! 023-03-28T12:19:24.963054069+01:00 [torrust_tracker::bootstrap::logging][INFO] logging initialized. //! ... //! 023-03-28T12:19:24.964138723+01:00 [torrust_tracker::bootstrap::jobs::tracker_apis][INFO] Starting Torrust APIs server on: http://0.0.0.0:1212 @@ -116,8 +116,8 @@ //! enabled = true //! bind_address = "0.0.0.0:1212" //! ssl_enabled = true -//! ssl_cert_path = "./storage/ssl_certificates/localhost.crt" -//! ssl_key_path = "./storage/ssl_certificates/localhost.key" +//! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" +//! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" //! //! [http_api.access_tokens] //! admin = "MyAccessToken" diff --git a/src/servers/apis/v1/middlewares/auth.rs b/src/servers/apis/v1/middlewares/auth.rs index 41af09031..3e8f74d0c 100644 --- a/src/servers/apis/v1/middlewares/auth.rs +++ b/src/servers/apis/v1/middlewares/auth.rs @@ -11,7 +11,7 @@ //! The token must be one of the `access_tokens` in the tracker //! [HTTP API configuration](torrust_tracker_configuration::HttpApi). //! -//! 
The configuration file `config.toml` contains a list of tokens: +//! The configuration file `tracker.toml` contains a list of tokens: //! //! ```toml //! [http_api.access_tokens] diff --git a/src/tracker/databases/driver.rs b/src/tracker/databases/driver.rs index 4ff9314d2..19cb7046e 100644 --- a/src/tracker/databases/driver.rs +++ b/src/tracker/databases/driver.rs @@ -18,7 +18,7 @@ use super::{Builder, Database}; /// use torrust_tracker_primitives::DatabaseDriver; /// /// let db_driver = DatabaseDriver::Sqlite3; -/// let db_path = "./storage/database/data.db".to_string(); +/// let db_path = "./storage/tracker/lib/database/sqlite3.db".to_string(); /// let database = databases::driver::build(&db_driver, &db_path); /// ``` /// diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 6823e8fe8..040751e12 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -320,7 +320,7 @@ //! log_level = "debug" //! mode = "public" //! db_driver = "Sqlite3" -//! db_path = "./storage/database/data.db" +//! db_path = "./storage/tracker/lib/database/sqlite3.db" //! announce_interval = 120 //! min_announce_interval = 120 //! 
max_peer_timeout = 900 diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index b508dfc39..2e24af6b7 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -67,11 +67,12 @@ mod for_all_config_modes { // Vuze (bittorrent client) docs: // https://wiki.vuze.com/w/Announce - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV6}; use std::str::FromStr; use local_ip_address::local_ip; use reqwest::Response; + use tokio::net::TcpListener; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer; use torrust_tracker_test_helpers::configuration; @@ -594,6 +595,13 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { + if TcpListener::bind(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0)) + .await + .is_err() + { + return; // we cannot bind to a ipv6 socket, so we will skip this test + } + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) @@ -651,6 +659,13 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { + if TcpListener::bind(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0)) + .await + .is_err() + { + return; // we cannot bind to a ipv6 socket, so we will skip this test + } + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) @@ -830,9 +845,10 @@ mod for_all_config_modes { // Vuze (bittorrent client) docs: // https://wiki.vuze.com/w/Scrape - use std::net::IpAddr; + use std::net::{IpAddr, Ipv6Addr, SocketAddrV6}; use std::str::FromStr; + use tokio::net::TcpListener; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use 
torrust_tracker::tracker::peer; use torrust_tracker_test_helpers::configuration; @@ -1027,6 +1043,13 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { + if TcpListener::bind(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0)) + .await + .is_err() + { + return; // we cannot bind to a ipv6 socket, so we will skip this test + } + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); From 5bea92d5c62ed4bd38d1033f5ed9d4393cf4a8b6 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 6 Sep 2023 13:16:21 +0200 Subject: [PATCH 0563/1003] ci: use coverage upload token --- .github/workflows/coverage.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 08b869327..a6d639e21 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -54,6 +54,7 @@ jobs: name: Upload Coverage Report uses: codecov/codecov-action@v3 with: + token: ${{ secrets.CODECOV_TOKEN }} files: ${{ steps.coverage.outputs.report }} verbose: true fail_ci_if_error: true From 39fd63d54f671e4e95067e08fec647c4ca9d2f49 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 6 Sep 2023 16:58:53 +0200 Subject: [PATCH 0564/1003] dev: update cargo toml files --- Cargo.lock | 18 +++++++------- Cargo.toml | 24 +++++++++++++++---- contrib/bencode/Cargo.toml | 25 ++++++++++---------- contrib/bencode/benches/bencode_benchmark.rs | 2 +- contrib/bencode/src/lib.rs | 12 +++++----- contrib/bencode/test/mod.rs | 2 +- packages/configuration/Cargo.toml | 10 +++++++- packages/located-error/Cargo.toml | 11 +++++++-- packages/primitives/Cargo.toml | 10 +++++++- packages/test-helpers/Cargo.toml | 13 +++++++--- src/servers/http/v1/responses/announce.rs | 2 +- src/servers/http/v1/responses/scrape.rs | 2 +- 12 files changed, 88 insertions(+), 
43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0b7c9d0d4..fae2d9dd9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -258,14 +258,6 @@ version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "414dcefbc63d77c526a76b3afcf6fbb9b5e2791c19c3aa2297733208750c6e53" -[[package]] -name = "bencode" -version = "1.0.0-alpha.1" -dependencies = [ - "criterion", - "error-chain", -] - [[package]] name = "bigdecimal" version = "0.3.1" @@ -3103,7 +3095,6 @@ dependencies = [ "axum", "axum-client-ip", "axum-server", - "bencode", "binascii", "chrono", "config", @@ -3133,6 +3124,7 @@ dependencies = [ "thiserror", "tokio", "torrust-tracker-configuration", + "torrust-tracker-contrib-bencode", "torrust-tracker-located-error", "torrust-tracker-primitives", "torrust-tracker-test-helpers", @@ -3155,6 +3147,14 @@ dependencies = [ "uuid", ] +[[package]] +name = "torrust-tracker-contrib-bencode" +version = "3.0.0-alpha.4" +dependencies = [ + "criterion", + "error-chain", +] + [[package]] name = "torrust-tracker-located-error" version = "3.0.0-alpha.4" diff --git a/Cargo.toml b/Cargo.toml index 9c94ea10d..17c1cbbb7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,21 +1,37 @@ [package] name = "torrust-tracker" -description = "A feature rich BitTorrent tracker." -license-file.workspace = true +readme = "README.md" + authors.workspace = true +description.workspace = true +documentation.workspace = true edition.workspace = true +homepage.workspace = true +keywords.workspace = true +license-file.workspace = true +publish.workspace = true repository.workspace = true +rust-version.workspace = true version.workspace = true + [workspace.package] -license-file = "COPYRIGHT" authors = [ "Nautilus Cyberneering , Mick van Dijke ", ] +categories = ["network-programming", "web-programming"] +description = "A feature rich BitTorrent tracker." 
+documentation = "https://docs.rs/crate/torrust-tracker/" edition = "2021" +homepage = "https://torrust.com/" +keywords = ["bittorrent", "tracker", "file-sharing", "peer-to-peer", "torrent"] +license-file = "COPYRIGHT" +publish = true repository = "https://github.com/torrust/torrust-tracker" +rust-version = "1.72" version = "3.0.0-alpha.4" + [dependencies] tokio = { version = "1.29", features = [ "rt-multi-thread", @@ -50,7 +66,7 @@ axum = "0.6.20" axum-server = { version = "0.5", features = ["tls-rustls"] } axum-client-ip = "0.4.1" tower-http = { version = "0.4.3", features = ["compression-full"] } -bencode = { version = "1.0.0-alpha.1", path = "contrib/bencode" } +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.4", path = "contrib/bencode"} torrust-tracker-primitives = { version = "3.0.0-alpha.4", path = "packages/primitives" } torrust-tracker-configuration = { version = "3.0.0-alpha.4", path = "packages/configuration" } torrust-tracker-located-error = { version = "3.0.0-alpha.4", path = "packages/located-error" } diff --git a/contrib/bencode/Cargo.toml b/contrib/bencode/Cargo.toml index 8334e270d..2aed18409 100644 --- a/contrib/bencode/Cargo.toml +++ b/contrib/bencode/Cargo.toml @@ -1,22 +1,21 @@ [package] -name = "bencode" -description = "Efficient decoding and encoding for bencode." -keywords = ["bencode"] +name = "torrust-tracker-contrib-bencode" +description = "(contrib) Efficient decoding and encoding for bencode." +keywords = ["library", "contrib", "bencode"] readme = "README.md" - authors = [ "Nautilus Cyberneering , Andrew ", ] -categories = ["network-programming", "web-programming"] -documentation = "https://github.com/torrust/bittorrent-infrastructure-project" -edition = "2021" -homepage = "https://github.com/torrust/bittorrent-infrastructure-project" -license = "Apache-2.0" -publish = false # until we decide where to publish. 
repository = "https://github.com/torrust/bittorrent-infrastructure-project" -rust-version = "1.71" -version = "1.0.0-alpha.1" +license = "Apache-2.0" + +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +publish.workspace = true +rust-version.workspace = true +version.workspace = true [dependencies] @@ -31,4 +30,4 @@ path = "test/mod.rs" [[bench]] name = "bencode_benchmark" -harness = false \ No newline at end of file +harness = false diff --git a/contrib/bencode/benches/bencode_benchmark.rs b/contrib/bencode/benches/bencode_benchmark.rs index 729197d8a..b79bb0999 100644 --- a/contrib/bencode/benches/bencode_benchmark.rs +++ b/contrib/bencode/benches/bencode_benchmark.rs @@ -1,5 +1,5 @@ -use bencode::{BDecodeOpt, BencodeRef}; use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use torrust_tracker_contrib_bencode::{BDecodeOpt, BencodeRef}; const B_NESTED_LISTS: &[u8; 100] = b"lllllllllllllllllllllllllllllllllllllllllllllllllleeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"; // cspell:disable-line diff --git a/contrib/bencode/src/lib.rs b/contrib/bencode/src/lib.rs index 103a3c371..78e113b66 100644 --- a/contrib/bencode/src/lib.rs +++ b/contrib/bencode/src/lib.rs @@ -85,8 +85,8 @@ const BYTE_LEN_END: u8 = b':'; macro_rules! ben_map { ( $($key:expr => $val:expr),* ) => { { - use bencode::{BMutAccess, BencodeMut}; - use bencode::inner::BCowConvert; + use $crate::{BMutAccess, BencodeMut}; + use $crate::inner::BCowConvert; let mut bencode_map = BencodeMut::new_dict(); { @@ -106,7 +106,7 @@ macro_rules! ben_map { macro_rules! ben_list { ( $($ben:expr),* ) => { { - use bencode::{BencodeMut, BMutAccess}; + use $crate::{BencodeMut, BMutAccess}; let mut bencode_list = BencodeMut::new_list(); { @@ -125,8 +125,8 @@ macro_rules! ben_list { #[macro_export] macro_rules! 
ben_bytes { ( $ben:expr ) => {{ - use bencode::inner::BCowConvert; - use bencode::BencodeMut; + use $crate::inner::BCowConvert; + use $crate::BencodeMut; BencodeMut::new_bytes(BCowConvert::convert($ben)) }}; @@ -136,7 +136,7 @@ macro_rules! ben_bytes { #[macro_export] macro_rules! ben_int { ( $ben:expr ) => {{ - use bencode::BencodeMut; + use $crate::BencodeMut; BencodeMut::new_int($ben) }}; diff --git a/contrib/bencode/test/mod.rs b/contrib/bencode/test/mod.rs index c1454967d..14606c175 100644 --- a/contrib/bencode/test/mod.rs +++ b/contrib/bencode/test/mod.rs @@ -1,4 +1,4 @@ -use bencode::{ben_bytes, ben_int, ben_list, ben_map}; +use torrust_tracker_contrib_bencode::{ben_bytes, ben_int, ben_list, ben_map}; #[test] fn positive_ben_map_macro() { diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index e3ca1d932..93b45c0a7 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -1,12 +1,20 @@ [package] name = "torrust-tracker-configuration" description = "A library to provide configuration to the Torrust Tracker." -license-file.workspace = true +keywords = ["library", "config", "settings"] +readme = "README.md" + authors.workspace = true +documentation.workspace = true edition.workspace = true +homepage.workspace = true +license-file.workspace = true +publish.workspace = true repository.workspace = true +rust-version.workspace = true version.workspace = true + [dependencies] serde = { version = "1.0", features = ["derive"] } serde_with = "3.2" diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index 19707e7e9..ea0b1639a 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -1,15 +1,22 @@ [package] name = "torrust-tracker-located-error" description = "A library to provide error decorator with the location and the source of the original error." 
-license-file.workspace = true +keywords = ["library", "helper", "errors"] +readme = "README.md" + authors.workspace = true +documentation.workspace = true edition.workspace = true +homepage.workspace = true +license-file.workspace = true +publish.workspace = true repository.workspace = true +rust-version.workspace = true version.workspace = true + [dependencies] log = { version = "0.4", features = ["release_max_level_info"] } [dev-dependencies] thiserror = "1.0" - diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index 7576e06d8..9eb092e1c 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -1,12 +1,20 @@ [package] name = "torrust-tracker-primitives" description = "A library with the primitive types shared by the Torrust tracker packages." -license-file.workspace = true +keywords = ["library", "api", "primitives"] +readme = "README.md" + authors.workspace = true +documentation.workspace = true edition.workspace = true +homepage.workspace = true +license-file.workspace = true +publish.workspace = true repository.workspace = true +rust-version.workspace = true version.workspace = true + [dependencies] serde = { version = "1.0", features = ["derive"] } derive_more = "0.99" diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 4e6c70e66..91831399c 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -1,14 +1,21 @@ [package] name = "torrust-tracker-test-helpers" description = "A library providing helpers for testing the Torrust tracker." 
-license-file.workspace = true +keywords = ["library", "helper", "testing"] +readme = "README.md" + authors.workspace = true +documentation.workspace = true edition.workspace = true +homepage.workspace = true +license-file.workspace = true +publish.workspace = true repository.workspace = true +rust-version.workspace = true version.workspace = true [dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.4", path = "../configuration"} -torrust-tracker-primitives = { version = "3.0.0-alpha.4", path = "../primitives"} +torrust-tracker-configuration = { version = "3.0.0-alpha.4", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.4", path = "../primitives" } diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index e7b64522c..3596275f4 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -7,9 +7,9 @@ use std::panic::Location; use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; -use bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; use serde::{self, Deserialize, Serialize}; use thiserror::Error; +use torrust_tracker_contrib_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; use crate::servers::http::v1::responses; use crate::tracker::{self, AnnounceData}; diff --git a/src/servers/http/v1/responses/scrape.rs b/src/servers/http/v1/responses/scrape.rs index c2f099597..9cd88b9ab 100644 --- a/src/servers/http/v1/responses/scrape.rs +++ b/src/servers/http/v1/responses/scrape.rs @@ -5,7 +5,7 @@ use std::borrow::Cow; use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; -use bencode::{ben_int, ben_map, BMutAccess}; +use torrust_tracker_contrib_bencode::{ben_int, ben_map, BMutAccess}; use crate::tracker::ScrapeData; From 5160632b04ea11dd916aaecc315bb826bd4c009a Mon Sep 17 00:00:00 2001 From: Cameron Garnham 
Date: Wed, 6 Sep 2023 16:59:09 +0200 Subject: [PATCH 0565/1003] dev: update deployment scripts --- .github/workflows/container.yaml | 16 +++-- .github/workflows/deployment.yaml | 81 +++++++++++++++++++++++++ .github/workflows/publish_crate.yml | 57 ----------------- .github/workflows/testing.yaml | 2 +- cSpell.json | 3 +- contrib/bencode/src/reference/decode.rs | 2 - 6 files changed, 96 insertions(+), 65 deletions(-) create mode 100644 .github/workflows/deployment.yaml delete mode 100644 .github/workflows/publish_crate.yml diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 27a2dc93c..2064d0ee4 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -2,8 +2,11 @@ name: Container on: push: - tags-ignore: - - "!v*" + branches: + - "develop" + - "main" + tags: + - "v*" pull_request: branches: - "develop" @@ -17,6 +20,10 @@ jobs: name: Test (Docker) runs-on: ubuntu-latest + strategy: + matrix: + target: [debug, release] + steps: - id: setup name: Setup Toolchain @@ -29,9 +36,10 @@ jobs: file: ./Containerfile push: false load: true + target: ${{ matrix.target }} tags: torrust-tracker:local cache-from: type=gha - cache-to: type=gha,mode=max + cache-to: type=gha - id: inspect name: Inspect @@ -122,4 +130,4 @@ jobs: tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} cache-from: type=gha - cache-to: type=gha,mode=max + cache-to: type=gha diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml new file mode 100644 index 000000000..6b6a64975 --- /dev/null +++ b/.github/workflows/deployment.yaml @@ -0,0 +1,81 @@ +name: Deployment + +on: + push: + tags: + - "v*" + +jobs: + secrets: + name: Secrets + needs: context + environment: dockerhub-torrust + if: needs.context.outputs.continue == 'true' + runs-on: ubuntu-latest + + outputs: + continue: ${{ steps.check.outputs.continue }} + + steps: + - id: check + name: Check + env: + CRATES_TOKEN: "${{ 
secrets.CRATES_TOKEN }}" + if: "${{ env.CRATES_TOKEN != '' }}" + run: echo "continue=true" >> $GITHUB_OUTPUT + + test: + name: Test + needs: secrets + if: needs.secrets.outputs.continue == 'true' + runs-on: ubuntu-latest + + strategy: + matrix: + toolchain: [stable, nightly] + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v3 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.toolchain }} + + - id: test + name: Run Unit Tests + run: cargo test --tests --benches --examples --workspace --all-targets --all-features + + publish: + name: Publish + environment: crates-io-torrust + needs: test + if: needs.secrets.outputs.continue == 'true' + runs-on: ubuntu-latest + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v3 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + + env: + CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} + + - id: publish + name: Publish Crates + run: | + cargo publish -p torrust-tracker-contrib-bencode + cargo publish -p torrust-tracker-located-error + cargo publish -p torrust-tracker-primitives + cargo publish -p torrust-tracker-configuration + cargo publish -p torrust-tracker-test-helpers + cargo publish -p torrust-tracker diff --git a/.github/workflows/publish_crate.yml b/.github/workflows/publish_crate.yml deleted file mode 100644 index 4d5d0772e..000000000 --- a/.github/workflows/publish_crate.yml +++ /dev/null @@ -1,57 +0,0 @@ -name: Publish crate - -on: - push: - tags: - - "v*" - -jobs: - check-secret: - runs-on: ubuntu-latest - environment: crates-io-torrust - outputs: - publish: ${{ steps.check.outputs.publish }} - steps: - - id: check - env: - CRATES_TOKEN: "${{ secrets.CRATES_TOKEN }}" - if: "${{ env.CRATES_TOKEN != '' }}" - run: echo "publish=true" >> $GITHUB_OUTPUT - - test: - needs: check-secret - if: needs.check-secret.outputs.publish == 'true' - runs-on: 
ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: dtolnay/rust-toolchain@stable - with: - toolchain: stable - components: llvm-tools-preview - - uses: Swatinem/rust-cache@v2 - - name: Run Tests - run: cargo test - - publish: - needs: test - if: needs.check-secret.outputs.publish == 'true' - runs-on: ubuntu-latest - environment: crates-io-torrust - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Install stable toolchain - uses: dtolnay/rust-toolchain@stable - with: - toolchain: stable - - - name: Publish workspace packages - run: | - cargo publish -p torrust-tracker-located-error - cargo publish -p torrust-tracker-primitives - cargo publish -p torrust-tracker-configuration - cargo publish -p torrust-tracker-test-helpers - cargo publish -p torrust-tracker - env: - CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index c1f85a90b..2fa52f5a4 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -50,7 +50,7 @@ jobs: name: Setup Toolchain uses: dtolnay/rust-toolchain@stable with: - toolchain: nightly + toolchain: ${{ matrix.toolchain }} components: clippy - id: cache diff --git a/cSpell.json b/cSpell.json index fc9db42b7..e4ac1d90b 100644 --- a/cSpell.json +++ b/cSpell.json @@ -33,6 +33,7 @@ "curr", "Cyberneering", "datetime", + "Dijke", "distroless", "dockerhub", "downloadedi", @@ -133,4 +134,4 @@ "shellscript", "toml" ] -} \ No newline at end of file +} diff --git a/contrib/bencode/src/reference/decode.rs b/contrib/bencode/src/reference/decode.rs index d18dffda0..d2aa180f8 100644 --- a/contrib/bencode/src/reference/decode.rs +++ b/contrib/bencode/src/reference/decode.rs @@ -1,5 +1,3 @@ -#![allow(clippy::should_panic_without_expect)] - use std::collections::btree_map::Entry; use std::collections::BTreeMap; use std::str::{self}; From c7453bfb9f9915421ab0fc0fe4fd548f8f682215 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Sep 2023 16:31:08 +0000 Subject: [PATCH 0566/1003] chore(deps): bump actions/checkout from 3 to 4 Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/container.yaml | 2 +- .github/workflows/coverage.yaml | 2 +- .github/workflows/deployment.yaml | 4 ++-- .github/workflows/testing.yaml | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 2064d0ee4..da38db71f 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -47,7 +47,7 @@ jobs: - id: checkout name: Checkout Repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - id: compose name: Compose diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index a6d639e21..6a99fb11a 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -19,7 +19,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - id: setup name: Setup Toolchain diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 6b6a64975..87bc61484 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -37,7 +37,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - id: setup name: Setup Toolchain @@ -59,7 +59,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v3 + uses: 
actions/checkout@v4 - id: setup name: Setup Toolchain diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index 2fa52f5a4..f138a95cc 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -15,7 +15,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - id: setup name: Setup Toolchain @@ -44,7 +44,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - id: setup name: Setup Toolchain @@ -81,7 +81,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - id: setup name: Setup Toolchain From 509d0456c72388850e0246c39c4a606d7f5e5d38 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Sep 2023 17:16:01 +0000 Subject: [PATCH 0567/1003] chore(deps): bump r2d2_mysql from 23.0.0 to 24.0.0 Bumps [r2d2_mysql](https://github.com/outersky/r2d2-mysql) from 23.0.0 to 24.0.0. - [Release notes](https://github.com/outersky/r2d2-mysql/releases) - [Changelog](https://github.com/outersky/r2d2-mysql/blob/master/CHANGELOG.md) - [Commits](https://github.com/outersky/r2d2-mysql/commits) --- updated-dependencies: - dependency-name: r2d2_mysql dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- Cargo.lock | 101 ++++++++++++++++++++++++++++++++++++++++++++--------- Cargo.toml | 2 +- 2 files changed, 86 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fae2d9dd9..0ab5ffc45 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -345,7 +345,7 @@ checksum = "0754613691538d51f329cce9af41d7b7ca150bc973056f1156611489475f54f7" dependencies = [ "borsh-derive-internal", "borsh-schema-derive-internal", - "proc-macro-crate", + "proc-macro-crate 0.1.5", "proc-macro2", "syn 1.0.109", ] @@ -1210,6 +1210,12 @@ dependencies = [ "hashbrown 0.14.0", ] +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + [[package]] name = "hermit-abi" version = "0.3.2" @@ -1596,11 +1602,11 @@ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "lru" -version = "0.8.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" +checksum = "718e8fae447df0c7e1ba7f5189829e63fd536945c8988d61444c19039f16b670" dependencies = [ - "hashbrown 0.12.3", + "hashbrown 0.13.2", ] [[package]] @@ -1694,9 +1700,9 @@ dependencies = [ [[package]] name = "mysql" -version = "23.0.1" +version = "24.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f11339ca5c251941805d51362a07823605a80586ced92914ab7de84fba813f" +checksum = "cfe2babc5f5b354eab9c0a0e40da3e69c4d77421c8b9b6ee03f97acc75bd7955" dependencies = [ "bufstream", "bytes", @@ -1713,21 +1719,39 @@ dependencies = [ "percent-encoding", "serde", "serde_json", - "socket2 0.4.9", + "socket2 0.5.3", "twox-hash", "url", ] +[[package]] +name = "mysql-common-derive" +version = "0.30.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"56b0d8a0db9bf6d2213e11f2c701cb91387b0614361625ab7b9743b41aa4938f" +dependencies = [ + "darling", + "heck", + "num-bigint", + "proc-macro-crate 1.3.1", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.31", + "termcolor", + "thiserror", +] + [[package]] name = "mysql_common" -version = "0.29.2" +version = "0.30.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9006c95034ccf7b903d955f210469119f6c3477fc9c9e7a7845ce38a3e665c2a" +checksum = "57349d5a326b437989b6ee4dc8f2f34b0cc131202748414712a8e7d98952fc8c" dependencies = [ - "base64 0.13.1", + "base64 0.21.3", "bigdecimal", "bindgen", - "bitflags 1.3.2", + "bitflags 2.4.0", "bitvec", "byteorder", "bytes", @@ -1738,6 +1762,7 @@ dependencies = [ "frunk", "lazy_static", "lexical", + "mysql-common-derive", "num-bigint", "num-traits", "rand", @@ -1991,11 +2016,12 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "1.1.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" +checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" dependencies = [ - "base64 0.13.1", + "base64 0.21.3", + "serde", ] [[package]] @@ -2160,6 +2186,40 @@ dependencies = [ "toml 0.5.11", ] +[[package]] +name = "proc-macro-crate" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +dependencies = [ + "once_cell", + "toml_edit", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = 
"1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + [[package]] name = "proc-macro2" version = "1.0.66" @@ -2211,9 +2271,9 @@ dependencies = [ [[package]] name = "r2d2_mysql" -version = "23.0.0" +version = "24.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9733d738ce65959a744f387bae69aa690a867e18d48e5486b171c47bc7b0c575" +checksum = "3fe5127e6c21971cdb9580f2f54cbe6d9c2226eb861036c3ca6d390c25f52574" dependencies = [ "mysql", "r2d2", @@ -2901,6 +2961,15 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "termcolor" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +dependencies = [ + "winapi-util", +] + [[package]] name = "termtree" version = "0.4.1" diff --git a/Cargo.toml b/Cargo.toml index 17c1cbbb7..30b44bf85 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -53,7 +53,7 @@ log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = { version = "0.4.24", default-features = false, features = ["clock"] } r2d2 = "0.8" -r2d2_mysql = "23.0" +r2d2_mysql = "24.0" r2d2_sqlite = { version = "0.22", features = ["bundled"] } rand = "0.8" derive_more = "0.99" From e3e111bd40baccfbc3e3ef113b38d7da6102c326 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 6 Sep 2023 21:00:18 +0200 Subject: [PATCH 0568/1003] docs: update readme --- README.md | 202 ++++++++++++++++++++++++++++++++-------------------- cSpell.json | 1 + 2 files changed, 125 insertions(+), 78 deletions(-) diff --git a/README.md b/README.md index 832af0d85..2f35e9017 100644 --- a/README.md +++ b/README.md @@ -1,95 +1,143 @@ # Torrust Tracker -[![container_wf_b]][container_wf] [![coverage_wf_b]][coverage_wf] [![testing_wf_b]][testing_wf] 
+[![container_wf_b]][container_wf] [![coverage_wf_b]][coverage_wf] [![deployment_wf_b]][deployment_wf] [![testing_wf_b]][testing_wf] -Torrust Tracker is a lightweight but incredibly high-performance and feature-rich BitTorrent tracker written in [Rust Language][rust]. +__Torrust Tracker__, is a [BitTorrent][bittorrent] Tracker (a service that matchmakes peers and collects statistics) written in [Rust Language][rust] and [axum] (a modern web application framework). ___This tracker aims to be respectful to established standards, (both [formal][BEP 00] and [otherwise][torrent_source_felid]).___ -It aims to provide a reliable and efficient solution for serving torrents to a vast number of peers while maintaining a high level of performance, robustness, extensibility, security, usability and with community-driven development. +> This is a [Torrust][torrust] project and is in active development. It is community supported as well as sponsored by [Nautilus Cyberneering][nautilus]. -_We have a [container guide][containers.md] to get started with Docker or Podman_ +- _We have a [container guide][containers.md] for those who wish to get started with __Docker__ or __Podman___ ## Key Features -* [x] Multiple UDP server and HTTP(S) server blocks for socket binding are possible. -* [x] Full IPv4 and IPv6 support for both UDP and HTTP(S). -* [x] Private & Whitelisted mode. -* [x] Built-in API. -* [x] Torrent whitelisting. -* [x] Peer authentication using time-bound keys. -* [x] [newTrackon][newtrackon] check is supported for both HTTP and UDP, where IPv4 and IPv6 are properly handled. -* [x] SQLite3 and MySQL persistence, loading and saving of the torrent hashes and downloads completed count. -* [x] Comprehensive documentation. -* [x] A complete suite of tests. See our [code coverage report][coverage]. +- [x] High Quality and Modern Rust Codebase. +- [x] [Documentation] Generated from Code Comments. +- [x] [Comprehensive Suit][coverage] of Unit and Functional Tests. 
+- [x] Good Performance in Busy Conditions. +- [x] Support for `UDP`, `HTTP`, and `TLS` Sockets. +- [x] Native `IPv4` and `IPv6` support. +- [x] Private & Whitelisted mode. +- [x] Tracker Management API. +- [x] Support [newTrackon][newtrackon] checks. +- [x] Persistent `SQLite3` or `MySQL` Databases. -## Implemented BEPs +## Implemented BitTorrent Enhancement Proposals (BEPs) +> _[Learn more about BitTorrent Enhancement Proposals][BEP 00]_ -* [BEP 03]: The BitTorrent Protocol. -* [BEP 07]: IPv6 Support. -* [BEP 15]: UDP Tracker Protocol for BitTorrent. -* [BEP 23]: Tracker Returns Compact Peer Lists. -* [BEP 27]: Private Torrents. -* [BEP 48]: Tracker Protocol Extension: Scrape. +- [BEP 03] : The BitTorrent Protocol. +- [BEP 07] : IPv6 Support. +- [BEP 15] : UDP Tracker Protocol for BitTorrent. +- [BEP 23] : Tracker Returns Compact Peer Lists. +- [BEP 27] : Private Torrents. +- [BEP 48] : Tracker Protocol Extension: Scrape. ## Getting Started -Requirements: +### Container Version -* Rust Stable `1.68` -* You might have problems compiling with a machine or docker container with low resources. It has been tested with docker containers with 6 CPUs, 7.5 GM of memory and 2GB of swap. +The Torrust Tracker is [deployed to DockerHub][dockerhub_torrust_tracker], you can run a demo immediately with the following commands: -You can follow the [documentation] to install and use Torrust Tracker in different ways, but if you want to give it a quick try, you can use the following commands: +#### Docker: -```s -git clone https://github.com/torrust/torrust-tracker.git \ - && cd torrust-tracker \ - && cargo build --release \ - && mkdir -p ./storage/tracker/lib/database \ - && mkdir -p ./storage/tracker/lib/tls +```sh +docker run -it torrust/tracker:develop ``` +> Please read our [container guide][containers.md] for more information. 
-### Configuration +#### Podman: -The [default configuration folder: `/share/default/config`][share.default.config]: +```sh +podman run -it torrust/tracker:develop +``` +> Please read our [container guide][containers.md] for more information. + +### Development Version + +- Please assure you have the ___[latest stable (or nightly) version of rust][rust]___. +- Please assure that you computer has enough ram. ___Recommended 16GB.___ + +#### Checkout, Test and Run: + +```sh +# Checkout repository into a new folder: +git clone https://github.com/torrust/torrust-tracker.git + +# Change into directory and create a empty database file: +cd torrust-tracker +mkdir -p ./storage/tracker/lib/database/ +touch ./storage/tracker/lib/database/sqlite3.db + +# Check all tests in application: +cargo test --tests --benches --examples --workspace --all-targets --all-features + +# Run the tracker: +cargo run +``` +#### Customization: -- Contains the [development default][src.bootstrap.config.default] i.e: [`tracker.development.sqlite3.toml`][tracker.development.sqlite3.toml]. +```sh +# Copy the default configuration into the standard location: +mkdir -p ./storage/tracker/etc/ +cp ./share/default/config/tracker.development.sqlite3.toml ./storage/tracker/etc/tracker.toml -- Also contains the container defaults: [`sqlite3`][tracker.container.sqlite3.toml] and [`mysql`][tracker.container.mysql.toml]. +# Customize the tracker configuration (for example): +vim ./storage/tracker/etc/tracker.toml -To override the default configuration there is two options: +# Run the tracker with the updated configuration: +TORRUST_TRACKER_PATH_CONFIG="./storage/tracker/etc/tracker.toml" cargo run +``` -- Configure a different configuration path by setting the [`TORRUST_TRACKER_PATH_CONFIG`][src.bootstrap.config.path.config] environmental variable. 
+_Optionally, you may choose to supply the entire configuration as an environmental variable:_ -- Supply the entire configuration via the [`TORRUST_TRACKER_CONFIG`][src.bootstrap.config.config] environmental variable. +```sh +# Use a configuration supplied on an environmental variable: +TORRUST_TRACKER_CONFIG=$(cat "./storage/tracker/etc/tracker.toml") cargo run +``` +_For deployment you __should__ override the `api_admin_token` by using an environmental variable:_ -> NOTE: It is recommended for production you override the `api admin token` by placing your secret in the [`ENV_VAR_API_ADMIN_TOKEN`][src.bootstrap.config.admin.token] environmental variable. +```sh +# Generate a Secret Token: +gpg --armor --gen-random 1 10 | tee ./storage/tracker/lib/tracker_api_admin_token.secret +chmod go-rwx ./storage/tracker/lib/tracker_api_admin_token.secret +# Override secret in configuration using an environmental variable: +TORRUST_TRACKER_CONFIG=$(cat "./storage/tracker/etc/tracker.toml") \ + TORRUST_TRACKER_API_ADMIN_TOKEN=$(cat "./storage/tracker/lib/tracker_api_admin_token.secret") \ + cargo run +``` + +> Please view our [crate documentation][documentation] for more detailed instructions. ### Services -After running the tracker these services will be available (as defined in the default configuration): +The following services are provided by the default configuration: + +- UDP _(tracker)_ + - `udp://127.0.0.1:6969/announce`. +- HTTP _(tracker)_ + - `http://127.0.0.1:6969/announce`. +- API _(management)_ + - `http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken`. -* UDP tracker: `udp://127.0.0.1:6969/announce`. -* HTTP tracker: `http://127.0.0.1:6969/announce`. -* API: `http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken`. ## Documentation -* [Crate documentation] -* [API `v1`] -* [HTTP Tracker] -* [UDP Tracker] +- [Management API (Version 1)][api] +- [Tracker (HTTP/TLS)][http] +- [Tracker (UDP)][udp] ## Contributing +This is an open-source community supported project.
We welcome contributions from the community! -How can you contribute? +__How can you contribute?__ -* Bug reports and feature requests. -* Code contributions. You can start by looking at the issues labeled "[good first issues]". -* Documentation improvements. Check the [documentation] and [API documentation] for typos, errors, or missing information. -* Participation in the community. You can help by answering questions in the [discussions]. +- Bug reports and feature requests. +- Code contributions. You can start by looking at the issues labeled "[good first issues]". +- Documentation improvements. Check the [documentation] and [API documentation] for typos, errors, or missing information. +- Participation in the community. You can help by answering questions in the [discussions]. ## License @@ -101,41 +149,39 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D -[container_wf]: https://github.com/torrust/torrust-tracker/actions/workflows/container.yaml -[container_wf_b]: https://github.com/torrust/torrust-tracker/actions/workflows/container.yaml/badge.svg -[coverage_wf]: https://github.com/torrust/torrust-tracker/actions/workflows/coverage.yaml -[coverage_wf_b]: https://github.com/torrust/torrust-tracker/actions/workflows/coverage.yaml/badge.svg -[testing_wf]: https://github.com/torrust/torrust-tracker/actions/workflows/testing.yaml -[testing_wf_b]: https://github.com/torrust/torrust-tracker/actions/workflows/testing.yaml/badge.svg +[container_wf]: ../../actions/workflows/container.yaml +[container_wf_b]: ../../actions/workflows/container.yaml/badge.svg +[coverage_wf]: ../../actions/workflows/coverage.yaml +[coverage_wf_b]: ../../actions/workflows/coverage.yaml/badge.svg +[deployment_wf]: ../../actions/workflows/deployment.yaml +[deployment_wf_b]: ../../actions/workflows/deployment.yaml/badge.svg +[testing_wf]: ../../actions/workflows/testing.yaml +[testing_wf_b]: ../../actions/workflows/testing.yaml/badge.svg +[bittorrent]: 
http://bittorrent.org/ [rust]: https://www.rust-lang.org/ +[axum]: https://github.com/tokio-rs/axum [newtrackon]: https://newtrackon.com/ [coverage]: https://app.codecov.io/gh/torrust/torrust-tracker +[torrust]: https://torrust.com/ +[dockerhub_torrust_tracker]: https://hub.docker.com/r/torrust/tracker/tags + +[torrent_source_felid]: https://github.com/qbittorrent/qBittorrent/discussions/19406 + +[BEP 00]: https://www.bittorrent.org/beps/bep_0000.html [BEP 03]: https://www.bittorrent.org/beps/bep_0003.html [BEP 07]: https://www.bittorrent.org/beps/bep_0007.html -[BEP 15]: http://www.bittorrent.org/beps/bep_0015.html -[BEP 23]: http://bittorrent.org/beps/bep_0023.html -[BEP 27]: http://bittorrent.org/beps/bep_0027.html -[BEP 48]: http://bittorrent.org/beps/bep_0048.html +[BEP 15]: https://www.bittorrent.org/beps/bep_0015.html +[BEP 23]: https://www.bittorrent.org/beps/bep_0023.html +[BEP 27]: https://www.bittorrent.org/beps/bep_0027.html +[BEP 48]: https://www.bittorrent.org/beps/bep_0048.html [containers.md]: ./docs/containers.md -[share.default.config]: ./share/default/config/ -[tracker.development.sqlite3.toml]: ./share/default/config/tracker.development.sqlite3.toml -[src.bootstrap.config.default]: ./src/bootstrap/config.rs#L18 -[tracker.container.sqlite3.toml]: ./share/default/config/tracker.container.sqlite3.toml -[tracker.container.mysql.toml]: ./share/default/config/tracker.container.mysql.toml -[share.container.entry_script_sh.default]: ./share/container/entry_script_sh#L10 - -[src.bootstrap.config.path.config]: ./src/bootstrap/config.rs#L15 -[src.bootstrap.config.config]: ./src/bootstrap/config.rs#L11 -[src.bootstrap.config.admin.token]: ./src/bootstrap/config.rs#L12 - -[Crate documentation]: https://docs.rs/torrust-tracker/ -[API `v1`]: https://docs.rs/torrust-tracker/3.0.0-alpha.4/torrust_tracker/servers/apis/v1 -[HTTP Tracker]: https://docs.rs/torrust-tracker/3.0.0-alpha.4/torrust_tracker/servers/http -[UDP Tracker]: 
https://docs.rs/torrust-tracker/3.0.0-alpha.4/torrust_tracker/servers/udp +[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.4/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.4/torrust_tracker/servers/http +[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.4/torrust_tracker/servers/udp [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [documentation]: https://docs.rs/torrust-tracker/ @@ -144,7 +190,7 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [COPYRIGHT]: ./COPYRIGHT -[nautilus]: https://nautilus-cyberneering.de/ +[nautilus]: https://github.com/orgs/Nautilus-Cyberneering/ [Dutch Bits]: https://dutchbits.nl [Naim A.]: https://github.com/naim94a/udpt [greatest-ape]: https://github.com/greatest-ape/aquatic diff --git a/cSpell.json b/cSpell.json index e4ac1d90b..a02a9b8e8 100644 --- a/cSpell.json +++ b/cSpell.json @@ -63,6 +63,7 @@ "libz", "LOGNAME", "Lphant", + "matchmakes", "metainfo", "middlewares", "mockall", From 2b5b16875c7041ccd1662aa6156a119bf95a9434 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 7 Sep 2023 09:52:04 +0200 Subject: [PATCH 0569/1003] fix: deployment workflow bugfix --- .github/workflows/deployment.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 87bc61484..73c024143 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -8,9 +8,7 @@ on: jobs: secrets: name: Secrets - needs: context environment: dockerhub-torrust - if: needs.context.outputs.continue == 'true' runs-on: ubuntu-latest outputs: From 18ca78fad6a89eab90d248b038fcf7100c5a41db Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 7 Sep 2023 14:49:01 +0200 Subject: [PATCH 0570/1003] dev: vairous fixes --- .github/workflows/container.yaml | 16 ++++++++++------ docs/containers.md | 6 ++++-- 
share/container/message | 2 ++ 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index da38db71f..86ee8692d 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -65,11 +65,15 @@ jobs: - id: check name: Check Context run: | - if [[ "${{ github.event_name }}" == "push" && ( "${{ github.ref }}" == "refs/heads/main" || "${{ github.ref }}" == "refs/heads/develop" || "${{ github.ref }}" == "refs/heads/docker" ) ]] || - [[ "${{ github.event_name }}" == "push" && "${{ github.ref }}" =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then - if [[ "${{ github.repository }}" == "torrust/torrust-tracker" ]]; then - echo "Context is torrust/torrust-tracker, and push is: main, develop, docker, or a tag of v*.*.*" - echo "continue=true" >> $GITHUB_OUTPUT + if [[ "${{ github.repository }}" == "torrust/torrust-tracker" ]]; then + if [[ "${{ github.event_name }}" == "push" ]]; then + if [[ "${{ github.ref }}" == "refs/heads/main" || + "${{ github.ref }}" == "refs/heads/develop" || + "${{ github.ref }}" =~ ^v(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ ]]; then + + echo "Context is torrust/torrust-tracker, and push is: main, develop, docker, or a tag with a semantic version" + echo "continue=true" >> $GITHUB_OUTPUT + fi fi fi @@ -126,7 +130,7 @@ jobs: uses: docker/build-push-action@v4 with: file: ./Containerfile - push: ${{ github.event_name != 'pull_request' }} + push: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} cache-from: type=gha diff --git a/docs/containers.md b/docs/containers.md index b9aa05a7a..dcf281e6e 100644 --- a/docs/containers.md +++ b/docs/containers.md @@ -196,7 +196,7 @@ mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/ ## Run Torrust Tracker Container Image 
docker run -it \ --env TORRUST_TRACKER_API_ADMIN_TOKEN="MySecretToken" \ - --env USER_ID"$(id -u)" \ + --env USER_ID="$(id -u)" \ --publish 0.0.0.0:7070:7070/tcp \ --publish 0.0.0.0:6969:6969/udp \ --publish 0.0.0.0:1212:1212/tcp \ @@ -218,7 +218,7 @@ mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/ ## Run Torrust Tracker Container Image podman run -it \ --env TORRUST_TRACKER_API_ADMIN_TOKEN="MySecretToken" \ - --env USER_ID"$(id -u)" \ + --env USER_ID="$(id -u)" \ --publish 0.0.0.0:7070:7070/tcp \ --publish 0.0.0.0:6969:6969/udp \ --publish 0.0.0.0:1212:1212/tcp \ @@ -367,6 +367,7 @@ And finally, you can run the container: ```s docker run \ + --env USER_ID="$(id -u)" \ --publish 6969:6969/udp \ --publish 7070:7070/tcp \ --publish 1212:1212/tcp \ @@ -381,6 +382,7 @@ Detach from container logs when the container starts. By default, the command li ```s docker run \ --detach + --env USER_ID="$(id -u)" \ --publish 6969:6969/udp \ --publish 7070:7070/tcp \ --publish 1212:1212/tcp \latest diff --git a/share/container/message b/share/container/message index cd88b44ae..6bfd6bfb8 100644 --- a/share/container/message +++ b/share/container/message @@ -1,2 +1,4 @@ +Lovely welcome to our Torrust Tracker Container! 
+ run 'torrust-tracker' to start tracker From a53ee283d0f350826bdba4c2ef9ecc490d36ae57 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 10 Sep 2023 18:12:42 +0200 Subject: [PATCH 0571/1003] release: bump alpha version --- Cargo.lock | 12 ++++++------ Cargo.toml | 12 ++++++------ README.md | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/test-helpers/Cargo.toml | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0ab5ffc45..64aed490d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3157,7 +3157,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.4" +version = "3.0.0-alpha.5" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -3203,7 +3203,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.4" +version = "3.0.0-alpha.5" dependencies = [ "config", "log", @@ -3218,7 +3218,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-alpha.4" +version = "3.0.0-alpha.5" dependencies = [ "criterion", "error-chain", @@ -3226,7 +3226,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.4" +version = "3.0.0-alpha.5" dependencies = [ "log", "thiserror", @@ -3234,7 +3234,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.4" +version = "3.0.0-alpha.5" dependencies = [ "derive_more", "serde", @@ -3242,7 +3242,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.4" +version = "3.0.0-alpha.5" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index 30b44bf85..76ec97919 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,7 +29,7 @@ license-file = "COPYRIGHT" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-alpha.4" +version = "3.0.0-alpha.5" [dependencies] @@ -66,10 +66,10 @@ axum = 
"0.6.20" axum-server = { version = "0.5", features = ["tls-rustls"] } axum-client-ip = "0.4.1" tower-http = { version = "0.4.3", features = ["compression-full"] } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.4", path = "contrib/bencode"} -torrust-tracker-primitives = { version = "3.0.0-alpha.4", path = "packages/primitives" } -torrust-tracker-configuration = { version = "3.0.0-alpha.4", path = "packages/configuration" } -torrust-tracker-located-error = { version = "3.0.0-alpha.4", path = "packages/located-error" } +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.5", path = "contrib/bencode"} +torrust-tracker-primitives = { version = "3.0.0-alpha.5", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.5", path = "packages/configuration" } +torrust-tracker-located-error = { version = "3.0.0-alpha.5", path = "packages/located-error" } multimap = "0.9" hyper = "0.14" @@ -80,7 +80,7 @@ serde_urlencoded = "0.7" serde_repr = "0.1" serde_bytes = "0.11" local-ip-address = "0.5" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.4", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.5", path = "packages/test-helpers" } [workspace] members = [ diff --git a/README.md b/README.md index 2f35e9017..7e3d246e5 100644 --- a/README.md +++ b/README.md @@ -179,13 +179,13 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [containers.md]: ./docs/containers.md -[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.4/torrust_tracker/servers/apis/v1 -[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.4/torrust_tracker/servers/http -[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.4/torrust_tracker/servers/udp +[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.5/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.5/torrust_tracker/servers/http +[udp]: 
https://docs.rs/torrust-tracker/3.0.0-alpha.5/torrust_tracker/servers/udp [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [documentation]: https://docs.rs/torrust-tracker/ -[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.4/torrust_tracker/servers/apis/v1 +[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.5/torrust_tracker/servers/apis/v1 [discussions]: https://github.com/torrust/torrust-tracker/discussions [COPYRIGHT]: ./COPYRIGHT diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 93b45c0a7..ba2f8a466 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -22,8 +22,8 @@ config = "0.13" toml = "0.7" log = { version = "0.4", features = ["release_max_level_info"] } thiserror = "1.0" -torrust-tracker-primitives = { version = "3.0.0-alpha.4", path = "../primitives" } -torrust-tracker-located-error = { version = "3.0.0-alpha.4", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.5", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.5", path = "../located-error" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 91831399c..be3cc352d 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -17,5 +17,5 @@ version.workspace = true [dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.4", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-alpha.4", path = "../primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.5", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.5", path = "../primitives" } From 6665a059bb1668d64513b6bc61ae7c5184b0bde7 Mon Sep 17 00:00:00 2001 From: 
Cameron Garnham Date: Sun, 10 Sep 2023 17:55:19 +0200 Subject: [PATCH 0572/1003] dev: use releases branch prefix --- .github/workflows/container.yaml | 49 +++++++++++++++++++++++-------- .github/workflows/deployment.yaml | 10 +++---- 2 files changed, 42 insertions(+), 17 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 86ee8692d..93ce87506 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -5,8 +5,7 @@ on: branches: - "develop" - "main" - tags: - - "v*" + - "releases/**/*" pull_request: branches: - "develop" @@ -60,6 +59,8 @@ jobs: outputs: continue: ${{ steps.check.outputs.continue }} + type: ${{ steps.check.outputs.type }} + version: ${{ steps.check.outputs.version }} steps: - id: check @@ -67,12 +68,25 @@ jobs: run: | if [[ "${{ github.repository }}" == "torrust/torrust-tracker" ]]; then if [[ "${{ github.event_name }}" == "push" ]]; then - if [[ "${{ github.ref }}" == "refs/heads/main" || - "${{ github.ref }}" == "refs/heads/develop" || - "${{ github.ref }}" =~ ^v(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ ]]; then + if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then + + echo "type=development" >> $GITHUB_OUTPUT + echo "continue=true" >> $GITHUB_OUTPUT + + fi + if [[ "${{ github.ref }}" == "refs/heads/develop" ]]; then + + echo "type=development" >> $GITHUB_OUTPUT + echo "continue=true" >> $GITHUB_OUTPUT + + fi + if [[ "${{ github.ref }}" =~ ^(refs\/heads\/releases\/)(v)(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ ]]; then + + version=$(echo "${{ github.ref }}" | sed -n -E 's/^(refs\/heads\/releases\/)//p') + echo "version=$version" >> $GITHUB_OUTPUT + echo "type=release" >> $GITHUB_OUTPUT + echo 
"continue=true" >> $GITHUB_OUTPUT - echo "Context is torrust/torrust-tracker, and push is: main, develop, docker, or a tag with a semantic version" - echo "continue=true" >> $GITHUB_OUTPUT fi fi fi @@ -103,17 +117,28 @@ jobs: runs-on: ubuntu-latest steps: - - id: meta - name: Docker meta + - id: meta_development + if: needs.secrets.check.type == 'development' + name: Docker Meta (development) uses: docker/metadata-action@v4 with: images: | "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" tags: | type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern={{major}}.{{minor}} + + - id: meta_release + if: needs.secrets.check.type == 'release' + name: Docker Meta (release) + uses: docker/metadata-action@v4 + with: + images: | + "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" + tags: | + type=semver,value=${{ needs.secrets.check.version }},pattern={{raw}} + type=semver,value=${{ needs.secrets.check.version }},pattern={{version}} + type=semver,value=${{ needs.secrets.check.version }},pattern=v{{major}} + type=semver,value=${{ needs.secrets.check.version }},pattern={{major}}.{{minor}} - id: login name: Login to Docker Hub diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 73c024143..e3b042fd5 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -2,8 +2,8 @@ name: Deployment on: push: - tags: - - "v*" + branches: + - "releases/**/*" jobs: secrets: @@ -18,8 +18,8 @@ jobs: - id: check name: Check env: - CRATES_TOKEN: "${{ secrets.CRATES_TOKEN }}" - if: "${{ env.CRATES_TOKEN != '' }}" + CARGO_REGISTRY_TOKEN: "${{ secrets.CARGO_REGISTRY_TOKEN }}" + if: "${{ env.CARGO_REGISTRY_TOKEN != '' }}" run: echo "continue=true" >> $GITHUB_OUTPUT test: @@ -66,7 +66,7 @@ jobs: toolchain: stable env: - CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} + CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} - id: 
publish name: Publish Crates From d09fa65b77a330525ee4f037a06c2a1f91cdefbb Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 11 Sep 2023 10:56:44 +0200 Subject: [PATCH 0573/1003] dev: fixup container workfows bug --- .github/workflows/container.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 93ce87506..d541cc3aa 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -118,7 +118,7 @@ jobs: steps: - id: meta_development - if: needs.secrets.check.type == 'development' + if: needs.context.check.type == 'development' name: Docker Meta (development) uses: docker/metadata-action@v4 with: @@ -128,17 +128,17 @@ jobs: type=ref,event=branch - id: meta_release - if: needs.secrets.check.type == 'release' + if: needs.context.check.type == 'release' name: Docker Meta (release) uses: docker/metadata-action@v4 with: images: | "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" tags: | - type=semver,value=${{ needs.secrets.check.version }},pattern={{raw}} - type=semver,value=${{ needs.secrets.check.version }},pattern={{version}} - type=semver,value=${{ needs.secrets.check.version }},pattern=v{{major}} - type=semver,value=${{ needs.secrets.check.version }},pattern={{major}}.{{minor}} + type=semver,value=${{ needs.context.check.version }},pattern={{raw}} + type=semver,value=${{ needs.context.check.version }},pattern={{version}} + type=semver,value=${{ needs.context.check.version }},pattern=v{{major}} + type=semver,value=${{ needs.context.check.version }},pattern={{major}}.{{minor}} - id: login name: Login to Docker Hub From d9a506a54687ac6c7fda07abc4ae8bcc4ec3e5fd Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 11 Sep 2023 10:57:46 +0200 Subject: [PATCH 0574/1003] docs: draft release process document --- docs/release_process.md | 61 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 
insertions(+) create mode 100644 docs/release_process.md diff --git a/docs/release_process.md b/docs/release_process.md new file mode 100644 index 000000000..cc401c11a --- /dev/null +++ b/docs/release_process.md @@ -0,0 +1,61 @@ +# Torrust Tracker Release Process (draft) + +The purpose of this document is to describe the release process. + +## Overview + +Torrust Tracker is published in this order: + +1. `develop` branch is ready for publishing. +2. create `release: version (semantic version)` commit. +3. push release commit to `main` branch. +4. check all status checks succeed for `main` branch. +5. push `main` branch to `releases/v(semantic version)` branch. +6. check all status checks succeed for `releases/v(semantic version)` branch. +7. create signed `v(semantic version)` tag. +8. create github release from `v(semantic version)` tag. +9. merge `main` branch into `develop` branch. + +- At step `1.`, `develop` is automatically published to `dockerhub`. +- At step `3.`, `main` is automatically published to `dockerhub`. +- At step `5.`, `releases/v(semantic version)` is automatically published to `dockerhub` and `crates.io`. + +## Development Branch + +The `develop` branch, the default branch for the repository, is automatically published to dockerhub with the `develop` label. This process happens automatically when a pull request is merged in, and the `container.yaml` workflow is triggered. + +## Main Branch + +The `main` branch is the staging branch for releases. + +A release commit needs to be made that prepares the repository for the release; this commit should include: + +- Changing the semantic version. +- Finalizing the release notes and changelog. + +The title of the commit should be: `release: version (semantic version)`. + +This commit should be committed upon the head of the development branch, and pushed to the `main` branch. + +Once the release has succeeded, the `main` branch should be merged back into the `develop` branch. 
+ +## Releases Branch + +According to the pattern `releases/v(semantic version)`, the `main` branch head is published to here to trigger the deployment workflows. + +The repository deployment environment for crates.io is only available for the `releases/**/*` pattern of branches. + +Once the publishing workflows have succeeded, we can make the git-tag. + +## Release Tag + +Create a Signed Tag with a short message in the form `v(semantic version)` and push it to the repository. + +## Github Release + +From the newly published tag, create a Github Release using the web-interface. + + +## Merge back into development branch + +After this is all successful, the `main` branch should be merged into the `develop` branch. From 7365423eb8748a60151615b5be1a201402f1d9a7 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 11 Sep 2023 11:17:25 +0200 Subject: [PATCH 0575/1003] ci: use coverage environment --- .github/workflows/coverage.yaml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 6a99fb11a..df308e329 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -8,8 +8,27 @@ env: CARGO_TERM_COLOR: always jobs: + secrets: + name: Secrets + environment: coverage + runs-on: ubuntu-latest + + outputs: + continue: ${{ steps.check.outputs.continue }} + + steps: + - id: check + name: Check + env: + CODECOV_TOKEN: "${{ secrets.CODECOV_TOKEN }}" + if: "${{ env.CODECOV_TOKEN != '' }}" + run: echo "continue=true" >> $GITHUB_OUTPUT + report: name: Report + environment: coverage + needs: secrets + if: needs.secrets.outputs.continue == 'true' runs-on: ubuntu-latest env: CARGO_INCREMENTAL: "0" From da476a76f78a15c9e7b545ee8658705645ab8e37 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 11 Sep 2023 12:27:52 +0200 Subject: [PATCH 0576/1003] dev: debug container workflow --- .github/workflows/container.yaml | 20 +++++++++++++------- cSpell.json | 1 + 2 files 
changed, 14 insertions(+), 7 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index d541cc3aa..29e03a7fa 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -54,7 +54,6 @@ jobs: context: name: Context - needs: test runs-on: ubuntu-latest outputs: @@ -72,28 +71,35 @@ jobs: echo "type=development" >> $GITHUB_OUTPUT echo "continue=true" >> $GITHUB_OUTPUT + echo "On \`main\` Branch, Type: \`development\`" - fi - if [[ "${{ github.ref }}" == "refs/heads/develop" ]]; then + elif [[ "${{ github.ref }}" == "refs/heads/develop" ]]; then echo "type=development" >> $GITHUB_OUTPUT echo "continue=true" >> $GITHUB_OUTPUT + echo "On \`develop\` Branch, Type: \`development\`" - fi - if [[ "${{ github.ref }}" =~ ^(refs\/heads\/releases\/)(v)(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ ]]; then + elif [[ "${{ github.ref }}" =~ ^(refs\/heads\/releases\/)(v)(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ ]]; then version=$(echo "${{ github.ref }}" | sed -n -E 's/^(refs\/heads\/releases\/)//p') echo "version=$version" >> $GITHUB_OUTPUT echo "type=release" >> $GITHUB_OUTPUT echo "continue=true" >> $GITHUB_OUTPUT + echo "In \`releases/$version\` Branch, Type: \`release\`" + else + echo "Not Correct Branch. Will Not Continue" fi + else + echo "Not a Push Event. Will Not Continue" fi + else + echo "On a Forked Repository. 
Will Not Continue" fi secrets: name: Secrets - needs: context + needs: [test, context] environment: dockerhub-torrust if: needs.context.outputs.continue == 'true' runs-on: ubuntu-latest @@ -112,7 +118,7 @@ jobs: publish: name: Publish environment: dockerhub-torrust - needs: secrets + needs: [secrets, context] if: needs.secrets.outputs.continue == 'true' runs-on: ubuntu-latest diff --git a/cSpell.json b/cSpell.json index a02a9b8e8..c9b547c90 100644 --- a/cSpell.json +++ b/cSpell.json @@ -38,6 +38,7 @@ "dockerhub", "downloadedi", "dtolnay", + "elif", "filesd", "Freebox", "gecos", From e6ddda10668c49dad4b61519986b750a2654ea07 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 11 Sep 2023 15:47:52 +0200 Subject: [PATCH 0577/1003] fixup: context output --- .github/workflows/container.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 29e03a7fa..2a983f988 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -124,7 +124,7 @@ jobs: steps: - id: meta_development - if: needs.context.check.type == 'development' + if: needs.context.outputs.type == 'development' name: Docker Meta (development) uses: docker/metadata-action@v4 with: @@ -134,17 +134,17 @@ jobs: type=ref,event=branch - id: meta_release - if: needs.context.check.type == 'release' + if: needs.context.outputs.type == 'release' name: Docker Meta (release) uses: docker/metadata-action@v4 with: images: | "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" tags: | - type=semver,value=${{ needs.context.check.version }},pattern={{raw}} - type=semver,value=${{ needs.context.check.version }},pattern={{version}} - type=semver,value=${{ needs.context.check.version }},pattern=v{{major}} - type=semver,value=${{ needs.context.check.version }},pattern={{major}}.{{minor}} + type=semver,value=${{ needs.context.outputs.version }},pattern={{raw}} + 
type=semver,value=${{ needs.context.outputs.version }},pattern={{version}} + type=semver,value=${{ needs.context.outputs.version }},pattern=v{{major}} + type=semver,value=${{ needs.context.outputs.version }},pattern={{major}}.{{minor}} - id: login name: Login to Docker Hub From 835b499a596e109621d3636c6347347028089b56 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 12 Sep 2023 10:06:51 +0200 Subject: [PATCH 0578/1003] dev: more container workflow fixups --- .github/workflows/container.yaml | 45 +++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 9 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 2a983f988..8fd4ca201 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -115,17 +115,16 @@ jobs: if: "${{ env.DOCKER_HUB_ACCESS_TOKEN != '' }}" run: echo "continue=true" >> $GITHUB_OUTPUT - publish: - name: Publish + publish_development: + name: Publish (Development) environment: dockerhub-torrust needs: [secrets, context] - if: needs.secrets.outputs.continue == 'true' + if: needs.secrets.outputs.continue == 'true' && needs.context.outputs.type == 'development' runs-on: ubuntu-latest steps: - - id: meta_development - if: needs.context.outputs.type == 'development' - name: Docker Meta (development) + - id: meta + name: Docker Meta uses: docker/metadata-action@v4 with: images: | @@ -133,9 +132,37 @@ jobs: tags: | type=ref,event=branch - - id: meta_release - if: needs.context.outputs.type == 'release' - name: Docker Meta (release) + - id: login + name: Login to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_HUB_USERNAME }} + password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} + + - id: setup + name: Setup Toolchain + uses: docker/setup-buildx-action@v2 + + - name: Build and push + uses: docker/build-push-action@v4 + with: + file: ./Containerfile + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels 
}} + cache-from: type=gha + cache-to: type=gha + + publish_release: + name: Publish (Release) + environment: dockerhub-torrust + needs: [secrets, context] + if: needs.secrets.outputs.continue == 'true' && needs.context.outputs.type == 'release' + runs-on: ubuntu-latest + + steps: + - id: meta + name: Docker Meta uses: docker/metadata-action@v4 with: images: | From d5d797937402bc697c34fbddc441ab08ce05060a Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 12 Sep 2023 10:32:04 +0200 Subject: [PATCH 0579/1003] dev: coverage: change to pull request target --- .github/workflows/coverage.yaml | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index df308e329..7cfd92025 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -2,7 +2,11 @@ name: Coverage on: push: - pull_request: + branches: + - develop + pull_request_target: + branches: + - develop env: CARGO_TERM_COLOR: always @@ -36,10 +40,18 @@ jobs: RUSTDOCFLAGS: "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests" steps: - - id: checkout - name: Checkout Repository + - id: checkout_push + if: github.event_name == 'push' + name: Checkout Repository (Push) uses: actions/checkout@v4 + - id: checkout_pull_request + if: github.event_name == 'pull_request' + name: Checkout Repository (Pull Request) + uses: actions/checkout@v4 + with: + ref: "refs/pull/${{ github.event.pull_request.number }}/head" + - id: setup name: Setup Toolchain uses: dtolnay/rust-toolchain@stable From 8eff67f11c2d2356700c2a9bf38c22b79c1cb488 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 12 Sep 2023 11:30:37 +0200 Subject: [PATCH 0580/1003] dev: fixup, fix event_name test --- .github/workflows/coverage.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/coverage.yaml 
b/.github/workflows/coverage.yaml index 7cfd92025..cf7eb6f6d 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -45,9 +45,9 @@ jobs: name: Checkout Repository (Push) uses: actions/checkout@v4 - - id: checkout_pull_request - if: github.event_name == 'pull_request' - name: Checkout Repository (Pull Request) + - id: checkout_pull_request_target + if: github.event_name == 'pull_request_target' + name: Checkout Repository (Pull Request Target) uses: actions/checkout@v4 with: ref: "refs/pull/${{ github.event.pull_request.number }}/head" From d2e3757808863067787756043fdd54803afb2d05 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 12 Sep 2023 08:34:28 +0200 Subject: [PATCH 0581/1003] dev: rename database driver environmental variable from: `TORRUST_TRACKER_DATABASE` to: `TORRUST_TRACKER_DATABASE_DRIVER` --- Containerfile | 4 ++-- compose.yaml | 2 +- docs/containers.md | 2 +- share/container/entry_script_sh | 10 +++++----- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Containerfile b/Containerfile index 229500cd8..be71017db 100644 --- a/Containerfile +++ b/Containerfile @@ -96,14 +96,14 @@ RUN ["/busybox/cp", "-sp", "/busybox/sh","/busybox/cat","/busybox/ls","/busybox/ COPY --from=gcc --chmod=0555 /usr/local/bin/su-exec /bin/su-exec ARG TORRUST_TRACKER_PATH_CONFIG="/etc/torrust/tracker/tracker.toml" -ARG TORRUST_TRACKER_DATABASE="sqlite3" +ARG TORRUST_TRACKER_DATABASE_DRIVER="sqlite3" ARG USER_ID=1000 ARG UDP_PORT=6969 ARG HTTP_PORT=7070 ARG API_PORT=1212 ENV TORRUST_TRACKER_PATH_CONFIG=${TORRUST_TRACKER_PATH_CONFIG} -ENV TORRUST_TRACKER_DATABASE=${TORRUST_TRACKER_DATABASE} +ENV TORRUST_TRACKER_DATABASE_DRIVER=${TORRUST_TRACKER_DATABASE_DRIVER} ENV USER_ID=${USER_ID} ENV UDP_PORT=${UDP_PORT} ENV HTTP_PORT=${HTTP_PORT} diff --git a/compose.yaml b/compose.yaml index 02f95bccc..672ca6d0f 100644 --- a/compose.yaml +++ b/compose.yaml @@ -4,7 +4,7 @@ services: image: torrust-tracker:release tty: true environment: - - 
TORRUST_TRACKER_DATABASE=${TORRUST_TRACKER_DATABASE:-mysql} + - TORRUST_TRACKER_DATABASE_DRIVER=${TORRUST_TRACKER_DATABASE_DRIVER:-mysql} - TORRUST_TRACKER_API_ADMIN_TOKEN=${TORRUST_TRACKER_API_ADMIN_TOKEN:-MyAccessToken} networks: - server_side diff --git a/docs/containers.md b/docs/containers.md index dcf281e6e..737ce40a0 100644 --- a/docs/containers.md +++ b/docs/containers.md @@ -140,7 +140,7 @@ The following environmental variables can be set: - `TORRUST_TRACKER_PATH_CONFIG` - The in-container path to the tracker configuration file, (default: `"/etc/torrust/tracker/tracker.toml"`). - `TORRUST_TRACKER_API_ADMIN_TOKEN` - Override of the admin token. If set, this value overrides any value set in the config. -- `TORRUST_TRACKER_DATABASE` - The database type used for the container, (options: `sqlite3`, `mysql`, default `sqlite3`). Please Note: This dose not override the database configuration within the `.toml` config file. +- `TORRUST_TRACKER_DATABASE_DRIVER` - The database type used for the container, (options: `sqlite3`, `mysql`, default `sqlite3`). Please Note: This does not override the database configuration within the `.toml` config file. - `TORRUST_TRACKER_CONFIG` - Load config from this environmental variable instead from a file, (i.e: `TORRUST_TRACKER_CONFIG=$(cat tracker-tracker.toml)`). - `USER_ID` - The user id for the runtime crated `torrust` user. Please Note: This user id should match the ownership of the host-mapped volumes, (default `1000`). - `UDP_PORT` - The port for the UDP tracker. This should match the port used in the configuration, (default `6969`). 
diff --git a/share/container/entry_script_sh b/share/container/entry_script_sh index 88a832b40..94dfa6b81 100644 --- a/share/container/entry_script_sh +++ b/share/container/entry_script_sh @@ -26,8 +26,8 @@ chmod -R 2770 /var/lib/torrust /var/log/torrust /etc/torrust # Install the database and config: -if [ -n "$TORRUST_TRACKER_DATABASE" ]; then - if cmp_lc "$TORRUST_TRACKER_DATABASE" "sqlite3"; then +if [ -n "$TORRUST_TRACKER_DATABASE_DRIVER" ]; then + if cmp_lc "$TORRUST_TRACKER_DATABASE_DRIVER" "sqlite3"; then # Select sqlite3 empty database default_database="/usr/share/torrust/default/database/tracker.sqlite3.db" @@ -35,7 +35,7 @@ if [ -n "$TORRUST_TRACKER_DATABASE" ]; then # Select sqlite3 default configuration default_config="/usr/share/torrust/default/config/tracker.container.sqlite3.toml" - elif cmp_lc "$TORRUST_TRACKER_DATABASE" "mysql"; then + elif cmp_lc "$TORRUST_TRACKER_DATABASE_DRIVER" "mysql"; then # (no database file needed for mysql) @@ -43,12 +43,12 @@ if [ -n "$TORRUST_TRACKER_DATABASE" ]; then default_config="/usr/share/torrust/default/config/tracker.container.mysql.toml" else - echo "Error: Unsupported Database Type: \"$TORRUST_TRACKER_DATABASE\"." + echo "Error: Unsupported Database Type: \"$TORRUST_TRACKER_DATABASE_DRIVER\"." echo "Please Note: Supported Database Types: \"sqlite3\", \"mysql\"." 
exit 1 fi else - echo "Error: \"\$TORRUST_TRACKER_DATABASE\" was not set!"; exit 1; + echo "Error: \"\$TORRUST_TRACKER_DATABASE_DRIVER\" was not set!"; exit 1; fi install_config="/etc/torrust/tracker/tracker.toml" From 68b199db6eacaea34c63f66cf7aacbf6d2bdbfa1 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 12 Sep 2023 12:56:03 +0200 Subject: [PATCH 0582/1003] chore: update deps --- Cargo.lock | 119 +++++++++++++++++++++++++++-------------------------- Cargo.toml | 8 ++-- 2 files changed, 64 insertions(+), 63 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 64aed490d..c6ebec05a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -92,9 +92,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstyle" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c4c2c83f81532e5845a733998b6971faca23490340a418e9b72a3ec9de12ea" +checksum = "b84bf0a05bbb2a83e5eb6fa36bb6e87baa08193c35ff52bbf6b38d8af2890e46" [[package]] name = "aquatic_udp_protocol" @@ -120,9 +120,9 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-compression" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d495b6dc0184693324491a5ac05f559acc97bf937ab31d7a1c33dd0016be6d2b" +checksum = "bb42b2197bf15ccb092b62c74515dbd8b86d0effd934795f6687c93b6e679a2c" dependencies = [ "brotli", "flate2", @@ -142,7 +142,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -185,9 +185,9 @@ dependencies = [ [[package]] name = "axum-client-ip" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df8e81eacc93f36480825da5f46a33b5fb2246ed024eacc9e8933425b80c5807" +checksum = 
"1ef117890a418b7832678d9ea1e1c08456dd7b2fd1dadb9676cd6f0fe7eb4b21" dependencies = [ "axum", "forwarded-header-value", @@ -254,9 +254,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.3" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "414dcefbc63d77c526a76b3afcf6fbb9b5e2791c19c3aa2297733208750c6e53" +checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" [[package]] name = "bigdecimal" @@ -277,11 +277,11 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bindgen" -version = "0.59.2" +version = "0.68.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +checksum = "726e4313eb6ec35d2730258ad4e15b547ee75d6afaa1361a922e78e59b7d8078" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.0", "cexpr", "clang-sys", "lazy_static", @@ -292,6 +292,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", + "syn 2.0.32", ] [[package]] @@ -435,9 +436,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" [[package]] name = "cast" @@ -472,9 +473,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.29" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d87d9d13be47a5b7c3907137f1290b0459a7f80efb26be8c52afb11963bccb02" +checksum = "defd4e7873dbddba6c7c91e199c7fcb946abc4a6a4ac3195400bcfb01b5de877" dependencies = [ "android-tzdata", "iana-time-zone", @@ -748,7 +749,7 @@ 
dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -759,7 +760,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -792,7 +793,7 @@ checksum = "9abcad25e9720609ccb3dcdb795d845e37d8ce34183330a9f48b03a1a71c8e21" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -993,7 +994,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1005,7 +1006,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1017,7 +1018,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1082,7 +1083,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1373,7 +1374,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5305557fa27b460072ae15ce07617e999f5879f14d376c8449f0bfb9f9d8e91e" dependencies = [ "derive_utils", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -1568,9 +1569,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.4.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" +checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" [[package]] name = "local-ip-address" @@ -1719,7 +1720,7 @@ dependencies = [ "percent-encoding", "serde", "serde_json", - "socket2 0.5.3", + "socket2 0.5.4", "twox-hash", "url", ] @@ -1737,7 +1738,7 @@ dependencies = [ "proc-macro-error", 
"proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", "termcolor", "thiserror", ] @@ -1748,7 +1749,7 @@ version = "0.30.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57349d5a326b437989b6ee4dc8f2f34b0cc131202748414712a8e7d98952fc8c" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bigdecimal", "bindgen", "bitflags 2.4.0", @@ -1938,7 +1939,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -2020,7 +2021,7 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "serde", ] @@ -2061,7 +2062,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -2092,7 +2093,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -2401,7 +2402,7 @@ version = "0.11.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "bytes", "encoding_rs", "futures-core", @@ -2549,9 +2550,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.11" +version = "0.38.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0c3dde1fc030af041adc40e79c0e7fbcf431dd24870053d187d7c66e4b87453" +checksum = "d7db8590df6dfcd144d22afd1b83b36c21a18d7cbc1dc4bb5295a8712e9eb662" dependencies = [ "bitflags 2.4.0", "errno", @@ -2578,14 +2579,14 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.3", + "base64 
0.21.4", ] [[package]] name = "rustls-webpki" -version = "0.101.4" +version = "0.101.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" +checksum = "45a27e3b59326c16e23d30aeb7a36a24cc0d29e71d68ff611cdfb4a01d013bed" dependencies = [ "ring", "untrusted", @@ -2723,14 +2724,14 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] name = "serde_json" -version = "1.0.105" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" +checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2" dependencies = [ "itoa", "ryu", @@ -2755,7 +2756,7 @@ checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -2785,7 +2786,7 @@ version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ca3b16a3d82c4088f343b7480a93550b3eabe1a358569c2dfe38bbcead07237" dependencies = [ - "base64 0.21.3", + "base64 0.21.4", "chrono", "hex", "indexmap 1.9.3", @@ -2805,7 +2806,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -2878,9 +2879,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" dependencies = [ "libc", "windows-sys", @@ -2927,9 +2928,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.31" +version = "2.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"718fa2415bcb8d8bd775917a1bf12a7931b6dfa890753378538118181e0cb398" +checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2" dependencies = [ "proc-macro2", "quote", @@ -2993,7 +2994,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -3062,7 +3063,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.3", + "socket2 0.5.4", "tokio-macros", "windows-sys", ] @@ -3075,7 +3076,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", ] [[package]] @@ -3123,9 +3124,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.6" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" +checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" dependencies = [ "serde", "serde_spanned", @@ -3144,9 +3145,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.14" +version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ "indexmap 2.0.0", "serde", @@ -3210,7 +3211,7 @@ dependencies = [ "serde", "serde_with", "thiserror", - "toml 0.7.6", + "toml 0.7.8", "torrust-tracker-located-error", "torrust-tracker-primitives", "uuid", @@ -3455,7 +3456,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", "wasm-bindgen-shared", ] @@ -3489,7 +3490,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.31", + "syn 2.0.32", "wasm-bindgen-backend", 
"wasm-bindgen-shared", ] diff --git a/Cargo.toml b/Cargo.toml index 76ec97919..79f523fa3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,7 +51,7 @@ openssl = { version = "0.10", features = ["vendored"] } config = "0.13" log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" -chrono = { version = "0.4.24", default-features = false, features = ["clock"] } +chrono = { version = "0.4", default-features = false, features = ["clock"] } r2d2 = "0.8" r2d2_mysql = "24.0" r2d2_sqlite = { version = "0.22", features = ["bundled"] } @@ -62,10 +62,10 @@ futures = "0.3" async-trait = "0.1" aquatic_udp_protocol = "0.8" uuid = { version = "1", features = ["v4"] } -axum = "0.6.20" +axum = "0.6" axum-server = { version = "0.5", features = ["tls-rustls"] } -axum-client-ip = "0.4.1" -tower-http = { version = "0.4.3", features = ["compression-full"] } +axum-client-ip = "0.4" +tower-http = { version = "0.4", features = ["compression-full"] } torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.5", path = "contrib/bencode"} torrust-tracker-primitives = { version = "3.0.0-alpha.5", path = "packages/primitives" } torrust-tracker-configuration = { version = "3.0.0-alpha.5", path = "packages/configuration" } From ea8c78f1abdcd1bfd6630cb2ff286b7afabc9d73 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 13 Sep 2023 11:05:24 +0200 Subject: [PATCH 0583/1003] release: version 3.0.0-alpha.6 --- Cargo.lock | 12 ++++++------ Cargo.toml | 12 ++++++------ README.md | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/test-helpers/Cargo.toml | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c6ebec05a..c9da67f5b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3158,7 +3158,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.5" +version = "3.0.0-alpha.6" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -3204,7 +3204,7 @@ dependencies = [ [[package]] name = 
"torrust-tracker-configuration" -version = "3.0.0-alpha.5" +version = "3.0.0-alpha.6" dependencies = [ "config", "log", @@ -3219,7 +3219,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-alpha.5" +version = "3.0.0-alpha.6" dependencies = [ "criterion", "error-chain", @@ -3227,7 +3227,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.5" +version = "3.0.0-alpha.6" dependencies = [ "log", "thiserror", @@ -3235,7 +3235,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.5" +version = "3.0.0-alpha.6" dependencies = [ "derive_more", "serde", @@ -3243,7 +3243,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.5" +version = "3.0.0-alpha.6" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index 79f523fa3..a26bed8da 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,7 +29,7 @@ license-file = "COPYRIGHT" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-alpha.5" +version = "3.0.0-alpha.6" [dependencies] @@ -66,10 +66,10 @@ axum = "0.6" axum-server = { version = "0.5", features = ["tls-rustls"] } axum-client-ip = "0.4" tower-http = { version = "0.4", features = ["compression-full"] } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.5", path = "contrib/bencode"} -torrust-tracker-primitives = { version = "3.0.0-alpha.5", path = "packages/primitives" } -torrust-tracker-configuration = { version = "3.0.0-alpha.5", path = "packages/configuration" } -torrust-tracker-located-error = { version = "3.0.0-alpha.5", path = "packages/located-error" } +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.6", path = "contrib/bencode"} +torrust-tracker-primitives = { version = "3.0.0-alpha.6", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.6", path = "packages/configuration" 
} +torrust-tracker-located-error = { version = "3.0.0-alpha.6", path = "packages/located-error" } multimap = "0.9" hyper = "0.14" @@ -80,7 +80,7 @@ serde_urlencoded = "0.7" serde_repr = "0.1" serde_bytes = "0.11" local-ip-address = "0.5" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.5", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.6", path = "packages/test-helpers" } [workspace] members = [ diff --git a/README.md b/README.md index 7e3d246e5..551471914 100644 --- a/README.md +++ b/README.md @@ -179,13 +179,13 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [containers.md]: ./docs/containers.md -[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.5/torrust_tracker/servers/apis/v1 -[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.5/torrust_tracker/servers/http -[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.5/torrust_tracker/servers/udp +[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.6/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.6/torrust_tracker/servers/http +[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.6/torrust_tracker/servers/udp [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [documentation]: https://docs.rs/torrust-tracker/ -[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.5/torrust_tracker/servers/apis/v1 +[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.6/torrust_tracker/servers/apis/v1 [discussions]: https://github.com/torrust/torrust-tracker/discussions [COPYRIGHT]: ./COPYRIGHT diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index ba2f8a466..4a3028f18 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -22,8 +22,8 @@ config = "0.13" toml = "0.7" log = { version = "0.4", features = ["release_max_level_info"] } thiserror = "1.0" 
-torrust-tracker-primitives = { version = "3.0.0-alpha.5", path = "../primitives" } -torrust-tracker-located-error = { version = "3.0.0-alpha.5", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.6", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.6", path = "../located-error" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index be3cc352d..bec9bb216 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -17,5 +17,5 @@ version.workspace = true [dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.5", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-alpha.5", path = "../primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.6", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.6", path = "../primitives" } From c06dc64cd26ad3feb6698c4589fa17c535bb1231 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 13 Sep 2023 12:05:07 +0200 Subject: [PATCH 0584/1003] fixup: wrong env in deployment workflow --- .github/workflows/deployment.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index e3b042fd5..43bfe4784 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -8,7 +8,7 @@ on: jobs: secrets: name: Secrets - environment: dockerhub-torrust + environment: crates-io-torrust runs-on: ubuntu-latest outputs: @@ -51,7 +51,6 @@ jobs: name: Publish environment: crates-io-torrust needs: test - if: needs.secrets.outputs.continue == 'true' runs-on: ubuntu-latest steps: From 27a7f92128dbd08f1962a2300d6b5a9935a25282 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 15 Sep 2023 11:28:28 +0200 Subject: [PATCH 0585/1003] ci: 
fix for bug in upstream lib testing unit coverage step triggers bug in time crate on nightly: https://github.com/time-rs/time/issues/618 --- .github/workflows/testing.yaml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index f138a95cc..21c47665f 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -104,6 +104,7 @@ jobs: name: Run Unit Tests run: cargo test --tests --benches --examples --workspace --all-targets --all-features - - id: coverage - name: Generate Coverage Report - run: cargo llvm-cov nextest --tests --benches --examples --workspace --all-targets --all-features + # Temporary Disable https://github.com/time-rs/time/issues/618 + # - id: coverage + # name: Generate Coverage Report + # run: cargo llvm-cov nextest --tests --benches --examples --workspace --all-targets --all-features From 4cc4680bf18620f133cf727eb79e36111c469e18 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 15 Sep 2023 08:49:04 +0200 Subject: [PATCH 0586/1003] chore: update docker workflow actions --- .github/workflows/container.yaml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 8fd4ca201..d107a0139 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -26,11 +26,11 @@ jobs: steps: - id: setup name: Setup Toolchain - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - id: build name: Build - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: file: ./Containerfile push: false @@ -125,7 +125,7 @@ jobs: steps: - id: meta name: Docker Meta - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 with: images: | "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" @@ -134,17 +134,17 @@ jobs: - id: login name: Login to Docker Hub - uses: 
docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKER_HUB_USERNAME }} password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} - id: setup name: Setup Toolchain - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Build and push - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: file: ./Containerfile push: true @@ -163,7 +163,7 @@ jobs: steps: - id: meta name: Docker Meta - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 with: images: | "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" @@ -175,17 +175,17 @@ jobs: - id: login name: Login to Docker Hub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKER_HUB_USERNAME }} password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} - id: setup name: Setup Toolchain - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Build and push - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: file: ./Containerfile push: true From 7d5acd23210c5d2bbba114f4af665d16faca907b Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 15 Sep 2023 09:45:22 +0200 Subject: [PATCH 0587/1003] chore: bump codecov/codecov-action from 3 to 4 --- .github/workflows/coverage.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index cf7eb6f6d..4d4054026 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -83,7 +83,7 @@ jobs: - id: upload name: Upload Coverage Report - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} files: ${{ steps.coverage.outputs.report }} From 91ff423c200bcdb59fef270de14f4a0ba1b273a5 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 15 Sep 2023 09:23:11 +0200 Subject: [PATCH 0588/1003] chore: update cargo lockfile --- Cargo.lock 
| 74 +++++++++++++++++++++++++++--------------------------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c9da67f5b..0f04f4bad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -142,7 +142,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -292,7 +292,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -402,9 +402,9 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "bytecheck" @@ -524,9 +524,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.2" +version = "4.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a13b88d2c62ff462f88e4a121f17a82c1af05693a2f192b5c38d14de73c19f6" +checksum = "84ed82781cea27b43c9b106a979fe450a13a31aab0500595fb3fc06616de08e6" dependencies = [ "clap_builder", ] @@ -749,7 +749,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -760,7 +760,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -793,7 +793,7 @@ checksum = "9abcad25e9720609ccb3dcdb795d845e37d8ce34183330a9f48b03a1a71c8e21" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -994,7 +994,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -1006,7 +1006,7 
@@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -1018,7 +1018,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -1083,7 +1083,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -1374,7 +1374,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5305557fa27b460072ae15ce07617e999f5879f14d376c8449f0bfb9f9d8e91e" dependencies = [ "derive_utils", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -1525,9 +1525,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.147" +version = "0.2.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" [[package]] name = "libloading" @@ -1575,9 +1575,9 @@ checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" [[package]] name = "local-ip-address" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "885efb07efcd6ae1c6af70be7565544121424fa9e5b1c3e4b58bbbf141a58cef" +checksum = "3fefe707432eb6bd4704b3dacfc87aab269d56667ad05dcd6869534e8890e767" dependencies = [ "libc", "neli", @@ -1738,7 +1738,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", "termcolor", "thiserror", ] @@ -1939,7 +1939,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -2062,7 +2062,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -2093,7 +2093,7 @@ checksum = 
"4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -2223,9 +2223,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" dependencies = [ "unicode-ident", ] @@ -2724,14 +2724,14 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] name = "serde_json" -version = "1.0.106" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ "itoa", "ryu", @@ -2756,7 +2756,7 @@ checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -2806,7 +2806,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -2928,9 +2928,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.32" +version = "2.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2" +checksum = "9caece70c63bfba29ec2fed841a09851b14a235c60010fa4de58089b6c025668" dependencies = [ "proc-macro2", "quote", @@ -2994,7 +2994,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -3076,7 +3076,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", ] [[package]] @@ -3358,9 +3358,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" @@ -3456,7 +3456,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", "wasm-bindgen-shared", ] @@ -3490,7 +3490,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.33", "wasm-bindgen-backend", "wasm-bindgen-shared", ] From 0ff86dab2d4d47bed25e7a9369aa782f8268a96c Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 15 Sep 2023 09:38:54 +0200 Subject: [PATCH 0589/1003] chore: update cargo deps --- Cargo.lock | 21 ++++++++++++++++----- Cargo.toml | 2 +- packages/configuration/Cargo.toml | 2 +- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0f04f4bad..21b547ca8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2194,7 +2194,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "toml_edit", + "toml_edit 0.19.15", ] [[package]] @@ -3124,14 +3124,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.8" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" +checksum = "c226a7bba6d859b63c92c4b4fe69c5b6b72d0cb897dbc8e6012298e6154cb56e" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit", + "toml_edit 0.20.0", ] 
[[package]] @@ -3148,6 +3148,17 @@ name = "toml_edit" version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +dependencies = [ + "indexmap 2.0.0", + "toml_datetime", + "winnow", +] + +[[package]] +name = "toml_edit" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ff63e60a958cefbb518ae1fd6566af80d9d4be430a33f3723dfc47d1d411d95" dependencies = [ "indexmap 2.0.0", "serde", @@ -3211,7 +3222,7 @@ dependencies = [ "serde", "serde_with", "thiserror", - "toml 0.7.8", + "toml 0.8.0", "torrust-tracker-located-error", "torrust-tracker-primitives", "uuid", diff --git a/Cargo.toml b/Cargo.toml index a26bed8da..c22f68802 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -66,7 +66,7 @@ axum = "0.6" axum-server = { version = "0.5", features = ["tls-rustls"] } axum-client-ip = "0.4" tower-http = { version = "0.4", features = ["compression-full"] } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.6", path = "contrib/bencode"} +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.6", path = "contrib/bencode" } torrust-tracker-primitives = { version = "3.0.0-alpha.6", path = "packages/primitives" } torrust-tracker-configuration = { version = "3.0.0-alpha.6", path = "packages/configuration" } torrust-tracker-located-error = { version = "3.0.0-alpha.6", path = "packages/located-error" } diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 4a3028f18..2592268e6 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -19,7 +19,7 @@ version.workspace = true serde = { version = "1.0", features = ["derive"] } serde_with = "3.2" config = "0.13" -toml = "0.7" +toml = "0.8" log = { version = "0.4", features = ["release_max_level_info"] } thiserror = "1.0" torrust-tracker-primitives = { version = "3.0.0-alpha.6", path = "../primitives" } From 
4bd88e7e06f9ca430f634d9b3da1f8b7237011a2 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 15 Sep 2023 09:44:19 +0200 Subject: [PATCH 0590/1003] style: format .toml files --- .vscode/extensions.json | 3 +- .vscode/settings.json | 5 ++ Cargo.toml | 78 ++++++++----------- contrib/bencode/Cargo.toml | 13 ++-- packages/configuration/Cargo.toml | 13 ++-- packages/located-error/Cargo.toml | 5 +- packages/primitives/Cargo.toml | 7 +- packages/test-helpers/Cargo.toml | 4 +- rustfmt.toml | 4 +- .../config/tracker.container.mysql.toml | 25 +++--- .../config/tracker.container.sqlite3.toml | 25 +++--- .../config/tracker.development.sqlite3.toml | 24 +++--- 12 files changed, 94 insertions(+), 112 deletions(-) diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 11d11a5c5..934a43eb8 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -1,6 +1,7 @@ { "recommendations": [ "streetsidesoftware.code-spell-checker", - "rust-lang.rust-analyzer" + "rust-lang.rust-analyzer", + "tamasfe.even-better-toml" ] } \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 3bf0969e9..661243fbe 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -20,4 +20,9 @@ "-W", "clippy::pedantic", ], + "evenBetterToml.formatter.allowedBlankLines": 1, + "evenBetterToml.formatter.columnWidth": 130, + "evenBetterToml.formatter.trailingNewline": true, + "evenBetterToml.formatter.reorderKeys": true, + "evenBetterToml.formatter.reorderArrays": true, } \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index c22f68802..705fd116a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,89 +14,73 @@ repository.workspace = true rust-version.workspace = true version.workspace = true - [workspace.package] -authors = [ - "Nautilus Cyberneering , Mick van Dijke ", -] +authors = ["Nautilus Cyberneering , Mick van Dijke "] categories = ["network-programming", "web-programming"] description = "A feature rich BitTorrent tracker." 
documentation = "https://docs.rs/crate/torrust-tracker/" edition = "2021" homepage = "https://torrust.com/" -keywords = ["bittorrent", "tracker", "file-sharing", "peer-to-peer", "torrent"] +keywords = ["bittorrent", "file-sharing", "peer-to-peer", "torrent", "tracker"] license-file = "COPYRIGHT" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" version = "3.0.0-alpha.6" - [dependencies] -tokio = { version = "1.29", features = [ - "rt-multi-thread", - "net", - "sync", - "macros", - "signal", -] } -serde = { version = "1.0", features = ["derive"] } -serde_bencode = "^0.2" -serde_json = "1.0" -serde_with = "3.2" -percent-encoding = "2.2" +aquatic_udp_protocol = "0.8" +async-trait = "0.1" +axum = "0.6" +axum-client-ip = "0.4" +axum-server = { version = "0.5", features = ["tls-rustls"] } binascii = "0.1" -lazy_static = "1.4" -openssl = { version = "0.10", features = ["vendored"] } +chrono = { version = "0.4", default-features = false, features = ["clock"] } config = "0.13" -log = { version = "0.4", features = ["release_max_level_info"] } +derive_more = "0.99" fern = "0.6" -chrono = { version = "0.4", default-features = false, features = ["clock"] } +futures = "0.3" +hyper = "0.14" +lazy_static = "1.4" +log = { version = "0.4", features = ["release_max_level_info"] } +multimap = "0.9" +openssl = { version = "0.10", features = ["vendored"] } +percent-encoding = "2.2" r2d2 = "0.8" r2d2_mysql = "24.0" r2d2_sqlite = { version = "0.22", features = ["bundled"] } rand = "0.8" -derive_more = "0.99" +serde = { version = "1.0", features = ["derive"] } +serde_bencode = "^0.2" +serde_json = "1.0" +serde_with = "3.2" thiserror = "1.0" -futures = "0.3" -async-trait = "0.1" -aquatic_udp_protocol = "0.8" -uuid = { version = "1", features = ["v4"] } -axum = "0.6" -axum-server = { version = "0.5", features = ["tls-rustls"] } -axum-client-ip = "0.4" -tower-http = { version = "0.4", features = ["compression-full"] } 
-torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.6", path = "contrib/bencode" } -torrust-tracker-primitives = { version = "3.0.0-alpha.6", path = "packages/primitives" } +tokio = { version = "1.29", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-configuration = { version = "3.0.0-alpha.6", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.6", path = "contrib/bencode" } torrust-tracker-located-error = { version = "3.0.0-alpha.6", path = "packages/located-error" } -multimap = "0.9" -hyper = "0.14" +torrust-tracker-primitives = { version = "3.0.0-alpha.6", path = "packages/primitives" } +tower-http = { version = "0.4", features = ["compression-full"] } +uuid = { version = "1", features = ["v4"] } [dev-dependencies] +local-ip-address = "0.5" mockall = "0.11" reqwest = { version = "0.11.18", features = ["json"] } -serde_urlencoded = "0.7" -serde_repr = "0.1" serde_bytes = "0.11" -local-ip-address = "0.5" +serde_repr = "0.1" +serde_urlencoded = "0.7" torrust-tracker-test-helpers = { version = "3.0.0-alpha.6", path = "packages/test-helpers" } [workspace] -members = [ - "contrib/bencode", - "packages/configuration", - "packages/primitives", - "packages/test-helpers", - "packages/located-error", -] +members = ["contrib/bencode", "packages/configuration", "packages/located-error", "packages/primitives", "packages/test-helpers"] [profile.dev] debug = 1 -opt-level = 1 lto = "thin" +opt-level = 1 [profile.release] debug = 1 -opt-level = 3 lto = "fat" +opt-level = 3 diff --git a/contrib/bencode/Cargo.toml b/contrib/bencode/Cargo.toml index 2aed18409..3918aa6ba 100644 --- a/contrib/bencode/Cargo.toml +++ b/contrib/bencode/Cargo.toml @@ -1,14 +1,12 @@ [package] -name = "torrust-tracker-contrib-bencode" description = "(contrib) Efficient decoding and encoding for bencode." 
-keywords = ["library", "contrib", "bencode"] +keywords = ["bencode", "contrib", "library"] +name = "torrust-tracker-contrib-bencode" readme = "README.md" -authors = [ - "Nautilus Cyberneering , Andrew ", -] -repository = "https://github.com/torrust/bittorrent-infrastructure-project" +authors = ["Nautilus Cyberneering , Andrew "] license = "Apache-2.0" +repository = "https://github.com/torrust/bittorrent-infrastructure-project" documentation.workspace = true edition.workspace = true @@ -17,7 +15,6 @@ publish.workspace = true rust-version.workspace = true version.workspace = true - [dependencies] error-chain = "0.12" @@ -29,5 +26,5 @@ name = "test" path = "test/mod.rs" [[bench]] -name = "bencode_benchmark" harness = false +name = "bencode_benchmark" diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 2592268e6..e5b9e31cd 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "torrust-tracker-configuration" description = "A library to provide configuration to the Torrust Tracker." 
-keywords = ["library", "config", "settings"] +keywords = ["config", "library", "settings"] +name = "torrust-tracker-configuration" readme = "README.md" authors.workspace = true @@ -14,16 +14,15 @@ repository.workspace = true rust-version.workspace = true version.workspace = true - [dependencies] -serde = { version = "1.0", features = ["derive"] } -serde_with = "3.2" config = "0.13" -toml = "0.8" log = { version = "0.4", features = ["release_max_level_info"] } +serde = { version = "1.0", features = ["derive"] } +serde_with = "3.2" thiserror = "1.0" -torrust-tracker-primitives = { version = "3.0.0-alpha.6", path = "../primitives" } +toml = "0.8" torrust-tracker-located-error = { version = "3.0.0-alpha.6", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.6", path = "../primitives" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index ea0b1639a..b4c813df3 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "torrust-tracker-located-error" description = "A library to provide error decorator with the location and the source of the original error." -keywords = ["library", "helper", "errors"] +keywords = ["errors", "helper", "library"] +name = "torrust-tracker-located-error" readme = "README.md" authors.workspace = true @@ -14,7 +14,6 @@ repository.workspace = true rust-version.workspace = true version.workspace = true - [dependencies] log = { version = "0.4", features = ["release_max_level_info"] } diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index 9eb092e1c..ce6c20ff0 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "torrust-tracker-primitives" description = "A library with the primitive types shared by the Torrust tracker packages." 
-keywords = ["library", "api", "primitives"] +keywords = ["api", "library", "primitives"] +name = "torrust-tracker-primitives" readme = "README.md" authors.workspace = true @@ -14,7 +14,6 @@ repository.workspace = true rust-version.workspace = true version.workspace = true - [dependencies] -serde = { version = "1.0", features = ["derive"] } derive_more = "0.99" +serde = { version = "1.0", features = ["derive"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index bec9bb216..c01abed2e 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "torrust-tracker-test-helpers" description = "A library providing helpers for testing the Torrust tracker." -keywords = ["library", "helper", "testing"] +keywords = ["helper", "library", "testing"] +name = "torrust-tracker-test-helpers" readme = "README.md" authors.workspace = true diff --git a/rustfmt.toml b/rustfmt.toml index abbed5eda..76046e6f4 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,3 +1,3 @@ -max_width = 130 -imports_granularity = "Module" group_imports = "StdExternalCrate" +imports_granularity = "Module" +max_width = 130 diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml index 60da25db2..fb9cbf789 100644 --- a/share/default/config/tracker.container.mysql.toml +++ b/share/default/config/tracker.container.mysql.toml @@ -1,36 +1,35 @@ -log_level = "info" -mode = "public" +announce_interval = 120 db_driver = "MySQL" db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" -announce_interval = 120 +external_ip = "0.0.0.0" +inactive_peer_cleanup_interval = 600 +log_level = "info" +max_peer_timeout = 900 min_announce_interval = 120 +mode = "public" on_reverse_proxy = false -external_ip = "0.0.0.0" -tracker_usage_statistics = true persistent_torrent_completed_stat = false -max_peer_timeout = 900 -inactive_peer_cleanup_interval = 600 
remove_peerless_torrents = true +tracker_usage_statistics = true [[udp_trackers]] -enabled = false bind_address = "0.0.0.0:6969" +enabled = false [[http_trackers]] -enabled = false bind_address = "0.0.0.0:7070" -ssl_enabled = false +enabled = false ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" +ssl_enabled = false ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" [http_api] -enabled = true bind_address = "0.0.0.0:1212" -ssl_enabled = false +enabled = true ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" +ssl_enabled = false ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" - # Please override the admin token setting the # `TORRUST_TRACKER_API_ADMIN_TOKEN` # environmental variable! diff --git a/share/default/config/tracker.container.sqlite3.toml b/share/default/config/tracker.container.sqlite3.toml index 64cf75518..54cfd4023 100644 --- a/share/default/config/tracker.container.sqlite3.toml +++ b/share/default/config/tracker.container.sqlite3.toml @@ -1,36 +1,35 @@ -log_level = "info" -mode = "public" +announce_interval = 120 db_driver = "Sqlite3" db_path = "/var/lib/torrust/tracker/database/sqlite3.db" -announce_interval = 120 +external_ip = "0.0.0.0" +inactive_peer_cleanup_interval = 600 +log_level = "info" +max_peer_timeout = 900 min_announce_interval = 120 +mode = "public" on_reverse_proxy = false -external_ip = "0.0.0.0" -tracker_usage_statistics = true persistent_torrent_completed_stat = false -max_peer_timeout = 900 -inactive_peer_cleanup_interval = 600 remove_peerless_torrents = true +tracker_usage_statistics = true [[udp_trackers]] -enabled = false bind_address = "0.0.0.0:6969" +enabled = false [[http_trackers]] -enabled = false bind_address = "0.0.0.0:7070" -ssl_enabled = false +enabled = false ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" +ssl_enabled = false ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" [http_api] -enabled = true bind_address = "0.0.0.0:1212" -ssl_enabled = false 
+enabled = true ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" +ssl_enabled = false ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" - # Please override the admin token setting the # `TORRUST_TRACKER_API_ADMIN_TOKEN` # environmental variable! diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index be1877469..20f95ac5d 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -1,33 +1,33 @@ -log_level = "info" -mode = "public" +announce_interval = 120 db_driver = "Sqlite3" db_path = "./storage/tracker/lib/database/sqlite3.db" -announce_interval = 120 +external_ip = "0.0.0.0" +inactive_peer_cleanup_interval = 600 +log_level = "info" +max_peer_timeout = 900 min_announce_interval = 120 +mode = "public" on_reverse_proxy = false -external_ip = "0.0.0.0" -tracker_usage_statistics = true persistent_torrent_completed_stat = false -max_peer_timeout = 900 -inactive_peer_cleanup_interval = 600 remove_peerless_torrents = true +tracker_usage_statistics = true [[udp_trackers]] -enabled = false bind_address = "0.0.0.0:6969" +enabled = false [[http_trackers]] -enabled = false bind_address = "0.0.0.0:7070" -ssl_enabled = false +enabled = false ssl_cert_path = "" +ssl_enabled = false ssl_key_path = "" [http_api] -enabled = true bind_address = "127.0.0.1:1212" -ssl_enabled = false +enabled = true ssl_cert_path = "" +ssl_enabled = false ssl_key_path = "" [http_api.access_tokens] From a005af2a056f9fd2fc287a855f1e2797faf00db0 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 15 Sep 2023 12:49:30 +0200 Subject: [PATCH 0591/1003] Revert "chore: bump codecov/codecov-action from 3 to 4" This reverts commit 7d5acd23210c5d2bbba114f4af665d16faca907b. 
--- .github/workflows/coverage.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 4d4054026..cf7eb6f6d 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -83,7 +83,7 @@ jobs: - id: upload name: Upload Coverage Report - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v3 with: token: ${{ secrets.CODECOV_TOKEN }} files: ${{ steps.coverage.outputs.report }} From ab0dd30d8282f313006c866fe2d301daf9f6a7b8 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 15 Sep 2023 12:47:10 +0200 Subject: [PATCH 0592/1003] docs: update release process --- docs/release_process.md | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/docs/release_process.md b/docs/release_process.md index cc401c11a..4b58d7ca8 100644 --- a/docs/release_process.md +++ b/docs/release_process.md @@ -1,24 +1,30 @@ -# Torrust Tracker Release Process (draft) +# Torrust Tracker Release Process (draft 2) The purpose of this document is to describe the release process. ## Overview -Torrust Tracker is published in this order: +Torrust Tracker is published according to this protocol: -1. `develop` branch is ready for publishing. -2. create `release: version (semantic version)` commit. -3. push release commit to `main` branch. -4. check all status checks succeed for `main` branch. -5. push `main` branch to `releases\v(semantic version)` branch. -6. check all status checks success for `releases\v(semantic version)` branch. -7. create signed `v(semantic version)` tag. -8. create github release from `v(semantic version)` tag. -9. merge `main` branch into `develop` branch. +0. After release create new pull request into `develop` branch: + +- The `develop` branch has the (semantic version) suffix `-develop`. +- The version is bumped according to releases, new features, and breaking changes. 
+ +- [ ] `develop` is ready for branching for a release. +- [ ] force-push develop to `staging` branch. +- [ ] commit `release: version (semantic version)`, removing the `-develop` suffix. +- [ ] create pull request to merge `staging` into `main` branch. +- [ ] check all status checks succeed for `main` branch. +- [ ] push `main` branch to `releases\v(semantic version)` branch. +- [ ] check all status checks success for `releases\v(semantic version)` branch. +- [ ] create signed `v(semantic version)` tag from `releases\v(semantic version) HEAD`. +- [ ] create github release from `v(semantic version)` tag. +- [ ] merge the `main` branch back into `develop` branch, assuring that the (semantic version) has the suffix `-develop`. - At step `1.`, `develop` is automatically published to `dockerhub`. - At step `3.`, `main` is automatically published to `dockerhub`. -- At step `5.`, `releases\v(semantic version)` is automatically published to `dockerhub` and `crate.io`. +- At step `6.`, `releases\v(semantic version)` is automatically published to `dockerhub` and `crate.io`. 
## Development Branch From fc7cad0e72627046d5f39b98c74335cf31674278 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 15 Sep 2023 17:30:00 +0200 Subject: [PATCH 0593/1003] chore: bump alpha version to 3.0.0-alpha.7-develop add version suffix to match release process expectations --- Cargo.lock | 12 ++++++------ Cargo.toml | 12 ++++++------ README.md | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/test-helpers/Cargo.toml | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 21b547ca8..b67a3434f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3169,7 +3169,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.6" +version = "3.0.0-alpha.7-develop" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -3215,7 +3215,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.6" +version = "3.0.0-alpha.7-develop" dependencies = [ "config", "log", @@ -3230,7 +3230,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-alpha.6" +version = "3.0.0-alpha.7-develop" dependencies = [ "criterion", "error-chain", @@ -3238,7 +3238,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.6" +version = "3.0.0-alpha.7-develop" dependencies = [ "log", "thiserror", @@ -3246,7 +3246,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.6" +version = "3.0.0-alpha.7-develop" dependencies = [ "derive_more", "serde", @@ -3254,7 +3254,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.6" +version = "3.0.0-alpha.7-develop" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index 705fd116a..6a7471550 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ license-file = "COPYRIGHT" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version 
= "1.72" -version = "3.0.0-alpha.6" +version = "3.0.0-alpha.7-develop" [dependencies] aquatic_udp_protocol = "0.8" @@ -56,10 +56,10 @@ serde_json = "1.0" serde_with = "3.2" thiserror = "1.0" tokio = { version = "1.29", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-configuration = { version = "3.0.0-alpha.6", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.6", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-alpha.6", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.6", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.7-develop", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.7-develop", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-alpha.7-develop", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.7-develop", path = "packages/primitives" } tower-http = { version = "0.4", features = ["compression-full"] } uuid = { version = "1", features = ["v4"] } @@ -70,7 +70,7 @@ reqwest = { version = "0.11.18", features = ["json"] } serde_bytes = "0.11" serde_repr = "0.1" serde_urlencoded = "0.7" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.6", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.7-develop", path = "packages/test-helpers" } [workspace] members = ["contrib/bencode", "packages/configuration", "packages/located-error", "packages/primitives", "packages/test-helpers"] diff --git a/README.md b/README.md index 551471914..e8f54bb6e 100644 --- a/README.md +++ b/README.md @@ -179,13 +179,13 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [containers.md]: ./docs/containers.md -[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.6/torrust_tracker/servers/apis/v1 -[http]: 
https://docs.rs/torrust-tracker/3.0.0-alpha.6/torrust_tracker/servers/http -[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.6/torrust_tracker/servers/udp +[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.7-develop/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.7-develop/torrust_tracker/servers/http +[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.7-develop/torrust_tracker/servers/udp [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [documentation]: https://docs.rs/torrust-tracker/ -[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.6/torrust_tracker/servers/apis/v1 +[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.7-develop/torrust_tracker/servers/apis/v1 [discussions]: https://github.com/torrust/torrust-tracker/discussions [COPYRIGHT]: ./COPYRIGHT diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index e5b9e31cd..9057e85df 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -21,8 +21,8 @@ serde = { version = "1.0", features = ["derive"] } serde_with = "3.2" thiserror = "1.0" toml = "0.8" -torrust-tracker-located-error = { version = "3.0.0-alpha.6", path = "../located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.6", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.7-develop", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.7-develop", path = "../primitives" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index c01abed2e..49903213d 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -17,5 +17,5 @@ version.workspace = true [dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = 
"3.0.0-alpha.6", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-alpha.6", path = "../primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.7-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.7-develop", path = "../primitives" } From a9b199d908b267f786f52aa7d0675ebe2bc6fd15 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 15 Sep 2023 18:34:25 +0200 Subject: [PATCH 0594/1003] release: version 3.0.0-alpha.7 --- Cargo.lock | 12 ++++++------ Cargo.toml | 12 ++++++------ README.md | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/test-helpers/Cargo.toml | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b67a3434f..ba179c9cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3169,7 +3169,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.7-develop" +version = "3.0.0-alpha.7" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -3215,7 +3215,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.7-develop" +version = "3.0.0-alpha.7" dependencies = [ "config", "log", @@ -3230,7 +3230,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-alpha.7-develop" +version = "3.0.0-alpha.7" dependencies = [ "criterion", "error-chain", @@ -3238,7 +3238,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.7-develop" +version = "3.0.0-alpha.7" dependencies = [ "log", "thiserror", @@ -3246,7 +3246,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.7-develop" +version = "3.0.0-alpha.7" dependencies = [ "derive_more", "serde", @@ -3254,7 +3254,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.7-develop" +version = "3.0.0-alpha.7" dependencies = [ "lazy_static", "rand", diff --git 
a/Cargo.toml b/Cargo.toml index 6a7471550..ee908ed5e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ license-file = "COPYRIGHT" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-alpha.7-develop" +version = "3.0.0-alpha.7" [dependencies] aquatic_udp_protocol = "0.8" @@ -56,10 +56,10 @@ serde_json = "1.0" serde_with = "3.2" thiserror = "1.0" tokio = { version = "1.29", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-configuration = { version = "3.0.0-alpha.7-develop", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.7-develop", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-alpha.7-develop", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.7-develop", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.7", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.7", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-alpha.7", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.7", path = "packages/primitives" } tower-http = { version = "0.4", features = ["compression-full"] } uuid = { version = "1", features = ["v4"] } @@ -70,7 +70,7 @@ reqwest = { version = "0.11.18", features = ["json"] } serde_bytes = "0.11" serde_repr = "0.1" serde_urlencoded = "0.7" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.7-develop", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.7", path = "packages/test-helpers" } [workspace] members = ["contrib/bencode", "packages/configuration", "packages/located-error", "packages/primitives", "packages/test-helpers"] diff --git a/README.md b/README.md index e8f54bb6e..51839d10f 100644 --- a/README.md +++ b/README.md @@ -179,13 +179,13 @@ 
This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [containers.md]: ./docs/containers.md -[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.7-develop/torrust_tracker/servers/apis/v1 -[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.7-develop/torrust_tracker/servers/http -[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.7-develop/torrust_tracker/servers/udp +[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.7/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.7/torrust_tracker/servers/http +[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.7/torrust_tracker/servers/udp [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [documentation]: https://docs.rs/torrust-tracker/ -[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.7-develop/torrust_tracker/servers/apis/v1 +[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.7/torrust_tracker/servers/apis/v1 [discussions]: https://github.com/torrust/torrust-tracker/discussions [COPYRIGHT]: ./COPYRIGHT diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 9057e85df..25a4639eb 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -21,8 +21,8 @@ serde = { version = "1.0", features = ["derive"] } serde_with = "3.2" thiserror = "1.0" toml = "0.8" -torrust-tracker-located-error = { version = "3.0.0-alpha.7-develop", path = "../located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.7-develop", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.7", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.7", path = "../primitives" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 49903213d..e4a60f5c2 100644 --- 
a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -17,5 +17,5 @@ version.workspace = true [dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.7-develop", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-alpha.7-develop", path = "../primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.7", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.7", path = "../primitives" } From 744f51a258eb731189deb909f2e9f36fd4a87247 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 15 Sep 2023 20:53:00 +0200 Subject: [PATCH 0595/1003] version: bump version --- Cargo.lock | 12 ++++++------ Cargo.toml | 12 ++++++------ README.md | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/test-helpers/Cargo.toml | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b67a3434f..976e9b55d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3169,7 +3169,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.7-develop" +version = "3.0.0-alpha.8-develop" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -3215,7 +3215,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.7-develop" +version = "3.0.0-alpha.8-develop" dependencies = [ "config", "log", @@ -3230,7 +3230,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-alpha.7-develop" +version = "3.0.0-alpha.8-develop" dependencies = [ "criterion", "error-chain", @@ -3238,7 +3238,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.7-develop" +version = "3.0.0-alpha.8-develop" dependencies = [ "log", "thiserror", @@ -3246,7 +3246,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.7-develop" +version = "3.0.0-alpha.8-develop" 
dependencies = [ "derive_more", "serde", @@ -3254,7 +3254,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.7-develop" +version = "3.0.0-alpha.8-develop" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index 6a7471550..0cb119a19 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ license-file = "COPYRIGHT" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-alpha.7-develop" +version = "3.0.0-alpha.8-develop" [dependencies] aquatic_udp_protocol = "0.8" @@ -56,10 +56,10 @@ serde_json = "1.0" serde_with = "3.2" thiserror = "1.0" tokio = { version = "1.29", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-configuration = { version = "3.0.0-alpha.7-develop", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.7-develop", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-alpha.7-develop", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.7-develop", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.8-develop", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.8-develop", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-alpha.8-develop", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.8-develop", path = "packages/primitives" } tower-http = { version = "0.4", features = ["compression-full"] } uuid = { version = "1", features = ["v4"] } @@ -70,7 +70,7 @@ reqwest = { version = "0.11.18", features = ["json"] } serde_bytes = "0.11" serde_repr = "0.1" serde_urlencoded = "0.7" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.7-develop", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.8-develop", 
path = "packages/test-helpers" } [workspace] members = ["contrib/bencode", "packages/configuration", "packages/located-error", "packages/primitives", "packages/test-helpers"] diff --git a/README.md b/README.md index e8f54bb6e..b5805446a 100644 --- a/README.md +++ b/README.md @@ -179,13 +179,13 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [containers.md]: ./docs/containers.md -[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.7-develop/torrust_tracker/servers/apis/v1 -[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.7-develop/torrust_tracker/servers/http -[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.7-develop/torrust_tracker/servers/udp +[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.8-develop/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.8-develop/torrust_tracker/servers/http +[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.8-develop/torrust_tracker/servers/udp [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [documentation]: https://docs.rs/torrust-tracker/ -[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.7-develop/torrust_tracker/servers/apis/v1 +[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.8-develop/torrust_tracker/servers/apis/v1 [discussions]: https://github.com/torrust/torrust-tracker/discussions [COPYRIGHT]: ./COPYRIGHT diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 9057e85df..631362c07 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -21,8 +21,8 @@ serde = { version = "1.0", features = ["derive"] } serde_with = "3.2" thiserror = "1.0" toml = "0.8" -torrust-tracker-located-error = { version = "3.0.0-alpha.7-develop", path = "../located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.7-develop", path = "../primitives" } +torrust-tracker-located-error = { 
version = "3.0.0-alpha.8-develop", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.8-develop", path = "../primitives" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 49903213d..0c85d31f6 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -17,5 +17,5 @@ version.workspace = true [dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.7-develop", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-alpha.7-develop", path = "../primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.8-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.8-develop", path = "../primitives" } From aafe394f3313eb1f23137fe66b6492485d125366 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 15 Sep 2023 20:55:01 +0200 Subject: [PATCH 0596/1003] ci: fix deploymment workflow --- .github/workflows/deployment.yaml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 43bfe4784..ec349bf28 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -64,11 +64,10 @@ jobs: with: toolchain: stable - env: - CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} - - id: publish name: Publish Crates + env: + CARGO_REGISTRY_TOKEN: "${{ secrets.CARGO_REGISTRY_TOKEN }}" run: | cargo publish -p torrust-tracker-contrib-bencode cargo publish -p torrust-tracker-located-error From 4f4d1369889be18070b6509bcfc7c1f90ad4f6a8 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 15 Sep 2023 20:57:38 +0200 Subject: [PATCH 0597/1003] docs: small style update to release process --- docs/release_process.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/docs/release_process.md b/docs/release_process.md index 4b58d7ca8..e29836191 100644 --- a/docs/release_process.md +++ b/docs/release_process.md @@ -13,12 +13,12 @@ Torrust Tracker is published according to this protocol: - [ ] `develop` is ready for branching for a release. - [ ] force-push develop to `staging` branch. -- [ ] commit `release: version (semantic version)`, removing the `-develop` suffix. +- [ ] commit `release: version (semantic version)`, that removes the `-develop` suffix. - [ ] create pull request to merge `staging` into `main` branch. - [ ] check all status checks succeed for `main` branch. -- [ ] push `main` branch to `releases\v(semantic version)` branch. -- [ ] check all status checks success for `releases\v(semantic version)` branch. -- [ ] create signed `v(semantic version)` tag from `releases\v(semantic version) HEAD`. +- [ ] push `main` branch to `releases/(semantic version)` branch. +- [ ] check all status checks success for `releases/(semantic version)` branch. +- [ ] create signed `v(semantic version)` tag from `releases/(semantic version) HEAD`. - [ ] create github release from `v(semantic version)` tag. - [ ] merge the `main` branch back into `develop` branch, assuring that the (semantic version) has the suffix `-develop`. 
From 98261eabd67f7f835469d9eeb983e4cf80c7f2a7 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 15 Sep 2023 21:22:06 +0200 Subject: [PATCH 0598/1003] release: version 3.0.0-alpha.8 --- Cargo.lock | 12 ++++++------ Cargo.toml | 12 ++++++------ README.md | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/test-helpers/Cargo.toml | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 976e9b55d..3c8169f50 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3169,7 +3169,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.8-develop" +version = "3.0.0-alpha.8" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -3215,7 +3215,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.8-develop" +version = "3.0.0-alpha.8" dependencies = [ "config", "log", @@ -3230,7 +3230,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-alpha.8-develop" +version = "3.0.0-alpha.8" dependencies = [ "criterion", "error-chain", @@ -3238,7 +3238,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.8-develop" +version = "3.0.0-alpha.8" dependencies = [ "log", "thiserror", @@ -3246,7 +3246,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.8-develop" +version = "3.0.0-alpha.8" dependencies = [ "derive_more", "serde", @@ -3254,7 +3254,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.8-develop" +version = "3.0.0-alpha.8" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index 0cb119a19..f5ab5187c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ license-file = "COPYRIGHT" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-alpha.8-develop" +version = "3.0.0-alpha.8" [dependencies] 
aquatic_udp_protocol = "0.8" @@ -56,10 +56,10 @@ serde_json = "1.0" serde_with = "3.2" thiserror = "1.0" tokio = { version = "1.29", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-configuration = { version = "3.0.0-alpha.8-develop", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.8-develop", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-alpha.8-develop", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.8-develop", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.8", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.8", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-alpha.8", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.8", path = "packages/primitives" } tower-http = { version = "0.4", features = ["compression-full"] } uuid = { version = "1", features = ["v4"] } @@ -70,7 +70,7 @@ reqwest = { version = "0.11.18", features = ["json"] } serde_bytes = "0.11" serde_repr = "0.1" serde_urlencoded = "0.7" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.8-develop", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.8", path = "packages/test-helpers" } [workspace] members = ["contrib/bencode", "packages/configuration", "packages/located-error", "packages/primitives", "packages/test-helpers"] diff --git a/README.md b/README.md index b5805446a..b34168110 100644 --- a/README.md +++ b/README.md @@ -179,13 +179,13 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [containers.md]: ./docs/containers.md -[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.8-develop/torrust_tracker/servers/apis/v1 -[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.8-develop/torrust_tracker/servers/http 
-[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.8-develop/torrust_tracker/servers/udp +[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.8/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.8/torrust_tracker/servers/http +[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.8/torrust_tracker/servers/udp [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [documentation]: https://docs.rs/torrust-tracker/ -[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.8-develop/torrust_tracker/servers/apis/v1 +[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.8/torrust_tracker/servers/apis/v1 [discussions]: https://github.com/torrust/torrust-tracker/discussions [COPYRIGHT]: ./COPYRIGHT diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 631362c07..53de593d2 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -21,8 +21,8 @@ serde = { version = "1.0", features = ["derive"] } serde_with = "3.2" thiserror = "1.0" toml = "0.8" -torrust-tracker-located-error = { version = "3.0.0-alpha.8-develop", path = "../located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.8-develop", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.8", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.8", path = "../primitives" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 0c85d31f6..7dab50bad 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -17,5 +17,5 @@ version.workspace = true [dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.8-develop", path = "../configuration" } -torrust-tracker-primitives = { version = 
"3.0.0-alpha.8-develop", path = "../primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.8", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.8", path = "../primitives" } From 4e116f50bc5251e1abaa59d357864df339299453 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 16 Sep 2023 12:24:28 +0200 Subject: [PATCH 0599/1003] github: add codeowners file --- .github/CODEOWNERS | 1 + 1 file changed, 1 insertion(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..2ae8963e3 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +/.github/**/* @torrust/maintainers From 9fb75e4bb92ac5cf42a9e4b0ffa36f85cfd74b69 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 16 Sep 2023 12:25:47 +0200 Subject: [PATCH 0600/1003] ci: secret checks were unnecessary --- .github/workflows/container.yaml | 27 +++++---------------------- .github/workflows/coverage.yaml | 18 ------------------ .github/workflows/deployment.yaml | 22 ++-------------------- 3 files changed, 7 insertions(+), 60 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index d107a0139..c33ea16c8 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -54,6 +54,7 @@ jobs: context: name: Context + needs: test runs-on: ubuntu-latest outputs: @@ -97,29 +98,11 @@ jobs: echo "On a Forked Repository. 
Will Not Continue" fi - secrets: - name: Secrets - needs: [test, context] - environment: dockerhub-torrust - if: needs.context.outputs.continue == 'true' - runs-on: ubuntu-latest - - outputs: - continue: ${{ steps.check.outputs.continue }} - - steps: - - id: check - name: Check - env: - DOCKER_HUB_ACCESS_TOKEN: "${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}" - if: "${{ env.DOCKER_HUB_ACCESS_TOKEN != '' }}" - run: echo "continue=true" >> $GITHUB_OUTPUT - publish_development: name: Publish (Development) environment: dockerhub-torrust - needs: [secrets, context] - if: needs.secrets.outputs.continue == 'true' && needs.context.outputs.type == 'development' + needs: context + if: needs.context.outputs.continue == 'true' && needs.context.outputs.type == 'development' runs-on: ubuntu-latest steps: @@ -156,8 +139,8 @@ jobs: publish_release: name: Publish (Release) environment: dockerhub-torrust - needs: [secrets, context] - if: needs.secrets.outputs.continue == 'true' && needs.context.outputs.type == 'release' + needs: context + if: needs.context.outputs.continue == 'true' && needs.context.outputs.type == 'release' runs-on: ubuntu-latest steps: diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index cf7eb6f6d..1e7dace66 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -12,27 +12,9 @@ env: CARGO_TERM_COLOR: always jobs: - secrets: - name: Secrets - environment: coverage - runs-on: ubuntu-latest - - outputs: - continue: ${{ steps.check.outputs.continue }} - - steps: - - id: check - name: Check - env: - CODECOV_TOKEN: "${{ secrets.CODECOV_TOKEN }}" - if: "${{ env.CODECOV_TOKEN != '' }}" - run: echo "continue=true" >> $GITHUB_OUTPUT - report: name: Report environment: coverage - needs: secrets - if: needs.secrets.outputs.continue == 'true' runs-on: ubuntu-latest env: CARGO_INCREMENTAL: "0" diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index ec349bf28..5df50a4b0 100644 --- 
a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -6,26 +6,8 @@ on: - "releases/**/*" jobs: - secrets: - name: Secrets - environment: crates-io-torrust - runs-on: ubuntu-latest - - outputs: - continue: ${{ steps.check.outputs.continue }} - - steps: - - id: check - name: Check - env: - CARGO_REGISTRY_TOKEN: "${{ secrets.CARGO_REGISTRY_TOKEN }}" - if: "${{ env.CARGO_REGISTRY_TOKEN != '' }}" - run: echo "continue=true" >> $GITHUB_OUTPUT - test: name: Test - needs: secrets - if: needs.secrets.outputs.continue == 'true' runs-on: ubuntu-latest strategy: @@ -49,7 +31,7 @@ jobs: publish: name: Publish - environment: crates-io-torrust + environment: deployment needs: test runs-on: ubuntu-latest @@ -67,7 +49,7 @@ jobs: - id: publish name: Publish Crates env: - CARGO_REGISTRY_TOKEN: "${{ secrets.CARGO_REGISTRY_TOKEN }}" + CARGO_REGISTRY_TOKEN: "${{ secrets.TORRUST_UPDATE_CARGO_REGISTRY_TOKEN }}" run: | cargo publish -p torrust-tracker-contrib-bencode cargo publish -p torrust-tracker-located-error From b990e777ca76fb0e8d135b4fa51359f809306f51 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 16 Sep 2023 12:26:55 +0200 Subject: [PATCH 0601/1003] docs: update release process --- docs/release_process.md | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/docs/release_process.md b/docs/release_process.md index e29836191..4b5e39b25 100644 --- a/docs/release_process.md +++ b/docs/release_process.md @@ -1,4 +1,4 @@ -# Torrust Tracker Release Process (draft 2) +# Torrust Tracker Release Process (v2.1.0-draft) The purpose of this document is to describe the release process. @@ -12,19 +12,20 @@ Torrust Tracker is published according to this protocol: - The version is bumped according to releases, new features, and breaking changes. - [ ] `develop` is ready for branching for a release. -- [ ] force-push develop to `staging` branch. +- [ ] force-push `develop` to `staging/main` branch. 
- [ ] commit `release: version (semantic version)`, that removes the `-develop` suffix. -- [ ] create pull request to merge `staging` into `main` branch. -- [ ] check all status checks succeed for `main` branch. -- [ ] push `main` branch to `releases/(semantic version)` branch. +- [ ] create pull request to merge `staging/main` into `main` branch. +- [ ] push `main` branch to `releases/v(semantic version)` branch. - [ ] check all status checks success for `releases/(semantic version)` branch. - [ ] create signed `v(semantic version)` tag from `releases/(semantic version) HEAD`. - [ ] create github release from `v(semantic version)` tag. -- [ ] merge the `main` branch back into `develop` branch, assuring that the (semantic version) has the suffix `-develop`. +- [ ] force-push `main` to `staging/develop` branch. +- [ ] commit `develop: bump version (semantic version)-develop`, that bumps the version and adds the `-develop` suffix. +- [ ] create pull request to merge `staging/develop` into `develop` branch. - At step `1.`, `develop` is automatically published to `dockerhub`. - At step `3.`, `main` is automatically published to `dockerhub`. -- At step `6.`, `releases\v(semantic version)` is automatically published to `dockerhub` and `crate.io`. +- At step `6.`, `releases/v(semantic version)` is automatically published to `dockerhub` and `crate.io`. 
## Development Branch From de884dde58b73c3bf619d097fe0c5bfb83454f12 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 16 Sep 2023 12:27:49 +0200 Subject: [PATCH 0602/1003] develop: bump version 3.0.0-alpha.9-develop --- Cargo.lock | 12 ++++++------ Cargo.toml | 12 ++++++------ README.md | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/test-helpers/Cargo.toml | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 976e9b55d..f7f88c644 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3169,7 +3169,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.8-develop" +version = "3.0.0-alpha.9-develop" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -3215,7 +3215,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.8-develop" +version = "3.0.0-alpha.9-develop" dependencies = [ "config", "log", @@ -3230,7 +3230,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-alpha.8-develop" +version = "3.0.0-alpha.9-develop" dependencies = [ "criterion", "error-chain", @@ -3238,7 +3238,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.8-develop" +version = "3.0.0-alpha.9-develop" dependencies = [ "log", "thiserror", @@ -3246,7 +3246,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.8-develop" +version = "3.0.0-alpha.9-develop" dependencies = [ "derive_more", "serde", @@ -3254,7 +3254,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.8-develop" +version = "3.0.0-alpha.9-develop" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index 0cb119a19..607ec3adb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ license-file = "COPYRIGHT" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" 
-version = "3.0.0-alpha.8-develop" +version = "3.0.0-alpha.9-develop" [dependencies] aquatic_udp_protocol = "0.8" @@ -56,10 +56,10 @@ serde_json = "1.0" serde_with = "3.2" thiserror = "1.0" tokio = { version = "1.29", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-configuration = { version = "3.0.0-alpha.8-develop", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.8-develop", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-alpha.8-develop", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.8-develop", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.9-develop", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.9-develop", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-alpha.9-develop", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.9-develop", path = "packages/primitives" } tower-http = { version = "0.4", features = ["compression-full"] } uuid = { version = "1", features = ["v4"] } @@ -70,7 +70,7 @@ reqwest = { version = "0.11.18", features = ["json"] } serde_bytes = "0.11" serde_repr = "0.1" serde_urlencoded = "0.7" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.8-develop", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.9-develop", path = "packages/test-helpers" } [workspace] members = ["contrib/bencode", "packages/configuration", "packages/located-error", "packages/primitives", "packages/test-helpers"] diff --git a/README.md b/README.md index b5805446a..c07c2b7f7 100644 --- a/README.md +++ b/README.md @@ -179,13 +179,13 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [containers.md]: ./docs/containers.md -[api]: 
https://docs.rs/torrust-tracker/3.0.0-alpha.8-develop/torrust_tracker/servers/apis/v1 -[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.8-develop/torrust_tracker/servers/http -[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.8-develop/torrust_tracker/servers/udp +[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.9-develop/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.9-develop/torrust_tracker/servers/http +[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.9-develop/torrust_tracker/servers/udp [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [documentation]: https://docs.rs/torrust-tracker/ -[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.8-develop/torrust_tracker/servers/apis/v1 +[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.9-develop/torrust_tracker/servers/apis/v1 [discussions]: https://github.com/torrust/torrust-tracker/discussions [COPYRIGHT]: ./COPYRIGHT diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 631362c07..b9b4c50d8 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -21,8 +21,8 @@ serde = { version = "1.0", features = ["derive"] } serde_with = "3.2" thiserror = "1.0" toml = "0.8" -torrust-tracker-located-error = { version = "3.0.0-alpha.8-develop", path = "../located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.8-develop", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.9-develop", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.9-develop", path = "../primitives" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 0c85d31f6..e367d07cd 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -17,5 +17,5 @@ 
version.workspace = true [dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.8-develop", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-alpha.8-develop", path = "../primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.9-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.9-develop", path = "../primitives" } From 82622c5d0f39801d1321b554a264826f9376a04d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 16 Sep 2023 13:10:43 +0200 Subject: [PATCH 0603/1003] chore: update cargo lockfile --- Cargo.lock | 54 +++++++++++++++++++++++++++--------------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f7f88c644..afb4ef0b8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -142,7 +142,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -292,7 +292,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -473,9 +473,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.30" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defd4e7873dbddba6c7c91e199c7fcb946abc4a6a4ac3195400bcfb01b5de877" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" dependencies = [ "android-tzdata", "iana-time-zone", @@ -749,7 +749,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -760,7 +760,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core", "quote", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -793,7 +793,7 @@ checksum = 
"9abcad25e9720609ccb3dcdb795d845e37d8ce34183330a9f48b03a1a71c8e21" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -994,7 +994,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -1006,7 +1006,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -1018,7 +1018,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -1083,7 +1083,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -1374,7 +1374,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5305557fa27b460072ae15ce07617e999f5879f14d376c8449f0bfb9f9d8e91e" dependencies = [ "derive_utils", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -1738,7 +1738,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.35", "termcolor", "thiserror", ] @@ -1939,7 +1939,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -2062,7 +2062,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -2093,7 +2093,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -2724,7 +2724,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -2756,7 +2756,7 @@ checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -2806,7 +2806,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -2928,9 +2928,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.33" +version = "2.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9caece70c63bfba29ec2fed841a09851b14a235c60010fa4de58089b6c025668" +checksum = "59bf04c28bee9043ed9ea1e41afc0552288d3aba9c6efdd78903b802926f4879" dependencies = [ "proc-macro2", "quote", @@ -2994,7 +2994,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -3076,7 +3076,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.35", ] [[package]] @@ -3351,9 +3351,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ucd-trie" @@ -3467,7 +3467,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.35", "wasm-bindgen-shared", ] @@ -3501,7 +3501,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.35", "wasm-bindgen-backend", "wasm-bindgen-shared", ] From c451a747b421c8b2157aac9ca32de77f9759b01c Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 16 Sep 2023 13:55:28 +0200 Subject: [PATCH 0604/1003] docs: update release process --- docs/release_process.md | 103 +++++++++++++++++++++------------------- 1 file changed, 55 insertions(+), 48 deletions(-) diff --git a/docs/release_process.md 
b/docs/release_process.md index 4b5e39b25..1f81aea57 100644 --- a/docs/release_process.md +++ b/docs/release_process.md @@ -1,68 +1,75 @@ -# Torrust Tracker Release Process (v2.1.0-draft) +# Torrust Tracker Release Process (v2.2.0) -The purpose of this document is to describe the release process. +## Version: +> **The `[semantic version]` is bumped according to releases, new features, and breaking changes.** +> +> *The `develop` branch uses the (semantic version) suffix `-develop`.* -## Overview +## Process: -Torrust Tracker is published according to this protocol: +### 1. The `develop` branch is ready for a release. +The `develop` branch should have the version `[semantic version]-develop` that is ready to be released. -0. After release create new pull request into `develop` branch: +### 2. Stage `develop` HEAD for merging into the `main` branch: -- The `develop` branch has the (semantic version) suffix `-develop`. -- The version is bumped according to releases, new features, and breaking changes. +```sh +git fetch --all +git push --force torrust develop:staging/main +``` -- [ ] `develop` is ready for branching for a release. -- [ ] force-push `develop` to `staging/main` branch. -- [ ] commit `release: version (semantic version)`, that removes the `-develop` suffix. -- [ ] create pull request to merge `staging/main` into `main` branch. -- [ ] push `main` branch to `releases/v(semantic version)` branch. -- [ ] check all status checks success for `releases/(semantic version)` branch. -- [ ] create signed `v(semantic version)` tag from `releases/(semantic version) HEAD`. -- [ ] create github release from `v(semantic version)` tag. -- [ ] force-push `main` to `staging/develop` branch. -- [ ] commit `develop: bump version (semantic version)-develop`, that bumps the version and adds the `-develop` suffix. -- [ ] create pull request to merge `staging/develop` into `develop` branch. +### 3. 
Create Release Commit: -- At step `1.`, `develop` is automatically published to `dockerhub`. -- At step `3.`, `main` is automatically published to `dockerhub`. -- At step `6.`, `releases/v(semantic version)` is automatically published to `dockerhub` and `crate.io`. +```sh +git stash +git switch staging/main +# change `[semantic version]-develop` to `[semantic version]`. +git add -A +git commit -m "release: version [semantic version]" +git push torrust +``` -## Development Branch +### 4. Create and Merge Pull Request from `staging/main` into `main` branch. +This pull request merges the new version into the `main` branch. -The `develop` branch, the default branch for the repository is automatically published to dockerhub with the `develop` label. This process happens automatically when a pull request is merged in, and the `container.yaml` workflow is triggered. +### 5. Push new version from `main` HEAD to `releases/v[semantic version]` branch: -## Main Branch +```sh +git fetch --all +git push torrust main:releases/v[semantic version] +``` -The `main` branch is the staging branch for releases. +> **Check that the deployment is successful!** -A release commit needs to be made that prepares the repository for the release, this commit should include: +### 6. Create Release Tag: -- Changing the semantic version. -- Finalizing the release notes and changelog. +```sh +git switch releases/v[semantic version] +git tag --sign v[semantic version] +git push --tags torrust +``` -The title of the commit should be: `release: version (semantic version)`. +### 7. Create Release on Github from Tag. +This is for those who wish to download the source code. -This commit should be committed upon the head of the development branch, and pushed to the `main` branch. +### 8. Stage `main` HEAD for merging into the `develop` branch: +Merge release back into the develop branch. -Once the release has succeeded, the `main` branch should be merged back into the `develop` branch. 
+```sh +git fetch --all +git push --force torrust main:staging/develop +``` +### 9. Create Comment that bumps next development version: -## Releases Branch +```sh +git stash +git switch staging/main +# change `[semantic version]` to `(next)[semantic version]-develop`. +git add -A +git commit -m "develop: bump to version (next)[semantic version]-develop" +git push torrust +``` -According to the patten `releases/v(semantic version)`, the `main` branch head is published to here to trigger the deployment workflows. +### 10. Create and Merge Pull Request from `staging/develop` into `develop` branch. +This pull request merges the new release into the `develop` branch and bumps the version number. -The repository deployment environment for crates.io is only available for the `releases/**/*` patten of branches. -Once the publishing workflows have succeeded; we can make the git-tag. - -## Release Tag - -Create a Signed Tag with a short message in the form `v(semantic version)` and push it to the repository. - -## Github Release - -From the newly published tag, create a Github Release using the web-interface. - - -## Merge back into development branch - -After this is all successful, the `main` branch should be merged into the `develop` branch. 
From dd8675548a122557ca177a5f79fa2b43d105c419 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 16 Sep 2023 15:10:51 +0200 Subject: [PATCH 0605/1003] release: version 3.0.0-alpha.9 --- Cargo.lock | 12 ++++++------ Cargo.toml | 12 ++++++------ README.md | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/test-helpers/Cargo.toml | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index afb4ef0b8..f85b1dd02 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3169,7 +3169,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.9-develop" +version = "3.0.0-alpha.9" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -3215,7 +3215,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.9-develop" +version = "3.0.0-alpha.9" dependencies = [ "config", "log", @@ -3230,7 +3230,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-alpha.9-develop" +version = "3.0.0-alpha.9" dependencies = [ "criterion", "error-chain", @@ -3238,7 +3238,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.9-develop" +version = "3.0.0-alpha.9" dependencies = [ "log", "thiserror", @@ -3246,7 +3246,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.9-develop" +version = "3.0.0-alpha.9" dependencies = [ "derive_more", "serde", @@ -3254,7 +3254,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.9-develop" +version = "3.0.0-alpha.9" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index 607ec3adb..f62d45799 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ license-file = "COPYRIGHT" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-alpha.9-develop" +version = "3.0.0-alpha.9" [dependencies] 
aquatic_udp_protocol = "0.8" @@ -56,10 +56,10 @@ serde_json = "1.0" serde_with = "3.2" thiserror = "1.0" tokio = { version = "1.29", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-configuration = { version = "3.0.0-alpha.9-develop", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.9-develop", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-alpha.9-develop", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.9-develop", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.9", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.9", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-alpha.9", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.9", path = "packages/primitives" } tower-http = { version = "0.4", features = ["compression-full"] } uuid = { version = "1", features = ["v4"] } @@ -70,7 +70,7 @@ reqwest = { version = "0.11.18", features = ["json"] } serde_bytes = "0.11" serde_repr = "0.1" serde_urlencoded = "0.7" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.9-develop", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.9", path = "packages/test-helpers" } [workspace] members = ["contrib/bencode", "packages/configuration", "packages/located-error", "packages/primitives", "packages/test-helpers"] diff --git a/README.md b/README.md index c07c2b7f7..b91a01e93 100644 --- a/README.md +++ b/README.md @@ -179,13 +179,13 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [containers.md]: ./docs/containers.md -[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.9-develop/torrust_tracker/servers/apis/v1 -[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.9-develop/torrust_tracker/servers/http 
-[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.9-develop/torrust_tracker/servers/udp +[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.9/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.9/torrust_tracker/servers/http +[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.9/torrust_tracker/servers/udp [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [documentation]: https://docs.rs/torrust-tracker/ -[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.9-develop/torrust_tracker/servers/apis/v1 +[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.9/torrust_tracker/servers/apis/v1 [discussions]: https://github.com/torrust/torrust-tracker/discussions [COPYRIGHT]: ./COPYRIGHT diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index b9b4c50d8..1ce6a63a4 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -21,8 +21,8 @@ serde = { version = "1.0", features = ["derive"] } serde_with = "3.2" thiserror = "1.0" toml = "0.8" -torrust-tracker-located-error = { version = "3.0.0-alpha.9-develop", path = "../located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.9-develop", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.9", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.9", path = "../primitives" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index e367d07cd..8ecf29676 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -17,5 +17,5 @@ version.workspace = true [dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.9-develop", path = "../configuration" } -torrust-tracker-primitives = { version = 
"3.0.0-alpha.9-develop", path = "../primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.9", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.9", path = "../primitives" } From cea471faad0c211d058eacd51dcc32d7d946c91b Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 16 Sep 2023 16:53:35 +0200 Subject: [PATCH 0606/1003] ci: fix test in container workflow --- .github/workflows/container.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index c33ea16c8..884a15843 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -80,7 +80,7 @@ jobs: echo "continue=true" >> $GITHUB_OUTPUT echo "On \`develop\` Branch, Type: \`development\`" - elif [[ "${{ github.ref }}" =~ ^(refs\/heads\/releases\/)(v)(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ ]]; then + elif [[ $(echo "${{ github.ref }}" | grep -P '^(refs\/heads\/releases\/)(v)(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$') ]]; then version=$(echo "${{ github.ref }}" | sed -n -E 's/^(refs\/heads\/releases\/)//p') echo "version=$version" >> $GITHUB_OUTPUT From dba065f08323fe5acb3943f5981b16a9eb0aa63a Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 16 Sep 2023 16:56:52 +0200 Subject: [PATCH 0607/1003] docs: minor changes to release process --- docs/release_process.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/release_process.md b/docs/release_process.md index 1f81aea57..d5d945694 100644 --- a/docs/release_process.md +++ b/docs/release_process.md @@ -1,4 +1,4 @@ -# Torrust Tracker Release Process (v2.2.0) +# Torrust Tracker Release Process 
(v2.2.1) ## Version: > **The `[semantic version]` is bumped according to releases, new features, and breaking changes.** @@ -29,6 +29,9 @@ git push torrust ``` ### 4. Create and Merge Pull Request from `staging/main` into `main` branch. + +Pull request title format: "Release Version `[semantic version]`". + This pull request merges the new version into the `main` branch. ### 5. Push new version from `main` HEAD to `releases/v[semantic version]` branch: @@ -70,6 +73,7 @@ git push torrust ``` ### 10. Create and Merge Pull Request from `staging/develop` into `develop` branch. -This pull request merges the new release into the `develop` branch and bumps the version number. +Pull request title format: "Version `[semantic version]` was Released". +This pull request merges the new release into the `develop` branch and bumps the version number. From fc610f6f4aaae5712241dc838ea65d923d361731 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 16 Sep 2023 16:57:59 +0200 Subject: [PATCH 0608/1003] develop: bump version 3.0.0-alpha.10-develop --- Cargo.lock | 12 ++++++------ Cargo.toml | 12 ++++++------ README.md | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/test-helpers/Cargo.toml | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index afb4ef0b8..a82f5f421 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3169,7 +3169,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.9-develop" +version = "3.0.0-alpha.10-develop" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -3215,7 +3215,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.9-develop" +version = "3.0.0-alpha.10-develop" dependencies = [ "config", "log", @@ -3230,7 +3230,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-alpha.9-develop" +version = "3.0.0-alpha.10-develop" dependencies = [ "criterion", "error-chain", @@ -3238,7 
+3238,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.9-develop" +version = "3.0.0-alpha.10-develop" dependencies = [ "log", "thiserror", @@ -3246,7 +3246,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.9-develop" +version = "3.0.0-alpha.10-develop" dependencies = [ "derive_more", "serde", @@ -3254,7 +3254,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.9-develop" +version = "3.0.0-alpha.10-develop" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index 607ec3adb..9c236f49c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ license-file = "COPYRIGHT" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-alpha.9-develop" +version = "3.0.0-alpha.10-develop" [dependencies] aquatic_udp_protocol = "0.8" @@ -56,10 +56,10 @@ serde_json = "1.0" serde_with = "3.2" thiserror = "1.0" tokio = { version = "1.29", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-configuration = { version = "3.0.0-alpha.9-develop", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.9-develop", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-alpha.9-develop", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.9-develop", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.10-develop", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.10-develop", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-alpha.10-develop", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.10-develop", path = "packages/primitives" } tower-http = { version = "0.4", features = ["compression-full"] } 
uuid = { version = "1", features = ["v4"] } @@ -70,7 +70,7 @@ reqwest = { version = "0.11.18", features = ["json"] } serde_bytes = "0.11" serde_repr = "0.1" serde_urlencoded = "0.7" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.9-develop", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.10-develop", path = "packages/test-helpers" } [workspace] members = ["contrib/bencode", "packages/configuration", "packages/located-error", "packages/primitives", "packages/test-helpers"] diff --git a/README.md b/README.md index c07c2b7f7..dbf030f9e 100644 --- a/README.md +++ b/README.md @@ -179,13 +179,13 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [containers.md]: ./docs/containers.md -[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.9-develop/torrust_tracker/servers/apis/v1 -[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.9-develop/torrust_tracker/servers/http -[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.9-develop/torrust_tracker/servers/udp +[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.10-develop/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.10-develop/torrust_tracker/servers/http +[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.10-develop/torrust_tracker/servers/udp [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [documentation]: https://docs.rs/torrust-tracker/ -[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.9-develop/torrust_tracker/servers/apis/v1 +[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.10-develop/torrust_tracker/servers/apis/v1 [discussions]: https://github.com/torrust/torrust-tracker/discussions [COPYRIGHT]: ./COPYRIGHT diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index b9b4c50d8..48ef118f7 100644 --- a/packages/configuration/Cargo.toml +++ 
b/packages/configuration/Cargo.toml @@ -21,8 +21,8 @@ serde = { version = "1.0", features = ["derive"] } serde_with = "3.2" thiserror = "1.0" toml = "0.8" -torrust-tracker-located-error = { version = "3.0.0-alpha.9-develop", path = "../located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.9-develop", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.10-develop", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.10-develop", path = "../primitives" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index e367d07cd..a1c2bad79 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -17,5 +17,5 @@ version.workspace = true [dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.9-develop", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-alpha.9-develop", path = "../primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.10-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.10-develop", path = "../primitives" } From 12ffcff58d878f4636736acf0b77ebeb5fb57ece Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 16 Sep 2023 17:31:05 +0200 Subject: [PATCH 0609/1003] release: version 3.0.0-alpha.10 --- Cargo.lock | 12 ++++++------ Cargo.toml | 12 ++++++------ README.md | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/test-helpers/Cargo.toml | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a82f5f421..b6b9370f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3169,7 +3169,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.10-develop" +version = "3.0.0-alpha.10" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -3215,7 +3215,7 
@@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.10-develop" +version = "3.0.0-alpha.10" dependencies = [ "config", "log", @@ -3230,7 +3230,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-alpha.10-develop" +version = "3.0.0-alpha.10" dependencies = [ "criterion", "error-chain", @@ -3238,7 +3238,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.10-develop" +version = "3.0.0-alpha.10" dependencies = [ "log", "thiserror", @@ -3246,7 +3246,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.10-develop" +version = "3.0.0-alpha.10" dependencies = [ "derive_more", "serde", @@ -3254,7 +3254,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.10-develop" +version = "3.0.0-alpha.10" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index 9c236f49c..abd293190 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ license-file = "COPYRIGHT" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-alpha.10-develop" +version = "3.0.0-alpha.10" [dependencies] aquatic_udp_protocol = "0.8" @@ -56,10 +56,10 @@ serde_json = "1.0" serde_with = "3.2" thiserror = "1.0" tokio = { version = "1.29", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-configuration = { version = "3.0.0-alpha.10-develop", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.10-develop", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-alpha.10-develop", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.10-develop", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.10", path = "packages/configuration" } 
+torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.10", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-alpha.10", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.10", path = "packages/primitives" } tower-http = { version = "0.4", features = ["compression-full"] } uuid = { version = "1", features = ["v4"] } @@ -70,7 +70,7 @@ reqwest = { version = "0.11.18", features = ["json"] } serde_bytes = "0.11" serde_repr = "0.1" serde_urlencoded = "0.7" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.10-develop", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.10", path = "packages/test-helpers" } [workspace] members = ["contrib/bencode", "packages/configuration", "packages/located-error", "packages/primitives", "packages/test-helpers"] diff --git a/README.md b/README.md index dbf030f9e..ac6a05492 100644 --- a/README.md +++ b/README.md @@ -179,13 +179,13 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [containers.md]: ./docs/containers.md -[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.10-develop/torrust_tracker/servers/apis/v1 -[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.10-develop/torrust_tracker/servers/http -[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.10-develop/torrust_tracker/servers/udp +[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.10/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.10/torrust_tracker/servers/http +[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.10/torrust_tracker/servers/udp [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [documentation]: https://docs.rs/torrust-tracker/ -[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.10-develop/torrust_tracker/servers/apis/v1 +[API documentation]: 
https://docs.rs/torrust-tracker/3.0.0-alpha.10/torrust_tracker/servers/apis/v1 [discussions]: https://github.com/torrust/torrust-tracker/discussions [COPYRIGHT]: ./COPYRIGHT diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 48ef118f7..b21e64751 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -21,8 +21,8 @@ serde = { version = "1.0", features = ["derive"] } serde_with = "3.2" thiserror = "1.0" toml = "0.8" -torrust-tracker-located-error = { version = "3.0.0-alpha.10-develop", path = "../located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.10-develop", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.10", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.10", path = "../primitives" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index a1c2bad79..170f532c8 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -17,5 +17,5 @@ version.workspace = true [dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.10-develop", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-alpha.10-develop", path = "../primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.10", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.10", path = "../primitives" } From ec8880380c58944cd45739f7eaeefb509f6759c0 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 16 Sep 2023 23:07:52 +0200 Subject: [PATCH 0610/1003] develop: bump to version 3.0.0-alpha.11-develop --- Cargo.lock | 12 ++++++------ Cargo.toml | 12 ++++++------ README.md | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/test-helpers/Cargo.toml | 4 ++-- 5 files changed, 20 insertions(+), 20 
deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b6b9370f4..a1a262b82 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3169,7 +3169,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.10" +version = "3.0.0-alpha.11-develop" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -3215,7 +3215,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.10" +version = "3.0.0-alpha.11-develop" dependencies = [ "config", "log", @@ -3230,7 +3230,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-alpha.10" +version = "3.0.0-alpha.11-develop" dependencies = [ "criterion", "error-chain", @@ -3238,7 +3238,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.10" +version = "3.0.0-alpha.11-develop" dependencies = [ "log", "thiserror", @@ -3246,7 +3246,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.10" +version = "3.0.0-alpha.11-develop" dependencies = [ "derive_more", "serde", @@ -3254,7 +3254,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.10" +version = "3.0.0-alpha.11-develop" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index abd293190..a1b387292 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ license-file = "COPYRIGHT" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-alpha.10" +version = "3.0.0-alpha.11-develop" [dependencies] aquatic_udp_protocol = "0.8" @@ -56,10 +56,10 @@ serde_json = "1.0" serde_with = "3.2" thiserror = "1.0" tokio = { version = "1.29", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-configuration = { version = "3.0.0-alpha.10", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.10", path = "contrib/bencode" } 
-torrust-tracker-located-error = { version = "3.0.0-alpha.10", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.10", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.11-develop", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.11-develop", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-alpha.11-develop", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.11-develop", path = "packages/primitives" } tower-http = { version = "0.4", features = ["compression-full"] } uuid = { version = "1", features = ["v4"] } @@ -70,7 +70,7 @@ reqwest = { version = "0.11.18", features = ["json"] } serde_bytes = "0.11" serde_repr = "0.1" serde_urlencoded = "0.7" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.10", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.11-develop", path = "packages/test-helpers" } [workspace] members = ["contrib/bencode", "packages/configuration", "packages/located-error", "packages/primitives", "packages/test-helpers"] diff --git a/README.md b/README.md index ac6a05492..0aadb3a3f 100644 --- a/README.md +++ b/README.md @@ -179,13 +179,13 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [containers.md]: ./docs/containers.md -[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.10/torrust_tracker/servers/apis/v1 -[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.10/torrust_tracker/servers/http -[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.10/torrust_tracker/servers/udp +[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.11-develop/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.11-develop/torrust_tracker/servers/http +[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.11-develop/torrust_tracker/servers/udp [good first issues]: 
https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [documentation]: https://docs.rs/torrust-tracker/ -[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.10/torrust_tracker/servers/apis/v1 +[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.11-develop/torrust_tracker/servers/apis/v1 [discussions]: https://github.com/torrust/torrust-tracker/discussions [COPYRIGHT]: ./COPYRIGHT diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index b21e64751..bf13e457a 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -21,8 +21,8 @@ serde = { version = "1.0", features = ["derive"] } serde_with = "3.2" thiserror = "1.0" toml = "0.8" -torrust-tracker-located-error = { version = "3.0.0-alpha.10", path = "../located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.10", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.11-develop", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.11-develop", path = "../primitives" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 170f532c8..31f6f61e6 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -17,5 +17,5 @@ version.workspace = true [dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.10", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-alpha.10", path = "../primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.11-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.11-develop", path = "../primitives" } From 4369cca0e1a1b81605912a846ac8765d5cbca36c Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 18 Sep 2023 10:42:13 +0200 Subject: 
[PATCH 0611/1003] release: version 3.0.0-alpha.11 --- Cargo.lock | 12 ++++++------ Cargo.toml | 12 ++++++------ README.md | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/test-helpers/Cargo.toml | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a1a262b82..6e4f51bd0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3169,7 +3169,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.11-develop" +version = "3.0.0-alpha.11" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -3215,7 +3215,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.11-develop" +version = "3.0.0-alpha.11" dependencies = [ "config", "log", @@ -3230,7 +3230,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-alpha.11-develop" +version = "3.0.0-alpha.11" dependencies = [ "criterion", "error-chain", @@ -3238,7 +3238,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.11-develop" +version = "3.0.0-alpha.11" dependencies = [ "log", "thiserror", @@ -3246,7 +3246,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.11-develop" +version = "3.0.0-alpha.11" dependencies = [ "derive_more", "serde", @@ -3254,7 +3254,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.11-develop" +version = "3.0.0-alpha.11" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index a1b387292..8c83d5834 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ license-file = "COPYRIGHT" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-alpha.11-develop" +version = "3.0.0-alpha.11" [dependencies] aquatic_udp_protocol = "0.8" @@ -56,10 +56,10 @@ serde_json = "1.0" serde_with = "3.2" thiserror = "1.0" tokio = { version = "1.29", 
features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-configuration = { version = "3.0.0-alpha.11-develop", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.11-develop", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-alpha.11-develop", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.11-develop", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.11", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.11", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-alpha.11", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.11", path = "packages/primitives" } tower-http = { version = "0.4", features = ["compression-full"] } uuid = { version = "1", features = ["v4"] } @@ -70,7 +70,7 @@ reqwest = { version = "0.11.18", features = ["json"] } serde_bytes = "0.11" serde_repr = "0.1" serde_urlencoded = "0.7" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.11-develop", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.11", path = "packages/test-helpers" } [workspace] members = ["contrib/bencode", "packages/configuration", "packages/located-error", "packages/primitives", "packages/test-helpers"] diff --git a/README.md b/README.md index 0aadb3a3f..e76659d5e 100644 --- a/README.md +++ b/README.md @@ -179,13 +179,13 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [containers.md]: ./docs/containers.md -[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.11-develop/torrust_tracker/servers/apis/v1 -[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.11-develop/torrust_tracker/servers/http -[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.11-develop/torrust_tracker/servers/udp +[api]: 
https://docs.rs/torrust-tracker/3.0.0-alpha.11/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.11/torrust_tracker/servers/http +[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.11/torrust_tracker/servers/udp [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [documentation]: https://docs.rs/torrust-tracker/ -[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.11-develop/torrust_tracker/servers/apis/v1 +[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.11/torrust_tracker/servers/apis/v1 [discussions]: https://github.com/torrust/torrust-tracker/discussions [COPYRIGHT]: ./COPYRIGHT diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index bf13e457a..cc300afe0 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -21,8 +21,8 @@ serde = { version = "1.0", features = ["derive"] } serde_with = "3.2" thiserror = "1.0" toml = "0.8" -torrust-tracker-located-error = { version = "3.0.0-alpha.11-develop", path = "../located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.11-develop", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.11", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.11", path = "../primitives" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 31f6f61e6..85edb99af 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -17,5 +17,5 @@ version.workspace = true [dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.11-develop", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-alpha.11-develop", path = "../primitives" } +torrust-tracker-configuration = { version = 
"3.0.0-alpha.11", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.11", path = "../primitives" } From a14bdc65a2a50ce1ce483409d17a4bba21d30863 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 18 Sep 2023 14:09:57 +0200 Subject: [PATCH 0612/1003] develop: bump to version 3.0.0-alpha.12-develop --- Cargo.lock | 12 ++++++------ Cargo.toml | 12 ++++++------ README.md | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/test-helpers/Cargo.toml | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6e4f51bd0..7b6caf856 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3169,7 +3169,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.11" +version = "3.0.0-alpha.12-develop" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -3215,7 +3215,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.11" +version = "3.0.0-alpha.12-develop" dependencies = [ "config", "log", @@ -3230,7 +3230,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-alpha.11" +version = "3.0.0-alpha.12-develop" dependencies = [ "criterion", "error-chain", @@ -3238,7 +3238,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.11" +version = "3.0.0-alpha.12-develop" dependencies = [ "log", "thiserror", @@ -3246,7 +3246,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.11" +version = "3.0.0-alpha.12-develop" dependencies = [ "derive_more", "serde", @@ -3254,7 +3254,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.11" +version = "3.0.0-alpha.12-develop" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index 8c83d5834..04761b07c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ license-file = "COPYRIGHT" publish = true repository 
= "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-alpha.11" +version = "3.0.0-alpha.12-develop" [dependencies] aquatic_udp_protocol = "0.8" @@ -56,10 +56,10 @@ serde_json = "1.0" serde_with = "3.2" thiserror = "1.0" tokio = { version = "1.29", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-configuration = { version = "3.0.0-alpha.11", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.11", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-alpha.11", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.11", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12-develop", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "packages/primitives" } tower-http = { version = "0.4", features = ["compression-full"] } uuid = { version = "1", features = ["v4"] } @@ -70,7 +70,7 @@ reqwest = { version = "0.11.18", features = ["json"] } serde_bytes = "0.11" serde_repr = "0.1" serde_urlencoded = "0.7" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.11", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.12-develop", path = "packages/test-helpers" } [workspace] members = ["contrib/bencode", "packages/configuration", "packages/located-error", "packages/primitives", "packages/test-helpers"] diff --git a/README.md b/README.md index e76659d5e..4e244b292 100644 --- a/README.md +++ b/README.md @@ -179,13 +179,13 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [containers.md]: ./docs/containers.md -[api]: 
https://docs.rs/torrust-tracker/3.0.0-alpha.11/torrust_tracker/servers/apis/v1 -[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.11/torrust_tracker/servers/http -[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.11/torrust_tracker/servers/udp +[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.12-develop/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.12-develop/torrust_tracker/servers/http +[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.12-develop/torrust_tracker/servers/udp [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [documentation]: https://docs.rs/torrust-tracker/ -[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.11/torrust_tracker/servers/apis/v1 +[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.12-develop/torrust_tracker/servers/apis/v1 [discussions]: https://github.com/torrust/torrust-tracker/discussions [COPYRIGHT]: ./COPYRIGHT diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index cc300afe0..a6733ef3a 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -21,8 +21,8 @@ serde = { version = "1.0", features = ["derive"] } serde_with = "3.2" thiserror = "1.0" toml = "0.8" -torrust-tracker-located-error = { version = "3.0.0-alpha.11", path = "../located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.11", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 85edb99af..2960c6fb9 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -17,5 +17,5 @@ version.workspace = true 
[dependencies] lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.11", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-alpha.11", path = "../primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } From f897f2f4bfb05e1f627bbc840b84c1d18c3d6726 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 18 Sep 2023 23:32:41 +0200 Subject: [PATCH 0613/1003] docs: fix docs.rs links in readme --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 4e244b292..e73829ed4 100644 --- a/README.md +++ b/README.md @@ -179,13 +179,13 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [containers.md]: ./docs/containers.md -[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.12-develop/torrust_tracker/servers/apis/v1 -[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.12-develop/torrust_tracker/servers/http -[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.12-develop/torrust_tracker/servers/udp +[api]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/http +[udp]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/udp [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [documentation]: https://docs.rs/torrust-tracker/ -[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.12-develop/torrust_tracker/servers/apis/v1 +[API documentation]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/apis/v1 [discussions]: https://github.com/torrust/torrust-tracker/discussions [COPYRIGHT]: ./COPYRIGHT From cba4cda83610a1c28b977904ae7e4843c38999cb Mon Sep 17 00:00:00 2001 From: Cameron 
Garnham Date: Mon, 18 Sep 2023 23:33:29 +0200 Subject: [PATCH 0614/1003] docs: fix little issues in release process --- docs/release_process.md | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/docs/release_process.md b/docs/release_process.md index d5d945694..73b0a8827 100644 --- a/docs/release_process.md +++ b/docs/release_process.md @@ -1,4 +1,4 @@ -# Torrust Tracker Release Process (v2.2.1) +# Torrust Tracker Release Process (v2.2.2) ## Version: > **The `[semantic version]` is bumped according to releases, new features, and breaking changes.** @@ -7,6 +7,20 @@ ## Process: +**Note**: this guide assumes that your git `torrust` remote is like this: + +```sh +git remote show torrust +``` + +```s +* remote torrust + Fetch URL: git@github.com:torrust/torrust-tracker.git + Push URL: git@github.com:torrust/torrust-tracker.git +... +``` + + ### 1. The `develop` branch is ready for a release. The `develop` branch should have the version `[semantic version]-develop` that is ready to be released. @@ -22,6 +36,7 @@ git push --force torrust develop:staging/main ```sh git stash git switch staging/main +git reset --hard torrust/staging/main # change `[semantic version]-develop` to `[semantic version]`. git add -A git commit -m "release: version [semantic version]" @@ -65,7 +80,8 @@ git push --force torrust main:staging/develop ```sh git stash -git switch staging/main +git switch staging/develop +git reset --hard torrust/staging/develop # change `[semantic version]` to `(next)[semantic version]-develop`. 
git add -A git commit -m "develop: bump to version (next)[semantic version]-develop" From 36716f74f510ac69fb47345e51442702e7f6018d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 19 Sep 2023 02:10:39 +0200 Subject: [PATCH 0615/1003] legal: clean up licensing documents --- COPYRIGHT | 664 +++++++++++++++++- Cargo.toml | 4 +- README.md | 30 +- .../licenses/LICENSE-AGPL_3_0 | 0 LICENSE-MIT_0 => docs/licenses/LICENSE-MIT_0 | 0 packages/configuration/Cargo.toml | 2 +- packages/located-error/Cargo.toml | 2 +- packages/primitives/Cargo.toml | 2 +- packages/test-helpers/Cargo.toml | 2 +- 9 files changed, 690 insertions(+), 16 deletions(-) rename LICENSE-AGPL_3_0 => docs/licenses/LICENSE-AGPL_3_0 (100%) rename LICENSE-MIT_0 => docs/licenses/LICENSE-MIT_0 (100%) diff --git a/COPYRIGHT b/COPYRIGHT index 6eef820ec..be3f7b28e 100644 --- a/COPYRIGHT +++ b/COPYRIGHT @@ -1,11 +1,661 @@ -Copyright 2023 in the Torrust-Tracker project are retained by their contributors. No -copyright assignment is required to contribute to the Torrust-Tracker project. + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 -Some files include explicit copyright notices and/or license notices. + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. -Except as otherwise noted (below and/or in individual files), Torrust-Tracker is -licensed under the GNU Affero General Public License, Version 3.0 . This license applies to all files in the Torrust-Tracker project, except as noted below. + Preamble -Except as otherwise noted (below and/or in individual files), Torrust-Tracker is licensed under the MIT-0 license for all commits made after 5 years of merging. This license applies to the version of the files merged into the Torrust-Tracker project at the time of merging, and does not apply to subsequent updates or revisions to those files. 
+ The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. -The contributors to the Torrust-Tracker project disclaim all liability for any damages or losses that may arise from the use of the project. + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. 
+ + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/Cargo.toml b/Cargo.toml index 04761b07c..3c9354cae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,7 +8,7 @@ documentation.workspace = true edition.workspace = true homepage.workspace = true keywords.workspace = true -license-file.workspace = true +license.workspace = true publish.workspace = true repository.workspace = true rust-version.workspace = true @@ -22,7 +22,7 @@ documentation = "https://docs.rs/crate/torrust-tracker/" edition = "2021" homepage = "https://torrust.com/" keywords = ["bittorrent", "file-sharing", "peer-to-peer", "torrent", "tracker"] -license-file = "COPYRIGHT" +license = "AGPL-3.0-only" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" diff --git a/README.md b/README.md index e73829ed4..41a7ec40b 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,6 @@ __Torrust Tracker__, is a [BitTorrent][bittorrent] Tracker (a service that match - [BEP 27] : Private Torrents. - [BEP 48] : Tracker Protocol Extension: Scrape. - ## Getting Started ### Container Version @@ -120,7 +119,6 @@ The following services are provided by the default configuration: - API _(management)_ - `http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken`. - ## Documentation - [Management API (Version 1)][api] @@ -141,7 +139,29 @@ __How can you contribute?__ ## License -The project is licensed under a dual license. See [COPYRIGHT]. 
+**Copyright (c) 2023 The Torrust Developers.** + +This program is free software: you can redistribute it and/or modify it under the terms of the [GNU Affero General Public License][AGPL_3_0] as published by the [Free Software Foundation][FSF], version 3. + +This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the [GNU Affero General Public License][AGPL_3_0] for more details. + +You should have received a copy of the *GNU Affero General Public License* along with this program. If not, see . + +Some files include explicit copyright notices and/or license notices. + +### Legacy Exception + +For prosperity, versions of Torrust Tracker that are older than five years are automatically granted the [MIT-0][MIT_0] license in addition to the existing [AGPL-3.0-only][AGPL_3_0] license. + +## Contributions + +The copyright of the Torrust Tracker is retained by the respective authors. + +**Contributors agree:** +- That all their contributions be granted a license(s) **compatible** with the [Torrust Trackers License](#License). +- That all contributors signal **clearly** and **explicitly** any other compilable licenses if they are not: *[AGPL-3.0-only with the legacy MIT-0 exception](#License)*. 
+ +**The Torrust-Tracker project has no copyright assignment agreement.** ## Acknowledgments @@ -189,6 +209,10 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [discussions]: https://github.com/torrust/torrust-tracker/discussions [COPYRIGHT]: ./COPYRIGHT +[AGPL_3_0]: ./docs/licenses/LICENSE-AGPL_3_0 +[MIT_0]: ./docs/licenses/LICENSE-MIT_0 +[FSF]: https://www.fsf.org/ + [nautilus]: https://github.com/orgs/Nautilus-Cyberneering/ [Dutch Bits]: https://dutchbits.nl diff --git a/LICENSE-AGPL_3_0 b/docs/licenses/LICENSE-AGPL_3_0 similarity index 100% rename from LICENSE-AGPL_3_0 rename to docs/licenses/LICENSE-AGPL_3_0 diff --git a/LICENSE-MIT_0 b/docs/licenses/LICENSE-MIT_0 similarity index 100% rename from LICENSE-MIT_0 rename to docs/licenses/LICENSE-MIT_0 diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index a6733ef3a..94cf1d0f5 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -8,7 +8,7 @@ authors.workspace = true documentation.workspace = true edition.workspace = true homepage.workspace = true -license-file.workspace = true +license.workspace = true publish.workspace = true repository.workspace = true rust-version.workspace = true diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index b4c813df3..6bc219374 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -8,7 +8,7 @@ authors.workspace = true documentation.workspace = true edition.workspace = true homepage.workspace = true -license-file.workspace = true +license.workspace = true publish.workspace = true repository.workspace = true rust-version.workspace = true diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index ce6c20ff0..3fac88bef 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -8,7 +8,7 @@ authors.workspace = true documentation.workspace = true edition.workspace = 
true homepage.workspace = true -license-file.workspace = true +license.workspace = true publish.workspace = true repository.workspace = true rust-version.workspace = true diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 2960c6fb9..d428cbf3e 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -8,7 +8,7 @@ authors.workspace = true documentation.workspace = true edition.workspace = true homepage.workspace = true -license-file.workspace = true +license.workspace = true publish.workspace = true repository.workspace = true rust-version.workspace = true From 8a49a9497b5606b84ce67c4608ca713925b028c4 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 19 Sep 2023 11:07:18 +0200 Subject: [PATCH 0616/1003] docs: remove duplicate COPYRIGHT file --- COPYRIGHT | 661 ------------------------------------------------------ 1 file changed, 661 deletions(-) delete mode 100644 COPYRIGHT diff --git a/COPYRIGHT b/COPYRIGHT deleted file mode 100644 index be3f7b28e..000000000 --- a/COPYRIGHT +++ /dev/null @@ -1,661 +0,0 @@ - GNU AFFERO GENERAL PUBLIC LICENSE - Version 3, 19 November 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU Affero General Public License is a free, copyleft license for -software and other kinds of works, specifically designed to ensure -cooperation with the community in the case of network server software. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -our General Public Licenses are intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. - - When we speak of free software, we are referring to freedom, not -price. 
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - Developers that use our General Public Licenses protect your rights -with two steps: (1) assert copyright on the software, and (2) offer -you this License which gives you legal permission to copy, distribute -and/or modify the software. - - A secondary benefit of defending all users' freedom is that -improvements made in alternate versions of the program, if they -receive widespread use, become available for other developers to -incorporate. Many developers of free software are heartened and -encouraged by the resulting cooperation. However, in the case of -software used on network servers, this result may fail to come about. -The GNU General Public License permits making a modified version and -letting the public access it on a server without ever releasing its -source code to the public. - - The GNU Affero General Public License is designed specifically to -ensure that, in such cases, the modified source code becomes available -to the community. It requires the operator of a network server to -provide the source code of the modified version running there to the -users of that server. Therefore, public use of a modified version, on -a publicly accessible server, gives the public access to the source -code of the modified version. - - An older license, called the Affero General Public License and -published by Affero, was designed to accomplish similar goals. This is -a different license, not a version of the Affero GPL, but Affero has -released a new version of the Affero GPL which permits relicensing under -this license. - - The precise terms and conditions for copying, distribution and -modification follow. 
- - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU Affero General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. 
- - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. 
- - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. 
- - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. 
This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
- - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. 
- - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. 
- - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. 
If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. 
If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. 
- - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. 
For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. 
- - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Remote Network Interaction; Use with the GNU General Public License. 
- - Notwithstanding any other provision of this License, if you modify the -Program, your modified version must prominently offer all users -interacting with it remotely through a computer network (if your version -supports such interaction) an opportunity to receive the Corresponding -Source of your version by providing access to the Corresponding Source -from a network server at no charge, through some standard or customary -means of facilitating copying of software. This Corresponding Source -shall include the Corresponding Source for any work covered by version 3 -of the GNU General Public License that is incorporated pursuant to the -following paragraph. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the work with which it is combined will remain governed by version -3 of the GNU General Public License. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU Affero General Public License from time to time. Such new versions -will be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU Affero General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU Affero General Public License, you may choose any version ever published -by the Free Software Foundation. 
- - If the Program specifies that a proxy can decide which future -versions of the GNU Affero General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. 
- - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If your software can interact with users remotely through a computer -network, you should also make sure that it provides a way for users to -get its source. For example, if your program is a web application, its -interface could display a "Source" link that leads users to an archive -of the code. 
There are many ways you could offer source, and different -solutions will be better for different programs; see section 13 for the -specific requirements. - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU AGPL, see -. From bf269bcc3ee33b4512df8bc71b15fce0ec6d7cf6 Mon Sep 17 00:00:00 2001 From: Alex Wellnitz Date: Wed, 4 Oct 2023 00:36:37 +0200 Subject: [PATCH 0617/1003] fix: renaming yml to yaml --- .github/{dependabot.yml => dependabot.yaml} | 0 README.md | 1 + codecov.yml => codecov.yaml | 0 3 files changed, 1 insertion(+) rename .github/{dependabot.yml => dependabot.yaml} (100%) rename codecov.yml => codecov.yaml (100%) diff --git a/.github/dependabot.yml b/.github/dependabot.yaml similarity index 100% rename from .github/dependabot.yml rename to .github/dependabot.yaml diff --git a/README.md b/README.md index 41a7ec40b..b43db1316 100644 --- a/README.md +++ b/README.md @@ -219,3 +219,4 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [Naim A.]: https://github.com/naim94a/udpt [greatest-ape]: https://github.com/greatest-ape/aquatic [Power2All]: https://github.com/power2all + diff --git a/codecov.yml b/codecov.yaml similarity index 100% rename from codecov.yml rename to codecov.yaml From ae3fda61e9800c61555db1d822b060e6be31cfce Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Oct 2023 13:58:29 +0200 Subject: [PATCH 0618/1003] chore: update dependencies --- Cargo.lock | 252 +++++++++++++++++++++++++++++------------------------ 1 file changed, 136 insertions(+), 116 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7b6caf856..142fa521e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -41,9 +41,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.5" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "0c378d78423fdad8089616f827526ee33c19f2fddbd5de1629152c9593ba4783" +checksum = "ea5d730647d4fadd988536d06fecce94b7b4f2a7efdae548f1cf4b63205518ab" dependencies = [ "memchr", ] @@ -92,9 +92,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstyle" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84bf0a05bbb2a83e5eb6fa36bb6e87baa08193c35ff52bbf6b38d8af2890e46" +checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" [[package]] name = "aquatic_udp_protocol" @@ -142,7 +142,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] @@ -292,7 +292,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] @@ -375,9 +375,9 @@ dependencies = [ [[package]] name = "brotli" -version = "3.3.4" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" +checksum = "516074a47ef4bce09577a3b379392300159ce5b1ba2e501ff1c819950066100f" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -386,9 +386,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "2.3.4" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b6561fd3f895a11e8f72af2cb7d22e08366bebc2b6b57f7744c4bda27034744" +checksum = "da74e2b81409b1b743f8f0c62cc6254afefb8b8e50bbfe3735550f7aeefa3448" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -524,18 +524,18 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.3" +version = "4.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84ed82781cea27b43c9b106a979fe450a13a31aab0500595fb3fc06616de08e6" +checksum = 
"d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" -version = "4.4.2" +version = "4.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bb9faaa7c2ef94b2743a21f5a29e6f0010dff4caa69ac8e9d6cf8b6fa74da08" +checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45" dependencies = [ "anstyle", "clap_lex", @@ -749,7 +749,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] @@ -760,7 +760,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core", "quote", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] @@ -793,7 +793,7 @@ checksum = "9abcad25e9720609ccb3dcdb795d845e37d8ce34183330a9f48b03a1a71c8e21" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] @@ -847,9 +847,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" +checksum = "add4f07d43996f76ef320709726a556a9d4f965d9410d8d0271132d2f8293480" dependencies = [ "errno-dragonfly", "libc", @@ -890,9 +890,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "fern" @@ -994,7 +994,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] @@ -1006,7 +1006,7 @@ 
dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] @@ -1018,7 +1018,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] @@ -1083,7 +1083,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] @@ -1194,9 +1194,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" dependencies = [ "ahash 0.8.3", "allocator-api2", @@ -1208,7 +1208,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.0", + "hashbrown 0.14.1", ] [[package]] @@ -1219,9 +1219,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" [[package]] name = "hex" @@ -1358,12 +1358,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 0.14.1", "serde", ] @@ -1374,7 +1374,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5305557fa27b460072ae15ce07617e999f5879f14d376c8449f0bfb9f9d8e91e" dependencies = [ "derive_utils", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] @@ -1569,15 +1569,15 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" +checksum = "3852614a3bd9ca9804678ba6be5e3b8ce76dfc902cae004e3e0c44051b6e88db" [[package]] name = "local-ip-address" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fefe707432eb6bd4704b3dacfc87aab269d56667ad05dcd6869534e8890e767" +checksum = "66357e687a569abca487dc399a9c9ac19beb3f13991ed49f00c144e02cbd42ab" dependencies = [ "libc", "neli", @@ -1612,15 +1612,15 @@ dependencies = [ [[package]] name = "matchit" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed1202b2a6f884ae56f04cff409ab315c5ce26b5e58d7412e484f01fd52f52ef" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "memchr" -version = "2.6.3" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memoffset" @@ -1738,7 +1738,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.37", "termcolor", "thiserror", ] @@ -1939,7 +1939,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] @@ -1950,9 +1950,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version 
= "300.1.3+3.1.2" +version = "300.1.5+3.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd2c101a165fff9935e34def4669595ab1c7847943c42be86e21503e482be107" +checksum = "559068e4c12950d7dcaa1857a61725c0d38d4fc03ff8e070ab31a75d6e316491" dependencies = [ "cc", ] @@ -2033,9 +2033,9 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.7.3" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7a4d085fd991ac8d5b05a147b437791b4260b76326baf0fc60cf7c9c27ecd33" +checksum = "c022f1e7b65d6a24c0dbbd5fb344c66881bc01f3e5ae74a1c8100f2f985d98a4" dependencies = [ "memchr", "thiserror", @@ -2044,9 +2044,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.3" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bee7be22ce7918f641a33f08e3f43388c7656772244e2bbb2477f44cc9021a" +checksum = "35513f630d46400a977c4cb58f78e1bfbe01434316e60c37d27b9ad6139c66d8" dependencies = [ "pest", "pest_generator", @@ -2054,22 +2054,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.3" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1511785c5e98d79a05e8a6bc34b4ac2168a0e3e92161862030ad84daa223141" +checksum = "bc9fc1b9e7057baba189b5c626e2d6f40681ae5b6eb064dc7c7834101ec8123a" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] name = "pest_meta" -version = "2.7.3" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42f0394d3123e33353ca5e1e89092e533d2cc490389f2bd6131c43c634ebc5f" +checksum = "1df74e9e7ec4053ceb980e7c0c8bd3594e977fde1af91daba9c928e8e8c6708d" dependencies = [ "once_cell", "pest", @@ -2093,7 +2093,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] @@ -2329,9 +2329,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" +checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" dependencies = [ "either", "rayon-core", @@ -2339,14 +2339,12 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" +checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" dependencies = [ - "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "num_cpus", ] [[package]] @@ -2360,9 +2358,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.5" +version = "1.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" +checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff" dependencies = [ "aho-corasick", "memchr", @@ -2372,9 +2370,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" +checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9" dependencies = [ "aho-corasick", "memchr", @@ -2389,18 +2387,18 @@ checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "rend" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581008d2099240d37fb08d77ad713bcaec2c4d89d50b5b21a8bb1996bbab68ab" +checksum = 
"a2571463863a6bd50c32f94402933f03457a3fbaf697a707c5be741e459f08fd" dependencies = [ "bytecheck", ] [[package]] name = "reqwest" -version = "0.11.20" +version = "0.11.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" +checksum = "78fdbab6a7e1d7b13cc8ff10197f47986b41c639300cc3c8158cac7847c9bbef" dependencies = [ "base64 0.21.4", "bytes", @@ -2423,6 +2421,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "system-configuration", "tokio", "tokio-native-tls", "tower-service", @@ -2550,9 +2549,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.13" +version = "0.38.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7db8590df6dfcd144d22afd1b83b36c21a18d7cbc1dc4bb5295a8712e9eb662" +checksum = "d2f9da0cbd88f9f09e7814e388301c8414c51c62aa6ce1e4b5c551d49d96e531" dependencies = [ "bitflags 2.4.0", "errno", @@ -2584,9 +2583,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.101.5" +version = "0.101.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a27e3b59326c16e23d30aeb7a36a24cc0d29e71d68ff611cdfb4a01d013bed" +checksum = "3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" dependencies = [ "ring", "untrusted", @@ -2684,9 +2683,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" +checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" [[package]] name = "serde" @@ -2724,7 +2723,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] @@ -2756,7 +2755,7 @@ checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies 
= [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] @@ -2790,7 +2789,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.0.0", + "indexmap 2.0.2", "serde", "serde_json", "serde_with_macros", @@ -2806,14 +2805,14 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] name = "sha1" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", @@ -2822,9 +2821,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", @@ -2863,9 +2862,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" +checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" [[package]] name = "socket2" @@ -2928,9 +2927,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.35" +version = "2.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59bf04c28bee9043ed9ea1e41afc0552288d3aba9c6efdd78903b802926f4879" +checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" dependencies = [ "proc-macro2", "quote", @@ -2943,6 +2942,27 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "system-configuration" 
+version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "tap" version = "1.0.1" @@ -2964,9 +2984,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64" dependencies = [ "winapi-util", ] @@ -2979,29 +2999,29 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.48" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" +checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.48" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" +checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] name = "time" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48" +checksum = 
"426f806f4089c493dcac0d24c29c01e2c38baf8e30f1b716ee37e83d200b18fe" dependencies = [ "deranged", "itoa", @@ -3012,15 +3032,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a942f44339478ef67935ab2bbaec2fb0322496cf3cbe84b261e06ac3814c572" +checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" dependencies = [ "time-core", ] @@ -3076,7 +3096,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.37", ] [[package]] @@ -3101,9 +3121,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" dependencies = [ "bytes", "futures-core", @@ -3124,14 +3144,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c226a7bba6d859b63c92c4b4fe69c5b6b72d0cb897dbc8e6012298e6154cb56e" +checksum = "185d8ab0dfbb35cf1399a6344d8484209c088f75f8f68230da55d48d95d43e3d" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.20.0", + "toml_edit 0.20.2", ] [[package]] @@ -3149,18 +3169,18 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.0", + 
"indexmap 2.0.2", "toml_datetime", "winnow", ] [[package]] name = "toml_edit" -version = "0.20.0" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ff63e60a958cefbb518ae1fd6566af80d9d4be430a33f3723dfc47d1d411d95" +checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ - "indexmap 2.0.0", + "indexmap 2.0.2", "serde", "serde_spanned", "toml_datetime", @@ -3222,7 +3242,7 @@ dependencies = [ "serde", "serde_with", "thiserror", - "toml 0.8.0", + "toml 0.8.2", "torrust-tracker-located-error", "torrust-tracker-primitives", "uuid", @@ -3467,7 +3487,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.37", "wasm-bindgen-shared", ] @@ -3501,7 +3521,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.37", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3540,9 +3560,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" dependencies = [ "winapi", ] From 4ca47a4a601feafc713b81e37aae4654992a6bc8 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 8 Oct 2023 17:12:40 +0200 Subject: [PATCH 0619/1003] fixup: ignore new clippy warning --- .vscode/settings.json | 2 +- contrib/bencode/src/access/list.rs | 8 ++++++++ contrib/bencode/src/error.rs | 2 ++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 661243fbe..038da4c18 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -18,7 +18,7 @@ "-W", "clippy::style", "-W", - "clippy::pedantic", + "clippy::pedantic" ], 
"evenBetterToml.formatter.allowedBlankLines": 1, "evenBetterToml.formatter.columnWidth": 130, diff --git a/contrib/bencode/src/access/list.rs b/contrib/bencode/src/access/list.rs index 840bffa1e..c6d1fc407 100644 --- a/contrib/bencode/src/access/list.rs +++ b/contrib/bencode/src/access/list.rs @@ -45,6 +45,14 @@ impl<'a, V: 'a> IndexMut for &'a mut dyn BListAccess { } } +impl<'a, V: 'a> dyn BListAccess { + pub fn iter(&'a self) -> impl Iterator { + self.into_iter() + } +} + +#[allow(unknown_lints)] +#[allow(clippy::into_iter_without_iter)] impl<'a, V: 'a> IntoIterator for &'a dyn BListAccess { type Item = &'a V; type IntoIter = BListIter<'a, V>; diff --git a/contrib/bencode/src/error.rs b/contrib/bencode/src/error.rs index 18ebe9605..54c589e3e 100644 --- a/contrib/bencode/src/error.rs +++ b/contrib/bencode/src/error.rs @@ -1,3 +1,5 @@ +#![allow(unknown_lints)] +#![allow(clippy::iter_without_into_iter)] use error_chain::error_chain; error_chain! { From be914b6719867898376a96b22dba605ca6689078 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 7 Oct 2023 10:38:40 +0200 Subject: [PATCH 0620/1003] ci: re-enable llvm-cov coverage --- .github/workflows/testing.yaml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index 21c47665f..f138a95cc 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -104,7 +104,6 @@ jobs: name: Run Unit Tests run: cargo test --tests --benches --examples --workspace --all-targets --all-features - # Temporary Disable https://github.com/time-rs/time/issues/618 - # - id: coverage - # name: Generate Coverage Report - # run: cargo llvm-cov nextest --tests --benches --examples --workspace --all-targets --all-features + - id: coverage + name: Generate Coverage Report + run: cargo llvm-cov nextest --tests --benches --examples --workspace --all-targets --all-features From 050b27a3e25dc9bbe1b26c114d623e31bd6eab4d Mon Sep 17 00:00:00 
2001 From: Cameron Garnham Date: Wed, 11 Oct 2023 15:32:17 +0200 Subject: [PATCH 0621/1003] ci: export labels using workflow --- .github/workflows/labels.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 .github/workflows/labels.yaml diff --git a/.github/workflows/labels.yaml b/.github/workflows/labels.yaml new file mode 100644 index 000000000..b78393133 --- /dev/null +++ b/.github/workflows/labels.yaml @@ -0,0 +1,12 @@ +name: Manage Labels +on: + workflow_dispatch: + +jobs: + labels: + runs-on: ubuntu-latest + + steps: + - id: export + name: Export Labels to Workflow Artifacts + uses: EndBug/export-label-config@v1 From d46633c451dac6192b8ac72c9f133b21a1269a7a Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 11 Oct 2023 17:27:11 +0200 Subject: [PATCH 0622/1003] ci: enable sync labels workflow --- .github/labels.json | 260 ++++++++++++++++++++++++++++++++++ .github/workflows/labels.yaml | 10 +- 2 files changed, 269 insertions(+), 1 deletion(-) create mode 100644 .github/labels.json diff --git a/.github/labels.json b/.github/labels.json new file mode 100644 index 000000000..25831d61d --- /dev/null +++ b/.github/labels.json @@ -0,0 +1,260 @@ +[ + { + "name": "- Admin -", + "color": "FFFFFF", + "description": "Enjoyable to Install and Setup our Software", + "aliases": [] + }, + { + "name": "- Contributor -", + "color": "FFFFFF", + "description": "Nice to support Torrust", + "aliases": [] + }, + { + "name": "- Developer -", + "color": "FFFFFF", + "description": "Torrust Improvement Experience", + "aliases": [] + }, + { + "name": "- User -", + "color": "FFFFFF", + "description": "Enjoyable to Use our Software", + "aliases": [] + }, + { + "name": "Blocked", + "color": "000000", + "description": "Has Unsatisfied Dependency", + "aliases": [] + }, + { + "name": "Bug", + "color": "a80506", + "description": "Incorrect Behavior", + "aliases": [] + }, + { + "name": "Build | Project System", + "color": "99AAAA", + "description": "Compiling and 
Packaging", + "aliases": [] + }, + { + "name": "Cannot Reproduce", + "color": "D3D3D3", + "description": "Inconsistent Observations", + "aliases": [] + }, + { + "name": "Code Cleanup / Refactoring", + "color": "055a8b", + "description": "Tidying and Making Neat", + "aliases": [] + }, + { + "name": "Continuous Integration", + "color": "41c6b3", + "description": "Workflows and Automation", + "aliases": [] + }, + { + "name": "Dependencies", + "color": "d4f8f6", + "description": "Related to Dependencies", + "aliases": [] + }, + { + "name": "Documentation", + "color": "3d2133", + "description": "Improves Instructions, Guides, and Notices", + "aliases": [] + }, + { + "name": "Duplicate", + "color": "cfd3d7", + "description": "Not Unique", + "aliases": [] + }, + { + "name": "Easy", + "color": "f0cff0", + "description": "Good for Newcomers", + "aliases": [] + }, + { + "name": "Enhancement / Feature Request", + "color": "c9ecbf", + "description": "Something New", + "aliases": [] + }, + { + "name": "External Tools", + "color": "a6006b", + "description": "3rd Party Systems", + "aliases": [] + }, + { + "name": "First Time Contribution", + "color": "f1e0e6", + "description": "Welcome to Torrust", + "aliases": [] + }, + { + "name": "Fixed", + "color": "8e4c42", + "description": "Not a Concern Anymore", + "aliases": [] + }, + { + "name": "Hard", + "color": "2c2c2c", + "description": "Non-Trivial", + "aliases": [] + }, + { + "name": "Help Wanted", + "color": "00896b", + "description": "More Contributions are Appreciated", + "aliases": [] + }, + { + "name": "High Priority", + "color": "ba3fbc", + "description": "Focus Required", + "aliases": [] + }, + { + "name": "Hold Merge", + "color": "9aafbe", + "description": "We are not Ready Yet", + "aliases": [] + }, + { + "name": "Installer | Package", + "color": "ed8b24", + "description": "Distribution to Users", + "aliases": [] + }, + { + "name": "Invalid", + "color": "c1c1c1", + "description": "This doesn't seem right", + "aliases": [] 
+ }, + { + "name": "Legal", + "color": "463e60", + "description": "Licenses and other Official Documents", + "aliases": [] + }, + { + "name": "Low Priority", + "color": "43536b", + "description": "Not our Focus Now", + "aliases": [] + }, + { + "name": "Needs Feedback", + "color": "d6946c", + "description": "What dose the Community Think?", + "aliases": [] + }, + { + "name": "Needs Rebase", + "color": "FBC002", + "description": "Base Branch has Incompatibilities", + "aliases": [] + }, + { + "name": "Needs Research", + "color": "4bc021", + "description": "We Need to Know More About This", + "aliases": [] + }, + { + "name": "Optimization", + "color": "faeba8", + "description": "Make it Faster", + "aliases": [] + }, + { + "name": "Portability", + "color": "95de82", + "description": "Distribution to More Places", + "aliases": [] + }, + { + "name": "Postponed", + "color": "dadada", + "description": "For Later", + "aliases": [] + }, + { + "name": "Quality & Assurance", + "color": "eea2e8", + "description": "Relates to QA, Testing, and CI", + "aliases": [] + }, + { + "name": "Question / Discussion", + "color": "f89d00", + "description": "Community Feedback", + "aliases": [] + }, + { + "name": "Regression", + "color": "d10588", + "description": "It dose not work anymore", + "aliases": [] + }, + { + "name": "Reviewed", + "color": "f4f4ea", + "description": "This Looks Good", + "aliases": [] + }, + { + "name": "Rust", + "color": "000000", + "description": "Pull requests that update Rust code", + "aliases": [] + }, + { + "name": "Security", + "color": "650606", + "description": "Publicly Connected to Security", + "aliases": [] + }, + { + "name": "Testing", + "color": "c5def5", + "description": "Checking Torrust", + "aliases": [] + }, + { + "name": "Translations", + "color": "0c86af", + "description": "Localization and Cultural Adaptions", + "aliases": [] + }, + { + "name": "Trivial", + "color": "5f9685", + "description": "Something Easy", + "aliases": [] + }, + { + "name": 
"Won't Fix", + "color": "070003", + "description": "Something Not Relevant", + "aliases": [] + }, + { + "name": "Workaround Possible", + "color": "eae3e7", + "description": "You can still do it another way", + "aliases": [] + } +] \ No newline at end of file diff --git a/.github/workflows/labels.yaml b/.github/workflows/labels.yaml index b78393133..8d8cb1cba 100644 --- a/.github/workflows/labels.yaml +++ b/.github/workflows/labels.yaml @@ -1,5 +1,5 @@ name: Manage Labels -on: +on: workflow_dispatch: jobs: @@ -10,3 +10,11 @@ jobs: - id: export name: Export Labels to Workflow Artifacts uses: EndBug/export-label-config@v1 + + - id: sync + name: Apply Labels from File + uses: EndBug/label-sync@v2 + with: + config-file: .github/labels.json + delete-other-labels: true + token: ${{ secrets.UPDATE_ISSUES }} From 483cc0471f59c3c084c6f7146ccc354c1e3fc2ff Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 11 Oct 2023 19:14:03 +0200 Subject: [PATCH 0623/1003] ci: fixup labels workflow --- .github/workflows/labels.yaml | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/.github/workflows/labels.yaml b/.github/workflows/labels.yaml index 8d8cb1cba..728b2e681 100644 --- a/.github/workflows/labels.yaml +++ b/.github/workflows/labels.yaml @@ -1,16 +1,30 @@ name: Manage Labels on: workflow_dispatch: + push: + branches: + - develop + paths: + - "/.github/labels.json" jobs: - labels: + export: runs-on: ubuntu-latest steps: - - id: export + - id: backup name: Export Labels to Workflow Artifacts uses: EndBug/export-label-config@v1 + sync: + needs: export + runs-on: ubuntu-latest + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + - id: sync name: Apply Labels from File uses: EndBug/label-sync@v2 From 62c6c10d98f224564685e43d5d7e5801a4adddef Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 12 Oct 2023 08:40:44 +0200 Subject: [PATCH 0624/1003] ci: improve labels workflow --- .github/dependabot.yaml | 6 
++++++ .github/labels.json | 8 +------- .github/workflows/labels.yaml | 6 ++++-- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index 8f36cb692..becfbc1df 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -5,9 +5,15 @@ updates: schedule: interval: daily target-branch: "develop" + labels: + - "Continuous Integration" + - "Dependencies" - package-ecosystem: cargo directory: / schedule: interval: daily target-branch: "develop" + labels: + - "Build | Project System" + - "Dependencies" diff --git a/.github/labels.json b/.github/labels.json index 25831d61d..3b8a2af42 100644 --- a/.github/labels.json +++ b/.github/labels.json @@ -39,7 +39,7 @@ "name": "Build | Project System", "color": "99AAAA", "description": "Compiling and Packaging", - "aliases": [] + "aliases": ["Rust"] }, { "name": "Cannot Reproduce", @@ -215,12 +215,6 @@ "description": "This Looks Good", "aliases": [] }, - { - "name": "Rust", - "color": "000000", - "description": "Pull requests that update Rust code", - "aliases": [] - }, { "name": "Security", "color": "650606", diff --git a/.github/workflows/labels.yaml b/.github/workflows/labels.yaml index 728b2e681..4d049eb0c 100644 --- a/.github/workflows/labels.yaml +++ b/.github/workflows/labels.yaml @@ -1,4 +1,4 @@ -name: Manage Labels +name: Labels on: workflow_dispatch: push: @@ -9,14 +9,16 @@ on: jobs: export: + name: Export Existing Labels runs-on: ubuntu-latest steps: - id: backup - name: Export Labels to Workflow Artifacts + name: Export to Workflow Artifact uses: EndBug/export-label-config@v1 sync: + name: Synchronize Labels from Repo needs: export runs-on: ubuntu-latest From 8f2e22ea65662f6c7cbb660837cec8228d5976aa Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 13 Oct 2023 11:23:05 +0200 Subject: [PATCH 0625/1003] ci: maintaince update for labelsync --- .github/workflows/labels.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.github/workflows/labels.yaml b/.github/workflows/labels.yaml index 4d049eb0c..bb8283f30 100644 --- a/.github/workflows/labels.yaml +++ b/.github/workflows/labels.yaml @@ -33,4 +33,4 @@ jobs: with: config-file: .github/labels.json delete-other-labels: true - token: ${{ secrets.UPDATE_ISSUES }} + token: ${{ secrets.UPDATE_LABELS }} From 308f490e34d31ee64774993af8d939662f6f4061 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 12 Oct 2023 10:28:18 +0200 Subject: [PATCH 0626/1003] deps: update cargo lockfile --- Cargo.lock | 130 ++++++++++++++++++++++++----------------------------- 1 file changed, 59 insertions(+), 71 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 142fa521e..ef5e1a4e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -41,9 +41,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea5d730647d4fadd988536d06fecce94b7b4f2a7efdae548f1cf4b63205518ab" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] @@ -142,7 +142,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -292,7 +292,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -430,9 +430,9 @@ dependencies = [ [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" @@ -749,7 +749,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -760,7 +760,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ 
"darling_core", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -793,7 +793,7 @@ checksum = "9abcad25e9720609ccb3dcdb795d845e37d8ce34183330a9f48b03a1a71c8e21" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -847,25 +847,14 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add4f07d43996f76ef320709726a556a9d4f965d9410d8d0271132d2f8293480" +checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" dependencies = [ - "errno-dragonfly", "libc", "windows-sys", ] -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "error-chain" version = "0.12.4" @@ -994,7 +983,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1006,7 +995,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1018,7 +1007,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1083,7 +1072,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1374,7 +1363,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5305557fa27b460072ae15ce07617e999f5879f14d376c8449f0bfb9f9d8e91e" dependencies = [ "derive_utils", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1411,9 +1400,9 @@ checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" 
[[package]] name = "jobserver" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" dependencies = [ "libc", ] @@ -1525,9 +1514,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.148" +version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] name = "libloading" @@ -1569,9 +1558,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.4.8" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3852614a3bd9ca9804678ba6be5e3b8ce76dfc902cae004e3e0c44051b6e88db" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "local-ip-address" @@ -1738,7 +1727,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", "termcolor", "thiserror", ] @@ -1878,9 +1867,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", ] @@ -1939,7 +1928,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -2062,7 +2051,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -2093,7 +2082,7 @@ checksum = 
"4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -2223,9 +2212,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] @@ -2358,9 +2347,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.6" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff" +checksum = "d119d7c7ca818f8a53c300863d4f87566aac09943aef5b355bb83969dae75d87" dependencies = [ "aho-corasick", "memchr", @@ -2370,9 +2359,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.9" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9" +checksum = "465c6fc0621e4abc4187a2bda0937bfd4f722c2730b29562e19689ea796c9a4b" dependencies = [ "aho-corasick", "memchr", @@ -2381,9 +2370,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.7.5" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +checksum = "56d84fdd47036b038fc80dd333d10b6aab10d5d31f4a366e20014def75328d33" [[package]] name = "rend" @@ -2396,9 +2385,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.21" +version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78fdbab6a7e1d7b13cc8ff10197f47986b41c639300cc3c8158cac7847c9bbef" +checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ "base64 
0.21.4", "bytes", @@ -2549,9 +2538,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.15" +version = "0.38.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2f9da0cbd88f9f09e7814e388301c8414c51c62aa6ce1e4b5c551d49d96e531" +checksum = "5a74ee2d7c2581cd139b42447d7d9389b889bdaad3a73f1ebb16f2a3237bb19c" dependencies = [ "bitflags 2.4.0", "errno", @@ -2683,9 +2672,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" +checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" [[package]] name = "serde" @@ -2698,9 +2687,9 @@ dependencies = [ [[package]] name = "serde_bencode" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "934d8bdbaa0126dafaea9a8833424a211d9661897717846c6bb782349ca1c30d" +checksum = "a70dfc7b7438b99896e7f8992363ab8e2c4ba26aa5ec675d32d1c3c2c33d413e" dependencies = [ "serde", "serde_bytes", @@ -2723,7 +2712,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -2755,7 +2744,7 @@ checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -2805,7 +2794,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -2927,9 +2916,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.37" +version = "2.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" +checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" dependencies = [ "proc-macro2", "quote", @@ -3014,7 
+3003,7 @@ checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3072,9 +3061,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.32.0" +version = "1.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" +checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" dependencies = [ "backtrace", "bytes", @@ -3096,7 +3085,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3487,7 +3476,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", "wasm-bindgen-shared", ] @@ -3521,7 +3510,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3650,9 +3639,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.15" +version = "0.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c2e3184b9c4e92ad5167ca73039d0c42476302ab603e2fec4487511f38ccefc" +checksum = "037711d82167854aff2018dfd193aa0fef5370f456732f0d5a0c59b0f1b4b907" dependencies = [ "memchr", ] @@ -3706,11 +3695,10 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.8+zstd.1.5.5" +version = "2.0.9+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" dependencies = [ "cc", - "libc", "pkg-config", ] From 
7dbca2bb8ec1f9f08dca8f752369ec00a4bec429 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 12 Oct 2023 10:28:50 +0200 Subject: [PATCH 0627/1003] ci: move doc tests to unit section --- .github/workflows/testing.yaml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index f138a95cc..3ae18e4db 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -65,10 +65,6 @@ jobs: name: Run Lint Checks run: cargo clippy --tests --benches --examples --workspace --all-targets --all-features -- -D clippy::correctness -D clippy::suspicious -D clippy::complexity -D clippy::perf -D clippy::style -D clippy::pedantic - - id: doc - name: Run Documentation Checks - run: cargo test --doc - unit: name: Units runs-on: ubuntu-latest @@ -100,6 +96,14 @@ jobs: with: tool: cargo-llvm-cov, cargo-nextest + - id: docs + name: Build Documentation + run: cargo doc --no-deps --bins --examples --workspace --all-features + + - id: test-docs + name: Run Documentation Tests + run: cargo test --doc + - id: test name: Run Unit Tests run: cargo test --tests --benches --examples --workspace --all-targets --all-features From 417ad73d6c288374b98be2b0ff1819714ae62beb Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 12 Oct 2023 10:54:42 +0200 Subject: [PATCH 0628/1003] chore: fix doc warnings --- src/bootstrap/app.rs | 4 ++-- src/bootstrap/jobs/http_tracker.rs | 2 +- src/bootstrap/jobs/tracker_apis.rs | 2 +- src/lib.rs | 4 ++-- src/servers/apis/mod.rs | 6 +++--- .../apis/v1/context/auth_key/handlers.rs | 2 +- src/servers/apis/v1/context/stats/handlers.rs | 2 +- .../apis/v1/context/stats/responses.rs | 2 +- .../apis/v1/context/torrent/handlers.rs | 2 +- .../apis/v1/context/torrent/resources/peer.rs | 2 +- .../v1/context/torrent/resources/torrent.rs | 4 ++-- .../apis/v1/context/torrent/responses.rs | 4 ++-- src/servers/http/mod.rs | 4 ++-- src/servers/http/percent_encoding.rs | 6 
+++--- .../http/v1/extractors/announce_request.rs | 6 +++--- .../http/v1/extractors/authentication_key.rs | 4 ++-- .../http/v1/extractors/client_ip_sources.rs | 2 +- .../http/v1/extractors/scrape_request.rs | 6 +++--- src/servers/http/v1/handlers/common/auth.rs | 2 +- .../http/v1/handlers/common/peer_ip.rs | 2 +- src/servers/http/v1/query.rs | 2 +- src/servers/http/v1/responses/announce.rs | 4 ++-- src/servers/http/v1/services/announce.rs | 6 +++--- src/servers/http/v1/services/scrape.rs | 6 +++--- src/servers/udp/mod.rs | 2 +- src/servers/udp/peer_builder.rs | 2 +- src/servers/udp/request.rs | 4 ++-- src/servers/udp/server.rs | 6 +++--- src/tracker/auth.rs | 8 ++++---- src/tracker/databases/mod.rs | 4 ++-- src/tracker/mod.rs | 20 +++++++++---------- src/tracker/services/statistics/mod.rs | 4 ++-- src/tracker/services/statistics/setup.rs | 2 +- src/tracker/services/torrent.rs | 10 +++++----- src/tracker/torrent.rs | 6 +++--- 35 files changed, 77 insertions(+), 77 deletions(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 6961e15f0..78c16a0a5 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -1,6 +1,6 @@ //! Setup for the main tracker application. //! -//! The [`setup`](bootstrap::app::setup) only builds the application and its dependencies but it does not start the application. +//! The [`setup`] only builds the application and its dependencies but it does not start the application. //! In fact, there is no such thing as the main application process. When the application starts, the only thing it does is //! starting a bunch of independent jobs. If you are looking for how things are started you should read [`app::start`](crate::app::start) //! function documentation. @@ -22,7 +22,7 @@ use crate::shared::crypto::ephemeral_instance_keys; use crate::tracker::services::tracker_factory; use crate::tracker::Tracker; -/// It loads the configuration from the environment and builds the main domain [`tracker`](crate::tracker::Tracker) struct. 
+/// It loads the configuration from the environment and builds the main domain [`Tracker`] struct. #[must_use] pub fn setup() -> (Arc, Arc) { let configuration = Arc::new(initialize_configuration()); diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index ac0161640..a38fe3a5a 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -7,7 +7,7 @@ //! //! The [`http_tracker::start_job`](crate::bootstrap::jobs::http_tracker::start_job) function spawns a new asynchronous task, //! that tasks is the "**launcher**". The "**launcher**" starts the actual server and sends a message back to the main application. -//! The main application waits until receives the message [`ServerJobStarted`](crate::bootstrap::jobs::http_tracker::ServerJobStarted) from the "**launcher**". +//! The main application waits until receives the message [`ServerJobStarted`] from the "**launcher**". //! //! The "**launcher**" is an intermediary thread that decouples the HTTP servers from the process that handles it. The HTTP could be used independently in the future. //! In that case it would not need to notify a parent process. diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 9afe4ab24..33b9b6e4a 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -11,7 +11,7 @@ //! function spawns a new asynchronous task, that tasks is the "**launcher**". //! The "**launcher**" starts the actual server and sends a message back //! to the main application. The main application waits until receives -//! the message [`ApiServerJobStarted`](crate::bootstrap::jobs::tracker_apis::ApiServerJobStarted) +//! the message [`ApiServerJobStarted`] //! from the "**launcher**". //! //! 
The "**launcher**" is an intermediary thread that decouples the API server diff --git a/src/lib.rs b/src/lib.rs index c862d373a..c2e70a8b1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -384,7 +384,7 @@ //! //! Torrust Tracker has four main components: //! -//! - The core [`tracker`](crate::tracker) +//! - The core [`tracker`] //! - The tracker REST [`API`](crate::servers::apis) //! - The [`UDP`](crate::servers::udp) tracker //! - The [`HTTP`](crate::servers::http) tracker @@ -402,7 +402,7 @@ //! - Statistics //! - Persistence //! -//! See [`tracker`](crate::tracker) for more details on the [`tracker`](crate::tracker) module. +//! See [`tracker`] for more details on the [`tracker`] module. //! //! ## Tracker API //! diff --git a/src/servers/apis/mod.rs b/src/servers/apis/mod.rs index afed9ff12..5f8c581d0 100644 --- a/src/servers/apis/mod.rs +++ b/src/servers/apis/mod.rs @@ -3,7 +3,7 @@ //! > **NOTICE**: This API should not be exposed directly to the internet, it is //! intended for internal use only. //! -//! Endpoints for the latest API: [v1](crate::servers::apis::v1). +//! Endpoints for the latest API: [v1]. //! //! All endpoints require an authorization token which must be set in the //! configuration before running the tracker. The default configuration uses @@ -139,13 +139,13 @@ //! //! The API is versioned and each version has its own module. //! The API server runs all the API versions on the same server using -//! the same port. Currently there is only one API version: [v1](crate::servers::apis::v1) +//! the same port. Currently there is only one API version: [v1] //! but a version [`v2`](https://github.com/torrust/torrust-tracker/issues/144) //! is planned. //! //! # Endpoints //! -//! Refer to the [v1](crate::servers::apis::v1) module for the list of available +//! Refer to the [v1] module for the list of available //! API endpoints. //! //! 
# Documentation diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs index 85158c698..d6a2992fb 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -19,7 +19,7 @@ use crate::tracker::Tracker; /// /// It returns two types of responses: /// -/// - `200` with an json [`AuthKey`](crate::servers::apis::v1::context::auth_key::resources::AuthKey) +/// - `200` with an json [`AuthKey`] /// resource. If the key was generated successfully. /// - `500` with serialized error in debug format. If the key couldn't be /// generated. diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs index dfb983f77..bb531c806 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -12,7 +12,7 @@ use crate::tracker::Tracker; /// It handles the request to get the tracker statistics. /// -/// It returns a `200` response with a json [`Stats`](crate::servers::apis::v1::context::stats::resources::Stats) +/// It returns a `200` response with a json [`Stats`] /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::stats#get-tracker-statistics) /// for more information about this endpoint. diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs index a4dad77e4..e8e7cb84d 100644 --- a/src/servers/apis/v1/context/stats/responses.rs +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -5,7 +5,7 @@ use axum::response::Json; use super::resources::Stats; use crate::tracker::services::statistics::TrackerMetrics; -/// `200` response that contains the [`Stats`](crate::servers::apis::v1::context::stats::resources::Stats) resource as json. +/// `200` response that contains the [`Stats`] resource as json. 
pub fn stats_response(tracker_metrics: TrackerMetrics) -> Json { Json(Stats::from(tracker_metrics)) } diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index 002d4356e..1f38ab474 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -51,7 +51,7 @@ pub struct PaginationParams { /// It handles the request to get a list of torrents. /// /// It returns a `200` response with a json array with -/// [`ListItem`](crate::servers::apis::v1::context::torrent::resources::torrent::ListItem) +/// [`ListItem`] /// resources. /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent#list-torrents) diff --git a/src/servers/apis/v1/context/torrent/resources/peer.rs b/src/servers/apis/v1/context/torrent/resources/peer.rs index 539637b35..4989cec52 100644 --- a/src/servers/apis/v1/context/torrent/resources/peer.rs +++ b/src/servers/apis/v1/context/torrent/resources/peer.rs @@ -6,7 +6,7 @@ use crate::tracker; /// `Peer` API resource. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Peer { - /// The peer's ID. See [`Id`](crate::servers::apis::v1::context::torrent::resources::peer::Id). + /// The peer's ID. See [`Id`]. pub peer_id: Id, /// The peer's socket address. For example: `192.168.1.88:17548`. pub peer_addr: String, diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index c9dbd1c02..ebebda79c 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -59,8 +59,8 @@ impl ListItem { } } -/// Maps an array of the domain type [`BasicInfo`](crate::tracker::services::torrent::BasicInfo) -/// to the API resource type [`ListItem`](crate::servers::apis::v1::context::torrent::resources::torrent::ListItem). 
+/// Maps an array of the domain type [`BasicInfo`] +/// to the API resource type [`ListItem`]. #[must_use] pub fn to_resource(basic_info_vec: &[BasicInfo]) -> Vec { basic_info_vec diff --git a/src/servers/apis/v1/context/torrent/responses.rs b/src/servers/apis/v1/context/torrent/responses.rs index d3be092eb..99c2fcae3 100644 --- a/src/servers/apis/v1/context/torrent/responses.rs +++ b/src/servers/apis/v1/context/torrent/responses.rs @@ -7,14 +7,14 @@ use super::resources::torrent::{ListItem, Torrent}; use crate::tracker::services::torrent::{BasicInfo, Info}; /// `200` response that contains an array of -/// [`ListItem`](crate::servers::apis::v1::context::torrent::resources::torrent::ListItem) +/// [`ListItem`] /// resources as json. pub fn torrent_list_response(basic_infos: &[BasicInfo]) -> Json> { Json(ListItem::new_vec(basic_infos)) } /// `200` response that contains a -/// [`Torrent`](crate::servers::apis::v1::context::torrent::resources::torrent::Torrent) +/// [`Torrent`] /// resources as json. pub fn torrent_info_response(info: Info) -> Json { Json(Torrent::from(info)) diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index 067e88fdd..e6dd808b6 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -78,13 +78,13 @@ //! > **NOTICE**: the `info_hash` parameter is NOT a `URL` encoded string param. //! It is percent encode of the raw `info_hash` bytes (40 bytes). URL `GET` params //! can contain any bytes, not only well-formed UTF-8. The `info_hash` is a -//! 20-byte SHA1. Check the [`percent_encoding`](crate::servers::http::percent_encoding) +//! 20-byte SHA1. Check the [`percent_encoding`] //! module to know more about the encoding. //! //! > **NOTICE**: the `peer_id` parameter is NOT a `URL` encoded string param. //! It is percent encode of the raw peer ID bytes (20 bytes). URL `GET` params //! can contain any bytes, not only well-formed UTF-8. The `info_hash` is a -//! 20-byte SHA1. 
Check the [`percent_encoding`](crate::servers::http::percent_encoding) +//! 20-byte SHA1. Check the [`percent_encoding`] //! module to know more about the encoding. //! //! > **NOTICE**: by default, the tracker returns the non-compact peer list when diff --git a/src/servers/http/percent_encoding.rs b/src/servers/http/percent_encoding.rs index c8f0f7f12..b674f0475 100644 --- a/src/servers/http/percent_encoding.rs +++ b/src/servers/http/percent_encoding.rs @@ -19,7 +19,7 @@ use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; /// Percent decodes a percent encoded infohash. Internally an -/// [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) is a 20-byte array. +/// [`InfoHash`] is a 20-byte array. /// /// For example, given the infohash `3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0`, /// it's percent encoded representation is `%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0`. @@ -43,7 +43,7 @@ use crate::tracker::peer::{self, IdConversionError}; /// # Errors /// /// Will return `Err` if the decoded bytes do not represent a valid -/// [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash). +/// [`InfoHash`]. pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { let bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); InfoHash::try_from(bytes) @@ -70,7 +70,7 @@ pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result Result { let bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); peer::Id::try_from(bytes) diff --git a/src/servers/http/v1/extractors/announce_request.rs b/src/servers/http/v1/extractors/announce_request.rs index 5d947ef91..1778f78c5 100644 --- a/src/servers/http/v1/extractors/announce_request.rs +++ b/src/servers/http/v1/extractors/announce_request.rs @@ -1,7 +1,7 @@ -//! Axum [`extractor`](axum::extract) for the [`Announce`](crate::servers::http::v1::requests::announce::Announce) +//! 
Axum [`extractor`](axum::extract) for the [`Announce`] //! request. //! -//! It parses the query parameters returning an [`Announce`](crate::servers::http::v1::requests::announce::Announce) +//! It parses the query parameters returning an [`Announce`] //! request. //! //! Refer to [`Announce`](crate::servers::http::v1::requests::announce) for more @@ -38,7 +38,7 @@ use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::announce::{Announce, ParseAnnounceQueryError}; use crate::servers::http::v1::responses; -/// Extractor for the [`Announce`](crate::servers::http::v1::requests::announce::Announce) +/// Extractor for the [`Announce`] /// request. pub struct ExtractRequest(pub Announce); diff --git a/src/servers/http/v1/extractors/authentication_key.rs b/src/servers/http/v1/extractors/authentication_key.rs index 20dc1c90b..3f2a2a246 100644 --- a/src/servers/http/v1/extractors/authentication_key.rs +++ b/src/servers/http/v1/extractors/authentication_key.rs @@ -1,4 +1,4 @@ -//! Axum [`extractor`](axum::extract) to extract the authentication [`Key`](crate::tracker::auth::Key) +//! Axum [`extractor`](axum::extract) to extract the authentication [`Key`] //! from the URL path. //! //! It's only used when the tracker is running in private mode. @@ -55,7 +55,7 @@ use crate::servers::http::v1::handlers::common::auth; use crate::servers::http::v1::responses; use crate::tracker::auth::Key; -/// Extractor for the [`Key`](crate::tracker::auth::Key) struct. +/// Extractor for the [`Key`] struct. 
pub struct Extract(pub Key); #[derive(Deserialize)] diff --git a/src/servers/http/v1/extractors/client_ip_sources.rs b/src/servers/http/v1/extractors/client_ip_sources.rs index f04300402..18eff26b3 100644 --- a/src/servers/http/v1/extractors/client_ip_sources.rs +++ b/src/servers/http/v1/extractors/client_ip_sources.rs @@ -45,7 +45,7 @@ use axum_client_ip::RightmostXForwardedFor; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; -/// Extractor for the [`ClientIpSources`](crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources) +/// Extractor for the [`ClientIpSources`] /// struct. pub struct Extract(pub ClientIpSources); diff --git a/src/servers/http/v1/extractors/scrape_request.rs b/src/servers/http/v1/extractors/scrape_request.rs index 63c4dba69..1437ede41 100644 --- a/src/servers/http/v1/extractors/scrape_request.rs +++ b/src/servers/http/v1/extractors/scrape_request.rs @@ -1,7 +1,7 @@ -//! Axum [`extractor`](axum::extract) for the [`Scrape`](crate::servers::http::v1::requests::scrape::Scrape) +//! Axum [`extractor`](axum::extract) for the [`Scrape`] //! request. //! -//! It parses the query parameters returning an [`Scrape`](crate::servers::http::v1::requests::scrape::Scrape) +//! It parses the query parameters returning an [`Scrape`] //! request. //! //! Refer to [`Scrape`](crate::servers::http::v1::requests::scrape) for more @@ -38,7 +38,7 @@ use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::scrape::{ParseScrapeQueryError, Scrape}; use crate::servers::http::v1::responses; -/// Extractor for the [`Scrape`](crate::servers::http::v1::requests::scrape::Scrape) +/// Extractor for the [`Scrape`] /// request. 
pub struct ExtractRequest(pub Scrape); diff --git a/src/servers/http/v1/handlers/common/auth.rs b/src/servers/http/v1/handlers/common/auth.rs index f41635d69..720ed7659 100644 --- a/src/servers/http/v1/handlers/common/auth.rs +++ b/src/servers/http/v1/handlers/common/auth.rs @@ -1,5 +1,5 @@ //! HTTP server authentication error and conversion to -//! [`responses::error::Error`](crate::servers::http::v1::responses::error::Error) +//! [`responses::error::Error`] //! response. use std::panic::Location; diff --git a/src/servers/http/v1/handlers/common/peer_ip.rs b/src/servers/http/v1/handlers/common/peer_ip.rs index d65efbc79..5602bd26c 100644 --- a/src/servers/http/v1/handlers/common/peer_ip.rs +++ b/src/servers/http/v1/handlers/common/peer_ip.rs @@ -2,7 +2,7 @@ //! //! The HTTP tracker may fail to resolve the peer IP address. This module //! contains the logic to convert those -//! [`PeerIpResolutionError`](crate::servers::http::v1::services::peer_ip_resolver::PeerIpResolutionError) +//! [`PeerIpResolutionError`] //! errors into responses. use crate::servers::http::v1::responses; use crate::servers::http::v1::services::peer_ip_resolver::PeerIpResolutionError; diff --git a/src/servers/http/v1/query.rs b/src/servers/http/v1/query.rs index 745796b61..3a078daae 100644 --- a/src/servers/http/v1/query.rs +++ b/src/servers/http/v1/query.rs @@ -93,7 +93,7 @@ impl Query { } } -/// This error can be returned when parsing a [`Query`](crate::servers::http::v1::query::Query) +/// This error can be returned when parsing a [`Query`] /// from a string. 
#[derive(Error, Debug)] pub enum ParseQueryError { diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index 3596275f4..f45f4c824 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -91,7 +91,7 @@ pub struct NonCompact { pub peers: Vec, } -/// Peer information in the [`NonCompact`](crate::servers::http::v1::responses::announce::NonCompact) +/// Peer information in the [`NonCompact`] /// response. /// /// ```rust @@ -261,7 +261,7 @@ pub struct Compact { pub peers: Vec, } -/// Compact peer. It's used in the [`Compact`](crate::servers::http::v1::responses::announce::Compact) +/// Compact peer. It's used in the [`Compact`] /// response. /// /// _"To reduce the size of tracker responses and to reduce memory and diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 4c1b262ba..ddb3b1221 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -3,10 +3,10 @@ //! The service is responsible for handling the `announce` requests. //! //! It delegates the `announce` logic to the [`Tracker`](crate::tracker::Tracker::announce) -//! and it returns the [`AnnounceData`](crate::tracker::AnnounceData) returned -//! by the [`Tracker`](crate::tracker::Tracker). +//! and it returns the [`AnnounceData`] returned +//! by the [`Tracker`]. //! -//! It also sends an [`statistics::Event`](crate::tracker::statistics::Event) +//! It also sends an [`statistics::Event`] //! because events are specific for the HTTP tracker. use std::net::IpAddr; use std::sync::Arc; diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 240680ca3..adea28086 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -3,10 +3,10 @@ //! The service is responsible for handling the `scrape` requests. //! //! 
It delegates the `scrape` logic to the [`Tracker`](crate::tracker::Tracker::scrape) -//! and it returns the [`ScrapeData`](crate::tracker::ScrapeData) returned -//! by the [`Tracker`](crate::tracker::Tracker). +//! and it returns the [`ScrapeData`] returned +//! by the [`Tracker`]. //! -//! It also sends an [`statistics::Event`](crate::tracker::statistics::Event) +//! It also sends an [`statistics::Event`] //! because events are specific for the HTTP tracker. use std::net::IpAddr; use std::sync::Arc; diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index edbfd77d2..630867218 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -109,7 +109,7 @@ //! connection ID = hash(client IP + current time slot + secret seed) //! ``` //! -//! The BEP-15 recommends a two-minute time slot. Refer to [`connection_cookie`](crate::servers::udp::connection_cookie) +//! The BEP-15 recommends a two-minute time slot. Refer to [`connection_cookie`] //! for more information about the connection ID generation with this method. //! //! #### Connect Request diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs index ac62a7ecd..7c83089bb 100644 --- a/src/servers/udp/peer_builder.rs +++ b/src/servers/udp/peer_builder.rs @@ -5,7 +5,7 @@ use super::request::AnnounceWrapper; use crate::shared::clock::{Current, Time}; use crate::tracker::peer::{Id, Peer}; -/// Extracts the [`Peer`](crate::tracker::peer::Peer) info from the +/// Extracts the [`Peer`] info from the /// announce request. /// /// # Arguments diff --git a/src/servers/udp/request.rs b/src/servers/udp/request.rs index 0afa02806..f655fd36a 100644 --- a/src/servers/udp/request.rs +++ b/src/servers/udp/request.rs @@ -9,9 +9,9 @@ use aquatic_udp_protocol::AnnounceRequest; use crate::shared::bit_torrent::info_hash::InfoHash; -/// Wrapper around [`AnnounceRequest`](aquatic_udp_protocol::request::AnnounceRequest). +/// Wrapper around [`AnnounceRequest`]. 
pub struct AnnounceWrapper { - /// [`AnnounceRequest`](aquatic_udp_protocol::request::AnnounceRequest) to wrap. + /// [`AnnounceRequest`] to wrap. pub announce_request: AnnounceRequest, /// Info hash of the torrent. pub info_hash: InfoHash, diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 5e5c98704..428b76fa1 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -2,9 +2,9 @@ //! //! There are two main types in this module: //! -//! - [`UdpServer`](crate::servers::udp::server::UdpServer): a controller to +//! - [`UdpServer`]: a controller to //! start and stop the server. -//! - [`Udp`](crate::servers::udp::server::Udp): the server launcher. +//! - [`Udp`]: the server launcher. //! //! The `UdpServer` is an state machine for a given configuration. This struct //! represents concrete configuration and state. It allows to start and @@ -42,7 +42,7 @@ use crate::tracker::Tracker; /// /// Some errors triggered while stopping the server are: /// -/// - The [`UdpServer`](crate::servers::udp::server::UdpServer) cannot send the +/// - The [`UdpServer`] cannot send the /// shutdown signal to the spawned UDP service thread. #[derive(Debug)] pub enum Error { diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 466187af5..2759c8d06 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -4,7 +4,7 @@ //! Tracker keys are tokens used to authenticate the tracker clients when the tracker runs //! in `private` or `private_listed` modes. //! -//! There are services to [`generate`](crate::tracker::auth::generate) and [`verify`](crate::tracker::auth::verify) authentication keys. +//! There are services to [`generate`] and [`verify`] authentication keys. //! //! Authentication keys are used only by [`HTTP`](crate::servers::http) trackers. All keys have an expiration time, that means //! they are only valid during a period of time. After that time the expiring key will no longer be valid. 
@@ -53,7 +53,7 @@ use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; use crate::shared::clock::{convert_from_timestamp_to_datetime_utc, Current, DurationSinceUnixEpoch, Time, TimeNow}; #[must_use] -/// It generates a new random 32-char authentication [`ExpiringKey`](crate::tracker::auth::ExpiringKey) +/// It generates a new random 32-char authentication [`ExpiringKey`] /// /// # Panics /// @@ -73,7 +73,7 @@ pub fn generate(lifetime: Duration) -> ExpiringKey { } } -/// It verifies an [`ExpiringKey`](crate::tracker::auth::ExpiringKey). It checks if the expiration date has passed. +/// It verifies an [`ExpiringKey`]. It checks if the expiration date has passed. /// /// # Errors /// @@ -164,7 +164,7 @@ impl FromStr for Key { } } -/// Verification error. Error returned when an [`ExpiringKey`](crate::tracker::auth::ExpiringKey) cannot be verified with the [`verify(...)`](crate::tracker::auth::verify) function. +/// Verification error. Error returned when an [`ExpiringKey`] cannot be verified with the [`verify(...)`](crate::tracker::auth::verify) function. /// #[derive(Debug, Error)] #[allow(dead_code)] diff --git a/src/tracker/databases/mod.rs b/src/tracker/databases/mod.rs index e0a26be23..902880496 100644 --- a/src/tracker/databases/mod.rs +++ b/src/tracker/databases/mod.rs @@ -1,6 +1,6 @@ //! The persistence module. //! -//! Persistence is currently implemented with one [`Database`](crate::tracker::databases::Database) trait. +//! Persistence is currently implemented with one [`Database`] trait. //! //! There are two implementations of the trait (two drivers): //! @@ -115,7 +115,7 @@ pub trait Database: Sync + Send { /// It loads the torrent metrics data from the database. /// /// It returns an array of tuples with the torrent - /// [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) and the + /// [`InfoHash`] and the /// [`completed`](crate::tracker::torrent::Entry::completed) counter /// which is the number of times the torrent has been downloaded. 
/// See [`Entry::completed`](crate::tracker::torrent::Entry::completed). diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 040751e12..94d75a8cd 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -52,7 +52,7 @@ //! The tracker responds to the peer with the list of other peers in the swarm so that //! the peer can contact them to start downloading pieces of the file from them. //! -//! Once you have instantiated the `Tracker` you can `announce` a new [`peer`](crate::tracker::peer::Peer) with: +//! Once you have instantiated the `Tracker` you can `announce` a new [`Peer`] with: //! //! ```rust,no_run //! use torrust_tracker::tracker::peer; @@ -183,7 +183,7 @@ //! //! ## Torrents //! -//! The [`torrent`](crate::tracker::torrent) module contains all the data structures stored by the `Tracker` except for peers. +//! The [`torrent`] module contains all the data structures stored by the `Tracker` except for peers. //! //! We can represent the data stored in memory internally by the `Tracker` with this JSON object: //! @@ -222,7 +222,7 @@ //! they have a full version of the torrent data, and they can provide the full data to other peers. That's why they are also known as "seeders". //! - `peers`: an indexed and orderer list of peer for the torrent. Each peer contains the data received from the peer in the `announce` request. //! -//! The [`torrent`](crate::tracker::torrent) module not only contains the original data obtained from peer via `announce` requests, it also contains +//! The [`torrent`] module not only contains the original data obtained from peer via `announce` requests, it also contains //! aggregate data that can be derived from the original data. For example: //! //! ```rust,no_run @@ -244,7 +244,7 @@ //! `SwarmStats` struct follows name conventions for `scrape` responses. See [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmStats` //! is used for the rest of cases. //! -//! 
Refer to [`torrent`](crate::tracker::torrent) module for more details about these data structures. +//! Refer to [`torrent`] module for more details about these data structures. //! //! ## Peers //! @@ -310,7 +310,7 @@ //! `c1277613db1d28709b034a017ab2cae4be07ae10` is the torrent infohash and `completed` contains the number of peers //! that have a full version of the torrent data, also known as seeders. //! -//! Refer to [`peer`](crate::tracker::peer) module for more information about peers. +//! Refer to [`peer`] module for more information about peers. //! //! # Configuration //! @@ -348,7 +348,7 @@ //! //! Services can include extra features like pagination, for example. //! -//! Refer to [`services`](crate::tracker::services) module for more information about services. +//! Refer to [`services`] module for more information about services. //! //! # Authentication //! @@ -363,7 +363,7 @@ //! //! To learn more about tracker authentication, refer to the following modules : //! -//! - [`auth`](crate::tracker::auth) module. +//! - [`auth`] module. //! - [`tracker`](crate::tracker) module. //! - [`http`](crate::servers::http) module. //! @@ -417,7 +417,7 @@ //! tracker.send_stats_event(statistics::Event::Tcp4Announce).await //! ``` //! -//! Refer to [`statistics`](crate::tracker::statistics) module for more information about statistics. +//! Refer to [`statistics`] module for more information about statistics. //! //! # Persistence //! @@ -430,7 +430,7 @@ //! - Torrent whitelist //! - Torrent metrics //! -//! Refer to [`databases`](crate::tracker::databases) module for more information about persistence. +//! Refer to [`databases`] module for more information about persistence. 
pub mod auth; pub mod databases; pub mod error; @@ -753,7 +753,7 @@ impl Tracker { } /// It calculates and returns the general `Tracker` - /// [`TorrentsMetrics`](crate::tracker::TorrentsMetrics) + /// [`TorrentsMetrics`] /// /// # Context: Tracker pub async fn get_torrents_metrics(&self) -> TorrentsMetrics { diff --git a/src/tracker/services/statistics/mod.rs b/src/tracker/services/statistics/mod.rs index 3761e38de..3ef8b52bb 100644 --- a/src/tracker/services/statistics/mod.rs +++ b/src/tracker/services/statistics/mod.rs @@ -3,7 +3,7 @@ //! It includes: //! //! - A [`factory`](crate::tracker::services::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. -//! - A [`get_metrics`](crate::tracker::services::statistics::get_metrics) service to get the [`tracker metrics`](crate::tracker::statistics::Metrics). +//! - A [`get_metrics`] service to get the [`tracker metrics`](crate::tracker::statistics::Metrics). //! //! Tracker metrics are collected using a Publisher-Subscribe pattern. //! @@ -57,7 +57,7 @@ pub struct TrackerMetrics { pub protocol_metrics: Metrics, } -/// It returns all the [`TrackerMetrics`](crate::tracker::services::statistics::TrackerMetrics) +/// It returns all the [`TrackerMetrics`] pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { let torrents_metrics = tracker.get_torrents_metrics().await; let stats = tracker.get_stats().await; diff --git a/src/tracker/services/statistics/setup.rs b/src/tracker/services/statistics/setup.rs index b8d325ab4..4bf5a827a 100644 --- a/src/tracker/services/statistics/setup.rs +++ b/src/tracker/services/statistics/setup.rs @@ -1,6 +1,6 @@ //! Setup for the tracker statistics. //! -//! The [`factory`](crate::tracker::services::statistics::setup::factory) function builds the structs needed for handling the tracker metrics. +//! The [`factory`] function builds the structs needed for handling the tracker metrics. 
use crate::tracker::statistics; /// It builds the structs needed for handling the tracker metrics. diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index 0db044d07..934fa6b77 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -2,8 +2,8 @@ //! //! There are two services: //! -//! - [`get_torrent_info`](crate::tracker::services::torrent::get_torrent_info): it returns all the data about one torrent. -//! - [`get_torrents`](crate::tracker::services::torrent::get_torrents): it returns data about some torrent in bulk excluding the peer list. +//! - [`get_torrent_info`]: it returns all the data about one torrent. +//! - [`get_torrents`]: it returns data about some torrent in bulk excluding the peer list. use std::sync::Arc; use serde::Deserialize; @@ -29,7 +29,7 @@ pub struct Info { /// It contains only part of the information the tracker has about a torrent /// -/// It contains the same data as [Info](crate::tracker::services::torrent::Info) but without the list of peers in the swarm. +/// It contains the same data as [Info] but without the list of peers in the swarm. #[derive(Debug, PartialEq, Clone)] pub struct BasicInfo { /// The infohash of the torrent this data is related to @@ -91,7 +91,7 @@ impl Default for Pagination { } } -/// It returns all the information the tracker has about one torrent in a [Info](crate::tracker::services::torrent::Info) struct. +/// It returns all the information the tracker has about one torrent in a [Info] struct. pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { let db = tracker.get_torrents().await; @@ -116,7 +116,7 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op }) } -/// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`](crate::tracker::services::torrent::BasicInfo) struct, excluding the peer list. 
+/// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. pub async fn get_torrents(tracker: Arc, pagination: &Pagination) -> Vec { let db = tracker.get_torrents().await; diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 4f7e28b6b..de520aeb1 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -2,8 +2,8 @@ //! //! There are to main data structures: //! -//! - A torrent [`Entry`](crate::tracker::torrent::Entry): it contains all the information stored by the tracker for one torrent. -//! - The [`SwarmMetadata`](crate::tracker::torrent::SwarmMetadata): it contains aggregate information that can me derived from the torrent entries. +//! - A torrent [`Entry`]: it contains all the information stored by the tracker for one torrent. +//! - The [`SwarmMetadata`]: it contains aggregate information that can me derived from the torrent entries. //! //! A "swarm" is a network of peers that are trying to download the same torrent. //! @@ -27,7 +27,7 @@ //! - The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network. //! Peer that don not have a full copy of the torrent data are called "leechers". //! -//! > **NOTICE**: that both [`SwarmMetadata`](crate::tracker::torrent::SwarmMetadata) and [`SwarmStats`](crate::tracker::torrent::SwarmStats) contain the same information. [`SwarmMetadata`](crate::tracker::torrent::SwarmMetadata) is using the names used on [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). +//! > **NOTICE**: that both [`SwarmMetadata`] and [`SwarmStats`] contain the same information. [`SwarmMetadata`] is using the names used on [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). 
use std::time::Duration; use aquatic_udp_protocol::AnnounceEvent; From ccfb06fbc8fe0a35aeaf4d6fdca6ce9ee4e67ec6 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 12 Oct 2023 10:55:09 +0200 Subject: [PATCH 0629/1003] ci: deny doc warnings --- .github/workflows/testing.yaml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index 3ae18e4db..f60f03e5e 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -65,6 +65,13 @@ jobs: name: Run Lint Checks run: cargo clippy --tests --benches --examples --workspace --all-targets --all-features -- -D clippy::correctness -D clippy::suspicious -D clippy::complexity -D clippy::perf -D clippy::style -D clippy::pedantic + - id: docs + name: Lint Documentation + env: + RUSTDOCFLAGS: "-D warnings" + run: cargo doc --no-deps --bins --examples --workspace --all-features + + unit: name: Units runs-on: ubuntu-latest @@ -96,10 +103,6 @@ jobs: with: tool: cargo-llvm-cov, cargo-nextest - - id: docs - name: Build Documentation - run: cargo doc --no-deps --bins --examples --workspace --all-features - - id: test-docs name: Run Documentation Tests run: cargo test --doc From eb428f295330150976c1deb517e432910aebf49f Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 15 Oct 2023 15:09:54 +0200 Subject: [PATCH 0630/1003] various: small updates --- .github/workflows/coverage.yaml | 4 +- .github/workflows/labels.yaml | 2 +- Cargo.lock | 74 +++++++++++++++++-------------- Cargo.toml | 68 ++++++++++++++-------------- README.md | 31 ++++++------- contrib/bencode/Cargo.toml | 4 +- packages/configuration/Cargo.toml | 12 ++--- packages/located-error/Cargo.toml | 2 +- packages/primitives/Cargo.toml | 4 +- packages/test-helpers/Cargo.toml | 4 +- 10 files changed, 104 insertions(+), 101 deletions(-) diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 1e7dace66..7f5bf2946 100644 --- 
a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -53,11 +53,11 @@ jobs: - id: check name: Run Build Checks - run: cargo check --workspace --all-targets --all-features + run: cargo check --tests --benches --examples --workspace --all-targets --all-features - id: test name: Run Unit Tests - run: cargo test --workspace --all-targets --all-features + run: cargo test --tests --benches --examples --workspace --all-targets --all-features - id: coverage name: Generate Coverage Report diff --git a/.github/workflows/labels.yaml b/.github/workflows/labels.yaml index bb8283f30..97aaa0308 100644 --- a/.github/workflows/labels.yaml +++ b/.github/workflows/labels.yaml @@ -29,7 +29,7 @@ jobs: - id: sync name: Apply Labels from File - uses: EndBug/label-sync@v2 + uses: EndBug/label-sync@da00f2c11fdb78e4fae44adac2fdd713778ea3e8 with: config-file: .github/labels.json delete-other-labels: true diff --git a/Cargo.lock b/Cargo.lock index ef5e1a4e3..9df190ae5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -120,9 +120,9 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-compression" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb42b2197bf15ccb092b62c74515dbd8b86d0effd934795f6687c93b6e679a2c" +checksum = "f658e2baef915ba0f26f1f7c42bfb8e12f532a01f449a090ded75ae7a07e9ba2" dependencies = [ "brotli", "flate2", @@ -136,9 +136,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.73" +version = "0.1.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" +checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", @@ -765,10 +765,11 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" +checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" dependencies = [ + "powerfmt", "serde", ] @@ -894,9 +895,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = [ "crc32fast", "libz-sys", @@ -2131,6 +2132,12 @@ dependencies = [ "plotters-backend", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -2347,9 +2354,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d119d7c7ca818f8a53c300863d4f87566aac09943aef5b355bb83969dae75d87" +checksum = "aaac441002f822bc9705a681810a4dd2963094b9ca0ddc41cb963a4c189189ea" dependencies = [ "aho-corasick", "memchr", @@ -2359,9 +2366,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "465c6fc0621e4abc4187a2bda0937bfd4f722c2730b29562e19689ea796c9a4b" +checksum = "5011c7e263a695dc8ca064cddb722af1be54e517a280b12a5356f98366899e5d" dependencies = [ "aho-corasick", "memchr", @@ -2370,9 +2377,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56d84fdd47036b038fc80dd333d10b6aab10d5d31f4a366e20014def75328d33" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "rend" @@ 
-2538,9 +2545,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.18" +version = "0.38.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a74ee2d7c2581cd139b42447d7d9389b889bdaad3a73f1ebb16f2a3237bb19c" +checksum = "745ecfa778e66b2b63c88a61cb36e0eea109e803b0b86bf9879fbc77c70e86ed" dependencies = [ "bitflags 2.4.0", "errno", @@ -2678,9 +2685,9 @@ checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" [[package]] name = "serde" -version = "1.0.188" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" dependencies = [ "serde_derive", ] @@ -2706,9 +2713,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" dependencies = [ "proc-macro2", "quote", @@ -3008,12 +3015,13 @@ dependencies = [ [[package]] name = "time" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "426f806f4089c493dcac0d24c29c01e2c38baf8e30f1b716ee37e83d200b18fe" +checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" dependencies = [ "deranged", "itoa", + "powerfmt", "serde", "time-core", "time-macros", @@ -3322,11 +3330,10 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "ee2ef2af84856a50c1d430afce2fdded0a4ec7eda868db86409b4543df0797f9" 
dependencies = [ - "cfg-if", "log", "pin-project-lite", "tracing-core", @@ -3334,9 +3341,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", ] @@ -3639,9 +3646,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.16" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "037711d82167854aff2018dfd193aa0fef5370f456732f0d5a0c59b0f1b4b907" +checksum = "a3b801d0e0a6726477cc207f60162da452f3a95adb368399bef20a946e06f65c" dependencies = [ "memchr", ] @@ -3676,20 +3683,19 @@ dependencies = [ [[package]] name = "zstd" -version = "0.12.4" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a27595e173641171fc74a1232b7b1c7a7cb6e18222c11e9dfb9888fa424c53c" +checksum = "bffb3309596d527cfcba7dfc6ed6052f1d39dfbd7c867aa2e865e4a449c10110" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "6.0.6" +version = "7.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee98ffd0b48ee95e6c5168188e44a54550b1564d9d530ee21d5f0eaed1069581" +checksum = "43747c7422e2924c11144d5229878b98180ef8b06cca4ab5af37afc8a8d8ea3e" dependencies = [ - "libc", "zstd-sys", ] diff --git a/Cargo.toml b/Cargo.toml index 3c9354cae..80d7d30b0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,47 +29,47 @@ rust-version = "1.72" version = "3.0.0-alpha.12-develop" [dependencies] -aquatic_udp_protocol = "0.8" -async-trait = "0.1" -axum = "0.6" -axum-client-ip = "0.4" -axum-server = { version = "0.5", features = ["tls-rustls"] } -binascii = "0.1" -chrono = { version = "0.4", default-features = false, features = ["clock"] } 
-config = "0.13" -derive_more = "0.99" -fern = "0.6" -futures = "0.3" -hyper = "0.14" -lazy_static = "1.4" -log = { version = "0.4", features = ["release_max_level_info"] } -multimap = "0.9" -openssl = { version = "0.10", features = ["vendored"] } -percent-encoding = "2.2" -r2d2 = "0.8" -r2d2_mysql = "24.0" -r2d2_sqlite = { version = "0.22", features = ["bundled"] } -rand = "0.8" -serde = { version = "1.0", features = ["derive"] } -serde_bencode = "^0.2" -serde_json = "1.0" -serde_with = "3.2" -thiserror = "1.0" -tokio = { version = "1.29", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +aquatic_udp_protocol = "0" +async-trait = "0" +axum = "0" +axum-client-ip = "0" +axum-server = { version = "0", features = ["tls-rustls"] } +binascii = "0" +chrono = { version = "0", default-features = false, features = ["clock"] } +config = "0" +derive_more = "0" +fern = "0" +futures = "0" +hyper = "0" +lazy_static = "1" +log = { version = "0", features = ["release_max_level_info"] } +multimap = "0" +openssl = { version = "0", features = ["vendored"] } +percent-encoding = "2" +r2d2 = "0" +r2d2_mysql = "24" +r2d2_sqlite = { version = "0", features = ["bundled"] } +rand = "0" +serde = { version = "1", features = ["derive"] } +serde_bencode = "0" +serde_json = "1" +serde_with = "3" +thiserror = "1" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "packages/configuration" } torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12-develop", path = "contrib/bencode" } torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "packages/located-error" } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "packages/primitives" } -tower-http = { version = "0.4", features = ["compression-full"] } +tower-http = { version = "0", features = ["compression-full"] } uuid = { version = "1", features = ["v4"] } 
[dev-dependencies] -local-ip-address = "0.5" -mockall = "0.11" -reqwest = { version = "0.11.18", features = ["json"] } -serde_bytes = "0.11" -serde_repr = "0.1" -serde_urlencoded = "0.7" +local-ip-address = "0" +mockall = "0" +reqwest = { version = "0", features = ["json"] } +serde_bytes = "0" +serde_repr = "0" +serde_urlencoded = "0" torrust-tracker-test-helpers = { version = "3.0.0-alpha.12-develop", path = "packages/test-helpers" } [workspace] diff --git a/README.md b/README.md index b43db1316..e584db3c8 100644 --- a/README.md +++ b/README.md @@ -2,16 +2,14 @@ [![container_wf_b]][container_wf] [![coverage_wf_b]][coverage_wf] [![deployment_wf_b]][deployment_wf] [![testing_wf_b]][testing_wf] -__Torrust Tracker__, is a [BitTorrent][bittorrent] Tracker (a service that matchmakes peers and collects statistics) written in [Rust Language][rust] and [axum] (a modern web application framework). ___This tracker aims to be respectful to established standards, (both [formal][BEP 00] and [otherwise][torrent_source_felid]).___ +__Torrust Tracker__, is a [BitTorrent][bittorrent] Tracker that matchmakes peers and collects statistics. Written in [Rust Language][rust] with the [axum] web framework. ___This tracker aims to be respectful to established standards, (both [formal][BEP 00] and [otherwise][torrent_source_felid]).___ > This is a [Torrust][torrust] project and is in active development. It is community supported as well as sponsored by [Nautilus Cyberneering][nautilus]. -- _We have a [container guide][containers.md] for those who wish to get started with __Docker__ or __Podman___ - ## Key Features - [x] High Quality and Modern Rust Codebase. -- [x] [Documentation] Generated from Code Comments. +- [x] [Documentation][docs] Generated from Code Comments. - [x] [Comprehensive Suit][coverage] of Unit and Functional Tests. - [x] Good Performance in Busy Conditions. - [x] Support for `UDP`, `HTTP`, and `TLS` Sockets. 
@@ -35,7 +33,7 @@ __Torrust Tracker__, is a [BitTorrent][bittorrent] Tracker (a service that match ### Container Version -The Torrust Tracker is [deployed to DockerHub][dockerhub_torrust_tracker], you can run a demo immediately with the following commands: +The Torrust Tracker is [deployed to DockerHub][dockerhub], you can run a demo immediately with the following commands: #### Docker: @@ -126,15 +124,14 @@ The following services are provided by the default configuration: - [Tracker (UDP)][udp] ## Contributing - -This is an open-source community supported project.
-We welcome contributions from the community! +We are happy to support and welcome new people to our project. Please consider our [contributor guide][guide.md].
+This is an open-source community supported project. We welcome contributions from the community! __How can you contribute?__ - Bug reports and feature requests. - Code contributions. You can start by looking at the issues labeled "[good first issues]". -- Documentation improvements. Check the [documentation] and [API documentation] for typos, errors, or missing information. +- Documentation improvements. Check the [documentation][docs] and [API documentation][api] for typos, errors, or missing information. - Participation in the community. You can help by answering questions in the [discussions]. ## License @@ -153,8 +150,7 @@ Some files include explicit copyright notices and/or license notices. For prosperity, versions of Torrust Tracker that are older than five years are automatically granted the [MIT-0][MIT_0] license in addition to the existing [AGPL-3.0-only][AGPL_3_0] license. -## Contributions - +## Contributor Agreement The copyright of the Torrust Tracker is retained by the respective authors. **Contributors agree:** @@ -163,6 +159,8 @@ The copyright of the Torrust Tracker is retained by the respective authors. **The Torrust-Tracker project has no copyright assignment agreement.** +_We kindly ask you to take time and consider The Torrust Project [Contributor Agreement][agreement.md] in full._ + ## Acknowledgments This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [Dutch Bits]. Also thanks to [Naim A.] and [greatest-ape] for some parts of the code. Further added features and functions thanks to [Power2All]. 
@@ -185,7 +183,7 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [coverage]: https://app.codecov.io/gh/torrust/torrust-tracker [torrust]: https://torrust.com/ -[dockerhub_torrust_tracker]: https://hub.docker.com/r/torrust/tracker/tags +[dockerhub]: https://hub.docker.com/r/torrust/tracker/tags [torrent_source_felid]: https://github.com/qbittorrent/qBittorrent/discussions/19406 @@ -199,24 +197,23 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [containers.md]: ./docs/containers.md +[docs]: https://docs.rs/torrust-tracker/latest/ [api]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/apis/v1 [http]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/http [udp]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/udp [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 -[documentation]: https://docs.rs/torrust-tracker/ -[API documentation]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/apis/v1 [discussions]: https://github.com/torrust/torrust-tracker/discussions -[COPYRIGHT]: ./COPYRIGHT +[guide.md]: https://github.com/torrust/.github/blob/main/info/contributing.md +[agreement.md]: https://github.com/torrust/.github/blob/main/info/licensing/contributor_agreement_v01.md + [AGPL_3_0]: ./docs/licenses/LICENSE-AGPL_3_0 [MIT_0]: ./docs/licenses/LICENSE-MIT_0 [FSF]: https://www.fsf.org/ - [nautilus]: https://github.com/orgs/Nautilus-Cyberneering/ [Dutch Bits]: https://dutchbits.nl [Naim A.]: https://github.com/naim94a/udpt [greatest-ape]: https://github.com/greatest-ape/aquatic [Power2All]: https://github.com/power2all - diff --git a/contrib/bencode/Cargo.toml b/contrib/bencode/Cargo.toml index 3918aa6ba..f7bab0585 100644 --- a/contrib/bencode/Cargo.toml +++ b/contrib/bencode/Cargo.toml @@ -16,10 +16,10 @@ rust-version.workspace = true version.workspace = true [dependencies] 
-error-chain = "0.12" +error-chain = "0" [dev-dependencies] -criterion = "0.5" +criterion = "0" [[test]] name = "test" diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 94cf1d0f5..e373b4269 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -15,12 +15,12 @@ rust-version.workspace = true version.workspace = true [dependencies] -config = "0.13" -log = { version = "0.4", features = ["release_max_level_info"] } -serde = { version = "1.0", features = ["derive"] } -serde_with = "3.2" -thiserror = "1.0" -toml = "0.8" +config = "0" +log = { version = "0", features = ["release_max_level_info"] } +serde = { version = "1", features = ["derive"] } +serde_with = "3" +thiserror = "1" +toml = "0" torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "../located-error" } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index 6bc219374..5b266934c 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -15,7 +15,7 @@ rust-version.workspace = true version.workspace = true [dependencies] -log = { version = "0.4", features = ["release_max_level_info"] } +log = { version = "0", features = ["release_max_level_info"] } [dev-dependencies] thiserror = "1.0" diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index 3fac88bef..efcce71a9 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -15,5 +15,5 @@ rust-version.workspace = true version.workspace = true [dependencies] -derive_more = "0.99" -serde = { version = "1.0", features = ["derive"] } +derive_more = "0" +serde = { version = "1", features = ["derive"] } diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index d428cbf3e..9ae891a01 100644 --- a/packages/test-helpers/Cargo.toml +++ 
b/packages/test-helpers/Cargo.toml @@ -15,7 +15,7 @@ rust-version.workspace = true version.workspace = true [dependencies] -lazy_static = "1.4" -rand = "0.8.5" +lazy_static = "1" +rand = "0" torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } From cb914daa9e98c7414b2987c3724f63818d187455 Mon Sep 17 00:00:00 2001 From: Dan Groshev Date: Mon, 9 Oct 2023 19:23:06 +0100 Subject: [PATCH 0631/1003] feat: replace peer ID recognition with tdyne-peer-id-registry --- Cargo.lock | 63 ++++++++++++++ Cargo.toml | 2 + .../apis/v1/context/torrent/resources/peer.rs | 2 +- src/tracker/peer.rs | 86 ++----------------- 4 files changed, 73 insertions(+), 80 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9df190ae5..510e84559 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2066,6 +2066,44 @@ dependencies = [ "sha2", ] +[[package]] +name = "phf" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_codegen" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8d39688d359e6b34654d328e262234662d16cc0f60ec8dcbe5e718709342a5a" +dependencies = [ + "phf_generator", + "phf_shared", +] + +[[package]] +name = "phf_generator" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +dependencies = [ + "phf_shared", + "rand", +] + +[[package]] +name = "phf_shared" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = "1.1.3" @@ -2847,6 +2885,12 
@@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" +[[package]] +name = "siphasher" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" + [[package]] name = "slab" version = "0.4.9" @@ -2965,6 +3009,23 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +[[package]] +name = "tdyne-peer-id" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dde285ba6f835045648f9d4f4703f778aaafb47421d9c5dff47be1534370c3e" + +[[package]] +name = "tdyne-peer-id-registry" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1923b2d356e080e8bee847c39b58de293309df2fe0bc9ecd859ae3210e868c25" +dependencies = [ + "phf", + "phf_codegen", + "tdyne-peer-id", +] + [[package]] name = "tempfile" version = "3.8.0" @@ -3219,6 +3280,8 @@ dependencies = [ "serde_repr", "serde_urlencoded", "serde_with", + "tdyne-peer-id", + "tdyne-peer-id-registry", "thiserror", "tokio", "torrust-tracker-configuration", diff --git a/Cargo.toml b/Cargo.toml index 80d7d30b0..ab31a9275 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -54,6 +54,8 @@ serde = { version = "1", features = ["derive"] } serde_bencode = "0" serde_json = "1" serde_with = "3" +tdyne-peer-id = "1" +tdyne-peer-id-registry = "0.1" thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "packages/configuration" } diff --git a/src/servers/apis/v1/context/torrent/resources/peer.rs b/src/servers/apis/v1/context/torrent/resources/peer.rs index 4989cec52..e0cab853e 100644 --- 
a/src/servers/apis/v1/context/torrent/resources/peer.rs +++ b/src/servers/apis/v1/context/torrent/resources/peer.rs @@ -39,7 +39,7 @@ impl From for Id { fn from(peer_id: tracker::peer::Id) -> Self { Id { id: peer_id.to_hex_string(), - client: peer_id.get_client_name().map(std::string::ToString::to_string), + client: peer_id.get_client_name(), } } } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index d6517f213..4027799a9 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -252,81 +252,9 @@ impl Id { } #[must_use] - pub fn get_client_name(&self) -> Option<&'static str> { - if self.0[0] == b'M' { - return Some("BitTorrent"); - } - if self.0[0] == b'-' { - let name = match &self.0[1..3] { - b"AG" | b"A~" => "Ares", - b"AR" => "Arctic", - b"AV" => "Avicora", - b"AX" => "BitPump", - b"AZ" => "Azureus", - b"BB" => "BitBuddy", - b"BC" => "BitComet", - b"BF" => "Bitflu", - b"BG" => "BTG (uses Rasterbar libtorrent)", - b"BR" => "BitRocket", - b"BS" => "BTSlave", - b"BX" => "~Bittorrent X", - b"CD" => "Enhanced CTorrent", - b"CT" => "CTorrent", - b"DE" => "DelugeTorrent", - b"DP" => "Propagate Data Client", - b"EB" => "EBit", - b"ES" => "electric sheep", - b"FT" => "FoxTorrent", - b"FW" => "FrostWire", - b"FX" => "Freebox BitTorrent", - b"GS" => "GSTorrent", - b"HL" => "Halite", - b"HN" => "Hydranode", - b"KG" => "KGet", - b"KT" => "KTorrent", - b"LH" => "LH-ABC", - b"LP" => "Lphant", - b"LT" => "libtorrent", - b"lt" => "libTorrent", - b"LW" => "LimeWire", - b"MO" => "MonoTorrent", - b"MP" => "MooPolice", - b"MR" => "Miro", - b"MT" => "MoonlightTorrent", - b"NX" => "Net Transport", - b"PD" => "Pando", - b"qB" => "qBittorrent", - b"QD" => "QQDownload", - b"QT" => "Qt 4 Torrent example", - b"RT" => "Retriever", - b"S~" => "Shareaza alpha/beta", - b"SB" => "~Swiftbit", - b"SS" => "SwarmScope", - b"ST" => "SymTorrent", - b"st" => "sharktorrent", - b"SZ" => "Shareaza", - b"TN" => "TorrentDotNET", - b"TR" => "Transmission", - b"TS" => "Torrentstorm", - b"TT" => 
"TuoTu", - b"UL" => "uLeecher!", - b"UT" => "µTorrent", - b"UW" => "µTorrent Web", - b"VG" => "Vagaa", - b"WD" => "WebTorrent Desktop", - b"WT" => "BitLet", - b"WW" => "WebTorrent", - b"WY" => "FireTorrent", - b"XL" => "Xunlei", - b"XT" => "XanTorrent", - b"XX" => "Xtorrent", - b"ZT" => "ZipTorrent", - _ => return None, - }; - Some(name) - } else { - None - } + pub fn get_client_name(&self) -> Option { + let peer_id = tdyne_peer_id::PeerId::from(self.0); + tdyne_peer_id_registry::parse(peer_id).ok().map(|parsed| parsed.client) } } @@ -336,9 +264,9 @@ impl Serialize for Id { S: serde::Serializer, { #[derive(Serialize)] - struct PeerIdInfo<'a> { + struct PeerIdInfo { id: Option, - client: Option<&'a str>, + client: Option, } let obj = PeerIdInfo { @@ -476,7 +404,7 @@ mod test { #[test] fn it_should_be_serializable() { let torrent_peer = Peer { - peer_id: peer::Id(*b"-qB00000000000000000"), + peer_id: peer::Id(*b"-qB0000-000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: Current::now(), uploaded: NumberOfBytes(0), @@ -490,7 +418,7 @@ mod test { let expected_raw_json = r#" { "peer_id": { - "id": "0x2d71423030303030303030303030303030303030", + "id": "0x2d7142303030302d303030303030303030303030", "client": "qBittorrent" }, "peer_addr":"126.0.0.1:8080", From 8ec0a23e68f1b5bd3c71c84358f463e609d56fac Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Nov 2023 14:07:31 +0100 Subject: [PATCH 0632/1003] chore: update deps --- Cargo.lock | 416 ++++++++++++++++-------------- Cargo.toml | 2 +- packages/located-error/Cargo.toml | 2 +- 3 files changed, 220 insertions(+), 200 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 510e84559..749c0fa53 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -19,9 +19,9 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" dependencies = [ "getrandom", "once_cell", @@ -30,13 +30,14 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" dependencies = [ "cfg-if", "once_cell", "version_check", + "zerocopy", ] [[package]] @@ -142,7 +143,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -254,9 +255,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.4" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" +checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" [[package]] name = "bigdecimal" @@ -277,11 +278,11 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bindgen" -version = "0.68.1" +version = "0.69.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "726e4313eb6ec35d2730258ad4e15b547ee75d6afaa1361a922e78e59b7d8078" +checksum = "9ffcebc3849946a7170a05992aac39da343a90676ab392c51a4280981d6379c2" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "cexpr", "clang-sys", "lazy_static", @@ -292,7 +293,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -303,9 +304,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.0" +version = "2.4.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "bitvec" @@ -386,9 +387,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "2.5.0" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da74e2b81409b1b743f8f0c62cc6254afefb8b8e50bbfe3735550f7aeefa3448" +checksum = "4e2e4afe60d7dd600fdd3de8d0f08c2b7ec039712e3b6137ff98b7004e82de4f" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -524,18 +525,18 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.6" +version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956" +checksum = "ac495e00dcec98c83465d5ad66c5c4fabd652fd6686e7c6269b117e729a6f17b" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" -version = "4.4.6" +version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45" +checksum = "c77ed9a32a62e6ca27175d00d29d05ca32e396ea1eb5fb01d8256b669cec7663" dependencies = [ "anstyle", "clap_lex", @@ -543,9 +544,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" +checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" [[package]] name = "cmake" @@ -599,9 +600,9 @@ checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" 
+checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" dependencies = [ "libc", ] @@ -749,7 +750,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -760,7 +761,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -794,7 +795,7 @@ checksum = "9abcad25e9720609ccb3dcdb795d845e37d8ce34183330a9f48b03a1a71c8e21" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -984,7 +985,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -996,7 +997,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1008,7 +1009,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1019,9 +1020,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" dependencies = [ "futures-channel", "futures-core", @@ -1034,9 +1035,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" dependencies = [ "futures-core", "futures-sink", @@ -1044,15 +1045,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.29" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" dependencies = [ "futures-core", "futures-task", @@ -1061,38 +1062,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.29" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" dependencies = [ "futures-channel", "futures-core", @@ -1170,7 +1171,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash 0.7.6", + "ahash 0.7.7", ] [[package]] @@ -1179,16 +1180,16 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.3", + "ahash 0.8.6", ] [[package]] name = "hashbrown" -version = "0.14.1" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" +checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" dependencies = [ - "ahash 0.8.3", + "ahash 0.8.6", "allocator-api2", ] @@ -1198,7 +1199,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.1", + "hashbrown 0.14.2", ] [[package]] @@ -1276,7 +1277,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.4.10", "tokio", "tower-service", "tracing", @@ -1298,16 +1299,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.57" +version = "0.1.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" +checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - 
"windows", + "windows-core", ] [[package]] @@ -1348,12 +1349,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" +checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", - "hashbrown 0.14.1", + "hashbrown 0.14.2", "serde", ] @@ -1364,14 +1365,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5305557fa27b460072ae15ce07617e999f5879f14d376c8449f0bfb9f9d8e91e" dependencies = [ "derive_utils", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] name = "ipnet" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "is-terminal" @@ -1410,9 +1411,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8" dependencies = [ "wasm-bindgen", ] @@ -1515,9 +1516,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.149" +version = "0.2.150" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" [[package]] name = "libloading" @@ -1577,9 +1578,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" 
+checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" dependencies = [ "autocfg", "scopeguard", @@ -1644,9 +1645,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" dependencies = [ "libc", "wasi", @@ -1682,9 +1683,9 @@ dependencies = [ [[package]] name = "multimap" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70db9248a93dc36a36d9a47898caa007a32755c7ad140ec64eeeb50d5a730631" +checksum = "e1a5d38b9b352dbd913288736af36af41c48d61b1a8cd34bcecd727561b7d511" dependencies = [ "serde", ] @@ -1710,7 +1711,7 @@ dependencies = [ "percent-encoding", "serde", "serde_json", - "socket2 0.5.4", + "socket2 0.5.5", "twox-hash", "url", ] @@ -1728,7 +1729,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "termcolor", "thiserror", ] @@ -1739,10 +1740,10 @@ version = "0.30.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57349d5a326b437989b6ee4dc8f2f34b0cc131202748414712a8e7d98952fc8c" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "bigdecimal", "bindgen", - "bitflags 2.4.0", + "bitflags 2.4.1", "bitvec", "byteorder", "bytes", @@ -1908,11 +1909,11 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "openssl" -version = "0.10.57" +version = "0.10.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" +checksum = "7a257ad03cd8fb16ad4172fedf8094451e1af1c4b70097636ef2eac9a5f0cc33" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "cfg-if", "foreign-types", "libc", @@ -1929,7 +1930,7 @@ checksum = 
"a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1940,18 +1941,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.1.5+3.1.3" +version = "300.1.6+3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "559068e4c12950d7dcaa1857a61725c0d38d4fc03ff8e070ab31a75d6e316491" +checksum = "439fac53e092cd7442a3660c85dde4643ab3b5bd39040912388dcdabf6b88085" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.93" +version = "0.9.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db4d56a4c0478783083cfafcc42493dd4a981d41669da64b4572a2a089b51b1d" +checksum = "40a4130519a360279579c2053038317e40eff64d13fd3f004f9e1b72b8a6aaf9" dependencies = [ "cc", "libc", @@ -1982,9 +1983,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ "cfg-if", "libc", @@ -2011,7 +2012,7 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "serde", ] @@ -2023,9 +2024,9 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.7.4" +version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c022f1e7b65d6a24c0dbbd5fb344c66881bc01f3e5ae74a1c8100f2f985d98a4" +checksum = "ae9cee2a55a544be8b89dc6848072af97a20f2422603c10865be2a42b580fff5" dependencies = [ "memchr", "thiserror", @@ -2034,9 +2035,9 @@ 
dependencies = [ [[package]] name = "pest_derive" -version = "2.7.4" +version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35513f630d46400a977c4cb58f78e1bfbe01434316e60c37d27b9ad6139c66d8" +checksum = "81d78524685f5ef2a3b3bd1cafbc9fcabb036253d9b1463e726a91cd16e2dfc2" dependencies = [ "pest", "pest_generator", @@ -2044,22 +2045,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.4" +version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc9fc1b9e7057baba189b5c626e2d6f40681ae5b6eb064dc7c7834101ec8123a" +checksum = "68bd1206e71118b5356dae5ddc61c8b11e28b09ef6a31acbd15ea48a28e0c227" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] name = "pest_meta" -version = "2.7.4" +version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df74e9e7ec4053ceb980e7c0c8bd3594e977fde1af91daba9c928e8e8c6708d" +checksum = "7c747191d4ad9e4a4ab9c8798f1e82a39affe7ef9648390b7e5548d18e099de6" dependencies = [ "once_cell", "pest", @@ -2121,7 +2122,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -2383,18 +2384,18 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.3.5" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "regex" -version = "1.10.1" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaac441002f822bc9705a681810a4dd2963094b9ca0ddc41cb963a4c189189ea" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ 
"aho-corasick", "memchr", @@ -2404,9 +2405,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5011c7e263a695dc8ca064cddb722af1be54e517a280b12a5356f98366899e5d" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick", "memchr", @@ -2434,7 +2435,7 @@ version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "bytes", "encoding_rs", "futures-core", @@ -2468,17 +2469,16 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.20" +version = "0.17.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" dependencies = [ "cc", + "getrandom", "libc", - "once_cell", "spin", "untrusted", - "web-sys", - "winapi", + "windows-sys", ] [[package]] @@ -2526,7 +2526,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -2583,11 +2583,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.19" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "745ecfa778e66b2b63c88a61cb36e0eea109e803b0b86bf9879fbc77c70e86ed" +checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", @@ -2596,9 +2596,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.7" +version = "0.21.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" dependencies = [ "log", "ring", @@ -2612,14 +2612,14 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", ] [[package]] name = "rustls-webpki" -version = "0.101.6" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ "ring", "untrusted", @@ -2678,9 +2678,9 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ "ring", "untrusted", @@ -2723,9 +2723,9 @@ checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" [[package]] name = "serde" -version = "1.0.189" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" dependencies = [ "serde_derive", ] @@ -2751,20 +2751,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = 
"d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" dependencies = [ "itoa", "ryu", @@ -2783,20 +2783,20 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" +checksum = "3081f5ffbb02284dda55132aa26daecedd7372a42417bbbab6f14ab7d6bb9145" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] name = "serde_spanned" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +checksum = "12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80" dependencies = [ "serde", ] @@ -2815,15 +2815,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca3b16a3d82c4088f343b7480a93550b3eabe1a358569c2dfe38bbcead07237" +checksum = "64cd236ccc1b7a29e7e2739f27c0b2dd199804abc4290e32f59f3b68d6405c23" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.0.2", + "indexmap 2.1.0", "serde", "serde_json", "serde_with_macros", @@ -2832,14 +2832,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e6be15c453eb305019bfa438b1593c731f36a289a7853f7707ee29e870b3b3c" +checksum = 
"93634eb5f75a2323b16de4748022ac4297f9e76b6dced2be287a099f41b5e788" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -2908,9 +2908,9 @@ checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" [[package]] name = "socket2" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ "libc", "winapi", @@ -2918,9 +2918,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", "windows-sys", @@ -2928,9 +2928,9 @@ dependencies = [ [[package]] name = "spin" -version = "0.5.2" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "static_assertions" @@ -2967,9 +2967,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.38" +version = "2.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" dependencies = [ "proc-macro2", "quote", @@ -3028,9 +3028,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = 
"7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if", "fastrand", @@ -3056,22 +3056,22 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.49" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" +checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.49" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" +checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3141,7 +3141,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.4", + "socket2 0.5.5", "tokio-macros", "windows-sys", ] @@ -3154,7 +3154,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3179,9 +3179,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", @@ -3202,21 +3202,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.2" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "185d8ab0dfbb35cf1399a6344d8484209c088f75f8f68230da55d48d95d43e3d" +checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" dependencies = [ 
"serde", "serde_spanned", "toml_datetime", - "toml_edit 0.20.2", + "toml_edit 0.21.0", ] [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" dependencies = [ "serde", ] @@ -3227,18 +3227,18 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.1.0", "toml_datetime", "winnow", ] [[package]] name = "toml_edit" -version = "0.20.2" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.1.0", "serde", "serde_spanned", "toml_datetime", @@ -3302,7 +3302,7 @@ dependencies = [ "serde", "serde_with", "thiserror", - "toml 0.8.2", + "toml 0.8.8", "torrust-tracker-located-error", "torrust-tracker-primitives", "uuid", @@ -3365,7 +3365,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ "async-compression", - "bitflags 2.4.0", + "bitflags 2.4.1", "bytes", "futures-core", "futures-util", @@ -3393,9 +3393,9 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.39" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2ef2af84856a50c1d430afce2fdded0a4ec7eda868db86409b4543df0797f9" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", "pin-project-lite", @@ -3463,9 +3463,9 @@ 
dependencies = [ [[package]] name = "untrusted" -version = "0.7.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" @@ -3480,9 +3480,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" +checksum = "88ad59a7560b41a70d191093a945f0b87bc1deeda46fb237479708a1d6b6cdfc" dependencies = [ "getrandom", "rand", @@ -3527,9 +3527,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3537,24 +3537,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "9afec9963e3d0994cac82455b2b3502b81a7f40f9a0d32181f7528d9f4b43e02" dependencies = [ "cfg-if", "js-sys", @@ -3564,9 +3564,9 @@ dependencies = [ 
[[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3574,28 +3574,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "5db499c5f66323272151db0e666cd34f78617522fb0c1604d31a27c50c206a85" dependencies = [ "js-sys", "wasm-bindgen", @@ -3633,10 +3633,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "windows" -version = "0.48.0" +name = "windows-core" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" dependencies = [ "windows-targets", ] @@ 
-3709,9 +3709,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.17" +version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3b801d0e0a6726477cc207f60162da452f3a95adb368399bef20a946e06f65c" +checksum = "829846f3e3db426d4cee4510841b71a8e58aa2a76b1132579487ae430ccd9c7b" dependencies = [ "memchr", ] @@ -3744,6 +3744,26 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "zerocopy" +version = "0.7.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cd369a67c0edfef15010f980c3cbe45d7f651deac2cd67ce097cd801de16557" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2f140bda219a26ccc0cdb03dba58af72590c53b22642577d88a927bc5c87d6b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + [[package]] name = "zstd" version = "0.13.0" diff --git a/Cargo.toml b/Cargo.toml index ab31a9275..fd6230f80 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,7 +55,7 @@ serde_bencode = "0" serde_json = "1" serde_with = "3" tdyne-peer-id = "1" -tdyne-peer-id-registry = "0.1" +tdyne-peer-id-registry = "0" thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "packages/configuration" } diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index 5b266934c..fa3d1d76d 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -18,4 +18,4 @@ version.workspace = true log = { version = "0", features = ["release_max_level_info"] } [dev-dependencies] -thiserror = "1.0" +thiserror = "1" From dbc8920c70f6a78abf71fc86d421828d7970bbaa Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Nov 2023 14:51:28 +0100 
Subject: [PATCH 0633/1003] ci: fix nightly changes --- contrib/bencode/src/access/dict.rs | 2 ++ src/servers/udp/connection_cookie.rs | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/contrib/bencode/src/access/dict.rs b/contrib/bencode/src/access/dict.rs index 596d9535e..7efe93fc3 100644 --- a/contrib/bencode/src/access/dict.rs +++ b/contrib/bencode/src/access/dict.rs @@ -21,6 +21,7 @@ pub trait BDictAccess { impl<'a, V> BDictAccess<&'a [u8], V> for BTreeMap<&'a [u8], V> { fn to_list(&self) -> Vec<(&&'a [u8], &V)> { + #[allow(clippy::map_identity)] self.iter().map(|(k, v)| (k, v)).collect() } @@ -43,6 +44,7 @@ impl<'a, V> BDictAccess<&'a [u8], V> for BTreeMap<&'a [u8], V> { impl<'a, V> BDictAccess, V> for BTreeMap, V> { fn to_list(&self) -> Vec<(&Cow<'a, [u8]>, &V)> { + #[allow(clippy::map_identity)] self.iter().map(|(k, v)| (k, v)).collect() } diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs index 4dc9896ab..19e61f14e 100644 --- a/src/servers/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -173,11 +173,12 @@ mod tests { #[test] fn it_should_make_a_connection_cookie() { // Note: This constant may need to be updated in the future as the hash is not guaranteed to to be stable between versions. 
- const ID_COOKIE: Cookie = [23, 204, 198, 29, 48, 180, 62, 19]; + const ID_COOKIE_OLD: Cookie = [23, 204, 198, 29, 48, 180, 62, 19]; + const ID_COOKIE_NEW: Cookie = [41, 166, 45, 246, 249, 24, 108, 203]; let cookie = make(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); - assert_eq!(cookie, ID_COOKIE); + assert!(cookie == ID_COOKIE_OLD || cookie == ID_COOKIE_NEW); } #[test] From 9c0b090ca9c3d39497c2cfbd5d1e2de621bb1ab3 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Nov 2023 17:36:30 +0100 Subject: [PATCH 0634/1003] fixup: rename field for clippy --- tests/servers/http/client.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/servers/http/client.rs b/tests/servers/http/client.rs index f5cdca398..0dbdd9cf6 100644 --- a/tests/servers/http/client.rs +++ b/tests/servers/http/client.rs @@ -9,7 +9,7 @@ use super::requests::scrape; /// HTTP Tracker Client pub struct Client { server_addr: std::net::SocketAddr, - reqwest_client: ReqwestClient, + reqwest: ReqwestClient, key: Option, } @@ -25,7 +25,7 @@ impl Client { pub fn new(server_addr: std::net::SocketAddr) -> Self { Self { server_addr, - reqwest_client: reqwest::Client::builder().build().unwrap(), + reqwest: reqwest::Client::builder().build().unwrap(), key: None, } } @@ -34,7 +34,7 @@ impl Client { pub fn bind(server_addr: std::net::SocketAddr, local_address: IpAddr) -> Self { Self { server_addr, - reqwest_client: reqwest::Client::builder().local_address(local_address).build().unwrap(), + reqwest: reqwest::Client::builder().local_address(local_address).build().unwrap(), key: None, } } @@ -42,7 +42,7 @@ impl Client { pub fn authenticated(server_addr: std::net::SocketAddr, key: Key) -> Self { Self { server_addr, - reqwest_client: reqwest::Client::builder().build().unwrap(), + reqwest: reqwest::Client::builder().build().unwrap(), key: Some(key), } } @@ -61,11 +61,11 @@ impl Client { } pub async fn get(&self, path: &str) -> Response { - 
self.reqwest_client.get(self.build_url(path)).send().await.unwrap() + self.reqwest.get(self.build_url(path)).send().await.unwrap() } pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Response { - self.reqwest_client + self.reqwest .get(self.build_url(path)) .header(key, value) .send() From b4329f9977fcf04e4ba0d4776fa5dc5626e9e040 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 25 Sep 2023 12:57:48 +0100 Subject: [PATCH 0635/1003] ci: workflow to show the tracker contract in markdown --- .github/workflows/contract.yaml | 58 +++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 .github/workflows/contract.yaml diff --git a/.github/workflows/contract.yaml b/.github/workflows/contract.yaml new file mode 100644 index 000000000..7c9fd47bd --- /dev/null +++ b/.github/workflows/contract.yaml @@ -0,0 +1,58 @@ +name: Contract + +on: + push: + pull_request: + +env: + CARGO_TERM_COLOR: always + +jobs: + contract: + name: Contract + runs-on: ubuntu-latest + + strategy: + matrix: + toolchain: [stable, nightly] + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.toolchain }} + components: llvm-tools-preview + + - id: cache + name: Enable Job Cache + uses: Swatinem/rust-cache@v2 + + - id: tools + name: Install Tools + uses: taiki-e/install-action@v2 + with: + tool: cargo-llvm-cov, cargo-nextest + + - id: pretty-test + name: Install pretty-test + run: cargo install cargo-pretty-test + + - id: contract + name: Run contract + run: | + cargo test --lib --bins + cargo pretty-test --lib --bins + + - id: summary + name: Generate contract Summary + run: | + echo "### Tracker Living Contract! 
:rocket:" >> $GITHUB_STEP_SUMMARY + cargo pretty-test --lib --bins --color=never >> $GITHUB_STEP_SUMMARY + echo '```console' >> $GITHUB_STEP_SUMMARY + echo "$OUTPUT" >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY From f8175b557fb24d6c12fb390d73597fc8e8cc97cd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 9 Nov 2023 19:57:29 +0000 Subject: [PATCH 0636/1003] chore(deps): bump tokio from 1.33.0 to 1.34.0 Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.33.0 to 1.34.0. - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.33.0...tokio-1.34.0) --- updated-dependencies: - dependency-name: tokio dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 749c0fa53..f63fa0f34 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3130,9 +3130,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.33.0" +version = "1.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" +checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" dependencies = [ "backtrace", "bytes", @@ -3148,9 +3148,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", From f7c5ace1b05e0b9da628c6a04722cb1ea937c3ee Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Nov 2023 16:50:18 +0000 Subject: [PATCH 
0637/1003] chore: update dependencies - bump serde from 1.0.192 to 1.0.193 - bump uuid from 1.5.0 to 1.6.1 - bump r2d2_sqlite from 0.22.0 to 0.23.0 --- Cargo.lock | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f63fa0f34..ef28303e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -869,9 +869,9 @@ dependencies = [ [[package]] name = "fallible-iterator" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" [[package]] name = "fallible-streaming-iterator" @@ -1532,9 +1532,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.26.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" +checksum = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716" dependencies = [ "cc", "pkg-config", @@ -2317,9 +2317,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99f31323d6161385f385046738df520e0e8694fa74852d35891fc0be08348ddc" +checksum = "4dc290b669d30e20751e813517bbe13662d020419c5c8818ff10b6e8bb7777f6" dependencies = [ "r2d2", "rusqlite", @@ -2522,9 +2522,9 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" +checksum = "a78046161564f5e7cd9008aff3b2990b3850dc8e0349119b98e8f251e099f24d" dependencies = [ "bitflags 2.4.1", "fallible-iterator", @@ -2723,9 +2723,9 @@ checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" [[package]] name = "serde" 
-version = "1.0.192" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" +checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" dependencies = [ "serde_derive", ] @@ -2751,9 +2751,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.192" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" +checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", @@ -3480,9 +3480,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.5.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ad59a7560b41a70d191093a945f0b87bc1deeda46fb237479708a1d6b6cdfc" +checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" dependencies = [ "getrandom", "rand", From b609c832ab65304c7f6847eb0bf0ad13235d5bea Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 24 Nov 2023 07:20:20 +0000 Subject: [PATCH 0638/1003] chore: update dependencies - bump percent-encoding from 2.3.0 to 2.3.1 - bump config from 0.13.3 to 0.13.4 - bump openssl from 0.10.59 to 0.10.60 --- Cargo.lock | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ef28303e8..336adb8f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -559,9 +559,9 @@ dependencies = [ [[package]] name = "config" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d379af7f68bfc21714c6c7dea883544201741d2ce8274bb12fa54f89507f52a7" +checksum = "23738e11972c7643e4ec947840fc463b6a571afcd3e735bdfce7d03c7a784aca" dependencies = [ "async-trait", "json5", @@ -1909,9 +1909,9 @@ checksum = 
"0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "openssl" -version = "0.10.59" +version = "0.10.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a257ad03cd8fb16ad4172fedf8094451e1af1c4b70097636ef2eac9a5f0cc33" +checksum = "79a4c6c3a2b158f7f8f2a2fc5a969fa3a068df6fc9dbb4a43845436e3af7c800" dependencies = [ "bitflags 2.4.1", "cfg-if", @@ -1950,9 +1950,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.95" +version = "0.9.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40a4130519a360279579c2053038317e40eff64d13fd3f004f9e1b72b8a6aaf9" +checksum = "3812c071ba60da8b5677cc12bcb1d42989a65553772897a7e0355545a819838f" dependencies = [ "cc", "libc", @@ -2018,9 +2018,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" From f1c7cccf4fc55aac84b5b735bde1ded8178b4d22 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Nov 2023 14:57:59 +0000 Subject: [PATCH 0639/1003] feat: add cargo dependency reqwest It'll be used by a litle script (health check) that needs to make HTTP request. 
--- Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.toml b/Cargo.toml index fd6230f80..51a1ac00f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,6 +50,7 @@ r2d2 = "0" r2d2_mysql = "24" r2d2_sqlite = { version = "0", features = ["bundled"] } rand = "0" +reqwest = "0" serde = { version = "1", features = ["derive"] } serde_bencode = "0" serde_json = "1" From 0ef4e34254ca175ba861d45f58c065a55bd554c3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Nov 2023 14:59:58 +0000 Subject: [PATCH 0640/1003] feat: [#508] add new binary HTTP health check It makes a request to an HTTP endpoint to check that the service is healthy. --- Cargo.toml | 1 + src/bin/http_health_check.rs | 37 ++++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 src/bin/http_health_check.rs diff --git a/Cargo.toml b/Cargo.toml index 51a1ac00f..2316a1edf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,5 @@ [package] +default-run = "torrust-tracker" name = "torrust-tracker" readme = "README.md" diff --git a/src/bin/http_health_check.rs b/src/bin/http_health_check.rs new file mode 100644 index 000000000..2c39f2a79 --- /dev/null +++ b/src/bin/http_health_check.rs @@ -0,0 +1,37 @@ +//! Minimal `curl` or `wget` to be used for container health checks. +//! +//! It's convenient to avoid using third-party libraries because: +//! +//! - They are harder to maintain. +//! - They introduce new attack vectors. 
+use std::{env, process}; + +#[tokio::main] +async fn main() { + let args: Vec = env::args().collect(); + if args.len() != 2 { + eprintln!("Usage: cargo run --bin health_check "); + eprintln!("Example: cargo run --bin http_health_check http://localhost:1212/api/v1/stats?token=MyAccessToken"); + std::process::exit(1); + } + + println!("Health check ..."); + + let url = &args[1].clone(); + + match reqwest::get(url).await { + Ok(response) => { + if response.status().is_success() { + println!("STATUS: {}", response.status()); + process::exit(0); + } else { + println!("Non-success status received."); + process::exit(1); + } + } + Err(err) => { + println!("ERROR: {err}"); + process::exit(1); + } + } +} \ No newline at end of file From 48ac64fff9ccc1e258d4adf257dd5223fa050796 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Nov 2023 17:30:53 +0000 Subject: [PATCH 0641/1003] feat: [#508] add container healthcheck for API --- Containerfile | 6 ++-- src/bin/http_health_check.rs | 6 ++-- src/servers/apis/routes.rs | 7 ++-- .../apis/v1/context/health_check/handlers.rs | 11 ++++++ .../apis/v1/context/health_check/mod.rs | 34 +++++++++++++++++++ .../apis/v1/context/health_check/resources.rs | 14 ++++++++ src/servers/apis/v1/context/mod.rs | 1 + tests/servers/api/v1/client.rs | 2 +- .../api/v1/contract/context/health_check.rs | 20 +++++++++++ tests/servers/api/v1/contract/context/mod.rs | 1 + 10 files changed, 94 insertions(+), 8 deletions(-) create mode 100644 src/servers/apis/v1/context/health_check/handlers.rs create mode 100644 src/servers/apis/v1/context/health_check/mod.rs create mode 100644 src/servers/apis/v1/context/health_check/resources.rs create mode 100644 tests/servers/api/v1/contract/context/health_check.rs diff --git a/Containerfile b/Containerfile index be71017db..244f7bdfc 100644 --- a/Containerfile +++ b/Containerfile @@ -85,7 +85,7 @@ COPY --from=build \ RUN cargo nextest run --workspace-remap /test/src/ --extract-to /test/src/ --no-run --archive-file 
/test/torrust-tracker.tar.zst RUN cargo nextest run --workspace-remap /test/src/ --target-dir-remap /test/src/target/ --cargo-metadata /test/src/target/nextest/cargo-metadata.json --binaries-metadata /test/src/target/nextest/binaries-metadata.json -RUN mkdir -p /app/bin/; cp -l /test/src/target/release/torrust-tracker /app/bin/torrust-tracker +RUN mkdir -p /app/bin/; cp -l /test/src/target/release/torrust-tracker /app/bin/torrust-tracker; cp -l /test/src/target/release/http_health_check /app/bin/http_health_check RUN mkdir -p /app/lib/; cp -l $(realpath $(ldd /app/bin/torrust-tracker | grep "libz\.so\.1" | awk '{print $3}')) /app/lib/libz.so.1 RUN chown -R root:root /app; chmod -R u=rw,go=r,a+X /app; chmod -R a+x /app/bin @@ -136,5 +136,7 @@ CMD ["sh"] FROM runtime as release ENV RUNTIME="release" COPY --from=test /app/ /usr/ -# HEALTHCHECK CMD ["/usr/bin/wget", "--no-verbose", "--tries=1", "--spider", "localhost:${API_PORT}/version"] +HEALTHCHECK --interval=5s --timeout=5s --start-period=3s --retries=3 \ + CMD /usr/bin/http_health_check http://localhost:${API_PORT}/health_check \ + || exit 1 CMD ["/usr/bin/torrust-tracker"] diff --git a/src/bin/http_health_check.rs b/src/bin/http_health_check.rs index 2c39f2a79..313f44045 100644 --- a/src/bin/http_health_check.rs +++ b/src/bin/http_health_check.rs @@ -10,8 +10,8 @@ use std::{env, process}; async fn main() { let args: Vec = env::args().collect(); if args.len() != 2 { - eprintln!("Usage: cargo run --bin health_check "); - eprintln!("Example: cargo run --bin http_health_check http://localhost:1212/api/v1/stats?token=MyAccessToken"); + eprintln!("Usage: cargo run --bin http_health_check "); + eprintln!("Example: cargo run --bin http_health_check http://127.0.0.1:1212/health_check"); std::process::exit(1); } @@ -34,4 +34,4 @@ async fn main() { process::exit(1); } } -} \ No newline at end of file +} diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index 7801389f3..0740e1f6a 100644 --- 
a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -7,10 +7,12 @@ //! first path segment. For example: `/api/v1/torrents`. use std::sync::Arc; +use axum::routing::get; use axum::{middleware, Router}; use tower_http::compression::CompressionLayer; use super::v1; +use super::v1::context::health_check::handlers::health_check_handler; use crate::tracker::Tracker; /// Add all API routes to the router. @@ -18,14 +20,15 @@ use crate::tracker::Tracker; pub fn router(tracker: Arc) -> Router { let router = Router::new(); - let prefix = "/api"; + let api_url_prefix = "/api"; - let router = v1::routes::add(prefix, router, tracker.clone()); + let router = v1::routes::add(api_url_prefix, router, tracker.clone()); router .layer(middleware::from_fn_with_state( tracker.config.clone(), v1::middlewares::auth::auth, )) + .route("/health_check", get(health_check_handler)) .layer(CompressionLayer::new()) } diff --git a/src/servers/apis/v1/context/health_check/handlers.rs b/src/servers/apis/v1/context/health_check/handlers.rs new file mode 100644 index 000000000..bfbeab549 --- /dev/null +++ b/src/servers/apis/v1/context/health_check/handlers.rs @@ -0,0 +1,11 @@ +//! API handlers for the [`stats`](crate::servers::apis::v1::context::health_check) +//! API context. + +use axum::Json; + +use super::resources::{Report, Status}; + +/// Endpoint for container health check. +pub async fn health_check_handler() -> Json { + Json(Report { status: Status::Ok }) +} diff --git a/src/servers/apis/v1/context/health_check/mod.rs b/src/servers/apis/v1/context/health_check/mod.rs new file mode 100644 index 000000000..c62c5e97b --- /dev/null +++ b/src/servers/apis/v1/context/health_check/mod.rs @@ -0,0 +1,34 @@ +//! API health check endpoint. +//! +//! It is used to check is the service is running. Especially for containers. +//! +//! # Endpoints +//! +//! - [Health Check](#health-check) +//! +//! # Health Check +//! +//! `GET /health_check` +//! +//! Returns the API status. +//! +//! 
**Example request** +//! +//! ```bash +//! curl "http://127.0.0.1:1212/health_check" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "status": "Ok", +//! } +//! ``` +//! +//! **Resource** +//! +//! Refer to the API [`Stats`](crate::servers::apis::v1::context::health_check::resources::Report) +//! resource for more information about the response attributes. +pub mod handlers; +pub mod resources; diff --git a/src/servers/apis/v1/context/health_check/resources.rs b/src/servers/apis/v1/context/health_check/resources.rs new file mode 100644 index 000000000..9830e643c --- /dev/null +++ b/src/servers/apis/v1/context/health_check/resources.rs @@ -0,0 +1,14 @@ +//! API resources for the [`stats`](crate::servers::apis::v1::context::health_check) +//! API context. +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub enum Status { + Ok, + Error, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Report { + pub status: Status, +} diff --git a/src/servers/apis/v1/context/mod.rs b/src/servers/apis/v1/context/mod.rs index 5e268a429..be67cd96a 100644 --- a/src/servers/apis/v1/context/mod.rs +++ b/src/servers/apis/v1/context/mod.rs @@ -3,6 +3,7 @@ //! Each context is a module that contains the API endpoints related to a //! specific resource group. 
pub mod auth_key; +pub mod health_check; pub mod stats; pub mod torrent; pub mod whitelist; diff --git a/tests/servers/api/v1/client.rs b/tests/servers/api/v1/client.rs index 2b6db2e77..61e98e742 100644 --- a/tests/servers/api/v1/client.rs +++ b/tests/servers/api/v1/client.rs @@ -101,7 +101,7 @@ impl Client { } } -async fn get(path: &str, query: Option) -> Response { +pub async fn get(path: &str, query: Option) -> Response { match query { Some(params) => reqwest::Client::builder() .build() diff --git a/tests/servers/api/v1/contract/context/health_check.rs b/tests/servers/api/v1/contract/context/health_check.rs new file mode 100644 index 000000000..3b6c98374 --- /dev/null +++ b/tests/servers/api/v1/contract/context/health_check.rs @@ -0,0 +1,20 @@ +use torrust_tracker::servers::apis::v1::context::health_check::resources::{Report, Status}; +use torrust_tracker_test_helpers::configuration; + +use crate::servers::api::test_environment::running_test_environment; +use crate::servers::api::v1::client::get; + +#[tokio::test] +async fn health_check_endpoint_should_return_status_ok_if_api_is_running() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let url = format!("http://{}/health_check", test_env.get_connection_info().bind_address); + + let response = get(&url, None).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::().await.unwrap(), Report { status: Status::Ok }); + + test_env.stop().await; +} diff --git a/tests/servers/api/v1/contract/context/mod.rs b/tests/servers/api/v1/contract/context/mod.rs index 6d3fb7566..032e13b0b 100644 --- a/tests/servers/api/v1/contract/context/mod.rs +++ b/tests/servers/api/v1/contract/context/mod.rs @@ -1,4 +1,5 @@ pub mod auth_key; +pub mod health_check; pub mod stats; pub mod torrent; pub mod whitelist; From e1a45a2561a78c83fc3c6b44063a0aae073ac227 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: 
Fri, 24 Nov 2023 10:30:45 +0000 Subject: [PATCH 0642/1003] feat: [#508] Health Check API but no checks yet New Health CHeck API, but it is not checking anything yet. You can call it at: http://localhost:1313/health_check --- Containerfile | 5 +- docs/containers.md | 1 + packages/configuration/src/lib.rs | 50 +++++++++---- packages/test-helpers/src/configuration.rs | 4 + .../config/tracker.container.mysql.toml | 3 + .../config/tracker.container.sqlite3.toml | 3 + .../config/tracker.development.sqlite3.toml | 3 + src/app.rs | 18 ++++- src/bootstrap/jobs/health_check_api.rs | 74 +++++++++++++++++++ src/bootstrap/jobs/mod.rs | 1 + src/lib.rs | 29 ++++---- src/servers/health_check_api/mod.rs | 1 + src/servers/health_check_api/server.rs | 52 +++++++++++++ src/servers/mod.rs | 1 + tests/servers/health_check_api/client.rs | 5 ++ tests/servers/health_check_api/contract.rs | 22 ++++++ tests/servers/health_check_api/mod.rs | 3 + .../health_check_api/test_environment.rs | 32 ++++++++ tests/servers/mod.rs | 1 + 19 files changed, 275 insertions(+), 33 deletions(-) create mode 100644 src/bootstrap/jobs/health_check_api.rs create mode 100644 src/servers/health_check_api/mod.rs create mode 100644 src/servers/health_check_api/server.rs create mode 100644 tests/servers/health_check_api/client.rs create mode 100644 tests/servers/health_check_api/contract.rs create mode 100644 tests/servers/health_check_api/mod.rs create mode 100644 tests/servers/health_check_api/test_environment.rs diff --git a/Containerfile b/Containerfile index 244f7bdfc..8ea555c2d 100644 --- a/Containerfile +++ b/Containerfile @@ -101,6 +101,7 @@ ARG USER_ID=1000 ARG UDP_PORT=6969 ARG HTTP_PORT=7070 ARG API_PORT=1212 +ARG HEALTH_CHECK_API_PORT=1313 ENV TORRUST_TRACKER_PATH_CONFIG=${TORRUST_TRACKER_PATH_CONFIG} ENV TORRUST_TRACKER_DATABASE_DRIVER=${TORRUST_TRACKER_DATABASE_DRIVER} @@ -108,11 +109,13 @@ ENV USER_ID=${USER_ID} ENV UDP_PORT=${UDP_PORT} ENV HTTP_PORT=${HTTP_PORT} ENV API_PORT=${API_PORT} +ENV 
HEALTH_CHECK_API_PORT=${HEALTH_CHECK_API_PORT} ENV TZ=Etc/UTC EXPOSE ${UDP_PORT}/udp EXPOSE ${HTTP_PORT}/tcp EXPOSE ${API_PORT}/tcp +EXPOSE ${HEALTH_CHECK_API_PORT}/tcp RUN mkdir -p /var/lib/torrust/tracker /var/log/torrust/tracker /etc/torrust/tracker @@ -137,6 +140,6 @@ FROM runtime as release ENV RUNTIME="release" COPY --from=test /app/ /usr/ HEALTHCHECK --interval=5s --timeout=5s --start-period=3s --retries=3 \ - CMD /usr/bin/http_health_check http://localhost:${API_PORT}/health_check \ + CMD /usr/bin/http_health_check http://localhost:${HEALTH_CHECK_API_PORT}/health_check \ || exit 1 CMD ["/usr/bin/torrust-tracker"] diff --git a/docs/containers.md b/docs/containers.md index 737ce40a0..2b06c0f76 100644 --- a/docs/containers.md +++ b/docs/containers.md @@ -146,6 +146,7 @@ The following environmental variables can be set: - `UDP_PORT` - The port for the UDP tracker. This should match the port used in the configuration, (default `6969`). - `HTTP_PORT` - The port for the HTTP tracker. This should match the port used in the configuration, (default `7070`). - `API_PORT` - The port for the tracker API. This should match the port used in the configuration, (default `1212`). +- `HEALTH_CHECK_API_PORT` - The port for the Health Check API. This should match the port used in the configuration, (default `1313`). ### Sockets diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 059316a26..217f8a8be 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -191,40 +191,43 @@ //! The default configuration is: //! //! ```toml -//! log_level = "info" -//! mode = "public" +//! announce_interval = 120 //! db_driver = "Sqlite3" //! db_path = "./storage/tracker/lib/database/sqlite3.db" -//! announce_interval = 120 -//! min_announce_interval = 120 +//! external_ip = "0.0.0.0" +//! inactive_peer_cleanup_interval = 600 +//! log_level = "info" //! max_peer_timeout = 900 +//! min_announce_interval = 120 +//! 
mode = "public" //! on_reverse_proxy = false -//! external_ip = "0.0.0.0" -//! tracker_usage_statistics = true //! persistent_torrent_completed_stat = false -//! inactive_peer_cleanup_interval = 600 //! remove_peerless_torrents = true +//! tracker_usage_statistics = true //! //! [[udp_trackers]] -//! enabled = false //! bind_address = "0.0.0.0:6969" +//! enabled = false //! //! [[http_trackers]] -//! enabled = false //! bind_address = "0.0.0.0:7070" -//! ssl_enabled = false +//! enabled = false //! ssl_cert_path = "" +//! ssl_enabled = false //! ssl_key_path = "" //! //! [http_api] -//! enabled = true //! bind_address = "127.0.0.1:1212" -//! ssl_enabled = false +//! enabled = true //! ssl_cert_path = "" +//! ssl_enabled = false //! ssl_key_path = "" //! //! [http_api.access_tokens] //! admin = "MyAccessToken" +//! +//! [health_check_api] +//! bind_address = "127.0.0.1:1313" //!``` use std::collections::{HashMap, HashSet}; use std::net::IpAddr; @@ -342,7 +345,7 @@ pub struct HttpApi { /// The address the tracker will bind to. /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to /// listen to all interfaces, use `0.0.0.0`. If you want the operating - /// system to choose a random port, use port `0`. + /// system to choose a random port, use port `0`. pub bind_address: String, /// Weather the HTTP API will use SSL or not. pub ssl_enabled: bool, @@ -363,9 +366,7 @@ impl HttpApi { fn override_admin_token(&mut self, api_admin_token: &str) { self.access_tokens.insert("admin".to_string(), api_admin_token.to_string()); } -} -impl HttpApi { /// Checks if the given token is one of the token in the configuration. #[must_use] pub fn contains_token(&self, token: &str) -> bool { @@ -375,6 +376,17 @@ impl HttpApi { } } +/// Configuration for the Health Check API. +#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct HealthCheckApi { + /// The address the API will bind to. 
+ /// The format is `ip:port`, for example `127.0.0.1:1313`. If you want to + /// listen to all interfaces, use `0.0.0.0`. If you want the operating + /// system to choose a random port, use port `0`. + pub bind_address: String, +} + /// Core configuration for the tracker. #[allow(clippy::struct_excessive_bools)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] @@ -465,6 +477,8 @@ pub struct Configuration { pub http_trackers: Vec, /// The HTTP API configuration. pub http_api: HttpApi, + /// The Health Check API configuration. + pub health_check_api: HealthCheckApi, } /// Errors that can occur when loading the configuration. @@ -529,6 +543,9 @@ impl Default for Configuration { .cloned() .collect(), }, + health_check_api: HealthCheckApi { + bind_address: String::from("127.0.0.1:1313"), + }, }; configuration.udp_trackers.push(UdpTracker { enabled: false, @@ -676,6 +693,9 @@ mod tests { [http_api.access_tokens] admin = "MyAccessToken" + + [health_check_api] + bind_address = "127.0.0.1:1313" "# .lines() .map(str::trim_start) diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 437475ee2..b41f435ec 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -37,6 +37,10 @@ pub fn ephemeral() -> Configuration { config.http_api.enabled = true; config.http_api.bind_address = format!("127.0.0.1:{}", &api_port); + // Ephemeral socket address for Health Check API + let health_check_api_port = 0u16; + config.health_check_api.bind_address = format!("127.0.0.1:{}", &health_check_api_port); + // Ephemeral socket address for UDP tracker let udp_port = 0u16; config.udp_trackers[0].enabled = true; diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml index fb9cbf789..e7714c229 100644 --- a/share/default/config/tracker.container.mysql.toml +++ b/share/default/config/tracker.container.mysql.toml @@ -36,3 +36,6 @@ 
ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" [http_api.access_tokens] admin = "MyAccessToken" + +[health_check_api] +bind_address = "127.0.0.1:1313" diff --git a/share/default/config/tracker.container.sqlite3.toml b/share/default/config/tracker.container.sqlite3.toml index 54cfd4023..4ec055c56 100644 --- a/share/default/config/tracker.container.sqlite3.toml +++ b/share/default/config/tracker.container.sqlite3.toml @@ -36,3 +36,6 @@ ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" [http_api.access_tokens] admin = "MyAccessToken" + +[health_check_api] +bind_address = "127.0.0.1:1313" diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 20f95ac5d..04934dd8a 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -32,3 +32,6 @@ ssl_key_path = "" [http_api.access_tokens] admin = "MyAccessToken" + +[health_check_api] +bind_address = "127.0.0.1:1313" diff --git a/src/app.rs b/src/app.rs index 3fc790a23..6478cffb8 100644 --- a/src/app.rs +++ b/src/app.rs @@ -11,7 +11,11 @@ //! - Loading data from the database when it's needed. //! - Starting some jobs depending on the configuration. //! -//! The started jobs may be: +//! Jobs executed always: +//! +//! - Health Check API +//! +//! Optional jobs: //! //! - Torrent cleaner: it removes inactive peers and (optionally) peerless torrents. //! - UDP trackers: the user can enable multiple UDP tracker on several ports. @@ -23,13 +27,16 @@ use log::warn; use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; -use crate::bootstrap::jobs::{http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; +use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::servers::http::Version; use crate::tracker; /// # Panics /// -/// Will panic if the socket address for API can't be parsed. 
+/// Will panic if: +/// +/// - Can't retrieve tracker keys from database. +/// - Can't load whitelist from database. pub async fn start(config: Arc, tracker: Arc) -> Vec> { let mut jobs: Vec> = Vec::new(); @@ -78,10 +85,13 @@ pub async fn start(config: Arc, tracker: Arc) - jobs.push(tracker_apis::start_job(&config.http_api, tracker.clone()).await); } - // Remove torrents without peers, every interval + // Start runners to remove torrents without peers, every interval if config.inactive_peer_cleanup_interval > 0 { jobs.push(torrent_cleanup::start_job(&config, &tracker)); } + // Start Health Check API + jobs.push(health_check_api::start_job(&config.health_check_api).await); + jobs } diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs new file mode 100644 index 000000000..29c4ce144 --- /dev/null +++ b/src/bootstrap/jobs/health_check_api.rs @@ -0,0 +1,74 @@ +//! Health Check API job starter. +//! +//! The [`health_check_api::start_job`](crate::bootstrap::jobs::health_check_api::start_job) +//! function starts the Health Check REST API. +//! +//! The [`health_check_api::start_job`](crate::bootstrap::jobs::health_check_api::start_job) +//! function spawns a new asynchronous task, that tasks is the "**launcher**". +//! The "**launcher**" starts the actual server and sends a message back +//! to the main application. The main application waits until receives +//! the message [`ApiServerJobStarted`] +//! from the "**launcher**". +//! +//! The "**launcher**" is an intermediary thread that decouples the Health Check +//! API server from the process that handles it. +//! +//! Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) +//! for the API configuration options. 
+use std::net::SocketAddr; + +use log::info; +use tokio::sync::oneshot; +use tokio::task::JoinHandle; +use torrust_tracker_configuration::HealthCheckApi; + +use crate::servers::health_check_api::server; + +/// This is the message that the "launcher" spawned task sends to the main +/// application process to notify the API server was successfully started. +/// +/// > **NOTICE**: it does not mean the API server is ready to receive requests. +/// It only means the new server started. It might take some time to the server +/// to be ready to accept request. +#[derive(Debug)] +pub struct ApiServerJobStarted { + pub bound_addr: SocketAddr, +} + +/// This function starts a new Health Check API server with the provided +/// configuration. +/// +/// The functions starts a new concurrent task that will run the API server. +/// This task will send a message to the main application process to notify +/// that the API server was successfully started. +/// +/// # Panics +/// +/// It would panic if unable to send the `ApiServerJobStarted` notice. +pub async fn start_job(config: &HealthCheckApi) -> JoinHandle<()> { + let bind_addr = config + .bind_address + .parse::() + .expect("Health Check API bind_address invalid."); + + let (tx, rx) = oneshot::channel::(); + + // Run the API server + let join_handle = tokio::spawn(async move { + info!("Starting Health Check API server: http://{}", bind_addr); + + let handle = server::start(bind_addr, tx); + + if let Ok(()) = handle.await { + info!("Health Check API server on http://{} stopped", bind_addr); + } + }); + + // Wait until the API server job is running + match rx.await { + Ok(_msg) => info!("Torrust Health Check API server started"), + Err(e) => panic!("the Health Check API server was dropped: {e}"), + } + + join_handle +} diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index c519a9f4b..8c85ba45b 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -6,6 +6,7 @@ //! 2. 
Launch all the application services as concurrent jobs. //! //! This modules contains all the functions needed to start those jobs. +pub mod health_check_api; pub mod http_tracker; pub mod torrent_cleanup; pub mod tracker_apis; diff --git a/src/lib.rs b/src/lib.rs index c2e70a8b1..8d453f177 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -148,41 +148,44 @@ //! The default configuration is: //! //! ```toml -//! log_level = "info" -//! mode = "public" +//! announce_interval = 120 //! db_driver = "Sqlite3" //! db_path = "./storage/tracker/lib/database/sqlite3.db" -//! announce_interval = 120 -//! min_announce_interval = 120 +//! external_ip = "0.0.0.0" +//! inactive_peer_cleanup_interval = 600 +//! log_level = "info" //! max_peer_timeout = 900 +//! min_announce_interval = 120 +//! mode = "public" //! on_reverse_proxy = false -//! external_ip = "0.0.0.0" -//! tracker_usage_statistics = true //! persistent_torrent_completed_stat = false -//! inactive_peer_cleanup_interval = 600 //! remove_peerless_torrents = true +//! tracker_usage_statistics = true //! //! [[udp_trackers]] -//! enabled = false //! bind_address = "0.0.0.0:6969" +//! enabled = false //! //! [[http_trackers]] -//! enabled = false //! bind_address = "0.0.0.0:7070" -//! ssl_enabled = false +//! enabled = false //! ssl_cert_path = "" +//! ssl_enabled = false //! ssl_key_path = "" //! //! [http_api] -//! enabled = true //! bind_address = "127.0.0.1:1212" -//! ssl_enabled = false +//! enabled = true //! ssl_cert_path = "" +//! ssl_enabled = false //! ssl_key_path = "" //! //! [http_api.access_tokens] //! admin = "MyAccessToken" -//! ``` +//! +//! [health_check_api] +//! bind_address = "127.0.0.1:1313" +//!``` //! //! The default configuration includes one disabled UDP server, one disabled HTTP server and the enabled API. //! 
diff --git a/src/servers/health_check_api/mod.rs b/src/servers/health_check_api/mod.rs new file mode 100644 index 000000000..74f47ad34 --- /dev/null +++ b/src/servers/health_check_api/mod.rs @@ -0,0 +1 @@ +pub mod server; diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs new file mode 100644 index 000000000..cbd1b8703 --- /dev/null +++ b/src/servers/health_check_api/server.rs @@ -0,0 +1,52 @@ +//! Logic to run the Health Check HTTP API server. +//! +//! This API is intended to be used by the container infrastructure to check if +//! the whole application is healthy. +use std::net::SocketAddr; + +use axum::routing::get; +use axum::{Json, Router}; +use futures::Future; +use log::info; +use serde_json::{json, Value}; +use tokio::sync::oneshot::Sender; + +use crate::bootstrap::jobs::health_check_api::ApiServerJobStarted; + +/// Starts Health Check API server. +/// +/// # Panics +/// +/// Will panic if binding to the socket address fails. +pub fn start(socket_addr: SocketAddr, tx: Sender) -> impl Future> { + let app = Router::new() + .route("/", get(|| async { Json(json!({})) })) + .route("/health_check", get(health_check_handler)); + + let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); + + let bound_addr = server.local_addr(); + + info!("Health Check API server listening on http://{}", bound_addr); + + let running = server.with_graceful_shutdown(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + info!("Stopping Torrust Health Check API server o http://{} ...", socket_addr); + }); + + tx.send(ApiServerJobStarted { bound_addr }) + .expect("the Health Check API server should not be dropped"); + + running +} + +/// Endpoint for container health check. 
+async fn health_check_handler() -> Json { + // todo: if enabled, check if the Tracker API is healthy + + // todo: if enabled, check if the HTTP Tracker is healthy + + // todo: if enabled, check if the UDP Tracker is healthy + + Json(json!({ "status": "Ok" })) +} diff --git a/src/servers/mod.rs b/src/servers/mod.rs index 38b4b70cd..077109f35 100644 --- a/src/servers/mod.rs +++ b/src/servers/mod.rs @@ -1,5 +1,6 @@ //! Servers. Services that can be started and stopped. pub mod apis; +pub mod health_check_api; pub mod http; pub mod signals; pub mod udp; diff --git a/tests/servers/health_check_api/client.rs b/tests/servers/health_check_api/client.rs new file mode 100644 index 000000000..3d8bdc7d6 --- /dev/null +++ b/tests/servers/health_check_api/client.rs @@ -0,0 +1,5 @@ +use reqwest::Response; + +pub async fn get(path: &str) -> Response { + reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap() +} diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs new file mode 100644 index 000000000..575e10665 --- /dev/null +++ b/tests/servers/health_check_api/contract.rs @@ -0,0 +1,22 @@ +use torrust_tracker::servers::apis::v1::context::health_check::resources::{Report, Status}; +use torrust_tracker_test_helpers::configuration; + +use crate::servers::health_check_api::client::get; +use crate::servers::health_check_api::test_environment; + +#[tokio::test] +async fn health_check_endpoint_should_return_status_ok() { + let configuration = configuration::ephemeral(); + + let (bound_addr, test_env) = test_environment::start(&configuration.health_check_api).await; + + let url = format!("http://{bound_addr}/health_check"); + + let response = get(&url).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::().await.unwrap(), Report { status: Status::Ok }); + + test_env.abort(); +} diff --git 
a/tests/servers/health_check_api/mod.rs b/tests/servers/health_check_api/mod.rs new file mode 100644 index 000000000..89f19a334 --- /dev/null +++ b/tests/servers/health_check_api/mod.rs @@ -0,0 +1,3 @@ +pub mod client; +pub mod contract; +pub mod test_environment; diff --git a/tests/servers/health_check_api/test_environment.rs b/tests/servers/health_check_api/test_environment.rs new file mode 100644 index 000000000..6ad90eac7 --- /dev/null +++ b/tests/servers/health_check_api/test_environment.rs @@ -0,0 +1,32 @@ +use std::net::SocketAddr; + +use tokio::sync::oneshot; +use tokio::task::JoinHandle; +use torrust_tracker::bootstrap::jobs::health_check_api::ApiServerJobStarted; +use torrust_tracker::servers::health_check_api::server; +use torrust_tracker_configuration::HealthCheckApi; + +/// Start the test environment for the Health Check API. +/// It runs the API server. +pub async fn start(config: &HealthCheckApi) -> (SocketAddr, JoinHandle<()>) { + let bind_addr = config + .bind_address + .parse::() + .expect("Health Check API bind_address invalid."); + + let (tx, rx) = oneshot::channel::(); + + let join_handle = tokio::spawn(async move { + let handle = server::start(bind_addr, tx); + if let Ok(()) = handle.await { + panic!("Health Check API server on http://{bind_addr} stopped"); + } + }); + + let bound_addr = match rx.await { + Ok(msg) => msg.bound_addr, + Err(e) => panic!("the Health Check API server was dropped: {e}"), + }; + + (bound_addr, join_handle) +} diff --git a/tests/servers/mod.rs b/tests/servers/mod.rs index 7c30b6f40..65e9a665b 100644 --- a/tests/servers/mod.rs +++ b/tests/servers/mod.rs @@ -1,3 +1,4 @@ mod api; +pub mod health_check_api; mod http; mod udp; From ef296f76bf3160e219f4b842fe1d055d4c9dc308 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 24 Nov 2023 16:13:04 +0000 Subject: [PATCH 0643/1003] feat: [#508] app health check endpoint checks API The app health check endpoint checks if the API is running healthy when it is enabled.
--- packages/test-helpers/src/configuration.rs | 12 +++++++ src/app.rs | 2 +- src/bootstrap/jobs/health_check_api.rs | 8 +++-- src/servers/health_check_api/handlers.rs | 31 +++++++++++++++++++ src/servers/health_check_api/mod.rs | 3 ++ src/servers/health_check_api/resources.rs | 31 +++++++++++++++++++ src/servers/health_check_api/responses.rs | 11 +++++++ src/servers/health_check_api/server.rs | 25 +++++++-------- tests/servers/health_check_api/contract.rs | 10 +++--- .../health_check_api/test_environment.rs | 8 +++-- 10 files changed, 115 insertions(+), 26 deletions(-) create mode 100644 src/servers/health_check_api/handlers.rs create mode 100644 src/servers/health_check_api/resources.rs create mode 100644 src/servers/health_check_api/responses.rs diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index b41f435ec..388d0151f 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -144,3 +144,15 @@ pub fn ephemeral_ipv6() -> Configuration { cfg } + +/// Ephemeral without running any services. +#[must_use] +pub fn ephemeral_with_no_services() -> Configuration { + let mut cfg = ephemeral(); + + cfg.http_api.enabled = false; + cfg.http_trackers[0].enabled = false; + cfg.udp_trackers[0].enabled = false; + + cfg +} diff --git a/src/app.rs b/src/app.rs index 6478cffb8..e749f9c64 100644 --- a/src/app.rs +++ b/src/app.rs @@ -91,7 +91,7 @@ pub async fn start(config: Arc, tracker: Arc) - } // Start Health Check API - jobs.push(health_check_api::start_job(&config.health_check_api).await); + jobs.push(health_check_api::start_job(config).await); jobs } diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index 29c4ce144..96a703afc 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -16,11 +16,12 @@ //! 
Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) //! for the API configuration options. use std::net::SocketAddr; +use std::sync::Arc; use log::info; use tokio::sync::oneshot; use tokio::task::JoinHandle; -use torrust_tracker_configuration::HealthCheckApi; +use torrust_tracker_configuration::Configuration; use crate::servers::health_check_api::server; @@ -45,8 +46,9 @@ pub struct ApiServerJobStarted { /// # Panics /// /// It would panic if unable to send the `ApiServerJobStarted` notice. -pub async fn start_job(config: &HealthCheckApi) -> JoinHandle<()> { +pub async fn start_job(config: Arc) -> JoinHandle<()> { let bind_addr = config + .health_check_api .bind_address .parse::() .expect("Health Check API bind_address invalid."); @@ -57,7 +59,7 @@ pub async fn start_job(config: &HealthCheckApi) -> JoinHandle<()> { let join_handle = tokio::spawn(async move { info!("Starting Health Check API server: http://{}", bind_addr); - let handle = server::start(bind_addr, tx); + let handle = server::start(bind_addr, tx, config.clone()); if let Ok(()) = handle.await { info!("Health Check API server on http://{} stopped", bind_addr); diff --git a/src/servers/health_check_api/handlers.rs b/src/servers/health_check_api/handlers.rs new file mode 100644 index 000000000..347106d6e --- /dev/null +++ b/src/servers/health_check_api/handlers.rs @@ -0,0 +1,31 @@ +use std::sync::Arc; + +use axum::extract::State; +use axum::Json; +use torrust_tracker_configuration::Configuration; + +use super::resources::Report; +use super::responses; + +/// Endpoint for container health check. +pub(crate) async fn health_check_handler(State(config): State>) -> Json { + if config.http_api.enabled { + let health_check_url = format!("http://{}/health_check", config.http_api.bind_address); + if !get_req_is_ok(&health_check_url).await { + return responses::error(format!("API is not healthy. 
Health check endpoint: {health_check_url}")); + } + } + + // todo: for all HTTP Trackers, if enabled, check if is healthy + + // todo: for all UDP Trackers, if enabled, check if is healthy + + responses::ok() +} + +async fn get_req_is_ok(url: &str) -> bool { + match reqwest::get(url).await { + Ok(response) => response.status().is_success(), + Err(_err) => false, + } +} diff --git a/src/servers/health_check_api/mod.rs b/src/servers/health_check_api/mod.rs index 74f47ad34..ec608387d 100644 --- a/src/servers/health_check_api/mod.rs +++ b/src/servers/health_check_api/mod.rs @@ -1 +1,4 @@ +pub mod handlers; +pub mod resources; +pub mod responses; pub mod server; diff --git a/src/servers/health_check_api/resources.rs b/src/servers/health_check_api/resources.rs new file mode 100644 index 000000000..3fadcf456 --- /dev/null +++ b/src/servers/health_check_api/resources.rs @@ -0,0 +1,31 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub enum Status { + Ok, + Error, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Report { + pub status: Status, + pub message: String, +} + +impl Report { + #[must_use] + pub fn ok() -> Report { + Self { + status: Status::Ok, + message: String::new(), + } + } + + #[must_use] + pub fn error(message: String) -> Report { + Self { + status: Status::Error, + message, + } + } +} diff --git a/src/servers/health_check_api/responses.rs b/src/servers/health_check_api/responses.rs new file mode 100644 index 000000000..043e271db --- /dev/null +++ b/src/servers/health_check_api/responses.rs @@ -0,0 +1,11 @@ +use axum::Json; + +use super::resources::Report; + +pub fn ok() -> Json { + Json(Report::ok()) +} + +pub fn error(message: String) -> Json { + Json(Report::error(message)) +} diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs index cbd1b8703..562772a87 100644 --- a/src/servers/health_check_api/server.rs +++ 
b/src/servers/health_check_api/server.rs @@ -3,25 +3,33 @@ //! This API is intended to be used by the container infrastructure to check if //! the whole application is healthy. use std::net::SocketAddr; +use std::sync::Arc; use axum::routing::get; use axum::{Json, Router}; use futures::Future; use log::info; -use serde_json::{json, Value}; +use serde_json::json; use tokio::sync::oneshot::Sender; +use torrust_tracker_configuration::Configuration; use crate::bootstrap::jobs::health_check_api::ApiServerJobStarted; +use crate::servers::health_check_api::handlers::health_check_handler; /// Starts Health Check API server. /// /// # Panics /// /// Will panic if binding to the socket address fails. -pub fn start(socket_addr: SocketAddr, tx: Sender) -> impl Future> { +pub fn start( + socket_addr: SocketAddr, + tx: Sender, + config: Arc, +) -> impl Future> { let app = Router::new() .route("/", get(|| async { Json(json!({})) })) - .route("/health_check", get(health_check_handler)); + .route("/health_check", get(health_check_handler)) + .with_state(config); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -39,14 +47,3 @@ pub fn start(socket_addr: SocketAddr, tx: Sender) -> impl F running } - -/// Endpoint for container health check. 
-async fn health_check_handler() -> Json { - // todo: if enabled, check if the Tracker API is healthy - - // todo: if enabled, check if the HTTP Tracker is healthy - - // todo: if enabled, check if the UDP Tracker is healthy - - Json(json!({ "status": "Ok" })) -} diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs index 575e10665..6b816b85f 100644 --- a/tests/servers/health_check_api/contract.rs +++ b/tests/servers/health_check_api/contract.rs @@ -1,14 +1,14 @@ -use torrust_tracker::servers::apis::v1::context::health_check::resources::{Report, Status}; +use torrust_tracker::servers::health_check_api::resources::Report; use torrust_tracker_test_helpers::configuration; use crate::servers::health_check_api::client::get; use crate::servers::health_check_api::test_environment; #[tokio::test] -async fn health_check_endpoint_should_return_status_ok() { - let configuration = configuration::ephemeral(); +async fn health_check_endpoint_should_return_status_ok_when_no_service_is_running() { + let configuration = configuration::ephemeral_with_no_services(); - let (bound_addr, test_env) = test_environment::start(&configuration.health_check_api).await; + let (bound_addr, test_env) = test_environment::start(configuration.into()).await; let url = format!("http://{bound_addr}/health_check"); @@ -16,7 +16,7 @@ async fn health_check_endpoint_should_return_status_ok() { assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - assert_eq!(response.json::().await.unwrap(), Report { status: Status::Ok }); + assert_eq!(response.json::().await.unwrap(), Report::ok()); test_env.abort(); } diff --git a/tests/servers/health_check_api/test_environment.rs b/tests/servers/health_check_api/test_environment.rs index 6ad90eac7..46e54dc47 100644 --- a/tests/servers/health_check_api/test_environment.rs +++ b/tests/servers/health_check_api/test_environment.rs @@ -1,15 +1,17 @@ use 
std::net::SocketAddr; +use std::sync::Arc; use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker::bootstrap::jobs::health_check_api::ApiServerJobStarted; use torrust_tracker::servers::health_check_api::server; -use torrust_tracker_configuration::HealthCheckApi; +use torrust_tracker_configuration::Configuration; /// Start the test environment for the Health Check API. /// It runs the API server. -pub async fn start(config: &HealthCheckApi) -> (SocketAddr, JoinHandle<()>) { +pub async fn start(config: Arc) -> (SocketAddr, JoinHandle<()>) { let bind_addr = config + .health_check_api .bind_address .parse::() .expect("Health Check API bind_address invalid."); @@ -17,7 +19,7 @@ pub async fn start(config: &HealthCheckApi) -> (SocketAddr, JoinHandle<()>) { let (tx, rx) = oneshot::channel::(); let join_handle = tokio::spawn(async move { - let handle = server::start(bind_addr, tx); + let handle = server::start(bind_addr, tx, config.clone()); if let Ok(()) = handle.await { panic!("Health Check API server on http://{bind_addr} stopped"); } From 742130657bc3c04b3497400207e16250433de966 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 24 Nov 2023 17:01:05 +0000 Subject: [PATCH 0644/1003] feat: [#508] add health check endpoint to HTTP tracker http://localhost:7070/health_check And call the endpoint from the general application health check endpoint: http://localhost:1313/health_check Do not call the endpoint if: - The tracker is disabled. - The tracker configuration uses port 0 only known after starting the socket. todo: call the endpoint also when the port is 0 in the configuration. The service can return back to the main app the port assigned by the OS. And the app can pass that port to the global app health check handler.
--- src/servers/health_check_api/handlers.rs | 46 ++++++++++++++++++-- src/servers/http/v1/handlers/health_check.rs | 18 ++++++++ src/servers/http/v1/handlers/mod.rs | 1 + src/servers/http/v1/routes.rs | 4 +- tests/servers/http/client.rs | 4 ++ tests/servers/http/v1/contract.rs | 20 +++++++++ 6 files changed, 88 insertions(+), 5 deletions(-) create mode 100644 src/servers/http/v1/handlers/health_check.rs diff --git a/src/servers/health_check_api/handlers.rs b/src/servers/health_check_api/handlers.rs index 347106d6e..0a95c3211 100644 --- a/src/servers/health_check_api/handlers.rs +++ b/src/servers/health_check_api/handlers.rs @@ -1,3 +1,4 @@ +use std::net::SocketAddr; use std::sync::Arc; use axum::extract::State; @@ -7,16 +8,53 @@ use torrust_tracker_configuration::Configuration; use super::resources::Report; use super::responses; +/// If port 0 is specified in the configuration the OS will automatically +/// assign a free port. But we do not know it from the configuration. +/// We can only know it after starting the socket. +const UNKNOWN_PORT: u16 = 0; + /// Endpoint for container health check. +/// +/// This endpoint only checks services when we know the port from the +/// configuration. If port 0 is specified in the configuration the health check +/// for that service is skipped. pub(crate) async fn health_check_handler(State(config): State>) -> Json { + // todo: when port 0 is specified in the configuration get the port from the + // running service, after starting it as we do for testing with ephemeral + // configurations. + if config.http_api.enabled { - let health_check_url = format!("http://{}/health_check", config.http_api.bind_address); - if !get_req_is_ok(&health_check_url).await { - return responses::error(format!("API is not healthy.
Health check endpoint: {health_check_url}")); + let addr: SocketAddr = config.http_api.bind_address.parse().expect("invalid socket address for API"); + + if addr.port() != UNKNOWN_PORT { + let health_check_url = format!("http://{addr}/health_check"); + + if !get_req_is_ok(&health_check_url).await { + return responses::error(format!("API is not healthy. Health check endpoint: {health_check_url}")); + } } } - // todo: for all HTTP Trackers, if enabled, check if is healthy + for http_tracker_config in &config.http_trackers { + if !http_tracker_config.enabled { + continue; + } + + let addr: SocketAddr = http_tracker_config + .bind_address + .parse() + .expect("invalid socket address for HTTP tracker"); + + if addr.port() != UNKNOWN_PORT { + let health_check_url = format!("http://{addr}/health_check"); + + if !get_req_is_ok(&health_check_url).await { + return responses::error(format!( + "HTTP Tracker is not healthy. Health check endpoint: {health_check_url}" + )); + } + } + } // todo: for all UDP Trackers, if enabled, check if is healthy diff --git a/src/servers/http/v1/handlers/health_check.rs b/src/servers/http/v1/handlers/health_check.rs new file mode 100644 index 000000000..b15af6255 --- /dev/null +++ b/src/servers/http/v1/handlers/health_check.rs @@ -0,0 +1,18 @@ +use axum::Json; +use serde::{Deserialize, Serialize}; + +#[allow(clippy::unused_async)] +pub async fn handler() -> Json { + Json(Report { status: Status::Ok }) +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub enum Status { + Ok, + Error, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Report { + pub status: Status, +} diff --git a/src/servers/http/v1/handlers/mod.rs b/src/servers/http/v1/handlers/mod.rs index d78dee7d5..d7fd05838 100644 --- a/src/servers/http/v1/handlers/mod.rs +++ b/src/servers/http/v1/handlers/mod.rs @@ -7,6 +7,7 @@ use crate::tracker::error::Error; pub mod announce; pub mod common; +pub mod health_check; pub mod scrape; impl From for 
responses::error::Error { diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 6546dcbb8..0b6b419c1 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -6,7 +6,7 @@ use axum::Router; use axum_client_ip::SecureClientIpSource; use tower_http::compression::CompressionLayer; -use super::handlers::{announce, scrape}; +use super::handlers::{announce, health_check, scrape}; use crate::tracker::Tracker; /// It adds the routes to the router. @@ -16,6 +16,8 @@ use crate::tracker::Tracker; #[allow(clippy::needless_pass_by_value)] pub fn router(tracker: Arc) -> Router { Router::new() + // Health check + .route("/health_check", get(health_check::handler)) // Announce request .route("/announce", get(announce::handle_without_key).with_state(tracker.clone())) .route("/announce/:key", get(announce::handle_with_key).with_state(tracker.clone())) diff --git a/tests/servers/http/client.rs b/tests/servers/http/client.rs index 0dbdd9cf6..03ed9aee4 100644 --- a/tests/servers/http/client.rs +++ b/tests/servers/http/client.rs @@ -60,6 +60,10 @@ impl Client { .await } + pub async fn health_check(&self) -> Response { + self.get(&self.build_path("health_check")).await + } + pub async fn get(&self, path: &str) -> Response { self.reqwest.get(self.build_url(path)).send().await.unwrap() } diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 2e24af6b7..b19009454 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -13,6 +13,26 @@ async fn test_environment_should_be_started_and_stopped() { mod for_all_config_modes { + use torrust_tracker::servers::http::v1::handlers::health_check::{Report, Status}; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::http::client::Client; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn 
health_check_endpoint_should_return_ok_if_the_http_tracker_is_running() { + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + + let response = Client::new(*test_env.bind_address()).health_check().await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::().await.unwrap(), Report { status: Status::Ok }); + + test_env.stop().await; + } + mod and_running_on_reverse_proxy { use torrust_tracker_test_helpers::configuration; From 2a05590fddf79a550abab6b6c477ddf562c46669 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 24 Nov 2023 17:32:44 +0000 Subject: [PATCH 0645/1003] refactor: [#508] move UDP tracker client to production code We will use the UDP tracker connection request to check if the UDP tracker service is healthy. --- src/servers/udp/mod.rs | 6 ----- src/servers/udp/server.rs | 2 +- src/shared/bit_torrent/mod.rs | 1 + .../shared/bit_torrent}/udp/client.rs | 27 +++++++++++++++++-- src/shared/bit_torrent/udp/mod.rs | 12 +++++++++ tests/servers/udp/contract.rs | 10 +++---- tests/servers/udp/mod.rs | 6 ----- 7 files changed, 44 insertions(+), 20 deletions(-) rename {tests/servers => src/shared/bit_torrent}/udp/client.rs (79%) create mode 100644 src/shared/bit_torrent/udp/mod.rs diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index 630867218..a50fffd37 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -652,9 +652,3 @@ pub type Port = u16; /// The transaction id. A random number generated byt the peer that is used to /// match requests and responses. pub type TransactionId = i64; - -/// The maximum number of bytes in a UDP packet. -pub const MAX_PACKET_SIZE: usize = 1496; -/// A magic 64-bit integer constant defined in the protocol that is used to -/// identify the protocol. 
-pub const PROTOCOL_ID: i64 = 0x0417_2710_1980; diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 428b76fa1..31e87481e 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -30,7 +30,7 @@ use tokio::task::JoinHandle; use crate::servers::signals::shutdown_signal; use crate::servers::udp::handlers::handle_packet; -use crate::servers::udp::MAX_PACKET_SIZE; +use crate::shared::bit_torrent::udp::MAX_PACKET_SIZE; use crate::tracker::Tracker; /// Error that can occur when starting or stopping the UDP server. diff --git a/src/shared/bit_torrent/mod.rs b/src/shared/bit_torrent/mod.rs index eba90b4ab..872203a1f 100644 --- a/src/shared/bit_torrent/mod.rs +++ b/src/shared/bit_torrent/mod.rs @@ -69,3 +69,4 @@ //!Bencode & bdecode in your browser | pub mod common; pub mod info_hash; +pub mod udp; diff --git a/tests/servers/udp/client.rs b/src/shared/bit_torrent/udp/client.rs similarity index 79% rename from tests/servers/udp/client.rs rename to src/shared/bit_torrent/udp/client.rs index d267adaba..d5c4c9adf 100644 --- a/tests/servers/udp/client.rs +++ b/src/shared/bit_torrent/udp/client.rs @@ -3,9 +3,8 @@ use std::sync::Arc; use aquatic_udp_protocol::{Request, Response}; use tokio::net::UdpSocket; -use torrust_tracker::servers::udp::MAX_PACKET_SIZE; -use crate::servers::udp::source_address; +use crate::shared::bit_torrent::udp::{source_address, MAX_PACKET_SIZE}; #[allow(clippy::module_name_repetitions)] pub struct UdpClient { @@ -13,6 +12,9 @@ pub struct UdpClient { } impl UdpClient { + /// # Panics + /// + /// Will panic if the local address can't be bound. pub async fn bind(local_address: &str) -> Self { let socket = UdpSocket::bind(local_address).await.unwrap(); Self { @@ -20,15 +22,30 @@ impl UdpClient { } } + /// # Panics + /// + /// Will panic if can't connect to the socket. 
pub async fn connect(&self, remote_address: &str) { self.socket.connect(remote_address).await.unwrap(); } + /// # Panics + /// + /// Will panic if: + /// + /// - Can't write to the socket. + /// - Can't send data. pub async fn send(&self, bytes: &[u8]) -> usize { self.socket.writable().await.unwrap(); self.socket.send(bytes).await.unwrap() } + /// # Panics + /// + /// Will panic if: + /// + /// - Can't read from the socket. + /// - Can't receive data. pub async fn receive(&self, bytes: &mut [u8]) -> usize { self.socket.readable().await.unwrap(); self.socket.recv(bytes).await.unwrap() @@ -49,6 +66,9 @@ pub struct UdpTrackerClient { } impl UdpTrackerClient { + /// # Panics + /// + /// Will panic if can't write request to bytes. pub async fn send(&self, request: Request) -> usize { // Write request into a buffer let request_buffer = vec![0u8; MAX_PACKET_SIZE]; @@ -68,6 +88,9 @@ impl UdpTrackerClient { self.udp_client.send(request_data).await } + /// # Panics + /// + /// Will panic if can't create response from the received payload (bytes buffer). pub async fn receive(&self) -> Response { let mut response_buffer = [0u8; MAX_PACKET_SIZE]; diff --git a/src/shared/bit_torrent/udp/mod.rs b/src/shared/bit_torrent/udp/mod.rs new file mode 100644 index 000000000..9322ef045 --- /dev/null +++ b/src/shared/bit_torrent/udp/mod.rs @@ -0,0 +1,12 @@ +pub mod client; + +/// The maximum number of bytes in a UDP packet. +pub const MAX_PACKET_SIZE: usize = 1496; +/// A magic 64-bit integer constant defined in the protocol that is used to +/// identify the protocol. 
+pub const PROTOCOL_ID: i64 = 0x0417_2710_1980; + +/// Generates the source address for the UDP client +fn source_address(port: u16) -> String { + format!("127.0.0.1:{port}") +} diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index 3187d9871..72124fc3f 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -6,11 +6,11 @@ use core::panic; use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; -use torrust_tracker::servers::udp::MAX_PACKET_SIZE; +use torrust_tracker::shared::bit_torrent::udp::client::{new_udp_client_connected, UdpTrackerClient}; +use torrust_tracker::shared::bit_torrent::udp::MAX_PACKET_SIZE; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_error_response; -use crate::servers::udp::client::{new_udp_client_connected, UdpTrackerClient}; use crate::servers::udp::test_environment::running_test_environment; fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { @@ -51,10 +51,10 @@ async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_req mod receiving_a_connection_request { use aquatic_udp_protocol::{ConnectRequest, TransactionId}; + use torrust_tracker::shared::bit_torrent::udp::client::new_udp_tracker_client_connected; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_connect_response; - use crate::servers::udp::client::new_udp_tracker_client_connected; use crate::servers::udp::test_environment::running_test_environment; #[tokio::test] @@ -82,10 +82,10 @@ mod receiving_an_announce_request { AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, TransactionId, }; + use torrust_tracker::shared::bit_torrent::udp::client::new_udp_tracker_client_connected; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_ipv4_announce_response; - use crate::servers::udp::client::new_udp_tracker_client_connected; use 
crate::servers::udp::contract::send_connection_request; use crate::servers::udp::test_environment::running_test_environment; @@ -124,10 +124,10 @@ mod receiving_an_scrape_request { use aquatic_udp_protocol::{ConnectionId, InfoHash, ScrapeRequest, TransactionId}; + use torrust_tracker::shared::bit_torrent::udp::client::new_udp_tracker_client_connected; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_scrape_response; - use crate::servers::udp::client::new_udp_tracker_client_connected; use crate::servers::udp::contract::send_connection_request; use crate::servers::udp::test_environment::running_test_environment; diff --git a/tests/servers/udp/mod.rs b/tests/servers/udp/mod.rs index d39c37153..4759350dc 100644 --- a/tests/servers/udp/mod.rs +++ b/tests/servers/udp/mod.rs @@ -1,9 +1,3 @@ pub mod asserts; -pub mod client; pub mod contract; pub mod test_environment; - -/// Generates the source address for the UDP client -fn source_address(port: u16) -> String { - format!("127.0.0.1:{port}") -} From bf23479debf17b7a396ea6b0d5ac6560e31144d7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 24 Nov 2023 17:45:42 +0000 Subject: [PATCH 0646/1003] feat: [#508] add health check for UDP tracker Using the `connect` UDP request. If the UDP tracker replies to a connection request, we assume it is healthy.
--- src/servers/health_check_api/handlers.rs | 38 +++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/src/servers/health_check_api/handlers.rs b/src/servers/health_check_api/handlers.rs index 0a95c3211..4e8f2b928 100644 --- a/src/servers/health_check_api/handlers.rs +++ b/src/servers/health_check_api/handlers.rs @@ -1,12 +1,14 @@ use std::net::SocketAddr; use std::sync::Arc; +use aquatic_udp_protocol::{ConnectRequest, Response, TransactionId}; use axum::extract::State; use axum::Json; use torrust_tracker_configuration::Configuration; use super::resources::Report; use super::responses; +use crate::shared::bit_torrent::udp::client::new_udp_tracker_client_connected; /// If port 0 is specified in the configuration the OS will automatically /// assign a free port. But we do now know in from the configuration. @@ -23,6 +25,8 @@ pub(crate) async fn health_check_handler(State(config): State // running service, after starting it as we do for testing with ephemeral // configurations. + // Health check for API + if config.http_api.enabled { let addr: SocketAddr = config.http_api.bind_address.parse().expect("invalid socket address for API"); @@ -35,6 +39,8 @@ pub(crate) async fn health_check_handler(State(config): State } } + // Health check for HTTP Trackers + for http_tracker_config in &config.http_trackers { if !http_tracker_config.enabled { continue; @@ -56,7 +62,22 @@ pub(crate) async fn health_check_handler(State(config): State } } - // todo: for all UDP Trackers, if enabled, check if is healthy + // Health check for UDP Trackers + + for udp_tracker_config in &config.udp_trackers { + if !udp_tracker_config.enabled { + continue; + } + + let addr: SocketAddr = udp_tracker_config + .bind_address + .parse() + .expect("invalid socket address for UDP tracker"); + + if addr.port() != UNKNOWN_PORT && !can_connect_to_udp_tracker(&addr.to_string()).await { + return responses::error(format!("UDP Tracker is not healthy. 
Can't connect to: {addr}")); + } + } responses::ok() } @@ -67,3 +88,18 @@ async fn get_req_is_ok(url: &str) -> bool { Err(_err) => false, } } + +/// Tries to connect to an UDP tracker. It returns true if it succeeded. +async fn can_connect_to_udp_tracker(url: &str) -> bool { + let client = new_udp_tracker_client_connected(url).await; + + let connect_request = ConnectRequest { + transaction_id: TransactionId(123), + }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + matches!(response, Response::Connect(_connect_response)) +} From 5e0a686023862f22e24ea9ffea6530f5d9b83efb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 24 Nov 2023 17:57:01 +0000 Subject: [PATCH 0647/1003] refactor: [#508] extract health check methods --- src/servers/health_check_api/handlers.rs | 58 ++++++++++++++++++------ 1 file changed, 44 insertions(+), 14 deletions(-) diff --git a/src/servers/health_check_api/handlers.rs b/src/servers/health_check_api/handlers.rs index 4e8f2b928..109b89bb4 100644 --- a/src/servers/health_check_api/handlers.rs +++ b/src/servers/health_check_api/handlers.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use aquatic_udp_protocol::{ConnectRequest, Response, TransactionId}; use axum::extract::State; use axum::Json; -use torrust_tracker_configuration::Configuration; +use torrust_tracker_configuration::{Configuration, HttpApi, HttpTracker, UdpTracker}; use super::resources::Report; use super::responses; @@ -21,27 +21,49 @@ const UNKNOWN_PORT: u16 = 0; /// configuration. If port 0 is specified in the configuration the health check /// for that service is skipped. 
pub(crate) async fn health_check_handler(State(config): State>) -> Json { + if let Some(err_response) = api_health_check(&config.http_api).await { + return err_response; + } + + if let Some(err_response) = http_trackers_health_check(&config.http_trackers).await { + return err_response; + } + + if let Some(err_response) = udp_trackers_health_check(&config.udp_trackers).await { + return err_response; + } + + responses::ok() +} + +async fn api_health_check(config: &HttpApi) -> Option> { // todo: when port 0 is specified in the configuration get the port from the // running service, after starting it as we do for testing with ephemeral // configurations. - // Health check for API - - if config.http_api.enabled { - let addr: SocketAddr = config.http_api.bind_address.parse().expect("invalid socket address for API"); + if config.enabled { + let addr: SocketAddr = config.bind_address.parse().expect("invalid socket address for API"); if addr.port() != UNKNOWN_PORT { let health_check_url = format!("http://{addr}/health_check"); if !get_req_is_ok(&health_check_url).await { - return responses::error(format!("API is not healthy. Health check endpoint: {health_check_url}")); + return Some(responses::error(format!( + "API is not healthy. Health check endpoint: {health_check_url}" + ))); } } } - // Health check for HTTP Trackers + None +} - for http_tracker_config in &config.http_trackers { +async fn http_trackers_health_check(http_trackers: &Vec) -> Option> { + // todo: when port 0 is specified in the configuration get the port from the + // running service, after starting it as we do for testing with ephemeral + // configurations. 
+ + for http_tracker_config in http_trackers { if !http_tracker_config.enabled { continue; } @@ -55,16 +77,22 @@ pub(crate) async fn health_check_handler(State(config): State let health_check_url = format!("http://{addr}/health_check"); if !get_req_is_ok(&health_check_url).await { - return responses::error(format!( + return Some(responses::error(format!( "HTTP Tracker is not healthy. Health check endpoint: {health_check_url}" - )); + ))); } } } - // Health check for UDP Trackers + None +} + +async fn udp_trackers_health_check(udp_trackers: &Vec) -> Option> { + // todo: when port 0 is specified in the configuration get the port from the + // running service, after starting it as we do for testing with ephemeral + // configurations. - for udp_tracker_config in &config.udp_trackers { + for udp_tracker_config in udp_trackers { if !udp_tracker_config.enabled { continue; } @@ -75,11 +103,13 @@ pub(crate) async fn health_check_handler(State(config): State .expect("invalid socket address for UDP tracker"); if addr.port() != UNKNOWN_PORT && !can_connect_to_udp_tracker(&addr.to_string()).await { - return responses::error(format!("UDP Tracker is not healthy. Can't connect to: {addr}")); + return Some(responses::error(format!( + "UDP Tracker is not healthy. Can't connect to: {addr}" + ))); } } - responses::ok() + None } async fn get_req_is_ok(url: &str) -> bool { From 5ce0048b075b06d8fd2ae35cf70363a2362b6f79 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 1 Dec 2023 09:55:54 +0000 Subject: [PATCH 0648/1003] fix: disable shellcheck --- share/container/entry_script_sh | 1 + 1 file changed, 1 insertion(+) diff --git a/share/container/entry_script_sh b/share/container/entry_script_sh index 94dfa6b81..4f98e6622 100644 --- a/share/container/entry_script_sh +++ b/share/container/entry_script_sh @@ -73,6 +73,7 @@ if [ -e "/usr/share/torrust/container/message" ]; then fi # Load message of the day from Profile +# shellcheck disable=SC2016 echo '[ ! 
-z "$TERM" -a -r /etc/motd ] && cat /etc/motd' >> /etc/profile cd /home/torrust || exit 1 From 74511ee2a1fef85a45c5d9a13f74fc02c2766854 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 1 Dec 2023 09:56:19 +0000 Subject: [PATCH 0649/1003] feat: add script to install for development Used here: https://github.com/torrust/torrust-index-gui/blob/develop/docs/development_guide.md#run-the-tracker --- contrib/dev-tools/init/install-local.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/contrib/dev-tools/init/install-local.sh b/contrib/dev-tools/init/install-local.sh index f9806a0b8..747c357bc 100755 --- a/contrib/dev-tools/init/install-local.sh +++ b/contrib/dev-tools/init/install-local.sh @@ -7,6 +7,5 @@ mkdir -p ./storage/tracker/lib/database # Generate the sqlite database if it does not exist if ! [ -f "./storage/tracker/lib/database/sqlite3.db" ]; then - # todo: it should get the path from tracker.toml and only do it when we use sqlite - sqlite3 ./storage/tracker/lib/database/sqlite3.db "VACUUM;" + sqlite3 ./storage/tracker/lib/database/sqlite3.db "VACUUM;" fi From 5439b6e0fb36e5b933050eda284ca13bf14a694b Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 2 Dec 2023 16:34:24 +0800 Subject: [PATCH 0650/1003] chore: update deps constrained versions: - axum = "0.6" - axum-client-ip = "0.4" - tower-http = "0.4" info: ``` Updating async-compression v0.4.4 -> v0.4.5 Updating borsh v0.10.3 -> v1.2.0 Updating borsh-derive v0.10.3 -> v1.2.0 Removing borsh-derive-internal v0.10.3 Removing borsh-schema-derive-internal v0.10.3 Adding cfg_aliases v0.1.1 Updating clap v4.4.7 -> v4.4.10 Updating clap_builder v4.4.7 -> v4.4.9 Updating core-foundation v0.9.3 -> v0.9.4 Updating core-foundation-sys v0.8.4 -> v0.8.6 Updating errno v0.3.5 -> v0.3.8 Updating form_urlencoded v1.2.0 -> v1.2.1 Updating getrandom v0.2.10 -> v0.2.11 Updating gimli v0.28.0 -> v0.28.1 Updating h2 v0.3.21 -> v0.3.22 Updating hashbrown v0.14.2 -> v0.14.3 Updating http v0.2.9 -> 
v0.2.11 Updating idna v0.4.0 -> v0.5.0 Updating js-sys v0.3.65 -> v0.3.66 Updating linux-raw-sys v0.4.10 -> v0.4.12 Updating proc-macro-crate v0.1.5 -> v2.0.0 Updating proc-macro2 v1.0.69 -> v1.0.70 Updating ring v0.17.5 -> v0.17.6 Updating rust_decimal v1.32.0 -> v1.33.1 Updating rustix v0.38.21 -> v0.38.26 Updating rustls v0.21.8 -> v0.21.9 Updating rustls-pemfile v1.0.3 -> v1.0.4 Updating smallvec v1.11.1 -> v1.11.2 Adding syn_derive v0.1.8 Updating termcolor v1.3.0 -> v1.4.0 Adding toml_edit v0.20.7 Updating url v2.4.1 -> v2.5.0 Updating wasm-bindgen v0.2.88 -> v0.2.89 Updating wasm-bindgen-backend v0.2.88 -> v0.2.89 Updating wasm-bindgen-futures v0.4.38 -> v0.4.39 Updating wasm-bindgen-macro v0.2.88 -> v0.2.89 Updating wasm-bindgen-macro-support v0.2.88 -> v0.2.89 Updating wasm-bindgen-shared v0.2.88 -> v0.2.89 Updating web-sys v0.3.65 -> v0.3.66 Adding windows-sys v0.52.0 Adding windows-targets v0.52.0 Adding windows_aarch64_gnullvm v0.52.0 Adding windows_aarch64_msvc v0.52.0 Adding windows_i686_gnu v0.52.0 Adding windows_i686_msvc v0.52.0 Adding windows_x86_64_gnu v0.52.0 Adding windows_x86_64_gnullvm v0.52.0 Adding windows_x86_64_msvc v0.52.0 Updating zerocopy v0.7.25 -> v0.7.28 Updating zerocopy-derive v0.7.25 -> v0.7.28 ``` --- Cargo.lock | 330 ++++++++++++++++++++++++++++++++--------------------- Cargo.toml | 6 +- 2 files changed, 205 insertions(+), 131 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 336adb8f7..30a961b14 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -121,9 +121,9 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-compression" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f658e2baef915ba0f26f1f7c42bfb8e12f532a01f449a090ded75ae7a07e9ba2" +checksum = "bc2d0cfb2a7388d34f590e76686704c494ed7aaceed62ee1ba35cbf363abc2a5" dependencies = [ "brotli", "flate2", @@ -331,47 +331,26 @@ dependencies = [ [[package]] name = 
"borsh" -version = "0.10.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b" +checksum = "bf617fabf5cdbdc92f774bfe5062d870f228b80056d41180797abf48bed4056e" dependencies = [ "borsh-derive", - "hashbrown 0.13.2", + "cfg_aliases", ] [[package]] name = "borsh-derive" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0754613691538d51f329cce9af41d7b7ca150bc973056f1156611489475f54f7" -dependencies = [ - "borsh-derive-internal", - "borsh-schema-derive-internal", - "proc-macro-crate 0.1.5", - "proc-macro2", - "syn 1.0.109", -] - -[[package]] -name = "borsh-derive-internal" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "borsh-schema-derive-internal" -version = "0.10.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd" +checksum = "f404657a7ea7b5249e36808dff544bc88a28f26e0ac40009f674b7a009d14be3" dependencies = [ + "once_cell", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.39", + "syn_derive", ] [[package]] @@ -472,6 +451,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chrono" version = "0.4.31" @@ -482,7 +467,7 @@ dependencies = [ "iana-time-zone", "num-traits", "serde", - "windows-targets", + "windows-targets 0.48.5", ] [[package]] 
@@ -525,18 +510,18 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.7" +version = "4.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac495e00dcec98c83465d5ad66c5c4fabd652fd6686e7c6269b117e729a6f17b" +checksum = "41fffed7514f420abec6d183b1d3acfd9099c79c3a10a06ade4f8203f1411272" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" -version = "4.4.7" +version = "4.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77ed9a32a62e6ca27175d00d29d05ca32e396ea1eb5fb01d8256b669cec7663" +checksum = "63361bae7eef3771745f02d8d892bec2fee5f6e34af316ba556e7f97a7069ff1" dependencies = [ "anstyle", "clap_lex", @@ -584,9 +569,9 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -594,9 +579,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "cpufeatures" @@ -849,12 +834,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -937,9 +922,9 
@@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] @@ -1119,9 +1104,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" dependencies = [ "cfg-if", "libc", @@ -1130,9 +1115,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "glob" @@ -1142,9 +1127,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "h2" -version = "0.3.21" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" +checksum = "4d6250322ef6e60f93f9a2162799302cd6f68f79f6e5d85c8c16f14d1d958178" dependencies = [ "bytes", "fnv", @@ -1152,7 +1137,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 1.9.3", + "indexmap 2.1.0", "slab", "tokio", "tokio-util", @@ -1185,9 +1170,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" +checksum = 
"290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ "ahash 0.8.6", "allocator-api2", @@ -1199,7 +1184,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.2", + "hashbrown 0.14.3", ] [[package]] @@ -1222,9 +1207,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" dependencies = [ "bytes", "fnv", @@ -1328,9 +1313,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -1354,7 +1339,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", - "hashbrown 0.14.2", + "hashbrown 0.14.3", "serde", ] @@ -1382,7 +1367,7 @@ checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", "rustix", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1411,9 +1396,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.65" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8" +checksum = 
"cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" dependencies = [ "wasm-bindgen", ] @@ -1560,9 +1545,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" +checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" [[package]] name = "local-ip-address" @@ -1573,7 +1558,7 @@ dependencies = [ "libc", "neli", "thiserror", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1651,7 +1636,7 @@ checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" dependencies = [ "libc", "wasi", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1991,7 +1976,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets", + "windows-targets 0.48.5", ] [[package]] @@ -2215,21 +2200,21 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "0.1.5" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ - "toml 0.5.11", + "once_cell", + "toml_edit 0.19.15", ] [[package]] name = "proc-macro-crate" -version = "1.3.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" dependencies = [ - "once_cell", - "toml_edit 0.19.15", + "toml_edit 0.20.7", ] [[package]] @@ -2258,9 +2243,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.69" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" dependencies = [ "unicode-ident", ] @@ -2469,16 +2454,16 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.5" +version = "0.17.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" +checksum = "684d5e6e18f669ccebf64a92236bb7db9a34f07be010e3627368182027180866" dependencies = [ "cc", "getrandom", "libc", "spin", "untrusted", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2546,9 +2531,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.32.0" +version = "1.33.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c4216490d5a413bc6d10fa4742bd7d4955941d062c0ef873141d6b0e7b30fd" +checksum = "06676aec5ccb8fc1da723cc8c0f9a46549f21ebb8753d3915c6c41db1e7f1dc4" dependencies = [ "arrayvec", "borsh", @@ -2583,22 +2568,22 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.21" +version = "0.38.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" +checksum = "9470c4bf8246c8daf25f9598dca807fb6510347b1e1cfa55749113850c79d88a" dependencies = [ "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] name = "rustls" -version = "0.21.8" +version = "0.21.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" +checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" dependencies = [ "log", "ring", @@ -2608,9 +2593,9 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ "base64 0.21.5", ] @@ -2658,7 +2643,7 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2902,9 +2887,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.1" +version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" +checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" [[package]] name = "socket2" @@ -2923,7 +2908,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2976,6 +2961,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.39", +] + [[package]] name = "sync_wrapper" version = "0.1.2" @@ -3036,14 +3033,14 @@ dependencies = [ "fastrand", "redox_syscall", "rustix", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] name = "termcolor" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64" +checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449" dependencies = [ "winapi-util", ] @@ -3143,7 +3140,7 @@ dependencies = [ "signal-hook-registry", "socket2 0.5.5", "tokio-macros", - 
"windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -3232,6 +3229,17 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_edit" +version = "0.20.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" +dependencies = [ + "indexmap 2.1.0", + "toml_datetime", + "winnow", +] + [[package]] name = "toml_edit" version = "0.21.0" @@ -3469,9 +3477,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", "idna", @@ -3527,9 +3535,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" +checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3537,9 +3545,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" +checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" dependencies = [ "bumpalo", "log", @@ -3552,9 +3560,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.38" +version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9afec9963e3d0994cac82455b2b3502b81a7f40f9a0d32181f7528d9f4b43e02" +checksum = 
"ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12" dependencies = [ "cfg-if", "js-sys", @@ -3564,9 +3572,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" +checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3574,9 +3582,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" +checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", @@ -3587,15 +3595,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" +checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" [[package]] name = "web-sys" -version = "0.3.65" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5db499c5f66323272151db0e666cd34f78617522fb0c1604d31a27c50c206a85" +checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" dependencies = [ "js-sys", "wasm-bindgen", @@ -3638,7 +3646,7 @@ version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", ] [[package]] @@ -3647,7 +3655,16 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", ] [[package]] @@ -3656,13 +3673,28 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", ] [[package]] @@ -3671,42 +3703,84 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + [[package]] name = "windows_i686_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + [[package]] name = "windows_i686_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + [[package]] name = "winnow" version = "0.5.19" @@ -3723,7 +3797,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ "cfg-if", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -3746,18 +3820,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.25" +version = "0.7.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cd369a67c0edfef15010f980c3cbe45d7f651deac2cd67ce097cd801de16557" +checksum = "7d6f15f7ade05d2a4935e34a457b936c23dc70a05cc1d97133dc99e7a3fe0f0e" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.25" +version = "0.7.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f140bda219a26ccc0cdb03dba58af72590c53b22642577d88a927bc5c87d6b" +checksum = "dbbad221e3f78500350ecbd7dfa4e63ef945c05f4c61cb7f4d3f84cd0bba649b" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 2316a1edf..5f55c6c5c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,8 +32,8 @@ version = "3.0.0-alpha.12-develop" [dependencies] aquatic_udp_protocol = "0" async-trait = "0" -axum = "0" -axum-client-ip = "0" +axum = "0.6" +axum-client-ip = "0.4" axum-server = { version = "0", features = ["tls-rustls"] } binascii = "0" chrono = { version = "0", default-features = false, features = ["clock"] } @@ -64,7 +64,7 @@ torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "pa torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12-develop", path = "contrib/bencode" } 
torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "packages/located-error" } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "packages/primitives" } -tower-http = { version = "0", features = ["compression-full"] } +tower-http = { version = "0.4", features = ["compression-full"] } uuid = { version = "1", features = ["v4"] } [dev-dependencies] From 986ab64316fc2df977b58115bc8ba218ca0a896a Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 3 Dec 2023 14:49:36 +0800 Subject: [PATCH 0651/1003] dev: refactor: rename tracker mod to core --- packages/configuration/src/lib.rs | 6 +- src/app.rs | 4 +- src/bootstrap/app.rs | 4 +- src/bootstrap/jobs/http_tracker.rs | 6 +- src/bootstrap/jobs/torrent_cleanup.rs | 4 +- src/bootstrap/jobs/tracker_apis.rs | 4 +- src/bootstrap/jobs/udp_tracker.rs | 4 +- src/{tracker => core}/auth.rs | 12 ++-- src/{tracker => core}/databases/driver.rs | 6 +- src/{tracker => core}/databases/error.rs | 2 +- src/{tracker => core}/databases/mod.rs | 16 +++--- src/{tracker => core}/databases/mysql.rs | 28 +++++----- src/{tracker => core}/databases/sqlite.rs | 28 +++++----- src/{tracker => core}/error.rs | 0 src/{tracker => core}/mod.rs | 54 +++++++++--------- src/{tracker => core}/peer.rs | 10 ++-- src/{tracker => core}/services/mod.rs | 6 +- .../services/statistics/mod.rs | 24 ++++---- .../services/statistics/setup.rs | 6 +- src/{tracker => core}/services/torrent.rs | 18 +++--- src/{tracker => core}/statistics.rs | 14 ++--- src/{tracker => core}/torrent.rs | 4 +- src/lib.rs | 6 +- src/servers/apis/routes.rs | 2 +- src/servers/apis/server.rs | 8 +-- .../apis/v1/context/auth_key/handlers.rs | 4 +- .../apis/v1/context/auth_key/resources.rs | 4 +- .../apis/v1/context/auth_key/routes.rs | 2 +- src/servers/apis/v1/context/stats/handlers.rs | 4 +- .../apis/v1/context/stats/resources.rs | 8 +-- .../apis/v1/context/stats/responses.rs | 2 +- src/servers/apis/v1/context/stats/routes.rs | 2 +- 
.../apis/v1/context/torrent/handlers.rs | 4 +- .../apis/v1/context/torrent/resources/peer.rs | 10 ++-- .../v1/context/torrent/resources/torrent.rs | 6 +- .../apis/v1/context/torrent/responses.rs | 2 +- src/servers/apis/v1/context/torrent/routes.rs | 2 +- .../apis/v1/context/whitelist/handlers.rs | 2 +- .../apis/v1/context/whitelist/routes.rs | 2 +- src/servers/apis/v1/routes.rs | 2 +- src/servers/http/mod.rs | 12 ++-- src/servers/http/percent_encoding.rs | 10 ++-- src/servers/http/server.rs | 2 +- .../http/v1/extractors/announce_request.rs | 2 +- .../http/v1/extractors/authentication_key.rs | 2 +- src/servers/http/v1/handlers/announce.rs | 12 ++-- src/servers/http/v1/handlers/common/auth.rs | 2 +- src/servers/http/v1/handlers/mod.rs | 2 +- src/servers/http/v1/handlers/scrape.rs | 12 ++-- src/servers/http/v1/launcher.rs | 2 +- src/servers/http/v1/requests/announce.rs | 6 +- src/servers/http/v1/responses/announce.rs | 10 ++-- src/servers/http/v1/responses/scrape.rs | 10 ++-- src/servers/http/v1/routes.rs | 2 +- src/servers/http/v1/services/announce.rs | 16 +++--- src/servers/http/v1/services/scrape.rs | 14 ++--- src/servers/udp/handlers.rs | 56 +++++++++---------- src/servers/udp/mod.rs | 12 ++-- src/servers/udp/peer_builder.rs | 2 +- src/servers/udp/server.rs | 2 +- src/shared/bit_torrent/common.rs | 4 +- tests/common/app.rs | 2 +- tests/common/fixtures.rs | 2 +- tests/servers/api/mod.rs | 2 +- tests/servers/api/test_environment.rs | 4 +- .../api/v1/contract/context/auth_key.rs | 2 +- tests/servers/http/client.rs | 2 +- tests/servers/http/connection_info.rs | 2 +- tests/servers/http/requests/announce.rs | 2 +- tests/servers/http/responses/announce.rs | 2 +- tests/servers/http/test_environment.rs | 4 +- tests/servers/http/v1/contract.rs | 12 ++-- tests/servers/udp/test_environment.rs | 4 +- 73 files changed, 281 insertions(+), 283 deletions(-) rename src/{tracker => core}/auth.rs (96%) rename src/{tracker => core}/databases/driver.rs (90%) rename src/{tracker => 
core}/databases/error.rs (97%) rename src/{tracker => core}/databases/mod.rs (92%) rename src/{tracker => core}/databases/mysql.rs (89%) rename src/{tracker => core}/databases/sqlite.rs (90%) rename src/{tracker => core}/error.rs (100%) rename src/{tracker => core}/mod.rs (97%) rename src/{tracker => core}/peer.rs (98%) rename src/{tracker => core}/services/mod.rs (71%) rename src/{tracker => core}/services/statistics/mod.rs (81%) rename src/{tracker => core}/services/statistics/setup.rs (83%) rename src/{tracker => core}/services/torrent.rs (95%) rename src/{tracker => core}/statistics.rs (95%) rename src/{tracker => core}/torrent.rs (99%) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 217f8a8be..918d9f014 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -30,10 +30,10 @@ //! //! Each section in the toml structure is mapped to a data structure. For //! example, the `[http_api]` section (configuration for the tracker HTTP API) -//! is mapped to the [`HttpApi`](HttpApi) structure. +//! is mapped to the [`HttpApi`] structure. //! //! > **NOTICE**: some sections are arrays of structures. For example, the -//! > `[[udp_trackers]]` section is an array of [`UdpTracker`](UdpTracker) since +//! > `[[udp_trackers]]` section is an array of [`UdpTracker`] since //! > you can have multiple running UDP trackers bound to different ports. //! //! Please refer to the documentation of each structure for more information @@ -394,7 +394,7 @@ pub struct Configuration { /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, /// `Debug` and `Trace`. Default is `Info`. pub log_level: Option, - /// Tracker mode. See [`TrackerMode`](torrust_tracker_primitives::TrackerMode) for more information. + /// Tracker mode. See [`TrackerMode`] for more information. 
pub mode: TrackerMode, // Database configuration diff --git a/src/app.rs b/src/app.rs index e749f9c64..32c12d74a 100644 --- a/src/app.rs +++ b/src/app.rs @@ -28,8 +28,8 @@ use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; +use crate::core; use crate::servers::http::Version; -use crate::tracker; /// # Panics /// @@ -37,7 +37,7 @@ use crate::tracker; /// /// - Can't retrieve tracker keys from database. /// - Can't load whitelist from database. -pub async fn start(config: Arc, tracker: Arc) -> Vec> { +pub async fn start(config: Arc, tracker: Arc) -> Vec> { let mut jobs: Vec> = Vec::new(); // Load peer keys diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 78c16a0a5..4a6f79a96 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -17,10 +17,10 @@ use torrust_tracker_configuration::Configuration; use super::config::initialize_configuration; use crate::bootstrap; +use crate::core::services::tracker_factory; +use crate::core::Tracker; use crate::shared::clock::static_time; use crate::shared::crypto::ephemeral_instance_keys; -use crate::tracker::services::tracker_factory; -use crate::tracker::Tracker; /// It loads the configuration from the environment and builds the main domain [`Tracker`] struct. #[must_use] diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index a38fe3a5a..ecf6bd8ac 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -19,9 +19,9 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpTracker; +use crate::core; use crate::servers::http::v1::launcher; use crate::servers::http::Version; -use crate::tracker; /// This is the message that the "**launcher**" spawned task sends to the main application process to notify that the HTTP server was successfully started. 
/// @@ -33,7 +33,7 @@ pub struct ServerJobStarted(); /// /// Right now there is only one version but in the future we could support more than one HTTP tracker version at the same time. /// This feature allows supporting breaking changes on `BitTorrent` BEPs. -pub async fn start_job(config: &HttpTracker, tracker: Arc, version: Version) -> JoinHandle<()> { +pub async fn start_job(config: &HttpTracker, tracker: Arc, version: Version) -> JoinHandle<()> { match version { Version::V1 => start_v1(config, tracker.clone()).await, } @@ -42,7 +42,7 @@ pub async fn start_job(config: &HttpTracker, tracker: Arc, ver /// # Panics /// /// It would panic if the `config::HttpTracker` struct would contain inappropriate values. -async fn start_v1(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { +async fn start_v1(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config .bind_address .parse::() diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index d48769139..d3b084d31 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -17,7 +17,7 @@ use log::info; use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; -use crate::tracker; +use crate::core; /// It starts a jobs for cleaning up the torrent data in the tracker. /// @@ -25,7 +25,7 @@ use crate::tracker; /// /// Refer to [`torrust-tracker-configuration documentation`](https://docs.rs/torrust-tracker-configuration) for more info about that option. 
#[must_use] -pub fn start_job(config: &Arc, tracker: &Arc) -> JoinHandle<()> { +pub fn start_job(config: &Arc, tracker: &Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(tracker); let interval = config.inactive_peer_cleanup_interval; diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 33b9b6e4a..ca29d2b5f 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -28,8 +28,8 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpApi; +use crate::core; use crate::servers::apis::server; -use crate::tracker; /// This is the message that the "launcher" spawned task sends to the main /// application process to notify the API server was successfully started. @@ -49,7 +49,7 @@ pub struct ApiServerJobStarted(); /// # Panics /// /// It would panic if unable to send the `ApiServerJobStarted` notice. -pub async fn start_job(config: &HttpApi, tracker: Arc) -> JoinHandle<()> { +pub async fn start_job(config: &HttpApi, tracker: Arc) -> JoinHandle<()> { let bind_addr = config .bind_address .parse::() diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 76c465a8d..9a30c9126 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -12,14 +12,14 @@ use log::{error, info, warn}; use tokio::task::JoinHandle; use torrust_tracker_configuration::UdpTracker; +use crate::core; use crate::servers::udp::server::Udp; -use crate::tracker; /// It starts a new UDP server with the provided configuration. /// /// It spawns a new asynchronous task for the new UDP server. 
#[must_use] -pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { +pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.clone(); tokio::spawn(async move { diff --git a/src/tracker/auth.rs b/src/core/auth.rs similarity index 96% rename from src/tracker/auth.rs rename to src/core/auth.rs index 2759c8d06..c6b772485 100644 --- a/src/tracker/auth.rs +++ b/src/core/auth.rs @@ -12,7 +12,7 @@ //! Keys are stored in this struct: //! //! ```rust,no_run -//! use torrust_tracker::tracker::auth::Key; +//! use torrust_tracker::core::auth::Key; //! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; //! //! pub struct ExpiringKey { @@ -26,7 +26,7 @@ //! You can generate a new key valid for `9999` seconds and `0` nanoseconds from the current time with the following: //! //! ```rust,no_run -//! use torrust_tracker::tracker::auth; +//! use torrust_tracker::core::auth; //! use std::time::Duration; //! //! let expiring_key = auth::generate(Duration::new(9999, 0)); @@ -138,7 +138,7 @@ pub struct Key(String); /// Error returned when a key cannot be parsed from a string. /// /// ```rust,no_run -/// use torrust_tracker::tracker::auth::Key; +/// use torrust_tracker::core::auth::Key; /// use std::str::FromStr; /// /// let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; @@ -164,7 +164,7 @@ impl FromStr for Key { } } -/// Verification error. Error returned when an [`ExpiringKey`] cannot be verified with the [`verify(...)`](crate::tracker::auth::verify) function. +/// Verification error. Error returned when an [`ExpiringKey`] cannot be verified with the [`verify(...)`](crate::core::auth::verify) function. 
/// #[derive(Debug, Error)] #[allow(dead_code)] @@ -196,7 +196,7 @@ mod tests { mod key { use std::str::FromStr; - use crate::tracker::auth::Key; + use crate::core::auth::Key; #[test] fn should_be_parsed_from_an_string() { @@ -212,8 +212,8 @@ mod tests { use std::str::FromStr; use std::time::Duration; + use crate::core::auth; use crate::shared::clock::{Current, StoppedTime}; - use crate::tracker::auth; #[test] fn should_be_parsed_from_an_string() { diff --git a/src/tracker/databases/driver.rs b/src/core/databases/driver.rs similarity index 90% rename from src/tracker/databases/driver.rs rename to src/core/databases/driver.rs index 19cb7046e..99d96c6b1 100644 --- a/src/tracker/databases/driver.rs +++ b/src/core/databases/driver.rs @@ -1,6 +1,6 @@ //! Database driver factory. //! -//! See [`databases::driver::build`](crate::tracker::databases::driver::build) +//! See [`databases::driver::build`](crate::core::databases::driver::build) //! function for more information. use torrust_tracker_primitives::DatabaseDriver; @@ -14,7 +14,7 @@ use super::{Builder, Database}; /// Example for `SQLite3`: /// /// ```rust,no_run -/// use torrust_tracker::tracker::databases; +/// use torrust_tracker::core::databases; /// use torrust_tracker_primitives::DatabaseDriver; /// /// let db_driver = DatabaseDriver::Sqlite3; @@ -25,7 +25,7 @@ use super::{Builder, Database}; /// Example for `MySQL`: /// /// ```rust,no_run -/// use torrust_tracker::tracker::databases; +/// use torrust_tracker::core::databases; /// use torrust_tracker_primitives::DatabaseDriver; /// /// let db_driver = DatabaseDriver::MySQL; diff --git a/src/tracker/databases/error.rs b/src/core/databases/error.rs similarity index 97% rename from src/tracker/databases/error.rs rename to src/core/databases/error.rs index d89ec05de..96b0d835e 100644 --- a/src/tracker/databases/error.rs +++ b/src/core/databases/error.rs @@ -1,6 +1,6 @@ //! Database errors. //! -//! 
This module contains the [Database errors](crate::tracker::databases::error::Error). +//! This module contains the [Database errors](crate::core::databases::error::Error). use std::panic::Location; use std::sync::Arc; diff --git a/src/tracker/databases/mod.rs b/src/core/databases/mod.rs similarity index 92% rename from src/tracker/databases/mod.rs rename to src/core/databases/mod.rs index 902880496..14fcb6b5b 100644 --- a/src/tracker/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -4,8 +4,8 @@ //! //! There are two implementations of the trait (two drivers): //! -//! - [`Mysql`](crate::tracker::databases::mysql::Mysql) -//! - [`Sqlite`](crate::tracker::databases::sqlite::Sqlite) +//! - [`Mysql`](crate::core::databases::mysql::Mysql) +//! - [`Sqlite`](crate::core::databases::sqlite::Sqlite) //! //! > **NOTICE**: There are no database migrations. If there are any changes, //! we will implemented them or provide a script to migrate to the new schema. @@ -22,7 +22,7 @@ //! ---|---|--- //! `id` | 1 | Autoincrement id //! `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1 -//! `completed` | 20 | The number of peers that have ever completed downloading the torrent associated to this entry. See [`Entry`](crate::tracker::torrent::Entry) for more information. +//! `completed` | 20 | The number of peers that have ever completed downloading the torrent associated to this entry. See [`Entry`](crate::core::torrent::Entry) for more information. //! //! > **NOTICE**: The peer list for a torrent is not persisted. Since peer have to re-announce themselves on intervals, the data is be //! regenerated again after some minutes. 
@@ -53,8 +53,8 @@ use std::marker::PhantomData; use async_trait::async_trait; use self::error::Error; +use crate::core::auth::{self, Key}; use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::auth::{self, Key}; struct Builder where @@ -116,9 +116,9 @@ pub trait Database: Sync + Send { /// /// It returns an array of tuples with the torrent /// [`InfoHash`] and the - /// [`completed`](crate::tracker::torrent::Entry::completed) counter + /// [`completed`](crate::core::torrent::Entry::completed) counter /// which is the number of times the torrent has been downloaded. - /// See [`Entry::completed`](crate::tracker::torrent::Entry::completed). + /// See [`Entry::completed`](crate::core::torrent::Entry::completed). /// /// # Context: Torrent Metrics /// @@ -200,8 +200,8 @@ pub trait Database: Sync + Send { /// It gets an expiring authentication key from the database. /// - /// It returns `Some(ExpiringKey)` if a [`ExpiringKey`](crate::tracker::auth::ExpiringKey) - /// with the input [`Key`](crate::tracker::auth::Key) exists, `None` otherwise. + /// It returns `Some(ExpiringKey)` if a [`ExpiringKey`](crate::core::auth::ExpiringKey) + /// with the input [`Key`](crate::core::auth::Key) exists, `None` otherwise. 
/// /// # Context: Authentication Keys /// diff --git a/src/tracker/databases/mysql.rs b/src/core/databases/mysql.rs similarity index 89% rename from src/tracker/databases/mysql.rs rename to src/core/databases/mysql.rs index 4419666ab..c46300829 100644 --- a/src/tracker/databases/mysql.rs +++ b/src/core/databases/mysql.rs @@ -11,9 +11,9 @@ use r2d2_mysql::MySqlConnectionManager; use torrust_tracker_primitives::DatabaseDriver; use super::{Database, Error}; +use crate::core::auth::{self, Key}; use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::auth::{self, Key}; const DRIVER: DatabaseDriver = DatabaseDriver::MySQL; @@ -25,7 +25,7 @@ pub struct Mysql { impl Database for Mysql { /// It instantiates a new `MySQL` database driver. /// - /// Refer to [`databases::Database::new`](crate::tracker::databases::Database::new). + /// Refer to [`databases::Database::new`](crate::core::databases::Database::new). /// /// # Errors /// @@ -39,7 +39,7 @@ impl Database for Mysql { Ok(Self { pool }) } - /// Refer to [`databases::Database::create_database_tables`](crate::tracker::databases::Database::create_database_tables). + /// Refer to [`databases::Database::create_database_tables`](crate::core::databases::Database::create_database_tables). fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( @@ -79,7 +79,7 @@ impl Database for Mysql { Ok(()) } - /// Refer to [`databases::Database::drop_database_tables`](crate::tracker::databases::Database::drop_database_tables). + /// Refer to [`databases::Database::drop_database_tables`](crate::core::databases::Database::drop_database_tables). 
fn drop_database_tables(&self) -> Result<(), Error> { let drop_whitelist_table = " DROP TABLE `whitelist`;" @@ -104,7 +104,7 @@ impl Database for Mysql { Ok(()) } - /// Refer to [`databases::Database::load_persistent_torrents`](crate::tracker::databases::Database::load_persistent_torrents). + /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). async fn load_persistent_torrents(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -119,7 +119,7 @@ impl Database for Mysql { Ok(torrents) } - /// Refer to [`databases::Database::load_keys`](crate::tracker::databases::Database::load_keys). + /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). async fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -134,7 +134,7 @@ impl Database for Mysql { Ok(keys) } - /// Refer to [`databases::Database::load_whitelist`](crate::tracker::databases::Database::load_whitelist). + /// Refer to [`databases::Database::load_whitelist`](crate::core::databases::Database::load_whitelist). async fn load_whitelist(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -145,7 +145,7 @@ impl Database for Mysql { Ok(info_hashes) } - /// Refer to [`databases::Database::save_persistent_torrent`](crate::tracker::databases::Database::save_persistent_torrent). + /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { const COMMAND : &str = "INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)"; @@ -158,7 +158,7 @@ impl Database for Mysql { Ok(conn.exec_drop(COMMAND, params! { info_hash_str, completed })?) 
} - /// Refer to [`databases::Database::get_info_hash_from_whitelist`](crate::tracker::databases::Database::get_info_hash_from_whitelist). + /// Refer to [`databases::Database::get_info_hash_from_whitelist`](crate::core::databases::Database::get_info_hash_from_whitelist). async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -172,7 +172,7 @@ impl Database for Mysql { Ok(info_hash) } - /// Refer to [`databases::Database::add_info_hash_to_whitelist`](crate::tracker::databases::Database::add_info_hash_to_whitelist). + /// Refer to [`databases::Database::add_info_hash_to_whitelist`](crate::core::databases::Database::add_info_hash_to_whitelist). async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -186,7 +186,7 @@ impl Database for Mysql { Ok(1) } - /// Refer to [`databases::Database::remove_info_hash_from_whitelist`](crate::tracker::databases::Database::remove_info_hash_from_whitelist). + /// Refer to [`databases::Database::remove_info_hash_from_whitelist`](crate::core::databases::Database::remove_info_hash_from_whitelist). async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -197,7 +197,7 @@ impl Database for Mysql { Ok(1) } - /// Refer to [`databases::Database::get_key_from_keys`](crate::tracker::databases::Database::get_key_from_keys). + /// Refer to [`databases::Database::get_key_from_keys`](crate::core::databases::Database::get_key_from_keys). async fn get_key_from_keys(&self, key: &Key) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -214,7 +214,7 @@ impl Database for Mysql { })) } - /// Refer to [`databases::Database::add_key_to_keys`](crate::tracker::databases::Database::add_key_to_keys). 
+ /// Refer to [`databases::Database::add_key_to_keys`](crate::core::databases::Database::add_key_to_keys). async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -229,7 +229,7 @@ impl Database for Mysql { Ok(1) } - /// Refer to [`databases::Database::remove_key_from_keys`](crate::tracker::databases::Database::remove_key_from_keys). + /// Refer to [`databases::Database::remove_key_from_keys`](crate::core::databases::Database::remove_key_from_keys). async fn remove_key_from_keys(&self, key: &Key) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; diff --git a/src/tracker/databases/sqlite.rs b/src/core/databases/sqlite.rs similarity index 90% rename from src/tracker/databases/sqlite.rs rename to src/core/databases/sqlite.rs index 1968ee049..bf2d6b8b9 100644 --- a/src/tracker/databases/sqlite.rs +++ b/src/core/databases/sqlite.rs @@ -8,9 +8,9 @@ use r2d2_sqlite::SqliteConnectionManager; use torrust_tracker_primitives::DatabaseDriver; use super::{Database, Error}; +use crate::core::auth::{self, Key}; use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::clock::DurationSinceUnixEpoch; -use crate::tracker::auth::{self, Key}; const DRIVER: DatabaseDriver = DatabaseDriver::Sqlite3; @@ -22,7 +22,7 @@ pub struct Sqlite { impl Database for Sqlite { /// It instantiates a new `SQLite3` database driver. /// - /// Refer to [`databases::Database::new`](crate::tracker::databases::Database::new). + /// Refer to [`databases::Database::new`](crate::core::databases::Database::new). /// /// # Errors /// @@ -32,7 +32,7 @@ impl Database for Sqlite { Pool::new(cm).map_or_else(|err| Err((err, DatabaseDriver::Sqlite3).into()), |pool| Ok(Sqlite { pool })) } - /// Refer to [`databases::Database::create_database_tables`](crate::tracker::databases::Database::create_database_tables). 
+ /// Refer to [`databases::Database::create_database_tables`](crate::core::databases::Database::create_database_tables). fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( @@ -66,7 +66,7 @@ impl Database for Sqlite { Ok(()) } - /// Refer to [`databases::Database::drop_database_tables`](crate::tracker::databases::Database::drop_database_tables). + /// Refer to [`databases::Database::drop_database_tables`](crate::core::databases::Database::drop_database_tables). fn drop_database_tables(&self) -> Result<(), Error> { let drop_whitelist_table = " DROP TABLE whitelist;" @@ -89,7 +89,7 @@ impl Database for Sqlite { Ok(()) } - /// Refer to [`databases::Database::load_persistent_torrents`](crate::tracker::databases::Database::load_persistent_torrents). + /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). async fn load_persistent_torrents(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -110,7 +110,7 @@ impl Database for Sqlite { Ok(torrents) } - /// Refer to [`databases::Database::load_keys`](crate::tracker::databases::Database::load_keys). + /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). async fn load_keys(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -131,7 +131,7 @@ impl Database for Sqlite { Ok(keys) } - /// Refer to [`databases::Database::load_whitelist`](crate::tracker::databases::Database::load_whitelist). + /// Refer to [`databases::Database::load_whitelist`](crate::core::databases::Database::load_whitelist). async fn load_whitelist(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -148,7 +148,7 @@ impl Database for Sqlite { Ok(info_hashes) } - /// Refer to [`databases::Database::save_persistent_torrent`](crate::tracker::databases::Database::save_persistent_torrent). 
+ /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -167,7 +167,7 @@ impl Database for Sqlite { } } - /// Refer to [`databases::Database::get_info_hash_from_whitelist`](crate::tracker::databases::Database::get_info_hash_from_whitelist). + /// Refer to [`databases::Database::get_info_hash_from_whitelist`](crate::core::databases::Database::get_info_hash_from_whitelist). async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -180,7 +180,7 @@ impl Database for Sqlite { Ok(query.map(|f| InfoHash::from_str(&f.get_unwrap::<_, String>(0)).unwrap())) } - /// Refer to [`databases::Database::add_info_hash_to_whitelist`](crate::tracker::databases::Database::add_info_hash_to_whitelist). + /// Refer to [`databases::Database::add_info_hash_to_whitelist`](crate::core::databases::Database::add_info_hash_to_whitelist). async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -196,7 +196,7 @@ impl Database for Sqlite { } } - /// Refer to [`databases::Database::remove_info_hash_from_whitelist`](crate::tracker::databases::Database::remove_info_hash_from_whitelist). + /// Refer to [`databases::Database::remove_info_hash_from_whitelist`](crate::core::databases::Database::remove_info_hash_from_whitelist). async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -214,7 +214,7 @@ impl Database for Sqlite { } } - /// Refer to [`databases::Database::get_key_from_keys`](crate::tracker::databases::Database::get_key_from_keys). 
+ /// Refer to [`databases::Database::get_key_from_keys`](crate::core::databases::Database::get_key_from_keys). async fn get_key_from_keys(&self, key: &Key) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -234,7 +234,7 @@ impl Database for Sqlite { })) } - /// Refer to [`databases::Database::add_key_to_keys`](crate::tracker::databases::Database::add_key_to_keys). + /// Refer to [`databases::Database::add_key_to_keys`](crate::core::databases::Database::add_key_to_keys). async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -253,7 +253,7 @@ impl Database for Sqlite { } } - /// Refer to [`databases::Database::remove_key_from_keys`](crate::tracker::databases::Database::remove_key_from_keys). + /// Refer to [`databases::Database::remove_key_from_keys`](crate::core::databases::Database::remove_key_from_keys). async fn remove_key_from_keys(&self, key: &Key) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; diff --git a/src/tracker/error.rs b/src/core/error.rs similarity index 100% rename from src/tracker/error.rs rename to src/core/error.rs diff --git a/src/tracker/mod.rs b/src/core/mod.rs similarity index 97% rename from src/tracker/mod.rs rename to src/core/mod.rs index 94d75a8cd..ed6ba6a8d 100644 --- a/src/tracker/mod.rs +++ b/src/core/mod.rs @@ -55,7 +55,7 @@ //! Once you have instantiated the `Tracker` you can `announce` a new [`Peer`] with: //! //! ```rust,no_run -//! use torrust_tracker::tracker::peer; +//! use torrust_tracker::core::peer; //! use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; //! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; //! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; @@ -97,7 +97,7 @@ //! The returned struct is: //! //! ```rust,no_run -//! use torrust_tracker::tracker::peer::Peer; +//! use torrust_tracker::core::peer::Peer; //! //! pub struct AnnounceData { //! 
pub peers: Vec, @@ -251,7 +251,7 @@ //! A `Peer` is the struct used by the `Tracker` to keep peers data: //! //! ```rust,no_run -//! use torrust_tracker::tracker::peer::Id; +//! use torrust_tracker::core::peer::Id; //! use std::net::SocketAddr; //! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; //! use aquatic_udp_protocol::NumberOfBytes; @@ -364,7 +364,7 @@ //! To learn more about tracker authentication, refer to the following modules : //! //! - [`auth`] module. -//! - [`tracker`](crate::tracker) module. +//! - [`core`](crate::core) module. //! - [`http`](crate::servers::http) module. //! //! # Statistics @@ -455,8 +455,8 @@ use self::auth::Key; use self::error::Error; use self::peer::Peer; use self::torrent::{SwarmMetadata, SwarmStats}; +use crate::core::databases::Database; use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::databases::Database; /// The domain layer tracker service. /// @@ -470,8 +470,8 @@ use crate::tracker::databases::Database; pub struct Tracker { /// `Tracker` configuration. 
See [`torrust-tracker-configuration`](torrust_tracker_configuration) pub config: Arc, - /// A database driver implementation: [`Sqlite3`](crate::tracker::databases::sqlite) - /// or [`MySQL`](crate::tracker::databases::mysql) + /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) + /// or [`MySQL`](crate::core::databases::mysql) pub database: Box, mode: TrackerMode, keys: RwLock>, @@ -1110,11 +1110,11 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; + use crate::core::peer::{self, Peer}; + use crate::core::services::tracker_factory; + use crate::core::{TorrentsMetrics, Tracker}; use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::clock::DurationSinceUnixEpoch; - use crate::tracker::peer::{self, Peer}; - use crate::tracker::services::tracker_factory; - use crate::tracker::{TorrentsMetrics, Tracker}; fn public_tracker() -> Tracker { tracker_factory(configuration::ephemeral_mode_public().into()) @@ -1288,7 +1288,7 @@ mod tests { mod handling_an_announce_request { - use crate::tracker::tests::the_tracker::{ + use crate::core::tests::the_tracker::{ peer_ip, public_tracker, sample_info_hash, sample_peer, sample_peer_1, sample_peer_2, }; @@ -1296,7 +1296,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr}; - use crate::tracker::assign_ip_address_to_peer; + use crate::core::assign_ip_address_to_peer; #[test] fn using_the_source_ip_instead_of_the_ip_in_the_announce_request() { @@ -1312,7 +1312,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::str::FromStr; - use crate::tracker::assign_ip_address_to_peer; + use crate::core::assign_ip_address_to_peer; #[test] fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { @@ -1353,7 +1353,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::str::FromStr; - use crate::tracker::assign_ip_address_to_peer; + use 
crate::core::assign_ip_address_to_peer; #[test] fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { @@ -1418,7 +1418,7 @@ mod tests { mod it_should_update_the_swarm_stats_for_the_torrent { - use crate::tracker::tests::the_tracker::{ + use crate::core::tests::the_tracker::{ completed_peer, leecher, peer_ip, public_tracker, sample_info_hash, seeder, started_peer, }; @@ -1464,9 +1464,9 @@ mod tests { use std::net::{IpAddr, Ipv4Addr}; + use crate::core::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; + use crate::core::{ScrapeData, SwarmMetadata}; use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; - use crate::tracker::{ScrapeData, SwarmMetadata}; #[tokio::test] async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent( @@ -1542,7 +1542,7 @@ mod tests { mod configured_as_whitelisted { mod handling_authorization { - use crate::tracker::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + use crate::core::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { @@ -1569,7 +1569,7 @@ mod tests { } mod handling_the_torrent_whitelist { - use crate::tracker::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + use crate::core::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; #[tokio::test] async fn it_should_add_a_torrent_to_the_whitelist() { @@ -1596,7 +1596,7 @@ mod tests { } mod persistence { - use crate::tracker::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + use crate::core::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; #[tokio::test] async fn it_should_load_the_whitelist_from_the_database() { @@ -1621,12 +1621,12 @@ mod tests { mod handling_an_scrape_request { - use 
crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::tests::the_tracker::{ + use crate::core::tests::the_tracker::{ complete_peer, incomplete_peer, peer_ip, sample_info_hash, whitelisted_tracker, }; - use crate::tracker::torrent::SwarmMetadata; - use crate::tracker::ScrapeData; + use crate::core::torrent::SwarmMetadata; + use crate::core::ScrapeData; + use crate::shared::bit_torrent::info_hash::InfoHash; #[test] fn it_should_be_able_to_build_a_zeroed_scrape_data_for_a_list_of_info_hashes() { @@ -1670,8 +1670,8 @@ mod tests { use std::str::FromStr; use std::time::Duration; - use crate::tracker::auth; - use crate::tracker::tests::the_tracker::private_tracker; + use crate::core::auth; + use crate::core::tests::the_tracker::private_tracker; #[tokio::test] async fn it_should_generate_the_expiring_authentication_keys() { @@ -1767,7 +1767,7 @@ mod tests { mod handling_torrent_persistence { use aquatic_udp_protocol::AnnounceEvent; - use crate::tracker::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; + use crate::core::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; #[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { diff --git a/src/tracker/peer.rs b/src/core/peer.rs similarity index 98% rename from src/tracker/peer.rs rename to src/core/peer.rs index 4027799a9..a64f87b66 100644 --- a/src/tracker/peer.rs +++ b/src/core/peer.rs @@ -3,7 +3,7 @@ //! A sample peer: //! //! ```rust,no_run -//! use torrust_tracker::tracker::peer; +//! use torrust_tracker::core::peer; //! use std::net::SocketAddr; //! use std::net::IpAddr; //! 
use std::net::Ipv4Addr; @@ -46,7 +46,7 @@ pub enum IPVersion { /// A sample peer: /// /// ```rust,no_run -/// use torrust_tracker::tracker::peer; +/// use torrust_tracker::core::peer; /// use std::net::SocketAddr; /// use std::net::IpAddr; /// use std::net::Ipv4Addr; @@ -118,7 +118,7 @@ impl Peer { /// A sample peer ID: /// /// ```rust,no_run -/// use torrust_tracker::tracker::peer; +/// use torrust_tracker::core::peer; /// /// let peer_id = peer::Id(*b"-qB00000000000000000"); /// ``` @@ -281,7 +281,7 @@ impl Serialize for Id { mod test { mod torrent_peer_id { - use crate::tracker::peer; + use crate::core::peer; #[test] fn should_be_instantiated_from_a_byte_slice() { @@ -398,8 +398,8 @@ mod test { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde_json::Value; + use crate::core::peer::{self, Peer}; use crate::shared::clock::{Current, Time}; - use crate::tracker::peer::{self, Peer}; #[test] fn it_should_be_serializable() { diff --git a/src/tracker/services/mod.rs b/src/core/services/mod.rs similarity index 71% rename from src/tracker/services/mod.rs rename to src/core/services/mod.rs index deb07a439..f5868fc26 100644 --- a/src/tracker/services/mod.rs +++ b/src/core/services/mod.rs @@ -2,8 +2,8 @@ //! //! There are two types of service: //! -//! - [Core tracker services](crate::tracker::services::torrent): related to the tracker main functionalities like getting info about torrents. -//! - [Services for statistics](crate::tracker::services::statistics): related to tracker metrics. Aggregate data about the tracker server. +//! - [Core tracker services](crate::core::services::torrent): related to the tracker main functionalities like getting info about torrents. +//! - [Services for statistics](crate::core::services::statistics): related to tracker metrics. Aggregate data about the tracker server. 
pub mod statistics; pub mod torrent; @@ -11,7 +11,7 @@ use std::sync::Arc; use torrust_tracker_configuration::Configuration; -use crate::tracker::Tracker; +use crate::core::Tracker; /// It returns a new tracker building its dependencies. /// diff --git a/src/tracker/services/statistics/mod.rs b/src/core/services/statistics/mod.rs similarity index 81% rename from src/tracker/services/statistics/mod.rs rename to src/core/services/statistics/mod.rs index 3ef8b52bb..f74df62e5 100644 --- a/src/tracker/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -2,15 +2,15 @@ //! //! It includes: //! -//! - A [`factory`](crate::tracker::services::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. -//! - A [`get_metrics`] service to get the [`tracker metrics`](crate::tracker::statistics::Metrics). +//! - A [`factory`](crate::core::services::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. +//! - A [`get_metrics`] service to get the [`tracker metrics`](crate::core::statistics::Metrics). //! //! Tracker metrics are collected using a Publisher-Subscribe pattern. //! //! The factory function builds two structs: //! -//! - An statistics [`EventSender`](crate::tracker::statistics::EventSender) -//! - An statistics [`Repo`](crate::tracker::statistics::Repo) +//! - An statistics [`EventSender`](crate::core::statistics::EventSender) +//! - An statistics [`Repo`](crate::core::statistics::Repo) //! //! ```text //! let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); @@ -21,7 +21,7 @@ //! There is an event listener that is receiving all the events and processing them with an event handler. //! Then, the event handler updates the metrics depending on the received event. //! -//! For example, if you send the event [`Event::Udp4Connect`](crate::tracker::statistics::Event::Udp4Connect): +//! 
For example, if you send the event [`Event::Udp4Connect`](crate::core::statistics::Event::Udp4Connect): //! //! ```text //! let result = event_sender.send_event(Event::Udp4Connect).await; @@ -40,8 +40,8 @@ pub mod setup; use std::sync::Arc; -use crate::tracker::statistics::Metrics; -use crate::tracker::{TorrentsMetrics, Tracker}; +use crate::core::statistics::Metrics; +use crate::core::{TorrentsMetrics, Tracker}; /// All the metrics collected by the tracker. #[derive(Debug, PartialEq)] @@ -88,9 +88,9 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::tracker; - use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; - use crate::tracker::services::tracker_factory; + use crate::core; + use crate::core::services::statistics::{get_metrics, TrackerMetrics}; + use crate::core::services::tracker_factory; pub fn tracker_configuration() -> Arc { Arc::new(configuration::ephemeral()) @@ -105,8 +105,8 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: tracker::TorrentsMetrics::default(), - protocol_metrics: tracker::statistics::Metrics::default(), + torrents_metrics: core::TorrentsMetrics::default(), + protocol_metrics: core::statistics::Metrics::default(), } ); } diff --git a/src/tracker/services/statistics/setup.rs b/src/core/services/statistics/setup.rs similarity index 83% rename from src/tracker/services/statistics/setup.rs rename to src/core/services/statistics/setup.rs index 4bf5a827a..37603852b 100644 --- a/src/tracker/services/statistics/setup.rs +++ b/src/core/services/statistics/setup.rs @@ -1,14 +1,14 @@ //! Setup for the tracker statistics. //! //! The [`factory`] function builds the structs needed for handling the tracker metrics. -use crate::tracker::statistics; +use crate::core::statistics; /// It builds the structs needed for handling the tracker metrics. 
/// /// It returns: /// -/// - An statistics [`EventSender`](crate::tracker::statistics::EventSender) that allows you to send events related to statistics. -/// - An statistics [`Repo`](crate::tracker::statistics::Repo) which is an in-memory repository for the tracker metrics. +/// - An statistics [`EventSender`](crate::core::statistics::EventSender) that allows you to send events related to statistics. +/// - An statistics [`Repo`](crate::core::statistics::Repo) which is an in-memory repository for the tracker metrics. /// /// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics /// events are sent are received but not dispatched to the handler. diff --git a/src/tracker/services/torrent.rs b/src/core/services/torrent.rs similarity index 95% rename from src/tracker/services/torrent.rs rename to src/core/services/torrent.rs index 934fa6b77..651f40cab 100644 --- a/src/tracker/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -8,9 +8,9 @@ use std::sync::Arc; use serde::Deserialize; +use crate::core::peer::Peer; +use crate::core::Tracker; use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::peer::Peer; -use crate::tracker::Tracker; /// It contains all the information the tracker has about a torrent #[derive(Debug, PartialEq)] @@ -141,8 +141,8 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use crate::core::peer; use crate::shared::clock::DurationSinceUnixEpoch; - use crate::tracker::peer; fn sample_peer() -> peer::Peer { peer::Peer { @@ -164,10 +164,10 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; + use crate::core::services::torrent::tests::sample_peer; + use crate::core::services::torrent::{get_torrent_info, Info}; + use crate::core::services::tracker_factory; use crate::shared::bit_torrent::info_hash::InfoHash; - use 
crate::tracker::services::torrent::tests::sample_peer; - use crate::tracker::services::torrent::{get_torrent_info, Info}; - use crate::tracker::services::tracker_factory; pub fn tracker_configuration() -> Arc { Arc::new(configuration::ephemeral()) @@ -219,10 +219,10 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; + use crate::core::services::torrent::tests::sample_peer; + use crate::core::services::torrent::{get_torrents, BasicInfo, Pagination}; + use crate::core::services::tracker_factory; use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::services::torrent::tests::sample_peer; - use crate::tracker::services::torrent::{get_torrents, BasicInfo, Pagination}; - use crate::tracker::services::tracker_factory; pub fn tracker_configuration() -> Arc { Arc::new(configuration::ephemeral()) diff --git a/src/tracker/statistics.rs b/src/core/statistics.rs similarity index 95% rename from src/tracker/statistics.rs rename to src/core/statistics.rs index 85cc4f255..f38662cdd 100644 --- a/src/tracker/statistics.rs +++ b/src/core/statistics.rs @@ -13,10 +13,10 @@ //! //! The data is collected by using an `event-sender -> event listener` model. //! -//! The tracker uses an [`statistics::EventSender`](crate::tracker::statistics::EventSender) instance to send an event. -//! The [`statistics::Keeper`](crate::tracker::statistics::Keeper) listens to new events and uses the [`statistics::Repo`](crate::tracker::statistics::Repo) to upgrade and store metrics. +//! The tracker uses an [`statistics::EventSender`](crate::core::statistics::EventSender) instance to send an event. +//! The [`statistics::Keeper`](crate::core::statistics::Keeper) listens to new events and uses the [`statistics::Repo`](crate::core::statistics::Repo) to upgrade and store metrics. //! -//! See the [`statistics::Event`](crate::tracker::statistics::Event) enum to check which events are available. +//! 
See the [`statistics::Event`](crate::core::statistics::Event) enum to check which events are available. use std::sync::Arc; use async_trait::async_trait; @@ -191,10 +191,10 @@ pub trait EventSender: Sync + Send { async fn send_event(&self, event: Event) -> Option>>; } -/// An [`statistics::EventSender`](crate::tracker::statistics::EventSender) implementation. +/// An [`statistics::EventSender`](crate::core::statistics::EventSender) implementation. /// /// It uses a channel sender to send the statistic events. The channel is created by a -/// [`statistics::Keeper`](crate::tracker::statistics::Keeper) +/// [`statistics::Keeper`](crate::core::statistics::Keeper) pub struct Sender { sender: mpsc::Sender, } @@ -307,7 +307,7 @@ impl Repo { mod tests { mod stats_tracker { - use crate::tracker::statistics::{Event, Keeper, Metrics}; + use crate::core::statistics::{Event, Keeper, Metrics}; #[tokio::test] async fn should_contain_the_tracker_statistics() { @@ -331,7 +331,7 @@ mod tests { } mod event_handler { - use crate::tracker::statistics::{event_handler, Event, Repo}; + use crate::core::statistics::{event_handler, Event, Repo}; #[tokio::test] async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { diff --git a/src/tracker/torrent.rs b/src/core/torrent.rs similarity index 99% rename from src/tracker/torrent.rs rename to src/core/torrent.rs index de520aeb1..8167aa2db 100644 --- a/src/tracker/torrent.rs +++ b/src/core/torrent.rs @@ -191,9 +191,9 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use crate::core::peer; + use crate::core::torrent::Entry; use crate::shared::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; - use crate::tracker::peer; - use crate::tracker::torrent::Entry; struct TorrentPeerBuilder { peer: peer::Peer, diff --git a/src/lib.rs b/src/lib.rs index 8d453f177..c5f775646 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -387,7 +387,7 @@ //! //! 
Torrust Tracker has four main components: //! -//! - The core [`tracker`] +//! - The core tracker [`core`] //! - The tracker REST [`API`](crate::servers::apis) //! - The [`UDP`](crate::servers::udp) tracker //! - The [`HTTP`](crate::servers::http) tracker @@ -405,7 +405,7 @@ //! - Statistics //! - Persistence //! -//! See [`tracker`] for more details on the [`tracker`] module. +//! See [`core`] for more details on the [`core`] module. //! //! ## Tracker API //! @@ -471,9 +471,9 @@ //! examples on the integration and unit tests. pub mod app; pub mod bootstrap; +pub mod core; pub mod servers; pub mod shared; -pub mod tracker; #[macro_use] extern crate lazy_static; diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index 0740e1f6a..49f263db3 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -13,7 +13,7 @@ use tower_http::compression::CompressionLayer; use super::v1; use super::v1::context::health_check::handlers::health_check_handler; -use crate::tracker::Tracker; +use crate::core::Tracker; /// Add all API routes to the router. #[allow(clippy::needless_pass_by_value)] diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 778a17d90..58b60638e 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -34,8 +34,8 @@ use futures::Future; use log::info; use super::routes::router; +use crate::core::Tracker; use crate::servers::signals::shutdown_signal; -use crate::tracker::Tracker; /// Errors that can occur when starting or stopping the API server. 
#[derive(Debug)] @@ -281,9 +281,9 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; + use crate::core; + use crate::core::statistics; use crate::servers::apis::server::ApiServer; - use crate::tracker; - use crate::tracker::statistics; fn tracker_configuration() -> Arc { Arc::new(configuration::ephemeral()) @@ -293,7 +293,7 @@ mod tests { async fn it_should_be_able_to_start_from_stopped_state_and_then_stop_again() { let cfg = tracker_configuration(); - let tracker = Arc::new(tracker::Tracker::new(cfg.clone(), None, statistics::Repo::new()).unwrap()); + let tracker = Arc::new(core::Tracker::new(cfg.clone(), None, statistics::Repo::new()).unwrap()); let stopped_api_server = ApiServer::new(cfg.http_api.clone()); diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs index d6a2992fb..a6c8bf812 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -10,10 +10,10 @@ use serde::Deserialize; use super::responses::{ auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, }; +use crate::core::auth::Key; +use crate::core::Tracker; use crate::servers::apis::v1::context::auth_key::resources::AuthKey; use crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; -use crate::tracker::auth::Key; -use crate::tracker::Tracker; /// It handles the request to generate a new authentication key. 
/// diff --git a/src/servers/apis/v1/context/auth_key/resources.rs b/src/servers/apis/v1/context/auth_key/resources.rs index 5099fad8b..f4c7f34ca 100644 --- a/src/servers/apis/v1/context/auth_key/resources.rs +++ b/src/servers/apis/v1/context/auth_key/resources.rs @@ -3,8 +3,8 @@ use std::convert::From; use serde::{Deserialize, Serialize}; +use crate::core::auth::{self, Key}; use crate::shared::clock::convert_from_iso_8601_to_timestamp; -use crate::tracker::auth::{self, Key}; /// A resource that represents an authentication key. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -43,8 +43,8 @@ mod tests { use std::time::Duration; use super::AuthKey; + use crate::core::auth::{self, Key}; use crate::shared::clock::{Current, TimeNow}; - use crate::tracker::auth::{self, Key}; struct TestTime { pub timestamp: u64, diff --git a/src/servers/apis/v1/context/auth_key/routes.rs b/src/servers/apis/v1/context/auth_key/routes.rs index 76c634e21..003ee5af4 100644 --- a/src/servers/apis/v1/context/auth_key/routes.rs +++ b/src/servers/apis/v1/context/auth_key/routes.rs @@ -12,7 +12,7 @@ use axum::routing::{get, post}; use axum::Router; use super::handlers::{delete_auth_key_handler, generate_auth_key_handler, reload_keys_handler}; -use crate::tracker::Tracker; +use crate::core::Tracker; /// It adds the routes to the router for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. 
pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs index bb531c806..c3be5dc7a 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -7,8 +7,8 @@ use axum::response::Json; use super::resources::Stats; use super::responses::stats_response; -use crate::tracker::services::statistics::get_metrics; -use crate::tracker::Tracker; +use crate::core::services::statistics::get_metrics; +use crate::core::Tracker; /// It handles the request to get the tracker statistics. /// diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index 355a1e448..b241c469c 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -2,7 +2,7 @@ //! API context. use serde::{Deserialize, Serialize}; -use crate::tracker::services::statistics::TrackerMetrics; +use crate::core::services::statistics::TrackerMetrics; /// It contains all the statistics generated by the tracker. 
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -72,9 +72,9 @@ impl From for Stats { #[cfg(test)] mod tests { use super::Stats; - use crate::tracker::services::statistics::TrackerMetrics; - use crate::tracker::statistics::Metrics; - use crate::tracker::TorrentsMetrics; + use crate::core::services::statistics::TrackerMetrics; + use crate::core::statistics::Metrics; + use crate::core::TorrentsMetrics; #[test] fn stats_resource_should_be_converted_from_tracker_metrics() { diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs index e8e7cb84d..9d03ccedf 100644 --- a/src/servers/apis/v1/context/stats/responses.rs +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -3,7 +3,7 @@ use axum::response::Json; use super::resources::Stats; -use crate::tracker::services::statistics::TrackerMetrics; +use crate::core::services::statistics::TrackerMetrics; /// `200` response that contains the [`Stats`] resource as json. pub fn stats_response(tracker_metrics: TrackerMetrics) -> Json { diff --git a/src/servers/apis/v1/context/stats/routes.rs b/src/servers/apis/v1/context/stats/routes.rs index 9198562dd..d8d552697 100644 --- a/src/servers/apis/v1/context/stats/routes.rs +++ b/src/servers/apis/v1/context/stats/routes.rs @@ -9,7 +9,7 @@ use axum::routing::get; use axum::Router; use super::handlers::get_stats_handler; -use crate::tracker::Tracker; +use crate::core::Tracker; /// It adds the routes to the router for the [`stats`](crate::servers::apis::v1::context::stats) API context. 
pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index 1f38ab474..101a25c8d 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -11,11 +11,11 @@ use serde::{de, Deserialize, Deserializer}; use super::resources::torrent::ListItem; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; +use crate::core::services::torrent::{get_torrent_info, get_torrents, Pagination}; +use crate::core::Tracker; use crate::servers::apis::v1::responses::invalid_info_hash_param_response; use crate::servers::apis::InfoHashParam; use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; -use crate::tracker::Tracker; /// It handles the request to get the torrent data. /// diff --git a/src/servers/apis/v1/context/torrent/resources/peer.rs b/src/servers/apis/v1/context/torrent/resources/peer.rs index e0cab853e..752694393 100644 --- a/src/servers/apis/v1/context/torrent/resources/peer.rs +++ b/src/servers/apis/v1/context/torrent/resources/peer.rs @@ -1,7 +1,7 @@ //! `Peer` and Peer `Id` API resources. use serde::{Deserialize, Serialize}; -use crate::tracker; +use crate::core; /// `Peer` API resource. 
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -35,8 +35,8 @@ pub struct Id { pub client: Option, } -impl From for Id { - fn from(peer_id: tracker::peer::Id) -> Self { +impl From for Id { + fn from(peer_id: core::peer::Id) -> Self { Id { id: peer_id.to_hex_string(), client: peer_id.get_client_name(), @@ -44,9 +44,9 @@ impl From for Id { } } -impl From for Peer { +impl From for Peer { #[allow(deprecated)] - fn from(peer: tracker::peer::Peer) -> Self { + fn from(peer: core::peer::Peer) -> Self { Peer { peer_id: Id::from(peer.peer_id), peer_addr: peer.peer_addr.to_string(), diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index ebebda79c..74577a23e 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize}; use super::peer; -use crate::tracker::services::torrent::{BasicInfo, Info}; +use crate::core::services::torrent::{BasicInfo, Info}; /// `Torrent` API resource. 
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -103,12 +103,12 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use super::Torrent; + use crate::core::peer; + use crate::core::services::torrent::{BasicInfo, Info}; use crate::servers::apis::v1::context::torrent::resources::peer::Peer; use crate::servers::apis::v1::context::torrent::resources::torrent::ListItem; use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::clock::DurationSinceUnixEpoch; - use crate::tracker::peer; - use crate::tracker::services::torrent::{BasicInfo, Info}; fn sample_peer() -> peer::Peer { peer::Peer { diff --git a/src/servers/apis/v1/context/torrent/responses.rs b/src/servers/apis/v1/context/torrent/responses.rs index 99c2fcae3..5daceaf94 100644 --- a/src/servers/apis/v1/context/torrent/responses.rs +++ b/src/servers/apis/v1/context/torrent/responses.rs @@ -4,7 +4,7 @@ use axum::response::{IntoResponse, Json, Response}; use serde_json::json; use super::resources::torrent::{ListItem, Torrent}; -use crate::tracker::services::torrent::{BasicInfo, Info}; +use crate::core::services::torrent::{BasicInfo, Info}; /// `200` response that contains an array of /// [`ListItem`] diff --git a/src/servers/apis/v1/context/torrent/routes.rs b/src/servers/apis/v1/context/torrent/routes.rs index 18295f2a2..6f8c28df5 100644 --- a/src/servers/apis/v1/context/torrent/routes.rs +++ b/src/servers/apis/v1/context/torrent/routes.rs @@ -10,7 +10,7 @@ use axum::routing::get; use axum::Router; use super::handlers::{get_torrent_handler, get_torrents_handler}; -use crate::tracker::Tracker; +use crate::core::Tracker; /// It adds the routes to the router for the [`torrent`](crate::servers::apis::v1::context::torrent) API context. 
pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs index bd1da735e..fc32f667b 100644 --- a/src/servers/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -9,10 +9,10 @@ use axum::response::Response; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, }; +use crate::core::Tracker; use crate::servers::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; use crate::servers::apis::InfoHashParam; use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::Tracker; /// It handles the request to add a torrent to the whitelist. /// diff --git a/src/servers/apis/v1/context/whitelist/routes.rs b/src/servers/apis/v1/context/whitelist/routes.rs index 65d511341..e4e85181f 100644 --- a/src/servers/apis/v1/context/whitelist/routes.rs +++ b/src/servers/apis/v1/context/whitelist/routes.rs @@ -11,7 +11,7 @@ use axum::routing::{delete, get, post}; use axum::Router; use super::handlers::{add_torrent_to_whitelist_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler}; -use crate::tracker::Tracker; +use crate::core::Tracker; /// It adds the routes to the router for the [`whitelist`](crate::servers::apis::v1::context::whitelist) API context. pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { diff --git a/src/servers/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs index 74778ca14..48b795573 100644 --- a/src/servers/apis/v1/routes.rs +++ b/src/servers/apis/v1/routes.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use axum::Router; use super::context::{auth_key, stats, torrent, whitelist}; -use crate::tracker::Tracker; +use crate::core::Tracker; /// Add the routes for the v1 API. 
pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index e6dd808b6..10666d8a5 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -206,15 +206,15 @@ //! //! ### Scrape //! -//! The `scrape` request allows a peer to get [swarm metadata](crate::tracker::torrent::SwarmMetadata) +//! The `scrape` request allows a peer to get [swarm metadata](crate::core::torrent::SwarmMetadata) //! for multiple torrents at the same time. //! -//! The response contains the [swarm metadata](crate::tracker::torrent::SwarmMetadata) +//! The response contains the [swarm metadata](crate::core::torrent::SwarmMetadata) //! for that torrent: //! -//! - [complete](crate::tracker::torrent::SwarmMetadata::complete) -//! - [downloaded](crate::tracker::torrent::SwarmMetadata::downloaded) -//! - [incomplete](crate::tracker::torrent::SwarmMetadata::incomplete) +//! - [complete](crate::core::torrent::SwarmMetadata::complete) +//! - [downloaded](crate::core::torrent::SwarmMetadata::downloaded) +//! - [incomplete](crate::core::torrent::SwarmMetadata::incomplete) //! //! **Query parameters** //! @@ -266,7 +266,7 @@ //! Where the `files` key contains a dictionary of dictionaries. The first //! dictionary key is the `info_hash` of the torrent (`iiiiiiiiiiiiiiiiiiii` in //! the example). The second level dictionary contains the -//! [swarm metadata](crate::tracker::torrent::SwarmMetadata) for that torrent. +//! [swarm metadata](crate::core::torrent::SwarmMetadata) for that torrent. //! //! If you save the response as a file and you open it with a program that //! can handle binary data you would see: diff --git a/src/servers/http/percent_encoding.rs b/src/servers/http/percent_encoding.rs index b674f0475..472b1e724 100644 --- a/src/servers/http/percent_encoding.rs +++ b/src/servers/http/percent_encoding.rs @@ -15,8 +15,8 @@ //! - //! - //! 
- +use crate::core::peer::{self, IdConversionError}; use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; -use crate::tracker::peer::{self, IdConversionError}; /// Percent decodes a percent encoded infohash. Internally an /// [`InfoHash`] is a 20-byte array. @@ -28,7 +28,7 @@ use crate::tracker::peer::{self, IdConversionError}; /// use std::str::FromStr; /// use torrust_tracker::servers::http::percent_encoding::percent_decode_info_hash; /// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -/// use torrust_tracker::tracker::peer; +/// use torrust_tracker::core::peer; /// /// let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; /// @@ -49,7 +49,7 @@ pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result Result Result Tracker { tracker_factory(configuration::ephemeral_mode_private().into()) @@ -215,9 +215,9 @@ mod tests { use std::sync::Arc; use super::{private_tracker, sample_announce_request, sample_client_ip_sources}; + use crate::core::auth; use crate::servers::http::v1::handlers::announce::handle_announce; use crate::servers::http::v1::handlers::announce::tests::assert_error_response; - use crate::tracker::auth; #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_missing() { diff --git a/src/servers/http/v1/handlers/common/auth.rs b/src/servers/http/v1/handlers/common/auth.rs index 720ed7659..f9a7796a4 100644 --- a/src/servers/http/v1/handlers/common/auth.rs +++ b/src/servers/http/v1/handlers/common/auth.rs @@ -5,8 +5,8 @@ use std::panic::Location; use thiserror::Error; +use crate::core::auth; use crate::servers::http::v1::responses; -use crate::tracker::auth; /// Authentication error. /// diff --git a/src/servers/http/v1/handlers/mod.rs b/src/servers/http/v1/handlers/mod.rs index d7fd05838..7b3a1e7c3 100644 --- a/src/servers/http/v1/handlers/mod.rs +++ b/src/servers/http/v1/handlers/mod.rs @@ -3,7 +3,7 @@ //! 
Refer to the generic [HTTP server documentation](crate::servers::http) for //! more information about the HTTP tracker. use super::responses; -use crate::tracker::error::Error; +use crate::core::error::Error; pub mod announce; pub mod common; diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 58b8aa84c..298d47383 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -11,14 +11,14 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; +use crate::core::auth::Key; +use crate::core::{ScrapeData, Tracker}; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::extractors::scrape_request::ExtractRequest; use crate::servers::http::v1::requests::scrape::Scrape; use crate::servers::http::v1::services::peer_ip_resolver::{self, ClientIpSources}; use crate::servers::http::v1::{responses, services}; -use crate::tracker::auth::Key; -use crate::tracker::{ScrapeData, Tracker}; /// It handles the `scrape` request when the HTTP tracker is configured /// to run in `public` mode. 
@@ -113,12 +113,12 @@ mod tests { use torrust_tracker_test_helpers::configuration; + use crate::core::services::tracker_factory; + use crate::core::Tracker; use crate::servers::http::v1::requests::scrape::Scrape; use crate::servers::http::v1::responses; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::services::tracker_factory; - use crate::tracker::Tracker; fn private_tracker() -> Tracker { tracker_factory(configuration::ephemeral_mode_private().into()) @@ -161,8 +161,8 @@ mod tests { use std::sync::Arc; use super::{private_tracker, sample_client_ip_sources, sample_scrape_request}; + use crate::core::{auth, ScrapeData}; use crate::servers::http::v1::handlers::scrape::handle_scrape; - use crate::tracker::{auth, ScrapeData}; #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { @@ -203,8 +203,8 @@ mod tests { use std::sync::Arc; use super::{sample_client_ip_sources, sample_scrape_request, whitelisted_tracker}; + use crate::core::ScrapeData; use crate::servers::http::v1::handlers::scrape::handle_scrape; - use crate::tracker::ScrapeData; #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { diff --git a/src/servers/http/v1/launcher.rs b/src/servers/http/v1/launcher.rs index b5faf8d46..1ae09a5f8 100644 --- a/src/servers/http/v1/launcher.rs +++ b/src/servers/http/v1/launcher.rs @@ -11,8 +11,8 @@ use futures::future::BoxFuture; use log::info; use super::routes::router; +use crate::core::Tracker; use crate::servers::http::server::HttpServerLauncher; -use crate::tracker::Tracker; #[derive(Debug)] pub enum Error { diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index c330ca3bd..7f77f727d 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -8,11 +8,11 @@ use std::str::FromStr; 
use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; +use crate::core::peer::{self, IdConversionError}; use crate::servers::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::servers::http::v1::query::{ParseQueryError, Query}; use crate::servers::http::v1::responses; use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; -use crate::tracker::peer::{self, IdConversionError}; /// The number of bytes `downloaded`, `uploaded` or `left`. It's used in the /// `Announce` request for parameters that represent a number of bytes. @@ -34,7 +34,7 @@ const COMPACT: &str = "compact"; /// ```rust /// use torrust_tracker::servers::http::v1::requests::announce::{Announce, Compact, Event}; /// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -/// use torrust_tracker::tracker::peer; +/// use torrust_tracker::core::peer; /// /// let request = Announce { /// // Mandatory params @@ -355,12 +355,12 @@ mod tests { mod announce_request { + use crate::core::peer; use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::announce::{ Announce, Compact, Event, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, }; use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::peer; #[test] fn should_be_instantiated_from_the_url_query_with_only_the_mandatory_params() { diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index f45f4c824..8a245476b 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -11,8 +11,8 @@ use serde::{self, Deserialize, Serialize}; use thiserror::Error; use torrust_tracker_contrib_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; +use crate::core::{self, AnnounceData}; use crate::servers::http::v1::responses; -use crate::tracker::{self, AnnounceData}; /// Normal (non compact) `announce` response. 
/// @@ -125,8 +125,8 @@ impl Peer { } } -impl From for Peer { - fn from(peer: tracker::peer::Peer) -> Self { +impl From for Peer { + fn from(peer: core::peer::Peer) -> Self { Peer { peer_id: peer.peer_id.to_bytes(), ip: peer.peer_addr.ip(), @@ -312,8 +312,8 @@ impl CompactPeer { } } -impl From for CompactPeer { - fn from(peer: tracker::peer::Peer) -> Self { +impl From for CompactPeer { + fn from(peer: core::peer::Peer) -> Self { CompactPeer { ip: peer.peer_addr.ip(), port: peer.peer_addr.port(), diff --git a/src/servers/http/v1/responses/scrape.rs b/src/servers/http/v1/responses/scrape.rs index 9cd88b9ab..e16827824 100644 --- a/src/servers/http/v1/responses/scrape.rs +++ b/src/servers/http/v1/responses/scrape.rs @@ -7,15 +7,15 @@ use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; use torrust_tracker_contrib_bencode::{ben_int, ben_map, BMutAccess}; -use crate::tracker::ScrapeData; +use crate::core::ScrapeData; /// The `Scrape` response for the HTTP tracker. /// /// ```rust /// use torrust_tracker::servers::http::v1::responses::scrape::Bencoded; /// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -/// use torrust_tracker::tracker::torrent::SwarmMetadata; -/// use torrust_tracker::tracker::ScrapeData; +/// use torrust_tracker::core::torrent::SwarmMetadata; +/// use torrust_tracker::core::ScrapeData; /// /// let info_hash = InfoHash([0x69; 20]); /// let mut scrape_data = ScrapeData::empty(); @@ -92,10 +92,10 @@ impl IntoResponse for Bencoded { mod tests { mod scrape_response { + use crate::core::torrent::SwarmMetadata; + use crate::core::ScrapeData; use crate::servers::http::v1::responses::scrape::Bencoded; use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::torrent::SwarmMetadata; - use crate::tracker::ScrapeData; fn sample_scrape_data() -> ScrapeData { let info_hash = InfoHash([0x69; 20]); diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 0b6b419c1..20e96d7fd 100644 --- 
a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -7,7 +7,7 @@ use axum_client_ip::SecureClientIpSource; use tower_http::compression::CompressionLayer; use super::handlers::{announce, health_check, scrape}; -use crate::tracker::Tracker; +use crate::core::Tracker; /// It adds the routes to the router. /// diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index ddb3b1221..bdf8afc87 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -2,7 +2,7 @@ //! //! The service is responsible for handling the `announce` requests. //! -//! It delegates the `announce` logic to the [`Tracker`](crate::tracker::Tracker::announce) +//! It delegates the `announce` logic to the [`Tracker`](crate::core::Tracker::announce) //! and it returns the [`AnnounceData`] returned //! by the [`Tracker`]. //! @@ -11,9 +11,9 @@ use std::net::IpAddr; use std::sync::Arc; +use crate::core::peer::Peer; +use crate::core::{statistics, AnnounceData, Tracker}; use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::peer::Peer; -use crate::tracker::{statistics, AnnounceData, Tracker}; /// The HTTP tracker `announce` service. 
/// @@ -50,10 +50,10 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; + use crate::core::services::tracker_factory; + use crate::core::{peer, Tracker}; use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::clock::DurationSinceUnixEpoch; - use crate::tracker::services::tracker_factory; - use crate::tracker::{peer, Tracker}; fn public_tracker() -> Tracker { tracker_factory(configuration::ephemeral_mode_public().into()) @@ -97,11 +97,11 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; + use crate::core::peer::Peer; + use crate::core::torrent::SwarmStats; + use crate::core::{statistics, AnnounceData, Tracker}; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; - use crate::tracker::peer::Peer; - use crate::tracker::torrent::SwarmStats; - use crate::tracker::{statistics, AnnounceData, Tracker}; #[tokio::test] async fn it_should_return_the_announce_data() { diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index adea28086..c2fa104de 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -2,7 +2,7 @@ //! //! The service is responsible for handling the `scrape` requests. //! -//! It delegates the `scrape` logic to the [`Tracker`](crate::tracker::Tracker::scrape) +//! It delegates the `scrape` logic to the [`Tracker`](crate::core::Tracker::scrape) //! and it returns the [`ScrapeData`] returned //! by the [`Tracker`]. //! @@ -11,8 +11,8 @@ use std::net::IpAddr; use std::sync::Arc; +use crate::core::{statistics, ScrapeData, Tracker}; use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::{statistics, ScrapeData, Tracker}; /// The HTTP tracker `scrape` service. 
/// @@ -63,10 +63,10 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; + use crate::core::services::tracker_factory; + use crate::core::{peer, Tracker}; use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::clock::DurationSinceUnixEpoch; - use crate::tracker::services::tracker_factory; - use crate::tracker::{peer, Tracker}; fn public_tracker() -> Tracker { tracker_factory(configuration::ephemeral_mode_public().into()) @@ -101,12 +101,12 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_test_helpers::configuration; + use crate::core::torrent::SwarmMetadata; + use crate::core::{statistics, ScrapeData, Tracker}; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ public_tracker, sample_info_hash, sample_info_hashes, sample_peer, }; - use crate::tracker::torrent::SwarmMetadata; - use crate::tracker::{statistics, ScrapeData, Tracker}; #[tokio::test] async fn it_should_return_the_scrape_data_for_a_torrent() { @@ -193,11 +193,11 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_test_helpers::configuration; + use crate::core::{statistics, ScrapeData, Tracker}; use crate::servers::http::v1::services::scrape::fake; use crate::servers::http::v1::services::scrape::tests::{ public_tracker, sample_info_hash, sample_info_hashes, sample_peer, }; - use crate::tracker::{statistics, ScrapeData, Tracker}; #[tokio::test] async fn it_should_always_return_the_zeroed_scrape_data_for_a_torrent() { diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 64d60e549..1878c30e1 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -10,12 +10,12 @@ use aquatic_udp_protocol::{ use log::debug; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; +use crate::core::{statistics, Tracker}; use crate::servers::udp::error::Error; use 
crate::servers::udp::peer_builder; use crate::servers::udp::request::AnnounceWrapper; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::{statistics, Tracker}; /// It handles the incoming UDP packets. /// @@ -283,9 +283,9 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; + use crate::core::services::tracker_factory; + use crate::core::{peer, Tracker}; use crate::shared::clock::{Current, Time}; - use crate::tracker::services::tracker_factory; - use crate::tracker::{peer, Tracker}; fn tracker_configuration() -> Arc { Arc::new(default_testing_tracker_configuration()) @@ -396,10 +396,10 @@ mod tests { use mockall::predicate::eq; use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr, tracker_configuration}; + use crate::core::{self, statistics}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_connect; use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; - use crate::tracker::{self, statistics}; fn sample_connect_request() -> ConnectRequest { ConnectRequest { @@ -457,9 +457,8 @@ mod tests { let client_socket_address = sample_ipv4_socket_address(); - let torrent_tracker = Arc::new( - tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), - ); + let torrent_tracker = + Arc::new(core::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap()); handle_connect(client_socket_address, &sample_connect_request(), &torrent_tracker) .await .unwrap(); @@ -475,9 +474,8 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let torrent_tracker = Arc::new( - tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), - 
); + let torrent_tracker = + Arc::new(core::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap()); handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), &torrent_tracker) .await .unwrap(); @@ -567,13 +565,13 @@ mod tests { }; use mockall::predicate::eq; + use crate::core::{self, peer, statistics}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ public_tracker, sample_ipv4_socket_address, tracker_configuration, TorrentPeerBuilder, }; - use crate::tracker::{self, peer, statistics}; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -662,7 +660,7 @@ mod tests { assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } - async fn add_a_torrent_peer_using_ipv6(tracker: Arc) { + async fn add_a_torrent_peer_using_ipv6(tracker: Arc) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -680,7 +678,7 @@ mod tests { .await; } - async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { + async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() .with_connection_id(into_connection_id(&make(&remote_addr))) @@ -717,7 +715,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_announce( @@ -734,11 +732,11 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; 
+ use crate::core::peer; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{public_tracker, TorrentPeerBuilder}; - use crate::tracker::peer; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { @@ -788,13 +786,13 @@ mod tests { }; use mockall::predicate::eq; + use crate::core::{self, peer, statistics}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ public_tracker, sample_ipv6_remote_addr, tracker_configuration, TorrentPeerBuilder, }; - use crate::tracker::{self, peer, statistics}; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -888,7 +886,7 @@ mod tests { assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); } - async fn add_a_torrent_peer_using_ipv4(tracker: Arc) { + async fn add_a_torrent_peer_using_ipv4(tracker: Arc) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -905,7 +903,7 @@ mod tests { .await; } - async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { + async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); let client_port = 8080; @@ -945,7 +943,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new(tracker_configuration(), Some(stats_event_sender), 
statistics::Repo::new()).unwrap(), ); let remote_addr = sample_ipv6_remote_addr(); @@ -963,19 +961,19 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use crate::core; + use crate::core::statistics::Keeper; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::TrackerConfigurationBuilder; - use crate::tracker; - use crate::tracker::statistics::Keeper; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); let tracker = - Arc::new(tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()); + Arc::new(core::Tracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -1025,10 +1023,10 @@ mod tests { }; use super::TorrentPeerBuilder; + use crate::core::{self, peer}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; - use crate::tracker::{self, peer}; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { @@ -1064,7 +1062,7 @@ mod tests { ); } - async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { + async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { let peer_id = peer::Id([255u8; 20]); let peer = TorrentPeerBuilder::default() @@ -1088,7 +1086,7 @@ mod tests { } } - async fn 
add_a_sample_seeder_and_scrape(tracker: Arc) -> Response { + async fn add_a_sample_seeder_and_scrape(tracker: Arc) -> Response { let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -1237,9 +1235,9 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; + use crate::core::{self, statistics}; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr, tracker_configuration}; - use crate::tracker::{self, statistics}; #[tokio::test] async fn should_send_the_upd4_scrape_event() { @@ -1253,7 +1251,7 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( - tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) @@ -1269,9 +1267,9 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; + use crate::core::{self, statistics}; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{sample_ipv6_remote_addr, tracker_configuration}; - use crate::tracker::{self, statistics}; #[tokio::test] async fn should_send_the_upd6_scrape_event() { @@ -1285,7 +1283,7 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( - tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index a50fffd37..985c1cec7 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -53,7 +53,7 @@ //! 
supports only three types of requests: `Connect`, `Announce` and `Scrape`. //! //! Request are parsed from UDP packets using the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol) -//! crate and then handled by the [`Tracker`](crate::tracker::Tracker) struct. +//! crate and then handled by the [`Tracker`](crate::core::Tracker) struct. //! And then the response is also build using the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol) //! and converted to a UDP packet. //! @@ -467,15 +467,15 @@ //! //! ### Scrape //! -//! The `scrape` request allows a peer to get [swarm metadata](crate::tracker::torrent::SwarmMetadata) +//! The `scrape` request allows a peer to get [swarm metadata](crate::core::torrent::SwarmMetadata) //! for multiple torrents at the same time. //! -//! The response contains the [swarm metadata](crate::tracker::torrent::SwarmMetadata) +//! The response contains the [swarm metadata](crate::core::torrent::SwarmMetadata) //! for that torrent: //! -//! - [complete](crate::tracker::torrent::SwarmMetadata::complete) -//! - [downloaded](crate::tracker::torrent::SwarmMetadata::downloaded) -//! - [incomplete](crate::tracker::torrent::SwarmMetadata::incomplete) +//! - [complete](crate::core::torrent::SwarmMetadata::complete) +//! - [downloaded](crate::core::torrent::SwarmMetadata::downloaded) +//! - [incomplete](crate::core::torrent::SwarmMetadata::incomplete) //! //! > **NOTICE**: up to about 74 torrents can be scraped at once. A full scrape //! can't be done with this protocol. This is a limitation of the UDP protocol. 
diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs index 7c83089bb..5168e2578 100644 --- a/src/servers/udp/peer_builder.rs +++ b/src/servers/udp/peer_builder.rs @@ -2,8 +2,8 @@ use std::net::{IpAddr, SocketAddr}; use super::request::AnnounceWrapper; +use crate::core::peer::{Id, Peer}; use crate::shared::clock::{Current, Time}; -use crate::tracker::peer::{Id, Peer}; /// Extracts the [`Peer`] info from the /// announce request. diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 31e87481e..c6b73860b 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -28,10 +28,10 @@ use log::{debug, error, info}; use tokio::net::UdpSocket; use tokio::task::JoinHandle; +use crate::core::Tracker; use crate::servers::signals::shutdown_signal; use crate::servers::udp::handlers::handle_packet; use crate::shared::bit_torrent::udp::MAX_PACKET_SIZE; -use crate::tracker::Tracker; /// Error that can occur when starting or stopping the UDP server. /// diff --git a/src/shared/bit_torrent/common.rs b/src/shared/bit_torrent/common.rs index fd52e098c..0ce345a3e 100644 --- a/src/shared/bit_torrent/common.rs +++ b/src/shared/bit_torrent/common.rs @@ -20,8 +20,8 @@ pub const MAX_SCRAPE_TORRENTS: u8 = 74; /// HTTP tracker authentication key length. /// -/// See function to [`generate`](crate::tracker::auth::generate) the -/// [`ExpiringKeys`](crate::tracker::auth::ExpiringKey) for more information. +/// See function to [`generate`](crate::core::auth::generate) the +/// [`ExpiringKeys`](crate::core::auth::ExpiringKey) for more information. 
pub const AUTH_KEY_LENGTH: usize = 32; #[repr(u32)] diff --git a/tests/common/app.rs b/tests/common/app.rs index ee3fba064..1b735bc86 100644 --- a/tests/common/app.rs +++ b/tests/common/app.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use torrust_tracker::bootstrap; -use torrust_tracker::tracker::Tracker; +use torrust_tracker::core::Tracker; pub fn setup_with_configuration(configuration: &Arc) -> Arc { bootstrap::app::initialize_with_configuration(configuration) diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index 7062c8376..9fd328d5d 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -1,8 +1,8 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use torrust_tracker::core::peer::{self, Id, Peer}; use torrust_tracker::shared::clock::DurationSinceUnixEpoch; -use torrust_tracker::tracker::peer::{self, Id, Peer}; pub struct PeerBuilder { peer: Peer, diff --git a/tests/servers/api/mod.rs b/tests/servers/api/mod.rs index 7022da9b4..155ac0de1 100644 --- a/tests/servers/api/mod.rs +++ b/tests/servers/api/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use torrust_tracker::tracker::Tracker; +use torrust_tracker::core::Tracker; pub mod connection_info; pub mod test_environment; diff --git a/tests/servers/api/test_environment.rs b/tests/servers/api/test_environment.rs index dbb23dcfa..0501d9c56 100644 --- a/tests/servers/api/test_environment.rs +++ b/tests/servers/api/test_environment.rs @@ -1,9 +1,9 @@ use std::sync::Arc; +use torrust_tracker::core::peer::Peer; +use torrust_tracker::core::Tracker; use torrust_tracker::servers::apis::server::{ApiServer, RunningApiServer, StoppedApiServer}; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -use torrust_tracker::tracker::peer::Peer; -use torrust_tracker::tracker::Tracker; use super::connection_info::ConnectionInfo; use crate::common::app::setup_with_configuration; diff --git a/tests/servers/api/v1/contract/context/auth_key.rs 
b/tests/servers/api/v1/contract/context/auth_key.rs index a99272e84..4c59b4e95 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -1,6 +1,6 @@ use std::time::Duration; -use torrust_tracker::tracker::auth::Key; +use torrust_tracker::core::auth::Key; use torrust_tracker_test_helpers::configuration; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; diff --git a/tests/servers/http/client.rs b/tests/servers/http/client.rs index 03ed9aee4..288987c55 100644 --- a/tests/servers/http/client.rs +++ b/tests/servers/http/client.rs @@ -1,7 +1,7 @@ use std::net::IpAddr; use reqwest::{Client as ReqwestClient, Response}; -use torrust_tracker::tracker::auth::Key; +use torrust_tracker::core::auth::Key; use super::requests::announce::{self, Query}; use super::requests::scrape; diff --git a/tests/servers/http/connection_info.rs b/tests/servers/http/connection_info.rs index 5736271fd..f4081d60e 100644 --- a/tests/servers/http/connection_info.rs +++ b/tests/servers/http/connection_info.rs @@ -1,4 +1,4 @@ -use torrust_tracker::tracker::auth::Key; +use torrust_tracker::core::auth::Key; #[derive(Clone, Debug)] pub struct ConnectionInfo { diff --git a/tests/servers/http/requests/announce.rs b/tests/servers/http/requests/announce.rs index f7f25da3e..2cc615d0f 100644 --- a/tests/servers/http/requests/announce.rs +++ b/tests/servers/http/requests/announce.rs @@ -3,8 +3,8 @@ use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; use serde_repr::Serialize_repr; +use torrust_tracker::core::peer::Id; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -use torrust_tracker::tracker::peer::Id; use crate::servers::http::{percent_encode_byte_array, ByteArray20}; diff --git a/tests/servers/http/responses/announce.rs b/tests/servers/http/responses/announce.rs index 8a07ebd5e..a57b41c78 100644 --- a/tests/servers/http/responses/announce.rs +++ 
b/tests/servers/http/responses/announce.rs @@ -1,7 +1,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use serde::{self, Deserialize, Serialize}; -use torrust_tracker::tracker::peer::Peer; +use torrust_tracker::core::peer::Peer; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Announce { diff --git a/tests/servers/http/test_environment.rs b/tests/servers/http/test_environment.rs index 8d0aaba02..e24e1b9a5 100644 --- a/tests/servers/http/test_environment.rs +++ b/tests/servers/http/test_environment.rs @@ -1,9 +1,9 @@ use std::sync::Arc; +use torrust_tracker::core::peer::Peer; +use torrust_tracker::core::Tracker; use torrust_tracker::servers::http::server::{HttpServer, HttpServerLauncher, RunningHttpServer, StoppedHttpServer}; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -use torrust_tracker::tracker::peer::Peer; -use torrust_tracker::tracker::Tracker; use crate::common::app::setup_with_configuration; diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index b19009454..d7f4d50cc 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -93,8 +93,8 @@ mod for_all_config_modes { use local_ip_address::local_ip; use reqwest::Response; use tokio::net::TcpListener; + use torrust_tracker::core::peer; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; - use torrust_tracker::tracker::peer; use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; @@ -869,8 +869,8 @@ mod for_all_config_modes { use std::str::FromStr; use tokio::net::TcpListener; + use torrust_tracker::core::peer; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; - use torrust_tracker::tracker::peer; use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; @@ -1147,8 +1147,8 @@ mod configured_as_whitelisted { mod receiving_an_scrape_request { use std::str::FromStr; + use 
torrust_tracker::core::peer; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; - use torrust_tracker::tracker::peer; use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; @@ -1244,8 +1244,8 @@ mod configured_as_private { use std::str::FromStr; use std::time::Duration; + use torrust_tracker::core::auth::Key; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; - use torrust_tracker::tracker::auth::Key; use torrust_tracker_test_helpers::configuration; use crate::servers::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; @@ -1321,9 +1321,9 @@ mod configured_as_private { use std::str::FromStr; use std::time::Duration; + use torrust_tracker::core::auth::Key; + use torrust_tracker::core::peer; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; - use torrust_tracker::tracker::auth::Key; - use torrust_tracker::tracker::peer; use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; diff --git a/tests/servers/udp/test_environment.rs b/tests/servers/udp/test_environment.rs index 15266d881..dfe19ac86 100644 --- a/tests/servers/udp/test_environment.rs +++ b/tests/servers/udp/test_environment.rs @@ -1,10 +1,10 @@ use std::net::SocketAddr; use std::sync::Arc; +use torrust_tracker::core::peer::Peer; +use torrust_tracker::core::Tracker; use torrust_tracker::servers::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -use torrust_tracker::tracker::peer::Peer; -use torrust_tracker::tracker::Tracker; use crate::common::app::setup_with_configuration; From 985633e17c0ac341bca72b0fe78c30a64bf97394 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 14 Dec 2023 09:45:03 +0000 Subject: [PATCH 0652/1003] feat!: [#537] move API healthcheck endpoint From: `GET /health_check` To: `GET /api/health_check` To avoid collission with HTTP Tracker health check enpoint when the API and the 
HTTP Tracker are using the same domain+port. For example: - API: https://tracker.com:443/api/ - HTTP tracker: https://tracker.com:443/ Old health check endpoints: - API: https://tracker.com:443/health_check - HTTP tracker: https://tracker.com:443/health_check New API health check endpoint: - API: https://tracker.com:443/api/health_check - HTTP tracker: https://tracker.com:443/health_check --- src/bin/http_health_check.rs | 2 +- src/servers/apis/routes.rs | 2 +- src/servers/apis/v1/context/health_check/mod.rs | 4 ++-- src/servers/apis/v1/routes.rs | 2 ++ src/servers/health_check_api/handlers.rs | 2 +- tests/servers/api/v1/contract/context/health_check.rs | 2 +- 6 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/bin/http_health_check.rs b/src/bin/http_health_check.rs index 313f44045..d3f1767cb 100644 --- a/src/bin/http_health_check.rs +++ b/src/bin/http_health_check.rs @@ -11,7 +11,7 @@ async fn main() { let args: Vec = env::args().collect(); if args.len() != 2 { eprintln!("Usage: cargo run --bin http_health_check "); - eprintln!("Example: cargo run --bin http_health_check http://127.0.0.1:1212/health_check"); + eprintln!("Example: cargo run --bin http_health_check http://127.0.0.1:1212/api/health_check"); std::process::exit(1); } diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index 49f263db3..fef412f91 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -29,6 +29,6 @@ pub fn router(tracker: Arc) -> Router { tracker.config.clone(), v1::middlewares::auth::auth, )) - .route("/health_check", get(health_check_handler)) + .route(&format!("{api_url_prefix}/health_check"), get(health_check_handler)) .layer(CompressionLayer::new()) } diff --git a/src/servers/apis/v1/context/health_check/mod.rs b/src/servers/apis/v1/context/health_check/mod.rs index c62c5e97b..b73849511 100644 --- a/src/servers/apis/v1/context/health_check/mod.rs +++ b/src/servers/apis/v1/context/health_check/mod.rs @@ -8,14 +8,14 @@ //! //! 
# Health Check //! -//! `GET /health_check` +//! `GET /api/health_check` //! //! Returns the API status. //! //! **Example request** //! //! ```bash -//! curl "http://127.0.0.1:1212/health_check" +//! curl "http://127.0.0.1:1212/api/health_check" //! ``` //! //! **Example response** `200` diff --git a/src/servers/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs index 48b795573..3786b3532 100644 --- a/src/servers/apis/v1/routes.rs +++ b/src/servers/apis/v1/routes.rs @@ -9,8 +9,10 @@ use crate::core::Tracker; /// Add the routes for the v1 API. pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { let v1_prefix = format!("{prefix}/v1"); + let router = auth_key::routes::add(&v1_prefix, router, tracker.clone()); let router = stats::routes::add(&v1_prefix, router, tracker.clone()); let router = whitelist::routes::add(&v1_prefix, router, tracker.clone()); + torrent::routes::add(&v1_prefix, router, tracker) } diff --git a/src/servers/health_check_api/handlers.rs b/src/servers/health_check_api/handlers.rs index 109b89bb4..2f47c8607 100644 --- a/src/servers/health_check_api/handlers.rs +++ b/src/servers/health_check_api/handlers.rs @@ -45,7 +45,7 @@ async fn api_health_check(config: &HttpApi) -> Option> { let addr: SocketAddr = config.bind_address.parse().expect("invalid socket address for API"); if addr.port() != UNKNOWN_PORT { - let health_check_url = format!("http://{addr}/health_check"); + let health_check_url = format!("http://{addr}/api/health_check"); if !get_req_is_ok(&health_check_url).await { return Some(responses::error(format!( diff --git a/tests/servers/api/v1/contract/context/health_check.rs b/tests/servers/api/v1/contract/context/health_check.rs index 3b6c98374..108ae237a 100644 --- a/tests/servers/api/v1/contract/context/health_check.rs +++ b/tests/servers/api/v1/contract/context/health_check.rs @@ -8,7 +8,7 @@ use crate::servers::api::v1::client::get; async fn health_check_endpoint_should_return_status_ok_if_api_is_running() { let test_env = 
running_test_environment(configuration::ephemeral()).await; - let url = format!("http://{}/health_check", test_env.get_connection_info().bind_address); + let url = format!("http://{}/api/health_check", test_env.get_connection_info().bind_address); let response = get(&url, None).await; From 6087e4fa1e16e9a00a61aee470897335812b387c Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 16 Dec 2023 17:21:11 +0000 Subject: [PATCH 0653/1003] feat: added benchmarking binary for torrent repository --- Cargo.lock | 97 ++++++ Cargo.toml | 6 +- .../torrent-repository-benchmarks/Cargo.toml | 21 ++ .../torrent-repository-benchmarks/src/args.rs | 15 + .../src/benches/asyn.rs | 176 ++++++++++ .../src/benches/mod.rs | 3 + .../src/benches/sync.rs | 166 ++++++++++ .../src/benches/utils.rs | 73 +++++ .../torrent-repository-benchmarks/src/lib.rs | 2 + .../torrent-repository-benchmarks/src/main.rs | 139 ++++++++ src/core/mod.rs | 133 ++++---- src/core/services/torrent.rs | 33 +- src/core/{torrent.rs => torrent/mod.rs} | 2 + src/core/torrent/repository.rs | 301 ++++++++++++++++++ tests/wrk_benchmark_announce.lua | 64 ++-- 15 files changed, 1110 insertions(+), 121 deletions(-) create mode 100644 packages/torrent-repository-benchmarks/Cargo.toml create mode 100644 packages/torrent-repository-benchmarks/src/args.rs create mode 100644 packages/torrent-repository-benchmarks/src/benches/asyn.rs create mode 100644 packages/torrent-repository-benchmarks/src/benches/mod.rs create mode 100644 packages/torrent-repository-benchmarks/src/benches/sync.rs create mode 100644 packages/torrent-repository-benchmarks/src/benches/utils.rs create mode 100644 packages/torrent-repository-benchmarks/src/lib.rs create mode 100644 packages/torrent-repository-benchmarks/src/main.rs rename src/core/{torrent.rs => torrent/mod.rs} (99%) create mode 100644 src/core/torrent/repository.rs diff --git a/Cargo.lock b/Cargo.lock index 30a961b14..4e3bb2f62 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -91,12 +91,54 @@ version = 
"0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" +[[package]] +name = "anstream" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "utf8parse", +] + [[package]] name = "anstyle" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" +[[package]] +name = "anstyle-parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" +dependencies = [ + "anstyle", + "windows-sys 0.48.0", +] + [[package]] name = "aquatic_udp_protocol" version = "0.8.0" @@ -160,6 +202,7 @@ checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", "axum-core", + "axum-macros", "bitflags 1.3.2", "bytes", "futures-util", @@ -212,6 +255,18 @@ dependencies = [ "tower-service", ] +[[package]] +name = "axum-macros" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdca6a10ecad987bda04e95606ef85a5417dcaac1a78455242d72e031e2b6b62" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 
2.0.39", +] + [[package]] name = "axum-server" version = "0.5.1" @@ -515,6 +570,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fffed7514f420abec6d183b1d3acfd9099c79c3a10a06ade4f8203f1411272" dependencies = [ "clap_builder", + "clap_derive", ] [[package]] @@ -523,8 +579,22 @@ version = "4.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63361bae7eef3771745f02d8d892bec2fee5f6e34af316ba556e7f97a7069ff1" dependencies = [ + "anstream", "anstyle", "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.39", ] [[package]] @@ -542,6 +612,12 @@ dependencies = [ "cc", ] +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "config" version = "0.13.4" @@ -612,6 +688,7 @@ dependencies = [ "ciborium", "clap", "criterion-plot", + "futures", "is-terminal", "itertools", "num-traits", @@ -624,6 +701,7 @@ dependencies = [ "serde_derive", "serde_json", "tinytemplate", + "tokio", "walkdir", ] @@ -3253,6 +3331,17 @@ dependencies = [ "winnow", ] +[[package]] +name = "torrust-torrent-repository-benchmarks" +version = "3.0.0-alpha.12-develop" +dependencies = [ + "aquatic_udp_protocol", + "clap", + "futures", + "tokio", + "torrust-tracker", +] + [[package]] name = "torrust-tracker" version = "3.0.0-alpha.12-develop" @@ -3265,6 +3354,7 @@ dependencies = [ "binascii", "chrono", "config", + "criterion", "derive_more", "fern", "futures", @@ -3274,6 +3364,7 @@ dependencies = [ "log", "mockall", "multimap", + "once_cell", "openssl", "percent-encoding", "r2d2", @@ -3486,6 +3577,12 @@ dependencies = [ "percent-encoding", ] 
+[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + [[package]] name = "uuid" version = "1.6.1" diff --git a/Cargo.toml b/Cargo.toml index 5f55c6c5c..4f2abd6f5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,7 +32,7 @@ version = "3.0.0-alpha.12-develop" [dependencies] aquatic_udp_protocol = "0" async-trait = "0" -axum = "0.6" +axum = { version = "0.6", features = ["macros"] } axum-client-ip = "0.4" axum-server = { version = "0", features = ["tls-rustls"] } binascii = "0" @@ -68,8 +68,10 @@ tower-http = { version = "0.4", features = ["compression-full"] } uuid = { version = "1", features = ["v4"] } [dev-dependencies] +criterion = { version = "0.5.1", features = ["async_tokio"] } local-ip-address = "0" mockall = "0" +once_cell = "1.18.0" reqwest = { version = "0", features = ["json"] } serde_bytes = "0" serde_repr = "0" @@ -77,7 +79,7 @@ serde_urlencoded = "0" torrust-tracker-test-helpers = { version = "3.0.0-alpha.12-develop", path = "packages/test-helpers" } [workspace] -members = ["contrib/bencode", "packages/configuration", "packages/located-error", "packages/primitives", "packages/test-helpers"] +members = ["contrib/bencode", "packages/configuration", "packages/located-error", "packages/primitives", "packages/test-helpers", "packages/torrent-repository-benchmarks"] [profile.dev] debug = 1 diff --git a/packages/torrent-repository-benchmarks/Cargo.toml b/packages/torrent-repository-benchmarks/Cargo.toml new file mode 100644 index 000000000..da9aba621 --- /dev/null +++ b/packages/torrent-repository-benchmarks/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "torrust-torrent-repository-benchmarks" +authors.workspace = true +categories.workspace = true +description.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true 
+publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +aquatic_udp_protocol = "0.8.0" +clap = { version = "4.4.8", features = ["derive"] } +futures = "0.3.29" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker = { path = "../../" } \ No newline at end of file diff --git a/packages/torrent-repository-benchmarks/src/args.rs b/packages/torrent-repository-benchmarks/src/args.rs new file mode 100644 index 000000000..3a38c55a7 --- /dev/null +++ b/packages/torrent-repository-benchmarks/src/args.rs @@ -0,0 +1,15 @@ +use clap::Parser; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +pub struct Args { + /// Amount of benchmark worker threads + #[arg(short, long)] + pub threads: usize, + /// Amount of time in ns a thread will sleep to simulate a client response after handling a task + #[arg(short, long)] + pub sleep: Option, + /// Compare with old implementations of the torrent repository + #[arg(short, long)] + pub compare: Option, +} diff --git a/packages/torrent-repository-benchmarks/src/benches/asyn.rs b/packages/torrent-repository-benchmarks/src/benches/asyn.rs new file mode 100644 index 000000000..33f9e85fa --- /dev/null +++ b/packages/torrent-repository-benchmarks/src/benches/asyn.rs @@ -0,0 +1,176 @@ +use std::sync::Arc; +use std::time::Duration; + +use clap::Parser; +use futures::stream::FuturesUnordered; +use torrust_tracker::core::torrent::repository::TRepositoryAsync; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + +use crate::args::Args; +use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; + +pub async fn async_add_one_torrent(samples: usize) -> (Duration, Duration) { + let mut results: Vec = Vec::with_capacity(samples); + + for _ in 0..samples { + let torrent_repository = Arc::new(T::new()); + + let info_hash = 
InfoHash([0; 20]); + + let start_time = std::time::Instant::now(); + + torrent_repository + .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) + .await; + + let result = start_time.elapsed(); + + results.push(result); + } + + get_average_and_adjusted_average_from_results(results) +} + +// Add one torrent ten thousand times in parallel (depending on the set worker threads) +pub async fn async_update_one_torrent_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: usize, +) -> (Duration, Duration) { + let args = Args::parse(); + let mut results: Vec = Vec::with_capacity(samples); + + for _ in 0..samples { + let torrent_repository = Arc::new(T::new()); + let info_hash: &'static InfoHash = &InfoHash([0; 20]); + let handles = FuturesUnordered::new(); + + // Add the torrent/peer to the torrent repository + torrent_repository + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; + + let start_time = std::time::Instant::now(); + + for _ in 0..10_000 { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; + + if let Some(sleep_time) = args.sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + let result = start_time.elapsed(); + + results.push(result); + } + + get_average_and_adjusted_average_from_results(results) +} + +// Add ten thousand torrents in parallel (depending on the set worker threads) +pub async fn async_add_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: usize, +) -> (Duration, Duration) { + let args = Args::parse(); + let mut results: Vec = Vec::with_capacity(samples); + + for _ in 0..samples { + let torrent_repository = Arc::new(T::new()); + let 
info_hashes = generate_unique_info_hashes(10_000); + let handles = FuturesUnordered::new(); + + let start_time = std::time::Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) + .await; + + if let Some(sleep_time) = args.sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + let result = start_time.elapsed(); + + results.push(result); + } + + get_average_and_adjusted_average_from_results(results) +} + +// Async update ten thousand torrents in parallel (depending on the set worker threads) +pub async fn async_update_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: usize, +) -> (Duration, Duration) { + let args = Args::parse(); + let mut results: Vec = Vec::with_capacity(samples); + + for _ in 0..samples { + let torrent_repository = Arc::new(T::new()); + let info_hashes = generate_unique_info_hashes(10_000); + let handles = FuturesUnordered::new(); + + // Add the torrents/peers to the torrent repository + for info_hash in &info_hashes { + torrent_repository + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; + } + + let start_time = std::time::Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) + .await; + + if let Some(sleep_time) = args.sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + 
futures::future::join_all(handles).await; + + let result = start_time.elapsed(); + + results.push(result); + } + + get_average_and_adjusted_average_from_results(results) +} diff --git a/packages/torrent-repository-benchmarks/src/benches/mod.rs b/packages/torrent-repository-benchmarks/src/benches/mod.rs new file mode 100644 index 000000000..1026aa4bf --- /dev/null +++ b/packages/torrent-repository-benchmarks/src/benches/mod.rs @@ -0,0 +1,3 @@ +pub mod asyn; +pub mod sync; +pub mod utils; diff --git a/packages/torrent-repository-benchmarks/src/benches/sync.rs b/packages/torrent-repository-benchmarks/src/benches/sync.rs new file mode 100644 index 000000000..dac7ab810 --- /dev/null +++ b/packages/torrent-repository-benchmarks/src/benches/sync.rs @@ -0,0 +1,166 @@ +use std::sync::Arc; +use std::time::Duration; + +use clap::Parser; +use futures::stream::FuturesUnordered; +use torrust_tracker::core::torrent::repository::Repository; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + +use crate::args::Args; +use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; + +// Simply add one torrent +#[must_use] +pub fn add_one_torrent(samples: usize) -> (Duration, Duration) { + let mut results: Vec = Vec::with_capacity(samples); + + for _ in 0..samples { + let torrent_repository = Arc::new(T::new()); + + let info_hash = InfoHash([0; 20]); + + let start_time = std::time::Instant::now(); + + torrent_repository.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); + + let result = start_time.elapsed(); + + results.push(result); + } + + get_average_and_adjusted_average_from_results(results) +} + +// Add one torrent ten thousand times in parallel (depending on the set worker threads) +pub async fn update_one_torrent_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: usize, +) -> (Duration, Duration) { + let args = Args::parse(); + let mut results: Vec = Vec::with_capacity(samples); + + 
for _ in 0..samples { + let torrent_repository = Arc::new(T::new()); + let info_hash: &'static InfoHash = &InfoHash([0; 20]); + let handles = FuturesUnordered::new(); + + // Add the torrent/peer to the torrent repository + torrent_repository.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); + + let start_time = std::time::Instant::now(); + + for _ in 0..10_000 { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); + + if let Some(sleep_time) = args.sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + let result = start_time.elapsed(); + + results.push(result); + } + + get_average_and_adjusted_average_from_results(results) +} + +// Add ten thousand torrents in parallel (depending on the set worker threads) +pub async fn add_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: usize, +) -> (Duration, Duration) { + let args = Args::parse(); + let mut results: Vec = Vec::with_capacity(samples); + + for _ in 0..samples { + let torrent_repository = Arc::new(T::new()); + let info_hashes = generate_unique_info_hashes(10_000); + let handles = FuturesUnordered::new(); + + let start_time = std::time::Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); + + if let Some(sleep_time) = args.sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + let 
result = start_time.elapsed(); + + results.push(result); + } + + get_average_and_adjusted_average_from_results(results) +} + +// Update ten thousand torrents in parallel (depending on the set worker threads) +pub async fn update_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: usize, +) -> (Duration, Duration) { + let args = Args::parse(); + let mut results: Vec = Vec::with_capacity(samples); + + for _ in 0..samples { + let torrent_repository = Arc::new(T::new()); + let info_hashes = generate_unique_info_hashes(10_000); + let handles = FuturesUnordered::new(); + + // Add the torrents/peers to the torrent repository + for info_hash in &info_hashes { + torrent_repository.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); + } + + let start_time = std::time::Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); + + if let Some(sleep_time) = args.sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + let result = start_time.elapsed(); + + results.push(result); + } + + get_average_and_adjusted_average_from_results(results) +} diff --git a/packages/torrent-repository-benchmarks/src/benches/utils.rs b/packages/torrent-repository-benchmarks/src/benches/utils.rs new file mode 100644 index 000000000..ef1640038 --- /dev/null +++ b/packages/torrent-repository-benchmarks/src/benches/utils.rs @@ -0,0 +1,73 @@ +use std::collections::HashSet; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::time::Duration; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use torrust_tracker::core::peer::{Id, Peer}; +use 
torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker::shared::clock::DurationSinceUnixEpoch; + +pub const DEFAULT_PEER: Peer = Peer { + peer_id: Id([0; 20]), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::from_secs(0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, +}; + +#[must_use] +#[allow(clippy::missing_panics_doc)] +pub fn generate_unique_info_hashes(size: usize) -> Vec { + let mut result = HashSet::new(); + + let mut bytes = [0u8; 20]; + + #[allow(clippy::cast_possible_truncation)] + for i in 0..size { + bytes[0] = (i & 0xFF) as u8; + bytes[1] = ((i >> 8) & 0xFF) as u8; + bytes[2] = ((i >> 16) & 0xFF) as u8; + bytes[3] = ((i >> 24) & 0xFF) as u8; + + let info_hash = InfoHash(bytes); + result.insert(info_hash); + } + + assert_eq!(result.len(), size); + + result.into_iter().collect() +} + +#[must_use] +pub fn within_acceptable_range(test: &Duration, norm: &Duration) -> bool { + let test_secs = test.as_secs_f64(); + let norm_secs = norm.as_secs_f64(); + + // Calculate the upper and lower bounds for the 10% tolerance + let tolerance = norm_secs * 0.1; + + // Calculate the upper and lower limits + let upper_limit = norm_secs + tolerance; + let lower_limit = norm_secs - tolerance; + + test_secs < upper_limit && test_secs > lower_limit +} + +#[must_use] +pub fn get_average_and_adjusted_average_from_results(mut results: Vec) -> (Duration, Duration) { + #[allow(clippy::cast_possible_truncation)] + let average = results.iter().sum::() / results.len() as u32; + + results.retain(|result| within_acceptable_range(result, &average)); + + let mut adjusted_average = Duration::from_nanos(0); + + #[allow(clippy::cast_possible_truncation)] + if results.len() > 1 { + adjusted_average = results.iter().sum::() / results.len() as u32; + } + + (average, adjusted_average) +} diff --git 
a/packages/torrent-repository-benchmarks/src/lib.rs b/packages/torrent-repository-benchmarks/src/lib.rs new file mode 100644 index 000000000..58ebc2057 --- /dev/null +++ b/packages/torrent-repository-benchmarks/src/lib.rs @@ -0,0 +1,2 @@ +pub mod args; +pub mod benches; diff --git a/packages/torrent-repository-benchmarks/src/main.rs b/packages/torrent-repository-benchmarks/src/main.rs new file mode 100644 index 000000000..0d9db73ac --- /dev/null +++ b/packages/torrent-repository-benchmarks/src/main.rs @@ -0,0 +1,139 @@ +use clap::Parser; +use torrust_torrent_repository_benchmarks::args::Args; +use torrust_torrent_repository_benchmarks::benches::asyn::{ + async_add_multiple_torrents_in_parallel, async_add_one_torrent, async_update_multiple_torrents_in_parallel, + async_update_one_torrent_in_parallel, +}; +use torrust_torrent_repository_benchmarks::benches::sync::{ + add_multiple_torrents_in_parallel, add_one_torrent, update_multiple_torrents_in_parallel, update_one_torrent_in_parallel, +}; +use torrust_tracker::core::torrent::repository::{AsyncSync, RepositoryAsync, RepositoryAsyncSingle, Sync, SyncSingle}; + +#[allow(clippy::too_many_lines)] +#[allow(clippy::print_literal)] +fn main() { + let args = Args::parse(); + + // Add 1 to worker_threads since we need a thread that awaits the benchmark + let rt = tokio::runtime::Builder::new_multi_thread() + .worker_threads(args.threads + 1) + .enable_time() + .build() + .unwrap(); + + println!("tokio::sync::RwLock>"); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_one_torrent", + rt.block_on(async_add_one_torrent::(1_000_000)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "update_one_torrent_in_parallel", + rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_multiple_torrents_in_parallel", + rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "update_multiple_torrents_in_parallel", + 
rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) + ); + + if let Some(true) = args.compare { + println!(); + + println!("std::sync::RwLock>"); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_one_torrent", + add_one_torrent::(1_000_000) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "update_one_torrent_in_parallel", + rt.block_on(update_one_torrent_in_parallel::(&rt, 10)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_multiple_torrents_in_parallel", + rt.block_on(add_multiple_torrents_in_parallel::(&rt, 10)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "update_multiple_torrents_in_parallel", + rt.block_on(update_multiple_torrents_in_parallel::(&rt, 10)) + ); + + println!(); + + println!("std::sync::RwLock>>>"); + println!("{}: Avg/AdjAvg: {:?}", "add_one_torrent", add_one_torrent::(1_000_000)); + println!( + "{}: Avg/AdjAvg: {:?}", + "update_one_torrent_in_parallel", + rt.block_on(update_one_torrent_in_parallel::(&rt, 10)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_multiple_torrents_in_parallel", + rt.block_on(add_multiple_torrents_in_parallel::(&rt, 10)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "update_multiple_torrents_in_parallel", + rt.block_on(update_multiple_torrents_in_parallel::(&rt, 10)) + ); + + println!(); + + println!("tokio::sync::RwLock>>>"); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_one_torrent", + rt.block_on(async_add_one_torrent::(1_000_000)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "update_one_torrent_in_parallel", + rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_multiple_torrents_in_parallel", + rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "update_multiple_torrents_in_parallel", + rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) + ); + + println!(); + + println!("tokio::sync::RwLock>>>"); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_one_torrent", + 
rt.block_on(async_add_one_torrent::(1_000_000)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "update_one_torrent_in_parallel", + rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_multiple_torrents_in_parallel", + rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "update_multiple_torrents_in_parallel", + rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) + ); + } +} diff --git a/src/core/mod.rs b/src/core/mod.rs index ed6ba6a8d..caac5b1ea 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -12,9 +12,9 @@ //! ```text //! Delivery layer Domain layer //! -//! HTTP tracker | +//! HTTP tracker | //! UDP tracker |> Core tracker -//! Tracker REST API | +//! Tracker REST API | //! ``` //! //! # Table of contents @@ -439,23 +439,23 @@ pub mod services; pub mod statistics; pub mod torrent; -use std::collections::btree_map::Entry; use std::collections::{BTreeMap, HashMap}; use std::net::IpAddr; use std::panic::Location; use std::sync::Arc; use std::time::Duration; +use futures::future::join_all; use tokio::sync::mpsc::error::SendError; -use tokio::sync::{RwLock, RwLockReadGuard}; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::TrackerMode; use self::auth::Key; use self::error::Error; use self::peer::Peer; -use self::torrent::{SwarmMetadata, SwarmStats}; +use self::torrent::repository::{RepositoryAsyncSingle, TRepositoryAsync}; use crate::core::databases::Database; +use crate::core::torrent::{SwarmMetadata, SwarmStats}; use crate::shared::bit_torrent::info_hash::InfoHash; /// The domain layer tracker service. 
@@ -472,11 +472,11 @@ pub struct Tracker { pub config: Arc, /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) /// or [`MySQL`](crate::core::databases::mysql) - pub database: Box, + pub database: Arc>, mode: TrackerMode, - keys: RwLock>, - whitelist: RwLock>, - torrents: RwLock>, + keys: tokio::sync::RwLock>, + whitelist: tokio::sync::RwLock>, + pub torrents: Arc, stats_event_sender: Option>, stats_repository: statistics::Repo, } @@ -562,16 +562,16 @@ impl Tracker { stats_event_sender: Option>, stats_repository: statistics::Repo, ) -> Result { - let database = databases::driver::build(&config.db_driver, &config.db_path)?; + let database = Arc::new(databases::driver::build(&config.db_driver, &config.db_path)?); let mode = config.mode; Ok(Tracker { config, mode, - keys: RwLock::new(std::collections::HashMap::new()), - whitelist: RwLock::new(std::collections::HashSet::new()), - torrents: RwLock::new(std::collections::BTreeMap::new()), + keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), + whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), + torrents: Arc::new(RepositoryAsyncSingle::new()), stats_event_sender, stats_repository, database, @@ -654,7 +654,8 @@ impl Tracker { /// It returns the data for a `scrape` response. 
async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { - let torrents = self.get_torrents().await; + let torrents = self.torrents.get_torrents().await; + match torrents.get(info_hash) { Some(torrent_entry) => torrent_entry.get_swarm_metadata(), None => SwarmMetadata::default(), @@ -672,7 +673,7 @@ impl Tracker { pub async fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; - let mut torrents = self.torrents.write().await; + let mut torrents = self.torrents.get_torrents_mut().await; for (info_hash, completed) in persistent_torrents { // Skip if torrent entry already exists @@ -692,7 +693,7 @@ impl Tracker { } async fn get_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec { - let read_lock = self.torrents.read().await; + let read_lock = self.torrents.get_torrents().await; match read_lock.get(info_hash) { None => vec![], @@ -704,7 +705,7 @@ impl Tracker { /// /// Get all torrent peers for a given torrent pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { - let read_lock = self.torrents.read().await; + let read_lock = self.torrents.get_torrents().await; match read_lock.get(info_hash) { None => vec![], @@ -721,79 +722,87 @@ impl Tracker { // code-review: consider splitting the function in two (command and query segregation). 
// `update_torrent_with_peer` and `get_stats` - let mut torrents = self.torrents.write().await; - - let torrent_entry = match torrents.entry(*info_hash) { - Entry::Vacant(vacant) => vacant.insert(torrent::Entry::new()), - Entry::Occupied(entry) => entry.into_mut(), - }; - - let stats_updated = torrent_entry.update_peer(peer); + let (stats, stats_updated) = self.torrents.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - // todo: move this action to a separate worker if self.config.persistent_torrent_completed_stat && stats_updated { - drop( - self.database - .save_persistent_torrent(info_hash, torrent_entry.completed) - .await, - ); - } + let completed = stats.completed; + let info_hash = *info_hash; - let (seeders, completed, leechers) = torrent_entry.get_stats(); - - torrent::SwarmStats { - completed, - seeders, - leechers, + drop(self.database.save_persistent_torrent(&info_hash, completed).await); } - } - pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { - self.torrents.read().await + stats } /// It calculates and returns the general `Tracker` /// [`TorrentsMetrics`] /// /// # Context: Tracker + /// + /// # Panics + /// Panics if unable to get the torrent metrics. 
pub async fn get_torrents_metrics(&self) -> TorrentsMetrics { - let mut torrents_metrics = TorrentsMetrics { + let arc_torrents_metrics = Arc::new(tokio::sync::Mutex::new(TorrentsMetrics { seeders: 0, completed: 0, leechers: 0, torrents: 0, - }; + })); + + let db = self.torrents.get_torrents().await.clone(); + + let futures = db + .values() + .map(|torrent_entry| { + let torrent_entry = torrent_entry.clone(); + let torrents_metrics = arc_torrents_metrics.clone(); + + async move { + tokio::spawn(async move { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + torrents_metrics.lock().await.seeders += u64::from(seeders); + torrents_metrics.lock().await.completed += u64::from(completed); + torrents_metrics.lock().await.leechers += u64::from(leechers); + torrents_metrics.lock().await.torrents += 1; + }) + .await + .expect("Error torrent_metrics spawn"); + } + }) + .collect::>(); - let db = self.get_torrents().await; + join_all(futures).await; - db.values().for_each(|torrent_entry| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - torrents_metrics.seeders += u64::from(seeders); - torrents_metrics.completed += u64::from(completed); - torrents_metrics.leechers += u64::from(leechers); - torrents_metrics.torrents += 1; - }); + let torrents_metrics = Arc::try_unwrap(arc_torrents_metrics).expect("Could not unwrap arc_torrents_metrics"); - torrents_metrics + torrents_metrics.into_inner() } /// Remove inactive peers and (optionally) peerless torrents /// /// # Context: Tracker pub async fn cleanup_torrents(&self) { - let mut torrents_lock = self.torrents.write().await; + let mut torrents_lock = self.torrents.get_torrents_mut().await; // If we don't need to remove torrents we will use the faster iter if self.config.remove_peerless_torrents { - torrents_lock.retain(|_, torrent_entry| { + let mut cleaned_torrents_map: BTreeMap = BTreeMap::new(); + + for (info_hash, torrent_entry) in &mut *torrents_lock { 
torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); - if self.config.persistent_torrent_completed_stat { - torrent_entry.completed > 0 || !torrent_entry.peers.is_empty() - } else { - !torrent_entry.peers.is_empty() + if torrent_entry.peers.is_empty() { + continue; } - }); + + if self.config.persistent_torrent_completed_stat && torrent_entry.completed == 0 { + continue; + } + + cleaned_torrents_map.insert(*info_hash, torrent_entry.clone()); + } + + *torrents_lock = cleaned_torrents_map; } else { for torrent_entry in (*torrents_lock).values_mut() { torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); @@ -1074,7 +1083,7 @@ impl Tracker { /// It return the `Tracker` [`statistics::Metrics`]. /// /// # Context: Statistics - pub async fn get_stats(&self) -> RwLockReadGuard<'_, statistics::Metrics> { + pub async fn get_stats(&self) -> tokio::sync::RwLockReadGuard<'_, statistics::Metrics> { self.stats_repository.get_stats().await } @@ -1786,11 +1795,11 @@ mod tests { assert_eq!(swarm_stats.completed, 1); // Remove the newly updated torrent from memory - tracker.torrents.write().await.remove(&info_hash); + tracker.torrents.get_torrents_mut().await.remove(&info_hash); tracker.load_torrents_from_database().await.unwrap(); - let torrents = tracker.get_torrents().await; + let torrents = tracker.torrents.get_torrents().await; assert!(torrents.contains_key(&info_hash)); let torrent_entry = torrents.get(&info_hash).unwrap(); diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 651f40cab..918a80bae 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -93,7 +93,7 @@ impl Default for Pagination { /// It returns all the information the tracker has about one torrent in a [Info] struct. 
pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { - let db = tracker.get_torrents().await; + let db = tracker.torrents.get_torrents().await; let torrent_entry_option = db.get(info_hash); @@ -118,21 +118,22 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op /// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. pub async fn get_torrents(tracker: Arc, pagination: &Pagination) -> Vec { - let db = tracker.get_torrents().await; - - db.iter() - .map(|(info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - BasicInfo { - info_hash: *info_hash, - seeders: u64::from(seeders), - completed: u64::from(completed), - leechers: u64::from(leechers), - } - }) - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .collect() + let db = tracker.torrents.get_torrents().await; + + let mut basic_infos: Vec = vec![]; + + for (info_hash, torrent_entry) in db.iter().skip(pagination.offset as usize).take(pagination.limit as usize) { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + + basic_infos.push(BasicInfo { + info_hash: *info_hash, + seeders: u64::from(seeders), + completed: u64::from(completed), + leechers: u64::from(leechers), + }); + } + + basic_infos } #[cfg(test)] diff --git a/src/core/torrent.rs b/src/core/torrent/mod.rs similarity index 99% rename from src/core/torrent.rs rename to src/core/torrent/mod.rs index 8167aa2db..a49e218a9 100644 --- a/src/core/torrent.rs +++ b/src/core/torrent/mod.rs @@ -28,6 +28,8 @@ //! Peer that don not have a full copy of the torrent data are called "leechers". //! //! > **NOTICE**: that both [`SwarmMetadata`] and [`SwarmStats`] contain the same information. [`SwarmMetadata`] is using the names used on [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). 
+pub mod repository; + use std::time::Duration; use aquatic_udp_protocol::AnnounceEvent; diff --git a/src/core/torrent/repository.rs b/src/core/torrent/repository.rs new file mode 100644 index 000000000..62df9b510 --- /dev/null +++ b/src/core/torrent/repository.rs @@ -0,0 +1,301 @@ +use std::sync::Arc; + +use crate::core::peer; +use crate::core::torrent::{Entry, SwarmStats}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +pub trait Repository { + fn new() -> Self; + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool); +} + +pub trait TRepositoryAsync { + fn new() -> Self; + fn update_torrent_with_peer_and_get_stats( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + ) -> impl std::future::Future + Send; +} + +/// Structure that holds all torrents. Using `std::sync` locks. +pub struct Sync { + torrents: std::sync::RwLock>>>, +} + +impl Sync { + /// Returns the get torrents of this [`Sync`]. + /// + /// # Panics + /// + /// Panics if unable to read the torrent. + pub fn get_torrents( + &self, + ) -> std::sync::RwLockReadGuard<'_, std::collections::BTreeMap>>> { + self.torrents.read().expect("unable to get torrent list") + } + + /// Returns the mutable get torrents of this [`Sync`]. + /// + /// # Panics + /// + /// Panics if unable to write to the torrents list. 
+ pub fn get_torrents_mut( + &self, + ) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap>>> { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl Repository for Sync { + fn new() -> Self { + Self { + torrents: std::sync::RwLock::new(std::collections::BTreeMap::new()), + } + } + + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); + + let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut(); + let entry = torrents_lock + .entry(*info_hash) + .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); + entry.clone() + }; + + let (stats, stats_updated) = { + let mut torrent_entry_lock = torrent_entry.lock().unwrap(); + let stats_updated = torrent_entry_lock.update_peer(peer); + let stats = torrent_entry_lock.get_stats(); + + (stats, stats_updated) + }; + + ( + SwarmStats { + completed: stats.1, + seeders: stats.0, + leechers: stats.2, + }, + stats_updated, + ) + } +} + +/// Structure that holds all torrents. Using `std::sync` locks. +pub struct SyncSingle { + torrents: std::sync::RwLock>, +} + +impl SyncSingle { + /// Returns the get torrents of this [`SyncSingle`]. + /// + /// # Panics + /// + /// Panics if unable to get torrent list. + pub fn get_torrents(&self) -> std::sync::RwLockReadGuard<'_, std::collections::BTreeMap> { + self.torrents.read().expect("unable to get torrent list") + } + + /// Returns the get torrents of this [`SyncSingle`]. + /// + /// # Panics + /// + /// Panics if unable to get writable torrent list. 
+ pub fn get_torrents_mut(&self) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl Repository for SyncSingle { + fn new() -> Self { + Self { + torrents: std::sync::RwLock::new(std::collections::BTreeMap::new()), + } + } + + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let mut torrents = self.torrents.write().unwrap(); + + let torrent_entry = match torrents.entry(*info_hash) { + std::collections::btree_map::Entry::Vacant(vacant) => vacant.insert(Entry::new()), + std::collections::btree_map::Entry::Occupied(entry) => entry.into_mut(), + }; + + let stats_updated = torrent_entry.update_peer(peer); + let stats = torrent_entry.get_stats(); + + ( + SwarmStats { + completed: stats.1, + seeders: stats.0, + leechers: stats.2, + }, + stats_updated, + ) + } +} + +/// Structure that holds all torrents. Using `tokio::sync` locks. +#[allow(clippy::module_name_repetitions)] +pub struct RepositoryAsync { + torrents: tokio::sync::RwLock>>>, +} + +impl TRepositoryAsync for RepositoryAsync { + fn new() -> Self { + Self { + torrents: tokio::sync::RwLock::new(std::collections::BTreeMap::new()), + } + } + + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); + + let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut().await; + let entry = torrents_lock + .entry(*info_hash) + .or_insert(Arc::new(tokio::sync::Mutex::new(Entry::new()))); + entry.clone() + }; + + let (stats, stats_updated) = { + let mut torrent_entry_lock = torrent_entry.lock().await; + let stats_updated = torrent_entry_lock.update_peer(peer); + let stats = 
torrent_entry_lock.get_stats(); + + (stats, stats_updated) + }; + + ( + SwarmStats { + completed: stats.1, + seeders: stats.0, + leechers: stats.2, + }, + stats_updated, + ) + } +} + +impl RepositoryAsync { + pub async fn get_torrents( + &self, + ) -> tokio::sync::RwLockReadGuard<'_, std::collections::BTreeMap>>> { + self.torrents.read().await + } + + pub async fn get_torrents_mut( + &self, + ) -> tokio::sync::RwLockWriteGuard<'_, std::collections::BTreeMap>>> { + self.torrents.write().await + } +} + +/// Structure that holds all torrents. Using a `tokio::sync` lock for the torrents map an`std::sync`nc lock for the inner torrent entry. +pub struct AsyncSync { + torrents: tokio::sync::RwLock>>>, +} + +impl TRepositoryAsync for AsyncSync { + fn new() -> Self { + Self { + torrents: tokio::sync::RwLock::new(std::collections::BTreeMap::new()), + } + } + + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); + + let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut().await; + let entry = torrents_lock + .entry(*info_hash) + .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); + entry.clone() + }; + + let (stats, stats_updated) = { + let mut torrent_entry_lock = torrent_entry.lock().unwrap(); + let stats_updated = torrent_entry_lock.update_peer(peer); + let stats = torrent_entry_lock.get_stats(); + + (stats, stats_updated) + }; + + ( + SwarmStats { + completed: stats.1, + seeders: stats.0, + leechers: stats.2, + }, + stats_updated, + ) + } +} + +impl AsyncSync { + pub async fn get_torrents( + &self, + ) -> tokio::sync::RwLockReadGuard<'_, std::collections::BTreeMap>>> { + self.torrents.read().await + } + + pub async fn get_torrents_mut( + &self, + ) -> tokio::sync::RwLockWriteGuard<'_, 
std::collections::BTreeMap>>> { + self.torrents.write().await + } +} + +#[allow(clippy::module_name_repetitions)] +pub struct RepositoryAsyncSingle { + torrents: tokio::sync::RwLock>, +} + +impl TRepositoryAsync for RepositoryAsyncSingle { + fn new() -> Self { + Self { + torrents: tokio::sync::RwLock::new(std::collections::BTreeMap::new()), + } + } + + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let (stats, stats_updated) = { + let mut torrents_lock = self.torrents.write().await; + let torrent_entry = torrents_lock.entry(*info_hash).or_insert(Entry::new()); + let stats_updated = torrent_entry.update_peer(peer); + let stats = torrent_entry.get_stats(); + + (stats, stats_updated) + }; + + ( + SwarmStats { + completed: stats.1, + seeders: stats.0, + leechers: stats.2, + }, + stats_updated, + ) + } +} + +impl RepositoryAsyncSingle { + pub async fn get_torrents(&self) -> tokio::sync::RwLockReadGuard<'_, std::collections::BTreeMap> { + self.torrents.read().await + } + + pub async fn get_torrents_mut(&self) -> tokio::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { + self.torrents.write().await + } +} diff --git a/tests/wrk_benchmark_announce.lua b/tests/wrk_benchmark_announce.lua index c182f8e68..620ba2680 100644 --- a/tests/wrk_benchmark_announce.lua +++ b/tests/wrk_benchmark_announce.lua @@ -1,53 +1,35 @@ --- else the randomness would be the same every run -math.randomseed(os.time()) +function generate_unique_info_hashes(size) + local result = {} + local seen = {} -local charset = "0123456789ABCDEF" + for i = 0, size - 1 do + local bytes = {} + bytes[1] = i & 0xFF + bytes[2] = (i >> 8) & 0xFF + bytes[3] = (i >> 16) & 0xFF + bytes[4] = (i >> 24) & 0xFF -function hexToChar(hex) - local n = tonumber(hex, 16) - local f = string.char(n) - return f -end + local info_hash = bytes + local key = table.concat(info_hash, ",") -function hexStringToCharString(hex) - local ret = {} - local r - for 
i = 0, 19 do - local x = i * 2 - r = hex:sub(x+1, x+2) - local f = hexToChar(r) - table.insert(ret, f) + if not seen[key] then + table.insert(result, info_hash) + seen[key] = true + end end - return table.concat(ret) -end -function urlEncode(str) - str = string.gsub (str, "([^0-9a-zA-Z !'()*._~-])", -- locale independent - function (c) return string.format ("%%%02X", string.byte(c)) end) - str = string.gsub (str, " ", "+") - return str + return result end -function genHexString(length) - local ret = {} - local r - for i = 1, length do - r = math.random(1, #charset) - table.insert(ret, charset:sub(r, r)) - end - return table.concat(ret) -end +info_hashes = generate_unique_info_hashes(10000000) -function randomInfoHash() - local hexString = genHexString(40) - local str = hexStringToCharString(hexString) - return urlEncode(str) -end +index = 0 -- the request function that will run at each request request = function() - path = "/announce?info_hash=" .. randomInfoHash() .. "&peer_id=-lt0D80-a%D4%10%19%99%A6yh%9A%E1%CD%96&port=54434&uploaded=885&downloaded=0&left=0&corrupt=0&key=A78381BD&numwant=200&compact=1&no_peer_id=1&supportcrypto=1&redundant=0" - headers = {} - headers["X-Forwarded-For"] = "1.1.1.1" - return wrk.format("GET", path, headers) + path = "/announce?info_hash=" .. info_hashes[index] .. 
"&peer_id=-lt0D80-a%D4%10%19%99%A6yh%9A%E1%CD%96&port=54434&uploaded=885&downloaded=0&left=0&corrupt=0&key=A78381BD&numwant=200&compact=1&no_peer_id=1&supportcrypto=1&redundant=0" + index += 1 + headers = {} + headers["X-Forwarded-For"] = "1.1.1.1" + return wrk.format("GET", path, headers) end From 1735a7a4dc4fa53f36da1394bc4cd4b52eb352a6 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 21 Dec 2023 00:43:42 +0100 Subject: [PATCH 0654/1003] chore: only run contract, deployment & testing jobs in nightly rust --- .github/workflows/contract.yaml | 2 +- .github/workflows/deployment.yaml | 4 ++-- .github/workflows/testing.yaml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/contract.yaml b/.github/workflows/contract.yaml index 7c9fd47bd..b38e0e8f5 100644 --- a/.github/workflows/contract.yaml +++ b/.github/workflows/contract.yaml @@ -14,7 +14,7 @@ jobs: strategy: matrix: - toolchain: [stable, nightly] + toolchain: [nightly] steps: - id: checkout diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 5df50a4b0..91f8d86eb 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -12,7 +12,7 @@ jobs: strategy: matrix: - toolchain: [stable, nightly] + toolchain: [nightly] steps: - id: checkout @@ -44,7 +44,7 @@ jobs: name: Setup Toolchain uses: dtolnay/rust-toolchain@stable with: - toolchain: stable + toolchain: ${{ matrix.toolchain }} - id: publish name: Publish Crates diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index f60f03e5e..d9d0c60c9 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -39,7 +39,7 @@ jobs: strategy: matrix: - toolchain: [stable, nightly] + toolchain: [nightly] steps: - id: checkout @@ -79,7 +79,7 @@ jobs: strategy: matrix: - toolchain: [stable, nightly] + toolchain: [nightly] steps: - id: checkout From ebb7d4c3094003264888d31dbe7a8831b628c9a9 Mon Sep 17 00:00:00 2001 From: Warm Beer 
Date: Thu, 21 Dec 2023 10:50:27 +0100 Subject: [PATCH 0655/1003] chore: make Containerfile use nightly rust --- Containerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Containerfile b/Containerfile index 8ea555c2d..77c7da669 100644 --- a/Containerfile +++ b/Containerfile @@ -3,13 +3,13 @@ # Torrust Tracker ## Builder Image -FROM rust:bookworm as chef +FROM rustlang/rust:nightly-bookworm as chef WORKDIR /tmp RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash RUN cargo binstall --no-confirm cargo-chef cargo-nextest ## Tester Image -FROM rust:slim-bookworm as tester +FROM rustlang/rust:nightly-bookworm-slim as tester WORKDIR /tmp RUN apt-get update; apt-get install -y curl sqlite3; apt-get autoclean From 6e607c34e2ee51edf88578300ebcae10b228bc58 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 22 Dec 2023 12:37:18 +0000 Subject: [PATCH 0656/1003] feat: [#539] change log for UDP tracker From: ```s 2023-12-22T12:32:53.016911160+00:00 [torrust_tracker::servers::udp::server][INFO] Received 109 bytes 2023-12-22T12:32:53.016953899+00:00 [torrust_tracker::servers::udp::server][INFO] Sending 43 bytes ... 2023-12-22T12:32:53.017038257+00:00 [torrust_tracker::servers::udp::server][INFO] 43 bytes sent ``` To: ```s 2023-12-22T12:35:51.320114322+00:00 [UDP][INFO] "CONNECT TxID 1583189312" 2023-12-22T12:35:51.345003905+00:00 [UDP][INFO] "ANNOUNCE TxID 1583189313 IH 443c7602b4fde83d1154d6d9da48808418b181b6" 2023-12-22T12:35:51.320114322+00:00 [UDP][INFO] "SCRAPE TxID 1583189312" ```` - The target is more generic "UDP" and it will be always the same even if we rearrange the packages. - The info is more useful. It includes the request type, the transaction ID to identify the client, and the info-hash. That would allow us to extract statistics from the logs. NOTE: In the long term maybe this should be configurable. 
--- src/servers/udp/handlers.rs | 6 +++++- src/servers/udp/server.rs | 8 ++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 1878c30e1..39a077466 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -7,7 +7,7 @@ use aquatic_udp_protocol::{ AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; -use log::debug; +use log::{debug, info}; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use crate::core::{statistics, Tracker}; @@ -73,6 +73,7 @@ pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: /// /// This function does not ever return an error. pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, tracker: &Tracker) -> Result { + info!(target: "UDP", "\"CONNECT TxID {}\"", request.transaction_id.0); debug!("udp connect request: {:#?}", request); let connection_cookie = make(&remote_addr); @@ -136,6 +137,8 @@ pub async fn handle_announce( authenticate(&info_hash, tracker).await?; + info!(target: "UDP", "\"ANNOUNCE TxID {} IH {}\"", announce_request.transaction_id.0, info_hash.to_hex_string()); + let mut peer = peer_builder::from_request(&wrapped_announce_request, &remote_client_ip); let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; @@ -210,6 +213,7 @@ pub async fn handle_announce( /// /// This function does not ever return an error. 
pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tracker: &Tracker) -> Result { + info!(target: "UDP", "\"SCRAPE TxID {}\"", request.transaction_id.0); debug!("udp scrape request: {:#?}", request); // Convert from aquatic infohashes diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index c6b73860b..9b9a89b11 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -191,7 +191,7 @@ impl Udp { Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { let payload = data[..valid_bytes].to_vec(); - info!("Received {} bytes", payload.len()); + debug!("Received {} bytes", payload.len()); debug!("From: {}", &remote_addr); debug!("Payload: {:?}", payload); @@ -227,7 +227,7 @@ impl Udp { Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { let payload = data[..valid_bytes].to_vec(); - info!("Received {} bytes", payload.len()); + debug!("Received {} bytes", payload.len()); debug!("From: {}", &remote_addr); debug!("Payload: {:?}", payload); @@ -249,13 +249,13 @@ impl Udp { let position = cursor.position() as usize; let inner = cursor.get_ref(); - info!("Sending {} bytes ...", &inner[..position].len()); + debug!("Sending {} bytes ...", &inner[..position].len()); debug!("To: {:?}", &remote_addr); debug!("Payload: {:?}", &inner[..position]); Udp::send_packet(socket, &remote_addr, &inner[..position]).await; - info!("{} bytes sent", &inner[..position].len()); + debug!("{} bytes sent", &inner[..position].len()); } Err(_) => { error!("could not write response to bytes."); From ff3928e7f2c9ea6a94ae938dc03ba1dbe9c0eb00 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 22 Dec 2023 13:04:17 +0000 Subject: [PATCH 0657/1003] fix: clippy error --- contrib/bencode/src/reference/decode_opt.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/bencode/src/reference/decode_opt.rs b/contrib/bencode/src/reference/decode_opt.rs index ac94d0311..e8d9a8337 100644 --- 
a/contrib/bencode/src/reference/decode_opt.rs +++ b/contrib/bencode/src/reference/decode_opt.rs @@ -41,7 +41,7 @@ impl BDecodeOpt { /// /// It may be useful to disable this if for example, the input bencode is prepended to /// some payload and you would like to disassociate it. In this case, to find where the - /// rest of the payload starts that wasn't decoded, get the bencode buffer, and call len(). + /// rest of the payload starts that wasn't decoded, get the bencode buffer, and call `len()`. #[must_use] pub fn enforce_full_decode(&self) -> bool { self.enforce_full_decode From 3b8a625318463aa8681f818d41e2557ef95f7f19 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 26 Dec 2023 16:23:25 +0800 Subject: [PATCH 0658/1003] chore: update cargo deps ``` Updating crates.io index Updating anstream v0.6.4 -> v0.6.5 Updating anstyle-parse v0.2.2 -> v0.2.3 Updating anstyle-query v1.0.0 -> v1.0.2 Updating anstyle-wincon v3.0.1 -> v3.0.2 Updating async-trait v0.1.74 -> v0.1.75 Updating borsh v1.2.0 -> v1.3.0 Updating borsh-derive v1.2.0 -> v1.3.0 Updating clap v4.4.10 -> v4.4.11 Updating clap_builder v4.4.9 -> v4.4.11 Updating crossbeam v0.8.2 -> v0.8.3 Updating crossbeam-channel v0.5.8 -> v0.5.10 Updating crossbeam-deque v0.8.3 -> v0.8.4 Updating crossbeam-epoch v0.9.15 -> v0.9.17 Updating crossbeam-queue v0.3.8 -> v0.3.10 Updating crossbeam-utils v0.8.16 -> v0.8.18 Updating deranged v0.3.9 -> v0.3.10 Removing difflib v0.4.0 Removing float-cmp v0.9.0 Updating futures v0.3.29 -> v0.3.30 Updating futures-channel v0.3.29 -> v0.3.30 Updating futures-core v0.3.29 -> v0.3.30 Updating futures-executor v0.3.29 -> v0.3.30 Updating futures-io v0.3.29 -> v0.3.30 Updating futures-macro v0.3.29 -> v0.3.30 Updating futures-sink v0.3.29 -> v0.3.30 Updating futures-task v0.3.29 -> v0.3.30 Updating futures-util v0.3.29 -> v0.3.30 Updating http-body v0.4.5 -> v0.4.6 Updating hyper v0.14.27 -> v0.14.28 Adding itertools v0.11.0 Updating itoa v1.0.9 -> v1.0.10 Updating libc v0.2.150 -> 
v0.2.151 Removing memoffset v0.9.0 Updating mio v0.8.9 -> v0.8.10 Updating mockall v0.11.4 -> v0.12.1 Updating mockall_derive v0.11.4 -> v0.12.1 Removing normalize-line-endings v0.3.0 Updating object v0.32.1 -> v0.32.2 Updating once_cell v1.18.0 -> v1.19.0 Updating openssl v0.10.60 -> v0.10.62 Updating openssl-src v300.1.6+3.1.4 -> v300.2.1+3.2.0 Updating openssl-sys v0.9.96 -> v0.9.98 Updating pkg-config v0.3.27 -> v0.3.28 Updating predicates v2.1.5 -> v3.0.4 Updating proc-macro2 v1.0.70 -> v1.0.71 Updating reqwest v0.11.22 -> v0.11.23 Updating ring v0.17.6 -> v0.17.7 Updating rkyv v0.7.42 -> v0.7.43 Updating rkyv_derive v0.7.42 -> v0.7.43 Updating rustix v0.38.26 -> v0.38.28 Updating rustls v0.21.9 -> v0.21.10 Updating ryu v1.0.15 -> v1.0.16 Updating serde_spanned v0.6.4 -> v0.6.5 Removing socket2 v0.4.10 Updating syn v2.0.39 -> v2.0.43 Updating thiserror v1.0.50 -> v1.0.52 Updating thiserror-impl v1.0.50 -> v1.0.52 Updating time v0.3.30 -> v0.3.31 Updating time-macros v0.2.15 -> v0.2.16 Updating tokio v1.34.0 -> v1.35.1 Updating try-lock v0.2.4 -> v0.2.5 Updating unicode-bidi v0.3.13 -> v0.3.14 Updating winnow v0.5.19 -> v0.5.30 Updating zerocopy v0.7.28 -> v0.7.32 Updating zerocopy-derive v0.7.28 -> v0.7.32 ``` --- Cargo.lock | 363 ++++++++++++++++++++++++----------------------------- Cargo.toml | 2 +- 2 files changed, 165 insertions(+), 200 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4e3bb2f62..ac0e95cea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -93,9 +93,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" +checksum = "d664a92ecae85fd0a7392615844904654d1d5f5514837f471ddef4a057aba1b6" dependencies = [ "anstyle", "anstyle-parse", @@ -113,30 +113,30 @@ checksum = 
"7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" [[package]] name = "anstyle-parse" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.1" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" +checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" dependencies = [ "anstyle", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -179,13 +179,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.74" +version = "0.1.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +checksum = "fdf6721fb0140e4f897002dd086c06f6c27775df19cfe1fccb21181a48fd2c98" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] @@ -264,7 +264,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] @@ -348,7 +348,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] @@ -386,9 +386,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bf617fabf5cdbdc92f774bfe5062d870f228b80056d41180797abf48bed4056e" +checksum = "26d4d6dafc1a3bb54687538972158f07b2c948bc57d5890df22c0739098b3028" dependencies = [ "borsh-derive", "cfg_aliases", @@ -396,15 +396,15 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f404657a7ea7b5249e36808dff544bc88a28f26e0ac40009f674b7a009d14be3" +checksum = "bf4918709cc4dd777ad2b6303ed03cb37f3ca0ccede8c1b0d28ac6db8f4710e0" dependencies = [ "once_cell", "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", "syn_derive", ] @@ -565,9 +565,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.10" +version = "4.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fffed7514f420abec6d183b1d3acfd9099c79c3a10a06ade4f8203f1411272" +checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2" dependencies = [ "clap_builder", "clap_derive", @@ -575,9 +575,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.9" +version = "4.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63361bae7eef3771745f02d8d892bec2fee5f6e34af316ba556e7f97a7069ff1" +checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb" dependencies = [ "anstream", "anstyle", @@ -594,7 +594,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] @@ -690,7 +690,7 @@ dependencies = [ "criterion-plot", "futures", "is-terminal", - "itertools", + "itertools 0.10.5", "num-traits", "once_cell", "oorandom", @@ -712,14 +712,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools", + "itertools 0.10.5", ] [[package]] name = "crossbeam" -version = "0.8.2" +version = "0.8.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" +checksum = "6eb9105919ca8e40d437fc9cbb8f1975d916f1bd28afe795a48aae32a2cc8920" dependencies = [ "cfg-if", "crossbeam-channel", @@ -731,9 +731,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "82a9b73a36529d9c47029b9fb3a6f0ea3cc916a261195352ba19e770fc1748b2" dependencies = [ "cfg-if", "crossbeam-utils", @@ -741,9 +741,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "fca89a0e215bab21874660c67903c5f143333cab1da83d041c7ded6053774751" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -752,22 +752,20 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = "0.9.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = "0e3681d554572a651dda4186cd47240627c3d0114d45a95f6ad27f2f22e7548d" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", - "memoffset", - "scopeguard", ] [[package]] name = "crossbeam-queue" -version = "0.3.8" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" +checksum = "adc6598521bb5a83d491e8c1fe51db7296019d2ca3cb93cc6c2a20369a4d78a2" dependencies = [ "cfg-if", "crossbeam-utils", @@ -775,9 +773,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +checksum = "c3a430a770ebd84726f584a90ee7f020d28db52c6d02138900f22341f866d39c" dependencies = [ "cfg-if", ] @@ -813,7 +811,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] @@ -824,14 +822,14 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core", "quote", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] name = "deranged" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" +checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc" dependencies = [ "powerfmt", "serde", @@ -858,15 +856,9 @@ checksum = "9abcad25e9720609ccb3dcdb795d845e37d8ce34183330a9f48b03a1a71c8e21" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", ] -[[package]] -name = "difflib" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" - [[package]] name = "digest" version = "0.10.7" @@ -968,15 +960,6 @@ dependencies = [ "miniz_oxide", ] -[[package]] -name = "float-cmp" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" -dependencies = [ - "num-traits", -] - [[package]] name = "fnv" version = "1.0.7" @@ -1048,7 +1031,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] @@ -1060,7 +1043,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] @@ -1072,7 +1055,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.39", + "syn 2.0.43", ] 
[[package]] @@ -1083,9 +1066,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -1098,9 +1081,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -1108,15 +1091,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -1125,38 +1108,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-macro" -version = "0.3.29" +version = "0.3.30" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] name = "futures-sink" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -1296,9 +1279,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", "http", @@ -1325,9 +1308,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" 
dependencies = [ "bytes", "futures-channel", @@ -1340,7 +1323,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2", "tokio", "tower-service", "tracing", @@ -1428,7 +1411,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5305557fa27b460072ae15ce07617e999f5879f14d376c8449f0bfb9f9d8e91e" dependencies = [ "derive_utils", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] @@ -1457,11 +1440,20 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "jobserver" @@ -1579,9 +1571,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.150" +version = "0.2.151" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" +checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" [[package]] name = "libloading" @@ -1676,15 +1668,6 @@ version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] - [[package]] name = "mime" version = "0.3.17" @@ -1708,9 +1691,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.9" +version = "0.8.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" dependencies = [ "libc", "wasi", @@ -1719,9 +1702,9 @@ dependencies = [ [[package]] name = "mockall" -version = "0.11.4" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" +checksum = "43766c2b5203b10de348ffe19f7e54564b64f3d6018ff7648d1e2d6d3a0f0a48" dependencies = [ "cfg-if", "downcast", @@ -1734,14 +1717,14 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.11.4" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" +checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2" dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.43", ] [[package]] @@ -1774,7 +1757,7 @@ dependencies = [ "percent-encoding", "serde", "serde_json", - "socket2 0.5.5", + "socket2", "twox-hash", "url", ] @@ -1792,7 +1775,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", "termcolor", "thiserror", ] @@ -1903,12 +1886,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" -[[package]] -name = "normalize-line-endings" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" - [[package]] name = "num-bigint" version = "0.4.4" @@ -1951,18 +1928,18 @@ dependencies = [ [[package]] name = "object" -version = "0.32.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" @@ -1972,9 +1949,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "openssl" -version = "0.10.60" +version = "0.10.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79a4c6c3a2b158f7f8f2a2fc5a969fa3a068df6fc9dbb4a43845436e3af7c800" +checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671" dependencies = [ "bitflags 2.4.1", "cfg-if", @@ -1993,7 +1970,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] @@ -2004,18 +1981,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.1.6+3.1.4" +version = "300.2.1+3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439fac53e092cd7442a3660c85dde4643ab3b5bd39040912388dcdabf6b88085" +checksum = "3fe476c29791a5ca0d1273c697e96085bbabbbea2ef7afd5617e78a4b40332d3" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.96" +version = "0.9.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3812c071ba60da8b5677cc12bcb1d42989a65553772897a7e0355545a819838f" +checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7" dependencies = [ "cc", "libc", @@ -2116,7 +2093,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", ] 
[[package]] @@ -2185,7 +2162,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] @@ -2202,9 +2179,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a" [[package]] name = "plotters" @@ -2248,16 +2225,13 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "predicates" -version = "2.1.5" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" +checksum = "6dfc28575c2e3f19cb3c73b93af36460ae898d426eba6fc15b9bd2a5220758a0" dependencies = [ - "difflib", - "float-cmp", - "itertools", - "normalize-line-endings", + "anstyle", + "itertools 0.11.0", "predicates-core", - "regex", ] [[package]] @@ -2321,9 +2295,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.70" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +checksum = "75cb1540fadbd5b8fbccc4dddad2734eba435053f725621c070711a14bb5f4b8" dependencies = [ "unicode-ident", ] @@ -2494,9 +2468,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.22" +version = "0.11.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" dependencies = [ "base64 0.21.5", "bytes", @@ -2532,9 +2506,9 @@ dependencies = [ [[package]] name = "ring" 
-version = "0.17.6" +version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "684d5e6e18f669ccebf64a92236bb7db9a34f07be010e3627368182027180866" +checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" dependencies = [ "cc", "getrandom", @@ -2546,12 +2520,13 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.42" +version = "0.7.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0200c8230b013893c0b2d6213d6ec64ed2b9be2e0e016682b7224ff82cff5c58" +checksum = "527a97cdfef66f65998b5f3b637c26f5a5ec09cc52a3f9932313ac645f4190f5" dependencies = [ "bitvec", "bytecheck", + "bytes", "hashbrown 0.12.3", "ptr_meta", "rend", @@ -2563,9 +2538,9 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.42" +version = "0.7.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2e06b915b5c230a17d7a736d1e2e63ee753c256a8614ef3f5147b13a4f5541d" +checksum = "b5c462a1328c8e67e4d6dbad1eb0355dd43e8ab432c6e227a43657f16ade5033" dependencies = [ "proc-macro2", "quote", @@ -2646,9 +2621,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.26" +version = "0.38.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9470c4bf8246c8daf25f9598dca807fb6510347b1e1cfa55749113850c79d88a" +checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" dependencies = [ "bitflags 2.4.1", "errno", @@ -2659,9 +2634,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.9" +version = "0.21.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" +checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", "ring", @@ -2696,9 +2671,9 @@ checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "ryu" -version = "1.0.15" +version = "1.0.16" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" [[package]] name = "same-file" @@ -2820,7 +2795,7 @@ checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] @@ -2852,14 +2827,14 @@ checksum = "3081f5ffbb02284dda55132aa26daecedd7372a42417bbbab6f14ab7d6bb9145" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] name = "serde_spanned" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" dependencies = [ "serde", ] @@ -2902,7 +2877,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] @@ -2969,16 +2944,6 @@ version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.5" @@ -3030,9 +2995,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.39" +version = "2.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" +checksum = "ee659fb5f3d355364e1f3e5bc10fb82068efbf824a1e9d1c9504244a6469ad53" dependencies = [ "proc-macro2", "quote", @@ -3048,7 +3013,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 
2.0.39", + "syn 2.0.43", ] [[package]] @@ -3131,29 +3096,29 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "83a48fd946b02c0a526b2e9481c8e2a17755e47039164a86c4070446e3a4614d" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "e7fbe9b594d6568a6a1443250a7e67d80b74e1e96f6d1715e1e21cc1888291d3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] name = "time" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" +checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" dependencies = [ "deranged", "itoa", @@ -3171,9 +3136,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" +checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" dependencies = [ "time-core", ] @@ -3205,9 +3170,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.34.0" +version = "1.35.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" +checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" 
dependencies = [ "backtrace", "bytes", @@ -3216,7 +3181,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.5", + "socket2", "tokio-macros", "windows-sys 0.48.0", ] @@ -3229,7 +3194,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] @@ -3512,9 +3477,9 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "twox-hash" @@ -3541,9 +3506,9 @@ checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" [[package]] name = "unicode-bidi" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416" [[package]] name = "unicode-ident" @@ -3651,7 +3616,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", "wasm-bindgen-shared", ] @@ -3685,7 +3650,7 @@ checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3880,9 +3845,9 @@ checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winnow" -version = "0.5.19" +version = "0.5.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "829846f3e3db426d4cee4510841b71a8e58aa2a76b1132579487ae430ccd9c7b" +checksum = "9b5c3db89721d50d0e2a673f5043fc4722f76dcc352d7b1ab8b8288bed4ed2c5" dependencies = [ "memchr", ] @@ -3917,22 +3882,22 @@ dependencies = 
[ [[package]] name = "zerocopy" -version = "0.7.28" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d6f15f7ade05d2a4935e34a457b936c23dc70a05cc1d97133dc99e7a3fe0f0e" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.28" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbbad221e3f78500350ecbd7dfa4e63ef945c05f4c61cb7f4d3f84cd0bba649b" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.43", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 4f2abd6f5..f2dd1fe96 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,7 +34,7 @@ aquatic_udp_protocol = "0" async-trait = "0" axum = { version = "0.6", features = ["macros"] } axum-client-ip = "0.4" -axum-server = { version = "0", features = ["tls-rustls"] } +axum-server = { version = "0.5", features = ["tls-rustls"] } binascii = "0" chrono = { version = "0", default-features = false, features = ["clock"] } config = "0" From 14cb26cebd729fb0e209eb1ab3d626f431efb83a Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 26 Dec 2023 18:20:52 +0800 Subject: [PATCH 0659/1003] chore: update deps to hyper v1 ``` Updating crates.io index Updating axum v0.6.20 -> v0.7.2 Updating axum-client-ip v0.4.2 -> v0.5.0 Updating axum-core v0.3.4 -> v0.4.1 Updating axum-macros v0.3.8 -> v0.4.0 Updating axum-server v0.5.1 -> v0.6.0 Adding h2 v0.4.0 Adding http-body-util v0.1.0 Removing http-range-header v0.3.1 Adding hyper-util v0.1.2 Updating rustls-pemfile v1.0.4 -> v2.0.0 Adding rustls-pki-types v1.1.0 Updating tower-http v0.4.4 -> v0.5.0 ``` --- Cargo.lock | 179 ++++++++++++++++++------ Cargo.toml | 10 +- src/servers/apis/server.rs | 30 ++-- src/servers/apis/v1/middlewares/auth.rs | 11 +- src/servers/health_check_api/server.rs | 
22 ++- src/servers/http/v1/launcher.rs | 35 +++-- 6 files changed, 208 insertions(+), 79 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ac0e95cea..b696fc0d2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -196,19 +196,20 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.20" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +checksum = "202651474fe73c62d9e0a56c6133f7a0ff1dc1c8cf7a5b03381af2a26553ac9d" dependencies = [ "async-trait", "axum-core", "axum-macros", - "bitflags 1.3.2", "bytes", "futures-util", - "http", - "http-body", - "hyper", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.1.0", + "hyper-util", "itoa", "matchit", "memchr", @@ -229,9 +230,9 @@ dependencies = [ [[package]] name = "axum-client-ip" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ef117890a418b7832678d9ea1e1c08456dd7b2fd1dadb9676cd6f0fe7eb4b21" +checksum = "0f5ffe4637708b326c621d5494ab6c91dcf62ee440fa6ee967d289315a9c6f81" dependencies = [ "axum", "forwarded-header-value", @@ -240,26 +241,29 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.3.4" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +checksum = "77cb22c689c44d4c07b0ab44ebc25d69d8ae601a2f28fb8d672d344178fa17aa" dependencies = [ "async-trait", "bytes", "futures-util", - "http", - "http-body", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", "mime", + "pin-project-lite", "rustversion", + "sync_wrapper", "tower-layer", "tower-service", ] [[package]] name = "axum-macros" -version = "0.3.8" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cdca6a10ecad987bda04e95606ef85a5417dcaac1a78455242d72e031e2b6b62" +checksum = "5a2edad600410b905404c594e2523549f1bcd4bded1e252c8f74524ccce0b867" dependencies = [ "heck", "proc-macro2", @@ -269,21 +273,24 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447f28c85900215cc1bea282f32d4a2f22d55c5a300afdfbc661c8d6a632e063" +checksum = "c1ad46c3ec4e12f4a4b6835e173ba21c25e484c9d02b49770bf006ce5367c036" dependencies = [ "arc-swap", "bytes", "futures-util", - "http", - "http-body", - "hyper", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.1.0", + "hyper-util", "pin-project-lite", "rustls", "rustls-pemfile", "tokio", "tokio-rustls", + "tower", "tower-service", ] @@ -1197,7 +1204,26 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http", + "http 0.2.11", + "indexmap 2.1.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d308f63daf4181410c242d34c11f928dcb3aa105852019e043c9d1f4e4368a" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 1.0.0", "indexmap 2.1.0", "slab", "tokio", @@ -1277,6 +1303,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http-body" version = "0.4.6" @@ -1284,15 +1321,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http", + "http 0.2.11", "pin-project-lite", ] [[package]] -name = "http-range-header" -version = "0.3.1" +name = "http-body" +version 
= "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http 1.0.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" +checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" +dependencies = [ + "bytes", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "pin-project-lite", +] [[package]] name = "httparse" @@ -1316,9 +1370,9 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2", - "http", - "http-body", + "h2 0.3.22", + "http 0.2.11", + "http-body 0.4.6", "httparse", "httpdate", "itoa", @@ -1330,6 +1384,25 @@ dependencies = [ "want", ] +[[package]] +name = "hyper" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5aa53871fc917b1a9ed87b683a5d86db645e23acb32c2e0785a353e522fb75" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2 0.4.0", + "http 1.0.0", + "http-body 1.0.0", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "tokio", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -1337,12 +1410,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper", + "hyper 0.14.28", "native-tls", "tokio", "tokio-native-tls", ] +[[package]] +name = "hyper-util" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdea9aac0dbe5a9240d68cfd9501e2db94222c6dc06843e06640b9e07f0fdc67" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "hyper 1.1.0", + "pin-project-lite", + "socket2", + "tokio", + "tracing", +] + [[package]] name = 
"iana-time-zone" version = "0.1.58" @@ -2477,10 +2568,10 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", + "h2 0.3.22", + "http 0.2.11", + "http-body 0.4.6", + "hyper 0.14.28", "hyper-tls", "ipnet", "js-sys", @@ -2646,13 +2737,20 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.4" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" dependencies = [ "base64 0.21.5", + "rustls-pki-types", ] +[[package]] +name = "rustls-pki-types" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e9d979b3ce68192e42760c7810125eb6cf2ea10efae545a156063e61f314e2a" + [[package]] name = "rustls-webpki" version = "0.101.7" @@ -3323,7 +3421,7 @@ dependencies = [ "derive_more", "fern", "futures", - "hyper", + "hyper 1.1.0", "lazy_static", "local-ip-address", "log", @@ -3424,18 +3522,17 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.4" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" +checksum = "09e12e6351354851911bdf8c2b8f2ab15050c567d70a8b9a37ae7b8301a4080d" dependencies = [ "async-compression", "bitflags 2.4.1", "bytes", - "futures-core", "futures-util", - "http", - "http-body", - "http-range-header", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", "pin-project-lite", "tokio", "tokio-util", diff --git a/Cargo.toml b/Cargo.toml index f2dd1fe96..64f913e4f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,16 +32,16 @@ version = "3.0.0-alpha.12-develop" [dependencies] aquatic_udp_protocol = "0" async-trait = "0" -axum = { version = "0.6", features = ["macros"] } -axum-client-ip = "0.4" -axum-server = { version = "0.5", features = 
["tls-rustls"] } +axum = { version = "0", features = ["macros"] } +axum-client-ip = "0" +axum-server = { version = "0", features = ["tls-rustls"] } binascii = "0" chrono = { version = "0", default-features = false, features = ["clock"] } config = "0" derive_more = "0" fern = "0" futures = "0" -hyper = "0" +hyper = "1" lazy_static = "1" log = { version = "0", features = ["release_max_level_info"] } multimap = "0" @@ -64,7 +64,7 @@ torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "pa torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12-develop", path = "contrib/bencode" } torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "packages/located-error" } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "packages/primitives" } -tower-http = { version = "0.4", features = ["compression-full"] } +tower-http = { version = "0", features = ["compression-full"] } uuid = { version = "1", features = ["v4"] } [dev-dependencies] diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 58b60638e..c42083f9f 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -187,11 +187,19 @@ impl Launcher { { let app = router(tracker); + let handle = Handle::new(); + + let cloned_handle = handle.clone(); + + tokio::task::spawn(async move { + shutdown_signal.await; + cloned_handle.shutdown(); + }); + Box::pin(async { - axum::Server::from_tcp(tcp_listener) - .expect("Could not bind to tcp listener.") + axum_server::from_tcp(tcp_listener) + .handle(handle) .serve(app.into_make_service_with_connect_info::()) - .with_graceful_shutdown(shutdown_signal) .await .expect("Axum server crashed."); }) @@ -213,7 +221,7 @@ impl Launcher { let cloned_handle = handle.clone(); - tokio::task::spawn_local(async move { + tokio::task::spawn(async move { shutdown_signal.await; cloned_handle.shutdown(); }); @@ -237,15 +245,19 @@ impl Launcher { /// # Panics /// /// It would panic if it fails to 
listen to shutdown signal. -pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl Future> { +pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl Future> { let app = router(tracker); - let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); + let handle = Handle::new(); + let shutdown_handle = handle.clone(); - server.with_graceful_shutdown(async move { + tokio::spawn(async move { tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - info!("Stopping Torrust APIs server on http://{} ...", socket_addr); - }) + info!("Stopping Torrust APIs server on https://{} ...", socket_addr); + shutdown_handle.shutdown(); + }); + + axum_server::bind(socket_addr).handle(handle).serve(app.into_make_service()) } /// Starts the API server with graceful shutdown and TLS on the current thread. diff --git a/src/servers/apis/v1/middlewares/auth.rs b/src/servers/apis/v1/middlewares/auth.rs index 3e8f74d0c..7749b3b34 100644 --- a/src/servers/apis/v1/middlewares/auth.rs +++ b/src/servers/apis/v1/middlewares/auth.rs @@ -40,15 +40,12 @@ pub struct QueryParams { /// Middleware for authentication using a "token" GET param. /// The token must be one of the tokens in the tracker [HTTP API configuration](torrust_tracker_configuration::HttpApi). 
-pub async fn auth( +pub async fn auth( State(config): State>, Query(params): Query, - request: Request, - next: Next, -) -> Response -where - B: Send, -{ + request: Request, + next: Next, +) -> Response { let Some(token) = params.token else { return AuthError::Unauthorized.into_response(); }; diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs index 562772a87..d4654d617 100644 --- a/src/servers/health_check_api/server.rs +++ b/src/servers/health_check_api/server.rs @@ -7,6 +7,7 @@ use std::sync::Arc; use axum::routing::get; use axum::{Json, Router}; +use axum_server::Handle; use futures::Future; use log::info; use serde_json::json; @@ -25,23 +26,30 @@ pub fn start( socket_addr: SocketAddr, tx: Sender, config: Arc, -) -> impl Future> { +) -> impl Future> { let app = Router::new() .route("/", get(|| async { Json(json!({})) })) .route("/health_check", get(health_check_handler)) .with_state(config); - let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); + let handle = Handle::new(); + let cloned_handle = handle.clone(); - let bound_addr = server.local_addr(); + let tcp_listener = std::net::TcpListener::bind(socket_addr).expect("Could not bind tcp_listener to address."); + let bound_addr = tcp_listener + .local_addr() + .expect("Could not get local_addr from tcp_listener."); - info!("Health Check API server listening on http://{}", bound_addr); - - let running = server.with_graceful_shutdown(async move { + tokio::task::spawn(async move { tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - info!("Stopping Torrust Health Check API server o http://{} ...", socket_addr); + info!("Stopping Torrust Health Check API server o http://{} ...", bound_addr); + cloned_handle.shutdown(); }); + let running = axum_server::from_tcp(tcp_listener) + .handle(handle) + .serve(app.into_make_service_with_connect_info::()); + tx.send(ApiServerJobStarted { bound_addr }) .expect("the Health Check API 
server should not be dropped"); diff --git a/src/servers/http/v1/launcher.rs b/src/servers/http/v1/launcher.rs index 1ae09a5f8..6b89e8ce7 100644 --- a/src/servers/http/v1/launcher.rs +++ b/src/servers/http/v1/launcher.rs @@ -40,11 +40,19 @@ impl Launcher { { let app = router(tracker); + let handle = Handle::new(); + + let cloned_handle = handle.clone(); + + tokio::task::spawn(async move { + shutdown_signal.await; + cloned_handle.shutdown(); + }); + Box::pin(async { - axum::Server::from_tcp(tcp_listener) - .expect("Could not bind to tcp listener.") - .serve(app.into_make_service_with_connect_info::()) - .with_graceful_shutdown(shutdown_signal) + axum_server::from_tcp(tcp_listener) + .handle(handle) + .serve(app.into_make_service_with_connect_info::()) .await .expect("Axum server crashed."); }) @@ -73,7 +81,7 @@ impl Launcher { let cloned_handle = handle.clone(); - tokio::task::spawn_local(async move { + tokio::task::spawn(async move { shutdown_signal.await; cloned_handle.shutdown(); }); @@ -135,15 +143,22 @@ impl HttpServerLauncher for Launcher { /// # Panics /// /// Panics if the server could not listen to shutdown (ctrl+c) signal. 
-pub fn start(socket_addr: std::net::SocketAddr, tracker: Arc) -> impl Future> { +pub fn start(socket_addr: std::net::SocketAddr, tracker: Arc) -> impl Future> { let app = router(tracker); - let server = axum::Server::bind(&socket_addr).serve(app.into_make_service_with_connect_info::()); + let handle = Handle::new(); + + let cloned_handle = handle.clone(); - server.with_graceful_shutdown(async move { + tokio::task::spawn(async move { tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - info!("Stopping Torrust HTTP tracker server on http://{} ...", socket_addr); - }) + info!("Stopping Torrust Health Check API server o http://{} ...", socket_addr); + cloned_handle.shutdown(); + }); + + axum_server::bind(socket_addr) + .handle(handle) + .serve(app.into_make_service_with_connect_info::()) } /// Starts a new HTTPS server instance. From 922a46d7c5a8485dea05050851ceef1e3a138a5b Mon Sep 17 00:00:00 2001 From: Shrirang Borde <68811459+ShrirangB@users.noreply.github.com> Date: Tue, 26 Dec 2023 22:52:20 +0530 Subject: [PATCH 0660/1003] Update labels.json #503 Added good first issue label #503 in label.json file --- .github/labels.json | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/labels.json b/.github/labels.json index 3b8a2af42..778c0c892 100644 --- a/.github/labels.json +++ b/.github/labels.json @@ -250,5 +250,11 @@ "color": "eae3e7", "description": "You can still do it another way", "aliases": [] + }, + { + "name": "good first issue", + "color": "b0fc38", + "description": "Feel free to seek assistance when needed", + "aliases": [] } -] \ No newline at end of file +] From 911708ce841123bfd7005845fdd7abebea2de3a9 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 31 Dec 2023 22:54:44 +1100 Subject: [PATCH 0661/1003] chore: fixup clippy and spelling --- cSpell.json | 3 ++- src/core/services/torrent.rs | 4 +--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/cSpell.json b/cSpell.json index 
c9b547c90..d09db93b7 100644 --- a/cSpell.json +++ b/cSpell.json @@ -129,7 +129,8 @@ "Xtorrent", "Xunlei", "xxxxxxxxxxxxxxxxxxxxd", - "yyyyyyyyyyyyyyyyyyyyd" + "yyyyyyyyyyyyyyyyyyyyd", + "nvCFlJCq7fz7Qx6KoKTDiMZvns8l5Kw7" ], "enableFiletypes": [ "dockerfile", diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 918a80bae..f88cf5b50 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -97,9 +97,7 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op let torrent_entry_option = db.get(info_hash); - let Some(torrent_entry) = torrent_entry_option else { - return None; - }; + let torrent_entry = torrent_entry_option?; let (seeders, completed, leechers) = torrent_entry.get_stats(); From f4c762b799ff3a97feb0345dce4a762864b34f40 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 31 Dec 2023 22:56:00 +1100 Subject: [PATCH 0662/1003] chore: update cargo deps ``` Updating crates.io index Updating ahash v0.8.6 -> v0.8.7 Updating async-trait v0.1.75 -> v0.1.76 Updating axum v0.7.2 -> v0.7.3 Updating axum-core v0.4.1 -> v0.4.2 Updating clap v4.4.11 -> v4.4.12 Updating clap_builder v4.4.11 -> v4.4.12 Updating deranged v0.3.10 -> v0.3.11 Updating iana-time-zone v0.1.58 -> v0.1.59 Updating is-terminal v0.4.9 -> v0.4.10 Updating memchr v2.6.4 -> v2.7.1 Updating proc-macro2 v1.0.71 -> v1.0.72 Updating schannel v0.1.22 -> v0.1.23 Updating serde_bytes v0.11.12 -> v0.11.13 Updating tempfile v3.8.1 -> v3.9.0 Updating thiserror v1.0.52 -> v1.0.53 Updating thiserror-impl v1.0.52 -> v1.0.53 Updating windows-core v0.51.1 -> v0.52.0 Updating winnow v0.5.30 -> v0.5.31 ``` --- Cargo.lock | 86 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 44 insertions(+), 42 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b696fc0d2..4e5be3291 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -30,9 +30,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.6" +version = "0.8.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ "cfg-if", "once_cell", @@ -179,9 +179,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.75" +version = "0.1.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdf6721fb0140e4f897002dd086c06f6c27775df19cfe1fccb21181a48fd2c98" +checksum = "531b97fb4cd3dfdce92c35dedbfdc1f0b9d8091c8ca943d6dae340ef5012d514" dependencies = [ "proc-macro2", "quote", @@ -196,9 +196,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "202651474fe73c62d9e0a56c6133f7a0ff1dc1c8cf7a5b03381af2a26553ac9d" +checksum = "d09dbe0e490df5da9d69b36dca48a76635288a82f92eca90024883a56202026d" dependencies = [ "async-trait", "axum-core", @@ -226,6 +226,7 @@ dependencies = [ "tower", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -241,9 +242,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77cb22c689c44d4c07b0ab44ebc25d69d8ae601a2f28fb8d672d344178fa17aa" +checksum = "e87c8503f93e6d144ee5690907ba22db7ba79ab001a932ab99034f0fe836b3df" dependencies = [ "async-trait", "bytes", @@ -257,6 +258,7 @@ dependencies = [ "sync_wrapper", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -572,9 +574,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.11" +version = "4.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2" +checksum = "dcfab8ba68f3668e89f6ff60f5b205cea56aa7b769451a59f34b8682f51c056d" dependencies = [ 
"clap_builder", "clap_derive", @@ -582,9 +584,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.11" +version = "4.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb" +checksum = "fb7fb5e4e979aec3be7791562fcba452f94ad85e954da024396433e0e25a79e9" dependencies = [ "anstream", "anstyle", @@ -834,9 +836,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", "serde", @@ -1252,7 +1254,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.6", + "ahash 0.8.7", ] [[package]] @@ -1261,7 +1263,7 @@ version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - "ahash 0.8.6", + "ahash 0.8.7", "allocator-api2", ] @@ -1436,9 +1438,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.58" +version = "0.1.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" +checksum = "b6a67363e2aa4443928ce15e57ebae94fd8949958fd1223c4cfc0cd473ad7539" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1513,13 +1515,13 @@ checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "is-terminal" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +checksum = "0bad00257d07be169d870ab665980b06cdb366d792ad690bf2e76876dc503455" dependencies = [ "hermit-abi", "rustix", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -1755,9 +1757,9 @@ checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "memchr" -version = "2.6.4" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "mime" @@ -2386,9 +2388,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.71" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75cb1540fadbd5b8fbccc4dddad2734eba435053f725621c070711a14bb5f4b8" +checksum = "a293318316cf6478ec1ad2a21c49390a8d5b5eae9fab736467d93fbc0edc29c5" dependencies = [ "unicode-ident", ] @@ -2790,11 +2792,11 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] name = "schannel" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2878,9 +2880,9 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.12" +version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab33ec92f677585af6d88c65593ae2375adde54efdbf16d597f2cbc7a6d368ff" +checksum = "8bb1879ea93538b78549031e2d54da3e901fd7e75f2e4dc758d760937b123d10" dependencies = [ "serde", ] @@ -3166,15 +3168,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.1" +version = "3.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" dependencies = [ "cfg-if", "fastrand", "redox_syscall", "rustix", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -3194,18 +3196,18 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.52" +version = "1.0.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83a48fd946b02c0a526b2e9481c8e2a17755e47039164a86c4070446e3a4614d" +checksum = "b2cd5904763bad08ad5513ddbb12cf2ae273ca53fa9f68e843e236ec6dfccc09" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.52" +version = "1.0.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7fbe9b594d6568a6a1443250a7e67d80b74e1e96f6d1715e1e21cc1888291d3" +checksum = "3dcf4a824cce0aeacd6f38ae6f24234c8e80d68632338ebaa1443b5df9e29e19" dependencies = [ "proc-macro2", "quote", @@ -3801,11 +3803,11 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.51.1" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.48.5", + "windows-targets 0.52.0", ] [[package]] @@ -3942,9 +3944,9 @@ checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winnow" -version = "0.5.30" +version = "0.5.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b5c3db89721d50d0e2a673f5043fc4722f76dcc352d7b1ab8b8288bed4ed2c5" +checksum = "97a4882e6b134d6c28953a387571f1acdd3496830d5e36c5e3a1075580ea641c" 
dependencies = [ "memchr", ] From 46e67a86c7e2861cc924713df0e049468b434817 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 2 Jan 2024 14:33:52 +0000 Subject: [PATCH 0663/1003] refactor: [#262] split global consts for limits Current const: `crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS` `MAX_SCRAPE_TORRENTS` is the limit only for the number of torrents in a `scrape`request. New const: `crate::core::TORRENT_PEERS_LIMIT` `TORRENT_PEERS_LIMIT` is now the limit for the number of peers in an announce request (UDP and HTTP tracker). Besides, the endpoint to get the torrent details in the API does not limit the number of peers for the torrent. So the API returns all peers in the tracker. This could lead to performance issues and we might need to paginate results, but the API should either return all peers and paginate them in a new endpoint. --- src/core/mod.rs | 21 +++++--- src/core/torrent/mod.rs | 79 +++++++++++++++++++------------ src/core/torrent/repository.rs | 10 ++-- src/servers/http/mod.rs | 4 +- src/servers/udp/handlers.rs | 12 ++--- src/shared/bit_torrent/common.rs | 1 - tests/servers/http/v1/contract.rs | 8 ++-- 7 files changed, 79 insertions(+), 56 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index caac5b1ea..01dd295a2 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -458,6 +458,9 @@ use crate::core::databases::Database; use crate::core::torrent::{SwarmMetadata, SwarmStats}; use crate::shared::bit_torrent::info_hash::InfoHash; +/// The maximum number of returned peers for a torrent. +pub const TORRENT_PEERS_LIMIT: usize = 74; + /// The domain layer tracker service. /// /// Its main responsibility is to handle the `announce` and `scrape` requests. 
@@ -623,7 +626,7 @@ impl Tracker { let swarm_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - let peers = self.get_peers_for_peer(info_hash, peer).await; + let peers = self.get_torrent_peers_for_peer(info_hash, peer).await; AnnounceData { peers, @@ -692,24 +695,28 @@ impl Tracker { Ok(()) } - async fn get_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec { + async fn get_torrent_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec { let read_lock = self.torrents.get_torrents().await; match read_lock.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers_for_peer(peer).into_iter().copied().collect(), + Some(entry) => entry + .get_peers_for_peer(peer, TORRENT_PEERS_LIMIT) + .into_iter() + .copied() + .collect(), } } /// # Context: Tracker /// /// Get all torrent peers for a given torrent - pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { + pub async fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec { let read_lock = self.torrents.get_torrents().await; match read_lock.get(info_hash) { None => vec![], - Some(entry) => entry.get_all_peers().into_iter().copied().collect(), + Some(entry) => entry.get_peers(TORRENT_PEERS_LIMIT).into_iter().copied().collect(), } } @@ -1253,7 +1260,7 @@ mod tests { tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - let peers = tracker.get_all_torrent_peers(&info_hash).await; + let peers = tracker.get_torrent_peers(&info_hash).await; assert_eq!(peers, vec![peer]); } @@ -1267,7 +1274,7 @@ mod tests { tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - let peers = tracker.get_peers_for_peer(&info_hash, &peer).await; + let peers = tracker.get_torrent_peers_for_peer(&info_hash, &peer).await; assert_eq!(peers, vec![]); } diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index a49e218a9..79828d368 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -36,7 +36,6 @@ use 
aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; use super::peer::{self, Peer}; -use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; use crate::shared::clock::{Current, TimeNow}; /// A data structure containing all the information about a torrent in the tracker. @@ -100,7 +99,7 @@ impl Entry { /// /// The number of peers that have complete downloading is synchronously updated when peers are updated. /// That's the total torrent downloads counter. - pub fn update_peer(&mut self, peer: &peer::Peer) -> bool { + pub fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { let mut did_torrent_stats_change: bool = false; match peer.event { @@ -123,26 +122,44 @@ impl Entry { did_torrent_stats_change } - /// Get all swarm peers, limiting the result to the maximum number of scrape - /// torrents. + /// Get all swarm peers. #[must_use] pub fn get_all_peers(&self) -> Vec<&peer::Peer> { - self.peers.values().take(MAX_SCRAPE_TORRENTS as usize).collect() + self.peers.values().collect() + } + + /// Get swarm peers, limiting the result. + #[must_use] + pub fn get_peers(&self, limit: usize) -> Vec<&peer::Peer> { + self.peers.values().take(limit).collect() + } + + /// It returns the list of peers for a given peer client. + /// + /// It filters out the input peer, typically because we want to return this + /// list of peers to that client peer. + #[must_use] + pub fn get_all_peers_for_peer(&self, client: &Peer) -> Vec<&peer::Peer> { + self.peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer.peer_addr != client.peer_addr) + .collect() } /// It returns the list of peers for a given peer client, limiting the - /// result to the maximum number of scrape torrents. + /// result. /// /// It filters out the input peer, typically because we want to return this /// list of peers to that client peer. 
#[must_use] - pub fn get_peers_for_peer(&self, client: &Peer) -> Vec<&peer::Peer> { + pub fn get_peers_for_peer(&self, client: &Peer, limit: usize) -> Vec<&peer::Peer> { self.peers .values() // Take peers which are not the client peer .filter(|peer| peer.peer_addr != client.peer_addr) // Limit the number of peers on the result - .take(MAX_SCRAPE_TORRENTS as usize) + .take(limit) .collect() } @@ -193,8 +210,8 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::core::peer; use crate::core::torrent::Entry; + use crate::core::{peer, TORRENT_PEERS_LIMIT}; use crate::shared::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; struct TorrentPeerBuilder { @@ -275,7 +292,7 @@ mod tests { let mut torrent_entry = Entry::new(); let torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.update_peer(&torrent_peer); // Add the peer + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer assert_eq!(*torrent_entry.get_all_peers()[0], torrent_peer); assert_eq!(torrent_entry.get_all_peers().len(), 1); @@ -286,7 +303,7 @@ mod tests { let mut torrent_entry = Entry::new(); let torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.update_peer(&torrent_peer); // Add the peer + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer assert_eq!(torrent_entry.get_all_peers(), vec![&torrent_peer]); } @@ -295,10 +312,10 @@ mod tests { fn a_peer_can_be_updated_in_a_torrent_entry() { let mut torrent_entry = Entry::new(); let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.update_peer(&torrent_peer); // Add the peer + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer torrent_peer.event = AnnounceEvent::Completed; // Update the peer - torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry 
assert_eq!(torrent_entry.get_all_peers()[0].event, AnnounceEvent::Completed); } @@ -307,10 +324,10 @@ mod tests { fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { let mut torrent_entry = Entry::new(); let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.update_peer(&torrent_peer); // Add the peer + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer torrent_peer.event = AnnounceEvent::Stopped; // Update the peer - torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry assert_eq!(torrent_entry.get_all_peers().len(), 0); } @@ -320,10 +337,10 @@ mod tests { let mut torrent_entry = Entry::new(); let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.update_peer(&torrent_peer); // Add the peer + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer torrent_peer.event = AnnounceEvent::Completed; // Update the peer - let stats_have_changed = torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + let stats_have_changed = torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry assert!(stats_have_changed); } @@ -335,7 +352,7 @@ mod tests { let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Add a peer that did not exist before in the entry - let torrent_stats_have_not_changed = !torrent_entry.update_peer(&torrent_peer_announcing_complete_event); + let torrent_stats_have_not_changed = !torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); assert!(torrent_stats_have_not_changed); } @@ -346,10 +363,10 @@ mod tests { let mut torrent_entry = Entry::new(); let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let torrent_peer = 
TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); - torrent_entry.update_peer(&torrent_peer); // Add peer + torrent_entry.insert_or_update_peer(&torrent_peer); // Add peer // Get peers excluding the one we have just added - let peers = torrent_entry.get_peers_for_peer(&torrent_peer); + let peers = torrent_entry.get_all_peers_for_peer(&torrent_peer); assert_eq!(peers.len(), 0); } @@ -364,16 +381,16 @@ mod tests { let torrent_peer_1 = TorrentPeerBuilder::default() .with_peer_address(SocketAddr::new(peer_ip, 8080)) .into(); - torrent_entry.update_peer(&torrent_peer_1); + torrent_entry.insert_or_update_peer(&torrent_peer_1); // Add peer 2 let torrent_peer_2 = TorrentPeerBuilder::default() .with_peer_address(SocketAddr::new(peer_ip, 8081)) .into(); - torrent_entry.update_peer(&torrent_peer_2); + torrent_entry.insert_or_update_peer(&torrent_peer_2); // Get peers for peer 1 - let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1); + let peers = torrent_entry.get_all_peers_for_peer(&torrent_peer_1); // The peer 2 using the same IP but different port should be included assert_eq!(peers[0].peer_addr.ip(), Ipv4Addr::new(127, 0, 0, 1)); @@ -397,10 +414,10 @@ mod tests { let torrent_peer = TorrentPeerBuilder::default() .with_peer_id(peer_id_from_i32(peer_number)) .into(); - torrent_entry.update_peer(&torrent_peer); + torrent_entry.insert_or_update_peer(&torrent_peer); } - let peers = torrent_entry.get_all_peers(); + let peers = torrent_entry.get_peers(TORRENT_PEERS_LIMIT); assert_eq!(peers.len(), 74); } @@ -410,7 +427,7 @@ mod tests { let mut torrent_entry = Entry::new(); let torrent_seeder = a_torrent_seeder(); - torrent_entry.update_peer(&torrent_seeder); // Add seeder + torrent_entry.insert_or_update_peer(&torrent_seeder); // Add seeder assert_eq!(torrent_entry.get_stats().0, 1); } @@ -420,7 +437,7 @@ mod tests { let mut torrent_entry = Entry::new(); let torrent_leecher = a_torrent_leecher(); - torrent_entry.update_peer(&torrent_leecher); // 
Add leecher + torrent_entry.insert_or_update_peer(&torrent_leecher); // Add leecher assert_eq!(torrent_entry.get_stats().2, 1); } @@ -430,11 +447,11 @@ mod tests { ) { let mut torrent_entry = Entry::new(); let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.update_peer(&torrent_peer); // Add the peer + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer // Announce "Completed" torrent download event. torrent_peer.event = AnnounceEvent::Completed; - torrent_entry.update_peer(&torrent_peer); // Update the peer + torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().1; @@ -448,7 +465,7 @@ mod tests { // Announce "Completed" torrent download event. // It's the first event announced from this peer. - torrent_entry.update_peer(&torrent_peer_announcing_complete_event); // Add the peer + torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); // Add the peer let number_of_peers_with_completed_torrent = torrent_entry.get_stats().1; @@ -468,7 +485,7 @@ mod tests { let inactive_peer = TorrentPeerBuilder::default() .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) .into(); - torrent_entry.update_peer(&inactive_peer); // Add the peer + torrent_entry.insert_or_update_peer(&inactive_peer); // Add the peer torrent_entry.remove_inactive_peers(timeout); diff --git a/src/core/torrent/repository.rs b/src/core/torrent/repository.rs index 62df9b510..ac3d03054 100644 --- a/src/core/torrent/repository.rs +++ b/src/core/torrent/repository.rs @@ -69,7 +69,7 @@ impl Repository for Sync { let (stats, stats_updated) = { let mut torrent_entry_lock = torrent_entry.lock().unwrap(); - let stats_updated = torrent_entry_lock.update_peer(peer); + let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); let stats = torrent_entry_lock.get_stats(); (stats, stats_updated) @@ -126,7 +126,7 @@ impl Repository for 
SyncSingle { std::collections::btree_map::Entry::Occupied(entry) => entry.into_mut(), }; - let stats_updated = torrent_entry.update_peer(peer); + let stats_updated = torrent_entry.insert_or_update_peer(peer); let stats = torrent_entry.get_stats(); ( @@ -168,7 +168,7 @@ impl TRepositoryAsync for RepositoryAsync { let (stats, stats_updated) = { let mut torrent_entry_lock = torrent_entry.lock().await; - let stats_updated = torrent_entry_lock.update_peer(peer); + let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); let stats = torrent_entry_lock.get_stats(); (stats, stats_updated) @@ -226,7 +226,7 @@ impl TRepositoryAsync for AsyncSync { let (stats, stats_updated) = { let mut torrent_entry_lock = torrent_entry.lock().unwrap(); - let stats_updated = torrent_entry_lock.update_peer(peer); + let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); let stats = torrent_entry_lock.get_stats(); (stats, stats_updated) @@ -273,7 +273,7 @@ impl TRepositoryAsync for RepositoryAsyncSingle { let (stats, stats_updated) = { let mut torrents_lock = self.torrents.write().await; let torrent_entry = torrents_lock.entry(*info_hash).or_insert(Entry::new()); - let stats_updated = torrent_entry.update_peer(peer); + let stats_updated = torrent_entry.insert_or_update_peer(peer); let stats = torrent_entry.get_stats(); (stats, stats_updated) diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index 10666d8a5..b2d232fc6 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -71,7 +71,7 @@ //! is behind a reverse proxy. //! //! > **NOTICE**: the maximum number of peers that the tracker can return is -//! `74`. Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). +//! `74`. Defined with a hardcoded const [`TORRENT_PEERS_LIMIT`](crate::core::TORRENT_PEERS_LIMIT). //! Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) //! for more information about this limitation. 
//! @@ -237,7 +237,7 @@ //! In order to scrape multiple torrents at the same time you can pass multiple //! `info_hash` parameters: `info_hash=%81%00%0...00%00%00&info_hash=%82%00%0...00%00%00` //! -//! > **NOTICE**: the maximum number of torrent you can scrape at the same time +//! > **NOTICE**: the maximum number of torrents you can scrape at the same time //! is `74`. Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). //! //! **Sample response** diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 39a077466..a1461a457 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -598,7 +598,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; let expected_peer = TorrentPeerBuilder::default() .with_peer_id(peer::Id(peer_id.0)) @@ -659,7 +659,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } @@ -763,7 +763,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; let external_ip_in_tracker_configuration = tracker.config.external_ip.clone().unwrap().parse::().unwrap(); @@ -820,7 +820,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; let expected_peer = TorrentPeerBuilder::default() 
.with_peer_id(peer::Id(peer_id.0)) @@ -884,7 +884,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; // When using IPv6 the tracker converts the remote client ip into a IPv4 address assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); @@ -1001,7 +1001,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; let _external_ip_in_tracker_configuration = tracker.config.external_ip.clone().unwrap().parse::().unwrap(); diff --git a/src/shared/bit_torrent/common.rs b/src/shared/bit_torrent/common.rs index 0ce345a3e..9bf9dfd3c 100644 --- a/src/shared/bit_torrent/common.rs +++ b/src/shared/bit_torrent/common.rs @@ -5,7 +5,6 @@ use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde::{Deserialize, Serialize}; /// The maximum number of torrents that can be returned in an `scrape` response. -/// It's also the maximum number of peers returned in an `announce` response. /// /// The [BEP 15. 
UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html) /// defines this limit: diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index d7f4d50cc..9a6aa2454 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -740,7 +740,7 @@ mod for_all_config_modes { client.announce(&announce_query).await; - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), client_ip); @@ -776,7 +776,7 @@ mod for_all_config_modes { client.announce(&announce_query).await; - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); @@ -812,7 +812,7 @@ mod for_all_config_modes { client.announce(&announce_query).await; - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); @@ -846,7 +846,7 @@ mod for_all_config_modes { ) .await; - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); From d03e930c1a842f46473cf3fb0d59e2638a0abfef Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 2 Jan 2024 15:55:53 +0000 Subject: [PATCH 0664/1003] feat!: [#536] remove always-empty peer list in torrent list item It's unused. 
--- src/servers/apis/v1/context/torrent/resources/torrent.rs | 5 ----- tests/servers/api/v1/contract/context/torrent.rs | 3 --- 2 files changed, 8 deletions(-) diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index 74577a23e..fc43fbb7a 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -44,9 +44,6 @@ pub struct ListItem { /// The torrent's leechers counter. Active peers that are downloading the /// torrent. pub leechers: u64, - /// The torrent's peers. It's always `None` in the struct and `null` in the - /// JSON response. - pub peers: Option>, // todo: this is always None. Remove field from endpoint? } impl ListItem { @@ -90,7 +87,6 @@ impl From for ListItem { seeders: basic_info.seeders, completed: basic_info.completed, leechers: basic_info.leechers, - peers: None, } } } @@ -156,7 +152,6 @@ mod tests { seeders: 1, completed: 2, leechers: 3, - peers: None, } ); } diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index ab497787f..3cac55e6a 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -35,7 +35,6 @@ async fn should_allow_getting_torrents() { seeders: 1, completed: 0, leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent }], ) .await; @@ -65,7 +64,6 @@ async fn should_allow_limiting_the_torrents_in_the_result() { seeders: 1, completed: 0, leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent }], ) .await; @@ -95,7 +93,6 @@ async fn should_allow_the_torrents_result_pagination() { seeders: 1, completed: 0, leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent }], ) .await; From 02b64f2922b39cb6cc24867cf2e460fd7e2c7017 Mon Sep 17 00:00:00 2001 From: Jose Celano 
Date: Tue, 2 Jan 2024 17:02:52 +0000 Subject: [PATCH 0665/1003] refactor: [#343] remove deprecated function --- src/core/mod.rs | 56 ------------------------------------- src/servers/udp/error.rs | 4 +++ src/servers/udp/handlers.rs | 48 +++++++++++++------------------ 3 files changed, 23 insertions(+), 85 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 01dd295a2..beb4b133d 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -909,62 +909,6 @@ impl Tracker { Ok(()) } - /// It authenticates and authorizes a UDP tracker request. - /// - /// # Context: Authentication and Authorization - /// - /// # Errors - /// - /// Will return a `torrent::Error::PeerKeyNotValid` if the `key` is not valid. - /// - /// Will return a `torrent::Error::PeerNotAuthenticated` if the `key` is `None`. - /// - /// Will return a `torrent::Error::TorrentNotWhitelisted` if the the Tracker is in listed mode and the `info_hash` is not whitelisted. - #[deprecated(since = "3.0.0", note = "please use `authenticate` and `authorize` instead")] - pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), Error> { - // todo: this is a deprecated method. - // We're splitting authentication and authorization responsibilities. - // Use `authenticate` and `authorize` instead. 
- - // Authentication - - // no authentication needed in public mode - if self.is_public() { - return Ok(()); - } - - // check if auth_key is set and valid - if self.is_private() { - match key { - Some(key) => { - if let Err(e) = self.verify_auth_key(key).await { - return Err(Error::PeerKeyNotValid { - key: key.clone(), - source: (Arc::new(e) as Arc).into(), - }); - } - } - None => { - return Err(Error::PeerNotAuthenticated { - location: Location::caller(), - }); - } - } - } - - // Authorization - - // check if info_hash is whitelisted - if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await { - return Err(Error::TorrentNotWhitelisted { - info_hash: *info_hash, - location: Location::caller(), - }); - } - - Ok(()) - } - /// Right now, there is only authorization when the `Tracker` runs in /// `listed` or `private_listed` modes. /// diff --git a/src/servers/udp/error.rs b/src/servers/udp/error.rs index ce59cd015..fb7bb93f3 100644 --- a/src/servers/udp/error.rs +++ b/src/servers/udp/error.rs @@ -29,4 +29,8 @@ pub enum Error { BadRequest { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, + + /// Error returned when tracker requires authentication. + #[error("domain tracker requires authentication but is not supported in current UDP implementation. 
Location: {location}")] + TrackerAuthenticationRequired { location: &'static Location<'static> }, } diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index a1461a457..f3c7b58b0 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -10,7 +10,7 @@ use aquatic_udp_protocol::{ use log::{debug, info}; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; -use crate::core::{statistics, Tracker}; +use crate::core::{statistics, ScrapeData, Tracker}; use crate::servers::udp::error::Error; use crate::servers::udp::peer_builder; use crate::servers::udp::request::AnnounceWrapper; @@ -99,22 +99,6 @@ pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, t Ok(Response::from(response)) } -/// It authenticates the request. It returns an error if the peer is not allowed -/// to make the request. -/// -/// # Errors -/// -/// Will return `Error` if unable to `authenticate_request`. -#[allow(deprecated)] -pub async fn authenticate(info_hash: &InfoHash, tracker: &Tracker) -> Result<(), Error> { - tracker - .authenticate_request(info_hash, &None) - .await - .map_err(|e| Error::TrackerError { - source: (Arc::new(e) as Arc).into(), - }) -} - /// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) /// request for more information. 
/// @@ -128,6 +112,13 @@ pub async fn handle_announce( ) -> Result { debug!("udp announce request: {:#?}", announce_request); + // Authentication + if tracker.requires_authentication() { + return Err(Error::TrackerAuthenticationRequired { + location: Location::caller(), + }); + } + check(&remote_addr, &from_connection_id(&announce_request.connection_id))?; let wrapped_announce_request = AnnounceWrapper::new(announce_request); @@ -135,7 +126,10 @@ pub async fn handle_announce( let info_hash = wrapped_announce_request.info_hash; let remote_client_ip = remote_addr.ip(); - authenticate(&info_hash, tracker).await?; + // Authorization + tracker.authorize(&info_hash).await.map_err(|e| Error::TrackerError { + source: (Arc::new(e) as Arc).into(), + })?; info!(target: "UDP", "\"ANNOUNCE TxID {} IH {}\"", announce_request.transaction_id.0, info_hash.to_hex_string()); @@ -222,28 +216,24 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra info_hashes.push(InfoHash(info_hash.0)); } - let scrape_data = tracker.scrape(&info_hashes).await; + let scrape_data = if tracker.requires_authentication() { + ScrapeData::zeroed(&info_hashes) + } else { + tracker.scrape(&info_hashes).await + }; let mut torrent_stats: Vec = Vec::new(); for file in &scrape_data.files { - let info_hash = file.0; let swarm_metadata = file.1; - #[allow(deprecated)] - let scrape_entry = if tracker.authenticate_request(info_hash, &None).await.is_ok() { - #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_possible_truncation)] + let scrape_entry = { TorrentScrapeStatistics { seeders: NumberOfPeers(i64::from(swarm_metadata.complete) as i32), completed: NumberOfDownloads(i64::from(swarm_metadata.downloaded) as i32), leechers: NumberOfPeers(i64::from(swarm_metadata.incomplete) as i32), } - } else { - TorrentScrapeStatistics { - seeders: NumberOfPeers(0), - completed: NumberOfDownloads(0), - leechers: NumberOfPeers(0), - } }; torrent_stats.push(scrape_entry); From 
fc9bd77e236cad3106c90aeed708760f667ea621 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 3 Jan 2024 19:29:24 +0100 Subject: [PATCH 0666/1003] refactor: preload info hashes in wrk benchmark --- tests/wrk_benchmark_announce.lua | 71 +++++++++++++++++++++++--------- 1 file changed, 52 insertions(+), 19 deletions(-) diff --git a/tests/wrk_benchmark_announce.lua b/tests/wrk_benchmark_announce.lua index 620ba2680..c0bdac48d 100644 --- a/tests/wrk_benchmark_announce.lua +++ b/tests/wrk_benchmark_announce.lua @@ -1,34 +1,67 @@ +-- else the randomness would be the same every run +math.randomseed(os.time()) + +local charset = "0123456789ABCDEF" + +function hex_to_char(hex) + local n = tonumber(hex, 16) + local f = string.char(n) + return f +end + +function hex_string_to_char_string(hex) + local ret = {} + local r + for i = 0, 19 do + local x = i * 2 + r = hex:sub(x+1, x+2) + local f = hex_to_char(r) + table.insert(ret, f) + end + return table.concat(ret) +end + +function url_encode(str) + str = string.gsub (str, "([^0-9a-zA-Z !'()*._~-])", -- locale independent + function (c) return string.format ("%%%02X", string.byte(c)) end) + str = string.gsub (str, " ", "+") + return str +end + +function gen_hex_string(length) + local ret = {} + local r + for i = 1, length do + r = math.random(1, #charset) + table.insert(ret, charset:sub(r, r)) + end + return table.concat(ret) +end + +function random_info_hash() + local hexString = gen_hex_string(40) + local str = hex_string_to_char_string(hexString) + return url_encode(str) +end + function generate_unique_info_hashes(size) local result = {} - local seen = {} - - for i = 0, size - 1 do - local bytes = {} - bytes[1] = i & 0xFF - bytes[2] = (i >> 8) & 0xFF - bytes[3] = (i >> 16) & 0xFF - bytes[4] = (i >> 24) & 0xFF - - local info_hash = bytes - local key = table.concat(info_hash, ",") - - if not seen[key] then - table.insert(result, info_hash) - seen[key] = true - end + + for i = 1, size do + result[i] = random_info_hash() end return 
result end -info_hashes = generate_unique_info_hashes(10000000) +info_hashes = generate_unique_info_hashes(5000000) -index = 0 +index = 1 -- the request function that will run at each request request = function() path = "/announce?info_hash=" .. info_hashes[index] .. "&peer_id=-lt0D80-a%D4%10%19%99%A6yh%9A%E1%CD%96&port=54434&uploaded=885&downloaded=0&left=0&corrupt=0&key=A78381BD&numwant=200&compact=1&no_peer_id=1&supportcrypto=1&redundant=0" - index += 1 + index = index + 1 headers = {} headers["X-Forwarded-For"] = "1.1.1.1" return wrk.format("GET", path, headers) From 541a0729634d23108c5c4945eb81469f3a5911e0 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 4 Jan 2024 02:12:34 +1100 Subject: [PATCH 0667/1003] ci: add threshold to patch changes --- codecov.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/codecov.yaml b/codecov.yaml index f0878195b..aaa25bf74 100644 --- a/codecov.yaml +++ b/codecov.yaml @@ -4,3 +4,7 @@ coverage: default: target: auto threshold: 0.5% + patch: + default: + target: auto + threshold: 0.5% From 9a0919f085cde6119d1352152f0797aebec484d9 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 4 Jan 2024 11:55:02 +1100 Subject: [PATCH 0668/1003] chore: update cargo deps ``` Updating crates.io index Updating async-trait v0.1.76 -> v0.1.77 Updating clang-sys v1.6.1 -> v1.7.0 Updating libloading v0.7.4 -> v0.8.1 Updating proc-macro2 v1.0.72 -> v1.0.75 Updating quote v1.0.33 -> v1.0.35 Updating semver v1.0.20 -> v1.0.21 Updating serde v1.0.193 -> v1.0.194 Updating serde_bytes v0.11.13 -> v0.11.14 Updating serde_derive v1.0.193 -> v1.0.194 Updating serde_json v1.0.108 -> v1.0.110 Updating serde_path_to_error v0.1.14 -> v0.1.15 Updating serde_repr v0.1.17 -> v0.1.18 Updating syn v2.0.43 -> v2.0.47 Updating thiserror v1.0.53 -> v1.0.56 Updating thiserror-impl v1.0.53 -> v1.0.56 Updating winnow v0.5.31 -> v0.5.32 ``` --- Cargo.lock | 120 ++++++++++++++++++++++++++--------------------------- 1 file changed, 60 insertions(+), 60 
deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4e5be3291..9b7c10f39 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -179,13 +179,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.76" +version = "0.1.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "531b97fb4cd3dfdce92c35dedbfdc1f0b9d8091c8ca943d6dae340ef5012d514" +checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -270,7 +270,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -357,7 +357,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -413,7 +413,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", "syn_derive", ] @@ -563,9 +563,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" +checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" dependencies = [ "glob", "libc", @@ -603,7 +603,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -820,7 +820,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -831,7 +831,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -865,7 +865,7 @@ checksum = "9abcad25e9720609ccb3dcdb795d845e37d8ce34183330a9f48b03a1a71c8e21" dependencies = [ "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -1040,7 +1040,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ 
"frunk_proc_macro_helpers", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -1052,7 +1052,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -1064,7 +1064,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -1129,7 +1129,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -1504,7 +1504,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5305557fa27b460072ae15ce07617e999f5879f14d376c8449f0bfb9f9d8e91e" dependencies = [ "derive_utils", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -1670,12 +1670,12 @@ checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" [[package]] name = "libloading" -version = "0.7.4" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" dependencies = [ "cfg-if", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -1817,7 +1817,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -1868,7 +1868,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", "termcolor", "thiserror", ] @@ -2063,7 +2063,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -2186,7 +2186,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -2255,7 +2255,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] 
[[package]] @@ -2388,9 +2388,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.72" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a293318316cf6478ec1ad2a21c49390a8d5b5eae9fab736467d93fbc0edc29c5" +checksum = "907a61bd0f64c2f29cd1cf1dc34d05176426a3f504a78010f08416ddb7b13708" dependencies = [ "unicode-ident", ] @@ -2417,9 +2417,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -2855,15 +2855,15 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" +checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" [[package]] name = "serde" -version = "1.0.193" +version = "1.0.194" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +checksum = "0b114498256798c94a0689e1a15fec6005dee8ac1f41de56404b67afc2a4b773" dependencies = [ "serde_derive", ] @@ -2880,29 +2880,29 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.13" +version = "0.11.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bb1879ea93538b78549031e2d54da3e901fd7e75f2e4dc758d760937b123d10" +checksum = "8b8497c313fd43ab992087548117643f6fcd935cbf36f176ffda0aacf9591734" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.193" +version = "1.0.194" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" 
+checksum = "a3385e45322e8f9931410f01b3031ec534c3947d0e94c18049af4d9f9907d4e0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] name = "serde_json" -version = "1.0.108" +version = "1.0.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +checksum = "6fbd975230bada99c8bb618e0c365c2eefa219158d5c6c29610fd09ff1833257" dependencies = [ "itoa", "ryu", @@ -2911,9 +2911,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" +checksum = "ebd154a240de39fdebcf5775d2675c204d7c13cf39a4c697be6493c8e734337c" dependencies = [ "itoa", "serde", @@ -2921,13 +2921,13 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3081f5ffbb02284dda55132aa26daecedd7372a42417bbbab6f14ab7d6bb9145" +checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -2977,7 +2977,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -3095,9 +3095,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.43" +version = "2.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee659fb5f3d355364e1f3e5bc10fb82068efbf824a1e9d1c9504244a6469ad53" +checksum = "1726efe18f42ae774cc644f330953a5e7b3c3003d3edcecf18850fe9d4dd9afb" dependencies = [ "proc-macro2", "quote", @@ -3113,7 +3113,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -3196,22 +3196,22 @@ checksum = 
"3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.53" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2cd5904763bad08ad5513ddbb12cf2ae273ca53fa9f68e843e236ec6dfccc09" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.53" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcf4a824cce0aeacd6f38ae6f24234c8e80d68632338ebaa1443b5df9e29e19" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -3294,7 +3294,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] @@ -3715,7 +3715,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", "wasm-bindgen-shared", ] @@ -3749,7 +3749,7 @@ checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3944,9 +3944,9 @@ checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winnow" -version = "0.5.31" +version = "0.5.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a4882e6b134d6c28953a387571f1acdd3496830d5e36c5e3a1075580ea641c" +checksum = "8434aeec7b290e8da5c3f0d628cb0eac6cabcb31d14bb74f779a08109a5914d6" dependencies = [ "memchr", ] @@ -3996,7 +3996,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.43", + "syn 2.0.47", ] [[package]] From 13140f60dbd461fafb2ce234c6c968d7f23052ec Mon Sep 17 00:00:00 
2001 From: Cameron Garnham Date: Sat, 30 Dec 2023 18:38:25 +1100 Subject: [PATCH 0669/1003] dev: cleanup service bootstraping --- packages/configuration/src/lib.rs | 4 +- packages/located-error/src/lib.rs | 8 +- src/app.rs | 16 +- src/bootstrap/jobs/health_check_api.rs | 27 +- src/bootstrap/jobs/http_tracker.rs | 111 ++++---- src/bootstrap/jobs/mod.rs | 83 ++++++ src/bootstrap/jobs/tracker_apis.rs | 87 +++--- src/bootstrap/jobs/udp_tracker.rs | 36 ++- src/core/auth.rs | 4 +- src/core/databases/error.rs | 8 +- src/servers/apis/mod.rs | 11 +- src/servers/apis/server.rs | 265 ++++++------------ src/servers/health_check_api/server.rs | 18 +- src/servers/http/server.rs | 163 +++++++---- src/servers/http/v1/launcher.rs | 188 ------------- src/servers/http/v1/mod.rs | 1 - src/servers/signals.rs | 37 ++- src/servers/udp/handlers.rs | 3 +- src/servers/udp/server.rs | 218 +++++++------- tests/servers/api/test_environment.rs | 47 +++- .../servers/api/v1/contract/configuration.rs | 31 +- .../health_check_api/test_environment.rs | 6 +- tests/servers/http/test_environment.rs | 77 ++--- tests/servers/http/v1/contract.rs | 173 ++++++------ tests/servers/udp/test_environment.rs | 17 +- 25 files changed, 804 insertions(+), 835 deletions(-) delete mode 100644 src/servers/http/v1/launcher.rs diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 918d9f014..1c0979524 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -239,7 +239,7 @@ use config::{Config, ConfigError, File, FileFormat}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; use thiserror::Error; -use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_located_error::{DynError, Located, LocatedError}; use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; /// Information required for loading config @@ -289,7 +289,7 @@ impl Info { fs::read_to_string(config_path) .map_err(|e| 
Error::UnableToLoadFromConfigFile { - source: (Arc::new(e) as Arc).into(), + source: (Arc::new(e) as DynError).into(), })? .parse() .map_err(|_e: std::convert::Infallible| Error::Infallible)? diff --git a/packages/located-error/src/lib.rs b/packages/located-error/src/lib.rs index bf8618686..49e135600 100644 --- a/packages/located-error/src/lib.rs +++ b/packages/located-error/src/lib.rs @@ -33,6 +33,10 @@ use std::error::Error; use std::panic::Location; use std::sync::Arc; +use log::debug; + +pub type DynError = Arc; + /// A generic wrapper around an error. /// /// Where `E` is the inner error (source error). @@ -90,13 +94,13 @@ where source: Arc::new(self.0), location: Box::new(*std::panic::Location::caller()), }; - log::debug!("{e}"); + debug!("{e}"); e } } #[allow(clippy::from_over_into)] -impl<'a> Into> for Arc { +impl<'a> Into> for DynError { #[track_caller] fn into(self) -> LocatedError<'a, dyn std::error::Error + Send + Sync> { LocatedError { diff --git a/src/app.rs b/src/app.rs index 32c12d74a..3608aa22e 100644 --- a/src/app.rs +++ b/src/app.rs @@ -28,8 +28,7 @@ use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; -use crate::core; -use crate::servers::http::Version; +use crate::{core, servers}; /// # Panics /// @@ -68,21 +67,22 @@ pub async fn start(config: Arc, tracker: Arc) -> V udp_tracker_config.bind_address, config.mode ); } else { - jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone())); + jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone()).await); } } // Start the HTTP blocks for http_tracker_config in &config.http_trackers { - if !http_tracker_config.enabled { - continue; - } - jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::V1).await); + if let Some(job) = http_tracker::start_job(http_tracker_config, tracker.clone(), servers::http::Version::V1).await { 
+ jobs.push(job); + }; } // Start HTTP API if config.http_api.enabled { - jobs.push(tracker_apis::start_job(&config.http_api, tracker.clone()).await); + if let Some(job) = tracker_apis::start_job(&config.http_api, tracker.clone(), servers::apis::Version::V1).await { + jobs.push(job); + }; } // Start runners to remove torrents without peers, every interval diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index 96a703afc..83eb77f6b 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -6,16 +6,13 @@ //! The [`health_check_api::start_job`](crate::bootstrap::jobs::health_check_api::start_job) //! function spawns a new asynchronous task, that tasks is the "**launcher**". //! The "**launcher**" starts the actual server and sends a message back -//! to the main application. The main application waits until receives -//! the message [`ApiServerJobStarted`] -//! from the "**launcher**". +//! to the main application. //! //! The "**launcher**" is an intermediary thread that decouples the Health Check //! API server from the process that handles it. //! //! Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) //! for the API configuration options. -use std::net::SocketAddr; use std::sync::Arc; use log::info; @@ -23,19 +20,9 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; +use super::Started; use crate::servers::health_check_api::server; -/// This is the message that the "launcher" spawned task sends to the main -/// application process to notify the API server was successfully started. -/// -/// > **NOTICE**: it does not mean the API server is ready to receive requests. -/// It only means the new server started. It might take some time to the server -/// to be ready to accept request. 
-#[derive(Debug)] -pub struct ApiServerJobStarted { - pub bound_addr: SocketAddr, -} - /// This function starts a new Health Check API server with the provided /// configuration. /// @@ -51,15 +38,15 @@ pub async fn start_job(config: Arc) -> JoinHandle<()> { .health_check_api .bind_address .parse::() - .expect("Health Check API bind_address invalid."); + .expect("it should have a valid health check bind address"); - let (tx, rx) = oneshot::channel::(); + let (tx_start, rx_start) = oneshot::channel::(); // Run the API server let join_handle = tokio::spawn(async move { info!("Starting Health Check API server: http://{}", bind_addr); - let handle = server::start(bind_addr, tx, config.clone()); + let handle = server::start(bind_addr, tx_start, config.clone()); if let Ok(()) = handle.await { info!("Health Check API server on http://{} stopped", bind_addr); @@ -67,8 +54,8 @@ pub async fn start_job(config: Arc) -> JoinHandle<()> { }); // Wait until the API server job is running - match rx.await { - Ok(_msg) => info!("Torrust Health Check API server started"), + match rx_start.await { + Ok(msg) => info!("Torrust Health Check API server started on socket: {}", msg.address), Err(e) => panic!("the Health Check API server was dropped: {e}"), } diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index ecf6bd8ac..79e01fb3d 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -7,88 +7,85 @@ //! //! The [`http_tracker::start_job`](crate::bootstrap::jobs::http_tracker::start_job) function spawns a new asynchronous task, //! that tasks is the "**launcher**". The "**launcher**" starts the actual server and sends a message back to the main application. -//! The main application waits until receives the message [`ServerJobStarted`] from the "**launcher**". //! //! The "**launcher**" is an intermediary thread that decouples the HTTP servers from the process that handles it. 
The HTTP could be used independently in the future. //! In that case it would not need to notify a parent process. +use std::net::SocketAddr; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use log::info; -use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpTracker; +use super::make_rust_tls; use crate::core; -use crate::servers::http::v1::launcher; +use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::http::Version; -/// This is the message that the "**launcher**" spawned task sends to the main application process to notify that the HTTP server was successfully started. -/// -/// > **NOTICE**: it does not mean the HTTP server is ready to receive requests. It only means the new server started. It might take some time to the server to be ready to accept request. -#[derive(Debug)] -pub struct ServerJobStarted(); - /// It starts a new HTTP server with the provided configuration and version. /// /// Right now there is only one version but in the future we could support more than one HTTP tracker version at the same time. /// This feature allows supporting breaking changes on `BitTorrent` BEPs. -pub async fn start_job(config: &HttpTracker, tracker: Arc, version: Version) -> JoinHandle<()> { - match version { - Version::V1 => start_v1(config, tracker.clone()).await, - } -} - +/// /// # Panics /// /// It would panic if the `config::HttpTracker` struct would contain inappropriate values. 
-async fn start_v1(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { - let bind_addr = config - .bind_address - .parse::() - .expect("Tracker API bind_address invalid."); - let ssl_enabled = config.ssl_enabled; - let ssl_cert_path = config.ssl_cert_path.clone(); - let ssl_key_path = config.ssl_key_path.clone(); - - let (tx, rx) = oneshot::channel::(); - - // Run the API server - let join_handle = tokio::spawn(async move { - if !ssl_enabled { - info!("Starting Torrust HTTP tracker server on: http://{}", bind_addr); - - let handle = launcher::start(bind_addr, tracker); - - tx.send(ServerJobStarted()) - .expect("the HTTP tracker server should not be dropped"); +/// +pub async fn start_job(config: &HttpTracker, tracker: Arc, version: Version) -> Option> { + if config.enabled { + let socket = config + .bind_address + .parse::() + .expect("it should have a valid http tracker bind address"); + + let tls = make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path) + .await + .map(|tls| tls.expect("it should have a valid http tracker tls configuration")); + + match version { + Version::V1 => Some(start_v1(socket, tls, tracker.clone()).await), + } + } else { + info!("Note: Not loading Http Tracker Service, Not Enabled in Configuration."); + None + } +} - if let Ok(()) = handle.await { - info!("Torrust HTTP tracker server on http://{} stopped", bind_addr); - } - } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting Torrust HTTP tracker server on: https://{}", bind_addr); +async fn start_v1(socket: SocketAddr, tls: Option, tracker: Arc) -> JoinHandle<()> { + let server = HttpServer::new(Launcher::new(socket, tls)) + .start(tracker) + .await + .expect("it should be able to start to the http tracker"); + + tokio::spawn(async move { + server + .state + .task + .await + .expect("it should be able to join to the http tracker task"); + }) +} - let ssl_config = RustlsConfig::from_pem_file(ssl_cert_path.unwrap(), 
ssl_key_path.unwrap()) - .await - .unwrap(); +#[cfg(test)] +mod tests { + use std::sync::Arc; - let handle = launcher::start_tls(bind_addr, ssl_config, tracker); + use torrust_tracker_test_helpers::configuration::ephemeral_mode_public; - tx.send(ServerJobStarted()) - .expect("the HTTP tracker server should not be dropped"); + use crate::bootstrap::app::initialize_with_configuration; + use crate::bootstrap::jobs::http_tracker::start_job; + use crate::servers::http::Version; - if let Ok(()) = handle.await { - info!("Torrust HTTP tracker server on https://{} stopped", bind_addr); - } - } - }); + #[tokio::test] + async fn it_should_start_http_tracker() { + let cfg = Arc::new(ephemeral_mode_public()); + let config = &cfg.http_trackers[0]; + let tracker = initialize_with_configuration(&cfg); + let version = Version::V1; - // Wait until the HTTP tracker server job is running - match rx.await { - Ok(_msg) => info!("Torrust HTTP tracker server started"), - Err(e) => panic!("the HTTP tracker server was dropped: {e}"), + start_job(config, tracker, version) + .await + .expect("it should be able to join to the http tracker start-job"); } - - join_handle } diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index 8c85ba45b..3a9936882 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -11,3 +11,86 @@ pub mod http_tracker; pub mod torrent_cleanup; pub mod tracker_apis; pub mod udp_tracker; + +/// This is the message that the "launcher" spawned task sends to the main +/// application process to notify the service was successfully started. 
+/// +#[derive(Debug)] +pub struct Started { + pub address: std::net::SocketAddr, +} + +pub async fn make_rust_tls(enabled: bool, cert: &Option, key: &Option) -> Option> { + if !enabled { + info!("tls not enabled"); + return None; + } + + if let (Some(cert), Some(key)) = (cert, key) { + info!("Using https: cert path: {cert}."); + info!("Using https: key path: {cert}."); + + Some( + RustlsConfig::from_pem_file(cert, key) + .await + .map_err(|err| Error::BadTlsConfig { + source: (Arc::new(err) as DynError).into(), + }), + ) + } else { + Some(Err(Error::MissingTlsConfig { + location: Location::caller(), + })) + } +} + +#[cfg(test)] +mod tests { + + use super::make_rust_tls; + + #[tokio::test] + async fn it_should_error_on_bad_tls_config() { + let (bad_cert_path, bad_key_path) = (Some("bad cert path".to_string()), Some("bad key path".to_string())); + let err = make_rust_tls(true, &bad_cert_path, &bad_key_path) + .await + .expect("tls_was_enabled") + .expect_err("bad_cert_and_key_files"); + + assert!(err + .to_string() + .contains("bad tls config: No such file or directory (os error 2)")); + } + + #[tokio::test] + async fn it_should_error_on_missing_tls_config() { + let err = make_rust_tls(true, &None, &None) + .await + .expect("tls_was_enabled") + .expect_err("missing_config"); + + assert_eq!(err.to_string(), "tls config missing"); + } +} + +use std::panic::Location; +use std::sync::Arc; + +use axum_server::tls_rustls::RustlsConfig; +use log::info; +use thiserror::Error; +use torrust_tracker_located_error::{DynError, LocatedError}; + +/// Error returned by the Bootstrap Process. +#[derive(Error, Debug)] +pub enum Error { + /// Enabled tls but missing config. + #[error("tls config missing")] + MissingTlsConfig { location: &'static Location<'static> }, + + /// Unable to parse tls Config. 
+ #[error("bad tls config: {source}")] + BadTlsConfig { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, +} diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index ca29d2b5f..f454b017f 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -20,16 +20,18 @@ //! //! Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) //! for the API configuration options. +use std::net::SocketAddr; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use log::info; -use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpApi; +use super::make_rust_tls; use crate::core; -use crate::servers::apis::server; +use crate::servers::apis::server::{ApiServer, Launcher}; +use crate::servers::apis::Version; /// This is the message that the "launcher" spawned task sends to the main /// application process to notify the API server was successfully started. @@ -49,51 +51,58 @@ pub struct ApiServerJobStarted(); /// # Panics /// /// It would panic if unable to send the `ApiServerJobStarted` notice. 
-pub async fn start_job(config: &HttpApi, tracker: Arc) -> JoinHandle<()> { - let bind_addr = config - .bind_address - .parse::() - .expect("Tracker API bind_address invalid."); - let ssl_enabled = config.ssl_enabled; - let ssl_cert_path = config.ssl_cert_path.clone(); - let ssl_key_path = config.ssl_key_path.clone(); - - let (tx, rx) = oneshot::channel::(); +/// +/// +pub async fn start_job(config: &HttpApi, tracker: Arc, version: Version) -> Option> { + if config.enabled { + let bind_to = config + .bind_address + .parse::() + .expect("it should have a valid tracker api bind address"); - // Run the API server - let join_handle = tokio::spawn(async move { - if !ssl_enabled { - info!("Starting Torrust APIs server on: http://{}", bind_addr); + let tls = make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path) + .await + .map(|tls| tls.expect("it should have a valid tracker api tls configuration")); - let handle = server::start(bind_addr, tracker); + match version { + Version::V1 => Some(start_v1(bind_to, tls, tracker.clone()).await), + } + } else { + info!("Note: Not loading Http Tracker Service, Not Enabled in Configuration."); + None + } +} - tx.send(ApiServerJobStarted()).expect("the API server should not be dropped"); +async fn start_v1(socket: SocketAddr, tls: Option, tracker: Arc) -> JoinHandle<()> { + let server = ApiServer::new(Launcher::new(socket, tls)) + .start(tracker) + .await + .expect("it should be able to start to the tracker api"); - if let Ok(()) = handle.await { - info!("Torrust APIs server on http://{} stopped", bind_addr); - } - } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting Torrust APIs server on: https://{}", bind_addr); + tokio::spawn(async move { + server.state.task.await.expect("failed to close service"); + }) +} - let ssl_config = RustlsConfig::from_pem_file(ssl_cert_path.unwrap(), ssl_key_path.unwrap()) - .await - .unwrap(); +#[cfg(test)] +mod tests { + use 
std::sync::Arc; - let handle = server::start_tls(bind_addr, ssl_config, tracker); + use torrust_tracker_test_helpers::configuration::ephemeral_mode_public; - tx.send(ApiServerJobStarted()).expect("the API server should not be dropped"); + use crate::bootstrap::app::initialize_with_configuration; + use crate::bootstrap::jobs::tracker_apis::start_job; + use crate::servers::apis::Version; - if let Ok(()) = handle.await { - info!("Torrust APIs server on https://{} stopped", bind_addr); - } - } - }); + #[tokio::test] + async fn it_should_start_http_tracker() { + let cfg = Arc::new(ephemeral_mode_public()); + let config = &cfg.http_api; + let tracker = initialize_with_configuration(&cfg); + let version = Version::V1; - // Wait until the APIs server job is running - match rx.await { - Ok(_msg) => info!("Torrust APIs server started"), - Err(e) => panic!("the API server was dropped: {e}"), + start_job(config, tracker, version) + .await + .expect("it should be able to join to the tracker api start-job"); } - - join_handle } diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 9a30c9126..5911bdf95 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -8,30 +8,38 @@ //! for the configuration options. use std::sync::Arc; -use log::{error, info, warn}; use tokio::task::JoinHandle; use torrust_tracker_configuration::UdpTracker; use crate::core; -use crate::servers::udp::server::Udp; +use crate::servers::udp::server::{Launcher, UdpServer}; /// It starts a new UDP server with the provided configuration. /// /// It spawns a new asynchronous task for the new UDP server. +/// +/// # Panics +/// +/// It will panic if the API binding address is not a valid socket. +/// It will panic if it is unable to start the UDP service. +/// It will panic if the task did not finish successfully. 
#[must_use] -pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { - let bind_addr = config.bind_address.clone(); +pub async fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { + let bind_to = config + .bind_address + .parse::() + .expect("it should have a valid udp tracker bind address"); + + let server = UdpServer::new(Launcher::new(bind_to)) + .start(tracker) + .await + .expect("it should be able to start the udp tracker"); tokio::spawn(async move { - match Udp::new(&bind_addr).await { - Ok(udp_server) => { - info!("Starting UDP server on: udp://{}", bind_addr); - udp_server.start(tracker).await; - } - Err(e) => { - warn!("Could not start UDP tracker on: udp://{}", bind_addr); - error!("{}", e); - } - } + server + .state + .task + .await + .expect("it should be able to join to the udp tracker task"); }) } diff --git a/src/core/auth.rs b/src/core/auth.rs index c6b772485..9fc9d6e7b 100644 --- a/src/core/auth.rs +++ b/src/core/auth.rs @@ -47,7 +47,7 @@ use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; use thiserror::Error; -use torrust_tracker_located_error::LocatedError; +use torrust_tracker_located_error::{DynError, LocatedError}; use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; use crate::shared::clock::{convert_from_timestamp_to_datetime_utc, Current, DurationSinceUnixEpoch, Time, TimeNow}; @@ -185,7 +185,7 @@ pub enum Error { impl From for Error { fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { Error::KeyVerificationError { - source: (Arc::new(e) as Arc).into(), + source: (Arc::new(e) as DynError).into(), } } } diff --git a/src/core/databases/error.rs b/src/core/databases/error.rs index 96b0d835e..a5179e3a4 100644 --- a/src/core/databases/error.rs +++ b/src/core/databases/error.rs @@ -5,7 +5,7 @@ use std::panic::Location; use std::sync::Arc; use r2d2_mysql::mysql::UrlError; -use torrust_tracker_located_error::{Located, LocatedError}; +use 
torrust_tracker_located_error::{DynError, Located, LocatedError}; use torrust_tracker_primitives::DatabaseDriver; #[derive(thiserror::Error, Debug, Clone)] @@ -59,11 +59,11 @@ impl From for Error { fn from(err: r2d2_sqlite::rusqlite::Error) -> Self { match err { r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows { - source: (Arc::new(err) as Arc).into(), + source: (Arc::new(err) as DynError).into(), driver: DatabaseDriver::Sqlite3, }, _ => Error::InvalidQuery { - source: (Arc::new(err) as Arc).into(), + source: (Arc::new(err) as DynError).into(), driver: DatabaseDriver::Sqlite3, }, } @@ -73,7 +73,7 @@ impl From for Error { impl From for Error { #[track_caller] fn from(err: r2d2_mysql::mysql::Error) -> Self { - let e: Arc = Arc::new(err); + let e: DynError = Arc::new(err); Error::InvalidQuery { source: e.into(), driver: DatabaseDriver::MySQL, diff --git a/src/servers/apis/mod.rs b/src/servers/apis/mod.rs index 5f8c581d0..2d4b3abe1 100644 --- a/src/servers/apis/mod.rs +++ b/src/servers/apis/mod.rs @@ -159,8 +159,6 @@ pub mod routes; pub mod server; pub mod v1; -use serde::Deserialize; - /// The info hash URL path parameter. /// /// Some API endpoints require an info hash as a path parameter. @@ -172,3 +170,12 @@ use serde::Deserialize; /// in order to provide a more specific error message. #[derive(Deserialize)] pub struct InfoHashParam(pub String); + +use serde::{Deserialize, Serialize}; + +/// The version of the HTTP Api. +#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] +pub enum Version { + /// The `v1` version of the HTTP Api. + V1, +} diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index c42083f9f..a25e62c8f 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -24,18 +24,18 @@ /// for example, to restart it to apply new configuration changes, to remotely /// shutdown the server, etc. 
use std::net::SocketAddr; -use std::str::FromStr; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; +use derive_more::Constructor; use futures::future::BoxFuture; -use futures::Future; -use log::info; +use tokio::sync::oneshot::{Receiver, Sender}; use super::routes::router; +use crate::bootstrap::jobs::Started; use crate::core::Tracker; -use crate::servers::signals::shutdown_signal; +use crate::servers::signals::{graceful_shutdown, Halted}; /// Errors that can occur when starting or stopping the API server. #[derive(Debug)] @@ -58,24 +58,27 @@ pub type RunningApiServer = ApiServer; /// states: `Stopped` or `Running`. #[allow(clippy::module_name_repetitions)] pub struct ApiServer { - pub cfg: torrust_tracker_configuration::HttpApi, pub state: S, } /// The `Stopped` state of the `ApiServer` struct. -pub struct Stopped; +pub struct Stopped { + launcher: Launcher, +} /// The `Running` state of the `ApiServer` struct. pub struct Running { - pub bind_addr: SocketAddr, - task_killer: tokio::sync::oneshot::Sender, - task: tokio::task::JoinHandle<()>, + pub binding: SocketAddr, + pub halt_task: tokio::sync::oneshot::Sender, + pub task: tokio::task::JoinHandle, } impl ApiServer { #[must_use] - pub fn new(cfg: torrust_tracker_configuration::HttpApi) -> Self { - Self { cfg, state: Stopped {} } + pub fn new(launcher: Launcher) -> Self { + Self { + state: Stopped { launcher }, + } } /// Starts the API server with the given configuration. @@ -88,28 +91,20 @@ impl ApiServer { /// /// It would panic if the bound socket address cannot be sent back to this starter. 
pub async fn start(self, tracker: Arc) -> Result, Error> { - let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); - let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); + let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); + let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); - let configuration = self.cfg.clone(); + let launcher = self.state.launcher; let task = tokio::spawn(async move { - let (bind_addr, server) = Launcher::start(&configuration, tracker, shutdown_signal(shutdown_receiver)); - - addr_sender.send(bind_addr).expect("Could not return SocketAddr."); - - server.await; + launcher.start(tracker, tx_start, rx_halt).await; + launcher }); - let bind_address = addr_receiver - .await - .map_err(|_| Error::Error("Could not receive bind_address.".to_string()))?; - Ok(ApiServer { - cfg: self.cfg, state: Running { - bind_addr: bind_address, - task_killer: shutdown_sender, + binding: rx_start.await.expect("unable to start service").address, + halt_task: tx_halt, task, }, }) @@ -124,21 +119,24 @@ impl ApiServer { /// It would return an error if the channel for the task killer signal was closed. pub async fn stop(self) -> Result, Error> { self.state - .task_killer - .send(0) + .halt_task + .send(Halted::Normal) .map_err(|_| Error::Error("Task killer channel was closed.".to_string()))?; - drop(self.state.task.await); + let launcher = self.state.task.await.map_err(|e| Error::Error(e.to_string()))?; Ok(ApiServer { - cfg: self.cfg, - state: Stopped {}, + state: Stopped { launcher }, }) } } /// A struct responsible for starting the API server. -struct Launcher; +#[derive(Constructor, Debug)] +pub struct Launcher { + bind_to: SocketAddr, + tls: Option, +} impl Launcher { /// Starts the API server with graceful shutdown. @@ -146,175 +144,78 @@ impl Launcher { /// If TLS is enabled in the configuration, it will start the server with /// TLS. 
See [`torrust-tracker-configuration`](torrust_tracker_configuration) /// for more information about configuration. - pub fn start( - cfg: &torrust_tracker_configuration::HttpApi, - tracker: Arc, - shutdown_signal: F, - ) -> (SocketAddr, BoxFuture<'static, ()>) - where - F: Future + Send + 'static, - { - let addr = SocketAddr::from_str(&cfg.bind_address).expect("bind_address is not a valid SocketAddr."); - let tcp_listener = std::net::TcpListener::bind(addr).expect("Could not bind tcp_listener to address."); - let bind_addr = tcp_listener - .local_addr() - .expect("Could not get local_addr from tcp_listener."); - - if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (&cfg.ssl_enabled, &cfg.ssl_cert_path, &cfg.ssl_key_path) { - let server = Self::start_tls_with_graceful_shutdown( - tcp_listener, - (ssl_cert_path.to_string(), ssl_key_path.to_string()), - tracker, - shutdown_signal, - ); - - (bind_addr, server) - } else { - let server = Self::start_with_graceful_shutdown(tcp_listener, tracker, shutdown_signal); - - (bind_addr, server) - } - } - - /// Starts the API server with graceful shutdown. - pub fn start_with_graceful_shutdown( - tcp_listener: std::net::TcpListener, - tracker: Arc, - shutdown_signal: F, - ) -> BoxFuture<'static, ()> - where - F: Future + Send + 'static, - { - let app = router(tracker); - - let handle = Handle::new(); - - let cloned_handle = handle.clone(); - - tokio::task::spawn(async move { - shutdown_signal.await; - cloned_handle.shutdown(); - }); - - Box::pin(async { - axum_server::from_tcp(tcp_listener) - .handle(handle) - .serve(app.into_make_service_with_connect_info::()) - .await - .expect("Axum server crashed."); - }) - } - - /// Starts the API server with graceful shutdown and TLS. 
- pub fn start_tls_with_graceful_shutdown( - tcp_listener: std::net::TcpListener, - (ssl_cert_path, ssl_key_path): (String, String), - tracker: Arc, - shutdown_signal: F, - ) -> BoxFuture<'static, ()> - where - F: Future + Send + 'static, - { - let app = router(tracker); + /// + /// # Panics + /// + /// Will panic if unable to bind to the socket, or unable to get the address of the bound socket. + /// Will also panic if unable to send message regarding the bound socket address. + pub fn start(&self, tracker: Arc, tx_start: Sender, rx_halt: Receiver) -> BoxFuture<'static, ()> { + let router = router(tracker); + let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); + let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); let handle = Handle::new(); - let cloned_handle = handle.clone(); - - tokio::task::spawn(async move { - shutdown_signal.await; - cloned_handle.shutdown(); + tokio::task::spawn(graceful_shutdown( + handle.clone(), + rx_halt, + format!("shutting down http server on socket address: {address}"), + )); + + let tls = self.tls.clone(); + + let running = Box::pin(async { + match tls { + Some(tls) => axum_server::from_tcp_rustls(socket, tls) + .handle(handle) + .serve(router.into_make_service_with_connect_info::()) + .await + .expect("Axum server crashed."), + None => axum_server::from_tcp(socket) + .handle(handle) + .serve(router.into_make_service_with_connect_info::()) + .await + .expect("Axum server crashed."), + } }); - Box::pin(async { - let tls_config = RustlsConfig::from_pem_file(ssl_cert_path, ssl_key_path) - .await - .expect("Could not read tls cert."); + tx_start + .send(Started { address }) + .expect("the HTTP(s) Tracker service should not be dropped"); - axum_server::from_tcp_rustls(tcp_listener, tls_config) - .handle(handle) - .serve(app.into_make_service_with_connect_info::()) - .await - .expect("Axum server crashed."); - }) + running } } -/// Starts the API 
server with graceful shutdown on the current thread. -/// -/// # Panics -/// -/// It would panic if it fails to listen to shutdown signal. -pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl Future> { - let app = router(tracker); - - let handle = Handle::new(); - let shutdown_handle = handle.clone(); - - tokio::spawn(async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - info!("Stopping Torrust APIs server on https://{} ...", socket_addr); - shutdown_handle.shutdown(); - }); - - axum_server::bind(socket_addr).handle(handle).serve(app.into_make_service()) -} - -/// Starts the API server with graceful shutdown and TLS on the current thread. -/// -/// # Panics -/// -/// It would panic if it fails to listen to shutdown signal. -pub fn start_tls( - socket_addr: SocketAddr, - ssl_config: RustlsConfig, - tracker: Arc, -) -> impl Future> { - let app = router(tracker); - - let handle = Handle::new(); - let shutdown_handle = handle.clone(); - - tokio::spawn(async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - info!("Stopping Torrust APIs server on https://{} ...", socket_addr); - shutdown_handle.shutdown(); - }); - - axum_server::bind_rustls(socket_addr, ssl_config) - .handle(handle) - .serve(app.into_make_service()) -} - #[cfg(test)] mod tests { use std::sync::Arc; - use torrust_tracker_configuration::Configuration; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker_test_helpers::configuration::ephemeral_mode_public; - use crate::core; - use crate::core::statistics; - use crate::servers::apis::server::ApiServer; - - fn tracker_configuration() -> Arc { - Arc::new(configuration::ephemeral()) - } + use crate::bootstrap::app::initialize_with_configuration; + use crate::bootstrap::jobs::make_rust_tls; + use crate::servers::apis::server::{ApiServer, Launcher}; #[tokio::test] - async fn it_should_be_able_to_start_from_stopped_state_and_then_stop_again() { - let cfg = 
tracker_configuration(); - - let tracker = Arc::new(core::Tracker::new(cfg.clone(), None, statistics::Repo::new()).unwrap()); + async fn it_should_be_able_to_start_and_stop() { + let cfg = Arc::new(ephemeral_mode_public()); + let tracker = initialize_with_configuration(&cfg); + let config = &cfg.http_api; - let stopped_api_server = ApiServer::new(cfg.http_api.clone()); + let bind_to = config + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); - let running_api_server_result = stopped_api_server.start(tracker).await; - - assert!(running_api_server_result.is_ok()); + let tls = make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path) + .await + .map(|tls| tls.expect("tls config failed")); - let running_api_server = running_api_server_result.unwrap(); + let stopped = ApiServer::new(Launcher::new(bind_to, tls)); + let started = stopped.start(tracker).await.expect("it should start the server"); + let stopped = started.stop().await.expect("it should stop the server"); - assert!(running_api_server.stop().await.is_ok()); + assert_eq!(stopped.state.launcher.bind_to, bind_to); } } diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs index d4654d617..fb807d09c 100644 --- a/src/servers/health_check_api/server.rs +++ b/src/servers/health_check_api/server.rs @@ -14,7 +14,7 @@ use serde_json::json; use tokio::sync::oneshot::Sender; use torrust_tracker_configuration::Configuration; -use crate::bootstrap::jobs::health_check_api::ApiServerJobStarted; +use crate::bootstrap::jobs::Started; use crate::servers::health_check_api::handlers::health_check_handler; /// Starts Health Check API server. @@ -23,8 +23,8 @@ use crate::servers::health_check_api::handlers::health_check_handler; /// /// Will panic if binding to the socket address fails. 
pub fn start( - socket_addr: SocketAddr, - tx: Sender, + address: SocketAddr, + tx: Sender, config: Arc, ) -> impl Future> { let app = Router::new() @@ -35,22 +35,20 @@ pub fn start( let handle = Handle::new(); let cloned_handle = handle.clone(); - let tcp_listener = std::net::TcpListener::bind(socket_addr).expect("Could not bind tcp_listener to address."); - let bound_addr = tcp_listener - .local_addr() - .expect("Could not get local_addr from tcp_listener."); + let socket = std::net::TcpListener::bind(address).expect("Could not bind tcp_listener to address."); + let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); tokio::task::spawn(async move { tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - info!("Stopping Torrust Health Check API server o http://{} ...", bound_addr); + info!("Stopping Torrust Health Check API server o http://{} ...", address); cloned_handle.shutdown(); }); - let running = axum_server::from_tcp(tcp_listener) + let running = axum_server::from_tcp(socket) .handle(handle) .serve(app.into_make_service_with_connect_info::()); - tx.send(ApiServerJobStarted { bound_addr }) + tx.send(Started { address }) .expect("the Health Check API server should not be dropped"); running diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 2d8fc745f..aee2d0ac0 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -1,30 +1,17 @@ //! Module to handle the HTTP server instances. -use std::future::Future; use std::net::SocketAddr; use std::sync::Arc; +use axum_server::tls_rustls::RustlsConfig; +use axum_server::Handle; +use derive_more::Constructor; use futures::future::BoxFuture; +use tokio::sync::oneshot::{Receiver, Sender}; +use super::v1::routes::router; +use crate::bootstrap::jobs::Started; use crate::core::Tracker; -use crate::servers::signals::shutdown_signal; - -/// Trait to be implemented by a HTTP server launcher for the tracker. 
-/// -/// A launcher is responsible for starting the server and returning the -/// `SocketAddr` it is bound to. -#[allow(clippy::module_name_repetitions)] -pub trait HttpServerLauncher: Sync + Send { - fn new() -> Self; - - fn start_with_graceful_shutdown( - &self, - cfg: torrust_tracker_configuration::HttpTracker, - tracker: Arc, - shutdown_signal: F, - ) -> (SocketAddr, BoxFuture<'static, ()>) - where - F: Future + Send + 'static; -} +use crate::servers::signals::{graceful_shutdown, Halted}; /// Error that can occur when starting or stopping the HTTP server. /// @@ -40,17 +27,61 @@ pub trait HttpServerLauncher: Sync + Send { /// completion. #[derive(Debug)] pub enum Error { - /// Any kind of error starting or stopping the server. - Error(String), // todo: refactor to use thiserror and add more variants for specific errors. + Error(String), +} + +#[derive(Constructor, Debug)] +pub struct Launcher { + pub bind_to: SocketAddr, + pub tls: Option, +} + +impl Launcher { + fn start(&self, tracker: Arc, tx_start: Sender, rx_halt: Receiver) -> BoxFuture<'static, ()> { + let app = router(tracker); + let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); + let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); + + let handle = Handle::new(); + + tokio::task::spawn(graceful_shutdown( + handle.clone(), + rx_halt, + format!("shutting down http server on socket address: {address}"), + )); + + let tls = self.tls.clone(); + + let running = Box::pin(async { + match tls { + Some(tls) => axum_server::from_tcp_rustls(socket, tls) + .handle(handle) + .serve(app.into_make_service_with_connect_info::()) + .await + .expect("Axum server crashed."), + None => axum_server::from_tcp(socket) + .handle(handle) + .serve(app.into_make_service_with_connect_info::()) + .await + .expect("Axum server crashed."), + } + }); + + tx_start + .send(Started { address }) + .expect("the HTTP(s) Tracker service should not be 
dropped"); + + running + } } /// A HTTP server instance controller with no HTTP instance running. #[allow(clippy::module_name_repetitions)] -pub type StoppedHttpServer = HttpServer>; +pub type StoppedHttpServer = HttpServer; /// A HTTP server instance controller with a running HTTP instance. #[allow(clippy::module_name_repetitions)] -pub type RunningHttpServer = HttpServer>; +pub type RunningHttpServer = HttpServer; /// A HTTP server instance controller. /// @@ -69,31 +100,28 @@ pub type RunningHttpServer = HttpServer>; /// intended to persist configurations between runs. #[allow(clippy::module_name_repetitions)] pub struct HttpServer { - /// The configuration of the server that will be used every time the server - /// is started. - pub cfg: torrust_tracker_configuration::HttpTracker, /// The state of the server: `running` or `stopped`. pub state: S, } /// A stopped HTTP server state. -pub struct Stopped { - launcher: I, +pub struct Stopped { + launcher: Launcher, } /// A running HTTP server state. -pub struct Running { +pub struct Running { /// The address where the server is bound. - pub bind_addr: SocketAddr, - task_killer: tokio::sync::oneshot::Sender, - task: tokio::task::JoinHandle, + pub binding: SocketAddr, + pub halt_task: tokio::sync::oneshot::Sender, + pub task: tokio::task::JoinHandle, } -impl HttpServer> { +impl HttpServer { /// It creates a new `HttpServer` controller in `stopped` state. - pub fn new(cfg: torrust_tracker_configuration::HttpTracker, launcher: I) -> Self { + #[must_use] + pub fn new(launcher: Launcher) -> Self { Self { - cfg, state: Stopped { launcher }, } } @@ -109,57 +137,80 @@ impl HttpServer> { /// /// It would panic spawned HTTP server launcher cannot send the bound `SocketAddr` /// back to the main thread. 
- pub async fn start(self, tracker: Arc) -> Result>, Error> { - let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); - let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); + pub async fn start(self, tracker: Arc) -> Result, Error> { + let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); + let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); - let configuration = self.cfg.clone(); let launcher = self.state.launcher; let task = tokio::spawn(async move { - let (bind_addr, server) = - launcher.start_with_graceful_shutdown(configuration, tracker, shutdown_signal(shutdown_receiver)); - - addr_sender.send(bind_addr).expect("Could not return SocketAddr."); + let server = launcher.start(tracker, tx_start, rx_halt); server.await; launcher }); - let bind_address = addr_receiver - .await - .map_err(|_| Error::Error("Could not receive bind_address.".to_string()))?; - Ok(HttpServer { - cfg: self.cfg, state: Running { - bind_addr: bind_address, - task_killer: shutdown_sender, + binding: rx_start.await.expect("unable to start service").address, + halt_task: tx_halt, task, }, }) } } -impl HttpServer> { +impl HttpServer { /// It stops the server and returns a `HttpServer` controller in `stopped` /// state. /// /// # Errors /// /// It would return an error if the channel for the task killer signal was closed. 
- pub async fn stop(self) -> Result>, Error> { + pub async fn stop(self) -> Result, Error> { self.state - .task_killer - .send(0) + .halt_task + .send(Halted::Normal) .map_err(|_| Error::Error("Task killer channel was closed.".to_string()))?; let launcher = self.state.task.await.map_err(|e| Error::Error(e.to_string()))?; Ok(HttpServer { - cfg: self.cfg, state: Stopped { launcher }, }) } } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use torrust_tracker_test_helpers::configuration::ephemeral_mode_public; + + use crate::bootstrap::app::initialize_with_configuration; + use crate::bootstrap::jobs::make_rust_tls; + use crate::servers::http::server::{HttpServer, Launcher}; + + #[tokio::test] + async fn it_should_be_able_to_start_and_stop() { + let cfg = Arc::new(ephemeral_mode_public()); + let tracker = initialize_with_configuration(&cfg); + let config = &cfg.http_trackers[0]; + + let bind_to = config + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); + + let tls = make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path) + .await + .map(|tls| tls.expect("tls config failed")); + + let stopped = HttpServer::new(Launcher::new(bind_to, tls)); + let started = stopped.start(tracker).await.expect("it should start the server"); + let stopped = started.stop().await.expect("it should stop the server"); + + assert_eq!(stopped.state.launcher.bind_to, bind_to); + } +} diff --git a/src/servers/http/v1/launcher.rs b/src/servers/http/v1/launcher.rs deleted file mode 100644 index 6b89e8ce7..000000000 --- a/src/servers/http/v1/launcher.rs +++ /dev/null @@ -1,188 +0,0 @@ -//! Logic to start new HTTP server instances. 
-use std::future::Future; -use std::net::SocketAddr; -use std::str::FromStr; -use std::sync::Arc; - -use async_trait::async_trait; -use axum_server::tls_rustls::RustlsConfig; -use axum_server::Handle; -use futures::future::BoxFuture; -use log::info; - -use super::routes::router; -use crate::core::Tracker; -use crate::servers::http::server::HttpServerLauncher; - -#[derive(Debug)] -pub enum Error { - Error(String), -} - -pub struct Launcher; - -impl Launcher { - /// It starts a new HTTP server instance from a TCP listener with graceful shutdown. - /// - /// # Panics - /// - /// Will panic if: - /// - /// - The TCP listener could not be bound. - /// - The Axum server crashes. - pub fn start_from_tcp_listener_with_graceful_shutdown( - tcp_listener: std::net::TcpListener, - tracker: Arc, - shutdown_signal: F, - ) -> BoxFuture<'static, ()> - where - F: Future + Send + 'static, - { - let app = router(tracker); - - let handle = Handle::new(); - - let cloned_handle = handle.clone(); - - tokio::task::spawn(async move { - shutdown_signal.await; - cloned_handle.shutdown(); - }); - - Box::pin(async { - axum_server::from_tcp(tcp_listener) - .handle(handle) - .serve(app.into_make_service_with_connect_info::()) - .await - .expect("Axum server crashed."); - }) - } - - /// It starts a new HTTPS server instance from a TCP listener with graceful shutdown. - /// - /// # Panics - /// - /// Will panic if: - /// - /// - The SSL certificate could not be read from the provided path or is invalid. - /// - The Axum server crashes. 
- pub fn start_tls_from_tcp_listener_with_graceful_shutdown( - tcp_listener: std::net::TcpListener, - (ssl_cert_path, ssl_key_path): (String, String), - tracker: Arc, - shutdown_signal: F, - ) -> BoxFuture<'static, ()> - where - F: Future + Send + 'static, - { - let app = router(tracker); - - let handle = Handle::new(); - - let cloned_handle = handle.clone(); - - tokio::task::spawn(async move { - shutdown_signal.await; - cloned_handle.shutdown(); - }); - - Box::pin(async { - let tls_config = RustlsConfig::from_pem_file(ssl_cert_path, ssl_key_path) - .await - .expect("Could not read tls cert."); - - axum_server::from_tcp_rustls(tcp_listener, tls_config) - .handle(handle) - .serve(app.into_make_service_with_connect_info::()) - .await - .expect("Axum server crashed."); - }) - } -} - -#[async_trait] -impl HttpServerLauncher for Launcher { - fn new() -> Self { - Self {} - } - - fn start_with_graceful_shutdown( - &self, - cfg: torrust_tracker_configuration::HttpTracker, - tracker: Arc, - shutdown_signal: F, - ) -> (SocketAddr, BoxFuture<'static, ()>) - where - F: Future + Send + 'static, - { - let addr = SocketAddr::from_str(&cfg.bind_address).expect("bind_address is not a valid SocketAddr."); - let tcp_listener = std::net::TcpListener::bind(addr).expect("Could not bind tcp_listener to address."); - let bind_addr = tcp_listener - .local_addr() - .expect("Could not get local_addr from tcp_listener."); - - if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (cfg.ssl_enabled, &cfg.ssl_cert_path, &cfg.ssl_key_path) { - let server = Self::start_tls_from_tcp_listener_with_graceful_shutdown( - tcp_listener, - (ssl_cert_path.to_string(), ssl_key_path.to_string()), - tracker, - shutdown_signal, - ); - - (bind_addr, server) - } else { - let server = Self::start_from_tcp_listener_with_graceful_shutdown(tcp_listener, tracker, shutdown_signal); - - (bind_addr, server) - } - } -} - -/// Starts a new HTTP server instance. 
-/// -/// # Panics -/// -/// Panics if the server could not listen to shutdown (ctrl+c) signal. -pub fn start(socket_addr: std::net::SocketAddr, tracker: Arc) -> impl Future> { - let app = router(tracker); - - let handle = Handle::new(); - - let cloned_handle = handle.clone(); - - tokio::task::spawn(async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - info!("Stopping Torrust Health Check API server o http://{} ...", socket_addr); - cloned_handle.shutdown(); - }); - - axum_server::bind(socket_addr) - .handle(handle) - .serve(app.into_make_service_with_connect_info::()) -} - -/// Starts a new HTTPS server instance. -/// -/// # Panics -/// -/// Panics if the server could not listen to shutdown (ctrl+c) signal. -pub fn start_tls( - socket_addr: std::net::SocketAddr, - ssl_config: RustlsConfig, - tracker: Arc, -) -> impl Future> { - let app = router(tracker); - - let handle = Handle::new(); - let shutdown_handle = handle.clone(); - - tokio::spawn(async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - info!("Stopping Torrust HTTP tracker server on https://{} ...", socket_addr); - shutdown_handle.shutdown(); - }); - - axum_server::bind_rustls(socket_addr, ssl_config) - .handle(handle) - .serve(app.into_make_service_with_connect_info::()) -} diff --git a/src/servers/http/v1/mod.rs b/src/servers/http/v1/mod.rs index 464a7ee14..9d2745692 100644 --- a/src/servers/http/v1/mod.rs +++ b/src/servers/http/v1/mod.rs @@ -4,7 +4,6 @@ //! more information about the endpoints and their usage. pub mod extractors; pub mod handlers; -pub mod launcher; pub mod query; pub mod requests; pub mod responses; diff --git a/src/servers/signals.rs b/src/servers/signals.rs index 51f53738d..cb0675d65 100644 --- a/src/servers/signals.rs +++ b/src/servers/signals.rs @@ -1,5 +1,17 @@ //! This module contains functions to handle signals. 
+use std::time::Duration; + +use derive_more::Display; use log::info; +use tokio::time::sleep; + +/// This is the message that the "launcher" spawned task receives from the main +/// application process to notify the service to shutdown. +/// +#[derive(Copy, Clone, Debug, Display)] +pub enum Halted { + Normal, +} /// Resolves on `ctrl_c` or the `terminate` signal. /// @@ -33,18 +45,33 @@ pub async fn global_shutdown_signal() { /// # Panics /// /// Will panic if the `stop_receiver` resolves with an error. -pub async fn shutdown_signal(stop_receiver: tokio::sync::oneshot::Receiver) { - let stop = async { stop_receiver.await.expect("Failed to install stop signal.") }; +pub async fn shutdown_signal(rx_halt: tokio::sync::oneshot::Receiver) { + let halt = async { rx_halt.await.expect("Failed to install stop signal.") }; tokio::select! { - _ = stop => {}, + _ = halt => {}, () = global_shutdown_signal() => {} } } /// Same as `shutdown_signal()`, but shows a message when it resolves. -pub async fn shutdown_signal_with_message(stop_receiver: tokio::sync::oneshot::Receiver, message: String) { - shutdown_signal(stop_receiver).await; +pub async fn shutdown_signal_with_message(rx_halt: tokio::sync::oneshot::Receiver, message: String) { + shutdown_signal(rx_halt).await; info!("{message}"); } + +pub async fn graceful_shutdown(handle: axum_server::Handle, rx_halt: tokio::sync::oneshot::Receiver, message: String) { + shutdown_signal_with_message(rx_halt, message).await; + + info!("sending graceful shutdown signal"); + handle.graceful_shutdown(Some(Duration::from_secs(90))); + + println!("!! 
shuting down in 90 seconds !!"); + + loop { + sleep(Duration::from_secs(1)).await; + + info!("remaining alive connections: {}", handle.connection_count()); + } +} diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index f3c7b58b0..18a341418 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -8,6 +8,7 @@ use aquatic_udp_protocol::{ NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; use log::{debug, info}; +use torrust_tracker_located_error::DynError; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use crate::core::{statistics, ScrapeData, Tracker}; @@ -46,7 +47,7 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: & // bad request Err(e) => handle_error( &Error::BadRequest { - source: (Arc::new(e) as Arc).into(), + source: (Arc::new(e) as DynError).into(), }, TransactionId(0), ), diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 9b9a89b11..a0af55101 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -17,19 +17,21 @@ //! because we want to be able to start and stop the server multiple times, and //! we want to know the bound address and the current state of the server. //! In production, the `Udp` launcher is used directly. 
-use std::future::Future; use std::io::Cursor; use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::Response; +use derive_more::Constructor; use futures::pin_mut; use log::{debug, error, info}; use tokio::net::UdpSocket; +use tokio::sync::oneshot::{Receiver, Sender}; use tokio::task::JoinHandle; +use crate::bootstrap::jobs::Started; use crate::core::Tracker; -use crate::servers::signals::shutdown_signal; +use crate::servers::signals::{shutdown_signal_with_message, Halted}; use crate::servers::udp::handlers::handle_packet; use crate::shared::bit_torrent::udp::MAX_PACKET_SIZE; @@ -75,29 +77,32 @@ pub type RunningUdpServer = UdpServer; /// intended to persist configurations between runs. #[allow(clippy::module_name_repetitions)] pub struct UdpServer { - /// The configuration of the server that will be used every time the server - /// is started. - pub cfg: torrust_tracker_configuration::UdpTracker, /// The state of the server: `running` or `stopped`. pub state: S, } /// A stopped UDP server state. -pub struct Stopped; + +pub struct Stopped { + launcher: Launcher, +} /// A running UDP server state. +#[derive(Debug, Constructor)] pub struct Running { /// The address where the server is bound. - pub bind_address: SocketAddr, - stop_job_sender: tokio::sync::oneshot::Sender, - job: JoinHandle<()>, + pub binding: SocketAddr, + pub halt_task: tokio::sync::oneshot::Sender, + pub task: JoinHandle, } impl UdpServer { /// Creates a new `UdpServer` instance in `stopped`state. #[must_use] - pub fn new(cfg: torrust_tracker_configuration::UdpTracker) -> Self { - Self { cfg, state: Stopped {} } + pub fn new(launcher: Launcher) -> Self { + Self { + state: Stopped { launcher }, + } } /// It starts the server and returns a `UdpServer` controller in `running` @@ -106,28 +111,32 @@ impl UdpServer { /// # Errors /// /// Will return `Err` if UDP can't bind to given bind address. 
+ /// + /// # Panics + /// + /// It panics if unable to receive the bound socket address from service. + /// pub async fn start(self, tracker: Arc) -> Result, Error> { - let udp = Udp::new(&self.cfg.bind_address) - .await - .map_err(|e| Error::Error(e.to_string()))?; - - let bind_address = udp.socket.local_addr().map_err(|e| Error::Error(e.to_string()))?; + let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); + let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); - let (sender, receiver) = tokio::sync::oneshot::channel::(); + let launcher = self.state.launcher; - let job = tokio::spawn(async move { - udp.start_with_graceful_shutdown(tracker, shutdown_signal(receiver)).await; + let task = tokio::spawn(async move { + launcher.start(tracker, tx_start, rx_halt).await; + launcher }); let running_udp_server: UdpServer = UdpServer { - cfg: self.cfg, state: Running { - bind_address, - stop_job_sender: sender, - job, + binding: rx_start.await.expect("unable to start service").address, + halt_task: tx_halt, + task, }, }; + info!("Running UDP Tracker on Socket: {}", running_udp_server.state.binding); + Ok(running_udp_server) } } @@ -140,103 +149,96 @@ impl UdpServer { /// /// Will return `Err` if the oneshot channel to send the stop signal /// has already been called once. + /// + /// # Panics + /// + /// It panics if unable to shutdown service. pub async fn stop(self) -> Result, Error> { - self.state.stop_job_sender.send(1).map_err(|e| Error::Error(e.to_string()))?; + self.state + .halt_task + .send(Halted::Normal) + .map_err(|e| Error::Error(e.to_string()))?; - drop(self.state.job.await); + let launcher = self.state.task.await.expect("unable to shutdown service"); let stopped_api_server: UdpServer = UdpServer { - cfg: self.cfg, - state: Stopped {}, + state: Stopped { launcher }, }; Ok(stopped_api_server) } } -/// A UDP server instance launcher. 
-pub struct Udp { - socket: Arc, +#[derive(Constructor, Debug)] +pub struct Launcher { + bind_to: SocketAddr, } -impl Udp { - /// Creates a new `Udp` instance. - /// - /// # Errors - /// - /// Will return `Err` unable to bind to the supplied `bind_address`. - pub async fn new(bind_address: &str) -> tokio::io::Result { - let socket = UdpSocket::bind(bind_address).await?; - - Ok(Udp { - socket: Arc::new(socket), - }) - } - +impl Launcher { /// It starts the UDP server instance. /// /// # Panics /// /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. - pub async fn start(&self, tracker: Arc) { - loop { - let mut data = [0; MAX_PACKET_SIZE]; - let socket = self.socket.clone(); - - tokio::select! { - _ = tokio::signal::ctrl_c() => { - info!("Stopping UDP server: {}..", socket.local_addr().unwrap()); - break; - } - Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { - let payload = data[..valid_bytes].to_vec(); - - debug!("Received {} bytes", payload.len()); - debug!("From: {}", &remote_addr); - debug!("Payload: {:?}", payload); - - let response = handle_packet(remote_addr, payload, &tracker).await; - - Udp::send_response(socket, remote_addr, response).await; - } - } - } + pub async fn start(&self, tracker: Arc, tx_start: Sender, rx_halt: Receiver) -> JoinHandle<()> { + Udp::start_with_graceful_shutdown(tracker, self.bind_to, tx_start, rx_halt).await } +} + +/// A UDP server instance launcher. +#[derive(Constructor)] +pub struct Udp; +impl Udp { /// It starts the UDP server instance with graceful shutdown. /// /// # Panics /// - /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. - async fn start_with_graceful_shutdown(&self, tracker: Arc, shutdown_signal: F) - where - F: Future, - { - // Pin the future so that it doesn't move to the first loop iteration. - pin_mut!(shutdown_signal); - - loop { - let mut data = [0; MAX_PACKET_SIZE]; - let socket = self.socket.clone(); - - tokio::select! 
{ - () = &mut shutdown_signal => { - info!("Stopping UDP server: {}..", self.socket.local_addr().unwrap()); - break; + /// It panics if unable to bind to udp socket, and get the address from the udp socket. + /// It also panics if unable to send address of socket. + async fn start_with_graceful_shutdown( + tracker: Arc, + bind_to: SocketAddr, + tx_start: Sender, + rx_halt: Receiver, + ) -> JoinHandle<()> { + let binding = Arc::new(UdpSocket::bind(bind_to).await.expect("Could not bind to {self.socket}.")); + let address = binding.local_addr().expect("Could not get local_addr from {binding}."); + + let running = tokio::task::spawn(async move { + let halt = async move { + shutdown_signal_with_message(rx_halt, format!("Halting Http Service Bound to Socket: {address}")).await; + }; + + pin_mut!(halt); + + loop { + let mut data = [0; MAX_PACKET_SIZE]; + let binding = binding.clone(); + + tokio::select! { + () = & mut halt => {}, + + Ok((valid_bytes, remote_addr)) = binding.recv_from(&mut data) => { + let payload = data[..valid_bytes].to_vec(); + + debug!("Received {} bytes", payload.len()); + debug!("From: {}", &remote_addr); + debug!("Payload: {:?}", payload); + + let response = handle_packet(remote_addr, payload, &tracker).await; + + Udp::send_response(binding, remote_addr, response).await; + } } - Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { - let payload = data[..valid_bytes].to_vec(); - - debug!("Received {} bytes", payload.len()); - debug!("From: {}", &remote_addr); - debug!("Payload: {:?}", payload); + } + }); - let response = handle_packet(remote_addr, payload, &tracker).await; + tx_start + .send(Started { address }) + .expect("the UDP Tracker service should not be dropped"); - Udp::send_response(socket, remote_addr, response).await; - } - } - } + running } async fn send_response(socket: Arc, remote_addr: SocketAddr, response: Response) { @@ -268,3 +270,31 @@ impl Udp { drop(socket.send_to(payload, remote_addr).await); } } + +#[cfg(test)] 
+mod tests { + use std::sync::Arc; + + use torrust_tracker_test_helpers::configuration::ephemeral_mode_public; + + use crate::bootstrap::app::initialize_with_configuration; + use crate::servers::udp::server::{Launcher, UdpServer}; + + #[tokio::test] + async fn it_should_be_able_to_start_and_stop() { + let cfg = Arc::new(ephemeral_mode_public()); + let tracker = initialize_with_configuration(&cfg); + let config = &cfg.udp_trackers[0]; + + let bind_to = config + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); + + let stopped = UdpServer::new(Launcher::new(bind_to)); + let started = stopped.start(tracker).await.expect("it should start the server"); + let stopped = started.stop().await.expect("it should stop the server"); + + assert_eq!(stopped.state.launcher.bind_to, bind_to); + } +} diff --git a/tests/servers/api/test_environment.rs b/tests/servers/api/test_environment.rs index 0501d9c56..166bfd7d1 100644 --- a/tests/servers/api/test_environment.rs +++ b/tests/servers/api/test_environment.rs @@ -1,8 +1,12 @@ +use std::net::SocketAddr; use std::sync::Arc; +use axum_server::tls_rustls::RustlsConfig; +use futures::executor::block_on; +use torrust_tracker::bootstrap::jobs::make_rust_tls; use torrust_tracker::core::peer::Peer; use torrust_tracker::core::Tracker; -use torrust_tracker::servers::apis::server::{ApiServer, RunningApiServer, StoppedApiServer}; +use torrust_tracker::servers::apis::server::{ApiServer, Launcher, RunningApiServer, StoppedApiServer}; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use super::connection_info::ConnectionInfo; @@ -36,15 +40,27 @@ impl TestEnvironment { } impl TestEnvironment { - pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { - let cfg = Arc::new(cfg); + pub fn new(cfg: torrust_tracker_configuration::Configuration) -> Self { + let tracker = setup_with_configuration(&Arc::new(cfg)); - let tracker = setup_with_configuration(&cfg); + let config = 
tracker.config.http_api.clone(); - let api_server = api_server(cfg.http_api.clone()); + let bind_to = config + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); + + let tls = block_on(make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path)) + .map(|tls| tls.expect("tls config failed")); + + Self::new_stopped(tracker, bind_to, tls) + } + + pub fn new_stopped(tracker: Arc, bind_to: SocketAddr, tls: Option) -> Self { + let api_server = api_server(Launcher::new(bind_to, tls)); Self { - cfg, + cfg: tracker.config.clone(), tracker, state: Stopped { api_server }, } @@ -60,14 +76,14 @@ impl TestEnvironment { } } - pub fn config_mut(&mut self) -> &mut torrust_tracker_configuration::HttpApi { - &mut self.state.api_server.cfg - } + // pub fn config_mut(&mut self) -> &mut torrust_tracker_configuration::HttpApi { + // &mut self.cfg.http_api + // } } impl TestEnvironment { pub async fn new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { - let test_env = StoppedTestEnvironment::new_stopped(cfg); + let test_env = StoppedTestEnvironment::new(cfg); test_env.start().await } @@ -84,15 +100,16 @@ impl TestEnvironment { pub fn get_connection_info(&self) -> ConnectionInfo { ConnectionInfo { - bind_address: self.state.api_server.state.bind_addr.to_string(), - api_token: self.state.api_server.cfg.access_tokens.get("admin").cloned(), + bind_address: self.state.api_server.state.binding.to_string(), + api_token: self.cfg.http_api.access_tokens.get("admin").cloned(), } } } #[allow(clippy::module_name_repetitions)] +#[allow(dead_code)] pub fn stopped_test_environment(cfg: torrust_tracker_configuration::Configuration) -> StoppedTestEnvironment { - TestEnvironment::new_stopped(cfg) + TestEnvironment::new(cfg) } #[allow(clippy::module_name_repetitions)] @@ -100,6 +117,6 @@ pub async fn running_test_environment(cfg: torrust_tracker_configuration::Config TestEnvironment::new_running(cfg).await } -pub fn api_server(cfg: 
torrust_tracker_configuration::HttpApi) -> StoppedApiServer { - ApiServer::new(cfg) +pub fn api_server(launcher: Launcher) -> StoppedApiServer { + ApiServer::new(launcher) } diff --git a/tests/servers/api/v1/contract/configuration.rs b/tests/servers/api/v1/contract/configuration.rs index cfdb59b0c..a551a8b36 100644 --- a/tests/servers/api/v1/contract/configuration.rs +++ b/tests/servers/api/v1/contract/configuration.rs @@ -1,18 +1,33 @@ -use torrust_tracker_test_helpers::configuration; +// use std::sync::Arc; -use crate::servers::api::test_environment::stopped_test_environment; +// use axum_server::tls_rustls::RustlsConfig; +// use futures::executor::block_on; +// use torrust_tracker_test_helpers::configuration; + +// use crate::common::app::setup_with_configuration; +// use crate::servers::api::test_environment::stopped_test_environment; #[tokio::test] #[ignore] #[should_panic = "Could not receive bind_address."] async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { - let mut test_env = stopped_test_environment(configuration::ephemeral()); + // let tracker = setup_with_configuration(&Arc::new(configuration::ephemeral())); + + // let config = tracker.config.http_api.clone(); + + // let bind_to = config + // .bind_address + // .parse::() + // .expect("Tracker API bind_address invalid."); - let cfg = test_env.config_mut(); + // let tls = + // if let (true, Some(cert), Some(key)) = (&true, &Some("bad cert path".to_string()), &Some("bad cert path".to_string())) { + // Some(block_on(RustlsConfig::from_pem_file(cert, key)).expect("Could not read tls cert.")) + // } else { + // None + // }; - cfg.ssl_enabled = true; - cfg.ssl_key_path = Some("bad key path".to_string()); - cfg.ssl_cert_path = Some("bad cert path".to_string()); + // let test_env = new_stopped(tracker, bind_to, tls); - test_env.start().await; + // test_env.start().await; } diff --git a/tests/servers/health_check_api/test_environment.rs b/tests/servers/health_check_api/test_environment.rs index 
46e54dc47..554e37dbf 100644 --- a/tests/servers/health_check_api/test_environment.rs +++ b/tests/servers/health_check_api/test_environment.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use tokio::sync::oneshot; use tokio::task::JoinHandle; -use torrust_tracker::bootstrap::jobs::health_check_api::ApiServerJobStarted; +use torrust_tracker::bootstrap::jobs::Started; use torrust_tracker::servers::health_check_api::server; use torrust_tracker_configuration::Configuration; @@ -16,7 +16,7 @@ pub async fn start(config: Arc) -> (SocketAddr, JoinHandle<()>) { .parse::() .expect("Health Check API bind_address invalid."); - let (tx, rx) = oneshot::channel::(); + let (tx, rx) = oneshot::channel::(); let join_handle = tokio::spawn(async move { let handle = server::start(bind_addr, tx, config.clone()); @@ -26,7 +26,7 @@ pub async fn start(config: Arc) -> (SocketAddr, JoinHandle<()>) { }); let bound_addr = match rx.await { - Ok(msg) => msg.bound_addr, + Ok(msg) => msg.address, Err(e) => panic!("the Health Check API server was dropped: {e}"), }; diff --git a/tests/servers/http/test_environment.rs b/tests/servers/http/test_environment.rs index e24e1b9a5..73961b790 100644 --- a/tests/servers/http/test_environment.rs +++ b/tests/servers/http/test_environment.rs @@ -1,16 +1,18 @@ use std::sync::Arc; +use futures::executor::block_on; +use torrust_tracker::bootstrap::jobs::make_rust_tls; use torrust_tracker::core::peer::Peer; use torrust_tracker::core::Tracker; -use torrust_tracker::servers::http::server::{HttpServer, HttpServerLauncher, RunningHttpServer, StoppedHttpServer}; +use torrust_tracker::servers::http::server::{HttpServer, Launcher, RunningHttpServer, StoppedHttpServer}; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::common::app::setup_with_configuration; #[allow(clippy::module_name_repetitions, dead_code)] -pub type StoppedTestEnvironment = TestEnvironment>; +pub type StoppedTestEnvironment = TestEnvironment; #[allow(clippy::module_name_repetitions)] -pub 
type RunningTestEnvironment = TestEnvironment>; +pub type RunningTestEnvironment = TestEnvironment; pub struct TestEnvironment { pub cfg: Arc, @@ -19,12 +21,12 @@ pub struct TestEnvironment { } #[allow(dead_code)] -pub struct Stopped { - http_server: StoppedHttpServer, +pub struct Stopped { + http_server: StoppedHttpServer, } -pub struct Running { - http_server: RunningHttpServer, +pub struct Running { + http_server: RunningHttpServer, } impl TestEnvironment { @@ -34,14 +36,24 @@ impl TestEnvironment { } } -impl TestEnvironment> { +impl TestEnvironment { #[allow(dead_code)] pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { let cfg = Arc::new(cfg); let tracker = setup_with_configuration(&cfg); - let http_server = http_server(cfg.http_trackers[0].clone()); + let config = cfg.http_trackers[0].clone(); + + let bind_to = config + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); + + let tls = block_on(make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path)) + .map(|tls| tls.expect("tls config failed")); + + let http_server = HttpServer::new(Launcher::new(bind_to, tls)); Self { cfg, @@ -51,7 +63,7 @@ impl TestEnvironment> { } #[allow(dead_code)] - pub async fn start(self) -> TestEnvironment> { + pub async fn start(self) -> TestEnvironment { TestEnvironment { cfg: self.cfg, tracker: self.tracker.clone(), @@ -61,25 +73,25 @@ impl TestEnvironment> { } } - #[allow(dead_code)] - pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { - &self.state.http_server.cfg - } + // #[allow(dead_code)] + // pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { + // &self.state.http_server.cfg + // } - #[allow(dead_code)] - pub fn config_mut(&mut self) -> &mut torrust_tracker_configuration::HttpTracker { - &mut self.state.http_server.cfg - } + // #[allow(dead_code)] + // pub fn config_mut(&mut self) -> &mut torrust_tracker_configuration::HttpTracker { + // &mut 
self.state.http_server.cfg + // } } -impl TestEnvironment> { +impl TestEnvironment { pub async fn new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { let test_env = StoppedTestEnvironment::new_stopped(cfg); test_env.start().await } - pub async fn stop(self) -> TestEnvironment> { + pub async fn stop(self) -> TestEnvironment { TestEnvironment { cfg: self.cfg, tracker: self.tracker, @@ -90,31 +102,26 @@ impl TestEnvironment> { } pub fn bind_address(&self) -> &std::net::SocketAddr { - &self.state.http_server.state.bind_addr + &self.state.http_server.state.binding } - #[allow(dead_code)] - pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { - &self.state.http_server.cfg - } + // #[allow(dead_code)] + // pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { + // &self.state.http_server.cfg + // } } #[allow(clippy::module_name_repetitions, dead_code)] -pub fn stopped_test_environment( - cfg: torrust_tracker_configuration::Configuration, -) -> StoppedTestEnvironment { +pub fn stopped_test_environment(cfg: torrust_tracker_configuration::Configuration) -> StoppedTestEnvironment { TestEnvironment::new_stopped(cfg) } #[allow(clippy::module_name_repetitions)] -pub async fn running_test_environment( - cfg: torrust_tracker_configuration::Configuration, -) -> RunningTestEnvironment { +pub async fn running_test_environment(cfg: torrust_tracker_configuration::Configuration) -> RunningTestEnvironment { TestEnvironment::new_running(cfg).await } -pub fn http_server(cfg: torrust_tracker_configuration::HttpTracker) -> StoppedHttpServer { - let http_server = I::new(); - - HttpServer::new(cfg, http_server) +#[allow(dead_code)] +pub fn http_server(launcher: Launcher) -> StoppedHttpServer { + HttpServer::new(launcher) } diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 9a6aa2454..3034847db 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -2,11 +2,9 @@ use 
torrust_tracker_test_helpers::configuration; use crate::servers::http::test_environment::running_test_environment; -pub type V1 = torrust_tracker::servers::http::v1::launcher::Launcher; - #[tokio::test] async fn test_environment_should_be_started_and_stopped() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let test_env = running_test_environment(configuration::ephemeral()).await; test_env.stop().await; } @@ -18,11 +16,10 @@ mod for_all_config_modes { use crate::servers::http::client::Client; use crate::servers::http::test_environment::running_test_environment; - use crate::servers::http::v1::contract::V1; #[tokio::test] async fn health_check_endpoint_should_return_ok_if_the_http_tracker_is_running() { - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + let test_env = running_test_environment(configuration::ephemeral_with_reverse_proxy()).await; let response = Client::new(*test_env.bind_address()).health_check().await; @@ -40,14 +37,13 @@ mod for_all_config_modes { use crate::servers::http::client::Client; use crate::servers::http::requests::announce::QueryBuilder; use crate::servers::http::test_environment::running_test_environment; - use crate::servers::http::v1::contract::V1; #[tokio::test] async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { // If the tracker is running behind a reverse proxy, the peer IP is the // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. 
- let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + let test_env = running_test_environment(configuration::ephemeral_with_reverse_proxy()).await; let params = QueryBuilder::default().query().params(); @@ -60,7 +56,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + let test_env = running_test_environment(configuration::ephemeral_with_reverse_proxy()).await; let params = QueryBuilder::default().query().params(); @@ -91,7 +87,7 @@ mod for_all_config_modes { use std::str::FromStr; use local_ip_address::local_ip; - use reqwest::Response; + use reqwest::{Response, StatusCode}; use tokio::net::TcpListener; use torrust_tracker::core::peer; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; @@ -108,11 +104,16 @@ mod for_all_config_modes { use crate::servers::http::responses; use crate::servers::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; use crate::servers::http::test_environment::running_test_environment; - use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn it_should_start_and_stop() { + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + test_env.stop().await; + } #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let test_env = running_test_environment(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -127,7 +128,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_url_query_component_is_empty() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let test_env = running_test_environment(configuration::ephemeral()).await; let 
response = Client::new(*test_env.bind_address()).get("announce").await; @@ -138,7 +139,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_url_query_parameters_are_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let test_env = running_test_environment(configuration::ephemeral()).await; let invalid_query_param = "a=b=c"; @@ -153,7 +154,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let test_env = running_test_environment(configuration::ephemeral()).await; // Without `info_hash` param @@ -190,7 +191,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let test_env = running_test_environment(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -212,7 +213,7 @@ mod for_all_config_modes { // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. 
- let test_env = running_test_environment::(configuration::ephemeral()).await; + let test_env = running_test_environment(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -227,7 +228,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let test_env = running_test_environment(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -246,7 +247,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let test_env = running_test_environment(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -265,7 +266,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let test_env = running_test_environment(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -291,7 +292,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let test_env = running_test_environment(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -310,7 +311,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let test_env = running_test_environment(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -329,7 +330,7 @@ mod for_all_config_modes { #[tokio::test] async fn 
should_fail_when_the_event_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let test_env = running_test_environment(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -356,7 +357,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_compact_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let test_env = running_test_environment(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -375,7 +376,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; let response = Client::new(*test_env.bind_address()) .announce( @@ -402,7 +403,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -442,7 +443,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -492,7 +493,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let 
test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let peer = PeerBuilder::default().build(); @@ -519,7 +520,7 @@ mod for_all_config_modes { // Tracker Returns Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -560,7 +561,7 @@ mod for_all_config_modes { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -598,7 +599,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().query()) @@ -622,7 +623,7 @@ mod for_all_config_modes { return; // we cannot bind to a ipv6 socket, so we will skip this test } - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + let test_env = running_test_environment(configuration::ephemeral_ipv6()).await; Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) 
.announce(&QueryBuilder::default().query()) @@ -641,7 +642,7 @@ mod for_all_config_modes { async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; Client::new(*test_env.bind_address()) .announce( @@ -662,7 +663,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().query()) @@ -686,7 +687,7 @@ mod for_all_config_modes { return; // we cannot bind to a ipv6 socket, so we will skip this test } - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + let test_env = running_test_environment(configuration::ephemeral_ipv6()).await; Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) @@ -705,7 +706,7 @@ mod for_all_config_modes { async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; Client::new(*test_env.bind_address()) .announce( @@ -726,19 +727,22 @@ mod for_all_config_modes { #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); - let client = Client::bind(*test_env.bind_address(), client_ip); - let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) .query(); - client.announce(&announce_query).await; + { + let client = Client::bind(*test_env.bind_address(), client_ip); + let status = client.announce(&announce_query).await.status(); + + assert_eq!(status, StatusCode::OK); + } let peers = test_env.tracker.get_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; @@ -758,7 +762,7 @@ mod for_all_config_modes { 127.0.0.1 external_ip = "2.137.87.41" */ - let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + let test_env = running_test_environment(configuration::ephemeral_with_external_ip( IpAddr::from_str("2.137.87.41").unwrap(), )) .await; @@ -767,14 +771,17 @@ mod for_all_config_modes { let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(*test_env.bind_address(), client_ip); - let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) .query(); - client.announce(&announce_query).await; + { + let client = Client::bind(*test_env.bind_address(), 
client_ip); + let status = client.announce(&announce_query).await.status(); + + assert_eq!(status, StatusCode::OK); + } let peers = test_env.tracker.get_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; @@ -794,7 +801,7 @@ mod for_all_config_modes { ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" */ - let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + let test_env = running_test_environment(configuration::ephemeral_with_external_ip( IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), )) .await; @@ -803,14 +810,17 @@ mod for_all_config_modes { let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(*test_env.bind_address(), client_ip); - let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) .query(); - client.announce(&announce_query).await; + { + let client = Client::bind(*test_env.bind_address(), client_ip); + let status = client.announce(&announce_query).await.status(); + + assert_eq!(status, StatusCode::OK); + } let peers = test_env.tracker.get_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; @@ -830,21 +840,25 @@ mod for_all_config_modes { 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 */ - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + let test_env = running_test_environment(configuration::ephemeral_with_reverse_proxy()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let client = Client::new(*test_env.bind_address()); - let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); - client - .announce_with_header( - &announce_query, - "X-Forwarded-For", - "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", - ) - .await; + { + let client = 
Client::new(*test_env.bind_address()); + let status = client + .announce_with_header( + &announce_query, + "X-Forwarded-For", + "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", + ) + .await + .status(); + + assert_eq!(status, StatusCode::OK); + } let peers = test_env.tracker.get_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; @@ -883,12 +897,11 @@ mod for_all_config_modes { use crate::servers::http::requests::scrape::QueryBuilder; use crate::servers::http::responses::scrape::{self, File, ResponseBuilder}; use crate::servers::http::test_environment::running_test_environment; - use crate::servers::http::v1::contract::V1; //#[tokio::test] #[allow(dead_code)] async fn should_fail_when_the_request_is_empty() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; let response = Client::new(*test_env.bind_address()).get("scrape").await; assert_missing_query_params_for_scrape_request_error_response(response).await; @@ -898,7 +911,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; let mut params = QueryBuilder::default().query().params(); @@ -915,7 +928,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -955,7 +968,7 @@ mod for_all_config_modes { #[tokio::test] async fn 
should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -995,7 +1008,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1014,7 +1027,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_accept_multiple_infohashes() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); @@ -1040,7 +1053,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1070,7 +1083,7 @@ mod for_all_config_modes { return; // we cannot bind to a ipv6 socket, so we will skip this test } - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + let test_env = running_test_environment(configuration::ephemeral_ipv6()).await; let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1105,11 +1118,10 @@ mod configured_as_whitelisted { use crate::servers::http::client::Client; use crate::servers::http::requests::announce::QueryBuilder; use crate::servers::http::test_environment::running_test_environment; - use crate::servers::http::v1::contract::V1; #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1124,7 +1136,7 @@ mod configured_as_whitelisted { #[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1157,11 +1169,10 @@ mod configured_as_whitelisted { use crate::servers::http::requests; use crate::servers::http::responses::scrape::{File, ResponseBuilder}; use crate::servers::http::test_environment::running_test_environment; - use crate::servers::http::v1::contract::V1; #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1192,7 +1203,7 @@ mod configured_as_whitelisted { #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let test_env = 
running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1252,11 +1263,10 @@ mod configured_as_private { use crate::servers::http::client::Client; use crate::servers::http::requests::announce::QueryBuilder; use crate::servers::http::test_environment::running_test_environment; - use crate::servers::http::v1::contract::V1; #[tokio::test] async fn should_respond_to_authenticated_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_private()).await; let expiring_key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); @@ -1271,7 +1281,7 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1286,7 +1296,7 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_private()).await; let invalid_key = "INVALID_KEY"; @@ -1301,7 +1311,7 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_private()).await; // The tracker does not have this 
key let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); @@ -1332,11 +1342,10 @@ mod configured_as_private { use crate::servers::http::requests; use crate::servers::http::responses::scrape::{File, ResponseBuilder}; use crate::servers::http::test_environment::running_test_environment; - use crate::servers::http::v1::contract::V1; #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_private()).await; let invalid_key = "INVALID_KEY"; @@ -1351,7 +1360,7 @@ mod configured_as_private { #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1382,7 +1391,7 @@ mod configured_as_private { #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1427,7 +1436,7 @@ mod configured_as_private { // There is not authentication error // code-review: should this really be this way? 
- let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); diff --git a/tests/servers/udp/test_environment.rs b/tests/servers/udp/test_environment.rs index dfe19ac86..bbad6d927 100644 --- a/tests/servers/udp/test_environment.rs +++ b/tests/servers/udp/test_environment.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use torrust_tracker::core::peer::Peer; use torrust_tracker::core::Tracker; -use torrust_tracker::servers::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; +use torrust_tracker::servers::udp::server::{Launcher, RunningUdpServer, StoppedUdpServer, UdpServer}; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::common::app::setup_with_configuration; @@ -43,7 +43,14 @@ impl TestEnvironment { let tracker = setup_with_configuration(&cfg); - let udp_server = udp_server(cfg.udp_trackers[0].clone()); + let udp_cfg = cfg.udp_trackers[0].clone(); + + let bind_to = udp_cfg + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); + + let udp_server = udp_server(Launcher::new(bind_to)); Self { cfg, @@ -81,7 +88,7 @@ impl TestEnvironment { } pub fn bind_address(&self) -> SocketAddr { - self.state.udp_server.state.bind_address + self.state.udp_server.state.binding } } @@ -95,6 +102,6 @@ pub async fn running_test_environment(cfg: torrust_tracker_configuration::Config TestEnvironment::new_running(cfg).await } -pub fn udp_server(cfg: torrust_tracker_configuration::UdpTracker) -> StoppedUdpServer { - UdpServer::new(cfg) +pub fn udp_server(launcher: Launcher) -> StoppedUdpServer { + UdpServer::new(launcher) } From cf613b8a1f66cd674ac39a05cb956ce89cc12052 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 9 Jan 2024 16:01:16 +0000 Subject: [PATCH 0670/1003] fix: [#588] broken grateful shutdown for tracker API The 
internal halt channel was not working because the sender was being dropped just after starting the server. That also made the `shutdown_signal` fail. ```rust pub async fn shutdown_signal(rx_halt: tokio::sync::oneshot::Receiver) { let halt = async { match rx_halt.await { Ok(signal) => signal, Err(err) => panic!("Failed to install stop signal: {err}"), } }; tokio::select! { signal = halt => { info!("Halt signal processed: {}", signal) }, () = global_shutdown_signal() => { info!("Global shutdown signal processed") } } } ``` Since the signal branch in the `tokio::select!` was finishing the global_shutdown_signal did not work either. So you had to kill the process manually to stop the tracker. It seems Rust dropped partially the `Running::halt_task` attribute and that closed the channel. --- src/bootstrap/jobs/tracker_apis.rs | 1 + src/servers/apis/server.rs | 23 +++++++++++++++++------ src/servers/signals.rs | 11 ++++++++--- 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index f454b017f..e50a83651 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -80,6 +80,7 @@ async fn start_v1(socket: SocketAddr, tls: Option, tracker: Arc { launcher }); - Ok(ApiServer { - state: Running { - binding: rx_start.await.expect("unable to start service").address, - halt_task: tx_halt, - task, + //let address = rx_start.await.expect("unable to start service").address; + let api_server = match rx_start.await { + Ok(started) => ApiServer { + state: Running { + binding: started.address, + halt_task: tx_halt, + task, + }, }, - }) + Err(err) => { + let msg = format!("unable to start API server: {err}"); + error!("{}", msg); + panic!("{}", msg); + } + }; + + Ok(api_server) } } diff --git a/src/servers/signals.rs b/src/servers/signals.rs index cb0675d65..091982ae3 100644 --- a/src/servers/signals.rs +++ b/src/servers/signals.rs @@ -46,11 +46,16 @@ pub async fn
global_shutdown_signal() { /// /// Will panic if the `stop_receiver` resolves with an error. pub async fn shutdown_signal(rx_halt: tokio::sync::oneshot::Receiver) { - let halt = async { rx_halt.await.expect("Failed to install stop signal.") }; + let halt = async { + match rx_halt.await { + Ok(signal) => signal, + Err(err) => panic!("Failed to install stop signal: {err}"), + } + }; tokio::select! { - _ = halt => {}, - () = global_shutdown_signal() => {} + signal = halt => { info!("Halt signal processed: {}", signal) }, + () = global_shutdown_signal() => { info!("Global shutdown signal processed") } } } From 53613ec31eec23f569d3895c90f77ae5105f083d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 9 Jan 2024 16:19:15 +0000 Subject: [PATCH 0671/1003] feat: start log lines with capital --- src/bootstrap/jobs/mod.rs | 2 +- src/servers/apis/server.rs | 2 +- src/servers/http/server.rs | 2 +- src/servers/signals.rs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index 3a9936882..2c12eb40e 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -22,7 +22,7 @@ pub struct Started { pub async fn make_rust_tls(enabled: bool, cert: &Option, key: &Option) -> Option> { if !enabled { - info!("tls not enabled"); + info!("TLS not enabled"); return None; } diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index f9507b0fb..b885bf348 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -170,7 +170,7 @@ impl Launcher { tokio::task::spawn(graceful_shutdown( handle.clone(), rx_halt, - format!("shutting down http server on socket address: {address}"), + format!("Shutting down http server on socket address: {address}"), )); let tls = self.tls.clone(); diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index aee2d0ac0..c3411ac06 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -47,7 +47,7 @@ impl Launcher { 
tokio::task::spawn(graceful_shutdown( handle.clone(), rx_halt, - format!("shutting down http server on socket address: {address}"), + format!("Shutting down http server on socket address: {address}"), )); let tls = self.tls.clone(); diff --git a/src/servers/signals.rs b/src/servers/signals.rs index 091982ae3..42fd868e8 100644 --- a/src/servers/signals.rs +++ b/src/servers/signals.rs @@ -69,7 +69,7 @@ pub async fn shutdown_signal_with_message(rx_halt: tokio::sync::oneshot::Receive pub async fn graceful_shutdown(handle: axum_server::Handle, rx_halt: tokio::sync::oneshot::Receiver, message: String) { shutdown_signal_with_message(rx_halt, message).await; - info!("sending graceful shutdown signal"); + info!("Sending graceful shutdown signal"); handle.graceful_shutdown(Some(Duration::from_secs(90))); println!("!! shuting down in 90 seconds !!"); From ac18605ee563ee4f835803012cbbc2486bc97b13 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 9 Jan 2024 16:30:30 +0000 Subject: [PATCH 0672/1003] feat: improve log when starting the API Added the URL where the API is running. ``` 2024-01-09T16:29:46.911284166+00:00 [API][INFO] API server started on http://127.0.0.1:1212 ``` Some hosting services parse the application log output to discover services. For example, GitHub Codespaces can use that info to automatically do port forwarding. Besides, it's also useful for development when you are using random ports and to see what services you are running.
--- src/servers/apis/server.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index b885bf348..5df1d76fd 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -30,7 +30,7 @@ use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; use derive_more::Constructor; use futures::future::BoxFuture; -use log::error; +use log::{error, info}; use tokio::sync::oneshot::{Receiver, Sender}; use super::routes::router; @@ -102,7 +102,6 @@ impl ApiServer { launcher }); - //let address = rx_start.await.expect("unable to start service").address; let api_server = match rx_start.await { Ok(started) => ApiServer { state: Running { @@ -112,7 +111,7 @@ impl ApiServer { }, }, Err(err) => { - let msg = format!("unable to start API server: {err}"); + let msg = format!("Unable to start API server: {err}"); error!("{}", msg); panic!("{}", msg); } @@ -170,10 +169,11 @@ impl Launcher { tokio::task::spawn(graceful_shutdown( handle.clone(), rx_halt, - format!("Shutting down http server on socket address: {address}"), + format!("Shutting down tracker API server on socket address: {address}"), )); let tls = self.tls.clone(); + let protocol = if tls.is_some() { "https" } else { "http" }; let running = Box::pin(async { match tls { @@ -181,18 +181,20 @@ impl Launcher { .handle(handle) .serve(router.into_make_service_with_connect_info::()) .await - .expect("Axum server crashed."), + .expect("Axum server for tracker API crashed."), None => axum_server::from_tcp(socket) .handle(handle) .serve(router.into_make_service_with_connect_info::()) .await - .expect("Axum server crashed."), + .expect("Axum server for tracker API crashed."), } }); + info!(target: "API", "API server started on {protocol}://{}", address); + tx_start .send(Started { address }) - .expect("the HTTP(s) Tracker service should not be dropped"); + .expect("the HTTP(s) Tracker API service should not be dropped"); 
running } From 452b4a0cd665e3e27d0e2440ea06e30a707eac00 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 9 Jan 2024 16:55:23 +0000 Subject: [PATCH 0673/1003] feat: improve http_health_check output --- src/bin/http_health_check.rs | 2 +- src/bootstrap/jobs/health_check_api.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/bin/http_health_check.rs b/src/bin/http_health_check.rs index d3f1767cb..d66d334df 100644 --- a/src/bin/http_health_check.rs +++ b/src/bin/http_health_check.rs @@ -11,7 +11,7 @@ async fn main() { let args: Vec = env::args().collect(); if args.len() != 2 { eprintln!("Usage: cargo run --bin http_health_check "); - eprintln!("Example: cargo run --bin http_health_check http://127.0.0.1:1212/api/health_check"); + eprintln!("Example: cargo run --bin http_health_check http://127.0.0.1:1313/health_check"); std::process::exit(1); } diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index 83eb77f6b..a49f612e8 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -55,7 +55,7 @@ pub async fn start_job(config: Arc) -> JoinHandle<()> { // Wait until the API server job is running match rx_start.await { - Ok(msg) => info!("Torrust Health Check API server started on socket: {}", msg.address), + Ok(msg) => info!("Torrust Health Check API server started on: http://{}", msg.address), Err(e) => panic!("the Health Check API server was dropped: {e}"), } From 9f3f949359c5dab1c537166edcea692dea5765f0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 9 Jan 2024 18:44:11 +0000 Subject: [PATCH 0674/1003] fix: [#592] halt channel closed after starting HTTP tracker --- src/bootstrap/jobs/http_tracker.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 79e01fb3d..69ff345db 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -59,6 +59,10 
@@ async fn start_v1(socket: SocketAddr, tls: Option, tracker: Arc Date: Tue, 9 Jan 2024 18:50:16 +0000 Subject: [PATCH 0675/1003] feat: improve logging for HTTP tracker bootstrapping --- src/servers/http/server.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index c3411ac06..904ccdcf5 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -6,6 +6,7 @@ use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; use derive_more::Constructor; use futures::future::BoxFuture; +use log::info; use tokio::sync::oneshot::{Receiver, Sender}; use super::v1::routes::router; @@ -51,6 +52,9 @@ impl Launcher { )); let tls = self.tls.clone(); + let protocol = if tls.is_some() { "https" } else { "http" }; + + info!(target: "HTTP Tracker", "Starting on: {protocol}://{}", address); let running = Box::pin(async { match tls { @@ -67,6 +71,8 @@ impl Launcher { } }); + info!(target: "HTTP Tracker", "Started on: {protocol}://{}", address); + tx_start .send(Started { address }) .expect("the HTTP(s) Tracker service should not be dropped"); From 0c1f38982d251d1d68063be475ef123d4a4a65e3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 10 Jan 2024 15:37:29 +0000 Subject: [PATCH 0676/1003] fix: [#591] panicking after starting UDP server due to close halt channel --- cSpell.json | 5 +- src/bootstrap/jobs/udp_tracker.rs | 11 ++++ src/servers/apis/server.rs | 4 +- src/servers/http/server.rs | 2 +- src/servers/udp/server.rs | 86 ++++++++++++++++++++----------- 5 files changed, 75 insertions(+), 33 deletions(-) diff --git a/cSpell.json b/cSpell.json index d09db93b7..9602ba39b 100644 --- a/cSpell.json +++ b/cSpell.json @@ -32,6 +32,7 @@ "Containerfile", "curr", "Cyberneering", + "datagram", "datetime", "Dijke", "distroless", @@ -79,6 +80,7 @@ "nonroot", "Norberg", "numwant", + "nvCFlJCq7fz7Qx6KoKTDiMZvns8l5Kw7", "oneshot", "ostr", "Pando", @@ -129,8 +131,7 @@ "Xtorrent", "Xunlei", 
"xxxxxxxxxxxxxxxxxxxxd", - "yyyyyyyyyyyyyyyyyyyyd", - "nvCFlJCq7fz7Qx6KoKTDiMZvns8l5Kw7" + "yyyyyyyyyyyyyyyyyyyyd" ], "enableFiletypes": [ "dockerfile", diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 5911bdf95..20ef0c793 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -8,6 +8,7 @@ //! for the configuration options. use std::sync::Arc; +use log::debug; use tokio::task::JoinHandle; use torrust_tracker_configuration::UdpTracker; @@ -36,10 +37,20 @@ pub async fn start_job(config: &UdpTracker, tracker: Arc) -> Join .expect("it should be able to start the udp tracker"); tokio::spawn(async move { + debug!(target: "UDP Tracker", "Wait for launcher (UDP service) to finish ..."); + debug!(target: "UDP Tracker", "Is halt channel closed before waiting?: {}", server.state.halt_task.is_closed()); + + assert!( + !server.state.halt_task.is_closed(), + "Halt channel for UDP tracker should be open" + ); + server .state .task .await .expect("it should be able to join to the udp tracker task"); + + debug!(target: "UDP Tracker", "Is halt channel closed after finishing the server?: {}", server.state.halt_task.is_closed()); }) } diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 5df1d76fd..f4fdf8994 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -175,6 +175,8 @@ impl Launcher { let tls = self.tls.clone(); let protocol = if tls.is_some() { "https" } else { "http" }; + info!(target: "API", "Starting on {protocol}://{}", address); + let running = Box::pin(async { match tls { Some(tls) => axum_server::from_tcp_rustls(socket, tls) @@ -190,7 +192,7 @@ impl Launcher { } }); - info!(target: "API", "API server started on {protocol}://{}", address); + info!(target: "API", "Started on {protocol}://{}", address); tx_start .send(Started { address }) diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 904ccdcf5..0a4b687b5 100644 --- 
a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -48,7 +48,7 @@ impl Launcher { tokio::task::spawn(graceful_shutdown( handle.clone(), rx_halt, - format!("Shutting down http server on socket address: {address}"), + format!("Shutting down HTTP server on socket address: {address}"), )); let tls = self.tls.clone(); diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index a0af55101..22cdf6357 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -120,23 +120,30 @@ impl UdpServer { let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); + assert!(!tx_halt.is_closed(), "Halt channel for UDP tracker should be open"); + let launcher = self.state.launcher; let task = tokio::spawn(async move { - launcher.start(tracker, tx_start, rx_halt).await; + debug!(target: "UDP Tracker", "Launcher starting ..."); + + let starting = launcher.start(tracker, tx_start, rx_halt).await; + + starting.await.expect("UDP server should have started running"); + launcher }); + let binding = rx_start.await.expect("unable to start service").address; + let running_udp_server: UdpServer = UdpServer { state: Running { - binding: rx_start.await.expect("unable to start service").address, + binding, halt_task: tx_halt, task, }, }; - info!("Running UDP Tracker on Socket: {}", running_udp_server.state.binding); - Ok(running_udp_server) } } @@ -202,41 +209,62 @@ impl Udp { tx_start: Sender, rx_halt: Receiver, ) -> JoinHandle<()> { - let binding = Arc::new(UdpSocket::bind(bind_to).await.expect("Could not bind to {self.socket}.")); - let address = binding.local_addr().expect("Could not get local_addr from {binding}."); + let socket = Arc::new(UdpSocket::bind(bind_to).await.expect("Could not bind to {self.socket}.")); + let address = socket.local_addr().expect("Could not get local_addr from {binding}."); + + info!(target: "UDP Tracker", "Starting on: udp://{}", address); let running = 
tokio::task::spawn(async move { - let halt = async move { - shutdown_signal_with_message(rx_halt, format!("Halting Http Service Bound to Socket: {address}")).await; + let halt = tokio::task::spawn(async move { + debug!(target: "UDP Tracker", "Waiting for halt signal for socket address: udp://{address} ..."); + + shutdown_signal_with_message( + rx_halt, + format!("Shutting down UDP server on socket address: udp://{address}"), + ) + .await; + }); + + let listen = async move { + debug!(target: "UDP Tracker", "Waiting for packets on socket address: udp://{address} ..."); + + loop { + let mut data = [0; MAX_PACKET_SIZE]; + let socket_clone = socket.clone(); + + match socket_clone.recv_from(&mut data).await { + Ok((valid_bytes, remote_addr)) => { + let payload = data[..valid_bytes].to_vec(); + + debug!(target: "UDP Tracker", "Received {} bytes", payload.len()); + debug!(target: "UDP Tracker", "From: {}", &remote_addr); + debug!(target: "UDP Tracker", "Payload: {:?}", payload); + + let response = handle_packet(remote_addr, payload, &tracker).await; + + Udp::send_response(socket_clone, remote_addr, response).await; + } + Err(err) => { + error!("Error reading UDP datagram from socket. Error: {:?}", err); + } + } + } }; pin_mut!(halt); + pin_mut!(listen); - loop { - let mut data = [0; MAX_PACKET_SIZE]; - let binding = binding.clone(); - - tokio::select! { - () = & mut halt => {}, - - Ok((valid_bytes, remote_addr)) = binding.recv_from(&mut data) => { - let payload = data[..valid_bytes].to_vec(); - - debug!("Received {} bytes", payload.len()); - debug!("From: {}", &remote_addr); - debug!("Payload: {:?}", payload); + tx_start + .send(Started { address }) + .expect("the UDP Tracker service should not be dropped"); - let response = handle_packet(remote_addr, payload, &tracker).await; - - Udp::send_response(binding, remote_addr, response).await; - } - } + tokio::select! 
{ + _ = & mut halt => { debug!(target: "UDP Tracker", "Halt signal spawned task stopped on address: udp://{address}"); }, + () = & mut listen => { debug!(target: "UDP Tracker", "Socket listener stopped on address: udp://{address}"); }, } }); - tx_start - .send(Started { address }) - .expect("the UDP Tracker service should not be dropped"); + info!(target: "UDP Tracker", "Started on: udp://{}", address); running } From 5fd0c849d5a3865588446a78ae8c15dc9f8263b8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 11 Jan 2024 17:12:04 +0000 Subject: [PATCH 0677/1003] chore: normalize log output Added targets to all services, especially when they start: [HTTP Tracker], [UDP Tracker], etc. ``` Loading default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... 2024-01-11T17:12:11.134816964+00:00 [torrust_tracker::bootstrap::logging][INFO] logging initialized. 2024-01-11T17:12:11.135473883+00:00 [UDP Tracker][INFO] Starting on: udp://0.0.0.0:6969 2024-01-11T17:12:11.135494422+00:00 [UDP Tracker][INFO] Started on: udp://0.0.0.0:6969 2024-01-11T17:12:11.135503672+00:00 [torrust_tracker::bootstrap::jobs][INFO] TLS not enabled 2024-01-11T17:12:11.135587738+00:00 [HTTP Tracker][INFO] Starting on: http://0.0.0.0:7070 2024-01-11T17:12:11.135612497+00:00 [HTTP Tracker][INFO] Started on: http://0.0.0.0:7070 2024-01-11T17:12:11.135619586+00:00 [torrust_tracker::bootstrap::jobs][INFO] TLS not enabled 2024-01-11T17:12:11.135675454+00:00 [API][INFO] Starting on http://127.0.0.1:1212 2024-01-11T17:12:11.135688443+00:00 [API][INFO] Started on http://127.0.0.1:1212 2024-01-11T17:12:11.135701143+00:00 [Health Check API][INFO] Starting on: http://127.0.0.1:1313 2024-01-11T17:12:11.135718012+00:00 [Health Check API][INFO] Started on: http://127.0.0.1:1313 ``` --- src/bootstrap/jobs/health_check_api.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index
a49f612e8..9fed56435 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -44,18 +44,18 @@ pub async fn start_job(config: Arc) -> JoinHandle<()> { // Run the API server let join_handle = tokio::spawn(async move { - info!("Starting Health Check API server: http://{}", bind_addr); + info!(target: "Health Check API", "Starting on: http://{}", bind_addr); let handle = server::start(bind_addr, tx_start, config.clone()); if let Ok(()) = handle.await { - info!("Health Check API server on http://{} stopped", bind_addr); + info!(target: "Health Check API", "Stopped server running on: http://{}", bind_addr); } }); // Wait until the API server job is running match rx_start.await { - Ok(msg) => info!("Torrust Health Check API server started on: http://{}", msg.address), + Ok(msg) => info!(target: "Health Check API", "Started on: http://{}", msg.address), Err(e) => panic!("the Health Check API server was dropped: {e}"), } From cca17d5a45f00eed1a7a5e0e62d842b438d31bf1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 8 Jan 2024 16:36:52 +0000 Subject: [PATCH 0678/1003] refactor: rename `non-compact` to `normal` --- src/servers/http/v1/handlers/announce.rs | 4 +- src/servers/http/v1/responses/announce.rs | 48 ++++++++++++----------- 2 files changed, 28 insertions(+), 24 deletions(-) diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 0522042b1..23d8b2d6e 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -120,10 +120,10 @@ fn build_response(announce_request: &Announce, announce_data: AnnounceData) -> R match &announce_request.compact { Some(compact) => match compact { Compact::Accepted => announce::Compact::from(announce_data).into_response(), - Compact::NotAccepted => announce::NonCompact::from(announce_data).into_response(), + Compact::NotAccepted => announce::Normal::from(announce_data).into_response(), }, // Default 
response format non compact - None => announce::NonCompact::from(announce_data).into_response(), + None => announce::Normal::from(announce_data).into_response(), } } diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index 8a245476b..52155f171 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -20,22 +20,22 @@ use crate::servers::http::v1::responses; /// /// ```rust /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; -/// use torrust_tracker::servers::http::v1::responses::announce::{NonCompact, Peer}; +/// use torrust_tracker::servers::http::v1::responses::announce::{Normal, NormalPeer}; /// -/// let response = NonCompact { +/// let response = Normal { /// interval: 111, /// interval_min: 222, /// complete: 333, /// incomplete: 444, /// peers: vec![ /// // IPV4 -/// Peer { +/// NormalPeer { /// peer_id: *b"-qB00000000000000001", /// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 /// port: 0x7070, // 28784 /// }, /// // IPV6 -/// Peer { +/// NormalPeer { /// peer_id: *b"-qB00000000000000002", /// ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), /// port: 0x7070, // 28784 @@ -57,7 +57,7 @@ use crate::servers::http::v1::responses; /// Refer to [BEP 03: The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) /// for more information. #[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct NonCompact { +pub struct Normal { /// Interval in seconds that the client should wait between sending regular /// announce requests to the tracker. /// @@ -88,24 +88,24 @@ pub struct NonCompact { /// Number of non-seeder peers, aka "leechers". pub incomplete: u32, /// A list of peers. The value is a list of dictionaries. - pub peers: Vec, + pub peers: Vec, } -/// Peer information in the [`NonCompact`] +/// Peer information in the [`Normal`] /// response. 
/// /// ```rust /// use std::net::{IpAddr, Ipv4Addr}; -/// use torrust_tracker::servers::http::v1::responses::announce::{NonCompact, Peer}; +/// use torrust_tracker::servers::http::v1::responses::announce::{Normal, NormalPeer}; /// -/// let peer = Peer { +/// let peer = NormalPeer { /// peer_id: *b"-qB00000000000000001", /// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 /// port: 0x7070, // 28784 /// }; /// ``` #[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct Peer { +pub struct NormalPeer { /// The peer's ID. pub peer_id: [u8; 20], /// The peer's IP address. @@ -114,7 +114,7 @@ pub struct Peer { pub port: u16, } -impl Peer { +impl NormalPeer { #[must_use] pub fn ben_map(&self) -> BencodeMut<'_> { ben_map! { @@ -125,9 +125,9 @@ impl Peer { } } -impl From for Peer { +impl From for NormalPeer { fn from(peer: core::peer::Peer) -> Self { - Peer { + NormalPeer { peer_id: peer.peer_id.to_bytes(), ip: peer.peer_addr.ip(), port: peer.peer_addr.port(), @@ -135,7 +135,7 @@ impl From for Peer { } } -impl NonCompact { +impl Normal { /// Returns the bencoded body of the non-compact response. 
/// /// # Panics @@ -160,15 +160,19 @@ impl NonCompact { } } -impl IntoResponse for NonCompact { +impl IntoResponse for Normal { fn into_response(self) -> Response { (StatusCode::OK, self.body()).into_response() } } -impl From for NonCompact { +impl From for Normal { fn from(domain_announce_response: AnnounceData) -> Self { - let peers: Vec = domain_announce_response.peers.iter().map(|peer| Peer::from(*peer)).collect(); + let peers: Vec = domain_announce_response + .peers + .iter() + .map(|peer| NormalPeer::from(*peer)) + .collect(); Self { interval: domain_announce_response.interval, @@ -424,7 +428,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - use super::{NonCompact, Peer}; + use super::{Normal, NormalPeer}; use crate::servers::http::v1::responses::announce::{Compact, CompactPeer}; // Some ascii values used in tests: @@ -440,21 +444,21 @@ mod tests { // is also a valid string which makes asserts more readable. #[test] - fn non_compact_announce_response_can_be_bencoded() { - let response = NonCompact { + fn normal_announce_response_can_be_bencoded() { + let response = Normal { interval: 111, interval_min: 222, complete: 333, incomplete: 444, peers: vec![ // IPV4 - Peer { + NormalPeer { peer_id: *b"-qB00000000000000001", ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 port: 0x7070, // 28784 }, // IPV6 - Peer { + NormalPeer { peer_id: *b"-qB00000000000000002", ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), port: 0x7070, // 28784 From d4adfa791501b222ead98be9ab39314a52e8c77a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 8 Jan 2024 16:43:44 +0000 Subject: [PATCH 0679/1003] refactor: extract config struct `AnnouncePolicy` --- packages/configuration/src/lib.rs | 67 +++++++++----- src/servers/http/mod.rs | 2 +- src/servers/http/v1/requests/announce.rs | 2 +- src/servers/http/v1/responses/announce.rs | 103 ++++++++-------------- 4 files changed, 83 insertions(+), 91 
deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 1c0979524..58de94582 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -387,26 +387,9 @@ pub struct HealthCheckApi { pub bind_address: String, } -/// Core configuration for the tracker. -#[allow(clippy::struct_excessive_bools)] -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub struct Configuration { - /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, - /// `Debug` and `Trace`. Default is `Info`. - pub log_level: Option, - /// Tracker mode. See [`TrackerMode`] for more information. - pub mode: TrackerMode, - - // Database configuration - /// Database driver. Possible values are: `Sqlite3`, and `MySQL`. - pub db_driver: DatabaseDriver, - /// Database connection string. The format depends on the database driver. - /// For `Sqlite3`, the format is `path/to/database.db`, for example: - /// `./storage/tracker/lib/database/sqlite3.db`. - /// For `Mysql`, the format is `mysql://db_user:db_user_password:port/db_name`, for - /// example: `root:password@localhost:3306/torrust`. - pub db_path: String, - +/// Announce policy +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy)] +pub struct AnnouncePolicy { /// Interval in seconds that the client should wait between sending regular /// announce requests to the tracker. /// @@ -418,7 +401,8 @@ pub struct Configuration { /// client's initial request. It serves as a guideline for clients to know /// how often they should contact the tracker for updates on the peer list, /// while ensuring that the tracker is not overwhelmed with requests. - pub announce_interval: u32, + pub interval: u32, + /// Minimum announce interval. Clients must not reannounce more frequently /// than this. 
/// @@ -430,6 +414,42 @@ pub struct Configuration { /// value to prevent sending too many requests in a short period, which /// could lead to excessive load on the tracker or even getting banned by /// the tracker for not adhering to the rules. + pub interval_min: u32, +} + +impl Default for AnnouncePolicy { + fn default() -> Self { + Self { + interval: 120, + interval_min: 120, + } + } +} + +/// Core configuration for the tracker. +#[allow(clippy::struct_excessive_bools)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] +pub struct Configuration { + /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, + /// `Debug` and `Trace`. Default is `Info`. + pub log_level: Option, + /// Tracker mode. See [`TrackerMode`] for more information. + pub mode: TrackerMode, + + // Database configuration + /// Database driver. Possible values are: `Sqlite3`, and `MySQL`. + pub db_driver: DatabaseDriver, + /// Database connection string. The format depends on the database driver. + /// For `Sqlite3`, the format is `path/to/database.db`, for example: + /// `./storage/tracker/lib/database/sqlite3.db`. + /// For `Mysql`, the format is `mysql://db_user:db_user_password:port/db_name`, for + /// example: `root:password@localhost:3306/torrust`. + pub db_path: String, + + /// See [`AnnouncePolicy::interval`] + pub announce_interval: u32, + + /// See [`AnnouncePolicy::interval_min`] pub min_announce_interval: u32, /// Weather the tracker is behind a reverse proxy or not. 
/// If the tracker is behind a reverse proxy, the `X-Forwarded-For` header @@ -516,13 +536,14 @@ impl From for Error { impl Default for Configuration { fn default() -> Self { + let announce_policy = AnnouncePolicy::default(); let mut configuration = Configuration { log_level: Option::from(String::from("info")), mode: TrackerMode::Public, db_driver: DatabaseDriver::Sqlite3, db_path: String::from("./storage/tracker/lib/database/sqlite3.db"), - announce_interval: 120, - min_announce_interval: 120, + announce_interval: announce_policy.interval, + min_announce_interval: announce_policy.interval_min, max_peer_timeout: 900, on_reverse_proxy: false, external_ip: Some(String::from("0.0.0.0")), diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index b2d232fc6..e4e42b1c3 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -152,7 +152,7 @@ //! 000000f0: 65 e //! ``` //! -//! Refer to the [`NonCompact`](crate::servers::http::v1::responses::announce::NonCompact) +//! Refer to the [`Normal`](crate::servers::http::v1::responses::announce::Normal) //! response for more information about the response. //! //! **Sample compact response** diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index 7f77f727d..f65d22929 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -180,7 +180,7 @@ impl fmt::Display for Event { /// Depending on the value of this param, the tracker will return a different /// response: /// -/// - [`NonCompact`](crate::servers::http::v1::responses::announce::NonCompact) response. +/// - [`Normal`](crate::servers::http::v1::responses::announce::Normal) response. /// - [`Compact`](crate::servers::http::v1::responses::announce::Compact) response. /// /// Refer to [BEP 23. 
Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index 52155f171..b19a311d8 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -9,6 +9,7 @@ use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; use serde::{self, Deserialize, Serialize}; use thiserror::Error; +use torrust_tracker_configuration::AnnouncePolicy; use torrust_tracker_contrib_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; use crate::core::{self, AnnounceData}; @@ -20,11 +21,14 @@ use crate::servers::http::v1::responses; /// /// ```rust /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +/// use torrust_tracker_configuration::AnnouncePolicy; /// use torrust_tracker::servers::http::v1::responses::announce::{Normal, NormalPeer}; /// /// let response = Normal { -/// interval: 111, -/// interval_min: 222, +/// policy: AnnouncePolicy { +/// interval: 111, +/// interval_min: 222, +/// }, /// complete: 333, /// incomplete: 444, /// peers: vec![ @@ -58,31 +62,8 @@ use crate::servers::http::v1::responses; /// for more information. #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Normal { - /// Interval in seconds that the client should wait between sending regular - /// announce requests to the tracker. - /// - /// It's a **recommended** wait time between announcements. - /// - /// This is the standard amount of time that clients should wait between - /// sending consecutive announcements to the tracker. This value is set by - /// the tracker and is typically provided in the tracker's response to a - /// client's initial request. It serves as a guideline for clients to know - /// how often they should contact the tracker for updates on the peer list, - /// while ensuring that the tracker is not overwhelmed with requests. 
- pub interval: u32, - /// Minimum announce interval. Clients must not reannounce more frequently - /// than this. - /// - /// It establishes the shortest allowed wait time. - /// - /// This is an optional parameter in the protocol that the tracker may - /// provide in its response. It sets a lower limit on the frequency at which - /// clients are allowed to send announcements. Clients should respect this - /// value to prevent sending too many requests in a short period, which - /// could lead to excessive load on the tracker or even getting banned by - /// the tracker for not adhering to the rules. - #[serde(rename = "min interval")] - pub interval_min: u32, + /// Announce policy + pub policy: AnnouncePolicy, /// Number of peers with the entire file, i.e. seeders. pub complete: u32, /// Number of non-seeder peers, aka "leechers". @@ -152,8 +133,8 @@ impl Normal { (ben_map! { "complete" => ben_int!(i64::from(self.complete)), "incomplete" => ben_int!(i64::from(self.incomplete)), - "interval" => ben_int!(i64::from(self.interval)), - "min interval" => ben_int!(i64::from(self.interval_min)), + "interval" => ben_int!(i64::from(self.policy.interval)), + "min interval" => ben_int!(i64::from(self.policy.interval_min)), "peers" => peers_list.clone() }) .encode() @@ -175,8 +156,10 @@ impl From for Normal { .collect(); Self { - interval: domain_announce_response.interval, - interval_min: domain_announce_response.interval_min, + policy: AnnouncePolicy { + interval: domain_announce_response.interval, + interval_min: domain_announce_response.interval_min, + }, complete: domain_announce_response.swarm_stats.seeders, incomplete: domain_announce_response.swarm_stats.leechers, peers, @@ -192,11 +175,14 @@ impl From for Normal { /// /// ```rust /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +/// use torrust_tracker_configuration::AnnouncePolicy; /// use torrust_tracker::servers::http::v1::responses::announce::{Compact, CompactPeer}; /// /// let response = Compact { -/// interval: 
111, -/// interval_min: 222, +/// policy: AnnouncePolicy { +/// interval: 111, +/// interval_min: 222, +/// }, /// complete: 333, /// incomplete: 444, /// peers: vec![ @@ -232,31 +218,8 @@ impl From for Normal { /// - [BEP 07: IPv6 Tracker Extension](https://www.bittorrent.org/beps/bep_0007.html) #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Compact { - /// Interval in seconds that the client should wait between sending regular - /// announce requests to the tracker. - /// - /// It's a **recommended** wait time between announcements. - /// - /// This is the standard amount of time that clients should wait between - /// sending consecutive announcements to the tracker. This value is set by - /// the tracker and is typically provided in the tracker's response to a - /// client's initial request. It serves as a guideline for clients to know - /// how often they should contact the tracker for updates on the peer list, - /// while ensuring that the tracker is not overwhelmed with requests. - pub interval: u32, - /// Minimum announce interval. Clients must not reannounce more frequently - /// than this. - /// - /// It establishes the shortest allowed wait time. - /// - /// This is an optional parameter in the protocol that the tracker may - /// provide in its response. It sets a lower limit on the frequency at which - /// clients are allowed to send announcements. Clients should respect this - /// value to prevent sending too many requests in a short period, which - /// could lead to excessive load on the tracker or even getting banned by - /// the tracker for not adhering to the rules. - #[serde(rename = "min interval")] - pub interval_min: u32, + /// Announce policy + pub policy: AnnouncePolicy, /// Number of seeders, aka "completed". pub complete: u32, /// Number of non-seeder peers, aka "incomplete". @@ -335,8 +298,8 @@ impl Compact { let bytes = (ben_map! 
{ "complete" => ben_int!(i64::from(self.complete)), "incomplete" => ben_int!(i64::from(self.incomplete)), - "interval" => ben_int!(i64::from(self.interval)), - "min interval" => ben_int!(i64::from(self.interval_min)), + "interval" => ben_int!(i64::from(self.policy.interval)), + "min interval" => ben_int!(i64::from(self.policy.interval_min)), "peers" => ben_bytes!(self.peers_v4_bytes()?), "peers6" => ben_bytes!(self.peers_v6_bytes()?) }) @@ -414,8 +377,10 @@ impl From for Compact { .collect(); Self { - interval: domain_announce_response.interval, - interval_min: domain_announce_response.interval_min, + policy: AnnouncePolicy { + interval: domain_announce_response.interval, + interval_min: domain_announce_response.interval_min, + }, complete: domain_announce_response.swarm_stats.seeders, incomplete: domain_announce_response.swarm_stats.leechers, peers, @@ -428,6 +393,8 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use torrust_tracker_configuration::AnnouncePolicy; + use super::{Normal, NormalPeer}; use crate::servers::http::v1::responses::announce::{Compact, CompactPeer}; @@ -446,8 +413,10 @@ mod tests { #[test] fn normal_announce_response_can_be_bencoded() { let response = Normal { - interval: 111, - interval_min: 222, + policy: AnnouncePolicy { + interval: 111, + interval_min: 222, + }, complete: 333, incomplete: 444, peers: vec![ @@ -480,8 +449,10 @@ mod tests { #[test] fn compact_announce_response_can_be_bencoded() { let response = Compact { - interval: 111, - interval_min: 222, + policy: AnnouncePolicy { + interval: 111, + interval_min: 222, + }, complete: 333, incomplete: 444, peers: vec![ From 58a57d364022f9a63b4592d9c833a7040acb77a2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 8 Jan 2024 16:52:32 +0000 Subject: [PATCH 0680/1003] refactor: extract struct SwarmStats (da2ce7): Merge `SwarmStats` into `SwarmMetadata`. 
--- packages/configuration/src/lib.rs | 2 +- src/core/mod.rs | 12 ++-- src/core/torrent/mod.rs | 14 +---- src/core/torrent/repository.rs | 30 ++++----- src/servers/http/v1/responses/announce.rs | 77 +++++++++++++---------- src/servers/http/v1/services/announce.rs | 6 +- src/servers/udp/handlers.rs | 8 +-- 7 files changed, 75 insertions(+), 74 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 58de94582..6b3056e85 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -388,7 +388,7 @@ pub struct HealthCheckApi { } /// Announce policy -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy)] +#[derive(PartialEq, Eq, Debug, Clone, Copy)] pub struct AnnouncePolicy { /// Interval in seconds that the client should wait between sending regular /// announce requests to the tracker. diff --git a/src/core/mod.rs b/src/core/mod.rs index beb4b133d..646558d55 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -732,7 +732,7 @@ impl Tracker { let (stats, stats_updated) = self.torrents.update_torrent_with_peer_and_get_stats(info_hash, peer).await; if self.config.persistent_torrent_completed_stat && stats_updated { - let completed = stats.completed; + let completed = stats.downloaded; let info_hash = *info_hash; drop(self.database.save_persistent_torrent(&info_hash, completed).await); @@ -1390,7 +1390,7 @@ mod tests { let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; - assert_eq!(announce_data.swarm_stats.seeders, 1); + assert_eq!(announce_data.swarm_stats.complete, 1); } #[tokio::test] @@ -1401,7 +1401,7 @@ mod tests { let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; - assert_eq!(announce_data.swarm_stats.leechers, 1); + assert_eq!(announce_data.swarm_stats.incomplete, 1); } #[tokio::test] @@ -1415,7 +1415,7 @@ mod tests { let mut completed_peer = completed_peer(); let announce_data = 
tracker.announce(&sample_info_hash(), &mut completed_peer, &peer_ip()).await; - assert_eq!(announce_data.swarm_stats.completed, 1); + assert_eq!(announce_data.swarm_stats.downloaded, 1); } } } @@ -1739,11 +1739,11 @@ mod tests { peer.event = AnnounceEvent::Started; let swarm_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - assert_eq!(swarm_stats.completed, 0); + assert_eq!(swarm_stats.downloaded, 0); peer.event = AnnounceEvent::Completed; let swarm_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - assert_eq!(swarm_stats.completed, 1); + assert_eq!(swarm_stats.downloaded, 1); // Remove the newly updated torrent from memory tracker.torrents.get_torrents_mut().await.remove(&info_hash); diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index 79828d368..82e37ecb2 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -73,18 +73,8 @@ impl SwarmMetadata { } } -/// Swarm statistics for one torrent. -/// -/// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) -#[derive(Debug, PartialEq, Default)] -pub struct SwarmStats { - /// The number of peers that have ever completed downloading - pub completed: u32, - /// The number of active peers that have completed downloading (seeders) - pub seeders: u32, - /// The number of active peers that have not completed downloading (leechers) - pub leechers: u32, -} +/// [`SwarmStats`] has the same form as [`SwarmMetadata`] +pub type SwarmStats = SwarmMetadata; impl Entry { #[must_use] diff --git a/src/core/torrent/repository.rs b/src/core/torrent/repository.rs index ac3d03054..d4f8ee5e3 100644 --- a/src/core/torrent/repository.rs +++ b/src/core/torrent/repository.rs @@ -77,9 +77,9 @@ impl Repository for Sync { ( SwarmStats { - completed: stats.1, - seeders: stats.0, - leechers: stats.2, + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, }, stats_updated, ) @@ -131,9 +131,9 @@ impl 
Repository for SyncSingle { ( SwarmStats { - completed: stats.1, - seeders: stats.0, - leechers: stats.2, + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, }, stats_updated, ) @@ -176,9 +176,9 @@ impl TRepositoryAsync for RepositoryAsync { ( SwarmStats { - completed: stats.1, - seeders: stats.0, - leechers: stats.2, + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, }, stats_updated, ) @@ -234,9 +234,9 @@ impl TRepositoryAsync for AsyncSync { ( SwarmStats { - completed: stats.1, - seeders: stats.0, - leechers: stats.2, + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, }, stats_updated, ) @@ -281,9 +281,9 @@ impl TRepositoryAsync for RepositoryAsyncSingle { ( SwarmStats { - completed: stats.1, - seeders: stats.0, - leechers: stats.2, + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, }, stats_updated, ) diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index b19a311d8..14ae9156d 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -7,11 +7,11 @@ use std::panic::Location; use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; -use serde::{self, Deserialize, Serialize}; use thiserror::Error; use torrust_tracker_configuration::AnnouncePolicy; use torrust_tracker_contrib_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; +use crate::core::torrent::SwarmStats; use crate::core::{self, AnnounceData}; use crate::servers::http::v1::responses; @@ -22,6 +22,7 @@ use crate::servers::http::v1::responses; /// ```rust /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; /// use torrust_tracker_configuration::AnnouncePolicy; +/// use torrust_tracker::core::torrent::SwarmStats; /// use torrust_tracker::servers::http::v1::responses::announce::{Normal, NormalPeer}; /// /// let response = Normal { @@ -29,8 +30,11 @@ use crate::servers::http::v1::responses; /// interval: 111, /// 
interval_min: 222, /// }, -/// complete: 333, -/// incomplete: 444, +/// stats: SwarmStats { +/// downloaded: 0, +/// complete: 333, +/// incomplete: 444, +/// }, /// peers: vec![ /// // IPV4 /// NormalPeer { @@ -60,15 +64,10 @@ use crate::servers::http::v1::responses; /// /// Refer to [BEP 03: The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) /// for more information. -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Debug, PartialEq)] pub struct Normal { - /// Announce policy pub policy: AnnouncePolicy, - /// Number of peers with the entire file, i.e. seeders. - pub complete: u32, - /// Number of non-seeder peers, aka "leechers". - pub incomplete: u32, - /// A list of peers. The value is a list of dictionaries. + pub stats: SwarmStats, pub peers: Vec, } @@ -85,7 +84,7 @@ pub struct Normal { /// port: 0x7070, // 28784 /// }; /// ``` -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Debug, PartialEq)] pub struct NormalPeer { /// The peer's ID. pub peer_id: [u8; 20], @@ -131,8 +130,8 @@ impl Normal { } (ben_map! 
{ - "complete" => ben_int!(i64::from(self.complete)), - "incomplete" => ben_int!(i64::from(self.incomplete)), + "complete" => ben_int!(i64::from(self.stats.complete)), + "incomplete" => ben_int!(i64::from(self.stats.incomplete)), "interval" => ben_int!(i64::from(self.policy.interval)), "min interval" => ben_int!(i64::from(self.policy.interval_min)), "peers" => peers_list.clone() @@ -160,8 +159,11 @@ impl From for Normal { interval: domain_announce_response.interval, interval_min: domain_announce_response.interval_min, }, - complete: domain_announce_response.swarm_stats.seeders, - incomplete: domain_announce_response.swarm_stats.leechers, + stats: SwarmStats { + complete: domain_announce_response.swarm_stats.complete, + incomplete: domain_announce_response.swarm_stats.incomplete, + downloaded: 0, + }, peers, } } @@ -176,6 +178,7 @@ impl From for Normal { /// ```rust /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; /// use torrust_tracker_configuration::AnnouncePolicy; +/// use torrust_tracker::core::torrent::SwarmStats; /// use torrust_tracker::servers::http::v1::responses::announce::{Compact, CompactPeer}; /// /// let response = Compact { @@ -183,8 +186,11 @@ impl From for Normal { /// interval: 111, /// interval_min: 222, /// }, -/// complete: 333, -/// incomplete: 444, +/// stats: SwarmStats { +/// downloaded: 0, +/// complete: 333, +/// incomplete: 444, +/// }, /// peers: vec![ /// // IPV4 /// CompactPeer { @@ -216,15 +222,10 @@ impl From for Normal { /// /// - [BEP 23: Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) /// - [BEP 07: IPv6 Tracker Extension](https://www.bittorrent.org/beps/bep_0007.html) -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Debug, PartialEq)] pub struct Compact { - /// Announce policy pub policy: AnnouncePolicy, - /// Number of seeders, aka "completed". - pub complete: u32, - /// Number of non-seeder peers, aka "incomplete". - pub incomplete: u32, - /// Compact peer list. 
+ pub stats: SwarmStats, pub peers: Vec, } @@ -250,7 +251,7 @@ pub struct Compact { /// /// Refer to [BEP 23: Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) /// for more information. -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Debug, PartialEq)] pub struct CompactPeer { /// The peer's IP address. pub ip: IpAddr, @@ -296,8 +297,8 @@ impl Compact { /// Will return `Err` if internally interrupted. pub fn body(&self) -> Result, Box> { let bytes = (ben_map! { - "complete" => ben_int!(i64::from(self.complete)), - "incomplete" => ben_int!(i64::from(self.incomplete)), + "complete" => ben_int!(i64::from(self.stats.complete)), + "incomplete" => ben_int!(i64::from(self.stats.incomplete)), "interval" => ben_int!(i64::from(self.policy.interval)), "min interval" => ben_int!(i64::from(self.policy.interval_min)), "peers" => ben_bytes!(self.peers_v4_bytes()?), @@ -381,8 +382,11 @@ impl From for Compact { interval: domain_announce_response.interval, interval_min: domain_announce_response.interval_min, }, - complete: domain_announce_response.swarm_stats.seeders, - incomplete: domain_announce_response.swarm_stats.leechers, + stats: SwarmStats { + complete: domain_announce_response.swarm_stats.complete, + incomplete: domain_announce_response.swarm_stats.incomplete, + downloaded: 0, + }, peers, } } @@ -396,6 +400,7 @@ mod tests { use torrust_tracker_configuration::AnnouncePolicy; use super::{Normal, NormalPeer}; + use crate::core::torrent::SwarmStats; use crate::servers::http::v1::responses::announce::{Compact, CompactPeer}; // Some ascii values used in tests: @@ -417,8 +422,11 @@ mod tests { interval: 111, interval_min: 222, }, - complete: 333, - incomplete: 444, + stats: SwarmStats { + downloaded: 0, + complete: 333, + incomplete: 444, + }, peers: vec![ // IPV4 NormalPeer { @@ -453,8 +461,11 @@ mod tests { interval: 111, interval_min: 222, }, - complete: 333, - incomplete: 444, + stats: SwarmStats { + downloaded: 0, + complete: 
333, + incomplete: 444, + }, peers: vec![ // IPV4 CompactPeer { diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index bdf8afc87..547dcd35b 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -114,9 +114,9 @@ mod tests { let expected_announce_data = AnnounceData { peers: vec![], swarm_stats: SwarmStats { - completed: 0, - seeders: 1, - leechers: 0, + downloaded: 0, + complete: 1, + incomplete: 0, }, interval: tracker.config.announce_interval, interval_min: tracker.config.min_announce_interval, diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 18a341418..4e3080b37 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -152,8 +152,8 @@ pub async fn handle_announce( let announce_response = AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), - leechers: NumberOfPeers(i64::from(response.swarm_stats.leechers) as i32), - seeders: NumberOfPeers(i64::from(response.swarm_stats.seeders) as i32), + leechers: NumberOfPeers(i64::from(response.swarm_stats.incomplete) as i32), + seeders: NumberOfPeers(i64::from(response.swarm_stats.complete) as i32), peers: response .peers .iter() @@ -177,8 +177,8 @@ pub async fn handle_announce( let announce_response = AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), - leechers: NumberOfPeers(i64::from(response.swarm_stats.leechers) as i32), - seeders: NumberOfPeers(i64::from(response.swarm_stats.seeders) as i32), + leechers: NumberOfPeers(i64::from(response.swarm_stats.incomplete) as i32), + seeders: NumberOfPeers(i64::from(response.swarm_stats.complete) as i32), peers: response .peers .iter() From 0e6fe3309b65a674e6bcb968dde4b2e59af536d2 
Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 10 Jan 2024 15:52:59 +1100 Subject: [PATCH 0681/1003] dev: Announce Response Cleanup --- Cargo.lock | 1 + Cargo.toml | 9 +- cSpell.json | 1 + packages/configuration/Cargo.toml | 1 + packages/configuration/src/lib.rs | 4 +- src/core/databases/mod.rs | 2 +- src/core/mod.rs | 34 +- src/core/peer.rs | 80 ++- src/core/torrent/mod.rs | 13 +- src/servers/http/mod.rs | 2 +- src/servers/http/v1/handlers/announce.rs | 15 +- src/servers/http/v1/requests/announce.rs | 2 +- src/servers/http/v1/responses/announce.rs | 597 +++++++----------- src/servers/http/v1/responses/mod.rs | 12 + src/servers/http/v1/services/announce.rs | 6 +- src/servers/udp/handlers.rs | 8 +- tests/common/fixtures.rs | 66 -- .../servers/api/v1/contract/context/stats.rs | 2 +- .../api/v1/contract/context/torrent.rs | 2 +- tests/servers/http/v1/contract.rs | 10 +- 20 files changed, 396 insertions(+), 471 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9b7c10f39..de630b497 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3462,6 +3462,7 @@ name = "torrust-tracker-configuration" version = "3.0.0-alpha.12-develop" dependencies = [ "config", + "derive_more", "log", "serde", "serde_with", diff --git a/Cargo.toml b/Cargo.toml index 64f913e4f..daf3c0259 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,7 +79,14 @@ serde_urlencoded = "0" torrust-tracker-test-helpers = { version = "3.0.0-alpha.12-develop", path = "packages/test-helpers" } [workspace] -members = ["contrib/bencode", "packages/configuration", "packages/located-error", "packages/primitives", "packages/test-helpers", "packages/torrent-repository-benchmarks"] +members = [ + "contrib/bencode", + "packages/configuration", + "packages/located-error", + "packages/primitives", + "packages/test-helpers", + "packages/torrent-repository-benchmarks", +] [profile.dev] debug = 1 diff --git a/cSpell.json b/cSpell.json index 9602ba39b..7b3ce4de9 100644 --- a/cSpell.json +++ b/cSpell.json @@ -50,6 +50,7 @@
"Hydranode", "Icelake", "imdl", + "impls", "incompletei", "infohash", "infohashes", diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index e373b4269..ecc8c976e 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -16,6 +16,7 @@ version.workspace = true [dependencies] config = "0" +derive_more = "0" log = { version = "0", features = ["release_max_level_info"] } serde = { version = "1", features = ["derive"] } serde_with = "3" diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 6b3056e85..a8f605289 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -236,6 +236,7 @@ use std::sync::Arc; use std::{env, fs}; use config::{Config, ConfigError, File, FileFormat}; +use derive_more::Constructor; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; use thiserror::Error; @@ -388,7 +389,7 @@ pub struct HealthCheckApi { } /// Announce policy -#[derive(PartialEq, Eq, Debug, Clone, Copy)] +#[derive(PartialEq, Eq, Debug, Clone, Copy, Constructor)] pub struct AnnouncePolicy { /// Interval in seconds that the client should wait between sending regular /// announce requests to the tracker. @@ -537,6 +538,7 @@ impl From for Error { impl Default for Configuration { fn default() -> Self { let announce_policy = AnnouncePolicy::default(); + let mut configuration = Configuration { log_level: Option::from(String::from("info")), mode: TrackerMode::Public, diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs index 14fcb6b5b..b80b11987 100644 --- a/src/core/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -134,7 +134,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to save. 
- async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; + async fn save_persistent_torrent(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error>; // Whitelist diff --git a/src/core/mod.rs b/src/core/mod.rs index 646558d55..fc44877c8 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -98,12 +98,12 @@ //! //! ```rust,no_run //! use torrust_tracker::core::peer::Peer; +//! use torrust_tracker_configuration::AnnouncePolicy; //! //! pub struct AnnounceData { //! pub peers: Vec, //! pub swarm_stats: SwarmStats, -//! pub interval: u32, // Option `announce_interval` from core tracker configuration -//! pub interval_min: u32, // Option `min_announce_interval` from core tracker configuration +//! pub policy: AnnouncePolicy, // the tracker announce policy. //! } //! //! pub struct SwarmStats { @@ -445,9 +445,10 @@ use std::panic::Location; use std::sync::Arc; use std::time::Duration; +use derive_more::Constructor; use futures::future::join_all; use tokio::sync::mpsc::error::SendError; -use torrust_tracker_configuration::Configuration; +use torrust_tracker_configuration::{AnnouncePolicy, Configuration}; use torrust_tracker_primitives::TrackerMode; use self::auth::Key; @@ -487,7 +488,7 @@ pub struct Tracker { /// Structure that holds general `Tracker` torrents metrics. /// /// Metrics are aggregate values for all torrents. -#[derive(Debug, PartialEq, Default)] +#[derive(Copy, Clone, Debug, PartialEq, Default)] pub struct TorrentsMetrics { /// Total number of seeders for all torrents pub seeders: u64, @@ -500,20 +501,14 @@ pub struct TorrentsMetrics { } /// Structure that holds the data returned by the `announce` request. -#[derive(Debug, PartialEq, Default)] +#[derive(Clone, Debug, PartialEq, Constructor, Default)] pub struct AnnounceData { /// The list of peers that are downloading the same torrent. /// It excludes the peer that made the request. 
pub peers: Vec, /// Swarm statistics - pub swarm_stats: SwarmStats, - /// The interval in seconds that the client should wait between sending - /// regular requests to the tracker. - /// Refer to [`announce_interval`](torrust_tracker_configuration::Configuration::announce_interval). - pub interval: u32, - /// The minimum announce interval in seconds that the client should wait. - /// Refer to [`min_announce_interval`](torrust_tracker_configuration::Configuration::min_announce_interval). - pub interval_min: u32, + pub stats: SwarmStats, + pub policy: AnnouncePolicy, } /// Structure that holds the data returned by the `scrape` request. @@ -628,11 +623,12 @@ impl Tracker { let peers = self.get_torrent_peers_for_peer(info_hash, peer).await; + let policy = AnnouncePolicy::new(self.config.announce_interval, self.config.min_announce_interval); + AnnounceData { peers, - swarm_stats, - interval: self.config.announce_interval, - interval_min: self.config.min_announce_interval, + stats: swarm_stats, + policy, } } @@ -1390,7 +1386,7 @@ mod tests { let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; - assert_eq!(announce_data.swarm_stats.complete, 1); + assert_eq!(announce_data.stats.complete, 1); } #[tokio::test] @@ -1401,7 +1397,7 @@ mod tests { let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; - assert_eq!(announce_data.swarm_stats.incomplete, 1); + assert_eq!(announce_data.stats.incomplete, 1); } #[tokio::test] @@ -1415,7 +1411,7 @@ mod tests { let mut completed_peer = completed_peer(); let announce_data = tracker.announce(&sample_info_hash(), &mut completed_peer, &peer_ip()).await; - assert_eq!(announce_data.swarm_stats.downloaded, 1); + assert_eq!(announce_data.stats.downloaded, 1); } } } diff --git a/src/core/peer.rs b/src/core/peer.rs index a64f87b66..03489ce30 100644 --- a/src/core/peer.rs +++ b/src/core/peer.rs @@ -277,9 +277,85 @@ impl Serialize for Id { } } -#[cfg(test)] -mod test { +pub mod 
fixture { + use std::net::SocketAddr; + + use aquatic_udp_protocol::NumberOfBytes; + + use super::{Id, Peer}; + + #[derive(PartialEq, Debug)] + + pub struct PeerBuilder { + peer: Peer, + } + + #[allow(clippy::derivable_impls)] + impl Default for PeerBuilder { + fn default() -> Self { + Self { peer: Peer::default() } + } + } + + impl PeerBuilder { + #[allow(dead_code)] + #[must_use] + pub fn with_peer_id(mut self, peer_id: &Id) -> Self { + self.peer.peer_id = *peer_id; + self + } + + #[allow(dead_code)] + #[must_use] + pub fn with_peer_addr(mut self, peer_addr: &SocketAddr) -> Self { + self.peer.peer_addr = *peer_addr; + self + } + + #[allow(dead_code)] + #[must_use] + pub fn with_bytes_pending_to_download(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } + #[allow(dead_code)] + #[must_use] + pub fn with_no_bytes_pending_to_download(mut self) -> Self { + self.peer.left = NumberOfBytes(0); + self + } + + #[allow(dead_code)] + #[must_use] + pub fn build(self) -> Peer { + self.into() + } + + #[allow(dead_code)] + #[must_use] + pub fn into(self) -> Peer { + self.peer + } + } + + impl Default for Peer { + fn default() -> Self { + Self { + peer_id: Id(*b"-qB00000000000000000"), + peer_addr: std::net::SocketAddr::new(std::net::IpAddr::V4(std::net::Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: crate::shared::clock::DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: aquatic_udp_protocol::AnnounceEvent::Started, + } + } + } +} + +#[cfg(test)] +pub mod test { mod torrent_peer_id { use crate::core::peer; diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index 82e37ecb2..d19a97be1 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -33,6 +33,7 @@ pub mod repository; use std::time::Duration; use aquatic_udp_protocol::AnnounceEvent; +use derive_more::Constructor; use serde::{Deserialize, Serialize}; use 
super::peer::{self, Peer}; @@ -56,13 +57,13 @@ pub struct Entry { /// Swarm metadata dictionary in the scrape response. /// /// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) -#[derive(Debug, PartialEq, Default)] +#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] pub struct SwarmMetadata { - /// The number of peers that have ever completed downloading - pub downloaded: u32, - /// The number of active peers that have completed downloading (seeders) - pub complete: u32, - /// The number of active peers that have not completed downloading (leechers) + /// (i.e `completed`): The number of peers that have ever completed downloading + pub downloaded: u32, // + /// (i.e `seeders`): The number of active peers that have completed downloading (seeders) + pub complete: u32, //seeders + /// (i.e `leechers`): The number of active peers that have not completed downloading (leechers) pub incomplete: u32, } diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index e4e42b1c3..08a59ef90 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -152,7 +152,7 @@ //! 000000f0: 65 e //! ``` //! -//! Refer to the [`Normal`](crate::servers::http::v1::responses::announce::Normal) +//! Refer to the [`Normal`](crate::servers::http::v1::responses::announce::Normal), i.e. `Non-Compact` //! response for more information about the response. //! //! 
**Sample compact response** diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 23d8b2d6e..cfe422e7f 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -22,7 +22,7 @@ use crate::servers::http::v1::extractors::authentication_key::Extract as Extract use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::handlers::common::auth; use crate::servers::http::v1::requests::announce::{Announce, Compact, Event}; -use crate::servers::http::v1::responses::{self, announce}; +use crate::servers::http::v1::responses::{self}; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::servers::http::v1::services::{self, peer_ip_resolver}; use crate::shared::clock::{Current, Time}; @@ -117,13 +117,12 @@ async fn handle_announce( } fn build_response(announce_request: &Announce, announce_data: AnnounceData) -> Response { - match &announce_request.compact { - Some(compact) => match compact { - Compact::Accepted => announce::Compact::from(announce_data).into_response(), - Compact::NotAccepted => announce::Normal::from(announce_data).into_response(), - }, - // Default response format non compact - None => announce::Normal::from(announce_data).into_response(), + if announce_request.compact.as_ref().is_some_and(|f| *f == Compact::Accepted) { + let response: responses::Announce = announce_data.into(); + response.into_response() + } else { + let response: responses::Announce = announce_data.into(); + response.into_response() } } diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index f65d22929..08dd9da29 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -180,7 +180,7 @@ impl fmt::Display for Event { /// Depending on the value of this param, the tracker will return a different /// response: /// 
-/// - [`Normal`](crate::servers::http::v1::responses::announce::Normal) response. +/// - [`Normal`](crate::servers::http::v1::responses::announce::Normal), i.e. a `non-compact` response. /// - [`Compact`](crate::servers::http::v1::responses::announce::Compact) response. /// /// Refer to [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index 14ae9156d..b1b474ea9 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -2,237 +2,217 @@ //! //! Data structures and logic to build the `announce` response. use std::io::Write; -use std::net::IpAddr; -use std::panic::Location; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use axum::http::StatusCode; -use axum::response::{IntoResponse, Response}; -use thiserror::Error; -use torrust_tracker_configuration::AnnouncePolicy; +use derive_more::{AsRef, Constructor, From}; use torrust_tracker_contrib_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; -use crate::core::torrent::SwarmStats; +use super::Response; +use crate::core::peer::Peer; use crate::core::{self, AnnounceData}; use crate::servers::http::v1::responses; -/// Normal (non compact) `announce` response. +/// An [`Announce`] response, that can be anything that is convertible from [`AnnounceData`]. /// -/// It's a bencoded dictionary. +/// The [`Announce`] can built from any data that implements: [`From`] and [`Into>`]. /// -/// ```rust -/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; -/// use torrust_tracker_configuration::AnnouncePolicy; -/// use torrust_tracker::core::torrent::SwarmStats; -/// use torrust_tracker::servers::http::v1::responses::announce::{Normal, NormalPeer}; +/// The two standard forms of an announce response are: [`Normal`] and [`Compact`]. 
/// -/// let response = Normal { -/// policy: AnnouncePolicy { -/// interval: 111, -/// interval_min: 222, -/// }, -/// stats: SwarmStats { -/// downloaded: 0, -/// complete: 333, -/// incomplete: 444, -/// }, -/// peers: vec![ -/// // IPV4 -/// NormalPeer { -/// peer_id: *b"-qB00000000000000001", -/// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 -/// port: 0x7070, // 28784 -/// }, -/// // IPV6 -/// NormalPeer { -/// peer_id: *b"-qB00000000000000002", -/// ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), -/// port: 0x7070, // 28784 -/// }, -/// ], -/// }; /// -/// let bytes = response.body(); -/// -/// // The expected bencoded response. -/// let expected_bytes = b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peersld2:ip15:105.105.105.1057:peer id20:-qB000000000000000014:porti28784eed2:ip39:6969:6969:6969:6969:6969:6969:6969:69697:peer id20:-qB000000000000000024:porti28784eeee"; +/// _"To reduce the size of tracker responses and to reduce memory and +/// computational requirements in trackers, trackers may return peers as a +/// packed string rather than as a bencoded list."_ /// -/// assert_eq!( -/// String::from_utf8(bytes).unwrap(), -/// String::from_utf8(expected_bytes.to_vec()).unwrap() -/// ); -/// ``` +/// Refer to the official BEPs for more information: /// -/// Refer to [BEP 03: The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -/// for more information. 
-#[derive(Debug, PartialEq)] -pub struct Normal { - pub policy: AnnouncePolicy, - pub stats: SwarmStats, - pub peers: Vec, +/// - [BEP 03: The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +/// - [BEP 23: Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +/// - [BEP 07: IPv6 Tracker Extension](https://www.bittorrent.org/beps/bep_0007.html) + +#[derive(Debug, AsRef, PartialEq, Constructor)] +pub struct Announce +where + E: From + Into>, +{ + data: E, } -/// Peer information in the [`Normal`] -/// response. -/// -/// ```rust -/// use std::net::{IpAddr, Ipv4Addr}; -/// use torrust_tracker::servers::http::v1::responses::announce::{Normal, NormalPeer}; -/// -/// let peer = NormalPeer { -/// peer_id: *b"-qB00000000000000001", -/// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 -/// port: 0x7070, // 28784 -/// }; -/// ``` -#[derive(Debug, PartialEq)] -pub struct NormalPeer { - /// The peer's ID. - pub peer_id: [u8; 20], - /// The peer's IP address. - pub ip: IpAddr, - /// The peer's port number. - pub port: u16, +/// Build any [`Announce`] from an [`AnnounceData`]. +impl + Into>> From for Announce { + fn from(data: AnnounceData) -> Self { + Self::new(data.into()) + } } -impl NormalPeer { - #[must_use] - pub fn ben_map(&self) -> BencodeMut<'_> { - ben_map! 
{ - "peer id" => ben_bytes!(self.peer_id.clone().to_vec()), - "ip" => ben_bytes!(self.ip.to_string()), - "port" => ben_int!(i64::from(self.port)) - } +/// Convert any Announce [`Announce`] into a [`axum::response::Response`] +impl + Into>> axum::response::IntoResponse for Announce +where + Announce: Response, +{ + fn into_response(self) -> axum::response::Response { + axum::response::IntoResponse::into_response(self.body().map(|bytes| (StatusCode::OK, bytes))) } } -impl From for NormalPeer { - fn from(peer: core::peer::Peer) -> Self { - NormalPeer { - peer_id: peer.peer_id.to_bytes(), - ip: peer.peer_addr.ip(), - port: peer.peer_addr.port(), +/// Implement the [`Response`] for the [`Announce`]. +/// +impl + Into>> Response for Announce { + fn body(self) -> Result, responses::error::Error> { + Ok(self.data.into()) + } +} + +/// Format of the [`Normal`] (Non-Compact) Encoding +pub struct Normal { + complete: i64, + incomplete: i64, + interval: i64, + min_interval: i64, + peers: Vec, +} + +impl From for Normal { + fn from(data: AnnounceData) -> Self { + Self { + complete: data.stats.complete.into(), + incomplete: data.stats.incomplete.into(), + interval: data.policy.interval.into(), + min_interval: data.policy.interval_min.into(), + peers: data.peers.into_iter().collect(), } } } -impl Normal { - /// Returns the bencoded body of the non-compact response. - /// - /// # Panics - /// - /// Will return an error if it can't access the bencode as a mutable `BListAccess`. - #[must_use] - pub fn body(&self) -> Vec { +#[allow(clippy::from_over_into)] +impl Into> for Normal { + fn into(self) -> Vec { let mut peers_list = ben_list!(); let peers_list_mut = peers_list.list_mut().unwrap(); for peer in &self.peers { - peers_list_mut.push(peer.ben_map()); + peers_list_mut.push(peer.into()); } (ben_map! 
{ - "complete" => ben_int!(i64::from(self.stats.complete)), - "incomplete" => ben_int!(i64::from(self.stats.incomplete)), - "interval" => ben_int!(i64::from(self.policy.interval)), - "min interval" => ben_int!(i64::from(self.policy.interval_min)), + "complete" => ben_int!(self.complete), + "incomplete" => ben_int!(self.incomplete), + "interval" => ben_int!(self.interval), + "min interval" => ben_int!(self.min_interval), "peers" => peers_list.clone() }) .encode() } } -impl IntoResponse for Normal { - fn into_response(self) -> Response { - (StatusCode::OK, self.body()).into_response() - } +/// Format of the [`Compact`] Encoding +pub struct Compact { + complete: i64, + incomplete: i64, + interval: i64, + min_interval: i64, + peers: Vec, + peers6: Vec, } -impl From for Normal { - fn from(domain_announce_response: AnnounceData) -> Self { - let peers: Vec = domain_announce_response - .peers - .iter() - .map(|peer| NormalPeer::from(*peer)) - .collect(); +impl From for Compact { + fn from(data: AnnounceData) -> Self { + let compact_peers: Vec = data.peers.into_iter().collect(); + + let (peers, peers6): (Vec>, Vec>) = + compact_peers.into_iter().collect(); + + let peers_encoded: CompactPeersEncoded = peers.into_iter().collect(); + let peers_encoded_6: CompactPeersEncoded = peers6.into_iter().collect(); Self { - policy: AnnouncePolicy { - interval: domain_announce_response.interval, - interval_min: domain_announce_response.interval_min, - }, - stats: SwarmStats { - complete: domain_announce_response.swarm_stats.complete, - incomplete: domain_announce_response.swarm_stats.incomplete, - downloaded: 0, - }, - peers, + complete: data.stats.complete.into(), + incomplete: data.stats.incomplete.into(), + interval: data.policy.interval.into(), + min_interval: data.policy.interval_min.into(), + peers: peers_encoded.0, + peers6: peers_encoded_6.0, } } } -/// Compact `announce` response. 
-/// -/// _"To reduce the size of tracker responses and to reduce memory and -/// computational requirements in trackers, trackers may return peers as a -/// packed string rather than as a bencoded list."_ +#[allow(clippy::from_over_into)] +impl Into> for Compact { + fn into(self) -> Vec { + (ben_map! { + "complete" => ben_int!(self.complete), + "incomplete" => ben_int!(self.incomplete), + "interval" => ben_int!(self.interval), + "min interval" => ben_int!(self.min_interval), + "peers" => ben_bytes!(self.peers), + "peers6" => ben_bytes!(self.peers6) + }) + .encode() + } +} + +/// Marker Trait for Peer Vectors +pub trait PeerEncoding: From + PartialEq {} + +impl FromIterator for Vec

{ + fn from_iter>(iter: T) -> Self { + let mut peers: Vec

= vec![]; + + for peer in iter { + peers.push(peer.into()); + } + + peers + } +} + +/// A [`NormalPeer`], for the [`Normal`] form. /// /// ```rust -/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; -/// use torrust_tracker_configuration::AnnouncePolicy; -/// use torrust_tracker::core::torrent::SwarmStats; -/// use torrust_tracker::servers::http::v1::responses::announce::{Compact, CompactPeer}; +/// use std::net::{IpAddr, Ipv4Addr}; +/// use torrust_tracker::servers::http::v1::responses::announce::{Normal, NormalPeer}; /// -/// let response = Compact { -/// policy: AnnouncePolicy { -/// interval: 111, -/// interval_min: 222, -/// }, -/// stats: SwarmStats { -/// downloaded: 0, -/// complete: 333, -/// incomplete: 444, -/// }, -/// peers: vec![ -/// // IPV4 -/// CompactPeer { -/// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 -/// port: 0x7070, // 28784 -/// }, -/// // IPV6 -/// CompactPeer { -/// ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), -/// port: 0x7070, // 28784 -/// }, -/// ], +/// let peer = NormalPeer { +/// peer_id: *b"-qB00000000000000001", +/// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 +/// port: 0x7070, // 28784 /// }; /// -/// let bytes = response.body().unwrap(); -/// -/// // The expected bencoded response. 
-/// let expected_bytes = -/// // cspell:disable-next-line -/// b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peers6:iiiipp6:peers618:iiiiiiiiiiiiiiiippe"; -/// -/// assert_eq!( -/// String::from_utf8(bytes).unwrap(), -/// String::from_utf8(expected_bytes.to_vec()).unwrap() -/// ); -/// ``` -/// -/// Refer to the official BEPs for more information: -/// -/// - [BEP 23: Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) -/// - [BEP 07: IPv6 Tracker Extension](https://www.bittorrent.org/beps/bep_0007.html) +/// ``` #[derive(Debug, PartialEq)] -pub struct Compact { - pub policy: AnnouncePolicy, - pub stats: SwarmStats, - pub peers: Vec, +pub struct NormalPeer { + /// The peer's ID. + pub peer_id: [u8; 20], + /// The peer's IP address. + pub ip: IpAddr, + /// The peer's port number. + pub port: u16, +} + +impl PeerEncoding for NormalPeer {} + +impl From for NormalPeer { + fn from(peer: core::peer::Peer) -> Self { + NormalPeer { + peer_id: peer.peer_id.to_bytes(), + ip: peer.peer_addr.ip(), + port: peer.peer_addr.port(), + } + } } -/// Compact peer. It's used in the [`Compact`] -/// response. +impl From<&NormalPeer> for BencodeMut<'_> { + fn from(value: &NormalPeer) -> Self { + ben_map! { + "peer id" => ben_bytes!(value.peer_id.clone().to_vec()), + "ip" => ben_bytes!(value.ip.to_string()), + "port" => ben_int!(i64::from(value.port)) + } + } +} + +/// A [`CompactPeer`], for the [`Compact`] form. /// -/// _"To reduce the size of tracker responses and to reduce memory and +/// _"To reduce the size of tracker responses and to reduce memory and /// computational requirements in trackers, trackers may return peers as a /// packed string rather than as a bencoded list."_ /// @@ -240,168 +220,107 @@ pub struct Compact { /// the peer's ID. 
/// /// ```rust -/// use std::net::{IpAddr, Ipv4Addr}; -/// use torrust_tracker::servers::http::v1::responses::announce::CompactPeer; +/// use std::net::{IpAddr, Ipv4Addr}; +/// use torrust_tracker::servers::http::v1::responses::announce::{Compact, CompactPeer, CompactPeerData}; /// -/// let compact_peer = CompactPeer { -/// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 -/// port: 0x7070 // 28784 -/// }; -/// ``` +/// let peer = CompactPeer::V4(CompactPeerData { +/// ip: Ipv4Addr::new(0x69, 0x69, 0x69, 0x69), // 105.105.105.105 +/// port: 0x7070, // 28784 +/// }); +/// +/// ``` /// /// Refer to [BEP 23: Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) /// for more information. -#[derive(Debug, PartialEq)] -pub struct CompactPeer { +#[derive(Clone, Debug, PartialEq)] +pub enum CompactPeer { /// The peer's IP address. - pub ip: IpAddr, + V4(CompactPeerData), /// The peer's port number. - pub port: u16, + V6(CompactPeerData), } -impl CompactPeer { - /// Returns the compact peer as a byte vector. - /// - /// # Errors - /// - /// Will return `Err` if internally interrupted. - pub fn bytes(&self) -> Result, Box> { - let mut bytes: Vec = Vec::new(); - match self.ip { - IpAddr::V4(ip) => { - bytes.write_all(&u32::from(ip).to_be_bytes())?; - } - IpAddr::V6(ip) => { - bytes.write_all(&u128::from(ip).to_be_bytes())?; - } - } - bytes.write_all(&self.port.to_be_bytes())?; - Ok(bytes) - } -} +impl PeerEncoding for CompactPeer {} impl From for CompactPeer { fn from(peer: core::peer::Peer) -> Self { - CompactPeer { - ip: peer.peer_addr.ip(), - port: peer.peer_addr.port(), + match (peer.peer_addr.ip(), peer.peer_addr.port()) { + (IpAddr::V4(ip), port) => Self::V4(CompactPeerData { ip, port }), + (IpAddr::V6(ip), port) => Self::V6(CompactPeerData { ip, port }), } } } -impl Compact { - /// Returns the bencoded compact response as a byte vector. - /// - /// # Errors - /// - /// Will return `Err` if internally interrupted. 
- pub fn body(&self) -> Result, Box> { - let bytes = (ben_map! { - "complete" => ben_int!(i64::from(self.stats.complete)), - "incomplete" => ben_int!(i64::from(self.stats.incomplete)), - "interval" => ben_int!(i64::from(self.policy.interval)), - "min interval" => ben_int!(i64::from(self.policy.interval_min)), - "peers" => ben_bytes!(self.peers_v4_bytes()?), - "peers6" => ben_bytes!(self.peers_v6_bytes()?) - }) - .encode(); +/// The [`CompactPeerData`], that made with either a [`Ipv4Addr`], or [`Ipv6Addr`] along with a `port`. +/// +#[derive(Clone, Debug, PartialEq)] +pub struct CompactPeerData { + /// The peer's IP address. + pub ip: V, + /// The peer's port number. + pub port: u16, +} - Ok(bytes) - } +impl FromIterator for (Vec>, Vec>) { + fn from_iter>(iter: T) -> Self { + let mut peers_v4: Vec> = vec![]; + let mut peers_v6: Vec> = vec![]; - fn peers_v4_bytes(&self) -> Result, Box> { - let mut bytes: Vec = Vec::new(); - for compact_peer in &self.peers { - match compact_peer.ip { - IpAddr::V4(_ip) => { - let peer_bytes = compact_peer.bytes()?; - bytes.write_all(&peer_bytes)?; - } - IpAddr::V6(_) => {} + for peer in iter { + match peer { + CompactPeer::V4(peer) => peers_v4.push(peer), + CompactPeer::V6(peer6) => peers_v6.push(peer6), } } - Ok(bytes) - } - fn peers_v6_bytes(&self) -> Result, Box> { - let mut bytes: Vec = Vec::new(); - for compact_peer in &self.peers { - match compact_peer.ip { - IpAddr::V6(_ip) => { - let peer_bytes = compact_peer.bytes()?; - bytes.write_all(&peer_bytes)?; - } - IpAddr::V4(_) => {} - } - } - Ok(bytes) + (peers_v4, peers_v6) } } -/// `Compact` response serialization error. 
-#[derive(Error, Debug)] -pub enum CompactSerializationError { - #[error("cannot write bytes: {inner_error} in {location}")] - CannotWriteBytes { - location: &'static Location<'static>, - inner_error: String, - }, -} +#[derive(From, PartialEq)] +struct CompactPeersEncoded(Vec); -impl From for responses::error::Error { - fn from(err: CompactSerializationError) -> Self { - responses::error::Error { - failure_reason: format!("{err}"), - } - } -} +impl FromIterator> for CompactPeersEncoded { + fn from_iter>>(iter: T) -> Self { + let mut bytes: Vec = vec![]; -impl IntoResponse for Compact { - fn into_response(self) -> Response { - match self.body() { - Ok(bytes) => (StatusCode::OK, bytes).into_response(), - Err(err) => responses::error::Error::from(CompactSerializationError::CannotWriteBytes { - location: Location::caller(), - inner_error: format!("{err}"), - }) - .into_response(), + for peer in iter { + bytes + .write_all(&u32::from(peer.ip).to_be_bytes()) + .expect("it should write peer ip"); + bytes.write_all(&peer.port.to_be_bytes()).expect("it should write peer port"); } + + bytes.into() } } -impl From for Compact { - fn from(domain_announce_response: AnnounceData) -> Self { - let peers: Vec = domain_announce_response - .peers - .iter() - .map(|peer| CompactPeer::from(*peer)) - .collect(); +impl FromIterator> for CompactPeersEncoded { + fn from_iter>>(iter: T) -> Self { + let mut bytes: Vec = Vec::new(); - Self { - policy: AnnouncePolicy { - interval: domain_announce_response.interval, - interval_min: domain_announce_response.interval_min, - }, - stats: SwarmStats { - complete: domain_announce_response.swarm_stats.complete, - incomplete: domain_announce_response.swarm_stats.incomplete, - downloaded: 0, - }, - peers, + for peer in iter { + bytes + .write_all(&u128::from(peer.ip).to_be_bytes()) + .expect("it should write peer ip"); + bytes.write_all(&peer.port.to_be_bytes()).expect("it should write peer port"); } + bytes.into() } } #[cfg(test)] mod tests { - use 
std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use torrust_tracker_configuration::AnnouncePolicy; - use super::{Normal, NormalPeer}; + use crate::core::peer::fixture::PeerBuilder; + use crate::core::peer::Id; use crate::core::torrent::SwarmStats; - use crate::servers::http::v1::responses::announce::{Compact, CompactPeer}; + use crate::core::AnnounceData; + use crate::servers::http::v1::responses::announce::{Announce, Compact, Normal, Response}; // Some ascii values used in tests: // @@ -415,35 +334,32 @@ mod tests { // IP addresses and port numbers used in tests are chosen so that their bencoded representation // is also a valid string which makes asserts more readable. + fn setup_announce_data() -> AnnounceData { + let policy = AnnouncePolicy::new(111, 222); + + let peer_ipv4 = PeerBuilder::default() + .with_peer_id(&Id(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 0x7070)) + .build(); + + let peer_ipv6 = PeerBuilder::default() + .with_peer_id(&Id(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + 0x7070, + )) + .build(); + + let peers = vec![peer_ipv4, peer_ipv6]; + let stats = SwarmStats::new(333, 333, 444); + + AnnounceData::new(peers, stats, policy) + } + #[test] - fn normal_announce_response_can_be_bencoded() { - let response = Normal { - policy: AnnouncePolicy { - interval: 111, - interval_min: 222, - }, - stats: SwarmStats { - downloaded: 0, - complete: 333, - incomplete: 444, - }, - peers: vec![ - // IPV4 - NormalPeer { - peer_id: *b"-qB00000000000000001", - ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 - port: 0x7070, // 28784 - }, - // IPV6 - NormalPeer { - peer_id: *b"-qB00000000000000002", - ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - port: 0x7070, // 
28784 - }, - ], - }; - - let bytes = response.body(); + fn non_compact_announce_response_can_be_bencoded() { + let response: Announce = setup_announce_data().into(); + let bytes = response.body().expect("it should encode the response"); // cspell:disable-next-line let expected_bytes = b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peersld2:ip15:105.105.105.1057:peer id20:-qB000000000000000014:porti28784eed2:ip39:6969:6969:6969:6969:6969:6969:6969:69697:peer id20:-qB000000000000000024:porti28784eeee"; @@ -456,31 +372,8 @@ mod tests { #[test] fn compact_announce_response_can_be_bencoded() { - let response = Compact { - policy: AnnouncePolicy { - interval: 111, - interval_min: 222, - }, - stats: SwarmStats { - downloaded: 0, - complete: 333, - incomplete: 444, - }, - peers: vec![ - // IPV4 - CompactPeer { - ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 - port: 0x7070, // 28784 - }, - // IPV6 - CompactPeer { - ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - port: 0x7070, // 28784 - }, - ], - }; - - let bytes = response.body().unwrap(); + let response: Announce = setup_announce_data().into(); + let bytes = response.body().expect("it should encode the response"); let expected_bytes = // cspell:disable-next-line diff --git a/src/servers/http/v1/responses/mod.rs b/src/servers/http/v1/responses/mod.rs index 3c6632fed..e22879c6d 100644 --- a/src/servers/http/v1/responses/mod.rs +++ b/src/servers/http/v1/responses/mod.rs @@ -5,3 +5,15 @@ pub mod announce; pub mod error; pub mod scrape; + +pub use announce::{Announce, Compact, Normal}; + +/// Trait that defines the Announce Response Format +pub trait Response: axum::response::IntoResponse { + /// Returns the Body of the Announce Response + /// + /// # Errors + /// + /// If unable to generate the response, it will return an error. 
+ fn body(self) -> Result, error::Error>; +} diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 547dcd35b..80dc1ca5b 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -94,6 +94,7 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_configuration::AnnouncePolicy; use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; @@ -113,13 +114,12 @@ mod tests { let expected_announce_data = AnnounceData { peers: vec![], - swarm_stats: SwarmStats { + stats: SwarmStats { downloaded: 0, complete: 1, incomplete: 0, }, - interval: tracker.config.announce_interval, - interval_min: tracker.config.min_announce_interval, + policy: AnnouncePolicy::default(), }; assert_eq!(announce_data, expected_announce_data); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 4e3080b37..34ebaec89 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -152,8 +152,8 @@ pub async fn handle_announce( let announce_response = AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), - leechers: NumberOfPeers(i64::from(response.swarm_stats.incomplete) as i32), - seeders: NumberOfPeers(i64::from(response.swarm_stats.complete) as i32), + leechers: NumberOfPeers(i64::from(response.stats.incomplete) as i32), + seeders: NumberOfPeers(i64::from(response.stats.complete) as i32), peers: response .peers .iter() @@ -177,8 +177,8 @@ pub async fn handle_announce( let announce_response = AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), - leechers: NumberOfPeers(i64::from(response.swarm_stats.incomplete) as i32), - seeders: 
NumberOfPeers(i64::from(response.swarm_stats.complete) as i32), + leechers: NumberOfPeers(i64::from(response.stats.incomplete) as i32), + seeders: NumberOfPeers(i64::from(response.stats.complete) as i32), peers: response .peers .iter() diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index 9fd328d5d..bbdebff76 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -1,69 +1,3 @@ -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use torrust_tracker::core::peer::{self, Id, Peer}; -use torrust_tracker::shared::clock::DurationSinceUnixEpoch; - -pub struct PeerBuilder { - peer: Peer, -} - -impl PeerBuilder { - #[allow(dead_code)] - pub fn default() -> PeerBuilder { - Self { - peer: default_peer_for_testing(), - } - } - - #[allow(dead_code)] - pub fn with_peer_id(mut self, peer_id: &Id) -> Self { - self.peer.peer_id = *peer_id; - self - } - - #[allow(dead_code)] - pub fn with_peer_addr(mut self, peer_addr: &SocketAddr) -> Self { - self.peer.peer_addr = *peer_addr; - self - } - - #[allow(dead_code)] - pub fn with_bytes_pending_to_download(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes(left); - self - } - - #[allow(dead_code)] - pub fn with_no_bytes_pending_to_download(mut self) -> Self { - self.peer.left = NumberOfBytes(0); - self - } - - #[allow(dead_code)] - pub fn build(self) -> Peer { - self.into() - } - - #[allow(dead_code)] - pub fn into(self) -> Peer { - self.peer - } -} - -#[allow(dead_code)] -fn default_peer_for_testing() -> Peer { - Peer { - peer_id: peer::Id(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - } -} - #[allow(dead_code)] pub fn invalid_info_hashes() -> Vec { [ diff --git 
a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 45f7e604a..71738f8e5 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -1,10 +1,10 @@ use std::str::FromStr; +use torrust_tracker::core::peer::fixture::PeerBuilder; use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; -use crate::common::fixtures::PeerBuilder; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::test_environment::running_test_environment; use crate::servers::api::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index 3cac55e6a..dc91e8fc5 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -1,11 +1,11 @@ use std::str::FromStr; +use torrust_tracker::core::peer::fixture::PeerBuilder; use torrust_tracker::servers::apis::v1::context::torrent::resources::peer::Peer; use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; -use crate::common::fixtures::PeerBuilder; use crate::common::http::{Query, QueryParam}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::test_environment::running_test_environment; diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 3034847db..f3d1fcef0 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -90,10 +90,11 @@ mod for_all_config_modes { use 
reqwest::{Response, StatusCode}; use tokio::net::TcpListener; use torrust_tracker::core::peer; + use torrust_tracker::core::peer::fixture::PeerBuilder; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; - use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::common::fixtures::invalid_info_hashes; use crate::servers::http::asserts::{ assert_announce_response, assert_bad_announce_request_error_response, assert_cannot_parse_query_param_error_response, assert_cannot_parse_query_params_error_response, assert_compact_announce_response, assert_empty_announce_response, @@ -884,10 +885,11 @@ mod for_all_config_modes { use tokio::net::TcpListener; use torrust_tracker::core::peer; + use torrust_tracker::core::peer::fixture::PeerBuilder; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; - use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::common::fixtures::invalid_info_hashes; use crate::servers::http::asserts::{ assert_cannot_parse_query_params_error_response, assert_missing_query_params_for_scrape_request_error_response, assert_scrape_response, @@ -1160,10 +1162,10 @@ mod configured_as_whitelisted { use std::str::FromStr; use torrust_tracker::core::peer; + use torrust_tracker::core::peer::fixture::PeerBuilder; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; - use crate::common::fixtures::PeerBuilder; use crate::servers::http::asserts::assert_scrape_response; use crate::servers::http::client::Client; use crate::servers::http::requests; @@ -1333,10 +1335,10 @@ mod configured_as_private { use torrust_tracker::core::auth::Key; use torrust_tracker::core::peer; + use torrust_tracker::core::peer::fixture::PeerBuilder; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; - use 
crate::common::fixtures::PeerBuilder; use crate::servers::http::asserts::{assert_authentication_error_response, assert_scrape_response}; use crate::servers::http::client::Client; use crate::servers::http::requests; From 3e2b1525e951837cf2dbe486f27ae121e8b1b262 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 16 Jan 2024 09:16:17 +0000 Subject: [PATCH 0682/1003] feat: [#604] add timeout to http_health_check binary --- src/bin/http_health_check.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/bin/http_health_check.rs b/src/bin/http_health_check.rs index d66d334df..b7c6dfa41 100644 --- a/src/bin/http_health_check.rs +++ b/src/bin/http_health_check.rs @@ -4,8 +4,11 @@ //! //! - They are harder to maintain. //! - They introduce new attack vectors. +use std::time::Duration; use std::{env, process}; +use reqwest::Client; + #[tokio::main] async fn main() { let args: Vec = env::args().collect(); @@ -19,7 +22,9 @@ async fn main() { let url = &args[1].clone(); - match reqwest::get(url).await { + let client = Client::builder().timeout(Duration::from_secs(5)).build().unwrap(); + + match client.get(url).send().await { Ok(response) => { if response.status().is_success() { println!("STATUS: {}", response.status()); From fec9716be89a0b004ab2d9c7a2dd8f27365b9a66 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 16 Jan 2024 09:50:42 +0000 Subject: [PATCH 0683/1003] feat: [#609] add timeout to UDP tracker requests --- src/servers/udp/server.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 22cdf6357..a15226bd2 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -20,6 +20,7 @@ use std::io::Cursor; use std::net::SocketAddr; use std::sync::Arc; +use std::time::Duration; use aquatic_udp_protocol::Response; use derive_more::Constructor; @@ -240,9 +241,16 @@ impl Udp { debug!(target: "UDP Tracker", "From: {}", &remote_addr); debug!(target: 
"UDP Tracker", "Payload: {:?}", payload); - let response = handle_packet(remote_addr, payload, &tracker).await; + let response_fut = handle_packet(remote_addr, payload, &tracker); - Udp::send_response(socket_clone, remote_addr, response).await; + match tokio::time::timeout(Duration::from_secs(5), response_fut).await { + Ok(response) => { + Udp::send_response(socket_clone, remote_addr, response).await; + } + Err(_) => { + error!("Timeout occurred while processing the UDP request."); + } + } } Err(err) => { error!("Error reading UDP datagram from socket. Error: {:?}", err); From 470e6083c507ab76fc4c6dd1a98b3cf2991ff4d3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 16 Jan 2024 12:53:31 +0000 Subject: [PATCH 0684/1003] feat: a simple HTTP tracker client command You can execute it with: ``` cargo run --bin http_tracker_client https://tracker.torrust-demo.com 9c38422213e30bff212b30c360d26f9a02136422" ``` and the output should be something like: ```json{ "complete": 1, "incomplete": 1, "interval": 300, "min interval": 300, "peers": [ { "ip": "90.XX.XX.167", "peer id": [ 45, 66, 76, 50, 52, 54, 51, 54, 51, 45, 51, 70, 41, 46, 114, 46, 68, 100, 74, 69 ], "port": 59568 } ] } ``` --- Cargo.toml | 4 +- .../config/tracker.development.sqlite3.toml | 10 +- src/bin/http_tracker_client.rs | 35 +++ src/shared/bit_torrent/mod.rs | 1 + .../bit_torrent/tracker/http/client/mod.rs | 125 ++++++++ .../tracker/http/client/requests/announce.rs | 275 ++++++++++++++++++ .../tracker/http/client/requests/mod.rs | 2 + .../tracker/http/client/requests/scrape.rs | 124 ++++++++ .../tracker/http/client/responses/announce.rs | 126 ++++++++ .../tracker/http/client/responses/error.rs | 7 + .../tracker/http/client/responses/mod.rs | 3 + .../tracker/http/client/responses/scrape.rs | 203 +++++++++++++ src/shared/bit_torrent/tracker/http/mod.rs | 26 ++ src/shared/bit_torrent/tracker/mod.rs | 1 + 14 files changed, 935 insertions(+), 7 deletions(-) create mode 100644 src/bin/http_tracker_client.rs 
create mode 100644 src/shared/bit_torrent/tracker/http/client/mod.rs create mode 100644 src/shared/bit_torrent/tracker/http/client/requests/announce.rs create mode 100644 src/shared/bit_torrent/tracker/http/client/requests/mod.rs create mode 100644 src/shared/bit_torrent/tracker/http/client/requests/scrape.rs create mode 100644 src/shared/bit_torrent/tracker/http/client/responses/announce.rs create mode 100644 src/shared/bit_torrent/tracker/http/client/responses/error.rs create mode 100644 src/shared/bit_torrent/tracker/http/client/responses/mod.rs create mode 100644 src/shared/bit_torrent/tracker/http/client/responses/scrape.rs create mode 100644 src/shared/bit_torrent/tracker/http/mod.rs create mode 100644 src/shared/bit_torrent/tracker/mod.rs diff --git a/Cargo.toml b/Cargo.toml index daf3c0259..671d66e98 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -54,8 +54,10 @@ rand = "0" reqwest = "0" serde = { version = "1", features = ["derive"] } serde_bencode = "0" +serde_bytes = "0" serde_json = "1" serde_with = "3" +serde_repr = "0" tdyne-peer-id = "1" tdyne-peer-id-registry = "0" thiserror = "1" @@ -73,8 +75,6 @@ local-ip-address = "0" mockall = "0" once_cell = "1.18.0" reqwest = { version = "0", features = ["json"] } -serde_bytes = "0" -serde_repr = "0" serde_urlencoded = "0" torrust-tracker-test-helpers = { version = "3.0.0-alpha.12-develop", path = "packages/test-helpers" } diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 04934dd8a..e26aa6c6c 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -13,18 +13,18 @@ remove_peerless_torrents = true tracker_usage_statistics = true [[udp_trackers]] -bind_address = "0.0.0.0:6969" -enabled = false +bind_address = "0.0.0.0:0" +enabled = true [[http_trackers]] -bind_address = "0.0.0.0:7070" -enabled = false +bind_address = "0.0.0.0:0" +enabled = true ssl_cert_path = "" 
ssl_enabled = false ssl_key_path = "" [http_api] -bind_address = "127.0.0.1:1212" +bind_address = "127.0.0.1:0" enabled = true ssl_cert_path = "" ssl_enabled = false diff --git a/src/bin/http_tracker_client.rs b/src/bin/http_tracker_client.rs new file mode 100644 index 000000000..1f1154fa5 --- /dev/null +++ b/src/bin/http_tracker_client.rs @@ -0,0 +1,35 @@ +use std::env; +use std::str::FromStr; + +use reqwest::Url; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; +use torrust_tracker::shared::bit_torrent::tracker::http::client::responses::announce::Announce; +use torrust_tracker::shared::bit_torrent::tracker::http::client::Client; + +#[tokio::main] +async fn main() { + let args: Vec = env::args().collect(); + if args.len() != 3 { + eprintln!("Error: invalid number of arguments!"); + eprintln!("Usage: cargo run --bin http_tracker_client "); + eprintln!("Example: cargo run --bin http_tracker_client https://tracker.torrust-demo.com 9c38422213e30bff212b30c360d26f9a02136422"); + std::process::exit(1); + } + + let base_url = Url::parse(&args[1]).expect("arg 1 should be a valid HTTP tracker base URL"); + let info_hash = InfoHash::from_str(&args[2]).expect("arg 2 should be a valid infohash"); + + let response = Client::new(base_url) + .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) + .await; + + let body = response.bytes().await.unwrap(); + + let announce_response: Announce = serde_bencode::from_bytes(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{:#?}\"", &body)); + + let json = serde_json::to_string(&announce_response).expect("announce response should be a valid JSON"); + + print!("{json}"); +} diff --git a/src/shared/bit_torrent/mod.rs b/src/shared/bit_torrent/mod.rs index 872203a1f..3dcf705e4 100644 --- a/src/shared/bit_torrent/mod.rs +++ b/src/shared/bit_torrent/mod.rs @@ 
-69,4 +69,5 @@ //!Bencode & bdecode in your browser | pub mod common; pub mod info_hash; +pub mod tracker; pub mod udp; diff --git a/src/shared/bit_torrent/tracker/http/client/mod.rs b/src/shared/bit_torrent/tracker/http/client/mod.rs new file mode 100644 index 000000000..a75b0fec3 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/mod.rs @@ -0,0 +1,125 @@ +pub mod requests; +pub mod responses; + +use std::net::IpAddr; + +use requests::announce::{self, Query}; +use requests::scrape; +use reqwest::{Client as ReqwestClient, Response, Url}; + +use crate::core::auth::Key; + +/// HTTP Tracker Client +pub struct Client { + base_url: Url, + reqwest: ReqwestClient, + key: Option, +} + +/// URL components in this context: +/// +/// ```text +/// http://127.0.0.1:62304/announce/YZ....rJ?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// \_____________________/\_______________/ \__________________________________________________________/ +/// | | | +/// base url path query +/// ``` +impl Client { + /// # Panics + /// + /// This method fails if the client builder fails. + #[must_use] + pub fn new(base_url: Url) -> Self { + Self { + base_url, + reqwest: reqwest::Client::builder().build().unwrap(), + key: None, + } + } + + /// Creates the new client binding it to an specific local address. + /// + /// # Panics + /// + /// This method fails if the client builder fails. + #[must_use] + pub fn bind(base_url: Url, local_address: IpAddr) -> Self { + Self { + base_url, + reqwest: reqwest::Client::builder().local_address(local_address).build().unwrap(), + key: None, + } + } + + /// # Panics + /// + /// This method fails if the client builder fails. 
+ #[must_use] + pub fn authenticated(base_url: Url, key: Key) -> Self { + Self { + base_url, + reqwest: reqwest::Client::builder().build().unwrap(), + key: Some(key), + } + } + + pub async fn announce(&self, query: &announce::Query) -> Response { + self.get(&self.build_announce_path_and_query(query)).await + } + + pub async fn scrape(&self, query: &scrape::Query) -> Response { + self.get(&self.build_scrape_path_and_query(query)).await + } + + pub async fn announce_with_header(&self, query: &Query, key: &str, value: &str) -> Response { + self.get_with_header(&self.build_announce_path_and_query(query), key, value) + .await + } + + pub async fn health_check(&self) -> Response { + self.get(&self.build_path("health_check")).await + } + + /// # Panics + /// + /// This method fails if there was an error while sending request. + pub async fn get(&self, path: &str) -> Response { + self.reqwest.get(self.build_url(path)).send().await.unwrap() + } + + /// # Panics + /// + /// This method fails if there was an error while sending request. 
+ pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Response { + self.reqwest + .get(self.build_url(path)) + .header(key, value) + .send() + .await + .unwrap() + } + + fn build_announce_path_and_query(&self, query: &announce::Query) -> String { + format!("{}?{query}", self.build_path("announce")) + } + + fn build_scrape_path_and_query(&self, query: &scrape::Query) -> String { + format!("{}?{query}", self.build_path("scrape")) + } + + fn build_path(&self, path: &str) -> String { + match &self.key { + Some(key) => format!("{path}/{key}"), + None => path.to_string(), + } + } + + fn build_url(&self, path: &str) -> String { + let base_url = self.base_url(); + format!("{base_url}{path}") + } + + fn base_url(&self) -> String { + self.base_url.to_string() + } +} diff --git a/src/shared/bit_torrent/tracker/http/client/requests/announce.rs b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs new file mode 100644 index 000000000..6cae79888 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs @@ -0,0 +1,275 @@ +use std::fmt; +use std::net::{IpAddr, Ipv4Addr}; +use std::str::FromStr; + +use serde_repr::Serialize_repr; + +use crate::core::peer::Id; +use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20}; + +pub struct Query { + pub info_hash: ByteArray20, + pub peer_addr: IpAddr, + pub downloaded: BaseTenASCII, + pub uploaded: BaseTenASCII, + pub peer_id: ByteArray20, + pub port: PortNumber, + pub left: BaseTenASCII, + pub event: Option, + pub compact: Option, +} + +impl fmt::Display for Query { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.build()) + } +} + +/// HTTP Tracker Announce Request: +/// +/// +/// +/// Some parameters in the specification are not implemented in this tracker yet. +impl Query { + /// It builds the URL query component for the announce request. 
+ /// + /// This custom URL query params encoding is needed because `reqwest` does not allow + /// bytes arrays in query parameters. More info on this issue: + /// + /// + #[must_use] + pub fn build(&self) -> String { + self.params().to_string() + } + + #[must_use] + pub fn params(&self) -> QueryParams { + QueryParams::from(self) + } +} + +pub type BaseTenASCII = u64; +pub type PortNumber = u16; + +pub enum Event { + //Started, + //Stopped, + Completed, +} + +impl fmt::Display for Event { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + //Event::Started => write!(f, "started"), + //Event::Stopped => write!(f, "stopped"), + Event::Completed => write!(f, "completed"), + } + } +} + +#[derive(Serialize_repr, PartialEq, Debug)] +#[repr(u8)] +pub enum Compact { + Accepted = 1, + NotAccepted = 0, +} + +impl fmt::Display for Compact { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Compact::Accepted => write!(f, "1"), + Compact::NotAccepted => write!(f, "0"), + } + } +} + +pub struct QueryBuilder { + announce_query: Query, +} + +impl QueryBuilder { + /// # Panics + /// + /// Will panic if the default info-hash value is not a valid info-hash. 
+ #[must_use] + pub fn with_default_values() -> QueryBuilder { + let default_announce_query = Query { + info_hash: InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0, // # DevSkim: ignore DS173237 + peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), + downloaded: 0, + uploaded: 0, + peer_id: Id(*b"-qB00000000000000001").0, + port: 17548, + left: 0, + event: Some(Event::Completed), + compact: Some(Compact::NotAccepted), + }; + Self { + announce_query: default_announce_query, + } + } + + #[must_use] + pub fn with_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.announce_query.info_hash = info_hash.0; + self + } + + #[must_use] + pub fn with_peer_id(mut self, peer_id: &Id) -> Self { + self.announce_query.peer_id = peer_id.0; + self + } + + #[must_use] + pub fn with_compact(mut self, compact: Compact) -> Self { + self.announce_query.compact = Some(compact); + self + } + + #[must_use] + pub fn with_peer_addr(mut self, peer_addr: &IpAddr) -> Self { + self.announce_query.peer_addr = *peer_addr; + self + } + + #[must_use] + pub fn without_compact(mut self) -> Self { + self.announce_query.compact = None; + self + } + + #[must_use] + pub fn query(self) -> Query { + self.announce_query + } +} + +/// It contains all the GET parameters that can be used in a HTTP Announce request. +/// +/// Sample Announce URL with all the GET parameters (mandatory and optional): +/// +/// ```text +/// http://127.0.0.1:7070/announce? 
+/// info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 (mandatory) +/// peer_addr=192.168.1.88 +/// downloaded=0 +/// uploaded=0 +/// peer_id=%2DqB00000000000000000 (mandatory) +/// port=17548 (mandatory) +/// left=0 +/// event=completed +/// compact=0 +/// ``` +pub struct QueryParams { + pub info_hash: Option, + pub peer_addr: Option, + pub downloaded: Option, + pub uploaded: Option, + pub peer_id: Option, + pub port: Option, + pub left: Option, + pub event: Option, + pub compact: Option, +} + +impl std::fmt::Display for QueryParams { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut params = vec![]; + + if let Some(info_hash) = &self.info_hash { + params.push(("info_hash", info_hash)); + } + if let Some(peer_addr) = &self.peer_addr { + params.push(("peer_addr", peer_addr)); + } + if let Some(downloaded) = &self.downloaded { + params.push(("downloaded", downloaded)); + } + if let Some(uploaded) = &self.uploaded { + params.push(("uploaded", uploaded)); + } + if let Some(peer_id) = &self.peer_id { + params.push(("peer_id", peer_id)); + } + if let Some(port) = &self.port { + params.push(("port", port)); + } + if let Some(left) = &self.left { + params.push(("left", left)); + } + if let Some(event) = &self.event { + params.push(("event", event)); + } + if let Some(compact) = &self.compact { + params.push(("compact", compact)); + } + + let query = params + .iter() + .map(|param| format!("{}={}", param.0, param.1)) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl QueryParams { + pub fn from(announce_query: &Query) -> Self { + let event = announce_query.event.as_ref().map(std::string::ToString::to_string); + let compact = announce_query.compact.as_ref().map(std::string::ToString::to_string); + + Self { + info_hash: Some(percent_encode_byte_array(&announce_query.info_hash)), + peer_addr: Some(announce_query.peer_addr.to_string()), + downloaded: Some(announce_query.downloaded.to_string()), + uploaded: 
Some(announce_query.uploaded.to_string()), + peer_id: Some(percent_encode_byte_array(&announce_query.peer_id)), + port: Some(announce_query.port.to_string()), + left: Some(announce_query.left.to_string()), + event, + compact, + } + } + + pub fn remove_optional_params(&mut self) { + // todo: make them optional with the Option<...> in the AnnounceQuery struct + // if they are really optional. So that we can create a minimal AnnounceQuery + // instead of removing the optional params afterwards. + // + // The original specification on: + // + // says only `ip` and `event` are optional. + // + // On + // says only `ip`, `numwant`, `key` and `trackerid` are optional. + // + // but the server is responding if all these params are not included. + self.peer_addr = None; + self.downloaded = None; + self.uploaded = None; + self.left = None; + self.event = None; + self.compact = None; + } + + /// # Panics + /// + /// Will panic if invalid param name is provided. + pub fn set(&mut self, param_name: &str, param_value: &str) { + match param_name { + "info_hash" => self.info_hash = Some(param_value.to_string()), + "peer_addr" => self.peer_addr = Some(param_value.to_string()), + "downloaded" => self.downloaded = Some(param_value.to_string()), + "uploaded" => self.uploaded = Some(param_value.to_string()), + "peer_id" => self.peer_id = Some(param_value.to_string()), + "port" => self.port = Some(param_value.to_string()), + "left" => self.left = Some(param_value.to_string()), + "event" => self.event = Some(param_value.to_string()), + "compact" => self.compact = Some(param_value.to_string()), + &_ => panic!("Invalid param name for announce query"), + } + } +} diff --git a/src/shared/bit_torrent/tracker/http/client/requests/mod.rs b/src/shared/bit_torrent/tracker/http/client/requests/mod.rs new file mode 100644 index 000000000..776d2dfbf --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/requests/mod.rs @@ -0,0 +1,2 @@ +pub mod announce; +pub mod scrape; diff --git
a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs new file mode 100644 index 000000000..e2563b8ed --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs @@ -0,0 +1,124 @@ +use std::fmt; +use std::str::FromStr; + +use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20}; + +pub struct Query { + pub info_hash: Vec, +} + +impl fmt::Display for Query { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.build()) + } +} + +/// HTTP Tracker Scrape Request: +/// +/// +impl Query { + /// It builds the URL query component for the scrape request. + /// + /// This custom URL query params encoding is needed because `reqwest` does not allow + /// bytes arrays in query parameters. More info on this issue: + /// + /// + #[must_use] + pub fn build(&self) -> String { + self.params().to_string() + } + + #[must_use] + pub fn params(&self) -> QueryParams { + QueryParams::from(self) + } +} + +pub struct QueryBuilder { + scrape_query: Query, +} + +impl Default for QueryBuilder { + fn default() -> Self { + let default_scrape_query = Query { + info_hash: [InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0].to_vec(), // # DevSkim: ignore DS173237 + }; + Self { + scrape_query: default_scrape_query, + } + } +} + +impl QueryBuilder { + #[must_use] + pub fn with_one_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.scrape_query.info_hash = [info_hash.0].to_vec(); + self + } + + #[must_use] + pub fn add_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.scrape_query.info_hash.push(info_hash.0); + self + } + + #[must_use] + pub fn query(self) -> Query { + self.scrape_query + } +} + +/// It contains all the GET parameters that can be used in a HTTP Scrape request. 
+/// +/// The `info_hash` param is the percent-encoded form of the 20-byte array info hash. +/// +/// Sample Scrape URL with all the GET parameters: +/// +/// For `IpV4`: +/// +/// ```text +/// http://127.0.0.1:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// ``` +/// +/// For `IpV6`: +/// +/// ```text +/// http://[::1]:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// ``` +/// +/// You can add as many info hashes as you want, just adding the same param again. pub struct QueryParams { + pub info_hash: Vec, +} + +impl QueryParams { + pub fn set_one_info_hash_param(&mut self, info_hash: &str) { + self.info_hash = vec![info_hash.to_string()]; + } +} + +impl std::fmt::Display for QueryParams { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let query = self + .info_hash + .iter() + .map(|info_hash| format!("info_hash={}", &info_hash)) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl QueryParams { + pub fn from(scrape_query: &Query) -> Self { + let info_hashes = scrape_query + .info_hash + .iter() + .map(percent_encode_byte_array) + .collect::>(); + + Self { info_hash: info_hashes } + } +} diff --git a/src/shared/bit_torrent/tracker/http/client/responses/announce.rs b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs new file mode 100644 index 000000000..f68c54482 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs @@ -0,0 +1,126 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use serde::{self, Deserialize, Serialize}; + +use crate::core::peer::Peer; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Announce { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub min_interval: u32, + pub peers: Vec, // Peers using IPV4 and IPV6 +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct DictionaryPeer { + pub ip: String, + #[serde(rename
= "peer id")] + #[serde(with = "serde_bytes")] + pub peer_id: Vec, + pub port: u16, +} + +impl From for DictionaryPeer { + fn from(peer: Peer) -> Self { + DictionaryPeer { + peer_id: peer.peer_id.to_bytes().to_vec(), + ip: peer.peer_addr.ip().to_string(), + port: peer.peer_addr.port(), + } + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct DeserializedCompact { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub min_interval: u32, + #[serde(with = "serde_bytes")] + pub peers: Vec, +} + +impl DeserializedCompact { + /// # Errors + /// + /// Will return an error if bytes can't be deserialized. + pub fn from_bytes(bytes: &[u8]) -> Result { + serde_bencode::from_bytes::(bytes) + } +} + +#[derive(Debug, PartialEq)] +pub struct Compact { + // code-review: there could be a way to deserialize this struct directly + // by using serde instead of doing it manually. Or at least using a custom deserializer. + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + pub min_interval: u32, + pub peers: CompactPeerList, +} + +#[derive(Debug, PartialEq)] +pub struct CompactPeerList { + peers: Vec, +} + +impl CompactPeerList { + #[must_use] + pub fn new(peers: Vec) -> Self { + Self { peers } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub struct CompactPeer { + ip: Ipv4Addr, + port: u16, +} + +impl CompactPeer { + /// # Panics + /// + /// Will panic if the provided socket address is a IPv6 IP address. + /// It's not supported for compact peers. 
+ #[must_use] + pub fn new(socket_addr: &SocketAddr) -> Self { + match socket_addr.ip() { + IpAddr::V4(ip) => Self { + ip, + port: socket_addr.port(), + }, + IpAddr::V6(_ip) => panic!("IPV6 is not supported for compact peer"), + } + } + + #[must_use] + pub fn new_from_bytes(bytes: &[u8]) -> Self { + Self { + ip: Ipv4Addr::new(bytes[0], bytes[1], bytes[2], bytes[3]), + port: u16::from_be_bytes([bytes[4], bytes[5]]), + } + } +} + +impl From for Compact { + fn from(compact_announce: DeserializedCompact) -> Self { + let mut peers = vec![]; + + for peer_bytes in compact_announce.peers.chunks_exact(6) { + peers.push(CompactPeer::new_from_bytes(peer_bytes)); + } + + Self { + complete: compact_announce.complete, + incomplete: compact_announce.incomplete, + interval: compact_announce.interval, + min_interval: compact_announce.min_interval, + peers: CompactPeerList::new(peers), + } + } +} diff --git a/src/shared/bit_torrent/tracker/http/client/responses/error.rs b/src/shared/bit_torrent/tracker/http/client/responses/error.rs new file mode 100644 index 000000000..12c53a0cf --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/responses/error.rs @@ -0,0 +1,7 @@ +use serde::{self, Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Error { + #[serde(rename = "failure reason")] + pub failure_reason: String, +} diff --git a/src/shared/bit_torrent/tracker/http/client/responses/mod.rs b/src/shared/bit_torrent/tracker/http/client/responses/mod.rs new file mode 100644 index 000000000..bdc689056 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/responses/mod.rs @@ -0,0 +1,3 @@ +pub mod announce; +pub mod error; +pub mod scrape; diff --git a/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs b/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs new file mode 100644 index 000000000..ae06841e4 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs @@ -0,0 +1,203 @@ +use 
std::collections::HashMap; +use std::str; + +use serde::{self, Deserialize, Serialize}; +use serde_bencode::value::Value; + +use crate::shared::bit_torrent::tracker::http::{ByteArray20, InfoHash}; + +#[derive(Debug, PartialEq, Default)] +pub struct Response { + pub files: HashMap, +} + +impl Response { + #[must_use] + pub fn with_one_file(info_hash_bytes: ByteArray20, file: File) -> Self { + let mut files: HashMap = HashMap::new(); + files.insert(info_hash_bytes, file); + Self { files } + } + + /// # Errors + /// + /// Will return an error if the deserialized bencoded response can't not be converted into a valid response. + /// + /// # Panics + /// + /// Will panic if it can't deserialize the bencoded response. + pub fn try_from_bencoded(bytes: &[u8]) -> Result { + let scrape_response: DeserializedResponse = + serde_bencode::from_bytes(bytes).expect("provided bytes should be a valid bencoded response"); + Self::try_from(scrape_response) + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Default)] +pub struct File { + pub complete: i64, // The number of active peers that have completed downloading + pub downloaded: i64, // The number of peers that have ever completed downloading + pub incomplete: i64, // The number of active peers that have not completed downloading +} + +impl File { + #[must_use] + pub fn zeroed() -> Self { + Self::default() + } +} + +impl TryFrom for Response { + type Error = BencodeParseError; + + fn try_from(scrape_response: DeserializedResponse) -> Result { + parse_bencoded_response(&scrape_response.files) + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +struct DeserializedResponse { + pub files: Value, +} + +#[derive(Default)] +pub struct ResponseBuilder { + response: Response, +} + +impl ResponseBuilder { + #[must_use] + pub fn add_file(mut self, info_hash_bytes: ByteArray20, file: File) -> Self { + self.response.files.insert(info_hash_bytes, file); + self + } + + #[must_use] + pub fn build(self) -> Response { + 
self.response + } +} + +#[derive(Debug)] +pub enum BencodeParseError { + InvalidValueExpectedDict { value: Value }, + InvalidValueExpectedInt { value: Value }, + InvalidFileField { value: Value }, + MissingFileField { field_name: String }, +} + +/// It parses a bencoded scrape response into a `Response` struct. +/// +/// For example: +/// +/// ```text +/// d5:filesd20:xxxxxxxxxxxxxxxxxxxxd8:completei11e10:downloadedi13772e10:incompletei19e +/// 20:yyyyyyyyyyyyyyyyyyyyd8:completei21e10:downloadedi206e10:incompletei20eee +/// ``` +/// +/// Response (JSON encoded for readability): +/// +/// ```text +/// { +/// 'files': { +/// 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, +/// 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} +/// } +/// } +fn parse_bencoded_response(value: &Value) -> Result { + let mut files: HashMap = HashMap::new(); + + match value { + Value::Dict(dict) => { + for file_element in dict { + let info_hash_byte_vec = file_element.0; + let file_value = file_element.1; + + let file = parse_bencoded_file(file_value).unwrap(); + + files.insert(InfoHash::new(info_hash_byte_vec).bytes(), file); + } + } + _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), + } + + Ok(Response { files }) +} + +/// It parses a bencoded dictionary into a `File` struct. 
+/// +/// For example: +/// +/// +/// ```text +/// d8:completei11e10:downloadedi13772e10:incompletei19ee +/// ``` +/// +/// into: +/// +/// ```text +/// File { +/// complete: 11, +/// downloaded: 13772, +/// incomplete: 19, +/// } +/// ``` +fn parse_bencoded_file(value: &Value) -> Result { + let file = match &value { + Value::Dict(dict) => { + let mut complete = None; + let mut downloaded = None; + let mut incomplete = None; + + for file_field in dict { + let field_name = file_field.0; + + let field_value = match file_field.1 { + Value::Int(number) => Ok(*number), + _ => Err(BencodeParseError::InvalidValueExpectedInt { + value: file_field.1.clone(), + }), + }?; + + if field_name == b"complete" { + complete = Some(field_value); + } else if field_name == b"downloaded" { + downloaded = Some(field_value); + } else if field_name == b"incomplete" { + incomplete = Some(field_value); + } else { + return Err(BencodeParseError::InvalidFileField { + value: file_field.1.clone(), + }); + } + } + + if complete.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "complete".to_string(), + }); + } + + if downloaded.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "downloaded".to_string(), + }); + } + + if incomplete.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "incomplete".to_string(), + }); + } + + File { + complete: complete.unwrap(), + downloaded: downloaded.unwrap(), + incomplete: incomplete.unwrap(), + } + } + _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), + }; + + Ok(file) +} diff --git a/src/shared/bit_torrent/tracker/http/mod.rs b/src/shared/bit_torrent/tracker/http/mod.rs new file mode 100644 index 000000000..15723c1b7 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/mod.rs @@ -0,0 +1,26 @@ +pub mod client; + +use percent_encoding::NON_ALPHANUMERIC; + +pub type ByteArray20 = [u8; 20]; + +#[must_use] +pub fn percent_encode_byte_array(bytes: 
&ByteArray20) -> String { + percent_encoding::percent_encode(bytes, NON_ALPHANUMERIC).to_string() +} + +pub struct InfoHash(ByteArray20); + +impl InfoHash { + #[must_use] + pub fn new(vec: &[u8]) -> Self { + let mut byte_array_20: ByteArray20 = Default::default(); + byte_array_20.clone_from_slice(vec); + Self(byte_array_20) + } + + #[must_use] + pub fn bytes(&self) -> ByteArray20 { + self.0 + } +} diff --git a/src/shared/bit_torrent/tracker/mod.rs b/src/shared/bit_torrent/tracker/mod.rs new file mode 100644 index 000000000..3883215fc --- /dev/null +++ b/src/shared/bit_torrent/tracker/mod.rs @@ -0,0 +1 @@ +pub mod http; From 129fd2f26549fec74f0bf76b4b11b2c2a3a3c9f7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 17 Jan 2024 11:08:43 +0000 Subject: [PATCH 0685/1003] refactor: move upd tracker client to follow same folder strucutre as the HTTP tracker client. --- src/servers/health_check_api/handlers.rs | 2 +- src/servers/udp/server.rs | 2 +- src/shared/bit_torrent/mod.rs | 1 - src/shared/bit_torrent/tracker/mod.rs | 1 + src/shared/bit_torrent/{ => tracker}/udp/client.rs | 2 +- src/shared/bit_torrent/{ => tracker}/udp/mod.rs | 0 tests/servers/udp/contract.rs | 10 +++++----- 7 files changed, 9 insertions(+), 9 deletions(-) rename src/shared/bit_torrent/{ => tracker}/udp/client.rs (97%) rename src/shared/bit_torrent/{ => tracker}/udp/mod.rs (100%) diff --git a/src/servers/health_check_api/handlers.rs b/src/servers/health_check_api/handlers.rs index 2f47c8607..4403676af 100644 --- a/src/servers/health_check_api/handlers.rs +++ b/src/servers/health_check_api/handlers.rs @@ -8,7 +8,7 @@ use torrust_tracker_configuration::{Configuration, HttpApi, HttpTracker, UdpTrac use super::resources::Report; use super::responses; -use crate::shared::bit_torrent::udp::client::new_udp_tracker_client_connected; +use crate::shared::bit_torrent::tracker::udp::client::new_udp_tracker_client_connected; /// If port 0 is specified in the configuration the OS will automatically /// assign 
a free port. But we do now know in from the configuration. diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index a15226bd2..001603b08 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -34,7 +34,7 @@ use crate::bootstrap::jobs::Started; use crate::core::Tracker; use crate::servers::signals::{shutdown_signal_with_message, Halted}; use crate::servers::udp::handlers::handle_packet; -use crate::shared::bit_torrent::udp::MAX_PACKET_SIZE; +use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; /// Error that can occur when starting or stopping the UDP server. /// diff --git a/src/shared/bit_torrent/mod.rs b/src/shared/bit_torrent/mod.rs index 3dcf705e4..8074661be 100644 --- a/src/shared/bit_torrent/mod.rs +++ b/src/shared/bit_torrent/mod.rs @@ -70,4 +70,3 @@ pub mod common; pub mod info_hash; pub mod tracker; -pub mod udp; diff --git a/src/shared/bit_torrent/tracker/mod.rs b/src/shared/bit_torrent/tracker/mod.rs index 3883215fc..b08eaa622 100644 --- a/src/shared/bit_torrent/tracker/mod.rs +++ b/src/shared/bit_torrent/tracker/mod.rs @@ -1 +1,2 @@ pub mod http; +pub mod udp; diff --git a/src/shared/bit_torrent/udp/client.rs b/src/shared/bit_torrent/tracker/udp/client.rs similarity index 97% rename from src/shared/bit_torrent/udp/client.rs rename to src/shared/bit_torrent/tracker/udp/client.rs index d5c4c9adf..5ea982663 100644 --- a/src/shared/bit_torrent/udp/client.rs +++ b/src/shared/bit_torrent/tracker/udp/client.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use aquatic_udp_protocol::{Request, Response}; use tokio::net::UdpSocket; -use crate::shared::bit_torrent::udp::{source_address, MAX_PACKET_SIZE}; +use crate::shared::bit_torrent::tracker::udp::{source_address, MAX_PACKET_SIZE}; #[allow(clippy::module_name_repetitions)] pub struct UdpClient { diff --git a/src/shared/bit_torrent/udp/mod.rs b/src/shared/bit_torrent/tracker/udp/mod.rs similarity index 100% rename from src/shared/bit_torrent/udp/mod.rs rename to 
src/shared/bit_torrent/tracker/udp/mod.rs diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index 72124fc3f..b16a47cd3 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -6,8 +6,8 @@ use core::panic; use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; -use torrust_tracker::shared::bit_torrent::udp::client::{new_udp_client_connected, UdpTrackerClient}; -use torrust_tracker::shared::bit_torrent::udp::MAX_PACKET_SIZE; +use torrust_tracker::shared::bit_torrent::tracker::udp::client::{new_udp_client_connected, UdpTrackerClient}; +use torrust_tracker::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_error_response; @@ -51,7 +51,7 @@ async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_req mod receiving_a_connection_request { use aquatic_udp_protocol::{ConnectRequest, TransactionId}; - use torrust_tracker::shared::bit_torrent::udp::client::new_udp_tracker_client_connected; + use torrust_tracker::shared::bit_torrent::tracker::udp::client::new_udp_tracker_client_connected; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_connect_response; @@ -82,7 +82,7 @@ mod receiving_an_announce_request { AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, TransactionId, }; - use torrust_tracker::shared::bit_torrent::udp::client::new_udp_tracker_client_connected; + use torrust_tracker::shared::bit_torrent::tracker::udp::client::new_udp_tracker_client_connected; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_ipv4_announce_response; @@ -124,7 +124,7 @@ mod receiving_an_announce_request { mod receiving_an_scrape_request { use aquatic_udp_protocol::{ConnectionId, InfoHash, ScrapeRequest, TransactionId}; - use 
torrust_tracker::shared::bit_torrent::udp::client::new_udp_tracker_client_connected; + use torrust_tracker::shared::bit_torrent::tracker::udp::client::new_udp_tracker_client_connected; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_scrape_response; From 3b492570a24e87e45e22c0f0240a9ba92cb803b2 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 3 Jan 2024 15:58:40 +1100 Subject: [PATCH 0686/1003] dev: extract config from core::tracker --- packages/configuration/src/lib.rs | 18 ++---- src/bootstrap/app.rs | 12 ++-- src/bootstrap/jobs/torrent_cleanup.rs | 2 +- src/bootstrap/jobs/tracker_apis.rs | 15 +++-- src/core/mod.rs | 70 +++++++++++++++++------- src/core/services/mod.rs | 4 +- src/core/services/statistics/mod.rs | 6 +- src/core/services/torrent.rs | 22 ++++---- src/main.rs | 2 +- src/servers/apis/routes.rs | 11 ++-- src/servers/apis/server.rs | 22 ++++++-- src/servers/apis/v1/middlewares/auth.rs | 19 ++++--- src/servers/http/v1/handlers/announce.rs | 10 ++-- src/servers/http/v1/handlers/scrape.rs | 10 ++-- src/servers/http/v1/services/announce.rs | 27 +++------ src/servers/http/v1/services/scrape.rs | 42 +++----------- src/servers/udp/handlers.rs | 46 ++++++++-------- tests/servers/api/test_environment.rs | 26 ++++----- tests/servers/http/v1/contract.rs | 22 +++++--- 19 files changed, 202 insertions(+), 184 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index a8f605289..4b81aed8b 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -229,7 +229,7 @@ //! [health_check_api] //! bind_address = "127.0.0.1:1313" //!``` -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::net::IpAddr; use std::str::FromStr; use std::sync::Arc; @@ -337,6 +337,8 @@ pub struct HttpTracker { pub ssl_key_path: Option, } +pub type AccessTokens = HashMap; + /// Configuration for the HTTP API. 
#[serde_as] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] @@ -360,21 +362,13 @@ pub struct HttpApi { /// token and the value is the token itself. The token is used to /// authenticate the user. All tokens are valid for all endpoints and have /// the all permissions. - pub access_tokens: HashMap, + pub access_tokens: AccessTokens, } impl HttpApi { fn override_admin_token(&mut self, api_admin_token: &str) { self.access_tokens.insert("admin".to_string(), api_admin_token.to_string()); } - - /// Checks if the given token is one of the token in the configuration. - #[must_use] - pub fn contains_token(&self, token: &str) -> bool { - let tokens: HashMap = self.access_tokens.clone(); - let tokens: HashSet = tokens.into_values().collect(); - tokens.contains(token) - } } /// Configuration for the Health Check API. @@ -804,7 +798,7 @@ mod tests { fn http_api_configuration_should_check_if_it_contains_a_token() { let configuration = Configuration::default(); - assert!(configuration.http_api.contains_token("MyAccessToken")); - assert!(!configuration.http_api.contains_token("NonExistingToken")); + assert!(configuration.http_api.access_tokens.values().any(|t| t == "MyAccessToken")); + assert!(!configuration.http_api.access_tokens.values().any(|t| t == "NonExistingToken")); } } diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 4a6f79a96..09b624566 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -24,8 +24,8 @@ use crate::shared::crypto::ephemeral_instance_keys; /// It loads the configuration from the environment and builds the main domain [`Tracker`] struct. 
#[must_use] -pub fn setup() -> (Arc, Arc) { - let configuration = Arc::new(initialize_configuration()); +pub fn setup() -> (Configuration, Arc) { + let configuration = initialize_configuration(); let tracker = initialize_with_configuration(&configuration); (configuration, tracker) @@ -35,7 +35,7 @@ pub fn setup() -> (Arc, Arc) { /// /// The configuration may be obtained from the environment (via config file or env vars). #[must_use] -pub fn initialize_with_configuration(configuration: &Arc) -> Arc { +pub fn initialize_with_configuration(configuration: &Configuration) -> Arc { initialize_static(); initialize_logging(configuration); Arc::new(initialize_tracker(configuration)) @@ -60,13 +60,13 @@ pub fn initialize_static() { /// The tracker is the domain layer service. It's the entrypoint to make requests to the domain layer. /// It's used by other higher-level components like the UDP and HTTP trackers or the tracker API. #[must_use] -pub fn initialize_tracker(config: &Arc) -> Tracker { - tracker_factory(config.clone()) +pub fn initialize_tracker(config: &Configuration) -> Tracker { + tracker_factory(config) } /// It initializes the log level, format and channel. /// /// See [the logging setup](crate::bootstrap::logging::setup) for more info about logging. -pub fn initialize_logging(config: &Arc) { +pub fn initialize_logging(config: &Configuration) { bootstrap::logging::setup(config); } diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index d3b084d31..6647e0249 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -25,7 +25,7 @@ use crate::core; /// /// Refer to [`torrust-tracker-configuration documentation`](https://docs.rs/torrust-tracker-configuration) for more info about that option. 
#[must_use] -pub fn start_job(config: &Arc, tracker: &Arc) -> JoinHandle<()> { +pub fn start_job(config: &Configuration, tracker: &Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(tracker); let interval = config.inactive_peer_cleanup_interval; diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index e50a83651..43cb5de8e 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -26,7 +26,7 @@ use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use log::info; use tokio::task::JoinHandle; -use torrust_tracker_configuration::HttpApi; +use torrust_tracker_configuration::{AccessTokens, HttpApi}; use super::make_rust_tls; use crate::core; @@ -64,8 +64,10 @@ pub async fn start_job(config: &HttpApi, tracker: Arc, version: V .await .map(|tls| tls.expect("it should have a valid tracker api tls configuration")); + let access_tokens = Arc::new(config.access_tokens.clone()); + match version { - Version::V1 => Some(start_v1(bind_to, tls, tracker.clone()).await), + Version::V1 => Some(start_v1(bind_to, tls, tracker.clone(), access_tokens).await), } } else { info!("Note: Not loading Http Tracker Service, Not Enabled in Configuration."); @@ -73,9 +75,14 @@ pub async fn start_job(config: &HttpApi, tracker: Arc, version: V } } -async fn start_v1(socket: SocketAddr, tls: Option, tracker: Arc) -> JoinHandle<()> { +async fn start_v1( + socket: SocketAddr, + tls: Option, + tracker: Arc, + access_tokens: Arc, +) -> JoinHandle<()> { let server = ApiServer::new(Launcher::new(socket, tls)) - .start(tracker) + .start(tracker, access_tokens) .await .expect("it should be able to start to the tracker api"); diff --git a/src/core/mod.rs b/src/core/mod.rs index fc44877c8..dac298462 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -447,6 +447,7 @@ use std::time::Duration; use derive_more::Constructor; use futures::future::join_all; +use log::debug; use tokio::sync::mpsc::error::SendError; use 
torrust_tracker_configuration::{AnnouncePolicy, Configuration}; use torrust_tracker_primitives::TrackerMode; @@ -472,17 +473,19 @@ pub const TORRENT_PEERS_LIMIT: usize = 74; /// Typically, the `Tracker` is used by a higher application service that handles /// the network layer. pub struct Tracker { - /// `Tracker` configuration. See [`torrust-tracker-configuration`](torrust_tracker_configuration) - pub config: Arc, + announce_policy: AnnouncePolicy, /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) /// or [`MySQL`](crate::core::databases::mysql) pub database: Arc>, mode: TrackerMode, + policy: TrackerPolicy, keys: tokio::sync::RwLock>, whitelist: tokio::sync::RwLock>, pub torrents: Arc, stats_event_sender: Option>, stats_repository: statistics::Repo, + external_ip: Option, + on_reverse_proxy: bool, } /// Structure that holds general `Tracker` torrents metrics. @@ -500,6 +503,12 @@ pub struct TorrentsMetrics { pub torrents: u64, } +#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] +pub struct TrackerPolicy { + pub remove_peerless_torrents: bool, + pub max_peer_timeout: u32, + pub persistent_torrent_completed_stat: bool, +} /// Structure that holds the data returned by the `announce` request. #[derive(Clone, Debug, PartialEq, Constructor, Default)] pub struct AnnounceData { @@ -556,7 +565,7 @@ impl Tracker { /// /// Will return a `databases::error::Error` if unable to connect to database. The `Tracker` is responsible for the persistence. 
pub fn new( - config: Arc, + config: &Configuration, stats_event_sender: Option>, stats_repository: statistics::Repo, ) -> Result { @@ -565,7 +574,8 @@ impl Tracker { let mode = config.mode; Ok(Tracker { - config, + //config, + announce_policy: AnnouncePolicy::new(config.announce_interval, config.min_announce_interval), mode, keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), @@ -573,6 +583,13 @@ impl Tracker { stats_event_sender, stats_repository, database, + external_ip: config.get_ext_ip(), + policy: TrackerPolicy::new( + config.remove_peerless_torrents, + config.max_peer_timeout, + config.persistent_torrent_completed_stat, + ), + on_reverse_proxy: config.on_reverse_proxy, }) } @@ -596,6 +613,19 @@ impl Tracker { self.is_private() } + /// Returns `true` is the tracker is in whitelisted mode. + pub fn is_behind_reverse_proxy(&self) -> bool { + self.on_reverse_proxy + } + + pub fn get_announce_policy(&self) -> AnnouncePolicy { + self.announce_policy + } + + pub fn get_maybe_external_ip(&self) -> Option { + self.external_ip + } + /// It handles an announce request. /// /// # Context: Tracker @@ -617,18 +647,19 @@ impl Tracker { // we are actually handling authentication at the handlers level. So I would extract that // responsibility into another authentication service. - peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip())); + debug!("Before: {peer:?}"); + peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.external_ip)); + debug!("After: {peer:?}"); - let swarm_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + // we should update the torrent and get the stats before we get the peer list. 
+ let stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; let peers = self.get_torrent_peers_for_peer(info_hash, peer).await; - let policy = AnnouncePolicy::new(self.config.announce_interval, self.config.min_announce_interval); - AnnounceData { peers, - stats: swarm_stats, - policy, + stats, + policy: self.get_announce_policy(), } } @@ -727,7 +758,7 @@ impl Tracker { let (stats, stats_updated) = self.torrents.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - if self.config.persistent_torrent_completed_stat && stats_updated { + if self.policy.persistent_torrent_completed_stat && stats_updated { let completed = stats.downloaded; let info_hash = *info_hash; @@ -788,17 +819,17 @@ impl Tracker { let mut torrents_lock = self.torrents.get_torrents_mut().await; // If we don't need to remove torrents we will use the faster iter - if self.config.remove_peerless_torrents { + if self.policy.remove_peerless_torrents { let mut cleaned_torrents_map: BTreeMap = BTreeMap::new(); for (info_hash, torrent_entry) in &mut *torrents_lock { - torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); + torrent_entry.remove_inactive_peers(self.policy.max_peer_timeout); if torrent_entry.peers.is_empty() { continue; } - if self.config.persistent_torrent_completed_stat && torrent_entry.completed == 0 { + if self.policy.persistent_torrent_completed_stat && torrent_entry.completed == 0 { continue; } @@ -808,7 +839,7 @@ impl Tracker { *torrents_lock = cleaned_torrents_map; } else { for torrent_entry in (*torrents_lock).values_mut() { - torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); + torrent_entry.remove_inactive_peers(self.policy.max_peer_timeout); } } } @@ -1061,7 +1092,6 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; - use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; @@ -1073,21 +1103,21 @@ mod tests { use 
crate::shared::clock::DurationSinceUnixEpoch; fn public_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_public().into()) + tracker_factory(&configuration::ephemeral_mode_public()) } fn private_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_private().into()) + tracker_factory(&configuration::ephemeral_mode_private()) } fn whitelisted_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_whitelisted().into()) + tracker_factory(&configuration::ephemeral_mode_whitelisted()) } pub fn tracker_persisting_torrents_in_database() -> Tracker { let mut configuration = configuration::ephemeral(); configuration.persistent_torrent_completed_stat = true; - tracker_factory(Arc::new(configuration)) + tracker_factory(&configuration) } fn sample_info_hash() -> InfoHash { diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index f5868fc26..76c6a36f6 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -19,12 +19,12 @@ use crate::core::Tracker; /// /// Will panic if tracker cannot be instantiated. 
#[must_use] -pub fn tracker_factory(config: Arc) -> Tracker { +pub fn tracker_factory(config: &Configuration) -> Tracker { // Initialize statistics let (stats_event_sender, stats_repository) = statistics::setup::factory(config.tracker_usage_statistics); // Initialize Torrust tracker - match Tracker::new(config, stats_event_sender, stats_repository) { + match Tracker::new(&Arc::new(config), stats_event_sender, stats_repository) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index f74df62e5..3578c53aa 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -92,13 +92,13 @@ mod tests { use crate::core::services::statistics::{get_metrics, TrackerMetrics}; use crate::core::services::tracker_factory; - pub fn tracker_configuration() -> Arc { - Arc::new(configuration::ephemeral()) + pub fn tracker_configuration() -> Configuration { + configuration::ephemeral() } #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { - let tracker = Arc::new(tracker_factory(tracker_configuration())); + let tracker = Arc::new(tracker_factory(&tracker_configuration())); let tracker_metrics = get_metrics(tracker.clone()).await; diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index f88cf5b50..d1ab29a7f 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -168,13 +168,13 @@ mod tests { use crate::core::services::tracker_factory; use crate::shared::bit_torrent::info_hash::InfoHash; - pub fn tracker_configuration() -> Arc { - Arc::new(configuration::ephemeral()) + pub fn tracker_configuration() -> Configuration { + configuration::ephemeral() } #[tokio::test] async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { - let tracker = Arc::new(tracker_factory(tracker_configuration())); + let tracker = Arc::new(tracker_factory(&tracker_configuration())); let 
torrent_info = get_torrent_info( tracker.clone(), @@ -187,7 +187,7 @@ mod tests { #[tokio::test] async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { - let tracker = Arc::new(tracker_factory(tracker_configuration())); + let tracker = Arc::new(tracker_factory(&tracker_configuration())); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -223,13 +223,13 @@ mod tests { use crate::core::services::tracker_factory; use crate::shared::bit_torrent::info_hash::InfoHash; - pub fn tracker_configuration() -> Arc { - Arc::new(configuration::ephemeral()) + pub fn tracker_configuration() -> Configuration { + configuration::ephemeral() } #[tokio::test] async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { - let tracker = Arc::new(tracker_factory(tracker_configuration())); + let tracker = Arc::new(tracker_factory(&tracker_configuration())); let torrents = get_torrents(tracker.clone(), &Pagination::default()).await; @@ -238,7 +238,7 @@ mod tests { #[tokio::test] async fn should_return_a_summarized_info_for_all_torrents() { - let tracker = Arc::new(tracker_factory(tracker_configuration())); + let tracker = Arc::new(tracker_factory(&tracker_configuration())); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -262,7 +262,7 @@ mod tests { #[tokio::test] async fn should_allow_limiting_the_number_of_torrents_in_the_result() { - let tracker = Arc::new(tracker_factory(tracker_configuration())); + let tracker = Arc::new(tracker_factory(&tracker_configuration())); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -286,7 +286,7 @@ mod tests { #[tokio::test] async fn should_allow_using_pagination_in_the_result() { - let tracker = Arc::new(tracker_factory(tracker_configuration())); + let tracker = 
Arc::new(tracker_factory(&tracker_configuration())); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -319,7 +319,7 @@ mod tests { #[tokio::test] async fn should_return_torrents_ordered_by_info_hash() { - let tracker = Arc::new(tracker_factory(tracker_configuration())); + let tracker = Arc::new(tracker_factory(&tracker_configuration())); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); diff --git a/src/main.rs b/src/main.rs index 87c0fc367..5c65f8e07 100644 --- a/src/main.rs +++ b/src/main.rs @@ -5,7 +5,7 @@ use torrust_tracker::{app, bootstrap}; async fn main() { let (config, tracker) = bootstrap::app::setup(); - let jobs = app::start(config.clone(), tracker.clone()).await; + let jobs = app::start(config.into(), tracker.clone()).await; // handle the signals tokio::select! { diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index fef412f91..227916335 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -9,26 +9,27 @@ use std::sync::Arc; use axum::routing::get; use axum::{middleware, Router}; +use torrust_tracker_configuration::AccessTokens; use tower_http::compression::CompressionLayer; use super::v1; use super::v1::context::health_check::handlers::health_check_handler; +use super::v1::middlewares::auth::State; use crate::core::Tracker; /// Add all API routes to the router. 
#[allow(clippy::needless_pass_by_value)] -pub fn router(tracker: Arc) -> Router { +pub fn router(tracker: Arc, access_tokens: Arc) -> Router { let router = Router::new(); let api_url_prefix = "/api"; let router = v1::routes::add(api_url_prefix, router, tracker.clone()); + let state = State { access_tokens }; + router - .layer(middleware::from_fn_with_state( - tracker.config.clone(), - v1::middlewares::auth::auth, - )) + .layer(middleware::from_fn_with_state(state, v1::middlewares::auth::auth)) .route(&format!("{api_url_prefix}/health_check"), get(health_check_handler)) .layer(CompressionLayer::new()) } diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index f4fdf8994..d26362f66 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -32,6 +32,7 @@ use derive_more::Constructor; use futures::future::BoxFuture; use log::{error, info}; use tokio::sync::oneshot::{Receiver, Sender}; +use torrust_tracker_configuration::AccessTokens; use super::routes::router; use crate::bootstrap::jobs::Started; @@ -91,14 +92,14 @@ impl ApiServer { /// # Panics /// /// It would panic if the bound socket address cannot be sent back to this starter. - pub async fn start(self, tracker: Arc) -> Result, Error> { + pub async fn start(self, tracker: Arc, access_tokens: Arc) -> Result, Error> { let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); let launcher = self.state.launcher; let task = tokio::spawn(async move { - launcher.start(tracker, tx_start, rx_halt).await; + launcher.start(tracker, access_tokens, tx_start, rx_halt).await; launcher }); @@ -159,8 +160,14 @@ impl Launcher { /// /// Will panic if unable to bind to the socket, or unable to get the address of the bound socket. /// Will also panic if unable to send message regarding the bound socket address. 
- pub fn start(&self, tracker: Arc, tx_start: Sender, rx_halt: Receiver) -> BoxFuture<'static, ()> { - let router = router(tracker); + pub fn start( + &self, + tracker: Arc, + access_tokens: Arc, + tx_start: Sender, + rx_halt: Receiver, + ) -> BoxFuture<'static, ()> { + let router = router(tracker, access_tokens); let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); @@ -227,8 +234,13 @@ mod tests { .await .map(|tls| tls.expect("tls config failed")); + let access_tokens = Arc::new(config.access_tokens.clone()); + let stopped = ApiServer::new(Launcher::new(bind_to, tls)); - let started = stopped.start(tracker).await.expect("it should start the server"); + let started = stopped + .start(tracker, access_tokens) + .await + .expect("it should start the server"); let stopped = started.stop().await.expect("it should stop the server"); assert_eq!(stopped.state.launcher.bind_to, bind_to); diff --git a/src/servers/apis/v1/middlewares/auth.rs b/src/servers/apis/v1/middlewares/auth.rs index 7749b3b34..58219c7ca 100644 --- a/src/servers/apis/v1/middlewares/auth.rs +++ b/src/servers/apis/v1/middlewares/auth.rs @@ -23,12 +23,12 @@ //! identify the token. use std::sync::Arc; -use axum::extract::{Query, State}; +use axum::extract::{self}; use axum::http::Request; use axum::middleware::Next; use axum::response::{IntoResponse, Response}; use serde::Deserialize; -use torrust_tracker_configuration::{Configuration, HttpApi}; +use torrust_tracker_configuration::AccessTokens; use crate::servers::apis::v1::responses::unhandled_rejection_response; @@ -38,11 +38,16 @@ pub struct QueryParams { pub token: Option, } +#[derive(Clone, Debug)] +pub struct State { + pub access_tokens: Arc, +} + /// Middleware for authentication using a "token" GET param. 
/// The token must be one of the tokens in the tracker [HTTP API configuration](torrust_tracker_configuration::HttpApi). pub async fn auth( - State(config): State>, - Query(params): Query, + extract::State(state): extract::State, + extract::Query(params): extract::Query, request: Request, next: Next, ) -> Response { @@ -50,7 +55,7 @@ pub async fn auth( return AuthError::Unauthorized.into_response(); }; - if !authenticate(&token, &config.http_api) { + if !authenticate(&token, &state.access_tokens) { return AuthError::TokenNotValid.into_response(); } @@ -73,8 +78,8 @@ impl IntoResponse for AuthError { } } -fn authenticate(token: &str, http_api_config: &HttpApi) -> bool { - http_api_config.contains_token(token) +fn authenticate(token: &str, tokens: &AccessTokens) -> bool { + tokens.values().any(|t| t == token) } /// `500` error response returned when the token is missing. diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index cfe422e7f..be2085613 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -104,7 +104,7 @@ async fn handle_announce( Err(error) => return Err(responses::error::Error::from(error)), } - let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, client_ip_sources) { + let peer_ip = match peer_ip_resolver::invoke(tracker.is_behind_reverse_proxy(), client_ip_sources) { Ok(peer_ip) => peer_ip, Err(error) => return Err(responses::error::Error::from(error)), }; @@ -166,19 +166,19 @@ mod tests { use crate::shared::bit_torrent::info_hash::InfoHash; fn private_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_private().into()) + tracker_factory(&configuration::ephemeral_mode_private()) } fn whitelisted_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_whitelisted().into()) + tracker_factory(&configuration::ephemeral_mode_whitelisted()) } fn tracker_on_reverse_proxy() -> Tracker { - 
tracker_factory(configuration::ephemeral_with_reverse_proxy().into()) + tracker_factory(&configuration::ephemeral_with_reverse_proxy()) } fn tracker_not_on_reverse_proxy() -> Tracker { - tracker_factory(configuration::ephemeral_without_reverse_proxy().into()) + tracker_factory(&configuration::ephemeral_without_reverse_proxy()) } fn sample_announce_request() -> Announce { diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 298d47383..49b1aebc7 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -90,7 +90,7 @@ async fn handle_scrape( // Authorization for scrape requests is handled at the `Tracker` level // for each torrent. - let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, client_ip_sources) { + let peer_ip = match peer_ip_resolver::invoke(tracker.is_behind_reverse_proxy(), client_ip_sources) { Ok(peer_ip) => peer_ip, Err(error) => return Err(responses::error::Error::from(error)), }; @@ -121,19 +121,19 @@ mod tests { use crate::shared::bit_torrent::info_hash::InfoHash; fn private_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_private().into()) + tracker_factory(&configuration::ephemeral_mode_private()) } fn whitelisted_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_whitelisted().into()) + tracker_factory(&configuration::ephemeral_mode_whitelisted()) } fn tracker_on_reverse_proxy() -> Tracker { - tracker_factory(configuration::ephemeral_with_reverse_proxy().into()) + tracker_factory(&configuration::ephemeral_with_reverse_proxy()) } fn tracker_not_on_reverse_proxy() -> Tracker { - tracker_factory(configuration::ephemeral_without_reverse_proxy().into()) + tracker_factory(&configuration::ephemeral_without_reverse_proxy()) } fn sample_scrape_request() -> Scrape { diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 80dc1ca5b..b791defd7 100644 --- 
a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -56,7 +56,7 @@ mod tests { use crate::shared::clock::DurationSinceUnixEpoch; fn public_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_public().into()) + tracker_factory(&configuration::ephemeral_mode_public()) } fn sample_info_hash() -> InfoHash { @@ -94,7 +94,6 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; - use torrust_tracker_configuration::AnnouncePolicy; use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; @@ -119,7 +118,7 @@ mod tests { complete: 1, incomplete: 0, }, - policy: AnnouncePolicy::default(), + policy: tracker.get_announce_policy(), }; assert_eq!(announce_data, expected_announce_data); @@ -135,14 +134,8 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = Arc::new( - Tracker::new( - Arc::new(configuration::ephemeral()), - Some(stats_event_sender), - statistics::Repo::new(), - ) - .unwrap(), - ); + let tracker = + Arc::new(Tracker::new(&configuration::ephemeral(), Some(stats_event_sender), statistics::Repo::new()).unwrap()); let mut peer = sample_peer_using_ipv4(); @@ -154,7 +147,7 @@ mod tests { configuration.external_ip = Some(IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)).to_string()); - Tracker::new(Arc::new(configuration), Some(stats_event_sender), statistics::Repo::new()).unwrap() + Tracker::new(&configuration, Some(stats_event_sender), statistics::Repo::new()).unwrap() } fn peer_with_the_ipv4_loopback_ip() -> Peer { @@ -199,14 +192,8 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = Arc::new( - Tracker::new( - Arc::new(configuration::ephemeral()), - Some(stats_event_sender), - statistics::Repo::new(), - ) - .unwrap(), - ); + 
let tracker = + Arc::new(Tracker::new(&configuration::ephemeral(), Some(stats_event_sender), statistics::Repo::new()).unwrap()); let mut peer = sample_peer_using_ipv6(); diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index c2fa104de..82ca15dc8 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -69,7 +69,7 @@ mod tests { use crate::shared::clock::DurationSinceUnixEpoch; fn public_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_public().into()) + tracker_factory(&configuration::ephemeral_mode_public()) } fn sample_info_hashes() -> Vec { @@ -145,14 +145,8 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = Arc::new( - Tracker::new( - Arc::new(configuration::ephemeral()), - Some(stats_event_sender), - statistics::Repo::new(), - ) - .unwrap(), - ); + let tracker = + Arc::new(Tracker::new(&configuration::ephemeral(), Some(stats_event_sender), statistics::Repo::new()).unwrap()); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); @@ -169,14 +163,8 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = Arc::new( - Tracker::new( - Arc::new(configuration::ephemeral()), - Some(stats_event_sender), - statistics::Repo::new(), - ) - .unwrap(), - ); + let tracker = + Arc::new(Tracker::new(&configuration::ephemeral(), Some(stats_event_sender), statistics::Repo::new()).unwrap()); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); @@ -228,14 +216,8 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = Arc::new( - Tracker::new( - Arc::new(configuration::ephemeral()), - Some(stats_event_sender), - statistics::Repo::new(), - ) - .unwrap(), - 
); + let tracker = + Arc::new(Tracker::new(&configuration::ephemeral(), Some(stats_event_sender), statistics::Repo::new()).unwrap()); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); @@ -252,14 +234,8 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = Arc::new( - Tracker::new( - Arc::new(configuration::ephemeral()), - Some(stats_event_sender), - statistics::Repo::new(), - ) - .unwrap(), - ); + let tracker = + Arc::new(Tracker::new(&configuration::ephemeral(), Some(stats_event_sender), statistics::Repo::new()).unwrap()); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 34ebaec89..b77cd3a42 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -151,7 +151,7 @@ pub async fn handle_announce( if remote_addr.is_ipv4() { let announce_response = AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, - announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), + announce_interval: AnnounceInterval(i64::from(tracker.get_announce_policy().interval) as i32), leechers: NumberOfPeers(i64::from(response.stats.incomplete) as i32), seeders: NumberOfPeers(i64::from(response.stats.complete) as i32), peers: response @@ -176,7 +176,7 @@ pub async fn handle_announce( } else { let announce_response = AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, - announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), + announce_interval: AnnounceInterval(i64::from(tracker.get_announce_policy().interval) as i32), leechers: NumberOfPeers(i64::from(response.stats.incomplete) as i32), seeders: NumberOfPeers(i64::from(response.stats.complete) as i32), peers: response @@ -282,8 +282,8 @@ mod tests { use 
crate::core::{peer, Tracker}; use crate::shared::clock::{Current, Time}; - fn tracker_configuration() -> Arc { - Arc::new(default_testing_tracker_configuration()) + fn tracker_configuration() -> Configuration { + default_testing_tracker_configuration() } fn default_testing_tracker_configuration() -> Configuration { @@ -291,18 +291,18 @@ mod tests { } fn public_tracker() -> Arc { - initialized_tracker(configuration::ephemeral_mode_public().into()) + initialized_tracker(&configuration::ephemeral_mode_public()) } fn private_tracker() -> Arc { - initialized_tracker(configuration::ephemeral_mode_private().into()) + initialized_tracker(&configuration::ephemeral_mode_private()) } fn whitelisted_tracker() -> Arc { - initialized_tracker(configuration::ephemeral_mode_whitelisted().into()) + initialized_tracker(&configuration::ephemeral_mode_whitelisted()) } - fn initialized_tracker(configuration: Arc) -> Arc { + fn initialized_tracker(configuration: &Configuration) -> Arc { tracker_factory(configuration).into() } @@ -452,8 +452,9 @@ mod tests { let client_socket_address = sample_ipv4_socket_address(); - let torrent_tracker = - Arc::new(core::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap()); + let torrent_tracker = Arc::new( + core::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + ); handle_connect(client_socket_address, &sample_connect_request(), &torrent_tracker) .await .unwrap(); @@ -469,8 +470,9 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let torrent_tracker = - Arc::new(core::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap()); + let torrent_tracker = Arc::new( + core::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + ); handle_connect(sample_ipv6_remote_addr(), 
&sample_connect_request(), &torrent_tracker) .await .unwrap(); @@ -710,7 +712,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - core::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_announce( @@ -756,12 +758,11 @@ mod tests { let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; - let external_ip_in_tracker_configuration = - tracker.config.external_ip.clone().unwrap().parse::().unwrap(); + let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); let expected_peer = TorrentPeerBuilder::default() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(SocketAddr::new(IpAddr::V4(external_ip_in_tracker_configuration), client_port)) + .with_peer_addr(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) .into(); assert_eq!(peers[0], expected_peer); @@ -938,7 +939,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - core::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); let remote_addr = sample_ipv6_remote_addr(); @@ -968,7 +969,7 @@ mod tests { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); let tracker = - Arc::new(core::Tracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()); + Arc::new(core::Tracker::new(&configuration, Some(stats_event_sender), stats_repository).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -994,8 +995,9 @@ mod tests { let peers = 
tracker.get_torrent_peers(&info_hash.0.into()).await; - let _external_ip_in_tracker_configuration = - tracker.config.external_ip.clone().unwrap().parse::().unwrap(); + let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); + + assert!(external_ip_in_tracker_configuration.is_ipv6()); // There's a special type of IPv6 addresses that provide compatibility with IPv4. // The last 32 bits of these addresses represent an IPv4, and are represented like this: @@ -1246,7 +1248,7 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( - core::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) @@ -1278,7 +1280,7 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( - core::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) diff --git a/tests/servers/api/test_environment.rs b/tests/servers/api/test_environment.rs index 166bfd7d1..c6878c674 100644 --- a/tests/servers/api/test_environment.rs +++ b/tests/servers/api/test_environment.rs @@ -1,13 +1,12 @@ -use std::net::SocketAddr; use std::sync::Arc; -use axum_server::tls_rustls::RustlsConfig; use futures::executor::block_on; use torrust_tracker::bootstrap::jobs::make_rust_tls; use torrust_tracker::core::peer::Peer; use torrust_tracker::core::Tracker; use torrust_tracker::servers::apis::server::{ApiServer, Launcher, RunningApiServer, StoppedApiServer}; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_configuration::HttpApi; use 
super::connection_info::ConnectionInfo; use crate::common::app::setup_with_configuration; @@ -18,7 +17,7 @@ pub type StoppedTestEnvironment = TestEnvironment; pub type RunningTestEnvironment = TestEnvironment; pub struct TestEnvironment { - pub cfg: Arc, + pub config: Arc, pub tracker: Arc, pub state: S, } @@ -41,9 +40,10 @@ impl TestEnvironment { impl TestEnvironment { pub fn new(cfg: torrust_tracker_configuration::Configuration) -> Self { - let tracker = setup_with_configuration(&Arc::new(cfg)); + let cfg = Arc::new(cfg); + let tracker = setup_with_configuration(&cfg); - let config = tracker.config.http_api.clone(); + let config = Arc::new(cfg.http_api.clone()); let bind_to = config .bind_address @@ -53,25 +53,23 @@ impl TestEnvironment { let tls = block_on(make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path)) .map(|tls| tls.expect("tls config failed")); - Self::new_stopped(tracker, bind_to, tls) - } - - pub fn new_stopped(tracker: Arc, bind_to: SocketAddr, tls: Option) -> Self { let api_server = api_server(Launcher::new(bind_to, tls)); Self { - cfg: tracker.config.clone(), + config, tracker, state: Stopped { api_server }, } } pub async fn start(self) -> TestEnvironment { + let access_tokens = Arc::new(self.config.access_tokens.clone()); + TestEnvironment { - cfg: self.cfg, + config: self.config, tracker: self.tracker.clone(), state: Running { - api_server: self.state.api_server.start(self.tracker).await.unwrap(), + api_server: self.state.api_server.start(self.tracker, access_tokens).await.unwrap(), }, } } @@ -90,7 +88,7 @@ impl TestEnvironment { pub async fn stop(self) -> TestEnvironment { TestEnvironment { - cfg: self.cfg, + config: self.config, tracker: self.tracker, state: Stopped { api_server: self.state.api_server.stop().await.unwrap(), @@ -101,7 +99,7 @@ impl TestEnvironment { pub fn get_connection_info(&self) -> ConnectionInfo { ConnectionInfo { bind_address: self.state.api_server.state.binding.to_string(), - api_token: 
self.cfg.http_api.access_tokens.get("admin").cloned(), + api_token: self.config.access_tokens.get("admin").cloned(), } } } diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index f3d1fcef0..e394779ad 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -387,13 +387,15 @@ mod for_all_config_modes { ) .await; + let announce_policy = test_env.tracker.get_announce_policy(); + assert_announce_response( response, &Announce { complete: 1, // the peer for this test incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, + interval: announce_policy.interval, + min_interval: announce_policy.interval_min, peers: vec![], }, ) @@ -426,14 +428,16 @@ mod for_all_config_modes { ) .await; + let announce_policy = test_env.tracker.get_announce_policy(); + // It should only contain the previously announced peer assert_announce_response( response, &Announce { complete: 2, incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, + interval: announce_policy.interval, + min_interval: announce_policy.interval_min, peers: vec![DictionaryPeer::from(previously_announced_peer)], }, ) @@ -475,6 +479,8 @@ mod for_all_config_modes { ) .await; + let announce_policy = test_env.tracker.get_announce_policy(); + // The newly announced peer is not included on the response peer list, // but all the previously announced peers should be included regardless the IP version they are using. 
assert_announce_response( @@ -482,8 +488,8 @@ mod for_all_config_modes { &Announce { complete: 3, incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, + interval: announce_policy.interval, + min_interval: announce_policy.interval_min, peers: vec![DictionaryPeer::from(peer_using_ipv4), DictionaryPeer::from(peer_using_ipv6)], }, ) @@ -787,7 +793,7 @@ mod for_all_config_modes { let peers = test_env.tracker.get_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); + assert_eq!(peer_addr.ip(), test_env.tracker.get_maybe_external_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); test_env.stop().await; @@ -826,7 +832,7 @@ mod for_all_config_modes { let peers = test_env.tracker.get_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); + assert_eq!(peer_addr.ip(), test_env.tracker.get_maybe_external_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); test_env.stop().await; From b310c7558f5717145f1b1aa14cf6dd7839722c45 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 5 Jan 2024 17:21:06 +1100 Subject: [PATCH 0687/1003] dev: extract config from health check --- cSpell.json | 2 + src/app.rs | 29 +++- src/bootstrap/jobs/health_check_api.rs | 9 +- src/bootstrap/jobs/http_tracker.rs | 22 ++- src/bootstrap/jobs/tracker_apis.rs | 16 +- src/bootstrap/jobs/udp_tracker.rs | 5 +- src/main.rs | 2 +- src/servers/apis/server.rs | 65 ++++++-- src/servers/health_check_api/handlers.rs | 150 ++++-------------- src/servers/health_check_api/resources.rs | 31 +++- src/servers/health_check_api/responses.rs | 10 +- src/servers/health_check_api/server.rs | 7 +- src/servers/http/server.rs | 40 ++++- src/servers/mod.rs | 1 + src/servers/registar.rs | 95 +++++++++++ 
src/servers/udp/server.rs | 26 ++- src/shared/bit_torrent/tracker/udp/client.rs | 26 ++- tests/servers/api/test_environment.rs | 8 +- tests/servers/health_check_api/contract.rs | 9 +- .../health_check_api/test_environment.rs | 9 +- tests/servers/http/test_environment.rs | 8 +- tests/servers/udp/test_environment.rs | 5 +- 22 files changed, 392 insertions(+), 183 deletions(-) create mode 100644 src/servers/registar.rs diff --git a/cSpell.json b/cSpell.json index 7b3ce4de9..e02c6ed87 100644 --- a/cSpell.json +++ b/cSpell.json @@ -34,6 +34,7 @@ "Cyberneering", "datagram", "datetime", + "Deque", "Dijke", "distroless", "dockerhub", @@ -91,6 +92,7 @@ "Rasterbar", "realpath", "reannounce", + "Registar", "repr", "reqwest", "rerequests", diff --git a/src/app.rs b/src/app.rs index 3608aa22e..3ec9806d3 100644 --- a/src/app.rs +++ b/src/app.rs @@ -28,6 +28,7 @@ use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; +use crate::servers::registar::Registar; use crate::{core, servers}; /// # Panics @@ -36,9 +37,11 @@ use crate::{core, servers}; /// /// - Can't retrieve tracker keys from database. /// - Can't load whitelist from database. 
-pub async fn start(config: Arc, tracker: Arc) -> Vec> { +pub async fn start(config: &Configuration, tracker: Arc) -> Vec> { let mut jobs: Vec> = Vec::new(); + let registar = Registar::default(); + // Load peer keys if tracker.is_private() { tracker @@ -67,31 +70,45 @@ pub async fn start(config: Arc, tracker: Arc) -> V udp_tracker_config.bind_address, config.mode ); } else { - jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone()).await); + jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone(), registar.give_form()).await); } } // Start the HTTP blocks for http_tracker_config in &config.http_trackers { - if let Some(job) = http_tracker::start_job(http_tracker_config, tracker.clone(), servers::http::Version::V1).await { + if let Some(job) = http_tracker::start_job( + http_tracker_config, + tracker.clone(), + registar.give_form(), + servers::http::Version::V1, + ) + .await + { jobs.push(job); }; } // Start HTTP API if config.http_api.enabled { - if let Some(job) = tracker_apis::start_job(&config.http_api, tracker.clone(), servers::apis::Version::V1).await { + if let Some(job) = tracker_apis::start_job( + &config.http_api, + tracker.clone(), + registar.give_form(), + servers::apis::Version::V1, + ) + .await + { jobs.push(job); }; } // Start runners to remove torrents without peers, every interval if config.inactive_peer_cleanup_interval > 0 { - jobs.push(torrent_cleanup::start_job(&config, &tracker)); + jobs.push(torrent_cleanup::start_job(config, &tracker)); } // Start Health Check API - jobs.push(health_check_api::start_job(config).await); + jobs.push(health_check_api::start_job(&config.health_check_api, registar.entries()).await); jobs } diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index 9fed56435..1a9815280 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -13,15 +13,15 @@ //! //! 
Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) //! for the API configuration options. -use std::sync::Arc; use log::info; use tokio::sync::oneshot; use tokio::task::JoinHandle; -use torrust_tracker_configuration::Configuration; +use torrust_tracker_configuration::HealthCheckApi; use super::Started; use crate::servers::health_check_api::server; +use crate::servers::registar::ServiceRegistry; /// This function starts a new Health Check API server with the provided /// configuration. @@ -33,9 +33,8 @@ use crate::servers::health_check_api::server; /// # Panics /// /// It would panic if unable to send the `ApiServerJobStarted` notice. -pub async fn start_job(config: Arc) -> JoinHandle<()> { +pub async fn start_job(config: &HealthCheckApi, register: ServiceRegistry) -> JoinHandle<()> { let bind_addr = config - .health_check_api .bind_address .parse::() .expect("it should have a valid health check bind address"); @@ -46,7 +45,7 @@ pub async fn start_job(config: Arc) -> JoinHandle<()> { let join_handle = tokio::spawn(async move { info!(target: "Health Check API", "Starting on: http://{}", bind_addr); - let handle = server::start(bind_addr, tx_start, config.clone()); + let handle = server::start(bind_addr, tx_start, register); if let Ok(()) = handle.await { info!(target: "Health Check API", "Stopped server running on: http://{}", bind_addr); diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 69ff345db..0a0638b78 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -22,6 +22,7 @@ use super::make_rust_tls; use crate::core; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::http::Version; +use crate::servers::registar::ServiceRegistrationForm; /// It starts a new HTTP server with the provided configuration and version. 
/// @@ -32,7 +33,12 @@ use crate::servers::http::Version; /// /// It would panic if the `config::HttpTracker` struct would contain inappropriate values. /// -pub async fn start_job(config: &HttpTracker, tracker: Arc, version: Version) -> Option> { +pub async fn start_job( + config: &HttpTracker, + tracker: Arc, + form: ServiceRegistrationForm, + version: Version, +) -> Option> { if config.enabled { let socket = config .bind_address @@ -44,7 +50,7 @@ pub async fn start_job(config: &HttpTracker, tracker: Arc, versio .map(|tls| tls.expect("it should have a valid http tracker tls configuration")); match version { - Version::V1 => Some(start_v1(socket, tls, tracker.clone()).await), + Version::V1 => Some(start_v1(socket, tls, tracker.clone(), form).await), } } else { info!("Note: Not loading Http Tracker Service, Not Enabled in Configuration."); @@ -52,9 +58,14 @@ pub async fn start_job(config: &HttpTracker, tracker: Arc, versio } } -async fn start_v1(socket: SocketAddr, tls: Option, tracker: Arc) -> JoinHandle<()> { +async fn start_v1( + socket: SocketAddr, + tls: Option, + tracker: Arc, + form: ServiceRegistrationForm, +) -> JoinHandle<()> { let server = HttpServer::new(Launcher::new(socket, tls)) - .start(tracker) + .start(tracker, form) .await .expect("it should be able to start to the http tracker"); @@ -80,6 +91,7 @@ mod tests { use crate::bootstrap::app::initialize_with_configuration; use crate::bootstrap::jobs::http_tracker::start_job; use crate::servers::http::Version; + use crate::servers::registar::Registar; #[tokio::test] async fn it_should_start_http_tracker() { @@ -88,7 +100,7 @@ mod tests { let tracker = initialize_with_configuration(&cfg); let version = Version::V1; - start_job(config, tracker, version) + start_job(config, tracker, Registar::default().give_form(), version) .await .expect("it should be able to join to the http tracker start-job"); } diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 
43cb5de8e..ffd7c7407 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -32,6 +32,7 @@ use super::make_rust_tls; use crate::core; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::apis::Version; +use crate::servers::registar::ServiceRegistrationForm; /// This is the message that the "launcher" spawned task sends to the main /// application process to notify the API server was successfully started. @@ -53,7 +54,12 @@ pub struct ApiServerJobStarted(); /// It would panic if unable to send the `ApiServerJobStarted` notice. /// /// -pub async fn start_job(config: &HttpApi, tracker: Arc, version: Version) -> Option> { +pub async fn start_job( + config: &HttpApi, + tracker: Arc, + form: ServiceRegistrationForm, + version: Version, +) -> Option> { if config.enabled { let bind_to = config .bind_address @@ -67,7 +73,7 @@ pub async fn start_job(config: &HttpApi, tracker: Arc, version: V let access_tokens = Arc::new(config.access_tokens.clone()); match version { - Version::V1 => Some(start_v1(bind_to, tls, tracker.clone(), access_tokens).await), + Version::V1 => Some(start_v1(bind_to, tls, tracker.clone(), form, access_tokens).await), } } else { info!("Note: Not loading Http Tracker Service, Not Enabled in Configuration."); @@ -79,10 +85,11 @@ async fn start_v1( socket: SocketAddr, tls: Option, tracker: Arc, + form: ServiceRegistrationForm, access_tokens: Arc, ) -> JoinHandle<()> { let server = ApiServer::new(Launcher::new(socket, tls)) - .start(tracker, access_tokens) + .start(tracker, form, access_tokens) .await .expect("it should be able to start to the tracker api"); @@ -101,6 +108,7 @@ mod tests { use crate::bootstrap::app::initialize_with_configuration; use crate::bootstrap::jobs::tracker_apis::start_job; use crate::servers::apis::Version; + use crate::servers::registar::Registar; #[tokio::test] async fn it_should_start_http_tracker() { @@ -109,7 +117,7 @@ mod tests { let tracker = 
initialize_with_configuration(&cfg); let version = Version::V1; - start_job(config, tracker, version) + start_job(config, tracker, Registar::default().give_form(), version) .await .expect("it should be able to join to the tracker api start-job"); } diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 20ef0c793..275ce1381 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -13,6 +13,7 @@ use tokio::task::JoinHandle; use torrust_tracker_configuration::UdpTracker; use crate::core; +use crate::servers::registar::ServiceRegistrationForm; use crate::servers::udp::server::{Launcher, UdpServer}; /// It starts a new UDP server with the provided configuration. @@ -25,14 +26,14 @@ use crate::servers::udp::server::{Launcher, UdpServer}; /// It will panic if it is unable to start the UDP service. /// It will panic if the task did not finish successfully. #[must_use] -pub async fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { +pub async fn start_job(config: &UdpTracker, tracker: Arc, form: ServiceRegistrationForm) -> JoinHandle<()> { let bind_to = config .bind_address .parse::() .expect("it should have a valid udp tracker bind address"); let server = UdpServer::new(Launcher::new(bind_to)) - .start(tracker) + .start(tracker, form) .await .expect("it should be able to start the udp tracker"); diff --git a/src/main.rs b/src/main.rs index 5c65f8e07..bd07f4a58 100644 --- a/src/main.rs +++ b/src/main.rs @@ -5,7 +5,7 @@ use torrust_tracker::{app, bootstrap}; async fn main() { let (config, tracker) = bootstrap::app::setup(); - let jobs = app::start(config.into(), tracker.clone()).await; + let jobs = app::start(&config, tracker).await; // handle the signals tokio::select! 
{ diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index d26362f66..8aef9744c 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -37,6 +37,7 @@ use torrust_tracker_configuration::AccessTokens; use super::routes::router; use crate::bootstrap::jobs::Started; use crate::core::Tracker; +use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::{graceful_shutdown, Halted}; /// Errors that can occur when starting or stopping the API server. @@ -75,6 +76,21 @@ pub struct Running { pub task: tokio::task::JoinHandle, } +impl Running { + #[must_use] + pub fn new( + binding: SocketAddr, + halt_task: tokio::sync::oneshot::Sender, + task: tokio::task::JoinHandle, + ) -> Self { + Self { + binding, + halt_task, + task, + } + } +} + impl ApiServer { #[must_use] pub fn new(launcher: Launcher) -> Self { @@ -92,7 +108,12 @@ impl ApiServer { /// # Panics /// /// It would panic if the bound socket address cannot be sent back to this starter. - pub async fn start(self, tracker: Arc, access_tokens: Arc) -> Result, Error> { + pub async fn start( + self, + tracker: Arc, + form: ServiceRegistrationForm, + access_tokens: Arc, + ) -> Result, Error> { let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); @@ -104,13 +125,14 @@ impl ApiServer { }); let api_server = match rx_start.await { - Ok(started) => ApiServer { - state: Running { - binding: started.address, - halt_task: tx_halt, - task, - }, - }, + Ok(started) => { + form.send(ServiceRegistration::new(started.address, check_fn)) + .expect("it should be able to send service registration"); + + ApiServer { + state: Running::new(started.address, tx_halt, task), + } + } Err(err) => { let msg = format!("Unable to start API server: {err}"); error!("{}", msg); @@ -142,6 +164,27 @@ impl ApiServer { } } +/// Checks the Health by connecting to the API service endpoint. 
+/// +/// # Errors +/// +/// This function will return an error if unable to connect. +/// Or if there request returns an error code. +#[must_use] +pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { + let url = format!("http://{binding}/api/health_check"); + + let info = format!("checking api health check at: {url}"); + + let job = tokio::spawn(async move { + match reqwest::get(url).await { + Ok(response) => Ok(response.status().to_string()), + Err(err) => Err(err.to_string()), + } + }); + ServiceHealthCheckJob::new(*binding, info, job) +} + /// A struct responsible for starting the API server. #[derive(Constructor, Debug)] pub struct Launcher { @@ -218,6 +261,7 @@ mod tests { use crate::bootstrap::app::initialize_with_configuration; use crate::bootstrap::jobs::make_rust_tls; use crate::servers::apis::server::{ApiServer, Launcher}; + use crate::servers::registar::Registar; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { @@ -237,8 +281,11 @@ mod tests { let access_tokens = Arc::new(config.access_tokens.clone()); let stopped = ApiServer::new(Launcher::new(bind_to, tls)); + + let register = &Registar::default(); + let started = stopped - .start(tracker, access_tokens) + .start(tracker, register.give_form(), access_tokens) .await .expect("it should start the server"); let stopped = started.stop().await.expect("it should stop the server"); diff --git a/src/servers/health_check_api/handlers.rs b/src/servers/health_check_api/handlers.rs index 4403676af..35382583e 100644 --- a/src/servers/health_check_api/handlers.rs +++ b/src/servers/health_check_api/handlers.rs @@ -1,135 +1,45 @@ -use std::net::SocketAddr; -use std::sync::Arc; +use std::collections::VecDeque; -use aquatic_udp_protocol::{ConnectRequest, Response, TransactionId}; use axum::extract::State; use axum::Json; -use torrust_tracker_configuration::{Configuration, HttpApi, HttpTracker, UdpTracker}; -use super::resources::Report; +use super::resources::{CheckReport, Report}; use 
super::responses; -use crate::shared::bit_torrent::tracker::udp::client::new_udp_tracker_client_connected; - -/// If port 0 is specified in the configuration the OS will automatically -/// assign a free port. But we do now know in from the configuration. -/// We can only know it after starting the socket. -const UNKNOWN_PORT: u16 = 0; +use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistry}; /// Endpoint for container health check. /// -/// This endpoint only checks services when we know the port from the -/// configuration. If port 0 is specified in the configuration the health check -/// for that service is skipped. -pub(crate) async fn health_check_handler(State(config): State>) -> Json { - if let Some(err_response) = api_health_check(&config.http_api).await { - return err_response; - } - - if let Some(err_response) = http_trackers_health_check(&config.http_trackers).await { - return err_response; - } - - if let Some(err_response) = udp_trackers_health_check(&config.udp_trackers).await { - return err_response; - } - - responses::ok() -} - -async fn api_health_check(config: &HttpApi) -> Option> { - // todo: when port 0 is specified in the configuration get the port from the - // running service, after starting it as we do for testing with ephemeral - // configurations. - - if config.enabled { - let addr: SocketAddr = config.bind_address.parse().expect("invalid socket address for API"); +/// Creates a vector [`CheckReport`] from the input set of [`CheckJob`], and then builds a report from the results. +/// +pub(crate) async fn health_check_handler(State(register): State) -> Json { + #[allow(unused_assignments)] + let mut checks: VecDeque = VecDeque::new(); - if addr.port() != UNKNOWN_PORT { - let health_check_url = format!("http://{addr}/api/health_check"); + { + let mutex = register.lock(); - if !get_req_is_ok(&health_check_url).await { - return Some(responses::error(format!( - "API is not healthy. 
Health check endpoint: {health_check_url}" - ))); - } - } + checks = mutex.await.values().map(ServiceRegistration::spawn_check).collect(); } - None -} - -async fn http_trackers_health_check(http_trackers: &Vec) -> Option> { - // todo: when port 0 is specified in the configuration get the port from the - // running service, after starting it as we do for testing with ephemeral - // configurations. - - for http_tracker_config in http_trackers { - if !http_tracker_config.enabled { - continue; - } - - let addr: SocketAddr = http_tracker_config - .bind_address - .parse() - .expect("invalid socket address for HTTP tracker"); - - if addr.port() != UNKNOWN_PORT { - let health_check_url = format!("http://{addr}/health_check"); - - if !get_req_is_ok(&health_check_url).await { - return Some(responses::error(format!( - "HTTP Tracker is not healthy. Health check endpoint: {health_check_url}" - ))); + let jobs = checks.drain(..).map(|c| { + tokio::spawn(async move { + CheckReport { + binding: c.binding, + info: c.info.clone(), + result: c.job.await.expect("it should be able to join into the checking function"), } - } + }) + }); + + let results: Vec = futures::future::join_all(jobs) + .await + .drain(..) + .map(|r| r.expect("it should be able to connect to the job")) + .collect(); + + if results.iter().any(CheckReport::fail) { + responses::error("health check failed".to_string(), results) + } else { + responses::ok(results) } - - None -} - -async fn udp_trackers_health_check(udp_trackers: &Vec) -> Option> { - // todo: when port 0 is specified in the configuration get the port from the - // running service, after starting it as we do for testing with ephemeral - // configurations. 
- - for udp_tracker_config in udp_trackers { - if !udp_tracker_config.enabled { - continue; - } - - let addr: SocketAddr = udp_tracker_config - .bind_address - .parse() - .expect("invalid socket address for UDP tracker"); - - if addr.port() != UNKNOWN_PORT && !can_connect_to_udp_tracker(&addr.to_string()).await { - return Some(responses::error(format!( - "UDP Tracker is not healthy. Can't connect to: {addr}" - ))); - } - } - - None -} - -async fn get_req_is_ok(url: &str) -> bool { - match reqwest::get(url).await { - Ok(response) => response.status().is_success(), - Err(_err) => false, - } -} - -/// Tries to connect to an UDP tracker. It returns true if it succeeded. -async fn can_connect_to_udp_tracker(url: &str) -> bool { - let client = new_udp_tracker_client_connected(url).await; - - let connect_request = ConnectRequest { - transaction_id: TransactionId(123), - }; - - client.send(connect_request.into()).await; - - let response = client.receive().await; - - matches!(response, Response::Connect(_connect_response)) } diff --git a/src/servers/health_check_api/resources.rs b/src/servers/health_check_api/resources.rs index 3fadcf456..bb57cf20b 100644 --- a/src/servers/health_check_api/resources.rs +++ b/src/servers/health_check_api/resources.rs @@ -1,31 +1,54 @@ +use std::net::SocketAddr; + use serde::{Deserialize, Serialize}; -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +#[derive(Copy, Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] pub enum Status { Ok, Error, } -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct CheckReport { + pub binding: SocketAddr, + pub info: String, + pub result: Result, +} + +impl CheckReport { + #[must_use] + pub fn pass(&self) -> bool { + self.result.is_ok() + } + #[must_use] + pub fn fail(&self) -> bool { + self.result.is_err() + } +} + +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Report { pub status: Status, pub 
message: String, + pub details: Vec, } impl Report { #[must_use] - pub fn ok() -> Report { + pub fn ok(details: Vec) -> Report { Self { status: Status::Ok, message: String::new(), + details, } } #[must_use] - pub fn error(message: String) -> Report { + pub fn error(message: String, details: Vec) -> Report { Self { status: Status::Error, message, + details, } } } diff --git a/src/servers/health_check_api/responses.rs b/src/servers/health_check_api/responses.rs index 043e271db..8658caeb4 100644 --- a/src/servers/health_check_api/responses.rs +++ b/src/servers/health_check_api/responses.rs @@ -1,11 +1,11 @@ use axum::Json; -use super::resources::Report; +use super::resources::{CheckReport, Report}; -pub fn ok() -> Json { - Json(Report::ok()) +pub fn ok(details: Vec) -> Json { + Json(Report::ok(details)) } -pub fn error(message: String) -> Json { - Json(Report::error(message)) +pub fn error(message: String, details: Vec) -> Json { + Json(Report::error(message, details)) } diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs index fb807d09c..a7cbf4a8a 100644 --- a/src/servers/health_check_api/server.rs +++ b/src/servers/health_check_api/server.rs @@ -3,7 +3,6 @@ //! This API is intended to be used by the container infrastructure to check if //! the whole application is healthy. use std::net::SocketAddr; -use std::sync::Arc; use axum::routing::get; use axum::{Json, Router}; @@ -12,10 +11,10 @@ use futures::Future; use log::info; use serde_json::json; use tokio::sync::oneshot::Sender; -use torrust_tracker_configuration::Configuration; use crate::bootstrap::jobs::Started; use crate::servers::health_check_api::handlers::health_check_handler; +use crate::servers::registar::ServiceRegistry; /// Starts Health Check API server. 
/// @@ -25,12 +24,12 @@ use crate::servers::health_check_api::handlers::health_check_handler; pub fn start( address: SocketAddr, tx: Sender, - config: Arc, + register: ServiceRegistry, ) -> impl Future> { let app = Router::new() .route("/", get(|| async { Json(json!({})) })) .route("/health_check", get(health_check_handler)) - .with_state(config); + .with_state(register); let handle = Handle::new(); let cloned_handle = handle.clone(); diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 0a4b687b5..20e57db57 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -12,6 +12,7 @@ use tokio::sync::oneshot::{Receiver, Sender}; use super::v1::routes::router; use crate::bootstrap::jobs::Started; use crate::core::Tracker; +use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::{graceful_shutdown, Halted}; /// Error that can occur when starting or stopping the HTTP server. @@ -143,7 +144,7 @@ impl HttpServer { /// /// It would panic spawned HTTP server launcher cannot send the bound `SocketAddr` /// back to the main thread. - pub async fn start(self, tracker: Arc) -> Result, Error> { + pub async fn start(self, tracker: Arc, form: ServiceRegistrationForm) -> Result, Error> { let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); @@ -157,9 +158,14 @@ impl HttpServer { launcher }); + let binding = rx_start.await.expect("it should be able to start the service").address; + + form.send(ServiceRegistration::new(binding, check_fn)) + .expect("it should be able to send service registration"); + Ok(HttpServer { state: Running { - binding: rx_start.await.expect("unable to start service").address, + binding, halt_task: tx_halt, task, }, @@ -188,6 +194,28 @@ impl HttpServer { } } +/// Checks the Health by connecting to the HTTP tracker endpoint. 
+/// +/// # Errors +/// +/// This function will return an error if unable to connect. +/// Or if the request returns an error. +#[must_use] +pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { + let url = format!("http://{binding}/health_check"); + + let info = format!("checking http tracker health check at: {url}"); + + let job = tokio::spawn(async move { + match reqwest::get(url).await { + Ok(response) => Ok(response.status().to_string()), + Err(err) => Err(err.to_string()), + } + }); + + ServiceHealthCheckJob::new(*binding, info, job) +} + #[cfg(test)] mod tests { use std::sync::Arc; @@ -197,6 +225,7 @@ mod tests { use crate::bootstrap::app::initialize_with_configuration; use crate::bootstrap::jobs::make_rust_tls; use crate::servers::http::server::{HttpServer, Launcher}; + use crate::servers::registar::Registar; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { @@ -213,8 +242,13 @@ mod tests { .await .map(|tls| tls.expect("tls config failed")); + let register = &Registar::default(); + let stopped = HttpServer::new(Launcher::new(bind_to, tls)); - let started = stopped.start(tracker).await.expect("it should start the server"); + let started = stopped + .start(tracker, register.give_form()) + .await + .expect("it should start the server"); let stopped = started.stop().await.expect("it should stop the server"); assert_eq!(stopped.state.launcher.bind_to, bind_to); diff --git a/src/servers/mod.rs b/src/servers/mod.rs index 077109f35..b0e222d2a 100644 --- a/src/servers/mod.rs +++ b/src/servers/mod.rs @@ -2,5 +2,6 @@ pub mod apis; pub mod health_check_api; pub mod http; +pub mod registar; pub mod signals; pub mod udp; diff --git a/src/servers/registar.rs b/src/servers/registar.rs new file mode 100644 index 000000000..0fb8d6acc --- /dev/null +++ b/src/servers/registar.rs @@ -0,0 +1,95 @@ +//! Registar. Registers Services for Health Check. 
+ +use std::collections::HashMap; +use std::net::SocketAddr; +use std::sync::Arc; + +use derive_more::Constructor; +use tokio::sync::Mutex; +use tokio::task::JoinHandle; + +/// A [`ServiceHeathCheckResult`] is returned by a completed health check. +pub type ServiceHeathCheckResult = Result; + +/// The [`ServiceHealthCheckJob`] has a health check job with its metadata +/// +/// The `job` awaits a [`ServiceHeathCheckResult`]. +#[derive(Debug, Constructor)] +pub struct ServiceHealthCheckJob { + pub binding: SocketAddr, + pub info: String, + pub job: JoinHandle, +} + +/// The function specification [`FnSpawnServiceHeathCheck`]. +/// +/// A function fulfilling this specification will spawn a new [`ServiceHealthCheckJob`]. +pub type FnSpawnServiceHeathCheck = fn(&SocketAddr) -> ServiceHealthCheckJob; + +/// A [`ServiceRegistration`] is provided to the [`Registar`] for registration. +/// +/// Each registration includes a function that fulfils the [`FnSpawnServiceHeathCheck`] specification. +#[derive(Clone, Debug, Constructor)] +pub struct ServiceRegistration { + binding: SocketAddr, + check_fn: FnSpawnServiceHeathCheck, +} + +impl ServiceRegistration { + #[must_use] + pub fn spawn_check(&self) -> ServiceHealthCheckJob { + (self.check_fn)(&self.binding) + } +} + +/// A [`ServiceRegistrationForm`] will return a completed [`ServiceRegistration`] to the [`Registar`]. +pub type ServiceRegistrationForm = tokio::sync::oneshot::Sender; + +/// The [`ServiceRegistry`] contains each unique [`ServiceRegistration`] by its [`SocketAddr`]. +pub type ServiceRegistry = Arc>>; + +/// The [`Registar`] manages the [`ServiceRegistry`]. 
+#[derive(Clone, Debug)] +pub struct Registar { + registry: ServiceRegistry, +} + +#[allow(clippy::derivable_impls)] +impl Default for Registar { + fn default() -> Self { + Self { + registry: ServiceRegistry::default(), + } + } +} + +impl Registar { + pub fn new(register: ServiceRegistry) -> Self { + Self { registry: register } + } + + /// Registers a Service + #[must_use] + pub fn give_form(&self) -> ServiceRegistrationForm { + let (tx, rx) = tokio::sync::oneshot::channel::(); + let register = self.clone(); + tokio::spawn(async move { + register.insert(rx).await; + }); + tx + } + + /// Inserts a listing into the registry. + async fn insert(&self, rx: tokio::sync::oneshot::Receiver) { + let listing = rx.await.expect("it should receive the listing"); + + let mut mutex = self.registry.lock().await; + mutex.insert(listing.binding, listing); + } + + /// Returns the [`ServiceRegistry`] of services + #[must_use] + pub fn entries(&self) -> ServiceRegistry { + self.registry.clone() + } +} diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 001603b08..5a1977d01 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -32,8 +32,10 @@ use tokio::task::JoinHandle; use crate::bootstrap::jobs::Started; use crate::core::Tracker; +use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::{shutdown_signal_with_message, Halted}; use crate::servers::udp::handlers::handle_packet; +use crate::shared::bit_torrent::tracker::udp::client::check; use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; /// Error that can occur when starting or stopping the UDP server. @@ -117,7 +119,7 @@ impl UdpServer { /// /// It panics if unable to receive the bound socket address from service. 
/// - pub async fn start(self, tracker: Arc) -> Result, Error> { + pub async fn start(self, tracker: Arc, form: ServiceRegistrationForm) -> Result, Error> { let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); @@ -135,7 +137,10 @@ impl UdpServer { launcher }); - let binding = rx_start.await.expect("unable to start service").address; + let binding = rx_start.await.expect("it should be able to start the service").address; + + form.send(ServiceRegistration::new(binding, Udp::check)) + .expect("it should be able to send service registration"); let running_udp_server: UdpServer = UdpServer { state: Running { @@ -305,6 +310,15 @@ impl Udp { // doesn't matter if it reaches or not drop(socket.send_to(payload, remote_addr).await); } + + fn check(binding: &SocketAddr) -> ServiceHealthCheckJob { + let binding = *binding; + let info = format!("checking the udp tracker health check at: {binding}"); + + let job = tokio::spawn(async move { check(&binding).await }); + + ServiceHealthCheckJob::new(binding, info, job) + } } #[cfg(test)] @@ -314,6 +328,7 @@ mod tests { use torrust_tracker_test_helpers::configuration::ephemeral_mode_public; use crate::bootstrap::app::initialize_with_configuration; + use crate::servers::registar::Registar; use crate::servers::udp::server::{Launcher, UdpServer}; #[tokio::test] @@ -327,8 +342,13 @@ mod tests { .parse::() .expect("Tracker API bind_address invalid."); + let register = &Registar::default(); + let stopped = UdpServer::new(Launcher::new(bind_to)); - let started = stopped.start(tracker).await.expect("it should start the server"); + let started = stopped + .start(tracker, register.give_form()) + .await + .expect("it should start the server"); let stopped = started.stop().await.expect("it should stop the server"); assert_eq!(stopped.state.launcher.bind_to, bind_to); diff --git a/src/shared/bit_torrent/tracker/udp/client.rs b/src/shared/bit_torrent/tracker/udp/client.rs index 
5ea982663..f0a981c8a 100644 --- a/src/shared/bit_torrent/tracker/udp/client.rs +++ b/src/shared/bit_torrent/tracker/udp/client.rs @@ -1,7 +1,8 @@ use std::io::Cursor; +use std::net::SocketAddr; use std::sync::Arc; -use aquatic_udp_protocol::{Request, Response}; +use aquatic_udp_protocol::{ConnectRequest, Request, Response, TransactionId}; use tokio::net::UdpSocket; use crate::shared::bit_torrent::tracker::udp::{source_address, MAX_PACKET_SIZE}; @@ -105,3 +106,26 @@ pub async fn new_udp_tracker_client_connected(remote_address: &str) -> UdpTracke let udp_client = new_udp_client_connected(remote_address).await; UdpTrackerClient { udp_client } } + +/// Helper Function to Check if a UDP Service is Connectable +/// +/// # Errors +/// +/// It will return an error if unable to connect to the UDP service. +pub async fn check(binding: &SocketAddr) -> Result { + let client = new_udp_tracker_client_connected(binding.to_string().as_str()).await; + + let connect_request = ConnectRequest { + transaction_id: TransactionId(123), + }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + if matches!(response, Response::Connect(_connect_response)) { + Ok("Connected".to_string()) + } else { + Err("Did not Connect".to_string()) + } +} diff --git a/tests/servers/api/test_environment.rs b/tests/servers/api/test_environment.rs index c6878c674..080fab551 100644 --- a/tests/servers/api/test_environment.rs +++ b/tests/servers/api/test_environment.rs @@ -5,6 +5,7 @@ use torrust_tracker::bootstrap::jobs::make_rust_tls; use torrust_tracker::core::peer::Peer; use torrust_tracker::core::Tracker; use torrust_tracker::servers::apis::server::{ApiServer, Launcher, RunningApiServer, StoppedApiServer}; +use torrust_tracker::servers::registar::Registar; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_configuration::HttpApi; @@ -69,7 +70,12 @@ impl TestEnvironment { config: self.config, tracker: self.tracker.clone(), state: 
Running { - api_server: self.state.api_server.start(self.tracker, access_tokens).await.unwrap(), + api_server: self + .state + .api_server + .start(self.tracker, Registar::default().give_form(), access_tokens) + .await + .unwrap(), }, } } diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs index 6b816b85f..c02335d05 100644 --- a/tests/servers/health_check_api/contract.rs +++ b/tests/servers/health_check_api/contract.rs @@ -1,4 +1,5 @@ -use torrust_tracker::servers::health_check_api::resources::Report; +use torrust_tracker::servers::health_check_api::resources::{Report, Status}; +use torrust_tracker::servers::registar::Registar; use torrust_tracker_test_helpers::configuration; use crate::servers::health_check_api::client::get; @@ -8,7 +9,9 @@ use crate::servers::health_check_api::test_environment; async fn health_check_endpoint_should_return_status_ok_when_no_service_is_running() { let configuration = configuration::ephemeral_with_no_services(); - let (bound_addr, test_env) = test_environment::start(configuration.into()).await; + let registar = &Registar::default(); + + let (bound_addr, test_env) = test_environment::start(&configuration.health_check_api, registar.entries()).await; let url = format!("http://{bound_addr}/health_check"); @@ -16,7 +19,7 @@ async fn health_check_endpoint_should_return_status_ok_when_no_service_is_runnin assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - assert_eq!(response.json::().await.unwrap(), Report::ok()); + assert_eq!(response.json::().await.unwrap().status, Status::Ok); test_env.abort(); } diff --git a/tests/servers/health_check_api/test_environment.rs b/tests/servers/health_check_api/test_environment.rs index 554e37dbf..18924e101 100644 --- a/tests/servers/health_check_api/test_environment.rs +++ b/tests/servers/health_check_api/test_environment.rs @@ -1,17 +1,16 @@ use std::net::SocketAddr; -use std::sync::Arc; 
use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker::bootstrap::jobs::Started; use torrust_tracker::servers::health_check_api::server; -use torrust_tracker_configuration::Configuration; +use torrust_tracker::servers::registar::ServiceRegistry; +use torrust_tracker_configuration::HealthCheckApi; /// Start the test environment for the Health Check API. /// It runs the API server. -pub async fn start(config: Arc) -> (SocketAddr, JoinHandle<()>) { +pub async fn start(config: &HealthCheckApi, register: ServiceRegistry) -> (SocketAddr, JoinHandle<()>) { let bind_addr = config - .health_check_api .bind_address .parse::() .expect("Health Check API bind_address invalid."); @@ -19,7 +18,7 @@ pub async fn start(config: Arc) -> (SocketAddr, JoinHandle<()>) { let (tx, rx) = oneshot::channel::(); let join_handle = tokio::spawn(async move { - let handle = server::start(bind_addr, tx, config.clone()); + let handle = server::start(bind_addr, tx, register); if let Ok(()) = handle.await { panic!("Health Check API server on http://{bind_addr} stopped"); } diff --git a/tests/servers/http/test_environment.rs b/tests/servers/http/test_environment.rs index 73961b790..9cab40db2 100644 --- a/tests/servers/http/test_environment.rs +++ b/tests/servers/http/test_environment.rs @@ -5,6 +5,7 @@ use torrust_tracker::bootstrap::jobs::make_rust_tls; use torrust_tracker::core::peer::Peer; use torrust_tracker::core::Tracker; use torrust_tracker::servers::http::server::{HttpServer, Launcher, RunningHttpServer, StoppedHttpServer}; +use torrust_tracker::servers::registar::Registar; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::common::app::setup_with_configuration; @@ -68,7 +69,12 @@ impl TestEnvironment { cfg: self.cfg, tracker: self.tracker.clone(), state: Running { - http_server: self.state.http_server.start(self.tracker).await.unwrap(), + http_server: self + .state + .http_server + .start(self.tracker, Registar::default().give_form()) + .await + 
.unwrap(), }, } } diff --git a/tests/servers/udp/test_environment.rs b/tests/servers/udp/test_environment.rs index bbad6d927..f272b6dd3 100644 --- a/tests/servers/udp/test_environment.rs +++ b/tests/servers/udp/test_environment.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use torrust_tracker::core::peer::Peer; use torrust_tracker::core::Tracker; +use torrust_tracker::servers::registar::Registar; use torrust_tracker::servers::udp::server::{Launcher, RunningUdpServer, StoppedUdpServer, UdpServer}; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; @@ -61,11 +62,13 @@ impl TestEnvironment { #[allow(dead_code)] pub async fn start(self) -> TestEnvironment { + let register = &Registar::default(); + TestEnvironment { cfg: self.cfg, tracker: self.tracker.clone(), state: Running { - udp_server: self.state.udp_server.start(self.tracker).await.unwrap(), + udp_server: self.state.udp_server.start(self.tracker, register.give_form()).await.unwrap(), }, } } From 3f0dcea464bb4fd7cf7424a02dcc9f9329295caf Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 7 Jan 2024 02:55:24 +1100 Subject: [PATCH 0688/1003] dev: add tests to health check --- src/bootstrap/jobs/health_check_api.rs | 5 +- src/servers/health_check_api/handlers.rs | 5 + src/servers/health_check_api/resources.rs | 10 + src/servers/health_check_api/responses.rs | 4 + src/servers/health_check_api/server.rs | 28 +- src/shared/bit_torrent/tracker/udp/client.rs | 26 +- tests/common/app.rs | 8 - tests/common/mod.rs | 1 - tests/servers/api/environment.rs | 94 ++++ tests/servers/api/mod.rs | 5 +- tests/servers/api/test_environment.rs | 126 ----- .../servers/api/v1/contract/authentication.rs | 38 +- .../servers/api/v1/contract/configuration.rs | 6 +- .../api/v1/contract/context/auth_key.rs | 124 ++-- .../api/v1/contract/context/health_check.rs | 8 +- .../servers/api/v1/contract/context/stats.rs | 33 +- .../api/v1/contract/context/torrent.rs | 98 ++-- .../api/v1/contract/context/whitelist.rs | 124 ++-- 
tests/servers/health_check_api/contract.rs | 308 +++++++++- tests/servers/health_check_api/environment.rs | 91 +++ tests/servers/health_check_api/mod.rs | 4 +- .../health_check_api/test_environment.rs | 33 -- tests/servers/http/environment.rs | 81 +++ tests/servers/http/mod.rs | 5 +- tests/servers/http/test_environment.rs | 133 ----- tests/servers/http/v1/contract.rs | 529 +++++++++--------- tests/servers/udp/contract.rs | 24 +- tests/servers/udp/environment.rs | 78 +++ tests/servers/udp/mod.rs | 6 +- tests/servers/udp/test_environment.rs | 110 ---- 30 files changed, 1182 insertions(+), 963 deletions(-) delete mode 100644 tests/common/app.rs create mode 100644 tests/servers/api/environment.rs delete mode 100644 tests/servers/api/test_environment.rs create mode 100644 tests/servers/health_check_api/environment.rs delete mode 100644 tests/servers/health_check_api/test_environment.rs create mode 100644 tests/servers/http/environment.rs delete mode 100644 tests/servers/http/test_environment.rs create mode 100644 tests/servers/udp/environment.rs delete mode 100644 tests/servers/udp/test_environment.rs diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index 1a9815280..7eeafe97b 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -22,6 +22,7 @@ use torrust_tracker_configuration::HealthCheckApi; use super::Started; use crate::servers::health_check_api::server; use crate::servers::registar::ServiceRegistry; +use crate::servers::signals::Halted; /// This function starts a new Health Check API server with the provided /// configuration. 
@@ -40,12 +41,14 @@ pub async fn start_job(config: &HealthCheckApi, register: ServiceRegistry) -> Jo .expect("it should have a valid health check bind address"); let (tx_start, rx_start) = oneshot::channel::(); + let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); + drop(tx_halt); // Run the API server let join_handle = tokio::spawn(async move { info!(target: "Health Check API", "Starting on: http://{}", bind_addr); - let handle = server::start(bind_addr, tx_start, register); + let handle = server::start(bind_addr, tx_start, rx_halt, register); if let Ok(()) = handle.await { info!(target: "Health Check API", "Stopped server running on: http://{}", bind_addr); diff --git a/src/servers/health_check_api/handlers.rs b/src/servers/health_check_api/handlers.rs index 35382583e..944e84a1d 100644 --- a/src/servers/health_check_api/handlers.rs +++ b/src/servers/health_check_api/handlers.rs @@ -21,6 +21,11 @@ pub(crate) async fn health_check_handler(State(register): State checks = mutex.await.values().map(ServiceRegistration::spawn_check).collect(); } + // if we do not have any checks, lets return a `none` result. 
+ if checks.is_empty() { + return responses::none(); + } + let jobs = checks.drain(..).map(|c| { tokio::spawn(async move { CheckReport { diff --git a/src/servers/health_check_api/resources.rs b/src/servers/health_check_api/resources.rs index bb57cf20b..3302fb966 100644 --- a/src/servers/health_check_api/resources.rs +++ b/src/servers/health_check_api/resources.rs @@ -6,6 +6,7 @@ use serde::{Deserialize, Serialize}; pub enum Status { Ok, Error, + None, } #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -34,6 +35,15 @@ pub struct Report { } impl Report { + #[must_use] + pub fn none() -> Report { + Self { + status: Status::None, + message: String::new(), + details: Vec::default(), + } + } + #[must_use] pub fn ok(details: Vec) -> Report { Self { diff --git a/src/servers/health_check_api/responses.rs b/src/servers/health_check_api/responses.rs index 8658caeb4..3796d8be4 100644 --- a/src/servers/health_check_api/responses.rs +++ b/src/servers/health_check_api/responses.rs @@ -9,3 +9,7 @@ pub fn ok(details: Vec) -> Json { pub fn error(message: String, details: Vec) -> Json { Json(Report::error(message, details)) } + +pub fn none() -> Json { + Json(Report::none()) +} diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs index a7cbf4a8a..ecc6fe427 100644 --- a/src/servers/health_check_api/server.rs +++ b/src/servers/health_check_api/server.rs @@ -8,13 +8,13 @@ use axum::routing::get; use axum::{Json, Router}; use axum_server::Handle; use futures::Future; -use log::info; use serde_json::json; -use tokio::sync::oneshot::Sender; +use tokio::sync::oneshot::{Receiver, Sender}; use crate::bootstrap::jobs::Started; use crate::servers::health_check_api::handlers::health_check_handler; use crate::servers::registar::ServiceRegistry; +use crate::servers::signals::{graceful_shutdown, Halted}; /// Starts Health Check API server. 
/// @@ -22,30 +22,30 @@ use crate::servers::registar::ServiceRegistry; /// /// Will panic if binding to the socket address fails. pub fn start( - address: SocketAddr, + bind_to: SocketAddr, tx: Sender, + rx_halt: Receiver, register: ServiceRegistry, ) -> impl Future> { - let app = Router::new() + let router = Router::new() .route("/", get(|| async { Json(json!({})) })) .route("/health_check", get(health_check_handler)) .with_state(register); - let handle = Handle::new(); - let cloned_handle = handle.clone(); - - let socket = std::net::TcpListener::bind(address).expect("Could not bind tcp_listener to address."); + let socket = std::net::TcpListener::bind(bind_to).expect("Could not bind tcp_listener to address."); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); - tokio::task::spawn(async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - info!("Stopping Torrust Health Check API server o http://{} ...", address); - cloned_handle.shutdown(); - }); + let handle = Handle::new(); + + tokio::task::spawn(graceful_shutdown( + handle.clone(), + rx_halt, + format!("shutting down http server on socket address: {address}"), + )); let running = axum_server::from_tcp(socket) .handle(handle) - .serve(app.into_make_service_with_connect_info::()); + .serve(router.into_make_service_with_connect_info::()); tx.send(Started { address }) .expect("the Health Check API server should not be dropped"); diff --git a/src/shared/bit_torrent/tracker/udp/client.rs b/src/shared/bit_torrent/tracker/udp/client.rs index f0a981c8a..00f0b8acf 100644 --- a/src/shared/bit_torrent/tracker/udp/client.rs +++ b/src/shared/bit_torrent/tracker/udp/client.rs @@ -1,9 +1,11 @@ use std::io::Cursor; use std::net::SocketAddr; use std::sync::Arc; +use std::time::Duration; use aquatic_udp_protocol::{ConnectRequest, Request, Response, TransactionId}; use tokio::net::UdpSocket; +use tokio::time; use 
crate::shared::bit_torrent::tracker::udp::{source_address, MAX_PACKET_SIZE}; @@ -112,6 +114,8 @@ pub async fn new_udp_tracker_client_connected(remote_address: &str) -> UdpTracke /// # Errors /// /// It will return an error if unable to connect to the UDP service. +/// +/// # Panics pub async fn check(binding: &SocketAddr) -> Result { let client = new_udp_tracker_client_connected(binding.to_string().as_str()).await; @@ -121,11 +125,23 @@ pub async fn check(binding: &SocketAddr) -> Result { client.send(connect_request.into()).await; - let response = client.receive().await; + let process = move |response| { + if matches!(response, Response::Connect(_connect_response)) { + Ok("Connected".to_string()) + } else { + Err("Did not Connect".to_string()) + } + }; + + let sleep = time::sleep(Duration::from_millis(2000)); + tokio::pin!(sleep); - if matches!(response, Response::Connect(_connect_response)) { - Ok("Connected".to_string()) - } else { - Err("Did not Connect".to_string()) + tokio::select! 
{ + () = &mut sleep => { + Err("Timed Out".to_string()) + } + response = client.receive() => { + process(response) + } } } diff --git a/tests/common/app.rs b/tests/common/app.rs deleted file mode 100644 index 1b735bc86..000000000 --- a/tests/common/app.rs +++ /dev/null @@ -1,8 +0,0 @@ -use std::sync::Arc; - -use torrust_tracker::bootstrap; -use torrust_tracker::core::Tracker; - -pub fn setup_with_configuration(configuration: &Arc) -> Arc { - bootstrap::app::initialize_with_configuration(configuration) -} diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 51a8a5b03..b57996292 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,4 +1,3 @@ -pub mod app; pub mod fixtures; pub mod http; pub mod udp; diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs new file mode 100644 index 000000000..186b7ea3b --- /dev/null +++ b/tests/servers/api/environment.rs @@ -0,0 +1,94 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use futures::executor::block_on; +use torrust_tracker::bootstrap::app::initialize_with_configuration; +use torrust_tracker::bootstrap::jobs::make_rust_tls; +use torrust_tracker::core::peer::Peer; +use torrust_tracker::core::Tracker; +use torrust_tracker::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; +use torrust_tracker::servers::registar::Registar; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_configuration::{Configuration, HttpApi}; + +use super::connection_info::ConnectionInfo; + +pub struct Environment { + pub config: Arc, + pub tracker: Arc, + pub registar: Registar, + pub server: ApiServer, +} + +impl Environment { + /// Add a torrent to the tracker + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + } +} + +impl Environment { + pub fn new(configuration: &Arc) -> Self { + let tracker = initialize_with_configuration(configuration); + + let 
config = Arc::new(configuration.http_api.clone()); + + let bind_to = config + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); + + let tls = block_on(make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path)) + .map(|tls| tls.expect("tls config failed")); + + let server = ApiServer::new(Launcher::new(bind_to, tls)); + + Self { + config, + tracker, + registar: Registar::default(), + server, + } + } + + pub async fn start(self) -> Environment { + let access_tokens = Arc::new(self.config.access_tokens.clone()); + + Environment { + config: self.config, + tracker: self.tracker.clone(), + registar: self.registar.clone(), + server: self + .server + .start(self.tracker, self.registar.give_form(), access_tokens) + .await + .unwrap(), + } + } +} + +impl Environment { + pub async fn new(configuration: &Arc) -> Self { + Environment::::new(configuration).start().await + } + + pub async fn stop(self) -> Environment { + Environment { + config: self.config, + tracker: self.tracker, + registar: Registar::default(), + server: self.server.stop().await.unwrap(), + } + } + + pub fn get_connection_info(&self) -> ConnectionInfo { + ConnectionInfo { + bind_address: self.server.state.binding.to_string(), + api_token: self.config.access_tokens.get("admin").cloned(), + } + } + + pub fn bind_address(&self) -> SocketAddr { + self.server.state.binding + } +} diff --git a/tests/servers/api/mod.rs b/tests/servers/api/mod.rs index 155ac0de1..9c30e316a 100644 --- a/tests/servers/api/mod.rs +++ b/tests/servers/api/mod.rs @@ -1,11 +1,14 @@ use std::sync::Arc; use torrust_tracker::core::Tracker; +use torrust_tracker::servers::apis::server; pub mod connection_info; -pub mod test_environment; +pub mod environment; pub mod v1; +pub type Started = environment::Environment; + /// It forces a database error by dropping all tables. /// That makes any query fail. /// code-review: alternatively we could inject a database mock in the future. 
diff --git a/tests/servers/api/test_environment.rs b/tests/servers/api/test_environment.rs deleted file mode 100644 index 080fab551..000000000 --- a/tests/servers/api/test_environment.rs +++ /dev/null @@ -1,126 +0,0 @@ -use std::sync::Arc; - -use futures::executor::block_on; -use torrust_tracker::bootstrap::jobs::make_rust_tls; -use torrust_tracker::core::peer::Peer; -use torrust_tracker::core::Tracker; -use torrust_tracker::servers::apis::server::{ApiServer, Launcher, RunningApiServer, StoppedApiServer}; -use torrust_tracker::servers::registar::Registar; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -use torrust_tracker_configuration::HttpApi; - -use super::connection_info::ConnectionInfo; -use crate::common::app::setup_with_configuration; - -#[allow(clippy::module_name_repetitions, dead_code)] -pub type StoppedTestEnvironment = TestEnvironment; -#[allow(clippy::module_name_repetitions)] -pub type RunningTestEnvironment = TestEnvironment; - -pub struct TestEnvironment { - pub config: Arc, - pub tracker: Arc, - pub state: S, -} - -#[allow(dead_code)] -pub struct Stopped { - api_server: StoppedApiServer, -} - -pub struct Running { - api_server: RunningApiServer, -} - -impl TestEnvironment { - /// Add a torrent to the tracker - pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { - self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - } -} - -impl TestEnvironment { - pub fn new(cfg: torrust_tracker_configuration::Configuration) -> Self { - let cfg = Arc::new(cfg); - let tracker = setup_with_configuration(&cfg); - - let config = Arc::new(cfg.http_api.clone()); - - let bind_to = config - .bind_address - .parse::() - .expect("Tracker API bind_address invalid."); - - let tls = block_on(make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path)) - .map(|tls| tls.expect("tls config failed")); - - let api_server = api_server(Launcher::new(bind_to, tls)); - - Self { - config, - tracker, - 
state: Stopped { api_server }, - } - } - - pub async fn start(self) -> TestEnvironment { - let access_tokens = Arc::new(self.config.access_tokens.clone()); - - TestEnvironment { - config: self.config, - tracker: self.tracker.clone(), - state: Running { - api_server: self - .state - .api_server - .start(self.tracker, Registar::default().give_form(), access_tokens) - .await - .unwrap(), - }, - } - } - - // pub fn config_mut(&mut self) -> &mut torrust_tracker_configuration::HttpApi { - // &mut self.cfg.http_api - // } -} - -impl TestEnvironment { - pub async fn new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { - let test_env = StoppedTestEnvironment::new(cfg); - - test_env.start().await - } - - pub async fn stop(self) -> TestEnvironment { - TestEnvironment { - config: self.config, - tracker: self.tracker, - state: Stopped { - api_server: self.state.api_server.stop().await.unwrap(), - }, - } - } - - pub fn get_connection_info(&self) -> ConnectionInfo { - ConnectionInfo { - bind_address: self.state.api_server.state.binding.to_string(), - api_token: self.config.access_tokens.get("admin").cloned(), - } - } -} - -#[allow(clippy::module_name_repetitions)] -#[allow(dead_code)] -pub fn stopped_test_environment(cfg: torrust_tracker_configuration::Configuration) -> StoppedTestEnvironment { - TestEnvironment::new(cfg) -} - -#[allow(clippy::module_name_repetitions)] -pub async fn running_test_environment(cfg: torrust_tracker_configuration::Configuration) -> RunningTestEnvironment { - TestEnvironment::new_running(cfg).await -} - -pub fn api_server(launcher: Launcher) -> StoppedApiServer { - ApiServer::new(launcher) -} diff --git a/tests/servers/api/v1/contract/authentication.rs b/tests/servers/api/v1/contract/authentication.rs index fb8de1810..49981dd02 100644 --- a/tests/servers/api/v1/contract/authentication.rs +++ b/tests/servers/api/v1/contract/authentication.rs @@ -1,83 +1,83 @@ use torrust_tracker_test_helpers::configuration; use 
crate::common::http::{Query, QueryParam}; -use crate::servers::api::test_environment::running_test_environment; use crate::servers::api::v1::asserts::{assert_token_not_valid, assert_unauthorized}; use crate::servers::api::v1::client::Client; +use crate::servers::api::Started; #[tokio::test] async fn should_authenticate_requests_by_using_a_token_query_param() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let token = test_env.get_connection_info().api_token.unwrap(); + let token = env.get_connection_info().api_token.unwrap(); - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec())) .await; assert_eq!(response.status(), 200); - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_missing() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_request_with_query("stats", Query::default()) .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_empty() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", "")].to_vec())) .await; assert_token_not_valid(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn 
should_not_authenticate_requests_when_the_token_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) .await; assert_token_not_valid(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let token = test_env.get_connection_info().api_token.unwrap(); + let token = env.get_connection_info().api_token.unwrap(); // At the beginning of the query component - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_request(&format!("torrents?token={token}&limit=1")) .await; assert_eq!(response.status(), 200); // At the end of the query component - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_request(&format!("torrents?limit=1&token={token}")) .await; assert_eq!(response.status(), 200); - test_env.stop().await; + env.stop().await; } diff --git a/tests/servers/api/v1/contract/configuration.rs b/tests/servers/api/v1/contract/configuration.rs index a551a8b36..4220f62d2 100644 --- a/tests/servers/api/v1/contract/configuration.rs +++ b/tests/servers/api/v1/contract/configuration.rs @@ -5,7 +5,7 @@ // use torrust_tracker_test_helpers::configuration; // use crate::common::app::setup_with_configuration; -// use crate::servers::api::test_environment::stopped_test_environment; +// use crate::servers::api::environment::stopped_environment; #[tokio::test] #[ignore] @@ -27,7 
+27,7 @@ async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { // None // }; - // let test_env = new_stopped(tracker, bind_to, tls); + // let env = new_stopped(tracker, bind_to, tls); - // test_env.start().await; + // env.start().await; } diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index 4c59b4e95..f9630bafe 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -4,62 +4,57 @@ use torrust_tracker::core::auth::Key; use torrust_tracker_test_helpers::configuration; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::servers::api::force_database_error; -use crate::servers::api::test_environment::running_test_environment; use crate::servers::api::v1::asserts::{ assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_invalid_auth_key_param, assert_invalid_key_duration_param, assert_ok, assert_token_not_valid, assert_unauthorized, }; use crate::servers::api::v1::client::Client; +use crate::servers::api::{force_database_error, Started}; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - let response = Client::new(test_env.get_connection_info()) - .generate_auth_key(seconds_valid) - .await; + let response = Client::new(env.get_connection_info()).generate_auth_key(seconds_valid).await; let auth_key_resource = assert_auth_key_utf8(response).await; // Verify the key with the tracker - assert!(test_env + assert!(env .tracker .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn 
should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .generate_auth_key(seconds_valid) - .await; + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .generate_auth_key(seconds_valid) + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) .generate_auth_key(seconds_valid) .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let invalid_key_durations = [ // "", it returns 404 @@ -68,55 +63,53 @@ async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid( ]; for invalid_key_duration in invalid_key_durations { - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .post(&format!("key/{invalid_key_duration}")) .await; assert_invalid_key_duration_param(response, invalid_key_duration).await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_generated() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - force_database_error(&test_env.tracker); + force_database_error(&env.tracker); let 
seconds_valid = 60; - let response = Client::new(test_env.get_connection_info()) - .generate_auth_key(seconds_valid) - .await; + let response = Client::new(env.get_connection_info()).generate_auth_key(seconds_valid).await; assert_failed_to_generate_key(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_deleting_an_auth_key() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - let auth_key = test_env + let auth_key = env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .delete_auth_key(&auth_key.key.to_string()) .await; assert_ok(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let invalid_auth_keys = [ // "", it returns a 404 @@ -129,137 +122,128 @@ async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { ]; for invalid_auth_key in &invalid_auth_keys { - let response = Client::new(test_env.get_connection_info()) - .delete_auth_key(invalid_auth_key) - .await; + let response = Client::new(env.get_connection_info()).delete_auth_key(invalid_auth_key).await; assert_invalid_auth_key_param(response, invalid_auth_key).await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_deleted() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - let auth_key = test_env + let auth_key = env .tracker 
.generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - force_database_error(&test_env.tracker); + force_database_error(&env.tracker); - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .delete_auth_key(&auth_key.key.to_string()) .await; assert_failed_to_delete_key(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; // Generate new auth key - let auth_key = test_env + let auth_key = env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .delete_auth_key(&auth_key.key.to_string()) - .await; + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .delete_auth_key(&auth_key.key.to_string()) + .await; assert_token_not_valid(response).await; // Generate new auth key - let auth_key = test_env + let auth_key = env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) .delete_auth_key(&auth_key.key.to_string()) .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_reloading_keys() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - test_env - .tracker + env.tracker 
.generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - let response = Client::new(test_env.get_connection_info()).reload_keys().await; + let response = Client::new(env.get_connection_info()).reload_keys().await; assert_ok(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_keys_cannot_be_reloaded() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - test_env - .tracker + env.tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - force_database_error(&test_env.tracker); + force_database_error(&env.tracker); - let response = Client::new(test_env.get_connection_info()).reload_keys().await; + let response = Client::new(env.get_connection_info()).reload_keys().await; assert_failed_to_reload_keys(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - test_env - .tracker + env.tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .reload_keys() - .await; + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .reload_keys() + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) .reload_keys() .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; } diff 
--git a/tests/servers/api/v1/contract/context/health_check.rs b/tests/servers/api/v1/contract/context/health_check.rs index 108ae237a..d8dc3c030 100644 --- a/tests/servers/api/v1/contract/context/health_check.rs +++ b/tests/servers/api/v1/contract/context/health_check.rs @@ -1,14 +1,14 @@ use torrust_tracker::servers::apis::v1::context::health_check::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; -use crate::servers::api::test_environment::running_test_environment; use crate::servers::api::v1::client::get; +use crate::servers::api::Started; #[tokio::test] async fn health_check_endpoint_should_return_status_ok_if_api_is_running() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let url = format!("http://{}/api/health_check", test_env.get_connection_info().bind_address); + let url = format!("http://{}/api/health_check", env.get_connection_info().bind_address); let response = get(&url, None).await; @@ -16,5 +16,5 @@ async fn health_check_endpoint_should_return_status_ok_if_api_is_running() { assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); assert_eq!(response.json::().await.unwrap(), Report { status: Status::Ok }); - test_env.stop().await; + env.stop().await; } diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 71738f8e5..54263f8b8 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -6,22 +6,21 @@ use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::servers::api::test_environment::running_test_environment; use crate::servers::api::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; use 
crate::servers::api::v1::client::Client; +use crate::servers::api::Started; #[tokio::test] async fn should_allow_getting_tracker_statistics() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - test_env - .add_torrent_peer( - &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), - &PeerBuilder::default().into(), - ) - .await; + env.add_torrent_peer( + &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + &PeerBuilder::default().into(), + ) + .await; - let response = Client::new(test_env.get_connection_info()).get_tracker_statistics().await; + let response = Client::new(env.get_connection_info()).get_tracker_statistics().await; assert_stats( response, @@ -46,26 +45,24 @@ async fn should_allow_getting_tracker_statistics() { ) .await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .get_tracker_statistics() - .await; + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .get_tracker_statistics() + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) .get_tracker_statistics() .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; } diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index dc91e8fc5..63b97b402 100644 --- 
a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -8,7 +8,6 @@ use torrust_tracker_test_helpers::configuration; use crate::common::http::{Query, QueryParam}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::servers::api::test_environment::running_test_environment; use crate::servers::api::v1::asserts::{ assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, @@ -17,16 +16,17 @@ use crate::servers::api::v1::client::Client; use crate::servers::api::v1::contract::fixtures::{ invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, }; +use crate::servers::api::Started; #[tokio::test] async fn should_allow_getting_torrents() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; - let response = Client::new(test_env.get_connection_info()).get_torrents(Query::empty()).await; + let response = Client::new(env.get_connection_info()).get_torrents(Query::empty()).await; assert_torrent_list( response, @@ -39,21 +39,21 @@ async fn should_allow_getting_torrents() { ) .await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_limiting_the_torrents_in_the_result() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); 
let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; - test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) .await; @@ -68,21 +68,21 @@ async fn should_allow_limiting_the_torrents_in_the_result() { ) .await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_the_torrents_result_pagination() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; - test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) .await; @@ -97,75 +97,73 @@ async fn should_allow_the_torrents_result_pagination() { ) .await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { - let test_env = 
running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; for invalid_offset in &invalid_offsets { - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) .await; assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; for invalid_limit in &invalid_limits { - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) .await; assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .get_torrents(Query::empty()) - .await; + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .get_torrents(Query::empty()) + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + let 
response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) .get_torrents(Query::default()) .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_getting_a_torrent_info() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let peer = PeerBuilder::default().into(); - test_env.add_torrent_peer(&info_hash, &peer).await; + env.add_torrent_peer(&info_hash, &peer).await; - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; @@ -181,68 +179,62 @@ async fn should_allow_getting_a_torrent_info() { ) .await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; assert_torrent_not_known(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(test_env.get_connection_info()) - .get_torrent(invalid_infohash) - .await; + let response = 
Client::new(env.get_connection_info()).get_torrent(invalid_infohash).await; assert_invalid_infohash_param(response, invalid_infohash).await; } for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(test_env.get_connection_info()) - .get_torrent(invalid_infohash) - .await; + let response = Client::new(env.get_connection_info()).get_torrent(invalid_infohash).await; assert_not_found(response).await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .get_torrent(&info_hash.to_string()) - .await; + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .get_torrent(&info_hash.to_string()) + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) .get_torrent(&info_hash.to_string()) .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; } diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index 60ab4c901..358a4a19e 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -4,8 +4,6 @@ use 
torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::servers::api::force_database_error; -use crate::servers::api::test_environment::running_test_environment; use crate::servers::api::v1::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, assert_invalid_infohash_param, assert_not_found, assert_ok, assert_token_not_valid, assert_unauthorized, @@ -14,35 +12,33 @@ use crate::servers::api::v1::client::Client; use crate::servers::api::v1::contract::fixtures::{ invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, }; +use crate::servers::api::{force_database_error, Started}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(test_env.get_connection_info()) - .whitelist_a_torrent(&info_hash) - .await; + let response = Client::new(env.get_connection_info()).whitelist_a_torrent(&info_hash).await; assert_ok(response).await; assert!( - test_env - .tracker + env.tracker .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) .await ); - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let api_client = Client::new(test_env.get_connection_info()); + let api_client = Client::new(env.get_connection_info()); let response = 
api_client.whitelist_a_torrent(&info_hash).await; assert_ok(response).await; @@ -50,55 +46,51 @@ async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() let response = api_client.whitelist_a_torrent(&info_hash).await; assert_ok(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .whitelist_a_torrent(&info_hash) - .await; + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .whitelist_a_torrent(&info_hash) + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) .whitelist_a_torrent(&info_hash) .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_whitelisted() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - force_database_error(&test_env.tracker); + force_database_error(&env.tracker); - let response = Client::new(test_env.get_connection_info()) - .whitelist_a_torrent(&info_hash) - .await; + let response = Client::new(env.get_connection_info()).whitelist_a_torrent(&info_hash).await; assert_failed_to_whitelist_torrent(response).await; - test_env.stop().await; + env.stop().await; 
} #[tokio::test] async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .whitelist_a_torrent(invalid_infohash) .await; @@ -106,55 +98,55 @@ async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invali } for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .whitelist_a_torrent(invalid_infohash) .await; assert_not_found(response).await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .remove_torrent_from_whitelist(&hash) .await; assert_ok(response).await; - assert!(!test_env.tracker.is_info_hash_whitelisted(&info_hash).await); + assert!(!env.tracker.is_info_hash_whitelisted(&info_hash).await); - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = 
Started::new(&configuration::ephemeral().into()).await; let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) .await; assert_ok(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .remove_torrent_from_whitelist(invalid_infohash) .await; @@ -162,99 +154,97 @@ async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_inf } for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .remove_torrent_from_whitelist(invalid_infohash) .await; assert_not_found(response).await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - force_database_error(&test_env.tracker); + force_database_error(&env.tracker); - let response = Client::new(test_env.get_connection_info()) + let response = 
Client::new(env.get_connection_info()) .remove_torrent_from_whitelist(&hash) .await; assert_failed_to_remove_torrent_from_whitelist(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .remove_torrent_from_whitelist(&hash) - .await; + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .remove_torrent_from_whitelist(&hash) + .await; assert_token_not_valid(response).await; - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) .remove_torrent_from_whitelist(&hash) .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_reload_the_whitelist_from_the_database() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + 
env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; + let response = Client::new(env.get_connection_info()).reload_whitelist().await; assert_ok(response).await; /* todo: this assert fails because the whitelist has not been reloaded yet. We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent is whitelisted and use that endpoint to check if the torrent is still there after reloading. assert!( - !(test_env + !(env .tracker .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) .await) ); */ - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - force_database_error(&test_env.tracker); + force_database_error(&env.tracker); - let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; + let response = Client::new(env.get_connection_info()).reload_whitelist().await; assert_failed_to_reload_whitelist(response).await; - test_env.stop().await; + env.stop().await; } diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs index c02335d05..7b00866d3 100644 --- a/tests/servers/health_check_api/contract.rs +++ b/tests/servers/health_check_api/contract.rs @@ -3,23 +3,311 @@ use torrust_tracker::servers::registar::Registar; use torrust_tracker_test_helpers::configuration; use crate::servers::health_check_api::client::get; -use crate::servers::health_check_api::test_environment; +use 
crate::servers::health_check_api::Started; #[tokio::test] -async fn health_check_endpoint_should_return_status_ok_when_no_service_is_running() { +async fn health_check_endpoint_should_return_status_ok_when_there_is_no_services_registered() { let configuration = configuration::ephemeral_with_no_services(); - let registar = &Registar::default(); + let env = Started::new(&configuration.health_check_api.into(), Registar::default()).await; - let (bound_addr, test_env) = test_environment::start(&configuration.health_check_api, registar.entries()).await; - - let url = format!("http://{bound_addr}/health_check"); - - let response = get(&url).await; + let response = get(&format!("http://{}/health_check", env.state.binding)).await; assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - assert_eq!(response.json::().await.unwrap().status, Status::Ok); - test_env.abort(); + let report = response + .json::() + .await + .expect("it should be able to get the report as json"); + + assert_eq!(report.status, Status::None); + + env.stop().await.expect("it should stop the service"); +} + +mod api { + use std::sync::Arc; + + use torrust_tracker::servers::health_check_api::resources::{Report, Status}; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::api; + use crate::servers::health_check_api::client::get; + use crate::servers::health_check_api::Started; + + #[tokio::test] + pub(crate) async fn it_should_return_good_health_for_api_service() { + let configuration = Arc::new(configuration::ephemeral()); + + let service = api::Started::new(&configuration).await; + + let registar = service.registar.clone(); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + 
assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Ok); + assert_eq!(report.message, String::new()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, service.bind_address()); + + assert_eq!(details.result, Ok("200 OK".to_string())); + + assert_eq!( + details.info, + format!( + "checking api health check at: http://{}/api/health_check", + service.bind_address() + ) + ); + + env.stop().await.expect("it should stop the service"); + } + + service.stop().await; + } + + #[tokio::test] + pub(crate) async fn it_should_return_error_when_api_service_was_stopped_after_registration() { + let configuration = Arc::new(configuration::ephemeral()); + + let service = api::Started::new(&configuration).await; + + let binding = service.bind_address(); + + let registar = service.registar.clone(); + + service.server.stop().await.expect("it should stop udp server"); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Error); + assert_eq!(report.message, "health check failed".to_string()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, binding); + assert!(details.result.as_ref().is_err_and(|e| e.contains("Connection refused"))); + assert_eq!( + details.info, + format!("checking api health check at: http://{binding}/api/health_check") + ); + + 
env.stop().await.expect("it should stop the service"); + } + } +} + +mod http { + use std::sync::Arc; + + use torrust_tracker::servers::health_check_api::resources::{Report, Status}; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::health_check_api::client::get; + use crate::servers::health_check_api::Started; + use crate::servers::http; + + #[tokio::test] + pub(crate) async fn it_should_return_good_health_for_http_service() { + let configuration = Arc::new(configuration::ephemeral()); + + let service = http::Started::new(&configuration).await; + + let registar = service.registar.clone(); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Ok); + assert_eq!(report.message, String::new()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, *service.bind_address()); + assert_eq!(details.result, Ok("200 OK".to_string())); + + assert_eq!( + details.info, + format!( + "checking http tracker health check at: http://{}/health_check", + service.bind_address() + ) + ); + + env.stop().await.expect("it should stop the service"); + } + + service.stop().await; + } + + #[tokio::test] + pub(crate) async fn it_should_return_error_when_http_service_was_stopped_after_registration() { + let configuration = Arc::new(configuration::ephemeral()); + + let service = http::Started::new(&configuration).await; + + let binding = *service.bind_address(); + + let registar = service.registar.clone(); + + service.server.stop().await.expect("it should stop udp server"); + + { + let config = 
configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Error); + assert_eq!(report.message, "health check failed".to_string()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, binding); + assert!(details.result.as_ref().is_err_and(|e| e.contains("Connection refused"))); + assert_eq!( + details.info, + format!("checking http tracker health check at: http://{binding}/health_check") + ); + + env.stop().await.expect("it should stop the service"); + } + } +} + +mod udp { + use std::sync::Arc; + + use torrust_tracker::servers::health_check_api::resources::{Report, Status}; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::health_check_api::client::get; + use crate::servers::health_check_api::Started; + use crate::servers::udp; + + #[tokio::test] + pub(crate) async fn it_should_return_good_health_for_udp_service() { + let configuration = Arc::new(configuration::ephemeral()); + + let service = udp::Started::new(&configuration).await; + + let registar = service.registar.clone(); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Ok); + assert_eq!(report.message, 
String::new()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, service.bind_address()); + assert_eq!(details.result, Ok("Connected".to_string())); + + assert_eq!( + details.info, + format!("checking the udp tracker health check at: {}", service.bind_address()) + ); + + env.stop().await.expect("it should stop the service"); + } + + service.stop().await; + } + + #[tokio::test] + pub(crate) async fn it_should_return_error_when_udp_service_was_stopped_after_registration() { + let configuration = Arc::new(configuration::ephemeral()); + + let service = udp::Started::new(&configuration).await; + + let binding = service.bind_address(); + + let registar = service.registar.clone(); + + service.server.stop().await.expect("it should stop udp server"); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Error); + assert_eq!(report.message, "health check failed".to_string()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, binding); + assert_eq!(details.result, Err("Timed Out".to_string())); + assert_eq!(details.info, format!("checking the udp tracker health check at: {binding}")); + + env.stop().await.expect("it should stop the service"); + } + } } diff --git a/tests/servers/health_check_api/environment.rs b/tests/servers/health_check_api/environment.rs new file mode 100644 index 000000000..9aa3ab16d --- /dev/null +++ b/tests/servers/health_check_api/environment.rs @@ -0,0 +1,91 @@ +use std::net::SocketAddr; +use std::sync::Arc; 
+ +use tokio::sync::oneshot::{self, Sender}; +use tokio::task::JoinHandle; +use torrust_tracker::bootstrap::jobs::Started; +use torrust_tracker::servers::health_check_api::server; +use torrust_tracker::servers::registar::Registar; +use torrust_tracker::servers::signals::{self, Halted}; +use torrust_tracker_configuration::HealthCheckApi; + +#[derive(Debug)] +pub enum Error { + Error(String), +} + +pub struct Running { + pub binding: SocketAddr, + pub halt_task: Sender, + pub task: JoinHandle, +} + +pub struct Stopped { + pub bind_to: SocketAddr, +} + +pub struct Environment { + pub registar: Registar, + pub state: S, +} + +impl Environment { + pub fn new(config: &Arc, registar: Registar) -> Self { + let bind_to = config + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); + + Self { + registar, + state: Stopped { bind_to }, + } + } + + /// Start the test environment for the Health Check API. + /// It runs the API server. + pub async fn start(self) -> Environment { + let (tx_start, rx_start) = oneshot::channel::(); + let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); + + let register = self.registar.entries(); + + let server = tokio::spawn(async move { + server::start(self.state.bind_to, tx_start, rx_halt, register) + .await + .expect("it should start the health check service"); + self.state.bind_to + }); + + let binding = rx_start.await.expect("it should send service binding").address; + + Environment { + registar: self.registar.clone(), + state: Running { + task: server, + halt_task: tx_halt, + binding, + }, + } + } +} + +impl Environment { + pub async fn new(config: &Arc, registar: Registar) -> Self { + Environment::::new(config, registar).start().await + } + + pub async fn stop(self) -> Result, Error> { + self.state + .halt_task + .send(Halted::Normal) + .map_err(|e| Error::Error(e.to_string()))?; + + let bind_to = self.state.task.await.expect("it should shutdown the service"); + + Ok(Environment { + registar: 
self.registar.clone(), + state: Stopped { bind_to }, + }) + } +} diff --git a/tests/servers/health_check_api/mod.rs b/tests/servers/health_check_api/mod.rs index 89f19a334..9e15c5f62 100644 --- a/tests/servers/health_check_api/mod.rs +++ b/tests/servers/health_check_api/mod.rs @@ -1,3 +1,5 @@ pub mod client; pub mod contract; -pub mod test_environment; +pub mod environment; + +pub type Started = environment::Environment; diff --git a/tests/servers/health_check_api/test_environment.rs b/tests/servers/health_check_api/test_environment.rs deleted file mode 100644 index 18924e101..000000000 --- a/tests/servers/health_check_api/test_environment.rs +++ /dev/null @@ -1,33 +0,0 @@ -use std::net::SocketAddr; - -use tokio::sync::oneshot; -use tokio::task::JoinHandle; -use torrust_tracker::bootstrap::jobs::Started; -use torrust_tracker::servers::health_check_api::server; -use torrust_tracker::servers::registar::ServiceRegistry; -use torrust_tracker_configuration::HealthCheckApi; - -/// Start the test environment for the Health Check API. -/// It runs the API server. 
-pub async fn start(config: &HealthCheckApi, register: ServiceRegistry) -> (SocketAddr, JoinHandle<()>) { - let bind_addr = config - .bind_address - .parse::() - .expect("Health Check API bind_address invalid."); - - let (tx, rx) = oneshot::channel::(); - - let join_handle = tokio::spawn(async move { - let handle = server::start(bind_addr, tx, register); - if let Ok(()) = handle.await { - panic!("Health Check API server on http://{bind_addr} stopped"); - } - }); - - let bound_addr = match rx.await { - Ok(msg) => msg.address, - Err(e) => panic!("the Health Check API server was dropped: {e}"), - }; - - (bound_addr, join_handle) -} diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs new file mode 100644 index 000000000..326f4e534 --- /dev/null +++ b/tests/servers/http/environment.rs @@ -0,0 +1,81 @@ +use std::sync::Arc; + +use futures::executor::block_on; +use torrust_tracker::bootstrap::app::initialize_with_configuration; +use torrust_tracker::bootstrap::jobs::make_rust_tls; +use torrust_tracker::core::peer::Peer; +use torrust_tracker::core::Tracker; +use torrust_tracker::servers::http::server::{HttpServer, Launcher, Running, Stopped}; +use torrust_tracker::servers::registar::Registar; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_configuration::{Configuration, HttpTracker}; + +pub struct Environment { + pub config: Arc, + pub tracker: Arc, + pub registar: Registar, + pub server: HttpServer, +} + +impl Environment { + /// Add a torrent to the tracker + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + } +} + +impl Environment { + #[allow(dead_code)] + pub fn new(configuration: &Arc) -> Self { + let tracker = initialize_with_configuration(configuration); + + let config = Arc::new(configuration.http_trackers[0].clone()); + + let bind_to = config + .bind_address + .parse::() + .expect("Tracker API 
bind_address invalid."); + + let tls = block_on(make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path)) + .map(|tls| tls.expect("tls config failed")); + + let server = HttpServer::new(Launcher::new(bind_to, tls)); + + Self { + config, + tracker, + registar: Registar::default(), + server, + } + } + + #[allow(dead_code)] + pub async fn start(self) -> Environment { + Environment { + config: self.config, + tracker: self.tracker.clone(), + registar: self.registar.clone(), + server: self.server.start(self.tracker, self.registar.give_form()).await.unwrap(), + } + } +} + +impl Environment { + pub async fn new(configuration: &Arc) -> Self { + Environment::::new(configuration).start().await + } + + pub async fn stop(self) -> Environment { + Environment { + config: self.config, + tracker: self.tracker, + registar: Registar::default(), + + server: self.server.stop().await.unwrap(), + } + } + + pub fn bind_address(&self) -> &std::net::SocketAddr { + &self.server.state.binding + } +} diff --git a/tests/servers/http/mod.rs b/tests/servers/http/mod.rs index cb2885df0..65affc433 100644 --- a/tests/servers/http/mod.rs +++ b/tests/servers/http/mod.rs @@ -1,11 +1,14 @@ pub mod asserts; pub mod client; +pub mod environment; pub mod requests; pub mod responses; -pub mod test_environment; pub mod v1; +pub type Started = environment::Environment; + use percent_encoding::NON_ALPHANUMERIC; +use torrust_tracker::servers::http::server; pub type ByteArray20 = [u8; 20]; diff --git a/tests/servers/http/test_environment.rs b/tests/servers/http/test_environment.rs deleted file mode 100644 index 9cab40db2..000000000 --- a/tests/servers/http/test_environment.rs +++ /dev/null @@ -1,133 +0,0 @@ -use std::sync::Arc; - -use futures::executor::block_on; -use torrust_tracker::bootstrap::jobs::make_rust_tls; -use torrust_tracker::core::peer::Peer; -use torrust_tracker::core::Tracker; -use torrust_tracker::servers::http::server::{HttpServer, Launcher, RunningHttpServer, 
StoppedHttpServer}; -use torrust_tracker::servers::registar::Registar; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; - -use crate::common::app::setup_with_configuration; - -#[allow(clippy::module_name_repetitions, dead_code)] -pub type StoppedTestEnvironment = TestEnvironment; -#[allow(clippy::module_name_repetitions)] -pub type RunningTestEnvironment = TestEnvironment; - -pub struct TestEnvironment { - pub cfg: Arc, - pub tracker: Arc, - pub state: S, -} - -#[allow(dead_code)] -pub struct Stopped { - http_server: StoppedHttpServer, -} - -pub struct Running { - http_server: RunningHttpServer, -} - -impl TestEnvironment { - /// Add a torrent to the tracker - pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { - self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - } -} - -impl TestEnvironment { - #[allow(dead_code)] - pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { - let cfg = Arc::new(cfg); - - let tracker = setup_with_configuration(&cfg); - - let config = cfg.http_trackers[0].clone(); - - let bind_to = config - .bind_address - .parse::() - .expect("Tracker API bind_address invalid."); - - let tls = block_on(make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path)) - .map(|tls| tls.expect("tls config failed")); - - let http_server = HttpServer::new(Launcher::new(bind_to, tls)); - - Self { - cfg, - tracker, - state: Stopped { http_server }, - } - } - - #[allow(dead_code)] - pub async fn start(self) -> TestEnvironment { - TestEnvironment { - cfg: self.cfg, - tracker: self.tracker.clone(), - state: Running { - http_server: self - .state - .http_server - .start(self.tracker, Registar::default().give_form()) - .await - .unwrap(), - }, - } - } - - // #[allow(dead_code)] - // pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { - // &self.state.http_server.cfg - // } - - // #[allow(dead_code)] - // pub fn config_mut(&mut self) -> &mut 
torrust_tracker_configuration::HttpTracker { - // &mut self.state.http_server.cfg - // } -} - -impl TestEnvironment { - pub async fn new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { - let test_env = StoppedTestEnvironment::new_stopped(cfg); - - test_env.start().await - } - - pub async fn stop(self) -> TestEnvironment { - TestEnvironment { - cfg: self.cfg, - tracker: self.tracker, - state: Stopped { - http_server: self.state.http_server.stop().await.unwrap(), - }, - } - } - - pub fn bind_address(&self) -> &std::net::SocketAddr { - &self.state.http_server.state.binding - } - - // #[allow(dead_code)] - // pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { - // &self.state.http_server.cfg - // } -} - -#[allow(clippy::module_name_repetitions, dead_code)] -pub fn stopped_test_environment(cfg: torrust_tracker_configuration::Configuration) -> StoppedTestEnvironment { - TestEnvironment::new_stopped(cfg) -} - -#[allow(clippy::module_name_repetitions)] -pub async fn running_test_environment(cfg: torrust_tracker_configuration::Configuration) -> RunningTestEnvironment { - TestEnvironment::new_running(cfg).await -} - -#[allow(dead_code)] -pub fn http_server(launcher: Launcher) -> StoppedHttpServer { - HttpServer::new(launcher) -} diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index e394779ad..be285dcd7 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -1,12 +1,12 @@ use torrust_tracker_test_helpers::configuration; -use crate::servers::http::test_environment::running_test_environment; +use crate::servers::http::Started; #[tokio::test] -async fn test_environment_should_be_started_and_stopped() { - let test_env = running_test_environment(configuration::ephemeral()).await; +async fn environment_should_be_started_and_stopped() { + let env = Started::new(&configuration::ephemeral().into()).await; - test_env.stop().await; + env.stop().await; } mod for_all_config_modes 
{ @@ -15,19 +15,19 @@ mod for_all_config_modes { use torrust_tracker_test_helpers::configuration; use crate::servers::http::client::Client; - use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::Started; #[tokio::test] async fn health_check_endpoint_should_return_ok_if_the_http_tracker_is_running() { - let test_env = running_test_environment(configuration::ephemeral_with_reverse_proxy()).await; + let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; - let response = Client::new(*test_env.bind_address()).health_check().await; + let response = Client::new(*env.bind_address()).health_check().await; assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); assert_eq!(response.json::().await.unwrap(), Report { status: Status::Ok }); - test_env.stop().await; + env.stop().await; } mod and_running_on_reverse_proxy { @@ -36,37 +36,37 @@ mod for_all_config_modes { use crate::servers::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; use crate::servers::http::client::Client; use crate::servers::http::requests::announce::QueryBuilder; - use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::Started; #[tokio::test] async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { // If the tracker is running behind a reverse proxy, the peer IP is the // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. 
- let test_env = running_test_environment(configuration::ephemeral_with_reverse_proxy()).await; + let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; let params = QueryBuilder::default().query().params(); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let test_env = running_test_environment(configuration::ephemeral_with_reverse_proxy()).await; + let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; let params = QueryBuilder::default().query().params(); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") .await; assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; - test_env.stop().await; + env.stop().await; } } @@ -102,60 +102,59 @@ mod for_all_config_modes { }; use crate::servers::http::client::Client; use crate::servers::http::requests::announce::{Compact, QueryBuilder}; - use crate::servers::http::responses; use crate::servers::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; - use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::{responses, Started}; #[tokio::test] async fn it_should_start_and_stop() { - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; - test_env.stop().await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + env.stop().await; } #[tokio::test] async fn 
should_respond_if_only_the_mandatory_fields_are_provided() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); params.remove_optional_params(); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_is_announce_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_url_query_component_is_empty() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let response = Client::new(*test_env.bind_address()).get("announce").await; + let response = Client::new(*env.bind_address()).get("announce").await; assert_missing_query_params_for_announce_request_error_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_url_query_parameters_are_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let invalid_query_param = "a=b=c"; - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .get(&format!("announce?{invalid_query_param}")) .await; assert_cannot_parse_query_param_error_response(response, "invalid param a=b=c").await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; // Without `info_hash` param @@ -163,7 +162,7 @@ mod for_all_config_modes { params.info_hash = None; - let response = 
Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "missing param info_hash").await; @@ -173,7 +172,7 @@ mod for_all_config_modes { params.peer_id = None; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "missing param peer_id").await; @@ -183,28 +182,28 @@ mod for_all_config_modes { params.port = None; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "missing param port").await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); for invalid_value in &invalid_info_hashes() { params.set("info_hash", invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_cannot_parse_query_params_error_response(response, "").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -214,22 +213,22 @@ mod for_all_config_modes { // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. 
- let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_is_announce_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -238,17 +237,17 @@ mod for_all_config_modes { for invalid_value in invalid_values { params.set("downloaded", invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -257,17 +256,17 @@ mod for_all_config_modes { for invalid_value in invalid_values { params.set("uploaded", invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } - test_env.stop().await; + env.stop().await; } 
#[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -283,17 +282,17 @@ mod for_all_config_modes { for invalid_value in invalid_values { params.set("peer_id", invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -302,17 +301,17 @@ mod for_all_config_modes { for invalid_value in invalid_values { params.set("port", invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -321,17 +320,17 @@ mod for_all_config_modes { for invalid_value in invalid_values { params.set("left", invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = 
Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_event_param_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -348,17 +347,17 @@ mod for_all_config_modes { for invalid_value in invalid_values { params.set("event", invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_compact_param_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -367,19 +366,19 @@ mod for_all_config_modes { for invalid_value in invalid_values { params.set("compact", invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; - let response = Client::new(*test_env.bind_address()) + let response = 
Client::new(*env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) @@ -387,7 +386,7 @@ mod for_all_config_modes { ) .await; - let announce_policy = test_env.tracker.get_announce_policy(); + let announce_policy = env.tracker.get_announce_policy(); assert_announce_response( response, @@ -401,12 +400,12 @@ mod for_all_config_modes { ) .await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -416,10 +415,10 @@ mod for_all_config_modes { .build(); // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2. 
This new peer is non included on the response peer list - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -428,7 +427,7 @@ mod for_all_config_modes { ) .await; - let announce_policy = test_env.tracker.get_announce_policy(); + let announce_policy = env.tracker.get_announce_policy(); // It should only contain the previously announced peer assert_announce_response( @@ -443,12 +442,12 @@ mod for_all_config_modes { ) .await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -457,7 +456,7 @@ mod for_all_config_modes { .with_peer_id(&peer::Id(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) .build(); - test_env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; + env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; // Announce a peer using IPV6 let peer_using_ipv6 = PeerBuilder::default() @@ -467,10 +466,10 @@ mod for_all_config_modes { 8080, )) .build(); - test_env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; + env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; // Announce the new Peer. 
- let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -479,7 +478,7 @@ mod for_all_config_modes { ) .await; - let announce_policy = test_env.tracker.get_announce_policy(); + let announce_policy = env.tracker.get_announce_policy(); // The newly announced peer is not included on the response peer list, // but all the previously announced peers should be included regardless the IP version they are using. @@ -495,18 +494,18 @@ mod for_all_config_modes { ) .await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let peer = PeerBuilder::default().build(); // Add a peer - test_env.add_torrent_peer(&info_hash, &peer).await; + env.add_torrent_peer(&info_hash, &peer).await; let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -515,11 +514,11 @@ mod for_all_config_modes { assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); - let response = Client::new(*test_env.bind_address()).announce(&announce_query).await; + let response = Client::new(*env.bind_address()).announce(&announce_query).await; assert_empty_announce_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -527,7 +526,7 @@ mod for_all_config_modes { // Tracker Returns Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -537,10 +536,10 @@ mod for_all_config_modes { .build(); // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 accepting compact responses - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -560,7 +559,7 @@ mod for_all_config_modes { assert_compact_announce_response(response, &expected_response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -568,7 +567,7 @@ mod for_all_config_modes { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -578,12 +577,12 @@ mod for_all_config_modes { .build(); // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list // https://www.bittorrent.org/beps/bep_0023.html - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -595,7 +594,7 @@ mod for_all_config_modes { assert!(!is_a_compact_announce_response(response).await); - test_env.stop().await; + env.stop().await; } async fn is_a_compact_announce_response(response: Response) -> bool { @@ -606,19 +605,19 @@ mod 
for_all_config_modes { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; - Client::new(*test_env.bind_address()) + Client::new(*env.bind_address()) .announce(&QueryBuilder::default().query()) .await; - let stats = test_env.tracker.get_stats().await; + let stats = env.tracker.get_stats().await; assert_eq!(stats.tcp4_connections_handled, 1); drop(stats); - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -630,28 +629,28 @@ mod for_all_config_modes { return; // we cannot bind to a ipv6 socket, so we will skip this test } - let test_env = running_test_environment(configuration::ephemeral_ipv6()).await; + let env = Started::new(&configuration::ephemeral_ipv6().into()).await; - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + Client::bind(*env.bind_address(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; - let stats = test_env.tracker.get_stats().await; + let stats = env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 1); drop(stats); - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; - Client::new(*test_env.bind_address()) + Client::new(*env.bind_address()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -659,30 +658,30 @@ mod for_all_config_modes { ) .await; - let stats = test_env.tracker.get_stats().await; + let stats = env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 0); drop(stats); - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; - Client::new(*test_env.bind_address()) + Client::new(*env.bind_address()) .announce(&QueryBuilder::default().query()) .await; - let stats = test_env.tracker.get_stats().await; + let stats = env.tracker.get_stats().await; assert_eq!(stats.tcp4_announces_handled, 1); drop(stats); - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -694,28 +693,28 @@ mod for_all_config_modes { return; // we cannot bind to a ipv6 socket, so we will skip this test } - let test_env = running_test_environment(configuration::ephemeral_ipv6()).await; + let env = Started::new(&configuration::ephemeral_ipv6().into()).await; - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + Client::bind(*env.bind_address(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; - let stats = test_env.tracker.get_stats().await; + let stats = env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 1); drop(stats); - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn 
should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; - Client::new(*test_env.bind_address()) + Client::new(*env.bind_address()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -723,18 +722,18 @@ mod for_all_config_modes { ) .await; - let stats = test_env.tracker.get_stats().await; + let stats = env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 0); drop(stats); - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); @@ -745,19 +744,19 @@ mod for_all_config_modes { .query(); { - let client = Client::bind(*test_env.bind_address(), client_ip); + let client = Client::bind(*env.bind_address(), client_ip); let status = client.announce(&announce_query).await.status(); assert_eq!(status, StatusCode::OK); } - let peers = test_env.tracker.get_torrent_peers(&info_hash).await; + let peers = env.tracker.get_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), client_ip); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -768,11 +767,8 @@ mod for_all_config_modes { client <-> tracker <-> Internet 127.0.0.1 external_ip = "2.137.87.41" */ - - let test_env = 
running_test_environment(configuration::ephemeral_with_external_ip( - IpAddr::from_str("2.137.87.41").unwrap(), - )) - .await; + let env = + Started::new(&configuration::ephemeral_with_external_ip(IpAddr::from_str("2.137.87.41").unwrap()).into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); @@ -784,19 +780,19 @@ mod for_all_config_modes { .query(); { - let client = Client::bind(*test_env.bind_address(), client_ip); + let client = Client::bind(*env.bind_address(), client_ip); let status = client.announce(&announce_query).await.status(); assert_eq!(status, StatusCode::OK); } - let peers = test_env.tracker.get_torrent_peers(&info_hash).await; + let peers = env.tracker.get_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), test_env.tracker.get_maybe_external_ip().unwrap()); + assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -808,9 +804,10 @@ mod for_all_config_modes { ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" */ - let test_env = running_test_environment(configuration::ephemeral_with_external_ip( - IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), - )) + let env = Started::new( + &configuration::ephemeral_with_external_ip(IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()) + .into(), + ) .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -823,19 +820,19 @@ mod for_all_config_modes { .query(); { - let client = Client::bind(*test_env.bind_address(), client_ip); + let client = Client::bind(*env.bind_address(), client_ip); let status = client.announce(&announce_query).await.status(); assert_eq!(status, StatusCode::OK); } - let peers = 
test_env.tracker.get_torrent_peers(&info_hash).await; + let peers = env.tracker.get_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), test_env.tracker.get_maybe_external_ip().unwrap()); + assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -847,14 +844,14 @@ mod for_all_config_modes { 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 */ - let test_env = running_test_environment(configuration::ephemeral_with_reverse_proxy()).await; + let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); { - let client = Client::new(*test_env.bind_address()); + let client = Client::new(*env.bind_address()); let status = client .announce_with_header( &announce_query, @@ -867,12 +864,12 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = test_env.tracker.get_torrent_peers(&info_hash).await; + let peers = env.tracker.get_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); - test_env.stop().await; + env.stop().await; } } @@ -901,56 +898,54 @@ mod for_all_config_modes { assert_scrape_response, }; use crate::servers::http::client::Client; - use crate::servers::http::requests; use crate::servers::http::requests::scrape::QueryBuilder; use crate::servers::http::responses::scrape::{self, File, ResponseBuilder}; - use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::{requests, Started}; //#[tokio::test] #[allow(dead_code)] async fn should_fail_when_the_request_is_empty() { - let test_env = 
running_test_environment(configuration::ephemeral_mode_public()).await; - let response = Client::new(*test_env.bind_address()).get("scrape").await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let response = Client::new(*env.bind_address()).get("scrape").await; assert_missing_query_params_for_scrape_request_error_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; let mut params = QueryBuilder::default().query().params(); for invalid_value in &invalid_info_hashes() { params.set_one_info_hash_param(invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_cannot_parse_query_params_error_response(response, "").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .scrape( 
&requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -971,26 +966,25 @@ mod for_all_config_modes { assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_no_bytes_pending_to_download() - .build(), - ) - .await; + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_no_bytes_pending_to_download() + .build(), + ) + .await; - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1011,16 +1005,16 @@ mod for_all_config_modes { assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1030,17 +1024,17 @@ mod for_all_config_modes { assert_scrape_response(response, 
&scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_accept_multiple_infohashes() { - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .add_info_hash(&info_hash1) @@ -1056,16 +1050,16 @@ mod for_all_config_modes { assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let test_env = running_test_environment(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_mode_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::new(*test_env.bind_address()) + Client::new(*env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1073,13 +1067,13 @@ mod for_all_config_modes { ) .await; - let stats = test_env.tracker.get_stats().await; + let stats = env.tracker.get_stats().await; assert_eq!(stats.tcp4_scrapes_handled, 1); drop(stats); - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -1091,11 +1085,11 @@ mod for_all_config_modes { return; // we cannot bind to a ipv6 socket, so we will skip this test } - let test_env = running_test_environment(configuration::ephemeral_ipv6()).await; + let env = Started::new(&configuration::ephemeral_ipv6().into()).await; let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + Client::bind(*env.bind_address(), IpAddr::from_str("::1").unwrap()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1103,13 +1097,13 @@ mod for_all_config_modes { ) .await; - let stats = test_env.tracker.get_stats().await; + let stats = env.tracker.get_stats().await; assert_eq!(stats.tcp6_scrapes_handled, 1); drop(stats); - test_env.stop().await; + env.stop().await; } } } @@ -1125,42 +1119,41 @@ mod configured_as_whitelisted { use crate::servers::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; use crate::servers::http::client::Client; use crate::servers::http::requests::announce::QueryBuilder; - use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::Started; #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let test_env = running_test_environment(configuration::ephemeral_mode_whitelisted()).await; + let env = Started::new(&configuration::ephemeral_mode_whitelisted().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; assert_torrent_not_in_whitelist_error_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { - let test_env = running_test_environment(configuration::ephemeral_mode_whitelisted()).await; + let env = Started::new(&configuration::ephemeral_mode_whitelisted().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .tracker + env.tracker 
.add_torrent_to_whitelist(&info_hash) .await .expect("should add the torrent to the whitelist"); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; assert_is_announce_response(response).await; - test_env.stop().await; + env.stop().await; } } @@ -1174,27 +1167,25 @@ mod configured_as_whitelisted { use crate::servers::http::asserts::assert_scrape_response; use crate::servers::http::client::Client; - use crate::servers::http::requests; use crate::servers::http::responses::scrape::{File, ResponseBuilder}; - use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::{requests, Started}; #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let test_env = running_test_environment(configuration::ephemeral_mode_whitelisted()).await; + let env = Started::new(&configuration::ephemeral_mode_whitelisted().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1206,32 +1197,30 @@ mod configured_as_whitelisted { assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let test_env = 
running_test_environment(configuration::ephemeral_mode_whitelisted()).await; + let env = Started::new(&configuration::ephemeral_mode_whitelisted().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; - test_env - .tracker + env.tracker .add_torrent_to_whitelist(&info_hash) .await .expect("should add the torrent to the whitelist"); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1252,7 +1241,7 @@ mod configured_as_whitelisted { assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + env.stop().await; } } } @@ -1270,45 +1259,45 @@ mod configured_as_private { use crate::servers::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; use crate::servers::http::client::Client; use crate::servers::http::requests::announce::QueryBuilder; - use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::Started; #[tokio::test] async fn should_respond_to_authenticated_peers() { - let test_env = running_test_environment(configuration::ephemeral_mode_private()).await; + let env = Started::new(&configuration::ephemeral_mode_private().into()).await; - let expiring_key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + let expiring_key = env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(*test_env.bind_address(), expiring_key.key()) + let 
response = Client::authenticated(*env.bind_address(), expiring_key.key()) .announce(&QueryBuilder::default().query()) .await; assert_is_announce_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let test_env = running_test_environment(configuration::ephemeral_mode_private()).await; + let env = Started::new(&configuration::ephemeral_mode_private().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; assert_authentication_error_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = running_test_environment(configuration::ephemeral_mode_private()).await; + let env = Started::new(&configuration::ephemeral_mode_private().into()).await; let invalid_key = "INVALID_KEY"; - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .get(&format!( "announce/{invalid_key}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" )) @@ -1319,18 +1308,18 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { - let test_env = running_test_environment(configuration::ephemeral_mode_private()).await; + let env = Started::new(&configuration::ephemeral_mode_private().into()).await; // The tracker does not have this key let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(*test_env.bind_address(), 
unregistered_key) + let response = Client::authenticated(*env.bind_address(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; assert_authentication_error_response(response).await; - test_env.stop().await; + env.stop().await; } } @@ -1347,17 +1336,16 @@ mod configured_as_private { use crate::servers::http::asserts::{assert_authentication_error_response, assert_scrape_response}; use crate::servers::http::client::Client; - use crate::servers::http::requests; use crate::servers::http::responses::scrape::{File, ResponseBuilder}; - use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::{requests, Started}; #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = running_test_environment(configuration::ephemeral_mode_private()).await; + let env = Started::new(&configuration::ephemeral_mode_private().into()).await; let invalid_key = "INVALID_KEY"; - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .get(&format!( "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" )) @@ -1368,21 +1356,20 @@ mod configured_as_private { #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let test_env = running_test_environment(configuration::ephemeral_mode_private()).await; + let env = Started::new(&configuration::ephemeral_mode_private().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; - let response = Client::new(*test_env.bind_address()) + 
let response = Client::new(*env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1394,28 +1381,27 @@ mod configured_as_private { assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let test_env = running_test_environment(configuration::ephemeral_mode_private()).await; + let env = Started::new(&configuration::ephemeral_mode_private().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; - let expiring_key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + let expiring_key = env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(*test_env.bind_address(), expiring_key.key()) + let response = Client::authenticated(*env.bind_address(), expiring_key.key()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1436,7 +1422,7 @@ mod configured_as_private { assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -1444,23 +1430,22 @@ mod configured_as_private { // There is not authentication error // code-review: should this really be this way? 
- let test_env = running_test_environment(configuration::ephemeral_mode_private()).await; + let env = Started::new(&configuration::ephemeral_mode_private().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - let response = Client::authenticated(*test_env.bind_address(), false_key) + let response = Client::authenticated(*env.bind_address(), false_key) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1472,7 +1457,7 @@ mod configured_as_private { assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + env.stop().await; } } } diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index b16a47cd3..9ac585190 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -11,7 +11,7 @@ use torrust_tracker::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_error_response; -use crate::servers::udp::test_environment::running_test_environment; +use crate::servers::udp::Started; fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { [0; MAX_PACKET_SIZE] @@ -36,9 +36,9 @@ async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrac #[tokio::test] async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; 
- let client = new_udp_client_connected(&test_env.bind_address().to_string()).await; + let client = new_udp_client_connected(&env.bind_address().to_string()).await; client.send(&empty_udp_request()).await; @@ -55,13 +55,13 @@ mod receiving_a_connection_request { use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_connect_response; - use crate::servers::udp::test_environment::running_test_environment; + use crate::servers::udp::Started; #[tokio::test] async fn should_return_a_connect_response() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; + let client = new_udp_tracker_client_connected(&env.bind_address().to_string()).await; let connect_request = ConnectRequest { transaction_id: TransactionId(123), @@ -87,13 +87,13 @@ mod receiving_an_announce_request { use crate::servers::udp::asserts::is_ipv4_announce_response; use crate::servers::udp::contract::send_connection_request; - use crate::servers::udp::test_environment::running_test_environment; + use crate::servers::udp::Started; #[tokio::test] async fn should_return_an_announce_response() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; + let client = new_udp_tracker_client_connected(&env.bind_address().to_string()).await; let connection_id = send_connection_request(TransactionId(123), &client).await; @@ -129,13 +129,13 @@ mod receiving_an_scrape_request { use crate::servers::udp::asserts::is_scrape_response; use crate::servers::udp::contract::send_connection_request; - use crate::servers::udp::test_environment::running_test_environment; + use crate::servers::udp::Started; #[tokio::test] async fn 
should_return_a_scrape_response() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; + let client = new_udp_tracker_client_connected(&env.bind_address().to_string()).await; let connection_id = send_connection_request(TransactionId(123), &client).await; diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs new file mode 100644 index 000000000..26a47987e --- /dev/null +++ b/tests/servers/udp/environment.rs @@ -0,0 +1,78 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker::bootstrap::app::initialize_with_configuration; +use torrust_tracker::core::peer::Peer; +use torrust_tracker::core::Tracker; +use torrust_tracker::servers::registar::Registar; +use torrust_tracker::servers::udp::server::{Launcher, Running, Stopped, UdpServer}; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_configuration::{Configuration, UdpTracker}; + +pub struct Environment { + pub config: Arc, + pub tracker: Arc, + pub registar: Registar, + pub server: UdpServer, +} + +impl Environment { + /// Add a torrent to the tracker + #[allow(dead_code)] + pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + } +} + +impl Environment { + #[allow(dead_code)] + pub fn new(configuration: &Arc) -> Self { + let tracker = initialize_with_configuration(configuration); + + let config = Arc::new(configuration.udp_trackers[0].clone()); + + let bind_to = config + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); + + let server = UdpServer::new(Launcher::new(bind_to)); + + Self { + config, + tracker, + registar: Registar::default(), + server, + } + } + + #[allow(dead_code)] + pub async fn start(self) -> Environment { + Environment { 
+ config: self.config, + tracker: self.tracker.clone(), + registar: self.registar.clone(), + server: self.server.start(self.tracker, self.registar.give_form()).await.unwrap(), + } + } +} + +impl Environment { + pub async fn new(configuration: &Arc) -> Self { + Environment::::new(configuration).start().await + } + + #[allow(dead_code)] + pub async fn stop(self) -> Environment { + Environment { + config: self.config, + tracker: self.tracker, + registar: Registar::default(), + server: self.server.stop().await.unwrap(), + } + } + + pub fn bind_address(&self) -> SocketAddr { + self.server.state.binding + } +} diff --git a/tests/servers/udp/mod.rs b/tests/servers/udp/mod.rs index 4759350dc..b13b82240 100644 --- a/tests/servers/udp/mod.rs +++ b/tests/servers/udp/mod.rs @@ -1,3 +1,7 @@ +use torrust_tracker::servers::udp::server; + pub mod asserts; pub mod contract; -pub mod test_environment; +pub mod environment; + +pub type Started = environment::Environment; diff --git a/tests/servers/udp/test_environment.rs b/tests/servers/udp/test_environment.rs deleted file mode 100644 index f272b6dd3..000000000 --- a/tests/servers/udp/test_environment.rs +++ /dev/null @@ -1,110 +0,0 @@ -use std::net::SocketAddr; -use std::sync::Arc; - -use torrust_tracker::core::peer::Peer; -use torrust_tracker::core::Tracker; -use torrust_tracker::servers::registar::Registar; -use torrust_tracker::servers::udp::server::{Launcher, RunningUdpServer, StoppedUdpServer, UdpServer}; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; - -use crate::common::app::setup_with_configuration; - -#[allow(clippy::module_name_repetitions, dead_code)] -pub type StoppedTestEnvironment = TestEnvironment; -#[allow(clippy::module_name_repetitions)] -pub type RunningTestEnvironment = TestEnvironment; - -pub struct TestEnvironment { - pub cfg: Arc, - pub tracker: Arc, - pub state: S, -} - -#[allow(dead_code)] -pub struct Stopped { - udp_server: StoppedUdpServer, -} - -pub struct Running { - udp_server: 
RunningUdpServer, -} - -impl TestEnvironment { - /// Add a torrent to the tracker - #[allow(dead_code)] - pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { - self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - } -} - -impl TestEnvironment { - #[allow(dead_code)] - pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { - let cfg = Arc::new(cfg); - - let tracker = setup_with_configuration(&cfg); - - let udp_cfg = cfg.udp_trackers[0].clone(); - - let bind_to = udp_cfg - .bind_address - .parse::() - .expect("Tracker API bind_address invalid."); - - let udp_server = udp_server(Launcher::new(bind_to)); - - Self { - cfg, - tracker, - state: Stopped { udp_server }, - } - } - - #[allow(dead_code)] - pub async fn start(self) -> TestEnvironment { - let register = &Registar::default(); - - TestEnvironment { - cfg: self.cfg, - tracker: self.tracker.clone(), - state: Running { - udp_server: self.state.udp_server.start(self.tracker, register.give_form()).await.unwrap(), - }, - } - } -} - -impl TestEnvironment { - pub async fn new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { - StoppedTestEnvironment::new_stopped(cfg).start().await - } - - #[allow(dead_code)] - pub async fn stop(self) -> TestEnvironment { - TestEnvironment { - cfg: self.cfg, - tracker: self.tracker, - state: Stopped { - udp_server: self.state.udp_server.stop().await.unwrap(), - }, - } - } - - pub fn bind_address(&self) -> SocketAddr { - self.state.udp_server.state.binding - } -} - -#[allow(clippy::module_name_repetitions, dead_code)] -pub fn stopped_test_environment(cfg: torrust_tracker_configuration::Configuration) -> StoppedTestEnvironment { - TestEnvironment::new_stopped(cfg) -} - -#[allow(clippy::module_name_repetitions)] -pub async fn running_test_environment(cfg: torrust_tracker_configuration::Configuration) -> RunningTestEnvironment { - TestEnvironment::new_running(cfg).await -} - -pub fn udp_server(launcher: 
Launcher) -> StoppedUdpServer { - UdpServer::new(launcher) -} From bbf1be6eb9466afaa7092d14aa90cf917afcd18c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 19 Jan 2024 12:54:37 +0000 Subject: [PATCH 0689/1003] fix: don't start HTTP tracker if it's disabled This fixes this error: ``` Loading default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... 2024-01-19T12:43:24.605765751+00:00 [torrust_tracker::bootstrap::logging][INFO] logging initialized. 2024-01-19T12:43:24.606305647+00:00 [torrust_tracker::bootstrap::jobs::http_tracker][INFO] Note: Not loading Http Tracker Service, Not Enabled in Configuration. 2024-01-19T12:43:24.606314967+00:00 [torrust_tracker::bootstrap::jobs][INFO] TLS not enabled thread 'tokio-runtime-worker' panicked at src/servers/registar.rs:84:32: it should receive the listing: RecvError(()) ``` --- src/app.rs | 4 ++++ src/servers/apis/server.rs | 12 +++++++++--- src/servers/registar.rs | 10 ++++++++-- 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/src/app.rs b/src/app.rs index 3ec9806d3..8bdc281a6 100644 --- a/src/app.rs +++ b/src/app.rs @@ -76,6 +76,10 @@ pub async fn start(config: &Configuration, tracker: Arc) -> Vec { let launcher = self.state.launcher; let task = tokio::spawn(async move { - launcher.start(tracker, access_tokens, tx_start, rx_halt).await; + debug!(target: "API", "Starting with launcher in spawned task ..."); + + let _task = launcher.start(tracker, access_tokens, tx_start, rx_halt).await; + + debug!(target: "API", "Started with launcher in spawned task"); + launcher }); @@ -266,9 +271,10 @@ mod tests { #[tokio::test] async fn it_should_be_able_to_start_and_stop() { let cfg = Arc::new(ephemeral_mode_public()); - let tracker = initialize_with_configuration(&cfg); let config = &cfg.http_api; + let tracker = initialize_with_configuration(&cfg); + let bind_to = config .bind_address .parse::() diff --git a/src/servers/registar.rs b/src/servers/registar.rs index 
0fb8d6acc..9c23573c4 100644 --- a/src/servers/registar.rs +++ b/src/servers/registar.rs @@ -5,6 +5,7 @@ use std::net::SocketAddr; use std::sync::Arc; use derive_more::Constructor; +use log::debug; use tokio::sync::Mutex; use tokio::task::JoinHandle; @@ -81,10 +82,15 @@ impl Registar { /// Inserts a listing into the registry. async fn insert(&self, rx: tokio::sync::oneshot::Receiver) { - let listing = rx.await.expect("it should receive the listing"); + debug!("Waiting for the started service to send registration data ..."); + + let service_registration = rx + .await + .expect("it should receive the service registration from the started service"); let mut mutex = self.registry.lock().await; - mutex.insert(listing.binding, listing); + + mutex.insert(service_registration.binding, service_registration); } /// Returns the [`ServiceRegistry`] of services From 17296cdcb85bde4cad468dc662fde2fd8d70961b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 19 Jan 2024 15:18:55 +0000 Subject: [PATCH 0690/1003] fix: [#626] healt check api server shutdown This fixes: - The error: "Failed to install stop signal: channel closed" - And CRTL+C to shutdown the service --- src/bootstrap/jobs/health_check_api.rs | 20 +++++++++++++------ src/servers/health_check_api/server.rs | 5 ++++- tests/servers/health_check_api/environment.rs | 10 ++++++++++ 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index 7eeafe97b..e57d1c151 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -42,24 +42,32 @@ pub async fn start_job(config: &HealthCheckApi, register: ServiceRegistry) -> Jo let (tx_start, rx_start) = oneshot::channel::(); let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); - drop(tx_halt); + + let protocol = "http"; // Run the API server let join_handle = tokio::spawn(async move { - info!(target: "Health Check API", "Starting on: http://{}", 
bind_addr); + info!(target: "Health Check API", "Starting on: {protocol}://{}", bind_addr); let handle = server::start(bind_addr, tx_start, rx_halt, register); if let Ok(()) = handle.await { - info!(target: "Health Check API", "Stopped server running on: http://{}", bind_addr); + info!(target: "Health Check API", "Stopped server running on: {protocol}://{}", bind_addr); } }); - // Wait until the API server job is running + // Wait until the server sends the started message match rx_start.await { - Ok(msg) => info!(target: "Health Check API", "Started on: http://{}", msg.address), + Ok(msg) => info!(target: "Health Check API", "Started on: {protocol}://{}", msg.address), Err(e) => panic!("the Health Check API server was dropped: {e}"), } - join_handle + // Wait until the server finishes + tokio::spawn(async move { + assert!(!tx_halt.is_closed(), "Halt channel for Health Check API should be open"); + + join_handle + .await + .expect("it should be able to join to the Health Check API server task"); + }) } diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs index ecc6fe427..8ba20691f 100644 --- a/src/servers/health_check_api/server.rs +++ b/src/servers/health_check_api/server.rs @@ -8,6 +8,7 @@ use axum::routing::get; use axum::{Json, Router}; use axum_server::Handle; use futures::Future; +use log::debug; use serde_json::json; use tokio::sync::oneshot::{Receiver, Sender}; @@ -37,10 +38,12 @@ pub fn start( let handle = Handle::new(); + debug!(target: "Health Check API", "Starting service with graceful shutdown in a spawned task ..."); + tokio::task::spawn(graceful_shutdown( handle.clone(), rx_halt, - format!("shutting down http server on socket address: {address}"), + format!("Shutting down http server on socket address: {address}"), )); let running = axum_server::from_tcp(socket) diff --git a/tests/servers/health_check_api/environment.rs b/tests/servers/health_check_api/environment.rs index 9aa3ab16d..c98784282 100644 --- 
a/tests/servers/health_check_api/environment.rs +++ b/tests/servers/health_check_api/environment.rs @@ -1,6 +1,7 @@ use std::net::SocketAddr; use std::sync::Arc; +use log::debug; use tokio::sync::oneshot::{self, Sender}; use tokio::task::JoinHandle; use torrust_tracker::bootstrap::jobs::Started; @@ -50,13 +51,22 @@ impl Environment { let register = self.registar.entries(); + debug!(target: "Health Check API", "Spawning task to launch the service ..."); + let server = tokio::spawn(async move { + debug!(target: "Health Check API", "Starting the server in a spawned task ..."); + server::start(self.state.bind_to, tx_start, rx_halt, register) .await .expect("it should start the health check service"); + + debug!(target: "Health Check API", "Server started. Sending the binding {} ...", self.state.bind_to); + self.state.bind_to }); + debug!(target: "Health Check API", "Waiting for spawning task to send the binding ..."); + let binding = rx_start.await.expect("it should send service binding").address; Environment { From f0710d3554d67952611af2a5670a542f4f06a176 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 17 Jan 2024 15:35:33 +0000 Subject: [PATCH 0691/1003] feat: [#625] a new UDP tracker client You can use it with: ```console cargo run --bin udp_tracker_client 144.126.245.19:6969 9c38422213e30bff212b30c360d26f9a02136422 ``` and the output should be something like: ``` AnnounceIpv4( AnnounceResponse { transaction_id: TransactionId( -888840697, ), announce_interval: AnnounceInterval( 300, ), leechers: NumberOfPeers( 0, ), seeders: NumberOfPeers( 4, ), peers: [ ResponsePeer { ip_address: xx.yy.zz.254, port: Port( 51516, ), }, ResponsePeer { ip_address: xx.yy.zz.20, port: Port( 59448, ), }, ResponsePeer { ip_address: xx.yy.zz.224, port: Port( 58587, ), }, ], }, ) ``` --- src/bin/udp_tracker_client.rs | 154 +++++++++++++++++++ src/shared/bit_torrent/tracker/udp/client.rs | 32 +++- tests/servers/udp/contract.rs | 2 + 3 files changed, 185 insertions(+), 3 deletions(-) 
create mode 100644 src/bin/udp_tracker_client.rs diff --git a/src/bin/udp_tracker_client.rs b/src/bin/udp_tracker_client.rs new file mode 100644 index 000000000..41084127c --- /dev/null +++ b/src/bin/udp_tracker_client.rs @@ -0,0 +1,154 @@ +use std::env; +use std::net::{Ipv4Addr, SocketAddr}; +use std::str::FromStr; + +use aquatic_udp_protocol::common::InfoHash; +use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, Response, + TransactionId, +}; +use log::{debug, LevelFilter}; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash as TorrustInfoHash; +use torrust_tracker::shared::bit_torrent::tracker::udp::client::{UdpClient, UdpTrackerClient}; + +const ASSIGNED_BY_OS: i32 = 0; +const RANDOM_TRANSACTION_ID: i32 = -888_840_697; + +#[tokio::main] +async fn main() { + setup_logging(LevelFilter::Info); + + let (remote_socket_addr, info_hash) = parse_arguments(); + + // Configuration + let local_port = ASSIGNED_BY_OS; + let transaction_id = RANDOM_TRANSACTION_ID; + let bind_to = format!("0.0.0.0:{local_port}"); + + // Bind to local port + + debug!("Binding to: {bind_to}"); + let udp_client = UdpClient::bind(&bind_to).await; + let bound_to = udp_client.socket.local_addr().unwrap(); + debug!("Bound to: {bound_to}"); + + // Connect to remote socket + + debug!("Connecting to remote: udp://{remote_socket_addr}"); + udp_client.connect(&remote_socket_addr).await; + + let udp_tracker_client = UdpTrackerClient { udp_client }; + + let transaction_id = TransactionId(transaction_id); + + let connection_id = send_connection_request(transaction_id, &udp_tracker_client).await; + + let response = send_announce_request( + connection_id, + transaction_id, + info_hash, + Port(bound_to.port()), + &udp_tracker_client, + ) + .await; + + println!("{response:#?}"); +} + +fn setup_logging(level: LevelFilter) { + if let Err(_err) = fern::Dispatch::new() + .format(|out, message, record| { + 
out.finish(format_args!( + "{} [{}][{}] {}", + chrono::Local::now().format("%+"), + record.target(), + record.level(), + message + )); + }) + .level(level) + .chain(std::io::stdout()) + .apply() + { + panic!("Failed to initialize logging.") + } + + debug!("logging initialized."); +} + +fn parse_arguments() -> (String, TorrustInfoHash) { + let args: Vec = env::args().collect(); + + if args.len() != 3 { + eprintln!("Error: invalid number of arguments!"); + eprintln!("Usage: cargo run --bin udp_tracker_client "); + eprintln!("Example: cargo run --bin udp_tracker_client 144.126.245.19:6969 9c38422213e30bff212b30c360d26f9a02136422"); + std::process::exit(1); + } + + let remote_socket_addr = &args[1]; + let _valid_socket_addr = remote_socket_addr.parse::().unwrap_or_else(|_| { + panic!( + "Invalid argument: `{}`. Argument 1 should be a valid socket address. For example: `144.126.245.19:6969`.", + args[1] + ) + }); + let info_hash = TorrustInfoHash::from_str(&args[2]).unwrap_or_else(|_| { + panic!( + "Invalid argument: `{}`. Argument 2 should be a valid infohash. For example: `9c38422213e30bff212b30c360d26f9a02136422`.", + args[2] + ) + }); + + (remote_socket_addr.to_string(), info_hash) +} + +async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { + debug!("Sending connection request with transaction id: {transaction_id:#?}"); + + let connect_request = ConnectRequest { transaction_id }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + debug!("connection request response:\n{response:#?}"); + + match response { + Response::Connect(connect_response) => connect_response.connection_id, + _ => panic!("error connecting to udp server. 
Unexpected response"), + } +} + +async fn send_announce_request( + connection_id: ConnectionId, + transaction_id: TransactionId, + info_hash: TorrustInfoHash, + port: Port, + client: &UdpTrackerClient, +) -> Response { + debug!("Sending announce request with transaction id: {transaction_id:#?}"); + + let announce_request = AnnounceRequest { + connection_id, + transaction_id, + info_hash: InfoHash(info_hash.bytes()), + peer_id: PeerId(*b"-qB00000000000000001"), + bytes_downloaded: NumberOfBytes(0i64), + bytes_uploaded: NumberOfBytes(0i64), + bytes_left: NumberOfBytes(0i64), + event: AnnounceEvent::Started, + ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), + key: PeerKey(0u32), + peers_wanted: NumberOfPeers(1i32), + port, + }; + + client.send(announce_request.into()).await; + + let response = client.receive().await; + + debug!("announce request response:\n{response:#?}"); + + response +} diff --git a/src/shared/bit_torrent/tracker/udp/client.rs b/src/shared/bit_torrent/tracker/udp/client.rs index 00f0b8acf..959001e82 100644 --- a/src/shared/bit_torrent/tracker/udp/client.rs +++ b/src/shared/bit_torrent/tracker/udp/client.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use std::time::Duration; use aquatic_udp_protocol::{ConnectRequest, Request, Response, TransactionId}; +use log::debug; use tokio::net::UdpSocket; use tokio::time; @@ -19,7 +20,12 @@ impl UdpClient { /// /// Will panic if the local address can't be bound. pub async fn bind(local_address: &str) -> Self { - let socket = UdpSocket::bind(local_address).await.unwrap(); + let valid_socket_addr = local_address + .parse::() + .unwrap_or_else(|_| panic!("{local_address} is not a valid socket address")); + + let socket = UdpSocket::bind(valid_socket_addr).await.unwrap(); + Self { socket: Arc::new(socket), } @@ -29,7 +35,14 @@ impl UdpClient { /// /// Will panic if can't connect to the socket. 
pub async fn connect(&self, remote_address: &str) { - self.socket.connect(remote_address).await.unwrap(); + let valid_socket_addr = remote_address + .parse::() + .unwrap_or_else(|_| panic!("{remote_address} is not a valid socket address")); + + match self.socket.connect(valid_socket_addr).await { + Ok(()) => debug!("Connected successfully"), + Err(e) => panic!("Failed to connect: {e:?}"), + } } /// # Panics @@ -39,6 +52,8 @@ impl UdpClient { /// - Can't write to the socket. /// - Can't send data. pub async fn send(&self, bytes: &[u8]) -> usize { + debug!(target: "UDP client", "send {bytes:?}"); + self.socket.writable().await.unwrap(); self.socket.send(bytes).await.unwrap() } @@ -50,8 +65,15 @@ impl UdpClient { /// - Can't read from the socket. /// - Can't receive data. pub async fn receive(&self, bytes: &mut [u8]) -> usize { + debug!(target: "UDP client", "receiving ..."); + self.socket.readable().await.unwrap(); - self.socket.recv(bytes).await.unwrap() + + let size = self.socket.recv(bytes).await.unwrap(); + + debug!(target: "UDP client", "{size} bytes received {bytes:?}"); + + size } } @@ -73,6 +95,8 @@ impl UdpTrackerClient { /// /// Will panic if can't write request to bytes. pub async fn send(&self, request: Request) -> usize { + debug!(target: "UDP tracker client", "send request {request:?}"); + // Write request into a buffer let request_buffer = vec![0u8; MAX_PACKET_SIZE]; let mut cursor = Cursor::new(request_buffer); @@ -99,6 +123,8 @@ impl UdpTrackerClient { let payload_size = self.udp_client.receive(&mut response_buffer).await; + debug!(target: "UDP tracker client", "received {payload_size} bytes. 
Response {response_buffer:?}"); + Response::from_bytes(&response_buffer[..payload_size], true).unwrap() } } diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index 9ac585190..0eea650b8 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -118,6 +118,8 @@ mod receiving_an_announce_request { let response = client.receive().await; + println!("test response {response:?}"); + assert!(is_ipv4_announce_response(&response)); } } From 4c416e0b25272e1c67770e689c808231a3220d5b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 19 Jan 2024 15:45:01 +0000 Subject: [PATCH 0692/1003] fix: remove coverage report generation from testing workflow The coverage report is also generated in the coverage workflow. And it takes long. --- .github/workflows/testing.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index d9d0c60c9..02dbb1804 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -110,7 +110,3 @@ jobs: - id: test name: Run Unit Tests run: cargo test --tests --benches --examples --workspace --all-targets --all-features - - - id: coverage - name: Generate Coverage Report - run: cargo llvm-cov nextest --tests --benches --examples --workspace --all-targets --all-features From 1b7e5b9e2cdd2085885ac07d4b10034469e94615 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 19 Jan 2024 16:39:46 +0000 Subject: [PATCH 0693/1003] feat: enable all services in dev default config - Using wildcard IPs for UDP and HTTP tracker. - Revert port 0 (unintentionally changed). Use predefined ports. 
--- share/default/config/tracker.development.sqlite3.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index e26aa6c6c..9304a2d51 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -13,18 +13,18 @@ remove_peerless_torrents = true tracker_usage_statistics = true [[udp_trackers]] -bind_address = "0.0.0.0:0" +bind_address = "0.0.0.0:6969" enabled = true [[http_trackers]] -bind_address = "0.0.0.0:0" +bind_address = "0.0.0.0:7070" enabled = true ssl_cert_path = "" ssl_enabled = false ssl_key_path = "" [http_api] -bind_address = "127.0.0.1:0" +bind_address = "127.0.0.1:1212" enabled = true ssl_cert_path = "" ssl_enabled = false From b2ef4e0d8c39d95f6221317a46ab14fd98248bbb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Jan 2024 10:55:14 +0000 Subject: [PATCH 0694/1003] feat: tracker checker command It runs some checks against a running tracker. 
--- Cargo.lock | 12 ++ Cargo.toml | 2 + share/default/config/tracker_checker.json | 11 ++ src/bin/tracker_checker.rs | 11 ++ src/checker/app.rs | 53 ++++++++ src/checker/config.rs | 152 ++++++++++++++++++++++ src/checker/console.rs | 38 ++++++ src/checker/logger.rs | 72 ++++++++++ src/checker/mod.rs | 6 + src/checker/printer.rs | 9 ++ src/checker/service.rs | 84 ++++++++++++ src/lib.rs | 1 + 12 files changed, 451 insertions(+) create mode 100644 share/default/config/tracker_checker.json create mode 100644 src/bin/tracker_checker.rs create mode 100644 src/checker/app.rs create mode 100644 src/checker/config.rs create mode 100644 src/checker/console.rs create mode 100644 src/checker/logger.rs create mode 100644 src/checker/mod.rs create mode 100644 src/checker/printer.rs create mode 100644 src/checker/service.rs diff --git a/Cargo.lock b/Cargo.lock index de630b497..1f49aa986 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -627,6 +627,16 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +[[package]] +name = "colored" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" +dependencies = [ + "lazy_static", + "windows-sys 0.48.0", +] + [[package]] name = "config" version = "0.13.4" @@ -3418,6 +3428,7 @@ dependencies = [ "axum-server", "binascii", "chrono", + "colored", "config", "criterion", "derive_more", @@ -3454,6 +3465,7 @@ dependencies = [ "torrust-tracker-primitives", "torrust-tracker-test-helpers", "tower-http", + "url", "uuid", ] diff --git a/Cargo.toml b/Cargo.toml index 671d66e98..9b7e71905 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -68,6 +68,8 @@ torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "pa torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "packages/primitives" } tower-http = { 
version = "0", features = ["compression-full"] } uuid = { version = "1", features = ["v4"] } +colored = "2.1.0" +url = "2.5.0" [dev-dependencies] criterion = { version = "0.5.1", features = ["async_tokio"] } diff --git a/share/default/config/tracker_checker.json b/share/default/config/tracker_checker.json new file mode 100644 index 000000000..7d1453bfd --- /dev/null +++ b/share/default/config/tracker_checker.json @@ -0,0 +1,11 @@ +{ + "udp_trackers": [ + "127.0.0.1:6969" + ], + "http_trackers": [ + "http://127.0.0.1:7070" + ], + "health_checks": [ + "http://127.0.0.1:1313/health_check" + ] +} \ No newline at end of file diff --git a/src/bin/tracker_checker.rs b/src/bin/tracker_checker.rs new file mode 100644 index 000000000..3a0e0ee88 --- /dev/null +++ b/src/bin/tracker_checker.rs @@ -0,0 +1,11 @@ +//! Program to run checks against running trackers. +//! +//! ```text +//! cargo run --bin tracker_checker "./share/default/config/tracker_checker.json" +//! ``` +use torrust_tracker::checker::app; + +#[tokio::main] +async fn main() { + app::run().await; +} diff --git a/src/checker/app.rs b/src/checker/app.rs new file mode 100644 index 000000000..e92373493 --- /dev/null +++ b/src/checker/app.rs @@ -0,0 +1,53 @@ +use std::sync::Arc; + +use super::config::Configuration; +use super::console::Console; +use crate::checker::config::parse_from_json; +use crate::checker::service::Service; + +pub const NUMBER_OF_ARGUMENTS: usize = 2; + +/// # Panics +/// +/// Will panic if: +/// +/// - It can't read the json configuration file. +/// - The configuration file is invalid. 
+pub async fn run() { + let args = parse_arguments(); + let config = setup_config(&args); + let console_printer = Console {}; + let service = Service { + config: Arc::new(config), + console: console_printer, + }; + + service.run_checks().await; +} + +pub struct Arguments { + pub config_path: String, +} + +fn parse_arguments() -> Arguments { + let args: Vec = std::env::args().collect(); + + if args.len() < NUMBER_OF_ARGUMENTS { + eprintln!("Usage: cargo run --bin tracker_checker "); + eprintln!("For example: cargo run --bin tracker_checker ./share/default/config/tracker_checker.json"); + std::process::exit(1); + } + + let config_path = &args[1]; + + Arguments { + config_path: config_path.to_string(), + } +} + +fn setup_config(args: &Arguments) -> Configuration { + let file_content = std::fs::read_to_string(args.config_path.clone()) + .unwrap_or_else(|_| panic!("Can't read config file {}", args.config_path)); + + parse_from_json(&file_content).expect("Invalid config format") +} diff --git a/src/checker/config.rs b/src/checker/config.rs new file mode 100644 index 000000000..aaf611bb9 --- /dev/null +++ b/src/checker/config.rs @@ -0,0 +1,152 @@ +use std::fmt; +use std::net::SocketAddr; + +use reqwest::Url as ServiceUrl; +use serde::Deserialize; +use url; + +/// It parses the configuration from a JSON format. +/// +/// # Errors +/// +/// Will return an error if the configuration is not valid. +/// +/// # Panics +/// +/// Will panic if unable to read the configuration file. +pub fn parse_from_json(json: &str) -> Result { + let plain_config: PlainConfiguration = serde_json::from_str(json).map_err(ConfigurationError::JsonParseError)?; + Configuration::try_from(plain_config) +} + +/// DTO for the configuration to serialize/deserialize configuration. +/// +/// Configuration does not need to be valid. 
+#[derive(Deserialize)] +struct PlainConfiguration { + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub health_checks: Vec, +} + +/// Validated configuration +pub struct Configuration { + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub health_checks: Vec, +} + +#[derive(Debug)] +pub enum ConfigurationError { + JsonParseError(serde_json::Error), + InvalidUdpAddress(std::net::AddrParseError), + InvalidUrl(url::ParseError), +} + +impl fmt::Display for ConfigurationError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ConfigurationError::JsonParseError(e) => write!(f, "JSON parse error: {e}"), + ConfigurationError::InvalidUdpAddress(e) => write!(f, "Invalid UDP address: {e}"), + ConfigurationError::InvalidUrl(e) => write!(f, "Invalid URL: {e}"), + } + } +} + +impl TryFrom for Configuration { + type Error = ConfigurationError; + + fn try_from(plain_config: PlainConfiguration) -> Result { + let udp_trackers = plain_config + .udp_trackers + .into_iter() + .map(|s| s.parse::().map_err(ConfigurationError::InvalidUdpAddress)) + .collect::, _>>()?; + + let http_trackers = plain_config + .http_trackers + .into_iter() + .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) + .collect::, _>>()?; + + let health_checks = plain_config + .health_checks + .into_iter() + .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) + .collect::, _>>()?; + + Ok(Configuration { + udp_trackers, + http_trackers, + health_checks, + }) + } +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use super::*; + + #[test] + fn configuration_should_be_build_from_plain_serializable_configuration() { + let dto = PlainConfiguration { + udp_trackers: vec!["127.0.0.1:8080".to_string()], + http_trackers: vec!["http://127.0.0.1:8080".to_string()], + health_checks: vec!["http://127.0.0.1:8080/health".to_string()], + }; + + let config = Configuration::try_from(dto).expect("A valid configuration"); + + assert_eq!( + 
config.udp_trackers, + vec![SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080)] + ); + assert_eq!( + config.http_trackers, + vec![ServiceUrl::parse("http://127.0.0.1:8080").unwrap()] + ); + assert_eq!( + config.health_checks, + vec![ServiceUrl::parse("http://127.0.0.1:8080/health").unwrap()] + ); + } + + mod building_configuration_from_plan_configuration { + use crate::checker::config::{Configuration, PlainConfiguration}; + + #[test] + fn it_should_fail_when_a_tracker_udp_address_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["invalid_address".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + assert!(Configuration::try_from(plain_config).is_err()); + } + + #[test] + fn it_should_fail_when_a_tracker_http_address_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec!["not_a_url".to_string()], + health_checks: vec![], + }; + + assert!(Configuration::try_from(plain_config).is_err()); + } + + #[test] + fn it_should_fail_when_a_health_check_http_address_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec![], + health_checks: vec!["not_a_url".to_string()], + }; + + assert!(Configuration::try_from(plain_config).is_err()); + } + } +} diff --git a/src/checker/console.rs b/src/checker/console.rs new file mode 100644 index 000000000..b55c559fc --- /dev/null +++ b/src/checker/console.rs @@ -0,0 +1,38 @@ +use super::printer::{Printer, CLEAR_SCREEN}; + +pub struct Console {} + +impl Default for Console { + fn default() -> Self { + Self::new() + } +} + +impl Console { + #[must_use] + pub fn new() -> Self { + Self {} + } +} + +impl Printer for Console { + fn clear(&self) { + self.print(CLEAR_SCREEN); + } + + fn print(&self, output: &str) { + print!("{}", &output); + } + + fn eprint(&self, output: &str) { + eprint!("{}", &output); + } + + fn println(&self, output: &str) { + println!("{}", &output); + } + + fn 
eprintln(&self, output: &str) { + eprintln!("{}", &output); + } +} diff --git a/src/checker/logger.rs b/src/checker/logger.rs new file mode 100644 index 000000000..3d1074e7b --- /dev/null +++ b/src/checker/logger.rs @@ -0,0 +1,72 @@ +use std::cell::RefCell; + +use super::printer::{Printer, CLEAR_SCREEN}; + +pub struct Logger { + output: RefCell, +} + +impl Default for Logger { + fn default() -> Self { + Self::new() + } +} + +impl Logger { + #[must_use] + pub fn new() -> Self { + Self { + output: RefCell::new(String::new()), + } + } + + pub fn log(&self) -> String { + self.output.borrow().clone() + } +} + +impl Printer for Logger { + fn clear(&self) { + self.print(CLEAR_SCREEN); + } + + fn print(&self, output: &str) { + *self.output.borrow_mut() = format!("{}{}", self.output.borrow(), &output); + } + + fn eprint(&self, output: &str) { + *self.output.borrow_mut() = format!("{}{}", self.output.borrow(), &output); + } + + fn println(&self, output: &str) { + self.print(&format!("{}/n", &output)); + } + + fn eprintln(&self, output: &str) { + self.eprint(&format!("{}/n", &output)); + } +} + +#[cfg(test)] +mod tests { + use crate::checker::logger::Logger; + use crate::checker::printer::{Printer, CLEAR_SCREEN}; + + #[test] + fn should_capture_the_clear_screen_command() { + let console_logger = Logger::new(); + + console_logger.clear(); + + assert_eq!(CLEAR_SCREEN, console_logger.log()); + } + + #[test] + fn should_capture_the_print_command_output() { + let console_logger = Logger::new(); + + console_logger.print("OUTPUT"); + + assert_eq!("OUTPUT", console_logger.log()); + } +} diff --git a/src/checker/mod.rs b/src/checker/mod.rs new file mode 100644 index 000000000..6a55141d5 --- /dev/null +++ b/src/checker/mod.rs @@ -0,0 +1,6 @@ +pub mod app; +pub mod config; +pub mod console; +pub mod logger; +pub mod printer; +pub mod service; diff --git a/src/checker/printer.rs b/src/checker/printer.rs new file mode 100644 index 000000000..d590dfedb --- /dev/null +++ 
b/src/checker/printer.rs @@ -0,0 +1,9 @@ +pub const CLEAR_SCREEN: &str = "\x1B[2J\x1B[1;1H"; + +pub trait Printer { + fn clear(&self); + fn print(&self, output: &str); + fn eprint(&self, output: &str); + fn println(&self, output: &str); + fn eprintln(&self, output: &str); +} diff --git a/src/checker/service.rs b/src/checker/service.rs new file mode 100644 index 000000000..92902debd --- /dev/null +++ b/src/checker/service.rs @@ -0,0 +1,84 @@ +use std::net::SocketAddr; +use std::sync::Arc; +use std::time::Duration; + +use colored::Colorize; +use reqwest::{Client, Url}; + +use super::config::Configuration; +use super::console::Console; +use crate::checker::printer::Printer; + +pub struct Service { + pub(crate) config: Arc, + pub(crate) console: Console, +} + +impl Service { + pub async fn run_checks(&self) { + self.console.println("Running checks for trackers ..."); + self.check_udp_trackers(); + self.check_http_trackers(); + self.run_health_checks().await; + } + + fn check_udp_trackers(&self) { + self.console.println("UDP trackers ..."); + + for udp_tracker in &self.config.udp_trackers { + self.check_udp_tracker(udp_tracker); + } + } + + fn check_http_trackers(&self) { + self.console.println("HTTP trackers ..."); + + for http_tracker in &self.config.http_trackers { + self.check_http_tracker(http_tracker); + } + } + + async fn run_health_checks(&self) { + self.console.println("Health checks ..."); + + for health_check_url in &self.config.health_checks { + self.run_health_check(health_check_url.clone()).await; + } + } + + fn check_udp_tracker(&self, address: &SocketAddr) { + // todo: + // - Make announce request + // - Make scrape request + self.console + .println(&format!("{} - UDP tracker at {:?} is OK (TODO)", "✓".green(), address)); + } + + fn check_http_tracker(&self, url: &Url) { + // todo: + // - Make announce request + // - Make scrape request + self.console + .println(&format!("{} - HTTP tracker at {} is OK (TODO)", "✓".green(), url)); + } + + async fn 
run_health_check(&self, url: Url) { + let client = Client::builder().timeout(Duration::from_secs(5)).build().unwrap(); + + match client.get(url.clone()).send().await { + Ok(response) => { + if response.status().is_success() { + self.console + .println(&format!("{} - Health API at {} is OK", "✓".green(), url)); + } else { + self.console + .eprintln(&format!("{} - Health API at {} failing: {:?}", "✗".red(), url, response)); + } + } + Err(err) => { + self.console + .eprintln(&format!("{} - Health API at {} failing: {:?}", "✗".red(), url, err)); + } + } + } +} diff --git a/src/lib.rs b/src/lib.rs index c5f775646..7b5d453a4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -471,6 +471,7 @@ //! examples on the integration and unit tests. pub mod app; pub mod bootstrap; +pub mod checker; pub mod core; pub mod servers; pub mod shared; From 72c8348559f45f936266823d576f8d8798b32d66 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 24 Jan 2024 18:17:10 +0800 Subject: [PATCH 0695/1003] udp: handle udp requests concurrently --- .github/workflows/coverage.yaml | 3 + .vscode/settings.json | 6 + Cargo.lock | 10 + Cargo.toml | 1 + cSpell.json | 2 + src/servers/udp/handlers.rs | 16 +- src/servers/udp/mod.rs | 9 + src/servers/udp/server.rs | 215 ++++++++++++------- src/shared/bit_torrent/tracker/udp/client.rs | 2 + tests/servers/udp/contract.rs | 8 + tests/servers/udp/environment.rs | 20 +- 11 files changed, 207 insertions(+), 85 deletions(-) diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 7f5bf2946..06529d53d 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -55,6 +55,9 @@ jobs: name: Run Build Checks run: cargo check --tests --benches --examples --workspace --all-targets --all-features + # Run Test Locally: + # RUSTFLAGS="-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests" RUSTDOCFLAGS="-Z profile -C codegen-units=1 -C 
inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests" CARGO_INCREMENTAL="0" RUST_BACKTRACE=1 cargo test --tests --benches --examples --workspace --all-targets --all-features + - id: test name: Run Unit Tests run: cargo test --tests --benches --examples --workspace --all-targets --all-features diff --git a/.vscode/settings.json b/.vscode/settings.json index 038da4c18..701e89ccf 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -2,6 +2,12 @@ "[rust]": { "editor.formatOnSave": true }, + "[ignore]": { "rust-analyzer.cargo.extraEnv" : { + "RUSTFLAGS": "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests", + "RUSTDOCFLAGS": "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests", + "CARGO_INCREMENTAL": "0", + "RUST_BACKTRACE": "1" + }}, "rust-analyzer.checkOnSave": true, "rust-analyzer.check.command": "clippy", "rust-analyzer.check.allTargets": true, diff --git a/Cargo.lock b/Cargo.lock index 1f49aa986..63dfab1c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2621,6 +2621,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "ringbuf" +version = "0.4.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b8f7d58e4f67752d63318605656be063e333154aa35b70126075e9d05552979" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "rkyv" version = "0.7.43" @@ -3448,6 +3457,7 @@ dependencies = [ "r2d2_sqlite", "rand", "reqwest", + "ringbuf", "serde", "serde_bencode", "serde_bytes", diff --git a/Cargo.toml b/Cargo.toml index 9b7e71905..6fd542c2f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,6 +56,7 @@ serde = { version = "1", features = ["derive"] } serde_bencode = "0" serde_bytes = "0" serde_json = "1" +ringbuf = "0.4.0-rc.2" serde_with = "3" serde_repr = "0" tdyne-peer-id = "1" diff --git a/cSpell.json b/cSpell.json 
index e02c6ed87..acd46284c 100644 --- a/cSpell.json +++ b/cSpell.json @@ -94,8 +94,10 @@ "reannounce", "Registar", "repr", + "reqs", "reqwest", "rerequests", + "ringbuf", "rngs", "routable", "rusqlite", diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index b77cd3a42..65e3f5b20 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -11,6 +11,7 @@ use log::{debug, info}; use torrust_tracker_located_error::DynError; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; +use super::UdpRequest; use crate::core::{statistics, ScrapeData, Tracker}; use crate::servers::udp::error::Error; use crate::servers::udp::peer_builder; @@ -27,10 +28,13 @@ use crate::shared::bit_torrent::info_hash::InfoHash; /// type. /// /// It will return an `Error` response if the request is invalid. -pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: &Tracker) -> Response { - match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|e| Error::InternalServer { - message: format!("{e:?}"), - location: Location::caller(), +pub(crate) async fn handle_packet(udp_request: UdpRequest, tracker: &Arc) -> Response { + debug!("Handling Packets: {udp_request:?}"); + match Request::from_bytes(&udp_request.payload[..udp_request.payload.len()], MAX_SCRAPE_TORRENTS).map_err(|e| { + Error::InternalServer { + message: format!("{e:?}"), + location: Location::caller(), + } }) { Ok(request) => { let transaction_id = match &request { @@ -39,7 +43,7 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: & Request::Scrape(scrape_request) => scrape_request.transaction_id, }; - match handle_request(request, remote_addr, tracker).await { + match handle_request(request, udp_request.from, tracker).await { Ok(response) => response, Err(e) => handle_error(&e, transaction_id), } @@ -60,6 +64,8 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: & /// /// If 
an error happens in the `handle_request` function, it will just return the `ServerError`. pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: &Tracker) -> Result { + debug!("Handling Request: {request:?} to: {remote_addr:?}"); + match request { Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker).await, Request::Announce(announce_request) => handle_announce(remote_addr, &announce_request, tracker).await, diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index 985c1cec7..3b22aeab5 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -638,6 +638,9 @@ //! documentation by [Arvid Norberg](https://github.com/arvidn) was very //! supportive in the development of this documentation. Some descriptions were //! taken from the [libtorrent](https://www.rasterbar.com/products/libtorrent/udp_tracker_protocol.html). + +use std::net::SocketAddr; + pub mod connection_cookie; pub mod error; pub mod handlers; @@ -652,3 +655,9 @@ pub type Port = u16; /// The transaction id. A random number generated by the peer that is used to /// match requests and responses. 
pub type TransactionId = i64; + +#[derive(Clone, Debug)] +pub(crate) struct UdpRequest { + payload: Vec, + from: SocketAddr, +} diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 5a1977d01..0ab50d3bd 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -20,21 +20,24 @@ use std::io::Cursor; use std::net::SocketAddr; use std::sync::Arc; -use std::time::Duration; use aquatic_udp_protocol::Response; use derive_more::Constructor; -use futures::pin_mut; -use log::{debug, error, info}; +use log::{debug, error, info, trace}; +use ringbuf::storage::Static; +use ringbuf::traits::{Consumer, Observer, RingBuffer}; +use ringbuf::LocalRb; use tokio::net::UdpSocket; -use tokio::sync::oneshot::{Receiver, Sender}; -use tokio::task::JoinHandle; +use tokio::sync::oneshot; +use tokio::task::{AbortHandle, JoinHandle}; +use tokio::{select, task}; +use super::UdpRequest; use crate::bootstrap::jobs::Started; use crate::core::Tracker; use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::{shutdown_signal_with_message, Halted}; -use crate::servers::udp::handlers::handle_packet; +use crate::servers::udp::handlers; use crate::shared::bit_torrent::tracker::udp::client::check; use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; @@ -125,17 +128,8 @@ impl UdpServer { assert!(!tx_halt.is_closed(), "Halt channel for UDP tracker should be open"); - let launcher = self.state.launcher; - - let task = tokio::spawn(async move { - debug!(target: "UDP Tracker", "Launcher starting ..."); - - let starting = launcher.start(tracker, tx_start, rx_halt).await; - - starting.await.expect("UDP server should have started running"); - - launcher - }); + // May need to wrap in a task to work around a tokio bug. 
+ let task = self.state.launcher.start(tracker, tx_start, rx_halt); let binding = rx_start.await.expect("it should be able to start the service").address; @@ -150,6 +144,8 @@ impl UdpServer { }, }; + trace!("Running UDP Tracker on Socket: {}", running_udp_server.state.binding); + Ok(running_udp_server) } } @@ -182,7 +178,7 @@ impl UdpServer { } } -#[derive(Constructor, Debug)] +#[derive(Constructor, Copy, Clone, Debug)] pub struct Launcher { bind_to: SocketAddr, } @@ -193,8 +189,40 @@ impl Launcher { /// # Panics /// /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. - pub async fn start(&self, tracker: Arc, tx_start: Sender, rx_halt: Receiver) -> JoinHandle<()> { - Udp::start_with_graceful_shutdown(tracker, self.bind_to, tx_start, rx_halt).await + pub fn start( + &self, + tracker: Arc, + tx_start: oneshot::Sender, + rx_halt: oneshot::Receiver, + ) -> JoinHandle { + let launcher = Launcher::new(self.bind_to); + tokio::spawn(async move { + Udp::run_with_graceful_shutdown(tracker, launcher.bind_to, tx_start, rx_halt).await; + launcher + }) + } +} + +#[derive(Default)] +struct ActiveRequests { + rb: LocalRb>, // the number of requests we handle at the same time. +} + +impl std::fmt::Debug for ActiveRequests { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let (left, right) = &self.rb.as_slices(); + let dbg = format!("capacity: {}, left: {left:?}, right: {right:?}", &self.rb.capacity()); + f.debug_struct("ActiveRequests").field("rb", &dbg).finish() + } +} + +impl Drop for ActiveRequests { + fn drop(&mut self) { + for h in self.rb.pop_iter() { + if !h.is_finished() { + h.abort(); + } + } } } @@ -209,80 +237,103 @@ impl Udp { /// /// It panics if unable to bind to udp socket, and get the address from the udp socket. /// It also panics if unable to send address of socket. 
- async fn start_with_graceful_shutdown( + async fn run_with_graceful_shutdown( tracker: Arc, bind_to: SocketAddr, - tx_start: Sender, - rx_halt: Receiver, - ) -> JoinHandle<()> { + tx_start: oneshot::Sender, + rx_halt: oneshot::Receiver, + ) { let socket = Arc::new(UdpSocket::bind(bind_to).await.expect("Could not bind to {self.socket}.")); let address = socket.local_addr().expect("Could not get local_addr from {binding}."); + let halt = shutdown_signal_with_message(rx_halt, format!("Halting Http Service Bound to Socket: {address}")); info!(target: "UDP Tracker", "Starting on: udp://{}", address); let running = tokio::task::spawn(async move { - let halt = tokio::task::spawn(async move { - debug!(target: "UDP Tracker", "Waiting for halt signal for socket address: udp://{address} ..."); - - shutdown_signal_with_message( - rx_halt, - format!("Shutting down UDP server on socket address: udp://{address}"), - ) - .await; - }); - - let listen = async move { - debug!(target: "UDP Tracker", "Waiting for packets on socket address: udp://{address} ..."); - - loop { - let mut data = [0; MAX_PACKET_SIZE]; - let socket_clone = socket.clone(); - - match socket_clone.recv_from(&mut data).await { - Ok((valid_bytes, remote_addr)) => { - let payload = data[..valid_bytes].to_vec(); - - debug!(target: "UDP Tracker", "Received {} bytes", payload.len()); - debug!(target: "UDP Tracker", "From: {}", &remote_addr); - debug!(target: "UDP Tracker", "Payload: {:?}", payload); - - let response_fut = handle_packet(remote_addr, payload, &tracker); - - match tokio::time::timeout(Duration::from_secs(5), response_fut).await { - Ok(response) => { - Udp::send_response(socket_clone, remote_addr, response).await; - } - Err(_) => { - error!("Timeout occurred while processing the UDP request."); - } - } - } - Err(err) => { - error!("Error reading UDP datagram from socket. 
Error: {:?}", err); - } + debug!(target: "UDP Tracker", "Started: Waiting for packets on socket address: udp://{address} ..."); + + let tracker = tracker.clone(); + let socket = socket.clone(); + + let reqs = &mut ActiveRequests::default(); + + // Main Waiting Loop, awaits on async [`receive_request`]. + loop { + if let Some(h) = reqs.rb.push_overwrite( + Self::do_request(Self::receive_request(socket.clone()).await, tracker.clone(), socket.clone()).abort_handle(), + ) { + if !h.is_finished() { + // the task is still running, lets yield and give it a chance to flush. + tokio::task::yield_now().await; + h.abort(); } } - }; + } + }); + + tx_start + .send(Started { address }) + .expect("the UDP Tracker service should not be dropped"); + + debug!(target: "UDP Tracker", "Started on: udp://{}", address); - pin_mut!(halt); - pin_mut!(listen); + let stop = running.abort_handle(); - tx_start - .send(Started { address }) - .expect("the UDP Tracker service should not be dropped"); + select! { + _ = running => { debug!(target: "UDP Tracker", "Socket listener stopped on address: udp://{address}"); }, + () = halt => { debug!(target: "UDP Tracker", "Halt signal spawned task stopped on address: udp://{address}"); } + } + stop.abort(); + + task::yield_now().await; // lets allow the other threads to complete. + } - tokio::select! 
{ - _ = & mut halt => { debug!(target: "UDP Tracker", "Halt signal spawned task stopped on address: udp://{address}"); }, - () = & mut listen => { debug!(target: "UDP Tracker", "Socket listener stopped on address: udp://{address}"); }, + async fn receive_request(socket: Arc) -> Result> { + // Wait for the socket to be readable + socket.readable().await?; + + let mut buf = Vec::with_capacity(MAX_PACKET_SIZE); + + match socket.recv_buf_from(&mut buf).await { + Ok((n, from)) => { + Vec::truncate(&mut buf, n); + trace!("GOT {buf:?}"); + Ok(UdpRequest { payload: buf, from }) } - }); - info!(target: "UDP Tracker", "Started on: udp://{}", address); + Err(e) => Err(Box::new(e)), + } + } - running + fn do_request( + result: Result>, + tracker: Arc, + socket: Arc, + ) -> JoinHandle<()> { + // timeout not needed, as udp is non-blocking. + tokio::task::spawn(async move { + match result { + Ok(udp_request) => { + trace!("Received Request from: {}", udp_request.from); + Self::make_response(tracker.clone(), socket.clone(), udp_request).await; + } + Err(error) => { + debug!("error: {error}"); + } + } + }) } - async fn send_response(socket: Arc, remote_addr: SocketAddr, response: Response) { + async fn make_response(tracker: Arc, socket: Arc, udp_request: UdpRequest) { + trace!("Making Response to {udp_request:?}"); + let from = udp_request.from; + let response = handlers::handle_packet(udp_request, &tracker.clone()).await; + Self::send_response(&socket.clone(), from, response).await; + } + + async fn send_response(socket: &Arc, to: SocketAddr, response: Response) { + trace!("Sending Response: {response:?} to: {to:?}"); + let buffer = vec![0u8; MAX_PACKET_SIZE]; let mut cursor = Cursor::new(buffer); @@ -293,10 +344,10 @@ impl Udp { let inner = cursor.get_ref(); debug!("Sending {} bytes ...", &inner[..position].len()); - debug!("To: {:?}", &remote_addr); + debug!("To: {:?}", &to); debug!("Payload: {:?}", &inner[..position]); - Udp::send_packet(socket, &remote_addr, 
&inner[..position]).await; + Self::send_packet(socket, &to, &inner[..position]).await; debug!("{} bytes sent", &inner[..position].len()); } @@ -306,7 +357,9 @@ impl Udp { } } - async fn send_packet(socket: Arc, remote_addr: &SocketAddr, payload: &[u8]) { + async fn send_packet(socket: &Arc, remote_addr: &SocketAddr, payload: &[u8]) { + trace!("Sending Packets: {payload:?} to: {remote_addr:?}"); + // doesn't matter if it reaches or not drop(socket.send_to(payload, remote_addr).await); } @@ -324,7 +377,9 @@ impl Udp { #[cfg(test)] mod tests { use std::sync::Arc; + use std::time::Duration; + use tokio::time::sleep; use torrust_tracker_test_helpers::configuration::ephemeral_mode_public; use crate::bootstrap::app::initialize_with_configuration; @@ -351,6 +406,8 @@ mod tests { .expect("it should start the server"); let stopped = started.stop().await.expect("it should stop the server"); + sleep(Duration::from_secs(1)).await; + assert_eq!(stopped.state.launcher.bind_to, bind_to); } } diff --git a/src/shared/bit_torrent/tracker/udp/client.rs b/src/shared/bit_torrent/tracker/udp/client.rs index 959001e82..23b718472 100644 --- a/src/shared/bit_torrent/tracker/udp/client.rs +++ b/src/shared/bit_torrent/tracker/udp/client.rs @@ -143,6 +143,8 @@ pub async fn new_udp_tracker_client_connected(remote_address: &str) -> UdpTracke /// /// # Panics pub async fn check(binding: &SocketAddr) -> Result { + debug!("Checking Service (detail): {binding:?}."); + let client = new_udp_tracker_client_connected(binding.to_string().as_str()).await; let connect_request = ConnectRequest { diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index 0eea650b8..91dca4d42 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -47,6 +47,8 @@ async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_req let response = Response::from_bytes(&buffer, true).unwrap(); assert!(is_error_response(&response, "bad request")); + + env.stop().await; } 
mod receiving_a_connection_request { @@ -72,6 +74,8 @@ mod receiving_a_connection_request { let response = client.receive().await; assert!(is_connect_response(&response, TransactionId(123))); + + env.stop().await; } } @@ -121,6 +125,8 @@ mod receiving_an_announce_request { println!("test response {response:?}"); assert!(is_ipv4_announce_response(&response)); + + env.stop().await; } } @@ -158,5 +164,7 @@ mod receiving_an_scrape_request { let response = client.receive().await; assert!(is_scrape_response(&response)); + + env.stop().await; } } diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 26a47987e..da7705016 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -68,7 +68,7 @@ impl Environment { config: self.config, tracker: self.tracker, registar: Registar::default(), - server: self.server.stop().await.unwrap(), + server: self.server.stop().await.expect("it stop the udp tracker service"), } } @@ -76,3 +76,21 @@ impl Environment { self.server.state.binding } } + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use tokio::time::sleep; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::udp::Started; + + #[tokio::test] + async fn it_should_make_and_stop_udp_server() { + let env = Started::new(&configuration::ephemeral().into()).await; + sleep(Duration::from_secs(1)).await; + env.stop().await; + sleep(Duration::from_secs(1)).await; + } +} From 8e432057634718bf9e8efbfc7abb4f36f28a26e0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 24 Jan 2024 17:13:18 +0000 Subject: [PATCH 0696/1003] ci: [#634] new dependency to make temp dirs It will be used to store temp files during CI scripts execution. 
--- Cargo.lock | 1 + Cargo.toml | 1 + cSpell.json | 1 + 3 files changed, 3 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 63dfab1c7..ab270d0cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3467,6 +3467,7 @@ dependencies = [ "serde_with", "tdyne-peer-id", "tdyne-peer-id-registry", + "tempfile", "thiserror", "tokio", "torrust-tracker-configuration", diff --git a/Cargo.toml b/Cargo.toml index 6fd542c2f..3a11786f5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,6 +71,7 @@ tower-http = { version = "0", features = ["compression-full"] } uuid = { version = "1", features = ["v4"] } colored = "2.1.0" url = "2.5.0" +tempfile = "3.9.0" [dev-dependencies] criterion = { version = "0.5.1", features = ["async_tokio"] } diff --git a/cSpell.json b/cSpell.json index acd46284c..0a3f78fad 100644 --- a/cSpell.json +++ b/cSpell.json @@ -116,6 +116,7 @@ "Swatinem", "Swiftbit", "taiki", + "tempfile", "thiserror", "tlsv", "Torrentstorm", From 4edcd2efb262aad2c6f246c325c0ff6a0792d3fe Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 24 Jan 2024 17:14:49 +0000 Subject: [PATCH 0697/1003] ci: [#634] new script to run E2E tests It uses Rust instead of Bash. You can run it with: ``` cargo run --bin e2e_tests_runner share/default/config/tracker.e2e.container.sqlite3.toml ``` It will: - Build the tracker docker image. - Run the docker image. - Wait until the container is healthy. - Parse logs to get running services. - Build config file for the tracker_checker. - Run the tracker_checker. - Stop the container. 
--- .../config/tracker.e2e.container.sqlite3.toml | 41 ++++ src/bin/e2e_tests_runner.rs | 10 + src/e2e/docker.rs | 177 +++++++++++++++ src/e2e/logs_parser.rs | 114 ++++++++++ src/e2e/mod.rs | 4 + src/e2e/runner.rs | 214 ++++++++++++++++++ src/e2e/temp_dir.rs | 53 +++++ src/lib.rs | 1 + 8 files changed, 614 insertions(+) create mode 100644 share/default/config/tracker.e2e.container.sqlite3.toml create mode 100644 src/bin/e2e_tests_runner.rs create mode 100644 src/e2e/docker.rs create mode 100644 src/e2e/logs_parser.rs create mode 100644 src/e2e/mod.rs create mode 100644 src/e2e/runner.rs create mode 100644 src/e2e/temp_dir.rs diff --git a/share/default/config/tracker.e2e.container.sqlite3.toml b/share/default/config/tracker.e2e.container.sqlite3.toml new file mode 100644 index 000000000..86ffb3ffd --- /dev/null +++ b/share/default/config/tracker.e2e.container.sqlite3.toml @@ -0,0 +1,41 @@ +announce_interval = 120 +db_driver = "Sqlite3" +db_path = "/var/lib/torrust/tracker/database/sqlite3.db" +external_ip = "0.0.0.0" +inactive_peer_cleanup_interval = 600 +log_level = "info" +max_peer_timeout = 900 +min_announce_interval = 120 +mode = "public" +on_reverse_proxy = false +persistent_torrent_completed_stat = false +remove_peerless_torrents = true +tracker_usage_statistics = true + +[[udp_trackers]] +bind_address = "0.0.0.0:6969" +enabled = true + +[[http_trackers]] +bind_address = "0.0.0.0:7070" +enabled = true +ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" +ssl_enabled = false +ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" + +[http_api] +bind_address = "0.0.0.0:1212" +enabled = true +ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" +ssl_enabled = false +ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" + +# Please override the admin token setting the +# `TORRUST_TRACKER_API_ADMIN_TOKEN` +# environmental variable! 
+ +[http_api.access_tokens] +admin = "MyAccessToken" + +[health_check_api] +bind_address = "0.0.0.0:1313" diff --git a/src/bin/e2e_tests_runner.rs b/src/bin/e2e_tests_runner.rs new file mode 100644 index 000000000..35368b612 --- /dev/null +++ b/src/bin/e2e_tests_runner.rs @@ -0,0 +1,10 @@ +//! Program to run E2E tests. +//! +//! ```text +//! cargo run --bin e2e_tests_runner share/default/config/tracker.e2e.container.sqlite3.toml +//! ``` +use torrust_tracker::e2e; + +fn main() { + e2e::runner::run(); +} diff --git a/src/e2e/docker.rs b/src/e2e/docker.rs new file mode 100644 index 000000000..419e6138a --- /dev/null +++ b/src/e2e/docker.rs @@ -0,0 +1,177 @@ +//! Docker command wrapper. +use std::io; +use std::process::{Command, Output}; +use std::thread::sleep; +use std::time::{Duration, Instant}; + +use log::debug; + +/// Docker command wrapper. +pub struct Docker {} + +pub struct RunningContainer { + pub name: String, + pub output: Output, +} + +impl Drop for RunningContainer { + /// Ensures that the temporary container is stopped and removed when the + /// struct goes out of scope. + fn drop(&mut self) { + let _unused = Docker::stop(self); + let _unused = Docker::remove(&self.name); + } +} + +impl Docker { + /// Builds a Docker image from a given Dockerfile. + /// + /// # Errors + /// + /// Will fail if the docker build command fails. + pub fn build(dockerfile: &str, tag: &str) -> io::Result<()> { + let status = Command::new("docker") + .args(["build", "-f", dockerfile, "-t", tag, "."]) + .status()?; + + if status.success() { + Ok(()) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + format!("Failed to build Docker image from dockerfile {dockerfile}"), + )) + } + } + + /// Runs a Docker container from a given image with multiple environment variables. + /// + /// # Arguments + /// + /// * `image` - The Docker image to run. + /// * `container` - The name for the Docker container. 
+ /// * `env_vars` - A slice of tuples, each representing an environment variable as ("KEY", "value"). + /// + /// # Errors + /// + /// Will fail if the docker run command fails. + pub fn run(image: &str, container: &str, env_vars: &[(String, String)], ports: &[String]) -> io::Result { + let initial_args = vec![ + "run".to_string(), + "--detach".to_string(), + "--name".to_string(), + container.to_string(), + ]; + + // Add environment variables + let mut env_var_args: Vec = vec![]; + for (key, value) in env_vars { + env_var_args.push("--env".to_string()); + env_var_args.push(format!("{key}={value}")); + } + + // Add port mappings + let mut port_args: Vec = vec![]; + for port in ports { + port_args.push("--publish".to_string()); + port_args.push(port.to_string()); + } + + let args = [initial_args, env_var_args, port_args, [image.to_string()].to_vec()].concat(); + + debug!("Docker run args: {:?}", args); + + let output = Command::new("docker").args(args).output()?; + + if output.status.success() { + Ok(RunningContainer { + name: container.to_owned(), + output, + }) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + format!("Failed to run Docker image {image}"), + )) + } + } + + /// Stops a Docker container. + /// + /// # Errors + /// + /// Will fail if the docker stop command fails. + pub fn stop(container: &RunningContainer) -> io::Result<()> { + let status = Command::new("docker").args(["stop", &container.name]).status()?; + + if status.success() { + Ok(()) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + format!("Failed to stop Docker container {}", container.name), + )) + } + } + + /// Removes a Docker container. + /// + /// # Errors + /// + /// Will fail if the docker rm command fails. 
+ pub fn remove(container: &str) -> io::Result<()> { + let status = Command::new("docker").args(["rm", "-f", container]).status()?; + + if status.success() { + Ok(()) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + format!("Failed to remove Docker container {container}"), + )) + } + } + + /// Fetches logs from a Docker container. + /// + /// # Errors + /// + /// Will fail if the docker logs command fails. + pub fn logs(container: &str) -> io::Result { + let output = Command::new("docker").args(["logs", container]).output()?; + + if output.status.success() { + Ok(String::from_utf8_lossy(&output.stdout).to_string()) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + format!("Failed to fetch logs from Docker container {container}"), + )) + } + } + + /// Checks if a Docker container is healthy. + #[must_use] + pub fn wait_until_is_healthy(name: &str, timeout: Duration) -> bool { + let start = Instant::now(); + + while start.elapsed() < timeout { + let Ok(output) = Command::new("docker") + .args(["ps", "-f", &format!("name={name}"), "--format", "{{.Status}}"]) + .output() + else { + return false; + }; + + let output_str = String::from_utf8_lossy(&output.stdout); + + if output_str.contains("(healthy)") { + return true; + } + + sleep(Duration::from_secs(1)); + } + + false + } +} diff --git a/src/e2e/logs_parser.rs b/src/e2e/logs_parser.rs new file mode 100644 index 000000000..1d6baa23e --- /dev/null +++ b/src/e2e/logs_parser.rs @@ -0,0 +1,114 @@ +//! Utilities to parse Torrust Tracker logs. 
+use serde::{Deserialize, Serialize}; + +const UDP_TRACKER_PATTERN: &str = "[UDP Tracker][INFO] Starting on: udp://"; +const HTTP_TRACKER_PATTERN: &str = "[HTTP Tracker][INFO] Starting on: "; +const HEALTH_CHECK_PATTERN: &str = "[Health Check API][INFO] Starting on: "; + +#[derive(Serialize, Deserialize, Debug, Default)] +pub struct RunningServices { + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub health_checks: Vec, +} + +impl RunningServices { + /// It parses the tracker logs to extract the running services. + /// + /// For example, from this logs: + /// + /// ```text + /// Loading default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... + /// 2024-01-24T16:36:14.614898789+00:00 [torrust_tracker::bootstrap::logging][INFO] logging initialized. + /// 2024-01-24T16:36:14.615586025+00:00 [UDP Tracker][INFO] Starting on: udp://0.0.0.0:6969 + /// 2024-01-24T16:36:14.615623705+00:00 [torrust_tracker::bootstrap::jobs][INFO] TLS not enabled + /// 2024-01-24T16:36:14.615694484+00:00 [HTTP Tracker][INFO] Starting on: http://0.0.0.0:7070 + /// 2024-01-24T16:36:14.615710534+00:00 [HTTP Tracker][INFO] Started on: http://0.0.0.0:7070 + /// 2024-01-24T16:36:14.615716574+00:00 [torrust_tracker::bootstrap::jobs][INFO] TLS not enabled + /// 2024-01-24T16:36:14.615764904+00:00 [API][INFO] Starting on http://127.0.0.1:1212 + /// 2024-01-24T16:36:14.615767264+00:00 [API][INFO] Started on http://127.0.0.1:1212 + /// 2024-01-24T16:36:14.615777574+00:00 [Health Check API][INFO] Starting on: http://127.0.0.1:1313 + /// 2024-01-24T16:36:14.615791124+00:00 [Health Check API][INFO] Started on: http://127.0.0.1:1313 + /// ``` + /// + /// It would extract these services: + /// + /// ```json + /// { + /// "udp_trackers": [ + /// "127.0.0.1:6969" + /// ], + /// "http_trackers": [ + /// "http://127.0.0.1:7070" + /// ], + /// "health_checks": [ + /// "http://127.0.0.1:1313/health_check" + /// ] + /// } + /// ``` + #[must_use] + pub fn parse_from_logs(logs: 
&str) -> Self { + let mut udp_trackers: Vec = Vec::new(); + let mut http_trackers: Vec = Vec::new(); + let mut health_checks: Vec = Vec::new(); + + for line in logs.lines() { + if let Some(address) = Self::extract_address_if_matches(line, UDP_TRACKER_PATTERN) { + udp_trackers.push(address); + } else if let Some(address) = Self::extract_address_if_matches(line, HTTP_TRACKER_PATTERN) { + http_trackers.push(address); + } else if let Some(address) = Self::extract_address_if_matches(line, HEALTH_CHECK_PATTERN) { + health_checks.push(format!("{address}/health_check")); + } + } + + Self { + udp_trackers, + http_trackers, + health_checks, + } + } + + fn extract_address_if_matches(line: &str, pattern: &str) -> Option { + line.find(pattern) + .map(|start| Self::replace_wildcard_ip_with_localhost(line[start + pattern.len()..].trim())) + } + + fn replace_wildcard_ip_with_localhost(address: &str) -> String { + address.replace("0.0.0.0", "127.0.0.1") + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_should_parse_from_logs_with_valid_logs() { + let logs = "\ + [UDP Tracker][INFO] Starting on: udp://0.0.0.0:8080\n\ + [HTTP Tracker][INFO] Starting on: 0.0.0.0:9090\n\ + [Health Check API][INFO] Starting on: 0.0.0.0:10010"; + let running_services = RunningServices::parse_from_logs(logs); + + assert_eq!(running_services.udp_trackers, vec!["127.0.0.1:8080"]); + assert_eq!(running_services.http_trackers, vec!["127.0.0.1:9090"]); + assert_eq!(running_services.health_checks, vec!["127.0.0.1:10010/health_check"]); + } + + #[test] + fn it_should_ignore_logs_with_no_matching_lines() { + let logs = "[Other Service][INFO] Starting on: 0.0.0.0:7070"; + let running_services = RunningServices::parse_from_logs(logs); + + assert!(running_services.udp_trackers.is_empty()); + assert!(running_services.http_trackers.is_empty()); + assert!(running_services.health_checks.is_empty()); + } + + #[test] + fn it_should_replace_wildcard_ip_with_localhost() { + let address = 
"0.0.0.0:8080"; + assert_eq!(RunningServices::replace_wildcard_ip_with_localhost(address), "127.0.0.1:8080"); + } +} diff --git a/src/e2e/mod.rs b/src/e2e/mod.rs new file mode 100644 index 000000000..6745d49cd --- /dev/null +++ b/src/e2e/mod.rs @@ -0,0 +1,4 @@ +pub mod docker; +pub mod logs_parser; +pub mod runner; +pub mod temp_dir; diff --git a/src/e2e/runner.rs b/src/e2e/runner.rs new file mode 100644 index 000000000..eee2805a6 --- /dev/null +++ b/src/e2e/runner.rs @@ -0,0 +1,214 @@ +use std::fs::File; +use std::io::Write; +use std::path::{Path, PathBuf}; +use std::process::Command; +use std::time::Duration; +use std::{env, io}; + +use log::{debug, info, LevelFilter}; +use rand::distributions::Alphanumeric; +use rand::Rng; + +use super::docker::RunningContainer; +use crate::e2e::docker::Docker; +use crate::e2e::logs_parser::RunningServices; +use crate::e2e::temp_dir::Handler; + +pub const NUMBER_OF_ARGUMENTS: usize = 2; +const CONTAINER_TAG: &str = "torrust-tracker:local"; +const TRACKER_CHECKER_CONFIG_FILE: &str = "tracker_checker.json"; + +pub struct Arguments { + pub tracker_config_path: String, +} + +/// Script to run E2E tests. +/// +/// # Panics +/// +/// Will panic if it can't not perform any of the operations. +pub fn run() { + setup_runner_logging(LevelFilter::Info); + + let args = parse_arguments(); + + let tracker_config = load_tracker_configuration(&args.tracker_config_path); + + build_tracker_container_image(CONTAINER_TAG); + + let temp_dir = create_temp_dir(); + + let container_name = generate_random_container_name("tracker_"); + + // code-review: if we want to use port 0 we don't know which ports we have to open. + // Besides, if we don't use port 0 we should get the port numbers from the tracker configuration. + // We could not use docker, but the intention was to create E2E tests including containerization. 
+ let env_vars = [("TORRUST_TRACKER_CONFIG".to_string(), tracker_config.to_string())]; + let ports = [ + "6969:6969/udp".to_string(), + "7070:7070/tcp".to_string(), + "1212:1212/tcp".to_string(), + "1313:1313/tcp".to_string(), + ]; + + let container = run_tracker_container(&container_name, &env_vars, &ports); + + let running_services = parse_running_services_from_logs(&container); + + let tracker_checker_config = + serde_json::to_string_pretty(&running_services).expect("Running services should be serialized into JSON"); + + let mut tracker_checker_config_path = PathBuf::from(&temp_dir.temp_dir.path()); + tracker_checker_config_path.push(TRACKER_CHECKER_CONFIG_FILE); + + write_tracker_checker_config_file(&tracker_checker_config_path, &tracker_checker_config); + + run_tracker_checker(&tracker_checker_config_path).expect("Tracker checker should check running services"); + + // More E2E tests could be executed here in the future. For example: `cargo test ...`. + + info!("Running container `{}` will be automatically removed", container.name); +} + +fn setup_runner_logging(level: LevelFilter) { + if let Err(_err) = fern::Dispatch::new() + .format(|out, message, record| { + out.finish(format_args!( + "{} [{}][{}] {}", + chrono::Local::now().format("%+"), + record.target(), + record.level(), + message + )); + }) + .level(level) + .chain(std::io::stdout()) + .apply() + { + panic!("Failed to initialize logging.") + } + + debug!("logging initialized."); +} + +fn parse_arguments() -> Arguments { + let args: Vec = std::env::args().collect(); + + if args.len() < NUMBER_OF_ARGUMENTS { + eprintln!("Usage: cargo run --bin e2e_tests_runner "); + eprintln!("For example: cargo run --bin e2e_tests_runner ./share/default/config/tracker.e2e.container.sqlite3.toml"); + std::process::exit(1); + } + + let config_path = &args[1]; + + Arguments { + tracker_config_path: config_path.to_string(), + } +} + +fn load_tracker_configuration(tracker_config_path: &str) -> String { + info!("Reading 
tracker configuration from file: {} ...", tracker_config_path); + read_file(tracker_config_path) +} + +fn read_file(path: &str) -> String { + std::fs::read_to_string(path).unwrap_or_else(|_| panic!("Can't read file {path}")) +} + +fn build_tracker_container_image(tag: &str) { + info!("Building tracker container image with tag: {} ...", tag); + Docker::build("./Containerfile", tag).expect("A tracker local docker image should be built"); +} + +fn create_temp_dir() -> Handler { + debug!( + "Current dir: {:?}", + env::current_dir().expect("It should return the current dir") + ); + + let temp_dir_handler = Handler::new().expect("A temp dir should be created"); + + info!("Temp dir created: {:?}", temp_dir_handler.temp_dir); + + temp_dir_handler +} + +fn generate_random_container_name(prefix: &str) -> String { + let rand_string: String = rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(20) + .map(char::from) + .collect(); + + format!("{prefix}{rand_string}") +} + +fn run_tracker_container(container_name: &str, env_vars: &[(String, String)], ports: &[String]) -> RunningContainer { + info!("Running docker tracker image: {container_name} ..."); + + let container = + Docker::run(CONTAINER_TAG, container_name, env_vars, ports).expect("A tracker local docker image should be running"); + + info!("Waiting for the container {container_name} to be healthy ..."); + + let is_healthy = Docker::wait_until_is_healthy(container_name, Duration::from_secs(10)); + + assert!(is_healthy, "Unhealthy tracker container: {container_name}"); + + debug!("Container {container_name} is healthy ..."); + + container +} + +fn parse_running_services_from_logs(container: &RunningContainer) -> RunningServices { + let logs = Docker::logs(&container.name).expect("Logs should be captured from running container"); + + debug!("Logs after starting the container:\n{logs}"); + + RunningServices::parse_from_logs(&logs) +} + +fn write_tracker_checker_config_file(config_file_path: &Path, config: &str) { + let 
mut file = File::create(config_file_path).expect("Tracker checker config file to be created"); + + file.write_all(config.as_bytes()) + .expect("Tracker checker config file to be written"); + + info!("Tracker checker configuration file: {:?} \n{config}", config_file_path); +} + +/// Runs the tracker checker +/// +/// ```text +/// cargo run --bin tracker_checker "./share/default/config/tracker_checker.json" +/// ``` +/// +/// # Errors +/// +/// Will return an error if the tracker checker fails. +/// +/// # Panics +/// +/// Will panic if the config path is not a valid string. +pub fn run_tracker_checker(config_path: &Path) -> io::Result<()> { + info!( + "Running tacker checker: cargo --bin tracker_checker {}", + config_path.display() + ); + + let path = config_path.to_str().expect("The path should be a valid string"); + + let status = Command::new("cargo") + .args(["run", "--bin", "tracker_checker", path]) + .status()?; + + if status.success() { + Ok(()) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + format!("Failed to run tracker checker with config file {path}"), + )) + } +} diff --git a/src/e2e/temp_dir.rs b/src/e2e/temp_dir.rs new file mode 100644 index 000000000..8433e3059 --- /dev/null +++ b/src/e2e/temp_dir.rs @@ -0,0 +1,53 @@ +//! Temp dir which is automatically removed when it goes out of scope. +use std::path::PathBuf; +use std::{env, io}; + +use tempfile::TempDir; + +pub struct Handler { + pub temp_dir: TempDir, + pub original_dir: PathBuf, +} + +impl Handler { + /// Creates a new temporary directory and remembers the current working directory. + /// + /// # Errors + /// + /// Will error if: + /// + /// - It can't create the temp dir. + /// - It can't get the current dir. + pub fn new() -> io::Result { + let temp_dir = TempDir::new()?; + let original_dir = env::current_dir()?; + + Ok(Handler { temp_dir, original_dir }) + } + + /// Changes the current working directory to the temporary directory. 
+ /// + /// # Errors + /// + /// Will error if it can't change the current dir to the temp dir. + pub fn change_to_temp_dir(&self) -> io::Result<()> { + env::set_current_dir(self.temp_dir.path()) + } + + /// Changes the current working directory back to the original directory. + /// + /// # Errors + /// + /// Will error if it can't revert the current dir to the original one. + pub fn revert_to_original_dir(&self) -> io::Result<()> { + env::set_current_dir(&self.original_dir) + } +} + +impl Drop for Handler { + /// Ensures that the temporary directory is deleted when the struct goes out of scope. + fn drop(&mut self) { + // The temporary directory is automatically deleted when `TempDir` is dropped. + // We can add additional cleanup here if necessary. + } +} diff --git a/src/lib.rs b/src/lib.rs index 7b5d453a4..f239039bd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -473,6 +473,7 @@ pub mod app; pub mod bootstrap; pub mod checker; pub mod core; +pub mod e2e; pub mod servers; pub mod shared; From ec13fb41cfef0d68af67b44aee8c83696754a519 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 24 Jan 2024 17:17:24 +0000 Subject: [PATCH 0698/1003] ci: [#634] run E2E tests in the testing workflow --- .github/workflows/testing.yaml | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index 02dbb1804..5deabd74a 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -110,3 +110,32 @@ jobs: - id: test name: Run Unit Tests run: cargo test --tests --benches --examples --workspace --all-targets --all-features + + e2e: + name: E2E + runs-on: ubuntu-latest + needs: unit + + strategy: + matrix: + toolchain: [nightly] + + steps: + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.toolchain }} + components: llvm-tools-preview + + - id: cache + name: Enable Job Cache + uses: Swatinem/rust-cache@v2 + + - id: 
checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: test + name: Run E2E Tests + run: cargo run --bin e2e_tests_runner ./share/default/config/tracker.e2e.container.sqlite3.toml From 0afab09333ce75ca4bd0a65020b77360390929b1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 26 Jan 2024 10:28:14 +0000 Subject: [PATCH 0699/1003] refactor: [#647] extract strcut RunOptions --- src/e2e/docker.rs | 12 +++++++++--- src/e2e/runner.rs | 27 ++++++++++++++------------- 2 files changed, 23 insertions(+), 16 deletions(-) diff --git a/src/e2e/docker.rs b/src/e2e/docker.rs index 419e6138a..75c67d64b 100644 --- a/src/e2e/docker.rs +++ b/src/e2e/docker.rs @@ -23,6 +23,12 @@ impl Drop for RunningContainer { } } +/// `docker run` command options. +pub struct RunOptions { + pub env_vars: Vec<(String, String)>, + pub ports: Vec, +} + impl Docker { /// Builds a Docker image from a given Dockerfile. /// @@ -55,7 +61,7 @@ impl Docker { /// # Errors /// /// Will fail if the docker run command fails. 
- pub fn run(image: &str, container: &str, env_vars: &[(String, String)], ports: &[String]) -> io::Result { + pub fn run(image: &str, container: &str, options: &RunOptions) -> io::Result { let initial_args = vec![ "run".to_string(), "--detach".to_string(), @@ -65,14 +71,14 @@ impl Docker { // Add environment variables let mut env_var_args: Vec = vec![]; - for (key, value) in env_vars { + for (key, value) in &options.env_vars { env_var_args.push("--env".to_string()); env_var_args.push(format!("{key}={value}")); } // Add port mappings let mut port_args: Vec = vec![]; - for port in ports { + for port in &options.ports { port_args.push("--publish".to_string()); port_args.push(port.to_string()); } diff --git a/src/e2e/runner.rs b/src/e2e/runner.rs index eee2805a6..a59659891 100644 --- a/src/e2e/runner.rs +++ b/src/e2e/runner.rs @@ -10,7 +10,7 @@ use rand::distributions::Alphanumeric; use rand::Rng; use super::docker::RunningContainer; -use crate::e2e::docker::Docker; +use crate::e2e::docker::{Docker, RunOptions}; use crate::e2e::logs_parser::RunningServices; use crate::e2e::temp_dir::Handler; @@ -43,15 +43,17 @@ pub fn run() { // code-review: if we want to use port 0 we don't know which ports we have to open. // Besides, if we don't use port 0 we should get the port numbers from the tracker configuration. // We could not use docker, but the intention was to create E2E tests including containerization. 
- let env_vars = [("TORRUST_TRACKER_CONFIG".to_string(), tracker_config.to_string())]; - let ports = [ - "6969:6969/udp".to_string(), - "7070:7070/tcp".to_string(), - "1212:1212/tcp".to_string(), - "1313:1313/tcp".to_string(), - ]; - - let container = run_tracker_container(&container_name, &env_vars, &ports); + let options = RunOptions { + env_vars: vec![("TORRUST_TRACKER_CONFIG".to_string(), tracker_config.to_string())], + ports: vec![ + "6969:6969/udp".to_string(), + "7070:7070/tcp".to_string(), + "1212:1212/tcp".to_string(), + "1313:1313/tcp".to_string(), + ], + }; + + let container = run_tracker_container(CONTAINER_TAG, &container_name, &options); let running_services = parse_running_services_from_logs(&container); @@ -144,11 +146,10 @@ fn generate_random_container_name(prefix: &str) -> String { format!("{prefix}{rand_string}") } -fn run_tracker_container(container_name: &str, env_vars: &[(String, String)], ports: &[String]) -> RunningContainer { +fn run_tracker_container(image: &str, container_name: &str, options: &RunOptions) -> RunningContainer { info!("Running docker tracker image: {container_name} ..."); - let container = - Docker::run(CONTAINER_TAG, container_name, env_vars, ports).expect("A tracker local docker image should be running"); + let container = Docker::run(image, container_name, options).expect("A tracker local docker image should be running"); info!("Waiting for the container {container_name} to be healthy ..."); From 670927c1c57b9381c90082af9856dfa11aba93ad Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 26 Jan 2024 10:35:10 +0000 Subject: [PATCH 0700/1003] ci: [#647] E2E tests. Make sure we run at least one service per type We want to run all services in the E2E tests env. 
At least one running service per type: - HTTP tracker - UDP tracker - HealthCheck endpoint --- src/e2e/runner.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/e2e/runner.rs b/src/e2e/runner.rs index a59659891..058ebed40 100644 --- a/src/e2e/runner.rs +++ b/src/e2e/runner.rs @@ -57,6 +57,8 @@ pub fn run() { let running_services = parse_running_services_from_logs(&container); + assert_there_is_at_least_one_service_per_type(&running_services); + let tracker_checker_config = serde_json::to_string_pretty(&running_services).expect("Running services should be serialized into JSON"); @@ -170,6 +172,21 @@ fn parse_running_services_from_logs(container: &RunningContainer) -> RunningServ RunningServices::parse_from_logs(&logs) } +fn assert_there_is_at_least_one_service_per_type(running_services: &RunningServices) { + assert!( + !running_services.udp_trackers.is_empty(), + "At least one UDP tracker should be enabled in E2E tests configuration" + ); + assert!( + !running_services.http_trackers.is_empty(), + "At least one HTTP tracker should be enabled in E2E tests configuration" + ); + assert!( + !running_services.health_checks.is_empty(), + "At least one Health Check should be enabled in E2E tests configuration" + ); +} + fn write_tracker_checker_config_file(config_file_path: &Path, config: &str) { let mut file = File::create(config_file_path).expect("Tracker checker config file to be created"); From ddad4a432b0602067f9d7a227d3e82e5398f8baf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 26 Jan 2024 11:05:54 +0000 Subject: [PATCH 0701/1003] ci: [#647] E2E tests. 
Make sure there are not panics in logs --- src/e2e/docker.rs | 54 +++++++++++++++++++++++++++++++++++++++++++++-- src/e2e/runner.rs | 32 ++++++++++++++++++++++++++-- 2 files changed, 82 insertions(+), 4 deletions(-) diff --git a/src/e2e/docker.rs b/src/e2e/docker.rs index 75c67d64b..6eb64783a 100644 --- a/src/e2e/docker.rs +++ b/src/e2e/docker.rs @@ -18,8 +18,12 @@ impl Drop for RunningContainer { /// Ensures that the temporary container is stopped and removed when the /// struct goes out of scope. fn drop(&mut self) { - let _unused = Docker::stop(self); - let _unused = Docker::remove(&self.name); + if Docker::is_container_running(&self.name) { + let _unused = Docker::stop(self); + } + if Docker::container_exist(&self.name) { + let _unused = Docker::remove(&self.name); + } } } @@ -180,4 +184,50 @@ impl Docker { false } + + /// Checks if a Docker container is running. + /// + /// # Arguments + /// + /// * `container` - The name of the Docker container. + /// + /// # Returns + /// + /// `true` if the container is running, `false` otherwise. + #[must_use] + pub fn is_container_running(container: &str) -> bool { + match Command::new("docker") + .args(["ps", "-f", &format!("name={container}"), "--format", "{{.Names}}"]) + .output() + { + Ok(output) => { + let output_str = String::from_utf8_lossy(&output.stdout); + output_str.contains(container) + } + Err(_) => false, + } + } + + /// Checks if a Docker container exists. + /// + /// # Arguments + /// + /// * `container` - The name of the Docker container. + /// + /// # Returns + /// + /// `true` if the container exists, `false` otherwise. 
+ #[must_use] + pub fn container_exist(container: &str) -> bool { + match Command::new("docker") + .args(["ps", "-a", "-f", &format!("name={container}"), "--format", "{{.Names}}"]) + .output() + { + Ok(output) => { + let output_str = String::from_utf8_lossy(&output.stdout); + output_str.contains(container) + } + Err(_) => false, + } + } } diff --git a/src/e2e/runner.rs b/src/e2e/runner.rs index 058ebed40..370069544 100644 --- a/src/e2e/runner.rs +++ b/src/e2e/runner.rs @@ -57,6 +57,8 @@ pub fn run() { let running_services = parse_running_services_from_logs(&container); + assert_there_are_no_panics_in_logs(&container); + assert_there_is_at_least_one_service_per_type(&running_services); let tracker_checker_config = @@ -69,9 +71,12 @@ pub fn run() { run_tracker_checker(&tracker_checker_config_path).expect("Tracker checker should check running services"); - // More E2E tests could be executed here in the future. For example: `cargo test ...`. + // More E2E tests could be added here in the future. + // For example: `cargo test ...` for only E2E tests, using this shared test env. 
+ + stop_tracker_container(&container); - info!("Running container `{}` will be automatically removed", container.name); + remove_tracker_container(&container_name); } fn setup_runner_logging(level: LevelFilter) { @@ -164,6 +169,29 @@ fn run_tracker_container(image: &str, container_name: &str, options: &RunOptions container } +fn stop_tracker_container(container: &RunningContainer) { + info!("Stopping docker tracker image: {} ...", container.name); + Docker::stop(container).expect("Container should be stopped"); + assert_there_are_no_panics_in_logs(container); +} + +fn remove_tracker_container(container_name: &str) { + info!("Removing docker tracker image: {container_name} ..."); + Docker::remove(container_name).expect("Container should be removed"); +} + +fn assert_there_are_no_panics_in_logs(container: &RunningContainer) -> RunningServices { + let logs = Docker::logs(&container.name).expect("Logs should be captured from running container"); + + assert!( + !(logs.contains(" panicked at ") || logs.contains("RUST_BACKTRACE=1")), + "{}", + format!("Panics found is logs:\n{logs}") + ); + + RunningServices::parse_from_logs(&logs) +} + fn parse_running_services_from_logs(container: &RunningContainer) -> RunningServices { let logs = Docker::logs(&container.name).expect("Logs should be captured from running container"); From 68f71be7ccb437e6befbbc400d3c46b9ba2eabf8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 26 Jan 2024 11:22:48 +0000 Subject: [PATCH 0702/1003] refactor: [#647] E2E tests. 
Extract strcut TrackerContainer --- src/e2e/docker.rs | 13 ++-- src/e2e/mod.rs | 1 + src/e2e/runner.rs | 117 +++++++++-------------------- src/e2e/tracker_container.rs | 138 +++++++++++++++++++++++++++++++++++ 4 files changed, 179 insertions(+), 90 deletions(-) create mode 100644 src/e2e/tracker_container.rs diff --git a/src/e2e/docker.rs b/src/e2e/docker.rs index 6eb64783a..c024efbae 100644 --- a/src/e2e/docker.rs +++ b/src/e2e/docker.rs @@ -4,26 +4,26 @@ use std::process::{Command, Output}; use std::thread::sleep; use std::time::{Duration, Instant}; -use log::debug; +use log::{debug, info}; /// Docker command wrapper. pub struct Docker {} +#[derive(Clone, Debug)] pub struct RunningContainer { + pub image: String, pub name: String, pub output: Output, } impl Drop for RunningContainer { - /// Ensures that the temporary container is stopped and removed when the - /// struct goes out of scope. + /// Ensures that the temporary container is stopped when the struct goes out + /// of scope. fn drop(&mut self) { + info!("Dropping running container: {}", self.name); if Docker::is_container_running(&self.name) { let _unused = Docker::stop(self); } - if Docker::container_exist(&self.name) { - let _unused = Docker::remove(&self.name); - } } } @@ -95,6 +95,7 @@ impl Docker { if output.status.success() { Ok(RunningContainer { + image: image.to_owned(), name: container.to_owned(), output, }) diff --git a/src/e2e/mod.rs b/src/e2e/mod.rs index 6745d49cd..deba8971e 100644 --- a/src/e2e/mod.rs +++ b/src/e2e/mod.rs @@ -2,3 +2,4 @@ pub mod docker; pub mod logs_parser; pub mod runner; pub mod temp_dir; +pub mod tracker_container; diff --git a/src/e2e/runner.rs b/src/e2e/runner.rs index 370069544..23b534ee4 100644 --- a/src/e2e/runner.rs +++ b/src/e2e/runner.rs @@ -2,20 +2,26 @@ use std::fs::File; use std::io::Write; use std::path::{Path, PathBuf}; use std::process::Command; -use std::time::Duration; use std::{env, io}; use log::{debug, info, LevelFilter}; -use 
rand::distributions::Alphanumeric; -use rand::Rng; -use super::docker::RunningContainer; -use crate::e2e::docker::{Docker, RunOptions}; +use super::tracker_container::TrackerContainer; +use crate::e2e::docker::RunOptions; use crate::e2e::logs_parser::RunningServices; use crate::e2e::temp_dir::Handler; +/* code-review: + - We use always the same docker image name. Should we use a random image name (tag)? + - We use the name image name we use in other workflows `torrust-tracker:local`. + Should we use a different one like `torrust-tracker:e2e`? + - We remove the container after running tests but not the container image. + Should we remove the image too? +*/ + pub const NUMBER_OF_ARGUMENTS: usize = 2; -const CONTAINER_TAG: &str = "torrust-tracker:local"; +const CONTAINER_IMAGE: &str = "torrust-tracker:local"; +const CONTAINER_NAME_PREFIX: &str = "tracker_"; const TRACKER_CHECKER_CONFIG_FILE: &str = "tracker_checker.json"; pub struct Arguments { @@ -34,11 +40,9 @@ pub fn run() { let tracker_config = load_tracker_configuration(&args.tracker_config_path); - build_tracker_container_image(CONTAINER_TAG); - - let temp_dir = create_temp_dir(); + let mut tracker_container = TrackerContainer::new(CONTAINER_IMAGE, CONTAINER_NAME_PREFIX); - let container_name = generate_random_container_name("tracker_"); + tracker_container.build_image(); // code-review: if we want to use port 0 we don't know which ports we have to open. // Besides, if we don't use port 0 we should get the port numbers from the tracker configuration. 
@@ -53,30 +57,32 @@ pub fn run() { ], }; - let container = run_tracker_container(CONTAINER_TAG, &container_name, &options); + tracker_container.run(&options); - let running_services = parse_running_services_from_logs(&container); - - assert_there_are_no_panics_in_logs(&container); + let running_services = tracker_container.running_services(); assert_there_is_at_least_one_service_per_type(&running_services); let tracker_checker_config = serde_json::to_string_pretty(&running_services).expect("Running services should be serialized into JSON"); + let temp_dir = create_temp_dir(); + let mut tracker_checker_config_path = PathBuf::from(&temp_dir.temp_dir.path()); tracker_checker_config_path.push(TRACKER_CHECKER_CONFIG_FILE); write_tracker_checker_config_file(&tracker_checker_config_path, &tracker_checker_config); - run_tracker_checker(&tracker_checker_config_path).expect("Tracker checker should check running services"); + run_tracker_checker(&tracker_checker_config_path).expect("All tracker services should be running correctly"); // More E2E tests could be added here in the future. // For example: `cargo test ...` for only E2E tests, using this shared test env. 
- stop_tracker_container(&container); + tracker_container.stop(); + + tracker_container.remove(); - remove_tracker_container(&container_name); + info!("Tracker container final state:\n{:#?}", tracker_container); } fn setup_runner_logging(level: LevelFilter) { @@ -125,11 +131,6 @@ fn read_file(path: &str) -> String { std::fs::read_to_string(path).unwrap_or_else(|_| panic!("Can't read file {path}")) } -fn build_tracker_container_image(tag: &str) { - info!("Building tracker container image with tag: {} ...", tag); - Docker::build("./Containerfile", tag).expect("A tracker local docker image should be built"); -} - fn create_temp_dir() -> Handler { debug!( "Current dir: {:?}", @@ -143,63 +144,6 @@ fn create_temp_dir() -> Handler { temp_dir_handler } -fn generate_random_container_name(prefix: &str) -> String { - let rand_string: String = rand::thread_rng() - .sample_iter(&Alphanumeric) - .take(20) - .map(char::from) - .collect(); - - format!("{prefix}{rand_string}") -} - -fn run_tracker_container(image: &str, container_name: &str, options: &RunOptions) -> RunningContainer { - info!("Running docker tracker image: {container_name} ..."); - - let container = Docker::run(image, container_name, options).expect("A tracker local docker image should be running"); - - info!("Waiting for the container {container_name} to be healthy ..."); - - let is_healthy = Docker::wait_until_is_healthy(container_name, Duration::from_secs(10)); - - assert!(is_healthy, "Unhealthy tracker container: {container_name}"); - - debug!("Container {container_name} is healthy ..."); - - container -} - -fn stop_tracker_container(container: &RunningContainer) { - info!("Stopping docker tracker image: {} ...", container.name); - Docker::stop(container).expect("Container should be stopped"); - assert_there_are_no_panics_in_logs(container); -} - -fn remove_tracker_container(container_name: &str) { - info!("Removing docker tracker image: {container_name} ..."); - Docker::remove(container_name).expect("Container 
should be removed"); -} - -fn assert_there_are_no_panics_in_logs(container: &RunningContainer) -> RunningServices { - let logs = Docker::logs(&container.name).expect("Logs should be captured from running container"); - - assert!( - !(logs.contains(" panicked at ") || logs.contains("RUST_BACKTRACE=1")), - "{}", - format!("Panics found is logs:\n{logs}") - ); - - RunningServices::parse_from_logs(&logs) -} - -fn parse_running_services_from_logs(container: &RunningContainer) -> RunningServices { - let logs = Docker::logs(&container.name).expect("Logs should be captured from running container"); - - debug!("Logs after starting the container:\n{logs}"); - - RunningServices::parse_from_logs(&logs) -} - fn assert_there_is_at_least_one_service_per_type(running_services: &RunningServices) { assert!( !running_services.udp_trackers.is_empty(), @@ -216,15 +160,20 @@ fn assert_there_is_at_least_one_service_per_type(running_services: &RunningServi } fn write_tracker_checker_config_file(config_file_path: &Path, config: &str) { + info!( + "Writing Tracker Checker configuration file: {:?} \n{config}", + config_file_path + ); + let mut file = File::create(config_file_path).expect("Tracker checker config file to be created"); file.write_all(config.as_bytes()) .expect("Tracker checker config file to be written"); - - info!("Tracker checker configuration file: {:?} \n{config}", config_file_path); } -/// Runs the tracker checker +/// Runs the Tracker Checker. +/// +/// For example: /// /// ```text /// cargo run --bin tracker_checker "./share/default/config/tracker_checker.json" @@ -239,7 +188,7 @@ fn write_tracker_checker_config_file(config_file_path: &Path, config: &str) { /// Will panic if the config path is not a valid string. 
pub fn run_tracker_checker(config_path: &Path) -> io::Result<()> { info!( - "Running tacker checker: cargo --bin tracker_checker {}", + "Running Tracker Checker: cargo --bin tracker_checker {}", config_path.display() ); @@ -254,7 +203,7 @@ pub fn run_tracker_checker(config_path: &Path) -> io::Result<()> { } else { Err(io::Error::new( io::ErrorKind::Other, - format!("Failed to run tracker checker with config file {path}"), + format!("Failed to run Tracker Checker with config file {path}"), )) } } diff --git a/src/e2e/tracker_container.rs b/src/e2e/tracker_container.rs new file mode 100644 index 000000000..3e70942b5 --- /dev/null +++ b/src/e2e/tracker_container.rs @@ -0,0 +1,138 @@ +use std::time::Duration; + +use log::{debug, error, info}; +use rand::distributions::Alphanumeric; +use rand::Rng; + +use super::docker::{RunOptions, RunningContainer}; +use super::logs_parser::RunningServices; +use crate::e2e::docker::Docker; + +#[derive(Debug)] +pub struct TrackerContainer { + pub image: String, + pub name: String, + pub running: Option, +} + +impl Drop for TrackerContainer { + /// Ensures that the temporary container is removed when the + /// struct goes out of scope. + fn drop(&mut self) { + info!("Dropping tracker container: {}", self.name); + if Docker::container_exist(&self.name) { + let _unused = Docker::remove(&self.name); + } + } +} + +impl TrackerContainer { + #[must_use] + pub fn new(tag: &str, container_name_prefix: &str) -> Self { + Self { + image: tag.to_owned(), + name: Self::generate_random_container_name(container_name_prefix), + running: None, + } + } + + /// # Panics + /// + /// Will panic if it can't build the docker image. + pub fn build_image(&self) { + info!("Building tracker container image with tag: {} ...", self.image); + Docker::build("./Containerfile", &self.image).expect("A tracker local docker image should be built"); + } + + /// # Panics + /// + /// Will panic if it can't run the container. 
+ pub fn run(&mut self, options: &RunOptions) { + info!("Running docker tracker image: {} ...", self.name); + + let container = Docker::run(&self.image, &self.name, options).expect("A tracker local docker image should be running"); + + info!("Waiting for the container {} to be healthy ...", self.name); + + let is_healthy = Docker::wait_until_is_healthy(&self.name, Duration::from_secs(10)); + + assert!(is_healthy, "Unhealthy tracker container: {}", &self.name); + + info!("Container {} is healthy ...", &self.name); + + self.running = Some(container); + + self.assert_there_are_no_panics_in_logs(); + } + + /// # Panics + /// + /// Will panic if it can't get the logs from the running container. + #[must_use] + pub fn running_services(&self) -> RunningServices { + let logs = Docker::logs(&self.name).expect("Logs should be captured from running container"); + + debug!("Parsing running services from logs. Logs :\n{logs}"); + + RunningServices::parse_from_logs(&logs) + } + + /// # Panics + /// + /// Will panic if it can't stop the container. + pub fn stop(&mut self) { + match &self.running { + Some(container) => { + info!("Stopping docker tracker container: {} ...", self.name); + + Docker::stop(container).expect("Container should be stopped"); + + self.assert_there_are_no_panics_in_logs(); + } + None => { + if Docker::is_container_running(&self.name) { + error!("Tracker container {} was started manually", self.name); + } else { + info!("Docker tracker container is not running: {} ...", self.name); + } + } + } + + self.running = None; + } + + /// # Panics + /// + /// Will panic if it can't remove the container. 
+ pub fn remove(&self) { + match &self.running { + Some(_running_container) => { + error!("Can't remove running container: {} ...", self.name); + } + None => { + info!("Removing docker tracker container: {} ...", self.name); + Docker::remove(&self.name).expect("Container should be removed"); + } + } + } + + fn generate_random_container_name(prefix: &str) -> String { + let rand_string: String = rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(20) + .map(char::from) + .collect(); + + format!("{prefix}{rand_string}") + } + + fn assert_there_are_no_panics_in_logs(&self) { + let logs = Docker::logs(&self.name).expect("Logs should be captured from running container"); + + assert!( + !(logs.contains(" panicked at ") || logs.contains("RUST_BACKTRACE=1")), + "{}", + format!("Panics found is logs:\n{logs}") + ); + } +} From e5cd81bdd1ab7867c240b7a91f30414f604b0318 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 26 Jan 2024 15:14:49 +0000 Subject: [PATCH 0703/1003] refactor: [#647] E2E tests. 
Extract function --- src/e2e/runner.rs | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/src/e2e/runner.rs b/src/e2e/runner.rs index 23b534ee4..818210886 100644 --- a/src/e2e/runner.rs +++ b/src/e2e/runner.rs @@ -63,16 +63,13 @@ pub fn run() { assert_there_is_at_least_one_service_per_type(&running_services); - let tracker_checker_config = - serde_json::to_string_pretty(&running_services).expect("Running services should be serialized into JSON"); - let temp_dir = create_temp_dir(); - let mut tracker_checker_config_path = PathBuf::from(&temp_dir.temp_dir.path()); - tracker_checker_config_path.push(TRACKER_CHECKER_CONFIG_FILE); - - write_tracker_checker_config_file(&tracker_checker_config_path, &tracker_checker_config); + let tracker_checker_config_path = + create_tracker_checker_config_file(&running_services, temp_dir.temp_dir.path(), TRACKER_CHECKER_CONFIG_FILE); + // todo: inject the configuration with an env variable so that we don't have + // to create the temporary directory/file. run_tracker_checker(&tracker_checker_config_path).expect("All tracker services should be running correctly"); // More E2E tests could be added here in the future. 
@@ -159,6 +156,18 @@ fn assert_there_is_at_least_one_service_per_type(running_services: &RunningServi ); } +fn create_tracker_checker_config_file(running_services: &RunningServices, config_path: &Path, config_name: &str) -> PathBuf { + let tracker_checker_config = + serde_json::to_string_pretty(&running_services).expect("Running services should be serialized into JSON"); + + let mut tracker_checker_config_path = PathBuf::from(&config_path); + tracker_checker_config_path.push(config_name); + + write_tracker_checker_config_file(&tracker_checker_config_path, &tracker_checker_config); + + tracker_checker_config_path +} + fn write_tracker_checker_config_file(config_file_path: &Path, config: &str) { info!( "Writing Tracker Checker configuration file: {:?} \n{config}", From f18e68cc498fa98ebb2b23b0605dd8ec60dbf579 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 26 Jan 2024 16:22:56 +0000 Subject: [PATCH 0704/1003] fix: tracker checker return error code when it fails This command: ``` cargo run --bin tracker_checker "./share/default/config/tracker_checker.json" && echo "OK" ``` should not print OK when it fails. 
--- src/bin/tracker_checker.rs | 2 +- src/checker/app.rs | 10 +++++++--- src/checker/service.rs | 36 +++++++++++++++++++++++++++++++----- src/e2e/runner.rs | 2 +- 4 files changed, 40 insertions(+), 10 deletions(-) diff --git a/src/bin/tracker_checker.rs b/src/bin/tracker_checker.rs index 3a0e0ee88..d2f676097 100644 --- a/src/bin/tracker_checker.rs +++ b/src/bin/tracker_checker.rs @@ -7,5 +7,5 @@ use torrust_tracker::checker::app; #[tokio::main] async fn main() { - app::run().await; + app::run().await.expect("Some checks fail"); } diff --git a/src/checker/app.rs b/src/checker/app.rs index e92373493..22ed61ba7 100644 --- a/src/checker/app.rs +++ b/src/checker/app.rs @@ -2,18 +2,22 @@ use std::sync::Arc; use super::config::Configuration; use super::console::Console; +use super::service::{CheckError, Service}; use crate::checker::config::parse_from_json; -use crate::checker::service::Service; pub const NUMBER_OF_ARGUMENTS: usize = 2; +/// # Errors +/// +/// If some checks fails it will return a vector with all failing checks. +/// /// # Panics /// /// Will panic if: /// /// - It can't read the json configuration file. /// - The configuration file is invalid. -pub async fn run() { +pub async fn run() -> Result<(), Vec> { let args = parse_arguments(); let config = setup_config(&args); let console_printer = Console {}; @@ -22,7 +26,7 @@ pub async fn run() { console: console_printer, }; - service.run_checks().await; + service.run_checks().await } pub struct Arguments { diff --git a/src/checker/service.rs b/src/checker/service.rs index 92902debd..254716376 100644 --- a/src/checker/service.rs +++ b/src/checker/service.rs @@ -14,12 +14,24 @@ pub struct Service { pub(crate) console: Console, } +#[derive(Debug)] +pub enum CheckError { + UdpError, + HttpError, + HealthCheckError { url: Url }, +} + impl Service { - pub async fn run_checks(&self) { + /// # Errors + /// + /// Will return OK is all checks pass or an array with the check errors. 
+ pub async fn run_checks(&self) -> Result<(), Vec> { self.console.println("Running checks for trackers ..."); + self.check_udp_trackers(); self.check_http_trackers(); - self.run_health_checks().await; + + self.run_health_checks().await } fn check_udp_trackers(&self) { @@ -38,11 +50,22 @@ impl Service { } } - async fn run_health_checks(&self) { + async fn run_health_checks(&self) -> Result<(), Vec> { self.console.println("Health checks ..."); + let mut check_errors = vec![]; + for health_check_url in &self.config.health_checks { - self.run_health_check(health_check_url.clone()).await; + match self.run_health_check(health_check_url.clone()).await { + Ok(()) => {} + Err(err) => check_errors.push(err), + } + } + + if check_errors.is_empty() { + Ok(()) + } else { + Err(check_errors) } } @@ -62,7 +85,7 @@ impl Service { .println(&format!("{} - HTTP tracker at {} is OK (TODO)", "✓".green(), url)); } - async fn run_health_check(&self, url: Url) { + async fn run_health_check(&self, url: Url) -> Result<(), CheckError> { let client = Client::builder().timeout(Duration::from_secs(5)).build().unwrap(); match client.get(url.clone()).send().await { @@ -70,14 +93,17 @@ impl Service { if response.status().is_success() { self.console .println(&format!("{} - Health API at {} is OK", "✓".green(), url)); + Ok(()) } else { self.console .eprintln(&format!("{} - Health API at {} failing: {:?}", "✗".red(), url, response)); + Err(CheckError::HealthCheckError { url }) } } Err(err) => { self.console .eprintln(&format!("{} - Health API at {} failing: {:?}", "✗".red(), url, err)); + Err(CheckError::HealthCheckError { url }) } } } diff --git a/src/e2e/runner.rs b/src/e2e/runner.rs index 818210886..aaac0e910 100644 --- a/src/e2e/runner.rs +++ b/src/e2e/runner.rs @@ -68,7 +68,7 @@ pub fn run() { let tracker_checker_config_path = create_tracker_checker_config_file(&running_services, temp_dir.temp_dir.path(), TRACKER_CHECKER_CONFIG_FILE); - // todo: inject the configuration with an env variable so 
that we don't have + // todo: inject the configuration with an env variable so that we don't have // to create the temporary directory/file. run_tracker_checker(&tracker_checker_config_path).expect("All tracker services should be running correctly"); From f4390155ccb9fe8fa91db9394c5ede0ff747e4f3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 29 Jan 2024 09:47:02 +0000 Subject: [PATCH 0705/1003] feat: [#649] add cargo dependency: clap For console commands. --- Cargo.lock | 13 +++++++------ Cargo.toml | 1 + 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ab270d0cc..6c49938de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -93,9 +93,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.5" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d664a92ecae85fd0a7392615844904654d1d5f5514837f471ddef4a057aba1b6" +checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" dependencies = [ "anstyle", "anstyle-parse", @@ -574,9 +574,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.12" +version = "4.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcfab8ba68f3668e89f6ff60f5b205cea56aa7b769451a59f34b8682f51c056d" +checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c" dependencies = [ "clap_builder", "clap_derive", @@ -584,9 +584,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.12" +version = "4.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb7fb5e4e979aec3be7791562fcba452f94ad85e954da024396433e0e25a79e9" +checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7" dependencies = [ "anstream", "anstyle", @@ -3437,6 +3437,7 @@ dependencies = [ "axum-server", "binascii", "chrono", + "clap", "colored", "config", "criterion", diff --git a/Cargo.toml 
b/Cargo.toml index 3a11786f5..4b60b8051 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -72,6 +72,7 @@ uuid = { version = "1", features = ["v4"] } colored = "2.1.0" url = "2.5.0" tempfile = "3.9.0" +clap = { version = "4.4.18", features = ["derive"]} [dev-dependencies] criterion = { version = "0.5.1", features = ["async_tokio"] } From b05e2f5cfead54bcab1b5d5fb3e7e8e223c254c1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 29 Jan 2024 09:48:04 +0000 Subject: [PATCH 0706/1003] refactor: [#649] use clap in HTTP tracker client An added scaffolding for scrape command. --- src/bin/http_tracker_client.rs | 62 ++++++++++++++++++++++++++++------ 1 file changed, 52 insertions(+), 10 deletions(-) diff --git a/src/bin/http_tracker_client.rs b/src/bin/http_tracker_client.rs index 1f1154fa5..29127cdf4 100644 --- a/src/bin/http_tracker_client.rs +++ b/src/bin/http_tracker_client.rs @@ -1,24 +1,60 @@ -use std::env; +//! HTTP Tracker client: +//! +//! Examples: +//! +//! `Announce` request: +//! +//! ```text +//! cargo run --bin http_tracker_client announce http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! `Scrape` request: +//! +//! ```text +//! cargo run --bin http_tracker_client scrape http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! 
``` use std::str::FromStr; +use clap::{Parser, Subcommand}; use reqwest::Url; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; use torrust_tracker::shared::bit_torrent::tracker::http::client::responses::announce::Announce; use torrust_tracker::shared::bit_torrent::tracker::http::client::Client; +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug)] +enum Command { + Announce { tracker_url: String, info_hash: String }, + Scrape { tracker_url: String, info_hashes: Vec }, +} + #[tokio::main] async fn main() { - let args: Vec = env::args().collect(); - if args.len() != 3 { - eprintln!("Error: invalid number of arguments!"); - eprintln!("Usage: cargo run --bin http_tracker_client "); - eprintln!("Example: cargo run --bin http_tracker_client https://tracker.torrust-demo.com 9c38422213e30bff212b30c360d26f9a02136422"); - std::process::exit(1); + let args = Args::parse(); + + match args.command { + Command::Announce { tracker_url, info_hash } => { + announce_command(tracker_url, info_hash).await; + } + Command::Scrape { + tracker_url, + info_hashes, + } => { + scrape_command(&tracker_url, &info_hashes); + } } +} - let base_url = Url::parse(&args[1]).expect("arg 1 should be a valid HTTP tracker base URL"); - let info_hash = InfoHash::from_str(&args[2]).expect("arg 2 should be a valid infohash"); +async fn announce_command(tracker_url: String, info_hash: String) { + let base_url = Url::parse(&tracker_url).expect("Invalid HTTP tracker base URL"); + let info_hash = InfoHash::from_str(&info_hash).expect("Invalid infohash"); let response = Client::new(base_url) .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) @@ -31,5 +67,11 @@ async fn main() { let json = serde_json::to_string(&announce_response).expect("announce 
response should be a valid JSON"); - print!("{json}"); + println!("{json}"); +} + +fn scrape_command(tracker_url: &str, info_hashes: &[String]) { + println!("URL: {tracker_url}"); + println!("Infohashes: {info_hashes:#?}"); + todo!(); } From 415ca1c371cdff314d7998e9669c1deffd384a28 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 29 Jan 2024 10:28:49 +0000 Subject: [PATCH 0707/1003] feat: [#649] scrape req for the HTTP tracker client ```console cargo run --bin http_tracker_client scrape http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 9c38422213e30bff212b30c360d26f9a02136423 | jq ``` ```json { "9c38422213e30bff212b30c360d26f9a02136422": { "complete": 0, "downloaded": 0, "incomplete": 0 }, "9c38422213e30bff212b30c360d26f9a02136423": { "complete": 0, "downloaded": 0, "incomplete": 0 } } ``` --- src/bin/http_tracker_client.rs | 30 +++++++++++++----- .../tracker/http/client/requests/scrape.rs | 23 +++++++++++++- .../tracker/http/client/responses/scrape.rs | 31 +++++++++++++++++-- 3 files changed, 73 insertions(+), 11 deletions(-) diff --git a/src/bin/http_tracker_client.rs b/src/bin/http_tracker_client.rs index 29127cdf4..5e6db722c 100644 --- a/src/bin/http_tracker_client.rs +++ b/src/bin/http_tracker_client.rs @@ -20,7 +20,8 @@ use reqwest::Url; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; use torrust_tracker::shared::bit_torrent::tracker::http::client::responses::announce::Announce; -use torrust_tracker::shared::bit_torrent::tracker::http::client::Client; +use torrust_tracker::shared::bit_torrent::tracker::http::client::responses::scrape; +use torrust_tracker::shared::bit_torrent::tracker::http::client::{requests, Client}; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] @@ -47,14 +48,15 @@ async fn main() { tracker_url, info_hashes, } => { - scrape_command(&tracker_url, &info_hashes); + 
scrape_command(&tracker_url, &info_hashes).await; } } } async fn announce_command(tracker_url: String, info_hash: String) { let base_url = Url::parse(&tracker_url).expect("Invalid HTTP tracker base URL"); - let info_hash = InfoHash::from_str(&info_hash).expect("Invalid infohash"); + let info_hash = + InfoHash::from_str(&info_hash).expect("Invalid infohash. Example infohash: `9c38422213e30bff212b30c360d26f9a02136422`"); let response = Client::new(base_url) .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) @@ -63,15 +65,27 @@ async fn announce_command(tracker_url: String, info_hash: String) { let body = response.bytes().await.unwrap(); let announce_response: Announce = serde_bencode::from_bytes(&body) - .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{:#?}\"", &body)); + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got: \"{:#?}\"", &body)); let json = serde_json::to_string(&announce_response).expect("announce response should be a valid JSON"); println!("{json}"); } -fn scrape_command(tracker_url: &str, info_hashes: &[String]) { - println!("URL: {tracker_url}"); - println!("Infohashes: {info_hashes:#?}"); - todo!(); +async fn scrape_command(tracker_url: &str, info_hashes: &[String]) { + let base_url = Url::parse(tracker_url).expect("Invalid HTTP tracker base URL"); + + let query = requests::scrape::Query::try_from(info_hashes) + .expect("All infohashes should be valid. 
Example infohash: `9c38422213e30bff212b30c360d26f9a02136422`"); + + let response = Client::new(base_url).scrape(&query).await; + + let body = response.bytes().await.unwrap(); + + let scrape_response = scrape::Response::try_from_bencoded(&body) + .unwrap_or_else(|_| panic!("response body should be a valid scrape response, got: \"{:#?}\"", &body)); + + let json = serde_json::to_string(&scrape_response).expect("scrape response should be a valid JSON"); + + println!("{json}"); } diff --git a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs index e2563b8ed..2aecc1550 100644 --- a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs +++ b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs @@ -1,4 +1,5 @@ -use std::fmt; +use std::convert::TryFrom; +use std::fmt::{self}; use std::str::FromStr; use crate::shared::bit_torrent::info_hash::InfoHash; @@ -14,6 +15,26 @@ impl fmt::Display for Query { } } +#[derive(Debug)] +pub struct ConversionError(String); + +impl TryFrom<&[String]> for Query { + type Error = ConversionError; + + fn try_from(info_hashes: &[String]) -> Result { + let mut validated_info_hashes: Vec = Vec::new(); + + for info_hash in info_hashes { + let validated_info_hash = InfoHash::from_str(info_hash).map_err(|_| ConversionError(info_hash.clone()))?; + validated_info_hashes.push(validated_info_hash.0); + } + + Ok(Self { + info_hash: validated_info_hashes, + }) + } +} + /// HTTP Tracker Scrape Request: /// /// diff --git a/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs b/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs index ae06841e4..ee301ee7a 100644 --- a/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs +++ b/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs @@ -1,12 +1,14 @@ use std::collections::HashMap; +use std::fmt::Write; use std::str; -use serde::{self, Deserialize, Serialize}; +use 
serde::ser::SerializeMap; +use serde::{self, Deserialize, Serialize, Serializer}; use serde_bencode::value::Value; use crate::shared::bit_torrent::tracker::http::{ByteArray20, InfoHash}; -#[derive(Debug, PartialEq, Default)] +#[derive(Debug, PartialEq, Default, Deserialize)] pub struct Response { pub files: HashMap, } @@ -60,6 +62,31 @@ struct DeserializedResponse { pub files: Value, } +// Custom serialization for Response +impl Serialize for Response { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut map = serializer.serialize_map(Some(self.files.len()))?; + for (key, value) in &self.files { + // Convert ByteArray20 key to hex string + let hex_key = byte_array_to_hex_string(key); + map.serialize_entry(&hex_key, value)?; + } + map.end() + } +} + +// Helper function to convert ByteArray20 to hex string +fn byte_array_to_hex_string(byte_array: &ByteArray20) -> String { + let mut hex_string = String::with_capacity(byte_array.len() * 2); + for byte in byte_array { + write!(hex_string, "{byte:02x}").expect("Writing to string should never fail"); + } + hex_string +} + #[derive(Default)] pub struct ResponseBuilder { response: Response, From 271bfa853a06b53c88928667518ae56e75269f04 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 29 Jan 2024 11:04:10 +0000 Subject: [PATCH 0708/1003] feat: [#649] add cargo dep: anyhow To handle errors in console clients. 
--- Cargo.lock | 7 +++++++ Cargo.toml | 1 + 2 files changed, 8 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 6c49938de..1af4d5b3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -139,6 +139,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "anyhow" +version = "1.0.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" + [[package]] name = "aquatic_udp_protocol" version = "0.8.0" @@ -3430,6 +3436,7 @@ dependencies = [ name = "torrust-tracker" version = "3.0.0-alpha.12-develop" dependencies = [ + "anyhow", "aquatic_udp_protocol", "async-trait", "axum", diff --git a/Cargo.toml b/Cargo.toml index 4b60b8051..a512d90b0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -73,6 +73,7 @@ colored = "2.1.0" url = "2.5.0" tempfile = "3.9.0" clap = { version = "4.4.18", features = ["derive"]} +anyhow = "1.0.79" [dev-dependencies] criterion = { version = "0.5.1", features = ["async_tokio"] } From 0624bf209cf995749c1027c773b15fcc6b113c83 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 29 Jan 2024 11:05:09 +0000 Subject: [PATCH 0709/1003] refactor: [#649] use anyhow to handle errors in the HTTP tracker client. --- src/bin/http_tracker_client.rs | 27 +++++++++++-------- .../tracker/http/client/requests/scrape.rs | 10 +++++++ 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/src/bin/http_tracker_client.rs b/src/bin/http_tracker_client.rs index 5e6db722c..4ca194803 100644 --- a/src/bin/http_tracker_client.rs +++ b/src/bin/http_tracker_client.rs @@ -15,6 +15,7 @@ //! 
``` use std::str::FromStr; +use anyhow::Context; use clap::{Parser, Subcommand}; use reqwest::Url; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; @@ -37,24 +38,25 @@ enum Command { } #[tokio::main] -async fn main() { +async fn main() -> anyhow::Result<()> { let args = Args::parse(); match args.command { Command::Announce { tracker_url, info_hash } => { - announce_command(tracker_url, info_hash).await; + announce_command(tracker_url, info_hash).await?; } Command::Scrape { tracker_url, info_hashes, } => { - scrape_command(&tracker_url, &info_hashes).await; + scrape_command(&tracker_url, &info_hashes).await?; } } + Ok(()) } -async fn announce_command(tracker_url: String, info_hash: String) { - let base_url = Url::parse(&tracker_url).expect("Invalid HTTP tracker base URL"); +async fn announce_command(tracker_url: String, info_hash: String) -> anyhow::Result<()> { + let base_url = Url::parse(&tracker_url).context("failed to parse HTTP tracker base URL")?; let info_hash = InfoHash::from_str(&info_hash).expect("Invalid infohash. 
Example infohash: `9c38422213e30bff212b30c360d26f9a02136422`"); @@ -67,16 +69,17 @@ async fn announce_command(tracker_url: String, info_hash: String) { let announce_response: Announce = serde_bencode::from_bytes(&body) .unwrap_or_else(|_| panic!("response body should be a valid announce response, got: \"{:#?}\"", &body)); - let json = serde_json::to_string(&announce_response).expect("announce response should be a valid JSON"); + let json = serde_json::to_string(&announce_response).context("failed to serialize scrape response into JSON")?; println!("{json}"); + + Ok(()) } -async fn scrape_command(tracker_url: &str, info_hashes: &[String]) { - let base_url = Url::parse(tracker_url).expect("Invalid HTTP tracker base URL"); +async fn scrape_command(tracker_url: &str, info_hashes: &[String]) -> anyhow::Result<()> { + let base_url = Url::parse(tracker_url).context("failed to parse HTTP tracker base URL")?; - let query = requests::scrape::Query::try_from(info_hashes) - .expect("All infohashes should be valid. 
Example infohash: `9c38422213e30bff212b30c360d26f9a02136422`"); + let query = requests::scrape::Query::try_from(info_hashes).context("failed to parse infohashes")?; let response = Client::new(base_url).scrape(&query).await; @@ -85,7 +88,9 @@ async fn scrape_command(tracker_url: &str, info_hashes: &[String]) { let scrape_response = scrape::Response::try_from_bencoded(&body) .unwrap_or_else(|_| panic!("response body should be a valid scrape response, got: \"{:#?}\"", &body)); - let json = serde_json::to_string(&scrape_response).expect("scrape response should be a valid JSON"); + let json = serde_json::to_string(&scrape_response).context("failed to serialize scrape response into JSON")?; println!("{json}"); + + Ok(()) } diff --git a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs index 2aecc1550..771b3a45e 100644 --- a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs +++ b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs @@ -1,4 +1,5 @@ use std::convert::TryFrom; +use std::error::Error; use std::fmt::{self}; use std::str::FromStr; @@ -16,8 +17,17 @@ impl fmt::Display for Query { } #[derive(Debug)] +#[allow(dead_code)] pub struct ConversionError(String); +impl fmt::Display for ConversionError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Invalid infohash: {}", self.0) + } +} + +impl Error for ConversionError {} + impl TryFrom<&[String]> for Query { type Error = ConversionError; From 1b34d9301f783b86cf09ff502ac5611ee50b8b6f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 29 Jan 2024 12:58:28 +0000 Subject: [PATCH 0710/1003] refactor: [#654] UDP tracker client: use clap and anyhow ```console $ cargo run --bin udp_tracker_client announce 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq Compiling torrust-tracker v3.0.0-alpha.12-develop (/home/josecelano/Documents/git/committer/me/github/torrust/torrust-tracker) Finished dev 
[optimized + debuginfo] target(s) in 2.60s Running `target/debug/udp_tracker_client '127.0.0.1:6969' 9c38422213e30bff212b30c360d26f9a02136422` { "announce_interval": 120, "leechers": 0, "peers": [], "seeders": 1, "transaction_id": -888840697 } ``` --- src/bin/udp_tracker_client.rs | 141 +++++++++++++++++++++++----------- 1 file changed, 98 insertions(+), 43 deletions(-) diff --git a/src/bin/udp_tracker_client.rs b/src/bin/udp_tracker_client.rs index 41084127c..8d30ee0d4 100644 --- a/src/bin/udp_tracker_client.rs +++ b/src/bin/udp_tracker_client.rs @@ -1,24 +1,67 @@ -use std::env; +//! UDP Tracker client: +//! +//! Examples: +//! +//! Announce request: +//! +//! ```text +//! cargo run --bin udp_tracker_client 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! Announce response: +//! +//! ```json +//! { +//! "transaction_id": -888840697 +//! "announce_interval": 120, +//! "leechers": 0, +//! "seeders": 1, +//! "peers": [ +//! "123.123.123.123:51289" +//! ], +//! 
} +/// ```` use std::net::{Ipv4Addr, SocketAddr}; use std::str::FromStr; +use anyhow::Context; use aquatic_udp_protocol::common::InfoHash; +use aquatic_udp_protocol::Response::{AnnounceIpv4, AnnounceIpv6}; use aquatic_udp_protocol::{ AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, Response, TransactionId, }; +use clap::{Parser, Subcommand}; use log::{debug, LevelFilter}; +use serde_json::json; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash as TorrustInfoHash; use torrust_tracker::shared::bit_torrent::tracker::udp::client::{UdpClient, UdpTrackerClient}; const ASSIGNED_BY_OS: i32 = 0; const RANDOM_TRANSACTION_ID: i32 = -888_840_697; +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug)] +enum Command { + Announce { + #[arg(value_parser = parse_socket_addr)] + tracker_socket_addr: SocketAddr, + #[arg(value_parser = parse_info_hash)] + info_hash: TorrustInfoHash, + }, +} + #[tokio::main] -async fn main() { +async fn main() -> anyhow::Result<()> { setup_logging(LevelFilter::Info); - let (remote_socket_addr, info_hash) = parse_arguments(); + let args = Args::parse(); // Configuration let local_port = ASSIGNED_BY_OS; @@ -26,33 +69,64 @@ async fn main() { let bind_to = format!("0.0.0.0:{local_port}"); // Bind to local port - debug!("Binding to: {bind_to}"); let udp_client = UdpClient::bind(&bind_to).await; let bound_to = udp_client.socket.local_addr().unwrap(); debug!("Bound to: {bound_to}"); - // Connect to remote socket + let response = match args.command { + Command::Announce { + tracker_socket_addr, + info_hash, + } => { + debug!("Connecting to remote: udp://{tracker_socket_addr}"); - debug!("Connecting to remote: udp://{remote_socket_addr}"); - udp_client.connect(&remote_socket_addr).await; + udp_client.connect(&tracker_socket_addr.to_string()).await; - let 
udp_tracker_client = UdpTrackerClient { udp_client }; + let udp_tracker_client = UdpTrackerClient { udp_client }; - let transaction_id = TransactionId(transaction_id); + let transaction_id = TransactionId(transaction_id); - let connection_id = send_connection_request(transaction_id, &udp_tracker_client).await; + let connection_id = send_connection_request(transaction_id, &udp_tracker_client).await; - let response = send_announce_request( - connection_id, - transaction_id, - info_hash, - Port(bound_to.port()), - &udp_tracker_client, - ) - .await; + send_announce_request( + connection_id, + transaction_id, + info_hash, + Port(bound_to.port()), + &udp_tracker_client, + ) + .await + } + }; + + match response { + AnnounceIpv4(announce) => { + let json = json!({ + "transaction_id": announce.transaction_id.0, + "announce_interval": announce.announce_interval.0, + "leechers": announce.leechers.0, + "seeders": announce.seeders.0, + "peers": announce.peers.iter().map(|peer| format!("{}:{}", peer.ip_address, peer.port.0)).collect::>(), + }); + let pretty_json = serde_json::to_string_pretty(&json).unwrap(); + println!("{pretty_json}"); + } + AnnounceIpv6(announce) => { + let json = json!({ + "transaction_id": announce.transaction_id.0, + "announce_interval": announce.announce_interval.0, + "leechers": announce.leechers.0, + "seeders": announce.seeders.0, + "peers6": announce.peers.iter().map(|peer| format!("{}:{}", peer.ip_address, peer.port.0)).collect::>(), + }); + let pretty_json = serde_json::to_string_pretty(&json).unwrap(); + println!("{pretty_json}"); + } + _ => println!("{response:#?}"), + } - println!("{response:#?}"); + Ok(()) } fn setup_logging(level: LevelFilter) { @@ -76,31 +150,12 @@ fn setup_logging(level: LevelFilter) { debug!("logging initialized."); } -fn parse_arguments() -> (String, TorrustInfoHash) { - let args: Vec = env::args().collect(); - - if args.len() != 3 { - eprintln!("Error: invalid number of arguments!"); - eprintln!("Usage: cargo run --bin 
udp_tracker_client "); - eprintln!("Example: cargo run --bin udp_tracker_client 144.126.245.19:6969 9c38422213e30bff212b30c360d26f9a02136422"); - std::process::exit(1); - } +fn parse_socket_addr(s: &str) -> anyhow::Result { + s.parse().with_context(|| format!("failed to parse socket address: `{s}`")) +} - let remote_socket_addr = &args[1]; - let _valid_socket_addr = remote_socket_addr.parse::().unwrap_or_else(|_| { - panic!( - "Invalid argument: `{}`. Argument 1 should be a valid socket address. For example: `144.126.245.19:6969`.", - args[1] - ) - }); - let info_hash = TorrustInfoHash::from_str(&args[2]).unwrap_or_else(|_| { - panic!( - "Invalid argument: `{}`. Argument 2 should be a valid infohash. For example: `9c38422213e30bff212b30c360d26f9a02136422`.", - args[2] - ) - }); - - (remote_socket_addr.to_string(), info_hash) +fn parse_info_hash(s: &str) -> anyhow::Result { + TorrustInfoHash::from_str(s).map_err(|e| anyhow::Error::msg(format!("failed to parse info-hash `{s}`: {e:?}"))) } async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { From f4e9bdad94c0c42849cbfa0201cf4c7ab578628c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 29 Jan 2024 16:03:33 +0000 Subject: [PATCH 0711/1003] feat: [#654] UDP tracker client: scrape ```text cargo run --bin udp_tracker_client scrape 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq cargo run --bin udp_tracker_client scrape udp://localhost:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq ``` Scrape response: ```json { "transaction_id": -888840697, "torrent_stats": [ { "completed": 0, "leechers": 0, "seeders": 0 }, { "completed": 0, "leechers": 0, "seeders": 0 } ] } ``` --- cSpell.json | 1 + src/bin/udp_tracker_client.rs | 189 ++++++++++++++++++++++++++++++---- 2 files changed, 168 insertions(+), 22 deletions(-) diff --git a/cSpell.json b/cSpell.json index 0a3f78fad..aaa3229c2 100644 --- a/cSpell.json +++ b/cSpell.json @@ -1,5 +1,6 @@ { "words": [ + 
"Addrs", "adduser", "alekitto", "appuser", diff --git a/src/bin/udp_tracker_client.rs b/src/bin/udp_tracker_client.rs index 8d30ee0d4..2c8e63cd0 100644 --- a/src/bin/udp_tracker_client.rs +++ b/src/bin/udp_tracker_client.rs @@ -5,7 +5,7 @@ //! Announce request: //! //! ```text -//! cargo run --bin udp_tracker_client 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! cargo run --bin udp_tracker_client announce 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq //! ``` //! //! Announce response: @@ -20,22 +20,58 @@ //! "123.123.123.123:51289" //! ], //! } -/// ```` -use std::net::{Ipv4Addr, SocketAddr}; +//! ``` +//! +//! Scrape request: +//! +//! ```text +//! cargo run --bin udp_tracker_client scrape 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! Scrape response: +//! +//! ```json +//! { +//! "transaction_id": -888840697, +//! "torrent_stats": [ +//! { +//! "completed": 0, +//! "leechers": 0, +//! "seeders": 0 +//! }, +//! { +//! "completed": 0, +//! "leechers": 0, +//! "seeders": 0 +//! } +//! ] +//! } +//! ``` +//! +//! You can use an URL with instead of the socket address. For example: +//! +//! ```text +//! cargo run --bin udp_tracker_client scrape udp://localhost:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! cargo run --bin udp_tracker_client scrape udp://localhost:6969/scrape 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! The protocol (`udp://`) in the URL is mandatory. The path (`\scrape`) is optional. It always uses `\scrape`. 
+use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs}; use std::str::FromStr; use anyhow::Context; use aquatic_udp_protocol::common::InfoHash; -use aquatic_udp_protocol::Response::{AnnounceIpv4, AnnounceIpv6}; +use aquatic_udp_protocol::Response::{AnnounceIpv4, AnnounceIpv6, Scrape}; use aquatic_udp_protocol::{ AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, Response, - TransactionId, + ScrapeRequest, TransactionId, }; use clap::{Parser, Subcommand}; use log::{debug, LevelFilter}; use serde_json::json; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash as TorrustInfoHash; use torrust_tracker::shared::bit_torrent::tracker::udp::client::{UdpClient, UdpTrackerClient}; +use url::Url; const ASSIGNED_BY_OS: i32 = 0; const RANDOM_TRANSACTION_ID: i32 = -888_840_697; @@ -55,6 +91,12 @@ enum Command { #[arg(value_parser = parse_info_hash)] info_hash: TorrustInfoHash, }, + Scrape { + #[arg(value_parser = parse_socket_addr)] + tracker_socket_addr: SocketAddr, + #[arg(value_parser = parse_info_hash, num_args = 1..=74, value_delimiter = ' ')] + info_hashes: Vec, + }, } #[tokio::main] @@ -65,29 +107,23 @@ async fn main() -> anyhow::Result<()> { // Configuration let local_port = ASSIGNED_BY_OS; + let local_bind_to = format!("0.0.0.0:{local_port}"); let transaction_id = RANDOM_TRANSACTION_ID; - let bind_to = format!("0.0.0.0:{local_port}"); // Bind to local port - debug!("Binding to: {bind_to}"); - let udp_client = UdpClient::bind(&bind_to).await; + debug!("Binding to: {local_bind_to}"); + let udp_client = UdpClient::bind(&local_bind_to).await; let bound_to = udp_client.socket.local_addr().unwrap(); debug!("Bound to: {bound_to}"); + let transaction_id = TransactionId(transaction_id); + let response = match args.command { Command::Announce { tracker_socket_addr, info_hash, } => { - debug!("Connecting to remote: udp://{tracker_socket_addr}"); - - udp_client.connect(&tracker_socket_addr.to_string()).await; - 
- let udp_tracker_client = UdpTrackerClient { udp_client }; - - let transaction_id = TransactionId(transaction_id); - - let connection_id = send_connection_request(transaction_id, &udp_tracker_client).await; + let (connection_id, udp_tracker_client) = connect(&tracker_socket_addr, udp_client, transaction_id).await; send_announce_request( connection_id, @@ -98,6 +134,13 @@ async fn main() -> anyhow::Result<()> { ) .await } + Command::Scrape { + tracker_socket_addr, + info_hashes, + } => { + let (connection_id, udp_tracker_client) = connect(&tracker_socket_addr, udp_client, transaction_id).await; + send_scrape_request(connection_id, transaction_id, info_hashes, &udp_tracker_client).await + } }; match response { @@ -123,7 +166,19 @@ async fn main() -> anyhow::Result<()> { let pretty_json = serde_json::to_string_pretty(&json).unwrap(); println!("{pretty_json}"); } - _ => println!("{response:#?}"), + Scrape(scrape) => { + let json = json!({ + "transaction_id": scrape.transaction_id.0, + "torrent_stats": scrape.torrent_stats.iter().map(|torrent_scrape_statistics| json!({ + "seeders": torrent_scrape_statistics.seeders.0, + "completed": torrent_scrape_statistics.completed.0, + "leechers": torrent_scrape_statistics.leechers.0, + })).collect::>(), + }); + let pretty_json = serde_json::to_string_pretty(&json).unwrap(); + println!("{pretty_json}"); + } + _ => println!("{response:#?}"), // todo: serialize to JSON all responses. } Ok(()) @@ -150,12 +205,76 @@ fn setup_logging(level: LevelFilter) { debug!("logging initialized."); } -fn parse_socket_addr(s: &str) -> anyhow::Result { - s.parse().with_context(|| format!("failed to parse socket address: `{s}`")) +fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result { + debug!("Tracker socket address: {tracker_socket_addr_str:#?}"); + + // Check if the address is a valid URL. If so, extract the host and port. 
+ let resolved_addr = if let Ok(url) = Url::parse(tracker_socket_addr_str) { + debug!("Tracker socket address URL: {url:?}"); + + let host = url + .host_str() + .with_context(|| format!("invalid host in URL: `{tracker_socket_addr_str}`"))? + .to_owned(); + + let port = url + .port() + .with_context(|| format!("port not found in URL: `{tracker_socket_addr_str}`"))? + .to_owned(); + + (host, port) + } else { + // If not a URL, assume it's a host:port pair. + + let parts: Vec<&str> = tracker_socket_addr_str.split(':').collect(); + + if parts.len() != 2 { + return Err(anyhow::anyhow!( + "invalid address format: `{}`. Expected format is host:port", + tracker_socket_addr_str + )); + } + + let host = parts[0].to_owned(); + + let port = parts[1] + .parse::() + .with_context(|| format!("invalid port: `{}`", parts[1]))? + .to_owned(); + + (host, port) + }; + + debug!("Resolved address: {resolved_addr:#?}"); + + // Perform DNS resolution. + let socket_addrs: Vec<_> = resolved_addr.to_socket_addrs()?.collect(); + if socket_addrs.is_empty() { + Err(anyhow::anyhow!("DNS resolution failed for `{}`", tracker_socket_addr_str)) + } else { + Ok(socket_addrs[0]) + } +} + +fn parse_info_hash(info_hash_str: &str) -> anyhow::Result { + TorrustInfoHash::from_str(info_hash_str) + .map_err(|e| anyhow::Error::msg(format!("failed to parse info-hash `{info_hash_str}`: {e:?}"))) } -fn parse_info_hash(s: &str) -> anyhow::Result { - TorrustInfoHash::from_str(s).map_err(|e| anyhow::Error::msg(format!("failed to parse info-hash `{s}`: {e:?}"))) +async fn connect( + tracker_socket_addr: &SocketAddr, + udp_client: UdpClient, + transaction_id: TransactionId, +) -> (ConnectionId, UdpTrackerClient) { + debug!("Connecting to tracker: udp://{tracker_socket_addr}"); + + udp_client.connect(&tracker_socket_addr.to_string()).await; + + let udp_tracker_client = UdpTrackerClient { udp_client }; + + let connection_id = send_connection_request(transaction_id, &udp_tracker_client).await; + + (connection_id, 
udp_tracker_client) } async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { @@ -207,3 +326,29 @@ async fn send_announce_request( response } + +async fn send_scrape_request( + connection_id: ConnectionId, + transaction_id: TransactionId, + info_hashes: Vec, + client: &UdpTrackerClient, +) -> Response { + debug!("Sending scrape request with transaction id: {transaction_id:#?}"); + + let scrape_request = ScrapeRequest { + connection_id, + transaction_id, + info_hashes: info_hashes + .iter() + .map(|torrust_info_hash| InfoHash(torrust_info_hash.bytes())) + .collect(), + }; + + client.send(scrape_request.into()).await; + + let response = client.receive().await; + + debug!("scrape request response:\n{response:#?}"); + + response +} From 8543190b242c1bece4e2b9932f74c1e3c3bc74ae Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 29 Jan 2024 18:16:42 +0000 Subject: [PATCH 0712/1003] refactor: Tracker Checker: use clap and anyhow --- src/checker/app.rs | 60 ++++++++++++++++-------------------------- src/checker/config.rs | 3 +++ src/checker/service.rs | 18 ++++++------- 3 files changed, 33 insertions(+), 48 deletions(-) diff --git a/src/checker/app.rs b/src/checker/app.rs index 22ed61ba7..66bbf1278 100644 --- a/src/checker/app.rs +++ b/src/checker/app.rs @@ -1,57 +1,41 @@ +use std::path::PathBuf; use std::sync::Arc; +use anyhow::Context; +use clap::Parser; + use super::config::Configuration; use super::console::Console; -use super::service::{CheckError, Service}; +use super::service::{CheckResult, Service}; use crate::checker::config::parse_from_json; -pub const NUMBER_OF_ARGUMENTS: usize = 2; +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct Args { + config_path: PathBuf, +} /// # Errors /// -/// If some checks fails it will return a vector with all failing checks. -/// -/// # Panics -/// -/// Will panic if: -/// -/// - It can't read the json configuration file. 
-/// - The configuration file is invalid. -pub async fn run() -> Result<(), Vec> { - let args = parse_arguments(); - let config = setup_config(&args); +/// Will return an error if it can't read or parse the configuration file. +pub async fn run() -> anyhow::Result> { + let args = Args::parse(); + + let config = setup_config(&args)?; + let console_printer = Console {}; + let service = Service { config: Arc::new(config), console: console_printer, }; - service.run_checks().await -} - -pub struct Arguments { - pub config_path: String, -} - -fn parse_arguments() -> Arguments { - let args: Vec = std::env::args().collect(); - - if args.len() < NUMBER_OF_ARGUMENTS { - eprintln!("Usage: cargo run --bin tracker_checker "); - eprintln!("For example: cargo run --bin tracker_checker ./share/default/config/tracker_checker.json"); - std::process::exit(1); - } - - let config_path = &args[1]; - - Arguments { - config_path: config_path.to_string(), - } + Ok(service.run_checks().await) } -fn setup_config(args: &Arguments) -> Configuration { - let file_content = std::fs::read_to_string(args.config_path.clone()) - .unwrap_or_else(|_| panic!("Can't read config file {}", args.config_path)); +fn setup_config(args: &Args) -> anyhow::Result { + let file_content = + std::fs::read_to_string(&args.config_path).with_context(|| format!("can't read config file {:?}", args.config_path))?; - parse_from_json(&file_content).expect("Invalid config format") + parse_from_json(&file_content).context("invalid config format") } diff --git a/src/checker/config.rs b/src/checker/config.rs index aaf611bb9..5cfee0760 100644 --- a/src/checker/config.rs +++ b/src/checker/config.rs @@ -1,3 +1,4 @@ +use std::error::Error; use std::fmt; use std::net::SocketAddr; @@ -43,6 +44,8 @@ pub enum ConfigurationError { InvalidUrl(url::ParseError), } +impl Error for ConfigurationError {} + impl fmt::Display for ConfigurationError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { diff --git 
a/src/checker/service.rs b/src/checker/service.rs index 254716376..fd93ed8c0 100644 --- a/src/checker/service.rs +++ b/src/checker/service.rs @@ -14,6 +14,8 @@ pub struct Service { pub(crate) console: Console, } +pub type CheckResult = Result<(), CheckError>; + #[derive(Debug)] pub enum CheckError { UdpError, @@ -25,7 +27,7 @@ impl Service { /// # Errors /// /// Will return OK is all checks pass or an array with the check errors. - pub async fn run_checks(&self) -> Result<(), Vec> { + pub async fn run_checks(&self) -> Vec { self.console.println("Running checks for trackers ..."); self.check_udp_trackers(); @@ -50,23 +52,19 @@ impl Service { } } - async fn run_health_checks(&self) -> Result<(), Vec> { + async fn run_health_checks(&self) -> Vec { self.console.println("Health checks ..."); - let mut check_errors = vec![]; + let mut check_results = vec![]; for health_check_url in &self.config.health_checks { match self.run_health_check(health_check_url.clone()).await { - Ok(()) => {} - Err(err) => check_errors.push(err), + Ok(()) => check_results.push(Ok(())), + Err(err) => check_results.push(Err(err)), } } - if check_errors.is_empty() { - Ok(()) - } else { - Err(check_errors) - } + check_results } fn check_udp_tracker(&self, address: &SocketAddr) { From 7f43fbd78bd2b363b54dd27718d889abc508539a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 30 Jan 2024 08:32:41 +0000 Subject: [PATCH 0713/1003] chore: [656] add cargo dep feature We will need "env" clap feature to use env variables for arguments. 
--- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index a512d90b0..1418f23dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -72,7 +72,7 @@ uuid = { version = "1", features = ["v4"] } colored = "2.1.0" url = "2.5.0" tempfile = "3.9.0" -clap = { version = "4.4.18", features = ["derive"]} +clap = { version = "4.4.18", features = ["derive", "env"]} anyhow = "1.0.79" [dev-dependencies] From 1bab582beebc2e1b6cc845ac39ee0c7943f02d1b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 30 Jan 2024 08:33:53 +0000 Subject: [PATCH 0714/1003] feat: [#656] Tracker Checker supports env var for config Run providing a config file path: ```text cargo run --bin tracker_checker -- --config-path "./share/default/config/tracker_checker.json" TORRUST_CHECKER_CONFIG_PATH="./share/default/config/tracker_checker.json" cargo run --bin tracker_checker ``` Run providing the configuration: ```text TORRUST_CHECKER_CONFIG=$(cat "./share/default/config/tracker_checker.json") cargo run --bin tracker_checker ``` --- src/bin/tracker_checker.rs | 11 ++++++++++- src/checker/app.rs | 29 +++++++++++++++++++++-------- src/e2e/runner.rs | 4 ++-- 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/src/bin/tracker_checker.rs b/src/bin/tracker_checker.rs index d2f676097..926a0026c 100644 --- a/src/bin/tracker_checker.rs +++ b/src/bin/tracker_checker.rs @@ -1,7 +1,16 @@ //! Program to run checks against running trackers. //! +//! Run providing a config file path: +//! +//! ```text +//! cargo run --bin tracker_checker -- --config-path "./share/default/config/tracker_checker.json" +//! TORRUST_CHECKER_CONFIG_PATH="./share/default/config/tracker_checker.json" cargo run --bin tracker_checker +//! ``` +//! +//! Run providing the configuration: +//! //! ```text -//! cargo run --bin tracker_checker "./share/default/config/tracker_checker.json" +//!
TORRUST_CHECKER_CONFIG=$(cat "./share/default/config/tracker_checker.json") cargo run --bin tracker_checker //! ``` use torrust_tracker::checker::app; diff --git a/src/checker/app.rs b/src/checker/app.rs index 66bbf1278..1e91ce846 100644 --- a/src/checker/app.rs +++ b/src/checker/app.rs @@ -1,7 +1,7 @@ use std::path::PathBuf; use std::sync::Arc; -use anyhow::Context; +use anyhow::{Context, Result}; use clap::Parser; use super::config::Configuration; @@ -12,16 +12,22 @@ use crate::checker::config::parse_from_json; #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] struct Args { - config_path: PathBuf, + /// Path to the JSON configuration file. + #[clap(short, long, env = "TORRUST_CHECKER_CONFIG_PATH")] + config_path: Option, + + /// Direct configuration content in JSON. + #[clap(env = "TORRUST_CHECKER_CONFIG", hide_env_values = true)] + config_content: Option, } /// # Errors /// -/// Will return an error if it can't read or parse the configuration file. -pub async fn run() -> anyhow::Result> { +/// Will return an error if the configuration was not provided. 
+pub async fn run() -> Result> { let args = Args::parse(); - let config = setup_config(&args)?; + let config = setup_config(args)?; let console_printer = Console {}; @@ -33,9 +39,16 @@ pub async fn run() -> anyhow::Result> { Ok(service.run_checks().await) } -fn setup_config(args: &Args) -> anyhow::Result { - let file_content = - std::fs::read_to_string(&args.config_path).with_context(|| format!("can't read config file {:?}", args.config_path))?; +fn setup_config(args: Args) -> Result { + match (args.config_path, args.config_content) { + (Some(config_path), _) => load_config_from_file(&config_path), + (_, Some(config_content)) => parse_from_json(&config_content).context("invalid config format"), + _ => Err(anyhow::anyhow!("no configuration provided")), + } +} + +fn load_config_from_file(path: &PathBuf) -> Result { + let file_content = std::fs::read_to_string(path).with_context(|| format!("can't read config file {path:?}"))?; parse_from_json(&file_content).context("invalid config format") } diff --git a/src/e2e/runner.rs b/src/e2e/runner.rs index aaac0e910..90c98608b 100644 --- a/src/e2e/runner.rs +++ b/src/e2e/runner.rs @@ -197,14 +197,14 @@ fn write_tracker_checker_config_file(config_file_path: &Path, config: &str) { /// Will panic if the config path is not a valid string. pub fn run_tracker_checker(config_path: &Path) -> io::Result<()> { info!( - "Running Tracker Checker: cargo --bin tracker_checker {}", + "Running Tracker Checker: cargo run --bin tracker_checker -- --config-path \"{}\"", config_path.display() ); let path = config_path.to_str().expect("The path should be a valid string"); let status = Command::new("cargo") - .args(["run", "--bin", "tracker_checker", path]) + .args(["run", "--bin", "tracker_checker", "--", "--config-path", path]) .status()?; if status.success() { From 392ffab67dc61ae7dc9230e2ebd290980d15d24c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 30 Jan 2024 09:30:34 +0000 Subject: [PATCH 0715/1003] refactor: [#656] E2E runner. 
Pass config as env var to Tracker Checker This way we don't even need a temp dir to run E2E tests. --- src/e2e/mod.rs | 2 +- src/e2e/runner.rs | 95 ++------------------------------------ src/e2e/temp_dir.rs | 53 --------------------- src/e2e/tracker_checker.rs | 25 ++++++++++ 4 files changed, 31 insertions(+), 144 deletions(-) delete mode 100644 src/e2e/temp_dir.rs create mode 100644 src/e2e/tracker_checker.rs diff --git a/src/e2e/mod.rs b/src/e2e/mod.rs index deba8971e..e4384e160 100644 --- a/src/e2e/mod.rs +++ b/src/e2e/mod.rs @@ -1,5 +1,5 @@ pub mod docker; pub mod logs_parser; pub mod runner; -pub mod temp_dir; +pub mod tracker_checker; pub mod tracker_container; diff --git a/src/e2e/runner.rs b/src/e2e/runner.rs index 90c98608b..a4bcb3aa3 100644 --- a/src/e2e/runner.rs +++ b/src/e2e/runner.rs @@ -1,15 +1,9 @@ -use std::fs::File; -use std::io::Write; -use std::path::{Path, PathBuf}; -use std::process::Command; -use std::{env, io}; - use log::{debug, info, LevelFilter}; use super::tracker_container::TrackerContainer; use crate::e2e::docker::RunOptions; use crate::e2e::logs_parser::RunningServices; -use crate::e2e::temp_dir::Handler; +use crate::e2e::tracker_checker::{self}; /* code-review: - We use always the same docker image name. Should we use a random image name (tag)? - Should we use a random name for the container too? - We remove the container after running tests but not the container image. Should we remove the image too?
*/ -pub const NUMBER_OF_ARGUMENTS: usize = 2; +const NUMBER_OF_ARGUMENTS: usize = 2; const CONTAINER_IMAGE: &str = "torrust-tracker:local"; const CONTAINER_NAME_PREFIX: &str = "tracker_"; -const TRACKER_CHECKER_CONFIG_FILE: &str = "tracker_checker.json"; pub struct Arguments { pub tracker_config_path: String, @@ -63,14 +56,10 @@ pub fn run() { assert_there_is_at_least_one_service_per_type(&running_services); - let temp_dir = create_temp_dir(); - - let tracker_checker_config_path = - create_tracker_checker_config_file(&running_services, temp_dir.temp_dir.path(), TRACKER_CHECKER_CONFIG_FILE); + let tracker_checker_config = + serde_json::to_string_pretty(&running_services).expect("Running services should be serialized into JSON"); - // todo: inject the configuration with an env variable so that we don't have - // to create the temporary directory/file. - run_tracker_checker(&tracker_checker_config_path).expect("All tracker services should be running correctly"); + tracker_checker::run(&tracker_checker_config).expect("All tracker services should be running correctly"); // More E2E tests could be added here in the future. // For example: `cargo test ...` for only E2E tests, using this shared test env. 
@@ -128,19 +117,6 @@ fn read_file(path: &str) -> String { std::fs::read_to_string(path).unwrap_or_else(|_| panic!("Can't read file {path}")) } -fn create_temp_dir() -> Handler { - debug!( - "Current dir: {:?}", - env::current_dir().expect("It should return the current dir") - ); - - let temp_dir_handler = Handler::new().expect("A temp dir should be created"); - - info!("Temp dir created: {:?}", temp_dir_handler.temp_dir); - - temp_dir_handler -} - fn assert_there_is_at_least_one_service_per_type(running_services: &RunningServices) { assert!( !running_services.udp_trackers.is_empty(), @@ -155,64 +131,3 @@ fn assert_there_is_at_least_one_service_per_type(running_services: &RunningServi "At least one Health Check should be enabled in E2E tests configuration" ); } - -fn create_tracker_checker_config_file(running_services: &RunningServices, config_path: &Path, config_name: &str) -> PathBuf { - let tracker_checker_config = - serde_json::to_string_pretty(&running_services).expect("Running services should be serialized into JSON"); - - let mut tracker_checker_config_path = PathBuf::from(&config_path); - tracker_checker_config_path.push(config_name); - - write_tracker_checker_config_file(&tracker_checker_config_path, &tracker_checker_config); - - tracker_checker_config_path -} - -fn write_tracker_checker_config_file(config_file_path: &Path, config: &str) { - info!( - "Writing Tracker Checker configuration file: {:?} \n{config}", - config_file_path - ); - - let mut file = File::create(config_file_path).expect("Tracker checker config file to be created"); - - file.write_all(config.as_bytes()) - .expect("Tracker checker config file to be written"); -} - -/// Runs the Tracker Checker. -/// -/// For example: -/// -/// ```text -/// cargo run --bin tracker_checker "./share/default/config/tracker_checker.json" -/// ``` -/// -/// # Errors -/// -/// Will return an error if the tracker checker fails. -/// -/// # Panics -/// -/// Will panic if the config path is not a valid string. 
-pub fn run_tracker_checker(config_path: &Path) -> io::Result<()> { - info!( - "Running Tracker Checker: cargo run --bin tracker_checker -- --config-path \"{}\"", - config_path.display() - ); - - let path = config_path.to_str().expect("The path should be a valid string"); - - let status = Command::new("cargo") - .args(["run", "--bin", "tracker_checker", "--", "--config-path", path]) - .status()?; - - if status.success() { - Ok(()) - } else { - Err(io::Error::new( - io::ErrorKind::Other, - format!("Failed to run Tracker Checker with config file {path}"), - )) - } -} diff --git a/src/e2e/temp_dir.rs b/src/e2e/temp_dir.rs deleted file mode 100644 index 8433e3059..000000000 --- a/src/e2e/temp_dir.rs +++ /dev/null @@ -1,53 +0,0 @@ -//! Temp dir which is automatically removed when it goes out of scope. -use std::path::PathBuf; -use std::{env, io}; - -use tempfile::TempDir; - -pub struct Handler { - pub temp_dir: TempDir, - pub original_dir: PathBuf, -} - -impl Handler { - /// Creates a new temporary directory and remembers the current working directory. - /// - /// # Errors - /// - /// Will error if: - /// - /// - It can't create the temp dir. - /// - It can't get the current dir. - pub fn new() -> io::Result { - let temp_dir = TempDir::new()?; - let original_dir = env::current_dir()?; - - Ok(Handler { temp_dir, original_dir }) - } - - /// Changes the current working directory to the temporary directory. - /// - /// # Errors - /// - /// Will error if it can't change the current di to the temp dir. - pub fn change_to_temp_dir(&self) -> io::Result<()> { - env::set_current_dir(self.temp_dir.path()) - } - - /// Changes the current working directory back to the original directory. - /// - /// # Errors - /// - /// Will error if it can't revert the current dir to the original one. 
- pub fn revert_to_original_dir(&self) -> io::Result<()> { - env::set_current_dir(&self.original_dir) - } -} - -impl Drop for Handler { - /// Ensures that the temporary directory is deleted when the struct goes out of scope. - fn drop(&mut self) { - // The temporary directory is automatically deleted when `TempDir` is dropped. - // We can add additional cleanup here if necessary. - } -} diff --git a/src/e2e/tracker_checker.rs b/src/e2e/tracker_checker.rs new file mode 100644 index 000000000..edc679802 --- /dev/null +++ b/src/e2e/tracker_checker.rs @@ -0,0 +1,25 @@ +use std::io; +use std::process::Command; + +use log::info; + +/// Runs the Tracker Checker. +/// +/// # Errors +/// +/// Will return an error if the Tracker Checker fails. +pub fn run(config_content: &str) -> io::Result<()> { + info!("Running Tracker Checker: TORRUST_CHECKER_CONFIG=[config] cargo run --bin tracker_checker"); + info!("Tracker Checker config:\n{config_content}"); + + let status = Command::new("cargo") + .env("TORRUST_CHECKER_CONFIG", config_content) + .args(["run", "--bin", "tracker_checker"]) + .status()?; + + if status.success() { + Ok(()) + } else { + Err(io::Error::new(io::ErrorKind::Other, "Failed to run Tracker Checker")) + } +} From d8a9f7b358ce0b8cbf806d188a8c89abb4f54ffd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 30 Jan 2024 10:19:22 +0000 Subject: [PATCH 0716/1003] refactor: [#661] move E2E tests runner mod --- src/bin/e2e_tests_runner.rs | 6 +----- src/{ => console/ci}/e2e/docker.rs | 0 src/{ => console/ci}/e2e/logs_parser.rs | 0 src/{ => console/ci}/e2e/mod.rs | 1 + src/{ => console/ci}/e2e/runner.rs | 11 ++++++++--- src/{ => console/ci}/e2e/tracker_checker.rs | 0 src/{ => console/ci}/e2e/tracker_container.rs | 2 +- src/console/ci/mod.rs | 2 ++ src/console/clients/mod.rs | 1 + src/console/mod.rs | 3 +++ src/lib.rs | 2 +- 11 files changed, 18 insertions(+), 10 deletions(-) rename src/{ => console/ci}/e2e/docker.rs (100%) rename src/{ => console/ci}/e2e/logs_parser.rs 
(100%) rename src/{ => console/ci}/e2e/mod.rs (82%) rename src/{ => console/ci}/e2e/runner.rs (93%) rename src/{ => console/ci}/e2e/tracker_checker.rs (100%) rename src/{ => console/ci}/e2e/tracker_container.rs (98%) create mode 100644 src/console/ci/mod.rs create mode 100644 src/console/clients/mod.rs create mode 100644 src/console/mod.rs diff --git a/src/bin/e2e_tests_runner.rs b/src/bin/e2e_tests_runner.rs index 35368b612..b21459d2e 100644 --- a/src/bin/e2e_tests_runner.rs +++ b/src/bin/e2e_tests_runner.rs @@ -1,9 +1,5 @@ //! Program to run E2E tests. -//! -//! ```text -//! cargo run --bin e2e_tests_runner share/default/config/tracker.e2e.container.sqlite3.toml -//! ``` -use torrust_tracker::e2e; +use torrust_tracker::console::ci::e2e; fn main() { e2e::runner::run(); diff --git a/src/e2e/docker.rs b/src/console/ci/e2e/docker.rs similarity index 100% rename from src/e2e/docker.rs rename to src/console/ci/e2e/docker.rs diff --git a/src/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs similarity index 100% rename from src/e2e/logs_parser.rs rename to src/console/ci/e2e/logs_parser.rs diff --git a/src/e2e/mod.rs b/src/console/ci/e2e/mod.rs similarity index 82% rename from src/e2e/mod.rs rename to src/console/ci/e2e/mod.rs index e4384e160..58a876cbe 100644 --- a/src/e2e/mod.rs +++ b/src/console/ci/e2e/mod.rs @@ -1,3 +1,4 @@ +//! E2E tests scripts. pub mod docker; pub mod logs_parser; pub mod runner; diff --git a/src/e2e/runner.rs b/src/console/ci/e2e/runner.rs similarity index 93% rename from src/e2e/runner.rs rename to src/console/ci/e2e/runner.rs index a4bcb3aa3..1a4746800 100644 --- a/src/e2e/runner.rs +++ b/src/console/ci/e2e/runner.rs @@ -1,9 +1,14 @@ +//! Program to run E2E tests. +//! +//! ```text +//! cargo run --bin e2e_tests_runner share/default/config/tracker.e2e.container.sqlite3.toml +//! 
``` use log::{debug, info, LevelFilter}; use super::tracker_container::TrackerContainer; -use crate::e2e::docker::RunOptions; -use crate::e2e::logs_parser::RunningServices; -use crate::e2e::tracker_checker::{self}; +use crate::console::ci::e2e::docker::RunOptions; +use crate::console::ci::e2e::logs_parser::RunningServices; +use crate::console::ci::e2e::tracker_checker::{self}; /* code-review: - We use always the same docker image name. Should we use a random image name (tag)? diff --git a/src/e2e/tracker_checker.rs b/src/console/ci/e2e/tracker_checker.rs similarity index 100% rename from src/e2e/tracker_checker.rs rename to src/console/ci/e2e/tracker_checker.rs diff --git a/src/e2e/tracker_container.rs b/src/console/ci/e2e/tracker_container.rs similarity index 98% rename from src/e2e/tracker_container.rs rename to src/console/ci/e2e/tracker_container.rs index 3e70942b5..5a4d11d02 100644 --- a/src/e2e/tracker_container.rs +++ b/src/console/ci/e2e/tracker_container.rs @@ -6,7 +6,7 @@ use rand::Rng; use super::docker::{RunOptions, RunningContainer}; use super::logs_parser::RunningServices; -use crate::e2e::docker::Docker; +use crate::console::ci::e2e::docker::Docker; #[derive(Debug)] pub struct TrackerContainer { diff --git a/src/console/ci/mod.rs b/src/console/ci/mod.rs new file mode 100644 index 000000000..6eac3e120 --- /dev/null +++ b/src/console/ci/mod.rs @@ -0,0 +1,2 @@ +//! Continuos integration scripts. +pub mod e2e; diff --git a/src/console/clients/mod.rs b/src/console/clients/mod.rs new file mode 100644 index 000000000..a3fd318b2 --- /dev/null +++ b/src/console/clients/mod.rs @@ -0,0 +1 @@ +//! Console clients. diff --git a/src/console/mod.rs b/src/console/mod.rs new file mode 100644 index 000000000..54ed8e415 --- /dev/null +++ b/src/console/mod.rs @@ -0,0 +1,3 @@ +//! Console apps. 
+pub mod ci; +pub mod clients; diff --git a/src/lib.rs b/src/lib.rs index f239039bd..398795d37 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -472,8 +472,8 @@ pub mod app; pub mod bootstrap; pub mod checker; +pub mod console; pub mod core; -pub mod e2e; pub mod servers; pub mod shared; From 0960ff269529fadff6dd9152445c7939c1cbd9f0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 30 Jan 2024 12:16:14 +0000 Subject: [PATCH 0717/1003] refactor: [#661] move Tracker Checker mod --- src/bin/tracker_checker.rs | 17 ++--------------- src/{ => console/clients}/checker/app.rs | 16 +++++++++++++++- src/{ => console/clients}/checker/config.rs | 2 +- src/{ => console/clients}/checker/console.rs | 0 src/{ => console/clients}/checker/logger.rs | 4 ++-- src/{ => console/clients}/checker/mod.rs | 0 src/{ => console/clients}/checker/printer.rs | 0 src/{ => console/clients}/checker/service.rs | 2 +- src/console/clients/mod.rs | 1 + src/lib.rs | 1 - 10 files changed, 22 insertions(+), 21 deletions(-) rename src/{ => console/clients}/checker/app.rs (73%) rename src/{ => console/clients}/checker/config.rs (98%) rename src/{ => console/clients}/checker/console.rs (100%) rename src/{ => console/clients}/checker/logger.rs (91%) rename src/{ => console/clients}/checker/mod.rs (100%) rename src/{ => console/clients}/checker/printer.rs (100%) rename src/{ => console/clients}/checker/service.rs (98%) diff --git a/src/bin/tracker_checker.rs b/src/bin/tracker_checker.rs index 926a0026c..1bda0f54f 100644 --- a/src/bin/tracker_checker.rs +++ b/src/bin/tracker_checker.rs @@ -1,18 +1,5 @@ -//! Program to run checks against running trackers. -//! -//! Run providing a config file path: -//! -//! ```text -//! cargo run --bin tracker_checker -- --config-path "./share/default/config/tracker_checker.json" -//! TORRUST_CHECKER_CONFIG_PATH="./share/default/config/tracker_checker.json" cargo run --bin tracker_checker -//! ``` -//! -//! Run providing the configuration: -//! -//! ```text -//! 
TORRUST_CHECKER_CONFIG=$(cat "./share/default/config/tracker_checker.json") cargo run --bin tracker_checker -//! ``` -use torrust_tracker::checker::app; +//! Program to run check running trackers. +use torrust_tracker::console::clients::checker::app; #[tokio::main] async fn main() { diff --git a/src/checker/app.rs b/src/console/clients/checker/app.rs similarity index 73% rename from src/checker/app.rs rename to src/console/clients/checker/app.rs index 1e91ce846..bca4b64dc 100644 --- a/src/checker/app.rs +++ b/src/console/clients/checker/app.rs @@ -1,3 +1,17 @@ +//! Program to run checks against running trackers. +//! +//! Run providing a config file path: +//! +//! ```text +//! cargo run --bin tracker_checker -- --config-path "./share/default/config/tracker_checker.json" +//! TORRUST_CHECKER_CONFIG_PATH="./share/default/config/tracker_checker.json" cargo run --bin tracker_checker +//! ``` +//! +//! Run providing the configuration: +//! +//! ```text +//! TORRUST_CHECKER_CONFIG=$(cat "./share/default/config/tracker_checker.json") cargo run --bin tracker_checker +//! 
``` use std::path::PathBuf; use std::sync::Arc; @@ -7,7 +21,7 @@ use clap::Parser; use super::config::Configuration; use super::console::Console; use super::service::{CheckResult, Service}; -use crate::checker::config::parse_from_json; +use crate::console::clients::checker::config::parse_from_json; #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] diff --git a/src/checker/config.rs b/src/console/clients/checker/config.rs similarity index 98% rename from src/checker/config.rs rename to src/console/clients/checker/config.rs index 5cfee0760..0a2c09b03 100644 --- a/src/checker/config.rs +++ b/src/console/clients/checker/config.rs @@ -117,7 +117,7 @@ mod tests { } mod building_configuration_from_plan_configuration { - use crate::checker::config::{Configuration, PlainConfiguration}; + use crate::console::clients::checker::config::{Configuration, PlainConfiguration}; #[test] fn it_should_fail_when_a_tracker_udp_address_is_invalid() { diff --git a/src/checker/console.rs b/src/console/clients/checker/console.rs similarity index 100% rename from src/checker/console.rs rename to src/console/clients/checker/console.rs diff --git a/src/checker/logger.rs b/src/console/clients/checker/logger.rs similarity index 91% rename from src/checker/logger.rs rename to src/console/clients/checker/logger.rs index 3d1074e7b..50e97189f 100644 --- a/src/checker/logger.rs +++ b/src/console/clients/checker/logger.rs @@ -49,8 +49,8 @@ impl Printer for Logger { #[cfg(test)] mod tests { - use crate::checker::logger::Logger; - use crate::checker::printer::{Printer, CLEAR_SCREEN}; + use crate::console::clients::checker::logger::Logger; + use crate::console::clients::checker::printer::{Printer, CLEAR_SCREEN}; #[test] fn should_capture_the_clear_screen_command() { diff --git a/src/checker/mod.rs b/src/console/clients/checker/mod.rs similarity index 100% rename from src/checker/mod.rs rename to src/console/clients/checker/mod.rs diff --git a/src/checker/printer.rs 
b/src/console/clients/checker/printer.rs similarity index 100% rename from src/checker/printer.rs rename to src/console/clients/checker/printer.rs diff --git a/src/checker/service.rs b/src/console/clients/checker/service.rs similarity index 98% rename from src/checker/service.rs rename to src/console/clients/checker/service.rs index fd93ed8c0..5f464fbd1 100644 --- a/src/checker/service.rs +++ b/src/console/clients/checker/service.rs @@ -7,7 +7,7 @@ use reqwest::{Client, Url}; use super::config::Configuration; use super::console::Console; -use crate::checker::printer::Printer; +use crate::console::clients::checker::printer::Printer; pub struct Service { pub(crate) config: Arc, diff --git a/src/console/clients/mod.rs b/src/console/clients/mod.rs index a3fd318b2..55ece612b 100644 --- a/src/console/clients/mod.rs +++ b/src/console/clients/mod.rs @@ -1 +1,2 @@ //! Console clients. +pub mod checker; diff --git a/src/lib.rs b/src/lib.rs index 398795d37..b4ad298ac 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -471,7 +471,6 @@ //! examples on the integration and unit tests. pub mod app; pub mod bootstrap; -pub mod checker; pub mod console; pub mod core; pub mod servers; From b96c2c37544c2db6d7ef3c9f1fb2070dacb52eb1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 30 Jan 2024 12:25:04 +0000 Subject: [PATCH 0718/1003] refactor: [#661] move HTTP Tracker Client mod --- src/bin/http_tracker_client.rs | 95 +----------------------------- src/console/clients/http/app.rs | 100 ++++++++++++++++++++++++++++++++ src/console/clients/http/mod.rs | 1 + src/console/clients/mod.rs | 1 + 4 files changed, 105 insertions(+), 92 deletions(-) create mode 100644 src/console/clients/http/app.rs create mode 100644 src/console/clients/http/mod.rs diff --git a/src/bin/http_tracker_client.rs b/src/bin/http_tracker_client.rs index 4ca194803..0de040549 100644 --- a/src/bin/http_tracker_client.rs +++ b/src/bin/http_tracker_client.rs @@ -1,96 +1,7 @@ -//! HTTP Tracker client: -//! -//! Examples: -//! 
-//! `Announce` request: -//! -//! ```text -//! cargo run --bin http_tracker_client announce http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! ``` -//! -//! `Scrape` request: -//! -//! ```text -//! cargo run --bin http_tracker_client scrape http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! ``` -use std::str::FromStr; - -use anyhow::Context; -use clap::{Parser, Subcommand}; -use reqwest::Url; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -use torrust_tracker::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; -use torrust_tracker::shared::bit_torrent::tracker::http::client::responses::announce::Announce; -use torrust_tracker::shared::bit_torrent::tracker::http::client::responses::scrape; -use torrust_tracker::shared::bit_torrent::tracker::http::client::{requests, Client}; - -#[derive(Parser, Debug)] -#[command(author, version, about, long_about = None)] -struct Args { - #[command(subcommand)] - command: Command, -} - -#[derive(Subcommand, Debug)] -enum Command { - Announce { tracker_url: String, info_hash: String }, - Scrape { tracker_url: String, info_hashes: Vec }, -} +//! Program to make request to HTTP trackers. +use torrust_tracker::console::clients::http::app; #[tokio::main] async fn main() -> anyhow::Result<()> { - let args = Args::parse(); - - match args.command { - Command::Announce { tracker_url, info_hash } => { - announce_command(tracker_url, info_hash).await?; - } - Command::Scrape { - tracker_url, - info_hashes, - } => { - scrape_command(&tracker_url, &info_hashes).await?; - } - } - Ok(()) -} - -async fn announce_command(tracker_url: String, info_hash: String) -> anyhow::Result<()> { - let base_url = Url::parse(&tracker_url).context("failed to parse HTTP tracker base URL")?; - let info_hash = - InfoHash::from_str(&info_hash).expect("Invalid infohash. 
Example infohash: `9c38422213e30bff212b30c360d26f9a02136422`"); - - let response = Client::new(base_url) - .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) - .await; - - let body = response.bytes().await.unwrap(); - - let announce_response: Announce = serde_bencode::from_bytes(&body) - .unwrap_or_else(|_| panic!("response body should be a valid announce response, got: \"{:#?}\"", &body)); - - let json = serde_json::to_string(&announce_response).context("failed to serialize scrape response into JSON")?; - - println!("{json}"); - - Ok(()) -} - -async fn scrape_command(tracker_url: &str, info_hashes: &[String]) -> anyhow::Result<()> { - let base_url = Url::parse(tracker_url).context("failed to parse HTTP tracker base URL")?; - - let query = requests::scrape::Query::try_from(info_hashes).context("failed to parse infohashes")?; - - let response = Client::new(base_url).scrape(&query).await; - - let body = response.bytes().await.unwrap(); - - let scrape_response = scrape::Response::try_from_bencoded(&body) - .unwrap_or_else(|_| panic!("response body should be a valid scrape response, got: \"{:#?}\"", &body)); - - let json = serde_json::to_string(&scrape_response).context("failed to serialize scrape response into JSON")?; - - println!("{json}"); - - Ok(()) + app::run().await } diff --git a/src/console/clients/http/app.rs b/src/console/clients/http/app.rs new file mode 100644 index 000000000..80db07231 --- /dev/null +++ b/src/console/clients/http/app.rs @@ -0,0 +1,100 @@ +//! HTTP Tracker client: +//! +//! Examples: +//! +//! `Announce` request: +//! +//! ```text +//! cargo run --bin http_tracker_client announce http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! `Scrape` request: +//! +//! ```text +//! cargo run --bin http_tracker_client scrape http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! 
``` +use std::str::FromStr; + +use anyhow::Context; +use clap::{Parser, Subcommand}; +use reqwest::Url; + +use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; +use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; +use crate::shared::bit_torrent::tracker::http::client::responses::scrape; +use crate::shared::bit_torrent::tracker::http::client::{requests, Client}; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug)] +enum Command { + Announce { tracker_url: String, info_hash: String }, + Scrape { tracker_url: String, info_hashes: Vec }, +} + +/// # Errors +/// +/// Will return an error if the command fails. +pub async fn run() -> anyhow::Result<()> { + let args = Args::parse(); + + match args.command { + Command::Announce { tracker_url, info_hash } => { + announce_command(tracker_url, info_hash).await?; + } + Command::Scrape { + tracker_url, + info_hashes, + } => { + scrape_command(&tracker_url, &info_hashes).await?; + } + } + + Ok(()) +} + +async fn announce_command(tracker_url: String, info_hash: String) -> anyhow::Result<()> { + let base_url = Url::parse(&tracker_url).context("failed to parse HTTP tracker base URL")?; + let info_hash = + InfoHash::from_str(&info_hash).expect("Invalid infohash. 
Example infohash: `9c38422213e30bff212b30c360d26f9a02136422`"); + + let response = Client::new(base_url) + .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) + .await; + + let body = response.bytes().await.unwrap(); + + let announce_response: Announce = serde_bencode::from_bytes(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got: \"{:#?}\"", &body)); + + let json = serde_json::to_string(&announce_response).context("failed to serialize scrape response into JSON")?; + + println!("{json}"); + + Ok(()) +} + +async fn scrape_command(tracker_url: &str, info_hashes: &[String]) -> anyhow::Result<()> { + let base_url = Url::parse(tracker_url).context("failed to parse HTTP tracker base URL")?; + + let query = requests::scrape::Query::try_from(info_hashes).context("failed to parse infohashes")?; + + let response = Client::new(base_url).scrape(&query).await; + + let body = response.bytes().await.unwrap(); + + let scrape_response = scrape::Response::try_from_bencoded(&body) + .unwrap_or_else(|_| panic!("response body should be a valid scrape response, got: \"{:#?}\"", &body)); + + let json = serde_json::to_string(&scrape_response).context("failed to serialize scrape response into JSON")?; + + println!("{json}"); + + Ok(()) +} diff --git a/src/console/clients/http/mod.rs b/src/console/clients/http/mod.rs new file mode 100644 index 000000000..309be6287 --- /dev/null +++ b/src/console/clients/http/mod.rs @@ -0,0 +1 @@ +pub mod app; diff --git a/src/console/clients/mod.rs b/src/console/clients/mod.rs index 55ece612b..278b736e4 100644 --- a/src/console/clients/mod.rs +++ b/src/console/clients/mod.rs @@ -1,2 +1,3 @@ //! Console clients. 
pub mod checker; +pub mod http; From 47551ff5c029b6110c06d3d408528472edfc376f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 30 Jan 2024 12:36:04 +0000 Subject: [PATCH 0719/1003] refactor: [#661] move UDP Tracker Client mod --- src/bin/tracker_checker.rs | 2 +- src/bin/udp_tracker_client.rs | 353 +------------------------------- src/console/clients/mod.rs | 1 + src/console/clients/udp/app.rs | 359 +++++++++++++++++++++++++++++++++ src/console/clients/udp/mod.rs | 1 + 5 files changed, 365 insertions(+), 351 deletions(-) create mode 100644 src/console/clients/udp/app.rs create mode 100644 src/console/clients/udp/mod.rs diff --git a/src/bin/tracker_checker.rs b/src/bin/tracker_checker.rs index 1bda0f54f..87aeedeac 100644 --- a/src/bin/tracker_checker.rs +++ b/src/bin/tracker_checker.rs @@ -1,4 +1,4 @@ -//! Program to run check running trackers. +//! Program to check running trackers. use torrust_tracker::console::clients::checker::app; #[tokio::main] diff --git a/src/bin/udp_tracker_client.rs b/src/bin/udp_tracker_client.rs index 2c8e63cd0..909b296ca 100644 --- a/src/bin/udp_tracker_client.rs +++ b/src/bin/udp_tracker_client.rs @@ -1,354 +1,7 @@ -//! UDP Tracker client: -//! -//! Examples: -//! -//! Announce request: -//! -//! ```text -//! cargo run --bin udp_tracker_client announce 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! ``` -//! -//! Announce response: -//! -//! ```json -//! { -//! "transaction_id": -888840697 -//! "announce_interval": 120, -//! "leechers": 0, -//! "seeders": 1, -//! "peers": [ -//! "123.123.123.123:51289" -//! ], -//! } -//! ``` -//! -//! Scrape request: -//! -//! ```text -//! cargo run --bin udp_tracker_client scrape 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! ``` -//! -//! Scrape response: -//! -//! ```json -//! { -//! "transaction_id": -888840697, -//! "torrent_stats": [ -//! { -//! "completed": 0, -//! "leechers": 0, -//! "seeders": 0 -//! }, -//! { -//! "completed": 0, -//! 
"leechers": 0, -//! "seeders": 0 -//! } -//! ] -//! } -//! ``` -//! -//! You can use an URL with instead of the socket address. For example: -//! -//! ```text -//! cargo run --bin udp_tracker_client scrape udp://localhost:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! cargo run --bin udp_tracker_client scrape udp://localhost:6969/scrape 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! ``` -//! -//! The protocol (`udp://`) in the URL is mandatory. The path (`\scrape`) is optional. It always uses `\scrape`. -use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs}; -use std::str::FromStr; - -use anyhow::Context; -use aquatic_udp_protocol::common::InfoHash; -use aquatic_udp_protocol::Response::{AnnounceIpv4, AnnounceIpv6, Scrape}; -use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, Response, - ScrapeRequest, TransactionId, -}; -use clap::{Parser, Subcommand}; -use log::{debug, LevelFilter}; -use serde_json::json; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash as TorrustInfoHash; -use torrust_tracker::shared::bit_torrent::tracker::udp::client::{UdpClient, UdpTrackerClient}; -use url::Url; - -const ASSIGNED_BY_OS: i32 = 0; -const RANDOM_TRANSACTION_ID: i32 = -888_840_697; - -#[derive(Parser, Debug)] -#[command(author, version, about, long_about = None)] -struct Args { - #[command(subcommand)] - command: Command, -} - -#[derive(Subcommand, Debug)] -enum Command { - Announce { - #[arg(value_parser = parse_socket_addr)] - tracker_socket_addr: SocketAddr, - #[arg(value_parser = parse_info_hash)] - info_hash: TorrustInfoHash, - }, - Scrape { - #[arg(value_parser = parse_socket_addr)] - tracker_socket_addr: SocketAddr, - #[arg(value_parser = parse_info_hash, num_args = 1..=74, value_delimiter = ' ')] - info_hashes: Vec, - }, -} +//! Program to make request to UDP trackers. 
+use torrust_tracker::console::clients::udp::app; #[tokio::main] async fn main() -> anyhow::Result<()> { - setup_logging(LevelFilter::Info); - - let args = Args::parse(); - - // Configuration - let local_port = ASSIGNED_BY_OS; - let local_bind_to = format!("0.0.0.0:{local_port}"); - let transaction_id = RANDOM_TRANSACTION_ID; - - // Bind to local port - debug!("Binding to: {local_bind_to}"); - let udp_client = UdpClient::bind(&local_bind_to).await; - let bound_to = udp_client.socket.local_addr().unwrap(); - debug!("Bound to: {bound_to}"); - - let transaction_id = TransactionId(transaction_id); - - let response = match args.command { - Command::Announce { - tracker_socket_addr, - info_hash, - } => { - let (connection_id, udp_tracker_client) = connect(&tracker_socket_addr, udp_client, transaction_id).await; - - send_announce_request( - connection_id, - transaction_id, - info_hash, - Port(bound_to.port()), - &udp_tracker_client, - ) - .await - } - Command::Scrape { - tracker_socket_addr, - info_hashes, - } => { - let (connection_id, udp_tracker_client) = connect(&tracker_socket_addr, udp_client, transaction_id).await; - send_scrape_request(connection_id, transaction_id, info_hashes, &udp_tracker_client).await - } - }; - - match response { - AnnounceIpv4(announce) => { - let json = json!({ - "transaction_id": announce.transaction_id.0, - "announce_interval": announce.announce_interval.0, - "leechers": announce.leechers.0, - "seeders": announce.seeders.0, - "peers": announce.peers.iter().map(|peer| format!("{}:{}", peer.ip_address, peer.port.0)).collect::>(), - }); - let pretty_json = serde_json::to_string_pretty(&json).unwrap(); - println!("{pretty_json}"); - } - AnnounceIpv6(announce) => { - let json = json!({ - "transaction_id": announce.transaction_id.0, - "announce_interval": announce.announce_interval.0, - "leechers": announce.leechers.0, - "seeders": announce.seeders.0, - "peers6": announce.peers.iter().map(|peer| format!("{}:{}", peer.ip_address, 
peer.port.0)).collect::>(), - }); - let pretty_json = serde_json::to_string_pretty(&json).unwrap(); - println!("{pretty_json}"); - } - Scrape(scrape) => { - let json = json!({ - "transaction_id": scrape.transaction_id.0, - "torrent_stats": scrape.torrent_stats.iter().map(|torrent_scrape_statistics| json!({ - "seeders": torrent_scrape_statistics.seeders.0, - "completed": torrent_scrape_statistics.completed.0, - "leechers": torrent_scrape_statistics.leechers.0, - })).collect::>(), - }); - let pretty_json = serde_json::to_string_pretty(&json).unwrap(); - println!("{pretty_json}"); - } - _ => println!("{response:#?}"), // todo: serialize to JSON all responses. - } - - Ok(()) -} - -fn setup_logging(level: LevelFilter) { - if let Err(_err) = fern::Dispatch::new() - .format(|out, message, record| { - out.finish(format_args!( - "{} [{}][{}] {}", - chrono::Local::now().format("%+"), - record.target(), - record.level(), - message - )); - }) - .level(level) - .chain(std::io::stdout()) - .apply() - { - panic!("Failed to initialize logging.") - } - - debug!("logging initialized."); -} - -fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result { - debug!("Tracker socket address: {tracker_socket_addr_str:#?}"); - - // Check if the address is a valid URL. If so, extract the host and port. - let resolved_addr = if let Ok(url) = Url::parse(tracker_socket_addr_str) { - debug!("Tracker socket address URL: {url:?}"); - - let host = url - .host_str() - .with_context(|| format!("invalid host in URL: `{tracker_socket_addr_str}`"))? - .to_owned(); - - let port = url - .port() - .with_context(|| format!("port not found in URL: `{tracker_socket_addr_str}`"))? - .to_owned(); - - (host, port) - } else { - // If not a URL, assume it's a host:port pair. - - let parts: Vec<&str> = tracker_socket_addr_str.split(':').collect(); - - if parts.len() != 2 { - return Err(anyhow::anyhow!( - "invalid address format: `{}`. 
Expected format is host:port", - tracker_socket_addr_str - )); - } - - let host = parts[0].to_owned(); - - let port = parts[1] - .parse::() - .with_context(|| format!("invalid port: `{}`", parts[1]))? - .to_owned(); - - (host, port) - }; - - debug!("Resolved address: {resolved_addr:#?}"); - - // Perform DNS resolution. - let socket_addrs: Vec<_> = resolved_addr.to_socket_addrs()?.collect(); - if socket_addrs.is_empty() { - Err(anyhow::anyhow!("DNS resolution failed for `{}`", tracker_socket_addr_str)) - } else { - Ok(socket_addrs[0]) - } -} - -fn parse_info_hash(info_hash_str: &str) -> anyhow::Result { - TorrustInfoHash::from_str(info_hash_str) - .map_err(|e| anyhow::Error::msg(format!("failed to parse info-hash `{info_hash_str}`: {e:?}"))) -} - -async fn connect( - tracker_socket_addr: &SocketAddr, - udp_client: UdpClient, - transaction_id: TransactionId, -) -> (ConnectionId, UdpTrackerClient) { - debug!("Connecting to tracker: udp://{tracker_socket_addr}"); - - udp_client.connect(&tracker_socket_addr.to_string()).await; - - let udp_tracker_client = UdpTrackerClient { udp_client }; - - let connection_id = send_connection_request(transaction_id, &udp_tracker_client).await; - - (connection_id, udp_tracker_client) -} - -async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { - debug!("Sending connection request with transaction id: {transaction_id:#?}"); - - let connect_request = ConnectRequest { transaction_id }; - - client.send(connect_request.into()).await; - - let response = client.receive().await; - - debug!("connection request response:\n{response:#?}"); - - match response { - Response::Connect(connect_response) => connect_response.connection_id, - _ => panic!("error connecting to udp server. 
Unexpected response"), - } -} - -async fn send_announce_request( - connection_id: ConnectionId, - transaction_id: TransactionId, - info_hash: TorrustInfoHash, - port: Port, - client: &UdpTrackerClient, -) -> Response { - debug!("Sending announce request with transaction id: {transaction_id:#?}"); - - let announce_request = AnnounceRequest { - connection_id, - transaction_id, - info_hash: InfoHash(info_hash.bytes()), - peer_id: PeerId(*b"-qB00000000000000001"), - bytes_downloaded: NumberOfBytes(0i64), - bytes_uploaded: NumberOfBytes(0i64), - bytes_left: NumberOfBytes(0i64), - event: AnnounceEvent::Started, - ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), - key: PeerKey(0u32), - peers_wanted: NumberOfPeers(1i32), - port, - }; - - client.send(announce_request.into()).await; - - let response = client.receive().await; - - debug!("announce request response:\n{response:#?}"); - - response -} - -async fn send_scrape_request( - connection_id: ConnectionId, - transaction_id: TransactionId, - info_hashes: Vec, - client: &UdpTrackerClient, -) -> Response { - debug!("Sending scrape request with transaction id: {transaction_id:#?}"); - - let scrape_request = ScrapeRequest { - connection_id, - transaction_id, - info_hashes: info_hashes - .iter() - .map(|torrust_info_hash| InfoHash(torrust_info_hash.bytes())) - .collect(), - }; - - client.send(scrape_request.into()).await; - - let response = client.receive().await; - - debug!("scrape request response:\n{response:#?}"); - - response + app::run().await } diff --git a/src/console/clients/mod.rs b/src/console/clients/mod.rs index 278b736e4..8492f8ba5 100644 --- a/src/console/clients/mod.rs +++ b/src/console/clients/mod.rs @@ -1,3 +1,4 @@ //! Console clients. pub mod checker; pub mod http; +pub mod udp; diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs new file mode 100644 index 000000000..e9c8b5274 --- /dev/null +++ b/src/console/clients/udp/app.rs @@ -0,0 +1,359 @@ +//! UDP Tracker client: +//! +//! 
Examples: +//! +//! Announce request: +//! +//! ```text +//! cargo run --bin udp_tracker_client announce 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! Announce response: +//! +//! ```json +//! { +//! "transaction_id": -888840697 +//! "announce_interval": 120, +//! "leechers": 0, +//! "seeders": 1, +//! "peers": [ +//! "123.123.123.123:51289" +//! ], +//! } +//! ``` +//! +//! Scrape request: +//! +//! ```text +//! cargo run --bin udp_tracker_client scrape 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! Scrape response: +//! +//! ```json +//! { +//! "transaction_id": -888840697, +//! "torrent_stats": [ +//! { +//! "completed": 0, +//! "leechers": 0, +//! "seeders": 0 +//! }, +//! { +//! "completed": 0, +//! "leechers": 0, +//! "seeders": 0 +//! } +//! ] +//! } +//! ``` +//! +//! You can use an URL with instead of the socket address. For example: +//! +//! ```text +//! cargo run --bin udp_tracker_client scrape udp://localhost:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! cargo run --bin udp_tracker_client scrape udp://localhost:6969/scrape 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! The protocol (`udp://`) in the URL is mandatory. The path (`\scrape`) is optional. It always uses `\scrape`. 
+use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs}; +use std::str::FromStr; + +use anyhow::Context; +use aquatic_udp_protocol::common::InfoHash; +use aquatic_udp_protocol::Response::{AnnounceIpv4, AnnounceIpv6, Scrape}; +use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, Response, + ScrapeRequest, TransactionId, +}; +use clap::{Parser, Subcommand}; +use log::{debug, LevelFilter}; +use serde_json::json; +use url::Url; + +use crate::shared::bit_torrent::info_hash::InfoHash as TorrustInfoHash; +use crate::shared::bit_torrent::tracker::udp::client::{UdpClient, UdpTrackerClient}; + +const ASSIGNED_BY_OS: i32 = 0; +const RANDOM_TRANSACTION_ID: i32 = -888_840_697; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug)] +enum Command { + Announce { + #[arg(value_parser = parse_socket_addr)] + tracker_socket_addr: SocketAddr, + #[arg(value_parser = parse_info_hash)] + info_hash: TorrustInfoHash, + }, + Scrape { + #[arg(value_parser = parse_socket_addr)] + tracker_socket_addr: SocketAddr, + #[arg(value_parser = parse_info_hash, num_args = 1..=74, value_delimiter = ' ')] + info_hashes: Vec, + }, +} + +/// # Errors +/// +/// Will return an error if the command fails. 
+/// +/// +pub async fn run() -> anyhow::Result<()> { + setup_logging(LevelFilter::Info); + + let args = Args::parse(); + + // Configuration + let local_port = ASSIGNED_BY_OS; + let local_bind_to = format!("0.0.0.0:{local_port}"); + let transaction_id = RANDOM_TRANSACTION_ID; + + // Bind to local port + debug!("Binding to: {local_bind_to}"); + let udp_client = UdpClient::bind(&local_bind_to).await; + let bound_to = udp_client.socket.local_addr().context("binding local address")?; + debug!("Bound to: {bound_to}"); + + let transaction_id = TransactionId(transaction_id); + + let response = match args.command { + Command::Announce { + tracker_socket_addr, + info_hash, + } => { + let (connection_id, udp_tracker_client) = connect(&tracker_socket_addr, udp_client, transaction_id).await; + + send_announce_request( + connection_id, + transaction_id, + info_hash, + Port(bound_to.port()), + &udp_tracker_client, + ) + .await + } + Command::Scrape { + tracker_socket_addr, + info_hashes, + } => { + let (connection_id, udp_tracker_client) = connect(&tracker_socket_addr, udp_client, transaction_id).await; + send_scrape_request(connection_id, transaction_id, info_hashes, &udp_tracker_client).await + } + }; + + match response { + AnnounceIpv4(announce) => { + let json = json!({ + "transaction_id": announce.transaction_id.0, + "announce_interval": announce.announce_interval.0, + "leechers": announce.leechers.0, + "seeders": announce.seeders.0, + "peers": announce.peers.iter().map(|peer| format!("{}:{}", peer.ip_address, peer.port.0)).collect::>(), + }); + let pretty_json = serde_json::to_string_pretty(&json).context("announce IPv4 response JSON serialization")?; + println!("{pretty_json}"); + } + AnnounceIpv6(announce) => { + let json = json!({ + "transaction_id": announce.transaction_id.0, + "announce_interval": announce.announce_interval.0, + "leechers": announce.leechers.0, + "seeders": announce.seeders.0, + "peers6": announce.peers.iter().map(|peer| format!("{}:{}", 
peer.ip_address, peer.port.0)).collect::>(), + }); + let pretty_json = serde_json::to_string_pretty(&json).context("announce IPv6 response JSON serialization")?; + println!("{pretty_json}"); + } + Scrape(scrape) => { + let json = json!({ + "transaction_id": scrape.transaction_id.0, + "torrent_stats": scrape.torrent_stats.iter().map(|torrent_scrape_statistics| json!({ + "seeders": torrent_scrape_statistics.seeders.0, + "completed": torrent_scrape_statistics.completed.0, + "leechers": torrent_scrape_statistics.leechers.0, + })).collect::>(), + }); + let pretty_json = serde_json::to_string_pretty(&json).context("scrape response JSON serialization")?; + println!("{pretty_json}"); + } + _ => println!("{response:#?}"), // todo: serialize to JSON all responses. + }; + + Ok(()) +} + +fn setup_logging(level: LevelFilter) { + if let Err(_err) = fern::Dispatch::new() + .format(|out, message, record| { + out.finish(format_args!( + "{} [{}][{}] {}", + chrono::Local::now().format("%+"), + record.target(), + record.level(), + message + )); + }) + .level(level) + .chain(std::io::stdout()) + .apply() + { + panic!("Failed to initialize logging.") + } + + debug!("logging initialized."); +} + +fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result { + debug!("Tracker socket address: {tracker_socket_addr_str:#?}"); + + // Check if the address is a valid URL. If so, extract the host and port. + let resolved_addr = if let Ok(url) = Url::parse(tracker_socket_addr_str) { + debug!("Tracker socket address URL: {url:?}"); + + let host = url + .host_str() + .with_context(|| format!("invalid host in URL: `{tracker_socket_addr_str}`"))? + .to_owned(); + + let port = url + .port() + .with_context(|| format!("port not found in URL: `{tracker_socket_addr_str}`"))? + .to_owned(); + + (host, port) + } else { + // If not a URL, assume it's a host:port pair. 
+ + let parts: Vec<&str> = tracker_socket_addr_str.split(':').collect(); + + if parts.len() != 2 { + return Err(anyhow::anyhow!( + "invalid address format: `{}`. Expected format is host:port", + tracker_socket_addr_str + )); + } + + let host = parts[0].to_owned(); + + let port = parts[1] + .parse::() + .with_context(|| format!("invalid port: `{}`", parts[1]))? + .to_owned(); + + (host, port) + }; + + debug!("Resolved address: {resolved_addr:#?}"); + + // Perform DNS resolution. + let socket_addrs: Vec<_> = resolved_addr.to_socket_addrs()?.collect(); + if socket_addrs.is_empty() { + Err(anyhow::anyhow!("DNS resolution failed for `{}`", tracker_socket_addr_str)) + } else { + Ok(socket_addrs[0]) + } +} + +fn parse_info_hash(info_hash_str: &str) -> anyhow::Result { + TorrustInfoHash::from_str(info_hash_str) + .map_err(|e| anyhow::Error::msg(format!("failed to parse info-hash `{info_hash_str}`: {e:?}"))) +} + +async fn connect( + tracker_socket_addr: &SocketAddr, + udp_client: UdpClient, + transaction_id: TransactionId, +) -> (ConnectionId, UdpTrackerClient) { + debug!("Connecting to tracker: udp://{tracker_socket_addr}"); + + udp_client.connect(&tracker_socket_addr.to_string()).await; + + let udp_tracker_client = UdpTrackerClient { udp_client }; + + let connection_id = send_connection_request(transaction_id, &udp_tracker_client).await; + + (connection_id, udp_tracker_client) +} + +async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { + debug!("Sending connection request with transaction id: {transaction_id:#?}"); + + let connect_request = ConnectRequest { transaction_id }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + debug!("connection request response:\n{response:#?}"); + + match response { + Response::Connect(connect_response) => connect_response.connection_id, + _ => panic!("error connecting to udp server. 
Unexpected response"), + } +} + +async fn send_announce_request( + connection_id: ConnectionId, + transaction_id: TransactionId, + info_hash: TorrustInfoHash, + port: Port, + client: &UdpTrackerClient, +) -> Response { + debug!("Sending announce request with transaction id: {transaction_id:#?}"); + + let announce_request = AnnounceRequest { + connection_id, + transaction_id, + info_hash: InfoHash(info_hash.bytes()), + peer_id: PeerId(*b"-qB00000000000000001"), + bytes_downloaded: NumberOfBytes(0i64), + bytes_uploaded: NumberOfBytes(0i64), + bytes_left: NumberOfBytes(0i64), + event: AnnounceEvent::Started, + ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), + key: PeerKey(0u32), + peers_wanted: NumberOfPeers(1i32), + port, + }; + + client.send(announce_request.into()).await; + + let response = client.receive().await; + + debug!("announce request response:\n{response:#?}"); + + response +} + +async fn send_scrape_request( + connection_id: ConnectionId, + transaction_id: TransactionId, + info_hashes: Vec, + client: &UdpTrackerClient, +) -> Response { + debug!("Sending scrape request with transaction id: {transaction_id:#?}"); + + let scrape_request = ScrapeRequest { + connection_id, + transaction_id, + info_hashes: info_hashes + .iter() + .map(|torrust_info_hash| InfoHash(torrust_info_hash.bytes())) + .collect(), + }; + + client.send(scrape_request.into()).await; + + let response = client.receive().await; + + debug!("scrape request response:\n{response:#?}"); + + response +} diff --git a/src/console/clients/udp/mod.rs b/src/console/clients/udp/mod.rs new file mode 100644 index 000000000..309be6287 --- /dev/null +++ b/src/console/clients/udp/mod.rs @@ -0,0 +1 @@ +pub mod app; From cb5bb685d8e82ab8d667015c615fa87dccefd04b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 30 Jan 2024 14:03:07 +0000 Subject: [PATCH 0720/1003] feat: [#640] Tracker Chekcer: announce check --- src/console/clients/checker/service.rs | 68 +++++++++++++++++++------- 1 file changed, 49 
insertions(+), 19 deletions(-) diff --git a/src/console/clients/checker/service.rs b/src/console/clients/checker/service.rs index 5f464fbd1..02bf1926b 100644 --- a/src/console/clients/checker/service.rs +++ b/src/console/clients/checker/service.rs @@ -1,13 +1,18 @@ use std::net::SocketAddr; +use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use colored::Colorize; -use reqwest::{Client, Url}; +use reqwest::{Client as HttpClient, Url}; use super::config::Configuration; use super::console::Console; use crate::console::clients::checker::printer::Printer; +use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; +use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; +use crate::shared::bit_torrent::tracker::http::client::Client; pub struct Service { pub(crate) config: Arc, @@ -19,7 +24,7 @@ pub type CheckResult = Result<(), CheckError>; #[derive(Debug)] pub enum CheckError { UdpError, - HttpError, + HttpError { url: Url }, HealthCheckError { url: Url }, } @@ -30,10 +35,15 @@ impl Service { pub async fn run_checks(&self) -> Vec { self.console.println("Running checks for trackers ..."); + let mut check_results = vec![]; + self.check_udp_trackers(); - self.check_http_trackers(); - self.run_health_checks().await + self.check_http_trackers(&mut check_results).await; + + self.run_health_checks(&mut check_results).await; + + check_results } fn check_udp_trackers(&self) { @@ -44,27 +54,26 @@ impl Service { } } - fn check_http_trackers(&self) { + async fn check_http_trackers(&self, check_results: &mut Vec) { self.console.println("HTTP trackers ..."); for http_tracker in &self.config.http_trackers { - self.check_http_tracker(http_tracker); + match self.check_http_tracker(http_tracker).await { + Ok(()) => check_results.push(Ok(())), + Err(err) => check_results.push(Err(err)), + } } } - async fn run_health_checks(&self) -> Vec { + async fn 
run_health_checks(&self, check_results: &mut Vec) { self.console.println("Health checks ..."); - let mut check_results = vec![]; - for health_check_url in &self.config.health_checks { match self.run_health_check(health_check_url.clone()).await { Ok(()) => check_results.push(Ok(())), Err(err) => check_results.push(Err(err)), } } - - check_results } fn check_udp_tracker(&self, address: &SocketAddr) { @@ -72,19 +81,40 @@ impl Service { // - Make announce request // - Make scrape request self.console - .println(&format!("{} - UDP tracker at {:?} is OK (TODO)", "✓".green(), address)); + .println(&format!("{} - UDP tracker at udp://{:?} is OK (TODO)", "✓".green(), address)); } - fn check_http_tracker(&self, url: &Url) { - // todo: - // - Make announce request - // - Make scrape request - self.console - .println(&format!("{} - HTTP tracker at {} is OK (TODO)", "✓".green(), url)); + async fn check_http_tracker(&self, url: &Url) -> Result<(), CheckError> { + let info_hash_str = "9c38422213e30bff212b30c360d26f9a02136422".to_string(); // # DevSkim: ignore DS173237 + let info_hash = InfoHash::from_str(&info_hash_str).expect("a valid info-hash is required"); + + // Announce request + + let response = Client::new(url.clone()) + .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) + .await; + + if let Ok(body) = response.bytes().await { + if let Ok(_announce_response) = serde_bencode::from_bytes::(&body) { + self.console.println(&format!("{} - Announce at {} is OK", "✓".green(), url)); + + Ok(()) + } else { + self.console.println(&format!("{} - Announce at {} failing", "✗".red(), url)); + Err(CheckError::HttpError { url: url.clone() }) + } + } else { + self.console.println(&format!("{} - Announce at {} failing", "✗".red(), url)); + Err(CheckError::HttpError { url: url.clone() }) + } + + // Scrape request + + // todo } async fn run_health_check(&self, url: Url) -> Result<(), CheckError> { - let client = 
Client::builder().timeout(Duration::from_secs(5)).build().unwrap(); + let client = HttpClient::builder().timeout(Duration::from_secs(5)).build().unwrap(); match client.get(url.clone()).send().await { Ok(response) => { From 4456203433d7c4aef463371b2e71ce9e69d7b3c0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 30 Jan 2024 15:59:20 +0000 Subject: [PATCH 0721/1003] feat: [#640] Tracker Checker: scrape check --- src/console/clients/checker/service.rs | 88 ++++++++++++++----- .../tracker/http/client/requests/scrape.rs | 17 ++++ 2 files changed, 85 insertions(+), 20 deletions(-) diff --git a/src/console/clients/checker/service.rs b/src/console/clients/checker/service.rs index 02bf1926b..1cb4725e0 100644 --- a/src/console/clients/checker/service.rs +++ b/src/console/clients/checker/service.rs @@ -12,7 +12,8 @@ use crate::console::clients::checker::printer::Printer; use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; -use crate::shared::bit_torrent::tracker::http::client::Client; +use crate::shared::bit_torrent::tracker::http::client::responses::scrape; +use crate::shared::bit_torrent::tracker::http::client::{requests, Client}; pub struct Service { pub(crate) config: Arc, @@ -58,9 +59,32 @@ impl Service { self.console.println("HTTP trackers ..."); for http_tracker in &self.config.http_trackers { - match self.check_http_tracker(http_tracker).await { - Ok(()) => check_results.push(Ok(())), - Err(err) => check_results.push(Err(err)), + let colored_tracker_url = http_tracker.to_string().yellow(); + + match self.check_http_announce(http_tracker).await { + Ok(()) => { + check_results.push(Ok(())); + self.console + .println(&format!("{} - Announce at {} is OK", "✓".green(), colored_tracker_url)); + } + Err(err) => { + check_results.push(Err(err)); + self.console + .println(&format!("{} - Announce at
{} is failing", "✗".red(), colored_tracker_url)); + } + } + + match self.check_http_scrape(http_tracker).await { + Ok(()) => { + check_results.push(Ok(())); + self.console + .println(&format!("{} - Scrape at {} is OK", "✓".green(), colored_tracker_url)); + } + Err(err) => { + check_results.push(Err(err)); + self.console + .println(&format!("{} - Scrape at {} is failing", "✗".red(), colored_tracker_url)); + } } } } @@ -80,57 +104,81 @@ impl Service { // todo: // - Make announce request // - Make scrape request - self.console - .println(&format!("{} - UDP tracker at udp://{:?} is OK (TODO)", "✓".green(), address)); + + let colored_address = address.to_string().yellow(); + + self.console.println(&format!( + "{} - UDP tracker at udp://{} is OK ({})", + "✓".green(), + colored_address, + "TODO".red(), + )); } - async fn check_http_tracker(&self, url: &Url) -> Result<(), CheckError> { + async fn check_http_announce(&self, url: &Url) -> Result<(), CheckError> { let info_hash_str = "9c38422213e30bff212b30c360d26f9a02136422".to_string(); // # DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&info_hash_str).expect("a valid info-hash is required"); - // Announce request - let response = Client::new(url.clone()) .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) .await; if let Ok(body) = response.bytes().await { if let Ok(_announce_response) = serde_bencode::from_bytes::(&body) { - self.console.println(&format!("{} - Announce at {} is OK", "✓".green(), url)); - Ok(()) } else { - self.console.println(&format!("{} - Announce at {} failing", "✗".red(), url)); Err(CheckError::HttpError { url: url.clone() }) } } else { - self.console.println(&format!("{} - Announce at {} failing", "✗".red(), url)); Err(CheckError::HttpError { url: url.clone() }) } + } + + async fn check_http_scrape(&self, url: &Url) -> Result<(), CheckError> { + let info_hashes: Vec = vec!["9c38422213e30bff212b30c360d26f9a02136422".to_string()]; // # DevSkim: ignore 
DS173237 + let query = requests::scrape::Query::try_from(info_hashes).expect("a valid array of info-hashes is required"); - // Scrape request + let response = Client::new(url.clone()).scrape(&query).await; - // todo + if let Ok(body) = response.bytes().await { + if let Ok(_scrape_response) = scrape::Response::try_from_bencoded(&body) { + Ok(()) + } else { + Err(CheckError::HttpError { url: url.clone() }) + } + } else { + Err(CheckError::HttpError { url: url.clone() }) + } } async fn run_health_check(&self, url: Url) -> Result<(), CheckError> { let client = HttpClient::builder().timeout(Duration::from_secs(5)).build().unwrap(); + let colored_url = url.to_string().yellow(); + match client.get(url.clone()).send().await { Ok(response) => { if response.status().is_success() { self.console - .println(&format!("{} - Health API at {} is OK", "✓".green(), url)); + .println(&format!("{} - Health API at {} is OK", "✓".green(), colored_url)); Ok(()) } else { - self.console - .eprintln(&format!("{} - Health API at {} failing: {:?}", "✗".red(), url, response)); + self.console.eprintln(&format!( + "{} - Health API at {} is failing: {:?}", + "✗".red(), + colored_url, + response + )); Err(CheckError::HealthCheckError { url }) } } Err(err) => { - self.console - .eprintln(&format!("{} - Health API at {} failing: {:?}", "✗".red(), url, err)); + self.console.eprintln(&format!( + "{} - Health API at {} is failing: {:?}", + "✗".red(), + colored_url, + err + )); Err(CheckError::HealthCheckError { url }) } } diff --git a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs index 771b3a45e..d0268d1f8 100644 --- a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs +++ b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs @@ -45,6 +45,23 @@ impl TryFrom<&[String]> for Query { } } +impl TryFrom> for Query { + type Error = ConversionError; + + fn try_from(info_hashes: Vec) -> Result { + let mut 
validated_info_hashes: Vec = Vec::new(); + + for info_hash in info_hashes { + let validated_info_hash = InfoHash::from_str(&info_hash).map_err(|_| ConversionError(info_hash.clone()))?; + validated_info_hashes.push(validated_info_hash.0); + } + + Ok(Self { + info_hash: validated_info_hashes, + }) + } +} + /// HTTP Tracker Scrape Request: /// /// From e9e0ded853e5fdb807924e7ee697337aba0952e3 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 1 Feb 2024 09:14:10 +0800 Subject: [PATCH 0722/1003] chore: update deps (Cargo Lockfile) Updating crates.io index Updating anstyle v1.0.4 -> v1.0.5 Updating async-compression v0.4.5 -> v0.4.6 Updating axum v0.7.3 -> v0.7.4 Updating axum-core v0.4.2 -> v0.4.3 Updating axum-macros v0.4.0 -> v0.4.1 Updating base64 v0.21.5 -> v0.21.7 Updating bindgen v0.69.1 -> v0.69.2 Updating bitflags v2.4.1 -> v2.4.2 Updating borsh v1.3.0 -> v1.3.1 Updating borsh-derive v1.3.0 -> v1.3.1 Updating chrono v0.4.31 -> v0.4.33 Updating ciborium v0.2.1 -> v0.2.2 Updating ciborium-io v0.2.1 -> v0.2.2 Updating ciborium-ll v0.2.1 -> v0.2.2 Updating cpufeatures v0.2.11 -> v0.2.12 Updating crossbeam v0.8.3 -> v0.8.4 Updating crossbeam-channel v0.5.10 -> v0.5.11 Updating crossbeam-deque v0.8.4 -> v0.8.5 Updating crossbeam-epoch v0.9.17 -> v0.9.18 Updating crossbeam-queue v0.3.10 -> v0.3.11 Updating crossbeam-utils v0.8.18 -> v0.8.19 Adding crunchy v0.2.2 Updating darling v0.20.3 -> v0.20.5 Updating darling_core v0.20.3 -> v0.20.5 Updating darling_macro v0.20.3 -> v0.20.5 Updating derive_utils v0.13.2 -> v0.14.1 Updating getrandom v0.2.11 -> v0.2.12 Removing h2 v0.3.22 Removing h2 v0.4.0 Adding h2 v0.3.24 Adding h2 v0.4.2 Updating half v1.8.2 -> v2.3.1 Updating hermit-abi v0.3.3 -> v0.3.4 Updating hyper-util v0.1.2 -> v0.1.3 Updating indexmap v2.1.0 -> v2.2.2 Updating io-enum v1.1.1 -> v1.1.3 Removing itertools v0.11.0 Updating js-sys v0.3.66 -> v0.3.67 Updating libc v0.2.151 -> v0.2.153 Updating libz-sys v1.1.12 -> v1.1.15 Updating linux-raw-sys v0.4.12 
-> v0.4.13 Updating local-ip-address v0.5.6 -> v0.5.7 Updating multimap v0.9.1 -> v0.10.0 Updating openssl v0.10.62 -> v0.10.63 Updating openssl-src v300.2.1+3.2.0 -> v300.2.2+3.2.1 Updating openssl-sys v0.9.98 -> v0.9.99 Updating pest v2.7.5 -> v2.7.6 Updating pest_derive v2.7.5 -> v2.7.6 Updating pest_generator v2.7.5 -> v2.7.6 Updating pest_meta v2.7.5 -> v2.7.6 Updating pin-project v1.1.3 -> v1.1.4 Updating pin-project-internal v1.1.3 -> v1.1.4 Updating pkg-config v0.3.28 -> v0.3.29 Updating predicates v3.0.4 -> v3.1.0 Updating proc-macro-crate v2.0.0 -> v3.1.0 Updating proc-macro2 v1.0.75 -> v1.0.78 Updating rayon v1.8.0 -> v1.8.1 Updating rayon-core v1.12.0 -> v1.12.1 Updating regex v1.10.2 -> v1.10.3 Updating regex-automata v0.4.3 -> v0.4.5 Updating reqwest v0.11.23 -> v0.11.24 Updating rust_decimal v1.33.1 -> v1.34.0 Updating rustix v0.38.28 -> v0.38.30 Adding rustls-pemfile v1.0.4 Updating serde v1.0.194 -> v1.0.196 Updating serde_derive v1.0.194 -> v1.0.196 Updating serde_json v1.0.110 -> v1.0.113 Updating serde_with v3.4.0 -> v3.6.0 Updating serde_with_macros v3.4.0 -> v3.6.0 Updating shlex v1.2.0 -> v1.3.0 Updating smallvec v1.11.2 -> v1.13.1 Updating syn v2.0.47 -> v2.0.48 Updating termcolor v1.4.0 -> v1.4.1 Updating toml v0.8.8 -> v0.8.9 Removing toml_edit v0.20.7 Removing toml_edit v0.21.0 Adding toml_edit v0.21.1 Updating tower-http v0.5.0 -> v0.5.1 Updating unicode-bidi v0.3.14 -> v0.3.15 Updating uuid v1.6.1 -> v1.7.0 Updating wasm-bindgen v0.2.89 -> v0.2.90 Updating wasm-bindgen-backend v0.2.89 -> v0.2.90 Updating wasm-bindgen-futures v0.4.39 -> v0.4.40 Updating wasm-bindgen-macro v0.2.89 -> v0.2.90 Updating wasm-bindgen-macro-support v0.2.89 -> v0.2.90 Updating wasm-bindgen-shared v0.2.89 -> v0.2.90 Updating web-sys v0.3.66 -> v0.3.67 Updating winnow v0.5.32 -> v0.5.36 --- Cargo.lock | 486 ++++++++++++++++++++++++++--------------------------- 1 file changed, 237 insertions(+), 249 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
1af4d5b3e..fa1d724e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -107,9 +107,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" +checksum = "2faccea4cc4ab4a667ce676a30e8ec13922a692c99bb8f5b11f1502c72e04220" [[package]] name = "anstyle-parse" @@ -169,9 +169,9 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-compression" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc2d0cfb2a7388d34f590e76686704c494ed7aaceed62ee1ba35cbf363abc2a5" +checksum = "a116f46a969224200a0a97f29cfd4c50e7534e4b4826bd23ea2c3c533039c82c" dependencies = [ "brotli", "flate2", @@ -191,7 +191,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -202,9 +202,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d09dbe0e490df5da9d69b36dca48a76635288a82f92eca90024883a56202026d" +checksum = "1236b4b292f6c4d6dc34604bb5120d85c3fe1d1aa596bd5cc52ca054d13e7b9e" dependencies = [ "async-trait", "axum-core", @@ -248,9 +248,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e87c8503f93e6d144ee5690907ba22db7ba79ab001a932ab99034f0fe836b3df" +checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" dependencies = [ "async-trait", "bytes", @@ -269,14 +269,14 @@ dependencies = [ [[package]] name = "axum-macros" -version = "0.4.0" +version = "0.4.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a2edad600410b905404c594e2523549f1bcd4bded1e252c8f74524ccce0b867" +checksum = "00c055ee2d014ae5981ce1016374e8213682aa14d9bf40e48ab48b5f3ef20eaa" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -295,7 +295,7 @@ dependencies = [ "hyper-util", "pin-project-lite", "rustls", - "rustls-pemfile", + "rustls-pemfile 2.0.0", "tokio", "tokio-rustls", "tower", @@ -325,9 +325,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.5" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "bigdecimal" @@ -348,11 +348,11 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bindgen" -version = "0.69.1" +version = "0.69.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ffcebc3849946a7170a05992aac39da343a90676ab392c51a4280981d6379c2" +checksum = "a4c69fae65a523209d34240b60abe0c42d33d1045d445c0839d8a4894a736e2d" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "cexpr", "clang-sys", "lazy_static", @@ -363,7 +363,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -374,9 +374,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" [[package]] name = "bitvec" @@ -401,9 +401,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.3.0" 
+version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d4d6dafc1a3bb54687538972158f07b2c948bc57d5890df22c0739098b3028" +checksum = "f58b559fd6448c6e2fd0adb5720cd98a2506594cafa4737ff98c396f3e82f667" dependencies = [ "borsh-derive", "cfg_aliases", @@ -411,15 +411,15 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4918709cc4dd777ad2b6303ed03cb37f3ca0ccede8c1b0d28ac6db8f4710e0" +checksum = "7aadb5b6ccbd078890f6d7003694e33816e6b784358f18e15e7e6d9f065a57cd" dependencies = [ "once_cell", - "proc-macro-crate 2.0.0", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", "syn_derive", ] @@ -529,22 +529,22 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "9f13690e35a5e4ace198e7beea2895d29f3a9cc55015fcebe6336bd2010af9eb" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", "serde", - "windows-targets 0.48.5", + "windows-targets 0.52.0", ] [[package]] name = "ciborium" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" dependencies = [ "ciborium-io", "ciborium-ll", @@ -553,15 +553,15 @@ dependencies = [ [[package]] name = "ciborium-io" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" [[package]] name = 
"ciborium-ll" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ "ciborium-io", "half", @@ -609,7 +609,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -686,9 +686,9 @@ checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "cpufeatures" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] @@ -715,7 +715,7 @@ dependencies = [ "criterion-plot", "futures", "is-terminal", - "itertools 0.10.5", + "itertools", "num-traits", "once_cell", "oorandom", @@ -737,16 +737,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools 0.10.5", + "itertools", ] [[package]] name = "crossbeam" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eb9105919ca8e40d437fc9cbb8f1975d916f1bd28afe795a48aae32a2cc8920" +checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" dependencies = [ - "cfg-if", "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", @@ -756,54 +755,52 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82a9b73a36529d9c47029b9fb3a6f0ea3cc916a261195352ba19e770fc1748b2" +checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" dependencies = [ - "cfg-if", 
"crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fca89a0e215bab21874660c67903c5f143333cab1da83d041c7ded6053774751" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.17" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e3681d554572a651dda4186cd47240627c3d0114d45a95f6ad27f2f22e7548d" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg", - "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-queue" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc6598521bb5a83d491e8c1fe51db7296019d2ca3cb93cc6c2a20369a4d78a2" +checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" dependencies = [ - "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.18" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3a430a770ebd84726f584a90ee7f020d28db52c6d02138900f22341f866d39c" -dependencies = [ - "cfg-if", -] +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-common" @@ -817,9 +814,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.3" +version = "0.20.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e" +checksum = "fc5d6b04b3fd0ba9926f945895de7d806260a2d7431ba82e7edaecb043c4c6b8" 
dependencies = [ "darling_core", "darling_macro", @@ -827,27 +824,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.3" +version = "0.20.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621" +checksum = "04e48a959bcd5c761246f5d090ebc2fbf7b9cd527a492b07a67510c108f1e7e3" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] name = "darling_macro" -version = "0.20.3" +version = "0.20.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" +checksum = "1d1545d67a2149e1d93b7e5c7752dce5a7426eb5d1357ddcfd89336b94444f77" dependencies = [ "darling_core", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -875,13 +872,13 @@ dependencies = [ [[package]] name = "derive_utils" -version = "0.13.2" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9abcad25e9720609ccb3dcdb795d845e37d8ce34183330a9f48b03a1a71c8e21" +checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -1056,7 +1053,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -1068,7 +1065,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -1080,7 +1077,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -1145,7 +1142,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -1190,9 +1187,9 @@ dependencies = [ [[package]] 
name = "getrandom" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if", "libc", @@ -1213,9 +1210,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "h2" -version = "0.3.22" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d6250322ef6e60f93f9a2162799302cd6f68f79f6e5d85c8c16f14d1d958178" +checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" dependencies = [ "bytes", "fnv", @@ -1223,7 +1220,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.11", - "indexmap 2.1.0", + "indexmap 2.2.2", "slab", "tokio", "tokio-util", @@ -1232,9 +1229,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d308f63daf4181410c242d34c11f928dcb3aa105852019e043c9d1f4e4368a" +checksum = "31d030e59af851932b72ceebadf4a2b5986dba4c3b99dd2493f8273a0f151943" dependencies = [ "bytes", "fnv", @@ -1242,7 +1239,7 @@ dependencies = [ "futures-sink", "futures-util", "http 1.0.0", - "indexmap 2.1.0", + "indexmap 2.2.2", "slab", "tokio", "tokio-util", @@ -1251,9 +1248,13 @@ dependencies = [ [[package]] name = "half" -version = "1.8.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +checksum = "bc52e53916c08643f1b56ec082790d1e86a32e58dc5268f897f313fbae7b4872" +dependencies = [ + "cfg-if", + "crunchy", +] [[package]] name = "hashbrown" @@ -1300,9 +1301,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.3.3" +version = "0.3.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" +checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f" [[package]] name = "hex" @@ -1388,7 +1389,7 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.3.22", + "h2 0.3.24", "http 0.2.11", "http-body 0.4.6", "httparse", @@ -1411,7 +1412,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.0", + "h2 0.4.2", "http 1.0.0", "http-body 1.0.0", "httparse", @@ -1436,12 +1437,11 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdea9aac0dbe5a9240d68cfd9501e2db94222c6dc06843e06640b9e07f0fdc67" +checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" dependencies = [ "bytes", - "futures-channel", "futures-util", "http 1.0.0", "http-body 1.0.0", @@ -1449,7 +1449,6 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tracing", ] [[package]] @@ -1504,9 +1503,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.1.0" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -1515,12 +1514,11 @@ dependencies = [ [[package]] name = "io-enum" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5305557fa27b460072ae15ce07617e999f5879f14d376c8449f0bfb9f9d8e91e" +checksum = "53b53d712d99a73eec59ee5e4fe6057f8052142d38eeafbbffcb06b36d738a6e" dependencies = [ "derive_utils", - "syn 2.0.47", ] [[package]] @@ -1549,15 +1547,6 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.11.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" -dependencies = [ - "either", -] - [[package]] name = "itoa" version = "1.0.10" @@ -1575,9 +1564,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.66" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" +checksum = "9a1d36f1235bc969acba30b7f5990b864423a6068a10f7c90ae8f0112e3a59d1" dependencies = [ "wasm-bindgen", ] @@ -1680,9 +1669,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.151" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libloading" @@ -1707,9 +1696,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.12" +version = "1.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" +checksum = "037731f5d3aaa87a5675e895b63ddff1a87624bc29f77004ea829809654e48f6" dependencies = [ "cc", "pkg-config", @@ -1724,15 +1713,15 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "local-ip-address" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66357e687a569abca487dc399a9c9ac19beb3f13991ed49f00c144e02cbd42ab" +checksum = 
"612ed4ea9ce5acfb5d26339302528a5e1e59dfed95e9e11af3c083236ff1d15d" dependencies = [ "libc", "neli", @@ -1833,14 +1822,14 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] name = "multimap" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1a5d38b9b352dbd913288736af36af41c48d61b1a8cd34bcecd727561b7d511" +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" dependencies = [ "serde", ] @@ -1884,7 +1873,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", "termcolor", "thiserror", ] @@ -1895,10 +1884,10 @@ version = "0.30.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57349d5a326b437989b6ee4dc8f2f34b0cc131202748414712a8e7d98952fc8c" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bigdecimal", "bindgen", - "bitflags 2.4.1", + "bitflags 2.4.2", "bitvec", "byteorder", "bytes", @@ -2058,11 +2047,11 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "openssl" -version = "0.10.62" +version = "0.10.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671" +checksum = "15c9d69dd87a29568d4d017cfe8ec518706046a05184e5aea92d0af890b803c8" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "cfg-if", "foreign-types", "libc", @@ -2079,7 +2068,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -2090,18 +2079,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.2.1+3.2.0" +version = "300.2.2+3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3fe476c29791a5ca0d1273c697e96085bbabbbea2ef7afd5617e78a4b40332d3" +checksum = "8bbfad0063610ac26ee79f7484739e2b07555a75c42453b89263830b5c8103bc" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.98" +version = "0.9.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7" +checksum = "22e1bf214306098e4832460f797824c05d25aacdf896f64a985fb0fd992454ae" dependencies = [ "cc", "libc", @@ -2161,7 +2150,7 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "serde", ] @@ -2173,9 +2162,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.5" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae9cee2a55a544be8b89dc6848072af97a20f2422603c10865be2a42b580fff5" +checksum = "1f200d8d83c44a45b21764d1916299752ca035d15ecd46faca3e9a2a2bf6ad06" dependencies = [ "memchr", "thiserror", @@ -2184,9 +2173,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.5" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81d78524685f5ef2a3b3bd1cafbc9fcabb036253d9b1463e726a91cd16e2dfc2" +checksum = "bcd6ab1236bbdb3a49027e920e693192ebfe8913f6d60e294de57463a493cfde" dependencies = [ "pest", "pest_generator", @@ -2194,22 +2183,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.5" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68bd1206e71118b5356dae5ddc61c8b11e28b09ef6a31acbd15ea48a28e0c227" +checksum = "2a31940305ffc96863a735bef7c7994a00b325a7138fdbc5bda0f1a0476d3275" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] 
name = "pest_meta" -version = "2.7.5" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c747191d4ad9e4a4ab9c8798f1e82a39affe7ef9648390b7e5548d18e099de6" +checksum = "a7ff62f5259e53b78d1af898941cdcdccfae7385cf7d793a6e55de5d05bb4b7d" dependencies = [ "once_cell", "pest", @@ -2256,22 +2245,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -2288,9 +2277,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a" +checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" [[package]] name = "plotters" @@ -2334,12 +2323,11 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "predicates" -version = "3.0.4" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dfc28575c2e3f19cb3c73b93af36460ae898d426eba6fc15b9bd2a5220758a0" +checksum = "68b87bfd4605926cdfefc1c3b5f8fe560e3feca9d5552cf68c466d3d8236c7e8" dependencies = [ "anstyle", - "itertools 0.11.0", "predicates-core", ] @@ -2371,11 +2359,11 
@@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "2.0.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" dependencies = [ - "toml_edit 0.20.7", + "toml_edit 0.21.1", ] [[package]] @@ -2404,9 +2392,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.75" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "907a61bd0f64c2f29cd1cf1dc34d05176426a3f504a78010f08416ddb7b13708" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ "unicode-ident", ] @@ -2510,9 +2498,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" +checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" dependencies = [ "either", "rayon-core", @@ -2520,9 +2508,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ "crossbeam-deque", "crossbeam-utils", @@ -2539,9 +2527,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.2" +version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", @@ -2551,9 +2539,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.3" +version = 
"0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" dependencies = [ "aho-corasick", "memchr", @@ -2577,16 +2565,16 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.23" +version = "0.11.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" +checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "encoding_rs", "futures-core", "futures-util", - "h2 0.3.22", + "h2 0.3.24", "http 0.2.11", "http-body 0.4.6", "hyper 0.14.28", @@ -2599,9 +2587,11 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", + "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", + "sync_wrapper", "system-configuration", "tokio", "tokio-native-tls", @@ -2682,7 +2672,7 @@ version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a78046161564f5e7cd9008aff3b2990b3850dc8e0349119b98e8f251e099f24d" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -2702,9 +2692,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.33.1" +version = "1.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06676aec5ccb8fc1da723cc8c0f9a46549f21ebb8753d3915c6c41db1e7f1dc4" +checksum = "d7de2711cae7bdec993f4d2319352599ceb0d003e9f7900ea7c6ef4c5fc16831" dependencies = [ "arrayvec", "borsh", @@ -2739,11 +2729,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.28" +version = "0.38.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +checksum = 
"322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.4.2", "errno", "libc", "linux-raw-sys", @@ -2762,13 +2752,22 @@ dependencies = [ "sct", ] +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + [[package]] name = "rustls-pemfile" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "rustls-pki-types", ] @@ -2886,9 +2885,9 @@ checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" [[package]] name = "serde" -version = "1.0.194" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b114498256798c94a0689e1a15fec6005dee8ac1f41de56404b67afc2a4b773" +checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" dependencies = [ "serde_derive", ] @@ -2914,20 +2913,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.194" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3385e45322e8f9931410f01b3031ec534c3947d0e94c18049af4d9f9907d4e0" +checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] name = "serde_json" -version = "1.0.110" +version = "1.0.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fbd975230bada99c8bb618e0c365c2eefa219158d5c6c29610fd09ff1833257" +checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" dependencies = [ "itoa", "ryu", @@ -2952,7 +2951,7 @@ checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -2978,15 +2977,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.4.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64cd236ccc1b7a29e7e2739f27c0b2dd199804abc4290e32f59f3b68d6405c23" +checksum = "1b0ed1662c5a68664f45b76d18deb0e234aff37207086803165c961eb695e981" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.1.0", + "indexmap 2.2.2", "serde", "serde_json", "serde_with_macros", @@ -2995,14 +2994,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.4.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93634eb5f75a2323b16de4748022ac4297f9e76b6dced2be287a099f41b5e788" +checksum = "568577ff0ef47b879f736cd66740e022f3672788cdf002a05a4e609ea5a6fb15" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -3029,9 +3028,9 @@ dependencies = [ [[package]] name = "shlex" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7cee0529a6d40f580e7a5e6c495c8fbfe21b7b52795ed4bb5e62cdf92bc6380" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" @@ -3065,9 +3064,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.2" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" +checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" [[package]] name = "socket2" @@ -3120,9 +3119,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.47" +version = "2.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1726efe18f42ae774cc644f330953a5e7b3c3003d3edcecf18850fe9d4dd9afb" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" dependencies = [ "proc-macro2", "quote", @@ -3138,7 +3137,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -3206,9 +3205,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] @@ -3236,7 +3235,7 @@ checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -3319,7 +3318,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] @@ -3367,14 +3366,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" +checksum = "c6a4b9e8023eb94392d3dca65d717c53abc5dad49c07cb65bb8fcd87115fa325" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.21.0", + "toml_edit 0.21.1", ] [[package]] @@ -3392,29 +3391,18 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.1.0", - "toml_datetime", - "winnow", -] - -[[package]] -name = "toml_edit" -version = "0.20.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" -dependencies = [ - "indexmap 2.1.0", + 
"indexmap 2.2.2", "toml_datetime", "winnow", ] [[package]] name = "toml_edit" -version = "0.21.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" +checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.2", "serde", "serde_spanned", "toml_datetime", @@ -3498,7 +3486,7 @@ dependencies = [ "serde", "serde_with", "thiserror", - "toml 0.8.8", + "toml 0.8.9", "torrust-tracker-located-error", "torrust-tracker-primitives", "uuid", @@ -3556,14 +3544,14 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09e12e6351354851911bdf8c2b8f2ab15050c567d70a8b9a37ae7b8301a4080d" +checksum = "0da193277a4e2c33e59e09b5861580c33dd0a637c3883d0fa74ba40c0374af2e" dependencies = [ "async-compression", - "bitflags 2.4.1", + "bitflags 2.4.2", "bytes", - "futures-util", + "futures-core", "http 1.0.0", "http-body 1.0.0", "http-body-util", @@ -3637,9 +3625,9 @@ checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" [[package]] name = "unicode-bidi" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" @@ -3681,9 +3669,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" +checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" dependencies = [ "getrandom", "rand", @@ -3728,9 
+3716,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.89" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" +checksum = "b1223296a201415c7fad14792dbefaace9bd52b62d33453ade1c5b5f07555406" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3738,24 +3726,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.89" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" +checksum = "fcdc935b63408d58a32f8cc9738a0bffd8f05cc7c002086c6ef20b7312ad9dcd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.39" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12" +checksum = "bde2032aeb86bdfaecc8b261eef3cba735cc426c1f3a3416d1e0791be95fc461" dependencies = [ "cfg-if", "js-sys", @@ -3765,9 +3753,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.89" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" +checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3775,28 +3763,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.89" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" +checksum = 
"bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.89" +version = "0.2.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" +checksum = "4d91413b1c31d7539ba5ef2451af3f0b833a005eb27a631cec32bc0635a8602b" [[package]] name = "web-sys" -version = "0.3.66" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" +checksum = "58cd2333b6e0be7a39605f0e255892fd7418a682d8da8fe042fe25128794d2ed" dependencies = [ "js-sys", "wasm-bindgen", @@ -3976,9 +3964,9 @@ checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winnow" -version = "0.5.32" +version = "0.5.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8434aeec7b290e8da5c3f0d628cb0eac6cabcb31d14bb74f779a08109a5914d6" +checksum = "818ce546a11a9986bc24f93d0cdf38a8a1a400f1473ea8c82e59f6e0ffab9249" dependencies = [ "memchr", ] @@ -4028,7 +4016,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.47", + "syn 2.0.48", ] [[package]] From 3b735a7ce0e4c792a5bbafbc29e68083bb97d716 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 31 Jan 2024 12:03:20 +0000 Subject: [PATCH 0723/1003] refactor: [#639] Tracker Checker: prepare outout for UDP checks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The output for the UDP tracker checks are now the same as the HTTP tracker checks. But not implemented yet (TODO). ```output Running checks for trackers ... UDP trackers ... 
✓ - Announce at 127.0.0.1:6969 is OK ✓ - Scrape at 127.0.0.1:6969 is OK HTTP trackers ... ✓ - Announce at http://127.0.0.1:7070/ is OK (TODO) ✓ - Scrape at http://127.0.0.1:7070/ is OK (TODO) Health checks ... ✓ - Health API at http://127.0.0.1:1313/health_check is OK ``` --- src/console/clients/checker/service.rs | 79 +++++++++++++++++++------- 1 file changed, 58 insertions(+), 21 deletions(-) diff --git a/src/console/clients/checker/service.rs b/src/console/clients/checker/service.rs index 1cb4725e0..90dc10894 100644 --- a/src/console/clients/checker/service.rs +++ b/src/console/clients/checker/service.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use std::time::Duration; use colored::Colorize; +use log::debug; use reqwest::{Client as HttpClient, Url}; use super::config::Configuration; @@ -38,7 +39,7 @@ impl Service { let mut check_results = vec![]; - self.check_udp_trackers(); + self.check_udp_trackers(&mut check_results).await; self.check_http_trackers(&mut check_results).await; @@ -47,11 +48,44 @@ impl Service { check_results } - fn check_udp_trackers(&self) { + async fn check_udp_trackers(&self, check_results: &mut Vec) { self.console.println("UDP trackers ..."); for udp_tracker in &self.config.udp_trackers { - self.check_udp_tracker(udp_tracker); + let colored_tracker_url = udp_tracker.to_string().yellow(); + + /* todo: + - Initialize the UDP client + - Pass the connected client the the check function + - Connect to the tracker + - Make the request (announce or scrape) + */ + + match self.check_udp_announce(udp_tracker).await { + Ok(()) => { + check_results.push(Ok(())); + self.console + .println(&format!("{} - Announce at {} is OK", "✓".green(), colored_tracker_url)); + } + Err(err) => { + check_results.push(Err(err)); + self.console + .println(&format!("{} - Announce at {} is failing", "✗".red(), colored_tracker_url)); + } + } + + match self.check_udp_scrape(udp_tracker).await { + Ok(()) => { + check_results.push(Ok(())); + self.console + .println(&format!("{} - 
Scrape at {} is OK", "✓".green(), colored_tracker_url)); + } + Err(err) => { + check_results.push(Err(err)); + self.console + .println(&format!("{} - Scrape at {} is failing", "✗".red(), colored_tracker_url)); + } + } } } @@ -65,7 +99,7 @@ impl Service { Ok(()) => { check_results.push(Ok(())); self.console - .println(&format!("{} - Announce at {} is OK", "✓".green(), colored_tracker_url)); + .println(&format!("{} - Announce at {} is OK (TODO)", "✓".green(), colored_tracker_url)); } Err(err) => { check_results.push(Err(err)); @@ -78,7 +112,7 @@ impl Service { Ok(()) => { check_results.push(Ok(())); self.console - .println(&format!("{} - Scrape at {} is OK", "✓".green(), colored_tracker_url)); + .println(&format!("{} - Scrape at {} is OK (TODO)", "✓".green(), colored_tracker_url)); } Err(err) => { check_results.push(Err(err)); @@ -100,26 +134,23 @@ impl Service { } } - fn check_udp_tracker(&self, address: &SocketAddr) { - // todo: - // - Make announce request - // - Make scrape request - - let colored_address = address.to_string().yellow(); + #[allow(clippy::unused_async)] + async fn check_udp_announce(&self, tracker_socket_addr: &SocketAddr) -> Result<(), CheckError> { + debug!("{tracker_socket_addr}"); + Ok(()) + } - self.console.println(&format!( - "{} - UDP tracker at udp://{} is OK ({})", - "✓".green(), - colored_address, - "TODO".red(), - )); + #[allow(clippy::unused_async)] + async fn check_udp_scrape(&self, tracker_socket_addr: &SocketAddr) -> Result<(), CheckError> { + debug!("{tracker_socket_addr}"); + Ok(()) } - async fn check_http_announce(&self, url: &Url) -> Result<(), CheckError> { + async fn check_http_announce(&self, tracker_url: &Url) -> Result<(), CheckError> { let info_hash_str = "9c38422213e30bff212b30c360d26f9a02136422".to_string(); // # DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&info_hash_str).expect("a valid info-hash is required"); - let response = Client::new(url.clone()) + let response = Client::new(tracker_url.clone()) 
.announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) .await; @@ -127,10 +158,15 @@ impl Service { if let Ok(_announce_response) = serde_bencode::from_bytes::(&body) { Ok(()) } else { - Err(CheckError::HttpError { url: url.clone() }) + debug!("announce body {:#?}", body); + Err(CheckError::HttpError { + url: tracker_url.clone(), + }) } } else { - Err(CheckError::HttpError { url: url.clone() }) + Err(CheckError::HttpError { + url: tracker_url.clone(), + }) } } @@ -144,6 +180,7 @@ impl Service { if let Ok(_scrape_response) = scrape::Response::try_from_bencoded(&body) { Ok(()) } else { + debug!("scrape body {:#?}", body); Err(CheckError::HttpError { url: url.clone() }) } } else { From 011fdb7df19536039367bc06dfd6d6397e474626 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 31 Jan 2024 13:45:17 +0000 Subject: [PATCH 0724/1003] refactor: [#639] Tracker Checker: extract checker:Client to check UDP servers It will be used in teh Tracker Checker too. --- src/console/clients/udp/app.rs | 149 +++---------- src/console/clients/udp/checker.rs | 214 +++++++++++++++++++ src/console/clients/udp/mod.rs | 1 + src/shared/bit_torrent/tracker/udp/client.rs | 2 + 4 files changed, 241 insertions(+), 125 deletions(-) create mode 100644 src/console/clients/udp/checker.rs diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index e9c8b5274..8b1a8ca47 100644 --- a/src/console/clients/udp/app.rs +++ b/src/console/clients/udp/app.rs @@ -56,25 +56,21 @@ //! ``` //! //! The protocol (`udp://`) in the URL is mandatory. The path (`\scrape`) is optional. It always uses `\scrape`. 
-use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs}; +use std::net::{SocketAddr, ToSocketAddrs}; use std::str::FromStr; use anyhow::Context; -use aquatic_udp_protocol::common::InfoHash; use aquatic_udp_protocol::Response::{AnnounceIpv4, AnnounceIpv6, Scrape}; -use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, Response, - ScrapeRequest, TransactionId, -}; +use aquatic_udp_protocol::{Port, TransactionId}; use clap::{Parser, Subcommand}; use log::{debug, LevelFilter}; use serde_json::json; use url::Url; +use crate::console::clients::udp::checker; use crate::shared::bit_torrent::info_hash::InfoHash as TorrustInfoHash; -use crate::shared::bit_torrent::tracker::udp::client::{UdpClient, UdpTrackerClient}; -const ASSIGNED_BY_OS: i32 = 0; +const ASSIGNED_BY_OS: u16 = 0; const RANDOM_TRANSACTION_ID: i32 = -888_840_697; #[derive(Parser, Debug)] @@ -110,41 +106,36 @@ pub async fn run() -> anyhow::Result<()> { let args = Args::parse(); - // Configuration - let local_port = ASSIGNED_BY_OS; - let local_bind_to = format!("0.0.0.0:{local_port}"); - let transaction_id = RANDOM_TRANSACTION_ID; - - // Bind to local port - debug!("Binding to: {local_bind_to}"); - let udp_client = UdpClient::bind(&local_bind_to).await; - let bound_to = udp_client.socket.local_addr().context("binding local address")?; - debug!("Bound to: {bound_to}"); - - let transaction_id = TransactionId(transaction_id); - let response = match args.command { Command::Announce { tracker_socket_addr, info_hash, } => { - let (connection_id, udp_tracker_client) = connect(&tracker_socket_addr, udp_client, transaction_id).await; - - send_announce_request( - connection_id, - transaction_id, - info_hash, - Port(bound_to.port()), - &udp_tracker_client, - ) - .await + let transaction_id = TransactionId(RANDOM_TRANSACTION_ID); + + let mut client = checker::Client::default(); + + let bound_to = client.bind_and_connect(ASSIGNED_BY_OS, 
&tracker_socket_addr).await?; + + let connection_id = client.send_connection_request(transaction_id).await?; + + client + .send_announce_request(connection_id, transaction_id, info_hash, Port(bound_to.port())) + .await? } Command::Scrape { tracker_socket_addr, info_hashes, } => { - let (connection_id, udp_tracker_client) = connect(&tracker_socket_addr, udp_client, transaction_id).await; - send_scrape_request(connection_id, transaction_id, info_hashes, &udp_tracker_client).await + let transaction_id = TransactionId(RANDOM_TRANSACTION_ID); + + let mut client = checker::Client::default(); + + let _bound_to = client.bind_and_connect(ASSIGNED_BY_OS, &tracker_socket_addr).await?; + + let connection_id = client.send_connection_request(transaction_id).await?; + + client.send_scrape_request(connection_id, transaction_id, info_hashes).await? } }; @@ -265,95 +256,3 @@ fn parse_info_hash(info_hash_str: &str) -> anyhow::Result { TorrustInfoHash::from_str(info_hash_str) .map_err(|e| anyhow::Error::msg(format!("failed to parse info-hash `{info_hash_str}`: {e:?}"))) } - -async fn connect( - tracker_socket_addr: &SocketAddr, - udp_client: UdpClient, - transaction_id: TransactionId, -) -> (ConnectionId, UdpTrackerClient) { - debug!("Connecting to tracker: udp://{tracker_socket_addr}"); - - udp_client.connect(&tracker_socket_addr.to_string()).await; - - let udp_tracker_client = UdpTrackerClient { udp_client }; - - let connection_id = send_connection_request(transaction_id, &udp_tracker_client).await; - - (connection_id, udp_tracker_client) -} - -async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { - debug!("Sending connection request with transaction id: {transaction_id:#?}"); - - let connect_request = ConnectRequest { transaction_id }; - - client.send(connect_request.into()).await; - - let response = client.receive().await; - - debug!("connection request response:\n{response:#?}"); - - match response { - 
Response::Connect(connect_response) => connect_response.connection_id, - _ => panic!("error connecting to udp server. Unexpected response"), - } -} - -async fn send_announce_request( - connection_id: ConnectionId, - transaction_id: TransactionId, - info_hash: TorrustInfoHash, - port: Port, - client: &UdpTrackerClient, -) -> Response { - debug!("Sending announce request with transaction id: {transaction_id:#?}"); - - let announce_request = AnnounceRequest { - connection_id, - transaction_id, - info_hash: InfoHash(info_hash.bytes()), - peer_id: PeerId(*b"-qB00000000000000001"), - bytes_downloaded: NumberOfBytes(0i64), - bytes_uploaded: NumberOfBytes(0i64), - bytes_left: NumberOfBytes(0i64), - event: AnnounceEvent::Started, - ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), - key: PeerKey(0u32), - peers_wanted: NumberOfPeers(1i32), - port, - }; - - client.send(announce_request.into()).await; - - let response = client.receive().await; - - debug!("announce request response:\n{response:#?}"); - - response -} - -async fn send_scrape_request( - connection_id: ConnectionId, - transaction_id: TransactionId, - info_hashes: Vec, - client: &UdpTrackerClient, -) -> Response { - debug!("Sending scrape request with transaction id: {transaction_id:#?}"); - - let scrape_request = ScrapeRequest { - connection_id, - transaction_id, - info_hashes: info_hashes - .iter() - .map(|torrust_info_hash| InfoHash(torrust_info_hash.bytes())) - .collect(), - }; - - client.send(scrape_request.into()).await; - - let response = client.receive().await; - - debug!("scrape request response:\n{response:#?}"); - - response -} diff --git a/src/console/clients/udp/checker.rs b/src/console/clients/udp/checker.rs new file mode 100644 index 000000000..b35139e49 --- /dev/null +++ b/src/console/clients/udp/checker.rs @@ -0,0 +1,214 @@ +use std::net::{Ipv4Addr, SocketAddr}; + +use anyhow::Context; +use aquatic_udp_protocol::common::InfoHash; +use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, 
ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, Response, + ScrapeRequest, TransactionId, +}; +use log::debug; +use thiserror::Error; + +use crate::shared::bit_torrent::info_hash::InfoHash as TorrustInfoHash; +use crate::shared::bit_torrent::tracker::udp::client::{UdpClient, UdpTrackerClient}; + +#[derive(Error, Debug)] +pub enum ClientError { + #[error("Local socket address is not bound yet. Try binding before connecting.")] + NotBound, + #[error("Not connected to remote tracker UDP socket. Try connecting before making requests.")] + NotConnected, + #[error("Unexpected response while connecting the the remote server.")] + UnexpectedConnectionResponse, +} + +/// A UDP Tracker client to make test requests (checks). +#[derive(Debug, Default)] +pub struct Client { + /// Local UDP socket. It could be 0 to assign a free port. + local_binding_address: Option, + + /// Local UDP socket after binding. It's equals to binding address if a + /// non- zero port was used. + local_bound_address: Option, + + /// Remote UDP tracker socket + remote_socket: Option, + + /// The client used to make UDP requests to the tracker. + udp_tracker_client: Option, +} + +impl Client { + /// Binds to the local socket and connects to the remote one. + /// + /// # Errors + /// + /// Will return an error if + /// + /// - It can't bound to the local socket address. + /// - It can't make a connection request successfully to the remote UDP server. + pub async fn bind_and_connect(&mut self, local_port: u16, remote_socket_addr: &SocketAddr) -> anyhow::Result { + let bound_to = self.bind(local_port).await?; + self.connect(remote_socket_addr).await?; + Ok(bound_to) + } + + /// Binds local client socket. + /// + /// # Errors + /// + /// Will return an error if it can't bound to the local address. 
+ async fn bind(&mut self, local_port: u16) -> anyhow::Result { + let local_bind_to = format!("0.0.0.0:{local_port}"); + let binding_address = local_bind_to.parse().context("binding local address")?; + + debug!("Binding to: {local_bind_to}"); + let udp_client = UdpClient::bind(&local_bind_to).await; + + let bound_to = udp_client.socket.local_addr().context("bound local address")?; + debug!("Bound to: {bound_to}"); + + self.local_binding_address = Some(binding_address); + self.local_bound_address = Some(bound_to); + + self.udp_tracker_client = Some(UdpTrackerClient { udp_client }); + + Ok(bound_to) + } + + /// Connects to the remote server socket. + /// + /// # Errors + /// + /// Will return and error if it can't make a connection request successfully + /// to the remote UDP server. + async fn connect(&mut self, tracker_socket_addr: &SocketAddr) -> anyhow::Result<()> { + debug!("Connecting to tracker: udp://{tracker_socket_addr}"); + + match &self.udp_tracker_client { + Some(client) => { + client.udp_client.connect(&tracker_socket_addr.to_string()).await; + self.remote_socket = Some(*tracker_socket_addr); + Ok(()) + } + None => Err(ClientError::NotBound.into()), + } + } + + /// Sends a connection request to the UDP Tracker server. + /// + /// # Errors + /// + /// Will return and error if + /// + /// - It can't connect to the remote UDP socket. + /// - It can't make a connection request successfully to the remote UDP + /// server (after successfully connecting to the remote UDP socket). + /// + /// # Panics + /// + /// Will panic if it receives an unexpected response. 
+ pub async fn send_connection_request(&self, transaction_id: TransactionId) -> anyhow::Result { + debug!("Sending connection request with transaction id: {transaction_id:#?}"); + + let connect_request = ConnectRequest { transaction_id }; + + match &self.udp_tracker_client { + Some(client) => { + client.send(connect_request.into()).await; + + let response = client.receive().await; + + debug!("connection request response:\n{response:#?}"); + + match response { + Response::Connect(connect_response) => Ok(connect_response.connection_id), + _ => Err(ClientError::UnexpectedConnectionResponse.into()), + } + } + None => Err(ClientError::NotConnected.into()), + } + } + + /// Sends an announce request to the UDP Tracker server. + /// + /// # Errors + /// + /// Will return and error if the client is not connected. You have to connect + /// before calling this function. + pub async fn send_announce_request( + &self, + connection_id: ConnectionId, + transaction_id: TransactionId, + info_hash: TorrustInfoHash, + client_port: Port, + ) -> anyhow::Result { + debug!("Sending announce request with transaction id: {transaction_id:#?}"); + + let announce_request = AnnounceRequest { + connection_id, + transaction_id, + info_hash: InfoHash(info_hash.bytes()), + peer_id: PeerId(*b"-qB00000000000000001"), + bytes_downloaded: NumberOfBytes(0i64), + bytes_uploaded: NumberOfBytes(0i64), + bytes_left: NumberOfBytes(0i64), + event: AnnounceEvent::Started, + ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), + key: PeerKey(0u32), + peers_wanted: NumberOfPeers(1i32), + port: client_port, + }; + + match &self.udp_tracker_client { + Some(client) => { + client.send(announce_request.into()).await; + + let response = client.receive().await; + + debug!("announce request response:\n{response:#?}"); + + Ok(response) + } + None => Err(ClientError::NotConnected.into()), + } + } + + /// Sends a scrape request to the UDP Tracker server. 
+ /// + /// # Errors + /// + /// Will return and error if the client is not connected. You have to connect + /// before calling this function. + pub async fn send_scrape_request( + &self, + connection_id: ConnectionId, + transaction_id: TransactionId, + info_hashes: Vec, + ) -> anyhow::Result { + debug!("Sending scrape request with transaction id: {transaction_id:#?}"); + + let scrape_request = ScrapeRequest { + connection_id, + transaction_id, + info_hashes: info_hashes + .iter() + .map(|torrust_info_hash| InfoHash(torrust_info_hash.bytes())) + .collect(), + }; + + match &self.udp_tracker_client { + Some(client) => { + client.send(scrape_request.into()).await; + + let response = client.receive().await; + + debug!("scrape request response:\n{response:#?}"); + + Ok(response) + } + None => Err(ClientError::NotConnected.into()), + } + } +} diff --git a/src/console/clients/udp/mod.rs b/src/console/clients/udp/mod.rs index 309be6287..cd0e8bd6b 100644 --- a/src/console/clients/udp/mod.rs +++ b/src/console/clients/udp/mod.rs @@ -1 +1,2 @@ pub mod app; +pub mod checker; diff --git a/src/shared/bit_torrent/tracker/udp/client.rs b/src/shared/bit_torrent/tracker/udp/client.rs index 23b718472..41c9def89 100644 --- a/src/shared/bit_torrent/tracker/udp/client.rs +++ b/src/shared/bit_torrent/tracker/udp/client.rs @@ -11,6 +11,7 @@ use tokio::time; use crate::shared::bit_torrent::tracker::udp::{source_address, MAX_PACKET_SIZE}; #[allow(clippy::module_name_repetitions)] +#[derive(Debug)] pub struct UdpClient { pub socket: Arc, } @@ -86,6 +87,7 @@ pub async fn new_udp_client_connected(remote_address: &str) -> UdpClient { } #[allow(clippy::module_name_repetitions)] +#[derive(Debug)] pub struct UdpTrackerClient { pub udp_client: UdpClient, } From a2e123cb338d7faed4e8f35e54a6470cfbddc2ca Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 31 Jan 2024 17:57:46 +0000 Subject: [PATCH 0725/1003] refactor: [#639] UDP client. 
Extract command handlers --- src/console/clients/udp/app.rs | 56 +++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 25 deletions(-) diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index 8b1a8ca47..e365f962b 100644 --- a/src/console/clients/udp/app.rs +++ b/src/console/clients/udp/app.rs @@ -60,7 +60,7 @@ use std::net::{SocketAddr, ToSocketAddrs}; use std::str::FromStr; use anyhow::Context; -use aquatic_udp_protocol::Response::{AnnounceIpv4, AnnounceIpv6, Scrape}; +use aquatic_udp_protocol::Response::{self, AnnounceIpv4, AnnounceIpv6, Scrape}; use aquatic_udp_protocol::{Port, TransactionId}; use clap::{Parser, Subcommand}; use log::{debug, LevelFilter}; @@ -110,33 +110,11 @@ pub async fn run() -> anyhow::Result<()> { Command::Announce { tracker_socket_addr, info_hash, - } => { - let transaction_id = TransactionId(RANDOM_TRANSACTION_ID); - - let mut client = checker::Client::default(); - - let bound_to = client.bind_and_connect(ASSIGNED_BY_OS, &tracker_socket_addr).await?; - - let connection_id = client.send_connection_request(transaction_id).await?; - - client - .send_announce_request(connection_id, transaction_id, info_hash, Port(bound_to.port())) - .await? - } + } => handle_announce(&tracker_socket_addr, &info_hash).await?, Command::Scrape { tracker_socket_addr, info_hashes, - } => { - let transaction_id = TransactionId(RANDOM_TRANSACTION_ID); - - let mut client = checker::Client::default(); - - let _bound_to = client.bind_and_connect(ASSIGNED_BY_OS, &tracker_socket_addr).await?; - - let connection_id = client.send_connection_request(transaction_id).await?; - - client.send_scrape_request(connection_id, transaction_id, info_hashes).await? 
- } + } => handle_scrape(&tracker_socket_addr, &info_hashes).await?, }; match response { @@ -201,6 +179,34 @@ fn setup_logging(level: LevelFilter) { debug!("logging initialized."); } +async fn handle_announce(tracker_socket_addr: &SocketAddr, info_hash: &TorrustInfoHash) -> anyhow::Result { + let transaction_id = TransactionId(RANDOM_TRANSACTION_ID); + + let mut client = checker::Client::default(); + + let bound_to = client.bind_and_connect(ASSIGNED_BY_OS, tracker_socket_addr).await?; + + let connection_id = client.send_connection_request(transaction_id).await?; + + client + .send_announce_request(connection_id, transaction_id, *info_hash, Port(bound_to.port())) + .await +} + +async fn handle_scrape(tracker_socket_addr: &SocketAddr, info_hashes: &[TorrustInfoHash]) -> anyhow::Result { + let transaction_id = TransactionId(RANDOM_TRANSACTION_ID); + + let mut client = checker::Client::default(); + + let _bound_to = client.bind_and_connect(ASSIGNED_BY_OS, tracker_socket_addr).await?; + + let connection_id = client.send_connection_request(transaction_id).await?; + + client + .send_scrape_request(connection_id, transaction_id, info_hashes.to_vec()) + .await +} + fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result { debug!("Tracker socket address: {tracker_socket_addr_str:#?}"); From 1b92b77052e1f96d33648875d184f7ad78cce19d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 31 Jan 2024 18:43:28 +0000 Subject: [PATCH 0726/1003] refactor: [#639] UDP client. Extract aquatic reponses wrappers for serialization to JSON. 
--- src/console/clients/udp/app.rs | 65 +++++++++------------- src/console/clients/udp/mod.rs | 1 + src/console/clients/udp/responses.rs | 83 ++++++++++++++++++++++++++++ 3 files changed, 109 insertions(+), 40 deletions(-) create mode 100644 src/console/clients/udp/responses.rs diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index e365f962b..b9e31155d 100644 --- a/src/console/clients/udp/app.rs +++ b/src/console/clients/udp/app.rs @@ -64,10 +64,10 @@ use aquatic_udp_protocol::Response::{self, AnnounceIpv4, AnnounceIpv6, Scrape}; use aquatic_udp_protocol::{Port, TransactionId}; use clap::{Parser, Subcommand}; use log::{debug, LevelFilter}; -use serde_json::json; use url::Url; use crate::console::clients::udp::checker; +use crate::console::clients::udp::responses::{AnnounceResponseDto, ScrapeResponseDto}; use crate::shared::bit_torrent::info_hash::InfoHash as TorrustInfoHash; const ASSIGNED_BY_OS: u16 = 0; @@ -117,45 +117,7 @@ pub async fn run() -> anyhow::Result<()> { } => handle_scrape(&tracker_socket_addr, &info_hashes).await?, }; - match response { - AnnounceIpv4(announce) => { - let json = json!({ - "transaction_id": announce.transaction_id.0, - "announce_interval": announce.announce_interval.0, - "leechers": announce.leechers.0, - "seeders": announce.seeders.0, - "peers": announce.peers.iter().map(|peer| format!("{}:{}", peer.ip_address, peer.port.0)).collect::>(), - }); - let pretty_json = serde_json::to_string_pretty(&json).context("announce IPv4 response JSON serialization")?; - println!("{pretty_json}"); - } - AnnounceIpv6(announce) => { - let json = json!({ - "transaction_id": announce.transaction_id.0, - "announce_interval": announce.announce_interval.0, - "leechers": announce.leechers.0, - "seeders": announce.seeders.0, - "peers6": announce.peers.iter().map(|peer| format!("{}:{}", peer.ip_address, peer.port.0)).collect::>(), - }); - let pretty_json = serde_json::to_string_pretty(&json).context("announce IPv6 response JSON 
serialization")?; - println!("{pretty_json}"); - } - Scrape(scrape) => { - let json = json!({ - "transaction_id": scrape.transaction_id.0, - "torrent_stats": scrape.torrent_stats.iter().map(|torrent_scrape_statistics| json!({ - "seeders": torrent_scrape_statistics.seeders.0, - "completed": torrent_scrape_statistics.completed.0, - "leechers": torrent_scrape_statistics.leechers.0, - })).collect::>(), - }); - let pretty_json = serde_json::to_string_pretty(&json).context("scrape response JSON serialization")?; - println!("{pretty_json}"); - } - _ => println!("{response:#?}"), // todo: serialize to JSON all responses. - }; - - Ok(()) + print_response(response) } fn setup_logging(level: LevelFilter) { @@ -207,6 +169,29 @@ async fn handle_scrape(tracker_socket_addr: &SocketAddr, info_hashes: &[TorrustI .await } +fn print_response(response: Response) -> anyhow::Result<()> { + match response { + AnnounceIpv4(response) => { + let pretty_json = serde_json::to_string_pretty(&AnnounceResponseDto::from(response)) + .context("announce IPv4 response JSON serialization")?; + println!("{pretty_json}"); + } + AnnounceIpv6(response) => { + let pretty_json = serde_json::to_string_pretty(&AnnounceResponseDto::from(response)) + .context("announce IPv6 response JSON serialization")?; + println!("{pretty_json}"); + } + Scrape(response) => { + let pretty_json = + serde_json::to_string_pretty(&ScrapeResponseDto::from(response)).context("scrape response JSON serialization")?; + println!("{pretty_json}"); + } + _ => println!("{response:#?}"), // todo: serialize to JSON all aquatic responses. 
+ }; + + Ok(()) +} + fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result { debug!("Tracker socket address: {tracker_socket_addr_str:#?}"); diff --git a/src/console/clients/udp/mod.rs b/src/console/clients/udp/mod.rs index cd0e8bd6b..2fcb26ed0 100644 --- a/src/console/clients/udp/mod.rs +++ b/src/console/clients/udp/mod.rs @@ -1,2 +1,3 @@ pub mod app; pub mod checker; +pub mod responses; diff --git a/src/console/clients/udp/responses.rs b/src/console/clients/udp/responses.rs new file mode 100644 index 000000000..020c7a367 --- /dev/null +++ b/src/console/clients/udp/responses.rs @@ -0,0 +1,83 @@ +//! Aquatic responses are not serializable. These are the serializable wrappers. +use std::net::{Ipv4Addr, Ipv6Addr}; + +use aquatic_udp_protocol::{AnnounceResponse, ScrapeResponse}; +use serde::Serialize; + +#[derive(Serialize)] +pub struct AnnounceResponseDto { + transaction_id: i32, + announce_interval: i32, + leechers: i32, + seeders: i32, + peers: Vec, +} + +impl From> for AnnounceResponseDto { + fn from(announce: AnnounceResponse) -> Self { + Self { + transaction_id: announce.transaction_id.0, + announce_interval: announce.announce_interval.0, + leechers: announce.leechers.0, + seeders: announce.seeders.0, + peers: announce + .peers + .iter() + .map(|peer| format!("{}:{}", peer.ip_address, peer.port.0)) + .collect::>(), + } + } +} + +impl From> for AnnounceResponseDto { + fn from(announce: AnnounceResponse) -> Self { + Self { + transaction_id: announce.transaction_id.0, + announce_interval: announce.announce_interval.0, + leechers: announce.leechers.0, + seeders: announce.seeders.0, + peers: announce + .peers + .iter() + .map(|peer| format!("{}:{}", peer.ip_address, peer.port.0)) + .collect::>(), + } + } +} + +#[derive(Serialize)] +pub struct ScrapeResponseDto { + transaction_id: i32, + torrent_stats: Vec, +} + +impl From for ScrapeResponseDto { + fn from(scrape: ScrapeResponse) -> Self { + Self { + transaction_id: scrape.transaction_id.0, + 
torrent_stats: scrape + .torrent_stats + .iter() + .map(|torrent_scrape_statistics| TorrentStats { + seeders: torrent_scrape_statistics.seeders.0, + completed: torrent_scrape_statistics.completed.0, + leechers: torrent_scrape_statistics.leechers.0, + }) + .collect::>(), + } + } +} + +#[derive(Serialize)] +struct Peer { + seeders: i32, + completed: i32, + leechers: i32, +} + +#[derive(Serialize)] +struct TorrentStats { + seeders: i32, + completed: i32, + leechers: i32, +} From 661b5210b05ccc7e858b070126a3174ba09da4a2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 1 Feb 2024 14:15:13 +0000 Subject: [PATCH 0727/1003] chore: [#639] add cargo dependency: hex_literal It allows simplifying the wasy we build InfoHashes from hex strings: ```rust let info_hash = InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422")); // # DevSkim: ignore DS173237 ``` --- Cargo.lock | 7 +++++++ Cargo.toml | 1 + 2 files changed, 8 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index fa1d724e2..fc45cfc57 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1311,6 +1311,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + [[package]] name = "http" version = "0.2.11" @@ -3439,6 +3445,7 @@ dependencies = [ "derive_more", "fern", "futures", + "hex-literal", "hyper 1.1.0", "lazy_static", "local-ip-address", diff --git a/Cargo.toml b/Cargo.toml index 1418f23dd..bd04e1cc1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -74,6 +74,7 @@ url = "2.5.0" tempfile = "3.9.0" clap = { version = "4.4.18", features = ["derive", "env"]} anyhow = "1.0.79" +hex-literal = "0.4.1" [dev-dependencies] criterion = { version = "0.5.1", features = ["async_tokio"] } From bbfca2c184b9b75f17722cbec4cf8575d2caa09b 
Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 1 Feb 2024 14:18:58 +0000 Subject: [PATCH 0728/1003] feat: [#639] Tracker Checker. Check UDP trackers This is the first working implementation. WIP. TODO: - Refactor: reorganize code: big functions, etc. - Bugs: if the UDP server is down the checker waits forever. Probably there is a missing timeout for the connection request. And in general for all requests. --- src/console/clients/checker/service.rs | 116 +++++++++++++++---------- 1 file changed, 72 insertions(+), 44 deletions(-) diff --git a/src/console/clients/checker/service.rs b/src/console/clients/checker/service.rs index 90dc10894..9ca24231f 100644 --- a/src/console/clients/checker/service.rs +++ b/src/console/clients/checker/service.rs @@ -3,19 +3,25 @@ use std::str::FromStr; use std::sync::Arc; use std::time::Duration; +use aquatic_udp_protocol::{Port, TransactionId}; use colored::Colorize; +use hex_literal::hex; use log::debug; use reqwest::{Client as HttpClient, Url}; use super::config::Configuration; use super::console::Console; use crate::console::clients::checker::printer::Printer; +use crate::console::clients::udp::checker; use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; use crate::shared::bit_torrent::tracker::http::client::responses::scrape; use crate::shared::bit_torrent::tracker::http::client::{requests, Client}; +const ASSIGNED_BY_OS: u16 = 0; +const RANDOM_TRANSACTION_ID: i32 = -888_840_697; + pub struct Service { pub(crate) config: Arc, pub(crate) console: Console, @@ -25,7 +31,7 @@ pub type CheckResult = Result<(), CheckError>; #[derive(Debug)] pub enum CheckError { - UdpError, + UdpError { socket_addr: SocketAddr }, HttpError { url: Url }, HealthCheckError { url: Url }, } @@ -54,37 +60,63 @@ impl Service { for udp_tracker in &self.config.udp_trackers { let 
colored_tracker_url = udp_tracker.to_string().yellow(); - /* todo: - - Initialize the UDP client - - Pass the connected client the the check function - - Connect to the tracker - - Make the request (announce or scrape) - */ - - match self.check_udp_announce(udp_tracker).await { - Ok(()) => { - check_results.push(Ok(())); - self.console - .println(&format!("{} - Announce at {} is OK", "✓".green(), colored_tracker_url)); - } - Err(err) => { - check_results.push(Err(err)); - self.console - .println(&format!("{} - Announce at {} is failing", "✗".red(), colored_tracker_url)); - } + let transaction_id = TransactionId(RANDOM_TRANSACTION_ID); + + let mut client = checker::Client::default(); + + let Ok(bound_to) = client.bind_and_connect(ASSIGNED_BY_OS, udp_tracker).await else { + check_results.push(Err(CheckError::UdpError { + socket_addr: *udp_tracker, + })); + self.console + .println(&format!("{} - Can't connect to socket {}", "✗".red(), colored_tracker_url)); + break; + }; + + let Ok(connection_id) = client.send_connection_request(transaction_id).await else { + check_results.push(Err(CheckError::UdpError { + socket_addr: *udp_tracker, + })); + self.console.println(&format!( + "{} - Can't make tracker connection request to {}", + "✗".red(), + colored_tracker_url + )); + break; + }; + + let info_hash = InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422")); // # DevSkim: ignore DS173237 + + if (client + .send_announce_request(connection_id, transaction_id, info_hash, Port(bound_to.port())) + .await) + .is_ok() + { + check_results.push(Ok(())); + self.console + .println(&format!("{} - Announce at {} is OK", "✓".green(), colored_tracker_url)); + } else { + let err = CheckError::UdpError { + socket_addr: *udp_tracker, + }; + check_results.push(Err(err)); + self.console + .println(&format!("{} - Announce at {} is failing", "✗".red(), colored_tracker_url)); } - match self.check_udp_scrape(udp_tracker).await { - Ok(()) => { - check_results.push(Ok(())); - self.console - 
.println(&format!("{} - Scrape at {} is OK", "✓".green(), colored_tracker_url)); - } - Err(err) => { - check_results.push(Err(err)); - self.console - .println(&format!("{} - Scrape at {} is failing", "✗".red(), colored_tracker_url)); - } + let info_hashes = vec![InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422"))]; // # DevSkim: ignore DS173237 + + if (client.send_scrape_request(connection_id, transaction_id, info_hashes).await).is_ok() { + check_results.push(Ok(())); + self.console + .println(&format!("{} - Announce at {} is OK", "✓".green(), colored_tracker_url)); + } else { + let err = CheckError::UdpError { + socket_addr: *udp_tracker, + }; + check_results.push(Err(err)); + self.console + .println(&format!("{} - Announce at {} is failing", "✗".red(), colored_tracker_url)); } } } @@ -99,7 +131,7 @@ impl Service { Ok(()) => { check_results.push(Ok(())); self.console - .println(&format!("{} - Announce at {} is OK (TODO)", "✓".green(), colored_tracker_url)); + .println(&format!("{} - Announce at {} is OK", "✓".green(), colored_tracker_url)); } Err(err) => { check_results.push(Err(err)); @@ -112,7 +144,7 @@ impl Service { Ok(()) => { check_results.push(Ok(())); self.console - .println(&format!("{} - Scrape at {} is OK (TODO)", "✓".green(), colored_tracker_url)); + .println(&format!("{} - Scrape at {} is OK", "✓".green(), colored_tracker_url)); } Err(err) => { check_results.push(Err(err)); @@ -134,22 +166,14 @@ impl Service { } } - #[allow(clippy::unused_async)] - async fn check_udp_announce(&self, tracker_socket_addr: &SocketAddr) -> Result<(), CheckError> { - debug!("{tracker_socket_addr}"); - Ok(()) - } - - #[allow(clippy::unused_async)] - async fn check_udp_scrape(&self, tracker_socket_addr: &SocketAddr) -> Result<(), CheckError> { - debug!("{tracker_socket_addr}"); - Ok(()) - } - async fn check_http_announce(&self, tracker_url: &Url) -> Result<(), CheckError> { let info_hash_str = "9c38422213e30bff212b30c360d26f9a02136422".to_string(); // # DevSkim: 
ignore DS173237 let info_hash = InfoHash::from_str(&info_hash_str).expect("a valid info-hash is required"); + // todo: HTTP request could panic.For example, if the server is not accessible. + // We should change the client to catch that error and return a `CheckError`. + // Otherwise the checking process will stop. The idea is to process all checks + // and return a final report. let response = Client::new(tracker_url.clone()) .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) .await; @@ -174,6 +198,10 @@ impl Service { let info_hashes: Vec = vec!["9c38422213e30bff212b30c360d26f9a02136422".to_string()]; // # DevSkim: ignore DS173237 let query = requests::scrape::Query::try_from(info_hashes).expect("a valid array of info-hashes is required"); + // todo: HTTP request could panic.For example, if the server is not accessible. + // We should change the client to catch that error and return a `CheckError`. + // Otherwise the checking process will stop. The idea is to process all checks + // and return a final report. let response = Client::new(url.clone()).scrape(&query).await; if let Ok(body) = response.bytes().await { From 6b74c66d35b98b161ebd8919ca994b202a7543d6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 1 Feb 2024 14:28:47 +0000 Subject: [PATCH 0729/1003] feat: [#639] Tracker Checker. 
Setup logging --- src/console/clients/checker/app.rs | 24 ++++++++++++++++++++++++ src/console/clients/checker/service.rs | 10 ++++++++++ 2 files changed, 34 insertions(+) diff --git a/src/console/clients/checker/app.rs b/src/console/clients/checker/app.rs index bca4b64dc..82ea800d0 100644 --- a/src/console/clients/checker/app.rs +++ b/src/console/clients/checker/app.rs @@ -17,6 +17,7 @@ use std::sync::Arc; use anyhow::{Context, Result}; use clap::Parser; +use log::{debug, LevelFilter}; use super::config::Configuration; use super::console::Console; @@ -39,6 +40,8 @@ struct Args { /// /// Will return an error if the configuration was not provided. pub async fn run() -> Result> { + setup_logging(LevelFilter::Info); + let args = Args::parse(); let config = setup_config(args)?; @@ -53,6 +56,27 @@ pub async fn run() -> Result> { Ok(service.run_checks().await) } +fn setup_logging(level: LevelFilter) { + if let Err(_err) = fern::Dispatch::new() + .format(|out, message, record| { + out.finish(format_args!( + "{} [{}][{}] {}", + chrono::Local::now().format("%+"), + record.target(), + record.level(), + message + )); + }) + .level(level) + .chain(std::io::stdout()) + .apply() + { + panic!("Failed to initialize logging.") + } + + debug!("logging initialized."); +} + fn setup_config(args: Args) -> Result { match (args.config_path, args.config_content) { (Some(config_path), _) => load_config_from_file(&config_path), diff --git a/src/console/clients/checker/service.rs b/src/console/clients/checker/service.rs index 9ca24231f..40db30b90 100644 --- a/src/console/clients/checker/service.rs +++ b/src/console/clients/checker/service.rs @@ -58,12 +58,16 @@ impl Service { self.console.println("UDP trackers ..."); for udp_tracker in &self.config.udp_trackers { + debug!("UDP tracker: {:?}", udp_tracker); + let colored_tracker_url = udp_tracker.to_string().yellow(); let transaction_id = TransactionId(RANDOM_TRANSACTION_ID); let mut client = checker::Client::default(); + debug!("Bind and 
connect"); + let Ok(bound_to) = client.bind_and_connect(ASSIGNED_BY_OS, udp_tracker).await else { check_results.push(Err(CheckError::UdpError { socket_addr: *udp_tracker, @@ -73,6 +77,8 @@ impl Service { break; }; + debug!("Send connection request"); + let Ok(connection_id) = client.send_connection_request(transaction_id).await else { check_results.push(Err(CheckError::UdpError { socket_addr: *udp_tracker, @@ -87,6 +93,8 @@ impl Service { let info_hash = InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422")); // # DevSkim: ignore DS173237 + debug!("Send announce request"); + if (client .send_announce_request(connection_id, transaction_id, info_hash, Port(bound_to.port())) .await) @@ -104,6 +112,8 @@ impl Service { .println(&format!("{} - Announce at {} is failing", "✗".red(), colored_tracker_url)); } + debug!("Send scrape request"); + let info_hashes = vec![InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422"))]; // # DevSkim: ignore DS173237 if (client.send_scrape_request(connection_id, transaction_id, info_hashes).await).is_ok() { From 592c0ddf48e3f9709e4c1bb642eb2e72dced64ce Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 1 Feb 2024 16:07:59 +0000 Subject: [PATCH 0730/1003] fix: add timeouts to UdpClient operations The generic UDP client does not have timeouts. When the server is down it waits forever for responses. This client has been using only for testing where the server was always up, but now it's using in production code by the Tracker Checker. For the time being, I keep the panicking behavior of the UdpClient when something is wrong. However we should return an error when the operation times out. For the Tracker Checker, it means that when the checker can't connect to a UDP server the checker is going to panic after 5 seconds. That is not the intended behavior for the checker. It should always return a full reprot of all checks. In order to implement that behavior we need to change the UdpClient to return errors. 
Since that's a bug refactor I open a new issue. --- src/shared/bit_torrent/tracker/udp/client.rs | 44 +++++++++++++++++--- 1 file changed, 39 insertions(+), 5 deletions(-) diff --git a/src/shared/bit_torrent/tracker/udp/client.rs b/src/shared/bit_torrent/tracker/udp/client.rs index 41c9def89..11c8d8f62 100644 --- a/src/shared/bit_torrent/tracker/udp/client.rs +++ b/src/shared/bit_torrent/tracker/udp/client.rs @@ -10,10 +10,18 @@ use tokio::time; use crate::shared::bit_torrent::tracker::udp::{source_address, MAX_PACKET_SIZE}; +/// Default timeout for sending and receiving packets. And waiting for sockets +/// to be readable and writable. +const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); + #[allow(clippy::module_name_repetitions)] #[derive(Debug)] pub struct UdpClient { + /// The socket to connect to pub socket: Arc, + + /// Timeout for sending and receiving packets + pub timeout: Duration, } impl UdpClient { @@ -29,6 +37,7 @@ impl UdpClient { Self { socket: Arc::new(socket), + timeout: DEFAULT_TIMEOUT, } } @@ -53,10 +62,23 @@ impl UdpClient { /// - Can't write to the socket. /// - Can't send data. 
pub async fn send(&self, bytes: &[u8]) -> usize { - debug!(target: "UDP client", "send {bytes:?}"); + debug!(target: "UDP client", "sending {bytes:?} ..."); + + match time::timeout(self.timeout, self.socket.writable()).await { + Ok(writable_result) => match writable_result { + Ok(()) => (), + Err(e) => panic!("{}", format!("IO error waiting for the socket to become readable: {e:?}")), + }, + Err(e) => panic!("{}", format!("Timeout waiting for the socket to become readable: {e:?}")), + }; - self.socket.writable().await.unwrap(); - self.socket.send(bytes).await.unwrap() + match time::timeout(self.timeout, self.socket.send(bytes)).await { + Ok(send_result) => match send_result { + Ok(size) => size, + Err(e) => panic!("{}", format!("IO error during send: {e:?}")), + }, + Err(e) => panic!("{}", format!("Send operation timed out: {e:?}")), + } } /// # Panics @@ -68,9 +90,21 @@ impl UdpClient { pub async fn receive(&self, bytes: &mut [u8]) -> usize { debug!(target: "UDP client", "receiving ..."); - self.socket.readable().await.unwrap(); + match time::timeout(self.timeout, self.socket.readable()).await { + Ok(readable_result) => match readable_result { + Ok(()) => (), + Err(e) => panic!("{}", format!("IO error waiting for the socket to become readable: {e:?}")), + }, + Err(e) => panic!("{}", format!("Timeout waiting for the socket to become readable: {e:?}")), + }; - let size = self.socket.recv(bytes).await.unwrap(); + let size = match time::timeout(self.timeout, self.socket.recv(bytes)).await { + Ok(recv_result) => match recv_result { + Ok(size) => size, + Err(e) => panic!("{}", format!("IO error during send: {e:?}")), + }, + Err(e) => panic!("{}", format!("Receive operation timed out: {e:?}")), + }; debug!(target: "UDP client", "{size} bytes received {bytes:?}"); From 77c32a16a48abb2e43d411d13352371288f0d8c3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 1 Feb 2024 16:53:02 +0000 Subject: [PATCH 0731/1003] refactor: [#639] Tracker Checker: extract mod for UDP 
checks --- src/console/clients/checker/checks/health.rs | 0 src/console/clients/checker/checks/http.rs | 0 src/console/clients/checker/checks/mod.rs | 3 + src/console/clients/checker/checks/udp.rs | 87 ++++++++++++++++++++ src/console/clients/checker/mod.rs | 1 + src/console/clients/checker/service.rs | 86 +------------------ 6 files changed, 93 insertions(+), 84 deletions(-) create mode 100644 src/console/clients/checker/checks/health.rs create mode 100644 src/console/clients/checker/checks/http.rs create mode 100644 src/console/clients/checker/checks/mod.rs create mode 100644 src/console/clients/checker/checks/udp.rs diff --git a/src/console/clients/checker/checks/health.rs b/src/console/clients/checker/checks/health.rs new file mode 100644 index 000000000..e69de29bb diff --git a/src/console/clients/checker/checks/http.rs b/src/console/clients/checker/checks/http.rs new file mode 100644 index 000000000..e69de29bb diff --git a/src/console/clients/checker/checks/mod.rs b/src/console/clients/checker/checks/mod.rs new file mode 100644 index 000000000..16256595e --- /dev/null +++ b/src/console/clients/checker/checks/mod.rs @@ -0,0 +1,3 @@ +pub mod health; +pub mod http; +pub mod udp; diff --git a/src/console/clients/checker/checks/udp.rs b/src/console/clients/checker/checks/udp.rs new file mode 100644 index 000000000..890375b75 --- /dev/null +++ b/src/console/clients/checker/checks/udp.rs @@ -0,0 +1,87 @@ +use std::net::SocketAddr; + +use aquatic_udp_protocol::{Port, TransactionId}; +use colored::Colorize; +use hex_literal::hex; +use log::debug; + +use crate::console::clients::checker::console::Console; +use crate::console::clients::checker::printer::Printer; +use crate::console::clients::checker::service::{CheckError, CheckResult}; +use crate::console::clients::udp::checker; +use crate::shared::bit_torrent::info_hash::InfoHash; + +const ASSIGNED_BY_OS: u16 = 0; +const RANDOM_TRANSACTION_ID: i32 = -888_840_697; + +pub async fn run(udp_trackers: &Vec, console: 
&Console, check_results: &mut Vec) { + console.println("UDP trackers ..."); + + for udp_tracker in udp_trackers { + debug!("UDP tracker: {:?}", udp_tracker); + + let colored_tracker_url = udp_tracker.to_string().yellow(); + + let transaction_id = TransactionId(RANDOM_TRANSACTION_ID); + + let mut client = checker::Client::default(); + + debug!("Bind and connect"); + + let Ok(bound_to) = client.bind_and_connect(ASSIGNED_BY_OS, udp_tracker).await else { + check_results.push(Err(CheckError::UdpError { + socket_addr: *udp_tracker, + })); + console.println(&format!("{} - Can't connect to socket {}", "✗".red(), colored_tracker_url)); + break; + }; + + debug!("Send connection request"); + + let Ok(connection_id) = client.send_connection_request(transaction_id).await else { + check_results.push(Err(CheckError::UdpError { + socket_addr: *udp_tracker, + })); + console.println(&format!( + "{} - Can't make tracker connection request to {}", + "✗".red(), + colored_tracker_url + )); + break; + }; + + let info_hash = InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422")); // # DevSkim: ignore DS173237 + + debug!("Send announce request"); + + if (client + .send_announce_request(connection_id, transaction_id, info_hash, Port(bound_to.port())) + .await) + .is_ok() + { + check_results.push(Ok(())); + console.println(&format!("{} - Announce at {} is OK", "✓".green(), colored_tracker_url)); + } else { + let err = CheckError::UdpError { + socket_addr: *udp_tracker, + }; + check_results.push(Err(err)); + console.println(&format!("{} - Announce at {} is failing", "✗".red(), colored_tracker_url)); + } + + debug!("Send scrape request"); + + let info_hashes = vec![InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422"))]; // # DevSkim: ignore DS173237 + + if (client.send_scrape_request(connection_id, transaction_id, info_hashes).await).is_ok() { + check_results.push(Ok(())); + console.println(&format!("{} - Announce at {} is OK", "✓".green(), colored_tracker_url)); + } else { + let err 
= CheckError::UdpError { + socket_addr: *udp_tracker, + }; + check_results.push(Err(err)); + console.println(&format!("{} - Announce at {} is failing", "✗".red(), colored_tracker_url)); + } + } +} diff --git a/src/console/clients/checker/mod.rs b/src/console/clients/checker/mod.rs index 6a55141d5..d26a4a686 100644 --- a/src/console/clients/checker/mod.rs +++ b/src/console/clients/checker/mod.rs @@ -1,4 +1,5 @@ pub mod app; +pub mod checks; pub mod config; pub mod console; pub mod logger; diff --git a/src/console/clients/checker/service.rs b/src/console/clients/checker/service.rs index 40db30b90..62ac65636 100644 --- a/src/console/clients/checker/service.rs +++ b/src/console/clients/checker/service.rs @@ -3,25 +3,20 @@ use std::str::FromStr; use std::sync::Arc; use std::time::Duration; -use aquatic_udp_protocol::{Port, TransactionId}; use colored::Colorize; -use hex_literal::hex; use log::debug; use reqwest::{Client as HttpClient, Url}; +use super::checks; use super::config::Configuration; use super::console::Console; use crate::console::clients::checker::printer::Printer; -use crate::console::clients::udp::checker; use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; use crate::shared::bit_torrent::tracker::http::client::responses::scrape; use crate::shared::bit_torrent::tracker::http::client::{requests, Client}; -const ASSIGNED_BY_OS: u16 = 0; -const RANDOM_TRANSACTION_ID: i32 = -888_840_697; - pub struct Service { pub(crate) config: Arc, pub(crate) console: Console, @@ -45,7 +40,7 @@ impl Service { let mut check_results = vec![]; - self.check_udp_trackers(&mut check_results).await; + checks::udp::run(&self.config.udp_trackers, &self.console, &mut check_results).await; self.check_http_trackers(&mut check_results).await; @@ -54,83 +49,6 @@ impl Service { check_results } - async fn 
check_udp_trackers(&self, check_results: &mut Vec) { - self.console.println("UDP trackers ..."); - - for udp_tracker in &self.config.udp_trackers { - debug!("UDP tracker: {:?}", udp_tracker); - - let colored_tracker_url = udp_tracker.to_string().yellow(); - - let transaction_id = TransactionId(RANDOM_TRANSACTION_ID); - - let mut client = checker::Client::default(); - - debug!("Bind and connect"); - - let Ok(bound_to) = client.bind_and_connect(ASSIGNED_BY_OS, udp_tracker).await else { - check_results.push(Err(CheckError::UdpError { - socket_addr: *udp_tracker, - })); - self.console - .println(&format!("{} - Can't connect to socket {}", "✗".red(), colored_tracker_url)); - break; - }; - - debug!("Send connection request"); - - let Ok(connection_id) = client.send_connection_request(transaction_id).await else { - check_results.push(Err(CheckError::UdpError { - socket_addr: *udp_tracker, - })); - self.console.println(&format!( - "{} - Can't make tracker connection request to {}", - "✗".red(), - colored_tracker_url - )); - break; - }; - - let info_hash = InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422")); // # DevSkim: ignore DS173237 - - debug!("Send announce request"); - - if (client - .send_announce_request(connection_id, transaction_id, info_hash, Port(bound_to.port())) - .await) - .is_ok() - { - check_results.push(Ok(())); - self.console - .println(&format!("{} - Announce at {} is OK", "✓".green(), colored_tracker_url)); - } else { - let err = CheckError::UdpError { - socket_addr: *udp_tracker, - }; - check_results.push(Err(err)); - self.console - .println(&format!("{} - Announce at {} is failing", "✗".red(), colored_tracker_url)); - } - - debug!("Send scrape request"); - - let info_hashes = vec![InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422"))]; // # DevSkim: ignore DS173237 - - if (client.send_scrape_request(connection_id, transaction_id, info_hashes).await).is_ok() { - check_results.push(Ok(())); - self.console - .println(&format!("{} - Announce 
at {} is OK", "✓".green(), colored_tracker_url)); - } else { - let err = CheckError::UdpError { - socket_addr: *udp_tracker, - }; - check_results.push(Err(err)); - self.console - .println(&format!("{} - Announce at {} is failing", "✗".red(), colored_tracker_url)); - } - } - } - async fn check_http_trackers(&self, check_results: &mut Vec) { self.console.println("HTTP trackers ..."); From 70924eddd06b4abd295bd57d553c6b92857eab10 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 1 Feb 2024 16:58:40 +0000 Subject: [PATCH 0732/1003] refactor: [#639] Tracker Checker: extract mod for HTTP checks --- src/console/clients/checker/checks/http.rs | 95 ++++++++++++++++++++++ src/console/clients/checker/service.rs | 93 +-------------------- 2 files changed, 96 insertions(+), 92 deletions(-) diff --git a/src/console/clients/checker/checks/http.rs b/src/console/clients/checker/checks/http.rs index e69de29bb..df1e9bc9a 100644 --- a/src/console/clients/checker/checks/http.rs +++ b/src/console/clients/checker/checks/http.rs @@ -0,0 +1,95 @@ +use std::str::FromStr; + +use colored::Colorize; +use log::debug; +use reqwest::Url as ServiceUrl; +use url::Url; + +use crate::console::clients::checker::console::Console; +use crate::console::clients::checker::printer::Printer; +use crate::console::clients::checker::service::{CheckError, CheckResult}; +use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; +use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; +use crate::shared::bit_torrent::tracker::http::client::responses::scrape; +use crate::shared::bit_torrent::tracker::http::client::{requests, Client}; + +pub async fn run(http_trackers: &Vec, console: &Console, check_results: &mut Vec) { + console.println("HTTP trackers ..."); + + for http_tracker in http_trackers { + let colored_tracker_url = http_tracker.to_string().yellow(); + + match 
check_http_announce(http_tracker).await { + Ok(()) => { + check_results.push(Ok(())); + console.println(&format!("{} - Announce at {} is OK", "✓".green(), colored_tracker_url)); + } + Err(err) => { + check_results.push(Err(err)); + console.println(&format!("{} - Announce at {} is failing", "✗".red(), colored_tracker_url)); + } + } + + match check_http_scrape(http_tracker).await { + Ok(()) => { + check_results.push(Ok(())); + console.println(&format!("{} - Scrape at {} is OK", "✓".green(), colored_tracker_url)); + } + Err(err) => { + check_results.push(Err(err)); + console.println(&format!("{} - Scrape at {} is failing", "✗".red(), colored_tracker_url)); + } + } + } +} + +async fn check_http_announce(tracker_url: &Url) -> Result<(), CheckError> { + let info_hash_str = "9c38422213e30bff212b30c360d26f9a02136422".to_string(); // # DevSkim: ignore DS173237 + let info_hash = InfoHash::from_str(&info_hash_str).expect("a valid info-hash is required"); + + // todo: HTTP request could panic.For example, if the server is not accessible. + // We should change the client to catch that error and return a `CheckError`. + // Otherwise the checking process will stop. The idea is to process all checks + // and return a final report. 
+ let response = Client::new(tracker_url.clone()) + .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) + .await; + + if let Ok(body) = response.bytes().await { + if let Ok(_announce_response) = serde_bencode::from_bytes::(&body) { + Ok(()) + } else { + debug!("announce body {:#?}", body); + Err(CheckError::HttpError { + url: tracker_url.clone(), + }) + } + } else { + Err(CheckError::HttpError { + url: tracker_url.clone(), + }) + } +} + +async fn check_http_scrape(url: &Url) -> Result<(), CheckError> { + let info_hashes: Vec = vec!["9c38422213e30bff212b30c360d26f9a02136422".to_string()]; // # DevSkim: ignore DS173237 + let query = requests::scrape::Query::try_from(info_hashes).expect("a valid array of info-hashes is required"); + + // todo: HTTP request could panic.For example, if the server is not accessible. + // We should change the client to catch that error and return a `CheckError`. + // Otherwise the checking process will stop. The idea is to process all checks + // and return a final report. 
+ let response = Client::new(url.clone()).scrape(&query).await; + + if let Ok(body) = response.bytes().await { + if let Ok(_scrape_response) = scrape::Response::try_from_bencoded(&body) { + Ok(()) + } else { + debug!("scrape body {:#?}", body); + Err(CheckError::HttpError { url: url.clone() }) + } + } else { + Err(CheckError::HttpError { url: url.clone() }) + } +} diff --git a/src/console/clients/checker/service.rs b/src/console/clients/checker/service.rs index 62ac65636..163b8f205 100644 --- a/src/console/clients/checker/service.rs +++ b/src/console/clients/checker/service.rs @@ -1,21 +1,14 @@ use std::net::SocketAddr; -use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use colored::Colorize; -use log::debug; use reqwest::{Client as HttpClient, Url}; use super::checks; use super::config::Configuration; use super::console::Console; use crate::console::clients::checker::printer::Printer; -use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; -use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; -use crate::shared::bit_torrent::tracker::http::client::responses::scrape; -use crate::shared::bit_torrent::tracker::http::client::{requests, Client}; pub struct Service { pub(crate) config: Arc, @@ -42,47 +35,13 @@ impl Service { checks::udp::run(&self.config.udp_trackers, &self.console, &mut check_results).await; - self.check_http_trackers(&mut check_results).await; + checks::http::run(&self.config.http_trackers, &self.console, &mut check_results).await; self.run_health_checks(&mut check_results).await; check_results } - async fn check_http_trackers(&self, check_results: &mut Vec) { - self.console.println("HTTP trackers ..."); - - for http_tracker in &self.config.http_trackers { - let colored_tracker_url = http_tracker.to_string().yellow(); - - match self.check_http_announce(http_tracker).await { - Ok(()) => { - 
check_results.push(Ok(())); - self.console - .println(&format!("{} - Announce at {} is OK", "✓".green(), colored_tracker_url)); - } - Err(err) => { - check_results.push(Err(err)); - self.console - .println(&format!("{} - Announce at {} is failing", "✗".red(), colored_tracker_url)); - } - } - - match self.check_http_scrape(http_tracker).await { - Ok(()) => { - check_results.push(Ok(())); - self.console - .println(&format!("{} - Scrape at {} is OK", "✓".green(), colored_tracker_url)); - } - Err(err) => { - check_results.push(Err(err)); - self.console - .println(&format!("{} - Scrape at {} is failing", "✗".red(), colored_tracker_url)); - } - } - } - } - async fn run_health_checks(&self, check_results: &mut Vec) { self.console.println("Health checks ..."); @@ -94,56 +53,6 @@ impl Service { } } - async fn check_http_announce(&self, tracker_url: &Url) -> Result<(), CheckError> { - let info_hash_str = "9c38422213e30bff212b30c360d26f9a02136422".to_string(); // # DevSkim: ignore DS173237 - let info_hash = InfoHash::from_str(&info_hash_str).expect("a valid info-hash is required"); - - // todo: HTTP request could panic.For example, if the server is not accessible. - // We should change the client to catch that error and return a `CheckError`. - // Otherwise the checking process will stop. The idea is to process all checks - // and return a final report. 
- let response = Client::new(tracker_url.clone()) - .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) - .await; - - if let Ok(body) = response.bytes().await { - if let Ok(_announce_response) = serde_bencode::from_bytes::(&body) { - Ok(()) - } else { - debug!("announce body {:#?}", body); - Err(CheckError::HttpError { - url: tracker_url.clone(), - }) - } - } else { - Err(CheckError::HttpError { - url: tracker_url.clone(), - }) - } - } - - async fn check_http_scrape(&self, url: &Url) -> Result<(), CheckError> { - let info_hashes: Vec = vec!["9c38422213e30bff212b30c360d26f9a02136422".to_string()]; // # DevSkim: ignore DS173237 - let query = requests::scrape::Query::try_from(info_hashes).expect("a valid array of info-hashes is required"); - - // todo: HTTP request could panic.For example, if the server is not accessible. - // We should change the client to catch that error and return a `CheckError`. - // Otherwise the checking process will stop. The idea is to process all checks - // and return a final report. 
- let response = Client::new(url.clone()).scrape(&query).await; - - if let Ok(body) = response.bytes().await { - if let Ok(_scrape_response) = scrape::Response::try_from_bencoded(&body) { - Ok(()) - } else { - debug!("scrape body {:#?}", body); - Err(CheckError::HttpError { url: url.clone() }) - } - } else { - Err(CheckError::HttpError { url: url.clone() }) - } - } - async fn run_health_check(&self, url: Url) -> Result<(), CheckError> { let client = HttpClient::builder().timeout(Duration::from_secs(5)).build().unwrap(); From 873f98d872d8b0f17cb8d5b805db5d4d2a8ae954 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 1 Feb 2024 17:04:16 +0000 Subject: [PATCH 0733/1003] refactor: [#639] Tracker Checker: extract mod for Health checks --- src/console/clients/checker/checks/health.rs | 51 ++++++++++++++++++++ src/console/clients/checker/service.rs | 50 +------------------ 2 files changed, 53 insertions(+), 48 deletions(-) diff --git a/src/console/clients/checker/checks/health.rs b/src/console/clients/checker/checks/health.rs index e69de29bb..9c28da514 100644 --- a/src/console/clients/checker/checks/health.rs +++ b/src/console/clients/checker/checks/health.rs @@ -0,0 +1,51 @@ +use std::time::Duration; + +use colored::Colorize; +use reqwest::{Client as HttpClient, Url, Url as ServiceUrl}; + +use crate::console::clients::checker::console::Console; +use crate::console::clients::checker::printer::Printer; +use crate::console::clients::checker::service::{CheckError, CheckResult}; + +pub async fn run(health_checks: &Vec, console: &Console, check_results: &mut Vec) { + console.println("Health checks ..."); + + for health_check_url in health_checks { + match run_health_check(health_check_url.clone(), console).await { + Ok(()) => check_results.push(Ok(())), + Err(err) => check_results.push(Err(err)), + } + } +} + +async fn run_health_check(url: Url, console: &Console) -> Result<(), CheckError> { + let client = 
HttpClient::builder().timeout(Duration::from_secs(5)).build().unwrap(); + + let colored_url = url.to_string().yellow(); + + match client.get(url.clone()).send().await { + Ok(response) => { + if response.status().is_success() { + console.println(&format!("{} - Health API at {} is OK", "✓".green(), colored_url)); + Ok(()) + } else { + console.eprintln(&format!( + "{} - Health API at {} is failing: {:?}", + "✗".red(), + colored_url, + response + )); + Err(CheckError::HealthCheckError { url }) + } + } + Err(err) => { + console.eprintln(&format!( + "{} - Health API at {} is failing: {:?}", + "✗".red(), + colored_url, + err + )); + Err(CheckError::HealthCheckError { url }) + } + } +} diff --git a/src/console/clients/checker/service.rs b/src/console/clients/checker/service.rs index 163b8f205..94eff4a88 100644 --- a/src/console/clients/checker/service.rs +++ b/src/console/clients/checker/service.rs @@ -1,9 +1,7 @@ use std::net::SocketAddr; use std::sync::Arc; -use std::time::Duration; -use colored::Colorize; -use reqwest::{Client as HttpClient, Url}; +use reqwest::Url; use super::checks; use super::config::Configuration; @@ -37,52 +35,8 @@ impl Service { checks::http::run(&self.config.http_trackers, &self.console, &mut check_results).await; - self.run_health_checks(&mut check_results).await; + checks::health::run(&self.config.health_checks, &self.console, &mut check_results).await; check_results } - - async fn run_health_checks(&self, check_results: &mut Vec) { - self.console.println("Health checks ..."); - - for health_check_url in &self.config.health_checks { - match self.run_health_check(health_check_url.clone()).await { - Ok(()) => check_results.push(Ok(())), - Err(err) => check_results.push(Err(err)), - } - } - } - - async fn run_health_check(&self, url: Url) -> Result<(), CheckError> { - let client = HttpClient::builder().timeout(Duration::from_secs(5)).build().unwrap(); - - let colored_url = url.to_string().yellow(); - - match client.get(url.clone()).send().await { 
- Ok(response) => { - if response.status().is_success() { - self.console - .println(&format!("{} - Health API at {} is OK", "✓".green(), colored_url)); - Ok(()) - } else { - self.console.eprintln(&format!( - "{} - Health API at {} is failing: {:?}", - "✗".red(), - colored_url, - response - )); - Err(CheckError::HealthCheckError { url }) - } - } - Err(err) => { - self.console.eprintln(&format!( - "{} - Health API at {} is failing: {:?}", - "✗".red(), - colored_url, - err - )); - Err(CheckError::HealthCheckError { url }) - } - } - } } From 36fcee7d81ecfe8c9475388de40323dac3d7f65d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 12 Feb 2024 07:34:12 +0000 Subject: [PATCH 0734/1003] fix: [#691] unknown feature stdsimd ``` error[E0635]: unknown feature `stdsimd` --> /home/josecelano/.cargo/registry/src/index.crates.io-6f17d22bba15001f/ahash-0.7.7/src/lib.rs:33:42 | 33 | #![cfg_attr(feature = "stdsimd", feature(stdsimd))] | ^^^^^^^ Compiling aho-corasick v1.1.2 For more information about this error, try `rustc --explain E0635`. error: could not compile `ahash` (lib) due to 1 previous error warning: build failed, waiting for other jobs to finish... 
``` ``` With: ``` nightly-x86_64-unknown-linux-gnu (default) rustc 1.78.0-nightly (98aa3624b 2024-02-08) ``` --- Cargo.lock | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fc45cfc57..6b04e273b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -19,9 +19,9 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ "getrandom", "once_cell", @@ -30,9 +30,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" dependencies = [ "cfg-if", "once_cell", @@ -1262,7 +1262,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash 0.7.7", + "ahash 0.7.8", ] [[package]] @@ -1271,7 +1271,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.8", ] [[package]] @@ -1280,7 +1280,7 @@ version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.8", "allocator-api2", ] From 945e91f36e4391d6b30d54474c087576a697208a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 19 Feb 2024 10:28:48 +0000 Subject: [PATCH 0735/1003] chore: 
[#696] add cargo dependencies for logging - `tower-http` - `trace` - `tracing` --- Cargo.lock | 27 +++++++++++++++++++++++++++ Cargo.toml | 4 +++- cSpell.json | 1 + 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 6b04e273b..aae5deb9e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3479,6 +3479,8 @@ dependencies = [ "torrust-tracker-primitives", "torrust-tracker-test-helpers", "tower-http", + "trace", + "tracing", "url", "uuid", ] @@ -3567,6 +3569,8 @@ dependencies = [ "tokio-util", "tower-layer", "tower-service", + "tracing", + "uuid", ] [[package]] @@ -3581,6 +3585,17 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +[[package]] +name = "trace" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ad0c048e114d19d1140662762bfdb10682f3bc806d8be18af846600214dd9af" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "tracing" version = "0.1.40" @@ -3589,9 +3604,21 @@ checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", "pin-project-lite", + "tracing-attributes", "tracing-core", ] +[[package]] +name = "tracing-attributes" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "tracing-core" version = "0.1.32" diff --git a/Cargo.toml b/Cargo.toml index bd04e1cc1..83134d8f0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -67,7 +67,7 @@ torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "pa torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12-develop", path = "contrib/bencode" } torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = 
"packages/located-error" } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "packages/primitives" } -tower-http = { version = "0", features = ["compression-full"] } +tower-http = { version = "0", features = ["compression-full", "cors", "trace", "propagate-header", "request-id"] } uuid = { version = "1", features = ["v4"] } colored = "2.1.0" url = "2.5.0" @@ -75,6 +75,8 @@ tempfile = "3.9.0" clap = { version = "4.4.18", features = ["derive", "env"]} anyhow = "1.0.79" hex-literal = "0.4.1" +trace = "0.1.7" +tracing = "0.1.40" [dev-dependencies] criterion = { version = "0.5.1", features = ["async_tokio"] } diff --git a/cSpell.json b/cSpell.json index aaa3229c2..646037e59 100644 --- a/cSpell.json +++ b/cSpell.json @@ -117,6 +117,7 @@ "Swatinem", "Swiftbit", "taiki", + "tdyne", "tempfile", "thiserror", "tlsv", From 32727caff159f3b3200ad7e53f1f95f56cc4ef6b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 19 Feb 2024 10:30:01 +0000 Subject: [PATCH 0736/1003] feat: [#696] API, log request and responses --- src/servers/apis/routes.rs | 52 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index 227916335..aed3ee19d 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -6,11 +6,20 @@ //! All the API routes have the `/api` prefix and the version number as the //! first path segment. For example: `/api/v1/torrents`. 
use std::sync::Arc; +use std::time::Duration; +use axum::http::{HeaderName, HeaderValue}; +use axum::response::Response; use axum::routing::get; use axum::{middleware, Router}; +use hyper::Request; use torrust_tracker_configuration::AccessTokens; use tower_http::compression::CompressionLayer; +use tower_http::propagate_header::PropagateHeaderLayer; +use tower_http::request_id::{MakeRequestId, RequestId, SetRequestIdLayer}; +use tower_http::trace::{DefaultMakeSpan, TraceLayer}; +use tracing::{Level, Span}; +use uuid::Uuid; use super::v1; use super::v1::context::health_check::handlers::health_check_handler; @@ -32,4 +41,47 @@ pub fn router(tracker: Arc, access_tokens: Arc) -> Router .layer(middleware::from_fn_with_state(state, v1::middlewares::auth::auth)) .route(&format!("{api_url_prefix}/health_check"), get(health_check_handler)) .layer(CompressionLayer::new()) + .layer(SetRequestIdLayer::x_request_id(RequestIdGenerator)) + .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id"))) + .layer( + TraceLayer::new_for_http() + .make_span_with(DefaultMakeSpan::new().level(Level::INFO)) + .on_request(|request: &Request, _span: &Span| { + let method = request.method().to_string(); + let uri = request.uri().to_string(); + let request_id = request + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + + tracing::span!( + target: "API", + tracing::Level::INFO, "request", method = %method, uri = %uri, request_id = %request_id); + }) + .on_response(|response: &Response, latency: Duration, _span: &Span| { + let status_code = response.status(); + let request_id = response + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + let latency_ms = latency.as_millis(); + + tracing::span!( + target: "API", + tracing::Level::INFO, "response", latency = %latency_ms, status = %status_code, request_id = %request_id); + }), + ) + 
.layer(SetRequestIdLayer::x_request_id(RequestIdGenerator)) +} + +#[derive(Clone, Default)] +struct RequestIdGenerator; + +impl MakeRequestId for RequestIdGenerator { + fn make_request_id(&mut self, _request: &Request) -> Option { + let id = HeaderValue::from_str(&Uuid::new_v4().to_string()).expect("UUID is a valid HTTP header value"); + Some(RequestId::new(id)) + } } From 5ccdf10a3fd814702fed72a17428fdc5b6996263 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 19 Feb 2024 10:38:12 +0000 Subject: [PATCH 0737/1003] fix: build errors `imported redundantly` --- contrib/bencode/src/mutable/encode.rs | 2 -- contrib/bencode/src/reference/bencode_ref.rs | 2 -- contrib/bencode/src/reference/decode.rs | 2 -- contrib/bencode/src/reference/decode_opt.rs | 2 -- src/console/clients/checker/config.rs | 3 +-- src/core/peer.rs | 1 - src/servers/apis/v1/context/auth_key/resources.rs | 1 - src/servers/http/v1/responses/error.rs | 2 +- src/shared/bit_torrent/tracker/http/client/requests/scrape.rs | 1 - .../bit_torrent/tracker/http/client/responses/announce.rs | 2 +- src/shared/bit_torrent/tracker/http/client/responses/error.rs | 2 +- src/shared/bit_torrent/tracker/http/client/responses/scrape.rs | 2 +- src/shared/crypto/keys.rs | 2 -- tests/servers/http/responses/announce.rs | 2 +- tests/servers/http/responses/error.rs | 2 +- tests/servers/http/responses/scrape.rs | 2 +- 16 files changed, 8 insertions(+), 22 deletions(-) diff --git a/contrib/bencode/src/mutable/encode.rs b/contrib/bencode/src/mutable/encode.rs index 811c35816..25c91b41d 100644 --- a/contrib/bencode/src/mutable/encode.rs +++ b/contrib/bencode/src/mutable/encode.rs @@ -1,5 +1,3 @@ -use std::iter::Extend; - use crate::access::bencode::{BRefAccess, RefKind}; use crate::access::dict::BDictAccess; use crate::access::list::BListAccess; diff --git a/contrib/bencode/src/reference/bencode_ref.rs b/contrib/bencode/src/reference/bencode_ref.rs index 760dd3016..a6f2c15bc 100644 --- 
a/contrib/bencode/src/reference/bencode_ref.rs +++ b/contrib/bencode/src/reference/bencode_ref.rs @@ -125,8 +125,6 @@ impl<'a> BRefAccessExt<'a> for BencodeRef<'a> { #[cfg(test)] mod tests { - use std::default::Default; - use crate::access::bencode::BRefAccess; use crate::reference::bencode_ref::BencodeRef; use crate::reference::decode_opt::BDecodeOpt; diff --git a/contrib/bencode/src/reference/decode.rs b/contrib/bencode/src/reference/decode.rs index d2aa180f8..d35d1b597 100644 --- a/contrib/bencode/src/reference/decode.rs +++ b/contrib/bencode/src/reference/decode.rs @@ -177,8 +177,6 @@ fn peek_byte(bytes: &[u8], pos: usize) -> BencodeParseResult { #[cfg(test)] mod tests { - use std::default::Default; - use crate::access::bencode::BRefAccess; use crate::reference::bencode_ref::BencodeRef; use crate::reference::decode_opt::BDecodeOpt; diff --git a/contrib/bencode/src/reference/decode_opt.rs b/contrib/bencode/src/reference/decode_opt.rs index e8d9a8337..8409cc72c 100644 --- a/contrib/bencode/src/reference/decode_opt.rs +++ b/contrib/bencode/src/reference/decode_opt.rs @@ -1,5 +1,3 @@ -use std::default::Default; - const DEFAULT_MAX_RECURSION: usize = 50; const DEFAULT_CHECK_KEY_SORT: bool = false; const DEFAULT_ENFORCE_FULL_DECODE: bool = true; diff --git a/src/console/clients/checker/config.rs b/src/console/clients/checker/config.rs index 0a2c09b03..6e44d889b 100644 --- a/src/console/clients/checker/config.rs +++ b/src/console/clients/checker/config.rs @@ -4,7 +4,6 @@ use std::net::SocketAddr; use reqwest::Url as ServiceUrl; use serde::Deserialize; -use url; /// It parses the configuration from a JSON format. 
/// @@ -88,7 +87,7 @@ impl TryFrom for Configuration { #[cfg(test)] mod tests { - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::net::{IpAddr, Ipv4Addr}; use super::*; diff --git a/src/core/peer.rs b/src/core/peer.rs index 03489ce30..16aa1fe56 100644 --- a/src/core/peer.rs +++ b/src/core/peer.rs @@ -24,7 +24,6 @@ use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use serde; use serde::Serialize; use thiserror::Error; diff --git a/src/servers/apis/v1/context/auth_key/resources.rs b/src/servers/apis/v1/context/auth_key/resources.rs index f4c7f34ca..99e93aaf9 100644 --- a/src/servers/apis/v1/context/auth_key/resources.rs +++ b/src/servers/apis/v1/context/auth_key/resources.rs @@ -1,5 +1,4 @@ //! API resources for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. -use std::convert::From; use serde::{Deserialize, Serialize}; diff --git a/src/servers/http/v1/responses/error.rs b/src/servers/http/v1/responses/error.rs index 606ead3b2..1cc31ad4e 100644 --- a/src/servers/http/v1/responses/error.rs +++ b/src/servers/http/v1/responses/error.rs @@ -13,7 +13,7 @@ //! code. use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; -use serde::{self, Serialize}; +use serde::Serialize; /// `Error` response for the [`HTTP tracker`](crate::servers::http). 
#[derive(Serialize, Debug, PartialEq)] diff --git a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs index d0268d1f8..4fa49eed6 100644 --- a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs +++ b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs @@ -1,4 +1,3 @@ -use std::convert::TryFrom; use std::error::Error; use std::fmt::{self}; use std::str::FromStr; diff --git a/src/shared/bit_torrent/tracker/http/client/responses/announce.rs b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs index f68c54482..e75cc6671 100644 --- a/src/shared/bit_torrent/tracker/http/client/responses/announce.rs +++ b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs @@ -1,6 +1,6 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use serde::{self, Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use crate::core::peer::Peer; diff --git a/src/shared/bit_torrent/tracker/http/client/responses/error.rs b/src/shared/bit_torrent/tracker/http/client/responses/error.rs index 12c53a0cf..00befdb54 100644 --- a/src/shared/bit_torrent/tracker/http/client/responses/error.rs +++ b/src/shared/bit_torrent/tracker/http/client/responses/error.rs @@ -1,4 +1,4 @@ -use serde::{self, Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Error { diff --git a/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs b/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs index ee301ee7a..25a2f0a81 100644 --- a/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs +++ b/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs @@ -3,7 +3,7 @@ use std::fmt::Write; use std::str; use serde::ser::SerializeMap; -use serde::{self, Deserialize, Serialize, Serializer}; +use serde::{Deserialize, Serialize, Serializer}; use serde_bencode::value::Value; use 
crate::shared::bit_torrent::tracker::http::{ByteArray20, InfoHash}; diff --git a/src/shared/crypto/keys.rs b/src/shared/crypto/keys.rs index 92e180996..deb70574f 100644 --- a/src/shared/crypto/keys.rs +++ b/src/shared/crypto/keys.rs @@ -86,8 +86,6 @@ pub mod seeds { #[cfg(test)] mod tests { - use std::convert::TryInto; - use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED; use crate::shared::crypto::keys::seeds::detail::ZEROED_TEST_SEED; use crate::shared::crypto::keys::seeds::CURRENT_SEED; diff --git a/tests/servers/http/responses/announce.rs b/tests/servers/http/responses/announce.rs index a57b41c78..968c327eb 100644 --- a/tests/servers/http/responses/announce.rs +++ b/tests/servers/http/responses/announce.rs @@ -1,6 +1,6 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use serde::{self, Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use torrust_tracker::core::peer::Peer; #[derive(Serialize, Deserialize, Debug, PartialEq)] diff --git a/tests/servers/http/responses/error.rs b/tests/servers/http/responses/error.rs index 12c53a0cf..00befdb54 100644 --- a/tests/servers/http/responses/error.rs +++ b/tests/servers/http/responses/error.rs @@ -1,4 +1,4 @@ -use serde::{self, Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Error { diff --git a/tests/servers/http/responses/scrape.rs b/tests/servers/http/responses/scrape.rs index 221ff0a38..eadecb603 100644 --- a/tests/servers/http/responses/scrape.rs +++ b/tests/servers/http/responses/scrape.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use std::str; -use serde::{self, Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use serde_bencode::value::Value; use crate::servers::http::{ByteArray20, InfoHash}; From e0836bd323cae1cbf8c0d1a92bb1566c0bd0f892 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 19 Feb 2024 11:33:56 +0000 Subject: [PATCH 0738/1003] feat: [#700] add logs to Health Check API `````` 
2024-02-19T11:30:21.061293933+00:00 [HEALTH CHECK API][INFO] request; method=GET uri=/health_check request_id=78933bbf-c4cf-4897-b972-4c0fd252159a 2024-02-19T11:30:21.070329733+00:00 [HEALTH CHECK API][INFO] response; latency=9 status=200 OK request_id=78933bbf-c4cf-4897-b972-4c0fd252159a ``` --- src/bootstrap/jobs/health_check_api.rs | 6 +- src/console/ci/e2e/logs_parser.rs | 8 +-- src/servers/health_check_api/server.rs | 58 ++++++++++++++++++- tests/servers/health_check_api/environment.rs | 8 +-- 4 files changed, 67 insertions(+), 13 deletions(-) diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index e57d1c151..eec4d81a8 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -47,18 +47,18 @@ pub async fn start_job(config: &HealthCheckApi, register: ServiceRegistry) -> Jo // Run the API server let join_handle = tokio::spawn(async move { - info!(target: "Health Check API", "Starting on: {protocol}://{}", bind_addr); + info!(target: "HEALTH CHECK API", "Starting on: {protocol}://{}", bind_addr); let handle = server::start(bind_addr, tx_start, rx_halt, register); if let Ok(()) = handle.await { - info!(target: "Health Check API", "Stopped server running on: {protocol}://{}", bind_addr); + info!(target: "HEALTH CHECK API", "Stopped server running on: {protocol}://{}", bind_addr); } }); // Wait until the server sends the started message match rx_start.await { - Ok(msg) => info!(target: "Health Check API", "Started on: {protocol}://{}", msg.address), + Ok(msg) => info!(target: "HEALTH CHECK API", "Started on: {protocol}://{}", msg.address), Err(e) => panic!("the Health Check API server was dropped: {e}"), } diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index 1d6baa23e..82e37f7d7 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; const UDP_TRACKER_PATTERN: &str 
= "[UDP Tracker][INFO] Starting on: udp://"; const HTTP_TRACKER_PATTERN: &str = "[HTTP Tracker][INFO] Starting on: "; -const HEALTH_CHECK_PATTERN: &str = "[Health Check API][INFO] Starting on: "; +const HEALTH_CHECK_PATTERN: &str = "[HEALTH CHECK API][INFO] Starting on: "; #[derive(Serialize, Deserialize, Debug, Default)] pub struct RunningServices { @@ -27,8 +27,8 @@ impl RunningServices { /// 2024-01-24T16:36:14.615716574+00:00 [torrust_tracker::bootstrap::jobs][INFO] TLS not enabled /// 2024-01-24T16:36:14.615764904+00:00 [API][INFO] Starting on http://127.0.0.1:1212 /// 2024-01-24T16:36:14.615767264+00:00 [API][INFO] Started on http://127.0.0.1:1212 - /// 2024-01-24T16:36:14.615777574+00:00 [Health Check API][INFO] Starting on: http://127.0.0.1:1313 - /// 2024-01-24T16:36:14.615791124+00:00 [Health Check API][INFO] Started on: http://127.0.0.1:1313 + /// 2024-01-24T16:36:14.615777574+00:00 [HEALTH CHECK API][INFO] Starting on: http://127.0.0.1:1313 + /// 2024-01-24T16:36:14.615791124+00:00 [HEALTH CHECK API][INFO] Started on: http://127.0.0.1:1313 /// ``` /// /// It would extract these services: @@ -88,7 +88,7 @@ mod tests { let logs = "\ [UDP Tracker][INFO] Starting on: udp://0.0.0.0:8080\n\ [HTTP Tracker][INFO] Starting on: 0.0.0.0:9090\n\ - [Health Check API][INFO] Starting on: 0.0.0.0:10010"; + [HEALTH CHECK API][INFO] Starting on: 0.0.0.0:10010"; let running_services = RunningServices::parse_from_logs(logs); assert_eq!(running_services.udp_trackers, vec!["127.0.0.1:8080"]); diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs index 8ba20691f..049f48d40 100644 --- a/src/servers/health_check_api/server.rs +++ b/src/servers/health_check_api/server.rs @@ -3,14 +3,24 @@ //! This API is intended to be used by the container infrastructure to check if //! the whole application is healthy. 
use std::net::SocketAddr; +use std::time::Duration; +use axum::http::{HeaderName, HeaderValue}; +use axum::response::Response; use axum::routing::get; use axum::{Json, Router}; use axum_server::Handle; use futures::Future; +use hyper::Request; use log::debug; use serde_json::json; use tokio::sync::oneshot::{Receiver, Sender}; +use tower_http::compression::CompressionLayer; +use tower_http::propagate_header::PropagateHeaderLayer; +use tower_http::request_id::{MakeRequestId, RequestId, SetRequestIdLayer}; +use tower_http::trace::{DefaultMakeSpan, TraceLayer}; +use tracing::{Level, Span}; +use uuid::Uuid; use crate::bootstrap::jobs::Started; use crate::servers::health_check_api::handlers::health_check_handler; @@ -31,14 +41,48 @@ pub fn start( let router = Router::new() .route("/", get(|| async { Json(json!({})) })) .route("/health_check", get(health_check_handler)) - .with_state(register); + .with_state(register) + .layer(CompressionLayer::new()) + .layer(SetRequestIdLayer::x_request_id(RequestIdGenerator)) + .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id"))) + .layer( + TraceLayer::new_for_http() + .make_span_with(DefaultMakeSpan::new().level(Level::INFO)) + .on_request(|request: &Request, _span: &Span| { + let method = request.method().to_string(); + let uri = request.uri().to_string(); + let request_id = request + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + + tracing::span!( + target: "HEALTH CHECK API", + tracing::Level::INFO, "request", method = %method, uri = %uri, request_id = %request_id); + }) + .on_response(|response: &Response, latency: Duration, _span: &Span| { + let status_code = response.status(); + let request_id = response + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + let latency_ms = latency.as_millis(); + + tracing::span!( + target: "HEALTH CHECK API", + tracing::Level::INFO, "response", latency = %latency_ms, 
status = %status_code, request_id = %request_id); + }), + ) + .layer(SetRequestIdLayer::x_request_id(RequestIdGenerator)); let socket = std::net::TcpListener::bind(bind_to).expect("Could not bind tcp_listener to address."); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); let handle = Handle::new(); - debug!(target: "Health Check API", "Starting service with graceful shutdown in a spawned task ..."); + debug!(target: "HEALTH CHECK API", "Starting service with graceful shutdown in a spawned task ..."); tokio::task::spawn(graceful_shutdown( handle.clone(), @@ -55,3 +99,13 @@ pub fn start( running } + +#[derive(Clone, Default)] +struct RequestIdGenerator; + +impl MakeRequestId for RequestIdGenerator { + fn make_request_id(&mut self, _request: &Request) -> Option { + let id = HeaderValue::from_str(&Uuid::new_v4().to_string()).expect("UUID is a valid HTTP header value"); + Some(RequestId::new(id)) + } +} diff --git a/tests/servers/health_check_api/environment.rs b/tests/servers/health_check_api/environment.rs index c98784282..37344858d 100644 --- a/tests/servers/health_check_api/environment.rs +++ b/tests/servers/health_check_api/environment.rs @@ -51,21 +51,21 @@ impl Environment { let register = self.registar.entries(); - debug!(target: "Health Check API", "Spawning task to launch the service ..."); + debug!(target: "HEALTH CHECK API", "Spawning task to launch the service ..."); let server = tokio::spawn(async move { - debug!(target: "Health Check API", "Starting the server in a spawned task ..."); + debug!(target: "HEALTH CHECK API", "Starting the server in a spawned task ..."); server::start(self.state.bind_to, tx_start, rx_halt, register) .await .expect("it should start the health check service"); - debug!(target: "Health Check API", "Server started. Sending the binding {} ...", self.state.bind_to); + debug!(target: "HEALTH CHECK API", "Server started. 
Sending the binding {} ...", self.state.bind_to); self.state.bind_to }); - debug!(target: "Health Check API", "Waiting for spawning task to send the binding ..."); + debug!(target: "HEALTH CHECK API", "Waiting for spawning task to send the binding ..."); let binding = rx_start.await.expect("it should send service binding").address; From 30ae6dfd94859768c4b8f9121ad260f0a5cb9f3c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 19 Feb 2024 13:01:19 +0000 Subject: [PATCH 0739/1003] feat: [#697] add logs to HTTP tracker Sample logs: ``` 2024-02-19T13:29:02.301023716+00:00 [HTTP TRACKER][INFO] request; server_socket_addr=0.0.0.0:7070 method=GET uri=/scrape?info_hash=%44%3C%76%02%B4%FD%E8%3D%11%54%D6%D9%DA%48%80%84%18%B1%81%B6 request_id=2c4aa57d-dd12-4cb8-95d1-e8193627c106 2024-02-19T13:29:02.301095545+00:00 [HTTP TRACKER][INFO] response; server_socket_addr=0.0.0.0:7070 latency=0 status=200 OK request_id=2c4aa57d-dd12-4cb8-95d1-e8193627c106 ``` --- src/console/ci/e2e/logs_parser.rs | 8 ++--- src/servers/http/server.rs | 7 ++-- src/servers/http/v1/routes.rs | 55 ++++++++++++++++++++++++++++++- 3 files changed, 62 insertions(+), 8 deletions(-) diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index 82e37f7d7..ca4d6099c 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize}; const UDP_TRACKER_PATTERN: &str = "[UDP Tracker][INFO] Starting on: udp://"; -const HTTP_TRACKER_PATTERN: &str = "[HTTP Tracker][INFO] Starting on: "; +const HTTP_TRACKER_PATTERN: &str = "[HTTP TRACKER][INFO] Starting on: "; const HEALTH_CHECK_PATTERN: &str = "[HEALTH CHECK API][INFO] Starting on: "; #[derive(Serialize, Deserialize, Debug, Default)] @@ -22,8 +22,8 @@ impl RunningServices { /// 2024-01-24T16:36:14.614898789+00:00 [torrust_tracker::bootstrap::logging][INFO] logging initialized. 
/// 2024-01-24T16:36:14.615586025+00:00 [UDP Tracker][INFO] Starting on: udp://0.0.0.0:6969 /// 2024-01-24T16:36:14.615623705+00:00 [torrust_tracker::bootstrap::jobs][INFO] TLS not enabled - /// 2024-01-24T16:36:14.615694484+00:00 [HTTP Tracker][INFO] Starting on: http://0.0.0.0:7070 - /// 2024-01-24T16:36:14.615710534+00:00 [HTTP Tracker][INFO] Started on: http://0.0.0.0:7070 + /// 2024-01-24T16:36:14.615694484+00:00 [HTTP TRACKER][INFO] Starting on: http://0.0.0.0:7070 + /// 2024-01-24T16:36:14.615710534+00:00 [HTTP TRACKER][INFO] Started on: http://0.0.0.0:7070 /// 2024-01-24T16:36:14.615716574+00:00 [torrust_tracker::bootstrap::jobs][INFO] TLS not enabled /// 2024-01-24T16:36:14.615764904+00:00 [API][INFO] Starting on http://127.0.0.1:1212 /// 2024-01-24T16:36:14.615767264+00:00 [API][INFO] Started on http://127.0.0.1:1212 @@ -87,7 +87,7 @@ mod tests { fn it_should_parse_from_logs_with_valid_logs() { let logs = "\ [UDP Tracker][INFO] Starting on: udp://0.0.0.0:8080\n\ - [HTTP Tracker][INFO] Starting on: 0.0.0.0:9090\n\ + [HTTP TRACKER][INFO] Starting on: 0.0.0.0:9090\n\ [HEALTH CHECK API][INFO] Starting on: 0.0.0.0:10010"; let running_services = RunningServices::parse_from_logs(logs); diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 20e57db57..decc734c5 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -40,7 +40,6 @@ pub struct Launcher { impl Launcher { fn start(&self, tracker: Arc, tx_start: Sender, rx_halt: Receiver) -> BoxFuture<'static, ()> { - let app = router(tracker); let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); @@ -55,7 +54,9 @@ impl Launcher { let tls = self.tls.clone(); let protocol = if tls.is_some() { "https" } else { "http" }; - info!(target: "HTTP Tracker", "Starting on: {protocol}://{}", address); + info!(target: "HTTP TRACKER", "Starting on: 
{protocol}://{}", address); + + let app = router(tracker, address); let running = Box::pin(async { match tls { @@ -72,7 +73,7 @@ impl Launcher { } }); - info!(target: "HTTP Tracker", "Started on: {protocol}://{}", address); + info!(target: "HTTP TRACKER", "Started on: {protocol}://{}", address); tx_start .send(Started { address }) diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 20e96d7fd..b972cf62f 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -1,10 +1,20 @@ //! HTTP server routes for version `v1`. +use std::net::SocketAddr; use std::sync::Arc; +use std::time::Duration; +use axum::http::{HeaderName, HeaderValue}; +use axum::response::Response; use axum::routing::get; use axum::Router; use axum_client_ip::SecureClientIpSource; +use hyper::Request; use tower_http::compression::CompressionLayer; +use tower_http::propagate_header::PropagateHeaderLayer; +use tower_http::request_id::{MakeRequestId, RequestId, SetRequestIdLayer}; +use tower_http::trace::{DefaultMakeSpan, TraceLayer}; +use tracing::{Level, Span}; +use uuid::Uuid; use super::handlers::{announce, health_check, scrape}; use crate::core::Tracker; @@ -14,7 +24,7 @@ use crate::core::Tracker; /// > **NOTICE**: it's added a layer to get the client IP from the connection /// info. The tracker could use the connection info to get the client IP. 
#[allow(clippy::needless_pass_by_value)] -pub fn router(tracker: Arc) -> Router { +pub fn router(tracker: Arc, server_socket_addr: SocketAddr) -> Router { Router::new() // Health check .route("/health_check", get(health_check::handler)) @@ -27,4 +37,47 @@ pub fn router(tracker: Arc) -> Router { // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) .layer(CompressionLayer::new()) + .layer(SetRequestIdLayer::x_request_id(RequestIdGenerator)) + .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id"))) + .layer( + TraceLayer::new_for_http() + .make_span_with(DefaultMakeSpan::new().level(Level::INFO)) + .on_request(move |request: &Request, _span: &Span| { + let method = request.method().to_string(); + let uri = request.uri().to_string(); + let request_id = request + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + + tracing::span!( + target:"HTTP TRACKER", + tracing::Level::INFO, "request", server_socket_addr= %server_socket_addr, method = %method, uri = %uri, request_id = %request_id); + }) + .on_response(move |response: &Response, latency: Duration, _span: &Span| { + let status_code = response.status(); + let request_id = response + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + let latency_ms = latency.as_millis(); + + tracing::span!( + target: "HTTP TRACKER", + tracing::Level::INFO, "response", server_socket_addr= %server_socket_addr, latency = %latency_ms, status = %status_code, request_id = %request_id); + }), + ) + .layer(SetRequestIdLayer::x_request_id(RequestIdGenerator)) +} + +#[derive(Clone, Default)] +struct RequestIdGenerator; + +impl MakeRequestId for RequestIdGenerator { + fn make_request_id(&mut self, _request: &Request) -> Option { + let id = HeaderValue::from_str(&Uuid::new_v4().to_string()).expect("UUID is a valid HTTP header value"); + 
Some(RequestId::new(id)) + } } From 4a2d902aa3dab5cf82f9999988dfee560757b537 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 19 Feb 2024 16:11:38 +0000 Subject: [PATCH 0740/1003] feat: [#698] refactor UDP logs Use `tracing` crate format: ``` 2024-02-19T17:10:05.243973520+00:00 [UDP TRACKER][INFO] request; server_socket_addr=0.0.0.0:6969 action=CONNECT transaction_id=-888840697 request_id=03b92de0-c9f8-4c40-a808-5d706ce770f4 2024-02-19T17:10:05.244016141+00:00 [UDP TRACKER][INFO] response; server_socket_addr=0.0.0.0:6969 transaction_id=-888840697 request_id=03b92de0-c9f8-4c40-a808-5d706ce770f4 2024-02-19T17:10:05.244042841+00:00 [UDP TRACKER][INFO] request; server_socket_addr=0.0.0.0:6969 action=ANNOUNCE transaction_id=-888840697 request_id=2113eb8c-61f4-476b-b3d5-02892f0a2fdb connection_id=-7190270103145546231 info_hash=9c38422213e30bff212b30c360d26f9a02136422 2024-02-19T17:10:05.244052082+00:00 [UDP TRACKER][INFO] response; server_socket_addr=0.0.0.0:6969 transaction_id=-888840697 request_id=2113eb8c-61f4-476b-b3d5-02892f0a2fdb ``` --- src/bootstrap/jobs/udp_tracker.rs | 6 +-- src/console/ci/e2e/logs_parser.rs | 6 +-- src/servers/udp/handlers.rs | 63 +++++++++++++++++++------- src/servers/udp/logging.rs | 73 +++++++++++++++++++++++++++++++ src/servers/udp/mod.rs | 1 + src/servers/udp/server.rs | 12 ++--- 6 files changed, 134 insertions(+), 27 deletions(-) create mode 100644 src/servers/udp/logging.rs diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 275ce1381..e9e4bc642 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -38,8 +38,8 @@ pub async fn start_job(config: &UdpTracker, tracker: Arc, form: S .expect("it should be able to start the udp tracker"); tokio::spawn(async move { - debug!(target: "UDP Tracker", "Wait for launcher (UDP service) to finish ..."); - debug!(target: "UDP Tracker", "Is halt channel closed before waiting?: {}", server.state.halt_task.is_closed()); + 
debug!(target: "UDP TRACKER", "Wait for launcher (UDP service) to finish ..."); + debug!(target: "UDP TRACKER", "Is halt channel closed before waiting?: {}", server.state.halt_task.is_closed()); assert!( !server.state.halt_task.is_closed(), @@ -52,6 +52,6 @@ pub async fn start_job(config: &UdpTracker, tracker: Arc, form: S .await .expect("it should be able to join to the udp tracker task"); - debug!(target: "UDP Tracker", "Is halt channel closed after finishing the server?: {}", server.state.halt_task.is_closed()); + debug!(target: "UDP TRACKER", "Is halt channel closed after finishing the server?: {}", server.state.halt_task.is_closed()); }) } diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index ca4d6099c..6d3349196 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -1,7 +1,7 @@ //! Utilities to parse Torrust Tracker logs. use serde::{Deserialize, Serialize}; -const UDP_TRACKER_PATTERN: &str = "[UDP Tracker][INFO] Starting on: udp://"; +const UDP_TRACKER_PATTERN: &str = "[UDP TRACKER][INFO] Starting on: udp://"; const HTTP_TRACKER_PATTERN: &str = "[HTTP TRACKER][INFO] Starting on: "; const HEALTH_CHECK_PATTERN: &str = "[HEALTH CHECK API][INFO] Starting on: "; @@ -20,7 +20,7 @@ impl RunningServices { /// ```text /// Loading default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... /// 2024-01-24T16:36:14.614898789+00:00 [torrust_tracker::bootstrap::logging][INFO] logging initialized. 
- /// 2024-01-24T16:36:14.615586025+00:00 [UDP Tracker][INFO] Starting on: udp://0.0.0.0:6969 + /// 2024-01-24T16:36:14.615586025+00:00 [UDP TRACKER][INFO] Starting on: udp://0.0.0.0:6969 /// 2024-01-24T16:36:14.615623705+00:00 [torrust_tracker::bootstrap::jobs][INFO] TLS not enabled /// 2024-01-24T16:36:14.615694484+00:00 [HTTP TRACKER][INFO] Starting on: http://0.0.0.0:7070 /// 2024-01-24T16:36:14.615710534+00:00 [HTTP TRACKER][INFO] Started on: http://0.0.0.0:7070 @@ -86,7 +86,7 @@ mod tests { #[test] fn it_should_parse_from_logs_with_valid_logs() { let logs = "\ - [UDP Tracker][INFO] Starting on: udp://0.0.0.0:8080\n\ + [UDP TRACKER][INFO] Starting on: udp://0.0.0.0:8080\n\ [HTTP TRACKER][INFO] Starting on: 0.0.0.0:9090\n\ [HEALTH CHECK API][INFO] Starting on: 0.0.0.0:10010"; let running_services = RunningServices::parse_from_logs(logs); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 65e3f5b20..f8424879f 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -1,4 +1,5 @@ //! Handlers for the UDP server. 
+use std::fmt; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::panic::Location; use std::sync::Arc; @@ -7,13 +8,16 @@ use aquatic_udp_protocol::{ AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; -use log::{debug, info}; +use log::debug; +use tokio::net::UdpSocket; use torrust_tracker_located_error::DynError; +use uuid::Uuid; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use super::UdpRequest; use crate::core::{statistics, ScrapeData, Tracker}; use crate::servers::udp::error::Error; +use crate::servers::udp::logging::{log_bad_request, log_error_response, log_request, log_response}; use crate::servers::udp::peer_builder; use crate::servers::udp::request::AnnounceWrapper; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; @@ -28,8 +32,12 @@ use crate::shared::bit_torrent::info_hash::InfoHash; /// type. /// /// It will return an `Error` response if the request is invalid. 
-pub(crate) async fn handle_packet(udp_request: UdpRequest, tracker: &Arc) -> Response { +pub(crate) async fn handle_packet(udp_request: UdpRequest, tracker: &Arc, socket: Arc) -> Response { debug!("Handling Packets: {udp_request:?}"); + + let request_id = RequestId::make(&udp_request); + let server_socket_addr = socket.local_addr().expect("Could not get local_addr for socket."); + match Request::from_bytes(&udp_request.payload[..udp_request.payload.len()], MAX_SCRAPE_TORRENTS).map_err(|e| { Error::InternalServer { message: format!("{e:?}"), @@ -37,24 +45,37 @@ pub(crate) async fn handle_packet(udp_request: UdpRequest, tracker: &Arc { + log_request(&request, &request_id, &server_socket_addr); + let transaction_id = match &request { Request::Connect(connect_request) => connect_request.transaction_id, Request::Announce(announce_request) => announce_request.transaction_id, Request::Scrape(scrape_request) => scrape_request.transaction_id, }; - match handle_request(request, udp_request.from, tracker).await { + let response = match handle_request(request, udp_request.from, tracker).await { Ok(response) => response, Err(e) => handle_error(&e, transaction_id), - } + }; + + log_response(&response, &transaction_id, &request_id, &server_socket_addr); + + response + } + Err(e) => { + log_bad_request(&request_id); + + let response = handle_error( + &Error::BadRequest { + source: (Arc::new(e) as DynError).into(), + }, + TransactionId(0), + ); + + log_error_response(&request_id); + + response } - // bad request - Err(e) => handle_error( - &Error::BadRequest { - source: (Arc::new(e) as DynError).into(), - }, - TransactionId(0), - ), } } @@ -80,7 +101,6 @@ pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: /// /// This function does not ever return an error. 
pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, tracker: &Tracker) -> Result { - info!(target: "UDP", "\"CONNECT TxID {}\"", request.transaction_id.0); debug!("udp connect request: {:#?}", request); let connection_cookie = make(&remote_addr); @@ -138,8 +158,6 @@ pub async fn handle_announce( source: (Arc::new(e) as Arc).into(), })?; - info!(target: "UDP", "\"ANNOUNCE TxID {} IH {}\"", announce_request.transaction_id.0, info_hash.to_hex_string()); - let mut peer = peer_builder::from_request(&wrapped_announce_request, &remote_client_ip); let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; @@ -214,7 +232,6 @@ pub async fn handle_announce( /// /// This function does not ever return an error. pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tracker: &Tracker) -> Result { - info!(target: "UDP", "\"SCRAPE TxID {}\"", request.transaction_id.0); debug!("udp scrape request: {:#?}", request); // Convert from aquatic infohashes @@ -274,6 +291,22 @@ fn handle_error(e: &Error, transaction_id: TransactionId) -> Response { }) } +/// An identifier for a request. +#[derive(Debug, Clone)] +pub struct RequestId(Uuid); + +impl RequestId { + fn make(_request: &UdpRequest) -> RequestId { + RequestId(Uuid::new_v4()) + } +} + +impl fmt::Display for RequestId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + #[cfg(test)] mod tests { diff --git a/src/servers/udp/logging.rs b/src/servers/udp/logging.rs new file mode 100644 index 000000000..bd1c2951b --- /dev/null +++ b/src/servers/udp/logging.rs @@ -0,0 +1,73 @@ +//! Logging for UDP Tracker requests and responses. 
+ +use std::net::SocketAddr; + +use aquatic_udp_protocol::{Request, Response, TransactionId}; + +use super::handlers::RequestId; +use crate::shared::bit_torrent::info_hash::InfoHash; + +pub fn log_request(request: &Request, request_id: &RequestId, server_socket_addr: &SocketAddr) { + let action = map_action_name(request); + + match &request { + Request::Connect(connect_request) => { + let transaction_id = connect_request.transaction_id; + let transaction_id_str = transaction_id.0.to_string(); + + tracing::span!( + target: "UDP TRACKER", + tracing::Level::INFO, "request", server_socket_addr = %server_socket_addr, action = %action, transaction_id = %transaction_id_str, request_id = %request_id); + } + Request::Announce(announce_request) => { + let transaction_id = announce_request.transaction_id; + let transaction_id_str = transaction_id.0.to_string(); + let connection_id_str = announce_request.connection_id.0.to_string(); + let info_hash_str = InfoHash::from_bytes(&announce_request.info_hash.0).to_hex_string(); + + tracing::span!( + target: "UDP TRACKER", + tracing::Level::INFO, "request", server_socket_addr = %server_socket_addr, action = %action, transaction_id = %transaction_id_str, request_id = %request_id, connection_id = %connection_id_str, info_hash = %info_hash_str); + } + Request::Scrape(scrape_request) => { + let transaction_id = scrape_request.transaction_id; + let transaction_id_str = transaction_id.0.to_string(); + let connection_id_str = scrape_request.connection_id.0.to_string(); + + tracing::span!( + target: "UDP TRACKER", + tracing::Level::INFO, "request", server_socket_addr = %server_socket_addr, action = %action, transaction_id = %transaction_id_str, request_id = %request_id, connection_id = %connection_id_str); + } + }; +} + +fn map_action_name(udp_request: &Request) -> String { + match udp_request { + Request::Connect(_connect_request) => "CONNECT".to_owned(), + Request::Announce(_announce_request) => "ANNOUNCE".to_owned(), + 
Request::Scrape(_scrape_request) => "SCRAPE".to_owned(), + } +} + +pub fn log_response( + _response: &Response, + transaction_id: &TransactionId, + request_id: &RequestId, + server_socket_addr: &SocketAddr, +) { + tracing::span!( + target: "UDP TRACKER", + tracing::Level::INFO, "response", server_socket_addr = %server_socket_addr, transaction_id = %transaction_id.0.to_string(), request_id = %request_id); +} + +pub fn log_bad_request(request_id: &RequestId) { + tracing::span!( + target: "UDP TRACKER", + tracing::Level::INFO, "bad request", request_id = %request_id); +} + +pub fn log_error_response(request_id: &RequestId) { + tracing::span!( + target: "UDP TRACKER", + tracing::Level::INFO, "response", request_id = %request_id); +} diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index 3b22aeab5..8ef562086 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -644,6 +644,7 @@ use std::net::SocketAddr; pub mod connection_cookie; pub mod error; pub mod handlers; +pub mod logging; pub mod peer_builder; pub mod request; pub mod server; diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 0ab50d3bd..1326f806d 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -247,10 +247,10 @@ impl Udp { let address = socket.local_addr().expect("Could not get local_addr from {binding}."); let halt = shutdown_signal_with_message(rx_halt, format!("Halting Http Service Bound to Socket: {address}")); - info!(target: "UDP Tracker", "Starting on: udp://{}", address); + info!(target: "UDP TRACKER", "Starting on: udp://{}", address); let running = tokio::task::spawn(async move { - debug!(target: "UDP Tracker", "Started: Waiting for packets on socket address: udp://{address} ..."); + debug!(target: "UDP TRACKER", "Started: Waiting for packets on socket address: udp://{address} ..."); let tracker = tracker.clone(); let socket = socket.clone(); @@ -275,13 +275,13 @@ impl Udp { .send(Started { address }) .expect("the UDP Tracker 
service should not be dropped"); - debug!(target: "UDP Tracker", "Started on: udp://{}", address); + debug!(target: "UDP TRACKER", "Started on: udp://{}", address); let stop = running.abort_handle(); select! { - _ = running => { debug!(target: "UDP Tracker", "Socket listener stopped on address: udp://{address}"); }, - () = halt => { debug!(target: "UDP TRACKER", "Halt signal spawned task stopped on address: udp://{address}"); } + _ = running => { debug!(target: "UDP TRACKER", "Socket listener stopped on address: udp://{address}"); }, + () = halt => { debug!(target: "UDP TRACKER", "Halt signal spawned task stopped on address: udp://{address}"); } } stop.abort(); @@ -327,7 +327,7 @@ impl Udp { async fn make_response(tracker: Arc, socket: Arc, udp_request: UdpRequest) { trace!("Making Response to {udp_request:?}"); let from = udp_request.from; - let response = handlers::handle_packet(udp_request, &tracker.clone()).await; + let response = handlers::handle_packet(udp_request, &tracker.clone(), socket.clone()).await; Self::send_response(&socket.clone(), from, response).await; } From 1bf8a823111541752dc5c1ce51aa5821195b1cea Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 22 Feb 2024 17:41:22 +0000 Subject: [PATCH 0741/1003] chore(deps): update dependencies ```console $ cargo update Updating crates.io index Updating ahash v0.8.8 -> v0.8.9 Updating anstream v0.6.11 -> v0.6.12 Updating anstyle v1.0.5 -> v1.0.6 Updating anyhow v1.0.79 -> v1.0.80 Removing base64 v0.13.1 Updating bindgen v0.69.2 -> v0.69.4 Updating bumpalo v3.14.0 -> v3.15.2 Updating bytecheck v0.6.11 -> v0.6.12 (latest: v0.7.0) Updating bytecheck_derive v0.6.11 -> v0.6.12 (latest: v0.7.0) Updating cc v1.0.83 -> v1.0.86 Updating chrono v0.4.33 -> v0.4.34 Updating clap v4.4.18 -> v4.5.1 Updating clap_builder v4.4.18 -> v4.5.1 Updating clap_derive v4.4.7 -> v4.5.0 Updating clap_lex v0.6.0 -> v0.7.0 Updating config v0.13.4 -> v0.14.0 Adding const-random v0.1.17 Adding const-random-macro v0.1.16 Adding 
convert_case v0.6.0 Updating crc32fast v1.3.2 -> v1.4.0 Updating darling v0.20.5 -> v0.20.6 Updating darling_core v0.20.5 -> v0.20.6 Updating darling_macro v0.20.5 -> v0.20.6 Updating dlv-list v0.3.0 -> v0.5.2 Updating either v1.9.0 -> v1.10.0 Updating hashlink v0.8.4 -> v0.9.0 Updating hermit-abi v0.3.4 -> v0.3.6 Updating hyper v1.1.0 -> v1.2.0 Updating iana-time-zone v0.1.59 -> v0.1.60 Updating indexmap v2.2.2 -> v2.2.3 Updating is-terminal v0.4.10 -> v0.4.12 Adding itertools v0.12.1 Removing jobserver v0.1.27 Updating js-sys v0.3.67 -> v0.3.68 Updating libsqlite3-sys v0.27.0 -> v0.28.0 Updating local-ip-address v0.5.7 -> v0.6.0 Updating miniz_oxide v0.7.1 -> v0.7.2 Adding num-conv v0.1.0 Updating num-integer v0.1.45 -> v0.1.46 Updating num-traits v0.2.17 -> v0.2.18 Updating openssl v0.10.63 -> v0.10.64 Updating openssl-src v300.2.2+3.2.1 -> v300.2.3+3.2.1 Updating openssl-sys v0.9.99 -> v0.9.101 Updating ordered-multimap v0.4.3 -> v0.6.0 (latest: v0.7.1) Removing peeking_take_while v0.1.2 Updating pest v2.7.6 -> v2.7.7 Updating pest_derive v2.7.6 -> v2.7.7 Updating pest_generator v2.7.6 -> v2.7.7 Updating pest_meta v2.7.6 -> v2.7.7 Updating pkg-config v0.3.29 -> v0.3.30 Updating r2d2_sqlite v0.23.0 -> v0.24.0 Updating rend v0.4.1 -> v0.4.2 Updating ring v0.17.7 -> v0.17.8 Updating ringbuf v0.4.0-rc.2 -> v0.3.3 Updating rkyv v0.7.43 -> v0.7.44 Updating rkyv_derive v0.7.43 -> v0.7.44 Updating ron v0.7.1 -> v0.8.1 Updating rusqlite v0.30.0 -> v0.31.0 Updating rust-ini v0.18.0 -> v0.19.0 (latest: v0.20.0) Updating rust_decimal v1.34.0 -> v1.34.3 Updating rustix v0.38.30 -> v0.38.31 Updating rustls-pemfile v2.0.0 -> v2.1.0 Updating rustls-pki-types v1.1.0 -> v1.3.0 Updating ryu v1.0.16 -> v1.0.17 Updating semver v1.0.21 -> v1.0.22 Updating serde v1.0.196 -> v1.0.197 Updating serde_derive v1.0.196 -> v1.0.197 Updating serde_json v1.0.113 -> v1.0.114 Updating serde_with v3.6.0 -> v3.6.1 Updating serde_with_macros v3.6.0 -> v3.6.1 Adding strsim v0.11.0 Updating syn 
v2.0.48 -> v2.0.50 Updating tempfile v3.9.0 -> v3.10.0 Updating thiserror v1.0.56 -> v1.0.57 Updating thiserror-impl v1.0.56 -> v1.0.57 Updating time v0.3.31 -> v0.3.34 Updating time-macros v0.2.16 -> v0.2.17 Adding tiny-keccak v2.0.2 Updating tokio v1.35.1 -> v1.36.0 Removing toml v0.5.11 Removing toml v0.8.9 Adding toml v0.8.10 Adding toml_edit v0.22.6 Updating unicode-normalization v0.1.22 -> v0.1.23 Adding unicode-segmentation v1.11.0 Updating wasm-bindgen v0.2.90 -> v0.2.91 Updating wasm-bindgen-backend v0.2.90 -> v0.2.91 Updating wasm-bindgen-futures v0.4.40 -> v0.4.41 Updating wasm-bindgen-macro v0.2.90 -> v0.2.91 Updating wasm-bindgen-macro-support v0.2.90 -> v0.2.91 Updating wasm-bindgen-shared v0.2.90 -> v0.2.91 Updating web-sys v0.3.67 -> v0.3.68 Updating windows-targets v0.52.0 -> v0.52.3 Updating windows_aarch64_gnullvm v0.52.0 -> v0.52.3 Updating windows_aarch64_msvc v0.52.0 -> v0.52.3 Updating windows_i686_gnu v0.52.0 -> v0.52.3 Updating windows_i686_msvc v0.52.0 -> v0.52.3 Updating windows_x86_64_gnu v0.52.0 -> v0.52.3 Updating windows_x86_64_gnullvm v0.52.0 -> v0.52.3 Updating windows_x86_64_msvc v0.52.0 -> v0.52.3 Removing winnow v0.5.36 Adding winnow v0.5.40 (latest: v0.6.2) Adding winnow v0.6.2 note: pass `--verbose` to see 52 unchanged dependencies behind latest ``` --- Cargo.lock | 621 +++++++++++++++++++++----------------- Cargo.toml | 2 +- src/servers/udp/server.rs | 6 +- 3 files changed, 346 insertions(+), 283 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aae5deb9e..e1092fefc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -30,9 +30,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" +checksum = "d713b3834d76b85304d4d525563c1276e2e30dc97cc67bfb4585a4a29fc2c89f" dependencies = [ "cfg-if", "once_cell", @@ -93,9 +93,9 @@ checksum = 
"4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.11" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" +checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540" dependencies = [ "anstyle", "anstyle-parse", @@ -107,9 +107,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2faccea4cc4ab4a667ce676a30e8ec13922a692c99bb8f5b11f1502c72e04220" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" [[package]] name = "anstyle-parse" @@ -141,9 +141,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.79" +version = "1.0.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" [[package]] name = "aquatic_udp_protocol" @@ -191,7 +191,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -214,7 +214,7 @@ dependencies = [ "http 1.0.0", "http-body 1.0.0", "http-body-util", - "hyper 1.1.0", + "hyper 1.2.0", "hyper-util", "itoa", "matchit", @@ -276,7 +276,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -291,11 +291,11 @@ dependencies = [ "http 1.0.0", "http-body 1.0.0", "http-body-util", - "hyper 1.1.0", + "hyper 1.2.0", "hyper-util", "pin-project-lite", "rustls", - "rustls-pemfile 2.0.0", + "rustls-pemfile 2.1.0", "tokio", "tokio-rustls", "tower", @@ -317,12 +317,6 @@ dependencies = [ "rustc-demangle", ] -[[package]] -name = "base64" -version = "0.13.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - [[package]] name = "base64" version = "0.21.7" @@ -348,22 +342,22 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bindgen" -version = "0.69.2" +version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c69fae65a523209d34240b60abe0c42d33d1045d445c0839d8a4894a736e2d" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ "bitflags 2.4.2", "cexpr", "clang-sys", + "itertools 0.12.1", "lazy_static", "lazycell", - "peeking_take_while", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -377,6 +371,9 @@ name = "bitflags" version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +dependencies = [ + "serde", +] [[package]] name = "bitvec" @@ -419,7 +416,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", "syn_derive", ] @@ -452,15 +449,15 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.14.0" +version = "3.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "a3b1be7772ee4501dba05acbe66bb1e8760f6a6c474a36035631638e4415f130" [[package]] name = "bytecheck" -version = "0.6.11" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6372023ac861f6e6dc89c8344a8f398fb42aaba2b5dbc649ca0c0e9dbcb627" +checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2" dependencies = [ "bytecheck_derive", "ptr_meta", @@ -469,9 +466,9 @@ dependencies = [ [[package]] name = 
"bytecheck_derive" -version = "0.6.11" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7ec4c6f261935ad534c0c22dbef2201b45918860eb1c574b972bd213a76af61" +checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" dependencies = [ "proc-macro2", "quote", @@ -498,11 +495,10 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.83" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "7f9fa1897e4325be0d68d48df6aa1a71ac2ed4d27723887e7754192705350730" dependencies = [ - "jobserver", "libc", ] @@ -529,15 +525,15 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "chrono" -version = "0.4.33" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f13690e35a5e4ace198e7beea2895d29f3a9cc55015fcebe6336bd2010af9eb" +checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", "serde", - "windows-targets 0.52.0", + "windows-targets 0.52.3", ] [[package]] @@ -580,9 +576,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.18" +version = "4.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c" +checksum = "c918d541ef2913577a0f9566e9ce27cb35b6df072075769e0b26cb5a554520da" dependencies = [ "clap_builder", "clap_derive", @@ -590,33 +586,33 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.18" +version = "4.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7" +checksum = "9f3e7391dad68afb0c2ede1bf619f579a3dc9c2ec67f089baa397123a2f3d1eb" 
dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim", + "strsim 0.11.0", ] [[package]] name = "clap_derive" -version = "4.4.7" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" +checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] name = "clap_lex" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "cmake" @@ -645,11 +641,12 @@ dependencies = [ [[package]] name = "config" -version = "0.13.4" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23738e11972c7643e4ec947840fc463b6a571afcd3e735bdfce7d03c7a784aca" +checksum = "7328b20597b53c2454f0b1919720c25c7339051c02b72b7e05409e00b14132be" dependencies = [ "async-trait", + "convert_case 0.6.0", "json5", "lazy_static", "nom", @@ -658,16 +655,45 @@ dependencies = [ "rust-ini", "serde", "serde_json", - "toml 0.5.11", + "toml", "yaml-rust", ] +[[package]] +name = "const-random" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aaf16c9c2c612020bcfd042e170f6e32de9b9d75adb5277cdbbd2e2c8c8299a" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom", + "once_cell", + "tiny-keccak", +] + [[package]] name = "convert_case" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +[[package]] +name = "convert_case" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -695,9 +721,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.3.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" dependencies = [ "cfg-if", ] @@ -715,7 +741,7 @@ dependencies = [ "criterion-plot", "futures", "is-terminal", - "itertools", + "itertools 0.10.5", "num-traits", "once_cell", "oorandom", @@ -737,7 +763,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools", + "itertools 0.10.5", ] [[package]] @@ -814,9 +840,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.5" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc5d6b04b3fd0ba9926f945895de7d806260a2d7431ba82e7edaecb043c4c6b8" +checksum = "c376d08ea6aa96aafe61237c7200d1241cb177b7d3a542d791f2d118e9cbb955" dependencies = [ "darling_core", "darling_macro", @@ -824,27 +850,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.5" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04e48a959bcd5c761246f5d090ebc2fbf7b9cd527a492b07a67510c108f1e7e3" +checksum = "33043dcd19068b8192064c704b3f83eb464f91f1ff527b44a4e2b08d9cdb8855" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim", - "syn 2.0.48", + "strsim 0.10.0", + "syn 2.0.50", ] [[package]] name = 
"darling_macro" -version = "0.20.5" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1545d67a2149e1d93b7e5c7752dce5a7426eb5d1357ddcfd89336b94444f77" +checksum = "c5a91391accf613803c2a9bf9abccdbaa07c54b4244a5b64883f9c3c137c86be" dependencies = [ "darling_core", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -863,7 +889,7 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ - "convert_case", + "convert_case 0.4.0", "proc-macro2", "quote", "rustc_version", @@ -878,7 +904,7 @@ checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -893,9 +919,12 @@ dependencies = [ [[package]] name = "dlv-list" -version = "0.3.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" +checksum = "442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f" +dependencies = [ + "const-random", +] [[package]] name = "downcast" @@ -905,9 +934,9 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "either" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" [[package]] name = "encoding_rs" @@ -1053,7 +1082,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -1065,7 +1094,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -1077,7 +1106,7 @@ 
dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -1142,7 +1171,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -1220,7 +1249,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.11", - "indexmap 2.2.2", + "indexmap 2.2.3", "slab", "tokio", "tokio-util", @@ -1239,7 +1268,7 @@ dependencies = [ "futures-sink", "futures-util", "http 1.0.0", - "indexmap 2.2.2", + "indexmap 2.2.3", "slab", "tokio", "tokio-util", @@ -1271,7 +1300,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.8", + "ahash 0.8.9", ] [[package]] @@ -1280,15 +1309,15 @@ version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - "ahash 0.8.8", + "ahash 0.8.9", "allocator-api2", ] [[package]] name = "hashlink" -version = "0.8.4" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee" dependencies = [ "hashbrown 0.14.3", ] @@ -1301,9 +1330,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.3.4" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f" +checksum = "bd5256b483761cd23699d0da46cc6fd2ee3be420bbe6d020ae4a091e70b7e9fd" [[package]] name = "hex" @@ -1411,9 +1440,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.1.0" +version = "1.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5aa53871fc917b1a9ed87b683a5d86db645e23acb32c2e0785a353e522fb75" +checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a" dependencies = [ "bytes", "futures-channel", @@ -1425,6 +1454,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", + "smallvec", "tokio", ] @@ -1451,7 +1481,7 @@ dependencies = [ "futures-util", "http 1.0.0", "http-body 1.0.0", - "hyper 1.1.0", + "hyper 1.2.0", "pin-project-lite", "socket2", "tokio", @@ -1459,9 +1489,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.59" +version = "0.1.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6a67363e2aa4443928ce15e57ebae94fd8949958fd1223c4cfc0cd473ad7539" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1509,9 +1539,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" +checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -1535,12 +1565,12 @@ checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "is-terminal" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bad00257d07be169d870ab665980b06cdb366d792ad690bf2e76876dc503455" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" dependencies = [ "hermit-abi", - "rustix", + "libc", "windows-sys 0.52.0", ] @@ -1554,25 +1584,25 @@ dependencies = [ ] [[package]] -name = "itoa" -version = "1.0.10" +name = "itertools" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] [[package]] -name = "jobserver" -version = "0.1.27" +name = "itoa" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" -dependencies = [ - "libc", -] +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "js-sys" -version = "0.3.67" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a1d36f1235bc969acba30b7f5990b864423a6068a10f7c90ae8f0112e3a59d1" +checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" dependencies = [ "wasm-bindgen", ] @@ -1691,9 +1721,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716" +checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" dependencies = [ "cc", "pkg-config", @@ -1725,9 +1755,9 @@ checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "local-ip-address" -version = "0.5.7" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "612ed4ea9ce5acfb5d26339302528a5e1e59dfed95e9e11af3c083236ff1d15d" +checksum = "f63e1499d2495be571af92e9ca9dca4e7bf26c47b87cb8d0c6100825e521dd6b" dependencies = [ "libc", "neli", @@ -1786,9 +1816,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = 
"9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ "adler", ] @@ -1828,7 +1858,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -1879,7 +1909,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", "termcolor", "thiserror", ] @@ -1890,7 +1920,7 @@ version = "0.30.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57349d5a326b437989b6ee4dc8f2f34b0cc131202748414712a8e7d98952fc8c" dependencies = [ - "base64 0.21.7", + "base64", "bigdecimal", "bindgen", "bitflags 2.4.2", @@ -2001,21 +2031,26 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", ] @@ -2053,9 +2088,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "openssl" -version = "0.10.63" +version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c9d69dd87a29568d4d017cfe8ec518706046a05184e5aea92d0af890b803c8" +checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ "bitflags 2.4.2", "cfg-if", @@ 
-2074,7 +2109,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -2085,18 +2120,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.2.2+3.2.1" +version = "300.2.3+3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bbfad0063610ac26ee79f7484739e2b07555a75c42453b89263830b5c8103bc" +checksum = "5cff92b6f71555b61bb9315f7c64da3ca43d87531622120fea0195fc761b4843" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.99" +version = "0.9.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e1bf214306098e4832460f797824c05d25aacdf896f64a985fb0fd992454ae" +checksum = "dda2b0f344e78efc2facf7d195d098df0dd72151b26ab98da807afc26c198dff" dependencies = [ "cc", "libc", @@ -2107,12 +2142,12 @@ dependencies = [ [[package]] name = "ordered-multimap" -version = "0.4.3" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a" +checksum = "4ed8acf08e98e744e5384c8bc63ceb0364e68a6854187221c18df61c4797690e" dependencies = [ "dlv-list", - "hashbrown 0.12.3", + "hashbrown 0.13.2", ] [[package]] @@ -2144,19 +2179,13 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "pem" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" dependencies = [ - "base64 0.21.7", + 
"base64", "serde", ] @@ -2168,9 +2197,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.6" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f200d8d83c44a45b21764d1916299752ca035d15ecd46faca3e9a2a2bf6ad06" +checksum = "219c0dcc30b6a27553f9cc242972b67f75b60eb0db71f0b5462f38b058c41546" dependencies = [ "memchr", "thiserror", @@ -2179,9 +2208,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.6" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcd6ab1236bbdb3a49027e920e693192ebfe8913f6d60e294de57463a493cfde" +checksum = "22e1288dbd7786462961e69bfd4df7848c1e37e8b74303dbdab82c3a9cdd2809" dependencies = [ "pest", "pest_generator", @@ -2189,22 +2218,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.6" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a31940305ffc96863a735bef7c7994a00b325a7138fdbc5bda0f1a0476d3275" +checksum = "1381c29a877c6d34b8c176e734f35d7f7f5b3adaefe940cb4d1bb7af94678e2e" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] name = "pest_meta" -version = "2.7.6" +version = "2.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7ff62f5259e53b78d1af898941cdcdccfae7385cf7d793a6e55de5d05bb4b7d" +checksum = "d0934d6907f148c22a3acbda520c7eed243ad7487a30f51f6ce52b58b7077a8a" dependencies = [ "once_cell", "pest", @@ -2266,7 +2295,7 @@ checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -2283,9 +2312,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.29" +version = "0.3.30" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "plotters" @@ -2457,9 +2486,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dc290b669d30e20751e813517bbe13662d020419c5c8818ff10b6e8bb7777f6" +checksum = "6a982edf65c129796dba72f8775b292ef482b40d035e827a9825b3bc07ccc5f2" dependencies = [ "r2d2", "rusqlite", @@ -2562,9 +2591,9 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "rend" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2571463863a6bd50c32f94402933f03457a3fbaf697a707c5be741e459f08fd" +checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c" dependencies = [ "bytecheck", ] @@ -2575,7 +2604,7 @@ version = "0.11.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" dependencies = [ - "base64 0.21.7", + "base64", "bytes", "encoding_rs", "futures-core", @@ -2611,32 +2640,33 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", + "cfg-if", "getrandom", "libc", "spin", "untrusted", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "ringbuf" -version = "0.4.0-rc.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b8f7d58e4f67752d63318605656be063e333154aa35b70126075e9d05552979" +checksum = 
"79abed428d1fd2a128201cec72c5f6938e2da607c6f3745f769fabea399d950a" dependencies = [ "crossbeam-utils", ] [[package]] name = "rkyv" -version = "0.7.43" +version = "0.7.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527a97cdfef66f65998b5f3b637c26f5a5ec09cc52a3f9932313ac645f4190f5" +checksum = "5cba464629b3394fc4dbc6f940ff8f5b4ff5c7aef40f29166fd4ad12acbc99c0" dependencies = [ "bitvec", "bytecheck", @@ -2652,9 +2682,9 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.43" +version = "0.7.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5c462a1328c8e67e4d6dbad1eb0355dd43e8ab432c6e227a43657f16ade5033" +checksum = "a7dddfff8de25e6f62b9d64e6e432bf1c6736c57d20323e15ee10435fbda7c65" dependencies = [ "proc-macro2", "quote", @@ -2663,20 +2693,21 @@ dependencies = [ [[package]] name = "ron" -version = "0.7.1" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" +checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ - "base64 0.13.1", - "bitflags 1.3.2", + "base64", + "bitflags 2.4.2", "serde", + "serde_derive", ] [[package]] name = "rusqlite" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a78046161564f5e7cd9008aff3b2990b3850dc8e0349119b98e8f251e099f24d" +checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" dependencies = [ "bitflags 2.4.2", "fallible-iterator", @@ -2688,9 +2719,9 @@ dependencies = [ [[package]] name = "rust-ini" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6d5f2436026b4f6e79dc829837d467cc7e9a55ee40e750d716713540715a2df" +checksum = "7e2a3bcec1f113553ef1c88aae6c020a369d03d55b58de9869a0908930385091" dependencies = [ "cfg-if", "ordered-multimap", @@ -2698,9 
+2729,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.34.0" +version = "1.34.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7de2711cae7bdec993f4d2319352599ceb0d003e9f7900ea7c6ef4c5fc16831" +checksum = "b39449a79f45e8da28c57c341891b69a183044b29518bb8f86dbac9df60bb7df" dependencies = [ "arrayvec", "borsh", @@ -2735,9 +2766,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.30" +version = "0.38.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca" +checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" dependencies = [ "bitflags 2.4.2", "errno", @@ -2764,24 +2795,24 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "base64 0.21.7", + "base64", ] [[package]] name = "rustls-pemfile" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" +checksum = "3c333bb734fcdedcea57de1602543590f545f127dc8b533324318fd492c5c70b" dependencies = [ - "base64 0.21.7", + "base64", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e9d979b3ce68192e42760c7810125eb6cf2ea10efae545a156063e61f314e2a" +checksum = "048a63e5b3ac996d78d402940b5fa47973d2d080c6c6fffa1d0f19c4445310b7" [[package]] name = "rustls-webpki" @@ -2801,9 +2832,9 @@ checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "ryu" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +checksum = 
"e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" [[package]] name = "same-file" @@ -2885,15 +2916,15 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" [[package]] name = "serde" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] @@ -2919,20 +2950,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] name = "serde_json" -version = "1.0.113" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" dependencies = [ "itoa", "ryu", @@ -2957,7 +2988,7 @@ checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -2983,16 +3014,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.6.0" +version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b0ed1662c5a68664f45b76d18deb0e234aff37207086803165c961eb695e981" +checksum = 
"15d167997bd841ec232f5b2b8e0e26606df2e7caa4c31b95ea9ca52b200bd270" dependencies = [ - "base64 0.21.7", + "base64", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.2.2", + "indexmap 2.2.3", "serde", + "serde_derive", "serde_json", "serde_with_macros", "time", @@ -3000,14 +3032,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.6.0" +version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "568577ff0ef47b879f736cd66740e022f3672788cdf002a05a4e609ea5a6fb15" +checksum = "865f9743393e638991566a8b7a479043c2c8da94a33e0a31f18214c9cae0a64d" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -3102,6 +3134,12 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "strsim" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" + [[package]] name = "subprocess" version = "0.2.9" @@ -3125,9 +3163,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.48" +version = "2.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb" dependencies = [ "proc-macro2", "quote", @@ -3143,7 +3181,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -3198,13 +3236,12 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.9.0" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" +checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" dependencies = [ "cfg-if", "fastrand", 
- "redox_syscall", "rustix", "windows-sys 0.52.0", ] @@ -3226,32 +3263,33 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" +checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" +checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] name = "time" -version = "0.3.31" +version = "0.3.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" +checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" dependencies = [ "deranged", "itoa", + "num-conv", "powerfmt", "serde", "time-core", @@ -3266,13 +3304,23 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" +checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" dependencies = [ + "num-conv", "time-core", ] +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -3300,9 +3348,9 @@ checksum = 
"1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.35.1" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", "bytes", @@ -3324,7 +3372,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -3363,23 +3411,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" -dependencies = [ - "serde", -] - -[[package]] -name = "toml" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6a4b9e8023eb94392d3dca65d717c53abc5dad49c07cb65bb8fcd87115fa325" +checksum = "9a9aad4a3066010876e8dcf5a8a06e70a558751117a145c6ce2b82c2e2054290" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.21.1", + "toml_edit 0.22.6", ] [[package]] @@ -3397,9 +3436,9 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.3", "toml_datetime", - "winnow", + "winnow 0.5.40", ] [[package]] @@ -3408,11 +3447,22 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.3", + "toml_datetime", + "winnow 0.5.40", +] + +[[package]] +name = "toml_edit" +version = "0.22.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2c1b5fd4128cc8d3e0cb74d4ed9a9cc7c7284becd4df68f5f940e1ad123606f6" +dependencies = [ + "indexmap 2.2.3", "serde", "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.6.2", ] [[package]] @@ -3446,7 +3496,7 @@ dependencies = [ "fern", "futures", "hex-literal", - "hyper 1.1.0", + "hyper 1.2.0", "lazy_static", "local-ip-address", "log", @@ -3495,7 +3545,7 @@ dependencies = [ "serde", "serde_with", "thiserror", - "toml 0.8.9", + "toml", "torrust-tracker-located-error", "torrust-tracker-primitives", "uuid", @@ -3616,7 +3666,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -3671,13 +3721,19 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-segmentation" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" + [[package]] name = "untrusted" version = "0.9.0" @@ -3750,9 +3806,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1223296a201415c7fad14792dbefaace9bd52b62d33453ade1c5b5f07555406" +checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3760,24 +3816,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.90" +version = "0.2.91" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcdc935b63408d58a32f8cc9738a0bffd8f05cc7c002086c6ef20b7312ad9dcd" +checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.40" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bde2032aeb86bdfaecc8b261eef3cba735cc426c1f3a3416d1e0791be95fc461" +checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" dependencies = [ "cfg-if", "js-sys", @@ -3787,9 +3843,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999" +checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3797,28 +3853,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7" +checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d91413b1c31d7539ba5ef2451af3f0b833a005eb27a631cec32bc0635a8602b" +checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" [[package]] name = "web-sys" -version = "0.3.67" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "58cd2333b6e0be7a39605f0e255892fd7418a682d8da8fe042fe25128794d2ed" +checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" dependencies = [ "js-sys", "wasm-bindgen", @@ -3861,7 +3917,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.3", ] [[package]] @@ -3879,7 +3935,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.3", ] [[package]] @@ -3899,17 +3955,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "d380ba1dc7187569a8a9e91ed34b8ccfc33123bbacb8c0aed2d1ad7f3ef2dc5f" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.3", + "windows_aarch64_msvc 0.52.3", + "windows_i686_gnu 0.52.3", + "windows_i686_msvc 0.52.3", + "windows_x86_64_gnu 0.52.3", + "windows_x86_64_gnullvm 0.52.3", + "windows_x86_64_msvc 0.52.3", ] [[package]] @@ -3920,9 +3976,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "68e5dcfb9413f53afd9c8f86e56a7b4d86d9a2fa26090ea2dc9e40fba56c6ec6" [[package]] name = 
"windows_aarch64_msvc" @@ -3932,9 +3988,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "8dab469ebbc45798319e69eebf92308e541ce46760b49b18c6b3fe5e8965b30f" [[package]] name = "windows_i686_gnu" @@ -3944,9 +4000,9 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "2a4e9b6a7cac734a8b4138a4e1044eac3404d8326b6c0f939276560687a033fb" [[package]] name = "windows_i686_msvc" @@ -3956,9 +4012,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "28b0ec9c422ca95ff34a78755cfa6ad4a51371da2a5ace67500cf7ca5f232c58" [[package]] name = "windows_x86_64_gnu" @@ -3968,9 +4024,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "704131571ba93e89d7cd43482277d6632589b18ecf4468f591fbae0a8b101614" [[package]] name = "windows_x86_64_gnullvm" @@ -3980,9 +4036,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "42079295511643151e98d61c38c0acc444e52dd42ab456f7ccfd5152e8ecf21c" [[package]] name = "windows_x86_64_msvc" @@ -3992,15 +4048,24 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "0770833d60a970638e989b3fa9fd2bb1aaadcf88963d1659fd7d9990196ed2d6" [[package]] name = "winnow" -version = "0.5.36" +version = "0.5.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +dependencies = [ + "memchr", +] + +[[package]] +name = "winnow" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "818ce546a11a9986bc24f93d0cdf38a8a1a400f1473ea8c82e59f6e0ffab9249" +checksum = "7a4191c47f15cc3ec71fcb4913cb83d58def65dd3787610213c649283b5ce178" dependencies = [ "memchr", ] @@ -4050,7 +4115,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 83134d8f0..26f4334f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,7 +56,7 @@ serde = { version = "1", features = ["derive"] } serde_bencode = "0" serde_bytes = "0" serde_json = "1" -ringbuf = "0.4.0-rc.2" +ringbuf = "0" serde_with = "3" serde_repr = "0" tdyne-peer-id = "1" diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 1326f806d..fbea11fac 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -24,9 +24,7 @@ use std::sync::Arc; use aquatic_udp_protocol::Response; use derive_more::Constructor; use 
log::{debug, error, info, trace}; -use ringbuf::storage::Static; -use ringbuf::traits::{Consumer, Observer, RingBuffer}; -use ringbuf::LocalRb; +use ringbuf::{Rb, StaticRb}; use tokio::net::UdpSocket; use tokio::sync::oneshot; use tokio::task::{AbortHandle, JoinHandle}; @@ -205,7 +203,7 @@ impl Launcher { #[derive(Default)] struct ActiveRequests { - rb: LocalRb>, // the number of requests we handle at the same time. + rb: StaticRb, // the number of requests we handle at the same time. } impl std::fmt::Debug for ActiveRequests { From 37c1fa7e56734056c892639ecbe1339fb3c40ee5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 22 Feb 2024 20:28:08 +0000 Subject: [PATCH 0742/1003] chore(deps): bump EndBug/label-sync from 2.3.2 to 2.3.3 --- .github/workflows/labels.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/labels.yaml b/.github/workflows/labels.yaml index 97aaa0308..bb8283f30 100644 --- a/.github/workflows/labels.yaml +++ b/.github/workflows/labels.yaml @@ -29,7 +29,7 @@ jobs: - id: sync name: Apply Labels from File - uses: EndBug/label-sync@da00f2c11fdb78e4fae44adac2fdd713778ea3e8 + uses: EndBug/label-sync@v2 with: config-file: .github/labels.json delete-other-labels: true From d673a594fc2218114ce9bdd05ac04f5abcdfabb3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 23 Feb 2024 06:53:33 +0000 Subject: [PATCH 0743/1003] docs: fix podman commands --- README.md | 2 +- docs/containers.md | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index e584db3c8..74ba5e72b 100644 --- a/README.md +++ b/README.md @@ -45,7 +45,7 @@ docker run -it torrust/tracker:develop #### Podman: ```sh -podman run -it torrust/tracker:develop +podman run -it docker.io/torrust/tracker:develop ``` > Please read our [container guide][containers.md] for more information. 
diff --git a/docs/containers.md b/docs/containers.md index 2b06c0f76..2526b880a 100644 --- a/docs/containers.md +++ b/docs/containers.md @@ -14,7 +14,7 @@ docker run -it torrust/tracker:latest or with Podman: ```sh -podman run -it torrust/tracker:latest +podman run -it docker.io/torrust/tracker:latest ``` @@ -122,10 +122,10 @@ docker run -it torrust-tracker:debug ```sh # Release Mode -podman run -it torrust-tracker:release +podman run -it docker.io/torrust-tracker:release # Debug Mode -podman run -it torrust-tracker:debug +podman run -it docker.io/torrust-tracker:debug ``` ### Arguments @@ -226,7 +226,7 @@ podman run -it \ --volume ./storage/tracker/lib:/var/lib/torrust/tracker:Z \ --volume ./storage/tracker/log:/var/log/torrust/tracker:Z \ --volume ./storage/tracker/etc:/etc/torrust/tracker:Z \ - torrust-tracker:release + docker.io/torrust-tracker:release ``` ## Docker Compose From c348f72198c866e447f7c908f9f97f2fd8d2853f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 23 Feb 2024 07:01:33 +0000 Subject: [PATCH 0744/1003] docs: fix linter warnings in markdown --- README.md | 58 +++++++++++++++++++++++++--------------------- docs/containers.md | 27 ++++++++++++++------- 2 files changed, 49 insertions(+), 36 deletions(-) diff --git a/README.md b/README.md index 74ba5e72b..18f3d361d 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,6 @@ # Torrust Tracker -[![container_wf_b]][container_wf] [![coverage_wf_b]][coverage_wf] [![deployment_wf_b]][deployment_wf] [![testing_wf_b]][testing_wf] - -__Torrust Tracker__, is a [BitTorrent][bittorrent] Tracker that matchmakes peers and collects statistics. Written in [Rust Language][rust] with the [axum] web framework. 
___This tracker aims to be respectful to established standards, (both [formal][BEP 00] and [otherwise][torrent_source_felid]).___ +[![container_wf_b]][container_wf] [![coverage_wf_b]][coverage_wf] [![deployment_wf_b]][deployment_wf] [![testing_wf_b]][testing_wf]**Torrust Tracker** is a [BitTorrent][bittorrent] Tracker that matchmakes peers and collects statistics. Written in [Rust Language][rust] with the [Axum] web framework. _**This tracker aims to be respectful to established standards, (both [formal][BEP 00] and [otherwise][torrent_source_felid]).___ > This is a [Torrust][torrust] project and is in active development. It is community supported as well as sponsored by [Nautilus Cyberneering][nautilus]. @@ -20,14 +18,15 @@ __Torrust Tracker__, is a [BitTorrent][bittorrent] Tracker that matchmakes peers - [x] Persistent `SQLite3` or `MySQL` Databases. ## Implemented BitTorrent Enhancement Proposals (BEPs) +> > _[Learn more about BitTorrent Enhancement Proposals][BEP 00]_ -- [BEP 03] : The BitTorrent Protocol. -- [BEP 07] : IPv6 Support. -- [BEP 15] : UDP Tracker Protocol for BitTorrent. -- [BEP 23] : Tracker Returns Compact Peer Lists. -- [BEP 27] : Private Torrents. -- [BEP 48] : Tracker Protocol Extension: Scrape. +- [BEP 03]: The BitTorrent Protocol. +- [BEP 07]: IPv6 Support. +- [BEP 15]: UDP Tracker Protocol for BitTorrent. +- [BEP 23]: Tracker Returns Compact Peer Lists. +- [BEP 27]: Private Torrents. +- [BEP 48]: Tracker Protocol Extension: Scrape. ## Getting Started @@ -35,26 +34,28 @@ __Torrust Tracker__, is a [BitTorrent][bittorrent] Tracker that matchmakes peers The Torrust Tracker is [deployed to DockerHub][dockerhub], you can run a demo immediately with the following commands: -#### Docker: +#### Docker ```sh docker run -it torrust/tracker:develop ``` + > Please read our [container guide][containers.md] for more information. 
-#### Podman: +#### Podman ```sh podman run -it docker.io/torrust/tracker:develop ``` + > Please read our [container guide][containers.md] for more information. ### Development Version -- Please assure you have the ___[latest stable (or nightly) version of rust][rust]___. -- Please assure that you computer has enough ram. ___Recommended 16GB.___ +- Please ensure you have the _**[latest stable (or nightly) version of rust][rust]___. +- Please ensure that your computer has enough RAM. _**Recommended 16GB.___ -#### Checkout, Test and Run: +#### Checkout, Test and Run ```sh # Checkout repository into a new folder: @@ -71,7 +72,8 @@ cargo test --tests --benches --examples --workspace --all-targets --all-features # Run the tracker: cargo run ``` -#### Customization: + +#### Customization ```sh # Copy the default configuration into the standard location: @@ -92,7 +94,7 @@ _Optionally, you may choose to supply the entire configuration as an environment TORRUST_TRACKER_CONFIG=$(cat "./storage/tracker/etc/tracker.toml") cargo run ``` -_For deployment you __should__ override the `api_admin_token` by using an environmental variable:_ +_For deployment, you **should** override the `api_admin_token` by using an environmental variable:_ ```sh # Generate a Secret Token: @@ -105,9 +107,10 @@ TORRUST_TRACKER_CONFIG=$(cat "./storage/tracker/etc/tracker.toml") \ cargo run ``` -> Please view our [crate documentation][documentation] for more detailed instructions. +> Please view our [crate documentation][docs] for more detailed instructions. 
### Services + The following services are provided by the default configuration: - UDP _(tracker)_ @@ -119,19 +122,20 @@ The following services are provided by the default configuration: ## Documentation -- [Management API (Version 1)][api] -- [Tracker (HTTP/TLS)][http] -- [Tracker (UDP)][udp] +- [Management API (Version 1)][API] +- [Tracker (HTTP/TLS)][HTTP] +- [Tracker (UDP)][UDP] ## Contributing + We are happy to support and welcome new people to our project. Please consider our [contributor guide][guide.md].
-This is an open-source community supported project. We welcome contributions from the community! +This is an open-source community-supported project. We welcome contributions from the community! -__How can you contribute?__ +**How can you contribute?** - Bug reports and feature requests. - Code contributions. You can start by looking at the issues labeled "[good first issues]". -- Documentation improvements. Check the [documentation][docs] and [API documentation][api] for typos, errors, or missing information. +- Documentation improvements. Check the [documentation][docs] and [API documentation][API] for typos, errors, or missing information. - Participation in the community. You can help by answering questions in the [discussions]. ## License @@ -151,11 +155,13 @@ Some files include explicit copyright notices and/or license notices. For prosperity, versions of Torrust Tracker that are older than five years are automatically granted the [MIT-0][MIT_0] license in addition to the existing [AGPL-3.0-only][AGPL_3_0] license. ## Contributor Agreement + The copyright of the Torrust Tracker is retained by the respective authors. **Contributors agree:** -- That all their contributions be granted a license(s) **compatible** with the [Torrust Trackers License](#License). -- That all contributors signal **clearly** and **explicitly** any other compilable licenses if they are not: *[AGPL-3.0-only with the legacy MIT-0 exception](#License)*. + +- That all their contributions be granted a license(s) **compatible** with the [Torrust Trackers License](#license). +- That all contributors signal **clearly** and **explicitly** any other compilable licenses if they are not: _[AGPL-3.0-only with the legacy MIT-0 exception](#license)_. 
**The Torrust-Tracker project has no copyright assignment agreement.** @@ -165,8 +171,6 @@ _We kindly ask you to take time and consider The Torrust Project [Contributor Ag This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [Dutch Bits]. Also thanks to [Naim A.] and [greatest-ape] for some parts of the code. Further added features and functions thanks to [Power2All]. - - [container_wf]: ../../actions/workflows/container.yaml [container_wf_b]: ../../actions/workflows/container.yaml/badge.svg [coverage_wf]: ../../actions/workflows/coverage.yaml diff --git a/docs/containers.md b/docs/containers.md index 2526b880a..6622e29b2 100644 --- a/docs/containers.md +++ b/docs/containers.md @@ -1,10 +1,10 @@ # Containers (Docker or Podman) ## Demo environment + It is simple to setup the tracker with the default configuration and run it using the pre-built public docker image: - With Docker: ```sh @@ -17,11 +17,12 @@ or with Podman: podman run -it docker.io/torrust/tracker:latest ``` - ## Requirements + - Tested with recent versions of Docker or Podman. ## Volumes + The [Containerfile](../Containerfile) (i.e. the Dockerfile) Defines Three Volumes: ```Dockerfile @@ -38,7 +39,8 @@ When instancing the container image with the `docker run` or `podman run` comman > NOTE: You can adjust this mapping for your preference, however this mapping is the default in our guides and scripts. -### Pre-Create Host-Mapped Folders: +### Pre-Create Host-Mapped Folders + Please run this command where you wish to run the container: ```sh @@ -46,11 +48,13 @@ mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/ ``` ### Matching Ownership ID's of Host Storage and Container Volumes + It is important that the `torrust` user has the same uid `$(id -u)` as the host mapped folders. 
In our [entry script](../share/container/entry_script_sh), installed to `/usr/local/bin/entry.sh` inside the container, switches to the `torrust` user created based upon the `USER_UID` environmental variable. When running the container, you may use the `--env USER_ID="$(id -u)"` argument that gets the current user-id and passes to the container. ### Mapped Tree Structure + Using the standard mapping defined above produces this following mapped tree: ```s @@ -78,6 +82,7 @@ git clone https://github.com/torrust/torrust-tracker.git; cd torrust-tracker ``` ### (Docker) Setup Context + Before starting, if you are using docker, it is helpful to reset the context to the default: ```sh @@ -107,6 +112,7 @@ podman build --target debug --tag torrust-tracker:debug --file Containerfile . ## Running the Container ### Basic Run + No arguments are needed for simply checking the container image works: #### (Docker) Run Basic @@ -118,6 +124,7 @@ docker run -it torrust-tracker:release # Debug Mode docker run -it torrust-tracker:debug ``` + #### (Podman) Run Basic ```sh @@ -129,11 +136,13 @@ podman run -it docker.io/torrust-tracker:debug ``` ### Arguments + The arguments need to be placed before the image tag. i.e. `run [arguments] torrust-tracker:release` -#### Environmental Variables: +#### Environmental Variables + Environmental variables are loaded through the `--env`, in the format `--env VAR="value"`. The following environmental variables can be set: @@ -148,8 +157,8 @@ The following environmental variables can be set: - `API_PORT` - The port for the tracker API. This should match the port used in the configuration, (default `1212`). - `HEALTH_CHECK_API_PORT` - The port for the Health Check API. This should match the port used in the configuration, (default `1313`). - ### Sockets + Socket ports used internally within the container can be mapped to with the `--publish` argument. 
The format is: `--publish [optional_host_ip]:[host_port]:[container_port]/[optional_protocol]`, for example: `--publish 127.0.0.1:8080:80/tcp`. @@ -164,7 +173,8 @@ The default ports can be mapped with the following: > NOTE: Inside the container it is necessary to expose a socket with the wildcard address `0.0.0.0` so that it may be accessible from the host. Verify that the configuration that the sockets are wildcard. -### Volumes +### Host-mapped Volumes + By default the container will use install volumes for `/var/lib/torrust/tracker`, `/var/log/torrust/tracker`, and `/etc/torrust/tracker`, however for better administration it good to make these volumes host-mapped. The argument to host-map volumes is `--volume`, with the format: `--volume=[host-src:]container-dest[:]`. @@ -177,10 +187,9 @@ The default mapping can be supplied with the following arguments: --volume ./storage/tracker/etc:/etc/torrust/tracker:Z \ ``` - Please not the `:Z` at the end of the podman `--volume` mapping arguments, this is to give read-write permission on SELinux enabled systemd, if this doesn't work on your system, you can use `:rw` instead. 
-## Complete Example: +## Complete Example ### With Docker @@ -257,7 +266,7 @@ $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 06feacb91a9e torrust-tracker "cargo run" 18 minutes ago Up 4 seconds 0.0.0.0:1212->1212/tcp, :::1212->1212/tcp, 0.0.0.0:7070->7070/tcp, :::7070->7070/tcp, 0.0.0.0:6969->6969/udp, :::6969->6969/udp torrust-tracker-1 34d29e792ee2 mysql:8.0 "docker-entrypoint.s…" 18 minutes ago Up 5 seconds (healthy) 0.0.0.0:3306->3306/tcp, :::3306->3306/tcp, 33060/tcp torrust-mysql-1 -``` +``` And you should be able to use the application, for example making a request to the API: From 5a6c968eb86dd7a4465f837cc8aeeb706beb59c0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 23 Feb 2024 08:24:14 +0000 Subject: [PATCH 0745/1003] feat: [#704] add latency to UDP tracker logs Example: ``` 2024-02-23T08:24:50.137064143+00:00 [UDP TRACKER][INFO] request; server_socket_addr=0.0.0.0:6969 action=ANNOUNCE transaction_id=-888840697 request_id=c38ab102-3ad1-48d3-8f2e-e03190d6e592 connection_id=4792797915217963415 info_hash=9c38422213e30bff212b30c360d26f9a02136422 2024-02-23T08:24:50.137075433+00:00 [UDP TRACKER][INFO] response; server_socket_addr=0.0.0.0:6969 transaction_id=-888840697 request_id=c38ab102-3ad1-48d3-8f2e-e03190d6e592 latency_ms=0 ``` --- src/servers/udp/handlers.rs | 7 ++++++- src/servers/udp/logging.rs | 17 +++++++++++++++-- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index f8424879f..91a371a7b 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -3,6 +3,7 @@ use std::fmt; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::panic::Location; use std::sync::Arc; +use std::time::Instant; use aquatic_udp_protocol::{ AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, @@ -35,6 +36,8 @@ use crate::shared::bit_torrent::info_hash::InfoHash; pub(crate) async fn 
handle_packet(udp_request: UdpRequest, tracker: &Arc, socket: Arc) -> Response { debug!("Handling Packets: {udp_request:?}"); + let start_time = Instant::now(); + let request_id = RequestId::make(&udp_request); let server_socket_addr = socket.local_addr().expect("Could not get local_addr for socket."); @@ -58,7 +61,9 @@ pub(crate) async fn handle_packet(udp_request: UdpRequest, tracker: &Arc handle_error(&e, transaction_id), }; - log_response(&response, &transaction_id, &request_id, &server_socket_addr); + let latency = start_time.elapsed(); + + log_response(&response, &transaction_id, &request_id, &server_socket_addr, latency); response } diff --git a/src/servers/udp/logging.rs b/src/servers/udp/logging.rs index bd1c2951b..a32afc6a3 100644 --- a/src/servers/udp/logging.rs +++ b/src/servers/udp/logging.rs @@ -1,6 +1,7 @@ //! Logging for UDP Tracker requests and responses. use std::net::SocketAddr; +use std::time::Duration; use aquatic_udp_protocol::{Request, Response, TransactionId}; @@ -36,7 +37,13 @@ pub fn log_request(request: &Request, request_id: &RequestId, server_socket_addr tracing::span!( target: "UDP TRACKER", - tracing::Level::INFO, "request", server_socket_addr = %server_socket_addr, action = %action, transaction_id = %transaction_id_str, request_id = %request_id, connection_id = %connection_id_str); + tracing::Level::INFO, + "request", + server_socket_addr = %server_socket_addr, + action = %action, + transaction_id = %transaction_id_str, + request_id = %request_id, + connection_id = %connection_id_str); } }; } @@ -54,10 +61,16 @@ pub fn log_response( transaction_id: &TransactionId, request_id: &RequestId, server_socket_addr: &SocketAddr, + latency: Duration, ) { tracing::span!( target: "UDP TRACKER", - tracing::Level::INFO, "response", server_socket_addr = %server_socket_addr, transaction_id = %transaction_id.0.to_string(), request_id = %request_id); + tracing::Level::INFO, + "response", + server_socket_addr = %server_socket_addr, + transaction_id = 
%transaction_id.0.to_string(), + request_id = %request_id, + latency_ms = %latency.as_millis()); } pub fn log_bad_request(request_id: &RequestId) { From d4310a5d25af612601111e1fa8084733b18d18d1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 Feb 2024 11:15:45 +0000 Subject: [PATCH 0746/1003] chore(deps): udpate cargo dependencies ```console cargo update Updating crates.io index Updating bumpalo v3.15.2 -> v3.15.3 Updating cc v1.0.86 -> v1.0.88 Updating darling v0.20.6 -> v0.20.8 Updating darling_core v0.20.6 -> v0.20.8 Updating darling_macro v0.20.6 -> v0.20.8 Updating half v2.3.1 -> v2.4.0 Updating hermit-abi v0.3.6 -> v0.3.8 Updating local-ip-address v0.6.0 -> v0.6.1 Updating rustls-pki-types v1.3.0 -> v1.3.1 Updating socket2 v0.5.5 -> v0.5.6 Updating syn v2.0.50 -> v2.0.51 Updating tempfile v3.10.0 -> v3.10.1 Updating tower-http v0.5.1 -> v0.5.2 ``` --- Cargo.lock | 108 ++++++++++++++++++++++++++--------------------------- 1 file changed, 54 insertions(+), 54 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e1092fefc..53b9c9569 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -191,7 +191,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -276,7 +276,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -357,7 +357,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -416,7 +416,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", "syn_derive", ] @@ -449,9 +449,9 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.15.2" +version = "3.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3b1be7772ee4501dba05acbe66bb1e8760f6a6c474a36035631638e4415f130" +checksum = 
"8ea184aa71bb362a1157c896979544cc23974e08fd265f29ea96b59f0b4a555b" [[package]] name = "bytecheck" @@ -495,9 +495,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.86" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9fa1897e4325be0d68d48df6aa1a71ac2ed4d27723887e7754192705350730" +checksum = "02f341c093d19155a6e41631ce5971aac4e9a868262212153124c15fa22d1cdc" dependencies = [ "libc", ] @@ -605,7 +605,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -840,9 +840,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.6" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c376d08ea6aa96aafe61237c7200d1241cb177b7d3a542d791f2d118e9cbb955" +checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391" dependencies = [ "darling_core", "darling_macro", @@ -850,27 +850,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.6" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33043dcd19068b8192064c704b3f83eb464f91f1ff527b44a4e2b08d9cdb8855" +checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] name = "darling_macro" -version = "0.20.6" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5a91391accf613803c2a9bf9abccdbaa07c54b4244a5b64883f9c3c137c86be" +checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -904,7 +904,7 @@ checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + 
"syn 2.0.51", ] [[package]] @@ -1082,7 +1082,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -1094,7 +1094,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -1106,7 +1106,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -1171,7 +1171,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -1277,9 +1277,9 @@ dependencies = [ [[package]] name = "half" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc52e53916c08643f1b56ec082790d1e86a32e58dc5268f897f313fbae7b4872" +checksum = "b5eceaaeec696539ddaf7b333340f1af35a5aa87ae3e4f3ead0532f72affab2e" dependencies = [ "cfg-if", "crunchy", @@ -1330,9 +1330,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.3.6" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd5256b483761cd23699d0da46cc6fd2ee3be420bbe6d020ae4a091e70b7e9fd" +checksum = "379dada1584ad501b383485dd706b8afb7a70fcbc7f4da7d780638a5a6124a60" [[package]] name = "hex" @@ -1755,9 +1755,9 @@ checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "local-ip-address" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63e1499d2495be571af92e9ca9dca4e7bf26c47b87cb8d0c6100825e521dd6b" +checksum = "136ef34e18462b17bf39a7826f8f3bbc223341f8e83822beb8b77db9a3d49696" dependencies = [ "libc", "neli", @@ -1858,7 +1858,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] 
[[package]] @@ -1909,7 +1909,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", "termcolor", "thiserror", ] @@ -2109,7 +2109,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -2226,7 +2226,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -2295,7 +2295,7 @@ checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -2810,9 +2810,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048a63e5b3ac996d78d402940b5fa47973d2d080c6c6fffa1d0f19c4445310b7" +checksum = "5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8" [[package]] name = "rustls-webpki" @@ -2956,7 +2956,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -2988,7 +2988,7 @@ checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -3039,7 +3039,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -3108,12 +3108,12 @@ checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ 
-3163,9 +3163,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.50" +version = "2.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb" +checksum = "6ab617d94515e94ae53b8406c628598680aa0c9587474ecbe58188f7b345d66c" dependencies = [ "proc-macro2", "quote", @@ -3181,7 +3181,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -3236,9 +3236,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.10.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", "fastrand", @@ -3278,7 +3278,7 @@ checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -3372,7 +3372,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -3603,9 +3603,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0da193277a4e2c33e59e09b5861580c33dd0a637c3883d0fa74ba40c0374af2e" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ "async-compression", "bitflags 2.4.2", @@ -3666,7 +3666,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] @@ -3825,7 +3825,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", "wasm-bindgen-shared", ] @@ -3859,7 +3859,7 @@ checksum = 
"642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4115,7 +4115,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.51", ] [[package]] From eb8478d9101e5b4579ad1d76ede02c5f1c510d0b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 Feb 2024 16:10:06 +0000 Subject: [PATCH 0747/1003] refactor: [#714] use tower_http::request_id::MakeRequestUuid instead of a custom request UUID generator. I did not know there was already an implementation for it. --- src/servers/apis/routes.rs | 19 ++++--------------- src/servers/health_check_api/server.rs | 19 ++++--------------- src/servers/http/v1/routes.rs | 19 ++++--------------- 3 files changed, 12 insertions(+), 45 deletions(-) diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index aed3ee19d..e3d1ef446 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use std::time::Duration; -use axum::http::{HeaderName, HeaderValue}; +use axum::http::HeaderName; use axum::response::Response; use axum::routing::get; use axum::{middleware, Router}; @@ -16,10 +16,9 @@ use hyper::Request; use torrust_tracker_configuration::AccessTokens; use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; -use tower_http::request_id::{MakeRequestId, RequestId, SetRequestIdLayer}; +use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; use tower_http::trace::{DefaultMakeSpan, TraceLayer}; use tracing::{Level, Span}; -use uuid::Uuid; use super::v1; use super::v1::context::health_check::handlers::health_check_handler; @@ -41,7 +40,7 @@ pub fn router(tracker: Arc, access_tokens: Arc) -> Router .layer(middleware::from_fn_with_state(state, v1::middlewares::auth::auth)) 
.route(&format!("{api_url_prefix}/health_check"), get(health_check_handler)) .layer(CompressionLayer::new()) - .layer(SetRequestIdLayer::x_request_id(RequestIdGenerator)) + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id"))) .layer( TraceLayer::new_for_http() @@ -73,15 +72,5 @@ pub fn router(tracker: Arc, access_tokens: Arc) -> Router tracing::Level::INFO, "response", latency = %latency_ms, status = %status_code, request_id = %request_id); }), ) - .layer(SetRequestIdLayer::x_request_id(RequestIdGenerator)) -} - -#[derive(Clone, Default)] -struct RequestIdGenerator; - -impl MakeRequestId for RequestIdGenerator { - fn make_request_id(&mut self, _request: &Request) -> Option { - let id = HeaderValue::from_str(&Uuid::new_v4().to_string()).expect("UUID is a valid HTTP header value"); - Some(RequestId::new(id)) - } + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) } diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs index 049f48d40..05ed605f4 100644 --- a/src/servers/health_check_api/server.rs +++ b/src/servers/health_check_api/server.rs @@ -5,7 +5,7 @@ use std::net::SocketAddr; use std::time::Duration; -use axum::http::{HeaderName, HeaderValue}; +use axum::http::HeaderName; use axum::response::Response; use axum::routing::get; use axum::{Json, Router}; @@ -17,10 +17,9 @@ use serde_json::json; use tokio::sync::oneshot::{Receiver, Sender}; use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; -use tower_http::request_id::{MakeRequestId, RequestId, SetRequestIdLayer}; +use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; use tower_http::trace::{DefaultMakeSpan, TraceLayer}; use tracing::{Level, Span}; -use uuid::Uuid; use crate::bootstrap::jobs::Started; use crate::servers::health_check_api::handlers::health_check_handler; @@ -43,7 +42,7 @@ pub fn start( .route("/health_check", 
get(health_check_handler)) .with_state(register) .layer(CompressionLayer::new()) - .layer(SetRequestIdLayer::x_request_id(RequestIdGenerator)) + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id"))) .layer( TraceLayer::new_for_http() @@ -75,7 +74,7 @@ pub fn start( tracing::Level::INFO, "response", latency = %latency_ms, status = %status_code, request_id = %request_id); }), ) - .layer(SetRequestIdLayer::x_request_id(RequestIdGenerator)); + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)); let socket = std::net::TcpListener::bind(bind_to).expect("Could not bind tcp_listener to address."); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); @@ -99,13 +98,3 @@ pub fn start( running } - -#[derive(Clone, Default)] -struct RequestIdGenerator; - -impl MakeRequestId for RequestIdGenerator { - fn make_request_id(&mut self, _request: &Request) -> Option { - let id = HeaderValue::from_str(&Uuid::new_v4().to_string()).expect("UUID is a valid HTTP header value"); - Some(RequestId::new(id)) - } -} diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index b972cf62f..05cd38713 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -3,7 +3,7 @@ use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; -use axum::http::{HeaderName, HeaderValue}; +use axum::http::HeaderName; use axum::response::Response; use axum::routing::get; use axum::Router; @@ -11,10 +11,9 @@ use axum_client_ip::SecureClientIpSource; use hyper::Request; use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; -use tower_http::request_id::{MakeRequestId, RequestId, SetRequestIdLayer}; +use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; use tower_http::trace::{DefaultMakeSpan, TraceLayer}; use tracing::{Level, Span}; -use uuid::Uuid; use super::handlers::{announce, 
health_check, scrape}; use crate::core::Tracker; @@ -37,7 +36,7 @@ pub fn router(tracker: Arc, server_socket_addr: SocketAddr) -> Router { // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) .layer(CompressionLayer::new()) - .layer(SetRequestIdLayer::x_request_id(RequestIdGenerator)) + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id"))) .layer( TraceLayer::new_for_http() @@ -69,15 +68,5 @@ pub fn router(tracker: Arc, server_socket_addr: SocketAddr) -> Router { tracing::Level::INFO, "response", server_socket_addr= %server_socket_addr, latency = %latency_ms, status = %status_code, request_id = %request_id); }), ) - .layer(SetRequestIdLayer::x_request_id(RequestIdGenerator)) -} - -#[derive(Clone, Default)] -struct RequestIdGenerator; - -impl MakeRequestId for RequestIdGenerator { - fn make_request_id(&mut self, _request: &Request) -> Option { - let id = HeaderValue::from_str(&Uuid::new_v4().to_string()).expect("UUID is a valid HTTP header value"); - Some(RequestId::new(id)) - } + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) } From e77d89f848c4525a8dba7081584e6b14db732c95 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 Feb 2024 16:59:41 +0000 Subject: [PATCH 0748/1003] chore: add ADR about when to use plural for mod names --- ...ural_for_modules_containing_collections.md | 35 +++++++++++++++++++ docs/adrs/README.md | 23 ++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 docs/adrs/20240227164834_use_plural_for_modules_containing_collections.md create mode 100644 docs/adrs/README.md diff --git a/docs/adrs/20240227164834_use_plural_for_modules_containing_collections.md b/docs/adrs/20240227164834_use_plural_for_modules_containing_collections.md new file mode 100644 index 000000000..beb3cee00 --- /dev/null +++ b/docs/adrs/20240227164834_use_plural_for_modules_containing_collections.md @@ 
-0,0 +1,35 @@ +# Use plural for modules containing collections of types + +## Description + +In Rust, the naming conventions for module names (mod names) generally lean +towards using the singular form, rather than plurals. This practice aligns with +Rust's emphasis on clarity and precision in code organization. The idea is that +a module name should represent a single concept or functionality, which often +means using a singular noun to describe what the module contains or does. + +However, it's important to note that conventions can vary depending on the +context or the specific project. Some projects may choose to use plural forms +for module names if they feel it more accurately represents the contents of the +module. For example, a module that contains multiple implementations of a +similar concept or utility functions related to a specific theme might be named +in the plural to reflect the diversity of its contents. + +This could have some pros and cons. For example, for a module containing types of +requests you could refer to a concrete request with `request::Announce` or +`requests::Announce`. If you read a code line `request::Announce` is probably +better. However, if you read the file or folder name, `requests` gives you a +better idea of what the module contains. + +## Agreement + +We agree to use plural in cases where the modules contain some types with the +same type of responsibility. For example: + +- `src/servers`. +- `src/servers/http/v1/requests`. +- `src/servers/http/v1/responses`. +- `src/servers/http/v1/services`. +- Etcetera. + +We will change them progressively. diff --git a/docs/adrs/README.md b/docs/adrs/README.md new file mode 100644 index 000000000..85986fc36 --- /dev/null +++ b/docs/adrs/README.md @@ -0,0 +1,23 @@ +# Architectural Decision Records (ADRs) + +This directory contains the architectural decision records (ADRs) for the +project. ADRs are a way to document the architectural decisions made in the +project. + +More info: . 
+ +## How to add a new record + +For the prefix: + +```s +date -u +"%Y%m%d%H%M%S" +``` + +Then you can create a new markdown file with the following format: + +```s +20230510152112_title.md +``` + +For the time being, we are not following any specific template. From f9a5f7e3462526297b92dde9d39acb1029aad220 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 11 Mar 2024 10:05:46 +0000 Subject: [PATCH 0749/1003] chore: fix linting errors --- packages/configuration/src/lib.rs | 2 +- packages/primitives/src/lib.rs | 2 +- packages/test-helpers/src/configuration.rs | 6 +++--- src/servers/udp/error.rs | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 4b81aed8b..4068c046f 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -475,7 +475,7 @@ pub struct Configuration { /// peers from the torrent peer list. pub inactive_peer_cleanup_interval: u64, /// If enabled, the tracker will remove torrents that have no peers. - /// THe clean up torrent job runs every `inactive_peer_cleanup_interval` + /// The clean up torrent job runs every `inactive_peer_cleanup_interval` /// seconds and it removes inactive peers. Eventually, the peer list of a /// torrent could be empty and the torrent will be removed if this option is /// enabled. diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index e6f8cb93b..f6a14b9e8 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -19,7 +19,7 @@ pub enum DatabaseDriver { // TODO: Move to the database crate once that gets its own crate. /// The Sqlite3 database driver. Sqlite3, - /// The MySQL database driver. + /// The `MySQL` database driver. 
MySQL, } diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 388d0151f..49cfdd390 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -55,7 +55,7 @@ pub fn ephemeral() -> Configuration { let temp_directory = env::temp_dir(); let random_db_id = random::string(16); let temp_file = temp_directory.join(format!("data_{random_db_id}.db")); - config.db_path = temp_file.to_str().unwrap().to_owned(); + temp_file.to_str().unwrap().clone_into(&mut config.db_path); config } @@ -138,8 +138,8 @@ pub fn ephemeral_ipv6() -> Configuration { let ipv6 = format!("[::]:{}", 0); - cfg.http_api.bind_address = ipv6.clone(); - cfg.http_trackers[0].bind_address = ipv6.clone(); + cfg.http_api.bind_address.clone_from(&ipv6); + cfg.http_trackers[0].bind_address.clone_from(&ipv6); cfg.udp_trackers[0].bind_address = ipv6; cfg diff --git a/src/servers/udp/error.rs b/src/servers/udp/error.rs index fb7bb93f3..315c9d1cf 100644 --- a/src/servers/udp/error.rs +++ b/src/servers/udp/error.rs @@ -13,7 +13,7 @@ pub enum Error { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, - /// Error returned from a third-party library (aquatic_udp_protocol). + /// Error returned from a third-party library (`aquatic_udp_protocol`). #[error("internal server error: {message}, {location}")] InternalServer { location: &'static Location<'static>, From 4b24256e0b7a2d36de439a69f63c626ddc279aa6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 11 Mar 2024 17:10:01 +0000 Subject: [PATCH 0750/1003] chore(deps): add cargo dependency: axum-extra We need to parse URL query parameter arrays. For example: http://127.0.0.1:1212/api/v1/torrents?token=MyAccessToken&info_hash=9c38422213e30bff212b30c360d26f9a02136422&info_hash=2b66980093bc11806fab50cb3cb41835b95a0362 ```rust pub struct QueryParams { /// The offset of the first page to return. Starts at 0. 
#[serde(default, deserialize_with = "empty_string_as_none")] pub offset: Option, /// The maximum number of items to return per page. #[serde(default, deserialize_with = "empty_string_as_none")] pub limit: Option, /// A list of infohashes to retrieve. #[serde(default, rename = "info_hash")] pub info_hashes: Vec, } ``` --- Cargo.lock | 36 ++++++++++++++++++++++++++++++++++++ Cargo.toml | 1 + 2 files changed, 37 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 53b9c9569..9bc13c1a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -267,6 +267,28 @@ dependencies = [ "tracing", ] +[[package]] +name = "axum-extra" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "895ff42f72016617773af68fb90da2a9677d89c62338ec09162d4909d86fdd8f" +dependencies = [ + "axum", + "axum-core", + "bytes", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", + "mime", + "pin-project-lite", + "serde", + "serde_html_form", + "tower", + "tower-layer", + "tower-service", +] + [[package]] name = "axum-macros" version = "0.4.1" @@ -2959,6 +2981,19 @@ dependencies = [ "syn 2.0.51", ] +[[package]] +name = "serde_html_form" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50437e6a58912eecc08865e35ea2e8d365fbb2db0debb1c8bb43bf1faf055f25" +dependencies = [ + "form_urlencoded", + "indexmap 2.2.3", + "itoa", + "ryu", + "serde", +] + [[package]] name = "serde_json" version = "1.0.114" @@ -3485,6 +3520,7 @@ dependencies = [ "async-trait", "axum", "axum-client-ip", + "axum-extra", "axum-server", "binascii", "chrono", diff --git a/Cargo.toml b/Cargo.toml index 26f4334f1..36c865447 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,6 +34,7 @@ aquatic_udp_protocol = "0" async-trait = "0" axum = { version = "0", features = ["macros"] } axum-client-ip = "0" +axum-extra = { version = "0.9.2", features = ["query"] } axum-server = { version = "0", features = ["tls-rustls"] } binascii = "0" chrono = { 
version = "0", default-features = false, features = ["clock"] } From d39bfc20fce259fe575c1f4c4d3e3a628e2a78b9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 11 Mar 2024 17:13:33 +0000 Subject: [PATCH 0751/1003] feat: [#725] API. Add scrape filter to torrents endpoint The torrents endppint allow getting a list of torrents provifing the infohashes: http://127.0.0.1:1212/api/v1/torrents?token=MyAccessToken&info_hash=9c38422213e30bff212b30c360d26f9a02136422&info_hash=2b66980093bc11806fab50cb3cb41835b95a0362 It's like the tracker "scrape" request. The response JSON is the same as the normal torrent list: ```json [ { "info_hash": "9c38422213e30bff212b30c360d26f9a02136422", "seeders": 1, "completed": 0, "leechers": 0 }, { "info_hash": "2b66980093bc11806fab50cb3cb41835b95a0362", "seeders": 1, "completed": 0, "leechers": 0 } ] ``` --- src/core/services/torrent.rs | 36 ++++++-- .../apis/v1/context/torrent/handlers.rs | 91 ++++++++++++++----- .../api/v1/contract/context/torrent.rs | 65 ++++++++++++- 3 files changed, 163 insertions(+), 29 deletions(-) diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index d1ab29a7f..fc24e7c4c 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -115,7 +115,7 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op } /// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. -pub async fn get_torrents(tracker: Arc, pagination: &Pagination) -> Vec { +pub async fn get_torrents_page(tracker: Arc, pagination: &Pagination) -> Vec { let db = tracker.torrents.get_torrents().await; let mut basic_infos: Vec = vec![]; @@ -134,6 +134,28 @@ pub async fn get_torrents(tracker: Arc, pagination: &Pagination) -> Vec basic_infos } +/// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. 
+pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Vec { + let db = tracker.torrents.get_torrents().await; + + let mut basic_infos: Vec = vec![]; + + for info_hash in info_hashes { + if let Some(entry) = db.get(info_hash) { + let (seeders, completed, leechers) = entry.get_stats(); + + basic_infos.push(BasicInfo { + info_hash: *info_hash, + seeders: u64::from(seeders), + completed: u64::from(completed), + leechers: u64::from(leechers), + }); + } + } + + basic_infos +} + #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; @@ -219,7 +241,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::core::services::torrent::tests::sample_peer; - use crate::core::services::torrent::{get_torrents, BasicInfo, Pagination}; + use crate::core::services::torrent::{get_torrents_page, BasicInfo, Pagination}; use crate::core::services::tracker_factory; use crate::shared::bit_torrent::info_hash::InfoHash; @@ -231,7 +253,7 @@ mod tests { async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let tracker = Arc::new(tracker_factory(&tracker_configuration())); - let torrents = get_torrents(tracker.clone(), &Pagination::default()).await; + let torrents = get_torrents_page(tracker.clone(), &Pagination::default()).await; assert_eq!(torrents, vec![]); } @@ -247,7 +269,7 @@ mod tests { .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer()) .await; - let torrents = get_torrents(tracker.clone(), &Pagination::default()).await; + let torrents = get_torrents_page(tracker.clone(), &Pagination::default()).await; assert_eq!( torrents, @@ -279,7 +301,7 @@ mod tests { let offset = 0; let limit = 1; - let torrents = get_torrents(tracker.clone(), &Pagination::new(offset, limit)).await; + let torrents = get_torrents_page(tracker.clone(), &Pagination::new(offset, limit)).await; assert_eq!(torrents.len(), 1); } @@ -303,7 +325,7 @@ mod tests { let offset = 1; let limit = 4000; - let torrents = 
get_torrents(tracker.clone(), &Pagination::new(offset, limit)).await; + let torrents = get_torrents_page(tracker.clone(), &Pagination::new(offset, limit)).await; assert_eq!(torrents.len(), 1); assert_eq!( @@ -333,7 +355,7 @@ mod tests { .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) .await; - let torrents = get_torrents(tracker.clone(), &Pagination::default()).await; + let torrents = get_torrents_page(tracker.clone(), &Pagination::default()).await; assert_eq!( torrents, diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index 101a25c8d..dcb92dec3 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -4,14 +4,15 @@ use std::fmt; use std::str::FromStr; use std::sync::Arc; -use axum::extract::{Path, Query, State}; -use axum::response::{IntoResponse, Json, Response}; +use axum::extract::{Path, State}; +use axum::response::{IntoResponse, Response}; +use axum_extra::extract::Query; use log::debug; use serde::{de, Deserialize, Deserializer}; +use thiserror::Error; -use super::resources::torrent::ListItem; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; -use crate::core::services::torrent::{get_torrent_info, get_torrents, Pagination}; +use crate::core::services::torrent::{get_torrent_info, get_torrents, get_torrents_page, Pagination}; use crate::core::Tracker; use crate::servers::apis::v1::responses::invalid_info_hash_param_response; use crate::servers::apis::InfoHashParam; @@ -36,39 +37,87 @@ pub async fn get_torrent_handler(State(tracker): State>, Path(info_ } } -/// A container for the optional URL query pagination parameters: -/// `offset` and `limit`. +/// A container for the URL query parameters. +/// +/// Pagination: `offset` and `limit`. +/// Array of infohashes: `info_hash`. 
+/// +/// You can either get all torrents with pagination or get a list of torrents +/// providing a list of infohashes. For example: +/// +/// First page of torrents: +/// +/// +/// +/// +/// Only two torrents: +/// +/// +/// +/// +/// NOTICE: Pagination is ignored if array of infohashes is provided. #[derive(Deserialize, Debug)] -pub struct PaginationParams { +pub struct QueryParams { /// The offset of the first page to return. Starts at 0. #[serde(default, deserialize_with = "empty_string_as_none")] pub offset: Option, - /// The maximum number of items to return per page + /// The maximum number of items to return per page. #[serde(default, deserialize_with = "empty_string_as_none")] pub limit: Option, + /// A list of infohashes to retrieve. + #[serde(default, rename = "info_hash")] + pub info_hashes: Vec, } /// It handles the request to get a list of torrents. /// -/// It returns a `200` response with a json array with -/// [`ListItem`] -/// resources. +/// It returns a `200` response with a json array with [`crate::servers::apis::v1::context::torrent::resources::torrent::ListItem`] resources. /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent#list-torrents) /// for more information about this endpoint. 
-pub async fn get_torrents_handler( - State(tracker): State>, - pagination: Query, -) -> Json> { +pub async fn get_torrents_handler(State(tracker): State>, pagination: Query) -> Response { debug!("pagination: {:?}", pagination); - torrent_list_response( - &get_torrents( - tracker.clone(), - &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), + if pagination.0.info_hashes.is_empty() { + torrent_list_response( + &get_torrents_page( + tracker.clone(), + &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), + ) + .await, ) - .await, - ) + .into_response() + } else { + match parse_info_hashes(pagination.0.info_hashes) { + Ok(info_hashes) => torrent_list_response(&get_torrents(tracker.clone(), &info_hashes).await).into_response(), + Err(err) => match err { + QueryParamError::InvalidInfoHash { info_hash } => invalid_info_hash_param_response(&info_hash), + }, + } + } +} + +#[derive(Error, Debug)] +pub enum QueryParamError { + #[error("invalid infohash {info_hash}")] + InvalidInfoHash { info_hash: String }, +} + +fn parse_info_hashes(info_hashes_str: Vec) -> Result, QueryParamError> { + let mut info_hashes: Vec = Vec::new(); + + for info_hash_str in info_hashes_str { + match InfoHash::from_str(&info_hash_str) { + Ok(info_hash) => info_hashes.push(info_hash), + Err(_err) => { + return Err(QueryParamError::InvalidInfoHash { + info_hash: info_hash_str, + }) + } + } + } + + Ok(info_hashes) } /// Serde deserialization decorator to map empty Strings to None, diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index 63b97b402..ee701ecc4 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -19,7 +19,7 @@ use crate::servers::api::v1::contract::fixtures::{ use crate::servers::api::Started; #[tokio::test] -async fn should_allow_getting_torrents() { +async fn should_allow_getting_all_torrents() { let env = 
Started::new(&configuration::ephemeral().into()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -100,6 +100,48 @@ async fn should_allow_the_torrents_result_pagination() { env.stop().await; } +#[tokio::test] +async fn should_allow_getting_a_list_of_torrents_providing_infohashes() { + let env = Started::new(&configuration::ephemeral().into()).await; + + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 + + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; + + let response = Client::new(env.get_connection_info()) + .get_torrents(Query::params( + [ + QueryParam::new("info_hash", "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d"), // DevSkim: ignore DS173237 + QueryParam::new("info_hash", "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d"), // DevSkim: ignore DS173237 + ] + .to_vec(), + )) + .await; + + assert_torrent_list( + response, + vec![ + torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), // DevSkim: ignore DS173237 + seeders: 1, + completed: 0, + leechers: 0, + }, + torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), // DevSkim: ignore DS173237 + seeders: 1, + completed: 0, + leechers: 0, + }, + ], + ) + .await; + + env.stop().await; +} + #[tokio::test] async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { let env = Started::new(&configuration::ephemeral().into()).await; @@ -134,6 +176,27 @@ async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_p env.stop().await; } +#[tokio::test] +async fn should_fail_getting_torrents_when_the_info_hash_parameter_is_invalid() { + let env = 
Started::new(&configuration::ephemeral().into()).await; + + let invalid_info_hashes = [" ", "-1", "1.1", "INVALID INFO_HASH"]; + + for invalid_info_hash in &invalid_info_hashes { + let response = Client::new(env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("info_hash", invalid_info_hash)].to_vec())) + .await; + + assert_bad_request( + response, + &format!("Invalid URL: invalid infohash param: string \"{invalid_info_hash}\", expected a 40 character long string"), + ) + .await; + } + + env.stop().await; +} + #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { let env = Started::new(&configuration::ephemeral().into()).await; From fd0ad1bbce9c06c5f195b6885f618a409892e774 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 14 Mar 2024 16:31:21 +0000 Subject: [PATCH 0752/1003] test: [#87] remove lua script for HTTP tracker benchmarking We will use the aquatic commands to test both the HTTP and UDP trackers. --- tests/README.md | 9 ----- tests/wrk_benchmark_announce.lua | 68 -------------------------------- 2 files changed, 77 deletions(-) delete mode 100644 tests/README.md delete mode 100644 tests/wrk_benchmark_announce.lua diff --git a/tests/README.md b/tests/README.md deleted file mode 100644 index 04860056c..000000000 --- a/tests/README.md +++ /dev/null @@ -1,9 +0,0 @@ -### Running Benchmarks - -#### HTTP(S) Announce Peer + Torrent -For this benchmark we use the tool [wrk](https://github.com/wg/wrk). 
- -To run the benchmark using wrk, execute the following example script (change the url to your own tracker url): - - wrk -c200 -t1 -d10s -s ./wrk_benchmark_announce.lua --latency http://tracker.dutchbits.nl - diff --git a/tests/wrk_benchmark_announce.lua b/tests/wrk_benchmark_announce.lua deleted file mode 100644 index c0bdac48d..000000000 --- a/tests/wrk_benchmark_announce.lua +++ /dev/null @@ -1,68 +0,0 @@ --- else the randomness would be the same every run -math.randomseed(os.time()) - -local charset = "0123456789ABCDEF" - -function hex_to_char(hex) - local n = tonumber(hex, 16) - local f = string.char(n) - return f -end - -function hex_string_to_char_string(hex) - local ret = {} - local r - for i = 0, 19 do - local x = i * 2 - r = hex:sub(x+1, x+2) - local f = hex_to_char(r) - table.insert(ret, f) - end - return table.concat(ret) -end - -function url_encode(str) - str = string.gsub (str, "([^0-9a-zA-Z !'()*._~-])", -- locale independent - function (c) return string.format ("%%%02X", string.byte(c)) end) - str = string.gsub (str, " ", "+") - return str -end - -function gen_hex_string(length) - local ret = {} - local r - for i = 1, length do - r = math.random(1, #charset) - table.insert(ret, charset:sub(r, r)) - end - return table.concat(ret) -end - -function random_info_hash() - local hexString = gen_hex_string(40) - local str = hex_string_to_char_string(hexString) - return url_encode(str) -end - -function generate_unique_info_hashes(size) - local result = {} - - for i = 1, size do - result[i] = random_info_hash() - end - - return result -end - -info_hashes = generate_unique_info_hashes(5000000) - -index = 1 - --- the request function that will run at each request -request = function() - path = "/announce?info_hash=" .. info_hashes[index] .. 
"&peer_id=-lt0D80-a%D4%10%19%99%A6yh%9A%E1%CD%96&port=54434&uploaded=885&downloaded=0&left=0&corrupt=0&key=A78381BD&numwant=200&compact=1&no_peer_id=1&supportcrypto=1&redundant=0" - index = index + 1 - headers = {} - headers["X-Forwarded-For"] = "1.1.1.1" - return wrk.format("GET", path, headers) -end From d32a748e28f7fdb6a85e4f38beb9be44a070ea93 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 14 Mar 2024 17:34:28 +0000 Subject: [PATCH 0753/1003] docs: [#87] benchmarking How to run load tests using aquatic UDP load test commands. --- README.md | 4 + cSpell.json | 1 + docs/benchmarking.md | 252 ++++++++++++++++++ .../config/tracker.udp.benchmarking.toml | 37 +++ 4 files changed, 294 insertions(+) create mode 100644 docs/benchmarking.md create mode 100644 share/default/config/tracker.udp.benchmarking.toml diff --git a/README.md b/README.md index 18f3d361d..ea5078b19 100644 --- a/README.md +++ b/README.md @@ -126,6 +126,10 @@ The following services are provided by the default configuration: - [Tracker (HTTP/TLS)][HTTP] - [Tracker (UDP)][UDP] +## Benchmarking + +- [Benchmarking](./docs/benchmarking.md) + ## Contributing We are happy to support and welcome new people to our project. Please consider our [contributor guide][guide.md].
diff --git a/cSpell.json b/cSpell.json index 646037e59..183ea31fb 100644 --- a/cSpell.json +++ b/cSpell.json @@ -50,6 +50,7 @@ "hexlify", "hlocalhost", "Hydranode", + "hyperthread", "Icelake", "imdl", "impls", diff --git a/docs/benchmarking.md b/docs/benchmarking.md new file mode 100644 index 000000000..8b455d4f9 --- /dev/null +++ b/docs/benchmarking.md @@ -0,0 +1,252 @@ +# Benchmarking + +We have two types of benchmarking: + +- E2E benchmarking running the service (HTTP or UDP tracker). +- Internal torrents repository benchmarking. + +## E2E benchmarking + +We are using the scripts provided by [aquatic](https://github.com/greatest-ape/aquatic). + +Installing both commands: + +```console +cargo install aquatic_udp_load_test +cargo install aquatic_http_load_test +``` + +### Run UDP load test + +Run the tracker with UDP service enabled on port 3000 and set log level to `error`. + +```toml +log_level = "error" + +[[udp_trackers]] +bind_address = "0.0.0.0:3000" +enabled = true +``` + +Run the load test with: + +```console +aquatic_udp_load_test +``` + +Output: + +```output +Starting client with config: Config { + server_address: 127.0.0.1:3000, + log_level: Error, + workers: 1, + duration: 0, + network: NetworkConfig { + multiple_client_ipv4s: true, + first_port: 45000, + poll_timeout: 276, + poll_event_capacity: 2877, + recv_buffer: 6000000, + }, + requests: RequestConfig { + number_of_torrents: 10000, + scrape_max_torrents: 50, + weight_connect: 0, + weight_announce: 100, + weight_scrape: 1, + torrent_gamma_shape: 0.2, + torrent_gamma_scale: 100.0, + peer_seeder_probability: 0.25, + additional_request_probability: 0.5, + }, +} + +Requests out: 32632.43/second +Responses in: 24239.33/second + - Connect responses: 7896.91 + - Announce responses: 16327.01 + - Scrape responses: 15.40 + - Error responses: 0.00 +Peers per announce response: 33.10 +``` + +### Run HTTP load test + +Run the tracker with UDP service enabled on port 3000 and set log level to `error`. 
+ +```toml +[[udp_trackers]] +bind_address = "0.0.0.0:3000" +enabled = true +``` + +Run the load test with: + +```console +aquatic_http_load_test +``` + +Output: + +```output +Starting client with config: Config { + server_address: 127.0.0.1:3000, + log_level: Error, + num_workers: 1, + num_connections: 128, + connection_creation_interval_ms: 10, + url_suffix: "", + duration: 0, + keep_alive: true, + torrents: TorrentConfig { + number_of_torrents: 10000, + peer_seeder_probability: 0.25, + weight_announce: 5, + weight_scrape: 0, + torrent_gamma_shape: 0.2, + torrent_gamma_scale: 100.0, + }, + cpu_pinning: CpuPinningConfigDesc { + active: false, + direction: Descending, + hyperthread: System, + core_offset: 0, + }, +} +``` + +### Comparing UDP tracker with other Rust implementations + +#### Torrust UDP Tracker + +Running the tracker: + +```console +git@github.com:torrust/torrust-tracker.git +cd torrust-tracker +cargo build --release +TORRUST_TRACKER_PATH_CONFIG="./share/default/config/tracker.udp.benchmarking.toml" ./target/release/torrust-tracker +``` + +Running the test: `aquatic_udp_load_test`. + +```output +Requests out: 13075.56/second +Responses in: 12058.38/second + - Connect responses: 1017.18 + - Announce responses: 11035.00 + - Scrape responses: 6.20 + - Error responses: 0.00 +Peers per announce response: 41.13 +``` + +#### Aquatic UDP Tracker + +Running the tracker: + +```console +git clone git@github.com:greatest-ape/aquatic.git +cd aquatic +cargo build --release -p aquatic_udp +./target/release/aquatic_udp -c "aquatic-udp-config.toml" +./target/release/aquatic_udp -c "aquatic-udp-config.toml" +``` + +Running the test: `aquatic_udp_load_test`. 
+ +```output +Requests out: 383873.14/second +Responses in: 383440.35/second + - Connect responses: 429.19 + - Announce responses: 379249.22 + - Scrape responses: 3761.93 + - Error responses: 0.00 +Peers per announce response: 15.33 +``` + +#### Torrust-Actix UDP Tracker + +Run the tracker with UDP service enabled on port 3000 and set log level to `error`. + +```toml +[[udp_trackers]] +bind_address = "0.0.0.0:3000" +enabled = true +``` + +```console +git clone https://github.com/Power2All/torrust-actix.git +cd torrust-actix +cargo build --release +./target/release/torrust-actix --create-config +./target/release/torrust-actix +``` + +Running the test: `aquatic_udp_load_test`. + +```output +Requests out: 3072.94/second +Responses in: 2395.15/second + - Connect responses: 556.79 + - Announce responses: 1821.16 + - Scrape responses: 17.20 + - Error responses: 0.00 +Peers per announce response: 133.88 +``` + +### Results + +Announce request per second: + +| Tracker | Announce | +|---------------|-----------| +| Aquatic | 379,249 | +| Torrust | 11,035 | +| Torrust-Actix | 1,821 | + +## Repository benchmarking + +You can run it with: + +```console +cargo run --release -p torrust-torrent-repository-benchmarks -- --threads 4 --sleep 0 --compare true +``` + +It tests the different implementation for the internal torrent storage. 
+ +```output +tokio::sync::RwLock> +add_one_torrent: Avg/AdjAvg: (60ns, 59ns) +update_one_torrent_in_parallel: Avg/AdjAvg: (10.909457ms, 0ns) +add_multiple_torrents_in_parallel: Avg/AdjAvg: (13.88879ms, 0ns) +update_multiple_torrents_in_parallel: Avg/AdjAvg: (7.772484ms, 7.782535ms) + +std::sync::RwLock> +add_one_torrent: Avg/AdjAvg: (43ns, 39ns) +update_one_torrent_in_parallel: Avg/AdjAvg: (4.020937ms, 4.020937ms) +add_multiple_torrents_in_parallel: Avg/AdjAvg: (5.896177ms, 5.768448ms) +update_multiple_torrents_in_parallel: Avg/AdjAvg: (3.883823ms, 3.883823ms) + +std::sync::RwLock>>> +add_one_torrent: Avg/AdjAvg: (51ns, 49ns) +update_one_torrent_in_parallel: Avg/AdjAvg: (3.252314ms, 3.149109ms) +add_multiple_torrents_in_parallel: Avg/AdjAvg: (8.411094ms, 8.411094ms) +update_multiple_torrents_in_parallel: Avg/AdjAvg: (4.106086ms, 4.106086ms) + +tokio::sync::RwLock>>> +add_one_torrent: Avg/AdjAvg: (91ns, 90ns) +update_one_torrent_in_parallel: Avg/AdjAvg: (3.542378ms, 3.435695ms) +add_multiple_torrents_in_parallel: Avg/AdjAvg: (15.651172ms, 15.651172ms) +update_multiple_torrents_in_parallel: Avg/AdjAvg: (4.368189ms, 4.257572ms) + +tokio::sync::RwLock>>> +add_one_torrent: Avg/AdjAvg: (111ns, 109ns) +update_one_torrent_in_parallel: Avg/AdjAvg: (6.590677ms, 6.808535ms) +add_multiple_torrents_in_parallel: Avg/AdjAvg: (16.572217ms, 16.30488ms) +update_multiple_torrents_in_parallel: Avg/AdjAvg: (4.073221ms, 4.000122ms) +``` + +## Other considerations + +We are testing new repository implementations that allow concurrent writes. See . 
diff --git a/share/default/config/tracker.udp.benchmarking.toml b/share/default/config/tracker.udp.benchmarking.toml new file mode 100644 index 000000000..182112803 --- /dev/null +++ b/share/default/config/tracker.udp.benchmarking.toml @@ -0,0 +1,37 @@ +announce_interval = 120 +db_driver = "Sqlite3" +db_path = "./storage/tracker/lib/database/sqlite3.db" +external_ip = "0.0.0.0" +inactive_peer_cleanup_interval = 600 +log_level = "error" +max_peer_timeout = 900 +min_announce_interval = 120 +mode = "public" +on_reverse_proxy = false +persistent_torrent_completed_stat = false +remove_peerless_torrents = true +tracker_usage_statistics = true + +[[udp_trackers]] +bind_address = "0.0.0.0:3000" +enabled = true + +[[http_trackers]] +bind_address = "0.0.0.0:7070" +enabled = false +ssl_cert_path = "" +ssl_enabled = false +ssl_key_path = "" + +[http_api] +bind_address = "127.0.0.1:1212" +enabled = false +ssl_cert_path = "" +ssl_enabled = false +ssl_key_path = "" + +[http_api.access_tokens] +admin = "MyAccessToken" + +[health_check_api] +bind_address = "127.0.0.1:1313" From 45c77c33168dce1e5e8c01e82cde79dfd8842393 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 14 Mar 2024 19:37:33 +0000 Subject: [PATCH 0754/1003] fix: linter error, unused code --- src/console/clients/udp/responses.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/console/clients/udp/responses.rs b/src/console/clients/udp/responses.rs index 020c7a367..2fbc38f5f 100644 --- a/src/console/clients/udp/responses.rs +++ b/src/console/clients/udp/responses.rs @@ -68,13 +68,6 @@ impl From for ScrapeResponseDto { } } -#[derive(Serialize)] -struct Peer { - seeders: i32, - completed: i32, - leechers: i32, -} - #[derive(Serialize)] struct TorrentStats { seeders: i32, From 26215e8429b1eb69f9421220146372c5b26bacdc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 15 Mar 2024 10:20:06 +0000 Subject: [PATCH 0755/1003] docs: [#733] udpate benchmarking docs and results --- cSpell.json | 2 + 
docs/benchmarking.md | 239 ++++++++++-------- .../config/tracker.udp.benchmarking.toml | 2 +- 3 files changed, 132 insertions(+), 111 deletions(-) diff --git a/cSpell.json b/cSpell.json index 183ea31fb..297517980 100644 --- a/cSpell.json +++ b/cSpell.json @@ -91,6 +91,7 @@ "proot", "proto", "Quickstart", + "Radeon", "Rasterbar", "realpath", "reannounce", @@ -107,6 +108,7 @@ "RUSTFLAGS", "rustfmt", "Rustls", + "Ryzen", "Seedable", "serde", "Shareaza", diff --git a/docs/benchmarking.md b/docs/benchmarking.md index 8b455d4f9..7c82df14c 100644 --- a/docs/benchmarking.md +++ b/docs/benchmarking.md @@ -2,145 +2,124 @@ We have two types of benchmarking: -- E2E benchmarking running the service (HTTP or UDP tracker). +- E2E benchmarking running the UDP tracker. - Internal torrents repository benchmarking. ## E2E benchmarking We are using the scripts provided by [aquatic](https://github.com/greatest-ape/aquatic). -Installing both commands: +How to install both commands: ```console -cargo install aquatic_udp_load_test -cargo install aquatic_http_load_test +cargo install aquatic_udp_load_test && cargo install aquatic_http_load_test +``` + +You can also clone and build the repos. It's the way used for the results shown +in this documentation. + +```console +git clone git@github.com:greatest-ape/aquatic.git +cd aquatic +cargo build --release -p aquatic_udp_load_test ``` ### Run UDP load test -Run the tracker with UDP service enabled on port 3000 and set log level to `error`. +Run the tracker with UDP service enabled and other services disabled and set log level to `error`. 
```toml log_level = "error" [[udp_trackers]] -bind_address = "0.0.0.0:3000" enabled = true ``` +Build and run the tracker: + +```console +cargo build --release +TORRUST_TRACKER_PATH_CONFIG="./share/default/config/tracker.udp.benchmarking.toml" ./target/release/torrust-tracker +``` + Run the load test with: ```console -aquatic_udp_load_test +./target/release/aquatic_udp_load_test ``` +> NOTICE: You need to modify the port in the `udp_load_test` crate to use `6969` and rebuild. + Output: ```output Starting client with config: Config { - server_address: 127.0.0.1:3000, + server_address: 127.0.0.1:6969, log_level: Error, workers: 1, duration: 0, + summarize_last: 0, + extra_statistics: true, network: NetworkConfig { multiple_client_ipv4s: true, - first_port: 45000, - poll_timeout: 276, - poll_event_capacity: 2877, - recv_buffer: 6000000, + sockets_per_worker: 4, + recv_buffer: 8000000, }, requests: RequestConfig { - number_of_torrents: 10000, - scrape_max_torrents: 50, - weight_connect: 0, - weight_announce: 100, + number_of_torrents: 1000000, + number_of_peers: 2000000, + scrape_max_torrents: 10, + announce_peers_wanted: 30, + weight_connect: 50, + weight_announce: 50, weight_scrape: 1, - torrent_gamma_shape: 0.2, - torrent_gamma_scale: 100.0, - peer_seeder_probability: 0.25, - additional_request_probability: 0.5, + peer_seeder_probability: 0.75, }, } -Requests out: 32632.43/second -Responses in: 24239.33/second - - Connect responses: 7896.91 - - Announce responses: 16327.01 - - Scrape responses: 15.40 +Requests out: 398367.11/second +Responses in: 358530.40/second + - Connect responses: 177567.60 + - Announce responses: 177508.08 + - Scrape responses: 3454.72 - Error responses: 0.00 -Peers per announce response: 33.10 +Peers per announce response: 0.00 +Announce responses per info hash: + - p10: 1 + - p25: 1 + - p50: 1 + - p75: 1 + - p90: 2 + - p95: 3 + - p99: 105 + - p99.9: 289 + - p100: 361 ``` -### Run HTTP load test - -Run the tracker with UDP service enabled on 
port 3000 and set log level to `error`. - -```toml -[[udp_trackers]] -bind_address = "0.0.0.0:3000" -enabled = true -``` - -Run the load test with: - -```console -aquatic_http_load_test -``` - -Output: +> IMPORTANT: The performance of th Torrust UDP Tracker is drastically decreased with these log levels: `info`, `debug`, `trace`. ```output -Starting client with config: Config { - server_address: 127.0.0.1:3000, - log_level: Error, - num_workers: 1, - num_connections: 128, - connection_creation_interval_ms: 10, - url_suffix: "", - duration: 0, - keep_alive: true, - torrents: TorrentConfig { - number_of_torrents: 10000, - peer_seeder_probability: 0.25, - weight_announce: 5, - weight_scrape: 0, - torrent_gamma_shape: 0.2, - torrent_gamma_scale: 100.0, - }, - cpu_pinning: CpuPinningConfigDesc { - active: false, - direction: Descending, - hyperthread: System, - core_offset: 0, - }, -} +Requests out: 40719.21/second +Responses in: 33762.72/second + - Connect responses: 16732.76 + - Announce responses: 16692.98 + - Scrape responses: 336.98 + - Error responses: 0.00 +Peers per announce response: 0.00 +Announce responses per info hash: + - p10: 1 + - p25: 1 + - p50: 1 + - p75: 1 + - p90: 7 + - p95: 14 + - p99: 27 + - p99.9: 35 + - p100: 45 ``` ### Comparing UDP tracker with other Rust implementations -#### Torrust UDP Tracker - -Running the tracker: - -```console -git@github.com:torrust/torrust-tracker.git -cd torrust-tracker -cargo build --release -TORRUST_TRACKER_PATH_CONFIG="./share/default/config/tracker.udp.benchmarking.toml" ./target/release/torrust-tracker -``` - -Running the test: `aquatic_udp_load_test`. 
- -```output -Requests out: 13075.56/second -Responses in: 12058.38/second - - Connect responses: 1017.18 - - Announce responses: 11035.00 - - Scrape responses: 6.20 - - Error responses: 0.00 -Peers per announce response: 41.13 -``` - #### Aquatic UDP Tracker Running the tracker: @@ -149,29 +128,44 @@ Running the tracker: git clone git@github.com:greatest-ape/aquatic.git cd aquatic cargo build --release -p aquatic_udp -./target/release/aquatic_udp -c "aquatic-udp-config.toml" +./target/release/aquatic_udp -p > "aquatic-udp-config.toml" ./target/release/aquatic_udp -c "aquatic-udp-config.toml" ``` -Running the test: `aquatic_udp_load_test`. +Run the load test with: + +```console +./target/release/aquatic_udp_load_test +``` ```output -Requests out: 383873.14/second -Responses in: 383440.35/second - - Connect responses: 429.19 - - Announce responses: 379249.22 - - Scrape responses: 3761.93 +Requests out: 432896.42/second +Responses in: 389577.70/second + - Connect responses: 192864.02 + - Announce responses: 192817.55 + - Scrape responses: 3896.13 - Error responses: 0.00 -Peers per announce response: 15.33 +Peers per announce response: 21.55 +Announce responses per info hash: + - p10: 1 + - p25: 1 + - p50: 1 + - p75: 1 + - p90: 2 + - p95: 3 + - p99: 105 + - p99.9: 311 + - p100: 395 ``` #### Torrust-Actix UDP Tracker -Run the tracker with UDP service enabled on port 3000 and set log level to `error`. +Run the tracker with UDP service enabled and other services disabled and set log level to `error`. ```toml +log_level = "error" + [[udp_trackers]] -bind_address = "0.0.0.0:3000" enabled = true ``` @@ -183,16 +177,32 @@ cargo build --release ./target/release/torrust-actix ``` -Running the test: `aquatic_udp_load_test`. +Run the load test with: + +```console +./target/release/aquatic_udp_load_test +``` + +> NOTICE: You need to modify the port in the `udp_load_test` crate to use `6969` and rebuild. 
```output -Requests out: 3072.94/second -Responses in: 2395.15/second - - Connect responses: 556.79 - - Announce responses: 1821.16 - - Scrape responses: 17.20 +Requests out: 200953.97/second +Responses in: 180858.14/second + - Connect responses: 89517.13 + - Announce responses: 89539.67 + - Scrape responses: 1801.34 - Error responses: 0.00 -Peers per announce response: 133.88 +Peers per announce response: 1.00 +Announce responses per info hash: + - p10: 1 + - p25: 1 + - p50: 1 + - p75: 1 + - p90: 2 + - p95: 7 + - p99: 87 + - p99.9: 155 + - p100: 188 ``` ### Results @@ -201,9 +211,18 @@ Announce request per second: | Tracker | Announce | |---------------|-----------| -| Aquatic | 379,249 | -| Torrust | 11,035 | -| Torrust-Actix | 1,821 | +| Aquatic | 192,817 | +| Torrust | 177,508 | +| Torrust-Actix | 89,539 | + +Using a PC with: + +- RAM: 64GiB +- Processor: AMD Ryzen 9 7950X x 32 +- Graphics: AMD Radeon Graphics / Intel Arc A770 Graphics (DG2) +- OS: Ubuntu 23.04 +- OS Type: 64-bit +- Kernel Version: Linux 6.2.0-20-generic ## Repository benchmarking diff --git a/share/default/config/tracker.udp.benchmarking.toml b/share/default/config/tracker.udp.benchmarking.toml index 182112803..080c67e84 100644 --- a/share/default/config/tracker.udp.benchmarking.toml +++ b/share/default/config/tracker.udp.benchmarking.toml @@ -13,7 +13,7 @@ remove_peerless_torrents = true tracker_usage_statistics = true [[udp_trackers]] -bind_address = "0.0.0.0:3000" +bind_address = "0.0.0.0:6969" enabled = true [[http_trackers]] From b3ad652ac0782fccc149eabb0ed82e4a72227d2d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 16 Mar 2024 04:20:17 +0800 Subject: [PATCH 0756/1003] chore: update cargo deps Updating crates.io index Updating ahash v0.8.9 -> v0.8.11 Updating anstream v0.6.12 -> v0.6.13 Updating anyhow v1.0.80 -> v1.0.81 Updating arc-swap v1.6.0 -> v1.7.0 Updating axum-client-ip v0.5.0 -> v0.5.1 Updating bumpalo v3.15.3 -> v3.15.4 Updating cc v1.0.88 -> v1.0.90 Updating chrono 
v0.4.34 -> v0.4.35 Updating clap v4.5.1 -> v4.5.3 Updating clap_builder v4.5.1 -> v4.5.2 Updating clap_derive v4.5.0 -> v4.5.3 Updating const-random v0.1.17 -> v0.1.18 Updating crossbeam-channel v0.5.11 -> v0.5.12 Removing h2 v0.3.24 Removing h2 v0.4.2 Adding h2 v0.3.25 (latest: v0.4.3) Adding h2 v0.4.3 Adding heck v0.5.0 Updating hermit-abi v0.3.8 -> v0.3.9 Removing http v0.2.11 Removing http v1.0.0 Adding http v0.2.12 (latest: v1.1.0) Adding http v1.1.0 Updating http-body-util v0.1.0 -> v0.1.1 Updating indexmap v2.2.3 -> v2.2.5 Adding jobserver v0.1.28 Updating js-sys v0.3.68 -> v0.3.69 Updating libloading v0.8.1 -> v0.8.3 Updating log v0.4.20 -> v0.4.21 Updating mio v0.8.10 -> v0.8.11 Updating pest v2.7.7 -> v2.7.8 Updating pest_derive v2.7.7 -> v2.7.8 Updating pest_generator v2.7.7 -> v2.7.8 Updating pest_meta v2.7.7 -> v2.7.8 Updating pin-project v1.1.4 -> v1.1.5 Updating pin-project-internal v1.1.4 -> v1.1.5 Updating proc-macro2 v1.0.78 -> v1.0.79 Updating rayon v1.8.1 -> v1.9.0 Updating regex-automata v0.4.5 -> v0.4.6 Updating reqwest v0.11.24 -> v0.11.26 Updating rustls-pemfile v2.1.0 -> v2.1.1 Updating serde_path_to_error v0.1.15 -> v0.1.16 Updating serde_with v3.6.1 -> v3.7.0 Updating serde_with_macros v3.6.1 -> v3.7.0 Updating syn v2.0.51 -> v2.0.52 Updating thiserror v1.0.57 -> v1.0.58 Updating thiserror-impl v1.0.57 -> v1.0.58 Updating toml v0.8.10 -> v0.8.11 Updating toml_edit v0.22.6 -> v0.22.7 Updating walkdir v2.4.0 -> v2.5.0 Updating wasm-bindgen v0.2.91 -> v0.2.92 Updating wasm-bindgen-backend v0.2.91 -> v0.2.92 Updating wasm-bindgen-futures v0.4.41 -> v0.4.42 Updating wasm-bindgen-macro v0.2.91 -> v0.2.92 Updating wasm-bindgen-macro-support v0.2.91 -> v0.2.92 Updating wasm-bindgen-shared v0.2.91 -> v0.2.92 Updating web-sys v0.3.68 -> v0.3.69 Updating windows-targets v0.52.3 -> v0.52.4 Updating windows_aarch64_gnullvm v0.52.3 -> v0.52.4 Updating windows_aarch64_msvc v0.52.3 -> v0.52.4 Updating windows_i686_gnu v0.52.3 -> v0.52.4 Updating 
windows_i686_msvc v0.52.3 -> v0.52.4 Updating windows_x86_64_gnu v0.52.3 -> v0.52.4 Updating windows_x86_64_gnullvm v0.52.3 -> v0.52.4 Updating windows_x86_64_msvc v0.52.3 -> v0.52.4 Updating winnow v0.6.2 -> v0.6.5 --- Cargo.lock | 398 +++++++++++++++++++++------------------- src/shared/clock/mod.rs | 19 +- 2 files changed, 215 insertions(+), 202 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9bc13c1a7..4cc81979d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -30,9 +30,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.9" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d713b3834d76b85304d4d525563c1276e2e30dc97cc67bfb4585a4a29fc2c89f" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "once_cell", @@ -93,9 +93,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.12" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" dependencies = [ "anstyle", "anstyle-parse", @@ -141,9 +141,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.80" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" +checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" [[package]] name = "aquatic_udp_protocol" @@ -157,9 +157,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +checksum = "7b3d0060af21e8d11a926981cc00c6c1541aa91dd64b9f881985c3da1094425f" [[package]] name = "arrayvec" @@ 
-191,7 +191,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -211,7 +211,7 @@ dependencies = [ "axum-macros", "bytes", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "http-body-util", "hyper 1.2.0", @@ -237,9 +237,9 @@ dependencies = [ [[package]] name = "axum-client-ip" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f5ffe4637708b326c621d5494ab6c91dcf62ee440fa6ee967d289315a9c6f81" +checksum = "5e7c467bdcd2bd982ce5c8742a1a178aba7b03db399fd18f5d5d438f5aa91cb4" dependencies = [ "axum", "forwarded-header-value", @@ -255,7 +255,7 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "http-body-util", "mime", @@ -277,7 +277,7 @@ dependencies = [ "axum-core", "bytes", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "http-body-util", "mime", @@ -295,10 +295,10 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00c055ee2d014ae5981ce1016374e8213682aa14d9bf40e48ab48b5f3ef20eaa" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -310,14 +310,14 @@ dependencies = [ "arc-swap", "bytes", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "http-body-util", "hyper 1.2.0", "hyper-util", "pin-project-lite", "rustls", - "rustls-pemfile 2.1.0", + "rustls-pemfile 2.1.1", "tokio", "tokio-rustls", "tower", @@ -379,7 +379,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -438,7 +438,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", "syn_derive", ] @@ -471,9 +471,9 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" 
-version = "3.15.3" +version = "3.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea184aa71bb362a1157c896979544cc23974e08fd265f29ea96b59f0b4a555b" +checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" [[package]] name = "bytecheck" @@ -517,10 +517,11 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.88" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02f341c093d19155a6e41631ce5971aac4e9a868262212153124c15fa22d1cdc" +checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" dependencies = [ + "jobserver", "libc", ] @@ -547,15 +548,15 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "chrono" -version = "0.4.34" +version = "0.4.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" +checksum = "8eaf5903dcbc0a39312feb77df2ff4c76387d591b9fc7b04a238dcf8bb62639a" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", "serde", - "windows-targets 0.52.3", + "windows-targets 0.52.4", ] [[package]] @@ -598,9 +599,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.1" +version = "4.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c918d541ef2913577a0f9566e9ce27cb35b6df072075769e0b26cb5a554520da" +checksum = "949626d00e063efc93b6dca932419ceb5432f99769911c0b995f7e884c778813" dependencies = [ "clap_builder", "clap_derive", @@ -608,9 +609,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.1" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f3e7391dad68afb0c2ede1bf619f579a3dc9c2ec67f089baa397123a2f3d1eb" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstream", 
"anstyle", @@ -620,14 +621,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.0" +version = "4.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" +checksum = "90239a040c80f5e14809ca132ddc4176ab33d5e17e49691793296e3fcb34d72f" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -683,9 +684,9 @@ dependencies = [ [[package]] name = "const-random" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaf16c9c2c612020bcfd042e170f6e32de9b9d75adb5277cdbbd2e2c8c8299a" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" dependencies = [ "const-random-macro", ] @@ -803,9 +804,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" +checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" dependencies = [ "crossbeam-utils", ] @@ -881,7 +882,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -892,7 +893,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -926,7 +927,7 @@ checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -1104,7 +1105,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -1116,7 +1117,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.51", + 
"syn 2.0.52", ] [[package]] @@ -1128,7 +1129,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -1193,7 +1194,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -1261,17 +1262,17 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "h2" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "4fbd2820c5e49886948654ab546d0688ff24530286bdcf8fca3cefb16d4618eb" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", - "http 0.2.11", - "indexmap 2.2.3", + "http 0.2.12", + "indexmap 2.2.5", "slab", "tokio", "tokio-util", @@ -1280,17 +1281,17 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31d030e59af851932b72ceebadf4a2b5986dba4c3b99dd2493f8273a0f151943" +checksum = "51ee2dd2e4f378392eeff5d51618cd9a63166a2513846bbc55f21cfacd9199d4" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", - "http 1.0.0", - "indexmap 2.2.3", + "http 1.1.0", + "indexmap 2.2.5", "slab", "tokio", "tokio-util", @@ -1322,7 +1323,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.9", + "ahash 0.8.11", ] [[package]] @@ -1331,7 +1332,7 @@ version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - "ahash 0.8.9", + "ahash 0.8.11", "allocator-api2", ] @@ -1350,11 +1351,17 @@ version = "0.4.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "hermit-abi" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "379dada1584ad501b383485dd706b8afb7a70fcbc7f4da7d780638a5a6124a60" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -1370,9 +1377,9 @@ checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -1381,9 +1388,9 @@ dependencies = [ [[package]] name = "http" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -1397,7 +1404,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http 0.2.11", + "http 0.2.12", "pin-project-lite", ] @@ -1408,18 +1415,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" dependencies = [ "bytes", - "http 1.0.0", + "http 1.1.0", ] [[package]] name = "http-body-util" -version = "0.1.0" +version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" +checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" dependencies = [ "bytes", - "futures-util", - "http 1.0.0", + "futures-core", + "http 1.1.0", "http-body 1.0.0", "pin-project-lite", ] @@ -1446,8 +1453,8 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.3.24", - "http 0.2.11", + "h2 0.3.25", + "http 0.2.12", "http-body 0.4.6", "httparse", "httpdate", @@ -1469,8 +1476,8 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.2", - "http 1.0.0", + "h2 0.4.3", + "http 1.1.0", "http-body 1.0.0", "httparse", "httpdate", @@ -1501,7 +1508,7 @@ checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" dependencies = [ "bytes", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "hyper 1.2.0", "pin-project-lite", @@ -1561,9 +1568,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.3" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -1620,11 +1627,20 @@ version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +[[package]] +name = "jobserver" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6" +dependencies = [ + "libc", +] + [[package]] name = "js-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" +checksum = 
"29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -1733,12 +1749,12 @@ checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libloading" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" +checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-sys 0.48.0", + "windows-targets 0.52.4", ] [[package]] @@ -1799,9 +1815,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "lru" @@ -1847,9 +1863,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi", @@ -1880,7 +1896,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -1925,13 +1941,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56b0d8a0db9bf6d2213e11f2c701cb91387b0614361625ab7b9743b41aa4938f" dependencies = [ "darling", - "heck", + "heck 0.4.1", "num-bigint", "proc-macro-crate 1.3.1", "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", "termcolor", "thiserror", ] @@ -2131,7 +2147,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -2219,9 +2235,9 @@ 
checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.7" +version = "2.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219c0dcc30b6a27553f9cc242972b67f75b60eb0db71f0b5462f38b058c41546" +checksum = "56f8023d0fb78c8e03784ea1c7f3fa36e68a723138990b8d5a47d916b651e7a8" dependencies = [ "memchr", "thiserror", @@ -2230,9 +2246,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.7" +version = "2.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e1288dbd7786462961e69bfd4df7848c1e37e8b74303dbdab82c3a9cdd2809" +checksum = "b0d24f72393fd16ab6ac5738bc33cdb6a9aa73f8b902e8fe29cf4e67d7dd1026" dependencies = [ "pest", "pest_generator", @@ -2240,22 +2256,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.7" +version = "2.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1381c29a877c6d34b8c176e734f35d7f7f5b3adaefe940cb4d1bb7af94678e2e" +checksum = "fdc17e2a6c7d0a492f0158d7a4bd66cc17280308bbaff78d5bef566dca35ab80" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] name = "pest_meta" -version = "2.7.7" +version = "2.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0934d6907f148c22a3acbda520c7eed243ad7487a30f51f6ce52b58b7077a8a" +checksum = "934cd7631c050f4674352a6e835d5f6711ffbfb9345c2fc0107155ac495ae293" dependencies = [ "once_cell", "pest", @@ -2302,22 +2318,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.4" +version = 
"1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -2449,9 +2465,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" dependencies = [ "unicode-ident", ] @@ -2555,9 +2571,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" +checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" dependencies = [ "either", "rayon-core", @@ -2596,9 +2612,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", @@ -2622,17 +2638,17 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.24" +version = "0.11.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" +checksum = "78bf93c4af7a8bb7d879d51cebe797356ff10ae8516ace542b5182d9dcac10b2" dependencies = [ "base64", "bytes", "encoding_rs", "futures-core", "futures-util", - "h2 0.3.24", - "http 0.2.11", + "h2 0.3.25", + "http 0.2.12", "http-body 0.4.6", "hyper 0.14.28", "hyper-tls", @@ -2822,9 +2838,9 @@ 
dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c333bb734fcdedcea57de1602543590f545f127dc8b533324318fd492c5c70b" +checksum = "f48172685e6ff52a556baa527774f61fcaa884f59daf3375c62a3f1cd2549dab" dependencies = [ "base64", "rustls-pki-types", @@ -2978,7 +2994,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -2988,7 +3004,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50437e6a58912eecc08865e35ea2e8d365fbb2db0debb1c8bb43bf1faf055f25" dependencies = [ "form_urlencoded", - "indexmap 2.2.3", + "indexmap 2.2.5", "itoa", "ryu", "serde", @@ -3007,9 +3023,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd154a240de39fdebcf5775d2675c204d7c13cf39a4c697be6493c8e734337c" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" dependencies = [ "itoa", "serde", @@ -3023,7 +3039,7 @@ checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -3049,15 +3065,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.6.1" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15d167997bd841ec232f5b2b8e0e26606df2e7caa4c31b95ea9ca52b200bd270" +checksum = "ee80b0e361bbf88fd2f6e242ccd19cfda072cb0faa6ae694ecee08199938569a" dependencies = [ "base64", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.2.3", + "indexmap 2.2.5", "serde", "serde_derive", "serde_json", @@ -3067,14 +3083,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.6.1" +version = "3.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "865f9743393e638991566a8b7a479043c2c8da94a33e0a31f18214c9cae0a64d" +checksum = "6561dc161a9224638a31d876ccdfefbc1df91d3f3a8342eddb35f055d48c7655" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -3198,9 +3214,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.51" +version = "2.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ab617d94515e94ae53b8406c628598680aa0c9587474ecbe58188f7b345d66c" +checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" dependencies = [ "proc-macro2", "quote", @@ -3216,7 +3232,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -3298,22 +3314,22 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -3407,7 +3423,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -3446,14 +3462,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9a9aad4a3066010876e8dcf5a8a06e70a558751117a145c6ce2b82c2e2054290" +checksum = "af06656561d28735e9c1cd63dfd57132c8155426aa6af24f36a00a351f88c48e" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.6", + "toml_edit 0.22.7", ] [[package]] @@ -3471,7 +3487,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "toml_datetime", "winnow 0.5.40", ] @@ -3482,22 +3498,22 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "toml_datetime", "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.6" +version = "0.22.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1b5fd4128cc8d3e0cb74d4ed9a9cc7c7284becd4df68f5f940e1ad123606f6" +checksum = "18769cd1cec395d70860ceb4d932812a0b4d06b1a4bb336745a4d21b9496e992" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.2", + "winnow 0.6.5", ] [[package]] @@ -3647,7 +3663,7 @@ dependencies = [ "bitflags 2.4.2", "bytes", "futures-core", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "http-body-util", "pin-project-lite", @@ -3702,7 +3718,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] @@ -3817,9 +3833,9 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = 
"29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -3842,9 +3858,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3852,24 +3868,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if", "js-sys", @@ -3879,9 +3895,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3889,28 +3905,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "web-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -3953,7 +3969,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.3", + "windows-targets 0.52.4", ] [[package]] @@ -3971,7 +3987,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.3", + "windows-targets 0.52.4", ] [[package]] @@ -3991,17 +4007,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d380ba1dc7187569a8a9e91ed34b8ccfc33123bbacb8c0aed2d1ad7f3ef2dc5f" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" dependencies = [ - "windows_aarch64_gnullvm 0.52.3", - "windows_aarch64_msvc 0.52.3", - "windows_i686_gnu 0.52.3", - "windows_i686_msvc 0.52.3", - "windows_x86_64_gnu 0.52.3", - "windows_x86_64_gnullvm 
0.52.3", - "windows_x86_64_msvc 0.52.3", + "windows_aarch64_gnullvm 0.52.4", + "windows_aarch64_msvc 0.52.4", + "windows_i686_gnu 0.52.4", + "windows_i686_msvc 0.52.4", + "windows_x86_64_gnu 0.52.4", + "windows_x86_64_gnullvm 0.52.4", + "windows_x86_64_msvc 0.52.4", ] [[package]] @@ -4012,9 +4028,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68e5dcfb9413f53afd9c8f86e56a7b4d86d9a2fa26090ea2dc9e40fba56c6ec6" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" [[package]] name = "windows_aarch64_msvc" @@ -4024,9 +4040,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dab469ebbc45798319e69eebf92308e541ce46760b49b18c6b3fe5e8965b30f" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" [[package]] name = "windows_i686_gnu" @@ -4036,9 +4052,9 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a4e9b6a7cac734a8b4138a4e1044eac3404d8326b6c0f939276560687a033fb" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" [[package]] name = "windows_i686_msvc" @@ -4048,9 +4064,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b0ec9c422ca95ff34a78755cfa6ad4a51371da2a5ace67500cf7ca5f232c58" +checksum = 
"1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" [[package]] name = "windows_x86_64_gnu" @@ -4060,9 +4076,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "704131571ba93e89d7cd43482277d6632589b18ecf4468f591fbae0a8b101614" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" [[package]] name = "windows_x86_64_gnullvm" @@ -4072,9 +4088,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42079295511643151e98d61c38c0acc444e52dd42ab456f7ccfd5152e8ecf21c" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" [[package]] name = "windows_x86_64_msvc" @@ -4084,9 +4100,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0770833d60a970638e989b3fa9fd2bb1aaadcf88963d1659fd7d9990196ed2d6" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" [[package]] name = "winnow" @@ -4099,9 +4115,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.2" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a4191c47f15cc3ec71fcb4913cb83d58def65dd3787610213c649283b5ce178" +checksum = "dffa400e67ed5a4dd237983829e66475f0a4a26938c4b04c21baede6262215b8" dependencies = [ "memchr", ] @@ -4151,7 +4167,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.51", + "syn 2.0.52", ] [[package]] diff --git 
a/src/shared/clock/mod.rs b/src/shared/clock/mod.rs index 922ca3200..6d9d4112a 100644 --- a/src/shared/clock/mod.rs +++ b/src/shared/clock/mod.rs @@ -30,7 +30,7 @@ use std::num::IntErrorKind; use std::str::FromStr; use std::time::Duration; -use chrono::{DateTime, NaiveDateTime, Utc}; +use chrono::{DateTime, Utc}; /// Duration since the Unix Epoch. pub type DurationSinceUnixEpoch = Duration; @@ -120,14 +120,11 @@ pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime) -> D /// (this will naturally happen in 292.5 billion years) #[must_use] pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) -> DateTime { - DateTime::::from_naive_utc_and_offset( - NaiveDateTime::from_timestamp_opt( - i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), - duration.subsec_nanos(), - ) - .unwrap(), - Utc, + DateTime::from_timestamp( + i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), + duration.subsec_nanos(), ) + .unwrap() } #[cfg(test)] @@ -150,7 +147,7 @@ mod tests { } mod timestamp { - use chrono::{DateTime, NaiveDateTime, Utc}; + use chrono::DateTime; use crate::shared::clock::{ convert_from_datetime_utc_to_timestamp, convert_from_iso_8601_to_timestamp, convert_from_timestamp_to_datetime_utc, @@ -162,13 +159,13 @@ mod tests { let timestamp = DurationSinceUnixEpoch::ZERO; assert_eq!( convert_from_timestamp_to_datetime_utc(timestamp), - DateTime::::from_naive_utc_and_offset(NaiveDateTime::from_timestamp_opt(0, 0).unwrap(), Utc) + DateTime::from_timestamp(0, 0).unwrap() ); } #[test] fn should_be_converted_from_datetime_utc() { - let datetime = DateTime::::from_naive_utc_and_offset(NaiveDateTime::from_timestamp_opt(0, 0).unwrap(), Utc); + let datetime = DateTime::from_timestamp(0, 0).unwrap(); assert_eq!( convert_from_datetime_utc_to_timestamp(&datetime), DurationSinceUnixEpoch::ZERO From 439821ca29451a79bb2005ba70a8a5145ee0bcbe Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: 
Tue, 19 Mar 2024 12:49:52 +0800 Subject: [PATCH 0757/1003] dev: bugfix completed download stat --- src/core/torrent/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index d19a97be1..c4a1b0df9 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -99,8 +99,8 @@ impl Entry { } AnnounceEvent::Completed => { let peer_old = self.peers.insert(peer.peer_id, *peer); - // Don't count if peer was not previously known - if peer_old.is_some() { + // Only count if the peer was previously known and had not already completed. + if peer_old.is_some_and(|p| p.event != AnnounceEvent::Completed) { self.completed += 1; did_torrent_stats_change = true; } From 3a9c52f96ea8c9250eac786663f7a1a3e7ca6474 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 19 Mar 2024 07:26:09 +0000 Subject: [PATCH 0758/1003] fix: error messages starting UDP tracker String interpolation was not being done well. --- src/servers/udp/server.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index fbea11fac..95c8145c1 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -203,7 +203,7 @@ impl Launcher { #[derive(Default)] struct ActiveRequests { - rb: StaticRb, // the number of requests we handle at the same time. + rb: StaticRb, // the number of requests we handle at the same time. 
} impl std::fmt::Debug for ActiveRequests { @@ -241,8 +241,14 @@ impl Udp { tx_start: oneshot::Sender, rx_halt: oneshot::Receiver, ) { - let socket = Arc::new(UdpSocket::bind(bind_to).await.expect("Could not bind to {self.socket}.")); - let address = socket.local_addr().expect("Could not get local_addr from {binding}."); + let socket = Arc::new( + UdpSocket::bind(bind_to) + .await + .unwrap_or_else(|_| panic!("Could not bind to {bind_to}.")), + ); + let address = socket + .local_addr() + .unwrap_or_else(|_| panic!("Could not get local_addr from {bind_to}.")); let halt = shutdown_signal_with_message(rx_halt, format!("Halting Http Service Bound to Socket: {address}")); info!(target: "UDP TRACKER", "Starting on: udp://{}", address); From 14ef16821635b0488c83f43e18e66229783da908 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 20 Mar 2024 04:09:29 +0800 Subject: [PATCH 0759/1003] dev: ci: update coverage build flags --- .github/workflows/coverage.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 06529d53d..5731caf9f 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -18,8 +18,8 @@ jobs: runs-on: ubuntu-latest env: CARGO_INCREMENTAL: "0" - RUSTFLAGS: "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests" - RUSTDOCFLAGS: "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests" + RUSTFLAGS: "-Z profile -C codegen-units=1 -C opt-level=0 -C link-dead-code -C overflow-checks=off -Z panic_abort_tests -C panic=abort" + RUSTDOCFLAGS: "-Z profile -C codegen-units=1 -C opt-level=0 -C link-dead-code -C overflow-checks=off -Z panic_abort_tests -C panic=abort" steps: - id: checkout_push @@ -55,8 +55,9 @@ jobs: name: Run Build Checks run: cargo check --tests --benches --examples --workspace 
--all-targets --all-features - # Run Test Locally: - # RUSTFLAGS="-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests" RUSTDOCFLAGS="-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests" CARGO_INCREMENTAL="0" RUST_BACKTRACE=1 cargo test --tests --benches --examples --workspace --all-targets --all-features + - id: clean + name: Clean Build Directory + run: cargo clean - id: test name: Run Unit Tests From 8395c42b4e4cf4c2a98d4d5e51036d7352de9dce Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 20 Mar 2024 11:15:46 +0000 Subject: [PATCH 0760/1003] test: [#746] disable tracker stats for profiling --- share/default/config/tracker.udp.benchmarking.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/share/default/config/tracker.udp.benchmarking.toml b/share/default/config/tracker.udp.benchmarking.toml index 080c67e84..70298e9dc 100644 --- a/share/default/config/tracker.udp.benchmarking.toml +++ b/share/default/config/tracker.udp.benchmarking.toml @@ -9,8 +9,8 @@ min_announce_interval = 120 mode = "public" on_reverse_proxy = false persistent_torrent_completed_stat = false -remove_peerless_torrents = true -tracker_usage_statistics = true +remove_peerless_torrents = false +tracker_usage_statistics = false [[udp_trackers]] bind_address = "0.0.0.0:6969" From cc1cbc12f5acbb8f91c2814bb9110789757c8033 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 20 Mar 2024 11:21:08 +0000 Subject: [PATCH 0761/1003] test: [#746] add a new binary for profiling --- .gitignore | 3 +- cSpell.json | 5 + src/bin/profiling.rs | 8 ++ src/console/mod.rs | 1 + src/console/profiling.rs | 202 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 218 insertions(+), 1 deletion(-) create mode 100644 src/bin/profiling.rs create mode 100644 src/console/profiling.rs diff --git a/.gitignore b/.gitignore index 
2d8d0b8bd..caa527540 100644 --- a/.gitignore +++ b/.gitignore @@ -3,10 +3,11 @@ /.coverage/ /.idea/ /.vscode/launch.json -/tracker.toml /data.db /database.db /database.json.bz2 /storage/ /target /tracker.* +/tracker.toml +callgrind.out \ No newline at end of file diff --git a/cSpell.json b/cSpell.json index 297517980..d15355d56 100644 --- a/cSpell.json +++ b/cSpell.json @@ -21,6 +21,7 @@ "bufs", "Buildx", "byteorder", + "callgrind", "canonicalize", "canonicalized", "certbot", @@ -35,6 +36,7 @@ "Cyberneering", "datagram", "datetime", + "debuginfo", "Deque", "Dijke", "distroless", @@ -60,6 +62,7 @@ "infoschema", "Intermodal", "intervali", + "kcachegrind", "keyout", "lcov", "leecher", @@ -134,7 +137,9 @@ "untuple", "uroot", "Vagaa", + "valgrind", "Vuze", + "Weidendorfer", "Werror", "whitespaces", "XBTT", diff --git a/src/bin/profiling.rs b/src/bin/profiling.rs new file mode 100644 index 000000000..bc1ac6526 --- /dev/null +++ b/src/bin/profiling.rs @@ -0,0 +1,8 @@ +//! This binary is used for profiling with [valgrind](https://valgrind.org/) +//! and [kcachegrind](https://kcachegrind.github.io/). +use torrust_tracker::console::profiling::run; + +#[tokio::main] +async fn main() { + run().await; +} diff --git a/src/console/mod.rs b/src/console/mod.rs index 54ed8e415..dab338e4b 100644 --- a/src/console/mod.rs +++ b/src/console/mod.rs @@ -1,3 +1,4 @@ //! Console apps. pub mod ci; pub mod clients; +pub mod profiling; diff --git a/src/console/profiling.rs b/src/console/profiling.rs new file mode 100644 index 000000000..e0867159f --- /dev/null +++ b/src/console/profiling.rs @@ -0,0 +1,202 @@ +//! This binary is used for profiling with [valgrind](https://valgrind.org/) +//! and [kcachegrind](https://kcachegrind.github.io/). +//! +//! # Requirements +//! +//! [valgrind](https://valgrind.org/) and [kcachegrind](https://kcachegrind.github.io/). +//! +//! On Ubuntu you can install them with: +//! +//! ```text +//! sudo apt install valgrind kcachegrind +//! ``` +//! +//! 
> NOTICE: valgrind executes the program you wan to profile and waits until +//! it ends. Since the tracker is a service and does not end the profiling +//! binary accepts an arguments with the duration you want to run the tracker, +//! so that it terminates automatically after that period of time. +//! +//! # Run profiling +//! +//! To run the profiling you have to: +//! +//! 1. Build and run the tracker for profiling. +//! 2. Run the aquatic UDP load test tool to start collecting data in the tracker. +//! +//! Build and run the tracker for profiling: +//! +//! ```text +//! RUSTFLAGS='-g' cargo build --release --bin profiling \ +//! && export TORRUST_TRACKER_PATH_CONFIG="./share/default/config/tracker.udp.benchmarking.toml" \ +//! && valgrind \ +//! --tool=callgrind \ +//! --callgrind-out-file=callgrind.out \ +//! --collect-jumps=yes \ +//! --simulate-cache=yes \ +//! ./target/release/profiling 60 +//! ``` +//! +//! The output should be something like: +//! +//! ```text +//! RUSTFLAGS='-g' cargo build --release --bin profiling \ +//! && export TORRUST_TRACKER_PATH_CONFIG="./share/default/config/tracker.udp.benchmarking.toml" \ +//! && valgrind \ +//! --tool=callgrind \ +//! --callgrind-out-file=callgrind.out \ +//! --collect-jumps=yes \ +//! --simulate-cache=yes \ +//! ./target/release/profiling 60 +//! +//! Compiling torrust-tracker v3.0.0-alpha.12-develop (/home/developer/Documents/git/committer/me/github/torrust/torrust-tracker) +//! Finished `release` profile [optimized + debuginfo] target(s) in 1m 15s +//! ==122801== Callgrind, a call-graph generating cache profiler +//! ==122801== Copyright (C) 2002-2017, and GNU GPL'd, by Josef Weidendorfer et al. +//! ==122801== Using Valgrind-3.19.0 and LibVEX; rerun with -h for copyright info +//! ==122801== Command: ./target/release/profiling 60 +//! ==122801== +//! --122801-- warning: L3 cache found, using its data for the LL simulation. +//! ==122801== For interactive control, run 'callgrind_control -h'. +//! 
Loading configuration file: `./share/default/config/tracker.udp.benchmarking.toml` ... +//! Torrust successfully shutdown. +//! ==122801== +//! ==122801== Events : Ir Dr Dw I1mr D1mr D1mw ILmr DLmr DLmw +//! ==122801== Collected : 1160654816 278135882 247755311 24453652 12650490 16315690 10932 2481624 4832145 +//! ==122801== +//! ==122801== I refs: 1,160,654,816 +//! ==122801== I1 misses: 24,453,652 +//! ==122801== LLi misses: 10,932 +//! ==122801== I1 miss rate: 2.11% +//! ==122801== LLi miss rate: 0.00% +//! ==122801== +//! ==122801== D refs: 525,891,193 (278,135,882 rd + 247,755,311 wr) +//! ==122801== D1 misses: 28,966,180 ( 12,650,490 rd + 16,315,690 wr) +//! ==122801== LLd misses: 7,313,769 ( 2,481,624 rd + 4,832,145 wr) +//! ==122801== D1 miss rate: 5.5% ( 4.5% + 6.6% ) +//! ==122801== LLd miss rate: 1.4% ( 0.9% + 2.0% ) +//! ==122801== +//! ==122801== LL refs: 53,419,832 ( 37,104,142 rd + 16,315,690 wr) +//! ==122801== LL misses: 7,324,701 ( 2,492,556 rd + 4,832,145 wr) +//! ==122801== LL miss rate: 0.4% ( 0.2% + 2.0% ) +//! ``` +//! +//! > NOTICE: We are using an specific tracker configuration for profiling that +//! removes all features except the UDP tracker and sets the logging level to `error`. +//! +//! Build the aquatic UDP load test command: +//! +//! ```text +//! cd /tmp +//! git clone git@github.com:greatest-ape/aquatic.git +//! cd aquatic +//! cargo build --profile=release-debug -p aquatic_udp_load_test +//! ./target/release-debug/aquatic_udp_load_test -p > "load-test-config.toml" +//! ``` +//! +//! Modify the "load-test-config.toml" file to change the UDP tracker port from +//! `3000` to `6969`. +//! +//! Running the aquatic UDP load test command: +//! +//! ```text +//! ./target/release-debug/aquatic_udp_load_test -c "load-test-config.toml" +//! ``` +//! +//! The output should be something like this: +//! +//! ```text +//! Starting client with config: Config { +//! server_address: 127.0.0.1:6969, +//! log_level: Error, +//! workers: 1, +//! 
duration: 0, +//! summarize_last: 0, +//! extra_statistics: true, +//! network: NetworkConfig { +//! multiple_client_ipv4s: true, +//! sockets_per_worker: 4, +//! recv_buffer: 8000000, +//! }, +//! requests: RequestConfig { +//! number_of_torrents: 1000000, +//! number_of_peers: 2000000, +//! scrape_max_torrents: 10, +//! announce_peers_wanted: 30, +//! weight_connect: 50, +//! weight_announce: 50, +//! weight_scrape: 1, +//! peer_seeder_probability: 0.75, +//! }, +//! } +//! +//! Requests out: 45097.51/second +//! Responses in: 4212.70/second +//! - Connect responses: 2098.15 +//! - Announce responses: 2074.95 +//! - Scrape responses: 39.59 +//! - Error responses: 0.00 +//! Peers per announce response: 0.00 +//! Announce responses per info hash: +//! - p10: 1 +//! - p25: 1 +//! - p50: 1 +//! - p75: 2 +//! - p90: 3 +//! - p95: 4 +//! - p99: 6 +//! - p99.9: 8 +//! - p100: 10 +//! ``` +//! +//! After running the tracker for some seconds the tracker will automatically stop +//! and `valgrind`will write the file `callgrind.out` with the data. +//! +//! You can now analyze the collected data with: +//! +//! ```text +//! kcachegrind callgrind.out +//! ``` +use std::env; +use std::time::Duration; + +use log::info; +use tokio::time::sleep; + +use crate::{app, bootstrap}; + +pub async fn run() { + // Parse command line arguments + let args: Vec = env::args().collect(); + + // Ensure an argument for duration is provided + if args.len() != 2 { + eprintln!("Usage: {} ", args[0]); + return; + } + + // Parse duration argument + let Ok(duration_secs) = args[1].parse::() else { + eprintln!("Invalid duration provided"); + return; + }; + + let (config, tracker) = bootstrap::app::setup(); + + let jobs = app::start(&config, tracker).await; + + // Run the tracker for a fixed duration + let run_duration = sleep(Duration::from_secs(duration_secs)); + + tokio::select! 
{ + () = run_duration => { + info!("Torrust timed shutdown.."); + }, + _ = tokio::signal::ctrl_c() => { + info!("Torrust shutting down via Ctrl+C.."); + // Await for all jobs to shutdown + futures::future::join_all(jobs).await; + } + } + + println!("Torrust successfully shutdown."); +} From 901566873d813356ae817602e427bb74803b81ec Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 20 Mar 2024 11:45:11 +0000 Subject: [PATCH 0762/1003] refactor: [#746] rename functions and extract named closures --- src/servers/udp/server.rs | 67 +++++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 31 deletions(-) diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 95c8145c1..98c4bf726 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -255,24 +255,7 @@ impl Udp { let running = tokio::task::spawn(async move { debug!(target: "UDP TRACKER", "Started: Waiting for packets on socket address: udp://{address} ..."); - - let tracker = tracker.clone(); - let socket = socket.clone(); - - let reqs = &mut ActiveRequests::default(); - - // Main Waiting Loop, awaits on async [`receive_request`]. - loop { - if let Some(h) = reqs.rb.push_overwrite( - Self::do_request(Self::receive_request(socket.clone()).await, tracker.clone(), socket.clone()).abort_handle(), - ) { - if !h.is_finished() { - // the task is still running, lets yield and give it a chance to flush. - tokio::task::yield_now().await; - h.abort(); - } - } - } + Self::run_udp_server(tracker, socket).await; }); tx_start @@ -292,6 +275,27 @@ impl Udp { task::yield_now().await; // lets allow the other threads to complete. } + async fn run_udp_server(tracker: Arc, socket: Arc) { + let tracker = tracker.clone(); + let socket = socket.clone(); + + let reqs = &mut ActiveRequests::default(); + + // Main Waiting Loop, awaits on async [`receive_request`]. 
+ loop { + if let Some(h) = reqs.rb.push_overwrite( + Self::spawn_request_processor(Self::receive_request(socket.clone()).await, tracker.clone(), socket.clone()) + .abort_handle(), + ) { + if !h.is_finished() { + // the task is still running, lets yield and give it a chance to flush. + tokio::task::yield_now().await; + h.abort(); + } + } + } + } + async fn receive_request(socket: Arc) -> Result> { // Wait for the socket to be readable socket.readable().await?; @@ -309,26 +313,27 @@ impl Udp { } } - fn do_request( + fn spawn_request_processor( result: Result>, tracker: Arc, socket: Arc, ) -> JoinHandle<()> { - // timeout not needed, as udp is non-blocking. - tokio::task::spawn(async move { - match result { - Ok(udp_request) => { - trace!("Received Request from: {}", udp_request.from); - Self::make_response(tracker.clone(), socket.clone(), udp_request).await; - } - Err(error) => { - debug!("error: {error}"); - } + tokio::task::spawn(Self::process_request(result, tracker, socket)) + } + + async fn process_request(result: Result>, tracker: Arc, socket: Arc) { + match result { + Ok(udp_request) => { + trace!("Received Request from: {}", udp_request.from); + Self::process_valid_request(tracker.clone(), socket.clone(), udp_request).await; } - }) + Err(error) => { + debug!("error: {error}"); + } + } } - async fn make_response(tracker: Arc, socket: Arc, udp_request: UdpRequest) { + async fn process_valid_request(tracker: Arc, socket: Arc, udp_request: UdpRequest) { trace!("Making Response to {udp_request:?}"); let from = udp_request.from; let response = handlers::handle_packet(udp_request, &tracker.clone(), socket.clone()).await; From 1c59d89e1d34f0f51d3ee8685c2fa4e513a6a9bd Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 21 Mar 2024 03:08:03 +0800 Subject: [PATCH 0763/1003] chore: various maintenance 1. Clean-up Dependency Versions (major version only). 2. Remove Unused Dependences. 3. Add Unused Dependency Checker to Testing Workflow. 4. 
Coverage dose not include examples and benchmarks. --- .github/workflows/coverage.yaml | 2 +- .github/workflows/testing.yaml | 14 ++++++++ Cargo.lock | 20 ------------ Cargo.toml | 32 ++++++++----------- packages/configuration/Cargo.toml | 1 - packages/test-helpers/Cargo.toml | 1 - .../torrent-repository-benchmarks/Cargo.toml | 9 +++--- .../torrent-repository-benchmarks/README.md | 1 + 8 files changed, 35 insertions(+), 45 deletions(-) create mode 100644 packages/torrent-repository-benchmarks/README.md diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 5731caf9f..66def04bf 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -61,7 +61,7 @@ jobs: - id: test name: Run Unit Tests - run: cargo test --tests --benches --examples --workspace --all-targets --all-features + run: cargo test --tests --workspace --all-targets --all-features - id: coverage name: Generate Coverage Report diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index 5deabd74a..8a54e8982 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -57,6 +57,12 @@ jobs: name: Enable Workflow Cache uses: Swatinem/rust-cache@v2 + - id: tools + name: Install Tools + uses: taiki-e/install-action@v2 + with: + tool: cargo-machete + - id: check name: Run Build Checks run: cargo check --tests --benches --examples --workspace --all-targets --all-features @@ -71,6 +77,14 @@ jobs: RUSTDOCFLAGS: "-D warnings" run: cargo doc --no-deps --bins --examples --workspace --all-features + - id: clean + name: Clean Build Directory + run: cargo clean + + - id: deps + name: Check Unused Dependencies + run: cargo machete + unit: name: Units diff --git a/Cargo.lock b/Cargo.lock index 4cc81979d..2e0912b9e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -762,7 +762,6 @@ dependencies = [ "ciborium", "clap", "criterion-plot", - "futures", "is-terminal", "itertools 0.10.5", "num-traits", @@ -775,7 +774,6 @@ dependencies = [ 
"serde_derive", "serde_json", "tinytemplate", - "tokio", "walkdir", ] @@ -2156,15 +2154,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" -[[package]] -name = "openssl-src" -version = "300.2.3+3.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cff92b6f71555b61bb9315f7c64da3ca43d87531622120fea0195fc761b4843" -dependencies = [ - "cc", -] - [[package]] name = "openssl-sys" version = "0.9.101" @@ -2173,7 +2162,6 @@ checksum = "dda2b0f344e78efc2facf7d195d098df0dd72151b26ab98da807afc26c198dff" dependencies = [ "cc", "libc", - "openssl-src", "pkg-config", "vcpkg", ] @@ -3543,7 +3531,6 @@ dependencies = [ "clap", "colored", "config", - "criterion", "derive_more", "fern", "futures", @@ -3554,8 +3541,6 @@ dependencies = [ "log", "mockall", "multimap", - "once_cell", - "openssl", "percent-encoding", "r2d2", "r2d2_mysql", @@ -3568,11 +3553,8 @@ dependencies = [ "serde_bytes", "serde_json", "serde_repr", - "serde_urlencoded", - "serde_with", "tdyne-peer-id", "tdyne-peer-id-registry", - "tempfile", "thiserror", "tokio", "torrust-tracker-configuration", @@ -3593,7 +3575,6 @@ version = "3.0.0-alpha.12-develop" dependencies = [ "config", "derive_more", - "log", "serde", "serde_with", "thiserror", @@ -3631,7 +3612,6 @@ dependencies = [ name = "torrust-tracker-test-helpers" version = "3.0.0-alpha.12-develop" dependencies = [ - "lazy_static", "rand", "torrust-tracker-configuration", "torrust-tracker-primitives", diff --git a/Cargo.toml b/Cargo.toml index 36c865447..24bf78b6e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,35 +30,37 @@ rust-version = "1.72" version = "3.0.0-alpha.12-develop" [dependencies] +anyhow = "1" aquatic_udp_protocol = "0" async-trait = "0" axum = { version = "0", features = ["macros"] } axum-client-ip = "0" -axum-extra = { version = "0.9.2", features = ["query"] } +axum-extra = { version = "0", features = 
["query"] } axum-server = { version = "0", features = ["tls-rustls"] } binascii = "0" chrono = { version = "0", default-features = false, features = ["clock"] } +clap = { version = "4", features = ["derive", "env"] } +colored = "2" config = "0" derive_more = "0" fern = "0" futures = "0" +hex-literal = "0" hyper = "1" lazy_static = "1" log = { version = "0", features = ["release_max_level_info"] } multimap = "0" -openssl = { version = "0", features = ["vendored"] } percent-encoding = "2" r2d2 = "0" r2d2_mysql = "24" r2d2_sqlite = { version = "0", features = ["bundled"] } rand = "0" -reqwest = "0" +reqwest = { version = "0", features = ["json"] } +ringbuf = "0" serde = { version = "1", features = ["derive"] } serde_bencode = "0" serde_bytes = "0" serde_json = "1" -ringbuf = "0" -serde_with = "3" serde_repr = "0" tdyne-peer-id = "1" tdyne-peer-id-registry = "0" @@ -68,24 +70,18 @@ torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "pa torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12-develop", path = "contrib/bencode" } torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "packages/located-error" } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "packages/primitives" } -tower-http = { version = "0", features = ["compression-full", "cors", "trace", "propagate-header", "request-id"] } +tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +trace = "0" +tracing = "0" +url = "2" uuid = { version = "1", features = ["v4"] } -colored = "2.1.0" -url = "2.5.0" -tempfile = "3.9.0" -clap = { version = "4.4.18", features = ["derive", "env"]} -anyhow = "1.0.79" -hex-literal = "0.4.1" -trace = "0.1.7" -tracing = "0.1.40" + +[package.metadata.cargo-machete] +ignored = ["serde_bytes"] [dev-dependencies] -criterion = { version = "0.5.1", features = ["async_tokio"] } local-ip-address = "0" mockall = "0" -once_cell = "1.18.0" -reqwest = { 
version = "0", features = ["json"] } -serde_urlencoded = "0" torrust-tracker-test-helpers = { version = "3.0.0-alpha.12-develop", path = "packages/test-helpers" } [workspace] diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index ecc8c976e..102177816 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -17,7 +17,6 @@ version.workspace = true [dependencies] config = "0" derive_more = "0" -log = { version = "0", features = ["release_max_level_info"] } serde = { version = "1", features = ["derive"] } serde_with = "3" thiserror = "1" diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 9ae891a01..2f10c6a0f 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -15,7 +15,6 @@ rust-version.workspace = true version.workspace = true [dependencies] -lazy_static = "1" rand = "0" torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } diff --git a/packages/torrent-repository-benchmarks/Cargo.toml b/packages/torrent-repository-benchmarks/Cargo.toml index da9aba621..e8b22f52f 100644 --- a/packages/torrent-repository-benchmarks/Cargo.toml +++ b/packages/torrent-repository-benchmarks/Cargo.toml @@ -1,12 +1,13 @@ [package] +description = "A set of benchmarks for the torrent repository" +keywords = ["benchmarking", "library", "repository", "torrent"] name = "torrust-torrent-repository-benchmarks" +readme = "README.md" + authors.workspace = true -categories.workspace = true -description.workspace = true documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords.workspace = true license.workspace = true publish.workspace = true repository.workspace = true @@ -18,4 +19,4 @@ aquatic_udp_protocol = "0.8.0" clap = { version = "4.4.8", features = ["derive"] } futures = "0.3.29" tokio = { 
version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker = { path = "../../" } \ No newline at end of file +torrust-tracker = { path = "../../" } diff --git a/packages/torrent-repository-benchmarks/README.md b/packages/torrent-repository-benchmarks/README.md new file mode 100644 index 000000000..14183ea69 --- /dev/null +++ b/packages/torrent-repository-benchmarks/README.md @@ -0,0 +1 @@ +# Benchmarks of the torrent repository From d2a346a216b37d5d40408bf4d81fd84f37438f90 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 21 Mar 2024 03:12:00 +0800 Subject: [PATCH 0764/1003] chore: update deps Updating crates.io index Updating aho-corasick v1.1.2 -> v1.1.3 Updating async-trait v0.1.77 -> v0.1.78 Updating bitflags v2.4.2 -> v2.5.0 Updating brotli v3.4.0 -> v3.5.0 Removing h2 v0.3.25 Removing http v0.2.12 Removing http-body v0.4.6 Removing hyper v0.14.28 Updating hyper-tls v0.5.0 -> v0.6.0 Updating reqwest v0.11.26 -> v0.12.0 Updating rustix v0.38.31 -> v0.38.32 Updating syn v2.0.52 -> v2.0.53 Updating toml v0.8.11 -> v0.8.12 Updating toml_edit v0.22.7 -> v0.22.9 Updating uuid v1.7.0 -> v1.8.0 --- Cargo.lock | 245 ++++++++------------- tests/servers/health_check_api/contract.rs | 12 +- 2 files changed, 105 insertions(+), 152 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2e0912b9e..5722032b8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -42,9 +42,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -185,13 +185,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.77" +version = "0.1.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "461abc97219de0eaaf81fe3ef974a540158f3d079c2ab200f891f1a2ef201e85" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -211,10 +211,10 @@ dependencies = [ "axum-macros", "bytes", "futures-util", - "http 1.1.0", - "http-body 1.0.0", + "http", + "http-body", "http-body-util", - "hyper 1.2.0", + "hyper", "hyper-util", "itoa", "matchit", @@ -255,8 +255,8 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.1.0", - "http-body 1.0.0", + "http", + "http-body", "http-body-util", "mime", "pin-project-lite", @@ -277,8 +277,8 @@ dependencies = [ "axum-core", "bytes", "futures-util", - "http 1.1.0", - "http-body 1.0.0", + "http", + "http-body", "http-body-util", "mime", "pin-project-lite", @@ -298,7 +298,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -310,10 +310,10 @@ dependencies = [ "arc-swap", "bytes", "futures-util", - "http 1.1.0", - "http-body 1.0.0", + "http", + "http-body", "http-body-util", - "hyper 1.2.0", + "hyper", "hyper-util", "pin-project-lite", "rustls", @@ -368,7 +368,7 @@ version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -379,7 +379,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -390,9 +390,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" dependencies = [ "serde", ] @@ -438,15 
+438,15 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", "syn_derive", ] [[package]] name = "brotli" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "516074a47ef4bce09577a3b379392300159ce5b1ba2e501ff1c819950066100f" +checksum = "d640d25bc63c50fb1f0b545ffd80207d2e10a4c965530809b40ba3386825c391" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -628,7 +628,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -880,7 +880,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -891,7 +891,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -925,7 +925,7 @@ checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -1103,7 +1103,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -1115,7 +1115,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -1127,7 +1127,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -1192,7 +1192,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -1258,25 +1258,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" -[[package]] -name = "h2" -version = "0.3.25" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fbd2820c5e49886948654ab546d0688ff24530286bdcf8fca3cefb16d4618eb" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 0.2.12", - "indexmap 2.2.5", - "slab", - "tokio", - "tokio-util", - "tracing", -] - [[package]] name = "h2" version = "0.4.3" @@ -1288,7 +1269,7 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http 1.1.0", + "http", "indexmap 2.2.5", "slab", "tokio", @@ -1373,17 +1354,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" -[[package]] -name = "http" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - [[package]] name = "http" version = "1.1.0" @@ -1395,17 +1365,6 @@ dependencies = [ "itoa", ] -[[package]] -name = "http-body" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" -dependencies = [ - "bytes", - "http 0.2.12", - "pin-project-lite", -] - [[package]] name = "http-body" version = "1.0.0" @@ -1413,7 +1372,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" dependencies = [ "bytes", - "http 1.1.0", + "http", ] [[package]] @@ -1424,8 +1383,8 @@ checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" dependencies = [ "bytes", "futures-core", - "http 1.1.0", - "http-body 1.0.0", + "http", + "http-body", "pin-project-lite", ] @@ -1441,30 +1400,6 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" 
-[[package]] -name = "hyper" -version = "0.14.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.3.25", - "http 0.2.12", - "http-body 0.4.6", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", - "want", -] - [[package]] name = "hyper" version = "1.2.0" @@ -1474,28 +1409,32 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.3", - "http 1.1.0", - "http-body 1.0.0", + "h2", + "http", + "http-body", "httparse", "httpdate", "itoa", "pin-project-lite", "smallvec", "tokio", + "want", ] [[package]] name = "hyper-tls" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", - "hyper 0.14.28", + "http-body-util", + "hyper", + "hyper-util", "native-tls", "tokio", "tokio-native-tls", + "tower-service", ] [[package]] @@ -1505,13 +1444,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" dependencies = [ "bytes", + "futures-channel", "futures-util", - "http 1.1.0", - "http-body 1.0.0", - "hyper 1.2.0", + "http", + "http-body", + "hyper", "pin-project-lite", "socket2", "tokio", + "tower", + "tower-service", + "tracing", ] [[package]] @@ -1894,7 +1837,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -1945,7 +1888,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", "termcolor", "thiserror", ] @@ -1959,7 +1902,7 @@ dependencies = [ "base64", "bigdecimal", "bindgen", - "bitflags 2.4.2", + 
"bitflags 2.5.0", "bitvec", "byteorder", "bytes", @@ -2128,7 +2071,7 @@ version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if", "foreign-types", "libc", @@ -2145,7 +2088,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -2252,7 +2195,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -2321,7 +2264,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -2626,20 +2569,22 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.26" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78bf93c4af7a8bb7d879d51cebe797356ff10ae8516ace542b5182d9dcac10b2" +checksum = "58b48d98d932f4ee75e541614d32a7f44c889b72bd9c2e04d95edd135989df88" dependencies = [ "base64", "bytes", "encoding_rs", "futures-core", "futures-util", - "h2 0.3.25", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.28", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", "hyper-tls", + "hyper-util", "ipnet", "js-sys", "log", @@ -2724,7 +2669,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ "base64", - "bitflags 2.4.2", + "bitflags 2.5.0", "serde", "serde_derive", ] @@ -2735,7 +2680,7 @@ version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ 
-2792,11 +2737,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.31" +version = "0.38.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "errno", "libc", "linux-raw-sys", @@ -2982,7 +2927,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -3027,7 +2972,7 @@ checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -3078,7 +3023,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -3202,9 +3147,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.52" +version = "2.0.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" +checksum = "7383cd0e49fff4b6b90ca5670bfd3e9d6a733b3f90c686605aa7eec8c4996032" dependencies = [ "proc-macro2", "quote", @@ -3220,7 +3165,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -3317,7 +3262,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -3411,7 +3356,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -3450,14 +3395,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"af06656561d28735e9c1cd63dfd57132c8155426aa6af24f36a00a351f88c48e" +checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.7", + "toml_edit 0.22.9", ] [[package]] @@ -3493,9 +3438,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.7" +version = "0.22.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18769cd1cec395d70860ceb4d932812a0b4d06b1a4bb336745a4d21b9496e992" +checksum = "8e40bb779c5187258fd7aad0eb68cb8706a0a81fa712fbea808ab43c4b8374c4" dependencies = [ "indexmap 2.2.5", "serde", @@ -3535,7 +3480,7 @@ dependencies = [ "fern", "futures", "hex-literal", - "hyper 1.2.0", + "hyper", "lazy_static", "local-ip-address", "log", @@ -3640,11 +3585,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ "async-compression", - "bitflags 2.4.2", + "bitflags 2.5.0", "bytes", "futures-core", - "http 1.1.0", - "http-body 1.0.0", + "http", + "http-body", "http-body-util", "pin-project-lite", "tokio", @@ -3698,7 +3643,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] @@ -3791,9 +3736,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ "getrandom", "rand", @@ -3857,7 +3802,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", "wasm-bindgen-shared", ] @@ -3891,7 +3836,7 @@ checksum = 
"e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4147,7 +4092,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.53", ] [[package]] diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs index 7b00866d3..c893470c2 100644 --- a/tests/servers/health_check_api/contract.rs +++ b/tests/servers/health_check_api/contract.rs @@ -113,7 +113,11 @@ mod api { let details = report.details.first().expect("it should have some details"); assert_eq!(details.binding, binding); - assert!(details.result.as_ref().is_err_and(|e| e.contains("Connection refused"))); + assert!( + details.result.as_ref().is_err_and(|e| e.contains("client error (Connect)")), + "Expected to contain, \"client error (Connect)\", but have message \"{:?}\".", + details.result + ); assert_eq!( details.info, format!("checking api health check at: http://{binding}/api/health_check") @@ -210,7 +214,11 @@ mod http { let details = report.details.first().expect("it should have some details"); assert_eq!(details.binding, binding); - assert!(details.result.as_ref().is_err_and(|e| e.contains("Connection refused"))); + assert!( + details.result.as_ref().is_err_and(|e| e.contains("client error (Connect)")), + "Expected to contain, \"client error (Connect)\", but have message \"{:?}\".", + details.result + ); assert_eq!( details.info, format!("checking http tracker health check at: http://{binding}/health_check") From bfdeafc2b6ef4ff91fc234a35c58245ca927d053 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 21 Mar 2024 15:59:23 +0000 Subject: [PATCH 0765/1003] test: [#746] profiling: add configuration to generate flamegraphs You can generate a flamegprah with: ``` 
TORRUST_TRACKER_PATH_CONFIG="./share/default/config/tracker.udp.benchmarking.toml" cargo flamegraph --bin=profiling -- 60 ``` --- .gitignore | 4 +++- Cargo.toml | 4 ++++ cSpell.json | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index caa527540..1bffb9842 100644 --- a/.gitignore +++ b/.gitignore @@ -10,4 +10,6 @@ /target /tracker.* /tracker.toml -callgrind.out \ No newline at end of file +callgrind.out +flamegraph.svg +perf.data* \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index 24bf78b6e..e6f196583 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -103,3 +103,7 @@ opt-level = 1 debug = 1 lto = "fat" opt-level = 3 + +[target.x86_64-unknown-linux-gnu] +linker = "/usr/bin/clang" +rustflags = ["-Clink-arg=-fuse-ld=lld", "-Clink-arg=-Wl,--no-rosegment"] \ No newline at end of file diff --git a/cSpell.json b/cSpell.json index d15355d56..0e98c99ef 100644 --- a/cSpell.json +++ b/cSpell.json @@ -105,6 +105,7 @@ "rerequests", "ringbuf", "rngs", + "rosegment", "routable", "rusqlite", "RUSTDOCFLAGS", From 6a7275e8b837d2b1afa7c2e64dde2ce7c94e4579 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 21 Mar 2024 16:26:46 +0000 Subject: [PATCH 0766/1003] docs: [#746] for profiling --- cSpell.json | 5 +++ docs/media/kcachegrind-screenshot.png | Bin 0 -> 597087 bytes docs/profiling.md | 59 ++++++++++++++++++++++++++ 3 files changed, 64 insertions(+) create mode 100644 docs/media/kcachegrind-screenshot.png create mode 100644 docs/profiling.md diff --git a/cSpell.json b/cSpell.json index 0e98c99ef..16dff714e 100644 --- a/cSpell.json +++ b/cSpell.json @@ -18,6 +18,7 @@ "binstall", "Bitflu", "bools", + "Bragilevsky", "bufs", "Buildx", "byteorder", @@ -45,10 +46,12 @@ "dtolnay", "elif", "filesd", + "flamegraph", "Freebox", "gecos", "Grcov", "hasher", + "heaptrack", "hexlify", "hlocalhost", "Hydranode", @@ -139,11 +142,13 @@ "uroot", "Vagaa", "valgrind", + "Vitaly", "Vuze", "Weidendorfer", "Werror", "whitespaces", "XBTT", + 
"Xdebug", "Xeon", "Xtorrent", "Xunlei", diff --git a/docs/media/kcachegrind-screenshot.png b/docs/media/kcachegrind-screenshot.png new file mode 100644 index 0000000000000000000000000000000000000000..a10eb5ad67947131a10db4a0714b0f03e072d585 GIT binary patch literal 597087 zcmce8^;?u(+b$M}lysMLcXvp4cT0CSN(~?(-Cfe%G1Ag0DLM4e-R#@P_x<*MzCU2k z4;&uDnl+90yuFj^Ru=HD z+^_1A>paAj-Um;-l9nP3Nmw@HS}J;YdaI2lB_;ayh2KimTxY_;(Fhycs=WF3!p<#i zR8$nK=v&g@_y5n2gAdcfa3ueF>n-V(T~a6G4`$(M4ww;2^oYC*9_oL8>TS*Ou3PsY zGooup148zKt>`Z`y-^}&M3cM^H98F6OGERhQ$|&UDgM3hsqP#!I9}-ekHgcK?~r|P zx{SUVDSdpACcrRXuF>u<6p{BH*M5KqtMNV`>;F08pA-8;t!Q%Z@rtO6O|x5D+t4u4 z^+24^Gh@Ymn2|9oDwv4lV132apSveB|9j9Y83qOhc23T?tSn?yWF&nXP zaB$>W;W;@uy{dg08j``o!(&dBGcz-LkB&aQWuwoCtx=YEa&jVovtD`b-Es5iZ(?fd z^SIJsY+J9Xt1Ij4%a0|S-Bx)VcXsBChK}yUO#p0^^}W2jch5Bs%B!udEiNgE;tCkx z&p8g%y?r@Q5leGE7m1O)H`<#rOc4qYq|>YfRy#d_4L;TIhPf7hg-0bq9(+qB1Ij6F zto&)4bes9lz4bciki7o^y$pRiy!5Wosd$Bh>d8-p*t44)tST#u5RrGlE}aAAwX(7j zkSSMFQ~MSlFQ%%BzP`TxD(T_#Y_mS&oWixPogD)zDr#H(!=Gn>fBkZ1c9zw0BFA4B zG`&|)S{fltBJAqQE(StAJZWD#q!bkFT%85;6%-f8XJsi!OT&j6Bf4_`eVmKbO6YFu z%2q>iX=#carCd>#eD+K}U31PNuVQ>96u2!crl;`Z&d2D|eY|eMb-+k@-4MD>EyKlXQf6A!&)YKF#Zvx@WuV2PZOCGZ75d)Ae zGgeeAtp5Hed|Y1F-OrR6?DF~T?d`+C@y0mp92|#<1BFd^`1l?=UbC8|r78vn3JMCy z?rntX>hQ2Z<+@GLzklm>H`??ZUEkBt(wg*!p+0iL0(&$zViiwIGvgHG;OMprH`ubi6i8#BscyM1M!v@*TSHDOf?VrdM^GR^s zpIm4wvI2}!u0a<N0-o&Mi{%@`p`ZJ{>E_;nHQlA}XQ! 
zjVER3!>v*{6s^;!-HdEz$U618WJBrz5&4a zi1zibvoC8m+G@ub^4b4xCIV~BxdnES2xiCR<>etu7dM*4kMD>nClG1gT<;EXjF^*g za;Ak!P}$6tQ;-T0RgKH4t8PvTOM3hA8C^R zg{(n0Gh%6Jsimc*u(~<{GMtx}muJVyMAHv?ECk@hN{YfJwAtx=8E&++jA$SLrTu!x9m=p|s*XyANvZr$D8QnIp8rOXZYwvzI4 z#N|z{^ie*$``P(60VV-td}ad=&+CnqNXu+Y-c0X|DgPag@q7_SvQY7&x@r|0LxDxu;Z zKgK2|hV+?;%(?wpW^4PJgqD_ern0i_lPEOAjO0BDD=S(LGc_Y)KlC!Mlop_?-d^!I z!1Jq)Z6ni0w=<*3^yf}J`E&hniHQ}>%_(&oZTD)U4DIafzI&JF=6*>_!+ZG!Ce-*n z5s`w1h6ZTpdt;KHKbSavFt}%Zw^pXIygc&a;zB_P`||PJb6_FJ?TXO`aPU_Ip}r(caA5-1+kLsxfP-C^pv@ zVHn^`Ms43wq%RWT>%$@mX1Xqud@t1+!7{_6`p4sHnC9EP*zB9qGcpKlIq`u{e6Fr$ zK076S$}Yc!yBu5{ZyEp}M+yqUjLeo$#2L0cJ!!M(liv zsBT-%d@H<(Gyh?9a-qCH41D{Vnuf;sWUWhFLLxL;UGu)NO$)HFw4$PFB=R-FX9s{PWdjtRekYib?QGi+eN?fkjsATwI`jz^C);>oN4_#FiEpa1IB# z0m3=>Rt=3xO2W#^&lh7+)6}f9nx+gzAx5^vANUiwBqRVtL1!z(rt1l>8di@1FRb=D zH*Ys^ptkzW8IS;k2LSe<;l85x<=TVEIg_B~;hQ9%OA3XX+>1`Xbl(g?2a z>z8nj96l4S6?6FXe_@tY1my(~Z2a>Z>HML)9HlBX?$p&5-RF}NM*xk08|}HL0&oF> z7ywbMuZ2C%i**aO`hsv3(%%}d2m1Rr9Jw9amW6HHB;@1};o;%6=&=ZpCBLiWvjI|| zps)}X7dL#t7FpdLfT~BEK%1)K&UBBkL*2HnPBT4Ril zdlOFzbGl%qISm@K1|4Z>>8>qjH+)GK7dB&KW5Ohh8ZCe|>J}mg&GI@svj83euA7gb zYc|GQ@HOeqQDm!u$AmXwH zFD~l1>aUU}HL|o)^(!y=bEL`@6cquHGO)B%*oH$*M&_NevN9jR&S66pC%%9;dk`E- zQF}W<)f_^-MuXk6KVT}I840zLnzmQ-yO55@Z)C~P1r(1f?mEWGr{A=R( z@6^aC++UKg9Bl9f#EM&sk`@;kUJC|%oiTbRPlnPrf#j;*H}9=dSxW1l9kUze_>J@R zKk?gyY3KT$C4DqggW=Y#WaZ_N6pGnqjPvA0MPDiu$LHmJ_``(v1ysX`ut7JEPnjA= z+yv1|eH!=g>JOXT6r;&LDg#WC`>qIZ0#-IQpNkbEW+^Q#t?%B9tgLbK^I91S7P}S# zhl))9VYv`v9KuRWRB4mT76}o{+Yx z!Cxf9W~PwpaGbOV6U$)TQLPd4JhGm`orZsGFmK`N7T5tWS6p12==rAY@8+6;VQ38g zLPzQkHwPVe{HnRHmC#@i==j1dF)~-965a^#y2PH+?fiT4l8ngega~lhkv)9SToo;aIDO6+R4_t}nCI6m4(U=x%+Ha3$=RJZ!2cCLx-+x2?G?3v$baw; zuv92sUQ`*k9hf6P7myeXr2CN&7s!E9M#6nW?s0nqNMQluH1qB7` z!AqLAu9Woji1O2|k>Eqo(lYljeTpT3SOITJA8nkzTspkUB>H+~n%}0iNISP!+R#Ys z0&p!h_Q1({58&rOba8Ey+x1;_7~eV1h2AObP($qrE}H zb^ix!)853HFXLc#3Oc|UqvM);kC836JWk{@SuIu^u)yHA}c~PA|S2I^=tDJ zwe|s^3iuEZqC}-%6;I=OcOK#-19GMjxYCd-`6YgBV5wyqmW(BL=9vA`-itph82ynM 
zM|`>-IXv5SCoa1VMC=4}Tb8oW(Htb7FNDv47fGCD) zrk{nAQ-k(DLPi0g^>WFWj_}e_!Z>+57tV&!y?Q$7VxmkAXzLfD_ayJ%GV?RVpeypm z&KU;00^vm)@9N%LKy6FPidjZNd+gWaSZWXz8aB?oC+r&Sncjx{b@g;0g;G^l?+Zo3 zS5oI-x7zFlDJfD;PJa?qaw@8LfX2MM zcmQ7j;?;q>&u&MX*iwkdr%#`DLt6pW%)o#|l5k|q-l8xy`ffU?Nl&%3$t1V}`y#hC)@YX~4a?dbvl@c>+gnn)4!4*C2U2rDW& zIzvN=bTtdMe~trWf7;r(K(6{)2OT3kJe*H|Zmlb*@Oo0R_BZ zi%7)JUH_PxnyRLup#tO{o}S!2p-4S@u2_is7CUagg;Ku87Pst!G{O|?6`Xsp6e#dwoE{PEK#3K$@72X6J) zRksaaQXm%KP~eV`LHWZid986NDPcuc2?+@~R(L?ZoBOW!kS3zL+PH_Bj?T0%0#mC( z1Cah=@P&XI0=k}>fdN1@8b1CKdW`6K)_}MGJ00GAu|~+JCmUVnn+xM z(1!~03|6InSCYwA}c%q8V60Q{;zPfS8+o{jEzb2 zC1Z|GL!2CdD|k;#3>N#8^_Q3K@$~0d5c7vMPD@KoSW~@y4VLrYQGHYni;jLtGGd-z zGdTJ^Hvi`kUbpgeG5TBBu!u$U>d%!_R8V~QkY^Yt|F;70kmTwy_Iqh|s>5Eq!kTs^3G2rX<0b&Q(S0U;FhDMy z_jz^a6=L28GZXc{C&_8p&jVbkY7W1ZCo3+lR*bJ@HE%SXOI2TAUP3~GdL05}aq10* zO<+KSg@%Ssv`YZ!A*Y}?JUh!!plWGpX=CaBGC8Yo+%)wi4>%#fS`$r7`FVLb{GKQF zhKc~*6p8_{?!zIJ_#cMH7O*av%QD8f@%2~wL4*}Lx07Ggi2PAmiYOth#~#QPa;Yog zm!32jP5vV4@5g{A6yNdAbX?3T+5<`?IUHSPoU%(V|31cl7ly6I8#K)gG6 zBphvZM`+BHp}&!QrkT-S$NA5LJ-8o;b`Oq&0_3R#8Kaw#V=&A+)j+wGYb!#VPMeQ( z3ocDFNTr4ugKJI!7P;W~;!6`E0oouk;uR^!>GP41^q)uwygus&KQ#L_&nvfQ#0KKx zvqp02jUxpv%qo4&S;hpu)lWA){4`A_94|mkEkKkZwVAtATJR!;Ri;0IT>-;4sr|PMbB!ASr3x}&Svw{xp&|D zfGirKh)-7boiffYl<1wJM!0XqRp4~5rOY~}YBNBJ>4UGv$Xd3Kna+++wR`j?aOwhL+4li3V~kBkt?`vy3q{~J1Y z@&nO=4PzteT#O#=sK%X;saoso@yBqiz?b; zdL;F_Emk^hMZR@QZaXR-(zIVpn(DEd3f02)q4H6GWdUUkQLqi5&0eW=WMyZk_0*s{ zODT?;To_xp%u%Jkz4&wqV~r|{FiP+=hme{?sOafQZ`A_l_nz}(eQ9F3%fw(#Ra0o$Sr{o>=TmV z`BdPj7N#)6=7RPBa{~$JVv{^h#f5vV7kqBubk8vTJ@c8mE+$o2;f^Fu1vZ}RRBFtp zs3Na2x!zhiZqU?k(rb$&0Lb~lhZLyr( z#OA%Oo^kC++9k-qlSZZ>gmqr*aPsN?%E6k{p+kSrxR`i2N1@cx9WRC^C0h(tp&~uwU+3BV@z4+X7{LR9!(Px)G|KUoK$=NDx@-8_?MblX zT|AM3=Yj4l2E+CxkCm_6g51+nRGEX_GJQ)VaX!oQrN{h))!E2+Dd;)od3%vc9sFwKDJw3fVV%Ug_ zG{Gr2{5zfN1XLVQR9_l*d7ce9E0OlG&w-|UTFvh9mpq2-1Tf~7pXgUgdB%KY^pBky zt2J0OPa1RB2=fwB1u9SY*d4tTxT`L|vKiM8OFLS}bN+SzsI_0O*Eu*x_+pj4yf*Fp zBKeOXD^{)JnbhWfK46^X64AWhZxla! 
z7KFz1#c*ZgaXbHZIFj344&sU(6A}xO>uTNQOM-jhI<);9#>$!;B}AXcQMF~f?m>s+ zwToeLKZP~c7JI2~6-l(9opE^6xf4#s>2#rways{lEi9U#cNg6rGWqp`7bLf7{&zA# zNZd$COnd*w*zNj>Lle`HAke_*MgzsO{cP72RSjs5W`SQC)lXuM#t&HJbPSTs-kNNXbh^)E!^L#y z!OI-Zwoe)L2e%Jn>r;Hg;ByMvaMGgZhK*Ve*3DjD%wTFhZV})?N&DC3*FfD_*3Ha! z7sgKxA8toh?{2R8v^U*-3=Ka$mNsyMn?qko+RAD~Wj?YPIIvX;ChovAI%W7TzW4XM z^rLnAjI|BEBPv3F?(SM|!-{$QflIKTInD)DW469MYNISN40r(vc#H^P`0T*4`jpWZ z8z2SM#T=cr)8rlGETJRXx=6jQA%CmmMM{_6?>NyL=3Z6JY&%4brtv(KCTLDA2M^~s z2{Ej>D7(K=J#aqaJB4{-zP97M6DiZ`S?vFi#c!o%X}A@|p9Y!@8rV8YZgeY}=`u@! zey^AlVKC7qmE$+#*T;<4mUvD~H`j1E&y{3`Mr&$owcP7WYg4Yu8~c$(MI91d4mUsk zXiObJ)U(l0)p(?=>$k^OBtyP~6QVKD8zVX*RmD>+M#{JKB9-@hl>48s`%mog`CNFP z>^eq(syLM+(pC+_W@Q2H-r%D$K78uyFmOMzvS2xlP4=>Z@aSco&@ct04dD_;4K$XW zYEijdkm@#H*G1{Y-_aQ+%5OY~MH}s&z}=o%5C>HI4S9r1I(2;$)R+uLG1fXW*QjtZ zj+Eb3+A?dV)9RRNI^PU3HljXg=PkwVjvH|4#)RCS^PIOleSe0OCZlYQT77n-ebzgn zw4iWnC|bsO{n@0?dUBE-22;JxsNsmb!9vEkUq8EUv%SjmaDl4j;v3xjb7hm~!zR7E zVy5afJz3RSfT6+Q$(Y7`&+E7alcQ6ce~E^$FSF08ORm%lHQ)GXL5swl7JjRgw)=53 zhvH*=rE$rITH=%GyWd5z7_aK@ONRQ11M-Y}wImLl z-_^Lt9EelKXw0kW!Q)PsOQfQtN*p|ew6Bo*{6earYZ zF3(kjdRrS_ z?MIj1H^rup$8H(VW;_FWj{HHb?N+OGo*#-bM%BUH^~&9f&+;Io3vUhsavYIPOH++zg=G)6tckas;_qAy^NCW{bM4+KvFH^ku77!?B1q4sX)X4 zO9(gvLn4(oGajYZ!Ed!@gg{mPGKG!DYxig&N@fJZ{%#HZC_|-(&gHqcH$#rqrJn^- z85POv&YEftgHN{8-d$FA(C)V0wYhaLJ_0wyeX!ot0!69u+c&rH^y8j;PR2J^pk!i; z>heO`K8vNOXipH%QWXCXuPSe1+s84BG7D&C^7jBxZiprFU|IqGr{xB1TT=Bjm6w5bNggoEn(h zLg3o(_b_AoQ5ge6nBg%nU_4Di!5Yn~1;p$zNo^mn8`!CtbD6*cIydx<2Y-6*9Sy*f z)#!iUKh~Tv5Kz95hJmnia?V8h7+~SF2%)GDQXTQs3C{0?z}P~ zfLZ^qh%(6qB=z|t7-Uz*B3RF+taYO?=MV<;tBweg=VCqfeS2rYhlsLsimt!S&Hvcc zWOW95e-0#*e14rZHBQMvdtZ+>0>6B%pz=l&x;W-Ms2FGz=x|4p#Wx)s#S^dZlkHO+ zKRfooi1syi1yiJ7g_@H68y=%qErIV(eazPweS`3yuik}VDg+iu#hoK>1TPtE-&Ir~ z|BKAt*6c{j3jE8@Mc+c29R;P>}9F#~lKKx^RgOXSD@y=T4|^516N|J`zuD$Wt@f9>IiW?f=|(=XirXW4}| zP@!NOHDCV{5{HqL#zpR?3^U@RRFo5~F!HD26%ML6|2RwhehNwW|ru**@#lc$SUS*3;c znZQ$^Y_d@4t?2GQF0(<->NJt-g)ov!v0L_uQ76QfPZ& zV|ykHY8VGarDuO_JiL(IU+b_bX6#(${XE^ 
zt?I?rwMU&It(Q(il}xPH+rA?coKW9>ak!=D%Ggsw%6d_Bfw@^oP0rr{JaaQ=9` zI?U7b$7@=D<-aSwgwCDcw0n_LQlMgEgZgSAOS6^w!!F~3!!0|RlAfM7_9z$EGUr~@ ze!j>oxRV7LCl-wl^`D0{?M;6kNW$^f`zZiG0ETh0vlW3N-&brV^Qi)9t%}(%$nvgT zNh+7uIm7k0D8+?^Kn*bj=nJ<~0yXET zzP{g{kf=<`0vKQ}AqW=9q(2fDoUU>JO!WZ8`{+67;bS^dP(-r*2VRP}`G*=y`Cq-q zTwmmm{p;^vJrX+dW^V56K+bQa6ci+;+5I0cH7OM`xF>SM6?UJnhO)rXKgr=Hdn7g( z3}3WMUcu3!CC7)AQV^6Ji>~;k9+8v z-@YY_m|3HBo?+s&zUZF~j;}=j&S*T*j5>pfRdd>jFxdHkI)cjbim>$$2znV~3*z6K zI22M334{JjjfMG%$80;E-9MzzC)qOv0yXCO+D{0A*$$u`jE{fEcO%Lz{B^nZcT+L`0weTwv?QPD|Ax)^i~D&Cx%&O{n~B-;tHWbRiW{cnu+wTq zeUyW9kEy{}%MQIn3LI}*^i}k^d(FL<*2T|XzKI@G0og=NbA|};HGDsh+5V8HYq8{Y zjhcLq_0iMN=*~GXAo5W{0(K(J%=%d#r-xZdJrU@8&<@eLvuv&k28>eD8Z?J@1@OOI z)|Ss%2q)u6M{;z8dFlc`0tx|(IZedUTm*@h?4<9~IQ&R(zX@$V}!TPBu zFb(&o@BC>v$?2pE4jmVlXTtc%D=oSI&YV6-6^<9}iT{d>PnkQ@KY9MdF&tHzePf8G+NUuGfF4JytTy*Pgffv1Y_ixk}I!b`+4uoi5yL zlm)etQy5?ng#G=}clGMs00bQGyTr%pgVipUCHQ<}2B9iu7w4EKTcjTrrTC%77pqZv zZfppv`U;zoxRK%eJ2f>_a}Bz<&9lB^*q8a-PSz6R{lSynY2(ks57p^glK4{V0$-6b63lpmso#1tt(87LnLR@KAU)G>=6tn)8rn7BpF7Uu`UP1x} z&^wR-rul&3K%k}fcNpv8Vp>rk1xByfxW~l*Zm;-|=&N=UMYCk%dQ;Z3v!=*9UVuwuX7_@&E{W@o`C7ML?@BW_-2RForMXH2)lz=Zd5#n-k0*bm0hr&q1=j5-I7 zI+p#a%|ZWB^wa4pDwp0(c#O>$qu%c|a=Z`XW8m92SQ;MJTxObk3GE)(9)3!-%gy@( zo#xXe1qEd{XO zl3kmgDHpSOth{WzxIXUkU&uPdiE8Jt!2tRdnL-koqc#oq-MHqO*(G15lN=YXPEL;VpiEkDor%mB;Sx<+Fbxi2o`O^as#D@cdqwWxQKZ_~~0hLhvu^L#39P zCos^R1m;JNPW0~GEYl=_c4}5qQZ&?7drYkOA@x2zX4`?Qy}JX#TH`aUbvJna;4@3= z%f6$((%~KO!F{RdSX+Qsgbi^htMg0PYr3mV&vpu%>i%BkYgy8y0%dYy4}T`b3s*e4 zA*qZ+3jCi1~5#u&-eq5WN8A8>nSU)YS1TuNCt5QCWlHtP*TDI z8c0nI1L$iL$5Xo1y?!IPNG)L@elWpnYPg6(YR7^P`-;$Cc91qMIzzNN7tckU z;E9r0U5HWS{0#|?r93AtY->^8?StA=b<(DjSapabnr$f+KSy}~ZAc~osIjK?GF0K- zqEXQJ;LXx%W>$37k0cWrwDHZ$>v7YRaltdHAN1RqhW7_AZXh)py=5U=7&chjmmYN``Gi* zzg&hESU-;YUJdb&@g0#d`fV$Z4SFD|x$3K!obx<1n@TAV&r=322aLJm4Radtx)ZIA zFK55)lhxmI77W<7XMB`pLFsg%gOfZi$p`NUnzoHt=9!40$J^gW1P~DgZB$rHmmYgU zy3%<}Q{$Vmcrv?uwD0RO_uPFwJ0W9Vk3LOHKX0@I$babBE^0NS5K5FpZ{bRqyhubF 
z7_rb`L5B%qsLJj^C4Ts#Xdw8tezp7RGCu3!apg3$@&#;WR@;FnjDC4Ov1>zR^UG+# zOS36BkJTbSY|TjH&u;r282+5;BZmW~P4|w_Cwo4sgGt)!Q5#ayJJJ4sYcZSCIXTfbo=u@e*H6}eSAF)@berL>2VHvIR$i_NvaQZhjl;>k#ID= zmYg2;w?Zp7tEMA@<^i?Su(6!BF9!_naEPC;ZPSiYM_&!=tX5%y`wQ8Sjm$JyPiw*p zcI3A=p$6t&AO~02l7|h$3?7DA8#5j)r1=L^GmF){jrUsBJ$jSD60-QzMG z>BA8V6h2z#+_+X(v-1u?#lHPkm<*9k~eH=PmERV@M&47*H zr=lZGNyQ2TeF@Oc+|HPtT%bw4*@GfZ=1vX5r_&3{aem8 z{*QN^40<-X_S3xYk{`R{43lb{{ctZ=+zoB6xRe_0DTb37jZZfy==HS~T&kn<3^i&N z+Uy2!vz*^0C$3pv&l(-_CRJ96y)Ofl>ki}v$AFLIf)ogwHOAFVaF`>MW`QkyR zw24vIro?o*RW#|bHibe~v)WwsmB2yTECVpC0yNr-QJR*P5AShU5NNH^W%ftFHk^33oX>i^}2)7PS2WB7Sm1105ub z>6p-;4vOLMJ^CI!)&?#sgJ)MEt*>mzSz_I7IpLi4-pGMBiJsg~%vS;e-q>;AbjB&( zT;C3`dL7_grTZf8Jm0$vwbC;xYv27WAz;n@9D8e;!u@D_^iZF1bxE)vTFm`P@5P%p zZzB2b4E)uRUKXBvxj1oXeEc*>2x7bJC$L_$#RxFz5PR8tZoGvyWaRkiy47|iP@)6LtdA-3?Df}YxEy?q&H13{@u z-^RlpUQ!bKprVXcEt6p)U`etoBjaoMAeN97IbCIPoffY>ds-+jp&oCsOh?>|-%2_P zcS)T|M(s>Y+pkr)9YWiX_E*|JKO(%Nrp6iG1P91|cF$(7`6h*=j0f@1Z6riFg@+>-}PH#53Cs8;a=-I^TWju_sqX4cA1s0 zs_Aa16C!_i^&N+RlkO@7x%$w zIO!XZ(>{yo+d{8P=atSot)fW$(Vu~@QvJsWeQQe~hN+pykj#U4#>N51s`U!par3(u zZz6GpqR<)B2LdNRR@VVZ?sxYhx;?`#iaRM{lq$^5ugM1FAq6&BTOD)9o83b4pTM!V z7;?O6U$+_ZNH3s99M3HW80G~0Us!H|_qiieU5M7|;s|%5mfe}%x7a>5L53O*-c<7L zy}x3@5$g)J%5c3#IfxzxDbT&Md>o22#=+^@;`h3Wl^Yo*VWEupeuLOgsOo#PKQT z#g4N&%3o^g{1&jz^RXR0{b}LhOE!tU1!JCOJ`;I^MGg_DVv~SVnxKg>J(<~j zQu`Lc^whTi+Kf+nFR9Z17P-x?Yj#IXgWs9|O;CJTl(gB;;MCt&edaSAId~t5&UO!! 
zH8ls|hA=bdxG-cmvj&(TDBRJr3k;K=}`QPxTf@1nKl_QTXjl#n| z4WFqvi=BK>THTiuwkNFmJNsOMN^K>M#P&C%S7f{Wdv4!&yvdz3-st~QD#ue^Pq%coS+f*i{9?`} zs9v~W^Bz4HhWBV2>tIq$XTZXYwk019-TcL0h6TmtJ(hBQEE>+ z3Nk|aqMlclq59V(jRow8qd(rlG6#wv_6Wnn3bQ#P5-E(0GAW<_CY*V$4h&nrXS)eZ zclUdRNZcp%_<1XmNsGgR9Mzfw-08yQGRBiMgbm($66(Dwu`UD;T#lvI=wJqR89|ZC z@0eP=W*=`XmWS_SqA6?bNzW_W(}uIhHb)n3Gar!I*&Y(^J?|gb(d_(b!JX;nRrgdq z*8vf!(iJ{^aV{^O{hr^QiT!$qN0EE2i{p1f-VCLhwNk}3g`Ft5@nWeA$7bz+m(_R8 z3N88&-FA=k#KH37x{AqZIYYLE>G0JPONj?2&-NlOhT*rcXTKoSw}B>Wt=qmckqJah zWX$#$N7K<1XTMOw{MQ*CRH=+o8vPZ;bO+mx?ajL1DAooPyA*2S3Tz2C+W?a`R*`k} zM=^PYA;B?1iS%rq$-jro=>p82@rlj5$Kb?!B)-%#{gTO4lwaZXTWXds-jp6@ys<9IOa>%SspA{SNe4Uu4M z+1IEQ@=JhRU6`3r(rMHwFwcAYPeV=3$2Mb{+J{CGM7Zl29vDAStokWv*c)4Sh3eXUxPB zr7RIc7l$*j-oF@V>>9de(1AZG|2Dj}T&F3}>R|UC5tt{++|K*>fabw@EW{OmA(gq< zH|}`(Nz>V3rT91JugNXWs}LSP_WV4<^`0h;bC8R~)GY?56MUSYvS^~ z_Wsp%kVfAbafY1#E_x+D39>4LX(ZiG@LB8L;;8jrpNIfeY#5lWItYZOa-F)^XIQ?e zNbCMMD(ADywoQbCfCRnWUeImc8&L^#5c*Lwm`MQEzms!gt2EI#*=!hJ6S(H-6!^rA z`h_9EwgzpbOrZX2{X74s+GsHYt&uqpxv@(XhkL z;m@^gD*rVwM5hX6|AVQ{cs#GZ<^Tl_5;5Dgq6;ahMj6;tx!czN;e@_6kG<62^BeeSmFK~9MukWquW3%gUt=ZUorNE3Do9XkI5tRq zww*Vb?a6`B3L$$ZSR5uq!~)z`3PKr}J<7}?cGw=@@g3dtF)$vix$S+;RaZiTiZAj* zu79%WG@Fhp9&_FN7NL&k@HgF{{uOO8FtfrO$)Z0o14LL(o0kYz(n@X(NUxZNRXX=JB%fbxtKifn|uf_IJK;o zrHCXbEiWq90hVsoGA`~zwax+g=IGku&XLLD5S*uNCrTtk`}LY^ui+E3OYtW(x~`{EYOh^O{12IN*+9$Zur;}+1$8b%8)~6p=(ZB3luiA=}pg0U%`F_mvlqp z6>^)5=`V0`Y{oZXQ9S}_v8$a;NxT&QU#XEu`hZQ19K`Qd;1T9-+(fwMJ`Y1_M>YNj0`m$XtWEClWKi?sw~G5;DF-%Qm3e zsbK5PxrdCcuB+2XDAO1C%UVck9I?O8c#m zm)~UYQr9+E8$-D3K#GJ$q6>1>fX_%tufZ0PXH=`jOt)561^iK$sih90`&an*J_T1_ zMB}zS?`DP?vl=b;yShp((|8=#ofi@UXGmf@f`JSr9MYdGVf$xq(-U&ui6DvD2q+i_ zP2svP^S{fw|6)8R5W(TEMBF4w%1@fA)%Hf$3>Gfq5=PMwZLI9NT|t|8XLVi`(n^Np zU`iwV;0S~nC4Vf~=RwA_GdtZMhhg3yE10-|O)NTEoUJ^wcPt$8a}yEm zkdm24O6b7Vyr9BlFD+|a+mtwf8m`sf=;^u6V|86wO&f9bo(Sl3Qr>|{K|P{ zClyr9=dz=)oXX=e-tmp&)ilG?qE})l1D;Vu|H)3bp(7q^B6fO0kAzK#&xEkSVP5s= 
zHQDv=ZaI7>gnB%wabab3HCfpM4F^$tOc%wi0#}=u!4)hDQqjd8N~Wt}c{!rx4Xb|4+`n2m)ImVuqqEJRRr)pXbaY0v z)~ZZkqcltvjgY*dBwJ4mV=iaG-w+uB+GpL)&*fJhM8&7`;eYgVV!OT;i86(3Y~gtO z{_t70-mpIwUifh&mo*jDOAI!*5gkLbJ9;4g?<{~ZL*al?sOC=4gWHAS z0o+i6f^V}c8`9z@mE%38PE&OEBE39eB`iGJB?z8IgQJI1m5K!LtN&0&F{tb>m` zw&Nx|pbjDq!;r?4UvRXYejMlZ^f*~xx%qjlJA8jUD$ip9taaVga>yp9cW+0E|J%^) zZnsg*sitSLnB|oaf#OL^)$P+~9_X^v#xFnmTO*bM-|W1?DlC^VridWAqeh*2tgB|8 zg_I8dh#dGp%hb{tr=EEL;E!R`AyAz4Qmci_)KD6K)EpGJl&%E;rPYY3tQXcIbmy^Or{Gjh{ z4<3y>uI<-3(G+asbP7<{gPI8kL{E;I41YaB8uuo=a`2I zCHCxwNKOB*9(dU(SQ`sXf_iHOo62^`cB#dX`}SCe=v-te9~P;%S!QGJo)MNUROCSDZ7iej|6Ygkrh zy5gp_U!upvarOv$BAs%H%IwZHT8UKO^kI zy>uKmX{IrH78Su=?3)OJN9xJ7R~qx&_bL^6Vq-`ZE?YWk?hPUqhYAxDO?z9d>GJna zC7q>a-`1Z%-@d&U6%~zmq<{YBe~d6juDf$&O=Shs(eDBb7Y=$*Yax6NsJ-IT2J=m z{BLNwFDS`y9Z z;qS!i!CPGEK(yWtidA*FJ5x!`Dqmge%%z-`&a2fsvwD3R z6j|bhh?o^@x^MT<9h?rm5#dX`)jkGEaD0|E$3s)Q96OCG6NAibveO^fPHv@oXmn#; zcG>b(Xnajh#^$pB4fA7o7@}b zH4n|b1K|rr!7tEh5&4dab3~X&_-d{ zb&D}{uqi8HntU5uth8dQ7}vW42ZN}cn_3>08rh*WAI(V`bCQI3AX~azi}XJ3+XHcE1-|1Z8lqD93<50lj?I8jF}Zq=eTAxjFiC;V3?%z4oZk{&(Z5}N8s!X z=i6~}<6)Jc^Sxz1C!->q*TXbfTX`{9LY={;JIpr4vteMhE`aY)#^j{Bjw@#Yr zms{$87NVAEbH+k6yHnlNFZ#TP1q<)QQL2~IWcdaa;Ei%ZeDirln##ntlsh5MW?FoN z*EU7Cu5UF+3?Z^e@0K>K580)LiLB2dPo^Z}>To&zrfwy4G+gJz+I{W(;+i^lZIvTt zQxT2C5|bk*ypX=Dm7P3kje#vsvtO|FYV%d|%;f;@Jf?G*-JTsAi0y;wytq?&@(gbm zL-(wHT5!c^WuRlg3JE3$17DV?^w#1w42_AcrdNXL*UKfQr=P5E^e+*UfqUR?@gOHy z8ND%4y^(g{a9z6smi_hG`aB8T3RqsUDmy&Ymy|AUx?#MgUP2LjZ>`?+17@PPY~2`v z$uO~?R{|L&z=W|94*R~Gv}RsD!*Dh(C))|X-~-y&b4`%(i-6mN84Jj0R97C>tKyxse_!^T9mD$Z3FlWC zjhs@i_)l_5`x9VW4!G)q%hp)B_tc!TKYXll)KzqJr(S411TcHZ3_gk#prW~55SF)> z<3gc3PL3b>xO~Fw*7~eh_Q;R=!S9jWtPdQC##_89)|0hgJ{5`LNJ}0JHrDv5JUo8F zcU{zLW{$Fo;4qSowzE@F3Bjjv#Qh#nc8SN=Zdks~nL9q`%ZEZh&{Fu1_#ww$)>D78>K-84!)lQ)lZ>0$xtx{Hg4I6jw_otm7^f)|;UV9XR1=AZl zJMovd9xTO~^Tzd7aGImelfk2-Vy|~gDSwG!yE~b7ML+|*0 zb<|`6WZJD$T`jCV8s)F7)wowZbvWO1lL)FpXapuq zVoNjK;7fk&raN$5$P{rO&y-$1EmOmG*q>4APZEf{Q2OzT#|sqoGHtz4G^hr|j&OE^ 
z=(#4v-rf*1aLi;=$fkiA4au!Js)db@zu>tIYxtl-UAq7(TNlJNcse?JCpG4m92ws_ zPS{$bPPa1f`IE-Ot!HZQwb$0{5Nvqy{)DL%X2UXeQzZ3+gk>jV!t({QCu|)v-I-1c zP#>)2sm)BS22hJ-QdY#Q$g2?3a~JL4iSwsVxSH^%7q7v`A=qM7jx6xri^DeQmWjpzG**N4PGR@yuaSM zagL_-j-!01@FST9zYI>u%+&}a;vzai*ByuF&76$rstVTHjX#AgE$xU;`zT+Ym|cxH zsd~VOAK}#YC9bC3{-)8@dAo%~mZ{kL8{_m%nVz@XYXS*21K7rymc!iQq?tG6SxBz3 zVzausuSIeMiATbMw7!3T_x${P_xL#cO!H4p6ntOl#24MJd|zMbJ?Z1El_HNcl+{$0 zZRx6lS+hy7y~`E_wuAFY6XH?IBiAF+z(I`ry4~)xqR(DNiTa(;#!L<&=gyZ1{52B? z&lyC&JB%C&9;)tmMoiHuW?#$= zL&h+B&$nb}+}Z^q+AJpDa0f9j7+8{&$)FT322`KwY=9k)%&v?#0-I()yy0skL?6Mg zXPHw=N)BN>s7Ubf#i29>c6N3s6)SVn1e)R2r^7hWXtVW5qi${r%Gb8NlZjQmhd{6QDHPbKM;jQmhtccDodw`wrE$gFX#zynvD^ZwJpPTwC1e>5?Calz>Qu?YOXKcATfOtP`IgZpM__TMxFYmCzVZ ziN#Ur*u@6`EhAyP>(OVQ!U^vmsf>(2(E9EgVZJJ^!=k_u-2@SetmMu!xvV^LbGW)> z7Sf2zmDi+}vlA_~T;9fz`(sZUw@Gag+s{F|hE}Tx7i~TOu}5v@qw88-rHQ%J-Njqs z>#Sy+Z${O7_<>fu^2$M~k3In?>`MxAmuak_snS`Jie9|*gc~aEuX%)h40m(NBvHJ` z`fq?^mloW|cb?KF#j-h5FaxU(nN*Q)JJ|d(5H6kF))1yTpDYtR!jttlJ-7qaNLPza zb^Wj>mX`I?4{NO~&80-l)R+Kv_A{>eNQUecyQk)kcvkp2nzEc$znMcSGefbTE?aJgEox7-R%T zpwWO=sl_No$Ij`)XuEH%#hW9k4P2@t`@`H8-46LLf2Z`0fD+*arPjC4Y2#1|Qx=t# zPEy0riRE`sl+_B%YDw55ND?~&r-$xKBknln%3Nyc>%b3vI2H@M#Cs%0RZpP^bQV>! 
z`O$JKQT9d%jc(0oPWR$j0L`#g0Y-Bz?3qIO2z9U14YLynuO~x`bmI@6iQ(4N8Mn;q zB0;w09E)Wv<$-#|%BmVtK5~$Y(w=CUH*(|=5`apHriZLz+XDybpFJAPkyi8jH|0Y} zi2T%{U%!k}D3r{y8W(D{sAkwOt8)mh%`*auI9%!Vhuvi=qGjn8oYQD!8$z0G!v%1{ zKjrQr&6#Rhn?bHG;7cTN~tE}sz&QdJ-pD$L7`txh9Isif9+Qaz@3~cPcni>``IJpfc zRg&T#P}2MN?WeCo^!B7x9}PayuYM+74IYdZdAbf0&P~WFZLwrxkf&W}YR+u)2q;0Q z=j1Ws|3JT`Cvy=oES#EVQ#Z_=P-;=h-h+Vw%y5t|)nnc&W2NO_&_Lm6`pK1&i%do_ z>rdMKV{_)~3-)YbwgGOnuQc4i#NO400WpyeBv9_<3=^)sB~3`#3IoT%hY!>?w!Wxr z!dC011i$%;;&;Q-@5svK+}s}YcCfw{Wcw45$W`W)pXbB5wQQQn`3)wwh}ps6LomK#AsZ!f2db1A21G443-6p^Jb!xvrUqPWY65t*Z(N$D0-# zzh;zGZ-P^_IyJZ+dlxeu+h}t@*nr3lQk&hor^hbMcz8b?o00FfRjO1NiX?-w?`yR4 znc~?!IN0BwSx&FMU*k~^D6o&QBhZO)a)k0zwC1l8@JU7vPwa_3pH=6_hyF~cr?=A=Tdsl0b z=Z)?K)$Srogrpjh{bIT9TXX_19!Ui={PTHfG~yggp7e4@nTPp>@sWo1&5AIaT(EqT z$2n`_=Iqy@5hq^Sv08#Dn9++6Np+``{cVG6>2ke`#}3_j<9B7lan5$nEEWe$dTIhQ zrxX{ngZ7v8sweuATVD+Y;J?0Tj1Df0e8{EdJ07!UQX5jf6s_?>Ib`@8oq^xm#DWG3 zS5xb$=~BL_#7}gIV(bSjaBx%;N;SGVR!p#DSu;A%km$)XipF;e(C@GKp)*q3dHj}t zHE@dG7HwLRHv{Ps4?q_s7GMCoL6 z5ip|9;GDJE+UKio}1pl#lFt%?CbFley>OQ`m`ugHnIr+!z@CS zjwl_?tP#$NxHvHO~@DIvHo5Y}SCH)s~?(Wu-@wg`-XtqMGd_0_kvz{_;K!J?H<4s~DkW}r^ z+YhU-+3c+nivFmKmmL{bz=DJ*7ltWat|Tl_$?|OVl1pAG5IsId+Dz|rgC5ICxc;=S zuAim~o8xCBN9|AaY!SHx1)Mh2c(JONa)lG-n3~p}L&Nf3dlr)D=gN*OUfa&~3Iu}^ zqjYw42SeH&zWlU}SSlG^`LGP{Z+4^3yYDT`aha%1b9*$_JF}2-3!(AvEWNS=lV`QQ zp&FZ93ee}P9z(gGG_HH6v7Zk;8NA@D>PPn}6IPc!eRUvHhn1lx zM}Pq3?dQgP0X9E3rhC+VZfk;$2$bLK^LL?R-|wc-YD+hz?p@oG6R!wC&#VdD2{grs zhH5g#*T3mMd+`Muc^aMKKOg z>G&@p=Z7O1B7;>LoAqM(((`z* zXt|>uuQ5g66Kzgpl&Ahww&$&gP<$0=XTE2?8XnSaos`h?Vj9Oi`)1TP@O?u znf<>e-{Rp>!Raf}QVL0vNbkN+8Dh9tW!O3s;PyK~ffQn%y@lD;${V~=q}$hgPd?wc zbjLU1TuzVGS~7Jo+EK_1#^FCbkTpX^y#R`ef2F>BYaMRh+uKyL_#r3ze7`5$dW~#< z=uKdq`ImIpE_*i*dN0EcIcwP{Ee1GS3q7x0LXM5AtCo?Yw1Lg%(IR!8Xl^O^sv+F( zUzEo1d&@BNL8V>4oOsX`IFd3>0b9Zrf)2Brpf(Q0-(#DLH8^Z`867ln2pOFXHA}J8 z7Oa{nb6L`{(#S8{sJ~-R`GD@5XYIm(=is<#_QIt{3zVG65BJl?!T<=}cz>uq#@fR< zWgkbV(M}JJ38?fnN@@_}8HD8S@he?2sU^xz^ZWeKUbdssiR4Hp?)Xkp=QkipYq61r 
ziis>)P5gRrcZr+|$5Qj9$XBdk z*n8gVvum+qs~LILqx4nZc^!-0-54%$n11b7PR#P!wZm@19)M74(rJiAF&%a$Iybn{ zfIMo`N+?+BRWtNQd?`h|et%4z4L6Yos@1`mF5J^*o6Tiv%=uZQVpp7N4Ie(8CX39N zuEaBi-Jq%d=#-U@9P>!Efqb?1DR)C2=59SPwg(#% zp2~oSddxEEY4o%2@7;+GMY0TMBi__BZG#OQh#l<7X@K0(eV)#3BQGqIo%TpsBSZ+8 zFc4p6hdWuub!`qmRoJa#M0B>92g?H18f$I97H^G2QYS8BU~sQW`h|shd5%<8h=VI! zqJ?pH@?}Qg;*!^w!1hGF`4$8dL6CMlJJFTh1k(Z zCH)4I;)rLwZ3aqB&~&??R53iAEEc%A56u#+}fA&uD- zo@e?@7#!BB%{5{sBv+#LUJpt-{4#sTbk;rTTn?7Vlz}yKBJO7}mO}!_t6#FUr!4y0 z#q#`UOQYmh^mEz!POdh4J<_aIoqeL7d<#Du2rJ*)@`gz=n(iS zImebaDt~%r^H?Hjx*^dt)P)SOr<_MwpEwZmOkYkIX!0=GKa|SjCUUXt;?EA}vTH4g zbn>9^Lw@n;O1n%(W*+$H2mLs6Mtvo2VX=!LXo-fptwq9ym>vMDq?CLwbrJ$k+nO$LDtwrA6;7 zEBAz^C+(gu2Y_DBVo=2vBknWU{Tr%4x^#b+AG|539n#&RC5h%PkVzTzSi0v$L`0GT z5#PT4ek74SGw>`s#9V)2bY^__Tp6SMM3Nns(mT)w!rU1bc**s6ym5NK!T1^+6fTl8 z$ff*;s)N~Q!iv3;XzfW%e6rGt1&w?uRzbMZ`Ku_6n^PQoPiZ7%F`;6QpH6ly9ZLdm ztq+l(@7<{ye!pJ_Tf&QpaV{*|ZQX55l`oM|b91Lu8xCjBA3~S@EA1WxI5;>6?0jz& zazaI{zswR;bDJogFy&=@cwr|qEutu;ljm%+rvc?(8J)%%vXv{=ZxKm*x<$rh z{@JQ$OjK>+<90au4&^pOF{QW>eQ*4vT-%7h$rhsBy=6OmyZ1J=dU_vx?Fzl;P?@Dh zyL)nU0nd|A*cC}O)m=MZ*k_H9X3QupbE%nom80TW>f;o?7!6I$kgzZzsEH&YiOQy8 zg(?)G zDwsdyaylVAx3N(uR|qi<><}*N>_qP%&oO6>=6Q}i;N8|g&=S!g|a!^6glAXuy7~=?S`1MGefgwVB&BvoXrp}4Jz@Q z`u#?itHOL7_GfP zG4iKz&=*CUV^)7#KW&C%vv_=CV`F;lc1bArY!W=e9q~eGcniOObfmGQi1xw+?f1%` zet+0)OaxIu+d^l0X!dsjY3LM)6v=_g_6FJhXg)`>d;0o{qg~LT{@mu|&zmDvC{qsV zzc3sZ$Ki{Lil(KfpWoiX>umK!u$oM~5>i6aAUz3?F4%&S;%d$3-$(09uKaq@fT{Kh z-o&Z}VeUTNU;cx|D^siERfS3)H+3_MX$oY3eM?IzP+O4T=V#(d31UsZEFYB zNpH&5d%bu*Jw5%v{5GW#;+8#Y1_h--A#V6ktn<<=+E?Dde})n?WK~sFm8XeC>OauE zxQGYs90L146FFO@xHu(IP6j$}oENybxWrOuWj`$zRX{;`;w>~^gZtyS09pT>^`A@B zr3CyV#C$SN<8w=ZBJ;%T$WKCYa>$@q1vuG)eyVufc-@I3I0={>udz zXq^5{O-7c(dialQ^#45zv24mzx%dC~JAda|vChA=@&DU&B{2p*?JDR_uan@(z4z)> z$z>s4lvlT`pePs1w?X{18K4?q55@=8fE7|9e$E-CN7wkluATS52Hk z+&%C(ukzs8p^E4~eZ$NR_Lk>14^lAdEXKSDj4L&{TohBfYHYv*M*;Lk>=``4ANh?`+IwY`$#x}BLh95LE(R)4OAW=dNuE; z2B%*>S)&AV=f~NwE`8Ks`+sk^Y?9t5O0_qAR+KdC2@7c(6dkSWeFC%JsXcD{TdTi! 
z2MdhZ-e2Lv<+=AQRE+72utla2b}bIi;=MTGDwCDlT7mF)k#BbHYRh>A9y#%D75x+< zPk8)ZrpIOHXVJfzi>gL3o2(w5U0se(^{BgfA^pdaK>dE#8^$}!^m*?km7{d44PnbR z@SpxE^hdX-F&oM>824kZr83fwJDp15_Gb{OBc@6J``{5TU!@-GFTTjSnLQ?SPeb9X zf#Zz2*Ll1igm~i~;w_ zs(;_UGbG!j1s~{DAFIBI24G9Cy|TXvNB!NuOkjN~?mvRkZ{Xs`2eKw_osdqw(f~1s z36`15bq%M|L^Lxyf~jLp?aKb7P(!b!$?<5ZcgB>(Hjt>r5sT|NFe3Gpt*`Xri;>qe zl|0L42&Y|idQgQJsO9g&dO{-B=@b*ae%7wqxni?gZSAZdj9+}{Q>SZjpj61cCh6@P zMiUkm{<(8{YpV0B137?sbZL^&ot8&WVa8vGZWTOs_UtgaZz10GdkCML%n!NKSzo{_ zt+;P{6$cs;XBPGJh-k)+u0DlnMulJnWr1pLkMJA;2VsujFRmW$NwsO!bnh}e{ms#! zr_s<`eJ2oVJ|9Unqwzn*aFsiHUz|F~o?Tt{xzzVCRW`3x`)S|s=9+z2;N`v03)aZKDm<-sCQcG$kUC>u zAW=X|RkDHENcs9bTPPb45)2>Lvq)w?KONhV3&a(MJS;Ekww0c2cn3Dp?5Te*Dbv2wk%|C*3$ER4a) zO&%Sz6733?f`F*8_c#EZ0ght){*BXormf{s!j4~oV{`aI3W%fAC(?n+R35kzLAS>3 zMvN~6#9fr~^;I47A2Nz{B0O&uT%RYy^W>Ke;H;V1nVYR%ofhJKrp(X2M9h^Mzb|!f zgLVAzbLefiOC)d#eWt_FsFFI&0itt2Kxs#}@}DX{(Jxcg5@TzP*5CPx?ydxIlXA;T+B@%~x7NYrp<`i8NuR z7D=1$`L+a!GLaScl)1&O0da9(x+xN79J`qzR3!CoffCT2%1lhnzGF7|WZb#<*;MW9 z`~dM9L_1u@ZM?O#XO#38wWzF3Bo#p;FX;rCi|0OueS?DDp)Lf`!_AmA!|}XznOV2s zFszY`XokqD%GhfN_J3fL+tIX4+2;hQrs)d^iy$Qi!d^qQo;;kTmTQW*+AFdK8LRa~ zwiYfsS`?)Dp&9E*%;h#v)kZa1#B^*cdmYZR8e<^jcP^KDh%BIR&K$9V-AUN%@Z+J< z(^&f%?iG8+9fyTPVgX`mhfksY;l1H8$iLUr2FXNxpoXaMIOQc$SqSeX!Tv;!s~{lO zl_w6+(k&*7Wv?ap_TD6TtvzWcL~4Jwih{m(&j;tv%-$3@ex?txd-Gj{o&-uJKjL$R zZ8oeYuCDA~JCODltE%s!00hi@h2f$2#GpwrT0+K;ogBc!?(7Hjv^6jR&b5A_+_f$V zhO4H(E3>&e>+HTEJ}4>qs^b6yJmL~`ZGqz|OPhLsx*J@No&F@=$Za2A1h&?JM8f1c znng+tivQ0eHbo;$qjaA6#JNxk=B_s%hFT|~{slz*EzTBGY9t(w(F;qQ z?^CnJddoMhT5R#%z_u@^MLfZ*D^031WhA|&lungT1tqwEu`PjIo@Y zTx14bj7#nu8dn!c=}Z?IJ+&iYY8t|M-jEV$=G^O^2(PWC4Z-tExj0Vz&~hjB=RR?A zQ2H0Rub;Jj{1VU1xwN)@w8$B5LQV1m_U3u0!We^D{ypxL*tkdc54LFU{ZYDQ+ko6! 
z;ZKpkU6Blf1vU57hRhX^HnXKAZiomYh2~bzZw>yvZi%rh7#Ofp+(J%q%WJ}x~A3P|V z2674yeuLrpXQKwnp@3XoUK>IV6rt}!|DtK5ZRDm$>!ns*BG2}ASn2uFZ>i8%qEtG& zpAldQlXJC=dF8#~sso0=muwHw zCD!6luS0hG!wEzV6dlgNn{?Sivp0{9M53rB!*$sXivr1vPb}~Ik~^o;Qvf_uS&AMs zv*x3*J16b4QcFz9StKW)rV4ujm1L_xOCKN+ZchDWR}V0lYSU=?=ZQJLmM0OJ+7LfS zvXdHTHm+4nE_GNNwp_*WMt_l?DV9p&dxmj?osI9zC+4Gn`L5^t)GkPr!y?Fpia^qw z7UofN9t>deSnITY(n=kLbq~R$8rQlOY4GceWSoc4e!EryQ}}LCwk{5(Fk>AOel2bo zhA5u0%u@We?kO&czVUZieU;7%Ew(devrs;!WuC)SqCJ=`Enrre`HCNVp&Tsisd`6^ zVSJT0IY5Ou{$&Z}Az_Eik5K?GBd|JMTemZm$)0$3wEfg|edR$&36M0{qblTLyMiKD zOAifNL`8Wb_bgWpX!!o;F@1swYVTDF~$ptC6`5E zN;sJ{916ZXLtNVH1f-W(*&{T%zWIgfDFST{y^Jj)G}?~ybI;?JQJ-*=$JzzzKg>?2 zA3w_dm|S0bpr)}@?`i&yLG`{iqbk7AZwLf-Q|VyPww$^<+*8zGE2L609&#}g6RYX# z?TtYi1m~W-g!!5JozXM;i6HXqF>HTt4%H*-q1ESE&whpN)7pCUib3NkOi`lUcu?k$ z%l?x4(epXlW7tUEaNGJQdFrj{>0ir9j026R9C_So{bxwOZs&tu=p%p`!{5j<7v$Ue zo%18X5+%ik%<`5=<>!itAJ^8>ed;g1vmd34ck@mrvpPmv4%CEI=vJE#A2GNmvs}@Z z7aHPKx(oQ-)5sCZRZ0obl}f$WDG$Db(?+#l+=(r6Pa-ts@xsb_Ueb?bw=!@&vCadZVG(i2kr|Y*Zo}e%GZHZv32ES8Bt^R22u@J^ec`FQ0XO`iHKG~cqlJ^;FpRFG<(&wi` zq9ne43v;RvlXPiDA6vRCRcQvQ=)W8 z?}H7m+PuXr0+A~3bNf+N44+Um&(-8WG+)xuDUXGOsFGLZw)WPOHv#$P7!M-rRIF;` z5nlyF<-RtWi-c~s!$(+t^L=dXg4d?gnL8Yxtd&#x@~>yt@1Hw^@$WP`AD549thB@n z!vSpW3DwP*g#s05kVH}aH}^jkmyV~9T>dA zuSO-W6I0JwS=8&+yD?np2&4^9NyyGe@u>ayPFeHEMYJ=ZN;sUy`hsRV(z4ROOHya# z6$d`Rf6G(epP_v5>ZrO)Q+)o!Nm4xUKhl;NJ&wimudM~W3;3|ri00_y3qd2OJbd21 z>II&>$HlKPOMM?EDH&;H)AI?(J??n~r1u4L?c~qPLwMB>Xj*ULkS=FXBe+;4Q^s z5qLOVbg+Qo?rOe&@#C9Sf7ktg{8L_oaE+x}|5a0Hakg_T3?pkYbmeBi%JBSnvsf|r zUq_VqYxDoJ-2N4W-yM0s<*aY+_`m}jw9XWM`l*ZJ-IhMiz&*aIpu`9MIR4h_Kk zd7RlZoIE^R`}^PHX%s^V8KAYGpkypGh@V?co@rII|kJn=sfkbf4{;;X%3I8XrkM5a8H zzZ9DN14^9nYBI`ZCE|C-{U&>Mc}aU*{I5e!j25-}s0%HQQwxok)t3D2d5=(6Z*Ks! 
zI0EK_ho0UifLiSxj`?R|I4A9ocpj5~mpczd*VRnge_;D2_wXqcrAI zFkQ@7*}o^TVIXe#&)eNx`~E+H31OSh>;D>0s)V=c#0;U?agJF3j-M!8DO2T6UYbdD zW0r$qn0<*Bnis10?X&dK`$w9-sq)aj;e%f056|7s{0o>N{yD_82Jc5bQk#P0#all8 zJ9BgsL~{dTLl(9O?vDKhND>_tpJi!^IL2%b@_A&SM}N;YkX>x%14xrh#6CZG6y1Lc zVksf%UJDsJko&rBJ1{9FeXy@`>#yOI2L}+;0^k69mO-yxzeRIH`rwT#jd@`>8Th50H1r|OH zeUWnu!wOZU@1p=9FqQ;;<$*(u4%B&tO~2h9H@3j+>#cmO;(DZ?q83++O<_fHpFGYF zNzrZquG>oDCzQ8ClbVN0N_dKUq)#PKC>~}=#ReRZfy(*?WQ=t4@{V$GM94Qc6o2f z7Z|_OJ<%TrioUDh{F37aG)m>+9>gP;Z%fbQ|TpvPTV_A>_(BorBIh zRQ>4qOq+E#Z{uE9_EvbhJsp7XM5TDUqU6`MYP3;*DxON#DZwP)O%l{zjI<(&udqfI zXg`KrG8LJm6;J4$vGRMAU=ibYIwz2bv<)UMN7|9nz(&|%^i@2^|C&`Ow}!$ES=x=w z_obLE_=>m;UYoC(zWefUf@-RL>M*7$^L0-vw!XW7#aviF4eR69D|xkR1Ix4dqq{=# zdLz14>JvDQsiEoGWPy{pR|$=-JnJck4d0|X1}6aoiyjyF=M)4hwD6Pp`i!2czZ$MC z3GR$WnX=`tsOHKQtQIoH08f?5ujDaB=bpzv8yt=4rJ_|uUrL@CCPrY;e>>KgKI;Cq zBG;>}?629VK{nZe@!q&#$M$MY*!Q<*y~$9pT=9{XqScZA@_}ya!f}pp0-d*!M8?^` zS{nX{XqEa@J%KGusp=$?0S3P%Gb#Jdd~#lxpDS>$*f7F8RQGmX+oo+X)xP4_eKJ2q z+_Bfm_CntBJ|Mm5M*$X*x?=w~#yNp=O=#t90seYZb5t1d&~FDw|8aoReHA6^N_6F`H?yFRKeR zmMJ&NbW=*#Pdu}z1A^`CiGe*LLuMZ z@~JAWiJlf-9a`F6|P%u8(yFBnpL8LPP1* zAb}oR5}x^T&F=pGg4z&}f4U#mp)u`j-}s(zlp+bMOf+bmc8S*{ z#K12lA#_qrs4rgXsqx?q7PtLg{43Pb2!x*e<(;>NwN`Zq;H6hG@PI+(o3q2vc#9iC zsP^|MWcgZy+coCOo1XQm(X9xph2|~CPyOeR=@qNHx4SDM#e>EwFI!5;qZzu+QMM`+ zn%foeC3l7mEJA8=-_+NcRWXju`=7&j}MZ!F@#~?yo!9 zAHf-_rcO{GVg)jc)#~02d}Q|mVF2IgeIE3Z$zJxBV$Hb62oj>v=WPh3X1LyI+GewG z;dpn*t6#G6tWQlq^jMXzz4NK!I%59z!p#IFpJ+jx0~JNALB6tj!tTI}&*k~hR`SUV zmDWtv#$`)V1o>3_s)6TTn?STCVFA-2t%H7MtJ`B4e3V5-YQdx3#ugL3CT5`uVSc`+ zHEQj99-XYcyLqLq-l(5!pAc_-y&0cKD|Z`(rZDG?-;m6~gSz$ok&8}XDda#}CWo+M8Qw zy!%JACtYqE;ihP&`1AMDZ|u|E9A}6rpO+=lH`L6suOJ4cv)SmBq*Y+Kq9XT|so!O? 
z;YfNz9E#gGda~xoA1|it+_UVj50_P1$=`6s40o zgyQ0jhgp-8xa{t^HMn#Uzko8LIdMee8$uD31IeIHTTT^lWu-$`_acok7Zwi(Z@R^V zDZu6mWjEv%M&fU4-OnU!AxXG=R$3XDX^7f>@~Sd_!cO=qswf{QnpQ}MnZ7jLA z*S4$~{906I%|Jv5$O@EV;dkoD`n6dzuAC3?Mr2ELt?%~|m!h8nqazZA$%X>Lg_cHaqii@uH_zFQv z4P$msXMzb{P07_V{1SWUds7B=N8&9#17?vj zay20uaZCM;VbjnJkzBabkEkr_&n7?Zt0~DTn!K53fA7V*QIEE(vZ8s=#?)4Vl6fAZ zG-{aiDvO2G(0WZg%;ZKlaildTxAu&Eo|nFT(x9E5lcu--6U`uYaDx3fgBc^&xzxwa zLM7z5#LIU1!hVhSI7@Kk`9N;OzS_A^x{gvDS}ZU;8LNhmTo_?kt`q0*gHf>WmdjRa zoJFw7T7G8a&?6wtW%ZsWflc4Fx@z&({n>qyrwC$ujb+&C8i#T2)4DLgKF@4&j${?O z+)Czupg6P|e{p)k>J1woAZI#*t6-E(`(plE*U(bT%(j7KvDyHEU+?gH`ZDzfdYMWv@P?EBZcJ;p-{!9&B0Id> zAY0y^{J0F=<2})4THLd5oZ0U@!z03-aH7!6AnwA6XkdOjG3%Uuohpj6U{eNO;%HrY zfv0h=_D@eQUeA>E>-ZmPl~Wuc>zWh??OW}sc5h(mEU3s zWB$4j5`O&}JmvO|&&U5NLt_<0f)MS0MSS6zL5aGuwt6(OauV-yoBle&8nsWl5$p7g z=VH;I+nop4a|MpSlZ-Tj(yq3&bVYe=Ha5?sK%_*l4*N|1hfUwKkf^&oGQ|qw zyP2ZT+L5Hge4K9SP~*v}RGN_OsbcmMeD_fXyZ$7mDKo;mp5Ib^@p3wg1^10#Fdv4G z`~;wJ(D+Y9PT?{qkG?P%SNMUUfzIOQ93NgEd_`EziMRZz?LO@?V_AE(8iH}=r19>8 zrLC&p=1oj67jTtGHXChl(`_jyN<=oLNwy6#UmwQn#HBF&G-zx7#oJO-tP-Dgj1N$g z#Eno=P_)bAb~#Y|u~?{P&t0hn0pw^|DzEKnH)VB;829Rptr;SC9|VNEY@2@QM=LL&A4%LVvnD%s^5TV^^w;xQ(hEa~UUQmrZlY9v{WvaO87o;UpmrfCbCLzM< z)$@(m=g&n8k9X^YUI}(ZurIhZec#ZwJ_xsbU?8kTq~f-)3 zdCSICfKpD6(?Ll{hoyp9+lTtkn%j(-2cPZc6Zfa$;6=DvR##{G3J$HIZI_^OkN$bK z97_onw|e5f=)=imw}xuv`ZhLZjuAD-HG0 zw59r3f9zw^ESTO&cx#Whd!NMb$vH1yy%5>u8xB7hsx&Bw#u~lWSZayd&&D@wlk6b} zM7%jr99SxZ7LTj&mV;)Dd{p|B$T!SqZ850z@I}f95>6_q8n05W)AY|hH;a0+^*VvY z{NC|gBvP0n#*{Uh*v|KwWZcj}&A7R)sw=9#nHT z_?XQ4ViE$QH0RIgcq$-eZ%aWDhNrKFys=zwyRzW@N&uktBE51%(wmR<@&P+Ga?;`x zEm!Ek0-eH_t&IrL2fWBM@t(pr#aMQ~7KOaCDRU(VMqbMFj=REgaYvGBVnQOtsO4V` z(eJ;9s}3ClZPx}K#_D3kGYiY@jqg(k=n?giAzzO* zmB}domrb1?n{(~)*UKxSQVpjh32L|zBbXv4G!1Fm=fkYcwG4VG`YQA8_rKp|#RL7J zSb69zw8C_mznw&%Dndbkzp-14aT*2kM#(?nu^Yn)8sEkmw8g+~cV!mzW!^ulYCT+k zOq7=oS?UNL90zo{{FYzYv4yC84Fy43Axe~v{A@|*B#& 
zdd8;B;q+~pGtR|soYBygBqc&hXm)XHb3X1w#u@Y1dG`908+(TbcaZX4cGo{U4kwoZRwH3?V2gcR59BjlP;eQUFNxetV93%7<0}7_l9Z*9-qineoo07 zXWL5%YECJr69hW_NGjhux0sr_e6piu?Z$eNj>pRABk=r?aO=gEZDzl~=I{B_^i_=oWC-{vL)g2xY&Ss6ra!o ze>!^$M)GI{H?bt~D@~&V;Yf7POO@`qXz8Hb`XiUrOxdprIHU z70?pbxaXy$X%Gqk_#8rh!{VZ^=al~~*#`Aw za6ra$FJP^lYkH=MY5&-o>wJYayu}ooNrWKH10Q|Gj#Kb~)|}HPyXL3S;MU6X9m3{W zwjuDGPUV8v(z4=hY$JC-hHDwK%QMg|6{@2EJ?m}O&F4?Joxm~^!8GOh@o`7^e!cOW zu7B!^q;O&>%aY6U`G~+hD?(FL;8=5bRK~Rz?Q{89k1>}?k9@OdZEo1m=6L0>IJSlS zTX|gnrj`7u?UvO>#LYd$Vf_YPOYcU)Nv>l6bbnvK-d3IaWe2ueYMEmCq8t$Z>_jlV ztSx|U5TVi;EnTtRaYv}Vs#=ZJ{SBbQ7M+sjGdEw;Y!1y_`$R&{#8f2>iBW+Ti-#f* zOHRxb@YxPIJG>rnu1JQXmCCW0k41`ZfHwZx9V?=-HaU%A*njJb6k;U+)J1^&9*C{h zkcaE5!+F#awim-5u!VCy0gH#HPWSM!xP6zhrP0zgVuK{}<#~n^SZl;$W9QfXUQR7= zIke6km`#mFhr0&6?&hf67X-Zdv@WIPhxw{2 zdcec-Crg0rvh`8>TCFL@?V6i4IHQo_{B-2?3I`}5$hdW6q5Zh9f&<;j7U!_r1CJ|T zl zNk&Ff;w3dSK2hVTCehL0rxU%GwsO9A&SWrG9|=hlh!0f13~W?d?;TTKE8uoGP_PL= zBo!ti9xkIThYDt-2TE2hqNG^N8KcbUVfZoJw^d zd0@rPS`)tcWCaQkgDyd>ql-$)(CtV!gFi`$>$uhMJLmc4ZJMaLzS z17oI9Pb?)Wi?nBrT*h(xe-@?K5kA8RSuYQCy{0@l1E|lyia85c8S3*09oKW?pKTtr z#}Y~mhPj;6Zk_#D>)X3><(*_TTH>3~FLHH$qUf$8eLC_XmX92RtI^bk0C@i1xE37NMO1bh-Nw zEJfGIyq&|Cb0iS-p6zLVaCqNoNiUS7am^W@l9XHc35_1`$YCRHXXp44f39b( z3r3CszmKgz-2UDRQKSC+5f%zuX0j!7UiZxbv`&`6k7%}$^y~fk`MK_CMj-$mAZKd| z_6lIlmiWS0j4MOo8&Wa^tIqpiy`Iz(BG7z}crgRjGQ5`e_cW&P13OUs`puG13it5+Py>dBD6b@kTB2j=sz03o#|_tv8p)w0C`N0=NJU}fJ_2V#YojOPn=Yb`bd zgG~keZHNXRVOutfA>x_$qfVtq@{!CJ3wgMZ(nn)<@^C&iP*#FcOmVy%ndOq99B(=c zo=j|~=YCpr$3Nh%bkKnXW2Q3|9GhjI!r585-)=C1)~EgRfhm)%-nQvl?$(#>7C4JH zYY(}C%mGmu7h_Yl*r=+JODDM%2r)zX&@z&Ew;!YGs+eAWEaxMFc8LL0Mlnflf1U)Q zSnqzz^C)ER5<&*AwowW&7&u)#v{%!rTJR9-qJP_kIu>+u+rL0K;7<|ts4#%;1{aT zJsMn6VC!SNGNVME!O2QukYM@YnH889y|~D;eY_R+8dtAgBN^7-c@#^jNvIWW4@1;@7CCX1CwX zwt^FKcoU~EC+;OJ-T4g}KblTbO8L2eLbM<o zLjwMVoe7%+TnZnwAporW|2@qWQ`}rDp|75WroqW__Ld8l4Tvl=(hI?f~`T!lDb=0C(hzO-x z$xje^L5lkZ~&`ZcHMC9jJMNw+#`K5tZUpdiz#m)Afy+4Ye>y1+?@ z$hC~Ey5RO5KRuT#-HP?6d|MT%5Si|=k?>BInMT+19IwX-H^GaYUr&o_tsOTmx=*Fh 
z>w3*GPV`q6m%{e8qBE)QEwuz)y>D#p9s_Wr0XD-#K^-k2i{58$ zpppPY&m55>*lB5*s-AUS3QivAoOR5V6J!WX5icmzP+qT>l<7g;u0$+}*}R)XeDtwZ}!wjS`B+tZP}R}>`6o_d`uAs zjNiYf4wL66>U`V)PeoF>7TV^!7D~O!L6k9!x?jZaGJSTQxy=}i$VKgsUMT+LBUNws zj?rj0+u@c33jt3~rhbruCEsPt7X%ARaapuOST~P1aD3a4EloZqkdlQ|nTKWMQ%m2;TT=O|To+KotCrODzq%;$drnpKn7=K% zv6yZyjt;HCR(2F203Bg?G4V}XTM4SBmbRu!Q6{_vW6uW!_fF1WzVE2tETOFu`$kbU zFGy6>^gQf`%C@U*MX7kQ{2=AvK;Wx@c|7FEbk6pv3F21kyS11%1EdXosg#omL(W>< znVCe9Af&EuUu8AnF6())%Oqz@Hn!O>v7K=@yO+7OKK2I!`O1h>ht1xBrHdZcw5)x7 z2*eVEn&A6cBm5!|r`#9k@n*A_seiH*@in73bz@R`Hmr{TtPTz7jWHQ`d3i`#s?`|( zSlug@SAmfCk-CrI@<>91^-i$4&g_?O07UYgPq24CeRXx1FAF_(q#eCrg#0sC_r``0 zsN#qGK0)wpu5#(V9WtT3@k4}`ncbexJH+`DW7y~{dHF<6RZME4e{2bxX2FX4`49|! zMoq{~ywTgsCLepO?j|D_(l{dkCl+nU=ZId^_Bk5Pn{TqVT?i?q)}-6^%E$y4zQgR> z%+K_;6nmQu;g}L~tcsqo4w7_oySH0hQYNYxo6)$EoP2N}xOZuS*tHD_4~XoEt@k3p zK|ZiI@j??M9uCO)nJru}_cIyrtrZTt!(+*q4qg+V&#LYx2k;jZosG{r_lm`L42F9f z!N!wmNvVkpnOp;Qyw34AO-iK?v!kNhYy@C_@$mTm^z!!ZSyY1#o_(kW1WqivWX(-} zdIEaACK6s5VPtt^lYf+n;&$w$`+Uy`OUpQK8|->{baITlh#n%CrNf~Rr(L2F+t}P( z?RufB0r5XDcKwNT0q|#79vvkmbOTYuROIAPd5wOOU~d)8G6Er=*1?@@P3rbr{MFC~ z9}S7!$58fd(iK;7`w^wHO23b=8kh~`tMj~p6v1RwPQZi3A*cJZ_HdV`>0xChta!ed zs?Bd3rUlgw+mar{M5Bw7Rs4%oI199R7fe(7zT!9|8@>s)3ujcMNT+wvR)||f0r2Bu zLURsRP(R=b9&fskU!pC!Ody>k0X>?My5Z$>IJ)gzf3TnL4l+=8aM&!U8y?lR$dBXg z;u)Jpf4bWAp+X?}1V~I-uY0MAA{%@4OzTO|dFw#P3HT>3qWc`;Xh2~o_R!b2I-pDt zXLMF02|0v!5S={9Hp3{aX0bi zWKvK&!MjeRbhl zDaz6^eN?M#zWc3YAjb!Nbu1|t?9S&Y7>`4xcMNJWupI8zuNzn+d8n$;FImWu8|=+& zR@zTem(wuPIJ>f=-fU&XWmZSb)%`^EuqsiENkFeU>>$@N7ace>kLsO1C53+*S+70L zuHS_$68|ABzvcB;BiN|Mq6GRdZv;N87#c!V z#?*dhb>BX5^4yh62CR)jJe+zg@$%%z0Lc5Y4d7P>6ry}&i3uW7kd;sE9I8LCxu3|N zKT}mcbg;3Tt-vS1r>fSGW28Yc>LNEbwtV(x^e-eA_|WHqXyjV1`HYj4e30>R*txgC zoNP5r;0VLY%9^C_r!aIdYkQ!sw`LX6)aLd^#Is6F3|P)8o7$Oi6Kb}T81Q#Czs+io zC3BRV(|-E!b9*Ww__=N9L9D|8;Ivg1Qi1Cx$d7gI7mR8raogQ&EGA$RhZyywRqAllUcD->&5`^ZQ zL892lWWMATMzrJxKKIloXUK{v?g7#3_9%^~< zM}ZVMoBUGKMK~2hkH6L%bP7C}`YY_q6ZEKzf`?I!lioj0gl`CgYa(w|A=oP`-i)f% 
zMhFk@a_^F;G|f_syktEhU)49RN( zhqHZgh#n6BZN^(dOefc7)cF!o(lUhK9zxQ4yTgF>J=w~G1*p?kIkp7ZW%LY=qgp~G zIrU|uc4p6ew5uT!L-{MiXQU{V=R~KIa#H2$2bPLxgA&2djOrr??? znq0~O$E#js)rv(a#B?YQ$NkIqczbFR&yT|3sod8NcpWbv#~BZ_ABUj!&`zxr{S_En z{t>x>#imy1#8j{cTWdTz(VU#^iU5S1N>&n*TqO15#g#h@5kZ|bo5zO~J~kEspb{;a zNol=#(DB;X;=6Og>UC;^CrLbG5pOfr*k1NzdqCo?Lk$!X=~(V=R?~F2-Kn}oEDCPW z`@)6f?RW7mAmiWXCs^aNi#yPVh;ZLKw1l|R#-~z5khHJLk4fJ88T+OQOOxJkHOn_)A8cE0jU36Hn-`t$pMON=_2e!e zDSsB!5zfYV{vks10C1omWeI5%7C8z@e4`KYXk0iMOw0rDw{24qA(a+CanwjkNmBOc z_ZJrzs|GJhU~A5 z5=#qy~V|s=h%0z>BvamQlPQQsw-obEPah1N_NaZGEZPa&9Lb;P^r?4FfzeI z0DPTp+CDh>+MjnL+FuZ3T&-WH$(&LxDGwl-fd>mur$5UYYl+25$a$j)k? z^uOHt(2p`@>SQ{A|Fb?fk) z93{S`3|Z&6ex6zpA(7L7I%1#s^#$=+#0f zm>PLvefM;yuV4@omksh?r%Kc!_DO0;KMT~O=})TDWPn8>qSsnsxH<*xu?gz6#_Ld# zN)1kLfrE5p8x^zR{42Z6+K?$zd%tUnJeuvS{=uQ2?~P~TQxDy(1I)=@MJE?Vtr|E%lLl0Nm#>C0;r z|IlwHSp5_wikYqn+?!t0Q|^q2rT#c^LYV8_QrrkLjTrZ~;Ta~{goKFPgYEN|s%!gX z-3L6Jr8j9;%6@<;NG;%tycVS7T2jTCb7=*a^>VMk!>Us80bJ*$P(Ms7pazN0Jx`?% zD$(ct)zt%xw7h6z_u5|%4 zx`Q_NtUMWV#!~@Sv&LvMNQo+7(eWy0yRL$Eu1GxQ{Dpj=ZFL&^dUYd5#e>vWw zX9mNgqV|)&8*j++-?#g6Mw)ez{*vIq5x6+xe_fMLNT`GKLXWcwE=#CyXR?wmXX8Cl z;X^ff)nNSlVrXP!d4BRNeK9Ti@}J~02rN+~2n(BcLOz$$L6BCxm4+C{e?aWzU}Wom zI^`-l|CiOyAqy4^8NTyUQIX*{!^gx_IW+{d|1P-XN0>X|ozP!lgoryPplY99pRR3V zEFAS2@;#ls^QkyFs|cmNZnT}s7OMiG8IyTa{rbG8W)?I<=(?&9cUL$EmXHD~(UpVG z35uI1`>$gJ|It1;ruN5DnqKc$ZV=scpL~UV)y_BSt+kpStgFT}|ed5?YJWWY` z(TFuu*ll=W_--SZb*P{w*;({)Ht?&6j+vpm6fwAi1db5D7edjgid*34@);Z2sHd4! 
zL~?#}j9b8iy(Dp|-O^Aq+!*%?OByWdvH$zTU>A~c9n zQ&i;AOs|C6=XW>1o^F{orlXAI)Tcf<+KH4Uy9eK|6&0wSn*API~9=| z*Y{stX-!DO@dj^+Z^W5;L22@C++Q7E&seMM_8V|Vxqy2>*nmk@v%tUjm|u-mU{hN7 zpJ1GUs`L}sdED@DG7Bs)s3QIJFK34o@9gd>pBff(_Y6s251m=@%^`#rt2)<~Oe>q2 zgeUhUt!Z)3PkNP&j_%O=T;ZB9t%!&=M?RzY4wO&8ZY#2Q(reVw@_G(9gnHwnOj6E- z71J(kFluFqTD`^z5d^a;sT3Wb1Y0Nv#Xh0&sS)&6ZRph6xBuvg|9k!H2-Px8XO2y< zwtOCR+TRuicsn5h_YnZ28VR=v>=atM(*!xdgesR~loZo)7@4)NUc=xiE8hD~j4pI+ z`nGg8rJ4sw#Ma+>T~e$kIl%h#i3vLlH>u_aUNRAzJ}+gxc$jez@+&LObT);7DmwMU zSYmwJlY3glESE5eMB>NO`mH^f4~rI(SlBBM4Jmy2DlA=-ERvKWG#Lie)|>IwdGbG? zye6kt5V@>AxpUL?d#KplYd%x1`@VRdFRmoa1Jsat;~qB#X6`5$F3w((NzTzi{;uA_ za0l2F2-ON*9SUJ8SJww=hJTQ@PYPFP=;$&pi}(~nh5M2^1K~XdjvliMr7!!Ocq@4h zEM7wI>COX9vNT+{rz`dmo5JBoo95vk8@bi&(nd&cK4l0nhq!KoZnh)xMo(sOusMD^ zf;jCv=d*@u!YwEnNSLfEetxwUQ?P??57_F#Oy-lW{2M{C88w?6DCpG6TWvg5pwp^n z1>!W@|0Tg}1KCgBeqsn*amPFqgLaAvL*rm44a_;OkP0fmkGJuiR3M-~(>YIS0kxIW z7V80Kd7pn^wC~Q*{jMzV#hV;2Hh`&&%6o*k!lg;^5h-p9YT6WV3Z-dO_L? z{Z(Fnpyku4q)W>Oj=Rr79Hf}%WlG`H@Tz4SAE5Kxvg<3EL5U| z(Ec*qHp=#qf*FsUk}V7InL3yK7CDwzDE&}_omKNzmUr7oZO$Dc2mCYkB6Ux-jVqNy z_t*>jpt&sew3!%SO8)268G@eWQ1*<-NUN`h3U^hK~l%VVUu4=Vj9~sF0{!b7gsadLa>7d}`Me`N z0bgpxaYt7aC;`2cZz6d?oW&;_7GG61Gc;y~sVP5MscLJU2Ude){b8BiYK|u_-JLib zJ8hi9gz}6=$!)2VbQ%T}El!D)RLsZw=^=Abvw+=%4p1EoHO5a^ zj0C>VFh_i*4ikxYPDwS10+1;NaG*4eD8bRnTFjSMX{ysXwAp`y9-^2$`zrASZF@M0LzE>{$ zw*+a4pk@e%6SOUgZ8*bE5wheK|2c!M?BYz51Y;VjgSrr~{*=u~+}rfS)|s9Ov*Gb4 z-q*<(lAHnN{Vtzcr-^ZBme&UkV#1RFScHq5K*+C3Fzwe+LiJjs8NUq^cWp^p!mCG= zl@U}il{pSv&Q zE}Q3DzCm@AdBk^~VslKm>kqZb)#09hdw6i%q1&EmbbYJ~l(Fse?&h=v_I*%Zny3I>gy0 z`yF`Z{g1t3ASJ91xtqmY#Ff?+9gQ7Vac}|9aspe6$3NEfZXlS5wI-N^Mon)SDrQ`U z7rgk}k&*_YP{9_H_SR|#ng@ga!sfCEq6iiOcbZWL=@#W-`3=;A&eqB*$`}}7#j$}S zS{?fHAelukeyms^S$z=HpC2%#7VSG<+hUNM_Uhwu4W5TmU49uBF(l583vRHx@Vu)k z!Kkm_h|A2d+-N*mjg(k_w@SU7EaVZ+f58PTDbd7qR8+j^DMdlN(<2Y$8mL%H@q$HV zGgl5zb-%`IyhX4$T#z03DB-|HSsZZ}bJRCIkF*=2@M`FqzSE$K@M5T;v{=CT|5Z&1 z!S9JCgPYTV{>RpXs=S1Qei%E)!NSv5UK*xm-&CdzL*nG6!=ls1Kdm_FndIfyT<^=d zSnW#;K45Nq-|=!75t49 
z;HOwF1&hIaky)gGPKYLiqH7Ilz56$_b>Z0om*uFw z&f(z+yJ0} z4>iR}0HWPQ5TR^a3vM~@_o}|3DVWiwNHS;#QwNWmWAo-5~9`{;|-p$9i$0x?m-gnd`xw4&!8?!vGlg2&p8)yPrKrOv68n?Ggx6AY3Q zGPfC8KYhx z0~=fH*-FE~lBzxy|J=jt_{PT}cH}xDCUQ$oO+u(vn-U zGY9%KWO}b+b2S9)hq>~8$eEEXPCqq0;5N8ppdiZ_K8NQt<~R!w&=+Y7wqA}j=zlnigNR@&e>Up|kQ*Rhi>6%EmwU6k8oAO%3841=OrX5> zTca0uwT%Gx1QN;7iw)=Gm((Z!c!J15I~o`r*xzFDWM9MMjmEMMGn$x(;tihUJj5tTIN$9$;5ESZjKT4?Oy1>D z6TrFbrQQdoj*gTGZT0!cRilM|!yV@xg-$`>Bb@l(7c;;;u)whhxmm0U4tyel)<7}ed5R@H)l$K~3SKqKk1l^;K zE`Yo7raM)<3uJDSvREBLP#wZiEGA0`i0R51X$a?}`@Onve2b}~n0RVl#>J^Lbb&P| zg1g~K1EI34+4k2Yg@(lG8rQUB_+~%9QZl|^j-A*tI}Vzr%GGWk;&&4=dG)D{=}cuG z+=qfSgV7Cfrt}Hvh@FehDKRDOTjG*R-ZR%siEH$)KkK%CmVg<5gsICpI6Oi;#T;%$ znP4@3PGd*;9~j~qozMAvB zol6{6OjECv+I(9*l;ZaS3zpNCo9hN9*1NugqgQu!Ts#4yyMV*Rci~kTUXe`BXgCC) zAl!-@gAN{T$%a0*O!O$PE4B%zPe!9!6IGz2soHmHLJ&-v+it90qE&3er-AZ{s}n9_ zmYuWTt1rs*VYXV&L5%uG=!7(1?B)KZ{8flBcut)sI-BLT;p`dJcc-QI@z(EaD*s}Y z^WZ~9(6!F9OKDP)dp*9Q(uhudT8-oHhX*eQe99Wk%x|oze|md+PZTj+&X)V+xYm^G z(N1uH;v!%s@!y7ev2ybNv$vhQ+8MYp;*MkAthHX@Y<4qyTM366=6hcPnM zG%oU7thDD~^QhRmjh{i}iRsL0)2eKk)6bd7oS}6>b|K>&`f6mQD&{O4TFS?t#_;3E z3*XJh7T3XM#eb2(f?%U%)1uQ{SP_kkDZkRCM;x)JC{eDTHKC*i+{;}`B9!qp8jrCp zH{rH^y(%bCIzH2EjRr3{nfR&UJZ}VMuiBUhXk2>YJ2V-e)zR$06iHcGDDVkJC?=ii zBv{z5`$1!W`M>I2pE0dY*N`->Y{81+NOPJfE2++T&%XC6b@Sj7OZ4nU>`>3EA=A}a zO(aC~U~dwsk-@E9^W;+PFiQKrB3WbLTLK~ZR=rTTcj}x0jq?(o_0T+1@gZUXTIm*f zn8WL`09X3|9>Ht_m6=i>cWn(di-rEzIc;=s@~E4d2g3d*FwRN`Hz>cmRxn7&tGHrb z(~dPd(zHw3zSVlpm2Mfa#|x;GFi!nP`Y|8Bx#NGg3PQrzp1Sh+J$;oR1m9oj@$Y+8 z9D$uy-;*|aa+?1`ozI4W^L8Rez-t(Ld#?^K+vgBAO>oUzFsoF3^ugy&E4*YqNp{sm z*qicgF9kO=LZU>dSUtP`9G8nXjl!o5Cn_A=SUFDdsJaU%jvl$&Jg8UIDBh#vJ%gFv z4=Qd==2!Mcxs*FI&~#SI zniYI}LexU;&-r?fnan}5Ml;Yq#ySXYvXEA6vyzPFhh?CezCHsnbxm|?&3Aw^QNGqM zEzKq0w@%sX!AUouZd24B3E#J(n)i<+>PEOLqt<9387$7jQ8ya4nlUcGHmyQ$|J%Qo z0vM}|m@!}SNLNy#IR?jN%!Xq`dlvJ!*KPv7J^XRKELe?7ePL&{9Pt#)XChz5MBTNy zLeAK4rj8GTI&(Te#ivc+_xmv#uMf}3)VFgeICrw5Md^ktYF+}e{i9(_waArK&V!CC 
ziRlV*95g~&65i=6LARwA#ZpU4Jj>Yb9ja5x9)$E3QyXB|INdppy~{lId!_+3v=^VM zLeo<_YHicaxni6O__qHJ(tlp46Ni-7I0^|PZHs=K5U+mWv|0r}*o9YbO&=<5nSf(j z^~$c8($+gjf6Nte<}|Oc=@S_OeZOeXQ$}9sh|E4g7OK*^E(?#@yjqI(h>6i&(`DSQLy{WL7 zh_ze2q*Y|&jJe@Rj;Vp@IWW)BjYo*@GDYXZe}hm+b3W%DauUOwPQbEog){9vOAT*Y znSZi!yE*!|qXsj@zyy%@AtL{`8^%nV(NY1(FqOV}l8a7baq%df)|@L>*f0> zAY+_s}M@w=6xfe#+I1PL|`)57&T>99fh(HRQ7mwA|aDH zWlu(99u%R8i`oUBZzjDQpUhWYU}mxWYluQE9wJIn?BsVo6=if%yuVrwC4~>5&WIaY z0y@DRmZHk#45d_)BiU5#7v>dppobrldsQ$BSCctQUS5^Y1J6DYfJ)#1)rClxf2M`x z_0_tH4S&&>-k(QnhevPl;E%yUZ>25}cV-mTwr{ivfyJwpCC0jlHgmGn5^7Rj)7wEo)HJ zyhJ6>-g4-Vo#QUUHhk8`EP?x^BdeDSf(t@PvN~xzn{>N&N)b*=ErDQXBI`5Pg{O&PYY6D6)6)OKau5(AXeNGgK8z?=qZ0*%c|QO1hQW7ukVf#ddcx1F7GN0w>!m0^&I^Q z-9+*hT!*S<&(AHRDs0D)xj*pVs}I3Swm%x+6+gR#5?4^e_W>=B{%i+-LBu`k6U@)A z2A2a(*3xB(bt+b=gkh~6{AyYo)L|_at6l*QzsnQvEyHjZx}a9UM)1LjgrRHqVGh*X1^5 za{J1t@sLmp-g@S0gqzq%Em9Ow?iY67noyS&yE&~i99v`PV$=?+15V#fzWY5C5mo$v z{A2a8v%Bq8le(s$f!35JaOG+wN}Jo^r+o$^{gq=eLy!5{^05xT+<{KU1E{Iea*QND zo$JTN&d6vMiR=jP;H@27F%V$_5h?R(D|Yq4leWl(F@kV0)m=a~V)0M+-ALY&-2h=% z#FXgeOGDMK6zijPvP3Ret)$vr(k<1v%e3GoceQdSfxh_mS1ndjk|2>e2fv8O^R{np zU1aezLP7S3RQ-r%i_9au2#(^pvQhQF2kg9mEWFz>QN((eRmO~I|Aj^ zg}_o4nA%?(tj-PWcIZs8$Iaqm>a3;M5KDY7S$}CT|F9LjFw+kD#F(}=kuJB9W6(Ds z^h@i)C;#5dOyw8tFg5T!V+z<Ip1{Do7LJaR^A*3V-`v*s&#R{ruZ51X+{azmuf_b! 
zbr=paI2p`52jUQ(%8KwwN&$@4ukGeBJ~!1mKI~5EBH_$V5_uY8fuh7N&+DN`5k9pR z7Z2BM@M`jIOeZp;n&P^zN5GqDui?&jcE?v@WLw(AxBjiJ3n`_{DRtkDp~7MuuQIYT zAqK|tSS(c6IjkbnJy#WXIV`asPnW>vdWF=zaG_K+0+Ws zT7%-gjknRQAb=W3i}G4)n<^zr`d+ktTWB?QukrnC?=iJI@8zs7e@%pu6vkyr>H1^y`YXLBmB3G@R&4mI6rZcG@|@rghPtFUtfO<2?96$-Hz{fI&}D zIy`86pmh3?fgEnCbbVG$FxFlS&jnAWqlqjRm5UW`Y#OdIl9S;0uS7)+4A(M~{6&oW zls5B&oXTgf_L}+m(0U@6dvUzRWmzTocQH$xgNvW{vM|6Co*&qRLtWY*nq*LEw9k)a zE%8GNy|LrLVvC){9W@1>vrpK9RPM&pi_4BObV5WH7h;Y|@FVq<*%0@4_*X+~v@R1JLKhN(9KWda z^^IZ4&_-Wy0ka=A<2>{Ib7lq<=f^6c&bSF5u-}twFg#51>37 z<}3?>8N;u7RE_hN$kA|Mj`XJCy1#|_8-TX^?;k~}n%AwulfBnSi{XiV<06-GubZ_Hu{LYahdZ3LFKHKKK=sp8voq^( zRYPIe#O#OSE2CE>Y5L#ceX(6*N9J^+rZkvw6zY=J zZifC3q_neVD5%y+Dc#w57Lm+83?Uu)3&OcF6BP;ZE-4zX)DN1(di{?D*GK*` zr=vp=BVlxAKK6k@WiLlW=v@M^Gm;-9rM6FWq!7=ygg4Kfy5V1u3iB)vJh(GH?ObSU zDCPfh6MPX!XVTF*+8B?*CbkIipo!0T&@r*j^&8GG{&!RsGK`68zBP%8p5c2Klp_dw z*z*=4_=Lb!WABJ10)x>;DX*ijIyqySMJBC0j=u4#{5JeYtf)ptdLJe(!c(7hPh)w(Y^}&x#!K=mwr3e>>fY9n_Mlb}_TsNwu`Qu&n z*RmTU(drul_QZM5N(T=WN(EKs5`)tri+`tT+hWa=CxCEC@<7+{>KdW=ym+4RY5gLK z{yFIo5*GL6wIQzK0zNWEO`^3bJ2^U8^GfE8o9|@gmcOKKm*pcc|G0XEEhctQTBa{Y zI0sFrVBCb;I9+IB*Ba>FZmT8E`&Cnu(p2_-(U>we(?1z^;8dkvwyBI~#M7+G2#-4! 
z>W&#TL7`Kj%;f51*^b}FcF=SuMcC3a{htCNAUbPx+J= z73w>w62)68JAk`Go5Y09+GSdhVS|;RFH!JZ*J}Wj74dZ^Tf~`cZFKtl;QG?4r{#6dQ7SpbSHQ1x zR?XSQA(3nc7Cp-L7s_kpVeVUDBZ$S$YRA}WYhk?6mKqIzq~&;*EPp?LD8uD?k|4M( z@9j(7J7CJ_Zus%ZkD*!3qp_$o*f6?-$~NjOJT`zZVOuG&&52FUd+q46dh=2CaSYo4 z7n|FtcC99%Nvz*HL$dTq%0*Y-TE@Ca0jj$R%F5eIM3&K!mkL_?!|4jHJ3Ag-zu>dUsit29p9mi!D&)x_pCG!W;+!tCKLvr2YzE!y-@mdFk@$en8d6Tja~>3 z!|0;@5e45l%nts=E4zsSV6=p;!R$`{ zd)UkYMSpuEA?ErcE8UkS-JHy?Bz(*6Jd?9o&#+V5YryvcCfbx6cB`(RFG^l^y-ja2Rw^6ZH87ckE zH7T2jqnfLe2A!9aW41T+`(!^sMK)m7=RO&}FV)~WZd<3Pw}VaC#YJob`}gVEWl;Xw zS!Jba|9SX!XGJ^vkwtKxbwttpyEBF-*W+|!4Z}S0G5bMq(vB$?Zcm;IU@J*aiKwOZWV8`LJ4t$^XO8ZwvuT{=$@>G-;U@< zSssdJiv{wIIVBf(h^>Dq8AK4K9i3ry)o=L6M5ISY&>v|H{TexV+v#BE!RKHvJr6l+ zgMEq9KVu5@FOSFYx(ZWjI{g`X54W8^9172gR^Vkon^(@~V zHK3cyg^;QaH!c-yagP;*l>|Ex0QbOL5HXxEQ?(9N;J?q^ezg;n2w-vzjoh5=3r=l* zgi0WFak`}Kuc)g_ZF!5?C~w^|_|EORL4By%iE|eajgphP`{`Z}Zg<3?9>XU$r`i0= ziEe*w?3a!2US(s{mY?Kf90rFk-rXK~3Gh)6v@wNOkYxhvstS5rs|4sRq&t9Xum1DYwUr1NM|-^S1r8*qEQU*MZjr07CO(MOriJ(eoP)CB{8FOCgum2g@G$i5B-e{W3&hL` zx1+-Zf{p&S2VP_|o3*{~)L!ZX%r1JLGG)q0Yu#NkHeTc07VNTo4(VV7!=|JUa}rKN zsD~!;d?L7UB@^Qad+6{St`jSCWr?XFz=N|T)9(8ilNXQ0PIDoN-|&S|5BcCyOwWUh zVVJt(UEQ4t!&fS>1b6iTNRD^e0`iBP3!Cl`Cyzu)cMzN}LVvx+?0EcRd}s>o26~_y zhdpNI7UKsxJ0e+D0Z{Ej}WQiWPDAvB`dTu@oHCD$==V(9?~)-msZT) zH9`_bXS#mZ@la@OZGKX^H`zrO;ekaH4~PCYY<&4_#KZGMyQ8T-G^F&~&AoW)=nK1Y z{&!&tW0@lLYSP4X9a(11cy2rD)Padw#B)y5m)oVb1TIj9M05Y5=qqq?*;Y@xj?we^O0 ztsLr--(E-=XWN9~=C(gL=q6QSWXRX%rej$wak}kGWSC_6n#bUR#5x+Hm%43! 
z^b>5m)(~26aQZT*7n{K-xQ!iOdP?H-wM00=1|{EGKp_<%~@{ z=nz-jp{u&x>l(>Zf|`}*J!a`vaqn4`pa~}kQN2ZJY#3J zZeOM9;iV^jZ~tpUW6JEa+0X9TknZG9_pPs&Zo4-H5nk!NQs8{`H*EBG0kU?Br@gr!XXF!uZ z`4?xGgnagt1)C|=i*cQEzx=$XL-)_*E3Vzk%n00m#Ifp0ocILm(Ij@jDPOCA0f=%8SKa* zbmrN@sXlR^-afXz_`Hx;t{h|2+VUl^$8?TzF$+h-$Ta_5(rTka!b17MUZ70vo3T#( zKHW+6`g7^re!CbSi!|8Ry0>-;_x+bn(X>(XOKx;@^i8IifEsD44c}USnOYnob_`a~ zne5X(O>mn@Z_EM>`6ts?TT}H6tB(v4DB79hAoNC$7gmm_0@6gr#6HXM{1|H3PN8sXR10#P>lBk>cx-=T zOZ>_OL-c%(0B1D!8aebxI3s|J0!D=V!x35x4Q`4oz3xVI^q)sJh^GhQ!_H5*-*vUXATaIhCK6vP=b|? z)c?cTImK7jZGE~@F)FFpNyWBp+fFLB&0Vo=+qRR6ZJRr`oz8nspL5Z_{`#V?_T^f0 z?m7Q!;2C4^vC4>+D(f2=NC4&9zWqMk7EP6uS$Pt&eTmqNiu6X5RIwTz9Oyh|GT{C~?K(4NJ;3TeML5w@E>+ zI}qbgL4XUEm*RAExe@ZoT$~DKO-pfVj^?5njN1zs9qY?U`vSe)BL#q)A8qmG)Hv9o z0yM1r)QBrI5?h)O>Jy0D-#yBBJ9RvC|ae! z)Tp*`gl-g1_};dB|EzkO-+YChVIz0;7Nz|KtP9F-{DF61FbOvn;^x(&mkyzr=)mP` zg*dZm6O`;xhI3sAUh8)mt5FP1r}WHi*In;RO`BWVxw;BDRk~xr#yDF3H;TKE*NtL( z5|p?Al=f}_vh824uR4HpOF)e2oA+hgUXAL4;YsBqTX@7V3-NF=|0g;|s=9&sX&m`b zhxwnScJr}2@9P?`@xj1MB{?vsXA31(sklxs@In9&EmM614^9Xa+1Iit1zay?w`Fal z2>xU19U*#{OpTU^XT#6tHN@B{^;jz2B8c zz?pgUE#kjUS8X}`8gzF%xT3t!dBu+OWr#)0UFJy6u?deggWIb_OkT?4z56SGeDRqy zy#1Rxyu)?dSU9p(A#uHBbwy`lp7-a-MX9S}Sjlh$UwR3PIe--jQT9Iq{*R6u{aI&I zARz=S^W6nhD4W1HB=iqfORM81LL8)`h<4Xzy0HxG33)sicietB0aIyaEK`N?f8?2m z63#S3r8XQX;8z1Y#7l!>5ee)8Tb&*;=+V`)ci}7Qt0;twgb7T+Y7RS3bjp=mY!mJ$ z$S)2++DhG_^6+G{_pvr%vDy3)?Ft#$C^q+w?qh6C>ysB{$~jkWN9x|=-4F}RF@^u_ zAN}*=(f7r#J7|G*3NG{9&}r!Njg359%dS_DE9SC}`)S`LX9mw5$|D&OP_W4J+$$Ck zFAh1uy1Bg)1IwF-W{ZA)!vZzzuy8TiqT3QA3`UOW;=pnQ)c_F6ea-WbtDV9k@usJF z7ev4^kag1IDKY6HCMk9o<<QH4esXH&@${t8?}+2pLKd_WH;3sNoZx^M78f znM}&2pi{@3T_@a?s5y^Jj#Y{+pcF`}im@NJo@##mLrF(t?yUAb6yZ#%Si z^h`oZuDWQ>fe2At4HvO=+*a6G$vbBsJl3qr==;O)it!xJn#fuqH3yn^OH~tul`=hC zIl!?{rY-!>)E}*fbWYqGFQ}D8coSLI(qEffhruzEv?i|jvF4zVj=6jT9Y^?SeH{`N z5w=z*Z{FGhB)BvGD@~->@yUxOJ#MwqA4!3G$0D1I#b_$MZe73tn`QN%`nD2>t z)Yf~#+m0qG6c|3Z`>-9QIR0^M=h58IL2BX25w^LctB36IJn}IGXC{0gp|>p^X_6ts 
z2&sFWzs8qO>)~f=6MhZC*jtfv*>KhfWt4hUVkhY5#YZm;+2Jk;qE@YZJA+>At1AL( z9Pnf@zEdXrV2$vH(PfUU=mBTsOzw2Q3FEti-H+lYIGmK)@ww4DW5ZZrYH2~(?hJlB zCZi1tU<*Nb?@Xqh;r97Mif-JH%XWD}CIXW)P=*X2*<@F8qAFhFPRDaSK6O+bo_#2~ zFZlZXA-rQ+Q+{`@Yh0T~JYSvQNnu z{hW5`!!v+fiLAmYXEgGtLt&gC?XeQABqd7kut`h%QbMj>j9#B4lP{w0tQ|M}>B4A! z1h$CNDwpESp&6~&@8`RzelpZak{-H!(KHf~+88RNmW^zq9Rf2EMDO}RU$#c9$lriq z%1UiC6SRTT{+6+eJZbm4#IjD*!?r_-=G|hdJfIp{u6bVJFqz&?5#)ijIwI2!8K}{%%whKHkgTfQX|U4Z4}u+m$)lYr+DHyA?Y^c?KjlJlG3V&iZ2BEDA>w0W9gcE? zV5k|nznHw%@?ok%{T|I!Y{;b<<+%U$XOjTio*Ke(TaIO&mX_C-7?waV(14G(ve7Qt}>C=v_MSGTm zWcu9Y=&9+m*Z+)vV))Sm&6#C;BvY<26R~C=|9&BWgR>QdXASp!x^F~@^=+p(b-j%2 zl~-fvIx^e9#S&bv$y@}m2KzSKL$G8^vl5M^wC3PdYxS#KYAw(E2EozOId`vRcKxMP zU%NdPRQB5YPyom7bf9LXU0u70UNvpL(eZFN^9)t*OG>HW7iWg;c_-U~1HliPph^vl z?sA>QzNC-ud4U5N5hrPl(Cd${xkD_n-}*L@P9H@YRXPAD@NWCmBp?7sf~u5nwd zu&Mg0C3t&>_o9vZ1YEjg1hEgar+}yMT_SY)n7XL*$fPqAXq8|daapLFU zc-n%W0U9a0GUlk?j>wbaj>iQ6MXHP3u52!LF%d>z-zcEb(=f=XGUW4o?WrprK^*?~ z=TT3FRcE4iIOI%)K`b*zI-?wv+R|OVEQ6MGq)ZL9d=wqP6OTZapcY8@)mIRe6IW0l{zQk)H}fn}M|HhN}$^ z10Y2W6mlT2h7l816q>FO#rcFBFNsu>osWp0Xs9oqY&cX?+eNI#97@86W1$1yC0f0B zN<_$3+LZK8%>e1o*=Gj_ds|-n=Dzu5q-4qSl)G2|N6QjnJp+m2)dU)=@KRqaP+0Ub zr}Uwh?4Y^c@?QrKSkgG7(+)3N9jAD!uh^LKTd-`NOmGWW(wU<(E+?Fe(ntI%YE*D6 zqRTC8+v!``Kf9lUw>Ab_7E#HK%p+1eA+zD1VwpZJUoytaHw?`bTfruiM9t}pWGMRgE|OxHMby&F{Mpg^l&Vpc zTF$hNu+)haOd7;U<=WmfB2dfU!d>eQueV*2PeW zD?N>&xuaT77B2Tp(vn?JK=e(p>UOLI%?-7oU%s%XzX%G56>%V!ouZB6o4nErq^>SZ z;lxQ_2|Fo-)wj+QsAL!zsV9kl;3Ko{^Bm=z?)=EiO@uMDuIL$VBX4}E*q3~JwYKnP zPqmDj!KLXG-Nf)#4}((6B9PC}TePkoCFXl^EZUan z(7!J4@9Qo=>6|u2@#$9iE6`p6zN7!7=l;5eX<&IaP%>NYuo~UGTSKgay^k)u*?v!_ zTe7I)?|Eh|6-!e6^-K1f2a8MvcKvQ)hKtSsd2sP-(cEVHPqe|JWp}U9oJKVXk5mKb zK*c~w)b``5`rk*KVbYm$uis&Q1C;3Su3;ttc;SjtiB)Du~j?7 z$f%g1f|XHhhXqJWb%oOvZ9VU1B^am@38Og99rg2t+!Tmy5$SS`4dk(QO_Cbe5(TP} zq#w|Oh2*C%aR3|eUuwmH3{12FT>rtz1J~xrOVz5fl3jdBg=l?2fcLbn0i=iC3FovS6MT? 
z=p2upESNqI%%#OJZQZ)p(>+%`(4fsP=6E{$`Ztd;p6)7Wa$@}Sa);JqzUYg9kWzPb zBH4tWS=V0iAM6~GKfe%NRsZvLfB?aQ^*8fjY^D26DZ_87-bGk0%?EEF)LY0-?byxp z-BxzlXCUysliZ*R_&n3<^xnZ_IJUW697Gy=N3E}2L!C$!tgtK%aykB3hb(hQW9;tE zMR@TsGQR0;hGjCa66nkTI$jp}`xVH6QmcaKWytfM=3?p;xK?4Q578!8ar%OZ+^AQ7 zaztu@qPPRN1TXszBmSG7_wQ8_GBkdDdSf=apak=;ohu;zx}NIidxA$D-4eoEXw#Si zUgS+b^T`ieXBYB6jGb(DCd&FaaJ}wNL*i-oSc0io?Z-f3g4d6poI%jmEqjhvpE5ax zz2A7ep?M)NM%g-WW;ZlDL`#nLmLlW2xCiZ%I>+=wkpQv;F-$$NIIQ5fP35F8gTxhD zH1BMH4`8IYdt+yvZ1HIG1<^b=pq(N+DfT0o(t&wUSMPc+n#+#qYUeI(=gw*d`o6UD z&Q+beE#3A!x8MY%eXyz6(iQt_ zirycsI)585I$2xe=iPw{?rpJ=sUdnY9D129Y9?#YvW}ZkTh!*R!C z9LfIETdyr6%D(Vs{l76icNVH>r`$KpR9`MRrhr`1^f1}}t{|4j$WY-wUecHO_GMJp z+z;_KoHpZxgzj=Xx}iQ+y+;oIO&Jc6Kg*DkZype5{+g;tUHTD+o}en5RW z9M+Xs6T4-1IL@MLo*yD2`L|D*3m4ZB^!HA3n&R&5Hc=ezhWj&E&nK8Stu1<}W7Lm!+G21(gbWPAWA zkd-%~8o{XkzM8UY7foie`9+*3~P3}wiMFZlV#C?wLKhSY+DeZfS$8jJdEA%0a?Hbwlm zqBp&_h4)n4T=euJE&QK?rv9EyI8am!chygiCMecr zl=n!xo?SM4tvdT&RXgX3rBq-JSZ!rK*awn(aFn{W?07bzV)P9h(-=l> zk-8;-O*^=5SUq^i09Csw8DtB3f27h^L0b-4AHFQ#B*+!yOZNE~%at(}A&x+4L0ZBk zNY>Q$LB)mkyUDe^>S@ZO<+!X(k?)m&NteFfOLlRo`dzAK6hDr`aEptG8CTJmF6tSl zcv&OKzz#-CK*&Cn!p$L(K7iM!uGQh>U!H_sH*s8X~NHv`s{Agmfx=+Xc;l1wp81 zJIiQB(&+}Je#$ej`XrY$$KKYVj>H++aNaO$&Vzz|oXpe3;$@FvR%Ktt%vJU)^gde> zu;7Ae?8z?Kh_dFu2}T!GWU`8~TW6Zz(_|p*EN+WQG`vG*fVA$9~{7DQKsH$!oRWr50WA zl$X8^nkYR!!L37B>(cX|7!Hpp!@j)^mx$DtKB)_vV#_wL*^rCJ4=0?e-v_9fF?$-0 zFT?&?QSf}yj6!gq7|*2$Kb~SFz0OlSu9P&6PyZ--*XuhYKKE8`b50ZG>O%jWlWU$s{c;3eq zME4D5ppP_ui5PbLOZ^I~H5k#W>hCT(b@t?(gkwO!+QN`7NSqF2l8t+B4Z}GF3-8aw zNZ7(KN$y@eu~%ZOhCFi6#mb?>atUTy&JcPbuCG=^P~KI!9r+@?P^24D578C_F;wSY zNj8Kqa=vj|G70XEp@_$kAEOfMaKseA=u`vSUoTAKrhKCYLL5hYf0`deYnT! 
z|I>wAszMG9YJZ1IyEQGZ|3?ghs|kE!NDkOMm9B6h=m`Oj#UEW%SgZK`;Qn~p?B~`+ zOK(CLs(csE;8ru$Tw4(|WxCTSX86N32`yxTp}fi*ag@%!1HU_jYB)eT&@(e_nU;5| z(aa!X0Xn9}>*f1eKTbb_E#y;LJ)!bJxd6gDDHV?BY|WM>UbIFM_`sZr(X zCL7%Abz=&r?IDajk|OPjO+1q*CKkHID|YDOz9cOJMEf;`Wn-0qkZq^?62P+_ifdj&U}}Cu4KL%8rcdp9Ur$ zi-(OzhmJho(|$gn`%Zb6-a8jdC&yx3pGWxI#)`6o#DOQiRPSehT)KeOm1{xUsH^tL zKnpu(O}4eqz^lc@6ZT*5;VdLnTEb5&TtsX#h1Q&i!;AsdC~e z+&^^br^~w8(DB5W26rW|W7p+W(gt#=xxVOo6%7Wb8&NTt>fh6S0@?)2ULHlakLu{7O}9y6OZc;#*2ita*$1R7$N#O_|kIlQ2$kN=p_C9pwmQ z0)KDzptEUPcCn$x{nK>s<&Q^*GEVG#&xR0DR?C48fig#q*=`&GEA?$C5fn zY89FScKbQnX_W)7&CJewfd1pXoIOkY)!IDbqpLy*MP&G&vNN-&3_P(BWj-&4oW)^d z=@_*7G60p$8`B>F(6-sX8*iQUlmm6dIGQen7cJm#E~fPP)=>^m0yG};jHEPek|CIt zYJ9&9%kNs+K$p9H*i_Hqkj$7q_+17#>mS7rT zcp^x|Qmo7`F*ON?oF$C2WMgB}2tposIeLY3jw%oWn`HmD^isU80!yV)ArZ#iI!IPk ztpi*gHLrXb=?~gI!{-ddl)>~SOJyy6b;w2SD{$itWN`?Y$!A@ zQ}pErwe%Jdi$vMWO?r0<>m$yB*qeB1gD|mqcAfjIqHcqB|DpY4>Tg+Tvk5jj$U0Lk zfTW4`l!o7GIRvWEq2rkc^N$V#TAlesq710=;o%zzb))f)Qq>zo&uBiOLlySHVU!oT z&kTma_GCy(<%9}d@O&9DcHG6-l+^Fe+`JwMLnP@+6rs^seDFp_9DX6o?wQg&k_uM? 
z-5Ksh`I@mQO^EMTywQ>2+ZO2?vK-(CBx&?UJEl&hGqG{;$&R^w(xcdzhA3!)W^W2w zD&MOaHH&aLWz6bVjh5NU?DIy^<4b0{P~*!NO)l_-N}mH!2ea>r>f2~6;iE{rE*Z4> z(uZI9!wZ0SviboEZ3(#vBaE_H&_$m40g*BqE!@Cy!1i)?&z;$ejqZ-w1rAS7`YaDc z<#z`oNgWnr%7k5D(jND$YN7g>I$<|YPwd!Y0=CYtAC@Y{5iv@^9F^olt|ZFB+1-Cx zP6wr}fWC=Xe}6J5IiESXBPA7%Z9BaZ7rz^Yt&pNFnElaO!++B2q+qsK%c!RLG*KYZ zA|{TIjtFr>O9~RFra_?9B`AAvb-Jw!$8=>eNx|%*lKYw&0G;)DO(*B1VkeV>fr^)P z(Xr~3lCHpVJr5ckKf4LNhK}Hd0-ZI|H0N~J9g*WX7~6fRP@w5fT`=!Ntgq9-uIei5 z$jd)SGZ(H^m}uk_Gpdn#+^6Vrr1r2gOM*}7@)>tBIVk?8cN{0bZFKnoIL)X;L78@f-6X29*%BMEKvGrB=6Mj7rwUp#^Faeg+1% zC~UDfe00Ot=ghvhcPls&J~$$2Y8qDE%+!WlCPTd}X>0n#T#~gJz%;)HGex8d zj3NCWqqU4pnn8Mp4Gs_YbD$#dz{>z4rmbpM9(AbRh*=3CldwT$qhTvqFuL{?p9%|s zD)UHyB`(>tx!z#}N%g{8;+pjx^WhaIEvZTJ85Hs>BR(t!RjedPY;N^s4!VvEm?zE( zv&wd&#LhFqT(dho88ju*{4`=CHEFWa}N1$xcy`Vc$94SeoT7P5U0>)sN>wBRw^5Xn#q%0>BM@ z8cgOFx>_%+zQ>%c(HHbNwMAwKfLQ3=p%aAm)e=k zaCJAh(GxPPmb}biP1tVDd-yoYfTpJscyUMjN%V=`85Km;uEf1d4O?U-JaeqZD`Vby zD^2*SbGRjzlyFP^NB@bbO4n1eAQjyNsf4?O{N95W`{75f6{zLrrdLF7J)E`Zy{T^e z=AFZP=&?@ut^r1mV|LEtB-pw2;vME>t|XHN&ouHv{dvSuH}va_C5F~qh?uDyqCx}X zK;_@h3zCaYmfm&h#hc11{?8sf36GQsvFEEGk=Er6AFSN41(ITu*YQ1ohLb@-U; zf=J|tzqwPX!x1*5;fAWdm^NC4N>{Q`PF5auk>jd>S_K@GuDlVyTqpd)-neRPgMxV#39ZNM-IY<#g+|oFl>Ut!VmBD#7^NEO3&npa-gIc z;IC$9QJt74T^ZMv+_j0`E%(9#$Ba?-BH|}`PFn5Z$ud|o$7_r@?_Ka{C$nyeEqxF7 zWJj!v*h1M_9n2JBwFO4ZcEtr_JTA7}qRsZscB~1ZP|;qW18S0cd9L_D%4MSoc=6;d zZcy21OhhLOGB$xN3WWv6b$YqxIGxALJEokG*I)1?@U6yyr%`T#DpQ~8q&bqQH_oJHwlKF$u>T@Ji zjFc+wf5=$Xe=^$pY^Ua@ydisN16O5pe)^u1Gc#i^-cG&`w4ddN6A5cBZf*d{^vkH4 zyucyPz7(91vD4z`#|}sz#umhZV|!GymT`o`Srzm74>8$$YyMcbF(qLgt|{Q3zBeu5 zz8JSUv$84R3tu0ScTYB`I2!ZE7VSaQM_86|*Iwvi>}cjeou!QsbwR;N$=}vnNtYJ{ zQ@7JRI6z^4p$na=b#};OJFB63YUr9;KJLqpMBEER3!p@~txH`^yiymg6mpLDNT5u7 z%F-Sq!aL!NI(n{`zm3&0SDNM}+f<1Zv3 z6^Z7hyHomHhY^;=;t&Z{<=pE)z6he?l`q6{d7b|_F>Wer< zNN?{{{PDE4Jl_mv+`TeFo6it&s+qMjl~+r9Pr;*hakwU`%CX8yvW7;zfj=^@fLwa% z65fo3PTBk(v@!O&7dwck993;94dw2Ts#Gjkf?og%37%AVA`e_zQFg!^T_XoQ3sWlu 
zPV+4@4Re`vk~Du%e_rGHi>mhIdehg9{)8YeH9jCOjYK=)0E1xd9WVE=%_j@&s*O+gBCF(doqFGaHyO(aNGEYh%tvRmw z1R$rx1BIptx@2m;|;uJDtGOjzH~n!uH^ z`7Ek2q4q=VFF{~ZDYY=r1MO{kA=8*QmwX&n@nMek&Fx5Qbx?GHQgH?`%^_m778f{d z??lCxhOVHRP^Jc1;Z~kEyzbTrTfV zwxsyZwQW`|454I7IGxVmYYCB?(hiLGTez$r=OQcabWlQ(u__?kY0Y8Yx)Sc%I33P( zVGM?tI+uX;+c|?1<+Q;)wwl}m;}b+)3r|WHjHxvEOP2!eBqaI7dhNPW6g*l!1-+n)#<+hD-H_lW%+LOo7MpfP|OOA0+IlFpAF{EPF|!mDx$ z2{YAEmMoIg6Hc0D=-E`NOJkrQYwpS}^|t2&G#FvukNFukarGPQ>394yR|?(0-huLc zKjCNJH;>!IxMwz|qrD%bpE=XSd`ls;jEo!4K*@l)*}@tp^9vsy;uR<3*K`z%QtHVo zB3sNXD+n}Zt?O^yEw~nC<1LbZBlhc=&02l=H-g|s(ydbxa@P=oj~;}OhImVeerYHe zUt!6@>Bd_nVKu9cuWy7!-@MW{lD)HK6#ytl62P$-^c{&v3y4mySawol;hYO&&pJ66 zQ}aa=&f9&%@hh%N`fDfYRdKMA?#$Cp#ybukKQxo^dS;f0cHx_KJc62}lzxsb`!f#U zn6K8eL@8?SOM@3O-4EI1`6yn7!Ejl?=?ui?mdEif<#6zI+hqMtF1n`FWsd3=n;;9v zcle4$o!qwE>b>nzc9?E%OMFiqWxN*bb#IScUt@4IZ8v%ei9^blyJao(4!zT^d4<VyeCw|hq8SfH>-Wy4tyDwcwzMf0I7Lu7~ zAGM*cA&Xb{Yf!+>+_m$Wz39{_{?=^5hJAE4)Q=l#V+6>l9gx`JVOA4r5Gy|vy2^dt@RAYSa?R`T^mnppGMvcusD^%qIb1apyabohsI-8Y1&dsyK^4JZgMXg)ldDv1ZVDV>st+XRf?Fo0J9&EwkKjjoDF7VVIrz&NCt^>aDleh9WS zSdqi}z1CJ1I{S1E!GOJlrNUI7o=NaW-hTf1o?|{lKNFE1I6EkU05C&;2Ci>CP$DNoP z46mTk=Y2zycpEMzR+>Vt&-J>k>8j^jb(01oBbLF83lWHhLevEVq{+j6Jd#U_Sws!h zoNsn-`^{;ZE{f#i1~!q7M>$z-uw&ZF_XSqwP20{L2PI@0}zXhH=;%bK+rCJc%Y=36@QNh35@q zd{3JkPkv1$>BgMxJbe%xRTZ;!co}_fN?stg!5&MBG+tqh0d`#FWpXQ8?Hg}9>d{)Q z2fI!kq%3O^uP^0elDZ^)1ABdPv^mdq^Pa(A@+{Ym*NU+h4)~(wzk;Fv+6pKzKQQ8t zWlelmU;Z4xa9xI_7Ag4DGYx{n^kf&5QI zoYB*@=ukLlAw_bevv4S%ZaQxIfF)NvWJHX(JqMUN2*jPQm0 zjYOTiul4s=%$hQ+*j^lJXNOYz%3EAaAWMQq`ep0i`>u3U( zBPUQ~JO5ml|Gm`jzQ3|#lFy&MdlRx1t&egU6+n2H1x@tS-Bmw4r(#7yevQJQw8{7c zohm!GqgBj$o4&kDd)p3?^-JJ|AD$?sMUG!CO~7BA8^H_-;Z=^a4xFcxLQAyF_W+tm#|%05;}!)ws2}LvRVx_-O-7d+XSds<7|F0p*J{ z!cMuf5Cz%lKe*4oUvrvmc5h>WsL!hF7ne8L^R}V}SIbPnnDm7nU++ry(yuYbDm)f@ z3SDric0zZqUis))#%X#Wj)mO}V(idEm?9~g8{cLn?wDk4+5^nvN!`+pXFwHH z#|k|sDq&siqsP5e-O)CfCh^-sOx>jO7dpFoW0@qo#!s!)|MTXkho)iaw(r{cpUmxc)T0QHqcd73Zf`ld;R2oT!#iR+oG8D$K#U-qr)?q`Q 
zKJ_74DTP#1+M#w>?DT!l#i7PygM?$Hfo&q!N6{m2n9XV3mymh(4@Da(&t9K}=m)jd zyk`fbgK(L>Me)(lO!kE)OC!`TRXi6#BSx6^*|_n+OwWfz>i{alZ9oQiY7gF7&@OE1Fs7Z{NcUWMkgh8;!t-4$pV^aRcWb9OH*EE<6?r}PQ@b24MqbS>g}qQDu(VZ$ zk5Cd+DM}~%okzK5~rnSv-gs%QhUcYnGtnv#xWUt`&k}&4ap(u z;9^15#l0M&Cu>_2>&ULA_E4k|%*(FxoT&?rQkt9!>oE=fguKgPr8uW?8Ln_ zovvD+huOSG`nuuI&fbN~vx;%EG%}*h93p|djW6sg5Zuz*0)H}JH||=(&`2|s?U*g_ z9~J<845F^Bfo7HLlfGFEoimDRqB0xH01hm3qyORWR0_ZdHSvt7rPGpI=hgMMXzG}k z8U>Kn>G~`-qg2#Go>QsO+7ynGBbJTKHvYdT@-E0%{nmdqM6@xf+!xji;J^Y074 zkhO7nDDDNnDY+%6{~!3`L3S60a;8AN zq`N{{1xxAijFM?{lnqI~-Fb~{oE8z`n(|lh3dz)|dC2OKvF=zsyNL1*EY)849$;r>LRFSb z8_*aE(Kv`gHkBm3`kCQY%sX9;Z^tTND_QFg7OxNTvslFr0;N;Qb5hKm5ea}cg zK+`ZnXJiVa^TyQVp~Szvk^u`;Ds292XsUVl#~`PkEcKa$44b92bcKESrhj1Zn?bdC z9;evH7`q&{Bpi>pIE+JX=j5)*KrIsszoA*E&w-)qnSt_z$>jKO&)d*;+@isx z7G;Z}f-U?jhABewf1lzQCGpk<=)35C*mBso!|cA`dS_6K>1$R#Yl>KtuO866A3aHh z`!i8r^_?t;(%`!>2bIH?p0>#oQ0I{SyU%-QewOeYfDnk9l2}21({$*y@Oo)%iO&oBv;R6sB?ox zvfXq))N$Gcba=T1XrR(EbqKJr@-Fpz>XL4cD{J1jOB7e09_t8)yKs(N5sy126&MW6 z?l&p^G4fNIfl;Q;#^-_#X_=Yy7rjxxUdiY)xh^WkW%? 
zOigCh9H+HQ@%2}`RtEh!y1+qW);$0n{f=-A_{3jp3#@CH`1avL14uP7R)tBb z77iqg%6`SLcA&VOIY>6-zC$3n>!acB?w%aZl4`AlAe>|z^Trdn_`K=K<==Gf--?g?Q5_Yt}NP_$mIYC5ZByE9#*E6nsMV|oIj?3 z0UD12P5~iU9|`LzcKUC0LrJEb?T~f#)axbPntIBn^#!q15sx!BGy)U>_phFn9A*8I zQEUkUbDl}-&VJy=(w^??D`WCUE85|6I9IiU{%ay!oGt*WTiUNd(5g6@W5!HTkABFf z6z$fvX;uehb~9LM&`@~frIEo=Yq-M@h@nBmRrV)1qw^*p1j@1(WH0PT$+JF+GM zjV^G6vKx4@a@wIBjEq-BBQ1qBakH;ot$(L;(39fg-rD+`Yz6&8b}0jfKh`MYI6A}B^_2FAG$onsSK<>Nw0W9c*#s3x~(CEH4{PJ zbHYY6DZF}hnuoi6HGWx_>jheA6k%mJE9Kj_5jf9c>J;+Cm<13fem0(#fIvgpvk4V( zc7@tYcFy<3ly_4>d&{(#+eR?KI+wpIgD9v)DCg7Bs!Fnm3bhd6R*`@F8ZNeiMj~Y$ zIXc)l79cfR_K7wgr1Q-a02Q+rN=Kb}NZ6gA&$XG(9n9#x-ax$d_=MgFEBCyUz|mAJ zZ!RT|V~|Nm0mQj)ykJOEIsb`|6Ag3JP((y6F&9`a?Hp+BALNO_{3mZ&i z9c}kK;Tngp1%RQF?pL#5Ol@5%)}7rNeUzS~JK+Wxovo{po-=LbTyw$Y!%LE_7I7t%2klADdAeeio!%|FEG&g%PJaKRl2 zr#xr-n(QF+Yn#6QG;H#=o{*hm$dLupx)-Kwdi}%mA&e7}K3z>fBuXOV3`=xZU6IA% z1WN_V38VQS*21OJ&(4A6arQ~qYWyw%V;on>yNP_);S<}U_@4ztam3!>yuENbJ^}_t zDG0?8gLxNLA6c9kh@GQ#S3j1G=%g9OAOqOv2NXc!gt&axZUPAM*A%U^zm-+Ak|*iy zB1RcG&VNm#E+E2foK_o#z4Se~wOcj~7@pL03@B(R_8Nl9@M6;223~3K*jUjb)H|=4Af@ ze;+=y-H<^PSS7-&IP73Ld)j}?e`PKQ*SOBiV&eG>M34}_qrQR8{LW;&oyx(k$Y#zW z`OE+Ue34Ahu6QfhaRrTPKC*V|EoMhM;aJk|@&Mfz4U~s>D+ZG97*n)-ge>cS3qeRE z8abH|HHd>bnJZ_r7dEG^$Ng#>nJ!p81pY{)RvCV6yZDP#dfTp_7@iMghqc+1ybklE2(!MFiK5q(mWHyFMLr2a?_3T;Q94a+}t+DpE$Lld$)Q1&2<&Iqu95G6Qr%tH9yy-&U18rpr%LbnBwsds+ zw=&PmhoIKJZ_Qmkrp~v&b7dVGtkjBd8(3Dqz;!GoT62ji%y4@96q!eo^r#>*if3oX zR!~jXN)gDuucz}>L6YHwM(vPbC&C!JVeE`xr9{v(esAPF8IqGEN>(mW#+t9ywIeBn zAvu{UDjH--^*jgn@$q38!v1tUJI^FKj#4PsCU}alo}8viOabiEPG#}!T;#?kymvt2 zgL75y-b`e|(cZ=AX(bix<^bF=Y4UnQH7~4d)_7BT(m%yc{ugC$!Bt1owd*E8Ab45gk|kfrwP16`pdtfh#w$%A4&zvTYrE z78Q>Xm0!*MoNZ>zSFbb+DX!xgUD0yT#2mpbH+T-!9?t zbfa&F7YHjXK*5tIBmL96)nVpD)v4}iWw#M3nd5>>DdSfjIm@nD0T6t9? 
zAxEE)#pB&05WA2bNKp)8@~bM0bTixflgsMxy%a_f@cgHpA|o3R%W04NHe(i%!BWWI zNDPhIkzyi~kz3^wb~|w+MNBf1?H?%Q^FBCqMT>Q`5XtTHkZtOY)?paYjO7XQ>>uVf zYtigqZegZ*j(aehiiGMILPZ)*Fc7&$;xi{7Jumfz%H&u|Lkj0$pV@LAZzWf|ZUU{7 z@eo~GT$Y2w;A5L8vimPfK6iLE`~I*DgxB;1U%{x7pZ-29$nlhWZN*URT?XIE-b~x> z>6&`@Z8p7RiDX1V&*(D+{x@DtVrtodot85pTe~{BjNlnupcXs^`bqtx+iyA6Y_bI) zU=>+Knh%pnY7}j2KAkXZz*ckPNZ1O7Q=QcWErcc|=d?6F6obfq^h|P-uFyeF^xCgR zXBNU(k_D45WG1fm(S0>TaJ0NgbsX{i{5fsc%FZRE^q1}S`~54V_xFDfy3_cKfCNwA zGDaj(mYju7_KIB(bK7#n+xF@|Gyh;>3|okaT#9J6+R-gQqI!NFzxjaPun+O%GySZ! zZcoP?Y_dk$0{*EIJ|cT*fUuIf#iTZ4M{w8fITcLs$QMqz;i!=~p@CcPi0nSLD4`Ve z+v@d3rw$WnM&&<7pM=gO+t$MJJ6obBa$xSLdG+3D=+)omlaX1?wQ0`-PnHqvorTK%c+HiQsE@ z^jYMy%-b=`!&r8vc8~AT7fp?&qw%a9Me8It(JW6F+KOtT^VwU5thnGyp1$#xEnDW~ z!Nm2c&+yNsy~pxXavFbK%DIc2XkcucaL485!ctfRYJ2@@qj){GX3e0IxB(*guWgK! zb#$W4yD^QOk!7*|hTJQdui{Z05z{clvmDvRA)JVBj2#g5)I&}OYsid8w=4zQs0eIs zV9p_IUC}vpbOH_!x>3!3VrU<@dhTv}QPGmQ`X|ff#Ljd)w9yWhvT;HMjwTW`K$*jR+WUaRFs@_!$q?`6jGq#}8z_e4Zj*1~+ zK@Ucf=FAST;;UJJ?AbPyz}Wuy){O$jTmV9Sjfn7}&39mh-c~K98l3lJ^m$UuQK~eW zUECnJ={1~6;bA%^BZvJJ7hk}g!dSD1*5dR>Q89-`{N-MHG)X6kofg;N`ODG=5yp5L z(ZGz%pla_}DrEr5G^{gx=}?x$L~s@D=!4dW;Y8fBD!8uXP&DVF8EZ;)x&+gy+9$<3 z*Y#HU_PY9)4v4=)b&IX!i{N$4z%%g8IPR?XvfzWE#N9{p{9UYPRA2dj7XD?t-eKp9 z=X}NXR(vSjW!Ooqpeiu$3!^}$IY&k2v)U1=#8Vs=O`89;FyW)~l{7z6o{Y2Wg-l}; z#50iqO`7)$y!VThaR|v2l>Gd-rAO)?bt$zdy@6M(1l=#J%xdpZF4c4Z+)jg{1IEAt z=FbIbZ{8cm9X|P<;y2Gsyd-D#i-H2m)!eJMqy=ha-2G5Z9=v-np65h*iXvJ=GNqD? 
zFZPlu%iq;b%p-#WN~i&Hl*=)bUOm&vY&G$8z$^IvAgpBk;QY5LMwkR}e^#Sk(Cf5; z;+J)GM*=du%+K-6hMVoe!-=coD_ywdZsY1Dv18wPH=pn3gxTaGdRG50O4CNm6?(AahSG5`qZ*vXy$&&XxlrTGCL`$ z3OpBUrie#qNzbQvl3Z~{9sB5JDi;34=8E#h$f(xg$|}zP(rXwTlD}o2(QkK=pY``L z))O25fyd}->#^+KElJLOlhwM<)CqRd zZLTkluHfHLMG6I;Az3k90G@yR@#CUol9h}AyaEDCs;o?7L5~ZMO){KIggPA<9PpXz zdu)LIjxZ$wqBW^BX${gC#rnHE$~?}U2?o~bI=;Gm9BSb=>f1q^1{_xXG{X4zhyIx~ zBUD;sH=!`8M$V)c)Ue6heN>Ll3*&{uhmJaI(pxskR{+wa#R#;#*mg6dh7t|a{K#X` zLpoN(`{zb8m(F(M&`3z=_zvHJZ2w!^kT=>Q{a zO%^|O;JsS0)__=6dGjsDrZr>k!7`G-MO`>KV_(AbNz+|a0-V5x{Iypp#sjII6*8nm z@^pDJQFs=pw6Ld@i)*T1qOgUi#Q(jK=r=hHZ1RTmjDM4CQNj>Uod;vFB7!oHCkKzh zlZ}B>KkDP236rBNv5DRysGSH<1i}F|mlfU`h3QKQwB77QXdz<2L(5&L-IwE88 zba(%sDnHoIWj(&(-v$=SHD=wWhT+1M0}BzP0H!m&!CuPbRk)3Au>_;^$Wgz4MhO%7 zB&eawYAXRVUqK#SIyoTgFI(GPYvz=`p}PM+6ZU`1!B^R*fqP~=9&$C5v5i;%y1c>H zAfl{345b118T~j(MicNl+ZY!6n#i#JA4cfkfgy3$UdIi#u%Dw7&W;ljFw?hl0=j*u z3qvQF$+3Z@P%{7aztTVdq(v|9qL*Aiu`Iv_>G4Gl_Rt{%t!SV}F*YQOQMY)t>7M*+ z?SH1AY7g?*>LRM_z_IT~e0d;cEnk!zB`SabiWS}lW2aRNgFqTaXkV32wm=;!B*851 zpLa1w0ExK@<~cQP>?tkkVVG^rvKws~MaW}(8f*!9-LNgQW^^t62}=;X{B)m$DwuKD zGV8v>u1I;&5Z zqtkB|8CZzpF7L4WwHv68>id(hpX?tHBQN2=#pY)Bc`j+9h6ZK#Bcn9vzA)HnX2JC6 z)6ww^>xrHzhgmuQa8kIS))pv;VH!wz!}jYkF`k`K8KB@)m)aS8hHw43h_ivLA?!eG z2^^8rbXem$%w+VfPm8`siLXoS;U8Xf8`mU@lTZ(TOwyKb=~cktxrx4D zuWLOA(VFOF>7WG$Qt+fMdX4EEJQ$9hl;U!}@bX}5HVi|ovJVA+yyjjr>{;zHLHlQW z4@OHtHKziOwXa7&XVK8^p7GQ&yvD@Iw*jL;sUAz7ufZ8BB|OI#&qSRagZF19Zsmnw z@8pZOcGcL>xY>BPaW$YMOTix{$4YnPw1jR~g9N(QJZeOApE!8j%UQ6O# zARJ6ph{55u#*jRKGmLBd5{S?nT0a0)MLK;Tv<$`$vn4lN11Zc+sG6lV7n;)n#s%Dh zPd#ql1g!qeD|A=JP4`(7=8q&2-HS``C&fKF<=-x%5y3ufkRa+{1J) z4nq!L2KzSqz3mT_HRuJx2VykI%ORfqUx2X074^j5~{P6U=6|TFY~2|h6znC&)Zl(vfd=dV4bB8dy0K&i&SJITXlpr z_0{JH7T5H7Df$zJS6|E_@${T0&}Lh&z$=uel4);@Elh?HRB8^Vsnm^z(n{AMj`;Yk zNZFmZ%9oP~$7DM>dP3A{A1wy#DtO@qYleh^OTv#hjUcrojtm@?9TqG=J#UGV+ngA; ztZ>+>TCkm$)Nkbbx;8T*AVY7ve{{i1_^47>v4~2I@QwCX&>sC~Yu)|+sUBv6yV@>k z^uo;9Z8in-X`0drFan(`~sz?rEcjf5*!1?;DmP^E#Q!SF` 
zlS~VXjqpcVqUy{vzSzCV#pFI*>r4PGMx#Q8!>E&m#7*?L=3_qH{1ted&W%ea`e=Ak z?>&@=9gEladZxxyem&zlMZf&?cd&_a6OF%!G+YgS*AdyPOY9NjUdxQRy=VoA6Tg-c zcMFjCZhRlQ>L2fhyq`e>K0}II9PeO6yd#-G;A0M~F2y4;#OI3hg*DJoIs|%SKNUDU zG7J>#EO<24Mi?414^zhCNd#cC1iMFpIB1Y%Jq89}%YI+fn#hZ^IHd^{kIYaOihJV^ zdY>FldD2O)_qFF%Bt7Bj^N+B;f{F=M_Lcj3^H^|r18aHaT`@hqshoUTBredGNTj|9 zcqY$wr+aNw?)SEEsIF;CKPb~48u&!RpQRytIq^E*6nHbq5m44DG}q&h@1@Hb*U=sMj?C(WoINQiBRiY^Q)BaynpE$oW4N#BJ2qEz5=c2UgGt0+PL zhkTi2!+LzD6>IwJ?J?H_Rg~CCh)9>#Lnk6_S!*-_N|qOJ2MqPf6e#90vy$6{(wQMU z7hG-`{(1f?5T`~sNSH^2H$z*Ic7(2Q-L$#WcN3Io#-^(z92x_767~WK**e__p7o}j@N zpY%i4u5@{4D$-g_<`k}oz>gclxn|J|LS+_;+N}lc+0o2zu+%2Q&X_`p+mRrLbESSu ziPTkQF#opgcn+v$Szpz>v?h-Odq314Dj#r;cUfc;YN#MT+<#dBCK_Cc$&!soR8P;)4}SRss4a|k zcC$N@p4P-}Wq@8xE*;5>;@;8`6W7caQOdZ_(v$4zRo$tqx!vTNs0Mc8)PVu;&gyM1 z9OE%_;jcKbX;kp~Q&n8At&5>AX6LeGap_uk)pd2f(?Dw4g|_h3Nuo`dQb@KuK~{^= z^4gtcSBMiE2(ll6qbG0Rc4I4Hyw*WrpQ-MbQ1AyH8}xWrSL}>(GpPtm_{Xyh>|Mmy z6;9s!y+T_eFxd@5qsHYp$LgNZMa)pk`eYvgsl$i}!(xWt8r9d7jt?@Rg!JxD51StY zCJkBUb{I9PmLHRFBWt;4 zirGKLS&cEVQg>u;p}5}Fdqyi*0ERXZ)xorGB@f19XR7;IDQsz)r_y(xYE@N({2e7) zYM%RAiYpO``&BbxPGjO*KZCx5efRjgL4qwmIt1h3V38KmpucoxT1?{F;sa@|bR9j{ z4M!c+rkLvLcbFs)DV1gJ?Li|cSvB-|ez}pkrc<7a2dAsG=hF%Ssp?y&hF)70*j);h z;mYwIY>px*j>mp;P_VF1up0ZcTx0Z#c|E4bR%hF-RLKO7-zd+icGB|p$&j5u8=vu- zG1ch$hT|ZC8#0^yQDWg4HeMp)!*m){vs>NxcX2VSEWh8Dm`0oXPo@Y`;#0i zJ^gSfup%L|1j7j)ncV1RV~MsgMH*RdEfI8mHF{R&VZlz)!!D|W$q&`TE{-=|MO!ox^dIEWbDK~}Xa)yGRB4K9v3!>gj1vSN1>&;?Aa_Jk(n zDkLsrSZSdppe~oj&8c2A6KaRD^iKfJ0OuhW$8X-7gzKt9_$Dd-4btKMMG1F>If0-Q zmAJK?d+9np+xLY}9X&#x-hAEG+zhdUe)m^?Gi~$m$6&7yzV|%~nqBh7s<}K>BzOY3 z@TZHIr(6T!jlGaUjjFo7nVHZ%{)ce=mf&*8Yl4r*E``fLd0SlWjWwB+gNn&~{b5!8 zY(2<|LVM%)XsST0nOrfUv@$tn@^Vb)10C30W0D*ZES^j6@yx7CvcgiJvyX_j!iJ8> zhPx49(=fa^8mU0-`buk}vngd#Hap;tMs2aVMR0yT87o=8Ejd^5Gfpqd#QH3aZe)J* zw+43cDX$p^S`Czbg2aDTwZ(w^@MI7kCF$!?6|`*%BsXNZGJXPur)-#vzRR2p_iOcIGg{kvn6g}>a@7Cw3{ zhH22*)Ev-D;58D39cjg$F2B})%QQYG)j6JcNv{D^DzuRJ><+~uHlYsoByO92cXNv` 
z4{f+A-UD?+PJ5HFA;vw0#_;Y^RsJRdBE#G0JjIsElhycSThCoOo`K#Oa7LpEYK7wM zAQ~sqNSeA`OL|Q=QFn{gP!a)!ARLHqbtwr)uqY{&*9I1?s>i%zB+5?KgH4xRWgOKR z;?9-q9d*2GIF15#&fsS|q>}#|RNy?)>V??->pJA!do0)!%P^r96N2`8$SVKK5JX`P zS`p>mfHWBKxx+01@3Cde$kRn_)|HP)QaJLv1hJVt!v`KO+*zcpiK|%+<+{C|8@;*{ zw&+%>hMmJ7b0gsm#*2PE$Rxf2?)UOi{NsiXpKQe7I^eKZ72AnR@J>jhX}%5Apf}-)x%s1( zHc<|VEus80)B?dkJ2Oe-fYmw-;1d5D09=}lGcfBO;;V>BhE&e zq1_3^UFU4{)3R-GGbJGFvpi%bk8{Ri)mSb(E=AGYjO04i-)4a&OqQ>vuwVeafDQ0V z>rkr!&O#{i+>O%1kl_i`S*bgI@hD!~;R7JSMCLBPD#mg0mn{{LHyt$# z)PYd3fKLv0`XwcKEq_yU8F>0fdEmi1qhb}r)134(>vM7xlD6iuIznmUx}?jAWGnj< zh`WPXSp5YaEhQ29f#nz~^@>Y7Y8?Y-Cu)-rcl_>&*6iK{p`fhgue1uvd!q|^dkfp{ zn7fX5u}Ns2cXu*)WpVvG6@>-6KCWx%n7~c~ttiN8qwmegX~(iS(N$weJeR$5x=Nlv z_`GQ7c)zO*pE=;+&6%+*k1<*ugVsq3ul20=#AEXEz8gQn5N~`AgL}~IHJHtpipF5i zo8MGhdv~&YTaweoKY@J|+N~)v*ot$DwH7Y1Fn;Ffo2=68Zv~7(S&sX$;8~qLpOHhR zW4iE<_Oa{`wW+p_F_L-|m_PMVd?QW&VAFm!sfMpjtU#80-TacY#Y3UV7nfaU+H`08 ze6*w8*h33eOL@xMT4!~6(liNfQU%Rc=lx@35u*Q61hKMIeP) zJz5cwzSTikeY%5iXtdc>V?eAgWm*`eS|_WqHEgm=87_&1p$OV#RIN(J4VE*PT{~&kP z{FyptlB7~9y=%^Ez?p_SDofQh&uH%llIH#sD>4}nDBG)dNMH`>V4w_>@?u*fZTor8}5j*N}!3C*$u-SnrcyFt!$lI{tiZ# zgkjd}j!G$eK4U;6Pt27Se@KY0o#!;+i>ENEG!!h%H-sEDT+M@3XFI*p%jYeE*`j2WzU726p3@)O|N< ztta0ffi=)@&|^;|YW?0k2IcjPOFc#&Z|(RjfHOak0YObIn) z<=+ps_Ia2yAAPqt=ch**wL=uqlI+=kWp1qQUWQb|6|EzD?5rH7960grozym1su}Xt{gS<`BSAEOxoMfAX+7I(SEZV|8=0>;xnxQ54 z)p;tQ8?7<0u*-IS!V1+d*R5K&Ht77#Pl?HjW30Xe4lMyDsSUx;VL%Euhd|pWBSK&S zKqtP?ws&bg6V`_K570^=nifhro6Qvi&b;$v)zu>i58nP-q)e2UKg-XD5W5Uz$wu*K zMT7!oM=r|T2!5f%hxhHdVj@@qvf%30HVX;Wy`;V$J0ywbwc*ZLx{7HObdz2Br;E}Q z(M5ymRVj4*wcJxXq|>BQ1yq8rE;~0>JIVXb@fe?{j56s<-9+NvrS2^6W3I3cc!0kY zD~04n$8I=e4fWU2p@cQ;54H<CL zUV1+&wGm-gL_T!%&o@xKQQ4qaPk&#M=O!hAdtJ}#s<$-8#Y7r^Xp-|K{_%By>#^6L z#Cl=bKE;Q3Qlf6@i2N(xRk#~1ncTzpuY$HG$MEYHczkjPM42}8{z&>W-#$<=8sK8$ zOgpo@J%gzU#AhnSErkL@_*qH~I&Z-iNd72-`beyo$bU2;SbZZsJ)SW1E|00hv=}hm zD36=1Rah#jU(*M>3FYK&*)8Z9rsMUDuT>+ZCLY$2GG zz%>hwb~^S<6o)KoViZj7gznJayI=5|=!3$d$~9lYL6rgKXwo5TdMc 
z7dIFpFM>MXbxjhxSL@38dB{W~q*XiacZ^P5sK+ESI%|4|()c4$4kuAf0-_en~ z-$yR>WE9eOg@2IM>(9k(CKBSDGR49zYKu`(-k|P1⁣>z~jP6V!(nd?;%#bgeWu5 z3YonpT#Ic9HRkKyd~@q*kZ~uCqIh^o)KszzUJ}b*o7sHgmPIp>p=XisdL&q>(1tG= zx?}I&_57oeiss}-9u%;f^Z4_pvy^=zH04WVbA)q)TZ}bhn-}hHs094Lx=xyPlD?T4-4lh#E^}@-AuYr4KYuKQ{I4-ML{Bp)!nCz zr-Y`U;fnU?_#2WyO;A%2aL<9fp(P_8twV;GkTPPEIR99+o*Jng^rME_FW zT|E$pa3?8JxW?`wC)hj^wYY!th{SHU(ESQgSo4O1_7bfvoSM95fe`N;mR%!tiz04+ zzuJ`^FjsGx98y6ZWhc8&E12<1OVcAK+xs2jc^x8_goGjiUu%RlxB-k1N^|osmO%)b zTTcaihilwh#9=YgTX?0oqBzUe)=-)(5ZEn>QjzN$mjEYG@=jGV<2Yq@e!Dhsypb?j zekw^tyF73l;YK1b>!kTlYNtyjQaET&cv!H^7uSAoz7S*H9FBvQQGjBtJ5s_j0Sv2!M<&)tgJzQPU&$x))# zYk0@(=sF8tm6Gc*ARxfUuo8c&TZI<|4y?lQeDmV(=Q3ZH>7K7=rgMpKI3D{B!yy!( z*dMtwa30-hUMzY-{w1F2uAsai+`$2x^(5j#Z_I=@6fl^aO^0=OOdJswNMDwiG_U@Bp58upJ}X_E`~VzB|C#RLd2~ zn_wl&JG&NE>H~G8|#{aVDGHOn*mJ5o##t@B6RBF68Uh{nI*YgBVKq_JbDZ zkLM<5tHG~#CEzFb%oG;4X)AAgVej0qxfOTAwnivIEqQIf8rA&sEG3s$KAIFV~r4+hq ztoPUsGdXuBN-Eq3PpwlW2_ zcC%2^<4XnFwE;Ts*uBfuDSKGYo=HtjCN@ke(3PK0MT)cN zLuCj8L1;Psn-QZ7pZmF#r)uTS{k<34UDLhKHeuLvFuH%^zA?Y$(#NMMdAeVvWT{^K zj;W_rz8JTgp-(fXL#V@?jW#QPnJL`0FQ}sps-^lBq>`a8*GxkPJ%8o;2Jf=HD<@v? 
zJ*I*QdA?!Zf3>sgEy@^`Nh&Lxw9t=qc3j@vC|z+42oRjod{}Hh2Wj=sleQL1nu;zS zEI8o}tKL}R^t_Y}O!dSv&e87F%Rt9HnayM~IP@d_9na^5(n8HD;!h>Wm@Yja=0Bm& zvyP)9Kf2GX^A-jU%OomsURlikLf}pOYho=0k|UAS&HRFYf)h{>jLiBis36+a1lgB(l$8?p>*>o68_ z+ZEZ^_589`NF^VrUwKgNyul|vKjQaMK0p~lrtP!m!UlR9Opvc&XMlI>uRoE2uBG~| zbh@Rs(iF$o>GhB39Q9RjP)uVpDXGwLNMpaJL}SY_SOgC{itY*%7#>tF`CK3wQS3BK zVJS(&)Rm_U{M=PCfyWp=4lDg!CIkS5;V%J%4Er`-R^cg{WoK#C4Gs}co+sO#amDjI z0)|r<^XQ*7I82k>04H>@Vj=Gnp%eZ$c0Gk(K$cWS!wI+L#Gz|9rE)`oo^PDV2aK2x z$TMy^m(qcr{yUrvG&h!?8%6E<61w zKC{LVs6~^1yg6NflQBvi3)TOte&S%SH~r?=Rs63+`0B&iZLUJ9xvn5TiRBem#cmMLM>`!8S8)V?w=< zm7Kodl^*A!4O4Ki!3onr13@P0Jm`R4Xs7=^`Yk&yKsj{Yu6E)(2P(n9h%8B2y1hbO z15QAWcWY^gi@xiO52jK(FP+Orp59vk?q$mGpqX_X)@>M}lJa%J#rQOyk*%00?3<7e z3TLEfbT{CLBbU&lH2YR_BT^kttEs*|_MR1}nwps5FPfEpCnW|(IMk9)^?M)%iwkEq zJmyB{M;MZ^GdtyUozQHa=d=;c3-9hH%DL1qOBjUPs58L2J-LR8Q$C-*Ro^zGgow?6 z(=z+2pSh=S z-+Y}?f-JSQCukPD&^un~7IXi2f5T`GsH~)i$7U9d&-x5?|1xTWuF@G?V)0Z;p90yj zt7uH`P%K5UVzGS44!7gs4_fuQ&7FbBFN;e{NW_^3RS$IJ^Y4i-JP&5xZ+I&7kGke% ze?{~8^5bNwNKs*+0n6!Wq+Zpgd=c+DpAhZ25f-u6Q(p_+h~U#&m{TRqj+X^7kIT8O z=d{6i#(ft^yr3mCaqGk@_>XKd+Fb3X*C}guHgId}M>b@*x-f}IQ0k?p<|{isV863z zLqsPrr&1_iCq{n_BdBpg+9!f$8i}q5%&N7H!CQ8t?H?AG_1l z{r$M9oMZpU3vxOXZ1X{I6tqnKVm>oV|KI?ugZ6v*JO|zicPrh%15pH50zD4}A#p;P0rB4(NMx#dJa|9hQv+1#{dr~F`{z`11Ine89hz=4#6WY1UsJtIOmeKRrTNu7A9H=0!Wdae+`;ZL_dL@^;_dBT+fuRWmJdQ5q-(Fyf; zdO8M=(^=uBy9+HJ0-egM)5!N8Ic^?Gih>B*xhTo?>Esxj>t0aO>JmO~u z+0QR|I^w(MKwQp!_lvq$5TNG0_ddvHa`WU;jP}Q2*z0dB zzHoG`k;(02>{Gj4s3P6T-qKdcrzv*SjsB36=1*Kt%OavkJrg0k0hvrygy;RC+=ahX} z@~XD4ll}omjLgBm*Mn{`Gu~x`PMFezbD5_*R9sz?-YH`1^$g3}!-Il^@`y+2R|Fh_ z;vS&RCEjx#CWv%2RYVT8rN!eHW8W+&NN7=ATrVb){@fKiirW||z`VvlZxjDtgNunP z91LW{@uy4onkxFJn?XxzpelYhZEqFawd}pY|C>!4lo9EX{jYSf|QtTmj zjk?Y}TeN}((bwl{9tpaF`CyF#v)hDme>uwdVDHR%Vp1e-Y^Q6ATC`#R72kpo9Y4HQroG5v8o!0l#JDOlUl7HUhXH1dJyKqqSd)Oj-LP$=$iFfw| zL+%jqX+lM=O?KtSH)ZNAljIG(ZwNODQ)H1~cjaieY3RM1bH6Fcj{nCA>Suq>z|(-@ zVcH+jfP6u7S+nYzFPA(|gk#%^Q4Q1$UUX|RGnY=n6R|5tS|Z3LI66^~8p?fHxqjE@ 
zbCHY2EZtpdo#-4f5#b!{Lgk@de#4{E4_|!$Y(8+T#wSH>{6Z5_pH~1UrQz>PS#G#t zt8sJnP#@LCaEMUm=%XM$9VzEm>KAs87vTg-Jap{buF1H{jHZv8%ru7Q^)Jh+QuPa| zetJo5z;&%YtdD~yHq25dwH1_Hbx!PQY(d~WcgKBV(9bWxgmCOF^Y~!x^IO`)eU?W0 zcaiMd6)U2fY`6Y^oH@WwD1M4%~)HD7v+{vbPO_S(J-^;~r#qT!ZAkVDVvl)?+J?UcY6BV2MkBf^ZJQ^w>o*IhR4%#;2KC zCXYDvEcP26URxG z`7r$9+i(-t9Or_V6$tP2m1KS6qq)mu6GsZ$M?uc@+C^S%Fph&Vc8WoA4{Ag zmy{KBUplFw?U1(vIUx|WDW$JAq4R&VnMNnb=b-}5Jr%2Tzk&(D^o2-Nf&KJDr>C>q zSbOsRbR4m+X68-3yGA**G2$S7rUnBaaWa?4ia+K==dd$czon-BOlrS<`wz-rlL z)T7q2YOUI9KPUl&C#>&n{y@xIjw{Xn(cN69jjFr!HbH5vqO>#=)WzMctv{SN?+L<`Jt&>L$2_>Phz}W zr+53VmEGC;G+b49M#V^n)FR>8;bq(Wj<+PE+F_c|FUV<9Z7Yr}L_)Ik zA@~y{7vIL`~LUZ=gK^_LyRL-PO!?x`@CgSELi$)lhv~LlsU7*Wcre%#>Pph zBa9M&^Inmcq?3zSNzIADo8T71y_<+r@Nmx7(hi$Hc;CzASXnyF6_sCl zF{#S%;cc-2BqcjIi9@b41@C78;9k#ao0Ol^(!S#bU3OV~k@}-z6bESMR6nE^l`#55 zgUzehOF=Ry-hG6Vf($SY@hbcS(lKUuNBFF^KfXYED3o@-skiO02qqFFKS1&tcI8&pm1>cC2fCgwUgIha7)|Si^&-5iev0i|I2Igee5zKZZJHb8|&p5$8Vpxj|lN4ZQM=JPmx@NPKI4En+CTd zFh>D>a!xCRbo4@~y#Spw=+=_0Ga9h@U;#eJB7A z4`QVm3d*GHu@+ej*R9B=2S-j9?K4zjc3;~U-?+=$BzDv%aZwataYwwm7SSw{r&@{f z@n{%mbtR^r!PPvN*33EQETn0#3%#%SRn^gRT-VlVIBrS%vwM-%k#UTTlcOB@V*t7c%IYLt;qX=tlx6q8a65VFS6@~)@IU&Z6I z7ZL_2Yh#53o-7iCx}NOzY+vmMTHC(csBLsbxZj+2c;Lx!bg_{QU!NtD#w{GoOcpb6 z`)Ly!CP4XO_(Wu@%^`zUlJxYAe_QnJ!>QZHBpz*?DOEyiPe)EjaOxZBNo--KKV40{ zn6`#ym6_72H&zJgVf_Lq36~qp6(z(u)b$QmgPje)XIU?@kFSoc(JKG@Me&G3fa5`m zNird`1SupFH>ptP-m_UDdLj8gApXuSFdbbzt*%g=CA|B8==$d1O4{z*WM(pPGV#Q= zlZkDg*tTs?Y@gV+ZQHhOTQ~3b``&x&)~(zB?CSpGRCV`r_Op7gwb#av5w&;t4HY7I zv!Q!&7A|!>W9B`|Nl(ENe|Qq(mv*o@aBRJ#toJ? 
zlQ|IM^!8-P&~C<@xUJUaM#6X-_!)i0AUAw&EVj426%D%>rOmV?TjX4=5w|L%asQdI zG|$0=^D<>~R9yGT0}lxz+F&yi_hyRwi1SS>Bt%q7az@o)i0nWSXEuJZxfG;TqFbY` zY;Z{YvJS355kq#~3V?I4D#(>O(*_&vL}yh`|=kXofb6Dh%FJY`W+ZomIOf z2AIoP@?`z~!;2BOJ7$80yqEW3|HMMl+kyX26t2jYY7N;XX)eJ>KS~45nsC|V5AH@8 zXYb$MQpx?dmA*&X{O#qDS3B?t7I1`bUdkhYrt#Y!>LZO96TbbqMaG$~bYnq=f}ktl zoW|yj4q#bsqP$F$xJ#wi@T-c zfAb?EZ@Y`e%dV7rf~=Wh3XT z*NYdbRqxCMA!PpwZ1F=H0o%?dpNYui8>-qug$Om5L`SzprTHPS{^Odv-yb+uDioQv z7G@&s_uBmH^7`bIY5E198hRbegMJ?<)c}`)a^nIOLz)C-@Za-Zh}WB!Bw1mk4E|+? z(+VF9<25c!Wl_gh@)}|%yx2&1+nj@w!nTS|kweqORbW78XlJjv?bMI z5cr1)9p24^HaO1F8KZ{f&J-0U;~DrAZzXQmKKWWbpu;Qgf^6`=N1m=|zvNQxp+A=g zAS7@e2F;Yadnp0pWgrtNQzNi%Oqj!)`}wD5==c=0pCdB=A~T|=O$cDo*k~TkTexD{ zE8erj;W=06z*#UqO=~a9)>5+CVx$rkP`oNnvvC``rB09aEXL94>gtmo;>DddvGDGv z)}0OlArZP`+2z^Z7%@s2DQMUkRf)>+ZiFZ8!Me{y;=*Jd-n{wk0H<`K5q2$FWaDsc ze~=8BnFzF+R{Phv3U=Y0eS{p^SH~-aD+0Vhxs7-mQrFe9E)2|(NXQ&3B7YKx1*sz{ zR(^H)k9c-@e+?oLSIM@n&jao3{xVpg_-vYicEy0e@NBw%l1DRkZGzI|(l3<5WQAOM z9_^6soDrD?D{H^76gGqX_4%=l)BYj}rl~nFMB22iHYwtcPb<`R9?B5KJ}|G#%@}|m zS5r$dW@Ic3+F>n&T-45v^%(c|_#LLc=?N7m)U=r#NSq%fgZ(p8%bN=MzPL}41oZLD zEl9-T#lh-HvLfcnpz)#$!zyOshOTggHFGs7^*3I;wvd2`A2{9pn2-q zr9@|VIf34_I<|j2zx>23L%qu=KIDHl)u5)riA*=AN)>P>4tUSw)NGP3U?t{|!_Dti z{CR32RZPIz%KS}cjw&VygnS5Cua5EQ>xs8Z&R8#L9Q?iM4dMJ^u^ZKqFYi`I$TOZorX%jn+49jS6kC`5dBflR)81H(T>XKaTEa~FRT?kk*Mz_3H=`>2K z*`e=?A#|@GEc${)%l}1;5UzSzTy=3wAD=)_KeF;l5(>MR{b=NZsYra2lW9KHP+I2} zOIP*z8pKPNv7BIu+NYtnF~qdw^+}q3I|HNm-<@FZJ1K1;u061!MY)SJZH(mrpmDq_ z<$(^6bAd_7Ux!Wl8ySmfewy!aP14~hA!k68AQV>8?$GinxjPh1c_im0Y^Jypnq_>0 zMvJ|8DU6VdUy@=M$Sv*!iZ8XiR~QCgxJHnkf6{dT=TM@by1h2YTT(;bJ~KRp3TUnt zGL$pO>)st>#03P3G@^YQEvS9cKQS4ADH`xyl$k1r*)P99xLo?<_l?9pF)`1gIUc2Y z(JR;Z(2#ZPBknje-%M0-O`Gk@Lz~q8gz>$uV1Pf9QyBX#9@dvuJ=OT`F3++VBZ24w z95&}Y3L$)>I>QN&Y)ABHBrZy{tF3)FHG=zFtrK&bv|+l{#LWk>xk`^1eswHAuC%~M z3-9LHekToM>g09?ITkc>1LmcgrITr6WV^`9K|&epxitI_{x2_L|3O0xS(f*{mbv&z zj0A(R&?)3!=|B zMhVi8K$+7y`&gHXSoUm3?`^mqKWmxWqHGkJlhGYlK}73Zl&=K-u10^OSBacI`(AGj 
z+3FCoV!l|?em35FI}k4beW7)AXwOA@P!WOnUBK%mcn?rc37jjAZ zH)eIF@Nb8Bz?<1N&{cho0APe{J3^>iM?@v&TpB$yn1Ymm9FO`fu-=BjZt-Tbrq{&Q zTLBf|ODFIeMhsR=ZvrL)o}UqHaHwq9WThUo!w}8Nrhk9JM$7O7fy5t8{U$*;;wQO zFY*`Sp>j1s^YFXED3#q8Mt9k1?q~z&4zozAaVA_3HKP!K1jC}3I%!3bWL5IjX5P#` zTw=IvsF#2+@|5{wnZV*^Em3o4o0K#p8_#`9eYy zLzq}9L$OIr6Ehr>EtoIPX1o&3ru;ZVTiYXF`ZS*&e)t4`j_#Vvvl>}e*pLcV5)@{- z9d8D0#9y2N>R;C~7KtLEy0_au23NY=Sj02|B4>>8ytsQ)gAi^~7lpF^WeN9pLKrpV zJ2m^9p5&BP2E4_HkAOG^t(oL0k$K0Ob39Sy>lwRxvXcasUDjP3eqsTqH4`&EC_cEQ zq>E#8wdKzn>DK$R_w2}x5qgCGIbx(UJpOzZjrh`W0w!*vn9N;ip&q%`InEszu?LJW1dj5H5JRi!Y(S%2*+H>shJ{;pVM3Q8UVNfFU$ zj@T`8K02rHYM34plJceMAC|Y9!oA54kf({*58P?`WZw9>Rw#3+_Y)e{?^>HEaC6qu zc9`|kRZaU9L6vns{K)<`=|dUeCswI6C~uKk)JZioD?&+@>inJsu_oIg*mb6XgSWNQNpksWP8!anmFqbGHgU>sxf?K5>(JI-~)MggB%K{bvoYD{cmx$ z!aB8idjQsJBBCQDsX7bjkftfKC9I~#>THdswnbD$uKV^F2E+NzEO>GpCEVo2Egb^I zF~>}jnht&#G08&J_r@|Cp%L}5!>ww%7bo(()*gX$jgmxv|F%50%8ofli3-}{df0x&6&zh#Xf71)g*C>a+4H+rDF_d^|hjq@p)cN8XgZhFtt%tDK!df?|e7Dd=zB?1jE|w)pDs%Zv%MDRLb*i4WaMeP05}v90qiQ4O-;8ww`z*Vd zGg2&rSeGgI`m0`wJPFmQudYfz66l~Bvs_|iqfhE#zKls+UPkwL)wR}3xO#d9MdIy} z!7jhJM37J4RLLXaSG(z9d^TS$@Ue= zBNnttq4#3b*HHOS%pU3N@#NMPf@1r}VOtfM=TvG7uZ^S%t^=8e%gf?e)3#`1%uL>; z?aZTEx=624R~c(GY7)0?6Fq>N)$@Z{5c$xrAH>gkylSY?0)rD z!#hKeYjO4`0eR%9S#gyi2m9EG3=R|h~Ode%#=S>q-Cy^JX6@(6ZlMcKO! 
z;1Lo4#E90b!x$SSc&tBsA4mp{j`>7@CZWn!+Nj~y&KIQf6lh0Z+# zb}u7V*{i&cy(O`|Frg{Yzs|PKjD#KXV!<2rLqg>A4ngF|`-Jl|6HrJ35M9teIJN2=1OitTM03=iBu*K;7UkGb~ zCYMJP?^pJkd6so}BTLm=2M#U^y=Ph%CVj7Sfumg;-~j-5XVt%z&Vio6hmf~55z)F7 zPZjfys&;b|4h2Qvwf92}8GQh=JLF{D=w@&dmQm&6cW(Fs)%Y<5DUw-)7XKzK_-x-h z2cm`ejEe`}YG-liXP~DjNw}d(I{8{yeU8C*o`bVbNFHYwDt#*FjBb+TX`Mn?Ln>r1y*TeQ`>$;^q=JPauQga67sd4v{0rT zIHTf>Iv=dQua0?7WhP@#i+WEbT#`fARtZKfpI@GBsGIbrI+i5dp+7LCOB~Y0WbP4N z!~?=z8{B)(?F-q=pM8nC6#O*e3axeawqzZsp1)Tj<|BW3D%Yma28;yFrM<=< zcUY)BvPZezFH2bdfQBFzm8}3(%$>%3qPK0}PkmVvW-B8>+A=q<&yqzIoqAwFaQRU8 z{xepPE3xyskQU9cMI=RJQt)q{;P~jXl%!)Kr8HGq0=1vvsKZsG!4IMbuH^?3%=#2V zp$;{-;L3!Ouq-^wgv~?Cl@Pmdj?5ueY=dYwxP8$+8SD$XZ|wW@$-Z1pzl!;ED-@eC zAVTDM9{L|gcc3UwBG$Wk^lQV0fvBUI*D|9Hh8NUHzvp~IR5kR&8CpDT>n9!!gimIC z(ZMfov@UCleLr{JpJi07UmH;@5=}nc86CPSfUJ>aXRXc98MaOZQ_)k9|k~l_k=4L0yN{f zVNv1ys1?g!;Dq4q9QgLR!;hz?H|^}p42R7$j1}!t%J3a$bvfN_GFpWn$?;T|>!}3U0!lS`c{6rwpZ)nfg!y|%@_msLF6m`GCXtHNc+wTTt*1 z;p|L{oVr}RLDT4#2*!8I;( zR7Sj6-g|m_fIP*_w-zZt3Bz+WKqw~ZZlgI(4Q`KQ;A*n2zYr6frBWWVfFhk+s;~Xf zKD3zgl1e3fx)LnDXUo>8pY9X%XVh%R-|f+Tsh-zeGo_^DLqcMw7jSR5vTbf#T)=;+ z&i@_0MCy>Y2u~K`QL4J6l3dsfWJBSm$A7Nu!P#98JMI&kQ!!C-aM(h7RU#Mat8K)E z2xhII05V2g0J}KU;W1M`oMXO>2h}p(v3(H3yT>=nSq$mbKD)dcC zw4Fp@gIZ0MTf)Codg}~|j60EXE7N#L~>B>&n%o*?;`3T5nWNnK_oPvzeLyCPPFR`%`m3a8mqwKaUO8HDgrbM)ScJACD}4O}!a0k+!3& z#AKR^X0F_@kRpni_PoHp2vUNqSRg4HXvR~cHa4P$g zIEb91`oKvg=kQQ1U}dZGs%2sEz*(=;N`LO@opkaCHH=FoZfDAF-PZ@};;P+{N-*mG zvq)BMe5~`8UB^v+#ZsrrQK$U?MeNOOnlTg(G$$7RTe)UBrDdgE)5sBb5IvR z)IegPe>fnlQM&q7qXFx)5r-Hly$;CZ_iv)1!Cie5 zG`*K<)*Oi;3ebfSVXZqHoWWf}nsrbSM%%!Ys3?VH;0P$;^Q5>?oz1uAF``PLr!Df0 zYGyz zfd<5S`AKhEdvK_z^xt=)mebY&vuWLnUV95}7MiP>fiIY$H@!5gkj@eqi0Vo`F7o$; z?bYie8NB?Ev8ZIJN+>+&WBd z65DvTz*rhe25byCF5%o>;fLtzvd&dV&82g2^0@q(q&a?z-{bX3+UOy-kwtqGwissq zlMpalDD0uI=YHt*c&p31wSU>ASpO-rS1{&Dc&DsRMln76Z(1p3UJum zz{2AoVxj$`k2Ez_spX47qRcJ>XwuCrWavV?>7IX5F6K^?y9>u7XX{Lvg4cP;%MjTw zTTk5AO@+K$bj#s}Uxl{Fk!Kj0X7xRLHT`CR&&E!ACA^lAKV 
z)KFtI_*@)gut`zoE@hBJG>zi?Z2;pZ<5}7?+YBD?ufoc_sLsioFZYHAwxF94#!t;1 z2lLS-%F3)x0O8Fy_CSgXhC(6CgpQJdXzlM$4OV>qpGwpBwYbenC8W@cfZY7Brl|IR z9%qmw07&+t*1adv<6B8cCeJeaJ)U3eZMLy($?eav$ycd2sD9*Wc zwmD(p_vgpmv8%xDWoTbt-Dl;=dIUFU>ht=fh-DFUW=X*8nAmgUx~C}B!)4|o!SC?m zkInshh~|N^s#~^}P48{TG_RBVS+2qN#(gp=f$M1%R@jAU#LGnO0elF?nob#!p2hm! z`bC`BnGHtCV#Y&^L19#B7E|h9wpOUvtJe9`o5sm9vIWdEhz&FI4bbX4jGsE7CkLX&s8y^mpo|zb5kM<9@Ty z2FHB{;@8s(_+`YYa@G^}OM(8#p{X?7fxm@d_X=AS#|)jn7+V6fKayV-pJe#u;%-E~ z>D3yXo|qYgiiSeXs8#It%%ZRUYS+BU^Z@TLd^MBdVhzp%mpOEz?;Jv@ zfS!4YT5D6q(x3{Qrl5A)r~LqD?R*J%!Bw1GVl(~K-OIkG9T2C*K+)W8^(VI%ZFZYc zS~7wnD_HbH(AJ6yWwk@^V^^_mWC}6|^3K*!E21_HQyT|Xnl45WkVqquB5x`FOZ)sh zDog~oVZ(BCz)DL}(^_{*BUGw9LzC}5oh$WD6g?+5@-M5nIPO6B6hP#8~NE8 zatDznp0z3X&)Lfb(ou8&l|5$5jm@$)g!q0dk{mh7!$Q*&b6>T-CJ|yUO;ytSqWnn$ zMck=+J9$p7tNRD1koz4-*V;S^G(7x98k?U$yBk|1&-_;(Qfix(0E;=c*AOb8748te zlN7|4!j*s4dtO-S{-S^$>sOOzp$mtVR#2AQG(H^&NM?jN47FRwZt6bUZ4`0rSexvJ z#$8MDqTU|VhdM3ezLK`zz;nbWT;6mPEaGAe;m6c%_QE1=r^_VXvDxSiT)RCJmz2j> z9lrAoG259SkWWZZ1^Ge%&Q@PcVD8uhsJpoQK1ww3>6N04F!5)EqUBrq5^}nrPD|o- zlXt;P-J<1BCmRV_W=O$Jj-ip_T@{-xNNcGUlVGaEUp>nlHC(w?p8W}7tHs@}2TzXq z?~N52Dl5qK0|O-xJv$iJorACFdoLfOdNcc~4j1l81(*{^b8`Vr(VNG!9oPbAwABlP z-C30R{>maMuZ`$j%9+OYV3-6nmBv7ATDMk(ouQ%Fp=ceeec#6^*zk1)#MGwNv*a~D zZZ0D^HUo_L{gyL_LqwT04jCGuqxL6qAb8irXbm(5Z+oyB zo|qEPW4?o#hIZiI51t)?ahNGOp7v~>98NmXy+X`R_7gSX6WGx7I*_6N+4M}JZszT^ zR^#*ABuYkvg!;miA&4nuP6U*h^YI7U^<1GUTqn#?nub73@hRMzuMd_f74Rt1{sCaz zym=VhHE@{2Xp&J8vrAhXJ~%$Z;-sBj#THlTLy4&c)fJw3hVPAdM5`?M zD{ScU)kO+EAsXTn7Gn;F^C3vcHxI&A_g3A-=A#LmtF353?2dL=-X$}ln=;tX>pkT0 zO+{&zpz+uT3kz!6ln~!QJ>QSo8KUPsJvN~o>utU*EtHe~12KCmA4&DMe+*zE@d!l* zeWWfD>|qST9geY!VrOR$!r~M%C#fKhJfyTB;r!w(H&IJMepRZhD=WVpYzV{k4VVS5 zn+y(Bp4{A0>(lJ*^YNWxnuQ2A6X_bxud5on=okHeg;HGkDhg4{?Dpu7=HrJ)Zag<` z%Bb7>%n23^I$Leme{SjqR%+>y4(*i2r}$E2gr9L=(O!+BTbd=E0d_0&H;Sg1KDGwUEV1 zm{=N|1E8bv7o8PeF3Au2Sm41?L?aEz+!nLrA-K3>D7a+I-E8QUl-)fded%IQ3Ut&x z7;^$rnS|JvZ&P6@0y|O5cPnn%M`0zuakz8n8X5+~$8&L;ZxVUy2x^h&2ue`2Ea~X( 
z3g<8AL%>9cwMyJy!3%rJM&!UHB&DS2YrA5R#{JO3eG7rB7vHwf1VLg{7l&CB+8afw zt1}%AbwHtMgUza!lK1jV@c=xZf$%t%`MUE#Q@ZEGO_QDXFNc z?k-%?A_niD^<2!+9t!g@B0;IRPqA%5Qp7@h6ypd%smgWDeG6h3|6PBNbLquaH8qkJ5{~h z{AiqmCpT<5)GXlL?e7v>GWt+x$RP9@nq1L=#Mo6eRecNX44y1JLh&Srr~?hf z{-yO`%oSR7IIR(_kmNiRm9-_(`fZ%&bSzB$Gx_wl6)*pFS8(CpVHWmR8=9Mc9?N)HEPn&`_&9R3b7QO1t`HwnIeGzk8uOwy^N>{*A@K@t4BwL%QyXF0S!O%6518UGdNXd2AO6#Mn_-U zU9V^VlbUUKbk0|ta~wYYjGXZekNz)%XENb0CpUcUg)d=cK3GBkl)U{Is2opbO@;jZ z{(cbY?QA2V4zZ5GY;cuPE%hjjU)M`Dg?B=Jk@!aeCw&; zM-%pA#e6ZgeC(?BRjikX)(Qe8v^vz{(O8uC1}VMgPlZah`aGV95msG8s}Rp^Gnh^6 z1cn4dMeUSV`33!0qneSR1Fs)hd;Ql!FeYj^i$JGSxf%@Wj?G?kQ0mvfk7qEMwIm@; z^)FRn8JUVLmN8;&@!}pbAI0d^1`)(JIn@Cc^78>15U@||C-hGCeli2o5YLbo52i!9 z$(eg=sdxFI2i08Dwvmc8kYt!-JOh`9A%mK|x3I5V>A?^2d^H759t$I4M6kQ#$ekjo zmCr0`=y&PwVY!_L7b5s5uo|p+F1h4!0PC#g*(0EA%)VJ_PS!0oLaU1Q@9c@VbnbJnqTZ+>W{Kw2{h@O2ts;oAd^{03Z)EV1RQIQTZ9jS)N_?4Nek; zLdsl3S;i71L=kvf26**nTDE$_zg`-S{O;C-TUy0rxgm`f=b^?r>_Pz(ayc7)Iz~A= z+W}ly)wtvF#_v~j5&MonEJ{NiW`@2RZx3tcCF-5NSpYIi*!J`}%hHJ0bEpELeuJpZ zMQd1V=O53;{EVxVsqfy|XSQiUn~FUY$FSQJB6+<1UZ-r2nh0soeYZM`zgO_r4^kVj8Kt0G4okI+rj%?<|K;yK1Q{jl%qiD#mZK-=U-n zD%d9@PXG|K7dI;-s39lZlr=?_1kOzNg(?pw7;!a}fcj_At&y>^ml*%0**KGBFK9!r zmvnjWx%^;NtSF^tIoDn(f&e8}6ZypDI_dFeDPb)us_rivN@D8F>$fF#-mIl2dHe&OFR>;ywjnAk|7eb{UPCU?Mk$efGjYRQ9ewP$FXBQ!oa#jZp&rOzj8*WNOd%U2I1@V@ufOp0D!lK+-F<&PDZ!$7&a>>z?O?rdK$> zt{-Yxzy06XLXJVYXFCof>hg?ge2NLiVqtx+4n#kp`0TRv4U+K|$wh|fc=Vo|VgdAd zV}=n(?OBzMO0uJqv~9QIFbeicp7I)R5zC}) z$zJ+YQt8CJ_-ay6I(9ZfMl_wmiMVmv6T}5htQ;wXaX#hppm@)@l5$g!Xq0RW7B&sw z+lEf$BnapTL8`|DO)BD`=1_?e^ocA_LFGRvMU4M+3EGpG#4@&g=>UH=Z4JKtK~?-ftx*^ee+ zt^|HsUgyTL@uIU!VGHqwfIM)b56Qs8-l+UX>X_2U)Un)`0sN%QAq)mC?QE+;AQ5e; zM9zSaJv)z6Jt(F3jQiBbk_LtSG2VeMhj`M(%-$`L0sFsnpfb&h&|`0pcHG&-uuvcf zLCf|VVYDBIe~Fynp;1Luu|#EVAySDt^muB`&;wpcp*GC?$M~s)LcXZ8kH9SD*gwf`$XG% zcixeiL&PH6J$vSVC2VZ&&ARZuimzJGop!dVMY-(ygx}@1zT*n zH_u@zsFkS9ua-v;NT~zadBM*XLIG8yu{gC|1SYZ%)rp-DUg^4w)JbjQE|>EX{8wF^+{A?3F&g9Yq?PE(3=#pPtSO@ 
zH1Bo}wP|M!&F1$c7^28+UsdalxX-;;d$-(%_4)TJ>{T6Ki5!i7Vtu(dd@l3-~H_M6^$l>dPpNO zZA}~It|YM$+hC-f@NO3`o_Rv$IeBY(eeLFS4sR zj2OMG*6}AK_0f)`3_o+(llSNQKa5BwT?+-B0>wFxum{?YYFh*4mhsgz)QRuIQ~zoK ztZ-kgQtR$U3hL-^Rav>Ts&V;>32u=TH3!xwt^twAk9 zNgZ)?{sicv9I-g#YFC;D8N%g+vp-Q{{?KkwvJVeaz8P_aw0fj@m(Ht}$TtJc?jwG5BF0 ziQKy4SK?@KuN@zMqZj`Q4GGp$>3`KENqEAZ7pHYHVhsB=AneRArwEw)am5X0%ApID z+FF-=(_aCDbHmiGuG~e_2(0D*eV8b1aAWP>d`E=SoR1!wNdG4k?JrS<7EFKs1s0UK zNoCKfCX6YX^t~&-WJRLd;kWJVMVF^Pzc=+(XQVJF#iEf5osyvLREWqb-;EhGfdz@oGw8k*)vm!e@2t#W1Dd7kd!Ymnod}~R2D47 zE$5vzn)0*U)2S@}O7a-rjRiW~Rf_jAfB((UM2lU@p`Rm_?q+s$ji|ih@Nf`R-s-yG z4PsTo@m5b7s#JTTm*Cej%Jyx0vEnLEfw2T-&ay5R*-UhnV!N#63BvG;N`0-0al3Ne zmGkgs_lL+XQN#5a5lL*qk`3r~oLQ|L`rlGtP^&2Zugl5!^^B`c2HM&wtHrv5MEkiG zFEKlyXqBSHr`nkB&K475UibJRySWszXUdozJj>XmkXdab;ge>J$5Pf06%vZ9hCG1< z*-gXwr3_{drTj08fZLQzd3m--XlG=H>xDUYN4GOKQT|#=_(@>E$p_5wHFeV zMvTeETeGRAIp`0UxfiP!t&FR$58Nk^Sj%Alv}JC)e+}XSO1-GD zqPqk&|DmWvFaNM-;Y@)}FU(gZIS$L3abg47R^Q+3bg$G4t~dKR7Ry116Vt-s7W6z_ zvxP3Mthr23+8b;el>Lm8j!ErM$9rqdg#Z4n(HzDRU$Hok{0->xafw0&wW5E%m14{w zDiyJ~sSpBbHc3is#uJ#FWX6Kxb*1xh6;)4fLV&$0EiTV)crdjspt_M6{{?kgETkx= z?bc$=>;{tHG*KfB281T;#0^K~oRs=I!?AR#zbnuZJpL@`N3CgKRACA~{t z?Tp5LUdF?71!EbE!PKp!vYRVQRt8`_4eL>*FIPg=%&7(^K6u$2Ej2R_Gv*>1pr!3kPd&?2886B7U&s7VfX8wMi@pHu+Zsy@NVs z5}PMGp|?y%VlZSV9vfT3?5soqY(dLZ2-GN%s!o+DF$Tder>WKNmqUGap|9;Vd!RN8 z09u?VJ6$>V2hLe<> z&CgTFdd?DCUP{RD zAG%ePTjY}#$`XaT7!`0#ozC!u#wn8J_&q&XkE?5wtcr$9)>i(ZUWYe*U*dDcvnNN( zVxmQDOgGYuUmx`qYly5TG6#ElzcVT;r;`ZEUuPDrDh0n}RR+X0#5LeT8!ezGR>5vM zl8wrsK`ohdu67Sj)PLs#mOO$fxfa=ciyngE$xccKOJrUZI%xqj@X%0rKtqRQyxns; zUv;Fw^dm8HwbU@+l<-)fw{Vvpvt*!A8=L4EPBLJ!z%+xH6RtKCt&zXl|0&<_Nydt1bq4b=)j}47l|O+Hn?Ci*-#}p z>e6U3R^prRAc#naE=5Y!(aD}KA2fkNojEouwSYkL2#h8!*ljINHL|olV5vIvZI^yp z#3?}D9t0&S68BKHxnXWN2I#b#U*W7TDWoVLm zO57Du48%}bUTU%zr7 zw54lBG+!7C7_me$^VkovJiW)l(Tch+G{2qyT#F?Qg;vgM>-T`efzGu~x>6~BJDrTge4A4*9 zoz0~fADP6n*+YXF=Svq)W1lJ=c0sl32v`im1FkQ~7J?8p(tDoLKtG4Y=hhhzl1>ax z7k6_I7yUvQs~$xKqVGgf)pu@k_Dvh>lbvS^Wa!b_J)|(91i7Akx%lAu+it&caZ)Je 
zWeL6~5m3qnj4(^E*|Mi>l*{wnAg^(Ig|eW$72|=cZifOJ-vYMmr|-;Syt;rU%iy&p zg(m}fwC4^JB~-0mM6^=*HIP)?-q8JMS|^(*HRqUYU&HC`p~rUhg6B7$@4@`FL7tvn^`nfwM z8;IANj9?YS9`usS7nKLZ6~~}envIyA`Jp8t*G;LrW(UL0YLgLeFJ*y#TCNIzHGxgS zV|A5FVYcs59Aw@TmE%t>^b}8Q=%<;^`m43V*)>?oN9dlf2eG~j3FMBZt4m_*aDFO< zy}x2oTW#fDModI;-W_?QprQez#a2c z<&LG!4oo(tm>Ms{VkjTMVllwwHm$oo9k0s4eruZBU2mEokG!eJM&8{?}jaMcDm}n1}UQCiKl;(uFv1u zjMae#xLf5o%e-K8#jo;7`^fmvZ~aRb zKM1E=@D>o!iQU7q?!&@P5Y)BM7ld`MdjW8>tk|5bx_f@9rVrp zdmc?Tf$utdt2#{DL?1xm%<3?_covlYeFirAUKqu-Ho~gOj=qDyk2LU_2PZGTK)33x7eg4qLbpFSx z{yVG-p{d!ZKGK03d;w2!lH(W94@eJwK@Avd>kGY`s!q|n-vX~~-=f$1OTEqADq8?K zb{V7E_V;5_8v~=VqJ*v<_so}q4NGcJFf_9UXC`b@>B`>s#%m30cgOPN`PArYPU5b` zKoTEM%)op9`|mF|gVU>DPISBb_1@3*hhz_X=aW;&i7>YJx{_8N>BsA&wS=r}lewE; z-Dbz`vN%ql+MASZ;h}^t^FnPR5ypwLGQxlA$220vTR`yKhl|&Pn$a<&)&>jHwCMd! zLFa!n6{Wmz_t%i;ilFIkK4q6dy73v@aadhsjQuslJ-*kMhJpg6WD$lN9*vasd%C^T zC)hy>_QLMDAEXbi2PVF_m1IjZ{IEP1jZTEib^ldw_j~qneMi%RYCCG8=DsUJ+8xW( z5_9Dh>;4$bcDcmkaUyEmOFB3-YR~mbN^8V{OgZM4E{P4Kb0NG9e{0}sXTnzy;;7KcQ z?M}t4q*wmRr>?jS9yY)H)Z(jNrU)dcslu~vcjLUi#;2!(38GMhi5wLt7*_HzfG4$_ zd*m5x#?>gc-*QQr7L!+bydpmj866Rc4!6lV+h883*$TH1|3>mU&0`31| z>m7q5`@XQz$xJe_olI=ow#^AAw(VqM+h)hMZQGf!W1F{s|F_<{b*pZF=;~9aYWF$& z>|VY0S`Wb5gdf*L)AhE1mJjwgO)G(FTg~OX8Y^!lzTry%e;3jGXVT&R{$=8c1oZFK zyz|;oL+4$5iP3T!?aVE}X*5JYwgtX?h`puBM`O>O#tGzg8*df{);p3B(i?f&R>Lym z>k$jF@iK^GAw7fcJ84<5U+F7GY$rVJbm{oJA5D?>(0;OOqmx!K8)dC!u=ECLb?)}b zhc<`z=M0RK2mORX6@KJe%d^)xxUqa8Z&v8Y=iW;Ek2V6FP);`{%SP;otL?fZ1uBmr zNCNI;{66fC1XUNCkyVk@V?8#vW=&#r1C6%ZYrL-qdt;4QXA0W&`jaI+&$Z!O;@QY> zzXyr~4{+uQ@1)ALh~2kGChjFdPz^4r*7g<`QhmyJevrGBHwL5yO5KPv#Gf00Hpdo^ z(UewaMp8eQSbBKxuNf-cs&H%a8__J<#jXjcgh!Bvz+$PO+2_AKV4?m`0>xTp-g#a9 zF{Ua4h#>&JK*3UeX^lU$u2`?6fPwcCoWnhXqYz5SftPB#yF43yPbrW+3(jJl4Kg*e zP>|OCaoeJEi!;+&wc>$C(!1WqJN5H(DuR8C!1vU7k@eo{V}LfL<#H$}BL+_Opx8U~ z<@{Ee%}Gu$$5v(yjX8_THW&+sNF+%{xVxYUE2&;8Zng~<_Z=mj-Qwqzj5CFBT4sMT z?{61(e(a-Q=yMBJGgKoSj*>54j_+nGe-5e#p!_oW4@!FO2xUW}ojkPUcP5V00 z7GtfXV+{0d)uWZs{>9!ZZ7DkqXOn4r#Kl-geYxZ=Hs 
zx0lH)fNpwgFLr1%_xEQ0cOQMxg4$_`Ihbhd%<3-KL1z4SK7Xwb5%*9MbZvzFTpyLv z6j=1DpJciTB!u+t*t#^Xho2HoFsgPlt$r&^l|dysEPy>y{m0**gQ~IAQGQfpPi|A! zI(|VGbib5eC15QlS7+oS!{0XnIH(}>M|KaZ?Ltd~i#>MqA(@7T0)*XKRldSJ0Vufo zZ$0_h+aEd)%Ua0BPZKaPXht8IEO0Ph*!+k;dM#t`BJM5Yx%14S9+dZ&i<4< z=~Sq2R*h&KU160oT{bl6eB%JkGpIDCfzy9s`{Bgf;8&)q%hWJlb?e@Bq5$>YWUd7jhlXp~ z2=s-=m*>XLQm^`}R^hDO!r2kWnl_mOo9NQPsiFPpyTZQD6o6;5%qQOFe&P9ovL@q| zWcrX|KjS_uIZ#(En43Iu(x5!lCTl?}AFK9xuUX8`ErgHw=*?LBcpOf+Blx}tY1!Hz z@|x(=R#cV7b|^$nOSRxa^yR8s8u-`a#E&xJlUhC<)6MJ?%X|W9}BI zlK#4IfjR_Y9Plg9)@RHPGmbiHzdc#Ha8-|qoeK_Yd}V9$JfHP0codH%Kjv^b**pV1 z4@K!)<<=rL(9nUbNvV=Hd`-)%6c)lZ+^o$c-(aQPbSHPKBNt*w&o4t%USaY@HfB`? zmH?Q&2@!!DsJxmdwJDD1VS`~sQn|c!NQ5YnapYvhZ(GIrh{dG%+ZyveMiEqthBzBq zr50PwQ>@JjQ&%_-7b;F2KZ9AJ%OtZd3)O{`3(%C#A!dJ7FEA}|Zk_GUr1?*~k(GTI?Sh~yn>kd^`@KRto1DEP?f7R%$W&K0y$yNI3lK8vJ;GXG2UW~3*MxU zY-QMqPf+DgUNIwmBrKzLoog7hvmcR+mBpWVB}OyL0Y`1jqjm9ZPsAv}$&L~$ z;mev22EBp8hr$1{Fj-?<;nY2}h)3YM6IFC%Bp4WiSfqgyM_OE~ylTrL&X9&6ib9|r zl6EWdbXgDj!Bwm00~^E^OeSw0XpEbwYlxT^EYXDgW%6FbSHTc@g-@Q1{(Zl4I5p(r zz`0SyA{e|Unz=*>l5W}=f`E`QqGhp&V?6z5iHnfjEI}NLtad{aE0gQ46qNAC$ohbL zP)-z-5jTwOhRB?Mt^Miv{=HKE_1@|)g9l{V`KZjp)o|9e{cFMv-bSowS(ME{VwXfc z4h}?dcj0s_5jY)f?k{@ENFB?~cZI9nB5Ky4UpMy5|J|vs6fbWaF&+nM^{EdP{ZJ5U z-(;1=spo(5po6x{?@Y@bHc-wB##A`{*XlWz?(@M~&jK90jfeRx^@lq)pDw)q zQuD}UG}yyElF2f({&u505Jd^gyX5r*>WlKtm|49;gM4f9sRcC>}GO z2og?u@HH7ub}rt=%upc$7|@@Jx6iRogksSUi>qmT>d|l@Jchek{$eCSw9#@+{#52m zTMQ2QbNk;64Pr*WAf9>PweSO$7;lCOOn&pAdL|x{>9f%TpC1VAL1>JTnFdi>!D{)9 z@ZTxO~*B7;7#b6;M<5!L@qztLUv zYQ{5`T`TgTXSmcl!)v8+E`Pju4#Y<>G5c+TZ?qa*y)QJBAJeqn@7Fg^pMstdo5JHi zl%s^Whn@~~z zPKqWH{KydjQO_|KW{Zp!SQ}nan3dMiJ*mZG17rW~JpSLspY}Zbf?+KOvXf5Fq0q+x z^7>6!p!36S@O=rK4kX-gi{`bi!I1Z zUiq~WfbbXzxZpK9y+s;&4O4Ioi?l4hJa)V$*3POSrBt24Is-GY3U8{h_9i(Z#sBA` zL90poNV}FDjAz8_uo1?BHtlR|J}=i%HIyo6-~6iSwSnKM3&@G_)dpYy%16Fza?NHsO6VgfvBGa+n43i3_7Of!gEV2Y`1Vdjzou8rf>F&tGcIhqWyu`Y& z8T>+XEI+uAsii(&cA#1Em#$=3_`yem7wNFL!9^TqfTE6_GP{c*=j8>*9C#`HA@eg6fa162wlz*OYb< 
z+y$U&?9b|$M&mC$GuugfzZ~IWW;_966J}-jYF)6(FWQ~@0fBK@;%Gsg>~;AqdWgN?C=$|zNTl( zOCmnn;2mO|> zF#6wLN28%l@y8){;3kR4|HT4ua>Od-dJH)+lj!<_6Oyy<<161N)!|%qt#%jAIfp%L zk8tjIXi75sLQ#l|S#`G9dtpWcpS)LB#_OZQ{>W5d{e{i`hUvosv41cwG!(1*0`}i^2E%%h4dYnDf2Je-)6p`?hs6x z>|CsqoE`17bQt)!VaMm+p7Y^G3Jmuga^E4ld!iR`q$^qI8%&HUv~xryOlAGDVoZ#v zm_85|LROa%GV6DUKx#uQoat+8L@(9f4YhetwmUzaD|H|SA0MyLv9p}l{Kl}-c(eF# zXa(H@eae}nw6R$8U|iUB*fuFse{6fiGvPd1$n-612^huy7plU)aQEDgdr~EW+vhtM zQ`c4D=al+Jw0(cQCM9wyxAEg~s^;u^q!10Y5a6^72NS z36IzqMP9E7#2>P!KRKo}eSF|;mT*eZMYs7T(%+_LYY**Xi2dr~tKrQY8otl7+C)G6o>E7`3 z8|U#D_e7K?zaiOE&u}pwOUC67VXZVvXXRnu)X$zuR8-Y7dm2JfAZN-|Y5C(O)7L)t zh)SV;rtHWj#aCZUTVZ3!DI>>97Dc$_xL|p)^`O??ht{ZVpw#^ZQuY0bv}ey&sWwO3 z<`$m~$OzzD!PWMWa*OMj7bkR2LZ_Oi0nGvPH2zB>%3CyqzyE1 zd|k1*c^4|h$_)8f=8Iiptg>NJ8-r|k4S4o(Xfdu9G-JaA%UJerPAngQ4xEhvDL@LZ zcdIWkk}Q)Zk(}srAFjkgy|=Hm<6-}DF^UcO z)vnU|Q}y3ct4yqob`<&qoiuGO<&qOdc0HAhmtqZkN+?*ZKK~H1F)OQRQag)`=*>Yn zblZg6mM^9s1UrwiA49WSA3%9)OFshZU}tOY4WiN z1&V+JXBuPWY+(_k%*Y1}BN7ct=J5R1n0Hn#;FOx3Kd6fOV~(q-Lc?#-SE-p8XylFt zA^r>a2(vOd0iY!2^K^wAa%Al0Du4WC&JRk>3vi={glnx5t zXbuNc85UBvWv0AB9+5je18oip)-D`{Z1&HeCI3q(i2+Q2G%GjHq%zr@nv177%z&)e z{42#IT7|O48&2%2?I4vR53c%dlLySdj2!6H?bo{xJO^=|l?6q-$q^wArx#yM#q*%_ z^a}{(jHT0vgwu^>H#TsR2$SIw2iV9IF|XDlu!v|$ zTGbiRfAEBpm11)j3!hr@Vl0*@&EG7R8oP;{QXMf#*;6Wr)Ijp5mFkhoBxakLlJ0CD zE-O3tJM~D1(5_%<8=)vn=cmaJg zRq|qcTiXV!r(JHOTPVRozpoeO_Y6LeQh|2g@*2tU&0TJ~upGX88RZif_fX9vFA_~CxDMoTkaZKg8LsbxgukZ-y})Hr?Tzk%2=#vt;U zr3|+b~*0ii14H5-jyEl$6ZhBX2hHzutJy zMjOJOOVmNxO?bg)@TT4r6xBa<$sU}( zavKt+Ww8{oz2g0KAU4xj8}S)tAa0n8pTc=sBHDa0;5PSXtkH2lLiUMs5%E4h<9NS_ZgIAaw3LZ@=+v9(RW14A ztB7(pn-3J+SUy87(Wb1T+uF!Hs11CWkMwg$I%(`Pm7s>U43~Kvvkt`qGn#t+2P5RxlN8Ni%fl$ zTchaVfR9%;n~|_%X!ujEOmh@W#>uJjj>BXuI&-q(L~x{P_vBV}dS3u%Z_$WD>D%p7 zmhO>M;9gr@^R(QJ=Z7W+T8d-WDRZoH1t2Tc&#gf46Qk0T@WxNZLl5>s0q+YLhUOuiT%r<>N5Zl?TF@s?{E?c>KVz$a^kBMa^lr1` z2$rJg9`YO`%VORnY@aAB!4&=(B_v;4sxB+czP@#sv(4Z#wj0W_ia9HKm8F-P;T8^g z^pfHv1DG3MI9knYE)J8BWL-(+;Z9ZQM1IQtznWaL$;vUynyUwZenbH$)~!EH=K+X; 
zI2uKzh45{Yv<{T2$<6y+(4T3_R!I{|zcDQsY1$X}JA~Wnm^ntc6&y~TKhn3J_}gO2 zFI5uG^#4T!u+m?IMJMwGTREE^^~8>W^rAm=8fbpb70L2j(^ieCwBi&Z1_AC(GR9?m zyyiD4%Em@G&de=#!c0zd6VME-U1vn@di3&V^J}EsNT8o$YKi_q9F^@j*K53U^gy~q z91V~6r~F;wt~=04ih)%tJ^N}rM9vHQcRO6y2*M9~$*?#|{i0GYgBEGwlke6K<;PEO z-+iLdwZiD19{)K~KZUjH6Uh3tHz;wQvzl;~MXLFGZBMnDq~glkQX~DZOE6YsjBkPJf=28h!ILp4Yvg2mDvq|<~ zo0JJ9_o?wX)5Bq5o!vK4Vq|PHx(KB3#wjCEf-v}no=zd{r=0*H3U!ZR(&)iy5=uXA zeg_hO$s--dIv4vxdf@0mK8LBMW@J-?@Xh)>wW%wCnU5xum!*DcLj(f0S$*b@^Ze27 zy_6Ju0VI52w?t6bu#iAe@c%|pk)j8zG;>*BveVI>cYQ4MFP!pbeQTb=2J2m3!yZ`N z|BA)h?4m|nBY7o6D^F!ezsLW*Mn*k5G%z;5Oe~l5qRyLD#Du&?zn-?=7ABZW^BZG7 z(Z$F;#)I>aSE|$VA!LH)RPnl|KdELXMMyxtNX7&DJ0TTk9L@3rw#LbVHU1s7ocW&@ zjsT6s;|(1TS1AZ9cDnMoDcLK=h6I^oq3@^3)xWfJ(K#6lR8+>n49? zt;rd&4zFZ^)5HSL=T1ykm0S1;a}RYzSoYFfTHm zofkQ{^q$^sG4TdPb$TkDzZReq7LZWB1&k7KjkutjGZ%AH8 zxZ>w=hH1!4I0j#I znZ_R=px#e@@&nmXB1kYeAhYe-2sz7f=+5{{bp4({Ecg}j_=;BW##ewu3jt{G)bw2C zP#C1fs*ppW#42)CLda~6fET(`@Ko&sN0kYC5W6ne7xjpeDUKoK;@`&YqJ68eD@{uZ!~Q!S_B zQ61h`F=5_En-V`L&5=hIdgWxXH)GVVJ~%&Q=xD_}DAfwt*(Qz#;E+~}C!9_oFx=_M zO)Ci$eyRy3TxWFDOeOLWHzt=Uy7+RU2yJOtLA%ofJnIf(ggh_6&1^Cl^P)z3>rqJ% zd%`G{#@`S&*e{oif z!0*1vD`nl_iec9&b5WlWd-V;W5?Hi-AoR2!g*87d^6)mk5H0(M+pdx`ubs9xD0#Rd;}4*p`9&t5`=z} zp74ii2l?%fXcI&(J_$d;Uovidf^#_?$p6lXIZ^cN@BO{*QO!7l%|;?zTwO6I51{F3 zE*!}zr(JUijM-U3a+ruEBay!oOm$Q*y|fF`x-D&G4A+*~diivt8LoPXm6MYpmhv3f z-gS9CeDylui#TCTC#C)aQxy_2yKqLU2Y-j`;S2H6;c93Z|0ji`ZttSG4vt?b9;4DD*{_nZ7xk=CpRf?29kW(((3sDv z_+{cH_rv6h7rTqwT+|CpYVOs%VG_B%+`4aHlc~FF_YWT7Qi&+5b?rUtcWSuzMLJE( zYmeTbwAP6i)1pg1J9NSJrkM6OL`)O%Kc~*-UF731V71lSC!B7&xjr(e=x&X>9f@Dj zTE)|aOsLmhs!e~o~XPiegKl{tSk*W2Nn$%l8s+BgQEk;gYO6mv4% z2|>Bg%=SL%p$d9&aERKoH0Py6R2a=caleEQ#+llWCGg9W_JvSD{E$uM&ra9BC2lA$ zL4YQAG}eUPTkS|0Df9k^i@ZuEdgj6P`LzsHW@ELV)vLkM& zZrX22USd%fJrOfqG#~FgW71T^9^3KSDDv<+$%HImCeh?DBi(V0G=o9dK!FwKS+f{deuW{&V3S5_cVcrRah~;yclccg7)8R%-Ef( z?1iD#bW5aXp#dXgbn6|PLy3nvzdQV!gl7Qm`Z8)qA|6C1?;&`jzJZz;_Kp=Qc0zp6 zg7F;%CN+^Xm-+Q6;v24n+4-ax)Z|?}$6={WfFaccnU+%HgCaMkt<=9J(pR2Hk9rT} 
z4%m0m>zWHGy_5Nj`a@+l1$Aa}cPd@pnECswAF|~+oti51!D_S!76Z9FUdm%Ad2rcB zQ6n8e9HDv$2F5~cuaex?BLN5&I4f9hh@r}#DdK*l%6}C4gxi=zzQr6;!uv--X8P{i zFRg82EAx2F_KZHg^ERPxt2`kw7sIH115Y_{8JDAvhq~o7gjTg(`VKoYpd)K8RjAV| z;fJ7GIg03aB!=V_CXgbr@HJc0NSC}BQSLr%IvTS6zE2o#bqDpPMPv_?A%Rixg zWS5i677Mwz*in_79-y1xR5iun05s;sA?49dwX*>ZDhIG-wD!Upbm<2V3Pcl`+&^4m zBXlwA?Iz{hf%MV9VG;KdJsm>b918JUrtv7=VAwx2-vg3a4yE(7(=miieDlo3&$RYe zA9iqu5nP4*li~&>q|BRx%1uL3@vv{1jWV1c5|j0`MsBU-()`>=qkn<>^Py|Ilqdx> z@yg}qnz|lLm<6U|-KJPg-P6gmD%ldtC?FU%WrD)wNy4uHh_>w8KV%p}pVZE}t;ud0 z)XY~kR?g{(?iwcvuvg(R0vzIy@}8O0dFC7BKP@P~SpP zOzdaT0FolOKPBCRcKn{G=1NsFgk)b&p?p_#o^6$-`INkS-Nrl*L}Kb2-5!!!yr9Q{ z0kYXjSv@K3oM>&`7NbFqFQctrsVUt;wEZs~^Q5){vWl%V@OVLlwZ+zL-jf2}K`WOR z4xbU-gK}xlM=#SOLehweM)Wg@dm z`CLpBM8N0v#@cEPZeYtfoAi4-y_p%*n#HIx{R_l~Wf6Av{jI$;nUDC{?<(cg1)kqP z#^^xbf1(#67Q+Cofary4+%2}WrS2uR;o=GA*wju$#bqP+l6^4Do~*tjh94r329DdS zLH}r2z60VWl>d%AXLCKigL;#EZ_EmMocliM7bIQ_HixFo2xn3=OWJGv^&Ce93Ifd2 z4;B8rOn2LU-`p4=uqsmrn+WVT{=K;rALveP>kgYH0)H;b0o?RYr()vqopnzNTk0&V zBI+zgBp)USj!3IXW)hNayY7$5IS@EWy*RN;FIS#eG^_)iUQFShyMDg(Pq5h65e%?5 z$?#OYBglPjU-kQ?ZzvA+W@UIcv8T*qOYYso z}&+jF5U>Z>^% z8cd$81@y65?yvZP5Bzik7!bh{ddd;L*oAUCq{^-|B$< z#5ioM6sLnd;OfVNd*6kPpKMWXcj>$7VCuEG_?f@-;}Kv{Y~RwVA?a zJ-nwhRs+THUSf5+X^Opc;RPfoaajxkXz=(d_SQ`qNkS0wj9e%tov9XtEwws()7i7F zm|a%D^bm2PBH#HT5yj?CTRb(5^Uv#J{)T0)c>FHsTDs3VCLFzp&`yS8*YgOOtZMv} zDS?5VZ?&NE<#tPsjVPyn5=!EOJz5{3hO5c+9l>KN*2Rr;8#1w#?mJvltM;j!4C5Z} zxHV^Bcz9Tx<(CpoN5G@%G~^f|;hFDakB5Ex8tC)EJRCAVv0^3ha?Q?ELdR0uQa)>H zFXW3guItl@uAHtNMhNMdDSB%2rHQyRv)aA2Y6nmZ5xo(8#}kRnJ!-_ zql-2_pJpMct_bKBnzzJBOj*pcC{uJ5*bwER0pTapgo>%NDbKo7HohJe?poOm`bDR2 zE)cK$+Kx?uES*81pS^IChNQa7+{6HtSfpL{eVe&8B8Ah&x%OXD9{AsJZ3hi|yWqxU zP8H~@oHjEhi7p6cP!DSZWBuwabI@v_%3f=AtK>fg+eK}AS`iQ`D{H{H0_bJlK4?=pHV)-41zRv=|QG;UyGWr$Cc zIO0C=<-Y}dw)c!QjQd<|s6JMf-ez5``jSeUfYK<~-hZ>t#$DB41YVp}Mlu(khv5(H zBUW5!)4-sq4vwIb+)h%&!zFFptlKiIZzy3*s1zvp7}0-qsC;tKU+y+;KS83su$53% zS!L@U>=gv>5>rcW={qF?Fl9klmtZbFZTROxK|yGOlSlB5Z` 
z3J<&@*!LgX`^3n^2_TWfQZM3VHHo?>b+hR!&4~yuTUs5~q-;pjS;2&@9e9}jrf!_hl9GKKgWH#Ro~IO>i%en0N`W>ci{}-> zUdzwq>kZ!s7+t?Ye?~DeCk>1(&PhR&0lK_pGO4PZ^^_eHB8)Lf>3LomRr2JMTpwX% z!+iQRTX8BxyXxllkgL~Z+B3y?h)ca^I;&tLI+OBcF&V*65UG#DhXzs)(YQA>`}}h? zrTBqXk{;{FggmOD(;$e{M^#buAE|G0e&&0URQySHzTx0rrH^aakQRG}(gr$}V$q{m z#KDZW%T~(Kt+Kwt(xa3XoKwMk_KB7^L`RIzVAm45>v;=czvmG7WPZh5EJU@un}7{X z&V#h$2ca;(i^#dryeq(MFfBv+fDjO5cLaj5T&$`flv0rUx#<9N;I`h z(6@uK2A+!^oOIV&X@@{Sf~Af!g4}WcS5*_03~gF`*GD+N^@~}?@ZE;Qqd2`r5jIP6 z?0NQXCkSDo>KN|wrIZkNE5EcdKvhu_TU07~foX!w63$ipnyli*4+5Jlsg#{b69fty z70k>JWH2^|s<6LLPVvv=M2)(?yrcJbzStxA|6Y1kjfFpOu5ZjQ|G!v(Q9y7+LUrz* zwT{cFx)Ku{A+^v8GjTP9E_u}&RlmnQ=U?HW$WlCKwQskB;B1*;W(=MH5QdK(;F%JO+FcG93KYwLqn(^j3Yq`@AO z%N36=ExzvnyaLB}>|YmuNUK&=wzwtsk_*VI&$!>KPIy`zO)p;ZB~F=rV4>XcL_SHB z&MBRt)-^Vh5|PV?X0sUcB6Q0u7jEB|ol4S!K~0iL(kjyc((04Puc?oJ%`AzLyOzAv z`2NC2onW~5c|F5|P1xJ*4Uu&P^1~{wNMOl}0@o&xuU2Pw5q!0j?e8`3YpE${K{NTiHXSyqzMNMP7fX)Ku-4x(XS4&kST zTWH&aLPDvAUk-mcv$MH&L?)(f2_1ul`f4`9(v=evCwaDBxDHHbKyxle`=Z!`1ycOX zT71}u!gHy{?#VpIsD}p^R=Sl0PO+R~_}=M|#G<{OLZ9_I$|9&Y*G{6Ca8BOwX!(^u zMl-5-OgBE6smoy{jg=S4-0sOql_Z*;wVfzQiDru8qqMaB({62I*mtY`e0Q-vfyZ{I zslZ|lgO%>C%U8J>;=e-^vM1S_YW!2FQCEUMGMXZs%y}=)lORW+sh|XA{Jm0PQNi>; zSy~6=e<^sW7$&_3LEgWoY_*UwU}Reb!IEsmOR4%Nf#N0Z;sbK~b}C*%PP;GQTWQx9 z*7pE^Zpaujs>-e}%@B71NR<(E;JlO_IUGaMzCq2U6Wi%JC_xVr2IEt=5u!w!wK+V3 z1D<*AEK*Go=etTN)Q2Y8f_luPG^418Yvg5{PwIA}cX%6!|B-g`>yTi;(+3sht15FJ z-9D*5zSSS>Cei8B4nLlQ#qn{Dr-*2H;3I=LgFLoZxnpTP1;?DF@NE!;qbu=BTZYe8 zd&1M*;S`@!*uI&KDbM(YC&!fE_F5Yod8$Hf598sG4)QPlO^1D^_x<|fARRaIIOhr6 zVOTN|d^#Z0OSr@zc{=ym7N>~AdW+%l?P0P#G}7s4A@)PSI)0ERsr!}95^ZJk`KMc3 zFL?)J{dTETlHu;{z^{zkjVl|r3`aHP?*5)%e%n*d*qaz+Zo=8*NDBLRe4$E&OO*Ri z$a65s#Y%j8eo}j@f9zoAU==GmA+G!Hgj)R7ZOVbdNyZ zp*fDlK;(@dnvnCVPt4@QL2BI9 zi-RJ>#}>wv)k3)xX{OoNbQvHXL|#Mre8pq3(1%yf(c>NHsue|x5beLzKNJcgYer#d z;tG<@$Nq@(^O#x8| ziF0&`j+;$C4jG;pt8L7Va>Io|jx7DcP;kCdQEaR`*{;*_xB_q-lg}bVs3&|yRX0B_go}o%z=%}jx^qL1{A*GS^k=D#%?#K zI~m&*)rqB9z}poaKBvT%8%MNxNqqgTDV}(`=9KS&6Rg-gS_mLTR{am`7CH`}S5;{X 
znUIuhWj*R*sXsd*27Cq|{@oym0=r%RfV;@x!;_brWh6xKwk_;_VdI>V!Z*g4*iLh2w z*+eS(9R+3H3mqC62Avf2B0%MwH*i+i(||X=-@)`iP8WP10qS(UB#?ryu5X}xF$RU+ z9IIk30fZ#ukLW0UkK9VGTE{w8MfVD~QE;l4CW}RO@(79e!SW@aPjF@91-*%(lKtw^ zLAclk5+o?*@bH;H;e17^lkn6DxRiU@YzO%c;S-5|)u=ENwNP|jv*WzKN3WEun8o4T zgGZ*^7s-W8Tb8$i*0$B^&SKSy#&N34kEd~_&dL(X1zAO+d?hXBGMOQb)Tnb+V7qvh z$jj9s+bG-l7|2^V=1Oc%Yk+aP|EemPpc1t5D7uyyDp=0f&GuQcd_j2j<<@bl5sR1C zeepoE-1Fl(fO&JHkO9xlS{*wSI3DPp?U%3CcoZJeEl%ROh>AM4_=LEpBcpNp>z<<2 zMaA+;z%;3LTF7s@$$6>TKj1;0K#s!U_pbMu!sevDL}7_(qr&ZM&mze9`$yRoOS zh6eQTPH3)~HrOX98T_%{{0GNRU9D_b66!>aJ<+Z7eCLhRsonx%L80fu{YvcZBxR4P zx^Z2UmN5Vo*BztwPqZ<7nQZoZxx#a*J`E9%&T=l4IxL6sG@#jayy({LQW#}_wd3I~ zkL#oTAZ-^vJKib^G}1(*AOm@GwSQtXS#$6ii**hBz|)11vRIw1M_!Oi;tpqRIsFRx za^KK4*a-8*Z#(`7AO0UIKe1hU3ouw7tp_h2NiDh7R2s865~R8-FLo=LVtGUL`n+T+ z#pj~1nD3x>o|GTW8capHOo6IR?eBQ^|eZ6$69)R9)M>@XVZlxhl7 zB(%O5BM+-`nYv0yPyprkdd>+N)D%0gqYE}vnV#Opb=pMMRh97vUpaF1TB@>Zq>{&* z(eS>JWu7h|?jh%&vwJ)`@sFu!ti#g(Lp(H&JQlb@xha^B+H^_7CYxbyvtT;6`9-<> z2WdjSXPHw)ANeM2LP$u*4>?k}Y)Xm%&s2V<{Gfq#V7v@RWLnj5tX(MyJz%z&2RD`$ z&^G|lNS6tT+wphtm>;r-g1hxQH0*+tC`*p+&sbzzZ^z1pyran<&j^XNZjyK~>uYfU zXQyG?&Pgx$aK##s)d+9}3I^s#DoIDIhpK3_*Tvl>;$N&fS#3*7T#b$$!di2lUi?97 z2BCu0CvSH;eI4#~>E++S@qoRmFYzO-RKpX$SQxzqzfKyl^9!nFYYaEsn#0+Gk})rc zF@F22ey-^q1TrZQt=g{6N!n}tu(@}$gb*IUTQKyB?tXyjA7GLZp;owi-vWOmbb2Iiz|T#}@UFKV&BeLEc%klBBi>|nU_)7EdwqH>qfg6oL&2O8 zhFhtBVH2Cj#Qo4rJ`>wQlUso1a9|Il;>1wQrpe}Si|flVzM^sXipMtEWm3K=0ybgHP0MeZQb$I^`#{`n=(& zkMn!9+BOBY+YN5TB3Fi^&!_KgDYwen3J@8uq!&=PjYh_)SRks$Kv>L+^8MoWO4!7c zsqn_iq&egF9@!mT((mT84twzyDqwZ#m@;Bz3T&>}M zuMoBD4TMi_TdV1B6zp;pq1cwMccSn%n!rKW+P6={7;9^><9P&cpgBlqt{P4cdz7aQ zDK!QZ_cg^5^)jJV%qt=@76_|Z~*K;@|z z!=gG$)SrZ@G>g*IyHe1QY2_KX=-BaNZEp@;3}J+b0Lb9 z^VDUU($-B8?8Y!2m|YCqCT?Ygu{}VCHLhbx?C10RqYY1GUf49Y>S|A z5~-XqTqa!@B-7j|8?@Sv(aH-WuUsFUFS1RY=g}@t*0oK3ZDZ;!l>3-f<$D1xEg#}0 z&_JL|;dsGfZ}ocz8tG7S$d>a2)oJ#sAH8iU)s)7}g(2&TE4dY?8%+jAMlWI!%*`nn zmSpIYI?eyL-td zpWi1homL|wr}F>ddI+;IUw*2`u4_=BwpWpa^mNM>C7^Kz+;$~uy18*xhtSR?*XHs= 
zm?^yuLX>EX%D2d&=7y5W$52l}W#Z{%>fga`RjTxzvF+S#h39vE0#L^ULig4dwt|F3JouCI6PV596x(jKdwzVw zgaXgv^GaE7g5N=UU9B@}?dU+o%kDfV4h;Np6Cv~KX=i4#1kwZRI_{BUpw*Y_8>|zN zsUeYJ{I_^Te?>}3{G@M4#@o@N@vk+)FPg&xFoZ4&LN@AGBpklv|1NdqXFP|4oxsun z#|O>UY`~Aecyh|sSwfCaz-3%q9w8NUQGF7a90@(y6P>~3a{%fmn5MRc9|>KS20?}I zMfk(ES>yFc*%*tfkzsL`0B{JzhT%NElJHJC8uPRAp21a%1vN)%<1`(HflwD+>Q2L7 zq~S{AG6-e|8CUF89_n!gI5Rw6tiYHVeh_OWwD9wDP(sY2lb`Pi;rZ0thC12P-Tsq+ z0K}u&^X--UHF!z^zE(B=zJ*<4@=!TSM-YD79{jb)^XvMKf?oWo(V~A|tKUL^fEIC@ zV(<&ik?ybvRyZ6!p1-5*pPK)-FW5oR6Mim}$~TCV*JWu~uu3w+$G1+xuSCFRJEnhC z;I6)R{H}72xTga;8#CD7HOCnm#<}jDMq*-3RK^ z7cx9z&Vm5^8|gB}^DlmpIYTfVXoTKj^%manm?X!yGEACW%!5WhBnR%29cAL9#u*Y* z=ZHZwZCX7s8f2u~Z#5fCP^lOqSu(xoxo+0?mgCr8R;1*k@#Iu-Qi)x&2e(OX!NLaD zJcZ^{Z_jUUiAcE7v{cB!vJ>pkYoh{guE>_Yofi-1hgsmZp_?&YIL{QnSl&fSr9{hRMjx|4Kl+qP}nPRBMY?2c`- z!-{QoY}>Y-R5ZEgxu4(6nl~_i)T&ywcAbsd`<#u>^}Ql}8RK$>#DPdT$gUy@`JXeO9%8>Tou zFCE-M0Uu5|o7O{uRu>$nJ~+Radp9yaPs)v9orMMU62=!e?V#-IE+2*WQbG3U^>TT5 zldpQq*DSw~Zf|dYSz21!Iz5fZ$N;_Fb&feg{T~_FCM6hdIC}4a9V3xrR5OqC-Y`C+8q%`m4?=55 z#wxV1#)CL?s*k!bVM*e810JkjW5|a;nAD2{TSV*J-Q;4%z}2<-g^8!1SETuBJ8Pal zmUvE^OXJzU2s|RsKrgMY(?LKCq%T$c{M5Tuwz^f;RMtQ&b)1<0sw~C~t*j_6K5DJ3 z_{F-GdWj(T^|J8$de-4%Tl83m`9YTFVJ2Xb2j$O#efbZ}#J+whpPVl$w|gmcWRwXO zz`37xX|ILN_xNtt!g}An7v#7%h(VjkMAA|nS%a!s72XYLgU8FyZ8V-EEnHU-XaX=_!C>GvHldEbQL;R|M<6rdk< zyVs;YDjt$0J0S`xsGKz!A4lmd3WJ9yjG1-5v77kA)*n6kx%@tmP?Nw)sh9@aR1CArz$iD%ACkD6 z%x7eHxF05#y~$0sGjiEu!iMV^-~==8pLMa;!7$|m z%VFY}WZDtd+!+MZ_xCfu83Fywpk2>bTox(9u)QvM2!bin$pDb(c?W&kr|lY~1W%o* zA5eDKEBMwXy{%wMj{2TZqdS^tLd;PZM+&epkUSpPR{P%>& z5(n8^dq5_IZqNQpci3y5;>~P&U5Y9GD$0iva<6sht%;l5rw3AV-zN4j;7pnO&EAc< z>aEY_aJik}C@atU)r_moUF62K-(P9s&-~j<`8@yI#{P zi!%x5G3$p)Tdi{f-1^IN-DrQX-NgIZx{b(T=R59-Wkej3H(V8q36qQ;Lp$oN$^cfH zvDCZ8r>K4EV?tWY6TmV+#(1?VFshytf^s$8ta@F|(X=ubjUjFk&t6|R-)p(p9h?K; zBx7ao$IkV1=c3`uBt;GrC64>Fi~oF+vS;?BQpC>SKUA1hN&9p^lh5Rivjli$&2(J7 z6>@oT(Akrd5p>ldFq7sO17{Xxptj~(+Kw|eo@I`clLt%kE#-ITO6Fp^=c3cP7=W)f zsR>kxvzJ0QH&w0JMBfLPyA 
z&dav6WnvS|){6(RA7rkNfYse!vPrte3$d1nSGLR?H1am zlJA`vVeRB40=?+JIP!2-*AEU(jS%~Ogh`rpu}Mu`+Q>!jyuCRU;{g^td|P6Nu4#1>XT$Tiy+GqLzCS2*71LSw6i;7~oi&oSfWI zt+g!&M|W&ETbP5eGL(7T%RvTJE(zORJ*bDrnZsqq9_*F6Pa4}CalII)sTt!HNm zF19{XpLHo%UI3}Gd?q*mTi6pf#)X96Gqz57La69SKaZ_Omq__C`6aOb?|=WMia6g^ zK-$tq8o7d+y7#8E)`>$~4ON=&Fx?`r^R-a`VxToer(P+YA-!s5Nl>9ZY~pDyFKWg% zkWs1VjW*3`15O|qNqhZGJSmu+b!L`bsdTPF9~`c)H=186su}sS8e`ZcDnzUr|WcZW}N}@gY9pMpCbbsNkEo~S{Etds8dbw#6jGGox7!q-d^ZMkL_Wuza?m{ZG>Xf|EP?>kSa zIl1=xGDk3yJ5Q~8j@VifNzA6wn=#hwhhZU#nU!!y~J7umY9UjUl-lREM)E6oDOTRaMgZBDx7&TumQ#3ipHOhgK>+zWnh%T1K3HMk|z~&5hU$-&uR&#W{6d!CO_N03+|Xp>WKs zC!~to!cxlLWdNp*uEN%Q8L8VUJ}Hgtkyg4AeqT*?Z;hewrJ+pz9AE~bEN_&hb3FhP z0a&2qEtGQ8$D8@f1i*Yu!pEDbs-&&!z90ehdo_DK;HK&5lHu5jb6S@O?C_>J z4#z@MKlOCPH=#}p&X4LJmaBx~5g+#-HIL$0>V@1m<jV9fSA@~`xRtd|O&gn-R4PR@JJ`PyPPYr5dKjKb54QIR zl6ZYkMqj?2C#fRdENA`ebDoFX3I@$_LRCIMG2U?QWz!$fx9 zYX$yO`+R(ZBptVcb~iEbrmt&0bZZ$4Be`lLEF%Ts@=X2OLBYM2T$aI!K(E%-?9$wL zK2^Nzm6MBCAlzO5mHAm>0rzHrLGiaOm6zwZW_PB1zf?yQ1DwUjcHgVi*vj6(yg3o( zA+^Hn?^{i#2Ciop{v**FhYwkDx~1QP=NO`?Xn(9u<7l3xemJUz%sfHphz28aUy>nY0_X7tA@e>Gyy3-9@iM zvXCN#ZtH^GMQaD?q)2Rh8@$2hze$`641X7Iv-3sc9g7(h3cNF#USDlc=yu`!5#8!1 zduL8knt2;&o#1HDyW4y^O1LL}?Jw#b$DZR%#T*9*Lw3QN+$2Qf-tY;r=?FPMNnvJm zvcKDU8uUN!CvfSF2YJz8e@X^gMg!WPLjKDF1Y52S@gI^8@+|vSxYe2%Z#QR4M1B0U zm?tW4U&`LUkFuz~;znqb8?0OgTl2I7`Yuj^VSQC$&hn`^|`37d* z%%UTOA3$nemyYb*T%&8a%y67t5amfkum)<#(2j{r!RZSF_6jBR>~zWZ7rpgaIv4or zzI#PR`STeHQV)hoVD`>jvtqi4wHRM3_q=)2X^Uz|(WQt3pEn+mT}>IcQN6w>S%r?i zMPFL!skjpC6WZ@{Tr>%R5${d{uT-_aOe|-NpcCzMCyS_1$=lX9|K3^YuPmPvqVJBH zip7mokNNAAFCn} ziMi(rCM3PjLCdOoDX7Qi+XYZiP)}#;A2KQQ)9rxU7(WCY2MxXNUr%7rmvSy2d}?+Y41^wNaU$|DLb!9 zm$DT~+ktPqw`3cxqyMbk!+-JIo4Wp0xLpIyq5YV4%>LwA%mXIv(0o zm*mkqy}ik_E9XnBU}8A1TD7#5=_0AN{(`mSCkN=;{prx!B#Q_F-Vbbwcsor46I;%z7VpwpUFz3at#>7?bZjz?lRI@K}FKzMGT6C3Jcrs>dc4Ttjym$`H zL{tpcAos)V5{gw|8Mxxw2iw!M*?-LXdyL!DNjv-8n-u zR{ro!ErFHw>%)clx>QUWCT3M5i$8w_O9V?)|0woh#-WB!4uprkap3VS$84kyXc-tx 
zjK5}#VoVz0k|gwov*~iX;<*<5H0HyR2Naz<*!(l6fAv~wWr0oULE}2Tg-Y?trA)!- zW{mK+*4eBbE@V1XSSO`cj^Iu*4?ND7U!A{!1)@5+or+ED&HLV(bGEF(^zWEx7UgW; zz5Z4IoCoI|uw-7e1Tq)JQ$(p!!I+WSDF8Iz^P5ee zc^o^1ECMPxKXZN&xokdC&E~FjpJAI57t4c(`@64C`3|?%;yxcFm#c5l+w1rPwqWs= zaE(` zAQ5)9w*DCW`t|Ee+|};I1^o`QOEDs1@h*#$3HHg0k&vA8w8y=3E}Ae@F4AO^8?ss! zeCT4jY5@j7WUN$4(q;LcnVb(#Gz*CYW*;p;L9|Hy#D!e<>@ry`F~?1=IR4(N!lPzE zNOP$QRbh8tsi3*7HjQ%8GUCdsU75B1g!Vz)8|nA#@5Bb@JtgzFx+P3Uerk4cnhm>G zdCfUJ>Q`3cA_A>Y&(YVu!9C&Va9@Bk%pnPpFjP`Ab75xnd8ffQKUp=pi}5m|^3GL4 z{o`|5;Wgy-z7-Zz7-wgk?dIpKDQKd40z0S_rE#$q z-Q=D*=9uCsr_3c^ioMxFzjG90ZB?WooAJQ7xUlLHPN~d@neZmRG1PM_bPO)K$RNYy z^1P!zA7KbpixCIO z2JQE11Kw~vYb%19>6SRk9Dxi*CT3)_7VI#xCcB81`MR)&)wj>JyEyOXJ$-1|HU|n> z7;0R{(YGa`Sf@`rHBBpm*cBAMkFh!Us_&iwd7Pv|^1i$#v^PF4`wCK|6a6&nx(sib zuMmp?cSH?b(V#6iC2Kv4h?))G%E;(1HZv8YkD%?DYYeQuoYF*Pv8QQ-5cm5ynd015 zY$+bHOcTR2t3hoUWkUrPO3#<`CETv!s z)n>pLi=f2O`I%08Mn*KFW6oM5HyVR!xr`FfPejp^hez>an3s$FsJbQ-%S8ej?+EuT z1I|UWYM+A|(&89dnK4>u>d|J`bNmwZ7_bFu>MIK^-dD>Me0bL6?k07#W!vvci01g6 zMelii`nRXP?P}#*On0K;bg`guNvk1|IcbN|4p7BXp;vS@x7(#!XtOs}v$7)&pmT{% zThaBeIpCp)|m1uRjf|#GC5GFwKq0Z6boes0}rn+2F&eeNnrYAqdehw?JOhGFWY z4W*(Y6IbWrRRhY_hRQ{M#F#@R2aJT3irEIG)znl|E9$xEIO(^?oEu4v(riW$s= zYlKjQUs@%i0&3HtNO~tnQ49`Ary_)*h$W4i+v9gasW)QQo#j$-Vy|}JVj6j}?3PKib!!YHb~ExGVlP8OjFc~ z(1HVuhO0CcJ=Eu7g8P0`{KLr{4D2ZJ;(4oF!(R&UnS z8pTR*DXmiho5@ls(WX2{UQw`iBXL2Jo&B4UN({Pqqooq7n0K^%SP-3R4`M?iOB-muF<^ zD^97T_uP>s_;5uTfO$r1Ul9x!$E7kcsF{{~^<*C$Fz&NJp(MG&b0=8q(QYCWVWn@p zVizOw|1_eB;|THRBNwqF6B~MK_pM#J5ZUYTLhcT0E92S$6zPBvoN_KRw-QorZwH47V{@pLo$wlDn ziVh#J!Ln-11sC8~DD_+i>vx;<8Cj+}i>))FWOEN)U~N4-AMF>!=J)&KD>o*Ml!bm$ z2b}2d7>e&>a0jYU_wk^=Zi8TB41U2l&eRU3!dxqzS|hXLwxR1>TsNU8&l0OnM;pp# zcbLgtsUZmuRF&t}EOPgJl{An_qaeCNUo80#xJKHx_5?dXQt{{fxN#KcK>s}|x+U>j z7eYU&xq3EzeSQ&;rRf2du*(ti(jRU(cUfq3=jG^bDC#R$zO~|0+f3^f`^wr!01=>< zYoUrWAC1PrU^y<2Jooi&i*H#PV?eqfKP16put;4@)7~AuiGbQD*HhsSJLEDQ(b2O( z;3_JyMl#epH*faBTQ>8<1QKXpiEsn2$Z!1Xlj}>=d_Eqf8G4Vwq$(B$02i+1C{c8w=~n(pQ)z<|Y(V*c=B7Ck%03Z2y!O#c*nuQDYRBSjT{- 
zIOBL4l@lyS(-xH3%O=D5Dn6<^j~{Sn(K9DU`s8!@kOBrKnj5@J6)eLGj?o$%T zmt(~pjYSDD~ZZ_3;C;6=Xrtd2K!)4v@Z1AX0JZEPN+@qG+3=i&Q)~RX5gD-rELvVSFlL!%WKihG%_`ZS2oy(Y=l#nOu5g}ahs%` zYjd0%(FqyB%1k*-_LB^`?uT-TD~>6y<6{X#5#m)LDzSr-RxVxAs|}~AQ|*|+RaK4_ z4?o3u&nD07jHyZ^OWHPXm_}WZ_-M?w8JeD9u`k5$@lrJB=lp4?PGtB1O+(S?**-0e z9RxuYtnHjQUA!yJ+`w!xu#Vy{Jxl9U467^e{oDM@c`(TP-BZXcln0K<*ZoPcni;F@ zVVB#9d+qqSe)tRI6)DqP1I~Z$uoc$%`R^RG28TdgKU(i#-df9neT}E(HYV^peDoC& zyBDwV>sdw*#xcCIIsP=o&Tk082WnrWjxg$Zb@m0v%XR@LGt5Qj0x>XrAKSmWdD6WTs+77-S3NS9j;MI>XY=!*g4@VhFPQw%jP(Cq7ic^Fu&~ zJ3|3{!z@j>80|ns@&j?s8%YCbS%%=joY|5;bIuG#*?pX9Rvy*W_;lIhz3yuHpEr@5 zK~ww>D+Hj+-qC&Ezh`7p^p5|0lDY{y-A>f%yLSN16b$qZQ^uWtS5sP%eRKe^Bk&`> zXQnYBHA&ZT?IYTPArjC#ZrjmYRX&AbWmzc=9if?HJQg{JgrTx%`*va$bob$2N&xb=N)09bZBxd*FD8L`Am&{*ml z;{y3HmGcg<;axsVY>00lWW@3FCTdw&CyE4dUi)EU$(2_oAPR%)d>^0&9x}qrRus?r zFUvh<5-Ma)jle86J9eVjDFDv>c*R9~!`A()8un90qCsX+Oy@O`WZCnnQ-ln`LJG~^ zw^&vUXjDA(^gfCPI)i!6Y^h%M#PBVszu@SAZOpS}yf6!!xwnzwUnnChhLd_x!NG@; zSz!KzMNg(Dtf2>E{uHCvLG(P+%TYwSBf?FH4;r$q{6Yn> zM;+iA#z(PEs*)T7keQ*{u?5P1*V%Ga5$oa&er>R2dbyR`0cE-|=`f_Jw91z6O$!I> z8^|ASP*}9@L!p#viTb$%*kru5Z%;SyRaw7NiVZFsJJKDD)Eow2#Tmr#CZB7$6}(A= zmBnoI*Q9=wiSQ#942i#kv18``!Zi)uFx2SHABS-)V~%(Uur{pt0?oENNJZzEb;HA- zK%;^sXej-l%WlHN_+=G06)Z`ceXwsiN`ORXiV)^%8+Gu#{I7eLnFNwsq$!Qak-H7N zt(x$w+HQG5otqxmP5e={@hrga;=!>UvNT1u z1|#RpyIQ_xgx0vo^`8r%DJnmwh77RV%;#Y{vrvECIb7~2{W2ZLHJ*5UTVx)_xmakq zGba3pCxH0j%`F#wnZdXG3#4_Az}s)J@g12NV*L$oY=KaDbP2p061 zG9e1$_Ha1aqebH@y)m|y1cROSAyJ6fE8xTV4H|R*k79Q!!IHYU!w1Be79i|Un>>VNfzaP9y=nb`a7`%k@c)CEW1+%dIu>7a^X zFjNcHxUlnu>$fnS;`Di`xA6jx(cfsRj5rs=ukdJjvsBtK_!XlpW?EzRJqk5O{U|FH zJOkCO)6NQo^fF6#TfDC57eAE~45=<;F29H^tnD?icMD1YB5(Q^Z?Er(iw$rG-^}z! 
zYu#Y9F-FD6fZlj_$iYbIxs{At$GVMipn++7YPqPG*>s+ju&+pO!iCZ_R4 zUJ{pM89RKP{^rhm%-WTRds}AfNuTfRd=FQZGK>mUuM;~;ww6y;42KwVy~2}d5d9L4 z%s(lA$Iw^r0=HQ$@3RHuqx0M2k|}T~YU}P~>E{%-d&J1&cHGoE1_nn%5YXoppYVS6 z=T_i$Wd;y8c_|mlkLe7zYAfeHH4~a#RL8#64Kn#EswH;>lmldx7Iv7BxNwKkwyh!k z|LSmfsFxzSE$_m3CB>bd~A9HR`kWycf&FH_Fs3>u%fawiJ=r2*@DrY&l zvqt)xbLwYuz|B!95c79x?c05ZBDNTmVE?%_|6ZextPr0q(JKN=yY096o!ak zbWnFKy|WnlE(|w3IrQ_0C6v}@^mYrL|I&wR ziC5rs#|6^$+Z+_Eco~r5D21h>xY=u$A7G_Qp#VJ?+>!;}&>-M2uOZjO9o7zMw-)ON zb9TI?`~77h0F%F50Izhvhmy-yTQFupeJ|G)h%LM99+(0S8K?0Qn?LAp*)j#6z*?`j zg7j^prT-RLt|Yw&0}qw`fm0RUWVWGenlo{%x%UL_MupVx8;3q538R!tHfxP_l*p6n zdvjTlem{pt=Bwanj`;P@KF5imhk~qJ4nGgH!hMBjrFi53l@rsMSD&P^uWFcUggW7e zPsJ1=X#%cHUK0X;4XphOg&4QFXi$g*Zy(|8e2e?8R(_bDFyu!lN5}i~nKNDdWH?dd zlepaO#atWxy@L+2>J9}9a=zVogtKfNR}l4jR=s_6{k*_fHMisGL~Q)S z{m}cU+C}kp8|p)0eWJw6B*aQGPEP~+YKs<&_Q_O$57qKBBU*}HZVRA%4Ya~bG@P+G z>w~W@u(z=OlU}b_YJhcaM#-KQ^sGbG4QD z;ga6j(1_~|B<7s)d~eP2)`4QDV;m{dx--|Q+h!{|{u?cxe^JRN=d=l~nCp)dZx!Ec zI$TLf+Sq)E6}N`WB^N!{Z3OaYvPxQYJ$IDMr!CNChNX4z1g8`zE^g8&0n{<^ckN#c z=zW#*(<=t1h$rEr{v0EY=qjQB6jnFUHM?eoL*0O`{8JG6J@)+q;emu+bo6U`>vlNX zf@rI_@a0_Lqb*kEoWe3k0i4PP-0ow52qnQ`uAHGoXzoJJ<5%q=q}wCw)Ck^X0f#qN z)FYq3x4+jF64e(j{-t5%j@08fiiNYZ3%sBJY-@OyE?0s0wX{PSBMYhA6(6m}S9H_W zw}Iodd&$>+TasEd`=RQF+iQAm_!I$&n)(NtwU0*%hD!JX+3eID%ibSQVLRg6;kp|> zNn&u!Uf{7$1?5@atK7XOt98z~F4;0?Af|G61tHq%fu*B*Kj2h5e_!7xF=sZH5f@;{LeILFR#hMFE_aj z#={|S-!XTzN8ml?biwz2X@)`KVUx**B?>Co43f0e)axw5bOGj{#n>i*vF7+?o$mPiay-#(mPS2l%knw zXKiAQ$FIjEK~bsIBgl!+t}Cy!jJzO}l0VhXxLn3^5!3teYFJ=X$DF8^|78Kh#mq+3 zt+|(i(88_pT^U?&rviBB8Xf2IL1U~xiXG3fp~N)4-~6u6)rRuf{jl5 z!?#PRT=Thr|4|gHYKwId)-s+kQp?Y8_z+ii;|OE^EY(_an;M& z0GovaP5?$W$2cV$ zF9Af&?pr6MxqdXzI^&OLQ~n-3Qwo@usM8sFLsRU95VZ)mU_2RSQ zD8=E^Ofq_i%(}#i#cXiUiY47(n|bLHm%DWb^kRqCB3awp+|3D;;4f;BY{o@CKLDMqzt>N9xfS@eqb;-lKal$@j@|k!;Xx*F zkMFLaDMTeb+cz*ZCwWvQaKWB0VPWN}cSR@%E8`2JZQmDQ`zw<ZV4Nn*@lC~{gyp!Oy!l_AIp5|jGMb$alhD)@OjW}_r*Fct63mnrKh}9 zRlG4`_M33tMT6g>R%29_Q;fg#W(=%H%ZgmpmEr0$#SWY|Dm&!T7?1iHLJ9@Zq0t;3 
zjAU2n`eDc~wLz|VV1^1cbVpWsrCEX}JDfNY61^jE@h_(3c&A_Y{=K1UBF0(FGtDP# z90y@d0)<3HgOILn=yC}v)UC4L0ojo6N>E5aAH6ud(5(FH2}pa@33E;`jB(#*}?i#CG1$qpq6#Ov#M6Y%gyPfMzMFgJ&jsl zOOYJKK@I}>b}Gs{zlWZmd@%%T?qrM4AweE(olyznACJjP%X?3(x5108liQ!f#Qq+R zgAqDa3E7&kBXwABx#=zaM>8=JHAZD6dO&3D#fRr5`u(wc`W zDQld5Ask5Bf!v`K92eIMf1znq#7b9L)645Iw}v7b-Z1NCgZX`^2ONq2P!A+?Pw!~hQ5{rPLRgEW! zzDc=LzqA0zs%u`(%vcP+9d0Mu7biGF598U%Ri@vxtxC$pmd@eXA8D3e7gy}G;h|d` z?eJ56Vpi1_YYFU^>`WfN5u&)~`@fL^R^19xR}muL`7rc&POtw+CS6EDsMBFCEGAZv zgzS3mqY)9=13ccp0P)EkPt=p+6RDm;NmeJXiG{yt6O-(CgnTx;k%QYqB<@hvVKyQr zlGKn4fgRtEe#VA3cB%D>ztkCMNWT0}Vx-B6tcD03EqA0)Y}&;7o4X4MinMjw z1+<=boy0tNzL@@LqhCN3yyN-59P)a=0oUYD35Fx}O!?wyJ5x(U<+tx4e`1@~mIdP8 zY51pZL-6}n3{}=sTo4T=Ddr-+e3I8fCMG7C?96`!vBXSFWMzqM=}`|%#;yLU!n-AGzJkc=v55;n~%?&*HF>(Xe`J*+_%BA7r3X?*{siQFG3MiE4 zcpepiHqPi#wOa|XwL>ZS7`Ho(6Xl`!+deHEU7pFx*E+oS;p(iJD#N;dea& zyrpP`W7kyIB4hgA%WuiD9oFpD0Py^>t@d@i{`#=Ua+ZieLv&R8set}LzdQ-64JVDI zDZ^*C3wSJrN2gg2Jj+0}O5sGywSf48Uk`8^x&zE+gLL}A)QpoV(d6KbS2CwZ9F6zG zsi{)4uWf1NHuDLNY)*`VCP3Yn=QC%^3K>qDHhS~ANk(!W)Sr@nAOdKU_h@w!elvx9 zqq=o5&G$l8p^yby*4TZcwx!gZPMf#8t%*rdWyc=d7<+`nnX%6X*RLyOG0MRikxEEg zdNRjq$Z zt)2igr~=f!zT~1i9)p50RaB2+pC>z{l}De?fuCnoNh)8Vh+Z&wMgBT>jxM6k)5ldc z9*E5n?J#u)S%gqI9^(i3TkR{gyVl=qtA5HqK%V0J5vr%dWx4ha5l5&6;fX*`6{*~> zr;Y&LKq|TjCR#y}GZDVqvb!w z@DxLa8HANg=mZOk0+!rekX?2r4IznKK)^X~tIjHm6F4j-OS6>SN(#81RrOMj+XlY|$o3Gb5#7Ih4u9 z;Y2b641zs<`icY)CJMp|ZHlU2hZzJY(a?o~A*f(`s^7!3eaj#Oi zjL3_NyOO-<7j;d`(p>g3k2$l^9AfFmtG{9)*D_3EfmvH;SR}$1L2~kh zK>Hc#f46=*;f*?iJIRzk14>Sd0}}&>$vXHlKapd^9|E5cits<{&iFeCx(kM4s#>>I z24ZHNM86+O7z5$RV&dT}N@;Y%U8oIJnu;_j4&2mU<)q0DHC<&54w|>Z7RLE`6QOZp zW#L*)^J!F?pgaJ%I$JFoAAH~Oi(#7>uzbAFzN!IK6%nG|u6asW>$*^(l7tPV{&0lDMhe+ zO>?7E;_SK4SkE&5Ntjr?LpqT}OT`O=iYT9)ykioJxC`$Q%E%;#d+#854@^-}V!HSh zQhj>+kHr7%3Vq^spLMUvn%3)Pp0 z`+jxpt6!)b*5UO?4ubs?+HxH;SSXWXvC$6bvH0JzxS3Fh%^Pbg(NImp&d2T6!zP{Dy zAtuyUp}PT;OjHz*9S4fzs(x!6Q4eGps~X+&+sODN#%{t>XenX3%?`i0T~7!DB_AYm z6&<#FzFWvIP-}J3V!*5j*gDY>wyRf3EA=Nvn^sliahKWMcMQf^`ts#Qf~Q| 
z;g5l=&WSZ_<$Foe54$Nw9{ehRf_)ta01!T*oU7^kl1E{g{` zrkjn@%>o_6jVq3$jtp1d7efqsFZsOkDsjJ6g;gLR6Ss|0u6-w_h0qgI9+C*AH&3_f6 z$RRdSer9R}62UfWy({l}%DB->PK?96yc+ZUpFkQR2o@*_Z#su1|2avEXC2tI-7H8O3*GQ9H52Ks)azMbsfX=A*m`n{t`UvfYRe1eE)&NeCBKV|)| zAokg6$|SGmhae7qcFD8Mf2FctJ_m9El`Q;!w1fZs_CtCueT;>j%b+#U67WB1Yh(!$ z-*tEu+@iZ+de2gM3qlh9=jxxjL)Sk3BQtaaPyErj*Z(OJgILdPIh<@>Z}a2UfAV;@ z4n@e!oW^-P$CjKR?>;LTMp9Q~>LqbYw86f{}752KK z?;PSkhrkCe5^tvqM2_&u7&_@!?d+`eaioaov2h!v|CHaGrMXJs<6QW^wLz8|8D>NP z8uspdbeN5sd~PJ1$j*v$x-UK!?0@Qy9`wRGlw54)j<-==HOFrQ83wS_O$L(6#>xw- ziv43l1~r?!`Ci_e`@q$_+(4B@X^u016O!})#n)TMwY9a~qHUpAp-|i@?oxt#(c9cUs&%xI=Jvz1e%e`#bNs_niAzvVJ6)i}l!i#vF6ZtE;P|53i1d z!O6m*+G$IT_e}7cdfS$K4y26vT(p^_Hwa~`D*H)KKV(xCeFNRwSwEFD8x^0+ReSFb$uSrt^+b0|*?-6@!+HVM|G@42xZ-&>v_+=v z+)vaY^0q$jaF5gEtur4EZPl-a7rpE{#7_Uc(MQh(>OC?Us(U2^^QdKaVn>0|JNkqz zUHE_O-%gL}dq-I#@$-MuNR`C^Y>OkOwNNe#$VCj}{FaKyT+wVk6nabK8g*0V_{YG5 zu#IdaZt=%*}mwd(NB0V#@xD<2a-17GJGWP7s zkGe%%Oae`@3{7Yr4^XdQ*Mot=jhi_LC1|v)y_W0^QW;Mi(TcE&e)JEzB`Ej(^|JrB zhEH;Pa|*up;Yw(gFdjCw&TO0D@=HauuR>F*IgzPWo@#-|Zkx1%InY`kyGEdjAp$ z0N0MqH5hU#T0IS?2!SkS4RBt7L6~qbJn?0LmI%M*%g`=Bj6SbVS8B5*>{+%<>#~n; zUw4l!nkvEE>!K_{IVLhSz1jD%Mk(OKWkcql2;fT=IcRh=&*uW=Mc`_nU`zqaF{KI4 zP1$uynns;N8kjLaZ_rJ&VsWlKE;(k&laFJuNO_I!Y;{Q@SHdgU2rUTC?rp9ZFy-)F zB>3ck+-EPiAt6E)JU=w;ilh-@bi-P!6Oc+WiF?O*>^h>+no+2OaHxp|v2 zZ}CEjPy07_p$Rp7N=)yv+Pf;+q^f2QSQPbrM83s>29Mg!#h0OnUt2%`_tmTc4NOo> zN2QS!rBkZR## zFue|vL;`hZk(wdP8feAh&zH31=1iRiFTYQ-V6%K-X zK!?1&jGbe&XWdaF`_88aV0Xrk-0cB&pp^mXNDi83ie>{G1f;jN`!gH=qX%rU@WuRu zWMmp!Bb#Rx3^q8 zb*BAf`uQ`y-MykIb$FCGoqE9Y1}v6B#$LU#HKtanzy#q@KpmDs)Yt&D2+&lFrW5M? 
z)R+iw5BE%@GyyP@?|E;-1zv!bU!4tRKOvj&&cp-D_Ck-`LR9=jh6%E zxkYl)@lk_i*{i<>2@aQqbb3!1bQdsabfeJm#ASsv1ofrhq?^`ZLd7WI1rv1liOChlFgVUE?K2cv6d6=A}B(ri>XR5YzCJ-^1SrZ$K%6ciqWA&L|eTkh7Hx*!32OCE?J zN>$^h*eRW!@Lsn54TifDa09~7^~mc>=zGL9&IttxU6A#qP`^8k8u-0it$pT;5ymA; z(Eeg66xRW)Q(NfUN+Et8agBJlxiVp(<=y_V47Bso8IXr@-W~lD?x{cBmEDds!D0aj z|8%gm=4!Fm(|*Tv5!sm|^}+YdQ!X%%KcPjzPTM9joe5lOcQD~lf?5XU6S~hN&Vs_C`?vXiq8GKBD zeC?{Krp#X#vcaDVWJ#}4J#_GMD*k+=7Jv-2s}zv+LgMRd<+`%qF2aWRvdF5CD^Po| z!dba&d8gmmTv&?yK=pcTxXK@nPC{m@B?tA2gxnYj96dN2+0#}p?t3~NnlJc5Qe%S3 zYZ`-b@Q-U+UEzF31QO7=pD8l0g%+i_3Tz)Dz)Mqc#QXRxayB9*%n~V0dkpgDx>!y= z=l~!YG%98U!`Ft&)yre*o?FqkWbMmbil>X!O1$zs%em|~#>;lvdC$66QxzVQ)6(*{ z7f8c(%BB3iNUmv{O9X1)T@Vc1&@1}fpUOWu%pjB>^+$eNd+l4e3O;r&S-TQ@-Prb% z^pBnL%@`3eUCIZsH8j%QbgDTjn78Lv3#Xzh98vu3`bgxl^BuHnmmetUMNylz z9{&IjcgY;iKX!hO#|y!vREesUSoQ(Hu%MW?^S5uZj9D2Y<8#Kai%9@ABa2-j}SM5UR4 zz@6L)OatR1hSNXy21UuoaDePdqy%X%FtIJ$l>a>U@}9mk`{qUbFEv~uj_*U~Wz7K0sVqq}~Hn*gCK+U@^p;t_NYioe3bi*#lUl5^X z#kr;zxHAR$;PDG14)5zu3o_|+$;oJK`UBGbcyI(m2(P<;I4>i65BXlb=C^*CC{2UK z9ea5QpUEC@AE6n6noh5PW{1SxnJXU`XXN|1*rRJbMjdo9=M9tWQ|AT?j>Qtx+?XWE zxqSjfAdY(Bve^N5u7%IoHv4spe=X@$K#;_ZBU2$6?~%IUhDd#BMfr#p_&A-B{VWM^ zOE|cZXfcB(pvRw3w};?lW%drG^&W3!f?n{&yP;%~H;y-em++%3jK^-@nEd=uj-i=@ z>cPBbe&RK}I!B29?z;d-I$KBrujYA@z5BhXKz~JefJM^|-U433tIm86EW!lhAIdFk zbgykP&WH#=dZt?2rG>9coZ#>m&3IRBc}B((O0wUmNxQsH(5`a_FTHrS^ zH}VfVoZedw7WLE1_egl0OM}z48gFPr9C{C?7N-|!#`_gmG#SSmPC`@INlw-jc|iSF z%Fg`=sHuIrN&F{+Z*2OAc3N7UCsAnkIQh;ykxoCPxM~`NHyH@Vs<{Z9D7?mRlJP7?ZiR;ED(_gmfmh_=_gEKl zv4qYo#PK%lV9vKQYPV*X9cU#(rveoP>M1JD>y#8`LfhvC9qf%ho?c8m1C#g2C+Z2; z$!lB2Qa}AZB6u_fX3P?3J}8kQ(tm%S$+!Z4ax5m4^xEDY(2bd1PC_mtYFYdRdKh?T z)#rxrFBibsFN2^{9nLVcri7V%%wJgvsmVJA)KsFx2Sc-RM~55Kojz?$Q;Q~KZ&@y& z-tg+q5lyGg5?tvC9)`MZD{Lt^)ldlfO6{HdOWXt(nbvOD*{sJQw`}zPj9}2NDA5_qgf93lFp%Q`t z^rKak95ehAstZprTs0AhwXGFs@fOo>ZKKiYnjtEICgve$Wr@ag;CT2V?KQp<(nWi3 z04=TMcKvpGiinr78ZRA*jR=`zkoEf8y@$x20(C9-G)BlD-c4;xNT|IcqoeY!H7Kj^ zmQ2Bl7LR!IJqAYc_XHHB&8>a-t}$fgk0$FGa(2tfc0ICH1*-O5Zr0L4Ovp?SdYB7^ 
zux88lkr#c1|I43{$i-3aayJc%3N~d@scz6DETz;Ts!p&QJLJ{uUv)qR^(7Se#+o!U5X&Z!gI?(EI=)vE zs9DrmiDn??fHVQ@(>%?5zfevo$2&{DPeHdV#nqQ&R0oX^xQ}hj)6q(z#q zr$5DOHOA3j@fmfSrg4Y_X_S1ffLzeH69S^tbY>#KPO}P^XFTw zeY5{zIG1ykGAX--L1l%ekyl%LXDt;oVcO}_&XgSENms^|6#bm{0uUH3@o>1g8{WDF zedR}tlI0~PmE#{y7@68ws3laq!+<4Mi*V^B;+L?uT4(NFCp@?P6^EM$=ktM1EU?~D zde7BMPEKBk55vmX%3r_2@1m`K+#xE1K%yPqA9zpn%FCshiqbH&^4D3|L#b`N!}#Fb z@|GGQvkvO+mHklmgq1SELF2qLdI|2MXo0aR9j7^Z$wFg{>5|G{^=!ay zWRUW|O}d{r9o}+F3(P5IZ?VCQL6lcO*_x=lzVyuIpY^DVmSwv+2K~$vt>+NLNXb7a zq{H7Z4w<0!p%*#a1l{WLZdkm|qE_q@5HI5iXebw2wRn9g(0akWZ^OB9vVIxrNh9I8 zKD3F!zeFO`Iu!l}-`FIA6kGA3cI&D?@}*hqwmYGZQd@&ZIi_9Yvz}eXmfTPO78o># zOr3mTeiAdE^Knn@I|Dy+OT77cw(C-{)#wWCDtoP|N(=svsRYZ$k#u=e7Cini#d*VTH4b`uzWI#1U1&VHhC^=dX<~|KGw%;%d7U~ z?q_vP^!)f4;D=RwYT4CqVb8MSR4}n%)i~eky<29(yt?ST`)9yvAcdB{U25!nT503y zRoR68)nb_#@iock_R=$~2~$RY(2?GIQnX)Uaj0ZuPLn&oDKn$a5EZfJ3)!Vl`mdU% z=XQ$3R-5W9Rm-XGdYRWn!|CI1_kG`>zzkxk>)_yEh4=ZN$wrKSd$rFLngE|?Sb@Z# zGKD@9O_;N@j!Hux-|^mSb;*5q>);(flVpefXe}|Sn}$fE*(P=uyA(z=MDU~nNx~`P z;nM}vO{Nqk)`fpT5;wr>)@p;WKHEu@vj3CjeAD%-6~#ZHQJ!5>;eKqUw+NGU2jbMu zuQ=_Pn^l+{kaKpx)L#l{hk-c4EMB5*ENG5uKB5`Y8Wi{y%tSoCjNe0&@kPlpj4d;( zhzB7l^*#Dgl6}hp{aj;<@U0=_Tbx#5=YgfyTR6OglTZ0&q}s1aFJ%U#K2O6|lfrmiM*}E1M-Gm-jmx4cU@jWa zXrtj(7~TM(L69OMsQCn5y{u_%5{)o%-&L;|K^8UzLN#8&eVRBKPFB&5VMz~0T;cZk zP$58wQhOwESAw<2;fU_;Pl@E?owDqgPI*h2ua^?PE3>+RcU$yF%gqB3;{%pN|DSi@ zYh+LD_u<8;#PMm%Pm64K6$kL*-54Z0(S8nKM?j0(dyahw!qwo)rPjv4D)~frE7Dpp z^erj1>e?+Qb$?nMo=@m_fY9%x5(sR+N!D2>{HOne_3kQx=8TvccRZbDjVoUhlIJRU&W61aMiV*;Fx#M^C0s zbD=1Q{FHCek92I-*&Qil@+XV)XNJ0Dm;ij_7mFcM>a7n2vr*aZnM4A}nnr1PY%~1j z>ti3bSB4#w%J!hVJ& z82E~TSs4#H*{bAh2TG0IOAAI_s*2CxT}1}zsAUua?u$e(Vx<-*QX}f?{ z#AMc|AK!+}f?ZgS)d<07wwXmWlg;kxzE++=r0qFvU)SGuBX%3 zV-k8K&n}wwMAei0&A^-ZVa zxS&@~HjE+1@;u=7!lA0lmdyOYbTA%&udU!{A<}$va6B7S*Rlblsgvcw6#Q~sSqAYM zL`pP%aa{{$56EZAwb*B%DEi~`;#`^?v5C}-;XRr# z#Y}zU7L(j^^R?u(s>D5E&3uE`8!_7Yd<$U%idq6k#+4M$#jsfF6=Azf1$TRHtOW(= z+_IqyY&Oa`AD<%o5X*h!TPy-gQMbi5i}lQP;UE8Txm!`SuC6oT8{eH_0;}256vFKf 
zqK}&0hq+&O9JhoHd(huvkv&BVjK_)oNuT#JVX6i+(u&JHq(MkBQLzSDXq*Sa}ze7d~TToq}4A`-l3<4DgP z)md+yvN|2Z-NJNq>>u8PxX{;(ku$chlB-sfaMH(in`!EMl8Wt8)jKQz*JP!*rGNF! z247_@{7C-B-h%d@DO4Z^8FZ=1IXN}e_ZkAbiFWHNVEZ#jT*3yE1-B%Libq-97 zckEsViM$&ki(3d&v-tP>qE!3Kh7(Y4=0J>%1M1iR^ScAboj>6IS6yc>(I{I$G>H2i z_n%)6!Ja5o&Y5QM|L2+_)-K$Dq&W%Y zTdiXsz&a!^0qvR26;*pap}kr+D(50*kV{v^f4jtwNO;C|4-gWDIpAz}*r2wC%L*L3Q3em%o+EGoI^#_A2_D~h1 zt5g~Fqz$=N+!wr<8&5IekssgA6SsWd3>n^$$>eMSmxMoec#b8kb}5V)zm7J1(Ja&xz3Mk@N{ ztjXn$i~AZkKe=E`=In&VX>ISpGPu<4d(i!nbM}mmiqUl&{NPp8#J5-hzMU$L?86t9 zD)bw+B3FVX_GA;Ylo;^#!o|>@A(c1vf6Vu{2U$uPchN{FV?I_&7)R){TCe{ek7hZ< z!*LA!5E<-{b1%~xK#APkoP3UVKj|?2i9*;Vnmm53aB$cU8>CPmRxtJ9pd&&jpVdI) zadt9q9o2u)W=G-lUB=f8MaPO~4kGv1WLC8@zez*`^M|byI*2zZL^N*S22OF-<=4bC zlw);GtTQf7ZlDSDUY^GBS^ez>i(-y8R17=WHL){9>Db0bq4O5ua^;%o4h~-WI_AfN z?82n6t5-1D`F2mZ)FGMS@5y{y?dzMxW?wxby4J>q%q9{>CG#bE|7Xvp_1AvSzKNwz z+)kVylMA34<8&^h%)pn=ooNVc7=*+M`74N_URU5 zH2V*5MtnAi+i*M7pvW?zTaEY0R^z2PQ%`JXydV%uV{gfl$CW+u2^a7p0-v=x82sYc zv7jWWITq0*BO9E(oH98TL?YYJf#o1g3VTrN*LQ4ylG- z+=-qFYJaSMNXutg^DWXQ)}A7S`U#!w6T9d%2Y-ruw`pu5fpBAmx~GdZn%?^`65$H> z!*|SaATjh7E3|`648P%;$@DG;TJgr8kMty6?tq=Q-{RYk$N-}4-i(j;d&X9CiRQCy z?xbo@^7#pF8xOXus~9qK)Ed_n5})yOnV6sJB}RaM=yc^IxB z`*k~iWlhyERGpEq&vpG!!=O5`*Mw1rlJkbF3adWz0rw9rT9xPc=sB@9M*r=_qPh7M zG;6utJ(vZYU0dnSG5wC8BPweRG)sV*Q;8;gJ&iV?D z2D@1KZFq;-#l-}zIAhkkwrI$<)UJ!8Bh*mMWv4gN`>a%{&s#&h5pDGKSvEI_t9I3f;&>gzxKUG;3e`*-|JV5hDyT+CyiiqZHu}o9gW8?8m zXivVd4z9RGH-#U_35-78T$}mH%Ga`eK~=d0mgzY_(Na(A-9hH%nJdFDzaHIUSp8Ad zV)x-_O*DJtnx0y^I$G}5Y}yMqzGF1xT)WEMDMvgU*{1QF7--&y9UUd9uPaw_?_8DH z9x07uvtf``*PADz&NsF&x`A+$e0D*q;-pK_c7t7vYd6?xex>cY{XU~3`jUbu5`g#z4!~7S6xnxlI^6m-~vZobxG zzj)MQCL8g6cZ=87@ZVWyWA(r_Yp*Vy-`!VZd@rZ>6zAI@t$F-;hZ?ozR5|VFk z96wZhYALj_wHLLxBiEHto*wkD)R=>a{Rp`#fF$&m>aC>F$OGBdZ#IcZ%Gq)@v0Gz- z_lhV=i~O5~@7F9lTOQ8EH*Yvlf##s3^a>fNGtwa*r>W>HsP9robDUk13r`ZgSd{5% zWq*iJb-&aBFR*Fn0sQd=bW2S5*)ahvZxF}Jp1yr64`YWj5&g50ldQ z9Z+`1Sd7UhwGvfq<}5lgtl7UoHPXAg^_zPnB#NzT;`S$z0=gfF`1&3ZJ8fAvPYj_=YQ#g!Q(<`%D_|p| 
zM6(L#mU-Hv#IZQ^N_C4q%0pWvTXhAV|0M__NAr{tX(u&gGMy-uNxml(w%r?>sa0k< zw8OWjQc4^QD%l&~Ku2K_>>RebmV_6$ycP2DUs#Djr6(Y%a>(&oZBjb{#4JAenm@Nj zVUw82Z@+KbT`uF@20(vP8ZZGA^{MMexzEgbi)yB-Wj$$Fb^6MRcMg5dnZ;z=TN!+Q z_%|*I45WN~>;4dE*46ebDytv8bX&n6sHo>D5aGFF(w4H^3MkmO-KtQguDX*tm$|jQ zuF&s)3_9R3*9d*z=C^*ik78PN+qm=L-xTndTb@}Ga@l+dKtR)1u)6I$t7=ab1naAW zyld(!id3~wFJhJ&WHxo|E3IH4=xO3*E?P7pxw-s~839Q|BksTv5tl>#g|1jtJErx1 z`#^5dSVu&5#Y(+O80KGyKB2Esjf6_m+EZz_tyO0kI1lc~UkY<*@N6H{p*s{|@|4ID(a$hJBweWLY z5kl~zPzFjOy&5saJhu@?(!q$?AfmtgJ^zE&1B9A1Nc;m9LCDx>)ujQpLR|S({d~Z4LZsm0g0EPf zz7^&tI@N;xhWoK|YkZUmpFnsH9s}qq7%o_5QN2s-K2~O~B8AL_{6C!;tXb<=h5-IZ z`=x2C%bZThBc3M~%EU=aX)k;{S$u8(-xv(Oe-qH~1J;Afdc7aE4nKDoC3AcPPyJ2ZzY5b`tv364eo9QquzKgZ+^ZC%Jv;3uoj&8Ls?#hVwf`}9;UNF{C`B>OB=o6!Jr zyuvbgF5Ao>8qm|i;X0n#<1OJ6G)R}$G~ohjMwp-8_HLaJhIDaZ-K8m*Ec%}~sKepn z>_Ub@Ma9S_F21~7>2~F4+HG^hLnZpWTD^J4{~H^zLFH1nE_|JOaNJf4S?8*7Fgj%v z&}1sBaX53fd0I3*w)o?_mNT()f=TE3kq6I{vwlKU8k7~w$&yxLq_4wSUp8Z%wo#yn z$LyP*q4T_{9pS(B*br8zfE2iWn52r7jN83|1VijI2hvX< z-Yn!r2`fpB+|=hhzK=W!jGE869caZGWrOv$i-mu6{1-ph&B@y(KgEcrXk~dl(o%v> zaxVK-v18p^{yXqR3;O&5CEypDl*9)diihP z4!mt{^EmU5qg8BoKVJSm32Ns`ga-%S?iYS}StiWzPFmRh?-7tWGuG@$1j{Y6_G?f-4QKZ-3gMxyTA$wQ%@HcFaKX3UX z|NAB)a_F4;V|LFr=j@7};!()E@yG$-|Ekirs4;vM|9~7Z`&2M&8`8@z9)%ei*gpLwSSI|1%%;ut2dkvoNxL9N)2 zm?nt8T^sKuI9Z;f;mev=0)hIDu&>{#F5Y*@;Don98QVNDapCpQlNu>;^v*;~5UQPi z%wq~*W9|`MRM@2mvwP2gUu9+FJtL#kIfS4q3enNtAi8y+HztxkaUK@{uqM9H$h#S| z&h0jL5~`1mH%BDUS;D<2es}7#ye5N$r+Jy9@!Gs}WVq7xDr@DGc`00;;-dSWt%&Q5 zs0MeZXqiNbkB`=0kb-z-z{rx^Io(86draAl;si12Pa;UO?A>BN=+p6|w^!tHP{lJ= z4fekl6pwY`Ya^ZzVmf9|iu5~B>0l~yfh9q|2~EJCfy+ftfspj@>{g+i&Dq}w9CI_@ zHSVwq^R-oTHluQQ4JxXmzp z-AHtQTLB(+ox=Eg9h@J~YmX-U2i~#*iagGRNXe$8WD03b2+=n5>%Ef$Tb+eY**Z?R z3y$o~wGBEG$A_%(r<(;yz=Avi<0SHL?r2s((hVQ*kN# zKjY^&s#{5y2m}ozhehZ7>mB49QwJ}b2OiyTon|grJSnbruOxj|o=UPO9&h$x>BVph zwZQLf0%7FN5i0goP68^}&al%zV@sM%G)wep^hzgy+xfnX;^B6lW9+h6J6`Gf{mGw! 
z-n2PD1@~3^Kt^*H4XS|lVp`1e)1CE$8lU>j7pghT zdDxJAhp0blR9$q51RAIMIe^o?8CR z@`C9^FqfgPclguZM7k-CD zXopYOYdV|@*92eG%lyj)_}7KIX;rZQ_}zM3;oTQ3m7tq`O{3WwnfSP(+<4=dt=eW6 zN(P&18C+k%gIDt4FQBmFZR*lQA>iIXBvb+Y3%GFRdq7nRpyJPmuottPGISaT6X$cj zfTr%}3q0kF8!r9r?jT=wI(B>CLPG>$tYm;bTo*h%mBgOiU>(d(W`G90znD(k$ ztvYXUd`RcPv^=Jk1%@%%1=~kKUMJI!5@Ob5hm<7lxUMF}W~e3ic-;a6F8Zgt%5>UJ zW{M6w%CSBcYD7I}TcKo7KL&ZT8)G$l;Y75RbxX#^U*e7rx1TMV64HLeh=Z2D0X4Or zeGFzhBH@3HaD(H>Sv}4+-u{B*EAbWW>D)_!=U>29jIC#WALMr__)# z#qtgYN@fk)wf&J1`zS_z6N>~!MtNQAUMjR{<|@8-u^6pciqGQBD zD(khth56TCJC&nbldZse9&=kk3qBiWee?k2=DJX~56%_~)_?eGc-x7oFHtif>2&kQ zwDo$vGG98W6V=;B`f2{~#&_n+{(o_ZkI&CyG8j_5@6FxUkt52@V*EMp9LV-gnDb63 zXnV&jgJokC3o>XIL2SoPaQUdWG+kHZarc*M0NAdjL0+dP>~H04XCD+71iO-eM844iKFI*R@N= zI@n}h$ffKQ3@|5cO_k64%@s$afNQ0s8{1}wp8Bn=e_%***ij{;<_em(+sw3+RNv!J zB`>uTo3341$^7e`Sv3GYgs+>`nQYwj7}G0pNRrR_-K05VNJ}X*-M__s3Z*^6RW^LRAq&>!&?%QbrE!PFvQqD5*ds5MR1s<0_e&HnOL`vp(x^_Dn zJv%8R9Ll{o%@)3T+H`)bnold37?@{+`GOEc6W<|h`>}dkpb!$5&(ez~0wUAu*2#m( z8qc*mYmg6*Mbq`gPQ&nVw|px(5}9!m8${x3+YKd;!FZuh!QTsR_O4_+J|mj>CZ~1ls~wXibA*r`_}qaV5i8t74gD!VBqFeU5*;jFJmzK6Ular`5Z9O zh-3F#c#iFNCCg%dp{{UcB19p7wYMQ7oR`$G)O~)Lbr}im)kXO)4hl!jknj|iTeq>k( zQQ5U~I4cg0#R+f43#p~MW2U}J;`P%7J+D)xY0G|8)K8tZzrITag#M^o5lAgeJPwCr1)dE`SWZwtm5-}=scS^?<*$PdTdz) z`#%q4n-#{3)T0xOwSq_^9#>Tq477TCJxAzIO1(Uf_^)VoQ8)Q3wxttRYy@u*`?%dH zPMRc8b-c)YFt@pumPeM_w0GwraJp9|&D|0g7AMB3nxx&GndlDBmNv!pFr+*rslENX z>ag^zjmPIUjf78jJbdwOCGIi5v>C(N8d_nS53!ho*P!q)b?`5nTVZjdWF2d)9W*5} zB@)(18+R?9*69va=sjb?zMayxClNV(-wR%!BytjV6d29%^S2&(FT-)UmV932Cz-3Z z#izDay`|j--P3I#>7w7R^KKC7rKdZ(xzR{P3wt^mQ%NlI)i9sil9aRBN|&7>NS-_g zu-pmNp6XBfShx_i^(Cr&dxzoqfU7ulIoV2+ocvw?#p{An-M^|XI&acyV)qJvr8^qN zOCm%?5JW-g(tQ{=bb1w;n7;F@YCx*DC}QruNtuic%EQLqRzJge`!=PR zf*82k(wUk`qtvmlg{xncw{qEZxXKl;k8`BV$jVb`BOE;2Mljz`BE<+8o@!ak=s`*7 z4YDt3bl6*iG0>9sdi1Z>y=LEF*Qn-nv`244&)FQjOa{#wx|W7f3YJm^3O&4yo7mQxZGsuzf~ zQqPxO?9g7QvbK7!O5(Wqt#1GN=C*X$;XPFZ*rv_AVphm}=gCh;_QdU;$rncnd=rki 
z5T7O2jz}!L{@`}k+OJA{R-G%J(ps_q@MesR2rylNw+Ok(%KA14*$nV8eIhw{BkHkp z0?J$++FZ$Q)=J9!-SaSHX^Nb6YNKgDI96%8ahzA%pK(hgE`9w=+4fS=%bQH_v+c>3 zV84kI9ENwYOxYA6auXtv84q|B7@B9m1svB-xwj0eL^i{bHgvY6VYt)lu7{W>Eb#c- zuJJX5=wUmP*#kO1A2l?2_O2)-tp2m8jUnpIeA&P~QFW{~3!h{-Xb`S_BfqxO}cc<)h+vanJyFar&{0w98B`^R3mZ=-Yxv{8(xS}C1KP#BlOxGp`R0Wt-qA<@kFC%fb}hd_iwb_jC7+>MQi1TeL3|K?6NPvnHZ+>0vI1?qs$gpmGO-; z92_=G-OP_f9x!RTOa6Zlo^Nka-ILgjV@Uqsx>9B;6Y2Yuci;CTHD3%9wg+U#`To*Q z4E*!?M5Y=&(t`3S-KX^F05e)NJ0ujf+Z?KO&x%qY4e^?L3V)-p&?+%N)(GMRKGO*B zMM&XQtd}U>w@2rY>$WdB7{eV@fU@DnC9p_t?llE*>i29z+Yd&Tcj4PL=chaz@TP~n z*iWNnZ$Y4o%SV~}OPxshv94)FbS;e>MRy?A<wQP4LK6u{T45?f0adqs&r{)7Cf{717StyuikHd2&MW!J?h*l;9!t9}C(u$Od8@ z?FBcVoKH<9E8gX3LF_K?X9=(HEOcRWoHL(quW3mde2!zg1ZmHN^nP`3Kz)K$^&619 z%+>fd64BdJoZQmEiC%=U00=QL9j{z~jafbfmBD3!B+! zafJn3L-BfFJ zj?(^AZxA0B^gB6*UdDWg&yv+^B_Q}Q|JC%Okp|nb zu8Tc@f2L0_@3K{JxWYbZw|lRM1mxhhof%4M5Ia~zt(uRst=8~nZr&_kFPa(*Ok5%- z#39ffHc~H9@-0t%t1HWB1GcV|F^Cmqu@Z^2j{D)F&Yb0x#tBdV@<>|BC}(ujoDV-{ zE3QvVi{jl7Kj%ht=d35q^op(EHaXqqu+Uh(#h7Q&?619MatklX9BsdU{aY8$!esL9 z*+D^oZ9W&1fp&?~!=$vIN6wayM@k4|e8X=$c+?i;wj8537lIIXF`ErG#g`NBn%ej9 z(H~hHDY7sU!jrR9ilmeHiQeDXIZ-qHx^I0yT&Pr3yEP0QyqMB#DtHO0#u_d9(=edD zO$6pV>5$w~6@ZjxX&xQdW_S_UCfM2Vu!9VOH46%JxOx`VVLaL2Fj7p{nIw1&%qupM z%Bh+o@b4ryHs=3Ab3rHO5y_E^%Og%ub~3%A^ARbN@j9oUbDjr2-NXFMYXNLxU+2yxCuzI+Jj~WUd?8z z#5*@RrU>5e&&bvb?~RJIv1lTTq|b))#f7e^mqsIP5;X}l9l4Y@ci5ZqnHgS8P)j?$ z_tgQ}+s&ny0Ca)RPv6%fJO@l(ez=7Qj_o>ySLru?kLjs<@vsyO@QcooS=5=Pw;RLL zu7%cd3$w0*Jt{KQ88`_e3ClDBnwkje>ow$38o|iI%tjaa@+Z`J8hx4XvL?D$5&3^P z|4j<{8!)5B?vyRN#B7GlT?#>)w?=QQg_3?w%bdtArD1v!3;Pmtb8|B>F-0^>RX|MG z@K4Xqe!&9?BZA2;qDdzX@8sKmUiY^jBM0#PsV)TP0d_?%7j~%q7fXaoBs& z1RiUD{?{G4WE$o|i|k*~_7~3gKLd#qW=V^EElN}_PxqyB4851GXO20Z3| zC~D7(dnHnDgDDyko#ZCO1*4#G-5R>zfopLu|7aLPwI;BH67{gjXvsEwGeNvIA@)=v=3wiMRh6fs~Z{oHml#$xn)6n|J&Q|rd$Wwvl zQEMc0F|Pj%l%P_y^(9ga2CdOPSiMoG0s-T+<2nm4?EX+yp{^C$T?g&?=RzC#(#}64jD@Sg#OnD9X=)t5#c%mA#WJcnJ@sJ6>uOkb_qI 
z7zljiwJnKUdUGXoq_bXhe9zORd@ZW242SEwsc69*2STCJOfj*fH5z40!2IiUf`;+Yaawrz0R8genNX-cJukWsg%Xw+~Wim+)GowyiuGN%0~=mrE4bYh8bBgDjILqUOJi`I3vyGZOnbk}idkGBH12i?>m=*vL;qU*XKp zaI&=V;$n}}4X=HGJw<-3Y$kx9_1J8|kF`J|hI^i;`cn`4qOqA-q{&M0rM_q6%kk7- zyC0CqC$p!c+8!bAVOGe5mLT(_`0__1RylU}v<^Ba`cl&)G1X{{pkFd z7;&yII%Fj{Z=w(S^uqrq;QK`c>vKK&RTpGl=#GDoGqbO5yj#rAF{xmp^nx5^m`~PF zoX(w#i`LK^&dVNnZH1hln+cbYfHuzDTi6&mo*o|S} zD37;WNX^gF8_!ih7K%ejYvr_?+ivKpm2tP{w3UGvjc&%G>H@>~6*C-eh9!Hs zooLgWJgx8)k$#Vro5*U}nvp%FWtlh3Ux>LyU0QZ+d5}5$6UG#-MOz)L=b5mUa_k01 zIZ-N_uvmd9W((^KbnYP_Fzrf{#=)0ER?V&|9tJo~-}}h$7$uCR2dmOf&DnptKyQ8a zuK6L-0c|uLil;%?N%+n@tZOfQlaf;e?mhI4slF0B07(4H*z$Ae{l77` z(nO~kDMsb8Musot=GnqxUr_PIfuD+oUg+vKUJZdb@*-LUcTNQJ_*ymPO}~1;X=L0c zq-umdiT>Y-TUc3!OU{PMAqZt9Kl{2+oQX7LYEKlFBiv7$9;gwhP3Bg*K3H2@mzDu0 zZ*QP<-!a`%5}vG;$LDcnbu|U{-y5ahfP|7NFd7?JN$t?IyeSdJPfvH*ua`X+yFv@h zJ8y3CO`XaI{Q13V3Wwp*P?#{Bfw@Eh1NLC->vyU!bhU(SN>_2Xo@*{4Ibw_- zz-#PQ*H<;?QNI70f=4Lt!?$;)U8958Sg;@!AY^wt!CFas{NN5I_p~*r{~Rnc+?ppv z;+A4dG=^E|r7isOm9kKkf}3TJiS1KeH@SgrFzZ7)Jqbh1;tHl}&7~>pYWk{uE0!cD z44k!-8iZE<861ov{>IOC<1(HmvITUF66~RihON7~uN@tN;0J?N(mb!6)(vfgi#)xS z2Hn5)%=B|VVR&1;Urid6L|~LHJ*-x7>KFQrwfpJm&&vd#Ch3B`ci>7$HJwqC{&i+J zGj8IZBlD?X2^nF;t}dIv+;G-HPrsdgP*Kraf_Kqd;!EcB(d*=lURQkwiwVSEg~QCg zIc-|mM>|JlPan=D^?Gt-YI1FOw3<_l$eNO;=W)LGESUsutLsxeB6!>#3SMnQd!{v& z!)=sToh7|E&Yp!g$9p@7Khy|OCU@snS> zV!%=12L3lxshR#S=l65kx+b=^0oF;vu$A{{$RtY#pnfMm88)5vD47Sp_k}_O|ALU( zij+Iw>H?U}xT3b0^pgf?DOU9`U9Bnq9C)r7n}n9hlJCD?{rxpZI5cbXE}?8#*Ex~< z${wM0A5wJ%cUnUC!(FDI&Vz^XfgRLQG~0Gz^Ux;3;h#)Bt9+1L=413+>oWtkXUrCuqxzrXghej?s~F+cVs<#)QO zm03&FXv6A1h{J4LLU#myr;I3Z zXQ1Q}_;wep6pHV^ynF&;0Ge%Sk@H}_ozAO!TlV}*IS<6B@|n|TO%f3%`Epp)tEfQZ3sA+)~g*(FjTRUFVBO3O1_m+ z36O=0w*N{T(<`p0I^Wry^aj^d0In#@m^)F~d-s^QqTnD5yf?kwpiXM{))Ks9E*XPF z=>QJZ$UZ8V%whhSnn!HZ4>(Ns2Q~!3Yr8l7gcd1s$&qGoDbNE;9?kHb+OTANd_TUw z_sh?1jGaaA?UAc`Vi;vWrZ0yG~G9bKnn#69tY|$cbG9A;WVhM}6EE z3W@X@t=E}(Y?D7~8kGgk=C5)PAPY%L!^fxz}Rk{vBcgN%5isgWJ8Za*bCRA`)T1#v&)#+@Fw^U)kn4DNw-f(QBXmaIK@FL z|EnM2lyleNMMEFJwt#cZnY 
z?i0yTgYn(&s>d!GCZ2FaJ2QQ+MV#zTeKa`(DEAY5O z=j*Y167hETwm+Sn5Jt`gRorO5a2=as%U4(kMYv7uE|4HeOn#Hxp_SEX6O)u=Dwe=o z!iENuIKfRLWWIj!W}-@xb?V5iFgbfs{pDy2)9J3dT^Mi6;#%Le+5kyQUS_Cx=rgMB zN+#p_Giver`ru~Q7w4@$kWob!(Z;xu@8diIMYYX$9vS+?UTAPN@FD~&c%9#>tluP}ioo6dFfVj*-Wurg8pdz&^|~W3O>dZ{GUcVs5nd*2N#D@;TSpCpZI-SbgF5m-1S$M+lzE6X)KwS;;Q_*;uKa!f^1TfWnL#BaO!P4M1dZ*b|?undZUQ)7v4TqlpJtQ7!(N*xm zQSlLp-L6ke&e3dmTgp0pDHEzsYQ|RqtS{KM=e}Rz4et%6A#Ak27(8C9@prEgw5}Zg z!{^f`t#y%Dr6q^p$|?z+s*xGoo1Lkr^wm9Cnm<%0LORPQ4<-&>2mljb!_JF5^s z6BsUOe33b&)Wvj_;R?El9@Mo(B?Ibh+`>2SVb8R`GNzYj?UBVhZ<+6)e+M-P4SrLF z){dyz7tZN(ahTou3r#o#{&1#}wq#BlVaWleozhtN#A7CVY4b>T9D;XLq z1st8OsUPzZ1k3POPoEDRukL*Dj(YxVVH!=Wk9)meVE~MIb=Qs>yp*ok33qbfo(@Rv zVfM7BBzsW!w1=KLU6%$SD#@y9(!#i#dX=_Z^ITu>z-+@4ORE&V!I;J5ahgVXCt+uL zaz(Nk_*=)d-;Zbv14zR51a5`__+eHlhgHW+idnPfEUew~yB#G{afME%Gk)A#b0YqG zEoP?7Q~}(|(9d~h#>~2836~pVh&Dr?^MK*NZaNsqkL?EJ+0)==t)G4fWp-k#aqadn zGDcLnPp-EAnAkpknIHW~K~XqM3oIPH30w?0SombihkV@g#CTfF;kU>9Hx@vn`(t6- zBb6qA%PbZ}Tzt~cz^5Z!0qLACzG&gxj4cDflTk_7DKVax`^qga(E6bO?@6%c@Gzvw zJ)^bnSe|(e?qLDYVCM^P&Kf*XyJyF8qk7YB!HK*NINI@9W-olH)3^+IIQYauy0P9! z>LTjC!F2bTz2d}heR6`*Ao*<|eYgaTHke-jUIl>YZ8ecWR8KhvFvB=rprV@efd%4> zZpRLvVqFgaRzs#tYV9gXV(*w@oUJHgTj%nn!70*oU==Ltpf6!ANe={^?rF?=zD^a{ ziUKheFWPs69y07GcTaAt;nB`&)0>(oiFIxHmSU$=N+L=5 z5CcVAez~Fk8xK5N&lHhYW#L?)m155u#)7Rm82-28;OpCb*D_7UfIECB##?&1sHVfD zIWw-p8cnsG_&j@;XW5tYvb?pKL_fO7Mph(i>W_ zw41lL+>s&$e9DYO1$E_IuZb-@LjIp*YFo^dv9C|M$(oVWoS{2vEn-kuSl>mjjUn4? 
zw_#Z=>oFqtJhoxuanNzB~{@5SL} z(&OdEChrSkME2D+=L6;4E;XI@+W5s*qxx(G-dymj)Z`2uHNM4UA?o}}5VQBpsq+k{ zsc_=v_4ZiCDefwv%H|eY*wyA$yL5*+(0ny{=4?WX{)YCzfi&BSTVI-h55GN$Fg3r* zS6-_nQnQz@vk3l&u_kfl>0aZejybb2bC@cs22YJFhK6-kRq+U9^szg=xu4}brDdXR z-w!FdT50BE+_rH{mA%u$?B-GWbkFL--|nuDtNT;0V6w7yA%`>DR8-~+zbC)cMito~ zo{zV)EFfdyi3Ys$9Ch-1%xWQq*nbr^JARF@0EQBEwm2PLhLdp2I9RHu6X>beIGuv$ zi0|bLIHaYp7gnK69x4_-jO^VbyKu5c@rcjt|Nh+F)Bc$44P}u?<+jL^MI~f+V_N#` zrrbE`CZ}unx9n^j{6Y8l_+zh4bKp(os* zzHvc?S~5o+`lhCYL$K-N?Dmem_isNB#$@Zi#Kr51jp?A z1T8$*ufedaWWxzx-Xh#ms8U&0+Z}LCy1I8p`fCIS{Zhv8Y zlt_(BdekJhlQj4d%Glf2pFeb`ps&(F{8;Jb$YWC{g&VRTgNs(Ky!j|)QqL??aB4Df z!9m`5HUMaM8tyuv`bU_ zyiP+oZ3;4|wA|#n>Y^%tUtayNo!FsPWc_ur%LY3*TCUr4&Uqf_%XEe2K{;(eM3Bn8 z#eL%>dImQp3Pe+nrwYHm-F6Rd_r5#kUtw?OFEO+^P=7;_+9#0CM7R#}5z>jc%2(vr zbhvs<;isRo(JID8UC7wze~fpozciJMHH@?53uxx{`LqC*!cbwIIkr(iMzwxoHqLot zJSGB&dcP&np-vZvTOfVxFX$)j z@9)Htm%7+&%lv8sYSv!Wj3aMgX>fG{-7%bxC4xZbaHTEkXU;u~gEK|HyQPAc3 za}LR(GQ*~T&MEIT)Jhm;o&I>ku3Xmey8I1B(i;obK#q_aa(evnn17ql!sF9JnRu@x zpj~O^V+>9~dmoj#q0q;rEQM(owm;fNSBF=Zk%}Xr5` z>`sp{PvWRlb}aI>Y{kdV3;B|?KX9YfiMZz-l;e3Fl+G2G-)2qQK1dABx&@ZDX!v~qdlV)W$lt{nO4=7_Z*wj!O4laRPDg9+nYt}t zAoUm~o3>OJbe(s$C|8*`H;-hEW8`~_wi8b3*~UIq>m^b+1r^_~ z-y210sj8z{@bQul)i#ls(T3YFoh}5x4Losr7K_oJesnL~El9OU57!)YF--&D?wN>R zZqZ+)7Rs8(hdXEwFQ4;gUEFVOzs@H7BBSMOG@&&UQp&YX33%AY)?F6U=5P(%fTV4S zqqNnzAQLw6>w`n4B2&$jHF&N$IyqhmMhB?T;-xH+3UxsmXE?Vvib;4b4HRuRIg zn+;z}OPc zz4)ktKeJbA?Caz3MseAJE&{8G^258?`74H#loQ(0L28RJnPl*Z$?5uvFom$g`3pLM z@uf(Nfmacwb~ep0$?>8)?T>&(feP&w8SRX%IBI);ons!4q%4R!QgTw>cRi}`J@2)V zBvzr|sw3$yjEqDNge(2u-V0-V(JR)1XPC_CUPa6IPyEeMP;%VecR3P&Fg-sh87WoJ z<|ew^zb?~t&^l$-13W($jCiMgpbWn;bnmI9!QxI$ugiB5xvi~!7!%-TLpYrvp3o8| zB}KU&43=s%oT8Ghu2Xgx*vDbyVknx=%vPCTY`zzEeSkzw#ggk?b4t6J;}O_-;w2TKS+^)P@5b`b}TnN3Bd|)V^XmygmrR z0eDAilF>H8Y{L+%)48y3DRd9oh3HIK1l z*Ma4aoT{-nepY|he0GVmyrQX8rwhQ)9UKT*Gh4h-k;v%O8nwhb_|$pW^{WzBadaFF z6;}O zD_QXYOvq|?phI6rG?Qka%;)Q2f}C75n(AzNOUh=7#?9*^{Gp+&#(W{k-MWZu2G7K) 
zd3i=g(L+=l$%jLMxcFmiU;&JWXDA!2bt1&bKQ}TCD3n-I4ekz$=dAIwvEVi z5LRJz#+5xOMi9N9bdPz^3t8b=s z1);DhdA2_Em16X1gX_z-W4At(+HiE%(?*q-e~lbCAyUs5xHLYcHCd;7X+CG?gGuVz zC$nL!bjJH7Ko4;eIoG^1dw+*6Kys_79%N< zT^!=~K`T4ez@~t+f}KAYmwR6m3#XFl#10Mjg67M_{I05!X2*7J@S_yd76H!4#8hTe z?ui+A9kWh_9$T5VYq;|*2!lzuT$O)C`e=MRfsK1x_+OrGh?`j9%+MVMJ17swDTGWw zka5;p(%8rjf>w+~zWupnOK0YRu~t^6LZ+!_7~^T$XPs|tyYo=n;3GoY-KC4I_ZEm= ziO*11S9i8VGUE|jG0j@m@S;In4??zmYnOv`QX~PdwME1 z*&9SeM%Jo`=1yhJSzg}W=7UO^np#_DnjE&_CnoIYiT`Ary(63hbohIGa*3PvYP%?@ zellB8)Vvrdna+6*DD@wrgh(;;if>N@#l&77UT^~>|6l+>!$QnRA3+tKy}`okM}IzO zCjWR|ZNk9H)Ga9MnH#92&KZJ#efOtJ0XUf=nnY*>$`ffiNJ|pH1ypw%6x3Cj7g8Nmd zJ9c{c`_k-CuHc`;Ho9Cf_nzjLmwFaqqk>4`gh~T9pL3rxBb@iDL$`|tOk@#=FW?gz zs9a3VE3Aq0vOZKpF^V0x9^WEAH8VYZHCK3a zaIOR>XGz;2gC#d3iBF~1!O!c<69VuiUnq{;dMRmkE}jX5C2PIbs8n%ZmGj+9%j2ea zsGs-@azf>HHGx|V%hBTHZ%@B$35>m0-n=z)k!U-gf|-$*DF*5S&Yl8A@!z`WALlas zK+757qm3=wqJJ0t`on>b^}%ydP?U~7GF${IF{uy&FhE+%H#QOWz^r7kIuh+?b6p_1 zB|SzptIu=BzmS!sK$^%NRUnn=+oa6bdqLccS~?S;XR@#b6QSq%OG z5t8v+MBF3Riyj`tbY9GHHxffm7Zw@>PR#xgpN%m`^)7|O=1z2+ipiAj5BAvQI|IAd zIXta}PuEr>2MZmv{wL$rn{Z-i#poym!0#gWcZ`D?F&4mg{P~2#7&7~@J+<*{66jjD z99@UB%d2eTR6voTS;&Vf27v$3Kl|?T&3z24e`knuHM(y4j*1f{b8RJ=`TUb_OlZ@Q zGdT1^@5%eIp0Wrat&8PL+2qJ$ogFU`Lf{*c8;kyTM7Sf$dKY^~1xioJyglw^u zfCkg!9oVFNyR;1(IIJ4=p5J7h*JhjUQSV$3jebyN%hT~pK>|61a8DCGwR__86JMh( ze*5+#rSuCQdyHC`^ugJ=I3u5=HAD^47djP{!|Vebji#qzo1$Gs(7j?roX)0C5)IIl z4h0M)K#?O7z8rW8TBHR81M5k9SG&w_K=NH?L^kj&fnv{$shs&I2D?Q|Seaz0eQ!D2 za0gCJH!E=%FQtglYsK;f0nO$0-|Sy@)3p^msUFHQ`kxNw%eWeTnieEOihXYcL{=_m zmSx5WCbKzFfmd3PaLC)y@v}B~+)4Sfar|W1{{ZsC4#LPT8 zT2{|c80=txKXl@>16IhKW1tq@swr=5Ekw7Q~IwVqThJz&|y?Mn<>*s{4 zn(+fmtB`Q}j&;r}k^*OiC;teocQswI+Cv@Fg*+Sjx06m9K11Tg%2LQGQ-6LcF8t-m?Xso#VETTyoR(N*a@k!ho(`%K8}5hm zYPMI_0-oPKw;r&*bJ1tJG>lA6Ih$^J)1=1qR?&UAGlwC(PxPrI#}}t=bCmiC&?~Uz zSPK(=JztR=*?04wSh`kQ?BwF9*(ezcv~LA@Ivztvbx_n2Dz?<;GT@8MFV^`$kn@Mn zK0M$Qk%R!+i!X2Uf+^_r1Kq2L3!V__zKrC!go=}nfKwzIMuPFWDA|K>GV?70d&5IP 
zM0J;JCl~*;b3j886~7@bZsC1i$!jTjVSA*Nci&sv7PI8#Yx<>@)b)0BWR>^40X{Mk zL?{PC#MVbi`Sx8f)3c68>z4LJ%Z=rsnO-$30ovHY=Qy+ywdfbV0Z#PiJT0U%RIKJu z+mEn{Ew0=_bUfWXJD65s{M`PPs_{FGCcKn+zL--5h497v?@&{WjJ>@9PVUN#?)GWv zi6a*y`SM%aB%dDa;8hU5Jb~E!;jeP8=yiD_V0Wj(_EzzLeh$Hk^valR>0Pw_PK9$! zPI0VFcD=X z*zC@jw#6!GaOupCp7^grYOfhwAX zKhgqW$18C##J;QSrut}&6}(4Jcau@z_5L62nGy}6;t9X$KQ1Ae^O7|^?h_M+)YJDM z0U6nD+xpfRfwAK?=Hp~3J`2%?hXv+RhQN)Nq~!hmScxv@lxut!TA;~qqTn{@g$K(C z%AnU;Oq8jyNLg8n=bYc!=X5!SLQM|dB(Z!m)y?}9-qlBJ%g(fxFurI%@@q*%E$*n( zuQu}#$SGOs14`m>DROdQZg_i3rdVMf+YvGmm%m-i=nLjVd-pm9 zw+(4@k6zdZDjEyU4tkNhcj@paU@c__nbNanmD{}R!5ma@IkSacCqHc~z0rh@2O|hA z!MT|W!(ri1h%8LR_u9r5NIOdzsZ(^$mJubfvgFP5id0>fgY?qHrK1MLRzK-9`_nZ1 z92xiZJ99XHSM34?{4(}cm(1xCpZ0bs4~fnsM#2e=0Psyk?}2O!JCG>h@$2+fYW_)* z%Et){IF95s7JL!Q$%DS*|1E(e2Vve^-yMlSM#1UaPO*Z z&OEpE(xNt$)8}dzr1_NoZ0K_L1(izil~BO)r$~s5&g=WiF)I5rI}HA!0ro&7=0gEr z^7qydlT$)wyI;_L%lD7Xe=O8Lvqd#)IlIh25L`Iqq`M5Ya-!Q#>n`BrKF4(j(F51qg7zN_Tg|%;ixPhJJad zW^8B(I$V}l*D%Tr`NE-`Y6?Ih>bg{4(#V;ls%mKbLQeXtmu0uED;h>cQ{Sq1@P(lN zx!QSN<*3OgYO2{vowY)W3Kb%azf@>wB(W*!!IvNRj~s715)?S6HR+g+W4hknN&e*A zXQRZw`qMu3yO=B*RUW$-xk?&EE27qnw{Wuc8a#fMjO0tze4B?{1>?$u_aTU_`T$-a2ZZV#k-Ju=GR9Zp_@R;qnWbbC(uFmq%Cv0s8HWuL2 zkK`luk?P(5UK`2WND}_FCD_#9Moxmco}-(H!O>1A_1Lz<+u2W(IfW`=#_2jAl~NB| zK$sPpxYT?G#uuF5nj4kD|&{@Gp=YAhdEberb$Sw>J04XF}uUs3*PuJUh*`ss2HEi7#Xj9g9? 
zEEYJO;IYzHB`5MoM@#AJNYZ)cA+9Zn*XUMhczG!3e69!&@saY>aw^y418$++wzoNo zM0N*FW_4!Xgw|BAs-Iv?z(k22_RY4!nbo)Kaj8^Y^yc~#xwp$mB2FU62@k6!vy}kb zOq^`_?}rqbQ$tr+PWzc83`EJ4K%bWy;-7L}3xz+qw%?9FATfHiGro3~hn6df#FSL< z@Oh@RQgL z7iz;>sOchB3HSP!@fIGNfhOk8QYXuxc(%0f5CDeL)XEE_FfkjYq>^%|%?=w#ll6;I zBg;$;JJqhQ3i@--bk7UYCZlz>$~br5)$3^kOYe;QyvVM!sO0xfyEC5^AEs=eG80c^ z?0+F6_8l=8_PGvo<8<_N{9&~ImvAEF4A*Y7VOD=e)I?9w_GoHLyfGstsg_TMk5W70q7cdFoZ_2VY zIv=4XY(;(NydE3mKtTh_u#1ZeXPDge^>r4jc^dX7P>wfC`r!RGSfBFmsBs8U%qam*ufeW&fdJu;2%neOQM^Swp=p_#Wk z=54z~>(g+DL~FB3W`_?>B)Qgpdw9)Kd8N=mRMre~qGMQ5kva+riiCs&h;s8?p;QG6 zd$+5*JMM5smBHmovB7%%rJGaI$zaulOGU;tBjjeymG21ZztGc7tYO&72sO8U8iQVVt<+Zsk zxg8G23({yz9|ihAK%eDUFRe7sWK$?ps}#IL$(PP_63@Bm%ATWw$~+>O8oB=|@%;CX zWLupkH25Xxy~BfGB&o!+XPk-K(YNi9(fda=cWBg1 z5hO!)jrST;P@@I11n8FaPfMort~O8$pGcvqY^-sqcWS#wXHgQV(;p6h=VE?uw zCeXprfRK;M+P}CMB45jUB8r=y?;wP=oKdx8X4MUqFWrmz`u9kJq=u~z-&m{$LbCYA zWXRTzO>@M`Qp6i+rcUnys@vs;3e$;;Ul}eJ)&zAp$2*5#z=c)W(bg5C@AiMr;HOI{ zzKZ*PP}BwRj(uth%$9x+Jn6&{z9^_rKjK{iV{V1LQTLAE-c;b>X%2Z}FPt`61@!V( z{!B`JXY8CPtV?qKQFZ^NTnHCi*7{LGTU?rmjLaq6;XI~`mE6HoQtJtyvY&qaTdy$C zWI{mwNd%zE>+Z-1vo3A965KB0yS4cG`Gw8r+rfklb{$nnnW1dUPobCFOk}+Zo-zL_ z{R(S>kg0y%8(nAr2t%Bdi&E?NW>gn0`VGmv4qTjxSECW^5I5|cn2gOM;}dsir26PW`Y@ySo$ucXy*0< zUC+k%(Ao)N9Zlk%+#&W}|A)B3dtSte7?GNn4_uD@{4=Mw*osZMm@N$@-^;fUN`@q_ z$lAs^r`oOycUTp(7#IdtA7~r?aYg=Y>Mj2pV4^PeJKaNY*pOk07aF{gw^QXBF4jb2(jZPA zXM^V?=3}(L{!^oYsx9NXG{drc<(7#C!>0lVW%d!utzX^wG61L2ORRo&hc5B#{+4Nh z0wh<{ftpjuW3WhmD~va>>Z$98f)tX1go?W4OXkxLiB$&zCo{Mb;u}V3aC;+F{O1`_ z!ks>49G-+i9|dSKW>yqirG2fX5f~X*dYZj)$%Em+lTs_rgE_io~ipVq)Y3`2TI~guH*Hj;B^*&u4laq0U!K zK7RE~7QLoc>IAK(JNo&zPh`R7FAr9J(3g!E(`7}x7<;Zw^SHmS!WlJ8kFunQPnHo) z-(6Cl*7&^$Minr!?x$@=ag`IjE4i;VqNb5A8GeR9^ib+%LC$F*>hGPUk{Ua=Ovj** zm3@Lk^x(VIGNjCIDVe1!g;H#Cy<)kX!;v4^(IA4ID;4w2+)fLd)3PF83*TIC7r(-! 
z4e(n>osA=`&WN1xQk>HTFvuIsByg@IUY(kWFbsW(%KJ}rd@_Oo{@<|oFy((>?_g7f zoSRxdJzd~<-VGVbg?cQj)3_YcXREZCkO-k1T^(h#wZG2fEeNwc3Y+m(tdsp;AnG*B zd33w|mmwgeykSTiQ+mo1;cWOU`ok3WuRrj4V)|<*1PB(SsM>2sY~N-yNpEjR@b}2< zl5fO`z7|r606d2kx9|{qK*ODk@wY;|#j8A|{~H**#2-`%@b`LwysqaRkpFl{+;Bbo zOfc!e7qTmiLb`PlGP2~^fHZ+0^@uwz($D$&5l*^rf*<@dSJjL(D`Cm$Uf9eUCvw71 zkws^1hE7H_=u~f`sQwQqWYad-dE9za^T^zGMrsv;GCXUBu;=LJ&b~>vO%^_S(@?)` zTJ&dWgRBwVH$E(c7X5}%&Ct^WGVg^fumVexG!R?5p&Qu>Tl%Q+lkk7cY{2L9);|Fy z1YhCLfe$y=%V-Vqxh$^P9V_l!{zdRJ5#kwd*V<@>LbkgiSL-iOM;5TL0nw}SXJSq6 zwsw)gU-aXBr{buli1g70Y~$?zM;L3@dh-5|g6C7Id|oH`|B9~NZ66;|XqI0j^UKO| zi;c{0_WGDPu<%%(3@0lqTddU~mwX6UjXY(p^wR9iigOM#H}rhd^mj-^IOy2-AE)$> zsp0Tr8fc0m=Wm(DI8!Hu&R+KN}gMu`e-7zjB2Z(yfp#zcOSz(y|R ziR6;m=a~8|(Nm`0@|*1#3~k^_RXI2ml*K-|>Oo}rE@hABXiHmg<;_R@SUZTly60<* z<7LzMbPx{XFXQW9I!@?ny@MK{GVGmCWwJLg#Ed*q%ZXx(dNCME99Yu|7K@lp7t@Pm zZd;l6>;}W+^7|X``8fdWx|MqoO8e#j%2F_x%5iy2`zLg*sOAreagkfXAn43sqq;aO9ND*re*8!u&W3M@%2SGAbioxtnDTeG_f6_3wEuwIjlIb_h%-Ts_Nk{h}V=n6&w zO%)--5EV^!Mw`!F{14u4AyXlrPV`ZcD(!+pVs+*4-|ZSeBxHbZ`HJmssfx*OthtKC zqqn|?E4m54eqGTKfXbu1Z}?Ka|NjgafGz9GzCIITo{N0Wj1%ACwNs5w`%SkZ>#gw0 zJ7z1{HahMCec^#7BWMU8J#MF8w?aU?#?E>4osVL~ zBBaADA^rM!rfLiac2v8ss7CIol)68$C&k`po-#rA6A9xPotH1(gz0l7)^smR{muUk z)aES!PT?{n8HczuvfSPnDjTx3{`i$=aLD6C8i(093+_GYtmg#cjVDTq^vnTly$~V< zT7GfHRoyMFG>E~x7_*TCMLYs#FJ*!AY{=X5e|8vX!oPWC=0a8uCTS`))c!)2E<2Tu zrevjad8#trc8g;NbC!U7a(CLYmgsht7sefIzC}%5dnRt|`NdF2CEQ2OtI;|TDJSFj zta&pdvbx>d;sF1+hl7-huD(+#5X)!vc>m0mB=BUVjBdKY<%Y8Ao27G!96*k%uTX@z zmT+QdxJH3#p_~q*^)Ae84L^r(_t{J*qrqr3892u|vb5lAfXRXRxf`H!y0pqV2A(^i z@oJB|M!P=l6Yvu5jajb09w;+u*1A>DKQ(uQyR#HvJQg%fFf1mlCFZA^EAeM$e*}{>#0(r`dtE(K*NSw;G zGd~Dub5J7_0s;aS>&1)s;UnjzROly}aWCM?;9<>qP6v`hrfTWfya%Vwf1@|N5w=$8 zQ`eqqwC-f?d?;tyby}3ai-f0iR@n{yY>%S{uqS%nSk4|{@+Vf>9kr*e7uw-;vr~aa ztcxt2oGDRG1Hd@{zp)m#xD$8bw5cpi1j@#NAfe^h3RfvpEdAc>ny>iyO>bs|1!qjmg%_0Bo!LRJ{roOgN0i9n zC@I39{s#Cb2Ot=Nd3(0@T;5piay;L-9t4UB*br{UdH!!OiF+P|ZyDE*CB6Cwk2!o3uORFS34B=eROFM#{TS4&2Q2C96Jd 
zx$Wy{lf*AZcQD&`*GzQkU+qQHT6luj_RT1&15fA$YlyTF3iMV@O4i&`%tz(NcMrI; zh^_R9R{fc@tC}r0L1XzJ4ByQ*Tf%Evpn9qxnHce@;obx1n;_b^qxn}Nq^#HT%_9Cv>&=bgbAn)82n zfc_XuG8r#XIFg~H8xM%{)*RjG2g0yzxuzht6d503Y*~=|joZDL1%gNN`FF?+o8ei1 zkjKbAuc?U^+AkE)XSur}Aemm_2UuK8Ly5!s0hBh!Rby)J}>g%5+=BPS8b$z=?tEC zG8@wXNMw5*EAKbL0``6+3B);;gh=LFOvWE+KrMr)yXWvcU}J^ykyA?K$*RlK&Wv%c z#to1wjpirUD zI7+TU=o_xm6;_JAS`JJu8<6xl)Prgj_|Cj$iQeBoi?))JIq5Y2O;`FwYV)ll^H8<) zQ$s<2(h6q5c&|}q68e8gu^Wk&hE$n-c#Ef3mKO90>oH1vA_ApH#|;#^KHjBx$5a{M zo$*%3m!U8e!8RvCxuDqo=uQmx&vtmSwK=aapXN5@n|XonY{9lFDL z`)KY@KH3`J*QFZ4e|mdAYn^zz7XK^t502v2VW>fSzwq`OWqT9v=pgUj_YX|Ka`z8R zz`(%8zj2jN0JEpvD`v-yws&S8jm;*xww*R>^C^F!lw|DNuh?JmI9Pcw+QpRG6)Ztt zD^PxZh=PiWqcrF%*!Wzxn4gh|C0SHK?J`pEe&v4abpz#m`QSw$ytEdJn>(yu?DLzT zQPmm8HSoRK4Y;X*W(PLVsW$C(9xy3&AOrcWWjZ{trQUZ&m`c2;t(G;F@Y1j&pa)S+}zBq+?;!B$*(VN{B+hdfgTTZq76^q zeVK6`y^TFs%r8&d)3)_Iwhe#vp33x?FF7%ExUkL4(B8Lxd7J!{;@Gt> zfTGWWJCz>@Yh(4|4vg?xb_BfPq-C;|4B78!rTNqwb1ih|jGwa`%pSf1wSqr6HwqnoOmo`yS zUS>Nkr29Q-N5I^o+Osx{<`ldYcVC@A$oM~swlZZn2I|#Iac2WB!TLFNDV6q+sfvnj zT7e{h5-gCs&hQf)d0bB0+>wlTS$)wMkB8U8r*)~N-zm9rat#Rz-GR-{maM1)ne*4% zTB?L6+rcD=NvtyW10@uf~89E-T|nq^GX-a3kRJiJ6J$ykFC<>{qYDlQ24s< zNzeTBg$DqBoe(6lEm8KGjn(?B+P5|p|I4M7+2NY?SK6w*Dg8k2#k(*7|Tz7xK*j5q_Q;985RN5bvKD_KL)ujCP-COV9__=KP8C zR~XMlc|s*T28+*N3n#3KG%A_Mu~k#&yi-%2YSY>(Al6i5)7f)O(_xu%{wHOy#hT?< zB-dF{aguQwT%rR=;Zf5A`8RD%f~fY27WL)`$UGq|Ji5g$U1N+XKPOK<$igxXP)xOq zeP}Z~Gq#037i1aG7o9p$Z@_N7=Tcw|*4UWcvsL!6qv=Gf>p5SLSp`r{#gWpDoey#w zAw(?RLAQ^jbgZ%{vniixprsgWb-Ii;16DkM4K!kt%~fNpkiu<2 zOdTr7{aV!7$)8!_?OCy+rXBp)`OYKk4p7Xic_H+3_@RVzp!izsd zvr|{JF4+~IGw<;hB_-@3Nz$J_D>0YS|FI{a$$m){p6hR)cDF-7H^<&5dcYA0cTlrj zqSuRY4r}UeA9N~E=HpGb^u*daF)zj&ipbQg4%oY4ukpV?03TbEmKUopT(c?g^so6{ z+paYpdD87aGUSoQz5`Ox&g=-JTZi@{R(Lb-+cWmBysBBYqU3f5XxDGn)Yxs%qbvM!38@TyBVO9y0mFEZYemi^7yNA0x`En8xf)N||(a zvETKyQm8w)$WQo%9K$uEMLEd`fCDfxBFhQ0KN?8UM+z8jFFX`e)M6F>hxPvf30r9r zd+>)~ub&4EqV4FQ#VrTBCkOL_vWa8imPXsT_9r6U3ea+Qmw~r~kjc)gpXNI`)7fle 
zPVm_?Y(!dWOM{Sq4t6qoGCWCInZ8da8>T?IzA^l$rA@5KnS@MbN@TPtKieXTI<#V_ z?w%>+zCE&SSf%eyjStf=joO@ld-Q&*<-Z+Z8DQ=FD&qFbqK92^ci?N=y>?f@zKoEZ z^>6IA$76Qiq^4Bj*aoIqb|1a)WfmWj@5QKzQ@*n7-ZnRo@64eZUfLF1Rq&d+4*2b# z5!Owlx?uTLbxP#+9ofuXBuMl52paQh8v3ZWAQJ@@i?>bU4&U)D?nUg#>+yf@$73?K zm^i>M%rjlZny4czihk7sqgX~b9F4~JV$Pf85p5_B$wldp2V>p!;oS+2_**{5e2gSN zuZ@1`94?8#i7c3mdwo9FE0z6!_<9SNxZba8w3GrZ?k>gM-QC?C3KVyDDDF_)9R_!I zcNpB=-QB8a6DM9ry@F_y z`=j_|ixF`uT)vE+OAP+)bPz)mGn3C))7xSu`_f#vucF;atzW+~tnLi8<7|z8`TOIu ziLBaspQICeJ&CEKSD{5MSM+n<>N$_hdkthy&gDcQr(^sKr?idKTKG9(AVnv5HvAVjY$a>K2kk+V-0cGZGGY6<4Z|R z-8U!b(2ME$wRN*U#$quSdVjk3&%l6HQ}Q(^vE%>3@H;{OA8_%n&>j5$rjnu6)Mo#_ zWBg{fUiNL^W12m^SHchq6_OYG-$MLJJ?8O-FVN#R3OZ)92iky=Kq$-0uI@W=mAE*4xN6T#J#PFz{*a*axmlAsI8I7^ zbbfG&OJ|YCKu;$(ZoB^c(kDFzGWeO31?s>P)-c?;eDX(dPaY&feIM?^KlrqidZJvY z;FfbU=%PzR^{30~xt_D>fy^ph$|j9B_5PmOlu=@{R$pBJB)M}>uv0*$d5?M5JW^u;z3Ddgy} zd%5>@Ygib2Ryv4iLp}#862x&3Js({8EESPlsZk;)GAY*WzC-{Ad1J@xvJS%e*vrRh?I^C<-qAD2 z$ddKS@aKTe*GC3f-d}@%u|=Ltt|?P4+bU0GQO9^x(!ToBtLg1K!x#Pl`AuiN1<8xq zl3j&y$RK<2QwMoQJg2}?ya%=tRKrUR|2SQBl%y`>Mfxk7o7@2 zABW!16qkf(f?EEuwE0(+^?x??qS=KvN^K1DZNNvDEJ%PlWpZ&XYFSof!~5_EJNjcR5FkU8Yfp7?sRm0 zxY1;emRs7L9~@V_S|Uf0I&4yGeymc1}4W+QG(PQSg4|V=N*yMjw$kOGJC&+&v<6t zeVncs8ez@HKt4B}K0Uhc3*kMrEO zdX&H+aYv(jsK6-eiSGMHM{lK<-p(>kI#APQ5HtG)?|7~j8NSpXI98VHP%Kjh#4Ays zE6%CA*1dlFK!{@i>&@X-v{Iygn%V=5_4GNqmVh3(*>8x@I68 z=^4FA6;sjX91_b_ZSQCqA#u@zSeqBwX>D3MC4zB%=_jx6+f4Gyz`;(ha+BQ%txKleZFB%;aCw*Jt7zZ9ewYGWBYi*g>KQQ`@ zSXn##aaBZS>FY*~PY47SbiN2pZjiH)4~&#{5yHmE<2m)Nfl+izBJluDguAU^z~GC79#K0@=15A#%C79=6Z1a%83+5VT9DN zqOS2L><1^!Yy*+-4{ZJpgT4?;f=cC3?xqC!^u(cg5P}<0V!>=rTa6LBr3kda6^o}D zu&-eo7@oss-vr!cd;hHtK}&Q9w58@!@EE4s0rMW@f)W_&z}r36X~Tp^H__mHRxrI2 zj3e-Aa^%UheEnr>;3s_D>c!%Y5Ca7o0Qe=Oy+Lys)DgreHkj0`YSnjyCm6GFp4Zjl z4<=ix)H{H!OiV0u9&xRf4Uyq#cw|cT*ch`LfUUQbqP?R6Kmt$BAm4QxM-jQ*X7b+=%?Rofu9pi}nD+rH?8IBUOlbJL1`^u#a7%9dYT5v^fm9h_a8o<)o+GZ2uixZsiq&kQSCn6?N(HW3 
zs(|c#|FdP@>k>k>QwoKKCbU>YltQDFoj~_at-nW?T(+)|Cls{}v>${jn;28EZ)Hu&_~+XNuhYf=Wu7Fro`n>&1#v#Ce1+vzi30qtWpeZL%%L1p z?nE5J&dyF~glBha46Xv!#g%81E2jIqwFz#!Z1JZa*#bu&#)QX>se+bi5h&6s9hiJr zGlYpA)$xX7Z~e77PZqJwL1U-OOD%r7jidmvML%5}LnGlgVFSngUI%60DFy(qsL z67u!K&1|XKy3Jon5Je#>7q*65+}U=;XB3V4Fyu@}xHv^FIll*Ettk3&-~Ar4b@EQr z67Q&uzC!wd>e3%up_h#z=lT0Zea7VR6{dfTT;WU)nAg*6 zYW-H~1=*(+Z0>ZNr3fjR!{-i_M>4H&{2)k%;dyq=S}9WPd#P%{;teAm$N8wN%~k1Q z@C5IhM^tsIKKa|Ei$Ans8~X1(C;J#%Hq|&UH7GjJxV>WTZ;6+CZeTe;{?@ix%NG09 zmjCD5jhNU%gXm*%4a@#ET{q1wZs9kGZK&+C}bt-vPzzvPgc^)U=)SWkd-?AIbz0BWY|VCVV#d%?qNV#nT^;?l~mq!7k<)Ps+T zo}xcHtLJsyWybDUtFt@_-B6mBFN6|9o5|V3l!^<}=(lnjJU?vVi#>lMBzEmY%t}7q zEPK`dYOC5G%6b0R_=+y)+!V-!W-~*TMmLXTqd;gB(AcuW{1*?KH@+}y)xjIfF0JM? zi<&E&QP)j2aoQ#QH0poo)_n|)yS z1RC?QmdCf-E^{$TC>WZI(W#?8GI=@UAI0Svc6E$`mWI;TklD3QO>!b{ZzwoKs8b1f zLwOZ6pCq)QcOse82pas(V8K4ODbo$u7ixeMZq%ao7*?wPzb^FL?&)~%GK#n(Ew(a- z1w5vt`VjaT$BUat(C898J3EdH3&>o;Y@~Sd#L;%q#^xA|G_oNcW0%2-PM?*+KWG89 zi7T72H6qQ~M_h7b);TqzpLaatSUw%=_PIa~$*NqZ_^t;P-P>ZIR+75q@d%lrHa12S*rzdn&B{GB`ukFn zgPhvZbNhrTOwr6;uZjttL@>`HYYxKizMO@~>6qWURp(dZCt9Noa*i!*D31)52Y4%! 
z8AE@#HkMM-3{KCbe?Y0?uZ;5~Jlc!ZS;=Q5488JVh@olE9Lo}%jnmqL(h`(?&SPAT zWfnFkLkNEUXnS_I`G!|CxwCtMuaUaCrP=}7KdBVyk6J8X3efmRC*(3{D0x>Q$hozc=J9^Njq|F|EV zR$aZXY8yvJsT-#uqzw&@%yTfh;(BG}6(r zR>(y}K^Dq$luhf>h4wUJ9h74|;&0`a<6f*L_r>q?rb~T&R;Bt`cIe_rYXljL=;F1U z(P_%JHCeb$`J$vR6dT^9ufOq3mPWtuDP(ylLprnY>ns&>rw@307~+bl^(dUQ21Jz` zopop1>ayeTO~C34Z7O$|Q2k$Qf>H1}Ew;SjIl{owdK1+}bY(*MYGzn^Rcr$1)1wrH zNG7+iz;Tls^DZJ-1CH%Hea}R^>Fx24>qsjzRtK5^^`-))IzgFDx7cCz=T-5rz5aH= z`eHR)S9OH&1e>9DoNE#?N&%`vdENf|33kf3i53MSHpc3ElJdcF0~x&4yHh>FlO2eQ zZxuQ5G1Qfn<`LS@2DnP3h-j(F+l2D7YS`URKtb3ly$IkY?Y2O7pFnl=*dZNqi#4M0 zIzGW<(GVmuvSH|GZp?i1f@i&&?S9p+`E0?!#rm+pCVc&aLVlO0Uj#xWQ(5X^K8r5z zhT)8HXiWG7*Y|WIK7AFcWS~w@g_!eN*7ZiUTOFeAWCBp z>UC@jM#}?c9oUp`wO`Lk=PR|?-5^&wpYUBb2^edKG*Rs8AVgQ_SUZ>S) zwC92hmpdvc7r7=-heswsz^n~sKi+7VPI5gFbd z!}H9uo4i9o8u}KhV;9?T$+Kc6hx1AQR=e&Mf%<*B$H%FsE+Bo))5}YzKQ2g4LLm`E zzjYT1eaLR1Omm05xLdo?JKTo0yO6ze@h;JOsk=K|KbTThrrebFBd2#*B7*P+4C_^u zh3=yHNa)ANAuy?(!PTXQi6?L>GK4J<8Py~#!f^qQnlqU(^8()Th0XXX>f){8gW94q zBbWL(u9>Ba_%f1}!Elw}jxK%u_Jc|WcWm~p3?j0;Iz(SxcM{OY7;+7s&$r(9La7ts=3nWzvdaiXc)s_1 zzLC8o&24M7x!{X?^K{Wn2FAT}iz*n`$;+`|vlPU|rFqjAizfDzw>c&4uV-*P3mc7g zyz@fkC!XD=*rF0G!sXpRF|5#Wg!|h4bftTmE)xk3S5KhXtw1O#x18F~SHi`gj%mR_ z&VG{9HsE;uD*XM2dvhQPmV0C@TElWd5=;Ykg$eHVN!Dcv-Yq$AnOfc{^yi(WdVp#6 z{kOp19C{kx6c$zcpbEzaI0pBpHuxV|iulVgaJ3*E>ga zZ}XlwI$uheOO-+_euQxTHQ>vdbA07U(1=<|yqyY26vZNp7T#>w9s4W4R9(v7dB6#{ z^8X*Mj4c}>;-F$tRuNSxPPz7-Z}qnP!La+`dtNVrYZ64}TP!=RHsj|5r%!3Ji&x*e zvD4=jg4dyqkp*+Ij%|4JuJW^e1iSa^!u~P4>mG3;=Yh#z#O%MhM(RP<1EX%byxrT8 z$_Q9O9MzYQ17#{iui0+PxPvoj7`JU}t?|+b4K+9ot$e zAm-p@*hp-LKfCMuf?IOmdZUO&E~Ape)Yv2&*YgC0s^oZZH`EC4` z0bB7sAGlPh55)h3qv5}SBxrCIhcgR*QUboAwmpdQ4Yf(uj+)xEaqOIw?*J?QSotf9 zB~&RT{q^=7v0+ejQKH8`tk1NhKyS-uQtxe=7gt$5Ps`jI z|6(A+yq#Kk`USP^QPh~b^m4x;R^GGWKUBbTh1c^92RFCrYe#&1e54I1P3SYU7sXDB zZxVpk){KcOt>l2)Q}p?3$&TNO??lpD`H#p)=5#FjyE>pFfH*S_6`1HfhP4df%YJn= z9I0pH$Xy@F~j~woi`fZ$&n_ZfX+tJddx$aDG zoTo!221ZFr644FA&B=fiw|X9eM(PO28@sG*)vw&Xm_5r$v#C={j3I(h9){p@wdN)S 
z*o~TEu^@%$N&2L`n<>B{$Iw{)<6e94_%)1Vt!ps#RMD*T-bf>JH}Z@(h_@*(G@=<8 zP2hR;J^=8VA4)a_Cfd`cv4{-Z8Z6-s^>|BBW(A!tS&n-Szgg;L&Lw+oG7tyebGghj1e<3 zry$$R(D`xNGd46^ryo;&;bhb%}8lB;_ymYnW!w9XV|X9t0CC_x*dc)I~++e z;7w^gGbiNQT$RQJ|V6P<|l zN#1<7`0KkW`lFTLBIlvRh|VF^KZLT(OvVPRriZ^L;|Pf;l-+^Q&k`!}74K=Bdw(#m zk0C4}BcmUDErEjBG<_D`81=Oi4W+kY`y#7^x(f7I5})oN&3K6-4mz0X@R>vc2rZya z0h1~PMrY>!sx3-7s&Mx)%r}z@O?J!Szl0lBm@C;W;+IG$7{H*0zGGJAG&+6fL1uyv z51jdn2WQCeF9#u#=v~@;^M+!I)^zwpLpt1 zPu9j>Mf9?5goa^HC%vLCt}U)Vs5`udh9Q=0;VceH~9K5JuLPvNIDA zYcgNQv9F(}l&oIsVRoDJh-mmy3sMw>h?NO4)8EXAnX{V~^Z!i(5q%o7k4lQMgXX+_ zrE@_R5{h8Bnv9XIW1zn3w#f4XbIYa+r}p4{53YbD;oc5oG;WvJ%0K3RuZ>?DZ4%Ad z4e{({eRq#OBw0qEqM@uggI}JfwNr(eZ7r>&TnytUMHZr~KF9MVZh}UGT>;j=q?Xt3 zuAI9o&xYP~U7&FQYy)EVUdA#Vc5!vno|rmwm=$WTW{g$r;GbST#hPIpeI@DJ#Iq?e;kl} zVT$dpv8&MtN8I$3uZ&SR6n-9gzdW$DqQ~LBXtXp9EwfRR0>q!Rmo7Uq=Qo1LPI(tB zx`Efw;81E`CQ`y)C`y3%^?i49K-gSo9NWkUC%l6@z5hZwb!mp_mFK`gBEofup=>}` zY{9N~v=CaC{8LyVDWVoD4?PckI(s);a0#d znz$-~!dDKTceKUx2Sw@y=X!EcUVbs}ljG={I=)+pwWsCJl_KsNqrwV8N zR`U9oJHG+pdTEQ1v5j~Qy7cs7Rh0Z-GF^%PGD|(*Mq}d>3wGap(_xLXgK#<69YV9q z+xH>5khruKprXO76Bjo+E4h;x_b8|4u0|f8w=bVeGZ@WgT6be2UTDU05)&O=g0C+h zB|(MBSV~%p{8*AHl~uZ&>0#|d|KzC*n!0qk4)v5tKQ`SEz}fuJNw-voi^)I%`n--8X7#n`xMw3#z`Z~aG;dFwAI=CtMgNtL9uI9Q#I)NPwp ztTzA#8{3774R=PD&5;6UqX{rtzu3_nH7bI0Td{-v6tC;|Z5m;MWQ+yBr7jNIDe!IC zd70ac!57lFQ^2uocCAuYxin^_{{){Z$yWtU-IBZA(s_HR>(XFYI6XD_hA(wp=3Ej` zrGMlj9%bF~G6$U|;_QkuZr$N-Ic0JdY-M;_jD75!{@RhkDf{@4JuA){0;!vXl8dS6 zbL8O?N!KEo`}kiiz{s|3y3$O~SL(%ZFZV{0dulGgJI8no8`G`3r|!SEt+7xb!F`gc z29;WQ%030Tsi`RyD9Ur|`PEmOY0Mtl4WtQ)DkdT(&i2^4U8*qxxe1j2yaBlh^6%HxZZF5=iM7(V-R-yVwGg6_|koDT!=5n3~5{aB` zbfgEv;qc9NwscxD5d6O+BJ%uF*Hkl}vAZS>ml3#4IWuDxx@wsx5DissE*Nw=zuum% z`-h5n!7L06dYU$UsArG&8&g$v-v%r;Anp%sDLE&?9*K zKc*0%y}e#&Rn;X*env)S=D&g&KNP^_ zON+Ox{8Ct1*@~RO=2y}GxyNmUVnfL~p z-dT&>)D^JX@%IM?yIGr4@V;RxLS#nu$|?Kjz8Ya^O+6#Z$FeE?i`hpt>Fn&((AMUE zu?5japS1s%8NsKfW=D}2PuKJ9A%+wB2MR2taMI-+l2E8WNk!a$IHv#T0|MO$nR@?6 
z%9!`JM5RPY&=h1n3O(N9cnHk}BlJf8zbz&65KAjn=#OYXT_bAe&LQ`>WD*P~w!o*! zI8X{E5do!${t7S}sZCR%_|5^C3)aU6^3cvy3ZNBMj@^-vYRUbe$~6%vq_#Y1^^8+%}iY!B|DfQyY4d4aWLcqH{DWo%^DG z8$|!rYka9*&;b2rprR;@m>xm}J$ss@4g_T|e~h)=ZX>hq^EzVd$M%NRSRXf(Vfo_T z=BIihA7b1A_CvhwxSL|L@U^IXT;mFmphx?3VFjPO=TbC{J+8$39tyhfja7OdYwv88 zTWR@vN^cfKvUPz|K!Vxz1|m;>dO@{o0`EOiwok5bq}ymj{BxlInnuZXj+hvJy>Qi` zNv!)rl2TO1{yPIk)ug2)n}Gt|y5gNu!nb}ezT>4JLQ=XqK;3NqAH`rDWLHZJ7~iMh zV{Y&;29M8UaO}J=8LMu3Fz%zv2Pf!XKe?FDWBl4$c-r{HG_7JYtv}B^gDXA>N~)=5 z6Qc1{6KcjwFaCw9K@aWlGIM1AsvS4;I5?)LEX1k+A@*I>w;F;Yrun2FKATF+MeugQ zUtS+K0BWCA<|pP~EpVvro-!WKKKG}d@2>eQ;IJ#uEB>*oC3M(b^zzJScI+(idoH7t zOVUebD$Q7Be2>XKQksRqfx!uHUrx@&)8TNugQe8G$lcw!+||HZO(TW!1*D3iyUTL{ zV>&(yPV#hVa2NbmNSVH+e^F_i^STs1d0H~EF$r+(5WFj`+FjZAJgO@RU`09H(znF^ zq`8c=R8#j-!Zh$e2wyT)PloIj#@{+Pk2OX-Ph{%1vwp3jc{>DyzQ)yNONYZCs-@%3 z>0*g`0g4{&UFbA=k zX7*HQO5sIEe?axD<7SQ#)T{x%-hvqEeddOQV@(tC$%{<`o5h zJ|?l5Lws_pV0dW`)1L8pEXs(;&m7lZz4(*9>KBqwJ0G-aw`ADL1UtCP?M3e~iz~3u z%mPCyxsjBJ2Dh)zdE~LBO65>?lLpd<6yM+`#fSxuJ)sohNny%TSBy0J&mXh)aEu8P z)W1pL*%iO%17nIW67IKJn=`wre==n8zEkhon1)4(8?Qt!3;t3=N}R*AIZx|di|=%@ zQKviI_BbJO%5I9rdOzKcl})|#d;A{uknf0OJKC`v-NS!LOu@Q~Uh)kh$nNHq{fb60 zk&GZ_@Y3*%8ZH6Tu(HPcp@@ZE-yd@GoS1B{ zzwntku|Jy{eUD1gA*N!CeZ^3pZgWy20mdv zK?&E+A1A5fXOB=dY&t^!RVH7T7H1!#B+9JuOJXMQSb5TC9qpFqr_CI`_Q|NA%7T`=5jH3=NWr{yfTs?^%2LVyaq|3-laj2OnUS|N0pKL z+0%U-w>`MQ>a1gZ4@oT9%_*+Y@W$O$pIZd>A+#v$TYT^At^6Rv8=<3godD_YfoJb$41ZB*=_P zq~`M{zYvdfBQs)zd=^V#|2h9p8i>?I#*rT0@1VhYfZTRmxl;R#3gitBNSXGK#QYK4 z#MxrNgopQV4Pv8Zc(d<0D&LHLzMQa`AehTNE6F}>2ZHM+Wo$2m@nhXblUpRB+y~Qb zZ%s&-V{P>%rdm^Iw<5ihRULYH>`9(aM<1sz-dO8W3k2!*F_y1#grU<>sWjf3={!Sw zK5ojxWRc>VOg`_Z)N}T3+#%V$lzA>^D`gL&%}y4N!ir;f%CKOM<7QNkoQBCgr61Sr z96jH1XeHM73@`bO@(#{SI&%A7giTzA|5t(;3!W%-F9V}a^gV+hL~dmFq12Q=n5N3k z9eDcF*1V`N`~LB7EpN(JCZ)oZ^ObKs&*^*etNU=5Xczk*YHJWk)fR9j4s<(5P5YyK zvV07?xfm#!twv&7rLGiu|CziC1`S-8#?@kp+FZCao_ru)lbxt9`G6cd#LHFdilNZ_ z<5=3!X)H6aSw(;8g))J0>YLked&~9&+3Vj|Q@WM_K+9lMwXC7tOsxr^a_~A=`${01 zp|a%quvEqgeNDmsCaISGO5M)0t$hk+^`|bQ5*N 
zY7OqFy=i!paNgfF#pnp=;_wqoaxul>h9oe`))nuzXG7w zNNU4qe%hj{OLN~jIZ11DYab{WLnJ^;CrqLE&)>(6ssiuqq2`18!`%LDDL?%q?!HUI zFxpLd_F;=@wkyaKz^tR9#?mmTL{o4+9C63_)(c!wkDH6mda3^&?6ne(WD7dNM%6IM?)$=5;)>?cVJ{R{nr(^ohk?^v6%#G2K zFX?rQ4Uv*MX`4YV%oSK|=Lmc>FGRP1(kY915#CS{gyY$p5_=a{K{RxsX0UdqV2K7b zx2|i(S<{SsopkpzJmq9HdQ42Psh4&^+M*GimfR4UqR2YHLi)a>hF8=t@B}wW18vzF zpiBzxV5I)gjo@qk9X7|`jdRPLq=8?RbFZdL%~u#csZC*RqTI~eb$lwr_ZMorv$bdg z0IT1&GM+q5mqYm@4A`n1lT(+V3{`2%M%xy_4xjt#;h3*ji>5cv)^|P^e+vGyaSTl! z5|}yoc|jXZ#<=!pbQCjvGJJcUJ%FT*NuzjcH@i%`8@f(k0Sy640*DBKl}lW2u)scTHH5& z(w}L6mvcq3WW;QL2fBlkukqP;uTh@z>`K`S%#uZS2Q&FSP za5wMHk3paJp&(~V^soKubsdPuEfPAliCT>$T3Y7U+?SF-b%hGfc`4w~i>m+QZV{PvuJFI}re{lV48(3B|D~pec zdT`rx;75j3Ex+P2B%20In zLnzh^$!w=Q-&iIvvyj0%$=|?95G}fjNn*84Pi2!r<vA^rWUXcANYM zn{aU#-zzi-VGE`a&5pxkP6~U!wTRlHKxLO$5?((r!D7738qd_^z*GcT$jO3{9DAM3 zmIQs1$DC31B$lI(AK?T8rkawN{N_5o}&6HT&l&Ah;QduPZ(NuY(#ZC#2 z4qJMDrvI*jtoh$~wl>awmlq`h+}vqJ$YKqLuP)#6fC(B0FCwPxA0miNWMqcC?U=?J z3}R@p5_fr?{bU8UIn#zp3%HJ#^+1!vLdJvr@oBCSFz}i#NJR5}xiH#B%ydkNmNMad z#oSVZFec_CkxVpntuH|Zj$y^rT)%Li{%CXVGruCo)!(o+v14v?)sl#gSr8IIWTqZm zyPihcVX4ODk624xZ&JE% zK90#-$c5MCg+P_b92d5M_ICf87HIs=SS`lh7iIZ&DRafIhTq>A|7ee3OTjgWz?o5T zC^`jk4>smJ#_Vkzf_+P*6pn40`z1kWct5$P>3eIdbodUJ#m&k z#=w$>crW1R*Wh)*hH6ZB{5?JKU=^aF4I+v`{HpATU#cQege?ATy2O&v>TadVGK_z{ z$k5YJi)1jESuA+NPQjabxe(*|BMeO+Vcju=-ZrUNlu!2QVtq}W3Pe8>*Ji-c8_ZwC zzDSeP?F!Z7OAtO~OOCJXO3ea{@x^x%Fr)EWWlFdjD5l($v)XrxA#pje_6Q^W4#m*vTJkpdtib2n+R!K1|o1@ zHvjpqNn_>FJnX%!nM_43!HVrKRyw_%_a(>4fbH*(gD-c%uV-3MP(6vAhNtt?Z||oV z^*~fBrf(kuhZP$}y*(NNgbtIKD-HO-q^UcCl(e1p%@P*wE}VYs5SRJ~r2t9m4c|xn zuKU!UL_nsw)jdt>{Fd?v#(gJRc`LH0lGk?(;5u1PJ0tk3d#$ks0^p(*7!ddy8GmhW zDJ-4jKwtL1)=O^i(9lx;@EuZ73#7DP0y2LL!XIxMuU%}5OOkr1lAJv9*jfMbACw7N z>BC)Z1r+1kNuoW=24IGpaR4iklvIx{F%qg`9 zGS;DCWQR3B##9RBuj*?Lr`$`K#N`{Q3H(g;zgLhyqsGc*Q_FQ>?O8e)xx}Rcy;}UW zvIIAYFfT$AMKbVL4~7a2+mn@wcFW99m>Q;<7tWd}i+^9MD?KX+lTEpky?&0!A(S^}1>HL1F`4EFl0T3%s1MMmg7|soTB)CyBl9 z2fvY<7avW%S;dzIXlqvTj1$U!JZTy33zYlvoXQLTM^yFlvqbUNGARuM?dk6gCD5{x 
zIBUV&IV;$5Dk?8*YGRnnfS{r|zQ(0Rzy4eEW1LFfpKlqQm7AOf$Ax0X^#jdUAuhr=@ciR3V@AM4k_ne z6RHpz8@ubj?)0NL!LgXElT5OtUH%Dt8 zg9{K&qH4^wI;U`#O$W<|m;RLS4~U>x%ELE3FEfk(`Tf)VP%=;%&P-3KsnTTs1@PNa zL0>&)`xavd|C5BB6<-uEDWI@Ujd^#0z%$!2J|0w?Y~4n3vZ>fMs7P!&fn}4Q)Da8k z$^3NQ_`S2UVjTV!Zd_Kcdu#VcBqiz4`oI}POC%Qm-@!@14vY;D&Zz4kD!n)M3#k!K zgkILTzq6-=>#^OSu1K=B>GjyG_Ydya5dzkbj)+ZzQDNcb%mX0;tr+R5uIwNfvFW_K zdBuzugiRTb%*Ylwt*E5?r_{lYVM-<~*md^GlkY z2aD(_hsQdfc`6H1^n8Q-qbV}A3aUJDKWIfR0RXT<_~`B~zzzRY-Wsc0B&6qkYdr|| zshIo0;s&ALWt~Ltk~f7hS(HHqrf5J``Pl=wuJbUCKvT~*INP^+JJ;j&J-q1tkGa=! zqu_HPGK3)Fw|^ah%yJf(Vt1@RNH!YM{c(DKoZ>{%mKuHo7ytVE%G2&VzWR=t&>UJUhP;Or zkhb5Yt4CZA0=0E%2UJ1+L$$9YGS*f{T~6B%1bPo$UTsdf9(z3e;n0v+0)vQz`Fm9R zV;l~VfqXI{ap(N4`U^&ORBpFlWcxbSqjFVY2bp>#maLX=o<}y01EgX1Z)uG~r+4MA z2=XVXT}G$7r0y#;3Ut<>Snd3Mg~>|l94Xcg#hHgC@F6Pa1;CAyVQj6r8-IRHE_xbH zP56RmF?(df>@d(-tl*Nk7zt2lr%0@afM{lBsh0R#VR)PHFb~iEqQ;0xLN(u4w;E8( z6qeZ$7h|Ks%HsJweDwGe?M=%1M|Foxdd%y+tI++Ffw@8NV+@pP18!Z+T;_nAao>(@;2kw1(=1DO?nT;lqsKA#1?2qJEIW!cR%ncM)t#Xo5|TRz_3e4iR+CV zElXqRTJt5}^fqS9Di6gwC#_P5!ta|tPVMr+f0>2z%CHyC(fQG4i0b%(k9NhN`|j6% zKP0fh{67@~ zsv9}x1o9uB*7pgppWkTg|D zSKxBfyJ#=uc1ip3u}C9+=fX%F#iy7Tn|7DHmb%;jEFX#Y9g zaT**|>6H7zk{P(CuGrvUdOnOaue+$)^%32Kk6o(ukXG3l^A@hg<)Aa*Ua27UOnrKU z6qhyg7>07`EIH2hqnA^-AMLyU&;L2T3_Ifn@@gdq?~bDafrAxa$gr04^OErB7qDtr z{_Hpp>|bnquEBbg^Ad;oR^jJX!ZLl*wFuUENmBH@@YF88Q0i$0)&z90gcqlIs0oKN z88tRjZfLbMD%E2}Ovp}m89$o4v7zr)+4gu1Cbh>huruziszg> zN1Tv(j)K8^D|^`HM)JfBk^SwI6ciq4tN8BP84f6ULTI@{A01wpdHUXwI+ZSq>Xjt4 zpam|djwRK2*haasFRIuebzlBq(1_#_pVf{P_S^by>#>v~)kbFoI&DPkr~>lFlq|Mk zAY$ic`-M2MpB~lU)-^AAOg!vc^dSYo2r9*3$<0T2tTJ}+X|KJB$=@d%z6%}*Ukk!i z`T_+7`iv>IZyZDdbo(!weO+`>fi;wSrpc1Jdwtr)Ij!$*3#M@#tQhNm>~ICK=a>w} zDM?%tp+x7WSkUG@>kn8ENbBv9O5$fJIgZ4yDWBk5uTT>%$y=OB8pt&$QsN|B; z-rZeVu1gAEYndf8Qibh<{ZCY{&S`rS@rq+m!}U^a#ZPE(|>Jk8q$+ZXIBwlaS`zCB8A45o1>U70LnSCdzi3(ME$hczG*~E4|s4|naE-e)tjBC>GBahqglEwfRUDQaK zSug1+G4Vy07pDj11aVrglBHbFN3yoF@a)}*U2pd|zrrEDJ{AF|*>qv<0 
zZoMP~^Y**1L7Hhrxp}*aUam@rPZE3s_2F0JS2b=ohxtzFm6sZZQW<+yiDsI^>SI$0RQ;8><`QJb2Y}3MQlF^AGkNk~>Z{#j@*e;R? zg;r#@s1<}hkF!>3ssHyt%3S%eGGW0LkUvMnzj%cc5M{ykl&H#=<8Cb4c4z+0o$~T^ z;r0)CBZFBRsZ1o(f`YjgzepB|WuKOT-W%ZRNPqQ>=2cuuJs}46fHO6J2gr-53dw&2 z5>O5G;@EKNzyOP48W+QNzm1svUGEVe?*cF<|HBI}+`n%!2%Y!T{6k?G74B$C^IA zJCe*EEXjS@iK!8^zjyq>cX^ zU{z&4i=Q8v6WeKy_Mo#8b4h?5bvfwhZAzyZCi8ovQS1H5l!RAP$bdZ+D$Hu&Zm-)& zH?B6%zs+5j z_Kv+r2MbJdQKWvoP@1@*IQ4$@7&m$Gs45p2$|QUDPO*1mIoDklLhacGa8>0@pJ1@lX z^DA`o^*Zr4dxTKEPX|01w%7IEn;Yv~qD8{%jA_M(v3n<@>A{|lZ-7?-|7HW?kr~Yj z?F!EPIah3;=G@5C>VQi$$J2e5hnvZkI#%YQ{gas^KHkQ9s0&*`Ow+M;)jCTW-yT}L z?gHbGX?_w{cCMEWk;x_d*$5?x!(6&n+aZ#5R>N+BY=s%7_vMw1@kL5!Qo)&HlP53E z4Z4fnL%N6DmzbL6{MNJFrwZ9gQs7^FDYG^|Q3zRsw;jyby)&?_qT_W>5U9b`h!#`W z?d-|?%W=HTeotVkDF1B7am9z@w@xnjgSoeXM7|UA1W{CgW?R83P`xm`(Cb6s)&|}G z;%srBhEbtyMAv!{p|mO#rfqGyc|AcMAkX!8GFPqXDs`vJGqAW$Pd?R6|WWm z<)Ee`)=7TpuW0uj<2D7KdrZmjge1Mw)_2t~c@7G-4RYJ| zyX=i%{+^jiAMuNz+AN5k5!njbQom?PWqi<)s*&kcDu>*=0Ex|xDwAaHy0KDTlj*#j z2}9`OMb8};I1jzJHh808t6J^b#>98^G{SXNkuoB{HVua#J^Y~<}Ve3ofA_@X+>I%7=*)NSYyS|$+|L%Uv97$HyGO9Cn%fJhy$(&6e zOYi?_$@r&EbhhOtQ|j&DhmlQf$$^djBr$I*1s9Zs98INr^ub)E=*7jF?TVE>3MyTzrIWsMG%jqIWK&hdJQQm(^;3OzQj({ zYu#&#P^43==Xq;mhuXM!dG;KL_#x5n`go9gP;=i2*;*tFCqTwaCQ1TNcRm5@jgD^x zTu7c~pn|IJ@!@RXQ$=|3nJ6YElKbEG$A4ytsFYpWb2%4V-9{Weju?Jb{t@|%Gud9Y zF5A!ryx9|)3TPvY_gB3z`9MX@%8VaganKu(g7&ioa8;9KG^~^IFzQ1uc|B5f5=C#Y zkW$9ZchF-hwIfV$Z}#`u(2%aE3a{>xsB}F*g!@A>ZQJ)zR=G_)zN_~@tf6WxoR~fh zE@~ees!xSQ`ceZb*iNum@_o7eBZwtuOAQNw3hmn5`mzrTxv2I>a(GwpCkm~i%Ijv> zPiKublwD>&RMjP$@O1G_A3Jmm{d1+jqodqu(+$=eBl~WGlIY?e@AryZTLa@q=j>U> z_8O+w&$O@5gSfp7b)&-l67D=bn?t8;PJ2pp9VGbD7QhcTXX43SbO$WF@d>Ox@TJ-< zi4Cf7dx0ZVa^v*KAY;I=P!cJ+LPmQw|0b4Deb|k;0!F8fXMC2$Ad|HoM4Ed{D=~Te z&EGYi)mhi^vNAJlwT^nyxp#50DP^%EOK3k>?R^B!t%+=3j2RDKclRQ_8#w1Y%8 zdM_kmcWIKH=Z7b9;_L)v=9Whg()#o6U>r2-!H;t9vAiKsO>t_+_~t$2sukwY9h-K{ z+Z_#sFqq};KLoAS>ti=#666DpcdZAJg=bTD+aqg}XXQ-{*2^@`6v_)7H2mBqcg0Jv 
zBe$LgoT1TLz_q}ft!PnitX+5Odw=@iNtQ?nycB;A#DG|Pssi;rRICe2R6-!(cW8E8Sh7`z;e2u{i1^=lSw-d~=R zE*!%43k`GpGm8yv!JV2f|mhs67hRuge*Q_Q$GueTP{F~xxE<}!wQI=DH{Asxfg9XS>kUw=7Ky+S8b z4vW)>u+5srnOCd`gDbkle{oHN3$(`b`hhQlMNF0(}&2Nw|B=N1wx^0LJ z>-u%UB=$9$-=KMs#Zy&$hLjIog?rwV$x55?NOIQ%eg+rB*N+Fg8+IrAp(}{D9TPxn z{}gK>OziL?t;JpSa9ft*^a-VTO(SjwdETuc9izFCp~UNqbgxv+H5?kDA%oHKU$pR$q7A7L)uF*Y#6|j z+OGpm;wZkoomsdx2Qz}<8RD->8e@A$7?(ZLm#JGs8;A#^BT0N56bImD;qdRBXL>O% zh?}SO4X{8K2QZrX3eumJ@ZnpRAq@-Dwa77op+vs-^|C(fI%V?Qlg!`f*=9 zO1jrI`+k-vHVSz$dy87_h5AvZH4EbH@3Dr)>7Y%l_;4FL@Y)XzB9mt#q6RbjZaB3B zg|+vqntEjH&4EwYBN40lNz3E2!~rPB0NPGoTe&A*Nmd&lj;gTY5zrGFAXH8rybUsg zh*GK7_@>T=+?l&Zc3jbjm%6+{@-QhapReEt*B3%xwlsQP*oDADf;gF}lBmUhHRhHq zG|VIqE~09V)He&Sw)>=p^X(+FBF(?JFj#ScXBV+OFMk}o-MW!QGr|Jx3=9JmZj%|^ z4ET_ln``&`pn2wxRqT_!#x|UyCz%!+5G{F&ZYCTIFg&(MJJO&hm&sU~L4CM0{#f8W znRPrexL_0QPGo11PL@|@%j`D$a`9D;aP)J;_He4sX2Z)1=@dDr>^z*Q^wzCe;p}kR zzL27}E+Di3@D8(hpHd`}atkjGmJ`cK63SuDm#pe4>eJ!{y(Qn@{&o8@bC@`rxxG%u5}Td$nJSdYHI@vUDiJ7D?YFuhy0o#zqDe) zW7Y;a=1EM(0FlucP@%0oM5;q%KaZiH!W#;2vP{(p!C57PRBR3z)sNiVQUFZ0bTI=_ z%B3vv^yzEM<^+jDnAAKR_cPVdAC?5G0Th7tuN^GZ-%A93omgpwWo%bdspd< ziBVDRA9qBJ4f*k`^AoKF(#{50m@?yu5Qf=)-D-3WSq;SAitLAR_i^ZJ0TjJ-i@Da+ z1gE8r_IU`rRwOo%vs%cra~ZCRa_% z2%t?^?{CyXQ$Bf9Y29o)g{I8^|8a z*Rck%vCAHwL;ileJ_n8Pz-|>*#L6!-D~MBj*pRYD?mKeJ$Fhc>P;H4sHpt@d_)f4h z0{bA3mp%YlFRqx9Llzy5Q%Z{&Ntd|nYn%9DQzKN!-p>)a6J=rfaSVaYIZFokmxg0o zZZF{*6dAwp#boF@KfvDCQze{Ls9rwavR<&BLNBI$pIP5(_)y(4BtF4+iQ=WQ*NIOK zT3jfnW_qxCjTQ3EMaZ8w2VPUaLj7wd`XdLeH*a?p(`<(s*_L-xnjdmiL4Z#6Z~26L z-4s$>_s+09yv;mf>t#WN!lF3$$J8w8i;Ja}vM^<8(oVe0yd1mKY1O~CFY*k3Sa5*J`2Ju0%jS=GMgEBl1s)M87v)HywG$%1 z#d;uabB*rP9WhRhW8i<g)%WS`gC7!wqNwT{@{rN)~BK~Mn`v~0p!e=(gO4^igt(JQ{>vsGp6;wqF;mP(zT*r-2U&V@@o$Qz?QcV>_rjtl$AYz(upS2JhVl+6s^FB#Z74UI$6ZjBUPCOcl=#JJ$AVj7c%;zAMY z3tl{Eu#?B95&GQ99?W&xNA0jybBoQ&2MfHOx!1fhO`WK!m{~I;vFFS@mO33H8i~Hv z5kpDB1EU@7B2E9eW-DWBt#39OBe(psKSTA(1GNeie`8OUYwxe8hm|+udf-zR%_^&n 
ziqztmDK{W+%Jx*7AIF0BTLy8v>coH0;rt6Wmu^veSpDU`^%>X*R*2r$lLWig!nsZGCAVBd%*r)vPo*cO7fKMn$y0DNWPwZ& z@-?$%OL)ByqIDcQvL}m)qLFiJUY4ewyj0N!23F4A-C!Q1*Cavm{w4JOUFWugI<-OM z!Cm8^7kkR_%?bLct-gBA>YZ9W^xPrY-qr4v#B<@f4nxr9aF!OVK-4HRPoTH-ZtPau z<;lKvz#!t`g30O4K%b(TyeHRcVgAmx=uzf9)dSx%u~g#X<$Ol%9)j^vf4( zZ=e4Exw4?}Z0r4JbsImF?9hq4Xji`uBu@IGTIguwgLX_JC)sTNRgu8V$MJSc`3|md zYR?UPT#{1@&vk2p#3ed3M!j}kLn%CE$&cYgGMZCMx3a8*^cMF#rBisn<5WP|LSgI6 zo%|aLPiiFfsCa*e#=OQ4$=kQX>o-NCG2dqaA=(PzN)Q77UdRh4mWh9unbCN2;I<{C zFuhS6Yx$2U!ph#(lBx-&q%5?Ct?;X7lI7jOwP`=nk_NIgm5C?Ht#2_kG9k2zK#lol z#;BI!%34*ZK{)j#q5PwMO zbZV|rwIbXEQNnqq{fh9QyAboEJTs-qg>i}_BM^3QS`aZOw%!IB#5Wx&vcp!93xCWA zz=Y3!3VnK5LYruTD0U>W0@qwRe!WzIWQ(+t zwX}sol77DQz0yPTVoKmDZi&^n7hyY_*e+iqVmP`xLgYVZXXMAvN$E$e$6R|M$~!~z zGuQ12`Ri_1IrO}5F6Wq@np!!le6L2PM9py6mM$d2BZA& zRpGpz?6!3*Wxbs-cT#ON`Zaz~(R=?bJztWq>Y#0%`NM>4NprjpcQ>?TF15B?<9e39 z9#csYf;H2r9=}lZjUYmV{H{7Z@bn@&)w#3Nldy|++}-nQWs%h3$#z`3>SF6DDjsa+ z#2)B^k9KTeq!#SyD0%6)!3B$*1-|bhVsF-W>gvW9Idi+#_U`5e(<)uGJE%vk!ns#d zmi!=&ZJsT;fwF1KG)UZL#_6&nZb^(2p-t+=H~Qu9(tT?%Adt0Ss3lu$Nn9-M(VgU< zN~3L8oQ0+>&rk7N4C0Gu*sI0;;B8Z4k#zK_r0Z^$k&3~4C{B0HL3w3chT$%kEBW~%L!x%)1R8hOUv>$xzeu4OLjrVs!1Y;s9<7R z?yP5buSbOA+IzPA=A=Ul?q}pirKymM9CTS9*4ILJ5y#+lS0-~g1OoP^9Z#;XWlp_i zzmEM#2|_n1w(nzJB`39OG>x?e3+)sdpQ8E40$ZC}brCr-(Y180&5u#IzA9;p^%odp zK;@is`@QRwnuZFF)tp>QdNR-xi-Pr30fTU~fje9+^Ka<3ULP6=a&Upi=y-d3d#X00%+}lFlTmsNm22H6;aMeV-s|EpjcYvlLwRM~DaHd4 zATE+>KMx%UObZ&J?meMy*5LgSEWPYNeY!VXwezjIH5SjCIk7hZ*~@ssj;5>NXI`>Npgx_radCJZ1~^v6-N35mQ5iKIWTG0 zf~N|Hl$s&BQO{K`>1YeK*-=mD2-PYU8aRvgpC9O@$nRt^71R8l7ASCLNYzU%>crg! 
z{SRUpGE|eKj-F;o;qIJDwu)kJ%TrU9GBF|w&B)srd2vK+*@?L_!@Q&Y=6gfB!pvXz zWl4=HGnbRkk46z5OJIbV>n`V=w~wrP{vdk3wo1i)>mL;V$;4Er^b_RtIMoMI{Hbu< zIXc#DW0n>a7AAYQBhR}}{e&__H^@BHcQ}zAU66P%N=`J*CSJz#jAp7S9nG7{^yv}6 z-KJlYDgcw+r0M2r;gDrx1a)9Wmn{21;*WQIWsahpdN4+q|oD=nuR6@!0P-B+#Wv z{xvgr3dZ|#>qOr)UALb#zTL%c27C&MBrMg5y=w{l7PLQN(9#o5A|YA%HJw1=${d{W za&y^Evag0LAS<$KX-snTL{ZS}FpQZR&5vJD6UUWys*KL0GMY0qPmD1aYd3Aax?a4m z0v41dpvEtgUjpFJn-N{hwLq7+4bZAHDURpS7Op>GaE2$ekAQzu7T^Gkk)5maHfA*^ z7kF_cpK+!ZbexFJ9Z$2@BXdnC4Ftpn@%KdInEWDFRg)Jk9(q?p0q*ep8=hp`A%PNX zY{-y3Qgy-Lcl&nEIt^GQ1K=xv@Tg_a4OQ8Y+fJQYEN81n7={G@RB(;l@jxoM`=T>n9ZJ_Ng7k++MZsH5nFY8@`S1ZwlJ2)n! zTOk$!S@2CD27aW0=Rz=kviT~U&xp@&aQc^jxB!GQ!G+C@o&`uhpC<{0oDoVrrBV{7 zT`B?=NvwYTOd8q?E!Q&__z~a`nqvDi;Dd@XgOLA_dEZi%cpm|6U-@q0%u~PM4Y|9P zcN$&iABG}$E{~mbsF(ReuK9Dvnn2dO#Cmr_kYRqj{t)>62_H(TOqjc zZ_L@xR_isL&v3q1EX(~FB5(2`o88MI#cnNFsN8P`!-* zHXt~NS_*X@)c&~k$!dQsy-EMyG$qR(bmVa?HvZGwOM#C!l{KEij{A*bLB!tSYC%br` zyr(+ejsFiEx-8zp?bP*UQ)V8qRu4L?@J>4Zk#a_tF4AR7Fsr4DbOwQXk_)%xWukFE ztEg7l2?STga6El_Cz^HNbu$bm7w=l$UaH_N85eMnRBfAlm`g%`_~e_cNIoA$OOcke z#6RihrJNnJE^FOSbUKz`b}*{?kgFn5DqrKwwLv7+0B~l0|Khg8-_QQQHlUCDcPZ;+ zO`|{0fDdwOt}2N40YA5p>0<|p#^QzS!?4v3c=l)V?_Rd$x64ZJc*v|LUy=h?1dY*Q zuIJIEkSqJ5bB?TtNuNCa6uvW~_p(cUu3YErdKRYcuFO@Zi`})ew9Zm3wKwAbzQ_-+ zIM$N)y( z@Moxf>5s5%dGX?2R*9Wqijwr<7`Mlz`<#cV4G)-fso2;-@r;+NBQVpmQSWPVT~aax ze9lx0#$xx3e~!d4^W&j!SI2&Kzn_=)>}bZgEb@z;i8pcg2d{4u#NW;|aWBvgu>A1) z;q`*UxnrvH?b+V@5rVIs^(3KtqTG!Wr7^94P+8V4wyIaA6^$4`eaLrr^tU0YK1E)8 zh;U;G7ulySTpi42Ctd_p^Emwlt1j6_WNUcCU(mG8lV^hJFjPWE=4&IKR`0#KHiMEP z`$Km<2E)FKpk_b}GKFu_uPJCVRpZIQY<_a~2pJa(SiNo@W1rp#mHtLoAZ+hX7u?3WoX5n zaGxu1=hbf0%Yj1U4aeUEocV4HT1)5rORp_d_qIs5lN+F{%DZN(= z9yuk;v9AZsm=d1tl0U9NZWCVtDdAdZ@QN2?gyt@IIDonmZL*}td++mMT}jRN6gdZ1 z-8=A5-HuGBZ$5`T+PC!dj<)*EYqi{(OnQxoMj%g-ThrffVvxrENFL(@x7j?&E3z9B zFYbgXk!ZbGx)UuR1o`{AYnyDDp+DTt?YKZ;*_u(+Tu+b$Sp!V|$nXG)JTv(sy)aGs zYB0jY9#kzBW6E!{>4UGE8SL>$OIkt=8zCd|;Z6R=wN=|k7PKEjqNdu^7~wM^v}L61 
zGS89ob-4~*MiEw6O0%kj+9|c47iVmcj%c@c>A&7phS;e=hnb0kuD0qPY)LZfBBO&G z^Nl>k#NB2#Qfe1e-=3~N`YJTJW_kEf%0Gv!;5&!p8DMW84G^Z%Om*@iCORXFIQ(j>R(RIEl-=`JNK9= zr0C;ClqudxV!ha{O+1ou&#>Zz7Ree{(H=i!fS-_eNQSm!w&7Rh%}_Tws-Ql}&AtHBBsiiLy5qU6C8uA3>Y|4OYn zZPi#-USBLGOKO{T+Yf%X?-pS>dWAKm@wu_-84?$}u`E?@-|jS=azFf|Z^WvfQ@{%D zZM?%nx7dKCjT?4&?-{kMMP^!f$Eg5{OhJV@CJOTvU&%Ay(c9RJjL~QH^H7gsHrlEd zU%lwagCw-5e0*(l7+v1aX@2nP%Zn1Ac`LA|tGb)R;3}r9rf#Qzh+7lEOmElDa#RZ~NxJTJ zVccN2xX=>gWV_aRpO`+}qVAO9q{^KUCPMQr)c+C;p2GR$$za$!N6vYJuX-13m>#MT zTWCh7?h!AFfYU?AH6DC#8HMVs*i&Kf@n&B8nPSAnmQQUASlX*mV`(BP*qP1&X@A#| zkI_5BWs~2ccw8|(@T`0lrFV<3mK?^0X(O^;NHx1GB3KO=Wj7goc9`314@c0BzK8}h zdbS@6p9Gy!uHQrVd29_lN;)$=h=_`N!Kq$fFOu@0c;8A7wA0JA3rOnrhv*>NMZX1j zcUgxMpSxt?J3=;hsbwe1HV$rYjb(2Y&c20fSF}^1u(D%UD@OmT%5+U$cOT8;4yg5~ z)D|RkfaULaB_M7)?`?l(wk{|2ui|U&s7>(L)vu+br17f4e+!<-+q=GESrB-StG&hQ z%EI1y3Vo=u^5L@r3(vxyjQ&n;D{evOOt1M3?BfqGWien>y~6v0?|I{$LAuFnB4;+8 zEmQ8JMBLN;fi+?TZxk{dIGrDhJC(ZvXU>~^{zTPDOla{I{ffjEG(q`0lo>5vJ!c5P zw&I53pKuO?aoNM#?S5r0`61Rq;3ocIR)Fa(_Onh8UOyw-!wpTsM<);J6dQNHq?6j8 zmKD+8vJ9$6b44SMzjP&Pi$3{@rO0VxbM zTbAc)IsaURm%FpDq`a0UeyzZ4m%T6$mn_};R=TgN61@Ef9^_KGA0({Mov)*{@frCQ z&$yxNDE-ZziZI&qX~yslEgTf3^w235Es4B51bgaw7b@CXWcWoAA+oAUe_iVUZcxov z$jmTQ$iW-g8-hVJmc`1MwjA3}k)O>C_#DwC6nFB@M@ zTz2FyCbF`1uV_@2a6_7m#$o9@s(0uxA%Bc+LQfW|F2gZbWHsolztRB#yd_(KuzS+g ze_dd&m9%YK_3LkLvT;m-S%-(~Ygz7L3GKo1`c57`50(kAAO`goI~lG4PDr)xBT|SU zS|cUk=J?~oC2uy%Y!~j3!*^mg3)lR=@mN`r9j})}Tpe72);%Vd7NMsuI@zhex>G@n zyJ1`$6KH;?T0(sc&>ulAZgwk1Jp3L!yjVfIP!@vK#XyGACweIl@3B}@q)&Q;Qo#4j zNg-Q93S-(#8=W-iv*-x4M%rTai3I4QLo8p z5L^+!iBR9i5Q>ay)>qtyq~hE*NcXgVrULAI7Z)RfumOw7Pv_aGVqHhuQY!G;DY6xq zkDvPRi#kfG*c{W--oh#`KVLGtR*MIK$;WX8prl>H!jmo;@Yz&J)(3rRa6OfD*$-Yk1F5q1ss*Nc3en6k>0z3 zE^LX5pHSC^+L?Qd6r&@a9b{#5wV}4WxGsGPC*r9;h@nwelE_L$@YF?r`VM`;^4llv zThkC{M}?Qm0VO%wHEyjh`D(1}#5&CR?ET6H5N+12-Cxi;HQjDTL6#YtlOpCvmn}M5z0n-*Pygw`M_oOqt68%#*ngQOA zKxQ2X^+87`mL2J^;irk;2TpD*>yp=(I(P#62SITjMeGQqZP{6`!rvbe@we~xRN*i( 
zhN|+Cjs&bM`}Dt3&IYlL2a{|2SMVtCFF<)vVD!1M{G@{NyVP~u6ykVd>l}S7RAqKB zO7%U)nr6ir+08)Ozfhknp5haG*i?UQa)m=r#FbXQX8?63HekQwMMg0m0mzgxu470D z{))2OgNcm41>A~e<(l|I+NBbgES0ehjEHE6a+kkHM#aH_9k`l{iDX<=9^XxSG}La0 zg;Un6vAROwj@`B<-NSt^o5?kQqv2hsh7{A4mGBX`MmBdy3{)S56_+Fx<`bS zI2THqk>~QbAG+)*V>xLtp0B4OQap&8^7!2V3g27+PV7!#fMdCpj~!P&3$D-u)vOg$ z$b)SWwgc^P|8wdfYqNdp&EZ@!?PE1(D}K%(>UI{xvlD5QX&B8Mskio(j!K_}F?w=m ziBudfx`b1EPN#ATtdRe3H44uMK*|jp-jsgRM{ciI=CLro1zq`G>nD?gz8vU~|PG z9v_4sw$1<;NXkh0jd5IuI}X#xm_Ok>o7vmMWN_<;oeF=-Kcoh1go2{j3~O>Krc0&p z+(~#Tbyu)pAX)8aJ}v`lLtPK07YmLJv`WZuK|15mufR zm^}omPL={!XW9iNavpcz-jiDr{i^K7DWrF3hYRE`L^>nq)bAEpUtc#g;+7EeS9UIK z7KCMTlOvrpYY)TkjL7(87^SIORTXpOK|a`DSz@SUC6a*KMbY=n-8K`%U!$}COMmdc zk3;Cx`RvFdHj_E~PHF%hO2pN+J74D<_ok3lFqbbXEYE^~UpjXhtMpwM>~0%!ec^-n zr0WL$i-qxKg-Q%AJ!cv zc^||xK)k>T#NA(QJw@|MO02Q{1@kYCT5IpY1hqW=g4333K>?(@+0CBH(WQ{>IK8u3DJa!x~$s zGJBrB-t4%O>YDl-!@Cxx*kSkknrBJSX^9@mP~AsxQWg{_o*R}>3%z?CIUX@JV+UN+ z{%k&4rv9SsxkIgBqq36vxUNpXOXCK<26kZ3wTmAH4I_I_pRTs z=$e!C3&U+c{_@Rc+A&a<>|}5NB6$isYm|RqJ|N}_w&N)|H2;N~)>!-V4szEPrUVx5 z4eNBXFY4%20#I;Z&9 zQ2}ypoiQzW!w}Oc^(@aoeTPTWzKLs>BI@b<#<_I)Em<`1haKnl%KX}%S8Qc_VqJV> zeRADH&$k<`Gq3RJ-=Rn6ku&C5yeb9;VF#jgt}+e+?Vm)xbcof;yn#9^9hho*fvd#t zw$(*H+1S6$;USPrwMSPO353y3L>^CS27rsTd!v6U!)*dU;(sZbcb`n0xv?I!M_6%l z5OhATKt)w7-TPSc-14O(1I5Q8Gs{H>BN<|TOJRc4_PGHrML z%iMHLa|RDi6uI5N<^Vi{{)PkxgdgMCBO;yb_{rXkkNNF&;i%qE4%9?h{Vj6enxhWF zv-8as%ApZ#UZyBvZTLWY{b|0Pk0azEApt`A7nysGax@~S&kPmMj3N&ilU)`_{Q=s> zz~pyg6j?F)3wn5t0g3N2F|YHwQYFJUar#P;fKF^47#KPOpB04<<7PM-npYp=5-F zVeKY}4$lC0i&CRq!10Bx?VI!DW;7w+H3Xl-3&RQM8tzOPuW~R|f54Kn6~IZ3$5W=K zEyy$SfgtFRx1j1(tF|70!G8vnaODxdLRyCpWvjW+Sae-@W~^Ea@A~e{ln2y$@4^(e zTy~1GS{_*`xCN8L+!P%^;~w(>hX^v+Nr)HxkI3xRg}j!PZKg`V&-M)Ykp23JO|AW{Pb4A!r!< zU?|ZkB^M9H2ONq;9)MEJ_2Em2rygR;xA?Nu7sp^XEV|;Zt=8oQj;~R`CF_!SlZ*o6 zct5^RQEFMRJZpPsTonE2XRU+WTIf({|8i`ZrGu{-h(0F91kg<`et%_HmjwLsXXAHh zaN)f!M7T&%HXaWWK7xQRwy&b@Ur8<+xRf3@10@}R;7gF2IqG&TAUpxS?K3cJct#~l zR-E4N)m)|JTHfq2;fzm5YTE;cr5w%1?@dr5Z`Ig^gIk0G2I3cCm8p+Fp)~HUl)aX1 
z#R~ifSpUgQ56SH_v_ z0w^_hWhZi_OWCxdSO{GL58+noVgTPGzc`#oYxTsu|29)TvJi}CNzwRJ`Yt=IA4_Tl5luy%Jxoap*pO3lxODV(qyy8Ebg4+>IBHV(Xo!^Y72ggVM=}=tB{e zVnO;lH$sg9U*Zn^snjpzEZ5uDr~4T6$q~sH@Q2`x#s8E}RwuQi*1nF&AAUE8xF5ZW zdBQH{(~^o8#5Q$sg3a!{`;GM}!X0w9cX}$hP^K)e%sQ8zNAYrho z2!&>AZ8odbfaYeGw4t%`)zA07>17VLB%>`jp$IN(B6*8ARdbcjnNjh~$q&5sEcvfv z7%^e{px87kh)Np0ux2%(Cbm4OLapZXaddMoZnjb`_rf3}Cz1!31cPk#97)2`F_RX~ zE?+nBU_}qMSmsMecBcmQJas<_!ap#3B4t58OpLVT=9!Q~>X%mhVfRq0*&qBFB23PeCjKud%W9Wv^Guc_(Wpb1~S>l3b6L zXLJ~s4UG1vGq-c$*)L@Mm!LNk;d|j!6RP^a%VdSFK-hz1C$M!*K+p8`ntI^r%qLri${jJo<*vxv`w`6@Kzb*@~qx89_|0q)K(k6ZOb{>B!l7FgZ z^OAif|Bq3hmW4cMjLdH=CI)`m%!rxAw}KctGj>LW*ys5WliKr7h4+Btq|EJ*tj|s1tQ? zDc&$rK9cnc9eZ%QuEQQMHPviz3A&e=~!swQ<-`3sx< zL_Yi-O!sB>re%d@^O+Ee!C) zZ@Jr@UHDnX^UV|LYOq8|`}Sb^pzVm+L*z)D4Ob#pBz%;kTUZzJ-+$Sr+{ecU{r>ol z{{7y_l+iXv;vX(R_K)h`p`qX&e@Om%oBOjB&&Qo$&IIDCt{GF7<9F;|^85S|g8Kyz zw@#1Xbg!#0h>D01XR_sxszYAcoVOL);N%X-<7YgjkH_Hm>vL}&0nghE{I_M2cIc8) zQWdt;3n@FNRWbP>$bl@q<6kymT$*}2aw*Y#x`R)T4G1#(*UzyHRSD?a%m(8)Fmjb+ zGzk1GD1jls*It!`A;{Os?ZHC3 zX10O;-qbv&=UNJa!xcxJGK9F#Ef{4&qE*jIFVrA+_4erwMINH;7k*v(PGUyMha>~U z9t$NQjwVx>%%;)s=~=`BqQJ3pg0rzikY<%N>72|j@8}7G=j*W%)lk6#;sX?9s`l%i zf-eenYzGzS?b)TBMK>-&C)|10f3(SYZUBQpH8^1z*fBF} z?Nmy1#~>LLkncV+|BUw;+r=;~+W*|pmAW(dYcY!7|3leZM#a@;+rj|?1P|^I+}%9^ z0>Rzg-Q7cQcQ4%C-Q5ZbcXxN!%iG*62gOD^q|*JXg=E`KTdyv1CcOaTVEoB<>Ev+ zw1CcNcB9YCmj)!W@rh}+hsPbhGw7F|ATS|Qeov&pq<7YVkz)AtJ;E&!S2UNXH{nx@ z^~tm2)jmF{_+*RLl<89;oCqF4Rdx(1gjLYgsLwo&;XbLd|mTUHlqkO2MHj@)f{9`ee^)R99c z5ewJ}uTA@bI$FQ9da}EKHf;7Puos7T2bn*(d-W>6mJr3#UxHOix?`x=OiN z=!Fp$yH;i~Zc>;+qj<}z?}~a4u=ytGGkXVQh4fGmtm~I1YjU6)_Vzf@%!azg(Z6)M z#z4z}OFU_mr7=fkQL}JWwLw%YE)b(7B?6zeJ-C(cy5^=ZK<4e(OWc@mqOWeufub5! 
zVE6*`0crJIiS*iaNAEBJHc<9ln@UID0;VmLk$3n2a<^By1*X*u^FhO}D+@KY*PXuo z4TOFO4)m(J(X&UeDhj$F<~hC@zt|HYGj8p5>nu9$nmIDP-rTIH zEv2qhASAo|gf=JjxuXhJ=)a5t_ieeRJ<<-6C%8*aOV&M955nGeB(**Y7Rndl2?Nn<-4vnk>XYrng2<_V zOIbvrmhe)qzgoTL%HiareMqytb%agm5|3s#MlaxFiQprVNCHoWbaFAn z#(81ROZ*8_qzb9ssvGONrn=)sdYzH2HX;wLD{_Q3b37aFFj{~+KV2fo(k0@S=kOFH zFhgevdzNTqL{A-y)vJmzRFgL6PEg=1dNnjOR^ zG@m`EqMs?Au+o zoHW{tDT?=S)Y)rFzgLmc5DX2R#EE@U_TW+L0gJf>!{#bgtzO7|oO~D?$Qg}BMvRdVL zd@0@nisUL;QgCq8es}Zi4Q*;jwSqZslGmMJYlJ3z1PRd*b(%-FO*&K!x6{SMgHVA^ zu)8AkCiG*?(6!ld2xlXqP7(u`JqzGr!Q+K*Dnsd2bW1}Y-bX9s+g-O^0mjm37ek`0 z22%9U;km|ywH3X{2|O|J3o~vD1@rwhNR66jr57FkhcuX1OHW6B~VFnh*&E zE#{ZL;kPi0Oe5FIX3YsOs=?$bEAHymw;gvyyvfGbhjtyvvW&{a2`zXl{y_hCE2GUq z0xTnPhEUXck+R*s^j|sFX9ji4nKvg)nWa%_e&i}%9m{sUL6>5EpRIC)9u!+7d9NrZ zN(bz$=Q9tu--aWKKI{9*Tsc^{u(oL6t)h>J!bzjz#eF|HePzhQeZBvMb9{6KRmfu5(HbopY(n`|N^ zgFTb>2G!~EmdvDb%y@i$%nRQ-d^x)=E6_-e3KBRt&ol(}%S&=OB@I1bSz8{9udHZG zwj7NCJ8e+#9g<%C=u{`KeJmqAUlao$95&3|3Lc?RQ3=O)wNQ&Zy;*NH-iP{G{{$V* zpOP$Xk#OQ63w?~S7!!(XU%S2We;NRoO`EdT3@4JMW!F2P{T#mZ9KL(A1jVr(_`5#0 ziGmfHB8KA!`lPvqQ`)>unNVnH_hhAP-lWXL{Q=F0qwqYt=^CB>h_EB_Le5pu6mw{yC?}bIUjzL2~6$-BQfK*MJMxH-8qi=~gu7KC>nmX!>m| zNucR#GJu|sXCkSlQMNKN{k{_l;>1$@UdON7*JzP;cZb3ATp=VROmstQ2+U$I zhZU#R-;T_g*=Tc7G1pH~UuJA?dc3u$){(E5=0gWLWa$oj=QL1lH>!xUFaL>!8d1An zN5CboD=siEc{_d-Oo4<1_|ZI@jZqfJ2q}@o%c*9{#60?Q*E{!cXQy{Om!VTWRew7? ztsGn3KqIvAT!u!*;7X-JjEDy{yc}-Yy{mAC-|h3e;WulJTJPZO=O|QD^#u$ARM8P< zT$xN>9@TmVoD7CqIph|)r?A~u34>T`dZ69deq;g$ngWOU-Ld3i7s(4M+(B+|IQzgP z3kSPMR<&)e>yJT`KY*tyzi@N*-=nMZwK<|OgYzT8+N|t5_tc@K2`~kb+KyP$n3W2x z56{CmDg)y?w{^X$UMa|Fube8OT+(Oss@S-C zTNj!w=3kBr=?3f;7(8!dC=%6KR!WeJEkzM<=kdrw40DLLnH+}0O7o$@%Pwg%kZ>Vq zl7Ho(@m#rb>W2R*cCZU=!f97k7P3OX-=|RQQk9dHw{cEijBQzLV=;n4Ip1v5$4lp@ z6mCBA`KoP1>p`!hJ1?_JWdRxL1nEBZp`IIU#Zn@QH_{rBFH*-Gc(((H`ejU2HpVN! 
zCGS0h#bF>7YU@GM0xgG5mEtu;Qr`LH!cT{G^&^VsJ`cir9ZYv&9B(NVZ|Ox;hw$aM z-si^L5k`LIe>GMLx3aUV?oqV}RJa@n*JkpIlJ)_eKtOqG8>u#$xz1ky9o`8idf0$V z4};ZW*;NrJbPg2>J^cJoOlEscb8&ighJ+BR3)~mOAndIZvp*re11o(&CTs_-Y%?3& z0yZPpoAkSvb^2yZ)U3gLn|A(MOXHZ5xz^j*fHBKDG7VDHx&jUaQIV_Nf=tuBE6tF) z1gvwypO!<|%JuEt6F63C_OFW^7|a^biE9dK?^ea2 zinffG^a*B)tzQtyaU=mWlP;Dj0GD!u%D`kW3~_6?dhtOoZYWJY0#COYOc8L_(#Zb2uH5m0+o@@q zA$tD(erJ1X<9uyHNC;OOM@LVlqpw5?5Jc?6++$|r%xE5w3B4MZKPGvbT0CT{?hK_# z$)cmP{)kz10Oa91t#^xN4NyvDR(#kGid%=b=rgNIfKe}(ZdBU*mKh}v&DWb|Xo9u} zxv`{>R=dlHY*dGbp&A*U@wg315B{zw+BLD6VjR27g&@&wjsoU0nWKqWzrD^?5`J!Q z{03HhzKlN>X|e^~`@kjp$cw>AII9$I$s^k&Wc1RB%`~!y6>%QjH)0a;aQ+`PB{>Ca z#EFx%v!6OXmTefzBkk}bzx!B|9q$Uj>cELFp~B2vKLaI6>lW*qKD|$RCoqE{eUU$l zy5GZ!?De8dT)2zM8l!j|Iuhgq*LHMmBT3Hz-E$Vi@1>`b0Vek)vk4hc}y@n>&7M2Ay`;3tmB^zd9R@tYK`BN33k5s4fl{?Dra}E$gJX4 zbb?mD(%3f}&hS}@r{=rXI8vQlbJh#1GN>}h;1~16Uw%Op28%iu(i^(7XQr=m% z2>a$ZnqB_x%|#x^dEMw=Ql*#0S-gCr)y2Yt^glUyOOQGN5i-xjDU#>;X7yNP6frOI zpiriPfvjmhZ+)HHttSIUGo2}3o3=+qK+o=1L8U3XckI&H1@2z~nUkp4lSFnAU zaDXb{K0d1eA~2=o>UR{Ev$d_MjLmo-ZfL>)9)=I+apMk3ls*|c2|2fk%oU&5pHlSv zM|V_rxSbneZ1ZmA30r6e;q~s0+}~XS&jnt?Pcw2CoX^*PTR{( zusFHC>yaWQHV)ub(Q-sb>_e!oEvBHZyD8+{HR6RT{@9kDUKDL=I@c%>DbMkEWOh_X zj(6(zbtwTBcye0uue1KrGg`-R#Ck&IVfdMkd^3aUw#nfAkb^%4T_$A4ocJhIV)76h zkh;e*PQ(8Wt6u)$gnMSDtIFhl13aAe*r+$HKWrz6@m?)R&kq0BCjC#mq{jorc1qqD zXxGy#lS#a(9KJ8ZZ5mEvM6r0%16)n+A0f4iDa^K1MO{Dj%tPUBDmq)LnLL6`bU| zg_Yl<1OARkq>(9~J`prxRIlE!p)x;qdttJC)@{^@);)|bL>jZ_2gtY{#8nys1ae zLf|+b)#Vx#*NT~GI1`V%7IsCO?9G`@@n5iXDjEB65q0P021)W9^#H59e%`lXa(UgP z(k+g9YF$Q%46lLCM)=IZqtputH$A6YdN0~oUh!U_&L4IZmpm2`DpXOJKIA4j*60p! 
zz-=p!$HVayFz*b0Gq1Psw?L%b&TFdE*IiciCb;A4=!PNvOsIc!0ZCd%C`@A# zfH_k1zw)Ii2(m`m@u)z&>HvFGpAA$bA%o$Ym5W87@AZAK_%C9j7aVAx7XX#0#8iWk zUHFhBK2{FN(;Q60_~Fm=z0nImi+{Fuiniewt4Gj*8vE}Q5^oAdb(s+e0b`$5aH(g4 znX#$0g?-A%c>3cJ0gFVtqaVC>Nc5XNT;RE|URcVnSh*h3ChPTI^n2)Q9rQUm_*610 z-QD*4vP&jBqFjQdPH z<)0S_7`B@3y_Dd1s8*!GS~oo-XXo<)8x;lSaS0L0Fxi!B*k$DhTaF))H>BjG?cR(M zb_M-ek*108S?Az76{4ikSg1Xa>v1bnV0pIN4@C3YG~7ZTtw0^TuO9yEK0HyP zASFPveK1+>vOJT@k)71xe41VU8a(wg$NRG-H4g(HDL+-@gv6UBq6*99ODDJ20u#Fx z_i*}wtNs~*$#7S8+30K%m=h2T5|;-Tr6Ob7T+sLkqpp8Mj@S`;)J2nmn+^LieNWvu%iAcEN`TR3rhV&2U(QapftVi*?i=V>VJxrr;d{`be{Y}{~=g9nMIIZdEd{=vt{ZtLCfaa9L{`;x1 zQRL^X%<9$$M|MzARJndpcsD?kD^X@-e_&t*_Wmke_3JgvXrs)YP#8z17aK4$PDMy< zc@2Ae#>sj%7G3U>-3otOvR*{{kdaYjgo)FWRb9^e-WWVX{+K2{ug_S}sOhe$Ot*Ub z!=wO*-}_S#8()}_yCdqSB@}qA+Q5t>UYwo!?gm?rhP}+5C$ah|uV){XMk9Y(uFSJU zeuENEt|S5~0eOx_1nK@XmM73RdDh2+27T!5>vKI_B_zQ0oZ-TxyyS9oGGa4Aon(Jx z>iX+AW{a6)<_!`hU`xHlW0`NmV4b>zjGqSwYum1ASdkExx`Nu3)&ODV&+Ho zgyZ-DqFOMGT;iJyE|htMa!Oqj8L;#2GFV9y_by46+^GdC%OjJq0e-^h!8zDL;#;h? 
zgU?7e7lJ8HNX?EXU%Y*z=CzoU8HzaUAcbg?rc0vn_b`+>6VgZ8uXZn36~689T8Oe2 zHu+(bE4w||?y%v!a3_s32v@Q-v1j&Ho>p9Tu3b0B$Ntb}&Y>F&&X1#PtK`YYqpPZ_ zBYD^L300(hkgVF?rCONH?5{8?57wISwXaZCZB5L1EOPoRQu-m=$m|!oq}TNe3891J z&yet(&zCgQ|2a~B>N%f{NB?2;v_mbEDU_d5ZTqc1T)VowecFiK7TW-mx(vx>`{8>9A zGbHNPwRlnBf@SzL(=+$nM^kIxosgD~jSt6a{tk7kOm<@b&6PIuM>50Yd&>*8M=C`aufOicRQ+p`T6BHjbAIj3HXkOC zVV?mXZ>%dGp8lR=ahE*Jxf0K4Y*&PP_eDRzJS2ctD^$z&c*{10Ov1#w7{?xHy(>8g$0)Uh zUs6y!0`no-sg5iI0)+JVpFFYmx2HMT?eMLD=xDo-WAWSqt0G8WU1I7QN z*ou_VPWS*fz3)LdBTsL1$%)L75%Y+$ODFWpqV+iv7N~dlPpR?)#Zd^GpdWO}cxv&2 zTxtg9pO=GJO%D3$%lMjOLh(%0gVX zHY1^e`8#3}4ir^rgtpE~~l0#u0E$VZ< zCYd=qOmY*nGygbMKh;?j$^yly|1tglF)k}XqoXv-m3AKlre$Z~hwi8F-o!snUvyVy zNuIA00%NaXr~HwN&ph9j>Vh2#+EMt-xIeS`fWH9O=%k)pCp-k7B>hJ;5e#mHXWQ|{<6aMt!L`78qi5NG50;Eu|wZgGYu zcUWWJ??3x^o9o2MyQNn#vklD5#ZhY+SDhqfv(h9%<0_!iI=D@G_IS&ybmHuSazO1= zdmr;JodR7Tz3a?F#vUaMw6mv37X$49hZ;VHN2yvWAV8NX)cK^{q@JQ{o}LDazjq~b z6DJ(U^Z!E83L<2y|nGm@1{L#8_9t6{WaElA=#X%-Z@*h ze?XEO?cRoK|34`g$?7K|RSz<%2%7&|oxW~$8lR}LX|Q8GuO!4aReP}eEzfQHT^Uj8 zl!M`m()SB&ne9tlwF}Wcl#n2lBo=B09I>9(MgQ-Fq}z@FLpTRbN&^y!-gQKdA%r!%9e~|_I|AXOI0K4V~&rz5F{h* zrT`sbD$CE);rXA#Bbo7vph+IP*-$(aV%y8GRu?_^w5FcH;EBJXD`#6HC+ z$uG_srBum<^sk559p8X4y*y8H-$V9mK>V!u zUQNt(qzz9zmV}f3GSKNpL+2Kpt@&RtvOTa?>kz#5W;PcNVzILOOZV*~y%Lf%3t%+Y zxx>j*>T77%VYmNAtCK460s@ zefg*m>uacWc@~d_2DMV;0|h$Q|eJ} z=pG))5F~v6WA?6gLt|oo@vHw$2uHE0%O+06fnOf_$@MJrIVfz?JvNT8ay$M2bmN;= z%xwYgbYkyqUeOf&+{f9v8T(XWrfKIAN&et1_(VaGzEPM`Xm+851dE$*7mhRFTTQZ{ zy~iWsW^yk>j_<;OWT5$-g&e8k!cvxTm(yTmNq<*4(C|7gDZ zrBY2&z%}VJx;(KTCx&fUeEbfp=%Aq&j!o#Fdw1ZjOEDq@(?n-u~pgrlTZN zFH;g~;($nU^Q@un{gG+9xI!%eH&Ix`QZm(o5*)D&! 
zkep_qlSQ7MnnZf^gg`u*HJL1*vwSakq8uj}nW)Nz(cS8TJMuGq^Tz77|AivMSo(PB zM#}?QDfdUNTrlqgLWKI*hN9s3{OPj2jFQsRZ+};*u2nte)HmaNL_GYJmms8vg5AsP@fEIaw43jmR%@mUy}Rb>7pEV)NGm z9fYsEGVgFq?jTT^?DBNs8ypQEUk;M-{)NcRolS2^zSeM(vMja4{{>X7^$sLTDTk0a zFD`qp_~hY{G`KpP(8JckY_v>AVlu63CNB0i}zPuCAk zBAz)X=-=Vv*F|Y5@~b89M9U7LU2?*LMa+DpX3=0Xf-4;Et~T1&4y*oNH`E1NjUXKF z--ikyx%)IQ#j2sT6F8zbjWrO}ZQb0?>jsPQ7{64=9^8UJ1(ej7HZPFVMuFVYe8hk7 zeNHP`aF!aDjXL?y8zN5w-u_vLHkylv=WqUui)7N5>x8|9BUDeF>#^~Co+u^on4@Xk6yHJg|wU(d_ zS<2G=^>++l;rdD68=jKx_WxR?k*!k?|40np2i(!>)JbmKS37;_tcsEGZk|)O0*?m2 zhyP-l%J~^3U3f(rC!$UlsvRV)1ham_HAYUx;=L`$9`7_jy7}3lWOiHKm>CfJ2hl(> zT&L#Z1NT+ud`a2CUWLO>By?Jt^_GVJQV#n@X8OXpo2@ORuyo{GxxOobBx(j?3 z0E*x5F3WJU*!Jkh;?FyDoCz!csuTMX)4#TNiCrn9G)X}bo1B7Zuxd3Io<7*V2l2q# z2*pZpvy3bvY|7Z(9o6u6IfnOkH~HVyBUJBa6929qk@nv5TyXBn2#gTN31%GFc$Sg~ zguXYqJ~}~UFXmlfk4~JXO|J6N|36g^2ICC#hS6J7p}tlIk8TV14^R}*Up<41doiOY zyuX$M(5I*kM!0ld921I^kNR!n-i`#{?rR}*URTn#7V-JSxVnMJ8k8KD(EOK-tWDA& zgv>EKPM@eEe^Qj5O1q0eP>OvWsg(xCU@&mTmxw#ZCSCU!pgm*O0S_vcz&g6E@q4Zz zjsWD-DMzAY^Qd-gb5Fv^t#CUf@+>mC4>rj&Kq>PMH);*m>}r0g4!IOw;T@jr3RVFu zfr8um!@9EB%J>PzV84rl!wkzPC|I6UA+`=Z>Oxp>FS6#no9xlza{eZJ|BQ-?IPM>LkJr1srZUw<8MX4U|yd-I?hB#02%!+>`R!b zN0IXT|D@T+PIf;p&;H+({(eTDq<8Y)T{EOorJzB|3@LL)O%Yj%G zZ7#R#TPjl0WXsSe#6^r8&2nS-w`0Pk1P9mRU&o~dljSb&NloH+3ikB|S2OL7hgv8> zK_X2F$0I9r4nyfQVI*ZDKOa0-omt;;hl9CUno$aRN6JYNL>Wxn?09;GI;TNgkya+x z&&_Y)H+NY!{)i+B7MfQ|1xd@ftyy%7Aq;8Oo257q7fp4)TR;FCsdB1iRr5!(^#)48 zOj*cz`+6$VJl%XsHKx(5f2ID0aAfI=c>b=-Smu?XIc-6wu9^+`0BbEO>S$s4k!^0N zj-GQNQPhh`OYcJkoYC+Su1ma5@75IF-XxZ#E77Hhw2_P&h2r;y4!8G2S*2S(%U(f4 zLNZ?7mjK;{%uGTA9M++bmoG%Zchg$>=C`SuSPp*u5o`G&$RDcqkGMSJFA%3x`KQvX zq)lkf#)rr~fm%uXvBl%=B>QI=#>r}H61US)+e~vwYO_m)&VEzlD~_7iS=z62t(C(0 zPEPcPv|kLhgD{u$T(%pZUY~9~jMrZtuZus7q{a;?!_XPDd6z4Zv9KNv7@mRvcg?|K zlkz(btYnXykay#_y?LlgDV^S*(SeFR4e)uFu_!Mc4FW%zJzF_a!a5y|zw1&ck6Zo; zLmBU=6wr&^!<~~UvbrF%zMQ!Rqlnn+pPxX0%xCt{G`=@-azEeV7y9U0$YE#gtl9ga zzWsO%18eJyVh2a=kCxFHU0vUg=E_G4q|z?#?xe3D?v4H9z6xR$pg3O=TBo6B$y!4L 
zWB)vwJgR!PYh;=!O=m2s{+<7X`1t*>`SS+0-#BJ;IRv*7A~~?Y&e305ueZbmwjyi2d3w1=KY95oHu%JNaor!Q%ka(vmIgr%5SO zUi?ke7owWL4`j#{u6T3QG@%l685P0H90;udzQEDcbxhY&(Aoox8o9h>f;c7sD~Ryf z-g}B%=on{PgB{jfx?o%N<`kLm<3y9dgjF%?zKB3Ivu3Eg`1`Txa_Pid&3q>4Pa%pJ z?KwBp1E_Q0W ziW^+=MfcF^XXC3R#(#(Yf8BsV=0BC+T#^|1nCC5ZbLcS*bZZ>$?Q0wE;(}kaZA9q* z+gTrD?tcNtT#*jPLAg@fVt5k6hk%a}J0}M75CQ639r60WzOMETzYV@A@h@@hR5KHo`KB%|!eaG>pRXe&dU3};9wCu7ufS*Pl zj&FD%V(?4C@*P0Mr$8sZ{{BQO`=fMix0M0)nZBNm4Sb!7jxvFR;no-+lr-1DdLSg@ zaIxT|zc$BQ2+&hsZQG$+jXg&a%_*q=$Zh`>JfFAUQ*7jQj#Y%D7s*W^JD@69@=;$5 zKcugSm10u-g`g$)U@~~Nq{#0VE=TcP{7>K)*P25Pikl^@(<(LdrX}~PVhS~hFs(&k zvq+fovtjK}p>NF*6;y)hnZ#NP>{R_2Ae^_WcRrjaOqW`RTvk z2+{^`c9Y@`(ax0e0Q!PPD*VZ4T^jE_o+sV~cd6s+$i;-*3DC#fn<@7!?EbsJ-Xf%@ zltAmB*zSx>q7|8DDIN3N^mNa$3@X~dSoRptEsCe8)w`8@8}Mgn%}dWUgX$}tpZD6SIAMH|I5no%+n=7$aWOb%Jn=lDi_-N&iUhg?$JZP2$1`dKz6 z*qnXTP;y!C+H33bu{!Rlp*?;w8@;58=7Qi){}P1tv9RJqSk|<=d{pT$)H(f>v**?M zkcZ(fUn?rTZIgU%lL4dXhi%P~N@@9^%1DGb+jICX1zZd#j%0gn=29Dsl_Z&mhh-Xz zNDPWJ^U%H2n{vToAquXpz;V zNayG;(|$3!i`eO^rj_EU%QfcT#%uO2Xji)bt4;XQWQQ0w-ujG{0?8L{!f{anolQMU zJY<*gDg$K=T6ULQ<0SNz;ga7#pY{xg5e=ntv*`e+>#hzgGR=so)qCsd*yYmC4M>07 zvSe&B|BY*}nVidY4M&b-*5$i;&ZRGNu`RJ->xQEvz46$rk#hm%6Q*TUOV+TxBZPa6N=cJop%N0tua|uI zG#6;$*a2fZtn z0cUI1+%F)4Cm$EdV7GaJbxIayl`DGcG3|0RK)5S74cfvjsyPHEvBD|YPFi%UAd*utRamzKn$d3Aq$!>d zgpL25mOoBIwmc6SF_9cAO?6Ybh0DNY&Fd~j>P}CUDNWQ#S0@ZR$6Nf(M?YcLh*P^r5AW)x}eKp0Mi@RLh)klK#t3Ja|F8OujYWxO~YY(fo9& zHB@~T13mN{CYq+EgtE-QQ zykl2lb9}G5uI%7XKsB=R+3ewqgl?j9V)@!N4fNwa)xASf*RIa9T%kW*fjKH|`slR9 zDr(*14LWEX)&6J0c^2R}*4lb2O*w(ex_Dq=1uT;st_d(YQO^5}OPUi2Dy;3qRVFu= zEYm9H_C&J#jrIgbnC1gyQ1cFnAy%1~X#*1wi1`$9(H>`Zcpl-GLPSj>(e`9Tf_T*_ zjRuPrdvgWB)iU=VPM)%c=;^-|l+CJFR(XQROhzZFmCWKF&$4zWOYCTw-eS$H@P?a2 zDPj^+LpAe9?1g^$iY`uJt+b7#3%dD4#o7zaS6LgY@K=p?Dl$wfiTq~G;Dy;d*)ea5 zFB0yxMpYxkx;M5)G_$^X&ha9JeEn{~zyln!q>?LS=+q1cac0Dem{U_Ksfk1km0i+o zcb%3tw;f4@?r-*Dg?cpSdX$O%7)g@AAJ#hTaAdiDo*!7em<@Y3hc~Mxj(>P?-P$Hu zSeb=Nx#RD2dQ7%!xF_AIYlJqKEm+1lhl)2W>`QY 
z-=(eXfjUQeDV876HsOgQZeS|9{1h1v=sMGoakITL;rHSWE3op}Fy`huk~H4@kzb;8 zL1P>aKf%jC&@k%l=|Tm_%QHQDVfllk_|?S~Ug`;2_OSI+Mqi+V+2h~}{xkME-cVBl z-Op2F(_DfR+m}?jdE#H<@Vg^zNME=eVL$H#iPKi)IvxV;;%$WkrVCU3HT_(bBT}KX!jQn+U1c3IqdP+ z+}+>;M>a6`r4D&Q4JI%91_x0cjL2v06*X~G_8M{?dt8@Z-4h38Y^tpC;9A|ifGY38 zCYrgaL4XBRk=6X9FCjAIo0DJzT&Gzsm6=VsV``O;>4heSP=Q#{BIc=iq8h*R6yi*$ zjJf2K5;R>?zst|iDRV?@cVTNGv69w!Sh^@{MC=a5xuWFyab6gRdS<#Ta#tL#9I0)J zEfXRlqooUq1KGS^4`4~2Xt&(LW#_p@Gw}JI;7+F!y)H#H4o)iy;;HdBP)E zK*l$%Hbt+oGLQQA2!&#u5Ux?{!{ryh&Q3C=PzP5!)34~DA9p|Id13P{>Z|Bl-eu~s zFxkNMO^>?Btljqt866_AEX8c#Q~0k2C;10v@@5%LER;4yye;szdttxQA^Zs0~sr#@ZFbgaNN<5bV92=z#C`ClHZiMN!2B^DyE{ z4NA({%KGwj%EFTAy(;8}lLmjc%Zhp#Saj_f6^cKmt4s-U$33Fs74V-w$~A z&&Z<_3uspIHizdnC0iCgyuxYr?G;Kt?T*$*uF)UvCtj?^P)oi$p&^K?8W=0YlA|g! z7T;KRXgQOOBG`ubUtgOo8zJ3#ulEIywAgU$WoE#ay2mZN%+scF4}Bux$7)IJ^f~k2 zzg7)kYCxRBqDI47UeL?F#}~mSzoTC$NuUhJE8^D0OSg)udKT{97l}<--Za$nmpAe~ z--j?eE$PWMw-Q&y$zs0VtNmeR$L|7wGyA?J0Mx*e0TC8V0++!Pd=b#yC9^X}>F3U( z{7#INA11bad}_^gx!~>b)FdWG&KEW~W8bB0#LReD%6R$p53ZV7TbPpqMCI*gQdXML zuN!Q(($tL^u55Gz{L-0V1Bepu=CH)FEmL1<*n5*0Ybt2S#Yq6>h(`JpTxs)}-_Yrh zlzm3c!zv$~tUX4SadQ<@^v`uuB&vQAQ?VyHqEf7!ghyxhjFghni#%qMDDY6w!lO}2 zom8{MfiVNq`D_^z{WK#yrr+x9_(0WRkO$atceT1dzS8Y680bo=&;s!&*R2uXpQpkT zYUd20poUE~`cIgYs2Hp0g$7|>sT{nKf=Kf5Zd32wz^T|0MKK$gw@dq1Dpz9)#;;zM z|Dg5j)cO!tjDlWfmI+Nz(ZvmL@vOop_y$-b7LrNBPgZSNWBhvHRdif{*Iw=E749F! 
z4DDD2;?daLavt9?sWo}VQ&yl@jlepy1r0R68 z=rhp<_8?1TKhmd^@=<-8oPniU4158)I2lH!DEpnX!WB?D8lg}b5G84At|(g1bU!RA z_mAf@EL8>#57O1lAX1dct5Kwr`dNAE82SF>3wJrB_Z>M_3b*XYd;lP+kl;0fJu#UR zoxNf_ndW zl)a%pUxP-ySetT~qjk;+$?+?RX@))$#$VQL|OObszM170a- zdqQ9Px2Y$N%D8ra^`7NQ?PqGEM7GBw&(AdD0wNms=zGEvo&EWxb^!Py@oF;$WeGDe zQ~nSUA)F*>oq>*VN6(w1;oTpn#^Okjs0Z_SOQf16J}?TAx#i_C-1KJS^O?EW3BQOR7G2%8e=^jC|!3;x*Q(5Ic$p68R9Vg*{Q zmXD--6)DnsxdGvmkdV;J>+9wH{X0V~#g_o_(plo9P6Lh@$W0yUR61?5K#h89VA^_& z|E>gAG+=&!=|sC3zmz>wf$G`oYYHUc&3k|!hhT8{7ovkJ_9yqkZPoUtuo(Pvf8LQ~ zKdv{nmtTnryE*@*1!!}M+nE0V{3VNpiz7&hsU>>Ep#hc#U))l;wuTmxBClN>cI`gL z$Pn6Ey_@n!r)=ufS|+9$4Qp!6`9>9*Vz*u+xj*mm2$O`ZjHpC&Kb;@E*o4F{OL@T< zGTgP$8f$j(WGcv6DkidFLq=F_l?!AfJJshlnDV)|vGfF01}#|0iqXBYvpK z&?c*K4s~Y*Xr`t6$IqWJqf_T&M4SyjI$EB8uf(q=r6`QX@dAV-#D0kjmE*O@q60NV zOSg!Gi0Av1G8S_tkEt-!l!W1%6tZXCh{kc@#L4NtE(-D~^6u=d zAw!;LYTrI77t5NlH3NN%D%+MS`FE$ID;|>=T*u@h+bWASLFsK+V|#@A18>8MQCbop z@!i8VZpzFjIl1R&I6Ul%EuU-WuW`rR%;O3_LQ_qAa(h}wVd*tjN0Rt|&{``cmIygt zC=we;=kHg>(Pnu*Nm7~PUMpj<935~On?NQ0J6Z`%EKw@mIy_7{ch1PjAYSzha)o$x)#<#AF)Ha@WWkqU23`U z(zsEb3&R0IndXQ}7WY%s&`ePT5fc&9sMjy-#9eCHmjUF+0F!3DB81JM>}6La+IRiB zKT{!r)6Uh`qr&7I=lx62E%g52c`DywJt!>w{F>fv*2Yb!Ad|z67o-az%8XjpdKX_)H;aW`%c*?0KNBME8!Dpy2QQHyoD(k%gsJQ-Fhm!{lV4`du&b zFB6U)c|&k%`4uwfb51E)VPYz(%g;J1eG76lViuQNPP3oH04eLf1+Vjl<6<#He2l;2 zQmJPs)0S7I5bTgZHC8_ekLp!Jj?2(Jwf4$k#0(WKn9Z%v^={WOU9r-I#r3h~8>Dtd zGq+7w_Z7-??pKMnTW-#@!+i=Oj7Hl@>!@TFIIN&}v|6Xj#S^bv7uRSy0oO>ePjkpp0L#626 zvAcS&s#>{nWfqYkHu@q}rbQbJDTc1Esz*7E`AC7scc(Dy+u8_|#-^bNikVsUh)9*Z zmZ)8mrmI3AWW)$5O}keGbp_|To>YC%MSLAoF`0n% zIbTVbtElxq^t+*_Bz>IKVxn2%&sTK%KeWyGl1$%O^70O*0&RwSqZD)E%1^2TZ8oLqU--!Waqo|QYF zIc=|inh4^K%k)Wi>udGl((K>(8BbayddYupWaB}@-F8tzDixpMhK!seB7Y8Z9|f$7 z$Ns|R`bhZ#YlejD_%uqSFhNZpe>|f~zBjBGCqCklr>jw%BXme-?qCYB(e)HRCW|a4 z9Y%cg%93TlZ(pNd~d98ai-KXtuP_CL|~m)eeomaN6+>f(Z$pAL$&O=Mxx z?E={L(M67pk{z4S)qc2c)Gded145UNWh5*N{6Zuow>Utt4N~gbMXFJ_U2T2* z8J0WJO6rb6^Qm!#h;-4ecf1H2d}_g9q%AgKEP}1@eIvvmY2XUT93K#zsqX4RMpWLB 
z0uT=C_|wXzZ}qqim#7eWy<)H$woqHG)75cuq&)G~_U-UpBla+k?ZI~J}w-c^( z!_zf?doF*mfk^<~;iryI@tHIx%{>;EMCQ`jK;mJ4ZaJ&ViLg798@Iegm zX#&j6gG>W*D>{>gPQmU>gc3v7Rv7<8OuUut80d#Z$_fU*1F3g3hUHD9zp%;T9PTw1 z7aWU8o2+HvxzZIKEI=|}HuOCOY+BXg^xQ`Zk{1$-W{S`7XYknjOGaN~9uxM}&2ZmW zSV{NJMkzTl4~*AR(+t||X=n}VI7Dw&Z1TcIzX_IrsI-cOQLlCsUkkO001(x|At9*K z%|%S4ltdMh&87i&WM9W4LeVpgU&O_+H%g2PGSC?_0xdDgeS5SVzEBV@18|kw$@j9sTe8;!4X25$$5rC8p2906U8K%FLO~$g-gx}Y-AbX}ImsuWg!;;Wl6{L`tGz*GTVEC5 z1a-%CkD)2Ex%cfaW`P$*7DHjnSmknDN_kr|4&&OaDGFi!cRyNVxf)HMsyLK7=zj(4 zsHAWu6kTm^wPt*h=$?eJL>&**2v|9T%UqKM%L`W{D_|SaK0RIY|->t3jKGYdD%5)oe+<{@%M=26Dusd?}hrl zHxkcmv))2~{nusa6*_dSZRgHfJqjB`CY10-tY?ih63{m`t$4+iZ2HvAQ@w+-DUn8{ zcx8(_nt>JmD31_ELGTO9r%YA|fFCygONlB^Cz?lg5Kam(^Kf zTkWoNG@4CDTV3As<*L;`UnE*51s|WAJlCSH+8*BMK>aIA=8YWLS#BH7l;45jCKGA) zpo=6gK0QB!jOXqJMq}|iGL0D3vpy!RGqtjAM54Q1P!pZoP*glV2D)r_(M6-Md;0po z4Gj%D??Dn^W{nzE+U+7Bw!})EG4x-GhO?(VmjcLQL_6_5CLFY!Af<J2443cl&lwU0# zQsXXRFwT%-V6N+Zi8o)jeuO>^c_Fv}cF`i)qp2`f2n%abN6<67=HD-YJx*+nH^AR{ zA_=*5k5p(fnb#3@FWltytp8+gt?)E0)HQMGL}FG`&L~CJG)P_J{I_dZ?(ogSgReO) z8G|dx0j~E&lsf}tLv|Sz*WO@ye0)z$a~6SO&Pa_pwR=>l9+Zi=lW8pV1ma zQ+~8AV%awv@#!B(a1$CZ7@6<;@&3uc>F?z{#lHM6doqxj|s}_|NNW0r1O)|7915z4BAv6f_GIam1lOl-ywiNA~}tr0~z%Kf=EM{dY$)aqqp4 z zc2{UFXn*{@yrVA}4bjU?j?9Yh&GGf{00uj9!9HFlHTouo$*@50=5>q_|5KjFF&=oBM{2)G-7(*gd}!`7Dd;1;VV=Wnb^FKY1yS z-9OoNc+E|d@FXafqt@6hxI}4r`9C}=o?-09TK~?j;*>a?-GeXTJ|;X~iA&&u-_CC5 zQ;>C)=lB!#N*Ni-Q{?ctg#2Vf)&Y)WTcdwsW>U2-s+WQa4ez@erSr#iV@ywB+5NxJ z?y&H~fWqHj;0r%pd(s{LUT;|MWHA1BcXCcJvQ!F?z6*mTP=Jz?@;g^j99hJt<; zR+k2%YV)NyI=VdVsW-J3b;Lv#ab%|T(gO&~(b{vn}-ojb1u3PHBX2h;W zV<|E-W!Kf}7{FoN5HI96j133&B~NQYJ5d(u{HSZ~5Y~pr!Pxq(C6UOaoI-80?%9BW z7rePu$=$^^f`-QmXcIEm=_TzdQA`eT2C|e5djwBzPe!Lj6;QzUjpCbHA6F5P> zxL5Sw==LdIyr~(N0P(rv{GV5yHh{YRHn!(uiz%sk2F!>n}=C+!yU%>chzLYy# z`YBR(sJS>T(0~yIp-z_xtdk3rW zh7|*glD^qanD0_+h1NC81XY`!jhSDlAclq%*H5*iZ+ivH&=rr@hNJAMh80Z(DY~XA zywhD89><63^Gy(i;hOoZ^ndyX<1Nky>yJ(1vRL8=Ye!h@>sfoIe@0r51bcN}(VF|A 
z)I~(oSk`Z6u}U5gsV~e12lMd}+?{Bhus<`M_try&9v=O6NI2?Bq=y_okvfPT@Gb=y zM~0>%CxVEas@*Rc>^KtdQ_vFJ8@y?x%3gML^diPNgOd-)B5&7ROzlfOLn@KKa!D>4 zpRZo-5~yDaZ_K7(I8`40toDDws@8DI?9A4a7EzD(*$gknu`I^Q#|{Zn9X*Yrm@%_r z(q9WH$50t9Uq{*PUQS?V@5G5-7_w~)X4=SmSsU^b^1l|T*eIPdC;R=TZXSb{6D7tm zy)TGoPP=DGgxc>6d!FIt8QR1$e!71}Hi+A8?e63X4S%Kw$bfh8z52C0l3}BlnK%Py ztRhF2MWHrE{YVX4KaLqELk5t%3~2YG;d0(w(Zs-~x%c+?5Sbr!@nr$=p{-qUBF$to zz+xj#T0Utz0c`$sE`&F4x+1)h2op2ljb^fJid`}84crzH;Wb$CQe<&#{p-T)9QW?+ zFLYiV`^?LJq2A9QdzMRYFeOWwe7Kg*%EGMwe-%SL$W(7{%tXk&KQl*YJqZQKXM6y` zJ`Xtb1l-=Z84FbSl(9mT&__QCm{o<)*7|hA;_{gS-d+9@lB_Hh_vX@wFv9=WS}ZCH zfgZIquWt0pAL%a;d^@{%JaQFglJykD;v&NPgU$ro_{np+*oT|BeL~*LF_crgWpT*T zhq(CUjB#u2LpzPt@Y15V_;7wGwBAc`a9G&rjaQ}0*^J{o|E@Mre))W}CeAdomBlFL`^DUFw`dt*Clf&rzSR~|Tk%*5XNLki*@$GuQ zpz-fm-!!F^pdvCXm|9Gw6Por*v}YiZ3kmL7x^kvM5Hq;l@nBX-e1{pPlA@J*7iYf$YO(h$*3<8c zBTwCmJMqWe$xuj z5(6*z&N`(o;sGR~K2v}sbA;9^yWbtkZbP^MXf+83*hdz5p|plSIoH(8ysd%jFKCJhz@1 zoWHlMhY|eeY}CsoMUdRB8a_G_3hAv)UP=Ah@mn}r*jT3cowvbG4#h_Ysuphb?lP~| zdazqr1}@@A0R~988vK>ttr?}ZTqW3f=q`{<2-(R9v&u5dINcRu*H9cG7S@H+cNH4*N;UI3d=t{rRep67^>7vzgBkegp>}9S2d<>#b%N8U!Z~rKQLEPZGDqU2w4xj3S7knrL+9+U z1R*PNAn{8X^V9G*OK=Vd7v&?qS}qq));hi_eCID_HuVzkb#w6UD}1LXF1N6qkx1(J(cP(cd}plkZpT%_b`w ztWDvV*^w6$6Vm0L0?$rUs-GeAOV#ygjcLD>=dr!WZ2u|c6R8rmdpk>Krm+%k5H;D7 zWHm6m21!gJ>E|C;sR3y7MSd{=Q{ZSk$E(n28U(H&2pEe)yg&XJ#~23c#HTKdvmPFi zSbuL9WzGc#xm3J$a$Qufs_fmfuZSRHz7s}}x;hOKki-oW59-lC3+|3PZ#%I~$73 zrgf@mJ1Uh}HwU_TPT_^vDN5lcB6PNK9u7=gxL=V}mx5ZS76*cm>8eA9g48pX=b;a& zaniNUg$7s^ACK?(l%CA7nE1|1UsyCg5(VfCIaQl&0E=&)4cCuvPi=w2W)0cy2<#SU zFCvNi`<~|xXQvTH6IZdRA4%WUHxm@}7j(Z6M|dBz`?BF2n%cx6{i&%VBUh=nIyfTY znRSy1mM9d$`oW)onq@4qBL@?5D;OwbF5a2%i(_6@X?z#C8ew5lJVZ6S$;y}@@f9=j zKzLiE=;z;}4zFhZpTa3roAL5O&jMA6NoB@Dq#%)id*M9tM5u<<2!NPEk3juXVKP>s z0{^?bUTDdL?zEQ(rXp+h_7s#0I&rqKKxFXx#Ja%FT(C=4_;L$daFX<7tq(}b>*IQC zHfpZ5*_JMv`>lqvHR=3eb#9JGl1jFFYVdvHk<<&{*XCF@)t6}`St`}uhnWtFRFPah zhd*7Nzb%}aK-{QFp6R6dBc&&v)zRCDV!XM6tmfh;ruxB*KM5=*4R?0(Yz|yK9k8JJ 
zt$pd_A4?(S-{PxG!&`+OF&J{4wconHX0#&6m-Fo%jQn6yrGmLVWA`mMVJO8D|6oO| zXQv##t^`d^XSi6Ov-7b9zm(Fcq$l4Q=re^D-T)n_ug*u7sK&TO9kdTmy$Rxt8&=0T zW5$f|S$w&raJko>Dw!|6jrU(y`IH4_^c9EgB~Bp@~n zzUWF5ymk5Z#oC7h;YK0OkCo*sj;$>fc94n)$v#;IDJ3N}(>a0w?)g5IE?S!{)UIBX zUVIcl=(TU2!?c8<2CLB7CS&xjFkZd%sP_*kYIQ=edFgD+m+W1oq$lrL;Kt6cRUCLt zcd!$&zKu*@$L7^y=HXM5H!ex>QV@X7(~@mjU1>j7b4wL47ipLg;T2%`xsZG^t1Dcx zm2vNlR1B8?kS?G77ZP!?YzXS26cWFCVnW>jVJw;ha)P@#qVO=>Q;&N+Gw1es(Er?SRr>(dr0!t0zChG z>im}>f!`b`k&F@r!r$x4`g*BUvC=c*(k)C3^_3DV6)TSQAXU5p(LE1uw%*klZd3(pm{h7MGCzksX(XX@%y*Qb_+H~>Oc<`K7E#?}ULnj=M5EmQ677kM^ z7Lhd;%M{=AkyCncAUB%|FKrq=#jKchZwl(#LMIra&5U%@2&%H`OFrz{nPTMoHks`0 zcRe1kR<52B*B36-Y>r!S_*GNBMu%p5KzKbH(?uY! z)jsDZQ|W}klCYD88k7;^W@wEpca_)TcBG>&GUk8SH_xXUw*iNvd~pE-GxK=hdAH;Hb`fyrOh(qNB)hin?VaT983mId<9T$PVyz z8@G-yUmlE1+9YL99gU6KRcqXXShMC)SLX{w4S@FKZxdyYKL`Md=y|DX4?H$h4 zko!ZY2_mH&pMWsR7fuubs4pt0i+l!4LWn0)V*&8K4>5W7RWB-Vl2&b7s_P$v$;%e1 zcU}W;;t>-a)j2RjZu34CjO1#5aQ}_Z9fsM_2PL8 zJ4sWAxPy;k%xEm+_LYb-pPG@xxjm^^eENq7uFQxVyQ03x79wszb?bN-$~Ti+S7I+o zd`G$>{Y(}BBRk8<}6brbkHZ zN!k37B+Yap7vqMU#--Is(N`aC*+tZxmAY(b!5<}ev^T(c*}ZWbfpmBMyzt!< zm2Q?h@ub=q=5iD`K3rqYm%a7*8)Ff_{j;d09D15QP$!XBhEAh>yof`$;Vt+%oo^qw zc%PboM?(0X_Gi2%%jyCcKVzRZZ?v9aqJ&AG$WxiD@Gz=QJ9u{3(he3-kG+sHi2WHJ zZU6x5gjs|)X>^#T9+9yer_d=7%b`+W{sDW2uEHWoFD9O<^G3n*K&4>%%3pYx9FILV z>wKxp6%HQ;OTamSzy-8uK(CAJ{-!*#zV(|Dxw&4LCl2zhjNzT1It8s*pxstocO`sR z*fXNyA7B%RLPNHgX?id~{fju20G2bMAe=kUJsfC9Bp#|O?b>yA9_lh<4L8)4;lH|& zT3Z>(QFD#TC79h^{#tl@au}~8Aw|fBHw-Bq`!nu0ob+~ZLI%S_(|Sx@p|X~ib8N0F z%WworNgPSdM6nqo#{l>E&aU$_js&rcf*Y|+rVB{>)e6_ExMGf{ln-}d;uT)X=l>`f#Nk(>#T(mUsBwBGh@3HVi z-hjuefQ>h+7-BCmUB=#};VedM_k-c+)t<^ZV0aFfQ%kO4< z7|BkdrKP4NIt{kowvO(7r3u1oww+oUsBJjR7dpJ5^>plrA*otC-I342wz#2p*Eyd4 z%t#XHkx)FjAQ{0G8>|(5du=5zAsHO$d~g+4UIFVapHIn0r?Mbx;MRmF)6F8WlMY(N z$vQLOA%EHkL|021+&z5l>Mr|Aghd+B2zUiOFP;?J3G!V*^*~2;qQ3q7Sufo)$jn6{ zW2wR%`ur(7Srja~tDDRSZi+B8Q+wGTAL%~Hij0z7nEL|DAD`9lh?%AwhS z_=ufFA)|%wH=|?xsj!A391br-`F?%>fA?GA?~B~-jz)BDU(VK9z=olT8Vw-mW|Woy38j^9BU{;8AeMNx^s){V#+ 
zP+dWw%)NCo6xR%Gb;Py1tIRT^wp{_VYlsV!e~na;lt@boB!-ope3KXs`mx^bT1{W~ zs4~nO5?}RH5q8a~4Mo-YVX(z!A2C-fd-#}1e%kH$;_2YPcz3#FI+jGkY&MO*WQdZA z_`ho;1f;rjCCw~n#Kuoggwdoj79j87)}$1YEd|P7uJOmp>*$6^V*H;Qe}H=ow|1U0 zmIIAp?K(25c#lbPqLXh@KR9vr;}yP##_pI?iUx~_W;rMF56My_mV6;0nWtMc2Pphj z!PGB`i%G`-SQrT1ZT@&9mMnuJS!YfnBGR5xiM6HzH{`WY%Qz^MwxbVBV5V0lwHKq{ zcYyJI#~EsI@fmMJ9lZzWV|ymt8}uiv#{{2C{)t?J;Z4fXt)URrsoNuq?hu1y$mjry z1RzPU1{Uq?8j8FIg=Abi=hs1Jv{&a;M5k%08j4L*)nvbM|Ga)Ps~Xd~Y6{2?wcw6G zUa{wUEA+_(g0yi6t-(GgFiity`A_H)JYS$?@sJaL|0dkEe%so3(pSamaeN1YmxZL zm7crae(Dl#6MwfQNL5B-9de>Lv+D4MNx$+`tJmUH+T~-42~7PkkGdtCEdFN$6CPP} zmDQA#z(mI!20Oob9r#9uWMdm#ElwW|U{oDddUVG`^x}&pW&}E0DsnWR!inyBrL6YF zKW1Opk#pXZUQmbg9v^cJ?iac#dQmbQ1TRx;SKSY=u0&_AxsgG}%r+8R?xU9(iM2+} zjPgV!-kGYTl?dS$a47UBj$BASN12QYsCF4iaEF(+c#|fS*J2z>rsoj|dvNHc^$SJe zeka`4B4iRl0p$11SVes$rs+ur*4{QT&9x;K$X+Y^bmjVP>XiBNTq8Nt#GPg@%S}l0 zEBBct1(~_y3+Eo|1=;>4C=4J{M=kznoyz)F7yzkLN6Z>1X{LY-DPjL-O1g(nU8hua zd=BQOSbu%VzhF2iDuHRjHRv}Bq`(XjZVB2yc*k!I+p zbzn&3=ZQW!R*2Nu#z^G-iXZ40Sn{P^WH>m6%Wj0Rx27-PaRvL6JDTGyWV7j z`o(R@b6WOLampOJ4#`v`LZVoq*!p#$%prdXM0H)0TdA=a(lCkxN@{)uv65R0 zmSOHEBPi@J48z=on3!h=oWU*c{=K^^C5uC0B1`pm_Zj7nRj+HpU#zrY;I0b|uj9CP z3u6AvIgp`yDTBekU?*C=dBeo4O*&IG=bA&<&ewl%+{CA5&5p#a+60#*I#6R*9M7zR z?XMt24Fx1%_9YQfC|Mr#kX1pTW0lxZX=da$qMY#|@TZ^>R|W?v{TMh$877`4wG>mr zl3!O+d0a98L*+*ZBW39Qry%pMABeoB`7o=tzftgEd0{3X6lS_&t?oSrW zQY^2F@wT2Rwz5XnI_zO`Ve|^#jnxzyABlM1zP9TM<8*TM>CojuRsyx?Cvk}pHXcDq zmO`!l45kEvfw39~=7A*=3?_VbOAYnkUQTMgYuqpI&}`&R=_+ zWx0py=7lnyQ3z(BYR-!U%&`_9bAjL6rb39NHps8G5 z?(}_dt^qU7950U20uHsciY4_6A9T-NNTckZZ;w^qyZQeQUdkQ1K_G}%@n5=x|K+IA zxH!V?&Fm=@G#gcrz?A1M2(9T8`Xi1IxFkk+a6;=MnvgpYGVl=~z8lw4@5|&G-khJ? 
zd^Gvr9Uh<&jHaIDe$2uy&LaUcZSzs>UMJex~^yW z4?_5EqB)Z>TQtk_88xD-2kPA!Oey^n5{IsHTK|G^rTqhTZ*DIxFwHJ?>l<56K_-QT zrR|ANZCtr{t5tektjl0sL~a=!P4K+tD>)#X<_ZF9upuuloFh26XgRrTD(8nP;}+IS z5%JCCwk(XLYWgfK>vHo;K#D1zIfxih4z3)gnWLh)d$N_4^>E}l3VC>&q zoxyyDeEOTY`(Aq4y^)oGquFjArco`U=Wn(-?qa!T7;{;+pWiUp_r7&K7c$cxOhx6B zn3@bDwl zApvKQ10N)2Fm{OR3oO#_=WlC7tgU`&p3@yElVzqa;l?iElTjG3^|ih87m}18AEwiI zU}|i%+PB6U8zdwXbng! z-%=dfOq4kljGexIe3-Ixu^NwVW+4WiNHbJdvp;*yXC>vh;~A{Hx)Nrq%5bN1amab! z(=ex%zgC;_jG{L8F8}$2JmfJBJC7gTIh+Jr4C$%kjU=1Q*0(y3qNvCecDKn7<;R-g z_m!t)-#jujlqHL$t$tgmK<8Wvxg%txl4{#3SH#*RTy+B@!z&Aj(&W-SH2?RbSQ>A* zzk~?M4qbDSfFCREz{j*nK0iRVBXD9fl$yZgj7OkySwo^yfx%)Cw|ligrBtJ^Axxpn z%u;b4m*k#QBm$j4@LuO3YYPvO5DBt!1-J%$>hu8e0Nk@~G8}EQfbLoGXjqCwQWe)T zE-RfM>vqon)@FGx&&oO$R8ToKmM0Mh(Yulloprgkf)i3H*}#9R?rnfr4+%&U6B|tG zZe=i=l@i8$5o4e>r(bwxF`eGUjh31Tz z{D%mt`TV363b^bE^C~`YFsd%S+!a02X6F%~epQE0t^!sRTFV!y`Mo;7H>3A&?qjw; zc%;MF4XhJG@^rFmAGNGjlVj2Z92k!xaFgK&vSy7lT74+ODOMdkGnhv6yq-P5z6e(i zu62GYv}iEce)!gst?f*Ttv?uO$ei`ck*K4ePA~LQ>%FO$mP9MjNe6F7#{i}rNp{>r zVZux!Qw!0V+IsbQCzVpXmO_YRsw`pBGT7hWy{pd6LnX&A5dwLt%`F&jzsA?2MIv(d za?3Q7=eVylglWDo1F#MGt)l}fSGu$3eS)7^?LgE>;B#)5?{s|&Yxp`P`@>`W<+7TA zpGJ<~JIj+TzGNi6D9Czw-^5-4_spp9z+kvyZ&y+oOXI*^t+Ew%B0*E8{)PfB4g)vf?Yv@ zQA8o@ko{1L*LoxtJ2Vv35yqPYTRs`^geTuVWg?LE?4Sgx!<4zt>W#5@R0;{B`x#tb z^}}E@rznAT@prWh`?URvx=q$&rakabFrQuF@WvRs%|5iz8`^%qJ(^s-1FX~3YEQsp zPaLoqwxYSnP?Yo|M^DxFGZw3CermnHfo4eXkE$9VaJs=yw^#pOhg^Uoo#?DRrm5@w zg!f(MK2N{(_9)6g1lQD4szGBM8@qPoydwOnJ%xuY>0(Q^s5JyuWDrj2h&6Qcsp;lr zGX+~khJce2OtJWLi<;R_%OKAYf3-t&MCBSMr-4DW@X6j#ZWYEr&Hrj3{Z$l8a;4rb z&;xd+O}B`>!pjVLD?^_|@d{a~gb$vZjs~tdrEVtr+p5zTU9iexkvyGgelu{P(<@y4 zwHa9tnSk!U%+NuRooPJ;9BWsgZ_k%kqZ1&o$jNN-RKn#zZ5_{6dvC_5 zC$!JtOW(anBC_B>7VSt-*7Om!aM(8t*iFc-YJ2b3b2lE1mwJ|)+SFc}F~rvc+W4Qc0C?tn z)snc-u0Xv5>!eS1_V(hEHjSw+7`udq4k;&-!3VlhdBbV&6kA50>e*h9t}5Ia|0T!< zdel1@bRNi|K?_yqDlteG-g8WVrnR6x@8_LtD$h`Q3rYBL^1ycXo*~vU=Jr^G2RzrG z4fEz#-Y%w7#gYZ2i_Ty0o(!%IgV+2BJs3A8-w%9#lT6A?VaB2iMCmr`Ej58B)?tn& 
zdZagIvzTn(RaI=Uf1b(+iRwTQE*3QXPDV{A2<9}|au7;L&h8DzRv+g!wbC=-sUP@l zYv+1XUP->9M5X?0E`Qp=KI7LRZFZHMa~a38j3_3Q_i{$BudRu$ z6VzeTN2F#xDf43u*iO3(QVeGQedSW=Tn8NLhsdvp&J=rsn4Fv(Q3__pjBbzieK&%- z680F_H7D_4l`KlWkYdUJPbjXr@t5-S~a zLQSQh(7)qr-mhKx%+0EEACjYH(`hnrm*Azp9|>aIB*)$`V>T-!G~5D?MuvxU`9qsq z5RU|Q{MDsHlgAT;QhlVY>$WbLqHe+yWFtF8e-`jeMqyz6%1ZaO2AJHzo149a z31FK9nK55-H9bGqo=VF0s<`AE%vd-*UZcTnNPmih>4_+4 zcS7YQH69of82KhHz!B-*Z~B%KEiwn|dri4I0bRxi*|9k(hBgpa_E*>SBL9l@-ybDQ zZ+iF59H6;eJNmnuE`oMb!P$PvONd})od`_FEF4YQZ9G(21qJF~hl>m@d({PA^S?X} zK0#(=+(1q|SL_~Wa^fWK&Zqr42dL3oaq{wq5<}n^tX>KeyPykMiUysHrktGiF?cIm z7%}V(*a65yXlhLR{in!N!-w$Sh)nK|R#@oTNu!Mcm0w`QA6A&2EZ-e~qs7WhU&djs zzUVBEFBAJe`KmUE$diu`j;wCr*R)$W5W z5kBi;lyN>~XUp?pxtUZNV(=;PcPFyhf&sB+z+AUngYaXEpuLL^hhHOHrCF>lI4SVz z>KhpK5laYFWT&~=9e3)2LWvg!^EZ|2p+qqxfPahIJL=T|?C};Co@R)%;o^m5-~bsS zo=bosnN3U#fK{>8dl$iHXQae}ac61LfoB%Rl8Q(U8`N1B6kqPUFJ<=mD%U5Y6P2Rp zAIEk#@Xm|UE@JVxTZcFV0qz7qfu=2VUx1`l8b>HBSv7K8&H$+{M#t)u;ZREwJGoz) zR1AjwhVTRDL^(`I@@Xd#Tg7py9$@aeE>Jr?u_I)Ce} zo8ono4$7+$Pn-XK$sIr0JNq34i#G1Jdgj|M;a_U( z`w%2dxw&rHTK0Z)Z?TfHJTh!LmSnRGy*0B0Y{hL_-l#PFC8_49Fr-jTxW zn{MSp$Oy$6I~wVLtg+MweT~U7*i6xAnK!d-R)6Btym`F#FxY?_KGUjy*7|=%(<`!s zkWjy({gL5N)J%bR{8B95Kca#kgm`#(o1Q=(CFKcc1(dI#5vZ&9zV~IL)06FQ&&pI5 z&wRD+N9*FmISSo}Q7fA_-p74gwSi~#DbLmJ`uh6ObHM16qOYNT(LldgfC@;68%(LYbknw!f9fSbP3Q30kx_{+Mbv(79g2D`C#%!Jq zGJLu2KZIZMisM&iK>uk=ACq8!Ps=~`HYW+m*&gBfaM79O|Mtx&%Yv@F^kK8$q3*h^ z>49?58{>uDezza`!y4o+1I}u*F4h+eTfZ^H1+wM%pMyh3>t3z=U-`av>CZ9(zn3I^ zGi^_mjmEDMsQFmUn*zB`ISp>Y>i=Wy;dZlk52pi~Bp~e*t(!Y;lnLUlu=pSG?^hn? 
z;nz=F3|{YTH~-AxZlRk;9rfQ)NBIBJp45SAmDd5?!Xt3H;{xV??&V*9{ccW=utNgf z(SHt~ACwp?e@$IpABMk^5o{_vk4YYF+S7uJYkb}g7!hh*=xNB_q$QH7x16uAU%K#g zfe}BwH`EdTb%kBnBJ!|;Z~Vgr0J#Jynv_xUs1bs!(m;Zi zqnF#<-=ek%RVJSA(j`fr%iu$*gbjwT@A%eS<_o*kye}`hYbuk4B8Obppzfc@jJu^y zc;G&26K@!7Uwcq?IXb=`7XM3x@9_{t7<6Wi!~J_^9&JaqWBgL%C_!fN!~W0s@|X12 zL~}0lZx6SvYAi-*edPzaSG_qP(qX@@@x+me(x@pY!6T6&!JPJ85#H5N!Elf!IeBDb zwyW&j3+F(gzhlprK+jz`o-QOaJj}9^mHBvWOkhBJl5L$x!zHmhs>wQQ-$!n(Yhihk zzMk{db!d_Vi^1%@GB!M(ui^fRjbqNf#Mjx!6l!@YGFM~)W+fd--$*AFNn6i>eP2UT z;NWBMH6o!n|83RlZTTf)qiQ|gv21VQeB}vsN<4b2S`ee{gi;pnR>Ej2#$eC-9=Ull zIP0RtTk4c0zMF_-JE76&K9XEiM^n)W6P8nv3L849IO_rJ)rSG%hZqNULSEb1#p28k zuieKOmV9w=-5$837FPuU@t7cb^wedX+WgbSxmol_+S#P+zaE3Ds%i4SlX>feB=G9m z!8K*4KwV3-nPJ|3s4ycu7J$q*1B{P?q4Y>?+>BC(2S~x~%&&4pWY)w%?_4E}-rA3c zDpy3m`>lLSk{h51El=;tam>C4bzx&GmUU#6&53Fmj70;`+mF7(=Vg>8&p`!~6C#~l z^=;tX+IzR5>NsOKPWpZZ3sGUTzFo5FK;*Aeb0`}ScSd8y7c4N_MXPfBQV?|%y5?ht<-$qtHiUlkqOzq zkJb2i4c$F2IF~Bv?uoJhUKIDV8B+wp98#@2ieZ%}ldqHVxNZZb1BJ($9%ajlfr(W_ zkfXYW)UxojW5jQ|v=t>7SJ&W6XD_>2ivGUZ?=%2> zY=KB^qbuUIQfhyx(zNa~Djk_CXO+)7Ez&C}8B{jDUQ07HIhU#}+Oj8LX?~gbMSzkG zY?Cjq;Xy^cN2!`g1V#m$n#Hf|VSP&6CYh*hZs?z#4eQ`(jti8C+k#7a#rf0MS1pol zxU&5~0WgScb=DbW!5XC88ZvyjWmB1LmM010)HJk6V12H1sUFDiZTKzGAIsrXrt*mk z+L8Kf;|jYe9-VPPwWv*s4*$LUMRAD;-emA#k76?htXm78)exru+ei)4lA4C5w&BJg zDI51T`LWg0d-R|go#(jiI4Us9`9?t?vEa{rpk=D|Ze72{sKw zvvcw2Q2fe|6dz|P-rf`kI262wbat3^%}oflWYY2OD%tWWntU^jM)WFH6LgGpfZ8>< z2(8ZKe3cH$m|;{c$e+#f&g3a^2!IF+Ms=((`qfg=P^#iazOc_Kiq`_X7CU7fW@^{H zmW8Ree>T*>xsBd=XhSqV%Arc|t8l(}I@;>+XR>Gwr!4O8)h&)0_frGYDoO&bvIWL0 z@EMg-^o)&#VnDvo5#^V$+^#!KxzWe|oyJot9kwv6s>fmjv>+cv-ZFrcON9&JR zl9xU?dB~OalpwC^*mJ2!*@l3M$rsaPu0u!CPHNfA>?`c_8^}h9gMq>M@5tp|4cac@ zZvoJ;&4(^Xj1@TDtY=&L)?U zk^Gol=V42JIwf7Khl^92>{IAkFN>jmOxfK~GitBRawhxCb{wwTE_|jo|7mGiks50? z(gd$~yWSgrzzM&B-S{IXE(5Z{*KXntyhMExY5QV^IX;~6Lgtdhv5l5)Lac=7B2AY?A3fcX-=Y! 
zuWY;{qX&cFsOi%s18m+1EN8OoM=bSq{jfAFTsv^lA8Mgl)V`mxG*4DM_LQ5$W zX>q4e+=DyC-Q6X)2X~4TcXuZgcXxMpFYfMsviE-Xci!*Mxz4}jy0Vg)wdNXgtTFES zJWZn{)&ow~H&lMg3g1-UhgW`>3A@S1;*n205QS~k#_t=|6}x%1bzbLwyfKxq35_i= zJxX&r#hx_=pAmfA9-sIUYhB@O{{`{&e9KVV9U*#yWZ5+M!JS0<>3*|yCNRG?J&2bz z^7Leh0o;MTJ6HYAk595i^W3w|M$OX*m(=mq5@$7HER)mL#a$QP0{{HCLU<8r8pN}V zTUVW%2bDrE9|}R}@wB_&jAGBpTY~qBr0iub@yy z?iMTi9%rx5qxJ<}@^V?=UVPW_If_oP2m~@l44W0Q#(TxhrGstcs6j}e^hwiU3SPs< zK{Lds;%A_>qj8ehp|-Ur!lbBy$@yJA(OQUqL)^Ez=SEf6#4|l z?$#Od1g)+P&9x8SeG!Y5+h}gUxR`_~qL2>n0I2?J+VXn52N-UJ_xW=!p9TN)#0g&7 zez1C9jPO(CRU%;$*BjfI`dq=C>ZrWDxKzV%21q9iS0Nbb1$L@k63AV`sI-rI^5@`+ zx4>@CWz5jot0?>abS0#1Ykh@49%(p^ffz9&iNhruX|;PX?#^}m)3^B0Kv@IzE2a=* zXExuzQoS_|b)Gz}7X0mnR$eY+W$%M0Q^srcRyu(e0=re$7bWHRt#4J=5in^=U8=PO zYW0r(xR;k3VK>*M_w>RrGq$&pgoC;}?FKf2)oF>|f~JxY+hQZODBU(7uQ!~wm(B0y znd)QFy|lJN9?~wts2u{ zU0%B9HG=+Y&I#R2$=D*nMcn-E+^tBHxNwapzJD zhm-mzk90pRuxo?S<7!%%mp~WkUGD;tsw%Ejjh@1_U?1WIq`ShilBSky5MY{P5N2!Z z+^z9Bq>-95G|x)cHK&bnHXlAK-!%7jhCTxphGNo^J5=-}zbh^NzW()j5-=X~Z-Lg< zwnqw4e=#hd9f7Mm$nDhhU1*9R);b7|u$*^j#0^m{V%!9L7WiDzmuhGHuGj2pc8`X- zD$MH6jp>^=py{P0-1?xIp^VQ5@rLPSZt1HkuA)PdLX{w&hAgL4kqLgt5)KqW&A}T0 zHI-n)Qo`mk`JG4vnyswvVm5rM`;7kO3UGyKCMya$OE04zhD#AY#LTP6IdlAyD`V!- z4W=6xDeq79A;SrUoxYtiO#zjj7%7J`GRDrmr8pZEZ~uf~Zyy|{B)&!o$m!i?{r1Ew zih81;C$4Y+igiW*WGmF@N6frOwqdMA({I1xZR~ws&Q@@oC=l z39eM14AvW9UlP)d1RDXMB2R!h+*vki6r81yKwTeb77v{qd5zMCfk53XP-3f|Cp z{GVq-m)gAIiyK_N*PW40CeY>wy-n19H9&2aStxW3t%Xmkh-^pgmjvsE{=whZ+!P{l z#hO1eYAq@CkmP;7T0UN}9=#}5Cku;q7Ao;RvN-0Y`6bNkt0A4Vc5iNhdWzrStPC|y zmg*2tHQ?x{#f;oY^3$xL_2l1|uomQ6`Vn83TSxrj4F-Cz8g?%B)MP-Ha;&}Z*e;$7 zA4@L&Wy90McLTZW(T-bHQJ3ffmdWRtfO4M?KbCGxu+>&Plc%zLs00d0e$24xKeK37 zI4^Ck*6=Iz)aTZWG!({0tfiLKKL|1oqshqrW+RTRI5uj$?IYshoQKE37c5N5NfGp+ zfRnjY6TzMKk6JwMR&!OwtHy0#O@XJIXj$F92CP9FZJJqD*CyIU;;vfc%htsVxnvplV528oT$Ldk}iw2PifB})s1 zdJnAOv6XH)>Y!=Nu=8VOR!ql-?K=9j+ur;>gq2Jjn1s-mIqLI~(xWr29=2mD+|+HL zet>RWVXzzv<43{66U>o2L8|Dzal3jK9`o(Niud8vwXGqH#|s<=6oI{pN0e&l6hYdi 
z--td^7;7bFue(e(X$rUSK8L-&xJ@)wU43@)w;0f^rHar^?`8XC0-MGt+1mFY3BbF$ zAUWGc$d?lT@y1W$rexqrZ};@7&xxjysZXHHBQBv|B#QOz&3>$`2-L+QOHv@9NYvVc zgUOZ_>j5YGPU_p@n=CjYz4K7e%O6*&a;h)tkCKm2yyYtuOODbDC%cDtOem!sL%%zW zL)^_D{|6f}Vb?jA;~17A4^|Xs4_( z3DvNSPd|`%8|aL8iReYfs3TMwBWsP&Hlhzq?9=Fr$vg&EM*ADlbM=g&z7FOeMEw?E zy|4MSfS^swYf{}^BJWz`LXdd#M4nzA{;G(F#aVz z*aVrWs)vcrlB2G=#7oYQ(oqw8=l!374Dg zzFW5bo?v>L<;aL?1EEsZUen^n{_VoV3t5(&p{HJ~wb_o}gs1kB$aH7u`VR3VyY%dS zvRDnu6det+p-~KXRNlhetybD=Egq9=DNMz5j5iwaeXi;Ey7LCtk?kENp@qM(#KJ;N z*4=b>KNRFt)`F#*iNnlOI!umo#D-d0pk$_1AB%69I7W zG+J1g0~T>t(7<^bYN@?WnxY?E$iVYC>UYb~x%_qCLguEbpZVkp8TU%w56&t&rGC!# z{i>JUJ+>x*%(@5cRTFR9U72rp*6I5C76p4;X4M1xTd!Uan`U|4Jm#Vyrd*xz6&pG% zcfgjwH-3KpNCi_vcJCV}!7JIz7Ye3DBOT9MWZCcE!w?>?os9iO_l(k7Yl1s_pC7W7 z)wyAh(IiIvAcPEuBnyS8Y*wRyUGa?5CvG-x_ z==^DWFB~Xi@}t~aOHH^~FqFWLALa!H?EOsX-56;UYV*zqijAGK)(eBbMXH9=fxFqv zsDFTpFn&o&PXQLSu6!}`mIjP@Gja>dF8G|K>TY}b6^i}YQqspyMmgeo2T)_>Y zAh{G8huV_HASjISUGOu{?)O*cGMwt$2hiE8_Olamfr8AWWH?4TS!r9%uhwMFTucHJ z^2sts*|x_McAw<8M>mQKUQt!4EYTw*saB;J-s(26XW1#^Kw~=3PO$y1tNLVx{bz%@ zl5I6c61=tUgG*_%Bv($Qe@a0^lrd{T*Hqj~nT0Gj65tAp`S;JLN@C)>+hgu&RrY!E zUNFLdY&T)aXJi;#b|vJZ40n^Ws@Dv3X{w|gPpUWgUqc*!HZcdx+YJ?z;v3wFXt-ho zZp@t)z;9LrAf~YIP=h@MSV5j&1~UsbD;yZRMO>SBrSPNx&6uO3Bn(2mwxoUj9<1;< z)b35QlNCl56#9opeAY2KR=5}fEgEOW9S20py-_hjQ=j>?xc|FBmAsb(K9J7uTl@ZP ziom2lF!0ri3Zi_nCG+L$#UD_hNr&~TDkGux{6n?3;bH-}vzkUe#KWhb!|M!WOL;Sg zc$$QnCzo-Y(jHPo79E#%!NbG$w$wLzNH{fdJL^FT*5Mfye!4rDs`}t00#5-}Q#aoC z8S~&;L@f`8$V8&?2O@#E1fNvDd$QY(Wn7}KOzd-0s4^zU2GU23TusR5Xn{@%dAe3d z0L?4MY=NZ~tHW|b&&BQzsoA(DRzON`X|>a<`kb$JFhCJ@{iK7h_U z93fWinf7`lKrj5EY2$6#V?J)ECnH65N9rdY6Z&7nPe>>lV7sN?oIv}L+KZmvJ>u(} zt1PWSrHEq=2^A*zpT1THj17+`M(eGZgUg@Au6M$8a`O@sc9wihx*CzIu`mldMN-4kVo zQw@S^g=wPY-5C6BZ!JH?^BwSqx9plirN^@vb7+|>ctqOw$%R}Ja57K}<@&5w&dx__ z>$;)TRpFwCs^oSjw%hJoL%)M_MyhhKbT2|H6dIqMY+(Z-dtufy6nMXfPuZ0NG*UQ^ z+1Y#X4z^g!wuH}BIXF{@zH*9XBd7d%w0^VuyBh6s)p|-`E?!<&&+DlhBH|~L8&kyV z6*&Xh9fKRaE*_dAKVc@`47iYgRcOSI^DPd=UZS}ypQgk@pn4QNEBpXU8g*zIV6pot 
zUZitqF+xRIs=<~lh9P53XUgxx=86@i5ma9UYh1O&wo_a{Nch!;(O}yExkLA3i4n z5%K~EfMY%JrkvMVJ}`QpHty%N)0%O`1?*7dog*)X-})+hH-Jm-#``xaxg5Lp$p$RN z7a^O{`yp{TrUP_d>!-45P;!c5BVTxwOROs4+lTVA;%Ia%C}v#Qt)NXNUWWM^Q7|ND zPW(oWSi;itc}?{2QRVQ=SO!vi9(b^8oNZWXf769k$iP$5y{cWGewa6BtLSJ(9tSqEA5p}IeF0U=F0V=x%hrVXz=`&Re%Jc5${C_}o8cKly4`WFsiW3^Gno%hRPs zd)oB?n6L{LMCRUf!}e3|swEZo^z~$$<59|Y1xz2Q*itdmh84+5eIr%8DM(xIIt#S< zn*2p{ptm;cxcTQc1mdCCnu!WQCxB!#Cd0yIC#h2D4WrLBJ9KW>D@!d=rQ$dDyJtsQ z3(vGKIOx*2zi#%U3@u@kBlzit(yLd}Gi4cygfgK>7g*gXK09G%SrtQ{jUi(~){;#= zDS0$;8!8U>v!c1y0RPV{q2Ac}LVpqlI`}$%crs zJ1soUh$RihVHSVEAm>^AbB;LOhCpf;85T9XhN5D~4vR?fWY%y+NnhtLO4Opu<%t}n z&&}ggOyXEozmZ>Bf3$Jw3)TCet4|Cj9HpMxO%c&+hYD49&4*=p4ploSVT-t{>le11 z0?7|DYj);6PvYWkoP$zM!UQ(kx6=>m0o)Be;N(n&+$`E%jEK0`ecZCywEl<(T9Dj3 zmgH~iKs=69Q5U*puc2PWw9uvD4&T0ghbGa%mN7gC-uuI6FQzdp;FI;S&X3F{;Yz0Z zp4!WT5{A>!K14yv^tIgAx%dm$1Br`=!`W-4z3IH4EUX0h_tg_@B~MG#R%oHTLDR*3 z({(YpbH)kVr8B|8l~(E(^GfC>$CY#Ie_}EP^pWohMC6T3=JKr5Ufma&rmIci@AN03 z>FJP*1FC;_q9k>X9fYnn>OTH=lZ)jn49930F`WMGV}U%Y3bhct`zMOTWh z4S^XOmg-zVC}x5*6=vM2_}ML7t+M8cbD zEms>pNl|6si{=tLsc|Ld40h~j1r3jSv20tt@VQZ570!cTHq3YD!Akh;zrsCB7bcoH zA1rA=6mvmDe3`^EQG#uVdKaA^c3GRdF_rNv?aIUou%8+;8wIr#qpYdxnF=SYKeio~5>cxLgM6*F7LT86 zSwNvJh5H7<@sjGiIL!&efCEe}E5OH+mr#(fCzg8qm%0Qew4&M2Go+8;aOTOt4d>@i z{dBOgz7i{|_INyQ0h$1Mm#-7bhTE6r7YobjRYUWcI>qVAl4Iti*7m zgTJa35}$&xC2~Go?(ONIRY0omesEf)%|TX7hbC7+t~vOIyl3{4V|GNUdwZzN4bG2gyQ->HtwC&7nO?SOe>YxTgms*qYx2C8 zgx?GZPGqGw%lWKr2S(z{MU*$|0Y*vZKHo_#Av)sMKZn9w+3qhAuvZ_I|J;FglBWm5 zm&!zh`tx`jORwV`Ug&hs#Bu~ur=B_6i1}`)rJ7^FSiSjQ;`JF~a4<$Nw`4B|9r#$c zhUQnbDSnUU$SCp5Bdz|qGBY{2I8jc}o;>$rf;~M$>V0_6RuZY5eV4ZUL4_jBnSZC2 zkWak4jps9w4lgM&@n&I0&i?`s&Nx}(GE>oa>m#2R7MF#+)ON+jXU$lCJ2{bbw%6|B z#UZrA{lV5ku?swK2 zb(Vt}Jnp}B+h6TVQX?3j*dNr5{qDQJph6{Y3oDgb`7Uul^*!QRMd@e`5DM0=zBHx0&w`qb5x z!NQwk!~v!q6N_)cVP;&eY8T?8We6E*v2fVQy@!%lj(20n=JI%3+jik`XN`n>*o(4k z{YKq!s+(VKA^b1gMB}UtGC}A5J+~&Ci$iQBbU&!;kMG4PKW?I2ukf|TWmOMDCf zBx585TA&nT*~eblgE%T%)$h`Np+5b~0z`j+*dG5y4Gb*#tF}vq18Q+0{%!}XfjQM0 
zKZtZ$*bcD;9v&r5cQ;r^&jg-O#4UBd#5z_SY$Tu~HsQH3quXn8vM?CTmm_~tIGN1i zg{OmzKb$z%9J16HMg4?$oh)05<@>g!bIAwokvYDgvEqMNxeHxL5vv$6 zm4XxB`jmC%YDg4yn8ElNuM@S}k{ahYz4ZKgL^9Bh>d3n>(8ldrtg4K$ve$YPHAC*dKdf(4EpGB}soIBth^4C%Z1 z^vL>vgw5}fUmu#PFQ^WvE_W_$+dNhu{`zx0dM$E_kt=uJg~fKwNSZ zul(xZmI6{5_JrxZ=ZE!43SUJ+`eJJNAfxo-$-w?Ccb2$9$fjCKL1qb{fxuU1$u7&d zngiC+D+VV+ehG3iyDei9+yt%dj#edt((Fn324CFL<>W2PvM8L)9_e;DW16<{U)#sa zkbTX`*7=)itn?`^=o91>MuK17p7IddYXG))GqhmsXs-uPLVbVt!9tIBAjAOn z?<@ZI=)Zq-tq6As8QLk`GizmeB8{Pc*y8EqTOLRB@j<}FyuS}$kR$WNPI>u=cwwo& zF`TeBVs_d3^lWn&d)5p4fyoHWeFv%f>9GEN%KrV{ch@LZf6**GpJV=+0{{HtH%eh& z>AxnM3kby)Ws^3)y~STAb_?ry_}kD?#y@1stv5o$OaRN)q2Be(k+U{;e;Z+6hJhm1 z>H<=7ugX7w%mrDH-8F{=nMon(Fx}2 z=w7WZkTsZ9tBWG;?IR0fvs$2<2sogfAC9r{c?pL?i~OM4BUj13^taD5){x$sOVnoD zQ(V6%O{CPMlJ75=e9Wz#S#}JgA2L+!AfFHB0HjBzM11$?fRl-!4?L}n{}1fKE4`2U z#&YLv5iXvvEWSNtV1skR#~*sacAve*#Fz>}UeMP8&b=5&8O@RXiT#TlzvVOn_n4OX zQwDT3|2!VBDiGX6;ashSh2q`oZX9z-N0yFMk~Mh(Li3wt_8QU#XnDNak&H~YwKV3& z#8{DwhISSx1sB=i3^uo?0s~cU49fMhdXwyJ)f^k=RN6f^G||Yu1|~U zfnyJ{-~LQUfWu!?!r6$$ZilQT3-8PKC7z6MB=+2$Y)Ww)IcoiCjB(Olhp$>Piv1otUOmSvYg}BzD7SCm@Iw;&`8V0lAS7pVJqXVu zZW)z2OEP$?vy|!hUZ4F1ckRbsb4VSFF^I=eWoeTV7HfM$dP??{#_}4TZtedKx{P~M z{P~L_gM?saWCCuNRrN1tUm#E)WMO>hamQt|&Vz_cms5&DKA=8`2E^GvlB#v##)u2ojk(t z+<$>87`lflzp(TZ5{ri$Sey}W7cvGb7G||y)9T2aI%wfvQm`lgssD?ZiG>g|Oco{~ zEH4qC3maE$kPx!(J?6tTew|cSR20~Tmw7ldKZEp`n?2rVp|s?2+$JQbX=p$&2$dK) z@yq4T7>L!K%l+)WJq_+NuaQ;>pngoU(YRY}2lvht`4N};VEH$SaC#|#g|TaZt4QyODugukjNG6ubXPN*2wWMMSjlBC0ub(ZpewPD zzU_SLhB&{OnFZ&kV^%+3XSMf?uO2C~wrgvo+7^z}=<-AfbleWs6sHj4+Dqa|GH1P2 zk=GZ9WbW)iAyRIb1~-;+Df4EGlyzJe124b!#o1@wXRY?=|5>|aZD?fHe|7ZK@U+Gj zo-XROtp#)sEa3IW5Hi?Z*nypHa?}Xy!=x=K)r#4M%Sm{6{r|s-7i|TCIqwL^-&v;2 z6h93NePTsvIb{j_RaZDl}X&ZT(crPl68YXQ;sZ@-~ryJxQH~(j} z+f=FY)7TssD_LVp0}Sxo#V6iYpMNjN?6vG|IQYt-q$F@lXS^|H(PQXL#XE^q#d&t~ z;UraAYa8Xr>*iI{_*ei&jquE3#uSYA*Zp?o$Rx0d}p(A zVM!pUH|@{B4z$ZG$Xg2zgQZZHC1OjojF+Vs4^A`*uz?j$?k$sFvgiC)*9EsKgb(bL z9Rj3-Ex?9WXR>8~&7?IEKs=%maWguf`X9K(?5iLjMVKpVvhw#khNd$DUNy#4unb3- 
zHM~xL3m>sd1sI=H%d(<|`lm_1`n`X?v-fThexB`{*Ovk^6>{B43Fb!!?qW}%3wor) zKR^o%YGT@tKW*7+&zF?n>wC7%{{yoOcW!PifM}Dr`Z3P+&zVy^pLx6v5V=n$uu`0~ zLap9dv_6G=yC1T~aHI{aH1M?_EOJqy^i78jktU*YRmpeIVM6-+!L{y2s&2hGTY1=)CwsQdz2-@GI1w(h)gFDO|etmeN5f9C0|$#KVGcfp1_ zfcg?{4{TfV`c>g?Xz;?ei4O>;Q;dj0$bzJf=j&LGi+!U(9}J*nD_e@;+MP`YqSC+q zZY=61N1_PMPF&=CA(wHj0lOBiXb~d%r!V#K24>?ShkXY;A_cZv|1_do(v7%D#}P5D z`92%yEuPA?&geP6WzV|Pg)GIPlu6$=)5pOQHnD&Gy^MCGf1R*{k1;s!_b^SVd-O)@ zE=V{tVSZp3mgAJw7wDWT7Sz<8?K=7?bJ-V+8W|haXgKL*naP#}&JR^M8-BkcDfL5` z6tF?(F5)giA0-Ww_1UIBW@S;aV_F_C;Z!o8=9x(Au06}KkaZC-yxM8<; zmtXFa>>6_!N{cb)u+<}DN%W26$XmNKsmqIPTL^X!gd*XPg1M`U0|dNm1%m2UJw#L( zhd~G1im)8|+7YbZRqUQ+K5Ted9H6iTvc9NX{TQ|4V#w{S`m*`m8e~3=zzbsUh~gYg zawK{njlTf&j-_|GN=;<-HpQ{Err>}s2jUCf=g0#7yd62rHH7`yqok$WG5w{T-Grdd z7}|$v0=Uyd`K@o6R*NNgCOmSWl-!~MPU1q#b|A7qlPYA~cCMq{7t^OTUC{uz4|Dd- zR6!#^Uj9X$sBu1gm*7&@Ge3|Dj(K1h9rdzDX%Ahr2Ohk>7^b){=oudW_zOEpx8Z_d zCxb-mjj?WSMAOJBMnhB4%rxea@q(a~2+cl!1+M$ug3w42VX8i-hNdOG9F{Kdeqb2x zrf|n8&&tgE|Kd$>>{TT&KtG7=J$cj<&1%rBW-=aouk`dY0#Qc$s$G=$U(>2l-qJ)W zD2=y4b^Sucy(67pajiZb8PoLYF9?Xt*!K&VgjkFcg`NG@?>@{Wc~9BD$25H>Xgqog zPV)nZu>Q@PC~>rr1}a)ilO6hnZM)Pl*82T(ZGTtvfCQVvs9wqc-G zjT2i=)Zf$z&~S%8^_6hT>G6{ZA(A|!UEa@TS`m0G4yA18NKStOuD({$9Yu!mE#LG) zBw+Eg(iv~&j^Bv->kYg$uAO3Fp;nR>x}>6_RCqW<310YWQQw212p2sQ=mIv_@(;~N zDy?K0L)xX_sB2F<(hNr+UD1oob$=g#NH-8*laLQ{JM$mfWP^D$6V_e(L-to>>K=qd zU)RbRAA?Pa^tSXu+lNo&Ta4SYLlA5R)q$)It&lA#|1;;8*B1xYU&TLteiwTKY~&Or zFfDpi$?^6Zj%nCrpT`>RuCn?4+^d62cpYP>LzjW}F5EXf64RCgcgGDF>5yG;klGRF zNAnMPSPTgz8zqyn zCwl#DsOkp(XLia3_i{33I+QoRSdi|lZ&NNZ4~Miecqh|Ka-%St`d_%B`Oihsp3)Qf z<}H&^msOM0S;`fYz*-C=I4yN_;C*-!Yl9XY0ed+LKAMnkEV=X%M)#VUUnNlaG3+d4 zYpj<0^lik5YeIcfqX~+qZMCNhgDn|g&jpe#f1sa2t?|a-!HF-(D*kK@BQXiI!h#DH zZ^Y7$@9I@dv@KK*^~H`TFwl%`+UaJu-z5l`DNbwC^EVl-YWzJ+Uhlb4Lr=$I{^wPx zd8pURexd1gOxUBljImyvS4&H4Dn`=^tR2uWkmMb87N|r#p1EUvu{1LfyRl%QRd}Kt zhg&cf?Kt1ytL)>u@arR38u$;b0V;ve8g3q=gay8Y(y({W%-iJ}=>5Yc7oKb3c{*Fq z`B)L*S0rcMk9LFOqLR?!LBT?i5_xCB*6X_BfdX`qhcmU;3?6YV#NRW35q+YIj(@Ql 
zMA?yonSY}iU2w95*0INLNRmZH{ zV@&cOX(8#CI&+FqKwTb13} ziu3N+)YDpWCqR`zt373C=U&Z#2?>jA_E28fGkn+?Rem*1Vs*+&?aNG9dpTc+{VY4D z@>u-XK9QiE>QhFr#n{D!1vaAYNVf3c<48+vFSCEJbHhH2@{tO0u99~`<#ZYRzwsYZ za4?Y=c3tFUl3==2KIlqooTpqffa@ol$x7a6CY-Ev?9m~AxBiZ0+skvF8-s7%)-rUl zH8xmrRcAbAjVNQyZC=EB1aN z>yw4v7xGB6ovOo83;E9M-^&fQ(?C8^ZV9lOiMUTj28!bVwZP2zBcgVo7CT8s2fa{w zsndA^M|^SP*VavfHgk%5pQ*aTg5iCL@AdH@kL9DUo0EKZ#@Q4iB;mWW)%d@^b)nGcR8FR$nEy)+sn76Z<{imspAb{eRmO7qadxn*|+N2VU zHX7KIO!i={R5&Tl)`)U8eDWNiD9sv>??=K3{gsQLfD6S}Fh&yPyXr@TGZWF$WJ$`s zmN@^@WmAK~pF&JDQp7Ik;Ex6z98EBSHg|?cmEON&hOus9ADC$x*{J68C}F07919nRJ>2}-Fq;?};cFC?ZdxA^}GRk39+A@MXu%-4P`!4V-2rXqE7 zQ+IGl?v)9xO#Ump{gWJQUfui(x0MjidUP8X-3_@d5fQi?_X7`12jI_8^C`SL{9W>_J*p~3-RvS!dsr4Uv!^>^& zg~T3%1Eq#bI|h1!8h$59i6~N3&?pCRJIjK|D0;unc%^Y(yqmAe2Mp$foyEZP?v_|57(vj`?P8DmCIx9Wy*YGLJjWeA<44H zdRvoKG@XBC3}rLz>JDOH?N)-ylH54`Ug&*8{&+$+T(M>?ZlsuGV2vVfngl-s<>Cq> z*XHS5w8JQZDVLHAj1zu~vAu#;o$aO67K0O^n;~S6GYVtIYpH zi~T)q?zKDtaOBcGj!4IiLF2`yo14TJt#!Wu1jXW}lRlh%?*LjU%Nb;zb7ZCMIhsLd zu(NxoBl-R~hw`TCI$k4s;*I>?4*QOU3~1aXU>D zxg#vb?$4tqpICzV_ma6lh@ALMit`^zmwXO8`R9&rHq}iLK$AWzYb+U^q2Enct10IQ z8FVD(H_;H-dmdkSg$kjhp(`n?3gGP}M|{uhhUtmY+Z}BmyhyA(Or}ouv{ziw#TQ}~ zY4$mM{dxld6$wxj$EGa5zgLF3F__K@X9V_t#l1zKKf@%+GnYKX)WlvYp;eR>eb0nHOr1j9!b;061k^M$EDPtYAh)|wjM3ksJ|^D8l#DoTc(=nScG66Q1I|&Ef-M7 z=*~pN>6m!O~ZmK0|tU9m^q!$Xw5t0Y2R!)C@8~=Lc)oNlT>{8bD8 zPZZ^q)s^ZJ$9D~=)`%4Z|A7h zcS|^`zB+_Xy=;fjdN8J|*)I>+K@%zpTxlxE`jOpYCNl0}Hsu;EJ#Jac8FhMLuvU*b>{gcn zO)4+X&NTcMq{%O?Cz>b@_wx_%Lk4N%jY)I)+zVC(a?rVd$725MHYwtDdiW*x59 z<-y%-TsGQkc@uppqsmdvvsHf!jg%B2OoR|S2SNZmS&D!!3(qr#9cnCAOn zq3~uU)_c*sxHcbV)mr?=Qa?m`;}JpKEBTuv#*T{L;5=&5`ZgsHqMN@fmvClGuxE7m zp+v^3scdnaB(WQzBn7tFqZSKPM#6+#k_EJ;ZSEb=w8WY9HJA(7Pvum!pZShMr`d5A z5#qcR|JbC2rBLB!g`&bt+#V0uZKR$s62?23!#B2mU!2F9ubHG%ExOTA%K}4FN+DBy$vQ0@y%0^+W%t)oRwfUFNyxlaH&2>?gKz~w=VHuQ@^AAj z2rLhQb1<|3M%LcC5eJXqc+ztHA;TE@=73qU%zRQhM{?V{V$$REg9z8Az3h$>Juch@1xDznTl_bt;%3c7dj$C@vlZLZ3VEZGDPrT7$+o2p~}N2Ij( 
zh04b@ExdpAS1$%17Cwg#lcic_j+4CvT2>9-wrQHDASCvD$z&836BA2G;qrdvM#N=( z#@c}ZaTod1A1YhKq2_8E`a9PmGS8bpnegb&1**AKiLkFn8nJ6tQiP(=CZcg!si#!d zQsvLT=-?r@pzcRmS=l0$(v%t`a6+m;cE|K|>`**)`yB|GkXH(#^2cGf{RtwWy!cpF z$AzDsi|#hbDB zTdy-;4}eam<$mqWMy}pkyUu1^2BIm*$lZtepRMzR5lic-SM@qyaEb+Du{vW>?eC5J ze9Kc4VhsOo^ZpYjqWu4UQ*|8P~hm_2vDQ#|+MH@bZY2RGHazUS03Gsi|#V@?}8c$(%j|F0<2Qy_^V^KVGLYx8q${#$aeX+D9W4v`{jSX>S-s2`&XRe-$-^*F4iR;L?3l{tLt#YoM{n!@J9Q; z4%;RXXNS9hQGnIi>-p6$WIpxwzhsB}2gG${y2Me3W_SEdw|9dIeV}$5B_?Np)RCJ^ z4>Bu)b>Zgx2P+9lDQbVTUU9v6jzb@Bo+h0anTybO(nbLc483ip+K=VeWQTn0=?KEB zdb1sFXq8O83r$}fKbGvONPWxscpxe%0p`$L6~5x%MEhZP0$VXc*Bb7h}8y~>P*2g0Z}dCy@bzXNd&cbV1RWolq_|r{Nm`bk7y%P(=>yU&njJX%D;_zA%kF zp<-XedA90{H)oW1M8}y2H#8)aZs;=Y^#FbSPOH5wy&{lJ`{N`D&2$*0DV_rA^cHS# zXxcVI-)i{^U9Bi+MF_~aTgP#0^gRus;oxlQ&#Sk7D>bmRYVJyud9dgcece9w_D_P4 zd8num-=f`_322Tr_Nub)Fe2axCA^A&VR}J0SC6nk;c0b1>aKb=IVR-Im$*+Oj*OG z|497&XS(S6(vxlERoL{VIWp}lQ6giIVQiPqh3Z@3opbI9H~FQ_jI+BQ1LB{ah2ERI zpQy|-oX@VQZS&s?&pQrKKM5|;TnJq%n@dXKkT2_wbZ~O-Y7B+)2x}=M)1&^(iZ$&% zhjq9O=%$I}0-D_Xco^rdg8{1&!*DiHiqI9>Fgj1HZ)Y+G+Ss?3xv}_omD>&_;;tfV zd08Eb9V|u3=qC0Ij(kFWLa8~OPY{r`vFE$>G>96w2|v0hWQzgZqyv;FDyf!J3q8{p z8XaJvaA<*sN-?|mtCR*NzS-(@R89xdsHw0kUiX9~1hf1ubo=7z#J5Q}$$GK%cz#ZS zSwucesKv44OCR2Q0xd&lOcr)UNlaU}gfa-z1p15U)KZi5wlw_4m?Ae*8M#t7D*sD~ zq_*}3b{*2cx6gTAfVtOWj^=j95Xta;n3O#8#D@(*t26}u_-1Yy8$%1?|6oQXS1j~V zKiGS0`kf>^T7|&HhD1iTf3Zn}fkJwA{#r(~S2E`egGMiQ`G=;N`SI;#^Ncs91itW( z_9dmjkW2FO%Y4E_Or=g&kj_dGTkc-u_xrxlX4NTT22%^sQWzTk1ogGg6v-*qVZH`=73mbq>qLjCJb#n08(ZF~6)py|rS zJLNs=4dc(MO3tiT_VYHT7>$|JsMH2Yz5jKdK7d}g4O;Zn0My}ey)nLG9ma2>JPhdHEByA>wIN%^rbjR7Z5lD7%SJ~Vm!A#^`8xfYm8UlW}A+jdd zzIyA3U_O%BSG4a#ue%XT;h}iFAOOEG|6YMjK$>fY=?_bp{;!Il_Z^QV?)ppO)a%tfL)`x7T(L3#EBmtb<3X&d9ueWU36 zHJ)&Ey&RP2oOeTa%$1im+h>z1cc2J6WdvtwEOP;Kx%e z$Fy!6@*$=w6}q?$Uaf}PupXhSnp_{ou&+P3VIP~O>;^w?mRc}6b)U^VPPB&rM3ec> z;7Suua%~)^^X4z(I~X6Vu2$%h-s?%dl6jh@)_*lABb^wf40W4sF<7>d6hE$vd{|HH zewU@-jPU~UQ?N3wwAgoR0}MPb{Lh;8g?O+h_^p#45*r#`sm5AW|1_4p`v`@p&YFX{ 
zD!RLKyuNNwAg5=#`C~Cd66;wNYQ(^Uow*hBz1wK-$u`yy1(Y;aps2IKvEOY8ds|AC zmOlTuwFA!HKqYo9K+;ocRxrWg%?A<2xO%ji6y{uiem~;+%uIaa#gEc*BCI_Fwt`uB zjCSsbP2<+H16dj`7CaZyJaT>Z(KfG-_?EUXeDS3n``(j0U@=Wkotn$FLOqCn1d}O$ zO%v|k@>n=mULRq-Eo%1`+ffsT*nM<;az-i#wEF`EqR6Y0L$Jxi-1MeSZA+eg;P2+2wnWKb&tNItA z9U=Yccwu}>YQ{n8wBJ8*OKk4^^;woGFE5&pNX>BH$GS}~UP?kmTvLhCX^O(n-usv} zpMAC1ySAPRDKW^C?dHq)ihkiM={@sDDG}#-5&fROAW@O$W>Z7W%v=c|{RAfS50(5l zk=H`qh-V*LFGiif9Yjo~5{g2)wsDB4O*Kmu8)qqDtaLz4Ragdk@G{u#$N1;!c{riL z|AJ(BWXrJYuj`K)Z#8dojvLJnlINx>D56ZLC0gwUdN$4tZo$n(dE@)&zlZB@R&ALf z>JDw}qi!`FHxE1>o3X>VHL0Y_VfXr%w&onE`ny`GUt_s)n?V3~oqR=I`9yZd)p!{{ zO}PmnSbS8~6r}wh8(f$T7^Oe5C-_wh12c3k!HvV4nRC%L*Gd{YQ^MJDUp$ZLU*W6< z@8QBhwG5qY0)*h(@!`&1T$d6VM{WUj<_5fcw-b`6ym{vF`HAeA`H|-I^T{L4I$38) z>D*{xe%=Innhmt!()<%MX7SM^&!c^L8o$b{cW{_VQ;twhTWk!Cil5tX0g>8!b8=6H zgdfXqyi>)*Ji2yu5V7w6z}8v6MN8}w(w|vdp8Yh-B7CK^r#JW3`Ixk+m$~@6m;tF| zEnImSDQ?&fgWaA=)&BeAE04#ly63H@DdZ9&Sgx8rZ!z{Ia1RmUp5|`y7-Uss31k}K zwI7L|Q}cOPALpoM3*i;3tgS7+tG2~Ai5@j$IZ+*%;In2FCU4P zKTdc^yrWG_@9w{Yk!@j{YR$S$p12Mz<z*q^lfYJm(H_Gz8+|ZkxT@I!nj!gA5_4Ha*_fEtdDV;!Ab$8gc=GevWycX^ zqVgaro!}|5LcWsSoxZ^m28f*j#_b?-D7oF)Cc44UY~EDrvJ>ZasZRQeGo39d=PVuC z+)U;@?oHYANCOYQ>CCwL9>8znwE95xS#Yd}#D9QNSFTH3aN^XbCdF)I+f&jkkuP*a z;#lyMr#@HD2kkq%l%!%|Lx|{|ZWi=6l<`bWz*IRQ7wdJl{{<{f8^-mJN`c2Uw#=%> z@*ueupO1-@Yf-}BwnwgPNCI8L(}ATTEcb6h6}Xc4I#u{T$xE?VB6Klz0a6Ztt5 z3*s2U5UP8(ovT`oa~5_ws1p{0!g_e$P&0kG^KG?-l)~CBoe7gzu{F1A@6Zv&+*5R~ z$_b0&N48BsvVI7>Wcs4)Lw(gJBS{jwjXX@sf`#x#Ti?*8dYuw#~vo?9qm^EGXxs(TqgzYnT$>^M!&r^2(X3Gd-!7!{Dp zQq9m0ZQ9_n|MnSaot*s-P3T+?lY_o)NS=&cuY0A#hV&uQE5SV5Ss~n|_pe63Rj#`Y z2RyrtEF_XY%1Z^S=qjgSnevgt!2YW1yXcWm2Ytt0y-b>7LZ2?+9_&zLf)kTZDr2kK z#|o@6t`$E1TjIMEgGo}#=Z`ljNvXp*V?h5BdG>!u`^w-rmL*-wk}S&>*kYC~W*RYC z7Be$5Gcz+YGcz+;IAUfRF*CE?oOAElyYI#BuQ$=r(GwG0**#TRRhjvHnU{y7JV(NQ zp9Ew+9tDX#n$$4y(3}a-``J4_;>#^I8EUtYp+zP86lqb_cowB_+rrl1ewtdA z=48%Xj223N@c1w@-ZE9GKyTj=%Ihl6&)GIw>B41WRg>C2qr_0&*9M%R5`=Y%s$RS1 zhKRB06N0fddbbaJJL5aOi?Df#hFlLnrq6Do#)i4m-z3NXt+yR~(=p_brF?}(30K5Y 
zyQzo~@*$+AkE9(pGM}!gkLrP@=>=)}t)jYM?gHUd<*LGwX~f5Xj%81kuNp#0oA zRmITpq!^JsBKtq~!l3%-DeBEZHAY&98HO6uobBgf+i>pNaP<3$>h?Ln0OQfXa?yE{ zJapgYv4>AkjiPeDOP$LUQX?+U*k)Z+u*B`qATs4*zR3up!*n@Rg5s6m)FDVuR{M}U zbBIZb{wb-*9Z-SK&@2)>0sPw{akJUg`H{xHm{TO5>)w&ls4Kym9RfVxa~aoc7ODn& zY;x{XPj42qc*}a2l~wi&hpWxx-|blBOo`zQL@zo=8>;i*MyuYz*+Ym28uqrXc?;7< ze0zKa>zppsY6$JAb^H`+XmBX*usIBw{``&^XCnz~bxK2T`MRAdIZd^EPN7CpUnouk z2(S1e>$M7BO0_uAV>r*&X{?p#z|S}_QLfD&hs2tUrIXpj?$MMoZ3RRS?39EuK=qpi zx;LcGGb=XFpADUBwqVxYBVoIt5Ic}DM8*V03kf~cWpMbG#2`QE-fc&0j4fCVYjjZuqYsMx5tzF3%*8^z9)4eOFAUe;oBp5qA6W};*40)eEa9f%`taOr97VJ?ddlQRXd93`Np66nqr~stQ&V@vRfQhx)c7Q7zVB< z`|ja|wEr=X8Tr*S#eyQsh5!zfcB%VA+J$(~pO^(oc$~QjL$mZB=Yxtr5OTPQ`+fZE zY*Z5Z;pL-5b(2KfNx)IJ?5B4mk^8Q~ON}_io78GwzO>~_^VM&O+`^aj9Gf^QULQu# zEXZERq4?Ps7uArzf8%!_=6mWQ`UOOTIlehBHz8Vv1q2=WX4Hz1)Tbj$#nKXnE2@eoJb0jN3*vt z1liWU3rk}94culGZ~x=Z>IXP z>S%^GDTY>g*;0zlBidQ>&9Oft$A|Hzci9hle&kHTSq4f*vfB;>kP-41*6 zB6zMSby=MTo&%m^DkO@|EYN5TU9<*57&SZ7=A8^s($~&1dZkvcsmL&&;tWM>C0K!8 z_*BywLYlt@CVd%7-_wU%V6>3SR zD@}4$njLUaIb*g=|JkL2uVhnL>Gv1Zq{&DMuT9~EjKq7|rn0ESSeEklmX=;QYbb;c z!IgRXJ9n-ELq=Pa!_P@k1=0UhVs@xbI$ zX7%NUr-bm+kH{CSpssdQ#cbabwj6M=JJrs|+FN1US>F-$lJ57%!|3D#9Z|@Xvha@Q zr`sNm3n)fQSPXFEfNqmrM^^39Y|@pd`tuTwK+cTzl%7VmIu`%S5&t*&w6|DgT)p(+ zM(x$YER%HI%liWuCznI{Pf0iPsti;UQSVgzyO)47Xpi7V`@F+!CULXlWM-2@>ogwH zlL3np`Xgns56~{`FT)!S5y{oLfVStBSjk9H@PZWcFMrxUFXBFIA4Yeg{G*rQ#hY6p zuzdF+n(9tZJ^T*fH0@c)45Of}*$MzOIgr#vrFpd_?t=_pIz8V=!apQ-`tRP(@gqcw77BaKP7Y`TRZa@k9R;Epg|@Xdst* z)WfP2)GvvPxeW8x4YBb;x>-(+7N0ei-&4$@M}Lmfn=g_FCuqvCdd^*KpFE9i;Xv z;)fR}Mjxfc#?y~JTAJIa|L75SDJ|({KJ4z%6)&T$UEl1V57+7i#z!>?LMdFXxcAW( zd8*RvO~i#HXsk)^fm(<~P5{AHP6d;df`o>Z8j(^W$R;Q}b zFT2a|_4OY%uV6|WZaJ*4o`rUf6v|YP?VCsX_>kWT98Y@{u*l$_!F_<&K`)e9DES!I z0dUsj*@R7NMei@{Ros&8N*crPue6Mei9{;6x-}FZV~diVs|y@_c1xb?B3Agyp|d?5 z`|vukgxBbUnK)@=q7d`uJqeU+I6mK3Yl1+dd67uEnYi3ZoE z>DRC1e%WR!G9G^^keiIC?y?>?; z2_YgOiA}~BE6IVDWqjkuvpSM^Jy%3lupk?e;v$+f`VM~+w=U7loYA-zwZ;5Q;3K2M zxp+ej5tlmSI4mw*JOWa4zkOOu>-TGT91u^gWkQP4 
zn)w6nS5Ze?QMF?+=XGKC>y)t>v`sk|X5jl_@U~E7;{WRDG+>|&-bGfyc!bMCdZc-O%J&s3X@c*kAj8anp_UDXT)h54N|c%JJ0(X&s4Ln zPC8ZiG=fm0JHSzNy^eJcb9P*wwcaXHvu4)fZG2HiGND2 zE^f(6f6u%*RgWTRIFQVdkRJk6#Ov(*Hhs#1#BgplE2Xk-MTg$0Iurt*XQI|X*J5sA zLnWOUr~xxwDr`$zg|Id1i|4Meh#;pYcBN=M@{qx8^-djq(4QOGK%N0Ykb~s92|t~l zr8d5JFTKr9c zc#IF{IJ~d5m8{9wxkkp3a^OsY>JIRr(?(2>S%9XX5quv1ikR<+Z9Sxs2QM=`XEj`+CX+FIEiJQ!zEck*80$8B9eu6` z4Xph!*6qcw%_DueUHiqHFOGLKx6{P2DMJ65t^1;<K>2jcILFRg<&zCvI|0`bJ$2OIUQso1+ToD0?sKI1`;M#nI{ZWhQzq+$`b z@m6M(Qt6aXfzv5IzCW!$Z`c)Mp_L#*LOuI4*5phQc}mn}iiBpGXAG&Ht+oQSKEF93 z)a?YxwoBd~p`H%n3!Pn4FkqZ_rEPIE5auBegI1NEFO-0Lqf#dW103D{Tx@+(j%g2_ zBS?6}NzcPOd-lp!qM$LB3vkxPCE^V0Ifd6UD4V_^8OF11krvF%b~wbXC#?u)#zOeK zDB=XM+by^@%Vzp**KhRnIh>q~v0t7_mb*FVKgv(AdL5wNCW88S7!s>F%1kb_KX$wa z7i?>xUq*!fF;a8;ZRE7SAop$8*7K+EftdjNx86#9+u6@5+?u)X8+frKD0PQz{{_YzddnH!b^=(3vZH z!8ZOt1j=6Ux7o`_k4fTed0`)gU0`*X3S4f`n4HVy5qam78=y^8^RnM~=9*KfaX>+p z;6<+X>x=+i*-+46%cQ zpEYUBd?8fSDZgN2O(+ZJeI>+b;rbTwMDB%{$}il-gQ0lkfEX>Q=C;2qGIyU%a&t#c zv-1{nJ<$^k>o$A{E%7-Mls`VOws|8>}L7?%UNw5V7=^ zU`%DYRAtewH^hyJVq#+aUBCrCTi=yY{@IuFJG$qs`PTfasKS1&5Ow{A#*WZ@7%lu90j$vfeF-V9c9fxOA01d+X7{-F?AKhi+%tGp zcn+Qz6wiiAdqF@0)n#aLR@iQ#SqVf=#ycoRrM zfF`*cU&NxEVzSSc1=YA4xrIXR*Sqv^&t;CDTG!I?ml*NZAiNr9C?O_^Hjo*YL}jY{ zT)ue^TvZz1O8CH!uRH!C-1^F$Vi-U*In$IQz94erxIXce_dWV4-Qnwu!=vWgq0$!* z#y)qKj0N*N)<|lK#|N@1;P*U8vYW3spN$nm1azVz zI0_ydVQHIKMd%zkUPy*(dJTQUhEWs0v8T5$?f+vrC$pTGtEfUM9{*`k3H#SXY89*3 zo5&}*EKhy*z2y)d4ELSjSJ<*jKWD{_#wIpHxD3sCMSG>PQ%1n`S3UjK{pwUzh4 z2%vly{m6~&Hf1c2(c46u-{K({#cLLP(b5-Sgmp63d!f#q*1r;EWB;y_>A5vin$Jxr za8<8s!jHSZ04sICxeaq=q%iaU#4ESHW*Ou9EyNeHw#*egpWwbMy?h6N(Qb^VNy}MD zZ>=acR)UYKBnL;Sve`>eg7Y3d^A;>3P~t}tMkBP)tkY#UE#|o7^4lW~&=+mlGVCSd-7`HjEZ*R!06ONlPFnkisiY&u)(*)kaiJPy?m6;`*~ByBBfcG z;}^D_X3N$R@kj2vgmEy|+|02v7=mW|h$H@)bbbG+;`4Y^8Gq$MjrAyBSeJW9$mg+c z{^*Zig<#;c9+7WI{tU40F_1En?Ldlerd-?G?vqwoe(?0cZ2D%gtk^z5L*iH-pr$DV zuEr$i*!NvmS1)+(;`~?{m?9_#!K1(Qx{Piz|8HrquWeB~)Cfx+GI` zS5+C)R8b;@RYtbi5#HQNeSpJHret}x!f_$=W7#LU6oq{ZJ|UjMnf9Fc*-F45TAb-^ 
zac<|pZd-`=m#*!)1I;eR#|)gIj4IYOmM#JkW}1;>(Kb*>dfxVizsK(%5IR+2g?@ZA zFrQrniUrfSFJ6z7{@I&;dxx(8{&dIFZE2`SuGmWfn1I-qcPI44rFx(X?KQmD>ea+8 zbs*|D+PF_2YfR-nRaNvy(jef!42U^mu5f5$9Rzs|KkNJxJH6Y}|KI5-JM$1oS=B94 z06~~rz2iIXc)B;*@x51Yy$X{Qxt3JyDay0WvOwQ2#ID26zkLa(w4>Gc2Uyutk$2Fe z zkjL=4*RN_dk|!LH^uZ>99QueRr0+R{{3zqe5=`YwQ3z%16Ap5JS4etFbTI?0 zPV$5TMGnOstu(@HgwCdSA9!Bqy)fnqcXs#J)G(CA{RH!P<05>I505*zw0@~f`yuZO z$x&rA7PLZU6VTI}EO*%-9_5OCyo610CxHj3Qs3)HjNGgUOYUxpeUnKH?$CQ@WTXwqQP~c2P*h6X`xH=vhikm zW$;E5P~7f;qClnT#fQG1xzfT>B>0qPR>u?kT_dSy2NBdCVPuNcY#4nyabeGB%)o(f zam*ZM2K@`!GsSQJhFp^|SmStnTc(o4unIA5?yR98FHDy7(0)!TX{un%_KJ4BXD(5o zT2|1uTFd0gqz1dfi_|t4Ov;H}@UW9*v`-L4F-5IvJ1&3z1IFuWRXqLiy51)7|Mrd> z@SC0TXWzcg(QL^FPLC(XF*mb?%H5m8nGfMuOh-J_N#B0&phO4~zBA8Eqci?rL1usy zGejTv&bl4P7c}9^_;M2y3RI=bgSRj#TUXMK+BtH3k3cyV5O+_S^(#~+{cedW$?4-! z+r++(F&%`b=ffkIA9Eickzu#VyhX;n5*sJMzx8NGDfo1&2Mmdf0#8{jUUCD zkXY9^hHKCdqiQF|lmcb5MbjRWFP}s@FX7fr}UPkA>^1X10I!EGJU^NsHjDD8ad5OZ` zbwr57n5UlgZ7is)(;uIL6B0SxqvSKOqm|5v5Wh4fxR-Rj8m_>xROK?v`!@y1Nr=PL zuEMy7mBH|b&=Cz7>VMc=c|0}&j01-3YO9GgyZDqHZN<~%6J4I6wpUC6n1c6bbw_0dBodAI=n|aJezd6-gS5 zr81eUYt2kp3He~oR?iWMf_7=RZ9+YT`yr+}VtF$|sziek~AYEeUpZOwqV@-CEaWaw)3P zR}Mj36-8l=r`R4Wh$w5pLgBNYHje_aK9R)_Zam{WG*BX2F>e4f ziPtUKzUYY}F7*Xkozdmbh__Fx?1`rAzA^gaWc(`I!#lZL$hP2+OEl-_ipvmBsZVW$OfZ=xRc){(IU$fc}90PQQ-<4jfI z*}iS@>3)#Ssym{gY-QSP6Icj=$vw~7ZkJ*WDGs^q8gs}Z1E;f<{$JQXsu4)jWs7k> zsFie=L6;}(n7`!J;c}#^tj`>6ueytA_<~RkK^v0dOKxG_#jLu$ua}4-&Tf!uT?`S& z{?`-vOX^ISXWi$bC*4b=aQbfXAa91<4c<;F>Ur2MK&$(Pn(aCG>2 zx?bUfhEnIDw7(w@qIR7GW$OMEgkP!q^{pM+efmhoC)|=G&K;8DW38}nM=jGXy9l3v zFTaE6Nk7jV$X3s2r#?*NJXbfUdw%CNH)o~on6O^ssXJx~>lAFlfJA!>p9yrFJ+BLf zWXVLv!IP{$hom`sP19i6?jFqG^}DLkQ^tSeyidOv{MFv%Jn5l{k>Pm~>?p=~v?bdL z#ZEe3y(vjnBjf3;twQt2t~FK( zg*n^LdW{MRhTmUOu+nr7mBRWvROkN5bhemxvnQbLMIC%DQK{SWk|qEvE&j~^A9%te zx7Qs7jWM;u4-|@OgEKAiV;RZy5?d1|`cu-t{HRaq z_Nz&gBQaMjdgpYzs)QLfrMlVuj#Jet+>%>J1XrjbTdRqBRi$&KTFPM}8y5wl*01|M 
zP)XVf&WbKIMjJ4QKjXz-qb^H?CJ0lR3DNP+!!fG1SkU}YrO@My3Gr5<&gcl6B>px%m>cBsI0j=Ge+KZO(gC#zA+a5U3N9qj^fV ztH}qLx*F}cY2{v9=k#-vd_DqxY&gp681F3=WR^ye7TwD?Pp@z()bMdv?l9)-z-He} z=jJwT6Vm{(h|E!r-89bFUx-aQD{OR~-;4xVvZEtl4I>LMTq?dbh%J#g9DFfTozURp z$#bx`uIujWLt6+O32~~D*xcEq5N$fgSRYNYVS1|Lk@PVD%T-@XEhfe#KQ<0c(GyV( zUm86P7N=l!;BY1tJSQJv&f}|`=Q}J>=FgCP|5iS<0ajXGJ6(KogQ>>WG85N1B_5ngUFh`N?d=x>PK81v0)lrg;`I#Q#EJ<1s|_pI0KmhxH`N|5=<>$|zCU1z( zFOS!Z=8G44@!&js;2`LS?5(pKQ~A5AKVBNgh#V1R-?qZ`4i81I}ZEv+6ZX4CZPGl~;D9^u*0+Uc{E5GMrd3mr2^rJzcX-*9}2+ zMiZUapifL@^ZF}|7PF-al)ZNDp_D*a-yfgR>n`26k&FdWUcSZU-J5a_sT zX=q>qqju02*m%GBzXwkkig59MgW`40`1C#{K*vJg-S>;Z?tHA%$zwDAhkM~pumWQp z^Qw`!I8_PvJ$KetXxqP9|2eWGAO1RDtQSgUMK&}vB>zh~apElU-wpq(dGI|Q3MI18 zZf;1IZjx8J@(lEI$oGLwc%nr@yhh=bZK>nLvg3%Gt(0{!KSWcYn;WROgMr-_x z&R_pN+b{5=_H@P5+%N#gIe#cJzkvIJjq2_T$3o_Yji@9Hf${m7(X9_hpTYK1F+Atc zUPcn^^Wehtf}Z$J=eta0Kq%?w|Mn&i_YdxgvvGti)#jXeMiv`YZQsdGzFr9cLq?w~ z*<2#vPNVxW#*i_YEU1GjISHqz&-y)fUqvsw?%ndb&?m7?-;f_pFN` zNP!n>W>1#Tk}?DvYWX0C%&U5=TVy~hO8*P~2tPqo6ph}QFVCXg71jjh>u3i2DHur@ zV0{Dh34d@wT<9Uf31!lqEN_7;*@|XoFLmpAE6Gc4w@2K^1M2Rm$1ihvZ4}iw*eGGK zDl}e`BD)!}eBGxpoPqGNEMnY_Qu|tbt-2zz0aho*--ae}n_ms+ZjNmKsbZrh(~(O?c*s~O-sZY~Vx6+H#$3@gd{}W4(md{n%h+-y(vEfMtg2lie1p1hb91nz7E?~wE3_iB-9{b=lt8!y*r5zuKk6H}RwEx#@iYge zRtYxSp6#nD7l78Ap^tUtl*{hp9l-%FnX+pOR~838h`w}DVhx*oe24l|!0Y>j~J#!Vc8Ebma_w7nHuvr*vvEIku-5*ypJT1(_ z2ivZ=JPt--W#=Gm?Qa14HyA7h}f9)#_`8AJuNg!(D~FxbJh>Th~lDH zNb$(PxpWhg+)#>v-jN2$??CFz9l++U81 z$7uFNd!&vASGOplcQ=4Qt7_xBmw}cMWsWyn+S{ek)N+@&Hncre^LKWW1eu1)#c|6H+k z(Wi@ZB9}V8&t7+a<1D(KDG_mGclq#WrP$>7+p{@k)1%X9WJ!%PY+gsjnQrAhQ?y`% z$-V>tE_2o^!eE=L+^h4_u1$w(-jI%~5&kKv=t|pRW_HQImbEO+<=Skni;aGiqamX( zUE4Y*vv@&$6Zi#F;-}bV)z1K9?0d?4w7KpF0m<>mLjDz2wAExGraY9)lZ^36o3Bew z)E@~auIllak`6UObz&MldQQjOS*~Cdaa+3WGKgDB!*#B+Agk3@LTy2P40ly)H`koK z$+Z!S>K2yQq2DItWDA8zQOc z;ge@3@nD6UJ7Plky7YEy+WJ(SAKi$U;nwuqN(}_|TnV8N%2exzm6*daD#Xczwdku5-Fu^AR`H`A`g1S{XifEfz+a 
zowSXiPv}t$WKOD?yVi;4k%sos#hkf85F(%{p%vV}Kisn5dy2_-~45H~eafce#z$W`~qo)(mbFTm*EkLKxI73F;I`r1pUoSn$Rz9KZkjTmFJc5c$E zoG^N`28t8l`J|`lsOxRocyq@7WxnF?$$b-CDcUYTDuHU`z2xq3368acH*)uKo6L51 zL_(1=>nAC)rx{|MB6qfH(vt}QMPD{HPNsOywJtAyhkVa(D)xAF#@j9t=B-+mCVi*f z#UK#H%Rx6UR!g?uo|u%jaI&Sby8&|Mz`6EG4J{R6OIR!^+)9#|XHzB^-sO7P^8s5K zTz50wD;R3#^O5U4*u#ifG(CpNWW~HDru<`woQTv1ZiNu{+pNp;(P|o*^PTGmNlms%x ziZ6nYug&Sx@u8+a4yk~zdd<05TCQ za1kTHS(T2++#`PMJs4x%vDUIV^n<|eV)d7d_;I!I0kk!hAp;L zqpcg(a5|^|woKsn6`~(b5c@MC3nA)ul|&t7^|MI4iZC{YUNAp8od8mneSL)|oim zBG11H&wsmYUrzM#m8Hh`&D|4(FS)6hmmKrCHl2Sb0%W3O4kdZ(#-RO+R`sFv;>eUx zr%=l<_R`D(hAq?V*(vALIz0%40wf>~7tSD*Eu>HSu@%*Q!!8>RkKQQLD=jr1;2cnx3xN^ z{ydC*Iwlc*yc{}Ekw!bhr zeFrzwI!1>+qv>OWER3f;1l17LMC@iOz{#cSEh#~~%552#zK%bfWJbGv zUw|w~&$39D&&JidUw9M4>rDCMY0?_&hFd5ObXQ$m$x^y;ZT?=#(gxb{VHwJ0^iUkl z{t zn;&M1_($qadz&!Y?f=07q&Szr%E2TJ+5hSG7{OYm$H7=f8}NxI#=Rz zc*Zu$c^A%)$tVMn-g1KxK@yPe4CC%DOji?GSv3JC+q|)d!6$uzO>63^amD7D;*rt{ zvM2Qtl$svd#L1wGz&f1PI3K7YWnC9TS4lA8!nUGH(bQUgpLBW`pl1vn2;9jRyZwQDajp)}d1A*f%ossR7FLx%TGl z4SM4B)uj}2`--Uq!!tb3so{e$4enmNosI3g2h=)|ed-3jX1m?u2ie-kfipyT+ucX_ODEi+ z26COb!EtOPXyYgpCqA%9jZ7r9w-bkq_KCk#yeq|=>RPpI;NbU%ho(4NnltbNUwV3J zmuKiKK9Fv^%E^=)GKIgf_dUkXy1H8Lu*%l_US)eik74*lMT(G7q+DTI0*Q?faX3#D zGFFCwe81H<1*2Ry6Y99u`3HsCNlDyDs$}S8xs8*CgkrSgh|W?RGSgzYhY8-8hH6Ww z>&r{EIaI^-ijJXffZLQWE2U5Vua^b!9WT@?nbH+}{`jmv-5e}S?_w;Xs^0o4ftg)+ zJM$T@;nc{(rIN%$N+rl~@2q0d4AGsV(+wH*^S%Ljl=Fh3@_Ul3%15=qDkMMIu_9V1 zH6uxci)(Iaf`eeA4PFf=xluJ|&7zZL+#;{G7N+uN+E|a2lU!XA^3zrRjG9FxGZk_5 zpGHWlq38EI@sYb3Lbhcctq(37d;A7=F!9v1Z78EfRf<8l*^T0Um8p}kxaxR-APTB+ z*wba?LSv}XGf}T7za#^f4(M_uGVP{vng@F=dQ_c6X(#rkmNCqq{`W3s2i6bf0*$ zqD~?Lu8BS@{Yk9q!+D8dvgL4HaM_lSDzWE1!JhKgL|3b`G(fjC(sLe*HQ_6wZr|OlRVtNFQ3X3`$s_8e&x1o%dnXP`I3C0YX}{Ux^t+cUD&nK zY7w7KbY^A;BZj%OeOHOoDIf8pu9 z{RQWErycnZAzSlzRCnz8KO|*Aw-u&SrFVcvi?#r?Xutc~YeOd0`5M9L)y=D0j2B>1 z?^r)HIy$<#y6xp$>@Or;f~nY^gzE+C5_wq2vLOQr1Va%%N^$XUdgi9ynR_$I_|&l& zc&@=b@A9Lud|5(MVw}xC4)BA#IP4e#ej<<;Tuo8PHovw^5Yk7*{9N%&EY@RH*HrbM 
z_H7-72ED}(?siXZkNEdxO@H-9j4T8%UNZ$~XzeTnaB4ImV{iTO6IO4FnLQMlA%4xa z;a(#%Z%$$*n;u6_r&r6cZP6L~D>OH=)7Y*yFPssh? zwcBkMmEFz!0#|^WC1=7xCKLSK=W#SmT)V_?Lha_AAxvGRyh?xcw+_u(AP=*m*)d@$B5J;SQIhVLA8MN%K zBu>5iyDB{}bYz`~0-hc`Hy9YB!@q8Ey3$3qgmZ_ucUjpY z!SBL#01qIe#&dOst2_C@sOkx{D1cG}jk zWW6Y=Y$@ab?DiT-Er$pq^%;jK>s~}CW=r)6dh~8BQBpUzbo;Nfqj5Syirj8EGSsL9 z=FGUz@u076x}njSom|@_&9W(Wlki6dYr|i1(4=NHbp*@CPcCsr#?7QYyW2+1$eCT( zLQoI+v`UQF?M`q(nc&sFM<-(4noU+m5(nLD{v_U`<9HF9%&+8QCaBFfOml^@S{at8 zh$LNMwmuzUo^@f9H4v#QfPjLA?hxS)@Be?ajtWBT8uoxgRYCR7np{Ebo8I%(z z%lK7%B1jW9YiFclWvF$bF!s&yygh>CnyE(ELRbd@D-^0Coqt4vTYDU z=|n`S3X*CCC%D|L?`gM)Y@v)08azk#6Ie9C)hYJ zm7mVU-|>0rfKCtKmOCPg)7ny4imA13LH@JGAG;7rUhw=9Qh-!T9^DUoO&3|954&$U z@>eE(FC(<98Kt^}YgFw)N8L(Pn<8kGh9k8aEU~y7VtgWGKz%>K3C#$LYgv}W}sO}4kVj!ef`^O)eaM| zMIA>@vU{X=THyKYQDLi-2!sf@vY2=bA#MO9I+W@f=8QfnuXB*gpp^!Cd>6_Gs|KoI zftg+$i2eC_5&chTF!r#Q&a<>@!k_6GiV8STUR1j{CJx_jq{8V{vht35Ca5HmhaU;- zg}%eQl}*{_fSEiI0u-L8h^MWdU-=Sasz?%$`!RXxPK<6nYiQmaDFmb^HTq%d5P zve}%@#a)mmMQtWY2Zj;@hOAy-9q}3pH2nxZWfn&ETd4yCp>Fk_pSV$&o=FL$3asyQ zs{1V@-*F$02F$h~&!AhQS(tVawqaBu5vj^%(~&#Xu|fb(3^luNb@-$q z#bi7}Rc><&tubqPI5PPYBekyhW?_WP>1rrwz2X7YN@N?ZD7bw**AP8bs6uZH#G=wg z;vJZAtP7t#EOps~C5g)}(caCafI^`eZB!(6`QRahGUkCTDfwzN190Hp0?rli^&r5~pVlcRWVQhsI(*5J4S({WY`dibyU=&2L}K}o4&x;8%U>yQ zYfhPvihc{8e*=yncRq3{@UAAE778bQB;)qE8fHvM=ywo@DKAHH&7zcENars(MXs(> z+!0qlPyayFdwaDG^a#7_=RpMu(e#Et4X)H*ia)8SmUw=2@eE@3oFMW<2)nV8cqh)9 z@Q&NVsS|n=DLLPtVo1utc7lE3YNCdJE|>6t@uTqvv>{j@+7Q7GiSTpX8TChBp43|j zq}e{PMAr9laswl?TxGY*6r3fs#wT;x01k4trsLksHZRQXpFDX(h(5jc-+T`fHfM?6 zi=0k#5zlMc3+kFr6763PS97wW&-1L?P6N5hJY?Ys0A6G9G*(&Z45(J{h_c082Xe@H zs)2Q1GdymG)zPIk5?-mW)Pq!K?BTT7Qc6uA1#CwR?*0#3-xwTOw}v~D%p?;}Y}=gJ z?pPDswmGqF+qRR5ZQHif>EPx&=R3FVk6YE%KXz5u-o4h^?|#>_A6#%s@~d(K0JYz= zSI3uQ%f*3ZxH?!EG-FIYaXgV8jl!+g+*`QtL~7`=f17T`-`j~c)()p;zLF<4BLD^A zYpiX)*m3g95X##qN!Qi&FukHJ`76MBRuae#AvXMYjkVr$wq>SCDhVb}*>Ye9c?B_T z!x+Tin6cLIgu=Kp*dC~536sZSJfwuHWXHu-(Vk>k>xu6rNxf0Wy98pO=hPf&IahJ? 
z)C*%y$0nq_u1?60lrmnFYwOBf4G6QD3oxQmc&Se-JwU#Z;}tr#leHK=VOc7KTH{8j z@|19e=WEP$x~qeKo{+N!JnZ1@q<%OWGB%WCAjo#^iaWaTi^T1l%}=oGgYv&@?A)t-Z0NL`DH$jd(QID_~HMMuyp3nQmKC@!!gcFKLh;qRCm3Id@C*Za_|2 zQXnU6p58T18-mtYYcV4Undsg5DS)n6I#>zxsINP+7jep+>DAmPE*?dG|0r@P+);*0 zjIOg9sS&!hL0xfiI(o3BvV)J%pQ%3(cea088%6E%p$}}fqByJItESXSHL}HZ*IHy8 z+Tw`x;b*~eB!Sr4w&%x5T1*mGTeLAoX=yzFjI}Vi?GA;cSZRdQHskxg(8Yr(_sAXJ zCbd0fLD)}9`V)A}U2M~VK`>PzOEJ9W#$wfHGTXJqB*Pa$#K@6V^H*+H8>M!izg~|= z?sYSn&_|Pmoh<0D!x*q_i;4yWpIAwTm0aTSWOo?@|M^YUQA7h%%pG0~R$TXih$a*85W7ex5F2qRU zG9mkF!@(NPm-+n^sRmY+`#X*4{iB9j|3IyuAACO&Bj(PF$CY&zc;d%lZ=$MT!tS|R zK4Yc?glS%Gp=IBC60$A};J%#u(aS4j6i9a9mT^_M=t9VSj5Q0O156k_HW(!O(c9ho z&&!@*o&DENIo2iz;Yx*E_gI>=S~9Gdc&N6+sZ!=Kzl=HB92!46y9myIh?jd!I;{)N<$olFReZ?3Pn zK_*kOADHc8f16a%+j+5}$YR}0=x8UmW^JX|(xVcK{1|$1@OU0v+C4yY%w1aHt!GZO zU(+?d6pyVgvMavze55rU#jY+MQGO?}#Gc%nYBge?>;49xG^v#FQ!2f2>?9WS)+V+j zkgwKt=2}F_9M<8bdav=z8kRn~z7_LO8P9RqBG`t|1Le;r>Wi_+9&_{irJn6%qM9_A zjAr=>Cb2rYSXo5%mxAD&rHp1l+jP?fz`+Vz>`j5sbscSJN*CNmf4}-S{^b(IOmoov+U#Qg^ zsna<3WYkictTLfODukkoWNa7>L`7yUy{=omzdvbr-Y118)RZy@zm=yUH<$1fYSPp= zQVyyZu-Orl{c;bqz)m{P1eAvFm0nndC)Oi4L00TbWJs;=NoMeR`9d;sP6ye6#wlNI zI$WShr)z-*NNKi<4QC>vHRAg%*3W}R@Rj%X0?SO0G z8RLY#JQ$Arg9Ll5c-u%yhH7o4)@FnE`%*gJj?jaqf`?-5k?WCHJl#5fT=pZ93-hEc z`D>!Rp4EluND=Xh{nB4YoJM4$RnM`yi@6dLDAiy4Lwv~$MNVcJurj;iD#n)~G|RN0G-{n$?|@FxL-7JP}hz zL&I2ZHMKRQvWZ!*jzkPjKlvv+WW^uhfCtm&>I3w-2AFI#6P6=uwxp}(s3Y;Lye9Ha zR33{>1iQsWDE7`p;^hM*E~~>;p$B!E=5z*R&oav+OpEqvKHf? 
z!@@=!6ZRYCj!-$`jh!(~@#u`u)zU&>uk~r#==c9_@l32W8yES;=-et};>iNbf8mp} zKhn(LMj=qsWwu8yOm^9 zOm^v$`l=gdRDAMWu_AiXH*~|}9imWPw5QFxaeGx3=lX6(&mjhFQXV5Q zBaP4Lz#B!gElUA1YS-iSN8v|`bQRfy9mx6Xz!Of-)3Ue3D=QKteJ5WN3ae@ZUiegz zR{~E*G*WoiK;dxslU?yxeH#T{`J;koh|TF$o;lk&#CYyvVa8L$5-?>BV)^Sx$A8l9IFR-^Mx2N$WNR%W`_Qh zDuT+9aBJ2ofx|vz_UtgeJ@=^`l0~a*kHdq5^bu$9=~-T)`aHalQ;SPnM|AX+UG_au zU*k#caT&@J!Lo4p*_oXMSKJqMjnd(q-N>bq&?kA8{w~`oov~jIpK*GSIZ=R5v2BlC z50w-FXW;4DI&2CzATwASn@Nd|s?Jp7kD}IX)dnEd;B)L-GKu2X^z}+r*wp506N+&I zU2F#0i)d{cehd73XD$c{31PeZ0tA5+o2@r8Ld5=MTzt}j{=B}vmZ;Y1pMtpRQ%L~5 zQ&Z8m2V>oLZl9@);&VmP9FE67KTU>Q%e|PrArFGrE`_p%e)2jU9v%)in%liD(k3J& zQA9+_mnh|kYrWI1G5_{wzW0P*x-$!l2Kc1 zaU^5zoHr?xW~5uX&CT`gUeCA}31xZA)0DUA;KSYx(3C`yx?ow{F_B!MO!5nxdj$dE z#SU)iExW{n=jepBWN5OLZAL5RQ2ATy+YoL8r^R>|7T~=~9j_T*QndgU&1s~&l5qwX z0;%FD??MHbE)*f^JMw`hUobCIC9^DTpNB}+C?;p?NV$tHeK^x3NphlDgUgxD4R6uH zB}DTMDlX$oU8LLFIFS*;9)EYx1@MveJ_-?cXwcb z99-G%#m##cA{6rKr+yTSV%;|524<45+ummf5vieS`7zZmd5EetWn;Ej1w;ZVq@1fKUT={U`Ln{eQKHIWu0p6#krrllAhBpBsD+bnkEVo-i7(+4!uN{PXcxocEKxbXr? 
zmX?J05+C2+so91t`RS8sGnV7HxaqC+swx30!b_NF{n>aS zLm~+{$e=h#g96^tp%4~h9528^8ApcgqC=J)L-xT$7!`+g&%-6D6$MS+MfdYD_V>ki zU1*3%3c#F=qzJvT8WEr}nG!tGuL#C+7?{N*TxINqk%lQvv$#n(01y>5R!iT^)>!Z^ zI|b}Dk=lmINw_1uP)(M>ce6$EB}2)9>k?U+a{9mN=;LUJQLTe4@c8g1HpDy)ne4}f zdePqmGGyT_aYz_GZM#*J1KwEDkJtpK$@*hF_kP4K87M&hfStDq%Ia=|sXWf#3aH zG)1F2B*!a{j#)+uG&jYEMJ#AMGjuY^%USPMSyS)xX+SEhQMuv7C!633yYaEaSBOo9 z)KK3?^^8W!z2=f7ldL3z5`M(_>Bl%R+3&*%zITL-4nYzFCbe;caSmo`X1Loio{Tjn zl(U#A=s&CJZ~H?d;m;L-IlIl*_U+fon3P z>6|j`oZ5#~ICT>Oz%urZPr|BQ!F5+#8Ej|Rw^h%GeD3>vTO*ND=Se3IuH_UWZu3|m z&N~?!AA28*`Rr!b)R_EKVVDh>(tC1=?Y4-(x>Lk=P)Xgo%$Ny1rs7fv{AZDwZQyFx z;F4Fc?eYjI8E_JdaS3wA_abA37(1Gkmev$}{Y?7RC3NEunD0Q}Hl&zrD_}>7;_JX- z5?dL@tb#;t3KLGtp5PK0XDP>EcfGmnSABlz?HpHJ$Vt!-vd4+L;RVkQGC4sxf!kr3 zGY9F9P*mF>+ZbEH{+Z^Fb8ITHB-s~V*=CJ%ShSL~FZa{PhF?a15-t=MEoT-&^yd2Y zF=?YRgzx8S5r3_7r%yK1orz)l3DF_v=e^Eb^h-~iuz%axi?!sc`&DKj6MPfRT=v1` z3)OwJ4E&%z_y;ukB)Roqr0Ee9!+j~t_PyREOMO3wHH;}-GO_B{Is$&>%6N(BKbdR} zSeJBfycqM^KgXrls4ZSoF8`Vc9qKsbMJ)UBAtSK(>4m)ti!8KuW+LE|6t>A)2hk8V zU(xw2mH+*ocP`^d9z+;`;VD54BaiO-;$)GPTotT%=KJgjf4EX3|BdHMXvF9Qf5!0e z2o}k}7$U~LM|sPP6>yC7;2xlDZ;ppjK+V^AJ8d2-Pel391W1=QZdo@X$O@sHXz>`| zMm7+yPIO@%8|Hr-#gaDP=!#Gau9zVNm_1}>vgYdqVLrs(TXBc8!{PPGf#wSdEKtOu zfe&@!A6Mh6Nxj(3S3?F4&Y$HdiFViFj0moecy^Dpi@5n`A5HBxtPeY+ zdn=vHNG=R^@k?qbF5lqmMGaX_6UAQt#|3!0irrO%peFBYMm)$_>mNexFG~;^3=Tyg zCY||i;Q#Qp%l`uIA06tG)1PNKgVXH>T(lb0#dlka_x@vWj89kIWR!u4 zhWB1l%szV2PQ{T&ZA}ig2eQS{X z)VH1|Ge~P5LXcL@Rj|rW~NacZz61VAQU@)!jre|I(74AlFpO3f? zEMp%;Ge;p~O+llNdHKY1AMe$HMHY}JqrzjOK? 
z;=>sSs@`0gahX!F{#A(g8@gHM_qWO3wgTO-bJ<5m8GwFTywQvPudWg5VZq@X{oj#& zu1zl2U8U+|+L7?F_U2rREV$A#c3C#Wv^>K*N zK$_L0Lo{DxpB~d?_An<~eqRVJsUJ|}QF)0*F`rpXBfY41c_IL3CJ2t5hbF9~$R3i~ zh%5q6w9gF^8~Ww_(^IkaMjHoW^e24w=kWz>H8RBT`H?XJxo_0(dg~II(j;ZU8Y+s} zr%=(Fu~zLLfyD6VhA9k{ny7fgbWA3W1d5N3Z#0?yWAcc{^_oCBo%LWkS7^ROey{Q| zu^yOk2eM4w@cO>vpl56xq*;zOWV0do8HnDyj1egD9a`j6a`eIGan$`P+U@2eneFUT z>cCr(yp)5!LzHtR-soWS>HneC0}Y%&fg`=QZh{?;=PoF0Ry14vj_5@6BgrZNna#9kGJF z-ETHuClqC@yFx_#AyArM1F{@}N_$95_l%$b9QQ0MAG4C#2ArsD!MkamgY{e(6xr8i zoquDOt#`cbMY%}q zOzuq8pWXEg@&^0q=o2|7b`0AnvtrBBbj##&6*|R@l`VBPm$j+yIp+8x>Kgh%qLo^V3WK;kP<3Pkgkj zsT#y??1}G12@biv2+6}0{k%|iVkw&rWJAOcYZc>n(jR?u<;)tr4#yB}JuUp`*Vqr5 zFiiKV3X}bXt^FAgfj2eHe^m>6muF);3eRSZUL_`jjPsj5q^! zEs?RTtOPjo2BkTm0hz6P+LlokPv0UWpFv#aPF>PhMb>7-bywP=L(xDJkF{vg6VX7l z3r4y#^BkR5Y;jezGNH95q(BAauv+q{am8*d~LpJR!Ylbr|zh>@DlszjLg$5P;Bc4#n!^Lmfss6NOz`+wH-ab z%_ma1T`;gU6WbKhy*hGoUwEaR+-7)&F%Dc%vR@O}rS_9z2Jd#;ldBl)yF`coBTx{u z*!r9(wQbcBvbSU%LRK!PuYbUhB_*XIA{grJ3gr@6Bh)tcEBv09=WeJH|na z^wQRl4iOPjwe*Z>-3s@sL5~o8&#SYZR{iBJ5qZSAGQTs{tjNg5m=fH)ZIPtMj)qod zF&abRHalH@##hlaE}1A!jKsrZ3&zAky*scsO1kY?8u_jELoeky9W#H>3*HxDi(cUg z16TDrZ1>SFJAV-RFE>6dPB%N7-EwH?<;i4Lsy7~8X)d1KrY42{TR~oh?A@IM z#EC-hucZ74JiF}OU3apHhi;$kP+B`2On8_9GGwoZ+2iWKKc~2-d+&8h3a*bdArA3W z+p*Owja;(2eT^9Qr*$dzJ0wRF)QX+YB5_sny1iP zuso%7q{v6_o zeZb}&-mVFgkzBv7y1S!{5B{vo_?w9woO7Z4sn3bW@QS|R(Hr@IsL}TM@>?r0l`UU! 
zj9?d|54wP_DQeh4?Aifquv83@r2o6UX9MloOXT2`33vOIDALm_&DVZ^h_fnT^Z3!@R~jFT%oDm^%*CI#CQyL%|W12^Fwq_GM54Iy?i0VHU%XrNtLpvE&sGAfz zvF}2$V-^t<+mA%fu={}c7?LLu=Gmcf!)@YJ*jG%>e1ea^rj!r~S7~Dh+#n)6a##Z{s6zA4|OObQF=5+p8^zEP@1$+`@SKti_krX|| zD(X8`ivwEYA>oaw638YK&Oo#u4(vT$b2cCFBqO7UDi+8}<`&S+XJQVrzuZQ>1bDHD z=aYbDGu^0{N3wQ|-$VHRX1LWlg501Z?=xvDRM(!V%$J+WBV|b@V!5p>P(ih&s z4DMwINrP*ELq^L8I>T%_?)p_sJ&m`zWR7w*PJL!^^@`}219}>9Os##bPF$}CIjx3< z;r02H5`~PcGR`R-O~GMEaHsnAh6pA)N@DYR&e^o0Z|d;kYDs#l92J?iwQ{*Eb-gQ_ zu#{SARO{)CCP&7DO0}Mi(S^iSi-RQ*R@d+LKgYZ+#Ga>^m$M$>Nk7v6!p&hC@Be3WGn4~w^UWi;{ z_S0dm?4FuO)+$6gb%{pcYeteoBG~a&_L-=W{kuWGmW?xtoh4VoIG~OQ4J0};0B(=` zh_`>Jy?OoXrpT_Z)=Xt={1L?ZurtPg5{K_#qF8`4wZ@n7`7-3(D+pYM@|)$cNau_^ z?AjxYbOw_T9e=6~0VMG*RCi~vz^nK~P<>09cU{aV*b>pN6J8H4nDq6dp;V6%)0?_z zLa(TT2BQI{xJqtsB*%bnLHcTS@J?YIdUa?Yc;&MOtH!Iw=KHAsW^@gG-ypk>qyz(ztKee;xP6@3wRU!+92D4hpmMw%6D{luPNyH(VTk!S-z z9HBD@|+duG7Nh{Qp_YYf_Id{anM!?F+V_8Umt6 zPiZ}s-%R>i9;!UEfTVj&FUL!dFpv zS;nN&eonUsvF$SF@+2wamt>JZp1)6RdJX;l{r(l_Bg2EuTxDjWYR&`wVv5 zbLuYRYrY+AzDKJS+f&}(WrW)LcXS-tYJFH&U7bAMwO#xB1qnFIy zZ^+0D+@8SmaOJAw>5~Wy*l22%2F?Zy1}uC}WA*6L;>({{ST7VCkx_<{*ty9FQyNkoEe`Vxz(9`XH&;!@+Zgt2JiSnn@PBI<$8~ zQjFssDi$8#zA$y(XY(v76pBm~9O8W?9Bsn+P!-GugP@x+m#n7Q1K$3(J50MpTlN9Lb(l>(E zTAu1@Kk=4l399qCG73{E4Or-#a=c{0>Q1B;Lc&fI58P z<5-lUt(_HB`4x*LW0x9Nt^b7$Te%~y&`4XWvmXw7lEGPHYzvr}l=yg0<<*cjV z>l)qFROghhL^Dyd?xB9-Q(|wWoJDwPkO(fkRC5g&)_iZ$g;N%JLL$E#Q#uxc6{uZy zz1V+L!@*Fl*6r^(w}N&VpAgD$*`+G?#7b)2DOfoz&W9`u2S>Y9&6Je1NTI7A^GAo% z%+^+Z^&3Rc0~9@Mtq9z<-Z}9-=;GwE*u`LCVMjDB|!0_zS4z@pOU)xJx_iQu5 z()rA(zo#o$zT+!xpz}{4lb5*JQXdR84a{LwTUoi)3v=rjPr%m6H>Tp~8|tJ%Q|J(&+8 zE*Y({g4Y%II<%SG2m?eJ=Au@yf_I;FM|4(>^0tgbv!j5MspQI^Y&$#gW&}%(UCf#< zDn|__Hcn2#J3x?V83;djG1S2IN)=FgnK%>Iw|9bRkW+g1J<%Dcc=v2)eyJgp`rgPO zk7t=r{b%nwyR?ozCM_8JdX)iL1z}UO{M0%BTvvBt=Up#`%y!X5#*a`kM$*)jm@i4N*b{@ES94vW ztC5_22Uav&o*6O6!g0t%?9FEzDQfK(T{t*c8enC9D@n>Mg#h=}AS7tda6A$CadFq(%#xN{t9z#gtLSp%vz1 zDwdula~|aG&GEuvVFe_EA8_3to@2eK3HcHy;T3A9Lz&OQd4AD$bg!gR^bPk${Mmk% 
zSeyQ4Ty%EygsA}@DnEX-39fb|!+No5>dH?QIRbA;D68wV!k8-|Ttw?#zjz%1?Pxhe z_gicyLAa#-F@Sca4Rq1CJ!J8j;y4V4;5$ff6Tx84hPOHYP{|Lmha=CO)Wv#g9tlfB z$K;a?_hRyiJ4z0mK9gaFq(O~OOZ*PpczAB@4#tOlQExui$m4$3UfGY45N`*-31>i( z40PMB5hY?*=2kNVvdy=7rMS54y^_%$jvLvNQKekVe&r;qPEpHQFTtqH!z2O8&`BXv zELMN*?AO6qjam}YjibjC{|s1=$&gHwLEf^SjZ?70QJ(f_UBss$wXk+BMHmU-qx4># zr^6%bEG{eHFL4QKnQp1qbt=H^oJw6^tre^d5||+PyXF^%=r~ZRrd+nxEx#7-J(l@u zamO~;PWbRlWCatqcDf{o>UeZxWs@oXbfqo)+(N|WGmpCfJ;R(G5MBE>+h9d9(K3+J)y(^$4ia|P{@ z!970r<0M-4yH+UHvMB9=QhwUAO4UehtOow+ZBw&e9LemtIdv1Bbpd!lHfM!ePqEzNs(qvwos`vUlWTvy5;$w z+TeeW)p=r3K?0~|HcCkMYj-2tE)c~#I!^rYetaAeDieIES7$wO3;L5Lc%1s>6`UTa z6f8(C>YX*n`8SNNg`V_#nN|w=Qk0Pfnfwq_<#@of!RxOkxWv0P-4i)K(R^8Xhezc@ z78g%ugm7wSmej7OD{hzBd{15+?)LhPJFZ1(4@jET58ZsSkagqOX*U*+5p?7L8tr z9u*Hc-eD=|?4M$RZ|^4*9A!tl_PGeL%aYYuQwg zhbHVj`L-rTX?24Z^yAgI?%FJC|J0c21~h!1OA%ci{4VHM6Q}>CZ{66KaJz-5a?zhd7t`kFB|HuuZi>W>E&W}E z2d)M@p~g=@6mMxTM>^t;i%19?b8?9KN9MCT%lBST9fKrDJD}oBsxGZjMtr zbMihq-4=IM&VjNxez4wxTY&LpAl8#-EvC&BmJR$;mw_bS{$53p6>DUvU^X9wZ+_~f zKpZazM|n(azz`?3UY%|4d@dRX`)3DhC;!FPZs5BTOA0DASn^a{%s?5m#0mn1>IKTu zNAsU!Y(|Vc|IimR5<->DyloI^b_Ev^TE0N^&LEzkH(YOpvh;YPT7p(C@pp-HLf2c- z-F!uP1OL{iH>BOqM@x?g_%T3}H!2;FNwN9l1?_Vn`I=BFZ0zPBAcD^jd7Ng3N z#PKuyZB#^FZdz3>pF}hUCl&U1Ciq)?T!yvAz_&Z)c*?tX+N#HFnen^M>rd0?O?FZ$ zs?$B*hVC$a(Q3Qg7RZwx>UPdfG*OC=J9fH7zbgd%{Y=LQ`3)3Lr|j7co1h;ohDL-&(_82N#XJ^oMO3Q+O3W zIUL)}cHCkuY4-Zt%e|?)#{BiI$kw%IgWgM?lD}2Ij9i)sZacFAf-IO;Y}9E$ZvugN z4nJNz$<>(O({appf66V}xrOc;A(d?WjKgzV6peHiIJi3?zJ|3}i z&)xtMa$jlDI(_o(O~e#vZ5%-US?gWz^xFBmI}appmJyCim?HB0<}LeqUaiMPg?BBOdQ6u|OGVUrGRvI)%(Z zh=sD@|CDR|W0!gjJxxKT}|#(>8@q@-TCRdCGdsXfBlnE5nOeA_eMp zxBFVmIWCDl?p~*pGf<&SD`lfCs&m7G17l5kUbJ)K$-(2y)bvD{kBExQLWU(5adIlF z@~#My7FalwZ&*D)n{tHx^5mTG+C>X62)6Y!VQ}{+eav5yd)3`cp(j~c1xfnY)q+&|J9#jtyc4IvFscxJeuqm>NYLL5 zW=Nd+R<2Ji`GG}dWT%Sx?BGy}lQz!{v+FRBchq6XE?Rh9cpkS^f zPAd8|W9|<7mhm|_`$*~%(LV@Ye$f>dvpX6|OCbROsfDVKu(V`+6oH?E$Gt7xy{r~j zupXm*2S{#LoX5Mwqm+ENyO_S*knxij=xoar7g7v^yC<@~vsRGfmj|p~!#rhp$lKe3 
z)8JH0qm@K~*s}F}jG$bi3XGY`PfiMZ<~=-GSA953FNzc@VwrzR@7G06zfgzy$e}W8 zIIt)KYf9ANnYG$d*<=&Ve4U=m5^&dG9Wue%^joA-Buixq=N|2zA^f_zA}^M8C^p6* z&-|iHtQz%0aISfWH~Z)-$&?#Nvx6AGsbeA$!6gBQoawBGHPQJSUKWHf#*$@9o3 zNs-)(;?&-e{oG-V$)jDL>#}JP)U1JwqAk-mH}kEMQ=_>(nO?vVPsQgO=mFf_V^E|< z#)G{m3PY&w&3*`-LoeqrHu4|shKTNryxl4BIk>{|S-=Wb#qFzN7jp(qo1;#r<}-w0pdH5M)kiB9ijnue8rj6p{luyRAtJyC;ovgHGj0Ws)J1)2w|UH? zdBL2uyz5W#@$|Jr*-N^sL?i>hDT5^6@0J2D9yrl0_R-xKMU5P-&t{G?S%n4rzGd&c z58r?YgXu1BTaT-F2z7AkC+aPmqGy@wum z8O+#2{Gu#cnoix-IRHbg4;y$ipr^hbQGw6jysiNYSkpurzxb5VzJeX?qM2?;8fcW# z94@964-xdS^KA}2g=R4MO&1g;sQbjIi@_^cd1JGD4F*Vz1Mur5r!f3A%3E$9^l11% zxYd1PrKm0G3N}MOU1%XhX7n!?5|PH770>n=JJ_U#PTs-jCb+(0p2>6#XVQ0*7Y;1B z&Nq489gXvef1;ge|7jrx_`$)zOxIg((oLiPow|_ZPv*;2pbqT$#l>6i!{u_V|0m6) z*5OD7i`DX3bFIvLa#JmK&QtIQ0~?#z!-LUQAdIP%<#bAF?#u~)JlZzFTcqA^5wGj3 zzG^pHufL{Jpe0>bPk7$vuAhS3ifv~Rc@#MdMQ`cvSohbQ1r!L{6Nx)*_uE*8^Jk1? z%f{B$bf#nB{`x1R%d?4lFV_@m~ zyQ}lM+pD9W`xC*I&%S?gIBHSRVvCCDsELm@8L-ijf|`oaH~z-1dh!{ES6UBtYtlqp zrsK|m0u!P9xRADH`V;NNBa=KGZwb38WaDc0DnRnkhZ0IxCh3mEEJ~V8uwIVVk?H4A zlX<1|#@&}RGqIq-m7LYxR+uE?tydsIm?%=oL}Fi2*V|Kq&i0jL$Ng}etQe#?I8_&A zE&r1qN$MuA}^^Vin(xn$bV`cki!`3+_94>OG>h4QpxnU$I*{cL(j6%of*ECx z>XMLAC4qrE+gm&HNNEGg=M1}}QD`bQuCZD&5WAEAvfe=c^9E~V(@@AQH@<$qv z)g*b^=VJTdXbCy_rL3NSt9u_##^QU{PNoxGZBg$>jz3;fwZkJZp!{zU9ZqbhzN?n{XX(9S@26tQgh^T$S3D<8 z{({ysF%sKZ#lHh+s6^p&$`r+CunZh?_+aL1sPt|OLI)#t!Q#(jz~k?Z_mvW$p_vsAxZF#s=SKs4-=2>1)(H|3zSF8hsD_Bm9A%TCdE=nYLwWD3|ml{EQ-w^*KjIHRd9o4qE_U~vRx7AB?{(hu`ri28j3#%(_g_`z+ z*$M>?dPP^UgiuH3EYf;~W>J`%=YZnjnC{hb^~S7PV%Y+99QyfIZEJP?mhaxQNZi=~ zrTIb(P4ts`IzmgKXA)Tcq-*TG+f$?_*?LQreD*8DATOCXbEzm@(WGU#`PYT%hsvtS zhy~Z)@vm5ilhxRcS}wZZ-G>QZ`NtwVVV0eNHw_rSZh!LKSBU{ksK+ZeC_XM#EMa_} zy@;E=tt7uT55Bste4%`Qt=8w2K-d)M*ug>W#4)|oP-?@}74F0f+2tf6E zL7H~vxZ|TpxFm|twfq$%?G?7C^kxyoeU`wqd9=Hjt=e*gM@6;Mj&^<5B-zhv;GDX1 z?s(dYQgl~lJ0J0oi_~-jvso^m_WFpHl9r;UvK$^Y_ZlLllI8xv-VJ+S7*YQ#3Bj1x zy4&E({kO>b{?;*dJn0xkVztFl-{vAMH*R7ECqAL@#qL_=sLjlQ1#zzs{Zu5A`>z3I1)-55<=n4c$X`kGP{6)ez}qsF?f>;jL;T4 
zWUts=lu43XeessSj}H80y)9n^^NvwBezh5SyJMS*jQPmZNsjakV|g2zavxmjc6;Zk zl+gImi?v)FOqF;)(z4kS5QcJQsFyjQRfTBG%aDZfoSoXZiY(WO{8WS2^rF@ur_nw9 z1r^^$Prdc4jRwxn_D!+PwMDpx0ym{!NdM5mCQNiuO*0!1ALmUiIyUPr*iZc4oS$|q zINR*HL?v3#1Rg16WY)7aZHI}k(WYk{bfJpj7(UE3q{>)=bQm2Z(^IGW{WDlV3K1L5 z^iIC6jZND~lVK*QAhw#zw;f#EJw+?haO)DrwNnw&;>*2e7KkyvF<*_TfcsEZDJ$1< z09Hko-du@3N+`&eZL@oLaYe|rS`!M(voTo%5L!|~LK;-#V8|npU@y&0o_vDl1>olO{@U8UsCmG^+zuPZdlD5vL7z43hGm?WYrj5-ZGxt;)y3*EOG zw;eXv%Jy_@4`+rBzZ2>Tws5Zk1bxScASrIlXrlw)*R~=|YlKIDpeu&m-o`?oQNy%WHh^#XS{V!aXh zO8POMm(fk`1!hR|B~u5GjgKlF-+Yv$dZywW$GCVk5yJrqNQ=}{n2A|seFVGN56|9W z>M2^x=JErx&y^8TqyMT(r>A!!M8K)C;itIREHl~lgF?Ef3tX`7HRaceWbCsf1+dq{ z#W+2%IWnNg__K6s!-jkJ_IB8kdnqjFZ;v8m763;Q^z8L)4p%jl*FvcCk97%dSs{$H zQvb8HzWP*-;*_z^hBB=rcX&jX4*^z1nvzicYOig3qz=!sUAQaNfBQswvtv>8nh_qT zpUct_A0{l=433;6Ed1&n`ME&!%QlD|&03VeSwTxRGGuF}e#Cey_N|X_>vqE4;sH-` zqr!+6EA9IRL;jW2uU@Ivo{%xFOOuDjqn90?eI}>z>otxKR#vmsk*7&BR|oZ+;i;F_ z@>{iOkWwXC6~-6ggT94<3%cWT6-UZf9MBC*soVEC_rojZsyLg|8P5WJS}HPza5}IF zI6V#My` zb+`8>2#VHm526)M7Di}JhB>j2d%)&M{9#QFtU~|ff7|(SY0b^k$uv|C zoqm{)M-Q#_L=}8NTR`_V!e!=I-+6@VH7fofF7*Q~KamJa=kLa08ZwyS3vUnrmV^}{ z+)SA?m|9jGnLI5!b12np!E}P_g_Kogug^U_mL?-I#(nPBsIu4_ej!VPefx;P<4uzh zxGv~KDACWQFSf&fmm2HjI z9}r=bwgcbRnsl+?MU!bX6#pZ4kPZ}G zwBkGTs$l?wfyoL-S+08v<_uGns~^BW&PerjYAD4swRW`Ie($r6bz%1!Msp3Lj-a!A z(%d^Gq+l)gnU&wEtv$3imXj0q$k?EQejey(;-H$rsI#)*P3nN5**gn0*Jw_<#u3l< z{uaE_lZuu7cvdF(9s_$#Gys=W1t1lOqP|$=RaN0^HMXdh6i>w%AI)sv_hSq?WYGT9 zwldqfMjkgR(#Udqz`p<56pTfKe8^^jcE+K3N}*3!KnPS%X!+)sQv0@j{^+5b*-kfJ z9ZuUSZGc-INU~sqrca-XE+}TSUvdqdh3qm1FX9hdl4JolT;7~%hw$sK*m^}ZsM2T+ zrGtLHiEO)$8j$^`UeF|kVB9m^>2K@q#yNNE!ycF@+^$HQ=wo~n5w_`Xzt9hrnzB3J z>BB$dA({4xMNErh*Fzg{+UTp>B8pFcg$~T{gJ^xaZG+C&XA<>P0VGdxH2tuLl^9Y) zN_)n_erUBZkg``LEN5V?#rx8T#{0N-%S0Dnm?5Y*?9`LHZcyR)L-MxycI=6gPe@our2o=k05*m)*i^n$*XZ-!SpurGTEjKE zPL%hx;w&w$3ju8N7dL9o^iDmz5YQ50N(YR6&u1b(pvHN;dvk~*zK!Y5q=cD&lze+g ze8>`ouwwPB_XquD;+N^H#}y`6U+8*Oa;xc(sk-B6dvz;l2y@xBgVV7q+|{n`vPyNX 
z`%rEa5#?)E3tQyk&T#U&Q1$DcLp6b}r&htv9&=%{(E-{Md1Y#q!zfA7EECoQ@rG># zHq&VDT07OzDje(e=AYOO3?-U`W>)Fzq4>I6f(W|M(LY3QUj_JEnRTgGPv&AVAIU}f{)cemmt;0(`zx6Trm$irSjkF}QuXfIU zMzr`sLbJrhQc9u>e%7bgOH@*{MHJkYgky&ay?c`_O@!F(xhOguwhjY~6)|`&M4NeB z-M8>3#k5;)%gDp#+lshBMvhq~8yiigpMnmxJ1wrsxE2@dxd&DgqTOtj*Z6t>45f`Ct7p$`0Ir)P2_L-#|=5vO)d zNU-8UYapZ6sVOs_S9Vt6{+&8{C}?=9y`D&w+s33P>;7sK>z%?gTLeDr&f@vvNYc3BnY7HJPBqygOrx z&iY0x@y#iJH%y{r2)!@E^}^L=>tKrfdy9m+E$!687Rs1i|XyP7u*WUv~@1FcS(P2xbTj6#!tBi`Zs(eq5V)k{Q#;ITs- zG%moE+EUp>UnmFA8$G1Ud>LDo+-I4D_X7=7_WrguRmH^iZ|eMrTKUV zyDg!L!T%ex;HSK{7GV%+rfVD5<2LinnbV`sE9H8^fM~qY+0p@3RVo|a$Gch-xn(!s z@X&?T>Z{{*5wAyIspO%d;bB0N#_=eKb>G7<-O5QmndX#JSEs4I&+dJ8kXOH?z`juG zo5jDmqF!%`Y&WMcKdm!{TlvM=7+d}zHEhH?6d?=PgLFEqH1mKo96BcaZ z-2CA+(qU%(SM36V3PW;tKaQD=uCv7T3frc73jvfkShb~zRp_ogQR;&B?XlJ+#{)Wp z`q`CnHCo!lJZ)vJTcY0Xh92Yt>ctSPfdpU7`i;sgV=DRu^1%A=H`EDA^9Bp?GhA3) zjsJ~*X4ftv5r5cgP1ayazEDjXSQ>^boL*Zxmd2?B!Qdv~-2D;+gSDet2LJt`3GGm- zvl-a^?3q5`h`uujQz+}?la?*#5Ew8I%dm`!s4gGv@c>0I=~9osCudQOP5SmMD!KjxCQ4NLdg`|m6oRjX zONZW5or6eyhW+8DsP!$|t>xbJkAuj_>$Ydv`%@A~_g5MPJH{iae~&&E0oe+v%AVY3 z-6%x6c5uoy_v*U&EaOpX3dL_wm|r6#!*ZClk3SVVsKaDNYOabxh9S~nsR8(kr_wCD< z$tIf}rYY>dKNKLI7n7NZQ*S!Qt1$P2=>OtM73NSqj*8&6RYpfgr9C{BoVGwzF)`^4 z4UO$tCyk}`6}viO8#}o4`irQZuq6Lvil2ZGCU?1NRC{hyn}m-}n@veFj)W-EP*TsN zJSAWE;=VeIvM=vQ$AI~|oBDdft1+fT4}|>FA>l#mE5CdHo-&eInJRRS^-E(_6WRs4)%0@6k!1fPk3FaIjra9bdpX28x7 zgfyA3JF_;Byh)Ix{qF!jW)~N<^83FItXohoarq5`;}vc70-A;1Ns*`CnmH&s^A>eA z>d{Dy_M>C{63*)l*kU^ziQ(U**xBIpx5A6ZJYsWeNhTW@zqYU;(3pY_^?xHS%9F@Y z;9(dJ=LEiAVArw`j`uf6!3E`}+DSk#n zrG!{G-e%*tI-VePpUqUlO?qYafp6m}1h*F!r$!&!eFc+rX$zE@U=ye$Yq>pwK>OY5)QNMc#hldj`+%HTl$AsT&0(ld1d** zstbn-e}2``JgQkov3f^5NUB}$+G&Vi62%nHQ&pvo#Oo_&jeF^zp(9u${8iTgvw>3L z>)mF!%7j(t4=2`J*!9{Zxu_^;togS@>4FF>lCHv#C7J*_Gf)vpk)(%bQpyP00liIF zd2EY_E5T1Xx$U^OuFE57NM=rD!wE!vOWzeDz*?P1?gosM2iEYn7CFzqzeKyCUT{%L zS9gt;GJNHH!`Y8CEPAj)yl6}f%dEuzHN)_XZLaw00XfGaBz&=hCXf7m0Rqqvs+)@{ zw-+Xlz~nWs*CT?UH>P(HgUkP~7GT#2?3XPV1oPjrls_x*s)TM=HJ<{P2gg*17j&SX 
zFd%H&Nu>_?NHB9zl9Xg~4={-03SRCN*pN^`LhWuFdTU~OdQvUXRP9Y^Ko4c@2ps_^ za%k}YXC@=HDV^yoUS#UO9jk7q?-L_y@F&84&Ii3I3pE2+U#u zMW=TFCE|^vq2lm|4kEf~II>I3?da?3h}3&qcayJ+VX*tBvczSg>+NqpBK-3zT-aft zmXje>FHDfmc9|4LJ(4%BlHQ>;a__}zS(SQEIeqhFqbAV;@%%WFn>8xHyEJ$&QmX~KNPXPuoY7Rn z9euMSq_C}JLl3`B_)sSC^Y z2}7p~KODJoZny#{GZ+u`5T`h;svv%PGRbkO3g^;Yh<1wnvoyBF*q=XJwLtvY&4WGH z;+_8&;ow`yi_w2#{>%{E&G)_;Zv`>h^O`c#L5KCt+4{bcvWfT;sg06T)n!`D=%Qno zaNL8I*8nXwKfqu|Di@t$A~P6G2}3_+W$?_P&4fW^;$uF2=R&sn^sxpDr-8=krK?(= z4m1`7ZKVWE?SyKp(-y~ke4F@k3ec z?H8(-==(5+c(S6Q8cBwII5{?GZMEu*?|_=fCFIae?bHgRezY6}OlqQCtLGM@DLsaC z2|qO&k^*`HTj%{R=WA)T;a2`JD?u}QjoSusnQ)`!Vp= zY@NTi8&XF(7skTIxsz0&*lY0;6CiVObFzyPU-jiT7~M@AR}rlliPAE3AwNo7p{Bsg z++arvCj(s6$J^L5qK2_G?oI4AXnXbzy&Iv$b28sW!`6CjiM-yUTQ4_E!+TtLTbQ9| zCs6>RB{8?oKv}mLN%~7R86Iw6ji)JnTf@afOKD~36(?#Vg;B>7*ln4WdS z;T~2dObJL}?Tm@C7vWbg=FuA1^rp9ym)S@5NEt9jV53NU0)m*P7Bb0s91!jMaVd3m z6`ze(Xv6ca+Al%<(Wd8;dx?B#kMk7o$*rLrMm;n@y}Z+;IZC*x_f3zE*xJM;ZG(IE zC&D>2)T%Hkv9?)I;eR%4FO;Z=h|7!woXhHZo14jO(G+5)#YQ{8^)BOyzWR^mf$*gG z*N36$k6*uNkVwyY#uYp15Nvh`pK<$0UN5nm3#f6%)p|zT9Y~`4rcElNB18GS^!D;B z<(m}XTpET|QVXbzID$M>plpGZO^q5PTl}FLJJH6?zaOL$eM| zuzJYrxp0eB)5+060iV5vMuXr<1j6Q|G&-|4*@WCVh9p z!pUHDMnWQVkEpK}(~wC;bSa1wgZh!D_A0)7uO-mymmD{5_vO91+CO7uY%?(>wtJj|R1Y+U?RUo@_4I%J+K8<;tew+uQgd;nykR z@!2Kk}Wz`HfEXHHwlCEAH=mBjvH?#U|h(;xKFTP(} zscg?HDbYO7fEMrU3e1LzBYG4xAcCFk= z5>k7^Ho0qtm)lU5)mZ4l_@r3nB*Kl~f?xTWMOEHn{|28`YzMK}F;)82?hP*6tg38VGVGp;JCL(kY4bfa(kD6>;s zpGBCg*KqOoS_>p`aeK7fbNB>H@Vbg8o4Fu(jD)A}lp0g1&TL?I?=_#p8y8zRgI6#) zyJtAc$6x0JZKy3laa`ngZ#*i`+ccMT0m<}tc%s-Jsj$np`)SBNt0Lpx~QuMGJ( zXUmcLQDQ`dV~@d*R9D)H_N;Z^K>LNJ2)+U&!={Xdp_tp=6~6tGc>KN6ySU91IhU}& zPKC_D^KumE9R{3d95r5caaaAaoaP(kc=0pa! 
z*l*JWo*tfv7J>LZ_Pge$@CsgGdJ?X|g8L<)s!QhdRd5n#XRa=_3>3)b~GHx)w=+qnYB;dE)5^IBoxCe z?C|z<>#Ac_`)A4aKT)JZmKhS~DFmxXP6@yQ#fN3`Zw2Pb?{*~Jab>y&m{Bt@H$MM2 z{T7h%P#7d|n*Cs5xMmirG9A_l)&w%Ls5JtyUAU13ouinx5 zf@9%7CUtaemVVg0_3Z}3=p!&oU1)4lh<^8?&bwYF^HStw+Z~}G-+C>BT%7}>GKdO< zwzBJRu0tE`6^Nox#cfK*C}x%$C<;CNhWDRI*o#vr6~?$HhzR?>opH=te;@Z`rpr3|8m>(>!EDe-8%%0r{_L{AISDv+a+n=AU=&JYl67jf_p< z&tsayfIYtc9>O3o?6-7zT@a$39ZkShA+(B-M0FIJCIs6#yT^?( zus+$AvA-(khc_~E?|}7mU`nn<^a|Aidy-vV!VnvXrAKRuuMJVzooI+6jR`WGDV2ms zH!rWUAq|-5BuJ0+dXkV@;_&uW{VFgNElx^vKUJ5G)NpIhTR6iZxo?1up63-wJ3Uh2 zHKhJ*hHudps%>TI+pI@p>Xg>i<(9I#={3IdGreM|;GRQW%bmOLq%g3?U7G7&DXpj4 zO$2{+d1BT{143;+(zlP(aON!?S?_y>7~;dUdkxdmTmw+`5BwsL|J&*ut=ofY67tQe zFuv2L%G|t$1l#2EBsR-|Ker&aIno;nbdq8mNNvyhuG5=+z@HN>%+zwLl1oOg^v+6x zE-IE0@fDWoIR@Y|CzDp@vPsG1F42||WU3$b;|MKC1AZ1bK6@u-HrwrjX_;p(|9s&r z$sRdcvn35*>D=Q$t_EN0I-I|lu{0Nv6fcih5;Jc%d0SGKN(p!-PRu>nmZC7OuKtQO z!pV*#7aS>L+(?_av7-808>lJR;6f||kWTFtzQ4+jL+}k^x1TJfzDD64^B^Cc0uf7L2=c7__SRbiUi@?|*$Uo$ZHAkjbe}m%m#oej|xRys2|7 znnw0#J6x5pU)Z{aUI)Gh0>O=|WqrqTug9z^;79Zk$WGrQGLb^C^F-0`H4OHZTd&mI zGZ~$4vGE0C-J@d@;A@ywoy!}gQ)kB>zEKQDXE2NQj!hHoSN;LaUq92A?qBWuN_hY8 zIwyTvJR&d;agG)!t~5vYPcR>hOHHjj(LW*1k)Rc`nvjCF&yh&# za1_&Z^QFs}k&;0A0Nfz{UwjF*D0V{445gFpexP8KMwvb z^m|l<9=a$2i{T%gH z7H2_IZ~5^E$DV>6m<*yM6I|t$QD4$p%eFxzF_s}XR3$;@0{y8HBuQw8UqJVxya`xQ z4yM!`N)EiH`)&-U2qRE-x~@)Fgt&=3n0m9Fa_^VLRj* zcuLi_^|hh`m=vPj18E4#h}jP(XI#xaY+b}i5CcYjc;r(GD!Pz7oe|&rbCio4 zUJx7$rDtsTVFZ{{Gtv(o;F?K$3Yu?T$gwKm7F@wG!d+lh%-0Z?g(@F?%PHEc)l zQS^I7>ZeC14B2Wa9W=3*W92sWOg)RZu*j^V-FUfs^PIj&nKSCtCA<*`RJdd<`KGdS zooO@e=;D(HSrN;WmQDD9i0|$yoD5MtIo1`*@B^?ctn@LbDa?RqoPy~YftS9WlAtdf z-}SqknuoM%zF7!WY_9HzFJQRUACc2qFG1A8~2@oAFNGoI9e z+J9r(K}8TbyyGK3k_FKSz;?9UE0!Mr3iXwE^jfBX*Cmt{BkPkqPvO#@QapMRc1$!} zU%4M1aUrOxqr;QKh66ScJ|bq5-+j!?lg>4Itc>hx8)=IXN7Jo&5g}m5e*y7TGIMW9 z)V@RwJ6)uNw>(zG;Qdg#K~v(AIdJ1FKnhOfOl|n2I}S0|WOBfdmX6&S?Iu{v zYwqBFf&$&{)}%@7#u)YOg8b#RC&b-3#LKWgFH6=(r(#>aNII0AP&La!pvxAS zc(}KAbbTOEsa` 
zC}sYRX}jfoOH!O&3fsB5#7ZSRlcTb#-(%MBw_i?3vy;EpkNe5om(0~+kxdz_-e*b& z^nHy~kswP%0FqeMgRyNU+faRS;aB}v%>ru|MZ45jY_SvM^)CVI+6#l~rze`3*E>s+8^=<5#D-39ftig*}#b{WnV)iWu0LS3j%VDMNsM3%r4jl26B=HIo~{&WG;slEcx%FPXXzMo7%!7x6za4y z6+&V1!`nh;O|>h$tq+|sYjtGg{z!g{>4P~3M_qrzR}-n_nQUVoV{GWVE_q5~jHKjB zVapSn19?Oh<#eLPlb4}uD+oK3j>oV~R+TkjM?JQxv7Txk(53az`hC=acDel6z!YR* zRJ-;~I8n+{5FQhe{CZA%P^-4c%>|_+QygSW&v8^Wu;l?^-;q2lH-s}#krHXlC=|qF z7<#F*BaAg#3$rdgmCoovv0t9SMTSWO>eT%Eio5d*YRTXz|s$_uZPMBxlRwq z?AO=0!qBeBW^yFk%mbF3YibmC+wkLy2|q8Ma-9i4jKMiP{n<9$N0^qrbNUTK7sK0l zzcEeUo5+DxdtF$RFcby)5Qdl2DQPpf)8Lu0>gCN2WDmpjJ51`n5K6_$*cqLPH?Y3- zhKfj#0NaSRS7TLJHctxq2$Wx$e;BbspCM&w1XqlF7+7>5{62bXg`{({RXNNpx8Ofd z%|)A|56A_hyizq*#^Sq0#41Y%P|tm%-~(G#FLTb|D>M*o*A1v(}z) zx4!bT(X`sYw*gq|MJm!BgpDZu-$`$MYry6|N6}Z}bq(1R0VNGa+ldahqME&RW7(Cn zL@%`7KN_W4JAc9m^doL{Bz2AEIRc$c-*^~atU9e7VhUNUv)v)8DljHp! z2m?mem8vUc6Mi4Xb|CF56v<`j|ID594QapLMEwW_EQ*~!BJ>1UNUl55Vx>z}ZLoc%m-)f9DfARrPd!1&t)m*G zxWTDE5Q$mzN#c;xQOoJzrNxj~7PQI!4nk9pY}42(s;!OV{=V7dS&14#Dn~lTcO!so zQK=&QM(E|+>@O!xH|!w)D^=ZCOK49Z`L47u-Q<+nEqE~rbjvz2X4&ggVGEDGJe5tS z=YL5jseZf$9D)kgTZb<7!DhaNi&|j^!u85MoY?WhWdS;*J)WB)g{*j)UHev z(Vz^-!JZTHHqww{c{T-aUbhOpejC%W9$6Hyyp(W*IMN6L4N{m*RSF9vA+%pT5E8xOb_7}+54oTGm>u+Awm7DjnQ3IMZJ#opB%bb%(s4+ghtZq1^m;$o z9cyn@K$mXiq^y`(bR+b6ZfRvUpNaN)qSHM8ORT5G`1g_JtkdluWetC)@vdQ)vFgXT zt+w*@OC=8*rz$K`5e^|M+42q>Q0@SWC3@Ev6`;i}<9Z{+PEvtKMKp{yW8%lSjqrc8(L_bv-vA-*G+>ZLKA9Uh^U5fjh)x` zLI2qCu24x%sLnfHjLvxN2HauZxhA`T0|GT^I@g8nn<~J_pf-KUi;5 zU?_}t<#-%ZpM&LVTNu4fYmcd*TdibmTRQy-hy8X?DM(a?At(%ZM4Jfg-s^BK3~1te$sQk3 zvs4yXcj;t6meEo$3fwl378y*(nA%qsT@2Y;Gw8EaWH?q+83dc%*sxz3c4W5QFb=3O z2bvsFRr#&7*Pc$y)lr3e#_y;>rBU+y)kuwC+z>Li$P$_oiAVV+=vF6&IE8US8WcoS zBY}NqW_GI8SrYD?*6VE{$)QksyEmXN4w=F#Ob$o{MC7z2GeRUDj7i^#*qznuz6&IL zBAm6Vt)d&&vY1Gb32RB(K_w z0AcL>o5Jg3S}Kp%ZMJnF6QIqbDlWpO>wcx@~Ks#@UK(VIyxy3|gQr%$V5%-J;?4NZr)CS2MI(+z1#cOGV^6!xxtV!u7S*r#i0EuFcGqF0{fqW$+Hx-Us3yiW_d0;ax^ zR{8@=+}Hy|acDMSJ+;p?JE|0}b;C)PRfY4gBh@;`9|*<}qN*%2Ny75dnFe)#=XWxESd 
z#2>5M({yg*^FsQ`0@D76xSyKbA-f@_XSNMK5kK6aU4(Rf;2_?P$+|w08!tmvOIN5R z^ik90^b5qW0Y6swMe&F~LCI8JKir{Pgtdj7IQ04(HV$Bz<0#IHE7x`OptlWe8~5DW zamAC8$OzG!wx}n7de+IuDO*<{Fg|us zniYi-DWSSu4p?xhO91L)(TCFKscVuwD~72eC9cECe?pdKrtIZ*Z;kt&vgIw<=LtC&Mm!A{-> zWc_@^%}qYs9RTsWNOku1(_^s4ah1;S_MjCuFLMg7NQ&-E7MyqE5&E;^*7V12W!SVK zgPc(4s^W>)$lGF<8xYJUu@KGMeJQo?{KlWk-ETD;{oY&1j5P-#p~Q&V(H&z*!<^{y zU6&iHYQTFKEpmbkf8AXt>nc`ZwO&i;ogg`J^8AKCGMvq9i`!U*nW;}kMzI929NuHH z8O`;HUpZRPm#_fVpvY`&9+{X^)wJcBNJi|zYV0Inuic}kVnjRZ)s&H}hqN=Xk)tZ# zdPXCWZ5Zh~XPZg=VD@!Z*BR_xOlkpT!_F!VXL147I&aMFR6G8~J~WXAtTp)eZu92= z(R!nf@D1A^-%nk=CCGYe*lThUI$*_6n(Zu{1x~}i)6k9y1bHOJJJs_-Nu!(%Mn(9c ztJQP8#A<%1=&bn%yr4?&9V;L2Hg*-O!#XNQX+I?LG?9KrN}dDsyzfDS16I;;84HwA ztqT`$3L%u**c0hxe4O1}ZI5`|_sOP6#OO@#kRY*-1Z~LpzDJA>G4&?#?x|C|+;ftY z8L#F-T=^8#nJS$gA%rYj)>)VdY0GQ4(${+U$(s`NwLD}GDsZK{-sOzmu-PCbF93Rc z7$}0(rZcAR`;!+@3y&t$n(lW^&gy?GUoNz75TCInT4|6|*OT$K`IKIT&A>T*;AUiS zuDaxWo6~!p$mlvzoqr*1i(4!;$6iBX67#T+u#>U4wK%3olPdfnv|f{?78|qDIHl&r z!4+TEGu=$v_R$_8CdBT>^-PQ1OL!ymu&EO?PZDPC6P5)4` z(ee390ezlFec=eIDGOtvM{y84`5XX-64<<+=I{LtOg0DNe^iD3vco%KXlA2&dtkd` z-x*QXO)UKZN-3{5{1~`sbh%+SSDrAssuZ66Hk^|*#6Uk~#Nsg)c@5~V-RlO$26L;; zQ~bo`#g&?CQ>l0e;x&Gidme0wE4sg2V}C|tYilt(=DaI7168+PS=2bgt=!K0%;Ex%SLC86FO&%Ges{ka+P*#x^X zOUB2duKf-cKw*%t|EEam=^!{zy@r zdGvl*axznVJqF?W^|T;OV)+N}w9Yf?`L_{JW$6NQfc&QHdOhSwc z0z5PtD_GB{&e3c*%YL695@$WlGhADVGPVhXp&V_ZQ@Rl>x#Q^ux+V}z5LfNauCDEO zAJC=?6=OhxrRH^oGj473PprD4uU@^s5{fH@8)p0-smtU^KxHENcb`k~$m z7MA!zoLlPba)%MXviD5BXPjvS+j}Wx#8i=@)GbgvXq}HK5e^I12DGYsPF}+tr>zlZ z5T`BAhugNp1@bbzMufYR%87h+i~d8Osg@Emh7W|BBkcAI?(!Uh$ABqZRRc`-jFX$X#=bI8qnH^Z{iVnp9VmjlO3?nM7;AIW1iDSoL-F3o~R} z`r+otDEB_bH<7DAr#UQzI2NPz%_eVNHxkxMc!JaeeP#2zovHAkNHCSpACf8)odCH| z^OxmuVa31!WzbvQ6Rt-Mi@?7lgtId0_J2t0%9qw};M8IVBYNx34=hzq)2R{}TZ?Oj zs3Vn1c~v|lWbZUvLo0AY&R?-_L|u|7YWC`n+l_#n5ah2d-LVRekwFtoVO#c6$4xE}`r$3vuowuYE%4E$N(nyZWL4rRr z&zUw~Q|uRyIsUjdH2S%9bbOoJx48Qqd7VCYC?;X z0k+|L!BQ_`p8IjhzY`Apqzmvj@q3NPeAI;I?Wi}$jLhd`8ZX;WczyRlaXA_47Sb4JTwGjWE4-h0%`k_U#Ml+*T5^=RHhy 
zYPi??wMlUER#&k%F@D`&q|;wZ9aCLNy_xsx`IO$2EnaVWkG$qa?VqU-4kJRJZ0R%?{29b$a!*QR+`@2 zV>No>!b;JmtG+}Mx*lRIOE)QMC2ECxG^aBigF?e++#G3^f0{24OjbtCx%XC3m)z*g zQ!~d|i;tDuN5f2sDK#e}#DVm@!tSuZ!CFc^o2qwx8r7tfttO6WS0f_XB0g|Eyp;ww6Mt2(n88Ku%RA z|IVFK$OsW(+H5Fr5lL27^9PMlzihTNlq-{+_z4=rRE(7Mu~)MHth}t8oQOok!S3X1 zX2A}<%qiA#yiQU{%^@#!HY%#z*|v0QBs0IbVWkL6kJ}p>hWuIAbEmT=0Ncyfl4C`; zU2pt~jh`J-dxOe_g)s|SZoK0|+Mn>1xD@ty#-xwM7q`3VcRtfICfkfbmqdnSGYK_y zL*c8pgEP=6{CZr5usOrfI>^MCt8^Um-iJ^hiz9U$zotjgQn3H}a2sS8fHnHCBNcaB z=f})cjlKqptRJXETrG88-N;04G831;bL@1UyAuR!I79O*t&!$CpDn)PO4dKrn7 z+%%tWvBKJfLB{D|08|OP9k-sUc)%QVrCmXV;Xl6GrV30HT6}as#5L9b*DNAv$#jw= zkd+cvzl$I$W<2}33NA_TlTA)bCN<}^cdzo44zk(6lmjvx!cs4z2bgzlp2-RK8FW^X zJEo%7Yds4LEfK|S7q*jBIHW581NWEVMr8=&w$>h@OKG~s z$HE~%4;3$h+H^ALtrsv^TrETtEh&(?W+!)aFw1uEVMC3K)T}uQo0`gj?ljU#{@TJu zsQEoYPUtTs6*a%rC_)b!I=Pil@m^F%TXJwsguuv0q_$LBFdyR_o%$O_`CsKf*`%j0 zhYt$?@4n@QMOM(j5Tw#te(-6w9B}R7_h?9D-uncw)xIhi_o?d-?A!U1Wi(LPKSD8*TW09B>SIgNfYRuMDf-}IGwShXOkfj(M!8Jb zmCY8#$NRx4?J%x{&5xc2aL4jmC=E`9`kI*GclMJsbuQ)V!dqSKB6XCRm~?!4=IUlM zUiml^wg?JcV9Ap?NRozuXsOCOvUZS~BvKo09ms7qRN4{M+3@?41V`;xzm8A)moXo0 zm(#2BHfvuwyvUT~eBwvU-#tJNcxgu*N82~(;WJ*RdH}wRf`^zZa;L2yHa}1IN{Jq= z4CQkXn3K_TkEPQCa~o{P)`ST6H@f0&X}z9V8p$N%uD~E%{OmXWCwlt>MPje(ousZZ z9)}DMRNVYp#$c5smX?Ah?%lUA^Wur4SY;7K4-~H~eT3TB37=+VSx-)%{$M(SNaE2J zLwVlFBzPbCB>GO-IYiO3g+gOwai743nx;F48!HgD4D5iTITkYO*XbZF3Jy58;=tdx z{r?=dNylW6%p@c<`W%>r@7yeD3!r2wayQy%ROF0kqj?u+F7PFyTU$MFzlq%WDnJY# z8*Z2xHC(RYeZQlNU9pUP%_W^B&c1o2S(9ZbHr*UE*$8i>VY0@VXqeM`YA-@(8U8Bb z+1W;IcYV+}xb$?vi#YQ3GPUbC6gF-qRU-2JZttRbCD>{`10zrh$76JX9wj3$hF<4)(J)V?qb38OYT!<@gTG;q zgnCpV9THhIW`G|G$#tx(hsZES#^t547OAHQS4Oug|b$BYG6#vH?!afIddKdrP7&~(gwY6vvb zw5n}D{yL(N_q+5gtU6Vut?p*#IBMsMAVCb0M+7nhC0)*edqt`DY_0goGp3EkgEbLh z1`+IU<+(=k{z^x&$IpQbSYq2!t;)-!Ov^0AVci%HGHF*Zp@jSE@SZk{d{O6KwqzE} z#!GX13mywD>Cximn=s2lk_SOUMj78+8Lre>s=yS2I94M>@F{me!c`+~e5_3= zoB4&!acy8D+ZE6M7w(xJCfMj>LPOc;ijD}U9{~q>a4O);M$VUXQ|dYVws8IVguj0P 
z;keaD=G@WPfN6fcGihf3HWvI|b-+JzLpn=eNKrkCkOiJTM1)^MO|W&FQvO{B*DDih zYj0AH@vWd`DAOX9nrYwdofVazzY5k1mOR9>rBYmCQSzDbF{L#kaT^hvuj+tYG~-w- z4ZWS6Z7gp(%Dfd#RNWS&OUSF&Siwl}Y@AsCnRRSO*jq-$xoq9SNeCL; zHH6^qu7Tk0?(Xi;KyY_=cXxMpcXxLh=j*-C-sin{jQfs{Url$@^>k5B)mm%LHRn5$ z@ag^`RES4kd2A_`!Z2=Z$)+6rBbl&uCzLs9I&e$R=R~1iwoMkV=KHs0SrowK=VDh4 zn+1res3e=r-rRL^nK~o^^fOYvpVI#ZA%2fN9)h0>oukgi|12jKvDR>cYp0B|WDSqd z?GRpWwcC?emPat=xd|9OwI6aj+L7^4#i`&xTFiEut39x-a1jtOU9Zz-1#c}i$(~U^ zQSvU*y8v%$@^-c-w!=D?{E7ueZsbfz?{W3TlXcHz0st5#^p<*rXR|A>1)f>|E5Hre zl>{-XB(rIc>G{DWq|>MQAh8;}nS~w{WBYQD3AuQY{uPS2BIRB6+%ufF@Rk#)f_u5(n z_LS+Vg$kjvs+v<+9_|wlXT%=yL_t?G?q8pcZ0X5KarfpQu|T?fnH-xIpe)8v-`y>N zpC5r`b^bivqZ;UeRwCb}fq@tMr|xVMrtq8bmE`9g1cpPPO3NF4@BsMu)O3B0!ea2< zM13Tja7I_De{VL4X|J_*#^L zl`O*}@6^zF@n7POeYOCrnM`jhb=AdonU#7w84POq8#KzW_G%h8rG69M284bOc$=N& z@5&r3d$ugX!k;;m_K()vIU?KIpxnM^D(G13dM8(~h!LunNzRz%({-&(qwC{YI#6qq zE29i7BvV#lcO*qKr_;-w?8+EI?Q)n@d&ZIu=OLn`dv{oVKMSwpTm5R>*p@`WhXP8@ zd(t$TLG5-v8R0noXZ70F=0f^#Xv(DVf`=fXjxo+_>1W;B{yHoXm;v*37Y=~D_sse` zNzD!$Jh;YT*B`4m1_*yqRkwH7Ae_&TkIfcr<^VE5pAlZV>q~nsNw<-fO}RS_@1WK?Ox z*5!fdD@o|4(vuJzX}tE_knY{Z&>D=s;}3+YJk3~w77|rM%Dx0%<2e`ms;>YBXr{I3R*>v*oDV;r4~LQL>7R5FW` z>AW5|r_1(4r`TT<%G?vWGR@fFbsJgm%Mt!%#E@kMQ)eP>(#Jlb=-%iPaHLhkFDw`d z_1$N8Y-VUwV!Z@jRlGkAX3uf{Q9=VnwATw+9pKvp{DDc8_q`nVwr1utjm zR91GW$(`qfq(E9PvPvlr@|grQchVEYhe#7X+RX(Em399%dYHq-Vsh(>Bjvg1Udd@yJwi`0TwbVOXH0+<}+S^cP~H| zB&Fwq?2=InWL`=U+O76r33bBVAL?LSb7$5)0o`{-Ousd6_5s^w3Jp?=-_+n0>}k8G zFGD92O=f?e<#@#&CxvbHJW?Da8|d%1wMFuCVm#RqBV#{DB>Sl~u&D?mJskBbG$pj7 zIEf348-bYI@v=~`QV+}V^2fK>7@?)T0~op&p$0KdO=kf(ifA6#piOA2we z`xzXJJyqTvIlFd!qJ|B;kY75&8xa?faEwIb^_UEZ@wx(f@3e`t$&ZrLPORwq`SDtc z+HKL0)KyB!8oT4#?dRWJ^%4=0L_G$izsfZif7WPu{#sd$X5+CC_&v-6hx+jFi#0OcV!;sYMji-+pTU`L!<2~Nyw3#xM_z*Oo z9lvF|L*nR1sU)f5%@ZpSo1H&d!9Ps$qCB-+5b^K?kimI}MCsQay<3Xja4hY{6+*2y za*+_wlS5|Cqtdu*_jicRCw8!+{XZxZ#wc^+9i0k5k7i8Gn37EcL<42EsXRyG3X7M( zRx>`&$lXE0qIXMG2RjW&J34d*3@=yubL`1pw*hz(Wk%P1RnIgEXT2T{X{#riT&*eG 
z6!edTb>q0FQFog++#08`Sf!7FDZsbOZu(t8V=4X?YzEkPVdY-CV`P{1 z$#9g#4$~N*?T@@V9QdTrW{PhDrOBHFNKC1{seG?9Sq{`z&fI$>gJW!!+P8p-vP&S> zJk)2jB8=r=%HO?mJ-g8ed`}?CDegIFz!SqT3<|m6;3QS9pyb`X z*ORA>ddkD*pPXs7cU(2%mn|r73N*!pOhM2LtaZORoWnqe{-DsIVRqp2w8lzTrx9K@ zC*%SCfo|FD-9c}#eh~f*k%=>Y8#wzVa2ar9Kc$U5KHGl5U{U#{wIIF7)sq|NP0{3g z=9Y*K$bW#*!$+D^Lj5_;CFu>%QCtQ8^5bk;+l?Lt z$Nr2h7xIW)u|mUijYQwhb>~4C)bZd%xF?y>Bj6;39xl=eA@4~-faF|LEVYfjv{opW zfc^ORi=g!aB*pi~P5$^szLoD-pH|r7Cx&0X4{QswxwcBLT-ZH*_{iOvXtRl?HRZK_b2uD?rrK# zdrKYtfD5lXz(c5b+n-3U&nb@O0z4?hS>e&e+pS(tl;7lfsN)#}xq~N9xHKUtoNx!# zB0cXSQpc@Zb7nH8dg^SzASPBr7yroNrnUKfyF@h>*?knx3 zk=4h_-uT%DNc7jJhFj58RWA(1ar)p-=7`TE+d)|1`@LI#bY$v$B_mFeCrJaS|-PVk+ibumV-N zE#VS#75Nyt2Mcarklare90i}MU&y%q(TK&C?E$mc9^Z)(KZfu(y~*lEABBGj7N6vi}mjiu*y<50#P?!+0=$!AAJAiyQ=dx2?A1E<(#x zCh)P*yu{c>Sql$-#l{o-IM{f_;T&4ntW#O1>gbEMjy+?$onMGDjWvH7jaE@%q_Nb3 zekm+nSgIHdP0P?W*Zr8$BB7FlEolbp)3FG($kKn89~6gvFi%orZwmh4d^)1tC|6r4 z?sNyUec%zhekyfCCks|2DkcE{>Lm0882NH!)z|QPCb-y;Qbgx1(N(x>Y<0l=h;KcV z+QyF#6N;7i?M=&?3roj5{R_p`f0pN0UojsVhTrBsBvERTZjMW9^@CyKli+pB!@eo7 zbKYg`PTx}*l7?E{SD6dZIY7}%lh`>D?s1)~e?)slW`f33OMc<4fNC{SmYL>Sd{x%s z##O{Z4!L@p;5XL?&r|_lU^f$Nx&To3^cvocUr!Gffj09%PK~2@CLapC2fjvE`-iO= zUo5}gOem|Mu33a=A6!L1p21Zu`V&WZmf@8|%bPXEBK{zur{_DYnr8$gy(p2;<{;1xF}bgc;gYtO1NO*ryugQB)rKLesEZxe zQ{Grc8g@ZazwhK$!EAl&`0|eGG^=ZFEv`>pxhXy=w)(2X1^)G!5d&vj*9`9z_KzRM z#cOR6wCxg;6z*2*T22FWn$$cxm`dti#IgE%srF~ZnB>XY{9!*)ckeJucV^FaXnrYs-W7I zN6bSU=AI@^$Bbad}>zyyeX+M08DTz<4IDMd}-z0!g8VQ%o zIDrLHO^m%!nq0jghb3F!il>1{D?}IA;{u^Gv|5J>6ek zH{qTOu)CwcQl}NO-G4?iiys2{|Fj~u8$}KP)W6-Nsz-HBAhKwT>bZ8|&KABlt)Cs4 z_dINDMvabPmhx{h7&Nm+&RwacDn?%Ox5v5xy;`-)UzS){sZkZI_W%*#X^MKLn2f;>m-_fHbxrTAtvL*KLXA*By0i*X@ z41Kl4ZkFD@DT`;x(d3EK2)a(UADzs!0pZa|3!ozQWDdmsYzJcf9?vh$(oO0v>B50&cPMEy2eH6ugc5EBO8gR0!dJPb9TY8@dj3%rrV{QH@> z+nce!BMSP3HeYOHK%ucB zBsH!&Y<{600W)VAF@Nm1ic9wh$THhQji2a`dNN5kjBEfU$Oo}=IU#;b9N@R-e^na$t0Ej z4pLpdp8o5L?##*pPGJ5HK0RFA=2CF@?$)F=5&kn^2M=hJY}eZZP={>$U6TPTG)QzI zbZzB-08i=()w;1!jTvcIuRtlIwMsL-SaCBCY 
z>6Tf(u1Qa4F`jT}8%6Y`A)>(Wi7V;!G|M_UbA)$n`))V!)-J~SQpX{8XDJm^59E`k ziJnGCkvf+?oHqPGHdk*o@%&$Q2;$DSt;By`P1B-ixm9$hbe~?O8-W- zB$iV#MJUZz6gOM?c^dfq;~;@P1BZ@h8h544Ue_z5*PfryJe_Gq)3>IpAJ#oH;Nncv z8Dl2Tc=~O4rktLn9lvyOvEwe=8$os5xOvF?;pxXUTO`r~Ixw(d z=j6OPcpC`0L2dSNq7{W$Yr5DQ#m6aVc%mA?*N-ImJYJi6&JKN4qoIM-#xjfrLKp?! z3~wkLnCgjnyP1VeI9R;`li+hrZod?{`E-P7t{+CHxjQHtMZWtO)35Mi3FXV#nZH9&`de`*tTCcfJQmmUj9Zzqj3M{nfE$m0FjagNe6KS2sQ`!7hV_{=9 z6>Fio)aO_Ccl)X_u`~JK@j56Ye1ks=HT0WIU%JC=vK!bHq3s8JyS>+!eMoZL$3|vu8QjXsZMXHh60m> zrK@a^8s_XEM_ChQL_B7zdXUG(Vb)}Xe-drAyMs}j0^`qNo!)qYil8wpS_eSoy0`8Y-JXS^ ze&>Phc7JExQ`?i6rU*Y~w___0Nx9PHXHfkIkAtPYZu@hTP$H`6JD>I-nSI;XqS^*x z20yk#<{g-Z*{gk(L}`hC!XXTXu?Rn&zS3CGSnMjCSqo(sFz`*(_al<-<95`<3DA_-#GgtW>){z zd3aE_E@XQ!424X1VRO6$GKZguGvg^`uPuXafcXTLQU*&1P^Ic-ZF>Y(YGcX0kpUy{ z^jxmb{%~Gud#I3Avlv!O;tws>3{D?FcVq|iEjWt7s|;r?N8f2JgC*86#I#G>P1Jqm z4NsFwGy|uLcrX&3M6Wwb{MHF)`DkPN84bZ1-uShjMB*d&Y!r+h1oe#0>y6~I!HUD_ z+&>5{@mkk@S+!~nK2DLi^DT8b)97l#FR^=WoVB=&0p7>86KMl*nqtN5L974`pJ!2$ z`sOFj`;m{KXcJ;^&e!1wC-9k?l{r7~K>Y(9pd6q0=mxa=Jv2Xlard3Tx@tDb^8i4J zHfgd)Qn{*ZH61|dxvVvf)LzF}f}lQJ?6duN_8JO)o3itu&y#YGHROE(>R9jCHRt&F z6yY_|&gwoa6$C|(Tn#Ll?kiG^qcE#8DPESVdz&wrSpkIs6d_8o3g?nhi0;;Z&IFz8 zE}!0_fBcM=Sp~kG+|IaPuunemOma7~vTwd{N!yRC98aN|jmJ5bU(TDnJczslUb<^d z(%Q)`bwijn-}u<=RUKb)_~9QxZ6}WKk0XYk%f=VrC19H1 zuPu0o`AUd1jll#&EEcL~5qN433uvV9n*2jBj;Tg5O60~NuHor%%+m}bMbC4W)D*q( zO=5s3Cfy(`tMuylgMQiYI#KXljlBbD%Qs3B=@)v`w z#$4UOS1;=d-x#slV+Z2*+g+T0E(K_JJQl)AL;)av2si$1)}$W&2^( zSTOn7Qm!Ezlh$fP$Tj1!Ww78W$yhBIk+3*TFu;g(4~Ya4kc41@t!WmCooXN+-f8*` zaD`5D4yR=-1~1H(VYTaGopalbAL;CD3Ct37UUh0_yuKcSb0AR1_o}JENHLqqQCaX} z>|`fcCKST5E;l{=X#C^0bMEG>j!b`DjC)Teyp{PYtTOffRS1<#D|6B)a+k0A7Zux5 zrf5AATML`biw7I|8oq!LRJ&->020QFu``lMsH4G}pH0C#R|;?yH_JVf=gvePPB}IZ zC>hE%vhu{3%nrn_aCb0R>&l4p?8&qu)F-@iN)4r>*rg5;7P=wPl{H(S5Pm~Rkti{nMRK*d5y1o&W zWZY|y1-TUbfG(t(drVyms?TlGx9Ze6ZpQNX7VeAEK&#^tAltv9AeGjWA%DzNmUKOn zkGJXXC1(hsJj8CR-_((0b)G0|w4p@u#LmT3(l=AeWJT}--Da9)bO4oeSDM&lb@59! 
zu<$LkKy0rCP?w=Ikz{#mqRi%%m|0SiSCDt`>~Ru)>Zi(fEi)l0pR|FKdz@|_EWh;iaQD36p;DGU26o1PYLTBswVqF7h^$Ts(gL!*62`yi@ z)0F9qGvHe)2EF~WumY~0sIOJ#^kFQpSYAc^s9W)s!Jeek0xJJ^survUXND0^13S?{ zhF=Dya73&U(7i>$VJvB2atVfV)Kf}WZ+R0a$7Tv7cQ zXH*!F^HLuO;TXK}W0-_Mi`aTHIfwa0nc8^r$p%?YE~vF5)74YB zYQ2>+ozjqq{|mFCiw~3%sQp%isk#Q5tF?%bj^<*ID=OHG(7w zy8|sRfIV>!{>^l^h-*y($Jqd#5P%-jq(1w#eelKbCEM+LgV^l@3Lfj}1Y7sZPZad4!ZR)VGVgGslLp>uAufr2 z?W0e(4!Fmkj)#c~*ST5J%^`tyJRE3>lElwFvNQuJx|a7#w~zmoJ_7W|=fSN8&M1DZ*N;8s3qpdv(!+7A?YshHHwy2!Us}IQWyTPD^UG_oW*Z?f zdSa%Q(elM^waVVzfAA!?$6&ho#S@pR0t0q<6tfKT=bXm<}OVc++dTy z<8=uec95!E^4G(os4`X~v53K2&S~!sP19B2S_REds-8@wO+%EcgOAt>dvTT-x3SAg*$IAt5%YDMP;64;A!!? z^Q;D&u+u}T(;?BYowk8+0eL_3`^(t|9j7B z!3w%UD{m;~HeGxW>c*e!8UbAz>ljvYv&di&n8$4*ayt?B+=4Srv}*fC-_FnOenePn zh-Y8aSFtv)BF6986RNgr3WP@n>_&SU#bBT>dS$J>3A?ga_8|)W>p1Io!*j~%OB|4F;wyE%)T-Dx^znVTx=`2&YSFK2+@pDnqn)Hhm z{e~`B=VFtpCvfYk9MsX6jfrPuS=d{)R$5)c7^mnXkSQA_J(L5hX=IT*8it$UUKGCJ z;Yb%>c~c~(UPQl1V?EX=%{&!IF2%i+w6=@nt|I4tWCqWjGm3Ut5T>~qS1pa{TVSK@ z)OX+B*|QO&70)jnvY`9YF*`gSM| z2lwNloACQu0kuT!1a+O(mI3`=)i6+v3|nmX`I&@j?UT;$Y$3`kN_{YLjHW7M2-1n~hfXQ% z3;sCr9RO)Ayc*g^*sK^2L*!Fdf4P=*m9{hHKFG@L08`@;^x?Jr=zA;?t|w-5Cla~r zS%o%B6zGjPf6eT1lbL2t_z|=T>!hyX(!=KTC_g==3^Dw%EQc|XgmX%vTbgyUsAP;8 zZR~!0_cfE1dd>zhQ$Cot+%fSsYzGZSYEn-XayIQ90;<+^_F9ERdg>v=mMl zzGcpR)8I^va9?-+6rU$vyVULsDNC=Lmq(7ds+40eFNC?NGWsN57%a`)vo;?&A_*b8 zw!v1bt_S@sWujZ4HAw6$V`0l<8#iL+p9O@*mWlYnvc;o`&4zdFAbEv@ zaKTwUa>ek(TEH^C3X_J2CvB?5qA&kU|MA&cZEVsKl-wPMgvF>#9zOzwB1LBMkCC&> zUPgA*y3`@g<0LkPe1r3M1u`ua6bNU`H)yGA*Pqz3F2Z&DF2Y?D;Q+s8fRog zOMy{1<%ykH0InJ~J@J8wghTxt%R#Z^F$rjvrD&A?7%G5ctS;w_833SFDeyP982)f? 
zdPXylf z7eNS1ngBEC&l&&cd#1s~4=J~J)4Iy7U|BaJTWBE!;#3jXA3FQE zGv*QpmbAvGbHal$>K~NNgL=wMTq)&)K`e8}T=+i8jc(ap z!bXfwKs4~lo8T338=bfc7Vak(9v?CJBjPo9>248NKS&vP<(>wW|M}KpISoAkt>#wUd4K(h@vp<>xlV{^f#1Kvj-da+Q0$0T`?vSHpE9+5IWfq9jIXUDY{|yKvekpj>)~DMhob!` zcoamJx|<^kxEop$f~=#ECr#Rw1lsplhm30XJ`Rh$>iBC30xwJvDr)(N@loC>hG&z# zj-|^d!%DGopX_g4e#XwoP8;DGYmwcwB4-Y} zJq^f4eTi_U#1ObmQr^<<*VFRv0Uxx#R%0_f#|v)9_N#WLh0#tK37=wuX(mRp+ee1) zr_@g~#%1q_G5Mz0Et~$!K!M}C(P5And9X8qF$+rUzg&P>DtD7AC)!BUDb*Y}ii2gd z;o=G0{ItELO+sj=H|$Qc$W3e7KQWc1;(uqV|L3G;Vz?fvf$w)qNFIEYB3O@hXo81{ z?;hig`)l&rEPyBr@FWQHnX)&s+TU#t9zR0R`=cVdyWZFX?JT4h2056pW#FE1cZRn= z=GPj`Wl_OOeziAB%GQw?EEkmju8%B~nc)gd{fJCF9*z?`s-T4qGV{NLB($wz5^Qfz zlkB`04U-RaJhnDfy!)~k@zGzbh{vw9qmKPDKOg~)%BeA$K=?d_`em(9ZgrH|OP+au z>BF^O&sQB9A3kGUTJEW2t<9`!z7fX%{@|>|1KM|&x?+>Ur!{jN;+RnfjE%GV4e1bUX}R!-qr|y5EW015c8yGnU)srj?=AZ(SmHI#iT}{Ix5rP@zwW6SRz<;_L{g58*y>34 zSy>OCvH)@=#h41FAOpY_aJjcquBjni?%(Q*#6og6c7)iw<^P!4sRzIC#Hg!&XhN*$>Hf-MVNXcZwgB z>zq^kvhXJ5pqB8=xEk;5Nm0BA&|Mu#{yJC+a!+zS^0HX23+4=fJRMPEf@dZ-sSDJ9 zd9dXq2NzJ|5`~7w0xSF;Up&+zDUa&js#B59C8TEtQ(S!*EJ?Qq&;XpyEB3awp|Co6 z3=Wre=j9bdXSl=$MWr4wGO!>tEZc;C!|BigZiiypg;GvK;BboPOjr^z&=aB1ri};SWE>HPG&~$X=#jy6Fle`m8o?z4byTU5v`HrZ>u&C;|3v=`c0x!4TN#x z6OWeNh_-2(K58wav+UtBTCm27H1{T zoHLD|e-6)*(d=rUf7*{8GHmXwJ!IwnMj=_ZQKp8;mt=!>i1KPU^oj7QDe@vy<))fL z$O->|=mKngJ@wC%B)->mGBWq7`U^U0uiwldy~Z9SOKq%x-&chDWAr~#Wv*naKk4|Y zH#f31J^v(b0w8tCw=_*7Tw#Xy++x4euwM zm4e!xKc>u7CJ&C6rT5#%4CRVBMRe+U;_Jq=zMi)Kj%0ysMUmpb^-4`#nNo^o>Wkfy z9YK6r>Xy6*YhZ#@SKFX6>Wq;m_V61ap$f$r{a2$@}IykI|QWY{wt9N6^>L za(a4t-=EWkI`$NKK%7kqiare zbgAkRHM-u&M%NY6;uGxlS=v-4t+Bqdtkx#t3eCk>r_0#+q!bjXn$uf76FH?TfREE_eA*kCKK58s z-vW=73G(FiqH`~1qYF;qPuL~Ya;a9`SeS`zJ(>b$=R z17d-A1n<5dH;q1(m9(YGl>?KLw2RN$1$z%(1(EFIDb?piZ|@FL16beNYqzmKXgGpi zygS*Dz6%UxYn!66rXxI45pvyfi^FO1M>>-t&YBWq=kpUZtiiv z2T2oBp97lPz1Zf(8n$PZz^6Ys!_yefpF1bjoKj3jV=O)LA~-!*PA& z|9e{OC>EB;<%tZdUBLUhP209vfXckj?%~K%$7yP80r}2qH>(?1ObtW&HU$Nk_>(W` z+vTIXW`;hNw!9EPbJ#$58w$qAC?^-rcO^@#xppq_Iv5iF*T))VYO4$0u5m6q?#Y4D 
z4I!|`guCt1s?TNGw_y0JRDr3E395K-i>j>-`1>!Bd+o>xCQM3k&s&fX$}5Afyfg-| zp-vzi$8h&pp`4Bdouz5<*^hR^{@}?pXlo?NJ4p((-icjd7= z^yKpj&d+7mcF22i4n0$?Pe2Mwtm+3bnm6iyBA6Y?JZuGWxx6)MilP&7#6fWdKSb+* zMY8R@+_kR!SN4i;iny5urW9aEi`G$EeJH@OF~5aboXI_?pD`DgC}`h9-S1COJqWmg zA&s(L8G^Ho(GG?X;oNxD4R%>B?b46?Djo+muS|0Czl`Gyzq9niXEID^aCt`H4x$ua ze}x>BU7GbxGqx7rT_~6NBQ)Pt?m~?1gx7r|?`$WEiq&-e@{y#^;!SUD)fme!=F23#Skvt616=+oLr!by&2>>?b+WF9{R1keH542HSh+oLa{iJ^47X z`H~&^DOhlD`^FlDQYD|CO4wa|XUW`O(XsjYY;Z+w7neEPv8H+J^Nx%77-)i-q@>Iw zM>BU^y?!CG_D)<+BC@hLS@#^O>lua%|GOT6n14+hrUG1oB4GQ?`6RdPN<}N;r=N>` zn-Fy22g!Yp^<5@zZ=t^XR`N3CikxQ>Fdd24iz_BxxHpK4A<aVn5(?P--iyjHbzwkS&@|9Jlmz|-m9!B4c? z^$)xIM+hJ4y2dSynIKOj`sNvfpf6PrS&DCGnBBpI9xm{@^y!_t^hG+)-_~rCe1{58 zmga4|jy!a^&YW>os?Cj%Q!9W4yGH=3*SAq^E~ls-K0F!cJ`f%hb76pST`*g~i zta!-Od(yPKr4d(h4zhB*Cd1W8tI;eo)lhwhmh1@8pE^mda~RPti=IGM#@B9<*6K}5 zNKB@)C4^pfYOZx;5ubziSQ+9-gz&359xd>&yYqZknS+ZoZac|+pJVd?Sr^_7p;K`I zkUj)^j}_W~kSseh+2!f2>b}eRfS}b;0+IERFQ^52-}q z;;Zt(9?x9NA1+RP6VX@N7GtF`>I!CrcZ=M2gi8*&0$0Tk_Dyp^PNR&!QPA{14USoa zPhcqrvPJmie;Lp@kQ^*dJz`_N%|i8pi09_(UNNkdNCs{AuIVP+<0;cP3DX<9wtwmh z+^F%9G15r;KJ{NZ3gM=rSN1lX8&T{lXg)J5-I;YSMrT%=Ins_K@jBh&r#7ev4@g28 zpG@Yuc8-5;0UirPvdr6flI?yqjTTnr^dezAeD8IPj0nS7imj1;o-LownKh`ePF7!# zRg^``SdIJhZ?2_9%~16#h&A;mYq*~aVf!>;8tfv*Z223*$k=jM9wajTCpW9KPQQ)Z z$M*XOzEJac3e+}7pV8q-?lsaIhI(1~BBqUmLv|ao(WBzMQVkf}>?C1$Ra7~4PG{86 z`t+x=V#x!R_#wwP6YlObdYl7pqZ(+w$?+|96Wp|_@#pTura(tdOVgP-(m*N;uJ_G` zgVCu;mD|HxQI^j-N^t>i1=p9ww-!$g*s|h0IeOP84v$U*?!K|!3j7G~-APu~vOR@( zwUuu4Hs@FqmZ`m4==oJ{JqQnV@;2A$gDl%xL!grpQj2Y6!=L% z%EIWZL--1yqjQ-y*=R`xxn%0yOw}vAzLTz?Pq;%xzV8?%l<|t$SRj)JBc(kVe+4tz z%$A%ySTa10x|4tZdQ>V$8L0a!K>QYyWi^7JmbaU;ux+WfKt(pk7Qdi^bJ!nGzbLgT z!S(ddH0YG#@WLY453G7$Q*C(V3QFKZSf8bjZ zUg3noy-jva1WLTd%hJ1DgC82kR{Hu>*snt^s)UbrHgGUn!H|r?BTVOmDgE4JHw*0Ms8w7}tvL+U0p#>|P?W+G*&oBFeJ9j5)S8hIv#bm?{_9}S zO@6n=O{zr<8SIkE&J02NwyVpRx)PmKtKsI7dvg;$sN?yWJS%m(wj8ARsAVCndBDAZ zyNR`%{~uVYbw1!~665~JYY+VxTuioeRbwF%kQ!FORB)D9s;bWtR>`Tn6`Fdy6#uZF 
ztIcvPslOV=!&EU$Do`?$ok+Wh4x?w0kAO9&M=fx2mwolr%;btB%JJ_)riHtYSBF-I zx+X$Z^#I-28OSIow^M}xKE6-kIIIAqYmo0G4)-CTOz=Ke0OSP%G5qcCjj%1__3f=# zwUz{ADAP(7@~5+tcfM5FYv!2mF4M0E+{cBhb%0)61mBX)yPOKES$-N z85<;~dsrhcJN$aN(TR15lZCASYc`}r3>oOHsKkua zj?R`g#D~A*OG+?dvsf`jBpIXP@(v}YzR=)4oo~E^A?{qiC3eDr7|N>cZp;7xpn2ir zAnd<}qM&a}8?i=+zH?6m1|sn$|5dU4Z}}+W+c5iIB_)km#GjO+Y|z&OGje7opQ>p& z{`F8rw1#bF|0`T;>-fL_l6&}X8@Ed4@lbdW!Q_8F^RM@={9hGO7b8@xmA^)9L;CA^ zYs9zYCy%vb(un-S*T?Yufoew=afr(BVc?gB6HBjxhm4_$K4a|Uf3C7^230TwmE2km ziN9?bbPatteSen&l!KM+0j~#Q2X2QAK)o)yG`TM+{*n8<1RwtZ^JOP*Pr?BJOmQoPa%JubxVIT=hDa^7`yI z=3Xg}O**D}e;m8*nDARdMvGOQXHaS!sV6k?SSvRJBm> z63PLj7uyiF9tz-LsNlrv(=DC@53Pri&3&8N?cz`T;8k^1-Q{+i9h=y8A8EmdkEf*z z3U;*veSOHJwA#*P(3wRO$&gI#=dp8mB4`Pv_R zg~<>9CRW;X>@j7GwGCuofY{NykSUsEiJ6%dm6j&u=8oph#BkTe1FBel6@eo)IbLKc zpY_dGMz)t`Dc*iv?L^z%5tDtzDnH8?I@6g(m*=2m8yY{dk*at^o#kzvu{;pGC>Xyx z1nl?%F?3eEz!C{7YD}}BI~&f2%0huC<^fHZ$RpuwrM= z)(T`iSD)~YTOH%ghcNCqX9~T=3A{+E9pZdyk#Hc#jh3Tw9WBRyv%kOAkPG-h7*a*1PC?RGLRCrIlNOt00 z!L5D+lE?~g=p}2)jQ+fnkn$*XY}`mPRyr0oebhe6tBs4$CcjLY8l&N|3x^_QShzpj zMLo}UC@_uPZHo+RmuChli8B=CUYMcT9_x(yjC|FQq1PPW=($Y*D3pTPBM|K5SSSOi z&^W3cIddg zjMMWWJb+Pt9BHrwlq)(g4-QPAO5lXe4xeCw$A+DF@Zggp_2b!D&o?n=vl@$JV65-0 zT{i`(I%YdoLU;c(I#Q{i@;Q6Dkn6c<>_CS?J%xd(UuZQ4d_LxNf7s}*E>#8 zY8*=tnA`KkbArG7Ph^Ui&Sgd4)Ei%DLA}vm6;5CRyLg$Uo9kg;V;P z?D?FwI7x$}>05q!IXzqBz)~w5UuHX&o6mAWLgGoQ4`|LqtAtL%<4!pGueP8_P8MM{p;QX@RuHNC9FH@}GIEdXG)|SRi>uOL%4*?ZIe3Q4K|lN9 zcDi59v>{wg@(dz;VaeFsWQX?j_zCv2Nr3++l<~Jbs^Sfs?BOSr>S{DQAgkPnV(!W;kg@N&X-^Cl?#LG8KJDLf>kQ4-W|&$W9cl zAK5-SEgu0D3^+$_AJe;1UBxA$b|)WySb@ffhYMZ1&XGR)3eCWeRzF*j6bjndTP=y1 z`C5vFPeESX+4rnhnlGD!gFl0;%OrZwxQ($*qXYWET-uQD;mj`6YkP3*l@}O8V^pon zv}gRJALbo`H|B$hT=AAw%lCo1UrWpgFEJ)(CU(3ypj^5b=aCEc=R^W}ENHJ$a}@`{ z4(Ih;A{al3*AhSPl0xE<7@@d$#P(^6eck2ceydJ&1kWv<2kTpyqcd6Uiv=>G>HKvk zC%E{;bUEgMs8{0^wSq)(pPq)yU^3p(^_$PRT#ji^YiA%59-f?+7qYKwwHSl8zRzId zYy3OI=!7Vet2In7_D7E6LbJ9{^^MCv)vkSpGvuIfF6P<}q!k<>t(ftR^Vnj0mW$e& zsph$CTc9$jd!5`4a3?Ay_ZWr3^yE~a$PUY1Ff+ab-G3(l@!{e2F?h=Oq 
zIo62pAe)bgTsGcy&d>8r|8xK%Ck#T${gJRnKEBF2ZRGD__?w={!-aZmw0k^a)v?8m z7I}rVG3G3W!CVcxFf%Nf0!>Q~w+_tQ9ApBnv!mRA-vS(9^T?<|M{qJkQC zLXTGH@$N-6-leXm!ORLD<{5>6wsw!4^KTj87i=G z>vnO6W16a^O%p;zw{q2q{&o~F4GA3)XD?>JPFqPbN#LP4RD^m^EC+Zp_~q?-Mc7I5 zjm98pmdus^l1LmgnLAdJ@r}rWIr`%h)k_vpq}pngL6m|UKM>kmXjiNP^ANo5Jk5^b zgqgJLjC-h*&u@L46umtZliEy-6iI%%C8%u14#C{yiaCa#TRdAln?h%5yJgUp^5L?F z_;QqMYG1Aiz980SSfKk9tblW}y>sqgnhJ>mX-h=G5xUx{OL9LeQL&~e7@R}f)MSLL zmf8JC71}o--t$kt+LeX;Uk4yQ+zO%redQP*RxO41TsGa7ozw-IHBteKiyYCgX)JS= zPk)*=xd|Z1-%AQl{y)l&EpOxHQvM(C1Ci7lCYd(2$`?T*`pt|dO?0SMl%QjE$U*EE zt?pt`nXtgK13~WDnz5&{XC~jOU2UD920Jf1Vp&amL%x)#9fU%Rn+mu~TD<$`Uj!_v zOh3(a=AE3!7x~^WsXXd8PlhJc>s!!_ftjLu+`J;CLcyQgbpwHkX3m_*zj513e$=+m z(F_(aJYRe*E7=UOclh3G$+GLfdosbnmXlNBI#G~QLU|k~OV=#tD1z)ajag ztpA*Eo;>Zhj#;dQZ?$$k6bl&Vx7zDf8KW#7TS?x_UAt0gom=Al28j=i8i|FP<~%2K ztHRMtoZD9)4J>*=J_FAQ?Uz2sZ`XB)%?`Wl=FXjkQX+#9Q``fp>HrgS$9q1o8s7PY z90P#Uz!|TkWbN^T=vNZ(^X;q#>~hINel>rG^}sUYaRv_F@>}j%&KemV@)XE>7-1 zWw=#`oASyiKu*L~FzIYl&_hP@;I;zb6-IOrK4TGzCsNLc`BUH8htH-T+mT6JT3ooF z`$iiY+-qdh!-tE@2EO-YD_ig1BU1Flv59Ou7L9Ogp^A*gWci_j^Ua7D3C}H3Y240b z5Lq^ZPcFwIR3=7M`Th-xt+aI`i}{b6HR9)1RePpmcu^OYD;xUcnsXNpJu35srWRk+ zK{*^^Qi=Y>0m$g%Or`n=(*ZcCHy4-JTNX9El;l1rwax_Qio^Kwb#}prXp@htn>=%E zir0Cl5>MlGrAfWb!gwcn!>)^<71t!mY z(3>1OShc(gfktYMm#&j`eZZ1fsvTq-mrz?w7m3ox`KU{$YNS&b=8_p5M8DRp1&VsN zO@96tR3hVb4~>)g^mkb_$zNsBJe-LQ0!bOt4$E(PyeNNRJb1fKW|;LbSjL{G{3ilD zkJT!+yp@+!L@AyI?47Y6-VNxb>sV$ApSoh^-?*;dAbAe;9B4l*Rp236(rUXgy+)7; zAFk{)!+hC)6)3&T+*!Js_^c-@&Qq*%V+Fap+lINk$3FNR4-al68k}cAG!5YCZ)J5` z(Y}Fu9;bYI=GMw|+xkT2lwYhp8+%*DgxwkYD*5(_9E#jc8klP`qiSo|NRurE{o-#PjglnFl(hB#*do9S<>oQK`Zo zW`kA_7?aq3Ntk@YARAw~9Ot8AL0_^0wOa7d+t`F+u5ZeoDj#vY4!#@B_MdRMZ&d32 zr@MIs<{d0m=L6@UgY(IESxk@X`@*rR84)|f2m1T6si{0AHY=7a(U0MZ`je{5bivkG zc;bvyU;|4p!|^N9dIN&*Fe8l>lbVvHV)+gmdvf6qSAF$ zXg#PgdSWv5Cc%RxVN&VyDFU}#9rR(P{jM3hnh8svWTB92BYNxQaDnotA41Vp4n|m% z$aOud4eW5inOOt3kwB}MQ8p&sx>IYf;iCV5X`P(x2J!V1G+MR0E(5&Fif`%-ZA^{6 zURZ3Y{o*@ 
zl1(@A@Ulwra!F+c(N|wIlj;WNE55UIu#28?oS$nqiVyfB7EgIlcgzP%AMM?cCPER#!N^;}P(ucW24JcSv&efMZI zz)N_o?jXLiobiN3rg(K|hv~6waHgFy@P=wn_!&7k5xC48w_F&!qXvu-RPm@<-jbRx zKa5uEnBMmAca_YuHWtY3$lPgo=FyaHznx4`2=it# zv3W^%3B5VeZpK&LZ$P$efbwonL1uw)LcwS;qb6zamDccd1kymuJ8ddRs$TU;ex}cP z&N_0E%k;A)u*7cL*z^6+CD74|?Vw_%Z)@Tc+Mt#EETt7Te>7ajL(p8NMe+}vMVCRi zmrp*?+kTS|X7?0}Lt?=)xUL+Ybvgo@ zm$Sk#j?L$2JMvoA-()9;$!ry9`Pnie0>_U0ji4V#Fcd0ucvVaw*KEpyG28<%v+~|w zGtM8TVS!sR@f}GzMM(rZ0;NuM&%UOq|BMSFKEN6bN4mGw2Z=fX?W0A)yC!#jqTDlMczv_qAkz zIRoo-)|}a-*^s?8>0J!WP1wQN0-hzkLBMmo7&{Mp{`#O0(}JDkbJAF?+7>9nGd^SN z?5xl|=)2JQM)AQGIYR1SVNO=zPP3CfLJJrM`|1jRw|{ZdQFnC7o9J$gK8;ZGku-IP zRhcC_b*cnJ7iZ@U@Re09Fb{@TKafu%xHX+GlPeG%{LD^5Z90056o1SLyTil7ZpP`4 z>MX_gDq?h{DDNQ7S6mX-{mJ9E-z@W22?>vJFq+Mtm<@Rqce)?j68{$x3L*3Fa_YZ? zlc6lj7TAAswr**o`I+s2B8{U8#dRYHnO>HN;+ooim=famXOTz}3Pd3F>EvpwY@NFL zaxSrGOc^DB>DTT)D}kq}LvJyf439Kk$Mo$1yXPgL&7GKGoDd^2|H*V<<4C0GM-D6T zQJ7-c7CrPJ?cRoELh&kha#wyXREJ@hVSo2*g=$oi#~f%iqML{WC~&>XEM^Ed-9UdmKsg$e!|T<4)~?3_K8XiZF(oNsah@!ErZp=(mYQ)Q|y|as#T3yVO@~A^3 zUQ8d}kgHRNI+~;fO%GsV~R<@Dj zklI+}<+@Q6_*$#p-QN#|tOW!TfTHPy7o}s8ezDvXl`iVWDVZSo3ae*-Aqnd%%NYo* z+%%W4gMs4qnKwwzm-w>wmc@y^sge}sZ|e}^d+f<5lcvd zA%)UE{`b6zwi;3152cbV?e*5!zU|-S7Wtpk>?qA9rZ#u-R9%MCD$ZH67wGhLtLXMQ zOk852R+s|10vecY)`Gq?(^SQ~#BZL=H>tQs0F+|iINIpKV#_!BC6}VY1P7$Opu=*q zN+6W~{8h-yZTy!M{tIbuqcB^{&d31BM~cKQ{@@B(4Sl8s2kvMa5FIy`rUJC5x70gu z8p02d+~&hENlqoW`OBWRmC5W!>2vXBX$Nxea8R&&wU;SU(31 z?Fbj;7jHj?q~rb(UVP6CCFc!Di4x87*siZ{@%PL9 z`&w?lN=oQB!D51=gR$NA51M>&@{p=qsj#B^^v~rX*ebK@$o1f)bP~O-Run$gh`+e_ z4Al7UOf9JnzN}qbHmJy}ps+ll#x%+r*mHeTVG+Xf2|A1y3i!}YebX2(rkKrS#o_Iu zWJ*dz^$rA+qARPTd}^kKL-hUO)*U{Amnc+PQiMD(T8ZQNe%6C0A-WikA?@vI*gWvx z7Y!*K{5~>JZP>hQ#n?{y-WZJzLj7;CXa{zH&H)LC2;m=PBBU3*dTYoSgwkFR=G+$H<;A|h9 zJCmDO`T?8;DC=;CgRqV`P62&K?$(#&^O-v46V%Vtu^cZ_SkS4+1rt%T!5J)l8Jm?1 zYwm?%WaID&7(V2lz$fo?ru<-}M-WnRsFpz(74`V$6d0kGedCIgr>i@rHa5C#U>qeA zlj4->{%4Wf#zQaHG-Izl8hT&Xk05}h*OTGkviI%A_x_x*EbJ6v^YKBKR@^l_J>;ih zMJUc&x%+iaabufH(}gJZ 
z=-M8~W~67pJ%awnpZQk+W907%y6vo|pt!i@>DqLNgG$*xT^xSG{U?q258E%{vM)## z>sxqDe~`Tz?{a2HIb2u)ju}_$mXA*dG>6W({qZ`E#qzVrW6Z7I^WG470S`iVo=n5C;Xg#BTKE&bHdeHv7l-5QjXPQX38B7)Bo9>FLx%<{smvNjBa# zTdy~>WySLqR8N#zF)K%Gomz^Yv|bvGV{Wlh=`ONM64FA{5So2Rz`mweav_%fM{xL1 zJ)II>UbH?yioXT<=l2DOgZF>f$2bkK=`Sc_0d~|`dB{1c`!S7B+qppPQ}<+1 z?@sTB0`=&RDQiE1+GvEXc5|(sX&dRLaiC9YQPUQ6Cx@BI)zL)Tgd%d`077s17_as- z%CPp9qXL3~Y^9O6Fk}PJT*Tfw>lg|(dR;1zM9Y2Nhq}MhJdre}w;ecj2JEVm`bVHy1km|S`)!3-i3A^1&%!oGzji7a zq+2}v3HtYhwY7RKW5U_bQ3Mrvl}?|_c2-|S5A#<|G?KDk_ufO$1Q>>Ht*UG%Bkh?; zrz`SQkBi$Kn`A??43Q3b-tb+$bbnx$3XYEWnRHT{^dF|6kMckLB)B`F9WZ&4X>exV zlUZJb=Gh;>VZkiN6nLApoTJeBnmT=PAqJad;$*Nozw+1yOQ1UzB8ZYY={YSuz)IWI zx**DE<2#!_8p{PXAbyBaEHMEn1*~+CAOE^uatVUd>;ibox#Qp>cz_p8+YnNrKJ*Ge zimSQEniYgEU=Q2!8A6Wjc0~@hxt9t~xDW^1ju#|)Fjng9GkRQG$hwPVDO6G7_-X-H zb?*-gMDZ?aGe06T`{!X0zNs83WX%iT<2`|Uia=M&P3$ZbTiQ3HMF4BQj6uqno*zD| z7k}>Cs%}CsRMp$>u!Z_|j9RC?-<_fR6cgB5yS;El&EXZNcIAOwsQ}me6D8k4rPGm? zO5i)#-%m&T*r?}$^MCA%7g-00e)$p{!?g(a$L$t|w~}yo&r%V(KJRA=m|)uh_OmtR z*tFvs1$O&3RsXYbtZl2kR3T|DHZN_Z;ACPF_{wmO(J+{%+s*l*jt!Au zdepnnXMt9z`_WvMyqf%KaoPN!0lhp?vxX@A?2>nT!3#!;saUWCg-U}XBNJEVQD#&I zNx|*VDd3ebtn8cFC?LDV=}b<)7I{)F&#kmjQ%Qk0p892JK?1aJSeT6#_gh}K4W7+B zizo_)Y^)z<5lVjVwbX4xwWt_ISN=zRzRrj7tR0S=IZH@8XN<|(xT58i?m5?re)4Yg zg%%U@5WP|6HOhIRP*LL!Zp9sND#T|fL&5rJp^+Yi?p2V1qfakWwD$hRKDjl4i;U8M zAbRsU@7by236o?`z6cr|r zKk1f)tkAJP5J;YK>eCm&Iq?0szxqiCLwh5bI6B>FnC_#`xg_Ma<5%es-74o?aL=>k zx^{+KG`tSr=f|(PNT)v-$k{oDD8RD%KgJO<3SpFc39r@8XuQrIii%v`Q}`s8z%#~U zp2|xvX$_~iuTAx?oL^hnuRM~tMzJsmganp+R+`=|38=40F1HWWHE%Iwmr@1~4M~Jh zoWg4n&XVnSzF1#d#%-ST%toypakhSOHBlascQFbEQM=`Q3KZGL;a?2e_Fx#PH^9Ff_FOh*(`z;qbMG6{U{& zxmo6e?5Rf?>YB{=N;pD$LH z*`5liASvIEwCf!=h)5Y5cVp0yVMZ&9GnyU++E<`J6@E7CrsFK8(#FtP)&ab!5`UGA z?zb~V^LW2Yqr<3X8NPvLyTvhkIB}`yjKE(t#p5wVo3R z{-QNZb7NQxO-jxjJrI*%u5Y0)&C9}4Ck>>R|751Gv52VyK6I_2;Aw|2!q+x1rp^z& zGwJVK1W}=H{~)P;VxD8I2-`~dtby>2e5Pl18C%Dhg*jq$+Ia+jWSw_x&+c0uRMhJe z+7yrKm0D-X?t+3?wy}SH=>DK#aYEih5Rw(v_Ady=h-0~(&6sB9#y@_?GzwG4$;iOy 
z4$PtY@+umW>#JHunFt!`NmcU625PxMcdlg3YvNkf_Dn;{;1e%jsIO915zGooGuiy@ zR}rqV`#ndV#9yypt<{~Z_>e4*1O^t3RBzRN1N3H-X0n`&IivK(0EJU|m6VKiGBga+ z%30ZJzbw2}Ym5U}wM?vgl+*M^RhU@ zhBf{i8RBf?^^7_gi7-xLSdY$Zq@NL#p}`B z+r?hpk^MobYxNTM_T+2018aH;be^28&#EhjOHo9JJg6ke->^u$FA7QM_{BY(D1Z4m^jN!l>1*A z8AU8%@FW@zT6YPmpBisCJs3|%ShUn>`yaIl0bh^aMzsSkp07@+c_JnFjNkkxs4$Sg7%})!>9pJ#AZ*tq zgF~K}DNMpNx*h6Xt-s>hGFMeG580n*+Y&sT6AtKE46W*|v*elH(>CX-bs$Ebj!nf= z(bt`f-0H(?Z}@)sm>543Z-av+USEwdah5VDBM|E?gZ^D`-*bA)J1$nZ!FA&CC(<=A zL#6m2UES6t!cwYUkyTw2ElYiE$&Hlc&mVk&5ks3bwZZBFZ&u?>CinLqWMvpqdSP&i z=q7u}5;i;H(z-1&^I2nH8mHTB1%G-+?cy1RaMTfaGo15D(q?}-m#{=;(>QgpQdSj- zzF&8<>l!93HRM&`29cz(8?%$rz@RX0(g#e9AB)5g;bg9M)Fk=jBOq^*FE1r zU2DJaXEAG;JEoLZ!`1L4l`?wn(zY8UtGe2wxwbu_U(9qvq{q_o74+m>M8&=O_l$DYPv z)Bic%o=z=saVVi6*lg`)hcr%S${80VB&0-Iospp!q8oh9txiEHDHsFco z5ZpTUS#b?fsTPd2w zaD-5!8wRGpkR@IPjgz33EY}OX@O8_W9}y zc}%waP$+Ep=8kf{1F)Rt4>;tH>m^@44&^D-~z^0=3tH!RI%Ml#b!tNC`vxxcRY(roBV+U0a)D+#+QJElrP};1c>%_=;bEhGal(&9iQJH-OsWw z3Q3<8)x>o2cQ0h7O2ty)I=~WS`~pHJKy zP1-p)7~CC>V_;;IY+M7ass*(o{WT<&)q44M=?oWi#2Y;b>2`|ktCE$nTokR(#4wmz zd33PupCMWUkT9;RukIrcq%95`jLyP6fu(N~a^{t~*`x{Et4-T))q?`p?NUtxYB?TH z+I_iRV;RL%-|TQDgS8Rd@9N3P+4dk15ee|HK(UE&d^ueE{fmQr8;=D!xk>}_6&uf^ zTd1TiueV>De9SCI8gI1C5#u75_JK#rxdH`U#+70#y@>QifV<=*jXWs&>+ zw*JAkc5a$heRNZdlCK=2ZsX3qeE|zBGEt0j*;#Oyvc?=pHL2YZVLig8_%eYtQEg8i z?spq{@(N)jI#+at-}`V>YV|{qQg@?G!mijMUO{c!^96UfzICS7ALGdge`Zoh?gs0D zOgX-*n>En#U^R5q15%PJemnAoMc0_xM1+DeUS{igkLxJ5-}ZCd233qc*Rn*gzOG14F|&o0pxz=%ck(HqfNp-PLACcjSCfKFY-> z-$Rj+8qPK2H5ui4llOh_=tE}PCR_yQ!uU2Aob>X8@umUx0(rEg`o<1U8lT{j(s8hy zAbS*QIc0drEAm488v%DqUF1gk3H?YTOe!j;;dN!(PYw~zyFm&gI3%!8c7&hs=sq=OQI6Bc}S=9CSowEdQ` zHsO}5DwD~UQcA?3K&b5!hLDNBkh_%4`O?VQq}#Tjche0qH^YQ%ItTvNR-Rz~)P zqU1<`TkOUKHB;_bNCk1(>0k&Ao7)>}OhA#^>ecmxJ9{hjgt1Jke)%^o&R=>D?ueK# ziN7EZ*uCaH3=#163|a5ZWgikA#03erUzwNdFek3?zbSuSZ9W#U^M|I`qv>$P{gG1; z{r*xGGo2~*BDal#EaVlBrfP%GbI8W;1LhGt$=k-81R8w|m^T8GnCv%$V{rQVScIBRA6e zQE{&S@Vm(MTXQ_v4_>k^r@ul6t0ABnPY%|#j1WfMKmuCKH{t=Xo1r_@%J@$f$QSQ0 
zV;N)H2ceKnc*s(UUhEx>ct3~%(Z_p3fa(guVK1bzXg8H%(pnY2`Z4g!y@X!q~QX?vaE|0ZqMV%>W>|JDLb zr?S}|t~N8lVX^iN4wB8>JM4|vzrVV}<8q6K{vf%=7u@p9DJ>l>Qpk0?KgGaev&nNh znm%gSXmv#(BP08(8xo|QH{_B6-+i3;WP2ne2`5*6TP}D*C4I`rYrFv4YONvgfCdkH z2oECpD*JZEi@y;B{6z|bz_f9X-9DP~)98iC)at;|O@Ij2$f!d!_A1#dI$OnkS(;N^ zI#;DhFGK3Wu)~YN2thY42dp;Wq)sB=_%`z`6v=Ed@%3XNXVtiXAO(O&Fe_W9|G_#HB zaOdzioi^T$(SfBcNDM!DWu}-$lXG=FZ;uih`JRf5Cij?o7GYx%ozTJhth#5$b%3aX zif!6lI~Dv=9@5rxyHCQvT2-(d5+0xC?O%UJ6GPCOH)Fu+8fws?^!c{ld9v23&m^Dd zUbWe4t$?aE;>yHV4i<}M`*yyeui{7&TuSGb`piC~E;@MXf|sQ6xX&Z$Nx6wBFsMIb zvXNY9XG^OI52l50Y=1Y(MVQ)L#vBIbl(USa_&_&G?xl}g=i2|Z?TvfX;)StOMg!>N zL{D?aL@VlCd_Ali57N*;9oK<6*q@T+*-gYI?~VVaSc1CZ5)%4X+x_vhIw@FL`DrC$rQlb=^EY9S{2p`RHl(%9CehrW=tp#gj#-dzHJ5K>j=5vvYRN_qaR0*uMqt5ylPw_0?joM9*uz z)^uXJ#o2N^or|nP58D57&Cb!$@N%eo3TAnP3GP zCN2?2iinP$Y?GRuO`udDt$#R~ZLm%+@cmck!&;WWZwPEoCnz$qLaXSHom;yfU|XdM z;O<*jU715*B#M&EK1rxoookFK4R(*$MsfcN&maF)DljN$y3tn8V!jMuTvIHY&biv| znFJa{9%>YK84Vgpohz5g3mT|SZ$3@T%)-KF6&?{`bbC0pxU^({JX_2iq4+%jFHoHQ zoBt(%IOoF3IKmO!3YK}eP%mUTSEu^r2A|1i#QqcOR&cEUAQ+buGs_gvU}~K2kMGH0 z{y%6lK9u>{QZ1yGmKLkk;ve(Wz(DYo2J4-kK$v(6B~pYOqyX33+uLLYLx^ZRz5>gI zilYV|@8@_@nUu*&JwA|sye1a}SrstCEn=u1(^&mUH(Iq!Fr>I-X+>>zpN!zA+FF`r z--pR3%^H%vn+=XM++VCl|CnP&ZGZaL06wC@z{3{^ z1;aZX&xTG;PCBjZI&ky`!R>%_$LVAq4j}C%h5U{KjY>Uqf40)>a)voqq9WC3{q}tK zmw2}OLb=0azXx2T|0SOPkvWtgr9|vvyC<9RNWA_=hc}>si<8q~X8gTOU`L z94Zskqr$Sfy1LlnOf@htfH(j0@-oxr?&$GwE+Z@~j4MhG)$<@uoc14R;h)dXP|(mh z-Y@q~=c|CKsw$@yCue7kY6I|v3Y}bo{xEmsZ2a8}Y0_NJhjVVX%U>>M%lRt}<};+H zr>E>5w`RB^WdHY%2H>R-OPns(!*FqN!DF-kG57op0oUaF!3#<<1ZdI5%zJ#pLFe^y ze`Yw5!IKOCc&p=m|IS~kT7Ec{3%Rwm<&K^WiZx-b&=kLSI8wMSFr7x{f=RgJ3@;RV zHD&OwB|5N?R1zWn_m8}ne);;Bw&{?G|$T;#o9FE z`wI?fnliydlhYeG!$-H}OPVM7ztJ_mmv=02wc#NAU!rik{8?}Ys$=UJ9S#5d6#_qQ z806dp9-pOUWR|K8A%o#@dZwqZd!Li&^sZ;*_^R!;1c@b>RY7^OaYt`Jobc=&2Xj>k z5SGrkf;StQx3`J1fLRkq{CUBg(MODIYGfUn#~MdEY{7W5WuH${FQ5fV&@LD5<&AkHh&k>K#!>`9QOk7;@{#4U) zJ|`|VK7zOwK13@?`UN(boK__cXaHcVZWe7W*ui6<``dW@sBK6^)MwTNPWl_GuE8_@ 
z=^G(wGyUlN3|5H1k{Qc>I6^)Jgs4GpkK*}u*Kx>U(V7G3AZeZ;-}92D%lf=klb~FZ zIg_=$3zw3SXI#mE^Ap)_K|V+P*G%sz;?~IQCc@H(Kv;mZK=&4!1gRekk(9xj7Y&_q zCU3#NccrIO1=Q$ey0i14(s8~r zDUd4wCNtF8tfhp6gm6cIF5gGV1?5&=gj?5^22RnEJ%+AAvl*NW^nl}_aLU~AAp8b9 zBkKj}M!M8X$3eB0+wVV%q@&bZP_q06P2&`FJQj5@Nz zxVueYzZ3^;k8eZ@%iOYazK#mjY{&37qr(In;e&D6`1v5u0diqLdPb0Ro!VY5flNvM1Dh#+KJi#$P~sVso{kfYfHz*(4)S7d z?(S0{hci_w@pz7bf$>)--h7#cP>0tOZyBG%E)?i8Z$96hxTB}zLw$&BDD+T(UyLy= zBCdtWs3py)AIyAEgmgB<0%%iix$H8IqQayHTN_Q$I%Ic}XD1w{iqf81_@_YN}O(OI+kYEEHv}rLMSiqr0j&rU}u0qC;Gde<| zo|J6cwc+X?R!}U2v7aJChAzIjAi%6-GZU`*ex!?dih9~k_r0urH zhYnt1@(h&*-jpAQqo%?tKV*0bkvew^{a}*S-V)oHKJw^W3KI zJP+M`?ldVlAsvu2U?AuE{9yD;I0Gk5_yn32-OSuTxruXa!9DKv{SSyLJNzkFjA$AP z9*=;kIM{~2b`M7MdF0p3>r?~x6I|WYJX5}UyM`Mvnf+4|q56X(fwgH%6A@3fQ(C`} zv5S4l<;HZ>Yht9vlkzu3#@uibLwbUYN_ti55eL-V-xca#X!fjM}BhZJ^z$GU`eDTM0dEQ`#G$Dz?P@golf^ z%#PHcb-JjS$4~U+>VdQ{+E0s1;*m*7^Y+ih(0TMx4^*PrHZQ!u==|#y{!zjDWJf-V znL88elhgC;*E1dXvvz@yQyrWp#udP^u@(LFJipFNvVn6@mBGpn2sX;_{0fMt+iRub z6Gj`+LU+g_L3^F)R85h{sVRm6vq)WPdjwzC^@s-oA&xz1!l?t~6S_`OA&9(HeUYT) zMKnecwoJ|^SZKxigD(vgxH6^CtcM3P{DV2v<4m-9Se)`6p22XvY2?-g;SKr%l&;C$ zfL{*?O80lVPfk>CC(BBU(n(&&n&>D+{G(H;O*jenZ%IJQBn+oA#ZO^IK;T`xt=P&Y9XpMw=`1d0`d0w9C) zFbwG8<%=0vMks$F<2G;LcS!P-_zraqQ)m~$gSV|Uy&s`I(-nAKqy6@|%f5;d6YlH( z?qrI#bE59$W`@$RV;>$tKuQ`!Ub&>K6x?_)Fm1I7E-B4B95-o~k^T{9l4$;}lw~3q zI-*jnxQy-0Zz3NM7Nt^EJ1;1TT}XOo&^kq2(H2TTJDmhW)oM^WnH8&qlQ(+ss6M86 z>ChT#Gq#NEo}rV*SZ94|??f#}z{4?gt*@^IJ~+}(ANyJH%fu>e?)nYb1krXV{H2jp zrPt~4cHhvu7c?Z-G=D zh<#)MJN3=rpKC$8`lM0WG))9+-KoqRys3BK1M!P>Yux*s2!mex%{(Ke7g9UQYCD6E z#ILxU9dO+n4Y}dMNqA3D!_VlLO!|CagD<+Nn4>}|m>F$$3%# z^OZu6kSN10OO9}q6V}i-B>sul%Y`F9+beJoK%d6yhJ+l)Kbu?5?7DmJ`DBqbeK zd8j+en>O(cfMxV@o(sE)>Zu|~=<$DUg5$U~sxNUPeb1yqskInL{O`vN!@GYDu+Dyj zDVT~4r}i2GTpCeo#Z(DLom5V(Q6rWkwrpq2ZBw4{c<)N2SDDd2KJJX)hs6TGb@itP zh<#Sr!x%NR)nX4&Y!105&lk?LeK7=@CU`vtXbaZ<@D%gVI+?SY63MKJF+Em>KL)+n zdyVUtp_S((MPmO;QKpzjQ8j{$JY{m$q%B)aBr;OJ7Jz^iJVHTCl$S|MQgeS2j3&^| zZkV+$xWd3i%M<8a$3UwYUO_YBBTZcNKO`gm#72s@~5hM!_ 
z^fIcb9t5XzS}~q;tl@*A)}6|`b}Opv-144+5mq5=?(+=0n-T38jnOaC1hMn&z7iYc zH#>~~KkQ+x#}}qFy59QzL}1qsEFziNStoLNTkf6oxMsU&-NcDDU=xF5B3r*95IvC& zec<-8`;}#c_s;96lFeJgH_Fx6s4PE9u#kQ%$fZ};L}u^Vn0`(csPS8a<#<`rz8D^41DKfGhlRK>71tui4sIAO(6m-%ZzvPoD*kn z_c!o?c9F8Nj@u24YeUNDz(}qvX8}f^OAKv(7dL!ik%7U6{E&5>p-OX*15+RmeL{s!sro_|R zbWp@e!|uQjrTZt>6D0NX^JC}Yn(6fQRW7;R*n)$DTWWJ>0>|Y zVgSrWsbieT@zu!Vs!iu|C>qhckE%W}3@!zGy=b{2?nd9~^E_3ri&!(I+jiKKXxUm< zjP68>92A0s9SdRizqQFRX6;?fP&71x z?_nYsP9fS7u16o`UkDctY+-Y_MI-ZvweEW*SsHFTBUKvlhi!sZ z3outI!&;8MB2&)=vB(*Q$~_jgIF$ZFq8g=Z#_P|TvBDhd{G9DWbJAjjexVy#-pyF; zfxg-O?#~BHL^jy7{DrV5B%c3i8s?&EklNbncYgj}E6Il(&>N~cW@<@$aDlM5Yx>Jr z6+Z92L49@IJQ*YNmv5oswsf!SR)tcCqX<_TOOgF5sA5I|$F(PMz$SRvJ)&ms6Oxg5 zvQ^LTk%P!$WgB{}#-Gy`Aiv|`gvJ`3Z(I-5h{8epM7lPmz=>G>fPxMaCB&ecfK;C^ z6WLS71@%?!JJAw7(im@9B1^ycOJ#T+ykH4kw_qfhC^6LkMx@u*{KH9|xwzVUS zlhBuMk}C#f|B$Yl9A1oJN?OtSvmG!fwHQoNPj+Z4cnbxVQnh8lD=!v5+^QOwWC}`? zogB#Cte6^0BneD0HkzN^V3(Q$c+0sMz0Pr0deN}!ZG80`Xj2vws3q;;I)Pb z>f68MEW=ogir+if@K_b|Q&9Au)s)2W%B#m$JZ0b3a9)(rDeqR><0|tSAVBO>d>(lx zH%6|KK074ds<9RV6Zgk~FrdZPjz#jzt=;id*kSw1mAJoKqvI;&0xTKQ6500a7Yk_Q zUjf2>-THMYDAFg+=Z_GNI|uc_9sQk`TS`hokw&qxe3f3aEVBxfwVjwp(&3wXMOKkMSS|monb`yf#J9Pdb0Mmok#`)t&EGS63H;l$jAb8RY)QKAkr} z&@V+X^jOG^5gD3AnQ|`}th~pEJL&c7Odpz{Z~$Z{7kNh(FNHk$G^y{MS#3AlT0LZ- z2x3b)j^?M>TNI}LgV@atHEjuMDwd_X&op8Y=D5i zm;LZd?^5k%#zvd9o~5Oy-j?KK^yNm|iN~uQ35ns~Pomd<+zP-5T!nj%nEJH!x_~=@ z==52r%_-Z5R7#cdagz2n+E?$7f^r?M-xeMvmFY5jPpRzH*j=VVNXJfVj-GOMpN5gR zE9bwE_1Zo;#6j_}rm^~3FzxJWa-xhA?i1UhI-ic#qjGU@S94&lqJh zhp82$b2j`k@ddKyx48{So%J9yBnb5|dFiGqw3(%R%W@jsQWqV* zA$uoR_^FjksmVsSb(Ia15SElgcQf7ae<*v)pt!njT{}V`cyNc{ZjF0@;O-8=-Q9zQ z;O_435FCPg<1UR$aCi7t_VYgb+vim6ABW;cS2a{u&o$RvW8C8!_ux*bOXm1<7@Mb8 ziqn&n-fJl04j$F+sX&i5H_yW$lG0xK;rO=_tKx8+4rA}T7mur|e9WIeTJd)&I>4pm-d@9hm=tfgy_=?9jJ zp)Z9w!I>thlIJ+S;l?eA>SmqW?wHrxVG;`IX{_mvh;w%5z1pG2_=DSF%~D*(?#P|* z8vKo^^Ss0Al1Mttk=qzadnPLBRQdOs*Rz!L%EpW3>>JO{rt-?Y-_D?u+f~bKEOFgh 
zWxoATKp=VE|BPGJ;i1OpJgiD5ygpl&Utyq_Pn&dtt5o^aYLulJ;!@A(;pfee#INEa&bSG!VAN@nok6f&U&}q(oii=U)Y~yJC4+$4s5~sPgZzfxXKVd3U;44c zf1If@lsZBc#^fP0{p#rxn&yIGNl}Dg&YvsI7;f$MH*I(>7r*`918N@WDhrG48gDAQ zwWOJu8KLOy_#^JP>`7+HEtx?kzG>IQoHC<%kVFh|I#|2UZ-p0G@*NnQYq;|c+w4$Z zC+B?~15^PXEal4vl8?DM918)x)7Z#_(I}zU3;2s@H}V0DBxj$aqrGV=@>193xP`4M z>ho}1p45X9#GUrTo{fRiVq1!qu*q8DMuvd~n+sv3A1oP^uJK6|8<`W1pcV_xqK;u` zBQ4=D%}n(;h0OsyVI?xPMx8|=)VNy0=1}*!STU0$^)b8=M%dTEOh^jGY*GRm{^SS= z<~3YIahlh6fh0XjRvdBItzA8Mu40E=bIjF7^#8qpK2ai@XHm5PGNsj3ko+9-J)meY2 zn8HtX$PjOH^;4tR``}t@M(nvof1SHl8j724e(>7uH24+c(I+9dq#tpKP z>Yn90^7$kJ_eJs@thoo(&=uk&yzgkhZ==tD#UWZ;6?4^7`sG5#?O2EwYV79Ej~|I| zY@ftHyBDlJTuQ+r(}#xI*?3_uRKq75EOud})7OQJhgK&W^MgU=Jj_nnhyw#RCS8k^ zfqmqtST_h!c;kw}8KZVYpu}m8Lfry^9$Ls|l&%ujCV{k-0dEYW?ZyieYDoGa-*=iP z?qHG|?MoylSmEt^-((_a4;mcA$} zqqlqB7OIq~LVfs9uw0mz*8_wRcJ}u78@m<{TTv_YzG1^rTUs{qXG!fV$dRAdccC@K z=<_ZoGN7d1fa;y-OF&jUnYJV$ZuWjS1*>-j%lN#I*CzSU?QJSkQ3yh*3eLYQKv`d@ z)?27moiRCAu?^l6I@>z#qN!lx44!HgQv_2z5i~TrgpxV2{aI%eWQcFD)c=s%n??nO zrO&=;9`YCCVkJ zKx*rby((~8#?QkgxzV@g$9mL}uz;JlV7G%q+(By|KdX$?>_!+Ll{D47CCVz6zU@8; zFB>sB@Z&~Da*&Vw!r*QT3?B=YRBti;)p6O6|A+D`NfULc)_a)XQg)gC3ebcKMo>g1X%3UifoVZ1~4`ycA^7RLxO;2lm*|6Kyrfu{i z{?x>%#x2Ei+G8ckt|5NekOehYd)LmfFf)~3*GeevK?h?}23Fr>8c?vPxgMb`m$gWQ ze@GPdS)HKvc@@3ew>-=?UHmyFDR!jPs!Dy-!cu7bM#%!PkDU3x&I*v>62lt!Q$>bT z4U3@IK)gW&N3?P$5_zZ5Q>_tW;<~=V4~q<>OzqDwa@Df=zrrThE;85L#`PvOYBVH z5W{eCY!!a=_cYoGu)AKfQG%vb%Xu68Q;`eTm1n}A5+3abo$v>4BC@Z7@mte{G9Kh< z7qT{WmI}b+2~mGmm%3T@7pmSu+P3olefFDzGx@^W+xgPit%vI?fD9L44x;?t!@PWI zPrV3iqw=1!F_cVmP(L{Mro0kT(}UL|#bh&8;Z!(8^Bj#000=QB~cK}Ott<}ACW zlAf&_DNFV=FH4dpEoM`vzugOYOkOq|M-LLpqxOHNb5&5;WCw`MqRR9k_s{o?QJblnx;=pO`XaW2_q{|Oqti;h5eGstrDw9Sv%GTx~_}L z;w!jvaOWG!o)bDSFWgj&3rpncN%ltz zSV57Zv^s9;`({u+A&ftH!jv5F*oszm1oKgc?RQu*Gczj!h1zL8TTqI$W~+;}Zt#ta z@vF4Vv&FzGm4pL^Pe1(-V~3#9~bi zh$&L@=_yN1m9&5L$cfJnG!&33>>CibF<;VF*2gO_X^EUl7sDZ@D-+dp-mJ# z5=s9k+-3da2z?EE7zGnmn1s+3Z`3-2wvQs^%m9ew#6MGM%P_l&vo&Sr9|KlX 
zg~_iAL{~mKeH9yPm;yo7uzS-*Ght^Xol(3xs^u6rVH6$Peb#&;5)De(rjw=4gdyhH ztg8I{kxQz1{HY~zA1@(ydSi2R`5JGhPY?NDW)O-v%FV+=u|$y^NKF%L+FM$<0pMx( z&um#dFbyp#_pzqWy`oTRgghC{(>t04l9n$k?!Mx&4MFS}uaxx(ubd5$VVt6L0lhR` zuTuTgh;YpO{;I=~Nrr8h?iNK5mRR$6B%)o(E0NK82Va2HntVO@2NaGwW+K=QAD(*W zZzCnD{~GWs3Q$!T& zNk~geH&&QW7yjKzH3ogWp(hY4O@^dSv1zs}&|(s?;*!$uzO)O;(DXIi_dOayBDOt~>>{eZUSFj6Iv0Cw1XRf?gr<30eVnJ# zO>ubW;5{Fa>6Y-bjwm%-Z3`R-X825_tJ^UuwfskUZj5G~3M_*QZicpOax-A)b3rn? z5Si>yaYI)H{URdsZ=8oLECcw*d)1Ff*>48#y~##4qSspxzpg_|x_muTzVs`{0ff(vq%pO$u|?5z$knq=*DvGw$fDwN~<7Nk9KWVPTH znmFf{F!c&)N;&_d|GQjt6qK*%&pC?9pP*0#q3DXU74xNn-@H9&Z~Wq6o7B2;d>R!P z9TpxB(#35v}{TK{Pno-75G>vfsO ztF0B2G?;z+gn$%qE9!npnBHGDc*B$C)6*C*vYqzj(`|tKrx*aV{O^NL@KdHZz(^MY z4%7JjUYzqj0QEdqr8gc-hyJC{TTcVRw)WZbw+BsvTm$r>UR64$mjoxr> zv!o?$-{3>a(}bDhtQX}B)|LDGL9RxU81f%vpZ`3F46&Ni+{<($jHdWMe*A`x)Mi}Lz^j)%77Oe>V#(M-}nsJi^@s8dw!3)Zr#;0?It;pE$HNHNO&CB4%= zU6JytYS;_nM;4d;b{PDALD?54%hge6`0>F$I1YO>9rm$=#Sx$0DX3HLuX-Ao`xoj2 zwn(4HIn|S-9FY?p-k!jOq}p3x_eG(QipCo#`+r_J|LlubZ$AA4pcbbA_UWB~ zp4W%3pP-`iIPdl(kwmt2T&kP0;`$fII%^*_JvL71*=cLU=4&s$E%1er@^ei!wRv;- z>Wxx8RYa-xU(bf0DkCDdBWf01T)p0_W_qwVkQ-4J<$>Vbo|DP5j8-rBR#=n32mdmw zZVt)P6uREmD*aJJzH+q(32M5;oSf=pX;2%mDnO%`%h5uNW$Fl^_|*aNzhb!Lr@(<7 zfn>W_2~N%ocHQgyZDr#3ciZYnqj=JF4&eRk6XoM42Fogl`%+3*$m^_V#&~+FB!pwZ z#$)@#klwI_5B6-Lqz$wa7{iS1LJXY0y7zobPa5r!nSkc0I6qH_hxh&@VVTL`fLf8z zT@cQ|V2ifWHCV4qxwu{>#f|EcxXFCrqgTTHsB>v$pV+`9KNX_^9;T{Ta`VePS;Y)Ev{CG$FWAOtLIskRvUq+`>C`f^ zA6ufmA$Xj}{nPpM-Th6uDlHHVZcO9|7vQIS{us67OS54V z!qbyl!Z=ZXt1U~8&JNu@e4O=T!q5YuT`isb;)cKQgN=cFl^j<(Qg-exOfYlmSn{dW z!TkMw9~>(-C|NXhKt8)|+l{?rHlv0zwPsx$8)cw#CuPfa@;Reo`}L`*8YDE8v-6a) z5W?uHL(2X@lD2khx;KcG@*rm(F~NkA_4)kCR>BDQvFT@#wv6T)ODo*bu1s^N58;ZK z)@vZGzLtMb9pk~!R#>V&;|39BbzN536fKnte=Q)hGdH#+DRMIG{bL5Fbz`>-*127U z+K6Gw`h543lE70SpixyQ=dycQBS`pK_Ud3d6)x)DmSw1bef;K7*wL36CHG%e^8=6P z4V|E%;L%z;&%6&KBje4{Vox-&up~7ISj94pIukkow|{ij)Wp?kb=d&|!UAldQ5sl7 zz#yaRz&NX#w**h>7MvH*5se{Ehjnki22sZ5NQ*(_o#2Ttdb}#RLU>KAYBY&BRiIKZ 
ziK@de#-5LAt&f29@RIeOB(d>JJ!_DSt2E%s6*5~0e| z341*sneQ<}*$-a>hqZVQuv9NOBjJbAv8G&ztLJ@;I?pb*eT!U3{y}nQby6-gP0Ia{ zh1PJlQ?}puPkJ8o1=kY zi(hYHb>!vlb3k97I6Zm{xL5@s;#QFEq$2J-#+7Ic41O*5Oadd)ZXBD1iCPB;r$Aq2 z;7D6Vs_!hkAH|#UhqLjhhn6dKefUlv)0bQf#+5l+$*C{hj_gv6l1ga_*TqV59y79g z8UZU`R0~@g{w_0dK54vgE7xInoy) zL~eD$Y`n+y``WZq7BiBSJGk){6i(BWS2pXx0pRaiJM7`=9Lz-4& z)rB*qz&neE{P8<-{7~x8C?o?lKEnvzFtUbb*|lfm;3{APlj7731qxN{p5VmCmnmS3 z1+i%R#J}&_R+hEZh$?vXsHDV|fEDQW-)mE#dhe#c#!%T`6qjk2NKP+z4xVUEuY02& zBG4Z2ZJuM%ku8$dkgh1vncPZKfZ`=1LA6GEGYzYindjUclc+_+YC*vVM+M$GW**G! zXkjAPGA@;KdmG$@firi_*L%8nP%eNWsQ6?&hY-}q7_r&NBQg2dk4)_e z$#Z;#*yGp4#%xUL;6Hyi}8gX=R2cA4AiMOSRP)KPGL{Z zH1%|qJgfZ<)>uGUiH$Qe)m?Z!)-T&7x!{+yvT76Oh!hwy#&bjVJ@NT%M zA+;#^$L_ofB1uk+C^qhN@iLw@Lawskn2ffbx!t5g&u$tN20Nxt>u20sON#<)UmV3F z!apyL!QZA6)AF9XNRjYvuF1Bz8AENWyZCVWMKti6C@w3eB5SP1Gcx2yfAUC|(L>JL zOo=j|_aO0%J@GEvxj^M#Ru8BlJydc-VtQq#Ym1zjF@xYPBkEoBRW4&24~jPpcWrSD z4KgWh%B6r`2#jn;wldM zuz)$tf!bN>QwK^B>J9A92u7GJW~fBNvHkjDJqb=+A>K&>FL&?Tb@l#6&u2{HvAyYBj{8du zmNy-Io;PQI%Bo043P7E)KtwR1lx8;^KaNc(v6p*`(s#0=5&k|zVlrJSSlh3Vs~B14hlo@}4VbMnRGI06g(w};`Myu@joPpbx% zuHG4YZJ|t2$e`+?vK2M^Y@&HdZMDIFnOmr9Vdb{GxZ7#gUf7 z4Y8yoq*ovkGK=2fbflQz`|F78Efc}g=!*qu^3-hiO(&y%m*oCU#n<{jLWb6B0b}W0 zE=X0-*VNUeEDR2t(x}97c$>%WMd3b9dibK6ioA*Mkp8=E142qn>{qGN3bI{m(|(+_ zV3SIu(+8vnDs;JfLt4SRDESiQr-#F;nKCs-Psqj2h(`4{!GBJVvzeFi3cKrG9TW-m zN@vUPz+m#tKL?3r26U#rJA0R72e;)~2huD_B6}x(#5CzY(}}f6n+VK*_dKtmNQf{P z_B;C!&1EViL7(saIcKeB98=uQsSu89fn&&MO?)JFeBk%djFF!2sGyQu%FAY_QzVc; znjv~2;294_!$Skl#`!Xr3qfZ_;F(5Bx|fF^`Bd@P5+EDwM#8VOc*ntOL2_(}>k3NP z{;WVS>x{lpn;Qrv#<09nldxH^Y3wo5`QT_I#)R1yt9bwMP zRjkimh*;)_haR;J`FqT%Waw1;>joo%F z-Lf+*3?4{ijVH1N3(%5*dnsJE>Tk!?-Tl4sDISXn0+8X-_Z344x-Dcb1){kinrZKF zd)}h2M6>pw%D}B(I$_k-4^vRvH3c#3Phc>6&5cN zqFeJG5dU95+eTP$GT_pD495XL;Rdqs$;%P zE$A1~OYVqssuh2q4*G;diJlIv(Bip)>ymKz;REHt#G;Ueqy_&Fy4LV7{NXDBA-qM()5$ zli99@a{l>HtcwhPWtj$Um5-2yfUplTuSKO_#%OSkATHl$40`{S^Sa|~f%Zs1X|UfN zP0@Y?u9-Ao)B@-Rj0Jq+ykZbSiN2WM$UH&9mKXRVo#T661UZ-q@`;~exEqcY1 
z`A;7@X82?Hx*ci%S^KD)DRF;C-oXxqLM%v=k#TeN12`P0DbPgmdB!FsXtnwnUU!xC z=BG0C>zXW&a@-wRS^j8~XBgryU%aNa@x&4yB6U zF5}?AJ*2SFi|PD3qLQ9e&(XNAILE7fkLJAJDYi$}annMQm{c|NQ|Gqm_~bJan5cW7 z2ni)MIg)vUVnBA4rrSe9a#*$Gqo>;0ataja4JaJDPC%Q&=27hGh~4iR1*8tH+fHC; zV||69__At5)$^X6MSoD+1tLyHeuE41x-v>Vy{cGZ;gnzDL$6e1wQgXATUZa(O^S9J z*%7B*tg#&DtNJuJxzDPY$7pwCz)OvV{K2JAvr;%b&hX2AMJElRIC^Jn3PfL)pRZfoiOBH zVq$lGWSl+>gGmqT2KgHfZUf_eb~e9$3(Fbh+3iV2e62PR-w_hykRf7D7N$*FbEW0i zTJnBiu|4q3RdW}!a)#En9RFNyLokePPKKOK)iWYsYie^@<~3QzZR5dLnV@SDrzC}Y zhh88xq3C;|SB_72KK;AJZHY&Ly;vaYg+Ov6Nm~D0inIPTb`0W5Vc;TH+OoK%+3}%g z_n5SF0y)Xf-JX5qf#OU=OW?@a}tDKt`)>&}3&PKx|41 ziqlBd4A;A+tTInxsZN9e8>gNvF{(vlz>!gx*;s3xoih@lG#PSU^BqrcB@mlwQXy*1 z_iDV8N4vVw)*VY0!z1$4E_2#@zD{cXWr@_2`jD+=XCOA z-HHsceF-&5D!Q~X#zH{qm^r;w3hs=(jA03UTpStT{e=yeb3Nndl zq=MXXfov7a)%nKT#T_dvr( z#>ERmMn%ko> z0Rx6W${+Z3ft5S2`L{a_ZAVW~71k90RU?r{0JK|Hz#zx5VwA)tmJ9_SUs_&X{-5FN z9#{KafcV-AN%B8K|JxS)`}^B+%?6rzAHZM%h&+KpuF>B_NXQ(hx#t0ry>_b$;+qK3 zo(}KFVzny5Fid)bnPLS%@d_@GN=mdy_dnCS{J(|WPV`+UAZBuVH#DTpCMZWK8yYdr9o_CP>)?T7m00(^C{GuYj!6Ii0N$FFdfrm%o3>s9< zN6^O zD8}Wds<#z6&~Zo@rhoYV4JI<70VQ-3@G`)T5dFJ?0Qw!u)tewN&nw6*BK+oWHk9Gxc6_^0+?c9#N|Vrz z1aDNi9M7@HYPbFc*bWr2GX9^pr+}!HL9ZkIbUa>?d;pNiM^S*3tasHzW*r$a?M2s2!)ygR|^72UW*sTV2Z|SSHx3@hDcYsSaL;-jX{40b4FN8w2 z30C9PfTjC;FT|4Zbz}qZhuYcAjcCy{#+_qWpOBs1nPIs2r$89MS`XM;6qMD@4jlaz z5yA|UFzT?+##Hoci~c2N99Y6|k9!DPTVv!AWj{w@l9zIjA==fv!*%~IAdu(5;jKDt zbbr8P(7wYa1#TnA=4c8Fu-iic)18g|{p3?mK$N;p&I8^O{23Vc`Kbt}8MQWUS^YY0FX; z8lW>>xbcVCO(y1pe|BDK3p~4eEK73#4J2Sr{; z2+QHh@X1Oj$#;`R*9PDScYbqy0`#xiPrsaL+7BN$vg>+U!SK?nCRfUU(pYf(Vn zIVmGqfC>Wx14y4h2?5|Pr;6l=fC;oxl;VJe3Z4B$0sb@ZZ{za_JP(x_)hM zhW;u?TuQy2fC4OGeamTU#mbBMN4`SSv$fJXtIFiwx!WvLc}9BQ)5o748i(q`urPOR zkdMEh+Paf*ER(hJ=wwr;(p>K(XbsOdn@I!81Q0j-3gAlbcgNfdQCerueI7=W3xPz#vsX4j06xuzU|6<)4 z+|I=+Wte~OW&+^6dgn-tU&2mN-4A=-PE39_%lS^WLJUD&-vE=zL!%=X^Ha*j!c%Wn)#w6<#R)GC$7 zzw-J?if!>~JE11|+(-iUKKX@(Am!I4A14O;M_l8msr?GPbka_p(|rjwj;GW;^0Ou3r%GZrQI@B*75=4I0BU5O?0cjHD8emYt8+JjD3$-!ldlv 
z=H+pr7DL-U8V_H45b%If`P~DCZkUVtJ^wQNByGw1xQmoyEW&}nEjB#kn~6NH+un+) z)~~SJF%ekm1KwxFy<&LF`sewrueLaCHuG2@UHbS?A`!FY7iaA&h8b+mupyZ!Cf7*; zgWmN2+={%$n)PN~K($W2(}&OA-X4JUssVe6Lg^GCAo~)j1*jbu2g>t${T}Ea$V9qJ z8z24UzrN-K#)YODCSESr`^~u0cK@!(7d{fuO2ix+S*d^m1b7bdjrEsHqc;w{M5E92 zk;Mt^B}WI}yGrN~AIHf!3+oHpJ*I}$N8R3V`QV}HK}3uC^rjY{*GA+VPP)q3;N4r5 z-n4i*a)ZtrdqN=GsiDR+lhq?kCRR=U%#b=`yOZC#iO|FsBlw zz(e4`yQ9H7PA>Cy8+?$}cDT-9*^BZkv!b6e*e3RyH2LN_{vQILG@ofCEb6!NZCerP z_l1n_=Qxaqve3-GQ5{W>rKc{&om?vJKR*-O^ocmSk0XcM93`tXN5xZSn$hcP-4e@yQ-wMhURk8I$0 z^EgzpJZ50Qy_q442%eIQJ~B*;KW8K5*8Fva7i?-YCpfx+cx30E7NAd!jb>YIYk`NL zVSI3-0%}jf-&O5(f5AN+DOBURn1h_BzzRrF1-p<<*dK7^Dk+6EhZ z^2cp!2*Yw=i$)}l*>yNNidXVZw{SNi1x7=N|9zf4H6jx6rvsi++4F}|Jm|o&ou~QB zN@Ix_*dqwQ9#J}1&+7@PZ`nGIse`jRzxYK-!;4D$jk_&a_=e5h=4l+i4MprI2!fLg zO50mg7mUdYNy??H3YR!~vcn(n7&jHtSw{8w5%@@ZC@vkcIn@EVD~=mIeV=x|Ic0fY zPHIRFqJx8BM6Tz$q2cvDs~1H@$=Lfit>2O9?2ojiSKtrYJE45!j%Jm0bLaY&r@STK zK^r$v|Jmt~y%OD?tAd1pF|Db|>T^5hJ&WnFlV&TrexTZtOGm0>hOQ&#g~xO)rh~tY*9!PZCz9|53$9E;;`n=O>)Qs&FiRjQf4!8}%(x3*9u2^C3er2wcbB zE}#Vt#^3-s3-VifX^_H3#h;xyjGq$)zmp@9%O7MS5&2J8m-FSe5{H((CPc>S6)bZV zV}O>n(5rez@iZ`JN7LcO@J@o{^6(WDvaylh%u(bx63R*Ywh7`b)cU$w8kiJIQ4A7cku|#W0c&SrlzS+R zR&uH3d)7|J(;B~N&a3sjU!abLZ+b2Z+8d0p3`MY`m+Iyi`V*nfuXtmtK?iP6%wu7C zj!1jn986MN3~qFqxTz$E4$(*vGU7c+6wF{;0be--u-KZ+7fqXLqxDO10^g1OL0t6s zKXjbBDR*cCO&TQ_GTM$Sa&U~9<_eki!~+t!6t_NEjB+{_nin@$m{+*k!I(P1%$pHv z2P)$Mx!SeO{Iks;tStvFpJVXUm!L7_6Y~H`dkinaF;PUM4|B{e!!ub4XFUnx2dAg` ztMYg2V^qxN`5Y>Dk$>`r95c)j#srtY(GdB~*gE}F9qUDm_X}+EIO(Gmkidx@w|6b8 zH?o%2_qdVpHo#H_l~JCAig%M#tmq&Nio2 z0j#*35G8OHwcVu?`MD6;L3GA;?GFfv(8iA+|)Jwg*;=qBjGX8OT=l8Z3CS@{Sx4FJA1d!)V`sz<0@%uvOI{zV!Bnuh@ z_2C-ufX4GG@9tH3*A=1E0&5XGVV~qtv3$viO!6)w?dvn>;(7CfGL7-e0PV`TQyeU8 zRQGU(Id{d^mB=qHmOqx`Rtf$=S9no`ei6|h|IJu3)jb=D82{^KAk5VaMS8X6mo1w4 z#im=#Ic{SXqRhe*(>*}~MBS2bI40%FP}>C`q_x_q1%XR*i~fd@m5F;L@U`BV({%_< zUZ`dqG71ORKxMe0bme?N@*>tw(RmM*QlVjHLYQJ(ef=?EBVTgmyEm+nA#dJ<&0pi-HZy&G_)9Ef+##>Dywe5$W^#YwX=grw24Sa*o(Cu5OQ6b&U0 
zzHFM=Y=#ZnU+hFWE|oRB*UE4kkqVYgk(9JR;U~ZTNL+MO%umzlTwDuQ_{H9Ev{z&5 z3kqmiFg_kz?>_lvP>NWX3gcWY2Rfwn(E|kg}-f{Gau$8kp5ZlcG_~qQz~AQKq9h!f9CsSmYj+Gp~h9& zfq(obS!qq?A5Y>Q`1tYK0^X5Ei-@h=QB-$%Gkyr#EkDk{1p7QC+^qt(_O^;7lsa0X z9lMgP3c@ALZZyw$Zh4`ty%>1{Y)|k4S*4<4OMg5;&co0&QMMu)T4){bY1;5Wz zsLM6bMK9H$>)dW>en6!$ae=$OzKB(9$kvm>5XynGi}(P&vr`}u!uR9S)l!S9nB7W?#luNMGlD=?I0@&f!lox;Y#Ikd>GvXV-E#|m z4M@~FdN3sAThT-mWJ!pyxY<` zoz6L;GB9%e)6SV8Q21-mv^|mXx01Xl*9*m?uMfZ^j@$D{0E65ykD*_CA6(ONM^4{Z z3sbz_^CeCKOz0MBi7eZ}IoM+N>Ux;1n|!BR!{&a5l&MpJhrB#bAt0DfUPDlDkvL4< z!I<;Z8EHF$6P_I@6H58`cXX`?!D)Wc^I+qh&4eTGVPn>@AwD4;nGXG$?gMPA|Bo}9 zN8wT1Q@1P>Wz9cwFJTEw_FhcY47LzSu~UthU#GAA7dZ?;StGwb5WQZYkcuG}y_7HK zQ(ZMZsO0P4Cb5Xkqo=(aKXJUXasAZ(!&~@L=KbXg<86qmIyv-f8|=X|Y`6t)5N!$c zFvsb@UfQczXD-sQ1b@$PNRPVR9Yu&Ly$wcY&a0~<&N}s53>jga6zepp?OP7%@O6e{sqoDu6#Y`tA;sxXSw*! z*m=ddHy5E!A5ysbIXo$CqSF)LY6on9?&*0F5>i_$ zmcKz+qDa?T+(JW2vG>P#oHp*wdZ9_ysrB|3Y4My=xC9iemIj~PRaY3eQ_Wf#C zF$JQsQihOm(KZ-P1=*_@WZ7SK`YLqx&N{Fjqu%)!2SerPVSgFhaoc8MVqzyiL}cCk zYr0vReA_P5kTgoi{fa?Pc=IXusuO$v5e35)fBX5*CWp-5NBkY*e~{V1TJ3F_STg^B z09ZIUnsax+S3oj~kZ%Ey)|6gv3&#hhd*7Sx9Sc3~cC0YgO8P5u*T(o|kgO4eH#q~F7-eU0Sqvx|fxANr)`tHtuxA!IrVo}2$$ z3)b7-09Nxqzd7|Q?o7mKusSG$$}sOU2@CKP{Up)zq&G9gBlBCfv?T7ui3&;LG_q>7 zuS8UXf=^{v!(0X_^de23_i$!1_Cj1MDeYA&$6C6Zzk{6ibB11D22V%{4ZS7hWQ8an z7c0`cQZN)&_P>2T3ppm0Ld}!D$~0roZ&56WE$uyY#X67%eTE!U1Wm;pCSo4H`F`zy zpyzFqbII#UvwJ`qHQpj>_nooF(`?W+5*WwP8CAxgmCbMwLPmju4wds&8ZUU=xJ;yM z%#>y3+Y~YL%fBF8s1Uej$NQPgrZ0lI(}AD*Z!8^UnV0-1#UlInyHim!|80yxopgGL ztgb(mqg0v5p!4i9Tju3nyQT z>}uR?vc?s<%P{sl?N~V5d(J!#ZIO)|B`c*P-t`?9f5Nm>Zj;k+poHo-y!EefA>bye z6Es8Oa2}iUfy~h=s4_gyN$cduricyv)kIOR_zz)-K~R_jW#-6(l~4jg7&!DE{+ zUt44gn3B3qW=mM9-5Ew^Z5~6C)zU9&izz0yJo;IC=?9b~iyaHnL9lD_w~3&`^F}K0 z2}i7f!n%AXA2sw~ zHA*x*Q;{jlGtZ&m9}z4?eYJt)_7_F!m6>e%lNNTK{Xrh%cud7_%R*?wNVr_5#2x$* zptUSyod#;ppldRQ&!wkZom1O3D17x?au2EATPzCA>{3PAN)xlASdHwLaQ=>v++l%a z`Osvs6B$TJzO~1_T`{?Ipq=HvDDZD(bG;`+E24yu<%qqJ`D5yxv23w#K=0JE5l6a+7>sJH00CAd`dF 
z6uph=b?(*(cYNdKf=>|`kD6I0eCrI^eg&!Fn*z1H*@4923-^q?c-pQWk+mgdjd@Tg z=Crnbhtk+CI#LHlgpH6!>0?6nF~z;&jYTA23anHpbQ+Bx8=^WY7a9M5pG;k~QC$9S$*z(lq7A?vr= z@C0L76mJXYNBd($Zvm}crB$}OfT_jKJ}C!7AFgSZm9!~kf!}=LVY3Z$|!n`U97>x zu(2(9V19HC8HkRB3ST3flKkQPk+5|H{qN}_-d*p;4XKH^E-1%VM?SNk1neXWLtZV! zr8j;~Fn#iNPF&ZWxHbH_u8xGk?TLL3kA9gBsSrCfG$_uu(SyIURUf<|H{p|M&iGmt zO%5eCUbJsje0SH923a*{{W$dJ3Y(F~@7yk15)rqCJmTl&-02?FDqTENVAmH>RLQD#LpvP(acaJ#=ADv!3tbt7bh>v*6Lhg8};UqNKx^< ze$U^`{{x(U^r8s}33s`9;?1y+-B~Cz+nK)6$!TL*-bPU{xvX#-I%QPT^?W~L^#?DmC zR+uQoiqzk)OVp^|5&lDxklM{XMIvFp&09po1CNoBijLOvXyrkUcc2XA-Ojur*p~&f zgED4LJ>J|4Y{Z!|efv|(fbwRA`QEGe?@L!4R^vX7Ts6ii4;_abN)k}_IJ-wwUZ3E9 z0}4;ezR$dsyA&WL!IEI8YLddpnC{0H)jv0sIBjIDKw9MBsfXErzR+}S5j0>(c4rTs zQ#hemT7k^D7`Uh=Xy}5LI*nP+wrIx3lR_(!&A7YKhYrVd>$hpTjiik&Llc%-DKn5G z@iZAL!oC>A^z>)EbZT+;7HqiOv8A6>VL0zF=WerI47x|n|AhqJ7d@uD8sbuH&CqJ< zx2GKoA^8=)Sx>fDbR%;%f1P+~ie!6QU%hDSYVv__=W#Oy8uSH7Q=2=rA~3)vma7$$ zv-5Xsv>czaolVeqmY`Bs(yKsS9b58UEdnjN&OUN@Bp60()x@nQwjvuBwnvakK??{c zp}6i5t#T|Ebr~J0Kto3R%OdUcxvu#`1j4kd8eoRR-q=9v$?G84nTXiBjCl${9|FqB zoeIM*`p<|p^(7gHVeRC6tkPHg!Q(aNCW)enJLlWc88I^l(>I$oZ7k1o0z{7|%rd+9 z?eT*|Q5_0@v)ud3B%u1OZY!hEd@$KAB!qAE1XQ0J-km;=_-Z6YZ)pMuuQ4G zQ3q{X$@%wi9F^6BVVZSQR~vT`^^gzMUGDG6xMPy5LW5%}RkA+r$C@}S=rCfO%ocmI z<3YPK6^vyjd$A(-`>l%ZDt9G0#G{gwAFRBQD`Z!&*oIM!Vq|n;!<6z%%%4g$6TR$; zUeOqE4R51=?*}+`E`)N^!8cG%r$4c$ueT|%o@zj*HSS}?6Do$e6{7em<5*r+cBso| zrT)Ol8&5ehCl99@*yka0&Wd!gGOv=5m()EqVls8OGdM=WWdyUhPDSl3yIAU-TLjG+ z9-SG$02NN$c`o^w)~g~a>xzzI5o5_V0;U^6tjs1~eoFGLU&!q`l6HseLup{8%s~!? 
zZ#rJgrwT&Fs<0)WO%1d>1%q63BlxxemN;#EskbQM2zldMBj(&%2eWYzx|bC=`}1(a z+2AJkwEMb5StBtqf8B^6Es7sw0&SFSBh6T5{*09(by6GX%FcfE_!V*E`G%qKbD+CS zb{E#NR!|IQ&3fzP_icrTx`1|>>rU$e%|^civW@_d_tku>i$papLfit~&~K0R{M z@@Plh;9{P8{v3tg`=V+vN7DGO(EcWuyu`6(FdSa#tJ07l5id`2JVBubleXy!D1=V3 z$C@IQ0gX_=VAB7nLO-3@O)#LA%>F0~j#b}jcwRVSEw8^bm7LW_iKCJC^GevT7`VDS zxG4VL3iUM~OTDULWpca;n4`Cu>sg~ke(whav=3)j zkuu?ZVbF~`v#J2%0=}|q9u!Dlz*^i%6;#goRI=nZ+a1ooP?uz1cu3BZ@KhQ+3Oy331^r|N9Y9PMFF zf`)xUh7Ji{(o6q@^Bz-+5Twpmpk%--BC290HCXizsuT@>efGbYU^0E+2`#zCX7 zPC99LlfW@!4XxsvD{lc=)Roy7JtN+K9(Gz=jnM0UY+I*Rut;uuEpv?FV9<{`POVjj za{n7;s`<0tas>7gessqE6ILoHR6eiJ+={}Nn%Iu0ZO;Q}%$dG+pChV_dpJE9>r=n9 zSNkPKeF^~fN}c>pRC&VIX?)9B*>upkam&}np#^Ua9#$2fxbg%DDTPlurcKWJ?~Zxe}xghfrfvsi_kcmp0fX%5?Wnt30XXsZicp?h1h%qskqe5MNu| zzYkhyPo;nD>y#xeiXTzwBMpL&IBiWt;BUILVWXN~u!6F*Okp#6rxvLimL@8cp>e3Y z6fIY}7TH7`TQlV;C_l5>>6w>~$#5K2Iju&{Qsg_S8YC`?cX$|+kwvYQc$^F8w-zhq zrNfXjH~5akNAToGiMjA@jRRVAb2=uKO9~qFB#pn}wATP*TY&tIdF)iE z{&7<%P?0+Gd1}QvlkSM&zWjt5A z_$vP2F&oOkp>YW|WXA%uI9LHdM6aok_AI)OFTy*?Z_*H9eGLcSavE0ZO`k;01raG7 z?0!44L#}Kz)|zsrHdMr!&&b1TiRzM8sC4I`^&g>*f5Y%@7*O$&Z3Y9*GyoSnks}Hm z4d+9r7Eg(yM{4DY&s>@A@73)EyYs$+D|lX{YxG2 zD(2Vz`t6g0n)x2;MSf%`shqrE-xx4z}D)zq8WDbtviM10euk?e_1*>oVRzN8n#XkSlFR@l>LlI-m( za{-|?7dJC!#D%tL4ERxo(>RKp<7A2XjU(>4?=qVG0w$D!^XfV`wKNI7WFhCi;9MR4 ztdk-l4H3-qH4YRbjB3m;8>MkH9MeLm`Ek)5w!?J)$lQ>7AbpBT0Py@)EUkY{EYau0 z+0H9XUITGxYR&}|yu5xYvP>4*TFLV%F*R$0!9;)xKzrSYJd@LJQfjD2OGRmH{i-K& zMI2jrUh-jKS9g+6iRmM^`yM{7;0^nRTYKKCHaK|8D?twol(HZ^5)Kp;=dBzSNfEU! 
z;CBiE6%iG%5{<-l_wzp?WF?8(OzYg&uNAo9cyMSahUSLWyHZkG24I4+c>8GMr#}sHx~(_ZH+w2uMmI#U~|gO@QP4~Y(57FX6_>2=65Tu3|8x?}$UQxDQ~||ue14$iSfB?BQxXSh_+!49eVK`09ekJ8=h8H_aufaaKLYQ)hY3mhaz%8x z!9o}r%PlU!+m4&bVZfkK!OaIffZ#RjeOY_cW-n98fHOJMR5TP4$p2MY{KKpM0dI>}`eR&tyXEp!l6v&V z3SXQb%)FV2XSE$y?@vu!n;!PDer~T-Zi0(_XKf()X8Vpabzcl7J9a)~`k~UM)6ZYK zs`IW&0g{4-2)%xg4-h;ADwS+6EAADID#hvUvD~So`Sx@d?(9A)4F1ZZVvV|u2tExym(;>!QfE3jvJJQEbwFd9Ff}_m- zpT<8EC11{F?CV&*rd)g>S|heuaS=pRSG;1SY1I@81xqxI7-XffYl?FAlRa69$Bi*J=Ps%cVbl^y(=VZ>^3c^a$UhUwzI%0`^CAde#Blkmd>fvG2mYq$jMg^LI$4ZpovO@+{9g2Cp8kB?BFGHV!L$M!Ze<~tRhb5Vh6RjpC7Gl zU_g^Jsq-w3L^sV2x^?A#!8tE$Yf_8m*6WlQ6AG$(umvJj<+5%Va5MO%*sT_M-R~X5 znjzj*0q+P*EDW1rZlS27vzGD2;+?z!Q6s|h4ouDQFH6>ZylxL?PloDf&I=x*fV`%2 z4XG|_Wz@1}V07Z-$b{8fv8u-nbM$$f=(mr+_;Vm%Yc9Uks;>fa*K#z{v z!4&y94uiM^_RE)5^q)X^P?#?}X zg^}WD;8XGq4uQq_z%Wxel<~j(w+Qz7JxS1M%}Q z8jq@T>rO>-D?oy=q_6;~QPIF+064-s^PP_!uG@hkdbXG3-PMJc! z5?TZb9)GXO5MK0iLk!uM$8>2L9hD0FW4imlB`Di9u2{rk8^VEek5oy;XZH`+# zZCsrDc#9#kFmV8N#fu28$!G%7kT?gXWm*W&sM>swNBW;j%`%Y%KsuOd2+;P?#s8@H`&Rq^Q03_`J)ok@B~N?aL8o4?O13R^owqpz1{S1O%QbjQ55^c8C9HIT#yX zv4N7}v#Ji$CMm)1V9?6&z(55f%-Uo}F1(?v zdLpt=>lg0PF_!J&gq-nH6GcS|LpCN%tBTLacaWMeVm00Y|lW^kw6W*@SXv_A7NMaYaCrlvd*KnhPGIFqmRb z#4-}2shysfo%iMn$|fjH%$kUKf|B~kl43cI(wq z&B6PrlrYT($`tZuH$1b~LeZBKh=2%00?SIy#g)6KpJ*MP(7N7uX%WZMjnSFZgB}|1hQAl*-BP34(Zb2eYmV zTp*n4xIX8(g6s}^(v-uvQ3e8kV@m;^4xLE#2eRyrq$0x$O1QJnFjpRA*;403Cb^g~ z)QcNPXCve`n_5`yBH4XO@6Bv#(7seMvT8r`rKma>#9)DMpiEWO5xvJ|iH6{+ML3sM*eiFc zWM(MUX;a9gEeXnR5cc`>vI3> zy~t;rG-y8g;72??70t95QsoCdIs%c;WHM%mKqEU>ebZrBi6daI7#$?kCTjNdo1a(G zUO9~kjccNSnf6fuP|OMx^7{mpU%6;EXQ0oVoe1rWsH~nTES65!nw=|M+_LGVSBmT% zOD;j|Pcl)TvKaEI>i`7$eJviRS*|XHvc&6<d;dv@f?h%~>~KDF z5`B7uX*TtzMEdkEK@a#iD@J zXyY9jm_#+v#vWI`%o3gf<{-S4HZyG6rWU65)mPBCb(x2*=|#iwBCl;R6wY?wxQ~>X zIYRd7<-Aj-IQRLD0;B=|kb8Z&KYjWj?p?lPM^y#4++mC|DK#!#kz+JCvqAJ{eA3!* zD#PMO`;G1gue1*@ufHceLG$>ZXlHnN{2jK$HF-u$Hw2nKZ(jXQ5wOy=`@91;#ev;# zf?`2AAq9R5$|ps=^Xa_^CY^@i>;?JI3}T(KlC_J2`w2kHL&Na`qvt*tlNcUCs+r+}FK 
zZW-at37S&kbdoLOECyEdsAIvm8iqZzyg>1$y#^w6YZD0gXBIg6sgK>MH`DhAsJRon zHPp6^>zBsZM$YiAuGoW^f`>WO3A#WfIy7QmXD@-`z0xWV8(w8$hJU}H^I0YBvyGZ6 zGl3R>{R%!01%`l7<>=Bh{x(UR8CD(Hi?6^ zBw40@PoQM}Am?Pv5{l0)^%X=2aPJ6k`MVylmpb?CyCTGy45Z)-X?EeYSz>LXf|ofI zF77A@;m}eS<>_A`W=)I-4$~-qVBvy*Xld+de`Qag(j&8k6e6Mu)Had!IqxP2TdBT@ z7}vzzml;j#%=o~5q8-E?iOu?ccX!Cc<@CG~F*|a(X;!6k5Ati;EcI65W#52_xRW|+PYXm?o(;fNHY6QO5@@ttuGL*kyaHkZ9Q zQ4hyq3;1J<>`KriD+fpy5<2vjrzt52{luZqd)N#_BE*#uxVlbbQo*&6i`;orZff^x ze3)f_H`UV9q8HtNMx|@uEiT}q_9gl>v-ojhrkLIwb88x7fpwj?&zd!0$l5ZBvAL7^ zH+`GbJv_=dqyOuuytO)sjN=s}vz^(w))l*p$-DiBxJv8)^@NGFwjGypKc~U*BRXLP z-*T$4)mUDVj`0~+TIV`PswM^fq5aO_UFB5H#ohdDVs40iZZ_D^At0ncZGY>Mr&^E6 zX$%+)B~ADKc7z5G0e)s+$Im$t7Wrv6Z`1fMx25-<7h|J676MvO0+D>j{%pRl8lPFW zEaKz#6&?;sl>d{N0|T*LZRu(K5CKW0FP{auIt8;%&y}s>eR3CT|7X|4-Sd+-$%Kgp z`!lOFIq^~fqJ5lw0T9FBND2ngDzi2seG9)_ zX{Jn92U!|m1THHET64H_NDU8Ghax_(%tW2llp7lfW_Zqm)qasAL`Qid<5p>aAx6G$ znC~6Tu{`NF{7C}M0>QQ5T1z#3!qhDPPpS|tJZ@T?rcKn?`(@~35ZhH>$qf0NI(ExS z2?{QR{|T)b{d3L^RW*~+Cn@^J$Snu+P;zAvv%KJSg0M`@wBdfM`G;1}tatj)Y0_^Q!?%cI6X?|3Jhm>NOwk9920#*Z_u?~g~XF524%3{6%V zb0AhZ!kyo~&TGLc#KM-RKPB7*p{INF`7;X$ zCX9^rn6)<9+K;#b8y2Dy!5Y=*wM*Mi$rR5>Ei>{y-|%Aj;j}66Yki5Cuc4sEB;o;0 z{-33(joMX?m=kUCO+w1w#LPKvS1yNM*x@G$a@-zo_uuoax@60H6801MUH6MRIaUXt z5=CFE31+3Wdn>OK#;D;O;A|z=)J`vs10e3JF3Al~w}+W_jBZne$PYCl&2~{E>D>#O zX?<=8mQ**yzfJ4jCn99hdqaa#Vg9`>?M8rQrmxS8BYY0HzbonTG|<~~iVrl(x#_*U zP?tQxH*EgVlxMD$`2?aNsmYznoJVi?8bp(k17h*LC2#3kj&0MtwBTy>e~YEelARPk z)SqKx_S!lh*msIcC`h!AarZ}J?lD**W>q8xG`Uk$Z!a)bjq|-Ma(R`Rm1Cv*)mrpfuRhe;O_G0?I@;eOCaLw!?8rxJ$GoGUTFteas=u6 zcIcsB#1d#fHyRRi(2fjZ9VCk>FqIwGKx@n0vgd5-Y@c9GDSf;ZVaaf@kg>{V3qz3V zwms{Ny$VNi%Atma`<)YS;TKVTu3O3KlA?|l^MD=(ml`k9*hNX_%Hy484qUn#jh^I* z+{fU-cjl0tm*MDq=xm1*MZ~#oT1rzW`KFCadYdge%GPV>V`<}TaP#hL>oLQ5x>#FM z)x;>WuZro7PjAM@+l>cXG`v9cYx-@WC~-*W|F{7B2A>g8BxX2@uM36GD^NVfdK2u@ zZ1yx`{!TIQJkMj#lembH$aQ)j5sbuUjknG;>W)<$U<<{6|EZ^aUCpyNVf>;tGi}PB zUo9v3Wcg*2zSW>C*STEn3mB!`t>R@1LKiidsf$feOoY;JjWdhoky?)%&}i_Z)7|_v 
zPig9MK2l@W+IaMe%|2*Dx$pJ1_!7wG1%Ge5@5O&eM*I`XjGb=RB|lnp5&#toB+!5# z^Y|aXyi9Gr>z^YghyCv#G0z^DT>5WqP{AGUQ^_Yg&d73wK}!H4EiwR7Vp%V~vqpHP zrSP&B`0j#^$&J>-YVVbNI!av6p&LODvhy?ic<#jszbX4X>|2&g1@LREhfZ=fvupXg zegqdt5C}TiUx_%+LCOB0)ES;uN*M6<{xdd$$Uz8aw4Ab!2`xHFhExewRPb13yON)l zBbs}NN$Hdd=phRLbI6hL9Xi~?#!?d1m@#26%Ng8h?-BW!z_fVxGU8mnhy|8HD`SPm z%|fmEqzvjsFWyeUeAU0V%Ypluyq^A%-aGx#e@q{#z55vO3yU1C{Ti?%h{-FqBc+@9 z2)c-YFLf%dvy!v-^|!5t=BRtl@_A`ZKR7^z?_ExbFFShhe$!3Ko~|9Rh`a*9sU9OC z9l0~fRj_0`9jIgX+xcUPmG~~X`oZgiPD#vXs1h0u< z0^QuP5>pQd7kiw|;uoI0%&L{9I(P`}t>?RCzx4ozR*JoOjQ@=~o&lTPo4tZdJiKA< zd*T@Rjl&w(sP-!%YhHWx@*bMosBspjf#Y#sSm)K81BD%ZCDy8YR6P#vkK7iqw;Mx5 z6}9;5V=)1jMneOAy-k;iG|CF4sYyYONvCr)dtqvV}tw;{IMm+4kp{B zEUT<3Bc=rVmaG*SH`8A_nfUGGuXR&?>>U#89Wd36Pxa(WdKOA_Uaq*pZszP^QKl#? z1V1AKc3f_;ZM*qOQkv6cJt#jSSe(+I z2kTX1&DV*p(Kd`zuP++VTs~|VY~BLr%*rRM#(c-NhaL2#o7`>}vYiKb7ZPnt(=;Qk z4eQ0!yk})lK92Z}XSK#3O_vAKWO6fD6Y4q2>CYE2V`RUq=1c&1MB&gKVZM7N|HV_R zH#m-CXc|=fC<`hc+fonIK0FBQQKW}QiKHKq+CRUJ4V_MCFNp7R?YGI~hN4mDMECKc0o|-^{!kd%=9wDkD~OX~5(D(8V1J=QuO8#rlTW5#3^t1^2_^bbI}=pEze# z0-UkmQDu*9b`*dXnls7TAQ1OX{gELr-_Y%NKj3PuIRe_%xr2qj_oMc8Ur#pNxjdl# zA!6S6gi3epB5I?h-e|oU*>n0~z6~xwo-7y!aQ1+ns(Ss^Rg4O}vk~nJ9#<%N+Vcp` zpRusEw)4F|b(u9kPlX0&ankOMVY~p80Pw9}EeD|ky>zmc?gg|%Y^=bx{YWbtJ0dZ@ zM(!jRf={^Y_d0ptGq&>@i`Ap?Ydr6tS~<;r4OuTK={Pt{mkXT_F!jw4HhBcL-yJi0 zLb<77L!9yU`VgRf%nk54zc~KgEf!aGYdv{p|3bvQy?qv{R0(XQkJV_u*{AukV9RbS zTwg(^+3j{WQ_@G5)fzjEkhOTTQ?2>cUx>n+(KEQs&Ggc{_67OIYt4xzY4qG}&1;cw zCXKe_q`hizHaR&=>Sroq=$kXK#+As)TF;&j?cd?-S4FNk|5cBcF!%DgPjKVAPhscU zO!|W)xhK2jzQpEFJl#qzUy+_Q1QSSmy^CB1^yX4)p_yw(n>t@3r0bR9*$CO#X|nm+ z()jSZhjh=Hg14%!KRb-f%z!mRd!IcTjnCvdHf-TcuChhedTv+)*Jin1FJ}8z>l+OX zqw4t(LIS~*ey{r1zn8Kog9I~tL}^x8@aIsvgxWt)BW$fc(XdKcWrB&nJ~R{Dy1ygf z-zHqA{Wrz0L_tL#-(oQ0is~F)>56L%EB-0idBSCgs?ukhT+sync^6lJe3stYZ!wGl z0!-y5d+Q|+o_!C0`iYwOIF{DoOCA=kzP6NiN);%~TX9EO#WV6Lib@t|Ikag~NOu;~M_r@b5ITi0+&h zD^4V6=7u$Ui-d%o!Z$v;gK~DC?oDGQgQx$9eF@_;koZG7Q2$6I-Lom3IlHZN9@?*P 
z_&(Y`LgVZA?Xu;b&zWoH@1=lu)~PeFvkuZ*(Uw5TF-1SWwHw6`kC;loa{{|s0?uO_ z-&fs)w-CGp`TJf6n+W-}R4!hnl6-2@?$T`1^A&T%DU+e5TKdpcArukGa7V+E4;dcv z@bua}mY96834bwsyqG&`9b94+_hXw|=KY8umJCaQllzY(bzWtDL8ev2MqyZZ^?VFn zz_&{=6{MFTS8c5?0Dj8tZlMhW>(dyaK*#oxZ9wQDV&3?hIma`gR&SjqgP|?%_puz0 zIE9dVoxac3dR@#$Lfo#iJ+Bu|w2|v2r+3{g(NapYV-rzCmdoshroO$Bq&% zH&?cBM($STC1yI&@rm(1q4cG2@%v@}Q7lHjj;hBoliaxj@t#vRn~!yw zN!{kocJ(G_x{seiGk^PUU$1!8QewM14AvI-QQB}nZ%^5zE|0A@q1`+?J`!&swG4GCat4Tx+HoO*n;JH zhPX-o{|y4B8hKfC!sunEb-d!fA3ut=8vtLP^AC2Rm{f17VkHGfYsm^pK~stXQbeai zIGtawO{b|Lz>p9gOOImtS2E0wUOMnVpD$#e!#Wb19Y7*dP?tV^668L)lhd18w5rf# zHc!X%u8nF}QvpJjrgT`+p*t_Nj~EK&NgCBd&WK56Dnv_;b}TAoD#h@575jUS9&u4xY+wE0v$=B7N8-As) zf^47Ndz1dBJ0>P}>O?e75JlIdihob9cb2n67pQi2n81EE0FvUP2S#caT!bp{o&#eHv7sbQy+3?vtg4O*{TlVQv%@0YMpGh^ zrN65VmDa;fb`=N8v~Low`bsBXxU(?yPVA4x5yO8x4pPLT3})u}MxjXa99{m_&$*E9 z%(hYHo3S@uqBBR1*M%;)@%86`bYyO~6Md>So9oOB=JS$E=K>0Kss{0S1KQ^kqs@<^ z)P4!>M^*}(WD)QDC`{xWKRa=>UN{HN(Ie(k*AJ!^XYB+h5~HG{rT~)cNxCK|Olvkq z=77x7Z--j6>yQLQH;0=gH(NqZ{5VUEBxg1zNo%WEgT*avO@=z~sbXo(q1#+%@Ge~>0_xolh>iF0KUDZLNN0Se085xf(d^ei`?8&*e10y3faJbCb zNB82nX4k^K4W>%mQlXq!{}<_AsTEhH38E>P$dv)ijY@4vYH-f&2qGnC=K|=O#^pUk zPVa4T$a0?WQ&?iXjJ@__&=gj;3lqGY&$pmGyeT2|H%#AM%;{)7#8}JoPjWqvd*Hr$ zUeUq0yu$;|1>fGO>OIqDH)UZr6)Cj$wylaJ7Vmade2CJf`B1`Yu$lZm39GZ4n4ByH z8b)Z6D~+ktvIKdmCU>$*`0sAw%I^Ve?5=}q1LAB|2jsX%Gwp>ZPdD@@B?MWtE@ak~2fK_I6;5kkXD=0itm;jt{X4YkE$i`|!R!tcN$EN?LE z5vx0zzcax1G;(AhI2eyT6|n-Wv#EB^`PmjM43zN>_#F?Q2RI3ZMbp33lT&JEzf4;A76wwuO+;h7hGwW zl?69d+3SNysENVAP1@f@ngA`G{?>n|t}FQg)58voA72jmA&~wOYvUwK(UAs!u~C#f z-lfr*~Vh`yD>z#t2Du4T%?=37~s&xSd%bk>1|J4JCoZtk1ayy|o zcyvSUDmOUMnCS4q_X(dP&L#pLOzBDa!SO<3Q+8`C{kxayOpC|zx;ZKH8v>ti1DwHCY+H!S&SA`M*ZYh-4~kk> zxmFt@UXYoJ@!{4%>#+S%m%x;kL2F)3{9UEk~&SZ+yU+gulxK{ECj2*rT=+cE9rIqC#?5cb~r zMYMc&spj!&h+2O#6cl_@)*JamE}8~Y0Vj}cPH?M7AQlcvImf1A!WZyWH=o((vyWsp zbNE}2?Td)ZQOK*8MD5KE@K)T^?#PJKYmqpGt=x1Q_`{yKl@&G^)vt)a;Qfnrx1=_2 zq^k5R{<_r9+42_SloBYr8m%$&Fn!F~L4w-4r|7RoWPnhBg7e?2mGo!7Y_WqS#wJ+% 
zp)uxYHUiSQ4s9c^KsDLS<>T8f6TZ!$&71OP2Ia3HAKu?5~ita^69 zrE*J*6xFXLke>tcz`bj1hkvg#Zk(qyr|?^8%{d1pd1NpMMU zf%4+r_XM7miSe;wkCVeA$4lz2=T#$ zqVXAx`{IvkC^H9qYXSp$X4~0+n+Md zM*bMF?RPX+PK)f`7^>P=Ucoy#pS#+=dTVV>vks~{NxVqB$k>zLSWmV4?)kS)40Rwd zDiu(%n81F~&Zzt;u2xd0$}3nlbiSV9D|2?u?XQ3`gfVo%VaGI3Riw)n| zlW4@+Wr0kj;K&%;&2^H9ZX|NIggV`qo(P!cKzF&5R-5~h+8^D1H7=5-*koDoW{hU5 zPHMUSk~#cGz}4W`M>r7jM*LZ>Kav8)m_22<{oNoD-TJ#i)%^js=-FwyjMj7yLw|P^ zotKNZ>@&-Pi!-nfPy1ytpDdCB3la{T#AMj z{Ir~xl^*C4(@ORi05Wv=Ysv1tBuW^1h8O{`*r&$18Dd1A@6y)n z5KQh(KHB*KFj4ay&B}4p6@!K45VU$XgriY+mtb}1j^-Df@G$;!0j9a3C=PT9y}pPR zJlxS8KtvHRboKWI5Z!#Y{n}%TdhZ3nQIZ36i5)fje5VAW;c`7d|d`gjhXVj8ng#(OgJV<;m^<+Xv&E*n~#qF zbr~Xf^XzZ3BL`c76y$g_zo;I0NHaNoC5lXYzRw<>D{P>d($UKqA0JD-$vqFM z(yZ(KN|I~RvHyf5S1v)%T|=ge_5b17?S&&)vJAbQkU|CT5vP*~Q0m|qkf*o5Xab9i z(&MLLMd!x(pGJc93n(-HZ z$>xUW%+go=z^te)@A8~}<$#c6MVXa^nc($`g#Y`C=xCY4^0U+zFd!9MD%>XEyy05) zU)fB18l5z$VpCoANY){^JjrEA{+YnI)2P-{BulGNzDB$^P0!;L-AQGU@-S)_xTK|8sG#xFCQfe+FqdQ< zyTw5N5O0&%8Ht@F#_x)9#8;otWf|3+Ylxa;y-&9e*2cUcz~?K|x`&;(=16G`^qxLR z5;llKFavq*mDgG#7&)`J_9dcfyP;Zo9g1rVX7VD`5n_G|d@afB@pNM`44|=t`J=$; z-;z7YP|ce0SZMd+;zYI|RKN{9j?g#zMt9t;ONxV)vABb0Nbp$~xYEN(31F2C(Q>C2 zfzo$bs?;tde$0K*{zh;HB((d*8y(<@%W;hT7wP!laYGE02b(+pb6%hYOh<~^kTn4d zfv@{=jEsAHP3|S!!k9!lm>YYP?w?ZYRSe#jQb)oC5Am%sy<^Ra>ntoE7n0OZTglh*`?`>a%f ze<0S|mC*GJKl!DwXI%gG*92282?*Aq^w^@|&0dAQ`L>~|kFO^5K~iJTX~tuJx3US4 zu0cVAhJS68~Qxc^0)V!fx5 zr|ggz>Y=rgzg1cly=Zp{dij@Ubk3oSV#f4_wIj97cdDzf|LQM>>WQegm zo*E17L_MQh3QQ^_c-7v?rNZVQvUgR$v&07nx@(QAFk&&$in1m(=b?+~0m=GjThm{> z2>l1KzpEY^KY1t`q`Hk%Se-~V2vQ3vT$0hNa1KJCynQWMj~J2=>q_~@hK>4Uvm^qt zdNmy^N>TB#MpX?rzCi-`0e&2}!zcgyl57NN%+T!2`Vu(yDJ^FF?``?{-H1EDo7$HG zphe9}ZSBH7RWTqj^}#JSfQLa!4S;+6w*C=Og#RHlhJP8-z4wfZE-ptYmH^-&c>u^K8+ z%$oNBs>pd&ML?@K$r_YN7pBo*1*u>JMK%1~f@LW$bW6q6e;crBjK;7j{L%kA+8l+^ z4~2E{+U@V19ZWhSayJcp7ZtPQV0#pJo=y{c!DPrFrSYit&|$@E7O925P-(KWlI4s{ zr(KsuDsu`xhjIZyIijbc)g>&Wv!xWi_1i#-H9ixj1m)D)gFj{v_g6$TtEBZCQ=Ip0 zhThL$ojILpEih1EZC~^bElT?|mQ#4kl 
zkyZsN9G(~cw{)qv9Z)|0bpN!ThAmZlN+*R6RW44c6=^7c>&=Mv-6KWqAJ4*NFGAy9dzC)k5~#ypwCQ$LYKwOo`v*( zT!3=iPnwjVbRsHw+JHE)`Sw=uT!Vb)`67ph369v3sGXvL${+$=B36#`n88o?5GM@U zG*Td&c$5VCSCmtrSBQp{`^MM}9;p8R4_ogT9ci~k3wM%E(s9SOjf$Og(6MdXw$ZU| zRmVxkwr$(Cb<^*A&i9==#{Kh*s(NZX*jQ`rIpr&ub3zVt?2TA-nb0j+!MP5F^mZZiK4shka@X# zz3+2Ltz(niS7-U8%!C@L{R)A&kiftFzUqA56&Npc z?xzOp?Fz2Nc5WixW4|sQ_h<28@1*tW2rH_n_}j%R4xqz58)WOaR9G@u`Q~#=;APJ} z#;vv##_c}(s9QBEp31KO-Fx3;mgk|h-u?T#rD@n%44o1WOH2KP%Mq+e_tGa<_E5ZcmEZE zDl_%!MCuSlr(dOlsu@vu4E|SJW%a>vmz=d(Grr7KnLLWy`J1|+0hW`F;DRg1b$gWc9N7% z!omy{SwsesP+rmwFcMLdG%pn48u?qN6U7@J_L0!}RYnLc)oiv|)kc;f0<$QEf;$6+2Q3 zPwNXl3j~3pEoi6;-4Ke)GiPB4>|pZ*k|Zkd?bRVeeJ3qb=C`xr8zLg4(r%)A&K(^2lE@6jFTCRd|C!C25&aiqRyO`;=Azh% z>H4rZsppvurm_Lmd5&A71 zot5Ev4ek+_6}=j1dXV!s38%hj6Ed3%MfC-|(L+B03g6d&jw+@~glwB=V(J~xlcHJO zmB)d{OWD4gIV(QL-c$1zm>LKV;`51_z14Y34)?t@$1%WeS64_`EympTJaWEqL29sSiWzwKsuJ*u{7K zIQr#2txq%<8i*b3`of5#+do*y{Natf(Z(3)EXT`^VpHf3 zTKMOu4r*ASa4W0E?i{#5v4Xlzc}izkDom)=4dzM`Ecs?I8hs9hGk2nE8VCKy&&Z?f z8`G-tDvN`M$GZ{qL!6_a8ViCke4ltkxa}nuhBNrDRKylpS3P}w;OOW!5VYg9;DfC@ zH12x>SP+d{yJ$i?-ZqybNB&Gvgt6`Kq*$V`o9Q(o{6l+ZFiW;q>i6ZZJHJS1@dR!8 zZO}Ap{S!tu@`LOgFr7nP4O?Yed`-h3sBz?)ZGbjbMazY(9+(|Y?WIW+XtH%_jF$mBf4NOO~xnD zs#;2ZURb%cQcCA{5uL>%_dL1VDh~>OI!qm(y|)w0zsnH`nWEvfXm!?P3|Mo>F8m=y zpOM-2d+8l>|9s@nf*Dl0lt=39Vmc?iOP0Qj_(`an88>v%`mjyJRo~&i9UinsgN*{8y~6I zr&VCWIQFrHBgfg6xJr+{e{@B7d2U~E@SUUQP?r9i3nYvB!goBigurhYbg$zD0%K5j zw%v1;bZZ0NV}7UGDzQPmclb=LT2if}XdS#RJx@~{_9w69A7uTT3Rq%;g zya&B&$}=-x^_VmBJ#Fpm`-vN&CO(oRg)2$E?3Q~46?Ng&JD+WYg3PeS$yq1@bx|MULh|T+e~#k?Jw;s+bK*Mn zH)lUjS}IJPGNLmpRTvnZi0@PV4U35+pkkZKSARf}qekky1#R$R35*Pn+9deIv`uX} zo})OAL5(9!mMeik-e>jr2i3g5=h@69I;#Naq0Jr7B@d5ya41S`K3Mg6n?r~{|5=c|+ue)7Z(oVIG~T)=nMRUIcO<3g zt6TFX-fDv1%zV6}`bA^H*v5=WEYmizh;S04vM6FdV535RJVpAeifR1M?Kn!$okwVV zZY2IxN(lv`l(LBY@$zG?uO}LXf>daE4p%n0AoaIt=AH4RlvWgzn;|PIgR|} z2Jb}bsppeFY(M1YaOc4U`WP5yibrI|-D)m2VFs6PYq}E#w_?mk_+Mms{@Ms5mz~3X 
z+O^CYgIrp1YjYkl2;a?EAGYpTSn}Y&_Ub})mb^7Q3P)bj@}aVz-8$yYs}O)jVZf$X2IbpR1(BH`|~@Ke)jd+iu`I z<9Tl#cf-VfwHBb6#vLf80Yg<0u79VE(GeMBgOICch}~b=E|fb2El=2g`(bj?zLQU} z{3Yt+{(RcQrgl>Bq76Ebh(L?~JE~tYS!gIh-p|B!a5Kujl+PCMqOOA|UCsvo!S7pALrEcb0HcANweoiJP}FKU#MfHrzY zOYCdR5uD(v3vCp}t%W6?QZQjB-NsiK9tiEct!tRTJ|RR`eeeMD7NaA4zVs5EntMZn zDX8kzjW-tM4zJ-HsaToy1ytN#ze|d|yu46$KroeBYEA<2naDT7!0#6&8F-aq07`Xe zcv{J~;N}eak#LeoN?{)=$pIx^v!ZB-CH-rr$^%oGx4ovzdQY(9ECD9UW1%%naMQa; zTr}w46zt5`1xKx*vo=|`FUHczS5NW&C%`9D06D#qIoX?SrTG{I@1HN_Kv#K+%4>)O z#uKsKS3nGj*mQb&Z|My@c*dWBM*?4heCjOgAsat%ysY}KkYDwBe@?#Xo^r1;$T$b~ zufez&eD_26bN%+1Oie}+LxGN<8IP{E69tqY0r4wlB@`#!x+Z#-hR=VI(V~nw9xDA> zfi71XS^LMi;w|^X#BZ;PJr~WmoX{(`(y!+yQnUSktm&1lut$+uL+eThc~dWP zw-7(!Z;nS2Wqy&*rkY?Or>LT#))UR;Cf8Ar(*GFLx3XCE)bO0)yD$QhGW$-KwTjcjVge`J88~toI0! zm^{$TVcG6SO=M60pp@OStRy_7x?fTOStWC%2$5wqf6w_KzEoxzILX-1Nm@wDWxG3O zS7psa6UkszCP^;U249#j7PKQopBV1=hh$=M0z9B+pW=dQ_=s)~t}MTKy~@=4*M&nm z7pXfcSFo@A*3@CjQaT4yV`v8N)3mg`^p5uKkk0E$6_eaRM=<|(LOwRI=tZZ%M;8r@ zW>Cd?4C#F2=(-^}=^NpEK&~H zD4z4Oz3GlnC!_4vP|o;5kj5LwrJ!GQhSKjVc#avL-#`1W%77X{;beYLv!zQH)D4WJ znxHfpu#U_|tF94b`ld7bT7hXy9w=%km%T@La@c1pcFSaL*300QaFCZa^arHX!7z1x zki;f*L(Bg7C~&D^AC}?lsye^8Twm0aHRlAXQI!IuaYImn} zCIuUi%VR>OVA;Wv*idOKnX2Cpr|mb;E3r1}=kDl~L!(Oe<>-M(b>+B6;SW{ESGs;I z5F9Qm!9r_N1<1SG)HQT2-be4RKAwvS(*w)Xr7kkO7V0@5E%@zf zo&n2hhlh_kJ`O)0rHF zDwFv_oK_^^Pe$p|A*_sky_RnoCjP&mFkqvD2eTix!{9T=L%S|-x5Jb}(0XR@xuq-s zTeoP<&AbD`+D1)PxqJR6G#0UIn2ylnQa*;yb3?D(JILxdAAz4GUS`^jwP|K3Y_&z%vPPw0Sj|J%NSBGtCif-JR7!|Xc6E`^B$UmYmn_)V zyCEU9P*yw=i&UI}HrPrK@W^T zW-W*7Sh|%YiFv5|U(F-&T0C%(?px}~b9Rw2*jkMkS16fP<&tXX1VL9FGzUleF$);n-L$4q~Q^zPY(7LKqQs*WvJ9>@5b^n>#(K1m*kF=D>Ee2Eow%JyJX7f)I3UuqjNWyO5<6nkpCzNHl;0~RE{c!zq5lH<4#Qq8ucS) zi7jl>X2*^0Jfp`K!L!P3Jlbb_sRxG*E#EQsmPnZ_fkGw1_wA)AwG6Xl%lnk)=;slW zw&~w{G5ZliN&~e)=}I5^W&?)tKL+JH${nM#Mtk{s@Y`~`=7Ee}MOIxt#+dJTJjn*B zW#PaM*9bsV}Ykh_woc4T9Rt7NPIcjWpF+AryUk-6}ERj!u)G}+m877 zl}JB79X-Xly1oAZWax7+)dBdlP5UEKaR)1Dg@EmMsho{`4i@A24qFt?Zw!HRbXrTw 
zd}g6*&EE9QoXq-{llv%m+K?v_Fpuuns92FF3B)7glojQNGu9~CuoyBuMsJi8h-BV? z@$Y5$@4;aaQU+lmSI6A#nP-EYGavbf z5=%5x;V9(diFOAei4yGVWst&+V}%SV;>j1|zQ;=AWmj0cA&!V?@6E0SI!7CUt*P3&y%M)jn+YA;V+HVnlqZr?Gtg< z?u8OkzQ{$%SU;yhi&%dC$(~c%3nTfAKfKARExpk)5GuXghoB(Jg_J349;tDK^ldE% zdFQ_4lJ+T|X$jq`Lfd+dEM!acL1^UXm!6PdTJB6tf*5HIm7r%Fhm~f}!AA-kC-ZDX zMe5F_qT_Bbrs}!znnN`C+#pM-_6EzxcGbAglWDGHPi`1zpFJrW9zMEG)oRq#>d^wI zd36UWI~+Q0`B`PGBpF|3aZ|6()!WajaTWw%Fg1HP`|jeR#JKBzp+XK%wU)s^sWdRu zA6O!BmKQWRxJ$&DU!LE>l>=RNZty)*UYu~b^P+B67Qhy@%lKZ*Q~kCDN7`{Q*4nAP zU?%on&;tY-*FnHxf&FbAfelAUe!`)#r{Eo$r_w4_o7hSh`qdN zm`tF$lGNo+#IazbPwDvn>2KD*F=1RAtInTqQ+J3A@Of%Ku>`s<{KS7UeymvAQ}K?> zey6w{uKe^%-XEF1@oD^>YCiCIw1U5(pQbjK^mtEto@Y9YK%ODZhZIg6ytTLz!lEH} z$EX<{&p=_lI&$MyQ1x-dr?%7YBwq%JO_DMdPEDEJz>p!m#14@1o2+bs)1t^r4Amgc z;#f}1tGkcC_$%;BqsQonuBX^$3-6FPmlx_-%M1mW$DSuNUolrbXwlvks2(juqZy1WX{ zBpz$E%R=MlM&bnK*wJomREOkxad)_e4q@J@v>Rzy|cM7KsMJCeKb;x-Y6Db z_>Xyh;Jm={jAVrIS?jm$@drCsb|W<4`CC~J*W!*0UxN>p%Vn3>{P>m(kKemN6(iKi zoM{gyl7^ZXyO` zXET_u|0)8D6K3-c6PUiAD}Q3M$lL%a>?r2cV8IY%1tW5K7n+1}f)+6~AFf|MQ0+Y* z;%e-uV;R8yr;1O9map&46r4JGd-kL`1yibg3V)+}P3y?>HwoqxaiVEO0^x9wlya6 z`R91%c$NbF$11}z#XO5VT@94+#ccJ2Uk%{S}EN|U%YK9vvy?Nc%n%3hHvZng9N1E4nxw3 zJ;@=?<>^RufoLW`*42)80__Dnw9b|#tE zE;ea7FH?(`p1b^gtQ>mxQae+IL-HL-im4s7nP|^=6pCauW)l|fM5@&m4iC~un=V`; z#6drs&+tZ~5~%XV{=wW;fo7_lUG2`zQ3c9pMio3CmIzXVAE8z$>FZXb@(*n#ekUiO zOO4c7?B!6x%S}_YX=2llrMy{MJ?F^FOl;*N77n)I$b{N-q%<@%O7xo-7c!q!L>=FU zo-mB_`RY^l$D6zyinkJ9zt4gll70jRl468Ai&-V2k{6Tnyur(R`a2Qd z`)?*sSLKO8@6O_$FhfAb?G(;AdT`HgO_V<}C$NbrFbEtCM1fo&5J+K7Gh}PN$T`=i zyt*Y>>3|E{sgVG8tI;Wt>H{J-U28uNI$HCeum>g^2kvwTY0fp`cjZPqd%E>Biocm= zF$aEsyN3@a+rW^yUgDp<=Q`yy3%K~w!u*hk%5i#?kYn=k^Vk~&To?>1(|=yEYgWFe znzRc?4JGgyx(JCftediq=fx$Hgs%UmzCU6BZ*h5%8^@j*M_;@_1L6Kp{A`q4CG*zzn5lU*Ya;@K5IF>T!EY?p;Val z3wM0oj!Y_sFoOeuBH6F&>5>#?WE|+@jR+wWnH06tkB{vRu&g4<`~!29R}lSb<9@yB zlyI?sf9vD`W6t_#x6@x06mDK@1)ik8+C-WpYmKKk^S}-=$u7Gp}Dy*IQZ+W3Vmu_U0Z 
zI-Xa))nNK07@&Djuxx$nVtemt--$|_WCs=6>*=-Ol%}y$wL>B*@bCDer6y73LSxDa~$Tn>@6W*k-x*R&aM4cn)^x`>6z#mrRYr`L%g9wJU$!I zn%%U8`F(i9gqC8C+RvZ|tP#CrbN58NkPIzF{8g+Kc3Skkq44Gv4g_K8HaRwo$I|Yr ztP&`FZ*KFF?41G^^zL*{2X>#10;sB0eEK+?B-1Jz<*_Wr0PWdfT3&R-HG-Z@84vyf zKp@@IY?yhtaOi*^hNa>Af3*N35B^KHZ*X~1Qj|}r)Vv^iqg)mOZQ4+lczA<_vO${d zbj&vTQ9-w{+qeXPjDP09KZkW)bqnMSDo*+G4&?n1!6vjze@rao#&!VOfqVU80V7jE zAlfx0b z?R%be2~6n36ZpSfnUkzXZAL?FsXATec*g=W)g2o_YCMxrM$3hP6e$OPa<#z#=QvSY zdLmkJjIqdEJA+CS5Wrn8Lr)ryc@p`e062IxD0oUNdlvirfdo%ap7D5y!pPSaZs-GL z*1!)$aZ4VV=eJ$|oh|}r34F?RM5Z3G|IG40SFl0Zcwfn7zfulq)?98_Svj68+%v`a zw^TI^`3n*Iop#c`VKV@`a#!zIoWE8E)H_p(g~}PF2Kx@;JVpnk<}`;a(2|HeNiC)_ z4!%!FBI^%y$ONe_jegQr#HYI*tavQdqsc}LS~<{e)e(P@#Lu6i=oP`rIyxxBt))P8 z&x_OrN^!~Tto@W;Z^%(@GPwb0{@Ru|1-;6*-h6nhV+Vs8)*@SX%o|XtUo@T^;<<&{ ziW$!uH@dUbF#as4#_33=E}o>M>O+TUfT)m)OSJBe^mlmwYsCTwlk_+ome_e18PzQ< zb!gIm$4?2N)((s@m74s zwk#Dd$bwB~ojz9zaZ=fK+{)iu21fvbnj0THxf|?`f-Y&%`1=wR7v>LD0WRo*L*lwV ztwX*IQa7l^=1nqeZDqCNH;#jHFz!$;IQqM?Tg! z4x|9WWYVZ{tl3ak>vu%KoMha>Ys%u~$hgyqfse{y5xdugkQ5dNU}N;YSbHuya@8H< zfTY;eyCN7N`S#Q<G%h|D-!2?&A^!l@<(5mhedivyj$ZYh*06)b6Srmjf=`M}nM5QOqv3GcT>dGxB6=9#<+c+P1$pz~L#LGbE%UbIOepW$x@${;^i|38q~i znX^TGA%Z*Pu;vW#;+f1jn7xEfz;}*IsOe|X=6QNqra4Y)KZY*cqOCMIecJO}abX`p zSS6YtPRGg&X7qB@fKDfG%eJ+kMsCL)f2AF0LbnmF_#^dXKdtzzXy-b6w4wKsVuM+& z3nOYHs&}mH|Geg{zZ?_nT3B;#{upQM>e-yyORj^=PAcdVQk=t$NVTNGbTDb!g=GQ|7-1iVofMAII*ZEO&~ zweEL%Nqjch@L>S~vIV`f=a;li6rrswerO%JlmY&oMwJ0XI!0)iQzMr7#X2WiGA=jkEcOEe{{mB?^Fo|m*=4LmI5izWba!Yp~_ zi^~*$S34}gI<`X00~5z+$!{<#5%z+oEY@hSkM!(qo=6Tja`ZucLKJA{n<5!9(W7h5 zX<}h~vwf=x(P_~zm&q1@NJ((bUg`dA$0Jl}RCzw=2UG43jWnN~uBmJelMI{wl$4O>%g##XR3AqK*^qH@wR?} z(H&u<9-}je&Vv0#1@*M&9?a_i^}#G z@UB4o1U^OSk|0wR_&3T7{C+fjXh@*+jc_{O%SD7H==(O`Ugi^TB35k@rjP2h*Yx+} z-TE#v7p?Z}S=6;@voaWIgBg|`tM?_ZfMK5YLIFC!s6QCCjJY|$Ku+()H-|F6zgOb>roKP$)k49z|vd{@1PkBt4<;9I}pKs5*HnADbqk{cL ze4T6iGL9-@tB7zI4cO^1M;SQ_Ei9%~mVPRpzK5LG4D+gA{q=$&+lyvlVNr%_4mgS< zJi8_-806a`*hsYR2429&ndc!nnKSCJ(nPgrnP|-4D=33K*d-Lm0mK|}motaz5Oz!ja 
zq5^Chqf(L#E~7GI);AoenPRNb7!@?RD;}gn2l|nzzrf*}My>@ugmI#O`po>?$u#nm zAH~&hP#CpzklV_q#DSJ@_6$Bd_zKg0Sr{75l63F(`*Mroy*>LLNI_W{qPhGG({TJmvSe|$$|SMNTsK~Ry*rTX#!8u$ z;yjr>`szjSEHr2in?gj{^D>NDqC3IS(LB@v{*2ohC=sDPonUpKb>anf8u|!KdMSKx zWaCAe5F@|a#BDrq=X$>)In_^$GBHsqLuaSxu=@<~1F~lc347KwRsj!1IVM>6o4p~L zD_ofzq7su^>H0^^`$FeyFq?-FRJ&=JA=`6&IV+tYo~o;4W~_?49XQ!0- zN~4*-+n4? zx>nKptwVXzk&nDr5@=^S(ncY=_hW}r$-&wA9IsqwJS-UJc$*V5eV9JO(eu_=L_hKv z+5855AZ8i|)HrEw4fLc`@SZ0&Z|cjR=q~Jv7OH!hRK%#1!bmG4Ay5FTVaf$@@?Rjq zCpWe>))1MWcR=&axw-=YmT8OWSx)q^n;%^GZamH5WGtz!&g43~-p{K@2nBL@3V?Bv z37r$6TR&*?$_ zoq69_4f;z9YmaX{-4RhzPlk?0$KfD zpS3Ha{238RSbkWM&6vE6i=8w(;y-~9p8kL9<>?QQ^EH)9Uu31 zrqHglRt`2duq*}R8?}mrkH+Hvs03aW`0l!$Do0kdMQ6m}ig6iZt{heX7e1)A@;@3K zA7TB1g^-YE*a@H63;CrL$N!fXtE|&7-tjUu z2C>dO?vT(aU6D+SSm~3-q1qbfQC%x(738hxK7+T8ut+w3U}ZWe`?TvJQ7|q=*P@%r zKa(3ELngXq5#WnR z1xC)_w5L{z*2I|Fx+&A+Y(|PF%IC_~anz9yP%Hr0sdE_tn zjBwVi7o&J~Eyphk4~{$N8_f4806RjPEYXh(MZa?`b4K%KK9|V3%P5(SHxj(GzTO(C z)TR=!LYgn0raXW6!l7yOZ`RH|W3h?otPQS4F-O(i&;}s14(krTo=|yQKgK5khisKu zF-OjLQM319h!{pCArWzi|XXy=J&Ly+|34dJ0ypLLWfZ+sz-)+h+I4?$zh(2Ag zE7W?b?~DOuhfXEM7qjxG0wlM$Zy`3N2@R2*HeH7AsvYS$RU)hAw}k6M1I}iBP;5neTBF+%4B_DTldziJPYAr}nP5B8Sm`|x{`AvRxb|B=IpY{=2Vox3!QP5 zt#KBBc+4h=lUFTFEPiJKjtg&Js6b7~4cSXZ^j&(pa*iMw3&w{(9HVWo1dxZyj%?p@ zyA@EzI*`+C%2k8nEk$T!SmPcT3Sqh{NqJv5qEu*s9bk%bbOQmYCaEGDeKm<+ShG=))49SB1UoiuQY*MO^`Oq_XWX@?%2QIHMt!7 zV{d)R8s*OQww^Jm>i1XaUFyCqz9^=4W%qz9`kzYi_se2{q?Gl34k`+>;K{h^|4{rQ z>2A`TnfGR+H>7L?0qZfw>?}Wy*6~nT;$LngwX_p{L6PDC9CPHM=Wo~0g1KxXN zmbPEG(#Pj}RzwIL-8`UW+-^TZS;)ymoL=pZ3yL&lO^SLj1G2A-cgcvgR!*UGZ5}gq zN~GM=X+JSsqJNYgFgeT%ItwKf3=~wFCd>Fs{S&_q6(7C7()ES|*x1N>Q zA5o~2!~*eaEY)n@5$t*$YJaoQN)zvc)ok%(Tl!M%XQ9FBP%f_EITPvGig%%oI~UV| z$#L~<&fQDAdSGV;$|QBV9tJY~eLOhAUuwdzKwmO~{1g9uhB#ku59wcGF`FRx|5OIo zBvI%NF+3r(#j17RIHj!Jt>H+y*?oO|3mh*FO-s~#I$N=U+FDgmllzncjHL=V>^Og!cFS(-_o$YNZ6teY>%Qfj={)e(x16S%{f4*R`Po(n81Wi%bFODSS z+!yxIWzKb7%KLh{HfH3tFG>y^(+HlfCtq}7cxsz*_|*9ch9&2_y7Orilu1a3a|C~2 zVv8q?N|k(Wz2i@#K{B 
z{XG|naD-E#Ue}2Cd5i6SBMO}0ex4T%-b^$IbnmeFnoud^Vs|=KO7cuA+A|fxJLun4 z$Uj|qk>*0SS>K8|Y{q~c>K;o7znJ%WisV!4qH1^*uv=Hdx?!S2?&jfCVib}gwI6q zab``_|B)NqmfCy-o9q&ur+h22Z`B+L0>;KVzE%0i8S!=i4P=upQhDa}R;9T8>26WGp|^igU=m+smksx>Qk1FUM) z3z{~hB|GirVJ(C)Ww2>*q2+xJ%}PcWher``@M%knx@vkn=Mid+#702b*^_rlYn+~( z&Sa^I>O56hUuWEC^};KRQkaaTgK~ZTl+<)j%M5Q>pYX2_<6^!&;6|i*elp>Si0fR3 z3M_1NZWfH2vSMVG8b2W`BY5Yb@8OE7yu_&U4xDq!T3A@mNzHf-Sq_Mm_(`Ad#o%!{ z$0L6l9J7Mw9ES4nM%I&RRXfp20a52~R~r4Z!0~u~{!4?OTWG{?toyyTzp2{svYu@h zjBb4IF6@E4;=}(*l*7)Vw?AI!iE%@nIqactaK!&k7S1?Z5!PTP@-Utdw^3&D!^R=W zg(@J#f^Z$*Apqj0fw3;PXNL5SsdU7m=nz&@Z8bK}^X--9cFLQhYx zcpv)laL|8xES z{3h7CMzTrr-!K0;ryctBe|VmMPWHmY`o{l{OZe*-TnYaY3WnkD`|c3!Iyh|9fYGux z)3!_a{)0hLINttdrw}M*9e2bU(NC1x=e4hRFoQ1if3MNnwqEY5aBGJ4aMd@^hb1u} zIQygzb)XMm=Y6L-fnWpHCiBcIJMU6`5azqA`Y^jzd}6UoDHPD+VFhDz;aX6YK=ES3 zhNx(afcO3MuyN)T}KC9~d?)(i1 zu0;@Q9V(#{wtY6Gy*k=Q9te6WAGAtJUvB}p5j+#X(_?iHv=im&i8;$R;aOh8MHJkmC zbayn=ehuKjLlV_o1V^izxYNWH+n!3jeYQh(nK1*cjQB)jshSk_fboYQc`ks86`^;i zn zMy(bigY^XB2{{5Sd8fYoO28iDieo%ix%Bw}FS zyx#tq(5*X_EBOPmMcbpfo7;~!~-B{ zli92Ai0c59TS;?GqoyEZT~yt5%>5&FB9=IhYpS&bW+k00{J+C`9r4rSh{36kMDWoZ z@tS>evR~1SZ=u_eIS#WvVxBLoGfuu@{WxtG*u&eHCP>CKJ*8NBqF!dl>$<4rJ;I1& zT`EJT)x+-CYoW$XVQC~vs%&uYbeKg9(il5xc19LayK-Pd+)G?5*Ae!Kg)0` zIg(<_I=rx)yf6(`_h|_@QxUmR+)}XJr*xrzO^YBhzc^ zgvsMI61Oo2;ef1A#v?xC-fKhinv1K{ip#+EiO<@4wJ*7*?;@w&9t_Bhtz&(ft5b2< zeOdjm6LOs07J2E%+ckmOE39~n#c5OR%^6+ORpn-P)V-CSXL2{J@yMdQ0~5QHX;)LpCWyarTl-!my)przP@d6f2X=(}owW$BtvG;(_ zHNC?26&OxU$)_$|GON}2A3QV4ep5*Q7@67n@o^Cu3+2_(tnLw#Tb5e3Txhrx_Pa+6 zRL`p!LkZvBsJ0j?Q{ovXC|X@eweywiGD-t+R*ZNJ&tDx)%ag6K$u6r=pAQgHZUCUJ`1LA(9`t%z^2IXFXS15zt{g_cd-Q~ z{k9xx@k9L+c`@bh`Umi&`k{>tM`7HETK=;T8r;_PmD{oKsf*A5_iu_kBIz3iBjgCb zfknG&5`*XsVj|YC^mJZgV-Ys*=HroFA8SNRr|7je8nyX54OSuU^4y9;0{B2tL{wDa z-c&|KMln0Da^xT?#Ugz~6-j2d-)aDK^4IshtNh9AwE@2z3mk68hp7Wq(%l^V1?u_2 z65;0u0@V04$<|ATVre~(r{*d&7E>@DY=kZ288;jR@4>ep@f2+x>`-_Azn@v5S$#UN(R+3c*2(=GQysduavkO|6~Cz?4V 
zD=NRz{+h)Ka*iC0f^lysz0a??TG0u=LN81V2X7W|u9;rRSRby)9+^?)E>OI$gu1&8^V@=MQ zyU{nX_VZdtLUELz#82;e^g_jKv;E!F;Cs%mHMTr1bkG-&f7 z8HWEJvA+BcBR-0+xmllGK$ViW+pcvTHO07A<^g zBi&P|SXX;+|A!@$_2j}4PYkNW;P2lD+LZIIJp%0*7j;kMW3hR>gT@~g&%z|VD?l8E z-xST6LJCiV|FqdNLVTeKVSQW^m<*!6)t~z(>_t*)eNCNfD{ky>iDwS?Dceh6(!vQv z!bqoz_uxEEerw!Jr3V=mOr~F3LfdgwqO%C&9FJ%mPBaNW1SqzTFQVGpIW8_*%|1@O z^a=X2)h;duQ~n%mi<;`zp#`wJaAp$x0`vN4H`cBs>#A*ke$BxCYUpzjl5>5G|8bof?UA7J(DmxmpF8PBnq$pVXTdnIQp@!u=!fPq=H#RGDk zqfRusd?bPIF_J$RT~g+sKK2(ICh17}_b?u*&1-%9B>`#0*APYdT_ zy2G~KKgX$;^k+b+%_t=vUkux$)N+^Qa)&)1?p)vc1eiT{`q^H~Qm0Ja-tRKju1Jz? zHggIecSx#*f`be!&nH*-G*;M34D0s4BS}g+qW1IuV(8mtX|T>}IevtPH3nW=&;!eK zK_YLL{L_c@@bqpljFefH?wrDHoqX^YUNWWFyV&0L&8 zJ575);3tuj?2Q`pS1q$lBdVEc0MT281jGId9G0+9Tdy%bBI97>DX4sFU_&iwXc3_OnJPqwU%tC5(M)+E5~yI+$&}s$4Pt=Ro17itc4)eCvXt|L<;|m zrQ3I)GWu^k|IKs}X`GV(hv@1bv{Pwy91iq*j-SM?rIsqx2dUEPO1CTu{vV?2+<|`m z4%5jKS}vG`?xnip?b3&7B)77RJ}3Wf#s>@nf*2PUmuzFDS4GI-dL`}j%#CLooR?Y$ ztG5+~DMG89M5M+}VLyWNDY27vTcQbJd3o!qIFU`5dT2H(!0EI#QGdJtE!FfH};ZN3Fl5zV^M|c1w!Z z>FY$lRG``znUc^w9!J^8R9h*=V0hFb;iS$gc{O2k;zGB2zJM(8PPuHCHUu=<=JC_i zTREBpWn1<1SQE`3CkA7<`@BIF4olx|NHqA4-xi%U#y7=2XQ+ImdcHTLlAAXx za)&m2BQoTU#6k@lShUBOwc%-7K+MidP&nmV2N=#YyHH>LZN;hcox`OUF?@Ogq{zfo&DNr;TJ(;vxwnT--&hYz@YF`!3p=S(#Zytn*b!WV5Apf8d?ijkt~NRlbRx-EOB z@IlQU3l)#9Tl~!&=)1`~o9h#NE_u#%(QDoP9kZ6`c%L*YtGis`r&!ta%79EGW%hwZ z%UG)f_%p=*=qBz%dCUg8@o)6H0A*=t#FCB3Y#G;Cn_D>LFS!cHl(oBboBo|nYh2X{ zCHg_*VAYJs+c8%X6VIY_+D}8s-IJ)5JJ)Jl=%gjrgwKS+q7Aj~3yUzGuvOJXwBaH( zB{sGk@T>>umM2D&E;Afv%46|ONNzrrXlG%s7SjF%ItOyd_jsjDq$F>7XcrC88|90U zqwY)Hg}waSy2^;FO}^~1R=ZJ+@bAIm^m=Rj?5;+4waWk8T%Zoe)%?R~qq5eHM(h@^ zoDHWGhW6ZZ4= zH)pD;p<4Ph5B;ILJ_7r~8;^$z7WUvQsO_7_sMZ-ZKPR6biD(V}PkBFJGHa^MJSgef zIewjv7705t`E@dE(trS9a9}WGTvZ^r9q)&aSl$5F>#0Mx2e&`gHZI1_&KLCEnW+nA z-<{4lW;)erdlz43^CiEhGi}_w)%-!Rd~_~oP=gb1e;;{R3q4wPF?zoyYT--~!XSY_ zEE+6XaY7%)F1_I{!u{p<&*Ms3Dy?Ah5hQ-byzzOHUIAgJHRS_p+`z0P__rFExi>?m z!n6)oVkyzXD$d(-8TiA{%MqH|_OdJNX|+*mCE*81dQj`OX<^Su2WDiD^?|bf)UyI; 
z_D*wEqaCdZE=%oT>;3eZN6O8RciK#RM5U2fy%No)stNo4wzs$0c|>I`ToJU>apzl{ zYP>4!hXDBe2U1n0WU@y39Wm7`iplIi74f`{vDX||V7?bsta#yZI-ihtYl2p{CcMj% zb3u!@T~LNweJ1cskVm~CF9NyhR|{JO-NW^;+V;!%3={M~zc@i*;V!##FE1~%&5q|_ z(dhp?JhGdst8AGf86?zgb5OB*rEvbN?dyZp$wH~Z(QOvHQ`p1#+V1u?-bWW=fXhfv zkMG{z0yV?eNO5tS+v}G&V4;G21!C$&lB1U-#QBTT)HByQE+yvJb(>!J)9O)1z8Y$f z{M0p`wlK(K2M!i8G8IHQ?zS0Nk7s6Q^2qT6pqQ!a{#}QwSIt|FT`@qbq)}~v9Kgoq za-$I=Q|rL`#a3PAi8D2gLqDMf(88nYLJ2g*c;((qoDJFdkB10LiSE;_-Cd?A@HSp? z(qtDJ{l2}*^^jlrJ2o(E+t%q_l6G-4yB5uVO#6A3nJU4CT$X04cQp|qs%k33CqcFH zHDNbN62${0liCnNGke^_VHVx zI$y(gD^Ibm6GT~MireM9A~d;-owHPg^-yt{b;}2C+)5;dDPDcHx;wQ-=SLDIq9^E@K67SH~4%n@@v z)n3LLAqM$%KIp>e7@A=HP$oyt&LAS3O(N$wP^ljk9_QjmdJ^Eadtra-5 zgAz;FCpx~HC3zl1nn?R8b1MpM#^=(kXvw?&2iL{GWkvW~kXrq!{E}jnROU7%x>8&> zvdsbC(r@6^idj7e)}rT6M2|C ztC=OGDvmG|p3KaO7{Bjc;JRgYmtr$Ck-1x1IYN|ReI&Vd5oy;DXP|2-Q+c^zW@MMW z1~VWjY?JG6rUosSLmkjaXcKHoUoxS5?Dq`_lw?jq-Q^tM7MW{02L z)Jg;*EHK{){v$IP1(}lfTRav+cYcu)bT63`Enn|%%Fi~|!gA(o00z5^*mPf9B#6M2 zCf*9_;icYm1HqfR9?8QI_Q!ogBB?(()Y_Sd;C(HN76?LfEH;4&Qw_2_GkH*`jfP9R z*$&Wnce+P;Pr&NA2t-NeEQxP1GWGVJMDA2M;ptk$U;|*J03&HP)ElX79uzzU6^2Vy zunoR9tNcTmR%2&yO5m`lfc)pM;(=k1>uGQl&TbdIKe;uStAe>Tf<0pY`3AszPnKql z@t@~#A+4})abgtG8=R1~u}#f@(#o@q!NK(arCmzYWWv|;1uJ_^9_ zSQURq-lxkPlwVRE24CAjZZ@TKop&28pJhH_9uMx;UQl)?JAE%YJJL0>5Ss{>jbq5V z5rV~IS6)^(+`=k}Y@GnZ;UZ@bbTCb)sa?4s`qfvgKbA0x!wzN>&F6HoIJ5ncDN-V5 zciMvogl&@}K&D}7WmQXcFx0m;y}VqC2xYQ`+ngjnJQ8)1SzMi~92qO`Yq2HBm#JxoFy;501+#dW}-O2qkz8+rI2YH&Z z%z2oCHcz<94oXDn!@&#QiOA!xgyfiFW}$d=a+r0n-^LtL#D2 zY=R>usFf;P+9F%n5-mkz9y)waw(wV4%qeIj_=B`o|FSDri6Y6^Zwfb!VuW!(R;fg` z`m4ZIjoKpG)&uYuXPM9;Ilx?u(lsnO&5IJ#PnHztM|Fvs zG~TP@)ogDXV5Z_2!=N}?EaUMpL=`LRsP)sPdab#Tez7>=I6d09;ZS}KhaNo(@r}pz z^lQ*lS#G|>*LyzdRNmt0O^W|M;1k5Pkydk>(fJh8B13EQvo)xXL^|E~GnOg?OmfO& z%aZ8k&s}x~BrK2NzkJqrc@db-({rYKgQ}N6*e4H#h(h^-YGy7CZ`Ldz!mA079|r15 zL&AC?VxcdPtHrukzkftHNvErd>Whc}1Wi%t_2*8cB9-6vkXB9^{{1{TV%_N;szjoU znI&GD?`7M>oF5{EA?w)LSd%@OYDe22bZyt8@VvVkpOg~1@+v=MT>{9bWk{AfHQ 
z=@wtebD2MOSIWo~|Bap1v)ea&g5~PA%vZCx2p)KQ_fC6lv{`y`d<`^ptsa_45{iy& zyYDI0>A}Gj3s){^bLh~I#zm)S z+NTV%)4zIJ=pwNAakQlG(tdXMi{Z@6Cg#Xi=^^`n!bR(HGGZRxEt>NZ^##bl^#pw# z&wiWYto%`HokfL*JBPlRBMoPirs*rIr-9oNXzoE9EsI^5(sQVN&9B>cQDpNChJ5%*+yQr zWJXb%)>4B>JPucOFS_DZEi4isjJbY|m|R>xou?{M+h4iekY*F~+-&y4AIVJNfO0xI zGJ4~o5eU8SJ9Wy+C9AE*lNqY6ZMx;_x`WjFTpkMo`oH=px^~Bs7CxGei95L43yFv7 z`Q#IZGVmk~cP*l08?A->x(qmA10{wwTuh`qU)QLJ{`ZQ$$J-3lttrlzubeAyv@EY( z)7FZelk?H?{)X@rCzzO)uza}A-We4OWjVqaBdVQxndD|m!>R?ailf)G^raq5Z5K5&w6bAI^^MAxW;gM1*JVe41YV2EY*K1 zOW5w3SV7;d={nG4Q!y8fM>RcCpPPZ97S%y~g#gUwN|nq^rtxC6jOd)ILt$cRW~hkP zf>iC1J3z5mFu*nZIbi<+pq+e9nKPH{d>{=zz9Uri;quQ1WWi?^@FyZp)?0|EvF8vQ zqd}W47=nF=V@Z)yN!o54To1o8X$tptkJCD^rP!f{Ku6yr3I1fumgy107@UngM|Ii= zOjOB2u(v|e(ZUdGuPqK4efUA9nYNncT|7oN%o5PDOoHhYyr>0ILq{qhQ^uUo9n}2Q z{g%LpXRk~p+Z}uHQFZkmgLz!U;lzy6ZhD$w^aHoEoo25Io!2k7FG*dX*`AFxY(&H) zT7)VEEe`Y*3Mux@H+3W$uag;rBTn6NilenTRSb z9p@A2Vs_`tBhY4GgJNUH^OKdLO@@KCSY1iOQH!sBhiHW{W0^VLNfuvVwIw?2w0$$(17|E?8?5>C2*t>b= zsKQs0H!_sciZeKFL)d@#6sgg9&Cm_DPK&aqfBFL=Bc{WOZ9UXzh63M5rdq94y?dmW zWVoYixih-dqc#mV6r`#7@M$E1XSa6uE+_XJ_$24(Cy%ci8JvfLMk*=h?QBiPxLT!n zO@I-%<{8;WdBitXN)HkKV*efzJW3d3i z(yq{dE^W}04Vr2=-$WImU;`O?Sa{jYR7QSmegC5sO%`%E1mdDOuAWC5XQ13b{_YIi zjh$K4#+B}F42}r?vnbK0R|G6(!ru$QRHqZQCaTcWk+i5!^~nn#E; zr>pUs$|?NaGFlk2EDuj_LewIuxp_crEz#HRxgM3&zE-%xnoFOE2>)YvXzyUFbF_8k z!vtw4zI1lEg(kM{Efs&}wC?qBT{Q=D^>1`h_468XYqQu40%$Mb7Go?=QK`e(#;O%E zp^phKtSRHO0EWJ~TC#j+o(Vm=Zpy>)I2EfSb@h}6T`SX<&b-KFQ3?bWWe|(+RCvKY zMMhR4XOob#@y7=0dB__F9|b|jFqT1q;REmHaSXyLq0o zUSyvG#-=|Qw_T|&{3XmB_Us+R0jHDt*&z$J#+}e=V`MJsahF&o8?7csV&4*R#@m^C zR(jzByCA*h;TN8*xzkl5^Q`XmQ;tsYnoA|yk4@;MzCyBVgLhe$j0!~5)WnCmAL-*>g?$qy4nPg^#ygJ6Bu{(=q4CVwX`3y}y;#pYfQ_YQG2Id7lW2ImM%$v9d zw=u3ZFuNR{e0B^3kW!YUTlRD;if)U5jQPZlZqS29V?%Iv5}Xh0Q*?#a>lY@d-yFd+ zH5o)f?!v0_@i)EG*=!?O$NC2gd7f)d0Th)7Wh{{iuQx zFW7?oLU>h3(}NH$TPa22nXm(ai{q?DONllXhc>xWee$3cx_@LIO+OB?@DX-?U9U~C zs-y#s6O>VdT@*3x?Cb=$GDef>L#|N;Bpw+OUbp0c`UqTY-KnXp7{0udx@?s(mN}am z!4JZs!Vh4%h`M3`i`UpL}xN 
z-wUx15~RU|2Qx~w+R>qh*l}lvi}lkzZmD*MNNjQ1UJE1t$q<5EUc6Moi(dH(Ffa9xFP)@y?(Yj_yG zrJ6zx;^jKsWYy6&jd#h(W*lBg$9Xdp_vKp5yJ=OZ`cmaqHr}HX5b$#S=#M^e@4=_3 zPxzgrNa1yb;Wx%_%g=akEK#t;Y% z4=13$AbRbpcXdE@!>bBrzWe5{F8#;ult~jRe7kkpvn&foFqzGpbQZqtT*RAocVezw zZ-As@Z)!WWfvgn5G^Fi(bTE%+`;{?m;D{tcXEiK@$jXduUtispJ-RLB^nS#p7yRGB z&0m0p6FN=&Cjl0nKO+%q3C*FvAiPtFl7>I7?m56Osh)YJ&jit5`Atf9z~Cdz>Dy}{ z#jzIUIqs%~h&i&}2oO19r1}Ge3CC+Ws=Jq~>46?Q8_8}BOX^I^; z=^_!aGJ}wwihr+4m;Tbi(((!76{?vdR&b4RS_G08N&PTQz*Tv<8s@V9q{4~8H`6{H z*s|K8O=5#wbbS*W{^jr)NDHk(TTfX##GygERLn6J2=6>TX9|xlq16^seL`bI3hO9H zuoq<{Qk(Sg0ku`y2dYiK*_gzMv!jT{^e;nQ|%^A2VOxV42h0cMMe5rco!8QJ(`LD zsK-(wqO+N*Fw)uOaAzDpfH5*3z*UZLl;+Smcr&Ix^%JEca1cO~cN0^GKK@-lkd+om zp>KveSmMeMf` z(9<^r-ztgnqa6hDR0F5f2nk)WS%7+RxNTDw#Pc3rXKT3R`~Kio>fu(}Geo<>AU{lM zDM4mYWZuDGCMMY2p4uf5y+TDL(ntY_sdnWLz?8C%{?_i43+d>Jj zed%}Sdmlr27_5i$JG`Gz(M=OZGPBt$w4GRq75k{9mbPWGkp7$}7QZitS&rnJ%uvud z)bUyB21*wRmp(M(1j3u(pO^vtY>pnsA!_-%TU^#fU8>~%iQpSBM zKhJT!o4GeQiQyb4(|rjYCgQ7a5BmzwSnpYeIe*YgEe16pE4=H6z;yOLP)DQf&jm(C zGydclq8NjH>wFJYR>gCxi5`Stad!e9JXMY-C)fb%W4Zh$gg^{TUGa3CVtHc~i!x@Mu|EF~!LBsuiGfJ! 
zNdnfEOLlTzjQ!En_C~VU7F2cmraT@cmvx8QVfUX0PgMFF#WA+$5AaQVL4ALWZ2U=z2(%s0= z-)LezU*aO6V+Lmux7NHKZqnuwN4#3QJYRD7h7|@V3;e2R|5l$(a6>h6>R!^~wUIrN zFHowR?9C;9&&JEo4=JaPC4QGOX1ao}Y3`_dHWt4gJ^sEK4{+F(m}H{fQrgd_M*eXB z-*i;=bW!;5$SR1?G6X+;B^DgNZ3z&DML!EqPRB?Dw?u7Pga0wt>a4?rKJ>HY;QczU z@Ee++GQvQufATMG#y?IX8Sf|vS5X`TzHv%V$Lk!YcoVb5St9qH`){<|QN3%(^Ud$g zkm;QW*#w=goJ*Pej|*xKNjWxZp-DWI`znX?70YnGZ9T&?@HdCQKy)s9hMWYBmd$YI znceK2Gm=`pjP2#X*pr%6PXlf^=ko}}I$W{+3o92fSVE#=u@*UGj0R)y&ou2qu3XR_ zXKITl^B~r_xB}bg){Zsu$+?6{1}6FAMA#wtRKgHv@`&uWWLKwt6~3$E0A+(-KOIQF zBvOr(#6=7*3ZW2@jrk=;OUeYPXF6Cn&?U2N=Zgpx+d^C25;1RyOdX>iT$5>$$upKP zXqsr1B8Cc2o+jUErh4qM(P($Lb_R+F*)DV*aoF4u$&;!l6NiPmvc!+S*Svt zVU=p`T*m-ho=8eq@|M7dIy9 zkES#V@)%Tfy%ba#{-G%KQi4T_^434w5`Eolg;`im?$Y|<0J`;E6xvH!#Bah-uk%_> z=}^Uet+osC@;WTT7fujyUhELk{q;T{cnh`UNJi#B)!HH}IV4+3&HOq*@LUEFQ1p(S z5;t2=G;+)O26q9$tqYAn^TNAydZ^e^%D0cPD+wzHQ=cDGIwNFj7Ej1WNhOj(`4k!K z7|(DIyArhJifHW~#cT)(YpUfy3&jBxPn(QIbq;oph5IG)e1kt>vN12dlSwOKf<3hM z%=1Itk8x|YEjg=l!|d6K^Fw-d3Y^vK0tR%1EhWe43hapUE@Jrl}FF7PV(QRX0(NyjwdE=Ykk9plcyYL@PAe^2Zns(xe!6q z*UwO{szZmwJQ;`$%vW<8`#pt)IWx+Xsg5rrQzWov_lA_WANi7+h6^NZd3e{EOh_mQ zYir5QGvUr#j?=E7E`kyazQ6q@?Ytf){4+-!pRAu2J~R|<2+pPTo*&GRO(&@%PbD}h zQh%A=>AMNea7;oR@7^u#&ZiKJ_j>DS1ncowUuh&A&8B~#f_S~bvSSB=>pw6`u*DOz zzMil(q#ezWj=>ATdGdJEu<-AQiACUEomlZRwgLc}mm-JAEu%2>1}AVp z`0!ts4uni4ljPLYclxstVq^H_?+mCba3);Bcb~02X)teIh?7)h3)84R&(;ed?5&$su8?SJ3q1OSM>ys^f6k^9re$Gj8o(qHPRm0s=8UK^a|2nzK@?!o@LRi$G zYU}HU#CfE!;)jpab!ZsqhMBSjsQ{KEwH??^gF?MYzz=bh(C2mP9du-sBj5%RmV&ZB zE(a*-KA~kmNA)^dH@rlc&L1d4pn#SsT$tr%)kv2cC<5>NBpM7;qAsu}@OWNf2)~UVKMDlVvQtwzTmc@OLovy=YgS0l* zS$nn7rwp;>g=`Rh5)b#$v1h%hiF|WH8ampcn6RWJL?QptJH~%na>rRmTPEjPc$%Un`g6{P0W}tCHgpK^7O2_ zAyvLLewQSOyfHK{ze!17z1_d+CgZvNWx>9N&*vwUEc7-@mF8wWNJkfEV7V5-v(RWK zjiF9dxez~VmBlb?^lLb-K7&R=J2qwBZdMrt!#ZDs2x8A(VvJEoM%+MUZ2Y#QV?}P$ z@Ob*Qgx-r>YLd_w8hVT756LamTL<|!r=&Myatr&epwF!Djl@{SpST7Otv+IO3O|we z({FZP#!FA148K^e@nfxds_B^xkyO>CaJxtey&_V6vgGfS51vs$>!)14f@KFEMcxZd 
zEQ9N{ToMNDb?-gE=0`t7X)T&!5Yjo|yW2Pxt&mXH*ypYLtgVNOk}9LLP?oULMVz`r z8L1S25z@mLahgIguK(K0ywTBEgH=44gE20XRU{p`ydOJ5wp_a%(gcz3fN$NsmYlUI z-zdIt3S#BxBSzeD-8o`-8q(;nqZ|E6gtj=(B7GVnQ*RwVhxa>03_le91pjas#c4E6 z?|v`kDyv!!EtT+O)jaEi=+6>2cV{}D1)4BTV_&yH>aV6GsXW9ByH2h;HyHx?ns&4d z`HJzQ>sBpk1P&MmKznL`IV~GJhyMjaJcvRgB0Teh=K+A^gUNKCkh?EqqMPS5F4ger z0MRm8V>t-aRss2D#d@ldXsprhkzw2X?lKJjfsnA7U%H?$n$|91>-AZm2A*2d%F&MP z^fm-nS4r@K!_M|~AO0#%PEJy#GQ^;um5SFis7Ccj;r#6{Y0a0+rg-L%%jpF1e64x7 zzhB_Zi|p5B%z*nk>pXeQNs}9!py8BQ4 z$n0O`5+OX8wzf8-*<8T;>*MahLDKH^AQtg2er!cTVpH0g=wX{dTjask`r?glNap{j zC<2*{#u5OZ%H@lxADP#kV)4YC0LQP(o{;oS{^(LK8RzpwQUW3*US0UJ!r*;tBtX(% zN)%})e>bRf+QTTscJ|Jc`gfDEW3MAYu)afreN!JycI)lQ>mlC;%=~H1{bu!@Q}ura zYNAdLXKKciX^H2e#EIgAd!zBc7h+|qs;U?*m#^N5Wxu$^A7hn~8SG9cr#|7#^01pw zv;IWC_%8|;pSK2oeLOZrQ`8n*#-hG-o$rEYQ>k@6uX{Y@DqKGtdvzuqhFb7A(u0el z4BK@HdL0@5Gg^29ZEb7Y+1m@q&eo}VWn*U#i&Er+{csS(Ap3=rB)!@G-$BJMoN%FV zWIbc!q}^9zV`ED7h9j;b(XYR?i?=HJl|266Qh;rQos$#s(KsRp&#m5GKF(H}UwuAF zrUXB?`{BNm{1;^!AR{R)a}`eV_>&LzANLLWt7ExZk>7gBw9%OSlXoIeyw@#-u{)T@ zcgvc7m8*c?p+kp>u@-~V6rB4$BjyINspFAHbhbbVLVeBmycAWsy>bE9knK*LY*6M_ zb80g5j^_JPnfYiCoBamyHCy_MNs39H2IF79&hsJ>17L)JGk~}A6G!~#eeCJ~7(^u@ z5=W}%*KlN{J4tP!50O*|QY|6(2g=J_#>~VB=D*KT2;t zPanpDJA9V;U2R^Y01S98;;3p|lBypcIsQ_~4l(y&-q(9wUnau-cY3`(y8X=9v8gdj z!H7}@kC+&rH9a!6COW9Gxv}nIO2CaZ+_*FAcvD-&X;H`bOq#nauTxDMp=+X)iO==1 zW%zhEI)?f=N?^{}TPpqZFz~vIsIax<47msF&|?YT>T=)TJAEne zs#%0ysRR$5J}hsOv+3n;o@Rho+1K4!}1B%=uX-oD5^P0_EyQe z+L+^<4P@ZrH$Gl6m$8clOC`7TsU-`XpQBoun?*opx|YR1ogG5YYJ*>6?E-W(q$k4( zjyPA+Vdj`*XE@EVZvjOT38=N<7`d^WpC(-A`N?xmBh38u4^XB8&lav8#=;!($}drI zIL<**Bv40<+wgpFx$EoaQN2-@U;0Pyud>wekVs7n=xvGPy>f!w?EQ`;Ho|u)gZq$N z&}m1aJLQr?G-`q3iEV@Dm!W1=B-404&!hzVjVq29E@|Qr)Nsf+lkZwv@a&CL0uRB5 z&xkF9UQ%UBu{wIC*No(#B-CWc`a#m#?W{RL;kDMprneQ zw;an5bcM*wPaHq)9)zQxQ~CdYk>Azy-H0i8bMn2u=5q8|#%a(#$4Ppes*CP+6Wd*h z7N$Zg8PUmmPl+`1)AQ5y37e5w>cQR9DjJ7@z}`~gF>4FMe%3lMF4;S4+6;=|a$=5Y zmxT@W^LTxPqUKg>CU0}Cq^UaOkI)4C#f?<9ktlx)=nk|NkJC7|nKkr4{@?t1e8EyQ 
zvp}mU7M7T^eq&9kcQQEc#cmzJayo#Vg2FHHfBKtgvN4VkicMzk-g!|0gHuf$mUDWT=HPf%KK=G&pvl7Bmd%Sd{iC65E@!1nSt6~=6|a1orkv@P1v9DI${mD7?6 z7evgQVAGm{VZ(`BNp$u^Tb5uxUdB<1XJ8e9a~j;XZCKbtYUQ{#3dmoplZn1r0@AFM zm@zORRkvdFX-)2}J6LGQG38`oZrN2|f)9+vtj|+AZYJs9A~~J2xpGLPghj zUM;ZG4cM80txg2x)(+mhb)eL4i@PQx)DP~Kv+?$-x`s1oq>k$j7cJCxs%-QHZ^f6< zF?0>pvV{AcuaE9czfs|V4$m~WqghHq#-OF>s7hImhsDdPdlJ6MmvaHQRgcwoQePK2SN;S00R?l@$ zjx?-U6)Y+9TPx97k{$caeFOd8zY@}1-M@~R7Hkhzml8d1+AU>z1Z|cxiz7rm&V4Xa+64BEM8Gp~FdXat zIeman#OZc%DKrT;;Gq(-=*?=~*Kfa(-xOC@^Of3@M5l3rcBJ60)qTas?J zzu)NiEfTX*4t1=SRL*}pnBaEN_<*K6Rh(MFBX2yu-h$1J=Y5#3>d}Ln)TScWWFsyG z?I^d#spx(hZ7saxQs`ef=$eeK`NiIhjFm_li8KR4UH)41%oHCyW`);G4~$f=SNOzZ z?@9X&d6|iDhTG#^b!>`2Z2BJxXPBpRMs6=hHS5z5fv=mRl3$e=x0D(N2Y6w;;`JBK znqad50FK9|x5-x)91=x$`k_%mrb!{qR3Dklee4-{`zI6iCGzOZQapMtf(mla$(F|S zl$Eu_nE`<)tx=Tf4@}e#dT=!|>g=W%7h`FI+G2Udx-G zZVR8Djan=3(to|2{Z~vAR#-@cjEuZ9>h-0rp!^%LTmnoGY~{7@55hXQp~?$*i2g@U zJ6=x0+h0%BT{mV6JkPOqQ)VOFEctVcFc2;&rE6<^hE#l^)c+lnwACkBFD4nW(>q$b zkYMKO_Wa9)QrO|zjZ=(H<|=CzSSZU(^v_ekrX>~q_vt^Z@OeONw;nF!9oS?^Qs}pw zMZoFa!66}Ec}41^KaG_$0OCJBJ2VSiA3JuE@Ygg|HFfb^?4qUXJ`gDW`eM@HhuLiV ztlQiXlhgD3=uq=f8rRY zjkP+##pFyE{19)?xa>;j+0)lW&V_Gm1tNe3L?&}UUR)<0d?+_FlDS;FF23R9;-EUtp#Gr3dw{3GT&=i-u*z&pm+lhYA ziIYuDP{~@BCglr4mrTA`gm>R_VP%NwTvCz(b=fcNXQsNN(bDJH&^wC z<;ay7t#wy8v5`!LiVLluT)3MSKveDL1Fl!tZ`q&@RvI{tEZzd?rOEQ+vgx9}#h{KG z!y7fgar)S{y5A~J*&?vaxJuGuC^Fi#LXVTbq*@8QBZGsUc5aLtCfeQ`c-S5nDC*F6 zc|({qeg=K5T~$9}NF4RwQlC5*CCmctTtx=qP*6nfCR071g$YXb4&I;}wBU!EF{lv` z?UMy~kaktOks~-$n}(uZ$hb-ROdDRex{)|XrUl^AhTmpSEr_{I zC;A+sUD2)sBse}q>hsu5b+GkCZ~0u=rT1KUf5~{N%&{9XS9YllbvE#i)K@Uba~Ti& zN>|kA^z>q4TaI)}-j=18?k`%aac)(03(6X@!f>+CbyTT!txmk{v1l=$i!E{8v~ArA zScsHAoBWCh`SWqDL$*ujtIU%r2F3VkF{+umnPk0&1i9+v9}`lnsk-4$-*$4>Bc*L+ z2z?%ISM?(_qmM!Lx8(8<)pYKVsLOL_OLsVQuKf@fOZzsoHP1^lryJ~c{;p$MzJ ze|!LO2k3F0%bt1b-T617z0Eit&!Y{qp$^@pw4`|&!8CoF=TfaLdfwagA0w_b2pN2_ z2%o7|ov<3OD;ig&jFH$^xq)CEeP=b8pvdKs`i8oZa;h;UY$K|2)(j;cj9Skb&ngm& 
z(A%`Jn0KZWo@OfLYH-eU&waJe+eK%iTMj&&LCP--uY(K=+w+Bpzo6mY7*6~z8hSkZ z$~Y1Q1i_jZ1v?F49xvkloX{lC+L&bdM3lz}LiXL*UzG&t}tCN5!5Ecj$qZwA->rPHrAHaG3kS zMCjc#Hch-;L(mU`DE{Ai{a5yB_=;C-Vq)%y*G-O;x5lK6t=1W%-TX6@Bx>D{O-cf^ zsY!GLS3|tpDUkSrpGuCnDAim-h4Kg8yH`~5T55xs)9PG3cx`PjDQfB6RrT-+7M4izj`xx^wX|a*B$;t3M}m+QST%{KzEyNY zmX-{2*~lKO^ZK^-GVSIluUB{TWOvg1J5Z_aw z=GWv#)uVp*ffR!e(y!;M?^ILoq-&Fyc9_8!bc;gOlF@ujaegjM=qzWFBfF1CVjVMD zAu`ci?2Vk_Mc5RH6q|w~V{i7mq8Rk}rtw%Gj*MX^Ed8|7WTb;<2t#5K2?F*i2Ty2T z{JF6HX}~N}E@Kqvfn^C*FoKnK-F;03w%N(zHPAY{oAPfhK<)sn1ct+%lM`J-O`jrp zq51d$Y6ja_;n&tUk?eyJB9kuXdcio`xIl@WqjpPntFcu4`T897?9vt#j-iM7`vEbZkql3AkafiswSMeX6~Y$_&%F!ea~tC-I2N8iqxY zuO%;4m|(f44g+rdz!sMuqHX`U{^OShNHIt(ePZha(d%Kg&~8_n5Lb84=JGd!aC${@ zBR|2{IIr8KoRjV_qE2&N6Mga8)%PbaA(}j&?CIQ+J9pw{>Y&0{M6Dkf<}fdbSxH%# zk77RF+IcF#92^RFPDUw7R+9rbT5ir`{=w$F7-Lzj7&WN%VcF=3ywprRRUgrkC%OGy zncM|?<=O{$*NovPpmWx<=pX=^-=(!sO5K^mFK|PaS?`CZQ^tfrs;e6LF~@l(+Nl=^ zpz_ZrU{m`B8`z7Bixc-RacCGra}Am&q{(@SShcfXv8)?(yQPo*yUls2sgY!?Jk+z@ zhG*JF#x(r2;fRacj}u&INUL$f`)W+h+28sri-nxyjL~hWu4zQ!IP-Sb<{*nMJqtas z48dawjWK)qlM^D|^9X5H9&u{imyetyWer6do$`ou^12$05Vy*Zf^BAWul-wnGpoQb zNtIzar+q}8I`yPO!bOPHE3praliHuO6FL~r#;le6X!5 z?O(sk`d+s4H)(w#&VL?#!>9icH{8qUh60ZmQQAtWR9mlU%jHDg7zc*evSXquV|pZ);SG*w1)>dq}ifEk_3eL>i>e8xnVTpL@x| zx7>oNzrjx)y^ot&WD<$YsY~96<8j}CthI9*CrF?$bC1o963+HFxH73jYfbjH>il8z zGGkU)MWpmuNroHQFs?UIH{`1gOw5)BE+BhSR z?3IY)RW#gYXDUC= z5VwBb=&e$gIs?~2T?a-D30F|cIcf4 zEhy{k+VesWFHs4X>Wc*g3>?hFiPJihip5?Xmd<~a0=0NkAa8{>*7_)2G!2?HU$BMN z%9Y&8(cI>F+b3GgE;?Qd_WEKl2I=>>4w2P(dz?XJC(MO?;bY6~WoL#MoE84Tv?OTOa_VZS}KqE-c- z>s@t6CC#o0^lxJ_>8a6gUF_;j=c5>@WOwgb7;5|Mih%3G)pUU>aLM;Qb`}E)g+qvb z%m-H%E5b4k%^UwVDf6#xAzQO<_5X*gZw}5hXueId!N&H+wylk|$s60=aAVuH^TxJq z+qP{xH{TC+tM2XppPHJgdAfVfIejJ;#U+Gl16QsjfX-<^O2IP%DpWF*^zW82uKI88 z#btFdWSmT|2}jFtgZN*9BE(;}og=2|(sARPo>%?49fZ%@%Jvw=PV4SLS zq)D0SA-dv}o16ET?Qz;K{*c>R$RKjlvg@DDSUG%ieU{L#Nr`4Ja~fWJnnB-VTE}dY zOz8v5WNO~($+JY5KSTW8M48(Yuig(7#6VCK%@MT5wtnb#AoclP{c15*;mFG4hhbQg z#j~2U2qPTXVQHE9FNuYem8_}Ae_k=R4e%Fg!;!3Sy 
zOBG(;>D#c;+{e+g)aWSUcW#QNTeK@1D1_r{3#$qA0F{-SSgP$_JBS_eht#)jl!l%9 zMTz3d3<2q)n9V%LSl;iQ0iA3n3Z^qmpU3)$#|oC+UYGO^BPKAC!?w0C9jgu}gn38! z1SV}tJIhgZN}o>bqK>n1#Ydy-xZ)>SZ2w?m6rByDPG}P+_>yAj6|~u;j4$#0%oAI; z2a58-l_&%TDn-i{awl>QjlW#!G_0&0m4p63nmf{O)G=c-M=}eC;>_uL5^AoQEJP6TvDHWbZ*-ZmA`@xX z4_cj2UB*HU`VBtl%wL~vKk2;66ABpTdE1^q97nF0%boMXwI|Y+q26v7L!a>fQUAeb z1F+Avjj>Z#aSeDh(Em$GMsj{eSj{zA|I0UCR%UzQj1u>K(0QRkVN7!h%g|rdG;UXE zAa+|A)1|aTxa2&Jp&{?&Uf_A~^6MOE>Vl?>>9du3k-l+{uiyhh^!qqqlPAQ-4^3WB zE+!W&OivsQi8cfsmQqw+1Q|{4cfPYw@U%1-bUsD8(VB+d*@=a>tzt$HafkH6cgIz)s!KAXeWoTpaoKmFMf+p$-NQ& zO^)Tl6RF7(R@w@+uukw-*bF{Ld|YL12icNh`6lk<&ad>nMRJj?Zqz|LGbs0qnRc~; zi1Y;IkP_>|4mv1C=L%hir1)AKM+=SAR-&kP>l)QD)5|FD<&uybQ+3;9Ok;F|Qq9zM zC)~nJt7%yvRhlNIE)|nZeh<` z!82Glt$ECH$UMF{$h}0e2H=$xi!_9Bjjwp2SsYE4;l|nYITDbeeje6;h>4Qlsz&h3ysKbhQfBVCcT~;9KzZU zpLNA-dV$4^RU$&&z0Tl|e;Rc!q*5&kr>|IB4A*N6$mEcgdh&R5r3}nB$hZ_}|12FR zp>=cS51r7>FIn@h*qJys+ns9CG0}axQd5hHN?40>{AJjN# zh>a}2vn|(s<+72{lwQ6Uz|y>9$0XEd6*+wrVQ-Z)Rlxxz zkIVI0Z`;QY0b;j@IZ@*on(?^S!74+u4Kn0;~UWb zapebKXQPH}iM4~<BJ|0jUaS#k0{0G}#`p=Y7_|lwW#b_nPInxwFmru#YYyfJb%{$@OS&Ye{ z6N#duS_2WnmV9TD7b!^fuF$_YT|nz<=*0vD_q$C`9(!1Lx!B{TblrS#_)$tI5bOr3 zn3%`_FaONcJ(2uL(yv$Z)+A3jBLf4avrpDoZckw9@dQ$y+D(|7|6;YS!~WugV1lPf zt~NrKC?Ol6ifz1<3Gzr8n`S7Qp(CtP1MW(e(Zgf2?6GxPzw!St@IYdwkB!qzAs|k~ z&ywf@Kr(Ho67qYua5(xY8DZW~KqY<9JnKvI;g_EUzwApDMn3<#%O-H0Rs~sOIz+%7 zTZL47IZVY$5VgD%uVVV)(zYcxs08ez;WNBWjN@|C$rqbIa*M73+f-|S$1 ze9m$`NPWQ)FW(0RSX&=z*n$hVMPUyo1dB1ZFDC!F)XX9@CNFx@?yj)q6GAU|EAUp* z&SjV~>M5O>Ki!sjjkOP9S_m)9u^3BX+NsCp$q?h8JJD6_Ygw4X;->NUg@u&c!KnW# z!{HxD;g)eZxjA|EVvI;a=B}OUkH1t+9~NY|o)+)Jx;1AzjIa=vd(`Q~+1- zfCQg_U(rBb2Cj@1%@i&C78|lOv99XQH7@^?p0tzx`WWH1nv9SajC}XN180d9!F&N; zbW2>UYyzZGsG;=?A3n|Hr?MOAqPYr`NFC7{A0|^v9Ba9_bA2fZbcxoP@ZSWJQ&Fdl z@@XsLaiu`CWv+snX?5ZiXr!jg!W-sUO@*^i#&%vaj+>>G2om~gMt985QR{@bn=a1$ zgZGNYu=pG%soh(hu`yUH1HJrXEY(_aYEU7IXW)KGslaU+T7`Cf*f3rLxjb=ribdCP zCGJc@u9dcn4VtQA383uXy0ELx?adM`V;RE!LYcg7uDFqx;65sxdQf=3Hdf#80jC?C 
z>g#!)#j8B<<6x`~8*452=+OCKi9!fjdb(`2>+uU^6V-s&x|u~o?9ke1LqPnh{wMQy zIMZ1ymbc9j5C13Lr7zPFD8Lc9ySgG$Q8SJIO*C=Yz~;+;rP8LaAR~gtjB9b%11s9{ zBoea~daSr^#3g=~0E{semDgAQm<|+G-Ss{Xf@e)a0twjKqD)raa{{(PH>#|a#Mjgx z2gz*R08kNGTmvNEjen`2>51iC~EapGu0a-m>}@{6kc5$=`;F=<;5k(QT`!T$OQMmK7SOLa4h|lN5GY& zx^pc)m0))jQD{;|%zQJ%{5W+>ZUJ4r^Ve;|4$Y85=CNi6@V~-c2`IH2&(2Sg>KvuE&-lBXXI0>kIsa&!73prb9%f8|eO*m|$=2rR>L1+-Z=@wG5G2((f6SIh zX5ZF^f{-Q7zrQ9t#aQD>**9lJ$tw0vg2Pm<7f;A^jNGYXe0YGVlP7%Ol^?ztFaUQr zm#FwxnUor!(fL3()o48t747-P&+E;7@`UI?|8Y;Qy;B|F-s$MZ&FHQ#A-RM!*g-hm~ zaRe5t{+K~u>e0!|hQb~_)mUjFT-2R!>$Y#AK@+@^*#qEB6_!SM<4uJ3kR=L^W#QnR z)&5+c3{qHNGVUchV<~x~na)-md-R2vKaipW7trWyJuGPk&t>{g)gu=H<2%tKR<=;p z1xQ_nG;14i)gUmY>4yDzdr;+{Cd_-}PsDD^#^;wrA_A;HU4aYXf8izNTC6}!-7CrY z_>rH!jwkHz6kY_XvzbgB{ywHdh|l()n(d6nAdW+%F?shn2L2SO9~Rm|e1QrdwVuBP z3T1W23;_$>Jfebf8x$!YYi}axlEN|-m|n)Imt^GdrG3lbG@COQSnmtwI+y$AVxuzI z#l6E>D&64>OsywIll#RtWmsb=HmZy{`iT76RwlRMRWqHl^0N#2tZv5;D&yF)N;6I%u7cOXB*-edBZ>bI0^gP)`3(Ht(G3gVl5ZqV_^vT_Hj4rhmFE}c}RpKshK zO}Q@^+1*`BrsdbY1Lw^52{4q&l)L6|upCTqOAg1=>%IZJuvV z-?A+Siv92Q?7Km$s%`2K>=C$M7Af?4DS52|ozQ=BBn$mo)O zu~J)lAPjxxWR&B**Q8FrX^qQn|7T524F~Wtjm_eCrH;a4sq&-is}l_zl&X&F?2{{O zgjAp2b>JU1Hg;+oOvCET4(pdY>K#U^^%}mcG`mE(dL4p&Cv=%|C8_cE67UI9Z03=f z+PH`R=?>iu5Q`?#Ip)5dmWJ?84tT)@_3jQQ>MfR{biF?kza@FeSY7Yh3?%;(cUgJ> z?%_!^hEipie&N5n>xWY=7B#<di$l zddvYbkU#r6{)q$|5LGztu`>oE%0d3iF?b5LXAKxXl3C%INsyXFt z0f-@dSSc#l0KF#JHD|DXMM;d^MigG-tbvev_WV^z0>R5IV>)+p(!{P(CHNl4beuG( z?v>Cl%O^59&y(O0x?b_04K?qcj?%>SSL)h@*c9a!erd02+Ql=ITQf0jyrWsw~9JuJ(nyWEh33bL#hE9N6fSblN>1&G&O%i4M$K(HxrKUE*mE(@t8 zoQPB1VNNs-C?xd+`g#b8;^kh*xY^;l{8_^H!(Ni?RF&hL3G9O7zkjO#C&sly*Lmsf z8Hl*>LUf)**oEP)Tiq^YM;(idn+amt(niHD|NZ9>{@-Y!iCO-T;r2h`@>+14k3UL> ziLo%;t0Gr9ua2|C_*q<9C{BPk;fFVYuMn0RxOGsw7Xhr-nuhp`^^}&mZP+rdZhq*J z8~H2U2f*?}(S0?QZRXFp5{cD1tir^I>0r3YX@^yQ2gNcA?R#za_wa?2-T5|lj`2Jv zUaY|gbJ7Axd<|2&z9!z+84h;JLqJ@e_d)eG0nHJ^S`DShDZInciV5+M_t^ekmu3Eu z|Dvko*rxTp4WU9aT|C#wID^1J%}sG6tz>smSklpu88m&8$)O;s%~aFO4HwCU-!JKo 
zkGlE(TRas7OE4lf)_dISyrf{^DpdM?Z0(uxwX4G&mT90V&d>gdsqH!z9P-ZGyj)6K zulUfGu5>OS0!)WQo_GYD#l9N%bcWI45rN%`%eZW&3TbIc)R22Av>ccd?pYUG&23Dg zbSg@2dRXw#NzLTMzLGc~GrYrOZp_={_Vh0u0e^)UjbwinTg@>8%lEhkpOQ?nmm9aT zomQ6P^DA%GV@li@*>GwjS-Prs+%eaT1e8E%$KVKcvH`v#jMb)GzXNl$>()^XhhT3ZTJdymWb#|-w1P@zYvWLQ!)8~lsR6y$RNEZQKKbn>1c}Tf=gMqbu`I{zGx>qq%NYeK&C{wx!%uJ} zmE5V_Qt!3+yB9Ydd?=Lj{Pf=!L<$u-N&I}QBF7*lYW%C=qq!}h>X+n~MPfQshAccw zmD{_Cq<>y8M67-aGHFBC4B74;dnwe>QErsUw+zh9SM1v>J8P~wJ=$oEp1?|~DT6*M zd#41+;26k(Jl6GBeJ1T;GQ?Po4pd(EmnoQ*LKN9PcC*JRvc#|of-idc4Myfxz4*S+ z7BzZJ?L52GP7s?vwa#C>z@cwU+F34WMd|(_T}uegejH?}=UPbTl}Jg(v0JRYY>Nbk zcLAQ_L!d}ua=q`R=d-1V){PdpryYztzc-ltNNsufxvB?d&L?GQN#{WN@88W?{vuFx z%skMYP}1lMIDRUveLS-nE-c{n+k70izT_bKd%&oKL9Js!Vz2YZjcQ_2Y2+C0U|rx2 z?{uw3PlOPzDtqTJ`C9zF8D6BBfb@ITmK`sXuVp-RdiD_3dd8~NE9&71<%q(gN#kd& z4=nb|G8(>%m_vd78vV2RFDIl71SBT1^1a+bo*4*fgH@KNe`a$G8NA;3@z_dk0=y({ zv-wsmaDF12Cj`r_o|u16REL`+DA#*`R^DMno+=-lhAkh~Wvti6^xX`N)ZkwKw2%g> zAfoK6$dyR`)mm@x`G9(iH>u6Ji5iq14rDZ4k!ngjIk!3AY)uNM;tCm*=*8s~+c=g> z@JVR!=dMoH9L%!plEYLt+IDXvp|&sa(}oJ3a<vlmjaK!5xZ4i{UU zgcG|*;y^SzT^|Kb$ov{t>tcV#lRc6y({w-T`Ck<$3QDZ&LY~NVh}&8+C75Qr#a?~? 
zq!R+OYZVEj1K&YwdSS@W4{7`5@};|1ck_2;6xBG=^~%bJ1EE2??U~;0VE+u9O|nqf zVA^HeHkP(Ju*iuhNjr!6fJVk^6;ouy#QpD`Gcu}##%HX{82 zvb`v^iqh^S72(fqm!K_AWk1&s)EdKW=XXNFw8vBPj2%8lrlS3S}7FqsMTUE zKqw?<6LWadX7J6~>Z(kox51m*j%{%o0)F zMSGx2WE{IMN4hhiG3QEcitNFlwc*5(dingB%d0XxIy}nA&~2)~l-tB%DmwxN4fb5!U3{-eQ(*N1+EPTGdaZQR1WM|J4GF zRq5JMS~AMud%+NNM2-t*)mcvEHbPiLT@YM4)HUWh*i# z`Ac9U-y(Xain}v1qs)-HGo?mdQ3bWP@|;3&_q7T2pR}B4J3>BUXGEGsQI9|PB%#%n zo+z<8V!ZvvPLKn@6FcOKsf%gopdN>0in`NULodeoNhZHM)0ALHDd)rYFlF`1RNA^t z>m)n>A!m)s7b{)PTP}qOsE+R$J-ZMq+n#T@q`_Ql(dN~aw5P`RCf2EgJEh8Az+8qL zf=^mM`&e1LUwaj^K!SpM(wRSbL=TGb?E`n0OR+6ikR_iml*cwX*u4eXyk0WqLwf3Q zln4M7!1Z{0^^ONx*SNgCkqo<(vDQh4iv=8x2H<)Cy5;AG;p6KhA^V>nv&ORJX_D=n2qBGV_9O{50=q_)iJGxAg-2VX5Bb{BzmU?si0>Vb zV90n|*^<>zNz!F2k3&%XEr0?+2zUi2UT7-x;G!NUuYTXV-**xE4G&M=8jOca7m3nV);Rc-!wE`{Z$SuIq_F z8Ri4=%?+vVD@^Br)3f5MlY21UVr^C8ckk{U!_0BF6O#IF*g6;YU%1ThySt_7ojQJ1 z_7xHx+XqBFB!r_|^Nn0g;1>Hb<@&3pT!;%6gb4EeC0O8vwV!Oa@Q+-D?moH4Kcqfd z@d2aS{lY&&z&ZtqTi51a^VndH6SAj6uo|Kmfyp7kI7r~{<`$t3HOStZwwI%eT23y1 zu{SkkAe=!H#Yj0N`dffZUt0Keo%M2t#lDw22t8IscWSZ426_T)`jB$RXso9t1+#xy zZ2yvtk+_s#-oZ_n?smBxfSIL-R&e6Nb(Rgw$qi%frL$FbQaNE!w>aP>?pr zz@L5^r2ShvLkAOcyN^g4O7j&)hF?T)3p1yf2x`?zs>nIbFYPo2V+P!bm}{vXDnQkl zD_c7GPB5Gb2{Sr2`OJay5~P35m~tUtgM{dO!6%`eF+-zyvkxyW@wLP2CL(;?9T@UT zxi>cAM$PyI&gdSI8X|$XnQW`5w3)khY4FwYj{6)aA8*seDx}AAIs*)b)bE5V(=U(E^$%vMu_mgMS93j@%9u!*f0g zIl@<{0V@BZrhF8Xvgd82ruym{wWu!LATBp|*~pzO&J14;1h2&ste@$K@a@R|ct zpoS+R7jYk}g%pe&oWLufBsLcg4Y7(oI`|+xK%d9T<*^pyBPKMbkuMNKdr6knqqcty z7nz*d;>^30ipP+-o-IQbq0{JCdw{<;2i)VJ`(+F2=0LGgYf5svBk+xg$+d4JSH*-H!54-535Lnr5B8K`uEh%G^AOEM0zsPWoc4|Eyq*NiP6Z?# zAaoOQVYL5Ynu|hbL<@ZT1^$?U2g*hg7R#N%tMb&Nb0e}!LHLbEZ>N~JK8QsuOvJn#t zjp!>=11iJ^YX)pC>e#ryO72Ff4S#fH-nlV6%Y!b0H@^Q0&aqH6i1<2iNpc?+`%3$& zeBY3-MXWZM5%VaDWEdLw0=)d!ec!%nyB*W<5`u7+j`xrpH3I+8k$n zum_D%^hQeS=SNMB)Gi%UgqEO>^U5}PI5v`cZjqBCOv5I>8O4LjusqBa)Aw8>OQJ^# z9WLu1YVk;uX<9mJehSNV&I`X#CUnWfazkW}{{VRs_x=7hPaD64X+``mc37p!#0+A( zQsKXLNnvwjlbytz1+o7&gH%N!bS7Wq{i3W>F}tU@}|sep>}ZlVWNnE 
z0`ZWv9jIgaogVZk_C_JbO%H3|$F3?xNz`lpY8cShN~|_OY|L9{hPbBgOAGa6gMO!f zyhE*En{7yAY}EX4Irvr|KPrPGj4 z%&Kds`SgdztEzN*mr+2W^Z`xh@2`O+dMDE(+Bm0jww8@yu6qUgsNr^I$*YfqJ zrReTI$7KlscW_}Z36>}(;@KHSTbo9Bp7Rr_039lb^r)+y-R=sjbJ5&_)A{pWIs=jV zRID{ZL0Z?KsMf&TKi?0Oza=eNk{*c#JQ;lx638tO{HM8ot+KjE_c~_hP$L{asKuKX zCs1$eY9zpLwh-n^9`V<@yc5IOxD^Ua6yGD83`$ok_A|>#uPC2X$D}4fJKaxQH$U}p zkE{gy5nLhxCUC?WN4u-71G&T5RA&y9`iDbycqv(OpDsfF**uJg`#Fim#$qj9Y>gpz2Z5GQ zdROC}lniEkE1l`fUl)p?^i86hw@tEgri`JPD#&U5eOb41?hy(rsSzX#W*&_3-8=}I zZ;4_DJFD027LwUHGb@&=cl9^4s}VFGNM0k=X7oXx-3Gk@A%++pHN9wD+O&de~zE<8&7n0~MBD4Lg48WOcsTt=LOFS zp&{8wqo$ADh1P$_vjQ+VK?GJ+_wd@T<*I;>k5ggki3Kc}ro_Xt8AY3*xEAC9vkYnCBlyY$P zaLPHyg7U7F*EhHIRer$Is9^B`FMXwZXqhK&m_#oP&9dDtr=xIh3zj$56!k~=czr{` zCB+I#1QZmPJyCqN)Ik{Wo* zlt{7vQ1EKHp)NrDFo`NQcYRYlGgiCethN~XiVmb%BX7Ho+7iFATD&1Sq)*B{J_QFa zZ6=T0>Er6JU^UL&Gof_VgbT~g*br}o8zI6ER%2Tp*l?6DCg%A3k>yeDPOWbw^#5w7d3<4W0~uF;~md<~niT4QuFMj@-Bw?4S{qi`|Gl!$~WXsb!6iF777@6F%n zxliwmXbv!1L~~--r?BmjbCZ?Mxr!E-kX`ScA1!mqeOf!G&p?d;M%2>^iB*y$WK?I+>G&Uud9)O5(2%7JvjnFUS8u zEnDy-amEiVWPJQUtaD&I=+#4}zI7^s-IFzj>|ni#X0WDQoD*^X_+j>_ctwCAMXR!Q z<~0^nPF+&@Qi^zaI_$HX8AD;7D1IO~iNa#4e-jcLbnpPJ)dZn!^8biC<5LIeT!gx@I)}Y4C-7NI<*;OE7j$~1o^pl zzeyA!sg}ElqgmpFCnF3oP%aAcRo4$8#1Bl&@7-^?G7L=SB+2M-LqfnPJTT`PvE51Fm6ew~&Q0zrMP5G8z@b&5GGoIA z<4q5SU46F+kpt!AHW@TH2Rs)H4n+KZt3inK9>kt_{YDCKzxU~dqeW3Ue`;VP z!}k*0*X9%{RIxmBe8C#LWYs-wfP%NWuz&mdk}*XWEkHTl+G04_9x;Y$g8@4#wr^aXX&9NGIGxtWRuy)hg1EDUr;MthL$_Rgd!V-o;~N zo%vSSCn=>fdOqNm>2+ni8i2zT)Oe8i6qKvQ7_Lj-c3Ojpgf5jZ>euHLf;i+M-M^1` ztjWnLrj$*|Tcs59xF-FFo5=@2!7lQZWq!0h=oqkGk#+k>O(&dOX=q1+Nz!z2q|SVy zC_B_S^$PLhXL|*{{=syUqFTa&ziT>wVf@-dpsJMi#2bqf64PI{uE-J%14G-%$0n@j zObSKs43WH99P0u&6#&*il8&?*n>*>onfP{rQ|qQv#tXrhXyOe4bE!UvHGV8F=LMdd z_NyOYaUV^~=;*cp&97JZ^7o!2|5)brkRGER&CDrOJpIOS?^}~A?f{qL8C=hj^+UWyO{wG>2hzAsFAy7lQ2!o@>*=kL zq5en2x0!Y~Mtlu)9772F%w!)tSj|mKvF0XRoCf?Yb3ClJeKd@sWxG9Z6NReR0Jqbb z7AU!fGg<^K4n5J`+tZnTrSl$4alDRD1(RP}5$1q|EYJ>9Ecn+yJ5}Ld+4uO%ucdM`Zyx 
zH$X2A@Az34uye7-oM)UvcvXRB9u}w(si~*A$zwz_eM3R+5KS~(Jj7Rwu|ZCIVC1!9 zWjw5;fWMKztCH-Mpj`Y`-osk8ADN!ck(kEcQ*KqFDlSV_;SyRIS&DpKB3v=Mjl3B1 zd1Xbuu%b_ki%oEF8M9D^el@Tbb{Y%}xWXqUtwzntq}-B|S{n(BIF^eZ263*ZM%TEK zs>ganf3wf1vNI-8<^LD-Fr`B3a!hRpyk!4)#PW^Y+zDw&`Iyj5McWf@V6qqn?H~7i zf1Q)1hdrndeL)k5zWU?fgWRzR^f~Q#IXBDM#Go%v6WR*OaXE&67Hu%xZ~tM0*_u1% zmQA>EHmbX_kN1@-*>Q?|S~7G@AlinURYy`GC%5zLkU<&|QRvnBYW zR|r2Gp;SZF;atci(mpEG!I%TeZG+|4hq?VtJ%VYCm6%D^soY zT_G*?ihulYX)mfox#)A;Ecup5NVBie^b^ zF+%IXX^y0R(Ye7hsR^HfzzR%`?0LY$Kfps!%u8`MVyv`r25%v*4KoXSH3CtKE(+il zX|*RaNLt!C#`-t^RL&s^5^{@4?!Ib$@>@|%!Mv=XtjH1=eoi>BPK+2nI=}p4xGp+( za7XqAs|Gez9|Qn@)rpw+{f8FRw<^u?1+89F`*Oq}sH)Sav$h|ZOx* zMuBiRO`lSP0&CDLm3-p~zG{T6w{)tY-LTw5uQ6rPMe1*GaJ}13i@8OKoVv1f zdFUGi3(CBiiSq;!3rR;);)V;b)I5~;9lEMN60<|a)K;9Y8j`UAh#59U1z`VJIxkH# zX*BZ_gpUw^`<59*oC<}UAU1x~rOuygSuSeXDeg+rxbJ1h8Nyov61L52S`HPB((YNy4Vh}YX_ zT$H-axVuwXM%2haXq+@>1W){&rK#P|O}0w6BoS?d1Iuuw)g{+hk`*tb<+{Q!(?Y!F z5&1?lC)}kgpkNIKkaTc`cdyRF1pt`~VG|jGI-BqbaH2?;gVI`@ZV6gtdo6lR8o<+E z-z8H*R*E?3%o^jScoVdXMtY^ykU;(AMXh!!&gE5!%g8y5tQt=qv}on)2%se~(r?aU zILSScIPWKhU@Vh+3K3iF#SR6~C7d#_Jxs^c(#yqrKNfsy>psxYzwhX!x~|DV>E)JanAPeXx_e~bIt0=#9_X{?c3fGY&rmnAOzD?Q5of$UD1yj>)MyZOo)tIJ3>CV*%7@NYwmhr8 zUxn6uK@FOG32G95+WRk8B1^7avDni1x~nmV=8A%D1X4Cjc6wZs)ow!+F^fKE?;sry z(Z}k>*Xl#ms>Uoc_~Iyd;04!j_&)qZEul5lys{y$F1N;oL5bC=xXhHDCNDqzyF3SL zzINrPi)FM%6b!2AsD&X>%XGc`0(I(^Q=X9$>$x(*0X>S6J=4;d%t@iYyW_{WUew<# zq}?P{Iq#3ImLaXT&%>FpXsPnoK9GFVgfXl42L6Gux@M^JXI=`6Db}sP1c5_Ks3x)0 z-mzqya|A2v))&?Cr?VWH-adfzY`W4%G#7P+{YA zYvJjgpC7NqOFUr;Le9X39Mq+mFne+9Z2mBCpuH0QGm--;lEOY(RjCo;9MTindn4_m!s6dPaUZ z_im#2P+>iqTPQmDWqmO1oafR!MJe=1yRR?6a}xY%Qg>B%uFe zxqb6V9`a9x^m@W81fmcoJ9Q6*BY#EAU%btUB*S#b&iz3 z8jjeD{fAV6@rnrG_G#*7eyicx_NuBLUM0rs7LAK23b0cSk9xxWj`ZWu+Qb^xo4IMq zL}DvA1+6nVdbK%G4zU|l^`HtaKj9$aoYn8S(_W3Q2v2aOEtSKXapP5Ox!s&qInhZ$ zYk}b)qD#aQbNR&?5{OA)EJb6}Y!`#hBF>y^#d~`G^hM4Ph2`=%bB>T4ta3SyhB>Dh{>_aLp}8?d+Xv}PrlSSMkHs26rz>=K#ia@Q zvjtFsFtCvfO4EKaC1c0ej#R_AfK#4u_e(TGEtwQ;8#IOz&b9;NvIOd8c9;_9m=(Zc 
z`6Ao1nQB~g$io%Mro%*7XYV{j+4%h|@XiZ{Q{&9{o+U#yTnU>MMFa#vx>T$qnlt(k z35>2CqXMKTb_sHGD{KUD#LxkuZugdY- zbOasjImjPXaxg(8cW3|7P}Dn}`+)`4P25>o>c;iynx5~kLahHtWF^=YlLjEktt(~f zJd9DoTRFJCU?SuCTR|iN$X^?XuJFUD*hbO;w7WsYez^F>D_cI@swI zab}q0puK#Vt06740(>IFms_4q?aHNCFJnaZ??rBh_P~@h{9a=O!~F@6;Mtu*;}_et|qJtZ0hfc7f~u{#mH;Bw76z+1=|C zSkB8s-2?+Z6jhiS4GKz@39pk4e581^aSawOIZ=bas0I4#NTGTQh$(4e@mL z*!+sv!oktlR@Q98o$+Tx(&%krXzPVe6^?Y7A-p-Cm$E{AqCFk3xdkXXfi2ms1&R@$ z6gj_48iM0;uMZ22G$nZ#?0E`foaqt(w6<*26#=S;t0RXSxGN|4ebNlFN1+(8SjYbvEtT*DdoHy)6SS(yOQP`a;+Q-WCk71HUiQwKTq+N? zGBmq_ZW=z_(>0$wdfIStZy#f!>k*MaY+5_nqd6MqjaHMPPVU^`K>Sw2K?Lw%AY5K8 zD7B_ludgYlvqgM)K=}#Dk~5Niz@}oin3V;g9+QKK^_Jq|6hHj&0U2d?k2*Q|Ne353x#m;~j= zj>@}`Qy<*~fdY0*H#6mR_n+ouBx&fZ4Z%zOb=&LZ>P4>C;Q}s&seN7zZfr7d4M5qZl+$I+td8Y@$~uxVbn7@ihQ(G zDGl;F-C_ICvyY#1F#OMy=|tXtc6|Lj|DD2Rj?d{1-S)N8;(PVaZ$X-9Ga~O4M)I+GGO{>U+wH+umV<9+Zt)0!m(6##DshTZr4HqX1a1B5Dl1u zg7{`=*`eo?7E=udn?)8}4NVcfGN&{>nca&OCH?%WQ0S|< zk44n}T`&GU5M7l7J4$IX1|t*W{ihfr%~=?r*QHwU3?NjgS*GIyrtdmBmV3vH&K8fq zsu17P_yw#DP12pBoV(6Webce$%#QBgonNS~MV-1f=58|XEhg-ByM`)H4PVUm6g}3( z)U&0Tx|-2{YPA3Gn5IETQVZ_9qTjvE)M&qZ;eTycPG^BNy%BFn=HjeDjq)0$T;-ef z9p+Hc2^T&PcTwy4;)i#69ZCB0V#WjgSg1rxr9G!>{OquD^}}!Uhqw0+YO-gQt3evr zt-aq5Hy+JiSXm8Yv?Pd{_KoGWi1eey|6NKWwsUf~TmY|_Hbpa3W-P-J>4KG4iYhZK z7KaZzL>n8aszThJnGe~~W6L5q0++ITL((xbiUU5-nQ)ZK@w;GQte)$@!hu~loFmb- z-6d*~Qvu8T&ptBoBG?5+m-fR`o<-%bHNk@Hr7|5Y&v;hK6W)FaQ`J})Fe~lhyy9%* z+o9F6t~10NBkP^$?^7W&r-0Hb2M>styACLer$*1?r*F}ppYHBX)@|d=K`L3;_+4>X zd1aq?G^((FZ>6s%8{VrcB>w@DKwR??(f>#zucYKfjgM2gn}ae53q~o#7J;4P^f6hb zz`TuDWx=o9e@OhH`l%NF(B1d`r5ya71#ePn-2ch0lVLe|63!*2n`dzmJ9|kEJvAq+Ldjj705&#ACuU8H|y@r&>3mY&+ z{e4ZCzcMy|OJybD>$Ay$2DG80hAJIRg*E3bh0hUgAS%!z$qxM`lf^V36nXKNquz6d zu_>wEtJJtf0g_H<1~5bQO0`s^y3a7dv_MgJ&im zK#_(Ppa$^lt>WSPHHMbkowvh-R$-?d@V5+U;nOiUEUg#%M<*N9T;ri5VAL*mP_2ca zg1W8pz3NFHNXH_?v(l+WzRB|1IfW>GQorH0wqe#?Oq-EO0s z5OmZn!=&ORBT&IR$$>WKX4uIO!oCsZ?LGsAJO5MDT5RYIVyqJ}cx^Rsn~n%OjDaQ*C53fH%QfqkM#`ek2|Q7V7aAarFsYuawJItU@wNtvLR} 
zR`_0|NhU-KU98tJp($$9bl0vz(M$^aZJ-pZY@Ki*F6M$0eUvA51NO@Et1Dikz57bC ze6VR6XO^NugR;Gc>D6I2x;WI7qZ(K37t&uEae2Pu7Ks!so3 zQe1;oK>ZQR}6-KBxc^StN%zPJ(h{OXP!6}>yEGAncCT9q}{ zqK#kZ+8GIl^$aJ?vyvs^|7BtKj3;B@VLTEl%kJE2I4-Cm-okDy*YYPh$3PVxFLo%qQ7xc18n z6H_ePkl21VfmJsL?j0ZEeDt+yjjw8=1E9-;tUyz+j^r4SK=6oEx?_JE9(8l@MsvC$8&_j}))l%XQgG zkC*VvhH|0HRXB2!X0Z;w0TPkj)nH7W$T@SNph*9|ASTb8g>?Bog%4{;H1Th5Xztk_ z>ydWNzloVhIFL1&!b{liwweSFt+reyihEPmeoLsDFzF;fg+W#M{n@g^)v=r-O&ft{ z>;eDq_iN=t2Zuf@Po+!b!Qkp!&#%~rQF6Bqecc<{RTVWLqT_~@midaWyfQ6?XGqf4 zMvU)U+|$8+@pIk-B3!j=Wd*sp4iLM(8^sueIFNn%;> zmV+08*%)(PJ)eJ()Oc39o+#~M75Qy~8L314W@Sa6v&-viwkKm!(PhB(Nmw?f<~o6J zwY5}zP4Vf==DbCjcOdM&*l{(*&ky~}irswHt}pNFo$VSIcDCQ3!O1T!taw-QEOSl5 z(ZkTlsV$EY%cpi`X5@M*Sp)S32wK!_nsuh*GtMugiHQTk|3I^EdTfvlpIwZ(TN^z- z97L8V&@g zUc{sgVhTfkBOj*mDAVb@!lqof7}(D8$Ju|8EseSK&z}gFSGjp)uBb}O#1WWQ(kDlG zx)Y}3skXz19B>Gd$3C>#f9I-@VzLTa14la*t%PaKUe1Y2?vb9v+3$w;N9P=)M6sDk zC>3Ixb!p13k?)r1P;JUrhh|FF*EwcvGm%_XZ5&7zdJ8MufrI?HP;tPwj~DR7x%u;z ztm!(DSF)7gH3TD$YS=4h=iOm}?{JXMLe}=}4xy5%S^Fpv^vs_J+MT zOz7|ay$rQ=EDd+jHfb35H$WFD%lW_!j7;MK;5H zYbveb^#r{1`LCANC%1or`USqVu1(l@3+adcvj4bonV zd5Y(j=C)C%JMBB$NPx0!*a@&!*Bo3^?S)JCuUNE}VeiwNbFM_Nf=2@S%ZjFHxoh$< zc2|F$5r^vrPTT=p`bSXIT4Vb)|9KPRk!D;|eACq1V_gE3o^k)+dP>?N!7fJ$*Qfu5 z1nn4x+nG-z&~O0adA2*{L*Wchk$A-5N3H{KApvy|-+#1TBg?`R)RV{|i-AKvX#Q$3 zuZn20I^77=;?W4 zu?AmpS$NL!<`KKlu$}l?ro1%CDvb%iVG76SZk6EJ2#C6f9EjtQnKJd3Card_H zGC+sh>UFmVL-!pLHXrkGA^uVHLGV1acf0%I#o>|7I*Yadvi=(*)jpC{~0DYW_IQ`y_k+N2dVsKUogFQ7z$JWFm{wP8e{D?=rZ*}O!|DAh?Q#lw^mv+=iweZ9O}n(emRag!_)5Z6_(Hu?om70ZXvG*zol`m$ z$-R$A-?1z6&TWgWpOHF53Av1$)bu`aDt^4V$0c+TvK{L#h0k|+p{lY}jLl=+EmA-d zwUkcA>NXvcS1!KU)v8LD11ayUO~n3y08EC<8|^1w@!Uw-Qpg?z+%&%iH1`2#4gYc? 
zXhiqug&quEXW~fyrkBi=E6*oeAVa$&xI4ic;{oHKbLvyQNZt9p&M}$gc}Qo>h$u-n zg38`kx177im=@aN5>m4|f%-rpV1XqRUgZBo#;GdFhP!nB5Zu2#v?bmb-=Ht4>UKvn zc5d%8Ay}mA?|ZKw6q8VGFFuhR={m2?q+k!Gu^cO1rw3I^&$wT(RtuVZ#p zQhBx|3Pwt7Aa)nrztJ_Evt_O)=hvu6u8F_H&7v2GRM}UgwP#|Zf44z47G*96G`@Jl zSgyEw(~c(N@3}>!E@8i>OdEd*ufNYx*2FDP4?f;}{pS1UmrJLphLm{74+1{9_(e=n zPze4tk=M*Kk^w@s#9!#)bfI0P-$g@0>}Zd`=AmGax&a*JAoE}KZ>W^bQfIo08w_jG z3*jc5D1-Mn--nacyi<2M$tFeisAIwpxl^y8Ej=n_e;WSn4RlZ}n)==RbL0C;4gDk{ z)pM6pZxlWK^6Gof&17fFIw5|S>d4H8fabdfdjJfyWv|2O5AmQ*{^PT(BhOoT?;$#X zwASdPSb|836d`2iWVSg_=HTkb$>Hq)rMfKEL+SvX{z!#K$@91bw{1kx?c-~0 z3CxW9HCE6bqM^(>=M#p!=O^f1U-p++6E4OVNpqG}BX3**6d( zBgI*C4@b&PWw-7*i93XYSY=d}2L57;yzUVo$@^@$?jjNTfq6^2hWo7ucgsrXbx`s9 zv&mXAIE`m;2Y1&V{4vn)_if)pu1<$rz)ypPez?$+R(V39vOU~7SqSw|poYZ5at4KUO%is~ zrm(dDu!uddB8=b8oPWPBB+>gqaDs_XSzPJ$>dm}=8G3q44oE1ZkY9L{XpHBF8pi$; zb)%`8wa=hLeIdU6@mpC<)9#l&sq*2RGSl#JA&|oz69Q*QT%J65bUjo) znZ@O9vQ)EuJ0<+@^JK6?L(*UcqPyMvF>f{TDrh`XD-wKg#Qd#;P7h5`E8?pQl?4Y+ zp^7_pcXpNlbuCWTrz1~TthK~)y58i>8d_{B2lx5T9urehMTjK=xNKXonJsc$Zx23) zTsMArQHUaoIXE&~as>_-=rsDR=WaQ)ZG14;8u8t1h7yV+*MW}M{Kf{5j0z5|r>rA_ zPq}n7byl(aAj_E|@K2<+DAlBk%$X4e%Sx0~ygD9r%QGFjF;#IOm|opxb7cUgr8bVL zsM;G-be+koDChR+%jA!{7))dR`+ul4-A^b^ciE|DN0j5dI5Mx@St(!;iTBwkVhf%M zIpzV-EG$!}Bu0}gboyD)Bq$I5am1kmtEkKtO94z10qa%+#|`=96OisCJZoJAkc%V$ zDuRIz+F_A8kj{f2F<#s^-(TNDC+WVEc!_cigyxZx6LS&AXE};ac0f)Nit@!77^p_7 zod=~WO}<;!FLxUX6>$<}Gw0^!DwN8RY#hLa5sL-{1<6%waV61eONoexls$K|Gh~4O zv$8`DXDO0OsKVgEMR$gL7S3HJq<)hBF8OC(k3H?KcRWLuq**(u9)CD`k1JFZ$d2b; z6uV!Oy4<|> zWH0m?s?S|NSSRSJii`nGt-Eg*8hKgGQU7l9=Pn!W104u)Ym+ZHSY}G|G4jcSOx67B zoVlJw;X3Y%~><)s11d8t4&bing_hxrY&o_#bZk3V=eoSrfdHctYf$ zg#!Q9li$+$49PO?&1ua9?l;D(8T4`9`FSl|GoCcka5LCw9(za1vswD@Q>pmk{Ha-i z#O$xDdKfVWqUWsk+nC2hpXUFK_nY)Wa-NNcDb;M#a;xp4Xa-e0Q*=;_^IZcu3D6mf z6&J)YKNP0#WF-U;fq*}MKNJkluz4&=J%**m>B#%_TtbeYSFEoO?-kmojuu$sC}dRK zX!}s*Hz+Q6UZv$gNl8EfqiY@^p{>8qhx)YVlz$9~$4K#`8$;;~6IR|hZP{6|rz&Yk z5%|{`m+|Nf!D)hp-nc!}hT-TNW5e5Ae6wAFu31mYP&D#ug+X}cdV4OPxuxGGn$-ozVtTd^^%vM%Tb3zTH 
zf!$((M7w+c*qtCcf~IUutLUET(Xcc1{AZUhx7`s2!GKDq}E+2 zK!5Y+D5SVz#EX>hM1Yey2ZnfYw}Jz#zbaJ$NDa&Ec+P8rGvdHL zNLQ(woxlwisEWJrSdtYs-F)&;u;W`5Eh;W=DDDxj37fSSMmCrKbSP?=9fIeIA79D- zb&!7@@Y7F}QCnPBg4&9HcC*Ol*K@FSul7r$1L_85JJ0JeOW_V8MA9{l~Nk6?8Jik2hp=h;&2sC5&z0vT{hc2`i@q(3cTbgpe)%fpLVNr6-%ie zxV}ZK9UI!^|F}7JmP?OH$9Qkg3|sFwk{1(-a;PyjrFtF5EDuV7Rf@t~OXZx;q%sRM zVQ48nB%%%B(APUMp=48Y`+Xu0ugV1D%nY0|4n{%QO7yKuB&U=YN~>X}`)%ukrCAUe z7_WzU-#m8YJdZJZ-7Kvs?R8luy3kXw70)loYXFD*c{qEryyy=C+cC~}g)-Ey$*87D z@}G0oBzyNcXMpP7z6;_ts`j%Xv1;lvYqb=5An!A+$1`bn0P^`(6NCS&6ot)x2)wZ) zZ4r~=?uCt>!-z;&{C&Kh%6{=-__Y>zInG%se%}%Wa(;K%X0x~w?+CY)vgyCOW(OdI zje5jSXv^`}HIz9|?yf>TTkhBeA`*Jp|G zB4afQbgi&sJ*zHzY4FOSN*G;@5bhHAS0oN6O7cNe7<~^`B28BYePk^wfSb(J%F4-B zMTE@;TdE+>oEcl<+|I~wn}a^;f+b>%J+kn6)Uc9#?Tt|$Hf^^zA-yT3oT{VvSEv2} z!vG!)$Xd&Db2R0AnYrXOggpyw3*yLI4{wt6AP*Jx9cfFO5b$vd%E-!ddF~g6mD%ab3+$gw%DiJ5((;Wa| zb1R2*7M}zkU3A=Gz&V$*FyeCh*LI?}uH4GuO-39~dg@3~!Mrg4FB>J~1yFkzzoOS` zWe`(ej=O5+Jsc{_=diOP`4cZ>WkO}K=>^6Ao6Gz+3Svdi0tpMB;PijKfv-lsUOEv;D0$C1v&Y(*;GgyvInM8IL#`QMNUwuTTrb;?7Gs zIikGNCMMHTea_tiH-f6pSS)jLB-Ws_Bp)nxwX^X3dS2!eX1l|2EG)k@(c~>LEAWa& zyZ#*^G~5g}lRlZAcnxSSA|$f0F*Ph;Yyb&}k@f85%|o;TThHEXMjB%wAKT3G^6 zLvt7Ac=~qcK6Yns&5&G9IFh;1PIEsKq%gJE0j%j*-WA4!GQFGRUD;nmnBdp4U$S8KOd-wB0I&U=M&u ze!h#y22pO3=yVtzIt66Pc2*9@p;^R*@d^EcJsLT2aaKgt_T1Skr+@&MY8G?R=@ljX zn-={3CoORKZkv(8mp(~|*|I|k2y54fftd&w$Kd4lbJ}f_6F)7+1RatavLW||rzCh^?4;C-Jy4*P2yP2P_qR``ygu8xq zJvDXU%{|9~2|QsEf}l~Het`v(v@3i|B86;|B{FBs6-EOj*VcP>3(QX{4#AK+Q%7K9 z3pZJ1NtEUE{-VTrVThv$Z7bHb-oZyGj{A{mx1E0{qADfiu@{oP z#o7zANdL)8ddmG&zjNJOb!{BP-dg4d#?6oi!Z&`-R{kz&S<%Pdek?{Sz{Zj zGjwmgwPWE;?igu;{&yuRL@=d?LJB%AE?gmFdCmOhOEZYdhtg3d0@!Lu#iKclcOxnY zt$&HAD(uayKywsx%4vb;@mrB9A> zJef!0s=GOj0L&KTY#q`Q)qy)p$%5kNIM={4790&`lM;2_z+f0u(cX1Uu{t(Ei+uub z;-{TCI*ud=N!+tW9~v+ttMijfhBQV5E#XIF3cPfTwY zPIKC6@S&~6OD-6jsUp+XM~V>#7u^@i*8YK0qt;p$pK+Ozqv2GD@=wR^Dpq{Xa5?1~ z)+!>|v_=3vn-~e4zxlfH?ms`Y5RK~*8M6qImCKf+2uep@|HK=fb$!+5E{_R>khd;$ 
zF|D_C-PcJ=g6fsO^!0vUaroH%F@4NcFZGsMF;dm$io#yX4N?PJ`vtMLtgR&tZ9%7zf;TX zv$K>*gz@n)G$PcH5OJBAd1ejAz#V2vB9*uRIx1Y;WDxG~RpQ^?msL z;Znox4vhC7fcdRQerphkQrH^MKR~g@YH?7Eh5+a|7t%3{4d@iB32G-tIY^+0z7JPZ{;XLo*{H6cK>>V{fm=FOdr<%){f=(Om2PqsW(8Y{8>k8 zZMel=<+;$GUc)jAK%MaNlG9SC;8)b|< z?pj@JZc|A=J5>8#{|>luM*OI9=8_{aJG4S8W5R-(YD@n}q*MC`2 zy$7IQHV&9iy15-Hf87dU9WK)j`u)_bxfVJil*uUWh>V8BAEkv2ovr~JSuS7E%vR&_ z%|Cd2f5{U?P#&iV_c`{l&4SS2f*U;uT#p-sote&5nEXLt)HoD(Fr%j4Y~=}u$VH^E zTne&MZ`k+WNgi@Jj=7>*^I51Qfx*_Q2plK=m|-+sO~bKnu`u;xjJ={!^4o$9X2pUh z@kE)Gw7^l zRt~CuY@=hG=Ju&jb8~dOIz1z2k5HCT$a$2N?WheX@X6p8zqP9!!_Xrh{fu3OSD{Tl zS-N0`2Q+jy&)*=YnW6o&!+I4JkLYufP~Pn5WN7A4U?}B6lN=ef3|jR zO$^VyRRRhR^Xcg%UQf8=(NZuab^|QJdhhL-rOY#zP|?Z?j2>e; zDQ5q9`K7MWN*soplw;nJft}Kgj?GUN5TI>VCpeatY*^yBdVnFb=?JR1Zi@_-tT&T& zj(+*3eR`kB*-j+Kgt6hY-1r2J=uMfeb_iMP9RQP{to!T!A-Z7a{9Q}rR&opjX;@O< z2PCYDr-G8RF7zSgKQDRBVmrOL zU#coOlrTsgb#x^*P^?p>oj{Q|;)rJviYQi$0&gAyC?FaQl!QFJ!gjGQCQ)cB{rQln z3_mn%mK|B~yS^2kuF{k0;OZDwyeER_NRRAmM+8Ps^bTfi7THuh{2p{9kLnKQA>qaV z#+X5-f?nSPVr879#z0A~bdssHyJT2y1&g;wdlQxJ;QJG?W;O#I<}jO;j>V@lk`XQj zeMB6>KWVn~hK%8=@FUs_2Ad_Eg}iY3H@mC1oQk^3`)l*`mEa)2ApCi#jn>1xX(PFN zBc!opIj3Mdm7T=8xMEz%B1-piZHzVX#STnI=HzXC!YI{0blzGEuGLHr5BFXwoQIK? 
zVki!Aas;lWA3Y2xc>KVe{@TWwW}eCs7Spn`uyB4mrGE(UZ`nV;D!<~=9X6uquY8HA zFch{K`>JE`I&`2worJ=J$iip^;U~tdf7FJ}$ynRdl<6DTx#PK|9$ssVGc+x%(q)40 zT;THBtL)jMjs}CgSINIyM3ys@4)>yuPQEsw?X2dM^<)t z-`Cd{-J!@qZ~OQbusDgztnPfRDC!M!zwSc^-${Iq`!J?9aE-A)eBV3R*tj`U#wXQr z5Ah9VF@G^azrRTRr_+O{YJktfXuN~M1w(Nw?fhVtG*+hZs#Lc};IoA@$VT!ov-?<` zo5jnG#lh>jWcnO2iqu-n^o4@UQ$Ef~UG67<-}V^;QkocB%xtB1%~W0W1NiNM6g$wA z%gv`IXC`4H?h4LELq{k$ZtrR#_kKuh8{$pXf#&(F`0idI50RnL87dGA?#R!4@iA2BDKr>-rPS`{7o&iic*-)s866dzyi^C~Ed{yZ({<9toKY)Pa!(=goTw{CA|Cy!LV1LGJ(XD#r%SR~Ivqqpcuz6M+!?BN z`v)hs=ZDS@eHSF9llbm2Yo5Lh&wI}?2evSrKH0A_t>IZHcqM1Xx68Hbe1YghY#r$a zPIc4Xe) zqzC0r`J2`o4KQAJY9Ddz5MH~Z9jRVpnMaZzq*xvW28SZ_BvR5|75q7Cc4~4|#%J;OjFt%~bN0^(@{}kH*Jv+N5`L$mHS)mj|P3v2HiRIVE!r z80WxhTqYBgk$6L!rJmHW1>srZy2QoXeGzT+v4_LzO%Kf30eI~%)+}vskA_O#=l(e| zYL>DS`T9pH+UOJ4#=~&hMIPt8of$L!b@$I}#~+cjw}(&l7D@OFKzD|QOl+@yNU2l! z8-0P!f-h(OTk*kZ&24Viw+cK-6W971(i?CR7Mn0jXV*?7-Ghp0Dr_g1F{-1E&`5-P z`$#3JsyBe9!Ve|OOLJdetK^yN}YUqRgaD1(z`zE(l|hj8#|IqK2v6>-VB=O2z@+FbUNHfxW8nM#F|-T0iQ~ z#Wzln#Fk}q*Fw5Rr+Kh#b+6hz8kbX~G)pLC%h|2m`PtH*SVLBUOSzm`@RY^7@1?BcxpjFTqusUfUi) zAQ)EK8{^d?i8^Pa?b6pzGB}y`8~XHi-)75}9yRR^=Bi_Y>-$5`I8hzJs9 z=A@EcqSA~FgX%kryOQ3IBBz6gkPV6Uqd%?fVUs*uf9kV?W&6IcR@GYDqedEK>{Jc+ zXQGH>#+akvFP;y9PH8d@o~=Ix9u9f?^gPQUaXu9=1?!$gF6@XL+aNIKT5t; zTy6v}v&3J#-dWb$dRV}wNre^8-+^f$@{Es6`m3%MBE5Y?MtBc$RMok^;hun;64Yza zBNmx?W@!Ff6rF^O{(^SFa;DK(l=&c+|5>>j4)tbMlsioSFEAj`N+r3MXzmMzCl_AW z%U-hkwG5`mievS4Mg@Pu%_kd(Teux1O>X4sJ=XLRO{MWT=F({4V52|q zCkWScB$_8N+&W}pb*+s$mYUqW?k8>PUmRbNW^21u=vMsKND1Lt%x1bbq|xzNca^t8 zmb>!SR7!Q`-!%e43vhEXdAynqmOut0b2jesR9we9!3Vt7u=NW;2vSr|$1})cdNcR$ zU9i{!BFxZNyOjhC7)|x8JRq-(Bz|$yj8Yv=M&Bgd1$pNM05_$v%MQrn6hU8gy`OT8 zV$=F-IljRc5%xNRTmEu6U+*vMzx3NNL|qKvNyD`w1(*R|Ag1%;j`6m2+Xl;>cNMSL zj&D#mPQsBKf-84>nMfS9$`zts`0z45&hCu{PwMTg7N=4*yV`@&VTHFR)~0g5-sx1@ zW}(_XR71a~>$zNdR--ciESPPWcW|SM%=$yyyh?@1h5O+f{L~>uWAHa$jVGw-H9iT} znW0Fklkt?VR3RGh2&~=D@COJgEJa)BDL2J+)Os_xQ(2lERsHC4L`eoX7uor_lW6`H 
z4FA4c`1OWGSEO5QETt|Mwr^DSHw`XNV_iY{Wgo8w#d%XdwgUMj5Rv0i_beS-I!}ei z8ccA-x_C3N*`!_P=cZz1V7pzkXFIK2fi4>-GHqh#kbK7vUkEgeuvsS&L@$c^EX&8& zxXBNx7q^Sd-V~I`XIEKuv6C+_*B2~cDY4cN<<-u`&R2c+fJ2j>Cq-&J&_|6X^n%-G zciPh^m?2f;AO2~Q`|GU*W ziNC?Yx6h0C>_)|s0u-uoT#=J}=WhpBxm;+KiJb6ir`gE;&0Eu0=xlcc(;qjrWUI;82;P?yxF{haVL);X;Z%B--2X5{9ax~lukA31Nl$L2kRR;0 z6;I4zY=@fo3keTf(eEpV^=hGXQTQ)dY$+o?1AdjA&kvG{X`_uiO0F;+N8(MUVQsT? z+EhFb(Ro7n)+;~lG9T*l)}r<2Y)}^M+Gfk$Kx5>^;SE0(I|Ag=2*n6!C7G880B3HOJb%2;xT3Tu#$Ag5fk8ju* z$6w&s-{v6v!shcwTW$;@$%Pi_%%5G1W(y`x7_Xq7V@WnWM*c>R(7)5T7}E;K_I;=N zN`MIYkp&GCD~xF0D1%!@dP$Y&Um{D%1NMkR5ESR~W#ldK&de-#ZK)5g{2+cLIXPng zFuqlYBo~HMx+bu1n~uDN=lLfM8zrwQA)C!20Ua6n(t@!9YjoirSTfZ@!;Q@O;;G!r@QD4i5s7$p&>k0+eU>~k z|9H)9utu()k36Ex!Di+jT_#(eQeVkf351kCIB3Vfn$(+pY;+O#^wAWylkpU&QaBZz zxQ1v|Xl```LT?;DD-t~P+tO?K*B-c>4t12vATy0*k1GE;;0c^h7#jn zs@eQS`;1*8LGEC>6I`Z>1sJhZyOm6`x>I{qqvHioE3?JZW6kTpGVz zO`dNijhhw1it@;A0o?UFS#XJASel)sv05wQ|4|B9w*3!TX4XOfmCSR&0^?$hkn@?I zZNkQCjX8$8oFqGi#iNSg$o6L8K%D4(B+W$1m3Z*VM`F+4=BVjBsU z*O2ON#H0fCJN|qbccpwA5*O7Dsy|DkCS?DJwF%vn{b^s=mcU-iEG~($QA+;Ynw5rhWQif2g_6^pY@{HKGI)mD-3F3I&1{GZ2hdqUT3f=AOobb27-& za+=be6WXU2J4zQPd81=7SNH!7G;f*b3b@C{oy$~7Znn&7&~}GY4-KzalMgG3pM}ye zVKm8TD%es|^QzMnQ{Dt-em_!eWHz*#B)!Xv)W9~j%UJ+Qz{beDHWqtUrHyHLX1_c; z*C|(xlpvdW1c+iWnvq381{D+*^HJr{(1ldGcjbkYzrIkwh0V5@#;rKH?SIudoEe1I z7*&VcpDMD;v3&Dkzcv-49?>5OV61R?eC+FlpDIS|Juky0DS`e^k`h z?t`3PYu$l2>pZQ`NBJiPCriYl(OQtC`4r3MaRJi9Gkct!nVHVTl$^(oqYhvG8`9Vdk1A zBdOhRrGXos8Hp`QV@@l(bznQ#05PqTdS+>c+?v8_2lC~n^N|MHvtBYEp6ncnb?e=WhfUmQb(@k-^PP{RCEcO!~ zt^Gc){{YFO>j;i+!9?og9&^)zQt;9WO_Zo03Glyaksno>*h5;~d=gRGWt*2R?A}=Y zzYZ|83d`RV8yx?6pv@T1Z8wr|p0s(gATG08$5gST)&A@`Q3M;?zri!wcF9f$5&+}A zR03`1A`fczr}gHJ4~l=(nqp9=A?#$^1kSB26Fn!H2!sGPJ$Uq( zj4#t`)ACy!IoQ_?xAFMoBj^JsYAl?{St5-HY48Zo9^{{XPX$kvyZ`=xJL_M9H*Pf! 
z>vOo|bbrVM!o|!O0ZogXz0x1#qx8llJ~3{!cD1*nPQzz?P-e3>dB<#bDS@ zjk&;tMrccO96gW6%}^^44ar;%;Du&<{`Q#30@iSqk>q#2CC?|GDW?_D;&$2K86Q?} zf;)}|@K|?dCTs4xT?MzoYnqb^B^vl`oC*zvLf<_xMU+LJ8dy9x!JU-;zg5xbnD`d) zoL3J@w$J?(dT2IU+F~E1mvnZ*5wPtuWa~E>$xro{CK+I)x3XWY0f<==?dE+)EUy5m zKbkFYf|nJB6D#8VDtWDLN34spt*tZED3rp$dn2O@Ym8Sdr#i-$EL%sX-&f(@E!ieX zyX@-8727T@R0k!WWX)P;&+5obvHCKcRH{;Jy9n=6ZA`b6&;aeJNwbb9Ga5aA)A7kn zv$d5H+)>Zm`eH~_XWuo6{}DZ1m{WE$$y1_hJJM&Z+;>YhBka?Fe-K2IRHKE-|05X2 zlLnVgR4x`5x+$7&L5rZMigVB=e>%SAg^#~w4_5z+n%gp1w?{U={3Uka5U_CN@joe!7`eum}v z@usio*I6&`Zor6(K2hx4LpD41zsQ3@^qlIvt)K1DL7v0P<=8y96TIE?uc}LpHf{Ck z?4Hb_nB`Fg6sGyPGsW@8dLN-~Y#l>#1(@0&_DJm@h|Gg3LxC*#yVAILpEV}TmUi&` zMakmIa#Ni+M5gZ4VX~-chj%ZY=>CkEap@kHd#HA2{2jRnhIyw4s5UhZ25*dK=Eoe5 z95S~}4FxRM$S_B7BhIB_z>x4~_Z*vJj=lS95^E72^ng?b7hAReMSxp{f>*n28_nSE zTuVm9=@r@{F)Q9%+~cw^=>@rKZApP*@|U3od>n$7luF1|fmYd(;CZ&dTwda5r^zxY z!Ix41-|50~3&yff)%dIy-uX2sA*(BgW+6*Dea%4RUaKNzB8nof8?9Df3@8__EFBWxpVqa^S%2pty}#SdWh%$n z+b_9WJRbVcRzPd-@5MS!rxBm1DlI0#_zmjC8&^QAGdn|*aL*I_UCH30ewvNP6H7%j z1ckb}(7Q^ICO%rxrWj zIuY&kRGCI0Y zp;%&8%Njg3^Hhhc{pXOU74Pl@VrY^!_VlK#UIV!rwVn$Z8+J`+aIz|~jGN}I=UXq} zYpT|uHUo&h6Rzh}e20N-|8h?Rd5FG55!uQrXHk4C%UW-cQ@+SyOf0T{#}ejK(c6n< z+-udmzeq}p9#9I1##X>$!+=o}BJ^rpK^=Qj`=<-*b(ty^p%ktjcEZCAfZlsCf}j`szJY7cOsl^e~2o@}FsQA;IJZVWRCR z{9a*nz&jQ7#Qzy=t(w9*AeO|OB>|vCsofk}%8on=a+&#EeoBQ_pPAU5#)`Eys7}pw zj7qf|+u=-}Ngu!4qGPne%f!`nODt`Hqc4=Uq&W7+5o~S4NH4sZqfabU=TnX40#O2x z>s-_*`@BnIr7yWo_e~P!K-}=yaUfz|D*;BcJ1-}=oB`Fs*66OVt4K66@Aym-Wnkbu zye0`pD4dY}!UB6_1B}F_-As%46guj>P3+=jU9d*V;uzoa4X(it|c3lFtSpqRCraZeaqCg8WaWj-e_nL&~S=(9rZk6`|4+N z=A~W^*+zofLwqR4BUn#)MWC&x-GtPP78|ky4K=%V-?RLpoaR96$G@h{^wyPp{_koY z@&~j(I7zgE3d_M$C;QI-NTsD6w@54{Rbl|V`DvmRR>HZE!*_y%nHwMW{#r!ztdBnS z+(eb`bnrVg@O)kN{1baM84EjFs=kz{lZQCAFZ9672s0PrMCr1R^nc4t!To;zFfNEE zk53#1vwVUmq0m~+G-~|3h5pOHa4W2oouZ7sOU*;3#5N(|0>1_&w`4d@?{62frQx|2 zSrJFAN8224fhJ)~0SE3%oBU9WFTMO0dI4Ye!k}q>kl!@3lu&8OX|at(X=-{R=MiQ~ z>c(e`^_!FVovAj&cAp-AD&kpIgzS!Qzh>nm!Qs2{3jYnZQ?M3UaEdD>@DsFS54`n# 
z=r2!GeHI6V-|ej9S2qW`QtZBLa6h(qux0oS+^@AcAZ2Rn?+r!bz`*7Rl*@$(3WHaJ z5pEIxe~$bQ0Um$>0=dO^ychm)4wAM78adeQHzdxB>ti@A|10v6%McLqPi_Vmn`t`O z;HH<0h!-JyDpqM5b{d}e?#v{)IVW511LItuDaoLZ$)kP%ys&Z+Jx)j^^dUL4JIp^3 zGV(wXZAcFrIp-vITLCo1hiK9m#XM0x^L;=`A4=9e1f(hx@Xw?{n&PJSfr?S z|L&lqY`ldLs>(T)yWa?nI|*1|jBdW39ocn=?$bliSgU{)y61ARWz7R=cdh`!!!b^K z59sSRe#$7dW(EIZvHxsupR*!wyd*y=x=RpWUBSWidUUm7tZV)hzpo!^eR0&d{YiSA0ETb5k4-Gi2F)rml;!@lC7iz85YeDy)G z!ial%JY=+vb7H*l_mL(H!P|*llcnF_edC2V(WO$No49A4KO_pB^h?$ExPzN5`iOUm z{FG6VY>=VA5p{WP$U1|oi8VZ~{*{^iNBQH;CmX~y4^mK_+k14dd1L-H?BdSnFxU4Q zCLqWz%egOXMD(>pF((BzVwfhT)1U)aDo+BFoB{XZlDS^0aQ7`vs@JNdL`_L3UtYct zHG_NqZ>ksiXBa@!f4b70Y!N_V=|k92$^m#qFW*wT9+HJ$Ajrm+a#cD!L4Mbd?g(Y; z^kR5*l?uorEqO^VvC862Zw7c-VR6DJ4AO$YopGa!)Bjx6{W`dS2KNiL$xxrn@TR2O z@+P)yApid@*YC3)MN6$e4 zS9lU!VZ;CWy{^BQ?UKEvToFlU=ufKYu~9>V_*ZkO-kyYNg*jA0kA1$QP+$#jD+zje1t~8TlT*|JfJU~fw8FEdh z=4mS8ld`^Z!?#m3n_R+mj(3nHWMizPjSTy)H)VD`q7^sjpJdmQ=4KjOpPm;#Uosz> zS3LLxOK~P2!u5)%@6J>#*7FN=duPyz_V+*1xE+3IMH^}6LfH%Gi6TmZOf0V2pZ__@ z^l$LGI>Y4F_!eX=g7?Oq)x;T)XY-zuD8DVWbEeZ~ZG4NvKvrj78q4vX;K9_m$7yJ^ z;RT7_-aGC|nv^L!o0EivmLs5&%0-ccX1v?}e<*vas5rN#TR4gY3&Ej*;O_28u;A|Q z?yenzdvIvn-QC^YoyOh0(bL)ceaHWOW1MsG-*CYg{dBK-YR#H8t5z*GrN-Ks!5!V3 zP0lin4pa-1QLu7Oh&uG@2j$(@#pa`P$_S2MTj1ed|F3pGC)TCyLKG-_wZ!e}H)Yj9 z?}S6+FP z&cFk`UP5h(5lgqIclCS!!SE-H^1T%!VQ&|bn%7ye&x9D^w1mf(Gs&%lc{hAlwFdl+ z(iuaMt*N(h^P6zo90|UUkzWeG36g%VZ-IkD7A%hx#qy3>hGkz*XvKs8(oQ7?gilvn zMI9{x;#eXJsYoe}=;0QX%!&DmXXo81?WW%=sdtz}p<^$!^-t4?ev}6_djzZl{0B2f z@2^OPlW)zUqDNS|#QZB9ekufLzlPkHa9qIWC$428Nr`eg>?SlBx;c4N$Cm^LnJ8r` z+Fu{Q&zyMru8nGH8*VBVU`_K}o4B-Dd-6FUI7n=hC!4`Im5<1Z?u?%@SwVK~g&&X} z{E6DJ$W-F@RwsVlWl7g)MJ2dB5>67gZ|*LAXSG^{Pexlli0g;Lu>G~;F>+9|jcyTY zM0DLH9^kw$iIpD>z?T0Sj8YEs!?$~I5lE=AH`hTrnBlmh1&+KG@V|g$5Ir%K@%jVk z(rY7`$l7@XM;3$>(6K#L@07H>=~v>!^Qcr@jxw!m=A?hX5&3=o5fs@~%ms7dt??{+ z>*B(gd*1$KI4>@W7g@oyv}T*esITJfZUg`0Ww8^xiL6d<)y>AWP6?v}^G0=>GOMk- zyMd%I-g#RoiATk|aF}rAVJxrGU>&s4*eE$6N+dTLRmPe)rd?1CJ`2@DAX5pqjpTCW 
z_|T*|q$C>$0W)w4*cgzaM2n-UNb;Z!{NWi?Hx^o3xf_S*la`pF z@!_HQb0~~O796l55{U95;;IP1b?~s?E|m!HL#mwRUzNl1ihR_5@O)cr{(Gy`THSSAq5mEl8{8AP4(i!u$Ax zpV^y>F(v6QLz3q6DNCepu1**sgoFlJlcZ-R{1aWf@nz1M z;y~V8bZ=nf`tTWt-Nh}*?@h@{8rk5y)_$foVD0>P>*s&mjgW{0qDd>SZzxnd#;$mj za%sF6s==6Ze*_MA8>Tr5*}%F=nBQT{R05)@KNI$QT5)h7kV|{Hr3w=`1V}Q-6&qKL!^X`jVe+A_fi2NjV^awP z*e*K#UUqmuD;|U*E3zm1!g8?}VK-MqEhtVNrpH956>5$-%;n6VHg+SA8=}9yF=V4= z%9QxNG)0@VJr1lhqJ<|JAw1<=_BU?j1;&I{;KTW=oLFn{m+X{rnk?cknOB@4twoJVYia=n&A0d0$|A0?mrLy-Tt>5QC zCgvg5dRqU_o7vH3YpvN&&p{Oocb5b3Bovy*-CSo_?{ofy1@m;bBw-Z_+{mod2CES6 zD#k~L5pc!Pr(#@2dRe|OYOJW9{aY|iBC`zDSoA!maydv=q#IzLpsMRSje!rQCE4;5x(;>_D3yvYeLuREUdH;GraJKE z%ggP-6YTF_%TXcFLT3dQ9wUM$G{;AJiM${|8O2p0vcW|96AH@in>wtgw>$?4r*h}T zJ{r@t2LMO+&8|0s5@6-~w_3|kq}ox87Ix!U?S7KE=~;$5RB}6} zL*S7+d-pG5D7+HbI)~N+Bh%x5)Z!`7?@hBxxAhT(6;#KHCDUJ7B~8I&1;1d@VBzQM zp9wRNovdL#?+ngR-I3_g8CP6xVp4OG@&zEEc?{Lyj-HhUpgDSfB;j$&I__EnG(Dgx zMK6AQvx-Eo$&_me?mF-!3EBg*sMr>9r}73beUGW7W|QF4J7^TeHYa;#RV$`;Dm26% zOgiFTQj(_P{2OfHh?prPBp@4cpMQ|{&m5-;w0whF7)UuynnH^Ys6;;R9sgsR+lfL5 zv;g4CgIZ`}wP@<1N@uNy+8kdse1n6T0@f)$;H^V6t<^=b08WADNbKL@U3e0X1GPoN zE1CKNzNUS^>v&o048ympL)b~t0qB0h6y9g_z`{2cSf;wA>=t+3kgpZWqPo&=xYQ5@ zjSqI+rxu$fQ}6|c(khn7hZT@thif1iwgrbA@=foSHpwJ7MgjAk*R$Bl%tNuo z{gu0u2wNSH?%_|@<8 zrobO)U?w*zlBYMv3}iSeP}w!B-DDuxGltqQ+(4cH~-)&Tok#4 zim=hYw&eD*HT#b(&rhq#ZG!tUKfYfUsnWe?jdwv?fseVnC%;DzDfO7 zD1UK7w##Iwi`-S(hX1YkX#{d5AdlUz8H zNFn-kHoBGQ*2KKJ+(PXJ@*H~*YgAs=hm^HmQ<06%L~UQulhI(8Dh9rm&8WC!0P6GSr}!Rp9Fn6 zD}5jX7x>_ao>wqZ*kSXp25Db2bBgJp|He9b>S+ex>oZ4PU0q^g;`vc|yFMc}GcUQC z;pBm17!C`}8#FYu-O*$Uud7$hZ9aKSHNz+}h_GQ6s`1u38c*2rU@BKAXaA$*PA_My$x| zh0J2yF|SDB2ebIc)V<@AH@7wQCW4Pb!pR)QN#o?)ZI>g3lwgOaoytGoU)LyvZ2TQ2 zaUBznfp3r1?_U)0JVHBE6{>a|^X;374aJBkaw;vl%9}T_1?&M%S6cs4RnZD7Il@mA z-YS#4#zzylh}?U=^cFK{ps-kLeYzXdn*A73JSS&%FnSBu8ZsI9Qj;v z9Q^ptZ%k{U14}_dy&)Ko!{YLNaJxW%B%#Shm%pm|Q1$P>3DqlzoRjmuc4j#kjiNxO z-CLxG#BTuoS~FeUKV(mIqrT~**bcYwTX!YP=vzRkHS*qpax#w#OK{dW^P*OayC=B-LEO*XjG)<2k62@x+k+m3l;K_lR0eitXG=` zkvma<<~QKz{_i6Vsd=1_GmJ+; 
zJv}|^2M1#P5%?_Jq(oD~*^?9HIsz{c=)LzlL%p$73Q%M7zr4KQ;DeyXz37bbi}`Eh zf5NVOt?Pw8h}G;zo#i4Yv@B9Oe^{M@lK&kldGkrD%Rx;P7;BCmv6mG)V-10j#dCH<>Sk_D%DF0#OL%TiucTP?Gfl$0<-O}a+y z;^G2bet}jqJM_J0LTA;udU%*X9SrIiup|y-DZ~twp1N9}7Bvi#R#VmAE8CKBCs`SA zO!WQm6}dXp7ddf5VR}P-jxBIkB8Uo0BPmsCM}P5=gnfnhR%`A+>7bD0*Qc(dgfhk9 zWPBPL*bsKWDEKZK7n5K#4y^u%0{j)ZwN!44+Rp)qnDw2BqGWVrU{gL?B%9ry;f!*qYcs;bH(=iQN>asf|tfBKdjlfQ30LV@$@ zDFVp7{0;IV7MV8(x1*X1{*aCvuHb!5TvVmj-iV{?$M#CLP+O<~lN z(vhvE!h7{R5Eb}#MRh*DLu_=vdJH9Qu1>r*>;qH6?o{5gcNNxf0Quy@J%WPStd~Hq zaT@Vg=2(rR<#mBJ_-78-Hf52isi_3{qe!ZsqWJ&7vft0KC=yZA#VRs-divwlW-M6P zz`r}pWvHjePdF6wVp>K36&w3!sd}wyv$NH|AlUNy+t+utI5a;$|6u~#6RUW<8eIXL z65>%wf7?T;v``nNerWTv_1D6cxhG_$XcW{>y;!=1T;$WJrFgbl`6@PY5O>ObSXM}d zQKD3<2EFJK(iC)3lpGp!*hIH^(0+3_6P-3XvknQ2y?ewHCqY+G>db1TP-rFm7Hs? z0?2!Tm)ad^*TV}}bW(curKh5YG%t|qd^6u;*i6LL8WNjL2WlLo&09%eDP{C9-pIJ( z-m2Ig?oYg<*0_qJUU!>C@-|o7Dk)n3$$(KlHMMq@7j{mkLlvmW7iY2^8k1%HqA0AU zl2&GYI3+*N8hi0H;geRRb0B`0oc-3^g-}IlY3p~0iZ~TUxinBga>w*F;gbxf*x_N$ zCH3%LI}O;RGSHeZN4#-?+5rvC=s;GJ?Zvy^{fW1L^*UGaF;+gz4($MhO`id1!4-TdUohFS{F+xZ7&2MNLgzo%$OTENP(sfj;tu0gBpk?ycGf zj@%f8gyox?n~Ii}B_YAVa+8yj)YR1C!XhF?K|ydB1O#PWU0sTM9_dk*W(YTRl_tCi z+6wj7vzc?QDb$Vb!L-q7I>Po2*-~*($Zpc;l4UP0nxQ8g9Nd~umqs%VnOWccZ!f^M zNP=*3;%YuO*XY`JvM&m#ysn=^EM9n9*0NyqpT?@)Z$+{|E*W_{)3pay@2y^lwYB<4%X<_`|b+$xg@#eu5^6n+O}ACprVh)0XTzD zG&HRHO5{x^KNiG8L}Dd;T2=Jla2~D4h`qnk$ny5>+Y*&bRq z0{{Tn%!Y6RPv`H&2LXSXjgUOER2S6{I1Uw)126!FSfJLhHK zw828hBTCCltFq267ZfN^Y7&vtWF=2kSkKVv#QNc3)5f-=m+1l-le5{7sAE1I5rln? 
z^IwrErky^}ZZp9#A6sP!(_*jcn&**9p^u~MKn-JR8D{CnGsNmQ{8h8t`MjIJJLrv6 z5iX+2PqG*eZ=U%g@b7wS-F1vlngW2(TORWmt$&dEihNjx7^X|B|Xdx1XipMbx(ihT|Q{3@;9ns&I& z3QFO`dAdMg&B-j4XLyh9jVH=T(!a-uy@KxVb^40j2={C6R~^{Y-KU7D2{z z?^R2^X;ov?_!uHdjUm;NPa2XGr`MBu*50?=ud^?v>!(8pPjL>c66EZ;iM|-Sa}ntD z(Fj+pvvU9DN!!r<7OK_k%w)g)^Kh;-d2$0P2vBO&(L(7}-|Q>~l$Qq}7m~azJo*<0G&kU7$Cwu|Q7Q<7`48T7U z4tPC}c8~?5AEhUgD{DO|YBUQ@rW}v-{=BP8|DWEJkD*@52Ep}U%o^ikw{A+QBkk0P zTY^+)JSFPvyWVNQoRyDW+s#%;&%fAU3aghD*UO1~`AAB=fpiFJ2pjNy+P8}=&vPP@ zVLl5}cFMn;_wzhHQo_XIp;#$)(i^J7>uLE}aB#uL>3e!39@W1XoYELN<<*UrWuQN; zJgxX>z?(3p-B&1%u`&gnhoH{$FKN7Jn$DMDF&@FDRxIf7gGXcL1`w_Bd_=(99*81A zr&ay4SZy+$EsEmU6xMD5)m(nG$UxnFaH@B4d=t$p37uC&Z!^~!L| z(j^6U|D?+>+El7(bKa{*6NA;K(htTRtIOAOlLKS$up~f&M~XEf7;wMd0@tF+#9$;; zQa>?eTUz9)%UgrfuIqfdY-V*Iv@FM_dY{cEt5DG5 z33n$bd#}r4xNuNR!&%eM zK8FKrt5ohEVMvSe-wF^4#I~Z{iu{W_sJGJ2evk(WNe4J=Pr?lo!$<|>Y}Oj3L!+M5 zg`V5Mgd}YmENN+JPOipD<3^MOV3E|?Zm65#8`e6{sXZ5D?K`slV1~7Xx~XBUK!u&5 z?djheFK3R{5ZJ!fIs4NW1Apv-Y{EDZ)5$a*tT||8fd(S65DG%GSNIlA=_s ztfwE%G(CWD(dOXbuywxC9ZRnTB9n-pDAVH6H!x5D|J@7sKMg@VfBw5BFXIsM3)^1lgxBkQSktYc2k6=-*=3g6dMI;)Lc`xfLB^e6ts$IW=Ix5MWL!Zn zBu2*eqH!=4`&)9lzJHD8Id^v`Jmg=cYcwHw-dNQ$VaonRe~dAAocU53-23vemW$Vv z67N|F3N1szx#DA*+>Wt-_TUjQdZK(^H{~neafMG!texc`8iXho2K62Yru@97Eqi=@ z>V77UgY zwZzknmnFYD+G~*NX4bogU9LEyc2|XLEx{D#btdV9vh2B3Y2QOTuLoJoR1{+NLw=ri zykVF}g6ndVn^T$$p6(Gjpu%o{y4em$v1z!YwOfPJb%#lh$*$)Ecw>U$ed4B4Slwu7 z1{s~zpF}4Aqq$CNFo68bLx@%vu6d})jOM)XmP@JRjTAEOow>jBC@1;x9og7t|GQng z5nipN(~}hz_Kny>Na)t)$XnWBJ|CP?%?b;~;H26JM$c>K--_zD9GO4HUt&2K)1r=S zJ`-T|-PDpmZ|$Ihxi0wyZ)SGpq7dNJ>U}*>7tgfC+enBZF#+*MOm+)}@#W_8I#tJg za4#~-NlH71HCf=!r>IRX>##bM55NR2rH5Z(<^Fmlm~(YUGkc@R%T)Og8!ACN`%;kG zk>G3Q>hUGDeQ|)S{pV(u?>WBk&x9%l&BHSRVz0}dWFQmP~K49wX2iZow**&PQ5{xxh!+Skba($dNnPz$smk* zN0tKh!g<7DOe_@QKIgRTI?E|t8Y*F8aw%p$kj(V)*ok4{yEqoukMqdlOFrzpCU>53 zD1jxj!)nxXrCL@CChB3OwhZJ#K_uTg{%Lll&=1#vnSAy+eTpi*RiJ3j$f1aKS9z$= z5P@xDwkdz-1@fhQX7p9`-<#bYxS6gY8i2nkusooSQLT+D0Qcj 
zkr|(FqscQ-YxiyqiHv-;ZP)SsfC%GTq}5!<%*LkVa=J3OZ|irz4#f&;3JMA%L&F?o zeC}8%VSw)7LT>x*psAx9@%-t+`+HXmEUbJevMNbRhJ{2#D9g#oQ8P2k3kwMqoS)lb zU}ENVb$7=>lQ-xWsT|Fh%UtmM`bWmg_etTj`~7fpWFqt;@gFs*2WSdz^V%Lw#zF{< zO|we=Rz7dQh~40RLybYB?76iCO*0ap63bt458@3;gJuBRSK|VEP`%f9u0#z7e~c%0 zvgO%KB6jFkpg^e0X@}4InNkfb=n}@#1tMEpy+i`9=LT;`pvq5KwrHe$qrFkAyqPvD zE|O>@L9TQvx8+((Qj^mWxrTRMlDy1+ewE*6v+VgJ94>oOwJJlscWejGk6c}(k>q{@+oT($Q1|-BJf4Pswa8qF1Z);#L}zDbsBLz!_QsQx zz!MefD9Uc4Wa*agCRAf;WexI9TL)ugf%GqnY5O1XacbDt7rkLxu$;0^LQ2AxUW0;y zV2>e7eqeRy&AVoq7<}sWH^X;{(`JKI1Ro(+X8~)8 z9_%dvWdw`07Fg4HQp5YU%vjxgbqoUj%j-9qmngdz2Lh&;+zHOlDqH3MFRdk8LQ&F` zF^Y_jldxDY-=j&$^5x4iI!al$hY{8iJzJJ)nNYo~LOA>;gJiDK-7|W!Tga~gS?f|8 z-`9KcYcL@3@`eHGW}N>+U?hx6OvG+`zSXeb9T{Bj^kv~riTrVWp#zzmP9ea%V`=+s zH-u+VC7-FJQM5Z6k?T*_Sd5b4frQUd#_tKDWnS4kQ|hN=8}N-kox{dilr30A4@lta z+hpUIe2*KrJEG1P9)Y1*SzFaxD$Wp4=eXT6?-mW%HkA8Kj&H6ou=dFT4{ONfbQFur z+U}-~zOb~Hrbe9(_yVDbh=}lfJgHCNaiPx^9O4bToII(I3$xJvWP(Rig;6kk?sKzI zgO!l(B(wE&b8znlpJGK@VNWR?>RoEv4vb4?g73wC@`AT_CNbeY^%F@;{ML`7meg{J zN3BW$5@DQ8bkx+WhdFB^!Pa2F+Ql`fiaQwHZX0`gUXI5SE5w>XtGgSJIgP+O_qYXZ ztP!$~X@yfIMB~ImLF|m`e_0TE*x>wpL$K}fw3$_4S>g#uYwlWM2jNu5s3y~rX%9x( z8e2ff`>{^J=e?v~V76Qj~A= zK1*jIxfb7wI;XDIb!>1j7z}*HZ`B@$;s?t!OOW?{Cy8%_<=6EWLC40ctvs*iTI}|# zlC;*Sf?fOqGl*v^aZi83pH^SWedVA}I@kJ#5j;IDO&-M5hJ*$Wr(0LO)u#J@mQJQs z4CMdKx_|r#QooF+3^@p=X^HQj-OkvkabxA=NziA|O8j!UZL*01~%Z<2+<%*HZ0Q@feYtu26L zj;NEhDC`Nei+>w12-6jt#3IMzH(WJMySB@Rqn_@IIGt~tdYAg*%n4p<(zv1z>kh;t zy{>WD%zLnOJUgL94K*l-6A8SUquRL`i^Oqppz<|_nf8!Jxuj?-F68NLn5t9dNyr5+ z$|Nzjp-MAo&#zBd;ps)aU4h;`9yyMI*H$F5oa-XyJ~d#A>@ARTW#w(8xHuDYS>A8> z|HyOE(uHt9$2Uor!+R;%>|r{_h`OG#I=d)TJ~_$IJ%#%}Jt7!7P|faq{rghAO=NAY z6O?>L=Ms4YI(-lcSod-zwk`(AI*gzazhTT=;jn#p`vEO2qP z*k@A^oH@0cGLpyYCGSo45aoV|rn=b^_{#i*n!l&AWxHp7y@>mDJXtJXt=d@6+`n2WgH4DaRorEskRHiah}y~zl@c(_hbX_)_5T50Wg@&>WryIT_b+Yi!3#VsduiH z6$zGh^Xy=Ulh=-3Tg(u+$uOcnlj$yrugN^?Cu7f~Hk&%6gLYY2uA-)SoMKx(=TEh4 z{n_A5a_W5^tKp+1GmAOC?f{3&0jCKZbAuS%1yS_hN{rzr7y@f-$?A(&Qiw%iSWa_A 
z;#&brFD9(oe;|5*6Zmui`-acsc4Ymh5D0tuPqVy<@#yKd?M-2TD?e-RT5o$~ z+UIST^x{X+tKhzX5G4sx5HmvnO?z#2r|WwIm$E!j)GJo}se!2^=F&cvb|CxQC|Eqh zGeQOnTHAoi218r@kx86(`XYfbVw7Fd(z#;O;6XkU4g`%|Y2%zU7?x&zRse(t9KSIB zR4bEi7C+;R97i29eg?xqqU|NsA4jLr=w<^ydd0o5z3rz^)-=-e@F$ncEj!t0@** zj+86YYz#x*Yt5K+HFShC%SI2lSJjVc@gV506&9%Yu^jA;WR-sQ?SEx_59z%*&_war zw;h(_5)1cy34=&$viVk4omE+f~{Pwh`Q& z^Lr~JbGEP>q%r08r4n;;zR}SE=MQ(cB2q<%(gWQbY!%^aY(6XW4B|XB@Nu-dD|OQj z{t{f|aQ^yuNV~ZnQMj$MntX;VJ_PlO4KeS-H59pRviL}Xu1Jd(*9@0D{Xn~sGnjp~RD$_>VR5QfxL0 z?p1Lz$s64UK3|#8h;2W9J?b}CA`)&j5D7~i0m|>%?Tq?8fI3ZA$r3YZ>KeS^l)_Z1 zmzLkuaOG~@*!%_vdr#qmTG@CCyD}MKrz!$}L>+eOBUn>XLXGBuc_QZN^RG@awPH=) z4#{M|cAEpAJHe+Ux9dlK3LZR_ju$W2e0M@HihLGNe-Cwfb?G8~gr@>^OzR@TUzsw9 zW8>gp5bCaw-DH~y)9ppx^Y?FXM^SJ9{lm~MdgoU(iB9oe+e-G&j#gOJBBfv34s2$s zBx^E>Li!UTE1WJ!`^q_t?{_aeAD*06af~A#l9>gxv*Rod!7!WUX9gY_cuL}pQL~g` zJg-lEe{@<3Hzq&ln`n`}OnEki^9!!3AslGRCRbR`h?$Q)rJnkymrd(vNZw@z7Vti! zxMMwXwg;Z}=UMEjt_5O7V zXWGHNi!fzxz3-!BViih#ArH1X?|}vJCLs#USz%`hTJ2z(64Oy&ts*XG6gx$pd@u8$ z7XQy?=aZrR&wlp>v*kLkrGz9`Pdh3^$AGEW%=vcx=S3;cE~-2iQ^9G}u@p!yy_)vw zS_v8Sp=xpaSa*~86teYKNbVdCop5jhwIShp=`2SR`uui5I}hZt#0!Z~~q-6Wf4O*$0mIdNlRO@jYEG3|pZ3ytqIftUQ%Y z#P|F(9ut-!5jN~vMk<}Qx8BPk^mHSO4kNzy#&7V&RXKN_(*-n}A!^nwVo08NC!T+G`{ZCq*HUJ9?3-r|`PG%Z%=B|M^?gD<%SVGKr zlcm930_vewWt%gmJkV3tps20I)RP8$zQJj0l0G+a`I9RKHIF_*fxAV&EX zg{_zVFB35H=5x7S*AXebPZQRWqt86ofz$8fx7^IrR{7WF;5NSYmj@aim(y*id;Pr? 
zBwO%dyk+`G6d38S_k7N;fszafp2J(5v^w&_oQ>Sts3b$wnFt?7`7xovDVyOb_+Yiz zmGMR9G5wQU$Av(9=Ibwudq0bTP4y6WEtM)SN2N8&gcx(lyaEauQ?&{KQA-5bJQ1|E zH9L^AQ56N>7wQ+C-H0`z!QlLY1f}<;1FLT2Bnc`8*Ifhv?9H^ow)yr@yfra7@m5tt zn7Dm#K!Ub}ck@ibwRQT-4&)|b>7nP<8}h32Y9l}I*kv)!_naEC8|MT{(Drz4FMoZV z_R@a28$Qz3eyzhK$$mBW$TqC^0JjwT+5AoIAU1}l>Jskb*b;Oqc$S(LS0`Zv*ySnR zd8B*WQ@S~9eGB)C`VAw;--icSyfLlp`K>UMaRn;ewHeH}$0OpxS&Wt=w~O`f&bfEe ziBLwLCXNd1Ovg%Qr}tGmF*Im2R-hAOOeI9eJW}@{Urqk+tT|Z34Cgk zl`1Bb);Ld<3p4+q=RN%NdhhB+`34f@p{Z}K5hftCM)#$5Pc&>Vb5Cd~)}?oD3iq+g zX}|sYOK`r-oK~uMT*(|1)eJN|C8JTUJk@yny$i%okChXX)b&*g2n0D+hzMlc+9$tS zIPH>pn-+Z1H>w8AXsipa#No`?77g@Vjne=+qfp7HCX5i?9=^6JZ*no`y!jXa5M#v` z=Is|nCm!$c9iJ1nceQ6JLU$l*j-w*ao8wHd9J*`v{4y5x!t8+8XkcR%Q0S(7seCj@ zn2;6Cl^6dQ=xuUWDs&UxGIAItMHxdwv)h-ax-%eH=$^fB`g-jf2(FuCEs=#wD8Wnw zCF&h1r`@tde2H!N9q=t;>_EKMbFs2&NV<($D{%`Wwg5@LvDJ)rbZEP4t}DgTjrwZm zEMb}nNp$GB%l3>Rz*{?>*ZFT+|0jC}JEBJ*wAJ%_iWMCrQ*BS%z+Tvc7gtR4Oh!g3 zS)Q@lph79fa5>>dS;pV}iNn#&7D8=qVl~e}jaaE~CHa59`;rKhf)VdEO968yWS z-kzylIU*flzY&5)dzYX9XZF9Wq1W_Y&0b+K2fcD|T zxm)-%guahB_59qg&eS=dP`+p8Mut|`6p8tEe z!GzP9zg8m^hM?>z-XGI9X@MGo;?={q-3C;?+!>DVvR$mFn4XmrhGr;~Wss}$hhxY8 zSti~MO;4fx;pqv#$P@J2+P~mjm?7`9q1&PwhCh2$$M1>h?FS)>*vB4$5A`7!c5d7~ zq5myW_v$LeE$xtSxjk`nzaimhqFw4a=ARu*+7@a)BlHEU-D)#*#yar(iHO?TXkcaS z-WpDwG>ts%@o%8=1MZii1w6uxCHAMiVqY;I*a5?>BJFncU&H#fSty-y7_9PBfgsum zf%Tp=ikasvW^Ys`KW&XplX~8LK7jz_P?-x(X26-$30%@*Dj`b$+fT2vI@3qr;CIsy zzA_)e&(}%i*`?(Z#&x~o2zK*}Qt_Nn^6U9%YOeg^G0?ai^%=mrVWejcF(5(}5)%6S zaAruI%YD+o)ay~Qy`62VZ}5(bZ!~qa6}D=#^2e5J;CP76Zb#G~KLNZ+?PCG0Kkl{o zOmy5q{@HX%^DZ!01{kQK{nJ1PHDgJe-(K=q-9WF>0#j>(HP{n&JHD?Wi51J12k@co zfPtkw99}Da^;dhivp=f(g5^~?yX|57TnQFp0v~UT0z^v=FL4CNWA!%Ic>MZA&hJOC zHD&O3%@f};ezu3a5j}x%Sn%g}ApdVK08iq6;@20gxuE_S9!BHwD~B(`y=J4P^=nab z%`NcoZG(N*l_OSszuMD!vFmI&kvs@Ho|{uw5D_I>Z=nI6NrySjh_j#xt$Kr2Px7kplxaN znhk&c7Jnz*xiZfWEyT#a$cxM7IG1cEaD?&u8%oI!=FN+8w1QT|etsoOP!$JTjL+xu zchNMu>d4TNCESZCgyz~J%X6bJ*d%CHQCsbJjfmw-#@L2j-jQ^z;RN=uG23_b@&k0* 
zx2I6Yds6k*MnY!@7xC}5z1cH&=L7MjHP7!OW{HJxU*s*N}aut1EEBYOsv zs`*rXO*4&>)ZwYK<9taRFy&+cF%t4>9V#YHN2!6u#l2?bMzMK~mU}`@nqLmxOgfl6 zk+D4)7MOZRa=t>m>*`|Ii=xB?i|EsKj|Z@|lUynIPji64AhQ5EZcGU$_kymmrFevW z>wy*+GnqK9aNpcF*otN5hq-T`XyVA)0zFDbvp^S8PnA5<>GC(ly?e)_&Jcaa%U?ed&En;dMOXJDt zr0$QpC;tH>WJ=%*sLc=1jm2CQkEh(X%c=4VI^)5uG)u)A6qsX~RgWPv(FV|kbqqxF z>Re~WEptU^1;J*ou$m^Kl&8Y#gy{$0ERB7Cd|(~O|0qextNYe~c5l*Uleq1uJ-CX+ zJ4;M{F|Xda)U?8Wz&|yYN!>w|`uaJZ;RLyQ_y|$}KuEc4(6vIuWictbI}ti4?&rc! z($9RePfxEul(y+g`LLAcswyNb)<(t8gP?_bfX{~a@9Bywx-UVR6>%we` z$sDEHJ0fG50nO~#IyBe}S=6QZHz{2N9R!VF{EfRQ|^K^ zdPwZ2LHk6aarflfyVAL^668g`E*0~soI+x4j51#(hJ}GbC|ZI#*Gde#Ha4E>tpX^5ktjX^ zO>2;>Ct8}fb+I6iLjbn$E-Im3tgSpKA6l?KXjHMmDRb9oCay1jlmuC?2d z!SP&V%OYwWtJfR7q~9J~uPh<-#^1Woo)3jF_<>XBMj!>yE{w8s>q|!=nR@+Vv9xAj zT3bik`Y3)VY7kOtVTOyW+5?^*wsYNF$;sCUz1zC19QIRR&TecC=Bie%IGV7&)e>hN zA}iDw!5vK|p9CwAb8?$d3^05p3N}9W3)P~O`Hh~jb=W)n;SBH%)r2A?C5_&#*_w!& zI_wYW@(f}1ot!(xdQ>}ns1R3yg=y7F|6n>cSAbI8J^5s!dga( zQ0WR3DY2yK%eKq8l347RdU|r^)G#vlq_?yR*x6F2uj*`mTM$sB@k+EEp1lt5P|QOb zz00t?dZ_mDC0jp55p7^Qax~cX##Ui`YG0H*c{OThwC`A?|M2=vtBX26g|iO^6&)#Y z&Tt_XH)UTv0C$z5@-|F|JMZ~lU_1v1cmsHNctX&rq9-%qF690;O#35X8Lu`utw1}4 z|JFdW#2W7LeC5cAy zZLHKG#eI{9<^B!MdmAUct=%ZafoQvS;5fi3?0>a&kT!Cqvij(0jV)wyN%x_bS+DS z%jYRtw9Nk&f+q0N$|s|X_lybdA2uI7FHnfLcP-7yS$KsFEW?z``~S#N_VuYF2ITio z8kUbWc+GP$)e|kP3Kq)OpgJ({6NZ}zUJ10IMeH1ff@5Ugsnqpss;M2|S!cnmvHl{D zc{00g##)Ocv^xVkIWqkDgF1z2DV*#HyHLS_j^PmNq)|PU?3P=--D7j)my;P|r0+S&8r+MVxnw*Z*cXuv;Ygwxhnlpm#`(nV-$-th~R z5GN+;Mvo0I_sncBUZgXXc3w+F>Tuim5Pd&BPQ6;Z$r81fkKu)|^Oo8G;{V1|IdQu0v7XWEH$q~U{XeOxF4#r5a(RjOXAKqPNg|>pzCI^N z*k^;LA3@`Dc*GuZZy6ktCXA|2le*?+X8*Yw)O_M3C zT!!N0dwR(Q2Gv)(AMe$;&fUkP#=G(AC-5LEv4r8}!av9#& zJ!}0a^z8$zAZiqYv`|16agatz#_udnJD&_445Rzm^o)!QOT)pn>@ehg0<)Nv$43&g z4V|?{yXB6XH(K26sU;+gSYLT$rR4iUp?@9Vq=r;ErvT zB(19mAc+??l#&{^SfQulyGa-6_9coI5RSW$_=1I>RT1f5AN9dD+pNx~lG)w(chh`h zgmf7Qh+X5wdmq2(h2x^So3bp^FTI_Zvk`N6Jtis1Q%q9i+MZM8@2bt((Ft#_*J$ zil6Y5+h<%Zwp$J5_yk{ljn3TJHFzdlmE6p}^FP4Lc%%2B5r*d`oyFmW@{~O=sn=c1 
z)exH99X78h%Sa7HaY5$o=Z<+qBMPwUcOIU7jy1Gvfi} zm+MZ3plyTff38&qvJl)UPJaQHFqJ$;*xVTzysb!NO!_~Jy;W44UDK|c;4Z=4-QDRR z!5xAVg1fs1cXxMpw*bN2rD;64yX)@v`@eUswbwq`WAqu#7*9WQ)~uRUcU?u~*Jc&g zc_!>>lRfBH@*%P6C5fhgt*Rv4YnHeno+FrAA02S?JfSO*oikO4m>B=m`VbNF_zo$h z{eV5~+ZuWsU?|saQDmyV-9#@>DW3_`_!j!jTENcAj0;PJTQEE?j#N(a!BK5bipS>x z7B{si!NO?&`kb8b*CPo>K{(YQ!E3yiNyH@D^ zFO#dSV!6|e<&(`yQ@NocRKQC3kJ^L!Vd8#k?whBBT#aS{2$AYxElMSvFu^8Qbg#8l z4IH7Ym)~vXL@owZzeftGU)31NM;kmVya(b1{KmZRT(goh8SH8X2lZ4Btd1Ck;jI^N zd*SKQ2sX5*s!S`}TTOWLC(rj=O&c|ZUpxb-zS2>fT$pm(@`;D^LyQ2LayeY^=fCcK zZt$4qb~X8!>W3FTbnckj-Lyg(uG|*8)Of;8j>@(MWK3)hv=_U5D1U2YLxevQbx`&zGtk3TUpGXw)oH#NrX_e2CnN(4crA(h9OK-8HsY~jXN z%^P$^6t4v6X=lk;@sT^bo0C3+-RaImSMVKvsLAk>W3v$eUYcrhHd?H`zo@&28E3=FHgzV5yyz}`8@;OR-_m<<+r(ABq zdS`f4{Obg5p^$=EHA5XN$m}1F;U+tfTF*Z3L zhyv=rqi?xlF=!M%#U&-FHOIg~HK&1V&tQNMg|ZCnvD$| z0Ju;PZ_=(FX_$C_dpVjb9{={xX}L%rf`r$1yjVH7nfiTVphtBj5MAX%{rHW+34^FT~nw?@i+(Kse#DxcIInMW1sN->c zUc;>U0%GOCD3-{VGPmFj_?ie0F!Af31uOI4-Q7haB-E`IHeo;kHL;?ZS;k3Xy+vC!4rk}|`qWpv?`M)8H4!JpHdAebowypo z%(_@N|3$_rL5@yO4{z$9E$H}vwWpf3Y+mzuSO?!J0|z5^d6a?Ae(T=byxG*$U=bY*COzUiWG;--bv6X}Riivsc zs?A=05RID1b~P|vTi34Jq#v}c=d`Zwd2A%q4#bq&+%4Fck3H>nS9jQGbjBDt9=N&u zoD4zMC?X-_{ojZ)|F24c?pK32+1&ugT{zRmcHMVGmf!YN>)=z+&^=%o*SuVzAQDx9D zu`$=ujE`?Bx>ARGQd~rNOq>SjsFRr>Dha7_^k8zQ)U2wa-}P~wG?81a>qw_+rDLc` zUtMUrnsT8FY@b*(y+cc`fE4`)(DpBH&Oa}t&h<>&bSf)+)Rk;^ zvu5BAz5{S1Gyzs@gEbb&;76`j9|u3O>^KA@WO!s`rRW9Nclis{OY7m%qnrM&Kd?!6 z|+c#j05wEOV@_ z(|`~;>q}fNQdt#CHxf7qD@pKH+)ftAtU;GLKkaxJrN$3FUz5)E{F^5C&BPTGmS7L& zFa0k%V(tZo7ZSpP2trJ3_)t69L6Y!*KEf$Z^MjcwP(`Aw!hJ8FrJPFr-?eT=_EAuG z2d)~s@7)N~u;)g&LR&OMB@?aqm>9{QNDA_$>yG!IfH9g$E8k#IYh29(Lk z#~*9Twz^c=9T<6Ql8EJYE~Oh&TC~^3^Vv#jOGB&GH*2dE$YG_U5dMGgo{(16FSe>i zPnNa*bGrVsGnAqa*E@OO;o)t^!AJc!=ftn}!4|+8qFnE5cmo52y)ilJM6@iZfbkd} zh48OxP3H9N3M} zk%Ku^*o^87raYsD%`3-A3p}ef%P)1>Hup^Gm1i#ro`X3w+CBs#ZZ}(kZaMpkHs@oh ze89@bXHTn>?IjKQJX3gur_QJAzrLK|NPhrT=sv{SBi(%w_;kBO@97|n(3xrn#=qeh zkIijWj7Iai=NF*3Fj5MC^ys6P8HoE`{S5>6q6Q;1_T}4+F5N`jkXbO>P(<;1N7_lt 
zqgMnQz?F3G5_oxsOj==~Pd&BSzeiLcdY_Vh8+|sy;8k_}eS${I+fTJmk~x2*ng|0{ zm`Fls(^KxXaM_0G6QhN7`Uld-5isO&MC!TAi?F@P8c~sZv2gKd29&6Hi>uz2ace9V zTo{a1`q^#c1awh}61=8|=zcr2Gywg)y+b0McrtYJRP(#SRqlW8E8v6m`#-xVE-udT ziM&Iaw2VIlm1Jrs7VzVIW5Jvg3JOXs9&7-ULsz;52$lgdP&0$8j@E^yCn}Lt$K2H^ z*Q16okvqTypeb~tR-XnQ z*fo((pUw0#(>Mm))Ryj%wRtB@)GF9#Gtc9WCc;dSOk6kt`eua7ego0!*P7eZNRT%B z6Z!$kE7|7$^RDe%1{EKqhu0ZA`xJ@kU?Y5$hdEU3TXlEsWzS$nd+c5I|SQBh{cG?3&ReHcw~NcyKU!g?^G~xs6sm2 zBvOBhy0H%Rf+kZI>v?-YvAwtS3-?-`_o8-e`99fS95k`g&~C$35F@+dYcGR5&*ey2 zx^<_ZtjxcQfL6A}JSHv91NJTB;Fj1)umpq^Kf2h%Qdtq~x-HAxPdX!3+-Xoej-o?- zvrgJE^eDex6pGZQ&b%xq#MZ*->m$SHGl4(d!Yvm*-p!9G-WP?vKuIgb`wO4ud;XRS z+0_@z%d5qtmDamrk%N6!61M~WVf24iB?n3d{A2J$JFzmRrl9- z?jUn2?~_}jCc}GjCEmHrs)YrN7GI(cuA>IJHhCdlSMKdNxCI7zL z$NrM@V>C7qztF{q&hKVCDubvm>LRDa=~pN+Te7vff>#)_SdZD@TKL)mnZFWA>T?O_ zT$M{1$^Lm0AB(jv-7X6_fYnF0vyjXI&QjDEY6Yv{{tHkRq^^L-~9XnsURR4ivX?GuYkcU84AgnWaKuo;Y= zPqAN!KCZB0{iO`4gHCW5%2q+uPrU{vi@xw#H*`9@(g>%0(3H|g3IET;@_%;02Mmyw z27MhIyrBMr8{l_7{1qE}mRJK;*MC2a|Ejdwt`^U-vFJd9;(xMaWBIhPN-vT?dF$CF zbTo71a_H>YL?vzaQZau7=AX1&d{3}3qdnl(&L*x_k5#A@s|6A>Opf*6xM%Qtw zp1Bcf%}LU9N;Ir8GB2I*WO)->LV{=)KSZ_Tq50b(&|6@KnO~wpc1>j&Z9`i)b{LlW z?}ft0sP?4ljt0kNV=Er!cM)gRW7LT^?!?={)_G3qv|lss{IMC6&xj^Gzwah2R2q7A zrk<}jFNBjqH+9Cfoc64BZFjT8s?uKI0<}4drk$+#jOfKbUjv0E^OQF+*d5q9RoSJ5 z>LNTInud5PP{HMxzYongGJZ{TAf09VHlnW1=RofS=t9`**Vz;ImuU>$5#GTiyWE{; z{=EZLX`QJ@k!kzu62;~Coc?zCMvF#UL5hv#@Ph{wJk?``cL6haXt9f3Z9kowX5QRF zfLxxcidP8Wj?WKa5A4tXCq8OpY?I&%Kcx20d;TXzI$kIbq?BF&Up^|<(cvsvQ}>_r zOa{aKXrkO{xILQouDisanC`07>$EYABLfz57)_xowbbRYowT~KRkXl zRW(|Bckx*bZ45;bwG1DwP-8spnh23>@MM#?mN@zj3vTcUM+CFn|FJg-*%LPzrp;9( zlaC>1Ua9vdsdo@5DlJL|_(_faoJ5>|ES?+}oS(j6kJ?g+8_&Qgsk^yHOw6LC-q%!#+naG_>zD0$ZcUp@rqT5() z6C5!nc@1t)gqbaH@cXfWKUMZ5z}Ip|kkgaTjK2plZt55Dx+S>(S_OtTr@Ej z4_$jB+nM@!*~>hD>x{cod(d7A>u~=-HJ?2+9CuNtsC#l@}L{(hX~6{ zu)g?p@^!q+KkN9JssLe{I?w2Yb-WPBg#K5=o|=;IUz-NR5DA1Rloo;;PQIT-fSoS4 zu0*dm1U#}YjLng&J4~Prfl#1GWUWYKZ(je;nbpLawTEsPCVziV2#8xKm}5AknJ>6` 
zRJ1du)8K4pK;AXFUO;6kJ_T>`R!0#Pyc$kb))+{QNxr5t9mOTey!kWbx?py>k79%I z(GQ*5Sia;gg8Kv_v~XuAz*zC zjF{$^>`-~5*#@()=kxE~yvEf<(Xx>)7f8=7?aA#2cBZ{xh{wm?P1Nz1P&;Ypagmmb zrTKPAhUE5{x!t8z^GDzpbyM13!RvliGF7y;cL{PNOQ?hF?Sk5NM05&|EQM{wORuZfqL4^Z!=y40F^>(j+rV0tt zL8b)LcUD6-`noaIg#QA716E`&HoC&Y!bk-Lb^r0ai*eiAhQiN>g%z%1q-YEHlyQHC zfho7$EgadmR0phlRMkSsXN25O|7rpDo*pe(b+8!Kp&PLn?~`(C<1&3Lrt{jpl}%E4 z5Gq=B_77_+Q9sJeA?8%2Ty%ks^k(xAVKFML#R_O*n1E;?nASi(9+FF45Qm<$;y-dF4bkP!|LfFh@ zPq=eL^O1B9ob_`%d<=v~;Vq>vs|=2|C{}Kp-iX=kphsDG6QmN_1mFxcxop$oD}Xlc~1hpZ{J}VS7>LA z#&oGskvfrd_z?cRM>c2nhoWjUBmU35r-t^6Wk6U|vM2^7xc55H5FlNC3V7hAf|qUx z<&{#xpCy^G2T->Wr*=BhI`tMii0CN zDG5`Dzbj%mV{tEsB&Kml+PVRKrissG+Qwi!ojrNVahtaE6+}8@QMPs5E`(XcG>UKc$lj(z>yGc{pB(moM$C|VmkQRWxz9NdBz35#z^c$I+~UxlGov(cA305eb}x8xY0(4I>b-P zQa!Nt__C(=RukAyE_^?<^-h(SY-w~K@^OwVXTxv%FGPNeUOa&kJjDCU$v4V)@h{Rs zi`LAOk8AR(CO2J&M+wU+Rt1Xz3N-mZ=|h)1pua*X7)O-{w78tLnmOS*T%+vT_%XaO zv;@wW&KyxzsW+QNpTjC~bCW?VVp>kj>(N=_qQ<%hcb5F_{Z&gn@86To*?fJ{q zmB57NdbHWgl1Of2ZtbS#2*@fYuk*?h*N8$w@pw9`l$xXVfM#u#H+D1EE98P=xUQkc z=F|18qWg6rYtFJzifMPwOAr2Rb5ACl4Pd+40%7TO>ipoV`Q}_;|hlij)^2}ue)CT5;apJSa^FdNyUdMlDeBPoEekvzeAL21Q zEE4dzi(!+q6pS?sZK4*H-uc=Pg3WZ2CrN(1B#C!5r&-d2HeBvMJvUAaS9-yjB5l)o zwYy%6adi)QD|1P4F+kQpzB)64D;IV}`0`+8XoD}(uN-T?vx?E?=O`xZd2Om4#9&Ma z>VF!kX^L^hS~FI7zXQaCx|R5-@CpZ)f_@*;nc7+xsH3DzoRTVYyrK$W!YSY@EHxTR<;22iI<1;~?flZwbEx?A0rQsjc;`JtxlCL@@bY^~k>vNE1v&d0)@i#y5Fjc4PqpnQ11@CR**BKzdZP`@5_<|Xp_v#yPtzxdvY2MV)Y2HhUfg5eH3^vxcC2MK6rc@Hk-m@P^?&(nYp4!adF0#O& zR76g*J4?FS8sm8NP0RPtH@sBhe}$w$RqsA7|15XJ^2b4Zg{#Ove@TUVfQaD{jc3s<1(2zKI-Wb5xbHfC-h6~Jadh;P}3?ILd+hd z9&%^k_J{$`E0QfP!EyQhSLWWuh0Mg%G({&GjLkFs^Xv2Xzp>zS`~YSZzG9^X8{_&b zM734S0ONo6;AO&MrAA^}T3BxG7^sC_vs&F)%k<;@&G~322a-hL;b)0A&JaX&XfF zF5aWw%n-ZAdCJPxcOUf=?l1x;>lJ!1K)oT2fSgzLMh!y><6(t>kh`M1OTh+RSHZBx95qMg-zo~F z*VHv*Two(@{`GMlF|WjYp&H0vk)WLHaWJ)8XP z(4lCObDQMH!hJ94%;P^9$OO2oKRIiyi-(m(n;B7#uMn#T+7_3lm}DC2Vwk)Mxzkq!Ov_ysOYJTltKu;L6=8xpd z38L0%=P^|E@R#ZW6$_G7xB`N)z_k*koj5qW?y$!G$`#aqe@32Z-pGbvUWH*qBVXI4 
z-TQ{KzhGj>ubg#U;TDvd(wZe0N;{O8wkUIZ%ueI$kEs$;{-6=h#koUy2KvQIhR6y6x_6G6`)~$7EVu8>^OsCFqW(Jx&0a(4+6# z1-pziV`Kgkg=E&x8(Q39#EwZ})Hv26%Edafe3A0Y(+-aGt#&>dlz8Jd!KKvf$x#QJ zS-@RJZVw}1OSsj6Q#v#pJpAet`u8fTCEL?g z^YD|OV#4-0>$!(VOXFerw=i*hA4{L#6=F(&r}nRv>FhKb7d#CN0Rqay*@u+UV^o-7 z^w>Fs!N>=WgT-ra7gTsQD{Xwa6|bQ0F=mp5oADsE)E+m^|sfCC&edQh$q z^MW7_krzRmBmG`jkB46Vs-ETW z0&5;1FD})cA%+i0N>xg^lf!cyF^ku7{~9y6HR*N#u2L-L92;d*BvLdvkDDJt z2P6b)7BO=tKk95}7PhIWOA58)6c7qJHRP-R@MSz7&pJ~Ve3;^{T(8!L{!ek?e-~?5 z%2TG%F&We&lbsO#_8Y-n^5V!B)AHGk%eNrm7?Me{9<*B*e2&XS?g4X?YqvAA{_R4_ zTxJOjVuirov%?jJpacv8#)E_>uh}0cXVZh$aHK6}3^nc5%p!}oVFkx;wBPsFWIrx@ zG(K~R3(aOR2Rg^;kCexPXKuE7Bgo-L+4P&g+B3Q&S{tCt$Y6V!me||o!mA1&eknL? zAdUHRcCdV<41+fuLDyn=Ns{z6*TJ*}X}n-6A!HzQrE{|V&rmPk0HdwAAr$R_6Yt|b zo`N+~AEOm3XJo<78m=f8v3J$40+QJ48%~+7!Dy1wAL|FOGxfyEZc!ZD-cux22*Hu@1uk1 z_-q=-Cx7ma7B#;#C&F)P(hJ;Xq6fZ~e??e()VlQ>*5uGA8Kps%s-h5xRb+QK><-*r zTg`~+k|n>2;1%5eq*bUhUmnXe?MNB;c2Q#goJ-CqEF9GGkgvpy2%>vFLGqg(CqHmz z8^lKLc8MN9ip~ewe15e2N6*q`MdiNBqjm)1kLWg=vjuOqcH=_3vR-m~{!;+7R=XYBy z9+dYjMwJt)mVFXO@Vl3X{u&Wo@Y1L0t*~c*9Y->0$JGADWbEw+epe=m=_Ol(-8;H>?)+w>>+SLzaEe>>D0K8D zmNrzWj(6sClJuR>Kc$H@;$@V_hQ3o@(^i;K@I^?Ph{o;SuC<I4(s?477f7(2Sf zBz8nM2zNy$sj#SyR?nE)h{A#A(B+x_pB29{)2q|}Ia8nlDwB@y#YG^R2G(6Lc<#88 z_QyhF9NGEOYHA;7=rjv>rvnri41_OH0e&g+g${2=^8=~dg`tl-*>~14ZT?mnC!`0Q zeqC3nDFmGktAz%S#-$BsUD2=is-MI{L)80kbShfcpzsZI?Yx&7TjI zMzq|cdVmBLH@30`hM0~YEa^;&R67qQJZ{~-{ zc*4N9%kBIW$_HE3In{9Wq}xX#yWOz?b8E^0{lw2nWiq{w#?2Sr-Q}&UU|j#Pw_{Cj zE;U{MZjP1yJG*_RhwV+`>2G0uT=OYrroI&QRwm_fNI-(}*A)lI$$cJjW|_b{YZTZV z;oc?acU_(-68O4P-tnRI;02#<__Nhx)(dH~;rZcZoYkl$B2KItS$R<;A?%?qx*ZBw z*ICpmWCtfk*4h=Npd0z4IjMr5V)O#~d~&nn@)q-2h)ag-+&W?2?RZ`MhclRIyf^o3 zuD`d%Mqrh3D>qTsDiNd;)F{HnBEL=*Bs!(5%68K33DcK*4`3{d6Dqo9ztM-QJL)Ua zRcjdb>^8s9(!8|G6Msk7@o_6q)@n-F(J<>Rr`)t)5DJAWY7o5JoiS?P{cV!d+3t`d zso^l*823G0itfPk+}bneA|w2}8GYqhdI?KEh-e&-+xvy% zV^_T&aJ$Q4tfBgtltP3hwi8R_| z-}lCoPebkYMTn3HBom2Mr!Oy0veKIcG!x5SH|O&^d;U?3+}KR1jb7NeWKCvb*+sh&B}dL&iRoxPm)*In 
zC)F0jmifsl^|^aM828&qx17rS`iqz`E7PV-kum`?!k1n*%9*=SrB^Nl8Go$EqF%J> z=4>LY<486_Oyg@`1+r!bMw^i5ofF86w6FM^7(qu5a*kE^@07^G2Kbc7<^j!zO=t^3 zaAz&n?2%`UNvUGT!hiyP3uQSF;nS4N8(qCPaGvz28ETzSR#Xk8^rIzd%BtP!d`S^n zZy1+0?HNJ?er|H z?Mn_uy*pfVRwo-R0-4S68C1M`7yIL(FSC^FpFZj&ZavJ&UJw{8i{jiK%}f;uMO%mJ zgJ=}CfZGiXQSwghJ!WF$0Z~vAoFx}xh|7t79-)8Z$6}O6f6jY7ILaI1N*G~I!U~PX z(oCA{LBndjE-0-4k^6u=zsgE{(s}(*Q{BSAICRrFo)(JQQ->R1#1m!Yg`Ldf2o+{~ zv(fgS(*Cjoo!D?cwY&t)xn=eby!ZK9kj&+4(<$Qi4P3|FEl|9$d zXb3(*cQxz!YHOic4-3aWQ?0{;4)#zrue7bQ@WB#55+^s3HR%Jk7Qh+_Xtdm5;T(HH z{{8Y*KjRzT)fGHx%VMP9o1MTa#`Fh&ujSW5Xsd3l!AzGU zTjH~)v3Ajhn^!oya)&Ys18cK^1F6q^*gQb)Iaok-aKS{RPQQXL+$W9tuhmt$%v;i| zbXGd1tE%muBzw>V({}cI-*DafVjGDRU4+6Vr+hjqPiiJSvMhJ1tLoHT^?qmaOmh?2 zcpTQxS(O@3;|JNV&XEN}Iyq7KG(|6SwmLhsx-jREWLv)A*% z7P5a@z=o7=@%O0sMGM56`h9w1NT?s_wZ(v+$eYaV1jsmwH?nF4C^S+-hfM{HA39LD zSMM?ZICDQr4drqo0r)%$PEV{#dAf4?j4jgc-geZwzm)?uT{#oCH_jQgc)JgYB_@C~ z1vO^tYNHzinQjWZz@iuT+q=C98!W}L*nT}0vqY`s(1%0~c4qarQjuV=cI7^Hlc7r0 zWh868DZDY8ct6Ip_wbWmUVTM&g{#!i*Due4{SCklG`kVghgQ|LosF0B)pt~4=?blk zuK)VPyp}u)hc%|#v$F4WiJ#$jT!ipEG>nm7k}MauUQ-LVF9#t~ipbIcm+-g!duP86yhsQsgoIAMWJ*qL z=t0l=F=Zy-xikMSpT8;-9!NUSHzo@-JJM?WUPe*$Vffv^bh+@X^f15f!f0{w{v%Q5 zBsp5k0Zk#Rs{Lp<`!Brrw5r#8-A1jEseiMz>M2=J8F0)Wn8Ry*j#9$@4>flHi=Jg zfezouUJPd1J360^_RQzwktT+@D8XDTy$GTu$){EV@_%3rS5|LlE#G6~s&h1v*h=804 zrnBQOyC;Ec9aMq(PmrZ1X77=7@_}%0aHOP7JdLOx^}vvMcWX@IXaCYr*SsHL0AG<= zPU2gX&dB>~nNTGdmI?dU#9-uDo|3;tC)Pdl;|E%RhtJQP;pBg&rQF}{<(}zO-+;iZ zZ0~m^K!ya&7Kuu3Ouzo$RwvunJF7t^w;z&;QWPP7c=^}gNO?j?3_g@!B;zN%G+)no ztmWI&y5Rn;b#r}ghNaaIY`-ej&Ebx_|S}d8`kEk1lKyM(x>yTU?^|D zR8pUks5@WDeJLt~vyc>hX5i~=jNrAJ_22zw2+dfmxMEzm3L1}lkrP>^0MUIwa2w1D zowj}e0j=?wGjrqVB!#LG>2hz&N>2aE1S_cQd}GTV zpDSA3&c&1hp=fSmkga*{w^o*(=LBr9S***RLT^aTZT-*0s&JXwR> z(PW>nmAo@lEyyUTf`5cei~XOPElEXb12K22CqZJ8G-E_9b&>y{qAk1rC5}?*(f1gE zli~T_FG|F|c1&iI$&C)}@lGi9hUXMWma}of)75)X`*L4ce+v8tw6Ty zy37@rSyC@k+2-g6p+P}@>*~(uYboxbvA$31_gFaB*CXTOJADbWLF1#=?`IHi!ke;| 
zIpL>}i0$tq&cv@X1KGJYd%C{N!>Jo0OF9oX$NDJbk=-9X0771u+xf~#ay{gE0J--i<8Bk_D51Y^FFQtBX1 z)j4cgFK_L z1OD)J_jXG)!iu)tQH$XJ>Xo}ThMz(6mRH?wM7bVLrIDVl`^lv`1F{4suE6G zQi*~{tCD?WDxuyKD?ZygxWs_6neOL@Re4cv{m&`3c$%OU z&x!|%YOoBY0NEYy2W&t6Xol;I*SM=C=~w_FumR;j;m>2`PC)o(<|| zamglw2`1zMpn{dgLR8;qBq;%QUZ1&u<{7!Ecyh+#_rGeuxGlua241H%LxJ2edZT6~ z?Q=gdDCJUB(7ORP2v5E08}>CSmSFP{i=3g+6~(A;`rY`x15grq3BrFENoD5 ztZItKYl(y)%YqR~qfusZBmGhDj`n@qH#OS#7ebwqEGjB7I)JdWn7f6dc8qhBcqFl z($Y4BtJV@4t)d2N>K*9Leev;vyG9*7eedx`xG|~n7&Inf8ltxd@8&>Cp=mq-LdZd@ zBcSU3e8|K*4Egp2>(HWv=w_iNP%LL1_Na9JR69k|w@Uw^7I zm-1j4Ft;e#m&J*KicvQ|eUW&KyH;SQxyLkO=1ja9)^hc=DxUHaWi|gz5Z?L$wXvgx zS)rL_&*Ip2QA*AxDuh4M$#l5j?SPe)FY52*sE>j0k(208?`JjNXb&MG(ajm2U3T$jp(|TWPAb>%d2QWb!@@l zaWFe)G%$1A5`T9;goZCWZH*-QtL5xUC0C8fbAP|0C0=C4+<~rc)l)8~u9O+G)*<_o z3A3`nL9}qLXa?ao-6kVQfZatiBwQb{9&sD~yD162LaBD5~^`3iQh&4)wX2IlvhaxQL>#8+Afm5mB7;i7Y z3j1cMG4QB;N0!^y6Hz(Ajhh*j9R#a!z4zkuwAI-aq~`~~*uOBgNNt-tBlMEbneGUk z2i3oFZ1|b6P7*5`kFbZSO@!R-VLKniW{zDj(B9qNN1ppK+cr=~KocE-XSmCgaKSy-=K?f%_eU&I7wA8&&^8m${@m43-K3zV zH9t^q=zG&tzkSNEKN(L<-~1{3;kGYATv1AGh~4Pu4H+^aAm*T{nDcfoSx z2a=>_yBqICt9}b|qo3+ruKyJ7HlGBdiDja! 
zUC-EZGlKqXKS8H)bwhEPf1Rc?fWj9+h&<>pGj8#2j{V5|9WEM)-IE5l5_KUjF13g} zH;*m8h!Z}ed{_1%%6?)e9BF+^wVmV6zhRKK5gC4|jNmm?lwdda(;7#n(XAjn(%IMf z^pzHL-?2tvDlK+vbT{hp#<(g`qNK~1k2sGp!v^IS7(kvm`g+9p;>$Fk2DbtSiQc+U zxX0CE9=|29QwDfLBCrzTd=zQ5e zVde9|yFv`;e-C}QQe>7A#`iXSD-fuZGs>fM zD2RSa`{QYszT1Iy{6F~z2;sSw;wMTp?s5=B$OpW|KK#;nKi=Qv_7C7!RG$3Wey ztx~$?xCtS4bZB;&vQtiBz#n^AqLnPZe*e)IP&Q=7C!+Uo|ApB4RaBIsJ+C{yXJveW zb73)&ngDKb#jh`{ueT_^Ql=^gga*@Oh%&!e{_-s5k88dxS7Be@?9cK3VAHjk$QJx1r9Sdbk71m_Tb+-S`C0>drD5_MB>k&dU zuLjt|n;h2q*}(NpF zhf>!%wlmQ-XKJDQ#>3|5M~)2;JNv&<-I>ovhbkLd-Eqk}tv8bo4{=-B0rrDz7oM3GRxdYGLrXPZ3J?(Z=;$yd?hI#|qv7Wv z;i_*B@SU1Om{DVf8IogQ#XbmN~JT7iPc$1@EhYXgRBb@}U1}c3s_*vkV%w zZl@jjC65*vjwvkMRujEFS25XLnRmly3oSmb0;|>mk>|D-MwSlK4(`)zg$Tq1>IdL zxSF&Y7y^GG{8k!S=?qOY<6&iY2p0=0O(v%cuSsVwLr@S#dSp$11syEk`CVK+t^--m z1ChNQos5r&N{}svqGKoahNNp9%txKiee($HYBgoX0y1k~J?#EHktz!5iJ7HfGK72t zCUqOQcs1Nd9_LZ)oiGZuzvW_OM#J#}ROAIKV|D^R~MiN$XoGUnIb>l)Pr zS)Y~N90gry1b`ZtFT-Lw5>98RpBm;bqd%k#Km8&X_z=QARxC_Mwosdb#DivHQ%wv@ zuTUxZWFV38do_((A!a1!V`Kg>EIUKIDpLNVHA5BXK=>&kWd!yF<0-Ea9ePlh*G7eP zaucPbJ<@M)U*te6&|}hEY1=5z|B(hG%x`Y7DJmeO!}6UnWd1DGz7rPhPR*!><*3$z zH@gRZI62{drRZly&)#vV4`By*u=1BJPG(8xu9KMy#j#Xb?LJ9h@ryYSyM?XthZCyr zGfIVGkI21mT-H!2?Jsr7Td^x~fLe`9xxPwNe8@b8g=P&bNL^An4nJt_zL+%iEK#N= z(VtqMtFfByn`@5Ki|+GXM1fc>?IIgNhxkm5Y>*{h1#po{@$6lB^kx^bO+PgP;xo7( z+^O#7^{;0&C31fM&LOwpRC{ECV>6d(FTdkp3v@;vxl9nu0o`}qv9opsmmpK(V2T7gck4OKxd z_A#s~IMIUNcsv-?zY`wqYEO5TabY1U?37P}Q3ay_`ix(Lj*-XRebRuNo8*{@t=1_TIl z+A#8OU%%Dy1fdcbia~$-3iu42^do7b$gOSTg{pItq+OSLbRQo!i&YV~L4ZSmg)IxXO=*-4;wf9x&1PZG+;$IurD4;E2ALy0%1`ZTFl+hM zZJR&vMRvajyuw3&(EO+W-n!Dd2!mUZ;a9h)!Z4ts5$H#_!Y>v$7JFq~!FhjaK4>8@ zrn%%WS*RINBC$s`rq}G`QM|2}T4=M9!#t@cg(Ntl={#E@exAL%Ve9p*wUxS)#xIam zGQ8J$PQzT@GIsCkh@Ck9wiRQBTv{JY40LrzOVc@ZuuW_@Omcs3a9Dtq)3d25)xGGb z+O*p~U}tZLxUVuX(&PEtw5SkrduOqzF~y8;yJ0xF(?qy_itHI^<_-&CJ zS-H|!K}+8G<$fS*bMV@mp<_$M@cSFjblV7ZiXCU@T1m64!RY#U?y8KlJ6+A=o>{~7 zAOtV=2U*WMXBNf63TD4RCAD8XyC|l~@?etdo219(kA{v8`)pbuoXk8qOXjBx;WunWo==3%6QM 
zy8+l8M6HcbkZ~F2EffY>*!T0MWJT^jT0(O8;h8lwsX1k2e2kTVrxCN_{bQ1M!6@@p zTr;7-tD!b*^MZXTRU`}n2D&`hKN+Ll`ut}1XR#XIRDs%|F*}v(kmiKcMHK%oeFLp< zBq_!4D?W>Mvxn516i0nAVPpXvz*l9*3C62Ee2z`K|f|Q z2&NK`_Rh|WRBP$wKA#w&XMGJol~Broi62pH=_uQ?SoNW&R_`6s+?&x`9cc(n^iuF5 zNEXLaYMDxeQT5<0tr4xk&6TG9NmO_-VR+HXvGgrh-uD$+&+ozrbV)ez#~yBY_wwOl zw|N_$)Amb|g00^U;HW3FkJf63I?0Kv&iL!)>w5aorBxUKq(he{TJy7yX$`+P1d$nm zaqoA1hxZWesO+DbLwcgTEU3HydKZJ>vT__u@Gh zWzD%g2LekgDt(+(+>zW6A2HAd;|nKf$hrN1ZZ}bX(jYle^KQ9C$NY}Bhu}GM^W%li ztu;&*)HWE^_68VVi~B-VRGt3UpXcvQ&o7$y!ih>88(hMJ(pq*$V#?S?bRMp>qwV~; zM7|@7tF5_PKK3AS3IX)cF7j@a1=o<%S&rb5EKPT4aBTGYq z>+I(hOltvxp1u&v*!K;GB1jP!5^+zTVUSAl@i6?&hacc z{?NTq*Axv^YcSqLMQebk8kgP4#jIsH=tp<%sAW_{i)%<{W$I$%lJ2aM)?TaXY;J<|D^udJvKI!m6x8{JPc)&Kb3X!6se|=`P6>eR7b%>_H=&c+ z?oIoY*IR@Mgnhbx$RK_1hKT6+D?57@(v7xs)IlcXN!`()V!ZsKT!WVs`gzXG$iVb0 zmJV>HaHyUpvxu7E+-lGG|KIVXZGpwhm-NDdR&{^B(amDB( zwkIzv-kEgDoXbCN{a&D-(dXkwOJSwH8Zj5aa8Z}nNaD)XkD?sADI@wfE{R0gmp_6V zcPpQcf@|IProTA;KwoHuhb&<@Odg8tf`+u+oiJ%pH8lH2+f+kQvwa;Bxnr3tHbn}o z+6Kd4YhjEjQoBdot06C*a_m!NKn4b{E{Y)tty7Bf<#rdV>lK}5wsvdj0k< zQY7G>d3~^SHn{1urlY2a&a4h)foXC_Ra8V+HA4C+K`h4#otmD0;(T@!oCXK>?Pxt7R8myx-px_Vo$=X~%IPE-t^N0?ueT=>OzA7P*y?)wuq`$({IP5z8Dj=#j}?Kl zxsz+$7AjP4FE?|+@AnbWEb}p=$m=q4nhi2jGZwyo>S*p`r+3xC!nQDB#9PF z19Y%oR~&!-)akNuqtBu%1g886G^H)8i>ne!mPq<2e&w zjW6;0z~D{smg7y00WMKl{?%KlNP=I+(|J~|_V zR6VC`_Zw%I{8^(T!=~~pnKkK3^AwKm^`YLi*GPn%(lwxo_-p;o8=1yYQHo#@f5hc0$;VVZ@1_SNlu7%*7#aj+*gQ>dM*69G*U5F_WfYm&RqLTFMpg zV-?iCVB`laOyZ;qZMX595>lp;u~oGHw10M8d~T@fskt!Ie>^3dc{Epw{GG$L$mwu$ zzmqa)=NO-y-F8i4Du)l8cLvwxc?Z&%TG#~}hQQY7C3!pEL_cc8UY)0YYqSd5lc^K+E}OVT@D2g4OYfLiny^=iY&LW6kbY-d&= zeedHa1wukMS*?~qy&X+OAD*1#h$GJ*hKvf(>WA{#_k_73w-Lv$y`seA7L&aj&Gk?P^`*14UY^}2)a<%)zqY_% zwSo^+{! 
z+8AHrB1p*)<0XX+Dx&aBwrBb!9ao%|9n%A+_SZP~Q?rbuXEDMJbi;&8K`1moLl1Ws z7&K1GZ|C(?KL5)|D9|#tX3{=|60v7?;y=&SD6k9!Ot|^c8XXdem0jxdXwjL~+>j6) zPo#s#*&sDD3|>hrH2&(hu&Vv&ROjpFq!qh;@;?$w6Ta@^(sKJrYNpBa+R*9^Hp!yi zRytbq`eWcT>qR}mbt?72O6PokuN<$15qQK@b z|EcN!^`|u|4C1TVuSFH^R4W@ySz}t9NM){fBZhQ1=5`{|_}v1Fc~KayV4X+S;JHfp z;jz>oO`yX2L}HOe7o9E8GM?PH9Hp-rF;-B3f9B_M0G+@84mIr=WXXM#A~PV?Y!eB6 zViSSnk2tZP8AZ8oz`Ui|7%Q$*JgcbuTOK&%Z$Uq(q!EZyLzUIyUn~ zVfo$&sP`OFWivTeHp`V0>ZOd{8ISZLf(8<=)9dfw1T%+xrZHDN&O&wS6EHeY9?|$X zlU6vs;aN#1o77s`(uzWm6K6uoml7jJmPoomF0!;`0e%+g`4ygLOi}OkiBo53Wy^P~ z#EWL3K zr}?D31sggO>_+aHkQ;e0lyvw0N=8tiB|nD=?sT!hd*aQ!RaS##LQ~QfRP2;qY!_@! zxHh5@$3BS5hW-m}Y8msDyg9uZnDmGj)159i3kb&8^UrrBqCd!xz#))V%9P;d-HimF zE2`gDE8u|7^E#^0FO8mg`w;4CE$SDz4$8WpU=spu5+kJU%Wv?0N=x!32>UB}0l454 zY2yD%Mmb*FD{*u=votF4g_#jJNzgcD^ZCy!i=q;*Hgp zci}Kju9ofSDNSz30lELT+5{}LU(^XlwGYm^L|eLP2=GKVGM~{vE*Hgkk|M%(*iMGW zYk`_JmV*j-SDW>d+1^?bc_a%Q{y{{VDr}T6a*8T;dd=^Ci0NpkhSotz+vr94DRDy) zd3cXb(iohVo6(6W;a__ye~T$Gwm;pfviH(|%())M&z76WNM3jO9rB!zEpXZ2PuZ?B zLKM!Y6&pep!&|1=vC2zn`OT3appf*)y%1G6KKNCKkJ~sid+c^T6GM3H8SP1X_Q~wv z%h%Gu>DC}5W9jAlinq+!8%~fAE+{CT^|VIZ@h&%>KFg0fr(wI7RUai>(~kJKPy|T^ zICF9CDA#88Wm8+of_EW|zcg+jHgO3cm_Lz4^xE0@>Y@{xuKQM(1+z}?SNi)U`g31S zBw0-&wvr{p$W0;`V8l!9A`0;N-evJ>OP3!X*Fciq#YnNs$CwpHL`0ptx2wncNRn}d z^4pZ^sIz)+&;wmT+w2rO^+t`)x{@I*;8Z%efnp1V2JiHH^2S5*jpHoHr|~VffQ;10 z#YKgNLD&V~>&jDUM7APOcSq+J1zJ!w$cWU@`1mBoSj66AmOv0W$JiS9q z0+Gtseg^IGNH)&P zL|BmD3`-N%k0L1z3;I?J&L;^qS0E1A%)zpS9u>3XrfL2f zj#v=?#y45Cb^%728SbP&Z#hO95^LwQ3Asc9r&yGoiy71p7xpK|r!#sSw8`<^rAG{q zZDgJWpf^xLZKcwh>agD%>St5_HjPWeRGJ!eDI zgL6+@yfW*t4|o`&3lxLJvV%=r=gE85(`qi63Qcg@gslgvI-ZLT7mb7Yqg%&Wk>Y9f zs03h<1bAnoq^##&k0-$DWjXT1??|itSH~Kq)17Zlp6`EVP0E&0?&oUT?^xv@sKpNj zUB2Vu`C0orr)SHhC& z=;!+CuMj3Hr@;_kjA^k;D1}Pxx!3mfQr8K_!i7MI@JoJfF>?xKQ33vRI3%Ob!qN^N zFr8hnWtH~t=BQGXkSmVx#RHYIRiehZNmqmh3La)$z*;hlgiK8l&=O0QI-5E*)Q2_B z7Z*RZz4k6pv#!Sg;Yuo+ymF3(e1p0c*M7MywI=rHzWJdehPF%`1_D))ma?jPfzm9$ 
zXcKb8>b_-*phL9%+ZwI>oOExKjVswg%^bJJq+JD~i{xOuR2@)hL_AC-0k;a7SXEM# z^7z`>xA99s#g${9#paE}zaJ*P8ge!L3h$@99&9fxk~X}}Y)TdlvQk^pRgzrkO+du5 zJ=%=Fs=U5ga*UA$T=do2>1#dnAavrMH50r=*ZN%!EMcQg_5O1&?={(H=J!vHQnsAe zWQHw^rzqbc2l8I@a03^Z(2a(-=TFn2#=&>uDJQ*49un2ldli&Heq-vkL93O5r4`0x zx-g}bF0`XtsE0d>mr|9O#%Pk7QbDf@YJm!DiBfS@*L#gB3qu?kjf~2 zSK!b}SP~cF>~}(ow3qA>Cv+eGaeY7h?re)8YranTRE_edFDF%Nw>V$r#EO3;qfgo{ zh9bEhb#GwPqel2!7a+pzg_D}M`xo4s7RtUI$!}LPg6uc!@p6S)IHU0RAfyjgmLSpp1_tAF7g z^o|gr5e9p|as9%qAerlFht$s|T{rmQu-h?&Z(@%6A)9V^wQeJA=jI|Dewe99sVGe2g`>N=^$!2pNL=rwl;n#pwB!5MUA|r2 zdqCU!sYi>6=$V$JGFfTiSRupi&>YsV*? z^IeyCv2p(3qI1U{{t8|l)71)L6df&^-I)SRUcy5)BM0B19Bt+}Ib81S4Xj)4RHFR3 z)9K1U!CLX!hA-J0Tbyd8!lh1=R`8zMKU$%>Tz_D|$RFp7In)3*2tsW_5?sIQfGTZV zz77tZxU}y$9XSiJy{x`gqSX zx0ju9vfqg29MaX8U+ItI-3c-ik=b>6aRM_A@BUMC)8Bg737)y-C)=;S=t5zZ1>&PLB(>A;oM5Od2 zh3Ia9LcJD7g8?w1x#c4N?G)Mp$dEV55uIyQe#E5yF(9*4!uYbw@cV<*ZYAcncZ+%W zaFr)+`ZAG=b1bqIMm7yH|HztHRFMBM*gqkue}KtdQ0NsG=aK2*@#;#CEFk|Nm?S@HlR}PW~AhXlxM>R8nc=VNSFAJuJ zLa&0bq#faPR8+^EFSxN4We56Oyw0R{({EXO*_{gPB;XiWE66;HiQLl=Csh&y4-Bp> z8KB*WlOoBgaLnMYC5u&>MF6&AM2}{xiBtxTZ7gh}*gpt>%V0cl%M-B*L@znB?aA`# zuX4rWZGnZn=d1xN>d~Qz39V_Go0;G4@5$PSo_7gXgGTI-d#~tXbt)!pwePlha>G>;0|X9g)$^ z8HnBBm)UP-;dZlP89=6B7KUFJmUSh(Er0MeH+v;Z~f` zp5V5zCYBvetI6Ju>WNIgZ|xHdvw`&UpfB9{mUjO_jah^1otho-F#r>+#jMrEH>-mq z0>q10AzB8z(}+`0^9LWTu<jP00iU9Vqqd)yg;3y6P*j-<^CGd*mVapqH#_qR@+;XwUUQ?Y*z(3h0< zl>D|$DVhX?j9gW@9+cUci)2g217noGp4W4X?}3*r1#V6SuTkzVz`XT5 z&DZSxRv@#VPsSo!;1wG#lX&x1j71N6t*aNU)>LcjCvE*NZoB9GYRImu^@sR+1CPG+ z;rASc%e;E(b)(59N{XJsu@CC-F#H4*^V zZ%6_;H zx;^lyuhf;be-S)ZF+C5jfaCs55LqA!kVx}QLF|&Rw=gFueXkGPh)lDXnU5Vgy!t&? 
z)pN=#{@^utwGLZA5)q5cjQ(KzO^|YK$zzI|oj*rJN#RGeK4PCW)ud6wbo2SC?D+_> zd#%jQ(q@EZ-fT(@{6pz+hapt9wAoaia@Dz0Mo(!;Vur$+I2Ts^J$78S@oH+~*O4WC z0pFZ+_gXo_#&q)%d&AG#&)LRrUD%Ii4iKUdNMLBOu~V94JK4$l%q>J$UlTSPMYizt zwz+=HsMc~oC4sE7BV?v1HQ30`IG(a&se%LT#gf&<>cGkR)?)FFd!z()Bx~7-o~wHv zWk*&bS}2CsIiiVAzJ7n1mHG77w5gbZE#zS@Du#?>~c-)pP@o?E&=h`?}tlE zhh(Rhv4_JSU#XRJe6MS5JLnSKh;pM6>*+VZoe1RB3V9SA?$LXDn%vYxN_pH^Jr&%O z2na|VDT;LCxcq-zSUnw~&g77*wAnJ`;Hw{Nn2w(T{bkHoreuybNQQZsq(LgEOQ$qL z=fur2w`*OB8>eWM4rI{KE^qCVBw3PhJikd^`t{rw*qzNV*&(T$`lnZQcUz^=37uyg z$Wl_=zuvdh&cpHOJ5r)6%DZ!bs5r0~d-%JDC`D4&vhG6#I%H{*_i@(RbZ!05eIYEH zN2Z6m;&j-WT|I2VajQaS1 zWHL9_?4|0ojH?A}qNXQ1e=2tO!t_1$(T!2`%**SOn=kGd?alj7$qv}0Uq)mvUx-Ia z(>|u@Wf(j&p1xyEQ*(yjHHd-SbBCsFUQ%9uI%`bX>>PGYxEoE0T|~^sCnp5OVjEXy z3T$q_;M_im@%JzVj#oFhpXGFHlIUM!?*0U;sH9C6>%(gnB8pA3#U8WSa)kYP~(%(xD{82~fI7j{4 z!Z(2J_PDyT>q2UGu0Nq%heX|R)ta)NN5)_xvMIS2`x222XzT|VBGw{#hIwt44IN-> z@0xJ^%^ll0etI)}@1}<0;!ExmfOHh1J{)sQ>3JXCO1bcv zOL?(663~zB0O*WuWiX{xV{5#O3S8iFM>2dLLN5&+U)Vk3vF33b*PR^lIWdx0>YY^& zxwcX(X0ZJC%>8qKf(8`kRs^!)F~To?49O#5TnkgXpMOQrS^nN^KWhMRI}gMmk&h2x ztQ-?9^&^n-CKzJ^!f@??#UV873Y)oq?Ox_I3hByiQO^AkX_g<_*dl6LxcC3ev;9)j z6>ij$x>`qV>{)G~2Up1^B~P`cs=s2k6zh^?)^>iCTpC?IT0JJWi3J2Ama@W9t`%yO5 z=sHg37SK6eSyCU)%{~h}9pr5hB=_8Nd(LFe68CU`b3G|?lV!eCy!~Wesn&s1*0GD* zqGg%EHiQ<})M&L_&K*`L&M8T`_{seWKj*DBmGUA0Ic5G-*BU6S6Z6IvjXRNDJ$frC zGflwNorh$eL8_hVXJ5loavXAN7}KJ%^$MQ6H4vxs<+ZvoMv3$8G;;0j$}eqsi?O6h z5!sHGjLtKN5mNm3#Q}|~0gXto4IO&=0K6oWnE1}FiZNoro(mb-7>dGr)>i$a(+Q{R z`puWhmFK|X+qK|T;ElR~w4VFB3qe;MgfGtOllzgtg=am5S5HT(J>0GA-v+KzE|9W( z{o#~oC(DzZ?b;K0CZG%45dM?Ob3mE!V!@7$WU~*P(%Vk2@w0!PQjM~5fGc$w)9#14 z%>B7lI)>xmb6rv1l!*?d@%|phvFNt75VU)YNew}Ko6VeSk(qI`Liy6_WhwoM%;-n_ zcK6X83dS~~6v<}S6IcGOE6Dpn>*&+X-PDh}fT>omZLE!xjl?a2xySO*E>!+TzXKlM zrpuJs%>;Fnc+_|I?|(Yhe}vqn_l`yL%uz2A@=b&4P;ax}Su>Hbn69|xLGj>+1XAd! 
zm*n0PIw>^$b?wMva@|@|Q2gc7%iZ^QSCwY{1PC}~4GiLLJ&F1J%V8Mv>vP+c2Xm=3 zWR%rR#n{HZIaR;-8dg%3&(y70cM;mC)6OWuFLEaH2Y$cTX7U3GYt&+VuerxoJI4Y2 z+{*(6D_tHAKlKmfb;jS}sx1(Sp3tyx0$cQu^S2*AC%JT!5ektAGvVB#EM$0XXb8;4 zV2!7BWOnFV1VQK*oII|8z84&G?+|NEJxBLA5lsTZb2mP4=Z2fc@s|2bmAk%wo3G_E z+Nh2T_@@U#H3VAw9DvJ|Kn|YHfZI7a)RTRY6T7nnlx-2bljNRZE%N21Xku}&Gof6w zIwCY*lI>MrYibreGI&Mrdc=hrZ_M!$a#Q7{q!N_9%_XS#DU{PmBAZLdJcbLgf_03} zX%bVmj3e#Q64plN?wRM%Qt)IkHe^+MRY^IQ9ry+b08o2SlaxmmYrC#13%}dw7CL-z zr|z<*qH^z9+GMT74>e*zcSD)StY=2JdiPulYt${#$u)n}gk4^Z%FG`+qV&C3F70eJ zkvwFHS@?50!)k83%2A0ui8>+sOZi8|`Fssz7rWN93kXlH;L62^0(PI;+hV@2c8wlR zZT@}yTY+wSMcS?@PvBR|hiWnL4Yc@G%-aFRG&-a5^Xakf*`)d4@agN?nbc-OBujM1 z9J!1O+fhMVoi^lOn{`<%`leMAuD%>dP3!fg__X5+v`EnJjSRVkiGm|hV`tcMILTpy z%!p|eQ5g+rGi655XoAqw)S6A_%mRa!w3=J9!#t&)#nBg=D z329>80EH2$*W62{;{`^|F2(?oLc+hV7)63#l3U&A@YWh~8~5dB$ewY;8+9anxB83J z_b19KM`ZbML460h##A=26YEof7L1_YdYi2Agex{-Ekl-zDfK@OKRS@VS@4itoV?Wk zgSoA#3;}*aEnrD{4a?cn$0ZM5bf8Hv%u6M~;weVDK6r-HiV(H_Dr18RohIWtw<;`) z0k%uy!(eN|Oa7+FyYt^F6DB94W_YN-{HzMoL zlL&x>)O#H{4#6GgtEBXVSa0{A!tk=PPYJu|ir7YckNBH-C!>$6{k2Efp8q<{@V6BW zjS1w6K85-v;rYct>2fOOFHCfd^aT_Hoc87=q37S~$rvcv@Skeh&j@=07k0JF7Bv&D zK+Hs?#c^r{p8o%Ui;aA(2rjjS%8!|^PV--*n=t-<;&!nRyEqJOBN7ghUXYC_cxDUo z$_Ndb&mAGSGW*ks?mgGT43zPR|MBe>Y#TWAkXAHg5LY)!)3YhHuE6abL>eC9n2aZ# z`Kg=tZ6~UT!$9D#CB3T!^er@Gg7sj9zxT5;L7NqU%fb8b0oWeni`!N z6F14fe_(ruutRFWC)A&FY%zYHs{J|~8%|-NfVlhD3W^xTLdvJ*rSndhxoAo|aPKSw zSfx}IZrC3bqPi)OLy6pO7RtMA?v@tKs-!hUb^ed^*K!VQJo6$ku2bi24`kxUmS)5) z)-hmB6oJ#dc>>-OhB}~RC-EmBG+Z;hI6?+9S4zIBK@Fz{w|As+;;wPbC-v@yer@fq zsoG3Vll8dO5IFUHEji)uHTv`Pi=8h=`|w0gp6lFl9BtY6H8E`Jc#zd4uc9j$s!1l;$}4x2 z`(6Fx7Z(>dd)UCF3~cs>^DLP|Zg!JlN6@!YS(mc!CDjKfZWy zmEf~x{5W-T6Tw|2wTS_^hXcr*EcR=<#?L981SnT8L zTNuDgK#S9y?0R;!OE!)Cd9?}&G}rSW>Y=-vp1!2jYK}fVl4ONrHqh2lucy3I`6QVw zIVX=qg#$Z9ic}QAc)p8k9RC6O`|iz1?B<`-DIb~lw?dq63=kf#ZyP>q?A{PV)&3Zx z=kl*GZ*I!&CbGQ2jtrD3GTYfbJU5R%i1>kt2_uWqFc!`}lr_IMR2;sK1#C{pGm|}f zQOIca9KJceY%`v1otqTf}C`|W1xyE#zs#RjgCA-(e^mmZoj 
z*_#7|81~r=PX?9y`!qN$sTzQ}M0`E|XYen}P9!is9fnaIzTXo=I|nm%kBv(S4EHO8 zJS_a|atK$}8WQM&=lpIVj}BsuABVnkrRT^Cx{`cYul3^4`U|aOSRk|-@qS1vl#R2g z>LEDTm}fKw)~aWb(E<`#cpe-kPn3G@P;vpT-;pPmgAS}e*O;g^>(k7=Kg(LgJ@RZ; za|*^FP>JqgmvN8$f1yVzuix78K`=Q7&A;3{yw5&2=Z=v80Ess*>~%vU#WTr{S&XM# zi|?&jK5)x>CRsn zV|}h*#OFX%$nhO2?wRhcqzv#Suc7`my zvAIW|x$NozO-9+x_H0T$pKS$+yS-?|EVpxeJ#UOuB=Ceo9AHIQ`z{;$lH^$|UrgDV zNz(_eYJXu2(_hlStJ!@9hyRiJeyFkA#geK9cuGKR-b0LS ze4lC{)kbyY##VmQi_-a#OYEXTxVC77$4RGnnXjee@K)T%o&uR0BcsWSe0p?v^vxq_ zZ7b#0&lc`-lhDWIsOA3x6E;-ulm6dm!idY5wa4sN&T~?+OCH(w1sUf#TZD|JK#P{p zq!sZX@h+eE=!g8bq+HECmKbb8+p&hjuRp}u3V?f;npYx5(0&b{EkN#KF23ozZN98W z++iH*-2-{J#eD7(Pa|F#mnOU^IXK^i z++42}M&SX&hrQ|*5%{6RIPz8-@|(p0KBPJBW}h=d5Q@OTK9BP!14kY`ASDj^j?Kf# zw3oD&*}lkK{oVUkaY?#c;)3lv-kW75EiupfPP{{D=D3y{p9>2IWO3%Gd2=^PqZFUM zuxHOdshRq3Ax!cyCI&&_WSA#c_wc06=Wp|%I^FdHhB?hT)&y*Qf)T{Tm;@aIvQ{x+ z?oHq?e{f=>(7zkzTeJ4`Unv4d3lj%E)q{^a73BFGu1RzGYRd9i)w+ZRaTNz7jKKvK z2|>jor5I{}()F45y<0Ua&}OxJZ*RZIq(j$Tk#INT;tZnXe0vXNer*#!N9LaM;jw;@ zLcT99rL}1Pe2#m0O{gZv%| z=fz=dkYNAeL1Ibn)kFP}ul8Va&`eQS4a3;D$`SR8BLvr?%%pct|I?F)>ig4CcrvXz zHy^qT?fYa_T)Kzm2_yWFid#)@yy*vsnm(4BQ$Dpc#w{B1?$$HLx%&7aiR zPkH*;u84sD&r7Wiq}e=f4EvsL$Y6VAhg~tx`?HY*a_OUm%G4px8Up11L^=MuhN)pS zL0Fk+bL%Cy>r323Zr^i(t$>zvXIGW>ZL^&suPxk6NlEQQ0<#)Syzp=*cd=5Al`+bP z4h;PV4tp1E#t$NnHNrF_msh_W`4`_>XRr2{$dl*uV;ZX2_HBdsbW4@J;LDZPIy3QJ z_OUb0F17UJIg3~CrLi;dTsfpuw!0 zVR9n|?gE0&Xr7nnHblh{Y`J>dNdmc2DtDlb1duWl&8E3{-)4DuXTNuiyO^37m?zv_ z#Fa-`#vqejaWyMF$1u`GBbjjX)N&X5uQ&WygXDm%^$CdE|_iToge22G-Ciac)HXWU$ zlI9c*`^*S29oO&LwxR_Bw}2777Ax$*FeG10bXw)&rXMXXHDioJZL07FUP+eoxi`yAb{FoCMhWC%T-e#P{~^6C+` z?!fXT$L|9h{Fa`zKzl~*oW3lQ@h&g>k1g2L;#1vG6%g0!9l=-6hrRsoonarAn5TX* zdw_@FyfNvm%V3;&!i{hgAD5Gku^@0tpB&JiPFZD3JksQ@99`{iKt%A3UCv$_c|=U4 zFzgSk_#Y2sJo?i!`@0gK&q#fix$v?bQwDJJ=Lt1qQ|_YOEyC5opCBAic2*a*?G_b3 zfe~MNi3Y5PWX3r*aDp5lm=di}4N!npTPuHvzjBqN(aI_AJLpbHI*y!f%%kNscDY-VM0KdwSP`0s%bc8p;N@17kGNKpeC;T0@<+Bui~)aD z4Q4^EXi3rk2fzr+FgJ_6|F@hk_-Sv1;#FqfyfkYtAM+aOz9ZT6&O7cZfAX!mjK`Gr 
z<(_@JguR*gS7)P;jA3^Yo%9}@6~l2jMFH;9XS8kEz~S|{XjM3pr&Qf6i?xgZ#M_0M z4+gfMtx}&DfP(qKLY3Ghe;joPmo>9i&GYVuh@qaP{o~8><3%=Ha#&zU%{PQT4@&Kv zITdZFq%yd*da-g5V<-6ZN|@6!I-~t+ye>8nCH~rN?CTUTpf=-dRf7(!-pUZt*06^K zoHWJZ&222tN-6Qm6dy9+mOrew=yqj~-YeCwD)8G;$QLB4`2u|%Ff;KSv`7*?JLq~P zbwkW9DTbJet9!vGQM+$q7lS&nluWU@=KtQky{Zuj8Pc{F)M|W~v0Ti*aDf~pvbC_| zTFOiNr3v;MGBR1qOwN1wa?#Ift^^}Ow&+nv=~<1u3F_~Y>XkCTSZf3mS9if`d)f{V-hrThZB_@wih$!H5>Uh3 zvadKhKbsHSDZXGKgzuwy^vVd4wu=Px0;@cK_u~*oyrWVG7PQ2~)*}tA=%IYQr!B;j zsOM|PueqPrP{aWY`^TNvvGjXuDMBarz{E~+4PG5rhWKf+BZs=W+PK33T*(dJ@~Wxi3qPN}rb*@AFivPE~j| zSlxdkQs>qjb`&$0P(&ZQR&osS^F#S?8|ih#6D}70gsT+{&9^BdJmy!OQVssr@Ch|8}Mw6wVKt=@@?ZI^R(gdZ_%10+jhQ%X+9g4*~FlOD-v#aiOH29pHV)r$XH9mAxvha zAn|nGvqFjO3l+T`rFaLcmb?WrHBx{iPwpZ0T_Y6bIVXMmNPMYv{>XJrQ%zzAM`C+) zPFMCCFn<;;UFStLOb&CG#xDaMOv+cm0CwwO_zhY_y?UNWobE$Qk3T@%5v&iju8zyk z+lk|k^wldd^8X-LAu9_?3ubf zIs&KisJa@m+&$B&F#JbD{-}s>oSmuXBQ8fMJ_#<9Ya@y*JqJqqxqi znz6daLM*jg6wjL{+51T<7yp{(QriByRel0mYp!7#)Ly~`k9W}*a%i!W6`RO9fO63 zL%Whx6_Y+RC_4N_-&lHkzZhpP$N@#a%*7T8D8G+%hQStA>+|y=U(($BtS^GJQGmcZoiYDDhXh*^dC228j-KUuqhZq&#v@ zjMCO{wBm3%Rm`)3=*p|2IgbkIQtJC83Y2AVSg-Fi&1OTZidK7d^@UJkvxeFX|2#j; zNw-q2y4PEdHHJ!7rL_I%@>pu2=M3i>va*D5JKzoz1~keoLHsiO;qGPZ$ITHz(Rh*g zs&pUz7k4FthN?#d z=iLq2g*j?;&xnk95SXhtmSbi-X6X?6+YCIyS>E+EH3WZWj#zJ7&9k>)@A7=!NXb(Ji!AMa*D8WAA^ zH$F6>U*BnC!AstVcMBu@7pj$8Iz)sV0)B*f+`Gf#n=<8ur!zJ5nFmG^#T$%+`s+4| zw2tnN%T|H_O9$i;Yz=M`PQr7l-5b4sl|O;fhOV^J(D0N zH)hu<#PM~orT03a!|nys+S3{hGarHR_FjSE+Uu73o-xYaa*9yg1hbjBR;4V)+#6 z(Fn7T%LWg+pKPRFe7&*MU-fi14$4FY6)}?f063~VA(Xzfzt3s0Gvi1XECc2)9FNpuAR?V|45pB3eyP6e6sBuS>K z$t$n4u3$L`p&|Kk0hOsQO)jq1RQEfq3ZEJe&n>O%Fv#LXhDdBAg<>#Rp+k{C93VqM zt1c)xb+A-gKFyP4uRrBV3rBtSp(x&u+T2w5s+CT)lqKjIFVuVBJbRiB$F6tmL@Kmj zYtcKX)SQ=3&YgUaU&xoNCmN_#HIrkDJkfda!ky|U)u1bFJ-gy&8&oNv?vX#z0IRXW zWXY5Dg}ZvnT+pP=5}ab|z|_#q!E|;m=^T4E1Zajy+$0FxK7DwimV60X@H`SG@Z<64 z@cl@kMs_ZgEE;JOPC`(N@r#g!^fef)iodl|4>j0L?l_{cF03Jl^#QRdLpPGFY|7XN z=|z2CfO+5O^M&^h=7aCq6{$T;b9R#TCWGv?31M;)VIAYwJlRft!VdaBXfL1gqME&I 
zjuu6;9jTOP+g}^Pl3d#Q+W{>>X>Il-_-|5NlqT82r>Nb}S8lj5Ua_UzB0rI=|76!r z7zn(IkL#)ODpbuCa(w&o4dEMt=WI&XpMk=lG?skGl2JFYw&B-13oRgQAJBXNZ#j^s zi~566=GPC;^xD$6}4TL2@z$(^;_@S z|3leZMaR)BYuc7Ai@{=MW@fUO87yXIW@g3~Gcz+=vY45fnW4oaf9LG8_y5n##jKS# zU29c!S9N4WR=iJS$kBn;a|F6;;qJfDl$MDS!=HowB-pNBg}lD~1iZ3S2tM+^1o{7= zda_(sJ1f>1UK@Wcw#ju<`cmYJuD-z<{rv@J$M=4TEwZJR2A1&K)%8s;SphB^+#7(0 zL=Aa{wR4v~`@=8gingqDwB(8@b#><#^C|xrmO<#fGrtzj(bF?qp<#AI+3*3|? zA8nQr#Jz90K7q>nEKM3Fo<39McUOLH5@I&w1ut@QKIew5xLGej$=A&p(c6w!AbjG6 zcqZL=VWQF<9Qfj9a0i)p0V|l=%+_2`2{}4o=aBv^1P0vNNBZe_y8)Bk=+`&H3%@4? zR}G}geoOWD7uZRYeL89;a?L26KIXLTywtkP#a@^^lT9|R4NQr0#dr&~b|X{#l~vqV z0Ns5(^Lt?qf?4kdKHvKh!?H1ZOW~yj;2;Phi5FF~j^>>U17s9_J^n1Q{A}wUe48V_ z|0mSuvU@MLfEQ|b%?038{o30higvSKK!eS|yp?3K3&G+WD7CSSj2LaC5Nh?i|MEF{ z&AuHiWb-L?GNe*>c3G?~#!9AOTBV4EC~=q1T6a1j0_b=&`C3PwIV5V6XTg@mnh#oUb<-S>ZqI8TrC_LdZ9P7vShO*)~+E0f`e~FJL}KR9Man< zP0GCY{Tl^DsuvSnd9sG4h~w`W-PI;{jnI}ib`S@z=Jq8veEKO zuzR*yO_n!~QT2kjA+XJ(02-b5>bRb_u>j|wD@V!I;*tXywQe$OD;K|SR7RenT>1o^ z&+iDU^;W$;2^dn0sC!=w_{nztN==}tQMhX|HLfU5pQ65djU-YHjwF@?fxD2AxIDq1 znMV0WbKPji*6LvZ(~#jxXq5>h<9P< zg>lNc;--B}xDeChS@st>WSYSwIwI!fh%lCjAMP)Zl+~DbX-xn+&KG^SA$Yc0U^020 zjdPF93`%eOImNm@@hw_Z(E=4P+@yo!9KGVJTjH0*f z23=n09{xmV5en7TabvpqPBfUH1GoEeqXkVBfP3M1*T7}P&yw;9HYG+n-TQ1m=_m$E zUakoJ3Cf-Kr??)P7y7OlBX3iKgv;8S-A#-QNAh27)Fz8J!LEIo`u8ba4QCZ3u7?m3 zW%%z?WDQ5HAuG_V_FYyOXNnhgOatGUhGEz~ewIn_vWwv_qZBL5|I~z1D}v390cLKG z2itY7+BZS%5`BPHRIF0&&@75xC+hb|8u zS*vO=2HC@GR}G3dn+s#1B%QmbT3jqfqI@B)iSZ^!U(CvIrJqr)xcBdUaAVUi%Qcu(Vl3*FxXDrM`M5;RsG#paw+klK3Gu%|^uVU%2cG2}rLE#(_SOQ*uimCrV>F3YcdGI?!i9$+fU-*JmcHZX3bJk*TfKqg;Ax zM?pK7>ywh%yBnI<{_|gp81>-ijH;?neMAq-ZS=m z%F|_)_K-H8r89CEf=Tb@BHgvo5EqJ$K}{^@KI=Bj|Ll(1tN!R4knNPS`@4S*1~(>K zLH4d6KN{^lv>trp&n#iwYKV_+Zw_li=#uzULzyb(0`&n@yN~Kfo)Mpe%B*iscB6<0 zDaAFd2ic;*O^Jjf9)+xg*53KTmfuER+?zvB^$ zzrx3>1bniPv$SW76C5;T+a`idgzJT-TV&@5VA0ju3kfV`*4YWY+u3V++nfIfDF2WI zV_i}Y7Qde0FbFIK2YxfCF*4Rn(!1J2>h4AdCr;X-eV`9UnvZ;u@Aul@9{?%$4dHc^a%7UKkh21PeqJppfFcU0o>woiRAMPM&y!AS+naztjZt*W~( 
zt#n-jH_RdmTb!-PRl&!|5}G3JiarJ691y)4vhl=wcUzgYIf2wB-cJP8j$ios`7_$f zd&LtN!mL(&Fa`_XlCK-30+0;fP4?UZyjnpq93pr|IB+SZTb{q!@?lp-RTDWkXyr8u$cKWihG;tP{X);f+~RGkg#iV8J}kUva`VUB&*LsZ$wR?<*h4AA~|Go4t#x{ zq9Ba4?W77?gTb0dj1g$!hJv1aumFtxiKuUSTY15 zh|yDQ6d zyL*!F0+Iw1p=dX*Qs~SiHW@X0^v{x}6^0HYHr4 z-J}>t;qSw+s&ROVvV>Z7BO6& z>fS=l)ZPB`kOJh3m@%BI>ylm-sKcg;6s~g2{OE(dfx3~ziGeF(`9P_do@FAwn%mZ8 zLQ*%Nu(PhQ5GJRu+3pu6z+U9*tGdkgOU$FZUJs|WGkWGZ{Iw;qWX3u+!!OF7;qQ_h z8T0kOwwWJb@aAQqGt?I``vF;0M;+Vb4kv6voL!L|xs|GIwcsF0UkZ| zJsp?I@m*9dQ*3X@#;`?oh5SvkD{WhPCVo)RukG;pfA0Pis8mK&cVd$B4{m3sF6=-M ztn|31eKSW3M-}H=DuoJoSu`5RGgzBlEO32h^Qp*Kx2usOP;-^<++E0s8>TVSl3 zh6vxE4>s!@dcyEC?fUF>Cm~S=SMd$KA&(9P`bR|UnWo)hiJX2!I#O&6PxdKYnDkRa zGbg9NSSJy!Fy_onfWA_8Yua^`=oXD|o!0}@oS^ee?~YvYO?KF3~!+QI_ULe_COLS`z{C zAn6-M6K5dJJX9P47Tt_JCYHL)*2Z=ewGL+xShkng&t|U9HNf$0%UC z%BSyf6-F*=HrD^KO)_-X zfVwBEn1I;8kAP#;2aLb4L$oH9yK8(dkaPoSl26SzhfLDl0Ngl|aI}ePegRV4-Hw{9(3G77DYYO#|#QFhDaM9jhgKD z@E487ieoty@m@3=6dx(`CKtKYI0MJG=4EiyCITA9<1eqra%&b7MPktweSxb0Bf8h&9lIbf=g|gf?n(LNxA4}!;-=(BT~C5atGjcKmqlCwd`zT zkC%ojiB;G3cG6o9=L(U2J&3ixb?M4rPlpYu`cc~aTLeQg&*FQ${rh{OjPx;>lbyPK zJhVD_(j|ld%04O1!5-j?k}vCn&cx+=TJ4|H8LoTK?mO~3DTjdrpSSgq;!b`HoM9wM z4a<3nj+f}-^;d*3zNPH%xSLFeay$ccv_T4OF_j09anr}u2S7RRv}cM4Z?1_-RY*yT zPzegbM`8^7r;6ID<eX=qX$Jd5ZWfp(98Zk4D@qiuxV@alc`N$M)2C4KJdb5SKu1ey7k5&5Y>mnpPKwyyN zU8=r)VJ#`7%I;1)XO8k*GAMLRU@*Z^s6nu%0FQ`T2)>$(6-Qx<0}>m?!5aa$?n9!x zY<!{;t$ow4L z4~f&A(78K`AqAh}gGc%4JCy@(@ipOtnAAzbYQP!TU)l}LsMh=ME7~=auPe2*N9-G( zxi)8ucO}BT>cg1MCAM?I4IXQ5V5r1MMWE8$x^+a7jP^`QqcTR>Xrwr+A;(S_US6|d zY+39B9~SD9U(EFEsX8p#>)6N|F&nkhc^3qw!#2tx&FqpYH|g~BUhKy#o5M4qHFh4T zg=deaBoV>79guxt&wp(gYH=A{eq*%rEn9Pi!+in$IJ|C4QYuk$cWfJT1^_4lM^mRp zQ(C$zhrF-LM+x$e6 z|3rLS?(m_RhgQq&?hGy3cSaxopa?=tB=pK@cY*K9z?tx`E}kv?BmWGKH{lDXd`*wq zBH1=Mt&?2uxJ>!LO)(hTiZ|1^blXBdS2CY7Ti7X3K^3h#%9qLBq{k7#_gNghEV$I- z#mItx$;!%^Mi~XXm;LL&Q^zW@K8w_PKZ;$o#>#0wP1pYl5yh}T(U@}xlYV#z2HR38 zHN+&Fk}lcoCUSFw(TlS45G9ZiwPKjZJHcm#diL^@=-w*IBu2o=_e$e^ahGnb+$Z*z 
zrrXd!vQ_w1A-o^>1R7K{)1t55*l-Akn4y|lixTNMt8pxtsvxm|!y~M)NosbgGbF#( zf)VvAGmji3O2>>F4|^NP!^09@7=)`}qjeaq%n;7i{kqNfy^1KO`Z1X8I_NEk$FBQgBgnzWkR_7Ni_{k#82A# zxb-&TOW;bsPwjt%5Dh305^)y56CG`_c$y~-V$KHON)+EXVt?SAd#DuH9n(D>|2gnf z?Z~-=SEs#zC(JwZ04RT}Ls1z4-9eHf4=Mu%c`udP^Z*1p-w$)7X9Rks*12jf559Zg z?PfcuuG}UgkEBv{HE1k`?y#Dn`88H5^LkYuZYGdH(#1{ z?vsCc1Yq9=s#38rBl^b2zYR{iYQqNyH5kt?GnyGCFfXYO+P#Ir-Nk(Ne=fGqexIW@ z3<4#$w5%OFtQ5g=&tQ6<@l+D{p44x~txpiOncXwLZlT)NLWjaPBlpp%2ZJ{kw4$9v zyaB2I)-0*o_KYu{=s|D!d5Gnv|3J8_Fx5@2auK=A$!u`$#3}XmMXbqrs5r`jNKMZO z67m{yNHuIL;jaPrdM>y;d>Zo=*z)@0*?(Jg_#SErz2@k!?OcN16onN<-q;rjLrn>h zn_{;;818*`#b~_!ubxqlZm{l@VjTy^1+EGUoddI&*ojsx3HecO>aWMg#<446z7&TB z2$t^hpYI$AdnXarp;m3Ps z&+km{$Q{v;XYZDTCK{D`T3J1$IJGW<>@`nVyy>mUggWQCj?2xvGjjW@AtXmUbqvs? zk;ZPBmq!|m(O%gyd%u!F&de>RhV5Yn{pl3yR9LhXIv^8e%{>47s_{OQrfi^&ZY&_7 zP+}b%umT^ReNmxBW)2ew zso!cS3M~^_U5*J<_TO&#=-e&Ot;E}KNA3$)0Zp-WyiMkPS3OzG=&Q-Ca1KWr^$Qu125T(E~6v$~hCj#(FHaf0nKA z5xO4R76q~DO_bG{DK_jc@;`$IVtY&FQWo0D_uDb;5yk+TmeeJVE-{*tDNj!oK=6_9 zu6>6V-fmFihs)pWKmpzdm5+LQEAX%;E z=B&EVp6q(=EZwHCySyZUNq5UkikMj;pJY`kYRskO@aroeRw{)Q+Gy|Fj&k_im{`)? 
z($tXohanHwDUx!+_$VH6+W4hk*29|X6#iG9l`xt@)yMEy?q=+a$2Tk3;$VO!`LrWDbQ;`l zy@iKr`x#c(s&)XTpjXTNYV&KHW_GGibxv*!>7j{0(kcEI{$Yk-wD>nB6gs9k8YWNwO2u(!IBjm{l{kk;GhS87w>p$&sVn7j~bFm`F z4SCyDnzpw69{|p^{mad`0ZG^gakQt6#G;#fbJO`5FKi$fa;}a>{SzEYFsV`oc;J@n z60?qdm=lBP_S)ZrI6a4cnqk~~RFNo5yf69xrdVC(oC~H2DBm?J?kP1i5UJ!A4&=?Y zKkaFIQbKL)_*3u`&2o)$#^M3w!X!IaQa-Zl)S0mph$}L8)mvz)HGoSqc&1f;B)`3m;>tB11O{}qD%wFen#`kuEr);yRv7-}R4ePdN zV14@qX(Yb!aW6~y0>;$EZ+FH5hg{7Q8^jMkBjSII>#z?*h}QB9MNI{^n{c?_!8GgN zc8NF?mJI<}bma7P_(nKB6 zjkO>$`wK9xrjDWt)4~$FlYG(k3-lzi-rPd~wfVEo*B?(hK5os;QUaFL?o_ZUve3D` z)u)ceca-TIgmgSx$XG}rkt9)kI)fn#7ROr_F@=d>hfq4#ISRtzs_5?u55AkCi}@v; zJpEWQ`!`&p%M9)Ow36K7OF&lRqYmj;`UYYN0I<6|N#_tIH?CF}$*}!zeiB*;Z(>+E z-)zq+A~h9fV;n63xPfWhG?Owl&?>KJHKzYuOq;8xr;+it%6J@;E(`7j_4mE!7JsyE zX=6uPlTIHgeW}mb6X_jm7X)^A%WmPYE^@%Ft?+zyRJU5*pjv)cH-CWO?XW>Qs8b|J z>)E(kUe6@6ORbctInrz4ggU*m5sHgyH{A-PdF`f_&3v@D6qlEmXWvnd&E*_fB%R(BegpCOlWe#+{1a(9pIXmRoZ+3( z@)d6icW-{Nl%g_bh48t!dUXBxTr%KyFnc^+6uMsTsHz`uMl(Za%7kcFAMW%=D3z<6 zr%tw;OyaIIS`2Pio0`eD9**HZtV!3~b#7Zn*t&{{bBtaRWmTn97Z ze83FJEQ{wze2DEuZ1QU27^#>fDqi4Ohymq} z79VB}(_X;W+5jQ@TuChp{r<<%w-TE?pNHmdmeUSm35u6q4~BZr_y6_w|2=w$Nx|6} zzslD2gyP^+lAeb5*Y$LB>@8(7tuys!NEucXwFWLriGwcpNE0+FuO(;Qy3v&fo?J5NsQo zh)hu2?P2rnuNeq6aofvOKI#v!iz_sFk%AW1!K#itksE~bK<4Q)eU-&H9Bq|JfMD(9 zB-|qY|2=;QIS!~{0>jX7-ye}-%p}b zU48W@HrD#RLzk^r89He8QuNTl6WhA}Vc@1QOwF^p3u`lcoBsdttFF{X5s}?6vEt&y zp;wRg8)(jJ0a@e@3A4qkOlvUqY)e_jnHoEm-$&Q7`nb1$PHn0+4IPQ)FCWI9KH1}D z@R(`ekFU`Fz(|izvS>295Bwz?X^R-Rs6x2-Xtf>u3k@@wd`VIn&nq)FLLD)CacJom z$vtieTTqVKwEvg%k7D>Jlgk(G&btSI+s&kmGY_p~F{k$D_UKbcZU=0v!NB@G5&&Vq zMUNh<)#8lhg~J5<1^22CRIl5C>g>$vrJeaD#@G0bJG7Gju=DRm=;-1y^9Dwxo^8g8 z14BixCL^O_2~69w_gVJb7Asz%OP|Oo!iO;p6G=@C^SySw;a>6CDI-RKE2hsV(FZkA zCZpoOfXUw5hvVke{e$!OjGf8)gI)M1&o}vt6Yi1)14c=c$D#%!ZSdhft@WN}=&s$x z?r1yV)vRnfy8JM;mHKJErk?ysxCYrvK$^V4!*@9Eqc`*55R``K(!r+|OB!RHgzT;# zC^DDVtLv9$#$fh+dZ@BMKy@?DzI#KUVyXLGT3swgrYD2XhEZ}pN4+^C(t-8XR3kaE zOkv5Y2W15Bsc$oeuhiKxi|6yOChMzLDnuqtf4PhP>+A9zRW%5{(T8mZ7R|2qgC9uQ 
zm!b^r{<)i0&6(0|Y@PO4VT)!Y#|K^#UY?1IxPJLBHwg)zOzz0_Y}wm>6VVE=;Me;* z;>?dqsTm;*E*Nn(36Ur5iPGg4m_7<0zE%k#wyr|$huDtneb%r`Sb~b#TkyNy4#C6cHi0keZgfQA?Nj#@Sn3fGnHdrtGr|vD3uY-g~nHl_wfCdq*GD zH!Wy?aHR3^S&kl!Ue;aTp_bYDFY~lbIcI}1s`6Y9W?fyK>TI|kRr9t7Eb|1{ zueRgmcdZQU*bCdyCon?w7wMg+WO63bu)+P%waexsbKY8yHa_FJgWR7Z-HQv!2_XCM z_`_o}BA|4+^zD`smt4n5Eh{WAN(ZUUDdB3iBdDi{En`o~Z-J*xz9j|ohvLBBbm+f^ z;jeCQuam{eHrb^8;-2CP3-)2zY&K6?leUbPTYc^$Vu!OO1{F^(zR`_?DD&wY{%?`O zaW{CHhAm(ThPF}*Z^5&>M(fG991MBK_xB4G+r%L@#@BQzFj-u2&)Td@t}MDUYak^H zw1%TZyL7bc`y_YS7NZs2-p2Nm?m!brR*)k%raAs zbh=3Nn?$Rsfu3UMZ{7g2W5=k7#q5tAzIY3dgAeYAEalFc`lfubasJ4QBaxx$9||+G zC7x+B^==&8mejS*G?F2DndEkdgufCP8Rc`7%H&FCSo_!KJ=rBEON<05jHn68`Sccn zj~r|7znp1tWy(kT>%hXZUi;hl){fX(>GpAy#FB(f8LSY;tt<8KKR2{+>yR6^^q546 z5(yC56?$QbnLVJ4;ut-FPm^C*N2OX70Byh?mCe0w-6Oa)U4-}=-n_jkIqVzH1>SA) z*f^&9bK~i_wYi8WXg$5|EDdgt?3G~&{E%{%BN6K8UC*c+3~+1zdu9yA;t^~Nc0R77 zku2Iz$5!4%F8$ogwvsvXmebeOzspi|oKc%yJw2Cob_-!DiK6|&z!t(Z0R7Y?eE|&a zKE*>ggz$h`gDq<-4xrKMK|Y~!2q5xA+!88{^B6Kk;UFch9Ot&IZKx}j zOVxgL6}1uY@V|KG3}&qgtWKvvHD^JM-k8Gx@*LJV>REf?^^QDZZ6D_KjC4ULb53qm zgCEnN`UFCvaGO4iN8Pak!hn@@Bwq&*k&`t%%x{l2m1Lx@1`8J#itk0D4XwDq&Br^l zFl1)H7)AzT>_i*I4q`rubS^Y~rtY9yTVV@q7EESLEGSrDK%}-9FMDbl?}Tc4y1_5v z*b9e%FlIAH7w*GfutizbOqf&_XVA&TA9Rn|No3a}q`YQ27<3*~)K=@{%lZU|vzs$_ zG2RA-iFxemsd~pp3H9r@9keqiNG{f}c+4~b?NQ%VCP+&8^og3Yv>VjnypAF+LqcE{ zif8N%H7UZQ*ke!Al-(YvAh9*3qE_C6YULsY52+r6)*boI=2zq;e@&3|{Q6Ooyp8hx?`|{F`N*jd%t2WUx{`lLAUPhVPjQA>Tu+udfcgjlqKHi_G{IHYdHI)kzPIOdQc4;5)tF!g{<#NF~PD z_#mk!?UgFk{qk6$V#n-m;S0=B@UV&T?>WJ}q4T#-sD#K!5fPrDze%M2$V>b$u)}Km zCLc6jzpCB}zv7eY(1H0@a>6;*ikz?jCJ&CG$mp@*o+tQ-bjBG>A>GOLN30sxV z@@r2aS^cg$^ECn)aKwd+Oqkcky zOhTztBq!TqrLnAg9KJK1>B;@u!G~AGV4Ms4^=L6OMFo8>5C6v%vj=UG-auG8x46XC z_H~dFFH114P8K24oJGZ@O2AuA`SPgcYuyQ>4jba^txJR23_j0{lsm1q(;jT` zD&c^efekj|wdW5RLrrWuyV|vnuKn3#&UtShp!=mV?x3*!c8Cf$unrvQeT2p~P2A<) zBG}V0sEALCkdh!egy0lIXOhM$d@S^n>#{`ec~4k&+eeUpoV;FLi#8mSA~&OC*bC9M z7VbxI_VjLYp1f@P!eL4DO+#wSe%y#me_I;pW<)Pp`xjEqVSy=|(@e%LH{k`Wr 
zg?Q(W%&qjGfXyU$%fEvZge;GSEyX2sYK#MzZyi{{%nUk};sSFkc#9M)InO9*$NNUw zi!$Uke3--a@A(=LC>N;KKEwxlibIfp!6vz5n@(X@M=#h%_I(!#d~9yB;=pMTj7Umbl(7?vmIagpvvKNUDF)K4c4}rb#zgU zMF|=v7M-~)Z1HVJOo_!~p3ZO13mdG_xq2(0z0!V#16NU&uKjl|X52({=QcQ|;^yA< zp~SNGu5dMN*Z{5jhjB>sua2a41BSBttus z%BoQ8U71HRp+b>>W-Eis6rwxCcgB&P_?SB5SpJ9Dkgy8zdDL?x&nt4BZv(ER=QdIN zWi}{0pCJg;@we#?=)&_i<1rd&^+Hz0!l1m0nU);LP9OUP9yqBaXI%E-7T}mR$d%bS*>A;S7;2(ady43r@`Is ztKAxd=R&!M{4C7AOY*eG5LtsDbG2q0h<{;(hrifWxgSt!CRfd-SbHVc83TcCWYNAK zWa_F+dTXED=Y%2;z4qq$dAYYaszAU#xy+wghklw`jhZ&Ei0xpZ?9zFl1IjC8!Nw;In&;wD4i{Fa#A`L*tBIY z(txFw1lFP@i^6wC9^zo&)osOR@6_qW_PS@4hd3^ZjA!rf%^I+U`EiYDh}sO7&aF!V#OQR zQp3E-Alqgo9CJ>rIgM=y7X7Dxu>ky;?p>OEPDzo3!*ShA9pk^2q}~!s8_yiniYM=0 zwV?{h`gsyufmeLd#7ciD2x(WL11hy`ZS>|nF+wDMm&32QF?97MrDUI5>6mG3b9y=< z?BpBVrj|*OKA&M+04ju?iq9&aE6yqY1!+v)?=ep2qP2MW;H*cq^U*Ymrq+T=LJEl* zVVjC?1(Mu#u!O-su;@PQt?Z##kHi>m#xAf_-7a>$98KGqJ%XIId&x`^`qZBSQ1?vX zE_h{iN5PgpiZ?;#Tu~ej?NnX66#Gj@;P)eNA2DhGa3{}aXXm_8{%1V_y8R(p(>ZXg&d|rwTumtFc>M1oqC#_hiW_P-ce|70({w^CY#kcM+ zu^Z`HfKW#K_4ZS4lU`)Hp3KJB%3h6yfOI%yYycj4kS4AUE_Q2<|{uw zyRAtj%ato1<>MN-PO2QMKs3*V1QuQ@p;?VbRu;D+A@t|_RNESA6ICE9muM}o?Qv@( z=i5{c{JQ&Q7|R$FXZh{;$yyvv7JIB24bg!wEbIv-MIuNc-M{JQVjN~8reoXPhWA2( zB&8B152x0_Fi9a43FqDOotN=_iq|`co}Qj;bLf%SJGLhKCYuPyBJ1w*s$!)`<)Trv z?+8iiU;JPLnu+|8C@(LXfdbzlRC=a=9Xs+ipVWtToM zSiIPlZe~R``cxT~dS0Nmx(+xMNG=<_MV2W1Y9%4z7p^DK{WpR8-{b1z+)YZ-g!+e( zM9m-ofAMxHkZtvU1pPmtLB`RhQ(0VMw)#fqO7wMAp)(EVB*{Lh+< z$m*Z@yBq5-ZNoq;r;uD-vxesG_XA+iTi7LYH#P_9~VJ2iSja(E(BJr+PE>KLa1-bC;y zle4`^c*K;2BEHj0^!7L%-&ZKH;}gHzfb_L@IwrX^HN8S9eBSLR5xzv>5rvf#W)Cl!qUYAQ*y=)&J_g@k(3mp$P+D%|OB;2&AfIv~rl_wND+E(#S zYZ_ah4JRp{{*LQz`RiL@#~a3nN-v(~fF`@GZ16tf!dPUdfB?x2lY?nSN2^PlAE0v% zk5>q|N+rU&ycPo`l=pf+gX;fk>u3h|1tQo6Jq=FeZI8vdJ()fn#|X)ARYnS{?w+po zT;+$2BnC~)inp#8#AIJ7TO8~^1<5cmVuBqgf?xUaD!5~xd_yX;F5HW?5E7bOqE_~! 
zk-8}oFQzK%sk>@C%D$-8Uh+gT`n&@xfk@J`!hU+T)Ld6-gAG#_AMVIr%Y=O{UcEb++!2JHdwVa9O?=s`ysQ>8hi#XKbLFXjAFrR^Su>d>U zi@)1FJj25-Uns!ALRPEmjjFY&NHmja4DS_(p^{TU=KIJDj#=?@>b4@hT&YL8<^~-A znNA0_zsKp-o+7OPNKc#5F5K&L#>>G7_+xT`Q2kBom1jZ+n2e0YWl2oZ)ybRFe@}OG zJ8V3`7fD9K5T9;BYm9@DmhM>%{e8oWbc)6vjwiTs=jfb_xRCe?*h93F4VH|IY_*1b zDR`UGu>Ca#Q9&#m_D~PNzo%UHXX6I1iWz}h48bq~2$AjWPFnAm_-EymuU z*=GYy>EP+=mSMG4!mv-E<)c3Ct8X?Q|HC<0T9HdGHx^a}u8TR#GwUS6rLLiI*C&hf zq;O6ium7HBlYS3J2fKpVRq5i7dVP=OI%6;+<(JeFERQE=)o&8)8V37ElKI#CK$)im zr?&{+#|Xh_nC-iJQOmOnfAvCv#9FhJdRj1iKOgvN{m-_8EoRiUGzt;98q6@OO;L9H zFgCNr_V+tZ*q&ZDI!q$}7uETgNV)<0r2zW@7sZs%4%$w-Yx`(ZXL`!^Ez|Rs-dbt1 z(o+4+CQD;uP6?|%<-JtWRCNi}_T>nn1IpMhfkv=aNjaK-S@xheC_)=Xs zrdPyWx~}-bgzw7Fn)R)VT|7EPvFmBLZ}DK^zRmB4^Y%e{}i!dvMbfovj=z2-$Z}s+`V5?s`%-#etC{`9W%^+lPcq9o|&1 zS6YM9MHW7d;_Y~0JSu%!8hvVlW${ty^gvNiGfTh+xuQU zE{a{FlEJ=8SWn2;l$UjgS3u6y(MWivLkT)#>Hsg?Qd?rtaVSd$o7r~fYZY{ChI*aj zr8*~BMUKn9z>+G**1A1xn>!1+byAWD3Vl*zgoVR#LYwQdy56r6!hBh`*^)ZZm6tL# zSTHZ-p=$!zr#n~5z1ix2hiZI?BFdpT_kpj1MF**yX)=UEcOT;6;o&28qrjzF17@q0 zJ7s|A|0IQ6qaQBTC9ik-Ya5uY)_?s!=$!BZQlVjP%EE{+h81ewdqW6mEEY-ot)F7# zbcsCC7X)s1K^%6^luzXq5gS`73X>()Xn9XMp4dMiz-+08K%?2(%bfq8Pu}u(w4}43{%r7V?PFgWKj7&kt3E>kj|Bq9LL_W2m z&(GRa#^c?G{5y};ZRr2`Sp4ykeIeNYGyVL}>c+RQkc316fKN#HRV~TR&aP)<iXzouvPz6q3iz)l;E2&L_%}H4jL;&lkG&Gz_M$dVQyL=sH-} z5C5fw`5!mB;fRZCnx9`wu5_fFyjPhcEB*c0s+<3QN9?H~lURJH&eGoduT&`DSFo`$ zLMBLh^JjB0G%3TZ5r~fU;;t~Iaeb3NYsmwUICoaIbHn08Qn<8pRfI@GrGcm!uGm_Y zv-umg-^wxb`Mzg4;Apo$OmFVaZ7_inr@r?*dy?3DZK7JcI_27hvF#CQc=v`CUNY%F zTxP&Z_E+_)Hi1eP1}~S>ru^IecJM0T@p;b+yHb&#ooPEEY>*=$Ae^nYqZKNcDY&a8 zK_lC`w|7BDpy- zyqyAfEG?p<9Kt)-0yyK~_~tC9uPWyopz=BZmS&7|%5dtmarL{W*5& z0Hb8*Xwkd3s=Cwm;$FvTTO{U_b>efE9`wbqUIi8dAZJ99g+x45b}h_Xk>g zA59kjo;3tmiSMoP(@Rw%s?P;>+;fOMx{)NLL!OU%>LbGXt7WlU-b4c z#0tafNsBdWLjXmFzFib;CHi)2E?#5_lZQM}11n_OO9p0K)O%=T*biV|@9uuf{m>_C z8ErQ48bDRDlQy<*m2kvUK&1oV89h|i=AT7ri8g1R*#FdR=v9*JL|h)fI6U7MtdbeE z+*u)2eCF(MzO76In{_*!|NN4q=aI73#gti;v~_ltmcn;Pd6X@LfJdJgo1d+~>hVKa 
zJ|)+OgkCJ$%|JbR<^Il&d&w>NXA8%E$Kz)BK}I+s3`zWu+rc4c&!+<9E>h=@7HTJGZhtu>f6?hnU(3rKDHcPBJh4BLfE=8k z9S@Pk5YBUdaO*}i*pS+;A5(WJ1-XH>7>2lj7~e>DK6uwdcSm2m6PXn!suy!juAb5X z200Sr!f>#jU3z!U3HE{oZCGGyCvG#qhuI&z9A)dR_ULzy07{1|_NGu}N?mD3sSp;o znjZ_oaX?+pXDXuRqETT9jWA_T%@<@0Cq28l(RXx^NPb`$m6}Q-vrlF#2A#9NnaOm7 zcodYG2!{5LW5{Yan02s`7KLhOGg@ZB8IlDW)WADkEj zFau57=_&e33E@T+t7^&klq;Zk_YP0>-t1Qyn#=FrF~s8dD2&I|b?SU91v+nGX%AS; zD6sZAB=)j#=lB1Ssibh#fR^WovQx=crSdCC;8aX=sL{YVtbv(EXpEHt>I+P!F)Jv= z)~LA?*EX{mbWD$%TpAB1C9PWiC<71>tm8XR*57bse^_cD@Mu0}+= z8sE3{41y{}In<*!$L=HJ9NTG z@2aO`?!l#Rbsp+6{;2B8JX0`9w%&+`0hHkg|6h!qV{~L~x30T8>bPUuwr$()*tR>i zZKvXNl8V)_jp{fZ+vdspVZYxVd!IAT81<*>&$CvowVpNSyzYC>RtrWAovlr^gvBBa z=RK*xnbu~F_5BnnX=!3dXZvh6OW%X}KN9$3<+^LHd{E~V2|GhCOuxbk`TTbcpAK)D zHD=o^Y|V20oyDF0juU#X6Or-ho4(le9v@djb6QT}?g3rt)}e=S?F`im_#5dI969@c zS1bL~KmQ1aVvXK2GKk(hD~Hu^iQPJxoO;FJ-g|m^=jO3KmYQC?>B>q7_~v=Yu~*YN zz{8g%0RKSg`*JDe)n5d-&%5PCh%1$Bv_kbd(+HEjd1WlTk1jSzP*%0&3a@or_25?1 zcPqz8V{&x1%9!dLRwEP+evfjX%rk7 zr~{9MtTDCv@TL5-2)sCqXBXTx9S+bD?a$23zJE2&yfc1%1?sG{d51l|`e2#mL=iGE zWgh#-6h&eiz2^!>5%s8MqK&_%f1Rwh7>tR)do=OqCH$vBoJMh8{Bt6g-CKZa^;VFG zc%$0xGG**o!P!vvsEKoQ#%(c3PMCBABj` z*02;UADSNr{hYb@IFh}~hO{@dGG4{TVk31`zue@C;V+C58jAB99n9R$Vr1|Ll)!sXCY+Y zJm2lHH}WF3kDhCjNS)?M&jTbBfeI`fenF==pd)!wO*V2x}Kc?RzNRf2{a%5$#8jt>L?8q!&9JFM% z#mk|(o|E$FBfj8yr?IVuNq5qq*z#V2_QKmSwD=;$3@&Y*PW5L2T?(=Amava zGawpwM9X=fez3#&g|F1u`;~c}iCR#sWe7T3j^YnKrh@V~i!M651cSh7dPM(EQnsV{ z(!v;oMA}=rBByOgi#CJZ$;uVSD_8o~0VmKvPI`1&vY$q4>I?it#i+Bz+==qGw#zbd zq96W1h_k~FD&)l01|Uo4oi_6=P(Uf63l}qcd>OxJDX&iiJ)>Nhw{=q^$bc^>Tqe5z ze*DYPYMxl7-LXfyIQ?x(_x6#?hLne{7fvX_L7e5@N@;&`lZ<~+%(F=;E{s^U*WhB^ zbp-As6bemLLezid2fgnUaw;nGmX=0irXpnIv(K}>Y|wz<`0rce)pf(V#PBpW+U|x2fi4)mbj@epWv1R-!hlVNT0Ohk^(R3spkJsN)b9@9n zrcKrDz*CEA>PJmG=4k=94vqnr1wtM89%Po$=*rD zm?)&iaLIP2z*?ByV5!7J>}xeVQR!*C4}{xC8kbcEDP2nc&XAUDf25FM0PUqss8|}B z2X>Y7*_zuM+J(%~t~;E&3X2w6MwhfAD3_y)qmx@yiu`^*Hj|}zABmHTDxjGge9F2U zKdkKO;j1Yo>}grjMZnh*=8=KUm#e;P=c?;sD7n&23<;zk0W~zs&Z7*0TusPR)qoJvco(Qc_VMv03 
zs7;GDrWvndoJk;=qZ3+D@-3&WD^PEMNw*wR{M}?Uey?`X>6W80#w-;br8FuF3tx<> zdeHZ?$;nRFJO1^_pjG+G;uu+8J#UI27D3Z;Y?^L8GS3W1_I=}XhyJNzVNga1w6-!8 zcj+GH=+%rqvaKwwoYJrfWmU!H+N|C-U zO&foU7gVIuvDSPeGkP10tTbD`!n5?F%qKIMz#1{7aa88w<Vp*xnM zfH$7o>fPyVFC>iC3rb_smD6k8hAQC*oXHll&t=eE9=Q~*I?DG8@OL@v6XtB>*`~Xh zoRGCTf?>>|!)$Wp>=yj7VhVN7K6fMa%~Cs|nwkehX_m(7U3d*{^nk;hlB%|gz2}{r zr>Cz&5b^0iv&%Mn$1K(e*D(ne0V8mK*|KO!j=$fY?^E+X##gg%B+NH#MrLN-L0RLg zxUH?81x+#W0tQpq@>uKgP?DsAZYXf&fzV%5jW(*l*rJMSRaGncbW2$`dV7QP$1C(t z<#x*gIbV&<2Uj0s*PbDjiQ{7L73elh#|ay8##)DwMETlAYdniA*6Fhw_Q{o`5Q-tU zceliV`f{XxP+GnoJb<+cD$izyg<~w=%miIzIXK(O8Mhk)RnV#A0ED&UX?yllhS$%9 zp6ADF3noZ|>f1$bWvPyi(?I(~wCbeupVqtlZ+!PZU=X@o8fryj%LOy=7tyj1FjTWTXk*A-V>+^{gbD7Tk-T#lw3FP z+e*lfVkxFhvy=s{SN*UTS#4~FOqy5vl_Azn+%osZa-Qp2^s0j<0%g5rncx2)ehl=*7 zI#W=LjUDiale)jkKny?u{(w3Ba$pU?&;W~j`C1niL1w!;e|wn5g9t(+{b*jsT}mo= zW08@P;l;W_vZO5bT%k5k-hk8F`wzi1;Y4OoRI*LzVhOSd9slu?=&V@E^#+G%vL|7niBH5$0yBH zP#RK)H7?1y%b&t;hvyEEY!p(qO%cW49%g28x1WZI*}BGn5vpY8mv%VXio zt4#!YwS$4>=FkO7U%%}zEXU}MhRhyz`ldPV(2X2 zuc|^!%|W>Sf5dF&3TdZ2B&2OH9kC}U4_m=Ooz!OpUzcQ4>j3>0~QKzLnwoHFhPVS3f(~ zrXfxAp^OAItVb^>t~0K%ZYe76lW&L`?8An|Yu?~_P#LnB2}SZ-v(X=dJN&`V`*`z> zO8Fnwt;G}t6R+k()?I*ZqkOC`g7oh3r8z_y0b7ryC1Dzg|CBTJl$#F$GK;pm+xM&7 zByg^#&FG{=v&yU)dbK=1z*(a9QeKgr+F{G$$?N1m_FMi^y1{v_N!h@V7wbpIht!pA z*vF5sb8hhn6LGS|O1VH6Q{vsx*eWK}XC2t}cg@O2i_HmSd zYXPnoNV9hqx(m#DrRlv(X6n7X>XgrQcdU%$$Th}FpVhT9Bkz>m+Pvaf%haBRFW}?U{%gv=GrA8C=7E)B zYC$Ov{A$9v&=h10c~JR=W&w5JtXb7P3|4Y`;hY@3u~h0qE&Ko&=I{OGU&T$l!j2xf zGd0@!%P71L-k_(TE$gIJr(eM^?m4^cb3Buiz>BCCe)@V~rTrX9t{X3dRr{94z+dR{ zT5*sl0;sEkQyT>^ZH%M@Ps&mCzfd9LL2yT0lKKF32chO=J{R+KjPNC~kDuux7<2t# zfAVtDpXw_u7bKD+e5?LqW?+Z4TairW0rKon!BN~*(RV+Rh{8NjFuAj^&>_xv7$kF#JaiN4PJG17oH z=bpE5P-pXw=W2ekchRUBJJ@6@CPp`E?IOs&NvJ{d?A$2rp4vS{NgMGc1c9`lB47NH z8}+XfSFGAx9gqyYVSMB)QbMuYa{FikXKXWaDE~AQTNyiOxrdpZ`982q^%<^ue^}vk zRdYeKIQzf0m*965WavzE^bouO3Ub`bDOU70G9l$9cXYQ@o^7~e~`MD$ur0An6)w1~}^vqIB$Nj4 
zc$rS^0}n-6hZjv|G5#jV^(?sLi{+oy2V)2*HM)nU!S>FZQ3>JFX+q=IRs(T;)VtYIsp)f`HU_Ls(MAsj8lK20qE5PB5o^|WT9F0ib{<<@4mne{ zd#u*E0Q|&Stw*X~`364bErrWP{&}qS?ojP=_1P6+N3@gZ-Z+m>yr)k<1M0&d`yaym z4#_sXr}E!Y`@x-3yd#@JS}W=5O@Lt6E&bhxIN`1N0zqk4(T>W^cW8#*gG_|{tasa# zR!WKqeAW4nyn|g7(Oyw!GDB<2JL!Gkwq&&5=x$@|c!z2bQ=b*#93w!NOT_MMBS1pa z3!WwkS<091>qK;o0D?rFCuI}%AK)N}PG#t|`eE$j$GFiTt=MQMSwuS>~+NFi&{z5Qc>~*7(ZAhbK z_o8~ZVzjmytvke`J^g&~^H|MJ&;_|lg=)jrVD9NEj0`E!IF!N$<$NT1v=i+|O)E&# zZ!+y7*aW%v)uH`N!ie9!I?`c(x*)3~T8@-{lUrv{)+BG&L7Z*O^k!RvlU{JOq&1pn zB|YUj%g|^gE#+GJ@;n`)g|uTx{=aT@@ITf>NeRekWKBLc!(Ew7tOg~u{c@-e>y@9>{xBtI}>?AW=4))ss1xuk=YVg5t-3NcDn~Y z=L|v*HKW~2@|p@-nc2ru5k|?7z4KCG)ed{(T%p2;A5(=Q~ zNIeadMNg<944)*^onY!;Sz(T3lsZRiSz-MhIR3a<3L{BDWj2Pvb*5;L_zWWqL;gic zhwYs$&_d0>1)6CVY;Ke=S7w~<^p0xy2JG?pXTyK!+TglmW$7e3tvLbYs+S~|cqDsc zKePR0W-M(HG4SJh{*odN4SC8t@RGQXsWOFry%m#wqMc!&U|*_`u!=1os8pGfy|kY4 z(EX&iCXMJ@IqH(xig>QcmQVinXsfGde@UvIGU0lCDM{;HXCdWU^Qwr15>)8J^1@&j zTwafFfE63}%(mJQBsDQ5539>?u23}Z@TR?ic|L-mc|<+UMk&3aLOxG#p6O?tlDZp) zHdn_zouVv0m~6$AyS5&|m8=sJD#yT{qb~y1J>WuV{#Ua=B0`s9v7j#`#}g~ z+L$fpbrCqyas4}-1Q&X^l451;zsuFJ5*0P%$chJ(GBMfVSsS*qvjf_#wJ~lXeLOMz zsXIk*l-z0clV{@<$sm^`0|M6vv*i`MO`dKNpZ5X8k5av|9=TGT0W}VOO~EBaF1Ol8 zz)I;OFE5`=tA&i0^HTk%T$Ng>T-C$ddwX+JadSJD$31p3hi7|dhxE;#>Mw}-1)z{O zd6o6uZV;?+&e#Zsrdaq-ha(C5`uZ>s7@w7a=Eiw0DKAe>`Z1o?dT(#f!rFTOBBD15 z8$C3%fM~Ly08YQD>Sy0$p`L?df$Q-i!Hs{=-b9x3!8DOJ&>~^`dEkNN+qLFrIw_((pOxxV?2@C0rAmV`f7KKwpJ{aK_P*GRbN;n;^_;D`+Kcu??;C4$GT)8z_tL7f9Z^D z@RbX5?LF!Ay;G8g(dXPY6vGYI;7d6k3c~<16Nuw>VE@uQ@^5v z!wXQc73`z4_{7kZ=RHVTHou$QbQFPC3S0*Ky_b1AXqu@a3DoN_tIccFniF9;otm z-!KfFiC;>|JQ3MGNR@|Qa1NhYfA%>_F20(so)bBc|DJ69?~$<>c<6i>rqrN z^31)BnW{=STF+@Ns{(};WMILvsDXK2KZa_Jl~BCBX|`9?dc+N2z-a&mT}s6}BG0IVfvGWi~Mb-&-`G+0h8 z2^8aUN1p#q%l3Bkz8UsrVZlkhMoa3(B_rysQ0eqt_r0nH6A=SE;$a|#I*!;L6#wHtxkR$}yN7R2Og zL02^6P1d9M?RfG~%Z^B3eJ>6WB_45`tK!dVH(35uGWDln10~S`-vB+!4d)x!^?&}G zUHAqHy;67sBhWeN9)Qos_K?Uv&kESX}&n@=nHf`$E~&x3C)e!CMUg3PV^P79WKRfUhz?#rq|5g-Fhln|jt;R_!%2aj^F@ 
z?#$c0H{{zLrEvufG`E-fJboui+z(3PJkEyvY}oB5`oV2Ah6C@#0B#Ek5uR6RlZ{v2 z$Y_$2JNtPj0*U)p`eOGXfP=4}d1y9GpK|l}Eu%cgS){iku4p=jjU#BaFb5wSb9Av4 zV~R0@jPupj^ka3(V)psd#V|XBMM+qSKtxiZZe1F$TPqw$!H^&*ixeO79s zcUCCLmVtUV*MkP06!q&n=e2t(#M+O(P-Cobc$Vyl4kYwp>&gsH;@qs08(&~0NdxmneuJidjc``XIJg0y@6~S1Gtqy!h!+xmtTW;WcZJLtNOi0Q*ZHYae4xB4ImHImrxm!d3+(MFu zv4=^C&fNXVsOIr;zO_lNQDt;O1@e#gi0uAf0s=| zzv9z^Gd-6;kKqHWHrm1$coAyEOhNY~9No^WN3D0%4$!T+)q%&|%kr0osYR*G@jn6R z2Zx6R0CVXy+dpNWUB_*kqOC>9Xuwb**;Oq$GW7H>xlTmn4i1@Asg@E12uQ({bM;n- zGbHShty<3&2F57jsIopS);DwF3cBHD zv>SIKAj4*A<^tYx7bMh2@X_j4Ui4gzA!50luCO}}w;zAwL)Cj1NVmgxL7qMZdEA?s0;x|2c^gDM>evBYPiBPAs zTZwgKP|vw%YM=_bdV|5%K`lDZ{Wfn;kfmzJfHT_h(JgNWm~e?_ZLHCsNNjpEZP_b6 zNq{eW(_27$EBSfBSe=zIe8 zg#VY=6k5f7 zyG)vwJJ{<{$}dSB6u!z)V4mBjZcv)YqIck|Jq3yO;;+fsPqwYNwn@sYxJ?N>TJkL> zF9?&x&bNh_M;6Aqh4`=RUc1gYlx!pXfXC-qSXQ$5Ja3^cqtV+;brd%{{2aW|@8z{2 zx+~$cu@VioJpcJJEcpKMaH$;8Vb~q{i6l|m=N^Ho7Ly^l@moC1KoF^sD+b-$DLzpd zw^4=__CHKR7G;;8DUEkzun~1@lQi3@Zc%Ibi-aNuW9n|5diVfcuL=j1G)3I8`%^`A zbb5jALq=tiFn2q7YEY5~vti^|m!0-_k}_!k7iG$V2>Ux6d}J{uC@34&Pw+A)r!nDQ zp?z6G^qnvbkF@7&Z_F@N3WtnB?Wl$_;a@3H%-k{E=>h%9MkIH(rAd1`Cq6aui_WWGg^FL^n=il zCvuYD2`oobY+1zm$^x=iXhe=#66+}?j@NJE2?biNk>m8GWFaHXL~K_pl^JoUk(D9& z&Yru|I15%o=G)s1zHGl&sc8F`VE?yELbW1HcpUVB>!VxH+)0V2-XPfy6G+#{77SrHuM5Xa-C0WyOL9>uk#$9NNv-99cF+nS@I^0eRd=jecXQW zCK&BzQ&vXX?kjKHw$Pc0{qP{}m?{%nxqcZbz<*g@%&nim({ekYN3x&rPP5MHL>SF< z8_T(j3*VeM)Jk^47H=oWX^?9v3$%BdY$t}T5uo6v3iy7%Fn^OSnkR-|3s{YE%#_uU zFduz!`OqkReVKg!P}F~kjhYzhBs@?5n8l?Q?4A8<$p!f=;F5HJx6jn@+I?C6@0#{U zqR{e@dMAHFmd$QuL(h;YHVldJq!sFsI`(h>@!uq2|=_4yh^DTbLbopYcX9i9xI-%lVI-=zIfb*L4?e86~Dzz(a5zadjaU z5T(mcYQvyhI8yCJR_vKPt94Kd5@MNAcx;NXcGXu%j|zh`?e-T44MCsPnLKBeHCw09v1w~2A|stiO-iPcF6%@*0Y9pI#)>Zojs;?xTRO^2$bZqfsf z<9Nyes`ndOdkeX{G|!3yswk}md~1HWl63cc*&gMA;1r9mSVFZa)(>6kNH=JqzZZ|B z_qoN-FFy)q%Zw@M2ckf_NaK>q&&s}$DGUJtq@DVO1i2=sznfHPR}%ATvu53+DB^0* z&D^pIEhNV+Ssh_DUd|fqMWyho&O|+*_c-6Y!rQSj0J?n_ z$AS~YZIUHk$w!Q7R1lCO&5-%TUMF6Hx38? 
zn!#Se%*rhFpKtLj`wDv7TH+L~?;9$QfK^>H_Hi@GA#=b#2_Vh~0qpoE0SwCihdtggOZdLxq6B!Z(~VnBIQ)w%6!h3Sk@!8S?)_%!EU3oM;;J5CU5v8*5!fk|_!)sb0L!(-`V$+_(Y zQKa6vGoNq_j*BPmd3+-;o0ZAp7{N386It!yau&*UW1ni}&DT}F9Ft9Q-ACL=F{=w~ zbc=Z|LExNh?;_*n{Je9-L{ddX(zkXz0k;)5c{F zn{8t7#fu|8KR+L&qp1o`ffu#oYdp?gLrOn!&>y^lK4T{<(xlk z?mZ$)O_e~mYq@Mr-4L`NT45-e9MtWvhfU7GQ_nO#0Xc7_&T3gTzd zAKZyN&3VRpO>k|WBE?;-@OvYS`HMq%9FHu)mRarlns;?MJ?Romnu|Cynu~19vjN|e ztw%Sq5r0%&snU1Bjn3N@h>Eu{RgPB9hb01mfu%G8dN@bw1bscv%}X;IxN&(piUlLQ zHKLUXsYiW<24PBr&ios$c%xJAEhy=IpStH6748_YgzoL;TEEYPiP3~9wdP|HG$NNA z-Cjd@CXFRjCTEB%QcJ1$jY=@Aw_)s1lp?m4{wfC^&u^N-RmA*=xp!Eq~fbad(wFp*i=?U-6W6I7Q5 zN?>~CSb{SLW>>^)e7lr$056%z(1#;nd-%*%}`j@7_5J|o(RF4K4qm?};LZEC5>gXg|5MGz(cBlYKA2U@kZZ|`B@m57maik156?}i*5!wAyg$IU~W z5MU`uQn#b|;nOGjdtp_ zJc3ssjUEr+#H%^Mv#*6B^$5fr)xJB%oq=P1d(`KfQPq4mripl9!pr8T4xb{j@!Sr0KC6~5!SR^$nMsRi=-yXaWd>4 zIC6C0%-5t8a)X0Wu6EEV7U2P60Qduim0v(@d-b$g>%}w#!z>5g6S29ojrLj&%(M*kY%=VkOu^u7c^0Fo88?suw%t_N-GFc3bwB%!t$9hrj zp(7N?smRkehv#3&h*-x3lyFKfZ=TS6h?Ny9Rl+ZBca)M5AfGZm^!UD`K>UFUe_@K=1UTC)p8 z9wsc76nS!pVNI#=gQsl=)T}UOj~JFXViYNrC{&OOZqZHnz4IY}V+f zE6o74O7-zfwy4<>84NegUVqVoJAl6`vdD53=XibjHm2$!MlMZt*_=04SXyJR;+Ha& zuUAgnnG#&Y^NKLggQtrQCJUY+JVZORB9PG;EpswZih_ag_!+rKeP$35m3<@B)jF->5{evX7KnhCXy5kdD9?`ZX z<0)uX;07UYOz%$_k6RW$^~AU@?!1>uHCS!2!GlTJjvNY)47O_-f+Z9W{qEqlU@ zRFy}UbL0&{d;MlR>Z=9p&j2LZxlAIEV*J)eTgc@wJAOjYUrU?3wu?vb-T{dJLH@Qa zkt)ALHBU9*9#9B^Kmf4!7-~07j zjw1cG2W$*j&bN;lhAP0kQb#Yq7xfuTr^2^6osUh%xUyhU+icsxXSMcEX;zn6oEd`S z#Onvj6PGWpT!Rx1z_VkF=B_JmdEUy#Mlw+wU&}GBM1>cj16$H>8DAx#(to zRm@qj>pSHiBl=Gt+)$#n4isv|5%^Wfo&TA5EDwi*tu$L!vl}9PW*%q19_m|2A!a~U zEXr!cr!C3X2*U#CmEWx}FMb3SxOv|bfpkdt3JN$5ej! 
zx%WcDxe)$^Qf76&l&+GE?N%sDE%9@nh(X|?tZh@E1d{Yk70s;KB7$2ohj3(eBnqPA zvID)d8PMo3h!B|-?kBxoireY<)n#x90-b;uPVULz+5}3*&trsuOJtX7npvL%_Qj?t zJIr!$-}cmK&6z|btlAqL|8&1r(dyXI>IqiHifPQSHJLI~meev;y7963lDXeS*_dkW zzpO(J7Y%-u_ocqD`lG^|@e|W(p==YImVqjHAw#YpGC*v4m`s0jG2y%HoE*;w)adSD zvh4Ah&Zc?}-a_y~|6cUPTvURrj|X<+ z>rf&@2138EcA(!XUm|JsELzF}cnAQOq7?nr}8$D_KT3fuhy_5Ek1QRo{?hBmzT zQ)UR>V4BpIx;kQ_`;z|!%{_b(6(u9t19@kGqZn8BA_xydC43l7|5TQAyz_JdM428h z;EuxEb3R~ZPgxNc2S5N|WxETC9SmQBeM2m1Yv{P&NN0Es`b@?O!}D(^=fhFClJgKx z+t0oSV%8dKc~uu}!!V+qq=P`TUtuP1Tc>b*%DZ@`djrID4k8V<%RDlu9p52rq8te8@?jL^2l*(^qXog zGLHcUKd*)GyplvTW-X>$5%vroOdqI>W{#ySS0V9Q#8M{9D+ONBw zw>JrNaw=xVs*jeNbD{+3D?nt7&K0p7YA#{>hR~O~$vLcgM5Xt>bl-V@l&CIt7vOA_ z(OK~1fHD89eM3+;bkJCNOSrJE&j-ks=KmQTD&yB8nip+nC~bA6Lth=kmKkkOd6@;^ zlf^X7D5onwWu0qnV?TO*vy8l^jFcvmB(=A_su=Ig@s-dV-4>CxH^8Ftg$NxuX1ZuT z7B#gKaRskAhMdVShHh&^SGC}YAyH7-c;Wj)hfdJTrt+=1*W>o>Pwss0ba>=g~y1jThUAW?pL3w z(N26MZy#b3oMSJQGw!`%SBB!)QfMkmw{@LK*5lragT5%Pn{QX)rPE#PT;}Q~%Dp3b z<#1dayb-8!v*fUWls|LDq^|bf3+kLr>5h+d__h3Ni|fS@77-D4v8njY%nlC^&&n#= zd)U1t@MbV3JFk?TJslC@j{Wn^jIgKqz4*^5v`eN_YazpB2-CA z@2fA*WhGk}3jd#AfzJM`HpGlIV`_6|ZfMr5s$ENI>hkj1Cee;XV6jVs;Z05a#6b)x z$DSL0|A!{lVONZj-`fkh=_gCyJ-9irAo2&E$YBR(_W7L$3i4)e;Vc02_bmo*;+#8@ zw=V{iQJ3lN0EKAqqmz|7G=22hNH313yE9i#d-^f?*Io^2E0Hxj--EsHF~Fb3o?($c zCGHdMmWLY3Jhn*k9JhZ>0i)L>n@f$kUd2cUNDtDi#cK8GC;=Mj0@Mq^c-lRFWA^19 zhGbd3-9KbQxZ_Xz+Ul8abQblmD0rFUz0E-^g?3DEu4D!etrV#EWCcPAsI{8-ogjvg zZ>V;112EzSQw@PnXywYpf%)+q-As*R8U?c2UwcBH`u}a?-icHFq6}YW+B`7$28CaW z-j)0RBA~c$K^dsR;q_k!Wb2>p-LT#7P6oRNP|ccXBb!7+9bF~RV}9-phxTP4v*%5D zrzsVo263ExcT21I0GviPpuiDh?LK&o4*h*;)nLdFW4(@H^MiEhKI`KzZUb&6V=1n{ zMc?*ywsLkyo-FdFf{s>tuRXLw)>o(_iMH*dw3!i#89{1a7HpaAM|5&pHKwHV1NT(sBn_svCbG6rHy7=WczPlK ztCiLp+BCmy&zI{JL{@s+z?v>LHm~>LACU@g$;Mq~=I48WM=mwR70^5!agzw9Kj%Th zHMXLbLq+|;HpMYQIP7NJY6Kl>5!*TPS~;rQg4 zpP%rZ#d(=jL|M2>aXQ`!|qrIvnSc}|sGy-y$CW<>V=u!+dX*~Fk8 zU}`Eiom!oBB_XP!czMC8NxSV#K^-oMx<*rEi8tqGx8g@eBmFQpi2F3opF96dV`1|# 
ziosi0v9gHkOcT37`})$ul~)Z@1B6gi-U;Ew-Ik7C+4I{NtLR46u%!x5I;no|fGV0& zy^_Z{_Tl>JSlKgAE+|WBX%Q1oPR^w|vu;%;Lus&6*46;R+7|+HF~ySO_LJX8Eq|(n zELWpaz?Rh59dD$s+TB8sh2kiQ8qC_MUFNbaJ}5?E~<7n6XH=|l$?wNQG0 zRjDNr^Dktq8H3ZXDv{CXQ|8fHjU!g2ef>;g8hcti+Tgq0(vpwodSc5sn^06@F0>>L zrs?wJnd(eaYfZOrEi&2=g~N4Csba*JRsWgv@yuL24f{7X)pRqZ#5K~fW1No~)sjJT z0MyfWeM4>#OV0k$v>cGlvM-d-bdh`XF8fnP4N@o!9jj|}UG;LP<44fUValO1)s4-b zH5#P?`%u$F7Yf5SKE`G~pW5EvN&Q@O3TLgHfxPEsG2+VDR4pGCq0@qv(<(IQ;b=zW z`!U!?R0Csf79R8B*mE5Y7_)z7CV{<@Q2>}8D3eqReZlzwGacSKQP2s3_I*`LSo1CV zv46SNPFrRv3pho}IMXO^k)@7W3#PH3{9M9;qp?=<+EGn=L@<%ptw(g6zjd;$0r@6H z69ifHF+Th?1yM^Gl;{_>MKqDL@yuFk1?_wT+!;pOOSWbU=!|22_mJNnI1ITd*ML#c z!@HS~xthqPAqDX5v;mUUNd{-fCsFnEj3ZMk6+nhLWrS0Et7D+LWN@yJ7AxEJ>BpAD zAol*{E_^=!G0$Yx8j!}{9NzO7Kjre#T{e%KW=YwVSx+1u@HnY8^wm94KMCJ?rTdHf-Nq!P~= zF%S0z#BOvcl%@&6TSzv%-Dr@Ijmfn7hIKk4pkBO{-Iy8LqCDY`&KpS+q%N`*@4wxE z>fiCeJlE0$BUPUkuSSunl;ImSowfc6-X6B3J{gP1)$P_n8#Mhz-@L{`vf4JLi$}An zg3KCAS*I_54X`{@1tv2Xd_(OF;~$7rDe%J>tZJA1b=+JbB_|~$LO+!po}gUs1i|Wi zwyp8?dLFenTC4o4?_D5ptAy&VO|cvt9Q+ScExp*O)%4Xf*U4q%tLQsLnv&@Y{hJ=^ z5AD529U1Y*E3D7UWOZY9xfrgbL;DOw7Qcu?Qzq&4QU}36$E1sP(tHDzAXdtm7N#bB z`j#QEI#BDLUiM&2WM?a;OLu|RsmE&VE~A>lC~R&Brc%ZHz9y$$l)R2`cvKV#J$-f% zm~28>ZhfOaj@cu-;X(=p_%lyKOA|$sHlmrkpchopRSB%bxw^?=4kI#3(FG4rh_(~j zAAAZ2_MBkv@4Qk>Qx00cUnY_!xuiF5#sf}Rb4PZ6a3UAJIKFiv?iz3yvNP?qDnO@- z#3CbMeAsPiKjW+vl*s=&9(PRkjk{(gk9f$>sya6df@G$KUki91(@ae2ZK(sond1)q zL2^r0vK4n~e6f*#3`WLs2qPJ5PjGD|{_mWfPVw>DJukLO3THeQ3Q1IeZ;bW2 z^XC1H0l)GG?p6YJYO#3Ehk9&AhA)Fw_2$h?*&ZvDP2Uia`v| zDILky{0jIAX%{m1?Vv5YLPLDoxtyZ>2Pr7s$*L%DkYwZ!uk_yJ?Bva|^~n6(o2b~6 z({~4EYwu_?|1xYvq}|u?F;o|z) z5Aji8p@r$ue7U%zZw&5=6v@<`T<_E_^3`Vw58Mn%n|!DhNUOSbci6!^&lJM(W9_ly zWe*v4Qbb#2RqYZRJFLc^ql2x)@2!?TRNsh13Dafdve3pgSP>PC3|i~25oBu zq8t0=2uoDgh_uTbU2*@7R@+r)s4qIp{AD2nc-jf|n(%xH1Gjl65EO5BC=~VxlKPU| z<^2LKaD{m%51NNr#1e<>b}CfrA=&mx(vIv z%M^v7);!AP8wn1kl6U{tNP?G`K12I1BIig0tC?hUv@^%Itt{{k?qxiTjU(2Y%1^t_ zx>LckH?=(A17t|=y%}1ILs;B9h%d}nqL&(G(@p36(c-7Oc8a7uh?AF~U(^<(C*^u_ 
z5!ZKX4;={n7>}Ilh0nUnZtpm*@24J+LrIv2W0P)moEsiOO~!3kZC%Cxwv7*e>}?Qb zBnm8D3xu&cay#+voDtAOt2$b+?@`TC1nG&OS5m+=k$wG@iep6N$FmoE;2l*)1Na3h z&_}o@kix$|c>zi_)k&{v@mgSA2E|^Jk_Y_8u(QEaLg{ordZ>OjdBO_UhG%Z&opT3b zH)@B<-W{c^cJ;0EL-Aj|UeYbS$BsOAfiv$g3+6ngRecO;+R6H7t{lvetr~O?42y;><_e`xwkU6F+YBUqXTiJc=@t9~-Ykx_C|txQRdDeP+7&gkbI6Ndo&&W7YH(P~HMx@X_6L{a_p%2XbW#JFeFA4feOpSgL+4)`Wgp8?<%&pEn zW~OZBw|7jZwNY(tE7tI+(nHIAkNH~mN##p;pyt~s>A1z3nw=JVQ*x`pt2v(%S(<5q$u(p7L|Nf|wq@~ydhA9r%I*71Lg$P2)Timvr|@pBwhA0F6Tug( zRC-V8Au?-BO%OhbEnf6#4wdEqq3a#|>)O_E;h;?#w@G8$wr#7iZL3jZ+qP{djqMey zv5ghyTl<`|@7?EjzWX1{Imeh|yzlcqpz?cUrqQ?wbd6YIDy_y9gGyF8;mJYhM_A@E zgQlNDEc=%*nhEnSVf1Jc&pJ8H}B z50ERIdo;g@@lN+0+C~a9FV8}1Uw0lu-!rpkTBL`!U+l*++N>Dad_n4|`%sJY&RJN- zN^=?JEqR{AZrK?>OURGpGW6}2L&}yzwWW!0qE$y01P~kmlxl{bHyNL>;ZfZ$uHJ94 zEp@MVq3fs`sxuJt981Gi7|8oZyhxwhwgr)_>;7{0(XmgU;tyDLmU?8rx1+ApQDYL+&e!G3_eK-OjoQ?ZV)BXfxnEVL8;cnn%7g)PC`$*g2uz zcPXiHRu~z9n1<$VHrC4MM>A?dGyU)T18uY%X)qUxm%|#-SvOW`PnE&dnJ-!?e3h>a z&~eo0_8brZ#yAnl$U0h6e<9p@Rl$u7SeXNeWVq1a@zV`y0__G(xEr zoks>4HrySnv3WYjyh}ay)F$~OxeUUI0wRrih~;lAG`E`IC_lFL;^;&-1`(pV;^Aju z_$76@1g3>!9v@b6{DZ4piRNM0!rqT{A* ziZv)zkp#EX+HkWqvxW_G5^VHW0a7^JJ*;PSb*z%l&m$(DHx7GUcw{Nl9p^Dr!))Bc zPVpv61)-6PzX8=0!A!9yn#zlePlPdDjpu)ysE(Rb-n2A6lpnHPGf*v!;Y3#X%J#6U@R_M_V{Z+ASxDLYo3RhnMw z3vg2aySp!B8{gu7#*Epv;ExvXiO#2C%8{m|g>P!93)RffmX#*JBiOW0iJO0#Y5i*EiCoXoG8yV)?go95GQ`IF=US^Hf>Itt-O~4*`CxNJe4u z3XqcP?b|V`SzOB|isTChA(-o~h)~Kvh9=t+?Sd~6|& z(-sStBbV6LR*IYUb-B&?goXFlE2OUBIkaXAi$*HV98Na^HlpXZKeS+2->Gdy%}skZ zyhcx-7IL+Bp;Di9hUqEUU1)n;h++6^*3=iOQu27?FqVoGixfJ$AnWPS!&q&&mxg?| z2(v*%XXdFKqyoVPOJk-EbF#$sG#zYj8=J<)dq(QnUz9PL^~dX)T&dCm*S|7a3fj0< zK4$=KEmR|2PxZdxPuNl2@S z!>z&KM@XfxzB@#VpndX#pkWlE(9{K^dnF-6?PHG{h2E5fK4Sb=k9+L+=3V4)`#39p z>Bd0NI`c;}Mx^VgmS?=$Q)x)rm{vyCcBZjt!?yx6_C3oQDwZ?m zge+abwx3{9_lCGO<37`M<;ocQ@!Cgq&_1BU%q2B4Lm(m%!^#y{8_6g7k{YJnB#i~Y z;Y|w{=TaMnTs3?;w@D4d$ZoW+C z@cw=oGHGmjJQ^N7J+1CxFOkFB-W&b+hK2k~j_-lg%g2$kZtzfksSo)rR=|JmLr(4U 
zHIq8Nbi}$#-UzOx;=C808$MO9Bos1nc}LZ5rq&6XBF^h**xFE213o9nI!z02LBwOK z$S8$ZMcc#wiMBqyoFa*k^)!*(cGp|CEY}zupye_`In;vp`@VUN0ct7>DRZmHBiiz+i`2>hv_m>5g% zp`{*FerRplURnxRcKeNBwgE(Z$QfZtES{q3%Y~9e+Zv6Q363P$Bq?j)BKoNsT>tUl zroGV=Y%-t0%Ny@DG26-n<&dOXFI%b*j$gPpQxL}e5 zvmQMdeZ?k2)ORWBad7g{Qrklva(X|hNt~?YJ0y)Qkkch5o>xxXw@@yay6zmUJ$L)% z8F8}Dlp3peu%zfSm+*Nqc%s84vKuYUoBNiI4?z|oaSXt^dgC}#xqtjRwl|{8KP9R& zUT=%heVcT4lRRpsu{6}8S8u%pxul2QaQpssDC509jr{&tgTEyzA+o;bHDLO#d}P5O z6GJn_)<^R@bvhr5u|Nod{PHH>?ThpQ?gnN_^;qYpgp!ANFbNy;hDRnK6rS1bSkYZI zS44V5Oha1+h)EZhbE-Zcbit1?Yq>^w!1|HAR>E8RZ2AdN35*ojTz$B1q&JmjGionT zG?#vlPFB0FO1^eCF7+YMas#*QN)SuOYs3MRCz3V$G5U-|9|ncxh&s9;7^PVymesGC zYoZe{wQF_d{m~Fvx|Z#Cdg^GXPgm^+Kdh9iFg82$Sc~n`d`}H66@-i+8-+6^?|Lq& zd&oLH3x7yLDtL*j9iYU^wmmM_v}J4LnzX8Rf-FyI ztsAG4ehBIOZ!SQQFc1n(-R*^&EIB1_$%ol320rgScHjL@OU9eFz9*!Mn*Y3@8@^3O zQb|cyC}gEYI(NYCT&OeMy5ILUW*MXeaB`*L@So4e#=+RRgIT2ckJEnOdKhAN=-I}5 zfR}`n)J`8idYk(-1gz-LmaHk)bm@<-lVNUkJW=sMd*^8{mGZqUNc4@~#l>VY+4P;G zqgXZtf|fo=|K74y-#8V*7+Bfgk_A#a<%6_4{yqMts__j;&bK@%I~9z@ju-9b3yk2M zp$vJ29tT-;jM;Fh*ZiK(IW;L2E)aapouE_8YFQ02Q%?|8WX1$ALe#0)0+O98&2NVzL z^!rnYnaP@h4t_3F(>UM$@*tRs>RAG$j6(|NVVQK)ril`j1bm9m>F?#=f;bzeVk`14 zH>B-HCjubdt>vcNhLLt1)8K{9@7;S0siFslGKe<&cfx2Fs=VQlK`D~wAL=K3yVuGl z?YUZHELCMI-mt9PEoPQgP!s$-kv}(>q>ZA*Ma1Pp27Wzi^RQO$g#l&bu_peMP#uik z%guv9o&$(SDV1l#QbOo&wN;sm3LbGCt(k`pku4UKwFml$$VqBRb$GMHXKj6(E zee$e-^cV2OPE4ga{`B41#_XS_WeOHDc}OZVZq0;CF*;hIa!JhdfBDcRYV@KX8z0Sw z){{#MFM?Th2@aUIc2u@>r&52BpO2)uJk510638>nsv){sLfc3-I8#O{oZ=Eu|V@nC8WM_^>Ao^0MzPDu|HtqQT5aeO>4W?l(+Mu~m&47bhLD_|W3EtLAC6 zX`Ep^(GoLvfp-@fEM+4u1Yfnd(Im2eDoky2%@y2nuI^D$+!9Pb=cVCPwEKwbd6vYbz;Mv&E)^vvIqkZd?KN+lK3{)%l&s5t>s=B(mwcV%l z4VTj~&|!B7WX2o;PiaMmo}^a%?~! 
z#&O8WLticX9XEf;#Z`=5)0YTj@1hT31;5akUX%gwnStfEHAkvPg=R#?N*;=I*4 z2~YJ=O5HCU($tbpm54*KA>T{`j)i^+`l}4%Kau)em>l2V{`CrIg}T?Wd@cMQSa4%zw@RBPmv?v>v?I(Yp4*m0v%7 zdE^Nd_+bNPbn=s_?9ywBCz^ol4E%!m=XHm#)6l)s`xQmKsjOzZSe?zP&=h|Z(Y7qk zc8yS|?CGA;)!nM-MM#%mm5=Z;BtD+lk-m9uoC#I~68eaJ>0adBtE=l~6agXI*Pe_I z*&x1E)e39gvZcNdi5|5ND z25en@d{e${9Be0mVX;0lHa;WxTyh2pzxNK14WV40L8)CB=lH1p<`Zjviq=3c@Ti$O zW-}Q91FhTna5$WNsDuQ`K@vZE&OrX6#YC;a0}jsq&a_-RC2(To_pjP!(ABPoyjcy1@0$h6dzM6+|}qqUqOwWvh^%I zQY5+8$+VX|klyL~&m;a_&RggY;&vq=*c+TvO*R)|H4n%TJfq_~OS~>$?um^V45A!e zeT^Q*ObYh!JOoEe`#TG+N_?(0GiKLu7fUzWv^m=zEwRf@Y^FT5jIBjP!LETQPF)xR z#*(*MTM2fAA)86k_b3ShMzYk76x%DF4L%ivY#a{zfL}-MhPt{&^D~NPb&6GWN94g5@A)eRqw1)}zp5V;U_vX)k z@%f_L51{mP2T7M$M$YGak)62UPo?_Cy5ZLxqtHRVC?b5r!}PxjMf6BA!B zNeHRHE_o9)+?8V55yre9c?B$mO9;X3^)PA?xCs z@hN3OZLz!7-0$?WFp50D2Vw!rNi$wAwJ94{TjTV_LbXTjJ7NyOAkA-sC^_7LBk+uX zxV`+WM53f}Nf{ZLC8)7j$*tJb)EhZhAVb)PCh&*6>049E z00|i~HkO3B7a|h&Ua^i6dHfYr;W7HRALQ||6oO>5q2y(M_uoDD&-Ye8?@vURs&$wV z*qY2|A>`%d$>cKC!OA@KvbbDYT~5&|)M|=Baod=D9q!jepaYRcz4p(!HAf$M$qXo_ z0pohIFJOY+GhKJ8_&enbGx_+`;UE743`wwiY>2lS$CX>38;H$vd%P^oAgzM<@3RZT z?W@z)^Eki#-UnaBvs4{jW_Q+VYUi?hmbezyM}fDCO3hi{&|1=RPldC2;N->9!(MniA#XEBQLP@& zQ7;w04xAp7%_v^|nIaBPbe`~P@49Q$N$s?Ql)a1`9Gpwtv0*o0cmT6%S1;L<50_nX z!ND*aSwxKf4kEBid<<1?v8Q2Cl5>qt!)=W)!_!u$(aAbGW78Z+vU!U)nBDEHF37HQ z*Ov%R&W;}yfMA*Y;M63W;ldAIK|)%&n)Bhb1N-p2rU(PutzV34R1DMJ#5xVwd5NfD zFu3S;X0cOx&XjiH+1Z%Uv}{+!4#HBW;N|+&b~vK*$ie28Sc*GqCIi6rzO%(=jS|KX z;S+D}q@t>9#=B^kChBpJoj1Cc|9t#ka;GUt@a>}#_4pj+_Gez^` z_w(*;*$#7>X|yOqBaJvHnxggog2))z2uo&b!hWCU=X9r>e&gp|w!N+ju9ezK%cvivXT18Ehspv+VUn&oT3y?=?i| z7Gozk`@Fnq^Tk7n#5fq{zu&=|YFX!o8=aZOtgm)lY+uw(ijI5iA%47YDfP8}Ax)3| zbIE;;c^i_xeAunswlmo?yK=^na;S#q;Ac@KWn^e9TxD=b!Fx(!)zcuAc0n7`o@}!* zqO%FAur!Xl{o7l}=Nkh#MJs^&zgE2`BiniLBOU~>dCNi@RR2mCE(*p@O z$g*oY)-dmFK35{RFg8|;46nGA)&23Mu}ev5r|!+~&%x1B%2-p*P+A{AK6GWi3Y`-m ziYq*mk2w$)GxW?wu|eY(N>YAP3U=JOWm3B&sbNg4Lks}q(XYkG^leWtSfbMUyDU`G z^rVGqQxS5K#X5P$zr0AaXKKEWRC21euJ`be1vdj+wEFLtPP~@--Sz>dhbNWB$%`fL 
zXW28+^<=B&Cu>T+eIR+BmKYMXFlN2#Hi#8 zT9fmD`ZQQX{RXGyvOm|uN|f7}PRHLd?0OMzFTmZMoxaU3&#jkAkuc=5&901%4mXe= zQyGMW&f=aUr%t%S zl)F8VtuASc4Dd=2UT{i1-?pt*o&0^n%eG$no00LBzEruTplq#==WK429HyCC= z$D8e~9B z!zoz9AiQP(#%m|tZXSWUXJVH6cVCl%5wiHLmS|9E9K zo6h#WWY*Eq8H&Ol{?i*UJ4aP(fPzoL$QXBjy2R{qqJoT!Jh#ct!eV?dk@kA0+$;ZC z9V7GhDi`$io}O)H3MEkxaX19#i)9;+q|39W3$Vm71zG>oRb&tB?`tpR1#NM=*x%Eb0*4i(>vR36G$K<5Zz^@ znC-qrLS^$qlUr)JdtexN4_y^>f_1&qY8XL$Y@Ap382t`T7Iu+;?tfJB|bkO;jwECgVdU%w;b5} z!f0#^Bd?nURZ)s1=OR86GF}dQDws?m(MTaX+LmA;yI>X$KG}?s?95hwmOW7Dd#*~A zC;WU8o$*F{wx&%LNI7E%_;)M*0QPyB4kHU@Q*J%A{v(CnEKs3JFz zIjOhb7DB}33he6g1}|dx0CA-`yq{T!#UcfWJYJ}O_6H+?Jetx&Lqj2CvSPIirFj2l z1_uhs$dr-)i-3G6AXApY4h{}}cfOV)okG_I)OdTo*JyLxN5?fEn`m=7JenLf()6_SR$p~YiV&MizOgb zxkS#u%q-<0yQYTTX04f&jjb6>nQTww)Bo_Ax0&l5cEz|{&m`R4-GhMo3xgb>PAI9!_bSVv>m0`14 z4UCIZ1TU)AKLV|l$v7IQiA{RgoJeEl1U(9PvD8TTUr{}={N-Ssa-O$Lf?2Oyu^E}b z*AxsF@Dl0&dWr6@)7iWQ7W2h8{60MWFJfY1XIniv-QF)maT1`n_&h!{6WjCgjtvPJ z&l)Q%}zsbMmE=9ey|C=Esqhg@L(p+~O|q z`cW1wqla&D`6Sq!T|@G>=e%Qi*j=V9%IV4LcFDxka=1PZyKJH7ywq|=#=P?}%A9K% z;_lduF2IRFP=4Cqk=A#6X1)-9cuX2|;ZQf4zDxJqNv`Anvf%&Pot6cB?>&LV^U;LnNm)8b`HMa!)YMw&CUgoNPi?Cb!`YrxB4;nk;1z&9N1m1srw z)4G!(YJV>^Z7~B|<@PN;3=iqVW>ON@ zHFSJYaR$f09oL-p&-bF588#q>)TH@k-2wG2M&43;2v3U3kP;>;3c30v&c6XXodP%*^9gx&crK z98L%Dq|(VWxhmd1>FjnO;e+?w`&OQCNR2CC>98EsP5^pe)SH!4z~RVtG$+6U45QK- zbLnqvq?hsIMH86`Q}s<0&t#Z-C=zTlx8r-=A}^F-jM6?;<;!B8{s{RsuEvDvaJ%&~ zcS})s^gF-jU~aMqL+q#6^1|_Ssa*1-_BCNY0r`T{JJz3}1-UhiwYFt$^fHK#N#9!H zcG?U&x{@GWM1*iIWZ<>%)lqH8?l{zZyQV|9dGSm14Y*cHP9Y$55WdVHEL%}>buwEr z%009)N(MDH9ioAqS52S3kuGnINJMi z8H?@=!Iw;8hh%%SU}8MjvTMLX-Jv9J28YKd`MNH1+jU6+srwb9Euz}htk$7YDxsP< zl_8gqjj|(_GkK74n~d-}mS;jt)ZG4)E31{EGFV=_P>Q$<{B(}EQl;^=b3y8N9_@vk zmHN?EBsVcnV2573vcmwPD+I|wCqpKeIsLe6Fv@iyvkBP-Im2wKBW*~X`7y;wx)i}P zR?6k_e68Qc+V5mt9jv8Llf`{c3Qg5FnrcW@fVZ5rZtq-@?{>vjD){_b?vDxy_-jMm zaf?W*g*ZRqkeLpb&oXlhmSJ&_sydeXwgmwoYl&u5dxx;$O##@_&{ik#>HOU*-X#>R zbNB3@)}}%q&5d5(nQ%QFxx&aka9lI4vWmqvH#_>GwC}uZ5%1pJ+xO8GGWRz{o_;2sjkzpc?ZoZOE^ 
zqXgk^B_{uUv}()`V#szZVOVH=cUJ0lZ-%7~9eriSeC9}QerklF3N%Fa2a&DSW=0>U zzo-9FE_V~xB9Y%t-RLt=YLu1wGv~mwmRSU-VGD@b)nwe1gpw3|^n64q(?b|7c%$KL zMRk*R!m)KJkUICrUb)^sWDFzkST|=Ujh}^Xo?Rr`r#SigAgk$rFf!lB`hg+-0|gaB zqEi%7lZ`4W44$FJZ!Iyq(zY{%d6xf*_sS5j5gC{g% zu%kkwXojI=%&n*PqJuH;$=XCa22VwY_t&l4fiihSjj?!*oU&!v(6)Ikfk z2w8zpLvTWM5a~0a-zO7wPqNUI-e+~qqdiM}#}G{bi*Mfs;NHd+-fU(P(OQ!~H9BiO zwR<1paJV9E$qFr7@H;CUq z_;%849dITJb9Dk%O>!L148h8&9!Aft)=j27PLvAVF!w%t;2v?zSRWy$BRmB_lQ-8I zYS5QQ&JQ;o5GqaBL& zM((JTN9{S$>4s+-k_5-lU@tzL!e(x|DCnW%M=(6%Lm3kP`FDXn8ZyJ;6BiedABy$s zx7%&v{zw;5%)=i5HI|!d(2@185k^1$D~l-|ZF5XM)>0b(Un7j>`G1cv9O#SJZN}?S zbtnlDhMhhnk+ZNB7dF3$&LFtTrS_%Odp~tqI@TpxtbS@(kWO`JXG=)`E=qiY$bDeD zZ_?!ep!{Wl*7L|i)v3<${#DgkoSjain@<4?fwkv*W&!J3mj~eW;6JOo8 zOFsfn%pa~brO9RpXT4vVt8j)l^T`;&yX|gem!|s#(St|=1v1l39i)X~eA}jMuF@V{ zC2lI?706LO{n$}0a=_79k9Qop3`tV~f5A9!hK#tzPE(N2xnk~__jKU#UYaUSGPz-s zWRJOO5m#MRWjvfLFq*(?4QJ+cxA1p_|g=amtb1B8_uiTSV2O zBw*}QzeIJr2rQs63Lcoj2&lDMN0iC8t&_(|`bSE;;MeY-$YqJ41F@iM!A!nN<-p^- z;wVtR@X6yuhawR)5QYi+wnf7;@_OBWyw4MGC`GN^K5_Wx^&j|Z1-yYJMIP+oY?R`_ACUsyg7FjsbPh?fpD~r zh>}fb$$`M@djS^*6O}};PQd2Bpi<4zju9)&jU(1~Vly#+!BJCq?d3>6%1dXEp~1z9 zmoV=2ry6QCif1B3)}I4>!2Pi#^>nd{-%KtoH5@$Ug#Dd*`3$c4iqot(S&ONA^Ldg% zbIHK93CkV{ZdUkDmVK0)P$*&baV>g8G6fOUfvu+!`Cf^^6*`yQz@5`Fet9YOIY< z1TaWQzL#_DKkun%H5(~H??7g+qZDoIC5WuvLnwD@;v(7-|Edu3?72X_D@h>%a;zR6B|rbBX)7tD zB#*%}DlVAw8%(c6p@PVCcxd5Qna0W6rb~Is8C z3oW9(Wq7r9bhchL%d;RETapTDOaz?`8JH+BPDr6Mtz|2|eS3LLDCFliMB%r&5?5p9 z{8Om%E4%sgY+dij-dB6A7(#DT-?_&B2RhSz_3y;if0mtr&O|7}p&lq2GovGM-NlN6 zCL#h{E@4_OaS?uS$n#3rEJ6ge`L^~;bO*p2}DOURL)9tP$VB;!~Q2dR)L~) z;kOz`JxLAG&J7xiQaPWaP@s_D|1|l5^x1XIshhMlM95A(%?Z6~t!#?j-lX7M-|UeC zg(N&YNp*(A7!Ll_w0Z?KY2RP^kkLowj<4$`eyBXDJ=Qr4u-y$!6*hF;q2M+W3_5y@ zO2{Rp^%tUv#(sBp>~;^T;6d#^#dP@XzeU#4%M>xOXPQwH6OQgF#l2-K0!LO{#QkzhqGyKJamg`rbT>}<>f?p+fL+Rk2TJZ zwKw~suDBBUE19|;>%w?rx=(=(-117RHez@&PeF>qPQ!0@@3YxIW+*9fU^P6b@U!*T zM$;po5$eMoK~GQb$F&CqO-EI_wE6kg>rhx0&?T+yHM6(N^H}K9Fa3edLs|BU;VF&g 
z6hX{Qr_bg&F}zmt_c{ylb;A0D?U*PDk`qfKc|pR!!b-jL$VHNh%J5B(^miSBomDK1 z-!)u?D&!O0@jHa`P25{8f$X?dDs&;!&Th}GB|^h!k4Rq4{sSlYV2(!i)Ldy>$WB%# zXfW15?>pBo>u!(IPr0N46|xqWNLkNE^A16+HOgP8!hR%fM_@pAjRb%ud^`q4gXz#L z^PW#OdGnU^P!ZP;I5aAwUKVy zUuHj>JmW8!U*x-Ax#>)r4KK}%*pqM3eLP=IeF`a8!^u1g9-eu?$FUzv?#tjm@fweN zxX^xWQX;ha#25cjeikH8d`u~BCaZtYto+CmB~WB1yrNl!$>zz&uQo$>1c1?NcUx6R zT7rd=r5vkcqf3oA5c<38*-psVG*bor-&(vs@BZwJzF2(d@febTKwkRa-}fg2!8>5N zU^tm_jO7dsi84}jMCEI8xvqL_bId4M>Hl9S#<-8KD#`YBb)|cDpirJbp28Iw1?yMH zli%leHlQok*3$z6uit75RRxGM<)+Ll@v8D9bqP0Jz_mxCC1*Krw95+7=YPG%40fKy zKkqasyy^X*iR%$IdM{z|6&+ry<{q#>x(H54IoMIDPk`@Z|c5# zW;U+bpCl!)C&~i2!)db@s;^AgmM$k!CFzTMUyD%!>0{$PrQ7FBG7@vx^>#(wF{W*C z3=Dwf>R86eD`r>Te{>CxzbyJ*o;Zxwdjhu6&FvdyDQEkT5Tev=HcMA!C)7;z_9fF*P{X~5yi!!7c zBkv+>E9P?)vuLi{cKS2!u*K~az>Brp#UcVRj$thV)8HFv?^UYM^T^r2i2<=&^X zaZ3`pJPr3~gV9zll$IQU(oFa2Ulm{-6JNtzuB0NRMVMWj5m+nybpKMdYZ`1#;l9n6 zP~9Z0UtSX6*9PfQqD^u=Y2s659zI{IJODAa24`VzAl{ zQYir>plNyG##r!2FG4V@FDgwGqetW29bS>RmMDVbs`(B~(iz<6X$_V5RQXn?eiy4! z7#RF&4kmkE1TmCsn6Z6d{4^0oXBI(%VrS+dyr1uEtrd4>8mCIBr&f7oHj%Sm+xmvMOTy+CNs zrzT<5aLiLfeZ^ohNVV;>A5)gPInP-StkMP+xk16rjM0#YC|OEV*ugACk!^hKL#`?x z??`gpQY*LIOZr2%IRjNzEwvXMnYvBqjEf7y6PszlIzMk^uK|lBFE=9F{o!;7rTTWk z-N{gzVy=K!3vey9n0y;lE%+vu*#_t04SX|6tDg2H zb6(DLCc$y}W)f?kfl7;8%60O+W+B!{M?iLBk`~@h5swlohjxjk_4Vb^~>nFd(!!!8}2SMyMyD*jQ=)%R|{Nynw52SRW zi7*oSLU>(vS^5Pb`ggUdtTk-z9;^=D0wa$vo+RV&&aoIHrF&~Re}hTF3eS2Pki6&L zaNE>#*hyPzhXih8iV1f=%*sX@(+{yD^t$1qH3+((V&{HQz1&!ItB0Gib;$^Df^|93Z*iQZob{T~Zx;tiK4{cs;9q*@eWnu=GDYVlyAs zpOZa8zG9<~Z4ft@D{BY)beRIYT#_4NwjN~2bbA8rMxg-ADY>xiy=C0{PrIv5L?}C7 zZnTA1Yi6CZ_C+ltsUCJ}8qb=ccY8b9P%-3YHk2!mHNMHFQ5Kai6^dcPv4H&>0#w)( zBeyw`Xc7_|#j|RACd&N!!P|OleHil4a3vY`AwiqL{Pw6Lw7d27b`MOa|L|%)a{Smo zM$V+C&J6!PjdP(+cKTQGseh}=E4af z^1FfgC4cxQENec4K7SW3sLaaqyQA;>4jbEw6yyyQ#FqEy6iIWmB1!5H~mGTO@j zWoKnUwGlC0xZ*O!Ne?`i2yjYaCM&-CM3o#^YUVy+?caR@bK22tQkKu&aS{CO>5il) z^|E+)+RD9hC(G(;m8b^NYjZbl@^k6Q zALTrfdk5$;^$Fae!|m2gOEGBVfQPpNO7|R(Cpc$l$d?Zn3jD%MHsJv3fqMTi7r!c_52Hx 
zuu&LNSMLt&2gA>0-N=xOzDpOJ?Utqas|OJ%nzpx1^L3S*`rYf$P(skIm*~*|%!T%Q z8_GAMr;P-qFb;_FY@dx)CrZ79d~YHfp6D%I2*wATVk7OV@Y2pX1#G;mrM#FPFjVhksP+?BCgB!B%`!0=@Wju zVoQx=3Con5Meb1R;4A!Hae?ZFGf>^2tZo05@nBKCa60Z^pfE%LGSgdh<(WI3xHyXY zAx*{*fXLW{`=kSxh@*&D^nfL`X`c150jTou{ONkt6G1xL+357rg&8LB6$q<_m({a(v6zT;&oP8@9r1AB(q$=;?nZ*3SA59-K8~GkmMTVE_bPDW4f~A*_mqVyV* z54+c$0P@PsxR2`&{TTOB?8yZFLlcNBka0r%esEN7n;2Ub785R%XBO4f#)iy7v2zqy z#U?SKHM6*nCmzj9%( zY+58Uv%pqZ+JKT=c55k#;9&16g@xC#N>^r&$A?IH30|tL57~V+H1+ zWk|L*_GvqD>YVJ@yN=o%gfTLL5e+T;OuUt$rVSXRCWe7RGaG9JW^SHFV0ahwoRUx0 zxSJC36qh<`i=ZANGOZOB{F&fBcS>6}Tc3=7bsDN@1j?&Uu_ zq9jMd6Iqr-g@=(-{1T3AUBQ9WVZXx3wGQjoEOD?+8%r|Vz(sMe7!BVb(qCxxkAqrJ zl%yhQYUfgj=UE2kRM3MCr4C+NOndXIRah28`bWa#=EW5^oY2J~RW7vyF0*|x<%nJD zZ$k)ts`f>?a*Au8`WjjUv$T`RK^eQWlQZ+Nc{Urr^14om*efIgs1}b#n`%p$1AXp= z+kW0#NM((e@dz5~M>oZxIno;8=!7&lsG|2JPw(rm-d}GemXKVKL?jQ*cs}7bQknz? z7gHvA${PJTcX;PRdDHc{k$Z!H2^Phz1;qKQr0i6!KTb_yBW;l=gw*0C&X?*oD3J_MpT*tMB@ z`yM53N~M9mj%Q>e$Q#71xX|Kxwi6r&XD+#HOcLk00kWCNQ4w8*XSF-9Kn_KNJMVLm z?EMTgy>J44x}s90D(U})Muf=zDiszwyg89!uHFlojwNQ zjq_1{@kR0_x}S{WS)Gm-)pFqX^`CDcs^8f4Js+PoLY8;EKg@uMi{z{IZ$_i!R0^rJ zn?q(1pUSOz3V9ea5tCKXC%8tVYqVCuLXo9RKVY@bm_xZm@pNBJ5uVxaWFnk)QMitXE=^ zn{$O?gV*;KuY{5$&)YqQ9yc2S4{qv8n+Ro-35y++9m$cK!t$`I`vZ;Rb^z1l#y#cV zT0Bk!hp}QDRHRl9?(~WTo!YwkzOgY_>I%a~t&i}>qfi<#ZF7AK8zGQ8w1s%GmVptP zq`9R%@nR0H5x3I#C>nK!AqQJ}&{D=Hv!#-K z)2D1dTuv2vHiX!bciUOYzlXQDfu*H{r+@L~&+cBALk|JGY8vSHz^9Nnhy)%Q;z_sB zPLiMZ0^d1JS(6+sWq8L}aE3o157u(T`7*CBu@VyMdrSRY9pW1g@Cn)bMlsS0pj6mI$>k^L1>h!;#nqF?STQ! 
zHH@Xg`Y^DWOjTzr)&!mi^aBV!-;BHA&BtkiVurs9*&CmEZ6_&aH}CFpRCTJkC%Pfh}i$~#B`E6a^AFI!K&5YDD1mqnPVHNuul1a z1Gn=ET+nm%Ny2dbwd|x^YRMkjb*-Vt>crI8m}pMaC$S}7Ra-wOg({;E2-VicntbyW zf|=%1yo7>2%fTYr$ZZ=gh`=3Ne{!8D@Nj~YR!7|RoZRcZs_=%5+gSWY(8Mp6Cd0gL=eP0i_4c3vFHzO@=Bg;vnoG*-I ze5@mlplz9A#XNH3Ne0c?T#T@2$#o7;86xM|!9iCUer}06fm%XihzgF0wlNV4&~{`# z#T5-E=DZ9l*c4{QX*p=Z54E`Cp0enL%Tz#fbUjj06WBy41k0#TB#x$*o##w}XIgu>#_=GHXLE&RB=gP36o6Q3^N6O+iCxCkh8vV_Azuk_o6FO<=k z0t{J>xD#kE@M!2>(cj3=3P-#G>x1uM?>i#`xuUW^D)Z zGyE`5UU`@@g@xhvuR?x_3}nY~Oe6G~Y>2)>Lb^L!K?g_`jsMIOE1Ejq9GPUaJHCJH za>8Yfr%a@n&6QKr4N?k|DImM0GJelxDdh_1ZK-K!iutVOUs zS)dh(2m8`+!i6?98V<9cGpP%#Sbi?!MUlzVE14{kQ8$5YNyr4Re zPHt-o25vvZW)0%__;EF*TXt%aR%+qago!-xUdwk+doOJmD*0oq-cKDy$>ifu`yKZw zagM^w+CAn}klx%Y^>DbII59YthTp%LO&4L~e)aWJ&VJ@tqzqx=U-Of}_vQ_aiI87f zA@2v|!HQmF559S{(+S9aHIv^eF?$6ATkAokXSld3q8CaA}f#LP|Qj>0DZk@P0I5(z;1^r@7*Re%%o8-P3 z{oNrLWl6S7sfi+7#IE`&xzFSmimRBEfS+I8dj+)NkGdL#Zy6mI?t2`fn;l~$(PMKQ z%N4EmuS9lPPLttepXh78mb>AL=xBL=|1~#)(PiC$hLx?`EhDiqmN>nzD;5#HFoxoM zffF;w%1e2}Q&N+m*e3Nu@>e6rOZFdQ*IVpvgdo;?>_pOSKYYO(dD86Io%2yvSr4j| zDOu!mon4sld{oLB-z1|F;!Q$_+@X(E1SPD8VmnnhXJdkSwmu2=D#x*Um8#N%K8Kjf z-Lwp&O~L2KE7#Vr|u0W0*W=|sOTB*1!% zz<&`G|1jPkQIl+jQ#)UddsM~wryEB zK!8AkySrO(cXxLP?(XjH?(Po39fG^NF5KOnZ|!~0+2_3X?ni5^UohHibM?{Z997k; z{_O8>m@!&0|(cyv?a|*M@Hhu@s;mBw1X{a{|VFG4&$r+z2R1n32_Hwzm@erz? 
z_i^C#7jgKBV4B)g!f|y3`!#<2@|6d6Hlr_IhvwDBRyamUW&E+C+nLPAg?&hEC+c>R zxW81|?maP~%y@plts(4J$&Ss*olEWhK3+@|OvC`MmTkO{xG?!uxDOaROm6f}tJ|~& zU~qO@(;#&-Sh69iBl<={zjAftre|2mj5x6fch9NH7mAbiH~QZ&ovIu77}H!d_p_T__A|@d%!GcM2 zJ4=cdMAzQ`0eIB>qX+nj7{3d3f4}pq|JB9L1Vc0poVLJ&0Kn$2XEAyElOe8f8U7-( zdo;l%E&n^8HGiu;DR4|YiE2u5&h4cCs{(_P;6x{pk6kZo&ro`!IBAL7APA1mCtJ{5~}2UL474QR~*s)5Mz0sOtOhNJFKx%RK?r059z0S2z*K+ z-`;M8)}0%Dl+hqd>h^6)z8#<;Ui;$lTeLVa1wp(h8Y|;1E1c!&Qb7wt_YM>V>&GtL zV~7xGKY~3lC>riFYmdjk)FwM4V`pPGtP6=K81>*!KL|CubIk82;+?*?tCqc#rlti8 z01n0l9r(c6*q>IMH!n_T6kp1?ezc^7w7Khcy`#_V(LXt|_Pm6Cdx2uvL4_HEw5&tG zcj@b~+GxD!IxXeVkha=|a{~IX8Ln36h%b%dqA8QxSlN-pBuL&<5wCIX=ve!SRY`i5 z=2H1NtlnY1OX`? zNfEG)CSX2WevGgNIccg~sYeot100X z=L2USW=g(q7Xiz{Ypt>exyPeau7E~NCw~I?TXQ+=64~P5V#60yqqTY&59HUT76ocv z!pF2FdFaQTavgF`54GGPrF_p?3};rCy@ho_ zLma2QrAM{FrOTk@v0Oap!2M&f-SdrH5w0%`SjoM{N*~P zD#PdwuMbkZTQL&9GC|hXQ>|Pg<(cV_(Cz8Lxo0X zziW09O5AFsoSxV*8W^68eu7pgRdMW^{2E8pE+VG_MP9++cB>_kgDzI&|8j(ts?WL@ z!N4q`PdrBY5I!5Z{EU}iwT`ab{Ed{fNy#p4{NnZU_4e~@hrF+0fFauQSZqB#Td9$y zf4B|tyyQJVcD()J#09wf#5;rEpGmtK^uC7w)&lHjq8On+ zPZ1a=a!lIm5B;&(3S4u+$gpVVJ;Z z-0AbwioyTel9wX?{`Y_;< zA&Hz>WXbDir4m2tPJ6bh*DyH-XK%t8qE~AQC+%!Pr1jGaaKm2R3=2h}O{}lW7f5L& ze%u)GQ)iwl?wl(cj07fO;Tu>0%+;{aa@DGSjkCYpMFnQ7gnd+MtLG#TiC!-;QVeprN;VNA{fp%N6wYE+?*<$Zo4*W4Y)@tI`)I%3GM*8d5gjf`27qt1Jg|zNl5muiw{J_iUm_9OixeGQss(8&hE=vgCScxyX~jz-5o&6 z-_K#&r>pHBK1k#7MGtIJVIOkcLZ3`PNk)j*>qF<;CvY|(7dXc8td>ug0uXm72p95;(A77gJ>y0Ot+g<2@;m_*0pLPe#NI;wM1Z=z*EuDzpj1Cy> zEAdc6SYd|$S5B}SE&KBV^jZc zt&sx2#7vOL(aDK%LyE(Sh_ZW({M)lo__6?G!uTbOwl)zaozInu1-I8|S8*HkKeCE4 zL?lLaCGBAcVA3=x2?@9ujx^aG96d0aI1r96qY5>^e*?QU5IseelqQ-Cojo z@=w6bkMT=;PkQ9UtTTQ?Z?s(`5~}GCZh0?zwv(|?or@F7f8YsEao|PWwtv$J0wyUN z*x1Ml2!H~0F~gTTo{j&DcRP>oO1;S(Fja!ZW^=s3Y>pf_Y2}S}9ARi^*xwBBmNYaZ z2LFya{=fhC6Ll7s8$)+QED`WWBodt;_}4(`!szABUa5Ejsm<9d_KsGItsXQQ?VEZc zb3M8b)c<=4Pt*e^6UU^eX25B^_ z;*mTdlQreFP<({zwkzC+!pS>@__7q#7O)?rce==izHg-=y8nn)@FH#EBgg37UYqXx zl6E;fW%s^LFVv4-5_&cvBKkYL6R9W~ 
zY`|$G)2SOe3V5S!2P7Q2fZQmpOS8n-=T`<1ycidmG1z}^u7CZZob9P2^Bs5?W3ZZQ zvLB$S5>#`)n4Aj8(6ce~ZE7N5Nm~7RobyWTl{jj+U3F4Q`g8`Zh4ojGE0*?L{`}Dm z(h#VQ6X>BV4oN=O;oNvJ1nJ&Ja2ej$0yp(cTiOo>-xp_Nbf8;u*Zt$$D>+Huahzu6OhUIgnM4r}Oft8t zQ?!oA4pYwmoI6o7h`bki#e_dxzf^8Suu0t;jhEF^L$hWIc*?4dxK2RDxg{> zG&G&5ubj@Pd*OZ2efp?0Y9@k4*FRmLBM5P(O>A#(iD9``xt|9S$~ofS6Tyy@wxqs} zJV}$&J92V*)%ruSjSuos@)oH0)zu!a(IfpSB zt03XGjt-EN(j-T`Ycc{euVTFR=#$X3vy8PMcWqUBD$4ptv904)r%3bF!0QnIkv1x= zpwGn*$2Ia2f$g)5T4blVp!06M6BNxhy5$g6zHdV!Yemf7FjGx!I}E&$)tsK++zuu? z;;rv4*+Qu37_~9;3H8aDcG#fI&V`D?56@Z<**Ez@Wbj6u$IzfP&0S8!OEg$6?}7;T zz$g$u*ASSV-+A=xu7x1h`>oVj?;c@t^#E31_vVO>$BLu03ohoSsjP8l6Xn9`BydrQR>i|OoyTb;l?1nQrJ-Sxi_oCqK@kL?gGRRlUr6(=_S z9r6}5Q@rBg@qT8c-K#n!r|bFCsF^mC$G{>2flR*N-!D(IeIwkVqqJSHC?E7#oU@F( zJ3i>pJw3ZM(IUAWqTV)~u%iffdwIm5PAas+78 zt`ePVb-#l=E#uPhf~)DxE^!<-hH-2fwbNz5dP@U&9+@cHjzDqfgi>=HIg@VmDr_2u5k#{-=;)A4%wxfSaGlyqje-BI{{?1T%1-@(1``D43{c$U7S#KO@fedr=#LpE zjC7?UOI3T)SCAHJIUJVra}TnccCMFb7XgpgEV~y;0^J9zHkk#mt`c(du1o(k2py6*jI;1BPZZ_)=j7=lVEcVb@?m|; z=Wj5v4ZL$ihQ_qi8;78!tsX?T6=HEiX$)^{fJ$Boi?MdC@dKQqE$zxTPJgk_>cor- zNx686ea#r-m8sn_RXGUyl$I^UND-KbuF)>c`GPV7>|5G(;e}#M2?f^&>QE{w3&p2L zVEX)0bPqtfP!hZ#CN^s^?|l&e<2cz`hGv3@<-u&=E|K1nt2s*pT!6jZ{%e2+tkA_X z*@hLPvzDmNTU<0?v-pws6~l&bL0bR~NusH($_ALbB484T_5jazk!`7q*ljC4p< z_5($~9NMfxK>{zt_l?w*Mpo`a<$=?Z6LP;62Su}uQ}p|k%RS>)=u}e3Ej%l~Cc@BJ zX7g>eNkkexV|~%a^k}u^69O=(`P6v zN8}VYq^#Z(ZS!0*zv~w66Kguixz;-KwFhCN!L>n0H4u+gOW(#xvQU>d8!MfV=xiZK zDdx$;mdv|eJA93hY@D&5IgBsHN3GtTpjq!2G@Z%0?7XDEsxfzN>_>8##L_niH~C^`+iM zLiOd2DztsgR-)rUy=zm2Um%{ZtSITi?dGNHOdsb)gTTSOgf!6DZYutqIFF7;mRIH- zOQ^ovt95<(hqNKv2q|=7tk(}4?x=oX(7=PNQ>4G_&k_^7HX7=lpigD~?<5)$d=fdo zMlEyxt&;V}--{~j*X^q7`p}x2?7Oed*N@+{DV;|f@Bdly@j^ERgdoG=phy?_@;5&7v@Kt;mHWyzbxaX=lc$Vj%c;Dj_(ejGPPC^Vuv zX+N^;ciFAU_X$z-GqSBWy$9#`&PvbvVUAppz5x_bCpBArl4}6^1{Ni;OG`W1WPzOPl#) zQXvgQBs>A!0m z^7+LZQGCgrLp)61U2#96M;cou5FI$bO_{D(%TX|cl1+04v9y$$miLt@YZ^yi3yrFX zzTFBrNsJ4rbs6Dc5{&uX;d0-ws2vXt4ibs+dmA6@7jl^#Wb*qGfncZMD){fZX~DcN 
zijp{gQc0!589w{nb|6}G$%7nTl<@bdNF5%=k(d_FV###2`@yNwqv?XVyk@`RZ-1py zd7AUS=kKaA`O3Fn9kH_I5zXDH^>rna>ySoqL_5eO?z&?sCIyoV2(`N_QFNk5qt z!{GLG`n9hxmr7}L&JH2xN)5hribzD0Zas*9q~1`)U{2;r6R>jxPn05cB`N|F&TCb3 z+r0h1tH2_6dm_g4<2KAEedF=+4{3x{&`deI#G#1fj#$dK=kA~KD$fIxbAYe#ZVwYI zAvwc)MALSZxw;J(b<}bR-M*h)RjCb6?X4i7I{EStg*{At?+E+0=+{e!xFPQ6`c|Kw zM788VUCgi%GO@Q@LqlP4v%^z4xdn)(x6OvL&QQ6w08mQNPJ=GV7og9MAXr$bvsfV( zYS>^zNUceto{n{r4mhAhbgtc;PG++KXMKw1Vs=;5?1>=3f*I<>(k$1?PgeBStJf5~ zypDrhd5Q7gid7^2+O585>m0XB*VMf3P+( zW0hPCdPty@lSfgQ*o<@thCn%*#Y7>-8rA*Rlsu2`Ug6anZ7-Ri<`DH%jhTMFppq0~ zt(|_A?H9p-8oHUgUbLmoTlaKg=GOF$nv(sa^rT$lp%Kd}@)G`g_P1+oEYcLO8OCzy zF+etaGjZ%Gq>r9J_sb8>{mE5l)Mu8*owX%|9h<6R*+b=2y0;W|n8;rq_j45nDhkzf z1~3%$?Xd1U26juw_WRA)0q7xMTE%b;bvr0Kl5(()FQ83kF1 z^PmiDDocTO7J?zrH(>l97^xTDmsfR9hNKN5%oi%ky%zA&Vm7OgJnhpd4|9^EY-@ z4BJn;4e<-k6v7I1dX(aa28{Z6mV(2bb`R8dh$X$WHC%s|#xc3-rIAer)Jq<}QlY{} z8eDFNl8}uHM~C@x@%W@%=Rk;X!*@l>i8&`WRrJ9Hm9>^%ifQ{i_q&<;?-G;eN-`Cd z7b`Bsy@$c92q7Z+HjFIV;0fYP$C8C#bA_v*7;Q^u!|Z7-uAW`+n{LJ*Iz{RKqd?te z<}f32bG)$?nmVMVAb1MUTIv^rL@GFQx9k!)sR1UQ(vfZ#+L#6DnZoxRn|z)9dQ068 zPc^pgbV_+{t~jQT;BrxsXF14zxc9=Mbk3ClxWF5@ZhjxP;DnEe$&7Mq=8zEbpe;0r z@v&8N3It*Rby_W_ilqh*O6!}=IUX^%vpv?gV_tUKai$zA+uSQry9rc9nkM4G$;pV) zLg)yXuGjv(qo`C1X8wds!xL4ti6QXl`F;X)gb9B$>kp{v{iCTGPE_!pA#{G2!eGdE%S8s^Z`t=Xfe#hCE1n|a{-Dnkl2Kcn2 z@8?D+zw$7pwPkjDi88cqVaPnO=K6rWSJD+11tpV`SXqVz5{rwur9LRBI2~Y}d!Ic@ z<;zrJsg^hFq#Q_`$h;&VA-tD4Zoxy*q{wSWup&jP2a5|xL?k56&Fk7zrT-`$x>q(p zpX-lu+6X`VAfT?-p9b+!VOAKEkZ?d!6q~6^&#*q=vt1PI^((osD*7xZhc~1UfZyGFJH$&Uvv9ab#&V%vD~h1AQhk69e~L z)Kta!w|hKV+bg|M=lIH78=Was)2#+P6dxP<6^vj1Q*H#8#DiX`d!xWwquV9qR}#{n z9D>pbqAT_g{SUEjv+!TUy1UT7iS=O)YQ$V|nNdT>!H`ERBnkQLf}a5%h=E!)yVoz<6{2HwwhZ^>TCJPS z`N00CePQ%F(PZ{_*G6~oH*guZlU=nEE!2@O%pkJa1gyi&tuQ94&FS%4ET&TAr%hrnylKScY6dK=1qCmr(kDw`P5MXUJNN4Ht{9_c8wOZ z3&Ig#?K1X`Q|#DU=?3N}LPH#?%U)NY9*;w_O%7ElP*HbyHTh~Nlvo~i4+55Los^za znPim$%#`FtsVb$R6WT6Zz4b@cDaUg8b9C6BC5$`;tMnIFw=PG^kUy}~UWSyU7GVuX zXNDUrhJTx{C9VEyUvbfcdo|G5Rp5v8NO30rTexh 
zpFNkk;+h}{0#lZxl1Hu*a|^&~@fb9A!w3iuShhFEi4J+4-5!eQFHEY}UC16^clUc; z`p6AQgRIX;8SGm4mVEw5Dgaa3*V2qVF04|4~`*!aSO3C`t)d227JGbJD;^)X1hwMCLm~DMAz8Qc9=_Rvj$Ya3!jFtUjR%WwYEk=xARI7FCPd^ICneZhM?ltc~56K zBdGv`BeoVAV?3s9CHN4^MXk-+Ad4xaP@jv4rA{&Ag%F0?Y_d6RG>092;@a&;HZGGd zysph#?L8=rq$7qeO1SL_-Rv}HGw2jcEKb*lq>TAPX1K+*_D7EW>Ks-h{6NE#2a!vB2c$m{DV0_m5QwO6k*p{Fk@xo0!wSRGioSKDt z*k?tc?b?RV_!_-97nAPaK?jdN#c<|{lSf9#HQqm3rmH;eH)xE83!-cGWqTj4{kHJ1 z7AfhVW+SCZwsY;XBptutd+y0Ijr=a(H3#}YOBYyk2+sAxUmOHE?nA59HBLZwH*VPb zTT-39&P$ToxIg0eXHuG1quFvanSzHcMsNIuU(pRea|ed)Do+7n&cd{|`dZ?Y(yL=U zotJ5=CD6NIN~uXUgT787X^VAy=AFnz7`3;&PZu5{3t1C=Y*y4~=Jf7!K2pxZ@Q#&h zHM$#V8jjdESTGweT@RknV{+Z&C9#SM#!pjHZ}qrH$*}#mA?BuI(6f^ZY2rB5jK4)S zZshf3A1_qKwH14JWDMM0g;UCxzH(TJ_-;&I_BT207LG|Ia;;j`eSH10 zZl;l}bjgy%+t#@}6D<7MG;*MrSZ9_X2fAft5Qij;5lo%sPgNuKj~1ZaLR5k}RJhrc zM#P3~7%pn`=S_~_aCc7cUa@2=UiIK~F=$n>jZsA>1+tr}8av zyT_s)p{6R&?phfP#Vi%3_*A4o5#=`1AnDrW*H$YR7HpO4?vYzF)oNaXkv6sD>Y?TP zSKHq&?r5X`Ol-sRbhWtBM{S0X5+LB>q}=-Ts*0i_AY2co(gKa&G@2~a@b(eDZbu~e zFO8==Lg|b~B}qTI-#q)rVuhVuSdZC6b+R*zlhS}B0m-M#1Bz}A+dARDit%POixcwauq67F5Uoxq3CU(VdnMj&_FjG_+L-bt*h-m zj1SUO63KV8%fzFj$F_@)PQ{5T=rfx zy_4fcXU3AG*fzxR(*9QvBm)wty+BQ0K1s&>OnKVHYlryEQtQiac#;T;x{dz^rUd|7 z8zsRJ(c)PTo6F?Q-(wDI5)+YedAx;=5lQ%yo3kKBC{|3)q0=!Bj$gg1^!skwzv4xU z0Zr_#K>lXpIV$0VyIqNbtAI>AqE0_?lz;3{q7qK4Z*-@qe61sr%7&UW=ll4>z6(1* z`)spE3D+%sr&Hx;Ngc!3f3Zzhv;qRz2#Dq%GKUa6-?#S46jM`EXWN`IIwk?0@E2(7 zt%s+QVTduMnkBH%MJkhI`wE6a_zT#KJ~Zs~$z*V}hbtal6TR7@2QCOa=rdzP-+Cc) z1?To3Lp8Di`@lPuvtG>nJ*L@M5B8NuHJH}o+F``)zi_~g2!279&U^NF;#&86b5uIb znATSIH8jg&T2DH&h3Qf?j%YM?p6zz;vvy%~aY@NgxpKwReu|EmxOg7W zBd=1Yo2NU1{_yB&XBG{oyzwWJ;w_piSxZ zh6@0=a$rsBUj_0Xko7Mygbq;5P>g_uHP&Rc_Peg`FHHi4hlfWr4yVX=Z;*PYJF{pE z?!+Y_luj8P)i$zA!bCwPIZ!s#rEZoUx!!voxSmOl;9v$KP2*U4(+8tO_gHde`pJ|@ zwW3>S!`Gt_h0%6_IrGd;d2MwHgbS}zK<1kn`~Liizp3gz1c*``!{NW}NI)=?2esCM zh#VN1ve+Xv@9-Fs5dXuVE10;FJlA45xG(lp`_gi}fK#Z~;PL82ZW#?T%d#_+u-;3e z3dw3hCJ+Mi!(M-aoUm^06%Er&9*rcCC(*HZVN!eek|x+2ZNkpVbL%bT+8-Ue?B^7o 
zS-9aU|3#s{*L92W#OHUcv|njzog^2<3iFRcr|PNab5uc09@JRqrS;QAQ6^FWP2@`( ziHa<#$xT&!{aK7k&$%*#UN7JiR^EvQcIrK71JYIVL(_OxIYiWw$^=8~GFma<@DUo? z*hQke5A2FG@2v*UG?eQ`mgTi(`CaMC)oQ(RH4mU-Yk5#9o+bb%nS_dIBP9bmvQ|oD}vr!*7hqfd%>V)7{ zeU0yggzo1%-a2q4irz7MUk3I$(Ir2+WumK9yL8Ekq&ffB<%I0P1Mk{Z1${z5 zmtG(@M_>G^OWKSPgXk~fKZe1n$yP)D29Mt&ptlE`aEaeGBW8R;jIYy7L&HP!rbBm$ zOO^i87L>bAMpIpj>`s;AbPxyG>Nb48%^y{eNNaz=lZt>AU&y z{EqzS-}^1*+ivX;D(O75i8Pt$D($r=5fmXe??5GLwBrV{;0Bat%hS)m-&fxR-es=W zw(8b1)OXMgO}3{{`?2ny9SqQ4WPb#(*=5$K5JtGD`5aw39%cM zZ<;xIlEpKHp$H09>c@2I6ry;W{7j? zXdx-4>6XXV@R9n{Cl29+uDhygsxz{wlYpwevq@ zMi-DWSx?3T`IG3Cho>p_`U~}(pjvF{4`*;}r2kufa1*uZX;mHx=ncNE`rZ8V&vZWM zdC8AF>p(KJ_t~~fqvH9q^(c2|zLlS$A9R+^~_(b{Qv)7cv)$-14zoJ5bsl6PhAY8&lj?NrqVy?Nj*M>xju ziWrtvRul?NAYhAPKRE3o#lz~4x$c>2%}NHcGW|}?tw>vOjP|Ed*^nD^ebZoy%o_%s zq(nyzAA5`>gS80249V}I^$(ZBjuTe%Ty7~{=7MVma$Z4}a*{kynH00XTOODe( zD^~w95rHXAV!b{LGz-3MXX{lxh!AHh`X8Dd zlk*>%y?W~+DgIeJozFQsJ@7MjV!YhqZM76NEOF0pPebDv5e52;GD&ppk;OZ0viU); z(+S6b)~=@nJqi=m>z*I$Zzen&?IIr(ndF=z4PD}B%8Tig#8=S2E37Qj<2q5gQ*hSKr}59&(sV{#-KR9DH1qCTD@EFSN!=HjwzU_0F2o_bvnJtox2Zran=K?? 
zXlU19&x1MrKZ*ttigI7s>;Lmf!`|+8OA7R2_Pm{7eb!El5fBv(0cssVegl&^JZYn@ z8lUwMWv&T&`Zbny)4D|drf|PO%Z%^+2;167c%nCEst8V=EObzdkf2G=cd^BYkmV(k z2USkw&0fmMRRBJup-%(c*Er098Qb}_{fm_EQ2(M{{UPKMae7wq8lSATsGZ%wZ{Joi zRh}O8$KdXtd^bK@kzArkc3JdZ-Yq#y&=rUgx;m*2g2Bn}C(IqYvlXW?!>2t9uTx5cP^>olSiH4|fIgc$$ zmY7A8+x050S>KZrbJT1n!%4gZJ4&Nq`JTFl?Akc>eK>_1lkNTB7O&x z^ADU69;sgoZSSY;eyl#`Hf5*3`qT^+TsWdS(uQ?W-#?)pj#_EXRllLg=tw|FRR_M_*&TW5Ca2-K_>+ZQkUqr&#ml zbWO!JZ=ZwDL&=>}@thdyC8zPbqXwj3*x87Yz=25lVASEahu``usIWOaz1JCW&%l`q zAPM1i-d?w4N5e^`L}ED`o8-6W)|vx220^q)!4d3-2bqQFr^th1Z2&5`~?b6 z+O0A_Bq*DnA?oWzuf#z0WgYrUcod6Sc4y*t10{0XoDvl^Xr7Am5$9#ykktYiNE-Qp zc`tl4XA;Z6@xE!6UNy4pR5mEFgX3Mf@FW^PP#6$dPztqP*n;sJ2YaH(D231{Nr=J* z=j0K;k&@KSLaK@jLmqYg=)E9xceeEYexZ0JihhVq-N)Fk2P|e&qwQr&iCY6Oci9S8 z0Qx``#r)LYkE)q84qe7?RSS2f&7ikI9%%{HR$k@*S9^{UDcGJV5dDIV;RWsGffv%$ z!~#4@5Ch}DfdLYEGkedg6#lVgbj7axb`ZM0(euG_m|+Yzw(H46 zK50;GQ`6vT8@V^aQfe7UCIlSkKAbNd-sC6JbPk1&&0 zNCO*#sK!R-riGGi-1eNF0i34`DW0$5c|zhEbwZ<*G~|BN%uxlEC*!QEqRW?QLT~J> z)kuZRmOL^fGEh+Tl zI3C1F69|g3LyH>O_2BS|Nbm6tc*O6&bq^UMQxrnlV@agU?{31_Y(6~|e#6zeCB>(u zQ^t0c)NwyOlG>rl1Q1C@$Au^BxGH4E>p3BS&Z|7r23jU2=Ny;`KR@r zBfN^O_e2IbT@w7#pUwH-XS$pU;GwhDYK_H^0RxN0@-IQV63~+pOrc~UB0PNbaQ2%{ z#9tJtawG1-?gsnmyLntfB321|j$9Up@@6Kvbo=qtlW;H+BC?R296UdN%*xDCGBlFx zR<{X!5JQ+$-3$J;Z*i%9=TFd3ep zp}-##=9EL#Ytdwj%cJ`Rx3iMOKF@49v>L*yw*1qD>V6LgMwU*8w9wLM)MDhPQ>j2) zyNR7oHZ4WLzbP&ZurKH7j!rW>f2tF(HymIgQw~yro_4j9D9(5te>UH18_u=9TYV9h zmq2!pjIkjct~6a+f&%O(thiD^!%zqfkqAyBfgbBZkRJ zy(fkI;Kp>Z3elfp_-Og8zNEQ+>y~!}`l85&EY4QBPM-7Mx_*Cee-|HE2vM7zr5p;e z_2zZ?uLcXxqY8}SD0VoUM90En^n3!UI3`6G@&qB>AI`!80>C&p@>Rv3*!~g%kJk|u zv<+Op_6KFcNf6T?VSgvBBB&iQ<{2G%d3phh}rburMr4rkV-6fCmFJ zM=A~#R&RF>D?Ln~y}=73EGQ+2z4L}t>711t&5)bDG0n)B58L9PTS^|v8(bZTd&7mh z;wgWB5hcr6`1j!bv+rP5BRy|j#oy%_GZYAIct#!X3kp?1=PFQp9J0YbU*9dvxiY>P zzRw69G<}57!)V@4VDf%IhvReHK*3pW5)ePnQzr3QO8uY@)12mAB?{6;L}O`e=2HZLEMpNZqvD z4OD#l>5h~{I#F!(k@ym{_b!9nPU<<*YRvdFIY(a<^cAVbL+02e5FY64_4mi75%6`s 
z1UP;a#_0Va>HT&ss?lg3RA%N*b@MHb*?#I<@nG;E@6%7KJNUMgtbVJk?+Y9NC>Efp^uxrRg3OIDZ)|}!Y5)UhAMPrT_Ft)cJK~V&K z7@Ota`tVFCT{cfH%2VeKA8(dx~N+A2&Cy5LdX&*aHq z9TY<6EkrI0yt?Rgi`xWpH+`*yemRGE(D5>jggRPEXYHe!?QVR#7aB+ z0FVp6p?bl0yI=~jfIg)opNw2}J~5vK*vc|Y&(;g7cBJBp8VM@7nyDAdklvMXg*Wj+>K1nl`&cA&Z%jCa+4GSwv97uE zY@d#)37!1Y#GOnvId~wSzNt5lPn_$-$!KBQkIDNND};4}hU?d6BgHh93aUo;PeHi` zf~IFl`Oa(I(T3+QDNGk&D4xl4ATi4uGht3R2`VBE$s22YWEF;wg%_wMlRR4o#}I1o zsSU8$$F`G&;FN`doP4CER(gJ7lft z;IaB-yWsvf)G2xwS?L?TAFAU-!$8D-NwpRZR%o`~IGYT2l}?Y1ja9{s-8noIEf9&S zBjObZJye2^$Urq#j<7g8IlUPKGZYureoD*6^O*=@;6`6PHNfmvMP$`P`hnQ%_9PUM z5a1U6EKeYswz`s`4#%7OxpmQCw#6UErGkzmdQ>1Lm31BqQ2e{DJG4C>F9tR?a2V@V zb6(RCvt&Yk)g0dJk15ssdT*R+H<`i(au_4PM6UhUqQCaSGBC47G6!nVEIXPu3hMjV z^mVu82Lkr)ZsIalmHzvxL#pOhB>M>WymOMR zI|%Hn`yyce(&tG#KQIDlrNxdYJ3G7VBFDp+4T_*H(yf)Jl4E})d!M)7QiQHp>n$S} z9ftIv3Hi7&@m*($xTplh?9BXXCAppnP?wtHlNI_1RO=vNhvcP^z$%OUcHY$;HC1Gj z``6a}>zj$3rVuC)1e{R7{FwWsa4o&K)*Ke6eHd*(zow1%O@JpfItaQxd*sf3XK>3L z3|5#3p)Yv;%^F|TOa{_Tk5bA?Za1;dIqJ^WN zriNm{kIk%(glrcxSmW{+hBk!v$(mw+#6vSBcEw_>^zMc)Jl{+(6y7;~2)oBFJpWlm zAXL>f8zOKsaVbl<2zeQc)7Z}1z9zOM_iWOCSOC!$@!y^3uXc1jN&oZbmiXD4_Afdj z+<^_z#Zbu{2pWyOXROw+ubF>$tbc!WBrrj!XJj{q(a6?qVaPWP%6uIFH|CGFCnGaJ z6WH~y=fon65dS0opDX!B(9b!xIG;SuHBUkPAWrEsvZF5U{+rc)a+L}2d&mwBVEO_( zIQf;VW5pr#j0l8`z@A{E=-Ck)WmGJOu9Vc}nNvInM{n_{Kq;Lkp5tI83+q37B$I~v z0ZQHu0b_M(efcJ7sZ!eVOAt?lflSOUE+T5qZSuQhRO3vB2W|53yCJzoC^l8PuNT%6 z)#nfsd2*|!G9Koim9cA~?o-JD()EGIn0hr9GXb}X$WI9p)zFqd$pF~xr}yErN%4yZ z9qYf08UMZyLbM2&w@kSP=83gB=O;-#K}-U^C6Xsb7y%`>)s_v2{cf~sk3Sg`X8Jiv zBUD?)jpNYNc!m=YWs+0Z&*T6;Wni@xzS%Gk^N*hjjY46}t4yUYio+^&6fEpM` zYK<`)&9`CocO~@}%HX;CH6nT3#EBx@u&L;sjg+X8Sbu)GF=IAm8eu7bj^s><6$ziw z(pXLx2!eF@$ulxmd-}9Y9dyS;I-lVJ=_N8NHh`g0+|GRQi#-aR`a|P+z*`uw{i1vS%(zUn z>`o5X&GA!mo(hOcLkJc*Sl*|Su3`yDYvuMG{(+sdvBa>?Li+Ct3v~%QsXE(s0@9wfQulF z)ElgB|BBV<@jOLprG|qrWh1^RmM<&w{}J|;L2+%{x<~>c5F|iw2=4Cg?(XjHPD6qR zcZUw{?jC{#cXxMd92$7N_qqF=C->I-K^4`|QmfaTbBxc%ulg1CrzSVwFY)q}(QNo! 
zErPwzUF$W@5Kf-92Psc}Wr;OC=hzWSCsI1fZ9p!MtBG~TFh1NwRmRw%NxHokM z-W%+-ohZfR5D6pXbLG42I-1xc`3a;Spvgghvt`Ty#uk_{XQPsf4e4fS zQh%>kNmoOAJoHC6dUKZ<3{(tbq76&+H9uT)%?3 z*(2X?=4qr-^q*XScdr@kXcPl>GY17*9;|32NV`HUs6UGDQ3~y3-QL2 z2VI5XQHOFbbX4l){7>nlm|2?o8vPKv*1coN!i2!Y5-AP-I`AIuY5wwI!i(G-Rs21q zV>2NBMG&`q*_Xk&0VGmW*DM)8-=Sg3^%|u+)wxwn?GO48n_Kh8WHQ!-8)5wPImocx zv)Y=PVy7|1G#yP>c6d#)mwXZd24q;$d@@$}hL%*?=&L;0cVt32&3vs} zjl&P0`D0&OMMZk{F{F%nf*Fn(22)4PW9{MhHAZsM@zC1$uSV9ShVRS&oQ`;gvoXr7 zttY16cT4Qb=U8I?p1(>&IBg4E7gtH&Fjo4e=)Vpn)KRU-W4YjZuu!dK#1Z^ruWxYU zo-RXLo<3<}{qm-WrAmyr+8TSW7o{!h0(wQ6^-C6qh6R%^uYu7EAcnjOqkNQ zXLbdDh>43WjSG)nPqEmFCgUXRP9*F_k3bpCH)t?RXEAI(8(w*Pnx`W>M;|&LC2hYu z_4}h?60N{tAq?tqH;{Ckl&iTI?{U<$wW~}p+BZ6PwQtWZ^AY-eW3!gvA?Xv6woK<4 zV8{}6!aT! zURv+2CQiYv0##_mT0D8Y@+jPLo(nc!GQPk=aGkpS&lp2s7>&T!7ghI3Dqj9Y~JVVpUzc^y>-C)Hz%qf#3e!d z?y12=&pNJ8MZ3cJ-9wD)2BTy5{I*qg`AJ*yegl4TsSd~w=ZA{MK}i$Fn2B*@)kDYR zpCrTuwgP})p%-(!)`e0*Z&h2$LG;+4GWl&x+fZ1aakq^h8dd{{z=?4v0>x#_!EfB5 z6ffW_1D9ZOA};iO&M-ov=B>y;%QXE3m^FPv4nwNmq-#m5#KCWugYjv&nNG~szVb&p zMUw+2jD^}Av(-Nw7zaK0^UfU)xK$Q#b}mOMaON`%_z{)8-%ulVGl1UzuHcpi=V$lN zvZR1RC5dpS@>%P02EktNyO6Bf*FaF3%w}nWr#KB#% zzT|i}QKpt}Bn{w42Io7X%ef2JQu{=L+&wP_LV?gOE8oyjT&Wk9yFNXGvB`OYqhqgx zHCV)MHgZL*nqcac5oT}{JcYLNQ;X{8^A=w;MRkD|vVkqb)(uP%e%WxrNdDDmKJ=+I ziwE_N-w5dCcVxqJ8@QeyGv3l-x@v^s7_upr(4sR*jpR!8m4@+(x)`S=U=u4&kAEYA0`)*$15F2en!qnVA-425j>REz4;p;namZ z{xT<5U1VzI^2)lTKe6rM8g?=%B)Rc6QvKT2M$-s{W9`wc4Nk2&wQ8Teo*2r;j{#c^ z_Kw*Vqy0x+UU+M&eDyh*v_&=Zd#nobHjfQwT!255;xpXZHeB^6x5f@@X6`A?n5<0= zGJFC?dvs074+)W`xL0iZ$_YC9Kwx{yYJ!`xCCnhA5V zIz(+z)CbDQW5*+_lfnj4wb^86vST(Jd%chrqJsxImPKcB!~Ec``{CyWd|GK3U1r<6 zuz1Ud3I}K;sL1;;kbALsgRX0owfjjvG~0m3t}}~h`Fq`iV*XxPci?{GXou!8?VUFF zT2?LFbRa;xWAJWgP^SHX)r(0Sf6tQlpCY`k8x8k)_bg}fE5yHf>+>N3a1+W(DmI>C z3!^YW-u=Qpk+SL|z6(te-7db+2vnjEdHBkAkkp7KiVCZ`9 z=Y(#`77GQMyEC-@^7x?hFa-s`DEYK{fbHpuBYx(>)nP?X!mV7q`>~78?))ajlI_^r z>kUl<6K}d^b!G{^$h-qb@DghnTpQgEMBeRx7tE#bQWvf}Rx~xhZtH^6bwH_67E@uH 
zHjmz2mq~I1;ZIUaWXB3%7vQS=l!~cHU&hfCyy>7YJJO*InAOt#Q=BhWid^VfQ*zES z>1Pp_{aOwpw@!ncFp8=-UXik#-I4axlwkNW2Te8;Qu%6M-$@2BT#~%TR@c=k3w$Bt zmKH%$`}9dN)}3M=FZy7+RJNrLj9tcW9SNgERN*=7-h(hv654v)3 zcIm~S&qQH}j(V8kCF?`}>$DBWKkexi+CDcwr5tw@krNwMMSX1p1W7{YI&yQis!EA5 z-O`wOGPnq`vet8^cqkA6N7!5s__PE*?g}Rirv420)ftwu>+IhMvjgU+xs4Ira=&JN zd9>XU7Q>$HeK|FYd6`P=faCDGc@BYn$sxiFRb2S2S63;#Wrxi=YKH?<2_^uFoWz_^ z2?k)z;O#&W_YkXokpN!_ML{JnGv|D2X)p=uaB z-D@2qB27h+;gZ_26x_@Ro2Y2Rm2;n^&3-MKgv8GoeqX2YlN>$M@Z4O&?DPI1b{)}% z!%0ElM|-`nQmm$#5$;G_?~g+zNrnpX48r$qvapoXUmQmULk?;jCeU>_xW_Wani|&C zD4eyGdR_9l5AWX>s+@MaQFlHt_Bo$Fc&lAKVa&lixuHN+lx%b{2MyS74j+)$uD8Iw zwE?FmHLmSwk}xa3Y?zskB&~pK$N1#WU^+FP<OAwSFV)5U5@r{$EP0Q!>4^?qp#n; ze}4z}+M7}3kc873n~CR>z!{S9gG(S=S{HG;Sh2D~`!{_ei>?IJiy-Yo!3)hW!{Yc2 zTcEr&wGa_j=BE)jT$MaKggnKktGsZY*VZeD0(H&MCsY-jUU?c-rC6VluIoXst{k@L z@+)RQet0*Fs$u}&l{GvfO6GI;q>*kH$zwI)Z%(QqsTP4aQFuqC0+si4Io%eAGC@Ch z7X`>796NmKo`6Xb?X2+z6xP}L&K&`XxbWf`;->*?d*ZNSIP$ex{PGX z>dWWhOt!Q64|q_$CP|FRzot3|6Zl$b5DTUU>Iz;y{`1v9;2oj_$d-tEFpp$IG^9vc z>_o;Cn)a(#j_{V1nE-xfSsGT%)x`on$g54Vj~k$f>lQMSZU~NRb{5rMR4b7LR@b|#cy?Vm{;}*F>B484NmDgG)bV%p z49U$da^cvYo53#1%poz`;L`Dqwm5+mm$)GBb&0-eSnO-wX?Eu4%4j*Hd?$c3vdV{> z&!R@-9Q}zl#z~MRBQDdj1%&%f4Zg1CJNty>bioqAabG;W6Oh>o;tHf}MrJO)m-0v^9RImYK~~NBvUZDPyPk4^ z90m)Aan2En+%t3e>V`1rz)wuU5HWa9gqD6}l?Lr#@2m9O$Ho)8BHw?0M*UEc%2~FY zDNI=WxSv?PwEGtKXVFUJjeD$iB-a{l&0yRX)O0E5$A-pyMb;+)dYH#)dV{F6i2Qpx z?;_JzddOYT=j3ds^Anl7k&lLc*Tfz@=046Wuq!|^&=G3V+* zsTc^kv8?Q@z(FW8GG~Rp+VH0`?xvol)(>)y;n8qP&hMU{oo^o416jGk%L%K0HU<%h zAKzGD*VkG0*jeMlJ+{@F^8R_E` zU=Mkq{lZu&8q9@RG4{uFFRjfLVVzojYzC4o_?sp4uxNT`C9&hPs|+W<`0kl2KCee; zckeAuck1Xdlkpr^#OXy?SS04Y$NQ4kHG;A^+E{=@b!HP6z#ltkx)3#76Km5Un#6+Z;QTc5y-sJk`=NhE z`C4bzg4rz$HZNAs%bi*oZA$kLIfRm)^gCFYA^)sBl7?x(9{f99n|xyopB@n_6`s<&WN68PGN zh60E(eYQk7=ziUo(Yf59lV`3>Js-k_sa5J=qM*d~RYGRZ<0u%SfBp;_mdb{xx1({{ zho=Mo=}o+z@6OjE+uMCZiI6L1jwVlT^}77hkLSv}@4W9%mqa0^gQa%QYQJs_$oszI z^*r}GV(sA?n=y2YfzSZVd=mC*~|E0r=#^c;R 
zI!ZtGc9}q;{Lha1_iyp^WSeCFfR_LDfnG6tPshLggCG3Y6TEyO*@Tx@SI|PGSc(QJ zGESRL$)KABfw+*V?;tGyz9JWLKn3_WH5qs2N}J&v{lj7DlXx zeBOOjexIdv8xPF9|M=lXRsP5!S}N95{*2vtr|X&MM7F!A75D1|T~#jznV#9uuL*5e z*|(yJBwzxfTE zO^+qQ%C}e<;Fc3K_(rjvU9KU%I7f=#Mfg;KZ1_g#O`Oc;#<}$N>`=8A-jw=i3*M(D zd4mwoylC81ic937-C*R+x5Mnn&Ho&UW~Hl`P7xiyK*BjmPIY~&UD)pMRhueq63A-7 zz%)62N^}~3*BmTSiL2FRFuB2=UM)-WuiV41o0mN#<0bC4JBC!{5EqLpBivNyp6R}K zw#^_myw05N&jEzh9|W7Da^**MEX)IIThYmnZ!ND{!&7I5)Ci~oW5;q2;KiLAntlz} za^1o+?3kD(=$pVUG8MhY1TC9b8;f?)oTW8TCjS)rZU1_x{h9STRvRnR!{y;eXgwC!z1 zrCpL`W?*9$@NbA6LdgWkoqLBSel&ls`GVBFoZMK4fwh*qctFdzJW89Y-wW_c5u;{L2%d zwIVldm%xn?t?S*@YNY$~0ZN9wCBX_R9kw*>HYN8>8Hs9!2zQ7pX}~>CviPFZ$%8~4 ziPaFE#)-UBM=r#X!*xfSU*S>@K%1JzP&D)zEw?c-a zKotHHIsL|yKGpjB3qL%pe|966;%r_|ir4mm7y>B#%dzh}mLlN`kWD8q!I9VFROtd2 zut{IX%-JlM|jXG~6a;R(-8BbC9a^q6mH;@!LA%UmZC&moP+eN}m zHgb1s4^j1^GZ)Ou1-si^ciu)0inD2XwHdV;*vgsJ2!IOB?K?MUo2Tc2;mKJMZ7nS- zL`2^?7wx!d+QJ*f@iOutyiykLilp@9a+(FRU>$E}lB>xU(q47ud*NfD)fpDSRc57R z+F899H*N{)7t+QI%tS$_MC>YbNsYXpg9# z??aO_wCg&B*RCsGsa5b@*}5yiA>9f8zL8Q&WZq`&KgOy#ryT0EntcuR=aK zH$L5kZb=K!^=y8<{lbsrr zEI1z3lbg_It9+$tn768yZ|&jLJAY=gu-2-0)3R%hm2s7*0EslOWT;<)LdxRRNgFSF zBE@cFQ%fi`Zs%e>fn8rbooZnQTL2;@&;kb6QA{Ph(YKe2Z-6=C$uaQzWT&WeNNQ_d<^^a>{mTKv=YG=}z zx>Bm=^(Zy&KSlZz_xt>it+uBwY=0>~R-uWlV_{Yq(C}7IMiwl|?~!)N7-v_w1XkJb z?rE-f3o1RC*|YojD(>lc9nSdi*^UIfD9J9d8Kx9uhm_y0j~{nZFQ^5|bPGziW+|@J z%ZCZ(7$5@DczCLcXq0>tcwQ^E#Ps#^1#EQ`TD3B$X;bUUhAab0@FpC4FrK4Z8lE#S zsyEQx-Jkt#M_#`cYN8!4f|hJ{LI!Dwlh~V^$)}aCwVq!Fum6d5&SX9Kt#7e@$KVT9PR(YH*=5Fuo%t6ZS z88C93tp!&<@M^E2{~3waG3g=0xgRnLM>pnT$*YZ>$^Z823t%1@w|`6^U{iG_BZxNP zRT+&4E9`r`a)RL>TW3a1+#T`M4^rRwB&rJ@B=*`^%04)3_=~*eg?Qbv`Ir!9S+0%V ziZwn(po{^#GM0EIzfgX^Rmk5)f%_T~yq|XUJWq9$EA= z8QMzjk7^ms9GiH}3={%1{vw9d%SN>`NYwWxx2JE(YRiX5Ty;uMumOwb^x!x=ViAvT zfA+UP!#Q90=hXSFvm3j+geiEr>y43LV8&rgq8kKo4`?Y$8bHIVl z0VRiRm!>iBx4>oH3Zfe{Kqy_|`d!y9Yxo{GDaCD3CGwHiak|9_=5+*qx@k=alw%;R zTWn@N&!aS&&!aBOV;a1@RhE0RWEjJ4$)PeKFKhM7P;4LIgC$P*8c30Sr)0aY!_Yt# 
zWWTEK5XybkVUC-8vp?AXLJeF|cU>2576!gyx%^i6d2k5|5r_yf??YI=g`f?QGy z^qtk%-}Ugy8Lx}VD|E@7esb;B{5ph1jyoWj!yD;C0RE5CS_G`brl8d050h&Hj`yZ~ z0r!?B_L2?4Vd=9)IDiRHAAk_EX~(&rZ1sQ^rj1!0K7`6_OhWW6Cm$r}?UK z_AEJD{W>!NlR`dhv|)e*MIP%pfvBZsXlUOH3(0)|^8(Ni z-IR)ngG0`%!nPna6_wO-7%GV*M2sT^uE^5XsZ0@k17CmIJucNlcZ%?N`1YZghKrKG zmeZ+cj5@0x#pK&=PZ|<4j|4N|PVB8V|Zr!{&vzacpyB@(4ty+NlC?9jO z$?oW8;2xDsA=7J@Y$hoq7dq?_im{mXl+-Eo38O6uKN%?|*2>}>=%!p?85FE7>;cb8 z!}(YfJ@h@49Us3)s7trP4omrX+z!;{u3wYNU17In*#AI)!ezz3UNp^{3k*WH;3KG3 z1i~R6_;F?6-{ZN<5c)Fks9|`l@!7$)T}9o!rObmhW_ZO-k!ECod^y*6af^Rw~R_| z907&UYg<0fnhCV1M>b`b@h7hTky>$E99eze4VyNPS zxJ----Y+a89bXC5Yor!?kZaPh87aGd?G#KK=Ihoyw88RS``p0|H{X&<|2?ws3*E>z ze-n|Q#&SQfCy09cfo=Fwrc=yby4l`%SF8tSVti{;cq6AVoogkw{VAhmupl7%6R62# zAVuTHl3!`%vF(nwGb+SWX`*(;2boT@AJUZm+62(q{h_PFIP^JD zv-)l3E-GT@sjjL)Un^XDrMoyM2yW8Zbjhqq6L%uHYYqq(`H^Ecz>v-O2q@(?Lz|w8{dacC z3mkKeCF}4KUXeazgR;wVu4NO8WcSAZ`l`iqZ{B5gMnUuPN?yqkC%BDZjH?UUYaaaY zi7Ppe%dEn{b*z|&Sj<5UBBGQd+BrFqjVBQgkBJ!=9X8OIKO_ovINLs_xuq^PDgvk;XqDh|A@t`ki!Viw_fMj9d)&;Hxv8CP|&z< z!0^gNL4%~XEJ_e>AXPYZvHcJcZr(=mF!$Ckp#T-9D(C_J;rZ#hukK^Uw@;%W?Vj-+ zg%-m@u|nJ?E2ZEKzM-p?2%33CuDS)uYM)-C+&ew$IRjD7MA~poq9uJ zhPBiq5aq^9`c+?QkKM9Mu2>=1GxZa(Bm8Zr+|Dj3_Pw-10xGP!Xz!G%{PEbvH<;`J zRufZ88+8G(RA}IIb3LUvJ$sx}@(fl~$$a}N?bb#TPJDM9Vr;d5SS-ot8%gu|L9w%!I^;6H~8sa>od-i(pI&kX^iY&f9kKRi>2@h+PDZ${|=)hpfx? 
zvqlvHS32)O4@mhdNUbOTPJ8DRGpLa|_B{?!SE{Y4`FAzPy$ z$!4n0Viu?N}aJf3-FzK17p(iArKCIkcI8>6swCz zE0`#tTOoCdoJAWx0>1mx*35M!*D3F}1RbljB2GYRL!Qh1MG3%|WsZBidF^2H=7pT1 zsRmkSvegh?pnNKdxO;A(SQej`BYShbbUn!h;NN8>LbA=JTf#ROAlRR@?C5}&j{(b2 z>OuCAjWK5G|KZl)YFN_@ig0lAqGL$~R9|xb#jR!XU$s##V4GN#NEZ7#yF|~U((~Lr zSytmL7$1k6-N+DLj!J><8gbh1Y1WXz!?E>Ys@E$pV8;<6B$`4cnOv55XHwIAtiAV0 zBhmXmH?Wdqwq9rSp(o?xeo8gEH0U(+p<@9D1gdND=C(5{?9yHEhRT*=a3K^Q55ZI4 zNM6sRQlvq(FRqhMG{^Pw*|q!hB(||x3FXk9jJA%wtmk*?v$onbS4>^-$&N!I@k28* z2JmWH!P;<#4QF$`f;*(x-XuWF7IgNI*zrk1rF->#Xh3}=Z+^v6?JZp6^ZWoz_-pEY zxAIZb4YN+~u7GMumow7E5}%-mY}YzoD^I@0@JNTb-N#+ohLFxjhhtHTSFW5`U84Mh zZ_qb3`kyotHSeQQct(oEuhZ_YEnca2$2>)SRnHCmwxv(H1!n4wMx%-p&`~0<{sjz{O22j`qYG$S z?(k(-n5!KD%ETUB{s85>*5Xj8D_=vX2> z{<)Rb;)s;m4&!w&b229nCr|oraF0jb zq-m3q6DLe@zQq*EGj-gEZvt-;LGo6k%$z|be@e)9(sAuZG|ArPL?7VDkCbk{{D(O_3-h=CUYEonIF=LiW?>PH^Iq_D?EWOScKPIEq6N{{ z@fz$s*?#Bg``@&PT1ARM_UUyk1}lTF?bL$BLb^31dzFgy_eNG_kMmk>af4u@*S2H{ zHL*l9)YZS9Nbclz!JNmEeNd5n#F4nopCGLrFY)=PsS-P2T#r^}a??X^^w_N=AQ~y@rDJJRthTH)^pa z^&;E9NIvXQFXBB@f!X6WA6HH6h;}PSc{=YWxG|IRH+GM=YTQ^&k7AXnV>j>y3-U$` z-1Y~)+KeASKcQNdvxlRL1f(^$^m1P;raEtg3Ikf6^jE;6TQ%0JJ)YMv)X8UgY&Kh7 z|7)5$-|2}g1uxORH?(+_dkQ6fwmmQ=;UHoZMhfgU2+~R94o~_o5g7o$4F^?AOo#4d+P_ z+Iej*$JLLL!ZZMVFyNDbg`Mt*eKoCf?h*$zi|j0|#x2ffs?G&rh%q{k@@DnOz> z(k2@0tbcI0&^LhwESmdg)&=@D3gi{NH&PA?y;!q&yr*q>5QFb?e*0x+ap9V4+?%k( z`(9~B=rMvJORqkVRd`D5z5dSWtx|*f6AS2N$8XLS_xbfBImrVLFQxkd-dm{OeEaE$ z_x%5t^;}6eZXNK_vD;p`T4^D~m)E;Nhan1$FD!d z%^%~(ndBtWmGrQD$~obSi6B3AhVrL`35Sz1uvk-1xv`Qtcs%$4n^#5Ddtrf%aQMU9 zDgBz0w6?BRB;Pr2&dAYZ|K8*hguaRpNxna1KW@;GnX4@#q3+B2XNPUW55%4cCi;wn zCx0kd%MY>XOp9&r^h z29i(q58SzUad}K~8>E z4|ooSD$VoukIee=Vn#EK(z7F`QY=nFp_^-FC9cs>Y5KpBPfiP^79 zyUD~HTL{C`FO{jzS{tO5VXVIicyg@SRV5;l;5S61?&Ogg$U8S{8m;bWjlbBkunklF z@da$c_W_96No56v-7@33Cjp1en5*s#e|mZiTEBGd=r%&!nCE!Y+|!>2pe>{rD7eaI zqRmN;=34&NJ3eR}s8vJkTJ}kN7bN0l*cx9K-k`%y`#gE`LI^0%$ zceItvazf~O_Mi-A5d2C=(2O#kH}ULU%f!;(<>ouT957zTXloJ-ml?{s*xG`CV$qb> 
zkjNe*ellrU#Td)NLRdjR{>yy@NB8DK{zJeYC_mI1fJQ`=j9}A|)9d8H2;k7~uaUEqVC$Y( z)PY?@EAMIALh`7zlHZ+JS97WD8!p7B@6sFGyJTZ<0(o4j5Ofx~twcdPREACIvI5(( z{mHcoU1H6S3m}Jv%25M-uS}G|D}Ddn?a!nrV#D7dI~8M3Wdvm;60J~43?*=`4(#L3PCrvrF*W#(CQF&%D9$Lb zK@c%e2^;A-UZnBso{jP7TcNxFACH{T;D+^_oy9ZmCk=7D$+s{;t@&;)9av7Dx82?j z<1uRcOi)J6(SrWR=}ty^D=sJOM~J$7m`6xs#R-JGy!QZCI5HBTWTe3=s(2kpO=KUygKcA3#!7!4sX}(4OJeA z55((V(x*ruZK{64{Ido`z=1C4*lqteyBse|zR6hxb8GRg;Ng@BHq61UrS+S{fFu83 zo~OP51sTK8M7V6TZ<-(C(INgb$YbqfkIyM8G`d0ZUm=#QVAR$W;<@f?N>Kahc-UWT zvU~PnKh>>i3Zh(+#S%v(4oU8V5qY!`-LfPBsdS+EzL znKVD-#V!(()|=~i7vKGd`o8*Nwm(uWkq<3YENgA72e~}1b-q6pcIj}V;R=&Fh#);q z#lmCM3&nrTXMK2uNyD>Ps|SWzw>~eK!wHZEh@>8n{O2WSf-vr$lvqsRrVUuTK?b3w z|BJBp%8$Y42I&myclmKgx3A|h5x zb><$HJA&pr1JRH_{SCa3$OZ=o|D`)b#YysoQJ_gW>(yZb+i1?`tbc>2OgnyIrk7}S zv8ke*x5meHHAW`7Ukn4_@FsW~9uF}W`JfnSZV@77pY~?Cysu>)FS_+()s=XmZ( z{!VP$x~YvZ1236=Z%xUAMAcU)$g@-N{Sv*qR^XJ>0$(pU%#}_P(3L0Kednipho8t- z*W_Iy`K;ZwlEfgZaOsOx4O`gRL1k(f2G!*My&fM>AaRgxRRNQ28s&S_g|cry>bj6CHE2hY z%+akA9b<1`mt>Cx?|uC3`T6r9Hf_;y z_~`>36sW`A&+cSL9kxXbBXV4CMGX7hO%nlYTyKQ@bSq6I8Sm|?E96qAelqcYd41Y` z)5wX!pAAokjoAWD;#iW_lwE%+Kkis8bQzpD*~Tlx|401 z7FFi6k7Ptd2+3C}0>tSaAO>CuRYcx5eIcP5;zgmjA{C4RsSEaocQC;>>kLC{XBV+# zs9zDJ$q+W5y>omw@0vzO$Jfmd*3y-AACj=;sA>CmoCRKvu4pf^mt%JO*MZaK3&buZ z(eyvd=h7){F$f{yGxf!E zmOuXyZ+Zpf%;y^xCIZ=w!3Cl7iJ>;!F0N~$rB@b*FODu!Ri4u$P`f8KHl_+DXU zjO)p10aHsWJ3~U;qUAgSIFeHL9rq4fxuSd&39~yh3xfbd0xyWF23f;-^p-o>|WtsUSI?zSqHe#sB3EkYH90rE}|_b2K9{HB5fN zm)7fVG+Gj?6|hT}npij08Z!B~gmskj41e9d@hew~^p1U*^(NmjZK@aekyhrfJvIET z=H|xj_wV0T+8SQJH@9B{Xb;`mSme$8D5q_%Un5=^cN_UW4oj*DlqM!icEOjWB)zT4 z`ilm)T&qDe?kPk3EkUq%XI{DSufr_m6eyGH9%#JCdmy<%l7aX2Vpk)J2w8ZoS?cx# zOLY&+v2$sAqXr3LpjQ8P-Od?@c9zHP1cSx(1_jx+dvD*8X=4{^P(8Ok7ahlbR$9Gx zA17z%<3Yzhbde2`7gw2S^k){=o{058!$$JKjTC4le#1i=SPxWGhi4)$Cr4ISDu7%iQH>Q5qu4MIc*V>!4tpmeeCxm zy{>fIy}FK0kNB^q36wtY?1_xfNA?_b@j_DLPZRF`DKjr$YW#ady^1d^w`4^R=Q&f) zP6kvo;ZxFJ!R}?mtjG?9#l`GLVF#CLsc3R1TwxbY{^GKV=(&cUibgIqtzGxweYn+? 
zwEvb;fx~c+;Fg>0Bg@J(VPxLFF&?X5K2{zk=TQyoZ#-zmb-#%ijED{w97|2+j5*Wg` z`T#s%0=y~TZIsj2II-;`bEM&isP#8pnbk_bWwqLH&Y#mDKKZXZWb`cVU%pjjvQhqjwnjA6fhuVU>g9I3+c_E@(VCzrTv4ktDga3Mgk*k2wAVte$o4&b4ud3L{36g*#6FI&P%L$KY+3gE#l;1F3FPZmR zI;^Q)|1p%<=Hk6njnDc0XcIG@EC6caFBc zHI9E=*w4qFTS5qn<7l|^6mr}X$3244az-P%zljm1DzJN1%$I_9noS{3s zF^MKlk_fszql#Gd{v&W4g!1Ety5wi80(W|9S59)q&z!)^{^9PW1_w$knTUYhH)z90 zXG!#2$(Y0j4za)v;6P1A zrXh@>`Hw2~ufw#D-g)6&)+rw!bDuc6YD@CU=iWUn*c?L6yR|<{oq-#S1P8tFU=ZFJ z>|ygE%b=i7Tt;kwe1W)d^@6dq%-7OMfyNvGuMCrLg)(sQ>u|;(A5E$6da>k0b%}s0 z?6t3Ew==%D>bnsM50lfS9&norNhLll$eN0jvI}1*)swEH>y9!sY!RbAC$Lc$(53*6 z;KX$>Sx;b1qqfB<=>v1+yb*Y3g%}D-RelMs8V-Eu)e~pO^BSD62s~+ zL6;V|#v!=j#Pi(v9`@u(Y_a!IA%k8|r!{E@<)%2%m3zV)Ewfadv8(R8LUF$xhvBDr zY8^KoM2pCOq9&-=*^?`Ykhafmsl5Yq4+zcHFA?ZEsrYBb-nVt-sJnG0`WV!mj2t}O zZ*!Os(=tc2xVfrR7MN(UNJq(hRb`MTe!V`2;D24#ycw3~Mk~%U$>yz+O*Q=G-1Zr( zTt+!_LG>SG70B*n@?YMkAcJyWM>4sZO|ZLgncgk*GTNZ{l2rTaW{ih9_f)~LuhDcL z4rfS0jfC_goxh=f+OOW*8P5^Vx^!z&^v^<=dNZ9%m}0A;HKA;dk^wuKdV@uI*Y%&t zUu-e5_wkxk)LrUaztyFDPnRw3Wm+iFw;HFRU#HjEbbUnHlK6!^a3p&^i_E6CHaeN6 zaXT6e{LvQ`6W3H(yVe}Crv08)tEI-B7E5G#^0yHA4BBc519K*)^34agkK?y^n*ba5 z_Q)e;df`ox7(@Da{9fDyq|7Zbt0*U-T zG)J?hcQGf}{YtLRQY)u$-r0vcBfuJ>n@;%d^*;Axjme}G>;`y+u*4V8|KAa{-fGmz!9pJ<6FJ#;E+0V3(F5)Ksc-PW5DcUgZW{ihM%B6wqenosa62empw~Pijv#}uU@iaG}MQY<*qBDO78DnKaUTRfF=z|nswBAJ4y>s0PXk~bJ~ zL1t>~6#Vphk2lnu*HVy^da6JL%({(2D3k3D!HTHLFPiaNujIIw7j$_pXl0vaBT>vI zBPmT|`jH_IZcPmC$EDr7@6Ey;N@tBP&~T!piewI1{9kRoWmH_t)-{}j0D<7{gkZtl z-GjTkJB_&h>kDme}{6;hV8TLx=*h2QV zS{Iu4SPL?##nq#+lju#GxRXX_2pvjUV%XTstGyRrRD{|}jy+f-J;%Vu z3>FZsT{?nc-0^H25G?xZXb_$OGt?BU(+QObmBw$so}Sskh#~qNu*Bxr%R!!(2{RgM zbVPD4h}k8B#l&it=`&zny*bPztB&QXO~b{JH=yO+4y=)l*2Qks+PVGyM=cL-<-C4j z0*Z2zF2OUjJ-$@dZCQ4PWxdTZURHjnd$SkX#gLKp2?KQmIp2C5#srtWtGDv0*^Op> z-!05Dmq^>AXb=clmdNXe{utKtLQ!5<^~5xf1IAh`sij&=lo^M@vL~b43&CQBJ17Kd z<-(RJJ;4tB-gbdAFe04t@8w|Vl5jK;-~L2if5IJDqM9n2iV{ElxfmTo_`3q(hnE$m zVk*(hPMJf(nQ%?`K6LK~XKZo0v4U=z&ks&#s7PlTBk4?cXa)c$?3MDj&s^9G*9%V0 
z)@o4dL3FO89n+nFcOYLrZ6@HN6?YCzAxo~4u~id~f%RUb{C6JD&iZ8I6ikO-OR-X$ ziwyTGFG7K?wChEa{59GHC41zpB100g;Uw%d8H8EwV~hBBGqj>_+`#6L+R(7@X1>6} z0#kOwT8)9o2s-8gV<{jx$)7+fu;b&2ML8lTH(qt+FLZ!Z#HNlYtvC&~cj;WN-3s?d z823$+3Nfcv6;6^K!jn@gtVY1E>@g1J{j0D1%IW*BJ6vcHEA!zYd%ElEF47Gh87Xb` zEFgfT@Y@gO9j4&9+ud_5xYoMU2fmWTGk^zkM6Nr6d4_{KoHdTc-H_C6yCaA&K?R1R zn!I=ow*2%?sK(lY_jKn60DLYC=T!sm9eL2n*LOWhz`V8#_=By`BJ5ziPh5h~egTDm zn6*0vA<&-MWUp{VHttv)POb`n)!E_*bSWXN)Gj68Pgo&K;wCiE1|v_Y-%>T|T-yrl zI=gR>b8E>7boE*zEyZvk_h8k*{_Pi2MD@JhcQ8F2g8*LFOo+yZ3Lt2HJZNY-Pkh+Y zhkeE$5;5s^eG{4yQUe_+9Yx z7$!9N8}yGiKSH+H5m3vxS1g^p+(R;(K3mtC!B>)J6m3_WnnuNM)>u+!E!V2khEm<$ zKZ&t|Coh0&?%u}oecr7hBxj8|O$$lVw`*m4rcONZ@>sA~ykqfVt)JRs{1wmS_ALFX zvpetD)`E8LHPCcg$H5Z<-j6d(+_E+U3Fhi`w~PQ+9Kt8??%f}z{NnY>W!e?o<7a0w z00WQseZj}tdWhPgcg*{-&b~V2dxgqA_ROZW94Q4gBF$LpJ=amywrbhThKhSOUPSEnyat*Q z%V3Q~b* z(`C(?`M4Dq<(Q)1zcV&K>C}t>6sNdgQ7{N(?Clv{d=S1_VO_*tXrx0GqZ0_*XQbXY z&8v;d)*gJ4PRcBp88Ejr{njy|u{9f@?)w@t9vTsRru)4rehl|%p<{9+*0A|k^cd16 z&6QxsvTr!cldoa=5?)`VwoEYemk@L(7#1rakFoFSpM$B^dou$hePlo28oCN2p~>vM zhY8YsK)d-Ex~m0UU6!SeWS&nl=k3Dx)kxp)9eixY4zJH2+I1YRDrNtilHx8<0Y)|c z6cBq91|W~Rvd?qFjvt1N%_pdItXuZ&f^lkog(tmfjFra{T4Jn6NwWd0!0y3@TSP^b zSYJRZEU=FHJ_0(&NJaK_+$}xs6Ru47pZG|LZDg{G zNE>Hwo74bu(McT;^1KGik<~}6lOy+a1M3KB;4JUO;O%7exMKv4n_vIfVgQ2Ojwq-r z=;CP$e>Q;wRcTDxl_x$rwYOx%&+znx@9FnfPPc1${{3B-?z`+%4EbJW-D_#5p19cVPsmF(I~ZW(_cYUs^bcd)G>qOtXrC%2~5 zL4fZoKQ2Sm4adr0uL6gv718A=js*7RX3rd5+C{Q)mse0*#?3}Dj*5FsRd+}awrgHj z-al~Xs0*G=CJV_gi~J6N5>3c$r@vbl-pxGhiFfl<921LPt-)H+!CcSy7>HT-Ef{T3Tj>j95^(=y-W{Fq?K^lAC#ciV)r8EW&Bh}m=}@qd z1}GmDP9FA{v|Oijmk1*+KV}CEKU`rrlw1&K&I9*x<(UquBANFJm!~maAAQlgf*w(e zp@C;xyi>dH*60|_Rx{ILA$!rK9Pf7u)owREidACfd*TgW1NC>eMJLmBE-W~M_q5K8 zSAQFO1dwhKVR_^B8XFULx2ArMvJ4}`JiaYFP^+%k4FEMDwReT=LYXsq1=Dp7>+}^2 zg-a%nXs~KIH_|sPn#RV&3#fxqC4~dwBseIYwdl0!aGkoMBXh;7#-3~y>l>RmmkdXT zhVXZlN4_wTU`sFnbdIkrFrX_|Q-2H4Y0==9*W+P`g;Ye~kG3F8vPgfbv8>;3fX@~J zoc&A-k`fnR1!SfseD`jAFV8?}Qx6~B2QLwAHE&W7=PM$rL{3SGUv67XmGiqqQ zVJ{(Qv=Z-BE+#S6p6W8BDe&)>@*mi~cG 
zvoP|b(U$VS0DBHK9>xqp)Q&}m(y{$b{odw|{V2XpvDyeOEWR!|qIqE)hqu7|Q*{fp*8AEm8vAgWm;G*&W*@McQ~~ zpf2Ce76W6uPj}iEyh7{i3(Pd-^2x)M&6$gr!2FFIx|&_9EBKT&bm0lGb`R2oKGvI` z{0Z4wqH^5?)m;e&O+(%EdR4FGn!IqvkABhR(^UDrp-LRd8|)@-4EubmQen&NVy)Z= zWLsY|{EF^HyZ(SEF>o?ps5G>K)*eob1UBvov3ux7<-RzdT1mcrM2kXB`QFV8{vyf8 zpgEI-yjmb_uH{IL6$AxSfxps^V6e;Lq`_f?L^n6G0mjRFXis62Dki-x!Iq?RYq^=W z^+TrDC*IA6d#(uA+M8ugqbt^;!*k|r;YRmk%B;8Yoi8AdG24+s)P|1KVohA85mIO4 z0N&3Bx8-cBKm(1sa_a`4c;`YVj6~R$l=#=G_9u>1BF#Q4q{0$mTzeAkrnh}>Acu&} zDpMql=3`PTVXD^r9xV^-IPSDav>|O)yPel{YCo!}1AnEU!DpE@!LOTGSj1Q>F5ivG zuq9!+A}HGW%mB{Dd);ZX7Y<#@{MTOu={)S^=`Y2nW{BZ16>3hlttNJ8Netw9cekYD zU3e8nBz)A7`;LT?T|#~l3`?e2&AiBN$(g$de^=!&14sPVdUhL6eE^luH0H7 z?ObgbFF3DZdw1UGeL#R8?2O7Cd7dN|$7cSSVBN1=6zVE-rTMFr4q*2hOSuwm7mskv z69GgfHa|gLJKtGH8um4z5NLm5JjEnRpwSia9&KJrzUY&9u60r!?Q);` zd5<5Rgk4K(l$SmtbIOdI)huvnqu`K6S#wv$;o;TAjU`RztPkzbM-f`(4s>c|%Xl!eZF@Ja8MjWrM7#WzihoyEz!$+RxOki)-!} z_ty}W?JYGo(<7{9g{K7zYxCYGFDKIOn(?Ybvny8M_rc@omb>*YBl};g$x>GA*;M8& zY~0QTva@ssHOY#sc&z?L+KB2)_GoCzjD@CK>pN-K?$Z|I8kb(2R10J}Z89xgN&Z^5 z`)u&L@zUvZ$uhd_yID#KmAK4}D%#(M*p6ss?WR9Dcee!F!bcT=YXhfkYE>%{IJDK& z*;_5+*f<}jyTHMErNW`DAcPXif5wD2lJMO4`$>z^dv?VYNqGgTU-a+N~_2D*xZ`^w8k*^ zLJ=pH9!LUaRl;8g)}qF#x^RCmyzc(Sh&))}7^>vNBP_2U6d|AX@QwJEc(}!1vYg*DVBoX)Jkv?QBx3UuH7kFmJD1)}7ZI~qi3M1PDwyP{xu#5zg%npvEr-KdS{OeHLv zm4PRX+0~T*{>yo^x|yTr2^%_1_BrE61o)Y5yU|&%9g|K4(-4PAdEk%))pciDg8V^o zeo?-$b@kX1xG$nV^GNLS7+Z6jH$JxF>Jggy7Uo!zlx;+*)Ol?{czQ#mXRcCWMaqL$ zuh*y%<8fqaWBV|a>y=q@(W+^2hhffQ!l29OJ?#RHQ@Oo8d-mAd$ppqL;-t251yfvt zDwm-Vuz{^59f4W;p|ya8)5u(of@DPHm>bU;|C%esM6WJ9Y&epj+@@Snp}MiAFFr*` z&H+2Y=to0>l$p30IrQ)k6fnrB^EZdxvc_(Psi`SMY4WlP?LYhpqNd@|(fy;Nm8BpO z(J%-K=HTD}2jM3(plrr!p~b}-I=}Ri%G0`BMl!!py}M5+8U|CEiU?s&TRV@eASTW> z1f)k!DIo^Pivd}Rn_B~sK6CZi$b?e}#Hdb`TQI#sWiGG>7r~k_TK-0NEY#$P`u?8C6IJhcy`b3u32VczK4-oHiRSrONT9=K_cSPo6Wgtg58}mMU`x= zF0!^Q`IGH*NMeh>$7_-Cw8HWW$0>q23zUUgKV0uE|4)c7`7Zuq*vT^@fbON5Nsof{ z!y#Kq0FeBru@~svN(o!t76v;igF?vqS5E$*fE}jKXHS9C5z?%&4Z$q1h2Au$TOT?B 
zuWnjq1aWSmh@O4_>IFqWxDm*%|K){P>~7fH`%X(DvqCj-){bQliHQ7e)y<=mC> ziY6p2NqJYumDnx&RpU(wF<*5evenNlp&9)OPhTK8Ju16W*Z!;__~t@)|H#>vZ%R-7 zta2n``YKLU^g-wD)oe$()VVaj$kfQ%??$-Zk}VaiKaD<9De(RQJHyC2gzg~Uv~=AU zH^4UApcT0W7a-v^I;j$ngsA+KN znmHwKP=wp(%cz-29h8auO26b^RD}p{uyg#=Scxolg1c>npk^YV*Mu5FXL3VfB+QmdygYYAn+>{4fbbvEgGKmws!l{{D3{oB3GGL z7ffJBab#6R+53O>w(tc)0;12&IcmP z^^&|V20m@JBl&u5qP06#V}ck8181n8{G;$lTsHdkHSjzIatrrVr2pl^2;3WqreTGV|uovkAkEMzdJLgr8KyHPjR*7iyj99_7|1i$fC)Y!%#E z9>>fF0Z9jZ_m7^gFHhDC*>75W3}xE-{4-Tvesz~rhzXI`2sS#$m9rv1WWs*g4p+8z zy2eGB$w}35wkFAzt9>Qcs|)2U`*kA<*Y|WD$2Y{u%PV zC036s))dw1+OCngp#A+n!Q{!++1V5OHLmu<$gVW=EwJlFJt+9Na*>09;^x5iG-6+R zOjNDL+*JK{J$-2S4v7SHseZir&*AW#IgrjRA?t$%7v`W7Io4&Ors(WhZcFG;=kyUs zWWLtsBc2sa+MnAsGzvF%c&hG4+yK*7_7sPLFK^<`+6?_v?bg9;8$Y`)<_{pKafl6; z*fUae-nUJ^xC9IN@rW+`K*1E3l#*)M09>xLx?MqFjY%Hn5FVZQe;|BKc{U5RVjtu- zqfoH0)GD7UAa6qg&-!$W%Mmj`-Qex@h1G7k>GJ2tvagVE*6tAGH_A&bBFIk&oN=nv z?F=%;7ZRBcQW7)!9f%`kM39R(^t9Ts5aGQN)%Dej@-b#zYzdg$bbYHGH$K{wKFGNj%}oCa{~f`AP7_9nN=ycc>n(N(McUKQb?v|5`Bb?2n~ub$A_(PAiB zC0*-^88L~`SBL3t2OFRPjunV>ZnSba@s3M_;<9(_+L~vS$R zm7Z+{rCa}>>9gj9b#@9Q8-6*0Q`TeV)Pt;caS38NgWc(;lcv3M(Okdd1OaSm#XnMj zhsz!3{c)i~6Y|)z0A?lhhl4@2^8MN%@}O5Af61Y@Q&oGrG!B5?j3laXyi(1WT>`7& z3PeS6f1%Di1&scWZ%Wa3ZViy&44~-AkaMsZ@TZEvVJt|-Oa<E+Gnhy567IO5OR<+Alr+9|HenxFPl)5DGZt z_nkK6u<%3E#%uX6KS*3Hr$g?*&JBcUX*+&7Jt=8Ub}lR`sxmnKQ>?^c{(TkIDMua| zc+9NO!({fp%PKWn_EhVa%k2;UQCX(<-IbE#B<2S7s= zF#y_wR$A{(8u4zDU%@3`PA&7&8~M42Y6jP6(lr6)TUisF>yYw$a^jp^DYLO~* zP?w;I30X%+hXreDtDSpipc5)CZdgLXjBW6q}i;H#1m*0%;ONbl7UP++%L#>b?_WKKTt`f{{M8d28|rg?E3!w1orZEpN5}b2KLN}i zD9j8tmo>MvY;JFh8WVo!n z#}I|4VPF4fnr7d4ZsP6OP5O7@>BsQg>{4p8y5>^QSJ9G$JAT=JZ77BDr4KneDcMgc z&z}6JYZX&Nw~NtYNkd8wRiZlcN&N?Ntino8MRo9#Ck8U0sf%g-Co`mEY!`3229ox7$^#g_SEt>*Y;R4E{Ya~RNhLfO&tT$)^0 zXMcRf_7n|~*?~niRGmC*@r=~=h#?K}HMYr+sT~Hhj})l3wCRowJiNT2;OS3Hz$Z*A z`_&kklVQ5KmThW)lc~b_I_=ioJrCq_xy+EQXNf5$%A|22XK%@&I5@1tCqa!UycTVDRly!qxhZaIh)-my@Y=T8t1f7md|+T zRTRjE2p&=IJgdow-@U`>lN1$F@mh9AT1jmiJzZ8h{^Eq61D#<6(@0&K% 
zwNRYHmPcl=VsDG)A7e0(pX}sqBu^D6Z2$GUyI{~hz~;AF=SVgzzS7Dc!m3o+eO6C; zFFQOjnhnqTo{KT_Zue^6cdM&2mgnc+`PUNcWl<&x+Eb!K_(Z!?JMQmcCmP?Do?Scc ziNd+|{Ps>>ZQ(AjD6$PDUhih!!)H;m-F#V#JP`)r@w-33I-M!A+#fj#(Xl!_zj!$? zY~s09+@WbpAL3Ys8^1*><34D3;muZ#7+;ZIUK%zJ7e-4=7Fd4KBg*_ZefEom*oO9! z=U1F#U;;6)U<^vOM7s=X3Q0=ve?@WLQhC0v=c`zMPwHs5z|V$^ASyUcmsGF!CX5*& z!<4KySmLx8>Y0z^srmV2adBu|HP`w0jaN_Hz3uXW|ctCiS3Ev(sE zVpv5pdESI6ey5lE7a^;8H?~fe#XXgNDWfYzj`@8kW|a%w7vp(0y4=9`T{G1+72z#~ z{p*V{zQk2q`wtPpuMemvV}1kUC4L~)VJvd}XsoXt_#@-wa*_IkQYPCDYP@CU6WNNi z)S>LDW3>kR28r{0{X<(xp}Y-=>TPf9ex$pOEydXt4#X^(-cdj{in?1-oTrWxvETjj zmz&}9Z5!Wa+|@d-$?BLh*PleNL%FZ80c!eV`$Bb`v7fW^UV}$O`+HH<&%?QflG?2% zY{o(Lw1xNusV&hd^4F$^BmhtOhge(6(P~GPn>J#YUZSE!U3ae?-%(x8?pp>g(9Otq zJxx@<5tNiZ&}ZD+`3Rf((*YX&x87Kmw3KNT^+nKs#>M}P6;09@kh46pYOgMLB3n&Z zeFowW_{(E7MJ73`_r`;QIB z1LIXuJ9k)vMr&<`&X$-m{z*KUzBlf>{oho|+;3mnGRK`0+F(w=2Tuypfn{DB8>fD4|ecwnF^rWi=8Gr)~b`BBmj!-V@H2sQ=BJja#Py@`cF3-8ChcD4{5H35V^k| z3|6Ex`K!SoHrxxZ96hz&y5VD-U*j|CNh$JU`N}T(-abBX7FX;~QmUL3a22!lJ5U^8ED+zsY+ zZpo91u%)R+tw%l7rz#gjK_5JCjr+f763VW8LPP}huyA%|$^7L9)t+NcXR6+MM zzr5;HG(N=hOvjmJ%R-#czCrUjUJuem%kao?F|ban)xoQcPXQ1v>72a3N*E?u3Q7#9oL@$b zX8OOm>=TJy!`D<8C%Bhqs?2-lpnich(ftQ|q{^2-MY;@yaa3lD_xK_VsOW^FYXI2V zgCAkEVN|;QT}^&4K3STPi*KJBGs4IYpM5)y<3}5V9uSkQC@489`j-Jz5)7i~9-g1G zuFqTOW3iNk!8NUm7PCbB@+#L{uA@gX!$sX5duPuu)=r#j2!sP|^_jz~7(>m<+7F&4 zP&QIHJ5)}0t=EJdA3`=F=3NfS-n?(f7Kve5)`OsMq}3DOSsX=~i-?lN#>VQ*KVZ>G z&0L8}yqI@xrh#F`ryJlsVKnG3r;1nG@Mkj*tfTFB&+!Ubd4`VK`_8SeXRsb}FAE*~ z{O6L+NO-9er9=F(<%eX%{o0b^#LuZIP&v4&s2%?3Z#~%F+#4Ke(o6iPGapQIzL?tu zRIR^GkEKUMk}+SRQJTB_Zc(`@u}EnxoOIi!P8X*&=|8wiU#N>qk)IyR0+fXsC*(my zTtXHcsgpU_2`RrmG8b`-W)J3Y;JgmzI3gkz_q}vQysYjRC8gN#c*QFFwMz$OFfcjK zPLv55`rsFywtt5#tYi25%w=rptbTLronA4w9?6+JbTE!)J%?ni9tj!vs;V60vpBDM zMx}T58~~@JzUN78KAve>&n00$LMzzFc33W$)L0(A-S<9_&t^uFQExI@^Ph^0ClG2) zxIp-ac}0I-z4%*M5R^?dX|f2n*-|m-lANS;8Js|AX-9&LFNB za_hei^S{k!-~66{K-xzM z)kP*o`or^J`A69;8eH#Jc{2A}cWY11wzfYM21O+M9qX+=JAbr=O0Q;fSE@3WU-ZPD z!l<8v( 
z5pv6=|0(Ojf4FW{kh0;KZQVHA3)Xw@(@==uQA!8*bc?Dws`x%apo+S$o>j&*^S*U4 zJG;NxjL_T0uBBgTrpUTD?D_TKD{YW~v1ccRJFZQ2g&Ju65Q!X?1)j<_g<5UY-ILp> zVrC0xJ|%3daqBXXE}fxr+=>0%2=pZ|5MB?6^Av*0i2gcd{ybxWMvQD15CTNvd4o;4 zSCYt8tJpB9L8bkSoj7XLaCbHAkw?>Y>N-E~rBGCf3@uhR|A>Nd92C@m85N)l>7FT9 zMcquW9EEyJB25XEHja|gY(^)$(M1_B^)SzeFzX7!8`f>DEVhGR_1gLsj=6WwA9&TK zlC&y9YAgGs+g2!1Y-_BO1o40Q5SG^Ml||2^#D+n$;3h@e-+RL}H$OmE07fI6LeOqD zG|b*ye)h~~48M?sVDIuuk0Zi1pb&%n^NOC>0;GXO$eD?pM(DKPMs3}T2Q53^oS}=9 zMhuwHye?79L3GKERRXBgH8kdzbz2I3&i3Ef@wq}=9mjL{`TFP#wkEF9m+lCmnD?0W zcQAX()bA4NZ9X+TTaA~Wfi)1*`o_hfONPmv=lE#@keMIr*0?Z;x+XA<9klqF26&W=O#jQkTYVtK4FbnbtEAR%XAx!SnF zhS+g~`CGj`75_(sl3B;XuCXz^efOoZ3>oRmE%`a0I_Gd^t%hWJa3m~F3{DXAC|VyvE*iA_ukVwG$ZcN{m*C3&^MzpGS4auU7*@nqownkVCdq`G>U zq9dwM7HeJ~oaQpg9&IiBW=72=g1OQQKHp7x54^L*_7j0hw7)6SK#IM>%J z6_lXO^0V#oV`Z9J-HjBqs{O$gonb60f!-4gBvxbcB|~=`2ya0`&rKxK4eODe$-|CE zDA(na%$m#pr2eLPnqBul8@tWX7!XKAZgl2iG~C8W?fuP97^{TsH44UakbBF>BdD8F zgsbIBI}?f4miRq7MIBOmhY`j7_`yx`$PcXoI*MV_Pzj9oQmtYFR*MERnP&16m7Hl^ z13QwVr^0H7JvrCY<-t)uUn6G)+>o8t3lnT_g>TYg+aunAnn77R3~xyqwMU#qas=CB z;M2TN1=D6s{;F_kZHmgpetlwR(bfYOZt&??;Fc-wIb)Vu#7`YZ+1@>ERn=)fCuV8Y zLb36q3GHF>YwEhL!EUH%VTK|aMI}1^vFBek%0-;2=iE+Fp)XUGRYmtfrB)8ug1A$~ zy#kEycBYx+5-|p-#1`h+@CNT%Uk!czIFNkiL9;7s=IU0fu5YJkQ(GAGnTA;%y-?;I zJ0U?k;hX`ox{osYqod>YqZ#~tnKKOIDoo>TE_~Pi564N7S*fGdRp#d^;C@`L9qrfW z0s-tb?@Wdo9cc(fp|130KA8e`#Yf|msaMKQkkzvClSn{=9F!rLQdF$4HH~-byD_AId zCcV4)!-i$|M5JgcCN5d0nBKzv>QXR_hU5#?|9Cctn6hK>XW_qjLctvU1`X@awQ1hBH^R7-n%ldtr8thrD|^Dr-QaI}dD6V~ z#MkyJtEY*RP~4tzO1b9r_CYOi7268tX>pG?Z5|ZbUpyBW1-%ak(lz$U)KV93$=2BD zUR)u6`SQj8ogYK`&@>%oCuARQA}^d1tZlSV!GQ{d^dix{&mXFqxC zyV6#yD+B*hqrnl9rMRTiMUsX=?xw2o7@!&b4}HNceZn(Gp!&D}easpEJZ3{L0aBRO z+RX$9?*^7Y3}bd&CQ2@%SfIXw?R-NZW|7iWpm?E8?dKabMS^rjvWyKAn^WK!n;tKB zE`Hcm)?S5b4w*?Is*}}czvDJ~_YE9pKHeX@j31cty|3P3pIwR2Y#u)Ba~G} zgRw7j(_NGTMgj_6tCZuDP~8i*TDoLcGOqOg#b*;tFlpJYvrSgtB=TBs5zw4Ce?7y;u2#nECCFIbx zqdqHdtz=oAu^R7v=EG$=LPpa4{)LS$Wj-|Tx!su!c4}HF9vBt@>*~Nx1*l{w^Ba;Ck_-pPmPe|SFiyqKtGD`ymJh6w52DNPJ 
zLAUe$k8%nMX4yHTX914V*v~?AKeG)GytMe5yn-Mi9zSbf^jk;d6@ab5 z8@Q1&vM)M7zjEWH5!L%u1>fr7QK_uA&c@kJo&|uI9f#`?+5Qw|Pr_HFy3#R{M*CC? zJE+eqS;p6^A7L4X%4LS?6lWFv)B1bkdv7Nz z02x&mg8qv!G0jMwG|T7C^rdzKe}@Nihaf6w5=3fLEoG%|gQ=TwuaTzk#)de_eiZYz zO~2u-&8pe+*a~3TD$;GZsX%I^bv97=HHyaAO<+nB{Gc}BZ_`gV`@Ebydv3nSzViG} zzM})zJ;`gc*kzM*)Fw|hK}zLa@JLJ;;1ZEW_lVtOvO)a=>p7bG1_hdQT@V6R$jy{P zkyKqSrl)j_olE%ytdsPP_pRK6)Me+LM|<0KVB;z}iPN?bU(Me9ycWI!SHWP&FAfuXS#^^rS!b0;CBEq=4v++q*YxB1CLDag;nPy?^*fudeHqa7;L( zm1&^po-guXdUkkBjIQ~k%BBo|W>%&ALElDQu|*d(5n5{4TV^Ii{$LZ3Npl8#ijyOp zFJHd5hyEB$^O^&Lh{36v;*v+EPi|X0QJUkUg=L(`)-0$Uojzx<=8<71P*O!7#q_|s zT)`63Sn`cdYj>SdtbniN<>k-xxo_`||Grgol$}a^jRLN<+rc~{a@oa{$YS#S-M2W) z*Pum@b7)*!a;Y#@6U~^?8wrGj&b#nh0MSs|@eVg@m% zBRf**OD_hlV#Lk&nUF_5pQ5NnHEP?{Vsk{dgJm}g&2qz*P5+7GC6(Gv#UGh-3E0+Z z?vq=O^TpHJm?f7<+a1#-(TB5^B`XTwi)Et%R~}at%MuB>l0@jUViFdYt|LEaM>Gr= z&HjGoVD(x_Tl9TvJDQ+}fA{yt06tPZkE2CtY=Nw7-(eX`08@LmRf1c3=Oxu_@S}C|Mx$`1Y0xv9*QL#s=SA1oW*p|(ip}c zsm2M)jz=glbZ7GE+#YmWO=UjYy)3KblX<1b{o#*2%CaNmF5_#4tL-YINP-Z5slLsD9lqJP2tHx1+{`?m4%-%I~Dq7Rh+Uc8eDkM0-ABl_<( eMPX5qgx?1tO;5u$+%ufN(j~>@L@R|20{ NOTICE: You should make requests to the services you want to profile. For example, using the [UDP load test](./benchmarking.md#run-udp-load-test). 
+ +After running the tracker with ` Date: Thu, 21 Mar 2024 19:12:35 +0000 Subject: [PATCH 0767/1003] docs: [#746] add missing flamegraph in docs --- .gitignore | 2 +- docs/media/flamegraph.svg | 491 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 492 insertions(+), 1 deletion(-) create mode 100644 docs/media/flamegraph.svg diff --git a/.gitignore b/.gitignore index 1bffb9842..c1abad7e0 100644 --- a/.gitignore +++ b/.gitignore @@ -6,10 +6,10 @@ /data.db /database.db /database.json.bz2 +/flamegraph.svg /storage/ /target /tracker.* /tracker.toml callgrind.out -flamegraph.svg perf.data* \ No newline at end of file diff --git a/docs/media/flamegraph.svg b/docs/media/flamegraph.svg new file mode 100644 index 000000000..34e7146f9 --- /dev/null +++ b/docs/media/flamegraph.svg @@ -0,0 +1,491 @@ +Flame Graph Reset ZoomSearch [unknown] (154 samples, 0.36%)[unknown] (154 samples, 0.36%)[unknown] (150 samples, 0.35%)[unknown] (149 samples, 0.35%)[unknown] (146 samples, 0.34%)[unknown] (139 samples, 0.33%)[unknown] (80 samples, 0.19%)[unknown] (80 samples, 0.19%)[unknown] (76 samples, 0.18%)[unknown] (63 samples, 0.15%)[unknown] (8 samples, 0.02%)[unknown] (8 samples, 0.02%)[unknown] (8 samples, 0.02%)[unknown] (7 samples, 0.02%)[unknown] (155 samples, 0.37%)profiling (174 samples, 0.41%)clone3 (17 samples, 0.04%)start_thread (16 samples, 0.04%)std::sys::pal::unix::thread::Thread::new::thread_start (15 samples, 0.04%)std::sys::pal::unix::stack_overflow::Handler::new (15 samples, 0.04%)std::sys::pal::unix::stack_overflow::imp::make_handler (15 samples, 0.04%)std::sys::pal::unix::stack_overflow::imp::get_stack (15 samples, 0.04%)__GI___mmap64 (15 samples, 0.04%)__GI___mmap64 (15 samples, 0.04%)[unknown] (15 samples, 0.04%)[unknown] (15 samples, 0.04%)[unknown] (15 samples, 0.04%)[unknown] (15 samples, 0.04%)[unknown] (15 samples, 0.04%)[unknown] (15 samples, 0.04%)[unknown] (15 samples, 0.04%)[unknown] (14 samples, 0.03%)[unknown] (12 samples, 
0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (14 samples, 0.03%)[[vdso]] (102 samples, 0.24%)<torrust_tracker::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as core::ops::deref::Deref>::deref::__stability::LAZY (119 samples, 0.28%)<tokio::sync::batch_semaphore::Acquire as core::future::future::Future>::poll (13 samples, 0.03%)tokio::sync::batch_semaphore::Semaphore::poll_acquire (7 samples, 0.02%)[[vdso]] (63 samples, 0.15%)__GI___clock_gettime (6 samples, 0.01%)__GI___libc_write (5 samples, 0.01%)__GI___libc_write (5 samples, 0.01%)__memcpy_avx512_unaligned_erms (5 samples, 0.01%)__pow (8 samples, 0.02%)_int_malloc (18 samples, 0.04%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (60 samples, 0.14%)core::ptr::drop_in_place<core::option::Option<core::task::wake::Waker>> (28 samples, 0.07%)tokio::runtime::context::with_scheduler (10 samples, 0.02%)tokio::runtime::io::driver::Driver::turn (13 samples, 0.03%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (10 samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (12 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (6 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (6 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (5 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (25 samples, 0.06%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (17 samples, 0.04%)core::sync::atomic::AtomicUsize::fetch_add (16 samples, 0.04%)core::sync::atomic::atomic_add (16 samples, 0.04%)tokio::runtime::driver::Handle::unpark (7 samples, 0.02%)tokio::runtime::driver::IoHandle::unpark (7 samples, 0.02%)[unknown] (10 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (21 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (18 samples, 0.04%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark_condvar (11 samples, 
0.03%)tokio::loom::std::mutex::Mutex<T>::lock (11 samples, 0.03%)std::sync::poison::Flag::done (16 samples, 0.04%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>,tokio::runtime::task::core::Header>>> (17 samples, 0.04%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (17 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (25 samples, 0.06%)tokio::runtime::task::list::OwnedTasks<S>::remove (23 samples, 0.05%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (22 samples, 0.05%)core::sync::atomic::AtomicUsize::compare_exchange (5 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (5 samples, 0.01%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (12 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (8 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (34 samples, 0.08%)tokio::runtime::scheduler::multi_thread::park::Parker::park (25 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Inner::park (25 samples, 0.06%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (34 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (31 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (21 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (18 samples, 0.04%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (8 samples, 
0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (8 samples, 0.02%)std::sync::poison::Flag::done (12 samples, 0.03%)std::thread::panicking (8 samples, 0.02%)std::panicking::panicking (8 samples, 0.02%)std::panicking::panic_count::count_is_zero (8 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (19 samples, 0.04%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (19 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::unlock (7 samples, 0.02%)core::sync::atomic::AtomicU32::swap (5 samples, 0.01%)core::sync::atomic::atomic_swap (5 samples, 0.01%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (55 samples, 0.13%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (55 samples, 0.13%)core::slice::<impl [T]>::contains (140 samples, 0.33%)<T as core::slice::cmp::SliceContains>::slice_contains (140 samples, 0.33%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (140 samples, 0.33%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (34 samples, 0.08%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (34 samples, 0.08%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (165 samples, 0.39%)tokio::loom::std::mutex::Mutex<T>::lock (6 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (6 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (167 samples, 0.39%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (20 samples, 0.05%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (20 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::unlock (20 samples, 0.05%)core::sync::atomic::AtomicU32::swap (10 samples, 0.02%)core::sync::atomic::atomic_swap (10 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (10 samples, 
0.02%)std::sync::mutex::Mutex<T>::lock (10 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (7 samples, 0.02%)core::sync::atomic::AtomicU32::compare_exchange (5 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (5 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (39 samples, 0.09%)tokio::runtime::scheduler::multi_thread::idle::State::dec_num_unparked (8 samples, 0.02%)core::sync::atomic::AtomicUsize::fetch_sub (8 samples, 0.02%)core::sync::atomic::atomic_sub (8 samples, 0.02%)tokio::runtime::scheduler::inject::shared::Shared<T>::is_empty (6 samples, 0.01%)tokio::runtime::scheduler::inject::shared::Shared<T>::len (6 samples, 0.01%)core::sync::atomic::AtomicUsize::load (6 samples, 0.01%)core::sync::atomic::atomic_load (6 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (6 samples, 0.01%)alloc::sync::Arc<T,A>::inner (6 samples, 0.01%)core::ptr::non_null::NonNull<T>::as_ref (6 samples, 0.01%)core::sync::atomic::AtomicU32::load (6 samples, 0.01%)core::sync::atomic::atomic_load (6 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (32 samples, 0.08%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (26 samples, 0.06%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (13 samples, 0.03%)core::sync::atomic::AtomicU64::load (7 samples, 0.02%)core::sync::atomic::atomic_load (7 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (63 samples, 0.15%)tokio::runtime::scheduler::multi_thread::worker::Context::park (299 samples, 0.71%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (108 samples, 0.26%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (5 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (13 samples, 0.03%)core::num::<impl u32>::wrapping_add (20 
samples, 0.05%)core::sync::atomic::AtomicU64::compare_exchange (14 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (14 samples, 0.03%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (446 samples, 1.05%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (446 samples, 1.05%)tokio::runtime::scheduler::multi_thread::worker::run (446 samples, 1.05%)tokio::runtime::context::runtime::enter_runtime (446 samples, 1.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (446 samples, 1.05%)tokio::runtime::context::set_scheduler (446 samples, 1.05%)std::thread::local::LocalKey<T>::with (446 samples, 1.05%)std::thread::local::LocalKey<T>::try_with (446 samples, 1.05%)tokio::runtime::context::set_scheduler::{{closure}} (446 samples, 1.05%)tokio::runtime::context::scoped::Scoped<T>::set (446 samples, 1.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (446 samples, 1.05%)tokio::runtime::scheduler::multi_thread::worker::Context::run (446 samples, 1.05%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (95 samples, 0.22%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (86 samples, 0.20%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (82 samples, 0.19%)tokio::runtime::scheduler::multi_thread::queue::pack (35 samples, 0.08%)tokio::runtime::context::CONTEXT::__getit (12 samples, 0.03%)core::cell::Cell<T>::get (12 samples, 0.03%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (14 samples, 0.03%)<tokio::runtime::task::core::TaskIdGuard as core::ops::drop::Drop>::drop (14 samples, 0.03%)tokio::runtime::context::set_current_task_id (14 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (14 samples, 0.03%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (480 samples, 1.13%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (479 samples, 
1.13%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (9 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (9 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::poll (500 samples, 1.18%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (20 samples, 0.05%)tokio::runtime::task::core::Core<T,S>::set_stage (18 samples, 0.04%)tokio::runtime::task::core::TaskIdGuard::enter (7 samples, 0.02%)tokio::runtime::context::set_current_task_id (7 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (7 samples, 0.02%)tokio::runtime::context::set_current_task_id::{{closure}} (6 samples, 0.01%)core::cell::Cell<T>::replace (6 samples, 0.01%)core::mem::replace (6 samples, 0.01%)tokio::runtime::task::harness::poll_future (504 samples, 1.19%)std::panic::catch_unwind (504 samples, 1.19%)std::panicking::try (504 samples, 1.19%)std::panicking::try::do_call (504 samples, 1.19%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (504 samples, 1.19%)tokio::runtime::task::harness::poll_future::{{closure}} (504 samples, 1.19%)tokio::runtime::task::raw::poll (512 samples, 1.21%)tokio::runtime::task::harness::Harness<T,S>::poll (510 samples, 1.20%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (510 samples, 1.20%)tokio::runtime::task::state::State::transition_to_idle (5 samples, 0.01%)tokio::runtime::task::state::State::fetch_update_action (5 samples, 0.01%)core::array::<impl core::default::Default for [T: 32]>::default (8 samples, 0.02%)tokio::runtime::time::wheel::Wheel::poll (8 samples, 0.02%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (32 samples, 0.08%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (5 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_expiration (15 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (23 samples, 0.05%)tokio::sync::batch_semaphore::Waiter::assign_permits (7 samples, 
0.02%)tokio::sync::batch_semaphore::Semaphore::add_permits_locked (16 samples, 0.04%)tokio::sync::rwlock::RwLock<T>::write::{{closure}} (32 samples, 0.08%)tokio::sync::rwlock::RwLock<T>::write::{{closure}}::{{closure}} (25 samples, 0.06%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (14 samples, 0.03%)alloc::vec::from_elem (19 samples, 0.04%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (19 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (19 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (19 samples, 0.04%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (19 samples, 0.04%)alloc::alloc::Global::alloc_impl (19 samples, 0.04%)alloc::alloc::alloc_zeroed (19 samples, 0.04%)__rdl_alloc_zeroed (19 samples, 0.04%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (19 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (58 samples, 0.14%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (32 samples, 0.08%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (7 samples, 0.02%)tokio::net::udp::UdpSocket::send_to::{{closure}} (6 samples, 0.01%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (6 samples, 0.01%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (6 samples, 0.01%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (6 samples, 0.01%)mio::net::udp::UdpSocket::send_to (6 samples, 0.01%)mio::io_source::IoSource<T>::do_io (6 samples, 0.01%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (6 samples, 0.01%)mio::net::udp::UdpSocket::send_to::{{closure}} (6 samples, 0.01%)std::net::udp::UdpSocket::send_to (6 samples, 0.01%)std::sys_common::net::UdpSocket::send_to (6 samples, 0.01%)std::sys::pal::unix::cvt (6 samples, 0.01%)[[heap]] (1,068 samples, 2.52%)[[..uuid::v4::<impl uuid::Uuid>::new_v4 (8 samples, 
0.02%)uuid::rng::bytes (8 samples, 0.02%)rand::random (8 samples, 0.02%)rand::rng::Rng::gen (8 samples, 0.02%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (8 samples, 0.02%)rand::rng::Rng::gen (8 samples, 0.02%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (8 samples, 0.02%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (8 samples, 0.02%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (8 samples, 0.02%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (8 samples, 0.02%)rand_core::block::BlockRng<R>::generate_and_set (7 samples, 0.02%)[[vdso]] (104 samples, 0.25%)<alloc::string::String as core::fmt::Write>::write_str (29 samples, 0.07%)alloc::string::String::push_str (7 samples, 0.02%)alloc::vec::Vec<T,A>::extend_from_slice (7 samples, 0.02%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (7 samples, 0.02%)alloc::vec::Vec<T,A>::append_elements (7 samples, 0.02%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (7 samples, 0.02%)core::num::<impl u64>::rotate_left (9 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (23 samples, 0.05%)core::num::<impl u64>::wrapping_add (7 samples, 0.02%)core::hash::sip::u8to64_le (7 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (52 samples, 0.12%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (8 samples, 0.02%)tokio::runtime::context::CONTEXT::__getit (5 samples, 0.01%)core::cell::Cell<T>::get (5 samples, 0.01%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (9 samples, 0.02%)core::ops::function::FnMut::call_mut (7 samples, 0.02%)tokio::runtime::coop::poll_proceed (7 samples, 
0.02%)tokio::runtime::context::budget (7 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (7 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (35 samples, 0.08%)tokio::io::ready::Ready::intersection (9 samples, 0.02%)tokio::io::ready::Ready::from_interest (9 samples, 0.02%)tokio::io::interest::Interest::is_readable (8 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (6 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (6 samples, 0.01%)core::result::Result<T,E>::is_err (44 samples, 0.10%)core::result::Result<T,E>::is_ok (44 samples, 0.10%)tokio::loom::std::mutex::Mutex<T>::lock (52 samples, 0.12%)std::sync::mutex::Mutex<T>::lock (49 samples, 0.12%)std::sys::sync::mutex::futex::Mutex::lock (49 samples, 0.12%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (129 samples, 0.30%)tokio::loom::std::mutex::Mutex<T>::lock (7 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (7 samples, 0.02%)<tokio::sync::batch_semaphore::Acquire as core::future::future::Future>::poll (46 samples, 0.11%)tokio::sync::batch_semaphore::Semaphore::poll_acquire (18 samples, 0.04%)core::result::Result<T,E>::is_err (6 samples, 0.01%)core::result::Result<T,E>::is_ok (6 samples, 0.01%)<tokio::sync::rwlock::write_guard::RwLockWriteGuard<T> as core::ops::drop::Drop>::drop (23 samples, 0.05%)tokio::sync::batch_semaphore::Semaphore::release (23 samples, 0.05%)tokio::loom::std::mutex::Mutex<T>::lock (13 samples, 0.03%)std::sync::mutex::Mutex<T>::lock (13 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::lock (11 samples, 0.03%)core::sync::atomic::AtomicU32::compare_exchange (5 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (5 samples, 0.01%)__memcpy_avx512_unaligned_erms (22 samples, 0.05%)[profiling] (24 samples, 0.06%)<torrust_tracker::shared::bit_torrent::info_hash::InfoHash as core::fmt::Display>::fmt (32 
samples, 0.08%)[[vdso]] (359 samples, 0.85%)__GI___libc_free (47 samples, 0.11%)arena_for_chunk (9 samples, 0.02%)arena_for_chunk (8 samples, 0.02%)heap_for_ptr (8 samples, 0.02%)__GI___libc_malloc (48 samples, 0.11%)tcache_get (7 samples, 0.02%)__GI___lll_lock_wake_private (57 samples, 0.13%)__GI___pthread_disable_asynccancel (13 samples, 0.03%)__GI_getsockname (115 samples, 0.27%)__libc_recvfrom (167 samples, 0.39%)__libc_sendto (48 samples, 0.11%)__memcmp_evex_movbe (38 samples, 0.09%)__memcpy_avx512_unaligned_erms (173 samples, 0.41%)__memset_avx512_unaligned_erms (53 samples, 0.13%)_int_free (70 samples, 0.17%)_int_malloc (151 samples, 0.36%)_int_memalign (29 samples, 0.07%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (11 samples, 0.03%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (5 samples, 0.01%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (5 samples, 0.01%)<torrust_tracker::shared::bit_torrent::info_hash::InfoHash as core::cmp::Ord>::cmp (15 samples, 0.04%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (15 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (15 samples, 0.04%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (15 samples, 0.04%)<u8 as core::slice::cmp::SliceOrd>::compare (15 samples, 0.04%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (43 samples, 0.10%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (40 samples, 0.09%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (35 samples, 0.08%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (35 samples, 0.08%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (11 samples, 
0.03%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (5 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (15 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::grow_amortized (11 samples, 0.03%)alloc::raw_vec::finish_grow (19 samples, 0.04%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (7 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (7 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (7 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::d_rounds (5 samples, 0.01%)core::hash::BuildHasher::hash_one (9 samples, 0.02%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (27 samples, 0.06%)core::ptr::drop_in_place<tokio::net::udp::UdpSocket::send_to<&core::net::socket_addr::SocketAddr>::{{closure}}> (6 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (41 samples, 0.10%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}}> (8 samples, 0.02%)malloc_consolidate (35 samples, 0.08%)mio::sys::unix::waker::eventfd::WakerInternal::wake (5 samples, 0.01%)rand_chacha::guts::ChaCha::pos64 (5 samples, 0.01%)<ppv_lite86::soft::x2<W,G> as core::ops::arith::AddAssign>::add_assign (5 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::AddAssign>::add_assign (5 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::Add>::add (5 samples, 0.01%)core::core_arch::x86::avx2::_mm256_add_epi32 (5 samples, 0.01%)core::core_arch::x86::avx2::_mm256_or_si256 (8 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (10 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (10 samples, 
0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right24 (6 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right24 (6 samples, 0.01%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (6 samples, 0.01%)rand_chacha::guts::round (29 samples, 0.07%)rand_chacha::guts::refill_wide::impl_avx2 (43 samples, 0.10%)rand_chacha::guts::refill_wide::fn_impl (43 samples, 0.10%)rand_chacha::guts::refill_wide_impl (43 samples, 0.10%)tokio::runtime::context::with_scheduler (18 samples, 0.04%)std::thread::local::LocalKey<T>::try_with (6 samples, 0.01%)tokio::runtime::context::with_scheduler::{{closure}} (6 samples, 0.01%)tokio::runtime::context::scoped::Scoped<T>::with (6 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (6 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (6 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (7 samples, 0.02%)[unknown] (47 samples, 0.11%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (213 samples, 0.50%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (78 samples, 0.18%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (28 samples, 0.07%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (76 samples, 0.18%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (66 samples, 0.16%)core::sync::atomic::AtomicUsize::fetch_add (65 samples, 0.15%)core::sync::atomic::atomic_add (65 samples, 0.15%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (5 samples, 0.01%)tokio::runtime::scheduler::multi_thread::park::Parker::park (5 samples, 0.01%)tokio::runtime::scheduler::multi_thread::park::Inner::park (5 samples, 
0.01%)tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (5 samples, 0.01%)tokio::runtime::coop::budget (5 samples, 0.01%)tokio::runtime::coop::with_budget (5 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (7 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (21 samples, 0.05%)tokio::runtime::context::CONTEXT::__getit (9 samples, 0.02%)core::cell::Cell<T>::get (9 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (10 samples, 0.02%)<tokio::runtime::task::core::TaskIdGuard as core::ops::drop::Drop>::drop (10 samples, 0.02%)tokio::runtime::context::set_current_task_id (10 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (10 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (11 samples, 0.03%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (8 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage (45 samples, 0.11%)core::ptr::drop_in_place<tokio::util::sharded_list::ShardGuard<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>> (8 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>>> (8 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (8 samples, 0.02%)<tokio::runtime::task::Task<S> as tokio::util::linked_list::Link>::pointers (5 samples, 0.01%)tokio::runtime::task::core::Header::get_trailer (5 samples, 0.01%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (37 samples, 0.09%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (20 samples, 0.05%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (98 samples, 
0.23%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (30 samples, 0.07%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (30 samples, 0.07%)tokio::loom::std::mutex::Mutex<T>::lock (30 samples, 0.07%)std::sync::mutex::Mutex<T>::lock (30 samples, 0.07%)std::sys::sync::mutex::futex::Mutex::lock (30 samples, 0.07%)core::sync::atomic::AtomicU32::compare_exchange (30 samples, 0.07%)core::sync::atomic::atomic_compare_exchange (30 samples, 0.07%)tokio::runtime::task::raw::drop_join_handle_slow (7 samples, 0.02%)tokio::runtime::task::harness::Harness<T,S>::drop_join_handle_slow (5 samples, 0.01%)tokio::runtime::task::state::State::unset_join_interested (5 samples, 0.01%)tokio::runtime::task::state::State::fetch_update (5 samples, 0.01%)tokio::runtime::task::state::State::load (5 samples, 0.01%)core::sync::atomic::AtomicUsize::load (5 samples, 0.01%)core::sync::atomic::atomic_load (5 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (7 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (6 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (6 samples, 0.01%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (22 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (22 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run (22 samples, 0.05%)tokio::runtime::context::runtime::enter_runtime (22 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (22 samples, 0.05%)tokio::runtime::context::set_scheduler (22 samples, 0.05%)std::thread::local::LocalKey<T>::with (22 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (22 samples, 0.05%)tokio::runtime::context::set_scheduler::{{closure}} (22 samples, 0.05%)tokio::runtime::context::scoped::Scoped<T>::set (22 samples, 
0.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (22 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Context::run (22 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (11 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (8 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (7 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (24 samples, 0.06%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (24 samples, 0.06%)tokio::runtime::task::raw::poll (30 samples, 0.07%)tokio::runtime::task::harness::Harness<T,S>::poll (25 samples, 0.06%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (25 samples, 0.06%)tokio::runtime::task::harness::poll_future (25 samples, 0.06%)std::panic::catch_unwind (25 samples, 0.06%)std::panicking::try (25 samples, 0.06%)std::panicking::try::do_call (25 samples, 0.06%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (25 samples, 0.06%)tokio::runtime::task::harness::poll_future::{{closure}} (25 samples, 0.06%)tokio::runtime::task::core::Core<T,S>::poll (25 samples, 0.06%)tokio::runtime::task::raw::schedule (5 samples, 0.01%)tokio::runtime::task::state::State::transition_to_idle (13 samples, 0.03%)tokio::runtime::task::state::State::fetch_update_action (13 samples, 0.03%)tokio::runtime::task::waker::clone_waker (6 samples, 0.01%)tokio::runtime::task::state::State::ref_inc (6 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (6 samples, 0.01%)core::sync::atomic::atomic_add (6 samples, 0.01%)tokio::runtime::task::waker::wake_by_val (12 samples, 0.03%)tokio::runtime::task::harness::<impl tokio::runtime::task::raw::RawTask>::wake_by_val (12 samples, 0.03%)tokio::runtime::task::state::State::transition_to_notified_by_val (7 samples, 0.02%)tokio::runtime::task::state::State::fetch_update_action (7 samples, 
0.02%)tokio::runtime::time::Driver::park_internal (10 samples, 0.02%)tokio::runtime::time::wheel::level::Level::next_expiration (12 samples, 0.03%)tokio::runtime::time::wheel::Wheel::next_expiration (17 samples, 0.04%)tokio::sync::batch_semaphore::Semaphore::add_permits_locked (13 samples, 0.03%)tokio::sync::rwlock::RwLock<T>::write::{{closure}} (17 samples, 0.04%)tokio::sync::rwlock::RwLock<T>::write::{{closure}}::{{closure}} (7 samples, 0.02%)torrust_tracker::core::torrent::Entry::get_stats (15 samples, 0.04%)torrust_tracker::core::torrent::Entry::insert_or_update_peer (7 samples, 0.02%)torrust_tracker::core::torrent::repository::RepositoryAsyncSingle::get_torrents::{{closure}} (47 samples, 0.11%)tokio::sync::rwlock::RwLock<T>::read::{{closure}} (34 samples, 0.08%)tokio::sync::rwlock::RwLock<T>::read::{{closure}}::{{closure}} (30 samples, 0.07%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (8 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (8 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (8 samples, 0.02%)<core::time::Duration as core::hash::Hash>::hash (7 samples, 0.02%)<torrust_tracker::shared::clock::time_extent::TimeExtent as core::hash::Hash>::hash (9 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (24 samples, 0.06%)torrust_tracker::servers::udp::peer_builder::from_request (5 samples, 0.01%)torrust_tracker::servers::udp::request::AnnounceWrapper::new (20 samples, 0.05%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (16 samples, 0.04%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (7 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (7 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker::core::Tracker>> (27 samples, 0.06%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (27 samples, 0.06%)core::sync::atomic::AtomicUsize::fetch_sub (27 samples, 
0.06%)core::sync::atomic::atomic_sub (27 samples, 0.06%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (17 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (17 samples, 0.04%)tokio::net::udp::UdpSocket::local_addr (6 samples, 0.01%)<tokio::io::poll_evented::PollEvented<E> as core::ops::deref::Deref>::deref (6 samples, 0.01%)core::option::Option<T>::as_ref (6 samples, 0.01%)[unknown] (6 samples, 0.01%)torrust_tracker::servers::udp::handlers::RequestId::make (9 samples, 0.02%)[unknown] (8 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (16 samples, 0.04%)torrust_tracker::core::Tracker::announce::{{closure}} (10 samples, 0.02%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (6 samples, 0.01%)<torrust_tracker::core::torrent::repository::RepositoryAsyncSingle as torrust_tracker::core::torrent::repository::TRepositoryAsync>::update_torrent_with_peer_and_get_stats::{{closure}} (6 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (6 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (34 samples, 0.08%)<T as alloc::string::ToString>::to_string (10 samples, 0.02%)core::option::Option<T>::expect (5 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (41 samples, 0.10%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (156 samples, 0.37%)torrust_tracker::servers::udp::logging::log_response (17 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (238 samples, 0.56%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (18 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (13 samples, 0.03%)tokio::net::udp::UdpSocket::send_to::{{closure}} (11 samples, 0.03%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (10 samples, 
0.02%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (8 samples, 0.02%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (6 samples, 0.01%)mio::net::udp::UdpSocket::send_to (6 samples, 0.01%)mio::io_source::IoSource<T>::do_io (6 samples, 0.01%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (6 samples, 0.01%)mio::net::udp::UdpSocket::send_to::{{closure}} (6 samples, 0.01%)std::net::udp::UdpSocket::send_to (6 samples, 0.01%)std::sys_common::net::UdpSocket::send_to (6 samples, 0.01%)std::sys::pal::unix::cvt (6 samples, 0.01%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (5 samples, 0.01%)tracing::span::Span::log (12 samples, 0.03%)tracing::span::Span::record_all (15 samples, 0.04%)unlink_chunk (59 samples, 0.14%)[anon] (3,194 samples, 7.54%)[anon][[vdso]] (27 samples, 0.06%)__memcpy_avx512_unaligned_erms (35 samples, 0.08%)_int_malloc (9 samples, 0.02%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (8 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (21 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::grow_amortized (18 samples, 0.04%)syscall (5 samples, 0.01%)tokio::sync::batch_semaphore::Semaphore::add_permits_locked (5 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (10 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (10 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (10 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::d_rounds (7 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (11 samples, 0.03%)[profiling] (167 samples, 0.39%)unlink_chunk (5 samples, 0.01%)<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (7 samples, 0.02%)<alloc::string::String as core::fmt::Write>::write_str (7 samples, 0.02%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (5 samples, 
0.01%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (7 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (39 samples, 0.09%)core::hash::sip::u8to64_le (11 samples, 0.03%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (13 samples, 0.03%)<core::net::socket_addr::SocketAddrV4 as core::hash::Hash>::hash (6 samples, 0.01%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (20 samples, 0.05%)core::ops::function::FnMut::call_mut (17 samples, 0.04%)tokio::runtime::coop::poll_proceed (17 samples, 0.04%)tokio::runtime::context::budget (17 samples, 0.04%)std::thread::local::LocalKey<T>::try_with (17 samples, 0.04%)tokio::runtime::context::budget::{{closure}} (11 samples, 0.03%)tokio::runtime::coop::poll_proceed::{{closure}} (11 samples, 0.03%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (40 samples, 0.09%)std::sync::poison::Flag::done (6 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (12 samples, 0.03%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (12 samples, 0.03%)std::sync::mutex::MutexGuard<T>::new (8 samples, 0.02%)std::sync::poison::Flag::guard (8 samples, 0.02%)std::thread::panicking (7 samples, 0.02%)std::panicking::panicking (7 samples, 0.02%)std::panicking::panic_count::count_is_zero (7 samples, 0.02%)core::result::Result<T,E>::is_err (20 samples, 0.05%)core::result::Result<T,E>::is_ok (20 samples, 0.05%)tokio::loom::std::mutex::Mutex<T>::lock (36 samples, 0.09%)std::sync::mutex::Mutex<T>::lock (36 samples, 0.09%)std::sys::sync::mutex::futex::Mutex::lock (28 samples, 0.07%)core::sync::atomic::AtomicU32::compare_exchange (7 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (7 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (79 samples, 0.19%)tokio::runtime::coop::poll_proceed (6 samples, 
0.01%)tokio::runtime::context::budget (6 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (6 samples, 0.01%)tokio::runtime::context::budget::{{closure}} (6 samples, 0.01%)tokio::runtime::coop::poll_proceed::{{closure}} (6 samples, 0.01%)core::mem::drop (8 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::sync::batch_semaphore::Waitlist>> (7 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (7 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (6 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (7 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (7 samples, 0.02%)<tokio::sync::batch_semaphore::Acquire as core::future::future::Future>::poll (69 samples, 0.16%)tokio::sync::batch_semaphore::Semaphore::poll_acquire (40 samples, 0.09%)<tokio::sync::batch_semaphore::Acquire as core::ops::drop::Drop>::drop (5 samples, 0.01%)<tokio::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (6 samples, 0.01%)binascii::bin2hex (11 samples, 0.03%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (6 samples, 0.01%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (6 samples, 0.01%)<torrust_tracker::shared::bit_torrent::info_hash::InfoHash as core::fmt::Display>::fmt (20 samples, 0.05%)[[vdso]] (1,290 samples, 3.05%)[[v..[unknown] (99 samples, 0.23%)[unknown] (386 samples, 0.91%)[unknown] (295 samples, 0.70%)[unknown] (242 samples, 0.57%)[unknown] (152 samples, 0.36%)[unknown] (64 samples, 0.15%)[unknown] (33 samples, 0.08%)__GI___clock_gettime (35 samples, 0.08%)tokio::runtime::time::Driver::park_internal (16 samples, 0.04%)__GI___libc_free (44 samples, 0.10%)arena_for_chunk (12 samples, 0.03%)arena_for_chunk (8 samples, 0.02%)heap_for_ptr (5 samples, 0.01%)__GI___libc_malloc (46 samples, 0.11%)__GI___libc_realloc (7 samples, 0.02%)__GI___libc_write (22 samples, 0.05%)__GI___libc_write (22 samples, 
0.05%)__GI___lll_lock_wait_private (17 samples, 0.04%)futex_wait (9 samples, 0.02%)__GI___lll_lock_wake_private (14 samples, 0.03%)__GI___pthread_disable_asynccancel (19 samples, 0.04%)compiler_builtins::float::conv::int_to_float::u128_to_f64_bits (19 samples, 0.04%)__floattidf (23 samples, 0.05%)compiler_builtins::float::conv::__floattidf (22 samples, 0.05%)exp_inline (18 samples, 0.04%)__ieee754_pow_fma (30 samples, 0.07%)log_inline (11 samples, 0.03%)__libc_calloc (19 samples, 0.04%)__libc_sendto (92 samples, 0.22%)__memcmp_evex_movbe (81 samples, 0.19%)__memcpy_avx512_unaligned_erms (484 samples, 1.14%)__posix_memalign (35 samples, 0.08%)__posix_memalign (24 samples, 0.06%)_mid_memalign (21 samples, 0.05%)__vdso_clock_gettime (5 samples, 0.01%)[unknown] (18 samples, 0.04%)free_perturb (5 samples, 0.01%)_int_free (305 samples, 0.72%)tcache_put (9 samples, 0.02%)[unknown] (5 samples, 0.01%)_int_malloc (235 samples, 0.55%)_int_memalign (28 samples, 0.07%)checked_request2size (7 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (6 samples, 0.01%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (6 samples, 0.01%)<torrust_tracker::shared::bit_torrent::info_hash::InfoHash as core::cmp::Ord>::cmp (20 samples, 0.05%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (20 samples, 0.05%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (20 samples, 0.05%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (20 samples, 0.05%)<u8 as core::slice::cmp::SliceOrd>::compare (20 samples, 0.05%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (8 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (55 samples, 0.13%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (52 samples, 0.12%)alloc::collections::btree::search::<impl 
alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (51 samples, 0.12%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (51 samples, 0.12%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Immut,K,V,Type>::keys (6 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (10 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (6 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (5 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (5 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (10 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (9 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (9 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (9 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (19 samples, 0.04%)alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (8 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Dying,K,V>::deallocating_next_unchecked (5 samples, 0.01%)alloc::collections::btree::navigate::<impl 
alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>>::deallocating_next_unchecked (5 samples, 0.01%)alloc::collections::btree::mem::replace (5 samples, 0.01%)core::ptr::write (5 samples, 0.01%)alloc::collections::btree::map::entry::Entry<K,V,A>::or_insert (5 samples, 0.01%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (6 samples, 0.01%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (5 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (13 samples, 0.03%)alloc::raw_vec::RawVec<T,A>::grow_amortized (11 samples, 0.03%)alloc::raw_vec::finish_grow (22 samples, 0.05%)core::result::Result<T,E>::map_err (9 samples, 0.02%)alloc::vec::in_place_collect::<impl alloc::vec::spec_from_iter::SpecFromIter<T,I> for alloc::vec::Vec<T>>::from_iter (8 samples, 0.02%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (8 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (5 samples, 0.01%)alloc_new_heap (26 samples, 0.06%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (26 samples, 0.06%)core::fmt::Formatter::pad_integral (53 samples, 0.13%)core::fmt::Formatter::pad_integral::write_prefix (10 samples, 0.02%)core::fmt::Formatter::pad_integral (6 samples, 0.01%)core::fmt::write (11 samples, 0.03%)core::hash::BuildHasher::hash_one (8 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (8 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (8 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (8 samples, 0.02%)<core::hash::sip::Sip13Rounds as 
core::hash::sip::Sip>::d_rounds (6 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (65 samples, 0.15%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}}> (6 samples, 0.01%)epoll_wait (76 samples, 0.18%)malloc_consolidate (9 samples, 0.02%)std::sys::pal::unix::time::Timespec::new (6 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (46 samples, 0.11%)std::sys::pal::unix::time::Timespec::sub_timespec (34 samples, 0.08%)core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::ge (5 samples, 0.01%)core::cmp::PartialOrd::ge (5 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock_contended (17 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::spin (8 samples, 0.02%)std::sys_common::net::TcpListener::socket_addr (10 samples, 0.02%)std::sys_common::net::sockname (10 samples, 0.02%)syscall (52 samples, 0.12%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::push_back_or_overflow (6 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::push_back_finish (6 samples, 0.01%)tokio::runtime::context::with_scheduler (28 samples, 0.07%)std::thread::local::LocalKey<T>::try_with (19 samples, 0.04%)tokio::runtime::context::with_scheduler::{{closure}} (19 samples, 0.04%)tokio::runtime::context::scoped::Scoped<T>::with (19 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (19 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (19 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (15 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (9 samples, 0.02%)mio::poll::Poll::poll 
(12 samples, 0.03%)mio::sys::unix::selector::epoll::Selector::select (12 samples, 0.03%)core::option::Option<T>::map (6 samples, 0.01%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (6 samples, 0.01%)tokio::io::ready::Ready::from_mio (6 samples, 0.01%)core::sync::atomic::AtomicUsize::load (5 samples, 0.01%)core::sync::atomic::atomic_load (5 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (80 samples, 0.19%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (46 samples, 0.11%)[unknown] (41 samples, 0.10%)[unknown] (8 samples, 0.02%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (154 samples, 0.36%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (41 samples, 0.10%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (17 samples, 0.04%)core::mem::drop (10 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (10 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (10 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (8 samples, 0.02%)core::sync::atomic::AtomicU32::swap (7 samples, 0.02%)core::sync::atomic::atomic_swap (7 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (6 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (6 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (58 samples, 0.14%)std::sync::poison::Flag::done (5 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (11 samples, 0.03%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (11 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::unlock (6 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (512 samples, 1.21%)core::sync::atomic::AtomicUsize::fetch_add (509 samples, 1.20%)core::sync::atomic::atomic_add (509 samples, 
1.20%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (550 samples, 1.30%)[unknown] (8 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (24 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (13 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark_condvar (9 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (9 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::next_remote_task (11 samples, 0.03%)std::sync::poison::Flag::done (34 samples, 0.08%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>,tokio::runtime::task::core::Header>>> (36 samples, 0.09%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (36 samples, 0.09%)core::sync::atomic::AtomicUsize::fetch_sub (9 samples, 0.02%)core::sync::atomic::atomic_sub (9 samples, 0.02%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (6 samples, 0.01%)core::result::Result<T,E>::is_err (9 samples, 0.02%)core::result::Result<T,E>::is_ok (9 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (62 samples, 0.15%)tokio::runtime::task::list::OwnedTasks<S>::remove (61 samples, 0.14%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (61 samples, 0.14%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (10 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (10 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (10 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (10 samples, 
0.02%)core::ptr::drop_in_place<core::option::Option<tokio::runtime::scheduler::multi_thread::park::Parker>> (5 samples, 0.01%)core::cell::RefCell<T>::borrow_mut (6 samples, 0.01%)core::cell::RefCell<T>::try_borrow_mut (6 samples, 0.01%)core::cell::BorrowRefMut::new (6 samples, 0.01%)tokio::runtime::scheduler::defer::Defer::wake (13 samples, 0.03%)std::sys::pal::unix::futex::futex_wait (5 samples, 0.01%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (16 samples, 0.04%)std::sync::condvar::Condvar::wait (8 samples, 0.02%)std::sys::sync::condvar::futex::Condvar::wait (8 samples, 0.02%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (8 samples, 0.02%)core::sync::atomic::AtomicUsize::compare_exchange (12 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (12 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (35 samples, 0.08%)tokio::runtime::driver::Driver::park (11 samples, 0.03%)tokio::runtime::driver::TimeDriver::park (11 samples, 0.03%)tokio::runtime::time::Driver::park (11 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Parker::park (66 samples, 0.16%)tokio::runtime::scheduler::multi_thread::park::Inner::park (66 samples, 0.16%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (129 samples, 0.30%)tokio::runtime::scheduler::multi_thread::worker::Core::should_notify_others (6 samples, 0.01%)core::ptr::drop_in_place<core::result::Result<tokio::runtime::coop::with_budget::ResetGuard,std::thread::local::AccessError>> (12 samples, 0.03%)core::ptr::drop_in_place<tokio::runtime::coop::with_budget::ResetGuard> (9 samples, 0.02%)<tokio::runtime::coop::with_budget::ResetGuard as core::ops::drop::Drop>::drop (9 samples, 0.02%)tokio::runtime::context::budget (9 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (9 samples, 0.02%)core::cell::RefCell<T>::borrow_mut (21 samples, 0.05%)core::cell::RefCell<T>::try_borrow_mut (21 samples, 
0.05%)core::cell::BorrowRefMut::new (21 samples, 0.05%)tokio::runtime::coop::budget (46 samples, 0.11%)tokio::runtime::coop::with_budget (46 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (34 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (59 samples, 0.14%)tokio::runtime::signal::Driver::process (8 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (8 samples, 0.02%)<tokio::runtime::task::core::TaskIdGuard as core::ops::drop::Drop>::drop (8 samples, 0.02%)tokio::runtime::context::set_current_task_id (8 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (8 samples, 0.02%)tokio::runtime::context::CONTEXT::__getit (8 samples, 0.02%)core::cell::Cell<T>::get (8 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (22 samples, 0.05%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (48 samples, 0.11%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (45 samples, 0.11%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (42 samples, 0.10%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (42 samples, 0.10%)tokio::runtime::task::core::Core<T,S>::set_stage (89 samples, 0.21%)core::sync::atomic::AtomicUsize::fetch_sub (6 samples, 0.01%)core::sync::atomic::atomic_sub (6 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::complete (22 samples, 0.05%)tokio::runtime::task::state::State::transition_to_terminal (11 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::dealloc (7 samples, 0.02%)core::mem::drop (6 samples, 0.01%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (6 samples, 
0.01%)core::ptr::drop_in_place<tokio::util::sharded_list::ShardGuard<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>> (7 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>>> (7 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (7 samples, 0.02%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (15 samples, 0.04%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (39 samples, 0.09%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (8 samples, 0.02%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (8 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (8 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (8 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (7 samples, 0.02%)core::sync::atomic::AtomicU32::compare_exchange (7 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (7 samples, 0.02%)tokio::runtime::task::raw::drop_abort_handle (20 samples, 0.05%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (16 samples, 0.04%)tokio::runtime::task::state::State::ref_dec (16 samples, 0.04%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (5 samples, 0.01%)tokio::runtime::task::raw::drop_join_handle_slow (13 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::drop_join_handle_slow (12 samples, 0.03%)tokio::runtime::task::state::State::unset_join_interested (6 samples, 0.01%)tokio::runtime::task::state::State::fetch_update (6 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (10 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (10 samples, 
0.02%)std::sync::mutex::Mutex<T>::lock (10 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (10 samples, 0.02%)core::result::Result<T,E>::is_err (8 samples, 0.02%)core::result::Result<T,E>::is_ok (8 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (6 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (6 samples, 0.01%)core::result::Result<T,E>::is_err (5 samples, 0.01%)core::result::Result<T,E>::is_ok (5 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (7 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (7 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (7 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (7 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::park (27 samples, 0.06%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (10 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (10 samples, 0.02%)core::sync::atomic::AtomicU64::compare_exchange (10 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (10 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (34 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (60 samples, 0.14%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (44 samples, 0.10%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (106 samples, 0.25%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (106 samples, 0.25%)tokio::runtime::scheduler::multi_thread::worker::run (106 samples, 0.25%)tokio::runtime::context::runtime::enter_runtime (106 samples, 0.25%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (106 samples, 0.25%)tokio::runtime::context::set_scheduler (106 samples, 0.25%)std::thread::local::LocalKey<T>::with (106 samples, 
0.25%)std::thread::local::LocalKey<T>::try_with (106 samples, 0.25%)tokio::runtime::context::set_scheduler::{{closure}} (106 samples, 0.25%)tokio::runtime::context::scoped::Scoped<T>::set (106 samples, 0.25%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (106 samples, 0.25%)tokio::runtime::scheduler::multi_thread::worker::Context::run (106 samples, 0.25%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (121 samples, 0.29%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (118 samples, 0.28%)tokio::runtime::context::CONTEXT::__getit (10 samples, 0.02%)core::cell::Cell<T>::get (10 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (12 samples, 0.03%)<tokio::runtime::task::core::TaskIdGuard as core::ops::drop::Drop>::drop (12 samples, 0.03%)tokio::runtime::context::set_current_task_id (12 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (12 samples, 0.03%)tokio::runtime::task::core::Core<T,S>::poll (136 samples, 0.32%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (15 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage (14 samples, 0.03%)tokio::runtime::task::harness::poll_future (164 samples, 0.39%)std::panic::catch_unwind (161 samples, 0.38%)std::panicking::try (161 samples, 0.38%)std::panicking::try::do_call (160 samples, 0.38%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (160 samples, 0.38%)tokio::runtime::task::harness::poll_future::{{closure}} (160 samples, 0.38%)tokio::runtime::task::core::Core<T,S>::store_output (24 samples, 0.06%)core::sync::atomic::AtomicUsize::compare_exchange (5 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (5 samples, 0.01%)tokio::runtime::task::raw::poll (213 samples, 0.50%)tokio::runtime::task::harness::Harness<T,S>::poll (211 samples, 0.50%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (207 samples, 
0.49%)tokio::runtime::task::state::State::transition_to_running (40 samples, 0.09%)tokio::runtime::task::state::State::fetch_update_action (40 samples, 0.09%)tokio::runtime::task::state::State::transition_to_running::{{closure}} (5 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (5 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (8 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (8 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (5 samples, 0.01%)tokio::runtime::driver::Handle::time (6 samples, 0.01%)core::option::Option<T>::as_ref (6 samples, 0.01%)tokio::runtime::time::source::TimeSource::instant_to_tick (6 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (13 samples, 0.03%)tokio::runtime::time::source::TimeSource::now (9 samples, 0.02%)tokio::runtime::time::Driver::park_internal (42 samples, 0.10%)<alloc::vec::Vec<T,A> as core::ops::index::Index<I>>::index (5 samples, 0.01%)core::slice::index::<impl core::ops::index::Index<I> for [T]>::index (5 samples, 0.01%)<usize as core::slice::index::SliceIndex<[T]>>::index (5 samples, 0.01%)[unknown] (5 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (28 samples, 0.07%)tokio::runtime::time::wheel::level::slot_range (8 samples, 0.02%)core::num::<impl usize>::pow (8 samples, 0.02%)tokio::runtime::time::wheel::level::level_range (12 samples, 0.03%)tokio::runtime::time::wheel::level::slot_range (10 samples, 0.02%)core::num::<impl usize>::pow (10 samples, 0.02%)tokio::runtime::time::wheel::level::Level::next_expiration (61 samples, 0.14%)tokio::runtime::time::wheel::level::slot_range (11 samples, 0.03%)core::num::<impl usize>::pow (11 samples, 0.03%)tokio::runtime::time::wheel::Wheel::next_expiration (103 samples, 0.24%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::is_empty (6 samples, 0.01%)core::option::Option<T>::is_some (6 samples, 
0.01%)tokio::sync::batch_semaphore::Semaphore::add_permits_locked (50 samples, 0.12%)tokio::sync::rwlock::RwLock<T>::write::{{closure}} (13 samples, 0.03%)tokio::sync::rwlock::RwLock<T>::write::{{closure}}::{{closure}} (5 samples, 0.01%)torrust_tracker::core::Tracker::authorize::{{closure}} (10 samples, 0.02%)alloc::collections::btree::navigate::<impl alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Immut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>>::next_unchecked (5 samples, 0.01%)alloc::collections::btree::mem::replace (5 samples, 0.01%)alloc::collections::btree::navigate::<impl alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Immut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>>::next_unchecked::{{closure}} (5 samples, 0.01%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (7 samples, 0.02%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (7 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (6 samples, 0.01%)<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::next (12 samples, 0.03%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::next (12 samples, 0.03%)core::iter::traits::iterator::Iterator::find (12 samples, 0.03%)core::iter::traits::iterator::Iterator::try_fold (12 samples, 0.03%)torrust_tracker::core::torrent::Entry::get_peers_for_peer (17 samples, 0.04%)core::iter::traits::iterator::Iterator::collect (14 samples, 0.03%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (14 samples, 0.03%)<alloc::vec::Vec<T> as 
alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (14 samples, 0.03%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (14 samples, 0.03%)torrust_tracker::core::torrent::Entry::insert_or_update_peer (15 samples, 0.04%)torrust_tracker::core::torrent::repository::RepositoryAsyncSingle::get_torrents::{{closure}} (30 samples, 0.07%)tokio::sync::rwlock::RwLock<T>::read::{{closure}} (11 samples, 0.03%)tokio::sync::rwlock::RwLock<T>::read::{{closure}}::{{closure}} (9 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (14 samples, 0.03%)torrust_tracker::shared::clock::time_extent::Make::now (14 samples, 0.03%)torrust_tracker::shared::clock::working_clock::<impl torrust_tracker::shared::clock::Time for torrust_tracker::shared::clock::Clock<_>>::now (7 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (51 samples, 0.12%)core::sync::atomic::AtomicUsize::fetch_add (12 samples, 0.03%)core::sync::atomic::atomic_add (12 samples, 0.03%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (12 samples, 0.03%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (12 samples, 0.03%)core::sync::atomic::AtomicUsize::fetch_sub (5 samples, 0.01%)core::sync::atomic::atomic_sub (5 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (180 samples, 0.43%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (38 samples, 0.09%)core::sync::atomic::AtomicUsize::fetch_add (27 samples, 0.06%)core::sync::atomic::atomic_add (27 samples, 0.06%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker::core::Tracker>> (5 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (5 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_packet (5 samples, 0.01%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer::{{closure}} (14 samples, 
0.03%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (27 samples, 0.06%)<torrust_tracker::core::torrent::repository::RepositoryAsyncSingle as torrust_tracker::core::torrent::repository::TRepositoryAsync>::update_torrent_with_peer_and_get_stats::{{closure}} (18 samples, 0.04%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (123 samples, 0.29%)torrust_tracker::core::Tracker::announce::{{closure}} (103 samples, 0.24%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (6 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (158 samples, 0.37%)torrust_tracker::servers::udp::handlers::handle_scrape::{{closure}} (8 samples, 0.02%)torrust_tracker::core::Tracker::scrape::{{closure}} (7 samples, 0.02%)core::fmt::Formatter::new (7 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (19 samples, 0.04%)core::fmt::num::imp::fmt_u64 (18 samples, 0.04%)core::intrinsics::copy_nonoverlapping (6 samples, 0.01%)<T as alloc::string::ToString>::to_string (35 samples, 0.08%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (7 samples, 0.02%)core::fmt::num::imp::fmt_u64 (7 samples, 0.02%)torrust_tracker::servers::udp::logging::map_action_name (6 samples, 0.01%)alloc::str::<impl alloc::borrow::ToOwned for str>::to_owned (5 samples, 0.01%)torrust_tracker::shared::bit_torrent::info_hash::InfoHash::to_hex_string (8 samples, 0.02%)<T as alloc::string::ToString>::to_string (8 samples, 0.02%)torrust_tracker::servers::udp::logging::log_request (58 samples, 0.14%)<T as alloc::string::ToString>::to_string (7 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (273 samples, 0.64%)torrust_tracker::servers::udp::logging::log_response (16 samples, 0.04%)alloc::vec::from_elem (36 samples, 0.09%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (36 samples, 0.09%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (36 
samples, 0.09%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (36 samples, 0.09%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (36 samples, 0.09%)alloc::alloc::Global::alloc_impl (36 samples, 0.09%)alloc::alloc::alloc_zeroed (36 samples, 0.09%)__rdl_alloc_zeroed (36 samples, 0.09%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (36 samples, 0.09%)core::ptr::drop_in_place<std::io::cursor::Cursor<alloc::vec::Vec<u8>>> (5 samples, 0.01%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (5 samples, 0.01%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (5 samples, 0.01%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (5 samples, 0.01%)[unknown] (11 samples, 0.03%)[unknown] (14 samples, 0.03%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (646 samples, 1.53%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (227 samples, 0.54%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (133 samples, 0.31%)tokio::net::udp::UdpSocket::send_to::{{closure}} (119 samples, 0.28%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (111 samples, 0.26%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (93 samples, 0.22%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (65 samples, 0.15%)mio::net::udp::UdpSocket::send_to (58 samples, 0.14%)mio::io_source::IoSource<T>::do_io (58 samples, 0.14%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (58 samples, 0.14%)mio::net::udp::UdpSocket::send_to::{{closure}} (58 samples, 0.14%)std::net::udp::UdpSocket::send_to (58 samples, 0.14%)std::sys_common::net::UdpSocket::send_to (56 samples, 0.13%)std::sys::pal::unix::cvt (38 samples, 0.09%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (33 samples, 0.08%)alloc::vec::Vec<T>::with_capacity (6 samples, 0.01%)alloc::vec::Vec<T,A>::with_capacity_in (6 samples, 
0.01%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (5 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (5 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (49 samples, 0.12%)tokio::net::udp::UdpSocket::ready::{{closure}} (49 samples, 0.12%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (65 samples, 0.15%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (9 samples, 0.02%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (7 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (91 samples, 0.21%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (91 samples, 0.21%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (16 samples, 0.04%)tokio::task::spawn::spawn (16 samples, 0.04%)tokio::task::spawn::spawn_inner (16 samples, 0.04%)tokio::runtime::context::current::with_current (16 samples, 0.04%)std::thread::local::LocalKey<T>::try_with (16 samples, 0.04%)tokio::runtime::context::current::with_current::{{closure}} (16 samples, 0.04%)core::option::Option<T>::map (16 samples, 0.04%)tokio::task::spawn::spawn_inner::{{closure}} (16 samples, 0.04%)tokio::runtime::scheduler::Handle::spawn (16 samples, 0.04%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (16 samples, 0.04%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (16 samples, 0.04%)tokio::runtime::task::list::OwnedTasks<S>::bind (15 samples, 0.04%)tokio::runtime::task::new_task (14 samples, 0.03%)tokio::runtime::task::raw::RawTask::new (14 samples, 0.03%)tokio::runtime::task::core::Cell<T,S>::new (14 samples, 0.03%)alloc::boxed::Box<T>::new (7 samples, 0.02%)alloc::alloc::exchange_malloc (7 samples, 0.02%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (7 samples, 0.02%)alloc::alloc::Global::alloc_impl (7 samples, 0.02%)alloc::alloc::alloc (7 samples, 0.02%)__rdl_alloc (7 samples, 
0.02%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (7 samples, 0.02%)std::sys::pal::unix::alloc::aligned_malloc (7 samples, 0.02%)tracing::span::Span::record_all (23 samples, 0.05%)unlink_chunk (56 samples, 0.13%)uuid::builder::Builder::with_variant (13 samples, 0.03%)[unknown] (8 samples, 0.02%)uuid::builder::Builder::from_random_bytes (17 samples, 0.04%)[unknown] (7,184 samples, 16.96%)[unknown]uuid::v4::<impl uuid::Uuid>::new_v4 (72 samples, 0.17%)uuid::rng::bytes (54 samples, 0.13%)rand::random (54 samples, 0.13%)rand::rng::Rng::gen (52 samples, 0.12%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (52 samples, 0.12%)rand::rng::Rng::gen (52 samples, 0.12%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (52 samples, 0.12%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (52 samples, 0.12%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (52 samples, 0.12%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (52 samples, 0.12%)[unknown] (24 samples, 0.06%)__GI___libc_free (12 samples, 0.03%)__GI___libc_malloc (11 samples, 0.03%)__memcmp_evex_movbe (15 samples, 0.04%)__memcpy_avx512_unaligned_erms (15 samples, 0.04%)_int_free (14 samples, 0.03%)_int_malloc (36 samples, 0.09%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (6 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::grow_amortized (5 samples, 0.01%)[unknown] (5 samples, 0.01%)[unknown] (5 samples, 0.01%)[unknown] (5 samples, 0.01%)[unknown] (5 samples, 0.01%)[unknown] (5 samples, 0.01%)__malloc_arena_thread_freeres (7 samples, 0.02%)tcache_thread_shutdown (7 samples, 0.02%)__GI___libc_free (7 samples, 0.02%)_int_free (7 samples, 0.02%)heap_trim (7 samples, 0.02%)shrink_heap (7 samples, 0.02%)__GI_madvise (7 samples, 
0.02%)[unknown] (7 samples, 0.02%)[unknown] (7 samples, 0.02%)[unknown] (7 samples, 0.02%)[unknown] (7 samples, 0.02%)[unknown] (7 samples, 0.02%)[unknown] (7 samples, 0.02%)[unknown] (7 samples, 0.02%)[unknown] (7 samples, 0.02%)[unknown] (7 samples, 0.02%)[unknown] (7 samples, 0.02%)[unknown] (7 samples, 0.02%)advise_stack_range (12 samples, 0.03%)__GI_madvise (12 samples, 0.03%)[unknown] (12 samples, 0.03%)[unknown] (12 samples, 0.03%)[unknown] (12 samples, 0.03%)[unknown] (12 samples, 0.03%)[unknown] (12 samples, 0.03%)[unknown] (12 samples, 0.03%)[unknown] (12 samples, 0.03%)[unknown] (11 samples, 0.03%)[unknown] (11 samples, 0.03%)[unknown] (11 samples, 0.03%)[unknown] (11 samples, 0.03%)[unknown] (11 samples, 0.03%)std::sync::condvar::Condvar::wait_timeout (33 samples, 0.08%)std::sys::sync::condvar::futex::Condvar::wait_timeout (33 samples, 0.08%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (33 samples, 0.08%)std::sys::pal::unix::futex::futex_wait (33 samples, 0.08%)syscall (32 samples, 0.08%)[unknown] (32 samples, 0.08%)[unknown] (32 samples, 0.08%)[unknown] (32 samples, 0.08%)[unknown] (32 samples, 0.08%)[unknown] (32 samples, 0.08%)[unknown] (32 samples, 0.08%)[unknown] (32 samples, 0.08%)[unknown] (32 samples, 0.08%)[unknown] (32 samples, 0.08%)[unknown] (31 samples, 0.07%)[unknown] (31 samples, 0.07%)[unknown] (24 samples, 0.06%)[unknown] (24 samples, 0.06%)[unknown] (16 samples, 0.04%)[unknown] (12 samples, 0.03%)[unknown] (12 samples, 0.03%)[unknown] (12 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (45 samples, 0.11%)std::sync::mutex::Mutex<T>::lock (45 samples, 0.11%)std::sys::sync::mutex::futex::Mutex::lock (45 samples, 0.11%)std::sys::sync::mutex::futex::Mutex::lock_contended (45 samples, 0.11%)std::sys::pal::unix::futex::futex_wait (45 samples, 0.11%)syscall (45 samples, 0.11%)[unknown] (45 samples, 0.11%)[unknown] (45 samples, 0.11%)[unknown] (45 samples, 0.11%)[unknown] (45 samples, 0.11%)[unknown] (45 samples, 
0.11%)[unknown] (44 samples, 0.10%)[unknown] (44 samples, 0.10%)[unknown] (44 samples, 0.10%)[unknown] (44 samples, 0.10%)[unknown] (42 samples, 0.10%)[unknown] (34 samples, 0.08%)[unknown] (27 samples, 0.06%)[unknown] (21 samples, 0.05%)[unknown] (9 samples, 0.02%)[unknown] (9 samples, 0.02%)[unknown] (5 samples, 0.01%)[[vdso]] (149 samples, 0.35%)__ieee754_pow_fma (6 samples, 0.01%)std::f64::<impl f64>::powf (168 samples, 0.40%)__pow (163 samples, 0.38%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (186 samples, 0.44%)std::time::Instant::now (5 samples, 0.01%)std::sys::pal::unix::time::Instant::now (5 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (5 samples, 0.01%)__GI___clock_gettime (5 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_processing_scheduled_tasks (11 samples, 0.03%)std::time::Instant::now (11 samples, 0.03%)std::sys::pal::unix::time::Instant::now (11 samples, 0.03%)std::sys::pal::unix::time::Timespec::now (6 samples, 0.01%)__GI___clock_gettime (6 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (21 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (16 samples, 0.04%)tokio::runtime::scheduler::multi_thread::park::Parker::park_timeout (16 samples, 0.04%)tokio::runtime::driver::Driver::park_timeout (16 samples, 0.04%)tokio::runtime::driver::TimeDriver::park_timeout (16 samples, 0.04%)tokio::runtime::time::Driver::park_timeout (16 samples, 0.04%)tokio::runtime::time::Driver::park_internal (14 samples, 0.03%)tokio::runtime::io::driver::Driver::turn (14 samples, 0.03%)mio::poll::Poll::poll (14 samples, 0.03%)mio::sys::unix::selector::epoll::Selector::select (14 samples, 0.03%)epoll_wait (14 samples, 0.03%)[unknown] (14 samples, 0.03%)[unknown] (14 samples, 0.03%)[unknown] (14 samples, 0.03%)[unknown] (14 samples, 0.03%)[unknown] (12 samples, 0.03%)[unknown] (11 samples, 0.03%)[unknown] (7 samples, 
0.02%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (11 samples, 0.03%)alloc::sync::Arc<T,A>::inner (11 samples, 0.03%)core::ptr::non_null::NonNull<T>::as_ref (11 samples, 0.03%)core::result::Result<T,E>::is_ok (6 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<()>> (5 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (5 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::unlock (5 samples, 0.01%)core::bool::<impl bool>::then (8 samples, 0.02%)std::sys::pal::unix::futex::futex_wait (2,806 samples, 6.63%)std::sys:..syscall (2,774 samples, 6.55%)syscall[unknown] (2,721 samples, 6.42%)[unknown][unknown] (2,688 samples, 6.35%)[unknown][unknown] (2,633 samples, 6.22%)[unknown][unknown] (2,595 samples, 6.13%)[unknown][unknown] (2,514 samples, 5.94%)[unknown][unknown] (2,379 samples, 5.62%)[unknow..[unknown] (2,149 samples, 5.07%)[unkno..[unknown] (1,931 samples, 4.56%)[unkn..[unknown] (1,811 samples, 4.28%)[unkn..[unknown] (1,582 samples, 3.74%)[unk..[unknown] (1,264 samples, 2.98%)[un..[unknown] (954 samples, 2.25%)[..[unknown] (635 samples, 1.50%)[unknown] (334 samples, 0.79%)[unknown] (250 samples, 0.59%)[unknown] (186 samples, 0.44%)[unknown] (154 samples, 0.36%)[unknown] (28 samples, 0.07%)core::result::Result<T,E>::is_err (34 samples, 0.08%)core::result::Result<T,E>::is_ok (34 samples, 0.08%)std::sync::condvar::Condvar::wait (2,843 samples, 6.71%)std::sync..std::sys::sync::condvar::futex::Condvar::wait (2,843 samples, 6.71%)std::sys:..std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (2,843 samples, 6.71%)std::sys:..std::sys::sync::mutex::futex::Mutex::lock (37 samples, 0.09%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (2,863 samples, 6.76%)tokio::ru..tokio::loom::std::mutex::Mutex<T>::lock (11 samples, 0.03%)std::sync::mutex::Mutex<T>::lock (6 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (6 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (6 
samples, 0.01%)core::sync::atomic::atomic_compare_exchange (6 samples, 0.01%)core::array::<impl core::default::Default for [T: 32]>::default (15 samples, 0.04%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (7 samples, 0.02%)<alloc::vec::Vec<T,A> as core::ops::index::Index<I>>::index (5 samples, 0.01%)core::slice::index::<impl core::ops::index::Index<I> for [T]>::index (5 samples, 0.01%)<usize as core::slice::index::SliceIndex<[T]>>::index (5 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (6 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_expiration (14 samples, 0.03%)tokio::runtime::time::wheel::Wheel::next_expiration (37 samples, 0.09%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::is_empty (5 samples, 0.01%)core::option::Option<T>::is_some (5 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (65 samples, 0.15%)core::option::Option<T>::map (18 samples, 0.04%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (20 samples, 0.05%)core::result::Result<T,E>::map (7 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (7 samples, 0.02%)[[vdso]] (17 samples, 0.04%)[unknown] (3,290 samples, 7.77%)[unknown][unknown] (3,266 samples, 7.71%)[unknown][unknown] (3,253 samples, 7.68%)[unknown][unknown] (3,203 samples, 7.56%)[unknown][unknown] (3,044 samples, 7.19%)[unknown][unknown] (2,959 samples, 6.99%)[unknown][unknown] (2,283 samples, 5.39%)[unknow..[unknown] (1,822 samples, 4.30%)[unkn..[unknown] (1,568 samples, 3.70%)[unk..[unknown] (1,352 samples, 3.19%)[un..[unknown] (910 samples, 2.15%)[..[unknown] (688 samples, 1.62%)[unknown] (498 samples, 1.18%)[unknown] (326 samples, 0.77%)[unknown] (119 samples, 0.28%)[unknown] (101 samples, 0.24%)[unknown] (65 samples, 0.15%)[unknown] (61 samples, 0.14%)[unknown] (20 samples, 0.05%)mio::poll::Poll::poll (3,385 samples, 
7.99%)mio::poll::..mio::sys::unix::selector::epoll::Selector::select (3,385 samples, 7.99%)mio::sys::u..epoll_wait (3,364 samples, 7.94%)epoll_wait__GI___pthread_disable_asynccancel (13 samples, 0.03%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (46 samples, 0.11%)tokio::util::bit::Pack::pack (34 samples, 0.08%)core::result::Result<T,E>::is_err (7 samples, 0.02%)core::result::Result<T,E>::is_ok (7 samples, 0.02%)tokio::runtime::io::driver::Driver::turn (3,497 samples, 8.26%)tokio::runt..tokio::runtime::io::scheduled_io::ScheduledIo::wake (40 samples, 0.09%)tokio::loom::std::mutex::Mutex<T>::lock (13 samples, 0.03%)std::sync::mutex::Mutex<T>::lock (13 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::lock (12 samples, 0.03%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (7 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (7 samples, 0.02%)tokio::time::clock::Clock::now (6 samples, 0.01%)tokio::time::clock::now (6 samples, 0.01%)std::time::Instant::now (6 samples, 0.01%)std::sys::pal::unix::time::Instant::now (6 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (5 samples, 0.01%)__GI___clock_gettime (5 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_expiration (6 samples, 0.01%)tokio::runtime::time::source::TimeSource::now (10 samples, 0.02%)tokio::time::clock::Clock::now (10 samples, 0.02%)tokio::time::clock::now (10 samples, 0.02%)std::time::Instant::now (10 samples, 0.02%)std::sys::pal::unix::time::Instant::now (10 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (10 samples, 0.02%)tokio::runtime::time::wheel::Wheel::next_expiration (7 samples, 0.02%)tokio::runtime::time::Driver::park_internal (3,528 samples, 8.33%)tokio::runti..tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (3,603 samples, 8.51%)tokio::runti..tokio::runtime::driver::Driver::park (3,602 samples, 8.50%)tokio::runti..tokio::runtime::driver::TimeDriver::park (3,602 samples, 
8.50%)tokio::runti..tokio::runtime::time::Driver::park (3,602 samples, 8.50%)tokio::runti..tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (6,494 samples, 15.33%)tokio::runtime::schedul..tokio::runtime::scheduler::multi_thread::park::Parker::park (6,484 samples, 15.31%)tokio::runtime::schedul..tokio::runtime::scheduler::multi_thread::park::Inner::park (6,484 samples, 15.31%)tokio::runtime::schedul..core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (11 samples, 0.03%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (11 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::unlock (11 samples, 0.03%)std::sync::mutex::MutexGuard<T>::new (19 samples, 0.04%)std::sync::poison::Flag::guard (19 samples, 0.04%)std::thread::panicking (19 samples, 0.04%)std::panicking::panicking (19 samples, 0.04%)std::panicking::panic_count::count_is_zero (19 samples, 0.04%)core::sync::atomic::AtomicUsize::load (18 samples, 0.04%)core::sync::atomic::atomic_load (18 samples, 0.04%)core::result::Result<T,E>::is_err (24 samples, 0.06%)core::result::Result<T,E>::is_ok (24 samples, 0.06%)core::sync::atomic::AtomicU32::compare_exchange (18 samples, 0.04%)core::sync::atomic::atomic_compare_exchange (18 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (92 samples, 0.22%)tokio::loom::std::mutex::Mutex<T>::lock (73 samples, 0.17%)std::sync::mutex::Mutex<T>::lock (72 samples, 0.17%)std::sys::sync::mutex::futex::Mutex::lock (53 samples, 0.13%)std::sys::sync::mutex::futex::Mutex::lock_contended (11 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::spin (5 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (5 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (5 samples, 0.01%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (52 samples, 
0.12%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (52 samples, 0.12%)core::slice::<impl [T]>::contains (141 samples, 0.33%)<T as core::slice::cmp::SliceContains>::slice_contains (141 samples, 0.33%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (141 samples, 0.33%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (36 samples, 0.09%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (36 samples, 0.09%)std::sync::mutex::MutexGuard<T>::new (5 samples, 0.01%)std::sync::poison::Flag::guard (5 samples, 0.01%)std::thread::panicking (5 samples, 0.01%)std::panicking::panicking (5 samples, 0.01%)std::panicking::panic_count::count_is_zero (5 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (157 samples, 0.37%)tokio::loom::std::mutex::Mutex<T>::lock (11 samples, 0.03%)std::sync::mutex::Mutex<T>::lock (11 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::lock (6 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (166 samples, 0.39%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (12 samples, 0.03%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (12 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::unlock (11 samples, 0.03%)core::result::Result<T,E>::is_err (15 samples, 0.04%)core::result::Result<T,E>::is_ok (15 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (30 samples, 0.07%)std::sync::mutex::Mutex<T>::lock (29 samples, 0.07%)std::sys::sync::mutex::futex::Mutex::lock (29 samples, 0.07%)std::sys::sync::mutex::futex::Mutex::lock_contended (10 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::spin (7 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (48 samples, 0.11%)tokio::runtime::scheduler::multi_thread::idle::State::dec_num_unparked (6 samples, 
0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (8 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (7 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (5 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (8 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (7 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (7 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock_contended (5 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (28 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (16 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (16 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (6 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (6,882 samples, 16.25%)tokio::runtime::scheduler..tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (85 samples, 0.20%)core::cell::RefCell<T>::borrow_mut (8 samples, 0.02%)core::cell::RefCell<T>::try_borrow_mut (8 samples, 0.02%)core::cell::BorrowRefMut::new (8 samples, 0.02%)__memcpy_avx512_unaligned_erms (45 samples, 0.11%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (88 samples, 0.21%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (88 samples, 0.21%)__memcpy_avx512_unaligned_erms (87 samples, 0.21%)std::panic::catch_unwind (137 samples, 0.32%)std::panicking::try (137 samples, 0.32%)std::panicking::try::do_call (137 samples, 0.32%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (137 samples, 0.32%)core::ops::function::FnOnce::call_once (137 samples, 0.32%)tokio::runtime::task::harness::Harness<T,S>::complete::{{closure}} (137 samples, 
0.32%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (137 samples, 0.32%)tokio::runtime::task::core::Core<T,S>::set_stage (135 samples, 0.32%)<core::num::nonzero::NonZero<T> as core::cmp::PartialEq>::eq (5 samples, 0.01%)core::cmp::impls::<impl core::cmp::PartialEq for u64>::eq (5 samples, 0.01%)std::sync::poison::Flag::done (6 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>,tokio::runtime::task::core::Header>>> (7 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (7 samples, 0.02%)core::result::Result<T,E>::is_err (70 samples, 0.17%)core::result::Result<T,E>::is_ok (70 samples, 0.17%)tokio::runtime::task::harness::Harness<T,S>::complete (240 samples, 0.57%)tokio::runtime::task::harness::Harness<T,S>::release (103 samples, 0.24%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (101 samples, 0.24%)tokio::runtime::task::list::OwnedTasks<S>::remove (98 samples, 0.23%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (88 samples, 0.21%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (75 samples, 0.18%)tokio::loom::std::mutex::Mutex<T>::lock (74 samples, 0.17%)std::sync::mutex::Mutex<T>::lock (74 samples, 0.17%)std::sys::sync::mutex::futex::Mutex::lock (73 samples, 0.17%)tokio::runtime::task::harness::cancel_task (11 samples, 0.03%)std::panic::catch_unwind (11 samples, 0.03%)std::panicking::try (11 samples, 0.03%)std::panicking::try::do_call (11 samples, 0.03%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (11 samples, 0.03%)core::ops::function::FnOnce::call_once (11 samples, 
0.03%)tokio::runtime::task::harness::cancel_task::{{closure}} (11 samples, 0.03%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (11 samples, 0.03%)tokio::runtime::task::core::Core<T,S>::set_stage (11 samples, 0.03%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (10 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (10 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (10 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::core::Tracker> (10 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker::core::torrent::repository::RepositoryAsyncSingle>> (10 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (10 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (10 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::core::torrent::repository::RepositoryAsyncSingle> (10 samples, 0.02%)core::ptr::drop_in_place<tokio::sync::rwlock::RwLock<alloc::collections::btree::map::BTreeMap<torrust_tracker::shared::bit_torrent::info_hash::InfoHash,torrust_tracker::core::torrent::Entry>>> (10 samples, 0.02%)core::ptr::drop_in_place<core::cell::UnsafeCell<alloc::collections::btree::map::BTreeMap<torrust_tracker::shared::bit_torrent::info_hash::InfoHash,torrust_tracker::core::torrent::Entry>>> (10 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::BTreeMap<torrust_tracker::shared::bit_torrent::info_hash::InfoHash,torrust_tracker::core::torrent::Entry>> (10 samples, 0.02%)<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (10 samples, 0.02%)core::mem::drop (10 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::IntoIter<torrust_tracker::shared::bit_torrent::info_hash::InfoHash,torrust_tracker::core::torrent::Entry>> (10 samples, 0.02%)<alloc::collections::btree::map::IntoIter<K,V,A> as core::ops::drop::Drop>::drop (10 samples, 
0.02%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,NodeType>,alloc::collections::btree::node::marker::KV>::drop_key_val (8 samples, 0.02%)core::mem::maybe_uninit::MaybeUninit<T>::assume_init_drop (8 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::core::torrent::Entry> (8 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::BTreeMap<torrust_tracker::core::peer::Id,torrust_tracker::core::peer::Peer>> (8 samples, 0.02%)__GI___libc_free (8 samples, 0.02%)_int_free (8 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (20 samples, 0.05%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (15 samples, 0.04%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (10 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (14 samples, 0.03%)core::sync::atomic::AtomicUsize::fetch_add (13 samples, 0.03%)core::sync::atomic::atomic_add (13 samples, 0.03%)__memcpy_avx512_unaligned_erms (20 samples, 0.05%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (22 samples, 0.05%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (22 samples, 0.05%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker::core::Tracker>> (28 samples, 0.07%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (28 samples, 0.07%)core::cmp::Ord::min (7 samples, 0.02%)core::cmp::min_by (7 samples, 0.02%)std::io::cursor::Cursor<T>::remaining_slice (15 samples, 0.04%)core::slice::index::<impl core::ops::index::Index<I> for [T]>::index (8 samples, 0.02%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::index (8 samples, 0.02%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (8 samples, 0.02%)<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (8 samples, 0.02%)<std::io::cursor::Cursor<T> as 
std::io::Read>::read_exact (16 samples, 0.04%)std::io::cursor::Cursor<T>::remaining_slice (6 samples, 0.01%)core::slice::index::<impl core::ops::index::Index<I> for [T]>::index (5 samples, 0.01%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::index (5 samples, 0.01%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (5 samples, 0.01%)<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (5 samples, 0.01%)byteorder::io::ReadBytesExt::read_i32 (16 samples, 0.04%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (16 samples, 0.04%)std::io::impls::<impl std::io::Read for &[u8]>::read_exact (10 samples, 0.02%)byteorder::io::ReadBytesExt::read_i64 (8 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (8 samples, 0.02%)aquatic_udp_protocol::request::Request::from_bytes (136 samples, 0.32%)__GI___lll_lock_wait_private (6 samples, 0.01%)__GI___lll_lock_wake_private (351 samples, 0.83%)[unknown] (330 samples, 0.78%)[unknown] (328 samples, 0.77%)[unknown] (311 samples, 0.73%)[unknown] (282 samples, 0.67%)[unknown] (264 samples, 0.62%)[unknown] (157 samples, 0.37%)[unknown] (129 samples, 0.30%)[unknown] (56 samples, 0.13%)[unknown] (47 samples, 0.11%)[unknown] (22 samples, 0.05%)[unknown] (11 samples, 0.03%)[unknown] (7 samples, 0.02%)[unknown] (7 samples, 0.02%)__GI___lll_lock_wait_private (664 samples, 1.57%)futex_wait (646 samples, 1.53%)[unknown] (631 samples, 1.49%)[unknown] (622 samples, 1.47%)[unknown] (619 samples, 1.46%)[unknown] (605 samples, 1.43%)[unknown] (578 samples, 1.36%)[unknown] (544 samples, 1.28%)[unknown] (488 samples, 1.15%)[unknown] (351 samples, 0.83%)[unknown] (326 samples, 0.77%)[unknown] (284 samples, 0.67%)[unknown] (227 samples, 0.54%)[unknown] (163 samples, 0.38%)[unknown] (103 samples, 0.24%)[unknown] (34 samples, 0.08%)[unknown] (31 samples, 0.07%)[unknown] (23 samples, 0.05%)[unknown] (21 samples, 0.05%)[unknown] 
(7 samples, 0.02%)_int_free (788 samples, 1.86%)_..__GI___libc_free (1,148 samples, 2.71%)__..core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (1,177 samples, 2.78%)co..core::ptr::drop_in_place<alloc::vec::Vec<u8>> (1,177 samples, 2.78%)co..core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (1,177 samples, 2.78%)co..<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (1,177 samples, 2.78%)<a..<alloc::alloc::Global as core::alloc::Allocator>::deallocate (1,177 samples, 2.78%)<a..alloc::alloc::dealloc (1,177 samples, 2.78%)al..__rdl_dealloc (1,177 samples, 2.78%)__..std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (1,177 samples, 2.78%)st..tracing::span::Span::record_all (28 samples, 0.07%)unlink_chunk (27 samples, 0.06%)core::result::Result<T,E>::expect (30 samples, 0.07%)core::result::Result<T,E>::map_err (6 samples, 0.01%)std::time::Instant::elapsed (18 samples, 0.04%)std::time::Instant::now (9 samples, 0.02%)std::sys::pal::unix::time::Instant::now (9 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (9 samples, 0.02%)__GI___clock_gettime (9 samples, 0.02%)std::sys::pal::unix::cvt (6 samples, 0.01%)__GI_getsockname (1,369 samples, 3.23%)__G..[unknown] (1,334 samples, 3.15%)[un..[unknown] (1,330 samples, 3.14%)[un..[unknown] (1,271 samples, 3.00%)[un..[unknown] (1,256 samples, 2.97%)[un..[unknown] (1,067 samples, 2.52%)[u..[unknown] (826 samples, 1.95%)[..[unknown] (410 samples, 0.97%)[unknown] (85 samples, 0.20%)[unknown] (27 samples, 0.06%)[unknown] (27 samples, 0.06%)[unknown] (25 samples, 0.06%)[unknown] (23 samples, 0.05%)[unknown] (11 samples, 0.03%)[unknown] (5 samples, 0.01%)tokio::net::udp::UdpSocket::local_addr (1,381 samples, 3.26%)tok..mio::net::udp::UdpSocket::local_addr (1,381 samples, 3.26%)mio..std::net::tcp::TcpListener::local_addr (1,381 samples, 3.26%)std..std::sys_common::net::TcpListener::socket_addr (1,381 samples, 3.26%)std..std::sys_common::net::sockname 
(1,379 samples, 3.26%)std..std::sys_common::net::TcpListener::socket_addr::{{closure}} (1,373 samples, 3.24%)std..[[vdso]] (28 samples, 0.07%)rand_chacha::guts::ChaCha::pos64 (20 samples, 0.05%)<ppv_lite86::soft::x2<W,G> as core::ops::arith::AddAssign>::add_assign (6 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::AddAssign>::add_assign (6 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::Add>::add (6 samples, 0.01%)core::core_arch::x86::avx2::_mm256_add_epi32 (6 samples, 0.01%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (5 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (5 samples, 0.01%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (5 samples, 0.01%)rand_chacha::guts::round (23 samples, 0.05%)<rand_chacha::chacha::ChaCha12Core as rand_core::block::BlockRngCore>::generate (79 samples, 0.19%)rand_chacha::guts::ChaCha::refill4 (79 samples, 0.19%)rand_chacha::guts::refill_wide::impl_avx2 (49 samples, 0.12%)rand_chacha::guts::refill_wide::fn_impl (49 samples, 0.12%)rand_chacha::guts::refill_wide_impl (49 samples, 0.12%)torrust_tracker::servers::udp::handlers::RequestId::make (88 samples, 0.21%)uuid::v4::<impl uuid::Uuid>::new_v4 (87 samples, 0.21%)uuid::rng::bytes (87 samples, 0.21%)rand::random (87 samples, 0.21%)rand::rng::Rng::gen (87 samples, 0.21%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (87 samples, 0.21%)rand::rng::Rng::gen (87 samples, 0.21%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (87 samples, 0.21%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (87 samples, 0.21%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (87 
samples, 0.21%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (87 samples, 0.21%)rand_core::block::BlockRng<R>::generate_and_set (81 samples, 0.19%)<rand::rngs::adapter::reseeding::ReseedingCore<R,Rsdr> as rand_core::block::BlockRngCore>::generate (81 samples, 0.19%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (7 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (7 samples, 0.02%)__memcmp_evex_movbe (34 samples, 0.08%)<torrust_tracker::shared::bit_torrent::info_hash::InfoHash as core::cmp::Ord>::cmp (72 samples, 0.17%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (72 samples, 0.17%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (72 samples, 0.17%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (72 samples, 0.17%)<u8 as core::slice::cmp::SliceOrd>::compare (72 samples, 0.17%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (10 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (104 samples, 0.25%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (104 samples, 0.25%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (98 samples, 0.23%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (98 samples, 0.23%)core::iter::traits::iterator::Iterator::collect (12 samples, 0.03%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (12 samples, 0.03%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (12 samples, 0.03%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (8 samples, 0.02%)alloc::collections::btree::search::<impl 
alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (8 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (8 samples, 0.02%)tokio::sync::batch_semaphore::Waiter::assign_permits (5 samples, 0.01%)syscall (5 samples, 0.01%)[unknown] (5 samples, 0.01%)[unknown] (5 samples, 0.01%)[unknown] (5 samples, 0.01%)tokio::runtime::task::raw::RawTask::schedule (10 samples, 0.02%)tokio::runtime::task::raw::schedule (10 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::schedule (10 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task (10 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::with_current (10 samples, 0.02%)tokio::runtime::context::with_scheduler (10 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (10 samples, 0.02%)tokio::runtime::context::with_scheduler::{{closure}} (10 samples, 0.02%)tokio::runtime::context::scoped::Scoped<T>::with (10 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (10 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (10 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (10 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (10 samples, 0.02%)core::ptr::drop_in_place<tokio::sync::rwlock::read_guard::RwLockReadGuard<alloc::collections::btree::map::BTreeMap<torrust_tracker::shared::bit_torrent::info_hash::InfoHash,torrust_tracker::core::torrent::Entry>>> (20 samples, 
0.05%)tokio::sync::batch_semaphore::Semaphore::add_permits_locked (18 samples, 0.04%)tokio::util::wake_list::WakeList::wake_all (11 samples, 0.03%)core::task::wake::Waker::wake (11 samples, 0.03%)tokio::runtime::task::waker::wake_by_val (11 samples, 0.03%)tokio::runtime::task::harness::<impl tokio::runtime::task::raw::RawTask>::wake_by_val (11 samples, 0.03%)torrust_tracker::core::torrent::Entry::get_peers_for_peer (5 samples, 0.01%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer::{{closure}} (158 samples, 0.37%)torrust_tracker::core::torrent::repository::RepositoryAsyncSingle::get_torrents::{{closure}} (16 samples, 0.04%)tokio::sync::rwlock::RwLock<T>::read::{{closure}} (16 samples, 0.04%)tokio::sync::rwlock::RwLock<T>::read::{{closure}}::{{closure}} (16 samples, 0.04%)<tokio::sync::batch_semaphore::Acquire as core::future::future::Future>::poll (16 samples, 0.04%)tokio::sync::batch_semaphore::Semaphore::poll_acquire (14 samples, 0.03%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (8 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (13 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (13 samples, 0.03%)core::slice::iter::Iter<T>::post_inc_start (5 samples, 0.01%)core::ptr::non_null::NonNull<T>::add (5 samples, 0.01%)__memcmp_evex_movbe (20 samples, 0.05%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (15 samples, 0.04%)<torrust_tracker::shared::bit_torrent::info_hash::InfoHash as core::cmp::Ord>::cmp (45 samples, 0.11%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (45 samples, 0.11%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (45 samples, 0.11%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (45 samples, 0.11%)<u8 as core::slice::cmp::SliceOrd>::compare (45 samples, 0.11%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (116 samples, 0.27%)alloc::collections::btree::search::<impl 
alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (114 samples, 0.27%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (108 samples, 0.26%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (108 samples, 0.26%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Immut,K,V,Type>::keys (6 samples, 0.01%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (8 samples, 0.02%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (8 samples, 0.02%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert (6 samples, 0.01%)syscall (16 samples, 0.04%)[unknown] (15 samples, 0.04%)[unknown] (14 samples, 0.03%)[unknown] (11 samples, 0.03%)[unknown] (11 samples, 0.03%)[unknown] (11 samples, 0.03%)[unknown] (10 samples, 0.02%)[unknown] (9 samples, 0.02%)[unknown] (6 samples, 0.01%)[unknown] (5 samples, 0.01%)tokio::runtime::context::with_scheduler (8 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (7 samples, 0.02%)tokio::runtime::context::with_scheduler::{{closure}} (6 samples, 0.01%)tokio::runtime::context::scoped::Scoped<T>::with (6 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (5 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (5 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (8 samples, 
0.02%)core::sync::atomic::atomic_add (8 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (9 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (9 samples, 0.02%)tokio::runtime::context::with_scheduler (40 samples, 0.09%)std::thread::local::LocalKey<T>::try_with (40 samples, 0.09%)tokio::runtime::context::with_scheduler::{{closure}} (40 samples, 0.09%)tokio::runtime::context::scoped::Scoped<T>::with (40 samples, 0.09%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (40 samples, 0.09%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (40 samples, 0.09%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (40 samples, 0.09%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (40 samples, 0.09%)tokio::runtime::task::waker::wake_by_val (44 samples, 0.10%)tokio::runtime::task::harness::<impl tokio::runtime::task::raw::RawTask>::wake_by_val (44 samples, 0.10%)tokio::runtime::task::raw::RawTask::schedule (44 samples, 0.10%)tokio::runtime::task::raw::schedule (43 samples, 0.10%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::schedule (42 samples, 0.10%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task (42 samples, 0.10%)tokio::runtime::scheduler::multi_thread::worker::with_current (42 samples, 0.10%)tokio::sync::batch_semaphore::Semaphore::add_permits_locked (59 samples, 0.14%)tokio::util::wake_list::WakeList::wake_all (46 samples, 0.11%)core::task::wake::Waker::wake (46 samples, 0.11%)<core::iter::adapters::filter::Filter<I,P> as 
core::iter::traits::iterator::Iterator>::count (7 samples, 0.02%)core::iter::traits::iterator::Iterator::sum (7 samples, 0.02%)<usize as core::iter::traits::accum::Sum>::sum (7 samples, 0.02%)<core::iter::adapters::map::Map<I,F> as core::iter::traits::iterator::Iterator>::fold (7 samples, 0.02%)core::iter::traits::iterator::Iterator::fold (7 samples, 0.02%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (7 samples, 0.02%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (7 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (7 samples, 0.02%)core::ptr::drop_in_place<tokio::sync::rwlock::write_guard::RwLockWriteGuard<alloc::collections::btree::map::BTreeMap<torrust_tracker::shared::bit_torrent::info_hash::InfoHash,torrust_tracker::core::torrent::Entry>>> (82 samples, 0.19%)torrust_tracker::core::torrent::Entry::insert_or_update_peer (22 samples, 0.05%)torrust_tracker::core::torrent::Entry::get_stats (20 samples, 0.05%)tokio::runtime::coop::poll_proceed (5 samples, 0.01%)tokio::runtime::context::budget (5 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (5 samples, 0.01%)tokio::runtime::context::budget::{{closure}} (5 samples, 0.01%)tokio::runtime::coop::poll_proceed::{{closure}} (5 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (11 samples, 0.03%)std::sync::mutex::Mutex<T>::lock (11 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::lock (10 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock_contended (6 samples, 0.01%)<tokio::sync::batch_semaphore::Acquire as core::future::future::Future>::poll (28 samples, 0.07%)tokio::sync::batch_semaphore::Semaphore::poll_acquire (19 samples, 0.04%)tokio::sync::rwlock::RwLock<T>::write::{{closure}} (29 samples, 0.07%)tokio::sync::rwlock::RwLock<T>::write::{{closure}}::{{closure}} (29 samples, 0.07%)_int_malloc (7 samples, 
0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (9 samples, 0.02%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (8 samples, 0.02%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Owned,K,V,alloc::collections::btree::node::marker::Leaf>::new_leaf (8 samples, 0.02%)alloc::collections::btree::node::LeafNode<K,V>::new (8 samples, 0.02%)alloc::boxed::Box<T,A>::new_uninit_in (8 samples, 0.02%)alloc::boxed::Box<T,A>::try_new_uninit_in (8 samples, 0.02%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (8 samples, 0.02%)alloc::alloc::Global::alloc_impl (8 samples, 0.02%)alloc::alloc::alloc (8 samples, 0.02%)__rdl_alloc (8 samples, 0.02%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (8 samples, 0.02%)__GI___libc_malloc (8 samples, 0.02%)torrust_tracker::core::torrent::Entry::insert_or_update_peer (13 samples, 0.03%)torrust_tracker::core::Tracker::announce::{{closure}} (416 samples, 0.98%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (253 samples, 0.60%)<torrust_tracker::core::torrent::repository::RepositoryAsyncSingle as torrust_tracker::core::torrent::repository::TRepositoryAsync>::update_torrent_with_peer_and_get_stats::{{closure}} (252 samples, 0.60%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (6 samples, 0.01%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (9 samples, 0.02%)<core::time::Nanoseconds as core::hash::Hash>::hash (10 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (10 samples, 0.02%)core::hash::Hasher::write_u32 (10 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (10 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (10 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (9 samples, 0.02%)<core::time::Duration as core::hash::Hash>::hash (20 samples, 
0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (10 samples, 0.02%)core::hash::Hasher::write_u64 (10 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (10 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (10 samples, 0.02%)<torrust_tracker::shared::clock::time_extent::TimeExtent as core::hash::Hash>::hash (31 samples, 0.07%)core::hash::impls::<impl core::hash::Hash for u64>::hash (11 samples, 0.03%)core::hash::Hasher::write_u64 (11 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (11 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (11 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (11 samples, 0.03%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (5 samples, 0.01%)core::array::<impl core::hash::Hash for [T: N]>::hash (22 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (22 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (18 samples, 0.04%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (18 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (18 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (17 samples, 0.04%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (60 samples, 0.14%)core::num::<impl u128>::checked_div (10 samples, 0.02%)[[vdso]] (10 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::check (79 samples, 0.19%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (18 samples, 0.04%)torrust_tracker::shared::clock::time_extent::Make::now (18 samples, 0.04%)torrust_tracker::shared::clock::working_clock::<impl torrust_tracker::shared::clock::Time for torrust_tracker::shared::clock::Clock<_>>::now (8 samples, 0.02%)std::time::SystemTime::now (5 samples, 0.01%)std::sys::pal::unix::time::SystemTime::now (5 samples, 
0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (7 samples, 0.02%)core::array::<impl core::hash::Hash for [T: N]>::hash (6 samples, 0.01%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (6 samples, 0.01%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (6 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (6 samples, 0.01%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (6 samples, 0.01%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (6 samples, 0.01%)torrust_tracker::servers::udp::peer_builder::from_request (5 samples, 0.01%)torrust_tracker::shared::clock::working_clock::<impl torrust_tracker::shared::clock::Time for torrust_tracker::shared::clock::Clock<_>>::now (5 samples, 0.01%)std::time::SystemTime::now (5 samples, 0.01%)std::sys::pal::unix::time::SystemTime::now (5 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (5 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (516 samples, 1.22%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (10 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (8 samples, 0.02%)<core::time::Nanoseconds as core::hash::Hash>::hash (9 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (9 samples, 0.02%)core::hash::Hasher::write_u32 (9 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (9 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (9 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (5 samples, 0.01%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (12 samples, 0.03%)<core::time::Duration as core::hash::Hash>::hash (23 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (14 samples, 0.03%)core::hash::Hasher::write_u64 (14 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (14 samples, 
0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (14 samples, 0.03%)<torrust_tracker::shared::clock::time_extent::TimeExtent as core::hash::Hash>::hash (34 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u64>::hash (11 samples, 0.03%)core::hash::Hasher::write_u64 (11 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (11 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (11 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (11 samples, 0.03%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (6 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (62 samples, 0.15%)core::array::<impl core::hash::Hash for [T: N]>::hash (18 samples, 0.04%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (18 samples, 0.04%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (16 samples, 0.04%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (16 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (16 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (15 samples, 0.04%)core::hash::sip::u8to64_le (5 samples, 0.01%)core::num::<impl u128>::checked_div (5 samples, 0.01%)[[vdso]] (5 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (77 samples, 0.18%)torrust_tracker::servers::udp::connection_cookie::make (76 samples, 0.18%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (14 samples, 0.03%)torrust_tracker::shared::clock::time_extent::Make::now (13 samples, 0.03%)torrust_tracker::shared::clock::working_clock::<impl torrust_tracker::shared::clock::Time for torrust_tracker::shared::clock::Clock<_>>::now (8 samples, 0.02%)std::time::SystemTime::now (7 samples, 0.02%)std::sys::pal::unix::time::SystemTime::now (7 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (5 samples, 
0.01%)__GI___clock_gettime (5 samples, 0.01%)torrust_tracker::core::ScrapeData::add_file (5 samples, 0.01%)std::collections::hash::map::HashMap<K,V,S>::insert (5 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (5 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (9 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (9 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (8 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (8 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (620 samples, 1.46%)torrust_tracker::servers::udp::handlers::handle_scrape::{{closure}} (17 samples, 0.04%)torrust_tracker::core::Tracker::scrape::{{closure}} (16 samples, 0.04%)torrust_tracker::core::Tracker::get_swarm_metadata::{{closure}} (11 samples, 0.03%)core::fmt::Formatter::new (5 samples, 0.01%)alloc::vec::Vec<T,A>::reserve (5 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve (5 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (5 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::grow_amortized (5 samples, 0.01%)alloc::raw_vec::finish_grow (5 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_str (7 samples, 0.02%)alloc::string::String::push_str (7 samples, 0.02%)alloc::vec::Vec<T,A>::extend_from_slice (7 samples, 0.02%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (7 samples, 0.02%)alloc::vec::Vec<T,A>::append_elements (7 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (29 samples, 0.07%)core::fmt::num::imp::fmt_u64 (28 samples, 0.07%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (15 samples, 
0.04%)core::fmt::num::imp::fmt_u64 (14 samples, 0.03%)<T as alloc::string::ToString>::to_string (55 samples, 0.13%)core::option::Option<T>::expect (9 samples, 0.02%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (8 samples, 0.02%)alloc::alloc::dealloc (8 samples, 0.02%)__rdl_dealloc (8 samples, 0.02%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (8 samples, 0.02%)core::ptr::drop_in_place<alloc::string::String> (12 samples, 0.03%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (12 samples, 0.03%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (12 samples, 0.03%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (12 samples, 0.03%)torrust_tracker::servers::udp::logging::map_action_name (7 samples, 0.02%)binascii::bin2hex (13 samples, 0.03%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (6 samples, 0.01%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (6 samples, 0.01%)core::fmt::write (6 samples, 0.01%)torrust_tracker::shared::bit_torrent::info_hash::InfoHash::to_hex_string (32 samples, 0.08%)<T as alloc::string::ToString>::to_string (32 samples, 0.08%)<torrust_tracker::shared::bit_torrent::info_hash::InfoHash as core::fmt::Display>::fmt (31 samples, 0.07%)core::fmt::Formatter::write_fmt (17 samples, 0.04%)core::str::converts::from_utf8 (8 samples, 0.02%)core::str::validations::run_utf8_validation (6 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (127 samples, 0.30%)alloc::vec::Vec<T,A>::reserve (8 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve (8 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (8 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::grow_amortized (8 samples, 0.02%)alloc::raw_vec::finish_grow (7 samples, 0.02%)[[vdso]] (5 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_str (12 samples, 0.03%)alloc::string::String::push_str (12 
samples, 0.03%)alloc::vec::Vec<T,A>::extend_from_slice (12 samples, 0.03%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (12 samples, 0.03%)alloc::vec::Vec<T,A>::append_elements (12 samples, 0.03%)[[vdso]] (8 samples, 0.02%)<T as alloc::string::ToString>::to_string (36 samples, 0.09%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (31 samples, 0.07%)core::fmt::num::imp::fmt_u64 (29 samples, 0.07%)core::option::Option<T>::expect (6 samples, 0.01%)core::ptr::drop_in_place<alloc::string::String> (5 samples, 0.01%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (5 samples, 0.01%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (5 samples, 0.01%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (5 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (3,697 samples, 8.73%)torrust_trac..torrust_tracker::servers::udp::logging::log_response (72 samples, 0.17%)tracing_core::field::display (5 samples, 0.01%)__GI___lll_lock_wake_private (6 samples, 0.01%)[unknown] (5 samples, 0.01%)[unknown] (5 samples, 0.01%)[unknown] (6 samples, 0.01%)[unknown] (6 samples, 0.01%)[unknown] (6 samples, 0.01%)[unknown] (6 samples, 0.01%)[unknown] (6 samples, 0.01%)[unknown] (6 samples, 0.01%)[unknown] (6 samples, 0.01%)[unknown] (5 samples, 0.01%)sysmalloc (23 samples, 0.05%)grow_heap (16 samples, 0.04%)__GI_mprotect (15 samples, 0.04%)[unknown] (15 samples, 0.04%)[unknown] (14 samples, 0.03%)[unknown] (14 samples, 0.03%)[unknown] (14 samples, 0.03%)[unknown] (13 samples, 0.03%)[unknown] (12 samples, 0.03%)[unknown] (10 samples, 0.02%)[unknown] (7 samples, 0.02%)[unknown] (6 samples, 0.01%)__libc_calloc (86 samples, 0.20%)_int_malloc (74 samples, 0.17%)__memcpy_avx512_unaligned_erms (10 samples, 0.02%)__memset_avx512_unaligned_erms (9 samples, 0.02%)alloc::vec::from_elem (110 samples, 0.26%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (110 samples, 
0.26%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (110 samples, 0.26%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (110 samples, 0.26%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (109 samples, 0.26%)alloc::alloc::Global::alloc_impl (109 samples, 0.26%)alloc::alloc::alloc_zeroed (109 samples, 0.26%)__rdl_alloc_zeroed (109 samples, 0.26%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (109 samples, 0.26%)byteorder::io::WriteBytesExt::write_i32 (18 samples, 0.04%)std::io::Write::write_all (14 samples, 0.03%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (14 samples, 0.03%)std::io::cursor::vec_write (14 samples, 0.03%)std::io::cursor::vec_write_unchecked (10 samples, 0.02%)core::ptr::mut_ptr::<impl *mut T>::copy_from (10 samples, 0.02%)core::intrinsics::copy (10 samples, 0.02%)aquatic_udp_protocol::response::Response::write (44 samples, 0.10%)_int_free (57 samples, 0.13%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (76 samples, 0.18%)alloc::alloc::dealloc (76 samples, 0.18%)__rdl_dealloc (76 samples, 0.18%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (76 samples, 0.18%)__GI___libc_free (76 samples, 0.18%)core::ptr::drop_in_place<std::io::cursor::Cursor<alloc::vec::Vec<u8>>> (77 samples, 0.18%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (77 samples, 0.18%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (77 samples, 0.18%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (77 samples, 0.18%)std::io::cursor::Cursor<T>::new (5 samples, 0.01%)core::ptr::drop_in_place<tokio::net::udp::UdpSocket::send_to<&core::net::socket_addr::SocketAddr>::{{closure}}> (5 samples, 0.01%)<F as core::future::into_future::IntoFuture>::into_future (6 samples, 0.01%)<core::future::ready::Ready<T> as core::future::future::Future>::poll (6 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness 
as core::future::future::Future>::poll (49 samples, 0.12%)tokio::io::ready::Ready::intersection (9 samples, 0.02%)tokio::io::ready::Ready::from_interest (9 samples, 0.02%)tokio::io::interest::Interest::is_readable (6 samples, 0.01%)[unknown] (9,280 samples, 21.91%)[unknown][unknown] (9,225 samples, 21.78%)[unknown][unknown] (9,186 samples, 21.69%)[unknown][unknown] (9,178 samples, 21.67%)[unknown][unknown] (8,810 samples, 20.80%)[unknown][unknown] (8,693 samples, 20.53%)[unknown][unknown] (8,408 samples, 19.85%)[unknown][unknown] (7,899 samples, 18.65%)[unknown][unknown] (7,670 samples, 18.11%)[unknown][unknown] (7,204 samples, 17.01%)[unknown][unknown] (6,378 samples, 15.06%)[unknown][unknown] (5,353 samples, 12.64%)[unknown][unknown] (4,967 samples, 11.73%)[unknown][unknown] (4,431 samples, 10.46%)[unknown][unknown] (4,170 samples, 9.85%)[unknown][unknown] (3,514 samples, 8.30%)[unknown][unknown] (3,283 samples, 7.75%)[unknown][unknown] (3,155 samples, 7.45%)[unknown][unknown] (2,925 samples, 6.91%)[unknown][unknown] (2,830 samples, 6.68%)[unknown][unknown] (2,515 samples, 5.94%)[unknown][unknown] (2,384 samples, 5.63%)[unknow..[unknown] (1,991 samples, 4.70%)[unkn..[unknown] (1,407 samples, 3.32%)[un..[unknown] (1,100 samples, 2.60%)[u..[unknown] (942 samples, 2.22%)[..[unknown] (909 samples, 2.15%)[..[unknown] (805 samples, 1.90%)[..[unknown] (609 samples, 1.44%)[unknown] (533 samples, 1.26%)[unknown] (391 samples, 0.92%)[unknown] (109 samples, 0.26%)[unknown] (23 samples, 0.05%)[unknown] (19 samples, 0.04%)[unknown] (6 samples, 0.01%)__libc_sendto (9,358 samples, 22.10%)__libc_sendto__GI___pthread_disable_asynccancel (22 samples, 0.05%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (9,377 samples, 22.14%)tokio::net::udp::UdpSocket::send_to..mio::net::udp::UdpSocket::send_to (9,377 samples, 22.14%)mio::net::udp::UdpSocket::send_tomio::io_source::IoSource<T>::do_io (9,377 samples, 
22.14%)mio::io_source::IoSource<T>::do_iomio::sys::unix::stateless_io_source::IoSourceState::do_io (9,377 samples, 22.14%)mio::sys::unix::stateless_io_source..mio::net::udp::UdpSocket::send_to::{{closure}} (9,377 samples, 22.14%)mio::net::udp::UdpSocket::send_to::..std::net::udp::UdpSocket::send_to (9,377 samples, 22.14%)std::net::udp::UdpSocket::send_tostd::sys_common::net::UdpSocket::send_to (9,377 samples, 22.14%)std::sys_common::net::UdpSocket::se..std::sys::pal::unix::cvt (18 samples, 0.04%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (14 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (38 samples, 0.09%)std::sync::mutex::Mutex<T>::lock (38 samples, 0.09%)std::sys::sync::mutex::futex::Mutex::lock (37 samples, 0.09%)core::result::Result<T,E>::is_err (36 samples, 0.09%)core::result::Result<T,E>::is_ok (36 samples, 0.09%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (41 samples, 0.10%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (9,803 samples, 23.15%)torrust_tracker::servers::udp::server..torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (9,540 samples, 22.53%)torrust_tracker::servers::udp::serve..tokio::net::udp::UdpSocket::send_to::{{closure}} (9,513 samples, 22.46%)tokio::net::udp::UdpSocket::send_to:..tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (9,485 samples, 22.40%)tokio::net::udp::UdpSocket::send_to..tokio::runtime::io::registration::Registration::async_io::{{closure}} (9,477 samples, 22.38%)tokio::runtime::io::registration::R..tokio::runtime::io::registration::Registration::readiness::{{closure}} (44 samples, 0.10%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (42 samples, 0.10%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (42 samples, 0.10%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (13,858 samples, 
32.72%)torrust_tracker::servers::udp::server::Udp::process_r..torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (13,677 samples, 32.29%)torrust_tracker::servers::udp::server::Udp::process_..<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (41 samples, 0.10%)core::sync::atomic::AtomicUsize::fetch_add (41 samples, 0.10%)core::sync::atomic::atomic_add (41 samples, 0.10%)__GI___lll_lock_wake_private (65 samples, 0.15%)[unknown] (54 samples, 0.13%)[unknown] (53 samples, 0.13%)[unknown] (49 samples, 0.12%)[unknown] (48 samples, 0.11%)[unknown] (42 samples, 0.10%)[unknown] (14 samples, 0.03%)[unknown] (9 samples, 0.02%)__GI___lll_lock_wait_private (91 samples, 0.21%)futex_wait (88 samples, 0.21%)[unknown] (87 samples, 0.21%)[unknown] (85 samples, 0.20%)[unknown] (84 samples, 0.20%)[unknown] (84 samples, 0.20%)[unknown] (81 samples, 0.19%)[unknown] (78 samples, 0.18%)[unknown] (68 samples, 0.16%)[unknown] (41 samples, 0.10%)[unknown] (38 samples, 0.09%)[unknown] (32 samples, 0.08%)[unknown] (25 samples, 0.06%)[unknown] (16 samples, 0.04%)[unknown] (12 samples, 0.03%)[unknown] (6 samples, 0.01%)[unknown] (5 samples, 0.01%)[unknown] (5 samples, 0.01%)_int_free (141 samples, 0.33%)__GI___libc_free (212 samples, 0.50%)syscall (5 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Core<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (8 samples, 0.02%)tokio::runtime::task::harness::Harness<T,S>::dealloc (9 samples, 0.02%)core::mem::drop (9 samples, 0.02%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (9 samples, 
0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (9 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::abort::AbortHandle> (246 samples, 0.58%)<tokio::runtime::task::abort::AbortHandle as core::ops::drop::Drop>::drop (246 samples, 0.58%)tokio::runtime::task::raw::RawTask::drop_abort_handle (245 samples, 0.58%)tokio::runtime::task::raw::drop_abort_handle (15 samples, 0.04%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (13 samples, 0.03%)tokio::runtime::task::state::State::ref_dec (13 samples, 0.03%)core::result::Result<T,E>::is_ok (5 samples, 0.01%)tokio::runtime::task::raw::drop_join_handle_slow (5 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::drop_join_handle_slow (5 samples, 0.01%)tokio::runtime::task::raw::RawTask::drop_join_handle_slow (9 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::join::JoinHandle<()>> (19 samples, 0.04%)<tokio::runtime::task::join::JoinHandle<T> as core::ops::drop::Drop>::drop (19 samples, 0.04%)tokio::runtime::task::state::State::drop_join_handle_fast (5 samples, 0.01%)core::sync::atomic::AtomicUsize::compare_exchange_weak (5 samples, 0.01%)core::sync::atomic::atomic_compare_exchange_weak (5 samples, 0.01%)ringbuf::ring_buffer::base::RbBase::is_full (8 samples, 0.02%)ringbuf::ring_buffer::base::RbBase::vacant_len (5 samples, 0.01%)ringbuf::ring_buffer::rb::Rb::push_overwrite (19 samples, 0.04%)ringbuf::ring_buffer::rb::Rb::push (7 samples, 0.02%)ringbuf::producer::Producer<T,R>::push (7 samples, 0.02%)tokio::runtime::task::join::JoinHandle<T>::abort_handle (6 samples, 0.01%)tokio::runtime::task::raw::RawTask::ref_inc (6 samples, 0.01%)tokio::runtime::task::state::State::ref_inc (6 samples, 0.01%)__GI___lll_lock_wait_private (81 samples, 0.19%)futex_wait (79 samples, 0.19%)[unknown] (75 samples, 0.18%)[unknown] (74 samples, 
0.17%)[unknown] (74 samples, 0.17%)[unknown] (72 samples, 0.17%)[unknown] (70 samples, 0.17%)[unknown] (66 samples, 0.16%)[unknown] (60 samples, 0.14%)[unknown] (28 samples, 0.07%)[unknown] (24 samples, 0.06%)[unknown] (21 samples, 0.05%)[unknown] (17 samples, 0.04%)[unknown] (10 samples, 0.02%)[unknown] (7 samples, 0.02%)__GI___lll_lock_wake_private (96 samples, 0.23%)[unknown] (90 samples, 0.21%)[unknown] (88 samples, 0.21%)[unknown] (85 samples, 0.20%)[unknown] (80 samples, 0.19%)[unknown] (75 samples, 0.18%)[unknown] (51 samples, 0.12%)[unknown] (46 samples, 0.11%)[unknown] (20 samples, 0.05%)[unknown] (16 samples, 0.04%)[unknown] (8 samples, 0.02%)malloc_consolidate (71 samples, 0.17%)sysmalloc (18 samples, 0.04%)grow_heap (13 samples, 0.03%)__GI_mprotect (13 samples, 0.03%)[unknown] (13 samples, 0.03%)[unknown] (13 samples, 0.03%)[unknown] (13 samples, 0.03%)[unknown] (13 samples, 0.03%)[unknown] (13 samples, 0.03%)[unknown] (10 samples, 0.02%)[unknown] (10 samples, 0.02%)[unknown] (9 samples, 0.02%)[unknown] (8 samples, 0.02%)_int_malloc (206 samples, 0.49%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (60 samples, 0.14%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (15 samples, 0.04%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (13 samples, 0.03%)alloc::vec::Vec<T>::with_capacity (402 samples, 0.95%)alloc::vec::Vec<T,A>::with_capacity_in (402 samples, 0.95%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (402 samples, 0.95%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (402 samples, 0.95%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (402 samples, 0.95%)alloc::alloc::Global::alloc_impl (402 samples, 0.95%)alloc::alloc::alloc (402 samples, 0.95%)__rdl_alloc (402 samples, 0.95%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (402 samples, 0.95%)__GI___libc_malloc (402 samples, 
0.95%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (19 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (19 samples, 0.04%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (24 samples, 0.06%)tokio::io::ready::Ready::intersection (5 samples, 0.01%)tokio::io::ready::Ready::from_interest (5 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (5 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (55 samples, 0.13%)tokio::net::udp::UdpSocket::ready::{{closure}} (54 samples, 0.13%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (27 samples, 0.06%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (23 samples, 0.05%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (20 samples, 0.05%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (6 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (13 samples, 0.03%)[unknown] (2,299 samples, 5.43%)[unknow..[unknown] (2,285 samples, 5.40%)[unknow..[unknown] (2,266 samples, 5.35%)[unknow..[unknown] (2,243 samples, 5.30%)[unkno..[unknown] (2,045 samples, 4.83%)[unkno..[unknown] (1,928 samples, 4.55%)[unkn..[unknown] (1,828 samples, 4.32%)[unkn..[unknown] (1,505 samples, 3.55%)[unk..[unknown] (1,087 samples, 2.57%)[u..[unknown] (1,002 samples, 2.37%)[u..[unknown] (677 samples, 1.60%)[unknown] (563 samples, 1.33%)[unknown] (354 samples, 0.84%)[unknown] (167 samples, 0.39%)[unknown] (50 samples, 0.12%)[unknown] (12 samples, 0.03%)__libc_recvfrom (2,332 samples, 5.51%)__libc_..__GI___pthread_disable_asynccancel (6 samples, 0.01%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (2,365 samples, 5.58%)tokio::..mio::net::udp::UdpSocket::recv_from (2,352 samples, 5.55%)mio::ne..mio::io_source::IoSource<T>::do_io (2,352 samples, 
5.55%)mio::io..mio::sys::unix::stateless_io_source::IoSourceState::do_io (2,352 samples, 5.55%)mio::sy..mio::net::udp::UdpSocket::recv_from::{{closure}} (2,352 samples, 5.55%)mio::ne..std::net::udp::UdpSocket::recv_from (2,352 samples, 5.55%)std::ne..std::sys_common::net::UdpSocket::recv_from (2,352 samples, 5.55%)std::sy..std::sys::pal::unix::net::Socket::recv_from (2,352 samples, 5.55%)std::sy..std::sys::pal::unix::net::Socket::recv_from_with_flags (2,352 samples, 5.55%)std::sy..std::sys_common::net::sockaddr_to_addr (14 samples, 0.03%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (9 samples, 0.02%)_int_malloc (6 samples, 0.01%)__GI___libc_malloc (5 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (2,972 samples, 7.02%)torrust_t..tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (2,479 samples, 5.85%)tokio::..tokio::runtime::io::registration::Registration::async_io::{{closure}} (2,478 samples, 5.85%)tokio::..tokio::runtime::io::registration::Registration::readiness::{{closure}} (25 samples, 0.06%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (15 samples, 0.04%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (6 samples, 0.01%)__memcpy_avx512_unaligned_erms (26 samples, 0.06%)tokio::runtime::context::CONTEXT::__getit (5 samples, 0.01%)core::cell::Cell<T>::get (5 samples, 0.01%)__memcpy_avx512_unaligned_erms (256 samples, 0.60%)core::cell::RefCell<T>::borrow (12 samples, 0.03%)core::cell::RefCell<T>::try_borrow (12 samples, 0.03%)core::cell::BorrowRef::new (12 samples, 0.03%)core::cell::is_reading (8 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (5 samples, 0.01%)__memcpy_avx512_unaligned_erms (70 samples, 0.17%)syscall (364 samples, 0.86%)[unknown] (343 samples, 0.81%)[unknown] (341 samples, 0.81%)[unknown] (329 samples, 0.78%)[unknown] (316 samples, 0.75%)[unknown] (306 samples, 0.72%)[unknown] (240 samples, 0.57%)[unknown] (219 samples, 
0.52%)[unknown] (145 samples, 0.34%)[unknown] (102 samples, 0.24%)[unknown] (49 samples, 0.12%)[unknown] (26 samples, 0.06%)[unknown] (15 samples, 0.04%)[unknown] (15 samples, 0.04%)core::ptr::drop_in_place<core::option::Option<tokio::runtime::task::Notified<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (5 samples, 0.01%)core::sync::atomic::AtomicU32::store (5 samples, 0.01%)core::sync::atomic::atomic_store (5 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::push_back_finish (10 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::push_back_or_overflow (12 samples, 0.03%)tokio::runtime::context::with_scheduler (40 samples, 0.09%)std::thread::local::LocalKey<T>::try_with (37 samples, 0.09%)tokio::runtime::context::with_scheduler::{{closure}} (36 samples, 0.09%)tokio::runtime::context::scoped::Scoped<T>::with (35 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (35 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (35 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (32 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (8 samples, 0.02%)alloc::vec::Vec<T,A>::pop (6 samples, 0.01%)std::sync::mutex::MutexGuard<T>::new (9 samples, 0.02%)std::sync::poison::Flag::guard (9 samples, 0.02%)std::thread::panicking (8 samples, 0.02%)std::panicking::panicking (8 samples, 0.02%)std::panicking::panic_count::count_is_zero (8 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (19 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (19 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (10 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock_contended (5 samples, 
0.01%)core::sync::atomic::AtomicUsize::fetch_add (265 samples, 0.63%)core::sync::atomic::atomic_add (265 samples, 0.63%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (309 samples, 0.73%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (280 samples, 0.66%)tokio::runtime::scheduler::multi_thread::idle::State::num_unparked (10 samples, 0.02%)__GI___libc_write (84 samples, 0.20%)__GI___libc_write (84 samples, 0.20%)[unknown] (83 samples, 0.20%)[unknown] (83 samples, 0.20%)[unknown] (83 samples, 0.20%)[unknown] (76 samples, 0.18%)[unknown] (70 samples, 0.17%)[unknown] (67 samples, 0.16%)[unknown] (57 samples, 0.13%)[unknown] (40 samples, 0.09%)[unknown] (40 samples, 0.09%)[unknown] (33 samples, 0.08%)[unknown] (28 samples, 0.07%)[unknown] (15 samples, 0.04%)[unknown] (11 samples, 0.03%)[unknown] (10 samples, 0.02%)[unknown] (7 samples, 0.02%)[unknown] (5 samples, 0.01%)tokio::runtime::driver::Handle::unpark (85 samples, 0.20%)tokio::runtime::driver::IoHandle::unpark (85 samples, 0.20%)tokio::runtime::io::driver::Handle::unpark (85 samples, 0.20%)mio::waker::Waker::wake (85 samples, 0.20%)mio::sys::unix::waker::fdbased::Waker::wake (85 samples, 0.20%)mio::sys::unix::waker::eventfd::WakerInternal::wake (85 samples, 0.20%)<&std::fs::File as std::io::Write>::write (85 samples, 0.20%)std::sys::pal::unix::fs::File::write (85 samples, 0.20%)std::sys::pal::unix::fd::FileDesc::write (85 samples, 0.20%)tokio::runtime::context::with_scheduler (805 samples, 1.90%)t..std::thread::local::LocalKey<T>::try_with (805 samples, 1.90%)s..tokio::runtime::context::with_scheduler::{{closure}} (805 samples, 1.90%)t..tokio::runtime::context::scoped::Scoped<T>::with (805 samples, 1.90%)t..tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (805 samples, 1.90%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (805 samples, 
1.90%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (801 samples, 1.89%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (801 samples, 1.89%)t..tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (88 samples, 0.21%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (88 samples, 0.21%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_option_task_without_yield (836 samples, 1.97%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task (836 samples, 1.97%)t..tokio::runtime::scheduler::multi_thread::worker::with_current (836 samples, 1.97%)t..core::ptr::drop_in_place<tokio::util::sharded_list::ShardGuard<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>> (15 samples, 0.04%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>>> (15 samples, 0.04%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (15 samples, 0.04%)std::sync::poison::Flag::done (15 samples, 0.04%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (20 samples, 0.05%)<tokio::runtime::task::Task<S> as tokio::util::sharded_list::ShardedListItem>::get_shard_id (6 samples, 0.01%)tokio::runtime::task::core::Header::get_id (6 samples, 0.01%)core::result::Result<T,E>::is_err (45 samples, 0.11%)core::result::Result<T,E>::is_ok (45 samples, 0.11%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (122 samples, 0.29%)tokio::util::sharded_list::ShardedList<L,<L as 
tokio::util::linked_list::Link>::Target>::lock_shard (95 samples, 0.22%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (89 samples, 0.21%)tokio::loom::std::mutex::Mutex<T>::lock (86 samples, 0.20%)std::sync::mutex::Mutex<T>::lock (86 samples, 0.20%)std::sys::sync::mutex::futex::Mutex::lock (85 samples, 0.20%)core::sync::atomic::AtomicU32::compare_exchange (40 samples, 0.09%)core::sync::atomic::atomic_compare_exchange (40 samples, 0.09%)__memcpy_avx512_unaligned_erms (61 samples, 0.14%)__GI___lll_lock_wake_private (13 samples, 0.03%)__memcpy_avx512_unaligned_erms (26 samples, 0.06%)__memcpy_avx512_unaligned_erms (7 samples, 0.02%)__GI___lll_lock_wait_private (94 samples, 0.22%)futex_wait (93 samples, 0.22%)[unknown] (93 samples, 0.22%)[unknown] (93 samples, 0.22%)[unknown] (91 samples, 0.21%)[unknown] (91 samples, 0.21%)[unknown] (90 samples, 0.21%)[unknown] (86 samples, 0.20%)[unknown] (79 samples, 0.19%)[unknown] (41 samples, 0.10%)[unknown] (38 samples, 0.09%)[unknown] (30 samples, 0.07%)[unknown] (26 samples, 0.06%)[unknown] (20 samples, 0.05%)[unknown] (11 samples, 0.03%)__GI___lll_lock_wake_private (151 samples, 0.36%)[unknown] (140 samples, 0.33%)[unknown] (138 samples, 0.33%)[unknown] (133 samples, 0.31%)[unknown] (127 samples, 0.30%)[unknown] (123 samples, 0.29%)[unknown] (85 samples, 0.20%)[unknown] (75 samples, 0.18%)[unknown] (39 samples, 0.09%)[unknown] (25 samples, 0.06%)[unknown] (19 samples, 0.04%)[unknown] (9 samples, 0.02%)[unknown] (8 samples, 0.02%)[unknown] (8 samples, 0.02%)_int_free (21 samples, 0.05%)[unknown] (117 samples, 0.28%)[unknown] (112 samples, 0.26%)[unknown] (108 samples, 0.26%)[unknown] (104 samples, 0.25%)[unknown] (101 samples, 0.24%)[unknown] (94 samples, 0.22%)[unknown] (88 samples, 0.21%)[unknown] (80 samples, 0.19%)[unknown] (72 samples, 0.17%)[unknown] (55 samples, 0.13%)[unknown] (43 samples, 0.10%)[unknown] (30 samples, 0.07%)[unknown] (9 samples, 0.02%)sysmalloc (326 
samples, 0.77%)grow_heap (201 samples, 0.47%)__GI_mprotect (200 samples, 0.47%)[unknown] (198 samples, 0.47%)[unknown] (195 samples, 0.46%)[unknown] (192 samples, 0.45%)[unknown] (192 samples, 0.45%)[unknown] (187 samples, 0.44%)[unknown] (178 samples, 0.42%)[unknown] (162 samples, 0.38%)[unknown] (139 samples, 0.33%)[unknown] (120 samples, 0.28%)[unknown] (91 samples, 0.21%)[unknown] (62 samples, 0.15%)[unknown] (34 samples, 0.08%)[unknown] (13 samples, 0.03%)core::option::Option<T>::map (1,857 samples, 4.38%)core:..tokio::task::spawn::spawn_inner::{{closure}} (1,857 samples, 4.38%)tokio..tokio::runtime::scheduler::Handle::spawn (1,857 samples, 4.38%)tokio..tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (1,857 samples, 4.38%)tokio..tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (1,856 samples, 4.38%)tokio..tokio::runtime::task::list::OwnedTasks<S>::bind (935 samples, 2.21%)t..tokio::runtime::task::new_task (803 samples, 1.90%)t..tokio::runtime::task::raw::RawTask::new (803 samples, 1.90%)t..tokio::runtime::task::core::Cell<T,S>::new (803 samples, 1.90%)t..alloc::boxed::Box<T>::new (737 samples, 1.74%)alloc::alloc::exchange_malloc (697 samples, 1.65%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (693 samples, 1.64%)alloc::alloc::Global::alloc_impl (693 samples, 1.64%)alloc::alloc::alloc (693 samples, 1.64%)__rdl_alloc (693 samples, 1.64%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (693 samples, 1.64%)std::sys::pal::unix::alloc::aligned_malloc (693 samples, 1.64%)__posix_memalign (684 samples, 1.62%)__posix_memalign (683 samples, 1.61%)_mid_memalign (683 samples, 1.61%)_int_memalign (423 samples, 1.00%)_int_malloc (395 samples, 0.93%)unlink_chunk (8 samples, 0.02%)tokio::runtime::context::current::with_current (2,163 samples, 5.11%)tokio:..std::thread::local::LocalKey<T>::try_with (2,163 samples, 
5.11%)std::t..tokio::runtime::context::current::with_current::{{closure}} (2,128 samples, 5.02%)tokio:..tokio::task::spawn::spawn (2,169 samples, 5.12%)tokio:..tokio::task::spawn::spawn_inner (2,169 samples, 5.12%)tokio:..tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (19,388 samples, 45.78%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (19,388 samples, 45.78%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (5,501 samples, 12.99%)torrust_tracker::ser..torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (5,501 samples, 12.99%)torrust_tracker::ser..torrust_tracker::servers::udp::server::Udp::spawn_request_processor (2,174 samples, 5.13%)torrus..torrust_tracker::servers::udp::server::Udp::process_request (5 samples, 0.01%)__memcpy_avx512_unaligned_erms (5 samples, 0.01%)__memcpy_avx512_unaligned_erms (212 samples, 0.50%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (217 samples, 0.51%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (217 samples, 0.51%)tokio::runtime::task::core::Core<T,S>::poll (19,613 samples, 46.31%)tokio::runtime::task::core::Core<T,S>::polltokio::runtime::task::core::Core<T,S>::drop_future_or_output (225 samples, 0.53%)tokio::runtime::task::core::Core<T,S>::set_stage (225 samples, 0.53%)__memcpy_avx512_unaligned_erms (103 samples, 0.24%)__memcpy_avx512_unaligned_erms (170 samples, 0.40%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (176 samples, 0.42%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (175 samples, 0.41%)tokio::runtime::task::harness::poll_future (19,897 samples, 46.98%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (19,897 samples, 46.98%)std::panic::catch_unwindstd::panicking::try (19,897 samples, 46.98%)std::panicking::trystd::panicking::try::do_call (19,897 samples, 
46.98%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (19,897 samples, 46.98%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce..tokio::runtime::task::harness::poll_future::{{closure}} (19,897 samples, 46.98%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::store_output (284 samples, 0.67%)tokio::runtime::task::core::Core<T,S>::set_stage (283 samples, 0.67%)tokio::runtime::coop::budget (20,267 samples, 47.85%)tokio::runtime::coop::budgettokio::runtime::coop::with_budget (20,267 samples, 47.85%)tokio::runtime::coop::with_budgettokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (20,263 samples, 47.84%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}}tokio::runtime::task::LocalNotified<S>::run (20,262 samples, 47.84%)tokio::runtime::task::LocalNotified<S>::runtokio::runtime::task::raw::RawTask::poll (20,262 samples, 47.84%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (20,196 samples, 47.69%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (20,187 samples, 47.66%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (19,945 samples, 47.09%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::task::state::State::transition_to_running (36 samples, 0.09%)tokio::runtime::task::state::State::fetch_update_action (36 samples, 0.09%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (9 samples, 0.02%)syscall (665 samples, 1.57%)[unknown] (643 samples, 1.52%)[unknown] (638 samples, 1.51%)[unknown] (583 samples, 1.38%)[unknown] (558 samples, 1.32%)[unknown] (537 samples, 1.27%)[unknown] (435 samples, 1.03%)[unknown] (392 samples, 0.93%)[unknown] (332 samples, 0.78%)[unknown] (205 samples, 0.48%)[unknown] (110 samples, 0.26%)[unknown] 
(21 samples, 0.05%)[unknown] (7 samples, 0.02%)[unknown] (7 samples, 0.02%)alloc::vec::Vec<T,A>::pop (9 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (11 samples, 0.03%)std::sync::mutex::Mutex<T>::lock (10 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (7 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock_contended (5 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::spin (5 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (132 samples, 0.31%)core::sync::atomic::AtomicUsize::fetch_add (132 samples, 0.31%)core::sync::atomic::atomic_add (132 samples, 0.31%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (154 samples, 0.36%)[unknown] (231 samples, 0.55%)[unknown] (228 samples, 0.54%)[unknown] (218 samples, 0.51%)[unknown] (215 samples, 0.51%)[unknown] (197 samples, 0.47%)[unknown] (192 samples, 0.45%)[unknown] (163 samples, 0.38%)[unknown] (99 samples, 0.23%)[unknown] (92 samples, 0.22%)[unknown] (73 samples, 0.17%)[unknown] (67 samples, 0.16%)[unknown] (35 samples, 0.08%)[unknown] (31 samples, 0.07%)[unknown] (22 samples, 0.05%)[unknown] (16 samples, 0.04%)[unknown] (10 samples, 0.02%)[unknown] (8 samples, 0.02%)__GI___libc_write (245 samples, 0.58%)__GI___libc_write (245 samples, 0.58%)mio::sys::unix::waker::eventfd::WakerInternal::wake (246 samples, 0.58%)<&std::fs::File as std::io::Write>::write (246 samples, 0.58%)std::sys::pal::unix::fs::File::write (246 samples, 0.58%)std::sys::pal::unix::fd::FileDesc::write (246 samples, 0.58%)tokio::runtime::driver::Handle::unpark (264 samples, 0.62%)tokio::runtime::driver::IoHandle::unpark (264 samples, 0.62%)tokio::runtime::io::driver::Handle::unpark (264 samples, 0.62%)mio::waker::Waker::wake (263 samples, 0.62%)mio::sys::unix::waker::fdbased::Waker::wake (263 samples, 0.62%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (17 samples, 0.04%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (17 samples, 
0.04%)tokio::runtime::driver::Handle::unpark (17 samples, 0.04%)tokio::runtime::driver::IoHandle::unpark (17 samples, 0.04%)[unknown] (15 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (21,372 samples, 50.46%)tokio::runtime::scheduler::multi_thread::worker::Context::run_tasktokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (1,087 samples, 2.57%)to..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (1,087 samples, 2.57%)to..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (1,087 samples, 2.57%)to..tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (268 samples, 0.63%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (267 samples, 0.63%)core::option::Option<T>::or_else (8 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (8 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (8 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (9 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (19 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (17 samples, 0.04%)alloc::sync::Arc<T,A>::inner (17 samples, 0.04%)core::ptr::non_null::NonNull<T>::as_ref (17 samples, 0.04%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (40 samples, 0.09%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (40 samples, 0.09%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (40 samples, 0.09%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (15 samples, 0.04%)alloc::sync::Arc<T,A>::inner (15 samples, 0.04%)core::ptr::non_null::NonNull<T>::as_ref (15 samples, 
0.04%)core::num::<impl u32>::wrapping_sub (42 samples, 0.10%)core::sync::atomic::AtomicU64::load (17 samples, 0.04%)core::sync::atomic::atomic_load (17 samples, 0.04%)tokio::loom::std::atomic_u32::AtomicU32::unsync_load (13 samples, 0.03%)core::sync::atomic::AtomicU32::load (13 samples, 0.03%)core::sync::atomic::atomic_load (13 samples, 0.03%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (23 samples, 0.05%)alloc::sync::Arc<T,A>::inner (23 samples, 0.05%)core::ptr::non_null::NonNull<T>::as_ref (23 samples, 0.05%)core::num::<impl u32>::wrapping_add (8 samples, 0.02%)core::num::<impl u32>::wrapping_sub (16 samples, 0.04%)core::sync::atomic::AtomicU32::load (19 samples, 0.04%)core::sync::atomic::atomic_load (19 samples, 0.04%)core::sync::atomic::AtomicU64::load (46 samples, 0.11%)core::sync::atomic::atomic_load (46 samples, 0.11%)tokio::runtime::scheduler::multi_thread::queue::pack (33 samples, 0.08%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (306 samples, 0.72%)tokio::runtime::scheduler::multi_thread::queue::unpack (58 samples, 0.14%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (419 samples, 0.99%)tokio::runtime::scheduler::multi_thread::queue::unpack (10 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_searching (41 samples, 0.10%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_searching (11 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (898 samples, 2.12%)t..tokio::util::rand::FastRand::fastrand_n (8 samples, 0.02%)tokio::util::rand::FastRand::fastrand (8 samples, 0.02%)std::panic::catch_unwind (29,494 samples, 69.64%)std::panic::catch_unwindstd::panicking::try (29,494 samples, 69.64%)std::panicking::trystd::panicking::try::do_call (29,494 samples, 69.64%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (29,494 samples, 
69.64%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncestd::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}} (29,494 samples, 69.64%)std::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}}std::sys_common::backtrace::__rust_begin_short_backtrace (29,494 samples, 69.64%)std::sys_common::backtrace::__rust_begin_short_backtracetokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}} (29,494 samples, 69.64%)tokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}}tokio::runtime::blocking::pool::Inner::run (29,494 samples, 69.64%)tokio::runtime::blocking::pool::Inner::runtokio::runtime::blocking::pool::Task::run (29,415 samples, 69.45%)tokio::runtime::blocking::pool::Task::runtokio::runtime::task::UnownedTask<S>::run (29,415 samples, 69.45%)tokio::runtime::task::UnownedTask<S>::runtokio::runtime::task::raw::RawTask::poll (29,415 samples, 69.45%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (29,415 samples, 69.45%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (29,415 samples, 69.45%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (29,415 samples, 69.45%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::task::harness::poll_future (29,415 samples, 69.45%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (29,415 samples, 69.45%)std::panic::catch_unwindstd::panicking::try (29,415 samples, 69.45%)std::panicking::trystd::panicking::try::do_call (29,415 samples, 69.45%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (29,415 samples, 69.45%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncetokio::runtime::task::harness::poll_future::{{closure}} (29,415 samples, 
69.45%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::poll (29,415 samples, 69.45%)tokio::runtime::task::core::Core<T,S>::polltokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (29,415 samples, 69.45%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (29,415 samples, 69.45%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (29,415 samples, 69.45%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::polltokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (29,415 samples, 69.45%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}}tokio::runtime::scheduler::multi_thread::worker::run (29,415 samples, 69.45%)tokio::runtime::scheduler::multi_thread::worker::runtokio::runtime::context::runtime::enter_runtime (29,415 samples, 69.45%)tokio::runtime::context::runtime::enter_runtimetokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (29,415 samples, 69.45%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}tokio::runtime::context::set_scheduler (29,415 samples, 69.45%)tokio::runtime::context::set_schedulerstd::thread::local::LocalKey<T>::with (29,415 samples, 69.45%)std::thread::local::LocalKey<T>::withstd::thread::local::LocalKey<T>::try_with (29,415 samples, 69.45%)std::thread::local::LocalKey<T>::try_withtokio::runtime::context::set_scheduler::{{closure}} (29,415 samples, 69.45%)tokio::runtime::context::set_scheduler::{{closure}}tokio::runtime::context::scoped::Scoped<T>::set (29,415 samples, 69.45%)tokio::runtime::context::scoped::Scoped<T>::settokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (29,415 samples, 
69.45%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}}tokio::runtime::scheduler::multi_thread::worker::Context::run (29,415 samples, 69.45%)tokio::runtime::scheduler::multi_thread::worker::Context::run<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (29,496 samples, 69.64%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (29,496 samples, 69.64%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_oncecore::ops::function::FnOnce::call_once{{vtable.shim}} (29,496 samples, 69.64%)core::ops::function::FnOnce::call_once{{vtable.shim}}std::thread::Builder::spawn_unchecked_::{{closure}} (29,496 samples, 69.64%)std::thread::Builder::spawn_unchecked_::{{closure}}__GI_munmap (26 samples, 0.06%)[unknown] (26 samples, 0.06%)[unknown] (26 samples, 0.06%)[unknown] (26 samples, 0.06%)[unknown] (26 samples, 0.06%)[unknown] (26 samples, 0.06%)[unknown] (26 samples, 0.06%)[unknown] (25 samples, 0.06%)[unknown] (23 samples, 0.05%)[unknown] (19 samples, 0.04%)[unknown] (19 samples, 0.04%)[unknown] (18 samples, 0.04%)[unknown] (13 samples, 0.03%)[unknown] (12 samples, 0.03%)[unknown] (5 samples, 0.01%)clone3 (29,548 samples, 69.77%)clone3start_thread (29,548 samples, 69.77%)start_threadstd::sys::pal::unix::thread::Thread::new::thread_start (29,523 samples, 69.71%)std::sys::pal::unix::thread::Thread::new::thread_startcore::ptr::drop_in_place<std::sys::pal::unix::stack_overflow::Handler> (27 samples, 0.06%)<std::sys::pal::unix::stack_overflow::Handler as core::ops::drop::Drop>::drop (27 samples, 0.06%)std::sys::pal::unix::stack_overflow::imp::drop_handler (27 samples, 0.06%)core::fmt::Formatter::pad_integral (7 samples, 0.02%)rand_chacha::guts::round (6 samples, 0.01%)rand_chacha::guts::refill_wide::impl_avx2 (8 samples, 0.02%)rand_chacha::guts::refill_wide::fn_impl (8 samples, 0.02%)rand_chacha::guts::refill_wide_impl (8 
samples, 0.02%)[unknown] (5 samples, 0.01%)core::ptr::drop_in_place<core::result::Result<tokio::runtime::coop::with_budget::ResetGuard,std::thread::local::AccessError>> (5 samples, 0.01%)core::cell::RefCell<T>::borrow_mut (12 samples, 0.03%)core::cell::RefCell<T>::try_borrow_mut (12 samples, 0.03%)core::cell::BorrowRefMut::new (12 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (20 samples, 0.05%)tokio::runtime::coop::budget (20 samples, 0.05%)tokio::runtime::coop::with_budget (20 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (15 samples, 0.04%)std::sys::pal::unix::time::Timespec::now (36 samples, 0.09%)std::sys::pal::unix::time::Timespec::sub_timespec (16 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (12 samples, 0.03%)core::array::<impl core::default::Default for [T: 32]>::default (6 samples, 0.01%)std::sys_common::thread_info::set (19 samples, 0.04%)std::thread::local::LocalKey<T>::with (19 samples, 0.04%)std::thread::local::LocalKey<T>::try_with (19 samples, 0.04%)std::sys_common::thread_info::THREAD_INFO::__getit (19 samples, 0.04%)std::sys::thread_local::fast_local::Key<T>::register_dtor (19 samples, 0.04%)__cxa_thread_atexit_impl (19 samples, 0.04%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (19 samples, 0.04%)syscall (5 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (14 samples, 0.03%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (14 samples, 0.03%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (13 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (13 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::run (13 samples, 0.03%)tokio::runtime::context::runtime::enter_runtime (13 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (13 samples, 
0.03%)tokio::runtime::context::set_scheduler (13 samples, 0.03%)std::thread::local::LocalKey<T>::with (13 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (13 samples, 0.03%)tokio::runtime::context::set_scheduler::{{closure}} (13 samples, 0.03%)tokio::runtime::context::scoped::Scoped<T>::set (13 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (13 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run (13 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (5 samples, 0.01%)tokio::runtime::task::raw::poll (19 samples, 0.04%)tokio::runtime::task::harness::Harness<T,S>::poll (18 samples, 0.04%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (18 samples, 0.04%)tokio::runtime::task::harness::poll_future (18 samples, 0.04%)std::panic::catch_unwind (18 samples, 0.04%)std::panicking::try (18 samples, 0.04%)std::panicking::try::do_call (18 samples, 0.04%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (18 samples, 0.04%)tokio::runtime::task::harness::poll_future::{{closure}} (18 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::poll (18 samples, 0.04%)torrust_tracker::servers::http::v1::routes::router::{{closure}}::__CALLSITE::META (5 samples, 0.01%)__libc_calloc (23 samples, 0.05%)__memcpy_avx512_unaligned_erms (75 samples, 0.18%)_int_free (61 samples, 0.14%)[unknown] (37 samples, 0.09%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE::META (170 samples, 0.40%)__GI___lll_lock_wait_private (39 samples, 0.09%)futex_wait (25 samples, 0.06%)futex_fatal_error (16 samples, 0.04%)__memcpy_avx512_unaligned_erms (90 samples, 0.21%)_int_malloc (8 samples, 0.02%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE (144 samples, 0.34%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (10 samples, 
0.02%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (13 samples, 0.03%)<torrust_tracker::core::torrent::repository::RepositoryAsyncSingle as torrust_tracker::core::torrent::repository::TRepositoryAsync>::update_torrent_with_peer_and_get_stats::{{closure}} (12 samples, 0.03%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (31 samples, 0.07%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (22 samples, 0.05%)torrust_tracker::core::Tracker::announce::{{closure}} (17 samples, 0.04%)<T as alloc::string::ToString>::to_string (5 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (7 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (46 samples, 0.11%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (55 samples, 0.13%)ringbuf::ring_buffer::rb::Rb::push_overwrite (5 samples, 0.01%)__GI___libc_malloc (12 samples, 0.03%)alloc::vec::Vec<T>::with_capacity (21 samples, 0.05%)alloc::vec::Vec<T,A>::with_capacity_in (21 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (21 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (21 samples, 0.05%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (21 samples, 0.05%)alloc::alloc::Global::alloc_impl (21 samples, 0.05%)alloc::alloc::alloc (21 samples, 0.05%)__rdl_alloc (21 samples, 0.05%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (21 samples, 0.05%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (20 samples, 0.05%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (20 samples, 0.05%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (20 samples, 0.05%)_int_malloc (13 samples, 0.03%)tokio::net::udp::UdpSocket::readable::{{closure}} (8 samples, 0.02%)tokio::net::udp::UdpSocket::ready::{{closure}} (7 samples, 0.02%)[unknown] (10 samples, 
0.02%)[unknown] (13 samples, 0.03%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (69 samples, 0.16%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (37 samples, 0.09%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (37 samples, 0.09%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (22 samples, 0.05%)mio::net::udp::UdpSocket::recv_from (21 samples, 0.05%)mio::io_source::IoSource<T>::do_io (21 samples, 0.05%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (21 samples, 0.05%)mio::net::udp::UdpSocket::recv_from::{{closure}} (21 samples, 0.05%)std::net::udp::UdpSocket::recv_from (21 samples, 0.05%)std::sys_common::net::UdpSocket::recv_from (21 samples, 0.05%)std::sys::pal::unix::net::Socket::recv_from (21 samples, 0.05%)std::sys::pal::unix::net::Socket::recv_from_with_flags (21 samples, 0.05%)core::mem::zeroed (8 samples, 0.02%)core::mem::maybe_uninit::MaybeUninit<T>::zeroed (8 samples, 0.02%)core::ptr::mut_ptr::<impl *mut T>::write_bytes (8 samples, 0.02%)core::intrinsics::write_bytes (8 samples, 0.02%)[unknown] (8 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_option_task_without_yield (13 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task (13 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::with_current (13 samples, 0.03%)tokio::runtime::context::with_scheduler (13 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (13 samples, 0.03%)tokio::runtime::context::with_scheduler::{{closure}} (13 samples, 0.03%)tokio::runtime::context::scoped::Scoped<T>::with (13 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (13 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::<impl 
tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (13 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (13 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (13 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (13 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (13 samples, 0.03%)tokio::runtime::driver::Handle::unpark (13 samples, 0.03%)tokio::runtime::driver::IoHandle::unpark (13 samples, 0.03%)tokio::runtime::io::driver::Handle::unpark (13 samples, 0.03%)mio::waker::Waker::wake (13 samples, 0.03%)mio::sys::unix::waker::fdbased::Waker::wake (13 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (12 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (12 samples, 0.03%)tokio::runtime::driver::Handle::unpark (12 samples, 0.03%)tokio::runtime::driver::IoHandle::unpark (12 samples, 0.03%)[unknown] (10 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (105 samples, 0.25%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (105 samples, 0.25%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (22 samples, 0.05%)tokio::task::spawn::spawn (22 samples, 0.05%)tokio::task::spawn::spawn_inner (22 samples, 0.05%)tokio::runtime::context::current::with_current (22 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (22 samples, 0.05%)tokio::runtime::context::current::with_current::{{closure}} (22 samples, 0.05%)core::option::Option<T>::map (22 samples, 0.05%)tokio::task::spawn::spawn_inner::{{closure}} (22 samples, 0.05%)tokio::runtime::scheduler::Handle::spawn (22 samples, 0.05%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (22 
samples, 0.05%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (22 samples, 0.05%)tokio::runtime::task::list::OwnedTasks<S>::bind (9 samples, 0.02%)tokio::runtime::task::new_task (5 samples, 0.01%)tokio::runtime::task::raw::RawTask::new (5 samples, 0.01%)tokio::runtime::task::core::Cell<T,S>::new (5 samples, 0.01%)all (42,352 samples, 100%)tokio-runtime-w (42,171 samples, 99.57%)tokio-runtime-w \ No newline at end of file From fbd046904536c96682dfc8f0accbe92ea754e5a1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 21 Mar 2024 19:13:44 +0000 Subject: [PATCH 0768/1003] fix: revert number of active UDP request to previous value It was pushed accidentally trying different configurations for benchmarking. --- src/servers/udp/server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 98c4bf726..7086b6ab7 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -203,7 +203,7 @@ impl Launcher { #[derive(Default)] struct ActiveRequests { - rb: StaticRb, // the number of requests we handle at the same time. + rb: StaticRb, // the number of requests we handle at the same time. 
} impl std::fmt::Debug for ActiveRequests { From 5c0047aadfb08c05f9ba603fb139b29b69924954 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 25 Mar 2024 11:21:03 +0800 Subject: [PATCH 0769/1003] dev: refactor torrent repository extracted async and sync implementations --- cSpell.json | 1 + .../src/benches/asyn.rs | 41 ++- .../src/benches/sync.rs | 42 +-- .../torrent-repository-benchmarks/src/main.rs | 50 +-- src/core/mod.rs | 8 +- src/core/services/torrent.rs | 1 + src/core/torrent/mod.rs | 7 +- src/core/torrent/repository.rs | 301 ------------------ src/core/torrent/repository_asyn.rs | 188 +++++++++++ src/core/torrent/repository_sync.rs | 122 +++++++ tests/servers/health_check_api/environment.rs | 1 + tests/servers/http/responses/scrape.rs | 4 + 12 files changed, 402 insertions(+), 364 deletions(-) delete mode 100644 src/core/torrent/repository.rs create mode 100644 src/core/torrent/repository_asyn.rs create mode 100644 src/core/torrent/repository_sync.rs diff --git a/cSpell.json b/cSpell.json index 16dff714e..6d8b68c92 100644 --- a/cSpell.json +++ b/cSpell.json @@ -5,6 +5,7 @@ "alekitto", "appuser", "Arvid", + "asyn", "autoclean", "AUTOINCREMENT", "automock", diff --git a/packages/torrent-repository-benchmarks/src/benches/asyn.rs b/packages/torrent-repository-benchmarks/src/benches/asyn.rs index 33f9e85fa..9482d821c 100644 --- a/packages/torrent-repository-benchmarks/src/benches/asyn.rs +++ b/packages/torrent-repository-benchmarks/src/benches/asyn.rs @@ -3,17 +3,20 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository::TRepositoryAsync; +use torrust_tracker::core::torrent::repository_asyn::{RepositoryAsync, RepositoryTokioRwLock}; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; -pub async fn async_add_one_torrent(samples: 
usize) -> (Duration, Duration) { +pub async fn async_add_one_torrent(samples: usize) -> (Duration, Duration) +where + RepositoryTokioRwLock: RepositoryAsync, +{ let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); + let torrent_repository = Arc::new(RepositoryTokioRwLock::::default()); let info_hash = InfoHash([0; 20]); @@ -32,15 +35,16 @@ pub async fn async_add_one_torrent( } // Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn async_update_one_torrent_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) { +pub async fn async_update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryTokioRwLock: RepositoryAsync, +{ let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); + let torrent_repository = Arc::new(RepositoryTokioRwLock::::default()); let info_hash: &'static InfoHash = &InfoHash([0; 20]); let handles = FuturesUnordered::new(); @@ -81,15 +85,16 @@ pub async fn async_update_one_torrent_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) { +pub async fn async_add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryTokioRwLock: RepositoryAsync, +{ let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); + let torrent_repository = Arc::new(RepositoryTokioRwLock::::default()); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); @@ -125,15 +130,19 @@ pub async fn async_add_multiple_torrents_in_parallel( +pub async fn 
async_update_multiple_torrents_in_parallel( runtime: &tokio::runtime::Runtime, samples: usize, -) -> (Duration, Duration) { +) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryTokioRwLock: RepositoryAsync, +{ let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); + let torrent_repository = Arc::new(RepositoryTokioRwLock::::default()); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); diff --git a/packages/torrent-repository-benchmarks/src/benches/sync.rs b/packages/torrent-repository-benchmarks/src/benches/sync.rs index dac7ab810..c37fa9f4a 100644 --- a/packages/torrent-repository-benchmarks/src/benches/sync.rs +++ b/packages/torrent-repository-benchmarks/src/benches/sync.rs @@ -3,7 +3,7 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository::Repository; +use torrust_tracker::core::torrent::repository_sync::{RepositoryStdRwLock, RepositorySync}; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; @@ -11,11 +11,14 @@ use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjuste // Simply add one torrent #[must_use] -pub fn add_one_torrent(samples: usize) -> (Duration, Duration) { +pub fn add_one_torrent(samples: usize) -> (Duration, Duration) +where + RepositoryStdRwLock: RepositorySync, +{ let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); + let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); let info_hash = InfoHash([0; 20]); @@ -32,15 +35,16 @@ pub fn add_one_torrent(samples: usize) -> } // Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, 
Duration) { +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryStdRwLock: RepositorySync, +{ let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); + let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); let info_hash: &'static InfoHash = &InfoHash([0; 20]); let handles = FuturesUnordered::new(); @@ -77,15 +81,16 @@ pub async fn update_one_torrent_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) { +pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryStdRwLock: RepositorySync, +{ let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); + let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); @@ -119,15 +124,16 @@ pub async fn add_multiple_torrents_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) { +pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryStdRwLock: RepositorySync, +{ let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(T::new()); + let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); diff --git a/packages/torrent-repository-benchmarks/src/main.rs b/packages/torrent-repository-benchmarks/src/main.rs index 0d9db73ac..eab8e3803 
100644 --- a/packages/torrent-repository-benchmarks/src/main.rs +++ b/packages/torrent-repository-benchmarks/src/main.rs @@ -7,7 +7,7 @@ use torrust_torrent_repository_benchmarks::benches::asyn::{ use torrust_torrent_repository_benchmarks::benches::sync::{ add_multiple_torrents_in_parallel, add_one_torrent, update_multiple_torrents_in_parallel, update_one_torrent_in_parallel, }; -use torrust_tracker::core::torrent::repository::{AsyncSync, RepositoryAsync, RepositoryAsyncSingle, Sync, SyncSingle}; +use torrust_tracker::core::torrent::{Entry, EntryMutexStd, EntryMutexTokio}; #[allow(clippy::too_many_lines)] #[allow(clippy::print_literal)] @@ -25,67 +25,67 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(async_add_one_torrent::(1_000_000)) + rt.block_on(async_add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) ); if let Some(true) = args.compare { println!(); println!("std::sync::RwLock>"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - add_one_torrent::(1_000_000) - ); + println!("{}: Avg/AdjAvg: {:?}", "add_one_torrent", add_one_torrent::(1_000_000)); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - 
rt.block_on(add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); println!("std::sync::RwLock>>>"); - println!("{}: Avg/AdjAvg: {:?}", "add_one_torrent", add_one_torrent::(1_000_000)); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_one_torrent", + add_one_torrent::(1_000_000) + ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -94,22 +94,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(async_add_one_torrent::(1_000_000)) + rt.block_on(async_add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -118,22 +118,22 @@ fn main() { 
println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(async_add_one_torrent::(1_000_000)) + rt.block_on(async_add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) ); } } diff --git a/src/core/mod.rs b/src/core/mod.rs index dac298462..c392ead75 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -455,7 +455,8 @@ use torrust_tracker_primitives::TrackerMode; use self::auth::Key; use self::error::Error; use self::peer::Peer; -use self::torrent::repository::{RepositoryAsyncSingle, TRepositoryAsync}; +use self::torrent::repository_asyn::{RepositoryAsync, RepositoryTokioRwLock}; +use self::torrent::Entry; use crate::core::databases::Database; use crate::core::torrent::{SwarmMetadata, SwarmStats}; use crate::shared::bit_torrent::info_hash::InfoHash; @@ -481,7 +482,7 @@ pub struct Tracker { policy: TrackerPolicy, keys: tokio::sync::RwLock>, whitelist: tokio::sync::RwLock>, - pub torrents: Arc, + pub torrents: Arc>, stats_event_sender: Option>, stats_repository: statistics::Repo, external_ip: Option, @@ -579,7 +580,7 @@ impl Tracker { mode, keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), - torrents: Arc::new(RepositoryAsyncSingle::new()), + torrents: Arc::new(RepositoryTokioRwLock::::default()), stats_event_sender, stats_repository, database, @@ -1754,6 +1755,7 @@ mod tests { use 
aquatic_udp_protocol::AnnounceEvent; use crate::core::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; + use crate::core::torrent::repository_asyn::RepositoryAsync; #[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index fc24e7c4c..eca6cbf3b 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -9,6 +9,7 @@ use std::sync::Arc; use serde::Deserialize; use crate::core::peer::Peer; +use crate::core::torrent::repository_asyn::RepositoryAsync; use crate::core::Tracker; use crate::shared::bit_torrent::info_hash::InfoHash; diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index c4a1b0df9..b5ebb1054 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -28,8 +28,10 @@ //! Peer that don not have a full copy of the torrent data are called "leechers". //! //! > **NOTICE**: that both [`SwarmMetadata`] and [`SwarmStats`] contain the same information. [`SwarmMetadata`] is using the names used on [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). -pub mod repository; +pub mod repository_asyn; +pub mod repository_sync; +use std::sync::Arc; use std::time::Duration; use aquatic_udp_protocol::AnnounceEvent; @@ -53,6 +55,9 @@ pub struct Entry { pub completed: u32, } +pub type EntryMutexTokio = Arc>; +pub type EntryMutexStd = Arc>; + /// Swarm statistics for one torrent. /// Swarm metadata dictionary in the scrape response. 
/// diff --git a/src/core/torrent/repository.rs b/src/core/torrent/repository.rs deleted file mode 100644 index d4f8ee5e3..000000000 --- a/src/core/torrent/repository.rs +++ /dev/null @@ -1,301 +0,0 @@ -use std::sync::Arc; - -use crate::core::peer; -use crate::core::torrent::{Entry, SwarmStats}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -pub trait Repository { - fn new() -> Self; - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool); -} - -pub trait TRepositoryAsync { - fn new() -> Self; - fn update_torrent_with_peer_and_get_stats( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - ) -> impl std::future::Future + Send; -} - -/// Structure that holds all torrents. Using `std::sync` locks. -pub struct Sync { - torrents: std::sync::RwLock>>>, -} - -impl Sync { - /// Returns the get torrents of this [`Sync`]. - /// - /// # Panics - /// - /// Panics if unable to read the torrent. - pub fn get_torrents( - &self, - ) -> std::sync::RwLockReadGuard<'_, std::collections::BTreeMap>>> { - self.torrents.read().expect("unable to get torrent list") - } - - /// Returns the mutable get torrents of this [`Sync`]. - /// - /// # Panics - /// - /// Panics if unable to write to the torrents list. 
- pub fn get_torrents_mut( - &self, - ) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap>>> { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl Repository for Sync { - fn new() -> Self { - Self { - torrents: std::sync::RwLock::new(std::collections::BTreeMap::new()), - } - } - - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut(); - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().unwrap(); - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -/// Structure that holds all torrents. Using `std::sync` locks. -pub struct SyncSingle { - torrents: std::sync::RwLock>, -} - -impl SyncSingle { - /// Returns the get torrents of this [`SyncSingle`]. - /// - /// # Panics - /// - /// Panics if unable to get torrent list. - pub fn get_torrents(&self) -> std::sync::RwLockReadGuard<'_, std::collections::BTreeMap> { - self.torrents.read().expect("unable to get torrent list") - } - - /// Returns the get torrents of this [`SyncSingle`]. - /// - /// # Panics - /// - /// Panics if unable to get writable torrent list. 
- pub fn get_torrents_mut(&self) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl Repository for SyncSingle { - fn new() -> Self { - Self { - torrents: std::sync::RwLock::new(std::collections::BTreeMap::new()), - } - } - - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let mut torrents = self.torrents.write().unwrap(); - - let torrent_entry = match torrents.entry(*info_hash) { - std::collections::btree_map::Entry::Vacant(vacant) => vacant.insert(Entry::new()), - std::collections::btree_map::Entry::Occupied(entry) => entry.into_mut(), - }; - - let stats_updated = torrent_entry.insert_or_update_peer(peer); - let stats = torrent_entry.get_stats(); - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -/// Structure that holds all torrents. Using `tokio::sync` locks. 
-#[allow(clippy::module_name_repetitions)] -pub struct RepositoryAsync { - torrents: tokio::sync::RwLock>>>, -} - -impl TRepositoryAsync for RepositoryAsync { - fn new() -> Self { - Self { - torrents: tokio::sync::RwLock::new(std::collections::BTreeMap::new()), - } - } - - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut().await; - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(tokio::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().await; - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl RepositoryAsync { - pub async fn get_torrents( - &self, - ) -> tokio::sync::RwLockReadGuard<'_, std::collections::BTreeMap>>> { - self.torrents.read().await - } - - pub async fn get_torrents_mut( - &self, - ) -> tokio::sync::RwLockWriteGuard<'_, std::collections::BTreeMap>>> { - self.torrents.write().await - } -} - -/// Structure that holds all torrents. Using a `tokio::sync` lock for the torrents map an`std::sync`nc lock for the inner torrent entry. 
-pub struct AsyncSync { - torrents: tokio::sync::RwLock>>>, -} - -impl TRepositoryAsync for AsyncSync { - fn new() -> Self { - Self { - torrents: tokio::sync::RwLock::new(std::collections::BTreeMap::new()), - } - } - - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut().await; - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().unwrap(); - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl AsyncSync { - pub async fn get_torrents( - &self, - ) -> tokio::sync::RwLockReadGuard<'_, std::collections::BTreeMap>>> { - self.torrents.read().await - } - - pub async fn get_torrents_mut( - &self, - ) -> tokio::sync::RwLockWriteGuard<'_, std::collections::BTreeMap>>> { - self.torrents.write().await - } -} - -#[allow(clippy::module_name_repetitions)] -pub struct RepositoryAsyncSingle { - torrents: tokio::sync::RwLock>, -} - -impl TRepositoryAsync for RepositoryAsyncSingle { - fn new() -> Self { - Self { - torrents: tokio::sync::RwLock::new(std::collections::BTreeMap::new()), - } - } - - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let (stats, stats_updated) = { - let mut torrents_lock = self.torrents.write().await; - let torrent_entry = 
torrents_lock.entry(*info_hash).or_insert(Entry::new()); - let stats_updated = torrent_entry.insert_or_update_peer(peer); - let stats = torrent_entry.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl RepositoryAsyncSingle { - pub async fn get_torrents(&self) -> tokio::sync::RwLockReadGuard<'_, std::collections::BTreeMap> { - self.torrents.read().await - } - - pub async fn get_torrents_mut(&self) -> tokio::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { - self.torrents.write().await - } -} diff --git a/src/core/torrent/repository_asyn.rs b/src/core/torrent/repository_asyn.rs new file mode 100644 index 000000000..ac3724c3b --- /dev/null +++ b/src/core/torrent/repository_asyn.rs @@ -0,0 +1,188 @@ +use std::sync::Arc; + +use super::{EntryMutexStd, EntryMutexTokio}; +use crate::core::peer; +use crate::core::torrent::{Entry, SwarmStats}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +pub trait RepositoryAsync: Default { + fn update_torrent_with_peer_and_get_stats( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + ) -> impl std::future::Future + Send; + + fn get_torrents<'a>( + &'a self, + ) -> impl std::future::Future>> + Send + where + std::collections::BTreeMap: 'a; + + fn get_torrents_mut<'a>( + &'a self, + ) -> impl std::future::Future>> + Send + where + std::collections::BTreeMap: 'a; +} + +pub struct RepositoryTokioRwLock { + torrents: tokio::sync::RwLock>, +} + +impl RepositoryAsync for RepositoryTokioRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); + + let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut().await; + let entry = 
torrents_lock + .entry(*info_hash) + .or_insert(Arc::new(tokio::sync::Mutex::new(Entry::new()))); + entry.clone() + }; + + let (stats, stats_updated) = { + let mut torrent_entry_lock = torrent_entry.lock().await; + let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); + let stats = torrent_entry_lock.get_stats(); + + (stats, stats_updated) + }; + + ( + SwarmStats { + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, + }, + stats_updated, + ) + } + + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl Default for RepositoryTokioRwLock { + fn default() -> Self { + Self { + torrents: tokio::sync::RwLock::default(), + } + } +} + +impl RepositoryAsync for RepositoryTokioRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); + + let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut().await; + let entry = torrents_lock + .entry(*info_hash) + .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); + entry.clone() + }; + + let (stats, stats_updated) = { + let mut torrent_entry_lock = torrent_entry.lock().unwrap(); + let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); + let stats = torrent_entry_lock.get_stats(); + + (stats, stats_updated) + }; + + ( + SwarmStats { + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, + }, + stats_updated, + ) + } + + async fn get_torrents<'a>(&'a self) -> 
tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl Default for RepositoryTokioRwLock { + fn default() -> Self { + Self { + torrents: tokio::sync::RwLock::default(), + } + } +} + +impl RepositoryAsync for RepositoryTokioRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let (stats, stats_updated) = { + let mut torrents_lock = self.torrents.write().await; + let torrent_entry = torrents_lock.entry(*info_hash).or_insert(Entry::new()); + let stats_updated = torrent_entry.insert_or_update_peer(peer); + let stats = torrent_entry.get_stats(); + + (stats, stats_updated) + }; + + ( + SwarmStats { + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, + }, + stats_updated, + ) + } + + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>(&'a self) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl Default for RepositoryTokioRwLock { + fn default() -> Self { + Self { + torrents: tokio::sync::RwLock::default(), + } + } +} diff --git a/src/core/torrent/repository_sync.rs b/src/core/torrent/repository_sync.rs new file mode 100644 index 000000000..76fc36fa2 --- /dev/null +++ b/src/core/torrent/repository_sync.rs @@ -0,0 +1,122 @@ +use std::sync::{Arc, RwLock}; + +use super::EntryMutexStd; +use crate::core::peer; +use crate::core::torrent::{Entry, SwarmStats}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +pub trait 
RepositorySync: Default { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool); + + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a; + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a; +} + +pub struct RepositoryStdRwLock { + torrents: std::sync::RwLock>, +} + +impl RepositorySync for RepositoryStdRwLock { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); + + let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut(); + let entry = torrents_lock + .entry(*info_hash) + .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); + entry.clone() + }; + + let (stats, stats_updated) = { + let mut torrent_entry_lock = torrent_entry.lock().unwrap(); + let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); + let stats = torrent_entry_lock.get_stats(); + + (stats, stats_updated) + }; + + ( + SwarmStats { + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, + }, + stats_updated, + ) + } + + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl Default for RepositoryStdRwLock { + fn default() -> Self { + Self { + torrents: RwLock::default(), + } + } +} + +impl RepositorySync for 
RepositoryStdRwLock { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let mut torrents = self.torrents.write().unwrap(); + + let torrent_entry = match torrents.entry(*info_hash) { + std::collections::btree_map::Entry::Vacant(vacant) => vacant.insert(Entry::new()), + std::collections::btree_map::Entry::Occupied(entry) => entry.into_mut(), + }; + + let stats_updated = torrent_entry.insert_or_update_peer(peer); + let stats = torrent_entry.get_stats(); + + ( + SwarmStats { + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, + }, + stats_updated, + ) + } + + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl Default for RepositoryStdRwLock { + fn default() -> Self { + Self { + torrents: RwLock::default(), + } + } +} diff --git a/tests/servers/health_check_api/environment.rs b/tests/servers/health_check_api/environment.rs index 37344858d..0856985d5 100644 --- a/tests/servers/health_check_api/environment.rs +++ b/tests/servers/health_check_api/environment.rs @@ -12,6 +12,7 @@ use torrust_tracker_configuration::HealthCheckApi; #[derive(Debug)] pub enum Error { + #[allow(dead_code)] Error(String), } diff --git a/tests/servers/http/responses/scrape.rs b/tests/servers/http/responses/scrape.rs index eadecb603..fc741cbf4 100644 --- a/tests/servers/http/responses/scrape.rs +++ b/tests/servers/http/responses/scrape.rs @@ -73,9 +73,13 @@ impl ResponseBuilder { #[derive(Debug)] pub enum BencodeParseError { + #[allow(dead_code)] InvalidValueExpectedDict { value: Value }, + #[allow(dead_code)] InvalidValueExpectedInt { 
value: Value }, + #[allow(dead_code)] InvalidFileField { value: Value }, + #[allow(dead_code)] MissingFileField { field_name: String }, } From 48ce42624dea8321d93375a7f57b37aeab3280ed Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 10 Feb 2024 07:31:20 +0800 Subject: [PATCH 0770/1003] dev: bench torrent/repository add sync_asyn variant --- .../src/benches/asyn.rs | 20 +- .../src/benches/mod.rs | 1 + .../src/benches/sync.rs | 9 +- .../src/benches/sync_asyn.rs | 185 ++++++++++++++++++ .../torrent-repository-benchmarks/src/main.rs | 76 ++++--- src/core/mod.rs | 2 +- src/core/torrent/mod.rs | 13 ++ src/core/torrent/repository_asyn.rs | 21 +- src/core/torrent/repository_sync.rs | 69 ++++++- 9 files changed, 335 insertions(+), 61 deletions(-) create mode 100644 packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs diff --git a/packages/torrent-repository-benchmarks/src/benches/asyn.rs b/packages/torrent-repository-benchmarks/src/benches/asyn.rs index 9482d821c..d36de9695 100644 --- a/packages/torrent-repository-benchmarks/src/benches/asyn.rs +++ b/packages/torrent-repository-benchmarks/src/benches/asyn.rs @@ -4,14 +4,15 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; use torrust_tracker::core::torrent::repository_asyn::{RepositoryAsync, RepositoryTokioRwLock}; +use torrust_tracker::core::torrent::UpdateTorrentAsync; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; -pub async fn async_add_one_torrent(samples: usize) -> (Duration, Duration) +pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) where - RepositoryTokioRwLock: RepositoryAsync, + RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, { let mut results: Vec = Vec::with_capacity(samples); @@ -35,10 +36,10 @@ where } // Add one torrent ten thousand times in parallel (depending on 
the set worker threads) -pub async fn async_update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryTokioRwLock: RepositoryAsync, + RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -85,10 +86,10 @@ where } // Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn async_add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryTokioRwLock: RepositoryAsync, + RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -130,13 +131,10 @@ where } // Async update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn async_update_multiple_torrents_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: usize, -) -> (Duration, Duration) +pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryTokioRwLock: RepositoryAsync, + RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); diff --git a/packages/torrent-repository-benchmarks/src/benches/mod.rs b/packages/torrent-repository-benchmarks/src/benches/mod.rs index 1026aa4bf..7450f4bcc 100644 --- a/packages/torrent-repository-benchmarks/src/benches/mod.rs +++ b/packages/torrent-repository-benchmarks/src/benches/mod.rs @@ -1,3 +1,4 @@ pub mod asyn; pub mod 
sync; +pub mod sync_asyn; pub mod utils; diff --git a/packages/torrent-repository-benchmarks/src/benches/sync.rs b/packages/torrent-repository-benchmarks/src/benches/sync.rs index c37fa9f4a..3dee93421 100644 --- a/packages/torrent-repository-benchmarks/src/benches/sync.rs +++ b/packages/torrent-repository-benchmarks/src/benches/sync.rs @@ -4,6 +4,7 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; use torrust_tracker::core::torrent::repository_sync::{RepositoryStdRwLock, RepositorySync}; +use torrust_tracker::core::torrent::UpdateTorrentSync; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; @@ -13,7 +14,7 @@ use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjuste #[must_use] pub fn add_one_torrent(samples: usize) -> (Duration, Duration) where - RepositoryStdRwLock: RepositorySync, + RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, { let mut results: Vec = Vec::with_capacity(samples); @@ -38,7 +39,7 @@ where pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync, + RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -84,7 +85,7 @@ where pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync, + RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -127,7 +128,7 @@ where pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync, + RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, { let 
args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); diff --git a/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs b/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs new file mode 100644 index 000000000..11ce6ed0c --- /dev/null +++ b/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs @@ -0,0 +1,185 @@ +use std::sync::Arc; +use std::time::Duration; + +use clap::Parser; +use futures::stream::FuturesUnordered; +use torrust_tracker::core::torrent::repository_sync::{RepositoryStdRwLock, RepositorySync}; +use torrust_tracker::core::torrent::UpdateTorrentAsync; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + +use crate::args::Args; +use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; + +// Simply add one torrent +#[must_use] +pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) +where + RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, +{ + let mut results: Vec = Vec::with_capacity(samples); + + for _ in 0..samples { + let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); + + let info_hash = InfoHash([0; 20]); + + let start_time = std::time::Instant::now(); + + torrent_repository + .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) + .await; + + let result = start_time.elapsed(); + + results.push(result); + } + + get_average_and_adjusted_average_from_results(results) +} + +// Add one torrent ten thousand times in parallel (depending on the set worker threads) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, +{ + let args = Args::parse(); + let mut results: Vec = Vec::with_capacity(samples); + + for _ in 0..samples { + let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); + let info_hash: 
&'static InfoHash = &InfoHash([0; 20]); + let handles = FuturesUnordered::new(); + + // Add the torrent/peer to the torrent repository + torrent_repository + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; + + let start_time = std::time::Instant::now(); + + for _ in 0..10_000 { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; + + if let Some(sleep_time) = args.sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + let result = start_time.elapsed(); + + results.push(result); + } + + get_average_and_adjusted_average_from_results(results) +} + +// Add ten thousand torrents in parallel (depending on the set worker threads) +pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, +{ + let args = Args::parse(); + let mut results: Vec = Vec::with_capacity(samples); + + for _ in 0..samples { + let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); + let info_hashes = generate_unique_info_hashes(10_000); + let handles = FuturesUnordered::new(); + + let start_time = std::time::Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) + .await; + + if let Some(sleep_time) = args.sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await 
all tasks + futures::future::join_all(handles).await; + + let result = start_time.elapsed(); + + results.push(result); + } + + get_average_and_adjusted_average_from_results(results) +} + +// Update ten thousand torrents in parallel (depending on the set worker threads) +pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +where + T: Send + Sync + 'static, + RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, +{ + let args = Args::parse(); + let mut results: Vec = Vec::with_capacity(samples); + + for _ in 0..samples { + let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); + let info_hashes = generate_unique_info_hashes(10_000); + let handles = FuturesUnordered::new(); + + // Add the torrents/peers to the torrent repository + for info_hash in &info_hashes { + torrent_repository + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; + } + + let start_time = std::time::Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) + .await; + + if let Some(sleep_time) = args.sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + let result = start_time.elapsed(); + + results.push(result); + } + + get_average_and_adjusted_average_from_results(results) +} diff --git a/packages/torrent-repository-benchmarks/src/main.rs b/packages/torrent-repository-benchmarks/src/main.rs index eab8e3803..4a293b832 100644 --- a/packages/torrent-repository-benchmarks/src/main.rs +++ b/packages/torrent-repository-benchmarks/src/main.rs @@ -1,12 +1,6 @@ use clap::Parser; use 
torrust_torrent_repository_benchmarks::args::Args; -use torrust_torrent_repository_benchmarks::benches::asyn::{ - async_add_multiple_torrents_in_parallel, async_add_one_torrent, async_update_multiple_torrents_in_parallel, - async_update_one_torrent_in_parallel, -}; -use torrust_torrent_repository_benchmarks::benches::sync::{ - add_multiple_torrents_in_parallel, add_one_torrent, update_multiple_torrents_in_parallel, update_one_torrent_in_parallel, -}; +use torrust_torrent_repository_benchmarks::benches::{asyn, sync, sync_asyn}; use torrust_tracker::core::torrent::{Entry, EntryMutexStd, EntryMutexTokio}; #[allow(clippy::too_many_lines)] @@ -25,43 +19,47 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(async_add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); if let Some(true) = args.compare { println!(); println!("std::sync::RwLock>"); - println!("{}: Avg/AdjAvg: {:?}", "add_one_torrent", add_one_torrent::(1_000_000)); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_one_torrent", + sync::add_one_torrent::(1_000_000) + ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - 
rt.block_on(add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -70,22 +68,46 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - add_one_torrent::(1_000_000) + sync::add_one_torrent::(1_000_000) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "update_one_torrent_in_parallel", + rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_multiple_torrents_in_parallel", + rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) + ); + println!( + "{}: Avg/AdjAvg: {:?}", + "update_multiple_torrents_in_parallel", + rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) + ); + + println!(); + + println!("std::sync::RwLock>>>"); + println!( + "{}: Avg/AdjAvg: {:?}", + "add_one_torrent", + rt.block_on(sync_asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(sync_asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync_asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync_asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -94,22 +116,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(async_add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", 
"update_one_torrent_in_parallel", - rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -118,22 +140,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(async_add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(async_update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(async_add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(async_update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); } } diff --git a/src/core/mod.rs b/src/core/mod.rs index c392ead75..56b30f955 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -456,7 +456,7 @@ use self::auth::Key; use self::error::Error; use self::peer::Peer; use self::torrent::repository_asyn::{RepositoryAsync, RepositoryTokioRwLock}; -use self::torrent::Entry; +use self::torrent::{Entry, UpdateTorrentAsync}; use crate::core::databases::Database; use crate::core::torrent::{SwarmMetadata, SwarmStats}; use crate::shared::bit_torrent::info_hash::InfoHash; diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index 
b5ebb1054..49c1f61f8 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -39,8 +39,21 @@ use derive_more::Constructor; use serde::{Deserialize, Serialize}; use super::peer::{self, Peer}; +use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::clock::{Current, TimeNow}; +pub trait UpdateTorrentSync { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool); +} + +pub trait UpdateTorrentAsync { + fn update_torrent_with_peer_and_get_stats( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + ) -> impl std::future::Future + Send; +} + /// A data structure containing all the information about a torrent in the tracker. /// /// This is the tracker entry for a given torrent and contains the swarm data, diff --git a/src/core/torrent/repository_asyn.rs b/src/core/torrent/repository_asyn.rs index ac3724c3b..ad10f85b4 100644 --- a/src/core/torrent/repository_asyn.rs +++ b/src/core/torrent/repository_asyn.rs @@ -1,17 +1,11 @@ use std::sync::Arc; -use super::{EntryMutexStd, EntryMutexTokio}; +use super::{EntryMutexStd, EntryMutexTokio, UpdateTorrentAsync}; use crate::core::peer; use crate::core::torrent::{Entry, SwarmStats}; use crate::shared::bit_torrent::info_hash::InfoHash; pub trait RepositoryAsync: Default { - fn update_torrent_with_peer_and_get_stats( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - ) -> impl std::future::Future + Send; - fn get_torrents<'a>( &'a self, ) -> impl std::future::Future>> + Send @@ -28,8 +22,7 @@ pub trait RepositoryAsync: Default { pub struct RepositoryTokioRwLock { torrents: tokio::sync::RwLock>, } - -impl RepositoryAsync for RepositoryTokioRwLock { +impl UpdateTorrentAsync for RepositoryTokioRwLock { async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); @@ -60,7 +53,9 @@ impl RepositoryAsync 
for RepositoryTokioRwLock stats_updated, ) } +} +impl RepositoryAsync for RepositoryTokioRwLock { async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> where std::collections::BTreeMap: 'a, @@ -86,7 +81,7 @@ impl Default for RepositoryTokioRwLock { } } -impl RepositoryAsync for RepositoryTokioRwLock { +impl UpdateTorrentAsync for RepositoryTokioRwLock { async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); @@ -117,7 +112,9 @@ impl RepositoryAsync for RepositoryTokioRwLock { stats_updated, ) } +} +impl RepositoryAsync for RepositoryTokioRwLock { async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> where std::collections::BTreeMap: 'a, @@ -143,7 +140,7 @@ impl Default for RepositoryTokioRwLock { } } -impl RepositoryAsync for RepositoryTokioRwLock { +impl UpdateTorrentAsync for RepositoryTokioRwLock { async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { let (stats, stats_updated) = { let mut torrents_lock = self.torrents.write().await; @@ -163,7 +160,9 @@ impl RepositoryAsync for RepositoryTokioRwLock { stats_updated, ) } +} +impl RepositoryAsync for RepositoryTokioRwLock { async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> where std::collections::BTreeMap: 'a, diff --git a/src/core/torrent/repository_sync.rs b/src/core/torrent/repository_sync.rs index 76fc36fa2..3b01eb8be 100644 --- a/src/core/torrent/repository_sync.rs +++ b/src/core/torrent/repository_sync.rs @@ -1,13 +1,11 @@ use std::sync::{Arc, RwLock}; -use super::EntryMutexStd; +use super::{EntryMutexStd, EntryMutexTokio, UpdateTorrentAsync, UpdateTorrentSync}; use crate::core::peer; use crate::core::torrent::{Entry, SwarmStats}; use 
crate::shared::bit_torrent::info_hash::InfoHash; pub trait RepositorySync: Default { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool); - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> where std::collections::BTreeMap: 'a; @@ -21,7 +19,62 @@ pub struct RepositoryStdRwLock { torrents: std::sync::RwLock>, } -impl RepositorySync for RepositoryStdRwLock { +impl UpdateTorrentAsync for RepositoryStdRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { + let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); + + let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut(); + let entry = torrents_lock + .entry(*info_hash) + .or_insert(Arc::new(tokio::sync::Mutex::new(Entry::new()))); + entry.clone() + }; + + let (stats, stats_updated) = { + let mut torrent_entry_lock = torrent_entry.lock().await; + let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); + let stats = torrent_entry_lock.get_stats(); + + (stats, stats_updated) + }; + + ( + SwarmStats { + downloaded: stats.1, + complete: stats.0, + incomplete: stats.2, + }, + stats_updated, + ) + } +} +impl RepositorySync for RepositoryStdRwLock { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl Default for RepositoryStdRwLock { + fn default() -> Self { + Self { + torrents: RwLock::default(), + } + } +} +impl 
UpdateTorrentSync for RepositoryStdRwLock { fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); @@ -52,7 +105,8 @@ impl RepositorySync for RepositoryStdRwLock { stats_updated, ) } - +} +impl RepositorySync for RepositoryStdRwLock { fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> where std::collections::BTreeMap: 'a, @@ -76,7 +130,7 @@ impl Default for RepositoryStdRwLock { } } -impl RepositorySync for RepositoryStdRwLock { +impl UpdateTorrentSync for RepositoryStdRwLock { fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { let mut torrents = self.torrents.write().unwrap(); @@ -97,7 +151,8 @@ impl RepositorySync for RepositoryStdRwLock { stats_updated, ) } - +} +impl RepositorySync for RepositoryStdRwLock { fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> where std::collections::BTreeMap: 'a, From 1025125572b99504b0b882d7b54e7179d4ef25e9 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 10 Feb 2024 16:21:37 +0800 Subject: [PATCH 0771/1003] dev: create torrent repo trait and extract entry --- Cargo.lock | 4 +- cSpell.json | 1 + .../src/benches/asyn.rs | 19 +- .../src/benches/sync.rs | 12 +- .../src/benches/sync_asyn.rs | 12 +- .../torrent-repository-benchmarks/src/main.rs | 34 +- src/core/databases/mod.rs | 4 +- src/core/mod.rs | 154 ++----- src/core/peer.rs | 53 +++ src/core/services/torrent.rs | 55 ++- src/core/torrent/entry.rs | 241 +++++++++++ src/core/torrent/mod.rs | 219 ++-------- src/core/torrent/repository/mod.rs | 30 ++ src/core/torrent/repository/std_sync.rs | 365 +++++++++++++++++ src/core/torrent/repository/tokio_sync.rs | 378 ++++++++++++++++++ src/core/torrent/repository_asyn.rs | 187 --------- src/core/torrent/repository_sync.rs | 177 -------- 
.../apis/v1/context/torrent/handlers.rs | 2 +- src/servers/http/v1/responses/announce.rs | 11 +- src/servers/http/v1/services/announce.rs | 4 +- src/servers/udp/handlers.rs | 7 +- 21 files changed, 1222 insertions(+), 747 deletions(-) create mode 100644 src/core/torrent/entry.rs create mode 100644 src/core/torrent/repository/mod.rs create mode 100644 src/core/torrent/repository/std_sync.rs create mode 100644 src/core/torrent/repository/tokio_sync.rs delete mode 100644 src/core/torrent/repository_asyn.rs delete mode 100644 src/core/torrent/repository_sync.rs diff --git a/Cargo.lock b/Cargo.lock index 5722032b8..26fb919af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3086,9 +3086,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" diff --git a/cSpell.json b/cSpell.json index 6d8b68c92..da11cd29a 100644 --- a/cSpell.json +++ b/cSpell.json @@ -36,6 +36,7 @@ "Containerfile", "curr", "Cyberneering", + "dashmap", "datagram", "datetime", "debuginfo", diff --git a/packages/torrent-repository-benchmarks/src/benches/asyn.rs b/packages/torrent-repository-benchmarks/src/benches/asyn.rs index d36de9695..737a99f3c 100644 --- a/packages/torrent-repository-benchmarks/src/benches/asyn.rs +++ b/packages/torrent-repository-benchmarks/src/benches/asyn.rs @@ -3,8 +3,8 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository_asyn::{RepositoryAsync, RepositoryTokioRwLock}; -use torrust_tracker::core::torrent::UpdateTorrentAsync; +use torrust_tracker::core::torrent::repository::tokio_sync::RepositoryTokioRwLock; +use torrust_tracker::core::torrent::repository::UpdateTorrentAsync; use 
torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; @@ -12,7 +12,8 @@ use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjuste pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) where - RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, + T: Default, + RepositoryTokioRwLock: UpdateTorrentAsync + Default, { let mut results: Vec = Vec::with_capacity(samples); @@ -38,8 +39,8 @@ where // Add one torrent ten thousand times in parallel (depending on the set worker threads) pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Send + Sync + 'static, - RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, + T: Default + Send + Sync + 'static, + RepositoryTokioRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -88,8 +89,8 @@ where // Add ten thousand torrents in parallel (depending on the set worker threads) pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Send + Sync + 'static, - RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, + T: Default + Send + Sync + 'static, + RepositoryTokioRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -133,8 +134,8 @@ where // Async update ten thousand torrents in parallel (depending on the set worker threads) pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Send + Sync + 'static, - RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, + T: Default + Send + Sync + 'static, + RepositoryTokioRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); diff --git 
a/packages/torrent-repository-benchmarks/src/benches/sync.rs b/packages/torrent-repository-benchmarks/src/benches/sync.rs index 3dee93421..ea694a38c 100644 --- a/packages/torrent-repository-benchmarks/src/benches/sync.rs +++ b/packages/torrent-repository-benchmarks/src/benches/sync.rs @@ -3,8 +3,8 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository_sync::{RepositoryStdRwLock, RepositorySync}; -use torrust_tracker::core::torrent::UpdateTorrentSync; +use torrust_tracker::core::torrent::repository::std_sync::RepositoryStdRwLock; +use torrust_tracker::core::torrent::repository::UpdateTorrentSync; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; @@ -14,7 +14,7 @@ use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjuste #[must_use] pub fn add_one_torrent(samples: usize) -> (Duration, Duration) where - RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, + RepositoryStdRwLock: UpdateTorrentSync + Default, { let mut results: Vec = Vec::with_capacity(samples); @@ -39,7 +39,7 @@ where pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, + RepositoryStdRwLock: UpdateTorrentSync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -85,7 +85,7 @@ where pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, + RepositoryStdRwLock: UpdateTorrentSync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -128,7 +128,7 @@ where pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) 
where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, + RepositoryStdRwLock: UpdateTorrentSync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); diff --git a/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs b/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs index 11ce6ed0c..8efed9856 100644 --- a/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs +++ b/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs @@ -3,8 +3,8 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository_sync::{RepositoryStdRwLock, RepositorySync}; -use torrust_tracker::core::torrent::UpdateTorrentAsync; +use torrust_tracker::core::torrent::repository::std_sync::RepositoryStdRwLock; +use torrust_tracker::core::torrent::repository::UpdateTorrentAsync; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; @@ -14,7 +14,7 @@ use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjuste #[must_use] pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) where - RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, + RepositoryStdRwLock: UpdateTorrentAsync + Default, { let mut results: Vec = Vec::with_capacity(samples); @@ -41,7 +41,7 @@ where pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, + RepositoryStdRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -91,7 +91,7 @@ where pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, + 
RepositoryStdRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -136,7 +136,7 @@ where pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, + RepositoryStdRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); diff --git a/packages/torrent-repository-benchmarks/src/main.rs b/packages/torrent-repository-benchmarks/src/main.rs index 4a293b832..d7291afe2 100644 --- a/packages/torrent-repository-benchmarks/src/main.rs +++ b/packages/torrent-repository-benchmarks/src/main.rs @@ -1,7 +1,7 @@ use clap::Parser; use torrust_torrent_repository_benchmarks::args::Args; use torrust_torrent_repository_benchmarks::benches::{asyn, sync, sync_asyn}; -use torrust_tracker::core::torrent::{Entry, EntryMutexStd, EntryMutexTokio}; +use torrust_tracker::core::torrent::entry::{Entry, MutexStd, MutexTokio}; #[allow(clippy::too_many_lines)] #[allow(clippy::print_literal)] @@ -68,22 +68,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - sync::add_one_torrent::(1_000_000) + sync::add_one_torrent::(1_000_000) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -92,22 +92,22 @@ fn main() { println!( "{}: 
Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(sync_asyn::add_one_torrent::(1_000_000)) + rt.block_on(sync_asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(sync_asyn::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(sync_asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(sync_asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync_asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(sync_asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync_asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -116,22 +116,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -140,22 +140,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) + 
rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); } } diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs index b80b11987..b3dcdd48e 100644 --- a/src/core/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -56,6 +56,8 @@ use self::error::Error; use crate::core::auth::{self, Key}; use crate::shared::bit_torrent::info_hash::InfoHash; +pub type PersistentTorrents = Vec<(InfoHash, u32)>; + struct Builder where T: Database, @@ -125,7 +127,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - async fn load_persistent_torrents(&self) -> Result, Error>; + async fn load_persistent_torrents(&self) -> Result; /// It saves the torrent metrics data into the database. /// diff --git a/src/core/mod.rs b/src/core/mod.rs index 56b30f955..b070f90db 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -102,11 +102,11 @@ //! //! pub struct AnnounceData { //! pub peers: Vec, -//! pub swarm_stats: SwarmStats, +//! pub swarm_stats: SwarmMetadata, //! pub policy: AnnouncePolicy, // the tracker announce policy. //! } //! -//! pub struct SwarmStats { +//! pub struct SwarmMetadata { //! pub completed: u32, // The number of peers that have ever completed downloading //! pub seeders: u32, // The number of active peers that have completed downloading (seeders) //! pub leechers: u32, // The number of active peers that have not completed downloading (leechers) @@ -232,16 +232,11 @@ //! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) //! } //! -//! 
pub struct SwarmStats { -//! pub completed: u32, // The number of peers that have ever completed downloading -//! pub seeders: u32, // The number of active peers that have completed downloading (seeders) -//! pub leechers: u32, // The number of active peers that have not completed downloading (leechers) -//! } //! ``` //! //! > **NOTICE**: that `complete` or `completed` peers are the peers that have completed downloading, but only the active ones are considered "seeders". //! -//! `SwarmStats` struct follows name conventions for `scrape` responses. See [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmStats` +//! `SwarmMetadata` struct follows name conventions for `scrape` responses. See [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmMetadata` //! is used for the rest of cases. //! //! Refer to [`torrent`] module for more details about these data structures. @@ -439,14 +434,13 @@ pub mod services; pub mod statistics; pub mod torrent; -use std::collections::{BTreeMap, HashMap}; +use std::collections::HashMap; use std::net::IpAddr; use std::panic::Location; use std::sync::Arc; use std::time::Duration; use derive_more::Constructor; -use futures::future::join_all; use log::debug; use tokio::sync::mpsc::error::SendError; use torrust_tracker_configuration::{AnnouncePolicy, Configuration}; @@ -455,10 +449,11 @@ use torrust_tracker_primitives::TrackerMode; use self::auth::Key; use self::error::Error; use self::peer::Peer; -use self::torrent::repository_asyn::{RepositoryAsync, RepositoryTokioRwLock}; -use self::torrent::{Entry, UpdateTorrentAsync}; +use self::torrent::entry::{Entry, ReadInfo, ReadPeers}; +use self::torrent::repository::tokio_sync::RepositoryTokioRwLock; +use self::torrent::repository::{Repository, UpdateTorrentAsync}; use crate::core::databases::Database; -use crate::core::torrent::{SwarmMetadata, SwarmStats}; +use crate::core::torrent::SwarmMetadata; use crate::shared::bit_torrent::info_hash::InfoHash; /// The 
maximum number of returned peers for a torrent. @@ -515,9 +510,9 @@ pub struct TrackerPolicy { pub struct AnnounceData { /// The list of peers that are downloading the same torrent. /// It excludes the peer that made the request. - pub peers: Vec, + pub peers: Vec>, /// Swarm statistics - pub stats: SwarmStats, + pub stats: SwarmMetadata, pub policy: AnnouncePolicy, } @@ -685,10 +680,8 @@ impl Tracker { /// It returns the data for a `scrape` response. async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { - let torrents = self.torrents.get_torrents().await; - - match torrents.get(info_hash) { - Some(torrent_entry) => torrent_entry.get_swarm_metadata(), + match self.torrents.get(info_hash).await { + Some(torrent_entry) => torrent_entry.get_stats(), None => SwarmMetadata::default(), } } @@ -704,47 +697,25 @@ impl Tracker { pub async fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; - let mut torrents = self.torrents.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(&info_hash) { - continue; - } - - let torrent_entry = torrent::Entry { - peers: BTreeMap::default(), - completed, - }; - - torrents.insert(info_hash, torrent_entry); - } + self.torrents.import_persistent(&persistent_torrents).await; Ok(()) } - async fn get_torrent_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec { - let read_lock = self.torrents.get_torrents().await; - - match read_lock.get(info_hash) { + async fn get_torrent_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec> { + match self.torrents.get(info_hash).await { None => vec![], - Some(entry) => entry - .get_peers_for_peer(peer, TORRENT_PEERS_LIMIT) - .into_iter() - .copied() - .collect(), + Some(entry) => entry.get_peers_for_peer(peer, Some(TORRENT_PEERS_LIMIT)), } } /// # Context: Tracker /// 
/// Get all torrent peers for a given torrent - pub async fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec { - let read_lock = self.torrents.get_torrents().await; - - match read_lock.get(info_hash) { + pub async fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { + match self.torrents.get(info_hash).await { None => vec![], - Some(entry) => entry.get_peers(TORRENT_PEERS_LIMIT).into_iter().copied().collect(), + Some(entry) => entry.get_peers(Some(TORRENT_PEERS_LIMIT)), } } @@ -753,11 +724,15 @@ impl Tracker { /// needed for a `announce` request response. /// /// # Context: Tracker - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwarmStats { + pub async fn update_torrent_with_peer_and_get_stats( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + ) -> torrent::SwarmMetadata { // code-review: consider splitting the function in two (command and query segregation). // `update_torrent_with_peer` and `get_stats` - let (stats, stats_updated) = self.torrents.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + let (stats_updated, stats) = self.torrents.update_torrent_with_peer_and_get_stats(info_hash, peer).await; if self.policy.persistent_torrent_completed_stat && stats_updated { let completed = stats.downloaded; @@ -777,71 +752,18 @@ impl Tracker { /// # Panics /// Panics if unable to get the torrent metrics. 
pub async fn get_torrents_metrics(&self) -> TorrentsMetrics { - let arc_torrents_metrics = Arc::new(tokio::sync::Mutex::new(TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 0, - torrents: 0, - })); - - let db = self.torrents.get_torrents().await.clone(); - - let futures = db - .values() - .map(|torrent_entry| { - let torrent_entry = torrent_entry.clone(); - let torrents_metrics = arc_torrents_metrics.clone(); - - async move { - tokio::spawn(async move { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - torrents_metrics.lock().await.seeders += u64::from(seeders); - torrents_metrics.lock().await.completed += u64::from(completed); - torrents_metrics.lock().await.leechers += u64::from(leechers); - torrents_metrics.lock().await.torrents += 1; - }) - .await - .expect("Error torrent_metrics spawn"); - } - }) - .collect::>(); - - join_all(futures).await; - - let torrents_metrics = Arc::try_unwrap(arc_torrents_metrics).expect("Could not unwrap arc_torrents_metrics"); - - torrents_metrics.into_inner() + self.torrents.get_metrics().await } /// Remove inactive peers and (optionally) peerless torrents /// /// # Context: Tracker pub async fn cleanup_torrents(&self) { - let mut torrents_lock = self.torrents.get_torrents_mut().await; - // If we don't need to remove torrents we will use the faster iter if self.policy.remove_peerless_torrents { - let mut cleaned_torrents_map: BTreeMap = BTreeMap::new(); - - for (info_hash, torrent_entry) in &mut *torrents_lock { - torrent_entry.remove_inactive_peers(self.policy.max_peer_timeout); - - if torrent_entry.peers.is_empty() { - continue; - } - - if self.policy.persistent_torrent_completed_stat && torrent_entry.completed == 0 { - continue; - } - - cleaned_torrents_map.insert(*info_hash, torrent_entry.clone()); - } - - *torrents_lock = cleaned_torrents_map; + self.torrents.remove_peerless_torrents(&self.policy).await; } else { - for torrent_entry in (*torrents_lock).values_mut() { - 
torrent_entry.remove_inactive_peers(self.policy.max_peer_timeout); - } + self.torrents.remove_inactive_peers(self.policy.max_peer_timeout).await; } } @@ -1093,6 +1015,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; + use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; @@ -1233,7 +1156,7 @@ mod tests { let peers = tracker.get_torrent_peers(&info_hash).await; - assert_eq!(peers, vec![peer]); + assert_eq!(peers, vec![Arc::new(peer)]); } #[tokio::test] @@ -1275,6 +1198,8 @@ mod tests { mod handling_an_announce_request { + use std::sync::Arc; + use crate::core::tests::the_tracker::{ peer_ip, public_tracker, sample_info_hash, sample_peer, sample_peer_1, sample_peer_2, }; @@ -1400,7 +1325,7 @@ mod tests { let mut peer = sample_peer_2(); let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; - assert_eq!(announce_data.peers, vec![previously_announced_peer]); + assert_eq!(announce_data.peers, vec![Arc::new(previously_announced_peer)]); } mod it_should_update_the_swarm_stats_for_the_torrent { @@ -1755,7 +1680,7 @@ mod tests { use aquatic_udp_protocol::AnnounceEvent; use crate::core::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; - use crate::core::torrent::repository_asyn::RepositoryAsync; + use crate::core::torrent::repository::Repository; #[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { @@ -1774,14 +1699,15 @@ mod tests { assert_eq!(swarm_stats.downloaded, 1); // Remove the newly updated torrent from memory - tracker.torrents.get_torrents_mut().await.remove(&info_hash); + tracker.torrents.remove(&info_hash).await; tracker.load_torrents_from_database().await.unwrap(); - let torrents = tracker.torrents.get_torrents().await; - assert!(torrents.contains_key(&info_hash)); - - let torrent_entry = 
torrents.get(&info_hash).unwrap(); + let torrent_entry = tracker + .torrents + .get(&info_hash) + .await + .expect("it should be able to get entry"); // It persists the number of completed peers. assert_eq!(torrent_entry.completed, 1); diff --git a/src/core/peer.rs b/src/core/peer.rs index 16aa1fe56..eb2b7b759 100644 --- a/src/core/peer.rs +++ b/src/core/peer.rs @@ -22,6 +22,7 @@ //! ``` use std::net::{IpAddr, SocketAddr}; use std::panic::Location; +use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde::Serialize; @@ -85,6 +86,58 @@ pub struct Peer { pub event: AnnounceEvent, } +pub trait ReadInfo { + fn is_seeder(&self) -> bool; + fn get_event(&self) -> AnnounceEvent; + fn get_id(&self) -> Id; + fn get_updated(&self) -> DurationSinceUnixEpoch; + fn get_address(&self) -> SocketAddr; +} + +impl ReadInfo for Peer { + fn is_seeder(&self) -> bool { + self.left.0 <= 0 && self.event != AnnounceEvent::Stopped + } + + fn get_event(&self) -> AnnounceEvent { + self.event + } + + fn get_id(&self) -> Id { + self.peer_id + } + + fn get_updated(&self) -> DurationSinceUnixEpoch { + self.updated + } + + fn get_address(&self) -> SocketAddr { + self.peer_addr + } +} + +impl ReadInfo for Arc { + fn is_seeder(&self) -> bool { + self.left.0 <= 0 && self.event != AnnounceEvent::Stopped + } + + fn get_event(&self) -> AnnounceEvent { + self.event + } + + fn get_id(&self) -> Id { + self.peer_id + } + + fn get_updated(&self) -> DurationSinceUnixEpoch { + self.updated + } + + fn get_address(&self) -> SocketAddr { + self.peer_addr + } +} + impl Peer { #[must_use] pub fn is_seeder(&self) -> bool { diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index eca6cbf3b..b265066f0 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -9,7 +9,8 @@ use std::sync::Arc; use serde::Deserialize; use crate::core::peer::Peer; -use crate::core::torrent::repository_asyn::RepositoryAsync; +use crate::core::torrent::entry::{self, 
ReadInfo}; +use crate::core::torrent::repository::Repository; use crate::core::Tracker; use crate::shared::bit_torrent::info_hash::InfoHash; @@ -94,41 +95,37 @@ impl Default for Pagination { /// It returns all the information the tracker has about one torrent in a [Info] struct. pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { - let db = tracker.torrents.get_torrents().await; - - let torrent_entry_option = db.get(info_hash); + let torrent_entry_option = tracker.torrents.get(info_hash).await; let torrent_entry = torrent_entry_option?; - let (seeders, completed, leechers) = torrent_entry.get_stats(); + let stats = entry::ReadInfo::get_stats(&torrent_entry); - let peers = torrent_entry.get_all_peers(); + let peers = entry::ReadPeers::get_peers(&torrent_entry, None); let peers = Some(peers.iter().map(|peer| (**peer)).collect()); Some(Info { info_hash: *info_hash, - seeders: u64::from(seeders), - completed: u64::from(completed), - leechers: u64::from(leechers), + seeders: u64::from(stats.complete), + completed: u64::from(stats.downloaded), + leechers: u64::from(stats.incomplete), peers, }) } /// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. 
-pub async fn get_torrents_page(tracker: Arc, pagination: &Pagination) -> Vec { - let db = tracker.torrents.get_torrents().await; - +pub async fn get_torrents_page(tracker: Arc, pagination: Option<&Pagination>) -> Vec { let mut basic_infos: Vec = vec![]; - for (info_hash, torrent_entry) in db.iter().skip(pagination.offset as usize).take(pagination.limit as usize) { - let (seeders, completed, leechers) = torrent_entry.get_stats(); + for (info_hash, torrent_entry) in tracker.torrents.get_paginated(pagination).await { + let stats = entry::ReadInfo::get_stats(&torrent_entry); basic_infos.push(BasicInfo { - info_hash: *info_hash, - seeders: u64::from(seeders), - completed: u64::from(completed), - leechers: u64::from(leechers), + info_hash, + seeders: u64::from(stats.complete), + completed: u64::from(stats.downloaded), + leechers: u64::from(stats.incomplete), }); } @@ -137,19 +134,15 @@ pub async fn get_torrents_page(tracker: Arc, pagination: &Pagination) - /// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. 
pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Vec { - let db = tracker.torrents.get_torrents().await; - let mut basic_infos: Vec = vec![]; for info_hash in info_hashes { - if let Some(entry) = db.get(info_hash) { - let (seeders, completed, leechers) = entry.get_stats(); - + if let Some(stats) = tracker.torrents.get(info_hash).await.map(|t| t.get_stats()) { basic_infos.push(BasicInfo { info_hash: *info_hash, - seeders: u64::from(seeders), - completed: u64::from(completed), - leechers: u64::from(leechers), + seeders: u64::from(stats.complete), + completed: u64::from(stats.downloaded), + leechers: u64::from(stats.incomplete), }); } } @@ -254,7 +247,7 @@ mod tests { async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let tracker = Arc::new(tracker_factory(&tracker_configuration())); - let torrents = get_torrents_page(tracker.clone(), &Pagination::default()).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; assert_eq!(torrents, vec![]); } @@ -270,7 +263,7 @@ mod tests { .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer()) .await; - let torrents = get_torrents_page(tracker.clone(), &Pagination::default()).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; assert_eq!( torrents, @@ -302,7 +295,7 @@ mod tests { let offset = 0; let limit = 1; - let torrents = get_torrents_page(tracker.clone(), &Pagination::new(offset, limit)).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); } @@ -326,7 +319,7 @@ mod tests { let offset = 1; let limit = 4000; - let torrents = get_torrents_page(tracker.clone(), &Pagination::new(offset, limit)).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); assert_eq!( @@ -356,7 +349,7 @@ mod tests { 
.update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) .await; - let torrents = get_torrents_page(tracker.clone(), &Pagination::default()).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; assert_eq!( torrents, diff --git a/src/core/torrent/entry.rs b/src/core/torrent/entry.rs new file mode 100644 index 000000000..619cce9b3 --- /dev/null +++ b/src/core/torrent/entry.rs @@ -0,0 +1,241 @@ +use std::fmt::Debug; +use std::sync::Arc; +use std::time::Duration; + +use aquatic_udp_protocol::AnnounceEvent; +use serde::{Deserialize, Serialize}; + +use super::SwarmMetadata; +use crate::core::peer::{self, ReadInfo as _}; +use crate::core::TrackerPolicy; +use crate::shared::clock::{Current, TimeNow}; + +/// A data structure containing all the information about a torrent in the tracker. +/// +/// This is the tracker entry for a given torrent and contains the swarm data, +/// that's the list of all the peers trying to download the same torrent. +/// The tracker keeps one entry like this for every torrent. +#[derive(Serialize, Deserialize, Clone, Debug, Default)] +pub struct Entry { + /// The swarm: a network of peers that are all trying to download the torrent associated to this entry + #[serde(skip)] + pub peers: std::collections::BTreeMap>, + /// The number of peers that have ever completed downloading the torrent associated to this entry + pub completed: u32, +} + +pub type MutexStd = Arc>; +pub type MutexTokio = Arc>; + +pub trait ReadInfo { + /// It returns the swarm metadata (statistics) as a struct: + /// + /// `(seeders, completed, leechers)` + fn get_stats(&self) -> SwarmMetadata; + + /// Returns True if Still a Valid Entry according to the Tracker Policy + fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool; +} + +pub trait ReadPeers { + /// Get all swarm peers, optionally limiting the result. 
+ fn get_peers(&self, limit: Option) -> Vec>; + + /// It returns the list of peers for a given peer client, optionally limiting the + /// result. + /// + /// It filters out the input peer, typically because we want to return this + /// list of peers to that client peer. + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec>; +} + +pub trait ReadAsync { + /// Get all swarm peers, optionally limiting the result. + fn get_peers(&self, limit: Option) -> impl std::future::Future>> + Send; + + /// It returns the list of peers for a given peer client, optionally limiting the + /// result. + /// + /// It filters out the input peer, typically because we want to return this + /// list of peers to that client peer. + fn get_peers_for_peer( + &self, + client: &peer::Peer, + limit: Option, + ) -> impl std::future::Future>> + Send; +} + +pub trait Update { + /// It updates a peer and returns true if the number of complete downloads have increased. + /// + /// The number of peers that have complete downloading is synchronously updated when peers are updated. + /// That's the total torrent downloads counter. + fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool; + + // It preforms a combined operation of `insert_or_update_peer` and `get_stats`. 
+ fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata); + + /// It removes peer from the swarm that have not been updated for more than `max_peer_timeout` seconds + fn remove_inactive_peers(&mut self, max_peer_timeout: u32); +} + +pub trait UpdateSync { + fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool; + fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata); + fn remove_inactive_peers(&self, max_peer_timeout: u32); +} + +pub trait UpdateAsync { + fn insert_or_update_peer(&self, peer: &peer::Peer) -> impl std::future::Future + Send; + + fn insert_or_update_peer_and_get_stats( + &self, + peer: &peer::Peer, + ) -> impl std::future::Future + std::marker::Send; + + fn remove_inactive_peers(&self, max_peer_timeout: u32) -> impl std::future::Future + Send; +} + +impl ReadInfo for Entry { + #[allow(clippy::cast_possible_truncation)] + fn get_stats(&self) -> SwarmMetadata { + let complete: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; + let incomplete: u32 = self.peers.len() as u32 - complete; + + SwarmMetadata { + downloaded: self.completed, + complete, + incomplete, + } + } + + fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { + if policy.persistent_torrent_completed_stat && self.completed > 0 { + return true; + } + + if policy.remove_peerless_torrents && self.peers.is_empty() { + return false; + } + + true + } +} + +impl ReadPeers for Entry { + fn get_peers(&self, limit: Option) -> Vec> { + match limit { + Some(limit) => self.peers.values().take(limit).cloned().collect(), + None => self.peers.values().cloned().collect(), + } + } + + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { + match limit { + Some(limit) => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer.get_address() != client.get_address()) + // Limit the number of peers on the result + .take(limit) + .cloned() + 
.collect(), + None => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer.get_address() != client.get_address()) + .cloned() + .collect(), + } + } +} + +impl ReadPeers for MutexStd { + fn get_peers(&self, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers(limit) + } + + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers_for_peer(client, limit) + } +} + +impl ReadAsync for MutexTokio { + async fn get_peers(&self, limit: Option) -> Vec> { + self.lock().await.get_peers(limit) + } + + async fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { + self.lock().await.get_peers_for_peer(client, limit) + } +} + +impl Update for Entry { + fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { + let mut did_torrent_stats_change: bool = false; + + match peer.get_event() { + AnnounceEvent::Stopped => { + drop(self.peers.remove(&peer.get_id())); + } + AnnounceEvent::Completed => { + let peer_old = self.peers.insert(peer.get_id(), Arc::new(*peer)); + // Don't count if peer was not previously known and not already completed. 
+ if peer_old.is_some_and(|p| p.event != AnnounceEvent::Completed) { + self.completed += 1; + did_torrent_stats_change = true; + } + } + _ => { + drop(self.peers.insert(peer.get_id(), Arc::new(*peer))); + } + } + + did_torrent_stats_change + } + + fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let changed = self.insert_or_update_peer(peer); + let stats = self.get_stats(); + (changed, stats) + } + + fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { + let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); + self.peers.retain(|_, peer| peer.get_updated() > current_cutoff); + } +} + +impl UpdateSync for MutexStd { + fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool { + self.lock().expect("it should lock the entry").insert_or_update_peer(peer) + } + + fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.lock() + .expect("it should lock the entry") + .insert_or_update_peer_and_get_stats(peer) + } + + fn remove_inactive_peers(&self, max_peer_timeout: u32) { + self.lock() + .expect("it should lock the entry") + .remove_inactive_peers(max_peer_timeout); + } +} + +impl UpdateAsync for MutexTokio { + async fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool { + self.lock().await.insert_or_update_peer(peer) + } + + async fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.lock().await.insert_or_update_peer_and_get_stats(peer) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + self.lock().await.remove_inactive_peers(max_peer_timeout); + } +} diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index 49c1f61f8..608765cf8 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -27,49 +27,11 @@ //! 
- The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network. //! Peer that don not have a full copy of the torrent data are called "leechers". //! -//! > **NOTICE**: that both [`SwarmMetadata`] and [`SwarmStats`] contain the same information. [`SwarmMetadata`] is using the names used on [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). -pub mod repository_asyn; -pub mod repository_sync; +//! > **NOTICE**: that both [`SwarmMetadata`] and [`SwarmMetadata`] contain the same information. [`SwarmMetadata`] is using the names used on [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). +pub mod entry; +pub mod repository; -use std::sync::Arc; -use std::time::Duration; - -use aquatic_udp_protocol::AnnounceEvent; use derive_more::Constructor; -use serde::{Deserialize, Serialize}; - -use super::peer::{self, Peer}; -use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::shared::clock::{Current, TimeNow}; - -pub trait UpdateTorrentSync { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool); -} - -pub trait UpdateTorrentAsync { - fn update_torrent_with_peer_and_get_stats( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - ) -> impl std::future::Future + Send; -} - -/// A data structure containing all the information about a torrent in the tracker. -/// -/// This is the tracker entry for a given torrent and contains the swarm data, -/// that's the list of all the peers trying to download the same torrent. -/// The tracker keeps one entry like this for every torrent. 
-#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct Entry { - /// The swarm: a network of peers that are all trying to download the torrent associated to this entry - #[serde(skip)] - pub peers: std::collections::BTreeMap, - /// The number of peers that have ever completed downloading the torrent associated to this entry - pub completed: u32, -} - -pub type EntryMutexTokio = Arc>; -pub type EntryMutexStd = Arc>; /// Swarm statistics for one torrent. /// Swarm metadata dictionary in the scrape response. @@ -92,122 +54,6 @@ impl SwarmMetadata { } } -/// [`SwarmStats`] has the same form as [`SwarmMetadata`] -pub type SwarmStats = SwarmMetadata; - -impl Entry { - #[must_use] - pub fn new() -> Entry { - Entry { - peers: std::collections::BTreeMap::new(), - completed: 0, - } - } - - /// It updates a peer and returns true if the number of complete downloads have increased. - /// - /// The number of peers that have complete downloading is synchronously updated when peers are updated. - /// That's the total torrent downloads counter. - pub fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { - let mut did_torrent_stats_change: bool = false; - - match peer.event { - AnnounceEvent::Stopped => { - let _: Option = self.peers.remove(&peer.peer_id); - } - AnnounceEvent::Completed => { - let peer_old = self.peers.insert(peer.peer_id, *peer); - // Don't count if peer was not previously known and not already completed. - if peer_old.is_some_and(|p| p.event != AnnounceEvent::Completed) { - self.completed += 1; - did_torrent_stats_change = true; - } - } - _ => { - let _: Option = self.peers.insert(peer.peer_id, *peer); - } - } - - did_torrent_stats_change - } - - /// Get all swarm peers. - #[must_use] - pub fn get_all_peers(&self) -> Vec<&peer::Peer> { - self.peers.values().collect() - } - - /// Get swarm peers, limiting the result. 
- #[must_use] - pub fn get_peers(&self, limit: usize) -> Vec<&peer::Peer> { - self.peers.values().take(limit).collect() - } - - /// It returns the list of peers for a given peer client. - /// - /// It filters out the input peer, typically because we want to return this - /// list of peers to that client peer. - #[must_use] - pub fn get_all_peers_for_peer(&self, client: &Peer) -> Vec<&peer::Peer> { - self.peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer.peer_addr != client.peer_addr) - .collect() - } - - /// It returns the list of peers for a given peer client, limiting the - /// result. - /// - /// It filters out the input peer, typically because we want to return this - /// list of peers to that client peer. - #[must_use] - pub fn get_peers_for_peer(&self, client: &Peer, limit: usize) -> Vec<&peer::Peer> { - self.peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer.peer_addr != client.peer_addr) - // Limit the number of peers on the result - .take(limit) - .collect() - } - - /// It returns the swarm metadata (statistics) as a tuple: - /// - /// `(seeders, completed, leechers)` - #[allow(clippy::cast_possible_truncation)] - #[must_use] - pub fn get_stats(&self) -> (u32, u32, u32) { - let seeders: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; - let leechers: u32 = self.peers.len() as u32 - seeders; - (seeders, self.completed, leechers) - } - - /// It returns the swarm metadata (statistics) as an struct - #[must_use] - pub fn get_swarm_metadata(&self) -> SwarmMetadata { - // code-review: consider using always this function instead of `get_stats`. 
- let (seeders, completed, leechers) = self.get_stats(); - SwarmMetadata { - complete: seeders, - downloaded: completed, - incomplete: leechers, - } - } - - /// It removes peer from the swarm that have not been updated for more than `max_peer_timeout` seconds - pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { - let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); - self.peers.retain(|_, peer| peer.updated > current_cutoff); - } -} - -impl Default for Entry { - fn default() -> Self { - Self::new() - } -} - #[cfg(test)] mod tests { @@ -215,11 +61,12 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::ops::Sub; + use std::sync::Arc; use std::time::Duration; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::core::torrent::Entry; + use crate::core::torrent::entry::{self, ReadInfo, ReadPeers, Update}; use crate::core::{peer, TORRENT_PEERS_LIMIT}; use crate::shared::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; @@ -291,59 +138,59 @@ mod tests { #[test] fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = Entry::new(); + let torrent_entry = entry::Entry::default(); - assert_eq!(torrent_entry.get_all_peers().len(), 0); + assert_eq!(torrent_entry.get_peers(None).len(), 0); } #[test] fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - assert_eq!(*torrent_entry.get_all_peers()[0], torrent_peer); - assert_eq!(torrent_entry.get_all_peers().len(), 1); + assert_eq!(*torrent_entry.get_peers(None)[0], torrent_peer); + assert_eq!(torrent_entry.get_peers(None).len(), 1); } #[test] fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = 
Entry::new(); + let mut torrent_entry = entry::Entry::default(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - assert_eq!(torrent_entry.get_all_peers(), vec![&torrent_peer]); + assert_eq!(torrent_entry.get_peers(None), vec![Arc::new(torrent_peer)]); } #[test] fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer torrent_peer.event = AnnounceEvent::Completed; // Update the peer torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - assert_eq!(torrent_entry.get_all_peers()[0].event, AnnounceEvent::Completed); + assert_eq!(torrent_entry.get_peers(None)[0].event, AnnounceEvent::Completed); } #[test] fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer torrent_peer.event = AnnounceEvent::Stopped; // Update the peer torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - assert_eq!(torrent_entry.get_all_peers().len(), 0); + assert_eq!(torrent_entry.get_peers(None).len(), 0); } #[test] fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -357,7 +204,7 @@ mod tests { #[test] fn 
torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( ) { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Add a peer that did not exist before in the entry @@ -369,20 +216,20 @@ mod tests { #[test] fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add peer // Get peers excluding the one we have just added - let peers = torrent_entry.get_all_peers_for_peer(&torrent_peer); + let peers = torrent_entry.get_peers_for_peer(&torrent_peer, None); assert_eq!(peers.len(), 0); } #[test] fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let peer_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); @@ -399,7 +246,7 @@ mod tests { torrent_entry.insert_or_update_peer(&torrent_peer_2); // Get peers for peer 1 - let peers = torrent_entry.get_all_peers_for_peer(&torrent_peer_1); + let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1, None); // The peer 2 using the same IP but different port should be included assert_eq!(peers[0].peer_addr.ip(), Ipv4Addr::new(127, 0, 0, 1)); @@ -416,7 +263,7 @@ mod tests { #[test] fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); // We add one more peer than the scrape limit for 
peer_number in 1..=74 + 1 { @@ -426,35 +273,35 @@ mod tests { torrent_entry.insert_or_update_peer(&torrent_peer); } - let peers = torrent_entry.get_peers(TORRENT_PEERS_LIMIT); + let peers = torrent_entry.get_peers(Some(TORRENT_PEERS_LIMIT)); assert_eq!(peers.len(), 74); } #[test] fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let torrent_seeder = a_torrent_seeder(); torrent_entry.insert_or_update_peer(&torrent_seeder); // Add seeder - assert_eq!(torrent_entry.get_stats().0, 1); + assert_eq!(torrent_entry.get_stats().complete, 1); } #[test] fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let torrent_leecher = a_torrent_leecher(); torrent_entry.insert_or_update_peer(&torrent_leecher); // Add leecher - assert_eq!(torrent_entry.get_stats().2, 1); + assert_eq!(torrent_entry.get_stats().incomplete, 1); } #[test] fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( ) { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -462,28 +309,28 @@ mod tests { torrent_peer.event = AnnounceEvent::Completed; torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer - let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().1; + let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().complete; assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); } #[test] fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = Entry::new(); + let mut 
torrent_entry = entry::Entry::default(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Announce "Completed" torrent download event. // It's the first event announced from this peer. torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); // Add the peer - let number_of_peers_with_completed_torrent = torrent_entry.get_stats().1; + let number_of_peers_with_completed_torrent = torrent_entry.get_stats().downloaded; assert_eq!(number_of_peers_with_completed_torrent, 0); } #[test] fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let timeout = 120u32; diff --git a/src/core/torrent/repository/mod.rs b/src/core/torrent/repository/mod.rs new file mode 100644 index 000000000..3af33aebe --- /dev/null +++ b/src/core/torrent/repository/mod.rs @@ -0,0 +1,30 @@ +use super::SwarmMetadata; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +pub mod std_sync; +pub mod tokio_sync; + +pub trait Repository: Default { + fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; + fn get_metrics(&self) -> impl std::future::Future + Send; + fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future> + Send; + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) -> impl std::future::Future + Send; + fn remove(&self, key: &InfoHash) -> impl std::future::Future> + Send; + fn remove_inactive_peers(&self, max_peer_timeout: u32) -> impl std::future::Future + Send; + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; +} + +pub trait UpdateTorrentSync { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: 
&peer::Peer) -> (bool, SwarmMetadata); +} + +pub trait UpdateTorrentAsync { + fn update_torrent_with_peer_and_get_stats( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + ) -> impl std::future::Future + Send; +} diff --git a/src/core/torrent/repository/std_sync.rs b/src/core/torrent/repository/std_sync.rs new file mode 100644 index 000000000..ba38db6ed --- /dev/null +++ b/src/core/torrent/repository/std_sync.rs @@ -0,0 +1,365 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::executor::block_on; +use futures::future::join_all; + +use super::{Repository, UpdateTorrentAsync, UpdateTorrentSync}; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{Entry, ReadInfo, Update, UpdateAsync, UpdateSync}; +use crate::core::torrent::{entry, SwarmMetadata}; +use crate::core::{peer, TorrentsMetrics}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +#[derive(Default)] +pub struct RepositoryStdRwLock { + torrents: std::sync::RwLock>, +} + +impl RepositoryStdRwLock { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl UpdateTorrentAsync for RepositoryStdRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); + + let torrent_entry = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut(); + let entry = 
torrents_lock.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + torrent_entry.insert_or_update_peer_and_get_stats(peer).await + } +} +impl Repository for RepositoryStdRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents(); + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.lock().await.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + block_on(join_all(futures)); + + *metrics.blocking_lock_owned() + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexTokio::new( + Entry { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let db = self.get_torrents(); + + let futures = db.values().map(|e| { + let 
entry = e.clone(); + tokio::spawn(async move { entry.lock().await.remove_inactive_peers(max_peer_timeout) }) + }); + + block_on(join_all(futures)); + } + + async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + } +} + +impl RepositoryStdRwLock { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl UpdateTorrentSync for RepositoryStdRwLock { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); + + let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut(); + let entry = torrents_lock + .entry(*info_hash) + .or_insert(Arc::new(std::sync::Mutex::new(Entry::default()))); + entry.clone() + }; + + torrent_entry.insert_or_update_peer_and_get_stats(peer) + } +} +impl Repository for RepositoryStdRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents(); + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.lock().expect("it should lock the entry").get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + 
metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + block_on(join_all(futures)); + + *metrics.blocking_lock_owned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexStd)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexStd::new( + Entry { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let db = self.get_torrents(); + + let futures = db.values().map(|e| { + let entry = e.clone(); + tokio::spawn(async move { + entry + .lock() + .expect("it should get lock for entry") + .remove_inactive_peers(max_peer_timeout); + }) + }); + + block_on(join_all(futures)); + } + + async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + } +} + +impl RepositoryStdRwLock { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("it should get the read lock") + } + + 
fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("it should get the write lock") + } +} + +impl UpdateTorrentSync for RepositoryStdRwLock { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let mut torrents = self.torrents.write().unwrap(); + + let torrent_entry = match torrents.entry(*info_hash) { + std::collections::btree_map::Entry::Vacant(vacant) => vacant.insert(Entry::default()), + std::collections::btree_map::Entry::Occupied(entry) => entry.into_mut(), + }; + + torrent_entry.insert_or_update_peer_and_get_stats(peer) + } +} +impl Repository for RepositoryStdRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents(); + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + block_on(join_all(futures)); + + *metrics.blocking_lock_owned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, Entry)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, 
completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = Entry { + peers: BTreeMap::default(), + completed: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let mut db = self.get_torrents_mut(); + + drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); + } + + async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository/tokio_sync.rs b/src/core/torrent/repository/tokio_sync.rs new file mode 100644 index 000000000..83edf1188 --- /dev/null +++ b/src/core/torrent/repository/tokio_sync.rs @@ -0,0 +1,378 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::future::join_all; + +use super::{Repository, UpdateTorrentAsync}; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{Entry, ReadInfo, Update, UpdateAsync, UpdateSync}; +use crate::core::torrent::{entry, SwarmMetadata}; +use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +#[derive(Default)] +pub struct RepositoryTokioRwLock { + torrents: tokio::sync::RwLock>, +} + +impl RepositoryTokioRwLock { + async fn get_torrents<'a>( + &'a self, + ) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl UpdateTorrentAsync 
for RepositoryTokioRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_torrent; + { + let db = self.torrents.read().await; + maybe_torrent = db.get(info_hash).cloned(); + } + + let torrent = if let Some(torrent) = maybe_torrent { + torrent + } else { + let entry = entry::MutexTokio::default(); + let mut db = self.torrents.write().await; + db.insert(*info_hash, entry.clone()); + entry + }; + + torrent.insert_or_update_peer_and_get_stats(peer).await + } +} + +impl Repository for RepositoryTokioRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents().await; + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.lock().await.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + join_all(futures).await; + + *metrics.lock_owned().await + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let 
entry = entry::MutexTokio::new( + Entry { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let db = self.get_torrents().await; + + let futures = db.values().map(|e| { + let entry = e.clone(); + tokio::spawn(async move { entry.lock().await.remove_inactive_peers(max_peer_timeout) }) + }); + + join_all(futures).await; + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + } +} + +impl RepositoryTokioRwLock { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl UpdateTorrentAsync for RepositoryTokioRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_torrent; + { + let db = self.torrents.read().await; + maybe_torrent = db.get(info_hash).cloned(); + } + + let torrent = if let Some(torrent) = maybe_torrent { + torrent + } else { + let entry = entry::MutexStd::default(); + let mut db = self.torrents.write().await; + db.insert(*info_hash, entry.clone()); + entry + }; + + torrent.insert_or_update_peer_and_get_stats(peer) + } +} + +impl Repository for RepositoryTokioRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, 
entry::MutexStd)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents().await; + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.lock().expect("it should lock the entry").get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + join_all(futures).await; + + *metrics.lock_owned().await + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexStd::new( + Entry { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let db = self.get_torrents().await; + + let futures = db.values().map(|e| { + let entry = e.clone(); + tokio::spawn(async move { + entry + .lock() + .expect("it should get lock for entry") + .remove_inactive_peers(max_peer_timeout); + }) + }); + + join_all(futures).await; + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + 
db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + } +} + +impl RepositoryTokioRwLock { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>(&'a self) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl UpdateTorrentAsync for RepositoryTokioRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let mut db = self.torrents.write().await; + + let torrent = db.entry(*info_hash).or_insert(Entry::default()); + + torrent.insert_or_update_peer_and_get_stats(peer) + } +} + +impl Repository for RepositoryTokioRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, Entry)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents().await; + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + join_all(futures).await; + + *metrics.lock_owned().await + } + + async fn 
import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = Entry { + peers: BTreeMap::default(), + completed: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let mut db = self.get_torrents_mut().await; + + drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository_asyn.rs b/src/core/torrent/repository_asyn.rs deleted file mode 100644 index ad10f85b4..000000000 --- a/src/core/torrent/repository_asyn.rs +++ /dev/null @@ -1,187 +0,0 @@ -use std::sync::Arc; - -use super::{EntryMutexStd, EntryMutexTokio, UpdateTorrentAsync}; -use crate::core::peer; -use crate::core::torrent::{Entry, SwarmStats}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -pub trait RepositoryAsync: Default { - fn get_torrents<'a>( - &'a self, - ) -> impl std::future::Future>> + Send - where - std::collections::BTreeMap: 'a; - - fn get_torrents_mut<'a>( - &'a self, - ) -> impl std::future::Future>> + Send - where - std::collections::BTreeMap: 'a; -} - -pub struct RepositoryTokioRwLock { - torrents: tokio::sync::RwLock>, -} -impl UpdateTorrentAsync for RepositoryTokioRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); - - let torrent_entry: Arc> = if let 
Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut().await; - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(tokio::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().await; - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl RepositoryAsync for RepositoryTokioRwLock { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl Default for RepositoryTokioRwLock { - fn default() -> Self { - Self { - torrents: tokio::sync::RwLock::default(), - } - } -} - -impl UpdateTorrentAsync for RepositoryTokioRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut().await; - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().unwrap(); - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = 
torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl RepositoryAsync for RepositoryTokioRwLock { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl Default for RepositoryTokioRwLock { - fn default() -> Self { - Self { - torrents: tokio::sync::RwLock::default(), - } - } -} - -impl UpdateTorrentAsync for RepositoryTokioRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let (stats, stats_updated) = { - let mut torrents_lock = self.torrents.write().await; - let torrent_entry = torrents_lock.entry(*info_hash).or_insert(Entry::new()); - let stats_updated = torrent_entry.insert_or_update_peer(peer); - let stats = torrent_entry.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl RepositoryAsync for RepositoryTokioRwLock { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>(&'a self) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl Default for RepositoryTokioRwLock { - fn default() -> Self { - Self { - torrents: tokio::sync::RwLock::default(), - } - } -} diff --git a/src/core/torrent/repository_sync.rs b/src/core/torrent/repository_sync.rs 
deleted file mode 100644 index 3b01eb8be..000000000 --- a/src/core/torrent/repository_sync.rs +++ /dev/null @@ -1,177 +0,0 @@ -use std::sync::{Arc, RwLock}; - -use super::{EntryMutexStd, EntryMutexTokio, UpdateTorrentAsync, UpdateTorrentSync}; -use crate::core::peer; -use crate::core::torrent::{Entry, SwarmStats}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -pub trait RepositorySync: Default { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a; - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a; -} - -pub struct RepositoryStdRwLock { - torrents: std::sync::RwLock>, -} - -impl UpdateTorrentAsync for RepositoryStdRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut(); - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(tokio::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().await; - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} -impl RepositorySync for RepositoryStdRwLock { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> 
std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl Default for RepositoryStdRwLock { - fn default() -> Self { - Self { - torrents: RwLock::default(), - } - } -} -impl UpdateTorrentSync for RepositoryStdRwLock { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut(); - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().unwrap(); - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} -impl RepositorySync for RepositoryStdRwLock { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl Default for RepositoryStdRwLock { - fn default() -> Self { - Self { - torrents: RwLock::default(), - } - } -} - -impl UpdateTorrentSync for RepositoryStdRwLock { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let 
mut torrents = self.torrents.write().unwrap(); - - let torrent_entry = match torrents.entry(*info_hash) { - std::collections::btree_map::Entry::Vacant(vacant) => vacant.insert(Entry::new()), - std::collections::btree_map::Entry::Occupied(entry) => entry.into_mut(), - }; - - let stats_updated = torrent_entry.insert_or_update_peer(peer); - let stats = torrent_entry.get_stats(); - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} -impl RepositorySync for RepositoryStdRwLock { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl Default for RepositoryStdRwLock { - fn default() -> Self { - Self { - torrents: RwLock::default(), - } - } -} diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index dcb92dec3..999580da7 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -82,7 +82,7 @@ pub async fn get_torrents_handler(State(tracker): State>, paginatio torrent_list_response( &get_torrents_page( tracker.clone(), - &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), + Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), ) .await, ) diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index b1b474ea9..619632ae4 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -79,7 +79,7 @@ impl From for Normal { incomplete: data.stats.incomplete.into(), interval: 
data.policy.interval.into(), min_interval: data.policy.interval_min.into(), - peers: data.peers.into_iter().collect(), + peers: data.peers.iter().map(AsRef::as_ref).copied().collect(), } } } @@ -116,7 +116,7 @@ pub struct Compact { impl From for Compact { fn from(data: AnnounceData) -> Self { - let compact_peers: Vec = data.peers.into_iter().collect(); + let compact_peers: Vec = data.peers.iter().map(AsRef::as_ref).copied().collect(); let (peers, peers6): (Vec>, Vec>) = compact_peers.into_iter().collect(); @@ -313,12 +313,13 @@ impl FromIterator> for CompactPeersEncoded { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; use torrust_tracker_configuration::AnnouncePolicy; use crate::core::peer::fixture::PeerBuilder; use crate::core::peer::Id; - use crate::core::torrent::SwarmStats; + use crate::core::torrent::SwarmMetadata; use crate::core::AnnounceData; use crate::servers::http::v1::responses::announce::{Announce, Compact, Normal, Response}; @@ -350,8 +351,8 @@ mod tests { )) .build(); - let peers = vec![peer_ipv4, peer_ipv6]; - let stats = SwarmStats::new(333, 333, 444); + let peers = vec![Arc::new(peer_ipv4), Arc::new(peer_ipv6)]; + let stats = SwarmMetadata::new(333, 333, 444); AnnounceData::new(peers, stats, policy) } diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index b791defd7..b53697eed 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -98,7 +98,7 @@ mod tests { use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; use crate::core::peer::Peer; - use crate::core::torrent::SwarmStats; + use crate::core::torrent::SwarmMetadata; use crate::core::{statistics, AnnounceData, Tracker}; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; @@ -113,7 +113,7 @@ mod tests { let expected_announce_data = 
AnnounceData { peers: vec![], - stats: SwarmStats { + stats: SwarmMetadata { downloaded: 0, complete: 1, incomplete: 0, diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 91a371a7b..f42e11424 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -642,7 +642,7 @@ mod tests { .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip), client_port)) .into(); - assert_eq!(peers[0], expected_peer); + assert_eq!(peers[0], Arc::new(expected_peer)); } #[tokio::test] @@ -770,6 +770,7 @@ mod tests { mod from_a_loopback_ip { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; @@ -809,7 +810,7 @@ mod tests { .with_peer_addr(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) .into(); - assert_eq!(peers[0], expected_peer); + assert_eq!(peers[0], Arc::new(expected_peer)); } } } @@ -863,7 +864,7 @@ mod tests { .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - assert_eq!(peers[0], expected_peer); + assert_eq!(peers[0], Arc::new(expected_peer)); } #[tokio::test] From 4b2d6fefc2840b93cb23c9fa7a3fdd34a4ee0f9b Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 11 Feb 2024 13:30:39 +0800 Subject: [PATCH 0772/1003] dev: extract repo implementations and benchmarks --- .../src/benches/asyn.rs | 30 +- .../src/benches/mod.rs | 1 - .../src/benches/sync.rs | 29 +- .../src/benches/sync_asyn.rs | 185 --------- .../torrent-repository-benchmarks/src/main.rs | 89 +++-- src/core/mod.rs | 17 +- src/core/services/torrent.rs | 8 +- src/core/torrent/entry.rs | 74 +++- src/core/torrent/mod.rs | 41 +- src/core/torrent/repository/mod.rs | 20 +- src/core/torrent/repository/rw_lock_std.rs | 122 ++++++ .../repository/rw_lock_std_mutex_std.rs | 143 +++++++ .../repository/rw_lock_std_mutex_tokio.rs | 141 +++++++ src/core/torrent/repository/rw_lock_tokio.rs | 124 ++++++ .../repository/rw_lock_tokio_mutex_std.rs | 
146 +++++++ .../repository/rw_lock_tokio_mutex_tokio.rs | 144 +++++++ src/core/torrent/repository/std_sync.rs | 365 ----------------- src/core/torrent/repository/tokio_sync.rs | 378 ------------------ 18 files changed, 1015 insertions(+), 1042 deletions(-) delete mode 100644 packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs create mode 100644 src/core/torrent/repository/rw_lock_std.rs create mode 100644 src/core/torrent/repository/rw_lock_std_mutex_std.rs create mode 100644 src/core/torrent/repository/rw_lock_std_mutex_tokio.rs create mode 100644 src/core/torrent/repository/rw_lock_tokio.rs create mode 100644 src/core/torrent/repository/rw_lock_tokio_mutex_std.rs create mode 100644 src/core/torrent/repository/rw_lock_tokio_mutex_tokio.rs delete mode 100644 src/core/torrent/repository/std_sync.rs delete mode 100644 src/core/torrent/repository/tokio_sync.rs diff --git a/packages/torrent-repository-benchmarks/src/benches/asyn.rs b/packages/torrent-repository-benchmarks/src/benches/asyn.rs index 737a99f3c..dffd31682 100644 --- a/packages/torrent-repository-benchmarks/src/benches/asyn.rs +++ b/packages/torrent-repository-benchmarks/src/benches/asyn.rs @@ -1,24 +1,21 @@ -use std::sync::Arc; use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository::tokio_sync::RepositoryTokioRwLock; use torrust_tracker::core::torrent::repository::UpdateTorrentAsync; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; -pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) +pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) where - T: Default, - RepositoryTokioRwLock: UpdateTorrentAsync + Default, + V: UpdateTorrentAsync + Default, { let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let 
torrent_repository = Arc::new(RepositoryTokioRwLock::::default()); + let torrent_repository = V::default(); let info_hash = InfoHash([0; 20]); @@ -37,16 +34,15 @@ where } // Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Default + Send + Sync + 'static, - RepositoryTokioRwLock: UpdateTorrentAsync + Default, + V: UpdateTorrentAsync + Default + Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryTokioRwLock::::default()); + let torrent_repository = V::default(); let info_hash: &'static InfoHash = &InfoHash([0; 20]); let handles = FuturesUnordered::new(); @@ -87,16 +83,15 @@ where } // Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Default + Send + Sync + 'static, - RepositoryTokioRwLock: UpdateTorrentAsync + Default, + V: UpdateTorrentAsync + Default + Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryTokioRwLock::::default()); + let torrent_repository = V::default(); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); @@ -132,16 +127,15 @@ where } // Async update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn update_multiple_torrents_in_parallel(runtime: 
&tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Default + Send + Sync + 'static, - RepositoryTokioRwLock: UpdateTorrentAsync + Default, + V: UpdateTorrentAsync + Default + Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryTokioRwLock::::default()); + let torrent_repository = V::default(); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); diff --git a/packages/torrent-repository-benchmarks/src/benches/mod.rs b/packages/torrent-repository-benchmarks/src/benches/mod.rs index 7450f4bcc..1026aa4bf 100644 --- a/packages/torrent-repository-benchmarks/src/benches/mod.rs +++ b/packages/torrent-repository-benchmarks/src/benches/mod.rs @@ -1,4 +1,3 @@ pub mod asyn; pub mod sync; -pub mod sync_asyn; pub mod utils; diff --git a/packages/torrent-repository-benchmarks/src/benches/sync.rs b/packages/torrent-repository-benchmarks/src/benches/sync.rs index ea694a38c..04385bc55 100644 --- a/packages/torrent-repository-benchmarks/src/benches/sync.rs +++ b/packages/torrent-repository-benchmarks/src/benches/sync.rs @@ -1,9 +1,7 @@ -use std::sync::Arc; use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository::std_sync::RepositoryStdRwLock; use torrust_tracker::core::torrent::repository::UpdateTorrentSync; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; @@ -12,14 +10,14 @@ use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjuste // Simply add one torrent #[must_use] -pub fn add_one_torrent(samples: usize) -> (Duration, Duration) +pub fn add_one_torrent(samples: usize) -> (Duration, Duration) where - RepositoryStdRwLock: UpdateTorrentSync + Default, + V: 
UpdateTorrentSync + Default, { let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); + let torrent_repository = V::default(); let info_hash = InfoHash([0; 20]); @@ -36,16 +34,15 @@ where } // Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Send + Sync + 'static, - RepositoryStdRwLock: UpdateTorrentSync + Default, + V: UpdateTorrentSync + Default + Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); + let torrent_repository = V::default(); let info_hash: &'static InfoHash = &InfoHash([0; 20]); let handles = FuturesUnordered::new(); @@ -82,16 +79,15 @@ where } // Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Send + Sync + 'static, - RepositoryStdRwLock: UpdateTorrentSync + Default, + V: UpdateTorrentSync + Default + Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); + let torrent_repository = V::default(); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); @@ -125,16 +121,15 @@ where } // Update ten thousand torrents in parallel (depending on the set worker threads) -pub 
async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Send + Sync + 'static, - RepositoryStdRwLock: UpdateTorrentSync + Default, + V: UpdateTorrentSync + Default + Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); + let torrent_repository = V::default(); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); diff --git a/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs b/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs deleted file mode 100644 index 8efed9856..000000000 --- a/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs +++ /dev/null @@ -1,185 +0,0 @@ -use std::sync::Arc; -use std::time::Duration; - -use clap::Parser; -use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository::std_sync::RepositoryStdRwLock; -use torrust_tracker::core::torrent::repository::UpdateTorrentAsync; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; - -use crate::args::Args; -use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; - -// Simply add one torrent -#[must_use] -pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) -where - RepositoryStdRwLock: UpdateTorrentAsync + Default, -{ - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); - - let info_hash = InfoHash([0; 20]); - - let start_time = std::time::Instant::now(); - - torrent_repository - .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) - .await; - - let 
result = start_time.elapsed(); - - results.push(result); - } - - get_average_and_adjusted_average_from_results(results) -} - -// Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) -where - T: Send + Sync + 'static, - RepositoryStdRwLock: UpdateTorrentAsync + Default, -{ - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); - let info_hash: &'static InfoHash = &InfoHash([0; 20]); - let handles = FuturesUnordered::new(); - - // Add the torrent/peer to the torrent repository - torrent_repository - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; - - let start_time = std::time::Instant::now(); - - for _ in 0..10_000 { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; - - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - let result = start_time.elapsed(); - - results.push(result); - } - - get_average_and_adjusted_average_from_results(results) -} - -// Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) -where - T: Send + Sync + 'static, - RepositoryStdRwLock: UpdateTorrentAsync + Default, -{ - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = 
Arc::new(RepositoryStdRwLock::::default()); - let info_hashes = generate_unique_info_hashes(10_000); - let handles = FuturesUnordered::new(); - - let start_time = std::time::Instant::now(); - - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) - .await; - - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - let result = start_time.elapsed(); - - results.push(result); - } - - get_average_and_adjusted_average_from_results(results) -} - -// Update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) -where - T: Send + Sync + 'static, - RepositoryStdRwLock: UpdateTorrentAsync + Default, -{ - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::new(RepositoryStdRwLock::::default()); - let info_hashes = generate_unique_info_hashes(10_000); - let handles = FuturesUnordered::new(); - - // Add the torrents/peers to the torrent repository - for info_hash in &info_hashes { - torrent_repository - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; - } - - let start_time = std::time::Instant::now(); - - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) - .await; - - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while 
start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - let result = start_time.elapsed(); - - results.push(result); - } - - get_average_and_adjusted_average_from_results(results) -} diff --git a/packages/torrent-repository-benchmarks/src/main.rs b/packages/torrent-repository-benchmarks/src/main.rs index d7291afe2..b935cea43 100644 --- a/packages/torrent-repository-benchmarks/src/main.rs +++ b/packages/torrent-repository-benchmarks/src/main.rs @@ -1,7 +1,12 @@ +use std::sync::Arc; + use clap::Parser; use torrust_torrent_repository_benchmarks::args::Args; -use torrust_torrent_repository_benchmarks::benches::{asyn, sync, sync_asyn}; -use torrust_tracker::core::torrent::entry::{Entry, MutexStd, MutexTokio}; +use torrust_torrent_repository_benchmarks::benches::{asyn, sync}; +use torrust_tracker::core::torrent::{ + TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, + TorrentsRwLockTokioMutexTokio, +}; #[allow(clippy::too_many_lines)] #[allow(clippy::print_literal)] @@ -15,147 +20,167 @@ fn main() { .build() .unwrap(); - println!("tokio::sync::RwLock>"); + println!("TorrentsRwLockTokio"); println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::>(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::>(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::>(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + 
rt.block_on(asyn::update_multiple_torrents_in_parallel::>( + &rt, 10 + )) ); if let Some(true) = args.compare { println!(); - println!("std::sync::RwLock>"); + println!("TorrentsRwLockStd"); println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - sync::add_one_torrent::(1_000_000) + sync::add_one_torrent::>(1_000_000) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(sync::update_one_torrent_in_parallel::>(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::add_multiple_torrents_in_parallel::>(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::update_multiple_torrents_in_parallel::>(&rt, 10)) ); println!(); - println!("std::sync::RwLock>>>"); + println!("TorrentsRwLockStdMutexStd"); println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - sync::add_one_torrent::(1_000_000) + sync::add_one_torrent::>(1_000_000) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(sync::update_one_torrent_in_parallel::>( + &rt, 10 + )) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::add_multiple_torrents_in_parallel::>( + &rt, 10 + )) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::update_multiple_torrents_in_parallel::>( + &rt, 10 + )) ); println!(); - println!("std::sync::RwLock>>>"); + println!("TorrentsRwLockStdMutexTokio"); println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - 
rt.block_on(sync_asyn::add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::>(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(sync_asyn::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::>( + &rt, 10 + )) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(sync_asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::>( + &rt, 10 + )) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(sync_asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::>(&rt, 10)) ); println!(); - println!("tokio::sync::RwLock>>>"); + println!("TorrentsRwLockTokioMutexStd"); println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::>(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::>( + &rt, 10 + )) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::>( + &rt, 10 + )) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::>(&rt, 10)) ); println!(); - println!("tokio::sync::RwLock>>>"); + println!("TorrentsRwLockTokioMutexTokio"); println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::>(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - 
rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::>( + &rt, 10 + )) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::>( + &rt, 10 + )) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::>(&rt, 10)) ); } } diff --git a/src/core/mod.rs b/src/core/mod.rs index b070f90db..15d7b9c39 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -449,9 +449,9 @@ use torrust_tracker_primitives::TrackerMode; use self::auth::Key; use self::error::Error; use self::peer::Peer; -use self::torrent::entry::{Entry, ReadInfo, ReadPeers}; -use self::torrent::repository::tokio_sync::RepositoryTokioRwLock; -use self::torrent::repository::{Repository, UpdateTorrentAsync}; +use self::torrent::entry::{ReadInfo, ReadPeers}; +use self::torrent::repository::{Repository, UpdateTorrentSync}; +use self::torrent::Torrents; use crate::core::databases::Database; use crate::core::torrent::SwarmMetadata; use crate::shared::bit_torrent::info_hash::InfoHash; @@ -477,7 +477,7 @@ pub struct Tracker { policy: TrackerPolicy, keys: tokio::sync::RwLock>, whitelist: tokio::sync::RwLock>, - pub torrents: Arc>, + pub torrents: Arc, stats_event_sender: Option>, stats_repository: statistics::Repo, external_ip: Option, @@ -575,7 +575,7 @@ impl Tracker { mode, keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), - torrents: Arc::new(RepositoryTokioRwLock::::default()), + torrents: Arc::default(), stats_event_sender, stats_repository, database, @@ -732,7 +732,7 @@ impl Tracker { // code-review: consider splitting the function in two (command and query segregation). 
// `update_torrent_with_peer` and `get_stats` - let (stats_updated, stats) = self.torrents.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + let (stats_updated, stats) = self.torrents.update_torrent_with_peer_and_get_stats(info_hash, peer); if self.policy.persistent_torrent_completed_stat && stats_updated { let completed = stats.downloaded; @@ -1680,6 +1680,7 @@ mod tests { use aquatic_udp_protocol::AnnounceEvent; use crate::core::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; + use crate::core::torrent::entry::ReadInfo; use crate::core::torrent::repository::Repository; #[tokio::test] @@ -1710,10 +1711,10 @@ mod tests { .expect("it should be able to get entry"); // It persists the number of completed peers. - assert_eq!(torrent_entry.completed, 1); + assert_eq!(torrent_entry.get_stats().downloaded, 1); // It does not persist the peers - assert!(torrent_entry.peers.is_empty()); + assert!(torrent_entry.peers_is_empty()); } } } diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index b265066f0..78dab12c4 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -9,7 +9,7 @@ use std::sync::Arc; use serde::Deserialize; use crate::core::peer::Peer; -use crate::core::torrent::entry::{self, ReadInfo}; +use crate::core::torrent::entry::{ReadInfo, ReadPeers}; use crate::core::torrent::repository::Repository; use crate::core::Tracker; use crate::shared::bit_torrent::info_hash::InfoHash; @@ -99,9 +99,9 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op let torrent_entry = torrent_entry_option?; - let stats = entry::ReadInfo::get_stats(&torrent_entry); + let stats = torrent_entry.get_stats(); - let peers = entry::ReadPeers::get_peers(&torrent_entry, None); + let peers = torrent_entry.get_peers(None); let peers = Some(peers.iter().map(|peer| (**peer)).collect()); @@ -119,7 +119,7 @@ pub async fn get_torrents_page(tracker: Arc, pagination: 
Option<&Pagina let mut basic_infos: Vec = vec![]; for (info_hash, torrent_entry) in tracker.torrents.get_paginated(pagination).await { - let stats = entry::ReadInfo::get_stats(&torrent_entry); + let stats = torrent_entry.get_stats(); basic_infos.push(BasicInfo { info_hash, diff --git a/src/core/torrent/entry.rs b/src/core/torrent/entry.rs index 619cce9b3..815abd4fb 100644 --- a/src/core/torrent/entry.rs +++ b/src/core/torrent/entry.rs @@ -19,11 +19,11 @@ use crate::shared::clock::{Current, TimeNow}; pub struct Entry { /// The swarm: a network of peers that are all trying to download the torrent associated to this entry #[serde(skip)] - pub peers: std::collections::BTreeMap>, + pub(crate) peers: std::collections::BTreeMap>, /// The number of peers that have ever completed downloading the torrent associated to this entry - pub completed: u32, + pub(crate) completed: u32, } - +pub type Single = Entry; pub type MutexStd = Arc>; pub type MutexTokio = Arc>; @@ -35,6 +35,23 @@ pub trait ReadInfo { /// Returns True if Still a Valid Entry according to the Tracker Policy fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool; + + /// Returns True if the Peers is Empty + fn peers_is_empty(&self) -> bool; +} + +/// Same as [`ReadInfo`], but async. +pub trait ReadInfoAsync { + /// It returns the swarm metadata (statistics) as a struct: + /// + /// `(seeders, completed, leechers)` + fn get_stats(&self) -> impl std::future::Future + Send; + + /// Returns True if Still a Valid Entry according to the Tracker Policy + fn is_not_zombie(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; + + /// Returns True if the Peers is Empty + fn peers_is_empty(&self) -> impl std::future::Future + Send; } pub trait ReadPeers { @@ -49,15 +66,10 @@ pub trait ReadPeers { fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec>; } -pub trait ReadAsync { - /// Get all swarm peers, optionally limiting the result. +/// Same as [`ReadPeers`], but async. 
+pub trait ReadPeersAsync { fn get_peers(&self, limit: Option) -> impl std::future::Future>> + Send; - /// It returns the list of peers for a given peer client, optionally limiting the - /// result. - /// - /// It filters out the input peer, typically because we want to return this - /// list of peers to that client peer. fn get_peers_for_peer( &self, client: &peer::Peer, @@ -79,12 +91,14 @@ pub trait Update { fn remove_inactive_peers(&mut self, max_peer_timeout: u32); } +/// Same as [`Update`], except not `mut`. pub trait UpdateSync { fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool; fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata); fn remove_inactive_peers(&self, max_peer_timeout: u32); } +/// Same as [`Update`], except not `mut` and async. pub trait UpdateAsync { fn insert_or_update_peer(&self, peer: &peer::Peer) -> impl std::future::Future + Send; @@ -96,7 +110,7 @@ pub trait UpdateAsync { fn remove_inactive_peers(&self, max_peer_timeout: u32) -> impl std::future::Future + Send; } -impl ReadInfo for Entry { +impl ReadInfo for Single { #[allow(clippy::cast_possible_truncation)] fn get_stats(&self) -> SwarmMetadata { let complete: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; @@ -120,9 +134,41 @@ impl ReadInfo for Entry { true } + + fn peers_is_empty(&self) -> bool { + self.peers.is_empty() + } +} + +impl ReadInfo for MutexStd { + fn get_stats(&self) -> SwarmMetadata { + self.lock().expect("it should get a lock").get_stats() + } + + fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { + self.lock().expect("it should get a lock").is_not_zombie(policy) + } + + fn peers_is_empty(&self) -> bool { + self.lock().expect("it should get a lock").peers_is_empty() + } +} + +impl ReadInfoAsync for MutexTokio { + async fn get_stats(&self) -> SwarmMetadata { + self.lock().await.get_stats() + } + + async fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { + 
self.lock().await.is_not_zombie(policy) + } + + async fn peers_is_empty(&self) -> bool { + self.lock().await.peers_is_empty() + } } -impl ReadPeers for Entry { +impl ReadPeers for Single { fn get_peers(&self, limit: Option) -> Vec> { match limit { Some(limit) => self.peers.values().take(limit).cloned().collect(), @@ -162,7 +208,7 @@ impl ReadPeers for MutexStd { } } -impl ReadAsync for MutexTokio { +impl ReadPeersAsync for MutexTokio { async fn get_peers(&self, limit: Option) -> Vec> { self.lock().await.get_peers(limit) } @@ -172,7 +218,7 @@ impl ReadAsync for MutexTokio { } } -impl Update for Entry { +impl Update for Single { fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { let mut did_torrent_stats_change: bool = false; diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index 608765cf8..bfe068337 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -11,8 +11,6 @@ //! That's the most valuable information the peer want to get from the tracker, because it allows them to //! start downloading torrent from those peers. //! -//! > **NOTICE**: that both swarm data (torrent entries) and swarm metadata (aggregate counters) are related to only one torrent. -//! //! The "swarm metadata" contains aggregate data derived from the torrent entries. There two types of data: //! //! - For **active peers**: metrics related to the current active peers in the swarm. @@ -33,6 +31,15 @@ pub mod repository; use derive_more::Constructor; +pub type Torrents = TorrentsRwLockStdMutexStd; // Currently Used + +pub type TorrentsRwLockStd = repository::RwLockStd; +pub type TorrentsRwLockStdMutexStd = repository::RwLockStd; +pub type TorrentsRwLockStdMutexTokio = repository::RwLockStd; +pub type TorrentsRwLockTokio = repository::RwLockTokio; +pub type TorrentsRwLockTokioMutexStd = repository::RwLockTokio; +pub type TorrentsRwLockTokioMutexTokio = repository::RwLockTokio; + /// Swarm statistics for one torrent. 
/// Swarm metadata dictionary in the scrape response. /// @@ -138,14 +145,14 @@ mod tests { #[test] fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = entry::Entry::default(); + let torrent_entry = entry::Single::default(); assert_eq!(torrent_entry.get_peers(None).len(), 0); } #[test] fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -156,7 +163,7 @@ mod tests { #[test] fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -166,7 +173,7 @@ mod tests { #[test] fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -178,7 +185,7 @@ mod tests { #[test] fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -190,7 +197,7 @@ mod tests { #[test] fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // 
Add the peer @@ -204,7 +211,7 @@ mod tests { #[test] fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( ) { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Add a peer that did not exist before in the entry @@ -216,7 +223,7 @@ mod tests { #[test] fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add peer @@ -229,7 +236,7 @@ mod tests { #[test] fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let peer_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); @@ -263,7 +270,7 @@ mod tests { #[test] fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { @@ -280,7 +287,7 @@ mod tests { #[test] fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let torrent_seeder = a_torrent_seeder(); torrent_entry.insert_or_update_peer(&torrent_seeder); // Add seeder @@ -290,7 +297,7 @@ mod tests { #[test] fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut 
torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let torrent_leecher = a_torrent_leecher(); torrent_entry.insert_or_update_peer(&torrent_leecher); // Add leecher @@ -301,7 +308,7 @@ mod tests { #[test] fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( ) { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -316,7 +323,7 @@ mod tests { #[test] fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Announce "Completed" torrent download event. 
@@ -330,7 +337,7 @@ mod tests { #[test] fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = entry::Entry::default(); + let mut torrent_entry = entry::Single::default(); let timeout = 120u32; diff --git a/src/core/torrent/repository/mod.rs b/src/core/torrent/repository/mod.rs index 3af33aebe..1c4ce8ae9 100644 --- a/src/core/torrent/repository/mod.rs +++ b/src/core/torrent/repository/mod.rs @@ -4,10 +4,14 @@ use crate::core::services::torrent::Pagination; use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; use crate::shared::bit_torrent::info_hash::InfoHash; -pub mod std_sync; -pub mod tokio_sync; +pub mod rw_lock_std; +pub mod rw_lock_std_mutex_std; +pub mod rw_lock_std_mutex_tokio; +pub mod rw_lock_tokio; +pub mod rw_lock_tokio_mutex_std; +pub mod rw_lock_tokio_mutex_tokio; -pub trait Repository: Default { +pub trait Repository: Default + 'static { fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; fn get_metrics(&self) -> impl std::future::Future + Send; fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future> + Send; @@ -28,3 +32,13 @@ pub trait UpdateTorrentAsync { peer: &peer::Peer, ) -> impl std::future::Future + Send; } + +#[derive(Default)] +pub struct RwLockTokio { + torrents: tokio::sync::RwLock>, +} + +#[derive(Default)] +pub struct RwLockStd { + torrents: std::sync::RwLock>, +} diff --git a/src/core/torrent/repository/rw_lock_std.rs b/src/core/torrent/repository/rw_lock_std.rs new file mode 100644 index 000000000..9b3915bcb --- /dev/null +++ b/src/core/torrent/repository/rw_lock_std.rs @@ -0,0 +1,122 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::future::join_all; + +use super::{Repository, UpdateTorrentSync}; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{self, ReadInfo, Update}; +use crate::core::torrent::{SwarmMetadata, 
TorrentsRwLockStd}; +use crate::core::{peer, TorrentsMetrics}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +impl TorrentsRwLockStd { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("it should get the read lock") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("it should get the write lock") + } +} + +impl UpdateTorrentSync for TorrentsRwLockStd { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let mut db = self.get_torrents_mut(); + + let entry = db.entry(*info_hash).or_insert(entry::Single::default()); + + entry.insert_or_update_peer_and_get_stats(peer) + } +} + +impl UpdateTorrentSync for Arc { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer) + } +} + +impl Repository for TorrentsRwLockStd { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let metrics: Arc> = Arc::default(); + + let mut handles = Vec::>::default(); + + for e in self.get_torrents().values() { + let entry = e.clone(); + let metrics = metrics.clone(); + handles.push(tokio::task::spawn(async move { + let stats = entry.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + })); + } + + join_all(handles).await; + + *metrics.lock_owned().await + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, 
entry::Single)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = entry::Single { + peers: BTreeMap::default(), + completed: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let mut db = self.get_torrents_mut(); + + drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); + } + + async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository/rw_lock_std_mutex_std.rs b/src/core/torrent/repository/rw_lock_std_mutex_std.rs new file mode 100644 index 000000000..5a9a38f77 --- /dev/null +++ b/src/core/torrent/repository/rw_lock_std_mutex_std.rs @@ -0,0 +1,143 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::future::join_all; + +use super::{Repository, UpdateTorrentSync}; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{ReadInfo, Update, UpdateSync}; +use crate::core::torrent::{entry, SwarmMetadata, TorrentsRwLockStdMutexStd}; +use crate::core::{peer, TorrentsMetrics}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +impl TorrentsRwLockStdMutexStd { + fn get_torrents<'a>(&'a 
self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl UpdateTorrentSync for TorrentsRwLockStdMutexStd { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut(); + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.insert_or_update_peer_and_get_stats(peer) + } +} + +impl UpdateTorrentSync for Arc { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer) + } +} + +impl Repository for TorrentsRwLockStdMutexStd { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let metrics: Arc> = Arc::default(); + + // todo:: replace with a ring buffer + let mut handles = Vec::>::default(); + + for e in self.get_torrents().values() { + let entry = e.clone(); + let metrics = metrics.clone(); + handles.push(tokio::task::spawn(async move { + let stats = entry.lock().expect("it should get the lock").get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + })); + } + + join_all(handles).await; + + *metrics.lock_owned().await + } + + async fn 
get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexStd)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexStd::new( + entry::Single { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + // todo:: replace with a ring buffer + let mut handles = Vec::>::default(); + + for e in self.get_torrents().values() { + let entry = e.clone(); + handles.push(tokio::task::spawn(async move { + entry + .lock() + .expect("it should get lock for entry") + .remove_inactive_peers(max_peer_timeout); + })); + } + + join_all(handles).await; + } + + async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository/rw_lock_std_mutex_tokio.rs b/src/core/torrent/repository/rw_lock_std_mutex_tokio.rs new file mode 100644 index 000000000..1feb41e3e --- /dev/null +++ b/src/core/torrent/repository/rw_lock_std_mutex_tokio.rs @@ -0,0 +1,141 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::future::join_all; + +use super::{Repository, UpdateTorrentAsync}; +use 
crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{ReadInfo, Update, UpdateAsync}; +use crate::core::torrent::{entry, SwarmMetadata, TorrentsRwLockStdMutexTokio}; +use crate::core::{peer, TorrentsMetrics}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +impl TorrentsRwLockStdMutexTokio { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl UpdateTorrentAsync for TorrentsRwLockStdMutexTokio { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut(); + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.insert_or_update_peer_and_get_stats(peer).await + } +} + +impl UpdateTorrentAsync for Arc { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer).await + } +} + +impl Repository for TorrentsRwLockStdMutexTokio { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + 
.map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let metrics: Arc> = Arc::default(); + + // todo:: replace with a ring buffer + let mut handles = Vec::>::default(); + + for e in self.get_torrents().values() { + let entry = e.clone(); + let metrics = metrics.clone(); + handles.push(tokio::task::spawn(async move { + let stats = entry.lock().await.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + })); + } + + join_all(handles).await; + + *metrics.lock_owned().await + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexTokio::new( + entry::Single { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + // todo:: replace with a ring buffer + + let mut handles = Vec::>::default(); + + for e in self.get_torrents().values() { + let entry = e.clone(); + handles.push(tokio::task::spawn(async move { + entry.lock().await.remove_inactive_peers(max_peer_timeout); + })); + } + + join_all(handles).await; + } + + async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository/rw_lock_tokio.rs 
b/src/core/torrent/repository/rw_lock_tokio.rs new file mode 100644 index 000000000..3d633a837 --- /dev/null +++ b/src/core/torrent/repository/rw_lock_tokio.rs @@ -0,0 +1,124 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::future::join_all; + +use super::{Repository, UpdateTorrentAsync}; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{self, ReadInfo, Update}; +use crate::core::torrent::{SwarmMetadata, TorrentsRwLockTokio}; +use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +impl TorrentsRwLockTokio { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl UpdateTorrentAsync for TorrentsRwLockTokio { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let mut db = self.get_torrents_mut().await; + + let entry = db.entry(*info_hash).or_insert(entry::Single::default()); + + entry.insert_or_update_peer_and_get_stats(peer) + } +} + +impl UpdateTorrentAsync for Arc { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer).await + } +} + +impl Repository for TorrentsRwLockTokio { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::Single)> { + let db = self.get_torrents().await; + + match pagination { + 
Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let metrics: Arc> = Arc::default(); + + let mut handles = Vec::>::default(); + + for e in self.get_torrents().await.values() { + let entry = e.clone(); + let metrics = metrics.clone(); + handles.push(tokio::task::spawn(async move { + let stats = entry.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + })); + } + + join_all(handles).await; + + *metrics.lock_owned().await + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = entry::Single { + peers: BTreeMap::default(), + completed: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let mut db = self.get_torrents_mut().await; + + drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository/rw_lock_tokio_mutex_std.rs b/src/core/torrent/repository/rw_lock_tokio_mutex_std.rs new file mode 100644 index 000000000..3888c40b0 --- /dev/null +++ 
b/src/core/torrent/repository/rw_lock_tokio_mutex_std.rs @@ -0,0 +1,146 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::future::join_all; + +use super::{Repository, UpdateTorrentAsync}; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{ReadInfo, Update, UpdateSync}; +use crate::core::torrent::{entry, SwarmMetadata, TorrentsRwLockTokioMutexStd}; +use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +impl TorrentsRwLockTokioMutexStd { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl UpdateTorrentAsync for TorrentsRwLockTokioMutexStd { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut().await; + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.insert_or_update_peer_and_get_stats(peer) + } +} + +impl UpdateTorrentAsync for Arc { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer).await + } +} + +impl Repository for TorrentsRwLockTokioMutexStd { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, 
entry::MutexStd)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let metrics: Arc> = Arc::default(); + + // todo:: replace with a ring buffer + + let mut handles = Vec::>::default(); + + for e in self.get_torrents().await.values() { + let entry = e.clone(); + let metrics = metrics.clone(); + handles.push(tokio::task::spawn(async move { + let stats = entry.lock().expect("it should get a lock").get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + })); + } + + join_all(handles).await; + + *metrics.lock_owned().await + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexStd::new( + entry::Single { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + // todo:: replace with a ring buffer + let mut handles = Vec::>::default(); + + for e in self.get_torrents().await.values() { + let entry = e.clone(); + handles.push(tokio::task::spawn(async move { + entry + .lock() + .expect("it should get lock for entry") + .remove_inactive_peers(max_peer_timeout); + })); + } + + 
join_all(handles).await; + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository/rw_lock_tokio_mutex_tokio.rs b/src/core/torrent/repository/rw_lock_tokio_mutex_tokio.rs new file mode 100644 index 000000000..49e08d90c --- /dev/null +++ b/src/core/torrent/repository/rw_lock_tokio_mutex_tokio.rs @@ -0,0 +1,144 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::future::join_all; + +use super::{Repository, UpdateTorrentAsync}; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{self, ReadInfo, Update, UpdateAsync}; +use crate::core::torrent::{SwarmMetadata, TorrentsRwLockTokioMutexTokio}; +use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +impl TorrentsRwLockTokioMutexTokio { + async fn get_torrents<'a>( + &'a self, + ) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl UpdateTorrentAsync for TorrentsRwLockTokioMutexTokio { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut().await; + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.insert_or_update_peer_and_get_stats(peer).await + } +} + +impl UpdateTorrentAsync for Arc { + async 
fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer).await + } +} + +impl Repository for TorrentsRwLockTokioMutexTokio { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let metrics: Arc> = Arc::default(); + + // todo:: replace with a ring buffer + let mut handles = Vec::>::default(); + + for e in self.get_torrents().await.values() { + let entry = e.clone(); + let metrics = metrics.clone(); + handles.push(tokio::task::spawn(async move { + let stats = entry.lock().await.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + })); + } + + join_all(handles).await; + + *metrics.lock_owned().await + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexTokio::new( + entry::Single { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + 
db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + // todo:: replace with a ring buffer + let mut handles = Vec::>::default(); + + for e in self.get_torrents().await.values() { + let entry = e.clone(); + handles.push(tokio::task::spawn(async move { + entry.lock().await.remove_inactive_peers(max_peer_timeout); + })); + } + + join_all(handles).await; + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository/std_sync.rs b/src/core/torrent/repository/std_sync.rs deleted file mode 100644 index ba38db6ed..000000000 --- a/src/core/torrent/repository/std_sync.rs +++ /dev/null @@ -1,365 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use futures::executor::block_on; -use futures::future::join_all; - -use super::{Repository, UpdateTorrentAsync, UpdateTorrentSync}; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use crate::core::torrent::entry::{Entry, ReadInfo, Update, UpdateAsync, UpdateSync}; -use crate::core::torrent::{entry, SwarmMetadata}; -use crate::core::{peer, TorrentsMetrics}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -#[derive(Default)] -pub struct RepositoryStdRwLock { - torrents: std::sync::RwLock>, -} - -impl RepositoryStdRwLock { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl UpdateTorrentAsync for RepositoryStdRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: 
&InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); - - let torrent_entry = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut(); - let entry = torrents_lock.entry(*info_hash).or_insert(Arc::default()); - entry.clone() - }; - - torrent_entry.insert_or_update_peer_and_get_stats(peer).await - } -} -impl Repository for RepositoryStdRwLock { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents(); - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { - let db = self.get_torrents(); - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let db = self.get_torrents(); - let metrics: Arc> = Arc::default(); - - let futures = db.values().map(|e| { - let metrics = metrics.clone(); - let entry = e.clone(); - - tokio::spawn(async move { - let stats = entry.lock().await.get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - }) - }); - - block_on(join_all(futures)); - - *metrics.blocking_lock_owned() - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut db = self.get_torrents_mut(); - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if db.contains_key(info_hash) { - continue; - } - - let entry = entry::MutexTokio::new( - Entry { - peers: BTreeMap::default(), - completed: 
*completed, - } - .into(), - ); - - db.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut(); - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - let db = self.get_torrents(); - - let futures = db.values().map(|e| { - let entry = e.clone(); - tokio::spawn(async move { entry.lock().await.remove_inactive_peers(max_peer_timeout) }) - }); - - block_on(join_all(futures)); - } - - async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { - let mut db = self.get_torrents_mut(); - - db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); - } -} - -impl RepositoryStdRwLock { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl UpdateTorrentSync for RepositoryStdRwLock { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut(); - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(std::sync::Mutex::new(Entry::default()))); - entry.clone() - }; - - torrent_entry.insert_or_update_peer_and_get_stats(peer) - } -} -impl Repository for RepositoryStdRwLock { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents(); - db.get(key).cloned() - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let db = 
self.get_torrents(); - let metrics: Arc> = Arc::default(); - - let futures = db.values().map(|e| { - let metrics = metrics.clone(); - let entry = e.clone(); - - tokio::spawn(async move { - let stats = entry.lock().expect("it should lock the entry").get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - }) - }); - - block_on(join_all(futures)); - - *metrics.blocking_lock_owned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexStd)> { - let db = self.get_torrents(); - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut(); - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = entry::MutexStd::new( - Entry { - peers: BTreeMap::default(), - completed: *completed, - } - .into(), - ); - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut(); - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - let db = self.get_torrents(); - - let futures = db.values().map(|e| { - let entry = e.clone(); - tokio::spawn(async move { - entry - .lock() - .expect("it should get lock for entry") - .remove_inactive_peers(max_peer_timeout); - }) - }); - - block_on(join_all(futures)); - } - - async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { - let mut db = self.get_torrents_mut(); - - 
db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); - } -} - -impl RepositoryStdRwLock { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("it should get the read lock") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("it should get the write lock") - } -} - -impl UpdateTorrentSync for RepositoryStdRwLock { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let mut torrents = self.torrents.write().unwrap(); - - let torrent_entry = match torrents.entry(*info_hash) { - std::collections::btree_map::Entry::Vacant(vacant) => vacant.insert(Entry::default()), - std::collections::btree_map::Entry::Occupied(entry) => entry.into_mut(), - }; - - torrent_entry.insert_or_update_peer_and_get_stats(peer) - } -} -impl Repository for RepositoryStdRwLock { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents(); - db.get(key).cloned() - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let db = self.get_torrents(); - let metrics: Arc> = Arc::default(); - - let futures = db.values().map(|e| { - let metrics = metrics.clone(); - let entry = e.clone(); - - tokio::spawn(async move { - let stats = entry.get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - }) - }); - - block_on(join_all(futures)); - - *metrics.blocking_lock_owned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, Entry)> { - let db = self.get_torrents(); - - match pagination { - Some(pagination) => db - .iter() - 
.skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut(); - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = Entry { - peers: BTreeMap::default(), - completed: *completed, - }; - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut(); - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - let mut db = self.get_torrents_mut(); - - drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); - } - - async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { - let mut db = self.get_torrents_mut(); - - db.retain(|_, e| e.is_not_zombie(policy)); - } -} diff --git a/src/core/torrent/repository/tokio_sync.rs b/src/core/torrent/repository/tokio_sync.rs deleted file mode 100644 index 83edf1188..000000000 --- a/src/core/torrent/repository/tokio_sync.rs +++ /dev/null @@ -1,378 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use futures::future::join_all; - -use super::{Repository, UpdateTorrentAsync}; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use crate::core::torrent::entry::{Entry, ReadInfo, Update, UpdateAsync, UpdateSync}; -use crate::core::torrent::{entry, SwarmMetadata}; -use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -#[derive(Default)] -pub struct RepositoryTokioRwLock { - torrents: tokio::sync::RwLock>, -} - -impl RepositoryTokioRwLock { - async fn get_torrents<'a>( - &'a self, - ) -> 
tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl UpdateTorrentAsync for RepositoryTokioRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let maybe_torrent; - { - let db = self.torrents.read().await; - maybe_torrent = db.get(info_hash).cloned(); - } - - let torrent = if let Some(torrent) = maybe_torrent { - torrent - } else { - let entry = entry::MutexTokio::default(); - let mut db = self.torrents.write().await; - db.insert(*info_hash, entry.clone()); - entry - }; - - torrent.insert_or_update_peer_and_get_stats(peer).await - } -} - -impl Repository for RepositoryTokioRwLock { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents().await; - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { - let db = self.get_torrents().await; - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let db = self.get_torrents().await; - let metrics: Arc> = Arc::default(); - - let futures = db.values().map(|e| { - let metrics = metrics.clone(); - let entry = e.clone(); - - tokio::spawn(async move { - let stats = entry.lock().await.get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 
1; - }) - }); - - join_all(futures).await; - - *metrics.lock_owned().await - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut db = self.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if db.contains_key(info_hash) { - continue; - } - - let entry = entry::MutexTokio::new( - Entry { - peers: BTreeMap::default(), - completed: *completed, - } - .into(), - ); - - db.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut().await; - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - let db = self.get_torrents().await; - - let futures = db.values().map(|e| { - let entry = e.clone(); - tokio::spawn(async move { entry.lock().await.remove_inactive_peers(max_peer_timeout) }) - }); - - join_all(futures).await; - } - - async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut().await; - - db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); - } -} - -impl RepositoryTokioRwLock { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl UpdateTorrentAsync for RepositoryTokioRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let maybe_torrent; - { - let db = self.torrents.read().await; - maybe_torrent = db.get(info_hash).cloned(); - } - - let torrent = if let Some(torrent) = maybe_torrent { - torrent - } else { - let entry = entry::MutexStd::default(); - let mut db = self.torrents.write().await; 
- db.insert(*info_hash, entry.clone()); - entry - }; - - torrent.insert_or_update_peer_and_get_stats(peer) - } -} - -impl Repository for RepositoryTokioRwLock { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents().await; - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexStd)> { - let db = self.get_torrents().await; - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let db = self.get_torrents().await; - let metrics: Arc> = Arc::default(); - - let futures = db.values().map(|e| { - let metrics = metrics.clone(); - let entry = e.clone(); - - tokio::spawn(async move { - let stats = entry.lock().expect("it should lock the entry").get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - }) - }); - - join_all(futures).await; - - *metrics.lock_owned().await - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = entry::MutexStd::new( - Entry { - peers: BTreeMap::default(), - completed: *completed, - } - .into(), - ); - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut().await; - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - let db = self.get_torrents().await; - - let 
futures = db.values().map(|e| { - let entry = e.clone(); - tokio::spawn(async move { - entry - .lock() - .expect("it should get lock for entry") - .remove_inactive_peers(max_peer_timeout); - }) - }); - - join_all(futures).await; - } - - async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut().await; - - db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); - } -} - -impl RepositoryTokioRwLock { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>(&'a self) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl UpdateTorrentAsync for RepositoryTokioRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let mut db = self.torrents.write().await; - - let torrent = db.entry(*info_hash).or_insert(Entry::default()); - - torrent.insert_or_update_peer_and_get_stats(peer) - } -} - -impl Repository for RepositoryTokioRwLock { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents().await; - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, Entry)> { - let db = self.get_torrents().await; - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let db = self.get_torrents().await; - let metrics: Arc> = Arc::default(); - - let futures = db.values().map(|e| { - let metrics = metrics.clone(); - let entry = e.clone(); - - tokio::spawn(async move { - 
let stats = entry.get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - }) - }); - - join_all(futures).await; - - *metrics.lock_owned().await - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = Entry { - peers: BTreeMap::default(), - completed: *completed, - }; - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut().await; - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - let mut db = self.get_torrents_mut().await; - - drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); - } - - async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut().await; - - db.retain(|_, e| e.is_not_zombie(policy)); - } -} From 9a43815d00ed13e6867d07b64881d4a5391e64aa Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 13 Feb 2024 15:23:57 +0800 Subject: [PATCH 0773/1003] dev: move torrent/repository to packages --- .vscode/settings.json | 1 + Cargo.lock | 31 +- Cargo.toml | 6 +- cSpell.json | 4 + packages/configuration/src/lib.rs | 7 + packages/primitives/Cargo.toml | 4 + packages/primitives/src/announce_event.rs | 43 +++ packages/primitives/src/info_hash.rs | 165 ++++++++++ packages/primitives/src/lib.rs | 37 +++ packages/primitives/src/pagination.rs | 50 +++ {src/core => packages/primitives/src}/peer.rs | 186 +++++------- packages/primitives/src/swarm_metadata.rs | 22 ++ packages/primitives/src/torrent_metrics.rs | 25 ++ 
.../torrent-repository-benchmarks/Cargo.toml | 22 -- .../torrent-repository-benchmarks/README.md | 1 - .../torrent-repository-benchmarks/src/lib.rs | 2 - packages/torrent-repository/Cargo.toml | 24 ++ packages/torrent-repository/README.md | 11 + .../benches/helpers}/args.rs | 0 .../benches/helpers}/asyn.rs | 34 ++- .../benches/helpers}/mod.rs | 1 + .../benches/helpers}/sync.rs | 34 ++- .../benches/helpers}/utils.rs | 8 +- .../benches/repository-benchmark.rs} | 65 ++-- packages/torrent-repository/src/entry/mod.rs | 98 ++++++ .../torrent-repository/src/entry/mutex_std.rs | 50 +++ .../src/entry/mutex_tokio.rs | 46 +++ .../torrent-repository/src/entry/single.rs | 105 +++++++ packages/torrent-repository/src/lib.rs | 15 + .../torrent-repository/src}/repository/mod.rs | 32 +- .../src/repository/rw_lock_std.rs | 112 +++++++ .../src/repository/rw_lock_std_mutex_std.rs | 123 ++++++++ .../src/repository/rw_lock_std_mutex_tokio.rs | 131 ++++++++ .../src/repository/rw_lock_tokio.rs | 113 +++++++ .../src/repository/rw_lock_tokio_mutex_std.rs | 124 ++++++++ .../repository/rw_lock_tokio_mutex_tokio.rs | 124 ++++++++ src/bootstrap/jobs/torrent_cleanup.rs | 2 +- src/console/clients/checker/checks/http.rs | 2 +- src/console/clients/checker/checks/udp.rs | 2 +- src/console/clients/http/app.rs | 2 +- src/console/clients/udp/app.rs | 2 +- src/console/clients/udp/checker.rs | 2 +- src/core/auth.rs | 5 +- src/core/databases/mod.rs | 11 +- src/core/databases/mysql.rs | 2 +- src/core/databases/sqlite.rs | 5 +- src/core/error.rs | 3 +- src/core/mod.rs | 169 ++++++----- src/core/peer_tests.rs | 43 +++ src/core/services/statistics/mod.rs | 9 +- src/core/services/torrent.rs | 77 +---- src/core/torrent/entry.rs | 287 ------------------ src/core/torrent/mod.rs | 88 ++---- src/core/torrent/repository/rw_lock_std.rs | 122 -------- .../repository/rw_lock_std_mutex_std.rs | 143 --------- .../repository/rw_lock_std_mutex_tokio.rs | 141 --------- src/core/torrent/repository/rw_lock_tokio.rs | 124 
-------- .../repository/rw_lock_tokio_mutex_std.rs | 146 --------- .../repository/rw_lock_tokio_mutex_tokio.rs | 144 --------- .../apis/v1/context/stats/resources.rs | 3 +- .../apis/v1/context/torrent/handlers.rs | 5 +- .../apis/v1/context/torrent/resources/peer.rs | 46 ++- .../v1/context/torrent/resources/torrent.rs | 16 +- .../apis/v1/context/whitelist/handlers.rs | 2 +- src/servers/http/mod.rs | 12 +- src/servers/http/percent_encoding.rs | 23 +- .../http/v1/extractors/announce_request.rs | 5 +- .../http/v1/extractors/scrape_request.rs | 3 +- src/servers/http/v1/handlers/announce.rs | 30 +- src/servers/http/v1/handlers/scrape.rs | 2 +- src/servers/http/v1/requests/announce.rs | 17 +- src/servers/http/v1/requests/scrape.rs | 7 +- src/servers/http/v1/responses/announce.rs | 41 +-- src/servers/http/v1/responses/scrape.rs | 9 +- src/servers/http/v1/services/announce.rs | 21 +- src/servers/http/v1/services/scrape.rs | 13 +- src/servers/udp/handlers.rs | 30 +- src/servers/udp/logging.rs | 2 +- src/servers/udp/mod.rs | 14 +- src/servers/udp/peer_builder.rs | 20 +- src/servers/udp/request.rs | 3 +- src/shared/bit_torrent/common.rs | 21 -- src/shared/bit_torrent/info_hash.rs | 169 ++--------- .../tracker/http/client/requests/announce.rs | 8 +- .../tracker/http/client/requests/scrape.rs | 3 +- .../tracker/http/client/responses/announce.rs | 7 +- src/shared/clock/mod.rs | 4 +- src/shared/clock/time_extent.rs | 12 +- src/shared/clock/utils.rs | 10 - tests/servers/api/environment.rs | 6 +- .../servers/api/v1/contract/context/stats.rs | 4 +- .../api/v1/contract/context/torrent.rs | 4 +- .../api/v1/contract/context/whitelist.rs | 2 +- tests/servers/http/environment.rs | 6 +- tests/servers/http/requests/announce.rs | 8 +- tests/servers/http/requests/scrape.rs | 2 +- tests/servers/http/responses/announce.rs | 6 +- tests/servers/http/v1/contract.rs | 36 +-- tests/servers/udp/environment.rs | 6 +- 99 files changed, 2072 insertions(+), 1953 deletions(-) create mode 100644 
packages/primitives/src/announce_event.rs create mode 100644 packages/primitives/src/info_hash.rs create mode 100644 packages/primitives/src/pagination.rs rename {src/core => packages/primitives/src}/peer.rs (83%) create mode 100644 packages/primitives/src/swarm_metadata.rs create mode 100644 packages/primitives/src/torrent_metrics.rs delete mode 100644 packages/torrent-repository-benchmarks/Cargo.toml delete mode 100644 packages/torrent-repository-benchmarks/README.md delete mode 100644 packages/torrent-repository-benchmarks/src/lib.rs create mode 100644 packages/torrent-repository/Cargo.toml create mode 100644 packages/torrent-repository/README.md rename packages/{torrent-repository-benchmarks/src => torrent-repository/benches/helpers}/args.rs (100%) rename packages/{torrent-repository-benchmarks/src/benches => torrent-repository/benches/helpers}/asyn.rs (81%) rename packages/{torrent-repository-benchmarks/src/benches => torrent-repository/benches/helpers}/mod.rs (75%) rename packages/{torrent-repository-benchmarks/src/benches => torrent-repository/benches/helpers}/sync.rs (81%) rename packages/{torrent-repository-benchmarks/src/benches => torrent-repository/benches/helpers}/utils.rs (89%) rename packages/{torrent-repository-benchmarks/src/main.rs => torrent-repository/benches/repository-benchmark.rs} (71%) create mode 100644 packages/torrent-repository/src/entry/mod.rs create mode 100644 packages/torrent-repository/src/entry/mutex_std.rs create mode 100644 packages/torrent-repository/src/entry/mutex_tokio.rs create mode 100644 packages/torrent-repository/src/entry/single.rs create mode 100644 packages/torrent-repository/src/lib.rs rename {src/core/torrent => packages/torrent-repository/src}/repository/mod.rs (57%) create mode 100644 packages/torrent-repository/src/repository/rw_lock_std.rs create mode 100644 packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs create mode 100644 packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs 
create mode 100644 packages/torrent-repository/src/repository/rw_lock_tokio.rs create mode 100644 packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs create mode 100644 packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs create mode 100644 src/core/peer_tests.rs delete mode 100644 src/core/torrent/entry.rs delete mode 100644 src/core/torrent/repository/rw_lock_std.rs delete mode 100644 src/core/torrent/repository/rw_lock_std_mutex_std.rs delete mode 100644 src/core/torrent/repository/rw_lock_std_mutex_tokio.rs delete mode 100644 src/core/torrent/repository/rw_lock_tokio.rs delete mode 100644 src/core/torrent/repository/rw_lock_tokio_mutex_std.rs delete mode 100644 src/core/torrent/repository/rw_lock_tokio_mutex_tokio.rs diff --git a/.vscode/settings.json b/.vscode/settings.json index 701e89ccf..caa48dd01 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -31,4 +31,5 @@ "evenBetterToml.formatter.trailingNewline": true, "evenBetterToml.formatter.reorderKeys": true, "evenBetterToml.formatter.reorderArrays": true, + } \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 26fb919af..8ec922448 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3449,17 +3449,6 @@ dependencies = [ "winnow 0.6.5", ] -[[package]] -name = "torrust-torrent-repository-benchmarks" -version = "3.0.0-alpha.12-develop" -dependencies = [ - "aquatic_udp_protocol", - "clap", - "futures", - "tokio", - "torrust-tracker", -] - [[package]] name = "torrust-tracker" version = "3.0.0-alpha.12-develop" @@ -3471,7 +3460,6 @@ dependencies = [ "axum-client-ip", "axum-extra", "axum-server", - "binascii", "chrono", "clap", "colored", @@ -3498,8 +3486,6 @@ dependencies = [ "serde_bytes", "serde_json", "serde_repr", - "tdyne-peer-id", - "tdyne-peer-id-registry", "thiserror", "tokio", "torrust-tracker-configuration", @@ -3507,6 +3493,7 @@ dependencies = [ "torrust-tracker-located-error", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + 
"torrust-tracker-torrent-repository", "tower-http", "trace", "tracing", @@ -3549,8 +3536,12 @@ dependencies = [ name = "torrust-tracker-primitives" version = "3.0.0-alpha.12-develop" dependencies = [ + "binascii", "derive_more", "serde", + "tdyne-peer-id", + "tdyne-peer-id-registry", + "thiserror", ] [[package]] @@ -3562,6 +3553,18 @@ dependencies = [ "torrust-tracker-primitives", ] +[[package]] +name = "torrust-tracker-torrent-repository" +version = "3.0.0-alpha.12-develop" +dependencies = [ + "clap", + "futures", + "serde", + "tokio", + "torrust-tracker-configuration", + "torrust-tracker-primitives", +] + [[package]] name = "tower" version = "0.4.13" diff --git a/Cargo.toml b/Cargo.toml index e6f196583..9610fffc2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,7 +37,6 @@ axum = { version = "0", features = ["macros"] } axum-client-ip = "0" axum-extra = { version = "0", features = ["query"] } axum-server = { version = "0", features = ["tls-rustls"] } -binascii = "0" chrono = { version = "0", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } colored = "2" @@ -62,14 +61,13 @@ serde_bencode = "0" serde_bytes = "0" serde_json = "1" serde_repr = "0" -tdyne-peer-id = "1" -tdyne-peer-id-registry = "0" thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "packages/configuration" } torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12-develop", path = "contrib/bencode" } torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "packages/located-error" } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "packages/primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-alpha.12-develop", path = "packages/torrent-repository" } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", 
"request-id", "trace"] } trace = "0" tracing = "0" @@ -91,7 +89,7 @@ members = [ "packages/located-error", "packages/primitives", "packages/test-helpers", - "packages/torrent-repository-benchmarks", + "packages/torrent-repository" ] [profile.dev] diff --git a/cSpell.json b/cSpell.json index da11cd29a..6d5f71b85 100644 --- a/cSpell.json +++ b/cSpell.json @@ -50,6 +50,7 @@ "filesd", "flamegraph", "Freebox", + "FrostegÃ¥rd", "gecos", "Grcov", "hasher", @@ -68,6 +69,7 @@ "Intermodal", "intervali", "kcachegrind", + "Joakim", "keyout", "lcov", "leecher", @@ -96,6 +98,7 @@ "oneshot", "ostr", "Pando", + "peekable", "proot", "proto", "Quickstart", @@ -109,6 +112,7 @@ "reqwest", "rerequests", "ringbuf", + "ringsize", "rngs", "rosegment", "routable", diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 4068c046f..b3b146717 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -243,6 +243,13 @@ use thiserror::Error; use torrust_tracker_located_error::{DynError, Located, LocatedError}; use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; +#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] +pub struct TrackerPolicy { + pub remove_peerless_torrents: bool, + pub max_peer_timeout: u32, + pub persistent_torrent_completed_stat: bool, +} + /// Information required for loading config #[derive(Debug, Default, Clone)] pub struct Info { diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index efcce71a9..3b2406a69 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -16,4 +16,8 @@ version.workspace = true [dependencies] derive_more = "0" +thiserror = "1" +binascii = "0" serde = { version = "1", features = ["derive"] } +tdyne-peer-id = "1" +tdyne-peer-id-registry = "0" \ No newline at end of file diff --git a/packages/primitives/src/announce_event.rs b/packages/primitives/src/announce_event.rs new file mode 100644 index 000000000..16e47da99 
--- /dev/null +++ b/packages/primitives/src/announce_event.rs @@ -0,0 +1,43 @@ +//! Copyright (c) 2020-2023 Joakim Frostegård and The Torrust Developers +//! +//! Distributed under Apache 2.0 license + +use serde::{Deserialize, Serialize}; + +/// Announce events. Described on the +/// [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, Serialize, Deserialize)] +pub enum AnnounceEvent { + /// The peer has started downloading the torrent. + Started, + /// The peer has ceased downloading the torrent. + Stopped, + /// The peer has completed downloading the torrent. + Completed, + /// This is one of the announcements done at regular intervals. + None, +} + +impl AnnounceEvent { + #[inline] + #[must_use] + pub fn from_i32(i: i32) -> Self { + match i { + 1 => Self::Completed, + 2 => Self::Started, + 3 => Self::Stopped, + _ => Self::None, + } + } + + #[inline] + #[must_use] + pub fn to_i32(&self) -> i32 { + match self { + AnnounceEvent::None => 0, + AnnounceEvent::Completed => 1, + AnnounceEvent::Started => 2, + AnnounceEvent::Stopped => 3, + } + } +} diff --git a/packages/primitives/src/info_hash.rs b/packages/primitives/src/info_hash.rs new file mode 100644 index 000000000..46ae6283e --- /dev/null +++ b/packages/primitives/src/info_hash.rs @@ -0,0 +1,165 @@ +use std::panic::Location; + +use thiserror::Error; + +/// `BitTorrent` Info Hash v1 +#[derive(PartialEq, Eq, Hash, Clone, Copy, Default, Debug)] +pub struct InfoHash(pub [u8; 20]); + +pub const INFO_HASH_BYTES_LEN: usize = 20; + +impl InfoHash { + /// Create a new `InfoHash` from a byte slice. + /// + /// # Panics + /// + /// Will panic if byte slice does not contains the exact amount of bytes need for the `InfoHash`.
+ #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + assert_eq!(bytes.len(), INFO_HASH_BYTES_LEN); + let mut ret = Self([0u8; INFO_HASH_BYTES_LEN]); + ret.0.clone_from_slice(bytes); + ret + } + + /// Returns the `InfoHash` internal byte array. + #[must_use] + pub fn bytes(&self) -> [u8; 20] { + self.0 + } + + /// Returns the `InfoHash` as a hex string. + #[must_use] + pub fn to_hex_string(&self) -> String { + self.to_string() + } +} + +impl Ord for InfoHash { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl std::cmp::PartialOrd for InfoHash { + fn partial_cmp(&self, other: &InfoHash) -> Option { + Some(self.cmp(other)) + } +} + +impl std::fmt::Display for InfoHash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut chars = [0u8; 40]; + binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); + write!(f, "{}", std::str::from_utf8(&chars).unwrap()) + } +} + +impl std::str::FromStr for InfoHash { + type Err = binascii::ConvertError; + + fn from_str(s: &str) -> Result { + let mut i = Self([0u8; 20]); + if s.len() != 40 { + return Err(binascii::ConvertError::InvalidInputLength); + } + binascii::hex2bin(s.as_bytes(), &mut i.0)?; + Ok(i) + } +} + +impl std::convert::From<&[u8]> for InfoHash { + fn from(data: &[u8]) -> InfoHash { + assert_eq!(data.len(), 20); + let mut ret = InfoHash([0u8; 20]); + ret.0.clone_from_slice(data); + ret + } +} + +impl std::convert::From<[u8; 20]> for InfoHash { + fn from(val: [u8; 20]) -> Self { + InfoHash(val) + } +} + +/// Errors that can occur when converting from a `Vec` to an `InfoHash`. +#[derive(Error, Debug)] +pub enum ConversionError { + /// Not enough bytes for infohash. An infohash is 20 bytes. + #[error("not enough bytes for infohash: {message} {location}")] + NotEnoughBytes { + location: &'static Location<'static>, + message: String, + }, + /// Too many bytes for infohash. An infohash is 20 bytes. 
+ #[error("too many bytes for infohash: {message} {location}")] + TooManyBytes { + location: &'static Location<'static>, + message: String, + }, +} + +impl TryFrom> for InfoHash { + type Error = ConversionError; + + fn try_from(bytes: Vec) -> Result { + if bytes.len() < INFO_HASH_BYTES_LEN { + return Err(ConversionError::NotEnoughBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + if bytes.len() > INFO_HASH_BYTES_LEN { + return Err(ConversionError::TooManyBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + Ok(Self::from_bytes(&bytes)) + } +} + +impl serde::ser::Serialize for InfoHash { + fn serialize(&self, serializer: S) -> Result { + let mut buffer = [0u8; 40]; + let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); + let str_out = std::str::from_utf8(bytes_out).unwrap(); + serializer.serialize_str(str_out) + } +} + +impl<'de> serde::de::Deserialize<'de> for InfoHash { + fn deserialize>(des: D) -> Result { + des.deserialize_str(InfoHashVisitor) + } +} + +struct InfoHashVisitor; + +impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { + type Value = InfoHash; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "a 40 character long hash") + } + + fn visit_str(self, v: &str) -> Result { + if v.len() != 40 { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"a 40 character long string", + )); + } + + let mut res = InfoHash([0u8; 20]); + + if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"a hexadecimal string", + )); + }; + Ok(res) + } +} diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index f6a14b9e8..664c0c82d 100644 --- a/packages/primitives/src/lib.rs +++ 
b/packages/primitives/src/lib.rs @@ -4,8 +4,43 @@ //! which is a `BitTorrent` tracker server. These structures are used not only //! by the tracker server crate, but also by other crates in the Torrust //! ecosystem. +use std::time::Duration; + +use info_hash::InfoHash; use serde::{Deserialize, Serialize}; +pub mod announce_event; +pub mod info_hash; +pub mod pagination; +pub mod peer; +pub mod swarm_metadata; +pub mod torrent_metrics; + +/// Duration since the Unix Epoch. +pub type DurationSinceUnixEpoch = Duration; + +/// Serializes a `DurationSinceUnixEpoch` as a Unix timestamp in milliseconds. +/// # Errors +/// +/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`. +pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { + #[allow(clippy::cast_possible_truncation)] + ser.serialize_u64(unix_time_value.as_millis() as u64) +} + +/// IP version used by the peer to connect to the tracker: IPv4 or IPv6 +#[derive(PartialEq, Eq, Debug)] +pub enum IPVersion { + /// + IPv4, + /// + IPv6, +} + +/// Number of bytes downloaded, uploaded or pending to download (left) by the peer. +#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, Serialize, Deserialize)] +pub struct NumberOfBytes(pub i64); + /// The database management system used by the tracker. /// /// Refer to: @@ -23,6 +58,8 @@ pub enum DatabaseDriver { MySQL, } +pub type PersistentTorrents = Vec<(InfoHash, u32)>; + /// The mode the tracker will run in. 
/// /// Refer to [Torrust Tracker Configuration](https://docs.rs/torrust-tracker-configuration) diff --git a/packages/primitives/src/pagination.rs b/packages/primitives/src/pagination.rs new file mode 100644 index 000000000..ab7dcfe2b --- /dev/null +++ b/packages/primitives/src/pagination.rs @@ -0,0 +1,50 @@ +use serde::Deserialize; + +/// A struct to keep information about the page when results are being paginated +#[derive(Deserialize, Copy, Clone, Debug, PartialEq)] +pub struct Pagination { + /// The page number, starting at 0 + pub offset: u32, + /// Page size. The number of results per page + pub limit: u32, +} + +impl Pagination { + #[must_use] + pub fn new(offset: u32, limit: u32) -> Self { + Self { offset, limit } + } + + #[must_use] + pub fn new_with_options(offset_option: Option, limit_option: Option) -> Self { + let offset = match offset_option { + Some(offset) => offset, + None => Pagination::default_offset(), + }; + let limit = match limit_option { + Some(offset) => offset, + None => Pagination::default_limit(), + }; + + Self { offset, limit } + } + + #[must_use] + pub fn default_offset() -> u32 { + 0 + } + + #[must_use] + pub fn default_limit() -> u32 { + 4000 + } +} + +impl Default for Pagination { + fn default() -> Self { + Self { + offset: Self::default_offset(), + limit: Self::default_limit(), + } + } +} diff --git a/src/core/peer.rs b/packages/primitives/src/peer.rs similarity index 83% rename from src/core/peer.rs rename to packages/primitives/src/peer.rs index eb2b7b759..5fb9e525f 100644 --- a/src/core/peer.rs +++ b/packages/primitives/src/peer.rs @@ -3,12 +3,12 @@ //! A sample peer: //! //! ```rust,no_run -//! use torrust_tracker::core::peer; +//! use torrust_tracker_primitives::peer; //! use std::net::SocketAddr; //! use std::net::IpAddr; //! use std::net::Ipv4Addr; -//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; -//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +//! 
use torrust_tracker_primitives::DurationSinceUnixEpoch; +//! //! //! peer::Peer { //! peer_id: peer::Id(*b"-qB00000000000000000"), @@ -20,38 +20,26 @@ //! event: AnnounceEvent::Started, //! }; //! ``` + use std::net::{IpAddr, SocketAddr}; -use std::panic::Location; use std::sync::Arc; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde::Serialize; -use thiserror::Error; -use crate::shared::bit_torrent::common::{AnnounceEventDef, NumberOfBytesDef}; -use crate::shared::clock::utils::ser_unix_time_value; -use crate::shared::clock::DurationSinceUnixEpoch; - -/// IP version used by the peer to connect to the tracker: IPv4 or IPv6 -#[derive(PartialEq, Eq, Debug)] -pub enum IPVersion { - /// - IPv4, - /// - IPv6, -} +use crate::announce_event::AnnounceEvent; +use crate::{ser_unix_time_value, DurationSinceUnixEpoch, IPVersion, NumberOfBytes}; /// Peer struct used by the core `Tracker`. /// /// A sample peer: /// /// ```rust,no_run -/// use torrust_tracker::core::peer; +/// use torrust_tracker_primitives::peer; /// use std::net::SocketAddr; /// use std::net::IpAddr; /// use std::net::Ipv4Addr; -/// use torrust_tracker::shared::clock::DurationSinceUnixEpoch; -/// use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +/// use torrust_tracker_primitives::DurationSinceUnixEpoch; +/// /// /// peer::Peer { /// peer_id: peer::Id(*b"-qB00000000000000000"), @@ -73,16 +61,12 @@ pub struct Peer { #[serde(serialize_with = "ser_unix_time_value")] pub updated: DurationSinceUnixEpoch, /// The total amount of bytes uploaded by this peer so far - #[serde(with = "NumberOfBytesDef")] pub uploaded: NumberOfBytes, /// The total amount of bytes downloaded by this peer so far - #[serde(with = "NumberOfBytesDef")] pub downloaded: NumberOfBytes, /// The number of bytes this peer still has to download - #[serde(with = "NumberOfBytesDef")] pub left: NumberOfBytes, /// This is an optional key which maps to started, completed, or stopped (or empty, which is the same as not being 
present). - #[serde(with = "AnnounceEventDef")] pub event: AnnounceEvent, } @@ -162,22 +146,9 @@ impl Peer { } } -/// Peer ID. A 20-byte array. -/// -/// A string of length 20 which this downloader uses as its id. -/// Each downloader generates its own id at random at the start of a new download. -/// -/// A sample peer ID: -/// -/// ```rust,no_run -/// use torrust_tracker::core::peer; -/// -/// let peer_id = peer::Id(*b"-qB00000000000000000"); -/// ``` -#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] -pub struct Id(pub [u8; 20]); +use std::panic::Location; -const PEER_ID_BYTES_LEN: usize = 20; +use thiserror::Error; /// Error returned when trying to convert an invalid peer id from another type. /// @@ -196,30 +167,6 @@ pub enum IdConversionError { }, } -impl Id { - /// # Panics - /// - /// Will panic if byte slice does not contains the exact amount of bytes need for the `Id`. - #[must_use] - pub fn from_bytes(bytes: &[u8]) -> Self { - assert_eq!( - PEER_ID_BYTES_LEN, - bytes.len(), - "we are testing the equality of the constant: `PEER_ID_BYTES_LEN` ({}) and the supplied `bytes` length: {}", - PEER_ID_BYTES_LEN, - bytes.len(), - ); - let mut ret = Self([0u8; PEER_ID_BYTES_LEN]); - ret.0.clone_from_slice(bytes); - ret - } - - #[must_use] - pub fn to_bytes(&self) -> [u8; 20] { - self.0 - } -} - impl From<[u8; 20]> for Id { fn from(bytes: [u8; 20]) -> Self { Id(bytes) @@ -263,7 +210,47 @@ impl std::fmt::Display for Id { } } +/// Peer ID. A 20-byte array. +/// +/// A string of length 20 which this downloader uses as its id. +/// Each downloader generates its own id at random at the start of a new download. 
+/// +/// A sample peer ID: +/// +/// ```rust,no_run +/// use torrust_tracker_primitives::peer; +/// +/// let peer_id = peer::Id(*b"-qB00000000000000000"); +/// ``` +/// +#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] +pub struct Id(pub [u8; 20]); + +pub const PEER_ID_BYTES_LEN: usize = 20; + impl Id { + /// # Panics + /// + /// Will panic if byte slice does not contains the exact amount of bytes need for the `Id`. + #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + assert_eq!( + PEER_ID_BYTES_LEN, + bytes.len(), + "we are testing the equality of the constant: `PEER_ID_BYTES_LEN` ({}) and the supplied `bytes` length: {}", + PEER_ID_BYTES_LEN, + bytes.len(), + ); + let mut ret = Self([0u8; PEER_ID_BYTES_LEN]); + ret.0.clone_from_slice(bytes); + ret + } + + #[must_use] + pub fn to_bytes(&self) -> [u8; 20] { + self.0 + } + #[must_use] /// Converts to hex string. /// @@ -329,12 +316,27 @@ impl Serialize for Id { } } +/// Marker Trait for Peer Vectors +pub trait Encoding: From + PartialEq {} + +impl FromIterator for Vec

{ + fn from_iter>(iter: T) -> Self { + let mut peers: Vec

= vec![]; + + for peer in iter { + peers.push(peer.into()); + } + + peers + } +} + pub mod fixture { use std::net::SocketAddr; - use aquatic_udp_protocol::NumberOfBytes; - use super::{Id, Peer}; + use crate::announce_event::AnnounceEvent; + use crate::{DurationSinceUnixEpoch, NumberOfBytes}; #[derive(PartialEq, Debug)] @@ -396,11 +398,11 @@ pub mod fixture { Self { peer_id: Id(*b"-qB00000000000000000"), peer_addr: std::net::SocketAddr::new(std::net::IpAddr::V4(std::net::Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: crate::shared::clock::DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), - event: aquatic_udp_protocol::AnnounceEvent::Started, + event: AnnounceEvent::Started, } } } @@ -409,7 +411,7 @@ pub mod fixture { #[cfg(test)] pub mod test { mod torrent_peer_id { - use crate::core::peer; + use crate::peer; #[test] fn should_be_instantiated_from_a_byte_slice() { @@ -518,50 +520,4 @@ pub mod test { assert_eq!(peer::Id(*b"-qB00000000000000000").to_bytes(), *b"-qB00000000000000000"); } } - - mod torrent_peer { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use serde_json::Value; - - use crate::core::peer::{self, Peer}; - use crate::shared::clock::{Current, Time}; - - #[test] - fn it_should_be_serializable() { - let torrent_peer = Peer { - peer_id: peer::Id(*b"-qB0000-000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: Current::now(), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - - let raw_json = serde_json::to_string(&torrent_peer).unwrap(); - - let expected_raw_json = r#" - { - "peer_id": { - "id": "0x2d7142303030302d303030303030303030303030", - "client": "qBittorrent" - }, - "peer_addr":"126.0.0.1:8080", - "updated":0, - 
"uploaded":0, - "downloaded":0, - "left":0, - "event":"Started" - } - "#; - - assert_eq!( - serde_json::from_str::(&raw_json).unwrap(), - serde_json::from_str::(expected_raw_json).unwrap() - ); - } - } } diff --git a/packages/primitives/src/swarm_metadata.rs b/packages/primitives/src/swarm_metadata.rs new file mode 100644 index 000000000..ca880b54d --- /dev/null +++ b/packages/primitives/src/swarm_metadata.rs @@ -0,0 +1,22 @@ +use derive_more::Constructor; + +/// Swarm statistics for one torrent. +/// Swarm metadata dictionary in the scrape response. +/// +/// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] +pub struct SwarmMetadata { + /// (i.e `completed`): The number of peers that have ever completed downloading + pub downloaded: u32, // + /// (i.e `seeders`): The number of active peers that have completed downloading (seeders) + pub complete: u32, //seeders + /// (i.e `leechers`): The number of active peers that have not completed downloading (leechers) + pub incomplete: u32, +} + +impl SwarmMetadata { + #[must_use] + pub fn zeroed() -> Self { + Self::default() + } +} diff --git a/packages/primitives/src/torrent_metrics.rs b/packages/primitives/src/torrent_metrics.rs new file mode 100644 index 000000000..c60507171 --- /dev/null +++ b/packages/primitives/src/torrent_metrics.rs @@ -0,0 +1,25 @@ +use std::ops::AddAssign; + +/// Structure that holds general `Tracker` torrents metrics. +/// +/// Metrics are aggregate values for all torrents. +#[derive(Copy, Clone, Debug, PartialEq, Default)] +pub struct TorrentsMetrics { + /// Total number of seeders for all torrents + pub seeders: u64, + /// Total number of peers that have ever completed downloading for all torrents. + pub completed: u64, + /// Total number of leechers for all torrents. + pub leechers: u64, + /// Total number of torrents. 
+ pub torrents: u64, +} + +impl AddAssign for TorrentsMetrics { + fn add_assign(&mut self, rhs: Self) { + self.seeders += rhs.seeders; + self.completed += rhs.completed; + self.leechers += rhs.leechers; + self.torrents += rhs.torrents; + } +} diff --git a/packages/torrent-repository-benchmarks/Cargo.toml b/packages/torrent-repository-benchmarks/Cargo.toml deleted file mode 100644 index e8b22f52f..000000000 --- a/packages/torrent-repository-benchmarks/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -description = "A set of benchmarks for the torrent repository" -keywords = ["benchmarking", "library", "repository", "torrent"] -name = "torrust-torrent-repository-benchmarks" -readme = "README.md" - -authors.workspace = true -documentation.workspace = true -edition.workspace = true -homepage.workspace = true -license.workspace = true -publish.workspace = true -repository.workspace = true -rust-version.workspace = true -version.workspace = true - -[dependencies] -aquatic_udp_protocol = "0.8.0" -clap = { version = "4.4.8", features = ["derive"] } -futures = "0.3.29" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker = { path = "../../" } diff --git a/packages/torrent-repository-benchmarks/README.md b/packages/torrent-repository-benchmarks/README.md deleted file mode 100644 index 14183ea69..000000000 --- a/packages/torrent-repository-benchmarks/README.md +++ /dev/null @@ -1 +0,0 @@ -# Benchmarks of the torrent repository diff --git a/packages/torrent-repository-benchmarks/src/lib.rs b/packages/torrent-repository-benchmarks/src/lib.rs deleted file mode 100644 index 58ebc2057..000000000 --- a/packages/torrent-repository-benchmarks/src/lib.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod args; -pub mod benches; diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml new file mode 100644 index 000000000..0df82a2c6 --- /dev/null +++ b/packages/torrent-repository/Cargo.toml @@ -0,0 
+1,24 @@ +[package] +description = "A library that provides a torrent repository for the Torrust Tracker." +keywords = ["torrents", "repository", "library"] +name = "torrust-tracker-torrent-repository" +readme = "README.md" + +authors.workspace = true +categories.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +clap = { version = "4.4.8", features = ["derive"] } +futures = "0.3.29" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } +serde = { version = "1", features = ["derive"] } diff --git a/packages/torrent-repository/README.md b/packages/torrent-repository/README.md new file mode 100644 index 000000000..98d7d922b --- /dev/null +++ b/packages/torrent-repository/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker Torrent Repository + +A library that provides a torrent repository to the [Torrust Tracker](https://github.com/torrust/torrust-tracker). + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-torrent-repository). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE).
diff --git a/packages/torrent-repository-benchmarks/src/args.rs b/packages/torrent-repository/benches/helpers/args.rs similarity index 100% rename from packages/torrent-repository-benchmarks/src/args.rs rename to packages/torrent-repository/benches/helpers/args.rs diff --git a/packages/torrent-repository-benchmarks/src/benches/asyn.rs b/packages/torrent-repository/benches/helpers/asyn.rs similarity index 81% rename from packages/torrent-repository-benchmarks/src/benches/asyn.rs rename to packages/torrent-repository/benches/helpers/asyn.rs index dffd31682..4fb37104f 100644 --- a/packages/torrent-repository-benchmarks/src/benches/asyn.rs +++ b/packages/torrent-repository/benches/helpers/asyn.rs @@ -1,16 +1,17 @@ +use std::sync::Arc; use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository::UpdateTorrentAsync; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_torrent_repository::repository::RepositoryAsync; -use crate::args::Args; -use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; +use super::args::Args; +use super::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; -pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) +pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) where - V: UpdateTorrentAsync + Default, + V: RepositoryAsync + Default, { let mut results: Vec = Vec::with_capacity(samples); @@ -34,15 +35,16 @@ where } // Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - V: 
UpdateTorrentAsync + Default + Clone + Send + Sync + 'static, + V: RepositoryAsync + Default, + Arc: Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = V::default(); + let torrent_repository = Arc::::default(); let info_hash: &'static InfoHash = &InfoHash([0; 20]); let handles = FuturesUnordered::new(); @@ -83,15 +85,16 @@ where } // Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - V: UpdateTorrentAsync + Default + Clone + Send + Sync + 'static, + V: RepositoryAsync + Default, + Arc: Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = V::default(); + let torrent_repository = Arc::::default(); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); @@ -127,15 +130,16 @@ where } // Async update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - V: UpdateTorrentAsync + Default + Clone + Send + Sync + 'static, + V: RepositoryAsync + Default, + Arc: Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = V::default(); + let torrent_repository = Arc::::default(); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); diff --git 
a/packages/torrent-repository-benchmarks/src/benches/mod.rs b/packages/torrent-repository/benches/helpers/mod.rs similarity index 75% rename from packages/torrent-repository-benchmarks/src/benches/mod.rs rename to packages/torrent-repository/benches/helpers/mod.rs index 1026aa4bf..758c123bd 100644 --- a/packages/torrent-repository-benchmarks/src/benches/mod.rs +++ b/packages/torrent-repository/benches/helpers/mod.rs @@ -1,3 +1,4 @@ +pub mod args; pub mod asyn; pub mod sync; pub mod utils; diff --git a/packages/torrent-repository-benchmarks/src/benches/sync.rs b/packages/torrent-repository/benches/helpers/sync.rs similarity index 81% rename from packages/torrent-repository-benchmarks/src/benches/sync.rs rename to packages/torrent-repository/benches/helpers/sync.rs index 04385bc55..aa2f8188a 100644 --- a/packages/torrent-repository-benchmarks/src/benches/sync.rs +++ b/packages/torrent-repository/benches/helpers/sync.rs @@ -1,18 +1,19 @@ +use std::sync::Arc; use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository::UpdateTorrentSync; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_torrent_repository::repository::Repository; -use crate::args::Args; -use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; +use super::args::Args; +use super::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; // Simply add one torrent #[must_use] -pub fn add_one_torrent(samples: usize) -> (Duration, Duration) +pub fn add_one_torrent(samples: usize) -> (Duration, Duration) where - V: UpdateTorrentSync + Default, + V: Repository + Default, { let mut results: Vec = Vec::with_capacity(samples); @@ -34,15 +35,16 @@ where } // Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn 
update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - V: UpdateTorrentSync + Default + Clone + Send + Sync + 'static, + V: Repository + Default, + Arc: Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = V::default(); + let torrent_repository = Arc::::default(); let info_hash: &'static InfoHash = &InfoHash([0; 20]); let handles = FuturesUnordered::new(); @@ -79,15 +81,16 @@ where } // Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - V: UpdateTorrentSync + Default + Clone + Send + Sync + 'static, + V: Repository + Default, + Arc: Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); for _ in 0..samples { - let torrent_repository = V::default(); + let torrent_repository = Arc::::default(); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); @@ -121,15 +124,16 @@ where } // Update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - V: UpdateTorrentSync + Default + Clone + Send + Sync + 'static, + V: Repository + Default, + Arc: Clone + Send + Sync + 'static, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); 
for _ in 0..samples { - let torrent_repository = V::default(); + let torrent_repository = Arc::::default(); let info_hashes = generate_unique_info_hashes(10_000); let handles = FuturesUnordered::new(); diff --git a/packages/torrent-repository-benchmarks/src/benches/utils.rs b/packages/torrent-repository/benches/helpers/utils.rs similarity index 89% rename from packages/torrent-repository-benchmarks/src/benches/utils.rs rename to packages/torrent-repository/benches/helpers/utils.rs index ef1640038..aed9f40cf 100644 --- a/packages/torrent-repository-benchmarks/src/benches/utils.rs +++ b/packages/torrent-repository/benches/helpers/utils.rs @@ -2,10 +2,10 @@ use std::collections::HashSet; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::time::Duration; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use torrust_tracker::core::peer::{Id, Peer}; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -use torrust_tracker::shared::clock::DurationSinceUnixEpoch; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::{Id, Peer}; +use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes}; pub const DEFAULT_PEER: Peer = Peer { peer_id: Id([0; 20]), diff --git a/packages/torrent-repository-benchmarks/src/main.rs b/packages/torrent-repository/benches/repository-benchmark.rs similarity index 71% rename from packages/torrent-repository-benchmarks/src/main.rs rename to packages/torrent-repository/benches/repository-benchmark.rs index b935cea43..bff34b256 100644 --- a/packages/torrent-repository-benchmarks/src/main.rs +++ b/packages/torrent-repository/benches/repository-benchmark.rs @@ -1,13 +1,14 @@ -use std::sync::Arc; +mod helpers; use clap::Parser; -use torrust_torrent_repository_benchmarks::args::Args; -use torrust_torrent_repository_benchmarks::benches::{asyn, sync}; -use torrust_tracker::core::torrent::{ +use 
torrust_tracker_torrent_repository::{ TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, }; +use crate::helpers::args::Args; +use crate::helpers::{asyn, sync}; + #[allow(clippy::too_many_lines)] #[allow(clippy::print_literal)] fn main() { @@ -24,24 +25,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::>(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::>(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::>(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::>( - &rt, 10 - )) + rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); if let Some(true) = args.compare { @@ -51,22 +50,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - sync::add_one_torrent::>(1_000_000) + sync::add_one_torrent::(1_000_000) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(sync::update_one_torrent_in_parallel::>(&rt, 10)) + rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(sync::add_multiple_torrents_in_parallel::>(&rt, 10)) + rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(sync::update_multiple_torrents_in_parallel::>(&rt, 10)) + rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ 
-75,26 +74,24 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - sync::add_one_torrent::>(1_000_000) + sync::add_one_torrent::(1_000_000) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(sync::update_one_torrent_in_parallel::>( - &rt, 10 - )) + rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(sync::add_multiple_torrents_in_parallel::>( + rt.block_on(sync::add_multiple_torrents_in_parallel::( &rt, 10 )) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(sync::update_multiple_torrents_in_parallel::>( + rt.block_on(sync::update_multiple_torrents_in_parallel::( &rt, 10 )) ); @@ -105,26 +102,28 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::>(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::>( + rt.block_on(asyn::update_one_torrent_in_parallel::( &rt, 10 )) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::>( + rt.block_on(asyn::add_multiple_torrents_in_parallel::( &rt, 10 )) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::>(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::( + &rt, 10 + )) ); println!(); @@ -133,26 +132,28 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::>(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::>( + rt.block_on(asyn::update_one_torrent_in_parallel::( &rt, 10 )) ); println!( "{}: Avg/AdjAvg: 
{:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::>( + rt.block_on(asyn::add_multiple_torrents_in_parallel::( &rt, 10 )) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::>(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::( + &rt, 10 + )) ); println!(); @@ -161,26 +162,26 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::>(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::>( + rt.block_on(asyn::update_one_torrent_in_parallel::( &rt, 10 )) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::>( + rt.block_on(asyn::add_multiple_torrents_in_parallel::( &rt, 10 )) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::>(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); } } diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs new file mode 100644 index 000000000..04aa597df --- /dev/null +++ b/packages/torrent-repository/src/entry/mod.rs @@ -0,0 +1,98 @@ +use std::fmt::Debug; +use std::sync::Arc; + +use serde::{Deserialize, Serialize}; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +pub mod mutex_std; +pub mod mutex_tokio; +pub mod single; + +pub trait Entry { + /// It returns the swarm metadata (statistics) as a struct: + /// + /// `(seeders, completed, leechers)` + fn get_stats(&self) -> SwarmMetadata; + + /// Returns True if Still a Valid Entry according to the 
Tracker Policy + fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool; + + /// Returns True if the Peers is Empty + fn peers_is_empty(&self) -> bool; + + /// Returns the number of Peers + fn get_peers_len(&self) -> usize; + + /// Get all swarm peers, optionally limiting the result. + fn get_peers(&self, limit: Option) -> Vec>; + + /// It returns the list of peers for a given peer client, optionally limiting the + /// result. + /// + /// It filters out the input peer, typically because we want to return this + /// list of peers to that client peer. + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec>; + + /// It updates a peer and returns true if the number of complete downloads have increased. + /// + /// The number of peers that have complete downloading is synchronously updated when peers are updated. + /// That's the total torrent downloads counter. + fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool; + + // It preforms a combined operation of `insert_or_update_peer` and `get_stats`. 
+ fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata); + + /// It removes peer from the swarm that have not been updated for more than `current_cutoff` seconds + fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch); +} + +#[allow(clippy::module_name_repetitions)] +pub trait EntrySync { + fn get_stats(&self) -> SwarmMetadata; + fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool; + fn peers_is_empty(&self) -> bool; + fn get_peers_len(&self) -> usize; + fn get_peers(&self, limit: Option) -> Vec>; + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec>; + fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool; + fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata); + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); +} + +#[allow(clippy::module_name_repetitions)] +pub trait EntryAsync { + fn get_stats(self) -> impl std::future::Future + Send; + + #[allow(clippy::wrong_self_convention)] + fn is_not_zombie(self, policy: &TrackerPolicy) -> impl std::future::Future + Send; + fn peers_is_empty(self) -> impl std::future::Future + Send; + fn get_peers_len(self) -> impl std::future::Future + Send; + fn get_peers(self, limit: Option) -> impl std::future::Future>> + Send; + fn get_peers_for_peer( + self, + client: &peer::Peer, + limit: Option, + ) -> impl std::future::Future>> + Send; + fn insert_or_update_peer(self, peer: &peer::Peer) -> impl std::future::Future + Send; + fn insert_or_update_peer_and_get_stats( + self, + peer: &peer::Peer, + ) -> impl std::future::Future + std::marker::Send; + fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; +} + +/// A data structure containing all the information about a torrent in the tracker. 
+/// +/// This is the tracker entry for a given torrent and contains the swarm data, +/// that's the list of all the peers trying to download the same torrent. +/// The tracker keeps one entry like this for every torrent. +#[derive(Serialize, Deserialize, Clone, Debug, Default)] +pub struct Torrent { + /// The swarm: a network of peers that are all trying to download the torrent associated to this entry + #[serde(skip)] + pub(crate) peers: std::collections::BTreeMap>, + /// The number of peers that have ever completed downloading the torrent associated to this entry + pub(crate) completed: u32, +} diff --git a/packages/torrent-repository/src/entry/mutex_std.rs b/packages/torrent-repository/src/entry/mutex_std.rs new file mode 100644 index 000000000..df6228317 --- /dev/null +++ b/packages/torrent-repository/src/entry/mutex_std.rs @@ -0,0 +1,50 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntrySync}; +use crate::EntryMutexStd; + +impl EntrySync for EntryMutexStd { + fn get_stats(&self) -> SwarmMetadata { + self.lock().expect("it should get a lock").get_stats() + } + + fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { + self.lock().expect("it should get a lock").is_not_zombie(policy) + } + + fn peers_is_empty(&self) -> bool { + self.lock().expect("it should get a lock").peers_is_empty() + } + + fn get_peers_len(&self) -> usize { + self.lock().expect("it should get a lock").get_peers_len() + } + + fn get_peers(&self, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers(limit) + } + + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers_for_peer(client, limit) + } + + fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool { + self.lock().expect("it should lock the 
entry").insert_or_update_peer(peer) + } + + fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.lock() + .expect("it should lock the entry") + .insert_or_update_peer_and_get_stats(peer) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + self.lock() + .expect("it should lock the entry") + .remove_inactive_peers(current_cutoff); + } +} diff --git a/packages/torrent-repository/src/entry/mutex_tokio.rs b/packages/torrent-repository/src/entry/mutex_tokio.rs new file mode 100644 index 000000000..c4d13fb43 --- /dev/null +++ b/packages/torrent-repository/src/entry/mutex_tokio.rs @@ -0,0 +1,46 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntryAsync}; +use crate::EntryMutexTokio; + +impl EntryAsync for EntryMutexTokio { + async fn get_stats(self) -> SwarmMetadata { + self.lock().await.get_stats() + } + + async fn is_not_zombie(self, policy: &TrackerPolicy) -> bool { + self.lock().await.is_not_zombie(policy) + } + + async fn peers_is_empty(self) -> bool { + self.lock().await.peers_is_empty() + } + + async fn get_peers_len(self) -> usize { + self.lock().await.get_peers_len() + } + + async fn get_peers(self, limit: Option) -> Vec> { + self.lock().await.get_peers(limit) + } + + async fn get_peers_for_peer(self, client: &peer::Peer, limit: Option) -> Vec> { + self.lock().await.get_peers_for_peer(client, limit) + } + + async fn insert_or_update_peer(self, peer: &peer::Peer) -> bool { + self.lock().await.insert_or_update_peer(peer) + } + + async fn insert_or_update_peer_and_get_stats(self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.lock().await.insert_or_update_peer_and_get_stats(peer) + } + + async fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) { + 
self.lock().await.remove_inactive_peers(current_cutoff); + } +} diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs new file mode 100644 index 000000000..7a5cf6240 --- /dev/null +++ b/packages/torrent-repository/src/entry/single.rs @@ -0,0 +1,105 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::peer::{self}; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::Entry; +use crate::EntrySingle; + +impl Entry for EntrySingle { + #[allow(clippy::cast_possible_truncation)] + fn get_stats(&self) -> SwarmMetadata { + let complete: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; + let incomplete: u32 = self.peers.len() as u32 - complete; + + SwarmMetadata { + downloaded: self.completed, + complete, + incomplete, + } + } + + fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { + if policy.persistent_torrent_completed_stat && self.completed > 0 { + return true; + } + + if policy.remove_peerless_torrents && self.peers.is_empty() { + return false; + } + + true + } + + fn peers_is_empty(&self) -> bool { + self.peers.is_empty() + } + + fn get_peers_len(&self) -> usize { + self.peers.len() + } + fn get_peers(&self, limit: Option) -> Vec> { + match limit { + Some(limit) => self.peers.values().take(limit).cloned().collect(), + None => self.peers.values().cloned().collect(), + } + } + + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { + match limit { + Some(limit) => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != peer::ReadInfo::get_address(client)) + // Limit the number of peers on the result + .take(limit) + .cloned() + .collect(), + None => self + .peers + .values() + // Take 
peers which are not the client peer + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != peer::ReadInfo::get_address(client)) + .cloned() + .collect(), + } + } + + fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { + let mut did_torrent_stats_change: bool = false; + + match peer::ReadInfo::get_event(peer) { + AnnounceEvent::Stopped => { + drop(self.peers.remove(&peer::ReadInfo::get_id(peer))); + } + AnnounceEvent::Completed => { + let peer_old = self.peers.insert(peer::ReadInfo::get_id(peer), Arc::new(*peer)); + // Don't count if peer was not previously known and not already completed. + if peer_old.is_some_and(|p| p.event != AnnounceEvent::Completed) { + self.completed += 1; + did_torrent_stats_change = true; + } + } + _ => { + drop(self.peers.insert(peer::ReadInfo::get_id(peer), Arc::new(*peer))); + } + } + + did_torrent_stats_change + } + + fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let changed = self.insert_or_update_peer(peer); + let stats = self.get_stats(); + (changed, stats) + } + + fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + self.peers + .retain(|_, peer| peer::ReadInfo::get_updated(peer) > current_cutoff); + } +} diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs new file mode 100644 index 000000000..903e1405e --- /dev/null +++ b/packages/torrent-repository/src/lib.rs @@ -0,0 +1,15 @@ +use std::sync::Arc; + +pub mod entry; +pub mod repository; + +pub type EntrySingle = entry::Torrent; +pub type EntryMutexStd = Arc>; +pub type EntryMutexTokio = Arc>; + +pub type TorrentsRwLockStd = repository::RwLockStd; +pub type TorrentsRwLockStdMutexStd = repository::RwLockStd; +pub type TorrentsRwLockStdMutexTokio = repository::RwLockStd; +pub type TorrentsRwLockTokio = repository::RwLockTokio; +pub type TorrentsRwLockTokioMutexStd = repository::RwLockTokio; +pub type TorrentsRwLockTokioMutexTokio = 
repository::RwLockTokio; diff --git a/src/core/torrent/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs similarity index 57% rename from src/core/torrent/repository/mod.rs rename to packages/torrent-repository/src/repository/mod.rs index 1c4ce8ae9..b46771163 100644 --- a/src/core/torrent/repository/mod.rs +++ b/packages/torrent-repository/src/repository/mod.rs @@ -1,8 +1,9 @@ -use super::SwarmMetadata; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; -use crate::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; pub mod rw_lock_std; pub mod rw_lock_std_mutex_std; @@ -12,20 +13,25 @@ pub mod rw_lock_tokio_mutex_std; pub mod rw_lock_tokio_mutex_tokio; pub trait Repository: Default + 'static { + fn get(&self, key: &InfoHash) -> Option; + fn get_metrics(&self) -> TorrentsMetrics; + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, T)>; + fn import_persistent(&self, persistent_torrents: &PersistentTorrents); + fn remove(&self, key: &InfoHash) -> Option; + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); + fn remove_peerless_torrents(&self, policy: &TrackerPolicy); + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata); +} + +#[allow(clippy::module_name_repetitions)] +pub trait RepositoryAsync: Default + 'static { fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; fn get_metrics(&self) -> impl std::future::Future + Send; fn get_paginated(&self, pagination: 
Option<&Pagination>) -> impl std::future::Future> + Send; fn import_persistent(&self, persistent_torrents: &PersistentTorrents) -> impl std::future::Future + Send; fn remove(&self, key: &InfoHash) -> impl std::future::Future> + Send; - fn remove_inactive_peers(&self, max_peer_timeout: u32) -> impl std::future::Future + Send; + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; -} - -pub trait UpdateTorrentSync { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata); -} - -pub trait UpdateTorrentAsync { fn update_torrent_with_peer_and_get_stats( &self, info_hash: &InfoHash, diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs new file mode 100644 index 000000000..bacef623d --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_std.rs @@ -0,0 +1,112 @@ +use std::collections::BTreeMap; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::Repository; +use crate::entry::Entry; +use crate::{EntrySingle, TorrentsRwLockStd}; + +impl TorrentsRwLockStd { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("it should get the read lock") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("it 
should get the write lock") + } +} + +impl Repository for TorrentsRwLockStd +where + EntrySingle: Entry, +{ + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let mut db = self.get_torrents_mut(); + + let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); + + entry.insert_or_update_peer_and_get_stats(peer) + } + + fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().values() { + let stats = entry.get_stats(); + metrics.seeders += u64::from(stats.complete); + metrics.completed += u64::from(stats.downloaded); + metrics.leechers += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntrySingle { + peers: BTreeMap::default(), + completed: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let mut db = self.get_torrents_mut(); + let entries = db.values_mut(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + 
} + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.is_not_zombie(policy)); + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs new file mode 100644 index 000000000..9fca82ba8 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs @@ -0,0 +1,123 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::Repository; +use crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockStdMutexStd}; + +impl TorrentsRwLockStdMutexStd { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl Repository for TorrentsRwLockStdMutexStd +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut(); + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + 
entry.insert_or_update_peer_and_get_stats(peer) + } + + fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().values() { + let stats = entry.lock().expect("it should get a lock").get_stats(); + metrics.seeders += u64::from(stats.complete); + metrics.completed += u64::from(stats.downloaded); + metrics.leechers += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexStd::new( + EntrySingle { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let db = self.get_torrents(); + let entries = db.values().cloned(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + } +} diff --git 
a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs new file mode 100644 index 000000000..b9fb54469 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs @@ -0,0 +1,131 @@ +use std::collections::BTreeMap; +use std::pin::Pin; +use std::sync::Arc; + +use futures::future::join_all; +use futures::{Future, FutureExt}; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::{Entry, EntryAsync}; +use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockStdMutexTokio}; + +impl TorrentsRwLockStdMutexTokio { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl RepositoryAsync for TorrentsRwLockStdMutexTokio +where + EntryMutexTokio: EntryAsync, + EntrySingle: Entry, +{ + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut(); + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.insert_or_update_peer_and_get_stats(peer).await + } + 
async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexTokio)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + let entries: Vec<_> = self.get_torrents().values().cloned().collect(); + + for entry in entries { + let stats = entry.lock().await.get_stats(); + metrics.seeders += u64::from(stats.complete); + metrics.completed += u64::from(stats.downloaded); + metrics.leechers += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexTokio::new( + EntrySingle { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let handles: Vec + Send>>>; + { + let db = self.get_torrents(); + handles = db + .values() + .cloned() + .map(|e| e.remove_inactive_peers(current_cutoff).boxed()) + .collect(); + } + join_all(handles).await; + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + } +} diff --git 
a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs new file mode 100644 index 000000000..d0b7ec751 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs @@ -0,0 +1,113 @@ +use std::collections::BTreeMap; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::Entry; +use crate::{EntrySingle, TorrentsRwLockTokio}; + +impl TorrentsRwLockTokio { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl RepositoryAsync for TorrentsRwLockTokio +where + EntrySingle: Entry, +{ + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let mut db = self.get_torrents_mut().await; + + let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); + + entry.insert_or_update_peer_and_get_stats(peer) + } + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + 
.collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().await.values() { + let stats = entry.get_stats(); + metrics.seeders += u64::from(stats.complete); + metrics.completed += u64::from(stats.downloaded); + metrics.leechers += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntrySingle { + peers: BTreeMap::default(), + completed: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let mut db = self.get_torrents_mut().await; + let entries = db.values_mut(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.is_not_zombie(policy)); + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs new file mode 100644 index 000000000..f800d2001 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs @@ -0,0 +1,124 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use 
torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockTokioMutexStd}; + +impl TorrentsRwLockTokioMutexStd { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl RepositoryAsync for TorrentsRwLockTokioMutexStd +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut().await; + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.insert_or_update_peer_and_get_stats(peer) + } + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().await.values() { + let 
stats = entry.get_stats(); + metrics.seeders += u64::from(stats.complete); + metrics.completed += u64::from(stats.downloaded); + metrics.leechers += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexStd::new( + EntrySingle { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let db = self.get_torrents().await; + let entries = db.values().cloned(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs new file mode 100644 index 000000000..7ce2cc74c --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -0,0 +1,124 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, 
PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::{Entry, EntryAsync}; +use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockTokioMutexTokio}; + +impl TorrentsRwLockTokioMutexTokio { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl RepositoryAsync for TorrentsRwLockTokioMutexTokio +where + EntryMutexTokio: EntryAsync, + EntrySingle: Entry, +{ + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut().await; + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.insert_or_update_peer_and_get_stats(peer).await + } + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexTokio)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().await.values().cloned() { + let stats = entry.get_stats().await; + metrics.seeders += u64::from(stats.complete); + metrics.completed += u64::from(stats.downloaded); + metrics.leechers += 
u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexTokio::new( + EntrySingle { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let db = self.get_torrents().await; + let entries = db.values().cloned(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff).await; + } + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + } +} diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index 6647e0249..300813430 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -44,7 +44,7 @@ pub fn start_job(config: &Configuration, tracker: &Arc) -> JoinHa if let Some(tracker) = weak_tracker.upgrade() { let start_time = Utc::now().time(); info!("Cleaning up torrents.."); - tracker.cleanup_torrents().await; + tracker.cleanup_torrents(); info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()); } else { break; diff --git a/src/console/clients/checker/checks/http.rs b/src/console/clients/checker/checks/http.rs index df1e9bc9a..501696df4 100644 --- a/src/console/clients/checker/checks/http.rs +++ b/src/console/clients/checker/checks/http.rs @@ -3,12 +3,12 @@ use std::str::FromStr; use colored::Colorize; use log::debug; use reqwest::Url as 
ServiceUrl; +use torrust_tracker_primitives::info_hash::InfoHash; use url::Url; use crate::console::clients::checker::console::Console; use crate::console::clients::checker::printer::Printer; use crate::console::clients::checker::service::{CheckError, CheckResult}; -use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; use crate::shared::bit_torrent::tracker::http::client::responses::scrape; diff --git a/src/console/clients/checker/checks/udp.rs b/src/console/clients/checker/checks/udp.rs index 890375b75..47a2a1a00 100644 --- a/src/console/clients/checker/checks/udp.rs +++ b/src/console/clients/checker/checks/udp.rs @@ -4,12 +4,12 @@ use aquatic_udp_protocol::{Port, TransactionId}; use colored::Colorize; use hex_literal::hex; use log::debug; +use torrust_tracker_primitives::info_hash::InfoHash; use crate::console::clients::checker::console::Console; use crate::console::clients::checker::printer::Printer; use crate::console::clients::checker::service::{CheckError, CheckResult}; use crate::console::clients::udp::checker; -use crate::shared::bit_torrent::info_hash::InfoHash; const ASSIGNED_BY_OS: u16 = 0; const RANDOM_TRANSACTION_ID: i32 = -888_840_697; diff --git a/src/console/clients/http/app.rs b/src/console/clients/http/app.rs index 80db07231..511fb6628 100644 --- a/src/console/clients/http/app.rs +++ b/src/console/clients/http/app.rs @@ -18,8 +18,8 @@ use std::str::FromStr; use anyhow::Context; use clap::{Parser, Subcommand}; use reqwest::Url; +use torrust_tracker_primitives::info_hash::InfoHash; -use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; use 
crate::shared::bit_torrent::tracker::http::client::responses::scrape; diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index b9e31155d..540a25f30 100644 --- a/src/console/clients/udp/app.rs +++ b/src/console/clients/udp/app.rs @@ -64,11 +64,11 @@ use aquatic_udp_protocol::Response::{self, AnnounceIpv4, AnnounceIpv6, Scrape}; use aquatic_udp_protocol::{Port, TransactionId}; use clap::{Parser, Subcommand}; use log::{debug, LevelFilter}; +use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; use url::Url; use crate::console::clients::udp::checker; use crate::console::clients::udp::responses::{AnnounceResponseDto, ScrapeResponseDto}; -use crate::shared::bit_torrent::info_hash::InfoHash as TorrustInfoHash; const ASSIGNED_BY_OS: u16 = 0; const RANDOM_TRANSACTION_ID: i32 = -888_840_697; diff --git a/src/console/clients/udp/checker.rs b/src/console/clients/udp/checker.rs index b35139e49..12b8d764c 100644 --- a/src/console/clients/udp/checker.rs +++ b/src/console/clients/udp/checker.rs @@ -8,8 +8,8 @@ use aquatic_udp_protocol::{ }; use log::debug; use thiserror::Error; +use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; -use crate::shared::bit_torrent::info_hash::InfoHash as TorrustInfoHash; use crate::shared::bit_torrent::tracker::udp::client::{UdpClient, UdpTrackerClient}; #[derive(Error, Debug)] diff --git a/src/core/auth.rs b/src/core/auth.rs index 9fc9d6e7b..a7bb91aa4 100644 --- a/src/core/auth.rs +++ b/src/core/auth.rs @@ -13,7 +13,7 @@ //! //! ```rust,no_run //! use torrust_tracker::core::auth::Key; -//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; //! //! pub struct ExpiringKey { //! /// Random 32-char string. 
For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` @@ -48,9 +48,10 @@ use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; use thiserror::Error; use torrust_tracker_located_error::{DynError, LocatedError}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; -use crate::shared::clock::{convert_from_timestamp_to_datetime_utc, Current, DurationSinceUnixEpoch, Time, TimeNow}; +use crate::shared::clock::{convert_from_timestamp_to_datetime_utc, Current, Time, TimeNow}; #[must_use] /// It generates a new random 32-char authentication [`ExpiringKey`] diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs index b3dcdd48e..b708ef4dc 100644 --- a/src/core/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -22,7 +22,7 @@ //! ---|---|--- //! `id` | 1 | Autoincrement id //! `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1 -//! `completed` | 20 | The number of peers that have ever completed downloading the torrent associated to this entry. See [`Entry`](crate::core::torrent::Entry) for more information. +//! `completed` | 20 | The number of peers that have ever completed downloading the torrent associated to this entry. See [`Entry`](torrust_tracker_torrent_repository::entry::Entry) for more information. //! //! > **NOTICE**: The peer list for a torrent is not persisted. Since peer have to re-announce themselves on intervals, the data is be //! regenerated again after some minutes. 
@@ -51,12 +51,11 @@ pub mod sqlite; use std::marker::PhantomData; use async_trait::async_trait; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::PersistentTorrents; use self::error::Error; use crate::core::auth::{self, Key}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -pub type PersistentTorrents = Vec<(InfoHash, u32)>; struct Builder where @@ -118,9 +117,9 @@ pub trait Database: Sync + Send { /// /// It returns an array of tuples with the torrent /// [`InfoHash`] and the - /// [`completed`](crate::core::torrent::Entry::completed) counter + /// [`completed`](torrust_tracker_torrent_repository::entry::Entry::completed) counter /// which is the number of times the torrent has been downloaded. - /// See [`Entry::completed`](crate::core::torrent::Entry::completed). + /// See [`Entry::completed`](torrust_tracker_torrent_repository::entry::Entry::completed). /// /// # Context: Torrent Metrics /// diff --git a/src/core/databases/mysql.rs b/src/core/databases/mysql.rs index c46300829..e37cdd9bf 100644 --- a/src/core/databases/mysql.rs +++ b/src/core/databases/mysql.rs @@ -8,12 +8,12 @@ use r2d2::Pool; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; +use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::DatabaseDriver; use super::{Database, Error}; use crate::core::auth::{self, Key}; use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; -use crate::shared::bit_torrent::info_hash::InfoHash; const DRIVER: DatabaseDriver = DatabaseDriver::MySQL; diff --git a/src/core/databases/sqlite.rs b/src/core/databases/sqlite.rs index bf2d6b8b9..5a3ac144a 100644 --- a/src/core/databases/sqlite.rs +++ b/src/core/databases/sqlite.rs @@ -5,12 +5,11 @@ use std::str::FromStr; use async_trait::async_trait; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; -use torrust_tracker_primitives::DatabaseDriver; +use 
torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::{DatabaseDriver, DurationSinceUnixEpoch}; use super::{Database, Error}; use crate::core::auth::{self, Key}; -use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::shared::clock::DurationSinceUnixEpoch; const DRIVER: DatabaseDriver = DatabaseDriver::Sqlite3; diff --git a/src/core/error.rs b/src/core/error.rs index f1e622673..a826de349 100644 --- a/src/core/error.rs +++ b/src/core/error.rs @@ -9,6 +9,7 @@ use std::panic::Location; use torrust_tracker_located_error::LocatedError; +use torrust_tracker_primitives::info_hash::InfoHash; /// Authentication or authorization error returned by the core `Tracker` #[derive(thiserror::Error, Debug, Clone)] @@ -25,7 +26,7 @@ pub enum Error { // Authorization errors #[error("The torrent: {info_hash}, is not whitelisted, {location}")] TorrentNotWhitelisted { - info_hash: crate::shared::bit_torrent::info_hash::InfoHash, + info_hash: InfoHash, location: &'static Location<'static>, }, } diff --git a/src/core/mod.rs b/src/core/mod.rs index 15d7b9c39..f94c46543 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -52,13 +52,13 @@ //! The tracker responds to the peer with the list of other peers in the swarm so that //! the peer can contact them to start downloading pieces of the file from them. //! -//! Once you have instantiated the `Tracker` you can `announce` a new [`Peer`] with: +//! Once you have instantiated the `Tracker` you can `announce` a new [`peer::Peer`] with: //! //! ```rust,no_run -//! use torrust_tracker::core::peer; -//! use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; -//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +//! use torrust_tracker_primitives::peer; +//! use torrust_tracker_primitives::info_hash::InfoHash; +//! use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes}; +//! 
use torrust_tracker_primitives::announce_event::AnnounceEvent; //! use std::net::SocketAddr; //! use std::net::IpAddr; //! use std::net::Ipv4Addr; @@ -97,11 +97,11 @@ //! The returned struct is: //! //! ```rust,no_run -//! use torrust_tracker::core::peer::Peer; +//! use torrust_tracker_primitives::peer; //! use torrust_tracker_configuration::AnnouncePolicy; //! //! pub struct AnnounceData { -//! pub peers: Vec, +//! pub peers: Vec, //! pub swarm_stats: SwarmMetadata, //! pub policy: AnnouncePolicy, // the tracker announce policy. //! } @@ -136,7 +136,7 @@ //! The returned struct is: //! //! ```rust,no_run -//! use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +//! use torrust_tracker_primitives::info_hash::InfoHash; //! use std::collections::HashMap; //! //! pub struct ScrapeData { @@ -165,7 +165,7 @@ //! There are two data structures for infohashes: byte arrays and hex strings: //! //! ```rust,no_run -//! use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +//! use torrust_tracker_primitives::info_hash::InfoHash; //! use std::str::FromStr; //! //! let info_hash: InfoHash = [255u8; 20].into(); @@ -246,14 +246,14 @@ //! A `Peer` is the struct used by the `Tracker` to keep peers data: //! //! ```rust,no_run -//! use torrust_tracker::core::peer::Id; +//! use torrust_tracker_primitives::peer; //! use std::net::SocketAddr; -//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; //! use aquatic_udp_protocol::NumberOfBytes; //! use aquatic_udp_protocol::AnnounceEvent; //! //! pub struct Peer { -//! pub peer_id: Id, // The peer ID +//! pub peer_id: peer::Id, // The peer ID //! pub peer_addr: SocketAddr, // Peer socket address //! pub updated: DurationSinceUnixEpoch, // Last time (timestamp) when the peer was updated //! 
pub uploaded: NumberOfBytes, // Number of bytes the peer has uploaded so far @@ -429,11 +429,12 @@ pub mod auth; pub mod databases; pub mod error; -pub mod peer; pub mod services; pub mod statistics; pub mod torrent; +pub mod peer_tests; + use std::collections::HashMap; use std::net::IpAddr; use std::panic::Location; @@ -443,18 +444,19 @@ use std::time::Duration; use derive_more::Constructor; use log::debug; use tokio::sync::mpsc::error::SendError; -use torrust_tracker_configuration::{AnnouncePolicy, Configuration}; -use torrust_tracker_primitives::TrackerMode; +use torrust_tracker_configuration::{AnnouncePolicy, Configuration, TrackerPolicy}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, TrackerMode}; +use torrust_tracker_torrent_repository::entry::EntrySync; +use torrust_tracker_torrent_repository::repository::Repository; use self::auth::Key; use self::error::Error; -use self::peer::Peer; -use self::torrent::entry::{ReadInfo, ReadPeers}; -use self::torrent::repository::{Repository, UpdateTorrentSync}; use self::torrent::Torrents; use crate::core::databases::Database; -use crate::core::torrent::SwarmMetadata; -use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::shared::clock::{self, TimeNow}; /// The maximum number of returned peers for a torrent. pub const TORRENT_PEERS_LIMIT: usize = 74; @@ -484,33 +486,12 @@ pub struct Tracker { on_reverse_proxy: bool, } -/// Structure that holds general `Tracker` torrents metrics. -/// -/// Metrics are aggregate values for all torrents. -#[derive(Copy, Clone, Debug, PartialEq, Default)] -pub struct TorrentsMetrics { - /// Total number of seeders for all torrents - pub seeders: u64, - /// Total number of peers that have ever completed downloading for all torrents. - pub completed: u64, - /// Total number of leechers for all torrents. 
- pub leechers: u64, - /// Total number of torrents. - pub torrents: u64, -} - -#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] -pub struct TrackerPolicy { - pub remove_peerless_torrents: bool, - pub max_peer_timeout: u32, - pub persistent_torrent_completed_stat: bool, -} /// Structure that holds the data returned by the `announce` request. #[derive(Clone, Debug, PartialEq, Constructor, Default)] pub struct AnnounceData { /// The list of peers that are downloading the same torrent. /// It excludes the peer that made the request. - pub peers: Vec>, + pub peers: Vec>, /// Swarm statistics pub stats: SwarmMetadata, pub policy: AnnouncePolicy, @@ -627,7 +608,7 @@ impl Tracker { /// # Context: Tracker /// /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). - pub async fn announce(&self, info_hash: &InfoHash, peer: &mut Peer, remote_client_ip: &IpAddr) -> AnnounceData { + pub async fn announce(&self, info_hash: &InfoHash, peer: &mut peer::Peer, remote_client_ip: &IpAddr) -> AnnounceData { // code-review: maybe instead of mutating the peer we could just return // a tuple with the new peer and the announce data: (Peer, AnnounceData). // It could even be a different struct: `StoredPeer` or `PublicPeer`. @@ -650,7 +631,7 @@ impl Tracker { // we should update the torrent and get the stats before we get the peer list. 
let stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - let peers = self.get_torrent_peers_for_peer(info_hash, peer).await; + let peers = self.get_torrent_peers_for_peer(info_hash, peer); AnnounceData { peers, @@ -669,7 +650,7 @@ impl Tracker { for info_hash in info_hashes { let swarm_metadata = match self.authorize(info_hash).await { - Ok(()) => self.get_swarm_metadata(info_hash).await, + Ok(()) => self.get_swarm_metadata(info_hash), Err(_) => SwarmMetadata::zeroed(), }; scrape_data.add_file(info_hash, swarm_metadata); @@ -679,8 +660,8 @@ impl Tracker { } /// It returns the data for a `scrape` response. - async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { - match self.torrents.get(info_hash).await { + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { + match self.torrents.get(info_hash) { Some(torrent_entry) => torrent_entry.get_stats(), None => SwarmMetadata::default(), } @@ -697,13 +678,13 @@ impl Tracker { pub async fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; - self.torrents.import_persistent(&persistent_torrents).await; + self.torrents.import_persistent(&persistent_torrents); Ok(()) } - async fn get_torrent_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec> { - match self.torrents.get(info_hash).await { + fn get_torrent_peers_for_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> Vec> { + match self.torrents.get(info_hash) { None => vec![], Some(entry) => entry.get_peers_for_peer(peer, Some(TORRENT_PEERS_LIMIT)), } @@ -712,8 +693,8 @@ impl Tracker { /// # Context: Tracker /// /// Get all torrent peers for a given torrent - pub async fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { - match self.torrents.get(info_hash).await { + pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { + match self.torrents.get(info_hash) { None => vec![], 
Some(entry) => entry.get_peers(Some(TORRENT_PEERS_LIMIT)), } @@ -724,11 +705,7 @@ impl Tracker { /// needed for a `announce` request response. /// /// # Context: Tracker - pub async fn update_torrent_with_peer_and_get_stats( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - ) -> torrent::SwarmMetadata { + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { // code-review: consider splitting the function in two (command and query segregation). // `update_torrent_with_peer` and `get_stats` @@ -751,19 +728,21 @@ impl Tracker { /// /// # Panics /// Panics if unable to get the torrent metrics. - pub async fn get_torrents_metrics(&self) -> TorrentsMetrics { - self.torrents.get_metrics().await + pub fn get_torrents_metrics(&self) -> TorrentsMetrics { + self.torrents.get_metrics() } /// Remove inactive peers and (optionally) peerless torrents /// /// # Context: Tracker - pub async fn cleanup_torrents(&self) { + pub fn cleanup_torrents(&self) { // If we don't need to remove torrents we will use the faster iter if self.policy.remove_peerless_torrents { - self.torrents.remove_peerless_torrents(&self.policy).await; + self.torrents.remove_peerless_torrents(&self.policy); } else { - self.torrents.remove_inactive_peers(self.policy.max_peer_timeout).await; + let current_cutoff = + clock::Current::sub(&Duration::from_secs(u64::from(self.policy.max_peer_timeout))).unwrap_or_default(); + self.torrents.remove_inactive_peers(current_cutoff); } } @@ -1017,14 +996,15 @@ mod tests { use std::str::FromStr; use std::sync::Arc; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; use crate::core::peer::{self, Peer}; use crate::core::services::tracker_factory; use 
crate::core::{TorrentsMetrics, Tracker}; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::shared::clock::DurationSinceUnixEpoch; + use crate::shared::bit_torrent::info_hash::fixture::gen_seeded_infohash; fn public_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_mode_public()) @@ -1132,7 +1112,7 @@ mod tests { async fn should_collect_torrent_metrics() { let tracker = public_tracker(); - let torrents_metrics = tracker.get_torrents_metrics().await; + let torrents_metrics = tracker.get_torrents_metrics(); assert_eq!( torrents_metrics, @@ -1154,7 +1134,7 @@ mod tests { tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - let peers = tracker.get_torrent_peers(&info_hash).await; + let peers = tracker.get_torrent_peers(&info_hash); assert_eq!(peers, vec![Arc::new(peer)]); } @@ -1168,7 +1148,7 @@ mod tests { tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - let peers = tracker.get_torrent_peers_for_peer(&info_hash, &peer).await; + let peers = tracker.get_torrent_peers_for_peer(&info_hash, &peer); assert_eq!(peers, vec![]); } @@ -1181,7 +1161,7 @@ mod tests { .update_torrent_with_peer_and_get_stats(&sample_info_hash(), &leecher()) .await; - let torrent_metrics = tracker.get_torrents_metrics().await; + let torrent_metrics = tracker.get_torrents_metrics(); assert_eq!( torrent_metrics, @@ -1194,6 +1174,34 @@ mod tests { ); } + #[tokio::test] + async fn it_should_get_many_the_torrent_metrics() { + let tracker = public_tracker(); + + let start_time = std::time::Instant::now(); + for i in 0..1_000_000 { + tracker + .update_torrent_with_peer_and_get_stats(&gen_seeded_infohash(&i), &leecher()) + .await; + } + let result_a = start_time.elapsed(); + + let start_time = std::time::Instant::now(); + let torrent_metrics = tracker.get_torrents_metrics(); + let result_b = start_time.elapsed(); + + assert_eq!( + (torrent_metrics), + (TorrentsMetrics { + seeders: 0, + completed: 0, + leechers: 1_000_000, + 
torrents: 1_000_000, + }), + "{result_a:?} {result_b:?}" + ); + } + mod for_all_config_modes { mod handling_an_announce_request { @@ -1376,9 +1384,10 @@ mod tests { use std::net::{IpAddr, Ipv4Addr}; + use torrust_tracker_primitives::info_hash::InfoHash; + use crate::core::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; use crate::core::{ScrapeData, SwarmMetadata}; - use crate::shared::bit_torrent::info_hash::InfoHash; #[tokio::test] async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent( @@ -1533,12 +1542,13 @@ mod tests { mod handling_an_scrape_request { + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use crate::core::tests::the_tracker::{ complete_peer, incomplete_peer, peer_ip, sample_info_hash, whitelisted_tracker, }; - use crate::core::torrent::SwarmMetadata; use crate::core::ScrapeData; - use crate::shared::bit_torrent::info_hash::InfoHash; #[test] fn it_should_be_able_to_build_a_zeroed_scrape_data_for_a_list_of_info_hashes() { @@ -1677,11 +1687,12 @@ mod tests { } mod handling_torrent_persistence { - use aquatic_udp_protocol::AnnounceEvent; + + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_torrent_repository::entry::EntrySync; + use torrust_tracker_torrent_repository::repository::Repository; use crate::core::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; - use crate::core::torrent::entry::ReadInfo; - use crate::core::torrent::repository::Repository; #[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { @@ -1700,15 +1711,11 @@ mod tests { assert_eq!(swarm_stats.downloaded, 1); // Remove the newly updated torrent from memory - tracker.torrents.remove(&info_hash).await; + tracker.torrents.remove(&info_hash); tracker.load_torrents_from_database().await.unwrap(); - let 
torrent_entry = tracker - .torrents - .get(&info_hash) - .await - .expect("it should be able to get entry"); + let torrent_entry = tracker.torrents.get(&info_hash).expect("it should be able to get entry"); // It persists the number of completed peers. assert_eq!(torrent_entry.get_stats().downloaded, 1); diff --git a/src/core/peer_tests.rs b/src/core/peer_tests.rs new file mode 100644 index 000000000..9e5b4be01 --- /dev/null +++ b/src/core/peer_tests.rs @@ -0,0 +1,43 @@ +#![cfg(test)] + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::{peer, NumberOfBytes}; + +use crate::shared::clock::{self, Time}; + +#[test] +fn it_should_be_serializable() { + let torrent_peer = peer::Peer { + peer_id: peer::Id(*b"-qB0000-000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: clock::Current::now(), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + + let raw_json = serde_json::to_string(&torrent_peer).unwrap(); + + let expected_raw_json = r#" + { + "peer_id": { + "id": "0x2d7142303030302d303030303030303030303030", + "client": "qBittorrent" + }, + "peer_addr":"126.0.0.1:8080", + "updated":0, + "uploaded":0, + "downloaded":0, + "left":0, + "event":"Started" + } + "#; + + assert_eq!( + serde_json::from_str::(&raw_json).unwrap(), + serde_json::from_str::(expected_raw_json).unwrap() + ); +} diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 3578c53aa..ee1c0c4fa 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -40,8 +40,10 @@ pub mod setup; use std::sync::Arc; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use crate::core::statistics::Metrics; -use crate::core::{TorrentsMetrics, Tracker}; +use crate::core::Tracker; /// All the metrics collected by the tracker. 
#[derive(Debug, PartialEq)] @@ -59,7 +61,7 @@ pub struct TrackerMetrics { /// It returns all the [`TrackerMetrics`] pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { - let torrents_metrics = tracker.get_torrents_metrics().await; + let torrents_metrics = tracker.get_torrents_metrics(); let stats = tracker.get_stats().await; TrackerMetrics { @@ -86,6 +88,7 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; use crate::core; @@ -105,7 +108,7 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: core::TorrentsMetrics::default(), + torrents_metrics: TorrentsMetrics::default(), protocol_metrics: core::statistics::Metrics::default(), } ); diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 78dab12c4..ce44af3a8 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -6,13 +6,13 @@ //! - [`get_torrents`]: it returns data about some torrent in bulk excluding the peer list. use std::sync::Arc; -use serde::Deserialize; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::peer; +use torrust_tracker_torrent_repository::entry::EntrySync; +use torrust_tracker_torrent_repository::repository::Repository; -use crate::core::peer::Peer; -use crate::core::torrent::entry::{ReadInfo, ReadPeers}; -use crate::core::torrent::repository::Repository; use crate::core::Tracker; -use crate::shared::bit_torrent::info_hash::InfoHash; /// It contains all the information the tracker has about a torrent #[derive(Debug, PartialEq)] @@ -26,7 +26,7 @@ pub struct Info { /// The total number of leechers for this torrent. 
Peers that actively downloading this torrent pub leechers: u64, /// The swarm: the list of peers that are actively trying to download or serving this torrent - pub peers: Option>, + pub peers: Option>, } /// It contains only part of the information the tracker has about a torrent @@ -44,58 +44,9 @@ pub struct BasicInfo { pub leechers: u64, } -/// A struct to keep information about the page when results are being paginated -#[derive(Deserialize)] -pub struct Pagination { - /// The page number, starting at 0 - pub offset: u32, - /// Page size. The number of results per page - pub limit: u32, -} - -impl Pagination { - #[must_use] - pub fn new(offset: u32, limit: u32) -> Self { - Self { offset, limit } - } - - #[must_use] - pub fn new_with_options(offset_option: Option, limit_option: Option) -> Self { - let offset = match offset_option { - Some(offset) => offset, - None => Pagination::default_offset(), - }; - let limit = match limit_option { - Some(offset) => offset, - None => Pagination::default_limit(), - }; - - Self { offset, limit } - } - - #[must_use] - pub fn default_offset() -> u32 { - 0 - } - - #[must_use] - pub fn default_limit() -> u32 { - 4000 - } -} - -impl Default for Pagination { - fn default() -> Self { - Self { - offset: Self::default_offset(), - limit: Self::default_limit(), - } - } -} - /// It returns all the information the tracker has about one torrent in a [Info] struct. 
pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { - let torrent_entry_option = tracker.torrents.get(info_hash).await; + let torrent_entry_option = tracker.torrents.get(info_hash); let torrent_entry = torrent_entry_option?; @@ -118,7 +69,7 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op pub async fn get_torrents_page(tracker: Arc, pagination: Option<&Pagination>) -> Vec { let mut basic_infos: Vec = vec![]; - for (info_hash, torrent_entry) in tracker.torrents.get_paginated(pagination).await { + for (info_hash, torrent_entry) in tracker.torrents.get_paginated(pagination) { let stats = torrent_entry.get_stats(); basic_infos.push(BasicInfo { @@ -137,7 +88,7 @@ pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Ve let mut basic_infos: Vec = vec![]; for info_hash in info_hashes { - if let Some(stats) = tracker.torrents.get(info_hash).await.map(|t| t.get_stats()) { + if let Some(stats) = tracker.torrents.get(info_hash).map(|t| t.get_stats()) { basic_infos.push(BasicInfo { info_hash: *info_hash, seeders: u64::from(stats.complete), @@ -154,10 +105,8 @@ pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Ve mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - - use crate::core::peer; - use crate::shared::clock::DurationSinceUnixEpoch; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; fn sample_peer() -> peer::Peer { peer::Peer { @@ -177,12 +126,12 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::core::services::torrent::tests::sample_peer; use crate::core::services::torrent::{get_torrent_info, Info}; use crate::core::services::tracker_factory; - use 
crate::shared::bit_torrent::info_hash::InfoHash; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -232,12 +181,12 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::core::services::torrent::tests::sample_peer; use crate::core::services::torrent::{get_torrents_page, BasicInfo, Pagination}; use crate::core::services::tracker_factory; - use crate::shared::bit_torrent::info_hash::InfoHash; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() diff --git a/src/core/torrent/entry.rs b/src/core/torrent/entry.rs deleted file mode 100644 index 815abd4fb..000000000 --- a/src/core/torrent/entry.rs +++ /dev/null @@ -1,287 +0,0 @@ -use std::fmt::Debug; -use std::sync::Arc; -use std::time::Duration; - -use aquatic_udp_protocol::AnnounceEvent; -use serde::{Deserialize, Serialize}; - -use super::SwarmMetadata; -use crate::core::peer::{self, ReadInfo as _}; -use crate::core::TrackerPolicy; -use crate::shared::clock::{Current, TimeNow}; - -/// A data structure containing all the information about a torrent in the tracker. -/// -/// This is the tracker entry for a given torrent and contains the swarm data, -/// that's the list of all the peers trying to download the same torrent. -/// The tracker keeps one entry like this for every torrent. 
-#[derive(Serialize, Deserialize, Clone, Debug, Default)] -pub struct Entry { - /// The swarm: a network of peers that are all trying to download the torrent associated to this entry - #[serde(skip)] - pub(crate) peers: std::collections::BTreeMap>, - /// The number of peers that have ever completed downloading the torrent associated to this entry - pub(crate) completed: u32, -} -pub type Single = Entry; -pub type MutexStd = Arc>; -pub type MutexTokio = Arc>; - -pub trait ReadInfo { - /// It returns the swarm metadata (statistics) as a struct: - /// - /// `(seeders, completed, leechers)` - fn get_stats(&self) -> SwarmMetadata; - - /// Returns True if Still a Valid Entry according to the Tracker Policy - fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool; - - /// Returns True if the Peers is Empty - fn peers_is_empty(&self) -> bool; -} - -/// Same as [`ReadInfo`], but async. -pub trait ReadInfoAsync { - /// It returns the swarm metadata (statistics) as a struct: - /// - /// `(seeders, completed, leechers)` - fn get_stats(&self) -> impl std::future::Future + Send; - - /// Returns True if Still a Valid Entry according to the Tracker Policy - fn is_not_zombie(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; - - /// Returns True if the Peers is Empty - fn peers_is_empty(&self) -> impl std::future::Future + Send; -} - -pub trait ReadPeers { - /// Get all swarm peers, optionally limiting the result. - fn get_peers(&self, limit: Option) -> Vec>; - - /// It returns the list of peers for a given peer client, optionally limiting the - /// result. - /// - /// It filters out the input peer, typically because we want to return this - /// list of peers to that client peer. - fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec>; -} - -/// Same as [`ReadPeers`], but async. 
-pub trait ReadPeersAsync { - fn get_peers(&self, limit: Option) -> impl std::future::Future>> + Send; - - fn get_peers_for_peer( - &self, - client: &peer::Peer, - limit: Option, - ) -> impl std::future::Future>> + Send; -} - -pub trait Update { - /// It updates a peer and returns true if the number of complete downloads have increased. - /// - /// The number of peers that have complete downloading is synchronously updated when peers are updated. - /// That's the total torrent downloads counter. - fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool; - - // It preforms a combined operation of `insert_or_update_peer` and `get_stats`. - fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata); - - /// It removes peer from the swarm that have not been updated for more than `max_peer_timeout` seconds - fn remove_inactive_peers(&mut self, max_peer_timeout: u32); -} - -/// Same as [`Update`], except not `mut`. -pub trait UpdateSync { - fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool; - fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata); - fn remove_inactive_peers(&self, max_peer_timeout: u32); -} - -/// Same as [`Update`], except not `mut` and async. 
-pub trait UpdateAsync { - fn insert_or_update_peer(&self, peer: &peer::Peer) -> impl std::future::Future + Send; - - fn insert_or_update_peer_and_get_stats( - &self, - peer: &peer::Peer, - ) -> impl std::future::Future + std::marker::Send; - - fn remove_inactive_peers(&self, max_peer_timeout: u32) -> impl std::future::Future + Send; -} - -impl ReadInfo for Single { - #[allow(clippy::cast_possible_truncation)] - fn get_stats(&self) -> SwarmMetadata { - let complete: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; - let incomplete: u32 = self.peers.len() as u32 - complete; - - SwarmMetadata { - downloaded: self.completed, - complete, - incomplete, - } - } - - fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { - if policy.persistent_torrent_completed_stat && self.completed > 0 { - return true; - } - - if policy.remove_peerless_torrents && self.peers.is_empty() { - return false; - } - - true - } - - fn peers_is_empty(&self) -> bool { - self.peers.is_empty() - } -} - -impl ReadInfo for MutexStd { - fn get_stats(&self) -> SwarmMetadata { - self.lock().expect("it should get a lock").get_stats() - } - - fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { - self.lock().expect("it should get a lock").is_not_zombie(policy) - } - - fn peers_is_empty(&self) -> bool { - self.lock().expect("it should get a lock").peers_is_empty() - } -} - -impl ReadInfoAsync for MutexTokio { - async fn get_stats(&self) -> SwarmMetadata { - self.lock().await.get_stats() - } - - async fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { - self.lock().await.is_not_zombie(policy) - } - - async fn peers_is_empty(&self) -> bool { - self.lock().await.peers_is_empty() - } -} - -impl ReadPeers for Single { - fn get_peers(&self, limit: Option) -> Vec> { - match limit { - Some(limit) => self.peers.values().take(limit).cloned().collect(), - None => self.peers.values().cloned().collect(), - } - } - - fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) 
-> Vec> { - match limit { - Some(limit) => self - .peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer.get_address() != client.get_address()) - // Limit the number of peers on the result - .take(limit) - .cloned() - .collect(), - None => self - .peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer.get_address() != client.get_address()) - .cloned() - .collect(), - } - } -} - -impl ReadPeers for MutexStd { - fn get_peers(&self, limit: Option) -> Vec> { - self.lock().expect("it should get lock").get_peers(limit) - } - - fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { - self.lock().expect("it should get lock").get_peers_for_peer(client, limit) - } -} - -impl ReadPeersAsync for MutexTokio { - async fn get_peers(&self, limit: Option) -> Vec> { - self.lock().await.get_peers(limit) - } - - async fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { - self.lock().await.get_peers_for_peer(client, limit) - } -} - -impl Update for Single { - fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { - let mut did_torrent_stats_change: bool = false; - - match peer.get_event() { - AnnounceEvent::Stopped => { - drop(self.peers.remove(&peer.get_id())); - } - AnnounceEvent::Completed => { - let peer_old = self.peers.insert(peer.get_id(), Arc::new(*peer)); - // Don't count if peer was not previously known and not already completed. 
- if peer_old.is_some_and(|p| p.event != AnnounceEvent::Completed) { - self.completed += 1; - did_torrent_stats_change = true; - } - } - _ => { - drop(self.peers.insert(peer.get_id(), Arc::new(*peer))); - } - } - - did_torrent_stats_change - } - - fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let changed = self.insert_or_update_peer(peer); - let stats = self.get_stats(); - (changed, stats) - } - - fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { - let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); - self.peers.retain(|_, peer| peer.get_updated() > current_cutoff); - } -} - -impl UpdateSync for MutexStd { - fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool { - self.lock().expect("it should lock the entry").insert_or_update_peer(peer) - } - - fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata) { - self.lock() - .expect("it should lock the entry") - .insert_or_update_peer_and_get_stats(peer) - } - - fn remove_inactive_peers(&self, max_peer_timeout: u32) { - self.lock() - .expect("it should lock the entry") - .remove_inactive_peers(max_peer_timeout); - } -} - -impl UpdateAsync for MutexTokio { - async fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool { - self.lock().await.insert_or_update_peer(peer) - } - - async fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata) { - self.lock().await.insert_or_update_peer_and_get_stats(peer) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - self.lock().await.remove_inactive_peers(max_peer_timeout); - } -} diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index bfe068337..b5a2b4c07 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -2,8 +2,8 @@ //! //! There are to main data structures: //! -//! 
- A torrent [`Entry`]: it contains all the information stored by the tracker for one torrent. -//! - The [`SwarmMetadata`]: it contains aggregate information that can me derived from the torrent entries. +//! - A torrent [`Entry`](torrust_tracker_torrent_repository::entry::Entry): it contains all the information stored by the tracker for one torrent. +//! - The [`SwarmMetadata`](torrust_tracker_primitives::swarm_metadata::SwarmMetadata): it contains aggregate information that can me derived from the torrent entries. //! //! A "swarm" is a network of peers that are trying to download the same torrent. //! @@ -25,42 +25,11 @@ //! - The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network. //! Peer that don not have a full copy of the torrent data are called "leechers". //! -//! > **NOTICE**: that both [`SwarmMetadata`] and [`SwarmMetadata`] contain the same information. [`SwarmMetadata`] is using the names used on [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). -pub mod entry; -pub mod repository; -use derive_more::Constructor; +use torrust_tracker_torrent_repository::TorrentsRwLockStdMutexStd; pub type Torrents = TorrentsRwLockStdMutexStd; // Currently Used -pub type TorrentsRwLockStd = repository::RwLockStd; -pub type TorrentsRwLockStdMutexStd = repository::RwLockStd; -pub type TorrentsRwLockStdMutexTokio = repository::RwLockStd; -pub type TorrentsRwLockTokio = repository::RwLockTokio; -pub type TorrentsRwLockTokioMutexStd = repository::RwLockTokio; -pub type TorrentsRwLockTokioMutexTokio = repository::RwLockTokio; - -/// Swarm statistics for one torrent. -/// Swarm metadata dictionary in the scrape response. 
-/// -/// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) -#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] -pub struct SwarmMetadata { - /// (i.e `completed`): The number of peers that have ever completed downloading - pub downloaded: u32, // - /// (i.e `seeders`): The number of active peers that have completed downloading (seeders) - pub complete: u32, //seeders - /// (i.e `leechers`): The number of active peers that have not completed downloading (leechers) - pub incomplete: u32, -} - -impl SwarmMetadata { - #[must_use] - pub fn zeroed() -> Self { - Self::default() - } -} - #[cfg(test)] mod tests { @@ -71,11 +40,13 @@ mod tests { use std::sync::Arc; use std::time::Duration; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; + use torrust_tracker_torrent_repository::entry::Entry; + use torrust_tracker_torrent_repository::EntrySingle; - use crate::core::torrent::entry::{self, ReadInfo, ReadPeers, Update}; - use crate::core::{peer, TORRENT_PEERS_LIMIT}; - use crate::shared::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; + use crate::core::TORRENT_PEERS_LIMIT; + use crate::shared::clock::{self, StoppedTime, Time, TimeNow}; struct TorrentPeerBuilder { peer: peer::Peer, @@ -86,7 +57,7 @@ mod tests { let default_peer = peer::Peer { peer_id: peer::Id([0u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), - updated: Current::now(), + updated: clock::Current::now(), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), @@ -145,14 +116,14 @@ mod tests { #[test] fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = entry::Single::default(); + let torrent_entry = EntrySingle::default(); assert_eq!(torrent_entry.get_peers(None).len(), 
0); } #[test] fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -163,7 +134,7 @@ mod tests { #[test] fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -173,7 +144,7 @@ mod tests { #[test] fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -185,7 +156,7 @@ mod tests { #[test] fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -197,7 +168,7 @@ mod tests { #[test] fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -211,7 +182,7 @@ mod tests { #[test] fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( ) { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let 
torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Add a peer that did not exist before in the entry @@ -223,7 +194,7 @@ mod tests { #[test] fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add peer @@ -236,7 +207,7 @@ mod tests { #[test] fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let peer_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); @@ -270,7 +241,7 @@ mod tests { #[test] fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { @@ -287,7 +258,7 @@ mod tests { #[test] fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let torrent_seeder = a_torrent_seeder(); torrent_entry.insert_or_update_peer(&torrent_seeder); // Add seeder @@ -297,7 +268,7 @@ mod tests { #[test] fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let torrent_leecher = a_torrent_leecher(); torrent_entry.insert_or_update_peer(&torrent_leecher); // Add leecher @@ -308,7 +279,7 @@ mod tests { #[test] fn 
torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( ) { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -323,7 +294,7 @@ mod tests { #[test] fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Announce "Completed" torrent download event. @@ -337,12 +308,12 @@ mod tests { #[test] fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = entry::Single::default(); + let mut torrent_entry = EntrySingle::default(); let timeout = 120u32; - let now = Working::now(); - Stopped::local_set(&now); + let now = clock::Working::now(); + clock::Stopped::local_set(&now); let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); let inactive_peer = TorrentPeerBuilder::default() @@ -350,9 +321,10 @@ mod tests { .into(); torrent_entry.insert_or_update_peer(&inactive_peer); // Add the peer - torrent_entry.remove_inactive_peers(timeout); + let current_cutoff = clock::Current::sub(&Duration::from_secs(u64::from(timeout))).unwrap_or_default(); + torrent_entry.remove_inactive_peers(current_cutoff); - assert_eq!(torrent_entry.peers.len(), 0); + assert_eq!(torrent_entry.get_peers_len(), 0); } } } diff --git a/src/core/torrent/repository/rw_lock_std.rs b/src/core/torrent/repository/rw_lock_std.rs deleted file mode 100644 index 9b3915bcb..000000000 --- a/src/core/torrent/repository/rw_lock_std.rs +++ /dev/null @@ -1,122 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use 
futures::future::join_all; - -use super::{Repository, UpdateTorrentSync}; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use crate::core::torrent::entry::{self, ReadInfo, Update}; -use crate::core::torrent::{SwarmMetadata, TorrentsRwLockStd}; -use crate::core::{peer, TorrentsMetrics}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -impl TorrentsRwLockStd { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("it should get the read lock") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("it should get the write lock") - } -} - -impl UpdateTorrentSync for TorrentsRwLockStd { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let mut db = self.get_torrents_mut(); - - let entry = db.entry(*info_hash).or_insert(entry::Single::default()); - - entry.insert_or_update_peer_and_get_stats(peer) - } -} - -impl UpdateTorrentSync for Arc { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer) - } -} - -impl Repository for TorrentsRwLockStd { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents(); - db.get(key).cloned() - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let metrics: Arc> = Arc::default(); - - let mut handles = Vec::>::default(); - - for e in self.get_torrents().values() { - let entry = e.clone(); - let metrics = metrics.clone(); - handles.push(tokio::task::spawn(async move { - let stats = entry.get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += 
u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - })); - } - - join_all(handles).await; - - *metrics.lock_owned().await - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::Single)> { - let db = self.get_torrents(); - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut(); - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = entry::Single { - peers: BTreeMap::default(), - completed: *completed, - }; - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut(); - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - let mut db = self.get_torrents_mut(); - - drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); - } - - async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { - let mut db = self.get_torrents_mut(); - - db.retain(|_, e| e.is_not_zombie(policy)); - } -} diff --git a/src/core/torrent/repository/rw_lock_std_mutex_std.rs b/src/core/torrent/repository/rw_lock_std_mutex_std.rs deleted file mode 100644 index 5a9a38f77..000000000 --- a/src/core/torrent/repository/rw_lock_std_mutex_std.rs +++ /dev/null @@ -1,143 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use futures::future::join_all; - -use super::{Repository, UpdateTorrentSync}; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use 
crate::core::torrent::entry::{ReadInfo, Update, UpdateSync}; -use crate::core::torrent::{entry, SwarmMetadata, TorrentsRwLockStdMutexStd}; -use crate::core::{peer, TorrentsMetrics}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -impl TorrentsRwLockStdMutexStd { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl UpdateTorrentSync for TorrentsRwLockStdMutexStd { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let maybe_entry = self.get_torrents().get(info_hash).cloned(); - - let entry = if let Some(entry) = maybe_entry { - entry - } else { - let mut db = self.get_torrents_mut(); - let entry = db.entry(*info_hash).or_insert(Arc::default()); - entry.clone() - }; - - entry.insert_or_update_peer_and_get_stats(peer) - } -} - -impl UpdateTorrentSync for Arc { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer) - } -} - -impl Repository for TorrentsRwLockStdMutexStd { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents(); - db.get(key).cloned() - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let metrics: Arc> = Arc::default(); - - // todo:: replace with a ring buffer - let mut handles = Vec::>::default(); - - for e in self.get_torrents().values() { - let entry = e.clone(); - let metrics = metrics.clone(); - handles.push(tokio::task::spawn(async move { - let stats = entry.lock().expect("it should get the lock").get_stats(); - 
metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - })); - } - - join_all(handles).await; - - *metrics.lock_owned().await - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexStd)> { - let db = self.get_torrents(); - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut(); - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = entry::MutexStd::new( - entry::Single { - peers: BTreeMap::default(), - completed: *completed, - } - .into(), - ); - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut(); - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - // todo:: replace with a ring buffer - let mut handles = Vec::>::default(); - - for e in self.get_torrents().values() { - let entry = e.clone(); - handles.push(tokio::task::spawn(async move { - entry - .lock() - .expect("it should get lock for entry") - .remove_inactive_peers(max_peer_timeout); - })); - } - - join_all(handles).await; - } - - async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { - let mut db = self.get_torrents_mut(); - - db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); - } -} diff --git a/src/core/torrent/repository/rw_lock_std_mutex_tokio.rs 
b/src/core/torrent/repository/rw_lock_std_mutex_tokio.rs deleted file mode 100644 index 1feb41e3e..000000000 --- a/src/core/torrent/repository/rw_lock_std_mutex_tokio.rs +++ /dev/null @@ -1,141 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use futures::future::join_all; - -use super::{Repository, UpdateTorrentAsync}; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use crate::core::torrent::entry::{ReadInfo, Update, UpdateAsync}; -use crate::core::torrent::{entry, SwarmMetadata, TorrentsRwLockStdMutexTokio}; -use crate::core::{peer, TorrentsMetrics}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -impl TorrentsRwLockStdMutexTokio { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl UpdateTorrentAsync for TorrentsRwLockStdMutexTokio { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let maybe_entry = self.get_torrents().get(info_hash).cloned(); - - let entry = if let Some(entry) = maybe_entry { - entry - } else { - let mut db = self.get_torrents_mut(); - let entry = db.entry(*info_hash).or_insert(Arc::default()); - entry.clone() - }; - - entry.insert_or_update_peer_and_get_stats(peer).await - } -} - -impl UpdateTorrentAsync for Arc { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer).await - } -} - -impl Repository for TorrentsRwLockStdMutexTokio { - async fn get(&self, key: &InfoHash) -> 
Option { - let db = self.get_torrents(); - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { - let db = self.get_torrents(); - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let metrics: Arc> = Arc::default(); - - // todo:: replace with a ring buffer - let mut handles = Vec::>::default(); - - for e in self.get_torrents().values() { - let entry = e.clone(); - let metrics = metrics.clone(); - handles.push(tokio::task::spawn(async move { - let stats = entry.lock().await.get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - })); - } - - join_all(handles).await; - - *metrics.lock_owned().await - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut db = self.get_torrents_mut(); - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if db.contains_key(info_hash) { - continue; - } - - let entry = entry::MutexTokio::new( - entry::Single { - peers: BTreeMap::default(), - completed: *completed, - } - .into(), - ); - - db.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut(); - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - // todo:: replace with a ring buffer - - let mut handles = Vec::>::default(); - - for e in self.get_torrents().values() { - let entry = e.clone(); - handles.push(tokio::task::spawn(async move { - 
entry.lock().await.remove_inactive_peers(max_peer_timeout); - })); - } - - join_all(handles).await; - } - - async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { - let mut db = self.get_torrents_mut(); - - db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); - } -} diff --git a/src/core/torrent/repository/rw_lock_tokio.rs b/src/core/torrent/repository/rw_lock_tokio.rs deleted file mode 100644 index 3d633a837..000000000 --- a/src/core/torrent/repository/rw_lock_tokio.rs +++ /dev/null @@ -1,124 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use futures::future::join_all; - -use super::{Repository, UpdateTorrentAsync}; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use crate::core::torrent::entry::{self, ReadInfo, Update}; -use crate::core::torrent::{SwarmMetadata, TorrentsRwLockTokio}; -use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -impl TorrentsRwLockTokio { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl UpdateTorrentAsync for TorrentsRwLockTokio { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let mut db = self.get_torrents_mut().await; - - let entry = db.entry(*info_hash).or_insert(entry::Single::default()); - - entry.insert_or_update_peer_and_get_stats(peer) - } -} - -impl UpdateTorrentAsync for Arc { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - 
self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer).await - } -} - -impl Repository for TorrentsRwLockTokio { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents().await; - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::Single)> { - let db = self.get_torrents().await; - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let metrics: Arc> = Arc::default(); - - let mut handles = Vec::>::default(); - - for e in self.get_torrents().await.values() { - let entry = e.clone(); - let metrics = metrics.clone(); - handles.push(tokio::task::spawn(async move { - let stats = entry.get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - })); - } - - join_all(handles).await; - - *metrics.lock_owned().await - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = entry::Single { - peers: BTreeMap::default(), - completed: *completed, - }; - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut().await; - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - let mut db = self.get_torrents_mut().await; - - drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); - } - 
- async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut().await; - - db.retain(|_, e| e.is_not_zombie(policy)); - } -} diff --git a/src/core/torrent/repository/rw_lock_tokio_mutex_std.rs b/src/core/torrent/repository/rw_lock_tokio_mutex_std.rs deleted file mode 100644 index 3888c40b0..000000000 --- a/src/core/torrent/repository/rw_lock_tokio_mutex_std.rs +++ /dev/null @@ -1,146 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use futures::future::join_all; - -use super::{Repository, UpdateTorrentAsync}; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use crate::core::torrent::entry::{ReadInfo, Update, UpdateSync}; -use crate::core::torrent::{entry, SwarmMetadata, TorrentsRwLockTokioMutexStd}; -use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -impl TorrentsRwLockTokioMutexStd { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl UpdateTorrentAsync for TorrentsRwLockTokioMutexStd { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); - - let entry = if let Some(entry) = maybe_entry { - entry - } else { - let mut db = self.get_torrents_mut().await; - let entry = db.entry(*info_hash).or_insert(Arc::default()); - entry.clone() - }; - - entry.insert_or_update_peer_and_get_stats(peer) - } -} - -impl UpdateTorrentAsync for Arc { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) 
-> (bool, SwarmMetadata) { - self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer).await - } -} - -impl Repository for TorrentsRwLockTokioMutexStd { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents().await; - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexStd)> { - let db = self.get_torrents().await; - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let metrics: Arc> = Arc::default(); - - // todo:: replace with a ring buffer - - let mut handles = Vec::>::default(); - - for e in self.get_torrents().await.values() { - let entry = e.clone(); - let metrics = metrics.clone(); - handles.push(tokio::task::spawn(async move { - let stats = entry.lock().expect("it should get a lock").get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - })); - } - - join_all(handles).await; - - *metrics.lock_owned().await - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = entry::MutexStd::new( - entry::Single { - peers: BTreeMap::default(), - completed: *completed, - } - .into(), - ); - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut().await; - db.remove(key) - } - - async fn 
remove_inactive_peers(&self, max_peer_timeout: u32) { - // todo:: replace with a ring buffer - let mut handles = Vec::>::default(); - - for e in self.get_torrents().await.values() { - let entry = e.clone(); - handles.push(tokio::task::spawn(async move { - entry - .lock() - .expect("it should get lock for entry") - .remove_inactive_peers(max_peer_timeout); - })); - } - - join_all(handles).await; - } - - async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut().await; - - db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); - } -} diff --git a/src/core/torrent/repository/rw_lock_tokio_mutex_tokio.rs b/src/core/torrent/repository/rw_lock_tokio_mutex_tokio.rs deleted file mode 100644 index 49e08d90c..000000000 --- a/src/core/torrent/repository/rw_lock_tokio_mutex_tokio.rs +++ /dev/null @@ -1,144 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; - -use futures::future::join_all; - -use super::{Repository, UpdateTorrentAsync}; -use crate::core::databases::PersistentTorrents; -use crate::core::services::torrent::Pagination; -use crate::core::torrent::entry::{self, ReadInfo, Update, UpdateAsync}; -use crate::core::torrent::{SwarmMetadata, TorrentsRwLockTokioMutexTokio}; -use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -impl TorrentsRwLockTokioMutexTokio { - async fn get_torrents<'a>( - &'a self, - ) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl UpdateTorrentAsync for TorrentsRwLockTokioMutexTokio { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - 
let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); - - let entry = if let Some(entry) = maybe_entry { - entry - } else { - let mut db = self.get_torrents_mut().await; - let entry = db.entry(*info_hash).or_insert(Arc::default()); - entry.clone() - }; - - entry.insert_or_update_peer_and_get_stats(peer).await - } -} - -impl UpdateTorrentAsync for Arc { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { - self.as_ref().update_torrent_with_peer_and_get_stats(info_hash, peer).await - } -} - -impl Repository for TorrentsRwLockTokioMutexTokio { - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents().await; - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { - let db = self.get_torrents().await; - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> TorrentsMetrics { - let metrics: Arc> = Arc::default(); - - // todo:: replace with a ring buffer - let mut handles = Vec::>::default(); - - for e in self.get_torrents().await.values() { - let entry = e.clone(); - let metrics = metrics.clone(); - handles.push(tokio::task::spawn(async move { - let stats = entry.lock().await.get_stats(); - metrics.lock().await.seeders += u64::from(stats.complete); - metrics.lock().await.completed += u64::from(stats.downloaded); - metrics.lock().await.leechers += u64::from(stats.incomplete); - metrics.lock().await.torrents += 1; - })); - } - - join_all(handles).await; - - *metrics.lock_owned().await - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut db = self.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { 
- // Skip if torrent entry already exists - if db.contains_key(info_hash) { - continue; - } - - let entry = entry::MutexTokio::new( - entry::Single { - peers: BTreeMap::default(), - completed: *completed, - } - .into(), - ); - - db.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut().await; - db.remove(key) - } - - async fn remove_inactive_peers(&self, max_peer_timeout: u32) { - // todo:: replace with a ring buffer - let mut handles = Vec::>::default(); - - for e in self.get_torrents().await.values() { - let entry = e.clone(); - handles.push(tokio::task::spawn(async move { - entry.lock().await.remove_inactive_peers(max_peer_timeout); - })); - } - - join_all(handles).await; - } - - async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut().await; - - db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); - } -} diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index b241c469c..48ac660cf 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -71,10 +71,11 @@ impl From for Stats { #[cfg(test)] mod tests { + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use super::Stats; use crate::core::services::statistics::TrackerMetrics; use crate::core::statistics::Metrics; - use crate::core::TorrentsMetrics; #[test] fn stats_resource_should_be_converted_from_tracker_metrics() { diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index 999580da7..15f70c8b6 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -10,13 +10,14 @@ use axum_extra::extract::Query; use log::debug; use serde::{de, Deserialize, Deserializer}; use thiserror::Error; +use torrust_tracker_primitives::info_hash::InfoHash; +use 
torrust_tracker_primitives::pagination::Pagination; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; -use crate::core::services::torrent::{get_torrent_info, get_torrents, get_torrents_page, Pagination}; +use crate::core::services::torrent::{get_torrent_info, get_torrents, get_torrents_page}; use crate::core::Tracker; use crate::servers::apis::v1::responses::invalid_info_hash_param_response; use crate::servers::apis::InfoHashParam; -use crate::shared::bit_torrent::info_hash::InfoHash; /// It handles the request to get the torrent data. /// diff --git a/src/servers/apis/v1/context/torrent/resources/peer.rs b/src/servers/apis/v1/context/torrent/resources/peer.rs index 752694393..e7a0802c1 100644 --- a/src/servers/apis/v1/context/torrent/resources/peer.rs +++ b/src/servers/apis/v1/context/torrent/resources/peer.rs @@ -1,7 +1,7 @@ //! `Peer` and Peer `Id` API resources. +use derive_more::From; use serde::{Deserialize, Serialize}; - -use crate::core; +use torrust_tracker_primitives::peer; /// `Peer` API resource. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -22,7 +22,7 @@ pub struct Peer { /// The peer's left bytes (pending to download). pub left: i64, /// The peer's event: `started`, `stopped`, `completed`. - /// See [`AnnounceEventDef`](crate::shared::bit_torrent::common::AnnounceEventDef). + /// See [`AnnounceEvent`](torrust_tracker_primitives::announce_event::AnnounceEvent). 
pub event: String, } @@ -35,8 +35,8 @@ pub struct Id { pub client: Option, } -impl From for Id { - fn from(peer_id: core::peer::Id) -> Self { +impl From for Id { + fn from(peer_id: peer::Id) -> Self { Id { id: peer_id.to_hex_string(), client: peer_id.get_client_name(), @@ -44,18 +44,32 @@ impl From for Id { } } -impl From for Peer { - #[allow(deprecated)] - fn from(peer: core::peer::Peer) -> Self { +impl From for Peer { + fn from(value: peer::Peer) -> Self { + #[allow(deprecated)] Peer { - peer_id: Id::from(peer.peer_id), - peer_addr: peer.peer_addr.to_string(), - updated: peer.updated.as_millis(), - updated_milliseconds_ago: peer.updated.as_millis(), - uploaded: peer.uploaded.0, - downloaded: peer.downloaded.0, - left: peer.left.0, - event: format!("{:?}", peer.event), + peer_id: Id::from(value.peer_id), + peer_addr: value.peer_addr.to_string(), + updated: value.updated.as_millis(), + updated_milliseconds_ago: value.updated.as_millis(), + uploaded: value.uploaded.0, + downloaded: value.downloaded.0, + left: value.left.0, + event: format!("{:?}", value.event), + } + } +} + +#[derive(From, PartialEq, Default)] +pub struct Vector(pub Vec); + +impl FromIterator for Vector { + fn from_iter>(iter: T) -> Self { + let mut peers = Vector::default(); + + for i in iter { + peers.0.push(i.into()); } + peers } } diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index fc43fbb7a..2f1ace5c9 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -6,7 +6,6 @@ //! the JSON response. use serde::{Deserialize, Serialize}; -use super::peer; use crate::core::services::torrent::{BasicInfo, Info}; /// `Torrent` API resource. 
@@ -68,14 +67,16 @@ pub fn to_resource(basic_info_vec: &[BasicInfo]) -> Vec { impl From for Torrent { fn from(info: Info) -> Self { + let peers: Option = info.peers.map(|peers| peers.into_iter().collect()); + + let peers: Option> = peers.map(|peers| peers.0); + Self { info_hash: info.info_hash.to_string(), seeders: info.seeders, completed: info.completed, leechers: info.leechers, - peers: info - .peers - .map(|peers| peers.iter().map(|peer| peer::Peer::from(*peer)).collect()), + peers, } } } @@ -96,15 +97,14 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; use super::Torrent; - use crate::core::peer; use crate::core::services::torrent::{BasicInfo, Info}; use crate::servers::apis::v1::context::torrent::resources::peer::Peer; use crate::servers::apis::v1::context::torrent::resources::torrent::ListItem; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::shared::clock::DurationSinceUnixEpoch; fn sample_peer() -> peer::Peer { peer::Peer { diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs index fc32f667b..c88f8cc1d 100644 --- a/src/servers/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -5,6 +5,7 @@ use std::sync::Arc; use axum::extract::{Path, State}; use axum::response::Response; +use torrust_tracker_primitives::info_hash::InfoHash; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, @@ -12,7 +13,6 @@ use super::responses::{ use crate::core::Tracker; use crate::servers::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; use 
crate::servers::apis::InfoHashParam; -use crate::shared::bit_torrent::info_hash::InfoHash; /// It handles the request to add a torrent to the whitelist. /// diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index 08a59ef90..6e8b5a40e 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -206,15 +206,15 @@ //! //! ### Scrape //! -//! The `scrape` request allows a peer to get [swarm metadata](crate::core::torrent::SwarmMetadata) +//! The `scrape` request allows a peer to get [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) //! for multiple torrents at the same time. //! -//! The response contains the [swarm metadata](crate::core::torrent::SwarmMetadata) +//! The response contains the [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) //! for that torrent: //! -//! - [complete](crate::core::torrent::SwarmMetadata::complete) -//! - [downloaded](crate::core::torrent::SwarmMetadata::downloaded) -//! - [incomplete](crate::core::torrent::SwarmMetadata::incomplete) +//! - [complete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::complete) +//! - [downloaded](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::downloaded) +//! - [incomplete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::incomplete) //! //! **Query parameters** //! @@ -266,7 +266,7 @@ //! Where the `files` key contains a dictionary of dictionaries. The first //! dictionary key is the `info_hash` of the torrent (`iiiiiiiiiiiiiiiiiiii` in //! the example). The second level dictionary contains the -//! [swarm metadata](crate::core::torrent::SwarmMetadata) for that torrent. +//! [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) for that torrent. //! //! If you save the response as a file and you open it with a program that //! 
can handle binary data you would see: diff --git a/src/servers/http/percent_encoding.rs b/src/servers/http/percent_encoding.rs index 472b1e724..90f4b9a43 100644 --- a/src/servers/http/percent_encoding.rs +++ b/src/servers/http/percent_encoding.rs @@ -15,8 +15,8 @@ //! - //! - //! - -use crate::core::peer::{self, IdConversionError}; -use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; +use torrust_tracker_primitives::info_hash::{self, InfoHash}; +use torrust_tracker_primitives::peer; /// Percent decodes a percent encoded infohash. Internally an /// [`InfoHash`] is a 20-byte array. @@ -27,8 +27,8 @@ use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; /// ```rust /// use std::str::FromStr; /// use torrust_tracker::servers::http::percent_encoding::percent_decode_info_hash; -/// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -/// use torrust_tracker::core::peer; +/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use torrust_tracker_primitives::peer; /// /// let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; /// @@ -44,12 +44,12 @@ use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; /// /// Will return `Err` if the decoded bytes do not represent a valid /// [`InfoHash`]. -pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { +pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { let bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); InfoHash::try_from(bytes) } -/// Percent decodes a percent encoded peer id. Internally a peer [`Id`](crate::core::peer::Id) +/// Percent decodes a percent encoded peer id. Internally a peer [`Id`](peer::Id) /// is a 20-byte array. 
/// /// For example, given the peer id `*b"-qB00000000000000000"`, @@ -58,8 +58,8 @@ pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result Result Result { +pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result { let bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); peer::Id::try_from(bytes) } @@ -80,9 +80,10 @@ pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result) -> Result) -> Result R /// /// It ignores the peer address in the announce request params. #[must_use] -fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> Peer { - Peer { +fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> peer::Peer { + peer::Peer { peer_id: announce_request.peer_id, peer_addr: SocketAddr::new(*peer_ip, announce_request.port), updated: Current::now(), uploaded: NumberOfBytes(announce_request.uploaded.unwrap_or(0)), downloaded: NumberOfBytes(announce_request.downloaded.unwrap_or(0)), left: NumberOfBytes(announce_request.left.unwrap_or(0)), - event: map_to_aquatic_event(&announce_request.event), + event: map_to_torrust_event(&announce_request.event), } } -fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { +#[must_use] +pub fn map_to_aquatic_event(event: &Option) -> aquatic_udp_protocol::AnnounceEvent { match event { Some(event) => match &event { Event::Started => aquatic_udp_protocol::AnnounceEvent::Started, @@ -153,17 +154,30 @@ fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { } } +#[must_use] +pub fn map_to_torrust_event(event: &Option) -> AnnounceEvent { + match event { + Some(event) => match &event { + Event::Started => AnnounceEvent::Started, + Event::Stopped => AnnounceEvent::Stopped, + Event::Completed => AnnounceEvent::Completed, + }, + None => AnnounceEvent::None, + } +} + #[cfg(test)] mod tests { + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; 
- use crate::core::{peer, Tracker}; + use crate::core::Tracker; use crate::servers::http::v1::requests::announce::Announce; use crate::servers::http::v1::responses; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; - use crate::shared::bit_torrent::info_hash::InfoHash; fn private_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_mode_private()) diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 49b1aebc7..d6b39cc53 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -111,6 +111,7 @@ mod tests { use std::net::IpAddr; use std::str::FromStr; + use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; @@ -118,7 +119,6 @@ mod tests { use crate::servers::http::v1::requests::scrape::Scrape; use crate::servers::http::v1::responses; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; - use crate::shared::bit_torrent::info_hash::InfoHash; fn private_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_mode_private()) diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index 08dd9da29..39a6c1846 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -7,12 +7,12 @@ use std::str::FromStr; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_primitives::info_hash::{self, InfoHash}; +use torrust_tracker_primitives::peer; -use crate::core::peer::{self, IdConversionError}; use crate::servers::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::servers::http::v1::query::{ParseQueryError, Query}; use crate::servers::http::v1::responses; -use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; /// The number of bytes `downloaded`, 
`uploaded` or `left`. It's used in the /// `Announce` request for parameters that represent a number of bytes. @@ -33,8 +33,8 @@ const COMPACT: &str = "compact"; /// /// ```rust /// use torrust_tracker::servers::http::v1::requests::announce::{Announce, Compact, Event}; -/// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -/// use torrust_tracker::core::peer; +/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use torrust_tracker_primitives::peer; /// /// let request = Announce { /// // Mandatory params @@ -119,14 +119,14 @@ pub enum ParseAnnounceQueryError { InvalidInfoHashParam { param_name: String, param_value: String, - source: LocatedError<'static, ConversionError>, + source: LocatedError<'static, info_hash::ConversionError>, }, /// The `peer_id` is invalid. #[error("invalid param value {param_value} for {param_name} in {source}")] InvalidPeerIdParam { param_name: String, param_value: String, - source: LocatedError<'static, IdConversionError>, + source: LocatedError<'static, peer::IdConversionError>, }, } @@ -355,12 +355,13 @@ mod tests { mod announce_request { - use crate::core::peer; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; + use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::announce::{ Announce, Compact, Event, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, }; - use crate::shared::bit_torrent::info_hash::InfoHash; #[test] fn should_be_instantiated_from_the_url_query_with_only_the_mandatory_params() { diff --git a/src/servers/http/v1/requests/scrape.rs b/src/servers/http/v1/requests/scrape.rs index 7c52b9fc4..19f6e35a6 100644 --- a/src/servers/http/v1/requests/scrape.rs +++ b/src/servers/http/v1/requests/scrape.rs @@ -5,11 +5,11 @@ use std::panic::Location; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_primitives::info_hash::{self, InfoHash}; use 
crate::servers::http::percent_encoding::percent_decode_info_hash; use crate::servers::http::v1::query::Query; use crate::servers::http::v1::responses; -use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; pub type NumberOfBytes = i64; @@ -34,7 +34,7 @@ pub enum ParseScrapeQueryError { InvalidInfoHashParam { param_name: String, param_value: String, - source: LocatedError<'static, ConversionError>, + source: LocatedError<'static, info_hash::ConversionError>, }, } @@ -86,9 +86,10 @@ mod tests { mod scrape_request { + use torrust_tracker_primitives::info_hash::InfoHash; + use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::scrape::{Scrape, INFO_HASH}; - use crate::shared::bit_torrent::info_hash::InfoHash; #[test] fn should_be_instantiated_from_the_url_query_with_only_one_infohash() { diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index 619632ae4..134da919e 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -7,10 +7,10 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use axum::http::StatusCode; use derive_more::{AsRef, Constructor, From}; use torrust_tracker_contrib_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; +use torrust_tracker_primitives::peer; use super::Response; -use crate::core::peer::Peer; -use crate::core::{self, AnnounceData}; +use crate::core::AnnounceData; use crate::servers::http::v1::responses; /// An [`Announce`] response, that can be anything that is convertible from [`AnnounceData`]. @@ -150,21 +150,6 @@ impl Into> for Compact { } } -/// Marker Trait for Peer Vectors -pub trait PeerEncoding: From + PartialEq {} - -impl FromIterator for Vec

{ - fn from_iter>(iter: T) -> Self { - let mut peers: Vec

= vec![]; - - for peer in iter { - peers.push(peer.into()); - } - - peers - } -} - /// A [`NormalPeer`], for the [`Normal`] form. /// /// ```rust @@ -188,10 +173,10 @@ pub struct NormalPeer { pub port: u16, } -impl PeerEncoding for NormalPeer {} +impl peer::Encoding for NormalPeer {} -impl From for NormalPeer { - fn from(peer: core::peer::Peer) -> Self { +impl From for NormalPeer { + fn from(peer: peer::Peer) -> Self { NormalPeer { peer_id: peer.peer_id.to_bytes(), ip: peer.peer_addr.ip(), @@ -240,10 +225,10 @@ pub enum CompactPeer { V6(CompactPeerData), } -impl PeerEncoding for CompactPeer {} +impl peer::Encoding for CompactPeer {} -impl From for CompactPeer { - fn from(peer: core::peer::Peer) -> Self { +impl From for CompactPeer { + fn from(peer: peer::Peer) -> Self { match (peer.peer_addr.ip(), peer.peer_addr.port()) { (IpAddr::V4(ip), port) => Self::V4(CompactPeerData { ip, port }), (IpAddr::V6(ip), port) => Self::V6(CompactPeerData { ip, port }), @@ -316,10 +301,10 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::AnnouncePolicy; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::core::peer::fixture::PeerBuilder; - use crate::core::peer::Id; - use crate::core::torrent::SwarmMetadata; use crate::core::AnnounceData; use crate::servers::http::v1::responses::announce::{Announce, Compact, Normal, Response}; @@ -339,12 +324,12 @@ mod tests { let policy = AnnouncePolicy::new(111, 222); let peer_ipv4 = PeerBuilder::default() - .with_peer_id(&Id(*b"-qB00000000000000001")) + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 0x7070)) .build(); let peer_ipv6 = PeerBuilder::default() - .with_peer_id(&Id(*b"-qB00000000000000002")) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new( 
IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), 0x7070, diff --git a/src/servers/http/v1/responses/scrape.rs b/src/servers/http/v1/responses/scrape.rs index e16827824..11f361028 100644 --- a/src/servers/http/v1/responses/scrape.rs +++ b/src/servers/http/v1/responses/scrape.rs @@ -13,8 +13,8 @@ use crate::core::ScrapeData; /// /// ```rust /// use torrust_tracker::servers::http::v1::responses::scrape::Bencoded; -/// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -/// use torrust_tracker::core::torrent::SwarmMetadata; +/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; /// use torrust_tracker::core::ScrapeData; /// /// let info_hash = InfoHash([0x69; 20]); @@ -92,10 +92,11 @@ impl IntoResponse for Bencoded { mod tests { mod scrape_response { - use crate::core::torrent::SwarmMetadata; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use crate::core::ScrapeData; use crate::servers::http::v1::responses::scrape::Bencoded; - use crate::shared::bit_torrent::info_hash::InfoHash; fn sample_scrape_data() -> ScrapeData { let info_hash = InfoHash([0x69; 20]); diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index b53697eed..b37081045 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -11,9 +11,10 @@ use std::net::IpAddr; use std::sync::Arc; -use crate::core::peer::Peer; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; + use crate::core::{statistics, AnnounceData, Tracker}; -use crate::shared::bit_torrent::info_hash::InfoHash; /// The HTTP tracker `announce` service. 
/// @@ -25,7 +26,7 @@ use crate::shared::bit_torrent::info_hash::InfoHash; /// > **NOTICE**: as the HTTP tracker does not requires a connection request /// like the UDP tracker, the number of TCP connections is incremented for /// each `announce` request. -pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) -> AnnounceData { +pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut peer::Peer) -> AnnounceData { let original_peer_ip = peer.peer_addr.ip(); // The tracker could change the original peer ip @@ -47,13 +48,13 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; - use crate::core::{peer, Tracker}; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::shared::clock::DurationSinceUnixEpoch; + use crate::core::Tracker; fn public_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_mode_public()) @@ -94,11 +95,11 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::core::peer::Peer; - use crate::core::torrent::SwarmMetadata; use crate::core::{statistics, AnnounceData, Tracker}; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; @@ -150,7 +151,7 @@ mod tests { Tracker::new(&configuration, Some(stats_event_sender), 
statistics::Repo::new()).unwrap() } - fn peer_with_the_ipv4_loopback_ip() -> Peer { + fn peer_with_the_ipv4_loopback_ip() -> peer::Peer { let loopback_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); let mut peer = sample_peer(); peer.peer_addr = SocketAddr::new(loopback_ip, 8080); diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 82ca15dc8..18b57f479 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -11,8 +11,9 @@ use std::net::IpAddr; use std::sync::Arc; +use torrust_tracker_primitives::info_hash::InfoHash; + use crate::core::{statistics, ScrapeData, Tracker}; -use crate::shared::bit_torrent::info_hash::InfoHash; /// The HTTP tracker `scrape` service. /// @@ -60,13 +61,13 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; - use crate::core::{peer, Tracker}; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::shared::clock::DurationSinceUnixEpoch; + use crate::core::Tracker; fn public_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_mode_public()) @@ -99,9 +100,9 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; - use crate::core::torrent::SwarmMetadata; use crate::core::{statistics, ScrapeData, Tracker}; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index f42e11424..8f6e6d8b4 100644 --- a/src/servers/udp/handlers.rs +++ 
b/src/servers/udp/handlers.rs @@ -12,6 +12,7 @@ use aquatic_udp_protocol::{ use log::debug; use tokio::net::UdpSocket; use torrust_tracker_located_error::DynError; +use torrust_tracker_primitives::info_hash::InfoHash; use uuid::Uuid; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; @@ -22,7 +23,6 @@ use crate::servers::udp::logging::{log_bad_request, log_error_response, log_requ use crate::servers::udp::peer_builder; use crate::servers::udp::request::AnnounceWrapper; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; -use crate::shared::bit_torrent::info_hash::InfoHash; /// It handles the incoming UDP packets. /// @@ -318,12 +318,13 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::{peer, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; - use crate::core::{peer, Tracker}; + use crate::core::Tracker; use crate::shared::clock::{Current, Time}; fn tracker_configuration() -> Configuration { @@ -605,8 +606,9 @@ mod tests { Response, ResponsePeer, }; use mockall::predicate::eq; + use torrust_tracker_primitives::peer; - use crate::core::{self, peer, statistics}; + use crate::core::{self, statistics}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -635,7 +637,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); let expected_peer = TorrentPeerBuilder::default() .with_peer_id(peer::Id(peer_id.0)) @@ -696,7 
+698,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } @@ -773,8 +775,8 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use torrust_tracker_primitives::peer; - use crate::core::peer; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -801,7 +803,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); @@ -826,8 +828,9 @@ mod tests { Response, ResponsePeer, }; use mockall::predicate::eq; + use torrust_tracker_primitives::peer; - use crate::core::{self, peer, statistics}; + use crate::core::{self, statistics}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -857,7 +860,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); let expected_peer = TorrentPeerBuilder::default() .with_peer_id(peer::Id(peer_id.0)) @@ -921,7 +924,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); // When 
using IPv6 the tracker converts the remote client ip into a IPv4 address assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); @@ -1038,7 +1041,7 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); @@ -1063,9 +1066,10 @@ mod tests { InfoHash, NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; + use torrust_tracker_primitives::peer; use super::TorrentPeerBuilder; - use crate::core::{self, peer}; + use crate::core::{self}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; diff --git a/src/servers/udp/logging.rs b/src/servers/udp/logging.rs index a32afc6a3..9bbb48f6a 100644 --- a/src/servers/udp/logging.rs +++ b/src/servers/udp/logging.rs @@ -4,9 +4,9 @@ use std::net::SocketAddr; use std::time::Duration; use aquatic_udp_protocol::{Request, Response, TransactionId}; +use torrust_tracker_primitives::info_hash::InfoHash; use super::handlers::RequestId; -use crate::shared::bit_torrent::info_hash::InfoHash; pub fn log_request(request: &Request, request_id: &RequestId, server_socket_addr: &SocketAddr) { let action = map_action_name(request); diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index 8ef562086..fa4e8e926 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -62,7 +62,7 @@ //! ``` //! //! For the `Announce` request there is a wrapper struct [`AnnounceWrapper`](crate::servers::udp::request::AnnounceWrapper). -//! It was added to add an extra field with the internal [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) struct. +//! 
It was added to add an extra field with the internal [`InfoHash`](torrust_tracker_primitives::info_hash::InfoHash) struct. //! //! ### Connect //! @@ -345,7 +345,7 @@ //! packet. //! //! We are using a wrapper struct for the aquatic [`AnnounceRequest`](aquatic_udp_protocol::request::AnnounceRequest) -//! struct, because we have our internal [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) +//! struct, because we have our internal [`InfoHash`](torrust_tracker_primitives::info_hash::InfoHash) //! struct. //! //! ```text @@ -467,15 +467,15 @@ //! //! ### Scrape //! -//! The `scrape` request allows a peer to get [swarm metadata](crate::core::torrent::SwarmMetadata) +//! The `scrape` request allows a peer to get [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) //! for multiple torrents at the same time. //! -//! The response contains the [swarm metadata](crate::core::torrent::SwarmMetadata) +//! The response contains the [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) //! for that torrent: //! -//! - [complete](crate::core::torrent::SwarmMetadata::complete) -//! - [downloaded](crate::core::torrent::SwarmMetadata::downloaded) -//! - [incomplete](crate::core::torrent::SwarmMetadata::incomplete) +//! - [complete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::complete) +//! - [downloaded](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::downloaded) +//! - [incomplete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::incomplete) //! //! > **NOTICE**: up to about 74 torrents can be scraped at once. A full scrape //! can't be done with this protocol. This is a limitation of the UDP protocol. diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs index 5168e2578..8c8fa10a5 100644 --- a/src/servers/udp/peer_builder.rs +++ b/src/servers/udp/peer_builder.rs @@ -1,11 +1,13 @@ //! Logic to extract the peer info from the announce request. 
use std::net::{IpAddr, SocketAddr}; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::{peer, NumberOfBytes}; + use super::request::AnnounceWrapper; -use crate::core::peer::{Id, Peer}; use crate::shared::clock::{Current, Time}; -/// Extracts the [`Peer`] info from the +/// Extracts the [`peer::Peer`] info from the /// announce request. /// /// # Arguments @@ -14,14 +16,14 @@ use crate::shared::clock::{Current, Time}; /// * `peer_ip` - The real IP address of the peer, not the one in the announce /// request. #[must_use] -pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> Peer { - Peer { - peer_id: Id(announce_wrapper.announce_request.peer_id.0), +pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> peer::Peer { + peer::Peer { + peer_id: peer::Id(announce_wrapper.announce_request.peer_id.0), peer_addr: SocketAddr::new(*peer_ip, announce_wrapper.announce_request.port.0), updated: Current::now(), - uploaded: announce_wrapper.announce_request.bytes_uploaded, - downloaded: announce_wrapper.announce_request.bytes_downloaded, - left: announce_wrapper.announce_request.bytes_left, - event: announce_wrapper.announce_request.event, + uploaded: NumberOfBytes(announce_wrapper.announce_request.bytes_uploaded.0), + downloaded: NumberOfBytes(announce_wrapper.announce_request.bytes_downloaded.0), + left: NumberOfBytes(announce_wrapper.announce_request.bytes_left.0), + event: AnnounceEvent::from_i32(announce_wrapper.announce_request.event.to_i32()), } } diff --git a/src/servers/udp/request.rs b/src/servers/udp/request.rs index f655fd36a..e172e03b1 100644 --- a/src/servers/udp/request.rs +++ b/src/servers/udp/request.rs @@ -6,8 +6,7 @@ //! Some of the type in this module are wrappers around the types in the //! `aquatic_udp_protocol` crate. 
use aquatic_udp_protocol::AnnounceRequest; - -use crate::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; /// Wrapper around [`AnnounceRequest`]. pub struct AnnounceWrapper { diff --git a/src/shared/bit_torrent/common.rs b/src/shared/bit_torrent/common.rs index 9bf9dfd3c..9625b88e7 100644 --- a/src/shared/bit_torrent/common.rs +++ b/src/shared/bit_torrent/common.rs @@ -1,7 +1,6 @@ //! `BitTorrent` protocol primitive types //! //! [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde::{Deserialize, Serialize}; /// The maximum number of torrents that can be returned in an `scrape` response. @@ -33,23 +32,3 @@ enum Actions { Scrape = 2, Error = 3, } - -/// Announce events. Described on the -/// [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -#[derive(Serialize, Deserialize)] -#[serde(remote = "AnnounceEvent")] -pub enum AnnounceEventDef { - /// The peer has started downloading the torrent. - Started, - /// The peer has ceased downloading the torrent. - Stopped, - /// The peer has completed downloading the torrent. - Completed, - /// This is one of the announcements done at regular intervals. - None, -} - -/// Number of bytes downloaded, uploaded or pending to download (left) by the peer. -#[derive(Serialize, Deserialize)] -#[serde(remote = "NumberOfBytes")] -pub struct NumberOfBytesDef(pub i64); diff --git a/src/shared/bit_torrent/info_hash.rs b/src/shared/bit_torrent/info_hash.rs index 20c3cb38b..506c37758 100644 --- a/src/shared/bit_torrent/info_hash.rs +++ b/src/shared/bit_torrent/info_hash.rs @@ -129,169 +129,38 @@ //! You can hash that byte string with //! //! 
The result is a 20-char string: `5452869BE36F9F3350CCEE6B4544E7E76CAAADAB` -use std::panic::Location; -use thiserror::Error; +use torrust_tracker_primitives::info_hash::InfoHash; -/// `BitTorrent` Info Hash v1 -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct InfoHash(pub [u8; 20]); +pub mod fixture { + use std::hash::{DefaultHasher, Hash, Hasher}; -const INFO_HASH_BYTES_LEN: usize = 20; + use super::InfoHash; -impl InfoHash { - /// Create a new `InfoHash` from a byte slice. + /// Generate as semi-stable pseudo-random infohash /// - /// # Panics + /// Note: If the [`DefaultHasher`] implementation changes + /// so will the resulting info-hashes. /// - /// Will panic if byte slice does not contains the exact amount of bytes need for the `InfoHash`. - #[must_use] - pub fn from_bytes(bytes: &[u8]) -> Self { - assert_eq!(bytes.len(), INFO_HASH_BYTES_LEN); - let mut ret = Self([0u8; INFO_HASH_BYTES_LEN]); - ret.0.clone_from_slice(bytes); - ret - } - - /// Returns the `InfoHash` internal byte array. - #[must_use] - pub fn bytes(&self) -> [u8; 20] { - self.0 - } - - /// Returns the `InfoHash` as a hex string. + /// The results should not be relied upon between versions. 
#[must_use] - pub fn to_hex_string(&self) -> String { - self.to_string() - } -} - -impl std::fmt::Display for InfoHash { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut chars = [0u8; 40]; - binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); - write!(f, "{}", std::str::from_utf8(&chars).unwrap()) - } -} - -impl std::str::FromStr for InfoHash { - type Err = binascii::ConvertError; - - fn from_str(s: &str) -> Result { - let mut i = Self([0u8; 20]); - if s.len() != 40 { - return Err(binascii::ConvertError::InvalidInputLength); - } - binascii::hex2bin(s.as_bytes(), &mut i.0)?; - Ok(i) - } -} - -impl Ord for InfoHash { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.0.cmp(&other.0) - } -} - -impl std::cmp::PartialOrd for InfoHash { - fn partial_cmp(&self, other: &InfoHash) -> Option { - Some(self.cmp(other)) - } -} + pub fn gen_seeded_infohash(seed: &u64) -> InfoHash { + let mut buf_a: [[u8; 8]; 4] = Default::default(); + let mut buf_b = InfoHash::default(); -impl std::convert::From<&[u8]> for InfoHash { - fn from(data: &[u8]) -> InfoHash { - assert_eq!(data.len(), 20); - let mut ret = InfoHash([0u8; 20]); - ret.0.clone_from_slice(data); - ret - } -} - -impl std::convert::From<[u8; 20]> for InfoHash { - fn from(val: [u8; 20]) -> Self { - InfoHash(val) - } -} - -/// Errors that can occur when converting from a `Vec` to an `InfoHash`. -#[derive(Error, Debug)] -pub enum ConversionError { - /// Not enough bytes for infohash. An infohash is 20 bytes. - #[error("not enough bytes for infohash: {message} {location}")] - NotEnoughBytes { - location: &'static Location<'static>, - message: String, - }, - /// Too many bytes for infohash. An infohash is 20 bytes. 
- #[error("too many bytes for infohash: {message} {location}")] - TooManyBytes { - location: &'static Location<'static>, - message: String, - }, -} - -impl TryFrom> for InfoHash { - type Error = ConversionError; + let mut hasher = DefaultHasher::new(); + seed.hash(&mut hasher); - fn try_from(bytes: Vec) -> Result { - if bytes.len() < INFO_HASH_BYTES_LEN { - return Err(ConversionError::NotEnoughBytes { - location: Location::caller(), - message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, - }); - } - if bytes.len() > INFO_HASH_BYTES_LEN { - return Err(ConversionError::TooManyBytes { - location: Location::caller(), - message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, - }); + for u in &mut buf_a { + seed.hash(&mut hasher); + *u = hasher.finish().to_le_bytes(); } - Ok(Self::from_bytes(&bytes)) - } -} -impl serde::ser::Serialize for InfoHash { - fn serialize(&self, serializer: S) -> Result { - let mut buffer = [0u8; 40]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); - let str_out = std::str::from_utf8(bytes_out).unwrap(); - serializer.serialize_str(str_out) - } -} - -impl<'de> serde::de::Deserialize<'de> for InfoHash { - fn deserialize>(des: D) -> Result { - des.deserialize_str(InfoHashVisitor) - } -} - -struct InfoHashVisitor; - -impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { - type Value = InfoHash; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "a 40 character long hash") - } - - fn visit_str(self, v: &str) -> Result { - if v.len() != 40 { - return Err(serde::de::Error::invalid_value( - serde::de::Unexpected::Str(v), - &"a 40 character long string", - )); + for (a, b) in buf_a.iter().flat_map(|a| a.iter()).zip(buf_b.0.iter_mut()) { + *b = *a; } - let mut res = InfoHash([0u8; 20]); - - if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { - return Err(serde::de::Error::invalid_value( - 
serde::de::Unexpected::Str(v), - &"a hexadecimal string", - )); - }; - Ok(res) + buf_b } } diff --git a/src/shared/bit_torrent/tracker/http/client/requests/announce.rs b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs index 6cae79888..b872e76e9 100644 --- a/src/shared/bit_torrent/tracker/http/client/requests/announce.rs +++ b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs @@ -3,9 +3,9 @@ use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; use serde_repr::Serialize_repr; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; -use crate::core::peer::Id; -use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20}; pub struct Query { @@ -99,7 +99,7 @@ impl QueryBuilder { peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), downloaded: 0, uploaded: 0, - peer_id: Id(*b"-qB00000000000000001").0, + peer_id: peer::Id(*b"-qB00000000000000001").0, port: 17548, left: 0, event: Some(Event::Completed), @@ -117,7 +117,7 @@ impl QueryBuilder { } #[must_use] - pub fn with_peer_id(mut self, peer_id: &Id) -> Self { + pub fn with_peer_id(mut self, peer_id: &peer::Id) -> Self { self.announce_query.peer_id = peer_id.0; self } diff --git a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs index 4fa49eed6..4d12fc2d2 100644 --- a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs +++ b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs @@ -2,7 +2,8 @@ use std::error::Error; use std::fmt::{self}; use std::str::FromStr; -use crate::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; + use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20}; pub struct Query { diff --git a/src/shared/bit_torrent/tracker/http/client/responses/announce.rs 
b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs index e75cc6671..15ec446cb 100644 --- a/src/shared/bit_torrent/tracker/http/client/responses/announce.rs +++ b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs @@ -1,8 +1,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use serde::{Deserialize, Serialize}; - -use crate::core::peer::Peer; +use torrust_tracker_primitives::peer; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Announce { @@ -23,8 +22,8 @@ pub struct DictionaryPeer { pub port: u16, } -impl From for DictionaryPeer { - fn from(peer: Peer) -> Self { +impl From for DictionaryPeer { + fn from(peer: peer::Peer) -> Self { DictionaryPeer { peer_id: peer.peer_id.to_bytes().to_vec(), ip: peer.peer_addr.ip().to_string(), diff --git a/src/shared/clock/mod.rs b/src/shared/clock/mod.rs index 6d9d4112a..a73878466 100644 --- a/src/shared/clock/mod.rs +++ b/src/shared/clock/mod.rs @@ -31,9 +31,7 @@ use std::str::FromStr; use std::time::Duration; use chrono::{DateTime, Utc}; - -/// Duration since the Unix Epoch. -pub type DurationSinceUnixEpoch = Duration; +use torrust_tracker_primitives::DurationSinceUnixEpoch; /// Clock types. 
#[derive(Debug)] diff --git a/src/shared/clock/time_extent.rs b/src/shared/clock/time_extent.rs index a5a359e52..168224eda 100644 --- a/src/shared/clock/time_extent.rs +++ b/src/shared/clock/time_extent.rs @@ -542,9 +542,11 @@ mod test { mod make_time_extent { mod fn_now { + use torrust_tracker_primitives::DurationSinceUnixEpoch; + use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make, TimeExtent}; - use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; + use crate::shared::clock::{Current, StoppedTime}; #[test] fn it_should_give_a_time_extent() { @@ -582,9 +584,11 @@ mod test { mod fn_now_after { use std::time::Duration; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make}; - use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; + use crate::shared::clock::{Current, StoppedTime}; #[test] fn it_should_give_a_time_extent() { @@ -621,8 +625,10 @@ mod test { mod fn_now_before { use std::time::Duration; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make, TimeExtent}; - use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; + use crate::shared::clock::{Current, StoppedTime}; #[test] fn it_should_give_a_time_extent() { diff --git a/src/shared/clock/utils.rs b/src/shared/clock/utils.rs index 94d88d288..8b1378917 100644 --- a/src/shared/clock/utils.rs +++ b/src/shared/clock/utils.rs @@ -1,11 +1 @@ -//! It contains helper functions related to time. -use super::DurationSinceUnixEpoch; -/// Serializes a `DurationSinceUnixEpoch` as a Unix timestamp in milliseconds. -/// # Errors -/// -/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`. 
-pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { - #[allow(clippy::cast_possible_truncation)] - ser.serialize_u64(unix_time_value.as_millis() as u64) -} diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 186b7ea3b..8d91f3ae8 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -4,12 +4,12 @@ use std::sync::Arc; use futures::executor::block_on; use torrust_tracker::bootstrap::app::initialize_with_configuration; use torrust_tracker::bootstrap::jobs::make_rust_tls; -use torrust_tracker::core::peer::Peer; use torrust_tracker::core::Tracker; use torrust_tracker::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; use torrust_tracker::servers::registar::Registar; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_configuration::{Configuration, HttpApi}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; use super::connection_info::ConnectionInfo; @@ -22,7 +22,7 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker - pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; } } diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 54263f8b8..af6587673 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -1,8 +1,8 @@ use std::str::FromStr; -use torrust_tracker::core::peer::fixture::PeerBuilder; use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::fixture::PeerBuilder; use 
torrust_tracker_test_helpers::configuration; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index ee701ecc4..d54935f80 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -1,9 +1,9 @@ use std::str::FromStr; -use torrust_tracker::core::peer::fixture::PeerBuilder; use torrust_tracker::servers::apis::v1::context::torrent::resources::peer::Peer; use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use crate::common::http::{Query, QueryParam}; diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index 358a4a19e..29064ec9e 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -1,6 +1,6 @@ use std::str::FromStr; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 326f4e534..5638713aa 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -3,12 +3,12 @@ use std::sync::Arc; use futures::executor::block_on; use torrust_tracker::bootstrap::app::initialize_with_configuration; use torrust_tracker::bootstrap::jobs::make_rust_tls; -use torrust_tracker::core::peer::Peer; use torrust_tracker::core::Tracker; use 
torrust_tracker::servers::http::server::{HttpServer, Launcher, Running, Stopped}; use torrust_tracker::servers::registar::Registar; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_configuration::{Configuration, HttpTracker}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; pub struct Environment { pub config: Arc, @@ -19,7 +19,7 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker - pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; } } diff --git a/tests/servers/http/requests/announce.rs b/tests/servers/http/requests/announce.rs index 2cc615d0f..061990621 100644 --- a/tests/servers/http/requests/announce.rs +++ b/tests/servers/http/requests/announce.rs @@ -3,8 +3,8 @@ use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; use serde_repr::Serialize_repr; -use torrust_tracker::core::peer::Id; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; use crate::servers::http::{percent_encode_byte_array, ByteArray20}; @@ -93,7 +93,7 @@ impl QueryBuilder { peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), downloaded: 0, uploaded: 0, - peer_id: Id(*b"-qB00000000000000001").0, + peer_id: peer::Id(*b"-qB00000000000000001").0, port: 17548, left: 0, event: Some(Event::Completed), @@ -109,7 +109,7 @@ impl QueryBuilder { self } - pub fn with_peer_id(mut self, peer_id: &Id) -> Self { + pub fn with_peer_id(mut self, peer_id: &peer::Id) -> Self { self.announce_query.peer_id = peer_id.0; self } diff --git a/tests/servers/http/requests/scrape.rs b/tests/servers/http/requests/scrape.rs index 264c72c33..f66605855 100644 --- a/tests/servers/http/requests/scrape.rs +++ 
b/tests/servers/http/requests/scrape.rs @@ -1,7 +1,7 @@ use std::fmt; use std::str::FromStr; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; use crate::servers::http::{percent_encode_byte_array, ByteArray20}; diff --git a/tests/servers/http/responses/announce.rs b/tests/servers/http/responses/announce.rs index 968c327eb..2b49b4405 100644 --- a/tests/servers/http/responses/announce.rs +++ b/tests/servers/http/responses/announce.rs @@ -1,7 +1,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use serde::{Deserialize, Serialize}; -use torrust_tracker::core::peer::Peer; +use torrust_tracker_primitives::peer; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Announce { @@ -22,8 +22,8 @@ pub struct DictionaryPeer { pub port: u16, } -impl From for DictionaryPeer { - fn from(peer: Peer) -> Self { +impl From for DictionaryPeer { + fn from(peer: peer::Peer) -> Self { DictionaryPeer { peer_id: peer.peer_id.to_bytes().to_vec(), ip: peer.peer_addr.ip().to_string(), diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index be285dcd7..a7962db0f 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -89,9 +89,9 @@ mod for_all_config_modes { use local_ip_address::local_ip; use reqwest::{Response, StatusCode}; use tokio::net::TcpListener; - use torrust_tracker::core::peer; - use torrust_tracker::core::peer::fixture::PeerBuilder; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::invalid_info_hashes; @@ -750,7 +750,7 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.tracker.get_torrent_peers(&info_hash).await; + let peers = 
env.tracker.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), client_ip); @@ -786,7 +786,7 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.tracker.get_torrent_peers(&info_hash).await; + let peers = env.tracker.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap()); @@ -826,7 +826,7 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.tracker.get_torrent_peers(&info_hash).await; + let peers = env.tracker.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap()); @@ -864,7 +864,7 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.tracker.get_torrent_peers(&info_hash).await; + let peers = env.tracker.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); @@ -887,9 +887,9 @@ mod for_all_config_modes { use std::str::FromStr; use tokio::net::TcpListener; - use torrust_tracker::core::peer; - use torrust_tracker::core::peer::fixture::PeerBuilder; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::invalid_info_hashes; @@ -1113,7 +1113,7 @@ mod configured_as_whitelisted { mod and_receiving_an_announce_request { use std::str::FromStr; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::servers::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; @@ -1160,9 +1160,9 @@ 
mod configured_as_whitelisted { mod receiving_an_scrape_request { use std::str::FromStr; - use torrust_tracker::core::peer; - use torrust_tracker::core::peer::fixture::PeerBuilder; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use crate::servers::http::asserts::assert_scrape_response; @@ -1253,7 +1253,7 @@ mod configured_as_private { use std::time::Duration; use torrust_tracker::core::auth::Key; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::servers::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; @@ -1329,9 +1329,9 @@ mod configured_as_private { use std::time::Duration; use torrust_tracker::core::auth::Key; - use torrust_tracker::core::peer; - use torrust_tracker::core::peer::fixture::PeerBuilder; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use crate::servers::http::asserts::{assert_authentication_error_response, assert_scrape_response}; diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index da7705016..12f4aeb9e 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -2,12 +2,12 @@ use std::net::SocketAddr; use std::sync::Arc; use torrust_tracker::bootstrap::app::initialize_with_configuration; -use torrust_tracker::core::peer::Peer; use torrust_tracker::core::Tracker; use torrust_tracker::servers::registar::Registar; use torrust_tracker::servers::udp::server::{Launcher, Running, Stopped, 
UdpServer}; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_configuration::{Configuration, UdpTracker}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; pub struct Environment { pub config: Arc, @@ -19,7 +19,7 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker #[allow(dead_code)] - pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { + pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; } } From 03883c00d606ba0e5d23849852b1aad7be3c1e03 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 3 Mar 2024 03:56:06 +0800 Subject: [PATCH 0774/1003] dev: repository benchmark uses criterion --- Cargo.lock | 4 +- packages/torrent-repository/Cargo.toml | 8 +- .../benches/helpers/args.rs | 15 -- .../benches/helpers/asyn.rs | 199 ++++++++---------- .../torrent-repository/benches/helpers/mod.rs | 1 - .../benches/helpers/sync.rs | 179 +++++++--------- .../benches/helpers/utils.rs | 33 --- .../benches/repository-benchmark.rs | 187 ---------------- .../benches/repository_benchmark.rs | 191 +++++++++++++++++ 9 files changed, 363 insertions(+), 454 deletions(-) delete mode 100644 packages/torrent-repository/benches/helpers/args.rs delete mode 100644 packages/torrent-repository/benches/repository-benchmark.rs create mode 100644 packages/torrent-repository/benches/repository_benchmark.rs diff --git a/Cargo.lock b/Cargo.lock index 8ec922448..b8437326c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -762,6 +762,7 @@ dependencies = [ "ciborium", "clap", "criterion-plot", + "futures", "is-terminal", "itertools 0.10.5", "num-traits", @@ -774,6 +775,7 @@ dependencies = [ "serde_derive", "serde_json", "tinytemplate", + "tokio", "walkdir", ] @@ -3557,7 +3559,7 @@ dependencies = [ name = "torrust-tracker-torrent-repository" version = "3.0.0-alpha.12-develop" dependencies = [ 
- "clap", + "criterion", "futures", "serde", "tokio", diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 0df82a2c6..b53b9a15e 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -16,9 +16,15 @@ rust-version.workspace = true version.workspace = true [dependencies] -clap = { version = "4.4.8", features = ["derive"] } futures = "0.3.29" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } serde = { version = "1", features = ["derive"] } + +[dev-dependencies] +criterion = { version = "0", features = ["async_tokio"] } + +[[bench]] +harness = false +name = "repository_benchmark" diff --git a/packages/torrent-repository/benches/helpers/args.rs b/packages/torrent-repository/benches/helpers/args.rs deleted file mode 100644 index 3a38c55a7..000000000 --- a/packages/torrent-repository/benches/helpers/args.rs +++ /dev/null @@ -1,15 +0,0 @@ -use clap::Parser; - -#[derive(Parser, Debug)] -#[command(author, version, about, long_about = None)] -pub struct Args { - /// Amount of benchmark worker threads - #[arg(short, long)] - pub threads: usize, - /// Amount of time in ns a thread will sleep to simulate a client response after handling a task - #[arg(short, long)] - pub sleep: Option, - /// Compare with old implementations of the torrent repository - #[arg(short, long)] - pub compare: Option, -} diff --git a/packages/torrent-repository/benches/helpers/asyn.rs b/packages/torrent-repository/benches/helpers/asyn.rs index 4fb37104f..80f70cdc2 100644 --- a/packages/torrent-repository/benches/helpers/asyn.rs +++ b/packages/torrent-repository/benches/helpers/asyn.rs @@ -1,182 +1,155 @@ use std::sync::Arc; -use std::time::Duration; +use std::time::{Duration, Instant}; 
-use clap::Parser; use futures::stream::FuturesUnordered; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_torrent_repository::repository::RepositoryAsync; -use super::args::Args; -use super::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; +use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; -pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) +pub async fn add_one_torrent(samples: u64) -> Duration where V: RepositoryAsync + Default, { - let mut results: Vec = Vec::with_capacity(samples); + let start = Instant::now(); for _ in 0..samples { let torrent_repository = V::default(); let info_hash = InfoHash([0; 20]); - let start_time = std::time::Instant::now(); - torrent_repository .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) .await; - - let result = start_time.elapsed(); - - results.push(result); } - get_average_and_adjusted_average_from_results(results) + start.elapsed() } // Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option) -> Duration where V: RepositoryAsync + Default, Arc: Clone + Send + Sync + 'static, { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::::default(); - let info_hash: &'static InfoHash = &InfoHash([0; 20]); - let handles = FuturesUnordered::new(); - - // Add the torrent/peer to the torrent repository - torrent_repository - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; + let torrent_repository = Arc::::default(); + let info_hash: &'static InfoHash = &InfoHash([0; 20]); + let handles = FuturesUnordered::new(); - let start_time = 
std::time::Instant::now(); + // Add the torrent/peer to the torrent repository + torrent_repository + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; - for _ in 0..10_000 { - let torrent_repository_clone = torrent_repository.clone(); + let start = Instant::now(); - let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; - - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); + for _ in 0..samples { + let torrent_repository_clone = torrent_repository.clone(); - handles.push(handle); - } + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; - // Await all tasks - futures::future::join_all(handles).await; + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); - let result = start_time.elapsed(); + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); - results.push(result); + handles.push(handle); } - get_average_and_adjusted_average_from_results(results) + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() } // Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn add_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration where V: RepositoryAsync + Default, Arc: Clone + Send + Sync + 'static, { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::::default(); - let info_hashes = generate_unique_info_hashes(10_000); - let handles = FuturesUnordered::new(); - - let 
start_time = std::time::Instant::now(); + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in a usize")); + let handles = FuturesUnordered::new(); - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); + let start = Instant::now(); - let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) - .await; + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) + .await; - // Await all tasks - futures::future::join_all(handles).await; + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); - let result = start_time.elapsed(); + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); - results.push(result); + handles.push(handle); } - get_average_and_adjusted_average_from_results(results) + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() } // Async update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration where V: RepositoryAsync + Default, Arc: Clone + Send + Sync + 'static, { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::::default(); - let info_hashes = 
generate_unique_info_hashes(10_000); - let handles = FuturesUnordered::new(); - - // Add the torrents/peers to the torrent repository - for info_hash in &info_hashes { - torrent_repository - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; - } + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in usize")); + let handles = FuturesUnordered::new(); - let start_time = std::time::Instant::now(); - - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) - .await; + // Add the torrents/peers to the torrent repository + for info_hash in &info_hashes { + torrent_repository + .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) + .await; + } - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); + let start = Instant::now(); - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); - handles.push(handle); - } + let handle = runtime.spawn(async move { + torrent_repository_clone + .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) + .await; - // Await all tasks - futures::future::join_all(handles).await; + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); - let result = start_time.elapsed(); + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); - results.push(result); + handles.push(handle); } - get_average_and_adjusted_average_from_results(results) + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() } diff --git a/packages/torrent-repository/benches/helpers/mod.rs b/packages/torrent-repository/benches/helpers/mod.rs index 
758c123bd..1026aa4bf 100644 --- a/packages/torrent-repository/benches/helpers/mod.rs +++ b/packages/torrent-repository/benches/helpers/mod.rs @@ -1,4 +1,3 @@ -pub mod args; pub mod asyn; pub mod sync; pub mod utils; diff --git a/packages/torrent-repository/benches/helpers/sync.rs b/packages/torrent-repository/benches/helpers/sync.rs index aa2f8188a..0523f4141 100644 --- a/packages/torrent-repository/benches/helpers/sync.rs +++ b/packages/torrent-repository/benches/helpers/sync.rs @@ -1,172 +1,145 @@ use std::sync::Arc; -use std::time::Duration; +use std::time::{Duration, Instant}; -use clap::Parser; use futures::stream::FuturesUnordered; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_torrent_repository::repository::Repository; -use super::args::Args; -use super::utils::{generate_unique_info_hashes, get_average_and_adjusted_average_from_results, DEFAULT_PEER}; +use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; // Simply add one torrent #[must_use] -pub fn add_one_torrent(samples: usize) -> (Duration, Duration) +pub fn add_one_torrent(samples: u64) -> Duration where V: Repository + Default, { - let mut results: Vec = Vec::with_capacity(samples); + let start = Instant::now(); for _ in 0..samples { let torrent_repository = V::default(); let info_hash = InfoHash([0; 20]); - let start_time = std::time::Instant::now(); - torrent_repository.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); - - let result = start_time.elapsed(); - - results.push(result); } - get_average_and_adjusted_average_from_results(results) + start.elapsed() } // Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option) -> Duration where V: Repository + Default, Arc: Clone + Send + Sync + 
'static, { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::::default(); - let info_hash: &'static InfoHash = &InfoHash([0; 20]); - let handles = FuturesUnordered::new(); - - // Add the torrent/peer to the torrent repository - torrent_repository.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); + let torrent_repository = Arc::::default(); + let info_hash: &'static InfoHash = &InfoHash([0; 20]); + let handles = FuturesUnordered::new(); - let start_time = std::time::Instant::now(); + // Add the torrent/peer to the torrent repository + torrent_repository.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); - for _ in 0..10_000 { - let torrent_repository_clone = torrent_repository.clone(); + let start = Instant::now(); - let handle = runtime.spawn(async move { - torrent_repository_clone.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); - - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); + for _ in 0..samples { + let torrent_repository_clone = torrent_repository.clone(); - handles.push(handle); - } + let handle = runtime.spawn(async move { + torrent_repository_clone.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); - // Await all tasks - futures::future::join_all(handles).await; + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); - let result = start_time.elapsed(); + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); - results.push(result); + handles.push(handle); } - get_average_and_adjusted_average_from_results(results) + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() } // Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel(runtime: 
&tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn add_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration where V: Repository + Default, Arc: Clone + Send + Sync + 'static, { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::::default(); - let info_hashes = generate_unique_info_hashes(10_000); - let handles = FuturesUnordered::new(); + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in a usize")); + let handles = FuturesUnordered::new(); - let start_time = std::time::Instant::now(); + let start = Instant::now(); - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); - let handle = runtime.spawn(async move { - torrent_repository_clone.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); + let handle = runtime.spawn(async move { + torrent_repository_clone.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - let result = start_time.elapsed(); - - results.push(result); + handles.push(handle); } - get_average_and_adjusted_average_from_results(results) + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() } // Update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn 
update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) +pub async fn update_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration where V: Repository + Default, Arc: Clone + Send + Sync + 'static, { - let args = Args::parse(); - let mut results: Vec = Vec::with_capacity(samples); - - for _ in 0..samples { - let torrent_repository = Arc::::default(); - let info_hashes = generate_unique_info_hashes(10_000); - let handles = FuturesUnordered::new(); + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in usize")); + let handles = FuturesUnordered::new(); - // Add the torrents/peers to the torrent repository - for info_hash in &info_hashes { - torrent_repository.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); - } - - let start_time = std::time::Instant::now(); - - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); + // Add the torrents/peers to the torrent repository + for info_hash in &info_hashes { + torrent_repository.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); + } - if let Some(sleep_time) = args.sleep { - let start_time = std::time::Instant::now(); + let start = Instant::now(); - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); - handles.push(handle); - } + let handle = runtime.spawn(async move { + torrent_repository_clone.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); - // Await all tasks - futures::future::join_all(handles).await; + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); - 
let result = start_time.elapsed(); + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); - results.push(result); + handles.push(handle); } - get_average_and_adjusted_average_from_results(results) + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() } diff --git a/packages/torrent-repository/benches/helpers/utils.rs b/packages/torrent-repository/benches/helpers/utils.rs index aed9f40cf..170194806 100644 --- a/packages/torrent-repository/benches/helpers/utils.rs +++ b/packages/torrent-repository/benches/helpers/utils.rs @@ -1,6 +1,5 @@ use std::collections::HashSet; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::time::Duration; use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::info_hash::InfoHash; @@ -39,35 +38,3 @@ pub fn generate_unique_info_hashes(size: usize) -> Vec { result.into_iter().collect() } - -#[must_use] -pub fn within_acceptable_range(test: &Duration, norm: &Duration) -> bool { - let test_secs = test.as_secs_f64(); - let norm_secs = norm.as_secs_f64(); - - // Calculate the upper and lower bounds for the 10% tolerance - let tolerance = norm_secs * 0.1; - - // Calculate the upper and lower limits - let upper_limit = norm_secs + tolerance; - let lower_limit = norm_secs - tolerance; - - test_secs < upper_limit && test_secs > lower_limit -} - -#[must_use] -pub fn get_average_and_adjusted_average_from_results(mut results: Vec) -> (Duration, Duration) { - #[allow(clippy::cast_possible_truncation)] - let average = results.iter().sum::() / results.len() as u32; - - results.retain(|result| within_acceptable_range(result, &average)); - - let mut adjusted_average = Duration::from_nanos(0); - - #[allow(clippy::cast_possible_truncation)] - if results.len() > 1 { - adjusted_average = results.iter().sum::() / results.len() as u32; - } - - (average, adjusted_average) -} diff --git a/packages/torrent-repository/benches/repository-benchmark.rs 
b/packages/torrent-repository/benches/repository-benchmark.rs deleted file mode 100644 index bff34b256..000000000 --- a/packages/torrent-repository/benches/repository-benchmark.rs +++ /dev/null @@ -1,187 +0,0 @@ -mod helpers; - -use clap::Parser; -use torrust_tracker_torrent_repository::{ - TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, - TorrentsRwLockTokioMutexTokio, -}; - -use crate::helpers::args::Args; -use crate::helpers::{asyn, sync}; - -#[allow(clippy::too_many_lines)] -#[allow(clippy::print_literal)] -fn main() { - let args = Args::parse(); - - // Add 1 to worker_threads since we need a thread that awaits the benchmark - let rt = tokio::runtime::Builder::new_multi_thread() - .worker_threads(args.threads + 1) - .enable_time() - .build() - .unwrap(); - - println!("TorrentsRwLockTokio"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) - ); - - if let Some(true) = args.compare { - println!(); - - println!("TorrentsRwLockStd"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - sync::add_one_torrent::(1_000_000) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_one_torrent_in_parallel", - rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", 
- rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) - ); - - println!(); - - println!("TorrentsRwLockStdMutexStd"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - sync::add_one_torrent::(1_000_000) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_one_torrent_in_parallel", - rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(sync::add_multiple_torrents_in_parallel::( - &rt, 10 - )) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", - rt.block_on(sync::update_multiple_torrents_in_parallel::( - &rt, 10 - )) - ); - - println!(); - - println!("TorrentsRwLockStdMutexTokio"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::( - &rt, 10 - )) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::( - &rt, 10 - )) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::( - &rt, 10 - )) - ); - - println!(); - - println!("TorrentsRwLockTokioMutexStd"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::( - &rt, 10 - )) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::( - &rt, 10 - )) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::( - &rt, 10 - )) - ); - - println!(); - - 
println!("TorrentsRwLockTokioMutexTokio"); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::( - &rt, 10 - )) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::( - &rt, 10 - )) - ); - println!( - "{}: Avg/AdjAvg: {:?}", - "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) - ); - } -} diff --git a/packages/torrent-repository/benches/repository_benchmark.rs b/packages/torrent-repository/benches/repository_benchmark.rs new file mode 100644 index 000000000..a3684c8e2 --- /dev/null +++ b/packages/torrent-repository/benches/repository_benchmark.rs @@ -0,0 +1,191 @@ +use std::time::Duration; + +mod helpers; + +use criterion::{criterion_group, criterion_main, Criterion}; +use torrust_tracker_torrent_repository::{ + TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, + TorrentsRwLockTokioMutexTokio, +}; + +use crate::helpers::{asyn, sync}; + +fn add_one_torrent(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("add_one_torrent"); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt).iter_custom(asyn::add_one_torrent::); + }); + + 
group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::); + }); + + group.finish(); +} + +fn add_multiple_torrents_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("add_multiple_torrents_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.finish(); +} + +fn update_one_torrent_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("update_one_torrent_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + 
group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.finish(); +} + +fn update_multiple_torrents_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("update_multiple_torrents_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + 
group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt).iter_custom(|iters| { + asyn::update_multiple_torrents_in_parallel::(&rt, iters, None) + }); + }); + + group.finish(); +} + +criterion_group!( + benches, + add_one_torrent, + add_multiple_torrents_in_parallel, + update_one_torrent_in_parallel, + update_multiple_torrents_in_parallel +); +criterion_main!(benches); From 3e0745b757f80ac0b5efce0e7c9459c8218cee73 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 17 Mar 2024 11:23:25 +0800 Subject: [PATCH 0775/1003] dev: extract clock to new package --- Cargo.lock | 54 ++- Cargo.toml | 1 + cSpell.json | 2 + packages/clock/Cargo.toml | 24 ++ packages/clock/README.md | 11 + packages/clock/src/clock/mod.rs | 72 ++++ packages/clock/src/clock/stopped/mod.rs | 210 ++++++++++ packages/clock/src/clock/working/mod.rs | 18 + packages/clock/src/conv/mod.rs | 82 ++++ packages/clock/src/lib.rs | 53 +++ .../clock/src/static_time/mod.rs | 0 .../clock/src/time_extent/mod.rs | 99 +++-- packages/clock/tests/clock/mod.rs | 16 + packages/clock/tests/integration.rs | 19 + packages/configuration/src/lib.rs | 3 + packages/torrent-repository/Cargo.toml | 5 +- packages/torrent-repository/src/entry/mod.rs | 6 +- .../torrent-repository/src/entry/single.rs | 299 +++++++++++++ packages/torrent-repository/src/lib.rs | 13 + src/bootstrap/app.rs | 2 +- src/core/auth.rs | 20 +- src/core/databases/mod.rs | 4 +- src/core/mod.rs | 15 +- src/core/peer_tests.rs | 8 +- src/core/torrent/mod.rs | 298 +------------ src/lib.rs | 24 ++ .../apis/v1/context/auth_key/resources.rs | 15 +- src/servers/http/mod.rs | 2 +- src/servers/http/v1/handlers/announce.rs | 5 +- 
src/servers/udp/connection_cookie.rs | 24 +- src/servers/udp/handlers.rs | 5 +- src/servers/udp/peer_builder.rs | 5 +- src/shared/clock/mod.rs | 393 ------------------ src/shared/clock/utils.rs | 1 - src/shared/mod.rs | 2 - tests/common/clock.rs | 16 + tests/common/mod.rs | 1 + tests/integration.rs | 13 + 38 files changed, 1050 insertions(+), 790 deletions(-) create mode 100644 packages/clock/Cargo.toml create mode 100644 packages/clock/README.md create mode 100644 packages/clock/src/clock/mod.rs create mode 100644 packages/clock/src/clock/stopped/mod.rs create mode 100644 packages/clock/src/clock/working/mod.rs create mode 100644 packages/clock/src/conv/mod.rs create mode 100644 packages/clock/src/lib.rs rename src/shared/clock/static_time.rs => packages/clock/src/static_time/mod.rs (100%) rename src/shared/clock/time_extent.rs => packages/clock/src/time_extent/mod.rs (85%) create mode 100644 packages/clock/tests/clock/mod.rs create mode 100644 packages/clock/tests/integration.rs delete mode 100644 src/shared/clock/mod.rs delete mode 100644 src/shared/clock/utils.rs create mode 100644 tests/common/clock.rs diff --git a/Cargo.lock b/Cargo.lock index b8437326c..e28278abb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1209,6 +1209,12 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +[[package]] +name = "futures-timer" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" + [[package]] name = "futures-util" version = "0.3.30" @@ -2560,6 +2566,12 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +[[package]] +name = "relative-path" +version = "1.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e898588f33fdd5b9420719948f9f2a32c922a246964576f71ba7f24f80610fbc" + [[package]] name = "rend" version = "0.4.2" @@ -2676,6 +2688,35 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "rstest" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97eeab2f3c0a199bc4be135c36c924b6590b88c377d416494288c14f2db30199" +dependencies = [ + "futures", + "futures-timer", + "rstest_macros", + "rustc_version", +] + +[[package]] +name = "rstest_macros" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" +dependencies = [ + "cfg-if", + "glob", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version", + "syn 2.0.53", + "unicode-ident", +] + [[package]] name = "rusqlite" version = "0.31.0" @@ -3490,6 +3531,7 @@ dependencies = [ "serde_repr", "thiserror", "tokio", + "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-contrib-bencode", "torrust-tracker-located-error", @@ -3503,6 +3545,15 @@ dependencies = [ "uuid", ] +[[package]] +name = "torrust-tracker-clock" +version = "3.0.0-alpha.12-develop" +dependencies = [ + "chrono", + "lazy_static", + "torrust-tracker-primitives", +] + [[package]] name = "torrust-tracker-configuration" version = "3.0.0-alpha.12-develop" @@ -3561,8 +3612,9 @@ version = "3.0.0-alpha.12-develop" dependencies = [ "criterion", "futures", - "serde", + "rstest", "tokio", + "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-primitives", ] diff --git a/Cargo.toml b/Cargo.toml index 9610fffc2..99b7a334a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -64,6 +64,7 @@ serde_repr = "0" thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "packages/configuration" } +torrust-tracker-clock = { version = 
"3.0.0-alpha.12-develop", path = "packages/clock" } torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12-develop", path = "contrib/bencode" } torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "packages/located-error" } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "packages/primitives" } diff --git a/cSpell.json b/cSpell.json index 6d5f71b85..1e276dbc2 100644 --- a/cSpell.json +++ b/cSpell.json @@ -34,6 +34,7 @@ "completei", "connectionless", "Containerfile", + "conv", "curr", "Cyberneering", "dashmap", @@ -116,6 +117,7 @@ "rngs", "rosegment", "routable", + "rstest", "rusqlite", "RUSTDOCFLAGS", "RUSTFLAGS", diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml new file mode 100644 index 000000000..d7192b6e4 --- /dev/null +++ b/packages/clock/Cargo.toml @@ -0,0 +1,24 @@ +[package] +description = "A library to a clock for the torrust tracker." +keywords = ["library", "clock", "torrents"] +name = "torrust-tracker-clock" +readme = "README.md" + +authors.workspace = true +categories.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +lazy_static = "1" +chrono = { version = "0", default-features = false, features = ["clock"] } + +torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } + +[dev-dependencies] diff --git a/packages/clock/README.md b/packages/clock/README.md new file mode 100644 index 000000000..bfdd7808f --- /dev/null +++ b/packages/clock/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker Clock + +A library to provide a working and mockable clock for the [Torrust Tracker](https://github.com/torrust/torrust-tracker). + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-torrent-clock). 
+ +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/clock/src/clock/mod.rs b/packages/clock/src/clock/mod.rs new file mode 100644 index 000000000..50afbc9db --- /dev/null +++ b/packages/clock/src/clock/mod.rs @@ -0,0 +1,72 @@ +use std::time::Duration; + +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use self::stopped::StoppedClock; +use self::working::WorkingClock; + +pub mod stopped; +pub mod working; + +/// A generic structure that represents a clock. +/// +/// It can be either the working clock (production) or the stopped clock +/// (testing). It implements the `Time` trait, which gives you the current time. +#[derive(Debug)] +pub struct Clock { + clock: std::marker::PhantomData, +} + +/// The working clock. It returns the current time. +pub type Working = Clock; +/// The stopped clock. It returns always the same fixed time. +pub type Stopped = Clock; + +/// Trait for types that can be used as a timestamp clock. +pub trait Time: Sized { + fn now() -> DurationSinceUnixEpoch; + + fn dbg_clock_type() -> String; + + #[must_use] + fn now_add(add_time: &Duration) -> Option { + Self::now().checked_add(*add_time) + } + #[must_use] + fn now_sub(sub_time: &Duration) -> Option { + Self::now().checked_sub(*sub_time) + } +} + +#[cfg(test)] +mod tests { + use std::any::TypeId; + use std::time::Duration; + + use crate::clock::{self, Stopped, Time, Working}; + use crate::CurrentClock; + + #[test] + fn it_should_be_the_stopped_clock_as_default_when_testing() { + // We are testing, so we should default to the fixed time. 
+ assert_eq!(TypeId::of::(), TypeId::of::()); + assert_eq!(Stopped::now(), CurrentClock::now()); + } + + #[test] + fn it_should_have_different_times() { + assert_ne!(TypeId::of::(), TypeId::of::()); + assert_ne!(Stopped::now(), Working::now()); + } + + #[test] + fn it_should_use_stopped_time_for_testing() { + assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned()); + + let time = CurrentClock::now(); + std::thread::sleep(Duration::from_millis(50)); + let time_2 = CurrentClock::now(); + + assert_eq!(time, time_2); + } +} diff --git a/packages/clock/src/clock/stopped/mod.rs b/packages/clock/src/clock/stopped/mod.rs new file mode 100644 index 000000000..57655ab75 --- /dev/null +++ b/packages/clock/src/clock/stopped/mod.rs @@ -0,0 +1,210 @@ +/// Trait for types that can be used as a timestamp clock stopped +/// at a given time. + +#[allow(clippy::module_name_repetitions)] +pub struct StoppedClock {} + +#[allow(clippy::module_name_repetitions)] +pub trait Stopped: clock::Time { + /// It sets the clock to a given time. + fn local_set(unix_time: &DurationSinceUnixEpoch); + + /// It sets the clock to the Unix Epoch. + fn local_set_to_unix_epoch() { + Self::local_set(&DurationSinceUnixEpoch::ZERO); + } + + /// It sets the clock to the time the application started. + fn local_set_to_app_start_time(); + + /// It sets the clock to the current system time. + fn local_set_to_system_time_now(); + + /// It adds a `Duration` to the clock. + /// + /// # Errors + /// + /// Will return `IntErrorKind` if `duration` would overflow the internal `Duration`. + fn local_add(duration: &Duration) -> Result<(), IntErrorKind>; + + /// It subtracts a `Duration` from the clock. + /// # Errors + /// + /// Will return `IntErrorKind` if `duration` would underflow the internal `Duration`. + fn local_sub(duration: &Duration) -> Result<(), IntErrorKind>; + + /// It resets the clock to default fixed time that is application start time (or the unix epoch when testing). 
+ fn local_reset(); +} + +use std::num::IntErrorKind; +use std::time::Duration; + +use super::{DurationSinceUnixEpoch, Time}; +use crate::clock; + +impl Time for clock::Stopped { + fn now() -> DurationSinceUnixEpoch { + detail::FIXED_TIME.with(|time| { + return *time.borrow(); + }) + } + + fn dbg_clock_type() -> String { + "Stopped".to_owned() + } +} + +impl Stopped for clock::Stopped { + fn local_set(unix_time: &DurationSinceUnixEpoch) { + detail::FIXED_TIME.with(|time| { + *time.borrow_mut() = *unix_time; + }); + } + + fn local_set_to_app_start_time() { + Self::local_set(&detail::get_app_start_time()); + } + + fn local_set_to_system_time_now() { + Self::local_set(&detail::get_app_start_time()); + } + + fn local_add(duration: &Duration) -> Result<(), IntErrorKind> { + detail::FIXED_TIME.with(|time| { + let time_borrowed = *time.borrow(); + *time.borrow_mut() = match time_borrowed.checked_add(*duration) { + Some(time) => time, + None => { + return Err(IntErrorKind::PosOverflow); + } + }; + Ok(()) + }) + } + + fn local_sub(duration: &Duration) -> Result<(), IntErrorKind> { + detail::FIXED_TIME.with(|time| { + let time_borrowed = *time.borrow(); + *time.borrow_mut() = match time_borrowed.checked_sub(*duration) { + Some(time) => time, + None => { + return Err(IntErrorKind::NegOverflow); + } + }; + Ok(()) + }) + } + + fn local_reset() { + Self::local_set(&detail::get_default_fixed_time()); + } +} + +#[cfg(test)] +mod tests { + use std::thread; + use std::time::Duration; + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::clock::stopped::Stopped as _; + use crate::clock::{Stopped, Time, Working}; + + #[test] + fn it_should_default_to_zero_when_testing() { + assert_eq!(Stopped::now(), DurationSinceUnixEpoch::ZERO); + } + + #[test] + fn it_should_possible_to_set_the_time() { + // Check we start with ZERO. 
+ assert_eq!(Stopped::now(), Duration::ZERO); + + // Set to Current Time and Check + let timestamp = Working::now(); + Stopped::local_set(&timestamp); + assert_eq!(Stopped::now(), timestamp); + + // Elapse the Current Time and Check + Stopped::local_add(&timestamp).unwrap(); + assert_eq!(Stopped::now(), timestamp + timestamp); + + // Reset to ZERO and Check + Stopped::local_reset(); + assert_eq!(Stopped::now(), Duration::ZERO); + } + + #[test] + fn it_should_default_to_zero_on_thread_exit() { + assert_eq!(Stopped::now(), Duration::ZERO); + let after5 = Working::now_add(&Duration::from_secs(5)).unwrap(); + Stopped::local_set(&after5); + assert_eq!(Stopped::now(), after5); + + let t = thread::spawn(move || { + // each thread starts out with the initial value of ZERO + assert_eq!(Stopped::now(), Duration::ZERO); + + // and gets set to the current time. + let timestamp = Working::now(); + Stopped::local_set(&timestamp); + assert_eq!(Stopped::now(), timestamp); + }); + + // wait for the thread to complete and bail out on panic + t.join().unwrap(); + + // we retain our original value of current time + 5sec despite the child thread + assert_eq!(Stopped::now(), after5); + + // Reset to ZERO and Check + Stopped::local_reset(); + assert_eq!(Stopped::now(), Duration::ZERO); + } +} + +mod detail { + use std::cell::RefCell; + use std::time::SystemTime; + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::static_time; + + thread_local!(pub static FIXED_TIME: RefCell<DurationSinceUnixEpoch> = RefCell::new(get_default_fixed_time())); + + pub fn get_app_start_time() -> DurationSinceUnixEpoch { + (*static_time::TIME_AT_APP_START) + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + } + + #[cfg(not(test))] + pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { + get_app_start_time() + } + + #[cfg(test)] + pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::ZERO + } + + #[cfg(test)] + mod tests { + use std::time::Duration; + + use 
crate::clock::stopped::detail::{get_app_start_time, get_default_fixed_time}; + + #[test] + fn it_should_get_the_zero_start_time_when_testing() { + assert_eq!(get_default_fixed_time(), Duration::ZERO); + } + + #[test] + fn it_should_get_app_start_time() { + const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1_662_983_731, 22312); + assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST); + } + } +} diff --git a/packages/clock/src/clock/working/mod.rs b/packages/clock/src/clock/working/mod.rs new file mode 100644 index 000000000..6d0b4dcf7 --- /dev/null +++ b/packages/clock/src/clock/working/mod.rs @@ -0,0 +1,18 @@ +use std::time::SystemTime; + +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::clock; + +#[allow(clippy::module_name_repetitions)] +pub struct WorkingClock; + +impl clock::Time for clock::Working { + fn now() -> DurationSinceUnixEpoch { + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap() + } + + fn dbg_clock_type() -> String { + "Working".to_owned() + } +} diff --git a/packages/clock/src/conv/mod.rs b/packages/clock/src/conv/mod.rs new file mode 100644 index 000000000..f70950c38 --- /dev/null +++ b/packages/clock/src/conv/mod.rs @@ -0,0 +1,82 @@ +use std::str::FromStr; + +use chrono::{DateTime, Utc}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +/// It converts a string in ISO 8601 format to a timestamp. +/// For example, the string `1970-01-01T00:00:00.000Z` which is the Unix Epoch +/// will be converted to a timestamp of 0: `DurationSinceUnixEpoch::ZERO`. +/// +/// # Panics +/// +/// Will panic if the input time cannot be converted to `DateTime::`, internally using the `i64` type. +/// (this will naturally happen in 292.5 billion years) +#[must_use] +pub fn convert_from_iso_8601_to_timestamp(iso_8601: &str) -> DurationSinceUnixEpoch { + convert_from_datetime_utc_to_timestamp(&DateTime::::from_str(iso_8601).unwrap()) +} + +/// It converts a `DateTime::` to a timestamp. 
+/// For example, the `DateTime::<Utc>` of the Unix Epoch will be converted to a +/// timestamp of 0: `DurationSinceUnixEpoch::ZERO`. +/// +/// # Panics +/// +/// Will panic if the input time overflows the `u64` type. +/// (this will naturally happen in 584.9 billion years) +#[must_use] +pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime<Utc>) -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::from_secs(u64::try_from(datetime_utc.timestamp()).expect("Overflow of u64 seconds, very future!")) +} + +/// It converts a timestamp to a `DateTime::<Utc>`. +/// For example, the timestamp of 0: `DurationSinceUnixEpoch::ZERO` will be +/// converted to the `DateTime::<Utc>` of the Unix Epoch. +/// +/// # Panics +/// +/// Will panic if the `u64` seconds of the input time overflow the `i64` type. +/// (this will naturally happen in 292.5 billion years) +#[must_use] +pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) -> DateTime<Utc> { + DateTime::from_timestamp( + i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), + duration.subsec_nanos(), + ) + .unwrap() +} + +#[cfg(test)] + +mod tests { + use chrono::DateTime; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::conv::{ + convert_from_datetime_utc_to_timestamp, convert_from_iso_8601_to_timestamp, convert_from_timestamp_to_datetime_utc, + }; + + #[test] + fn should_be_converted_to_datetime_utc() { + let timestamp = DurationSinceUnixEpoch::ZERO; + assert_eq!( + convert_from_timestamp_to_datetime_utc(timestamp), + DateTime::from_timestamp(0, 0).unwrap() + ); + } + + #[test] + fn should_be_converted_from_datetime_utc() { + let datetime = DateTime::from_timestamp(0, 0).unwrap(); + assert_eq!( + convert_from_datetime_utc_to_timestamp(&datetime), + DurationSinceUnixEpoch::ZERO + ); + } + + #[test] + fn should_be_converted_from_datetime_utc_in_iso_8601() { + let iso_8601 = "1970-01-01T00:00:00.000Z".to_string(); + 
assert_eq!(convert_from_iso_8601_to_timestamp(&iso_8601), DurationSinceUnixEpoch::ZERO); + } +} diff --git a/packages/clock/src/lib.rs b/packages/clock/src/lib.rs new file mode 100644 index 000000000..9fc67cb54 --- /dev/null +++ b/packages/clock/src/lib.rs @@ -0,0 +1,53 @@ +//! Time related functions and types. +//! +//! It's usually a good idea to control where the time comes from +//! in an application so that it can be mocked for testing and it can be +//! controlled in production so we get the intended behavior without +//! relying on the specific time zone for the underlying system. +//! +//! Clocks use the type `DurationSinceUnixEpoch` which is a +//! `std::time::Duration` since the Unix Epoch (timestamp). +//! +//! ```text +//! Local time: lun 2023-03-27 16:12:00 WEST +//! Universal time: lun 2023-03-27 15:12:00 UTC +//! Time zone: Atlantic/Canary (WEST, +0100) +//! Timestamp: 1679929914 +//! Duration: 1679929914.10167426 +//! ``` +//! +//! > **NOTICE**: internally the `Duration` stores its main unit as seconds in a `u64` and it will +//! overflow in 584.9 billion years. +//! +//! > **NOTICE**: the timestamp does not depend on the time zone. That gives you +//! the ability to use the clock regardless of the underlying system time zone +//! configuration. See [Unix time Wikipedia entry](https://en.wikipedia.org/wiki/Unix_time). + +pub mod clock; +pub mod conv; +pub mod static_time; +pub mod time_extent; + +#[macro_use] +extern crate lazy_static; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; + +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::WorkingTimeExtentMaker; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::StoppedTimeExtentMaker; diff --git a/src/shared/clock/static_time.rs b/packages/clock/src/static_time/mod.rs similarity index 100% rename from src/shared/clock/static_time.rs rename to packages/clock/src/static_time/mod.rs diff --git a/src/shared/clock/time_extent.rs b/packages/clock/src/time_extent/mod.rs similarity index 85% rename from src/shared/clock/time_extent.rs rename to packages/clock/src/time_extent/mod.rs index 168224eda..c51849f21 100644 --- a/src/shared/clock/time_extent.rs +++ b/packages/clock/src/time_extent/mod.rs @@ -65,7 +65,7 @@ use std::num::{IntErrorKind, TryFromIntError}; use std::time::Duration; -use super::{Stopped, TimeNow, Type, Working}; +use crate::clock::{self, Stopped, Working}; /// This trait defines the operations that can be performed on a `TimeExtent`. pub trait Extent: Sized + Default { @@ -199,10 +199,10 @@ impl Extent for TimeExtent { /// It gives you the time in time extents. pub trait Make: Sized where - Clock: TimeNow, + Clock: clock::Time, { /// It gives you the current time extent (with a certain increment) for - /// the current time. It gets the current timestamp front he `Clock`. + /// the current time. It gets the current timestamp front the `Clock`. /// /// For example: /// @@ -223,12 +223,12 @@ where }) } - /// Same as [`now`](crate::shared::clock::time_extent::Make::now), but it + /// Same as [`now`](crate::time_extent::Make::now), but it /// will add an extra duration to the current time before calculating the /// time extent. It gives you a time extent for a time in the future. 
#[must_use] fn now_after(increment: &Base, add_time: &Duration) -> Option> { - match Clock::add(add_time) { + match Clock::now_add(add_time) { None => None, Some(time) => time .as_nanos() @@ -240,12 +240,12 @@ where } } - /// Same as [`now`](crate::shared::clock::time_extent::Make::now), but it + /// Same as [`now`](crate::time_extent::Make::now), but it /// will subtract a duration to the current time before calculating the /// time extent. It gives you a time extent for a time in the past. #[must_use] fn now_before(increment: &Base, sub_time: &Duration) -> Option> { - match Clock::sub(sub_time) { + match Clock::now_sub(sub_time) { None => None, Some(time) => time .as_nanos() @@ -262,38 +262,30 @@ where /// /// It's a clock which measures time in `TimeExtents`. #[derive(Debug)] -pub struct Maker {} +pub struct Maker { + clock: std::marker::PhantomData, +} /// A `TimeExtent` maker which makes `TimeExtents` from the `Working` clock. -pub type WorkingTimeExtentMaker = Maker<{ Type::WorkingClock as usize }>; +pub type WorkingTimeExtentMaker = Maker; /// A `TimeExtent` maker which makes `TimeExtents` from the `Stopped` clock. -pub type StoppedTimeExtentMaker = Maker<{ Type::StoppedClock as usize }>; - -impl Make for WorkingTimeExtentMaker {} -impl Make for StoppedTimeExtentMaker {} +pub type StoppedTimeExtentMaker = Maker; -/// The default `TimeExtent` maker. It is `WorkingTimeExtentMaker` in production -/// and `StoppedTimeExtentMaker` in tests. -#[cfg(not(test))] -pub type DefaultTimeExtentMaker = WorkingTimeExtentMaker; - -/// The default `TimeExtent` maker. It is `WorkingTimeExtentMaker` in production -/// and `StoppedTimeExtentMaker` in tests. 
-#[cfg(test)] -pub type DefaultTimeExtentMaker = StoppedTimeExtentMaker; +impl Make for WorkingTimeExtentMaker {} +impl Make for StoppedTimeExtentMaker {} #[cfg(test)] mod test { - use crate::shared::clock::time_extent::TimeExtent; + use crate::time_extent::TimeExtent; const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239_812_388_723); mod fn_checked_duration_from_nanos { use std::time::Duration; - use crate::shared::clock::time_extent::checked_duration_from_nanos; - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::checked_duration_from_nanos; + use crate::time_extent::test::TIME_EXTENT_VAL; const NANOS_PER_SEC: u32 = 1_000_000_000; @@ -334,7 +326,7 @@ mod test { mod time_extent { mod fn_default { - use crate::shared::clock::time_extent::{TimeExtent, ZERO}; + use crate::time_extent::{TimeExtent, ZERO}; #[test] fn it_should_default_initialize_to_zero() { @@ -343,8 +335,8 @@ mod test { } mod fn_from_sec { - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Multiplier, TimeExtent, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Multiplier, TimeExtent, ZERO}; #[test] fn it_should_make_empty_for_zero() { @@ -360,8 +352,8 @@ mod test { } mod fn_new { - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Base, Extent, Multiplier, TimeExtent, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Extent, Multiplier, TimeExtent, ZERO}; #[test] fn it_should_make_empty_for_zero() { @@ -383,8 +375,8 @@ mod test { mod fn_increase { use std::num::IntErrorKind; - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Extent, TimeExtent, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Extent, TimeExtent, ZERO}; #[test] fn it_should_not_increase_for_zero() { @@ -411,8 +403,8 @@ 
mod test { mod fn_decrease { use std::num::IntErrorKind; - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Extent, TimeExtent, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Extent, TimeExtent, ZERO}; #[test] fn it_should_not_decrease_for_zero() { @@ -437,8 +429,8 @@ mod test { } mod fn_total { - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; #[test] fn it_should_be_zero_for_zero() { @@ -485,8 +477,8 @@ mod test { } mod fn_total_next { - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; #[test] fn it_should_be_zero_for_zero() { @@ -544,9 +536,10 @@ mod test { mod fn_now { use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make, TimeExtent}; - use crate::shared::clock::{Current, StoppedTime}; + use crate::clock::stopped::Stopped as _; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Make, TimeExtent}; + use crate::{CurrentClock, DefaultTimeExtentMaker}; #[test] fn it_should_give_a_time_extent() { @@ -558,7 +551,7 @@ mod test { } ); - Current::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); + CurrentClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); assert_eq!( DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), @@ -573,7 +566,7 @@ mod test { #[test] fn 
it_should_fail_if_amount_exceeds_bounds() { - Current::local_set(&DurationSinceUnixEpoch::MAX); + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now(&Base::from_millis(1)).unwrap().unwrap_err(), u64::try_from(u128::MAX).unwrap_err() @@ -586,9 +579,10 @@ mod test { use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make}; - use crate::shared::clock::{Current, StoppedTime}; + use crate::clock::stopped::Stopped as _; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Make}; + use crate::{CurrentClock, DefaultTimeExtentMaker}; #[test] fn it_should_give_a_time_extent() { @@ -607,13 +601,13 @@ mod test { fn it_should_fail_for_zero() { assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::ZERO), None); - Current::local_set(&DurationSinceUnixEpoch::MAX); + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::MAX), None); } #[test] fn it_should_fail_if_amount_exceeds_bounds() { - Current::local_set(&DurationSinceUnixEpoch::MAX); + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now_after(&Base::from_millis(1), &Duration::ZERO) .unwrap() @@ -627,12 +621,13 @@ mod test { use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make, TimeExtent}; - use crate::shared::clock::{Current, StoppedTime}; + use crate::clock::stopped::Stopped as _; + use crate::time_extent::{Base, Make, TimeExtent}; + use crate::{CurrentClock, DefaultTimeExtentMaker}; #[test] fn it_should_give_a_time_extent() { - Current::local_set(&DurationSinceUnixEpoch::MAX); + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now_before( @@ -657,7 +652,7 @@ mod test 
{ #[test] fn it_should_fail_if_amount_exceeds_bounds() { - Current::local_set(&DurationSinceUnixEpoch::MAX); + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now_before(&Base::from_millis(1), &Duration::ZERO) .unwrap() diff --git a/packages/clock/tests/clock/mod.rs b/packages/clock/tests/clock/mod.rs new file mode 100644 index 000000000..5d94bb83d --- /dev/null +++ b/packages/clock/tests/clock/mod.rs @@ -0,0 +1,16 @@ +use std::time::Duration; + +use torrust_tracker_clock::clock::Time; + +use crate::CurrentClock; + +#[test] +fn it_should_use_stopped_time_for_testing() { + assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned()); + + let time = CurrentClock::now(); + std::thread::sleep(Duration::from_millis(50)); + let time_2 = CurrentClock::now(); + + assert_eq!(time, time_2); +} diff --git a/packages/clock/tests/integration.rs b/packages/clock/tests/integration.rs new file mode 100644 index 000000000..fa500227a --- /dev/null +++ b/packages/clock/tests/integration.rs @@ -0,0 +1,19 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! ``` + +//mod common; +mod clock; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = torrust_tracker_clock::clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = torrust_tracker_clock::clock::Stopped; diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index b3b146717..549c73a31 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -243,6 +243,9 @@ use thiserror::Error; use torrust_tracker_located_error::{DynError, Located, LocatedError}; use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; +/// The maximum number of returned peers for a torrent. 
+pub const TORRENT_PEERS_LIMIT: usize = 74; + #[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] pub struct TrackerPolicy { pub remove_peerless_torrents: bool, diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index b53b9a15e..c36ae1440 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -1,5 +1,5 @@ [package] -description = "A library to provide error decorator with the location and the source of the original error." +description = "A library that provides a repository of torrents files and their peers." keywords = ["torrents", "repository", "library"] name = "torrust-tracker-torrent-repository" readme = "README.md" @@ -20,10 +20,11 @@ futures = "0.3.29" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } -serde = { version = "1", features = ["derive"] } +torrust-tracker-clock = { version = "3.0.0-alpha.12-develop", path = "../clock" } [dev-dependencies] criterion = { version = "0", features = ["async_tokio"] } +rstest = "0" [[bench]] harness = false diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index 04aa597df..11352a8fa 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -1,7 +1,7 @@ use std::fmt::Debug; use std::sync::Arc; -use serde::{Deserialize, Serialize}; +//use serde::{Deserialize, Serialize}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; @@ -88,10 +88,10 @@ pub trait EntryAsync { /// This is the tracker entry for a given torrent and contains the swarm data, /// that's the list of 
all the peers trying to download the same torrent. /// The tracker keeps one entry like this for every torrent. -#[derive(Serialize, Deserialize, Clone, Debug, Default)] +#[derive(Clone, Debug, Default)] pub struct Torrent { /// The swarm: a network of peers that are all trying to download the torrent associated to this entry - #[serde(skip)] + // #[serde(skip)] pub(crate) peers: std::collections::BTreeMap>, /// The number of peers that have ever completed downloading the torrent associated to this entry pub(crate) completed: u32, diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs index 7a5cf6240..85fdc6cf0 100644 --- a/packages/torrent-repository/src/entry/single.rs +++ b/packages/torrent-repository/src/entry/single.rs @@ -103,3 +103,302 @@ impl Entry for EntrySingle { .retain(|_, peer| peer::ReadInfo::get_updated(peer) > current_cutoff); } } + +#[cfg(test)] +mod tests { + mod torrent_entry { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::ops::Sub; + use std::sync::Arc; + use std::time::Duration; + + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; + + use crate::entry::Entry; + use crate::{CurrentClock, EntrySingle}; + + struct TorrentPeerBuilder { + peer: peer::Peer, + } + + impl TorrentPeerBuilder { + pub fn default() -> TorrentPeerBuilder { + let default_peer = peer::Peer { + peer_id: peer::Id([0u8; 20]), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: CurrentClock::now(), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + TorrentPeerBuilder { peer: default_peer } + } + + pub fn with_event_completed(mut self) -> 
Self { + self.peer.event = AnnounceEvent::Completed; + self + } + + pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { + self.peer.peer_id = peer_id; + self + } + + pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } + + pub fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + pub fn into(self) -> peer::Peer { + self.peer + } + } + + /// A torrent seeder is a peer with 0 bytes left to download which + /// has not announced it has stopped + fn a_torrent_seeder() -> peer::Peer { + TorrentPeerBuilder::default() + .with_number_of_bytes_left(0) + .with_event_completed() + .into() + } + + /// A torrent leecher is a peer that is not a seeder. + /// Leecher: left > 0 OR event = Stopped + fn a_torrent_leecher() -> peer::Peer { + TorrentPeerBuilder::default() + .with_number_of_bytes_left(1) + .with_event_completed() + .into() + } + + #[test] + fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { + let torrent_entry = EntrySingle::default(); + + assert_eq!(torrent_entry.get_peers(None).len(), 0); + } + + #[test] + fn a_new_peer_can_be_added_to_a_torrent_entry() { + let mut torrent_entry = EntrySingle::default(); + let torrent_peer = TorrentPeerBuilder::default().into(); + + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer + + assert_eq!(*torrent_entry.get_peers(None)[0], torrent_peer); + assert_eq!(torrent_entry.get_peers(None).len(), 1); + } + + #[test] + fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { + let mut torrent_entry = EntrySingle::default(); + let torrent_peer = TorrentPeerBuilder::default().into(); + + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer + + assert_eq!(torrent_entry.get_peers(None), vec![Arc::new(torrent_peer)]); 
+ } + + #[test] + fn a_peer_can_be_updated_in_a_torrent_entry() { + let mut torrent_entry = EntrySingle::default(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer + + torrent_peer.event = AnnounceEvent::Completed; // Update the peer + torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry + + assert_eq!(torrent_entry.get_peers(None)[0].event, AnnounceEvent::Completed); + } + + #[test] + fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { + let mut torrent_entry = EntrySingle::default(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer + + torrent_peer.event = AnnounceEvent::Stopped; // Update the peer + torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry + + assert_eq!(torrent_entry.get_peers(None).len(), 0); + } + + #[test] + fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { + let mut torrent_entry = EntrySingle::default(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer + + torrent_peer.event = AnnounceEvent::Completed; // Update the peer + let stats_have_changed = torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry + + assert!(stats_have_changed); + } + + #[test] + fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( + ) { + let mut torrent_entry = EntrySingle::default(); + let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); + + // Add a peer that did not exist before in the entry + let torrent_stats_have_not_changed = 
!torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); + + assert!(torrent_stats_have_not_changed); + } + + #[test] + fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() + { + let mut torrent_entry = EntrySingle::default(); + let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); + torrent_entry.insert_or_update_peer(&torrent_peer); // Add peer + + // Get peers excluding the one we have just added + let peers = torrent_entry.get_peers_for_peer(&torrent_peer, None); + + assert_eq!(peers.len(), 0); + } + + #[test] + fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { + let mut torrent_entry = EntrySingle::default(); + + let peer_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + + // Add peer 1 + let torrent_peer_1 = TorrentPeerBuilder::default() + .with_peer_address(SocketAddr::new(peer_ip, 8080)) + .into(); + torrent_entry.insert_or_update_peer(&torrent_peer_1); + + // Add peer 2 + let torrent_peer_2 = TorrentPeerBuilder::default() + .with_peer_address(SocketAddr::new(peer_ip, 8081)) + .into(); + torrent_entry.insert_or_update_peer(&torrent_peer_2); + + // Get peers for peer 1 + let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1, None); + + // The peer 2 using the same IP but different port should be included + assert_eq!(peers[0].peer_addr.ip(), Ipv4Addr::new(127, 0, 0, 1)); + assert_eq!(peers[0].peer_addr.port(), 8081); + } + + fn peer_id_from_i32(number: i32) -> peer::Id { + let peer_id = number.to_le_bytes(); + peer::Id([ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], + peer_id[2], peer_id[3], + ]) + } + + #[test] + fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { + let mut torrent_entry = 
EntrySingle::default(); + + // We add one more peer than the scrape limit + for peer_number in 1..=74 + 1 { + let torrent_peer = TorrentPeerBuilder::default() + .with_peer_id(peer_id_from_i32(peer_number)) + .into(); + torrent_entry.insert_or_update_peer(&torrent_peer); + } + + let peers = torrent_entry.get_peers(Some(TORRENT_PEERS_LIMIT)); + + assert_eq!(peers.len(), 74); + } + + #[test] + fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { + let mut torrent_entry = EntrySingle::default(); + let torrent_seeder = a_torrent_seeder(); + + torrent_entry.insert_or_update_peer(&torrent_seeder); // Add seeder + + assert_eq!(torrent_entry.get_stats().complete, 1); + } + + #[test] + fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { + let mut torrent_entry = EntrySingle::default(); + let torrent_leecher = a_torrent_leecher(); + + torrent_entry.insert_or_update_peer(&torrent_leecher); // Add leecher + + assert_eq!(torrent_entry.get_stats().incomplete, 1); + } + + #[test] + fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( + ) { + let mut torrent_entry = EntrySingle::default(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer + + // Announce "Completed" torrent download event. 
+ torrent_peer.event = AnnounceEvent::Completed; + torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer + + let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().complete; + + assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); + } + + #[test] + fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { + let mut torrent_entry = EntrySingle::default(); + let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); + + // Announce "Completed" torrent download event. + // It's the first event announced from this peer. + torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); // Add the peer + + let number_of_peers_with_completed_torrent = torrent_entry.get_stats().downloaded; + + assert_eq!(number_of_peers_with_completed_torrent, 0); + } + + #[test] + fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { + let mut torrent_entry = EntrySingle::default(); + + let timeout = 120u32; + + let now = clock::Working::now(); + clock::Stopped::local_set(&now); + + let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); + let inactive_peer = TorrentPeerBuilder::default() + .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) + .into(); + torrent_entry.insert_or_update_peer(&inactive_peer); // Add the peer + + let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(timeout))).unwrap_or_default(); + torrent_entry.remove_inactive_peers(current_cutoff); + + assert_eq!(torrent_entry.get_peers_len(), 0); + } + } +} diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 903e1405e..8bb1b6def 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,5 +1,7 @@ use std::sync::Arc; +use torrust_tracker_clock::clock; + pub mod 
entry; pub mod repository; @@ -13,3 +15,14 @@ pub type TorrentsRwLockStdMutexTokio = repository::RwLockStd; pub type TorrentsRwLockTokio = repository::RwLockTokio; pub type TorrentsRwLockTokioMutexStd = repository::RwLockTokio; pub type TorrentsRwLockTokioMutexTokio = repository::RwLockTokio; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 09b624566..396e63682 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -13,13 +13,13 @@ //! 4. Initialize the domain tracker. use std::sync::Arc; +use torrust_tracker_clock::static_time; use torrust_tracker_configuration::Configuration; use super::config::initialize_configuration; use crate::bootstrap; use crate::core::services::tracker_factory; use crate::core::Tracker; -use crate::shared::clock::static_time; use crate::shared::crypto::ephemeral_instance_keys; /// It loads the configuration from the environment and builds the main domain [`Tracker`] struct. 
diff --git a/src/core/auth.rs b/src/core/auth.rs index a7bb91aa4..b5326a373 100644 --- a/src/core/auth.rs +++ b/src/core/auth.rs @@ -47,11 +47,13 @@ use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; use thiserror::Error; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use torrust_tracker_located_error::{DynError, LocatedError}; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; -use crate::shared::clock::{convert_from_timestamp_to_datetime_utc, Current, Time, TimeNow}; +use crate::CurrentClock; #[must_use] /// It generates a new random 32-char authentication [`ExpiringKey`] @@ -70,7 +72,7 @@ pub fn generate(lifetime: Duration) -> ExpiringKey { ExpiringKey { key: random_id.parse::().unwrap(), - valid_until: Current::add(&lifetime).unwrap(), + valid_until: CurrentClock::now_add(&lifetime).unwrap(), } } @@ -82,7 +84,7 @@ pub fn generate(lifetime: Duration) -> ExpiringKey { /// /// Will return `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> { - let current_time: DurationSinceUnixEpoch = Current::now(); + let current_time: DurationSinceUnixEpoch = CurrentClock::now(); if auth_key.valid_until < current_time { Err(Error::KeyExpired { @@ -213,8 +215,10 @@ mod tests { use std::str::FromStr; use std::time::Duration; + use torrust_tracker_clock::clock; + use torrust_tracker_clock::clock::stopped::Stopped as _; + use crate::core::auth; - use crate::shared::clock::{Current, StoppedTime}; #[test] fn should_be_parsed_from_an_string() { @@ -228,7 +232,7 @@ mod tests { #[test] fn should_be_displayed() { // Set the time to the current time. 
- Current::local_set_to_unix_epoch(); + clock::Stopped::local_set_to_unix_epoch(); let expiring_key = auth::generate(Duration::from_secs(0)); @@ -248,18 +252,18 @@ mod tests { #[test] fn should_be_generate_and_verified() { // Set the time to the current time. - Current::local_set_to_system_time_now(); + clock::Stopped::local_set_to_system_time_now(); // Make key that is valid for 19 seconds. let expiring_key = auth::generate(Duration::from_secs(19)); // Mock the time has passed 10 sec. - Current::local_add(&Duration::from_secs(10)).unwrap(); + clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); assert!(auth::verify(&expiring_key).is_ok()); // Mock the time has passed another 10 sec. - Current::local_add(&Duration::from_secs(10)).unwrap(); + clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); assert!(auth::verify(&expiring_key).is_err()); } diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs index b708ef4dc..20a45cf83 100644 --- a/src/core/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -117,9 +117,9 @@ pub trait Database: Sync + Send { /// /// It returns an array of tuples with the torrent /// [`InfoHash`] and the - /// [`completed`](torrust_tracker_torrent_repository::entry::Entry::completed) counter + /// [`completed`](torrust_tracker_torrent_repository::entry::Torrent::completed) counter /// which is the number of times the torrent has been downloaded. - /// See [`Entry::completed`](torrust_tracker_torrent_repository::entry::Entry::completed). + /// See [`Entry::completed`](torrust_tracker_torrent_repository::entry::Torrent::completed). 
/// /// # Context: Torrent Metrics /// diff --git a/src/core/mod.rs b/src/core/mod.rs index f94c46543..21cd1b501 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -444,7 +444,8 @@ use std::time::Duration; use derive_more::Constructor; use log::debug; use tokio::sync::mpsc::error::SendError; -use torrust_tracker_configuration::{AnnouncePolicy, Configuration, TrackerPolicy}; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_configuration::{AnnouncePolicy, Configuration, TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; @@ -456,10 +457,7 @@ use self::auth::Key; use self::error::Error; use self::torrent::Torrents; use crate::core::databases::Database; -use crate::shared::clock::{self, TimeNow}; - -/// The maximum number of returned peers for a torrent. -pub const TORRENT_PEERS_LIMIT: usize = 74; +use crate::CurrentClock; /// The domain layer tracker service. 
/// @@ -741,7 +739,7 @@ impl Tracker { self.torrents.remove_peerless_torrents(&self.policy); } else { let current_cutoff = - clock::Current::sub(&Duration::from_secs(u64::from(self.policy.max_peer_timeout))).unwrap_or_default(); + CurrentClock::now_sub(&Duration::from_secs(u64::from(self.policy.max_peer_timeout))).unwrap_or_default(); self.torrents.remove_inactive_peers(current_cutoff); } } @@ -1592,8 +1590,11 @@ mod tests { use std::str::FromStr; use std::time::Duration; + use torrust_tracker_clock::clock::Time; + use crate::core::auth; use crate::core::tests::the_tracker::private_tracker; + use crate::CurrentClock; #[tokio::test] async fn it_should_generate_the_expiring_authentication_keys() { @@ -1601,7 +1602,7 @@ mod tests { let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); - assert_eq!(key.valid_until, Duration::from_secs(100)); + assert_eq!(key.valid_until, CurrentClock::now_add(&Duration::from_secs(100)).unwrap()); } #[tokio::test] diff --git a/src/core/peer_tests.rs b/src/core/peer_tests.rs index 9e5b4be01..d30d73db3 100644 --- a/src/core/peer_tests.rs +++ b/src/core/peer_tests.rs @@ -2,17 +2,21 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use torrust_tracker_clock::clock::stopped::Stopped as _; +use torrust_tracker_clock::clock::{self, Time}; use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::{peer, NumberOfBytes}; -use crate::shared::clock::{self, Time}; +use crate::CurrentClock; #[test] fn it_should_be_serializable() { + clock::Stopped::local_set_to_unix_epoch(); + let torrent_peer = peer::Peer { peer_id: peer::Id(*b"-qB0000-000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: clock::Current::now(), + updated: CurrentClock::now(), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index b5a2b4c07..2b3f9cbf7 100644 --- 
a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -31,300 +31,4 @@ use torrust_tracker_torrent_repository::TorrentsRwLockStdMutexStd; pub type Torrents = TorrentsRwLockStdMutexStd; // Currently Used #[cfg(test)] -mod tests { - - mod torrent_entry { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::ops::Sub; - use std::sync::Arc; - use std::time::Duration; - - use torrust_tracker_primitives::announce_event::AnnounceEvent; - use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; - use torrust_tracker_torrent_repository::entry::Entry; - use torrust_tracker_torrent_repository::EntrySingle; - - use crate::core::TORRENT_PEERS_LIMIT; - use crate::shared::clock::{self, StoppedTime, Time, TimeNow}; - - struct TorrentPeerBuilder { - peer: peer::Peer, - } - - impl TorrentPeerBuilder { - pub fn default() -> TorrentPeerBuilder { - let default_peer = peer::Peer { - peer_id: peer::Id([0u8; 20]), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), - updated: clock::Current::now(), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - TorrentPeerBuilder { peer: default_peer } - } - - pub fn with_event_completed(mut self) -> Self { - self.peer.event = AnnounceEvent::Completed; - self - } - - pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } - - pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { - self.peer.peer_id = peer_id; - self - } - - pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes(left); - self - } - - pub fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { - self.peer.updated = updated; - self - } - - pub fn into(self) -> peer::Peer { - self.peer - } - } - - /// A torrent seeder is a peer with 0 bytes left to download which - /// has not announced it has stopped - fn a_torrent_seeder() -> peer::Peer { 
- TorrentPeerBuilder::default() - .with_number_of_bytes_left(0) - .with_event_completed() - .into() - } - - /// A torrent leecher is a peer that is not a seeder. - /// Leecher: left > 0 OR event = Stopped - fn a_torrent_leecher() -> peer::Peer { - TorrentPeerBuilder::default() - .with_number_of_bytes_left(1) - .with_event_completed() - .into() - } - - #[test] - fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = EntrySingle::default(); - - assert_eq!(torrent_entry.get_peers(None).len(), 0); - } - - #[test] - fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = EntrySingle::default(); - let torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - assert_eq!(*torrent_entry.get_peers(None)[0], torrent_peer); - assert_eq!(torrent_entry.get_peers(None).len(), 1); - } - - #[test] - fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = EntrySingle::default(); - let torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - assert_eq!(torrent_entry.get_peers(None), vec![Arc::new(torrent_peer)]); - } - - #[test] - fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = EntrySingle::default(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Completed; // Update the peer - torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert_eq!(torrent_entry.get_peers(None)[0].event, AnnounceEvent::Completed); - } - - #[test] - fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = EntrySingle::default(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - 
torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Stopped; // Update the peer - torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert_eq!(torrent_entry.get_peers(None).len(), 0); - } - - #[test] - fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = EntrySingle::default(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Completed; // Update the peer - let stats_have_changed = torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert!(stats_have_changed); - } - - #[test] - fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( - ) { - let mut torrent_entry = EntrySingle::default(); - let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); - - // Add a peer that did not exist before in the entry - let torrent_stats_have_not_changed = !torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); - - assert!(torrent_stats_have_not_changed); - } - - #[test] - fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() - { - let mut torrent_entry = EntrySingle::default(); - let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add peer - - // Get peers excluding the one we have just added - let peers = torrent_entry.get_peers_for_peer(&torrent_peer, None); - - assert_eq!(peers.len(), 0); - } - - #[test] - fn 
two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { - let mut torrent_entry = EntrySingle::default(); - - let peer_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); - - // Add peer 1 - let torrent_peer_1 = TorrentPeerBuilder::default() - .with_peer_address(SocketAddr::new(peer_ip, 8080)) - .into(); - torrent_entry.insert_or_update_peer(&torrent_peer_1); - - // Add peer 2 - let torrent_peer_2 = TorrentPeerBuilder::default() - .with_peer_address(SocketAddr::new(peer_ip, 8081)) - .into(); - torrent_entry.insert_or_update_peer(&torrent_peer_2); - - // Get peers for peer 1 - let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1, None); - - // The peer 2 using the same IP but different port should be included - assert_eq!(peers[0].peer_addr.ip(), Ipv4Addr::new(127, 0, 0, 1)); - assert_eq!(peers[0].peer_addr.port(), 8081); - } - - fn peer_id_from_i32(number: i32) -> peer::Id { - let peer_id = number.to_le_bytes(); - peer::Id([ - 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], - peer_id[2], peer_id[3], - ]) - } - - #[test] - fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = EntrySingle::default(); - - // We add one more peer than the scrape limit - for peer_number in 1..=74 + 1 { - let torrent_peer = TorrentPeerBuilder::default() - .with_peer_id(peer_id_from_i32(peer_number)) - .into(); - torrent_entry.insert_or_update_peer(&torrent_peer); - } - - let peers = torrent_entry.get_peers(Some(TORRENT_PEERS_LIMIT)); - - assert_eq!(peers.len(), 74); - } - - #[test] - fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = EntrySingle::default(); - let torrent_seeder = a_torrent_seeder(); - - torrent_entry.insert_or_update_peer(&torrent_seeder); // Add seeder - - assert_eq!(torrent_entry.get_stats().complete, 1); - } - - #[test] - fn 
torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut torrent_entry = EntrySingle::default(); - let torrent_leecher = a_torrent_leecher(); - - torrent_entry.insert_or_update_peer(&torrent_leecher); // Add leecher - - assert_eq!(torrent_entry.get_stats().incomplete, 1); - } - - #[test] - fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( - ) { - let mut torrent_entry = EntrySingle::default(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - // Announce "Completed" torrent download event. - torrent_peer.event = AnnounceEvent::Completed; - torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer - - let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().complete; - - assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); - } - - #[test] - fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = EntrySingle::default(); - let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); - - // Announce "Completed" torrent download event. - // It's the first event announced from this peer. 
- torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); // Add the peer - - let number_of_peers_with_completed_torrent = torrent_entry.get_stats().downloaded; - - assert_eq!(number_of_peers_with_completed_torrent, 0); - } - - #[test] - fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = EntrySingle::default(); - - let timeout = 120u32; - - let now = clock::Working::now(); - clock::Stopped::local_set(&now); - - let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); - let inactive_peer = TorrentPeerBuilder::default() - .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) - .into(); - torrent_entry.insert_or_update_peer(&inactive_peer); // Add the peer - - let current_cutoff = clock::Current::sub(&Duration::from_secs(u64::from(timeout))).unwrap_or_default(); - torrent_entry.remove_inactive_peers(current_cutoff); - - assert_eq!(torrent_entry.get_peers_len(), 0); - } - } -} +mod tests {} diff --git a/src/lib.rs b/src/lib.rs index b4ad298ac..064f50eb6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -469,6 +469,9 @@ //! //! In addition to the production code documentation you can find a lot of //! examples on the integration and unit tests. + +use torrust_tracker_clock::{clock, time_extent}; + pub mod app; pub mod bootstrap; pub mod console; @@ -478,3 +481,24 @@ pub mod shared; #[macro_use] extern crate lazy_static; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; + +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::WorkingTimeExtentMaker; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::StoppedTimeExtentMaker; diff --git a/src/servers/apis/v1/context/auth_key/resources.rs b/src/servers/apis/v1/context/auth_key/resources.rs index 99e93aaf9..3671438c2 100644 --- a/src/servers/apis/v1/context/auth_key/resources.rs +++ b/src/servers/apis/v1/context/auth_key/resources.rs @@ -1,9 +1,9 @@ //! API resources for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. use serde::{Deserialize, Serialize}; +use torrust_tracker_clock::conv::convert_from_iso_8601_to_timestamp; use crate::core::auth::{self, Key}; -use crate::shared::clock::convert_from_iso_8601_to_timestamp; /// A resource that represents an authentication key. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -41,9 +41,12 @@ impl From for AuthKey { mod tests { use std::time::Duration; + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self, Time}; + use super::AuthKey; use crate::core::auth::{self, Key}; - use crate::shared::clock::{Current, TimeNow}; + use crate::CurrentClock; struct TestTime { pub timestamp: u64, @@ -65,6 +68,8 @@ mod tests { #[test] #[allow(deprecated)] fn it_should_be_convertible_into_an_auth_key() { + clock::Stopped::local_set_to_unix_epoch(); + let auth_key_resource = AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: one_hour_after_unix_epoch().timestamp, @@ -75,7 +80,7 @@ mod tests { auth::ExpiringKey::from(auth_key_resource), auth::ExpiringKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: Current::add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap() + valid_until: CurrentClock::now_add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap() } ); } @@ -83,9 +88,11 @@ mod tests { #[test] #[allow(deprecated)] fn it_should_be_convertible_from_an_auth_key() { + 
clock::Stopped::local_set_to_unix_epoch(); + let auth_key = auth::ExpiringKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: Current::add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap(), + valid_until: CurrentClock::now_add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap(), }; assert_eq!( diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index 6e8b5a40e..3ef85e600 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -71,7 +71,7 @@ //! is behind a reverse proxy. //! //! > **NOTICE**: the maximum number of peers that the tracker can return is -//! `74`. Defined with a hardcoded const [`TORRENT_PEERS_LIMIT`](crate::core::TORRENT_PEERS_LIMIT). +//! `74`. Defined with a hardcoded const [`TORRENT_PEERS_LIMIT`](torrust_tracker_configuration::TORRENT_PEERS_LIMIT). //! Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) //! for more information about this limitation. //! 
diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 215acbad8..e9198f20c 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -12,6 +12,7 @@ use std::sync::Arc; use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; +use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::{peer, NumberOfBytes}; @@ -25,7 +26,7 @@ use crate::servers::http::v1::requests::announce::{Announce, Compact, Event}; use crate::servers::http::v1::responses::{self}; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::servers::http::v1::services::{self, peer_ip_resolver}; -use crate::shared::clock::{Current, Time}; +use crate::CurrentClock; /// It handles the `announce` request when the HTTP tracker does not require /// authentication (no PATH `key` parameter required). @@ -134,7 +135,7 @@ fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> peer::Pee peer::Peer { peer_id: announce_request.peer_id, peer_addr: SocketAddr::new(*peer_ip, announce_request.port), - updated: Current::now(), + updated: CurrentClock::now(), uploaded: NumberOfBytes(announce_request.uploaded.unwrap_or(0)), downloaded: NumberOfBytes(announce_request.downloaded.unwrap_or(0)), left: NumberOfBytes(announce_request.left.unwrap_or(0)), diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs index 19e61f14e..49ea6261b 100644 --- a/src/servers/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -70,9 +70,9 @@ use std::net::SocketAddr; use std::panic::Location; use aquatic_udp_protocol::ConnectionId; +use torrust_tracker_clock::time_extent::{Extent, TimeExtent}; use super::error::Error; -use crate::shared::clock::time_extent::{Extent, TimeExtent}; pub type Cookie = [u8; 8]; @@ -133,9 +133,11 @@ mod cookie_builder { use 
std::hash::{Hash, Hasher}; use std::net::SocketAddr; + use torrust_tracker_clock::time_extent::{Extent, Make, TimeExtent}; + use super::{Cookie, SinceUnixEpochTimeExtent, COOKIE_LIFETIME}; - use crate::shared::clock::time_extent::{DefaultTimeExtentMaker, Extent, Make, TimeExtent}; use crate::shared::crypto::keys::seeds::{Current, Keeper}; + use crate::DefaultTimeExtentMaker; pub(super) fn get_last_time_extent() -> SinceUnixEpochTimeExtent { DefaultTimeExtentMaker::now(&COOKIE_LIFETIME.increment) @@ -162,10 +164,12 @@ mod cookie_builder { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self}; + use torrust_tracker_clock::time_extent::{self, Extent}; + use super::cookie_builder::{self}; use crate::servers::udp::connection_cookie::{check, make, Cookie, COOKIE_LIFETIME}; - use crate::shared::clock::time_extent::{self, Extent}; - use crate::shared::clock::{Stopped, StoppedTime}; // #![feature(const_socketaddr)] // const REMOTE_ADDRESS_IPV4_ZERO: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); @@ -176,6 +180,8 @@ mod tests { const ID_COOKIE_OLD: Cookie = [23, 204, 198, 29, 48, 180, 62, 19]; const ID_COOKIE_NEW: Cookie = [41, 166, 45, 246, 249, 24, 108, 203]; + clock::Stopped::local_set_to_unix_epoch(); + let cookie = make(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); assert!(cookie == ID_COOKIE_OLD || cookie == ID_COOKIE_NEW); @@ -276,7 +282,7 @@ mod tests { let cookie = make(&remote_address); - Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); + clock::Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); let cookie_next = make(&remote_address); @@ -298,7 +304,7 @@ mod tests { let cookie = make(&remote_address); - Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); + clock::Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); check(&remote_address, &cookie).unwrap(); } @@ -307,9 +313,11 @@ mod tests { fn 
it_should_be_valid_for_the_last_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + clock::Stopped::local_set_to_unix_epoch(); + let cookie = make(&remote_address); - Stopped::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); + clock::Stopped::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); check(&remote_address, &cookie).unwrap(); } @@ -321,7 +329,7 @@ mod tests { let cookie = make(&remote_address); - Stopped::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); + clock::Stopped::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); check(&remote_address, &cookie).unwrap(); } diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 8f6e6d8b4..59aec0ff3 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -318,6 +318,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; + use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::{peer, NumberOfBytes}; @@ -325,7 +326,7 @@ mod tests { use crate::core::services::tracker_factory; use crate::core::Tracker; - use crate::shared::clock::{Current, Time}; + use crate::CurrentClock; fn tracker_configuration() -> Configuration { default_testing_tracker_configuration() @@ -376,7 +377,7 @@ mod tests { let default_peer = peer::Peer { peer_id: peer::Id([255u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: Current::now(), + updated: CurrentClock::now(), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs index 8c8fa10a5..f7eb935a0 100644 --- a/src/servers/udp/peer_builder.rs +++ b/src/servers/udp/peer_builder.rs @@ -1,11 +1,12 @@ //! Logic to extract the peer info from the announce request. 
use std::net::{IpAddr, SocketAddr}; +use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::{peer, NumberOfBytes}; use super::request::AnnounceWrapper; -use crate::shared::clock::{Current, Time}; +use crate::CurrentClock; /// Extracts the [`peer::Peer`] info from the /// announce request. @@ -20,7 +21,7 @@ pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> pee peer::Peer { peer_id: peer::Id(announce_wrapper.announce_request.peer_id.0), peer_addr: SocketAddr::new(*peer_ip, announce_wrapper.announce_request.port.0), - updated: Current::now(), + updated: CurrentClock::now(), uploaded: NumberOfBytes(announce_wrapper.announce_request.bytes_uploaded.0), downloaded: NumberOfBytes(announce_wrapper.announce_request.bytes_downloaded.0), left: NumberOfBytes(announce_wrapper.announce_request.bytes_left.0), diff --git a/src/shared/clock/mod.rs b/src/shared/clock/mod.rs deleted file mode 100644 index a73878466..000000000 --- a/src/shared/clock/mod.rs +++ /dev/null @@ -1,393 +0,0 @@ -//! Time related functions and types. -//! -//! It's usually a good idea to control where the time comes from -//! in an application so that it can be mocked for testing and it can be -//! controlled in production so we get the intended behavior without -//! relying on the specific time zone for the underlying system. -//! -//! Clocks use the type `DurationSinceUnixEpoch` which is a -//! `std::time::Duration` since the Unix Epoch (timestamp). -//! -//! ```text -//! Local time: lun 2023-03-27 16:12:00 WEST -//! Universal time: lun 2023-03-27 15:12:00 UTC -//! Time zone: Atlantic/Canary (WEST, +0100) -//! Timestamp: 1679929914 -//! Duration: 1679929914.10167426 -//! ``` -//! -//! > **NOTICE**: internally the `Duration` is stores it's main unit as seconds in a `u64` and it will -//! overflow in 584.9 billion years. -//! -//! > **NOTICE**: the timestamp does not depend on the time zone. 
That gives you -//! the ability to use the clock regardless of the underlying system time zone -//! configuration. See [Unix time Wikipedia entry](https://en.wikipedia.org/wiki/Unix_time). -pub mod static_time; -pub mod time_extent; -pub mod utils; - -use std::num::IntErrorKind; -use std::str::FromStr; -use std::time::Duration; - -use chrono::{DateTime, Utc}; -use torrust_tracker_primitives::DurationSinceUnixEpoch; - -/// Clock types. -#[derive(Debug)] -pub enum Type { - /// Clock that returns the current time. - WorkingClock, - /// Clock that returns always the same fixed time. - StoppedClock, -} - -/// A generic structure that represents a clock. -/// -/// It can be either the working clock (production) or the stopped clock -/// (testing). It implements the `Time` trait, which gives you the current time. -#[derive(Debug)] -pub struct Clock; - -/// The working clock. It returns the current time. -pub type Working = Clock<{ Type::WorkingClock as usize }>; -/// The stopped clock. It returns always the same fixed time. -pub type Stopped = Clock<{ Type::StoppedClock as usize }>; - -/// The current clock. Defined at compilation time. -/// It can be either the working clock (production) or the stopped clock (testing). -#[cfg(not(test))] -pub type Current = Working; - -/// The current clock. Defined at compilation time. -/// It can be either the working clock (production) or the stopped clock (testing). -#[cfg(test)] -pub type Current = Stopped; - -/// Trait for types that can be used as a timestamp clock. -pub trait Time: Sized { - fn now() -> DurationSinceUnixEpoch; -} - -/// Trait for types that can be manipulate the current time in order to -/// get time in the future or in the past after or before a duration of time. 
-pub trait TimeNow: Time { - #[must_use] - fn add(add_time: &Duration) -> Option { - Self::now().checked_add(*add_time) - } - #[must_use] - fn sub(sub_time: &Duration) -> Option { - Self::now().checked_sub(*sub_time) - } -} - -/// It converts a string in ISO 8601 format to a timestamp. -/// For example, the string `1970-01-01T00:00:00.000Z` which is the Unix Epoch -/// will be converted to a timestamp of 0: `DurationSinceUnixEpoch::ZERO`. -/// -/// # Panics -/// -/// Will panic if the input time cannot be converted to `DateTime::`, internally using the `i64` type. -/// (this will naturally happen in 292.5 billion years) -#[must_use] -pub fn convert_from_iso_8601_to_timestamp(iso_8601: &str) -> DurationSinceUnixEpoch { - convert_from_datetime_utc_to_timestamp(&DateTime::::from_str(iso_8601).unwrap()) -} - -/// It converts a `DateTime::` to a timestamp. -/// For example, the `DateTime::` of the Unix Epoch will be converted to a -/// timestamp of 0: `DurationSinceUnixEpoch::ZERO`. -/// -/// # Panics -/// -/// Will panic if the input time overflows the `u64` type. -/// (this will naturally happen in 584.9 billion years) -#[must_use] -pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime) -> DurationSinceUnixEpoch { - DurationSinceUnixEpoch::from_secs(u64::try_from(datetime_utc.timestamp()).expect("Overflow of u64 seconds, very future!")) -} - -/// It converts a timestamp to a `DateTime::`. -/// For example, the timestamp of 0: `DurationSinceUnixEpoch::ZERO` will be -/// converted to the `DateTime::` of the Unix Epoch. -/// -/// # Panics -/// -/// Will panic if the input time overflows the `u64` seconds overflows the `i64` type. 
-/// (this will naturally happen in 292.5 billion years) -#[must_use] -pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) -> DateTime { - DateTime::from_timestamp( - i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), - duration.subsec_nanos(), - ) - .unwrap() -} - -#[cfg(test)] -mod tests { - use std::any::TypeId; - - use crate::shared::clock::{Current, Stopped, Time, Working}; - - #[test] - fn it_should_be_the_stopped_clock_as_default_when_testing() { - // We are testing, so we should default to the fixed time. - assert_eq!(TypeId::of::(), TypeId::of::()); - assert_eq!(Stopped::now(), Current::now()); - } - - #[test] - fn it_should_have_different_times() { - assert_ne!(TypeId::of::(), TypeId::of::()); - assert_ne!(Stopped::now(), Working::now()); - } - - mod timestamp { - use chrono::DateTime; - - use crate::shared::clock::{ - convert_from_datetime_utc_to_timestamp, convert_from_iso_8601_to_timestamp, convert_from_timestamp_to_datetime_utc, - DurationSinceUnixEpoch, - }; - - #[test] - fn should_be_converted_to_datetime_utc() { - let timestamp = DurationSinceUnixEpoch::ZERO; - assert_eq!( - convert_from_timestamp_to_datetime_utc(timestamp), - DateTime::from_timestamp(0, 0).unwrap() - ); - } - - #[test] - fn should_be_converted_from_datetime_utc() { - let datetime = DateTime::from_timestamp(0, 0).unwrap(); - assert_eq!( - convert_from_datetime_utc_to_timestamp(&datetime), - DurationSinceUnixEpoch::ZERO - ); - } - - #[test] - fn should_be_converted_from_datetime_utc_in_iso_8601() { - let iso_8601 = "1970-01-01T00:00:00.000Z".to_string(); - assert_eq!(convert_from_iso_8601_to_timestamp(&iso_8601), DurationSinceUnixEpoch::ZERO); - } - } -} - -mod working_clock { - use std::time::SystemTime; - - use super::{DurationSinceUnixEpoch, Time, TimeNow, Working}; - - impl Time for Working { - fn now() -> DurationSinceUnixEpoch { - SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap() - } - } - - impl 
TimeNow for Working {} -} - -/// Trait for types that can be used as a timestamp clock stopped -/// at a given time. -pub trait StoppedTime: TimeNow { - /// It sets the clock to a given time. - fn local_set(unix_time: &DurationSinceUnixEpoch); - - /// It sets the clock to the Unix Epoch. - fn local_set_to_unix_epoch() { - Self::local_set(&DurationSinceUnixEpoch::ZERO); - } - - /// It sets the clock to the time the application started. - fn local_set_to_app_start_time(); - - /// It sets the clock to the current system time. - fn local_set_to_system_time_now(); - - /// It adds a `Duration` to the clock. - /// - /// # Errors - /// - /// Will return `IntErrorKind` if `duration` would overflow the internal `Duration`. - fn local_add(duration: &Duration) -> Result<(), IntErrorKind>; - - /// It subtracts a `Duration` from the clock. - /// # Errors - /// - /// Will return `IntErrorKind` if `duration` would underflow the internal `Duration`. - fn local_sub(duration: &Duration) -> Result<(), IntErrorKind>; - - /// It resets the clock to default fixed time that is application start time (or the unix epoch when testing). 
- fn local_reset(); -} - -mod stopped_clock { - use std::num::IntErrorKind; - use std::time::Duration; - - use super::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow}; - - impl Time for Stopped { - fn now() -> DurationSinceUnixEpoch { - detail::FIXED_TIME.with(|time| { - return *time.borrow(); - }) - } - } - - impl TimeNow for Stopped {} - - impl StoppedTime for Stopped { - fn local_set(unix_time: &DurationSinceUnixEpoch) { - detail::FIXED_TIME.with(|time| { - *time.borrow_mut() = *unix_time; - }); - } - - fn local_set_to_app_start_time() { - Self::local_set(&detail::get_app_start_time()); - } - - fn local_set_to_system_time_now() { - Self::local_set(&detail::get_app_start_time()); - } - - fn local_add(duration: &Duration) -> Result<(), IntErrorKind> { - detail::FIXED_TIME.with(|time| { - let time_borrowed = *time.borrow(); - *time.borrow_mut() = match time_borrowed.checked_add(*duration) { - Some(time) => time, - None => { - return Err(IntErrorKind::PosOverflow); - } - }; - Ok(()) - }) - } - - fn local_sub(duration: &Duration) -> Result<(), IntErrorKind> { - detail::FIXED_TIME.with(|time| { - let time_borrowed = *time.borrow(); - *time.borrow_mut() = match time_borrowed.checked_sub(*duration) { - Some(time) => time, - None => { - return Err(IntErrorKind::NegOverflow); - } - }; - Ok(()) - }) - } - - fn local_reset() { - Self::local_set(&detail::get_default_fixed_time()); - } - } - - #[cfg(test)] - mod tests { - use std::thread; - use std::time::Duration; - - use crate::shared::clock::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow, Working}; - - #[test] - fn it_should_default_to_zero_when_testing() { - assert_eq!(Stopped::now(), DurationSinceUnixEpoch::ZERO); - } - - #[test] - fn it_should_possible_to_set_the_time() { - // Check we start with ZERO. 
- assert_eq!(Stopped::now(), Duration::ZERO); - - // Set to Current Time and Check - let timestamp = Working::now(); - Stopped::local_set(×tamp); - assert_eq!(Stopped::now(), timestamp); - - // Elapse the Current Time and Check - Stopped::local_add(×tamp).unwrap(); - assert_eq!(Stopped::now(), timestamp + timestamp); - - // Reset to ZERO and Check - Stopped::local_reset(); - assert_eq!(Stopped::now(), Duration::ZERO); - } - - #[test] - fn it_should_default_to_zero_on_thread_exit() { - assert_eq!(Stopped::now(), Duration::ZERO); - let after5 = Working::add(&Duration::from_secs(5)).unwrap(); - Stopped::local_set(&after5); - assert_eq!(Stopped::now(), after5); - - let t = thread::spawn(move || { - // each thread starts out with the initial value of ZERO - assert_eq!(Stopped::now(), Duration::ZERO); - - // and gets set to the current time. - let timestamp = Working::now(); - Stopped::local_set(×tamp); - assert_eq!(Stopped::now(), timestamp); - }); - - // wait for the thread to complete and bail out on panic - t.join().unwrap(); - - // we retain our original value of current time + 5sec despite the child thread - assert_eq!(Stopped::now(), after5); - - // Reset to ZERO and Check - Stopped::local_reset(); - assert_eq!(Stopped::now(), Duration::ZERO); - } - } - - mod detail { - use std::cell::RefCell; - use std::time::SystemTime; - - use crate::shared::clock::{static_time, DurationSinceUnixEpoch}; - - pub fn get_app_start_time() -> DurationSinceUnixEpoch { - (*static_time::TIME_AT_APP_START) - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - } - - #[cfg(not(test))] - pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { - get_app_start_time() - } - - #[cfg(test)] - pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { - DurationSinceUnixEpoch::ZERO - } - - thread_local!(pub static FIXED_TIME: RefCell = RefCell::new(get_default_fixed_time())); - - #[cfg(test)] - mod tests { - use std::time::Duration; - - use 
crate::shared::clock::stopped_clock::detail::{get_app_start_time, get_default_fixed_time}; - - #[test] - fn it_should_get_the_zero_start_time_when_testing() { - assert_eq!(get_default_fixed_time(), Duration::ZERO); - } - - #[test] - fn it_should_get_app_start_time() { - const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1_662_983_731, 22312); - assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST); - } - } - } -} diff --git a/src/shared/clock/utils.rs b/src/shared/clock/utils.rs deleted file mode 100644 index 8b1378917..000000000 --- a/src/shared/clock/utils.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/src/shared/mod.rs b/src/shared/mod.rs index f016ba913..8c95effe1 100644 --- a/src/shared/mod.rs +++ b/src/shared/mod.rs @@ -1,8 +1,6 @@ //! Modules with generic logic used by several modules. //! //! - [`bit_torrent`]: `BitTorrent` protocol related logic. -//! - [`clock`]: Times services. //! - [`crypto`]: Encryption related logic. pub mod bit_torrent; -pub mod clock; pub mod crypto; diff --git a/tests/common/clock.rs b/tests/common/clock.rs new file mode 100644 index 000000000..5d94bb83d --- /dev/null +++ b/tests/common/clock.rs @@ -0,0 +1,16 @@ +use std::time::Duration; + +use torrust_tracker_clock::clock::Time; + +use crate::CurrentClock; + +#[test] +fn it_should_use_stopped_time_for_testing() { + assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned()); + + let time = CurrentClock::now(); + std::thread::sleep(Duration::from_millis(50)); + let time_2 = CurrentClock::now(); + + assert_eq!(time, time_2); +} diff --git a/tests/common/mod.rs b/tests/common/mod.rs index b57996292..281c1fb9c 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,3 +1,4 @@ +pub mod clock; pub mod fixtures; pub mod http; pub mod udp; diff --git a/tests/integration.rs b/tests/integration.rs index 5d66d9074..8e3d46826 100644 --- a/tests/integration.rs +++ b/tests/integration.rs @@ -3,5 +3,18 @@ //! ```text //! cargo test --test integration //! 
``` + +use torrust_tracker_clock::clock; mod common; mod servers; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; From e18cae46e74f2f38bdbd2ee064b3c986c01ed7f6 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 25 Mar 2024 12:12:46 +0800 Subject: [PATCH 0776/1003] dev: torrent repository cleanups --- cSpell.json | 1 + packages/configuration/src/lib.rs | 2 +- packages/primitives/src/announce_event.rs | 2 +- packages/primitives/src/info_hash.rs | 19 + packages/primitives/src/lib.rs | 5 +- packages/primitives/src/pagination.rs | 8 +- packages/primitives/src/peer.rs | 24 +- packages/primitives/src/torrent_metrics.rs | 12 +- packages/torrent-repository/src/entry/mod.rs | 31 +- .../torrent-repository/src/entry/mutex_std.rs | 17 +- .../src/entry/mutex_tokio.rs | 25 +- .../torrent-repository/src/entry/single.rs | 324 +----------------- .../torrent-repository/src/repository/mod.rs | 38 +- .../src/repository/rw_lock_std.rs | 12 +- .../src/repository/rw_lock_std_mutex_std.rs | 10 +- .../src/repository/rw_lock_std_mutex_tokio.rs | 30 +- .../src/repository/rw_lock_tokio.rs | 10 +- .../src/repository/rw_lock_tokio_mutex_std.rs | 10 +- .../repository/rw_lock_tokio_mutex_tokio.rs | 22 +- src/core/databases/mod.rs | 4 +- src/core/databases/mysql.rs | 6 +- src/core/databases/sqlite.rs | 11 +- src/core/mod.rs | 20 +- src/core/torrent/mod.rs | 3 - .../apis/v1/context/stats/resources.rs | 12 +- src/servers/udp/handlers.rs | 63 ++-- 26 files changed, 259 insertions(+), 462 deletions(-) diff --git a/cSpell.json b/cSpell.json index 1e276dbc2..bbcba98a7 100644 --- a/cSpell.json +++ b/cSpell.json @@ -100,6 +100,7 @@ "ostr", "Pando", "peekable", + "peerlist", "proot", "proto", "Quickstart", diff --git a/packages/configuration/src/lib.rs 
b/packages/configuration/src/lib.rs index 549c73a31..ca873f3cd 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -246,7 +246,7 @@ use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; /// The maximum number of returned peers for a torrent. pub const TORRENT_PEERS_LIMIT: usize = 74; -#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] +#[derive(Copy, Clone, Debug, PartialEq, Constructor)] pub struct TrackerPolicy { pub remove_peerless_torrents: bool, pub max_peer_timeout: u32, diff --git a/packages/primitives/src/announce_event.rs b/packages/primitives/src/announce_event.rs index 16e47da99..3bd560084 100644 --- a/packages/primitives/src/announce_event.rs +++ b/packages/primitives/src/announce_event.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; /// Announce events. Described on the /// [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, Serialize, Deserialize)] +#[derive(Hash, Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] pub enum AnnounceEvent { /// The peer has started downloading the torrent. 
Started, diff --git a/packages/primitives/src/info_hash.rs b/packages/primitives/src/info_hash.rs index 46ae6283e..a07cc41a2 100644 --- a/packages/primitives/src/info_hash.rs +++ b/packages/primitives/src/info_hash.rs @@ -1,3 +1,4 @@ +use std::hash::{DefaultHasher, Hash, Hasher}; use std::panic::Location; use thiserror::Error; @@ -77,6 +78,24 @@ impl std::convert::From<&[u8]> for InfoHash { } } +/// for testing +impl std::convert::From<&DefaultHasher> for InfoHash { + fn from(data: &DefaultHasher) -> InfoHash { + let n = data.finish().to_le_bytes(); + InfoHash([ + n[0], n[1], n[2], n[3], n[4], n[5], n[6], n[7], n[0], n[1], n[2], n[3], n[4], n[5], n[6], n[7], n[0], n[1], n[2], + n[3], + ]) + } +} + +impl std::convert::From<&i32> for InfoHash { + fn from(n: &i32) -> InfoHash { + let n = n.to_le_bytes(); + InfoHash([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, n[0], n[1], n[2], n[3]]) + } +} + impl std::convert::From<[u8; 20]> for InfoHash { fn from(val: [u8; 20]) -> Self { InfoHash(val) diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index 664c0c82d..aeb4d0d4e 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -4,6 +4,7 @@ //! which is a `BitTorrent` tracker server. These structures are used not only //! by the tracker server crate, but also by other crates in the Torrust //! ecosystem. +use std::collections::BTreeMap; use std::time::Duration; use info_hash::InfoHash; @@ -38,7 +39,7 @@ pub enum IPVersion { } /// Number of bytes downloaded, uploaded or pending to download (left) by the peer. -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, Serialize, Deserialize)] +#[derive(Hash, Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] pub struct NumberOfBytes(pub i64); /// The database management system used by the tracker. 
@@ -58,7 +59,7 @@ pub enum DatabaseDriver { MySQL, } -pub type PersistentTorrents = Vec<(InfoHash, u32)>; +pub type PersistentTorrents = BTreeMap; /// The mode the tracker will run in. /// diff --git a/packages/primitives/src/pagination.rs b/packages/primitives/src/pagination.rs index ab7dcfe2b..96b5ad662 100644 --- a/packages/primitives/src/pagination.rs +++ b/packages/primitives/src/pagination.rs @@ -1,7 +1,8 @@ +use derive_more::Constructor; use serde::Deserialize; /// A struct to keep information about the page when results are being paginated -#[derive(Deserialize, Copy, Clone, Debug, PartialEq)] +#[derive(Deserialize, Copy, Clone, Debug, PartialEq, Constructor)] pub struct Pagination { /// The page number, starting at 0 pub offset: u32, @@ -10,11 +11,6 @@ pub struct Pagination { } impl Pagination { - #[must_use] - pub fn new(offset: u32, limit: u32) -> Self { - Self { offset, limit } - } - #[must_use] pub fn new_with_options(offset_option: Option, limit_option: Option) -> Self { let offset = match offset_option { diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index 5fb9e525f..f5b009f2a 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -51,7 +51,7 @@ use crate::{ser_unix_time_value, DurationSinceUnixEpoch, IPVersion, NumberOfByte /// event: AnnounceEvent::Started, /// }; /// ``` -#[derive(PartialEq, Eq, Debug, Clone, Serialize, Copy)] +#[derive(Debug, Clone, Serialize, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Peer { /// ID used by the downloader peer pub peer_id: Id, @@ -173,6 +173,16 @@ impl From<[u8; 20]> for Id { } } +impl From for Id { + fn from(number: i32) -> Self { + let peer_id = number.to_le_bytes(); + Id::from([ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], peer_id[2], + peer_id[3], + ]) + } +} + impl TryFrom> for Id { type Error = IdConversionError; @@ -332,7 +342,7 @@ impl FromIterator for Vec

{ } pub mod fixture { - use std::net::SocketAddr; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use super::{Id, Peer}; use crate::announce_event::AnnounceEvent; @@ -396,8 +406,8 @@ pub mod fixture { impl Default for Peer { fn default() -> Self { Self { - peer_id: Id(*b"-qB00000000000000000"), - peer_addr: std::net::SocketAddr::new(std::net::IpAddr::V4(std::net::Ipv4Addr::new(126, 0, 0, 1)), 8080), + peer_id: Id::default(), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), @@ -406,6 +416,12 @@ pub mod fixture { } } } + + impl Default for Id { + fn default() -> Self { + Self(*b"-qB00000000000000000") + } + } } #[cfg(test)] diff --git a/packages/primitives/src/torrent_metrics.rs b/packages/primitives/src/torrent_metrics.rs index c60507171..02de02954 100644 --- a/packages/primitives/src/torrent_metrics.rs +++ b/packages/primitives/src/torrent_metrics.rs @@ -6,20 +6,20 @@ use std::ops::AddAssign; #[derive(Copy, Clone, Debug, PartialEq, Default)] pub struct TorrentsMetrics { /// Total number of seeders for all torrents - pub seeders: u64, + pub complete: u64, /// Total number of peers that have ever completed downloading for all torrents. - pub completed: u64, + pub downloaded: u64, /// Total number of leechers for all torrents. - pub leechers: u64, + pub incomplete: u64, /// Total number of torrents. 
pub torrents: u64, } impl AddAssign for TorrentsMetrics { fn add_assign(&mut self, rhs: Self) { - self.seeders += rhs.seeders; - self.completed += rhs.completed; - self.leechers += rhs.leechers; + self.complete += rhs.complete; + self.downloaded += rhs.downloaded; + self.incomplete += rhs.incomplete; self.torrents += rhs.torrents; } } diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index 11352a8fa..4c39af829 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -1,4 +1,5 @@ use std::fmt::Debug; +use std::net::SocketAddr; use std::sync::Arc; //use serde::{Deserialize, Serialize}; @@ -17,7 +18,7 @@ pub trait Entry { fn get_stats(&self) -> SwarmMetadata; /// Returns True if Still a Valid Entry according to the Tracker Policy - fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool; + fn is_good(&self, policy: &TrackerPolicy) -> bool; /// Returns True if the Peers is Empty fn peers_is_empty(&self) -> bool; @@ -33,7 +34,7 @@ pub trait Entry { /// /// It filters out the input peer, typically because we want to return this /// list of peers to that client peer. - fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec>; + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec>; /// It updates a peer and returns true if the number of complete downloads have increased. 
/// @@ -51,11 +52,11 @@ pub trait Entry { #[allow(clippy::module_name_repetitions)] pub trait EntrySync { fn get_stats(&self) -> SwarmMetadata; - fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool; + fn is_good(&self, policy: &TrackerPolicy) -> bool; fn peers_is_empty(&self) -> bool; fn get_peers_len(&self) -> usize; fn get_peers(&self, limit: Option) -> Vec>; - fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec>; + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec>; fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool; fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata); fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); @@ -63,16 +64,14 @@ pub trait EntrySync { #[allow(clippy::module_name_repetitions)] pub trait EntryAsync { - fn get_stats(self) -> impl std::future::Future + Send; - - #[allow(clippy::wrong_self_convention)] - fn is_not_zombie(self, policy: &TrackerPolicy) -> impl std::future::Future + Send; - fn peers_is_empty(self) -> impl std::future::Future + Send; - fn get_peers_len(self) -> impl std::future::Future + Send; - fn get_peers(self, limit: Option) -> impl std::future::Future>> + Send; - fn get_peers_for_peer( - self, - client: &peer::Peer, + fn get_stats(&self) -> impl std::future::Future + Send; + fn check_good(self, policy: &TrackerPolicy) -> impl std::future::Future + Send; + fn peers_is_empty(&self) -> impl std::future::Future + Send; + fn get_peers_len(&self) -> impl std::future::Future + Send; + fn get_peers(&self, limit: Option) -> impl std::future::Future>> + Send; + fn get_peers_for_client( + &self, + client: &SocketAddr, limit: Option, ) -> impl std::future::Future>> + Send; fn insert_or_update_peer(self, peer: &peer::Peer) -> impl std::future::Future + Send; @@ -88,11 +87,11 @@ pub trait EntryAsync { /// This is the tracker entry for a given torrent and contains the swarm data, /// that's the list of all the peers trying 
to download the same torrent. /// The tracker keeps one entry like this for every torrent. -#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Torrent { /// The swarm: a network of peers that are all trying to download the torrent associated to this entry // #[serde(skip)] pub(crate) peers: std::collections::BTreeMap>, /// The number of peers that have ever completed downloading the torrent associated to this entry - pub(crate) completed: u32, + pub(crate) downloaded: u32, } diff --git a/packages/torrent-repository/src/entry/mutex_std.rs b/packages/torrent-repository/src/entry/mutex_std.rs index df6228317..b4b823909 100644 --- a/packages/torrent-repository/src/entry/mutex_std.rs +++ b/packages/torrent-repository/src/entry/mutex_std.rs @@ -1,3 +1,4 @@ +use std::net::SocketAddr; use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; @@ -5,15 +6,15 @@ use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use super::{Entry, EntrySync}; -use crate::EntryMutexStd; +use crate::{EntryMutexStd, EntrySingle}; impl EntrySync for EntryMutexStd { fn get_stats(&self) -> SwarmMetadata { self.lock().expect("it should get a lock").get_stats() } - fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { - self.lock().expect("it should get a lock").is_not_zombie(policy) + fn is_good(&self, policy: &TrackerPolicy) -> bool { + self.lock().expect("it should get a lock").is_good(policy) } fn peers_is_empty(&self) -> bool { @@ -28,8 +29,8 @@ impl EntrySync for EntryMutexStd { self.lock().expect("it should get lock").get_peers(limit) } - fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { - self.lock().expect("it should get lock").get_peers_for_peer(client, limit) + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers_for_client(client, 
limit) } fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool { @@ -48,3 +49,9 @@ impl EntrySync for EntryMutexStd { .remove_inactive_peers(current_cutoff); } } + +impl From for EntryMutexStd { + fn from(entry: EntrySingle) -> Self { + Arc::new(std::sync::Mutex::new(entry)) + } +} diff --git a/packages/torrent-repository/src/entry/mutex_tokio.rs b/packages/torrent-repository/src/entry/mutex_tokio.rs index c4d13fb43..34f4a4e92 100644 --- a/packages/torrent-repository/src/entry/mutex_tokio.rs +++ b/packages/torrent-repository/src/entry/mutex_tokio.rs @@ -1,3 +1,4 @@ +use std::net::SocketAddr; use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; @@ -5,31 +6,31 @@ use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use super::{Entry, EntryAsync}; -use crate::EntryMutexTokio; +use crate::{EntryMutexTokio, EntrySingle}; impl EntryAsync for EntryMutexTokio { - async fn get_stats(self) -> SwarmMetadata { + async fn get_stats(&self) -> SwarmMetadata { self.lock().await.get_stats() } - async fn is_not_zombie(self, policy: &TrackerPolicy) -> bool { - self.lock().await.is_not_zombie(policy) + async fn check_good(self, policy: &TrackerPolicy) -> bool { + self.lock().await.is_good(policy) } - async fn peers_is_empty(self) -> bool { + async fn peers_is_empty(&self) -> bool { self.lock().await.peers_is_empty() } - async fn get_peers_len(self) -> usize { + async fn get_peers_len(&self) -> usize { self.lock().await.get_peers_len() } - async fn get_peers(self, limit: Option) -> Vec> { + async fn get_peers(&self, limit: Option) -> Vec> { self.lock().await.get_peers(limit) } - async fn get_peers_for_peer(self, client: &peer::Peer, limit: Option) -> Vec> { - self.lock().await.get_peers_for_peer(client, limit) + async fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.lock().await.get_peers_for_client(client, limit) } async fn insert_or_update_peer(self, 
peer: &peer::Peer) -> bool { @@ -44,3 +45,9 @@ impl EntryAsync for EntryMutexTokio { self.lock().await.remove_inactive_peers(current_cutoff); } } + +impl From for EntryMutexTokio { + fn from(entry: EntrySingle) -> Self { + Arc::new(tokio::sync::Mutex::new(entry)) + } +} diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs index 85fdc6cf0..c1041e9a2 100644 --- a/packages/torrent-repository/src/entry/single.rs +++ b/packages/torrent-repository/src/entry/single.rs @@ -1,3 +1,4 @@ +use std::net::SocketAddr; use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; @@ -16,14 +17,14 @@ impl Entry for EntrySingle { let incomplete: u32 = self.peers.len() as u32 - complete; SwarmMetadata { - downloaded: self.completed, + downloaded: self.downloaded, complete, incomplete, } } - fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { - if policy.persistent_torrent_completed_stat && self.completed > 0 { + fn is_good(&self, policy: &TrackerPolicy) -> bool { + if policy.persistent_torrent_completed_stat && self.downloaded > 0 { return true; } @@ -48,13 +49,13 @@ impl Entry for EntrySingle { } } - fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { match limit { Some(limit) => self .peers .values() // Take peers which are not the client peer - .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != peer::ReadInfo::get_address(client)) + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *client) // Limit the number of peers on the result .take(limit) .cloned() @@ -63,25 +64,25 @@ impl Entry for EntrySingle { .peers .values() // Take peers which are not the client peer - .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != peer::ReadInfo::get_address(client)) + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *client) .cloned() .collect(), } } fn insert_or_update_peer(&mut 
self, peer: &peer::Peer) -> bool { - let mut did_torrent_stats_change: bool = false; + let mut downloaded_stats_updated: bool = false; match peer::ReadInfo::get_event(peer) { AnnounceEvent::Stopped => { drop(self.peers.remove(&peer::ReadInfo::get_id(peer))); } AnnounceEvent::Completed => { - let peer_old = self.peers.insert(peer::ReadInfo::get_id(peer), Arc::new(*peer)); + let previous = self.peers.insert(peer::ReadInfo::get_id(peer), Arc::new(*peer)); // Don't count if peer was not previously known and not already completed. - if peer_old.is_some_and(|p| p.event != AnnounceEvent::Completed) { - self.completed += 1; - did_torrent_stats_change = true; + if previous.is_some_and(|p| p.event != AnnounceEvent::Completed) { + self.downloaded += 1; + downloaded_stats_updated = true; } } _ => { @@ -89,7 +90,7 @@ impl Entry for EntrySingle { } } - did_torrent_stats_change + downloaded_stats_updated } fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata) { @@ -103,302 +104,3 @@ impl Entry for EntrySingle { .retain(|_, peer| peer::ReadInfo::get_updated(peer) > current_cutoff); } } - -#[cfg(test)] -mod tests { - mod torrent_entry { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::ops::Sub; - use std::sync::Arc; - use std::time::Duration; - - use torrust_tracker_clock::clock::stopped::Stopped as _; - use torrust_tracker_clock::clock::{self, Time}; - use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; - use torrust_tracker_primitives::announce_event::AnnounceEvent; - use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; - - use crate::entry::Entry; - use crate::{CurrentClock, EntrySingle}; - - struct TorrentPeerBuilder { - peer: peer::Peer, - } - - impl TorrentPeerBuilder { - pub fn default() -> TorrentPeerBuilder { - let default_peer = peer::Peer { - peer_id: peer::Id([0u8; 20]), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), - updated: CurrentClock::now(), - uploaded: 
NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - TorrentPeerBuilder { peer: default_peer } - } - - pub fn with_event_completed(mut self) -> Self { - self.peer.event = AnnounceEvent::Completed; - self - } - - pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } - - pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { - self.peer.peer_id = peer_id; - self - } - - pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes(left); - self - } - - pub fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { - self.peer.updated = updated; - self - } - - pub fn into(self) -> peer::Peer { - self.peer - } - } - - /// A torrent seeder is a peer with 0 bytes left to download which - /// has not announced it has stopped - fn a_torrent_seeder() -> peer::Peer { - TorrentPeerBuilder::default() - .with_number_of_bytes_left(0) - .with_event_completed() - .into() - } - - /// A torrent leecher is a peer that is not a seeder. 
- /// Leecher: left > 0 OR event = Stopped - fn a_torrent_leecher() -> peer::Peer { - TorrentPeerBuilder::default() - .with_number_of_bytes_left(1) - .with_event_completed() - .into() - } - - #[test] - fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = EntrySingle::default(); - - assert_eq!(torrent_entry.get_peers(None).len(), 0); - } - - #[test] - fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = EntrySingle::default(); - let torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - assert_eq!(*torrent_entry.get_peers(None)[0], torrent_peer); - assert_eq!(torrent_entry.get_peers(None).len(), 1); - } - - #[test] - fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = EntrySingle::default(); - let torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - assert_eq!(torrent_entry.get_peers(None), vec![Arc::new(torrent_peer)]); - } - - #[test] - fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = EntrySingle::default(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Completed; // Update the peer - torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert_eq!(torrent_entry.get_peers(None)[0].event, AnnounceEvent::Completed); - } - - #[test] - fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = EntrySingle::default(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Stopped; // Update the peer - 
torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert_eq!(torrent_entry.get_peers(None).len(), 0); - } - - #[test] - fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = EntrySingle::default(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Completed; // Update the peer - let stats_have_changed = torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert!(stats_have_changed); - } - - #[test] - fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( - ) { - let mut torrent_entry = EntrySingle::default(); - let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); - - // Add a peer that did not exist before in the entry - let torrent_stats_have_not_changed = !torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); - - assert!(torrent_stats_have_not_changed); - } - - #[test] - fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() - { - let mut torrent_entry = EntrySingle::default(); - let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add peer - - // Get peers excluding the one we have just added - let peers = torrent_entry.get_peers_for_peer(&torrent_peer, None); - - assert_eq!(peers.len(), 0); - } - - #[test] - fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { - let mut torrent_entry = EntrySingle::default(); - - let peer_ip = 
IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); - - // Add peer 1 - let torrent_peer_1 = TorrentPeerBuilder::default() - .with_peer_address(SocketAddr::new(peer_ip, 8080)) - .into(); - torrent_entry.insert_or_update_peer(&torrent_peer_1); - - // Add peer 2 - let torrent_peer_2 = TorrentPeerBuilder::default() - .with_peer_address(SocketAddr::new(peer_ip, 8081)) - .into(); - torrent_entry.insert_or_update_peer(&torrent_peer_2); - - // Get peers for peer 1 - let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1, None); - - // The peer 2 using the same IP but different port should be included - assert_eq!(peers[0].peer_addr.ip(), Ipv4Addr::new(127, 0, 0, 1)); - assert_eq!(peers[0].peer_addr.port(), 8081); - } - - fn peer_id_from_i32(number: i32) -> peer::Id { - let peer_id = number.to_le_bytes(); - peer::Id([ - 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], - peer_id[2], peer_id[3], - ]) - } - - #[test] - fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = EntrySingle::default(); - - // We add one more peer than the scrape limit - for peer_number in 1..=74 + 1 { - let torrent_peer = TorrentPeerBuilder::default() - .with_peer_id(peer_id_from_i32(peer_number)) - .into(); - torrent_entry.insert_or_update_peer(&torrent_peer); - } - - let peers = torrent_entry.get_peers(Some(TORRENT_PEERS_LIMIT)); - - assert_eq!(peers.len(), 74); - } - - #[test] - fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = EntrySingle::default(); - let torrent_seeder = a_torrent_seeder(); - - torrent_entry.insert_or_update_peer(&torrent_seeder); // Add seeder - - assert_eq!(torrent_entry.get_stats().complete, 1); - } - - #[test] - fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut torrent_entry = EntrySingle::default(); - let torrent_leecher = a_torrent_leecher(); - - 
torrent_entry.insert_or_update_peer(&torrent_leecher); // Add leecher - - assert_eq!(torrent_entry.get_stats().incomplete, 1); - } - - #[test] - fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( - ) { - let mut torrent_entry = EntrySingle::default(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - - // Announce "Completed" torrent download event. - torrent_peer.event = AnnounceEvent::Completed; - torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer - - let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().complete; - - assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); - } - - #[test] - fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = EntrySingle::default(); - let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); - - // Announce "Completed" torrent download event. - // It's the first event announced from this peer. 
- torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); // Add the peer - - let number_of_peers_with_completed_torrent = torrent_entry.get_stats().downloaded; - - assert_eq!(number_of_peers_with_completed_torrent, 0); - } - - #[test] - fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = EntrySingle::default(); - - let timeout = 120u32; - - let now = clock::Working::now(); - clock::Stopped::local_set(&now); - - let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); - let inactive_peer = TorrentPeerBuilder::default() - .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) - .into(); - torrent_entry.insert_or_update_peer(&inactive_peer); // Add the peer - - let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(timeout))).unwrap_or_default(); - torrent_entry.remove_inactive_peers(current_cutoff); - - assert_eq!(torrent_entry.get_peers_len(), 0); - } - } -} diff --git a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs index b46771163..494040c9d 100644 --- a/packages/torrent-repository/src/repository/mod.rs +++ b/packages/torrent-repository/src/repository/mod.rs @@ -12,7 +12,9 @@ pub mod rw_lock_tokio; pub mod rw_lock_tokio_mutex_std; pub mod rw_lock_tokio_mutex_tokio; -pub trait Repository: Default + 'static { +use std::fmt::Debug; + +pub trait Repository: Debug + Default + Sized + 'static { fn get(&self, key: &InfoHash) -> Option; fn get_metrics(&self) -> TorrentsMetrics; fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, T)>; @@ -24,7 +26,7 @@ pub trait Repository: Default + 'static { } #[allow(clippy::module_name_repetitions)] -pub trait RepositoryAsync: Default + 'static { +pub trait RepositoryAsync: Debug + Default + Sized + 'static { fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; fn get_metrics(&self) -> impl std::future::Future 
+ Send; fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future> + Send; @@ -39,12 +41,36 @@ pub trait RepositoryAsync: Default + 'static { ) -> impl std::future::Future + Send; } -#[derive(Default)] +#[derive(Default, Debug)] +pub struct RwLockStd { + torrents: std::sync::RwLock>, +} + +#[derive(Default, Debug)] pub struct RwLockTokio { torrents: tokio::sync::RwLock>, } -#[derive(Default)] -pub struct RwLockStd { - torrents: std::sync::RwLock>, +impl RwLockStd { + /// # Panics + /// + /// Panics if unable to get a lock. + pub fn write( + &self, + ) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { + self.torrents.write().expect("it should get lock") + } +} + +impl RwLockTokio { + pub fn write( + &self, + ) -> impl std::future::Future< + Output = tokio::sync::RwLockWriteGuard< + '_, + std::collections::BTreeMap, + >, + > { + self.torrents.write() + } } diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs index bacef623d..9d7f29416 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std.rs @@ -49,9 +49,9 @@ where for entry in self.get_torrents().values() { let stats = entry.get_stats(); - metrics.seeders += u64::from(stats.complete); - metrics.completed += u64::from(stats.downloaded); - metrics.leechers += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); metrics.torrents += 1; } @@ -75,7 +75,7 @@ where fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { let mut torrents = self.get_torrents_mut(); - for (info_hash, completed) in persistent_torrents { + for (info_hash, downloaded) in persistent_torrents { // Skip if torrent entry already exists if torrents.contains_key(info_hash) { continue; @@ -83,7 +83,7 @@ where let 
entry = EntrySingle { peers: BTreeMap::default(), - completed: *completed, + downloaded: *downloaded, }; torrents.insert(*info_hash, entry); @@ -107,6 +107,6 @@ where fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { let mut db = self.get_torrents_mut(); - db.retain(|_, e| e.is_not_zombie(policy)); + db.retain(|_, e| e.is_good(policy)); } } diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs index 9fca82ba8..0b65234e3 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs @@ -57,9 +57,9 @@ where for entry in self.get_torrents().values() { let stats = entry.lock().expect("it should get a lock").get_stats(); - metrics.seeders += u64::from(stats.complete); - metrics.completed += u64::from(stats.downloaded); - metrics.leechers += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); metrics.torrents += 1; } @@ -92,7 +92,7 @@ where let entry = EntryMutexStd::new( EntrySingle { peers: BTreeMap::default(), - completed: *completed, + downloaded: *completed, } .into(), ); @@ -118,6 +118,6 @@ where fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { let mut db = self.get_torrents_mut(); - db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + db.retain(|_, e| e.lock().expect("it should lock entry").is_good(policy)); } } diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs index b9fb54469..5394abb6a 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs @@ -1,4 +1,5 @@ use std::collections::BTreeMap; 
+use std::iter::zip; use std::pin::Pin; use std::sync::Arc; @@ -75,9 +76,9 @@ where for entry in entries { let stats = entry.lock().await.get_stats(); - metrics.seeders += u64::from(stats.complete); - metrics.completed += u64::from(stats.downloaded); - metrics.leechers += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); metrics.torrents += 1; } @@ -96,7 +97,7 @@ where let entry = EntryMutexTokio::new( EntrySingle { peers: BTreeMap::default(), - completed: *completed, + downloaded: *completed, } .into(), ); @@ -124,8 +125,27 @@ where } async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let handles: Vec> + Send>>>; + + { + let db = self.get_torrents(); + + handles = zip(db.keys().copied(), db.values().cloned()) + .map(|(infohash, torrent)| { + torrent + .check_good(policy) + .map(move |good| if good { None } else { Some(infohash) }) + .boxed() + }) + .collect::>(); + } + + let not_good = join_all(handles).await; + let mut db = self.get_torrents_mut(); - db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + for remove in not_good.into_iter().flatten() { + drop(db.remove(&remove)); + } } } diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs index d0b7ec751..fa84e2451 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs @@ -64,9 +64,9 @@ where for entry in self.get_torrents().await.values() { let stats = entry.get_stats(); - metrics.seeders += u64::from(stats.complete); - metrics.completed += u64::from(stats.downloaded); - metrics.leechers += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); metrics.torrents += 1; } 
@@ -84,7 +84,7 @@ where let entry = EntrySingle { peers: BTreeMap::default(), - completed: *completed, + downloaded: *completed, }; torrents.insert(*info_hash, entry); @@ -108,6 +108,6 @@ where async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { let mut db = self.get_torrents_mut().await; - db.retain(|_, e| e.is_not_zombie(policy)); + db.retain(|_, e| e.is_good(policy)); } } diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs index f800d2001..fbbc51a09 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs @@ -72,9 +72,9 @@ where for entry in self.get_torrents().await.values() { let stats = entry.get_stats(); - metrics.seeders += u64::from(stats.complete); - metrics.completed += u64::from(stats.downloaded); - metrics.leechers += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); metrics.torrents += 1; } @@ -93,7 +93,7 @@ where let entry = EntryMutexStd::new( EntrySingle { peers: BTreeMap::default(), - completed: *completed, + downloaded: *completed, } .into(), ); @@ -119,6 +119,6 @@ where async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { let mut db = self.get_torrents_mut().await; - db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + db.retain(|_, e| e.lock().expect("it should lock entry").is_good(policy)); } } diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs index 7ce2cc74c..bc7fd61e8 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -70,11 +70,11 
@@ where async fn get_metrics(&self) -> TorrentsMetrics { let mut metrics = TorrentsMetrics::default(); - for entry in self.get_torrents().await.values().cloned() { + for entry in self.get_torrents().await.values() { let stats = entry.get_stats().await; - metrics.seeders += u64::from(stats.complete); - metrics.completed += u64::from(stats.downloaded); - metrics.leechers += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); metrics.torrents += 1; } @@ -93,7 +93,7 @@ where let entry = EntryMutexTokio::new( EntrySingle { peers: BTreeMap::default(), - completed: *completed, + downloaded: *completed, } .into(), ); @@ -119,6 +119,16 @@ where async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { let mut db = self.get_torrents_mut().await; - db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + let mut not_good = Vec::::default(); + + for (&infohash, torrent) in db.iter() { + if !torrent.clone().check_good(policy).await { + not_good.push(infohash); + } + } + + for remove in not_good { + drop(db.remove(&remove)); + } } } diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs index 20a45cf83..c08aed76a 100644 --- a/src/core/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -117,9 +117,9 @@ pub trait Database: Sync + Send { /// /// It returns an array of tuples with the torrent /// [`InfoHash`] and the - /// [`completed`](torrust_tracker_torrent_repository::entry::Torrent::completed) counter + /// [`downloaded`](torrust_tracker_torrent_repository::entry::Torrent::downloaded) counter /// which is the number of times the torrent has been downloaded. - /// See [`Entry::completed`](torrust_tracker_torrent_repository::entry::Torrent::completed). + /// See [`Entry::downloaded`](torrust_tracker_torrent_repository::entry::Torrent::downloaded). 
/// /// # Context: Torrent Metrics /// diff --git a/src/core/databases/mysql.rs b/src/core/databases/mysql.rs index e37cdd9bf..ca95fa0b9 100644 --- a/src/core/databases/mysql.rs +++ b/src/core/databases/mysql.rs @@ -9,7 +9,7 @@ use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; use torrust_tracker_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::DatabaseDriver; +use torrust_tracker_primitives::{DatabaseDriver, PersistentTorrents}; use super::{Database, Error}; use crate::core::auth::{self, Key}; @@ -105,7 +105,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). - async fn load_persistent_torrents(&self) -> Result, Error> { + async fn load_persistent_torrents(&self) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let torrents = conn.query_map( @@ -116,7 +116,7 @@ impl Database for Mysql { }, )?; - Ok(torrents) + Ok(torrents.iter().copied().collect()) } /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). diff --git a/src/core/databases/sqlite.rs b/src/core/databases/sqlite.rs index 5a3ac144a..53a01f80c 100644 --- a/src/core/databases/sqlite.rs +++ b/src/core/databases/sqlite.rs @@ -6,7 +6,7 @@ use async_trait::async_trait; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; use torrust_tracker_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::{DatabaseDriver, DurationSinceUnixEpoch}; +use torrust_tracker_primitives::{DatabaseDriver, DurationSinceUnixEpoch, PersistentTorrents}; use super::{Database, Error}; use crate::core::auth::{self, Key}; @@ -89,7 +89,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). 
- async fn load_persistent_torrents(&self) -> Result, Error> { + async fn load_persistent_torrents(&self) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; @@ -101,12 +101,7 @@ impl Database for Sqlite { Ok((info_hash, completed)) })?; - //torrent_iter?; - //let torrent_iter = torrent_iter.unwrap(); - - let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(std::result::Result::ok).collect(); - - Ok(torrents) + Ok(torrent_iter.filter_map(std::result::Result::ok).collect()) } /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). diff --git a/src/core/mod.rs b/src/core/mod.rs index 21cd1b501..6628426c1 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -684,7 +684,7 @@ impl Tracker { fn get_torrent_peers_for_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> Vec> { match self.torrents.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers_for_peer(peer, Some(TORRENT_PEERS_LIMIT)), + Some(entry) => entry.get_peers_for_client(&peer.peer_addr, Some(TORRENT_PEERS_LIMIT)), } } @@ -1115,9 +1115,9 @@ mod tests { assert_eq!( torrents_metrics, TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 0, + complete: 0, + downloaded: 0, + incomplete: 0, torrents: 0 } ); @@ -1164,9 +1164,9 @@ mod tests { assert_eq!( torrent_metrics, TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 1, + complete: 0, + downloaded: 0, + incomplete: 1, torrents: 1, } ); @@ -1191,9 +1191,9 @@ mod tests { assert_eq!( (torrent_metrics), (TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 1_000_000, + complete: 0, + downloaded: 0, + incomplete: 1_000_000, torrents: 1_000_000, }), "{result_a:?} {result_b:?}" diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index 2b3f9cbf7..ab78de683 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -29,6 +29,3 @@ use 
torrust_tracker_torrent_repository::TorrentsRwLockStdMutexStd; pub type Torrents = TorrentsRwLockStdMutexStd; // Currently Used - -#[cfg(test)] -mod tests {} diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index 48ac660cf..9e8ab6bab 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -50,9 +50,9 @@ impl From for Stats { fn from(metrics: TrackerMetrics) -> Self { Self { torrents: metrics.torrents_metrics.torrents, - seeders: metrics.torrents_metrics.seeders, - completed: metrics.torrents_metrics.completed, - leechers: metrics.torrents_metrics.leechers, + seeders: metrics.torrents_metrics.complete, + completed: metrics.torrents_metrics.downloaded, + leechers: metrics.torrents_metrics.incomplete, tcp4_connections_handled: metrics.protocol_metrics.tcp4_connections_handled, tcp4_announces_handled: metrics.protocol_metrics.tcp4_announces_handled, tcp4_scrapes_handled: metrics.protocol_metrics.tcp4_scrapes_handled, @@ -82,9 +82,9 @@ mod tests { assert_eq!( Stats::from(TrackerMetrics { torrents_metrics: TorrentsMetrics { - seeders: 1, - completed: 2, - leechers: 3, + complete: 1, + downloaded: 2, + incomplete: 3, torrents: 4 }, protocol_metrics: Metrics { diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 59aec0ff3..2d5038ec3 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -320,7 +320,6 @@ mod tests { use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::{peer, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; @@ -368,39 +367,41 @@ mod tests { SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) } - struct TorrentPeerBuilder { + #[derive(Debug, Default)] + pub struct TorrentPeerBuilder { peer: peer::Peer, } impl TorrentPeerBuilder 
{ - pub fn default() -> TorrentPeerBuilder { - let default_peer = peer::Peer { - peer_id: peer::Id([255u8; 20]), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: CurrentClock::now(), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - TorrentPeerBuilder { peer: default_peer } + #[must_use] + pub fn new() -> Self { + Self { + peer: peer::Peer { + updated: CurrentClock::now(), + ..Default::default() + }, + } } - pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { - self.peer.peer_id = peer_id; + #[must_use] + pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; self } - pub fn with_peer_addr(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; + #[must_use] + pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { + self.peer.peer_id = peer_id; self } - pub fn with_bytes_left(mut self, left: i64) -> Self { + #[must_use] + pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { self.peer.left = NumberOfBytes(left); self } + #[must_use] pub fn into(self) -> peer::Peer { self.peer } @@ -640,9 +641,9 @@ mod tests { let peers = tracker.get_torrent_peers(&info_hash.0.into()); - let expected_peer = TorrentPeerBuilder::default() + let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip), client_port)) + .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip), client_port)) .into(); assert_eq!(peers[0], Arc::new(expected_peer)); @@ -712,9 +713,9 @@ mod tests { let client_port = 8080; let peer_id = AquaticPeerId([255u8; 20]); - let peer_using_ipv6 = TorrentPeerBuilder::default() + let peer_using_ipv6 = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + 
.with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); tracker @@ -808,9 +809,9 @@ mod tests { let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); - let expected_peer = TorrentPeerBuilder::default() + let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) + .with_peer_address(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) .into(); assert_eq!(peers[0], Arc::new(expected_peer)); @@ -863,9 +864,9 @@ mod tests { let peers = tracker.get_torrent_peers(&info_hash.0.into()); - let expected_peer = TorrentPeerBuilder::default() + let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); assert_eq!(peers[0], Arc::new(expected_peer)); @@ -938,9 +939,9 @@ mod tests { let client_port = 8080; let peer_id = AquaticPeerId([255u8; 20]); - let peer_using_ipv4 = TorrentPeerBuilder::default() + let peer_using_ipv4 = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) + .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); tracker @@ -1112,10 +1113,10 @@ mod tests { async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { let peer_id = peer::Id([255u8; 20]); - let peer = TorrentPeerBuilder::default() + let peer = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(*remote_addr) - .with_bytes_left(0) + .with_peer_address(*remote_addr) + .with_number_of_bytes_left(0) .into(); tracker From 3414e2abea16ff79a1150aa432c6563612735d79 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 25 Mar 2024 12:13:09 +0800 Subject: [PATCH 0777/1003] dev: torrent 
repository tests --- Cargo.lock | 419 ++++++++++++++- packages/torrent-repository/Cargo.toml | 1 + .../torrent-repository/tests/common/mod.rs | 3 + .../torrent-repository/tests/common/repo.rs | 147 +++++ .../tests/common/torrent.rs | 89 ++++ .../tests/common/torrent_peer_builder.rs | 88 +++ .../torrent-repository/tests/entry/mod.rs | 433 +++++++++++++++ .../torrent-repository/tests/integration.rs | 22 + .../tests/repository/mod.rs | 504 ++++++++++++++++++ 9 files changed, 1700 insertions(+), 6 deletions(-) create mode 100644 packages/torrent-repository/tests/common/mod.rs create mode 100644 packages/torrent-repository/tests/common/repo.rs create mode 100644 packages/torrent-repository/tests/common/torrent.rs create mode 100644 packages/torrent-repository/tests/common/torrent_peer_builder.rs create mode 100644 packages/torrent-repository/tests/entry/mod.rs create mode 100644 packages/torrent-repository/tests/integration.rs create mode 100644 packages/torrent-repository/tests/repository/mod.rs diff --git a/Cargo.lock b/Cargo.lock index e28278abb..0bdd83b9b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -167,6 +167,40 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f28243a43d821d11341ab73c80bed182dc015c514b951616cf79bd4af39af0c3" +dependencies = [ + "concurrent-queue", + "event-listener 5.2.0", + "event-listener-strategy 0.5.0", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-compression" version = "0.4.6" @@ -183,6 +217,128 @@ dependencies = [ "zstd-safe", ] +[[package]] +name = "async-executor" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" +dependencies = [ + "async-lock 3.3.0", + "async-task", + "concurrent-queue", + "fastrand 2.0.1", + "futures-lite 2.3.0", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" +dependencies = [ + "async-channel 2.2.0", + "async-executor", + "async-io 2.3.2", + "async-lock 3.3.0", + "blocking", + "futures-lite 2.3.0", + "once_cell", + "tokio", +] + +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock 2.8.0", + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-lite 1.13.0", + "log", + "parking", + "polling 2.8.0", + "rustix 0.37.27", + "slab", + "socket2 0.4.10", + "waker-fn", +] + +[[package]] +name = "async-io" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcccb0f599cfa2f8ace422d3555572f47424da5648a4382a9dd0310ff8210884" +dependencies = [ + "async-lock 3.3.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.3.0", + "parking", + "polling 3.6.0", + "rustix 0.38.32", + "slab", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" +dependencies = [ + "event-listener 4.0.3", + "event-listener-strategy 0.4.0", + "pin-project-lite", +] + +[[package]] +name = "async-std" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +dependencies = [ + "async-attributes", + "async-channel 1.9.0", + "async-global-executor", + "async-io 1.13.0", + "async-lock 2.8.0", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite 1.13.0", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + +[[package]] +name = "async-task" +version = "4.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" + [[package]] name = "async-trait" version = "0.1.78" @@ -194,6 +350,12 @@ dependencies = [ "syn 2.0.53", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "autocfg" version = "1.1.0" @@ -418,6 +580,22 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blocking" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +dependencies = [ + "async-channel 2.2.0", + "async-lock 3.3.0", + "async-task", + "fastrand 2.0.1", + "futures-io", + "futures-lite 2.3.0", + "piper", + "tracing", +] + 
[[package]] name = "borsh" version = "1.3.1" @@ -662,6 +840,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "concurrent-queue" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "config" version = "0.14.0" @@ -996,6 +1183,54 @@ dependencies = [ "version_check", ] +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "event-listener" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.3", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "feedafcaa9b749175d5ac357452a9d41ea2911da598fde46ce1fe02c37751291" +dependencies = [ + "event-listener 5.2.0", + "pin-project-lite", +] + [[package]] name = "fallible-iterator" version = "0.3.0" @@ -1008,6 +1243,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + [[package]] name = "fastrand" version = "2.0.1" @@ -1186,6 +1430,34 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + +[[package]] +name = "futures-lite" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "fastrand 2.0.1", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + [[package]] name = "futures-macro" version = "0.3.30" @@ -1266,6 +1538,18 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "h2" version = "0.4.3" @@ -1458,7 +1742,7 @@ dependencies = [ "http-body", "hyper", "pin-project-lite", - "socket2", + "socket2 0.5.6", "tokio", "tower", "tower-service", @@ -1526,6 +1810,15 @@ dependencies = [ "serde", ] +[[package]] +name = "instant" +version = 
"0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + [[package]] name = "io-enum" version = "1.1.3" @@ -1535,6 +1828,17 @@ dependencies = [ "derive_utils", ] +[[package]] +name = "io-lifetimes" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "ipnet" version = "2.9.0" @@ -1605,6 +1909,15 @@ dependencies = [ "serde", ] +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -1734,6 +2047,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linux-raw-sys" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + [[package]] name = "linux-raw-sys" version = "0.4.13" @@ -1767,6 +2086,9 @@ name = "log" version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +dependencies = [ + "value-bag", +] [[package]] name = "lru" @@ -1878,7 +2200,7 @@ dependencies = [ "percent-encoding", "serde", "serde_json", - "socket2", + "socket2 0.5.6", "twox-hash", "url", ] @@ -2127,6 +2449,12 @@ dependencies = [ "hashbrown 0.13.2", ] +[[package]] +name = "parking" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" + [[package]] name = "parking_lot" version = "0.12.1" @@ -2287,6 +2615,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand 2.0.1", + "futures-io", +] + [[package]] name = "pkg-config" version = "0.3.30" @@ -2321,6 +2660,37 @@ dependencies = [ "plotters-backend", ] +[[package]] +name = "polling" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ + "autocfg", + "bitflags 1.3.2", + "cfg-if", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", +] + +[[package]] +name = "polling" +version = "3.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0c976a60b2d7e99d6f229e414670a9b85d13ac305cc6d1e9c134de58c5aaaf6" +dependencies = [ + "cfg-if", + "concurrent-queue", + "hermit-abi", + "pin-project-lite", + "rustix 0.38.32", + "tracing", + "windows-sys 0.52.0", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -2778,6 +3148,20 @@ dependencies = [ "semver", ] +[[package]] +name = "rustix" +version = "0.37.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] + [[package]] name = "rustix" version = "0.38.32" @@ -2787,7 +3171,7 @@ dependencies = [ "bitflags 2.5.0", "errno", "libc", - "linux-raw-sys", + "linux-raw-sys 0.4.13", "windows-sys 
0.52.0", ] @@ -3133,6 +3517,16 @@ version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +[[package]] +name = "socket2" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "socket2" version = "0.5.6" @@ -3268,8 +3662,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", - "fastrand", - "rustix", + "fastrand 2.0.1", + "rustix 0.38.32", "windows-sys 0.52.0", ] @@ -3386,7 +3780,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.5.6", "tokio-macros", "windows-sys 0.48.0", ] @@ -3610,6 +4004,7 @@ dependencies = [ name = "torrust-tracker-torrent-repository" version = "3.0.0-alpha.12-develop" dependencies = [ + "async-std", "criterion", "futures", "rstest", @@ -3801,6 +4196,12 @@ dependencies = [ "rand", ] +[[package]] +name = "value-bag" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74797339c3b98616c009c7c3eb53a0ce41e85c8ec66bd3db96ed132d20cfdee8" + [[package]] name = "vcpkg" version = "0.2.15" @@ -3813,6 +4214,12 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "waker-fn" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" + [[package]] name = "walkdir" version = "2.5.0" diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index c36ae1440..4cea8767f 100644 --- 
a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -25,6 +25,7 @@ torrust-tracker-clock = { version = "3.0.0-alpha.12-develop", path = "../clock" [dev-dependencies] criterion = { version = "0", features = ["async_tokio"] } rstest = "0" +async-std = {version = "1", features = ["attributes", "tokio1"] } [[bench]] harness = false diff --git a/packages/torrent-repository/tests/common/mod.rs b/packages/torrent-repository/tests/common/mod.rs new file mode 100644 index 000000000..efdf7f742 --- /dev/null +++ b/packages/torrent-repository/tests/common/mod.rs @@ -0,0 +1,3 @@ +pub mod repo; +pub mod torrent; +pub mod torrent_peer_builder; diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs new file mode 100644 index 000000000..3a4b53d2f --- /dev/null +++ b/packages/torrent-repository/tests/common/repo.rs @@ -0,0 +1,147 @@ +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_torrent_repository::repository::{Repository as _, RepositoryAsync as _}; +use torrust_tracker_torrent_repository::{ + EntrySingle, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, + TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, +}; + +#[derive(Debug)] +pub(crate) enum Repo { + Std(TorrentsRwLockStd), + StdMutexStd(TorrentsRwLockStdMutexStd), + StdMutexTokio(TorrentsRwLockStdMutexTokio), + Tokio(TorrentsRwLockTokio), + TokioMutexStd(TorrentsRwLockTokioMutexStd), + TokioMutexTokio(TorrentsRwLockTokioMutexTokio), +} + +impl Repo { + pub(crate) async fn get(&self, key: &InfoHash) -> Option { + match self { + 
Repo::Std(repo) => repo.get(key), + Repo::StdMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), + Repo::StdMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), + Repo::Tokio(repo) => repo.get(key).await, + Repo::TokioMutexStd(repo) => Some(repo.get(key).await?.lock().unwrap().clone()), + Repo::TokioMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), + } + } + pub(crate) async fn get_metrics(&self) -> TorrentsMetrics { + match self { + Repo::Std(repo) => repo.get_metrics(), + Repo::StdMutexStd(repo) => repo.get_metrics(), + Repo::StdMutexTokio(repo) => repo.get_metrics().await, + Repo::Tokio(repo) => repo.get_metrics().await, + Repo::TokioMutexStd(repo) => repo.get_metrics().await, + Repo::TokioMutexTokio(repo) => repo.get_metrics().await, + } + } + pub(crate) async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + match self { + Repo::Std(repo) => repo.get_paginated(pagination), + Repo::StdMutexStd(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) + .collect(), + Repo::StdMutexTokio(repo) => { + let mut v: Vec<(InfoHash, EntrySingle)> = vec![]; + + for (i, t) in repo.get_paginated(pagination).await { + v.push((i, t.lock().await.clone())); + } + v + } + Repo::Tokio(repo) => repo.get_paginated(pagination).await, + Repo::TokioMutexStd(repo) => repo + .get_paginated(pagination) + .await + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) + .collect(), + Repo::TokioMutexTokio(repo) => { + let mut v: Vec<(InfoHash, EntrySingle)> = vec![]; + + for (i, t) in repo.get_paginated(pagination).await { + v.push((i, t.lock().await.clone())); + } + v + } + } + } + pub(crate) async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + match self { + Repo::Std(repo) => repo.import_persistent(persistent_torrents), + Repo::StdMutexStd(repo) => 
repo.import_persistent(persistent_torrents), + Repo::StdMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, + Repo::Tokio(repo) => repo.import_persistent(persistent_torrents).await, + Repo::TokioMutexStd(repo) => repo.import_persistent(persistent_torrents).await, + Repo::TokioMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, + } + } + pub(crate) async fn remove(&self, key: &InfoHash) -> Option { + match self { + Repo::Std(repo) => repo.remove(key), + Repo::StdMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), + Repo::StdMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), + Repo::Tokio(repo) => repo.remove(key).await, + Repo::TokioMutexStd(repo) => Some(repo.remove(key).await?.lock().unwrap().clone()), + Repo::TokioMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), + } + } + pub(crate) async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + match self { + Repo::Std(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::StdMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::StdMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::Tokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::TokioMutexStd(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::TokioMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + } + } + pub(crate) async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + match self { + Repo::Std(repo) => repo.remove_peerless_torrents(policy), + Repo::StdMutexStd(repo) => repo.remove_peerless_torrents(policy), + Repo::StdMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::Tokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::TokioMutexStd(repo) => repo.remove_peerless_torrents(policy).await, + Repo::TokioMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, + } + } + pub(crate) async 
fn update_torrent_with_peer_and_get_stats( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + ) -> (bool, SwarmMetadata) { + match self { + Repo::Std(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer), + Repo::StdMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer), + Repo::StdMutexTokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, + Repo::Tokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, + Repo::TokioMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, + Repo::TokioMutexTokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, + } + } + pub(crate) async fn insert(&self, info_hash: &InfoHash, torrent: EntrySingle) -> Option { + match self { + Repo::Std(repo) => repo.write().insert(*info_hash, torrent), + Repo::StdMutexStd(repo) => Some(repo.write().insert(*info_hash, torrent.into())?.lock().unwrap().clone()), + Repo::StdMutexTokio(repo) => { + let r = repo.write().insert(*info_hash, torrent.into()); + match r { + Some(t) => Some(t.lock().await.clone()), + None => None, + } + } + Repo::Tokio(repo) => repo.write().await.insert(*info_hash, torrent), + Repo::TokioMutexStd(repo) => Some(repo.write().await.insert(*info_hash, torrent.into())?.lock().unwrap().clone()), + Repo::TokioMutexTokio(repo) => Some(repo.write().await.insert(*info_hash, torrent.into())?.lock().await.clone()), + } + } +} diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs new file mode 100644 index 000000000..33264c443 --- /dev/null +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -0,0 +1,89 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; +use 
torrust_tracker_torrent_repository::entry::{Entry as _, EntryAsync as _, EntrySync as _}; +use torrust_tracker_torrent_repository::{EntryMutexStd, EntryMutexTokio, EntrySingle}; + +#[derive(Debug, Clone)] +pub(crate) enum Torrent { + Single(EntrySingle), + MutexStd(EntryMutexStd), + MutexTokio(EntryMutexTokio), +} + +impl Torrent { + pub(crate) async fn get_stats(&self) -> SwarmMetadata { + match self { + Torrent::Single(entry) => entry.get_stats(), + Torrent::MutexStd(entry) => entry.get_stats(), + Torrent::MutexTokio(entry) => entry.clone().get_stats().await, + } + } + + pub(crate) async fn is_good(&self, policy: &TrackerPolicy) -> bool { + match self { + Torrent::Single(entry) => entry.is_good(policy), + Torrent::MutexStd(entry) => entry.is_good(policy), + Torrent::MutexTokio(entry) => entry.clone().check_good(policy).await, + } + } + + pub(crate) async fn peers_is_empty(&self) -> bool { + match self { + Torrent::Single(entry) => entry.peers_is_empty(), + Torrent::MutexStd(entry) => entry.peers_is_empty(), + Torrent::MutexTokio(entry) => entry.clone().peers_is_empty().await, + } + } + + pub(crate) async fn get_peers_len(&self) -> usize { + match self { + Torrent::Single(entry) => entry.get_peers_len(), + Torrent::MutexStd(entry) => entry.get_peers_len(), + Torrent::MutexTokio(entry) => entry.clone().get_peers_len().await, + } + } + + pub(crate) async fn get_peers(&self, limit: Option) -> Vec> { + match self { + Torrent::Single(entry) => entry.get_peers(limit), + Torrent::MutexStd(entry) => entry.get_peers(limit), + Torrent::MutexTokio(entry) => entry.clone().get_peers(limit).await, + } + } + + pub(crate) async fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + match self { + Torrent::Single(entry) => entry.get_peers_for_client(client, limit), + Torrent::MutexStd(entry) => entry.get_peers_for_client(client, limit), + Torrent::MutexTokio(entry) => entry.clone().get_peers_for_client(client, limit).await, + } + } + + pub(crate) async fn 
insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { + match self { + Torrent::Single(entry) => entry.insert_or_update_peer(peer), + Torrent::MutexStd(entry) => entry.insert_or_update_peer(peer), + Torrent::MutexTokio(entry) => entry.clone().insert_or_update_peer(peer).await, + } + } + + pub(crate) async fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + match self { + Torrent::Single(entry) => entry.insert_or_update_peer_and_get_stats(peer), + Torrent::MutexStd(entry) => entry.insert_or_update_peer_and_get_stats(peer), + Torrent::MutexTokio(entry) => entry.clone().insert_or_update_peer_and_get_stats(peer).await, + } + } + + pub(crate) async fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + match self { + Torrent::Single(entry) => entry.remove_inactive_peers(current_cutoff), + Torrent::MutexStd(entry) => entry.remove_inactive_peers(current_cutoff), + Torrent::MutexTokio(entry) => entry.clone().remove_inactive_peers(current_cutoff).await, + } + } +} diff --git a/packages/torrent-repository/tests/common/torrent_peer_builder.rs b/packages/torrent-repository/tests/common/torrent_peer_builder.rs new file mode 100644 index 000000000..3a4e61ed2 --- /dev/null +++ b/packages/torrent-repository/tests/common/torrent_peer_builder.rs @@ -0,0 +1,88 @@ +use std::net::SocketAddr; + +use torrust_tracker_clock::clock::Time; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; + +use crate::CurrentClock; + +#[derive(Debug, Default)] +struct TorrentPeerBuilder { + peer: peer::Peer, +} + +#[allow(dead_code)] +impl TorrentPeerBuilder { + #[must_use] + fn new() -> Self { + Self { + peer: peer::Peer { + updated: CurrentClock::now(), + ..Default::default() + }, + } + } + + #[must_use] + fn with_event_completed(mut self) -> Self { + self.peer.event = AnnounceEvent::Completed; + self + } + + #[must_use] + fn 
with_event_started(mut self) -> Self { + self.peer.event = AnnounceEvent::Started; + self + } + + #[must_use] + fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + #[must_use] + fn with_peer_id(mut self, peer_id: peer::Id) -> Self { + self.peer.peer_id = peer_id; + self + } + + #[must_use] + fn with_number_of_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } + + #[must_use] + fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + #[must_use] + fn into(self) -> peer::Peer { + self.peer + } +} + +/// A torrent seeder is a peer with 0 bytes left to download which +/// has not announced it has stopped +#[must_use] +pub fn a_completed_peer(id: i32) -> peer::Peer { + TorrentPeerBuilder::new() + .with_number_of_bytes_left(0) + .with_event_completed() + .with_peer_id(id.into()) + .into() +} + +/// A torrent leecher is a peer that is not a seeder. 
+/// Leecher: left > 0 OR event = Stopped +#[must_use] +pub fn a_started_peer(id: i32) -> peer::Peer { + TorrentPeerBuilder::new() + .with_number_of_bytes_left(1) + .with_event_started() + .with_peer_id(id.into()) + .into() +} diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs new file mode 100644 index 000000000..c39bef636 --- /dev/null +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -0,0 +1,433 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::ops::Sub; +use std::time::Duration; + +use rstest::{fixture, rstest}; +use torrust_tracker_clock::clock::stopped::Stopped as _; +use torrust_tracker_clock::clock::{self, Time as _}; +use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::{peer, NumberOfBytes}; +use torrust_tracker_torrent_repository::{EntryMutexStd, EntryMutexTokio, EntrySingle}; + +use crate::common::torrent::Torrent; +use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; +use crate::CurrentClock; + +#[fixture] +fn single() -> Torrent { + Torrent::Single(EntrySingle::default()) +} +#[fixture] +fn standard_mutex() -> Torrent { + Torrent::MutexStd(EntryMutexStd::default()) +} + +#[fixture] +fn mutex_tokio() -> Torrent { + Torrent::MutexTokio(EntryMutexTokio::default()) +} + +#[fixture] +fn policy_none() -> TrackerPolicy { + TrackerPolicy::new(false, 0, false) +} + +#[fixture] +fn policy_persist() -> TrackerPolicy { + TrackerPolicy::new(false, 0, true) +} + +#[fixture] +fn policy_remove() -> TrackerPolicy { + TrackerPolicy::new(true, 0, false) +} + +#[fixture] +fn policy_remove_persist() -> TrackerPolicy { + TrackerPolicy::new(true, 0, true) +} + +pub enum Makes { + Empty, + Started, + Completed, + Downloaded, + Three, +} + +async fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { + match makes { + 
Makes::Empty => vec![], + Makes::Started => { + let peer = a_started_peer(1); + torrent.insert_or_update_peer(&peer).await; + vec![peer] + } + Makes::Completed => { + let peer = a_completed_peer(2); + torrent.insert_or_update_peer(&peer).await; + vec![peer] + } + Makes::Downloaded => { + let mut peer = a_started_peer(3); + torrent.insert_or_update_peer(&peer).await; + peer.event = AnnounceEvent::Completed; + peer.left = NumberOfBytes(0); + torrent.insert_or_update_peer(&peer).await; + vec![peer] + } + Makes::Three => { + let peer_1 = a_started_peer(1); + torrent.insert_or_update_peer(&peer_1).await; + + let peer_2 = a_completed_peer(2); + torrent.insert_or_update_peer(&peer_2).await; + + let mut peer_3 = a_started_peer(3); + torrent.insert_or_update_peer(&peer_3).await; + peer_3.event = AnnounceEvent::Completed; + peer_3.left = NumberOfBytes(0); + torrent.insert_or_update_peer(&peer_3).await; + vec![peer_1, peer_2, peer_3] + } + } +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[tokio::test] +async fn it_should_be_empty_by_default( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + assert_eq!(torrent.get_peers_len().await, 0); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_check_if_entry_is_good( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, + #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, +) { + make(&mut torrent, makes).await; + + let has_peers = !torrent.peers_is_empty().await; + let has_downloads = torrent.get_stats().await.downloaded != 0; + + match (policy.remove_peerless_torrents, policy.persistent_torrent_completed_stat) { + // remove torrents without peers, and keep 
completed download stats + (true, true) => match (has_peers, has_downloads) { + // no peers, but has downloads + // peers, with or without downloads + (false, true) | (true, true | false) => assert!(torrent.is_good(&policy).await), + // no peers and no downloads + (false, false) => assert!(!torrent.is_good(&policy).await), + }, + // remove torrents without peers and drop completed download stats + (true, false) => match (has_peers, has_downloads) { + // peers, with or without downloads + (true, true | false) => assert!(torrent.is_good(&policy).await), + // no peers and with or without downloads + (false, true | false) => assert!(!torrent.is_good(&policy).await), + }, + // keep torrents without peers, but keep or drop completed download stats + (false, true | false) => assert!(torrent.is_good(&policy).await), + } +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_get_peers_for_torrent_entry( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + let peers = make(&mut torrent, makes).await; + + let torrent_peers = torrent.get_peers(None).await; + + assert_eq!(torrent_peers.len(), peers.len()); + + for peer in torrent_peers { + assert!(peers.contains(&peer)); + } +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_update_a_peer( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + // Make and insert a new peer. + let mut peer = a_started_peer(-1); + torrent.insert_or_update_peer(&peer).await; + + // Get the Inserted Peer by Id. 
+ let peers = torrent.get_peers(None).await; + let original = peers + .iter() + .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) + .expect("it should find peer by id"); + + assert_eq!(original.event, AnnounceEvent::Started, "it should be as created"); + + // Announce "Completed" torrent download event. + peer.event = AnnounceEvent::Completed; + torrent.insert_or_update_peer(&peer).await; + + // Get the Updated Peer by Id. + let peers = torrent.get_peers(None).await; + let updated = peers + .iter() + .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) + .expect("it should find peer by id"); + + assert_eq!(updated.event, AnnounceEvent::Completed, "it should be updated"); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_remove_a_peer_upon_stopped_announcement( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + use torrust_tracker_primitives::peer::ReadInfo as _; + + make(&mut torrent, makes).await; + + let mut peer = a_started_peer(-1); + + torrent.insert_or_update_peer(&peer).await; + + // The started peer should be inserted. + let peers = torrent.get_peers(None).await; + let original = peers + .iter() + .find(|p| p.get_id() == peer.get_id()) + .expect("it should find peer by id"); + + assert_eq!(original.event, AnnounceEvent::Started); + + // Change peer to "Stopped" and insert. + peer.event = AnnounceEvent::Stopped; + torrent.insert_or_update_peer(&peer).await; + + // It should be removed now. 
+ let peers = torrent.get_peers(None).await; + + assert_eq!( + peers.iter().find(|p| p.get_id() == peer.get_id()), + None, + "it should be removed" + ); +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloaded_statistic( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + let downloaded = torrent.get_stats().await.downloaded; + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let is_already_completed = peer.event == AnnounceEvent::Completed; + + // Announce "Completed" torrent download event. + peer.event = AnnounceEvent::Completed; + + let (updated, stats) = torrent.insert_or_update_peer_and_get_stats(&peer).await; + + if is_already_completed { + assert!(!updated); + assert_eq!(stats.downloaded, downloaded); + } else { + assert!(updated); + assert_eq!(stats.downloaded, downloaded + 1); + } +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_update_a_peer_as_a_seeder( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + let peers = make(&mut torrent, makes).await; + let completed = u32::try_from(peers.iter().filter(|p| p.is_seeder()).count()).expect("it_should_not_be_so_many"); + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let is_already_non_left = peer.left == NumberOfBytes(0); + + // Set Bytes Left to Zero + peer.left = NumberOfBytes(0); + let (_, stats) = torrent.insert_or_update_peer_and_get_stats(&peer).await; // Add the peer + + if 
is_already_non_left { + // it was already complete + assert_eq!(stats.complete, completed); + } else { + // now it is complete + assert_eq!(stats.complete, completed + 1); + } +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_update_a_peer_as_incomplete( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + let peers = make(&mut torrent, makes).await; + let incomplete = u32::try_from(peers.iter().filter(|p| !p.is_seeder()).count()).expect("it should not be so many"); + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let completed_already = peer.left == NumberOfBytes(0); + + // Set Bytes Left to no Zero + peer.left = NumberOfBytes(1); + let (_, stats) = torrent.insert_or_update_peer_and_get_stats(&peer).await; // Add the peer + + if completed_already { + // now it is incomplete + assert_eq!(stats.incomplete, incomplete + 1); + } else { + // was already incomplete + assert_eq!(stats.incomplete, incomplete); + } +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_get_peers_excluding_the_client_socket( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081); + + // for this test, we should not already use this socket. + assert_ne!(peer.peer_addr, socket); + + // it should get the peer as it dose not share the socket. 
+ assert!(torrent.get_peers_for_client(&socket, None).await.contains(&peer.into())); + + // set the address to the socket. + peer.peer_addr = socket; + torrent.insert_or_update_peer(&peer).await; // Add peer + + // It should not include the peer that has the same socket. + assert!(!torrent.get_peers_for_client(&socket, None).await.contains(&peer.into())); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_limit_the_number_of_peers_returned( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + // We add one more peer than the scrape limit + for peer_number in 1..=74 + 1 { + let mut peer = a_started_peer(1); + peer.peer_id = peer::Id::from(peer_number); + torrent.insert_or_update_peer(&peer).await; + } + + let peers = torrent.get_peers(Some(TORRENT_PEERS_LIMIT)).await; + + assert_eq!(peers.len(), 74); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_remove_inactive_peers_beyond_cutoff( + #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + const TIMEOUT: Duration = Duration::from_secs(120); + const EXPIRE: Duration = Duration::from_secs(121); + + let peers = make(&mut torrent, makes).await; + + let mut peer = a_completed_peer(-1); + + let now = clock::Working::now(); + clock::Stopped::local_set(&now); + + peer.updated = now.sub(EXPIRE); + + torrent.insert_or_update_peer(&peer).await; + + assert_eq!(torrent.get_peers_len().await, peers.len() + 1); + + let current_cutoff = CurrentClock::now_sub(&TIMEOUT).unwrap_or_default(); + 
torrent.remove_inactive_peers(current_cutoff).await; + + assert_eq!(torrent.get_peers_len().await, peers.len()); +} diff --git a/packages/torrent-repository/tests/integration.rs b/packages/torrent-repository/tests/integration.rs new file mode 100644 index 000000000..5aab67b03 --- /dev/null +++ b/packages/torrent-repository/tests/integration.rs @@ -0,0 +1,22 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! ``` + +use torrust_tracker_clock::clock; + +pub mod common; +mod entry; +mod repository; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs new file mode 100644 index 000000000..7ffe17dd7 --- /dev/null +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -0,0 +1,504 @@ +use std::collections::{BTreeMap, HashSet}; +use std::hash::{DefaultHasher, Hash, Hasher}; + +use rstest::{fixture, rstest}; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::{NumberOfBytes, PersistentTorrents}; +use torrust_tracker_torrent_repository::entry::Entry as _; +use torrust_tracker_torrent_repository::repository::{RwLockStd, RwLockTokio}; +use torrust_tracker_torrent_repository::EntrySingle; + +use crate::common::repo::Repo; +use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; + +#[fixture] +fn standard() -> Repo { + Repo::Std(RwLockStd::default()) +} +#[fixture] +fn standard_mutex() -> Repo { + Repo::StdMutexStd(RwLockStd::default()) +} + +#[fixture] +fn 
standard_tokio() -> Repo { + Repo::StdMutexTokio(RwLockStd::default()) +} + +#[fixture] +fn tokio_std() -> Repo { + Repo::Tokio(RwLockTokio::default()) +} +#[fixture] +fn tokio_mutex() -> Repo { + Repo::TokioMutexStd(RwLockTokio::default()) +} + +#[fixture] +fn tokio_tokio() -> Repo { + Repo::TokioMutexTokio(RwLockTokio::default()) +} + +type Entries = Vec<(InfoHash, EntrySingle)>; + +#[fixture] +fn empty() -> Entries { + vec![] +} + +#[fixture] +fn default() -> Entries { + vec![(InfoHash::default(), EntrySingle::default())] +} + +#[fixture] +fn started() -> Entries { + let mut torrent = EntrySingle::default(); + torrent.insert_or_update_peer(&a_started_peer(1)); + vec![(InfoHash::default(), torrent)] +} + +#[fixture] +fn completed() -> Entries { + let mut torrent = EntrySingle::default(); + torrent.insert_or_update_peer(&a_completed_peer(2)); + vec![(InfoHash::default(), torrent)] +} + +#[fixture] +fn downloaded() -> Entries { + let mut torrent = EntrySingle::default(); + let mut peer = a_started_peer(3); + torrent.insert_or_update_peer(&peer); + peer.event = AnnounceEvent::Completed; + peer.left = NumberOfBytes(0); + torrent.insert_or_update_peer(&peer); + vec![(InfoHash::default(), torrent)] +} + +#[fixture] +fn three() -> Entries { + let mut started = EntrySingle::default(); + let started_h = &mut DefaultHasher::default(); + started.insert_or_update_peer(&a_started_peer(1)); + started.hash(started_h); + + let mut completed = EntrySingle::default(); + let completed_h = &mut DefaultHasher::default(); + completed.insert_or_update_peer(&a_completed_peer(2)); + completed.hash(completed_h); + + let mut downloaded = EntrySingle::default(); + let downloaded_h = &mut DefaultHasher::default(); + let mut downloaded_peer = a_started_peer(3); + downloaded.insert_or_update_peer(&downloaded_peer); + downloaded_peer.event = AnnounceEvent::Completed; + downloaded_peer.left = NumberOfBytes(0); + downloaded.insert_or_update_peer(&downloaded_peer); + downloaded.hash(downloaded_h); 
+ + vec![ + (InfoHash::from(&started_h.clone()), started), + (InfoHash::from(&completed_h.clone()), completed), + (InfoHash::from(&downloaded_h.clone()), downloaded), + ] +} + +#[fixture] +fn many_out_of_order() -> Entries { + let mut entries: HashSet<(InfoHash, EntrySingle)> = HashSet::default(); + + for i in 0..408 { + let mut entry = EntrySingle::default(); + entry.insert_or_update_peer(&a_started_peer(i)); + + entries.insert((InfoHash::from(&i), entry)); + } + + // we keep the random order from the hashed set for the vector. + entries.iter().map(|(i, e)| (*i, e.clone())).collect() +} + +#[fixture] +fn many_hashed_in_order() -> Entries { + let mut entries: BTreeMap = BTreeMap::default(); + + for i in 0..408 { + let mut entry = EntrySingle::default(); + entry.insert_or_update_peer(&a_started_peer(i)); + + let hash: &mut DefaultHasher = &mut DefaultHasher::default(); + hash.write_i32(i); + + entries.insert(InfoHash::from(&hash.clone()), entry); + } + + // We return the entries in-order from from the b-tree map. 
+ entries.iter().map(|(i, e)| (*i, e.clone())).collect() +} + +#[fixture] +fn persistent_empty() -> PersistentTorrents { + PersistentTorrents::default() +} + +#[fixture] +fn persistent_single() -> PersistentTorrents { + let hash = &mut DefaultHasher::default(); + + hash.write_u8(1); + let t = [(InfoHash::from(&hash.clone()), 0_u32)]; + + t.iter().copied().collect() +} + +#[fixture] +fn persistent_three() -> PersistentTorrents { + let hash = &mut DefaultHasher::default(); + + hash.write_u8(1); + let info_1 = InfoHash::from(&hash.clone()); + hash.write_u8(2); + let info_2 = InfoHash::from(&hash.clone()); + hash.write_u8(3); + let info_3 = InfoHash::from(&hash.clone()); + + let t = [(info_1, 1_u32), (info_2, 2_u32), (info_3, 3_u32)]; + + t.iter().copied().collect() +} + +async fn make(repo: &Repo, entries: &Entries) { + for (info_hash, entry) in entries { + repo.insert(info_hash, entry.clone()).await; + } +} + +#[fixture] +fn paginated_limit_zero() -> Pagination { + Pagination::new(0, 0) +} + +#[fixture] +fn paginated_limit_one() -> Pagination { + Pagination::new(0, 1) +} + +#[fixture] +fn paginated_limit_one_offset_one() -> Pagination { + Pagination::new(1, 1) +} + +#[fixture] +fn policy_none() -> TrackerPolicy { + TrackerPolicy::new(false, 0, false) +} + +#[fixture] +fn policy_persist() -> TrackerPolicy { + TrackerPolicy::new(false, 0, true) +} + +#[fixture] +fn policy_remove() -> TrackerPolicy { + TrackerPolicy::new(true, 0, false) +} + +#[fixture] +fn policy_remove_persist() -> TrackerPolicy { + TrackerPolicy::new(true, 0, true) +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_a_torrent_entry( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] 
repo: Repo, + #[case] entries: Entries, +) { + make(&repo, &entries).await; + + if let Some((info_hash, torrent)) = entries.first() { + assert_eq!(repo.get(info_hash).await, Some(torrent.clone())); + } else { + assert_eq!(repo.get(&InfoHash::default()).await, None); + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, + many_out_of_order: Entries, +) { + make(&repo, &entries).await; + + let entries_a = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::>(); + + make(&repo, &many_out_of_order).await; + + let entries_b = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::>(); + + let is_equal = entries_b.iter().take(entries_a.len()).copied().collect::>() == entries_a; + + let is_sorted = entries_b.windows(2).all(|w| w[0] <= w[1]); + + assert!( + is_equal || is_sorted, + "The order is unstable: {is_equal}, or is sorted {is_sorted}." 
+ ); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_paginated( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, + #[values(paginated_limit_zero(), paginated_limit_one(), paginated_limit_one_offset_one())] paginated: Pagination, +) { + make(&repo, &entries).await; + + let mut info_hashes = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::>(); + info_hashes.sort(); + + match paginated { + // it should return empty if limit is zero. + Pagination { limit: 0, .. } => assert_eq!(repo.get_paginated(Some(&paginated)).await, vec![]), + + // it should return a single entry if the limit is one. + Pagination { limit: 1, offset: 0 } => { + if info_hashes.is_empty() { + assert_eq!(repo.get_paginated(Some(&paginated)).await.len(), 0); + } else { + let page = repo.get_paginated(Some(&paginated)).await; + assert_eq!(page.len(), 1); + assert_eq!(page.first().map(|(i, _)| i), info_hashes.first()); + } + } + + // it should return the only the second entry if both the limit and the offset are one. + Pagination { limit: 1, offset: 1 } => { + if info_hashes.len() > 1 { + let page = repo.get_paginated(Some(&paginated)).await; + assert_eq!(page.len(), 1); + assert_eq!(page[0].0, info_hashes[1]); + } + } + // the other cases are not yet tested. 
+ _ => {} + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_metrics( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, +) { + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + + make(&repo, &entries).await; + + let mut metrics = TorrentsMetrics::default(); + + for (_, torrent) in entries { + let stats = torrent.get_stats(); + + metrics.torrents += 1; + metrics.incomplete += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + } + + assert_eq!(repo.get_metrics().await, metrics); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_import_persistent_torrents( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, + #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, +) { + make(&repo, &entries).await; + + let mut downloaded = repo.get_metrics().await.downloaded; + persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); + + repo.import_persistent(&persistent_torrents).await; + + assert_eq!(repo.get_metrics().await.downloaded, downloaded); + + for (entry, _) in persistent_torrents { + assert!(repo.get(&entry).await.is_some()); + } +} + +#[rstest] +#[case::empty(empty())] 
+#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_an_entry( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, +) { + make(&repo, &entries).await; + + for (info_hash, torrent) in entries { + assert_eq!(repo.get(&info_hash).await, Some(torrent.clone())); + assert_eq!(repo.remove(&info_hash).await, Some(torrent)); + + assert_eq!(repo.get(&info_hash).await, None); + assert_eq!(repo.remove(&info_hash).await, None); + } + + assert_eq!(repo.get_metrics().await.torrents, 0); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_inactive_peers( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, +) { + use std::ops::Sub as _; + use std::time::Duration; + + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self, Time as _}; + use torrust_tracker_primitives::peer; + + use crate::CurrentClock; + + const TIMEOUT: Duration = Duration::from_secs(120); + const EXPIRE: Duration = Duration::from_secs(121); + + make(&repo, &entries).await; + + let info_hash: InfoHash; + let mut peer: peer::Peer; + + // Generate a new infohash and peer. + { + let hash = &mut DefaultHasher::default(); + hash.write_u8(255); + info_hash = InfoHash::from(&hash.clone()); + peer = a_completed_peer(-1); + } + + // Set the last updated time of the peer to be 121 seconds ago. 
+ { + let now = clock::Working::now(); + clock::Stopped::local_set(&now); + + peer.updated = now.sub(EXPIRE); + } + + // Insert the infohash and peer into the repository + // and verify there is an extra torrent entry. + { + repo.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + assert_eq!(repo.get_metrics().await.torrents, entries.len() as u64 + 1); + } + + // Verify that this new peer was inserted into the repository. + { + let entry = repo.get(&info_hash).await.expect("it_should_get_some"); + assert!(entry.get_peers(None).contains(&peer.into())); + } + + // Remove peers that have not been updated since the timeout (120 seconds ago). + { + repo.remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")) + .await; + } + + // Verify that the this peer was removed from the repository. + { + let entry = repo.get(&info_hash).await.expect("it_should_get_some"); + assert!(!entry.get_peers(None).contains(&peer.into())); + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_peerless_torrents( + #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[case] entries: Entries, + #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, +) { + make(&repo, &entries).await; + + repo.remove_peerless_torrents(&policy).await; + + let torrents = repo.get_paginated(None).await; + + for (_, entry) in torrents { + assert!(entry.is_good(&policy)); + } +} From 9e23ec99185795d634d264d08f1e4604b356cdbf Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 26 Mar 2024 05:35:44 +0800 Subject: [PATCH 0778/1003] chore: update deps Updating crates.io index Updating 
arc-swap v1.7.0 -> v1.7.1 Updating async-trait v0.1.78 -> v0.1.79 Updating axum v0.7.4 -> v0.7.5 Updating axum-extra v0.9.2 -> v0.9.3 Updating backtrace v0.3.69 -> v0.3.71 Updating bytes v1.5.0 -> v1.6.0 Updating clap v4.5.3 -> v4.5.4 Updating clap_derive v4.5.3 -> v4.5.4 Updating fastrand v2.0.1 -> v2.0.2 Updating indexmap v2.2.5 -> v2.2.6 Updating libz-sys v1.1.15 -> v1.1.16 Updating rayon v1.9.0 -> v1.10.0 Updating regex v1.10.3 -> v1.10.4 Updating reqwest v0.12.0 -> v0.12.2 Updating rustls-pki-types v1.3.1 -> v1.4.0 Updating syn v2.0.53 -> v2.0.55 Adding sync_wrapper v1.0.0 --- Cargo.lock | 155 +++++++++++---------- tests/servers/health_check_api/contract.rs | 14 +- 2 files changed, 91 insertions(+), 78 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0bdd83b9b..e77b5de6d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -157,9 +157,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b3d0060af21e8d11a926981cc00c6c1541aa91dd64b9f881985c3da1094425f" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "arrayvec" @@ -226,7 +226,7 @@ dependencies = [ "async-lock 3.3.0", "async-task", "concurrent-queue", - "fastrand 2.0.1", + "fastrand 2.0.2", "futures-lite 2.3.0", "slab", ] @@ -341,13 +341,13 @@ checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" [[package]] name = "async-trait" -version = "0.1.78" +version = "0.1.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "461abc97219de0eaaf81fe3ef974a540158f3d079c2ab200f891f1a2ef201e85" +checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -364,9 +364,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.7.4" +version = 
"0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1236b4b292f6c4d6dc34604bb5120d85c3fe1d1aa596bd5cc52ca054d13e7b9e" +checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" dependencies = [ "async-trait", "axum-core", @@ -389,7 +389,7 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 1.0.0", "tokio", "tower", "tower-layer", @@ -423,7 +423,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper", + "sync_wrapper 0.1.2", "tower-layer", "tower-service", "tracing", @@ -431,9 +431,9 @@ dependencies = [ [[package]] name = "axum-extra" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "895ff42f72016617773af68fb90da2a9677d89c62338ec09162d4909d86fdd8f" +checksum = "0be6ea09c9b96cb5076af0de2e383bd2bc0c18f827cf1967bdd353e0b910d733" dependencies = [ "axum", "axum-core", @@ -449,6 +449,7 @@ dependencies = [ "tower", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -460,7 +461,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -488,9 +489,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", @@ -541,7 +542,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -589,7 +590,7 @@ dependencies = [ "async-channel 2.2.0", "async-lock 3.3.0", "async-task", - "fastrand 2.0.1", + "fastrand 2.0.2", "futures-io", "futures-lite 2.3.0", "piper", @@ -616,7 +617,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", "syn_derive", ] 
@@ -683,9 +684,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "cast" @@ -777,9 +778,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.3" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "949626d00e063efc93b6dca932419ceb5432f99769911c0b995f7e884c778813" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ "clap_builder", "clap_derive", @@ -799,14 +800,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.3" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90239a040c80f5e14809ca132ddc4176ab33d5e17e49691793296e3fcb34d72f" +checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -1069,7 +1070,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -1080,7 +1081,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -1114,7 +1115,7 @@ checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -1254,9 +1255,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = 
"658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" [[package]] name = "fern" @@ -1349,7 +1350,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -1361,7 +1362,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -1373,7 +1374,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -1451,7 +1452,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand 2.0.1", + "fastrand 2.0.2", "futures-core", "futures-io", "parking", @@ -1466,7 +1467,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -1562,7 +1563,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.2.5", + "indexmap 2.2.6", "slab", "tokio", "tokio-util", @@ -1801,9 +1802,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.5" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -2032,9 +2033,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.15" +version = "1.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "037731f5d3aaa87a5675e895b63ddff1a87624bc29f77004ea829809654e48f6" +checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" dependencies = [ "cc", "pkg-config", @@ -2167,7 +2168,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 
2.0.53", + "syn 2.0.55", ] [[package]] @@ -2218,7 +2219,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", "termcolor", "thiserror", ] @@ -2418,7 +2419,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -2531,7 +2532,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -2600,7 +2601,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -2622,7 +2623,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" dependencies = [ "atomic-waker", - "fastrand 2.0.1", + "fastrand 2.0.2", "futures-io", ] @@ -2880,9 +2881,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", @@ -2909,9 +2910,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.3" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", @@ -2953,9 +2954,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.0" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58b48d98d932f4ee75e541614d32a7f44c889b72bd9c2e04d95edd135989df88" +checksum = 
"2d66674f2b6fb864665eea7a3c1ac4e3dfacd2fda83cf6f935a612e01b0e3338" dependencies = [ "base64", "bytes", @@ -2981,7 +2982,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 0.1.2", "system-configuration", "tokio", "tokio-native-tls", @@ -3083,7 +3084,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.53", + "syn 2.0.55", "unicode-ident", ] @@ -3208,9 +3209,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8" +checksum = "868e20fada228fefaf6b652e00cc73623d54f8171e7352c18bb281571f2d92da" [[package]] name = "rustls-webpki" @@ -3354,7 +3355,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -3364,7 +3365,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50437e6a58912eecc08865e35ea2e8d365fbb2db0debb1c8bb43bf1faf055f25" dependencies = [ "form_urlencoded", - "indexmap 2.2.5", + "indexmap 2.2.6", "itoa", "ryu", "serde", @@ -3399,7 +3400,7 @@ checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -3433,7 +3434,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.2.5", + "indexmap 2.2.6", "serde", "serde_derive", "serde_json", @@ -3450,7 +3451,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -3584,9 +3585,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.53" +version = "2.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7383cd0e49fff4b6b90ca5670bfd3e9d6a733b3f90c686605aa7eec8c4996032" +checksum = 
"002a1b3dbf967edfafc32655d0f377ab0bb7b994aa1d32c8cc7e9b8bf3ebb8f0" dependencies = [ "proc-macro2", "quote", @@ -3602,7 +3603,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -3611,6 +3612,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "384595c11a4e2969895cad5a8c4029115f5ab956a9e5ef4de79d11a426e5f20c" + [[package]] name = "system-configuration" version = "0.5.1" @@ -3662,7 +3669,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", - "fastrand 2.0.1", + "fastrand 2.0.2", "rustix 0.38.32", "windows-sys 0.52.0", ] @@ -3699,7 +3706,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -3793,7 +3800,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -3857,7 +3864,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", "toml_datetime", "winnow 0.5.40", ] @@ -3868,7 +3875,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", "toml_datetime", "winnow 0.5.40", ] @@ -3879,7 +3886,7 @@ version = "0.22.9" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "8e40bb779c5187258fd7aad0eb68cb8706a0a81fa712fbea808ab43c4b8374c4" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", @@ -4095,7 +4102,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -4266,7 +4273,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", "wasm-bindgen-shared", ] @@ -4300,7 +4307,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4556,7 +4563,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs index c893470c2..3c3c13151 100644 --- a/tests/servers/health_check_api/contract.rs +++ b/tests/servers/health_check_api/contract.rs @@ -114,8 +114,11 @@ mod api { assert_eq!(details.binding, binding); assert!( - details.result.as_ref().is_err_and(|e| e.contains("client error (Connect)")), - "Expected to contain, \"client error (Connect)\", but have message \"{:?}\".", + details + .result + .as_ref() + .is_err_and(|e| e.contains("error sending request for url")), + "Expected to contain, \"error sending request for url\", but have message \"{:?}\".", details.result ); assert_eq!( @@ -215,8 +218,11 @@ mod http { assert_eq!(details.binding, binding); assert!( - details.result.as_ref().is_err_and(|e| e.contains("client error (Connect)")), - "Expected to contain, \"client error (Connect)\", but have message \"{:?}\".", + details + .result + .as_ref() + .is_err_and(|e| e.contains("error sending request for url")), + "Expected to contain, \"error sending request for url\", but have 
message \"{:?}\".", details.result ); assert_eq!( From 0999aa07412559501a35dd9e4b3ed701b36f33bf Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 26 Mar 2024 10:27:55 +0800 Subject: [PATCH 0779/1003] dev: ci: enable rust stable workflows * also change container to use rust-stable --- .github/workflows/contract.yaml | 2 +- .github/workflows/deployment.yaml | 2 +- .github/workflows/testing.yaml | 6 +++--- Containerfile | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/contract.yaml b/.github/workflows/contract.yaml index b38e0e8f5..2777417e3 100644 --- a/.github/workflows/contract.yaml +++ b/.github/workflows/contract.yaml @@ -14,7 +14,7 @@ jobs: strategy: matrix: - toolchain: [nightly] + toolchain: [nightly, stable] steps: - id: checkout diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 91f8d86eb..2a0f174f7 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -12,7 +12,7 @@ jobs: strategy: matrix: - toolchain: [nightly] + toolchain: [nightly, stable] steps: - id: checkout diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index 8a54e8982..620670f97 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -39,7 +39,7 @@ jobs: strategy: matrix: - toolchain: [nightly] + toolchain: [nightly, stable] steps: - id: checkout @@ -93,7 +93,7 @@ jobs: strategy: matrix: - toolchain: [nightly] + toolchain: [nightly, stable] steps: - id: checkout @@ -132,7 +132,7 @@ jobs: strategy: matrix: - toolchain: [nightly] + toolchain: [nightly, stable] steps: - id: setup diff --git a/Containerfile b/Containerfile index 77c7da669..590b0a13b 100644 --- a/Containerfile +++ b/Containerfile @@ -3,13 +3,13 @@ # Torrust Tracker ## Builder Image -FROM rustlang/rust:nightly-bookworm as chef +FROM docker.io/library/rust:bookworm as chef WORKDIR /tmp RUN curl -L --proto '=https' --tlsv1.2 -sSf 
https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash RUN cargo binstall --no-confirm cargo-chef cargo-nextest ## Tester Image -FROM rustlang/rust:nightly-bookworm-slim as tester +FROM docker.io/library/rust:slim-bookworm as tester WORKDIR /tmp RUN apt-get update; apt-get install -y curl sqlite3; apt-get autoclean From 3ac4aa5516269fdf7b2258efdfd56a99d120d508 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Apr 2024 13:35:12 +0100 Subject: [PATCH 0780/1003] docs: [#768] udpate profiling docs --- .cargo/config.toml | 1 + Cargo.toml | 6 +- cSpell.json | 8 +- docs/media/flamegraph.svg | 4 +- .../flamegraph_generated_withput_sudo.svg | 491 ++++++++++++++++++ docs/profiling.md | 75 ++- flamegraph_generated_withput_sudo.svg | 491 ++++++++++++++++++ 7 files changed, 1069 insertions(+), 7 deletions(-) create mode 100644 docs/media/flamegraph_generated_withput_sudo.svg create mode 100644 flamegraph_generated_withput_sudo.svg diff --git a/.cargo/config.toml b/.cargo/config.toml index a88db5f38..34d6230b9 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -23,3 +23,4 @@ rustflags = [ "-D", "unused", ] + diff --git a/Cargo.toml b/Cargo.toml index 99b7a334a..d045b945a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -103,6 +103,6 @@ debug = 1 lto = "fat" opt-level = 3 -[target.x86_64-unknown-linux-gnu] -linker = "/usr/bin/clang" -rustflags = ["-Clink-arg=-fuse-ld=lld", "-Clink-arg=-Wl,--no-rosegment"] \ No newline at end of file +[profile.release-debug] +inherits = "release" +debug = true \ No newline at end of file diff --git a/cSpell.json b/cSpell.json index bbcba98a7..0ee2f8306 100644 --- a/cSpell.json +++ b/cSpell.json @@ -21,6 +21,7 @@ "bools", "Bragilevsky", "bufs", + "buildid", "Buildx", "byteorder", "callgrind", @@ -69,9 +70,12 @@ "infoschema", "Intermodal", "intervali", - "kcachegrind", "Joakim", + "kallsyms", + "kcachegrind", + "kexec", "keyout", + "kptr", "lcov", "leecher", "leechers", @@ -83,6 +87,7 @@ 
"matchmakes", "metainfo", "middlewares", + "misresolved", "mockall", "multimap", "myacicontext", @@ -152,6 +157,7 @@ "Vagaa", "valgrind", "Vitaly", + "vmlinux", "Vuze", "Weidendorfer", "Werror", diff --git a/docs/media/flamegraph.svg b/docs/media/flamegraph.svg index 34e7146f9..58387ee06 100644 --- a/docs/media/flamegraph.svg +++ b/docs/media/flamegraph.svg @@ -1,4 +1,4 @@ - \ No newline at end of file diff --git a/docs/media/flamegraph_generated_withput_sudo.svg b/docs/media/flamegraph_generated_withput_sudo.svg new file mode 100644 index 000000000..84c00ffe3 --- /dev/null +++ b/docs/media/flamegraph_generated_withput_sudo.svg @@ -0,0 +1,491 @@ +Flame Graph Reset ZoomSearch [unknown] (188 samples, 0.14%)[unknown] (187 samples, 0.14%)[unknown] (186 samples, 0.14%)[unknown] (178 samples, 0.14%)[unknown] (172 samples, 0.13%)[unknown] (158 samples, 0.12%)[unknown] (158 samples, 0.12%)[unknown] (125 samples, 0.10%)[unknown] (102 samples, 0.08%)[unknown] (93 samples, 0.07%)[unknown] (92 samples, 0.07%)[unknown] (41 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (29 samples, 0.02%)[unknown] (25 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (15 samples, 0.01%)__GI___mmap64 (18 samples, 0.01%)__GI___mmap64 (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (17 samples, 0.01%)profiling (214 samples, 0.16%)clone3 (22 samples, 0.02%)start_thread (22 samples, 0.02%)std::sys::pal::unix::thread::Thread::new::thread_start (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::Handler::new (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::imp::make_handler (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::imp::get_stack (19 samples, 0.01%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (30 samples, 
0.02%)[[vdso]] (93 samples, 0.07%)<torrust_tracker::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as core::ops::deref::Deref>::deref::__stability::LAZY (143 samples, 0.11%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (31 samples, 0.02%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<BorrowType,K,V>::init_front (21 samples, 0.02%)[[vdso]] (91 samples, 0.07%)__GI___clock_gettime (14 samples, 0.01%)_int_malloc (53 samples, 0.04%)epoll_wait (254 samples, 0.19%)tokio::runtime::context::with_scheduler (28 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (14 samples, 0.01%)tokio::runtime::context::with_scheduler::{{closure}} (14 samples, 0.01%)core::option::Option<T>::map (17 samples, 0.01%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (17 samples, 0.01%)mio::poll::Poll::poll (27 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select (27 samples, 0.02%)tokio::runtime::io::driver::Driver::turn (54 samples, 0.04%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (26 samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (17 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (71 samples, 0.05%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (65 samples, 0.05%)core::sync::atomic::AtomicUsize::fetch_add (65 samples, 0.05%)core::sync::atomic::atomic_add (65 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (31 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (21 samples, 
0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark_condvar (18 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (49 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (33 samples, 0.03%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (93 samples, 0.07%)tokio::runtime::scheduler::multi_thread::park::Parker::park (75 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Inner::park (75 samples, 0.06%)core::cell::RefCell<T>::borrow_mut (18 samples, 0.01%)core::cell::RefCell<T>::try_borrow_mut (18 samples, 0.01%)core::cell::BorrowRefMut::new (18 samples, 0.01%)tokio::runtime::coop::budget (26 samples, 0.02%)tokio::runtime::coop::with_budget (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (96 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (27 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (18 samples, 0.01%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (35 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (14 samples, 0.01%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (90 samples, 0.07%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (90 samples, 0.07%)core::slice::<impl [T]>::contains (220 samples, 0.17%)<T as core::slice::cmp::SliceContains>::slice_contains (220 samples, 0.17%)<core::slice::iter::Iter<T> as 
core::iter::traits::iterator::Iterator>::any (220 samples, 0.17%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (54 samples, 0.04%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (54 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (240 samples, 0.18%)tokio::runtime::scheduler::multi_thread::idle::Idle::unpark_worker_by_id (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (265 samples, 0.20%)tokio::runtime::scheduler::multi_thread::worker::Context::park (284 samples, 0.22%)core::option::Option<T>::or_else (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (40 samples, 0.03%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (17 samples, 0.01%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (17 samples, 0.01%)core::num::<impl u32>::wrapping_add (17 samples, 0.01%)core::sync::atomic::AtomicU64::compare_exchange (26 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (129 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (128 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (119 samples, 0.09%)tokio::runtime::scheduler::multi_thread::queue::pack (39 samples, 0.03%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (613 samples, 
0.47%)tokio::runtime::scheduler::multi_thread::worker::run (613 samples, 0.47%)tokio::runtime::context::runtime::enter_runtime (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (613 samples, 0.47%)tokio::runtime::context::set_scheduler (613 samples, 0.47%)std::thread::local::LocalKey<T>::with (613 samples, 0.47%)std::thread::local::LocalKey<T>::try_with (613 samples, 0.47%)tokio::runtime::context::set_scheduler::{{closure}} (613 samples, 0.47%)tokio::runtime::context::scoped::Scoped<T>::set (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::Context::run (613 samples, 0.47%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (777 samples, 0.59%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (776 samples, 0.59%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (16 samples, 0.01%)<tokio::runtime::task::core::TaskIdGuard as core::ops::drop::Drop>::drop (16 samples, 0.01%)tokio::runtime::context::set_current_task_id (16 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (16 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (20 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (20 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::poll (835 samples, 0.64%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (56 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage (46 samples, 0.04%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (897 samples, 0.68%)tokio::runtime::task::harness::poll_future::{{closure}} (897 samples, 0.68%)tokio::runtime::task::core::Core<T,S>::store_output (62 samples, 0.05%)tokio::runtime::task::harness::poll_future (930 samples, 0.71%)std::panic::catch_unwind (927 samples, 0.71%)std::panicking::try (927 samples, 0.71%)std::panicking::try::do_call (925 
samples, 0.70%)core::mem::manually_drop::ManuallyDrop<T>::take (28 samples, 0.02%)core::ptr::read (28 samples, 0.02%)tokio::runtime::task::raw::poll (938 samples, 0.71%)tokio::runtime::task::harness::Harness<T,S>::poll (934 samples, 0.71%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (934 samples, 0.71%)core::array::<impl core::default::Default for [T: 32]>::default (26 samples, 0.02%)tokio::runtime::time::Inner::lock (16 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (15 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::time::wheel::Wheel::poll (25 samples, 0.02%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (98 samples, 0.07%)tokio::runtime::time::Driver::park_internal (51 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (15 samples, 0.01%)<F as core::future::into_future::IntoFuture>::into_future (16 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (24 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (46 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (131 samples, 0.10%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (24 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (14 samples, 0.01%)core::sync::atomic::AtomicU32::load (14 samples, 
0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (39 samples, 0.03%)std::sync::rwlock::RwLock<T>::read (34 samples, 0.03%)std::sys::sync::rwlock::futex::RwLock::read (32 samples, 0.02%)[[heap]] (2,361 samples, 1.80%)[..[[vdso]] (313 samples, 0.24%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (41 samples, 0.03%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (16 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_str (67 samples, 0.05%)alloc::string::String::push_str (18 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (18 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (18 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (18 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (36 samples, 0.03%)core::num::<impl u64>::rotate_left (28 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (60 samples, 0.05%)core::num::<impl u64>::wrapping_add (14 samples, 0.01%)core::hash::sip::u8to64_le (60 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (184 samples, 0.14%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (15 samples, 0.01%)tokio::runtime::context::CONTEXT::__getit (19 samples, 0.01%)core::cell::Cell<T>::get (17 samples, 0.01%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (26 samples, 0.02%)core::ops::function::FnMut::call_mut (21 samples, 
0.02%)tokio::runtime::coop::poll_proceed (21 samples, 0.02%)tokio::runtime::context::budget (21 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (21 samples, 0.02%)[unknown] (18 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (195 samples, 0.15%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (14 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (14 samples, 0.01%)core::result::Result<T,E>::is_err (18 samples, 0.01%)core::result::Result<T,E>::is_ok (18 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (46 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (39 samples, 0.03%)core::sync::atomic::AtomicU32::compare_exchange (19 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (19 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (245 samples, 0.19%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (26 samples, 0.02%)[[vdso]] (748 samples, 0.57%)[profiling] (34 samples, 0.03%)core::fmt::write (31 samples, 0.02%)__GI___clock_gettime (29 samples, 0.02%)__GI___libc_free (131 samples, 0.10%)arena_for_chunk (20 samples, 0.02%)arena_for_chunk (19 samples, 0.01%)heap_for_ptr (19 samples, 0.01%)heap_max_size (14 samples, 0.01%)__GI___libc_malloc (114 samples, 0.09%)__GI___libc_realloc (15 samples, 0.01%)__GI___lll_lock_wake_private (22 samples, 0.02%)__GI___pthread_disable_asynccancel (66 samples, 0.05%)__GI_getsockname (249 samples, 0.19%)__libc_calloc (15 samples, 0.01%)__libc_recvfrom (23 samples, 0.02%)__libc_sendto (130 samples, 0.10%)__memcmp_evex_movbe (451 samples, 0.34%)__memcpy_avx512_unaligned_erms (426 samples, 0.32%)__memset_avx512_unaligned_erms (215 samples, 0.16%)__posix_memalign (17 samples, 0.01%)_int_free (418 samples, 0.32%)tcache_put (24 samples, 0.02%)_int_malloc (385 samples, 
0.29%)_int_memalign (31 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (26 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (15 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (15 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (15 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (54 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::grow_one (15 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (96 samples, 0.07%)alloc::raw_vec::RawVec<T,A>::grow_amortized (66 samples, 0.05%)core::num::<impl usize>::checked_add (18 samples, 0.01%)core::num::<impl usize>::overflowing_add (18 samples, 0.01%)alloc::raw_vec::finish_grow (74 samples, 0.06%)alloc::sync::Arc<T,A>::drop_slow (16 samples, 0.01%)core::mem::drop (14 samples, 0.01%)core::fmt::Formatter::pad_integral (14 samples, 0.01%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (93 samples, 0.07%)core::ptr::drop_in_place<tokio::net::udp::UdpSocket::send_to<&core::net::socket_addr::SocketAddr>::{{closure}}> (23 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (188 samples, 0.14%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_announce::{{closure}}> (30 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_connect::{{closure}}> (22 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_packet::{{closure}}> (20 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}}> (19 samples, 
0.01%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::send_response::{{closure}}> (22 samples, 0.02%)malloc_consolidate (24 samples, 0.02%)core::core_arch::x86::avx2::_mm256_or_si256 (15 samples, 0.01%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (17 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (17 samples, 0.01%)rand_chacha::guts::round (66 samples, 0.05%)rand_chacha::guts::refill_wide::impl_avx2 (99 samples, 0.08%)rand_chacha::guts::refill_wide::fn_impl (98 samples, 0.07%)rand_chacha::guts::refill_wide_impl (98 samples, 0.07%)std::io::error::Error::kind (14 samples, 0.01%)[unknown] (42 samples, 0.03%)[unknown] (14 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (490 samples, 0.37%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (211 samples, 0.16%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (84 samples, 0.06%)tokio::runtime::task::core::Header::get_owner_id (18 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (18 samples, 0.01%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (20 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::remove (19 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (31 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (29 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage (108 samples, 0.08%)tokio::runtime::task::core::TaskIdGuard::enter (14 samples, 0.01%)tokio::runtime::context::set_current_task_id (14 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (14 samples, 
0.01%)tokio::runtime::task::harness::Harness<T,S>::complete (21 samples, 0.02%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (32 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (54 samples, 0.04%)tokio::runtime::task::raw::drop_abort_handle (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (17 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (22 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (22 samples, 0.02%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (79 samples, 0.06%)core::slice::<impl [T]>::contains (178 samples, 0.14%)<T as core::slice::cmp::SliceContains>::slice_contains (178 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (178 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (40 samples, 0.03%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (40 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (216 samples, 0.16%)tokio::loom::std::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (219 samples, 0.17%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (29 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (29 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (54 samples, 0.04%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (18 samples, 0.01%)<core::ptr::non_null::NonNull<T> as 
core::cmp::PartialEq>::eq (18 samples, 0.01%)core::sync::atomic::AtomicU32::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (113 samples, 0.09%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (51 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (31 samples, 0.02%)core::sync::atomic::AtomicU64::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (447 samples, 0.34%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (174 samples, 0.13%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (19 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (489 samples, 0.37%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (489 samples, 0.37%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run (484 samples, 0.37%)tokio::runtime::context::runtime::enter_runtime (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (484 samples, 0.37%)tokio::runtime::context::set_scheduler (484 samples, 0.37%)std::thread::local::LocalKey<T>::with (484 samples, 0.37%)std::thread::local::LocalKey<T>::try_with (484 samples, 0.37%)tokio::runtime::context::set_scheduler::{{closure}} (484 samples, 0.37%)tokio::runtime::context::scoped::Scoped<T>::set (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Context::run 
(484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (24 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (20 samples, 0.02%)tokio::runtime::task::raw::poll (515 samples, 0.39%)tokio::runtime::task::harness::Harness<T,S>::poll (493 samples, 0.38%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (493 samples, 0.38%)tokio::runtime::task::harness::poll_future (493 samples, 0.38%)std::panic::catch_unwind (493 samples, 0.38%)std::panicking::try (493 samples, 0.38%)std::panicking::try::do_call (493 samples, 0.38%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (493 samples, 0.38%)tokio::runtime::task::harness::poll_future::{{closure}} (493 samples, 0.38%)tokio::runtime::task::core::Core<T,S>::poll (493 samples, 0.38%)tokio::runtime::time::wheel::Wheel::next_expiration (16 samples, 0.01%)torrust_tracker::core::Tracker::authorize::{{closure}} (27 samples, 0.02%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (15 samples, 0.01%)torrust_tracker::core::Tracker::send_stats_event::{{closure}} (44 samples, 0.03%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (15 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::d_rounds (29 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (74 samples, 0.06%)torrust_tracker::servers::udp::peer_builder::from_request (17 samples, 0.01%)torrust_tracker::servers::udp::request::AnnounceWrapper::new (51 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (54 samples, 0.04%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (58 samples, 
0.04%)torrust_tracker::core::Tracker::announce::{{closure}} (70 samples, 0.05%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (113 samples, 0.09%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (175 samples, 0.13%)<T as alloc::string::ToString>::to_string (38 samples, 0.03%)core::option::Option<T>::expect (56 samples, 0.04%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (18 samples, 0.01%)<T as alloc::string::ToString>::to_string (18 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (180 samples, 0.14%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (468 samples, 0.36%)torrust_tracker::servers::udp::logging::log_response (38 samples, 0.03%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (669 samples, 0.51%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (152 samples, 0.12%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (147 samples, 0.11%)tokio::net::udp::UdpSocket::send_to::{{closure}} (138 samples, 0.11%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (119 samples, 0.09%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (75 samples, 0.06%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (39 samples, 0.03%)mio::net::udp::UdpSocket::send_to (39 samples, 0.03%)mio::io_source::IoSource<T>::do_io (39 samples, 0.03%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (39 samples, 0.03%)mio::net::udp::UdpSocket::send_to::{{closure}} (39 samples, 0.03%)std::net::udp::UdpSocket::send_to (39 samples, 0.03%)std::sys_common::net::UdpSocket::send_to (39 samples, 0.03%)std::sys::pal::unix::cvt (39 samples, 0.03%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (39 samples, 0.03%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for 
alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_stats (15 samples, 0.01%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (14 samples, 0.01%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count::to_usize::{{closure}} (33 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats::{{closure}} (33 samples, 0.03%)torrust_tracker_primitives::peer::Peer::is_seeder (33 samples, 0.03%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count (75 samples, 0.06%)core::iter::traits::iterator::Iterator::sum (75 samples, 0.06%)<usize as core::iter::traits::accum::Sum>::sum (75 samples, 0.06%)<core::iter::adapters::map::Map<I,F> as core::iter::traits::iterator::Iterator>::fold (75 samples, 0.06%)core::iter::traits::iterator::Iterator::fold (75 samples, 0.06%)core::iter::adapters::map::map_fold::{{closure}} (34 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (104 samples, 0.08%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (24 samples, 0.02%)core::mem::drop (15 samples, 0.01%)core::ptr::drop_in_place<core::option::Option<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (15 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (15 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (15 samples, 0.01%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for 
alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (215 samples, 0.16%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (198 samples, 0.15%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (89 samples, 0.07%)core::option::Option<T>::is_some_and (32 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (31 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (30 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (30 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (26 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (34 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (34 samples, 0.03%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (58 samples, 0.04%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (58 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (58 samples, 0.04%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (58 samples, 0.04%)<u8 as core::slice::cmp::SliceOrd>::compare (58 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (20 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (238 samples, 0.18%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree 
(236 samples, 0.18%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (208 samples, 0.16%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (208 samples, 0.16%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (282 samples, 0.21%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (67 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (53 samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (53 samples, 0.04%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (22 samples, 0.02%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (22 samples, 0.02%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (22 samples, 0.02%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (22 samples, 0.02%)<u8 as core::slice::cmp::SliceOrd>::compare (22 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (18 samples, 0.01%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (23 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (23 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (43 samples, 
0.03%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (43 samples, 0.03%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (43 samples, 0.03%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (43 samples, 0.03%)<u8 as core::slice::cmp::SliceOrd>::compare (43 samples, 0.03%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (17 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (151 samples, 0.12%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (145 samples, 0.11%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (137 samples, 0.10%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (137 samples, 0.10%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (266 samples, 0.20%)core::sync::atomic::AtomicU32::load (27 samples, 0.02%)core::sync::atomic::atomic_load (27 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (38 samples, 0.03%)std::sync::rwlock::RwLock<T>::read (37 samples, 0.03%)std::sys::sync::rwlock::futex::RwLock::read (36 samples, 0.03%)tracing::span::Span::log (16 samples, 0.01%)tracing::span::Span::record_all (70 samples, 0.05%)unlink_chunk (139 samples, 0.11%)rand::rng::Rng::gen (30 samples, 
0.02%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (30 samples, 0.02%)rand::rng::Rng::gen (30 samples, 0.02%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (30 samples, 0.02%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (30 samples, 0.02%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (30 samples, 0.02%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (30 samples, 0.02%)rand_core::block::BlockRng<R>::generate_and_set (28 samples, 0.02%)[anon] (8,759 samples, 6.67%)[anon]uuid::v4::<impl uuid::Uuid>::new_v4 (32 samples, 0.02%)uuid::rng::bytes (32 samples, 0.02%)rand::random (32 samples, 0.02%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (15 samples, 0.01%)_int_free (338 samples, 0.26%)tcache_put (18 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (22 samples, 0.02%)hashbrown::raw::h2 (14 samples, 0.01%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (23 samples, 0.02%)hashbrown::raw::RawTableInner::find_or_find_insert_slot_inner (17 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (25 samples, 0.02%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (15 samples, 0.01%)[profiling] (545 samples, 0.42%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (32 samples, 0.02%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (22 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (16 samples, 
0.01%)alloc::vec::Vec<T,A>::reserve (30 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve (28 samples, 0.02%)<alloc::string::String as core::fmt::Write>::write_str (83 samples, 0.06%)alloc::string::String::push_str (57 samples, 0.04%)alloc::vec::Vec<T,A>::extend_from_slice (57 samples, 0.04%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (57 samples, 0.04%)alloc::vec::Vec<T,A>::append_elements (57 samples, 0.04%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (20 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (41 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (151 samples, 0.12%)core::hash::sip::u8to64_le (50 samples, 0.04%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (33 samples, 0.03%)tokio::runtime::context::CONTEXT::__getit (35 samples, 0.03%)core::cell::Cell<T>::get (33 samples, 0.03%)[unknown] (20 samples, 0.02%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (75 samples, 0.06%)core::ops::function::FnMut::call_mut (66 samples, 0.05%)tokio::runtime::coop::poll_proceed (66 samples, 0.05%)tokio::runtime::context::budget (66 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (66 samples, 0.05%)tokio::runtime::context::budget::{{closure}} (27 samples, 0.02%)tokio::runtime::coop::poll_proceed::{{closure}} (27 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (110 samples, 0.08%)[unknown] (15 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (27 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (27 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (14 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (84 samples, 0.06%)std::sync::mutex::Mutex<T>::lock (70 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock 
(59 samples, 0.04%)core::sync::atomic::AtomicU32::compare_exchange (55 samples, 0.04%)core::sync::atomic::atomic_compare_exchange (55 samples, 0.04%)[unknown] (33 samples, 0.03%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (214 samples, 0.16%)__memcpy_avx512_unaligned_erms (168 samples, 0.13%)[profiling] (171 samples, 0.13%)binascii::bin2hex (77 samples, 0.06%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (280 samples, 0.21%)[unknown] (317 samples, 0.24%)[[vdso]] (2,648 samples, 2.02%)[..[unknown] (669 samples, 0.51%)[unknown] (396 samples, 0.30%)[unknown] (251 samples, 0.19%)[unknown] (65 samples, 0.05%)[unknown] (30 samples, 0.02%)[unknown] (21 samples, 0.02%)__GI___clock_gettime (56 samples, 0.04%)arena_for_chunk (72 samples, 0.05%)arena_for_chunk (62 samples, 0.05%)heap_for_ptr (49 samples, 0.04%)heap_max_size (28 samples, 0.02%)__GI___libc_free (194 samples, 0.15%)arena_for_chunk (19 samples, 0.01%)checked_request2size (24 samples, 0.02%)__GI___libc_malloc (220 samples, 0.17%)tcache_get (44 samples, 0.03%)__GI___libc_write (25 samples, 0.02%)__GI___libc_write (14 samples, 0.01%)__GI___pthread_disable_asynccancel (97 samples, 0.07%)core::num::<impl u128>::leading_zeros (15 samples, 0.01%)compiler_builtins::float::conv::int_to_float::u128_to_f64_bits (72 samples, 0.05%)__floattidf (90 samples, 0.07%)compiler_builtins::float::conv::__floattidf (86 samples, 0.07%)exp_inline (40 samples, 0.03%)log_inline (64 samples, 0.05%)__ieee754_pow_fma (114 samples, 0.09%)__libc_calloc (106 samples, 0.08%)__libc_recvfrom (252 samples, 0.19%)__libc_sendto (133 samples, 0.10%)__memcmp_evex_movbe (137 samples, 0.10%)__memcpy_avx512_unaligned_erms (1,399 samples, 1.07%)__posix_memalign (172 samples, 
0.13%)__posix_memalign (80 samples, 0.06%)_mid_memalign (71 samples, 0.05%)arena_for_chunk (14 samples, 0.01%)__pow (18 samples, 0.01%)__vdso_clock_gettime (40 samples, 0.03%)[unknown] (24 samples, 0.02%)_int_free (462 samples, 0.35%)tcache_put (54 samples, 0.04%)[unknown] (14 samples, 0.01%)_int_malloc (508 samples, 0.39%)_int_memalign (68 samples, 0.05%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (54 samples, 0.04%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (14 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (78 samples, 0.06%)alloc::raw_vec::RawVec<T,A>::grow_amortized (73 samples, 0.06%)alloc::raw_vec::finish_grow (91 samples, 0.07%)core::result::Result<T,E>::map_err (31 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Weak<ring::ec::curve25519::ed25519::signing::Ed25519KeyPair,&alloc::alloc::Global>> (16 samples, 0.01%)<alloc::sync::Weak<T,A> as core::ops::drop::Drop>::drop (16 samples, 0.01%)core::mem::drop (18 samples, 0.01%)alloc::sync::Arc<T,A>::drop_slow (21 samples, 0.02%)alloc_new_heap (49 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (49 samples, 0.04%)core::fmt::Formatter::pad_integral (40 samples, 0.03%)core::fmt::Formatter::pad_integral::write_prefix (19 samples, 0.01%)core::fmt::write (20 samples, 0.02%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (155 samples, 0.12%)core::ptr::drop_in_place<core::option::Option<core::task::wake::Waker>> (71 samples, 0.05%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (245 samples, 0.19%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_announce::{{closure}}> (33 samples, 0.03%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}}> (37 samples, 0.03%)core::str::converts::from_utf8 (33 samples, 0.03%)core::str::validations::run_utf8_validation (20 
samples, 0.02%)epoll_wait (31 samples, 0.02%)hashbrown::map::HashMap<K,V,S,A>::insert (17 samples, 0.01%)rand_chacha::guts::refill_wide (19 samples, 0.01%)std_detect::detect::arch::x86::__is_feature_detected::avx2 (17 samples, 0.01%)std_detect::detect::check_for (17 samples, 0.01%)std_detect::detect::cache::test (17 samples, 0.01%)std_detect::detect::cache::Cache::test (17 samples, 0.01%)core::sync::atomic::AtomicUsize::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)std::sys::pal::unix::time::Timespec::new (29 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (132 samples, 0.10%)core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::ge (22 samples, 0.02%)core::cmp::PartialOrd::ge (22 samples, 0.02%)std::sys::pal::unix::time::Timespec::sub_timespec (67 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock_contended (18 samples, 0.01%)std::sys_common::net::TcpListener::socket_addr (29 samples, 0.02%)std::sys_common::net::sockname (28 samples, 0.02%)syscall (552 samples, 0.42%)core::ptr::drop_in_place<core::cell::RefMut<core::option::Option<alloc::boxed::Box<tokio::runtime::scheduler::multi_thread::worker::Core>>>> (74 samples, 0.06%)core::ptr::drop_in_place<core::cell::BorrowRefMut> (74 samples, 0.06%)<core::cell::BorrowRefMut as core::ops::drop::Drop>::drop (74 samples, 0.06%)core::cell::Cell<T>::set (74 samples, 0.06%)core::cell::Cell<T>::replace (74 samples, 0.06%)core::mem::replace (74 samples, 0.06%)core::ptr::write (74 samples, 0.06%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::push_back_or_overflow (14 samples, 0.01%)tokio::runtime::context::with_scheduler (176 samples, 0.13%)std::thread::local::LocalKey<T>::try_with (152 samples, 0.12%)tokio::runtime::context::with_scheduler::{{closure}} (151 samples, 0.12%)tokio::runtime::context::scoped::Scoped<T>::with (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (150 samples, 
0.11%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (71 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (16 samples, 0.01%)core::option::Option<T>::map (19 samples, 0.01%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (24 samples, 0.02%)mio::poll::Poll::poll (53 samples, 0.04%)mio::sys::unix::selector::epoll::Selector::select (53 samples, 0.04%)core::result::Result<T,E>::map (28 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (28 samples, 0.02%)tokio::io::ready::Ready::from_mio (14 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (126 samples, 0.10%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (18 samples, 0.01%)[unknown] (51 samples, 0.04%)[unknown] (100 samples, 0.08%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (326 samples, 0.25%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (205 samples, 0.16%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (77 samples, 0.06%)[unknown] (26 samples, 0.02%)<tokio::util::linked_list::DrainFilter<T,F> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (396 samples, 0.30%)tokio::loom::std::mutex::Mutex<T>::lock (18 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (573 samples, 0.44%)core::sync::atomic::AtomicUsize::fetch_add (566 samples, 0.43%)core::sync::atomic::atomic_add (566 samples, 0.43%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (635 samples, 
0.48%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (25 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::next_remote_task (44 samples, 0.03%)tokio::runtime::scheduler::inject::shared::Shared<T>::is_empty (21 samples, 0.02%)tokio::runtime::scheduler::inject::shared::Shared<T>::len (21 samples, 0.02%)core::sync::atomic::AtomicUsize::load (21 samples, 0.02%)core::sync::atomic::atomic_load (21 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id (32 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (32 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (32 samples, 0.02%)std::sync::poison::Flag::done (32 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>,tokio::runtime::task::core::Header>>> (43 samples, 0.03%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (43 samples, 0.03%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (123 samples, 0.09%)tokio::runtime::task::list::OwnedTasks<S>::remove (117 samples, 0.09%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (80 samples, 0.06%)tokio::runtime::scheduler::defer::Defer::wake (17 samples, 0.01%)std::sys::pal::unix::futex::futex_wait (46 samples, 0.04%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (71 samples, 0.05%)std::sync::condvar::Condvar::wait (56 samples, 0.04%)std::sys::sync::condvar::futex::Condvar::wait (56 samples, 
0.04%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (56 samples, 0.04%)core::sync::atomic::AtomicUsize::compare_exchange (37 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (37 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (138 samples, 0.11%)tokio::runtime::driver::Driver::park (77 samples, 0.06%)tokio::runtime::driver::TimeDriver::park (77 samples, 0.06%)tokio::runtime::time::Driver::park (75 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Parker::park (266 samples, 0.20%)tokio::runtime::scheduler::multi_thread::park::Inner::park (266 samples, 0.20%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (432 samples, 0.33%)tokio::runtime::scheduler::multi_thread::worker::Core::should_notify_others (26 samples, 0.02%)core::cell::RefCell<T>::borrow_mut (94 samples, 0.07%)core::cell::RefCell<T>::try_borrow_mut (94 samples, 0.07%)core::cell::BorrowRefMut::new (94 samples, 0.07%)tokio::runtime::coop::budget (142 samples, 0.11%)tokio::runtime::coop::with_budget (142 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (121 samples, 0.09%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (44 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (208 samples, 0.16%)tokio::runtime::signal::Driver::process (30 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (46 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (46 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (35 samples, 0.03%)tokio::runtime::task::core::Core<T,S>::set_stage (75 samples, 0.06%)core::sync::atomic::AtomicUsize::fetch_xor (76 samples, 0.06%)core::sync::atomic::atomic_xor (76 samples, 0.06%)tokio::runtime::task::state::State::transition_to_complete (79 samples, 0.06%)tokio::runtime::task::harness::Harness<T,S>::complete (113 samples, 
0.09%)tokio::runtime::task::state::State::transition_to_terminal (18 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::dealloc (28 samples, 0.02%)core::mem::drop (18 samples, 0.01%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (18 samples, 0.01%)core::ptr::drop_in_place<tokio::util::sharded_list::ShardGuard<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>> (16 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>>> (16 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (16 samples, 0.01%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (53 samples, 0.04%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (21 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (113 samples, 0.09%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (15 samples, 0.01%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (15 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (15 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (15 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (14 samples, 0.01%)tokio::runtime::task::raw::drop_abort_handle (82 samples, 0.06%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (23 samples, 0.02%)tokio::runtime::task::state::State::ref_dec (23 samples, 0.02%)core::sync::atomic::AtomicUsize::compare_exchange (15 samples, 
0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::task::raw::drop_join_handle_slow (34 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::drop_join_handle_slow (32 samples, 0.02%)tokio::runtime::task::state::State::unset_join_interested (23 samples, 0.02%)tokio::runtime::task::state::State::fetch_update (23 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (43 samples, 0.03%)core::num::<impl u32>::wrapping_add (23 samples, 0.02%)core::option::Option<T>::or_else (37 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (36 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (36 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (38 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (59 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (45 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (132 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (63 samples, 0.05%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run (290 samples, 0.22%)tokio::runtime::context::runtime::enter_runtime (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (290 samples, 0.22%)tokio::runtime::context::set_scheduler (290 samples, 0.22%)std::thread::local::LocalKey<T>::with (290 samples, 0.22%)std::thread::local::LocalKey<T>::try_with (290 samples, 
0.22%)tokio::runtime::context::set_scheduler::{{closure}} (290 samples, 0.22%)tokio::runtime::context::scoped::Scoped<T>::set (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::Context::run (290 samples, 0.22%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (327 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (322 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::poll (333 samples, 0.25%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (342 samples, 0.26%)tokio::runtime::task::harness::poll_future::{{closure}} (342 samples, 0.26%)tokio::runtime::task::harness::poll_future (348 samples, 0.27%)std::panic::catch_unwind (347 samples, 0.26%)std::panicking::try (347 samples, 0.26%)std::panicking::try::do_call (347 samples, 0.26%)core::sync::atomic::AtomicUsize::compare_exchange (18 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (18 samples, 0.01%)tokio::runtime::task::state::State::transition_to_running (47 samples, 0.04%)tokio::runtime::task::state::State::fetch_update_action (47 samples, 0.04%)tokio::runtime::task::state::State::transition_to_running::{{closure}} (19 samples, 0.01%)tokio::runtime::task::raw::poll (427 samples, 0.33%)tokio::runtime::task::harness::Harness<T,S>::poll (408 samples, 0.31%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (407 samples, 0.31%)tokio::runtime::task::state::State::transition_to_idle (17 samples, 0.01%)core::array::<impl core::default::Default for [T: 32]>::default (21 samples, 0.02%)tokio::runtime::time::wheel::Wheel::poll (14 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (72 samples, 0.05%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (23 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (15 samples, 
0.01%)tokio::runtime::time::source::TimeSource::now (14 samples, 0.01%)tokio::runtime::time::Driver::park_internal (155 samples, 0.12%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (96 samples, 0.07%)tokio::runtime::time::wheel::level::slot_range (35 samples, 0.03%)core::num::<impl usize>::pow (35 samples, 0.03%)tokio::runtime::time::wheel::level::level_range (39 samples, 0.03%)tokio::runtime::time::wheel::level::slot_range (33 samples, 0.03%)core::num::<impl usize>::pow (33 samples, 0.03%)tokio::runtime::time::wheel::level::Level::next_expiration (208 samples, 0.16%)tokio::runtime::time::wheel::level::slot_range (48 samples, 0.04%)core::num::<impl usize>::pow (48 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (277 samples, 0.21%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::is_empty (18 samples, 0.01%)core::option::Option<T>::is_some (18 samples, 0.01%)torrust_tracker::core::Tracker::authorize::{{closure}} (50 samples, 0.04%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (37 samples, 0.03%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (27 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (19 samples, 0.01%)core::iter::traits::iterator::Iterator::collect (17 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (17 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (17 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (17 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (20 
samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (20 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (20 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (62 samples, 0.05%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (40 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (27 samples, 0.02%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (17 samples, 0.01%)torrust_tracker::servers::udp::peer_builder::from_request (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (19 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (355 samples, 0.27%)<F as core::future::into_future::IntoFuture>::into_future (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (37 samples, 0.03%)core::sync::atomic::AtomicUsize::fetch_add (25 samples, 0.02%)core::sync::atomic::atomic_add (25 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet (14 samples, 0.01%)core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (20 samples, 0.02%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)core::result::Result<T,E>::map_err (16 samples, 0.01%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (136 samples, 0.10%)torrust_tracker::core::Tracker::announce::{{closure}} (173 samples, 0.13%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (267 samples, 0.20%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (30 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (423 
samples, 0.32%)core::fmt::Formatter::new (26 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (80 samples, 0.06%)core::fmt::num::imp::fmt_u64 (58 samples, 0.04%)core::intrinsics::copy_nonoverlapping (15 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (74 samples, 0.06%)core::fmt::num::imp::fmt_u64 (70 samples, 0.05%)<T as alloc::string::ToString>::to_string (207 samples, 0.16%)core::option::Option<T>::expect (19 samples, 0.01%)core::ptr::drop_in_place<alloc::string::String> (18 samples, 0.01%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (18 samples, 0.01%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (18 samples, 0.01%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (18 samples, 0.01%)torrust_tracker::servers::udp::logging::map_action_name (25 samples, 0.02%)alloc::str::<impl alloc::borrow::ToOwned for str>::to_owned (14 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (345 samples, 0.26%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (18 samples, 0.01%)core::fmt::num::imp::fmt_u64 (14 samples, 0.01%)<T as alloc::string::ToString>::to_string (35 samples, 0.03%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (1,067 samples, 0.81%)torrust_tracker::servers::udp::logging::log_response (72 samples, 0.05%)alloc::vec::from_elem (68 samples, 0.05%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (68 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (68 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (68 samples, 0.05%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (68 samples, 0.05%)alloc::alloc::Global::alloc_impl (68 samples, 0.05%)alloc::alloc::alloc_zeroed (68 samples, 0.05%)__rdl_alloc_zeroed (68 samples, 0.05%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (68 samples, 0.05%)[unknown] (48 samples, 0.04%)[unknown] (16 samples, 
0.01%)[unknown] (28 samples, 0.02%)std::sys::pal::unix::cvt (134 samples, 0.10%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (134 samples, 0.10%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (1,908 samples, 1.45%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (504 samples, 0.38%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (382 samples, 0.29%)tokio::net::udp::UdpSocket::send_to::{{closure}} (344 samples, 0.26%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (332 samples, 0.25%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (304 samples, 0.23%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (215 samples, 0.16%)mio::net::udp::UdpSocket::send_to (185 samples, 0.14%)mio::io_source::IoSource<T>::do_io (185 samples, 0.14%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (185 samples, 0.14%)mio::net::udp::UdpSocket::send_to::{{closure}} (185 samples, 0.14%)std::net::udp::UdpSocket::send_to (185 samples, 0.14%)std::sys_common::net::UdpSocket::send_to (169 samples, 0.13%)alloc::vec::Vec<T>::with_capacity (17 samples, 0.01%)alloc::vec::Vec<T,A>::with_capacity_in (17 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (104 samples, 0.08%)tokio::net::udp::UdpSocket::ready::{{closure}} (85 samples, 0.06%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (190 samples, 0.14%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (49 samples, 0.04%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (28 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (330 samples, 0.25%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (327 samples, 0.25%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (92 samples, 0.07%)tokio::task::spawn::spawn (92 samples, 
0.07%)tokio::task::spawn::spawn_inner (92 samples, 0.07%)tokio::runtime::context::current::with_current (92 samples, 0.07%)std::thread::local::LocalKey<T>::try_with (92 samples, 0.07%)tokio::runtime::context::current::with_current::{{closure}} (92 samples, 0.07%)core::option::Option<T>::map (92 samples, 0.07%)tokio::task::spawn::spawn_inner::{{closure}} (92 samples, 0.07%)tokio::runtime::scheduler::Handle::spawn (92 samples, 0.07%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (92 samples, 0.07%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (92 samples, 0.07%)tokio::runtime::task::list::OwnedTasks<S>::bind (90 samples, 0.07%)tokio::runtime::task::new_task (89 samples, 0.07%)tokio::runtime::task::raw::RawTask::new (89 samples, 0.07%)tokio::runtime::task::core::Cell<T,S>::new (89 samples, 0.07%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (34 samples, 0.03%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (27 samples, 0.02%)alloc::sync::Arc<T>::new (21 samples, 0.02%)alloc::boxed::Box<T>::new (21 samples, 0.02%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (152 samples, 0.12%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (125 samples, 0.10%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (88 samples, 0.07%)core::option::Option<T>::is_some_and (18 samples, 
0.01%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (17 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (17 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (17 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (22 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (22 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (17 samples, 0.01%)std::sync::rwlock::RwLock<T>::read (16 samples, 0.01%)std::sys::sync::rwlock::futex::RwLock::read (16 samples, 0.01%)tracing::span::Span::log (26 samples, 0.02%)core::fmt::Arguments::new_v1 (15 samples, 0.01%)tracing_core::span::Record::is_empty (34 samples, 0.03%)tracing_core::field::ValueSet::is_empty (34 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::all (22 samples, 0.02%)tracing_core::field::ValueSet::is_empty::{{closure}} (18 
samples, 0.01%)core::option::Option<T>::is_none (16 samples, 0.01%)core::option::Option<T>::is_some (16 samples, 0.01%)tracing::span::Span::record_all (143 samples, 0.11%)unlink_chunk (185 samples, 0.14%)uuid::builder::Builder::with_variant (48 samples, 0.04%)[unknown] (40 samples, 0.03%)uuid::builder::Builder::from_random_bytes (77 samples, 0.06%)uuid::builder::Builder::with_version (29 samples, 0.02%)[unknown] (24 samples, 0.02%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (161 samples, 0.12%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (161 samples, 0.12%)[unknown] (92 samples, 0.07%)rand::rng::Rng::gen (162 samples, 0.12%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (162 samples, 0.12%)rand::rng::Rng::gen (162 samples, 0.12%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (162 samples, 0.12%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (162 samples, 0.12%)[unknown] (18,233 samples, 13.89%)[unknown]uuid::v4::<impl uuid::Uuid>::new_v4 (270 samples, 0.21%)uuid::rng::bytes (190 samples, 0.14%)rand::random (190 samples, 0.14%)__memcpy_avx512_unaligned_erms (69 samples, 0.05%)_int_free (23 samples, 0.02%)_int_malloc (23 samples, 0.02%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)advise_stack_range (31 samples, 0.02%)__GI_madvise (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 
0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (29 samples, 0.02%)[unknown] (28 samples, 0.02%)[unknown] (28 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (17 samples, 0.01%)std::sys::pal::unix::futex::futex_wait (31 samples, 0.02%)syscall (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (29 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (17 samples, 0.01%)std::sync::condvar::Condvar::wait_timeout (35 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait_timeout (35 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (35 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (56 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (56 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (56 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (56 samples, 0.04%)std::sys::pal::unix::futex::futex_wait (56 samples, 0.04%)syscall (56 samples, 0.04%)[unknown] (56 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (53 samples, 0.04%)[unknown] (52 samples, 0.04%)[unknown] (46 samples, 0.04%)[unknown] (39 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (15 samples, 0.01%)[[vdso]] (26 samples, 0.02%)[[vdso]] (263 samples, 0.20%)__ieee754_pow_fma (26 samples, 0.02%)__pow (314 samples, 0.24%)std::f64::<impl f64>::powf (345 samples, 0.26%)__GI___clock_gettime (14 samples, 
0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (416 samples, 0.32%)std::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_processing_scheduled_tasks (24 samples, 0.02%)std::time::Instant::now (18 samples, 0.01%)std::sys::pal::unix::time::Instant::now (18 samples, 0.01%)mio::poll::Poll::poll (102 samples, 0.08%)mio::sys::unix::selector::epoll::Selector::select (102 samples, 0.08%)epoll_wait (99 samples, 0.08%)[unknown] (92 samples, 0.07%)[unknown] (91 samples, 0.07%)[unknown] (91 samples, 0.07%)[unknown] (88 samples, 0.07%)[unknown] (85 samples, 0.06%)[unknown] (84 samples, 0.06%)[unknown] (43 samples, 0.03%)[unknown] (29 samples, 0.02%)[unknown] (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (125 samples, 0.10%)tokio::runtime::scheduler::multi_thread::park::Parker::park_timeout (125 samples, 0.10%)tokio::runtime::driver::Driver::park_timeout (125 samples, 0.10%)tokio::runtime::driver::TimeDriver::park_timeout (125 samples, 0.10%)tokio::runtime::time::Driver::park_timeout (125 samples, 0.10%)tokio::runtime::time::Driver::park_internal (116 samples, 0.09%)tokio::runtime::io::driver::Driver::turn (116 samples, 0.09%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (148 samples, 0.11%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (111 samples, 0.08%)alloc::sync::Arc<T,A>::inner (111 samples, 0.08%)core::ptr::non_null::NonNull<T>::as_ref (111 samples, 0.08%)core::sync::atomic::AtomicUsize::compare_exchange (16 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (16 samples, 0.01%)core::bool::<impl bool>::then (88 samples, 0.07%)std::sys::pal::unix::futex::futex_wait (13,339 samples, 10.16%)std::sys::pal::..syscall (13,003 samples, 9.90%)syscall[unknown] (12,895 samples, 
9.82%)[unknown][unknown] (12,759 samples, 9.72%)[unknown][unknown] (12,313 samples, 9.38%)[unknown][unknown] (12,032 samples, 9.16%)[unknown][unknown] (11,734 samples, 8.94%)[unknown][unknown] (11,209 samples, 8.54%)[unknown][unknown] (10,265 samples, 7.82%)[unknown][unknown] (9,345 samples, 7.12%)[unknown][unknown] (8,623 samples, 6.57%)[unknown][unknown] (7,744 samples, 5.90%)[unknow..[unknown] (5,922 samples, 4.51%)[unkn..[unknown] (4,459 samples, 3.40%)[un..[unknown] (2,808 samples, 2.14%)[..[unknown] (1,275 samples, 0.97%)[unknown] (1,022 samples, 0.78%)[unknown] (738 samples, 0.56%)[unknown] (607 samples, 0.46%)[unknown] (155 samples, 0.12%)core::result::Result<T,E>::is_err (77 samples, 0.06%)core::result::Result<T,E>::is_ok (77 samples, 0.06%)std::sync::condvar::Condvar::wait (13,429 samples, 10.23%)std::sync::cond..std::sys::sync::condvar::futex::Condvar::wait (13,428 samples, 10.23%)std::sys::sync:..std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (13,428 samples, 10.23%)std::sys::sync:..std::sys::sync::mutex::futex::Mutex::lock (89 samples, 0.07%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (13,508 samples, 10.29%)tokio::runtime:..tokio::loom::std::mutex::Mutex<T>::lock (64 samples, 0.05%)std::sync::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (31 samples, 0.02%)core::sync::atomic::AtomicU32::compare_exchange (30 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (30 samples, 0.02%)core::sync::atomic::AtomicUsize::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (38 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Parker::park (34 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park (34 samples, 0.03%)core::array::<impl core::default::Default for [T: 32]>::default (17 samples, 
0.01%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (19 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (33 samples, 0.03%)tokio::runtime::time::wheel::level::slot_range (15 samples, 0.01%)core::num::<impl usize>::pow (15 samples, 0.01%)tokio::runtime::time::wheel::level::level_range (17 samples, 0.01%)tokio::runtime::time::wheel::level::slot_range (15 samples, 0.01%)core::num::<impl usize>::pow (15 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_expiration (95 samples, 0.07%)tokio::runtime::time::wheel::level::slot_range (41 samples, 0.03%)core::num::<impl usize>::pow (41 samples, 0.03%)tokio::runtime::time::wheel::Wheel::next_expiration (129 samples, 0.10%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (202 samples, 0.15%)tokio::runtime::time::wheel::Wheel::poll_at (17 samples, 0.01%)tokio::runtime::time::wheel::Wheel::next_expiration (15 samples, 0.01%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (38 samples, 0.03%)core::option::Option<T>::map (38 samples, 0.03%)core::result::Result<T,E>::map (31 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (31 samples, 0.02%)alloc::vec::Vec<T,A>::set_len (17 samples, 0.01%)[[vdso]] (28 samples, 0.02%)[unknown] (11,031 samples, 8.40%)[unknown][unknown] (10,941 samples, 8.33%)[unknown][unknown] (10,850 samples, 8.26%)[unknown][unknown] (10,691 samples, 8.14%)[unknown][unknown] (10,070 samples, 7.67%)[unknown][unknown] (9,737 samples, 7.42%)[unknown][unknown] (7,659 samples, 5.83%)[unknow..[unknown] (6,530 samples, 4.97%)[unkno..[unknown] (5,633 samples, 4.29%)[unkn..[unknown] (5,055 samples, 3.85%)[unk..[unknown] (4,046 samples, 3.08%)[un..[unknown] (2,911 samples, 2.22%)[..[unknown] (2,115 samples, 1.61%)[unknown] (1,226 samples, 0.93%)[unknown] (455 samples, 0.35%)[unknown] (408 samples, 0.31%)[unknown] (249 samples, 0.19%)[unknown] (202 samples, 
0.15%)[unknown] (100 samples, 0.08%)mio::poll::Poll::poll (11,328 samples, 8.63%)mio::poll::P..mio::sys::unix::selector::epoll::Selector::select (11,328 samples, 8.63%)mio::sys::un..epoll_wait (11,229 samples, 8.55%)epoll_wait__GI___pthread_disable_asynccancel (50 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (47 samples, 0.04%)tokio::util::bit::Pack::pack (38 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (25 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (23 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (19 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (11,595 samples, 8.83%)tokio::runti..tokio::runtime::io::scheduled_io::ScheduledIo::wake (175 samples, 0.13%)__GI___clock_gettime (15 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (18 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (26 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (26 samples, 0.02%)tokio::time::clock::Clock::now (20 samples, 0.02%)tokio::time::clock::now (20 samples, 0.02%)std::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Instant::now (20 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (17 samples, 0.01%)tokio::runtime::time::Driver::park_internal (11,686 samples, 8.90%)tokio::runtim..tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (11,957 samples, 9.11%)tokio::runtim..tokio::runtime::driver::Driver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::driver::TimeDriver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::time::Driver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::scheduler::multi_thread::park::Parker::park (25,502 samples, 19.42%)tokio::runtime::scheduler::mul..tokio::runtime::scheduler::multi_thread::park::Inner::park (25,502 samples, 19.42%)tokio::runtime::scheduler::mul..tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (25,547 samples, 
19.46%)tokio::runtime::scheduler::mul..core::result::Result<T,E>::is_err (14 samples, 0.01%)core::result::Result<T,E>::is_ok (14 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (45 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (45 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (84 samples, 0.06%)std::sync::mutex::Mutex<T>::lock (81 samples, 0.06%)std::sys::sync::mutex::futex::Mutex::lock (73 samples, 0.06%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (122 samples, 0.09%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (90 samples, 0.07%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (90 samples, 0.07%)core::slice::<impl [T]>::contains (241 samples, 0.18%)<T as core::slice::cmp::SliceContains>::slice_contains (241 samples, 0.18%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (241 samples, 0.18%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (75 samples, 0.06%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (75 samples, 0.06%)core::sync::atomic::AtomicU32::compare_exchange (20 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (283 samples, 0.22%)tokio::loom::std::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (24 samples, 0.02%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (33 samples, 0.03%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (33 samples, 0.03%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (33 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::unpark_worker_by_id (98 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (401 samples, 
0.31%)alloc::vec::Vec<T,A>::push (14 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (15 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (15 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::unlock (14 samples, 0.01%)core::result::Result<T,E>::is_err (15 samples, 0.01%)core::result::Result<T,E>::is_ok (15 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (22 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (22 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (63 samples, 0.05%)std::sync::mutex::Mutex<T>::lock (62 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock (59 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (106 samples, 0.08%)tokio::runtime::scheduler::multi_thread::idle::State::dec_num_unparked (14 samples, 0.01%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (21 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (17 samples, 0.01%)alloc::sync::Arc<T,A>::inner (17 samples, 0.01%)core::ptr::non_null::NonNull<T>::as_ref (17 samples, 0.01%)core::sync::atomic::AtomicU32::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (68 samples, 0.05%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (51 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (33 samples, 0.03%)core::sync::atomic::AtomicU64::load (16 samples, 0.01%)core::sync::atomic::atomic_load (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (106 samples, 
0.08%)tokio::runtime::scheduler::multi_thread::worker::Context::park (26,672 samples, 20.31%)tokio::runtime::scheduler::multi..tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (272 samples, 0.21%)tokio::runtime::scheduler::multi_thread::worker::Core::has_tasks (33 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::has_tasks (24 samples, 0.02%)tokio::runtime::context::budget (18 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (18 samples, 0.01%)syscall (61 samples, 0.05%)__memcpy_avx512_unaligned_erms (172 samples, 0.13%)__memcpy_avx512_unaligned_erms (224 samples, 0.17%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (228 samples, 0.17%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (228 samples, 0.17%)std::panic::catch_unwind (415 samples, 0.32%)std::panicking::try (415 samples, 0.32%)std::panicking::try::do_call (415 samples, 0.32%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (415 samples, 0.32%)core::ops::function::FnOnce::call_once (415 samples, 0.32%)tokio::runtime::task::harness::Harness<T,S>::complete::{{closure}} (415 samples, 0.32%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (415 samples, 0.32%)tokio::runtime::task::core::Core<T,S>::set_stage (410 samples, 0.31%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (27 samples, 0.02%)core::result::Result<T,E>::is_err (43 samples, 0.03%)core::result::Result<T,E>::is_ok (43 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::complete (570 samples, 0.43%)tokio::runtime::task::harness::Harness<T,S>::release (155 samples, 0.12%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (152 samples, 0.12%)tokio::runtime::task::list::OwnedTasks<S>::remove (152 samples, 
0.12%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (103 samples, 0.08%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (65 samples, 0.05%)tokio::loom::std::mutex::Mutex<T>::lock (58 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (58 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (54 samples, 0.04%)std::io::stdio::stderr::INSTANCE (17 samples, 0.01%)tokio::runtime::coop::budget (26 samples, 0.02%)tokio::runtime::coop::with_budget (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (35 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (70 samples, 0.05%)__memcpy_avx512_unaligned_erms (42 samples, 0.03%)core::cmp::Ord::min (22 samples, 0.02%)core::cmp::min_by (22 samples, 0.02%)std::io::cursor::Cursor<T>::remaining_slice (27 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (30 samples, 0.02%)std::io::cursor::Cursor<T>::remaining_slice (24 samples, 0.02%)core::slice::index::<impl core::ops::index::Index<I> for [T]>::index (19 samples, 0.01%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::index (19 samples, 0.01%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (19 samples, 0.01%)<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (19 samples, 0.01%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (44 samples, 0.03%)std::io::impls::<impl std::io::Read for &[u8]>::read_exact (20 samples, 0.02%)byteorder::io::ReadBytesExt::read_i32 (46 samples, 0.04%)core::cmp::Ord::min (14 samples, 0.01%)core::cmp::min_by (14 samples, 0.01%)std::io::cursor::Cursor<T>::remaining_slice (19 samples, 0.01%)byteorder::io::ReadBytesExt::read_i64 (24 samples, 
0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (24 samples, 0.02%)aquatic_udp_protocol::request::Request::from_bytes (349 samples, 0.27%)__GI___lll_lock_wake_private (148 samples, 0.11%)[unknown] (139 samples, 0.11%)[unknown] (137 samples, 0.10%)[unknown] (123 samples, 0.09%)[unknown] (111 samples, 0.08%)[unknown] (98 samples, 0.07%)[unknown] (42 samples, 0.03%)[unknown] (30 samples, 0.02%)__GI___lll_lock_wait_private (553 samples, 0.42%)futex_wait (541 samples, 0.41%)[unknown] (536 samples, 0.41%)[unknown] (531 samples, 0.40%)[unknown] (524 samples, 0.40%)[unknown] (515 samples, 0.39%)[unknown] (498 samples, 0.38%)[unknown] (470 samples, 0.36%)[unknown] (435 samples, 0.33%)[unknown] (350 samples, 0.27%)[unknown] (327 samples, 0.25%)[unknown] (290 samples, 0.22%)[unknown] (222 samples, 0.17%)[unknown] (160 samples, 0.12%)[unknown] (104 samples, 0.08%)[unknown] (33 samples, 0.03%)[unknown] (25 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (703 samples, 0.54%)__GI___libc_free (866 samples, 0.66%)tracing::span::Span::record_all (30 samples, 0.02%)unlink_chunk (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (899 samples, 0.68%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (899 samples, 0.68%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (899 samples, 0.68%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (899 samples, 0.68%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (899 samples, 0.68%)alloc::alloc::dealloc (899 samples, 0.68%)__rdl_dealloc (899 samples, 0.68%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (899 samples, 0.68%)core::result::Result<T,E>::expect (91 samples, 0.07%)core::result::Result<T,E>::map_err (28 samples, 0.02%)[[vdso]] (28 samples, 0.02%)__GI___clock_gettime (47 samples, 0.04%)std::time::Instant::elapsed (67 samples, 0.05%)std::time::Instant::now (54 samples, 
0.04%)std::sys::pal::unix::time::Instant::now (54 samples, 0.04%)std::sys::pal::unix::time::Timespec::now (53 samples, 0.04%)std::sys::pal::unix::cvt (23 samples, 0.02%)__GI_getsockname (3,792 samples, 2.89%)__..[unknown] (3,714 samples, 2.83%)[u..[unknown] (3,661 samples, 2.79%)[u..[unknown] (3,557 samples, 2.71%)[u..[unknown] (3,416 samples, 2.60%)[u..[unknown] (2,695 samples, 2.05%)[..[unknown] (2,063 samples, 1.57%)[unknown] (891 samples, 0.68%)[unknown] (270 samples, 0.21%)[unknown] (99 samples, 0.08%)[unknown] (94 samples, 0.07%)[unknown] (84 samples, 0.06%)[unknown] (77 samples, 0.06%)[unknown] (25 samples, 0.02%)[unknown] (16 samples, 0.01%)std::sys_common::net::TcpListener::socket_addr::{{closure}} (3,800 samples, 2.89%)st..tokio::net::udp::UdpSocket::local_addr (3,838 samples, 2.92%)to..mio::net::udp::UdpSocket::local_addr (3,838 samples, 2.92%)mi..std::net::tcp::TcpListener::local_addr (3,838 samples, 2.92%)st..std::sys_common::net::TcpListener::socket_addr (3,838 samples, 2.92%)st..std::sys_common::net::sockname (3,835 samples, 2.92%)st..[[vdso]] (60 samples, 0.05%)rand_chacha::guts::ChaCha::pos64 (168 samples, 0.13%)<ppv_lite86::soft::x2<W,G> as core::ops::arith::AddAssign>::add_assign (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::AddAssign>::add_assign (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::Add>::add (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_add_epi32 (26 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_or_si256 (29 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (31 samples, 
0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (31 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right24 (18 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right24 (18 samples, 0.01%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (18 samples, 0.01%)rand_chacha::guts::round (118 samples, 0.09%)rand_chacha::guts::refill_wide::impl_avx2 (312 samples, 0.24%)rand_chacha::guts::refill_wide::fn_impl (312 samples, 0.24%)rand_chacha::guts::refill_wide_impl (312 samples, 0.24%)<rand_chacha::chacha::ChaCha12Core as rand_core::block::BlockRngCore>::generate (384 samples, 0.29%)rand_chacha::guts::ChaCha::refill4 (384 samples, 0.29%)rand::rng::Rng::gen (432 samples, 0.33%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (432 samples, 0.33%)rand::rng::Rng::gen (432 samples, 0.33%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (432 samples, 0.33%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (432 samples, 0.33%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (432 samples, 0.33%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (432 samples, 0.33%)rand_core::block::BlockRng<R>::generate_and_set (392 samples, 0.30%)<rand::rngs::adapter::reseeding::ReseedingCore<R,Rsdr> as rand_core::block::BlockRngCore>::generate (392 samples, 0.30%)torrust_tracker::servers::udp::handlers::RequestId::make (440 samples, 0.34%)uuid::v4::<impl uuid::Uuid>::new_v4 (436 samples, 0.33%)uuid::rng::bytes (435 samples, 0.33%)rand::random (435 samples, 0.33%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for 
alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (34 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (22 samples, 0.02%)core::iter::traits::iterator::Iterator::collect (16 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (16 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (16 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (16 samples, 0.01%)<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::next (15 samples, 0.01%)core::iter::traits::iterator::Iterator::find (15 samples, 0.01%)core::iter::traits::iterator::Iterator::try_fold (15 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (31 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.03%)core::slice::iter::Iter<T>::post_inc_start (14 samples, 0.01%)core::ptr::non_null::NonNull<T>::add (14 samples, 0.01%)__memcmp_evex_movbe (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (26 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (165 samples, 0.13%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (165 samples, 0.13%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (165 samples, 0.13%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (165 
samples, 0.13%)<u8 as core::slice::cmp::SliceOrd>::compare (165 samples, 0.13%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (339 samples, 0.26%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (308 samples, 0.23%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (308 samples, 0.23%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (342 samples, 0.26%)std::sys::sync::rwlock::futex::RwLock::spin_read (25 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_until (25 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read_contended (28 samples, 0.02%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (436 samples, 0.33%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (397 samples, 0.30%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (29 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (29 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (29 samples, 0.02%)__memcmp_evex_movbe (31 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (52 samples, 0.04%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (52 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (52 samples, 0.04%)core::slice::cmp::<impl 
core::cmp::Ord for [T]>::cmp (52 samples, 0.04%)<u8 as core::slice::cmp::SliceOrd>::compare (52 samples, 0.04%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (103 samples, 0.08%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (102 samples, 0.08%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (96 samples, 0.07%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (96 samples, 0.07%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (72 samples, 0.05%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (104 samples, 0.08%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (104 samples, 0.08%)core::slice::iter::Iter<T>::post_inc_start (32 samples, 0.02%)core::ptr::non_null::NonNull<T>::add (32 samples, 0.02%)__memcmp_evex_movbe (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (81 samples, 0.06%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (271 samples, 0.21%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (271 samples, 0.21%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (271 samples, 0.21%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (271 samples, 0.21%)<u8 as core::slice::cmp::SliceOrd>::compare (271 samples, 0.21%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (610 samples, 0.46%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (566 samples, 0.43%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (566 
samples, 0.43%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Immut,K,V,Type>::keys (18 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (616 samples, 0.47%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::KV>::split (15 samples, 0.01%)alloc::collections::btree::map::entry::Entry<K,V,A>::or_insert (46 samples, 0.04%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (45 samples, 0.03%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (40 samples, 0.03%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert (27 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (29 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (20 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (120 samples, 0.09%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (118 samples, 0.09%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Owned,K,V,alloc::collections::btree::node::marker::Leaf>::new_leaf (118 samples, 0.09%)alloc::collections::btree::node::LeafNode<K,V>::new (118 samples, 0.09%)alloc::boxed::Box<T,A>::new_uninit_in (118 samples, 0.09%)alloc::boxed::Box<T,A>::try_new_uninit_in (118 samples, 0.09%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (118 
samples, 0.09%)alloc::alloc::Global::alloc_impl (118 samples, 0.09%)alloc::alloc::alloc (118 samples, 0.09%)__rdl_alloc (118 samples, 0.09%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (118 samples, 0.09%)__GI___libc_malloc (118 samples, 0.09%)_int_malloc (107 samples, 0.08%)_int_malloc (28 samples, 0.02%)__GI___libc_malloc (32 samples, 0.02%)__rdl_alloc (36 samples, 0.03%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (36 samples, 0.03%)alloc::sync::Arc<T>::new (42 samples, 0.03%)alloc::boxed::Box<T>::new (42 samples, 0.03%)alloc::alloc::exchange_malloc (39 samples, 0.03%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (39 samples, 0.03%)alloc::alloc::Global::alloc_impl (39 samples, 0.03%)alloc::alloc::alloc (39 samples, 0.03%)core::mem::drop (15 samples, 0.01%)core::ptr::drop_in_place<core::option::Option<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (15 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (15 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (15 samples, 0.01%)__GI___libc_free (39 samples, 0.03%)_int_free (37 samples, 0.03%)get_max_fast (16 samples, 0.01%)core::option::Option<T>::is_some_and (50 samples, 0.04%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (50 samples, 0.04%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (50 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (50 samples, 0.04%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (290 samples, 
0.22%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (284 samples, 0.22%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (255 samples, 0.19%)std::sys::sync::rwlock::futex::RwLock::spin_read (16 samples, 0.01%)std::sys::sync::rwlock::futex::RwLock::spin_until (16 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (21 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (21 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (21 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read_contended (21 samples, 0.02%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (1,147 samples, 0.87%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (1,144 samples, 0.87%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents_mut (32 samples, 0.02%)std::sync::rwlock::RwLock<T>::write (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::write (32 samples, 
0.02%)std::sys::sync::rwlock::futex::RwLock::write_contended (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_write (28 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_until (28 samples, 0.02%)torrust_tracker::core::Tracker::announce::{{closure}} (1,597 samples, 1.22%)<core::net::socket_addr::SocketAddrV4 as core::hash::Hash>::hash (14 samples, 0.01%)<core::net::ip_addr::Ipv4Addr as core::hash::Hash>::hash (14 samples, 0.01%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (29 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (24 samples, 0.02%)<core::time::Nanoseconds as core::hash::Hash>::hash (25 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (25 samples, 0.02%)core::hash::Hasher::write_u32 (25 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (25 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (25 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (36 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (37 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (37 samples, 0.03%)<core::time::Duration as core::hash::Hash>::hash (64 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (39 samples, 0.03%)core::hash::Hasher::write_u64 (39 samples, 0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (122 samples, 0.09%)core::hash::impls::<impl core::hash::Hash for u64>::hash (58 samples, 0.04%)core::hash::Hasher::write_u64 (58 samples, 0.04%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (58 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (58 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (57 samples, 0.04%)core::hash::sip::u8to64_le (23 samples, 0.02%)core::hash::Hasher::write_length_prefix (27 samples, 0.02%)core::hash::Hasher::write_usize 
(27 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (16 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (246 samples, 0.19%)core::array::<impl core::hash::Hash for [T: N]>::hash (93 samples, 0.07%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (93 samples, 0.07%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (66 samples, 0.05%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (62 samples, 0.05%)core::hash::sip::u8to64_le (17 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::check (285 samples, 0.22%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (36 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (36 samples, 0.03%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (24 samples, 0.02%)std::time::SystemTime::now (19 samples, 0.01%)std::sys::pal::unix::time::SystemTime::now (19 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (1,954 samples, 1.49%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (24 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (18 samples, 0.01%)<core::time::Nanoseconds as core::hash::Hash>::hash (20 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (20 samples, 0.02%)core::hash::Hasher::write_u32 (20 samples, 0.02%)<std::hash::random::DefaultHasher as 
core::hash::Hasher>::write (20 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (20 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (44 samples, 0.03%)<core::time::Duration as core::hash::Hash>::hash (65 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (45 samples, 0.03%)core::hash::Hasher::write_u64 (45 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (45 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (45 samples, 0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (105 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u64>::hash (40 samples, 0.03%)core::hash::Hasher::write_u64 (40 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (40 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (40 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (39 samples, 0.03%)core::hash::Hasher::write_length_prefix (34 samples, 0.03%)core::hash::Hasher::write_usize (34 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (34 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (34 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (33 samples, 0.03%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (231 samples, 0.18%)core::array::<impl core::hash::Hash for [T: N]>::hash (100 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (100 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (66 samples, 0.05%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (61 samples, 0.05%)core::hash::sip::u8to64_le (16 samples, 0.01%)_int_free (16 samples, 
0.01%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (270 samples, 0.21%)torrust_tracker::servers::udp::connection_cookie::make (268 samples, 0.20%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (36 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (35 samples, 0.03%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (31 samples, 0.02%)std::time::SystemTime::now (26 samples, 0.02%)std::sys::pal::unix::time::SystemTime::now (26 samples, 0.02%)torrust_tracker::core::ScrapeData::add_file (19 samples, 0.01%)std::collections::hash::map::HashMap<K,V,S>::insert (19 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (19 samples, 0.01%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (16 samples, 0.01%)hashbrown::raw::RawTable<T,A>::reserve (16 samples, 0.01%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (17 samples, 0.01%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (17 samples, 0.01%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (17 samples, 0.01%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (17 samples, 0.01%)<u8 as core::slice::cmp::SliceOrd>::compare (17 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (53 samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (53 samples, 0.04%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (2,336 samples, 
1.78%)t..torrust_tracker::servers::udp::handlers::handle_scrape::{{closure}} (101 samples, 0.08%)torrust_tracker::core::Tracker::scrape::{{closure}} (90 samples, 0.07%)torrust_tracker::core::Tracker::get_swarm_metadata (68 samples, 0.05%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (64 samples, 0.05%)alloc::raw_vec::finish_grow (19 samples, 0.01%)alloc::vec::Vec<T,A>::reserve (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::grow_amortized (21 samples, 0.02%)<alloc::string::String as core::fmt::Write>::write_str (23 samples, 0.02%)alloc::string::String::push_str (23 samples, 0.02%)alloc::vec::Vec<T,A>::extend_from_slice (23 samples, 0.02%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (23 samples, 0.02%)alloc::vec::Vec<T,A>::append_elements (23 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (85 samples, 0.06%)core::fmt::num::imp::fmt_u64 (78 samples, 0.06%)<alloc::string::String as core::fmt::Write>::write_str (15 samples, 0.01%)alloc::string::String::push_str (15 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (15 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (15 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (15 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (37 samples, 0.03%)core::fmt::num::imp::fmt_u64 (36 samples, 0.03%)<T as alloc::string::ToString>::to_string (141 samples, 
0.11%)core::option::Option<T>::expect (34 samples, 0.03%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (28 samples, 0.02%)alloc::alloc::dealloc (28 samples, 0.02%)__rdl_dealloc (28 samples, 0.02%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (28 samples, 0.02%)core::ptr::drop_in_place<alloc::string::String> (55 samples, 0.04%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (55 samples, 0.04%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (55 samples, 0.04%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (55 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::current_memory (20 samples, 0.02%)torrust_tracker::servers::udp::logging::map_action_name (16 samples, 0.01%)binascii::bin2hex (51 samples, 0.04%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (16 samples, 0.01%)core::fmt::write (25 samples, 0.02%)core::fmt::rt::Argument::fmt (15 samples, 0.01%)core::fmt::Formatter::write_fmt (87 samples, 0.07%)core::str::converts::from_utf8 (43 samples, 0.03%)core::str::validations::run_utf8_validation (37 samples, 0.03%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (161 samples, 0.12%)<T as alloc::string::ToString>::to_string (161 samples, 0.12%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (156 samples, 0.12%)torrust_tracker::servers::udp::logging::log_request (479 samples, 0.36%)[[vdso]] (51 samples, 0.04%)alloc::raw_vec::finish_grow (56 samples, 0.04%)alloc::vec::Vec<T,A>::reserve (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::reserve (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::grow_amortized (64 samples, 0.05%)<alloc::string::String as core::fmt::Write>::write_str (65 samples, 0.05%)alloc::string::String::push_str (65 samples, 0.05%)alloc::vec::Vec<T,A>::extend_from_slice (65 samples, 0.05%)<alloc::vec::Vec<T,A> as 
alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (65 samples, 0.05%)alloc::vec::Vec<T,A>::append_elements (65 samples, 0.05%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (114 samples, 0.09%)core::fmt::num::imp::fmt_u64 (110 samples, 0.08%)<T as alloc::string::ToString>::to_string (132 samples, 0.10%)core::option::Option<T>::expect (20 samples, 0.02%)core::ptr::drop_in_place<alloc::string::String> (22 samples, 0.02%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (22 samples, 0.02%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (22 samples, 0.02%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (22 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (8,883 samples, 6.77%)torrust_t..torrust_tracker::servers::udp::logging::log_response (238 samples, 0.18%)__GI___lll_lock_wait_private (14 samples, 0.01%)futex_wait (14 samples, 0.01%)__GI___lll_lock_wake_private (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (17 samples, 0.01%)_int_malloc (191 samples, 0.15%)__libc_calloc (238 samples, 0.18%)__memcpy_avx512_unaligned_erms (34 samples, 0.03%)alloc::vec::from_elem (316 samples, 0.24%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (316 samples, 0.24%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (316 samples, 0.24%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (316 samples, 0.24%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (312 samples, 0.24%)alloc::alloc::Global::alloc_impl (312 samples, 0.24%)alloc::alloc::alloc_zeroed (312 samples, 0.24%)__rdl_alloc_zeroed (312 samples, 0.24%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (312 samples, 0.24%)byteorder::ByteOrder::write_i32 (18 samples, 0.01%)<byteorder::BigEndian as byteorder::ByteOrder>::write_u32 (18 samples, 0.01%)core::num::<impl 
u32>::to_be_bytes (18 samples, 0.01%)core::num::<impl u32>::to_be (18 samples, 0.01%)core::num::<impl u32>::swap_bytes (18 samples, 0.01%)byteorder::io::WriteBytesExt::write_i32 (89 samples, 0.07%)std::io::Write::write_all (71 samples, 0.05%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (71 samples, 0.05%)std::io::cursor::vec_write (71 samples, 0.05%)std::io::cursor::vec_write_unchecked (51 samples, 0.04%)core::ptr::mut_ptr::<impl *mut T>::copy_from (51 samples, 0.04%)core::intrinsics::copy (51 samples, 0.04%)aquatic_udp_protocol::response::Response::write (227 samples, 0.17%)byteorder::io::WriteBytesExt::write_i64 (28 samples, 0.02%)std::io::Write::write_all (21 samples, 0.02%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (21 samples, 0.02%)std::io::cursor::vec_write (21 samples, 0.02%)std::io::cursor::vec_write_unchecked (21 samples, 0.02%)core::ptr::mut_ptr::<impl *mut T>::copy_from (21 samples, 0.02%)core::intrinsics::copy (21 samples, 0.02%)__GI___lll_lock_wake_private (17 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (14 samples, 0.01%)__GI___lll_lock_wait_private (16 samples, 0.01%)futex_wait (15 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (136 samples, 0.10%)__GI___libc_free (206 samples, 0.16%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (211 samples, 0.16%)alloc::alloc::dealloc (211 samples, 0.16%)__rdl_dealloc (211 samples, 0.16%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (211 samples, 0.16%)core::ptr::drop_in_place<std::io::cursor::Cursor<alloc::vec::Vec<u8>>> (224 samples, 0.17%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (224 samples, 0.17%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (224 samples, 0.17%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (224 samples, 
0.17%)std::io::cursor::Cursor<T>::new (56 samples, 0.04%)tokio::io::ready::Ready::intersection (23 samples, 0.02%)tokio::io::ready::Ready::from_interest (23 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (83 samples, 0.06%)[unknown] (32,674 samples, 24.88%)[unknown][unknown] (32,402 samples, 24.68%)[unknown][unknown] (32,272 samples, 24.58%)[unknown][unknown] (32,215 samples, 24.54%)[unknown][unknown] (31,174 samples, 23.74%)[unknown][unknown] (30,794 samples, 23.45%)[unknown][unknown] (30,036 samples, 22.88%)[unknown][unknown] (28,639 samples, 21.81%)[unknown][unknown] (27,908 samples, 21.25%)[unknown][unknown] (26,013 samples, 19.81%)[unknown][unknown] (23,181 samples, 17.65%)[unknown][unknown] (19,559 samples, 14.90%)[unknown][unknown] (18,052 samples, 13.75%)[unknown][unknown] (15,794 samples, 12.03%)[unknown][unknown] (14,740 samples, 11.23%)[unknown][unknown] (12,486 samples, 9.51%)[unknown][unknown] (11,317 samples, 8.62%)[unknown][unknown] (10,725 samples, 8.17%)[unknown][unknown] (10,017 samples, 7.63%)[unknown][unknown] (9,713 samples, 7.40%)[unknown][unknown] (8,432 samples, 6.42%)[unknown][unknown] (8,062 samples, 6.14%)[unknown][unknown] (6,973 samples, 5.31%)[unknow..[unknown] (5,328 samples, 4.06%)[unk..[unknown] (4,352 samples, 3.31%)[un..[unknown] (3,786 samples, 2.88%)[u..[unknown] (3,659 samples, 2.79%)[u..[unknown] (3,276 samples, 2.50%)[u..[unknown] (2,417 samples, 1.84%)[..[unknown] (2,115 samples, 1.61%)[unknown] (1,610 samples, 1.23%)[unknown] (422 samples, 0.32%)[unknown] (84 samples, 0.06%)[unknown] (69 samples, 0.05%)__GI___pthread_disable_asynccancel (67 samples, 0.05%)__libc_sendto (32,896 samples, 25.05%)__libc_sendtotokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (32,981 samples, 25.12%)tokio::net::udp::UdpSocket::send_to_addr..mio::net::udp::UdpSocket::send_to (32,981 samples, 25.12%)mio::net::udp::UdpSocket::send_tomio::io_source::IoSource<T>::do_io (32,981 
samples, 25.12%)mio::io_source::IoSource<T>::do_iomio::sys::unix::stateless_io_source::IoSourceState::do_io (32,981 samples, 25.12%)mio::sys::unix::stateless_io_source::IoS..mio::net::udp::UdpSocket::send_to::{{closure}} (32,981 samples, 25.12%)mio::net::udp::UdpSocket::send_to::{{clo..std::net::udp::UdpSocket::send_to (32,981 samples, 25.12%)std::net::udp::UdpSocket::send_tostd::sys_common::net::UdpSocket::send_to (32,981 samples, 25.12%)std::sys_common::net::UdpSocket::send_tostd::sys::pal::unix::cvt (85 samples, 0.06%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (44,349 samples, 33.78%)torrust_tracker::servers::udp::server::Udp::process_req..torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (43,412 samples, 33.06%)torrust_tracker::servers::udp::server::Udp::process_va..torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (34,320 samples, 26.14%)torrust_tracker::servers::udp::server::Udp..torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (33,360 samples, 25.41%)torrust_tracker::servers::udp::server::Ud..tokio::net::udp::UdpSocket::send_to::{{closure}} (33,227 samples, 25.31%)tokio::net::udp::UdpSocket::send_to::{{c..tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (33,142 samples, 25.24%)tokio::net::udp::UdpSocket::send_to_addr..tokio::runtime::io::registration::Registration::async_io::{{closure}} (33,115 samples, 25.22%)tokio::runtime::io::registration::Regist..tokio::runtime::io::registration::Registration::readiness::{{closure}} (28 samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (18 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (15 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (14 samples, 0.01%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (15 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (15 samples, 
0.01%)core::sync::atomic::atomic_add (15 samples, 0.01%)__GI___lll_lock_wait_private (16 samples, 0.01%)futex_wait (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (135 samples, 0.10%)__GI___libc_free (147 samples, 0.11%)syscall (22 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Core<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (15 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::dealloc (24 samples, 0.02%)core::mem::drop (24 samples, 0.02%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (24 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (24 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::abort::AbortHandle> (262 samples, 0.20%)<tokio::runtime::task::abort::AbortHandle as core::ops::drop::Drop>::drop (262 samples, 0.20%)tokio::runtime::task::raw::RawTask::drop_abort_handle (256 samples, 0.19%)tokio::runtime::task::raw::drop_abort_handle (59 samples, 0.04%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (50 samples, 0.04%)tokio::runtime::task::state::State::ref_dec (50 samples, 0.04%)tokio::runtime::task::raw::RawTask::drop_join_handle_slow (16 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::join::JoinHandle<()>> (47 samples, 0.04%)<tokio::runtime::task::join::JoinHandle<T> as core::ops::drop::Drop>::drop (47 samples, 0.04%)tokio::runtime::task::state::State::drop_join_handle_fast (19 samples, 
0.01%)core::sync::atomic::AtomicUsize::compare_exchange_weak (19 samples, 0.01%)core::sync::atomic::atomic_compare_exchange_weak (19 samples, 0.01%)ringbuf::ring_buffer::base::RbBase::is_full (14 samples, 0.01%)<ringbuf::ring_buffer::shared::SharedRb<T,C> as ringbuf::ring_buffer::base::RbBase<T>>::head (14 samples, 0.01%)core::sync::atomic::AtomicUsize::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)ringbuf::consumer::Consumer<T,R>::advance (29 samples, 0.02%)ringbuf::ring_buffer::base::RbRead::advance_head (29 samples, 0.02%)ringbuf::ring_buffer::rb::Rb::pop (50 samples, 0.04%)ringbuf::consumer::Consumer<T,R>::pop (50 samples, 0.04%)ringbuf::producer::Producer<T,R>::advance (23 samples, 0.02%)ringbuf::ring_buffer::base::RbWrite::advance_tail (23 samples, 0.02%)core::num::nonzero::<impl core::ops::arith::Rem<core::num::nonzero::NonZero<usize>> for usize>::rem (19 samples, 0.01%)ringbuf::ring_buffer::rb::Rb::push_overwrite (107 samples, 0.08%)ringbuf::ring_buffer::rb::Rb::push (43 samples, 0.03%)ringbuf::producer::Producer<T,R>::push (43 samples, 0.03%)tokio::runtime::task::abort::AbortHandle::is_finished (84 samples, 0.06%)tokio::runtime::task::state::Snapshot::is_complete (84 samples, 0.06%)tokio::runtime::task::join::JoinHandle<T>::abort_handle (17 samples, 0.01%)tokio::runtime::task::raw::RawTask::ref_inc (17 samples, 0.01%)tokio::runtime::task::state::State::ref_inc (17 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (14 samples, 0.01%)core::sync::atomic::atomic_add (14 samples, 0.01%)__GI___lll_lock_wake_private (22 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)malloc_consolidate (95 samples, 0.07%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (76 samples, 0.06%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (31 samples, 
0.02%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (26 samples, 0.02%)_int_malloc (282 samples, 0.21%)__GI___libc_malloc (323 samples, 0.25%)alloc::vec::Vec<T>::with_capacity (326 samples, 0.25%)alloc::vec::Vec<T,A>::with_capacity_in (326 samples, 0.25%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (324 samples, 0.25%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (324 samples, 0.25%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (324 samples, 0.25%)alloc::alloc::Global::alloc_impl (324 samples, 0.25%)alloc::alloc::alloc (324 samples, 0.25%)__rdl_alloc (324 samples, 0.25%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (324 samples, 0.25%)tokio::io::ready::Ready::intersection (24 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (199 samples, 0.15%)tokio::util::bit::Pack::unpack (16 samples, 0.01%)tokio::util::bit::unpack (16 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (19 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (17 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (16 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (222 samples, 0.17%)tokio::net::udp::UdpSocket::ready::{{closure}} (222 samples, 0.17%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (50 samples, 0.04%)std::io::error::repr_bitpacked::Repr::data (14 samples, 0.01%)std::io::error::repr_bitpacked::decode_repr (14 samples, 0.01%)std::io::error::Error::kind (16 samples, 0.01%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (14 samples, 0.01%)[unknown] (8,756 samples, 6.67%)[unknown][unknown] (8,685 samples, 6.61%)[unknown][unknown] (8,574 samples, 6.53%)[unknown][unknown] (8,415 samples, 6.41%)[unknown][unknown] (7,686 samples, 5.85%)[unknow..[unknown] (7,239 samples, 
5.51%)[unknow..[unknown] (6,566 samples, 5.00%)[unkno..[unknown] (5,304 samples, 4.04%)[unk..[unknown] (4,008 samples, 3.05%)[un..[unknown] (3,571 samples, 2.72%)[u..[unknown] (2,375 samples, 1.81%)[..[unknown] (1,844 samples, 1.40%)[unknown] (1,030 samples, 0.78%)[unknown] (344 samples, 0.26%)[unknown] (113 samples, 0.09%)__libc_recvfrom (8,903 samples, 6.78%)__libc_re..__GI___pthread_disable_asynccancel (22 samples, 0.02%)std::sys::pal::unix::cvt (20 samples, 0.02%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (9,005 samples, 6.86%)tokio::ne..mio::net::udp::UdpSocket::recv_from (8,964 samples, 6.83%)mio::net:..mio::io_source::IoSource<T>::do_io (8,964 samples, 6.83%)mio::io_s..mio::sys::unix::stateless_io_source::IoSourceState::do_io (8,964 samples, 6.83%)mio::sys:..mio::net::udp::UdpSocket::recv_from::{{closure}} (8,964 samples, 6.83%)mio::net:..std::net::udp::UdpSocket::recv_from (8,964 samples, 6.83%)std::net:..std::sys_common::net::UdpSocket::recv_from (8,964 samples, 6.83%)std::sys_..std::sys::pal::unix::net::Socket::recv_from (8,964 samples, 6.83%)std::sys:..std::sys::pal::unix::net::Socket::recv_from_with_flags (8,964 samples, 6.83%)std::sys:..std::sys_common::net::sockaddr_to_addr (23 samples, 0.02%)tokio::runtime::io::registration::Registration::clear_readiness (18 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::clear_readiness (18 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (32 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (9,967 samples, 7.59%)torrust_tr..tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (9,291 samples, 7.08%)tokio::ne..tokio::runtime::io::registration::Registration::async_io::{{closure}} (9,287 samples, 7.07%)tokio::ru..tokio::runtime::io::registration::Registration::readiness::{{closure}} (45 samples, 0.03%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (41 samples, 
0.03%)__memcpy_avx512_unaligned_erms (424 samples, 0.32%)__memcpy_avx512_unaligned_erms (493 samples, 0.38%)__memcpy_avx512_unaligned_erms (298 samples, 0.23%)syscall (1,105 samples, 0.84%)[unknown] (1,095 samples, 0.83%)[unknown] (1,091 samples, 0.83%)[unknown] (1,049 samples, 0.80%)[unknown] (998 samples, 0.76%)[unknown] (907 samples, 0.69%)[unknown] (710 samples, 0.54%)[unknown] (635 samples, 0.48%)[unknown] (538 samples, 0.41%)[unknown] (358 samples, 0.27%)[unknown] (256 samples, 0.19%)[unknown] (153 samples, 0.12%)[unknown] (96 samples, 0.07%)[unknown] (81 samples, 0.06%)tokio::runtime::context::with_scheduler (36 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (31 samples, 0.02%)tokio::runtime::context::with_scheduler::{{closure}} (27 samples, 0.02%)tokio::runtime::context::scoped::Scoped<T>::with (27 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (25 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (15 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (340 samples, 0.26%)core::sync::atomic::atomic_add (340 samples, 0.26%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (354 samples, 0.27%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (367 samples, 0.28%)[unknown] (95 samples, 0.07%)[unknown] (93 samples, 0.07%)[unknown] (92 samples, 0.07%)[unknown] (90 samples, 0.07%)[unknown] (82 samples, 0.06%)[unknown] (73 samples, 0.06%)[unknown] (63 samples, 0.05%)[unknown] (44 samples, 0.03%)[unknown] (40 samples, 0.03%)[unknown] (35 samples, 0.03%)[unknown] (30 samples, 0.02%)[unknown] (22 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (17 samples, 
0.01%)tokio::runtime::driver::Handle::unpark (99 samples, 0.08%)tokio::runtime::driver::IoHandle::unpark (99 samples, 0.08%)tokio::runtime::io::driver::Handle::unpark (99 samples, 0.08%)mio::waker::Waker::wake (99 samples, 0.08%)mio::sys::unix::waker::fdbased::Waker::wake (99 samples, 0.08%)mio::sys::unix::waker::eventfd::WakerInternal::wake (99 samples, 0.08%)<&std::fs::File as std::io::Write>::write (99 samples, 0.08%)std::sys::pal::unix::fs::File::write (99 samples, 0.08%)std::sys::pal::unix::fd::FileDesc::write (99 samples, 0.08%)__GI___libc_write (99 samples, 0.08%)__GI___libc_write (99 samples, 0.08%)tokio::runtime::context::with_scheduler (1,615 samples, 1.23%)std::thread::local::LocalKey<T>::try_with (1,613 samples, 1.23%)tokio::runtime::context::with_scheduler::{{closure}} (1,612 samples, 1.23%)tokio::runtime::context::scoped::Scoped<T>::with (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (1,609 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (1,609 samples, 1.23%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (101 samples, 0.08%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (101 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_option_task_without_yield (1,647 samples, 1.25%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task (1,646 samples, 1.25%)tokio::runtime::scheduler::multi_thread::worker::with_current 
(1,646 samples, 1.25%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (23 samples, 0.02%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (18 samples, 0.01%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (104 samples, 0.08%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (60 samples, 0.05%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (57 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (49 samples, 0.04%)core::sync::atomic::AtomicU32::compare_exchange (38 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (38 samples, 0.03%)__memcpy_avx512_unaligned_erms (162 samples, 0.12%)__memcpy_avx512_unaligned_erms (34 samples, 0.03%)__GI___lll_lock_wake_private (127 samples, 0.10%)[unknown] (125 samples, 0.10%)[unknown] (124 samples, 0.09%)[unknown] (119 samples, 0.09%)[unknown] (110 samples, 0.08%)[unknown] (106 samples, 0.08%)[unknown] (87 samples, 0.07%)[unknown] (82 samples, 0.06%)[unknown] (51 samples, 0.04%)[unknown] (27 samples, 0.02%)[unknown] (19 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (77 samples, 0.06%)[unknown] (1,207 samples, 0.92%)[unknown] (1,146 samples, 0.87%)[unknown] (1,126 samples, 0.86%)[unknown] (1,091 samples, 0.83%)[unknown] (1,046 samples, 0.80%)[unknown] (962 samples, 0.73%)[unknown] (914 samples, 0.70%)[unknown] (848 samples, 0.65%)[unknown] (774 samples, 0.59%)[unknown] (580 samples, 0.44%)[unknown] (456 samples, 0.35%)[unknown] (305 samples, 0.23%)[unknown] (85 samples, 0.06%)__GI_mprotect (2,474 samples, 1.88%)_..[unknown] (2,457 samples, 1.87%)[..[unknown] (2,440 samples, 1.86%)[..[unknown] (2,436 samples, 1.86%)[..[unknown] (2,435 samples, 1.85%)[..[unknown] (2,360 samples, 1.80%)[..[unknown] (2,203 
samples, 1.68%)[unknown] (1,995 samples, 1.52%)[unknown] (1,709 samples, 1.30%)[unknown] (1,524 samples, 1.16%)[unknown] (1,193 samples, 0.91%)[unknown] (865 samples, 0.66%)[unknown] (539 samples, 0.41%)[unknown] (259 samples, 0.20%)[unknown] (80 samples, 0.06%)[unknown] (29 samples, 0.02%)sysmalloc (3,786 samples, 2.88%)sy..grow_heap (2,509 samples, 1.91%)g.._int_malloc (4,038 samples, 3.08%)_in..unlink_chunk (31 samples, 0.02%)alloc::alloc::exchange_malloc (4,335 samples, 3.30%)all..<alloc::alloc::Global as core::alloc::Allocator>::allocate (4,329 samples, 3.30%)<al..alloc::alloc::Global::alloc_impl (4,329 samples, 3.30%)all..alloc::alloc::alloc (4,329 samples, 3.30%)all..__rdl_alloc (4,329 samples, 3.30%)__r..std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (4,329 samples, 3.30%)std..std::sys::pal::unix::alloc::aligned_malloc (4,329 samples, 3.30%)std..__posix_memalign (4,297 samples, 3.27%)__p..__posix_memalign (4,297 samples, 3.27%)__p.._mid_memalign (4,297 samples, 3.27%)_mi.._int_memalign (4,149 samples, 3.16%)_in..sysmalloc (18 samples, 0.01%)core::option::Option<T>::map (6,666 samples, 5.08%)core::..tokio::task::spawn::spawn_inner::{{closure}} (6,665 samples, 5.08%)tokio:..tokio::runtime::scheduler::Handle::spawn (6,665 samples, 5.08%)tokio:..tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (6,664 samples, 5.08%)tokio:..tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (6,661 samples, 5.07%)tokio:..tokio::runtime::task::list::OwnedTasks<S>::bind (4,692 samples, 3.57%)toki..tokio::runtime::task::new_task (4,579 samples, 3.49%)tok..tokio::runtime::task::raw::RawTask::new (4,579 samples, 3.49%)tok..tokio::runtime::task::core::Cell<T,S>::new (4,579 samples, 3.49%)tok..alloc::boxed::Box<T>::new (4,389 samples, 3.34%)all..tokio::runtime::context::current::with_current (7,636 samples, 5.82%)tokio::..std::thread::local::LocalKey<T>::try_with (7,635 samples, 
5.81%)std::th..tokio::runtime::context::current::with_current::{{closure}} (7,188 samples, 5.47%)tokio::..tokio::task::spawn::spawn (7,670 samples, 5.84%)tokio::..tokio::task::spawn::spawn_inner (7,670 samples, 5.84%)tokio::..tokio::runtime::task::id::Id::next (24 samples, 0.02%)core::sync::atomic::AtomicU64::fetch_add (24 samples, 0.02%)core::sync::atomic::atomic_add (24 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (62,691 samples, 47.75%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (62,691 samples, 47.75%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (18,228 samples, 13.88%)torrust_tracker::serv..torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (18,226 samples, 13.88%)torrust_tracker::serv..torrust_tracker::servers::udp::server::Udp::spawn_request_processor (7,679 samples, 5.85%)torrust..__memcpy_avx512_unaligned_erms (38 samples, 0.03%)__memcpy_avx512_unaligned_erms (407 samples, 0.31%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (411 samples, 0.31%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (411 samples, 0.31%)tokio::runtime::task::core::Core<T,S>::poll (63,150 samples, 48.10%)tokio::runtime::task::core::Core<T,S>::polltokio::runtime::task::core::Core<T,S>::drop_future_or_output (459 samples, 0.35%)tokio::runtime::task::core::Core<T,S>::set_stage (459 samples, 0.35%)__memcpy_avx512_unaligned_erms (16 samples, 0.01%)__memcpy_avx512_unaligned_erms (398 samples, 0.30%)__memcpy_avx512_unaligned_erms (325 samples, 0.25%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (330 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (330 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::set_stage (731 samples, 0.56%)tokio::runtime::task::harness::poll_future (63,908 samples, 
48.67%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (63,908 samples, 48.67%)std::panic::catch_unwindstd::panicking::try (63,908 samples, 48.67%)std::panicking::trystd::panicking::try::do_call (63,908 samples, 48.67%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (63,908 samples, 48.67%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()..tokio::runtime::task::harness::poll_future::{{closure}} (63,908 samples, 48.67%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::store_output (758 samples, 0.58%)tokio::runtime::coop::budget (65,027 samples, 49.53%)tokio::runtime::coop::budgettokio::runtime::coop::with_budget (65,027 samples, 49.53%)tokio::runtime::coop::with_budgettokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (65,009 samples, 49.51%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}}tokio::runtime::task::LocalNotified<S>::run (65,003 samples, 49.51%)tokio::runtime::task::LocalNotified<S>::runtokio::runtime::task::raw::RawTask::poll (65,003 samples, 49.51%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (64,538 samples, 49.15%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (64,493 samples, 49.12%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (63,919 samples, 48.68%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (93 samples, 0.07%)syscall (2,486 samples, 1.89%)s..[unknown] (2,424 samples, 1.85%)[..[unknown] (2,416 samples, 1.84%)[..[unknown] (2,130 samples, 1.62%)[unknown] (2,013 samples, 1.53%)[unknown] (1,951 samples, 1.49%)[unknown] (1,589 samples, 1.21%)[unknown] (1,415 samples, 1.08%)[unknown] (1,217 samples, 0.93%)[unknown] (820 samples, 
0.62%)[unknown] (564 samples, 0.43%)[unknown] (360 samples, 0.27%)[unknown] (244 samples, 0.19%)[unknown] (194 samples, 0.15%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (339 samples, 0.26%)core::sync::atomic::AtomicUsize::fetch_add (337 samples, 0.26%)core::sync::atomic::atomic_add (337 samples, 0.26%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (364 samples, 0.28%)[unknown] (154 samples, 0.12%)[unknown] (152 samples, 0.12%)[unknown] (143 samples, 0.11%)[unknown] (139 samples, 0.11%)[unknown] (131 samples, 0.10%)[unknown] (123 samples, 0.09%)[unknown] (110 samples, 0.08%)[unknown] (80 samples, 0.06%)[unknown] (74 samples, 0.06%)[unknown] (65 samples, 0.05%)[unknown] (64 samples, 0.05%)[unknown] (47 samples, 0.04%)[unknown] (44 samples, 0.03%)[unknown] (43 samples, 0.03%)[unknown] (40 samples, 0.03%)[unknown] (26 samples, 0.02%)[unknown] (20 samples, 0.02%)__GI___libc_write (158 samples, 0.12%)__GI___libc_write (158 samples, 0.12%)mio::sys::unix::waker::eventfd::WakerInternal::wake (159 samples, 0.12%)<&std::fs::File as std::io::Write>::write (159 samples, 0.12%)std::sys::pal::unix::fs::File::write (159 samples, 0.12%)std::sys::pal::unix::fd::FileDesc::write (159 samples, 0.12%)tokio::runtime::driver::Handle::unpark (168 samples, 0.13%)tokio::runtime::driver::IoHandle::unpark (168 samples, 0.13%)tokio::runtime::io::driver::Handle::unpark (168 samples, 0.13%)mio::waker::Waker::wake (165 samples, 0.13%)mio::sys::unix::waker::fdbased::Waker::wake (165 samples, 0.13%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (68,159 samples, 51.91%)tokio::runtime::scheduler::multi_thread::worker::Context::run_tasktokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (3,024 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (3,023 samples, 
2.30%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (3,022 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (171 samples, 0.13%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (171 samples, 0.13%)core::option::Option<T>::or_else (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::tune_global_queue_interval (53 samples, 0.04%)tokio::runtime::scheduler::multi_thread::stats::Stats::tuned_global_queue_interval (53 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (107 samples, 0.08%)__GI___libc_free (17 samples, 0.01%)_int_free (17 samples, 0.01%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Dying,K,V>::deallocating_end (18 samples, 0.01%)alloc::collections::btree::navigate::<impl alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>>::deallocating_end (18 samples, 0.01%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,alloc::collections::btree::node::marker::LeafOrInternal>::deallocate_and_ascend (18 samples, 0.01%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (18 samples, 0.01%)alloc::alloc::dealloc (18 samples, 0.01%)__rdl_dealloc (18 samples, 0.01%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (18 samples, 0.01%)alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (19 samples, 
0.01%)tokio::runtime::task::Task<S>::shutdown (26 samples, 0.02%)tokio::runtime::task::raw::RawTask::shutdown (26 samples, 0.02%)tokio::runtime::task::raw::shutdown (26 samples, 0.02%)tokio::runtime::task::harness::Harness<T,S>::shutdown (26 samples, 0.02%)tokio::runtime::task::harness::cancel_task (26 samples, 0.02%)std::panic::catch_unwind (26 samples, 0.02%)std::panicking::try (26 samples, 0.02%)std::panicking::try::do_call (26 samples, 0.02%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (26 samples, 0.02%)core::ops::function::FnOnce::call_once (26 samples, 0.02%)tokio::runtime::task::harness::cancel_task::{{closure}} (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage (26 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (26 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::core::Tracker> (26 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)core::ptr::drop_in_place<std::sync::rwlock::RwLock<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 
0.02%)core::ptr::drop_in_place<core::cell::UnsafeCell<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)core::mem::drop (26 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::IntoIter<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)<alloc::collections::btree::map::IntoIter<K,V,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,NodeType>,alloc::collections::btree::node::marker::KV>::drop_key_val (24 samples, 0.02%)core::mem::maybe_uninit::MaybeUninit<T>::assume_init_drop (24 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (24 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (21 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>> (20 samples, 0.02%)core::ptr::drop_in_place<core::cell::UnsafeCell<torrust_tracker_torrent_repository::entry::Torrent>> (20 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker_torrent_repository::entry::Torrent> (20 samples, 
0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::peer::Id,alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (20 samples, 0.02%)<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)core::mem::drop (20 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::IntoIter<torrust_tracker_primitives::peer::Id,alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (20 samples, 0.02%)<alloc::collections::btree::map::IntoIter<K,V,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::pre_shutdown (33 samples, 0.03%)tokio::runtime::task::list::OwnedTasks<S>::close_and_shutdown_all (33 samples, 0.03%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (114 samples, 0.09%)alloc::sync::Arc<T,A>::inner (114 samples, 0.09%)core::ptr::non_null::NonNull<T>::as_ref (114 samples, 0.09%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (108 samples, 0.08%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (108 samples, 0.08%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (106 samples, 0.08%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (49 samples, 0.04%)alloc::sync::Arc<T,A>::inner (49 samples, 0.04%)core::ptr::non_null::NonNull<T>::as_ref (49 samples, 0.04%)core::num::<impl u32>::wrapping_sub (132 samples, 0.10%)core::sync::atomic::AtomicU64::load (40 samples, 0.03%)core::sync::atomic::atomic_load (40 samples, 0.03%)tokio::loom::std::atomic_u32::AtomicU32::unsync_load (48 samples, 0.04%)core::sync::atomic::AtomicU32::load (48 samples, 0.04%)core::sync::atomic::atomic_load (48 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (65 samples, 0.05%)alloc::sync::Arc<T,A>::inner (65 samples, 0.05%)core::ptr::non_null::NonNull<T>::as_ref (65 samples, 0.05%)core::num::<impl 
u32>::wrapping_sub (50 samples, 0.04%)core::sync::atomic::AtomicU32::load (55 samples, 0.04%)core::sync::atomic::atomic_load (55 samples, 0.04%)core::sync::atomic::AtomicU64::load (80 samples, 0.06%)core::sync::atomic::atomic_load (80 samples, 0.06%)tokio::runtime::scheduler::multi_thread::queue::pack (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (666 samples, 0.51%)tokio::runtime::scheduler::multi_thread::queue::unpack (147 samples, 0.11%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (1,036 samples, 0.79%)tokio::runtime::scheduler::multi_thread::queue::unpack (46 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_searching (49 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_searching (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (2,414 samples, 1.84%)t..tokio::util::rand::FastRand::fastrand_n (24 samples, 0.02%)tokio::util::rand::FastRand::fastrand (24 samples, 0.02%)std::sys_common::backtrace::__rust_begin_short_backtrace (98,136 samples, 74.74%)std::sys_common::backtrace::__rust_begin_short_backtracetokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}} (98,136 samples, 74.74%)tokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}}tokio::runtime::blocking::pool::Inner::run (98,136 samples, 74.74%)tokio::runtime::blocking::pool::Inner::runtokio::runtime::blocking::pool::Task::run (98,042 samples, 74.67%)tokio::runtime::blocking::pool::Task::runtokio::runtime::task::UnownedTask<S>::run (98,042 samples, 74.67%)tokio::runtime::task::UnownedTask<S>::runtokio::runtime::task::raw::RawTask::poll (98,042 samples, 74.67%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (98,042 samples, 74.67%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (98,042 samples, 
74.67%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (98,042 samples, 74.67%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::task::harness::poll_future (98,042 samples, 74.67%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (98,042 samples, 74.67%)std::panic::catch_unwindstd::panicking::try (98,042 samples, 74.67%)std::panicking::trystd::panicking::try::do_call (98,042 samples, 74.67%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (98,042 samples, 74.67%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncetokio::runtime::task::harness::poll_future::{{closure}} (98,042 samples, 74.67%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::poll (98,042 samples, 74.67%)tokio::runtime::task::core::Core<T,S>::polltokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (98,042 samples, 74.67%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (98,042 samples, 74.67%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (98,042 samples, 74.67%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::polltokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}}tokio::runtime::scheduler::multi_thread::worker::run (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::runtokio::runtime::context::runtime::enter_runtime (98,042 samples, 74.67%)tokio::runtime::context::runtime::enter_runtimetokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (98,042 samples, 
74.67%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}tokio::runtime::context::set_scheduler (98,042 samples, 74.67%)tokio::runtime::context::set_schedulerstd::thread::local::LocalKey<T>::with (98,042 samples, 74.67%)std::thread::local::LocalKey<T>::withstd::thread::local::LocalKey<T>::try_with (98,042 samples, 74.67%)std::thread::local::LocalKey<T>::try_withtokio::runtime::context::set_scheduler::{{closure}} (98,042 samples, 74.67%)tokio::runtime::context::set_scheduler::{{closure}}tokio::runtime::context::scoped::Scoped<T>::set (98,042 samples, 74.67%)tokio::runtime::context::scoped::Scoped<T>::settokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}}tokio::runtime::scheduler::multi_thread::worker::Context::run (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::Context::runstd::panic::catch_unwind (98,137 samples, 74.74%)std::panic::catch_unwindstd::panicking::try (98,137 samples, 74.74%)std::panicking::trystd::panicking::try::do_call (98,137 samples, 74.74%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (98,137 samples, 74.74%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncestd::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}} (98,137 samples, 74.74%)std::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}}<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (98,139 samples, 74.74%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (98,139 samples, 74.74%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_oncecore::ops::function::FnOnce::call_once{{vtable.shim}} (98,139 samples, 
74.74%)core::ops::function::FnOnce::call_once{{vtable.shim}}std::thread::Builder::spawn_unchecked_::{{closure}} (98,139 samples, 74.74%)std::thread::Builder::spawn_unchecked_::{{closure}}clone3 (98,205 samples, 74.79%)clone3start_thread (98,205 samples, 74.79%)start_threadstd::sys::pal::unix::thread::Thread::new::thread_start (98,158 samples, 74.76%)std::sys::pal::unix::thread::Thread::new::thread_startcore::ptr::drop_in_place<std::sys::pal::unix::stack_overflow::Handler> (19 samples, 0.01%)<std::sys::pal::unix::stack_overflow::Handler as core::ops::drop::Drop>::drop (19 samples, 0.01%)std::sys::pal::unix::stack_overflow::imp::drop_handler (19 samples, 0.01%)__GI_munmap (19 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (17 samples, 0.01%)[unknown] (16 samples, 0.01%)core::fmt::Formatter::pad_integral (112 samples, 0.09%)core::fmt::Formatter::pad_integral::write_prefix (59 samples, 0.04%)core::fmt::Formatter::pad_integral (16 samples, 0.01%)core::fmt::write (20 samples, 0.02%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (19 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (51 samples, 0.04%)rand_chacha::guts::round (18 samples, 0.01%)rand_chacha::guts::refill_wide::impl_avx2 (26 samples, 0.02%)rand_chacha::guts::refill_wide::fn_impl (26 samples, 0.02%)rand_chacha::guts::refill_wide_impl (26 samples, 0.02%)rand_chacha::guts::refill_wide (14 samples, 0.01%)std_detect::detect::arch::x86::__is_feature_detected::avx2 (14 samples, 0.01%)std_detect::detect::check_for (14 samples, 0.01%)std_detect::detect::cache::test (14 samples, 0.01%)std_detect::detect::cache::Cache::test (14 samples, 0.01%)core::sync::atomic::AtomicUsize::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 
0.01%)core::cell::RefCell<T>::borrow_mut (81 samples, 0.06%)core::cell::RefCell<T>::try_borrow_mut (81 samples, 0.06%)core::cell::BorrowRefMut::new (81 samples, 0.06%)std::sys::pal::unix::time::Timespec::now (164 samples, 0.12%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (106 samples, 0.08%)tokio::runtime::coop::budget (105 samples, 0.08%)tokio::runtime::coop::with_budget (105 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (96 samples, 0.07%)std::sys::pal::unix::time::Timespec::sub_timespec (35 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::lock_contended (15 samples, 0.01%)syscall (90 samples, 0.07%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (21 samples, 0.02%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run (61 samples, 0.05%)tokio::runtime::context::runtime::enter_runtime (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (61 samples, 0.05%)tokio::runtime::context::set_scheduler (61 samples, 0.05%)std::thread::local::LocalKey<T>::with (61 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (61 samples, 0.05%)tokio::runtime::context::set_scheduler::{{closure}} (61 samples, 0.05%)tokio::runtime::context::scoped::Scoped<T>::set (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Context::run (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (19 samples, 
0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (17 samples, 0.01%)tokio::runtime::context::CONTEXT::__getit (14 samples, 0.01%)core::cell::Cell<T>::get (14 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (22 samples, 0.02%)<tokio::runtime::task::core::TaskIdGuard as core::ops::drop::Drop>::drop (22 samples, 0.02%)tokio::runtime::context::set_current_task_id (22 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (22 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (112 samples, 0.09%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (111 samples, 0.08%)tokio::runtime::task::harness::poll_future (125 samples, 0.10%)std::panic::catch_unwind (125 samples, 0.10%)std::panicking::try (125 samples, 0.10%)std::panicking::try::do_call (125 samples, 0.10%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (125 samples, 0.10%)tokio::runtime::task::harness::poll_future::{{closure}} (125 samples, 0.10%)tokio::runtime::task::core::Core<T,S>::poll (125 samples, 0.10%)tokio::runtime::task::raw::poll (157 samples, 0.12%)tokio::runtime::task::harness::Harness<T,S>::poll (135 samples, 0.10%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (135 samples, 0.10%)tokio::runtime::time::Driver::park_internal (15 samples, 0.01%)torrust_tracker::bootstrap::logging::INIT (17 samples, 0.01%)__memcpy_avx512_unaligned_erms (397 samples, 0.30%)_int_free (24 samples, 0.02%)_int_malloc (132 samples, 0.10%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE::META (570 samples, 0.43%)__GI___lll_lock_wait_private (22 samples, 0.02%)futex_wait (14 samples, 0.01%)__memcpy_avx512_unaligned_erms (299 samples, 0.23%)_int_free (16 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE (361 samples, 
0.27%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (41 samples, 0.03%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (23 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (53 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (14 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (63 samples, 0.05%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (21 samples, 0.02%)__GI___libc_malloc (18 samples, 0.01%)alloc::vec::Vec<T>::with_capacity (116 samples, 0.09%)alloc::vec::Vec<T,A>::with_capacity_in (116 samples, 0.09%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (116 samples, 0.09%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (116 samples, 0.09%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (116 samples, 0.09%)alloc::alloc::Global::alloc_impl (116 samples, 0.09%)alloc::alloc::alloc (116 samples, 0.09%)__rdl_alloc (116 samples, 0.09%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (116 samples, 0.09%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (53 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (53 samples, 0.04%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (53 samples, 0.04%)_int_malloc (21 samples, 0.02%)[unknown] (36 samples, 0.03%)[unknown] (16 samples, 0.01%)core::mem::zeroed (27 samples, 0.02%)core::mem::maybe_uninit::MaybeUninit<T>::zeroed (27 samples, 0.02%)core::ptr::mut_ptr::<impl *mut T>::write_bytes (27 samples, 0.02%)core::intrinsics::write_bytes (27 samples, 0.02%)[unknown] (27 samples, 0.02%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (64 samples, 0.05%)mio::net::udp::UdpSocket::recv_from (49 samples, 0.04%)mio::io_source::IoSource<T>::do_io (49 samples, 
0.04%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (49 samples, 0.04%)mio::net::udp::UdpSocket::recv_from::{{closure}} (49 samples, 0.04%)std::net::udp::UdpSocket::recv_from (49 samples, 0.04%)std::sys_common::net::UdpSocket::recv_from (49 samples, 0.04%)std::sys::pal::unix::net::Socket::recv_from (49 samples, 0.04%)std::sys::pal::unix::net::Socket::recv_from_with_flags (49 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (271 samples, 0.21%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (143 samples, 0.11%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (141 samples, 0.11%)tokio::runtime::io::registration::Registration::clear_readiness (15 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::clear_readiness (15 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (15 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (359 samples, 0.27%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (346 samples, 0.26%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (39 samples, 0.03%)tokio::task::spawn::spawn (39 samples, 0.03%)tokio::task::spawn::spawn_inner (39 samples, 0.03%)tokio::runtime::context::current::with_current (39 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (39 samples, 0.03%)tokio::runtime::context::current::with_current::{{closure}} (39 samples, 0.03%)core::option::Option<T>::map (39 samples, 0.03%)tokio::task::spawn::spawn_inner::{{closure}} (39 samples, 0.03%)tokio::runtime::scheduler::Handle::spawn (39 samples, 0.03%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (39 samples, 0.03%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (39 samples, 0.03%)tokio::runtime::task::list::OwnedTasks<S>::bind (34 samples, 0.03%)all (131,301 samples, 100%)tokio-runtime-w (131,061 samples, 
99.82%)tokio-runtime-w \ No newline at end of file diff --git a/docs/profiling.md b/docs/profiling.md index 7c28367ce..26e5b786e 100644 --- a/docs/profiling.md +++ b/docs/profiling.md @@ -2,12 +2,85 @@ ## Using flamegraph +### Requirements + +You need to install some dependencies. For Ubuntu you can run: + ```console -TORRUST_TRACKER_PATH_CONFIG="./share/default/config/tracker.udp.benchmarking.toml" cargo flamegraph --bin=profiling -- 60 +sudo apt-get install clang lld +``` + +You also need to uncomment these lines in the cargo [config.toml](./../.cargo/config.toml) file. + +```toml +[target.x86_64-unknown-linux-gnu] +linker = "/usr/bin/clang" +rustflags = ["-Clink-arg=-fuse-ld=lld", "-Clink-arg=-Wl,--no-rosegment"] +``` + +Follow the [flamegraph](https://github.com/flamegraph-rs/flamegraph) instructions for installation. + +Apart from running the tracker you will need to run some request if you want to profile services while they are processing requests. + +You can use the aquatic [UDP load test](https://github.com/greatest-ape/aquatic/tree/master/crates/udp_load_test) script. + +### Generate flamegraph + +To generate the graph you will need to: + +1. Build the tracker for profiling. +2. Run the aquatic UDP load test. +3. Run the tracker with flamegraph and profiling configuration. + +```console +cargo build --profile=release-debug --bin=profiling +./target/release/aquatic_udp_load_test -c "load-test-config.toml" +sudo TORRUST_TRACKER_PATH_CONFIG="./share/default/config/tracker.udp.benchmarking.toml" /home/USER/.cargo/bin/flamegraph -- ./target/release-debug/profiling 60 +``` + +__NOTICE__: You need to install the `aquatic_udp_load_test` program. + +The output should be like the following: + +```output +Loading configuration file: `./share/default/config/tracker.udp.benchmarking.toml` ... +Torrust successfully shutdown. +[ perf record: Woken up 23377 times to write data ] +Warning: +Processed 533730 events and lost 3 chunks! + +Check IO/CPU overload! 
+ +[ perf record: Captured and wrote 5899.806 MB perf.data (373239 samples) ] +writing flamegraph to "flamegraph.svg" ``` ![flamegraph](./media/flamegraph.svg) +__NOTICE__: You need to provide the absolute path for the installed `flamegraph` app if you use sudo. Replace `/home/USER/.cargo/bin/flamegraph` with the location of your installed `flamegraph` app. You can run it without sudo but you can get a warning message like the following: + +```output +WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted, +check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid. + +Samples in kernel functions may not be resolved if a suitable vmlinux +file is not found in the buildid cache or in the vmlinux path. + +Samples in kernel modules won't be resolved at all. + +If some relocation was applied (e.g. kexec) symbols may be misresolved +even with a suitable vmlinux or kallsyms file. + +Couldn't record kernel reference relocation symbol +Symbol resolution may be skewed if relocation was used (e.g. kexec). +Check /proc/kallsyms permission or run as root. +Loading configuration file: `./share/default/config/tracker.udp.benchmarking.toml` ... +``` + +And some bars in the graph will have the `unknown` label. 
+ +![flamegraph generated without sudo](./media/flamegraph_generated_withput_sudo.svg) + ## Using valgrind and kcachegrind You need to: diff --git a/flamegraph_generated_withput_sudo.svg b/flamegraph_generated_withput_sudo.svg new file mode 100644 index 000000000..84c00ffe3 --- /dev/null +++ b/flamegraph_generated_withput_sudo.svg @@ -0,0 +1,491 @@ +Flame Graph Reset ZoomSearch [unknown] (188 samples, 0.14%)[unknown] (187 samples, 0.14%)[unknown] (186 samples, 0.14%)[unknown] (178 samples, 0.14%)[unknown] (172 samples, 0.13%)[unknown] (158 samples, 0.12%)[unknown] (158 samples, 0.12%)[unknown] (125 samples, 0.10%)[unknown] (102 samples, 0.08%)[unknown] (93 samples, 0.07%)[unknown] (92 samples, 0.07%)[unknown] (41 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (29 samples, 0.02%)[unknown] (25 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (15 samples, 0.01%)__GI___mmap64 (18 samples, 0.01%)__GI___mmap64 (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (17 samples, 0.01%)profiling (214 samples, 0.16%)clone3 (22 samples, 0.02%)start_thread (22 samples, 0.02%)std::sys::pal::unix::thread::Thread::new::thread_start (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::Handler::new (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::imp::make_handler (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::imp::get_stack (19 samples, 0.01%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (30 samples, 0.02%)[[vdso]] (93 samples, 0.07%)<torrust_tracker::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as core::ops::deref::Deref>::deref::__stability::LAZY (143 samples, 0.11%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (31 samples, 
0.02%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<BorrowType,K,V>::init_front (21 samples, 0.02%)[[vdso]] (91 samples, 0.07%)__GI___clock_gettime (14 samples, 0.01%)_int_malloc (53 samples, 0.04%)epoll_wait (254 samples, 0.19%)tokio::runtime::context::with_scheduler (28 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (14 samples, 0.01%)tokio::runtime::context::with_scheduler::{{closure}} (14 samples, 0.01%)core::option::Option<T>::map (17 samples, 0.01%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (17 samples, 0.01%)mio::poll::Poll::poll (27 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select (27 samples, 0.02%)tokio::runtime::io::driver::Driver::turn (54 samples, 0.04%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (26 samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (17 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (71 samples, 0.05%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (65 samples, 0.05%)core::sync::atomic::AtomicUsize::fetch_add (65 samples, 0.05%)core::sync::atomic::atomic_add (65 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (31 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark_condvar (18 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (49 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (33 samples, 
0.03%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (93 samples, 0.07%)tokio::runtime::scheduler::multi_thread::park::Parker::park (75 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Inner::park (75 samples, 0.06%)core::cell::RefCell<T>::borrow_mut (18 samples, 0.01%)core::cell::RefCell<T>::try_borrow_mut (18 samples, 0.01%)core::cell::BorrowRefMut::new (18 samples, 0.01%)tokio::runtime::coop::budget (26 samples, 0.02%)tokio::runtime::coop::with_budget (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (96 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (27 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (18 samples, 0.01%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (35 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (14 samples, 0.01%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (90 samples, 0.07%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (90 samples, 0.07%)core::slice::<impl [T]>::contains (220 samples, 0.17%)<T as core::slice::cmp::SliceContains>::slice_contains (220 samples, 0.17%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (220 samples, 0.17%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (54 samples, 0.04%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (54 samples, 
0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (240 samples, 0.18%)tokio::runtime::scheduler::multi_thread::idle::Idle::unpark_worker_by_id (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (265 samples, 0.20%)tokio::runtime::scheduler::multi_thread::worker::Context::park (284 samples, 0.22%)core::option::Option<T>::or_else (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (40 samples, 0.03%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (17 samples, 0.01%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (17 samples, 0.01%)core::num::<impl u32>::wrapping_add (17 samples, 0.01%)core::sync::atomic::AtomicU64::compare_exchange (26 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (129 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (128 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (119 samples, 0.09%)tokio::runtime::scheduler::multi_thread::queue::pack (39 samples, 0.03%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run (613 samples, 0.47%)tokio::runtime::context::runtime::enter_runtime (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (613 samples, 0.47%)tokio::runtime::context::set_scheduler (613 samples, 
0.47%)std::thread::local::LocalKey<T>::with (613 samples, 0.47%)std::thread::local::LocalKey<T>::try_with (613 samples, 0.47%)tokio::runtime::context::set_scheduler::{{closure}} (613 samples, 0.47%)tokio::runtime::context::scoped::Scoped<T>::set (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::Context::run (613 samples, 0.47%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (777 samples, 0.59%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (776 samples, 0.59%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (16 samples, 0.01%)<tokio::runtime::task::core::TaskIdGuard as core::ops::drop::Drop>::drop (16 samples, 0.01%)tokio::runtime::context::set_current_task_id (16 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (16 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (20 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (20 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::poll (835 samples, 0.64%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (56 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage (46 samples, 0.04%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (897 samples, 0.68%)tokio::runtime::task::harness::poll_future::{{closure}} (897 samples, 0.68%)tokio::runtime::task::core::Core<T,S>::store_output (62 samples, 0.05%)tokio::runtime::task::harness::poll_future (930 samples, 0.71%)std::panic::catch_unwind (927 samples, 0.71%)std::panicking::try (927 samples, 0.71%)std::panicking::try::do_call (925 samples, 0.70%)core::mem::manually_drop::ManuallyDrop<T>::take (28 samples, 0.02%)core::ptr::read (28 samples, 0.02%)tokio::runtime::task::raw::poll (938 samples, 0.71%)tokio::runtime::task::harness::Harness<T,S>::poll (934 samples, 
0.71%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (934 samples, 0.71%)core::array::<impl core::default::Default for [T: 32]>::default (26 samples, 0.02%)tokio::runtime::time::Inner::lock (16 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (15 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::time::wheel::Wheel::poll (25 samples, 0.02%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (98 samples, 0.07%)tokio::runtime::time::Driver::park_internal (51 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (15 samples, 0.01%)<F as core::future::into_future::IntoFuture>::into_future (16 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (24 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (46 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (131 samples, 0.10%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (24 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (14 samples, 0.01%)core::sync::atomic::AtomicU32::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl 
torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (39 samples, 0.03%)std::sync::rwlock::RwLock<T>::read (34 samples, 0.03%)std::sys::sync::rwlock::futex::RwLock::read (32 samples, 0.02%)[[heap]] (2,361 samples, 1.80%)[..[[vdso]] (313 samples, 0.24%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (41 samples, 0.03%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (16 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_str (67 samples, 0.05%)alloc::string::String::push_str (18 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (18 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (18 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (18 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (36 samples, 0.03%)core::num::<impl u64>::rotate_left (28 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (60 samples, 0.05%)core::num::<impl u64>::wrapping_add (14 samples, 0.01%)core::hash::sip::u8to64_le (60 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (184 samples, 0.14%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (15 samples, 0.01%)tokio::runtime::context::CONTEXT::__getit (19 samples, 0.01%)core::cell::Cell<T>::get (17 samples, 0.01%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (26 samples, 0.02%)core::ops::function::FnMut::call_mut (21 samples, 0.02%)tokio::runtime::coop::poll_proceed (21 samples, 0.02%)tokio::runtime::context::budget (21 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (21 
samples, 0.02%)[unknown] (18 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (195 samples, 0.15%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (14 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (14 samples, 0.01%)core::result::Result<T,E>::is_err (18 samples, 0.01%)core::result::Result<T,E>::is_ok (18 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (46 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (39 samples, 0.03%)core::sync::atomic::AtomicU32::compare_exchange (19 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (19 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (245 samples, 0.19%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (26 samples, 0.02%)[[vdso]] (748 samples, 0.57%)[profiling] (34 samples, 0.03%)core::fmt::write (31 samples, 0.02%)__GI___clock_gettime (29 samples, 0.02%)__GI___libc_free (131 samples, 0.10%)arena_for_chunk (20 samples, 0.02%)arena_for_chunk (19 samples, 0.01%)heap_for_ptr (19 samples, 0.01%)heap_max_size (14 samples, 0.01%)__GI___libc_malloc (114 samples, 0.09%)__GI___libc_realloc (15 samples, 0.01%)__GI___lll_lock_wake_private (22 samples, 0.02%)__GI___pthread_disable_asynccancel (66 samples, 0.05%)__GI_getsockname (249 samples, 0.19%)__libc_calloc (15 samples, 0.01%)__libc_recvfrom (23 samples, 0.02%)__libc_sendto (130 samples, 0.10%)__memcmp_evex_movbe (451 samples, 0.34%)__memcpy_avx512_unaligned_erms (426 samples, 0.32%)__memset_avx512_unaligned_erms (215 samples, 0.16%)__posix_memalign (17 samples, 0.01%)_int_free (418 samples, 0.32%)tcache_put (24 samples, 0.02%)_int_malloc (385 samples, 0.29%)_int_memalign (31 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (26 samples, 0.02%)alloc::collections::btree::search::<impl 
alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (15 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (15 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (15 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (54 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::grow_one (15 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (96 samples, 0.07%)alloc::raw_vec::RawVec<T,A>::grow_amortized (66 samples, 0.05%)core::num::<impl usize>::checked_add (18 samples, 0.01%)core::num::<impl usize>::overflowing_add (18 samples, 0.01%)alloc::raw_vec::finish_grow (74 samples, 0.06%)alloc::sync::Arc<T,A>::drop_slow (16 samples, 0.01%)core::mem::drop (14 samples, 0.01%)core::fmt::Formatter::pad_integral (14 samples, 0.01%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (93 samples, 0.07%)core::ptr::drop_in_place<tokio::net::udp::UdpSocket::send_to<&core::net::socket_addr::SocketAddr>::{{closure}}> (23 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (188 samples, 0.14%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_announce::{{closure}}> (30 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_connect::{{closure}}> (22 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_packet::{{closure}}> (20 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}}> (19 samples, 0.01%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::send_response::{{closure}}> (22 samples, 0.02%)malloc_consolidate (24 samples, 
0.02%)core::core_arch::x86::avx2::_mm256_or_si256 (15 samples, 0.01%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (17 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (17 samples, 0.01%)rand_chacha::guts::round (66 samples, 0.05%)rand_chacha::guts::refill_wide::impl_avx2 (99 samples, 0.08%)rand_chacha::guts::refill_wide::fn_impl (98 samples, 0.07%)rand_chacha::guts::refill_wide_impl (98 samples, 0.07%)std::io::error::Error::kind (14 samples, 0.01%)[unknown] (42 samples, 0.03%)[unknown] (14 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (490 samples, 0.37%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (211 samples, 0.16%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (84 samples, 0.06%)tokio::runtime::task::core::Header::get_owner_id (18 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (18 samples, 0.01%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (20 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::remove (19 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (31 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (29 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage (108 samples, 0.08%)tokio::runtime::task::core::TaskIdGuard::enter (14 samples, 0.01%)tokio::runtime::context::set_current_task_id (14 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (14 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::complete (21 samples, 0.02%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (32 samples, 
0.02%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (54 samples, 0.04%)tokio::runtime::task::raw::drop_abort_handle (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (17 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (22 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (22 samples, 0.02%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (79 samples, 0.06%)core::slice::<impl [T]>::contains (178 samples, 0.14%)<T as core::slice::cmp::SliceContains>::slice_contains (178 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (178 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (40 samples, 0.03%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (40 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (216 samples, 0.16%)tokio::loom::std::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (219 samples, 0.17%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (29 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (29 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (54 samples, 0.04%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (18 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (18 samples, 0.01%)core::sync::atomic::AtomicU32::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 
0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (113 samples, 0.09%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (51 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (31 samples, 0.02%)core::sync::atomic::AtomicU64::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (447 samples, 0.34%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (174 samples, 0.13%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (19 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (489 samples, 0.37%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (489 samples, 0.37%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run (484 samples, 0.37%)tokio::runtime::context::runtime::enter_runtime (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (484 samples, 0.37%)tokio::runtime::context::set_scheduler (484 samples, 0.37%)std::thread::local::LocalKey<T>::with (484 samples, 0.37%)std::thread::local::LocalKey<T>::try_with (484 samples, 0.37%)tokio::runtime::context::set_scheduler::{{closure}} (484 samples, 0.37%)tokio::runtime::context::scoped::Scoped<T>::set (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Context::run (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (24 samples, 
0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (20 samples, 0.02%)tokio::runtime::task::raw::poll (515 samples, 0.39%)tokio::runtime::task::harness::Harness<T,S>::poll (493 samples, 0.38%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (493 samples, 0.38%)tokio::runtime::task::harness::poll_future (493 samples, 0.38%)std::panic::catch_unwind (493 samples, 0.38%)std::panicking::try (493 samples, 0.38%)std::panicking::try::do_call (493 samples, 0.38%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (493 samples, 0.38%)tokio::runtime::task::harness::poll_future::{{closure}} (493 samples, 0.38%)tokio::runtime::task::core::Core<T,S>::poll (493 samples, 0.38%)tokio::runtime::time::wheel::Wheel::next_expiration (16 samples, 0.01%)torrust_tracker::core::Tracker::authorize::{{closure}} (27 samples, 0.02%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (15 samples, 0.01%)torrust_tracker::core::Tracker::send_stats_event::{{closure}} (44 samples, 0.03%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (15 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::d_rounds (29 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (74 samples, 0.06%)torrust_tracker::servers::udp::peer_builder::from_request (17 samples, 0.01%)torrust_tracker::servers::udp::request::AnnounceWrapper::new (51 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (54 samples, 0.04%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (58 samples, 0.04%)torrust_tracker::core::Tracker::announce::{{closure}} (70 samples, 
0.05%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (113 samples, 0.09%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (175 samples, 0.13%)<T as alloc::string::ToString>::to_string (38 samples, 0.03%)core::option::Option<T>::expect (56 samples, 0.04%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (18 samples, 0.01%)<T as alloc::string::ToString>::to_string (18 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (180 samples, 0.14%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (468 samples, 0.36%)torrust_tracker::servers::udp::logging::log_response (38 samples, 0.03%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (669 samples, 0.51%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (152 samples, 0.12%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (147 samples, 0.11%)tokio::net::udp::UdpSocket::send_to::{{closure}} (138 samples, 0.11%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (119 samples, 0.09%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (75 samples, 0.06%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (39 samples, 0.03%)mio::net::udp::UdpSocket::send_to (39 samples, 0.03%)mio::io_source::IoSource<T>::do_io (39 samples, 0.03%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (39 samples, 0.03%)mio::net::udp::UdpSocket::send_to::{{closure}} (39 samples, 0.03%)std::net::udp::UdpSocket::send_to (39 samples, 0.03%)std::sys_common::net::UdpSocket::send_to (39 samples, 0.03%)std::sys::pal::unix::cvt (39 samples, 0.03%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (39 samples, 0.03%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_stats (15 samples, 
0.01%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (14 samples, 0.01%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count::to_usize::{{closure}} (33 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats::{{closure}} (33 samples, 0.03%)torrust_tracker_primitives::peer::Peer::is_seeder (33 samples, 0.03%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count (75 samples, 0.06%)core::iter::traits::iterator::Iterator::sum (75 samples, 0.06%)<usize as core::iter::traits::accum::Sum>::sum (75 samples, 0.06%)<core::iter::adapters::map::Map<I,F> as core::iter::traits::iterator::Iterator>::fold (75 samples, 0.06%)core::iter::traits::iterator::Iterator::fold (75 samples, 0.06%)core::iter::adapters::map::map_fold::{{closure}} (34 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (104 samples, 0.08%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (24 samples, 0.02%)core::mem::drop (15 samples, 0.01%)core::ptr::drop_in_place<core::option::Option<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (15 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (15 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (15 samples, 0.01%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (215 samples, 
0.16%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (198 samples, 0.15%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (89 samples, 0.07%)core::option::Option<T>::is_some_and (32 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (31 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (30 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (30 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (26 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (34 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (34 samples, 0.03%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (58 samples, 0.04%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (58 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (58 samples, 0.04%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (58 samples, 0.04%)<u8 as core::slice::cmp::SliceOrd>::compare (58 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (20 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (238 samples, 0.18%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (236 samples, 0.18%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (208 
samples, 0.16%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (208 samples, 0.16%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (282 samples, 0.21%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (67 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (53 samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (53 samples, 0.04%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (22 samples, 0.02%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (22 samples, 0.02%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (22 samples, 0.02%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (22 samples, 0.02%)<u8 as core::slice::cmp::SliceOrd>::compare (22 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (18 samples, 0.01%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (23 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (23 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (43 samples, 0.03%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (43 samples, 0.03%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (43 samples, 
0.03%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (43 samples, 0.03%)<u8 as core::slice::cmp::SliceOrd>::compare (43 samples, 0.03%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (17 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (151 samples, 0.12%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (145 samples, 0.11%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (137 samples, 0.10%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (137 samples, 0.10%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (266 samples, 0.20%)core::sync::atomic::AtomicU32::load (27 samples, 0.02%)core::sync::atomic::atomic_load (27 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (38 samples, 0.03%)std::sync::rwlock::RwLock<T>::read (37 samples, 0.03%)std::sys::sync::rwlock::futex::RwLock::read (36 samples, 0.03%)tracing::span::Span::log (16 samples, 0.01%)tracing::span::Span::record_all (70 samples, 0.05%)unlink_chunk (139 samples, 0.11%)rand::rng::Rng::gen (30 samples, 0.02%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (30 
samples, 0.02%)rand::rng::Rng::gen (30 samples, 0.02%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (30 samples, 0.02%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (30 samples, 0.02%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (30 samples, 0.02%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (30 samples, 0.02%)rand_core::block::BlockRng<R>::generate_and_set (28 samples, 0.02%)[anon] (8,759 samples, 6.67%)[anon]uuid::v4::<impl uuid::Uuid>::new_v4 (32 samples, 0.02%)uuid::rng::bytes (32 samples, 0.02%)rand::random (32 samples, 0.02%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (15 samples, 0.01%)_int_free (338 samples, 0.26%)tcache_put (18 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (22 samples, 0.02%)hashbrown::raw::h2 (14 samples, 0.01%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (23 samples, 0.02%)hashbrown::raw::RawTableInner::find_or_find_insert_slot_inner (17 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (25 samples, 0.02%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (15 samples, 0.01%)[profiling] (545 samples, 0.42%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (32 samples, 0.02%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (22 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (16 samples, 0.01%)alloc::vec::Vec<T,A>::reserve (30 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve (28 samples, 0.02%)<alloc::string::String as core::fmt::Write>::write_str (83 
samples, 0.06%)alloc::string::String::push_str (57 samples, 0.04%)alloc::vec::Vec<T,A>::extend_from_slice (57 samples, 0.04%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (57 samples, 0.04%)alloc::vec::Vec<T,A>::append_elements (57 samples, 0.04%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (20 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (41 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (151 samples, 0.12%)core::hash::sip::u8to64_le (50 samples, 0.04%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (33 samples, 0.03%)tokio::runtime::context::CONTEXT::__getit (35 samples, 0.03%)core::cell::Cell<T>::get (33 samples, 0.03%)[unknown] (20 samples, 0.02%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (75 samples, 0.06%)core::ops::function::FnMut::call_mut (66 samples, 0.05%)tokio::runtime::coop::poll_proceed (66 samples, 0.05%)tokio::runtime::context::budget (66 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (66 samples, 0.05%)tokio::runtime::context::budget::{{closure}} (27 samples, 0.02%)tokio::runtime::coop::poll_proceed::{{closure}} (27 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (110 samples, 0.08%)[unknown] (15 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (27 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (27 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (14 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (84 samples, 0.06%)std::sync::mutex::Mutex<T>::lock (70 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock (59 samples, 0.04%)core::sync::atomic::AtomicU32::compare_exchange (55 samples, 0.04%)core::sync::atomic::atomic_compare_exchange (55 samples, 0.04%)[unknown] (33 samples, 
0.03%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (214 samples, 0.16%)__memcpy_avx512_unaligned_erms (168 samples, 0.13%)[profiling] (171 samples, 0.13%)binascii::bin2hex (77 samples, 0.06%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (280 samples, 0.21%)[unknown] (317 samples, 0.24%)[[vdso]] (2,648 samples, 2.02%)[..[unknown] (669 samples, 0.51%)[unknown] (396 samples, 0.30%)[unknown] (251 samples, 0.19%)[unknown] (65 samples, 0.05%)[unknown] (30 samples, 0.02%)[unknown] (21 samples, 0.02%)__GI___clock_gettime (56 samples, 0.04%)arena_for_chunk (72 samples, 0.05%)arena_for_chunk (62 samples, 0.05%)heap_for_ptr (49 samples, 0.04%)heap_max_size (28 samples, 0.02%)__GI___libc_free (194 samples, 0.15%)arena_for_chunk (19 samples, 0.01%)checked_request2size (24 samples, 0.02%)__GI___libc_malloc (220 samples, 0.17%)tcache_get (44 samples, 0.03%)__GI___libc_write (25 samples, 0.02%)__GI___libc_write (14 samples, 0.01%)__GI___pthread_disable_asynccancel (97 samples, 0.07%)core::num::<impl u128>::leading_zeros (15 samples, 0.01%)compiler_builtins::float::conv::int_to_float::u128_to_f64_bits (72 samples, 0.05%)__floattidf (90 samples, 0.07%)compiler_builtins::float::conv::__floattidf (86 samples, 0.07%)exp_inline (40 samples, 0.03%)log_inline (64 samples, 0.05%)__ieee754_pow_fma (114 samples, 0.09%)__libc_calloc (106 samples, 0.08%)__libc_recvfrom (252 samples, 0.19%)__libc_sendto (133 samples, 0.10%)__memcmp_evex_movbe (137 samples, 0.10%)__memcpy_avx512_unaligned_erms (1,399 samples, 1.07%)__posix_memalign (172 samples, 0.13%)__posix_memalign (80 samples, 0.06%)_mid_memalign (71 samples, 0.05%)arena_for_chunk (14 samples, 0.01%)__pow (18 samples, 0.01%)__vdso_clock_gettime (40 samples, 0.03%)[unknown] 
(24 samples, 0.02%)_int_free (462 samples, 0.35%)tcache_put (54 samples, 0.04%)[unknown] (14 samples, 0.01%)_int_malloc (508 samples, 0.39%)_int_memalign (68 samples, 0.05%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (54 samples, 0.04%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (14 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (78 samples, 0.06%)alloc::raw_vec::RawVec<T,A>::grow_amortized (73 samples, 0.06%)alloc::raw_vec::finish_grow (91 samples, 0.07%)core::result::Result<T,E>::map_err (31 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Weak<ring::ec::curve25519::ed25519::signing::Ed25519KeyPair,&alloc::alloc::Global>> (16 samples, 0.01%)<alloc::sync::Weak<T,A> as core::ops::drop::Drop>::drop (16 samples, 0.01%)core::mem::drop (18 samples, 0.01%)alloc::sync::Arc<T,A>::drop_slow (21 samples, 0.02%)alloc_new_heap (49 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (49 samples, 0.04%)core::fmt::Formatter::pad_integral (40 samples, 0.03%)core::fmt::Formatter::pad_integral::write_prefix (19 samples, 0.01%)core::fmt::write (20 samples, 0.02%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (155 samples, 0.12%)core::ptr::drop_in_place<core::option::Option<core::task::wake::Waker>> (71 samples, 0.05%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (245 samples, 0.19%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_announce::{{closure}}> (33 samples, 0.03%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}}> (37 samples, 0.03%)core::str::converts::from_utf8 (33 samples, 0.03%)core::str::validations::run_utf8_validation (20 samples, 0.02%)epoll_wait (31 samples, 0.02%)hashbrown::map::HashMap<K,V,S,A>::insert (17 samples, 0.01%)rand_chacha::guts::refill_wide (19 samples, 
0.01%)std_detect::detect::arch::x86::__is_feature_detected::avx2 (17 samples, 0.01%)std_detect::detect::check_for (17 samples, 0.01%)std_detect::detect::cache::test (17 samples, 0.01%)std_detect::detect::cache::Cache::test (17 samples, 0.01%)core::sync::atomic::AtomicUsize::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)std::sys::pal::unix::time::Timespec::new (29 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (132 samples, 0.10%)core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::ge (22 samples, 0.02%)core::cmp::PartialOrd::ge (22 samples, 0.02%)std::sys::pal::unix::time::Timespec::sub_timespec (67 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock_contended (18 samples, 0.01%)std::sys_common::net::TcpListener::socket_addr (29 samples, 0.02%)std::sys_common::net::sockname (28 samples, 0.02%)syscall (552 samples, 0.42%)core::ptr::drop_in_place<core::cell::RefMut<core::option::Option<alloc::boxed::Box<tokio::runtime::scheduler::multi_thread::worker::Core>>>> (74 samples, 0.06%)core::ptr::drop_in_place<core::cell::BorrowRefMut> (74 samples, 0.06%)<core::cell::BorrowRefMut as core::ops::drop::Drop>::drop (74 samples, 0.06%)core::cell::Cell<T>::set (74 samples, 0.06%)core::cell::Cell<T>::replace (74 samples, 0.06%)core::mem::replace (74 samples, 0.06%)core::ptr::write (74 samples, 0.06%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::push_back_or_overflow (14 samples, 0.01%)tokio::runtime::context::with_scheduler (176 samples, 0.13%)std::thread::local::LocalKey<T>::try_with (152 samples, 0.12%)tokio::runtime::context::with_scheduler::{{closure}} (151 samples, 0.12%)tokio::runtime::context::scoped::Scoped<T>::with (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (150 samples, 
0.11%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (71 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (16 samples, 0.01%)core::option::Option<T>::map (19 samples, 0.01%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (24 samples, 0.02%)mio::poll::Poll::poll (53 samples, 0.04%)mio::sys::unix::selector::epoll::Selector::select (53 samples, 0.04%)core::result::Result<T,E>::map (28 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (28 samples, 0.02%)tokio::io::ready::Ready::from_mio (14 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (126 samples, 0.10%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (18 samples, 0.01%)[unknown] (51 samples, 0.04%)[unknown] (100 samples, 0.08%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (326 samples, 0.25%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (205 samples, 0.16%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (77 samples, 0.06%)[unknown] (26 samples, 0.02%)<tokio::util::linked_list::DrainFilter<T,F> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (396 samples, 0.30%)tokio::loom::std::mutex::Mutex<T>::lock (18 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (573 samples, 0.44%)core::sync::atomic::AtomicUsize::fetch_add (566 samples, 0.43%)core::sync::atomic::atomic_add (566 samples, 0.43%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (635 samples, 0.48%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (25 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (15 samples, 
0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::next_remote_task (44 samples, 0.03%)tokio::runtime::scheduler::inject::shared::Shared<T>::is_empty (21 samples, 0.02%)tokio::runtime::scheduler::inject::shared::Shared<T>::len (21 samples, 0.02%)core::sync::atomic::AtomicUsize::load (21 samples, 0.02%)core::sync::atomic::atomic_load (21 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id (32 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (32 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (32 samples, 0.02%)std::sync::poison::Flag::done (32 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>,tokio::runtime::task::core::Header>>> (43 samples, 0.03%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (43 samples, 0.03%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (123 samples, 0.09%)tokio::runtime::task::list::OwnedTasks<S>::remove (117 samples, 0.09%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (80 samples, 0.06%)tokio::runtime::scheduler::defer::Defer::wake (17 samples, 0.01%)std::sys::pal::unix::futex::futex_wait (46 samples, 0.04%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (71 samples, 0.05%)std::sync::condvar::Condvar::wait (56 samples, 0.04%)std::sys::sync::condvar::futex::Condvar::wait (56 samples, 0.04%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (56 samples, 0.04%)core::sync::atomic::AtomicUsize::compare_exchange (37 samples, 
0.03%)core::sync::atomic::atomic_compare_exchange (37 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (138 samples, 0.11%)tokio::runtime::driver::Driver::park (77 samples, 0.06%)tokio::runtime::driver::TimeDriver::park (77 samples, 0.06%)tokio::runtime::time::Driver::park (75 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Parker::park (266 samples, 0.20%)tokio::runtime::scheduler::multi_thread::park::Inner::park (266 samples, 0.20%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (432 samples, 0.33%)tokio::runtime::scheduler::multi_thread::worker::Core::should_notify_others (26 samples, 0.02%)core::cell::RefCell<T>::borrow_mut (94 samples, 0.07%)core::cell::RefCell<T>::try_borrow_mut (94 samples, 0.07%)core::cell::BorrowRefMut::new (94 samples, 0.07%)tokio::runtime::coop::budget (142 samples, 0.11%)tokio::runtime::coop::with_budget (142 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (121 samples, 0.09%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (44 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (208 samples, 0.16%)tokio::runtime::signal::Driver::process (30 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (46 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (46 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (35 samples, 0.03%)tokio::runtime::task::core::Core<T,S>::set_stage (75 samples, 0.06%)core::sync::atomic::AtomicUsize::fetch_xor (76 samples, 0.06%)core::sync::atomic::atomic_xor (76 samples, 0.06%)tokio::runtime::task::state::State::transition_to_complete (79 samples, 0.06%)tokio::runtime::task::harness::Harness<T,S>::complete (113 samples, 0.09%)tokio::runtime::task::state::State::transition_to_terminal (18 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::dealloc (28 samples, 
0.02%)core::mem::drop (18 samples, 0.01%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (18 samples, 0.01%)core::ptr::drop_in_place<tokio::util::sharded_list::ShardGuard<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>> (16 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>>> (16 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (16 samples, 0.01%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (53 samples, 0.04%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (21 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (113 samples, 0.09%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (15 samples, 0.01%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (15 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (15 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (15 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (14 samples, 0.01%)tokio::runtime::task::raw::drop_abort_handle (82 samples, 0.06%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (23 samples, 0.02%)tokio::runtime::task::state::State::ref_dec (23 samples, 0.02%)core::sync::atomic::AtomicUsize::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::task::raw::drop_join_handle_slow (34 samples, 
0.03%)tokio::runtime::task::harness::Harness<T,S>::drop_join_handle_slow (32 samples, 0.02%)tokio::runtime::task::state::State::unset_join_interested (23 samples, 0.02%)tokio::runtime::task::state::State::fetch_update (23 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (43 samples, 0.03%)core::num::<impl u32>::wrapping_add (23 samples, 0.02%)core::option::Option<T>::or_else (37 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (36 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (36 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (38 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (59 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (45 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (132 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (63 samples, 0.05%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run (290 samples, 0.22%)tokio::runtime::context::runtime::enter_runtime (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (290 samples, 0.22%)tokio::runtime::context::set_scheduler (290 samples, 0.22%)std::thread::local::LocalKey<T>::with (290 samples, 0.22%)std::thread::local::LocalKey<T>::try_with (290 samples, 0.22%)tokio::runtime::context::set_scheduler::{{closure}} (290 samples, 0.22%)tokio::runtime::context::scoped::Scoped<T>::set (290 samples, 
0.22%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::Context::run (290 samples, 0.22%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (327 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (322 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::poll (333 samples, 0.25%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (342 samples, 0.26%)tokio::runtime::task::harness::poll_future::{{closure}} (342 samples, 0.26%)tokio::runtime::task::harness::poll_future (348 samples, 0.27%)std::panic::catch_unwind (347 samples, 0.26%)std::panicking::try (347 samples, 0.26%)std::panicking::try::do_call (347 samples, 0.26%)core::sync::atomic::AtomicUsize::compare_exchange (18 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (18 samples, 0.01%)tokio::runtime::task::state::State::transition_to_running (47 samples, 0.04%)tokio::runtime::task::state::State::fetch_update_action (47 samples, 0.04%)tokio::runtime::task::state::State::transition_to_running::{{closure}} (19 samples, 0.01%)tokio::runtime::task::raw::poll (427 samples, 0.33%)tokio::runtime::task::harness::Harness<T,S>::poll (408 samples, 0.31%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (407 samples, 0.31%)tokio::runtime::task::state::State::transition_to_idle (17 samples, 0.01%)core::array::<impl core::default::Default for [T: 32]>::default (21 samples, 0.02%)tokio::runtime::time::wheel::Wheel::poll (14 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (72 samples, 0.05%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (23 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (15 samples, 0.01%)tokio::runtime::time::source::TimeSource::now (14 samples, 0.01%)tokio::runtime::time::Driver::park_internal (155 samples, 
0.12%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (96 samples, 0.07%)tokio::runtime::time::wheel::level::slot_range (35 samples, 0.03%)core::num::<impl usize>::pow (35 samples, 0.03%)tokio::runtime::time::wheel::level::level_range (39 samples, 0.03%)tokio::runtime::time::wheel::level::slot_range (33 samples, 0.03%)core::num::<impl usize>::pow (33 samples, 0.03%)tokio::runtime::time::wheel::level::Level::next_expiration (208 samples, 0.16%)tokio::runtime::time::wheel::level::slot_range (48 samples, 0.04%)core::num::<impl usize>::pow (48 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (277 samples, 0.21%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::is_empty (18 samples, 0.01%)core::option::Option<T>::is_some (18 samples, 0.01%)torrust_tracker::core::Tracker::authorize::{{closure}} (50 samples, 0.04%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (37 samples, 0.03%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (27 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (19 samples, 0.01%)core::iter::traits::iterator::Iterator::collect (17 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (17 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (17 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (17 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (20 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (20 samples, 0.02%)<core::hash::sip::Hasher<S> as 
core::hash::Hasher>::finish (20 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (62 samples, 0.05%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (40 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (27 samples, 0.02%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (17 samples, 0.01%)torrust_tracker::servers::udp::peer_builder::from_request (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (19 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (355 samples, 0.27%)<F as core::future::into_future::IntoFuture>::into_future (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (37 samples, 0.03%)core::sync::atomic::AtomicUsize::fetch_add (25 samples, 0.02%)core::sync::atomic::atomic_add (25 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet (14 samples, 0.01%)core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (20 samples, 0.02%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)core::result::Result<T,E>::map_err (16 samples, 0.01%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (136 samples, 0.10%)torrust_tracker::core::Tracker::announce::{{closure}} (173 samples, 0.13%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (267 samples, 0.20%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (30 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (423 samples, 0.32%)core::fmt::Formatter::new (26 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (80 
samples, 0.06%)core::fmt::num::imp::fmt_u64 (58 samples, 0.04%)core::intrinsics::copy_nonoverlapping (15 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (74 samples, 0.06%)core::fmt::num::imp::fmt_u64 (70 samples, 0.05%)<T as alloc::string::ToString>::to_string (207 samples, 0.16%)core::option::Option<T>::expect (19 samples, 0.01%)core::ptr::drop_in_place<alloc::string::String> (18 samples, 0.01%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (18 samples, 0.01%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (18 samples, 0.01%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (18 samples, 0.01%)torrust_tracker::servers::udp::logging::map_action_name (25 samples, 0.02%)alloc::str::<impl alloc::borrow::ToOwned for str>::to_owned (14 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (345 samples, 0.26%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (18 samples, 0.01%)core::fmt::num::imp::fmt_u64 (14 samples, 0.01%)<T as alloc::string::ToString>::to_string (35 samples, 0.03%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (1,067 samples, 0.81%)torrust_tracker::servers::udp::logging::log_response (72 samples, 0.05%)alloc::vec::from_elem (68 samples, 0.05%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (68 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (68 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (68 samples, 0.05%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (68 samples, 0.05%)alloc::alloc::Global::alloc_impl (68 samples, 0.05%)alloc::alloc::alloc_zeroed (68 samples, 0.05%)__rdl_alloc_zeroed (68 samples, 0.05%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (68 samples, 0.05%)[unknown] (48 samples, 0.04%)[unknown] (16 samples, 0.01%)[unknown] (28 samples, 0.02%)std::sys::pal::unix::cvt (134 samples, 0.10%)<isize as 
std::sys::pal::unix::IsMinusOne>::is_minus_one (134 samples, 0.10%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (1,908 samples, 1.45%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (504 samples, 0.38%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (382 samples, 0.29%)tokio::net::udp::UdpSocket::send_to::{{closure}} (344 samples, 0.26%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (332 samples, 0.25%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (304 samples, 0.23%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (215 samples, 0.16%)mio::net::udp::UdpSocket::send_to (185 samples, 0.14%)mio::io_source::IoSource<T>::do_io (185 samples, 0.14%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (185 samples, 0.14%)mio::net::udp::UdpSocket::send_to::{{closure}} (185 samples, 0.14%)std::net::udp::UdpSocket::send_to (185 samples, 0.14%)std::sys_common::net::UdpSocket::send_to (169 samples, 0.13%)alloc::vec::Vec<T>::with_capacity (17 samples, 0.01%)alloc::vec::Vec<T,A>::with_capacity_in (17 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (104 samples, 0.08%)tokio::net::udp::UdpSocket::ready::{{closure}} (85 samples, 0.06%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (190 samples, 0.14%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (49 samples, 0.04%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (28 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (330 samples, 0.25%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (327 samples, 0.25%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (92 samples, 0.07%)tokio::task::spawn::spawn (92 samples, 0.07%)tokio::task::spawn::spawn_inner (92 samples, 0.07%)tokio::runtime::context::current::with_current (92 samples, 
0.07%)std::thread::local::LocalKey<T>::try_with (92 samples, 0.07%)tokio::runtime::context::current::with_current::{{closure}} (92 samples, 0.07%)core::option::Option<T>::map (92 samples, 0.07%)tokio::task::spawn::spawn_inner::{{closure}} (92 samples, 0.07%)tokio::runtime::scheduler::Handle::spawn (92 samples, 0.07%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (92 samples, 0.07%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (92 samples, 0.07%)tokio::runtime::task::list::OwnedTasks<S>::bind (90 samples, 0.07%)tokio::runtime::task::new_task (89 samples, 0.07%)tokio::runtime::task::raw::RawTask::new (89 samples, 0.07%)tokio::runtime::task::core::Cell<T,S>::new (89 samples, 0.07%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (34 samples, 0.03%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (27 samples, 0.02%)alloc::sync::Arc<T>::new (21 samples, 0.02%)alloc::boxed::Box<T>::new (21 samples, 0.02%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (152 samples, 0.12%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (125 samples, 0.10%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (88 samples, 0.07%)core::option::Option<T>::is_some_and (18 samples, 0.01%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for 
torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (17 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (17 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (17 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (22 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (22 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (17 samples, 0.01%)std::sync::rwlock::RwLock<T>::read (16 samples, 0.01%)std::sys::sync::rwlock::futex::RwLock::read (16 samples, 0.01%)tracing::span::Span::log (26 samples, 0.02%)core::fmt::Arguments::new_v1 (15 samples, 0.01%)tracing_core::span::Record::is_empty (34 samples, 0.03%)tracing_core::field::ValueSet::is_empty (34 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::all (22 samples, 0.02%)tracing_core::field::ValueSet::is_empty::{{closure}} (18 samples, 0.01%)core::option::Option<T>::is_none (16 samples, 0.01%)core::option::Option<T>::is_some (16 samples, 
0.01%)tracing::span::Span::record_all (143 samples, 0.11%)unlink_chunk (185 samples, 0.14%)uuid::builder::Builder::with_variant (48 samples, 0.04%)[unknown] (40 samples, 0.03%)uuid::builder::Builder::from_random_bytes (77 samples, 0.06%)uuid::builder::Builder::with_version (29 samples, 0.02%)[unknown] (24 samples, 0.02%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (161 samples, 0.12%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (161 samples, 0.12%)[unknown] (92 samples, 0.07%)rand::rng::Rng::gen (162 samples, 0.12%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (162 samples, 0.12%)rand::rng::Rng::gen (162 samples, 0.12%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (162 samples, 0.12%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (162 samples, 0.12%)[unknown] (18,233 samples, 13.89%)[unknown]uuid::v4::<impl uuid::Uuid>::new_v4 (270 samples, 0.21%)uuid::rng::bytes (190 samples, 0.14%)rand::random (190 samples, 0.14%)__memcpy_avx512_unaligned_erms (69 samples, 0.05%)_int_free (23 samples, 0.02%)_int_malloc (23 samples, 0.02%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)advise_stack_range (31 samples, 0.02%)__GI_madvise (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 
0.02%)[unknown] (29 samples, 0.02%)[unknown] (28 samples, 0.02%)[unknown] (28 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (17 samples, 0.01%)std::sys::pal::unix::futex::futex_wait (31 samples, 0.02%)syscall (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (29 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (17 samples, 0.01%)std::sync::condvar::Condvar::wait_timeout (35 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait_timeout (35 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (35 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (56 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (56 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (56 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (56 samples, 0.04%)std::sys::pal::unix::futex::futex_wait (56 samples, 0.04%)syscall (56 samples, 0.04%)[unknown] (56 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (53 samples, 0.04%)[unknown] (52 samples, 0.04%)[unknown] (46 samples, 0.04%)[unknown] (39 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (15 samples, 0.01%)[[vdso]] (26 samples, 0.02%)[[vdso]] (263 samples, 0.20%)__ieee754_pow_fma (26 samples, 0.02%)__pow (314 samples, 0.24%)std::f64::<impl f64>::powf (345 samples, 0.26%)__GI___clock_gettime (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (416 samples, 0.32%)std::time::Instant::now (20 samples, 
0.02%)std::sys::pal::unix::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_processing_scheduled_tasks (24 samples, 0.02%)std::time::Instant::now (18 samples, 0.01%)std::sys::pal::unix::time::Instant::now (18 samples, 0.01%)mio::poll::Poll::poll (102 samples, 0.08%)mio::sys::unix::selector::epoll::Selector::select (102 samples, 0.08%)epoll_wait (99 samples, 0.08%)[unknown] (92 samples, 0.07%)[unknown] (91 samples, 0.07%)[unknown] (91 samples, 0.07%)[unknown] (88 samples, 0.07%)[unknown] (85 samples, 0.06%)[unknown] (84 samples, 0.06%)[unknown] (43 samples, 0.03%)[unknown] (29 samples, 0.02%)[unknown] (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (125 samples, 0.10%)tokio::runtime::scheduler::multi_thread::park::Parker::park_timeout (125 samples, 0.10%)tokio::runtime::driver::Driver::park_timeout (125 samples, 0.10%)tokio::runtime::driver::TimeDriver::park_timeout (125 samples, 0.10%)tokio::runtime::time::Driver::park_timeout (125 samples, 0.10%)tokio::runtime::time::Driver::park_internal (116 samples, 0.09%)tokio::runtime::io::driver::Driver::turn (116 samples, 0.09%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (148 samples, 0.11%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (111 samples, 0.08%)alloc::sync::Arc<T,A>::inner (111 samples, 0.08%)core::ptr::non_null::NonNull<T>::as_ref (111 samples, 0.08%)core::sync::atomic::AtomicUsize::compare_exchange (16 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (16 samples, 0.01%)core::bool::<impl bool>::then (88 samples, 0.07%)std::sys::pal::unix::futex::futex_wait (13,339 samples, 10.16%)std::sys::pal::..syscall (13,003 samples, 9.90%)syscall[unknown] (12,895 samples, 9.82%)[unknown][unknown] (12,759 samples, 9.72%)[unknown][unknown] (12,313 samples, 9.38%)[unknown][unknown] (12,032 samples, 9.16%)[unknown][unknown] (11,734 
samples, 8.94%)[unknown][unknown] (11,209 samples, 8.54%)[unknown][unknown] (10,265 samples, 7.82%)[unknown][unknown] (9,345 samples, 7.12%)[unknown][unknown] (8,623 samples, 6.57%)[unknown][unknown] (7,744 samples, 5.90%)[unknow..[unknown] (5,922 samples, 4.51%)[unkn..[unknown] (4,459 samples, 3.40%)[un..[unknown] (2,808 samples, 2.14%)[..[unknown] (1,275 samples, 0.97%)[unknown] (1,022 samples, 0.78%)[unknown] (738 samples, 0.56%)[unknown] (607 samples, 0.46%)[unknown] (155 samples, 0.12%)core::result::Result<T,E>::is_err (77 samples, 0.06%)core::result::Result<T,E>::is_ok (77 samples, 0.06%)std::sync::condvar::Condvar::wait (13,429 samples, 10.23%)std::sync::cond..std::sys::sync::condvar::futex::Condvar::wait (13,428 samples, 10.23%)std::sys::sync:..std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (13,428 samples, 10.23%)std::sys::sync:..std::sys::sync::mutex::futex::Mutex::lock (89 samples, 0.07%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (13,508 samples, 10.29%)tokio::runtime:..tokio::loom::std::mutex::Mutex<T>::lock (64 samples, 0.05%)std::sync::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (31 samples, 0.02%)core::sync::atomic::AtomicU32::compare_exchange (30 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (30 samples, 0.02%)core::sync::atomic::AtomicUsize::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (38 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Parker::park (34 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park (34 samples, 0.03%)core::array::<impl core::default::Default for [T: 32]>::default (17 samples, 0.01%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (19 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (33 samples, 
0.03%)tokio::runtime::time::wheel::level::slot_range (15 samples, 0.01%)core::num::<impl usize>::pow (15 samples, 0.01%)tokio::runtime::time::wheel::level::level_range (17 samples, 0.01%)tokio::runtime::time::wheel::level::slot_range (15 samples, 0.01%)core::num::<impl usize>::pow (15 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_expiration (95 samples, 0.07%)tokio::runtime::time::wheel::level::slot_range (41 samples, 0.03%)core::num::<impl usize>::pow (41 samples, 0.03%)tokio::runtime::time::wheel::Wheel::next_expiration (129 samples, 0.10%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (202 samples, 0.15%)tokio::runtime::time::wheel::Wheel::poll_at (17 samples, 0.01%)tokio::runtime::time::wheel::Wheel::next_expiration (15 samples, 0.01%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (38 samples, 0.03%)core::option::Option<T>::map (38 samples, 0.03%)core::result::Result<T,E>::map (31 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (31 samples, 0.02%)alloc::vec::Vec<T,A>::set_len (17 samples, 0.01%)[[vdso]] (28 samples, 0.02%)[unknown] (11,031 samples, 8.40%)[unknown][unknown] (10,941 samples, 8.33%)[unknown][unknown] (10,850 samples, 8.26%)[unknown][unknown] (10,691 samples, 8.14%)[unknown][unknown] (10,070 samples, 7.67%)[unknown][unknown] (9,737 samples, 7.42%)[unknown][unknown] (7,659 samples, 5.83%)[unknow..[unknown] (6,530 samples, 4.97%)[unkno..[unknown] (5,633 samples, 4.29%)[unkn..[unknown] (5,055 samples, 3.85%)[unk..[unknown] (4,046 samples, 3.08%)[un..[unknown] (2,911 samples, 2.22%)[..[unknown] (2,115 samples, 1.61%)[unknown] (1,226 samples, 0.93%)[unknown] (455 samples, 0.35%)[unknown] (408 samples, 0.31%)[unknown] (249 samples, 0.19%)[unknown] (202 samples, 0.15%)[unknown] (100 samples, 0.08%)mio::poll::Poll::poll (11,328 samples, 8.63%)mio::poll::P..mio::sys::unix::selector::epoll::Selector::select (11,328 samples, 
8.63%)mio::sys::un..epoll_wait (11,229 samples, 8.55%)epoll_wait__GI___pthread_disable_asynccancel (50 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (47 samples, 0.04%)tokio::util::bit::Pack::pack (38 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (25 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (23 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (19 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (11,595 samples, 8.83%)tokio::runti..tokio::runtime::io::scheduled_io::ScheduledIo::wake (175 samples, 0.13%)__GI___clock_gettime (15 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (18 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (26 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (26 samples, 0.02%)tokio::time::clock::Clock::now (20 samples, 0.02%)tokio::time::clock::now (20 samples, 0.02%)std::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Instant::now (20 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (17 samples, 0.01%)tokio::runtime::time::Driver::park_internal (11,686 samples, 8.90%)tokio::runtim..tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (11,957 samples, 9.11%)tokio::runtim..tokio::runtime::driver::Driver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::driver::TimeDriver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::time::Driver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::scheduler::multi_thread::park::Parker::park (25,502 samples, 19.42%)tokio::runtime::scheduler::mul..tokio::runtime::scheduler::multi_thread::park::Inner::park (25,502 samples, 19.42%)tokio::runtime::scheduler::mul..tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (25,547 samples, 19.46%)tokio::runtime::scheduler::mul..core::result::Result<T,E>::is_err (14 samples, 0.01%)core::result::Result<T,E>::is_ok (14 samples, 
0.01%)core::sync::atomic::AtomicU32::compare_exchange (45 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (45 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (84 samples, 0.06%)std::sync::mutex::Mutex<T>::lock (81 samples, 0.06%)std::sys::sync::mutex::futex::Mutex::lock (73 samples, 0.06%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (122 samples, 0.09%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (90 samples, 0.07%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (90 samples, 0.07%)core::slice::<impl [T]>::contains (241 samples, 0.18%)<T as core::slice::cmp::SliceContains>::slice_contains (241 samples, 0.18%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (241 samples, 0.18%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (75 samples, 0.06%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (75 samples, 0.06%)core::sync::atomic::AtomicU32::compare_exchange (20 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (283 samples, 0.22%)tokio::loom::std::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (24 samples, 0.02%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (33 samples, 0.03%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (33 samples, 0.03%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (33 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::unpark_worker_by_id (98 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (401 samples, 0.31%)alloc::vec::Vec<T,A>::push (14 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (15 
samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (15 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::unlock (14 samples, 0.01%)core::result::Result<T,E>::is_err (15 samples, 0.01%)core::result::Result<T,E>::is_ok (15 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (22 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (22 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (63 samples, 0.05%)std::sync::mutex::Mutex<T>::lock (62 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock (59 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (106 samples, 0.08%)tokio::runtime::scheduler::multi_thread::idle::State::dec_num_unparked (14 samples, 0.01%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (21 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (17 samples, 0.01%)alloc::sync::Arc<T,A>::inner (17 samples, 0.01%)core::ptr::non_null::NonNull<T>::as_ref (17 samples, 0.01%)core::sync::atomic::AtomicU32::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (68 samples, 0.05%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (51 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (33 samples, 0.03%)core::sync::atomic::AtomicU64::load (16 samples, 0.01%)core::sync::atomic::atomic_load (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (106 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::Context::park (26,672 samples, 
20.31%)tokio::runtime::scheduler::multi..tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (272 samples, 0.21%)tokio::runtime::scheduler::multi_thread::worker::Core::has_tasks (33 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::has_tasks (24 samples, 0.02%)tokio::runtime::context::budget (18 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (18 samples, 0.01%)syscall (61 samples, 0.05%)__memcpy_avx512_unaligned_erms (172 samples, 0.13%)__memcpy_avx512_unaligned_erms (224 samples, 0.17%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (228 samples, 0.17%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (228 samples, 0.17%)std::panic::catch_unwind (415 samples, 0.32%)std::panicking::try (415 samples, 0.32%)std::panicking::try::do_call (415 samples, 0.32%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (415 samples, 0.32%)core::ops::function::FnOnce::call_once (415 samples, 0.32%)tokio::runtime::task::harness::Harness<T,S>::complete::{{closure}} (415 samples, 0.32%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (415 samples, 0.32%)tokio::runtime::task::core::Core<T,S>::set_stage (410 samples, 0.31%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (27 samples, 0.02%)core::result::Result<T,E>::is_err (43 samples, 0.03%)core::result::Result<T,E>::is_ok (43 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::complete (570 samples, 0.43%)tokio::runtime::task::harness::Harness<T,S>::release (155 samples, 0.12%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (152 samples, 0.12%)tokio::runtime::task::list::OwnedTasks<S>::remove (152 samples, 0.12%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (103 samples, 
0.08%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (65 samples, 0.05%)tokio::loom::std::mutex::Mutex<T>::lock (58 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (58 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (54 samples, 0.04%)std::io::stdio::stderr::INSTANCE (17 samples, 0.01%)tokio::runtime::coop::budget (26 samples, 0.02%)tokio::runtime::coop::with_budget (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (35 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (70 samples, 0.05%)__memcpy_avx512_unaligned_erms (42 samples, 0.03%)core::cmp::Ord::min (22 samples, 0.02%)core::cmp::min_by (22 samples, 0.02%)std::io::cursor::Cursor<T>::remaining_slice (27 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (30 samples, 0.02%)std::io::cursor::Cursor<T>::remaining_slice (24 samples, 0.02%)core::slice::index::<impl core::ops::index::Index<I> for [T]>::index (19 samples, 0.01%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::index (19 samples, 0.01%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (19 samples, 0.01%)<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (19 samples, 0.01%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (44 samples, 0.03%)std::io::impls::<impl std::io::Read for &[u8]>::read_exact (20 samples, 0.02%)byteorder::io::ReadBytesExt::read_i32 (46 samples, 0.04%)core::cmp::Ord::min (14 samples, 0.01%)core::cmp::min_by (14 samples, 0.01%)std::io::cursor::Cursor<T>::remaining_slice (19 samples, 0.01%)byteorder::io::ReadBytesExt::read_i64 (24 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (24 samples, 0.02%)aquatic_udp_protocol::request::Request::from_bytes (349 
samples, 0.27%)__GI___lll_lock_wake_private (148 samples, 0.11%)[unknown] (139 samples, 0.11%)[unknown] (137 samples, 0.10%)[unknown] (123 samples, 0.09%)[unknown] (111 samples, 0.08%)[unknown] (98 samples, 0.07%)[unknown] (42 samples, 0.03%)[unknown] (30 samples, 0.02%)__GI___lll_lock_wait_private (553 samples, 0.42%)futex_wait (541 samples, 0.41%)[unknown] (536 samples, 0.41%)[unknown] (531 samples, 0.40%)[unknown] (524 samples, 0.40%)[unknown] (515 samples, 0.39%)[unknown] (498 samples, 0.38%)[unknown] (470 samples, 0.36%)[unknown] (435 samples, 0.33%)[unknown] (350 samples, 0.27%)[unknown] (327 samples, 0.25%)[unknown] (290 samples, 0.22%)[unknown] (222 samples, 0.17%)[unknown] (160 samples, 0.12%)[unknown] (104 samples, 0.08%)[unknown] (33 samples, 0.03%)[unknown] (25 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (703 samples, 0.54%)__GI___libc_free (866 samples, 0.66%)tracing::span::Span::record_all (30 samples, 0.02%)unlink_chunk (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (899 samples, 0.68%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (899 samples, 0.68%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (899 samples, 0.68%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (899 samples, 0.68%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (899 samples, 0.68%)alloc::alloc::dealloc (899 samples, 0.68%)__rdl_dealloc (899 samples, 0.68%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (899 samples, 0.68%)core::result::Result<T,E>::expect (91 samples, 0.07%)core::result::Result<T,E>::map_err (28 samples, 0.02%)[[vdso]] (28 samples, 0.02%)__GI___clock_gettime (47 samples, 0.04%)std::time::Instant::elapsed (67 samples, 0.05%)std::time::Instant::now (54 samples, 0.04%)std::sys::pal::unix::time::Instant::now (54 samples, 0.04%)std::sys::pal::unix::time::Timespec::now (53 samples, 
0.04%)std::sys::pal::unix::cvt (23 samples, 0.02%)__GI_getsockname (3,792 samples, 2.89%)__..[unknown] (3,714 samples, 2.83%)[u..[unknown] (3,661 samples, 2.79%)[u..[unknown] (3,557 samples, 2.71%)[u..[unknown] (3,416 samples, 2.60%)[u..[unknown] (2,695 samples, 2.05%)[..[unknown] (2,063 samples, 1.57%)[unknown] (891 samples, 0.68%)[unknown] (270 samples, 0.21%)[unknown] (99 samples, 0.08%)[unknown] (94 samples, 0.07%)[unknown] (84 samples, 0.06%)[unknown] (77 samples, 0.06%)[unknown] (25 samples, 0.02%)[unknown] (16 samples, 0.01%)std::sys_common::net::TcpListener::socket_addr::{{closure}} (3,800 samples, 2.89%)st..tokio::net::udp::UdpSocket::local_addr (3,838 samples, 2.92%)to..mio::net::udp::UdpSocket::local_addr (3,838 samples, 2.92%)mi..std::net::tcp::TcpListener::local_addr (3,838 samples, 2.92%)st..std::sys_common::net::TcpListener::socket_addr (3,838 samples, 2.92%)st..std::sys_common::net::sockname (3,835 samples, 2.92%)st..[[vdso]] (60 samples, 0.05%)rand_chacha::guts::ChaCha::pos64 (168 samples, 0.13%)<ppv_lite86::soft::x2<W,G> as core::ops::arith::AddAssign>::add_assign (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::AddAssign>::add_assign (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::Add>::add (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_add_epi32 (26 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_or_si256 (29 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (31 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 
(31 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right24 (18 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right24 (18 samples, 0.01%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (18 samples, 0.01%)rand_chacha::guts::round (118 samples, 0.09%)rand_chacha::guts::refill_wide::impl_avx2 (312 samples, 0.24%)rand_chacha::guts::refill_wide::fn_impl (312 samples, 0.24%)rand_chacha::guts::refill_wide_impl (312 samples, 0.24%)<rand_chacha::chacha::ChaCha12Core as rand_core::block::BlockRngCore>::generate (384 samples, 0.29%)rand_chacha::guts::ChaCha::refill4 (384 samples, 0.29%)rand::rng::Rng::gen (432 samples, 0.33%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (432 samples, 0.33%)rand::rng::Rng::gen (432 samples, 0.33%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (432 samples, 0.33%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (432 samples, 0.33%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (432 samples, 0.33%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (432 samples, 0.33%)rand_core::block::BlockRng<R>::generate_and_set (392 samples, 0.30%)<rand::rngs::adapter::reseeding::ReseedingCore<R,Rsdr> as rand_core::block::BlockRngCore>::generate (392 samples, 0.30%)torrust_tracker::servers::udp::handlers::RequestId::make (440 samples, 0.34%)uuid::v4::<impl uuid::Uuid>::new_v4 (436 samples, 0.33%)uuid::rng::bytes (435 samples, 0.33%)rand::random (435 samples, 0.33%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (34 
samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (22 samples, 0.02%)core::iter::traits::iterator::Iterator::collect (16 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (16 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (16 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (16 samples, 0.01%)<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::next (15 samples, 0.01%)core::iter::traits::iterator::Iterator::find (15 samples, 0.01%)core::iter::traits::iterator::Iterator::try_fold (15 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (31 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.03%)core::slice::iter::Iter<T>::post_inc_start (14 samples, 0.01%)core::ptr::non_null::NonNull<T>::add (14 samples, 0.01%)__memcmp_evex_movbe (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (26 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (165 samples, 0.13%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (165 samples, 0.13%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (165 samples, 0.13%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (165 samples, 0.13%)<u8 as core::slice::cmp::SliceOrd>::compare (165 samples, 0.13%)alloc::collections::btree::search::<impl 
alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (339 samples, 0.26%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (308 samples, 0.23%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (308 samples, 0.23%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (342 samples, 0.26%)std::sys::sync::rwlock::futex::RwLock::spin_read (25 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_until (25 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read_contended (28 samples, 0.02%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (436 samples, 0.33%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (397 samples, 0.30%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (29 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (29 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (29 samples, 0.02%)__memcmp_evex_movbe (31 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (52 samples, 0.04%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (52 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (52 samples, 0.04%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (52 samples, 0.04%)<u8 as core::slice::cmp::SliceOrd>::compare (52 samples, 
0.04%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (103 samples, 0.08%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (102 samples, 0.08%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (96 samples, 0.07%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (96 samples, 0.07%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (72 samples, 0.05%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (104 samples, 0.08%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (104 samples, 0.08%)core::slice::iter::Iter<T>::post_inc_start (32 samples, 0.02%)core::ptr::non_null::NonNull<T>::add (32 samples, 0.02%)__memcmp_evex_movbe (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (81 samples, 0.06%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (271 samples, 0.21%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (271 samples, 0.21%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (271 samples, 0.21%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (271 samples, 0.21%)<u8 as core::slice::cmp::SliceOrd>::compare (271 samples, 0.21%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (610 samples, 0.46%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (566 samples, 0.43%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (566 samples, 
0.43%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Immut,K,V,Type>::keys (18 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (616 samples, 0.47%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::KV>::split (15 samples, 0.01%)alloc::collections::btree::map::entry::Entry<K,V,A>::or_insert (46 samples, 0.04%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (45 samples, 0.03%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (40 samples, 0.03%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert (27 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (29 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (20 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (120 samples, 0.09%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (118 samples, 0.09%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Owned,K,V,alloc::collections::btree::node::marker::Leaf>::new_leaf (118 samples, 0.09%)alloc::collections::btree::node::LeafNode<K,V>::new (118 samples, 0.09%)alloc::boxed::Box<T,A>::new_uninit_in (118 samples, 0.09%)alloc::boxed::Box<T,A>::try_new_uninit_in (118 samples, 0.09%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (118 samples, 
0.09%)alloc::alloc::Global::alloc_impl (118 samples, 0.09%)alloc::alloc::alloc (118 samples, 0.09%)__rdl_alloc (118 samples, 0.09%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (118 samples, 0.09%)__GI___libc_malloc (118 samples, 0.09%)_int_malloc (107 samples, 0.08%)_int_malloc (28 samples, 0.02%)__GI___libc_malloc (32 samples, 0.02%)__rdl_alloc (36 samples, 0.03%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (36 samples, 0.03%)alloc::sync::Arc<T>::new (42 samples, 0.03%)alloc::boxed::Box<T>::new (42 samples, 0.03%)alloc::alloc::exchange_malloc (39 samples, 0.03%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (39 samples, 0.03%)alloc::alloc::Global::alloc_impl (39 samples, 0.03%)alloc::alloc::alloc (39 samples, 0.03%)core::mem::drop (15 samples, 0.01%)core::ptr::drop_in_place<core::option::Option<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (15 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (15 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (15 samples, 0.01%)__GI___libc_free (39 samples, 0.03%)_int_free (37 samples, 0.03%)get_max_fast (16 samples, 0.01%)core::option::Option<T>::is_some_and (50 samples, 0.04%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (50 samples, 0.04%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (50 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (50 samples, 0.04%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (290 samples, 
0.22%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (284 samples, 0.22%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (255 samples, 0.19%)std::sys::sync::rwlock::futex::RwLock::spin_read (16 samples, 0.01%)std::sys::sync::rwlock::futex::RwLock::spin_until (16 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (21 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (21 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (21 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read_contended (21 samples, 0.02%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (1,147 samples, 0.87%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (1,144 samples, 0.87%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents_mut (32 samples, 0.02%)std::sync::rwlock::RwLock<T>::write (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::write (32 samples, 
0.02%)std::sys::sync::rwlock::futex::RwLock::write_contended (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_write (28 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_until (28 samples, 0.02%)torrust_tracker::core::Tracker::announce::{{closure}} (1,597 samples, 1.22%)<core::net::socket_addr::SocketAddrV4 as core::hash::Hash>::hash (14 samples, 0.01%)<core::net::ip_addr::Ipv4Addr as core::hash::Hash>::hash (14 samples, 0.01%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (29 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (24 samples, 0.02%)<core::time::Nanoseconds as core::hash::Hash>::hash (25 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (25 samples, 0.02%)core::hash::Hasher::write_u32 (25 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (25 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (25 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (36 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (37 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (37 samples, 0.03%)<core::time::Duration as core::hash::Hash>::hash (64 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (39 samples, 0.03%)core::hash::Hasher::write_u64 (39 samples, 0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (122 samples, 0.09%)core::hash::impls::<impl core::hash::Hash for u64>::hash (58 samples, 0.04%)core::hash::Hasher::write_u64 (58 samples, 0.04%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (58 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (58 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (57 samples, 0.04%)core::hash::sip::u8to64_le (23 samples, 0.02%)core::hash::Hasher::write_length_prefix (27 samples, 0.02%)core::hash::Hasher::write_usize 
(27 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (16 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (246 samples, 0.19%)core::array::<impl core::hash::Hash for [T: N]>::hash (93 samples, 0.07%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (93 samples, 0.07%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (66 samples, 0.05%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (62 samples, 0.05%)core::hash::sip::u8to64_le (17 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::check (285 samples, 0.22%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (36 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (36 samples, 0.03%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (24 samples, 0.02%)std::time::SystemTime::now (19 samples, 0.01%)std::sys::pal::unix::time::SystemTime::now (19 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (1,954 samples, 1.49%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (24 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (18 samples, 0.01%)<core::time::Nanoseconds as core::hash::Hash>::hash (20 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (20 samples, 0.02%)core::hash::Hasher::write_u32 (20 samples, 0.02%)<std::hash::random::DefaultHasher as 
core::hash::Hasher>::write (20 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (20 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (44 samples, 0.03%)<core::time::Duration as core::hash::Hash>::hash (65 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (45 samples, 0.03%)core::hash::Hasher::write_u64 (45 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (45 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (45 samples, 0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (105 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u64>::hash (40 samples, 0.03%)core::hash::Hasher::write_u64 (40 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (40 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (40 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (39 samples, 0.03%)core::hash::Hasher::write_length_prefix (34 samples, 0.03%)core::hash::Hasher::write_usize (34 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (34 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (34 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (33 samples, 0.03%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (231 samples, 0.18%)core::array::<impl core::hash::Hash for [T: N]>::hash (100 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (100 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (66 samples, 0.05%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (61 samples, 0.05%)core::hash::sip::u8to64_le (16 samples, 0.01%)_int_free (16 samples, 
0.01%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (270 samples, 0.21%)torrust_tracker::servers::udp::connection_cookie::make (268 samples, 0.20%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (36 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (35 samples, 0.03%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (31 samples, 0.02%)std::time::SystemTime::now (26 samples, 0.02%)std::sys::pal::unix::time::SystemTime::now (26 samples, 0.02%)torrust_tracker::core::ScrapeData::add_file (19 samples, 0.01%)std::collections::hash::map::HashMap<K,V,S>::insert (19 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (19 samples, 0.01%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (16 samples, 0.01%)hashbrown::raw::RawTable<T,A>::reserve (16 samples, 0.01%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (17 samples, 0.01%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (17 samples, 0.01%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (17 samples, 0.01%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (17 samples, 0.01%)<u8 as core::slice::cmp::SliceOrd>::compare (17 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (53 samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (53 samples, 0.04%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (2,336 samples, 
1.78%)t..torrust_tracker::servers::udp::handlers::handle_scrape::{{closure}} (101 samples, 0.08%)torrust_tracker::core::Tracker::scrape::{{closure}} (90 samples, 0.07%)torrust_tracker::core::Tracker::get_swarm_metadata (68 samples, 0.05%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (64 samples, 0.05%)alloc::raw_vec::finish_grow (19 samples, 0.01%)alloc::vec::Vec<T,A>::reserve (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::grow_amortized (21 samples, 0.02%)<alloc::string::String as core::fmt::Write>::write_str (23 samples, 0.02%)alloc::string::String::push_str (23 samples, 0.02%)alloc::vec::Vec<T,A>::extend_from_slice (23 samples, 0.02%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (23 samples, 0.02%)alloc::vec::Vec<T,A>::append_elements (23 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (85 samples, 0.06%)core::fmt::num::imp::fmt_u64 (78 samples, 0.06%)<alloc::string::String as core::fmt::Write>::write_str (15 samples, 0.01%)alloc::string::String::push_str (15 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (15 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (15 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (15 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (37 samples, 0.03%)core::fmt::num::imp::fmt_u64 (36 samples, 0.03%)<T as alloc::string::ToString>::to_string (141 samples, 
0.11%)core::option::Option<T>::expect (34 samples, 0.03%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (28 samples, 0.02%)alloc::alloc::dealloc (28 samples, 0.02%)__rdl_dealloc (28 samples, 0.02%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (28 samples, 0.02%)core::ptr::drop_in_place<alloc::string::String> (55 samples, 0.04%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (55 samples, 0.04%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (55 samples, 0.04%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (55 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::current_memory (20 samples, 0.02%)torrust_tracker::servers::udp::logging::map_action_name (16 samples, 0.01%)binascii::bin2hex (51 samples, 0.04%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (16 samples, 0.01%)core::fmt::write (25 samples, 0.02%)core::fmt::rt::Argument::fmt (15 samples, 0.01%)core::fmt::Formatter::write_fmt (87 samples, 0.07%)core::str::converts::from_utf8 (43 samples, 0.03%)core::str::validations::run_utf8_validation (37 samples, 0.03%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (161 samples, 0.12%)<T as alloc::string::ToString>::to_string (161 samples, 0.12%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (156 samples, 0.12%)torrust_tracker::servers::udp::logging::log_request (479 samples, 0.36%)[[vdso]] (51 samples, 0.04%)alloc::raw_vec::finish_grow (56 samples, 0.04%)alloc::vec::Vec<T,A>::reserve (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::reserve (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::grow_amortized (64 samples, 0.05%)<alloc::string::String as core::fmt::Write>::write_str (65 samples, 0.05%)alloc::string::String::push_str (65 samples, 0.05%)alloc::vec::Vec<T,A>::extend_from_slice (65 samples, 0.05%)<alloc::vec::Vec<T,A> as 
alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (65 samples, 0.05%)alloc::vec::Vec<T,A>::append_elements (65 samples, 0.05%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (114 samples, 0.09%)core::fmt::num::imp::fmt_u64 (110 samples, 0.08%)<T as alloc::string::ToString>::to_string (132 samples, 0.10%)core::option::Option<T>::expect (20 samples, 0.02%)core::ptr::drop_in_place<alloc::string::String> (22 samples, 0.02%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (22 samples, 0.02%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (22 samples, 0.02%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (22 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (8,883 samples, 6.77%)torrust_t..torrust_tracker::servers::udp::logging::log_response (238 samples, 0.18%)__GI___lll_lock_wait_private (14 samples, 0.01%)futex_wait (14 samples, 0.01%)__GI___lll_lock_wake_private (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (17 samples, 0.01%)_int_malloc (191 samples, 0.15%)__libc_calloc (238 samples, 0.18%)__memcpy_avx512_unaligned_erms (34 samples, 0.03%)alloc::vec::from_elem (316 samples, 0.24%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (316 samples, 0.24%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (316 samples, 0.24%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (316 samples, 0.24%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (312 samples, 0.24%)alloc::alloc::Global::alloc_impl (312 samples, 0.24%)alloc::alloc::alloc_zeroed (312 samples, 0.24%)__rdl_alloc_zeroed (312 samples, 0.24%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (312 samples, 0.24%)byteorder::ByteOrder::write_i32 (18 samples, 0.01%)<byteorder::BigEndian as byteorder::ByteOrder>::write_u32 (18 samples, 0.01%)core::num::<impl 
u32>::to_be_bytes (18 samples, 0.01%)core::num::<impl u32>::to_be (18 samples, 0.01%)core::num::<impl u32>::swap_bytes (18 samples, 0.01%)byteorder::io::WriteBytesExt::write_i32 (89 samples, 0.07%)std::io::Write::write_all (71 samples, 0.05%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (71 samples, 0.05%)std::io::cursor::vec_write (71 samples, 0.05%)std::io::cursor::vec_write_unchecked (51 samples, 0.04%)core::ptr::mut_ptr::<impl *mut T>::copy_from (51 samples, 0.04%)core::intrinsics::copy (51 samples, 0.04%)aquatic_udp_protocol::response::Response::write (227 samples, 0.17%)byteorder::io::WriteBytesExt::write_i64 (28 samples, 0.02%)std::io::Write::write_all (21 samples, 0.02%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (21 samples, 0.02%)std::io::cursor::vec_write (21 samples, 0.02%)std::io::cursor::vec_write_unchecked (21 samples, 0.02%)core::ptr::mut_ptr::<impl *mut T>::copy_from (21 samples, 0.02%)core::intrinsics::copy (21 samples, 0.02%)__GI___lll_lock_wake_private (17 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (14 samples, 0.01%)__GI___lll_lock_wait_private (16 samples, 0.01%)futex_wait (15 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (136 samples, 0.10%)__GI___libc_free (206 samples, 0.16%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (211 samples, 0.16%)alloc::alloc::dealloc (211 samples, 0.16%)__rdl_dealloc (211 samples, 0.16%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (211 samples, 0.16%)core::ptr::drop_in_place<std::io::cursor::Cursor<alloc::vec::Vec<u8>>> (224 samples, 0.17%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (224 samples, 0.17%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (224 samples, 0.17%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (224 samples, 
0.17%)std::io::cursor::Cursor<T>::new (56 samples, 0.04%)tokio::io::ready::Ready::intersection (23 samples, 0.02%)tokio::io::ready::Ready::from_interest (23 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (83 samples, 0.06%)[unknown] (32,674 samples, 24.88%)[unknown][unknown] (32,402 samples, 24.68%)[unknown][unknown] (32,272 samples, 24.58%)[unknown][unknown] (32,215 samples, 24.54%)[unknown][unknown] (31,174 samples, 23.74%)[unknown][unknown] (30,794 samples, 23.45%)[unknown][unknown] (30,036 samples, 22.88%)[unknown][unknown] (28,639 samples, 21.81%)[unknown][unknown] (27,908 samples, 21.25%)[unknown][unknown] (26,013 samples, 19.81%)[unknown][unknown] (23,181 samples, 17.65%)[unknown][unknown] (19,559 samples, 14.90%)[unknown][unknown] (18,052 samples, 13.75%)[unknown][unknown] (15,794 samples, 12.03%)[unknown][unknown] (14,740 samples, 11.23%)[unknown][unknown] (12,486 samples, 9.51%)[unknown][unknown] (11,317 samples, 8.62%)[unknown][unknown] (10,725 samples, 8.17%)[unknown][unknown] (10,017 samples, 7.63%)[unknown][unknown] (9,713 samples, 7.40%)[unknown][unknown] (8,432 samples, 6.42%)[unknown][unknown] (8,062 samples, 6.14%)[unknown][unknown] (6,973 samples, 5.31%)[unknow..[unknown] (5,328 samples, 4.06%)[unk..[unknown] (4,352 samples, 3.31%)[un..[unknown] (3,786 samples, 2.88%)[u..[unknown] (3,659 samples, 2.79%)[u..[unknown] (3,276 samples, 2.50%)[u..[unknown] (2,417 samples, 1.84%)[..[unknown] (2,115 samples, 1.61%)[unknown] (1,610 samples, 1.23%)[unknown] (422 samples, 0.32%)[unknown] (84 samples, 0.06%)[unknown] (69 samples, 0.05%)__GI___pthread_disable_asynccancel (67 samples, 0.05%)__libc_sendto (32,896 samples, 25.05%)__libc_sendtotokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (32,981 samples, 25.12%)tokio::net::udp::UdpSocket::send_to_addr..mio::net::udp::UdpSocket::send_to (32,981 samples, 25.12%)mio::net::udp::UdpSocket::send_tomio::io_source::IoSource<T>::do_io (32,981 
samples, 25.12%)mio::io_source::IoSource<T>::do_iomio::sys::unix::stateless_io_source::IoSourceState::do_io (32,981 samples, 25.12%)mio::sys::unix::stateless_io_source::IoS..mio::net::udp::UdpSocket::send_to::{{closure}} (32,981 samples, 25.12%)mio::net::udp::UdpSocket::send_to::{{clo..std::net::udp::UdpSocket::send_to (32,981 samples, 25.12%)std::net::udp::UdpSocket::send_tostd::sys_common::net::UdpSocket::send_to (32,981 samples, 25.12%)std::sys_common::net::UdpSocket::send_tostd::sys::pal::unix::cvt (85 samples, 0.06%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (44,349 samples, 33.78%)torrust_tracker::servers::udp::server::Udp::process_req..torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (43,412 samples, 33.06%)torrust_tracker::servers::udp::server::Udp::process_va..torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (34,320 samples, 26.14%)torrust_tracker::servers::udp::server::Udp..torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (33,360 samples, 25.41%)torrust_tracker::servers::udp::server::Ud..tokio::net::udp::UdpSocket::send_to::{{closure}} (33,227 samples, 25.31%)tokio::net::udp::UdpSocket::send_to::{{c..tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (33,142 samples, 25.24%)tokio::net::udp::UdpSocket::send_to_addr..tokio::runtime::io::registration::Registration::async_io::{{closure}} (33,115 samples, 25.22%)tokio::runtime::io::registration::Regist..tokio::runtime::io::registration::Registration::readiness::{{closure}} (28 samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (18 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (15 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (14 samples, 0.01%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (15 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (15 samples, 
0.01%)core::sync::atomic::atomic_add (15 samples, 0.01%)__GI___lll_lock_wait_private (16 samples, 0.01%)futex_wait (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (135 samples, 0.10%)__GI___libc_free (147 samples, 0.11%)syscall (22 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Core<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (15 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::dealloc (24 samples, 0.02%)core::mem::drop (24 samples, 0.02%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (24 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (24 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::abort::AbortHandle> (262 samples, 0.20%)<tokio::runtime::task::abort::AbortHandle as core::ops::drop::Drop>::drop (262 samples, 0.20%)tokio::runtime::task::raw::RawTask::drop_abort_handle (256 samples, 0.19%)tokio::runtime::task::raw::drop_abort_handle (59 samples, 0.04%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (50 samples, 0.04%)tokio::runtime::task::state::State::ref_dec (50 samples, 0.04%)tokio::runtime::task::raw::RawTask::drop_join_handle_slow (16 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::join::JoinHandle<()>> (47 samples, 0.04%)<tokio::runtime::task::join::JoinHandle<T> as core::ops::drop::Drop>::drop (47 samples, 0.04%)tokio::runtime::task::state::State::drop_join_handle_fast (19 samples, 
0.01%)core::sync::atomic::AtomicUsize::compare_exchange_weak (19 samples, 0.01%)core::sync::atomic::atomic_compare_exchange_weak (19 samples, 0.01%)ringbuf::ring_buffer::base::RbBase::is_full (14 samples, 0.01%)<ringbuf::ring_buffer::shared::SharedRb<T,C> as ringbuf::ring_buffer::base::RbBase<T>>::head (14 samples, 0.01%)core::sync::atomic::AtomicUsize::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)ringbuf::consumer::Consumer<T,R>::advance (29 samples, 0.02%)ringbuf::ring_buffer::base::RbRead::advance_head (29 samples, 0.02%)ringbuf::ring_buffer::rb::Rb::pop (50 samples, 0.04%)ringbuf::consumer::Consumer<T,R>::pop (50 samples, 0.04%)ringbuf::producer::Producer<T,R>::advance (23 samples, 0.02%)ringbuf::ring_buffer::base::RbWrite::advance_tail (23 samples, 0.02%)core::num::nonzero::<impl core::ops::arith::Rem<core::num::nonzero::NonZero<usize>> for usize>::rem (19 samples, 0.01%)ringbuf::ring_buffer::rb::Rb::push_overwrite (107 samples, 0.08%)ringbuf::ring_buffer::rb::Rb::push (43 samples, 0.03%)ringbuf::producer::Producer<T,R>::push (43 samples, 0.03%)tokio::runtime::task::abort::AbortHandle::is_finished (84 samples, 0.06%)tokio::runtime::task::state::Snapshot::is_complete (84 samples, 0.06%)tokio::runtime::task::join::JoinHandle<T>::abort_handle (17 samples, 0.01%)tokio::runtime::task::raw::RawTask::ref_inc (17 samples, 0.01%)tokio::runtime::task::state::State::ref_inc (17 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (14 samples, 0.01%)core::sync::atomic::atomic_add (14 samples, 0.01%)__GI___lll_lock_wake_private (22 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)malloc_consolidate (95 samples, 0.07%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (76 samples, 0.06%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (31 samples, 
0.02%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (26 samples, 0.02%)_int_malloc (282 samples, 0.21%)__GI___libc_malloc (323 samples, 0.25%)alloc::vec::Vec<T>::with_capacity (326 samples, 0.25%)alloc::vec::Vec<T,A>::with_capacity_in (326 samples, 0.25%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (324 samples, 0.25%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (324 samples, 0.25%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (324 samples, 0.25%)alloc::alloc::Global::alloc_impl (324 samples, 0.25%)alloc::alloc::alloc (324 samples, 0.25%)__rdl_alloc (324 samples, 0.25%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (324 samples, 0.25%)tokio::io::ready::Ready::intersection (24 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (199 samples, 0.15%)tokio::util::bit::Pack::unpack (16 samples, 0.01%)tokio::util::bit::unpack (16 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (19 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (17 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (16 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (222 samples, 0.17%)tokio::net::udp::UdpSocket::ready::{{closure}} (222 samples, 0.17%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (50 samples, 0.04%)std::io::error::repr_bitpacked::Repr::data (14 samples, 0.01%)std::io::error::repr_bitpacked::decode_repr (14 samples, 0.01%)std::io::error::Error::kind (16 samples, 0.01%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (14 samples, 0.01%)[unknown] (8,756 samples, 6.67%)[unknown][unknown] (8,685 samples, 6.61%)[unknown][unknown] (8,574 samples, 6.53%)[unknown][unknown] (8,415 samples, 6.41%)[unknown][unknown] (7,686 samples, 5.85%)[unknow..[unknown] (7,239 samples, 
5.51%)[unknow..[unknown] (6,566 samples, 5.00%)[unkno..[unknown] (5,304 samples, 4.04%)[unk..[unknown] (4,008 samples, 3.05%)[un..[unknown] (3,571 samples, 2.72%)[u..[unknown] (2,375 samples, 1.81%)[..[unknown] (1,844 samples, 1.40%)[unknown] (1,030 samples, 0.78%)[unknown] (344 samples, 0.26%)[unknown] (113 samples, 0.09%)__libc_recvfrom (8,903 samples, 6.78%)__libc_re..__GI___pthread_disable_asynccancel (22 samples, 0.02%)std::sys::pal::unix::cvt (20 samples, 0.02%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (9,005 samples, 6.86%)tokio::ne..mio::net::udp::UdpSocket::recv_from (8,964 samples, 6.83%)mio::net:..mio::io_source::IoSource<T>::do_io (8,964 samples, 6.83%)mio::io_s..mio::sys::unix::stateless_io_source::IoSourceState::do_io (8,964 samples, 6.83%)mio::sys:..mio::net::udp::UdpSocket::recv_from::{{closure}} (8,964 samples, 6.83%)mio::net:..std::net::udp::UdpSocket::recv_from (8,964 samples, 6.83%)std::net:..std::sys_common::net::UdpSocket::recv_from (8,964 samples, 6.83%)std::sys_..std::sys::pal::unix::net::Socket::recv_from (8,964 samples, 6.83%)std::sys:..std::sys::pal::unix::net::Socket::recv_from_with_flags (8,964 samples, 6.83%)std::sys:..std::sys_common::net::sockaddr_to_addr (23 samples, 0.02%)tokio::runtime::io::registration::Registration::clear_readiness (18 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::clear_readiness (18 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (32 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (9,967 samples, 7.59%)torrust_tr..tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (9,291 samples, 7.08%)tokio::ne..tokio::runtime::io::registration::Registration::async_io::{{closure}} (9,287 samples, 7.07%)tokio::ru..tokio::runtime::io::registration::Registration::readiness::{{closure}} (45 samples, 0.03%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (41 samples, 
0.03%)__memcpy_avx512_unaligned_erms (424 samples, 0.32%)__memcpy_avx512_unaligned_erms (493 samples, 0.38%)__memcpy_avx512_unaligned_erms (298 samples, 0.23%)syscall (1,105 samples, 0.84%)[unknown] (1,095 samples, 0.83%)[unknown] (1,091 samples, 0.83%)[unknown] (1,049 samples, 0.80%)[unknown] (998 samples, 0.76%)[unknown] (907 samples, 0.69%)[unknown] (710 samples, 0.54%)[unknown] (635 samples, 0.48%)[unknown] (538 samples, 0.41%)[unknown] (358 samples, 0.27%)[unknown] (256 samples, 0.19%)[unknown] (153 samples, 0.12%)[unknown] (96 samples, 0.07%)[unknown] (81 samples, 0.06%)tokio::runtime::context::with_scheduler (36 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (31 samples, 0.02%)tokio::runtime::context::with_scheduler::{{closure}} (27 samples, 0.02%)tokio::runtime::context::scoped::Scoped<T>::with (27 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (25 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (15 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (340 samples, 0.26%)core::sync::atomic::atomic_add (340 samples, 0.26%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (354 samples, 0.27%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (367 samples, 0.28%)[unknown] (95 samples, 0.07%)[unknown] (93 samples, 0.07%)[unknown] (92 samples, 0.07%)[unknown] (90 samples, 0.07%)[unknown] (82 samples, 0.06%)[unknown] (73 samples, 0.06%)[unknown] (63 samples, 0.05%)[unknown] (44 samples, 0.03%)[unknown] (40 samples, 0.03%)[unknown] (35 samples, 0.03%)[unknown] (30 samples, 0.02%)[unknown] (22 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (17 samples, 
0.01%)tokio::runtime::driver::Handle::unpark (99 samples, 0.08%)tokio::runtime::driver::IoHandle::unpark (99 samples, 0.08%)tokio::runtime::io::driver::Handle::unpark (99 samples, 0.08%)mio::waker::Waker::wake (99 samples, 0.08%)mio::sys::unix::waker::fdbased::Waker::wake (99 samples, 0.08%)mio::sys::unix::waker::eventfd::WakerInternal::wake (99 samples, 0.08%)<&std::fs::File as std::io::Write>::write (99 samples, 0.08%)std::sys::pal::unix::fs::File::write (99 samples, 0.08%)std::sys::pal::unix::fd::FileDesc::write (99 samples, 0.08%)__GI___libc_write (99 samples, 0.08%)__GI___libc_write (99 samples, 0.08%)tokio::runtime::context::with_scheduler (1,615 samples, 1.23%)std::thread::local::LocalKey<T>::try_with (1,613 samples, 1.23%)tokio::runtime::context::with_scheduler::{{closure}} (1,612 samples, 1.23%)tokio::runtime::context::scoped::Scoped<T>::with (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (1,609 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (1,609 samples, 1.23%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (101 samples, 0.08%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (101 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_option_task_without_yield (1,647 samples, 1.25%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task (1,646 samples, 1.25%)tokio::runtime::scheduler::multi_thread::worker::with_current 
(1,646 samples, 1.25%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (23 samples, 0.02%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (18 samples, 0.01%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (104 samples, 0.08%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (60 samples, 0.05%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (57 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (49 samples, 0.04%)core::sync::atomic::AtomicU32::compare_exchange (38 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (38 samples, 0.03%)__memcpy_avx512_unaligned_erms (162 samples, 0.12%)__memcpy_avx512_unaligned_erms (34 samples, 0.03%)__GI___lll_lock_wake_private (127 samples, 0.10%)[unknown] (125 samples, 0.10%)[unknown] (124 samples, 0.09%)[unknown] (119 samples, 0.09%)[unknown] (110 samples, 0.08%)[unknown] (106 samples, 0.08%)[unknown] (87 samples, 0.07%)[unknown] (82 samples, 0.06%)[unknown] (51 samples, 0.04%)[unknown] (27 samples, 0.02%)[unknown] (19 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (77 samples, 0.06%)[unknown] (1,207 samples, 0.92%)[unknown] (1,146 samples, 0.87%)[unknown] (1,126 samples, 0.86%)[unknown] (1,091 samples, 0.83%)[unknown] (1,046 samples, 0.80%)[unknown] (962 samples, 0.73%)[unknown] (914 samples, 0.70%)[unknown] (848 samples, 0.65%)[unknown] (774 samples, 0.59%)[unknown] (580 samples, 0.44%)[unknown] (456 samples, 0.35%)[unknown] (305 samples, 0.23%)[unknown] (85 samples, 0.06%)__GI_mprotect (2,474 samples, 1.88%)_..[unknown] (2,457 samples, 1.87%)[..[unknown] (2,440 samples, 1.86%)[..[unknown] (2,436 samples, 1.86%)[..[unknown] (2,435 samples, 1.85%)[..[unknown] (2,360 samples, 1.80%)[..[unknown] (2,203 
samples, 1.68%)[unknown] (1,995 samples, 1.52%)[unknown] (1,709 samples, 1.30%)[unknown] (1,524 samples, 1.16%)[unknown] (1,193 samples, 0.91%)[unknown] (865 samples, 0.66%)[unknown] (539 samples, 0.41%)[unknown] (259 samples, 0.20%)[unknown] (80 samples, 0.06%)[unknown] (29 samples, 0.02%)sysmalloc (3,786 samples, 2.88%)sy..grow_heap (2,509 samples, 1.91%)g.._int_malloc (4,038 samples, 3.08%)_in..unlink_chunk (31 samples, 0.02%)alloc::alloc::exchange_malloc (4,335 samples, 3.30%)all..<alloc::alloc::Global as core::alloc::Allocator>::allocate (4,329 samples, 3.30%)<al..alloc::alloc::Global::alloc_impl (4,329 samples, 3.30%)all..alloc::alloc::alloc (4,329 samples, 3.30%)all..__rdl_alloc (4,329 samples, 3.30%)__r..std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (4,329 samples, 3.30%)std..std::sys::pal::unix::alloc::aligned_malloc (4,329 samples, 3.30%)std..__posix_memalign (4,297 samples, 3.27%)__p..__posix_memalign (4,297 samples, 3.27%)__p.._mid_memalign (4,297 samples, 3.27%)_mi.._int_memalign (4,149 samples, 3.16%)_in..sysmalloc (18 samples, 0.01%)core::option::Option<T>::map (6,666 samples, 5.08%)core::..tokio::task::spawn::spawn_inner::{{closure}} (6,665 samples, 5.08%)tokio:..tokio::runtime::scheduler::Handle::spawn (6,665 samples, 5.08%)tokio:..tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (6,664 samples, 5.08%)tokio:..tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (6,661 samples, 5.07%)tokio:..tokio::runtime::task::list::OwnedTasks<S>::bind (4,692 samples, 3.57%)toki..tokio::runtime::task::new_task (4,579 samples, 3.49%)tok..tokio::runtime::task::raw::RawTask::new (4,579 samples, 3.49%)tok..tokio::runtime::task::core::Cell<T,S>::new (4,579 samples, 3.49%)tok..alloc::boxed::Box<T>::new (4,389 samples, 3.34%)all..tokio::runtime::context::current::with_current (7,636 samples, 5.82%)tokio::..std::thread::local::LocalKey<T>::try_with (7,635 samples, 
5.81%)std::th..tokio::runtime::context::current::with_current::{{closure}} (7,188 samples, 5.47%)tokio::..tokio::task::spawn::spawn (7,670 samples, 5.84%)tokio::..tokio::task::spawn::spawn_inner (7,670 samples, 5.84%)tokio::..tokio::runtime::task::id::Id::next (24 samples, 0.02%)core::sync::atomic::AtomicU64::fetch_add (24 samples, 0.02%)core::sync::atomic::atomic_add (24 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (62,691 samples, 47.75%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (62,691 samples, 47.75%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (18,228 samples, 13.88%)torrust_tracker::serv..torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (18,226 samples, 13.88%)torrust_tracker::serv..torrust_tracker::servers::udp::server::Udp::spawn_request_processor (7,679 samples, 5.85%)torrust..__memcpy_avx512_unaligned_erms (38 samples, 0.03%)__memcpy_avx512_unaligned_erms (407 samples, 0.31%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (411 samples, 0.31%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (411 samples, 0.31%)tokio::runtime::task::core::Core<T,S>::poll (63,150 samples, 48.10%)tokio::runtime::task::core::Core<T,S>::polltokio::runtime::task::core::Core<T,S>::drop_future_or_output (459 samples, 0.35%)tokio::runtime::task::core::Core<T,S>::set_stage (459 samples, 0.35%)__memcpy_avx512_unaligned_erms (16 samples, 0.01%)__memcpy_avx512_unaligned_erms (398 samples, 0.30%)__memcpy_avx512_unaligned_erms (325 samples, 0.25%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (330 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (330 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::set_stage (731 samples, 0.56%)tokio::runtime::task::harness::poll_future (63,908 samples, 
48.67%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (63,908 samples, 48.67%)std::panic::catch_unwindstd::panicking::try (63,908 samples, 48.67%)std::panicking::trystd::panicking::try::do_call (63,908 samples, 48.67%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (63,908 samples, 48.67%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()..tokio::runtime::task::harness::poll_future::{{closure}} (63,908 samples, 48.67%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::store_output (758 samples, 0.58%)tokio::runtime::coop::budget (65,027 samples, 49.53%)tokio::runtime::coop::budgettokio::runtime::coop::with_budget (65,027 samples, 49.53%)tokio::runtime::coop::with_budgettokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (65,009 samples, 49.51%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}}tokio::runtime::task::LocalNotified<S>::run (65,003 samples, 49.51%)tokio::runtime::task::LocalNotified<S>::runtokio::runtime::task::raw::RawTask::poll (65,003 samples, 49.51%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (64,538 samples, 49.15%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (64,493 samples, 49.12%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (63,919 samples, 48.68%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (93 samples, 0.07%)syscall (2,486 samples, 1.89%)s..[unknown] (2,424 samples, 1.85%)[..[unknown] (2,416 samples, 1.84%)[..[unknown] (2,130 samples, 1.62%)[unknown] (2,013 samples, 1.53%)[unknown] (1,951 samples, 1.49%)[unknown] (1,589 samples, 1.21%)[unknown] (1,415 samples, 1.08%)[unknown] (1,217 samples, 0.93%)[unknown] (820 samples, 
0.62%)[unknown] (564 samples, 0.43%)[unknown] (360 samples, 0.27%)[unknown] (244 samples, 0.19%)[unknown] (194 samples, 0.15%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (339 samples, 0.26%)core::sync::atomic::AtomicUsize::fetch_add (337 samples, 0.26%)core::sync::atomic::atomic_add (337 samples, 0.26%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (364 samples, 0.28%)[unknown] (154 samples, 0.12%)[unknown] (152 samples, 0.12%)[unknown] (143 samples, 0.11%)[unknown] (139 samples, 0.11%)[unknown] (131 samples, 0.10%)[unknown] (123 samples, 0.09%)[unknown] (110 samples, 0.08%)[unknown] (80 samples, 0.06%)[unknown] (74 samples, 0.06%)[unknown] (65 samples, 0.05%)[unknown] (64 samples, 0.05%)[unknown] (47 samples, 0.04%)[unknown] (44 samples, 0.03%)[unknown] (43 samples, 0.03%)[unknown] (40 samples, 0.03%)[unknown] (26 samples, 0.02%)[unknown] (20 samples, 0.02%)__GI___libc_write (158 samples, 0.12%)__GI___libc_write (158 samples, 0.12%)mio::sys::unix::waker::eventfd::WakerInternal::wake (159 samples, 0.12%)<&std::fs::File as std::io::Write>::write (159 samples, 0.12%)std::sys::pal::unix::fs::File::write (159 samples, 0.12%)std::sys::pal::unix::fd::FileDesc::write (159 samples, 0.12%)tokio::runtime::driver::Handle::unpark (168 samples, 0.13%)tokio::runtime::driver::IoHandle::unpark (168 samples, 0.13%)tokio::runtime::io::driver::Handle::unpark (168 samples, 0.13%)mio::waker::Waker::wake (165 samples, 0.13%)mio::sys::unix::waker::fdbased::Waker::wake (165 samples, 0.13%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (68,159 samples, 51.91%)tokio::runtime::scheduler::multi_thread::worker::Context::run_tasktokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (3,024 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (3,023 samples, 
2.30%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (3,022 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (171 samples, 0.13%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (171 samples, 0.13%)core::option::Option<T>::or_else (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::tune_global_queue_interval (53 samples, 0.04%)tokio::runtime::scheduler::multi_thread::stats::Stats::tuned_global_queue_interval (53 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (107 samples, 0.08%)__GI___libc_free (17 samples, 0.01%)_int_free (17 samples, 0.01%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Dying,K,V>::deallocating_end (18 samples, 0.01%)alloc::collections::btree::navigate::<impl alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>>::deallocating_end (18 samples, 0.01%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,alloc::collections::btree::node::marker::LeafOrInternal>::deallocate_and_ascend (18 samples, 0.01%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (18 samples, 0.01%)alloc::alloc::dealloc (18 samples, 0.01%)__rdl_dealloc (18 samples, 0.01%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (18 samples, 0.01%)alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (19 samples, 
0.01%)tokio::runtime::task::Task<S>::shutdown (26 samples, 0.02%)tokio::runtime::task::raw::RawTask::shutdown (26 samples, 0.02%)tokio::runtime::task::raw::shutdown (26 samples, 0.02%)tokio::runtime::task::harness::Harness<T,S>::shutdown (26 samples, 0.02%)tokio::runtime::task::harness::cancel_task (26 samples, 0.02%)std::panic::catch_unwind (26 samples, 0.02%)std::panicking::try (26 samples, 0.02%)std::panicking::try::do_call (26 samples, 0.02%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (26 samples, 0.02%)core::ops::function::FnOnce::call_once (26 samples, 0.02%)tokio::runtime::task::harness::cancel_task::{{closure}} (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage (26 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (26 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::core::Tracker> (26 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)core::ptr::drop_in_place<std::sync::rwlock::RwLock<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 
0.02%)core::ptr::drop_in_place<core::cell::UnsafeCell<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)core::mem::drop (26 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::IntoIter<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)<alloc::collections::btree::map::IntoIter<K,V,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,NodeType>,alloc::collections::btree::node::marker::KV>::drop_key_val (24 samples, 0.02%)core::mem::maybe_uninit::MaybeUninit<T>::assume_init_drop (24 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (24 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (21 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>> (20 samples, 0.02%)core::ptr::drop_in_place<core::cell::UnsafeCell<torrust_tracker_torrent_repository::entry::Torrent>> (20 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker_torrent_repository::entry::Torrent> (20 samples, 
0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::peer::Id,alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (20 samples, 0.02%)<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)core::mem::drop (20 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::IntoIter<torrust_tracker_primitives::peer::Id,alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (20 samples, 0.02%)<alloc::collections::btree::map::IntoIter<K,V,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::pre_shutdown (33 samples, 0.03%)tokio::runtime::task::list::OwnedTasks<S>::close_and_shutdown_all (33 samples, 0.03%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (114 samples, 0.09%)alloc::sync::Arc<T,A>::inner (114 samples, 0.09%)core::ptr::non_null::NonNull<T>::as_ref (114 samples, 0.09%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (108 samples, 0.08%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (108 samples, 0.08%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (106 samples, 0.08%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (49 samples, 0.04%)alloc::sync::Arc<T,A>::inner (49 samples, 0.04%)core::ptr::non_null::NonNull<T>::as_ref (49 samples, 0.04%)core::num::<impl u32>::wrapping_sub (132 samples, 0.10%)core::sync::atomic::AtomicU64::load (40 samples, 0.03%)core::sync::atomic::atomic_load (40 samples, 0.03%)tokio::loom::std::atomic_u32::AtomicU32::unsync_load (48 samples, 0.04%)core::sync::atomic::AtomicU32::load (48 samples, 0.04%)core::sync::atomic::atomic_load (48 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (65 samples, 0.05%)alloc::sync::Arc<T,A>::inner (65 samples, 0.05%)core::ptr::non_null::NonNull<T>::as_ref (65 samples, 0.05%)core::num::<impl 
u32>::wrapping_sub (50 samples, 0.04%)core::sync::atomic::AtomicU32::load (55 samples, 0.04%)core::sync::atomic::atomic_load (55 samples, 0.04%)core::sync::atomic::AtomicU64::load (80 samples, 0.06%)core::sync::atomic::atomic_load (80 samples, 0.06%)tokio::runtime::scheduler::multi_thread::queue::pack (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (666 samples, 0.51%)tokio::runtime::scheduler::multi_thread::queue::unpack (147 samples, 0.11%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (1,036 samples, 0.79%)tokio::runtime::scheduler::multi_thread::queue::unpack (46 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_searching (49 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_searching (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (2,414 samples, 1.84%)t..tokio::util::rand::FastRand::fastrand_n (24 samples, 0.02%)tokio::util::rand::FastRand::fastrand (24 samples, 0.02%)std::sys_common::backtrace::__rust_begin_short_backtrace (98,136 samples, 74.74%)std::sys_common::backtrace::__rust_begin_short_backtracetokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}} (98,136 samples, 74.74%)tokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}}tokio::runtime::blocking::pool::Inner::run (98,136 samples, 74.74%)tokio::runtime::blocking::pool::Inner::runtokio::runtime::blocking::pool::Task::run (98,042 samples, 74.67%)tokio::runtime::blocking::pool::Task::runtokio::runtime::task::UnownedTask<S>::run (98,042 samples, 74.67%)tokio::runtime::task::UnownedTask<S>::runtokio::runtime::task::raw::RawTask::poll (98,042 samples, 74.67%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (98,042 samples, 74.67%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (98,042 samples, 
74.67%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (98,042 samples, 74.67%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::task::harness::poll_future (98,042 samples, 74.67%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (98,042 samples, 74.67%)std::panic::catch_unwindstd::panicking::try (98,042 samples, 74.67%)std::panicking::trystd::panicking::try::do_call (98,042 samples, 74.67%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (98,042 samples, 74.67%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncetokio::runtime::task::harness::poll_future::{{closure}} (98,042 samples, 74.67%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::poll (98,042 samples, 74.67%)tokio::runtime::task::core::Core<T,S>::polltokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (98,042 samples, 74.67%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (98,042 samples, 74.67%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (98,042 samples, 74.67%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::polltokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}}tokio::runtime::scheduler::multi_thread::worker::run (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::runtokio::runtime::context::runtime::enter_runtime (98,042 samples, 74.67%)tokio::runtime::context::runtime::enter_runtimetokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (98,042 samples, 
74.67%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}tokio::runtime::context::set_scheduler (98,042 samples, 74.67%)tokio::runtime::context::set_schedulerstd::thread::local::LocalKey<T>::with (98,042 samples, 74.67%)std::thread::local::LocalKey<T>::withstd::thread::local::LocalKey<T>::try_with (98,042 samples, 74.67%)std::thread::local::LocalKey<T>::try_withtokio::runtime::context::set_scheduler::{{closure}} (98,042 samples, 74.67%)tokio::runtime::context::set_scheduler::{{closure}}tokio::runtime::context::scoped::Scoped<T>::set (98,042 samples, 74.67%)tokio::runtime::context::scoped::Scoped<T>::settokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}}tokio::runtime::scheduler::multi_thread::worker::Context::run (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::Context::runstd::panic::catch_unwind (98,137 samples, 74.74%)std::panic::catch_unwindstd::panicking::try (98,137 samples, 74.74%)std::panicking::trystd::panicking::try::do_call (98,137 samples, 74.74%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (98,137 samples, 74.74%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncestd::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}} (98,137 samples, 74.74%)std::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}}<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (98,139 samples, 74.74%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (98,139 samples, 74.74%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_oncecore::ops::function::FnOnce::call_once{{vtable.shim}} (98,139 samples, 
74.74%)core::ops::function::FnOnce::call_once{{vtable.shim}}std::thread::Builder::spawn_unchecked_::{{closure}} (98,139 samples, 74.74%)std::thread::Builder::spawn_unchecked_::{{closure}}clone3 (98,205 samples, 74.79%)clone3start_thread (98,205 samples, 74.79%)start_threadstd::sys::pal::unix::thread::Thread::new::thread_start (98,158 samples, 74.76%)std::sys::pal::unix::thread::Thread::new::thread_startcore::ptr::drop_in_place<std::sys::pal::unix::stack_overflow::Handler> (19 samples, 0.01%)<std::sys::pal::unix::stack_overflow::Handler as core::ops::drop::Drop>::drop (19 samples, 0.01%)std::sys::pal::unix::stack_overflow::imp::drop_handler (19 samples, 0.01%)__GI_munmap (19 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (17 samples, 0.01%)[unknown] (16 samples, 0.01%)core::fmt::Formatter::pad_integral (112 samples, 0.09%)core::fmt::Formatter::pad_integral::write_prefix (59 samples, 0.04%)core::fmt::Formatter::pad_integral (16 samples, 0.01%)core::fmt::write (20 samples, 0.02%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (19 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (51 samples, 0.04%)rand_chacha::guts::round (18 samples, 0.01%)rand_chacha::guts::refill_wide::impl_avx2 (26 samples, 0.02%)rand_chacha::guts::refill_wide::fn_impl (26 samples, 0.02%)rand_chacha::guts::refill_wide_impl (26 samples, 0.02%)rand_chacha::guts::refill_wide (14 samples, 0.01%)std_detect::detect::arch::x86::__is_feature_detected::avx2 (14 samples, 0.01%)std_detect::detect::check_for (14 samples, 0.01%)std_detect::detect::cache::test (14 samples, 0.01%)std_detect::detect::cache::Cache::test (14 samples, 0.01%)core::sync::atomic::AtomicUsize::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 
0.01%)core::cell::RefCell<T>::borrow_mut (81 samples, 0.06%)core::cell::RefCell<T>::try_borrow_mut (81 samples, 0.06%)core::cell::BorrowRefMut::new (81 samples, 0.06%)std::sys::pal::unix::time::Timespec::now (164 samples, 0.12%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (106 samples, 0.08%)tokio::runtime::coop::budget (105 samples, 0.08%)tokio::runtime::coop::with_budget (105 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (96 samples, 0.07%)std::sys::pal::unix::time::Timespec::sub_timespec (35 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::lock_contended (15 samples, 0.01%)syscall (90 samples, 0.07%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (21 samples, 0.02%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run (61 samples, 0.05%)tokio::runtime::context::runtime::enter_runtime (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (61 samples, 0.05%)tokio::runtime::context::set_scheduler (61 samples, 0.05%)std::thread::local::LocalKey<T>::with (61 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (61 samples, 0.05%)tokio::runtime::context::set_scheduler::{{closure}} (61 samples, 0.05%)tokio::runtime::context::scoped::Scoped<T>::set (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Context::run (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (19 samples, 
0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (17 samples, 0.01%)tokio::runtime::context::CONTEXT::__getit (14 samples, 0.01%)core::cell::Cell<T>::get (14 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (22 samples, 0.02%)<tokio::runtime::task::core::TaskIdGuard as core::ops::drop::Drop>::drop (22 samples, 0.02%)tokio::runtime::context::set_current_task_id (22 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (22 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (112 samples, 0.09%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (111 samples, 0.08%)tokio::runtime::task::harness::poll_future (125 samples, 0.10%)std::panic::catch_unwind (125 samples, 0.10%)std::panicking::try (125 samples, 0.10%)std::panicking::try::do_call (125 samples, 0.10%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (125 samples, 0.10%)tokio::runtime::task::harness::poll_future::{{closure}} (125 samples, 0.10%)tokio::runtime::task::core::Core<T,S>::poll (125 samples, 0.10%)tokio::runtime::task::raw::poll (157 samples, 0.12%)tokio::runtime::task::harness::Harness<T,S>::poll (135 samples, 0.10%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (135 samples, 0.10%)tokio::runtime::time::Driver::park_internal (15 samples, 0.01%)torrust_tracker::bootstrap::logging::INIT (17 samples, 0.01%)__memcpy_avx512_unaligned_erms (397 samples, 0.30%)_int_free (24 samples, 0.02%)_int_malloc (132 samples, 0.10%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE::META (570 samples, 0.43%)__GI___lll_lock_wait_private (22 samples, 0.02%)futex_wait (14 samples, 0.01%)__memcpy_avx512_unaligned_erms (299 samples, 0.23%)_int_free (16 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE (361 samples, 
0.27%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (41 samples, 0.03%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (23 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (53 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (14 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (63 samples, 0.05%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (21 samples, 0.02%)__GI___libc_malloc (18 samples, 0.01%)alloc::vec::Vec<T>::with_capacity (116 samples, 0.09%)alloc::vec::Vec<T,A>::with_capacity_in (116 samples, 0.09%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (116 samples, 0.09%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (116 samples, 0.09%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (116 samples, 0.09%)alloc::alloc::Global::alloc_impl (116 samples, 0.09%)alloc::alloc::alloc (116 samples, 0.09%)__rdl_alloc (116 samples, 0.09%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (116 samples, 0.09%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (53 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (53 samples, 0.04%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (53 samples, 0.04%)_int_malloc (21 samples, 0.02%)[unknown] (36 samples, 0.03%)[unknown] (16 samples, 0.01%)core::mem::zeroed (27 samples, 0.02%)core::mem::maybe_uninit::MaybeUninit<T>::zeroed (27 samples, 0.02%)core::ptr::mut_ptr::<impl *mut T>::write_bytes (27 samples, 0.02%)core::intrinsics::write_bytes (27 samples, 0.02%)[unknown] (27 samples, 0.02%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (64 samples, 0.05%)mio::net::udp::UdpSocket::recv_from (49 samples, 0.04%)mio::io_source::IoSource<T>::do_io (49 samples, 
0.04%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (49 samples, 0.04%)mio::net::udp::UdpSocket::recv_from::{{closure}} (49 samples, 0.04%)std::net::udp::UdpSocket::recv_from (49 samples, 0.04%)std::sys_common::net::UdpSocket::recv_from (49 samples, 0.04%)std::sys::pal::unix::net::Socket::recv_from (49 samples, 0.04%)std::sys::pal::unix::net::Socket::recv_from_with_flags (49 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (271 samples, 0.21%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (143 samples, 0.11%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (141 samples, 0.11%)tokio::runtime::io::registration::Registration::clear_readiness (15 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::clear_readiness (15 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (15 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (359 samples, 0.27%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (346 samples, 0.26%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (39 samples, 0.03%)tokio::task::spawn::spawn (39 samples, 0.03%)tokio::task::spawn::spawn_inner (39 samples, 0.03%)tokio::runtime::context::current::with_current (39 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (39 samples, 0.03%)tokio::runtime::context::current::with_current::{{closure}} (39 samples, 0.03%)core::option::Option<T>::map (39 samples, 0.03%)tokio::task::spawn::spawn_inner::{{closure}} (39 samples, 0.03%)tokio::runtime::scheduler::Handle::spawn (39 samples, 0.03%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (39 samples, 0.03%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (39 samples, 0.03%)tokio::runtime::task::list::OwnedTasks<S>::bind (34 samples, 0.03%)all (131,301 samples, 100%)tokio-runtime-w (131,061 samples, 
99.82%)tokio-runtime-w \ No newline at end of file From 608585eaf65a08df28fbeb82400647fd15f78a25 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 4 Apr 2024 17:12:43 +0100 Subject: [PATCH 0781/1003] chore: add new cargo dependency: crossbeam-skiplist It will be used to create a new torrent repository implementation that allows adding torrents in parallel. That could be potentially faster than BTreeMap in a write intensive context. --- Cargo.lock | 12 ++++++++++++ Cargo.toml | 5 +++-- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e77b5de6d..74d9aaafd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1027,6 +1027,16 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "crossbeam-skiplist" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df29de440c58ca2cc6e587ec3d22347551a32435fbde9d2bff64e78a9ffa151b" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.19" @@ -3908,6 +3918,7 @@ dependencies = [ "clap", "colored", "config", + "crossbeam-skiplist", "derive_more", "fern", "futures", @@ -4013,6 +4024,7 @@ version = "3.0.0-alpha.12-develop" dependencies = [ "async-std", "criterion", + "crossbeam-skiplist", "futures", "rstest", "tokio", diff --git a/Cargo.toml b/Cargo.toml index d045b945a..8c1df0685 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,6 +41,7 @@ chrono = { version = "0", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } colored = "2" config = "0" +crossbeam-skiplist = "0.1" derive_more = "0" fern = "0" futures = "0" @@ -63,8 +64,8 @@ serde_json = "1" serde_repr = "0" thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "packages/configuration" } torrust-tracker-clock = { version = "3.0.0-alpha.12-develop", path
= "packages/clock" } +torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "packages/configuration" } torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12-develop", path = "contrib/bencode" } torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "packages/located-error" } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "packages/primitives" } @@ -105,4 +106,4 @@ opt-level = 3 [profile.release-debug] inherits = "release" -debug = true \ No newline at end of file +debug = true From 642d6be04ae968a2b170ede2c379183a792bd782 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 4 Apr 2024 17:14:43 +0100 Subject: [PATCH 0782/1003] feat: new torrent repository using crossbeam_skiplist::SkipMap SkipMap is an ordered map based on a lock-free skip list. It's an alternative to BTreeMap which supports concurrent access across multiple threads. One of the performance problems with the current solution is we can only add one torrent at a time because threads need to lock the whole BTreeMap. The SkipMap should avoid that problem.
More info about SkipMap: https://docs.rs/crossbeam-skiplist/latest/crossbeam_skiplist/struct.SkipMap.html#method.remove The aquatic UDP load test was executed with the current implementation and the new one: Current Implementation: Requests out: 397287.37/second Responses in: 357549.15/second - Connect responses: 177073.94 - Announce responses: 176905.36 - Scrape responses: 3569.85 - Error responses: 0.00 Peers per announce response: 0.00 Announce responses per info hash: - p10: 1 - p25: 1 - p50: 1 - p75: 1 - p90: 2 - p95: 3 - p99: 104 - p99.9: 287 - p100: 371 New Implementation: Requests out: 396788.68/second Responses in: 357105.27/second - Connect responses: 176662.91 - Announce responses: 176863.44 - Scrape responses: 3578.91 - Error responses: 0.00 Peers per announce response: 0.00 Announce responses per info hash: - p10: 1 - p25: 1 - p50: 1 - p75: 1 - p90: 2 - p95: 3 - p99: 105 - p99.9: 287 - p100: 351 The result is pretty similar but the benchmarking for the repository using criterion shows that this implementation is a little bit better than the current one.
--- cSpell.json | 1 + packages/torrent-repository/Cargo.toml | 5 +- .../benches/repository_benchmark.rs | 21 +++- packages/torrent-repository/src/lib.rs | 3 + .../torrent-repository/src/repository/mod.rs | 1 + .../src/repository/skip_map_mutex_std.rs | 106 ++++++++++++++++++ src/core/torrent/mod.rs | 5 +- 7 files changed, 137 insertions(+), 5 deletions(-) create mode 100644 packages/torrent-repository/src/repository/skip_map_mutex_std.rs diff --git a/cSpell.json b/cSpell.json index 0ee2f8306..0480590af 100644 --- a/cSpell.json +++ b/cSpell.json @@ -135,6 +135,7 @@ "Shareaza", "sharktorrent", "SHLVL", + "skiplist", "socketaddr", "sqllite", "subsec", diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 4cea8767f..5f1a20d32 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -16,11 +16,12 @@ rust-version.workspace = true version.workspace = true [dependencies] +crossbeam-skiplist = "0.1" futures = "0.3.29" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } -torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } torrust-tracker-clock = { version = "3.0.0-alpha.12-develop", path = "../clock" } +torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } [dev-dependencies] criterion = { version = "0", features = ["async_tokio"] } diff --git a/packages/torrent-repository/benches/repository_benchmark.rs b/packages/torrent-repository/benches/repository_benchmark.rs index a3684c8e2..65608c86c 100644 --- a/packages/torrent-repository/benches/repository_benchmark.rs +++ b/packages/torrent-repository/benches/repository_benchmark.rs @@ -5,7 +5,7 @@ mod helpers; use 
criterion::{criterion_group, criterion_main, Criterion}; use torrust_tracker_torrent_repository::{ TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, - TorrentsRwLockTokioMutexTokio, + TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexStd, }; use crate::helpers::{asyn, sync}; @@ -45,6 +45,10 @@ fn add_one_torrent(c: &mut Criterion) { .iter_custom(asyn::add_one_torrent::); }); + group.bench_function("SkipMapMutexStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + group.finish(); } @@ -89,6 +93,11 @@ fn add_multiple_torrents_in_parallel(c: &mut Criterion) { .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); }); + group.bench_function("SkipMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + group.finish(); } @@ -133,6 +142,11 @@ fn update_one_torrent_in_parallel(c: &mut Criterion) { .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); }); + group.bench_function("SkipMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + group.finish(); } @@ -178,6 +192,11 @@ fn update_multiple_torrents_in_parallel(c: &mut Criterion) { }); }); + group.bench_function("SkipMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + group.finish(); } diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 8bb1b6def..f7c19624c 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use repository::skip_map_mutex_std::CrossbeamSkipList; use torrust_tracker_clock::clock; pub mod entry; @@ -16,6 +17,8 @@ pub type TorrentsRwLockTokio = repository::RwLockTokio; pub type TorrentsRwLockTokioMutexStd = 
repository::RwLockTokio; pub type TorrentsRwLockTokioMutexTokio = repository::RwLockTokio; +pub type TorrentsSkipMapMutexStd = CrossbeamSkipList; + /// This code needs to be copied into each crate. /// Working version, for production. #[cfg(not(test))] diff --git a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs index 494040c9d..7ede1f87a 100644 --- a/packages/torrent-repository/src/repository/mod.rs +++ b/packages/torrent-repository/src/repository/mod.rs @@ -11,6 +11,7 @@ pub mod rw_lock_std_mutex_tokio; pub mod rw_lock_tokio; pub mod rw_lock_tokio_mutex_std; pub mod rw_lock_tokio_mutex_tokio; +pub mod skip_map_mutex_std; use std::fmt::Debug; diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs new file mode 100644 index 000000000..aa1f43826 --- /dev/null +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -0,0 +1,106 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use crossbeam_skiplist::SkipMap; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::Repository; +use crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle}; + +#[derive(Default, Debug)] +pub struct CrossbeamSkipList { + torrents: SkipMap, +} + +impl Repository for CrossbeamSkipList +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); + 
entry.value().insert_or_update_peer_and_get_stats(peer) + } + + fn get(&self, key: &InfoHash) -> Option { + let maybe_entry = self.torrents.get(key); + maybe_entry.map(|entry| entry.value().clone()) + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in &self.torrents { + let stats = entry.value().lock().expect("it should get a lock").get_stats(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + match pagination { + Some(pagination) => self + .torrents + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .torrents + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + for (info_hash, completed) in persistent_torrents { + if self.torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexStd::new( + EntrySingle { + peers: BTreeMap::default(), + downloaded: *completed, + } + .into(), + ); + + // Since SkipMap is lock-free the torrent could have been inserted + // after checking if it exists. 
+ self.torrents.get_or_insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|entry| entry.value().clone()) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { + entry.value().remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + for entry in &self.torrents { + if entry.value().is_good(policy) { + continue; + } + + entry.remove(); + } + } +} diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index ab78de683..5d42e8b4d 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -26,6 +26,7 @@ //! Peer that don not have a full copy of the torrent data are called "leechers". //! -use torrust_tracker_torrent_repository::TorrentsRwLockStdMutexStd; +use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; -pub type Torrents = TorrentsRwLockStdMutexStd; // Currently Used +//pub type Torrents = TorrentsRwLockStdMutexStd; // Currently Used +pub type Torrents = TorrentsSkipMapMutexStd; // Currently Used From eec20247e146b0b822e86061943f4c899c93658f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 5 Apr 2024 18:27:17 +0100 Subject: [PATCH 0783/1003] chore: ignore crossbeam-skiplist crate in cargo-machete It's been used in the src/packages/torrent-repository package. 
--- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 8c1df0685..f440799cc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,7 +77,7 @@ url = "2" uuid = { version = "1", features = ["v4"] } [package.metadata.cargo-machete] -ignored = ["serde_bytes"] +ignored = ["serde_bytes", "crossbeam-skiplist"] [dev-dependencies] local-ip-address = "0" From 098928592f821cb2aa779746d6e40b71703c529b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 5 Apr 2024 18:29:24 +0100 Subject: [PATCH 0784/1003] refactor: separate torrent repository trait from implementations There are now more implementations. --- packages/torrent-repository/src/lib.rs | 14 ++++---- .../torrent-repository/src/repository/mod.rs | 34 ------------------- .../src/repository/rw_lock_std.rs | 16 +++++++++ .../src/repository/rw_lock_tokio.rs | 18 ++++++++++ .../tests/repository/mod.rs | 3 +- src/core/torrent/mod.rs | 2 -- 6 files changed, 44 insertions(+), 43 deletions(-) diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index f7c19624c..ccaf579e3 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,5 +1,7 @@ use std::sync::Arc; +use repository::rw_lock_std::RwLockStd; +use repository::rw_lock_tokio::RwLockTokio; use repository::skip_map_mutex_std::CrossbeamSkipList; use torrust_tracker_clock::clock; @@ -10,12 +12,12 @@ pub type EntrySingle = entry::Torrent; pub type EntryMutexStd = Arc>; pub type EntryMutexTokio = Arc>; -pub type TorrentsRwLockStd = repository::RwLockStd; -pub type TorrentsRwLockStdMutexStd = repository::RwLockStd; -pub type TorrentsRwLockStdMutexTokio = repository::RwLockStd; -pub type TorrentsRwLockTokio = repository::RwLockTokio; -pub type TorrentsRwLockTokioMutexStd = repository::RwLockTokio; -pub type TorrentsRwLockTokioMutexTokio = repository::RwLockTokio; +pub type TorrentsRwLockStd = RwLockStd; +pub type TorrentsRwLockStdMutexStd = 
RwLockStd; +pub type TorrentsRwLockStdMutexTokio = RwLockStd; +pub type TorrentsRwLockTokio = RwLockTokio; +pub type TorrentsRwLockTokioMutexStd = RwLockTokio; +pub type TorrentsRwLockTokioMutexTokio = RwLockTokio; pub type TorrentsSkipMapMutexStd = CrossbeamSkipList; diff --git a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs index 7ede1f87a..975a876d8 100644 --- a/packages/torrent-repository/src/repository/mod.rs +++ b/packages/torrent-repository/src/repository/mod.rs @@ -41,37 +41,3 @@ pub trait RepositoryAsync: Debug + Default + Sized + 'static { peer: &peer::Peer, ) -> impl std::future::Future + Send; } - -#[derive(Default, Debug)] -pub struct RwLockStd { - torrents: std::sync::RwLock>, -} - -#[derive(Default, Debug)] -pub struct RwLockTokio { - torrents: tokio::sync::RwLock>, -} - -impl RwLockStd { - /// # Panics - /// - /// Panics if unable to get a lock. - pub fn write( - &self, - ) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { - self.torrents.write().expect("it should get lock") - } -} - -impl RwLockTokio { - pub fn write( - &self, - ) -> impl std::future::Future< - Output = tokio::sync::RwLockWriteGuard< - '_, - std::collections::BTreeMap, - >, - > { - self.torrents.write() - } -} diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs index 9d7f29416..e9074a271 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std.rs @@ -11,6 +11,22 @@ use super::Repository; use crate::entry::Entry; use crate::{EntrySingle, TorrentsRwLockStd}; +#[derive(Default, Debug)] +pub struct RwLockStd { + pub(crate) torrents: std::sync::RwLock>, +} + +impl RwLockStd { + /// # Panics + /// + /// Panics if unable to get a lock. 
+ pub fn write( + &self, + ) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { + self.torrents.write().expect("it should get lock") + } +} + impl TorrentsRwLockStd { fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> where diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs index fa84e2451..d84074eaf 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs @@ -11,6 +11,24 @@ use super::RepositoryAsync; use crate::entry::Entry; use crate::{EntrySingle, TorrentsRwLockTokio}; +#[derive(Default, Debug)] +pub struct RwLockTokio { + pub(crate) torrents: tokio::sync::RwLock>, +} + +impl RwLockTokio { + pub fn write( + &self, + ) -> impl std::future::Future< + Output = tokio::sync::RwLockWriteGuard< + '_, + std::collections::BTreeMap, + >, + > { + self.torrents.write() + } +} + impl TorrentsRwLockTokio { async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> where diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 7ffe17dd7..117f3c0a6 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -8,7 +8,8 @@ use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::{NumberOfBytes, PersistentTorrents}; use torrust_tracker_torrent_repository::entry::Entry as _; -use torrust_tracker_torrent_repository::repository::{RwLockStd, RwLockTokio}; +use torrust_tracker_torrent_repository::repository::rw_lock_std::RwLockStd; +use torrust_tracker_torrent_repository::repository::rw_lock_tokio::RwLockTokio; use torrust_tracker_torrent_repository::EntrySingle; use crate::common::repo::Repo; diff --git 
a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index 5d42e8b4d..286a7e047 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -25,8 +25,6 @@ //! - The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network. //! Peer that don not have a full copy of the torrent data are called "leechers". //! - use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; -//pub type Torrents = TorrentsRwLockStdMutexStd; // Currently Used pub type Torrents = TorrentsSkipMapMutexStd; // Currently Used From 12f54e703e78af677fbd4456c359f580e83b1c44 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 8 Apr 2024 14:07:15 +0100 Subject: [PATCH 0785/1003] test: add tests for new torrent repository using SkipMap --- .../src/repository/skip_map_mutex_std.rs | 2 +- .../torrent-repository/tests/common/repo.rs | 165 +++++++++++------- .../tests/repository/mod.rs | 108 ++++++++++-- 3 files changed, 193 insertions(+), 82 deletions(-) diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index aa1f43826..0c0127b15 100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -15,7 +15,7 @@ use crate::{EntryMutexStd, EntrySingle}; #[derive(Default, Debug)] pub struct CrossbeamSkipList { - torrents: SkipMap, + pub torrents: SkipMap, } impl Repository for CrossbeamSkipList diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index 3a4b53d2f..5a86aa3cf 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -7,49 +7,54 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent use torrust_tracker_torrent_repository::repository::{Repository as _, RepositoryAsync 
as _}; use torrust_tracker_torrent_repository::{ EntrySingle, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, - TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, + TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexStd, }; #[derive(Debug)] pub(crate) enum Repo { - Std(TorrentsRwLockStd), - StdMutexStd(TorrentsRwLockStdMutexStd), - StdMutexTokio(TorrentsRwLockStdMutexTokio), - Tokio(TorrentsRwLockTokio), - TokioMutexStd(TorrentsRwLockTokioMutexStd), - TokioMutexTokio(TorrentsRwLockTokioMutexTokio), + RwLockStd(TorrentsRwLockStd), + RwLockStdMutexStd(TorrentsRwLockStdMutexStd), + RwLockStdMutexTokio(TorrentsRwLockStdMutexTokio), + RwLockTokio(TorrentsRwLockTokio), + RwLockTokioMutexStd(TorrentsRwLockTokioMutexStd), + RwLockTokioMutexTokio(TorrentsRwLockTokioMutexTokio), + SkipMapMutexStd(TorrentsSkipMapMutexStd), } impl Repo { pub(crate) async fn get(&self, key: &InfoHash) -> Option { match self { - Repo::Std(repo) => repo.get(key), - Repo::StdMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), - Repo::StdMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), - Repo::Tokio(repo) => repo.get(key).await, - Repo::TokioMutexStd(repo) => Some(repo.get(key).await?.lock().unwrap().clone()), - Repo::TokioMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), + Repo::RwLockStd(repo) => repo.get(key), + Repo::RwLockStdMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), + Repo::RwLockStdMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), + Repo::RwLockTokio(repo) => repo.get(key).await, + Repo::RwLockTokioMutexStd(repo) => Some(repo.get(key).await?.lock().unwrap().clone()), + Repo::RwLockTokioMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), + Repo::SkipMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), } } + pub(crate) async fn get_metrics(&self) -> TorrentsMetrics { match self { - 
Repo::Std(repo) => repo.get_metrics(), - Repo::StdMutexStd(repo) => repo.get_metrics(), - Repo::StdMutexTokio(repo) => repo.get_metrics().await, - Repo::Tokio(repo) => repo.get_metrics().await, - Repo::TokioMutexStd(repo) => repo.get_metrics().await, - Repo::TokioMutexTokio(repo) => repo.get_metrics().await, + Repo::RwLockStd(repo) => repo.get_metrics(), + Repo::RwLockStdMutexStd(repo) => repo.get_metrics(), + Repo::RwLockStdMutexTokio(repo) => repo.get_metrics().await, + Repo::RwLockTokio(repo) => repo.get_metrics().await, + Repo::RwLockTokioMutexStd(repo) => repo.get_metrics().await, + Repo::RwLockTokioMutexTokio(repo) => repo.get_metrics().await, + Repo::SkipMapMutexStd(repo) => repo.get_metrics(), } } + pub(crate) async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { match self { - Repo::Std(repo) => repo.get_paginated(pagination), - Repo::StdMutexStd(repo) => repo + Repo::RwLockStd(repo) => repo.get_paginated(pagination), + Repo::RwLockStdMutexStd(repo) => repo .get_paginated(pagination) .iter() .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) .collect(), - Repo::StdMutexTokio(repo) => { + Repo::RwLockStdMutexTokio(repo) => { let mut v: Vec<(InfoHash, EntrySingle)> = vec![]; for (i, t) in repo.get_paginated(pagination).await { @@ -57,14 +62,14 @@ impl Repo { } v } - Repo::Tokio(repo) => repo.get_paginated(pagination).await, - Repo::TokioMutexStd(repo) => repo + Repo::RwLockTokio(repo) => repo.get_paginated(pagination).await, + Repo::RwLockTokioMutexStd(repo) => repo .get_paginated(pagination) .await .iter() .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) .collect(), - Repo::TokioMutexTokio(repo) => { + Repo::RwLockTokioMutexTokio(repo) => { let mut v: Vec<(InfoHash, EntrySingle)> = vec![]; for (i, t) in repo.get_paginated(pagination).await { @@ -72,76 +77,102 @@ impl Repo { } v } + Repo::SkipMapMutexStd(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, 
t.lock().expect("it should get a lock").clone())) + .collect(), } } + pub(crate) async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { match self { - Repo::Std(repo) => repo.import_persistent(persistent_torrents), - Repo::StdMutexStd(repo) => repo.import_persistent(persistent_torrents), - Repo::StdMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, - Repo::Tokio(repo) => repo.import_persistent(persistent_torrents).await, - Repo::TokioMutexStd(repo) => repo.import_persistent(persistent_torrents).await, - Repo::TokioMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, + Repo::RwLockStd(repo) => repo.import_persistent(persistent_torrents), + Repo::RwLockStdMutexStd(repo) => repo.import_persistent(persistent_torrents), + Repo::RwLockStdMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, + Repo::RwLockTokio(repo) => repo.import_persistent(persistent_torrents).await, + Repo::RwLockTokioMutexStd(repo) => repo.import_persistent(persistent_torrents).await, + Repo::RwLockTokioMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, + Repo::SkipMapMutexStd(repo) => repo.import_persistent(persistent_torrents), } } + pub(crate) async fn remove(&self, key: &InfoHash) -> Option { match self { - Repo::Std(repo) => repo.remove(key), - Repo::StdMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), - Repo::StdMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), - Repo::Tokio(repo) => repo.remove(key).await, - Repo::TokioMutexStd(repo) => Some(repo.remove(key).await?.lock().unwrap().clone()), - Repo::TokioMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), + Repo::RwLockStd(repo) => repo.remove(key), + Repo::RwLockStdMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), + Repo::RwLockStdMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), + Repo::RwLockTokio(repo) => repo.remove(key).await, + 
Repo::RwLockTokioMutexStd(repo) => Some(repo.remove(key).await?.lock().unwrap().clone()), + Repo::RwLockTokioMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), + Repo::SkipMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), } } + pub(crate) async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { match self { - Repo::Std(repo) => repo.remove_inactive_peers(current_cutoff), - Repo::StdMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), - Repo::StdMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, - Repo::Tokio(repo) => repo.remove_inactive_peers(current_cutoff).await, - Repo::TokioMutexStd(repo) => repo.remove_inactive_peers(current_cutoff).await, - Repo::TokioMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::RwLockStd(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::RwLockStdMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::RwLockStdMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::RwLockTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::RwLockTokioMutexStd(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::RwLockTokioMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::SkipMapMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), } } + pub(crate) async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { match self { - Repo::Std(repo) => repo.remove_peerless_torrents(policy), - Repo::StdMutexStd(repo) => repo.remove_peerless_torrents(policy), - Repo::StdMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, - Repo::Tokio(repo) => repo.remove_peerless_torrents(policy).await, - Repo::TokioMutexStd(repo) => repo.remove_peerless_torrents(policy).await, - Repo::TokioMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::RwLockStd(repo) => repo.remove_peerless_torrents(policy), + 
Repo::RwLockStdMutexStd(repo) => repo.remove_peerless_torrents(policy), + Repo::RwLockStdMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::RwLockTokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::RwLockTokioMutexStd(repo) => repo.remove_peerless_torrents(policy).await, + Repo::RwLockTokioMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::SkipMapMutexStd(repo) => repo.remove_peerless_torrents(policy), } } + pub(crate) async fn update_torrent_with_peer_and_get_stats( &self, info_hash: &InfoHash, peer: &peer::Peer, ) -> (bool, SwarmMetadata) { match self { - Repo::Std(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer), - Repo::StdMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer), - Repo::StdMutexTokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, - Repo::Tokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, - Repo::TokioMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, - Repo::TokioMutexTokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, + Repo::RwLockStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer), + Repo::RwLockStdMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer), + Repo::RwLockStdMutexTokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, + Repo::RwLockTokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, + Repo::RwLockTokioMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, + Repo::RwLockTokioMutexTokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, + Repo::SkipMapMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer), } } + pub(crate) async fn insert(&self, info_hash: &InfoHash, torrent: EntrySingle) -> Option { match self { - 
Repo::Std(repo) => repo.write().insert(*info_hash, torrent), - Repo::StdMutexStd(repo) => Some(repo.write().insert(*info_hash, torrent.into())?.lock().unwrap().clone()), - Repo::StdMutexTokio(repo) => { - let r = repo.write().insert(*info_hash, torrent.into()); - match r { - Some(t) => Some(t.lock().await.clone()), - None => None, - } + Repo::RwLockStd(repo) => { + repo.write().insert(*info_hash, torrent); } - Repo::Tokio(repo) => repo.write().await.insert(*info_hash, torrent), - Repo::TokioMutexStd(repo) => Some(repo.write().await.insert(*info_hash, torrent.into())?.lock().unwrap().clone()), - Repo::TokioMutexTokio(repo) => Some(repo.write().await.insert(*info_hash, torrent.into())?.lock().await.clone()), - } + Repo::RwLockStdMutexStd(repo) => { + repo.write().insert(*info_hash, torrent.into()); + } + Repo::RwLockStdMutexTokio(repo) => { + repo.write().insert(*info_hash, torrent.into()); + } + Repo::RwLockTokio(repo) => { + repo.write().await.insert(*info_hash, torrent); + } + Repo::RwLockTokioMutexStd(repo) => { + repo.write().await.insert(*info_hash, torrent.into()); + } + Repo::RwLockTokioMutexTokio(repo) => { + repo.write().await.insert(*info_hash, torrent.into()); + } + Repo::SkipMapMutexStd(repo) => { + repo.torrents.insert(*info_hash, torrent.into()); + } + }; + self.get(info_hash).await } } diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 117f3c0a6..ab9648584 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -10,6 +10,7 @@ use torrust_tracker_primitives::{NumberOfBytes, PersistentTorrents}; use torrust_tracker_torrent_repository::entry::Entry as _; use torrust_tracker_torrent_repository::repository::rw_lock_std::RwLockStd; use torrust_tracker_torrent_repository::repository::rw_lock_tokio::RwLockTokio; +use torrust_tracker_torrent_repository::repository::skip_map_mutex_std::CrossbeamSkipList; use 
torrust_tracker_torrent_repository::EntrySingle; use crate::common::repo::Repo; @@ -17,30 +18,37 @@ use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; #[fixture] fn standard() -> Repo { - Repo::Std(RwLockStd::default()) + Repo::RwLockStd(RwLockStd::default()) } + #[fixture] fn standard_mutex() -> Repo { - Repo::StdMutexStd(RwLockStd::default()) + Repo::RwLockStdMutexStd(RwLockStd::default()) } #[fixture] fn standard_tokio() -> Repo { - Repo::StdMutexTokio(RwLockStd::default()) + Repo::RwLockStdMutexTokio(RwLockStd::default()) } #[fixture] fn tokio_std() -> Repo { - Repo::Tokio(RwLockTokio::default()) + Repo::RwLockTokio(RwLockTokio::default()) } + #[fixture] fn tokio_mutex() -> Repo { - Repo::TokioMutexStd(RwLockTokio::default()) + Repo::RwLockTokioMutexStd(RwLockTokio::default()) } #[fixture] fn tokio_tokio() -> Repo { - Repo::TokioMutexTokio(RwLockTokio::default()) + Repo::RwLockTokioMutexTokio(RwLockTokio::default()) +} + +#[fixture] +fn skip_list_std() -> Repo { + Repo::SkipMapMutexStd(CrossbeamSkipList::default()) } type Entries = Vec<(InfoHash, EntrySingle)>; @@ -224,7 +232,16 @@ fn policy_remove_persist() -> TrackerPolicy { #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_get_a_torrent_entry( - #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_std() + )] + repo: Repo, #[case] entries: Entries, ) { make(&repo, &entries).await; @@ -247,7 +264,16 @@ async fn it_should_get_a_torrent_entry( #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( - #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), 
+ tokio_tokio(), + skip_list_std() + )] + repo: Repo, #[case] entries: Entries, many_out_of_order: Entries, ) { @@ -280,7 +306,16 @@ async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_get_paginated( - #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_std() + )] + repo: Repo, #[case] entries: Entries, #[values(paginated_limit_zero(), paginated_limit_one(), paginated_limit_one_offset_one())] paginated: Pagination, ) { @@ -328,7 +363,16 @@ async fn it_should_get_paginated( #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_get_metrics( - #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_std() + )] + repo: Repo, #[case] entries: Entries, ) { use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; @@ -360,7 +404,16 @@ async fn it_should_get_metrics( #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_import_persistent_torrents( - #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_std() + )] + repo: Repo, #[case] entries: Entries, #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, ) { @@ -389,7 +442,16 @@ async fn it_should_import_persistent_torrents( #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_remove_an_entry( - #[values(standard(), standard_mutex(), standard_tokio(), 
tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_std() + )] + repo: Repo, #[case] entries: Entries, ) { make(&repo, &entries).await; @@ -416,7 +478,16 @@ async fn it_should_remove_an_entry( #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_remove_inactive_peers( - #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_std() + )] + repo: Repo, #[case] entries: Entries, ) { use std::ops::Sub as _; @@ -489,7 +560,16 @@ async fn it_should_remove_inactive_peers( #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_remove_peerless_torrents( - #[values(standard(), standard_mutex(), standard_tokio(), tokio_std(), tokio_mutex(), tokio_tokio())] repo: Repo, + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_std() + )] + repo: Repo, #[case] entries: Entries, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { From 537349376e4b5cc1d8b0347e2806ceef7a5e9353 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 8 Apr 2024 16:39:06 +0100 Subject: [PATCH 0786/1003] chore(deps): update dependencies ```output cargo update Updating crates.io index Updating async-compression v0.4.6 -> v0.4.8 Updating async-executor v1.8.0 -> v1.10.0 Updating autocfg v1.1.0 -> v1.2.0 Adding base64 v0.22.0 Updating borsh v1.3.1 -> v1.4.0 Updating borsh-derive v1.3.1 -> v1.4.0 Updating brotli v3.5.0 -> v4.0.0 Updating brotli-decompressor v2.5.1 -> v3.0.0 Updating bumpalo v3.15.4 -> v3.16.0 Updating cc v1.0.90 -> v1.0.91 Updating chrono v0.4.35 -> v0.4.37 Updating event-listener v5.2.0 -> v5.3.0 Updating 
event-listener-strategy v0.5.0 -> v0.5.1 Updating getrandom v0.2.12 -> v0.2.14 Updating h2 v0.4.3 -> v0.4.4 Updating half v2.4.0 -> v2.4.1 Updating itoa v1.0.10 -> v1.0.11 Updating memchr v2.7.1 -> v2.7.2 Updating openssl-sys v0.9.101 -> v0.9.102 Updating pest v2.7.8 -> v2.7.9 Updating pest_derive v2.7.8 -> v2.7.9 Updating pest_generator v2.7.8 -> v2.7.9 Updating pest_meta v2.7.8 -> v2.7.9 Updating pin-project-lite v0.2.13 -> v0.2.14 Updating regex-syntax v0.8.2 -> v0.8.3 Updating reqwest v0.12.2 -> v0.12.3 Updating rust_decimal v1.34.3 -> v1.35.0 Removing rustls-pemfile v1.0.4 Removing rustls-pemfile v2.1.1 Adding rustls-pemfile v2.1.2 Updating rustls-pki-types v1.4.0 -> v1.4.1 Updating rustversion v1.0.14 -> v1.0.15 Updating security-framework v2.9.2 -> v2.10.0 Updating security-framework-sys v2.9.1 -> v2.10.0 Updating serde_html_form v0.2.5 -> v0.2.6 Updating serde_json v1.0.114 -> v1.0.115 Updating strsim v0.11.0 -> v0.11.1 Updating syn v2.0.55 -> v2.0.58 Updating sync_wrapper v1.0.0 -> v1.0.1 Updating tokio v1.36.0 -> v1.37.0 Updating winreg v0.50.0 -> v0.52.0 Updating zstd v0.13.0 -> v0.13.1 Updating zstd-safe v7.0.0 -> v7.1.0 Updating zstd-sys v2.0.9+zstd.1.5.5 -> v2.0.10+zstd.1.5.6 ``` --- Cargo.lock | 261 ++++++++++++++++++++++++++--------------------------- 1 file changed, 129 insertions(+), 132 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 74d9aaafd..853d21533 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -195,17 +195,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f28243a43d821d11341ab73c80bed182dc015c514b951616cf79bd4af39af0c3" dependencies = [ "concurrent-queue", - "event-listener 5.2.0", - "event-listener-strategy 0.5.0", + "event-listener 5.3.0", + "event-listener-strategy 0.5.1", "futures-core", "pin-project-lite", ] [[package]] name = "async-compression" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a116f46a969224200a0a97f29cfd4c50e7534e4b4826bd23ea2c3c533039c82c" +checksum = "07dbbf24db18d609b1462965249abdf49129ccad073ec257da372adc83259c60" dependencies = [ "brotli", "flate2", @@ -219,9 +219,9 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" +checksum = "5f98c37cf288e302c16ef6c8472aad1e034c6c84ce5ea7b8101c98eb4a802fee" dependencies = [ "async-lock 3.3.0", "async-task", @@ -347,7 +347,7 @@ checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -358,9 +358,9 @@ checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" [[package]] name = "axum" @@ -389,7 +389,7 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper 1.0.0", + "sync_wrapper 1.0.1", "tokio", "tower", "tower-layer", @@ -461,7 +461,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -480,7 +480,7 @@ dependencies = [ "hyper-util", "pin-project-lite", "rustls", - "rustls-pemfile 2.1.1", + "rustls-pemfile", "tokio", "tokio-rustls", "tower", @@ -508,6 +508,12 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +[[package]] +name = "base64" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" + [[package]] name = "bigdecimal" version = "0.3.1" @@ -542,7 +548,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -599,9 +605,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f58b559fd6448c6e2fd0adb5720cd98a2506594cafa4737ff98c396f3e82f667" +checksum = "0901fc8eb0aca4c83be0106d6f2db17d86a08dfc2c25f0e84464bf381158add6" dependencies = [ "borsh-derive", "cfg_aliases", @@ -609,23 +615,23 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aadb5b6ccbd078890f6d7003694e33816e6b784358f18e15e7e6d9f065a57cd" +checksum = "51670c3aa053938b0ee3bd67c3817e471e626151131b934038e83c5bf8de48f5" dependencies = [ "once_cell", "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", "syn_derive", ] [[package]] name = "brotli" -version = "3.5.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d640d25bc63c50fb1f0b545ffd80207d2e10a4c965530809b40ba3386825c391" +checksum = "125740193d7fee5cc63ab9e16c2fdc4e07c74ba755cc53b327d6ea029e9fc569" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -634,9 +640,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "2.5.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e2e4afe60d7dd600fdd3de8d0f08c2b7ec039712e3b6137ff98b7004e82de4f" +checksum = "65622a320492e09b5e0ac436b14c54ff68199bac392d0e89a6832c4518eea525" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -650,9 +656,9 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.15.4" +version = "3.16.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytecheck" @@ -696,9 +702,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.90" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" +checksum = "1fd97381a8cc6493395a5afc4c691c1084b3768db713b73aa215217aa245d153" dependencies = [ "jobserver", "libc", @@ -727,9 +733,9 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "chrono" -version = "0.4.35" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eaf5903dcbc0a39312feb77df2ff4c76387d591b9fc7b04a238dcf8bb62639a" +checksum = "8a0d04d43504c61aa6c7531f1871dd0d418d91130162063b789da00fd7057a5e" dependencies = [ "android-tzdata", "iana-time-zone", @@ -795,7 +801,7 @@ dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.11.0", + "strsim 0.11.1", ] [[package]] @@ -807,7 +813,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -1080,7 +1086,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -1091,7 +1097,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -1125,7 +1131,7 @@ checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -1213,9 +1219,9 @@ dependencies = [ [[package]] name = "event-listener" -version = "5.2.0" +version = "5.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91" +checksum = "6d9944b8ca13534cdfb2800775f8dd4902ff3fc75a50101466decadfdf322a24" dependencies = [ "concurrent-queue", "parking", @@ -1234,11 +1240,11 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feedafcaa9b749175d5ac357452a9d41ea2911da598fde46ce1fe02c37751291" +checksum = "332f51cb23d20b0de8458b86580878211da09bcd4503cb579c225b3d124cabb3" dependencies = [ - "event-listener 5.2.0", + "event-listener 5.3.0", "pin-project-lite", ] @@ -1360,7 +1366,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -1372,7 +1378,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -1384,7 +1390,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -1477,7 +1483,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -1528,9 +1534,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", "libc", @@ -1563,9 +1569,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51ee2dd2e4f378392eeff5d51618cd9a63166a2513846bbc55f21cfacd9199d4" +checksum = 
"816ec7294445779408f36fe57bc5b7fc1cf59664059096c65f905c1c61f58069" dependencies = [ "bytes", "fnv", @@ -1582,9 +1588,9 @@ dependencies = [ [[package]] name = "half" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5eceaaeec696539ddaf7b333340f1af35a5aa87ae3e4f3ead0532f72affab2e" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" dependencies = [ "cfg-if", "crunchy", @@ -1887,9 +1893,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" @@ -2118,9 +2124,9 @@ checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "mime" @@ -2178,7 +2184,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -2229,7 +2235,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", "termcolor", "thiserror", ] @@ -2240,7 +2246,7 @@ version = "0.30.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57349d5a326b437989b6ee4dc8f2f34b0cc131202748414712a8e7d98952fc8c" dependencies = [ - "base64", + "base64 0.21.7", "bigdecimal", "bindgen", "bitflags 2.5.0", @@ -2429,7 +2435,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -2440,9 +2446,9 @@ checksum = 
"ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.101" +version = "0.9.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dda2b0f344e78efc2facf7d195d098df0dd72151b26ab98da807afc26c198dff" +checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" dependencies = [ "cc", "libc", @@ -2501,7 +2507,7 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" dependencies = [ - "base64", + "base64 0.21.7", "serde", ] @@ -2513,9 +2519,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.8" +version = "2.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f8023d0fb78c8e03784ea1c7f3fa36e68a723138990b8d5a47d916b651e7a8" +checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" dependencies = [ "memchr", "thiserror", @@ -2524,9 +2530,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.8" +version = "2.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0d24f72393fd16ab6ac5738bc33cdb6a9aa73f8b902e8fe29cf4e67d7dd1026" +checksum = "f73541b156d32197eecda1a4014d7f868fd2bcb3c550d5386087cfba442bf69c" dependencies = [ "pest", "pest_generator", @@ -2534,22 +2540,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.8" +version = "2.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc17e2a6c7d0a492f0158d7a4bd66cc17280308bbaff78d5bef566dca35ab80" +checksum = "c35eeed0a3fab112f75165fdc026b3913f4183133f19b49be773ac9ea966e8bd" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] name = "pest_meta" -version = "2.7.8" +version = "2.7.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "934cd7631c050f4674352a6e835d5f6711ffbfb9345c2fc0107155ac495ae293" +checksum = "2adbf29bb9776f28caece835398781ab24435585fe0d4dc1374a61db5accedca" dependencies = [ "once_cell", "pest", @@ -2611,14 +2617,14 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -2943,9 +2949,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "relative-path" @@ -2964,11 +2970,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d66674f2b6fb864665eea7a3c1ac4e3dfacd2fda83cf6f935a612e01b0e3338" +checksum = "3e6cc1e89e689536eb5aeede61520e874df5a4707df811cd5da4aa5fbb2aae19" dependencies = [ - "base64", + "base64 0.22.0", "bytes", "encoding_rs", "futures-core", @@ -2988,7 +2994,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile 1.0.4", + "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", @@ -3063,7 +3069,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ - "base64", + "base64 0.21.7", "bitflags 2.5.0", "serde", 
"serde_derive", @@ -3094,7 +3100,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.55", + "syn 2.0.58", "unicode-ident", ] @@ -3124,9 +3130,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.34.3" +version = "1.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b39449a79f45e8da28c57c341891b69a183044b29518bb8f86dbac9df60bb7df" +checksum = "1790d1c4c0ca81211399e0e0af16333276f375209e71a37b67698a373db5b47a" dependencies = [ "arrayvec", "borsh", @@ -3200,28 +3206,19 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.4" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64", -] - -[[package]] -name = "rustls-pemfile" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f48172685e6ff52a556baa527774f61fcaa884f59daf3375c62a3f1cd2549dab" -dependencies = [ - "base64", + "base64 0.22.0", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "868e20fada228fefaf6b652e00cc73623d54f8171e7352c18bb281571f2d92da" +checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" [[package]] name = "rustls-webpki" @@ -3235,9 +3232,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" [[package]] name = "ryu" @@ -3302,9 +3299,9 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = 
"security-framework" -version = "2.9.2" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -3315,9 +3312,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" dependencies = [ "core-foundation-sys", "libc", @@ -3365,14 +3362,14 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] name = "serde_html_form" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50437e6a58912eecc08865e35ea2e8d365fbb2db0debb1c8bb43bf1faf055f25" +checksum = "8de514ef58196f1fc96dcaef80fe6170a1ce6215df9687a93fe8300e773fefc5" dependencies = [ "form_urlencoded", "indexmap 2.2.6", @@ -3383,9 +3380,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" dependencies = [ "itoa", "ryu", @@ -3410,7 +3407,7 @@ checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -3440,7 +3437,7 @@ version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee80b0e361bbf88fd2f6e242ccd19cfda072cb0faa6ae694ecee08199938569a" 
dependencies = [ - "base64", + "base64 0.21.7", "chrono", "hex", "indexmap 1.9.3", @@ -3461,7 +3458,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -3568,9 +3565,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "strsim" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "subprocess" @@ -3595,9 +3592,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.55" +version = "2.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "002a1b3dbf967edfafc32655d0f377ab0bb7b994aa1d32c8cc7e9b8bf3ebb8f0" +checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" dependencies = [ "proc-macro2", "quote", @@ -3613,7 +3610,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -3624,9 +3621,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sync_wrapper" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "384595c11a4e2969895cad5a8c4029115f5ab956a9e5ef4de79d11a426e5f20c" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" [[package]] name = "system-configuration" @@ -3716,7 +3713,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -3786,9 +3783,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.36.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", @@ -3810,7 +3807,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -4114,7 +4111,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -4285,7 +4282,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", "wasm-bindgen-shared", ] @@ -4319,7 +4316,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4532,9 +4529,9 @@ dependencies = [ [[package]] name = "winreg" -version = "0.50.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" dependencies = [ "cfg-if", "windows-sys 0.48.0", @@ -4575,32 +4572,32 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] name = "zstd" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bffb3309596d527cfcba7dfc6ed6052f1d39dfbd7c867aa2e865e4a449c10110" +checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "7.0.0" +version = "7.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"43747c7422e2924c11144d5229878b98180ef8b06cca4ab5af37afc8a8d8ea3e" +checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.9+zstd.1.5.5" +version = "2.0.10+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" dependencies = [ "cc", "pkg-config", From 78b46c41a5c50f5016f745634fe5101969e5a828 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 9 Apr 2024 13:24:41 +0100 Subject: [PATCH 0787/1003] chore(deps): add cargo dependency: dashmap It will be used to create a new torrent repository implementation, using a DashMap for the torrent list. DashMap crate: https://crates.io/crates/dashmap --- Cargo.lock | 15 +++++++++++++++ Cargo.toml | 1 + cSpell.json | 1 + packages/torrent-repository/Cargo.toml | 1 + 4 files changed, 18 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 853d21533..6b9be523c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1100,6 +1100,19 @@ dependencies = [ "syn 2.0.58", ] +[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown 0.14.3", + "lock_api", + "once_cell", + "parking_lot_core", +] + [[package]] name = "deranged" version = "0.3.11" @@ -3916,6 +3929,7 @@ dependencies = [ "colored", "config", "crossbeam-skiplist", + "dashmap", "derive_more", "fern", "futures", @@ -4022,6 +4036,7 @@ dependencies = [ "async-std", "criterion", "crossbeam-skiplist", + "dashmap", "futures", "rstest", "tokio", diff --git a/Cargo.toml b/Cargo.toml index f440799cc..dfb06168d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,6 +42,7 @@ clap = { version = "4", features = ["derive", "env"] } colored = "2" config = 
"0" crossbeam-skiplist = "0.1" +dashmap = "5.5.3" derive_more = "0" fern = "0" futures = "0" diff --git a/cSpell.json b/cSpell.json index 0480590af..24ef6b0a0 100644 --- a/cSpell.json +++ b/cSpell.json @@ -163,6 +163,7 @@ "Weidendorfer", "Werror", "whitespaces", + "Xacrimon", "XBTT", "Xdebug", "Xeon", diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 5f1a20d32..6bc8bfcdd 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -17,6 +17,7 @@ version.workspace = true [dependencies] crossbeam-skiplist = "0.1" +dashmap = "5.5.3" futures = "0.3.29" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = "3.0.0-alpha.12-develop", path = "../clock" } From 00ee9db340c5274a63aa5f321e3bd5064e967b6c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 9 Apr 2024 14:17:10 +0100 Subject: [PATCH 0788/1003] feat: [#565] new torrent repository implementation usind DashMap It's not enabled as the deafult repository becuase DashMap does not return the items in order. 
Some tests fail: ``` output failures: ---- core::services::torrent::tests::searching_for_torrents::should_return_torrents_ordered_by_info_hash stdout ---- thread 'core::services::torrent::tests::searching_for_torrents::should_return_torrents_ordered_by_info_hash' panicked at src/core/services/torrent.rs:303:13: assertion `left == right` failed left: [BasicInfo { info_hash: InfoHash([158, 2, 23, 208, 250, 113, 200, 115, 50, 205, 139, 249, 219, 234, 188, 178, 194, 207, 60, 77]), seeders: 1, completed: 0, leechers: 0 }, BasicInfo { info_hash: InfoHash([3, 132, 5, 72, 100, 58, 242, 167, 182, 58, 159, 92, 188, 163, 72, 188, 113, 80, 202, 58]), seeders: 1, completed: 0, leechers: 0 }] right: [BasicInfo { info_hash: InfoHash([3, 132, 5, 72, 100, 58, 242, 167, 182, 58, 159, 92, 188, 163, 72, 188, 113, 80, 202, 58]), seeders: 1, completed: 0, leechers: 0 }, BasicInfo { info_hash: InfoHash([158, 2, 23, 208, 250, 113, 200, 115, 50, 205, 139, 249, 219, 234, 188, 178, 194, 207, 60, 77]), seeders: 1, completed: 0, leechers: 0 }] note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace failures: core::services::torrent::tests::searching_for_torrents::should_return_torrents_ordered_by_info_hash test result: FAILED. 212 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out; finished in 1.18s error: test failed, to rerun pass `--lib` ``` On the other hand, to use it, a new data strcuture should be added to the repo: An Index with sorted torrents by Infohash The API uses pagination returning torrents in alphabetically order by InfoHash. Adding such an Index would probably decrease the performace of this repository implementation. And it's performace looks similar to the current SkipMap implementation. 
SkipMap performace with Aquatic UDP load test: ``` Requests out: 396788.68/second Responses in: 357105.27/second - Connect responses: 176662.91 - Announce responses: 176863.44 - Scrape responses: 3578.91 - Error responses: 0.00 Peers per announce response: 0.00 Announce responses per info hash: - p10: 1 - p25: 1 - p50: 1 - p75: 1 - p90: 2 - p95: 3 - p99: 105 - p99.9: 287 - p100: 351 ``` DashMap performace with Aquatic UDP load test: ``` Requests out: 410658.38/second Responses in: 365892.86/second - Connect responses: 181258.91 - Announce responses: 181005.95 - Scrape responses: 3628.00 - Error responses: 0.00 Peers per announce response: 0.00 Announce responses per info hash: - p10: 1 - p25: 1 - p50: 1 - p75: 1 - p90: 2 - p95: 3 - p99: 104 - p99.9: 295 - p100: 363 ``` --- .../benches/repository_benchmark.rs | 23 +++- packages/torrent-repository/src/lib.rs | 2 + .../src/repository/dash_map_mutex_std.rs | 106 ++++++++++++++++++ .../torrent-repository/src/repository/mod.rs | 1 + .../torrent-repository/tests/common/repo.rs | 20 +++- .../tests/repository/mod.rs | 30 +++-- 6 files changed, 170 insertions(+), 12 deletions(-) create mode 100644 packages/torrent-repository/src/repository/dash_map_mutex_std.rs diff --git a/packages/torrent-repository/benches/repository_benchmark.rs b/packages/torrent-repository/benches/repository_benchmark.rs index 65608c86c..58cd70d9a 100644 --- a/packages/torrent-repository/benches/repository_benchmark.rs +++ b/packages/torrent-repository/benches/repository_benchmark.rs @@ -4,8 +4,8 @@ mod helpers; use criterion::{criterion_group, criterion_main, Criterion}; use torrust_tracker_torrent_repository::{ - TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, - TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexStd, + TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, + TorrentsRwLockTokioMutexStd, 
TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexStd, }; use crate::helpers::{asyn, sync}; @@ -49,6 +49,10 @@ fn add_one_torrent(c: &mut Criterion) { b.iter_custom(sync::add_one_torrent::); }); + group.bench_function("DashMapMutexStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + group.finish(); } @@ -98,6 +102,11 @@ fn add_multiple_torrents_in_parallel(c: &mut Criterion) { .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); }); + group.bench_function("DashMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + group.finish(); } @@ -147,6 +156,11 @@ fn update_one_torrent_in_parallel(c: &mut Criterion) { .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); }); + group.bench_function("DashMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + group.finish(); } @@ -197,6 +211,11 @@ fn update_multiple_torrents_in_parallel(c: &mut Criterion) { .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); }); + group.bench_function("DashMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + group.finish(); } diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index ccaf579e3..7a6d209b9 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use repository::dash_map_mutex_std::XacrimonDashMap; use repository::rw_lock_std::RwLockStd; use repository::rw_lock_tokio::RwLockTokio; use repository::skip_map_mutex_std::CrossbeamSkipList; @@ -20,6 +21,7 @@ pub type TorrentsRwLockTokioMutexStd = RwLockTokio; pub type TorrentsRwLockTokioMutexTokio = RwLockTokio; pub type TorrentsSkipMapMutexStd = CrossbeamSkipList; +pub type TorrentsDashMapMutexStd = 
XacrimonDashMap; /// This code needs to be copied into each crate. /// Working version, for production. diff --git a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs new file mode 100644 index 000000000..67c47973e --- /dev/null +++ b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs @@ -0,0 +1,106 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use dashmap::DashMap; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::Repository; +use crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle}; + +#[derive(Default, Debug)] +pub struct XacrimonDashMap { + pub torrents: DashMap, +} + +impl Repository for XacrimonDashMap +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + if let Some(entry) = self.torrents.get(info_hash) { + entry.insert_or_update_peer_and_get_stats(peer) + } else { + let _unused = self.torrents.insert(*info_hash, Arc::default()); + + match self.torrents.get(info_hash) { + Some(entry) => entry.insert_or_update_peer_and_get_stats(peer), + None => (false, SwarmMetadata::zeroed()), + } + } + } + + fn get(&self, key: &InfoHash) -> Option { + let maybe_entry = self.torrents.get(key); + maybe_entry.map(|entry| entry.clone()) + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in &self.torrents { + let stats = entry.value().lock().expect("it should get a lock").get_stats(); + metrics.complete += 
u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + match pagination { + Some(pagination) => self + .torrents + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .torrents + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + for (info_hash, completed) in persistent_torrents { + if self.torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexStd::new( + EntrySingle { + peers: BTreeMap::default(), + downloaded: *completed, + } + .into(), + ); + + self.torrents.insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|(_key, value)| value.clone()) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { + entry.value().remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + self.torrents.retain(|_, entry| entry.is_good(policy)); + } +} diff --git a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs index 975a876d8..c7c64c54a 100644 --- a/packages/torrent-repository/src/repository/mod.rs +++ b/packages/torrent-repository/src/repository/mod.rs @@ -5,6 +5,7 @@ use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +pub mod dash_map_mutex_std; pub mod rw_lock_std; pub mod rw_lock_std_mutex_std; pub mod rw_lock_std_mutex_tokio; diff 
--git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index 5a86aa3cf..5a6eddf97 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -6,8 +6,8 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; use torrust_tracker_torrent_repository::repository::{Repository as _, RepositoryAsync as _}; use torrust_tracker_torrent_repository::{ - EntrySingle, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, - TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexStd, + EntrySingle, TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, + TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexStd, }; #[derive(Debug)] @@ -19,6 +19,7 @@ pub(crate) enum Repo { RwLockTokioMutexStd(TorrentsRwLockTokioMutexStd), RwLockTokioMutexTokio(TorrentsRwLockTokioMutexTokio), SkipMapMutexStd(TorrentsSkipMapMutexStd), + DashMapMutexStd(TorrentsDashMapMutexStd), } impl Repo { @@ -31,6 +32,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => Some(repo.get(key).await?.lock().unwrap().clone()), Repo::RwLockTokioMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), Repo::SkipMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), + Repo::DashMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), } } @@ -43,6 +45,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.get_metrics().await, Repo::RwLockTokioMutexTokio(repo) => repo.get_metrics().await, Repo::SkipMapMutexStd(repo) => repo.get_metrics(), + Repo::DashMapMutexStd(repo) => repo.get_metrics(), } } @@ -82,6 +85,11 @@ impl Repo { .iter() .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) .collect(), + 
Repo::DashMapMutexStd(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) + .collect(), } } @@ -94,6 +102,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.import_persistent(persistent_torrents).await, Repo::RwLockTokioMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, Repo::SkipMapMutexStd(repo) => repo.import_persistent(persistent_torrents), + Repo::DashMapMutexStd(repo) => repo.import_persistent(persistent_torrents), } } @@ -106,6 +115,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => Some(repo.remove(key).await?.lock().unwrap().clone()), Repo::RwLockTokioMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), Repo::SkipMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), + Repo::DashMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), } } @@ -118,6 +128,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.remove_inactive_peers(current_cutoff).await, Repo::RwLockTokioMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, Repo::SkipMapMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::DashMapMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), } } @@ -130,6 +141,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.remove_peerless_torrents(policy).await, Repo::RwLockTokioMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, Repo::SkipMapMutexStd(repo) => repo.remove_peerless_torrents(policy), + Repo::DashMapMutexStd(repo) => repo.remove_peerless_torrents(policy), } } @@ -146,6 +158,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, Repo::RwLockTokioMutexTokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, Repo::SkipMapMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer), + Repo::DashMapMutexStd(repo) => 
repo.update_torrent_with_peer_and_get_stats(info_hash, peer), } } @@ -172,6 +185,9 @@ impl Repo { Repo::SkipMapMutexStd(repo) => { repo.torrents.insert(*info_hash, torrent.into()); } + Repo::DashMapMutexStd(repo) => { + repo.torrents.insert(*info_hash, torrent.into()); + } }; self.get(info_hash).await } diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index ab9648584..1854b89ac 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -8,6 +8,7 @@ use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::{NumberOfBytes, PersistentTorrents}; use torrust_tracker_torrent_repository::entry::Entry as _; +use torrust_tracker_torrent_repository::repository::dash_map_mutex_std::XacrimonDashMap; use torrust_tracker_torrent_repository::repository::rw_lock_std::RwLockStd; use torrust_tracker_torrent_repository::repository::rw_lock_tokio::RwLockTokio; use torrust_tracker_torrent_repository::repository::skip_map_mutex_std::CrossbeamSkipList; @@ -51,6 +52,11 @@ fn skip_list_std() -> Repo { Repo::SkipMapMutexStd(CrossbeamSkipList::default()) } +#[fixture] +fn dash_map_std() -> Repo { + Repo::DashMapMutexStd(XacrimonDashMap::default()) +} + type Entries = Vec<(InfoHash, EntrySingle)>; #[fixture] @@ -239,7 +245,8 @@ async fn it_should_get_a_torrent_entry( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std() + skip_list_std(), + dash_map_std() )] repo: Repo, #[case] entries: Entries, @@ -271,7 +278,8 @@ async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std() + skip_list_std(), + dash_map_std() )] repo: Repo, #[case] entries: Entries, @@ -313,7 +321,8 @@ async fn it_should_get_paginated( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std() + skip_list_std(), + dash_map_std() )] 
repo: Repo, #[case] entries: Entries, @@ -370,7 +379,8 @@ async fn it_should_get_metrics( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std() + skip_list_std(), + dash_map_std() )] repo: Repo, #[case] entries: Entries, @@ -411,7 +421,8 @@ async fn it_should_import_persistent_torrents( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std() + skip_list_std(), + dash_map_std() )] repo: Repo, #[case] entries: Entries, @@ -449,7 +460,8 @@ async fn it_should_remove_an_entry( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std() + skip_list_std(), + dash_map_std() )] repo: Repo, #[case] entries: Entries, @@ -485,7 +497,8 @@ async fn it_should_remove_inactive_peers( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std() + skip_list_std(), + dash_map_std() )] repo: Repo, #[case] entries: Entries, @@ -567,7 +580,8 @@ async fn it_should_remove_peerless_torrents( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std() + skip_list_std(), + dash_map_std() )] repo: Repo, #[case] entries: Entries, From 1e76c1700cd1cb5e69e01df34dfe19a7a9828153 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 9 Apr 2024 16:35:35 +0100 Subject: [PATCH 0789/1003] chore: add dashmap cargo dep to cargp machete It's only used for benchmarking. --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index dfb06168d..ef0c39d4b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,7 +78,7 @@ url = "2" uuid = { version = "1", features = ["v4"] } [package.metadata.cargo-machete] -ignored = ["serde_bytes", "crossbeam-skiplist"] +ignored = ["serde_bytes", "crossbeam-skiplist", "dashmap"] [dev-dependencies] local-ip-address = "0" From 4030fd12b575c039dbb1507fbe99e0aaec58fd5f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 9 Apr 2024 17:12:07 +0100 Subject: [PATCH 0790/1003] fix: torrent repository tests. 
DashMap is not ordered DashMap implementation does not support returning torrent ordered, so we have to skip those tests for this reporitoy implementation. --- packages/torrent-repository/tests/repository/mod.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 1854b89ac..a6784bf57 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -278,8 +278,7 @@ async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std(), - dash_map_std() + skip_list_std() )] repo: Repo, #[case] entries: Entries, @@ -321,8 +320,7 @@ async fn it_should_get_paginated( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std(), - dash_map_std() + skip_list_std() )] repo: Repo, #[case] entries: Entries, From aa4bfbaf5496de32a02bc1d23779ba200974f2a0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 12 Apr 2024 15:54:49 +0100 Subject: [PATCH 0791/1003] refactor: segregate command and query for announce request This changes the API of the torrent repository. The method: ``` fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata); ``` is replaced with: ``` fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer); fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option; ``` The performance is not affected. Benchmaring is still using both methods in order to simulate `announce` requests. 1. The interface is simpler (command/query segregation. 2. In the long-term: - Returning swarm metadata in the announce request could be optional. The announce request process would be faster if the tracker does not have to mantain the swarm data. This is not likely to happen becuase the scrape request needs this metadata. 
- New repository performance improvements could be implemented. This allow decoupling peer lists from swarm metadata. The repository internally can have two data strcutures one for the peer list and another for the swarm metatada. Both using different locks. --- .../benches/helpers/asyn.rs | 34 +++++------ .../benches/helpers/sync.rs | 22 +++++-- packages/torrent-repository/src/entry/mod.rs | 20 ++---- .../torrent-repository/src/entry/mutex_std.rs | 14 ++--- .../src/entry/mutex_tokio.rs | 12 ++-- .../torrent-repository/src/entry/single.rs | 10 +-- .../src/repository/dash_map_mutex_std.rs | 16 ++--- .../torrent-repository/src/repository/mod.rs | 10 ++- .../src/repository/rw_lock_std.rs | 10 ++- .../src/repository/rw_lock_std_mutex_std.rs | 12 +++- .../src/repository/rw_lock_std_mutex_tokio.rs | 16 ++++- .../src/repository/rw_lock_tokio.rs | 11 +++- .../src/repository/rw_lock_tokio_mutex_std.rs | 11 +++- .../repository/rw_lock_tokio_mutex_tokio.rs | 14 ++++- .../src/repository/skip_map_mutex_std.rs | 10 ++- .../torrent-repository/tests/common/repo.rs | 43 +++++++------ .../tests/common/torrent.rs | 22 +++---- .../torrent-repository/tests/entry/mod.rs | 41 +++++++------ .../tests/repository/mod.rs | 40 ++++++++---- src/core/mod.rs | 61 +++++++++++-------- src/core/services/torrent.rs | 38 ++++-------- src/servers/udp/handlers.rs | 12 +--- tests/servers/api/environment.rs | 2 +- tests/servers/http/environment.rs | 2 +- tests/servers/udp/environment.rs | 2 +- 25 files changed, 259 insertions(+), 226 deletions(-) diff --git a/packages/torrent-repository/benches/helpers/asyn.rs b/packages/torrent-repository/benches/helpers/asyn.rs index 80f70cdc2..1c6d9d915 100644 --- a/packages/torrent-repository/benches/helpers/asyn.rs +++ b/packages/torrent-repository/benches/helpers/asyn.rs @@ -18,9 +18,9 @@ where let info_hash = InfoHash([0; 20]); - torrent_repository - .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) - .await; + 
torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER).await; + + torrent_repository.get_swarm_metadata(&info_hash).await; } start.elapsed() @@ -37,9 +37,9 @@ where let handles = FuturesUnordered::new(); // Add the torrent/peer to the torrent repository - torrent_repository - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; + torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER).await; + + torrent_repository.get_swarm_metadata(info_hash).await; let start = Instant::now(); @@ -47,9 +47,9 @@ where let torrent_repository_clone = torrent_repository.clone(); let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; + torrent_repository_clone.upsert_peer(info_hash, &DEFAULT_PEER).await; + + torrent_repository_clone.get_swarm_metadata(info_hash).await; if let Some(sleep_time) = sleep { let start_time = std::time::Instant::now(); @@ -87,9 +87,9 @@ where let torrent_repository_clone = torrent_repository.clone(); let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) - .await; + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER).await; + + torrent_repository_clone.get_swarm_metadata(&info_hash).await; if let Some(sleep_time) = sleep { let start_time = std::time::Instant::now(); @@ -123,9 +123,8 @@ where // Add the torrents/peers to the torrent repository for info_hash in &info_hashes { - torrent_repository - .update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER) - .await; + torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER).await; + torrent_repository.get_swarm_metadata(info_hash).await; } let start = Instant::now(); @@ -134,9 +133,8 @@ where let torrent_repository_clone = torrent_repository.clone(); let handle = runtime.spawn(async move { - torrent_repository_clone - .update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER) - .await; + 
torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER).await; + torrent_repository_clone.get_swarm_metadata(&info_hash).await; if let Some(sleep_time) = sleep { let start_time = std::time::Instant::now(); diff --git a/packages/torrent-repository/benches/helpers/sync.rs b/packages/torrent-repository/benches/helpers/sync.rs index 0523f4141..63fccfc77 100644 --- a/packages/torrent-repository/benches/helpers/sync.rs +++ b/packages/torrent-repository/benches/helpers/sync.rs @@ -20,7 +20,9 @@ where let info_hash = InfoHash([0; 20]); - torrent_repository.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER); + + torrent_repository.get_swarm_metadata(&info_hash); } start.elapsed() @@ -37,7 +39,9 @@ where let handles = FuturesUnordered::new(); // Add the torrent/peer to the torrent repository - torrent_repository.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); + torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER); + + torrent_repository.get_swarm_metadata(info_hash); let start = Instant::now(); @@ -45,7 +49,9 @@ where let torrent_repository_clone = torrent_repository.clone(); let handle = runtime.spawn(async move { - torrent_repository_clone.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); + torrent_repository_clone.upsert_peer(info_hash, &DEFAULT_PEER); + + torrent_repository_clone.get_swarm_metadata(info_hash); if let Some(sleep_time) = sleep { let start_time = std::time::Instant::now(); @@ -83,7 +89,9 @@ where let torrent_repository_clone = torrent_repository.clone(); let handle = runtime.spawn(async move { - torrent_repository_clone.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER); + + torrent_repository_clone.get_swarm_metadata(&info_hash); if let Some(sleep_time) = sleep { let start_time = std::time::Instant::now(); @@ -117,7 +125,8 @@ where // Add the 
torrents/peers to the torrent repository for info_hash in &info_hashes { - torrent_repository.update_torrent_with_peer_and_get_stats(info_hash, &DEFAULT_PEER); + torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER); + torrent_repository.get_swarm_metadata(info_hash); } let start = Instant::now(); @@ -126,7 +135,8 @@ where let torrent_repository_clone = torrent_repository.clone(); let handle = runtime.spawn(async move { - torrent_repository_clone.update_torrent_with_peer_and_get_stats(&info_hash, &DEFAULT_PEER); + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER); + torrent_repository_clone.get_swarm_metadata(&info_hash); if let Some(sleep_time) = sleep { let start_time = std::time::Instant::now(); diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index 4c39af829..d72ff254b 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -15,7 +15,7 @@ pub trait Entry { /// It returns the swarm metadata (statistics) as a struct: /// /// `(seeders, completed, leechers)` - fn get_stats(&self) -> SwarmMetadata; + fn get_swarm_metadata(&self) -> SwarmMetadata; /// Returns True if Still a Valid Entry according to the Tracker Policy fn is_good(&self, policy: &TrackerPolicy) -> bool; @@ -40,10 +40,7 @@ pub trait Entry { /// /// The number of peers that have complete downloading is synchronously updated when peers are updated. /// That's the total torrent downloads counter. - fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool; - - // It preforms a combined operation of `insert_or_update_peer` and `get_stats`. 
- fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata); + fn upsert_peer(&mut self, peer: &peer::Peer) -> bool; /// It removes peer from the swarm that have not been updated for more than `current_cutoff` seconds fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch); @@ -51,20 +48,19 @@ pub trait Entry { #[allow(clippy::module_name_repetitions)] pub trait EntrySync { - fn get_stats(&self) -> SwarmMetadata; + fn get_swarm_metadata(&self) -> SwarmMetadata; fn is_good(&self, policy: &TrackerPolicy) -> bool; fn peers_is_empty(&self) -> bool; fn get_peers_len(&self) -> usize; fn get_peers(&self, limit: Option) -> Vec>; fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec>; - fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool; - fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata); + fn upsert_peer(&self, peer: &peer::Peer) -> bool; fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); } #[allow(clippy::module_name_repetitions)] pub trait EntryAsync { - fn get_stats(&self) -> impl std::future::Future + Send; + fn get_swarm_metadata(&self) -> impl std::future::Future + Send; fn check_good(self, policy: &TrackerPolicy) -> impl std::future::Future + Send; fn peers_is_empty(&self) -> impl std::future::Future + Send; fn get_peers_len(&self) -> impl std::future::Future + Send; @@ -74,11 +70,7 @@ pub trait EntryAsync { client: &SocketAddr, limit: Option, ) -> impl std::future::Future>> + Send; - fn insert_or_update_peer(self, peer: &peer::Peer) -> impl std::future::Future + Send; - fn insert_or_update_peer_and_get_stats( - self, - peer: &peer::Peer, - ) -> impl std::future::Future + std::marker::Send; + fn upsert_peer(self, peer: &peer::Peer) -> impl std::future::Future + Send; fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; } diff --git 
a/packages/torrent-repository/src/entry/mutex_std.rs b/packages/torrent-repository/src/entry/mutex_std.rs index b4b823909..990d8ab76 100644 --- a/packages/torrent-repository/src/entry/mutex_std.rs +++ b/packages/torrent-repository/src/entry/mutex_std.rs @@ -9,8 +9,8 @@ use super::{Entry, EntrySync}; use crate::{EntryMutexStd, EntrySingle}; impl EntrySync for EntryMutexStd { - fn get_stats(&self) -> SwarmMetadata { - self.lock().expect("it should get a lock").get_stats() + fn get_swarm_metadata(&self) -> SwarmMetadata { + self.lock().expect("it should get a lock").get_swarm_metadata() } fn is_good(&self, policy: &TrackerPolicy) -> bool { @@ -33,14 +33,8 @@ impl EntrySync for EntryMutexStd { self.lock().expect("it should get lock").get_peers_for_client(client, limit) } - fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool { - self.lock().expect("it should lock the entry").insert_or_update_peer(peer) - } - - fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata) { - self.lock() - .expect("it should lock the entry") - .insert_or_update_peer_and_get_stats(peer) + fn upsert_peer(&self, peer: &peer::Peer) -> bool { + self.lock().expect("it should lock the entry").upsert_peer(peer) } fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { diff --git a/packages/torrent-repository/src/entry/mutex_tokio.rs b/packages/torrent-repository/src/entry/mutex_tokio.rs index 34f4a4e92..c5363e51a 100644 --- a/packages/torrent-repository/src/entry/mutex_tokio.rs +++ b/packages/torrent-repository/src/entry/mutex_tokio.rs @@ -9,8 +9,8 @@ use super::{Entry, EntryAsync}; use crate::{EntryMutexTokio, EntrySingle}; impl EntryAsync for EntryMutexTokio { - async fn get_stats(&self) -> SwarmMetadata { - self.lock().await.get_stats() + async fn get_swarm_metadata(&self) -> SwarmMetadata { + self.lock().await.get_swarm_metadata() } async fn check_good(self, policy: &TrackerPolicy) -> bool { @@ -33,12 +33,8 @@ impl EntryAsync for 
EntryMutexTokio { self.lock().await.get_peers_for_client(client, limit) } - async fn insert_or_update_peer(self, peer: &peer::Peer) -> bool { - self.lock().await.insert_or_update_peer(peer) - } - - async fn insert_or_update_peer_and_get_stats(self, peer: &peer::Peer) -> (bool, SwarmMetadata) { - self.lock().await.insert_or_update_peer_and_get_stats(peer) + async fn upsert_peer(self, peer: &peer::Peer) -> bool { + self.lock().await.upsert_peer(peer) } async fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) { diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs index c1041e9a2..a38b54023 100644 --- a/packages/torrent-repository/src/entry/single.rs +++ b/packages/torrent-repository/src/entry/single.rs @@ -12,7 +12,7 @@ use crate::EntrySingle; impl Entry for EntrySingle { #[allow(clippy::cast_possible_truncation)] - fn get_stats(&self) -> SwarmMetadata { + fn get_swarm_metadata(&self) -> SwarmMetadata { let complete: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; let incomplete: u32 = self.peers.len() as u32 - complete; @@ -70,7 +70,7 @@ impl Entry for EntrySingle { } } - fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { + fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { let mut downloaded_stats_updated: bool = false; match peer::ReadInfo::get_event(peer) { @@ -93,12 +93,6 @@ impl Entry for EntrySingle { downloaded_stats_updated } - fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata) { - let changed = self.insert_or_update_peer(peer); - let stats = self.get_stats(); - (changed, stats) - } - fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { self.peers .retain(|_, peer| peer::ReadInfo::get_updated(peer) > current_cutoff); diff --git a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs index 
67c47973e..b398b09dc 100644 --- a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs @@ -23,19 +23,21 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { if let Some(entry) = self.torrents.get(info_hash) { - entry.insert_or_update_peer_and_get_stats(peer) + entry.upsert_peer(peer); } else { let _unused = self.torrents.insert(*info_hash, Arc::default()); - - match self.torrents.get(info_hash) { - Some(entry) => entry.insert_or_update_peer_and_get_stats(peer), - None => (false, SwarmMetadata::zeroed()), + if let Some(entry) = self.torrents.get(info_hash) { + entry.upsert_peer(peer); } } } + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) + } + fn get(&self, key: &InfoHash) -> Option { let maybe_entry = self.torrents.get(key); maybe_entry.map(|entry| entry.clone()) @@ -45,7 +47,7 @@ where let mut metrics = TorrentsMetrics::default(); for entry in &self.torrents { - let stats = entry.value().lock().expect("it should get a lock").get_stats(); + let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); metrics.complete += u64::from(stats.complete); metrics.downloaded += u64::from(stats.downloaded); metrics.incomplete += u64::from(stats.incomplete); diff --git a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs index c7c64c54a..f198288f8 100644 --- a/packages/torrent-repository/src/repository/mod.rs +++ b/packages/torrent-repository/src/repository/mod.rs @@ -24,7 +24,8 @@ pub trait Repository: Debug + Default + Sized + 'static { fn remove(&self, key: &InfoHash) -> Option; fn remove_inactive_peers(&self, current_cutoff: 
DurationSinceUnixEpoch); fn remove_peerless_torrents(&self, policy: &TrackerPolicy); - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata); + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer); + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option; } #[allow(clippy::module_name_repetitions)] @@ -36,9 +37,6 @@ pub trait RepositoryAsync: Debug + Default + Sized + 'static { fn remove(&self, key: &InfoHash) -> impl std::future::Future> + Send; fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; - fn update_torrent_with_peer_and_get_stats( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - ) -> impl std::future::Future + Send; + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> impl std::future::Future + Send; + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> impl std::future::Future> + Send; } diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs index e9074a271..af48428e4 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std.rs @@ -47,12 +47,16 @@ impl Repository for TorrentsRwLockStd where EntrySingle: Entry, { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { let mut db = self.get_torrents_mut(); let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); - entry.insert_or_update_peer_and_get_stats(peer) + entry.upsert_peer(peer); + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.get(info_hash).map(|entry| entry.get_swarm_metadata()) } fn get(&self, key: &InfoHash) -> Option { @@ -64,7 
+68,7 @@ where let mut metrics = TorrentsMetrics::default(); for entry in self.get_torrents().values() { - let stats = entry.get_stats(); + let stats = entry.get_swarm_metadata(); metrics.complete += u64::from(stats.complete); metrics.downloaded += u64::from(stats.downloaded); metrics.incomplete += u64::from(stats.incomplete); diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs index 0b65234e3..74cdc4475 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs @@ -33,7 +33,7 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { let maybe_entry = self.get_torrents().get(info_hash).cloned(); let entry = if let Some(entry) = maybe_entry { @@ -44,7 +44,13 @@ where entry.clone() }; - entry.insert_or_update_peer_and_get_stats(peer) + entry.upsert_peer(peer); + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.get_torrents() + .get(info_hash) + .map(super::super::entry::EntrySync::get_swarm_metadata) } fn get(&self, key: &InfoHash) -> Option { @@ -56,7 +62,7 @@ where let mut metrics = TorrentsMetrics::default(); for entry in self.get_torrents().values() { - let stats = entry.lock().expect("it should get a lock").get_stats(); + let stats = entry.lock().expect("it should get a lock").get_swarm_metadata(); metrics.complete += u64::from(stats.complete); metrics.downloaded += u64::from(stats.downloaded); metrics.incomplete += u64::from(stats.incomplete); diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs index 5394abb6a..83ac02c91 100644 --- 
a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs @@ -37,7 +37,7 @@ where EntryMutexTokio: EntryAsync, EntrySingle: Entry, { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { let maybe_entry = self.get_torrents().get(info_hash).cloned(); let entry = if let Some(entry) = maybe_entry { @@ -48,8 +48,18 @@ where entry.clone() }; - entry.insert_or_update_peer_and_get_stats(peer).await + entry.upsert_peer(peer).await; } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + match maybe_entry { + Some(entry) => Some(entry.get_swarm_metadata().await), + None => None, + } + } + async fn get(&self, key: &InfoHash) -> Option { let db = self.get_torrents(); db.get(key).cloned() @@ -75,7 +85,7 @@ where let entries: Vec<_> = self.get_torrents().values().cloned().collect(); for entry in entries { - let stats = entry.lock().await.get_stats(); + let stats = entry.lock().await.get_swarm_metadata(); metrics.complete += u64::from(stats.complete); metrics.downloaded += u64::from(stats.downloaded); metrics.incomplete += u64::from(stats.incomplete); diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs index d84074eaf..b95f1e31e 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs @@ -51,13 +51,18 @@ impl RepositoryAsync for TorrentsRwLockTokio where EntrySingle: Entry, { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { let mut db = 
self.get_torrents_mut().await; let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); - entry.insert_or_update_peer_and_get_stats(peer) + entry.upsert_peer(peer); } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.get(info_hash).await.map(|entry| entry.get_swarm_metadata()) + } + async fn get(&self, key: &InfoHash) -> Option { let db = self.get_torrents().await; db.get(key).cloned() @@ -81,7 +86,7 @@ where let mut metrics = TorrentsMetrics::default(); for entry in self.get_torrents().await.values() { - let stats = entry.get_stats(); + let stats = entry.get_swarm_metadata(); metrics.complete += u64::from(stats.complete); metrics.downloaded += u64::from(stats.downloaded); metrics.incomplete += u64::from(stats.incomplete); diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs index fbbc51a09..bde959940 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs @@ -35,7 +35,7 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); let entry = if let Some(entry) = maybe_entry { @@ -46,8 +46,13 @@ where entry.clone() }; - entry.insert_or_update_peer_and_get_stats(peer) + entry.upsert_peer(peer); } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.get(info_hash).await.map(|entry| entry.get_swarm_metadata()) + } + async fn get(&self, key: &InfoHash) -> Option { let db = self.get_torrents().await; db.get(key).cloned() @@ -71,7 +76,7 @@ where let mut metrics = TorrentsMetrics::default(); for entry in self.get_torrents().await.values() { - let 
stats = entry.get_stats(); + let stats = entry.get_swarm_metadata(); metrics.complete += u64::from(stats.complete); metrics.downloaded += u64::from(stats.downloaded); metrics.incomplete += u64::from(stats.incomplete); diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs index bc7fd61e8..1d002e317 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -35,7 +35,7 @@ where EntryMutexTokio: EntryAsync, EntrySingle: Entry, { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); let entry = if let Some(entry) = maybe_entry { @@ -46,8 +46,16 @@ where entry.clone() }; - entry.insert_or_update_peer_and_get_stats(peer).await + entry.upsert_peer(peer).await; } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + match self.get(info_hash).await { + Some(entry) => Some(entry.get_swarm_metadata().await), + None => None, + } + } + async fn get(&self, key: &InfoHash) -> Option { let db = self.get_torrents().await; db.get(key).cloned() @@ -71,7 +79,7 @@ where let mut metrics = TorrentsMetrics::default(); for entry in self.get_torrents().await.values() { - let stats = entry.get_stats().await; + let stats = entry.get_swarm_metadata().await; metrics.complete += u64::from(stats.complete); metrics.downloaded += u64::from(stats.downloaded); metrics.incomplete += u64::from(stats.incomplete); diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index 0c0127b15..ef3e7e478 100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ 
b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -23,9 +23,13 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); - entry.value().insert_or_update_peer_and_get_stats(peer) + entry.value().upsert_peer(peer); + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) } fn get(&self, key: &InfoHash) -> Option { @@ -37,7 +41,7 @@ where let mut metrics = TorrentsMetrics::default(); for entry in &self.torrents { - let stats = entry.value().lock().expect("it should get a lock").get_stats(); + let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); metrics.complete += u64::from(stats.complete); metrics.downloaded += u64::from(stats.downloaded); metrics.incomplete += u64::from(stats.incomplete); diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index 5a6eddf97..7c245fe04 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -23,6 +23,32 @@ pub(crate) enum Repo { } impl Repo { + pub(crate) async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + match self { + Repo::RwLockStd(repo) => repo.upsert_peer(info_hash, peer), + Repo::RwLockStdMutexStd(repo) => repo.upsert_peer(info_hash, peer), + Repo::RwLockStdMutexTokio(repo) => repo.upsert_peer(info_hash, peer).await, + Repo::RwLockTokio(repo) => repo.upsert_peer(info_hash, peer).await, + Repo::RwLockTokioMutexStd(repo) => repo.upsert_peer(info_hash, peer).await, + Repo::RwLockTokioMutexTokio(repo) => repo.upsert_peer(info_hash, peer).await, + Repo::SkipMapMutexStd(repo) => 
repo.upsert_peer(info_hash, peer), + Repo::DashMapMutexStd(repo) => repo.upsert_peer(info_hash, peer), + } + } + + pub(crate) async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + match self { + Repo::RwLockStd(repo) => repo.get_swarm_metadata(info_hash), + Repo::RwLockStdMutexStd(repo) => repo.get_swarm_metadata(info_hash), + Repo::RwLockStdMutexTokio(repo) => repo.get_swarm_metadata(info_hash).await, + Repo::RwLockTokio(repo) => repo.get_swarm_metadata(info_hash).await, + Repo::RwLockTokioMutexStd(repo) => repo.get_swarm_metadata(info_hash).await, + Repo::RwLockTokioMutexTokio(repo) => repo.get_swarm_metadata(info_hash).await, + Repo::SkipMapMutexStd(repo) => repo.get_swarm_metadata(info_hash), + Repo::DashMapMutexStd(repo) => repo.get_swarm_metadata(info_hash), + } + } + pub(crate) async fn get(&self, key: &InfoHash) -> Option { match self { Repo::RwLockStd(repo) => repo.get(key), @@ -145,23 +171,6 @@ impl Repo { } } - pub(crate) async fn update_torrent_with_peer_and_get_stats( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - ) -> (bool, SwarmMetadata) { - match self { - Repo::RwLockStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer), - Repo::RwLockStdMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer), - Repo::RwLockStdMutexTokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, - Repo::RwLockTokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, - Repo::RwLockTokioMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, - Repo::RwLockTokioMutexTokio(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer).await, - Repo::SkipMapMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer), - Repo::DashMapMutexStd(repo) => repo.update_torrent_with_peer_and_get_stats(info_hash, peer), - } - } - pub(crate) async fn insert(&self, info_hash: &InfoHash, torrent: EntrySingle) -> 
Option { match self { Repo::RwLockStd(repo) => { diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index 33264c443..c0699479e 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -17,9 +17,9 @@ pub(crate) enum Torrent { impl Torrent { pub(crate) async fn get_stats(&self) -> SwarmMetadata { match self { - Torrent::Single(entry) => entry.get_stats(), - Torrent::MutexStd(entry) => entry.get_stats(), - Torrent::MutexTokio(entry) => entry.clone().get_stats().await, + Torrent::Single(entry) => entry.get_swarm_metadata(), + Torrent::MutexStd(entry) => entry.get_swarm_metadata(), + Torrent::MutexTokio(entry) => entry.clone().get_swarm_metadata().await, } } @@ -63,19 +63,11 @@ impl Torrent { } } - pub(crate) async fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { + pub(crate) async fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { match self { - Torrent::Single(entry) => entry.insert_or_update_peer(peer), - Torrent::MutexStd(entry) => entry.insert_or_update_peer(peer), - Torrent::MutexTokio(entry) => entry.clone().insert_or_update_peer(peer).await, - } - } - - pub(crate) async fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata) { - match self { - Torrent::Single(entry) => entry.insert_or_update_peer_and_get_stats(peer), - Torrent::MutexStd(entry) => entry.insert_or_update_peer_and_get_stats(peer), - Torrent::MutexTokio(entry) => entry.clone().insert_or_update_peer_and_get_stats(peer).await, + Torrent::Single(entry) => entry.upsert_peer(peer), + Torrent::MutexStd(entry) => entry.upsert_peer(peer), + Torrent::MutexTokio(entry) => entry.clone().upsert_peer(peer).await, } } diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index c39bef636..3c564c6f8 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ 
b/packages/torrent-repository/tests/entry/mod.rs @@ -62,34 +62,34 @@ async fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { Makes::Empty => vec![], Makes::Started => { let peer = a_started_peer(1); - torrent.insert_or_update_peer(&peer).await; + torrent.upsert_peer(&peer).await; vec![peer] } Makes::Completed => { let peer = a_completed_peer(2); - torrent.insert_or_update_peer(&peer).await; + torrent.upsert_peer(&peer).await; vec![peer] } Makes::Downloaded => { let mut peer = a_started_peer(3); - torrent.insert_or_update_peer(&peer).await; + torrent.upsert_peer(&peer).await; peer.event = AnnounceEvent::Completed; peer.left = NumberOfBytes(0); - torrent.insert_or_update_peer(&peer).await; + torrent.upsert_peer(&peer).await; vec![peer] } Makes::Three => { let peer_1 = a_started_peer(1); - torrent.insert_or_update_peer(&peer_1).await; + torrent.upsert_peer(&peer_1).await; let peer_2 = a_completed_peer(2); - torrent.insert_or_update_peer(&peer_2).await; + torrent.upsert_peer(&peer_2).await; let mut peer_3 = a_started_peer(3); - torrent.insert_or_update_peer(&peer_3).await; + torrent.upsert_peer(&peer_3).await; peer_3.event = AnnounceEvent::Completed; peer_3.left = NumberOfBytes(0); - torrent.insert_or_update_peer(&peer_3).await; + torrent.upsert_peer(&peer_3).await; vec![peer_1, peer_2, peer_3] } } @@ -182,7 +182,7 @@ async fn it_should_update_a_peer( // Make and insert a new peer. let mut peer = a_started_peer(-1); - torrent.insert_or_update_peer(&peer).await; + torrent.upsert_peer(&peer).await; // Get the Inserted Peer by Id. let peers = torrent.get_peers(None).await; @@ -195,7 +195,7 @@ async fn it_should_update_a_peer( // Announce "Completed" torrent download event. peer.event = AnnounceEvent::Completed; - torrent.insert_or_update_peer(&peer).await; + torrent.upsert_peer(&peer).await; // Get the Updated Peer by Id. 
let peers = torrent.get_peers(None).await; @@ -224,7 +224,7 @@ async fn it_should_remove_a_peer_upon_stopped_announcement( let mut peer = a_started_peer(-1); - torrent.insert_or_update_peer(&peer).await; + torrent.upsert_peer(&peer).await; // The started peer should be inserted. let peers = torrent.get_peers(None).await; @@ -237,7 +237,7 @@ async fn it_should_remove_a_peer_upon_stopped_announcement( // Change peer to "Stopped" and insert. peer.event = AnnounceEvent::Stopped; - torrent.insert_or_update_peer(&peer).await; + torrent.upsert_peer(&peer).await; // It should be removed now. let peers = torrent.get_peers(None).await; @@ -270,13 +270,12 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade // Announce "Completed" torrent download event. peer.event = AnnounceEvent::Completed; - let (updated, stats) = torrent.insert_or_update_peer_and_get_stats(&peer).await; + torrent.upsert_peer(&peer).await; + let stats = torrent.get_stats().await; if is_already_completed { - assert!(!updated); assert_eq!(stats.downloaded, downloaded); } else { - assert!(updated); assert_eq!(stats.downloaded, downloaded + 1); } } @@ -301,7 +300,8 @@ async fn it_should_update_a_peer_as_a_seeder( // Set Bytes Left to Zero peer.left = NumberOfBytes(0); - let (_, stats) = torrent.insert_or_update_peer_and_get_stats(&peer).await; // Add the peer + torrent.upsert_peer(&peer).await; + let stats = torrent.get_stats().await; if is_already_non_left { // it was already complete @@ -332,7 +332,8 @@ async fn it_should_update_a_peer_as_incomplete( // Set Bytes Left to no Zero peer.left = NumberOfBytes(1); - let (_, stats) = torrent.insert_or_update_peer_and_get_stats(&peer).await; // Add the peer + torrent.upsert_peer(&peer).await; + let stats = torrent.get_stats().await; if completed_already { // now it is incomplete @@ -368,7 +369,7 @@ async fn it_should_get_peers_excluding_the_client_socket( // set the address to the socket. 
peer.peer_addr = socket; - torrent.insert_or_update_peer(&peer).await; // Add peer + torrent.upsert_peer(&peer).await; // Add peer // It should not include the peer that has the same socket. assert!(!torrent.get_peers_for_client(&socket, None).await.contains(&peer.into())); @@ -391,7 +392,7 @@ async fn it_should_limit_the_number_of_peers_returned( for peer_number in 1..=74 + 1 { let mut peer = a_started_peer(1); peer.peer_id = peer::Id::from(peer_number); - torrent.insert_or_update_peer(&peer).await; + torrent.upsert_peer(&peer).await; } let peers = torrent.get_peers(Some(TORRENT_PEERS_LIMIT)).await; @@ -422,7 +423,7 @@ async fn it_should_remove_inactive_peers_beyond_cutoff( peer.updated = now.sub(EXPIRE); - torrent.insert_or_update_peer(&peer).await; + torrent.upsert_peer(&peer).await; assert_eq!(torrent.get_peers_len().await, peers.len() + 1); diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index a6784bf57..fde34467e 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -6,6 +6,7 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{NumberOfBytes, PersistentTorrents}; use torrust_tracker_torrent_repository::entry::Entry as _; use torrust_tracker_torrent_repository::repository::dash_map_mutex_std::XacrimonDashMap; @@ -72,14 +73,14 @@ fn default() -> Entries { #[fixture] fn started() -> Entries { let mut torrent = EntrySingle::default(); - torrent.insert_or_update_peer(&a_started_peer(1)); + torrent.upsert_peer(&a_started_peer(1)); vec![(InfoHash::default(), torrent)] } #[fixture] fn completed() -> Entries { let mut torrent = EntrySingle::default(); - 
torrent.insert_or_update_peer(&a_completed_peer(2)); + torrent.upsert_peer(&a_completed_peer(2)); vec![(InfoHash::default(), torrent)] } @@ -87,10 +88,10 @@ fn completed() -> Entries { fn downloaded() -> Entries { let mut torrent = EntrySingle::default(); let mut peer = a_started_peer(3); - torrent.insert_or_update_peer(&peer); + torrent.upsert_peer(&peer); peer.event = AnnounceEvent::Completed; peer.left = NumberOfBytes(0); - torrent.insert_or_update_peer(&peer); + torrent.upsert_peer(&peer); vec![(InfoHash::default(), torrent)] } @@ -98,21 +99,21 @@ fn downloaded() -> Entries { fn three() -> Entries { let mut started = EntrySingle::default(); let started_h = &mut DefaultHasher::default(); - started.insert_or_update_peer(&a_started_peer(1)); + started.upsert_peer(&a_started_peer(1)); started.hash(started_h); let mut completed = EntrySingle::default(); let completed_h = &mut DefaultHasher::default(); - completed.insert_or_update_peer(&a_completed_peer(2)); + completed.upsert_peer(&a_completed_peer(2)); completed.hash(completed_h); let mut downloaded = EntrySingle::default(); let downloaded_h = &mut DefaultHasher::default(); let mut downloaded_peer = a_started_peer(3); - downloaded.insert_or_update_peer(&downloaded_peer); + downloaded.upsert_peer(&downloaded_peer); downloaded_peer.event = AnnounceEvent::Completed; downloaded_peer.left = NumberOfBytes(0); - downloaded.insert_or_update_peer(&downloaded_peer); + downloaded.upsert_peer(&downloaded_peer); downloaded.hash(downloaded_h); vec![ @@ -128,7 +129,7 @@ fn many_out_of_order() -> Entries { for i in 0..408 { let mut entry = EntrySingle::default(); - entry.insert_or_update_peer(&a_started_peer(i)); + entry.upsert_peer(&a_started_peer(i)); entries.insert((InfoHash::from(&i), entry)); } @@ -143,7 +144,7 @@ fn many_hashed_in_order() -> Entries { for i in 0..408 { let mut entry = EntrySingle::default(); - entry.insert_or_update_peer(&a_started_peer(i)); + entry.upsert_peer(&a_started_peer(i)); let hash: &mut 
DefaultHasher = &mut DefaultHasher::default(); hash.write_i32(i); @@ -390,7 +391,7 @@ async fn it_should_get_metrics( let mut metrics = TorrentsMetrics::default(); for (_, torrent) in entries { - let stats = torrent.get_stats(); + let stats = torrent.get_swarm_metadata(); metrics.torrents += 1; metrics.incomplete += u64::from(stats.incomplete); @@ -537,10 +538,25 @@ async fn it_should_remove_inactive_peers( // Insert the infohash and peer into the repository // and verify there is an extra torrent entry. { - repo.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + repo.upsert_peer(&info_hash, &peer).await; assert_eq!(repo.get_metrics().await.torrents, entries.len() as u64 + 1); } + // Insert the infohash and peer into the repository + // and verify the swarm metadata was updated. + { + repo.upsert_peer(&info_hash, &peer).await; + let stats = repo.get_swarm_metadata(&info_hash).await; + assert_eq!( + stats, + Some(SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }) + ); + } + // Verify that this new peer was inserted into the repository. { let entry = repo.get(&info_hash).await.expect("it_should_get_some"); diff --git a/src/core/mod.rs b/src/core/mod.rs index 6628426c1..83813a863 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -626,10 +626,9 @@ impl Tracker { peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.external_ip)); debug!("After: {peer:?}"); - // we should update the torrent and get the stats before we get the peer list. - let stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + let stats = self.upsert_peer_and_get_stats(info_hash, peer).await; - let peers = self.get_torrent_peers_for_peer(info_hash, peer); + let peers = self.get_peers_for(info_hash, peer); AnnounceData { peers, @@ -660,7 +659,7 @@ impl Tracker { /// It returns the data for a `scrape` response. 
fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { match self.torrents.get(info_hash) { - Some(torrent_entry) => torrent_entry.get_stats(), + Some(torrent_entry) => torrent_entry.get_swarm_metadata(), None => SwarmMetadata::default(), } } @@ -681,7 +680,7 @@ impl Tracker { Ok(()) } - fn get_torrent_peers_for_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> Vec> { + fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer) -> Vec> { match self.torrents.get(info_hash) { None => vec![], Some(entry) => entry.get_peers_for_client(&peer.peer_addr, Some(TORRENT_PEERS_LIMIT)), @@ -703,20 +702,36 @@ impl Tracker { /// needed for a `announce` request response. /// /// # Context: Tracker - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { - // code-review: consider splitting the function in two (command and query segregation). - // `update_torrent_with_peer` and `get_stats` + pub async fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { + let swarm_metadata_before = match self.torrents.get_swarm_metadata(info_hash) { + Some(swarm_metadata) => swarm_metadata, + None => SwarmMetadata::zeroed(), + }; - let (stats_updated, stats) = self.torrents.update_torrent_with_peer_and_get_stats(info_hash, peer); + self.torrents.upsert_peer(info_hash, peer); - if self.policy.persistent_torrent_completed_stat && stats_updated { - let completed = stats.downloaded; + let swarm_metadata_after = match self.torrents.get_swarm_metadata(info_hash) { + Some(swarm_metadata) => swarm_metadata, + None => SwarmMetadata::zeroed(), + }; + + if swarm_metadata_before != swarm_metadata_after { + self.persist_stats(info_hash, &swarm_metadata_after).await; + } + + swarm_metadata_after + } + + /// It stores the torrents stats into the database (if persistency is enabled). 
+ /// + /// # Context: Tracker + async fn persist_stats(&self, info_hash: &InfoHash, swarm_metadata: &SwarmMetadata) { + if self.policy.persistent_torrent_completed_stat { + let completed = swarm_metadata.downloaded; let info_hash = *info_hash; drop(self.database.save_persistent_torrent(&info_hash, completed).await); } - - stats } /// It calculates and returns the general `Tracker` @@ -1130,7 +1145,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + tracker.upsert_peer_and_get_stats(&info_hash, &peer).await; let peers = tracker.get_torrent_peers(&info_hash); @@ -1144,9 +1159,9 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + tracker.upsert_peer_and_get_stats(&info_hash, &peer).await; - let peers = tracker.get_torrent_peers_for_peer(&info_hash, &peer); + let peers = tracker.get_peers_for(&info_hash, &peer); assert_eq!(peers, vec![]); } @@ -1155,9 +1170,7 @@ mod tests { async fn it_should_return_the_torrent_metrics() { let tracker = public_tracker(); - tracker - .update_torrent_with_peer_and_get_stats(&sample_info_hash(), &leecher()) - .await; + tracker.upsert_peer_and_get_stats(&sample_info_hash(), &leecher()).await; let torrent_metrics = tracker.get_torrents_metrics(); @@ -1178,9 +1191,7 @@ mod tests { let start_time = std::time::Instant::now(); for i in 0..1_000_000 { - tracker - .update_torrent_with_peer_and_get_stats(&gen_seeded_infohash(&i), &leecher()) - .await; + tracker.upsert_peer_and_get_stats(&gen_seeded_infohash(&i), &leecher()).await; } let result_a = start_time.elapsed(); @@ -1704,11 +1715,11 @@ mod tests { let mut peer = sample_peer(); peer.event = AnnounceEvent::Started; - let swarm_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + let swarm_stats = tracker.upsert_peer_and_get_stats(&info_hash, &peer).await; 
assert_eq!(swarm_stats.downloaded, 0); peer.event = AnnounceEvent::Completed; - let swarm_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + let swarm_stats = tracker.upsert_peer_and_get_stats(&info_hash, &peer).await; assert_eq!(swarm_stats.downloaded, 1); // Remove the newly updated torrent from memory @@ -1719,7 +1730,7 @@ mod tests { let torrent_entry = tracker.torrents.get(&info_hash).expect("it should be able to get entry"); // It persists the number of completed peers. - assert_eq!(torrent_entry.get_stats().downloaded, 1); + assert_eq!(torrent_entry.get_swarm_metadata().downloaded, 1); // It does not persist the peers assert!(torrent_entry.peers_is_empty()); diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index ce44af3a8..9cba5de25 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -50,7 +50,7 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op let torrent_entry = torrent_entry_option?; - let stats = torrent_entry.get_stats(); + let stats = torrent_entry.get_swarm_metadata(); let peers = torrent_entry.get_peers(None); @@ -70,7 +70,7 @@ pub async fn get_torrents_page(tracker: Arc, pagination: Option<&Pagina let mut basic_infos: Vec = vec![]; for (info_hash, torrent_entry) in tracker.torrents.get_paginated(pagination) { - let stats = torrent_entry.get_stats(); + let stats = torrent_entry.get_swarm_metadata(); basic_infos.push(BasicInfo { info_hash, @@ -88,7 +88,7 @@ pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Ve let mut basic_infos: Vec = vec![]; for info_hash in info_hashes { - if let Some(stats) = tracker.torrents.get(info_hash).map(|t| t.get_stats()) { + if let Some(stats) = tracker.torrents.get(info_hash).map(|t| t.get_swarm_metadata()) { basic_infos.push(BasicInfo { info_hash: *info_hash, seeders: u64::from(stats.complete), @@ -156,9 +156,7 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let 
info_hash = InfoHash::from_str(&hash).unwrap(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer()) - .await; + tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()).await; let torrent_info = get_torrent_info(tracker.clone(), &info_hash).await.unwrap(); @@ -208,9 +206,7 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer()) - .await; + tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()).await; let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; @@ -234,12 +230,8 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash1, &sample_peer()) - .await; - tracker - .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) - .await; + tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()).await; + tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()).await; let offset = 0; let limit = 1; @@ -258,12 +250,8 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash1, &sample_peer()) - .await; - tracker - .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) - .await; + tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()).await; + tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()).await; let offset = 1; let limit = 4000; @@ -288,15 +276,11 @@ mod tests { let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash1, &sample_peer()) - .await; + 
tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()).await; let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) - .await; + tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()).await; let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 2d5038ec3..122e666a8 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -718,9 +718,7 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv6) - .await; + tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv6).await; } async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { @@ -944,9 +942,7 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv4) - .await; + tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv4).await; } async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { @@ -1119,9 +1115,7 @@ mod tests { .with_number_of_bytes_left(0) .into(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash.0.into(), &peer) - .await; + tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer).await; } fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 8d91f3ae8..dec4ccff2 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -23,7 +23,7 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker pub async fn 
add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + self.tracker.upsert_peer_and_get_stats(info_hash, peer).await; } } diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 5638713aa..f00da293e 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -20,7 +20,7 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + self.tracker.upsert_peer_and_get_stats(info_hash, peer).await; } } diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 12f4aeb9e..6ced1dbb7 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -20,7 +20,7 @@ impl Environment { /// Add a torrent to the tracker #[allow(dead_code)] pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { - self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + self.tracker.upsert_peer_and_get_stats(info_hash, peer).await; } } From 5fa01e76a2d17337e450e2c54c41a473ee215353 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 15 Apr 2024 08:50:47 +0100 Subject: [PATCH 0792/1003] chore(deps): update dependencies ```output Updating crates.io index Updating allocator-api2 v0.2.16 -> v0.2.18 Updating anyhow v1.0.81 -> v1.0.82 Updating async-channel v2.2.0 -> v2.2.1 Updating async-executor v1.10.0 -> v1.11.0 Updating async-trait v0.1.79 -> v0.1.80 Updating axum-client-ip v0.5.1 -> v0.6.0 Updating cc v1.0.91 -> v1.0.94 Updating either v1.10.0 -> v1.11.0 Updating encoding_rs v0.8.33 -> v0.8.34 Updating jobserver v0.1.28 -> v0.1.30 Updating proc-macro2 v1.0.79 -> v1.0.80 Updating quote v1.0.35 -> v1.0.36 Updating rstest v0.18.2 -> v0.19.0 Updating rstest_macros v0.18.2 -> v0.19.0 
Updating serde_repr v0.1.18 -> v0.1.19 Updating syn v2.0.58 -> v2.0.59 Updating time v0.3.34 -> v0.3.36 Updating time-macros v0.2.17 -> v0.2.18 Updating windows-targets v0.52.4 -> v0.52.5 Updating windows_aarch64_gnullvm v0.52.4 -> v0.52.5 Updating windows_aarch64_msvc v0.52.4 -> v0.52.5 Updating windows_i686_gnu v0.52.4 -> v0.52.5 Adding windows_i686_gnullvm v0.52.5 Updating windows_i686_msvc v0.52.4 -> v0.52.5 Updating windows_x86_64_gnu v0.52.4 -> v0.52.5 Updating windows_x86_64_gnullvm v0.52.4 -> v0.52.5 Updating windows_x86_64_msvc v0.52.4 -> v0.52.5 Updating winnow v0.6.5 -> v0.6.6 ``` --- Cargo.lock | 200 +++++++++++++++++++++++++++-------------------------- 1 file changed, 103 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6b9be523c..f13ed1482 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -66,9 +66,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "android-tzdata" @@ -141,9 +141,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" +checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" [[package]] name = "aquatic_udp_protocol" @@ -190,9 +190,9 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28243a43d821d11341ab73c80bed182dc015c514b951616cf79bd4af39af0c3" +checksum = "136d4d23bcc79e27423727b36823d86233aad06dfea531837b038394d11e9928" dependencies = [ "concurrent-queue", "event-listener 5.3.0", @@ -219,11 +219,10 @@ dependencies 
= [ [[package]] name = "async-executor" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f98c37cf288e302c16ef6c8472aad1e034c6c84ce5ea7b8101c98eb4a802fee" +checksum = "b10202063978b3351199d68f8b22c4e47e4b1b822f8d43fd862d5ea8c006b29a" dependencies = [ - "async-lock 3.3.0", "async-task", "concurrent-queue", "fastrand 2.0.2", @@ -237,7 +236,7 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ - "async-channel 2.2.0", + "async-channel 2.2.1", "async-executor", "async-io 2.3.2", "async-lock 3.3.0", @@ -341,13 +340,13 @@ checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" [[package]] name = "async-trait" -version = "0.1.79" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -399,9 +398,9 @@ dependencies = [ [[package]] name = "axum-client-ip" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e7c467bdcd2bd982ce5c8742a1a178aba7b03db399fd18f5d5d438f5aa91cb4" +checksum = "72188bed20deb981f3a4a9fe674e5980fd9e9c2bd880baa94715ad5d60d64c67" dependencies = [ "axum", "forwarded-header-value", @@ -461,7 +460,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -548,7 +547,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -593,7 +592,7 @@ version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" dependencies = [ 
- "async-channel 2.2.0", + "async-channel 2.2.1", "async-lock 3.3.0", "async-task", "fastrand 2.0.2", @@ -623,7 +622,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", "syn_derive", ] @@ -702,9 +701,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.91" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd97381a8cc6493395a5afc4c691c1084b3768db713b73aa215217aa245d153" +checksum = "17f6e324229dc011159fcc089755d1e2e216a90d43a7dea6853ca740b84f35e7" dependencies = [ "jobserver", "libc", @@ -741,7 +740,7 @@ dependencies = [ "iana-time-zone", "num-traits", "serde", - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -813,7 +812,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -1086,7 +1085,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -1097,7 +1096,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -1144,7 +1143,7 @@ checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -1174,15 +1173,15 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "either" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ "cfg-if", ] @@ -1379,7 +1378,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -1391,7 +1390,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -1403,7 +1402,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -1496,7 +1495,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -1912,9 +1911,9 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.28" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6" +checksum = "685a7d121ee3f65ae4fddd72b25a04bb36b6af81bc0828f7d5434c0fe60fa3a2" dependencies = [ "libc", ] @@ -2046,7 +2045,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -2197,7 +2196,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -2248,7 +2247,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", "termcolor", "thiserror", ] @@ -2448,7 +2447,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -2561,7 +2560,7 @@ dependencies = [ 
"pest_meta", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -2630,7 +2629,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -2804,9 +2803,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.79" +version = "1.0.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" +checksum = "a56dea16b0a29e94408b9aa5e2940a4eedbd128a1ba20e8f7ae60fd3d465af0e" dependencies = [ "unicode-ident", ] @@ -2833,9 +2832,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -3090,9 +3089,9 @@ dependencies = [ [[package]] name = "rstest" -version = "0.18.2" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97eeab2f3c0a199bc4be135c36c924b6590b88c377d416494288c14f2db30199" +checksum = "9d5316d2a1479eeef1ea21e7f9ddc67c191d497abc8fc3ba2467857abbb68330" dependencies = [ "futures", "futures-timer", @@ -3102,9 +3101,9 @@ dependencies = [ [[package]] name = "rstest_macros" -version = "0.18.2" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" +checksum = "04a9df72cc1f67020b0d63ad9bfe4a323e459ea7eb68e03bd9824db49f9a4c25" dependencies = [ "cfg-if", "glob", @@ -3113,7 +3112,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.58", + "syn 2.0.59", "unicode-ident", ] @@ -3375,7 +3374,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = 
[ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -3414,13 +3413,13 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -3471,7 +3470,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -3605,9 +3604,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.58" +version = "2.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" +checksum = "4a6531ffc7b071655e4ce2e04bd464c4830bb585a61cabb96cf808f05172615a" dependencies = [ "proc-macro2", "quote", @@ -3623,7 +3622,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -3726,14 +3725,14 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] name = "time" -version = "0.3.34" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", @@ -3752,9 +3751,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" +checksum = 
"3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", @@ -3820,7 +3819,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -3910,7 +3909,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.5", + "winnow 0.6.6", ] [[package]] @@ -4126,7 +4125,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] @@ -4297,7 +4296,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", "wasm-bindgen-shared", ] @@ -4331,7 +4330,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4389,7 +4388,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -4407,7 +4406,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -4427,17 +4426,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.4", - "windows_aarch64_msvc 0.52.4", - "windows_i686_gnu 0.52.4", - "windows_i686_msvc 0.52.4", - 
"windows_x86_64_gnu 0.52.4", - "windows_x86_64_gnullvm 0.52.4", - "windows_x86_64_msvc 0.52.4", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -4448,9 +4448,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -4460,9 +4460,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -4472,9 +4472,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -4484,9 +4490,9 @@ checksum = 
"8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -4496,9 +4502,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -4508,9 +4514,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -4520,9 +4526,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" @@ -4535,9 +4541,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dffa400e67ed5a4dd237983829e66475f0a4a26938c4b04c21baede6262215b8" +checksum = 
"f0c976aaaa0e1f90dbb21e9587cdaf1d9679a1cde8875c0d6bd83ab96a208352" dependencies = [ "memchr", ] @@ -4587,7 +4593,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.59", ] [[package]] From 4a567cd631cc6bdd220904b1af6fdf524192769b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 15 Apr 2024 17:39:50 +0100 Subject: [PATCH 0793/1003] refactor: extract PeerList Extract a type for a collection of peers. The performance adter the exatract is similar: ```output Requests out: 415067.21/second Responses in: 369397.08/second - Connect responses: 183049.81 - Announce responses: 182717.15 - Scrape responses: 3630.12 - Error responses: 0.00 Peers per announce response: 0.00 Announce responses per info hash: - p10: 1 - p25: 1 - p50: 1 - p75: 1 - p90: 2 - p95: 3 - p99: 104 - p99.9: 297 - p100: 375 ``` --- packages/torrent-repository/src/entry/mod.rs | 68 ++++++++++++++++++- .../torrent-repository/src/entry/single.rs | 32 ++------- .../src/repository/dash_map_mutex_std.rs | 5 +- .../src/repository/rw_lock_std.rs | 6 +- .../src/repository/rw_lock_std_mutex_std.rs | 5 +- .../src/repository/rw_lock_std_mutex_tokio.rs | 5 +- .../src/repository/rw_lock_tokio.rs | 6 +- .../src/repository/rw_lock_tokio_mutex_std.rs | 5 +- .../repository/rw_lock_tokio_mutex_tokio.rs | 5 +- .../src/repository/skip_map_mutex_std.rs | 5 +- 10 files changed, 88 insertions(+), 54 deletions(-) diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index d72ff254b..ee80305ee 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -82,8 +82,72 @@ pub trait EntryAsync { #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Torrent { /// The swarm: a network of peers that are all trying to download the torrent associated to this entry - // #[serde(skip)] - pub(crate) peers: 
std::collections::BTreeMap>, + pub(crate) peers: PeerList, /// The number of peers that have ever completed downloading the torrent associated to this entry pub(crate) downloaded: u32, } + +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct PeerList { + peers: std::collections::BTreeMap>, +} + +impl PeerList { + fn len(&self) -> usize { + self.peers.len() + } + + fn is_empty(&self) -> bool { + self.peers.is_empty() + } + + fn insert(&mut self, key: peer::Id, value: Arc) -> Option> { + self.peers.insert(key, value) + } + + fn remove(&mut self, key: &peer::Id) -> Option> { + self.peers.remove(key) + } + + fn retain(&mut self, f: F) + where + F: FnMut(&peer::Id, &mut Arc) -> bool, + { + self.peers.retain(f); + } + + fn seeders_and_leechers(&self) -> (usize, usize) { + let seeders = self.peers.values().filter(|peer| peer.is_seeder()).count(); + let leechers = self.len() - seeders; + + (seeders, leechers) + } + + fn get_peers(&self, limit: Option) -> Vec> { + match limit { + Some(limit) => self.peers.values().take(limit).cloned().collect(), + None => self.peers.values().cloned().collect(), + } + } + + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + match limit { + Some(limit) => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *client) + // Limit the number of peers on the result + .take(limit) + .cloned() + .collect(), + None => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *client) + .cloned() + .collect(), + } + } +} diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs index a38b54023..36d04c3cf 100644 --- a/packages/torrent-repository/src/entry/single.rs +++ b/packages/torrent-repository/src/entry/single.rs @@ -13,13 +13,12 @@ use crate::EntrySingle; impl Entry for 
EntrySingle { #[allow(clippy::cast_possible_truncation)] fn get_swarm_metadata(&self) -> SwarmMetadata { - let complete: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; - let incomplete: u32 = self.peers.len() as u32 - complete; + let (seeders, leechers) = self.peers.seeders_and_leechers(); SwarmMetadata { downloaded: self.downloaded, - complete, - incomplete, + complete: seeders as u32, + incomplete: leechers as u32, } } @@ -42,32 +41,13 @@ impl Entry for EntrySingle { fn get_peers_len(&self) -> usize { self.peers.len() } + fn get_peers(&self, limit: Option) -> Vec> { - match limit { - Some(limit) => self.peers.values().take(limit).cloned().collect(), - None => self.peers.values().cloned().collect(), - } + self.peers.get_peers(limit) } fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { - match limit { - Some(limit) => self - .peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *client) - // Limit the number of peers on the result - .take(limit) - .cloned() - .collect(), - None => self - .peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *client) - .cloned() - .collect(), - } + self.peers.get_peers_for_client(client, limit) } fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { diff --git a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs index b398b09dc..2aba7e54f 100644 --- a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs @@ -1,4 +1,3 @@ -use std::collections::BTreeMap; use std::sync::Arc; use dashmap::DashMap; @@ -10,7 +9,7 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; use 
super::Repository; -use crate::entry::{Entry, EntrySync}; +use crate::entry::{Entry, EntrySync, PeerList}; use crate::{EntryMutexStd, EntrySingle}; #[derive(Default, Debug)] @@ -82,7 +81,7 @@ where let entry = EntryMutexStd::new( EntrySingle { - peers: BTreeMap::default(), + peers: PeerList::default(), downloaded: *completed, } .into(), diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs index af48428e4..7d8055fca 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std.rs @@ -1,5 +1,3 @@ -use std::collections::BTreeMap; - use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; @@ -8,7 +6,7 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; use super::Repository; -use crate::entry::Entry; +use crate::entry::{Entry, PeerList}; use crate::{EntrySingle, TorrentsRwLockStd}; #[derive(Default, Debug)] @@ -102,7 +100,7 @@ where } let entry = EntrySingle { - peers: BTreeMap::default(), + peers: PeerList::default(), downloaded: *downloaded, }; diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs index 74cdc4475..629f3484e 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs @@ -1,4 +1,3 @@ -use std::collections::BTreeMap; use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; @@ -9,7 +8,7 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; use super::Repository; -use crate::entry::{Entry, EntrySync}; +use 
crate::entry::{Entry, EntrySync, PeerList}; use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockStdMutexStd}; impl TorrentsRwLockStdMutexStd { @@ -97,7 +96,7 @@ where let entry = EntryMutexStd::new( EntrySingle { - peers: BTreeMap::default(), + peers: PeerList::default(), downloaded: *completed, } .into(), diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs index 83ac02c91..3cc0f53a1 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs @@ -1,4 +1,3 @@ -use std::collections::BTreeMap; use std::iter::zip; use std::pin::Pin; use std::sync::Arc; @@ -13,7 +12,7 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; use super::RepositoryAsync; -use crate::entry::{Entry, EntryAsync}; +use crate::entry::{Entry, EntryAsync, PeerList}; use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockStdMutexTokio}; impl TorrentsRwLockStdMutexTokio { @@ -106,7 +105,7 @@ where let entry = EntryMutexTokio::new( EntrySingle { - peers: BTreeMap::default(), + peers: PeerList::default(), downloaded: *completed, } .into(), diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs index b95f1e31e..0a481fdde 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs @@ -1,5 +1,3 @@ -use std::collections::BTreeMap; - use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; @@ -8,7 +6,7 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; 
use super::RepositoryAsync; -use crate::entry::Entry; +use crate::entry::{Entry, PeerList}; use crate::{EntrySingle, TorrentsRwLockTokio}; #[derive(Default, Debug)] @@ -106,7 +104,7 @@ where } let entry = EntrySingle { - peers: BTreeMap::default(), + peers: PeerList::default(), downloaded: *completed, }; diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs index bde959940..d3b17c2d2 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs @@ -1,4 +1,3 @@ -use std::collections::BTreeMap; use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; @@ -9,7 +8,7 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; use super::RepositoryAsync; -use crate::entry::{Entry, EntrySync}; +use crate::entry::{Entry, EntrySync, PeerList}; use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockTokioMutexStd}; impl TorrentsRwLockTokioMutexStd { @@ -97,7 +96,7 @@ where let entry = EntryMutexStd::new( EntrySingle { - peers: BTreeMap::default(), + peers: PeerList::default(), downloaded: *completed, } .into(), diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs index 1d002e317..875a890ea 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -1,4 +1,3 @@ -use std::collections::BTreeMap; use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; @@ -9,7 +8,7 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; use super::RepositoryAsync; -use 
crate::entry::{Entry, EntryAsync}; +use crate::entry::{Entry, EntryAsync, PeerList}; use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockTokioMutexTokio}; impl TorrentsRwLockTokioMutexTokio { @@ -100,7 +99,7 @@ where let entry = EntryMutexTokio::new( EntrySingle { - peers: BTreeMap::default(), + peers: PeerList::default(), downloaded: *completed, } .into(), diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index ef3e7e478..b3c71a4de 100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -1,4 +1,3 @@ -use std::collections::BTreeMap; use std::sync::Arc; use crossbeam_skiplist::SkipMap; @@ -10,7 +9,7 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; use super::Repository; -use crate::entry::{Entry, EntrySync}; +use crate::entry::{Entry, EntrySync, PeerList}; use crate::{EntryMutexStd, EntrySingle}; #[derive(Default, Debug)] @@ -76,7 +75,7 @@ where let entry = EntryMutexStd::new( EntrySingle { - peers: BTreeMap::default(), + peers: PeerList::default(), downloaded: *completed, } .into(), From 922afda1e689faa62a54cb29ccc6bd7619a4f018 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 15 Apr 2024 17:48:08 +0100 Subject: [PATCH 0794/1003] refactor: rename field from peers to swarm --- packages/torrent-repository/src/entry/mod.rs | 4 ++-- .../torrent-repository/src/entry/single.rs | 20 +++++++++---------- .../src/repository/dash_map_mutex_std.rs | 2 +- .../src/repository/rw_lock_std.rs | 2 +- .../src/repository/rw_lock_std_mutex_std.rs | 2 +- .../src/repository/rw_lock_std_mutex_tokio.rs | 2 +- .../src/repository/rw_lock_tokio.rs | 2 +- .../src/repository/rw_lock_tokio_mutex_std.rs | 2 +- .../repository/rw_lock_tokio_mutex_tokio.rs | 2 +- 
.../src/repository/skip_map_mutex_std.rs | 2 +- 10 files changed, 20 insertions(+), 20 deletions(-) diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index ee80305ee..648ded98a 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -81,8 +81,8 @@ pub trait EntryAsync { /// The tracker keeps one entry like this for every torrent. #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Torrent { - /// The swarm: a network of peers that are all trying to download the torrent associated to this entry - pub(crate) peers: PeerList, + /// A network of peers that are all trying to download the torrent associated to this entry + pub(crate) swarm: PeerList, /// The number of peers that have ever completed downloading the torrent associated to this entry pub(crate) downloaded: u32, } diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs index 36d04c3cf..169ee2fbb 100644 --- a/packages/torrent-repository/src/entry/single.rs +++ b/packages/torrent-repository/src/entry/single.rs @@ -13,7 +13,7 @@ use crate::EntrySingle; impl Entry for EntrySingle { #[allow(clippy::cast_possible_truncation)] fn get_swarm_metadata(&self) -> SwarmMetadata { - let (seeders, leechers) = self.peers.seeders_and_leechers(); + let (seeders, leechers) = self.swarm.seeders_and_leechers(); SwarmMetadata { downloaded: self.downloaded, @@ -27,7 +27,7 @@ impl Entry for EntrySingle { return true; } - if policy.remove_peerless_torrents && self.peers.is_empty() { + if policy.remove_peerless_torrents && self.swarm.is_empty() { return false; } @@ -35,19 +35,19 @@ impl Entry for EntrySingle { } fn peers_is_empty(&self) -> bool { - self.peers.is_empty() + self.swarm.is_empty() } fn get_peers_len(&self) -> usize { - self.peers.len() + self.swarm.len() } fn get_peers(&self, limit: Option) -> Vec> { - self.peers.get_peers(limit) 
+ self.swarm.get_peers(limit) } fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { - self.peers.get_peers_for_client(client, limit) + self.swarm.get_peers_for_client(client, limit) } fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { @@ -55,10 +55,10 @@ impl Entry for EntrySingle { match peer::ReadInfo::get_event(peer) { AnnounceEvent::Stopped => { - drop(self.peers.remove(&peer::ReadInfo::get_id(peer))); + drop(self.swarm.remove(&peer::ReadInfo::get_id(peer))); } AnnounceEvent::Completed => { - let previous = self.peers.insert(peer::ReadInfo::get_id(peer), Arc::new(*peer)); + let previous = self.swarm.insert(peer::ReadInfo::get_id(peer), Arc::new(*peer)); // Don't count if peer was not previously known and not already completed. if previous.is_some_and(|p| p.event != AnnounceEvent::Completed) { self.downloaded += 1; @@ -66,7 +66,7 @@ impl Entry for EntrySingle { } } _ => { - drop(self.peers.insert(peer::ReadInfo::get_id(peer), Arc::new(*peer))); + drop(self.swarm.insert(peer::ReadInfo::get_id(peer), Arc::new(*peer))); } } @@ -74,7 +74,7 @@ impl Entry for EntrySingle { } fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { - self.peers + self.swarm .retain(|_, peer| peer::ReadInfo::get_updated(peer) > current_cutoff); } } diff --git a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs index 2aba7e54f..7bc60dbc6 100644 --- a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs @@ -81,7 +81,7 @@ where let entry = EntryMutexStd::new( EntrySingle { - peers: PeerList::default(), + swarm: PeerList::default(), downloaded: *completed, } .into(), diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs index 7d8055fca..800b1b31f 100644 --- 
a/packages/torrent-repository/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std.rs @@ -100,7 +100,7 @@ where } let entry = EntrySingle { - peers: PeerList::default(), + swarm: PeerList::default(), downloaded: *downloaded, }; diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs index 629f3484e..9fefc9115 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs @@ -96,7 +96,7 @@ where let entry = EntryMutexStd::new( EntrySingle { - peers: PeerList::default(), + swarm: PeerList::default(), downloaded: *completed, } .into(), diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs index 3cc0f53a1..31ccf2a50 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs @@ -105,7 +105,7 @@ where let entry = EntryMutexTokio::new( EntrySingle { - peers: PeerList::default(), + swarm: PeerList::default(), downloaded: *completed, } .into(), diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs index 0a481fdde..0987b064a 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs @@ -104,7 +104,7 @@ where } let entry = EntrySingle { - peers: PeerList::default(), + swarm: PeerList::default(), downloaded: *completed, }; diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs index d3b17c2d2..77a82e445 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs +++ 
b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs @@ -96,7 +96,7 @@ where let entry = EntryMutexStd::new( EntrySingle { - peers: PeerList::default(), + swarm: PeerList::default(), downloaded: *completed, } .into(), diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs index 875a890ea..fc7608010 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -99,7 +99,7 @@ where let entry = EntryMutexTokio::new( EntrySingle { - peers: PeerList::default(), + swarm: PeerList::default(), downloaded: *completed, } .into(), diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index b3c71a4de..7f84dae2a 100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -75,7 +75,7 @@ where let entry = EntryMutexStd::new( EntrySingle { - peers: PeerList::default(), + swarm: PeerList::default(), downloaded: *completed, } .into(), From 42f1b309a68d746f221ed377133cb9f2fa5e6208 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 15 Apr 2024 17:58:20 +0100 Subject: [PATCH 0795/1003] refactor: extract mod peer_list --- packages/torrent-repository/src/entry/mod.rs | 69 +---------------- .../torrent-repository/src/entry/peer_list.rs | 74 +++++++++++++++++++ .../src/repository/dash_map_mutex_std.rs | 3 +- .../src/repository/rw_lock_std.rs | 3 +- .../src/repository/rw_lock_std_mutex_std.rs | 3 +- .../src/repository/rw_lock_std_mutex_tokio.rs | 3 +- .../src/repository/rw_lock_tokio.rs | 3 +- .../src/repository/rw_lock_tokio_mutex_std.rs | 3 +- .../repository/rw_lock_tokio_mutex_tokio.rs | 3 +- .../src/repository/skip_map_mutex_std.rs | 3 +- 10 files changed, 93 insertions(+), 74 
deletions(-) create mode 100644 packages/torrent-repository/src/entry/peer_list.rs diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index 648ded98a..40fa4efd5 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -2,13 +2,15 @@ use std::fmt::Debug; use std::net::SocketAddr; use std::sync::Arc; -//use serde::{Deserialize, Serialize}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; +use self::peer_list::PeerList; + pub mod mutex_std; pub mod mutex_tokio; +pub mod peer_list; pub mod single; pub trait Entry { @@ -86,68 +88,3 @@ pub struct Torrent { /// The number of peers that have ever completed downloading the torrent associated to this entry pub(crate) downloaded: u32, } - -#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct PeerList { - peers: std::collections::BTreeMap>, -} - -impl PeerList { - fn len(&self) -> usize { - self.peers.len() - } - - fn is_empty(&self) -> bool { - self.peers.is_empty() - } - - fn insert(&mut self, key: peer::Id, value: Arc) -> Option> { - self.peers.insert(key, value) - } - - fn remove(&mut self, key: &peer::Id) -> Option> { - self.peers.remove(key) - } - - fn retain(&mut self, f: F) - where - F: FnMut(&peer::Id, &mut Arc) -> bool, - { - self.peers.retain(f); - } - - fn seeders_and_leechers(&self) -> (usize, usize) { - let seeders = self.peers.values().filter(|peer| peer.is_seeder()).count(); - let leechers = self.len() - seeders; - - (seeders, leechers) - } - - fn get_peers(&self, limit: Option) -> Vec> { - match limit { - Some(limit) => self.peers.values().take(limit).cloned().collect(), - None => self.peers.values().cloned().collect(), - } - } - - fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { - match limit { - Some(limit) => self - 
.peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *client) - // Limit the number of peers on the result - .take(limit) - .cloned() - .collect(), - None => self - .peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *client) - .cloned() - .collect(), - } - } -} diff --git a/packages/torrent-repository/src/entry/peer_list.rs b/packages/torrent-repository/src/entry/peer_list.rs new file mode 100644 index 000000000..4af9b1d77 --- /dev/null +++ b/packages/torrent-repository/src/entry/peer_list.rs @@ -0,0 +1,74 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_primitives::peer; + +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct PeerList { + peers: std::collections::BTreeMap>, +} + +impl PeerList { + #[must_use] + pub fn len(&self) -> usize { + self.peers.len() + } + + #[must_use] + pub fn is_empty(&self) -> bool { + self.peers.is_empty() + } + + pub fn insert(&mut self, key: peer::Id, value: Arc) -> Option> { + self.peers.insert(key, value) + } + + pub fn remove(&mut self, key: &peer::Id) -> Option> { + self.peers.remove(key) + } + + pub fn retain(&mut self, f: F) + where + F: FnMut(&peer::Id, &mut Arc) -> bool, + { + self.peers.retain(f); + } + + #[must_use] + pub fn seeders_and_leechers(&self) -> (usize, usize) { + let seeders = self.peers.values().filter(|peer| peer.is_seeder()).count(); + let leechers = self.len() - seeders; + + (seeders, leechers) + } + + #[must_use] + pub fn get_peers(&self, limit: Option) -> Vec> { + match limit { + Some(limit) => self.peers.values().take(limit).cloned().collect(), + None => self.peers.values().cloned().collect(), + } + } + + #[must_use] + pub fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + match limit { + Some(limit) => self + .peers + .values() + // Take peers which are not the client peer 
+ .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *client) + // Limit the number of peers on the result + .take(limit) + .cloned() + .collect(), + None => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *client) + .cloned() + .collect(), + } + } +} diff --git a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs index 7bc60dbc6..a38205205 100644 --- a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs @@ -9,7 +9,8 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; use super::Repository; -use crate::entry::{Entry, EntrySync, PeerList}; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntrySync}; use crate::{EntryMutexStd, EntrySingle}; #[derive(Default, Debug)] diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs index 800b1b31f..0d96a2375 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std.rs @@ -6,7 +6,8 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; use super::Repository; -use crate::entry::{Entry, PeerList}; +use crate::entry::peer_list::PeerList; +use crate::entry::Entry; use crate::{EntrySingle, TorrentsRwLockStd}; #[derive(Default, Debug)] diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs index 9fefc9115..76d5e8f1e 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs +++ 
b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs @@ -8,7 +8,8 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; use super::Repository; -use crate::entry::{Entry, EntrySync, PeerList}; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntrySync}; use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockStdMutexStd}; impl TorrentsRwLockStdMutexStd { diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs index 31ccf2a50..e527d6b59 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs @@ -12,7 +12,8 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; use super::RepositoryAsync; -use crate::entry::{Entry, EntryAsync, PeerList}; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntryAsync}; use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockStdMutexTokio}; impl TorrentsRwLockStdMutexTokio { diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs index 0987b064a..c360106b8 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs @@ -6,7 +6,8 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; use super::RepositoryAsync; -use crate::entry::{Entry, PeerList}; +use crate::entry::peer_list::PeerList; +use crate::entry::Entry; use crate::{EntrySingle, TorrentsRwLockTokio}; #[derive(Default, Debug)] diff --git 
a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs index 77a82e445..9fce79b44 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs @@ -8,7 +8,8 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; use super::RepositoryAsync; -use crate::entry::{Entry, EntrySync, PeerList}; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntrySync}; use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockTokioMutexStd}; impl TorrentsRwLockTokioMutexStd { diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs index fc7608010..c7e0d4054 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -8,7 +8,8 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; use super::RepositoryAsync; -use crate::entry::{Entry, EntryAsync, PeerList}; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntryAsync}; use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockTokioMutexTokio}; impl TorrentsRwLockTokioMutexTokio { diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index 7f84dae2a..bc9ecd066 100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -9,7 +9,8 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, 
DurationSinceUnixEpoch, PersistentTorrents}; use super::Repository; -use crate::entry::{Entry, EntrySync, PeerList}; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntrySync}; use crate::{EntryMutexStd, EntrySingle}; #[derive(Default, Debug)] From 40182b49e0c221ed85fb78f4243cc180cc96ed1e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 16 Apr 2024 12:00:28 +0100 Subject: [PATCH 0796/1003] test: add tests for PeerList type --- packages/primitives/src/peer.rs | 39 +++ .../torrent-repository/src/entry/peer_list.rs | 249 ++++++++++++++++-- .../torrent-repository/src/entry/single.rs | 11 +- 3 files changed, 276 insertions(+), 23 deletions(-) diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index f5b009f2a..ab7559508 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -362,6 +362,38 @@ pub mod fixture { } impl PeerBuilder { + #[allow(dead_code)] + #[must_use] + pub fn seeder() -> Self { + let peer = Peer { + peer_id: Id(*b"-qB00000000000000001"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Completed, + }; + + Self { peer } + } + + #[allow(dead_code)] + #[must_use] + pub fn leecher() -> Self { + let peer = Peer { + peer_id: Id(*b"-qB00000000000000002"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(10), + event: AnnounceEvent::Started, + }; + + Self { peer } + } + #[allow(dead_code)] #[must_use] pub fn with_peer_id(mut self, peer_id: &Id) -> Self { @@ -390,6 +422,13 @@ pub mod fixture { self } + #[allow(dead_code)] + #[must_use] + pub fn last_updated_on(mut self, updated: DurationSinceUnixEpoch) -> Self { + 
self.peer.updated = updated; + self + } + #[allow(dead_code)] #[must_use] pub fn build(self) -> Peer { diff --git a/packages/torrent-repository/src/entry/peer_list.rs b/packages/torrent-repository/src/entry/peer_list.rs index 4af9b1d77..3f69edbb5 100644 --- a/packages/torrent-repository/src/entry/peer_list.rs +++ b/packages/torrent-repository/src/entry/peer_list.rs @@ -1,7 +1,13 @@ +//! A peer list. use std::net::SocketAddr; use std::sync::Arc; -use torrust_tracker_primitives::peer; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +// code-review: the current implementation uses the peer Id as the ``BTreeMap`` +// key. That would allow adding two identical peers except for the Id. +// For example, two peers with the same socket address but a different peer Id +// would be allowed. That would lead to duplicated peers in the tracker responses. #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct PeerList { @@ -19,31 +25,26 @@ impl PeerList { self.peers.is_empty() } - pub fn insert(&mut self, key: peer::Id, value: Arc) -> Option> { - self.peers.insert(key, value) + pub fn upsert(&mut self, value: Arc) -> Option> { + self.peers.insert(value.peer_id, value) } pub fn remove(&mut self, key: &peer::Id) -> Option> { self.peers.remove(key) } - pub fn retain(&mut self, f: F) - where - F: FnMut(&peer::Id, &mut Arc) -> bool, - { - self.peers.retain(f); + pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + self.peers + .retain(|_, peer| peer::ReadInfo::get_updated(peer) > current_cutoff); } #[must_use] - pub fn seeders_and_leechers(&self) -> (usize, usize) { - let seeders = self.peers.values().filter(|peer| peer.is_seeder()).count(); - let leechers = self.len() - seeders; - - (seeders, leechers) + pub fn get(&self, peer_id: &peer::Id) -> Option<&Arc> { + self.peers.get(peer_id) } #[must_use] - pub fn get_peers(&self, limit: Option) -> Vec> { + pub fn get_all(&self, limit: Option) -> Vec> { match limit 
{ Some(limit) => self.peers.values().take(limit).cloned().collect(), None => self.peers.values().cloned().collect(), @@ -51,13 +52,21 @@ impl PeerList { } #[must_use] - pub fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + pub fn seeders_and_leechers(&self) -> (usize, usize) { + let seeders = self.peers.values().filter(|peer| peer.is_seeder()).count(); + let leechers = self.len() - seeders; + + (seeders, leechers) + } + + #[must_use] + pub fn get_peers_excluding_addr(&self, peer_addr: &SocketAddr, limit: Option) -> Vec> { match limit { Some(limit) => self .peers .values() // Take peers which are not the client peer - .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *client) + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *peer_addr) // Limit the number of peers on the result .take(limit) .cloned() @@ -66,9 +75,215 @@ impl PeerList { .peers .values() // Take peers which are not the client peer - .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *client) + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *peer_addr) .cloned() .collect(), } } } + +#[cfg(test)] +mod tests { + + mod it_should { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::peer::{self}; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::entry::peer_list::PeerList; + + #[test] + fn be_empty_when_no_peers_have_been_inserted() { + let peer_list = PeerList::default(); + + assert!(peer_list.is_empty()); + } + + #[test] + fn have_zero_length_when_no_peers_have_been_inserted() { + let peer_list = PeerList::default(); + + assert_eq!(peer_list.len(), 0); + } + + #[test] + fn allow_inserting_a_new_peer() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + assert_eq!(peer_list.upsert(peer.into()), None); + } + + #[test] + fn 
allow_updating_a_preexisting_peer() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + assert_eq!(peer_list.upsert(peer.into()), Some(Arc::new(peer))); + } + + #[test] + fn allow_getting_all_peers() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + assert_eq!(peer_list.get_all(None), [Arc::new(peer)]); + } + + #[test] + fn allow_getting_one_peer_by_id() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + assert_eq!(peer_list.get(&peer.peer_id), Some(Arc::new(peer)).as_ref()); + } + + #[test] + fn increase_the_number_of_peers_after_inserting_a_new_one() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + assert_eq!(peer_list.len(), 1); + } + + #[test] + fn decrease_the_number_of_peers_after_removing_one() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + peer_list.remove(&peer.peer_id); + + assert!(peer_list.is_empty()); + } + + #[test] + fn allow_removing_an_existing_peer() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + peer_list.remove(&peer.peer_id); + + assert_eq!(peer_list.get(&peer.peer_id), None); + } + + #[test] + fn allow_getting_all_peers_excluding_peers_with_a_given_address() { + let mut peer_list = PeerList::default(); + + let peer1 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); + peer_list.upsert(peer1.into()); + + let peer2 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + 
.with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) + .build(); + peer_list.upsert(peer2.into()); + + assert_eq!(peer_list.get_peers_excluding_addr(&peer2.peer_addr, None), [Arc::new(peer1)]); + } + + #[test] + fn return_the_number_of_seeders_in_the_list() { + let mut peer_list = PeerList::default(); + + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); + + peer_list.upsert(seeder.into()); + peer_list.upsert(leecher.into()); + + let (seeders, _leechers) = peer_list.seeders_and_leechers(); + + assert_eq!(seeders, 1); + } + + #[test] + fn return_the_number_of_leechers_in_the_list() { + let mut peer_list = PeerList::default(); + + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); + + peer_list.upsert(seeder.into()); + peer_list.upsert(leecher.into()); + + let (_seeders, leechers) = peer_list.seeders_and_leechers(); + + assert_eq!(leechers, 1); + } + + #[test] + fn remove_inactive_peers() { + let mut peer_list = PeerList::default(); + let one_second = DurationSinceUnixEpoch::new(1, 0); + + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + peer_list.upsert(peer.into()); + + // Remove peers not updated since one second after inserting the peer + peer_list.remove_inactive_peers(last_update_time + one_second); + + assert_eq!(peer_list.len(), 0); + } + + #[test] + fn not_remove_active_peers() { + let mut peer_list = PeerList::default(); + let one_second = DurationSinceUnixEpoch::new(1, 0); + + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + peer_list.upsert(peer.into()); + + // Remove peers not updated since one second before inserting the peer. 
+ peer_list.remove_inactive_peers(last_update_time - one_second); + + assert_eq!(peer_list.len(), 1); + } + + #[test] + fn allow_inserting_two_identical_peers_except_for_the_id() { + let mut peer_list = PeerList::default(); + + let peer1 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + peer_list.upsert(peer1.into()); + + let peer2 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .build(); + peer_list.upsert(peer2.into()); + + assert_eq!(peer_list.len(), 2); + } + } +} diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs index 169ee2fbb..a01124454 100644 --- a/packages/torrent-repository/src/entry/single.rs +++ b/packages/torrent-repository/src/entry/single.rs @@ -43,11 +43,11 @@ impl Entry for EntrySingle { } fn get_peers(&self, limit: Option) -> Vec> { - self.swarm.get_peers(limit) + self.swarm.get_all(limit) } fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { - self.swarm.get_peers_for_client(client, limit) + self.swarm.get_peers_excluding_addr(client, limit) } fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { @@ -58,7 +58,7 @@ impl Entry for EntrySingle { drop(self.swarm.remove(&peer::ReadInfo::get_id(peer))); } AnnounceEvent::Completed => { - let previous = self.swarm.insert(peer::ReadInfo::get_id(peer), Arc::new(*peer)); + let previous = self.swarm.upsert(Arc::new(*peer)); // Don't count if peer was not previously known and not already completed. 
if previous.is_some_and(|p| p.event != AnnounceEvent::Completed) { self.downloaded += 1; @@ -66,7 +66,7 @@ } } _ => { - drop(self.swarm.insert(peer::ReadInfo::get_id(peer), Arc::new(*peer))); + drop(self.swarm.upsert(Arc::new(*peer))); } } @@ -74,7 +74,6 @@ } fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { - self.swarm - .retain(|_, peer| peer::ReadInfo::get_updated(peer) > current_cutoff); + self.swarm.remove_inactive_peers(current_cutoff); } } From 5750e2c22d52dfa976150e363d3f400b93efd31e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 16 Apr 2024 14:16:13 +0100 Subject: [PATCH 0797/1003] chore(deps): add dependency parking_lot It provides implementations of Mutex and RwLock that are smaller, faster and more flexible than those in the Rust standard library. It will be used to check if a new torrent repo implementation using these locks is faster. --- Cargo.lock | 2 ++ Cargo.toml | 1 + packages/torrent-repository/Cargo.toml | 1 + 3 files changed, 4 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index f13ed1482..dc6db21c1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3939,6 +3939,7 @@ dependencies = [ "log", "mockall", "multimap", + "parking_lot", "percent-encoding", "r2d2", "r2d2_mysql", @@ -4037,6 +4038,7 @@ dependencies = [ "crossbeam-skiplist", "dashmap", "futures", + "parking_lot", "rstest", "tokio", "torrust-tracker-clock", diff --git a/Cargo.toml b/Cargo.toml index ef0c39d4b..57c18453b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,6 +51,7 @@ hyper = "1" lazy_static = "1" log = { version = "0", features = ["release_max_level_info"] } multimap = "0" +parking_lot = "0.12.1" percent-encoding = "2" r2d2 = "0" r2d2_mysql = "24" diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 6bc8bfcdd..937ec11e2 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -19,6 +19,7 @@
version.workspace = true crossbeam-skiplist = "0.1" dashmap = "5.5.3" futures = "0.3.29" +parking_lot = "0.12.1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = "3.0.0-alpha.12-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } From 9258ac0cf2b42530950e7cd0cd40792b45c8f7b9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 16 Apr 2024 14:21:10 +0100 Subject: [PATCH 0798/1003] feat: new torrent repo implementation using parking_lot RwLock --- .../benches/repository_benchmark.rs | 22 ++++- packages/torrent-repository/src/entry/mod.rs | 1 + .../src/entry/mutex_parking_lot.rs | 49 ++++++++++ packages/torrent-repository/src/lib.rs | 7 ++ .../src/repository/skip_map_mutex_std.rs | 93 ++++++++++++++++++- .../torrent-repository/tests/common/repo.rs | 18 ++++ .../tests/common/torrent.rs | 11 ++- .../torrent-repository/tests/entry/mod.rs | 34 +++---- .../tests/repository/mod.rs | 31 +++++-- 9 files changed, 238 insertions(+), 28 deletions(-) create mode 100644 packages/torrent-repository/src/entry/mutex_parking_lot.rs diff --git a/packages/torrent-repository/benches/repository_benchmark.rs b/packages/torrent-repository/benches/repository_benchmark.rs index 58cd70d9a..75e7fd5b8 100644 --- a/packages/torrent-repository/benches/repository_benchmark.rs +++ b/packages/torrent-repository/benches/repository_benchmark.rs @@ -5,7 +5,7 @@ mod helpers; use criterion::{criterion_group, criterion_main, Criterion}; use torrust_tracker_torrent_repository::{ TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, - TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexStd, + TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexStd, TorrentsSkipMapRwLockParkingLot, }; use crate::helpers::{asyn, sync}; @@ -49,6 +49,10 
@@ fn add_one_torrent(c: &mut Criterion) { b.iter_custom(sync::add_one_torrent::); }); + group.bench_function("SkipMapRwLockParkingLot", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + group.bench_function("DashMapMutexStd", |b| { b.iter_custom(sync::add_one_torrent::); }); @@ -102,6 +106,11 @@ fn add_multiple_torrents_in_parallel(c: &mut Criterion) { .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); }); + group.bench_function("SkipMapRwLockParkingLot", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + group.bench_function("DashMapMutexStd", |b| { b.to_async(&rt) .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); @@ -156,6 +165,11 @@ fn update_one_torrent_in_parallel(c: &mut Criterion) { .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); }); + group.bench_function("SkipMapRwLockParkingLot", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + group.bench_function("DashMapMutexStd", |b| { b.to_async(&rt) .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); @@ -211,6 +225,12 @@ fn update_multiple_torrents_in_parallel(c: &mut Criterion) { .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); }); + group.bench_function("SkipMapRwLockParkingLot", |b| { + b.to_async(&rt).iter_custom(|iters| { + sync::update_multiple_torrents_in_parallel::(&rt, iters, None) + }); + }); + group.bench_function("DashMapMutexStd", |b| { b.to_async(&rt) .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index 40fa4efd5..dbe1416be 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -8,6 +8,7 @@ use 
torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use self::peer_list::PeerList; +pub mod mutex_parking_lot; pub mod mutex_std; pub mod mutex_tokio; pub mod peer_list; diff --git a/packages/torrent-repository/src/entry/mutex_parking_lot.rs b/packages/torrent-repository/src/entry/mutex_parking_lot.rs new file mode 100644 index 000000000..ef0e958d5 --- /dev/null +++ b/packages/torrent-repository/src/entry/mutex_parking_lot.rs @@ -0,0 +1,49 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntrySync}; +use crate::{EntryRwLockParkingLot, EntrySingle}; + +impl EntrySync for EntryRwLockParkingLot { + fn get_swarm_metadata(&self) -> SwarmMetadata { + self.read().get_swarm_metadata() + } + + fn is_good(&self, policy: &TrackerPolicy) -> bool { + self.read().is_good(policy) + } + + fn peers_is_empty(&self) -> bool { + self.read().peers_is_empty() + } + + fn get_peers_len(&self) -> usize { + self.read().get_peers_len() + } + + fn get_peers(&self, limit: Option) -> Vec> { + self.read().get_peers(limit) + } + + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.read().get_peers_for_client(client, limit) + } + + fn upsert_peer(&self, peer: &peer::Peer) -> bool { + self.write().upsert_peer(peer) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + self.write().remove_inactive_peers(current_cutoff); + } +} + +impl From for EntryRwLockParkingLot { + fn from(entry: EntrySingle) -> Self { + Arc::new(parking_lot::RwLock::new(entry)) + } +} diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 7a6d209b9..5d3a7ed45 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -9,9 +9,14 @@ use torrust_tracker_clock::clock; pub mod 
entry; pub mod repository; +// Repo Entries + pub type EntrySingle = entry::Torrent; pub type EntryMutexStd = Arc>; pub type EntryMutexTokio = Arc>; +pub type EntryRwLockParkingLot = Arc>; + +// Repos pub type TorrentsRwLockStd = RwLockStd; pub type TorrentsRwLockStdMutexStd = RwLockStd; @@ -21,6 +26,8 @@ pub type TorrentsRwLockTokioMutexStd = RwLockTokio; pub type TorrentsRwLockTokioMutexTokio = RwLockTokio; pub type TorrentsSkipMapMutexStd = CrossbeamSkipList; +pub type TorrentsSkipMapRwLockParkingLot = CrossbeamSkipList; + pub type TorrentsDashMapMutexStd = XacrimonDashMap; /// This code needs to be copied into each crate. diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index bc9ecd066..0a2a566e7 100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -11,7 +11,7 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent use super::Repository; use crate::entry::peer_list::PeerList; use crate::entry::{Entry, EntrySync}; -use crate::{EntryMutexStd, EntrySingle}; +use crate::{EntryMutexStd, EntryRwLockParkingLot, EntrySingle}; #[derive(Default, Debug)] pub struct CrossbeamSkipList { @@ -108,3 +108,94 @@ where } } } + +impl Repository for CrossbeamSkipList +where + EntryRwLockParkingLot: EntrySync, + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); + entry.value().upsert_peer(peer); + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let maybe_entry = self.torrents.get(key); + maybe_entry.map(|entry| entry.value().clone()) + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = 
TorrentsMetrics::default(); + + for entry in &self.torrents { + let stats = entry.value().read().get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryRwLockParkingLot)> { + match pagination { + Some(pagination) => self + .torrents + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .torrents + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + for (info_hash, completed) in persistent_torrents { + if self.torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryRwLockParkingLot::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + // Since SkipMap is lock-free the torrent could have been inserted + // after checking if it exists. 
+ self.torrents.get_or_insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|entry| entry.value().clone()) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { + entry.value().remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + for entry in &self.torrents { + if entry.value().is_good(policy) { + continue; + } + + entry.remove(); + } + } +} diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index 7c245fe04..c5da6258d 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -8,6 +8,7 @@ use torrust_tracker_torrent_repository::repository::{Repository as _, Repository use torrust_tracker_torrent_repository::{ EntrySingle, TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexStd, + TorrentsSkipMapRwLockParkingLot, }; #[derive(Debug)] @@ -19,6 +20,7 @@ pub(crate) enum Repo { RwLockTokioMutexStd(TorrentsRwLockTokioMutexStd), RwLockTokioMutexTokio(TorrentsRwLockTokioMutexTokio), SkipMapMutexStd(TorrentsSkipMapMutexStd), + SkipMapRwLockParkingLot(TorrentsSkipMapRwLockParkingLot), DashMapMutexStd(TorrentsDashMapMutexStd), } @@ -32,6 +34,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.upsert_peer(info_hash, peer).await, Repo::RwLockTokioMutexTokio(repo) => repo.upsert_peer(info_hash, peer).await, Repo::SkipMapMutexStd(repo) => repo.upsert_peer(info_hash, peer), + Repo::SkipMapRwLockParkingLot(repo) => repo.upsert_peer(info_hash, peer), Repo::DashMapMutexStd(repo) => repo.upsert_peer(info_hash, peer), } } @@ -45,6 +48,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.get_swarm_metadata(info_hash).await, 
Repo::RwLockTokioMutexTokio(repo) => repo.get_swarm_metadata(info_hash).await, Repo::SkipMapMutexStd(repo) => repo.get_swarm_metadata(info_hash), + Repo::SkipMapRwLockParkingLot(repo) => repo.get_swarm_metadata(info_hash), Repo::DashMapMutexStd(repo) => repo.get_swarm_metadata(info_hash), } } @@ -58,6 +62,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => Some(repo.get(key).await?.lock().unwrap().clone()), Repo::RwLockTokioMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), Repo::SkipMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), + Repo::SkipMapRwLockParkingLot(repo) => Some(repo.get(key)?.read().clone()), Repo::DashMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), } } @@ -71,6 +76,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.get_metrics().await, Repo::RwLockTokioMutexTokio(repo) => repo.get_metrics().await, Repo::SkipMapMutexStd(repo) => repo.get_metrics(), + Repo::SkipMapRwLockParkingLot(repo) => repo.get_metrics(), Repo::DashMapMutexStd(repo) => repo.get_metrics(), } } @@ -111,6 +117,11 @@ impl Repo { .iter() .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) .collect(), + Repo::SkipMapRwLockParkingLot(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.read().clone())) + .collect(), Repo::DashMapMutexStd(repo) => repo .get_paginated(pagination) .iter() @@ -128,6 +139,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.import_persistent(persistent_torrents).await, Repo::RwLockTokioMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, Repo::SkipMapMutexStd(repo) => repo.import_persistent(persistent_torrents), + Repo::SkipMapRwLockParkingLot(repo) => repo.import_persistent(persistent_torrents), Repo::DashMapMutexStd(repo) => repo.import_persistent(persistent_torrents), } } @@ -141,6 +153,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => Some(repo.remove(key).await?.lock().unwrap().clone()), Repo::RwLockTokioMutexTokio(repo) => 
Some(repo.remove(key).await?.lock().await.clone()), Repo::SkipMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), + Repo::SkipMapRwLockParkingLot(repo) => Some(repo.remove(key)?.write().clone()), Repo::DashMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), } } @@ -154,6 +167,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.remove_inactive_peers(current_cutoff).await, Repo::RwLockTokioMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, Repo::SkipMapMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::SkipMapRwLockParkingLot(repo) => repo.remove_inactive_peers(current_cutoff), Repo::DashMapMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), } } @@ -167,6 +181,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.remove_peerless_torrents(policy).await, Repo::RwLockTokioMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, Repo::SkipMapMutexStd(repo) => repo.remove_peerless_torrents(policy), + Repo::SkipMapRwLockParkingLot(repo) => repo.remove_peerless_torrents(policy), Repo::DashMapMutexStd(repo) => repo.remove_peerless_torrents(policy), } } @@ -194,6 +209,9 @@ impl Repo { Repo::SkipMapMutexStd(repo) => { repo.torrents.insert(*info_hash, torrent.into()); } + Repo::SkipMapRwLockParkingLot(repo) => { + repo.torrents.insert(*info_hash, torrent.into()); + } Repo::DashMapMutexStd(repo) => { repo.torrents.insert(*info_hash, torrent.into()); } diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index c0699479e..f672d14ef 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -5,13 +5,14 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_torrent_repository::entry::{Entry as _, EntryAsync as 
_, EntrySync as _}; -use torrust_tracker_torrent_repository::{EntryMutexStd, EntryMutexTokio, EntrySingle}; +use torrust_tracker_torrent_repository::{EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle}; #[derive(Debug, Clone)] pub(crate) enum Torrent { Single(EntrySingle), MutexStd(EntryMutexStd), MutexTokio(EntryMutexTokio), + RwLockParkingLot(EntryRwLockParkingLot), } impl Torrent { @@ -20,6 +21,7 @@ impl Torrent { Torrent::Single(entry) => entry.get_swarm_metadata(), Torrent::MutexStd(entry) => entry.get_swarm_metadata(), Torrent::MutexTokio(entry) => entry.clone().get_swarm_metadata().await, + Torrent::RwLockParkingLot(entry) => entry.clone().get_swarm_metadata(), } } @@ -28,6 +30,7 @@ impl Torrent { Torrent::Single(entry) => entry.is_good(policy), Torrent::MutexStd(entry) => entry.is_good(policy), Torrent::MutexTokio(entry) => entry.clone().check_good(policy).await, + Torrent::RwLockParkingLot(entry) => entry.is_good(policy), } } @@ -36,6 +39,7 @@ impl Torrent { Torrent::Single(entry) => entry.peers_is_empty(), Torrent::MutexStd(entry) => entry.peers_is_empty(), Torrent::MutexTokio(entry) => entry.clone().peers_is_empty().await, + Torrent::RwLockParkingLot(entry) => entry.peers_is_empty(), } } @@ -44,6 +48,7 @@ impl Torrent { Torrent::Single(entry) => entry.get_peers_len(), Torrent::MutexStd(entry) => entry.get_peers_len(), Torrent::MutexTokio(entry) => entry.clone().get_peers_len().await, + Torrent::RwLockParkingLot(entry) => entry.get_peers_len(), } } @@ -52,6 +57,7 @@ impl Torrent { Torrent::Single(entry) => entry.get_peers(limit), Torrent::MutexStd(entry) => entry.get_peers(limit), Torrent::MutexTokio(entry) => entry.clone().get_peers(limit).await, + Torrent::RwLockParkingLot(entry) => entry.get_peers(limit), } } @@ -60,6 +66,7 @@ impl Torrent { Torrent::Single(entry) => entry.get_peers_for_client(client, limit), Torrent::MutexStd(entry) => entry.get_peers_for_client(client, limit), Torrent::MutexTokio(entry) => 
entry.clone().get_peers_for_client(client, limit).await, + Torrent::RwLockParkingLot(entry) => entry.get_peers_for_client(client, limit), } } @@ -68,6 +75,7 @@ impl Torrent { Torrent::Single(entry) => entry.upsert_peer(peer), Torrent::MutexStd(entry) => entry.upsert_peer(peer), Torrent::MutexTokio(entry) => entry.clone().upsert_peer(peer).await, + Torrent::RwLockParkingLot(entry) => entry.upsert_peer(peer), } } @@ -76,6 +84,7 @@ impl Torrent { Torrent::Single(entry) => entry.remove_inactive_peers(current_cutoff), Torrent::MutexStd(entry) => entry.remove_inactive_peers(current_cutoff), Torrent::MutexTokio(entry) => entry.clone().remove_inactive_peers(current_cutoff).await, + Torrent::RwLockParkingLot(entry) => entry.remove_inactive_peers(current_cutoff), } } } diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index 3c564c6f8..aa3126000 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -9,7 +9,7 @@ use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::{peer, NumberOfBytes}; -use torrust_tracker_torrent_repository::{EntryMutexStd, EntryMutexTokio, EntrySingle}; +use torrust_tracker_torrent_repository::{EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle}; use crate::common::torrent::Torrent; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; @@ -20,7 +20,7 @@ fn single() -> Torrent { Torrent::Single(EntrySingle::default()) } #[fixture] -fn standard_mutex() -> Torrent { +fn mutex_std() -> Torrent { Torrent::MutexStd(EntryMutexStd::default()) } @@ -29,6 +29,11 @@ fn mutex_tokio() -> Torrent { Torrent::MutexTokio(EntryMutexTokio::default()) } +#[fixture] +fn rw_lock_parking_lot() -> Torrent { + Torrent::RwLockParkingLot(EntryRwLockParkingLot::default()) 
+} + #[fixture] fn policy_none() -> TrackerPolicy { TrackerPolicy::new(false, 0, false) @@ -99,7 +104,7 @@ async fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { #[case::empty(&Makes::Empty)] #[tokio::test] async fn it_should_be_empty_by_default( - #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { make(&mut torrent, makes).await; @@ -115,7 +120,7 @@ async fn it_should_be_empty_by_default( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_check_if_entry_is_good( - #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { @@ -153,7 +158,7 @@ async fn it_should_check_if_entry_is_good( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_get_peers_for_torrent_entry( - #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { let peers = make(&mut torrent, makes).await; @@ -174,10 +179,7 @@ async fn it_should_get_peers_for_torrent_entry( #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_update_a_peer( - #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, - #[case] makes: &Makes, -) { +async fn it_should_update_a_peer(#[values(single(), mutex_std(), mutex_tokio())] mut torrent: Torrent, #[case] makes: &Makes) { make(&mut torrent, makes).await; // Make and insert a new peer. 
@@ -215,7 +217,7 @@ async fn it_should_update_a_peer( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_remove_a_peer_upon_stopped_announcement( - #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { use torrust_tracker_primitives::peer::ReadInfo as _; @@ -256,7 +258,7 @@ async fn it_should_remove_a_peer_upon_stopped_announcement( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloaded_statistic( - #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { make(&mut torrent, makes).await; @@ -287,7 +289,7 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_update_a_peer_as_a_seeder( - #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { let peers = make(&mut torrent, makes).await; @@ -319,7 +321,7 @@ async fn it_should_update_a_peer_as_a_seeder( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_update_a_peer_as_incomplete( - #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { let peers = make(&mut torrent, makes).await; @@ -351,7 +353,7 @@ async fn it_should_update_a_peer_as_incomplete( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_get_peers_excluding_the_client_socket( - #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), 
rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { make(&mut torrent, makes).await; @@ -383,7 +385,7 @@ async fn it_should_get_peers_excluding_the_client_socket( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_limit_the_number_of_peers_returned( - #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { make(&mut torrent, makes).await; @@ -408,7 +410,7 @@ async fn it_should_limit_the_number_of_peers_returned( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_remove_inactive_peers_beyond_cutoff( - #[values(single(), standard_mutex(), mutex_tokio())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { const TIMEOUT: Duration = Duration::from_secs(120); diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index fde34467e..ac53e6510 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -49,10 +49,15 @@ fn tokio_tokio() -> Repo { } #[fixture] -fn skip_list_std() -> Repo { +fn skip_list_mutex_std() -> Repo { Repo::SkipMapMutexStd(CrossbeamSkipList::default()) } +#[fixture] +fn skip_list_rw_lock_parking_lot() -> Repo { + Repo::SkipMapRwLockParkingLot(CrossbeamSkipList::default()) +} + #[fixture] fn dash_map_std() -> Repo { Repo::DashMapMutexStd(XacrimonDashMap::default()) @@ -246,7 +251,8 @@ async fn it_should_get_a_torrent_entry( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std(), + skip_list_mutex_std(), + skip_list_rw_lock_parking_lot(), dash_map_std() )] repo: Repo, @@ -279,7 +285,8 @@ async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std() + skip_list_mutex_std(), + 
skip_list_rw_lock_parking_lot() )] repo: Repo, #[case] entries: Entries, @@ -321,7 +328,8 @@ async fn it_should_get_paginated( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std() + skip_list_mutex_std(), + skip_list_rw_lock_parking_lot() )] repo: Repo, #[case] entries: Entries, @@ -378,7 +386,8 @@ async fn it_should_get_metrics( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std(), + skip_list_mutex_std(), + skip_list_rw_lock_parking_lot(), dash_map_std() )] repo: Repo, @@ -420,7 +429,8 @@ async fn it_should_import_persistent_torrents( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std(), + skip_list_mutex_std(), + skip_list_rw_lock_parking_lot(), dash_map_std() )] repo: Repo, @@ -459,7 +469,8 @@ async fn it_should_remove_an_entry( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std(), + skip_list_mutex_std(), + skip_list_rw_lock_parking_lot(), dash_map_std() )] repo: Repo, @@ -496,7 +507,8 @@ async fn it_should_remove_inactive_peers( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std(), + skip_list_mutex_std(), + skip_list_rw_lock_parking_lot(), dash_map_std() )] repo: Repo, @@ -594,7 +606,8 @@ async fn it_should_remove_peerless_torrents( tokio_std(), tokio_mutex(), tokio_tokio(), - skip_list_std(), + skip_list_mutex_std(), + skip_list_rw_lock_parking_lot(), dash_map_std() )] repo: Repo, From 0fa396cc34ae457ff2855bd339317b3b1dc15672 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 16 Apr 2024 16:18:44 +0100 Subject: [PATCH 0799/1003] chore(deps): add parking_lot to cargo machete It's used for benchmarking in the torrent-repository workspace package. 
--- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 57c18453b..94889cbf0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,7 +79,7 @@ url = "2" uuid = { version = "1", features = ["v4"] } [package.metadata.cargo-machete] -ignored = ["serde_bytes", "crossbeam-skiplist", "dashmap"] +ignored = ["serde_bytes", "crossbeam-skiplist", "dashmap", "parking_lot"] [dev-dependencies] local-ip-address = "0" From 0058e72550d827da08071b63961ce78596d758e1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 16 Apr 2024 16:28:38 +0100 Subject: [PATCH 0800/1003] feat: new torrent repo implementation using parking_lot Mutex --- .../benches/repository_benchmark.rs | 23 ++++- packages/torrent-repository/src/entry/mod.rs | 1 + .../src/entry/mutex_parking_lot.rs | 24 ++--- .../src/entry/rw_lock_parking_lot.rs | 49 ++++++++++ packages/torrent-repository/src/lib.rs | 2 + .../src/repository/skip_map_mutex_std.rs | 93 ++++++++++++++++++- .../torrent-repository/tests/common/repo.rs | 21 ++++- .../tests/common/torrent.rs | 13 ++- .../torrent-repository/tests/entry/mod.rs | 29 +++--- .../tests/repository/mod.rs | 13 +++ 10 files changed, 240 insertions(+), 28 deletions(-) create mode 100644 packages/torrent-repository/src/entry/rw_lock_parking_lot.rs diff --git a/packages/torrent-repository/benches/repository_benchmark.rs b/packages/torrent-repository/benches/repository_benchmark.rs index 75e7fd5b8..4e50f1454 100644 --- a/packages/torrent-repository/benches/repository_benchmark.rs +++ b/packages/torrent-repository/benches/repository_benchmark.rs @@ -5,7 +5,8 @@ mod helpers; use criterion::{criterion_group, criterion_main, Criterion}; use torrust_tracker_torrent_repository::{ TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, - TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexStd, TorrentsSkipMapRwLockParkingLot, + 
TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexParkingLot, TorrentsSkipMapMutexStd, + TorrentsSkipMapRwLockParkingLot, }; use crate::helpers::{asyn, sync}; @@ -49,6 +50,10 @@ fn add_one_torrent(c: &mut Criterion) { b.iter_custom(sync::add_one_torrent::); }); + group.bench_function("SkipMapMutexParkingLot", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + group.bench_function("SkipMapRwLockParkingLot", |b| { b.iter_custom(sync::add_one_torrent::); }); @@ -106,6 +111,11 @@ fn add_multiple_torrents_in_parallel(c: &mut Criterion) { .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); }); + group.bench_function("SkipMapMutexParkingLot", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + group.bench_function("SkipMapRwLockParkingLot", |b| { b.to_async(&rt) .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); @@ -165,6 +175,11 @@ fn update_one_torrent_in_parallel(c: &mut Criterion) { .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); }); + group.bench_function("SkipMapMutexParkingLot", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + group.bench_function("SkipMapRwLockParkingLot", |b| { b.to_async(&rt) .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); @@ -225,6 +240,12 @@ fn update_multiple_torrents_in_parallel(c: &mut Criterion) { .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); }); + group.bench_function("SkipMapMutexParkingLot", |b| { + b.to_async(&rt).iter_custom(|iters| { + sync::update_multiple_torrents_in_parallel::(&rt, iters, None) + }); + }); + group.bench_function("SkipMapRwLockParkingLot", |b| { b.to_async(&rt).iter_custom(|iters| { sync::update_multiple_torrents_in_parallel::(&rt, iters, None) diff --git 
a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index dbe1416be..b811d3262 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -12,6 +12,7 @@ pub mod mutex_parking_lot; pub mod mutex_std; pub mod mutex_tokio; pub mod peer_list; +pub mod rw_lock_parking_lot; pub mod single; pub trait Entry { diff --git a/packages/torrent-repository/src/entry/mutex_parking_lot.rs b/packages/torrent-repository/src/entry/mutex_parking_lot.rs index ef0e958d5..4f3921ea7 100644 --- a/packages/torrent-repository/src/entry/mutex_parking_lot.rs +++ b/packages/torrent-repository/src/entry/mutex_parking_lot.rs @@ -6,44 +6,44 @@ use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use super::{Entry, EntrySync}; -use crate::{EntryRwLockParkingLot, EntrySingle}; +use crate::{EntryMutexParkingLot, EntrySingle}; -impl EntrySync for EntryRwLockParkingLot { +impl EntrySync for EntryMutexParkingLot { fn get_swarm_metadata(&self) -> SwarmMetadata { - self.read().get_swarm_metadata() + self.lock().get_swarm_metadata() } fn is_good(&self, policy: &TrackerPolicy) -> bool { - self.read().is_good(policy) + self.lock().is_good(policy) } fn peers_is_empty(&self) -> bool { - self.read().peers_is_empty() + self.lock().peers_is_empty() } fn get_peers_len(&self) -> usize { - self.read().get_peers_len() + self.lock().get_peers_len() } fn get_peers(&self, limit: Option) -> Vec> { - self.read().get_peers(limit) + self.lock().get_peers(limit) } fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { - self.read().get_peers_for_client(client, limit) + self.lock().get_peers_for_client(client, limit) } fn upsert_peer(&self, peer: &peer::Peer) -> bool { - self.write().upsert_peer(peer) + self.lock().upsert_peer(peer) } fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - 
self.write().remove_inactive_peers(current_cutoff); + self.lock().remove_inactive_peers(current_cutoff); } } -impl From for EntryRwLockParkingLot { +impl From for EntryMutexParkingLot { fn from(entry: EntrySingle) -> Self { - Arc::new(parking_lot::RwLock::new(entry)) + Arc::new(parking_lot::Mutex::new(entry)) } } diff --git a/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs b/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs new file mode 100644 index 000000000..ef0e958d5 --- /dev/null +++ b/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs @@ -0,0 +1,49 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntrySync}; +use crate::{EntryRwLockParkingLot, EntrySingle}; + +impl EntrySync for EntryRwLockParkingLot { + fn get_swarm_metadata(&self) -> SwarmMetadata { + self.read().get_swarm_metadata() + } + + fn is_good(&self, policy: &TrackerPolicy) -> bool { + self.read().is_good(policy) + } + + fn peers_is_empty(&self) -> bool { + self.read().peers_is_empty() + } + + fn get_peers_len(&self) -> usize { + self.read().get_peers_len() + } + + fn get_peers(&self, limit: Option) -> Vec> { + self.read().get_peers(limit) + } + + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.read().get_peers_for_client(client, limit) + } + + fn upsert_peer(&self, peer: &peer::Peer) -> bool { + self.write().upsert_peer(peer) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + self.write().remove_inactive_peers(current_cutoff); + } +} + +impl From for EntryRwLockParkingLot { + fn from(entry: EntrySingle) -> Self { + Arc::new(parking_lot::RwLock::new(entry)) + } +} diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 5d3a7ed45..a8955808e 100644 --- 
a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -14,6 +14,7 @@ pub mod repository; pub type EntrySingle = entry::Torrent; pub type EntryMutexStd = Arc>; pub type EntryMutexTokio = Arc>; +pub type EntryMutexParkingLot = Arc>; pub type EntryRwLockParkingLot = Arc>; // Repos @@ -26,6 +27,7 @@ pub type TorrentsRwLockTokioMutexStd = RwLockTokio; pub type TorrentsRwLockTokioMutexTokio = RwLockTokio; pub type TorrentsSkipMapMutexStd = CrossbeamSkipList; +pub type TorrentsSkipMapMutexParkingLot = CrossbeamSkipList; pub type TorrentsSkipMapRwLockParkingLot = CrossbeamSkipList; pub type TorrentsDashMapMutexStd = XacrimonDashMap; diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index 0a2a566e7..9960b0c30 100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -11,7 +11,7 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent use super::Repository; use crate::entry::peer_list::PeerList; use crate::entry::{Entry, EntrySync}; -use crate::{EntryMutexStd, EntryRwLockParkingLot, EntrySingle}; +use crate::{EntryMutexParkingLot, EntryMutexStd, EntryRwLockParkingLot, EntrySingle}; #[derive(Default, Debug)] pub struct CrossbeamSkipList { @@ -199,3 +199,94 @@ where } } } + +impl Repository for CrossbeamSkipList +where + EntryMutexParkingLot: EntrySync, + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); + entry.value().upsert_peer(peer); + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let maybe_entry = self.torrents.get(key); + maybe_entry.map(|entry| 
entry.value().clone()) + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in &self.torrents { + let stats = entry.value().lock().get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexParkingLot)> { + match pagination { + Some(pagination) => self + .torrents + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .torrents + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + for (info_hash, completed) in persistent_torrents { + if self.torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexParkingLot::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + // Since SkipMap is lock-free the torrent could have been inserted + // after checking if it exists. 
+ self.torrents.get_or_insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|entry| entry.value().clone()) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { + entry.value().remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + for entry in &self.torrents { + if entry.value().is_good(policy) { + continue; + } + + entry.remove(); + } + } +} diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index c5da6258d..f317d0d17 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -7,8 +7,8 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent use torrust_tracker_torrent_repository::repository::{Repository as _, RepositoryAsync as _}; use torrust_tracker_torrent_repository::{ EntrySingle, TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, - TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexStd, - TorrentsSkipMapRwLockParkingLot, + TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexParkingLot, + TorrentsSkipMapMutexStd, TorrentsSkipMapRwLockParkingLot, }; #[derive(Debug)] @@ -20,6 +20,7 @@ pub(crate) enum Repo { RwLockTokioMutexStd(TorrentsRwLockTokioMutexStd), RwLockTokioMutexTokio(TorrentsRwLockTokioMutexTokio), SkipMapMutexStd(TorrentsSkipMapMutexStd), + SkipMapMutexParkingLot(TorrentsSkipMapMutexParkingLot), SkipMapRwLockParkingLot(TorrentsSkipMapRwLockParkingLot), DashMapMutexStd(TorrentsDashMapMutexStd), } @@ -34,6 +35,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.upsert_peer(info_hash, peer).await, Repo::RwLockTokioMutexTokio(repo) => repo.upsert_peer(info_hash, peer).await, 
Repo::SkipMapMutexStd(repo) => repo.upsert_peer(info_hash, peer), + Repo::SkipMapMutexParkingLot(repo) => repo.upsert_peer(info_hash, peer), Repo::SkipMapRwLockParkingLot(repo) => repo.upsert_peer(info_hash, peer), Repo::DashMapMutexStd(repo) => repo.upsert_peer(info_hash, peer), } @@ -48,6 +50,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.get_swarm_metadata(info_hash).await, Repo::RwLockTokioMutexTokio(repo) => repo.get_swarm_metadata(info_hash).await, Repo::SkipMapMutexStd(repo) => repo.get_swarm_metadata(info_hash), + Repo::SkipMapMutexParkingLot(repo) => repo.get_swarm_metadata(info_hash), Repo::SkipMapRwLockParkingLot(repo) => repo.get_swarm_metadata(info_hash), Repo::DashMapMutexStd(repo) => repo.get_swarm_metadata(info_hash), } @@ -62,6 +65,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => Some(repo.get(key).await?.lock().unwrap().clone()), Repo::RwLockTokioMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), Repo::SkipMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), + Repo::SkipMapMutexParkingLot(repo) => Some(repo.get(key)?.lock().clone()), Repo::SkipMapRwLockParkingLot(repo) => Some(repo.get(key)?.read().clone()), Repo::DashMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), } @@ -76,6 +80,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.get_metrics().await, Repo::RwLockTokioMutexTokio(repo) => repo.get_metrics().await, Repo::SkipMapMutexStd(repo) => repo.get_metrics(), + Repo::SkipMapMutexParkingLot(repo) => repo.get_metrics(), Repo::SkipMapRwLockParkingLot(repo) => repo.get_metrics(), Repo::DashMapMutexStd(repo) => repo.get_metrics(), } @@ -117,6 +122,11 @@ impl Repo { .iter() .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) .collect(), + Repo::SkipMapMutexParkingLot(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.lock().clone())) + .collect(), Repo::SkipMapRwLockParkingLot(repo) => repo .get_paginated(pagination) .iter() @@ -139,6 
+149,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.import_persistent(persistent_torrents).await, Repo::RwLockTokioMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, Repo::SkipMapMutexStd(repo) => repo.import_persistent(persistent_torrents), + Repo::SkipMapMutexParkingLot(repo) => repo.import_persistent(persistent_torrents), Repo::SkipMapRwLockParkingLot(repo) => repo.import_persistent(persistent_torrents), Repo::DashMapMutexStd(repo) => repo.import_persistent(persistent_torrents), } @@ -153,6 +164,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => Some(repo.remove(key).await?.lock().unwrap().clone()), Repo::RwLockTokioMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), Repo::SkipMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), + Repo::SkipMapMutexParkingLot(repo) => Some(repo.remove(key)?.lock().clone()), Repo::SkipMapRwLockParkingLot(repo) => Some(repo.remove(key)?.write().clone()), Repo::DashMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), } @@ -167,6 +179,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.remove_inactive_peers(current_cutoff).await, Repo::RwLockTokioMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, Repo::SkipMapMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::SkipMapMutexParkingLot(repo) => repo.remove_inactive_peers(current_cutoff), Repo::SkipMapRwLockParkingLot(repo) => repo.remove_inactive_peers(current_cutoff), Repo::DashMapMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), } @@ -181,6 +194,7 @@ impl Repo { Repo::RwLockTokioMutexStd(repo) => repo.remove_peerless_torrents(policy).await, Repo::RwLockTokioMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, Repo::SkipMapMutexStd(repo) => repo.remove_peerless_torrents(policy), + Repo::SkipMapMutexParkingLot(repo) => repo.remove_peerless_torrents(policy), Repo::SkipMapRwLockParkingLot(repo) => repo.remove_peerless_torrents(policy), 
Repo::DashMapMutexStd(repo) => repo.remove_peerless_torrents(policy), } @@ -209,6 +223,9 @@ impl Repo { Repo::SkipMapMutexStd(repo) => { repo.torrents.insert(*info_hash, torrent.into()); } + Repo::SkipMapMutexParkingLot(repo) => { + repo.torrents.insert(*info_hash, torrent.into()); + } Repo::SkipMapRwLockParkingLot(repo) => { repo.torrents.insert(*info_hash, torrent.into()); } diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index f672d14ef..abcf5525e 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -5,13 +5,16 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_torrent_repository::entry::{Entry as _, EntryAsync as _, EntrySync as _}; -use torrust_tracker_torrent_repository::{EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle}; +use torrust_tracker_torrent_repository::{ + EntryMutexParkingLot, EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle, +}; #[derive(Debug, Clone)] pub(crate) enum Torrent { Single(EntrySingle), MutexStd(EntryMutexStd), MutexTokio(EntryMutexTokio), + MutexParkingLot(EntryMutexParkingLot), RwLockParkingLot(EntryRwLockParkingLot), } @@ -21,6 +24,7 @@ impl Torrent { Torrent::Single(entry) => entry.get_swarm_metadata(), Torrent::MutexStd(entry) => entry.get_swarm_metadata(), Torrent::MutexTokio(entry) => entry.clone().get_swarm_metadata().await, + Torrent::MutexParkingLot(entry) => entry.clone().get_swarm_metadata(), Torrent::RwLockParkingLot(entry) => entry.clone().get_swarm_metadata(), } } @@ -30,6 +34,7 @@ impl Torrent { Torrent::Single(entry) => entry.is_good(policy), Torrent::MutexStd(entry) => entry.is_good(policy), Torrent::MutexTokio(entry) => entry.clone().check_good(policy).await, + Torrent::MutexParkingLot(entry) 
=> entry.is_good(policy), Torrent::RwLockParkingLot(entry) => entry.is_good(policy), } } @@ -39,6 +44,7 @@ impl Torrent { Torrent::Single(entry) => entry.peers_is_empty(), Torrent::MutexStd(entry) => entry.peers_is_empty(), Torrent::MutexTokio(entry) => entry.clone().peers_is_empty().await, + Torrent::MutexParkingLot(entry) => entry.peers_is_empty(), Torrent::RwLockParkingLot(entry) => entry.peers_is_empty(), } } @@ -48,6 +54,7 @@ impl Torrent { Torrent::Single(entry) => entry.get_peers_len(), Torrent::MutexStd(entry) => entry.get_peers_len(), Torrent::MutexTokio(entry) => entry.clone().get_peers_len().await, + Torrent::MutexParkingLot(entry) => entry.get_peers_len(), Torrent::RwLockParkingLot(entry) => entry.get_peers_len(), } } @@ -57,6 +64,7 @@ impl Torrent { Torrent::Single(entry) => entry.get_peers(limit), Torrent::MutexStd(entry) => entry.get_peers(limit), Torrent::MutexTokio(entry) => entry.clone().get_peers(limit).await, + Torrent::MutexParkingLot(entry) => entry.get_peers(limit), Torrent::RwLockParkingLot(entry) => entry.get_peers(limit), } } @@ -66,6 +74,7 @@ impl Torrent { Torrent::Single(entry) => entry.get_peers_for_client(client, limit), Torrent::MutexStd(entry) => entry.get_peers_for_client(client, limit), Torrent::MutexTokio(entry) => entry.clone().get_peers_for_client(client, limit).await, + Torrent::MutexParkingLot(entry) => entry.get_peers_for_client(client, limit), Torrent::RwLockParkingLot(entry) => entry.get_peers_for_client(client, limit), } } @@ -75,6 +84,7 @@ impl Torrent { Torrent::Single(entry) => entry.upsert_peer(peer), Torrent::MutexStd(entry) => entry.upsert_peer(peer), Torrent::MutexTokio(entry) => entry.clone().upsert_peer(peer).await, + Torrent::MutexParkingLot(entry) => entry.upsert_peer(peer), Torrent::RwLockParkingLot(entry) => entry.upsert_peer(peer), } } @@ -84,6 +94,7 @@ impl Torrent { Torrent::Single(entry) => entry.remove_inactive_peers(current_cutoff), Torrent::MutexStd(entry) => 
entry.remove_inactive_peers(current_cutoff), Torrent::MutexTokio(entry) => entry.clone().remove_inactive_peers(current_cutoff).await, + Torrent::MutexParkingLot(entry) => entry.remove_inactive_peers(current_cutoff), Torrent::RwLockParkingLot(entry) => entry.remove_inactive_peers(current_cutoff), } } diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index aa3126000..3b9f3e3ad 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -9,7 +9,9 @@ use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::{peer, NumberOfBytes}; -use torrust_tracker_torrent_repository::{EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle}; +use torrust_tracker_torrent_repository::{ + EntryMutexParkingLot, EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle, +}; use crate::common::torrent::Torrent; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; @@ -29,6 +31,11 @@ fn mutex_tokio() -> Torrent { Torrent::MutexTokio(EntryMutexTokio::default()) } +#[fixture] +fn mutex_parking_lot() -> Torrent { + Torrent::MutexParkingLot(EntryMutexParkingLot::default()) +} + #[fixture] fn rw_lock_parking_lot() -> Torrent { Torrent::RwLockParkingLot(EntryRwLockParkingLot::default()) @@ -104,7 +111,7 @@ async fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { #[case::empty(&Makes::Empty)] #[tokio::test] async fn it_should_be_empty_by_default( - #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { make(&mut torrent, makes).await; @@ -120,7 +127,7 @@ async fn it_should_be_empty_by_default( 
#[case::three(&Makes::Three)] #[tokio::test] async fn it_should_check_if_entry_is_good( - #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { @@ -158,7 +165,7 @@ async fn it_should_check_if_entry_is_good( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_get_peers_for_torrent_entry( - #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { let peers = make(&mut torrent, makes).await; @@ -217,7 +224,7 @@ async fn it_should_update_a_peer(#[values(single(), mutex_std(), mutex_tokio())] #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_remove_a_peer_upon_stopped_announcement( - #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { use torrust_tracker_primitives::peer::ReadInfo as _; @@ -258,7 +265,7 @@ async fn it_should_remove_a_peer_upon_stopped_announcement( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloaded_statistic( - #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { make(&mut torrent, makes).await; @@ -289,7 +296,7 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade #[case::three(&Makes::Three)] 
#[tokio::test] async fn it_should_update_a_peer_as_a_seeder( - #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { let peers = make(&mut torrent, makes).await; @@ -321,7 +328,7 @@ async fn it_should_update_a_peer_as_a_seeder( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_update_a_peer_as_incomplete( - #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { let peers = make(&mut torrent, makes).await; @@ -353,7 +360,7 @@ async fn it_should_update_a_peer_as_incomplete( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_get_peers_excluding_the_client_socket( - #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { make(&mut torrent, makes).await; @@ -385,7 +392,7 @@ async fn it_should_get_peers_excluding_the_client_socket( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_limit_the_number_of_peers_returned( - #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { make(&mut torrent, makes).await; @@ -410,7 +417,7 @@ async fn it_should_limit_the_number_of_peers_returned( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_remove_inactive_peers_beyond_cutoff( - #[values(single(), mutex_std(), mutex_tokio(), rw_lock_parking_lot())] mut torrent: Torrent, + #[values(single(), mutex_std(), mutex_tokio(), 
mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, ) { const TIMEOUT: Duration = Duration::from_secs(120); diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index ac53e6510..dd9893cc9 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -53,6 +53,11 @@ fn skip_list_mutex_std() -> Repo { Repo::SkipMapMutexStd(CrossbeamSkipList::default()) } +#[fixture] +fn skip_list_mutex_parking_lot() -> Repo { + Repo::SkipMapMutexParkingLot(CrossbeamSkipList::default()) +} + #[fixture] fn skip_list_rw_lock_parking_lot() -> Repo { Repo::SkipMapRwLockParkingLot(CrossbeamSkipList::default()) @@ -252,6 +257,7 @@ async fn it_should_get_a_torrent_entry( tokio_mutex(), tokio_tokio(), skip_list_mutex_std(), + skip_list_mutex_parking_lot(), skip_list_rw_lock_parking_lot(), dash_map_std() )] @@ -286,6 +292,7 @@ async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( tokio_mutex(), tokio_tokio(), skip_list_mutex_std(), + skip_list_mutex_parking_lot(), skip_list_rw_lock_parking_lot() )] repo: Repo, @@ -329,6 +336,7 @@ async fn it_should_get_paginated( tokio_mutex(), tokio_tokio(), skip_list_mutex_std(), + skip_list_mutex_parking_lot(), skip_list_rw_lock_parking_lot() )] repo: Repo, @@ -387,6 +395,7 @@ async fn it_should_get_metrics( tokio_mutex(), tokio_tokio(), skip_list_mutex_std(), + skip_list_mutex_parking_lot(), skip_list_rw_lock_parking_lot(), dash_map_std() )] @@ -430,6 +439,7 @@ async fn it_should_import_persistent_torrents( tokio_mutex(), tokio_tokio(), skip_list_mutex_std(), + skip_list_mutex_parking_lot(), skip_list_rw_lock_parking_lot(), dash_map_std() )] @@ -470,6 +480,7 @@ async fn it_should_remove_an_entry( tokio_mutex(), tokio_tokio(), skip_list_mutex_std(), + skip_list_mutex_parking_lot(), skip_list_rw_lock_parking_lot(), dash_map_std() )] @@ -508,6 +519,7 @@ async fn 
it_should_remove_inactive_peers( tokio_mutex(), tokio_tokio(), skip_list_mutex_std(), + skip_list_mutex_parking_lot(), skip_list_rw_lock_parking_lot(), dash_map_std() )] @@ -607,6 +619,7 @@ async fn it_should_remove_peerless_torrents( tokio_mutex(), tokio_tokio(), skip_list_mutex_std(), + skip_list_mutex_parking_lot(), skip_list_rw_lock_parking_lot(), dash_map_std() )] From 87c9834ed31dc89af2b41aa0ec21101bdf648fda Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 19 Apr 2024 08:13:21 +0100 Subject: [PATCH 0801/1003] chore(deps): update dependencies ```output Updating crates.io index Updating chrono v0.4.37 -> v0.4.38 Updating hyper v1.2.0 -> v1.3.1 Updating proc-macro2 v1.0.80 -> v1.0.81 Updating serde v1.0.197 -> v1.0.198 Updating serde_derive v1.0.197 -> v1.0.198 Updating serde_json v1.0.115 -> v1.0.116 Updating syn v2.0.59 -> v2.0.60 Updating toml_edit v0.22.9 -> v0.22.11 ``` --- Cargo.lock | 90 +++++++++++++++++++++++++++--------------------------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dc6db21c1..70ac685a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -346,7 +346,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -460,7 +460,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -547,7 +547,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -622,7 +622,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", "syn_derive", ] @@ -732,9 +732,9 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "chrono" -version = "0.4.37" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a0d04d43504c61aa6c7531f1871dd0d418d91130162063b789da00fd7057a5e"
+checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", @@ -812,7 +812,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -1085,7 +1085,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -1096,7 +1096,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -1143,7 +1143,7 @@ checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -1378,7 +1378,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -1390,7 +1390,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -1402,7 +1402,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -1495,7 +1495,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -1723,9 +1723,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" dependencies = [ "bytes", "futures-channel", @@ -2196,7 +2196,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -2247,7 +2247,7 @@ dependencies = [ 
"proc-macro-error", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", "termcolor", "thiserror", ] @@ -2447,7 +2447,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -2560,7 +2560,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -2629,7 +2629,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -2803,9 +2803,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.80" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56dea16b0a29e94408b9aa5e2940a4eedbd128a1ba20e8f7ae60fd3d465af0e" +checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" dependencies = [ "unicode-ident", ] @@ -3112,7 +3112,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.59", + "syn 2.0.60", "unicode-ident", ] @@ -3340,9 +3340,9 @@ checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" [[package]] name = "serde" -version = "1.0.197" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" dependencies = [ "serde_derive", ] @@ -3368,13 +3368,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -3392,9 +3392,9 @@ dependencies = [ 
[[package]] name = "serde_json" -version = "1.0.115" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" +checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" dependencies = [ "itoa", "ryu", @@ -3419,7 +3419,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -3470,7 +3470,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -3604,9 +3604,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.59" +version = "2.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a6531ffc7b071655e4ce2e04bd464c4830bb585a61cabb96cf808f05172615a" +checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" dependencies = [ "proc-macro2", "quote", @@ -3622,7 +3622,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -3725,7 +3725,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -3819,7 +3819,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -3865,7 +3865,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.9", + "toml_edit 0.22.11", ] [[package]] @@ -3901,9 +3901,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.9" +version = "0.22.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e40bb779c5187258fd7aad0eb68cb8706a0a81fa712fbea808ab43c4b8374c4" +checksum = "fb686a972ccef8537b39eead3968b0e8616cb5040dbb9bba93007c8e07c9215f" dependencies = [ 
"indexmap 2.2.6", "serde", @@ -4127,7 +4127,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] @@ -4298,7 +4298,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", "wasm-bindgen-shared", ] @@ -4332,7 +4332,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4595,7 +4595,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.59", + "syn 2.0.60", ] [[package]] From b3015968ab9b95077a0a5626901a7ee570e6a8aa Mon Sep 17 00:00:00 2001 From: ngthhu Date: Wed, 24 Apr 2024 15:27:39 +0700 Subject: [PATCH 0802/1003] chore:[#674] Tracker Checker: Ouput in JSON --- Cargo.lock | 1 + Cargo.toml | 2 +- src/console/clients/checker/checks/health.rs | 53 +++++++++---------- src/console/clients/checker/checks/http.rs | 30 +++++++---- src/console/clients/checker/checks/mod.rs | 1 + src/console/clients/checker/checks/structs.rs | 12 +++++ src/console/clients/checker/checks/udp.rs | 42 +++++++++------ src/console/clients/checker/service.rs | 13 +++-- 8 files changed, 95 insertions(+), 59 deletions(-) create mode 100644 src/console/clients/checker/checks/structs.rs diff --git a/Cargo.lock b/Cargo.lock index 70ac685a1..f71275517 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3396,6 +3396,7 @@ version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" dependencies = [ + "indexmap 2.2.6", "itoa", "ryu", "serde", diff --git a/Cargo.toml b/Cargo.toml index 94889cbf0..e201f5ba9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,7 +62,7 @@ ringbuf = "0" serde = { version = "1", features = ["derive"] } 
serde_bencode = "0" serde_bytes = "0" -serde_json = "1" +serde_json = { version = "1", features = ["preserve_order"] } serde_repr = "0" thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } diff --git a/src/console/clients/checker/checks/health.rs b/src/console/clients/checker/checks/health.rs index 9c28da514..bc545510b 100644 --- a/src/console/clients/checker/checks/health.rs +++ b/src/console/clients/checker/checks/health.rs @@ -1,51 +1,50 @@ use std::time::Duration; -use colored::Colorize; use reqwest::{Client as HttpClient, Url, Url as ServiceUrl}; -use crate::console::clients::checker::console::Console; -use crate::console::clients::checker::printer::Printer; use crate::console::clients::checker::service::{CheckError, CheckResult}; -pub async fn run(health_checks: &Vec, console: &Console, check_results: &mut Vec) { - console.println("Health checks ..."); +use super::structs::{CheckerOutput, Status}; + +#[allow(clippy::missing_panics_doc)] +pub async fn run(health_checks: &Vec, check_results: &mut Vec) -> Vec { + let mut health_checkers: Vec = Vec::new(); for health_check_url in health_checks { - match run_health_check(health_check_url.clone(), console).await { - Ok(()) => check_results.push(Ok(())), - Err(err) => check_results.push(Err(err)), + let mut health_checker = CheckerOutput { + url: health_check_url.to_string(), + status: Status { + code: String::new(), + message: String::new(), + }, + }; + match run_health_check(health_check_url.clone()).await { + Ok(()) => { + check_results.push(Ok(())); + health_checker.status.code = "ok".to_string(); + } + Err(err) => { + check_results.push(Err(err)); + health_checker.status.code = "error".to_string(); + health_checker.status.message = "Health API is failing.".to_string(); + } } + health_checkers.push(health_checker); } + health_checkers } -async fn run_health_check(url: Url, console: &Console) -> Result<(), CheckError> { +async fn run_health_check(url: Url) 
-> Result<(), CheckError> { let client = HttpClient::builder().timeout(Duration::from_secs(5)).build().unwrap(); - let colored_url = url.to_string().yellow(); - match client.get(url.clone()).send().await { Ok(response) => { if response.status().is_success() { - console.println(&format!("{} - Health API at {} is OK", "✓".green(), colored_url)); Ok(()) } else { - console.eprintln(&format!( - "{} - Health API at {} is failing: {:?}", - "✗".red(), - colored_url, - response - )); Err(CheckError::HealthCheckError { url }) } } - Err(err) => { - console.eprintln(&format!( - "{} - Health API at {} is failing: {:?}", - "✗".red(), - colored_url, - err - )); - Err(CheckError::HealthCheckError { url }) - } + Err(_) => Err(CheckError::HealthCheckError { url }), } } diff --git a/src/console/clients/checker/checks/http.rs b/src/console/clients/checker/checks/http.rs index 501696df4..f65674ceb 100644 --- a/src/console/clients/checker/checks/http.rs +++ b/src/console/clients/checker/checks/http.rs @@ -1,47 +1,57 @@ use std::str::FromStr; -use colored::Colorize; use log::debug; use reqwest::Url as ServiceUrl; use torrust_tracker_primitives::info_hash::InfoHash; use url::Url; -use crate::console::clients::checker::console::Console; -use crate::console::clients::checker::printer::Printer; use crate::console::clients::checker::service::{CheckError, CheckResult}; use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; use crate::shared::bit_torrent::tracker::http::client::responses::scrape; use crate::shared::bit_torrent::tracker::http::client::{requests, Client}; -pub async fn run(http_trackers: &Vec, console: &Console, check_results: &mut Vec) { - console.println("HTTP trackers ..."); +use super::structs::{CheckerOutput, Status}; + +#[allow(clippy::missing_panics_doc)] +pub async fn run(http_trackers: &Vec, check_results: &mut Vec) -> Vec { + let mut http_checkers: Vec 
= Vec::new(); for http_tracker in http_trackers { - let colored_tracker_url = http_tracker.to_string().yellow(); + let mut http_checker = CheckerOutput { + url: http_tracker.to_string(), + status: Status { + code: String::new(), + message: String::new(), + }, + }; match check_http_announce(http_tracker).await { Ok(()) => { check_results.push(Ok(())); - console.println(&format!("{} - Announce at {} is OK", "✓".green(), colored_tracker_url)); + http_checker.status.code = "ok".to_string(); } Err(err) => { check_results.push(Err(err)); - console.println(&format!("{} - Announce at {} is failing", "✗".red(), colored_tracker_url)); + http_checker.status.code = "error".to_string(); + http_checker.status.message = "Announce is failing.".to_string(); } } match check_http_scrape(http_tracker).await { Ok(()) => { check_results.push(Ok(())); - console.println(&format!("{} - Scrape at {} is OK", "✓".green(), colored_tracker_url)); + http_checker.status.code = "ok".to_string(); } Err(err) => { check_results.push(Err(err)); - console.println(&format!("{} - Scrape at {} is failing", "✗".red(), colored_tracker_url)); + http_checker.status.code = "error".to_string(); + http_checker.status.message = "Scrape is failing.".to_string(); } } + http_checkers.push(http_checker); } + http_checkers } async fn check_http_announce(tracker_url: &Url) -> Result<(), CheckError> { diff --git a/src/console/clients/checker/checks/mod.rs b/src/console/clients/checker/checks/mod.rs index 16256595e..f8b03f749 100644 --- a/src/console/clients/checker/checks/mod.rs +++ b/src/console/clients/checker/checks/mod.rs @@ -1,3 +1,4 @@ pub mod health; pub mod http; +pub mod structs; pub mod udp; diff --git a/src/console/clients/checker/checks/structs.rs b/src/console/clients/checker/checks/structs.rs new file mode 100644 index 000000000..d28e20c04 --- /dev/null +++ b/src/console/clients/checker/checks/structs.rs @@ -0,0 +1,12 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +pub struct 
Status { + pub code: String, + pub message: String, +} +#[derive(Serialize, Deserialize)] +pub struct CheckerOutput { + pub url: String, + pub status: Status, +} diff --git a/src/console/clients/checker/checks/udp.rs b/src/console/clients/checker/checks/udp.rs index 47a2a1a00..e9a777a8d 100644 --- a/src/console/clients/checker/checks/udp.rs +++ b/src/console/clients/checker/checks/udp.rs @@ -1,26 +1,32 @@ use std::net::SocketAddr; use aquatic_udp_protocol::{Port, TransactionId}; -use colored::Colorize; use hex_literal::hex; use log::debug; use torrust_tracker_primitives::info_hash::InfoHash; -use crate::console::clients::checker::console::Console; -use crate::console::clients::checker::printer::Printer; use crate::console::clients::checker::service::{CheckError, CheckResult}; use crate::console::clients::udp::checker; +use crate::console::clients::checker::checks::structs::{CheckerOutput, Status}; + const ASSIGNED_BY_OS: u16 = 0; const RANDOM_TRANSACTION_ID: i32 = -888_840_697; -pub async fn run(udp_trackers: &Vec, console: &Console, check_results: &mut Vec) { - console.println("UDP trackers ..."); +#[allow(clippy::missing_panics_doc)] +pub async fn run(udp_trackers: &Vec, check_results: &mut Vec) -> Vec { + let mut udp_checkers: Vec = Vec::new(); for udp_tracker in udp_trackers { - debug!("UDP tracker: {:?}", udp_tracker); + let mut checker_output = CheckerOutput { + url: udp_tracker.to_string(), + status: Status { + code: String::new(), + message: String::new(), + }, + }; - let colored_tracker_url = udp_tracker.to_string().yellow(); + debug!("UDP tracker: {:?}", udp_tracker); let transaction_id = TransactionId(RANDOM_TRANSACTION_ID); @@ -32,7 +38,8 @@ pub async fn run(udp_trackers: &Vec, console: &Console, check_result check_results.push(Err(CheckError::UdpError { socket_addr: *udp_tracker, })); - console.println(&format!("{} - Can't connect to socket {}", "✗".red(), colored_tracker_url)); + checker_output.status.code = "error".to_string(); + 
checker_output.status.message = "Can't connect to socket.".to_string(); break; }; @@ -42,11 +49,8 @@ pub async fn run(udp_trackers: &Vec, console: &Console, check_result check_results.push(Err(CheckError::UdpError { socket_addr: *udp_tracker, })); - console.println(&format!( - "{} - Can't make tracker connection request to {}", - "✗".red(), - colored_tracker_url - )); + checker_output.status.code = "error".to_string(); + checker_output.status.message = "Can't make tracker connection request.".to_string(); break; }; @@ -60,13 +64,14 @@ pub async fn run(udp_trackers: &Vec, console: &Console, check_result .is_ok() { check_results.push(Ok(())); - console.println(&format!("{} - Announce at {} is OK", "✓".green(), colored_tracker_url)); + checker_output.status.code = "ok".to_string(); } else { let err = CheckError::UdpError { socket_addr: *udp_tracker, }; check_results.push(Err(err)); - console.println(&format!("{} - Announce at {} is failing", "✗".red(), colored_tracker_url)); + checker_output.status.code = "error".to_string(); + checker_output.status.message = "Announce is failing.".to_string(); } debug!("Send scrape request"); @@ -75,13 +80,16 @@ pub async fn run(udp_trackers: &Vec, console: &Console, check_result if (client.send_scrape_request(connection_id, transaction_id, info_hashes).await).is_ok() { check_results.push(Ok(())); - console.println(&format!("{} - Announce at {} is OK", "✓".green(), colored_tracker_url)); + checker_output.status.code = "ok".to_string(); } else { let err = CheckError::UdpError { socket_addr: *udp_tracker, }; check_results.push(Err(err)); - console.println(&format!("{} - Announce at {} is failing", "✗".red(), colored_tracker_url)); + checker_output.status.code = "error".to_string(); + checker_output.status.message = "Scrape is failing.".to_string(); } + udp_checkers.push(checker_output); } + udp_checkers } diff --git a/src/console/clients/checker/service.rs b/src/console/clients/checker/service.rs index 94eff4a88..fd97342cc 100644 --- 
a/src/console/clients/checker/service.rs +++ b/src/console/clients/checker/service.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use reqwest::Url; -use super::checks; +use super::checks::{self}; use super::config::Configuration; use super::console::Console; use crate::console::clients::checker::printer::Printer; @@ -26,16 +26,21 @@ impl Service { /// # Errors /// /// Will return OK is all checks pass or an array with the check errors. + #[allow(clippy::missing_panics_doc)] pub async fn run_checks(&self) -> Vec { self.console.println("Running checks for trackers ..."); let mut check_results = vec![]; - checks::udp::run(&self.config.udp_trackers, &self.console, &mut check_results).await; + let udp_checkers = checks::udp::run(&self.config.udp_trackers, &mut check_results).await; - checks::http::run(&self.config.http_trackers, &self.console, &mut check_results).await; + let http_checkers = checks::http::run(&self.config.http_trackers, &mut check_results).await; - checks::health::run(&self.config.health_checks, &self.console, &mut check_results).await; + let health_checkers = checks::health::run(&self.config.health_checks, &mut check_results).await; + + let json_output = + serde_json::json!({ "udp_trackers": udp_checkers, "http_trackers": http_checkers, "health_checks": health_checkers }); + self.console.println(&serde_json::to_string_pretty(&json_output).unwrap()); check_results } From 7de4fbcf727e0b2484c80c5385dca78aba20446b Mon Sep 17 00:00:00 2001 From: ngthhu Date: Wed, 24 Apr 2024 21:25:30 +0700 Subject: [PATCH 0803/1003] format fix --- src/console/clients/checker/checks/health.rs | 3 +-- src/console/clients/checker/checks/http.rs | 3 +-- src/console/clients/checker/checks/udp.rs | 3 +-- src/console/clients/checker/service.rs | 2 -- 4 files changed, 3 insertions(+), 8 deletions(-) diff --git a/src/console/clients/checker/checks/health.rs b/src/console/clients/checker/checks/health.rs index bc545510b..47eec4cbd 100644 --- a/src/console/clients/checker/checks/health.rs +++ 
b/src/console/clients/checker/checks/health.rs @@ -2,9 +2,8 @@ use std::time::Duration; use reqwest::{Client as HttpClient, Url, Url as ServiceUrl}; -use crate::console::clients::checker::service::{CheckError, CheckResult}; - use super::structs::{CheckerOutput, Status}; +use crate::console::clients::checker::service::{CheckError, CheckResult}; #[allow(clippy::missing_panics_doc)] pub async fn run(health_checks: &Vec, check_results: &mut Vec) -> Vec { diff --git a/src/console/clients/checker/checks/http.rs b/src/console/clients/checker/checks/http.rs index f65674ceb..e0b14b480 100644 --- a/src/console/clients/checker/checks/http.rs +++ b/src/console/clients/checker/checks/http.rs @@ -5,14 +5,13 @@ use reqwest::Url as ServiceUrl; use torrust_tracker_primitives::info_hash::InfoHash; use url::Url; +use super::structs::{CheckerOutput, Status}; use crate::console::clients::checker::service::{CheckError, CheckResult}; use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; use crate::shared::bit_torrent::tracker::http::client::responses::scrape; use crate::shared::bit_torrent::tracker::http::client::{requests, Client}; -use super::structs::{CheckerOutput, Status}; - #[allow(clippy::missing_panics_doc)] pub async fn run(http_trackers: &Vec, check_results: &mut Vec) -> Vec { let mut http_checkers: Vec = Vec::new(); diff --git a/src/console/clients/checker/checks/udp.rs b/src/console/clients/checker/checks/udp.rs index e9a777a8d..48f72edf9 100644 --- a/src/console/clients/checker/checks/udp.rs +++ b/src/console/clients/checker/checks/udp.rs @@ -5,11 +5,10 @@ use hex_literal::hex; use log::debug; use torrust_tracker_primitives::info_hash::InfoHash; +use crate::console::clients::checker::checks::structs::{CheckerOutput, Status}; use crate::console::clients::checker::service::{CheckError, CheckResult}; use crate::console::clients::udp::checker; -use 
crate::console::clients::checker::checks::structs::{CheckerOutput, Status}; - const ASSIGNED_BY_OS: u16 = 0; const RANDOM_TRANSACTION_ID: i32 = -888_840_697; diff --git a/src/console/clients/checker/service.rs b/src/console/clients/checker/service.rs index fd97342cc..16483e92e 100644 --- a/src/console/clients/checker/service.rs +++ b/src/console/clients/checker/service.rs @@ -28,8 +28,6 @@ impl Service { /// Will return OK is all checks pass or an array with the check errors. #[allow(clippy::missing_panics_doc)] pub async fn run_checks(&self) -> Vec { - self.console.println("Running checks for trackers ..."); - let mut check_results = vec![]; let udp_checkers = checks::udp::run(&self.config.udp_trackers, &mut check_results).await; From b27f002ac6e2fb21142bc21c1eb6580544c1bf2f Mon Sep 17 00:00:00 2001 From: ngthhu Date: Wed, 24 Apr 2024 23:00:27 +0700 Subject: [PATCH 0804/1003] remove unused dependencies --- Cargo.lock | 11 ----------- Cargo.toml | 1 - 2 files changed, 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f71275517..44f9db17c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -836,16 +836,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" -[[package]] -name = "colored" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" -dependencies = [ - "lazy_static", - "windows-sys 0.48.0", -] - [[package]] name = "concurrent-queue" version = "2.4.0" @@ -3926,7 +3916,6 @@ dependencies = [ "axum-server", "chrono", "clap", - "colored", "config", "crossbeam-skiplist", "dashmap", diff --git a/Cargo.toml b/Cargo.toml index e201f5ba9..0e37b7ad0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,7 +39,6 @@ axum-extra = { version = "0", features = ["query"] } axum-server = { version = "0", features = ["tls-rustls"] } chrono = { version = "0", 
default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } -colored = "2" config = "0" crossbeam-skiplist = "0.1" dashmap = "5.5.3" From effca568032eaffcdb9d3f984000b238962ce438 Mon Sep 17 00:00:00 2001 From: ngthhu Date: Thu, 25 Apr 2024 17:22:01 +0700 Subject: [PATCH 0805/1003] refactor: [#681] udp return errors instead of panicking --- src/console/clients/udp/checker.rs | 16 +- src/shared/bit_torrent/tracker/udp/client.rs | 215 +++++++++++-------- tests/servers/udp/contract.rs | 71 ++++-- 3 files changed, 193 insertions(+), 109 deletions(-) diff --git a/src/console/clients/udp/checker.rs b/src/console/clients/udp/checker.rs index 12b8d764c..9b2a9011e 100644 --- a/src/console/clients/udp/checker.rs +++ b/src/console/clients/udp/checker.rs @@ -64,7 +64,7 @@ impl Client { let binding_address = local_bind_to.parse().context("binding local address")?; debug!("Binding to: {local_bind_to}"); - let udp_client = UdpClient::bind(&local_bind_to).await; + let udp_client = UdpClient::bind(&local_bind_to).await?; let bound_to = udp_client.socket.local_addr().context("bound local address")?; debug!("Bound to: {bound_to}"); @@ -88,7 +88,7 @@ impl Client { match &self.udp_tracker_client { Some(client) => { - client.udp_client.connect(&tracker_socket_addr.to_string()).await; + client.udp_client.connect(&tracker_socket_addr.to_string()).await?; self.remote_socket = Some(*tracker_socket_addr); Ok(()) } @@ -116,9 +116,9 @@ impl Client { match &self.udp_tracker_client { Some(client) => { - client.send(connect_request.into()).await; + client.send(connect_request.into()).await?; - let response = client.receive().await; + let response = client.receive().await?; debug!("connection request response:\n{response:#?}"); @@ -163,9 +163,9 @@ impl Client { match &self.udp_tracker_client { Some(client) => { - client.send(announce_request.into()).await; + client.send(announce_request.into()).await?; - let response = client.receive().await; + let 
response = client.receive().await?; debug!("announce request response:\n{response:#?}"); @@ -200,9 +200,9 @@ impl Client { match &self.udp_tracker_client { Some(client) => { - client.send(scrape_request.into()).await; + client.send(scrape_request.into()).await?; - let response = client.receive().await; + let response = client.receive().await?; debug!("scrape request response:\n{response:#?}"); diff --git a/src/shared/bit_torrent/tracker/udp/client.rs b/src/shared/bit_torrent/tracker/udp/client.rs index 11c8d8f62..9af9571bc 100644 --- a/src/shared/bit_torrent/tracker/udp/client.rs +++ b/src/shared/bit_torrent/tracker/udp/client.rs @@ -1,8 +1,10 @@ +use core::result::Result::{Err, Ok}; use std::io::Cursor; use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; +use anyhow::{anyhow, Context, Result}; use aquatic_udp_protocol::{ConnectRequest, Request, Response, TransactionId}; use log::debug; use tokio::net::UdpSocket; @@ -25,99 +27,120 @@ pub struct UdpClient { } impl UdpClient { - /// # Panics + /// # Errors + /// + /// Will return error if the local address can't be bound. /// - /// Will panic if the local address can't be bound. - pub async fn bind(local_address: &str) -> Self { - let valid_socket_addr = local_address + pub async fn bind(local_address: &str) -> Result { + let socket_addr = local_address .parse::() - .unwrap_or_else(|_| panic!("{local_address} is not a valid socket address")); + .context(format!("{local_address} is not a valid socket address"))?; - let socket = UdpSocket::bind(valid_socket_addr).await.unwrap(); + let socket = UdpSocket::bind(socket_addr).await?; - Self { + let udp_client = Self { socket: Arc::new(socket), timeout: DEFAULT_TIMEOUT, - } + }; + Ok(udp_client) } - /// # Panics + /// # Errors /// - /// Will panic if can't connect to the socket. - pub async fn connect(&self, remote_address: &str) { - let valid_socket_addr = remote_address + /// Will return error if can't connect to the socket. 
+ pub async fn connect(&self, remote_address: &str) -> Result<()> { + let socket_addr = remote_address .parse::() - .unwrap_or_else(|_| panic!("{remote_address} is not a valid socket address")); + .context(format!("{remote_address} is not a valid socket address"))?; - match self.socket.connect(valid_socket_addr).await { - Ok(()) => debug!("Connected successfully"), - Err(e) => panic!("Failed to connect: {e:?}"), + match self.socket.connect(socket_addr).await { + Ok(()) => { + debug!("Connected successfully"); + Ok(()) + } + Err(e) => Err(anyhow!("Failed to connect: {e:?}")), } } - /// # Panics + /// # Errors /// - /// Will panic if: + /// Will return error if: /// /// - Can't write to the socket. /// - Can't send data. - pub async fn send(&self, bytes: &[u8]) -> usize { + pub async fn send(&self, bytes: &[u8]) -> Result { debug!(target: "UDP client", "sending {bytes:?} ..."); match time::timeout(self.timeout, self.socket.writable()).await { - Ok(writable_result) => match writable_result { - Ok(()) => (), - Err(e) => panic!("{}", format!("IO error waiting for the socket to become readable: {e:?}")), - }, - Err(e) => panic!("{}", format!("Timeout waiting for the socket to become readable: {e:?}")), + Ok(writable_result) => { + match writable_result { + Ok(()) => (), + Err(e) => return Err(anyhow!("IO error waiting for the socket to become readable: {e:?}")), + }; + } + Err(e) => return Err(anyhow!("Timeout waiting for the socket to become readable: {e:?}")), }; match time::timeout(self.timeout, self.socket.send(bytes)).await { Ok(send_result) => match send_result { - Ok(size) => size, - Err(e) => panic!("{}", format!("IO error during send: {e:?}")), + Ok(size) => Ok(size), + Err(e) => Err(anyhow!("IO error during send: {e:?}")), }, - Err(e) => panic!("{}", format!("Send operation timed out: {e:?}")), + Err(e) => Err(anyhow!("Send operation timed out: {e:?}")), } } - /// # Panics + /// # Errors /// - /// Will panic if: + /// Will return error if: /// /// - Can't read 
from the socket. /// - Can't receive data. - pub async fn receive(&self, bytes: &mut [u8]) -> usize { + /// + /// # Panics + /// + pub async fn receive(&self, bytes: &mut [u8]) -> Result { debug!(target: "UDP client", "receiving ..."); match time::timeout(self.timeout, self.socket.readable()).await { - Ok(readable_result) => match readable_result { - Ok(()) => (), - Err(e) => panic!("{}", format!("IO error waiting for the socket to become readable: {e:?}")), - }, - Err(e) => panic!("{}", format!("Timeout waiting for the socket to become readable: {e:?}")), + Ok(readable_result) => { + match readable_result { + Ok(()) => (), + Err(e) => return Err(anyhow!("IO error waiting for the socket to become readable: {e:?}")), + }; + } + Err(e) => return Err(anyhow!("Timeout waiting for the socket to become readable: {e:?}")), }; - let size = match time::timeout(self.timeout, self.socket.recv(bytes)).await { + let size_result = match time::timeout(self.timeout, self.socket.recv(bytes)).await { Ok(recv_result) => match recv_result { - Ok(size) => size, - Err(e) => panic!("{}", format!("IO error during send: {e:?}")), + Ok(size) => Ok(size), + Err(e) => Err(anyhow!("IO error during send: {e:?}")), }, - Err(e) => panic!("{}", format!("Receive operation timed out: {e:?}")), + Err(e) => Err(anyhow!("Receive operation timed out: {e:?}")), }; - debug!(target: "UDP client", "{size} bytes received {bytes:?}"); - - size + if size_result.is_ok() { + let size = size_result.as_ref().unwrap(); + debug!(target: "UDP client", "{size} bytes received {bytes:?}"); + size_result + } else { + size_result + } } } /// Creates a new `UdpClient` connected to a Udp server -pub async fn new_udp_client_connected(remote_address: &str) -> UdpClient { +/// +/// # Errors +/// +/// Will return any errors present in the call stack +/// +pub async fn new_udp_client_connected(remote_address: &str) -> Result { let port = 0; // Let OS choose an unused port. 
- let client = UdpClient::bind(&source_address(port)).await; - client.connect(remote_address).await; - client + let client = UdpClient::bind(&source_address(port)).await?; + client.connect(remote_address).await?; + Ok(client) } #[allow(clippy::module_name_repetitions)] @@ -127,85 +150,103 @@ pub struct UdpTrackerClient { } impl UdpTrackerClient { - /// # Panics + /// # Errors /// - /// Will panic if can't write request to bytes. - pub async fn send(&self, request: Request) -> usize { + /// Will return error if can't write request to bytes. + pub async fn send(&self, request: Request) -> Result { debug!(target: "UDP tracker client", "send request {request:?}"); // Write request into a buffer let request_buffer = vec![0u8; MAX_PACKET_SIZE]; let mut cursor = Cursor::new(request_buffer); - let request_data = match request.write(&mut cursor) { + let request_data_result = match request.write(&mut cursor) { Ok(()) => { #[allow(clippy::cast_possible_truncation)] let position = cursor.position() as usize; let inner_request_buffer = cursor.get_ref(); // Return slice which contains written request data - &inner_request_buffer[..position] + Ok(&inner_request_buffer[..position]) } - Err(e) => panic!("could not write request to bytes: {e}."), + Err(e) => Err(anyhow!("could not write request to bytes: {e}.")), }; + let request_data = request_data_result?; + self.udp_client.send(request_data).await } - /// # Panics + /// # Errors /// - /// Will panic if can't create response from the received payload (bytes buffer). - pub async fn receive(&self) -> Response { + /// Will return error if can't create response from the received payload (bytes buffer). + pub async fn receive(&self) -> Result { let mut response_buffer = [0u8; MAX_PACKET_SIZE]; - let payload_size = self.udp_client.receive(&mut response_buffer).await; + let payload_size = self.udp_client.receive(&mut response_buffer).await?; debug!(target: "UDP tracker client", "received {payload_size} bytes. 
Response {response_buffer:?}"); - Response::from_bytes(&response_buffer[..payload_size], true).unwrap() + let response = Response::from_bytes(&response_buffer[..payload_size], true)?; + + Ok(response) } } /// Creates a new `UdpTrackerClient` connected to a Udp Tracker server -pub async fn new_udp_tracker_client_connected(remote_address: &str) -> UdpTrackerClient { - let udp_client = new_udp_client_connected(remote_address).await; - UdpTrackerClient { udp_client } +/// +/// # Errors +/// +/// Will return any errors present in the call stack +/// +pub async fn new_udp_tracker_client_connected(remote_address: &str) -> Result { + let udp_client = new_udp_client_connected(remote_address).await?; + let udp_tracker_client = UdpTrackerClient { udp_client }; + Ok(udp_tracker_client) } /// Helper Function to Check if a UDP Service is Connectable /// -/// # Errors +/// # Panics /// /// It will return an error if unable to connect to the UDP service. /// -/// # Panics +/// # Errors +/// pub async fn check(binding: &SocketAddr) -> Result { debug!("Checking Service (detail): {binding:?}."); - let client = new_udp_tracker_client_connected(binding.to_string().as_str()).await; - - let connect_request = ConnectRequest { - transaction_id: TransactionId(123), - }; - - client.send(connect_request.into()).await; - - let process = move |response| { - if matches!(response, Response::Connect(_connect_response)) { - Ok("Connected".to_string()) - } else { - Err("Did not Connect".to_string()) - } - }; - - let sleep = time::sleep(Duration::from_millis(2000)); - tokio::pin!(sleep); - - tokio::select! 
{ - () = &mut sleep => { - Err("Timed Out".to_string()) - } - response = client.receive() => { - process(response) + match new_udp_tracker_client_connected(binding.to_string().as_str()).await { + Ok(client) => { + let connect_request = ConnectRequest { + transaction_id: TransactionId(123), + }; + + // client.send() return usize, but doesn't use here + match client.send(connect_request.into()).await { + Ok(_) => (), + Err(e) => debug!("Error: {e:?}."), + }; + + let process = move |response| { + if matches!(response, Response::Connect(_connect_response)) { + Ok("Connected".to_string()) + } else { + Err("Did not Connect".to_string()) + } + }; + + let sleep = time::sleep(Duration::from_millis(2000)); + tokio::pin!(sleep); + + tokio::select! { + () = &mut sleep => { + Err("Timed Out".to_string()) + } + response = client.receive() => { + process(response.unwrap()) + } + } } + Err(e) => Err(format!("{e:?}")), } } diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index 91dca4d42..56e400f84 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -24,9 +24,15 @@ fn empty_buffer() -> [u8; MAX_PACKET_SIZE] { async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { let connect_request = ConnectRequest { transaction_id }; - client.send(connect_request.into()).await; + match client.send(connect_request.into()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), + }; - let response = client.receive().await; + let response = match client.receive().await { + Ok(response) => response, + Err(err) => panic!("{err}"), + }; match response { Response::Connect(connect_response) => connect_response.connection_id, @@ -38,12 +44,22 @@ async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrac async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { let env = Started::new(&configuration::ephemeral().into()).await; - let client = 
new_udp_client_connected(&env.bind_address().to_string()).await; + let client = match new_udp_client_connected(&env.bind_address().to_string()).await { + Ok(udp_client) => udp_client, + Err(err) => panic!("{err}"), + }; - client.send(&empty_udp_request()).await; + match client.send(&empty_udp_request()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), + }; let mut buffer = empty_buffer(); - client.receive(&mut buffer).await; + match client.receive(&mut buffer).await { + Ok(_) => (), + Err(err) => panic!("{err}"), + }; + let response = Response::from_bytes(&buffer, true).unwrap(); assert!(is_error_response(&response, "bad request")); @@ -63,15 +79,24 @@ mod receiving_a_connection_request { async fn should_return_a_connect_response() { let env = Started::new(&configuration::ephemeral().into()).await; - let client = new_udp_tracker_client_connected(&env.bind_address().to_string()).await; + let client = match new_udp_tracker_client_connected(&env.bind_address().to_string()).await { + Ok(udp_tracker_client) => udp_tracker_client, + Err(err) => panic!("{err}"), + }; let connect_request = ConnectRequest { transaction_id: TransactionId(123), }; - client.send(connect_request.into()).await; + match client.send(connect_request.into()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), + }; - let response = client.receive().await; + let response = match client.receive().await { + Ok(response) => response, + Err(err) => panic!("{err}"), + }; assert!(is_connect_response(&response, TransactionId(123))); @@ -97,7 +122,10 @@ mod receiving_an_announce_request { async fn should_return_an_announce_response() { let env = Started::new(&configuration::ephemeral().into()).await; - let client = new_udp_tracker_client_connected(&env.bind_address().to_string()).await; + let client = match new_udp_tracker_client_connected(&env.bind_address().to_string()).await { + Ok(udp_tracker_client) => udp_tracker_client, + Err(err) => panic!("{err}"), + }; let connection_id = 
send_connection_request(TransactionId(123), &client).await; @@ -118,9 +146,15 @@ mod receiving_an_announce_request { port: Port(client.udp_client.socket.local_addr().unwrap().port()), }; - client.send(announce_request.into()).await; + match client.send(announce_request.into()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), + }; - let response = client.receive().await; + let response = match client.receive().await { + Ok(response) => response, + Err(err) => panic!("{err}"), + }; println!("test response {response:?}"); @@ -143,7 +177,10 @@ mod receiving_an_scrape_request { async fn should_return_a_scrape_response() { let env = Started::new(&configuration::ephemeral().into()).await; - let client = new_udp_tracker_client_connected(&env.bind_address().to_string()).await; + let client = match new_udp_tracker_client_connected(&env.bind_address().to_string()).await { + Ok(udp_tracker_client) => udp_tracker_client, + Err(err) => panic!("{err}"), + }; let connection_id = send_connection_request(TransactionId(123), &client).await; @@ -159,9 +196,15 @@ mod receiving_an_scrape_request { info_hashes, }; - client.send(scrape_request.into()).await; + match client.send(scrape_request.into()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), + }; - let response = client.receive().await; + let response = match client.receive().await { + Ok(response) => response, + Err(err) => panic!("{err}"), + }; assert!(is_scrape_response(&response)); From 895efe9c4154a6883008e9cf26592f3fed38602d Mon Sep 17 00:00:00 2001 From: ngthhu Date: Thu, 2 May 2024 16:02:42 +0700 Subject: [PATCH 0806/1003] refactor: [#680] http return errors instead of panicking --- src/console/clients/checker/checks/http.rs | 22 +++++- src/console/clients/http/app.rs | 10 +-- .../bit_torrent/tracker/http/client/mod.rs | 72 ++++++++++--------- 3 files changed, 63 insertions(+), 41 deletions(-) diff --git a/src/console/clients/checker/checks/http.rs b/src/console/clients/checker/checks/http.rs index 
e0b14b480..e526b5e57 100644 --- a/src/console/clients/checker/checks/http.rs +++ b/src/console/clients/checker/checks/http.rs @@ -61,9 +61,19 @@ async fn check_http_announce(tracker_url: &Url) -> Result<(), CheckError> { // We should change the client to catch that error and return a `CheckError`. // Otherwise the checking process will stop. The idea is to process all checks // and return a final report. - let response = Client::new(tracker_url.clone()) + let Ok(client) = Client::new(tracker_url.clone()) else { + return Err(CheckError::HttpError { + url: (tracker_url.to_owned()), + }); + }; + let Ok(response) = client .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) - .await; + .await + else { + return Err(CheckError::HttpError { + url: (tracker_url.to_owned()), + }); + }; if let Ok(body) = response.bytes().await { if let Ok(_announce_response) = serde_bencode::from_bytes::(&body) { @@ -89,7 +99,13 @@ async fn check_http_scrape(url: &Url) -> Result<(), CheckError> { // We should change the client to catch that error and return a `CheckError`. // Otherwise the checking process will stop. The idea is to process all checks // and return a final report. - let response = Client::new(url.clone()).scrape(&query).await; + + let Ok(client) = Client::new(url.clone()) else { + return Err(CheckError::HttpError { url: (url.to_owned()) }); + }; + let Ok(response) = client.scrape(&query).await else { + return Err(CheckError::HttpError { url: (url.to_owned()) }); + }; if let Ok(body) = response.bytes().await { if let Ok(_scrape_response) = scrape::Response::try_from_bencoded(&body) { diff --git a/src/console/clients/http/app.rs b/src/console/clients/http/app.rs index 511fb6628..8fc9db0c3 100644 --- a/src/console/clients/http/app.rs +++ b/src/console/clients/http/app.rs @@ -64,11 +64,11 @@ async fn announce_command(tracker_url: String, info_hash: String) -> anyhow::Res let info_hash = InfoHash::from_str(&info_hash).expect("Invalid infohash. 
Example infohash: `9c38422213e30bff212b30c360d26f9a02136422`"); - let response = Client::new(base_url) + let response = Client::new(base_url)? .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) - .await; + .await?; - let body = response.bytes().await.unwrap(); + let body = response.bytes().await?; let announce_response: Announce = serde_bencode::from_bytes(&body) .unwrap_or_else(|_| panic!("response body should be a valid announce response, got: \"{:#?}\"", &body)); @@ -85,9 +85,9 @@ async fn scrape_command(tracker_url: &str, info_hashes: &[String]) -> anyhow::Re let query = requests::scrape::Query::try_from(info_hashes).context("failed to parse infohashes")?; - let response = Client::new(base_url).scrape(&query).await; + let response = Client::new(base_url)?.scrape(&query).await?; - let body = response.bytes().await.unwrap(); + let body = response.bytes().await?; let scrape_response = scrape::Response::try_from_bencoded(&body) .unwrap_or_else(|_| panic!("response body should be a valid scrape response, got: \"{:#?}\"", &body)); diff --git a/src/shared/bit_torrent/tracker/http/client/mod.rs b/src/shared/bit_torrent/tracker/http/client/mod.rs index a75b0fec3..f5b1b3310 100644 --- a/src/shared/bit_torrent/tracker/http/client/mod.rs +++ b/src/shared/bit_torrent/tracker/http/client/mod.rs @@ -3,6 +3,7 @@ pub mod responses; use std::net::IpAddr; +use anyhow::{anyhow, Result}; use requests::announce::{self, Query}; use requests::scrape; use reqwest::{Client as ReqwestClient, Response, Url}; @@ -25,78 +26,83 @@ pub struct Client { /// base url path query /// ``` impl Client { - /// # Panics + /// # Errors /// /// This method fails if the client builder fails. 
- #[must_use] - pub fn new(base_url: Url) -> Self { - Self { + pub fn new(base_url: Url) -> Result { + let reqwest = reqwest::Client::builder().build()?; + Ok(Self { base_url, - reqwest: reqwest::Client::builder().build().unwrap(), + reqwest, key: None, - } + }) } /// Creates the new client binding it to an specific local address. /// - /// # Panics + /// # Errors /// /// This method fails if the client builder fails. - #[must_use] - pub fn bind(base_url: Url, local_address: IpAddr) -> Self { - Self { + pub fn bind(base_url: Url, local_address: IpAddr) -> Result { + let reqwest = reqwest::Client::builder().local_address(local_address).build()?; + Ok(Self { base_url, - reqwest: reqwest::Client::builder().local_address(local_address).build().unwrap(), + reqwest, key: None, - } + }) } - /// # Panics + /// # Errors /// /// This method fails if the client builder fails. - #[must_use] - pub fn authenticated(base_url: Url, key: Key) -> Self { - Self { + pub fn authenticated(base_url: Url, key: Key) -> Result { + let reqwest = reqwest::Client::builder().build()?; + Ok(Self { base_url, - reqwest: reqwest::Client::builder().build().unwrap(), + reqwest, key: Some(key), - } + }) } - pub async fn announce(&self, query: &announce::Query) -> Response { + /// # Errors + pub async fn announce(&self, query: &announce::Query) -> Result { self.get(&self.build_announce_path_and_query(query)).await } - pub async fn scrape(&self, query: &scrape::Query) -> Response { + /// # Errors + pub async fn scrape(&self, query: &scrape::Query) -> Result { self.get(&self.build_scrape_path_and_query(query)).await } - pub async fn announce_with_header(&self, query: &Query, key: &str, value: &str) -> Response { + /// # Errors + pub async fn announce_with_header(&self, query: &Query, key: &str, value: &str) -> Result { self.get_with_header(&self.build_announce_path_and_query(query), key, value) .await } - pub async fn health_check(&self) -> Response { + /// # Errors + pub async fn health_check(&self) -> 
Result { self.get(&self.build_path("health_check")).await } - /// # Panics + /// # Errors /// /// This method fails if there was an error while sending request. - pub async fn get(&self, path: &str) -> Response { - self.reqwest.get(self.build_url(path)).send().await.unwrap() + pub async fn get(&self, path: &str) -> Result { + match self.reqwest.get(self.build_url(path)).send().await { + Ok(response) => Ok(response), + Err(err) => Err(anyhow!("{err}")), + } } - /// # Panics + /// # Errors /// /// This method fails if there was an error while sending request. - pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Response { - self.reqwest - .get(self.build_url(path)) - .header(key, value) - .send() - .await - .unwrap() + pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Result { + match self.reqwest.get(self.build_url(path)).header(key, value).send().await { + Ok(response) => Ok(response), + Err(err) => Err(anyhow!("{err}")), + } } fn build_announce_path_and_query(&self, query: &announce::Query) -> String { From 75518570ae086ada07bbce6d077475a67461b944 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 6 May 2024 08:03:56 +0100 Subject: [PATCH 0807/1003] chore(deps): update dependencies --- Cargo.lock | 279 +++++++++++++++++++++++++++-------------------------- Cargo.toml | 4 +- 2 files changed, 144 insertions(+), 139 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 44f9db17c..143ba1aac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -93,47 +93,48 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.13" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + 
"is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" [[package]] name = "anstyle-parse" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -196,16 +197,16 @@ checksum = "136d4d23bcc79e27423727b36823d86233aad06dfea531837b038394d11e9928" dependencies = [ "concurrent-queue", "event-listener 5.3.0", - "event-listener-strategy 0.5.1", + "event-listener-strategy 0.5.2", "futures-core", "pin-project-lite", ] [[package]] name = "async-compression" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07dbbf24db18d609b1462965249abdf49129ccad073ec257da372adc83259c60" +checksum = "4e9eabd7a98fe442131a17c316bd9349c43695e49e730c3c8e12cfb5f4da2693" dependencies = [ "brotli", "flate2", @@ -225,7 +226,7 @@ checksum = 
"b10202063978b3351199d68f8b22c4e47e4b1b822f8d43fd862d5ea8c006b29a" dependencies = [ "async-task", "concurrent-queue", - "fastrand 2.0.2", + "fastrand 2.1.0", "futures-lite 2.3.0", "slab", ] @@ -278,8 +279,8 @@ dependencies = [ "futures-io", "futures-lite 2.3.0", "parking", - "polling 3.6.0", - "rustix 0.38.32", + "polling 3.7.0", + "rustix 0.38.34", "slab", "tracing", "windows-sys 0.52.0", @@ -334,9 +335,9 @@ dependencies = [ [[package]] name = "async-task" -version = "4.7.0" +version = "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" @@ -357,9 +358,9 @@ checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "axum" @@ -509,9 +510,9 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bigdecimal" @@ -588,25 +589,23 @@ dependencies = [ [[package]] name = "blocking" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +checksum = "495f7104e962b7356f0aeb34247aca1fe7d2e783b346582db7f2904cb5717e88" dependencies = [ "async-channel 2.2.1", "async-lock 3.3.0", "async-task", - 
"fastrand 2.0.2", "futures-io", "futures-lite 2.3.0", "piper", - "tracing", ] [[package]] name = "borsh" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0901fc8eb0aca4c83be0106d6f2db17d86a08dfc2c25f0e84464bf381158add6" +checksum = "dbe5b10e214954177fb1dc9fbd20a1a2608fe99e6c832033bdc7cea287a20d77" dependencies = [ "borsh-derive", "cfg_aliases", @@ -614,9 +613,9 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51670c3aa053938b0ee3bd67c3817e471e626151131b934038e83c5bf8de48f5" +checksum = "d7a8646f94ab393e43e8b35a2558b1624bed28b97ee09c5d15456e3c9463f46d" dependencies = [ "once_cell", "proc-macro-crate 3.1.0", @@ -628,9 +627,9 @@ dependencies = [ [[package]] name = "brotli" -version = "4.0.0" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "125740193d7fee5cc63ab9e16c2fdc4e07c74ba755cc53b327d6ea029e9fc569" +checksum = "19483b140a7ac7174d34b5a581b406c64f84da5409d3e09cf4fff604f9270e67" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -639,9 +638,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "3.0.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65622a320492e09b5e0ac436b14c54ff68199bac392d0e89a6832c4518eea525" +checksum = "e6221fe77a248b9117d431ad93761222e1cf8ff282d9d1d5d9f53d6299a1cf76" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -701,12 +700,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.94" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f6e324229dc011159fcc089755d1e2e216a90d43a7dea6853ca740b84f35e7" +checksum = "065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd" dependencies = [ "jobserver", "libc", + 
"once_cell", ] [[package]] @@ -832,15 +832,15 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" [[package]] name = "concurrent-queue" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] @@ -1096,7 +1096,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "lock_api", "once_cell", "parking_lot_core", @@ -1242,9 +1242,9 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332f51cb23d20b0de8458b86580878211da09bcd4503cb579c225b3d124cabb3" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ "event-listener 5.3.0", "pin-project-lite", @@ -1273,9 +1273,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "fern" @@ -1288,9 +1288,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.28" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" 
+checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" dependencies = [ "crc32fast", "libz-sys", @@ -1470,7 +1470,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand 2.0.2", + "fastrand 2.1.0", "futures-core", "futures-io", "parking", @@ -1618,9 +1618,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash 0.8.11", "allocator-api2", @@ -1632,7 +1632,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -1761,7 +1761,7 @@ dependencies = [ "http-body", "hyper", "pin-project-lite", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tower", "tower-service", @@ -1825,7 +1825,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "serde", ] @@ -1875,6 +1875,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "is_terminal_polyfill" +version = "1.70.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" + [[package]] name = "itertools" version = "0.10.5" @@ -1901,9 +1907,9 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.30" +version = "0.1.31" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "685a7d121ee3f65ae4fddd72b25a04bb36b6af81bc0828f7d5434c0fe60fa3a2" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] @@ -2024,9 +2030,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.153" +version = "0.2.154" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" [[package]] name = "libloading" @@ -2092,9 +2098,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -2219,7 +2225,7 @@ dependencies = [ "percent-encoding", "serde", "serde_json", - "socket2 0.5.6", + "socket2 0.5.7", "twox-hash", "url", ] @@ -2376,9 +2382,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] @@ -2476,9 +2482,9 @@ checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" dependencies = [ "lock_api", "parking_lot_core", @@ -2486,15 +2492,15 @@ dependencies = [ [[package]] name = 
"parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] [[package]] @@ -2521,9 +2527,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" +checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" dependencies = [ "memchr", "thiserror", @@ -2532,9 +2538,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73541b156d32197eecda1a4014d7f868fd2bcb3c550d5386087cfba442bf69c" +checksum = "26293c9193fbca7b1a3bf9b79dc1e388e927e6cacaa78b4a3ab705a1d3d41459" dependencies = [ "pest", "pest_generator", @@ -2542,9 +2548,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c35eeed0a3fab112f75165fdc026b3913f4183133f19b49be773ac9ea966e8bd" +checksum = "3ec22af7d3fb470a85dd2ca96b7c577a1eb4ef6f1683a9fe9a8c16e136c04687" dependencies = [ "pest", "pest_meta", @@ -2555,9 +2561,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2adbf29bb9776f28caece835398781ab24435585fe0d4dc1374a61db5accedca" +checksum = "d7a240022f37c361ec1878d646fc5b7d7c4d28d5946e1a80ad5a7a4f4ca0bdcd" dependencies = [ "once_cell", "pest", @@ -2641,7 +2647,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" dependencies = [ "atomic-waker", - "fastrand 2.0.2", + "fastrand 2.1.0", "futures-io", ] @@ -2697,15 +2703,15 @@ dependencies = [ [[package]] name = "polling" -version = "3.6.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0c976a60b2d7e99d6f229e414670a9b85d13ac305cc6d1e9c134de58c5aaaf6" +checksum = "645493cf344456ef24219d02a768cf1fb92ddf8c92161679ae3d91b91a637be3" dependencies = [ "cfg-if", "concurrent-queue", "hermit-abi", "pin-project-lite", - "rustix 0.38.32", + "rustix 0.38.34", "tracing", "windows-sys 0.52.0", ] @@ -2919,11 +2925,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", ] [[package]] @@ -2957,9 +2963,9 @@ checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "relative-path" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e898588f33fdd5b9420719948f9f2a32c922a246964576f71ba7f24f80610fbc" +checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" [[package]] name = "rend" @@ -2972,11 +2978,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e6cc1e89e689536eb5aeede61520e874df5a4707df811cd5da4aa5fbb2aae19" +checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "bytes", "encoding_rs", "futures-core", @@ -3183,9 +3189,9 @@ 
dependencies = [ [[package]] name = "rustix" -version = "0.38.32" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ "bitflags 2.5.0", "errno", @@ -3196,9 +3202,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.10" +version = "0.21.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring", @@ -3212,15 +3218,15 @@ version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" +checksum = "beb461507cee2c2ff151784c52762cf4d9ff6a61f3e80968600ed24fa837fa54" [[package]] name = "rustls-webpki" @@ -3301,11 +3307,11 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "security-framework" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "core-foundation", "core-foundation-sys", "libc", @@ -3314,9 +3320,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.10.0" +version = "2.11.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -3330,9 +3336,9 @@ checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" [[package]] name = "serde" -version = "1.0.198" +version = "1.0.200" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" +checksum = "ddc6f9cc94d67c0e21aaf7eda3a010fd3af78ebf6e096aa6e2e13c79749cce4f" dependencies = [ "serde_derive", ] @@ -3358,9 +3364,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.198" +version = "1.0.200" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" +checksum = "856f046b9400cee3c8c94ed572ecdb752444c24528c035cd35882aad6f492bcb" dependencies = [ "proc-macro2", "quote", @@ -3436,11 +3442,11 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.7.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee80b0e361bbf88fd2f6e242ccd19cfda072cb0faa6ae694ecee08199938569a" +checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", @@ -3454,9 +3460,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.7.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6561dc161a9224638a31d876ccdfefbc1df91d3f3a8342eddb35f055d48c7655" +checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" dependencies = [ "darling", "proc-macro2", @@ -3494,9 +3500,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = 
"signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -3540,9 +3546,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", @@ -3679,8 +3685,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", - "fastrand 2.0.2", - "rustix 0.38.32", + "fastrand 2.1.0", + "rustix 0.38.34", "windows-sys 0.52.0", ] @@ -3701,18 +3707,18 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" +checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" +checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" dependencies = [ "proc-macro2", "quote", @@ -3797,7 +3803,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.6", + "socket2 0.5.7", "tokio-macros", "windows-sys 0.48.0", ] @@ -3835,16 +3841,15 @@ dependencies = [ [[package]] name = 
"tokio-util" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", - "tracing", ] [[package]] @@ -3856,7 +3861,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.11", + "toml_edit 0.22.12", ] [[package]] @@ -3892,15 +3897,15 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.11" +version = "0.22.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb686a972ccef8537b39eead3968b0e8616cb5040dbb9bba93007c8e07c9215f" +checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" dependencies = [ "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.6", + "winnow 0.6.8", ] [[package]] @@ -4220,9 +4225,9 @@ dependencies = [ [[package]] name = "value-bag" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74797339c3b98616c009c7c3eb53a0ce41e85c8ec66bd3db96ed132d20cfdee8" +checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" [[package]] name = "vcpkg" @@ -4361,11 +4366,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -4533,9 +4538,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f0c976aaaa0e1f90dbb21e9587cdaf1d9679a1cde8875c0d6bd83ab96a208352" +checksum = "c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d" dependencies = [ "memchr", ] @@ -4570,18 +4575,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.7.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "087eca3c1eaf8c47b94d02790dd086cd594b912d2043d4de4bfdd466b3befb7c" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "6f4b6c273f496d8fd4eaf18853e6b448760225dc030ff2c485a786859aea6393" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 0e37b7ad0..486c41230 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,7 +31,7 @@ version = "3.0.0-alpha.12-develop" [dependencies] anyhow = "1" -aquatic_udp_protocol = "0" +aquatic_udp_protocol = "0.8" async-trait = "0" axum = { version = "0", features = ["macros"] } axum-client-ip = "0" @@ -57,7 +57,7 @@ r2d2_mysql = "24" r2d2_sqlite = { version = "0", features = ["bundled"] } rand = "0" reqwest = { version = "0", features = ["json"] } -ringbuf = "0" +ringbuf = "0.3.3" serde = { version = "1", features = ["derive"] } serde_bencode = "0" serde_bytes = "0" From 801d91363a7570efef75d4aede39b961564963d1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 May 2024 10:05:48 +0100 Subject: [PATCH 0808/1003] chore(deps): bump aquatic_udp_protocol from 0.8.0 to 0.9.0 --- Cargo.lock | 65 +++++++- Cargo.toml | 3 +- cSpell.json | 3 +- src/console/clients/checker/checks/udp.rs | 4 +- src/console/clients/udp/app.rs | 6 +- src/console/clients/udp/checker.rs | 19 +-- src/console/clients/udp/responses.rs | 38 ++--- src/servers/udp/connection_cookie.rs | 8 +- 
src/servers/udp/handlers.rs | 152 ++++++++++--------- src/servers/udp/peer_builder.rs | 17 ++- src/servers/udp/request.rs | 2 +- src/servers/udp/server.rs | 2 +- src/shared/bit_torrent/tracker/udp/client.rs | 7 +- tests/servers/udp/contract.rs | 35 ++--- 14 files changed, 225 insertions(+), 136 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 143ba1aac..b7f592666 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,14 +146,30 @@ version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" +[[package]] +name = "aquatic_peer_id" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0732a73df221dcb25713849c6ebaf57b85355f669716652a7466f688cc06f25" +dependencies = [ + "compact_str", + "hex", + "quickcheck", + "regex", + "serde", + "zerocopy", +] + [[package]] name = "aquatic_udp_protocol" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2919b480121f7d20d247524da62bad1b6b7928bc3f50898f624b5c592727341" +checksum = "0af90e5162f5fcbde33524128f08dc52a779f32512d5f8692eadd4b55c89389e" dependencies = [ + "aquatic_peer_id", "byteorder", "either", + "zerocopy", ] [[package]] @@ -698,6 +714,15 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "castaway" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a17ed5635fc8536268e5d4de1e22e81ac34419e5f052d4d51f4e01dcc263fcc" +dependencies = [ + "rustversion", +] + [[package]] name = "cc" version = "1.0.96" @@ -836,6 +861,19 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" +[[package]] +name = "compact_str" +version = "0.7.1" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f86b9c4c00838774a6d902ef931eff7470720c51d90c2e32cfe15dc304737b3f" +dependencies = [ + "castaway", + "cfg-if", + "itoa", + "ryu", + "static_assertions", +] + [[package]] name = "concurrent-queue" version = "2.5.0" @@ -1176,6 +1214,16 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "env_logger" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +dependencies = [ + "log", + "regex", +] + [[package]] name = "equivalent" version = "1.0.1" @@ -2826,6 +2874,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "quickcheck" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" +dependencies = [ + "env_logger", + "log", + "rand", +] + [[package]] name = "quote" version = "1.0.36" @@ -3961,6 +4020,7 @@ dependencies = [ "tracing", "url", "uuid", + "zerocopy", ] [[package]] @@ -4579,6 +4639,7 @@ version = "0.7.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "087eca3c1eaf8c47b94d02790dd086cd594b912d2043d4de4bfdd466b3befb7c" dependencies = [ + "byteorder", "zerocopy-derive", ] diff --git a/Cargo.toml b/Cargo.toml index 486c41230..63735450e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,7 +31,7 @@ version = "3.0.0-alpha.12-develop" [dependencies] anyhow = "1" -aquatic_udp_protocol = "0.8" +aquatic_udp_protocol = "0" async-trait = "0" axum = { version = "0", features = ["macros"] } axum-client-ip = "0" @@ -76,6 +76,7 @@ trace = "0" tracing = "0" url = "2" uuid = { version = "1", features = ["v4"] } +zerocopy = "0.7.33" [package.metadata.cargo-machete] ignored = ["serde_bytes", "crossbeam-skiplist", "dashmap", "parking_lot"] diff --git a/cSpell.json b/cSpell.json index 24ef6b0a0..2473e9c33 100644 --- a/cSpell.json +++ b/cSpell.json @@ -170,7 +170,8 
@@ "Xtorrent", "Xunlei", "xxxxxxxxxxxxxxxxxxxxd", - "yyyyyyyyyyyyyyyyyyyyd" + "yyyyyyyyyyyyyyyyyyyyd", + "zerocopy" ], "enableFiletypes": [ "dockerfile", diff --git a/src/console/clients/checker/checks/udp.rs b/src/console/clients/checker/checks/udp.rs index 48f72edf9..6458190d4 100644 --- a/src/console/clients/checker/checks/udp.rs +++ b/src/console/clients/checker/checks/udp.rs @@ -27,7 +27,7 @@ pub async fn run(udp_trackers: &Vec, check_results: &mut Vec, check_results: &mut Vec anyhow::Result { - let transaction_id = TransactionId(RANDOM_TRANSACTION_ID); + let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); let mut client = checker::Client::default(); @@ -151,12 +151,12 @@ async fn handle_announce(tracker_socket_addr: &SocketAddr, info_hash: &TorrustIn let connection_id = client.send_connection_request(transaction_id).await?; client - .send_announce_request(connection_id, transaction_id, *info_hash, Port(bound_to.port())) + .send_announce_request(connection_id, transaction_id, *info_hash, Port(bound_to.port().into())) .await } async fn handle_scrape(tracker_socket_addr: &SocketAddr, info_hashes: &[TorrustInfoHash]) -> anyhow::Result { - let transaction_id = TransactionId(RANDOM_TRANSACTION_ID); + let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); let mut client = checker::Client::default(); diff --git a/src/console/clients/udp/checker.rs b/src/console/clients/udp/checker.rs index 9b2a9011e..d51492041 100644 --- a/src/console/clients/udp/checker.rs +++ b/src/console/clients/udp/checker.rs @@ -3,8 +3,8 @@ use std::net::{Ipv4Addr, SocketAddr}; use anyhow::Context; use aquatic_udp_protocol::common::InfoHash; use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, Response, - ScrapeRequest, TransactionId, + AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, + PeerId, PeerKey, Port, 
Response, ScrapeRequest, TransactionId, }; use log::debug; use thiserror::Error; @@ -148,16 +148,17 @@ impl Client { let announce_request = AnnounceRequest { connection_id, + action_placeholder: AnnounceActionPlaceholder::default(), transaction_id, info_hash: InfoHash(info_hash.bytes()), peer_id: PeerId(*b"-qB00000000000000001"), - bytes_downloaded: NumberOfBytes(0i64), - bytes_uploaded: NumberOfBytes(0i64), - bytes_left: NumberOfBytes(0i64), - event: AnnounceEvent::Started, - ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), - key: PeerKey(0u32), - peers_wanted: NumberOfPeers(1i32), + bytes_downloaded: NumberOfBytes(0i64.into()), + bytes_uploaded: NumberOfBytes(0i64.into()), + bytes_left: NumberOfBytes(0i64.into()), + event: AnnounceEvent::Started.into(), + ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), + key: PeerKey::new(0i32), + peers_wanted: NumberOfPeers(1i32.into()), port: client_port, }; diff --git a/src/console/clients/udp/responses.rs b/src/console/clients/udp/responses.rs index 2fbc38f5f..8ea1a978b 100644 --- a/src/console/clients/udp/responses.rs +++ b/src/console/clients/udp/responses.rs @@ -1,7 +1,7 @@ //! Aquatic responses are not serializable. These are the serializable wrappers. 
use std::net::{Ipv4Addr, Ipv6Addr}; -use aquatic_udp_protocol::{AnnounceResponse, ScrapeResponse}; +use aquatic_udp_protocol::{AnnounceResponse, Ipv4AddrBytes, Ipv6AddrBytes, ScrapeResponse}; use serde::Serialize; #[derive(Serialize)] @@ -13,33 +13,33 @@ pub struct AnnounceResponseDto { peers: Vec, } -impl From> for AnnounceResponseDto { - fn from(announce: AnnounceResponse) -> Self { +impl From> for AnnounceResponseDto { + fn from(announce: AnnounceResponse) -> Self { Self { - transaction_id: announce.transaction_id.0, - announce_interval: announce.announce_interval.0, - leechers: announce.leechers.0, - seeders: announce.seeders.0, + transaction_id: announce.fixed.transaction_id.0.into(), + announce_interval: announce.fixed.announce_interval.0.into(), + leechers: announce.fixed.leechers.0.into(), + seeders: announce.fixed.seeders.0.into(), peers: announce .peers .iter() - .map(|peer| format!("{}:{}", peer.ip_address, peer.port.0)) + .map(|peer| format!("{}:{}", Ipv4Addr::from(peer.ip_address), peer.port.0)) .collect::>(), } } } -impl From> for AnnounceResponseDto { - fn from(announce: AnnounceResponse) -> Self { +impl From> for AnnounceResponseDto { + fn from(announce: AnnounceResponse) -> Self { Self { - transaction_id: announce.transaction_id.0, - announce_interval: announce.announce_interval.0, - leechers: announce.leechers.0, - seeders: announce.seeders.0, + transaction_id: announce.fixed.transaction_id.0.into(), + announce_interval: announce.fixed.announce_interval.0.into(), + leechers: announce.fixed.leechers.0.into(), + seeders: announce.fixed.seeders.0.into(), peers: announce .peers .iter() - .map(|peer| format!("{}:{}", peer.ip_address, peer.port.0)) + .map(|peer| format!("{}:{}", Ipv6Addr::from(peer.ip_address), peer.port.0)) .collect::>(), } } @@ -54,14 +54,14 @@ pub struct ScrapeResponseDto { impl From for ScrapeResponseDto { fn from(scrape: ScrapeResponse) -> Self { Self { - transaction_id: scrape.transaction_id.0, + transaction_id: 
scrape.transaction_id.0.into(), torrent_stats: scrape .torrent_stats .iter() .map(|torrent_scrape_statistics| TorrentStats { - seeders: torrent_scrape_statistics.seeders.0, - completed: torrent_scrape_statistics.completed.0, - leechers: torrent_scrape_statistics.leechers.0, + seeders: torrent_scrape_statistics.seeders.0.into(), + completed: torrent_scrape_statistics.completed.0.into(), + leechers: torrent_scrape_statistics.leechers.0.into(), }) .collect::>(), } diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs index 49ea6261b..af3a28702 100644 --- a/src/servers/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -71,6 +71,8 @@ use std::panic::Location; use aquatic_udp_protocol::ConnectionId; use torrust_tracker_clock::time_extent::{Extent, TimeExtent}; +use zerocopy::network_endian::I64; +use zerocopy::AsBytes; use super::error::Error; @@ -83,13 +85,15 @@ pub const COOKIE_LIFETIME: TimeExtent = TimeExtent::from_sec(2, &60); /// Converts a connection ID into a connection cookie. #[must_use] pub fn from_connection_id(connection_id: &ConnectionId) -> Cookie { - connection_id.0.to_le_bytes() + let mut cookie = [0u8; 8]; + connection_id.write_to(&mut cookie); + cookie } /// Converts a connection cookie into a connection ID. #[must_use] pub fn into_connection_id(connection_cookie: &Cookie) -> ConnectionId { - ConnectionId(i64::from_le_bytes(*connection_cookie)) + ConnectionId(I64::new(i64::from_be_bytes(*connection_cookie))) } /// Generates a new connection cookie. diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 122e666a8..876f4c9fe 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -1,19 +1,21 @@ //! Handlers for the UDP server. 
use std::fmt; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use std::sync::Arc; use std::time::Instant; use aquatic_udp_protocol::{ - AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, - NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, + AnnounceInterval, AnnounceRequest, AnnounceResponse, AnnounceResponseFixedData, ConnectRequest, ConnectResponse, + ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, + ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; use log::debug; use tokio::net::UdpSocket; use torrust_tracker_located_error::DynError; use torrust_tracker_primitives::info_hash::InfoHash; use uuid::Uuid; +use zerocopy::network_endian::I32; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use super::UdpRequest; @@ -41,7 +43,7 @@ pub(crate) async fn handle_packet(udp_request: UdpRequest, tracker: &Arc { - ip_address: ip, - port: Port(peer.peer_addr.port()), + Some(ResponsePeer:: { + ip_address: ip.into(), + port: Port(peer.peer_addr.port().into()), }) } else { None @@ -204,18 +208,20 @@ pub async fn handle_announce( Ok(Response::from(announce_response)) } else { let announce_response = AnnounceResponse { - transaction_id: wrapped_announce_request.announce_request.transaction_id, - announce_interval: AnnounceInterval(i64::from(tracker.get_announce_policy().interval) as i32), - leechers: NumberOfPeers(i64::from(response.stats.incomplete) as i32), - seeders: NumberOfPeers(i64::from(response.stats.complete) as i32), + fixed: AnnounceResponseFixedData { + transaction_id: wrapped_announce_request.announce_request.transaction_id, + announce_interval: AnnounceInterval(I32::new(i64::from(tracker.get_announce_policy().interval) as 
i32)), + leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), + seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), + }, peers: response .peers .iter() .filter_map(|peer| { if let IpAddr::V6(ip) = peer.peer_addr.ip() { - Some(ResponsePeer:: { - ip_address: ip, - port: Port(peer.peer_addr.port()), + Some(ResponsePeer:: { + ip_address: ip.into(), + port: Port(peer.peer_addr.port().into()), }) } else { None @@ -259,9 +265,9 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra #[allow(clippy::cast_possible_truncation)] let scrape_entry = { TorrentScrapeStatistics { - seeders: NumberOfPeers(i64::from(swarm_metadata.complete) as i32), - completed: NumberOfDownloads(i64::from(swarm_metadata.downloaded) as i32), - leechers: NumberOfPeers(i64::from(swarm_metadata.incomplete) as i32), + seeders: NumberOfPeers(I32::new(i64::from(swarm_metadata.complete) as i32)), + completed: NumberOfDownloads(I32::new(i64::from(swarm_metadata.downloaded) as i32)), + leechers: NumberOfPeers(I32::new(i64::from(swarm_metadata.incomplete) as i32)), } }; @@ -445,14 +451,14 @@ mod tests { fn sample_connect_request() -> ConnectRequest { ConnectRequest { - transaction_id: TransactionId(0i32), + transaction_id: TransactionId(0i32.into()), } } #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { let request = ConnectRequest { - transaction_id: TransactionId(0i32), + transaction_id: TransactionId(0i32.into()), }; let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker()) @@ -471,7 +477,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { let request = ConnectRequest { - transaction_id: TransactionId(0i32), + transaction_id: TransactionId(0i32.into()), }; let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker()) @@ -529,10 +535,11 @@ mod tests { mod announce_request { 
use std::net::Ipv4Addr; + use std::num::NonZeroU16; use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, - TransactionId, + AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectionId, NumberOfBytes, NumberOfPeers, + PeerId as AquaticPeerId, PeerKey, Port, TransactionId, }; use crate::servers::udp::connection_cookie::{into_connection_id, make}; @@ -550,17 +557,18 @@ mod tests { let default_request = AnnounceRequest { connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), - transaction_id: TransactionId(0i32), + action_placeholder: AnnounceActionPlaceholder::default(), + transaction_id: TransactionId(0i32.into()), info_hash: info_hash_aquatic, peer_id: AquaticPeerId([255u8; 20]), - bytes_downloaded: NumberOfBytes(0i64), - bytes_uploaded: NumberOfBytes(0i64), - bytes_left: NumberOfBytes(0i64), - event: AnnounceEvent::Started, - ip_address: Some(client_ip), - key: PeerKey(0u32), - peers_wanted: NumberOfPeers(1i32), - port: Port(client_port), + bytes_downloaded: NumberOfBytes(0i64.into()), + bytes_uploaded: NumberOfBytes(0i64.into()), + bytes_left: NumberOfBytes(0i64.into()), + event: AnnounceEvent::Started.into(), + ip_address: client_ip.into(), + key: PeerKey::new(0i32), + peers_wanted: NumberOfPeers::new(1i32), + port: Port::new(NonZeroU16::new(client_port).expect("a non-zero client port")), }; AnnounceRequestBuilder { request: default_request, @@ -583,12 +591,12 @@ mod tests { } pub fn with_ip_address(mut self, ip_address: Ipv4Addr) -> Self { - self.request.ip_address = Some(ip_address); + self.request.ip_address = ip_address.into(); self } pub fn with_port(mut self, port: u16) -> Self { - self.request.port = Port(port); + self.request.port = Port(port.into()); self } @@ -600,23 +608,23 @@ mod tests { mod using_ipv4 { use std::future; - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use 
std::sync::Arc; use aquatic_udp_protocol::{ - AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, - Response, ResponsePeer, + AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfPeers, + PeerId as AquaticPeerId, Response, ResponsePeer, }; use mockall::predicate::eq; use torrust_tracker_primitives::peer; use crate::core::{self, statistics}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; - use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ public_tracker, sample_ipv4_socket_address, tracker_configuration, TorrentPeerBuilder, }; + use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -659,14 +667,16 @@ mod tests { let response = handle_announce(remote_addr, &request, &public_tracker()).await.unwrap(); - let empty_peer_vector: Vec> = vec![]; + let empty_peer_vector: Vec> = vec![]; assert_eq!( response, Response::from(AnnounceResponse { - transaction_id: request.transaction_id, - announce_interval: AnnounceInterval(120i32), - leechers: NumberOfPeers(0i32), - seeders: NumberOfPeers(1i32), + fixed: AnnounceResponseFixedData { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(120i32.into()), + leechers: NumberOfPeers(0i32.into()), + seeders: NumberOfPeers(1i32.into()), + }, peers: empty_peer_vector }) ); @@ -739,7 +749,7 @@ mod tests { let response = announce_a_new_peer_using_ipv4(tracker.clone()).await; // The response should not contain the peer using IPV6 - let peers: Option>> = match response { + let peers: Option>> = match response { Response::AnnounceIpv6(announce_response) => Some(announce_response.peers), _ => None, }; @@ -820,23 +830,23 @@ mod tests { mod using_ipv6 { use 
std::future; - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{ - AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, - Response, ResponsePeer, + AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfPeers, + PeerId as AquaticPeerId, Response, ResponsePeer, }; use mockall::predicate::eq; use torrust_tracker_primitives::peer; use crate::core::{self, statistics}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; - use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ public_tracker, sample_ipv6_remote_addr, tracker_configuration, TorrentPeerBuilder, }; + use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -883,14 +893,16 @@ mod tests { let response = handle_announce(remote_addr, &request, &public_tracker()).await.unwrap(); - let empty_peer_vector: Vec> = vec![]; + let empty_peer_vector: Vec> = vec![]; assert_eq!( response, Response::from(AnnounceResponse { - transaction_id: request.transaction_id, - announce_interval: AnnounceInterval(120i32), - leechers: NumberOfPeers(0i32), - seeders: NumberOfPeers(1i32), + fixed: AnnounceResponseFixedData { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(120i32.into()), + leechers: NumberOfPeers(0i32.into()), + seeders: NumberOfPeers(1i32.into()), + }, peers: empty_peer_vector }) ); @@ -966,7 +978,7 @@ mod tests { let response = announce_a_new_peer_using_ipv6(tracker.clone()).await; // The response should not contain the peer using IPV4 - let peers: Option>> = match response { + let peers: Option>> = match response { 
Response::AnnounceIpv4(announce_response) => Some(announce_response.peers), _ => None, }; @@ -1074,9 +1086,9 @@ mod tests { fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { - seeders: NumberOfPeers(0), - completed: NumberOfDownloads(0), - leechers: NumberOfPeers(0), + seeders: NumberOfPeers(0.into()), + completed: NumberOfDownloads(0.into()), + leechers: NumberOfPeers(0.into()), } } @@ -1089,7 +1101,7 @@ mod tests { let request = ScrapeRequest { connection_id: into_connection_id(&make(&remote_addr)), - transaction_id: TransactionId(0i32), + transaction_id: TransactionId(0i32.into()), info_hashes, }; @@ -1123,7 +1135,7 @@ mod tests { ScrapeRequest { connection_id: into_connection_id(&make(remote_addr)), - transaction_id: TransactionId(0i32), + transaction_id: TransactionId::new(0i32), info_hashes, } } @@ -1159,9 +1171,9 @@ mod tests { let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await); let expected_torrent_stats = vec![TorrentScrapeStatistics { - seeders: NumberOfPeers(1), - completed: NumberOfDownloads(0), - leechers: NumberOfPeers(0), + seeders: NumberOfPeers(1.into()), + completed: NumberOfDownloads(0.into()), + leechers: NumberOfPeers(0.into()), }]; assert_eq!(torrent_stats.unwrap().torrent_stats, expected_torrent_stats); @@ -1232,9 +1244,9 @@ mod tests { let torrent_stats = match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); let expected_torrent_stats = vec![TorrentScrapeStatistics { - seeders: NumberOfPeers(1), - completed: NumberOfDownloads(0), - leechers: NumberOfPeers(0), + seeders: NumberOfPeers(1.into()), + completed: NumberOfDownloads(0.into()), + leechers: NumberOfPeers(0.into()), }]; assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); @@ -1265,7 +1277,7 @@ mod tests { ScrapeRequest { connection_id: into_connection_id(&make(remote_addr)), - transaction_id: TransactionId(0i32), + transaction_id: 
TransactionId(0i32.into()), info_hashes, } } diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs index f7eb935a0..104f42a73 100644 --- a/src/servers/udp/peer_builder.rs +++ b/src/servers/udp/peer_builder.rs @@ -18,13 +18,20 @@ use crate::CurrentClock; /// request. #[must_use] pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> peer::Peer { + let announce_event = match aquatic_udp_protocol::AnnounceEvent::from(announce_wrapper.announce_request.event) { + aquatic_udp_protocol::AnnounceEvent::Started => AnnounceEvent::Started, + aquatic_udp_protocol::AnnounceEvent::Stopped => AnnounceEvent::Stopped, + aquatic_udp_protocol::AnnounceEvent::Completed => AnnounceEvent::Completed, + aquatic_udp_protocol::AnnounceEvent::None => AnnounceEvent::None, + }; + peer::Peer { peer_id: peer::Id(announce_wrapper.announce_request.peer_id.0), - peer_addr: SocketAddr::new(*peer_ip, announce_wrapper.announce_request.port.0), + peer_addr: SocketAddr::new(*peer_ip, announce_wrapper.announce_request.port.0.into()), updated: CurrentClock::now(), - uploaded: NumberOfBytes(announce_wrapper.announce_request.bytes_uploaded.0), - downloaded: NumberOfBytes(announce_wrapper.announce_request.bytes_downloaded.0), - left: NumberOfBytes(announce_wrapper.announce_request.bytes_left.0), - event: AnnounceEvent::from_i32(announce_wrapper.announce_request.event.to_i32()), + uploaded: NumberOfBytes(announce_wrapper.announce_request.bytes_uploaded.0.into()), + downloaded: NumberOfBytes(announce_wrapper.announce_request.bytes_downloaded.0.into()), + left: NumberOfBytes(announce_wrapper.announce_request.bytes_left.0.into()), + event: announce_event, } } diff --git a/src/servers/udp/request.rs b/src/servers/udp/request.rs index e172e03b1..f95fec07a 100644 --- a/src/servers/udp/request.rs +++ b/src/servers/udp/request.rs @@ -21,7 +21,7 @@ impl AnnounceWrapper { #[must_use] pub fn new(announce_request: &AnnounceRequest) -> Self { AnnounceWrapper { - 
announce_request: announce_request.clone(), + announce_request: *announce_request, info_hash: InfoHash(announce_request.info_hash.0), } } diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 7086b6ab7..dc1bccde3 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -346,7 +346,7 @@ impl Udp { let buffer = vec![0u8; MAX_PACKET_SIZE]; let mut cursor = Cursor::new(buffer); - match response.write(&mut cursor) { + match response.write_bytes(&mut cursor) { Ok(()) => { #[allow(clippy::cast_possible_truncation)] let position = cursor.position() as usize; diff --git a/src/shared/bit_torrent/tracker/udp/client.rs b/src/shared/bit_torrent/tracker/udp/client.rs index 9af9571bc..81209efb6 100644 --- a/src/shared/bit_torrent/tracker/udp/client.rs +++ b/src/shared/bit_torrent/tracker/udp/client.rs @@ -9,6 +9,7 @@ use aquatic_udp_protocol::{ConnectRequest, Request, Response, TransactionId}; use log::debug; use tokio::net::UdpSocket; use tokio::time; +use zerocopy::network_endian::I32; use crate::shared::bit_torrent::tracker::udp::{source_address, MAX_PACKET_SIZE}; @@ -160,7 +161,7 @@ impl UdpTrackerClient { let request_buffer = vec![0u8; MAX_PACKET_SIZE]; let mut cursor = Cursor::new(request_buffer); - let request_data_result = match request.write(&mut cursor) { + let request_data_result = match request.write_bytes(&mut cursor) { Ok(()) => { #[allow(clippy::cast_possible_truncation)] let position = cursor.position() as usize; @@ -186,7 +187,7 @@ impl UdpTrackerClient { debug!(target: "UDP tracker client", "received {payload_size} bytes. 
Response {response_buffer:?}"); - let response = Response::from_bytes(&response_buffer[..payload_size], true)?; + let response = Response::parse_bytes(&response_buffer[..payload_size], true)?; Ok(response) } @@ -218,7 +219,7 @@ pub async fn check(binding: &SocketAddr) -> Result { match new_udp_tracker_client_connected(binding.to_string().as_str()).await { Ok(client) => { let connect_request = ConnectRequest { - transaction_id: TransactionId(123), + transaction_id: TransactionId(I32::new(123)), }; // client.send() return usize, but doesn't use here diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index 56e400f84..7abd6092c 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -60,7 +60,7 @@ async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_req Err(err) => panic!("{err}"), }; - let response = Response::from_bytes(&buffer, true).unwrap(); + let response = Response::parse_bytes(&buffer, true).unwrap(); assert!(is_error_response(&response, "bad request")); @@ -85,7 +85,7 @@ mod receiving_a_connection_request { }; let connect_request = ConnectRequest { - transaction_id: TransactionId(123), + transaction_id: TransactionId::new(123), }; match client.send(connect_request.into()).await { @@ -98,7 +98,7 @@ mod receiving_a_connection_request { Err(err) => panic!("{err}"), }; - assert!(is_connect_response(&response, TransactionId(123))); + assert!(is_connect_response(&response, TransactionId::new(123))); env.stop().await; } @@ -108,8 +108,8 @@ mod receiving_an_announce_request { use std::net::Ipv4Addr; use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, - TransactionId, + AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, + PeerKey, Port, TransactionId, }; use 
torrust_tracker::shared::bit_torrent::tracker::udp::client::new_udp_tracker_client_connected; use torrust_tracker_test_helpers::configuration; @@ -127,23 +127,24 @@ mod receiving_an_announce_request { Err(err) => panic!("{err}"), }; - let connection_id = send_connection_request(TransactionId(123), &client).await; + let connection_id = send_connection_request(TransactionId::new(123), &client).await; // Send announce request let announce_request = AnnounceRequest { connection_id: ConnectionId(connection_id.0), - transaction_id: TransactionId(123i32), + action_placeholder: AnnounceActionPlaceholder::default(), + transaction_id: TransactionId::new(123i32), info_hash: InfoHash([0u8; 20]), peer_id: PeerId([255u8; 20]), - bytes_downloaded: NumberOfBytes(0i64), - bytes_uploaded: NumberOfBytes(0i64), - bytes_left: NumberOfBytes(0i64), - event: AnnounceEvent::Started, - ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), - key: PeerKey(0u32), - peers_wanted: NumberOfPeers(1i32), - port: Port(client.udp_client.socket.local_addr().unwrap().port()), + bytes_downloaded: NumberOfBytes(0i64.into()), + bytes_uploaded: NumberOfBytes(0i64.into()), + bytes_left: NumberOfBytes(0i64.into()), + event: AnnounceEvent::Started.into(), + ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), + key: PeerKey::new(0i32), + peers_wanted: NumberOfPeers(1i32.into()), + port: Port(client.udp_client.socket.local_addr().unwrap().port().into()), }; match client.send(announce_request.into()).await { @@ -182,7 +183,7 @@ mod receiving_an_scrape_request { Err(err) => panic!("{err}"), }; - let connection_id = send_connection_request(TransactionId(123), &client).await; + let connection_id = send_connection_request(TransactionId::new(123), &client).await; // Send scrape request @@ -192,7 +193,7 @@ mod receiving_an_scrape_request { let scrape_request = ScrapeRequest { connection_id: ConnectionId(connection_id.0), - transaction_id: TransactionId(123i32), + transaction_id: TransactionId::new(123i32), info_hashes, }; From 
be51d2fa98ed83921be6828c685e1ca4b8a9382e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 May 2024 11:16:40 +0100 Subject: [PATCH 0809/1003] chore(deps): bump ringbuf from 0.3.3 to 0.4.0 --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- src/servers/udp/server.rs | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b7f592666..f43e85be1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3094,9 +3094,9 @@ dependencies = [ [[package]] name = "ringbuf" -version = "0.3.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79abed428d1fd2a128201cec72c5f6938e2da607c6f3745f769fabea399d950a" +checksum = "2542bc32f4c763f52a2eb375cb0b76c5aa5771f569af74299e84dca51d988a2f" dependencies = [ "crossbeam-utils", ] diff --git a/Cargo.toml b/Cargo.toml index 63735450e..cbfdc7697 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -57,7 +57,7 @@ r2d2_mysql = "24" r2d2_sqlite = { version = "0", features = ["bundled"] } rand = "0" reqwest = { version = "0", features = ["json"] } -ringbuf = "0.3.3" +ringbuf = "0" serde = { version = "1", features = ["derive"] } serde_bencode = "0" serde_bytes = "0" diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index dc1bccde3..70cf8f01d 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -24,7 +24,8 @@ use std::sync::Arc; use aquatic_udp_protocol::Response; use derive_more::Constructor; use log::{debug, error, info, trace}; -use ringbuf::{Rb, StaticRb}; +use ringbuf::traits::{Consumer, Observer, RingBuffer}; +use ringbuf::StaticRb; use tokio::net::UdpSocket; use tokio::sync::oneshot; use tokio::task::{AbortHandle, JoinHandle}; From 62d4a209b048f120f5969fcd3fb7fb100b87f0a2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 May 2024 11:44:40 +0100 Subject: [PATCH 0810/1003] chore(deps): update dependencies ```output cargo update Updating crates.io index Locking 14 packages to latest compatible versions Updating anyhow v1.0.82 -> 
v1.0.83 Updating cc v1.0.96 -> v1.0.97 Updating getrandom v0.2.14 -> v0.2.15 Updating num-bigint v0.4.4 -> v0.4.5 Updating proc-macro2 v1.0.81 -> v1.0.82 Updating rustls-pki-types v1.5.0 -> v1.6.0 Updating rustversion v1.0.15 -> v1.0.16 Updating ryu v1.0.17 -> v1.0.18 Updating semver v1.0.22 -> v1.0.23 Updating syn v2.0.60 -> v2.0.61 Updating thiserror v1.0.59 -> v1.0.60 Updating thiserror-impl v1.0.59 -> v1.0.60 Updating zerocopy v0.7.33 -> v0.7.34 Updating zerocopy-derive v0.7.33 -> v0.7.34 ``` --- Cargo.lock | 113 ++++++++++++++++++++++++++--------------------------- 1 file changed, 56 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f43e85be1..d4282c158 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -142,9 +142,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.82" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" +checksum = "25bdb32cbbdce2b519a9cd7df3a678443100e265d5e25ca763b7572a5104f5f3" [[package]] name = "aquatic_peer_id" @@ -363,7 +363,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -477,7 +477,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -564,7 +564,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -637,7 +637,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", "syn_derive", ] @@ -725,9 +725,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.96" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd" +checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" dependencies = [ 
"jobserver", "libc", @@ -837,7 +837,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -1113,7 +1113,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -1124,7 +1124,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -1171,7 +1171,7 @@ checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -1416,7 +1416,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -1428,7 +1428,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -1440,7 +1440,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -1533,7 +1533,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -1584,9 +1584,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "libc", @@ -2240,7 +2240,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -2291,7 +2291,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", "termcolor", "thiserror", ] @@ -2404,11 +2404,10 @@ checksum = 
"e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" dependencies = [ - "autocfg", "num-integer", "num-traits", ] @@ -2491,7 +2490,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -2604,7 +2603,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -2673,7 +2672,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -2847,9 +2846,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" +checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" dependencies = [ "unicode-ident", ] @@ -3167,7 +3166,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.60", + "syn 2.0.61", "unicode-ident", ] @@ -3283,9 +3282,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "beb461507cee2c2ff151784c52762cf4d9ff6a61f3e80968600ed24fa837fa54" +checksum = "51f344d206c5e1b010eec27349b815a4805f70a778895959d70b74b9b529b30a" [[package]] name = "rustls-webpki" @@ -3299,15 +3298,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.15" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" +checksum = "092474d1a01ea8278f69e6a358998405fae5b8b963ddaeb2b0b04a128bf1dfb0" [[package]] name = "ryu" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "same-file" @@ -3389,9 +3388,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" @@ -3429,7 +3428,7 @@ checksum = "856f046b9400cee3c8c94ed572ecdb752444c24528c035cd35882aad6f492bcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -3475,7 +3474,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -3526,7 +3525,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -3660,9 +3659,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.60" +version = "2.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" +checksum = "c993ed8ccba56ae856363b1845da7266a7cb78e1d146c8a32d54b45a8b831fc9" dependencies = [ "proc-macro2", "quote", @@ -3678,7 +3677,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -3766,22 +3765,22 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.59" +version = "1.0.60" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" +checksum = "579e9083ca58dd9dcf91a9923bb9054071b9ebbd800b342194c9feb0ee89fc18" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.59" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" +checksum = "e2470041c06ec3ac1ab38d0356a6119054dedaea53e12fbefc0de730a1c08524" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -3875,7 +3874,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -4182,7 +4181,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -4353,7 +4352,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", "wasm-bindgen-shared", ] @@ -4387,7 +4386,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4635,9 +4634,9 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.33" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "087eca3c1eaf8c47b94d02790dd086cd594b912d2043d4de4bfdd466b3befb7c" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" dependencies = [ "byteorder", "zerocopy-derive", @@ -4645,13 +4644,13 @@ dependencies = [ [[package]] name = "zerocopy-derive" -version = "0.7.33" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6f4b6c273f496d8fd4eaf18853e6b448760225dc030ff2c485a786859aea6393" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] From e3143f779c40e590cfbdcfd34ee3d972f94ca1b3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 May 2024 12:25:48 +0100 Subject: [PATCH 0811/1003] feat: log aborted UDP requests This will add a warning to the logs when a UDP request is aborted. --- src/servers/udp/server.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 70cf8f01d..fc2d02a59 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -291,7 +291,14 @@ impl Udp { if !h.is_finished() { // the task is still running, lets yield and give it a chance to flush. tokio::task::yield_now().await; + h.abort(); + + let server_socket_addr = socket.local_addr().expect("Could not get local_addr for socket."); + + tracing::span!( + target: "UDP TRACKER", + tracing::Level::WARN, "request-aborted", server_socket_addr = %server_socket_addr); } } } From 3dee03ea0d6b911e58a51cf5fded5d4a0efc9f6d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 May 2024 17:48:09 +0100 Subject: [PATCH 0812/1003] docs: update installation docs --- README.md | 4 ++++ src/lib.rs | 70 ++++++++++++++++++++++++++++++++++++++---------------- 2 files changed, 54 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index ea5078b19..8431c00e4 100644 --- a/README.md +++ b/README.md @@ -122,6 +122,10 @@ The following services are provided by the default configuration: ## Documentation +You can read the [latest documentation][docs] from . + +Some specific sections: + - [Management API (Version 1)][API] - [Tracker (HTTP/TLS)][HTTP] - [Tracker (UDP)][UDP] diff --git a/src/lib.rs b/src/lib.rs index 064f50eb6..22bc133e1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -109,29 +109,47 @@ //!
With the default configuration you will need to create the `storage` directory: //! //! ```text -//! storage/ -//! ├── database -//! │   └── data.db -//! └── tls -//! ├── localhost.crt -//! └── localhost.key +//! ./storage/ +//! └── tracker +//! ├── etc +//! ├── lib +//! │   ├── database +//! │   │   └── sqlite3.db +//! │   └── tls +//! └── log //! ``` //! //! The default configuration expects a directory `./storage/tracker/lib/database` to be writable by the tracker process. //! -//! By default the tracker uses `SQLite` and the database file name `data.db`. +//! By default the tracker uses `SQLite` and the database file name `sqlite3.db`. //! //! You only need the `tls` directory in case you are setting up SSL for the HTTP tracker or the tracker API. //! Visit [`HTTP`](crate::servers::http) or [`API`](crate::servers::apis) if you want to know how you can use HTTPS. //! //! ## Install from sources //! +//! First, you need to create a folder to clone the repository. +//! +//! ```text +//! cd /tmp +//! mkdir torrust +//! ``` +//! //! ```text //! git clone https://github.com/torrust/torrust-tracker.git \ //! && cd torrust-tracker \ //! && cargo build --release \ +//! && mkdir -p ./storage/tracker/etc \ //! && mkdir -p ./storage/tracker/lib/database \ -//! && mkdir -p ./storage/tracker/lib/tls +//! && mkdir -p ./storage/tracker/lib/tls \ +//! && mkdir -p ./storage/tracker/log +//! ``` +//! +//! To run the tracker we will have to use the command "cargo run" this will +//! compile and after being compiled it will start running the tracker. +//! +//! ```text +//! cargo run //! ``` //! //! ## Run with docker @@ -141,9 +159,10 @@ //! //! # Configuration //! -//! In order to run the tracker you need to provide the configuration. If you run the tracker without providing the configuration, -//! the tracker will generate the default configuration the first time you run it. It will generate a `tracker.toml` file with -//! in the root directory. +//! 
In order to run the tracker you need to provide the configuration. If you +//! run the tracker without providing the configuration, the tracker will +//! generate the default configuration the first time you run it. It will +//! generate a `tracker.toml` file in the root directory. //! //! The default configuration is: //! @@ -187,26 +206,37 @@ //! bind_address = "127.0.0.1:1313" //!``` //! -//! The default configuration includes one disabled UDP server, one disabled HTTP server and the enabled API. +//! The default configuration includes one disabled UDP server, one disabled +//! HTTP server and the enabled API. //! -//! For more information about each service and options you can visit the documentation for the [torrust-tracker-configuration crate](https://docs.rs/torrust-tracker-configuration). +//! For more information about each service and options you can visit the +//! documentation for the [torrust-tracker-configuration crate](https://docs.rs/torrust-tracker-configuration). //! -//! Alternatively to the `tracker.toml` file you can use one environment variable `TORRUST_TRACKER_CONFIG` to pass the configuration to the tracker: +//! Alternatively to the `tracker.toml` file you can use one environment +//! variable `TORRUST_TRACKER_CONFIG` to pass the configuration to the tracker: //! //! ```text -//! TORRUST_TRACKER_CONFIG=$(cat tracker.toml) -//! cargo run +//! TORRUST_TRACKER_CONFIG=$(cat ./share/default/config/tracker.development.sqlite3.toml) ./target/release/torrust-tracker //! ``` //! -//! In the previous example you are just setting the env var with the contents of the `tracker.toml` file. +//! In the previous example you are just setting the env var with the contents +//! of the `tracker.toml` file. +//! +//! The env var contains the same data as the `tracker.toml`. It's particularly +//! useful if you are [running the tracker with docker](https://github.com/torrust/torrust-tracker/tree/develop/docker). +//! +//!
> NOTICE: The `TORRUST_TRACKER_CONFIG` env var has priority over the `tracker.toml` file. //! -//! The env var contains the same data as the `tracker.toml`. It's particularly useful in you are [running the tracker with docker](https://github.com/torrust/torrust-tracker/tree/develop/docker). +//! By default, if you don’t specify any `tracker.toml` file, the application +//! will use `./share/default/config/tracker.development.sqlite3.toml`. //! -//! > NOTE: The `TORRUST_TRACKER_CONFIG` env var has priority over the `tracker.toml` file. +//! > IMPORTANT: Every time you change the configuration you need to restart the +//! service. //! //! # Usage //! -//! Running the tracker with the default configuration and enabling the UDP and HTTP trackers will expose the services on these URLs: +//! Running the tracker with the default configuration and enabling the UDP and +//! HTTP trackers will expose the services on these URLs: //! //! - REST API: //! - UDP tracker: From cddc4dee7c8de35b543b8aeefb741f8882ccb27e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 May 2024 18:22:01 +0100 Subject: [PATCH 0813/1003] chore(deps): bump rustc-demangle v0.1.23 -> v0.1.24 --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d4282c158..bd0e36c3a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3212,9 +3212,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" From 6f02aeb7e97c1d990df916514b0737f8b5e67f26 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 May 2024 12:15:41 +0100 Subject: [PATCH 0814/1003] docs: fix profiling docs - Remove unnecessary file - Fix filename typo --- ... 
=> flamegraph_generated_without_sudo.svg} | 0 docs/profiling.md | 2 +- flamegraph_generated_withput_sudo.svg | 491 ------------------ 3 files changed, 1 insertion(+), 492 deletions(-) rename docs/media/{flamegraph_generated_withput_sudo.svg => flamegraph_generated_without_sudo.svg} (100%) delete mode 100644 flamegraph_generated_withput_sudo.svg diff --git a/docs/media/flamegraph_generated_withput_sudo.svg b/docs/media/flamegraph_generated_without_sudo.svg similarity index 100% rename from docs/media/flamegraph_generated_withput_sudo.svg rename to docs/media/flamegraph_generated_without_sudo.svg diff --git a/docs/profiling.md b/docs/profiling.md index 26e5b786e..406560f3c 100644 --- a/docs/profiling.md +++ b/docs/profiling.md @@ -79,7 +79,7 @@ Loading configuration file: `./share/default/config/tracker.udp.benchmarking.tom And some bars in the graph will have the `unknown` label. -![flamegraph generated without sudo](./media/flamegraph_generated_withput_sudo.svg) +![flamegraph generated without sudo](./media/flamegraph_generated_without_sudo.svg) ## Using valgrind and kcachegrind diff --git a/flamegraph_generated_withput_sudo.svg b/flamegraph_generated_withput_sudo.svg deleted file mode 100644 index 84c00ffe3..000000000 --- a/flamegraph_generated_withput_sudo.svg +++ /dev/null @@ -1,491 +0,0 @@ -Flame Graph Reset ZoomSearch [unknown] (188 samples, 0.14%)[unknown] (187 samples, 0.14%)[unknown] (186 samples, 0.14%)[unknown] (178 samples, 0.14%)[unknown] (172 samples, 0.13%)[unknown] (158 samples, 0.12%)[unknown] (158 samples, 0.12%)[unknown] (125 samples, 0.10%)[unknown] (102 samples, 0.08%)[unknown] (93 samples, 0.07%)[unknown] (92 samples, 0.07%)[unknown] (41 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (29 samples, 0.02%)[unknown] (25 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (15 samples, 0.01%)__GI___mmap64 (18 samples, 0.01%)__GI___mmap64 (18 samples, 0.01%)[unknown] (18 samples, 
0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (17 samples, 0.01%)profiling (214 samples, 0.16%)clone3 (22 samples, 0.02%)start_thread (22 samples, 0.02%)std::sys::pal::unix::thread::Thread::new::thread_start (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::Handler::new (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::imp::make_handler (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::imp::get_stack (19 samples, 0.01%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (30 samples, 0.02%)[[vdso]] (93 samples, 0.07%)<torrust_tracker::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as core::ops::deref::Deref>::deref::__stability::LAZY (143 samples, 0.11%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (31 samples, 0.02%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<BorrowType,K,V>::init_front (21 samples, 0.02%)[[vdso]] (91 samples, 0.07%)__GI___clock_gettime (14 samples, 0.01%)_int_malloc (53 samples, 0.04%)epoll_wait (254 samples, 0.19%)tokio::runtime::context::with_scheduler (28 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (14 samples, 0.01%)tokio::runtime::context::with_scheduler::{{closure}} (14 samples, 0.01%)core::option::Option<T>::map (17 samples, 0.01%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (17 samples, 0.01%)mio::poll::Poll::poll (27 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select (27 samples, 0.02%)tokio::runtime::io::driver::Driver::turn (54 samples, 0.04%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (26 
samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (17 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (71 samples, 0.05%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (65 samples, 0.05%)core::sync::atomic::AtomicUsize::fetch_add (65 samples, 0.05%)core::sync::atomic::atomic_add (65 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (31 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark_condvar (18 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (49 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (33 samples, 0.03%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (93 samples, 0.07%)tokio::runtime::scheduler::multi_thread::park::Parker::park (75 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Inner::park (75 samples, 0.06%)core::cell::RefCell<T>::borrow_mut (18 samples, 0.01%)core::cell::RefCell<T>::try_borrow_mut (18 samples, 0.01%)core::cell::BorrowRefMut::new (18 samples, 0.01%)tokio::runtime::coop::budget (26 samples, 0.02%)tokio::runtime::coop::with_budget (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (96 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (27 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl 
tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (18 samples, 0.01%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (35 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (14 samples, 0.01%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (90 samples, 0.07%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (90 samples, 0.07%)core::slice::<impl [T]>::contains (220 samples, 0.17%)<T as core::slice::cmp::SliceContains>::slice_contains (220 samples, 0.17%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (220 samples, 0.17%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (54 samples, 0.04%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (54 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (240 samples, 0.18%)tokio::runtime::scheduler::multi_thread::idle::Idle::unpark_worker_by_id (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (265 samples, 0.20%)tokio::runtime::scheduler::multi_thread::worker::Context::park (284 samples, 0.22%)core::option::Option<T>::or_else (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (40 samples, 0.03%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (17 samples, 0.01%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (17 samples, 0.01%)core::num::<impl u32>::wrapping_add (17 samples, 
0.01%)core::sync::atomic::AtomicU64::compare_exchange (26 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (129 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (128 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (119 samples, 0.09%)tokio::runtime::scheduler::multi_thread::queue::pack (39 samples, 0.03%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run (613 samples, 0.47%)tokio::runtime::context::runtime::enter_runtime (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (613 samples, 0.47%)tokio::runtime::context::set_scheduler (613 samples, 0.47%)std::thread::local::LocalKey<T>::with (613 samples, 0.47%)std::thread::local::LocalKey<T>::try_with (613 samples, 0.47%)tokio::runtime::context::set_scheduler::{{closure}} (613 samples, 0.47%)tokio::runtime::context::scoped::Scoped<T>::set (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::Context::run (613 samples, 0.47%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (777 samples, 0.59%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (776 samples, 0.59%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (16 samples, 0.01%)<tokio::runtime::task::core::TaskIdGuard as core::ops::drop::Drop>::drop (16 samples, 0.01%)tokio::runtime::context::set_current_task_id (16 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (16 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (20 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (20 
samples, 0.02%)tokio::runtime::task::core::Core<T,S>::poll (835 samples, 0.64%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (56 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage (46 samples, 0.04%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (897 samples, 0.68%)tokio::runtime::task::harness::poll_future::{{closure}} (897 samples, 0.68%)tokio::runtime::task::core::Core<T,S>::store_output (62 samples, 0.05%)tokio::runtime::task::harness::poll_future (930 samples, 0.71%)std::panic::catch_unwind (927 samples, 0.71%)std::panicking::try (927 samples, 0.71%)std::panicking::try::do_call (925 samples, 0.70%)core::mem::manually_drop::ManuallyDrop<T>::take (28 samples, 0.02%)core::ptr::read (28 samples, 0.02%)tokio::runtime::task::raw::poll (938 samples, 0.71%)tokio::runtime::task::harness::Harness<T,S>::poll (934 samples, 0.71%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (934 samples, 0.71%)core::array::<impl core::default::Default for [T: 32]>::default (26 samples, 0.02%)tokio::runtime::time::Inner::lock (16 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (15 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::time::wheel::Wheel::poll (25 samples, 0.02%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (98 samples, 0.07%)tokio::runtime::time::Driver::park_internal (51 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (15 samples, 0.01%)<F as core::future::into_future::IntoFuture>::into_future (16 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (24 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (46 samples, 
0.04%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (131 samples, 0.10%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (24 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (14 samples, 0.01%)core::sync::atomic::AtomicU32::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (39 samples, 0.03%)std::sync::rwlock::RwLock<T>::read (34 samples, 0.03%)std::sys::sync::rwlock::futex::RwLock::read (32 samples, 0.02%)[[heap]] (2,361 samples, 1.80%)[..[[vdso]] (313 samples, 0.24%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (41 samples, 0.03%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (16 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_str (67 samples, 0.05%)alloc::string::String::push_str (18 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (18 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (18 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (18 samples, 0.01%)<alloc::vec::Vec<T> as 
alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (36 samples, 0.03%)core::num::<impl u64>::rotate_left (28 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (60 samples, 0.05%)core::num::<impl u64>::wrapping_add (14 samples, 0.01%)core::hash::sip::u8to64_le (60 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (184 samples, 0.14%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (15 samples, 0.01%)tokio::runtime::context::CONTEXT::__getit (19 samples, 0.01%)core::cell::Cell<T>::get (17 samples, 0.01%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (26 samples, 0.02%)core::ops::function::FnMut::call_mut (21 samples, 0.02%)tokio::runtime::coop::poll_proceed (21 samples, 0.02%)tokio::runtime::context::budget (21 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (21 samples, 0.02%)[unknown] (18 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (195 samples, 0.15%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (14 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (14 samples, 0.01%)core::result::Result<T,E>::is_err (18 samples, 0.01%)core::result::Result<T,E>::is_ok (18 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (46 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (39 samples, 0.03%)core::sync::atomic::AtomicU32::compare_exchange (19 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (19 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (245 samples, 0.19%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (26 samples, 0.02%)[[vdso]] (748 samples, 0.57%)[profiling] (34 samples, 0.03%)core::fmt::write (31 samples, 0.02%)__GI___clock_gettime (29 samples, 0.02%)__GI___libc_free (131 samples, 
0.10%)arena_for_chunk (20 samples, 0.02%)arena_for_chunk (19 samples, 0.01%)heap_for_ptr (19 samples, 0.01%)heap_max_size (14 samples, 0.01%)__GI___libc_malloc (114 samples, 0.09%)__GI___libc_realloc (15 samples, 0.01%)__GI___lll_lock_wake_private (22 samples, 0.02%)__GI___pthread_disable_asynccancel (66 samples, 0.05%)__GI_getsockname (249 samples, 0.19%)__libc_calloc (15 samples, 0.01%)__libc_recvfrom (23 samples, 0.02%)__libc_sendto (130 samples, 0.10%)__memcmp_evex_movbe (451 samples, 0.34%)__memcpy_avx512_unaligned_erms (426 samples, 0.32%)__memset_avx512_unaligned_erms (215 samples, 0.16%)__posix_memalign (17 samples, 0.01%)_int_free (418 samples, 0.32%)tcache_put (24 samples, 0.02%)_int_malloc (385 samples, 0.29%)_int_memalign (31 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (26 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (15 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (15 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (15 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (54 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::grow_one (15 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (96 samples, 0.07%)alloc::raw_vec::RawVec<T,A>::grow_amortized (66 samples, 0.05%)core::num::<impl usize>::checked_add (18 samples, 0.01%)core::num::<impl usize>::overflowing_add (18 samples, 0.01%)alloc::raw_vec::finish_grow (74 samples, 0.06%)alloc::sync::Arc<T,A>::drop_slow (16 samples, 0.01%)core::mem::drop (14 samples, 0.01%)core::fmt::Formatter::pad_integral (14 samples, 0.01%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (93 samples, 
0.07%)core::ptr::drop_in_place<tokio::net::udp::UdpSocket::send_to<&core::net::socket_addr::SocketAddr>::{{closure}}> (23 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (188 samples, 0.14%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_announce::{{closure}}> (30 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_connect::{{closure}}> (22 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_packet::{{closure}}> (20 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}}> (19 samples, 0.01%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::send_response::{{closure}}> (22 samples, 0.02%)malloc_consolidate (24 samples, 0.02%)core::core_arch::x86::avx2::_mm256_or_si256 (15 samples, 0.01%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (17 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (17 samples, 0.01%)rand_chacha::guts::round (66 samples, 0.05%)rand_chacha::guts::refill_wide::impl_avx2 (99 samples, 0.08%)rand_chacha::guts::refill_wide::fn_impl (98 samples, 0.07%)rand_chacha::guts::refill_wide_impl (98 samples, 0.07%)std::io::error::Error::kind (14 samples, 0.01%)[unknown] (42 samples, 0.03%)[unknown] (14 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (490 samples, 0.37%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (211 samples, 0.16%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (84 samples, 0.06%)tokio::runtime::task::core::Header::get_owner_id (18 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (18 samples, 
0.01%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (20 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::remove (19 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (31 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (29 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage (108 samples, 0.08%)tokio::runtime::task::core::TaskIdGuard::enter (14 samples, 0.01%)tokio::runtime::context::set_current_task_id (14 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (14 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::complete (21 samples, 0.02%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (32 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (54 samples, 0.04%)tokio::runtime::task::raw::drop_abort_handle (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (17 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (22 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (22 samples, 0.02%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (79 samples, 0.06%)core::slice::<impl [T]>::contains (178 samples, 0.14%)<T as core::slice::cmp::SliceContains>::slice_contains (178 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (178 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (40 samples, 0.03%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (40 samples, 
0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (216 samples, 0.16%)tokio::loom::std::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (219 samples, 0.17%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (29 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (29 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (54 samples, 0.04%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (18 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (18 samples, 0.01%)core::sync::atomic::AtomicU32::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (113 samples, 0.09%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (51 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (31 samples, 0.02%)core::sync::atomic::AtomicU64::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (447 samples, 0.34%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (174 samples, 0.13%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (19 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (489 samples, 0.37%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (489 samples, 0.37%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (484 samples, 
0.37%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run (484 samples, 0.37%)tokio::runtime::context::runtime::enter_runtime (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (484 samples, 0.37%)tokio::runtime::context::set_scheduler (484 samples, 0.37%)std::thread::local::LocalKey<T>::with (484 samples, 0.37%)std::thread::local::LocalKey<T>::try_with (484 samples, 0.37%)tokio::runtime::context::set_scheduler::{{closure}} (484 samples, 0.37%)tokio::runtime::context::scoped::Scoped<T>::set (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Context::run (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (24 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (20 samples, 0.02%)tokio::runtime::task::raw::poll (515 samples, 0.39%)tokio::runtime::task::harness::Harness<T,S>::poll (493 samples, 0.38%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (493 samples, 0.38%)tokio::runtime::task::harness::poll_future (493 samples, 0.38%)std::panic::catch_unwind (493 samples, 0.38%)std::panicking::try (493 samples, 0.38%)std::panicking::try::do_call (493 samples, 0.38%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (493 samples, 0.38%)tokio::runtime::task::harness::poll_future::{{closure}} (493 samples, 0.38%)tokio::runtime::task::core::Core<T,S>::poll (493 samples, 0.38%)tokio::runtime::time::wheel::Wheel::next_expiration (16 samples, 0.01%)torrust_tracker::core::Tracker::authorize::{{closure}} (27 samples, 0.02%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (15 samples, 0.01%)torrust_tracker::core::Tracker::send_stats_event::{{closure}} (44 samples, 0.03%)<core::hash::sip::Sip13Rounds as 
core::hash::sip::Sip>::c_rounds (15 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::d_rounds (29 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (74 samples, 0.06%)torrust_tracker::servers::udp::peer_builder::from_request (17 samples, 0.01%)torrust_tracker::servers::udp::request::AnnounceWrapper::new (51 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (54 samples, 0.04%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (58 samples, 0.04%)torrust_tracker::core::Tracker::announce::{{closure}} (70 samples, 0.05%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (113 samples, 0.09%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (175 samples, 0.13%)<T as alloc::string::ToString>::to_string (38 samples, 0.03%)core::option::Option<T>::expect (56 samples, 0.04%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (18 samples, 0.01%)<T as alloc::string::ToString>::to_string (18 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (180 samples, 0.14%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (468 samples, 0.36%)torrust_tracker::servers::udp::logging::log_response (38 samples, 0.03%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (669 samples, 0.51%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (152 samples, 0.12%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (147 samples, 0.11%)tokio::net::udp::UdpSocket::send_to::{{closure}} (138 samples, 0.11%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (119 samples, 
0.09%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (75 samples, 0.06%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (39 samples, 0.03%)mio::net::udp::UdpSocket::send_to (39 samples, 0.03%)mio::io_source::IoSource<T>::do_io (39 samples, 0.03%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (39 samples, 0.03%)mio::net::udp::UdpSocket::send_to::{{closure}} (39 samples, 0.03%)std::net::udp::UdpSocket::send_to (39 samples, 0.03%)std::sys_common::net::UdpSocket::send_to (39 samples, 0.03%)std::sys::pal::unix::cvt (39 samples, 0.03%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (39 samples, 0.03%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_stats (15 samples, 0.01%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (14 samples, 0.01%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count::to_usize::{{closure}} (33 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats::{{closure}} (33 samples, 0.03%)torrust_tracker_primitives::peer::Peer::is_seeder (33 samples, 0.03%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count (75 samples, 0.06%)core::iter::traits::iterator::Iterator::sum (75 samples, 0.06%)<usize as core::iter::traits::accum::Sum>::sum (75 samples, 0.06%)<core::iter::adapters::map::Map<I,F> as core::iter::traits::iterator::Iterator>::fold (75 samples, 0.06%)core::iter::traits::iterator::Iterator::fold (75 samples, 0.06%)core::iter::adapters::map::map_fold::{{closure}} (34 samples, 
0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (104 samples, 0.08%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (24 samples, 0.02%)core::mem::drop (15 samples, 0.01%)core::ptr::drop_in_place<core::option::Option<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (15 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (15 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (15 samples, 0.01%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (215 samples, 0.16%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (198 samples, 0.15%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (89 samples, 0.07%)core::option::Option<T>::is_some_and (32 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (31 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (30 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (30 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (26 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (34 samples, 0.03%)<core::slice::iter::Iter<T> as 
core::iter::traits::iterator::Iterator>::next (34 samples, 0.03%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (58 samples, 0.04%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (58 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (58 samples, 0.04%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (58 samples, 0.04%)<u8 as core::slice::cmp::SliceOrd>::compare (58 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (20 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (238 samples, 0.18%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (236 samples, 0.18%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (208 samples, 0.16%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (208 samples, 0.16%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (282 samples, 0.21%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (67 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (53 samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (53 samples, 
0.04%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (22 samples, 0.02%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (22 samples, 0.02%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (22 samples, 0.02%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (22 samples, 0.02%)<u8 as core::slice::cmp::SliceOrd>::compare (22 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (18 samples, 0.01%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (23 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (23 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (43 samples, 0.03%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (43 samples, 0.03%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (43 samples, 0.03%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (43 samples, 0.03%)<u8 as core::slice::cmp::SliceOrd>::compare (43 samples, 0.03%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (17 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (151 samples, 0.12%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (145 samples, 0.11%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (137 samples, 0.10%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (137 samples, 0.10%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for 
torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (266 samples, 0.20%)core::sync::atomic::AtomicU32::load (27 samples, 0.02%)core::sync::atomic::atomic_load (27 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (38 samples, 0.03%)std::sync::rwlock::RwLock<T>::read (37 samples, 0.03%)std::sys::sync::rwlock::futex::RwLock::read (36 samples, 0.03%)tracing::span::Span::log (16 samples, 0.01%)tracing::span::Span::record_all (70 samples, 0.05%)unlink_chunk (139 samples, 0.11%)rand::rng::Rng::gen (30 samples, 0.02%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (30 samples, 0.02%)rand::rng::Rng::gen (30 samples, 0.02%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (30 samples, 0.02%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (30 samples, 0.02%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (30 samples, 0.02%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (30 samples, 0.02%)rand_core::block::BlockRng<R>::generate_and_set (28 samples, 0.02%)[anon] (8,759 samples, 6.67%)[anon]uuid::v4::<impl uuid::Uuid>::new_v4 (32 samples, 0.02%)uuid::rng::bytes (32 samples, 0.02%)rand::random (32 samples, 0.02%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (15 samples, 0.01%)_int_free (338 samples, 0.26%)tcache_put (18 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (22 
samples, 0.02%)hashbrown::raw::h2 (14 samples, 0.01%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (23 samples, 0.02%)hashbrown::raw::RawTableInner::find_or_find_insert_slot_inner (17 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (25 samples, 0.02%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (15 samples, 0.01%)[profiling] (545 samples, 0.42%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (32 samples, 0.02%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (22 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (16 samples, 0.01%)alloc::vec::Vec<T,A>::reserve (30 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve (28 samples, 0.02%)<alloc::string::String as core::fmt::Write>::write_str (83 samples, 0.06%)alloc::string::String::push_str (57 samples, 0.04%)alloc::vec::Vec<T,A>::extend_from_slice (57 samples, 0.04%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (57 samples, 0.04%)alloc::vec::Vec<T,A>::append_elements (57 samples, 0.04%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (20 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (41 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (151 samples, 0.12%)core::hash::sip::u8to64_le (50 samples, 0.04%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (33 samples, 0.03%)tokio::runtime::context::CONTEXT::__getit (35 samples, 0.03%)core::cell::Cell<T>::get (33 samples, 0.03%)[unknown] (20 samples, 0.02%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (75 samples, 0.06%)core::ops::function::FnMut::call_mut (66 samples, 
0.05%)tokio::runtime::coop::poll_proceed (66 samples, 0.05%)tokio::runtime::context::budget (66 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (66 samples, 0.05%)tokio::runtime::context::budget::{{closure}} (27 samples, 0.02%)tokio::runtime::coop::poll_proceed::{{closure}} (27 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (110 samples, 0.08%)[unknown] (15 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (27 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (27 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (14 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (84 samples, 0.06%)std::sync::mutex::Mutex<T>::lock (70 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock (59 samples, 0.04%)core::sync::atomic::AtomicU32::compare_exchange (55 samples, 0.04%)core::sync::atomic::atomic_compare_exchange (55 samples, 0.04%)[unknown] (33 samples, 0.03%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (214 samples, 0.16%)__memcpy_avx512_unaligned_erms (168 samples, 0.13%)[profiling] (171 samples, 0.13%)binascii::bin2hex (77 samples, 0.06%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (280 samples, 0.21%)[unknown] (317 samples, 0.24%)[[vdso]] (2,648 samples, 2.02%)[..[unknown] (669 samples, 0.51%)[unknown] (396 samples, 0.30%)[unknown] (251 samples, 0.19%)[unknown] (65 samples, 0.05%)[unknown] (30 samples, 0.02%)[unknown] (21 samples, 0.02%)__GI___clock_gettime (56 samples, 0.04%)arena_for_chunk (72 samples, 0.05%)arena_for_chunk (62 samples, 0.05%)heap_for_ptr (49 samples, 0.04%)heap_max_size (28 samples, 0.02%)__GI___libc_free (194 
samples, 0.15%)arena_for_chunk (19 samples, 0.01%)checked_request2size (24 samples, 0.02%)__GI___libc_malloc (220 samples, 0.17%)tcache_get (44 samples, 0.03%)__GI___libc_write (25 samples, 0.02%)__GI___libc_write (14 samples, 0.01%)__GI___pthread_disable_asynccancel (97 samples, 0.07%)core::num::<impl u128>::leading_zeros (15 samples, 0.01%)compiler_builtins::float::conv::int_to_float::u128_to_f64_bits (72 samples, 0.05%)__floattidf (90 samples, 0.07%)compiler_builtins::float::conv::__floattidf (86 samples, 0.07%)exp_inline (40 samples, 0.03%)log_inline (64 samples, 0.05%)__ieee754_pow_fma (114 samples, 0.09%)__libc_calloc (106 samples, 0.08%)__libc_recvfrom (252 samples, 0.19%)__libc_sendto (133 samples, 0.10%)__memcmp_evex_movbe (137 samples, 0.10%)__memcpy_avx512_unaligned_erms (1,399 samples, 1.07%)__posix_memalign (172 samples, 0.13%)__posix_memalign (80 samples, 0.06%)_mid_memalign (71 samples, 0.05%)arena_for_chunk (14 samples, 0.01%)__pow (18 samples, 0.01%)__vdso_clock_gettime (40 samples, 0.03%)[unknown] (24 samples, 0.02%)_int_free (462 samples, 0.35%)tcache_put (54 samples, 0.04%)[unknown] (14 samples, 0.01%)_int_malloc (508 samples, 0.39%)_int_memalign (68 samples, 0.05%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (54 samples, 0.04%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (14 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (78 samples, 0.06%)alloc::raw_vec::RawVec<T,A>::grow_amortized (73 samples, 0.06%)alloc::raw_vec::finish_grow (91 samples, 0.07%)core::result::Result<T,E>::map_err (31 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Weak<ring::ec::curve25519::ed25519::signing::Ed25519KeyPair,&alloc::alloc::Global>> (16 samples, 0.01%)<alloc::sync::Weak<T,A> as core::ops::drop::Drop>::drop (16 samples, 0.01%)core::mem::drop (18 samples, 0.01%)alloc::sync::Arc<T,A>::drop_slow (21 samples, 0.02%)alloc_new_heap (49 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (49 
samples, 0.04%)core::fmt::Formatter::pad_integral (40 samples, 0.03%)core::fmt::Formatter::pad_integral::write_prefix (19 samples, 0.01%)core::fmt::write (20 samples, 0.02%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (155 samples, 0.12%)core::ptr::drop_in_place<core::option::Option<core::task::wake::Waker>> (71 samples, 0.05%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (245 samples, 0.19%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_announce::{{closure}}> (33 samples, 0.03%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}}> (37 samples, 0.03%)core::str::converts::from_utf8 (33 samples, 0.03%)core::str::validations::run_utf8_validation (20 samples, 0.02%)epoll_wait (31 samples, 0.02%)hashbrown::map::HashMap<K,V,S,A>::insert (17 samples, 0.01%)rand_chacha::guts::refill_wide (19 samples, 0.01%)std_detect::detect::arch::x86::__is_feature_detected::avx2 (17 samples, 0.01%)std_detect::detect::check_for (17 samples, 0.01%)std_detect::detect::cache::test (17 samples, 0.01%)std_detect::detect::cache::Cache::test (17 samples, 0.01%)core::sync::atomic::AtomicUsize::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)std::sys::pal::unix::time::Timespec::new (29 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (132 samples, 0.10%)core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::ge (22 samples, 0.02%)core::cmp::PartialOrd::ge (22 samples, 0.02%)std::sys::pal::unix::time::Timespec::sub_timespec (67 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock_contended (18 samples, 0.01%)std::sys_common::net::TcpListener::socket_addr (29 samples, 0.02%)std::sys_common::net::sockname (28 samples, 0.02%)syscall (552 samples, 
0.42%)core::ptr::drop_in_place<core::cell::RefMut<core::option::Option<alloc::boxed::Box<tokio::runtime::scheduler::multi_thread::worker::Core>>>> (74 samples, 0.06%)core::ptr::drop_in_place<core::cell::BorrowRefMut> (74 samples, 0.06%)<core::cell::BorrowRefMut as core::ops::drop::Drop>::drop (74 samples, 0.06%)core::cell::Cell<T>::set (74 samples, 0.06%)core::cell::Cell<T>::replace (74 samples, 0.06%)core::mem::replace (74 samples, 0.06%)core::ptr::write (74 samples, 0.06%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::push_back_or_overflow (14 samples, 0.01%)tokio::runtime::context::with_scheduler (176 samples, 0.13%)std::thread::local::LocalKey<T>::try_with (152 samples, 0.12%)tokio::runtime::context::with_scheduler::{{closure}} (151 samples, 0.12%)tokio::runtime::context::scoped::Scoped<T>::with (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (71 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (16 samples, 0.01%)core::option::Option<T>::map (19 samples, 0.01%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (24 samples, 0.02%)mio::poll::Poll::poll (53 samples, 0.04%)mio::sys::unix::selector::epoll::Selector::select (53 samples, 0.04%)core::result::Result<T,E>::map (28 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (28 samples, 0.02%)tokio::io::ready::Ready::from_mio (14 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (126 samples, 0.10%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (18 samples, 0.01%)[unknown] (51 samples, 
0.04%)[unknown] (100 samples, 0.08%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (326 samples, 0.25%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (205 samples, 0.16%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (77 samples, 0.06%)[unknown] (26 samples, 0.02%)<tokio::util::linked_list::DrainFilter<T,F> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (396 samples, 0.30%)tokio::loom::std::mutex::Mutex<T>::lock (18 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (573 samples, 0.44%)core::sync::atomic::AtomicUsize::fetch_add (566 samples, 0.43%)core::sync::atomic::atomic_add (566 samples, 0.43%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (635 samples, 0.48%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (25 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::next_remote_task (44 samples, 0.03%)tokio::runtime::scheduler::inject::shared::Shared<T>::is_empty (21 samples, 0.02%)tokio::runtime::scheduler::inject::shared::Shared<T>::len (21 samples, 0.02%)core::sync::atomic::AtomicUsize::load (21 samples, 0.02%)core::sync::atomic::atomic_load (21 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id (32 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (32 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (32 samples, 0.02%)std::sync::poison::Flag::done (32 samples, 
0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>,tokio::runtime::task::core::Header>>> (43 samples, 0.03%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (43 samples, 0.03%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (123 samples, 0.09%)tokio::runtime::task::list::OwnedTasks<S>::remove (117 samples, 0.09%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (80 samples, 0.06%)tokio::runtime::scheduler::defer::Defer::wake (17 samples, 0.01%)std::sys::pal::unix::futex::futex_wait (46 samples, 0.04%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (71 samples, 0.05%)std::sync::condvar::Condvar::wait (56 samples, 0.04%)std::sys::sync::condvar::futex::Condvar::wait (56 samples, 0.04%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (56 samples, 0.04%)core::sync::atomic::AtomicUsize::compare_exchange (37 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (37 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (138 samples, 0.11%)tokio::runtime::driver::Driver::park (77 samples, 0.06%)tokio::runtime::driver::TimeDriver::park (77 samples, 0.06%)tokio::runtime::time::Driver::park (75 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Parker::park (266 samples, 0.20%)tokio::runtime::scheduler::multi_thread::park::Inner::park (266 samples, 0.20%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (432 samples, 0.33%)tokio::runtime::scheduler::multi_thread::worker::Core::should_notify_others (26 samples, 
0.02%)core::cell::RefCell<T>::borrow_mut (94 samples, 0.07%)core::cell::RefCell<T>::try_borrow_mut (94 samples, 0.07%)core::cell::BorrowRefMut::new (94 samples, 0.07%)tokio::runtime::coop::budget (142 samples, 0.11%)tokio::runtime::coop::with_budget (142 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (121 samples, 0.09%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (44 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (208 samples, 0.16%)tokio::runtime::signal::Driver::process (30 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (46 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (46 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (35 samples, 0.03%)tokio::runtime::task::core::Core<T,S>::set_stage (75 samples, 0.06%)core::sync::atomic::AtomicUsize::fetch_xor (76 samples, 0.06%)core::sync::atomic::atomic_xor (76 samples, 0.06%)tokio::runtime::task::state::State::transition_to_complete (79 samples, 0.06%)tokio::runtime::task::harness::Harness<T,S>::complete (113 samples, 0.09%)tokio::runtime::task::state::State::transition_to_terminal (18 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::dealloc (28 samples, 0.02%)core::mem::drop (18 samples, 0.01%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (18 samples, 0.01%)core::ptr::drop_in_place<tokio::util::sharded_list::ShardGuard<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>> (16 samples, 
0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>>> (16 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (16 samples, 0.01%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (53 samples, 0.04%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (21 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (113 samples, 0.09%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (15 samples, 0.01%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (15 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (15 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (15 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (14 samples, 0.01%)tokio::runtime::task::raw::drop_abort_handle (82 samples, 0.06%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (23 samples, 0.02%)tokio::runtime::task::state::State::ref_dec (23 samples, 0.02%)core::sync::atomic::AtomicUsize::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::task::raw::drop_join_handle_slow (34 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::drop_join_handle_slow (32 samples, 0.02%)tokio::runtime::task::state::State::unset_join_interested (23 samples, 0.02%)tokio::runtime::task::state::State::fetch_update (23 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (43 samples, 0.03%)core::num::<impl u32>::wrapping_add (23 samples, 
0.02%)core::option::Option<T>::or_else (37 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (36 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (36 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (38 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (59 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (45 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (132 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (63 samples, 0.05%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run (290 samples, 0.22%)tokio::runtime::context::runtime::enter_runtime (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (290 samples, 0.22%)tokio::runtime::context::set_scheduler (290 samples, 0.22%)std::thread::local::LocalKey<T>::with (290 samples, 0.22%)std::thread::local::LocalKey<T>::try_with (290 samples, 0.22%)tokio::runtime::context::set_scheduler::{{closure}} (290 samples, 0.22%)tokio::runtime::context::scoped::Scoped<T>::set (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::Context::run (290 samples, 0.22%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (327 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (322 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::poll (333 samples, 0.25%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (342 samples, 0.26%)tokio::runtime::task::harness::poll_future::{{closure}} 
(342 samples, 0.26%)tokio::runtime::task::harness::poll_future (348 samples, 0.27%)std::panic::catch_unwind (347 samples, 0.26%)std::panicking::try (347 samples, 0.26%)std::panicking::try::do_call (347 samples, 0.26%)core::sync::atomic::AtomicUsize::compare_exchange (18 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (18 samples, 0.01%)tokio::runtime::task::state::State::transition_to_running (47 samples, 0.04%)tokio::runtime::task::state::State::fetch_update_action (47 samples, 0.04%)tokio::runtime::task::state::State::transition_to_running::{{closure}} (19 samples, 0.01%)tokio::runtime::task::raw::poll (427 samples, 0.33%)tokio::runtime::task::harness::Harness<T,S>::poll (408 samples, 0.31%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (407 samples, 0.31%)tokio::runtime::task::state::State::transition_to_idle (17 samples, 0.01%)core::array::<impl core::default::Default for [T: 32]>::default (21 samples, 0.02%)tokio::runtime::time::wheel::Wheel::poll (14 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (72 samples, 0.05%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (23 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (15 samples, 0.01%)tokio::runtime::time::source::TimeSource::now (14 samples, 0.01%)tokio::runtime::time::Driver::park_internal (155 samples, 0.12%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (96 samples, 0.07%)tokio::runtime::time::wheel::level::slot_range (35 samples, 0.03%)core::num::<impl usize>::pow (35 samples, 0.03%)tokio::runtime::time::wheel::level::level_range (39 samples, 0.03%)tokio::runtime::time::wheel::level::slot_range (33 samples, 0.03%)core::num::<impl usize>::pow (33 samples, 0.03%)tokio::runtime::time::wheel::level::Level::next_expiration (208 samples, 0.16%)tokio::runtime::time::wheel::level::slot_range (48 samples, 0.04%)core::num::<impl usize>::pow (48 samples, 
0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (277 samples, 0.21%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::is_empty (18 samples, 0.01%)core::option::Option<T>::is_some (18 samples, 0.01%)torrust_tracker::core::Tracker::authorize::{{closure}} (50 samples, 0.04%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (37 samples, 0.03%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (27 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (19 samples, 0.01%)core::iter::traits::iterator::Iterator::collect (17 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (17 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (17 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (17 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (20 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (20 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (20 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (62 samples, 0.05%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (40 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (27 samples, 0.02%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (17 samples, 0.01%)torrust_tracker::servers::udp::peer_builder::from_request (24 
samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (19 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (355 samples, 0.27%)<F as core::future::into_future::IntoFuture>::into_future (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (37 samples, 0.03%)core::sync::atomic::AtomicUsize::fetch_add (25 samples, 0.02%)core::sync::atomic::atomic_add (25 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet (14 samples, 0.01%)core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (20 samples, 0.02%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)core::result::Result<T,E>::map_err (16 samples, 0.01%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (136 samples, 0.10%)torrust_tracker::core::Tracker::announce::{{closure}} (173 samples, 0.13%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (267 samples, 0.20%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (30 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (423 samples, 0.32%)core::fmt::Formatter::new (26 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (80 samples, 0.06%)core::fmt::num::imp::fmt_u64 (58 samples, 0.04%)core::intrinsics::copy_nonoverlapping (15 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (74 samples, 0.06%)core::fmt::num::imp::fmt_u64 (70 samples, 0.05%)<T as alloc::string::ToString>::to_string (207 samples, 0.16%)core::option::Option<T>::expect (19 samples, 0.01%)core::ptr::drop_in_place<alloc::string::String> (18 samples, 0.01%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (18 samples, 0.01%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (18 samples, 
0.01%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (18 samples, 0.01%)torrust_tracker::servers::udp::logging::map_action_name (25 samples, 0.02%)alloc::str::<impl alloc::borrow::ToOwned for str>::to_owned (14 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (345 samples, 0.26%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (18 samples, 0.01%)core::fmt::num::imp::fmt_u64 (14 samples, 0.01%)<T as alloc::string::ToString>::to_string (35 samples, 0.03%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (1,067 samples, 0.81%)torrust_tracker::servers::udp::logging::log_response (72 samples, 0.05%)alloc::vec::from_elem (68 samples, 0.05%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (68 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (68 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (68 samples, 0.05%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (68 samples, 0.05%)alloc::alloc::Global::alloc_impl (68 samples, 0.05%)alloc::alloc::alloc_zeroed (68 samples, 0.05%)__rdl_alloc_zeroed (68 samples, 0.05%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (68 samples, 0.05%)[unknown] (48 samples, 0.04%)[unknown] (16 samples, 0.01%)[unknown] (28 samples, 0.02%)std::sys::pal::unix::cvt (134 samples, 0.10%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (134 samples, 0.10%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (1,908 samples, 1.45%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (504 samples, 0.38%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (382 samples, 0.29%)tokio::net::udp::UdpSocket::send_to::{{closure}} (344 samples, 0.26%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (332 samples, 0.25%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (304 samples, 
0.23%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (215 samples, 0.16%)mio::net::udp::UdpSocket::send_to (185 samples, 0.14%)mio::io_source::IoSource<T>::do_io (185 samples, 0.14%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (185 samples, 0.14%)mio::net::udp::UdpSocket::send_to::{{closure}} (185 samples, 0.14%)std::net::udp::UdpSocket::send_to (185 samples, 0.14%)std::sys_common::net::UdpSocket::send_to (169 samples, 0.13%)alloc::vec::Vec<T>::with_capacity (17 samples, 0.01%)alloc::vec::Vec<T,A>::with_capacity_in (17 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (104 samples, 0.08%)tokio::net::udp::UdpSocket::ready::{{closure}} (85 samples, 0.06%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (190 samples, 0.14%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (49 samples, 0.04%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (28 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (330 samples, 0.25%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (327 samples, 0.25%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (92 samples, 0.07%)tokio::task::spawn::spawn (92 samples, 0.07%)tokio::task::spawn::spawn_inner (92 samples, 0.07%)tokio::runtime::context::current::with_current (92 samples, 0.07%)std::thread::local::LocalKey<T>::try_with (92 samples, 0.07%)tokio::runtime::context::current::with_current::{{closure}} (92 samples, 0.07%)core::option::Option<T>::map (92 samples, 0.07%)tokio::task::spawn::spawn_inner::{{closure}} (92 samples, 0.07%)tokio::runtime::scheduler::Handle::spawn (92 samples, 0.07%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (92 samples, 0.07%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (92 samples, 0.07%)tokio::runtime::task::list::OwnedTasks<S>::bind (90 samples, 
0.07%)tokio::runtime::task::new_task (89 samples, 0.07%)tokio::runtime::task::raw::RawTask::new (89 samples, 0.07%)tokio::runtime::task::core::Cell<T,S>::new (89 samples, 0.07%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (34 samples, 0.03%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (27 samples, 0.02%)alloc::sync::Arc<T>::new (21 samples, 0.02%)alloc::boxed::Box<T>::new (21 samples, 0.02%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (152 samples, 0.12%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (125 samples, 0.10%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (88 samples, 0.07%)core::option::Option<T>::is_some_and (18 samples, 0.01%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (17 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (17 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (17 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for 
torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (22 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (22 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (17 samples, 0.01%)std::sync::rwlock::RwLock<T>::read (16 samples, 0.01%)std::sys::sync::rwlock::futex::RwLock::read (16 samples, 0.01%)tracing::span::Span::log (26 samples, 0.02%)core::fmt::Arguments::new_v1 (15 samples, 0.01%)tracing_core::span::Record::is_empty (34 samples, 0.03%)tracing_core::field::ValueSet::is_empty (34 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::all (22 samples, 0.02%)tracing_core::field::ValueSet::is_empty::{{closure}} (18 samples, 0.01%)core::option::Option<T>::is_none (16 samples, 0.01%)core::option::Option<T>::is_some (16 samples, 0.01%)tracing::span::Span::record_all (143 samples, 0.11%)unlink_chunk (185 samples, 0.14%)uuid::builder::Builder::with_variant (48 samples, 0.04%)[unknown] (40 samples, 0.03%)uuid::builder::Builder::from_random_bytes (77 samples, 0.06%)uuid::builder::Builder::with_version (29 samples, 0.02%)[unknown] (24 samples, 0.02%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (161 samples, 0.12%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (161 samples, 
0.12%)[unknown] (92 samples, 0.07%)rand::rng::Rng::gen (162 samples, 0.12%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (162 samples, 0.12%)rand::rng::Rng::gen (162 samples, 0.12%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (162 samples, 0.12%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (162 samples, 0.12%)[unknown] (18,233 samples, 13.89%)[unknown]uuid::v4::<impl uuid::Uuid>::new_v4 (270 samples, 0.21%)uuid::rng::bytes (190 samples, 0.14%)rand::random (190 samples, 0.14%)__memcpy_avx512_unaligned_erms (69 samples, 0.05%)_int_free (23 samples, 0.02%)_int_malloc (23 samples, 0.02%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)advise_stack_range (31 samples, 0.02%)__GI_madvise (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (29 samples, 0.02%)[unknown] (28 samples, 0.02%)[unknown] (28 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (17 samples, 0.01%)std::sys::pal::unix::futex::futex_wait (31 samples, 0.02%)syscall (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 
samples, 0.02%)[unknown] (29 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (17 samples, 0.01%)std::sync::condvar::Condvar::wait_timeout (35 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait_timeout (35 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (35 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (56 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (56 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (56 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (56 samples, 0.04%)std::sys::pal::unix::futex::futex_wait (56 samples, 0.04%)syscall (56 samples, 0.04%)[unknown] (56 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (53 samples, 0.04%)[unknown] (52 samples, 0.04%)[unknown] (46 samples, 0.04%)[unknown] (39 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (15 samples, 0.01%)[[vdso]] (26 samples, 0.02%)[[vdso]] (263 samples, 0.20%)__ieee754_pow_fma (26 samples, 0.02%)__pow (314 samples, 0.24%)std::f64::<impl f64>::powf (345 samples, 0.26%)__GI___clock_gettime (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (416 samples, 0.32%)std::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_processing_scheduled_tasks (24 samples, 0.02%)std::time::Instant::now (18 samples, 0.01%)std::sys::pal::unix::time::Instant::now (18 samples, 0.01%)mio::poll::Poll::poll (102 samples, 0.08%)mio::sys::unix::selector::epoll::Selector::select (102 samples, 0.08%)epoll_wait (99 samples, 0.08%)[unknown] (92 samples, 0.07%)[unknown] (91 samples, 
0.07%)[unknown] (91 samples, 0.07%)[unknown] (88 samples, 0.07%)[unknown] (85 samples, 0.06%)[unknown] (84 samples, 0.06%)[unknown] (43 samples, 0.03%)[unknown] (29 samples, 0.02%)[unknown] (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (125 samples, 0.10%)tokio::runtime::scheduler::multi_thread::park::Parker::park_timeout (125 samples, 0.10%)tokio::runtime::driver::Driver::park_timeout (125 samples, 0.10%)tokio::runtime::driver::TimeDriver::park_timeout (125 samples, 0.10%)tokio::runtime::time::Driver::park_timeout (125 samples, 0.10%)tokio::runtime::time::Driver::park_internal (116 samples, 0.09%)tokio::runtime::io::driver::Driver::turn (116 samples, 0.09%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (148 samples, 0.11%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (111 samples, 0.08%)alloc::sync::Arc<T,A>::inner (111 samples, 0.08%)core::ptr::non_null::NonNull<T>::as_ref (111 samples, 0.08%)core::sync::atomic::AtomicUsize::compare_exchange (16 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (16 samples, 0.01%)core::bool::<impl bool>::then (88 samples, 0.07%)std::sys::pal::unix::futex::futex_wait (13,339 samples, 10.16%)std::sys::pal::..syscall (13,003 samples, 9.90%)syscall[unknown] (12,895 samples, 9.82%)[unknown][unknown] (12,759 samples, 9.72%)[unknown][unknown] (12,313 samples, 9.38%)[unknown][unknown] (12,032 samples, 9.16%)[unknown][unknown] (11,734 samples, 8.94%)[unknown][unknown] (11,209 samples, 8.54%)[unknown][unknown] (10,265 samples, 7.82%)[unknown][unknown] (9,345 samples, 7.12%)[unknown][unknown] (8,623 samples, 6.57%)[unknown][unknown] (7,744 samples, 5.90%)[unknow..[unknown] (5,922 samples, 4.51%)[unkn..[unknown] (4,459 samples, 3.40%)[un..[unknown] (2,808 samples, 2.14%)[..[unknown] (1,275 samples, 0.97%)[unknown] (1,022 samples, 0.78%)[unknown] (738 samples, 0.56%)[unknown] (607 samples, 0.46%)[unknown] (155 samples, 
0.12%)core::result::Result<T,E>::is_err (77 samples, 0.06%)core::result::Result<T,E>::is_ok (77 samples, 0.06%)std::sync::condvar::Condvar::wait (13,429 samples, 10.23%)std::sync::cond..std::sys::sync::condvar::futex::Condvar::wait (13,428 samples, 10.23%)std::sys::sync:..std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (13,428 samples, 10.23%)std::sys::sync:..std::sys::sync::mutex::futex::Mutex::lock (89 samples, 0.07%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (13,508 samples, 10.29%)tokio::runtime:..tokio::loom::std::mutex::Mutex<T>::lock (64 samples, 0.05%)std::sync::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (31 samples, 0.02%)core::sync::atomic::AtomicU32::compare_exchange (30 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (30 samples, 0.02%)core::sync::atomic::AtomicUsize::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (38 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Parker::park (34 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park (34 samples, 0.03%)core::array::<impl core::default::Default for [T: 32]>::default (17 samples, 0.01%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (19 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (33 samples, 0.03%)tokio::runtime::time::wheel::level::slot_range (15 samples, 0.01%)core::num::<impl usize>::pow (15 samples, 0.01%)tokio::runtime::time::wheel::level::level_range (17 samples, 0.01%)tokio::runtime::time::wheel::level::slot_range (15 samples, 0.01%)core::num::<impl usize>::pow (15 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_expiration (95 samples, 0.07%)tokio::runtime::time::wheel::level::slot_range (41 samples, 0.03%)core::num::<impl usize>::pow (41 samples, 
0.03%)tokio::runtime::time::wheel::Wheel::next_expiration (129 samples, 0.10%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (202 samples, 0.15%)tokio::runtime::time::wheel::Wheel::poll_at (17 samples, 0.01%)tokio::runtime::time::wheel::Wheel::next_expiration (15 samples, 0.01%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (38 samples, 0.03%)core::option::Option<T>::map (38 samples, 0.03%)core::result::Result<T,E>::map (31 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (31 samples, 0.02%)alloc::vec::Vec<T,A>::set_len (17 samples, 0.01%)[[vdso]] (28 samples, 0.02%)[unknown] (11,031 samples, 8.40%)[unknown][unknown] (10,941 samples, 8.33%)[unknown][unknown] (10,850 samples, 8.26%)[unknown][unknown] (10,691 samples, 8.14%)[unknown][unknown] (10,070 samples, 7.67%)[unknown][unknown] (9,737 samples, 7.42%)[unknown][unknown] (7,659 samples, 5.83%)[unknow..[unknown] (6,530 samples, 4.97%)[unkno..[unknown] (5,633 samples, 4.29%)[unkn..[unknown] (5,055 samples, 3.85%)[unk..[unknown] (4,046 samples, 3.08%)[un..[unknown] (2,911 samples, 2.22%)[..[unknown] (2,115 samples, 1.61%)[unknown] (1,226 samples, 0.93%)[unknown] (455 samples, 0.35%)[unknown] (408 samples, 0.31%)[unknown] (249 samples, 0.19%)[unknown] (202 samples, 0.15%)[unknown] (100 samples, 0.08%)mio::poll::Poll::poll (11,328 samples, 8.63%)mio::poll::P..mio::sys::unix::selector::epoll::Selector::select (11,328 samples, 8.63%)mio::sys::un..epoll_wait (11,229 samples, 8.55%)epoll_wait__GI___pthread_disable_asynccancel (50 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (47 samples, 0.04%)tokio::util::bit::Pack::pack (38 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (25 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (23 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (19 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (11,595 samples, 
8.83%)tokio::runti..tokio::runtime::io::scheduled_io::ScheduledIo::wake (175 samples, 0.13%)__GI___clock_gettime (15 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (18 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (26 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (26 samples, 0.02%)tokio::time::clock::Clock::now (20 samples, 0.02%)tokio::time::clock::now (20 samples, 0.02%)std::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Instant::now (20 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (17 samples, 0.01%)tokio::runtime::time::Driver::park_internal (11,686 samples, 8.90%)tokio::runtim..tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (11,957 samples, 9.11%)tokio::runtim..tokio::runtime::driver::Driver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::driver::TimeDriver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::time::Driver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::scheduler::multi_thread::park::Parker::park (25,502 samples, 19.42%)tokio::runtime::scheduler::mul..tokio::runtime::scheduler::multi_thread::park::Inner::park (25,502 samples, 19.42%)tokio::runtime::scheduler::mul..tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (25,547 samples, 19.46%)tokio::runtime::scheduler::mul..core::result::Result<T,E>::is_err (14 samples, 0.01%)core::result::Result<T,E>::is_ok (14 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (45 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (45 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (84 samples, 0.06%)std::sync::mutex::Mutex<T>::lock (81 samples, 0.06%)std::sys::sync::mutex::futex::Mutex::lock (73 samples, 0.06%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (122 samples, 0.09%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (90 samples, 0.07%)core::cmp::impls::<impl 
core::cmp::PartialEq for usize>::eq (90 samples, 0.07%)core::slice::<impl [T]>::contains (241 samples, 0.18%)<T as core::slice::cmp::SliceContains>::slice_contains (241 samples, 0.18%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (241 samples, 0.18%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (75 samples, 0.06%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (75 samples, 0.06%)core::sync::atomic::AtomicU32::compare_exchange (20 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (283 samples, 0.22%)tokio::loom::std::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (24 samples, 0.02%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (33 samples, 0.03%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (33 samples, 0.03%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (33 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::unpark_worker_by_id (98 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (401 samples, 0.31%)alloc::vec::Vec<T,A>::push (14 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (15 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (15 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::unlock (14 samples, 0.01%)core::result::Result<T,E>::is_err (15 samples, 0.01%)core::result::Result<T,E>::is_ok (15 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (22 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (22 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (63 samples, 0.05%)std::sync::mutex::Mutex<T>::lock (62 samples, 
0.05%)std::sys::sync::mutex::futex::Mutex::lock (59 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (106 samples, 0.08%)tokio::runtime::scheduler::multi_thread::idle::State::dec_num_unparked (14 samples, 0.01%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (21 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (17 samples, 0.01%)alloc::sync::Arc<T,A>::inner (17 samples, 0.01%)core::ptr::non_null::NonNull<T>::as_ref (17 samples, 0.01%)core::sync::atomic::AtomicU32::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (68 samples, 0.05%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (51 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (33 samples, 0.03%)core::sync::atomic::AtomicU64::load (16 samples, 0.01%)core::sync::atomic::atomic_load (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (106 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::Context::park (26,672 samples, 20.31%)tokio::runtime::scheduler::multi..tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (272 samples, 0.21%)tokio::runtime::scheduler::multi_thread::worker::Core::has_tasks (33 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::has_tasks (24 samples, 0.02%)tokio::runtime::context::budget (18 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (18 samples, 0.01%)syscall (61 samples, 0.05%)__memcpy_avx512_unaligned_erms (172 samples, 0.13%)__memcpy_avx512_unaligned_erms (224 samples, 0.17%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (228 
samples, 0.17%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (228 samples, 0.17%)std::panic::catch_unwind (415 samples, 0.32%)std::panicking::try (415 samples, 0.32%)std::panicking::try::do_call (415 samples, 0.32%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (415 samples, 0.32%)core::ops::function::FnOnce::call_once (415 samples, 0.32%)tokio::runtime::task::harness::Harness<T,S>::complete::{{closure}} (415 samples, 0.32%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (415 samples, 0.32%)tokio::runtime::task::core::Core<T,S>::set_stage (410 samples, 0.31%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (27 samples, 0.02%)core::result::Result<T,E>::is_err (43 samples, 0.03%)core::result::Result<T,E>::is_ok (43 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::complete (570 samples, 0.43%)tokio::runtime::task::harness::Harness<T,S>::release (155 samples, 0.12%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (152 samples, 0.12%)tokio::runtime::task::list::OwnedTasks<S>::remove (152 samples, 0.12%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (103 samples, 0.08%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (65 samples, 0.05%)tokio::loom::std::mutex::Mutex<T>::lock (58 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (58 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (54 samples, 0.04%)std::io::stdio::stderr::INSTANCE (17 samples, 0.01%)tokio::runtime::coop::budget (26 samples, 0.02%)tokio::runtime::coop::with_budget (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (35 
samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (70 samples, 0.05%)__memcpy_avx512_unaligned_erms (42 samples, 0.03%)core::cmp::Ord::min (22 samples, 0.02%)core::cmp::min_by (22 samples, 0.02%)std::io::cursor::Cursor<T>::remaining_slice (27 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (30 samples, 0.02%)std::io::cursor::Cursor<T>::remaining_slice (24 samples, 0.02%)core::slice::index::<impl core::ops::index::Index<I> for [T]>::index (19 samples, 0.01%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::index (19 samples, 0.01%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (19 samples, 0.01%)<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (19 samples, 0.01%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (44 samples, 0.03%)std::io::impls::<impl std::io::Read for &[u8]>::read_exact (20 samples, 0.02%)byteorder::io::ReadBytesExt::read_i32 (46 samples, 0.04%)core::cmp::Ord::min (14 samples, 0.01%)core::cmp::min_by (14 samples, 0.01%)std::io::cursor::Cursor<T>::remaining_slice (19 samples, 0.01%)byteorder::io::ReadBytesExt::read_i64 (24 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (24 samples, 0.02%)aquatic_udp_protocol::request::Request::from_bytes (349 samples, 0.27%)__GI___lll_lock_wake_private (148 samples, 0.11%)[unknown] (139 samples, 0.11%)[unknown] (137 samples, 0.10%)[unknown] (123 samples, 0.09%)[unknown] (111 samples, 0.08%)[unknown] (98 samples, 0.07%)[unknown] (42 samples, 0.03%)[unknown] (30 samples, 0.02%)__GI___lll_lock_wait_private (553 samples, 0.42%)futex_wait (541 samples, 0.41%)[unknown] (536 samples, 0.41%)[unknown] (531 samples, 0.40%)[unknown] (524 samples, 0.40%)[unknown] (515 samples, 0.39%)[unknown] (498 samples, 0.38%)[unknown] (470 samples, 0.36%)[unknown] (435 samples, 0.33%)[unknown] (350 samples, 0.27%)[unknown] (327 samples, 
0.25%)[unknown] (290 samples, 0.22%)[unknown] (222 samples, 0.17%)[unknown] (160 samples, 0.12%)[unknown] (104 samples, 0.08%)[unknown] (33 samples, 0.03%)[unknown] (25 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (703 samples, 0.54%)__GI___libc_free (866 samples, 0.66%)tracing::span::Span::record_all (30 samples, 0.02%)unlink_chunk (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (899 samples, 0.68%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (899 samples, 0.68%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (899 samples, 0.68%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (899 samples, 0.68%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (899 samples, 0.68%)alloc::alloc::dealloc (899 samples, 0.68%)__rdl_dealloc (899 samples, 0.68%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (899 samples, 0.68%)core::result::Result<T,E>::expect (91 samples, 0.07%)core::result::Result<T,E>::map_err (28 samples, 0.02%)[[vdso]] (28 samples, 0.02%)__GI___clock_gettime (47 samples, 0.04%)std::time::Instant::elapsed (67 samples, 0.05%)std::time::Instant::now (54 samples, 0.04%)std::sys::pal::unix::time::Instant::now (54 samples, 0.04%)std::sys::pal::unix::time::Timespec::now (53 samples, 0.04%)std::sys::pal::unix::cvt (23 samples, 0.02%)__GI_getsockname (3,792 samples, 2.89%)__..[unknown] (3,714 samples, 2.83%)[u..[unknown] (3,661 samples, 2.79%)[u..[unknown] (3,557 samples, 2.71%)[u..[unknown] (3,416 samples, 2.60%)[u..[unknown] (2,695 samples, 2.05%)[..[unknown] (2,063 samples, 1.57%)[unknown] (891 samples, 0.68%)[unknown] (270 samples, 0.21%)[unknown] (99 samples, 0.08%)[unknown] (94 samples, 0.07%)[unknown] (84 samples, 0.06%)[unknown] (77 samples, 0.06%)[unknown] (25 samples, 0.02%)[unknown] (16 samples, 0.01%)std::sys_common::net::TcpListener::socket_addr::{{closure}} (3,800 samples, 
2.89%)st..tokio::net::udp::UdpSocket::local_addr (3,838 samples, 2.92%)to..mio::net::udp::UdpSocket::local_addr (3,838 samples, 2.92%)mi..std::net::tcp::TcpListener::local_addr (3,838 samples, 2.92%)st..std::sys_common::net::TcpListener::socket_addr (3,838 samples, 2.92%)st..std::sys_common::net::sockname (3,835 samples, 2.92%)st..[[vdso]] (60 samples, 0.05%)rand_chacha::guts::ChaCha::pos64 (168 samples, 0.13%)<ppv_lite86::soft::x2<W,G> as core::ops::arith::AddAssign>::add_assign (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::AddAssign>::add_assign (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::Add>::add (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_add_epi32 (26 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_or_si256 (29 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (31 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (31 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right24 (18 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right24 (18 samples, 0.01%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (18 samples, 0.01%)rand_chacha::guts::round (118 samples, 0.09%)rand_chacha::guts::refill_wide::impl_avx2 (312 samples, 0.24%)rand_chacha::guts::refill_wide::fn_impl (312 samples, 0.24%)rand_chacha::guts::refill_wide_impl (312 samples, 0.24%)<rand_chacha::chacha::ChaCha12Core as 
rand_core::block::BlockRngCore>::generate (384 samples, 0.29%)rand_chacha::guts::ChaCha::refill4 (384 samples, 0.29%)rand::rng::Rng::gen (432 samples, 0.33%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (432 samples, 0.33%)rand::rng::Rng::gen (432 samples, 0.33%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (432 samples, 0.33%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (432 samples, 0.33%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (432 samples, 0.33%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (432 samples, 0.33%)rand_core::block::BlockRng<R>::generate_and_set (392 samples, 0.30%)<rand::rngs::adapter::reseeding::ReseedingCore<R,Rsdr> as rand_core::block::BlockRngCore>::generate (392 samples, 0.30%)torrust_tracker::servers::udp::handlers::RequestId::make (440 samples, 0.34%)uuid::v4::<impl uuid::Uuid>::new_v4 (436 samples, 0.33%)uuid::rng::bytes (435 samples, 0.33%)rand::random (435 samples, 0.33%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (34 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (22 samples, 0.02%)core::iter::traits::iterator::Iterator::collect (16 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (16 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (16 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (16 samples, 
0.01%)<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::next (15 samples, 0.01%)core::iter::traits::iterator::Iterator::find (15 samples, 0.01%)core::iter::traits::iterator::Iterator::try_fold (15 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (31 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.03%)core::slice::iter::Iter<T>::post_inc_start (14 samples, 0.01%)core::ptr::non_null::NonNull<T>::add (14 samples, 0.01%)__memcmp_evex_movbe (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (26 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (165 samples, 0.13%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (165 samples, 0.13%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (165 samples, 0.13%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (165 samples, 0.13%)<u8 as core::slice::cmp::SliceOrd>::compare (165 samples, 0.13%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (339 samples, 0.26%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (308 samples, 0.23%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (308 samples, 0.23%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (342 samples, 0.26%)std::sys::sync::rwlock::futex::RwLock::spin_read (25 samples, 
0.02%)std::sys::sync::rwlock::futex::RwLock::spin_until (25 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read_contended (28 samples, 0.02%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (436 samples, 0.33%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (397 samples, 0.30%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (29 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (29 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (29 samples, 0.02%)__memcmp_evex_movbe (31 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (52 samples, 0.04%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (52 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (52 samples, 0.04%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (52 samples, 0.04%)<u8 as core::slice::cmp::SliceOrd>::compare (52 samples, 0.04%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (103 samples, 0.08%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (102 samples, 0.08%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (96 samples, 0.07%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (96 samples, 0.07%)<core::ptr::non_null::NonNull<T> 
as core::cmp::PartialEq>::eq (72 samples, 0.05%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (104 samples, 0.08%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (104 samples, 0.08%)core::slice::iter::Iter<T>::post_inc_start (32 samples, 0.02%)core::ptr::non_null::NonNull<T>::add (32 samples, 0.02%)__memcmp_evex_movbe (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (81 samples, 0.06%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (271 samples, 0.21%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (271 samples, 0.21%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (271 samples, 0.21%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (271 samples, 0.21%)<u8 as core::slice::cmp::SliceOrd>::compare (271 samples, 0.21%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (610 samples, 0.46%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (566 samples, 0.43%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (566 samples, 0.43%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Immut,K,V,Type>::keys (18 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (616 samples, 0.47%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::KV>::split (15 samples, 0.01%)alloc::collections::btree::map::entry::Entry<K,V,A>::or_insert (46 samples, 0.04%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (45 samples, 
0.03%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (40 samples, 0.03%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert (27 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (29 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (20 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (120 samples, 0.09%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (118 samples, 0.09%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Owned,K,V,alloc::collections::btree::node::marker::Leaf>::new_leaf (118 samples, 0.09%)alloc::collections::btree::node::LeafNode<K,V>::new (118 samples, 0.09%)alloc::boxed::Box<T,A>::new_uninit_in (118 samples, 0.09%)alloc::boxed::Box<T,A>::try_new_uninit_in (118 samples, 0.09%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (118 samples, 0.09%)alloc::alloc::Global::alloc_impl (118 samples, 0.09%)alloc::alloc::alloc (118 samples, 0.09%)__rdl_alloc (118 samples, 0.09%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (118 samples, 0.09%)__GI___libc_malloc (118 samples, 0.09%)_int_malloc (107 samples, 0.08%)_int_malloc (28 samples, 0.02%)__GI___libc_malloc (32 samples, 0.02%)__rdl_alloc (36 samples, 0.03%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (36 samples, 0.03%)alloc::sync::Arc<T>::new (42 samples, 0.03%)alloc::boxed::Box<T>::new (42 
samples, 0.03%)alloc::alloc::exchange_malloc (39 samples, 0.03%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (39 samples, 0.03%)alloc::alloc::Global::alloc_impl (39 samples, 0.03%)alloc::alloc::alloc (39 samples, 0.03%)core::mem::drop (15 samples, 0.01%)core::ptr::drop_in_place<core::option::Option<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (15 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (15 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (15 samples, 0.01%)__GI___libc_free (39 samples, 0.03%)_int_free (37 samples, 0.03%)get_max_fast (16 samples, 0.01%)core::option::Option<T>::is_some_and (50 samples, 0.04%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (50 samples, 0.04%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (50 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (50 samples, 0.04%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (290 samples, 0.22%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (284 samples, 0.22%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (255 samples, 0.19%)std::sys::sync::rwlock::futex::RwLock::spin_read (16 samples, 0.01%)std::sys::sync::rwlock::futex::RwLock::spin_until (16 samples, 
0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (21 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (21 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (21 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read_contended (21 samples, 0.02%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (1,147 samples, 0.87%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (1,144 samples, 0.87%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents_mut (32 samples, 0.02%)std::sync::rwlock::RwLock<T>::write (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::write (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::write_contended (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_write (28 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_until (28 samples, 0.02%)torrust_tracker::core::Tracker::announce::{{closure}} (1,597 samples, 1.22%)<core::net::socket_addr::SocketAddrV4 as core::hash::Hash>::hash (14 samples, 0.01%)<core::net::ip_addr::Ipv4Addr as core::hash::Hash>::hash (14 samples, 0.01%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (29 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (24 
samples, 0.02%)<core::time::Nanoseconds as core::hash::Hash>::hash (25 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (25 samples, 0.02%)core::hash::Hasher::write_u32 (25 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (25 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (25 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (36 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (37 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (37 samples, 0.03%)<core::time::Duration as core::hash::Hash>::hash (64 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (39 samples, 0.03%)core::hash::Hasher::write_u64 (39 samples, 0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (122 samples, 0.09%)core::hash::impls::<impl core::hash::Hash for u64>::hash (58 samples, 0.04%)core::hash::Hasher::write_u64 (58 samples, 0.04%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (58 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (58 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (57 samples, 0.04%)core::hash::sip::u8to64_le (23 samples, 0.02%)core::hash::Hasher::write_length_prefix (27 samples, 0.02%)core::hash::Hasher::write_usize (27 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (16 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (246 samples, 0.19%)core::array::<impl core::hash::Hash for [T: N]>::hash (93 samples, 0.07%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (93 samples, 
0.07%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (66 samples, 0.05%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (62 samples, 0.05%)core::hash::sip::u8to64_le (17 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::check (285 samples, 0.22%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (36 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (36 samples, 0.03%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (24 samples, 0.02%)std::time::SystemTime::now (19 samples, 0.01%)std::sys::pal::unix::time::SystemTime::now (19 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (1,954 samples, 1.49%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (24 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (18 samples, 0.01%)<core::time::Nanoseconds as core::hash::Hash>::hash (20 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (20 samples, 0.02%)core::hash::Hasher::write_u32 (20 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (20 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (20 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (44 samples, 0.03%)<core::time::Duration as core::hash::Hash>::hash (65 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (45 samples, 0.03%)core::hash::Hasher::write_u64 (45 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (45 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (45 samples, 
0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (105 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u64>::hash (40 samples, 0.03%)core::hash::Hasher::write_u64 (40 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (40 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (40 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (39 samples, 0.03%)core::hash::Hasher::write_length_prefix (34 samples, 0.03%)core::hash::Hasher::write_usize (34 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (34 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (34 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (33 samples, 0.03%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (231 samples, 0.18%)core::array::<impl core::hash::Hash for [T: N]>::hash (100 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (100 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (66 samples, 0.05%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (61 samples, 0.05%)core::hash::sip::u8to64_le (16 samples, 0.01%)_int_free (16 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (270 samples, 0.21%)torrust_tracker::servers::udp::connection_cookie::make (268 samples, 0.20%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (36 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (35 samples, 0.03%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (31 samples, 
0.02%)std::time::SystemTime::now (26 samples, 0.02%)std::sys::pal::unix::time::SystemTime::now (26 samples, 0.02%)torrust_tracker::core::ScrapeData::add_file (19 samples, 0.01%)std::collections::hash::map::HashMap<K,V,S>::insert (19 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (19 samples, 0.01%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (16 samples, 0.01%)hashbrown::raw::RawTable<T,A>::reserve (16 samples, 0.01%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (17 samples, 0.01%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (17 samples, 0.01%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (17 samples, 0.01%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (17 samples, 0.01%)<u8 as core::slice::cmp::SliceOrd>::compare (17 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (53 samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (53 samples, 0.04%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (2,336 samples, 1.78%)t..torrust_tracker::servers::udp::handlers::handle_scrape::{{closure}} (101 samples, 0.08%)torrust_tracker::core::Tracker::scrape::{{closure}} (90 samples, 0.07%)torrust_tracker::core::Tracker::get_swarm_metadata (68 samples, 0.05%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for 
torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (64 samples, 0.05%)alloc::raw_vec::finish_grow (19 samples, 0.01%)alloc::vec::Vec<T,A>::reserve (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::grow_amortized (21 samples, 0.02%)<alloc::string::String as core::fmt::Write>::write_str (23 samples, 0.02%)alloc::string::String::push_str (23 samples, 0.02%)alloc::vec::Vec<T,A>::extend_from_slice (23 samples, 0.02%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (23 samples, 0.02%)alloc::vec::Vec<T,A>::append_elements (23 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (85 samples, 0.06%)core::fmt::num::imp::fmt_u64 (78 samples, 0.06%)<alloc::string::String as core::fmt::Write>::write_str (15 samples, 0.01%)alloc::string::String::push_str (15 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (15 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (15 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (15 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (37 samples, 0.03%)core::fmt::num::imp::fmt_u64 (36 samples, 0.03%)<T as alloc::string::ToString>::to_string (141 samples, 0.11%)core::option::Option<T>::expect (34 samples, 0.03%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (28 samples, 0.02%)alloc::alloc::dealloc (28 samples, 0.02%)__rdl_dealloc (28 samples, 0.02%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (28 samples, 0.02%)core::ptr::drop_in_place<alloc::string::String> (55 samples, 0.04%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (55 samples, 
0.04%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (55 samples, 0.04%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (55 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::current_memory (20 samples, 0.02%)torrust_tracker::servers::udp::logging::map_action_name (16 samples, 0.01%)binascii::bin2hex (51 samples, 0.04%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (16 samples, 0.01%)core::fmt::write (25 samples, 0.02%)core::fmt::rt::Argument::fmt (15 samples, 0.01%)core::fmt::Formatter::write_fmt (87 samples, 0.07%)core::str::converts::from_utf8 (43 samples, 0.03%)core::str::validations::run_utf8_validation (37 samples, 0.03%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (161 samples, 0.12%)<T as alloc::string::ToString>::to_string (161 samples, 0.12%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (156 samples, 0.12%)torrust_tracker::servers::udp::logging::log_request (479 samples, 0.36%)[[vdso]] (51 samples, 0.04%)alloc::raw_vec::finish_grow (56 samples, 0.04%)alloc::vec::Vec<T,A>::reserve (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::reserve (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::grow_amortized (64 samples, 0.05%)<alloc::string::String as core::fmt::Write>::write_str (65 samples, 0.05%)alloc::string::String::push_str (65 samples, 0.05%)alloc::vec::Vec<T,A>::extend_from_slice (65 samples, 0.05%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (65 samples, 0.05%)alloc::vec::Vec<T,A>::append_elements (65 samples, 0.05%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (114 samples, 0.09%)core::fmt::num::imp::fmt_u64 (110 samples, 0.08%)<T as alloc::string::ToString>::to_string (132 samples, 0.10%)core::option::Option<T>::expect (20 samples, 0.02%)core::ptr::drop_in_place<alloc::string::String> (22 samples, 
0.02%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (22 samples, 0.02%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (22 samples, 0.02%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (22 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (8,883 samples, 6.77%)torrust_t..torrust_tracker::servers::udp::logging::log_response (238 samples, 0.18%)__GI___lll_lock_wait_private (14 samples, 0.01%)futex_wait (14 samples, 0.01%)__GI___lll_lock_wake_private (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (17 samples, 0.01%)_int_malloc (191 samples, 0.15%)__libc_calloc (238 samples, 0.18%)__memcpy_avx512_unaligned_erms (34 samples, 0.03%)alloc::vec::from_elem (316 samples, 0.24%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (316 samples, 0.24%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (316 samples, 0.24%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (316 samples, 0.24%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (312 samples, 0.24%)alloc::alloc::Global::alloc_impl (312 samples, 0.24%)alloc::alloc::alloc_zeroed (312 samples, 0.24%)__rdl_alloc_zeroed (312 samples, 0.24%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (312 samples, 0.24%)byteorder::ByteOrder::write_i32 (18 samples, 0.01%)<byteorder::BigEndian as byteorder::ByteOrder>::write_u32 (18 samples, 0.01%)core::num::<impl u32>::to_be_bytes (18 samples, 0.01%)core::num::<impl u32>::to_be (18 samples, 0.01%)core::num::<impl u32>::swap_bytes (18 samples, 0.01%)byteorder::io::WriteBytesExt::write_i32 (89 samples, 0.07%)std::io::Write::write_all (71 samples, 0.05%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (71 samples, 0.05%)std::io::cursor::vec_write (71 samples, 0.05%)std::io::cursor::vec_write_unchecked (51 samples, 
0.04%)core::ptr::mut_ptr::<impl *mut T>::copy_from (51 samples, 0.04%)core::intrinsics::copy (51 samples, 0.04%)aquatic_udp_protocol::response::Response::write (227 samples, 0.17%)byteorder::io::WriteBytesExt::write_i64 (28 samples, 0.02%)std::io::Write::write_all (21 samples, 0.02%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (21 samples, 0.02%)std::io::cursor::vec_write (21 samples, 0.02%)std::io::cursor::vec_write_unchecked (21 samples, 0.02%)core::ptr::mut_ptr::<impl *mut T>::copy_from (21 samples, 0.02%)core::intrinsics::copy (21 samples, 0.02%)__GI___lll_lock_wake_private (17 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (14 samples, 0.01%)__GI___lll_lock_wait_private (16 samples, 0.01%)futex_wait (15 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (136 samples, 0.10%)__GI___libc_free (206 samples, 0.16%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (211 samples, 0.16%)alloc::alloc::dealloc (211 samples, 0.16%)__rdl_dealloc (211 samples, 0.16%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (211 samples, 0.16%)core::ptr::drop_in_place<std::io::cursor::Cursor<alloc::vec::Vec<u8>>> (224 samples, 0.17%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (224 samples, 0.17%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (224 samples, 0.17%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (224 samples, 0.17%)std::io::cursor::Cursor<T>::new (56 samples, 0.04%)tokio::io::ready::Ready::intersection (23 samples, 0.02%)tokio::io::ready::Ready::from_interest (23 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (83 samples, 0.06%)[unknown] (32,674 samples, 24.88%)[unknown][unknown] (32,402 samples, 24.68%)[unknown][unknown] (32,272 samples, 24.58%)[unknown][unknown] (32,215 samples, 24.54%)[unknown][unknown] (31,174 
samples, 23.74%)[unknown][unknown] (30,794 samples, 23.45%)[unknown][unknown] (30,036 samples, 22.88%)[unknown][unknown] (28,639 samples, 21.81%)[unknown][unknown] (27,908 samples, 21.25%)[unknown][unknown] (26,013 samples, 19.81%)[unknown][unknown] (23,181 samples, 17.65%)[unknown][unknown] (19,559 samples, 14.90%)[unknown][unknown] (18,052 samples, 13.75%)[unknown][unknown] (15,794 samples, 12.03%)[unknown][unknown] (14,740 samples, 11.23%)[unknown][unknown] (12,486 samples, 9.51%)[unknown][unknown] (11,317 samples, 8.62%)[unknown][unknown] (10,725 samples, 8.17%)[unknown][unknown] (10,017 samples, 7.63%)[unknown][unknown] (9,713 samples, 7.40%)[unknown][unknown] (8,432 samples, 6.42%)[unknown][unknown] (8,062 samples, 6.14%)[unknown][unknown] (6,973 samples, 5.31%)[unknow..[unknown] (5,328 samples, 4.06%)[unk..[unknown] (4,352 samples, 3.31%)[un..[unknown] (3,786 samples, 2.88%)[u..[unknown] (3,659 samples, 2.79%)[u..[unknown] (3,276 samples, 2.50%)[u..[unknown] (2,417 samples, 1.84%)[..[unknown] (2,115 samples, 1.61%)[unknown] (1,610 samples, 1.23%)[unknown] (422 samples, 0.32%)[unknown] (84 samples, 0.06%)[unknown] (69 samples, 0.05%)__GI___pthread_disable_asynccancel (67 samples, 0.05%)__libc_sendto (32,896 samples, 25.05%)__libc_sendtotokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (32,981 samples, 25.12%)tokio::net::udp::UdpSocket::send_to_addr..mio::net::udp::UdpSocket::send_to (32,981 samples, 25.12%)mio::net::udp::UdpSocket::send_tomio::io_source::IoSource<T>::do_io (32,981 samples, 25.12%)mio::io_source::IoSource<T>::do_iomio::sys::unix::stateless_io_source::IoSourceState::do_io (32,981 samples, 25.12%)mio::sys::unix::stateless_io_source::IoS..mio::net::udp::UdpSocket::send_to::{{closure}} (32,981 samples, 25.12%)mio::net::udp::UdpSocket::send_to::{{clo..std::net::udp::UdpSocket::send_to (32,981 samples, 25.12%)std::net::udp::UdpSocket::send_tostd::sys_common::net::UdpSocket::send_to (32,981 samples, 
25.12%)std::sys_common::net::UdpSocket::send_tostd::sys::pal::unix::cvt (85 samples, 0.06%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (44,349 samples, 33.78%)torrust_tracker::servers::udp::server::Udp::process_req..torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (43,412 samples, 33.06%)torrust_tracker::servers::udp::server::Udp::process_va..torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (34,320 samples, 26.14%)torrust_tracker::servers::udp::server::Udp..torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (33,360 samples, 25.41%)torrust_tracker::servers::udp::server::Ud..tokio::net::udp::UdpSocket::send_to::{{closure}} (33,227 samples, 25.31%)tokio::net::udp::UdpSocket::send_to::{{c..tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (33,142 samples, 25.24%)tokio::net::udp::UdpSocket::send_to_addr..tokio::runtime::io::registration::Registration::async_io::{{closure}} (33,115 samples, 25.22%)tokio::runtime::io::registration::Regist..tokio::runtime::io::registration::Registration::readiness::{{closure}} (28 samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (18 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (15 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (14 samples, 0.01%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (15 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (15 samples, 0.01%)core::sync::atomic::atomic_add (15 samples, 0.01%)__GI___lll_lock_wait_private (16 samples, 0.01%)futex_wait (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (135 samples, 0.10%)__GI___libc_free (147 samples, 0.11%)syscall (22 samples, 
0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Core<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (15 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::dealloc (24 samples, 0.02%)core::mem::drop (24 samples, 0.02%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (24 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (24 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::abort::AbortHandle> (262 samples, 0.20%)<tokio::runtime::task::abort::AbortHandle as core::ops::drop::Drop>::drop (262 samples, 0.20%)tokio::runtime::task::raw::RawTask::drop_abort_handle (256 samples, 0.19%)tokio::runtime::task::raw::drop_abort_handle (59 samples, 0.04%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (50 samples, 0.04%)tokio::runtime::task::state::State::ref_dec (50 samples, 0.04%)tokio::runtime::task::raw::RawTask::drop_join_handle_slow (16 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::join::JoinHandle<()>> (47 samples, 0.04%)<tokio::runtime::task::join::JoinHandle<T> as core::ops::drop::Drop>::drop (47 samples, 0.04%)tokio::runtime::task::state::State::drop_join_handle_fast (19 samples, 0.01%)core::sync::atomic::AtomicUsize::compare_exchange_weak (19 samples, 0.01%)core::sync::atomic::atomic_compare_exchange_weak (19 samples, 0.01%)ringbuf::ring_buffer::base::RbBase::is_full (14 samples, 0.01%)<ringbuf::ring_buffer::shared::SharedRb<T,C> as ringbuf::ring_buffer::base::RbBase<T>>::head (14 samples, 0.01%)core::sync::atomic::AtomicUsize::load (14 samples, 
0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)ringbuf::consumer::Consumer<T,R>::advance (29 samples, 0.02%)ringbuf::ring_buffer::base::RbRead::advance_head (29 samples, 0.02%)ringbuf::ring_buffer::rb::Rb::pop (50 samples, 0.04%)ringbuf::consumer::Consumer<T,R>::pop (50 samples, 0.04%)ringbuf::producer::Producer<T,R>::advance (23 samples, 0.02%)ringbuf::ring_buffer::base::RbWrite::advance_tail (23 samples, 0.02%)core::num::nonzero::<impl core::ops::arith::Rem<core::num::nonzero::NonZero<usize>> for usize>::rem (19 samples, 0.01%)ringbuf::ring_buffer::rb::Rb::push_overwrite (107 samples, 0.08%)ringbuf::ring_buffer::rb::Rb::push (43 samples, 0.03%)ringbuf::producer::Producer<T,R>::push (43 samples, 0.03%)tokio::runtime::task::abort::AbortHandle::is_finished (84 samples, 0.06%)tokio::runtime::task::state::Snapshot::is_complete (84 samples, 0.06%)tokio::runtime::task::join::JoinHandle<T>::abort_handle (17 samples, 0.01%)tokio::runtime::task::raw::RawTask::ref_inc (17 samples, 0.01%)tokio::runtime::task::state::State::ref_inc (17 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (14 samples, 0.01%)core::sync::atomic::atomic_add (14 samples, 0.01%)__GI___lll_lock_wake_private (22 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)malloc_consolidate (95 samples, 0.07%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (76 samples, 0.06%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (31 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (26 samples, 0.02%)_int_malloc (282 samples, 0.21%)__GI___libc_malloc (323 samples, 0.25%)alloc::vec::Vec<T>::with_capacity (326 samples, 0.25%)alloc::vec::Vec<T,A>::with_capacity_in (326 samples, 0.25%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (324 samples, 0.25%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (324 samples, 
0.25%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (324 samples, 0.25%)alloc::alloc::Global::alloc_impl (324 samples, 0.25%)alloc::alloc::alloc (324 samples, 0.25%)__rdl_alloc (324 samples, 0.25%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (324 samples, 0.25%)tokio::io::ready::Ready::intersection (24 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (199 samples, 0.15%)tokio::util::bit::Pack::unpack (16 samples, 0.01%)tokio::util::bit::unpack (16 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (19 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (17 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (16 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (222 samples, 0.17%)tokio::net::udp::UdpSocket::ready::{{closure}} (222 samples, 0.17%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (50 samples, 0.04%)std::io::error::repr_bitpacked::Repr::data (14 samples, 0.01%)std::io::error::repr_bitpacked::decode_repr (14 samples, 0.01%)std::io::error::Error::kind (16 samples, 0.01%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (14 samples, 0.01%)[unknown] (8,756 samples, 6.67%)[unknown][unknown] (8,685 samples, 6.61%)[unknown][unknown] (8,574 samples, 6.53%)[unknown][unknown] (8,415 samples, 6.41%)[unknown][unknown] (7,686 samples, 5.85%)[unknow..[unknown] (7,239 samples, 5.51%)[unknow..[unknown] (6,566 samples, 5.00%)[unkno..[unknown] (5,304 samples, 4.04%)[unk..[unknown] (4,008 samples, 3.05%)[un..[unknown] (3,571 samples, 2.72%)[u..[unknown] (2,375 samples, 1.81%)[..[unknown] (1,844 samples, 1.40%)[unknown] (1,030 samples, 0.78%)[unknown] (344 samples, 0.26%)[unknown] (113 samples, 0.09%)__libc_recvfrom (8,903 samples, 6.78%)__libc_re..__GI___pthread_disable_asynccancel (22 samples, 
0.02%)std::sys::pal::unix::cvt (20 samples, 0.02%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (9,005 samples, 6.86%)tokio::ne..mio::net::udp::UdpSocket::recv_from (8,964 samples, 6.83%)mio::net:..mio::io_source::IoSource<T>::do_io (8,964 samples, 6.83%)mio::io_s..mio::sys::unix::stateless_io_source::IoSourceState::do_io (8,964 samples, 6.83%)mio::sys:..mio::net::udp::UdpSocket::recv_from::{{closure}} (8,964 samples, 6.83%)mio::net:..std::net::udp::UdpSocket::recv_from (8,964 samples, 6.83%)std::net:..std::sys_common::net::UdpSocket::recv_from (8,964 samples, 6.83%)std::sys_..std::sys::pal::unix::net::Socket::recv_from (8,964 samples, 6.83%)std::sys:..std::sys::pal::unix::net::Socket::recv_from_with_flags (8,964 samples, 6.83%)std::sys:..std::sys_common::net::sockaddr_to_addr (23 samples, 0.02%)tokio::runtime::io::registration::Registration::clear_readiness (18 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::clear_readiness (18 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (32 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (9,967 samples, 7.59%)torrust_tr..tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (9,291 samples, 7.08%)tokio::ne..tokio::runtime::io::registration::Registration::async_io::{{closure}} (9,287 samples, 7.07%)tokio::ru..tokio::runtime::io::registration::Registration::readiness::{{closure}} (45 samples, 0.03%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (41 samples, 0.03%)__memcpy_avx512_unaligned_erms (424 samples, 0.32%)__memcpy_avx512_unaligned_erms (493 samples, 0.38%)__memcpy_avx512_unaligned_erms (298 samples, 0.23%)syscall (1,105 samples, 0.84%)[unknown] (1,095 samples, 0.83%)[unknown] (1,091 samples, 0.83%)[unknown] (1,049 samples, 0.80%)[unknown] (998 samples, 0.76%)[unknown] (907 samples, 0.69%)[unknown] (710 samples, 0.54%)[unknown] (635 samples, 0.48%)[unknown] (538 samples, 
0.41%)[unknown] (358 samples, 0.27%)[unknown] (256 samples, 0.19%)[unknown] (153 samples, 0.12%)[unknown] (96 samples, 0.07%)[unknown] (81 samples, 0.06%)tokio::runtime::context::with_scheduler (36 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (31 samples, 0.02%)tokio::runtime::context::with_scheduler::{{closure}} (27 samples, 0.02%)tokio::runtime::context::scoped::Scoped<T>::with (27 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (25 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (15 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (340 samples, 0.26%)core::sync::atomic::atomic_add (340 samples, 0.26%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (354 samples, 0.27%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (367 samples, 0.28%)[unknown] (95 samples, 0.07%)[unknown] (93 samples, 0.07%)[unknown] (92 samples, 0.07%)[unknown] (90 samples, 0.07%)[unknown] (82 samples, 0.06%)[unknown] (73 samples, 0.06%)[unknown] (63 samples, 0.05%)[unknown] (44 samples, 0.03%)[unknown] (40 samples, 0.03%)[unknown] (35 samples, 0.03%)[unknown] (30 samples, 0.02%)[unknown] (22 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (17 samples, 0.01%)tokio::runtime::driver::Handle::unpark (99 samples, 0.08%)tokio::runtime::driver::IoHandle::unpark (99 samples, 0.08%)tokio::runtime::io::driver::Handle::unpark (99 samples, 0.08%)mio::waker::Waker::wake (99 samples, 0.08%)mio::sys::unix::waker::fdbased::Waker::wake (99 samples, 0.08%)mio::sys::unix::waker::eventfd::WakerInternal::wake (99 samples, 0.08%)<&std::fs::File as std::io::Write>::write (99 samples, 
0.08%)std::sys::pal::unix::fs::File::write (99 samples, 0.08%)std::sys::pal::unix::fd::FileDesc::write (99 samples, 0.08%)__GI___libc_write (99 samples, 0.08%)__GI___libc_write (99 samples, 0.08%)tokio::runtime::context::with_scheduler (1,615 samples, 1.23%)std::thread::local::LocalKey<T>::try_with (1,613 samples, 1.23%)tokio::runtime::context::with_scheduler::{{closure}} (1,612 samples, 1.23%)tokio::runtime::context::scoped::Scoped<T>::with (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (1,609 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (1,609 samples, 1.23%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (101 samples, 0.08%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (101 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_option_task_without_yield (1,647 samples, 1.25%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task (1,646 samples, 1.25%)tokio::runtime::scheduler::multi_thread::worker::with_current (1,646 samples, 1.25%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (23 samples, 0.02%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (18 samples, 0.01%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (104 samples, 0.08%)tokio::util::sharded_list::ShardedList<L,<L as 
tokio::util::linked_list::Link>::Target>::lock_shard (60 samples, 0.05%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (57 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (49 samples, 0.04%)core::sync::atomic::AtomicU32::compare_exchange (38 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (38 samples, 0.03%)__memcpy_avx512_unaligned_erms (162 samples, 0.12%)__memcpy_avx512_unaligned_erms (34 samples, 0.03%)__GI___lll_lock_wake_private (127 samples, 0.10%)[unknown] (125 samples, 0.10%)[unknown] (124 samples, 0.09%)[unknown] (119 samples, 0.09%)[unknown] (110 samples, 0.08%)[unknown] (106 samples, 0.08%)[unknown] (87 samples, 0.07%)[unknown] (82 samples, 0.06%)[unknown] (51 samples, 0.04%)[unknown] (27 samples, 0.02%)[unknown] (19 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (77 samples, 0.06%)[unknown] (1,207 samples, 0.92%)[unknown] (1,146 samples, 0.87%)[unknown] (1,126 samples, 0.86%)[unknown] (1,091 samples, 0.83%)[unknown] (1,046 samples, 0.80%)[unknown] (962 samples, 0.73%)[unknown] (914 samples, 0.70%)[unknown] (848 samples, 0.65%)[unknown] (774 samples, 0.59%)[unknown] (580 samples, 0.44%)[unknown] (456 samples, 0.35%)[unknown] (305 samples, 0.23%)[unknown] (85 samples, 0.06%)__GI_mprotect (2,474 samples, 1.88%)_..[unknown] (2,457 samples, 1.87%)[..[unknown] (2,440 samples, 1.86%)[..[unknown] (2,436 samples, 1.86%)[..[unknown] (2,435 samples, 1.85%)[..[unknown] (2,360 samples, 1.80%)[..[unknown] (2,203 samples, 1.68%)[unknown] (1,995 samples, 1.52%)[unknown] (1,709 samples, 1.30%)[unknown] (1,524 samples, 1.16%)[unknown] (1,193 samples, 0.91%)[unknown] (865 samples, 0.66%)[unknown] (539 samples, 0.41%)[unknown] (259 samples, 0.20%)[unknown] (80 samples, 0.06%)[unknown] (29 samples, 0.02%)sysmalloc (3,786 samples, 2.88%)sy..grow_heap (2,509 samples, 
1.91%)g.._int_malloc (4,038 samples, 3.08%)_in..unlink_chunk (31 samples, 0.02%)alloc::alloc::exchange_malloc (4,335 samples, 3.30%)all..<alloc::alloc::Global as core::alloc::Allocator>::allocate (4,329 samples, 3.30%)<al..alloc::alloc::Global::alloc_impl (4,329 samples, 3.30%)all..alloc::alloc::alloc (4,329 samples, 3.30%)all..__rdl_alloc (4,329 samples, 3.30%)__r..std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (4,329 samples, 3.30%)std..std::sys::pal::unix::alloc::aligned_malloc (4,329 samples, 3.30%)std..__posix_memalign (4,297 samples, 3.27%)__p..__posix_memalign (4,297 samples, 3.27%)__p.._mid_memalign (4,297 samples, 3.27%)_mi.._int_memalign (4,149 samples, 3.16%)_in..sysmalloc (18 samples, 0.01%)core::option::Option<T>::map (6,666 samples, 5.08%)core::..tokio::task::spawn::spawn_inner::{{closure}} (6,665 samples, 5.08%)tokio:..tokio::runtime::scheduler::Handle::spawn (6,665 samples, 5.08%)tokio:..tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (6,664 samples, 5.08%)tokio:..tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (6,661 samples, 5.07%)tokio:..tokio::runtime::task::list::OwnedTasks<S>::bind (4,692 samples, 3.57%)toki..tokio::runtime::task::new_task (4,579 samples, 3.49%)tok..tokio::runtime::task::raw::RawTask::new (4,579 samples, 3.49%)tok..tokio::runtime::task::core::Cell<T,S>::new (4,579 samples, 3.49%)tok..alloc::boxed::Box<T>::new (4,389 samples, 3.34%)all..tokio::runtime::context::current::with_current (7,636 samples, 5.82%)tokio::..std::thread::local::LocalKey<T>::try_with (7,635 samples, 5.81%)std::th..tokio::runtime::context::current::with_current::{{closure}} (7,188 samples, 5.47%)tokio::..tokio::task::spawn::spawn (7,670 samples, 5.84%)tokio::..tokio::task::spawn::spawn_inner (7,670 samples, 5.84%)tokio::..tokio::runtime::task::id::Id::next (24 samples, 0.02%)core::sync::atomic::AtomicU64::fetch_add (24 samples, 0.02%)core::sync::atomic::atomic_add (24 
samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (62,691 samples, 47.75%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (62,691 samples, 47.75%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (18,228 samples, 13.88%)torrust_tracker::serv..torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (18,226 samples, 13.88%)torrust_tracker::serv..torrust_tracker::servers::udp::server::Udp::spawn_request_processor (7,679 samples, 5.85%)torrust..__memcpy_avx512_unaligned_erms (38 samples, 0.03%)__memcpy_avx512_unaligned_erms (407 samples, 0.31%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (411 samples, 0.31%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (411 samples, 0.31%)tokio::runtime::task::core::Core<T,S>::poll (63,150 samples, 48.10%)tokio::runtime::task::core::Core<T,S>::polltokio::runtime::task::core::Core<T,S>::drop_future_or_output (459 samples, 0.35%)tokio::runtime::task::core::Core<T,S>::set_stage (459 samples, 0.35%)__memcpy_avx512_unaligned_erms (16 samples, 0.01%)__memcpy_avx512_unaligned_erms (398 samples, 0.30%)__memcpy_avx512_unaligned_erms (325 samples, 0.25%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (330 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (330 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::set_stage (731 samples, 0.56%)tokio::runtime::task::harness::poll_future (63,908 samples, 48.67%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (63,908 samples, 48.67%)std::panic::catch_unwindstd::panicking::try (63,908 samples, 48.67%)std::panicking::trystd::panicking::try::do_call (63,908 samples, 48.67%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (63,908 samples, 
48.67%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()..tokio::runtime::task::harness::poll_future::{{closure}} (63,908 samples, 48.67%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::store_output (758 samples, 0.58%)tokio::runtime::coop::budget (65,027 samples, 49.53%)tokio::runtime::coop::budgettokio::runtime::coop::with_budget (65,027 samples, 49.53%)tokio::runtime::coop::with_budgettokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (65,009 samples, 49.51%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}}tokio::runtime::task::LocalNotified<S>::run (65,003 samples, 49.51%)tokio::runtime::task::LocalNotified<S>::runtokio::runtime::task::raw::RawTask::poll (65,003 samples, 49.51%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (64,538 samples, 49.15%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (64,493 samples, 49.12%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (63,919 samples, 48.68%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (93 samples, 0.07%)syscall (2,486 samples, 1.89%)s..[unknown] (2,424 samples, 1.85%)[..[unknown] (2,416 samples, 1.84%)[..[unknown] (2,130 samples, 1.62%)[unknown] (2,013 samples, 1.53%)[unknown] (1,951 samples, 1.49%)[unknown] (1,589 samples, 1.21%)[unknown] (1,415 samples, 1.08%)[unknown] (1,217 samples, 0.93%)[unknown] (820 samples, 0.62%)[unknown] (564 samples, 0.43%)[unknown] (360 samples, 0.27%)[unknown] (244 samples, 0.19%)[unknown] (194 samples, 0.15%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (339 samples, 0.26%)core::sync::atomic::AtomicUsize::fetch_add (337 samples, 0.26%)core::sync::atomic::atomic_add (337 samples, 
0.26%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (364 samples, 0.28%)[unknown] (154 samples, 0.12%)[unknown] (152 samples, 0.12%)[unknown] (143 samples, 0.11%)[unknown] (139 samples, 0.11%)[unknown] (131 samples, 0.10%)[unknown] (123 samples, 0.09%)[unknown] (110 samples, 0.08%)[unknown] (80 samples, 0.06%)[unknown] (74 samples, 0.06%)[unknown] (65 samples, 0.05%)[unknown] (64 samples, 0.05%)[unknown] (47 samples, 0.04%)[unknown] (44 samples, 0.03%)[unknown] (43 samples, 0.03%)[unknown] (40 samples, 0.03%)[unknown] (26 samples, 0.02%)[unknown] (20 samples, 0.02%)__GI___libc_write (158 samples, 0.12%)__GI___libc_write (158 samples, 0.12%)mio::sys::unix::waker::eventfd::WakerInternal::wake (159 samples, 0.12%)<&std::fs::File as std::io::Write>::write (159 samples, 0.12%)std::sys::pal::unix::fs::File::write (159 samples, 0.12%)std::sys::pal::unix::fd::FileDesc::write (159 samples, 0.12%)tokio::runtime::driver::Handle::unpark (168 samples, 0.13%)tokio::runtime::driver::IoHandle::unpark (168 samples, 0.13%)tokio::runtime::io::driver::Handle::unpark (168 samples, 0.13%)mio::waker::Waker::wake (165 samples, 0.13%)mio::sys::unix::waker::fdbased::Waker::wake (165 samples, 0.13%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (68,159 samples, 51.91%)tokio::runtime::scheduler::multi_thread::worker::Context::run_tasktokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (3,024 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (3,023 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (3,022 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (171 samples, 0.13%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (171 samples, 
0.13%)core::option::Option<T>::or_else (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::tune_global_queue_interval (53 samples, 0.04%)tokio::runtime::scheduler::multi_thread::stats::Stats::tuned_global_queue_interval (53 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (107 samples, 0.08%)__GI___libc_free (17 samples, 0.01%)_int_free (17 samples, 0.01%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Dying,K,V>::deallocating_end (18 samples, 0.01%)alloc::collections::btree::navigate::<impl alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>>::deallocating_end (18 samples, 0.01%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,alloc::collections::btree::node::marker::LeafOrInternal>::deallocate_and_ascend (18 samples, 0.01%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (18 samples, 0.01%)alloc::alloc::dealloc (18 samples, 0.01%)__rdl_dealloc (18 samples, 0.01%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (18 samples, 0.01%)alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (19 samples, 0.01%)tokio::runtime::task::Task<S>::shutdown (26 samples, 0.02%)tokio::runtime::task::raw::RawTask::shutdown (26 samples, 0.02%)tokio::runtime::task::raw::shutdown (26 samples, 0.02%)tokio::runtime::task::harness::Harness<T,S>::shutdown (26 samples, 0.02%)tokio::runtime::task::harness::cancel_task (26 samples, 
0.02%)std::panic::catch_unwind (26 samples, 0.02%)std::panicking::try (26 samples, 0.02%)std::panicking::try::do_call (26 samples, 0.02%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (26 samples, 0.02%)core::ops::function::FnOnce::call_once (26 samples, 0.02%)tokio::runtime::task::harness::cancel_task::{{closure}} (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage (26 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (26 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::core::Tracker> (26 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)core::ptr::drop_in_place<std::sync::rwlock::RwLock<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 0.02%)core::ptr::drop_in_place<core::cell::UnsafeCell<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 
0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)core::mem::drop (26 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::IntoIter<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)<alloc::collections::btree::map::IntoIter<K,V,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,NodeType>,alloc::collections::btree::node::marker::KV>::drop_key_val (24 samples, 0.02%)core::mem::maybe_uninit::MaybeUninit<T>::assume_init_drop (24 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (24 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (21 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>> (20 samples, 0.02%)core::ptr::drop_in_place<core::cell::UnsafeCell<torrust_tracker_torrent_repository::entry::Torrent>> (20 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker_torrent_repository::entry::Torrent> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::peer::Id,alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (20 samples, 0.02%)<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)core::mem::drop (20 samples, 
0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::IntoIter<torrust_tracker_primitives::peer::Id,alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (20 samples, 0.02%)<alloc::collections::btree::map::IntoIter<K,V,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::pre_shutdown (33 samples, 0.03%)tokio::runtime::task::list::OwnedTasks<S>::close_and_shutdown_all (33 samples, 0.03%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (114 samples, 0.09%)alloc::sync::Arc<T,A>::inner (114 samples, 0.09%)core::ptr::non_null::NonNull<T>::as_ref (114 samples, 0.09%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (108 samples, 0.08%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (108 samples, 0.08%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (106 samples, 0.08%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (49 samples, 0.04%)alloc::sync::Arc<T,A>::inner (49 samples, 0.04%)core::ptr::non_null::NonNull<T>::as_ref (49 samples, 0.04%)core::num::<impl u32>::wrapping_sub (132 samples, 0.10%)core::sync::atomic::AtomicU64::load (40 samples, 0.03%)core::sync::atomic::atomic_load (40 samples, 0.03%)tokio::loom::std::atomic_u32::AtomicU32::unsync_load (48 samples, 0.04%)core::sync::atomic::AtomicU32::load (48 samples, 0.04%)core::sync::atomic::atomic_load (48 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (65 samples, 0.05%)alloc::sync::Arc<T,A>::inner (65 samples, 0.05%)core::ptr::non_null::NonNull<T>::as_ref (65 samples, 0.05%)core::num::<impl u32>::wrapping_sub (50 samples, 0.04%)core::sync::atomic::AtomicU32::load (55 samples, 0.04%)core::sync::atomic::atomic_load (55 samples, 0.04%)core::sync::atomic::AtomicU64::load (80 samples, 0.06%)core::sync::atomic::atomic_load (80 samples, 0.06%)tokio::runtime::scheduler::multi_thread::queue::pack (26 samples, 
0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (666 samples, 0.51%)tokio::runtime::scheduler::multi_thread::queue::unpack (147 samples, 0.11%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (1,036 samples, 0.79%)tokio::runtime::scheduler::multi_thread::queue::unpack (46 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_searching (49 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_searching (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (2,414 samples, 1.84%)t..tokio::util::rand::FastRand::fastrand_n (24 samples, 0.02%)tokio::util::rand::FastRand::fastrand (24 samples, 0.02%)std::sys_common::backtrace::__rust_begin_short_backtrace (98,136 samples, 74.74%)std::sys_common::backtrace::__rust_begin_short_backtracetokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}} (98,136 samples, 74.74%)tokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}}tokio::runtime::blocking::pool::Inner::run (98,136 samples, 74.74%)tokio::runtime::blocking::pool::Inner::runtokio::runtime::blocking::pool::Task::run (98,042 samples, 74.67%)tokio::runtime::blocking::pool::Task::runtokio::runtime::task::UnownedTask<S>::run (98,042 samples, 74.67%)tokio::runtime::task::UnownedTask<S>::runtokio::runtime::task::raw::RawTask::poll (98,042 samples, 74.67%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (98,042 samples, 74.67%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (98,042 samples, 74.67%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (98,042 samples, 74.67%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::task::harness::poll_future (98,042 samples, 74.67%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (98,042 samples, 
74.67%)std::panic::catch_unwindstd::panicking::try (98,042 samples, 74.67%)std::panicking::trystd::panicking::try::do_call (98,042 samples, 74.67%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (98,042 samples, 74.67%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncetokio::runtime::task::harness::poll_future::{{closure}} (98,042 samples, 74.67%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::poll (98,042 samples, 74.67%)tokio::runtime::task::core::Core<T,S>::polltokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (98,042 samples, 74.67%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (98,042 samples, 74.67%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (98,042 samples, 74.67%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::polltokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}}tokio::runtime::scheduler::multi_thread::worker::run (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::runtokio::runtime::context::runtime::enter_runtime (98,042 samples, 74.67%)tokio::runtime::context::runtime::enter_runtimetokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}tokio::runtime::context::set_scheduler (98,042 samples, 74.67%)tokio::runtime::context::set_schedulerstd::thread::local::LocalKey<T>::with (98,042 samples, 74.67%)std::thread::local::LocalKey<T>::withstd::thread::local::LocalKey<T>::try_with (98,042 samples, 
74.67%)std::thread::local::LocalKey<T>::try_withtokio::runtime::context::set_scheduler::{{closure}} (98,042 samples, 74.67%)tokio::runtime::context::set_scheduler::{{closure}}tokio::runtime::context::scoped::Scoped<T>::set (98,042 samples, 74.67%)tokio::runtime::context::scoped::Scoped<T>::settokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}}tokio::runtime::scheduler::multi_thread::worker::Context::run (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::Context::runstd::panic::catch_unwind (98,137 samples, 74.74%)std::panic::catch_unwindstd::panicking::try (98,137 samples, 74.74%)std::panicking::trystd::panicking::try::do_call (98,137 samples, 74.74%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (98,137 samples, 74.74%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncestd::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}} (98,137 samples, 74.74%)std::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}}<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (98,139 samples, 74.74%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (98,139 samples, 74.74%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_oncecore::ops::function::FnOnce::call_once{{vtable.shim}} (98,139 samples, 74.74%)core::ops::function::FnOnce::call_once{{vtable.shim}}std::thread::Builder::spawn_unchecked_::{{closure}} (98,139 samples, 74.74%)std::thread::Builder::spawn_unchecked_::{{closure}}clone3 (98,205 samples, 74.79%)clone3start_thread (98,205 samples, 74.79%)start_threadstd::sys::pal::unix::thread::Thread::new::thread_start (98,158 samples, 
74.76%)std::sys::pal::unix::thread::Thread::new::thread_startcore::ptr::drop_in_place<std::sys::pal::unix::stack_overflow::Handler> (19 samples, 0.01%)<std::sys::pal::unix::stack_overflow::Handler as core::ops::drop::Drop>::drop (19 samples, 0.01%)std::sys::pal::unix::stack_overflow::imp::drop_handler (19 samples, 0.01%)__GI_munmap (19 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (17 samples, 0.01%)[unknown] (16 samples, 0.01%)core::fmt::Formatter::pad_integral (112 samples, 0.09%)core::fmt::Formatter::pad_integral::write_prefix (59 samples, 0.04%)core::fmt::Formatter::pad_integral (16 samples, 0.01%)core::fmt::write (20 samples, 0.02%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (19 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (51 samples, 0.04%)rand_chacha::guts::round (18 samples, 0.01%)rand_chacha::guts::refill_wide::impl_avx2 (26 samples, 0.02%)rand_chacha::guts::refill_wide::fn_impl (26 samples, 0.02%)rand_chacha::guts::refill_wide_impl (26 samples, 0.02%)rand_chacha::guts::refill_wide (14 samples, 0.01%)std_detect::detect::arch::x86::__is_feature_detected::avx2 (14 samples, 0.01%)std_detect::detect::check_for (14 samples, 0.01%)std_detect::detect::cache::test (14 samples, 0.01%)std_detect::detect::cache::Cache::test (14 samples, 0.01%)core::sync::atomic::AtomicUsize::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)core::cell::RefCell<T>::borrow_mut (81 samples, 0.06%)core::cell::RefCell<T>::try_borrow_mut (81 samples, 0.06%)core::cell::BorrowRefMut::new (81 samples, 0.06%)std::sys::pal::unix::time::Timespec::now (164 samples, 0.12%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (106 samples, 0.08%)tokio::runtime::coop::budget (105 
samples, 0.08%)tokio::runtime::coop::with_budget (105 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (96 samples, 0.07%)std::sys::pal::unix::time::Timespec::sub_timespec (35 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::lock_contended (15 samples, 0.01%)syscall (90 samples, 0.07%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (21 samples, 0.02%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run (61 samples, 0.05%)tokio::runtime::context::runtime::enter_runtime (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (61 samples, 0.05%)tokio::runtime::context::set_scheduler (61 samples, 0.05%)std::thread::local::LocalKey<T>::with (61 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (61 samples, 0.05%)tokio::runtime::context::set_scheduler::{{closure}} (61 samples, 0.05%)tokio::runtime::context::scoped::Scoped<T>::set (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Context::run (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (19 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (17 samples, 0.01%)tokio::runtime::context::CONTEXT::__getit (14 samples, 0.01%)core::cell::Cell<T>::get (14 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (22 samples, 0.02%)<tokio::runtime::task::core::TaskIdGuard 
as core::ops::drop::Drop>::drop (22 samples, 0.02%)tokio::runtime::context::set_current_task_id (22 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (22 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (112 samples, 0.09%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (111 samples, 0.08%)tokio::runtime::task::harness::poll_future (125 samples, 0.10%)std::panic::catch_unwind (125 samples, 0.10%)std::panicking::try (125 samples, 0.10%)std::panicking::try::do_call (125 samples, 0.10%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (125 samples, 0.10%)tokio::runtime::task::harness::poll_future::{{closure}} (125 samples, 0.10%)tokio::runtime::task::core::Core<T,S>::poll (125 samples, 0.10%)tokio::runtime::task::raw::poll (157 samples, 0.12%)tokio::runtime::task::harness::Harness<T,S>::poll (135 samples, 0.10%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (135 samples, 0.10%)tokio::runtime::time::Driver::park_internal (15 samples, 0.01%)torrust_tracker::bootstrap::logging::INIT (17 samples, 0.01%)__memcpy_avx512_unaligned_erms (397 samples, 0.30%)_int_free (24 samples, 0.02%)_int_malloc (132 samples, 0.10%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE::META (570 samples, 0.43%)__GI___lll_lock_wait_private (22 samples, 0.02%)futex_wait (14 samples, 0.01%)__memcpy_avx512_unaligned_erms (299 samples, 0.23%)_int_free (16 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE (361 samples, 0.27%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (41 samples, 0.03%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (23 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (53 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (14 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (63 
samples, 0.05%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (21 samples, 0.02%)__GI___libc_malloc (18 samples, 0.01%)alloc::vec::Vec<T>::with_capacity (116 samples, 0.09%)alloc::vec::Vec<T,A>::with_capacity_in (116 samples, 0.09%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (116 samples, 0.09%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (116 samples, 0.09%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (116 samples, 0.09%)alloc::alloc::Global::alloc_impl (116 samples, 0.09%)alloc::alloc::alloc (116 samples, 0.09%)__rdl_alloc (116 samples, 0.09%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (116 samples, 0.09%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (53 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (53 samples, 0.04%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (53 samples, 0.04%)_int_malloc (21 samples, 0.02%)[unknown] (36 samples, 0.03%)[unknown] (16 samples, 0.01%)core::mem::zeroed (27 samples, 0.02%)core::mem::maybe_uninit::MaybeUninit<T>::zeroed (27 samples, 0.02%)core::ptr::mut_ptr::<impl *mut T>::write_bytes (27 samples, 0.02%)core::intrinsics::write_bytes (27 samples, 0.02%)[unknown] (27 samples, 0.02%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (64 samples, 0.05%)mio::net::udp::UdpSocket::recv_from (49 samples, 0.04%)mio::io_source::IoSource<T>::do_io (49 samples, 0.04%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (49 samples, 0.04%)mio::net::udp::UdpSocket::recv_from::{{closure}} (49 samples, 0.04%)std::net::udp::UdpSocket::recv_from (49 samples, 0.04%)std::sys_common::net::UdpSocket::recv_from (49 samples, 0.04%)std::sys::pal::unix::net::Socket::recv_from (49 samples, 0.04%)std::sys::pal::unix::net::Socket::recv_from_with_flags (49 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (271 
samples, 0.21%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (143 samples, 0.11%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (141 samples, 0.11%)tokio::runtime::io::registration::Registration::clear_readiness (15 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::clear_readiness (15 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (15 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (359 samples, 0.27%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (346 samples, 0.26%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (39 samples, 0.03%)tokio::task::spawn::spawn (39 samples, 0.03%)tokio::task::spawn::spawn_inner (39 samples, 0.03%)tokio::runtime::context::current::with_current (39 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (39 samples, 0.03%)tokio::runtime::context::current::with_current::{{closure}} (39 samples, 0.03%)core::option::Option<T>::map (39 samples, 0.03%)tokio::task::spawn::spawn_inner::{{closure}} (39 samples, 0.03%)tokio::runtime::scheduler::Handle::spawn (39 samples, 0.03%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (39 samples, 0.03%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (39 samples, 0.03%)tokio::runtime::task::list::OwnedTasks<S>::bind (34 samples, 0.03%)all (131,301 samples, 100%)tokio-runtime-w (131,061 samples, 99.82%)tokio-runtime-w \ No newline at end of file From 9e01f7fa750c023507522a43f2ec70fc5fca64a6 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 May 2024 20:01:15 +0200 Subject: [PATCH 0815/1003] dev: fix udp ring-buffer not looping My previous version would be limited to a single thread, as `push_overwrite` would keep on returning the last element when the ring-buffer was full. Now the ring-buffer is pre-filled and is looped over with a mutating iterator. 
New handles are progressively swapped-in when the old entries are finished. Note: I think that this implementation can be replaced with a standard vector with the same effect. --- src/servers/udp/server.rs | 77 +++++++++++++++++++++++++++++++++------ 1 file changed, 66 insertions(+), 11 deletions(-) diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index fc2d02a59..f7092f377 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -24,7 +24,7 @@ use std::sync::Arc; use aquatic_udp_protocol::Response; use derive_more::Constructor; use log::{debug, error, info, trace}; -use ringbuf::traits::{Consumer, Observer, RingBuffer}; +use ringbuf::traits::{Consumer, Observer, Producer}; use ringbuf::StaticRb; use tokio::net::UdpSocket; use tokio::sync::oneshot; @@ -202,11 +202,23 @@ impl Launcher { } } -#[derive(Default)] struct ActiveRequests { rb: StaticRb, // the number of requests we handle at the same time. } +impl ActiveRequests { + /// Creates a new [`ActiveRequests`] filled with finished tasks. + async fn new() -> Self { + let mut rb = StaticRb::default(); + + let () = while rb.try_push(tokio::task::spawn_blocking(|| ()).abort_handle()).is_ok() {}; + + task::yield_now().await; + + Self { rb } + } +} + impl std::fmt::Debug for ActiveRequests { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let (left, right) = &self.rb.as_slices(); @@ -280,15 +292,22 @@ impl Udp { let tracker = tracker.clone(); let socket = socket.clone(); - let reqs = &mut ActiveRequests::default(); + let reqs = &mut ActiveRequests::new().await; - // Main Waiting Loop, awaits on async [`receive_request`]. 
loop { - if let Some(h) = reqs.rb.push_overwrite( - Self::spawn_request_processor(Self::receive_request(socket.clone()).await, tracker.clone(), socket.clone()) - .abort_handle(), - ) { - if !h.is_finished() { + task::yield_now().await; + for h in reqs.rb.iter_mut() { + if h.is_finished() { + std::mem::swap( + h, + &mut Self::spawn_request_processor( + Self::receive_request(socket.clone()).await, + tracker.clone(), + socket.clone(), + ) + .abort_handle(), + ); + } else { // the task is still running, lets yield and give it a chance to flush. tokio::task::yield_now().await; @@ -299,6 +318,9 @@ impl Udp { tracing::span!( target: "UDP TRACKER", tracing::Level::WARN, "request-aborted", server_socket_addr = %server_socket_addr); + + // force-break a single thread, then loop again. + break; } } } @@ -396,13 +418,46 @@ mod tests { use std::sync::Arc; use std::time::Duration; - use tokio::time::sleep; + use ringbuf::traits::{Consumer, Observer, RingBuffer}; use torrust_tracker_test_helpers::configuration::ephemeral_mode_public; + use super::ActiveRequests; use crate::bootstrap::app::initialize_with_configuration; use crate::servers::registar::Registar; use crate::servers::udp::server::{Launcher, UdpServer}; + #[tokio::test] + async fn it_should_return_to_the_start_of_the_ring_buffer() { + let mut a_req = ActiveRequests::new().await; + + tokio::time::sleep(Duration::from_millis(10)).await; + + let mut count: usize = 0; + let cap: usize = a_req.rb.capacity().into(); + + // Add a single pending task to check that the ring-buffer is looping correctly. 
+ a_req + .rb + .push_overwrite(tokio::task::spawn(std::future::pending::<()>()).abort_handle()); + + count += 1; + + for _ in 0..2 { + for h in a_req.rb.iter() { + let first = count % cap; + println!("{count},{first},{}", h.is_finished()); + + if first == 0 { + assert!(!h.is_finished()); + } else { + assert!(h.is_finished()); + } + + count += 1; + } + } + } + #[tokio::test] async fn it_should_be_able_to_start_and_stop() { let cfg = Arc::new(ephemeral_mode_public()); @@ -423,7 +478,7 @@ mod tests { .expect("it should start the server"); let stopped = started.stop().await.expect("it should stop the server"); - sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_secs(1)).await; assert_eq!(stopped.state.launcher.bind_to, bind_to); } From 7da52b1c796b17ebf8208185115eb0f566e61b03 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Apr 2024 12:24:31 +0100 Subject: [PATCH 0816/1003] chore(deps): add dependency figment It will replace the custom code for configuration injection. 
--- Cargo.lock | 37 +++++++++++++++++++++++++++++++++++++ Cargo.toml | 1 + 2 files changed, 38 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index bd0e36c3a..5972c250a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -366,6 +366,15 @@ dependencies = [ "syn 2.0.61", ] +[[package]] +name = "atomic" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994" +dependencies = [ + "bytemuck", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -696,6 +705,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "bytemuck" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15" + [[package]] name = "byteorder" version = "1.5.0" @@ -1334,6 +1349,18 @@ dependencies = [ "log", ] +[[package]] +name = "figment" +version = "0.10.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d032832d74006f99547004d49410a4b4218e4c33382d56ca3ff89df74f86b953" +dependencies = [ + "atomic", + "serde", + "uncased", + "version_check", +] + [[package]] name = "flate2" version = "1.0.30" @@ -3984,6 +4011,7 @@ dependencies = [ "dashmap", "derive_more", "fern", + "figment", "futures", "hex-literal", "hyper", @@ -4222,6 +4250,15 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +[[package]] +name = "uncased" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697" +dependencies = [ + "version_check", +] + [[package]] name = "unicode-bidi" version = "0.3.15" diff --git a/Cargo.toml b/Cargo.toml index cbfdc7697..2be3455b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,6 +44,7 @@ crossbeam-skiplist = "0.1" dashmap = "5.5.3" 
derive_more = "0" fern = "0" +figment = "0.10.18" futures = "0" hex-literal = "0" hyper = "1" From f0e07217a52dddfcfd0170370db1177f7b31abe1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Apr 2024 12:56:44 +0100 Subject: [PATCH 0817/1003] test: remove broken example in rustdoc --- packages/configuration/src/lib.rs | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index ca873f3cd..660a90701 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -229,6 +229,8 @@ //! [health_check_api] //! bind_address = "127.0.0.1:1313" //!``` +pub mod v1; + use std::collections::HashMap; use std::net::IpAddr; use std::str::FromStr; @@ -263,15 +265,6 @@ pub struct Info { impl Info { /// Build Configuration Info /// - /// # Examples - /// - /// ``` - /// use torrust_tracker_configuration::Info; - /// - /// let result = Info::new(env_var_config, env_var_path_config, default_path_config, env_var_api_admin_token); - /// assert_eq!(result, ); - /// ``` - /// /// # Errors /// /// Will return `Err` if unable to obtain a configuration. 
From 157807ca3144ef69de344f0570e9571c4f0e9492 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Apr 2024 12:57:54 +0100 Subject: [PATCH 0818/1003] chore(deps): enable figment features: env, toml, test --- Cargo.lock | 53 +++++++++++++++++++++++++++++++ packages/configuration/Cargo.toml | 1 + 2 files changed, 54 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 5972c250a..600914da7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1356,7 +1356,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d032832d74006f99547004d49410a4b4218e4c33382d56ca3ff89df74f86b953" dependencies = [ "atomic", + "parking_lot", + "pear", "serde", + "tempfile", + "toml", "uncased", "version_check", ] @@ -1904,6 +1908,12 @@ dependencies = [ "serde", ] +[[package]] +name = "inlinable_string" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" + [[package]] name = "instant" version = "0.1.12" @@ -2583,6 +2593,29 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" +[[package]] +name = "pear" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdeeaa00ce488657faba8ebf44ab9361f9365a97bd39ffb8a60663f57ff4b467" +dependencies = [ + "inlinable_string", + "pear_codegen", + "yansi", +] + +[[package]] +name = "pear_codegen" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bab5b985dc082b345f812b7df84e1bef27e7207b39e448439ba8bd69c93f147" +dependencies = [ + "proc-macro2", + "proc-macro2-diagnostics", + "quote", + "syn 2.0.60", +] + [[package]] name = "pem" version = "2.0.1" @@ -2880,6 +2913,19 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "proc-macro2-diagnostics" +version = "0.10.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.60", + "version_check", + "yansi", +] + [[package]] name = "ptr_meta" version = "0.1.4" @@ -4065,6 +4111,7 @@ version = "3.0.0-alpha.12-develop" dependencies = [ "config", "derive_more", + "figment", "serde", "serde_with", "thiserror", @@ -4669,6 +4716,12 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "yansi" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" + [[package]] name = "zerocopy" version = "0.7.34" diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 102177816..e5335d416 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -17,6 +17,7 @@ version.workspace = true [dependencies] config = "0" derive_more = "0" +figment = { version = "0.10.18", features = ["env", "test", "toml"] } serde = { version = "1", features = ["derive"] } serde_with = "3" thiserror = "1" From 636e779242e15965c8bbdefbb1142c3356dfa4b6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Apr 2024 14:00:20 +0100 Subject: [PATCH 0819/1003] refactor: create new configuration v1 mod with figment - Clone config strcuctures into a new mod `v1`. - Introduce versioning for configuration API. - Split config sections into submodules. TODO: - Still using root mod types in production. - Not using figment to build config in production. 
--- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 600914da7..1b4c2a4e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2613,7 +2613,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.60", + "syn 2.0.61", ] [[package]] @@ -2921,7 +2921,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.61", "version_check", "yansi", ] From e7d344c5f8af51cfff7af4abf71db3a08f039096 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Apr 2024 14:00:20 +0100 Subject: [PATCH 0820/1003] refactor: create new configuration v1 mod with figment - Clone config strcuctures into a new mod `v1`. - Introduce versioning for configuration API. - Split config sections into submodules. TODO: - Still using root mod types in production. - Not using figment to build config in production. --- .../configuration/src/v1/health_check_api.rs | 13 + packages/configuration/src/v1/http_tracker.rs | 23 + packages/configuration/src/v1/mod.rs | 433 ++++++++++++++++++ packages/configuration/src/v1/tracker_api.rs | 32 ++ packages/configuration/src/v1/udp_tracker.rs | 12 + 5 files changed, 513 insertions(+) create mode 100644 packages/configuration/src/v1/health_check_api.rs create mode 100644 packages/configuration/src/v1/http_tracker.rs create mode 100644 packages/configuration/src/v1/mod.rs create mode 100644 packages/configuration/src/v1/tracker_api.rs create mode 100644 packages/configuration/src/v1/udp_tracker.rs diff --git a/packages/configuration/src/v1/health_check_api.rs b/packages/configuration/src/v1/health_check_api.rs new file mode 100644 index 000000000..f7b15249c --- /dev/null +++ b/packages/configuration/src/v1/health_check_api.rs @@ -0,0 +1,13 @@ +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; + +/// Configuration for the Health Check API. 
+#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct HealthCheckApi { + /// The address the API will bind to. + /// The format is `ip:port`, for example `127.0.0.1:1313`. If you want to + /// listen to all interfaces, use `0.0.0.0`. If you want the operating + /// system to choose a random port, use port `0`. + pub bind_address: String, +} diff --git a/packages/configuration/src/v1/http_tracker.rs b/packages/configuration/src/v1/http_tracker.rs new file mode 100644 index 000000000..4c88feb9c --- /dev/null +++ b/packages/configuration/src/v1/http_tracker.rs @@ -0,0 +1,23 @@ +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, NoneAsEmptyString}; + +/// Configuration for each HTTP tracker. +#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct HttpTracker { + /// Weather the HTTP tracker is enabled or not. + pub enabled: bool, + /// The address the tracker will bind to. + /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to + /// listen to all interfaces, use `0.0.0.0`. If you want the operating + /// system to choose a random port, use port `0`. + pub bind_address: String, + /// Weather the HTTP tracker will use SSL or not. + pub ssl_enabled: bool, + /// Path to the SSL certificate file. Only used if `ssl_enabled` is `true`. + #[serde_as(as = "NoneAsEmptyString")] + pub ssl_cert_path: Option, + /// Path to the SSL key file. Only used if `ssl_enabled` is `true`. + #[serde_as(as = "NoneAsEmptyString")] + pub ssl_key_path: Option, +} diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs new file mode 100644 index 000000000..815d74e40 --- /dev/null +++ b/packages/configuration/src/v1/mod.rs @@ -0,0 +1,433 @@ +//! Configuration data structures for [Torrust Tracker](https://docs.rs/torrust-tracker). +//! +//! This module contains the configuration data structures for the +//! Torrust Tracker, which is a `BitTorrent` tracker server. +//! 
+//! The configuration is loaded from a [TOML](https://toml.io/en/) file +//! `tracker.toml` in the project root folder or from an environment variable +//! with the same content as the file. +//! +//! Configuration can not only be loaded from a file, but also from an +//! environment variable `TORRUST_TRACKER_CONFIG`. This is useful when running +//! the tracker in a Docker container or environments where you do not have a +//! persistent storage or you cannot inject a configuration file. Refer to +//! [`Torrust Tracker documentation`](https://docs.rs/torrust-tracker) for more +//! information about how to pass configuration to the tracker. +//! +//! When you run the tracker without providing the configuration via a file or +//! env var, the default configuration is used. +//! +//! # Table of contents +//! +//! - [Sections](#sections) +//! - [Port binding](#port-binding) +//! - [TSL support](#tsl-support) +//! - [Generating self-signed certificates](#generating-self-signed-certificates) +//! - [Default configuration](#default-configuration) +//! +//! ## Sections +//! +//! Each section in the toml structure is mapped to a data structure. For +//! example, the `[http_api]` section (configuration for the tracker HTTP API) +//! is mapped to the [`HttpApi`] structure. +//! +//! > **NOTICE**: some sections are arrays of structures. For example, the +//! > `[[udp_trackers]]` section is an array of [`UdpTracker`] since +//! > you can have multiple running UDP trackers bound to different ports. +//! +//! Please refer to the documentation of each structure for more information +//! about each section. +//! +//! - [`Core configuration`](crate::v1::Configuration) +//! - [`HTTP API configuration`](crate::v1::tracker_api::HttpApi) +//! - [`HTTP Tracker configuration`](crate::v1::http_tracker::HttpTracker) +//! - [`UDP Tracker configuration`](crate::v1::udp_tracker::UdpTracker) +//! - [`Health Check API configuration`](crate::v1::health_check_api::HealthCheckApi) +//! +//! 
## Port binding +//! +//! For the API, HTTP and UDP trackers you can bind to a random port by using +//! port `0`. For example, if you want to bind to a random port on all +//! interfaces, use `0.0.0.0:0`. The OS will choose a random free port. +//! +//! ## TSL support +//! +//! For the API and HTTP tracker you can enable TSL by setting `ssl_enabled` to +//! `true` and setting the paths to the certificate and key files. +//! +//! Typically, you will have a `storage` directory like the following: +//! +//! ```text +//! storage/ +//! ├── config.toml +//! └── tracker +//! ├── etc +//! │ └── tracker.toml +//! ├── lib +//! │ ├── database +//! │ │ ├── sqlite3.db +//! │ │ └── sqlite.db +//! │ └── tls +//! │ ├── localhost.crt +//! │ └── localhost.key +//! └── log +//! ``` +//! +//! where the application stores all the persistent data. +//! +//! Alternatively, you could setup a reverse proxy like Nginx or Apache to +//! handle the SSL/TLS part and forward the requests to the tracker. If you do +//! that, you should set [`on_reverse_proxy`](crate::Configuration::on_reverse_proxy) +//! to `true` in the configuration file. It's out of scope for this +//! documentation to explain in detail how to setup a reverse proxy, but the +//! configuration file should be something like this: +//! +//! For [NGINX](https://docs.nginx.com/nginx/admin-guide/web-server/reverse-proxy/): +//! +//! ```text +//! # HTTPS only (with SSL - force redirect to HTTPS) +//! +//! server { +//! listen 80; +//! server_name tracker.torrust.com; +//! +//! return 301 https://$host$request_uri; +//! } +//! +//! server { +//! listen 443; +//! server_name tracker.torrust.com; +//! +//! ssl_certificate CERT_PATH +//! ssl_certificate_key CERT_KEY_PATH; +//! +//! location / { +//! proxy_set_header X-Forwarded-For $remote_addr; +//! proxy_pass http://127.0.0.1:6969; +//! } +//! } +//! ``` +//! +//! For [Apache](https://httpd.apache.org/docs/2.4/howto/reverse_proxy.html): +//! +//! ```text +//! 
# HTTPS only (with SSL - force redirect to HTTPS) +//! +//! +//! ServerAdmin webmaster@tracker.torrust.com +//! ServerName tracker.torrust.com +//! +//! +//! RewriteEngine on +//! RewriteCond %{HTTPS} off +//! RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [END,NE,R=permanent] +//! +//! +//! +//! +//! +//! ServerAdmin webmaster@tracker.torrust.com +//! ServerName tracker.torrust.com +//! +//! +//! Order allow,deny +//! Allow from all +//! +//! +//! ProxyPreserveHost On +//! ProxyRequests Off +//! AllowEncodedSlashes NoDecode +//! +//! ProxyPass / http://localhost:3000/ +//! ProxyPassReverse / http://localhost:3000/ +//! ProxyPassReverse / http://tracker.torrust.com/ +//! +//! RequestHeader set X-Forwarded-Proto "https" +//! RequestHeader set X-Forwarded-Port "443" +//! +//! ErrorLog ${APACHE_LOG_DIR}/tracker.torrust.com-error.log +//! CustomLog ${APACHE_LOG_DIR}/tracker.torrust.com-access.log combined +//! +//! SSLCertificateFile CERT_PATH +//! SSLCertificateKeyFile CERT_KEY_PATH +//! +//! +//! ``` +//! +//! ## Generating self-signed certificates +//! +//! For testing purposes, you can use self-signed certificates. +//! +//! Refer to [Let's Encrypt - Certificates for localhost](https://letsencrypt.org/docs/certificates-for-localhost/) +//! for more information. +//! +//! Running the following command will generate a certificate (`localhost.crt`) +//! and key (`localhost.key`) file in your current directory: +//! +//! ```s +//! openssl req -x509 -out localhost.crt -keyout localhost.key \ +//! -newkey rsa:2048 -nodes -sha256 \ +//! -subj '/CN=localhost' -extensions EXT -config <( \ +//! printf "[dn]\nCN=localhost\n[req]\ndistinguished_name = dn\n[EXT]\nsubjectAltName=DNS:localhost\nkeyUsage=digitalSignature\nextendedKeyUsage=serverAuth") +//! ``` +//! +//! You can then use the generated files in the configuration file: +//! +//! ```s +//! [[http_trackers]] +//! enabled = true +//! ... +//! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" +//! 
ssl_key_path = "./storage/tracker/lib/tls/localhost.key" +//! +//! [http_api] +//! enabled = true +//! ... +//! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" +//! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" +//! ``` +//! +//! ## Default configuration +//! +//! The default configuration is: +//! +//! ```toml +//! log_level = "info" +//! mode = "public" +//! db_driver = "Sqlite3" +//! db_path = "./storage/tracker/lib/database/sqlite3.db" +//! announce_interval = 120 +//! min_announce_interval = 120 +//! on_reverse_proxy = false +//! external_ip = "0.0.0.0" +//! tracker_usage_statistics = true +//! persistent_torrent_completed_stat = false +//! max_peer_timeout = 900 +//! inactive_peer_cleanup_interval = 600 +//! remove_peerless_torrents = true +//! +//! [[udp_trackers]] +//! enabled = false +//! bind_address = "0.0.0.0:6969" +//! +//! [[http_trackers]] +//! enabled = false +//! bind_address = "0.0.0.0:7070" +//! ssl_enabled = false +//! ssl_cert_path = "" +//! ssl_key_path = "" +//! +//! [http_api] +//! enabled = true +//! bind_address = "127.0.0.1:1212" +//! ssl_enabled = false +//! ssl_cert_path = "" +//! ssl_key_path = "" +//! +//! [http_api.access_tokens] +//! admin = "MyAccessToken" +//! [health_check_api] +//! bind_address = "127.0.0.1:1313" +//!``` +pub mod health_check_api; +pub mod http_tracker; +pub mod tracker_api; +pub mod udp_tracker; + +use serde::{Deserialize, Serialize}; +use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; + +use self::health_check_api::HealthCheckApi; +use self::http_tracker::HttpTracker; +use self::tracker_api::HttpApi; +use self::udp_tracker::UdpTracker; +use crate::AnnouncePolicy; + +/// Core configuration for the tracker. +#[allow(clippy::struct_excessive_bools)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] +pub struct Configuration { + /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, + /// `Debug` and `Trace`. Default is `Info`. 
+ pub log_level: Option, + /// Tracker mode. See [`TrackerMode`] for more information. + pub mode: TrackerMode, + + // Database configuration + /// Database driver. Possible values are: `Sqlite3`, and `MySQL`. + pub db_driver: DatabaseDriver, + /// Database connection string. The format depends on the database driver. + /// For `Sqlite3`, the format is `path/to/database.db`, for example: + /// `./storage/tracker/lib/database/sqlite3.db`. + /// For `Mysql`, the format is `mysql://db_user:db_user_password:port/db_name`, for + /// example: `root:password@localhost:3306/torrust`. + pub db_path: String, + + /// See [`AnnouncePolicy::interval`] + pub announce_interval: u32, + + /// See [`AnnouncePolicy::interval_min`] + pub min_announce_interval: u32, + /// Weather the tracker is behind a reverse proxy or not. + /// If the tracker is behind a reverse proxy, the `X-Forwarded-For` header + /// sent from the proxy will be used to get the client's IP address. + pub on_reverse_proxy: bool, + /// The external IP address of the tracker. If the client is using a + /// loopback IP address, this IP address will be used instead. If the peer + /// is using a loopback IP address, the tracker assumes that the peer is + /// in the same network as the tracker and will use the tracker's IP + /// address instead. + pub external_ip: Option, + /// Weather the tracker should collect statistics about tracker usage. + /// If enabled, the tracker will collect statistics like the number of + /// connections handled, the number of announce requests handled, etc. + /// Refer to the [`Tracker`](https://docs.rs/torrust-tracker) for more + /// information about the collected metrics. + pub tracker_usage_statistics: bool, + /// If enabled the tracker will persist the number of completed downloads. + /// That's how many times a torrent has been downloaded completely. 
+ pub persistent_torrent_completed_stat: bool, + + // Cleanup job configuration + /// Maximum time in seconds that a peer can be inactive before being + /// considered an inactive peer. If a peer is inactive for more than this + /// time, it will be removed from the torrent peer list. + pub max_peer_timeout: u32, + /// Interval in seconds that the cleanup job will run to remove inactive + /// peers from the torrent peer list. + pub inactive_peer_cleanup_interval: u64, + /// If enabled, the tracker will remove torrents that have no peers. + /// The clean up torrent job runs every `inactive_peer_cleanup_interval` + /// seconds and it removes inactive peers. Eventually, the peer list of a + /// torrent could be empty and the torrent will be removed if this option is + /// enabled. + pub remove_peerless_torrents: bool, + + // Server jobs configuration + /// The list of UDP trackers the tracker is running. Each UDP tracker + /// represents a UDP server that the tracker is running and it has its own + /// configuration. + pub udp_trackers: Vec, + /// The list of HTTP trackers the tracker is running. Each HTTP tracker + /// represents a HTTP server that the tracker is running and it has its own + /// configuration. + pub http_trackers: Vec, + /// The HTTP API configuration. + pub http_api: HttpApi, + /// The Health Check API configuration. 
+ pub health_check_api: HealthCheckApi, +} + +impl Default for Configuration { + fn default() -> Self { + let announce_policy = AnnouncePolicy::default(); + + let mut configuration = Configuration { + log_level: Option::from(String::from("info")), + mode: TrackerMode::Public, + db_driver: DatabaseDriver::Sqlite3, + db_path: String::from("./storage/tracker/lib/database/sqlite3.db"), + announce_interval: announce_policy.interval, + min_announce_interval: announce_policy.interval_min, + max_peer_timeout: 900, + on_reverse_proxy: false, + external_ip: Some(String::from("0.0.0.0")), + tracker_usage_statistics: true, + persistent_torrent_completed_stat: false, + inactive_peer_cleanup_interval: 600, + remove_peerless_torrents: true, + udp_trackers: Vec::new(), + http_trackers: Vec::new(), + http_api: HttpApi { + enabled: true, + bind_address: String::from("127.0.0.1:1212"), + ssl_enabled: false, + ssl_cert_path: None, + ssl_key_path: None, + access_tokens: [(String::from("admin"), String::from("MyAccessToken"))] + .iter() + .cloned() + .collect(), + }, + health_check_api: HealthCheckApi { + bind_address: String::from("127.0.0.1:1313"), + }, + }; + configuration.udp_trackers.push(UdpTracker { + enabled: false, + bind_address: String::from("0.0.0.0:6969"), + }); + configuration.http_trackers.push(HttpTracker { + enabled: false, + bind_address: String::from("0.0.0.0:7070"), + ssl_enabled: false, + ssl_cert_path: None, + ssl_key_path: None, + }); + configuration + } +} + +#[cfg(test)] +mod tests { + use figment::providers::{Format, Toml}; + use figment::Figment; + + use crate::v1::Configuration; + + #[test] + fn configuration_should_be_loaded_from_a_toml_config_file() { + figment::Jail::expect_with(|jail| { + jail.create_file( + "Config.toml", + r#" + log_level = "info" + mode = "public" + db_driver = "Sqlite3" + db_path = "./storage/tracker/lib/database/sqlite3.db" + announce_interval = 120 + min_announce_interval = 120 + on_reverse_proxy = false + external_ip = "0.0.0.0" + 
tracker_usage_statistics = true + persistent_torrent_completed_stat = false + max_peer_timeout = 900 + inactive_peer_cleanup_interval = 600 + remove_peerless_torrents = true + + [[udp_trackers]] + enabled = false + bind_address = "0.0.0.0:6969" + + [[http_trackers]] + enabled = false + bind_address = "0.0.0.0:7070" + ssl_enabled = false + ssl_cert_path = "" + ssl_key_path = "" + + [http_api] + enabled = true + bind_address = "127.0.0.1:1212" + ssl_enabled = false + ssl_cert_path = "" + ssl_key_path = "" + + [http_api.access_tokens] + admin = "MyAccessToken" + + [health_check_api] + bind_address = "127.0.0.1:1313" + "#, + )?; + + let figment = Figment::new().merge(Toml::file("Config.toml")); + + let config: Configuration = figment.extract()?; + + assert_eq!(config, Configuration::default()); + + Ok(()) + }); + } +} diff --git a/packages/configuration/src/v1/tracker_api.rs b/packages/configuration/src/v1/tracker_api.rs new file mode 100644 index 000000000..6cda9b437 --- /dev/null +++ b/packages/configuration/src/v1/tracker_api.rs @@ -0,0 +1,32 @@ +use std::collections::HashMap; + +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, NoneAsEmptyString}; + +pub type AccessTokens = HashMap; + +/// Configuration for the HTTP API. +#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct HttpApi { + /// Weather the HTTP API is enabled or not. + pub enabled: bool, + /// The address the tracker will bind to. + /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to + /// listen to all interfaces, use `0.0.0.0`. If you want the operating + /// system to choose a random port, use port `0`. + pub bind_address: String, + /// Weather the HTTP API will use SSL or not. + pub ssl_enabled: bool, + /// Path to the SSL certificate file. Only used if `ssl_enabled` is `true`. + #[serde_as(as = "NoneAsEmptyString")] + pub ssl_cert_path: Option, + /// Path to the SSL key file. Only used if `ssl_enabled` is `true`. 
+ #[serde_as(as = "NoneAsEmptyString")] + pub ssl_key_path: Option, + /// Access tokens for the HTTP API. The key is a label identifying the + /// token and the value is the token itself. The token is used to + /// authenticate the user. All tokens are valid for all endpoints and have + /// the all permissions. + pub access_tokens: AccessTokens, +} diff --git a/packages/configuration/src/v1/udp_tracker.rs b/packages/configuration/src/v1/udp_tracker.rs new file mode 100644 index 000000000..b304054c3 --- /dev/null +++ b/packages/configuration/src/v1/udp_tracker.rs @@ -0,0 +1,12 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct UdpTracker { + /// Weather the UDP tracker is enabled or not. + pub enabled: bool, + /// The address the tracker will bind to. + /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to + /// listen to all interfaces, use `0.0.0.0`. If you want the operating + /// system to choose a random port, use port `0`. + pub bind_address: String, +} From 002fb306087919c874d0d3296d0306a083eb62f6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 May 2024 12:57:14 +0100 Subject: [PATCH 0821/1003] refactor: reexport config versioned config types This is part of the migration to Figment in the configuration. This expose new versioned types (version 1). However, those types still used the old Config crate. Replacement by Figment has not been done yet. --- packages/configuration/src/lib.rs | 536 +------------------ packages/configuration/src/v1/mod.rs | 109 +++- packages/configuration/src/v1/tracker_api.rs | 6 + 3 files changed, 123 insertions(+), 528 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 660a90701..666500189 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -3,251 +3,29 @@ //! This module contains the configuration data structures for the //! 
Torrust Tracker, which is a `BitTorrent` tracker server. //! -//! The configuration is loaded from a [TOML](https://toml.io/en/) file -//! `tracker.toml` in the project root folder or from an environment variable -//! with the same content as the file. -//! -//! When you run the tracker without a configuration file, a new one will be -//! created with the default values, but the tracker immediately exits. You can -//! then edit the configuration file and run the tracker again. -//! -//! Configuration can not only be loaded from a file, but also from environment -//! variable `TORRUST_TRACKER_CONFIG`. This is useful when running the tracker -//! in a Docker container or environments where you do not have a persistent -//! storage or you cannot inject a configuration file. Refer to -//! [`Torrust Tracker documentation`](https://docs.rs/torrust-tracker) for more -//! information about how to pass configuration to the tracker. -//! -//! # Table of contents -//! -//! - [Sections](#sections) -//! - [Port binding](#port-binding) -//! - [TSL support](#tsl-support) -//! - [Generating self-signed certificates](#generating-self-signed-certificates) -//! - [Default configuration](#default-configuration) -//! -//! ## Sections -//! -//! Each section in the toml structure is mapped to a data structure. For -//! example, the `[http_api]` section (configuration for the tracker HTTP API) -//! is mapped to the [`HttpApi`] structure. -//! -//! > **NOTICE**: some sections are arrays of structures. For example, the -//! > `[[udp_trackers]]` section is an array of [`UdpTracker`] since -//! > you can have multiple running UDP trackers bound to different ports. -//! -//! Please refer to the documentation of each structure for more information -//! about each section. -//! -//! - [`Core configuration`](crate::Configuration) -//! - [`HTTP API configuration`](crate::HttpApi) -//! - [`HTTP Tracker configuration`](crate::HttpTracker) -//! - [`UDP Tracker configuration`](crate::UdpTracker) -//! 
-//! ## Port binding -//! -//! For the API, HTTP and UDP trackers you can bind to a random port by using -//! port `0`. For example, if you want to bind to a random port on all -//! interfaces, use `0.0.0.0:0`. The OS will choose a random port but the -//! tracker will not print the port it is listening to when it starts. It just -//! says `Starting Torrust HTTP tracker server on: http://0.0.0.0:0`. It shows -//! the port used in the configuration file, and not the port the -//! tracker is actually listening to. This is a planned feature, see issue -//! [186](https://github.com/torrust/torrust-tracker/issues/186) for more -//! information. -//! -//! ## TSL support -//! -//! For the API and HTTP tracker you can enable TSL by setting `ssl_enabled` to -//! `true` and setting the paths to the certificate and key files. -//! -//! Typically, you will have a directory structure like this: -//! -//! ```text -//! storage/ -//! ├── database -//! │ └── data.db -//! └── tls -//! ├── localhost.crt -//! └── localhost.key -//! ``` -//! -//! where you can store all the persistent data. -//! -//! Alternatively, you could setup a reverse proxy like Nginx or Apache to -//! handle the SSL/TLS part and forward the requests to the tracker. If you do -//! that, you should set [`on_reverse_proxy`](crate::Configuration::on_reverse_proxy) -//! to `true` in the configuration file. It's out of scope for this -//! documentation to explain in detail how to setup a reverse proxy, but the -//! configuration file should be something like this: -//! -//! For [NGINX](https://docs.nginx.com/nginx/admin-guide/web-server/reverse-proxy/): -//! -//! ```text -//! # HTTPS only (with SSL - force redirect to HTTPS) -//! -//! server { -//! listen 80; -//! server_name tracker.torrust.com; -//! -//! return 301 https://$host$request_uri; -//! } -//! -//! server { -//! listen 443; -//! server_name tracker.torrust.com; -//! -//! ssl_certificate CERT_PATH -//! ssl_certificate_key CERT_KEY_PATH; -//! -//! 
location / { -//! proxy_set_header X-Forwarded-For $remote_addr; -//! proxy_pass http://127.0.0.1:6969; -//! } -//! } -//! ``` -//! -//! For [Apache](https://httpd.apache.org/docs/2.4/howto/reverse_proxy.html): -//! -//! ```text -//! # HTTPS only (with SSL - force redirect to HTTPS) -//! -//! -//! ServerAdmin webmaster@tracker.torrust.com -//! ServerName tracker.torrust.com -//! -//! -//! RewriteEngine on -//! RewriteCond %{HTTPS} off -//! RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [END,NE,R=permanent] -//! -//! -//! -//! -//! -//! ServerAdmin webmaster@tracker.torrust.com -//! ServerName tracker.torrust.com -//! -//! -//! Order allow,deny -//! Allow from all -//! -//! -//! ProxyPreserveHost On -//! ProxyRequests Off -//! AllowEncodedSlashes NoDecode -//! -//! ProxyPass / http://localhost:3000/ -//! ProxyPassReverse / http://localhost:3000/ -//! ProxyPassReverse / http://tracker.torrust.com/ -//! -//! RequestHeader set X-Forwarded-Proto "https" -//! RequestHeader set X-Forwarded-Port "443" -//! -//! ErrorLog ${APACHE_LOG_DIR}/tracker.torrust.com-error.log -//! CustomLog ${APACHE_LOG_DIR}/tracker.torrust.com-access.log combined -//! -//! SSLCertificateFile CERT_PATH -//! SSLCertificateKeyFile CERT_KEY_PATH -//! -//! -//! ``` -//! -//! ## Generating self-signed certificates -//! -//! For testing purposes, you can use self-signed certificates. -//! -//! Refer to [Let's Encrypt - Certificates for localhost](https://letsencrypt.org/docs/certificates-for-localhost/) -//! for more information. -//! -//! Running the following command will generate a certificate (`localhost.crt`) -//! and key (`localhost.key`) file in your current directory: -//! -//! ```s -//! openssl req -x509 -out localhost.crt -keyout localhost.key \ -//! -newkey rsa:2048 -nodes -sha256 \ -//! -subj '/CN=localhost' -extensions EXT -config <( \ -//! 
printf "[dn]\nCN=localhost\n[req]\ndistinguished_name = dn\n[EXT]\nsubjectAltName=DNS:localhost\nkeyUsage=digitalSignature\nextendedKeyUsage=serverAuth") -//! ``` -//! -//! You can then use the generated files in the configuration file: -//! -//! ```s -//! [[http_trackers]] -//! enabled = true -//! ... -//! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" -//! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" -//! -//! [http_api] -//! enabled = true -//! ... -//! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" -//! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" -//! ``` -//! -//! ## Default configuration -//! -//! The default configuration is: -//! -//! ```toml -//! announce_interval = 120 -//! db_driver = "Sqlite3" -//! db_path = "./storage/tracker/lib/database/sqlite3.db" -//! external_ip = "0.0.0.0" -//! inactive_peer_cleanup_interval = 600 -//! log_level = "info" -//! max_peer_timeout = 900 -//! min_announce_interval = 120 -//! mode = "public" -//! on_reverse_proxy = false -//! persistent_torrent_completed_stat = false -//! remove_peerless_torrents = true -//! tracker_usage_statistics = true -//! -//! [[udp_trackers]] -//! bind_address = "0.0.0.0:6969" -//! enabled = false -//! -//! [[http_trackers]] -//! bind_address = "0.0.0.0:7070" -//! enabled = false -//! ssl_cert_path = "" -//! ssl_enabled = false -//! ssl_key_path = "" -//! -//! [http_api] -//! bind_address = "127.0.0.1:1212" -//! enabled = true -//! ssl_cert_path = "" -//! ssl_enabled = false -//! ssl_key_path = "" -//! -//! [http_api.access_tokens] -//! admin = "MyAccessToken" -//! -//! [health_check_api] -//! bind_address = "127.0.0.1:1313" -//!``` +//! The current version for configuration is [`v1`](crate::v1). 
pub mod v1; use std::collections::HashMap; -use std::net::IpAddr; -use std::str::FromStr; use std::sync::Arc; use std::{env, fs}; -use config::{Config, ConfigError, File, FileFormat}; +use config::ConfigError; use derive_more::Constructor; -use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, NoneAsEmptyString}; use thiserror::Error; use torrust_tracker_located_error::{DynError, Located, LocatedError}; -use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; /// The maximum number of returned peers for a torrent. pub const TORRENT_PEERS_LIMIT: usize = 74; +pub type Configuration = v1::Configuration; +pub type UdpTracker = v1::udp_tracker::UdpTracker; +pub type HttpTracker = v1::http_tracker::HttpTracker; +pub type HttpApi = v1::tracker_api::HttpApi; +pub type HealthCheckApi = v1::health_check_api::HealthCheckApi; + +pub type AccessTokens = HashMap; + #[derive(Copy, Clone, Debug, PartialEq, Constructor)] pub struct TrackerPolicy { pub remove_peerless_torrents: bool, @@ -307,84 +85,6 @@ impl Info { } } -/// Configuration for each UDP tracker. -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] -pub struct UdpTracker { - /// Weather the UDP tracker is enabled or not. - pub enabled: bool, - /// The address the tracker will bind to. - /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to - /// listen to all interfaces, use `0.0.0.0`. If you want the operating - /// system to choose a random port, use port `0`. - pub bind_address: String, -} - -/// Configuration for each HTTP tracker. -#[serde_as] -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] -pub struct HttpTracker { - /// Weather the HTTP tracker is enabled or not. - pub enabled: bool, - /// The address the tracker will bind to. - /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to - /// listen to all interfaces, use `0.0.0.0`. If you want the operating - /// system to choose a random port, use port `0`. 
- pub bind_address: String, - /// Weather the HTTP tracker will use SSL or not. - pub ssl_enabled: bool, - /// Path to the SSL certificate file. Only used if `ssl_enabled` is `true`. - #[serde_as(as = "NoneAsEmptyString")] - pub ssl_cert_path: Option, - /// Path to the SSL key file. Only used if `ssl_enabled` is `true`. - #[serde_as(as = "NoneAsEmptyString")] - pub ssl_key_path: Option, -} - -pub type AccessTokens = HashMap; - -/// Configuration for the HTTP API. -#[serde_as] -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] -pub struct HttpApi { - /// Weather the HTTP API is enabled or not. - pub enabled: bool, - /// The address the tracker will bind to. - /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to - /// listen to all interfaces, use `0.0.0.0`. If you want the operating - /// system to choose a random port, use port `0`. - pub bind_address: String, - /// Weather the HTTP API will use SSL or not. - pub ssl_enabled: bool, - /// Path to the SSL certificate file. Only used if `ssl_enabled` is `true`. - #[serde_as(as = "NoneAsEmptyString")] - pub ssl_cert_path: Option, - /// Path to the SSL key file. Only used if `ssl_enabled` is `true`. - #[serde_as(as = "NoneAsEmptyString")] - pub ssl_key_path: Option, - /// Access tokens for the HTTP API. The key is a label identifying the - /// token and the value is the token itself. The token is used to - /// authenticate the user. All tokens are valid for all endpoints and have - /// the all permissions. - pub access_tokens: AccessTokens, -} - -impl HttpApi { - fn override_admin_token(&mut self, api_admin_token: &str) { - self.access_tokens.insert("admin".to_string(), api_admin_token.to_string()); - } -} - -/// Configuration for the Health Check API. -#[serde_as] -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] -pub struct HealthCheckApi { - /// The address the API will bind to. - /// The format is `ip:port`, for example `127.0.0.1:1313`. 
If you want to - /// listen to all interfaces, use `0.0.0.0`. If you want the operating - /// system to choose a random port, use port `0`. - pub bind_address: String, -} - /// Announce policy #[derive(PartialEq, Eq, Debug, Clone, Copy, Constructor)] pub struct AnnouncePolicy { @@ -424,81 +124,6 @@ impl Default for AnnouncePolicy { } } -/// Core configuration for the tracker. -#[allow(clippy::struct_excessive_bools)] -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub struct Configuration { - /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, - /// `Debug` and `Trace`. Default is `Info`. - pub log_level: Option, - /// Tracker mode. See [`TrackerMode`] for more information. - pub mode: TrackerMode, - - // Database configuration - /// Database driver. Possible values are: `Sqlite3`, and `MySQL`. - pub db_driver: DatabaseDriver, - /// Database connection string. The format depends on the database driver. - /// For `Sqlite3`, the format is `path/to/database.db`, for example: - /// `./storage/tracker/lib/database/sqlite3.db`. - /// For `Mysql`, the format is `mysql://db_user:db_user_password:port/db_name`, for - /// example: `root:password@localhost:3306/torrust`. - pub db_path: String, - - /// See [`AnnouncePolicy::interval`] - pub announce_interval: u32, - - /// See [`AnnouncePolicy::interval_min`] - pub min_announce_interval: u32, - /// Weather the tracker is behind a reverse proxy or not. - /// If the tracker is behind a reverse proxy, the `X-Forwarded-For` header - /// sent from the proxy will be used to get the client's IP address. - pub on_reverse_proxy: bool, - /// The external IP address of the tracker. If the client is using a - /// loopback IP address, this IP address will be used instead. If the peer - /// is using a loopback IP address, the tracker assumes that the peer is - /// in the same network as the tracker and will use the tracker's IP - /// address instead. 
- pub external_ip: Option, - /// Weather the tracker should collect statistics about tracker usage. - /// If enabled, the tracker will collect statistics like the number of - /// connections handled, the number of announce requests handled, etc. - /// Refer to the [`Tracker`](https://docs.rs/torrust-tracker) for more - /// information about the collected metrics. - pub tracker_usage_statistics: bool, - /// If enabled the tracker will persist the number of completed downloads. - /// That's how many times a torrent has been downloaded completely. - pub persistent_torrent_completed_stat: bool, - - // Cleanup job configuration - /// Maximum time in seconds that a peer can be inactive before being - /// considered an inactive peer. If a peer is inactive for more than this - /// time, it will be removed from the torrent peer list. - pub max_peer_timeout: u32, - /// Interval in seconds that the cleanup job will run to remove inactive - /// peers from the torrent peer list. - pub inactive_peer_cleanup_interval: u64, - /// If enabled, the tracker will remove torrents that have no peers. - /// The clean up torrent job runs every `inactive_peer_cleanup_interval` - /// seconds and it removes inactive peers. Eventually, the peer list of a - /// torrent could be empty and the torrent will be removed if this option is - /// enabled. - pub remove_peerless_torrents: bool, - - // Server jobs configuration - /// The list of UDP trackers the tracker is running. Each UDP tracker - /// represents a UDP server that the tracker is running and it has its own - /// configuration. - pub udp_trackers: Vec, - /// The list of HTTP trackers the tracker is running. Each HTTP tracker - /// represents a HTTP server that the tracker is running and it has its own - /// configuration. - pub http_trackers: Vec, - /// The HTTP API configuration. - pub http_api: HttpApi, - /// The Health Check API configuration. 
- pub health_check_api: HealthCheckApi, -} - /// Errors that can occur when loading the configuration. #[derive(Error, Debug)] pub enum Error { @@ -532,147 +157,6 @@ impl From for Error { } } -impl Default for Configuration { - fn default() -> Self { - let announce_policy = AnnouncePolicy::default(); - - let mut configuration = Configuration { - log_level: Option::from(String::from("info")), - mode: TrackerMode::Public, - db_driver: DatabaseDriver::Sqlite3, - db_path: String::from("./storage/tracker/lib/database/sqlite3.db"), - announce_interval: announce_policy.interval, - min_announce_interval: announce_policy.interval_min, - max_peer_timeout: 900, - on_reverse_proxy: false, - external_ip: Some(String::from("0.0.0.0")), - tracker_usage_statistics: true, - persistent_torrent_completed_stat: false, - inactive_peer_cleanup_interval: 600, - remove_peerless_torrents: true, - udp_trackers: Vec::new(), - http_trackers: Vec::new(), - http_api: HttpApi { - enabled: true, - bind_address: String::from("127.0.0.1:1212"), - ssl_enabled: false, - ssl_cert_path: None, - ssl_key_path: None, - access_tokens: [(String::from("admin"), String::from("MyAccessToken"))] - .iter() - .cloned() - .collect(), - }, - health_check_api: HealthCheckApi { - bind_address: String::from("127.0.0.1:1313"), - }, - }; - configuration.udp_trackers.push(UdpTracker { - enabled: false, - bind_address: String::from("0.0.0.0:6969"), - }); - configuration.http_trackers.push(HttpTracker { - enabled: false, - bind_address: String::from("0.0.0.0:7070"), - ssl_enabled: false, - ssl_cert_path: None, - ssl_key_path: None, - }); - configuration - } -} - -impl Configuration { - fn override_api_admin_token(&mut self, api_admin_token: &str) { - self.http_api.override_admin_token(api_admin_token); - } - - /// Returns the tracker public IP address id defined in the configuration, - /// and `None` otherwise. 
- #[must_use] - pub fn get_ext_ip(&self) -> Option { - match &self.external_ip { - None => None, - Some(external_ip) => match IpAddr::from_str(external_ip) { - Ok(external_ip) => Some(external_ip), - Err(_) => None, - }, - } - } - - /// Loads the configuration from the configuration file. - /// - /// # Errors - /// - /// Will return `Err` if `path` does not exist or has a bad configuration. - pub fn load_from_file(path: &str) -> Result { - let config_builder = Config::builder(); - - #[allow(unused_assignments)] - let mut config = Config::default(); - - config = config_builder.add_source(File::with_name(path)).build()?; - - let torrust_config: Configuration = config.try_deserialize()?; - - Ok(torrust_config) - } - - /// Saves the default configuration at the given path. - /// - /// # Errors - /// - /// Will return `Err` if `path` is not a valid path or the configuration - /// file cannot be created. - pub fn create_default_configuration_file(path: &str) -> Result { - let config = Configuration::default(); - config.save_to_file(path)?; - Ok(config) - } - - /// Loads the configuration from the `Info` struct. The whole - /// configuration in toml format is included in the `info.tracker_toml` string. - /// - /// Optionally will override the admin api token. - /// - /// # Errors - /// - /// Will return `Err` if the environment variable does not exist or has a bad configuration. - pub fn load(info: &Info) -> Result { - let config_builder = Config::builder() - .add_source(File::from_str(&info.tracker_toml, FileFormat::Toml)) - .build()?; - let mut config: Configuration = config_builder.try_deserialize()?; - - if let Some(ref token) = info.api_admin_token { - config.override_api_admin_token(token); - }; - - Ok(config) - } - - /// Saves the configuration to the configuration file. - /// - /// # Errors - /// - /// Will return `Err` if `filename` does not exist or the user does not have - /// permission to read it. 
Will also return `Err` if the configuration is - /// not valid or cannot be encoded to TOML. - /// - /// # Panics - /// - /// Will panic if the configuration cannot be written into the file. - pub fn save_to_file(&self, path: &str) -> Result<(), Error> { - fs::write(path, self.to_toml()).expect("Could not write to file!"); - Ok(()) - } - - /// Encodes the configuration to TOML. - fn to_toml(&self) -> String { - toml::to_string(self).expect("Could not encode TOML value") - } -} - #[cfg(test)] mod tests { use crate::Configuration; diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 815d74e40..486d2c300 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -1,4 +1,5 @@ -//! Configuration data structures for [Torrust Tracker](https://docs.rs/torrust-tracker). +//! Version `1` for [Torrust Tracker](https://docs.rs/torrust-tracker) +//! configuration data structures. //! //! This module contains the configuration data structures for the //! Torrust Tracker, which is a `BitTorrent` tracker server. @@ -234,6 +235,11 @@ pub mod http_tracker; pub mod tracker_api; pub mod udp_tracker; +use std::fs; +use std::net::IpAddr; +use std::str::FromStr; + +use config::{Config, File, FileFormat}; use serde::{Deserialize, Serialize}; use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; @@ -241,7 +247,7 @@ use self::health_check_api::HealthCheckApi; use self::http_tracker::HttpTracker; use self::tracker_api::HttpApi; use self::udp_tracker::UdpTracker; -use crate::AnnouncePolicy; +use crate::{AnnouncePolicy, Error, Info}; /// Core configuration for the tracker. 
#[allow(clippy::struct_excessive_bools)] @@ -368,6 +374,105 @@ impl Default for Configuration { } } +impl Configuration { + fn override_api_admin_token(&mut self, api_admin_token: &str) { + self.http_api.override_admin_token(api_admin_token); + } + + /// Returns the tracker public IP address id defined in the configuration, + /// and `None` otherwise. + #[must_use] + pub fn get_ext_ip(&self) -> Option { + match &self.external_ip { + None => None, + Some(external_ip) => match IpAddr::from_str(external_ip) { + Ok(external_ip) => Some(external_ip), + Err(_) => None, + }, + } + } + + /// Loads the configuration from the configuration file. + /// + /// # Errors + /// + /// Will return `Err` if `path` does not exist or has a bad configuration. + pub fn load_from_file(path: &str) -> Result { + // todo: use Figment + + let config_builder = Config::builder(); + + #[allow(unused_assignments)] + let mut config = Config::default(); + + config = config_builder.add_source(File::with_name(path)).build()?; + + let torrust_config: Configuration = config.try_deserialize()?; + + Ok(torrust_config) + } + + /// Saves the default configuration at the given path. + /// + /// # Errors + /// + /// Will return `Err` if `path` is not a valid path or the configuration + /// file cannot be created. + pub fn create_default_configuration_file(path: &str) -> Result { + // todo: use Figment + + let config = Configuration::default(); + config.save_to_file(path)?; + Ok(config) + } + + /// Loads the configuration from the `Info` struct. The whole + /// configuration in toml format is included in the `info.tracker_toml` string. + /// + /// Optionally will override the admin api token. + /// + /// # Errors + /// + /// Will return `Err` if the environment variable does not exist or has a bad configuration. 
+ pub fn load(info: &Info) -> Result { + // todo: use Figment + + let config_builder = Config::builder() + .add_source(File::from_str(&info.tracker_toml, FileFormat::Toml)) + .build()?; + let mut config: Configuration = config_builder.try_deserialize()?; + + if let Some(ref token) = info.api_admin_token { + config.override_api_admin_token(token); + }; + + Ok(config) + } + + /// Saves the configuration to the configuration file. + /// + /// # Errors + /// + /// Will return `Err` if `filename` does not exist or the user does not have + /// permission to read it. Will also return `Err` if the configuration is + /// not valid or cannot be encoded to TOML. + /// + /// # Panics + /// + /// Will panic if the configuration cannot be written into the file. + pub fn save_to_file(&self, path: &str) -> Result<(), Error> { + // todo: use Figment + + fs::write(path, self.to_toml()).expect("Could not write to file!"); + Ok(()) + } + + /// Encodes the configuration to TOML. + fn to_toml(&self) -> String { + toml::to_string(self).expect("Could not encode TOML value") + } +} + #[cfg(test)] mod tests { use figment::providers::{Format, Toml}; diff --git a/packages/configuration/src/v1/tracker_api.rs b/packages/configuration/src/v1/tracker_api.rs index 6cda9b437..51f11a14d 100644 --- a/packages/configuration/src/v1/tracker_api.rs +++ b/packages/configuration/src/v1/tracker_api.rs @@ -30,3 +30,9 @@ pub struct HttpApi { /// the all permissions. pub access_tokens: AccessTokens, } + +impl HttpApi { + pub fn override_admin_token(&mut self, api_admin_token: &str) { + self.access_tokens.insert("admin".to_string(), api_admin_token.to_string()); + } +} From 265d89d1e8cfd318919e793bbfeceb5b3556cbcb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 May 2024 13:54:07 +0100 Subject: [PATCH 0822/1003] refactor: replace Config by Figment in Configuration implementation This replaces the crate `config` with `figment` in the Configuration implementation. 
--- packages/configuration/src/lib.rs | 13 +++++++------ packages/configuration/src/v1/mod.rs | 28 ++++++++-------------------- 2 files changed, 15 insertions(+), 26 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 666500189..78b62442c 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -10,10 +10,9 @@ use std::collections::HashMap; use std::sync::Arc; use std::{env, fs}; -use config::ConfigError; use derive_more::Constructor; use thiserror::Error; -use torrust_tracker_located_error::{DynError, Located, LocatedError}; +use torrust_tracker_located_error::{DynError, LocatedError}; /// The maximum number of returned peers for a torrent. pub const TORRENT_PEERS_LIMIT: usize = 74; @@ -142,17 +141,19 @@ pub enum Error { /// Unable to load the configuration from the configuration file. #[error("Failed processing the configuration: {source}")] - ConfigError { source: LocatedError<'static, ConfigError> }, + ConfigError { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, #[error("The error for errors that can never happen.")] Infallible, } -impl From for Error { +impl From for Error { #[track_caller] - fn from(err: ConfigError) -> Self { + fn from(err: figment::Error) -> Self { Self::ConfigError { - source: Located(err).into(), + source: (Arc::new(err) as DynError).into(), } } } diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 486d2c300..6c044c462 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -239,7 +239,8 @@ use std::fs; use std::net::IpAddr; use std::str::FromStr; -use config::{Config, File, FileFormat}; +use figment::providers::{Format, Toml}; +use figment::Figment; use serde::{Deserialize, Serialize}; use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; @@ -398,18 +399,11 @@ impl Configuration { /// /// Will return `Err` if `path` does not exist or has 
a bad configuration. pub fn load_from_file(path: &str) -> Result { - // todo: use Figment + let figment = Figment::new().merge(Toml::file(path)); - let config_builder = Config::builder(); + let config: Configuration = figment.extract()?; - #[allow(unused_assignments)] - let mut config = Config::default(); - - config = config_builder.add_source(File::with_name(path)).build()?; - - let torrust_config: Configuration = config.try_deserialize()?; - - Ok(torrust_config) + Ok(config) } /// Saves the default configuration at the given path. @@ -419,8 +413,6 @@ impl Configuration { /// Will return `Err` if `path` is not a valid path or the configuration /// file cannot be created. pub fn create_default_configuration_file(path: &str) -> Result { - // todo: use Figment - let config = Configuration::default(); config.save_to_file(path)?; Ok(config) @@ -435,12 +427,9 @@ impl Configuration { /// /// Will return `Err` if the environment variable does not exist or has a bad configuration. pub fn load(info: &Info) -> Result { - // todo: use Figment + let figment = Figment::new().merge(Toml::string(&info.tracker_toml)); - let config_builder = Config::builder() - .add_source(File::from_str(&info.tracker_toml, FileFormat::Toml)) - .build()?; - let mut config: Configuration = config_builder.try_deserialize()?; + let mut config: Configuration = figment.extract()?; if let Some(ref token) = info.api_admin_token { config.override_api_admin_token(token); @@ -461,14 +450,13 @@ impl Configuration { /// /// Will panic if the configuration cannot be written into the file. pub fn save_to_file(&self, path: &str) -> Result<(), Error> { - // todo: use Figment - fs::write(path, self.to_toml()).expect("Could not write to file!"); Ok(()) } /// Encodes the configuration to TOML. fn to_toml(&self) -> String { + // code-review: do we need to use Figment also to serialize into toml? 
toml::to_string(self).expect("Could not encode TOML value") } } From 5bd94940d7230948fed44c6d0b6cae0c1da9810e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 May 2024 15:36:27 +0100 Subject: [PATCH 0823/1003] chore: remove unused config dependenciy It was replaced by `figment`. --- Cargo.lock | 195 +----------------------------- Cargo.toml | 7 +- packages/configuration/Cargo.toml | 1 - 3 files changed, 4 insertions(+), 199 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1b4c2a4e7..0bbf0205a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -587,9 +587,6 @@ name = "bitflags" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" -dependencies = [ - "serde", -] [[package]] name = "bitvec" @@ -898,61 +895,12 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "config" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7328b20597b53c2454f0b1919720c25c7339051c02b72b7e05409e00b14132be" -dependencies = [ - "async-trait", - "convert_case 0.6.0", - "json5", - "lazy_static", - "nom", - "pathdiff", - "ron", - "rust-ini", - "serde", - "serde_json", - "toml", - "yaml-rust", -] - -[[package]] -name = "const-random" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" -dependencies = [ - "const-random-macro", -] - -[[package]] -name = "const-random-macro" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" -dependencies = [ - "getrandom", - "once_cell", - "tiny-keccak", -] - [[package]] name = "convert_case" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" -[[package]] -name = 
"convert_case" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "core-foundation" version = "0.9.4" @@ -1171,7 +1119,7 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ - "convert_case 0.4.0", + "convert_case", "proc-macro2", "quote", "rustc_version", @@ -1199,15 +1147,6 @@ dependencies = [ "crypto-common", ] -[[package]] -name = "dlv-list" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f" -dependencies = [ - "const-random", -] - [[package]] name = "downcast" version = "0.11.0" @@ -2008,17 +1947,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "json5" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" -dependencies = [ - "pest", - "pest_derive", - "serde", -] - [[package]] name = "kv-log-macro" version = "1.0.7" @@ -2151,12 +2079,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - [[package]] name = "linux-raw-sys" version = "0.3.8" @@ -2548,16 +2470,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "ordered-multimap" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ed8acf08e98e744e5384c8bc63ceb0364e68a6854187221c18df61c4797690e" -dependencies = [ - "dlv-list", - "hashbrown 0.13.2", -] - [[package]] name = "parking" version = "2.2.0" @@ -2587,12 +2499,6 @@ dependencies = [ 
"windows-targets 0.52.5", ] -[[package]] -name = "pathdiff" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" - [[package]] name = "pear" version = "0.2.9" @@ -2632,51 +2538,6 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" -[[package]] -name = "pest" -version = "2.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" -dependencies = [ - "memchr", - "thiserror", - "ucd-trie", -] - -[[package]] -name = "pest_derive" -version = "2.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26293c9193fbca7b1a3bf9b79dc1e388e927e6cacaa78b4a3ab705a1d3d41459" -dependencies = [ - "pest", - "pest_generator", -] - -[[package]] -name = "pest_generator" -version = "2.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ec22af7d3fb470a85dd2ca96b7c577a1eb4ef6f1683a9fe9a8c16e136c04687" -dependencies = [ - "pest", - "pest_meta", - "proc-macro2", - "quote", - "syn 2.0.61", -] - -[[package]] -name = "pest_meta" -version = "2.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7a240022f37c361ec1878d646fc5b7d7c4d28d5946e1a80ad5a7a4f4ca0bdcd" -dependencies = [ - "once_cell", - "pest", - "sha2", -] - [[package]] name = "phf" version = "0.11.2" @@ -3202,18 +3063,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "ron" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" -dependencies = [ - "base64 0.21.7", - "bitflags 2.5.0", - "serde", - "serde_derive", -] - [[package]] name = "rstest" version = "0.19.0" @@ -3257,16 +3106,6 @@ dependencies = [ "smallvec", ] 
-[[package]] -name = "rust-ini" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e2a3bcec1f113553ef1c88aae6c020a369d03d55b58de9869a0908930385091" -dependencies = [ - "cfg-if", - "ordered-multimap", -] - [[package]] name = "rust_decimal" version = "1.35.0" @@ -3887,15 +3726,6 @@ dependencies = [ "time-core", ] -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - [[package]] name = "tinytemplate" version = "1.2.1" @@ -4052,7 +3882,6 @@ dependencies = [ "axum-server", "chrono", "clap", - "config", "crossbeam-skiplist", "dashmap", "derive_more", @@ -4109,7 +3938,6 @@ dependencies = [ name = "torrust-tracker-configuration" version = "3.0.0-alpha.12-develop" dependencies = [ - "config", "derive_more", "figment", "serde", @@ -4291,12 +4119,6 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" -[[package]] -name = "ucd-trie" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" - [[package]] name = "uncased" version = "0.9.10" @@ -4327,12 +4149,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" - [[package]] name = "untrusted" version = "0.9.0" @@ -4707,15 +4523,6 @@ dependencies = [ "tap", ] -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - 
[[package]] name = "yansi" version = "1.0.1" diff --git a/Cargo.toml b/Cargo.toml index 2be3455b8..d7aa9a31c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,7 +39,6 @@ axum-extra = { version = "0", features = ["query"] } axum-server = { version = "0", features = ["tls-rustls"] } chrono = { version = "0", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } -config = "0" crossbeam-skiplist = "0.1" dashmap = "5.5.3" derive_more = "0" @@ -80,7 +79,7 @@ uuid = { version = "1", features = ["v4"] } zerocopy = "0.7.33" [package.metadata.cargo-machete] -ignored = ["serde_bytes", "crossbeam-skiplist", "dashmap", "parking_lot"] +ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_bytes"] [dev-dependencies] local-ip-address = "0" @@ -94,7 +93,7 @@ members = [ "packages/located-error", "packages/primitives", "packages/test-helpers", - "packages/torrent-repository" + "packages/torrent-repository", ] [profile.dev] @@ -108,5 +107,5 @@ lto = "fat" opt-level = 3 [profile.release-debug] -inherits = "release" debug = true +inherits = "release" diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index e5335d416..a033dcea1 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -15,7 +15,6 @@ rust-version.workspace = true version.workspace = true [dependencies] -config = "0" derive_more = "0" figment = { version = "0.10.18", features = ["env", "test", "toml"] } serde = { version = "1", features = ["derive"] } From 146b77d86f86b62fc50014586ab19a1848edbc1b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 May 2024 16:38:09 +0100 Subject: [PATCH 0824/1003] feat: enable overwrite Configuration values using env vars Enable Figment ability to overwrite all config options with env vars. We are currently overwriting only this value: ```toml [http_api.access_tokens] admin = "MyAccessToken" ``` With the env var `TORRUST_TRACKER_API_ADMIN_TOKEN`. 
The name we gave to the env var does not follow Figment convention which is `TORRUST_TRACKER_HTTP_API.ACCESS_TOKENS.ADMIN`. We have to keep both options until we remove the old one in the rest of the code. --- packages/configuration/src/v1/mod.rs | 138 ++++++++++++++++++--------- 1 file changed, 91 insertions(+), 47 deletions(-) diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 6c044c462..562eb569e 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -239,7 +239,7 @@ use std::fs; use std::net::IpAddr; use std::str::FromStr; -use figment::providers::{Format, Toml}; +use figment::providers::{Env, Format, Toml}; use figment::Figment; use serde::{Deserialize, Serialize}; use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; @@ -400,6 +400,16 @@ impl Configuration { /// Will return `Err` if `path` does not exist or has a bad configuration. pub fn load_from_file(path: &str) -> Result { let figment = Figment::new().merge(Toml::file(path)); + //.merge(Env::prefixed("TORRUST_TRACKER_")); + + // code-review: merging values from env vars makes the test + // "configuration_should_be_loaded_from_a_toml_config_file" fail. + // + // It's because this line in a new test: + // + // jail.set_env("TORRUST_TRACKER_HTTP_API.ACCESS_TOKENS.ADMIN", "NewToken"); + // + // It seems env vars are shared between tests. + let config: Configuration = figment.extract()?; @@ -427,7 +437,9 @@ impl Configuration { /// /// Will return `Err` if the environment variable does not exist or has a bad configuration.
pub fn load(info: &Info) -> Result { - let figment = Figment::new().merge(Toml::string(&info.tracker_toml)); + let figment = Figment::new() + .merge(Toml::string(&info.tracker_toml)) + .merge(Env::prefixed("TORRUST_TRACKER_")); let mut config: Configuration = figment.extract()?; @@ -463,58 +475,67 @@ impl Configuration { #[cfg(test)] mod tests { - use figment::providers::{Format, Toml}; + use figment::providers::{Env, Format, Toml}; use figment::Figment; use crate::v1::Configuration; + #[cfg(test)] + fn default_config_toml() -> String { + let config = r#"log_level = "info" + mode = "public" + db_driver = "Sqlite3" + db_path = "./storage/tracker/lib/database/sqlite3.db" + announce_interval = 120 + min_announce_interval = 120 + on_reverse_proxy = false + external_ip = "0.0.0.0" + tracker_usage_statistics = true + persistent_torrent_completed_stat = false + max_peer_timeout = 900 + inactive_peer_cleanup_interval = 600 + remove_peerless_torrents = true + + [[udp_trackers]] + enabled = false + bind_address = "0.0.0.0:6969" + + [[http_trackers]] + enabled = false + bind_address = "0.0.0.0:7070" + ssl_enabled = false + ssl_cert_path = "" + ssl_key_path = "" + + [http_api] + enabled = true + bind_address = "127.0.0.1:1212" + ssl_enabled = false + ssl_cert_path = "" + ssl_key_path = "" + + [http_api.access_tokens] + admin = "MyAccessToken" + + [health_check_api] + bind_address = "127.0.0.1:1313" + "# + .lines() + .map(str::trim_start) + .collect::>() + .join("\n"); + config + } + #[test] fn configuration_should_be_loaded_from_a_toml_config_file() { figment::Jail::expect_with(|jail| { - jail.create_file( - "Config.toml", - r#" - log_level = "info" - mode = "public" - db_driver = "Sqlite3" - db_path = "./storage/tracker/lib/database/sqlite3.db" - announce_interval = 120 - min_announce_interval = 120 - on_reverse_proxy = false - external_ip = "0.0.0.0" - tracker_usage_statistics = true - persistent_torrent_completed_stat = false - max_peer_timeout = 900 - 
inactive_peer_cleanup_interval = 600 - remove_peerless_torrents = true - - [[udp_trackers]] - enabled = false - bind_address = "0.0.0.0:6969" - - [[http_trackers]] - enabled = false - bind_address = "0.0.0.0:7070" - ssl_enabled = false - ssl_cert_path = "" - ssl_key_path = "" - - [http_api] - enabled = true - bind_address = "127.0.0.1:1212" - ssl_enabled = false - ssl_cert_path = "" - ssl_key_path = "" - - [http_api.access_tokens] - admin = "MyAccessToken" - - [health_check_api] - bind_address = "127.0.0.1:1313" - "#, - )?; - - let figment = Figment::new().merge(Toml::file("Config.toml")); + jail.create_file("Config.toml", &default_config_toml())?; + + // todo: replace with Configuration method + let figment = Figment::new() + .merge(Toml::file("Config.toml")) + .merge(Env::prefixed("TORRUST_TRACKER_")); let config: Configuration = figment.extract()?; @@ -523,4 +544,27 @@ mod tests { Ok(()) }); } + + #[test] + fn configuration_should_allow_to_overwrite_the_default_tracker_api_token_for_admin() { + figment::Jail::expect_with(|jail| { + jail.create_file("Config.toml", &default_config_toml())?; + + jail.set_env("TORRUST_TRACKER_HTTP_API.ACCESS_TOKENS.ADMIN", "NewToken"); + + // todo: replace with Configuration method + let figment = Figment::new() + .merge(Toml::file("Config.toml")) + .merge(Env::prefixed("TORRUST_TRACKER_")); + + let config: Configuration = figment.extract()?; + + assert_eq!( + config.http_api.access_tokens.get("admin"), + Some("NewToken".to_owned()).as_ref() + ); + + Ok(()) + }); + } } From 632c8baad3ad47934872a40f1f6da5c721325e75 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 May 2024 17:06:06 +0100 Subject: [PATCH 0825/1003] refactor: move Configuration unit test to inner mods --- packages/configuration/src/lib.rs | 133 ------------------- packages/configuration/src/v1/mod.rs | 91 +++++++------ packages/configuration/src/v1/tracker_api.rs | 29 ++++ 3 files changed, 80 insertions(+), 173 deletions(-) diff --git 
a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 78b62442c..20912990a 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -157,136 +157,3 @@ impl From for Error { } } } - -#[cfg(test)] -mod tests { - use crate::Configuration; - - #[cfg(test)] - fn default_config_toml() -> String { - let config = r#"log_level = "info" - mode = "public" - db_driver = "Sqlite3" - db_path = "./storage/tracker/lib/database/sqlite3.db" - announce_interval = 120 - min_announce_interval = 120 - on_reverse_proxy = false - external_ip = "0.0.0.0" - tracker_usage_statistics = true - persistent_torrent_completed_stat = false - max_peer_timeout = 900 - inactive_peer_cleanup_interval = 600 - remove_peerless_torrents = true - - [[udp_trackers]] - enabled = false - bind_address = "0.0.0.0:6969" - - [[http_trackers]] - enabled = false - bind_address = "0.0.0.0:7070" - ssl_enabled = false - ssl_cert_path = "" - ssl_key_path = "" - - [http_api] - enabled = true - bind_address = "127.0.0.1:1212" - ssl_enabled = false - ssl_cert_path = "" - ssl_key_path = "" - - [http_api.access_tokens] - admin = "MyAccessToken" - - [health_check_api] - bind_address = "127.0.0.1:1313" - "# - .lines() - .map(str::trim_start) - .collect::>() - .join("\n"); - config - } - - #[test] - fn configuration_should_have_default_values() { - let configuration = Configuration::default(); - - let toml = toml::to_string(&configuration).expect("Could not encode TOML value"); - - assert_eq!(toml, default_config_toml()); - } - - #[test] - fn configuration_should_contain_the_external_ip() { - let configuration = Configuration::default(); - - assert_eq!(configuration.external_ip, Some(String::from("0.0.0.0"))); - } - - #[test] - fn configuration_should_be_saved_in_a_toml_config_file() { - use std::{env, fs}; - - use uuid::Uuid; - - // Build temp config file path - let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("test_config_{}.toml", 
Uuid::new_v4())); - - // Convert to argument type for Configuration::save_to_file - let config_file_path = temp_file; - let path = config_file_path.to_string_lossy().to_string(); - - let default_configuration = Configuration::default(); - - default_configuration - .save_to_file(&path) - .expect("Could not save configuration to file"); - - let contents = fs::read_to_string(&path).expect("Something went wrong reading the file"); - - assert_eq!(contents, default_config_toml()); - } - - #[cfg(test)] - fn create_temp_config_file_with_default_config() -> String { - use std::env; - use std::fs::File; - use std::io::Write; - - use uuid::Uuid; - - // Build temp config file path - let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); - - // Convert to argument type for Configuration::load_from_file - let config_file_path = temp_file.clone(); - let path = config_file_path.to_string_lossy().to_string(); - - // Write file contents - let mut file = File::create(temp_file).unwrap(); - writeln!(&mut file, "{}", default_config_toml()).unwrap(); - - path - } - - #[test] - fn configuration_should_be_loaded_from_a_toml_config_file() { - let config_file_path = create_temp_config_file_with_default_config(); - - let configuration = Configuration::load_from_file(&config_file_path).expect("Could not load configuration from file"); - - assert_eq!(configuration, Configuration::default()); - } - - #[test] - fn http_api_configuration_should_check_if_it_contains_a_token() { - let configuration = Configuration::default(); - - assert!(configuration.http_api.access_tokens.values().any(|t| t == "MyAccessToken")); - assert!(!configuration.http_api.access_tokens.values().any(|t| t == "NonExistingToken")); - } -} diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 562eb569e..4413ed7c4 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -345,17 
+345,7 @@ impl Default for Configuration { remove_peerless_torrents: true, udp_trackers: Vec::new(), http_trackers: Vec::new(), - http_api: HttpApi { - enabled: true, - bind_address: String::from("127.0.0.1:1212"), - ssl_enabled: false, - ssl_cert_path: None, - ssl_key_path: None, - access_tokens: [(String::from("admin"), String::from("MyAccessToken"))] - .iter() - .cloned() - .collect(), - }, + http_api: HttpApi::default(), health_check_api: HealthCheckApi { bind_address: String::from("127.0.0.1:1313"), }, @@ -399,17 +389,9 @@ impl Configuration { /// /// Will return `Err` if `path` does not exist or has a bad configuration. pub fn load_from_file(path: &str) -> Result { - let figment = Figment::new().merge(Toml::file(path)); - //.merge(Env::prefixed("TORRUST_TRACKER_")); - - // code-review: merging values from env vars makes the test - // "configuration_should_be_loaded_from_a_toml_config_file" fail. - // - // It's because this line in a new test: - // - // jail.set_env("TORRUST_TRACKER_HTTP_API.ACCESS_TOKENS.ADMIN", "NewToken"); - // - // It seems env vars are shared between tests. 
+ let figment = Figment::new() + .merge(Toml::file(path)) + .merge(Env::prefixed("TORRUST_TRACKER_")); let config: Configuration = figment.extract()?; @@ -475,8 +457,6 @@ impl Configuration { #[cfg(test)] mod tests { - use figment::providers::{Env, Format, Toml}; - use figment::Figment; use crate::v1::Configuration; @@ -527,19 +507,55 @@ mod tests { config } + #[test] + fn configuration_should_have_default_values() { + let configuration = Configuration::default(); + + let toml = toml::to_string(&configuration).expect("Could not encode TOML value"); + + assert_eq!(toml, default_config_toml()); + } + + #[test] + fn configuration_should_contain_the_external_ip() { + let configuration = Configuration::default(); + + assert_eq!(configuration.external_ip, Some(String::from("0.0.0.0"))); + } + + #[test] + fn configuration_should_be_saved_in_a_toml_config_file() { + use std::{env, fs}; + + use uuid::Uuid; + + // Build temp config file path + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); + + // Convert to argument type for Configuration::save_to_file + let config_file_path = temp_file; + let path = config_file_path.to_string_lossy().to_string(); + + let default_configuration = Configuration::default(); + + default_configuration + .save_to_file(&path) + .expect("Could not save configuration to file"); + + let contents = fs::read_to_string(&path).expect("Something went wrong reading the file"); + + assert_eq!(contents, default_config_toml()); + } + #[test] fn configuration_should_be_loaded_from_a_toml_config_file() { figment::Jail::expect_with(|jail| { - jail.create_file("Config.toml", &default_config_toml())?; - - // todo: replace with Configuration method - let figment = Figment::new() - .merge(Toml::file("Config.toml")) - .merge(Env::prefixed("TORRUST_TRACKER_")); + jail.create_file("tracker.toml", &default_config_toml())?; - let config: Configuration = figment.extract()?; + let configuration = 
Configuration::load_from_file("tracker.toml").expect("Could not load configuration from file"); - assert_eq!(config, Configuration::default()); + assert_eq!(configuration, Configuration::default()); Ok(()) }); @@ -548,19 +564,14 @@ mod tests { #[test] fn configuration_should_allow_to_overwrite_the_default_tracker_api_token_for_admin() { figment::Jail::expect_with(|jail| { - jail.create_file("Config.toml", &default_config_toml())?; + jail.create_file("tracker.toml", &default_config_toml())?; jail.set_env("TORRUST_TRACKER_HTTP_API.ACCESS_TOKENS.ADMIN", "NewToken"); - // todo: replace with Configuration method - let figment = Figment::new() - .merge(Toml::file("Config.toml")) - .merge(Env::prefixed("TORRUST_TRACKER_")); - - let config: Configuration = figment.extract()?; + let configuration = Configuration::load_from_file("tracker.toml").expect("Could not load configuration from file"); assert_eq!( - config.http_api.access_tokens.get("admin"), + configuration.http_api.access_tokens.get("admin"), Some("NewToken".to_owned()).as_ref() ); diff --git a/packages/configuration/src/v1/tracker_api.rs b/packages/configuration/src/v1/tracker_api.rs index 51f11a14d..8749478c8 100644 --- a/packages/configuration/src/v1/tracker_api.rs +++ b/packages/configuration/src/v1/tracker_api.rs @@ -31,8 +31,37 @@ pub struct HttpApi { pub access_tokens: AccessTokens, } +impl Default for HttpApi { + fn default() -> Self { + Self { + enabled: true, + bind_address: String::from("127.0.0.1:1212"), + ssl_enabled: false, + ssl_cert_path: None, + ssl_key_path: None, + access_tokens: [(String::from("admin"), String::from("MyAccessToken"))] + .iter() + .cloned() + .collect(), + } + } +} + impl HttpApi { pub fn override_admin_token(&mut self, api_admin_token: &str) { self.access_tokens.insert("admin".to_string(), api_admin_token.to_string()); } } + +#[cfg(test)] +mod tests { + use crate::v1::tracker_api::HttpApi; + + #[test] + fn http_api_configuration_should_check_if_it_contains_a_token() { + let 
configuration = HttpApi::default(); + + assert!(configuration.access_tokens.values().any(|t| t == "MyAccessToken")); + assert!(!configuration.access_tokens.values().any(|t| t == "NonExistingToken")); + } +} From b3a1442ee808f2b50296e455880a0d767b56f599 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 May 2024 18:01:23 +0100 Subject: [PATCH 0826/1003] refactor!: remove unused method in Configuration --- packages/configuration/src/v1/mod.rs | 34 +++++++++++----------------- 1 file changed, 13 insertions(+), 21 deletions(-) diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 4413ed7c4..dbb6eb7c0 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -383,21 +383,6 @@ impl Configuration { } } - /// Loads the configuration from the configuration file. - /// - /// # Errors - /// - /// Will return `Err` if `path` does not exist or has a bad configuration. - pub fn load_from_file(path: &str) -> Result { - let figment = Figment::new() - .merge(Toml::file(path)) - .merge(Env::prefixed("TORRUST_TRACKER_")); - - let config: Configuration = figment.extract()?; - - Ok(config) - } - /// Saves the default configuration at the given path. 
/// /// # Errors @@ -459,6 +444,7 @@ impl Configuration { mod tests { use crate::v1::Configuration; + use crate::Info; #[cfg(test)] fn default_config_toml() -> String { @@ -540,7 +536,13 @@ mod tests { #[test] fn configuration_should_be_loaded_from_a_toml_config_file() { - figment::Jail::expect_with(|_jail| { - jail.create_file("tracker.toml", &default_config_toml())?; + figment::Jail::expect_with(|_jail| { + let info = Info { + tracker_toml: default_config_toml(), + api_admin_token: None, + }; - let configuration = Configuration::load_from_file("tracker.toml").expect("Could not load configuration from file"); + let configuration = Configuration::load(&info).expect("Could not load configuration from file"); assert_eq!(configuration, Configuration::default()); @@ -564,11 +553,14 @@ mod tests { #[test] fn configuration_should_allow_to_overwrite_the_default_tracker_api_token_for_admin() { figment::Jail::expect_with(|jail| { - jail.create_file("tracker.toml", &default_config_toml())?; - jail.set_env("TORRUST_TRACKER_HTTP_API.ACCESS_TOKENS.ADMIN", "NewToken"); - let configuration = Configuration::load_from_file("tracker.toml").expect("Could not load configuration from file"); + let info = Info { + tracker_toml: default_config_toml(), + api_admin_token: None, + }; + + let configuration = Configuration::load(&info).expect("Could not load configuration from file"); assert_eq!( configuration.http_api.access_tokens.get("admin"), Some("NewToken".to_owned()).as_ref() ); From caae725578a253d9c73b85c9d4b8cb97f0ad66b3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 May 2024 08:15:35 +0100 Subject: [PATCH 0827/1003] feat: use double underscore to split config env var names For example, the env var `TORRUST_TRACKER__HTTP_API__ACCESS_TOKENS__ADMIN` would be the config option: ``` [http_api.access_tokens] admin = "MyAccessToken" ``` It uses `__` double underscore because dots are not allowed in Bash names.
See: https://www.gnu.org/software/bash/manual/bash.html#Definitions ``` name A word consisting solely of letters, numbers, and underscores, and beginning with a letter or underscore. Names are used as shell variable and function names. Also referred to as an identifier. ``` --- packages/configuration/src/v1/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index dbb6eb7c0..88b9565bd 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -406,7 +406,7 @@ impl Configuration { pub fn load(info: &Info) -> Result { let figment = Figment::new() .merge(Toml::string(&info.tracker_toml)) - .merge(Env::prefixed("TORRUST_TRACKER_")); + .merge(Env::prefixed("TORRUST_TRACKER__").split("__")); let mut config: Configuration = figment.extract()?; @@ -553,7 +553,7 @@ mod tests { #[test] fn configuration_should_allow_to_overwrite_the_default_tracker_api_token_for_admin() { figment::Jail::expect_with(|jail| { - jail.set_env("TORRUST_TRACKER_HTTP_API.ACCESS_TOKENS.ADMIN", "NewToken"); + jail.set_env("TORRUST_TRACKER__HTTP_API__ACCESS_TOKENS__ADMIN", "NewToken"); let info = Info { tracker_toml: default_config_toml(), From 69d793986605ff058a44a5ad4cba86f9ab50d360 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 May 2024 13:00:31 +0100 Subject: [PATCH 0828/1003] refactor: implement Default for Configuration sections --- .../configuration/src/v1/health_check_api.rs | 8 ++++++++ packages/configuration/src/v1/http_tracker.rs | 12 ++++++++++++ packages/configuration/src/v1/mod.rs | 17 +++-------------- packages/configuration/src/v1/udp_tracker.rs | 8 ++++++++ 4 files changed, 31 insertions(+), 14 deletions(-) diff --git a/packages/configuration/src/v1/health_check_api.rs b/packages/configuration/src/v1/health_check_api.rs index f7b15249c..1c2cd073a 100644 --- a/packages/configuration/src/v1/health_check_api.rs +++ 
b/packages/configuration/src/v1/health_check_api.rs @@ -11,3 +11,11 @@ pub struct HealthCheckApi { /// system to choose a random port, use port `0`. pub bind_address: String, } + +impl Default for HealthCheckApi { + fn default() -> Self { + Self { + bind_address: String::from("127.0.0.1:1313"), + } + } +} diff --git a/packages/configuration/src/v1/http_tracker.rs b/packages/configuration/src/v1/http_tracker.rs index 4c88feb9c..c2d5928e2 100644 --- a/packages/configuration/src/v1/http_tracker.rs +++ b/packages/configuration/src/v1/http_tracker.rs @@ -21,3 +21,15 @@ pub struct HttpTracker { #[serde_as(as = "NoneAsEmptyString")] pub ssl_key_path: Option, } + +impl Default for HttpTracker { + fn default() -> Self { + Self { + enabled: false, + bind_address: String::from("0.0.0.0:7070"), + ssl_enabled: false, + ssl_cert_path: None, + ssl_key_path: None, + } + } +} diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 88b9565bd..07d8a7194 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -346,21 +346,10 @@ impl Default for Configuration { udp_trackers: Vec::new(), http_trackers: Vec::new(), http_api: HttpApi::default(), - health_check_api: HealthCheckApi { - bind_address: String::from("127.0.0.1:1313"), - }, + health_check_api: HealthCheckApi::default(), }; - configuration.udp_trackers.push(UdpTracker { - enabled: false, - bind_address: String::from("0.0.0.0:6969"), - }); - configuration.http_trackers.push(HttpTracker { - enabled: false, - bind_address: String::from("0.0.0.0:7070"), - ssl_enabled: false, - ssl_cert_path: None, - ssl_key_path: None, - }); + configuration.udp_trackers.push(UdpTracker::default()); + configuration.http_trackers.push(HttpTracker::default()); configuration } } diff --git a/packages/configuration/src/v1/udp_tracker.rs b/packages/configuration/src/v1/udp_tracker.rs index b304054c3..254272bdd 100644 --- a/packages/configuration/src/v1/udp_tracker.rs +++ 
b/packages/configuration/src/v1/udp_tracker.rs @@ -10,3 +10,11 @@ pub struct UdpTracker { /// system to choose a random port, use port `0`. pub bind_address: String, } +impl Default for UdpTracker { + fn default() -> Self { + Self { + enabled: false, + bind_address: String::from("0.0.0.0:6969"), + } + } +} From b0c2f9f435f9a6c8b033fd70f1f0a69049709cd9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 May 2024 13:07:36 +0100 Subject: [PATCH 0829/1003] docs: update env var name in toml config template files --- share/default/config/tracker.container.mysql.toml | 8 ++++---- share/default/config/tracker.container.sqlite3.toml | 8 ++++---- share/default/config/tracker.development.sqlite3.toml | 4 ++++ share/default/config/tracker.e2e.container.sqlite3.toml | 8 ++++---- share/default/config/tracker.udp.benchmarking.toml | 4 ++++ 5 files changed, 20 insertions(+), 12 deletions(-) diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml index e7714c229..f2db06228 100644 --- a/share/default/config/tracker.container.mysql.toml +++ b/share/default/config/tracker.container.mysql.toml @@ -30,11 +30,11 @@ ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" ssl_enabled = false ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" -# Please override the admin token setting the -# `TORRUST_TRACKER_API_ADMIN_TOKEN` -# environmental variable! 
- [http_api.access_tokens] +# Please override the admin token setting the environmental variable: +# `TORRUST_TRACKER__HTTP_API__ACCESS_TOKENS__ADMIN` +# The old variable name is deprecated: +# `TORRUST_TRACKER_API_ADMIN_TOKEN` admin = "MyAccessToken" [health_check_api] diff --git a/share/default/config/tracker.container.sqlite3.toml b/share/default/config/tracker.container.sqlite3.toml index 4ec055c56..4a3ba03b6 100644 --- a/share/default/config/tracker.container.sqlite3.toml +++ b/share/default/config/tracker.container.sqlite3.toml @@ -30,11 +30,11 @@ ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" ssl_enabled = false ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" -# Please override the admin token setting the -# `TORRUST_TRACKER_API_ADMIN_TOKEN` -# environmental variable! - [http_api.access_tokens] +# Please override the admin token setting the environmental variable: +# `TORRUST_TRACKER__HTTP_API__ACCESS_TOKENS__ADMIN` +# The old variable name is deprecated: +# `TORRUST_TRACKER_API_ADMIN_TOKEN` admin = "MyAccessToken" [health_check_api] diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 9304a2d51..62e5b478e 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -31,6 +31,10 @@ ssl_enabled = false ssl_key_path = "" [http_api.access_tokens] +# Please override the admin token setting the environmental variable: +# `TORRUST_TRACKER__HTTP_API__ACCESS_TOKENS__ADMIN` +# The old variable name is deprecated: +# `TORRUST_TRACKER_API_ADMIN_TOKEN` admin = "MyAccessToken" [health_check_api] diff --git a/share/default/config/tracker.e2e.container.sqlite3.toml b/share/default/config/tracker.e2e.container.sqlite3.toml index 86ffb3ffd..3738704b5 100644 --- a/share/default/config/tracker.e2e.container.sqlite3.toml +++ b/share/default/config/tracker.e2e.container.sqlite3.toml @@ -30,11 +30,11 @@ 
ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" ssl_enabled = false ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" -# Please override the admin token setting the -# `TORRUST_TRACKER_API_ADMIN_TOKEN` -# environmental variable! - [http_api.access_tokens] +# Please override the admin token setting the environmental variable: +# `TORRUST_TRACKER__HTTP_API__ACCESS_TOKENS__ADMIN` +# The old variable name is deprecated: +# `TORRUST_TRACKER_API_ADMIN_TOKEN` admin = "MyAccessToken" [health_check_api] diff --git a/share/default/config/tracker.udp.benchmarking.toml b/share/default/config/tracker.udp.benchmarking.toml index 70298e9dc..1e951d8fc 100644 --- a/share/default/config/tracker.udp.benchmarking.toml +++ b/share/default/config/tracker.udp.benchmarking.toml @@ -31,6 +31,10 @@ ssl_enabled = false ssl_key_path = "" [http_api.access_tokens] +# Please override the admin token setting the environmental variable: +# `TORRUST_TRACKER__HTTP_API__ACCESS_TOKENS__ADMIN` +# The old variable name is deprecated: +# `TORRUST_TRACKER_API_ADMIN_TOKEN` admin = "MyAccessToken" [health_check_api] From 43942ce2b0fca425558e715d7d556ce9307b0ef5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 May 2024 13:26:46 +0100 Subject: [PATCH 0830/1003] tests: add test for configuration with deprecated env var name Until it's updated in this repo and the Index repo you can overwrite the admin token using the new env var name and the old one (deprecated): - New: `TORRUST_TRACKER__HTTP_API__ACCESS_TOKENS__ADMIN` - Old (deprecated): `TORRUST_TRACKER_API_ADMIN_TOKEN` The new one uses exactly the structure and attribute names in the toml file, using `__` as the separator for levels. We can remove the test when we remove the deprecated name.
--- packages/configuration/src/v1/mod.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 07d8a7194..8d5c123fe 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -540,7 +540,7 @@ mod tests { } #[test] - fn configuration_should_allow_to_overwrite_the_default_tracker_api_token_for_admin() { + fn configuration_should_allow_to_overwrite_the_default_tracker_api_token_for_admin_with_env_var() { figment::Jail::expect_with(|jail| { jail.set_env("TORRUST_TRACKER__HTTP_API__ACCESS_TOKENS__ADMIN", "NewToken"); @@ -559,4 +559,23 @@ mod tests { Ok(()) }); } + + #[test] + fn configuration_should_allow_to_overwrite_the_default_tracker_api_token_for_admin_with_the_deprecated_env_var_name() { + figment::Jail::expect_with(|_jail| { + let info = Info { + tracker_toml: default_config_toml(), + api_admin_token: Some("NewToken".to_owned()), + }; + + let configuration = Configuration::load(&info).expect("Could not load configuration from file"); + + assert_eq!( + configuration.http_api.access_tokens.get("admin"), + Some("NewToken".to_owned()).as_ref() + ); + + Ok(()) + }); + } } From 0252f308183e8ea53988907e7553eda8952ebdc7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 May 2024 13:47:22 +0100 Subject: [PATCH 0831/1003] feat: allow users not to provide config option with default values Now, you are able to run the tracker like this: ``` TORRUST_TRACKER_CONFIG="" cargo run ``` Default values will be used for the missing values in the provided configuration. In that case, none of the values have been provided, so it will use default values for all options.
--- packages/configuration/src/v1/mod.rs | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 8d5c123fe..25aa587b3 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -239,7 +239,7 @@ use std::fs; use std::net::IpAddr; use std::str::FromStr; -use figment::providers::{Env, Format, Toml}; +use figment::providers::{Env, Format, Serialized, Toml}; use figment::Figment; use serde::{Deserialize, Serialize}; use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; @@ -393,7 +393,7 @@ impl Configuration { /// /// Will return `Err` if the environment variable does not exist or has a bad configuration. pub fn load(info: &Info) -> Result { - let figment = Figment::new() + let figment = Figment::from(Serialized::defaults(Configuration::default())) .merge(Toml::string(&info.tracker_toml)) .merge(Env::prefixed("TORRUST_TRACKER__").split("__")); @@ -523,6 +523,24 @@ mod tests { assert_eq!(contents, default_config_toml()); } + #[test] + fn configuration_should_use_the_default_values_when_an_empty_configuration_is_provided_by_the_user() { + figment::Jail::expect_with(|_jail| { + let empty_configuration = String::new(); + + let info = Info { + tracker_toml: empty_configuration, + api_admin_token: None, + }; + + let configuration = Configuration::load(&info).expect("Could not load configuration from file"); + + assert_eq!(configuration, Configuration::default()); + + Ok(()) + }); + } + #[test] fn configuration_should_be_loaded_from_a_toml_config_file() { figment::Jail::expect_with(|_jail| { From 384e9f820ea685d6f4d2cc9639b83df478b6b359 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 May 2024 17:19:30 +0100 Subject: [PATCH 0832/1003] refactor: [#852] enrich field types in HealthCheckApi config struct --- packages/configuration/src/v1/health_check_api.rs | 6 ++++-- packages/test-helpers/src/configuration.rs | 4
++-- src/bootstrap/jobs/health_check_api.rs | 5 +---- tests/servers/health_check_api/environment.rs | 5 +---- 4 files changed, 8 insertions(+), 12 deletions(-) diff --git a/packages/configuration/src/v1/health_check_api.rs b/packages/configuration/src/v1/health_check_api.rs index 1c2cd073a..b8bfd2c1b 100644 --- a/packages/configuration/src/v1/health_check_api.rs +++ b/packages/configuration/src/v1/health_check_api.rs @@ -1,3 +1,5 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use serde::{Deserialize, Serialize}; use serde_with::serde_as; @@ -9,13 +11,13 @@ pub struct HealthCheckApi { /// The format is `ip:port`, for example `127.0.0.1:1313`. If you want to /// listen to all interfaces, use `0.0.0.0`. If you want the operating /// system to choose a random port, use port `0`. - pub bind_address: String, + pub bind_address: SocketAddr, } impl Default for HealthCheckApi { fn default() -> Self { Self { - bind_address: String::from("127.0.0.1:1313"), + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1313), } } } diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 49cfdd390..c1ee95197 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -1,6 +1,6 @@ //! Tracker configuration factories for testing. 
use std::env; -use std::net::IpAddr; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::TrackerMode; @@ -39,7 +39,7 @@ pub fn ephemeral() -> Configuration { // Ephemeral socket address for Health Check API let health_check_api_port = 0u16; - config.health_check_api.bind_address = format!("127.0.0.1:{}", &health_check_api_port); + config.health_check_api.bind_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), health_check_api_port); // Ephemeral socket address for UDP tracker let udp_port = 0u16; diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index eec4d81a8..fdedaa3e9 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -35,10 +35,7 @@ use crate::servers::signals::Halted; /// /// It would panic if unable to send the `ApiServerJobStarted` notice. pub async fn start_job(config: &HealthCheckApi, register: ServiceRegistry) -> JoinHandle<()> { - let bind_addr = config - .bind_address - .parse::() - .expect("it should have a valid health check bind address"); + let bind_addr = config.bind_address; let (tx_start, rx_start) = oneshot::channel::(); let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); diff --git a/tests/servers/health_check_api/environment.rs b/tests/servers/health_check_api/environment.rs index 0856985d5..c200beaeb 100644 --- a/tests/servers/health_check_api/environment.rs +++ b/tests/servers/health_check_api/environment.rs @@ -33,10 +33,7 @@ pub struct Environment { impl Environment { pub fn new(config: &Arc, registar: Registar) -> Self { - let bind_to = config - .bind_address - .parse::() - .expect("Tracker API bind_address invalid."); + let bind_to = config.bind_address; Self { registar, From 1475eadd25e7fbf208523f72f49188d3f28173c2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 May 2024 11:12:53 +0100 Subject: [PATCH 0833/1003] refactor: [#852] enrich
field types in UdpTracker config struct --- packages/configuration/src/v1/udp_tracker.rs | 6 ++++-- packages/test-helpers/src/configuration.rs | 10 +++++----- src/bootstrap/jobs/udp_tracker.rs | 5 +---- src/servers/udp/server.rs | 9 +++------ tests/servers/udp/environment.rs | 5 +---- 5 files changed, 14 insertions(+), 21 deletions(-) diff --git a/packages/configuration/src/v1/udp_tracker.rs b/packages/configuration/src/v1/udp_tracker.rs index 254272bdd..1f772164e 100644 --- a/packages/configuration/src/v1/udp_tracker.rs +++ b/packages/configuration/src/v1/udp_tracker.rs @@ -1,3 +1,5 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] @@ -8,13 +10,13 @@ pub struct UdpTracker { /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to /// listen to all interfaces, use `0.0.0.0`. If you want the operating /// system to choose a random port, use port `0`. - pub bind_address: String, + pub bind_address: SocketAddr, } impl Default for UdpTracker { fn default() -> Self { Self { enabled: false, - bind_address: String::from("0.0.0.0:6969"), + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 6969), } } } diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index c1ee95197..c28cf553e 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -1,6 +1,6 @@ //! Tracker configuration factories for testing. 
use std::env; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::TrackerMode; @@ -44,7 +44,7 @@ pub fn ephemeral() -> Configuration { // Ephemeral socket address for UDP tracker let udp_port = 0u16; config.udp_trackers[0].enabled = true; - config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &udp_port); + config.udp_trackers[0].bind_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), udp_port); // Ephemeral socket address for HTTP tracker let http_port = 0u16; @@ -136,10 +136,10 @@ pub fn ephemeral_with_external_ip(ip: IpAddr) -> Configuration { pub fn ephemeral_ipv6() -> Configuration { let mut cfg = ephemeral(); - let ipv6 = format!("[::]:{}", 0); + let ipv6 = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), 0); - cfg.http_api.bind_address.clone_from(&ipv6); - cfg.http_trackers[0].bind_address.clone_from(&ipv6); + cfg.http_api.bind_address.clone_from(&ipv6.to_string()); + cfg.http_trackers[0].bind_address.clone_from(&ipv6.to_string()); cfg.udp_trackers[0].bind_address = ipv6; cfg diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index e9e4bc642..bb1cdb492 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -27,10 +27,7 @@ use crate::servers::udp::server::{Launcher, UdpServer}; /// It will panic if the task did not finish successfully. 
#[must_use] pub async fn start_job(config: &UdpTracker, tracker: Arc, form: ServiceRegistrationForm) -> JoinHandle<()> { - let bind_to = config - .bind_address - .parse::() - .expect("it should have a valid udp tracker bind address"); + let bind_to = config.bind_address; let server = UdpServer::new(Launcher::new(bind_to)) .start(tracker, form) diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index f7092f377..b02b9802d 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -463,19 +463,16 @@ mod tests { let cfg = Arc::new(ephemeral_mode_public()); let tracker = initialize_with_configuration(&cfg); let config = &cfg.udp_trackers[0]; - - let bind_to = config - .bind_address - .parse::() - .expect("Tracker API bind_address invalid."); - + let bind_to = config.bind_address; let register = &Registar::default(); let stopped = UdpServer::new(Launcher::new(bind_to)); + let started = stopped .start(tracker, register.give_form()) .await .expect("it should start the server"); + let stopped = started.stop().await.expect("it should stop the server"); tokio::time::sleep(Duration::from_secs(1)).await; diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 6ced1dbb7..c1fecbdd3 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -31,10 +31,7 @@ impl Environment { let config = Arc::new(configuration.udp_trackers[0].clone()); - let bind_to = config - .bind_address - .parse::() - .expect("Tracker API bind_address invalid."); + let bind_to = config.bind_address; let server = UdpServer::new(Launcher::new(bind_to)); From fc191f7b6f8eef730665f16739e016ed1b4b5820 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 May 2024 11:26:04 +0100 Subject: [PATCH 0834/1003] refactor: [#852] enrich field types in HttpTracker config struct In the next major config version the `TslConfig` should always contain valid file paths and the whole field should be optional in the parent struct 
`HttpTracker`: ```rust pub struct HttpTracker { pub enabled: bool, pub bind_address: SocketAddr, pub ssl_enabled: bool, #[serde(flatten)] pub tsl_config: Optional, } pub struct TslConfig { pub ssl_cert_path: PathBuf, pub ssl_key_path: PathBuf, } ``` That means, the user could provide it or not, but if it's provided file paths can't be empty. --- packages/configuration/src/lib.rs | 13 +++++++++++ packages/configuration/src/v1/http_tracker.rs | 22 +++++++++---------- packages/test-helpers/src/configuration.rs | 4 ++-- src/bootstrap/jobs/http_tracker.rs | 15 +++++++------ src/servers/http/server.rs | 17 +++++++------- tests/servers/http/environment.rs | 13 ++++++----- 6 files changed, 50 insertions(+), 34 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 20912990a..f1e316f10 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -11,6 +11,8 @@ use std::sync::Arc; use std::{env, fs}; use derive_more::Constructor; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, NoneAsEmptyString}; use thiserror::Error; use torrust_tracker_located_error::{DynError, LocatedError}; @@ -157,3 +159,14 @@ impl From for Error { } } } + +#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Default)] +pub struct TslConfig { + /// Path to the SSL certificate file. + #[serde_as(as = "NoneAsEmptyString")] + pub ssl_cert_path: Option, + /// Path to the SSL key file. 
+ #[serde_as(as = "NoneAsEmptyString")] + pub ssl_key_path: Option, +} diff --git a/packages/configuration/src/v1/http_tracker.rs b/packages/configuration/src/v1/http_tracker.rs index c2d5928e2..57b2d83a1 100644 --- a/packages/configuration/src/v1/http_tracker.rs +++ b/packages/configuration/src/v1/http_tracker.rs @@ -1,5 +1,9 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, NoneAsEmptyString}; +use serde_with::serde_as; + +use crate::TslConfig; /// Configuration for each HTTP tracker. #[serde_as] @@ -11,25 +15,21 @@ pub struct HttpTracker { /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to /// listen to all interfaces, use `0.0.0.0`. If you want the operating /// system to choose a random port, use port `0`. - pub bind_address: String, + pub bind_address: SocketAddr, /// Weather the HTTP tracker will use SSL or not. pub ssl_enabled: bool, - /// Path to the SSL certificate file. Only used if `ssl_enabled` is `true`. - #[serde_as(as = "NoneAsEmptyString")] - pub ssl_cert_path: Option, - /// Path to the SSL key file. Only used if `ssl_enabled` is `true`. - #[serde_as(as = "NoneAsEmptyString")] - pub ssl_key_path: Option, + /// TSL config. Only used if `ssl_enabled` is true. 
+ #[serde(flatten)] + pub tsl_config: TslConfig, } impl Default for HttpTracker { fn default() -> Self { Self { enabled: false, - bind_address: String::from("0.0.0.0:7070"), + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 7070), ssl_enabled: false, - ssl_cert_path: None, - ssl_key_path: None, + tsl_config: TslConfig::default(), } } } diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index c28cf553e..e6f53f85b 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -49,7 +49,7 @@ pub fn ephemeral() -> Configuration { // Ephemeral socket address for HTTP tracker let http_port = 0u16; config.http_trackers[0].enabled = true; - config.http_trackers[0].bind_address = format!("127.0.0.1:{}", &http_port); + config.http_trackers[0].bind_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), http_port); // Ephemeral sqlite database let temp_directory = env::temp_dir(); @@ -139,7 +139,7 @@ pub fn ephemeral_ipv6() -> Configuration { let ipv6 = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), 0); cfg.http_api.bind_address.clone_from(&ipv6.to_string()); - cfg.http_trackers[0].bind_address.clone_from(&ipv6.to_string()); + cfg.http_trackers[0].bind_address.clone_from(&ipv6); cfg.udp_trackers[0].bind_address = ipv6; cfg diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 0a0638b78..88d643149 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -40,14 +40,15 @@ pub async fn start_job( version: Version, ) -> Option> { if config.enabled { - let socket = config - .bind_address - .parse::() - .expect("it should have a valid http tracker bind address"); + let socket = config.bind_address; - let tls = make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path) - .await - .map(|tls| tls.expect("it should have a valid http tracker tls 
configuration")); + let tls = make_rust_tls( + config.ssl_enabled, + &config.tsl_config.ssl_cert_path, + &config.tsl_config.ssl_key_path, + ) + .await + .map(|tls| tls.expect("it should have a valid http tracker tls configuration")); match version { Version::V1 => Some(start_v1(socket, tls, tracker.clone(), form).await), diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index decc734c5..5791708ec 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -234,14 +234,15 @@ mod tests { let tracker = initialize_with_configuration(&cfg); let config = &cfg.http_trackers[0]; - let bind_to = config - .bind_address - .parse::() - .expect("Tracker API bind_address invalid."); - - let tls = make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path) - .await - .map(|tls| tls.expect("tls config failed")); + let bind_to = config.bind_address; + + let tls = make_rust_tls( + config.ssl_enabled, + &config.tsl_config.ssl_cert_path, + &config.tsl_config.ssl_key_path, + ) + .await + .map(|tls| tls.expect("tls config failed")); let register = &Registar::default(); diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index f00da293e..e3aa6641e 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -31,13 +31,14 @@ impl Environment { let config = Arc::new(configuration.http_trackers[0].clone()); - let bind_to = config - .bind_address - .parse::() - .expect("Tracker API bind_address invalid."); + let bind_to = config.bind_address; - let tls = block_on(make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path)) - .map(|tls| tls.expect("tls config failed")); + let tls = block_on(make_rust_tls( + config.ssl_enabled, + &config.tsl_config.ssl_cert_path, + &config.tsl_config.ssl_key_path, + )) + .map(|tls| tls.expect("tls config failed")); let server = HttpServer::new(Launcher::new(bind_to, tls)); From a2e718bb1d56b5977f53bed0e10b677db4b4e63e Mon Sep 17 
00:00:00 2001 From: Jose Celano Date: Fri, 10 May 2024 12:00:24 +0100 Subject: [PATCH 0835/1003] chore(deps): add dependency camino We are using `String` to represent a filepath. We are refactoring to enrich types in configuration. Filepath should be represented with `PathBuf` but it allows non UTF-8 chars, so it can't be serialized. Since we need to serialize config options (toml, json) as valid UTF-8 strings, we need a type that represents a valid filepath but also is a valid UTF-8 string. That makes it impossible to use non-UTF8 filepaths. It seems that's a restriction accepted for a lot of projects including Rust `cargo`. --- Cargo.lock | 11 +++++++++++ Cargo.toml | 1 + cSpell.json | 1 + packages/configuration/Cargo.toml | 1 + 4 files changed, 14 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 0bbf0205a..e54601bcf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -720,6 +720,15 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +[[package]] +name = "camino" +version = "1.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +dependencies = [ + "serde", +] + [[package]] name = "cast" version = "0.3.0" @@ -3880,6 +3889,7 @@ dependencies = [ "axum-client-ip", "axum-extra", "axum-server", + "camino", "chrono", "clap", "crossbeam-skiplist", @@ -3938,6 +3948,7 @@ dependencies = [ name = "torrust-tracker-configuration" version = "3.0.0-alpha.12-develop" dependencies = [ + "camino", "derive_more", "figment", "serde", diff --git a/Cargo.toml b/Cargo.toml index d7aa9a31c..60652b160 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,6 +37,7 @@ axum = { version = "0", features = ["macros"] } axum-client-ip = "0" axum-extra = { version = "0", features = ["query"] } axum-server = { version = "0", features = ["tls-rustls"] } +camino = { version = "1.1.6", features = 
["serde"] } chrono = { version = "0", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } crossbeam-skiplist = "0.1" diff --git a/cSpell.json b/cSpell.json index 2473e9c33..bd6c9d489 100644 --- a/cSpell.json +++ b/cSpell.json @@ -25,6 +25,7 @@ "Buildx", "byteorder", "callgrind", + "camino", "canonicalize", "canonicalized", "certbot", diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index a033dcea1..bac2132d5 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -15,6 +15,7 @@ rust-version.workspace = true version.workspace = true [dependencies] +camino = { version = "1.1.6", features = ["serde"] } derive_more = "0" figment = { version = "0.10.18", features = ["env", "test", "toml"] } serde = { version = "1", features = ["derive"] } From 3997cfa256a944b30b55f2d94ae5dbcf0670a696 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 May 2024 12:06:32 +0100 Subject: [PATCH 0836/1003] refactor: [#852] enrich field types in TslConfig config struct --- packages/configuration/src/lib.rs | 5 +++-- src/bootstrap/jobs/http_tracker.rs | 4 ++-- src/bootstrap/jobs/mod.rs | 31 +++++++++++++++++++++++++++++- src/servers/http/server.rs | 4 ++-- tests/servers/http/environment.rs | 4 ++-- 5 files changed, 39 insertions(+), 9 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index f1e316f10..239803f47 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -10,6 +10,7 @@ use std::collections::HashMap; use std::sync::Arc; use std::{env, fs}; +use camino::Utf8PathBuf; use derive_more::Constructor; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; @@ -165,8 +166,8 @@ impl From for Error { pub struct TslConfig { /// Path to the SSL certificate file. 
#[serde_as(as = "NoneAsEmptyString")] - pub ssl_cert_path: Option, + pub ssl_cert_path: Option, /// Path to the SSL key file. #[serde_as(as = "NoneAsEmptyString")] - pub ssl_key_path: Option, + pub ssl_key_path: Option, } diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 88d643149..d9e8bdabe 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -18,7 +18,7 @@ use log::info; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpTracker; -use super::make_rust_tls; +use super::make_rust_tls_from_path_buf; use crate::core; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::http::Version; @@ -42,7 +42,7 @@ pub async fn start_job( if config.enabled { let socket = config.bind_address; - let tls = make_rust_tls( + let tls = make_rust_tls_from_path_buf( config.ssl_enabled, &config.tsl_config.ssl_cert_path, &config.tsl_config.ssl_key_path, diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index 2c12eb40e..dd855f7c6 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -28,7 +28,35 @@ pub async fn make_rust_tls(enabled: bool, cert: &Option, key: &Option, + key: &Option, +) -> Option> { + if !enabled { + info!("TLS not enabled"); + return None; + } + + if let (Some(cert), Some(key)) = (cert, key) { + info!("Using https: cert path: {cert}."); + info!("Using https: key path: {key}."); Some( RustlsConfig::from_pem_file(cert, key) @@ -77,6 +105,7 @@ use std::panic::Location; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; +use camino::Utf8PathBuf; use log::info; use thiserror::Error; use torrust_tracker_located_error::{DynError, LocatedError}; diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 5791708ec..a6f96634f 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -224,7 +224,7 @@ mod tests { use torrust_tracker_test_helpers::configuration::ephemeral_mode_public; 
use crate::bootstrap::app::initialize_with_configuration; - use crate::bootstrap::jobs::make_rust_tls; + use crate::bootstrap::jobs::make_rust_tls_from_path_buf; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::registar::Registar; @@ -236,7 +236,7 @@ mod tests { let bind_to = config.bind_address; - let tls = make_rust_tls( + let tls = make_rust_tls_from_path_buf( config.ssl_enabled, &config.tsl_config.ssl_cert_path, &config.tsl_config.ssl_key_path, diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index e3aa6641e..e662cea7c 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use futures::executor::block_on; use torrust_tracker::bootstrap::app::initialize_with_configuration; -use torrust_tracker::bootstrap::jobs::make_rust_tls; +use torrust_tracker::bootstrap::jobs::make_rust_tls_from_path_buf; use torrust_tracker::core::Tracker; use torrust_tracker::servers::http::server::{HttpServer, Launcher, Running, Stopped}; use torrust_tracker::servers::registar::Registar; @@ -33,7 +33,7 @@ impl Environment { let bind_to = config.bind_address; - let tls = block_on(make_rust_tls( + let tls = block_on(make_rust_tls_from_path_buf( config.ssl_enabled, &config.tsl_config.ssl_cert_path, &config.tsl_config.ssl_key_path, From ceb30747cec6372b5244f797ad032c7db8c03d6b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 May 2024 12:49:57 +0100 Subject: [PATCH 0837/1003] refactor: [#852] enrich field types in HttpApi config struct --- packages/configuration/src/v1/tracker_api.rs | 23 ++++--- packages/test-helpers/src/configuration.rs | 4 +- src/bootstrap/jobs/http_tracker.rs | 12 ++-- src/bootstrap/jobs/mod.rs | 68 ++++++++------------ src/bootstrap/jobs/tracker_apis.rs | 7 +- src/servers/apis/server.rs | 7 +- src/servers/http/server.rs | 12 ++-- tests/servers/api/environment.rs | 8 +-- tests/servers/http/environment.rs | 9 +-- 9 files changed, 56 
insertions(+), 94 deletions(-) diff --git a/packages/configuration/src/v1/tracker_api.rs b/packages/configuration/src/v1/tracker_api.rs index 8749478c8..5089c496a 100644 --- a/packages/configuration/src/v1/tracker_api.rs +++ b/packages/configuration/src/v1/tracker_api.rs @@ -1,7 +1,10 @@ use std::collections::HashMap; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, NoneAsEmptyString}; +use serde_with::serde_as; + +use crate::TslConfig; pub type AccessTokens = HashMap; @@ -15,19 +18,16 @@ pub struct HttpApi { /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to /// listen to all interfaces, use `0.0.0.0`. If you want the operating /// system to choose a random port, use port `0`. - pub bind_address: String, + pub bind_address: SocketAddr, /// Weather the HTTP API will use SSL or not. pub ssl_enabled: bool, - /// Path to the SSL certificate file. Only used if `ssl_enabled` is `true`. - #[serde_as(as = "NoneAsEmptyString")] - pub ssl_cert_path: Option, - /// Path to the SSL key file. Only used if `ssl_enabled` is `true`. - #[serde_as(as = "NoneAsEmptyString")] - pub ssl_key_path: Option, + /// TSL config. Only used if `ssl_enabled` is true. + #[serde(flatten)] + pub tsl_config: TslConfig, /// Access tokens for the HTTP API. The key is a label identifying the /// token and the value is the token itself. The token is used to /// authenticate the user. All tokens are valid for all endpoints and have - /// the all permissions. + /// all permissions. 
pub access_tokens: AccessTokens, } @@ -35,10 +35,9 @@ impl Default for HttpApi { fn default() -> Self { Self { enabled: true, - bind_address: String::from("127.0.0.1:1212"), + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1212), ssl_enabled: false, - ssl_cert_path: None, - ssl_key_path: None, + tsl_config: TslConfig::default(), access_tokens: [(String::from("admin"), String::from("MyAccessToken"))] .iter() .cloned() diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index e6f53f85b..08f570dc6 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -35,7 +35,7 @@ pub fn ephemeral() -> Configuration { // Ephemeral socket address for API let api_port = 0u16; config.http_api.enabled = true; - config.http_api.bind_address = format!("127.0.0.1:{}", &api_port); + config.http_api.bind_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), api_port); // Ephemeral socket address for Health Check API let health_check_api_port = 0u16; @@ -138,7 +138,7 @@ pub fn ephemeral_ipv6() -> Configuration { let ipv6 = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), 0); - cfg.http_api.bind_address.clone_from(&ipv6.to_string()); + cfg.http_api.bind_address.clone_from(&ipv6); cfg.http_trackers[0].bind_address.clone_from(&ipv6); cfg.udp_trackers[0].bind_address = ipv6; diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index d9e8bdabe..d8a976b98 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -18,7 +18,7 @@ use log::info; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpTracker; -use super::make_rust_tls_from_path_buf; +use super::make_rust_tls; use crate::core; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::http::Version; @@ -42,13 +42,9 @@ pub async fn start_job( if config.enabled { let socket = 
config.bind_address; - let tls = make_rust_tls_from_path_buf( - config.ssl_enabled, - &config.tsl_config.ssl_cert_path, - &config.tsl_config.ssl_key_path, - ) - .await - .map(|tls| tls.expect("it should have a valid http tracker tls configuration")); + let tls = make_rust_tls(config.ssl_enabled, &config.tsl_config) + .await + .map(|tls| tls.expect("it should have a valid http tracker tls configuration")); match version { Version::V1 => Some(start_v1(socket, tls, tracker.clone(), form).await), diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index dd855f7c6..d288989b5 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -20,41 +20,13 @@ pub struct Started { pub address: std::net::SocketAddr, } -pub async fn make_rust_tls(enabled: bool, cert: &Option, key: &Option) -> Option> { +pub async fn make_rust_tls(enabled: bool, tsl_config: &TslConfig) -> Option> { if !enabled { info!("TLS not enabled"); return None; } - if let (Some(cert), Some(key)) = (cert, key) { - info!("Using https: cert path: {cert}."); - info!("Using https: key path: {key}."); - - Some( - RustlsConfig::from_pem_file(cert, key) - .await - .map_err(|err| Error::BadTlsConfig { - source: (Arc::new(err) as DynError).into(), - }), - ) - } else { - Some(Err(Error::MissingTlsConfig { - location: Location::caller(), - })) - } -} - -pub async fn make_rust_tls_from_path_buf( - enabled: bool, - cert: &Option, - key: &Option, -) -> Option> { - if !enabled { - info!("TLS not enabled"); - return None; - } - - if let (Some(cert), Some(key)) = (cert, key) { + if let (Some(cert), Some(key)) = (tsl_config.ssl_cert_path.clone(), tsl_config.ssl_key_path.clone()) { info!("Using https: cert path: {cert}."); info!("Using https: key path: {key}."); @@ -75,15 +47,23 @@ pub async fn make_rust_tls_from_path_buf( #[cfg(test)] mod tests { + use camino::Utf8PathBuf; + use torrust_tracker_configuration::TslConfig; + use super::make_rust_tls; #[tokio::test] async fn 
it_should_error_on_bad_tls_config() { - let (bad_cert_path, bad_key_path) = (Some("bad cert path".to_string()), Some("bad key path".to_string())); - let err = make_rust_tls(true, &bad_cert_path, &bad_key_path) - .await - .expect("tls_was_enabled") - .expect_err("bad_cert_and_key_files"); + let err = make_rust_tls( + true, + &TslConfig { + ssl_cert_path: Some(Utf8PathBuf::from("bad cert path")), + ssl_key_path: Some(Utf8PathBuf::from("bad key path")), + }, + ) + .await + .expect("tls_was_enabled") + .expect_err("bad_cert_and_key_files"); assert!(err .to_string() @@ -91,11 +71,17 @@ mod tests { } #[tokio::test] - async fn it_should_error_on_missing_tls_config() { - let err = make_rust_tls(true, &None, &None) - .await - .expect("tls_was_enabled") - .expect_err("missing_config"); + async fn it_should_error_on_missing_cert_or_key_paths() { + let err = make_rust_tls( + true, + &TslConfig { + ssl_cert_path: None, + ssl_key_path: None, + }, + ) + .await + .expect("tls_was_enabled") + .expect_err("missing_config"); assert_eq!(err.to_string(), "tls config missing"); } @@ -105,9 +91,9 @@ use std::panic::Location; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; -use camino::Utf8PathBuf; use log::info; use thiserror::Error; +use torrust_tracker_configuration::TslConfig; use torrust_tracker_located_error::{DynError, LocatedError}; /// Error returned by the Bootstrap Process. 
diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index ffd7c7407..120c960ef 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -61,12 +61,9 @@ pub async fn start_job( version: Version, ) -> Option> { if config.enabled { - let bind_to = config - .bind_address - .parse::() - .expect("it should have a valid tracker api bind address"); + let bind_to = config.bind_address; - let tls = make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path) + let tls = make_rust_tls(config.ssl_enabled, &config.tsl_config) .await .map(|tls| tls.expect("it should have a valid tracker api tls configuration")); diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index e72890557..9317d6ec0 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -275,12 +275,9 @@ mod tests { let tracker = initialize_with_configuration(&cfg); - let bind_to = config - .bind_address - .parse::() - .expect("Tracker API bind_address invalid."); + let bind_to = config.bind_address; - let tls = make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path) + let tls = make_rust_tls(config.ssl_enabled, &config.tsl_config) .await .map(|tls| tls.expect("tls config failed")); diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index a6f96634f..7c8148f22 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -224,7 +224,7 @@ mod tests { use torrust_tracker_test_helpers::configuration::ephemeral_mode_public; use crate::bootstrap::app::initialize_with_configuration; - use crate::bootstrap::jobs::make_rust_tls_from_path_buf; + use crate::bootstrap::jobs::make_rust_tls; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::registar::Registar; @@ -236,13 +236,9 @@ mod tests { let bind_to = config.bind_address; - let tls = make_rust_tls_from_path_buf( - config.ssl_enabled, - &config.tsl_config.ssl_cert_path, - 
&config.tsl_config.ssl_key_path, - ) - .await - .map(|tls| tls.expect("tls config failed")); + let tls = make_rust_tls(config.ssl_enabled, &config.tsl_config) + .await + .map(|tls| tls.expect("tls config failed")); let register = &Registar::default(); diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index dec4ccff2..cacde8af9 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -33,13 +33,9 @@ impl Environment { let config = Arc::new(configuration.http_api.clone()); - let bind_to = config - .bind_address - .parse::() - .expect("Tracker API bind_address invalid."); + let bind_to = config.bind_address; - let tls = block_on(make_rust_tls(config.ssl_enabled, &config.ssl_cert_path, &config.ssl_key_path)) - .map(|tls| tls.expect("tls config failed")); + let tls = block_on(make_rust_tls(config.ssl_enabled, &config.tsl_config)).map(|tls| tls.expect("tls config failed")); let server = ApiServer::new(Launcher::new(bind_to, tls)); diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index e662cea7c..61837c40f 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use futures::executor::block_on; use torrust_tracker::bootstrap::app::initialize_with_configuration; -use torrust_tracker::bootstrap::jobs::make_rust_tls_from_path_buf; +use torrust_tracker::bootstrap::jobs::make_rust_tls; use torrust_tracker::core::Tracker; use torrust_tracker::servers::http::server::{HttpServer, Launcher, Running, Stopped}; use torrust_tracker::servers::registar::Registar; @@ -33,12 +33,7 @@ impl Environment { let bind_to = config.bind_address; - let tls = block_on(make_rust_tls_from_path_buf( - config.ssl_enabled, - &config.tsl_config.ssl_cert_path, - &config.tsl_config.ssl_key_path, - )) - .map(|tls| tls.expect("tls config failed")); + let tls = block_on(make_rust_tls(config.ssl_enabled, &config.tsl_config)).map(|tls| tls.expect("tls 
config failed")); let server = HttpServer::new(Launcher::new(bind_to, tls)); From 7519ecc70052555dcaf9b59f533327d190649cd6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 May 2024 13:47:47 +0100 Subject: [PATCH 0838/1003] refactor: [#852] enrich field types in Configuration struct --- packages/configuration/src/lib.rs | 17 +++++++++++++++ packages/configuration/src/v1/mod.rs | 25 +++++++++------------- packages/test-helpers/src/configuration.rs | 6 +++--- src/bootstrap/logging.rs | 14 ++++++++---- src/servers/apis/mod.rs | 4 ++-- src/servers/http/v1/services/announce.rs | 5 +++-- src/servers/udp/handlers.rs | 2 +- 7 files changed, 46 insertions(+), 27 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 239803f47..85867816c 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -171,3 +171,20 @@ pub struct TslConfig { #[serde_as(as = "NoneAsEmptyString")] pub ssl_key_path: Option, } + +#[derive(Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +#[serde(rename_all = "lowercase")] +pub enum LogLevel { + /// A level lower than all log levels. + Off, + /// Corresponds to the `Error` log level. + Error, + /// Corresponds to the `Warn` log level. + Warn, + /// Corresponds to the `Info` log level. + Info, + /// Corresponds to the `Debug` log level. + Debug, + /// Corresponds to the `Trace` log level. 
+ Trace, +} diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 25aa587b3..643235c03 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -236,8 +236,7 @@ pub mod tracker_api; pub mod udp_tracker; use std::fs; -use std::net::IpAddr; -use std::str::FromStr; +use std::net::{IpAddr, Ipv4Addr}; use figment::providers::{Env, Format, Serialized, Toml}; use figment::Figment; @@ -248,7 +247,7 @@ use self::health_check_api::HealthCheckApi; use self::http_tracker::HttpTracker; use self::tracker_api::HttpApi; use self::udp_tracker::UdpTracker; -use crate::{AnnouncePolicy, Error, Info}; +use crate::{AnnouncePolicy, Error, Info, LogLevel}; /// Core configuration for the tracker. #[allow(clippy::struct_excessive_bools)] @@ -256,7 +255,7 @@ use crate::{AnnouncePolicy, Error, Info}; pub struct Configuration { /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, /// `Debug` and `Trace`. Default is `Info`. - pub log_level: Option, + pub log_level: Option, /// Tracker mode. See [`TrackerMode`] for more information. pub mode: TrackerMode, @@ -284,7 +283,7 @@ pub struct Configuration { /// is using a loopback IP address, the tracker assumes that the peer is /// in the same network as the tracker and will use the tracker's IP /// address instead. - pub external_ip: Option, + pub external_ip: Option, /// Weather the tracker should collect statistics about tracker usage. /// If enabled, the tracker will collect statistics like the number of /// connections handled, the number of announce requests handled, etc. 
@@ -330,7 +329,7 @@ impl Default for Configuration { let announce_policy = AnnouncePolicy::default(); let mut configuration = Configuration { - log_level: Option::from(String::from("info")), + log_level: Some(LogLevel::Info), mode: TrackerMode::Public, db_driver: DatabaseDriver::Sqlite3, db_path: String::from("./storage/tracker/lib/database/sqlite3.db"), @@ -338,7 +337,7 @@ impl Default for Configuration { min_announce_interval: announce_policy.interval_min, max_peer_timeout: 900, on_reverse_proxy: false, - external_ip: Some(String::from("0.0.0.0")), + external_ip: Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), tracker_usage_statistics: true, persistent_torrent_completed_stat: false, inactive_peer_cleanup_interval: 600, @@ -363,13 +362,7 @@ impl Configuration { /// and `None` otherwise. #[must_use] pub fn get_ext_ip(&self) -> Option { - match &self.external_ip { - None => None, - Some(external_ip) => match IpAddr::from_str(external_ip) { - Ok(external_ip) => Some(external_ip), - Err(_) => None, - }, - } + self.external_ip.as_ref().map(|external_ip| *external_ip) } /// Saves the default configuration at the given path. 
@@ -432,6 +425,8 @@ impl Configuration { #[cfg(test)] mod tests { + use std::net::{IpAddr, Ipv4Addr}; + use crate::v1::Configuration; use crate::Info; @@ -495,7 +490,7 @@ mod tests { fn configuration_should_contain_the_external_ip() { let configuration = Configuration::default(); - assert_eq!(configuration.external_ip, Some(String::from("0.0.0.0"))); + assert_eq!(configuration.external_ip, Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)))); } #[test] diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 08f570dc6..0c7cc533a 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -2,7 +2,7 @@ use std::env; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; -use torrust_tracker_configuration::Configuration; +use torrust_tracker_configuration::{Configuration, LogLevel}; use torrust_tracker_primitives::TrackerMode; use crate::random; @@ -28,7 +28,7 @@ pub fn ephemeral() -> Configuration { // For example: a test for the UDP tracker should disable the API and HTTP tracker. let mut config = Configuration { - log_level: Some("off".to_owned()), // Change to `debug` for tests debugging + log_level: Some(LogLevel::Off), // Change to `debug` for tests debugging ..Default::default() }; @@ -125,7 +125,7 @@ pub fn ephemeral_mode_private_whitelisted() -> Configuration { pub fn ephemeral_with_external_ip(ip: IpAddr) -> Configuration { let mut cfg = ephemeral(); - cfg.external_ip = Some(ip.to_string()); + cfg.external_ip = Some(ip); cfg } diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs index 97e26919d..b71079b57 100644 --- a/src/bootstrap/logging.rs +++ b/src/bootstrap/logging.rs @@ -10,11 +10,10 @@ //! - `Trace` //! //! Refer to the [configuration crate documentation](https://docs.rs/torrust-tracker-configuration) to know how to change log settings. 
-use std::str::FromStr; use std::sync::Once; use log::{info, LevelFilter}; -use torrust_tracker_configuration::Configuration; +use torrust_tracker_configuration::{Configuration, LogLevel}; static INIT: Once = Once::new(); @@ -31,10 +30,17 @@ pub fn setup(cfg: &Configuration) { }); } -fn config_level_or_default(log_level: &Option) -> LevelFilter { +fn config_level_or_default(log_level: &Option) -> LevelFilter { match log_level { None => log::LevelFilter::Info, - Some(level) => LevelFilter::from_str(level).unwrap(), + Some(level) => match level { + LogLevel::Off => LevelFilter::Off, + LogLevel::Error => LevelFilter::Error, + LogLevel::Warn => LevelFilter::Warn, + LogLevel::Info => LevelFilter::Info, + LogLevel::Debug => LevelFilter::Debug, + LogLevel::Trace => LevelFilter::Trace, + }, } } diff --git a/src/servers/apis/mod.rs b/src/servers/apis/mod.rs index 2d4b3abe1..ef37026fe 100644 --- a/src/servers/apis/mod.rs +++ b/src/servers/apis/mod.rs @@ -130,8 +130,8 @@ //! > **NOTICE**: You can generate a self-signed certificate for localhost using //! OpenSSL. See [Let's Encrypt](https://letsencrypt.org/docs/certificates-for-localhost/). //! That's particularly useful for testing purposes. Once you have the certificate -//! you need to set the [`ssl_cert_path`](torrust_tracker_configuration::HttpApi::ssl_cert_path) -//! and [`ssl_key_path`](torrust_tracker_configuration::HttpApi::ssl_key_path) +//! you need to set the [`ssl_cert_path`](torrust_tracker_configuration::HttpApi::tsl_config.ssl_cert_path) +//! and [`ssl_key_path`](torrust_tracker_configuration::HttpApi::tsl_config.ssl_key_path) //! options in the configuration file with the paths to the certificate //! (`localhost.crt`) and key (`localhost.key`) files. //! 
diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index b37081045..5a0ae40e4 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -145,8 +145,9 @@ mod tests { fn tracker_with_an_ipv6_external_ip(stats_event_sender: Box) -> Tracker { let mut configuration = configuration::ephemeral(); - configuration.external_ip = - Some(IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)).to_string()); + configuration.external_ip = Some(IpAddr::V6(Ipv6Addr::new( + 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, + ))); Tracker::new(&configuration, Some(stats_event_sender), statistics::Repo::new()).unwrap() } diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 876f4c9fe..d8ca4680c 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -426,7 +426,7 @@ mod tests { } pub fn with_external_ip(mut self, external_ip: &str) -> Self { - self.configuration.external_ip = Some(external_ip.to_owned()); + self.configuration.external_ip = Some(external_ip.to_owned().parse().expect("valid IP address")); self } From b545b33c17b3f71591a98dba9aeab84294cd1a62 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 May 2024 15:59:58 +0100 Subject: [PATCH 0839/1003] refactor: [#852] extract Core configuration type --- packages/configuration/src/lib.rs | 4 +- packages/configuration/src/v1/core.rs | 88 +++++++++++++++ packages/configuration/src/v1/mod.rs | 101 +++--------------- packages/test-helpers/src/configuration.rs | 23 ++-- src/app.rs | 6 +- src/bootstrap/jobs/torrent_cleanup.rs | 4 +- src/bootstrap/logging.rs | 2 +- src/core/mod.rs | 16 +-- src/core/services/mod.rs | 2 +- .../http/v1/extractors/client_ip_sources.rs | 2 +- src/servers/http/v1/services/announce.rs | 2 +- src/servers/udp/handlers.rs | 2 +- 12 files changed, 134 insertions(+), 118 deletions(-) create mode 100644 
packages/configuration/src/v1/core.rs diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 85867816c..fdc021480 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -3,7 +3,7 @@ //! This module contains the configuration data structures for the //! Torrust Tracker, which is a `BitTorrent` tracker server. //! -//! The current version for configuration is [`v1`](crate::v1). +//! The current version for configuration is [`v1`]. pub mod v1; use std::collections::HashMap; @@ -172,7 +172,7 @@ pub struct TslConfig { pub ssl_key_path: Option, } -#[derive(Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +#[derive(Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Debug, Hash, Clone)] #[serde(rename_all = "lowercase")] pub enum LogLevel { /// A level lower than all log levels. diff --git a/packages/configuration/src/v1/core.rs b/packages/configuration/src/v1/core.rs new file mode 100644 index 000000000..ed9074194 --- /dev/null +++ b/packages/configuration/src/v1/core.rs @@ -0,0 +1,88 @@ +use std::net::{IpAddr, Ipv4Addr}; + +use serde::{Deserialize, Serialize}; +use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; + +use crate::{AnnouncePolicy, LogLevel}; + +#[allow(clippy::struct_excessive_bools)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct Core { + /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, + /// `Debug` and `Trace`. Default is `Info`. + pub log_level: Option, + /// Tracker mode. See [`TrackerMode`] for more information. + pub mode: TrackerMode, + + // Database configuration + /// Database driver. Possible values are: `Sqlite3`, and `MySQL`. + pub db_driver: DatabaseDriver, + /// Database connection string. The format depends on the database driver. + /// For `Sqlite3`, the format is `path/to/database.db`, for example: + /// `./storage/tracker/lib/database/sqlite3.db`. 
+ /// For `Mysql`, the format is `mysql://db_user:db_user_password:port/db_name`, for + /// example: `root:password@localhost:3306/torrust`. + pub db_path: String, + + /// See [`AnnouncePolicy::interval`] + pub announce_interval: u32, + + /// See [`AnnouncePolicy::interval_min`] + pub min_announce_interval: u32, + /// Weather the tracker is behind a reverse proxy or not. + /// If the tracker is behind a reverse proxy, the `X-Forwarded-For` header + /// sent from the proxy will be used to get the client's IP address. + pub on_reverse_proxy: bool, + /// The external IP address of the tracker. If the client is using a + /// loopback IP address, this IP address will be used instead. If the peer + /// is using a loopback IP address, the tracker assumes that the peer is + /// in the same network as the tracker and will use the tracker's IP + /// address instead. + pub external_ip: Option, + /// Weather the tracker should collect statistics about tracker usage. + /// If enabled, the tracker will collect statistics like the number of + /// connections handled, the number of announce requests handled, etc. + /// Refer to the [`Tracker`](https://docs.rs/torrust-tracker) for more + /// information about the collected metrics. + pub tracker_usage_statistics: bool, + /// If enabled the tracker will persist the number of completed downloads. + /// That's how many times a torrent has been downloaded completely. + pub persistent_torrent_completed_stat: bool, + + // Cleanup job configuration + /// Maximum time in seconds that a peer can be inactive before being + /// considered an inactive peer. If a peer is inactive for more than this + /// time, it will be removed from the torrent peer list. + pub max_peer_timeout: u32, + /// Interval in seconds that the cleanup job will run to remove inactive + /// peers from the torrent peer list. + pub inactive_peer_cleanup_interval: u64, + /// If enabled, the tracker will remove torrents that have no peers. 
+ /// The clean up torrent job runs every `inactive_peer_cleanup_interval` + /// seconds and it removes inactive peers. Eventually, the peer list of a + /// torrent could be empty and the torrent will be removed if this option is + /// enabled. + pub remove_peerless_torrents: bool, +} + +impl Default for Core { + fn default() -> Self { + let announce_policy = AnnouncePolicy::default(); + + Self { + log_level: Some(LogLevel::Info), + mode: TrackerMode::Public, + db_driver: DatabaseDriver::Sqlite3, + db_path: String::from("./storage/tracker/lib/database/sqlite3.db"), + announce_interval: announce_policy.interval, + min_announce_interval: announce_policy.interval_min, + max_peer_timeout: 900, + on_reverse_proxy: false, + external_ip: Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), + tracker_usage_statistics: true, + persistent_torrent_completed_stat: false, + inactive_peer_cleanup_interval: 600, + remove_peerless_torrents: true, + } + } +} diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 643235c03..d19fedc87 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -78,7 +78,7 @@ //! //! Alternatively, you could setup a reverse proxy like Nginx or Apache to //! handle the SSL/TLS part and forward the requests to the tracker. If you do -//! that, you should set [`on_reverse_proxy`](crate::Configuration::on_reverse_proxy) +//! that, you should set [`on_reverse_proxy`](crate::v1::core::Core::on_reverse_proxy) //! to `true` in the configuration file. It's out of scope for this //! documentation to explain in detail how to setup a reverse proxy, but the //! configuration file should be something like this: @@ -230,86 +230,32 @@ //! [health_check_api] //! 
bind_address = "127.0.0.1:1313" //!``` +pub mod core; pub mod health_check_api; pub mod http_tracker; pub mod tracker_api; pub mod udp_tracker; use std::fs; -use std::net::{IpAddr, Ipv4Addr}; +use std::net::IpAddr; use figment::providers::{Env, Format, Serialized, Toml}; use figment::Figment; use serde::{Deserialize, Serialize}; -use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; +use self::core::Core; use self::health_check_api::HealthCheckApi; use self::http_tracker::HttpTracker; use self::tracker_api::HttpApi; use self::udp_tracker::UdpTracker; -use crate::{AnnouncePolicy, Error, Info, LogLevel}; +use crate::{Error, Info}; /// Core configuration for the tracker. -#[allow(clippy::struct_excessive_bools)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { - /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, - /// `Debug` and `Trace`. Default is `Info`. - pub log_level: Option, - /// Tracker mode. See [`TrackerMode`] for more information. - pub mode: TrackerMode, - - // Database configuration - /// Database driver. Possible values are: `Sqlite3`, and `MySQL`. - pub db_driver: DatabaseDriver, - /// Database connection string. The format depends on the database driver. - /// For `Sqlite3`, the format is `path/to/database.db`, for example: - /// `./storage/tracker/lib/database/sqlite3.db`. - /// For `Mysql`, the format is `mysql://db_user:db_user_password:port/db_name`, for - /// example: `root:password@localhost:3306/torrust`. - pub db_path: String, - - /// See [`AnnouncePolicy::interval`] - pub announce_interval: u32, - - /// See [`AnnouncePolicy::interval_min`] - pub min_announce_interval: u32, - /// Weather the tracker is behind a reverse proxy or not. - /// If the tracker is behind a reverse proxy, the `X-Forwarded-For` header - /// sent from the proxy will be used to get the client's IP address. - pub on_reverse_proxy: bool, - /// The external IP address of the tracker. 
If the client is using a - /// loopback IP address, this IP address will be used instead. If the peer - /// is using a loopback IP address, the tracker assumes that the peer is - /// in the same network as the tracker and will use the tracker's IP - /// address instead. - pub external_ip: Option, - /// Weather the tracker should collect statistics about tracker usage. - /// If enabled, the tracker will collect statistics like the number of - /// connections handled, the number of announce requests handled, etc. - /// Refer to the [`Tracker`](https://docs.rs/torrust-tracker) for more - /// information about the collected metrics. - pub tracker_usage_statistics: bool, - /// If enabled the tracker will persist the number of completed downloads. - /// That's how many times a torrent has been downloaded completely. - pub persistent_torrent_completed_stat: bool, - - // Cleanup job configuration - /// Maximum time in seconds that a peer can be inactive before being - /// considered an inactive peer. If a peer is inactive for more than this - /// time, it will be removed from the torrent peer list. - pub max_peer_timeout: u32, - /// Interval in seconds that the cleanup job will run to remove inactive - /// peers from the torrent peer list. - pub inactive_peer_cleanup_interval: u64, - /// If enabled, the tracker will remove torrents that have no peers. - /// The clean up torrent job runs every `inactive_peer_cleanup_interval` - /// seconds and it removes inactive peers. Eventually, the peer list of a - /// torrent could be empty and the torrent will be removed if this option is - /// enabled. - pub remove_peerless_torrents: bool, - - // Server jobs configuration + /// Core configuration. + #[serde(flatten)] + pub core: Core, /// The list of UDP trackers the tracker is running. Each UDP tracker /// represents a UDP server that the tracker is running and it has its own /// configuration. 
@@ -326,30 +272,13 @@ pub struct Configuration { impl Default for Configuration { fn default() -> Self { - let announce_policy = AnnouncePolicy::default(); - - let mut configuration = Configuration { - log_level: Some(LogLevel::Info), - mode: TrackerMode::Public, - db_driver: DatabaseDriver::Sqlite3, - db_path: String::from("./storage/tracker/lib/database/sqlite3.db"), - announce_interval: announce_policy.interval, - min_announce_interval: announce_policy.interval_min, - max_peer_timeout: 900, - on_reverse_proxy: false, - external_ip: Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), - tracker_usage_statistics: true, - persistent_torrent_completed_stat: false, - inactive_peer_cleanup_interval: 600, - remove_peerless_torrents: true, - udp_trackers: Vec::new(), - http_trackers: Vec::new(), + Self { + core: Core::default(), + udp_trackers: vec![UdpTracker::default()], + http_trackers: vec![HttpTracker::default()], http_api: HttpApi::default(), health_check_api: HealthCheckApi::default(), - }; - configuration.udp_trackers.push(UdpTracker::default()); - configuration.http_trackers.push(HttpTracker::default()); - configuration + } } } @@ -362,7 +291,7 @@ impl Configuration { /// and `None` otherwise. #[must_use] pub fn get_ext_ip(&self) -> Option { - self.external_ip.as_ref().map(|external_ip| *external_ip) + self.core.external_ip.as_ref().map(|external_ip| *external_ip) } /// Saves the default configuration at the given path. 
@@ -490,7 +419,7 @@ mod tests { fn configuration_should_contain_the_external_ip() { let configuration = Configuration::default(); - assert_eq!(configuration.external_ip, Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)))); + assert_eq!(configuration.core.external_ip, Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)))); } #[test] diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 0c7cc533a..86ed57b9e 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -27,10 +27,9 @@ pub fn ephemeral() -> Configuration { // todo: disable services that are not needed. // For example: a test for the UDP tracker should disable the API and HTTP tracker. - let mut config = Configuration { - log_level: Some(LogLevel::Off), // Change to `debug` for tests debugging - ..Default::default() - }; + let mut config = Configuration::default(); + + config.core.log_level = Some(LogLevel::Off); // Change to `debug` for tests debugging // Ephemeral socket address for API let api_port = 0u16; @@ -55,7 +54,7 @@ pub fn ephemeral() -> Configuration { let temp_directory = env::temp_dir(); let random_db_id = random::string(16); let temp_file = temp_directory.join(format!("data_{random_db_id}.db")); - temp_file.to_str().unwrap().clone_into(&mut config.db_path); + temp_file.to_str().unwrap().clone_into(&mut config.core.db_path); config } @@ -65,7 +64,7 @@ pub fn ephemeral() -> Configuration { pub fn ephemeral_with_reverse_proxy() -> Configuration { let mut cfg = ephemeral(); - cfg.on_reverse_proxy = true; + cfg.core.on_reverse_proxy = true; cfg } @@ -75,7 +74,7 @@ pub fn ephemeral_with_reverse_proxy() -> Configuration { pub fn ephemeral_without_reverse_proxy() -> Configuration { let mut cfg = ephemeral(); - cfg.on_reverse_proxy = false; + cfg.core.on_reverse_proxy = false; cfg } @@ -85,7 +84,7 @@ pub fn ephemeral_without_reverse_proxy() -> Configuration { pub fn ephemeral_mode_public() -> Configuration { let mut 
cfg = ephemeral(); - cfg.mode = TrackerMode::Public; + cfg.core.mode = TrackerMode::Public; cfg } @@ -95,7 +94,7 @@ pub fn ephemeral_mode_public() -> Configuration { pub fn ephemeral_mode_private() -> Configuration { let mut cfg = ephemeral(); - cfg.mode = TrackerMode::Private; + cfg.core.mode = TrackerMode::Private; cfg } @@ -105,7 +104,7 @@ pub fn ephemeral_mode_private() -> Configuration { pub fn ephemeral_mode_whitelisted() -> Configuration { let mut cfg = ephemeral(); - cfg.mode = TrackerMode::Listed; + cfg.core.mode = TrackerMode::Listed; cfg } @@ -115,7 +114,7 @@ pub fn ephemeral_mode_whitelisted() -> Configuration { pub fn ephemeral_mode_private_whitelisted() -> Configuration { let mut cfg = ephemeral(); - cfg.mode = TrackerMode::PrivateListed; + cfg.core.mode = TrackerMode::PrivateListed; cfg } @@ -125,7 +124,7 @@ pub fn ephemeral_mode_private_whitelisted() -> Configuration { pub fn ephemeral_with_external_ip(ip: IpAddr) -> Configuration { let mut cfg = ephemeral(); - cfg.external_ip = Some(ip); + cfg.core.external_ip = Some(ip); cfg } diff --git a/src/app.rs b/src/app.rs index 8bdc281a6..fcb01a696 100644 --- a/src/app.rs +++ b/src/app.rs @@ -67,7 +67,7 @@ pub async fn start(config: &Configuration, tracker: Arc) -> Vec) -> Vec 0 { - jobs.push(torrent_cleanup::start_job(config, &tracker)); + if config.core.inactive_peer_cleanup_interval > 0 { + jobs.push(torrent_cleanup::start_job(&config.core, &tracker)); } // Start Health Check API diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index 300813430..bd3b2e332 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -15,7 +15,7 @@ use std::sync::Arc; use chrono::Utc; use log::info; use tokio::task::JoinHandle; -use torrust_tracker_configuration::Configuration; +use torrust_tracker_configuration::v1::core::Core; use crate::core; @@ -25,7 +25,7 @@ use crate::core; /// /// Refer to [`torrust-tracker-configuration 
documentation`](https://docs.rs/torrust-tracker-configuration) for more info about that option. #[must_use] -pub fn start_job(config: &Configuration, tracker: &Arc) -> JoinHandle<()> { +pub fn start_job(config: &Core, tracker: &Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(tracker); let interval = config.inactive_peer_cleanup_interval; diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs index b71079b57..5c7e93811 100644 --- a/src/bootstrap/logging.rs +++ b/src/bootstrap/logging.rs @@ -19,7 +19,7 @@ static INIT: Once = Once::new(); /// It redirects the log info to the standard output with the log level defined in the configuration pub fn setup(cfg: &Configuration) { - let level = config_level_or_default(&cfg.log_level); + let level = config_level_or_default(&cfg.core.log_level); if level == log::LevelFilter::Off { return; diff --git a/src/core/mod.rs b/src/core/mod.rs index 83813a863..dbaf27e22 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -544,13 +544,13 @@ impl Tracker { stats_event_sender: Option>, stats_repository: statistics::Repo, ) -> Result { - let database = Arc::new(databases::driver::build(&config.db_driver, &config.db_path)?); + let database = Arc::new(databases::driver::build(&config.core.db_driver, &config.core.db_path)?); - let mode = config.mode; + let mode = config.core.mode; Ok(Tracker { //config, - announce_policy: AnnouncePolicy::new(config.announce_interval, config.min_announce_interval), + announce_policy: AnnouncePolicy::new(config.core.announce_interval, config.core.min_announce_interval), mode, keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), @@ -560,11 +560,11 @@ impl Tracker { database, external_ip: config.get_ext_ip(), policy: TrackerPolicy::new( - config.remove_peerless_torrents, - config.max_peer_timeout, - config.persistent_torrent_completed_stat, + config.core.remove_peerless_torrents, + 
config.core.max_peer_timeout, + config.core.persistent_torrent_completed_stat, ), - on_reverse_proxy: config.on_reverse_proxy, + on_reverse_proxy: config.core.on_reverse_proxy, }) } @@ -1033,7 +1033,7 @@ mod tests { pub fn tracker_persisting_torrents_in_database() -> Tracker { let mut configuration = configuration::ephemeral(); - configuration.persistent_torrent_completed_stat = true; + configuration.core.persistent_torrent_completed_stat = true; tracker_factory(&configuration) } diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index 76c6a36f6..dec143568 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -21,7 +21,7 @@ use crate::core::Tracker; #[must_use] pub fn tracker_factory(config: &Configuration) -> Tracker { // Initialize statistics - let (stats_event_sender, stats_repository) = statistics::setup::factory(config.tracker_usage_statistics); + let (stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); // Initialize Torrust tracker match Tracker::new(&Arc::new(config), stats_event_sender, stats_repository) { diff --git a/src/servers/http/v1/extractors/client_ip_sources.rs b/src/servers/http/v1/extractors/client_ip_sources.rs index 18eff26b3..1c6cdc636 100644 --- a/src/servers/http/v1/extractors/client_ip_sources.rs +++ b/src/servers/http/v1/extractors/client_ip_sources.rs @@ -16,7 +16,7 @@ //! the tracker will use the `X-Forwarded-For` header to get the client IP //! address. //! -//! See [`torrust_tracker_configuration::Configuration::on_reverse_proxy`]. +//! See [`torrust_tracker_configuration::Configuration::core.on_reverse_proxy`]. //! //! The tracker can also be configured to run without a reverse proxy. In this //! case, the tracker will use the IP address from the connection info. 
diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 5a0ae40e4..9529f954c 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -145,7 +145,7 @@ mod tests { fn tracker_with_an_ipv6_external_ip(stats_event_sender: Box) -> Tracker { let mut configuration = configuration::ephemeral(); - configuration.external_ip = Some(IpAddr::V6(Ipv6Addr::new( + configuration.core.external_ip = Some(IpAddr::V6(Ipv6Addr::new( 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, ))); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index d8ca4680c..d6d7a1065 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -426,7 +426,7 @@ mod tests { } pub fn with_external_ip(mut self, external_ip: &str) -> Self { - self.configuration.external_ip = Some(external_ip.to_owned().parse().expect("valid IP address")); + self.configuration.core.external_ip = Some(external_ip.to_owned().parse().expect("valid IP address")); self } From ae77ebc50f2286e4c23b3f539759ebe4d5a908d1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 May 2024 17:05:48 +0100 Subject: [PATCH 0840/1003] refactor: tracker core service only needs the core config --- src/core/mod.rs | 21 +++++------ src/core/services/mod.rs | 2 +- src/servers/http/v1/services/announce.rs | 22 +++++++++--- src/servers/http/v1/services/scrape.rs | 40 ++++++++++++++++----- src/servers/udp/handlers.rs | 44 ++++++++++++++++++++---- 5 files changed, 98 insertions(+), 31 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index dbaf27e22..18a6028f7 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -445,7 +445,8 @@ use derive_more::Constructor; use log::debug; use tokio::sync::mpsc::error::SendError; use torrust_tracker_clock::clock::Time; -use torrust_tracker_configuration::{AnnouncePolicy, Configuration, TrackerPolicy, TORRENT_PEERS_LIMIT}; +use 
torrust_tracker_configuration::v1::core::Core; +use torrust_tracker_configuration::{AnnouncePolicy, TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; @@ -540,17 +541,17 @@ impl Tracker { /// /// Will return a `databases::error::Error` if unable to connect to database. The `Tracker` is responsible for the persistence. pub fn new( - config: &Configuration, + config: &Core, stats_event_sender: Option>, stats_repository: statistics::Repo, ) -> Result { - let database = Arc::new(databases::driver::build(&config.core.db_driver, &config.core.db_path)?); + let database = Arc::new(databases::driver::build(&config.db_driver, &config.db_path)?); - let mode = config.core.mode; + let mode = config.mode; Ok(Tracker { //config, - announce_policy: AnnouncePolicy::new(config.core.announce_interval, config.core.min_announce_interval), + announce_policy: AnnouncePolicy::new(config.announce_interval, config.min_announce_interval), mode, keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), @@ -558,13 +559,13 @@ impl Tracker { stats_event_sender, stats_repository, database, - external_ip: config.get_ext_ip(), + external_ip: config.external_ip, policy: TrackerPolicy::new( - config.core.remove_peerless_torrents, - config.core.max_peer_timeout, - config.core.persistent_torrent_completed_stat, + config.remove_peerless_torrents, + config.max_peer_timeout, + config.persistent_torrent_completed_stat, ), - on_reverse_proxy: config.core.on_reverse_proxy, + on_reverse_proxy: config.on_reverse_proxy, }) } diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index dec143568..166f40df4 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -24,7 +24,7 @@ pub fn tracker_factory(config: &Configuration) -> Tracker { let 
(stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); // Initialize Torrust tracker - match Tracker::new(&Arc::new(config), stats_event_sender, stats_repository) { + match Tracker::new(&Arc::new(config).core, stats_event_sender, stats_repository) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 9529f954c..e3bef3973 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -135,8 +135,14 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = - Arc::new(Tracker::new(&configuration::ephemeral(), Some(stats_event_sender), statistics::Repo::new()).unwrap()); + let tracker = Arc::new( + Tracker::new( + &configuration::ephemeral().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); let mut peer = sample_peer_using_ipv4(); @@ -149,7 +155,7 @@ mod tests { 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, ))); - Tracker::new(&configuration, Some(stats_event_sender), statistics::Repo::new()).unwrap() + Tracker::new(&configuration.core, Some(stats_event_sender), statistics::Repo::new()).unwrap() } fn peer_with_the_ipv4_loopback_ip() -> peer::Peer { @@ -194,8 +200,14 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = - Arc::new(Tracker::new(&configuration::ephemeral(), Some(stats_event_sender), statistics::Repo::new()).unwrap()); + let tracker = Arc::new( + Tracker::new( + &configuration::ephemeral().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); let mut peer = sample_peer_using_ipv6(); diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 18b57f479..a6a40186f 
100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -146,8 +146,14 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = - Arc::new(Tracker::new(&configuration::ephemeral(), Some(stats_event_sender), statistics::Repo::new()).unwrap()); + let tracker = Arc::new( + Tracker::new( + &configuration::ephemeral().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); @@ -164,8 +170,14 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = - Arc::new(Tracker::new(&configuration::ephemeral(), Some(stats_event_sender), statistics::Repo::new()).unwrap()); + let tracker = Arc::new( + Tracker::new( + &configuration::ephemeral().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); @@ -217,8 +229,14 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = - Arc::new(Tracker::new(&configuration::ephemeral(), Some(stats_event_sender), statistics::Repo::new()).unwrap()); + let tracker = Arc::new( + Tracker::new( + &configuration::ephemeral().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); @@ -235,8 +253,14 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = - Arc::new(Tracker::new(&configuration::ephemeral(), Some(stats_event_sender), statistics::Repo::new()).unwrap()); + let tracker = Arc::new( + Tracker::new( + &configuration::ephemeral().core, + Some(stats_event_sender), 
+ statistics::Repo::new(), + ) + .unwrap(), + ); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index d6d7a1065..fee00a0bd 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -506,7 +506,12 @@ mod tests { let client_socket_address = sample_ipv4_socket_address(); let torrent_tracker = Arc::new( - core::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), ); handle_connect(client_socket_address, &sample_connect_request(), &torrent_tracker) .await @@ -524,7 +529,12 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let torrent_tracker = Arc::new( - core::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), ); handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), &torrent_tracker) .await @@ -768,7 +778,12 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - core::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), ); handle_announce( @@ -997,7 +1012,12 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - core::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), ); let remote_addr = 
sample_ipv6_remote_addr(); @@ -1027,7 +1047,7 @@ mod tests { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); let tracker = - Arc::new(core::Tracker::new(&configuration, Some(stats_event_sender), stats_repository).unwrap()); + Arc::new(core::Tracker::new(&configuration.core, Some(stats_event_sender), stats_repository).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -1305,7 +1325,12 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( - core::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) @@ -1337,7 +1362,12 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( - core::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) From 445bd5361607d8dcf78fcab48450a8d10c6753b3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 13 May 2024 14:10:09 +0100 Subject: [PATCH 0841/1003] feat: define only non-defaults in toml config templates After implementing the configuration with Figment, it's now possible to omit values if they have a default value. Therefore we don't need to add all options in templates. We only need to add values that are overwriting deffault values. 
--- packages/configuration/src/lib.rs | 28 ++++++- packages/configuration/src/v1/core.rs | 83 ++++++++++++++++--- .../configuration/src/v1/health_check_api.rs | 9 +- packages/configuration/src/v1/http_tracker.rs | 24 +++++- packages/configuration/src/v1/tracker_api.rs | 35 ++++++-- packages/configuration/src/v1/udp_tracker.rs | 16 +++- .../config/tracker.container.mysql.toml | 31 ------- .../config/tracker.container.sqlite3.toml | 32 ------- .../config/tracker.development.sqlite3.toml | 36 -------- .../config/tracker.e2e.container.sqlite3.toml | 28 ------- .../config/tracker.udp.benchmarking.toml | 32 ------- 11 files changed, 169 insertions(+), 185 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index fdc021480..c393623df 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -120,12 +120,22 @@ pub struct AnnouncePolicy { impl Default for AnnouncePolicy { fn default() -> Self { Self { - interval: 120, - interval_min: 120, + interval: Self::default_interval(), + interval_min: Self::default_interval_min(), } } } +impl AnnouncePolicy { + fn default_interval() -> u32 { + 120 + } + + fn default_interval_min() -> u32 { + 120 + } +} + /// Errors that can occur when loading the configuration. #[derive(Error, Debug)] pub enum Error { @@ -166,12 +176,26 @@ impl From for Error { pub struct TslConfig { /// Path to the SSL certificate file. #[serde_as(as = "NoneAsEmptyString")] + #[serde(default = "TslConfig::default_ssl_cert_path")] pub ssl_cert_path: Option, /// Path to the SSL key file. 
#[serde_as(as = "NoneAsEmptyString")] + #[serde(default = "TslConfig::default_ssl_key_path")] pub ssl_key_path: Option, } +impl TslConfig { + #[allow(clippy::unnecessary_wraps)] + fn default_ssl_cert_path() -> Option { + Some(Utf8PathBuf::new()) + } + + #[allow(clippy::unnecessary_wraps)] + fn default_ssl_key_path() -> Option { + Some(Utf8PathBuf::new()) + } +} + #[derive(Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Debug, Hash, Clone)] #[serde(rename_all = "lowercase")] pub enum LogLevel { diff --git a/packages/configuration/src/v1/core.rs b/packages/configuration/src/v1/core.rs index ed9074194..5d00c67ab 100644 --- a/packages/configuration/src/v1/core.rs +++ b/packages/configuration/src/v1/core.rs @@ -10,58 +10,71 @@ use crate::{AnnouncePolicy, LogLevel}; pub struct Core { /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, /// `Debug` and `Trace`. Default is `Info`. + #[serde(default = "Core::default_log_level")] pub log_level: Option, /// Tracker mode. See [`TrackerMode`] for more information. + #[serde(default = "Core::default_mode")] pub mode: TrackerMode, // Database configuration /// Database driver. Possible values are: `Sqlite3`, and `MySQL`. + #[serde(default = "Core::default_db_driver")] pub db_driver: DatabaseDriver, /// Database connection string. The format depends on the database driver. /// For `Sqlite3`, the format is `path/to/database.db`, for example: /// `./storage/tracker/lib/database/sqlite3.db`. /// For `Mysql`, the format is `mysql://db_user:db_user_password:port/db_name`, for /// example: `root:password@localhost:3306/torrust`. + #[serde(default = "Core::default_db_path")] pub db_path: String, /// See [`AnnouncePolicy::interval`] + #[serde(default = "AnnouncePolicy::default_interval")] pub announce_interval: u32, /// See [`AnnouncePolicy::interval_min`] + #[serde(default = "AnnouncePolicy::default_interval_min")] pub min_announce_interval: u32, /// Weather the tracker is behind a reverse proxy or not. 
/// If the tracker is behind a reverse proxy, the `X-Forwarded-For` header /// sent from the proxy will be used to get the client's IP address. + #[serde(default = "Core::default_on_reverse_proxy")] pub on_reverse_proxy: bool, /// The external IP address of the tracker. If the client is using a /// loopback IP address, this IP address will be used instead. If the peer /// is using a loopback IP address, the tracker assumes that the peer is /// in the same network as the tracker and will use the tracker's IP /// address instead. + #[serde(default = "Core::default_external_ip")] pub external_ip: Option, /// Weather the tracker should collect statistics about tracker usage. /// If enabled, the tracker will collect statistics like the number of /// connections handled, the number of announce requests handled, etc. /// Refer to the [`Tracker`](https://docs.rs/torrust-tracker) for more /// information about the collected metrics. + #[serde(default = "Core::default_tracker_usage_statistics")] pub tracker_usage_statistics: bool, /// If enabled the tracker will persist the number of completed downloads. /// That's how many times a torrent has been downloaded completely. + #[serde(default = "Core::default_persistent_torrent_completed_stat")] pub persistent_torrent_completed_stat: bool, // Cleanup job configuration /// Maximum time in seconds that a peer can be inactive before being /// considered an inactive peer. If a peer is inactive for more than this /// time, it will be removed from the torrent peer list. + #[serde(default = "Core::default_max_peer_timeout")] pub max_peer_timeout: u32, /// Interval in seconds that the cleanup job will run to remove inactive /// peers from the torrent peer list. + #[serde(default = "Core::default_inactive_peer_cleanup_interval")] pub inactive_peer_cleanup_interval: u64, /// If enabled, the tracker will remove torrents that have no peers. 
/// The clean up torrent job runs every `inactive_peer_cleanup_interval` /// seconds and it removes inactive peers. Eventually, the peer list of a /// torrent could be empty and the torrent will be removed if this option is /// enabled. + #[serde(default = "Core::default_remove_peerless_torrents")] pub remove_peerless_torrents: bool, } @@ -70,19 +83,67 @@ impl Default for Core { let announce_policy = AnnouncePolicy::default(); Self { - log_level: Some(LogLevel::Info), - mode: TrackerMode::Public, - db_driver: DatabaseDriver::Sqlite3, - db_path: String::from("./storage/tracker/lib/database/sqlite3.db"), + log_level: Self::default_log_level(), + mode: Self::default_mode(), + db_driver: Self::default_db_driver(), + db_path: Self::default_db_path(), announce_interval: announce_policy.interval, min_announce_interval: announce_policy.interval_min, - max_peer_timeout: 900, - on_reverse_proxy: false, - external_ip: Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), - tracker_usage_statistics: true, - persistent_torrent_completed_stat: false, - inactive_peer_cleanup_interval: 600, - remove_peerless_torrents: true, + max_peer_timeout: Self::default_max_peer_timeout(), + on_reverse_proxy: Self::default_on_reverse_proxy(), + external_ip: Self::default_external_ip(), + tracker_usage_statistics: Self::default_tracker_usage_statistics(), + persistent_torrent_completed_stat: Self::default_persistent_torrent_completed_stat(), + inactive_peer_cleanup_interval: Self::default_inactive_peer_cleanup_interval(), + remove_peerless_torrents: Self::default_remove_peerless_torrents(), } } } + +impl Core { + #[allow(clippy::unnecessary_wraps)] + fn default_log_level() -> Option { + Some(LogLevel::Info) + } + + fn default_mode() -> TrackerMode { + TrackerMode::Public + } + + fn default_db_driver() -> DatabaseDriver { + DatabaseDriver::Sqlite3 + } + + fn default_db_path() -> String { + String::from("./storage/tracker/lib/database/sqlite3.db") + } + + fn default_on_reverse_proxy() -> bool { + false + 
} + + #[allow(clippy::unnecessary_wraps)] + fn default_external_ip() -> Option { + Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) + } + + fn default_tracker_usage_statistics() -> bool { + true + } + + fn default_persistent_torrent_completed_stat() -> bool { + false + } + + fn default_max_peer_timeout() -> u32 { + 900 + } + + fn default_inactive_peer_cleanup_interval() -> u64 { + 600 + } + + fn default_remove_peerless_torrents() -> bool { + true + } +} diff --git a/packages/configuration/src/v1/health_check_api.rs b/packages/configuration/src/v1/health_check_api.rs index b8bfd2c1b..61178fa80 100644 --- a/packages/configuration/src/v1/health_check_api.rs +++ b/packages/configuration/src/v1/health_check_api.rs @@ -11,13 +11,20 @@ pub struct HealthCheckApi { /// The format is `ip:port`, for example `127.0.0.1:1313`. If you want to /// listen to all interfaces, use `0.0.0.0`. If you want the operating /// system to choose a random port, use port `0`. + #[serde(default = "HealthCheckApi::default_bind_address")] pub bind_address: SocketAddr, } impl Default for HealthCheckApi { fn default() -> Self { Self { - bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1313), + bind_address: Self::default_bind_address(), } } } + +impl HealthCheckApi { + fn default_bind_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1313) + } +} diff --git a/packages/configuration/src/v1/http_tracker.rs b/packages/configuration/src/v1/http_tracker.rs index 57b2d83a1..b1fe1437b 100644 --- a/packages/configuration/src/v1/http_tracker.rs +++ b/packages/configuration/src/v1/http_tracker.rs @@ -10,26 +10,44 @@ use crate::TslConfig; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct HttpTracker { /// Weather the HTTP tracker is enabled or not. + #[serde(default = "HttpTracker::default_enabled")] pub enabled: bool, /// The address the tracker will bind to. /// The format is `ip:port`, for example `0.0.0.0:6969`. 
If you want to /// listen to all interfaces, use `0.0.0.0`. If you want the operating /// system to choose a random port, use port `0`. + #[serde(default = "HttpTracker::default_bind_address")] pub bind_address: SocketAddr, /// Weather the HTTP tracker will use SSL or not. + #[serde(default = "HttpTracker::default_ssl_enabled")] pub ssl_enabled: bool, /// TSL config. Only used if `ssl_enabled` is true. #[serde(flatten)] + #[serde(default = "TslConfig::default")] pub tsl_config: TslConfig, } impl Default for HttpTracker { fn default() -> Self { Self { - enabled: false, - bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 7070), - ssl_enabled: false, + enabled: Self::default_enabled(), + bind_address: Self::default_bind_address(), + ssl_enabled: Self::default_ssl_enabled(), tsl_config: TslConfig::default(), } } } + +impl HttpTracker { + fn default_enabled() -> bool { + false + } + + fn default_bind_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 7070) + } + + fn default_ssl_enabled() -> bool { + false + } +} diff --git a/packages/configuration/src/v1/tracker_api.rs b/packages/configuration/src/v1/tracker_api.rs index 5089c496a..c2e3e5ee9 100644 --- a/packages/configuration/src/v1/tracker_api.rs +++ b/packages/configuration/src/v1/tracker_api.rs @@ -13,40 +13,61 @@ pub type AccessTokens = HashMap; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct HttpApi { /// Weather the HTTP API is enabled or not. + #[serde(default = "HttpApi::default_enabled")] pub enabled: bool, /// The address the tracker will bind to. /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to /// listen to all interfaces, use `0.0.0.0`. If you want the operating /// system to choose a random port, use port `0`. + #[serde(default = "HttpApi::default_bind_address")] pub bind_address: SocketAddr, /// Weather the HTTP API will use SSL or not. 
+ #[serde(default = "HttpApi::default_ssl_enabled")] pub ssl_enabled: bool, /// TSL config. Only used if `ssl_enabled` is true. #[serde(flatten)] + #[serde(default = "TslConfig::default")] pub tsl_config: TslConfig, /// Access tokens for the HTTP API. The key is a label identifying the /// token and the value is the token itself. The token is used to /// authenticate the user. All tokens are valid for all endpoints and have /// all permissions. + #[serde(default = "HttpApi::default_access_tokens")] pub access_tokens: AccessTokens, } impl Default for HttpApi { fn default() -> Self { Self { - enabled: true, - bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1212), - ssl_enabled: false, + enabled: Self::default_enabled(), + bind_address: Self::default_bind_address(), + ssl_enabled: Self::default_ssl_enabled(), tsl_config: TslConfig::default(), - access_tokens: [(String::from("admin"), String::from("MyAccessToken"))] - .iter() - .cloned() - .collect(), + access_tokens: Self::default_access_tokens(), } } } impl HttpApi { + fn default_enabled() -> bool { + true + } + + fn default_bind_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1212) + } + + fn default_ssl_enabled() -> bool { + false + } + + fn default_access_tokens() -> AccessTokens { + [(String::from("admin"), String::from("MyAccessToken"))] + .iter() + .cloned() + .collect() + } + pub fn override_admin_token(&mut self, api_admin_token: &str) { self.access_tokens.insert("admin".to_string(), api_admin_token.to_string()); } diff --git a/packages/configuration/src/v1/udp_tracker.rs b/packages/configuration/src/v1/udp_tracker.rs index 1f772164e..f8387202e 100644 --- a/packages/configuration/src/v1/udp_tracker.rs +++ b/packages/configuration/src/v1/udp_tracker.rs @@ -5,18 +5,30 @@ use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct UdpTracker { /// Weather the UDP tracker is enabled or not. 
+ #[serde(default = "UdpTracker::default_enabled")] pub enabled: bool, /// The address the tracker will bind to. /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to /// listen to all interfaces, use `0.0.0.0`. If you want the operating /// system to choose a random port, use port `0`. + #[serde(default = "UdpTracker::default_bind_address")] pub bind_address: SocketAddr, } impl Default for UdpTracker { fn default() -> Self { Self { - enabled: false, - bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 6969), + enabled: Self::default_enabled(), + bind_address: Self::default_bind_address(), } } } + +impl UdpTracker { + fn default_enabled() -> bool { + false + } + + fn default_bind_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 6969) + } +} diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml index f2db06228..7678327ab 100644 --- a/share/default/config/tracker.container.mysql.toml +++ b/share/default/config/tracker.container.mysql.toml @@ -1,41 +1,10 @@ -announce_interval = 120 db_driver = "MySQL" db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" -external_ip = "0.0.0.0" -inactive_peer_cleanup_interval = 600 -log_level = "info" -max_peer_timeout = 900 -min_announce_interval = 120 -mode = "public" -on_reverse_proxy = false -persistent_torrent_completed_stat = false -remove_peerless_torrents = true -tracker_usage_statistics = true - -[[udp_trackers]] -bind_address = "0.0.0.0:6969" -enabled = false [[http_trackers]] -bind_address = "0.0.0.0:7070" -enabled = false ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" -ssl_enabled = false ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" [http_api] -bind_address = "0.0.0.0:1212" -enabled = true ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" -ssl_enabled = false ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" - 
-[http_api.access_tokens] -# Please override the admin token setting the environmental variable: -# `TORRUST_TRACKER__HTTP_API__ACCESS_TOKENS__ADMIN` -# The old variable name is deprecated: -# `TORRUST_TRACKER_API_ADMIN_TOKEN` -admin = "MyAccessToken" - -[health_check_api] -bind_address = "127.0.0.1:1313" diff --git a/share/default/config/tracker.container.sqlite3.toml b/share/default/config/tracker.container.sqlite3.toml index 4a3ba03b6..da8259286 100644 --- a/share/default/config/tracker.container.sqlite3.toml +++ b/share/default/config/tracker.container.sqlite3.toml @@ -1,41 +1,9 @@ -announce_interval = 120 -db_driver = "Sqlite3" db_path = "/var/lib/torrust/tracker/database/sqlite3.db" -external_ip = "0.0.0.0" -inactive_peer_cleanup_interval = 600 -log_level = "info" -max_peer_timeout = 900 -min_announce_interval = 120 -mode = "public" -on_reverse_proxy = false -persistent_torrent_completed_stat = false -remove_peerless_torrents = true -tracker_usage_statistics = true - -[[udp_trackers]] -bind_address = "0.0.0.0:6969" -enabled = false [[http_trackers]] -bind_address = "0.0.0.0:7070" -enabled = false ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" -ssl_enabled = false ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" [http_api] -bind_address = "0.0.0.0:1212" -enabled = true ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" -ssl_enabled = false ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" - -[http_api.access_tokens] -# Please override the admin token setting the environmental variable: -# `TORRUST_TRACKER__HTTP_API__ACCESS_TOKENS__ADMIN` -# The old variable name is deprecated: -# `TORRUST_TRACKER_API_ADMIN_TOKEN` -admin = "MyAccessToken" - -[health_check_api] -bind_address = "127.0.0.1:1313" diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 62e5b478e..bf6478492 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ 
b/share/default/config/tracker.development.sqlite3.toml @@ -1,41 +1,5 @@ -announce_interval = 120 -db_driver = "Sqlite3" -db_path = "./storage/tracker/lib/database/sqlite3.db" -external_ip = "0.0.0.0" -inactive_peer_cleanup_interval = 600 -log_level = "info" -max_peer_timeout = 900 -min_announce_interval = 120 -mode = "public" -on_reverse_proxy = false -persistent_torrent_completed_stat = false -remove_peerless_torrents = true -tracker_usage_statistics = true - [[udp_trackers]] -bind_address = "0.0.0.0:6969" enabled = true [[http_trackers]] -bind_address = "0.0.0.0:7070" -enabled = true -ssl_cert_path = "" -ssl_enabled = false -ssl_key_path = "" - -[http_api] -bind_address = "127.0.0.1:1212" enabled = true -ssl_cert_path = "" -ssl_enabled = false -ssl_key_path = "" - -[http_api.access_tokens] -# Please override the admin token setting the environmental variable: -# `TORRUST_TRACKER__HTTP_API__ACCESS_TOKENS__ADMIN` -# The old variable name is deprecated: -# `TORRUST_TRACKER_API_ADMIN_TOKEN` -admin = "MyAccessToken" - -[health_check_api] -bind_address = "127.0.0.1:1313" diff --git a/share/default/config/tracker.e2e.container.sqlite3.toml b/share/default/config/tracker.e2e.container.sqlite3.toml index 3738704b5..e7d8fa279 100644 --- a/share/default/config/tracker.e2e.container.sqlite3.toml +++ b/share/default/config/tracker.e2e.container.sqlite3.toml @@ -1,41 +1,13 @@ -announce_interval = 120 -db_driver = "Sqlite3" db_path = "/var/lib/torrust/tracker/database/sqlite3.db" -external_ip = "0.0.0.0" -inactive_peer_cleanup_interval = 600 -log_level = "info" -max_peer_timeout = 900 -min_announce_interval = 120 -mode = "public" -on_reverse_proxy = false -persistent_torrent_completed_stat = false -remove_peerless_torrents = true -tracker_usage_statistics = true [[udp_trackers]] -bind_address = "0.0.0.0:6969" enabled = true [[http_trackers]] -bind_address = "0.0.0.0:7070" enabled = true ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" -ssl_enabled = false 
ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" [http_api] -bind_address = "0.0.0.0:1212" -enabled = true ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" -ssl_enabled = false ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" - -[http_api.access_tokens] -# Please override the admin token setting the environmental variable: -# `TORRUST_TRACKER__HTTP_API__ACCESS_TOKENS__ADMIN` -# The old variable name is deprecated: -# `TORRUST_TRACKER_API_ADMIN_TOKEN` -admin = "MyAccessToken" - -[health_check_api] -bind_address = "0.0.0.0:1313" diff --git a/share/default/config/tracker.udp.benchmarking.toml b/share/default/config/tracker.udp.benchmarking.toml index 1e951d8fc..00f62628b 100644 --- a/share/default/config/tracker.udp.benchmarking.toml +++ b/share/default/config/tracker.udp.benchmarking.toml @@ -1,41 +1,9 @@ -announce_interval = 120 -db_driver = "Sqlite3" -db_path = "./storage/tracker/lib/database/sqlite3.db" -external_ip = "0.0.0.0" -inactive_peer_cleanup_interval = 600 log_level = "error" -max_peer_timeout = 900 -min_announce_interval = 120 -mode = "public" -on_reverse_proxy = false -persistent_torrent_completed_stat = false remove_peerless_torrents = false tracker_usage_statistics = false [[udp_trackers]] -bind_address = "0.0.0.0:6969" enabled = true -[[http_trackers]] -bind_address = "0.0.0.0:7070" -enabled = false -ssl_cert_path = "" -ssl_enabled = false -ssl_key_path = "" - [http_api] -bind_address = "127.0.0.1:1212" enabled = false -ssl_cert_path = "" -ssl_enabled = false -ssl_key_path = "" - -[http_api.access_tokens] -# Please override the admin token setting the environmental variable: -# `TORRUST_TRACKER__HTTP_API__ACCESS_TOKENS__ADMIN` -# The old variable name is deprecated: -# `TORRUST_TRACKER_API_ADMIN_TOKEN` -admin = "MyAccessToken" - -[health_check_api] -bind_address = "127.0.0.1:1313" From cf1bfb11ad8a2d92d9e47b065ab2580e596c23ce Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 14 May 2024 07:35:38 +0100 Subject: 
[PATCH 0842/1003] chore(deps): update dependencies ```ouput Updating crates.io index Locking 13 packages to latest compatible versions Removing allocator-api2 v0.2.18 Updating async-channel v2.2.1 -> v2.3.0 Updating async-compression v0.4.9 -> v0.4.10 Updating brotli v5.0.0 -> v6.0.0 Updating bytemuck v1.15.0 -> v1.16.0 Updating errno v0.3.8 -> v0.3.9 Updating hashlink v0.9.0 -> v0.9.1 Updating piper v0.2.1 -> v0.2.2 Updating rustls-pki-types v1.6.0 -> v1.7.0 Updating serde v1.0.200 -> v1.0.201 Updating serde_derive v1.0.200 -> v1.0.201 Updating serde_json v1.0.116 -> v1.0.117 Updating syn v2.0.61 -> v2.0.63 Updating waker-fn v1.1.1 -> v1.2.0 ``` --- Cargo.lock | 121 +++++++++++++++++++++++++---------------------------- 1 file changed, 57 insertions(+), 64 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e54601bcf..446477aac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -64,12 +64,6 @@ dependencies = [ "alloc-no-stdlib", ] -[[package]] -name = "allocator-api2" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" - [[package]] name = "android-tzdata" version = "0.1.1" @@ -207,9 +201,9 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.2.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136d4d23bcc79e27423727b36823d86233aad06dfea531837b038394d11e9928" +checksum = "9f2776ead772134d55b62dd45e59a79e21612d85d0af729b8b7d3967d601a62a" dependencies = [ "concurrent-queue", "event-listener 5.3.0", @@ -220,9 +214,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e9eabd7a98fe442131a17c316bd9349c43695e49e730c3c8e12cfb5f4da2693" +checksum = "9c90a406b4495d129f00461241616194cb8a032c8d1c53c657f0961d5f8e0498" dependencies = [ "brotli", "flate2", @@ -253,7 +247,7 @@ version = 
"2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ - "async-channel 2.2.1", + "async-channel 2.3.0", "async-executor", "async-io 2.3.2", "async-lock 3.3.0", @@ -363,7 +357,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -486,7 +480,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -573,7 +567,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -615,7 +609,7 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "495f7104e962b7356f0aeb34247aca1fe7d2e783b346582db7f2904cb5717e88" dependencies = [ - "async-channel 2.2.1", + "async-channel 2.3.0", "async-lock 3.3.0", "async-task", "futures-io", @@ -643,15 +637,15 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", "syn_derive", ] [[package]] name = "brotli" -version = "5.0.0" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19483b140a7ac7174d34b5a581b406c64f84da5409d3e09cf4fff604f9270e67" +checksum = "74f7971dbd9326d58187408ab83117d8ac1bb9c17b085fdacd1cf2f598719b6b" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -704,9 +698,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15" +checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5" [[package]] name = "byteorder" @@ -858,7 +852,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -1085,7 +1079,7 @@ dependencies = [ 
"proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -1096,7 +1090,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -1143,7 +1137,7 @@ checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -1195,9 +1189,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", @@ -1395,7 +1389,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -1407,7 +1401,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -1419,7 +1413,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -1512,7 +1506,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -1650,14 +1644,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash 0.8.11", - "allocator-api2", ] [[package]] name = "hashlink" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee" +checksum = 
"6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ "hashbrown 0.14.5", ] @@ -2208,7 +2201,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -2259,7 +2252,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", "termcolor", "thiserror", ] @@ -2458,7 +2451,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -2528,7 +2521,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -2602,7 +2595,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -2619,9 +2612,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "piper" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +checksum = "464db0c665917b13ebb5d453ccdec4add5658ee1adc7affc7677615356a8afaf" dependencies = [ "atomic-waker", "fastrand 2.1.0", @@ -2791,7 +2784,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", "version_check", "yansi", ] @@ -3097,7 +3090,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.61", + "syn 2.0.63", "unicode-ident", ] @@ -3203,9 +3196,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51f344d206c5e1b010eec27349b815a4805f70a778895959d70b74b9b529b30a" +checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" 
[[package]] name = "rustls-webpki" @@ -3315,9 +3308,9 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.200" +version = "1.0.201" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc6f9cc94d67c0e21aaf7eda3a010fd3af78ebf6e096aa6e2e13c79749cce4f" +checksum = "780f1cebed1629e4753a1a38a3c72d30b97ec044f0aef68cb26650a3c5cf363c" dependencies = [ "serde_derive", ] @@ -3343,13 +3336,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.200" +version = "1.0.201" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "856f046b9400cee3c8c94ed572ecdb752444c24528c035cd35882aad6f492bcb" +checksum = "c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -3367,9 +3360,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.116" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" +checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" dependencies = [ "indexmap 2.2.6", "itoa", @@ -3395,7 +3388,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -3446,7 +3439,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -3580,9 +3573,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.61" +version = "2.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c993ed8ccba56ae856363b1845da7266a7cb78e1d146c8a32d54b45a8b831fc9" +checksum = "bf5be731623ca1a1fb7d8be6f261a3be6d3e2337b8a1f97be944d020c8fcb704" dependencies = [ "proc-macro2", "quote", @@ -3598,7 +3591,7 @@ dependencies = [ 
"proc-macro-error", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -3701,7 +3694,7 @@ checksum = "e2470041c06ec3ac1ab38d0356a6119054dedaea53e12fbefc0de730a1c08524" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -3786,7 +3779,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -4095,7 +4088,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] @@ -4213,9 +4206,9 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "waker-fn" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" +checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" [[package]] name = "walkdir" @@ -4263,7 +4256,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", "wasm-bindgen-shared", ] @@ -4297,7 +4290,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4558,7 +4551,7 @@ checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.61", + "syn 2.0.63", ] [[package]] From da6a21eac969c96005f2b5b4deb0b1ea91926951 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 14 May 2024 10:22:56 +0100 Subject: [PATCH 0843/1003] refactor: [#855] show toml file location in Figment errors Before this commit we loaded configuration in Figment always using a Toml string even if the configuration cames from a toml file. 
WHen there is an error Figment does not show the file location and that's one of the main advantages of using Figment. All errors point to the primary source of the configuration option. This commit fixes that problem leting Figment to load the configuration from the file when the user provides a file. Sample error: ``` Loading configuration from default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... thread 'main' panicked at src/bootstrap/config.rs:45:32: called `Result::unwrap()` on an `Err` value: ConfigError { source: LocatedError { source: Error { tag: Tag(Default, 2), profile: Some(Profile(Uncased { string: "default" })), metadata: Some(Metadata { name: "TOML file", source: Some(File("/home/developer/torrust/torrust-tracker/./share/default/config/tracker.development.sqlite3.toml")), provide_location: Some(Location { file: "packages/configuration/src/v1/mod.rs", line: 330, col: 18 }), interpolater: }), path: [], kind: Message("TOML parse error at line 2, column 15\n |\n2 | enabled = truee\n | ^\nexpected newline, `#`\n"), prev: None }, location: Location { file: "packages/configuration/src/v1/mod.rs", line: 334, col: 41 } } } note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace ``` Notice how the file path is included is the error: `/home/developer/torrust/torrust-tracker/./share/default/config/tracker.development.sqlite3.toml` --- packages/configuration/src/lib.rs | 47 ++++++++---------- packages/configuration/src/v1/mod.rs | 71 +++++++++++++++++++++++----- 2 files changed, 79 insertions(+), 39 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index c393623df..9a00e6bbc 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -7,8 +7,8 @@ pub mod v1; use std::collections::HashMap; +use std::env; use std::sync::Arc; -use std::{env, fs}; use camino::Utf8PathBuf; use derive_more::Constructor; @@ -38,7 +38,8 @@ pub struct 
TrackerPolicy { /// Information required for loading config #[derive(Debug, Default, Clone)] pub struct Info { - tracker_toml: String, + config_toml: Option, + config_toml_path: String, api_admin_token: Option, } @@ -51,38 +52,30 @@ impl Info { /// #[allow(clippy::needless_pass_by_value)] pub fn new( - env_var_config: String, - env_var_path_config: String, - default_path_config: String, + env_var_config_toml: String, + env_var_config_toml_path: String, + default_config_toml_path: String, env_var_api_admin_token: String, ) -> Result { - let tracker_toml = if let Ok(tracker_toml) = env::var(&env_var_config) { - println!("Loading configuration from env var {env_var_config} ..."); + let config_toml = if let Ok(config_toml) = env::var(env_var_config_toml) { + println!("Loading configuration from environment variable {config_toml} ..."); + Some(config_toml) + } else { + None + }; - tracker_toml + let config_toml_path = if let Ok(config_toml_path) = env::var(env_var_config_toml_path) { + println!("Loading configuration from file: `{config_toml_path}` ..."); + config_toml_path } else { - let config_path = if let Ok(config_path) = env::var(env_var_path_config) { - println!("Loading configuration file: `{config_path}` ..."); - - config_path - } else { - println!("Loading default configuration file: `{default_path_config}` ..."); - - default_path_config - }; - - fs::read_to_string(config_path) - .map_err(|e| Error::UnableToLoadFromConfigFile { - source: (Arc::new(e) as DynError).into(), - })? - .parse() - .map_err(|_e: std::convert::Infallible| Error::Infallible)? 
+ println!("Loading configuration from default configuration file: `{default_config_toml_path}` ..."); + default_config_toml_path }; - let api_admin_token = env::var(env_var_api_admin_token).ok(); Ok(Self { - tracker_toml, - api_admin_token, + config_toml, + config_toml_path, + api_admin_token: env::var(env_var_api_admin_token).ok(), }) } } diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index d19fedc87..8e15d65ca 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -250,6 +250,11 @@ use self::tracker_api::HttpApi; use self::udp_tracker::UdpTracker; use crate::{Error, Info}; +/// Prefix for env vars that overwrite configuration options. +const CONFIG_OVERRIDE_PREFIX: &str = "TORRUST_TRACKER_CONFIG_OVERRIDE_"; +/// Path separator in env var names for nested values in configuration. +const CONFIG_OVERRIDE_SEPARATOR: &str = "__"; + /// Core configuration for the tracker. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { @@ -315,9 +320,16 @@ impl Configuration { /// /// Will return `Err` if the environment variable does not exist or has a bad configuration. 
pub fn load(info: &Info) -> Result { - let figment = Figment::from(Serialized::defaults(Configuration::default())) - .merge(Toml::string(&info.tracker_toml)) - .merge(Env::prefixed("TORRUST_TRACKER__").split("__")); + let figment = if let Some(config_toml) = &info.config_toml { + // Config in env var has priority over config file path + Figment::from(Serialized::defaults(Configuration::default())) + .merge(Toml::string(config_toml)) + .merge(Env::prefixed(CONFIG_OVERRIDE_PREFIX).split(CONFIG_OVERRIDE_SEPARATOR)) + } else { + Figment::from(Serialized::defaults(Configuration::default())) + .merge(Toml::file(&info.config_toml_path)) + .merge(Env::prefixed(CONFIG_OVERRIDE_PREFIX).split(CONFIG_OVERRIDE_SEPARATOR)) + }; let mut config: Configuration = figment.extract()?; @@ -449,11 +461,14 @@ mod tests { #[test] fn configuration_should_use_the_default_values_when_an_empty_configuration_is_provided_by_the_user() { - figment::Jail::expect_with(|_jail| { + figment::Jail::expect_with(|jail| { + jail.create_file("tracker.toml", "")?; + let empty_configuration = String::new(); let info = Info { - tracker_toml: empty_configuration, + config_toml: Some(empty_configuration), + config_toml_path: "tracker.toml".to_string(), api_admin_token: None, }; @@ -466,28 +481,59 @@ mod tests { } #[test] - fn configuration_should_be_loaded_from_a_toml_config_file() { + fn default_configuration_could_be_overwritten_from_a_single_env_var_with_toml_contents() { figment::Jail::expect_with(|_jail| { + let config_toml = r#" + db_path = "OVERWRITTEN DEFAULT DB PATH" + "# + .to_string(); + let info = Info { - tracker_toml: default_config_toml(), + config_toml: Some(config_toml), + config_toml_path: String::new(), api_admin_token: None, }; let configuration = Configuration::load(&info).expect("Could not load configuration from file"); - assert_eq!(configuration, Configuration::default()); + assert_eq!(configuration.core.db_path, "OVERWRITTEN DEFAULT DB PATH".to_string()); + + Ok(()) + }); + } + + 
#[test] + fn default_configuration_could_be_overwritten_from_a_toml_config_file() { + figment::Jail::expect_with(|jail| { + jail.create_file( + "tracker.toml", + r#" + db_path = "OVERWRITTEN DEFAULT DB PATH" + "#, + )?; + + let info = Info { + config_toml: None, + config_toml_path: "tracker.toml".to_string(), + api_admin_token: None, + }; + + let configuration = Configuration::load(&info).expect("Could not load configuration from file"); + + assert_eq!(configuration.core.db_path, "OVERWRITTEN DEFAULT DB PATH".to_string()); Ok(()) }); } #[test] - fn configuration_should_allow_to_overwrite_the_default_tracker_api_token_for_admin_with_env_var() { + fn configuration_should_allow_to_overwrite_the_default_tracker_api_token_for_admin_with_an_env_var() { figment::Jail::expect_with(|jail| { - jail.set_env("TORRUST_TRACKER__HTTP_API__ACCESS_TOKENS__ADMIN", "NewToken"); + jail.set_env("TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN", "NewToken"); let info = Info { - tracker_toml: default_config_toml(), + config_toml: Some(default_config_toml()), + config_toml_path: String::new(), api_admin_token: None, }; @@ -506,7 +552,8 @@ mod tests { fn configuration_should_allow_to_overwrite_the_default_tracker_api_token_for_admin_with_the_deprecated_env_var_name() { figment::Jail::expect_with(|_jail| { let info = Info { - tracker_toml: default_config_toml(), + config_toml: Some(default_config_toml()), + config_toml_path: String::new(), api_admin_token: Some("NewToken".to_owned()), }; From 4de5e7d32efd1277138ef1b92602204cdf5f8375 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 14 May 2024 10:39:20 +0100 Subject: [PATCH 0844/1003] refactor: move config env vars to configuration package --- packages/configuration/src/lib.rs | 24 ++++++++++++++++++------ src/bootstrap/config.rs | 22 ++-------------------- 2 files changed, 20 insertions(+), 26 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 9a00e6bbc..588feb87e 100644 
--- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -20,6 +20,19 @@ use torrust_tracker_located_error::{DynError, LocatedError}; /// The maximum number of returned peers for a torrent. pub const TORRENT_PEERS_LIMIT: usize = 74; +// Environment variables + +/// The whole `tracker.toml` file content. It has priority over the config file. +/// Even if the file is not on the default path. +const ENV_VAR_CONFIG: &str = "TORRUST_TRACKER_CONFIG"; + +/// The `tracker.toml` file location. +pub const ENV_VAR_PATH_CONFIG: &str = "TORRUST_TRACKER_PATH_CONFIG"; + +/// Env var to overwrite API admin token. +/// Deprecated: use `TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN`. +const ENV_VAR_API_ADMIN_TOKEN: &str = "TORRUST_TRACKER_API_ADMIN_TOKEN"; + pub type Configuration = v1::Configuration; pub type UdpTracker = v1::udp_tracker::UdpTracker; pub type HttpTracker = v1::http_tracker::HttpTracker; @@ -51,12 +64,11 @@ impl Info { /// Will return `Err` if unable to obtain a configuration. /// #[allow(clippy::needless_pass_by_value)] - pub fn new( - env_var_config_toml: String, - env_var_config_toml_path: String, - default_config_toml_path: String, - env_var_api_admin_token: String, - ) -> Result { + pub fn new(default_config_toml_path: String) -> Result { + let env_var_config_toml = ENV_VAR_CONFIG.to_string(); + let env_var_config_toml_path = ENV_VAR_PATH_CONFIG.to_string(); + let env_var_api_admin_token = ENV_VAR_API_ADMIN_TOKEN.to_string(); + let config_toml = if let Ok(config_toml) = env::var(env_var_config_toml) { println!("Loading configuration from environment variable {config_toml} ..."); Some(config_toml) diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index 858fd59fc..03dfd9a2f 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -4,17 +4,6 @@ use torrust_tracker_configuration::{Configuration, Info}; -// Environment variables - -/// The whole `tracker.toml` file content. 
It has priority over the config file. -/// Even if the file is not on the default path. -const ENV_VAR_CONFIG: &str = "TORRUST_TRACKER_CONFIG"; -const ENV_VAR_API_ADMIN_TOKEN: &str = "TORRUST_TRACKER_API_ADMIN_TOKEN"; - -/// The `tracker.toml` file location. -pub const ENV_VAR_PATH_CONFIG: &str = "TORRUST_TRACKER_PATH_CONFIG"; - -// Default values pub const DEFAULT_PATH_CONFIG: &str = "./share/default/config/tracker.development.sqlite3.toml"; /// It loads the application configuration from the environment. @@ -34,15 +23,8 @@ pub const DEFAULT_PATH_CONFIG: &str = "./share/default/config/tracker.developmen /// `./tracker.toml` file or the env var `TORRUST_TRACKER_CONFIG`. #[must_use] pub fn initialize_configuration() -> Configuration { - let info = Info::new( - ENV_VAR_CONFIG.to_string(), - ENV_VAR_PATH_CONFIG.to_string(), - DEFAULT_PATH_CONFIG.to_string(), - ENV_VAR_API_ADMIN_TOKEN.to_string(), - ) - .unwrap(); - - Configuration::load(&info).unwrap() + let info = Info::new(DEFAULT_PATH_CONFIG.to_string()).expect("info to load configuration is not valid"); + Configuration::load(&info).expect("configuration should be loaded from provided info") } #[cfg(test)] From ef15e0b42adf46d8c5b5fed9bedf01b56662f1b4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 14 May 2024 13:46:58 +0100 Subject: [PATCH 0845/1003] refactor: [#851] rename env vars ``` TORRUST_TRACKER_BACK_ -> TORRUST_TRACKER_ TORRUST_TRACKER_DATABASE_DRIVER -> TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER TORRUST_TRACKER_API_ADMIN_TOKEN -> TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN TORRUST_TRACKER_CONFIG -> TORRUST_TRACKER_CONFIG_TOML TORRUST_TRACKER_PATH_CONFIG -> TORRUST_TRACKER_CONFIG_TOML_PATH ``` DB_DRIVER values: `MySQL`, `Sqlite3`. Removed lowercase values `mysql` and `sqlite3` used in containers. Some enums use lowercase. This is a braking change for container but not for configuration. IN the future we could use lowercase also in the configuration. 
--- Containerfile | 8 ++++---- README.md | 8 ++++---- compose.yaml | 4 ++-- docs/benchmarking.md | 2 +- docs/containers.md | 14 +++++++------- docs/profiling.md | 4 ++-- packages/configuration/src/lib.rs | 15 +++++++-------- packages/configuration/src/v1/mod.rs | 2 +- packages/primitives/src/lib.rs | 4 +++- share/container/entry_script_sh | 20 ++++++++++---------- src/bootstrap/config.rs | 6 +++--- src/console/ci/e2e/runner.rs | 2 +- src/console/profiling.rs | 4 ++-- src/lib.rs | 6 +++--- 14 files changed, 50 insertions(+), 49 deletions(-) diff --git a/Containerfile b/Containerfile index 590b0a13b..79fae692f 100644 --- a/Containerfile +++ b/Containerfile @@ -95,16 +95,16 @@ FROM gcr.io/distroless/cc-debian12:debug as runtime RUN ["/busybox/cp", "-sp", "/busybox/sh","/busybox/cat","/busybox/ls","/busybox/env", "/bin/"] COPY --from=gcc --chmod=0555 /usr/local/bin/su-exec /bin/su-exec -ARG TORRUST_TRACKER_PATH_CONFIG="/etc/torrust/tracker/tracker.toml" -ARG TORRUST_TRACKER_DATABASE_DRIVER="sqlite3" +ARG TORRUST_TRACKER_CONFIG_TOML_PATH="/etc/torrust/tracker/tracker.toml" +ARG TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER="Sqlite3" ARG USER_ID=1000 ARG UDP_PORT=6969 ARG HTTP_PORT=7070 ARG API_PORT=1212 ARG HEALTH_CHECK_API_PORT=1313 -ENV TORRUST_TRACKER_PATH_CONFIG=${TORRUST_TRACKER_PATH_CONFIG} -ENV TORRUST_TRACKER_DATABASE_DRIVER=${TORRUST_TRACKER_DATABASE_DRIVER} +ENV TORRUST_TRACKER_CONFIG_TOML_PATH=${TORRUST_TRACKER_CONFIG_TOML_PATH} +ENV TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER=${TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER} ENV USER_ID=${USER_ID} ENV UDP_PORT=${UDP_PORT} ENV HTTP_PORT=${HTTP_PORT} diff --git a/README.md b/README.md index 8431c00e4..754d2d5b7 100644 --- a/README.md +++ b/README.md @@ -84,14 +84,14 @@ cp ./share/default/config/tracker.development.sqlite3.toml ./storage/tracker/etc vim ./storage/tracker/etc/tracker.toml # Run the tracker with the updated configuration: -TORRUST_TRACKER_PATH_CONFIG="./storage/tracker/etc/tracker.toml" cargo run 
+TORRUST_TRACKER_CONFIG_TOML_PATH="./storage/tracker/etc/tracker.toml" cargo run ``` _Optionally, you may choose to supply the entire configuration as an environmental variable:_ ```sh # Use a configuration supplied on an environmental variable: -TORRUST_TRACKER_CONFIG=$(cat "./storage/tracker/etc/tracker.toml") cargo run +TORRUST_TRACKER_CONFIG_TOML=$(cat "./storage/tracker/etc/tracker.toml") cargo run ``` _For deployment, you **should** override the `api_admin_token` by using an environmental variable:_ @@ -102,8 +102,8 @@ gpg --armor --gen-random 1 10 | tee ./storage/tracker/lib/tracker_api_admin_toke chmod go-rwx ./storage/tracker/lib/tracker_api_admin_token.secret # Override secret in configuration using an environmental variable: -TORRUST_TRACKER_CONFIG=$(cat "./storage/tracker/etc/tracker.toml") \ - TORRUST_TRACKER_API_ADMIN_TOKEN=$(cat "./storage/tracker/lib/tracker_api_admin_token.secret") \ +TORRUST_TRACKER_CONFIG_TOML=$(cat "./storage/tracker/etc/tracker.toml") \ + TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN=$(cat "./storage/tracker/lib/tracker_api_admin_token.secret") \ cargo run ``` diff --git a/compose.yaml b/compose.yaml index 672ca6d0f..1d425c743 100644 --- a/compose.yaml +++ b/compose.yaml @@ -4,8 +4,8 @@ services: image: torrust-tracker:release tty: true environment: - - TORRUST_TRACKER_DATABASE_DRIVER=${TORRUST_TRACKER_DATABASE_DRIVER:-mysql} - - TORRUST_TRACKER_API_ADMIN_TOKEN=${TORRUST_TRACKER_API_ADMIN_TOKEN:-MyAccessToken} + - TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER=${TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER:-MySQL} + - TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN=${TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN:-MyAccessToken} networks: - server_side ports: diff --git a/docs/benchmarking.md b/docs/benchmarking.md index 7c82df14c..2a3f1f8b0 100644 --- a/docs/benchmarking.md +++ b/docs/benchmarking.md @@ -39,7 +39,7 @@ Build and run the tracker: ```console cargo build --release 
-TORRUST_TRACKER_PATH_CONFIG="./share/default/config/tracker.udp.benchmarking.toml" ./target/release/torrust-tracker +TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.udp.benchmarking.toml" ./target/release/torrust-tracker ``` Run the load test with: diff --git a/docs/containers.md b/docs/containers.md index 6622e29b2..a0ba59d4b 100644 --- a/docs/containers.md +++ b/docs/containers.md @@ -147,10 +147,10 @@ Environmental variables are loaded through the `--env`, in the format `--env VAR The following environmental variables can be set: -- `TORRUST_TRACKER_PATH_CONFIG` - The in-container path to the tracker configuration file, (default: `"/etc/torrust/tracker/tracker.toml"`). -- `TORRUST_TRACKER_API_ADMIN_TOKEN` - Override of the admin token. If set, this value overrides any value set in the config. -- `TORRUST_TRACKER_DATABASE_DRIVER` - The database type used for the container, (options: `sqlite3`, `mysql`, default `sqlite3`). Please Note: This dose not override the database configuration within the `.toml` config file. -- `TORRUST_TRACKER_CONFIG` - Load config from this environmental variable instead from a file, (i.e: `TORRUST_TRACKER_CONFIG=$(cat tracker-tracker.toml)`). +- `TORRUST_TRACKER_CONFIG_TOML_PATH` - The in-container path to the tracker configuration file, (default: `"/etc/torrust/tracker/tracker.toml"`). +- `TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN` - Override of the admin token. If set, this value overrides any value set in the config. +- `TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER` - The database type used for the container, (options: `Sqlite3`, `MySQL`, default `Sqlite3`). Please Note: This dose not override the database configuration within the `.toml` config file. +- `TORRUST_TRACKER_CONFIG_TOML` - Load config from this environmental variable instead from a file, (i.e: `TORRUST_TRACKER_CONFIG_TOML=$(cat tracker-tracker.toml)`). - `USER_ID` - The user id for the runtime crated `torrust` user. 
Please Note: This user id should match the ownership of the host-mapped volumes, (default `1000`). - `UDP_PORT` - The port for the UDP tracker. This should match the port used in the configuration, (default `6969`). - `HTTP_PORT` - The port for the HTTP tracker. This should match the port used in the configuration, (default `7070`). @@ -205,7 +205,7 @@ mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/ ## Run Torrust Tracker Container Image docker run -it \ - --env TORRUST_TRACKER_API_ADMIN_TOKEN="MySecretToken" \ + --env TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN="MySecretToken" \ --env USER_ID="$(id -u)" \ --publish 0.0.0.0:7070:7070/tcp \ --publish 0.0.0.0:6969:6969/udp \ @@ -227,7 +227,7 @@ mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/ ## Run Torrust Tracker Container Image podman run -it \ - --env TORRUST_TRACKER_API_ADMIN_TOKEN="MySecretToken" \ + --env TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN="MySecretToken" \ --env USER_ID="$(id -u)" \ --publish 0.0.0.0:7070:7070/tcp \ --publish 0.0.0.0:6969:6969/udp \ @@ -255,7 +255,7 @@ docker build --target release --tag torrust-tracker:release --file Containerfile mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/ USER_ID=$(id -u) \ - TORRUST_TRACKER_API_ADMIN_TOKEN="MySecretToken" \ + TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN="MySecretToken" \ docker compose up --build ``` diff --git a/docs/profiling.md b/docs/profiling.md index 406560f3c..8038f9e77 100644 --- a/docs/profiling.md +++ b/docs/profiling.md @@ -35,7 +35,7 @@ To generate the graph you will need to: ```console cargo build --profile=release-debug --bin=profiling ./target/release/aquatic_udp_load_test -c "load-test-config.toml" -sudo TORRUST_TRACKER_PATH_CONFIG="./share/default/config/tracker.udp.benchmarking.toml" /home/USER/.cargo/bin/flamegraph -- ./target/release-debug/profiling 60 +sudo 
TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.udp.benchmarking.toml" /home/USER/.cargo/bin/flamegraph -- ./target/release-debug/profiling 60 ``` __NOTICE__: You need to install the `aquatic_udp_load_test` program. @@ -92,7 +92,7 @@ Build and the binary for profiling: ```console RUSTFLAGS='-g' cargo build --release --bin profiling \ - && export TORRUST_TRACKER_PATH_CONFIG="./share/default/config/tracker.udp.benchmarking.toml" \ + && export TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.udp.benchmarking.toml" \ && valgrind \ --tool=callgrind \ --callgrind-out-file=callgrind.out \ diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 588feb87e..b79081a13 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -24,14 +24,13 @@ pub const TORRENT_PEERS_LIMIT: usize = 74; /// The whole `tracker.toml` file content. It has priority over the config file. /// Even if the file is not on the default path. -const ENV_VAR_CONFIG: &str = "TORRUST_TRACKER_CONFIG"; +const ENV_VAR_CONFIG_TOML: &str = "TORRUST_TRACKER_CONFIG_TOML"; /// The `tracker.toml` file location. -pub const ENV_VAR_PATH_CONFIG: &str = "TORRUST_TRACKER_PATH_CONFIG"; +pub const ENV_VAR_CONFIG_TOML_PATH: &str = "TORRUST_TRACKER_CONFIG_TOML_PATH"; /// Env var to overwrite API admin token. -/// Deprecated: use `TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN`. 
-const ENV_VAR_API_ADMIN_TOKEN: &str = "TORRUST_TRACKER_API_ADMIN_TOKEN"; +const ENV_VAR_HTTP_API_ACCESS_TOKENS_ADMIN: &str = "TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN"; pub type Configuration = v1::Configuration; pub type UdpTracker = v1::udp_tracker::UdpTracker; @@ -65,9 +64,9 @@ impl Info { /// #[allow(clippy::needless_pass_by_value)] pub fn new(default_config_toml_path: String) -> Result { - let env_var_config_toml = ENV_VAR_CONFIG.to_string(); - let env_var_config_toml_path = ENV_VAR_PATH_CONFIG.to_string(); - let env_var_api_admin_token = ENV_VAR_API_ADMIN_TOKEN.to_string(); + let env_var_config_toml = ENV_VAR_CONFIG_TOML.to_string(); + let env_var_config_toml_path = ENV_VAR_CONFIG_TOML_PATH.to_string(); + let env_var_api_admin_token = ENV_VAR_HTTP_API_ACCESS_TOKENS_ADMIN.to_string(); let config_toml = if let Ok(config_toml) = env::var(env_var_config_toml) { println!("Loading configuration from environment variable {config_toml} ..."); @@ -146,7 +145,7 @@ impl AnnouncePolicy { pub enum Error { /// Unable to load the configuration from the environment variable. /// This error only occurs if there is no configuration file and the - /// `TORRUST_TRACKER_CONFIG` environment variable is not set. + /// `TORRUST_TRACKER_CONFIG_TOML` environment variable is not set. #[error("Unable to load from Environmental Variable: {source}")] UnableToLoadFromEnvironmentVariable { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 8e15d65ca..b9d75c71d 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -9,7 +9,7 @@ //! with the same content as the file. //! //! Configuration can not only be loaded from a file, but also from an -//! environment variable `TORRUST_TRACKER_CONFIG`. This is useful when running +//! environment variable `TORRUST_TRACKER_CONFIG_TOML`. This is useful when running //! 
the tracker in a Docker container or environments where you do not have a //! persistent storage or you cannot inject a configuration file. Refer to //! [`Torrust Tracker documentation`](https://docs.rs/torrust-tracker) for more diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index aeb4d0d4e..47a837a9b 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -52,7 +52,9 @@ pub struct NumberOfBytes(pub i64); /// For more information about persistence. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, derive_more::Display, Clone)] pub enum DatabaseDriver { - // TODO: Move to the database crate once that gets its own crate. + // TODO: + // - Move to the database crate once that gets its own crate. + // - Rename serialized values to lowercase: `sqlite3` and `mysql`. /// The Sqlite3 database driver. Sqlite3, /// The `MySQL` database driver. diff --git a/share/container/entry_script_sh b/share/container/entry_script_sh index 4f98e6622..8c704ea67 100644 --- a/share/container/entry_script_sh +++ b/share/container/entry_script_sh @@ -26,29 +26,29 @@ chmod -R 2770 /var/lib/torrust /var/log/torrust /etc/torrust # Install the database and config: -if [ -n "$TORRUST_TRACKER_DATABASE_DRIVER" ]; then - if cmp_lc "$TORRUST_TRACKER_DATABASE_DRIVER" "sqlite3"; then +if [ -n "$TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER" ]; then + if cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER" "Sqlite3"; then - # Select sqlite3 empty database + # Select Sqlite3 empty database default_database="/usr/share/torrust/default/database/tracker.sqlite3.db" - # Select sqlite3 default configuration + # Select Sqlite3 default configuration default_config="/usr/share/torrust/default/config/tracker.container.sqlite3.toml" - elif cmp_lc "$TORRUST_TRACKER_DATABASE_DRIVER" "mysql"; then + elif cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER" "MySQL"; then - # (no database file needed for mysql) + # (no database file needed for MySQL) - # Select 
default mysql configuration + # Select default MySQL configuration default_config="/usr/share/torrust/default/config/tracker.container.mysql.toml" else - echo "Error: Unsupported Database Type: \"$TORRUST_TRACKER_DATABASE_DRIVER\"." - echo "Please Note: Supported Database Types: \"sqlite3\", \"mysql\"." + echo "Error: Unsupported Database Type: \"$TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER\"." + echo "Please Note: Supported Database Types: \"Sqlite3\", \"MySQL\"." exit 1 fi else - echo "Error: \"\$TORRUST_TRACKER_DATABASE_DRIVER\" was not set!"; exit 1; + echo "Error: \"\$TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER\" was not set!"; exit 1; fi install_config="/etc/torrust/tracker/tracker.toml" diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index 03dfd9a2f..6b607bd6f 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -1,6 +1,6 @@ //! Initialize configuration from file or env var. //! -//! All environment variables are prefixed with `TORRUST_TRACKER_BACK_`. +//! All environment variables are prefixed with `TORRUST_TRACKER_`. use torrust_tracker_configuration::{Configuration, Info}; @@ -11,7 +11,7 @@ pub const DEFAULT_PATH_CONFIG: &str = "./share/default/config/tracker.developmen /// There are two methods to inject the configuration: /// /// 1. By using a config file: `tracker.toml`. -/// 2. Environment variable: `TORRUST_TRACKER_CONFIG`. The variable contains the same contents as the `tracker.toml` file. +/// 2. Environment variable: `TORRUST_TRACKER_CONFIG_TOML`. The variable contains the same contents as the `tracker.toml` file. /// /// Environment variable has priority over the config file. /// @@ -20,7 +20,7 @@ pub const DEFAULT_PATH_CONFIG: &str = "./share/default/config/tracker.developmen /// # Panics /// /// Will panic if it can't load the configuration from either -/// `./tracker.toml` file or the env var `TORRUST_TRACKER_CONFIG`. +/// `./tracker.toml` file or the env var `TORRUST_TRACKER_CONFIG_TOML`. 
#[must_use] pub fn initialize_configuration() -> Configuration { let info = Info::new(DEFAULT_PATH_CONFIG.to_string()).expect("info to load configuration is not valid"); diff --git a/src/console/ci/e2e/runner.rs b/src/console/ci/e2e/runner.rs index 1a4746800..945a87033 100644 --- a/src/console/ci/e2e/runner.rs +++ b/src/console/ci/e2e/runner.rs @@ -46,7 +46,7 @@ pub fn run() { // Besides, if we don't use port 0 we should get the port numbers from the tracker configuration. // We could not use docker, but the intention was to create E2E tests including containerization. let options = RunOptions { - env_vars: vec![("TORRUST_TRACKER_CONFIG".to_string(), tracker_config.to_string())], + env_vars: vec![("TORRUST_TRACKER_CONFIG_TOML".to_string(), tracker_config.to_string())], ports: vec![ "6969:6969/udp".to_string(), "7070:7070/tcp".to_string(), diff --git a/src/console/profiling.rs b/src/console/profiling.rs index e0867159f..52e11913f 100644 --- a/src/console/profiling.rs +++ b/src/console/profiling.rs @@ -27,7 +27,7 @@ //! //! ```text //! RUSTFLAGS='-g' cargo build --release --bin profiling \ -//! && export TORRUST_TRACKER_PATH_CONFIG="./share/default/config/tracker.udp.benchmarking.toml" \ +//! && export TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.udp.benchmarking.toml" \ //! && valgrind \ //! --tool=callgrind \ //! --callgrind-out-file=callgrind.out \ @@ -40,7 +40,7 @@ //! //! ```text //! RUSTFLAGS='-g' cargo build --release --bin profiling \ -//! && export TORRUST_TRACKER_PATH_CONFIG="./share/default/config/tracker.udp.benchmarking.toml" \ +//! && export TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.udp.benchmarking.toml" \ //! && valgrind \ //! --tool=callgrind \ //! --callgrind-out-file=callgrind.out \ diff --git a/src/lib.rs b/src/lib.rs index 22bc133e1..6fd5da15f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -213,10 +213,10 @@ //! 
documentation for the [torrust-tracker-configuration crate](https://docs.rs/torrust-tracker-configuration). //! //! Alternatively to the `tracker.toml` file you can use one environment -//! variable `TORRUST_TRACKER_CONFIG` to pass the configuration to the tracker: +//! variable `TORRUST_TRACKER_CONFIG_TOML` to pass the configuration to the tracker: //! //! ```text -//! TORRUST_TRACKER_CONFIG=$(cat ./share/default/config/tracker.development.sqlite3.toml) ./target/release/torrust-tracker +//! TORRUST_TRACKER_CONFIG_TOML=$(cat ./share/default/config/tracker.development.sqlite3.toml) ./target/release/torrust-tracker //! ``` //! //! In the previous example you are just setting the env var with the contents @@ -225,7 +225,7 @@ //! The env var contains the same data as the `tracker.toml`. It's particularly //! useful in you are [running the tracker with docker](https://github.com/torrust/torrust-tracker/tree/develop/docker). //! -//! > NOTICE: The `TORRUST_TRACKER_CONFIG` env var has priority over the `tracker.toml` file. +//! > NOTICE: The `TORRUST_TRACKER_CONFIG_TOML` env var has priority over the `tracker.toml` file. //! //! By default, if you don’t specify any `tracker.toml` file, the application //! will use `./share/default/config/tracker.development.sqlite3.toml`. From a4d2adfe8bd5f5c9dd4fd866e0f89b2b3c404154 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 14 May 2024 15:27:37 +0100 Subject: [PATCH 0846/1003] feat!: remove deprecated env var The env var `TORRUST_TRACKER_API_ADMIN_TOKEN` was replaced with `TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN`. After the migration to Figment all configuration options can be overwritten. 
--- packages/configuration/src/lib.rs | 6 ----- packages/configuration/src/v1/mod.rs | 34 +--------------------------- 2 files changed, 1 insertion(+), 39 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index b79081a13..62792c271 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -29,9 +29,6 @@ const ENV_VAR_CONFIG_TOML: &str = "TORRUST_TRACKER_CONFIG_TOML"; /// The `tracker.toml` file location. pub const ENV_VAR_CONFIG_TOML_PATH: &str = "TORRUST_TRACKER_CONFIG_TOML_PATH"; -/// Env var to overwrite API admin token. -const ENV_VAR_HTTP_API_ACCESS_TOKENS_ADMIN: &str = "TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN"; - pub type Configuration = v1::Configuration; pub type UdpTracker = v1::udp_tracker::UdpTracker; pub type HttpTracker = v1::http_tracker::HttpTracker; @@ -52,7 +49,6 @@ pub struct TrackerPolicy { pub struct Info { config_toml: Option, config_toml_path: String, - api_admin_token: Option, } impl Info { @@ -66,7 +62,6 @@ impl Info { pub fn new(default_config_toml_path: String) -> Result { let env_var_config_toml = ENV_VAR_CONFIG_TOML.to_string(); let env_var_config_toml_path = ENV_VAR_CONFIG_TOML_PATH.to_string(); - let env_var_api_admin_token = ENV_VAR_HTTP_API_ACCESS_TOKENS_ADMIN.to_string(); let config_toml = if let Ok(config_toml) = env::var(env_var_config_toml) { println!("Loading configuration from environment variable {config_toml} ..."); @@ -86,7 +81,6 @@ impl Info { Ok(Self { config_toml, config_toml_path, - api_admin_token: env::var(env_var_api_admin_token).ok(), }) } } diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index b9d75c71d..8d45270b8 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -288,10 +288,6 @@ impl Default for Configuration { } impl Configuration { - fn override_api_admin_token(&mut self, api_admin_token: &str) { - 
self.http_api.override_admin_token(api_admin_token); - } - /// Returns the tracker public IP address id defined in the configuration, /// and `None` otherwise. #[must_use] @@ -331,11 +327,7 @@ impl Configuration { .merge(Env::prefixed(CONFIG_OVERRIDE_PREFIX).split(CONFIG_OVERRIDE_SEPARATOR)) }; - let mut config: Configuration = figment.extract()?; - - if let Some(ref token) = info.api_admin_token { - config.override_api_admin_token(token); - }; + let config: Configuration = figment.extract()?; Ok(config) } @@ -469,7 +461,6 @@ mod tests { let info = Info { config_toml: Some(empty_configuration), config_toml_path: "tracker.toml".to_string(), - api_admin_token: None, }; let configuration = Configuration::load(&info).expect("Could not load configuration from file"); @@ -491,7 +482,6 @@ mod tests { let info = Info { config_toml: Some(config_toml), config_toml_path: String::new(), - api_admin_token: None, }; let configuration = Configuration::load(&info).expect("Could not load configuration from file"); @@ -515,7 +505,6 @@ mod tests { let info = Info { config_toml: None, config_toml_path: "tracker.toml".to_string(), - api_admin_token: None, }; let configuration = Configuration::load(&info).expect("Could not load configuration from file"); @@ -534,27 +523,6 @@ mod tests { let info = Info { config_toml: Some(default_config_toml()), config_toml_path: String::new(), - api_admin_token: None, - }; - - let configuration = Configuration::load(&info).expect("Could not load configuration from file"); - - assert_eq!( - configuration.http_api.access_tokens.get("admin"), - Some("NewToken".to_owned()).as_ref() - ); - - Ok(()) - }); - } - - #[test] - fn configuration_should_allow_to_overwrite_the_default_tracker_api_token_for_admin_with_the_deprecated_env_var_name() { - figment::Jail::expect_with(|_jail| { - let info = Info { - config_toml: Some(default_config_toml()), - config_toml_path: String::new(), - api_admin_token: Some("NewToken".to_owned()), }; let configuration = 
Configuration::load(&info).expect("Could not load configuration from file"); From e1e107123a09893e08b0443ac9750cf6a3e6987f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 14 May 2024 17:27:08 +0100 Subject: [PATCH 0847/1003] docs: update README - Fix missing line break after badges. - Add roadmap draft. --- README.md | 42 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 754d2d5b7..ebd7c357b 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,8 @@ # Torrust Tracker -[![container_wf_b]][container_wf] [![coverage_wf_b]][coverage_wf] [![deployment_wf_b]][deployment_wf] [![testing_wf_b]][testing_wf]**Torrust Tracker** is a [BitTorrent][bittorrent] Tracker that matchmakes peers and collects statistics. Written in [Rust Language][rust] with the [Axum] web framework. _**This tracker aims to be respectful to established standards, (both [formal][BEP 00] and [otherwise][torrent_source_felid]).___ +[![container_wf_b]][container_wf] [![coverage_wf_b]][coverage_wf] [![deployment_wf_b]][deployment_wf] [![testing_wf_b]][testing_wf] + +**Torrust Tracker** is a [BitTorrent][bittorrent] Tracker that matchmakes peers and collects statistics. Written in [Rust Language][rust] with the [Axum] web framework. **This tracker aims to be respectful to established standards, (both [formal][BEP 00] and [otherwise][torrent_source_felid]).** > This is a [Torrust][torrust] project and is in active development. It is community supported as well as sponsored by [Nautilus Cyberneering][nautilus]. @@ -17,6 +19,44 @@ - [x] Support [newTrackon][newtrackon] checks. - [x] Persistent `SQLite3` or `MySQL` Databases. +## Roadmap + +Persistence: + +- [ ] Support other databases. + +Integrations: + +- [ ] Webhooks. + +Administration: + +- [ ] Improve categories and tag management. +- [ ] User management: list, search and ban users. +- [ ] Full-private mode. +- [ ] User statistics.
+ +Users: + +- [ ] Reset or change the password. +- [ ] User profile. +- [ ] Invitation system. +- [ ] User moderation. +- [ ] Add torrent providing only the info-hash. +- [ ] Improve search. + +Torrents: + +- [ ] Change the source field. +- [ ] Change the creator field. +- [ ] Implement BEP 19: WebSeed - HTTP/FTP Seeding (GetRight style). +- [ ] Implement BEP 32: BitTorrent DHT Extensions for IPv6. + +Others: + +- [ ] Multi-tracker +- [ ] Multi-language + ## Implemented BitTorrent Enhancement Proposals (BEPs) > > _[Learn more about BitTorrent Enhancement Proposals][BEP 00]_ From 80fc8d6b4d7b120b11984bbe229c6c7860da3909 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 14 May 2024 18:01:47 +0100 Subject: [PATCH 0848/1003] docs: update roadmap in README I copied the Index roadmap instead of the tracker one by mistake. --- README.md | 42 ++++++++++++++++++++---------------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index ebd7c357b..f83dbd936 100644 --- a/README.md +++ b/README.md @@ -21,41 +21,39 @@ ## Roadmap +Core: + +- [ ] New option `want_ip_from_query_string`. See . +- [ ] Permanent keys. See . +- [ ] Peer and torrents specific statistics. See . + Persistence: -- [ ] Support other databases. +- [ ] Support other databases like PostgreSQL. -Integrations: +Performance: -- [ ] Webhooks. +- [ ] More optimizations. See . -Administration: +Protocols: -- [ ] Improve categories and tag management. -- [ ] User management: list, search and ban users. -- [ ] Full-private mode. -- [ ] User statistics. +- [ ] WebTorrent. -Users: +Integrations: -- [ ] Reset or change the password. -- [ ] User profile. -- [ ] Invitation system. -- [ ] User moderation. -- [ ] Add torrent providing only the info-hash. -- [ ] Improve search. +- [ ] Monitoring (Prometheus). -Torrents: +Utils: -- [ ] Change the source field. -- [ ] Change the creator field. -- [ ] Implement BEP 19: WebSeed - HTTP/FTP Seeding (GetRight style).
-- [ ] Implement BEP 32: BitTorrent DHT Extensions for IPv6. +- [ ] Tracker client. +- [ ] Tracker checker. Others: -- [ ] Multi-tracker -- [ ] Multi-language +- [ ] Support for Windows. +- [ ] Docker images for other architectures. + + ## Implemented BitTorrent Enhancement Proposals (BEPs) > From dadc216e8a3a75316f735d6b3debdab08d8e595a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 May 2024 12:25:58 +0100 Subject: [PATCH 0849/1003] chore(deps): add cargo dependencies needed for axum timeouts We want to add timeouts for the Axum server configuration, in order to close HTTP connections when the client doesn't send any request after opening a HTTP connection. --- Cargo.lock | 5 +++++ Cargo.toml | 5 +++++ cSpell.json | 4 ++++ 3 files changed, 14 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 446477aac..523ea575d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3891,8 +3891,11 @@ dependencies = [ "fern", "figment", "futures", + "futures-util", "hex-literal", + "http-body", "hyper", + "hyper-util", "lazy_static", "local-ip-address", "log", @@ -3900,6 +3903,7 @@ dependencies = [ "multimap", "parking_lot", "percent-encoding", + "pin-project-lite", "r2d2", "r2d2_mysql", "r2d2_sqlite", @@ -3920,6 +3924,7 @@ dependencies = [ "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-tracker-torrent-repository", + "tower", "tower-http", "trace", "tracing", diff --git a/Cargo.toml b/Cargo.toml index 60652b160..5183c6067 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,13 +46,17 @@ derive_more = "0" fern = "0" figment = "0.10.18" futures = "0" +futures-util = "0.3.30" hex-literal = "0" +http-body = "1.0.0" hyper = "1" +hyper-util = { version = "0.1.3", features = ["http1", "http2", "tokio"] } lazy_static = "1" log = { version = "0", features = ["release_max_level_info"] } multimap = "0" parking_lot = "0.12.1" percent-encoding = "2" +pin-project-lite = "0.2.14" r2d2 = "0" r2d2_mysql = "24" r2d2_sqlite = { version = "0", features = ["bundled"] } @@ 
-72,6 +76,7 @@ torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12-develop", path = " torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "packages/located-error" } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "packages/primitives" } torrust-tracker-torrent-repository = { version = "3.0.0-alpha.12-develop", path = "packages/torrent-repository" } +tower = { version = "0.4.13", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } trace = "0" tracing = "0" diff --git a/cSpell.json b/cSpell.json index bd6c9d489..2b5cf55bf 100644 --- a/cSpell.json +++ b/cSpell.json @@ -50,6 +50,7 @@ "downloadedi", "dtolnay", "elif", + "Eray", "filesd", "flamegraph", "Freebox", @@ -73,6 +74,7 @@ "intervali", "Joakim", "kallsyms", + "Karatay", "kcachegrind", "kexec", "keyout", @@ -107,6 +109,7 @@ "Pando", "peekable", "peerlist", + "programatik", "proot", "proto", "Quickstart", @@ -137,6 +140,7 @@ "sharktorrent", "SHLVL", "skiplist", + "slowloris", "socketaddr", "sqllite", "subsec", From 112b76d79da76381d4ee8f04828c74a438d83308 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 May 2024 12:27:50 +0100 Subject: [PATCH 0850/1003] fix: [#612] add timeout for time waiting for the first API requests Adds a timeout for the Tracker API for the time the server waits for the first request from the client after opening a new HTTP connection.
--- src/servers/apis/server.rs | 9 +- src/servers/custom_axum_server.rs | 275 ++++++++++++++++++++++++++++++ src/servers/mod.rs | 1 + 3 files changed, 282 insertions(+), 3 deletions(-) create mode 100644 src/servers/custom_axum_server.rs diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 9317d6ec0..57d2629ae 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -37,6 +37,7 @@ use torrust_tracker_configuration::AccessTokens; use super::routes::router; use crate::bootstrap::jobs::Started; use crate::core::Tracker; +use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::{graceful_shutdown, Halted}; @@ -177,7 +178,7 @@ impl ApiServer { /// Or if there request returns an error code. #[must_use] pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { - let url = format!("http://{binding}/api/health_check"); + let url = format!("http://{binding}/api/health_check"); // DevSkim: ignore DS137138 let info = format!("checking api health check at: {url}"); @@ -234,13 +235,15 @@ impl Launcher { let running = Box::pin(async { match tls { - Some(tls) => axum_server::from_tcp_rustls(socket, tls) + Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) .handle(handle) + .acceptor(TimeoutAcceptor) .serve(router.into_make_service_with_connect_info::()) .await .expect("Axum server for tracker API crashed."), - None => axum_server::from_tcp(socket) + None => custom_axum_server::from_tcp_with_timeouts(socket) .handle(handle) + .acceptor(TimeoutAcceptor) .serve(router.into_make_service_with_connect_info::()) .await .expect("Axum server for tracker API crashed."), diff --git a/src/servers/custom_axum_server.rs b/src/servers/custom_axum_server.rs new file mode 100644 index 000000000..5705ef24e --- /dev/null +++ b/src/servers/custom_axum_server.rs @@ -0,0 +1,275 @@ +//! 
Wrapper for Axum server to add timeouts. +//! +//! Copyright (c) Eray Karatay ([@programatik29](https://github.com/programatik29)). +//! +//! See: . +//! +//! If a client opens a HTTP connection and it does not send any requests, the +//! connection is closed after a timeout. You can test it with: +//! +//! ```text +//! telnet 127.0.0.1 1212 +//! Trying 127.0.0.1... +//! Connected to 127.0.0.1. +//! Escape character is '^]'. +//! Connection closed by foreign host. +//! ``` +//! +//! If you want to know more about Axum and timeouts see . +use std::future::Ready; +use std::io::ErrorKind; +use std::net::TcpListener; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::time::Duration; + +use axum_server::accept::Accept; +use axum_server::tls_rustls::{RustlsAcceptor, RustlsConfig}; +use axum_server::Server; +use futures_util::{ready, Future}; +use http_body::{Body, Frame}; +use hyper::Response; +use hyper_util::rt::TokioTimer; +use pin_project_lite::pin_project; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tokio::sync::mpsc::{self, UnboundedReceiver, UnboundedSender}; +use tokio::time::{Instant, Sleep}; +use tower::Service; + +const HTTP1_HEADER_READ_TIMEOUT: Duration = Duration::from_secs(5); +const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(5); +const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(5); + +#[must_use] +pub fn from_tcp_with_timeouts(socket: TcpListener) -> Server { + add_timeouts(axum_server::from_tcp(socket)) +} + +#[must_use] +pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> Server { + add_timeouts(axum_server::from_tcp_rustls(socket, tls)) +} + +fn add_timeouts(mut server: Server) -> Server { + server.http_builder().http1().timer(TokioTimer::new()); + server.http_builder().http2().timer(TokioTimer::new()); + + server.http_builder().http1().header_read_timeout(HTTP1_HEADER_READ_TIMEOUT); + server + .http_builder() + .http2() + .keep_alive_timeout(HTTP2_KEEP_ALIVE_TIMEOUT) + 
.keep_alive_interval(HTTP2_KEEP_ALIVE_INTERVAL); + + server +} + +#[derive(Clone)] +pub struct TimeoutAcceptor; + +impl Accept for TimeoutAcceptor { + type Stream = TimeoutStream; + type Service = TimeoutService; + type Future = Ready>; + + fn accept(&self, stream: I, service: S) -> Self::Future { + let (tx, rx) = mpsc::unbounded_channel(); + + let stream = TimeoutStream::new(stream, HTTP1_HEADER_READ_TIMEOUT, rx); + let service = TimeoutService::new(service, tx); + + std::future::ready(Ok((stream, service))) + } +} + +#[derive(Clone)] +pub struct TimeoutService { + inner: S, + sender: UnboundedSender, +} + +impl TimeoutService { + fn new(inner: S, sender: UnboundedSender) -> Self { + Self { inner, sender } + } +} + +impl Service for TimeoutService +where + S: Service>, +{ + type Response = Response>; + type Error = S::Error; + type Future = TimeoutServiceFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + // send timer wait signal + let _ = self.sender.send(TimerSignal::Wait); + + TimeoutServiceFuture::new(self.inner.call(req), self.sender.clone()) + } +} + +pin_project! { + pub struct TimeoutServiceFuture { + #[pin] + inner: F, + sender: Option>, + } +} + +impl TimeoutServiceFuture { + fn new(inner: F, sender: UnboundedSender) -> Self { + Self { + inner, + sender: Some(sender), + } + } +} + +impl Future for TimeoutServiceFuture +where + F: Future, E>>, +{ + type Output = Result>, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + this.inner.poll(cx).map(|result| { + result.map(|response| { + response.map(|body| TimeoutBody::new(body, this.sender.take().expect("future polled after ready"))) + }) + }) + } +} + +enum TimerSignal { + Wait, + Reset, +} + +pin_project! 
{ + pub struct TimeoutBody { + #[pin] + inner: B, + sender: UnboundedSender, + } +} + +impl TimeoutBody { + fn new(inner: B, sender: UnboundedSender) -> Self { + Self { inner, sender } + } +} + +impl Body for TimeoutBody { + type Data = B::Data; + type Error = B::Error; + + fn poll_frame(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll, Self::Error>>> { + let this = self.project(); + let option = ready!(this.inner.poll_frame(cx)); + + if option.is_none() { + let _ = this.sender.send(TimerSignal::Reset); + } + + Poll::Ready(option) + } + + fn is_end_stream(&self) -> bool { + let is_end_stream = self.inner.is_end_stream(); + + if is_end_stream { + let _ = self.sender.send(TimerSignal::Reset); + } + + is_end_stream + } + + fn size_hint(&self) -> http_body::SizeHint { + self.inner.size_hint() + } +} + +pub struct TimeoutStream { + inner: IO, + // hyper requires unpin + sleep: Pin>, + duration: Duration, + waiting: bool, + receiver: UnboundedReceiver, + finished: bool, +} + +impl TimeoutStream { + fn new(inner: IO, duration: Duration, receiver: UnboundedReceiver) -> Self { + Self { + inner, + sleep: Box::pin(tokio::time::sleep(duration)), + duration, + waiting: false, + receiver, + finished: false, + } + } +} + +impl AsyncRead for TimeoutStream { + fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll> { + if !self.finished { + match Pin::new(&mut self.receiver).poll_recv(cx) { + // reset the timer + Poll::Ready(Some(TimerSignal::Reset)) => { + self.waiting = false; + + let deadline = Instant::now() + self.duration; + self.sleep.as_mut().reset(deadline); + } + // enter waiting mode (for response body last chunk) + Poll::Ready(Some(TimerSignal::Wait)) => self.waiting = true, + Poll::Ready(None) => self.finished = true, + Poll::Pending => (), + } + } + + if !self.waiting { + // return error if timer is elapsed + if let Poll::Ready(()) = self.sleep.as_mut().poll(cx) { + return Poll::Ready(Err(std::io::Error::new(ErrorKind::TimedOut, 
"request header read timed out"))); + } + } + + Pin::new(&mut self.inner).poll_read(cx, buf) + } +} + +impl AsyncWrite for TimeoutStream { + fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { + Pin::new(&mut self.inner).poll_write(cx, buf) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_flush(cx) + } + + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_shutdown(cx) + } + + fn poll_write_vectored( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + Pin::new(&mut self.inner).poll_write_vectored(cx, bufs) + } + + fn is_write_vectored(&self) -> bool { + self.inner.is_write_vectored() + } +} diff --git a/src/servers/mod.rs b/src/servers/mod.rs index b0e222d2a..0c9cc5dd8 100644 --- a/src/servers/mod.rs +++ b/src/servers/mod.rs @@ -1,5 +1,6 @@ //! Servers. Services that can be started and stopped. 
pub mod apis; +pub mod custom_axum_server; pub mod health_check_api; pub mod http; pub mod registar; From 9e42a1a452804f1f14f5e6a99c38bdccabbc5001 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 May 2024 12:51:25 +0100 Subject: [PATCH 0851/1003] feat: [#612] tower middleware to apply timeouts to requests --- src/servers/apis/routes.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index e3d1ef446..087bcfa4a 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -8,12 +8,15 @@ use std::sync::Arc; use std::time::Duration; +use axum::error_handling::HandleErrorLayer; use axum::http::HeaderName; use axum::response::Response; use axum::routing::get; -use axum::{middleware, Router}; -use hyper::Request; +use axum::{middleware, BoxError, Router}; +use hyper::{Request, StatusCode}; use torrust_tracker_configuration::AccessTokens; +use tower::timeout::TimeoutLayer; +use tower::ServiceBuilder; use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; @@ -25,6 +28,8 @@ use super::v1::context::health_check::handlers::health_check_handler; use super::v1::middlewares::auth::State; use crate::core::Tracker; +const TIMEOUT: Duration = Duration::from_secs(5); + /// Add all API routes to the router. 
#[allow(clippy::needless_pass_by_value)] pub fn router(tracker: Arc, access_tokens: Arc) -> Router { @@ -73,4 +78,11 @@ pub fn router(tracker: Arc, access_tokens: Arc) -> Router }), ) .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) + .layer( + ServiceBuilder::new() + // this middleware goes above `TimeoutLayer` because it will receive + // errors returned by `TimeoutLayer` + .layer(HandleErrorLayer::new(|_: BoxError| async { StatusCode::REQUEST_TIMEOUT })) + .layer(TimeoutLayer::new(TIMEOUT)), + ) } From 23d5e5e14d93d66dd51ec5095b3d3e199e727db3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 May 2024 15:31:15 +0100 Subject: [PATCH 0852/1003] fix: [#613] add timeout for time waiting for the first HTTP tracker request Adds a timeout to the HTTP tracker for the time the server waits for the first request from the client after opening a new HTTP connection. It also adds a tower middleware for timeouts in requests. --- src/servers/http/server.rs | 7 +++++-- src/servers/http/v1/routes.rs | 16 ++++++++++++++-- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 7c8148f22..a68f7d16c 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -12,6 +12,7 @@ use tokio::sync::oneshot::{Receiver, Sender}; use super::v1::routes::router; use crate::bootstrap::jobs::Started; use crate::core::Tracker; +use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::{graceful_shutdown, Halted}; @@ -60,13 +61,15 @@ impl Launcher { let running = Box::pin(async { match tls { - Some(tls) => axum_server::from_tcp_rustls(socket, tls) + Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) .handle(handle) + .acceptor(TimeoutAcceptor) .serve(app.into_make_service_with_connect_info::()) .await .expect("Axum server crashed."), - None
=> axum_server::from_tcp(socket) + None => custom_axum_server::from_tcp_with_timeouts(socket) .handle(handle) + .acceptor(TimeoutAcceptor) .serve(app.into_make_service_with_connect_info::()) .await .expect("Axum server crashed."), diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 05cd38713..c54da51a3 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -3,12 +3,15 @@ use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; +use axum::error_handling::HandleErrorLayer; use axum::http::HeaderName; use axum::response::Response; use axum::routing::get; -use axum::Router; +use axum::{BoxError, Router}; use axum_client_ip::SecureClientIpSource; -use hyper::Request; +use hyper::{Request, StatusCode}; +use tower::timeout::TimeoutLayer; +use tower::ServiceBuilder; use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; @@ -18,6 +21,8 @@ use tracing::{Level, Span}; use super::handlers::{announce, health_check, scrape}; use crate::core::Tracker; +const TIMEOUT: Duration = Duration::from_secs(5); + /// It adds the routes to the router. 
/// /// > **NOTICE**: it's added a layer to get the client IP from the connection @@ -69,4 +74,11 @@ pub fn router(tracker: Arc, server_socket_addr: SocketAddr) -> Router { }), ) .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) + .layer( + ServiceBuilder::new() + // this middleware goes above `TimeoutLayer` because it will receive + // errors returned by `TimeoutLayer` + .layer(HandleErrorLayer::new(|_: BoxError| async { StatusCode::REQUEST_TIMEOUT })) + .layer(TimeoutLayer::new(TIMEOUT)), + ) } From 23c52b191c62ca39b7b9947f3167b4bd6cf8fbae Mon Sep 17 00:00:00 2001 From: Gabriel Grondin Date: Sun, 19 May 2024 19:34:08 +0200 Subject: [PATCH 0853/1003] Fix README HTTP port --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f83dbd936..306a8620c 100644 --- a/README.md +++ b/README.md @@ -154,7 +154,7 @@ The following services are provided by the default configuration: - UDP _(tracker)_ - `udp://127.0.0.1:6969/announce`. - HTTP _(tracker)_ - - `http://127.0.0.1:6969/announce`. + - `http://127.0.0.1:7070/announce`. - API _(management)_ - `http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken`. From 9be936622b5199d12f022aa197927aaf15a09915 Mon Sep 17 00:00:00 2001 From: Gabriel GRONDIN Date: Mon, 20 May 2024 18:12:54 +0200 Subject: [PATCH 0854/1003] Fix and improve the bootstrap jobs module test. On an OS with a language other than English, the BadTlsConfig source error message will differ from the compared one. This fixes the issue by matching on the enum itself instead of relying on a string comparison.
--- src/bootstrap/jobs/mod.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index d288989b5..e20d243c6 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -50,7 +50,7 @@ mod tests { use camino::Utf8PathBuf; use torrust_tracker_configuration::TslConfig; - use super::make_rust_tls; + use super::{make_rust_tls, Error}; #[tokio::test] async fn it_should_error_on_bad_tls_config() { @@ -65,9 +65,7 @@ mod tests { .expect("tls_was_enabled") .expect_err("bad_cert_and_key_files"); - assert!(err - .to_string() - .contains("bad tls config: No such file or directory (os error 2)")); + assert!(matches!(err, Error::BadTlsConfig { source: _ })); } #[tokio::test] @@ -83,7 +81,7 @@ mod tests { .expect("tls_was_enabled") .expect_err("missing_config"); - assert_eq!(err.to_string(), "tls config missing"); + assert!(matches!(err, Error::MissingTlsConfig { location: _ })); } } From 932e66e14533ea15f3acff526b852e8ecafb938b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 May 2024 15:44:08 +0100 Subject: [PATCH 0855/1003] feat: [#870] add privacy methods to the TrackerMode The tracker mode can be: - Public (Non-whitelisted) - Listed (Whitelisted) - Private (Non-whitelisted) - PrivateListed (Whitelisted) They should have been two different flags (in my opinion): - Visibility: public or private - Whitelisted: true or false So we would have the same four combinations: - Not whitelisted: - Public - Private - Whitelisted - Public - Private That's a pending refactor. For this commit, the goal is just to align this enum with what we added to the Index so we can use this enum in the Index via the primitives crate.
--- packages/primitives/src/lib.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index 47a837a9b..aa3af27e3 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -85,3 +85,21 @@ pub enum TrackerMode { #[serde(rename = "private_listed")] PrivateListed, } + +impl Default for TrackerMode { + fn default() -> Self { + Self::Public + } +} + +impl TrackerMode { + #[must_use] + pub fn is_open(&self) -> bool { + matches!(self, TrackerMode::Public | TrackerMode::Listed) + } + + #[must_use] + pub fn is_close(&self) -> bool { + !self.is_open() + } +} From 74d8f7918bd6c25c8dea8d208659ec6a3199bead Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 May 2024 16:05:39 +0100 Subject: [PATCH 0856/1003] feat: [#870] remove Copy trait from TrackerMode --- packages/primitives/src/lib.rs | 2 +- src/core/mod.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index aa3af27e3..eccd220f9 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -67,7 +67,7 @@ pub type PersistentTorrents = BTreeMap; /// /// Refer to [Torrust Tracker Configuration](https://docs.rs/torrust-tracker-configuration) /// to know how to configure the tracker to run in each mode. -#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] +#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Debug)] pub enum TrackerMode { /// Will track every new info hash and serve every peer. 
#[serde(rename = "public")] diff --git a/src/core/mod.rs b/src/core/mod.rs index 18a6028f7..2b61e3031 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -547,7 +547,7 @@ impl Tracker { ) -> Result { let database = Arc::new(databases::driver::build(&config.db_driver, &config.db_path)?); - let mode = config.mode; + let mode = config.mode.clone(); Ok(Tracker { //config, From 0c9da2f48b04e149991f5cc9a86b25ee8d2ceb43 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 May 2024 16:19:20 +0100 Subject: [PATCH 0857/1003] feat: [#870] implement traits Display and FromStr for TrackerMode --- packages/primitives/src/lib.rs | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index eccd220f9..454635e8d 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -5,6 +5,8 @@ //! by the tracker server crate, but also by other crates in the Torrust //! ecosystem. use std::collections::BTreeMap; +use std::fmt; +use std::str::FromStr; use std::time::Duration; use info_hash::InfoHash; @@ -92,6 +94,32 @@ impl Default for TrackerMode { } } +impl fmt::Display for TrackerMode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let display_str = match self { + TrackerMode::Public => "public", + TrackerMode::Listed => "listed", + TrackerMode::Private => "private", + TrackerMode::PrivateListed => "private_listed", + }; + write!(f, "{display_str}") + } +} + +impl FromStr for TrackerMode { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "public" => Ok(TrackerMode::Public), + "listed" => Ok(TrackerMode::Listed), + "private" => Ok(TrackerMode::Private), + "private_listed" => Ok(TrackerMode::PrivateListed), + _ => Err(format!("Unknown tracker mode: {s}")), + } + } +} + impl TrackerMode { #[must_use] pub fn is_open(&self) -> bool { From 9e71e718afd420266cbc1e1e43dcce073e0f8e3f Mon Sep 17 00:00:00 2001 From: Jose
Celano Date: Fri, 31 May 2024 09:23:18 +0100 Subject: [PATCH 0858/1003] chore(deps): update dependencies ```output $ cargo update Updating crates.io index Updating addr2line v0.21.0 -> v0.22.0 (latest: v0.23.0) Updating anyhow v1.0.83 -> v1.0.86 Updating async-channel v2.3.0 -> v2.3.1 Updating async-compression v0.4.10 -> v0.4.11 Updating async-executor v1.11.0 -> v1.12.0 Updating backtrace v0.3.71 -> v0.3.72 Updating blocking v1.6.0 -> v1.6.1 Updating brotli-decompressor v4.0.0 -> v4.0.1 Updating camino v1.1.6 -> v1.1.7 Updating cc v1.0.97 -> v1.0.98 Updating clang-sys v1.7.0 -> v1.8.1 Updating crc32fast v1.4.0 -> v1.4.2 Updating crossbeam-channel v0.5.12 -> v0.5.13 Updating crossbeam-utils v0.8.19 -> v0.8.20 Updating darling v0.20.8 -> v0.20.9 Updating darling_core v0.20.8 -> v0.20.9 Updating darling_macro v0.20.8 -> v0.20.9 Updating either v1.11.0 -> v1.12.0 Updating event-listener v5.3.0 -> v5.3.1 Updating figment v0.10.18 -> v0.10.19 Updating gimli v0.28.1 -> v0.29.0 (latest: v0.30.0) Updating h2 v0.4.4 -> v0.4.5 Updating hyper-util v0.1.3 -> v0.1.5 Updating instant v0.1.12 -> v0.1.13 Updating libc v0.2.154 -> v0.2.155 Updating libz-sys v1.1.16 -> v1.1.18 Updating linux-raw-sys v0.4.13 -> v0.4.14 (latest: v0.6.4) Updating miniz_oxide v0.7.2 -> v0.7.3 Updating native-tls v0.2.11 -> v0.2.12 Updating object v0.32.2 -> v0.35.0 (latest: v0.36.0) Updating parking_lot v0.12.2 -> v0.12.3 Updating plotters v0.3.5 -> v0.3.6 Updating plotters-backend v0.3.5 -> v0.3.6 Updating plotters-svg v0.3.5 -> v0.3.6 Updating proc-macro2 v1.0.82 -> v1.0.84 Updating ringbuf v0.4.0 -> v0.4.1 Updating rstest v0.19.0 -> v0.20.0 Updating rstest_macros v0.19.0 -> v0.20.0 Updating rustversion v1.0.16 -> v1.0.17 Updating serde v1.0.201 -> v1.0.203 Updating serde_derive v1.0.201 -> v1.0.203 Updating serde_spanned v0.6.5 -> v0.6.6 Removing strsim v0.10.0 Updating syn v2.0.63 -> v2.0.66 Updating thiserror v1.0.60 -> v1.0.61 Updating thiserror-impl v1.0.60 -> v1.0.61 Updating tokio v1.37.0 -> 
v1.38.0 Updating tokio-macros v2.2.0 -> v2.3.0 Updating toml v0.8.12 -> v0.8.13 Updating toml_datetime v0.6.5 -> v0.6.6 Updating toml_edit v0.22.12 -> v0.22.13 Updating winnow v0.6.8 -> v0.6.9 ``` --- Cargo.lock | 290 ++++++++++++++++++++++++++--------------------------- 1 file changed, 141 insertions(+), 149 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 523ea575d..4e08b1b4d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" dependencies = [ "gimli", ] @@ -136,9 +136,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.83" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25bdb32cbbdce2b519a9cd7df3a678443100e265d5e25ca763b7572a5104f5f3" +checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" [[package]] name = "aquatic_peer_id" @@ -201,12 +201,11 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f2776ead772134d55b62dd45e59a79e21612d85d0af729b8b7d3967d601a62a" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" dependencies = [ "concurrent-queue", - "event-listener 5.3.0", "event-listener-strategy 0.5.2", "futures-core", "pin-project-lite", @@ -214,9 +213,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c90a406b4495d129f00461241616194cb8a032c8d1c53c657f0961d5f8e0498" +checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5" dependencies = [ "brotli", "flate2", @@ -230,9 
+229,9 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b10202063978b3351199d68f8b22c4e47e4b1b822f8d43fd862d5ea8c006b29a" +checksum = "c8828ec6e544c02b0d6691d21ed9f9218d0384a82542855073c2a3f58304aaf0" dependencies = [ "async-task", "concurrent-queue", @@ -247,7 +246,7 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ - "async-channel 2.3.0", + "async-channel 2.3.1", "async-executor", "async-io 2.3.2", "async-lock 3.3.0", @@ -357,7 +356,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -480,7 +479,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -508,9 +507,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.71" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" +checksum = "17c6a35df3749d2e8bb1b7b21a976d82b15548788d2735b9d82f329268f71a11" dependencies = [ "addr2line", "cc", @@ -567,7 +566,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -605,12 +604,11 @@ dependencies = [ [[package]] name = "blocking" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "495f7104e962b7356f0aeb34247aca1fe7d2e783b346582db7f2904cb5717e88" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" dependencies = [ - "async-channel 2.3.0", - "async-lock 3.3.0", + "async-channel 2.3.1", "async-task", "futures-io", "futures-lite 2.3.0", @@ -637,7 +635,7 @@ dependencies = [ "proc-macro-crate 
3.1.0", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", "syn_derive", ] @@ -654,9 +652,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "4.0.0" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6221fe77a248b9117d431ad93761222e1cf8ff282d9d1d5d9f53d6299a1cf76" +checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -716,9 +714,9 @@ checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "camino" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" dependencies = [ "serde", ] @@ -740,9 +738,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.97" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" +checksum = "41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f" dependencies = [ "jobserver", "libc", @@ -812,9 +810,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" dependencies = [ "glob", "libc", @@ -840,7 +838,7 @@ dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.11.1", + "strsim", ] [[package]] @@ -852,7 +850,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -931,9 +929,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.4.0" +version = "1.4.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] @@ -991,9 +989,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.12" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ "crossbeam-utils", ] @@ -1038,9 +1036,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crunchy" @@ -1060,9 +1058,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391" +checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" dependencies = [ "darling_core", "darling_macro", @@ -1070,27 +1068,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f" +checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", - "syn 2.0.63", + "strsim", + "syn 2.0.66", ] [[package]] name = "darling_macro" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" +checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ "darling_core", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -1137,7 +1135,7 @@ checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -1158,9 +1156,9 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "either" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" +checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" [[package]] name = "encoding_rs" @@ -1226,9 +1224,9 @@ dependencies = [ [[package]] name = "event-listener" -version = "5.3.0" +version = "5.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d9944b8ca13534cdfb2800775f8dd4902ff3fc75a50101466decadfdf322a24" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" dependencies = [ "concurrent-queue", "parking", @@ -1251,7 +1249,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ - "event-listener 5.3.0", + "event-listener 5.3.1", "pin-project-lite", ] @@ -1293,9 +1291,9 @@ dependencies = [ [[package]] name = "figment" -version = "0.10.18" +version = "0.10.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d032832d74006f99547004d49410a4b4218e4c33382d56ca3ff89df74f86b953" +checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3" dependencies = [ "atomic", "parking_lot", @@ -1389,7 +1387,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ 
"frunk_proc_macro_helpers", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -1401,7 +1399,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -1413,7 +1411,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -1506,7 +1504,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -1568,9 +1566,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" [[package]] name = "glob" @@ -1592,15 +1590,15 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "816ec7294445779408f36fe57bc5b7fc1cf59664059096c65f905c1c61f58069" +checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" dependencies = [ + "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "futures-util", "http", "indexmap 2.2.6", "slab", @@ -1770,9 +1768,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" +checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" dependencies = [ "bytes", "futures-channel", @@ -1857,9 +1855,9 @@ checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" [[package]] name = "instant" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if", ] @@ -2045,9 +2043,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.154" +version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libloading" @@ -2072,9 +2070,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.16" +version = "1.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" +checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" dependencies = [ "cc", "pkg-config", @@ -2089,9 +2087,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "local-ip-address" @@ -2159,9 +2157,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" dependencies = [ "adler", ] @@ -2201,7 +2199,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -2252,7 +2250,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", 
"termcolor", "thiserror", ] @@ -2306,11 +2304,10 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" dependencies = [ - "lazy_static", "libc", "log", "openssl", @@ -2409,9 +2406,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.2" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "b8ec7ab813848ba4522158d5517a6093db1ded27575b070f4177b8d12b41db5e" dependencies = [ "memchr", ] @@ -2451,7 +2448,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -2480,9 +2477,9 @@ checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" [[package]] name = "parking_lot" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core", @@ -2521,7 +2518,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -2595,7 +2592,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -2629,9 +2626,9 @@ checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "plotters" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" dependencies = [ "num-traits", "plotters-backend", @@ -2642,15 +2639,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" +checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" [[package]] name = "plotters-svg" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" dependencies = [ "plotters-backend", ] @@ -2769,9 +2766,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.82" +version = "1.0.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" +checksum = "ec96c6a92621310b51366f1e28d05ef11489516e93be030060e5fc12024a49d6" dependencies = [ "unicode-ident", ] @@ -2784,7 +2781,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", "version_check", "yansi", ] @@ -3029,9 +3026,9 @@ dependencies = [ [[package]] name = "ringbuf" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2542bc32f4c763f52a2eb375cb0b76c5aa5771f569af74299e84dca51d988a2f" +checksum = "5c65e4c865bc3d2e3294493dff0acf7e6c259d066e34e22059fa9c39645c3636" dependencies = [ "crossbeam-utils", ] @@ -3067,9 +3064,9 @@ dependencies = [ [[package]] name = "rstest" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9d5316d2a1479eeef1ea21e7f9ddc67c191d497abc8fc3ba2467857abbb68330" +checksum = "27059f51958c5f8496a6f79511e7c0ac396dd815dc8894e9b6e2efb5779cf6f0" dependencies = [ "futures", "futures-timer", @@ -3079,18 +3076,19 @@ dependencies = [ [[package]] name = "rstest_macros" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04a9df72cc1f67020b0d63ad9bfe4a323e459ea7eb68e03bd9824db49f9a4c25" +checksum = "e6132d64df104c0b3ea7a6ad7766a43f587bd773a4a9cf4cd59296d426afaf3a" dependencies = [ "cfg-if", "glob", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", "regex", "relative-path", "rustc_version", - "syn 2.0.63", + "syn 2.0.66", "unicode-ident", ] @@ -3168,7 +3166,7 @@ dependencies = [ "bitflags 2.5.0", "errno", "libc", - "linux-raw-sys 0.4.13", + "linux-raw-sys 0.4.14", "windows-sys 0.52.0", ] @@ -3212,9 +3210,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "092474d1a01ea8278f69e6a358998405fae5b8b963ddaeb2b0b04a128bf1dfb0" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "ryu" @@ -3308,9 +3306,9 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.201" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "780f1cebed1629e4753a1a38a3c72d30b97ec044f0aef68cb26650a3c5cf363c" +checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" dependencies = [ "serde_derive", ] @@ -3336,13 +3334,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.201" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865" +checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -3388,14 +3386,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] name = "serde_spanned" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" dependencies = [ "serde", ] @@ -3439,7 +3437,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -3538,12 +3536,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - [[package]] name = "strsim" version = "0.11.1" @@ -3573,9 +3565,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.63" +version = "2.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf5be731623ca1a1fb7d8be6f261a3be6d3e2337b8a1f97be944d020c8fcb704" +checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" dependencies = [ "proc-macro2", "quote", @@ -3591,7 +3583,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -3679,22 +3671,22 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.60" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "579e9083ca58dd9dcf91a9923bb9054071b9ebbd800b342194c9feb0ee89fc18" +checksum = 
"c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.60" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2470041c06ec3ac1ab38d0356a6119054dedaea53e12fbefc0de730a1c08524" +checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -3755,9 +3747,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.37.0" +version = "1.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" +checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" dependencies = [ "backtrace", "bytes", @@ -3773,13 +3765,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -3817,21 +3809,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.12" +version = "0.8.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3" +checksum = "a4e43f8cc456c9704c851ae29c67e17ef65d2c30017c17a9765b89c382dc8bba" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.12", + "toml_edit 0.22.13", ] [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" 
+checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" dependencies = [ "serde", ] @@ -3860,15 +3852,15 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.12" +version = "0.22.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" +checksum = "c127785850e8c20836d49732ae6abfa47616e60bf9d9f57c43c250361a9db96c" dependencies = [ "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.8", + "winnow 0.6.9", ] [[package]] @@ -4093,7 +4085,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] @@ -4261,7 +4253,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", "wasm-bindgen-shared", ] @@ -4295,7 +4287,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4506,9 +4498,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d" +checksum = "86c949fede1d13936a99f14fafd3e76fd642b556dd2ce96287fbe2e0151bfac6" dependencies = [ "memchr", ] @@ -4556,7 +4548,7 @@ checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.63", + "syn 2.0.66", ] [[package]] From a3df726c9108f658274ac2d2870b3767b0398401 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 31 May 2024 09:50:57 +0100 Subject: [PATCH 0859/1003] fix: clippy errors --- cSpell.json | 1 + packages/clock/src/lib.rs | 6 +- packages/test-helpers/src/configuration.rs | 6 +- src/bootstrap/jobs/http_tracker.rs | 2 +- 
src/bootstrap/jobs/tracker_apis.rs | 8 +- src/bootstrap/jobs/udp_tracker.rs | 4 +- src/console/clients/udp/checker.rs | 2 +- src/console/profiling.rs | 8 +- src/core/databases/mod.rs | 4 +- src/core/mod.rs | 8 +- src/core/torrent/mod.rs | 6 +- src/lib.rs | 6 +- src/servers/apis/mod.rs | 22 ++-- .../apis/v1/context/auth_key/handlers.rs | 2 +- src/servers/apis/v1/context/auth_key/mod.rs | 10 +- .../v1/context/torrent/resources/torrent.rs | 4 +- .../apis/v1/context/whitelist/handlers.rs | 4 +- src/servers/apis/v1/context/whitelist/mod.rs | 8 +- src/servers/apis/v1/mod.rs | 2 +- src/servers/http/mod.rs | 54 ++++----- src/servers/http/server.rs | 6 +- .../http/v1/extractors/authentication_key.rs | 6 +- src/servers/http/v1/requests/announce.rs | 8 +- src/servers/http/v1/responses/error.rs | 4 +- src/servers/http/v1/routes.rs | 2 +- src/servers/http/v1/services/announce.rs | 4 +- src/servers/http/v1/services/scrape.rs | 4 +- src/servers/udp/connection_cookie.rs | 10 +- src/servers/udp/handlers.rs | 3 +- src/servers/udp/mod.rs | 108 +++++++++--------- src/servers/udp/peer_builder.rs | 3 +- src/servers/udp/server.rs | 10 +- 32 files changed, 165 insertions(+), 170 deletions(-) diff --git a/cSpell.json b/cSpell.json index 2b5cf55bf..ef807f035 100644 --- a/cSpell.json +++ b/cSpell.json @@ -64,6 +64,7 @@ "Hydranode", "hyperthread", "Icelake", + "iiiiiiiiiiiiiiiiiiiid", "imdl", "impls", "incompletei", diff --git a/packages/clock/src/lib.rs b/packages/clock/src/lib.rs index 9fc67cb54..295d22c16 100644 --- a/packages/clock/src/lib.rs +++ b/packages/clock/src/lib.rs @@ -17,11 +17,11 @@ //! ``` //! //! > **NOTICE**: internally the `Duration` is stores it's main unit as seconds in a `u64` and it will -//! overflow in 584.9 billion years. +//! > overflow in 584.9 billion years. //! //! > **NOTICE**: the timestamp does not depend on the time zone. That gives you -//! the ability to use the clock regardless of the underlying system time zone -//! configuration. 
See [Unix time Wikipedia entry](https://en.wikipedia.org/wiki/Unix_time). +//! > the ability to use the clock regardless of the underlying system time zone +//! > configuration. See [Unix time Wikipedia entry](https://en.wikipedia.org/wiki/Unix_time). pub mod clock; pub mod conv; diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 86ed57b9e..15ecd5280 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -13,11 +13,11 @@ use crate::random; /// > **NOTICE**: This configuration is not meant to be used in production. /// /// > **NOTICE**: Port 0 is used for ephemeral ports, which means that the OS -/// will assign a random free port for the tracker to use. +/// > will assign a random free port for the tracker to use. /// /// > **NOTICE**: You can change the log level to `debug` to see the logs of the -/// tracker while running the tests. That can be particularly useful when -/// debugging tests. +/// > tracker while running the tests. That can be particularly useful when +/// > debugging tests. /// /// # Panics /// diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index d8a976b98..9ae8995fc 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -3,7 +3,7 @@ //! The function [`http_tracker::start_job`](crate::bootstrap::jobs::http_tracker::start_job) starts a new HTTP tracker server. //! //! > **NOTICE**: the application can launch more than one HTTP tracker on different ports. -//! Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) for the configuration options. +//! > Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) for the configuration options. //! //! The [`http_tracker::start_job`](crate::bootstrap::jobs::http_tracker::start_job) function spawns a new asynchronous task, //! that tasks is the "**launcher**". 
The "**launcher**" starts the actual server and sends a message back to the main application. diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 120c960ef..834574edb 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -4,8 +4,8 @@ //! function starts a the HTTP tracker REST API. //! //! > **NOTICE**: that even thought there is only one job the API has different -//! versions. API consumers can choose which version to use. The API version is -//! part of the URL, for example: `http://localhost:1212/api/v1/stats`. +//! > versions. API consumers can choose which version to use. The API version is +//! > part of the URL, for example: `http://localhost:1212/api/v1/stats`. //! //! The [`tracker_apis::start_job`](crate::bootstrap::jobs::tracker_apis::start_job) //! function spawns a new asynchronous task, that tasks is the "**launcher**". @@ -38,8 +38,8 @@ use crate::servers::registar::ServiceRegistrationForm; /// application process to notify the API server was successfully started. /// /// > **NOTICE**: it does not mean the API server is ready to receive requests. -/// It only means the new server started. It might take some time to the server -/// to be ready to accept request. +/// > It only means the new server started. It might take some time to the server +/// > to be ready to accept request. #[derive(Debug)] pub struct ApiServerJobStarted(); diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index bb1cdb492..853cb7461 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -4,8 +4,8 @@ //! function starts a new UDP tracker server. //! //! > **NOTICE**: that the application can launch more than one UDP tracker -//! on different ports. Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) -//! for the configuration options. +//! > on different ports. 
+//! > binary accepts an argument with the duration you want to run the tracker,
+//! > we will implement them or provide a script to migrate to the new schema.
increased on every `announce` and `scrape` requests. +//! > increased on every `announce` and `scrape` requests. //! //! The tracker exposes an event sender API that allows the tracker users to send events. When a higher application service handles a //! `connection` , `announce` or `scrape` requests, it notifies the `Tracker` by sending statistics events. @@ -467,8 +467,8 @@ use crate::CurrentClock; /// authentication and other services. /// /// > **NOTICE**: the `Tracker` is not responsible for handling the network layer. -/// Typically, the `Tracker` is used by a higher application service that handles -/// the network layer. +/// > Typically, the `Tracker` is used by a higher application service that handles +/// > the network layer. pub struct Tracker { announce_policy: AnnouncePolicy, /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index 286a7e047..38311864b 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -20,10 +20,10 @@ //! //! - The number of peers that have completed downloading the torrent since the tracker started collecting metrics. //! - The number of peers that have completed downloading the torrent and are still active, that means they are actively participating in the network, -//! by announcing themselves periodically to the tracker. Since they have completed downloading they have a full copy of the torrent data. Peers with a -//! full copy of the data are called "seeders". +//! by announcing themselves periodically to the tracker. Since they have completed downloading they have a full copy of the torrent data. Peers with a +//! full copy of the data are called "seeders". //! - The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network. -//! Peer that don not have a full copy of the torrent data are called "leechers". +//! 
Peers that do not have a full copy of the torrent data are called "leechers".
That's particularly useful for testing purposes. Once you have the certificate -//! you need to set the [`ssl_cert_path`](torrust_tracker_configuration::HttpApi::tsl_config.ssl_cert_path) -//! and [`ssl_key_path`](torrust_tracker_configuration::HttpApi::tsl_config.ssl_key_path) -//! options in the configuration file with the paths to the certificate -//! (`localhost.crt`) and key (`localhost.key`) files. +//! > OpenSSL. See [Let's Encrypt](https://letsencrypt.org/docs/certificates-for-localhost/). +//! > That's particularly useful for testing purposes. Once you have the certificate +//! > you need to set the [`ssl_cert_path`](torrust_tracker_configuration::HttpApi::tsl_config.ssl_cert_path) +//! > and [`ssl_key_path`](torrust_tracker_configuration::HttpApi::tsl_config.ssl_key_path) +//! > options in the configuration file with the paths to the certificate +//! > (`localhost.crt`) and key (`localhost.key`) files. //! //! # Versioning //! @@ -153,8 +153,8 @@ //! If you want to contribute to this documentation you can [open a new pull request](https://github.com/torrust/torrust-tracker/pulls). //! //! > **NOTICE**: we are using [curl](https://curl.se/) in the API examples. -//! And you have to use quotes around the URL in order to avoid unexpected -//! errors. For example: `curl "http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken"`. +//! > And you have to use quotes around the URL in order to avoid unexpected +//! > errors. For example: `curl "http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken"`. pub mod routes; pub mod server; pub mod v1; diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs index a6c8bf812..792d9507e 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -46,7 +46,7 @@ pub async fn generate_auth_key_handler(State(tracker): State>, Path /// /// - `POST /api/v1/key/120`. It will generate a new key valid for two minutes. 
/// - `DELETE /api/v1/key/xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6`. It will delete the -/// key `xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6`. +/// key `xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6`. /// /// > **NOTICE**: this may change in the future, in the [API v2](https://github.com/torrust/torrust-tracker/issues/144). #[derive(Deserialize)] diff --git a/src/servers/apis/v1/context/auth_key/mod.rs b/src/servers/apis/v1/context/auth_key/mod.rs index 11bc8a43f..330249b58 100644 --- a/src/servers/apis/v1/context/auth_key/mod.rs +++ b/src/servers/apis/v1/context/auth_key/mod.rs @@ -51,9 +51,9 @@ //! ``` //! //! > **NOTICE**: `valid_until` and `expiry_time` represent the same time. -//! `valid_until` is the number of seconds since the Unix epoch -//! ([timestamp](https://en.wikipedia.org/wiki/Timestamp)), while `expiry_time` -//! is the human-readable time ([ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html)). +//! > `valid_until` is the number of seconds since the Unix epoch +//! > ([timestamp](https://en.wikipedia.org/wiki/Timestamp)), while `expiry_time` +//! > is the human-readable time ([ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html)). //! //! **Resource** //! @@ -96,8 +96,8 @@ //! ``` //! //! > **NOTICE**: a `500` status code will be returned and the body is not a -//! valid JSON. It's a text body containing the serialized-to-display error -//! message. +//! > valid JSON. It's a text body containing the serialized-to-display error +//! > message. //! //! # Reload authentication keys //! diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index 2f1ace5c9..0d65b3eb6 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -2,8 +2,8 @@ //! //! - `Torrent` is the full torrent resource. //! - `ListItem` is a list item resource on a torrent list. `ListItem` does -//! 
include a `peers` field but it is always `None` in the struct and `null` in -//! the JSON response. +//! include a `peers` field but it is always `None` in the struct and `null` in +//! the JSON response. use serde::{Deserialize, Serialize}; use crate::core::services::torrent::{BasicInfo, Info}; diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs index c88f8cc1d..32e434918 100644 --- a/src/servers/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -42,7 +42,7 @@ pub async fn add_torrent_to_whitelist_handler( /// /// - `200` response with a [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) in json. /// - `500` with serialized error in debug format if the torrent couldn't be -/// removed from the whitelisted. +/// removed from the whitelisted. /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#remove-a-torrent-from-the-whitelist) /// for more information about this endpoint. @@ -65,7 +65,7 @@ pub async fn remove_torrent_from_whitelist_handler( /// /// - `200` response with a [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) in json. /// - `500` with serialized error in debug format if the torrent whitelist -/// couldn't be reloaded from the database. +/// couldn't be reloaded from the database. /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#reload-the-whitelist) /// for more information about this endpoint. diff --git a/src/servers/apis/v1/context/whitelist/mod.rs b/src/servers/apis/v1/context/whitelist/mod.rs index 2bb35ef65..79da43fdc 100644 --- a/src/servers/apis/v1/context/whitelist/mod.rs +++ b/src/servers/apis/v1/context/whitelist/mod.rs @@ -11,12 +11,12 @@ //! torrents in the whitelist. The whitelist can be updated using the API. //! //! 
+//! > to know how to enable those modes.
defines only the `ip` and `event` parameters as optional. However, the -//! tracker assigns default values to the optional parameters if they are not -//! provided. +//! > defines only the `ip` and `event` parameters as optional. However, the +//! > tracker assigns default values to the optional parameters if they are not +//! > provided. //! //! > **NOTICE**: the `peer_addr` parameter is not part of the original -//! specification. But the peer IP was added in the -//! [UDP Tracker protocol](https://www.bittorrent.org/beps/bep_0015.html). It is -//! used to provide the peer's IP address to the tracker, but it is ignored by -//! the tracker. The tracker uses the IP address of the peer that sent the -//! request or the right-most-ip in the `X-Forwarded-For` header if the tracker -//! is behind a reverse proxy. +//! > specification. But the peer IP was added in the +//! > [UDP Tracker protocol](https://www.bittorrent.org/beps/bep_0015.html). It is +//! > used to provide the peer's IP address to the tracker, but it is ignored by +//! > the tracker. The tracker uses the IP address of the peer that sent the +//! > request or the right-most-ip in the `X-Forwarded-For` header if the tracker +//! > is behind a reverse proxy. //! //! > **NOTICE**: the maximum number of peers that the tracker can return is -//! `74`. Defined with a hardcoded const [`TORRENT_PEERS_LIMIT`](torrust_tracker_configuration::TORRENT_PEERS_LIMIT). -//! Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) -//! for more information about this limitation. +//! > `74`. Defined with a hardcoded const [`TORRENT_PEERS_LIMIT`](torrust_tracker_configuration::TORRENT_PEERS_LIMIT). +//! > Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) +//! > for more information about this limitation. //! //! > **NOTICE**: the `info_hash` parameter is NOT a `URL` encoded string param. -//! It is percent encode of the raw `info_hash` bytes (40 bytes). URL `GET` params -//! 
+//! > It is the percent encoding of the raw `info_hash` bytes (40 bytes). URL `GET` params
> **NOTICE**: you can scrape multiple torrents at the same time by passing -//! multiple `info_hash` parameters. +//! > multiple `info_hash` parameters. //! //! Refer to the [`Scrape`](crate::servers::http::v1::requests::scrape::Scrape) //! request for more information about the parameters. @@ -238,7 +238,7 @@ //! `info_hash` parameters: `info_hash=%81%00%0...00%00%00&info_hash=%82%00%0...00%00%00` //! //! > **NOTICE**: the maximum number of torrents you can scrape at the same time -//! is `74`. Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). +//! > is `74`. Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). //! //! **Sample response** //! diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index a68f7d16c..33e20a84e 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -27,7 +27,7 @@ use crate::servers::signals::{graceful_shutdown, Halted}; /// /// - The channel to send the shutdown signal to the server is closed. /// - The task to shutdown the server on the spawned server failed to execute to -/// completion. +/// completion. #[derive(Debug)] pub enum Error { Error(String), @@ -107,8 +107,8 @@ pub type RunningHttpServer = HttpServer; /// server but always keeping the same configuration. /// /// > **NOTICE**: if the configurations changes after running the server it will -/// reset to the initial value after stopping the server. This struct is not -/// intended to persist configurations between runs. +/// > reset to the initial value after stopping the server. This struct is not +/// > intended to persist configurations between runs. #[allow(clippy::module_name_repetitions)] pub struct HttpServer { /// The state of the server: `running` or `stopped`. 
+/// > specifies that only the peer `IP` and `event` are optional. However, the
#[derive(Debug, PartialEq)] pub struct Announce { // Mandatory params diff --git a/src/servers/http/v1/responses/error.rs b/src/servers/http/v1/responses/error.rs index 1cc31ad4e..c406c797a 100644 --- a/src/servers/http/v1/responses/error.rs +++ b/src/servers/http/v1/responses/error.rs @@ -9,8 +9,8 @@ //! why the query failed, and no other keys are required."_ //! //! > **NOTICE**: error responses are bencoded and always have a `200 OK` status -//! code. The official `BitTorrent` specification does not specify the status -//! code. +//! > code. The official `BitTorrent` specification does not specify the status +//! > code. use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; use serde::Serialize; diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index c54da51a3..14641dc1d 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -26,7 +26,7 @@ const TIMEOUT: Duration = Duration::from_secs(5); /// It adds the routes to the router. /// /// > **NOTICE**: it's added a layer to get the client IP from the connection -/// info. The tracker could use the connection info to get the client IP. +/// > info. The tracker could use the connection info to get the client IP. #[allow(clippy::needless_pass_by_value)] pub fn router(tracker: Arc, server_socket_addr: SocketAddr) -> Router { Router::new() diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index e3bef3973..253140fbc 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -24,8 +24,8 @@ use crate::core::{statistics, AnnounceData, Tracker}; /// - The number of TCP `announce` requests handled by the HTTP tracker. /// /// > **NOTICE**: as the HTTP tracker does not requires a connection request -/// like the UDP tracker, the number of TCP connections is incremented for -/// each `announce` request. 
+//! > minutes**. It'll be valid for the current time extent and the next one.
The connection ID is only valid for a certain -//! amount of time. -//! - It's not very accurate. The connection ID is valid for more than two -//! minutes. +//! - It's not very flexible. The connection ID is only valid for a certain amount of time. +//! - It's not very accurate. The connection ID is valid for more than two minutes. use std::net::SocketAddr; use std::panic::Location; diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index fee00a0bd..4064cf041 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -31,8 +31,7 @@ use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; /// It's responsible for: /// /// - Parsing the incoming packet. -/// - Delegating the request to the correct handler depending on the request -/// type. +/// - Delegating the request to the correct handler depending on the request type. /// /// It will return an `Error` response if the request is invalid. pub(crate) async fn handle_packet(udp_request: UdpRequest, tracker: &Arc, socket: Arc) -> Response { diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index fa4e8e926..3062a4393 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -5,7 +5,7 @@ //! The UDP tracker is a simple UDP server that responds to these requests: //! //! - `Connect`: used to get a connection ID which must be provided on each -//! request in order to avoid spoofing the source address of the UDP packets. +//! request in order to avoid spoofing the source address of the UDP packets. //! - `Announce`: used to announce the presence of a peer to the tracker. //! - `Scrape`: used to get information about a torrent. //! @@ -22,10 +22,10 @@ //! for more information about the UDP tracker protocol. //! //! > **NOTICE**: [BEP-41](https://www.bittorrent.org/beps/bep_0041.html) is not -//! implemented yet. +//! > implemented yet. //! //! > **NOTICE**: we are using the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol) -//! 
crate so requests and responses are handled by it. +//! > crate so requests and responses are handled by it. //! //! > **NOTICE**: all values are send in network byte order ([big endian](https://en.wikipedia.org/wiki/Endianness)). //! @@ -83,23 +83,23 @@ //! spoofing can be explained as follows: //! //! 1. No connection state: Unlike TCP, UDP is a connectionless protocol, -//! meaning that it does not establish a connection between two endpoints before -//! exchanging data. As a result, it is more susceptible to IP spoofing, where -//! an attacker sends packets with a forged source IP address, tricking the -//! receiver into believing that they are coming from a legitimate source. +//! meaning that it does not establish a connection between two endpoints before +//! exchanging data. As a result, it is more susceptible to IP spoofing, where +//! an attacker sends packets with a forged source IP address, tricking the +//! receiver into believing that they are coming from a legitimate source. //! //! 2. Mitigating IP spoofing: To mitigate IP spoofing in the UDP tracker -//! protocol, a connection ID is used. When a client wants to interact with a -//! tracker, it sends a "connect" request to the tracker, which, in turn, -//! responds with a unique connection ID. This connection ID must be included in -//! all subsequent requests from the client to the tracker. +//! protocol, a connection ID is used. When a client wants to interact with a +//! tracker, it sends a "connect" request to the tracker, which, in turn, +//! responds with a unique connection ID. This connection ID must be included in +//! all subsequent requests from the client to the tracker. //! //! 3. Validating requests: By requiring the connection ID, the tracker can -//! verify that the requests are coming from the same client that initially sent -//! the "connect" request. If an attacker attempts to spoof the client's IP -//! 
address, they would also need to know the valid connection ID to be accepted -//! by the tracker. This makes it significantly more challenging for an attacker -//! to spoof IP addresses and disrupt the P2P network. +//! verify that the requests are coming from the same client that initially sent +//! the "connect" request. If an attacker attempts to spoof the client's IP +//! address, they would also need to know the valid connection ID to be accepted +//! by the tracker. This makes it significantly more challenging for an attacker +//! to spoof IP addresses and disrupt the P2P network. //! //! There are different ways to generate a connection ID. The most common way is //! to generate a time bound secret. The secret is generated using a time based @@ -161,9 +161,9 @@ //! 8 | [`i32`](std::i64) | `connection_id` | Generated by the tracker to authenticate the client. | `0xC5_58_7C_09_08_48_D8_37` | `-4226491872051668937` //! //! > **NOTICE**: the `connection_id` is used when further information is -//! exchanged with the tracker, to identify the client. This `connection_id` can -//! be reused for multiple requests, but if it's cached for too long, it will -//! not be valid anymore. +//! > exchanged with the tracker, to identify the client. This `connection_id` can +//! > be reused for multiple requests, but if it's cached for too long, it will +//! > not be valid anymore. //! //! > **NOTICE**: `Hex` column is a signed 2's complement. //! @@ -243,41 +243,41 @@ //! circumstances might include: //! //! 1. Network Address Translation (NAT): In cases where a peer is behind a NAT, -//! the private IP address of the peer is not directly routable over the -//! internet. The NAT device translates the private IP address to a public one -//! when sending packets to the tracker. The public IP address is what the -//! tracker sees as the source IP of the incoming request. However, if the peer -//! provides its private IP address in the announce request, the tracker can use -//! 
this information to facilitate communication between peers in the same -//! private network. +//! the private IP address of the peer is not directly routable over the +//! internet. The NAT device translates the private IP address to a public one +//! when sending packets to the tracker. The public IP address is what the +//! tracker sees as the source IP of the incoming request. However, if the peer +//! provides its private IP address in the announce request, the tracker can use +//! this information to facilitate communication between peers in the same +//! private network. //! //! 2. Proxy or VPN usage: If a peer uses a proxy or VPN service to connect to -//! the tracker, the source IP address seen by the tracker will be the one -//! assigned by the proxy or VPN server. In this case, if the peer provides its -//! actual IP address in the announce request, the tracker can use it to -//! establish a direct connection with other peers, bypassing the proxy or VPN -//! server. This might improve performance or help in cases where some peers -//! cannot connect to the proxy or VPN server. +//! the tracker, the source IP address seen by the tracker will be the one +//! assigned by the proxy or VPN server. In this case, if the peer provides its +//! actual IP address in the announce request, the tracker can use it to +//! establish a direct connection with other peers, bypassing the proxy or VPN +//! server. This might improve performance or help in cases where some peers +//! cannot connect to the proxy or VPN server. //! //! 3. Tracker is behind a NAT, firewall, proxy, VPN, or load balancer: In cases -//! where the tracker is behind a NAT, firewall, proxy, VPN, or load balancer, -//! the source IP address of the incoming request will be the public IP address -//! of the NAT, firewall, proxy, VPN, or load balancer. If the peer provides its -//! private IP address in the announce request, the tracker can use this -//! 
information to establish a direct connection with the peer. +//! where the tracker is behind a NAT, firewall, proxy, VPN, or load balancer, +//! the source IP address of the incoming request will be the public IP address +//! of the NAT, firewall, proxy, VPN, or load balancer. If the peer provides its +//! private IP address in the announce request, the tracker can use this +//! information to establish a direct connection with the peer. //! //! It's important to note that using the provided IP address can pose security //! risks, as malicious peers might spoof their IP addresses in the announce //! request to perform various types of attacks. //! //! > **NOTICE**: The current tracker behavior is to ignore the IP address -//! provided by the peer, and use the source IP address of the incoming request, -//! when the tracker is not running behind a proxy, and to use the right-most IP -//! address in the `X-Forwarded-For` header when the tracker is running behind a -//! proxy. +//! > provided by the peer, and use the source IP address of the incoming request, +//! > when the tracker is not running behind a proxy, and to use the right-most IP +//! > address in the `X-Forwarded-For` header when the tracker is running behind a +//! > proxy. //! //! > **NOTICE**: The tracker also changes the peer IP address to the tracker -//! external IP when the peer is using a loopback IP address. +//! > external IP when the peer is using a loopback IP address. //! //! **Sample announce request (UDP packet)** //! @@ -317,11 +317,11 @@ //! 101 | N bytes | | | | //! //! > **NOTICE**: bytes after offset 98 are part of the [BEP-41. UDP Tracker Protocol Extensions](https://www.bittorrent.org/beps/bep_0041.html). -//! There are three options defined for byte 98: `0x0` (`EndOfOptions`), `0x1` (`NOP`) and `0x2` (`URLData`). +//! > There are three options defined for byte 98: `0x0` (`EndOfOptions`), `0x1` (`NOP`) and `0x2` (`URLData`). //! //! 
> **NOTICE**: `num_want` is being ignored by the tracker. Refer to -//! [issue 262](https://github.com/torrust/torrust-tracker/issues/262) for more -//! information. +//! > [issue 262](https://github.com/torrust/torrust-tracker/issues/262) for more +//! > information. //! //! **Announce request (parsed struct)** //! @@ -342,7 +342,7 @@ //! `port` | [`Port`](aquatic_udp_protocol::common::Port) | `17548` //! //! > **NOTICE**: the `peers_wanted` field is the `num_want` field in the UDP -//! packet. +//! > packet. //! //! We are using a wrapper struct for the aquatic [`AnnounceRequest`](aquatic_udp_protocol::request::AnnounceRequest) //! struct, because we have our internal [`InfoHash`](torrust_tracker_primitives::info_hash::InfoHash) @@ -374,7 +374,7 @@ //! > **NOTICE**: `Hex` column is a signed 2's complement. //! //! > **NOTICE**: `IP address` should always be set to 0 when the peer is using -//! `IPv6`. +//! > `IPv6`. //! //! **Sample announce response (UDP packet)** //! @@ -413,7 +413,7 @@ //! ``` //! //! > **NOTICE**: there are 6 bytes per peer (4 bytes for the `IPv4` address and -//! 2 bytes for the TCP port). +//! > 2 bytes for the TCP port). //! //! UDP packet fields (`IPv4` peer list): //! @@ -433,7 +433,7 @@ //! ``` //! //! > **NOTICE**: there are 18 bytes per peer (16 bytes for the `IPv6` address and -//! 2 bytes for the TCP port). +//! > 2 bytes for the TCP port). //! //! UDP packet fields (`IPv6` peer list): //! @@ -446,7 +446,7 @@ //! > **NOTICE**: `Hex` column is a signed 2's complement. //! //! > **NOTICE**: the peer list does not include the peer that sent the announce -//! request. +//! > request. //! //! **Announce response (struct)** //! @@ -478,10 +478,10 @@ //! - [incomplete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::incomplete) //! //! > **NOTICE**: up to about 74 torrents can be scraped at once. A full scrape -//! can't be done with this protocol. This is a limitation of the UDP protocol. -//! 
Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). -//! Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) -//! for more information about this limitation. +//! > can't be done with this protocol. This is a limitation of the UDP protocol. +//! > Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). +//! > Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) +//! > for more information about this limitation. //! //! #### Scrape Request //! diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs index 104f42a73..e54a23443 100644 --- a/src/servers/udp/peer_builder.rs +++ b/src/servers/udp/peer_builder.rs @@ -14,8 +14,7 @@ use crate::CurrentClock; /// # Arguments /// /// * `announce_wrapper` - The announce request to extract the peer info from. -/// * `peer_ip` - The real IP address of the peer, not the one in the announce -/// request. +/// * `peer_ip` - The real IP address of the peer, not the one in the announce request. #[must_use] pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> peer::Peer { let announce_event = match aquatic_udp_protocol::AnnounceEvent::from(announce_wrapper.announce_request.event) { diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index b02b9802d..be4c36d40 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -2,8 +2,7 @@ //! //! There are two main types in this module: //! -//! - [`UdpServer`]: a controller to -//! start and stop the server. +//! - [`UdpServer`]: a controller to start and stop the server. //! - [`Udp`]: the server launcher. //! //! The `UdpServer` is an state machine for a given configuration. 
This struct @@ -49,8 +48,7 @@ use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; /// /// Some errors triggered while stopping the server are: /// -/// - The [`UdpServer`] cannot send the -/// shutdown signal to the spawned UDP service thread. +/// - The [`UdpServer`] cannot send the shutdown signal to the spawned UDP service thread. #[derive(Debug)] pub enum Error { /// Any kind of error starting or stopping the server. @@ -78,8 +76,8 @@ pub type RunningUdpServer = UdpServer; /// server but always keeping the same configuration. /// /// > **NOTICE**: if the configurations changes after running the server it will -/// reset to the initial value after stopping the server. This struct is not -/// intended to persist configurations between runs. +/// > reset to the initial value after stopping the server. This struct is not +/// > intended to persist configurations between runs. #[allow(clippy::module_name_repetitions)] pub struct UdpServer { /// The state of the server: `running` or `stopped`. 
From 4de7793f79c4731e33e139fb26183747fac080fc Mon Sep 17 00:00:00 2001 From: Mario Date: Mon, 20 May 2024 17:22:03 +0200 Subject: [PATCH 0860/1003] feat: [#670] new JSON serialization for connect and error aquatic responses --- src/console/clients/udp/app.rs | 15 ++++++++++--- src/console/clients/udp/responses.rs | 32 +++++++++++++++++++++++++++- 2 files changed, 43 insertions(+), 4 deletions(-) diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index f07044d09..9621cec52 100644 --- a/src/console/clients/udp/app.rs +++ b/src/console/clients/udp/app.rs @@ -60,7 +60,7 @@ use std::net::{SocketAddr, ToSocketAddrs}; use std::str::FromStr; use anyhow::Context; -use aquatic_udp_protocol::Response::{self, AnnounceIpv4, AnnounceIpv6, Scrape}; +use aquatic_udp_protocol::Response::{self, AnnounceIpv4, AnnounceIpv6, Connect, Error, Scrape}; use aquatic_udp_protocol::{Port, TransactionId}; use clap::{Parser, Subcommand}; use log::{debug, LevelFilter}; @@ -68,7 +68,7 @@ use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; use url::Url; use crate::console::clients::udp::checker; -use crate::console::clients::udp::responses::{AnnounceResponseDto, ScrapeResponseDto}; +use crate::console::clients::udp::responses::{AnnounceResponseDto, ConnectResponseDto, ErrorResponseDto, ScrapeResponseDto}; const ASSIGNED_BY_OS: u16 = 0; const RANDOM_TRANSACTION_ID: i32 = -888_840_697; @@ -171,6 +171,11 @@ async fn handle_scrape(tracker_socket_addr: &SocketAddr, info_hashes: &[TorrustI fn print_response(response: Response) -> anyhow::Result<()> { match response { + Connect(response) => { + let pretty_json = serde_json::to_string_pretty(&ConnectResponseDto::from(response)) + .context("connect response JSON serialization")?; + println!("{pretty_json}"); + } AnnounceIpv4(response) => { let pretty_json = serde_json::to_string_pretty(&AnnounceResponseDto::from(response)) .context("announce IPv4 response JSON serialization")?; @@ -186,7 +191,11 @@ fn 
print_response(response: Response) -> anyhow::Result<()> { serde_json::to_string_pretty(&ScrapeResponseDto::from(response)).context("scrape response JSON serialization")?; println!("{pretty_json}"); } - _ => println!("{response:#?}"), // todo: serialize to JSON all aquatic responses. + Error(response) => { + let pretty_json = + serde_json::to_string_pretty(&ErrorResponseDto::from(response)).context("error response JSON serialization")?; + println!("{pretty_json}"); + } }; Ok(()) diff --git a/src/console/clients/udp/responses.rs b/src/console/clients/udp/responses.rs index 8ea1a978b..eb6b386fd 100644 --- a/src/console/clients/udp/responses.rs +++ b/src/console/clients/udp/responses.rs @@ -1,9 +1,24 @@ //! Aquatic responses are not serializable. These are the serializable wrappers. use std::net::{Ipv4Addr, Ipv6Addr}; -use aquatic_udp_protocol::{AnnounceResponse, Ipv4AddrBytes, Ipv6AddrBytes, ScrapeResponse}; +use aquatic_udp_protocol::{AnnounceResponse, ConnectResponse, ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, ScrapeResponse}; use serde::Serialize; +#[derive(Serialize)] +pub struct ConnectResponseDto { + transaction_id: i32, + connection_id: i64, +} + +impl From for ConnectResponseDto { + fn from(connect: ConnectResponse) -> Self { + Self { + transaction_id: connect.transaction_id.0.into(), + connection_id: connect.connection_id.0.into(), + } + } +} + #[derive(Serialize)] pub struct AnnounceResponseDto { transaction_id: i32, @@ -68,6 +83,21 @@ impl From for ScrapeResponseDto { } } +#[derive(Serialize)] +pub struct ErrorResponseDto { + transaction_id: i32, + message: String, +} + +impl From for ErrorResponseDto { + fn from(error: ErrorResponse) -> Self { + Self { + transaction_id: error.transaction_id.0.into(), + message: error.message.to_string(), + } + } +} + #[derive(Serialize)] struct TorrentStats { seeders: i32, From 625db48761f7272d1467d62afc45e2a5d3f720eb Mon Sep 17 00:00:00 2001 From: Mario Date: Mon, 27 May 2024 15:54:56 +0200 Subject: [PATCH 0861/1003] 
refactor: [#670] new trait for printing responses in JSON format and enum for Dto wrapper --- src/console/clients/udp/responses.rs | 37 ++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/src/console/clients/udp/responses.rs b/src/console/clients/udp/responses.rs index eb6b386fd..5a39a04d4 100644 --- a/src/console/clients/udp/responses.rs +++ b/src/console/clients/udp/responses.rs @@ -1,9 +1,46 @@ //! Aquatic responses are not serializable. These are the serializable wrappers. use std::net::{Ipv4Addr, Ipv6Addr}; +use anyhow::Context; +use aquatic_udp_protocol::Response::{self}; use aquatic_udp_protocol::{AnnounceResponse, ConnectResponse, ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, ScrapeResponse}; use serde::Serialize; +pub trait DtoToJson { + fn print_response(&self) -> anyhow::Result<()> + where + Self: Serialize, + { + let pretty_json = serde_json::to_string_pretty(self).context("response JSON serialization")?; + println!("{pretty_json}"); + + Ok(()) + } +} + +#[derive(Serialize)] +pub enum ResponseDto { + Connect(ConnectResponseDto), + AnnounceIpv4(AnnounceResponseDto), + AnnounceIpv6(AnnounceResponseDto), + Scrape(ScrapeResponseDto), + Error(ErrorResponseDto), +} + +impl From for ResponseDto { + fn from(response: Response) -> Self { + match response { + Response::Connect(response) => ResponseDto::Connect(ConnectResponseDto::from(response)), + Response::AnnounceIpv4(response) => ResponseDto::AnnounceIpv4(AnnounceResponseDto::from(response)), + Response::AnnounceIpv6(response) => ResponseDto::AnnounceIpv6(AnnounceResponseDto::from(response)), + Response::Scrape(response) => ResponseDto::Scrape(ScrapeResponseDto::from(response)), + Response::Error(response) => ResponseDto::Error(ErrorResponseDto::from(response)), + } + } +} + +impl DtoToJson for ResponseDto {} + #[derive(Serialize)] pub struct ConnectResponseDto { transaction_id: i32, From 08e87ca01f7bc8b8bf1ae72e1e4c442adfa3356b Mon Sep 17 00:00:00 2001 From: Mario Date: Mon, 27 May 2024 
15:59:10 +0200 Subject: [PATCH 0862/1003] refactor: [#670] new print_response function from trait implemented --- src/console/clients/udp/app.rs | 40 ++++------------------------------ 1 file changed, 4 insertions(+), 36 deletions(-) diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index 9621cec52..1675fd9ed 100644 --- a/src/console/clients/udp/app.rs +++ b/src/console/clients/udp/app.rs @@ -60,15 +60,14 @@ use std::net::{SocketAddr, ToSocketAddrs}; use std::str::FromStr; use anyhow::Context; -use aquatic_udp_protocol::Response::{self, AnnounceIpv4, AnnounceIpv6, Connect, Error, Scrape}; -use aquatic_udp_protocol::{Port, TransactionId}; +use aquatic_udp_protocol::{Port, Response, TransactionId}; use clap::{Parser, Subcommand}; use log::{debug, LevelFilter}; use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; use url::Url; use crate::console::clients::udp::checker; -use crate::console::clients::udp::responses::{AnnounceResponseDto, ConnectResponseDto, ErrorResponseDto, ScrapeResponseDto}; +use crate::console::clients::udp::responses::{DtoToJson, ResponseDto}; const ASSIGNED_BY_OS: u16 = 0; const RANDOM_TRANSACTION_ID: i32 = -888_840_697; @@ -117,7 +116,8 @@ pub async fn run() -> anyhow::Result<()> { } => handle_scrape(&tracker_socket_addr, &info_hashes).await?, }; - print_response(response) + let response_dto: ResponseDto = response.into(); + response_dto.print_response() } fn setup_logging(level: LevelFilter) { @@ -169,38 +169,6 @@ async fn handle_scrape(tracker_socket_addr: &SocketAddr, info_hashes: &[TorrustI .await } -fn print_response(response: Response) -> anyhow::Result<()> { - match response { - Connect(response) => { - let pretty_json = serde_json::to_string_pretty(&ConnectResponseDto::from(response)) - .context("connect response JSON serialization")?; - println!("{pretty_json}"); - } - AnnounceIpv4(response) => { - let pretty_json = serde_json::to_string_pretty(&AnnounceResponseDto::from(response)) - 
.context("announce IPv4 response JSON serialization")?; - println!("{pretty_json}"); - } - AnnounceIpv6(response) => { - let pretty_json = serde_json::to_string_pretty(&AnnounceResponseDto::from(response)) - .context("announce IPv6 response JSON serialization")?; - println!("{pretty_json}"); - } - Scrape(response) => { - let pretty_json = - serde_json::to_string_pretty(&ScrapeResponseDto::from(response)).context("scrape response JSON serialization")?; - println!("{pretty_json}"); - } - Error(response) => { - let pretty_json = - serde_json::to_string_pretty(&ErrorResponseDto::from(response)).context("error response JSON serialization")?; - println!("{pretty_json}"); - } - }; - - Ok(()) -} - fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result { debug!("Tracker socket address: {tracker_socket_addr_str:#?}"); From 74f4cb0aaf5de8ea0e9c09833f2e98f27e80ade3 Mon Sep 17 00:00:00 2001 From: Mario Date: Mon, 27 May 2024 17:29:17 +0200 Subject: [PATCH 0863/1003] refactor: [#670] added error message for print_response function --- src/console/clients/udp/responses.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/console/clients/udp/responses.rs b/src/console/clients/udp/responses.rs index 5a39a04d4..83e4da506 100644 --- a/src/console/clients/udp/responses.rs +++ b/src/console/clients/udp/responses.rs @@ -7,6 +7,10 @@ use aquatic_udp_protocol::{AnnounceResponse, ConnectResponse, ErrorResponse, Ipv use serde::Serialize; pub trait DtoToJson { + /// # Errors + /// + /// Will return an error if serialization fails. 
+ /// fn print_response(&self) -> anyhow::Result<()> where Self: Serialize, From 5a529cc4c8676177820a8e3770c49573f152a479 Mon Sep 17 00:00:00 2001 From: Mario Date: Tue, 4 Jun 2024 00:43:25 +0200 Subject: [PATCH 0864/1003] refactor: [#670] new mod for responses logic and refactors to json serialization trait --- src/console/clients/udp/app.rs | 9 +++++-- .../udp/{responses.rs => responses/dto.rs} | 19 --------------- src/console/clients/udp/responses/json.rs | 24 +++++++++++++++++++ src/console/clients/udp/responses/mod.rs | 2 ++ 4 files changed, 33 insertions(+), 21 deletions(-) rename src/console/clients/udp/{responses.rs => responses/dto.rs} (90%) create mode 100644 src/console/clients/udp/responses/json.rs create mode 100644 src/console/clients/udp/responses/mod.rs diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index 1675fd9ed..b4d08b26d 100644 --- a/src/console/clients/udp/app.rs +++ b/src/console/clients/udp/app.rs @@ -67,7 +67,8 @@ use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; use url::Url; use crate::console::clients::udp::checker; -use crate::console::clients::udp::responses::{DtoToJson, ResponseDto}; +use crate::console::clients::udp::responses::dto::ResponseDto; +use crate::console::clients::udp::responses::json::ToJson; const ASSIGNED_BY_OS: u16 = 0; const RANDOM_TRANSACTION_ID: i32 = -888_840_697; @@ -117,7 +118,11 @@ pub async fn run() -> anyhow::Result<()> { }; let response_dto: ResponseDto = response.into(); - response_dto.print_response() + let response_json = response_dto.to_json_string()?; + + print!("{response_json}"); + + Ok(()) } fn setup_logging(level: LevelFilter) { diff --git a/src/console/clients/udp/responses.rs b/src/console/clients/udp/responses/dto.rs similarity index 90% rename from src/console/clients/udp/responses.rs rename to src/console/clients/udp/responses/dto.rs index 83e4da506..989231061 100644 --- a/src/console/clients/udp/responses.rs +++ 
b/src/console/clients/udp/responses/dto.rs @@ -1,27 +1,10 @@ //! Aquatic responses are not serializable. These are the serializable wrappers. use std::net::{Ipv4Addr, Ipv6Addr}; -use anyhow::Context; use aquatic_udp_protocol::Response::{self}; use aquatic_udp_protocol::{AnnounceResponse, ConnectResponse, ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, ScrapeResponse}; use serde::Serialize; -pub trait DtoToJson { - /// # Errors - /// - /// Will return an error if serialization fails. - /// - fn print_response(&self) -> anyhow::Result<()> - where - Self: Serialize, - { - let pretty_json = serde_json::to_string_pretty(self).context("response JSON serialization")?; - println!("{pretty_json}"); - - Ok(()) - } -} - #[derive(Serialize)] pub enum ResponseDto { Connect(ConnectResponseDto), @@ -43,8 +26,6 @@ impl From for ResponseDto { } } -impl DtoToJson for ResponseDto {} - #[derive(Serialize)] pub struct ConnectResponseDto { transaction_id: i32, diff --git a/src/console/clients/udp/responses/json.rs b/src/console/clients/udp/responses/json.rs new file mode 100644 index 000000000..1e25acac2 --- /dev/null +++ b/src/console/clients/udp/responses/json.rs @@ -0,0 +1,24 @@ +use anyhow::Context; +use serde::Serialize; + +use super::dto::ResponseDto; + +pub trait ToJson { + /// + /// Returns a string with the JSON serialized version of the response + /// + /// # Errors + /// + /// Will return an error if serialization fails. 
+ /// + fn to_json_string(&self) -> anyhow::Result + where + Self: Serialize, + { + let pretty_json = serde_json::to_string_pretty(self).context("response JSON serialization")?; + + Ok(pretty_json) + } +} + +impl ToJson for ResponseDto {} diff --git a/src/console/clients/udp/responses/mod.rs b/src/console/clients/udp/responses/mod.rs new file mode 100644 index 000000000..e6d2e5e51 --- /dev/null +++ b/src/console/clients/udp/responses/mod.rs @@ -0,0 +1,2 @@ +pub mod dto; +pub mod json; From 32416ee2239942891896260e9dfc4e0bf31d9ad1 Mon Sep 17 00:00:00 2001 From: Mario Date: Tue, 4 Jun 2024 12:32:35 +0200 Subject: [PATCH 0865/1003] refactor: [#670] changed DTOs and variable names --- src/console/clients/udp/app.rs | 6 ++-- src/console/clients/udp/responses/dto.rs | 42 +++++++++++------------ src/console/clients/udp/responses/json.rs | 4 +-- 3 files changed, 26 insertions(+), 26 deletions(-) diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index b4d08b26d..d2c986cd9 100644 --- a/src/console/clients/udp/app.rs +++ b/src/console/clients/udp/app.rs @@ -67,7 +67,7 @@ use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; use url::Url; use crate::console::clients::udp::checker; -use crate::console::clients::udp::responses::dto::ResponseDto; +use crate::console::clients::udp::responses::dto::SerializableResponse; use crate::console::clients::udp::responses::json::ToJson; const ASSIGNED_BY_OS: u16 = 0; @@ -117,8 +117,8 @@ pub async fn run() -> anyhow::Result<()> { } => handle_scrape(&tracker_socket_addr, &info_hashes).await?, }; - let response_dto: ResponseDto = response.into(); - let response_json = response_dto.to_json_string()?; + let response: SerializableResponse = response.into(); + let response_json = response.to_json_string()?; print!("{response_json}"); diff --git a/src/console/clients/udp/responses/dto.rs b/src/console/clients/udp/responses/dto.rs index 989231061..93320b0f7 100644 --- 
a/src/console/clients/udp/responses/dto.rs +++ b/src/console/clients/udp/responses/dto.rs @@ -6,33 +6,33 @@ use aquatic_udp_protocol::{AnnounceResponse, ConnectResponse, ErrorResponse, Ipv use serde::Serialize; #[derive(Serialize)] -pub enum ResponseDto { - Connect(ConnectResponseDto), - AnnounceIpv4(AnnounceResponseDto), - AnnounceIpv6(AnnounceResponseDto), - Scrape(ScrapeResponseDto), - Error(ErrorResponseDto), +pub enum SerializableResponse { + Connect(ConnectSerializableResponse), + AnnounceIpv4(AnnounceSerializableResponse), + AnnounceIpv6(AnnounceSerializableResponse), + Scrape(ScrapeSerializableResponse), + Error(ErrorSerializableResponse), } -impl From for ResponseDto { +impl From for SerializableResponse { fn from(response: Response) -> Self { match response { - Response::Connect(response) => ResponseDto::Connect(ConnectResponseDto::from(response)), - Response::AnnounceIpv4(response) => ResponseDto::AnnounceIpv4(AnnounceResponseDto::from(response)), - Response::AnnounceIpv6(response) => ResponseDto::AnnounceIpv6(AnnounceResponseDto::from(response)), - Response::Scrape(response) => ResponseDto::Scrape(ScrapeResponseDto::from(response)), - Response::Error(response) => ResponseDto::Error(ErrorResponseDto::from(response)), + Response::Connect(response) => SerializableResponse::Connect(ConnectSerializableResponse::from(response)), + Response::AnnounceIpv4(response) => SerializableResponse::AnnounceIpv4(AnnounceSerializableResponse::from(response)), + Response::AnnounceIpv6(response) => SerializableResponse::AnnounceIpv6(AnnounceSerializableResponse::from(response)), + Response::Scrape(response) => SerializableResponse::Scrape(ScrapeSerializableResponse::from(response)), + Response::Error(response) => SerializableResponse::Error(ErrorSerializableResponse::from(response)), } } } #[derive(Serialize)] -pub struct ConnectResponseDto { +pub struct ConnectSerializableResponse { transaction_id: i32, connection_id: i64, } -impl From for ConnectResponseDto { +impl From 
for ConnectSerializableResponse { fn from(connect: ConnectResponse) -> Self { Self { transaction_id: connect.transaction_id.0.into(), @@ -42,7 +42,7 @@ impl From for ConnectResponseDto { } #[derive(Serialize)] -pub struct AnnounceResponseDto { +pub struct AnnounceSerializableResponse { transaction_id: i32, announce_interval: i32, leechers: i32, @@ -50,7 +50,7 @@ pub struct AnnounceResponseDto { peers: Vec, } -impl From> for AnnounceResponseDto { +impl From> for AnnounceSerializableResponse { fn from(announce: AnnounceResponse) -> Self { Self { transaction_id: announce.fixed.transaction_id.0.into(), @@ -66,7 +66,7 @@ impl From> for AnnounceResponseDto { } } -impl From> for AnnounceResponseDto { +impl From> for AnnounceSerializableResponse { fn from(announce: AnnounceResponse) -> Self { Self { transaction_id: announce.fixed.transaction_id.0.into(), @@ -83,12 +83,12 @@ impl From> for AnnounceResponseDto { } #[derive(Serialize)] -pub struct ScrapeResponseDto { +pub struct ScrapeSerializableResponse { transaction_id: i32, torrent_stats: Vec, } -impl From for ScrapeResponseDto { +impl From for ScrapeSerializableResponse { fn from(scrape: ScrapeResponse) -> Self { Self { transaction_id: scrape.transaction_id.0.into(), @@ -106,12 +106,12 @@ impl From for ScrapeResponseDto { } #[derive(Serialize)] -pub struct ErrorResponseDto { +pub struct ErrorSerializableResponse { transaction_id: i32, message: String, } -impl From for ErrorResponseDto { +impl From for ErrorSerializableResponse { fn from(error: ErrorResponse) -> Self { Self { transaction_id: error.transaction_id.0.into(), diff --git a/src/console/clients/udp/responses/json.rs b/src/console/clients/udp/responses/json.rs index 1e25acac2..74558c8f5 100644 --- a/src/console/clients/udp/responses/json.rs +++ b/src/console/clients/udp/responses/json.rs @@ -1,7 +1,7 @@ use anyhow::Context; use serde::Serialize; -use super::dto::ResponseDto; +use super::dto::SerializableResponse; pub trait ToJson { /// @@ -21,4 +21,4 @@ pub trait 
ToJson { } } -impl ToJson for ResponseDto {} +impl ToJson for SerializableResponse {} From 0157d96cd29bc79f1396d34088f6d109d6a59d61 Mon Sep 17 00:00:00 2001 From: Mario Date: Tue, 4 Jun 2024 13:16:47 +0200 Subject: [PATCH 0866/1003] refactor: [#670] fix clippy errors --- src/console/clients/udp/responses/json.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/console/clients/udp/responses/json.rs b/src/console/clients/udp/responses/json.rs index 74558c8f5..5d2bd6b89 100644 --- a/src/console/clients/udp/responses/json.rs +++ b/src/console/clients/udp/responses/json.rs @@ -3,6 +3,7 @@ use serde::Serialize; use super::dto::SerializableResponse; +#[allow(clippy::module_name_repetitions)] pub trait ToJson { /// /// Returns a string with the JSON serialized version of the response From f5d843b29e828c712845405a927c07ad867aea3c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Jun 2024 07:50:42 +0100 Subject: [PATCH 0867/1003] docs: add benchmarking to torrent repo README --- packages/torrent-repository/README.md | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-)
+ +## Benchmarking + +```console +cargo bench -p torrust-tracker-torrent-repository +``` + +Example partial output: + +```output + Running benches/repository_benchmark.rs (target/release/deps/repository_benchmark-a9b0013c8d09c3c3) +add_one_torrent/RwLockStd + time: [63.057 ns 63.242 ns 63.506 ns] +Found 12 outliers among 100 measurements (12.00%) + 2 (2.00%) low severe + 2 (2.00%) low mild + 2 (2.00%) high mild + 6 (6.00%) high severe +add_one_torrent/RwLockStdMutexStd + time: [62.505 ns 63.077 ns 63.817 ns] +``` ## Documentation From 6e87d3e1a37d94fd3886a7420214a6e4746c7215 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Jun 2024 08:02:30 +0100 Subject: [PATCH 0868/1003] chore(deps): update dependencies ```output cargo update Updating crates.io index Locking 23 packages to latest compatible versions Updating anstyle-query v1.0.3 -> v1.1.0 Updating async-io v2.3.2 -> v2.3.3 Updating async-lock v3.3.0 -> v3.4.0 Updating borsh v1.5.0 -> v1.5.1 Updating borsh-derive v1.5.0 -> v1.5.1 Updating cc v1.0.98 -> v1.0.99 Updating cfg_aliases v0.1.1 -> v0.2.1 Updating clap v4.5.4 -> v4.5.6 Updating clap_builder v4.5.2 -> v4.5.6 Updating clap_derive v4.5.4 -> v4.5.5 Updating clap_lex v0.7.0 -> v0.7.1 Removing event-listener v4.0.3 Removing event-listener-strategy v0.4.0 Updating piper v0.2.2 -> v0.2.3 Updating polling v3.7.0 -> v3.7.1 Updating proc-macro2 v1.0.84 -> v1.0.85 Updating regex v1.10.4 -> v1.10.5 Updating regex-automata v0.4.6 -> v0.4.7 Updating regex-syntax v0.8.3 -> v0.8.4 Updating rstest v0.20.0 -> v0.21.0 Updating rstest_macros v0.20.0 -> v0.21.0 Updating toml v0.8.13 -> v0.8.14 Updating toml_edit v0.22.13 -> v0.22.14 Updating utf8parse v0.2.1 -> v0.2.2 Updating winnow v0.6.9 -> v0.6.13 ``` --- Cargo.lock | 131 ++++++++++++++++++++++------------------------------- 1 file changed, 55 insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4e08b1b4d..d3f5766d5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -117,9 +117,9 @@ dependencies = [ 
[[package]] name = "anstyle-query" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" +checksum = "ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" dependencies = [ "windows-sys 0.52.0", ] @@ -206,7 +206,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" dependencies = [ "concurrent-queue", - "event-listener-strategy 0.5.2", + "event-listener-strategy", "futures-core", "pin-project-lite", ] @@ -248,8 +248,8 @@ checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ "async-channel 2.3.1", "async-executor", - "async-io 2.3.2", - "async-lock 3.3.0", + "async-io 2.3.3", + "async-lock 3.4.0", "blocking", "futures-lite 2.3.0", "once_cell", @@ -278,17 +278,17 @@ dependencies = [ [[package]] name = "async-io" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcccb0f599cfa2f8ace422d3555572f47424da5648a4382a9dd0310ff8210884" +checksum = "0d6baa8f0178795da0e71bc42c9e5d13261aac7ee549853162e66a241ba17964" dependencies = [ - "async-lock 3.3.0", + "async-lock 3.4.0", "cfg-if", "concurrent-queue", "futures-io", "futures-lite 2.3.0", "parking", - "polling 3.7.0", + "polling 3.7.1", "rustix 0.38.34", "slab", "tracing", @@ -306,12 +306,12 @@ dependencies = [ [[package]] name = "async-lock" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 4.0.3", - "event-listener-strategy 0.4.0", + "event-listener 5.3.1", + "event-listener-strategy", "pin-project-lite", ] @@ -617,9 +617,9 @@ dependencies = [ 
[[package]] name = "borsh" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbe5b10e214954177fb1dc9fbd20a1a2608fe99e6c832033bdc7cea287a20d77" +checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" dependencies = [ "borsh-derive", "cfg_aliases", @@ -627,9 +627,9 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7a8646f94ab393e43e8b35a2558b1624bed28b97ee09c5d15456e3c9463f46d" +checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" dependencies = [ "once_cell", "proc-macro-crate 3.1.0", @@ -738,9 +738,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.98" +version = "1.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f" +checksum = "96c51067fd44124faa7f870b4b1c969379ad32b2ba805aa959430ceaa384f695" dependencies = [ "jobserver", "libc", @@ -764,9 +764,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "cfg_aliases" -version = "0.1.1" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" @@ -821,9 +821,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.4" +version = "4.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" +checksum = "a9689a29b593160de5bc4aacab7b5d54fb52231de70122626c178e6a368994c7" dependencies = [ "clap_builder", "clap_derive", @@ -831,9 +831,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.2" +version = "4.5.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" +checksum = "2e5387378c84f6faa26890ebf9f0a92989f8873d4d380467bcd0d8d8620424df" dependencies = [ "anstream", "anstyle", @@ -843,9 +843,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.4" +version = "4.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" +checksum = "c780290ccf4fb26629baa7a1081e68ced113f1d3ec302fa5948f1c381ebf06c6" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -855,9 +855,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" +checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70" [[package]] name = "cmake" @@ -1211,17 +1211,6 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" -[[package]] -name = "event-listener" -version = "4.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - [[package]] name = "event-listener" version = "5.3.1" @@ -1233,16 +1222,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "event-listener-strategy" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" -dependencies = [ - "event-listener 4.0.3", - "pin-project-lite", -] - [[package]] name = "event-listener-strategy" version = "0.5.2" @@ -2609,9 +2588,9 @@ checksum = 
"8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "piper" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "464db0c665917b13ebb5d453ccdec4add5658ee1adc7affc7677615356a8afaf" +checksum = "ae1d5c74c9876f070d3e8fd503d748c7d974c3e48da8f41350fa5222ef9b4391" dependencies = [ "atomic-waker", "fastrand 2.1.0", @@ -2670,9 +2649,9 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.0" +version = "3.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645493cf344456ef24219d02a768cf1fb92ddf8c92161679ae3d91b91a637be3" +checksum = "5e6a007746f34ed64099e88783b0ae369eaa3da6392868ba262e2af9b8fbaea1" dependencies = [ "cfg-if", "concurrent-queue", @@ -2766,9 +2745,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.84" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec96c6a92621310b51366f1e28d05ef11489516e93be030060e5fc12024a49d6" +checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23" dependencies = [ "unicode-ident", ] @@ -2925,9 +2904,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.4" +version = "1.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" +checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" dependencies = [ "aho-corasick", "memchr", @@ -2937,9 +2916,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", @@ -2948,9 +2927,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.3" 
+version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] name = "relative-path" @@ -3064,9 +3043,9 @@ dependencies = [ [[package]] name = "rstest" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27059f51958c5f8496a6f79511e7c0ac396dd815dc8894e9b6e2efb5779cf6f0" +checksum = "9afd55a67069d6e434a95161415f5beeada95a01c7b815508a82dcb0e1593682" dependencies = [ "futures", "futures-timer", @@ -3076,9 +3055,9 @@ dependencies = [ [[package]] name = "rstest_macros" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6132d64df104c0b3ea7a6ad7766a43f587bd773a4a9cf4cd59296d426afaf3a" +checksum = "4165dfae59a39dd41d8dec720d3cbfbc71f69744efb480a3920f5d4e0cc6798d" dependencies = [ "cfg-if", "glob", @@ -3809,14 +3788,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.13" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4e43f8cc456c9704c851ae29c67e17ef65d2c30017c17a9765b89c382dc8bba" +checksum = "6f49eb2ab21d2f26bd6db7bf383edc527a7ebaee412d17af4d40fdccd442f335" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.13", + "toml_edit 0.22.14", ] [[package]] @@ -3852,15 +3831,15 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.13" +version = "0.22.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c127785850e8c20836d49732ae6abfa47616e60bf9d9f57c43c250361a9db96c" +checksum = "f21c7aaf97f1bd9ca9d4f9e73b0a6c74bd5afef56f2bc931943a6e1c37e04e38" dependencies = [ "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.9", + "winnow 0.6.13", ] [[package]] @@ -4169,9 +4148,9 @@ dependencies = [ [[package]] name = 
"utf8parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" @@ -4498,9 +4477,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.9" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86c949fede1d13936a99f14fafd3e76fd642b556dd2ce96287fbe2e0151bfac6" +checksum = "59b5e5f6c299a3c7890b876a2a587f3115162487e704907d9b6cd29473052ba1" dependencies = [ "memchr", ] From 3ccc0e41599dff371e4f8bc49eaa4d972b2f3627 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Jun 2024 15:12:08 +0100 Subject: [PATCH 0869/1003] chore(deps): add cargo dependency tracing We will move from `log` to `tracing` crate. --- Cargo.lock | 82 +++++++++++++++++++++++++++++++ Cargo.toml | 1 + packages/located-error/Cargo.toml | 1 + 3 files changed, 84 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index d3f5766d5..ab7512536 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2339,6 +2339,16 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + [[package]] name = "num-bigint" version = "0.4.5" @@ -2448,6 +2458,12 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + [[package]] name = "parking" version = "2.2.0" @@ -3441,6 +3457,15 @@ dependencies = [ "digest", ] +[[package]] +name = 
"sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + [[package]] name = "shlex" version = "1.3.0" @@ -3668,6 +3693,16 @@ dependencies = [ "syn 2.0.66", ] +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + [[package]] name = "time" version = "0.3.36" @@ -3899,6 +3934,7 @@ dependencies = [ "tower-http", "trace", "tracing", + "tracing-subscriber", "url", "uuid", "zerocopy", @@ -3943,6 +3979,7 @@ version = "3.0.0-alpha.12-develop" dependencies = [ "log", "thiserror", + "tracing", ] [[package]] @@ -4074,6 +4111,45 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +dependencies = [ + "nu-ansi-term", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing-core", + "tracing-log", + "tracing-serde", ] [[package]] @@ -4162,6 +4238,12 
@@ dependencies = [ "rand", ] +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + [[package]] name = "value-bag" version = "1.9.0" diff --git a/Cargo.toml b/Cargo.toml index 5183c6067..6166831e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -80,6 +80,7 @@ tower = { version = "0.4.13", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } trace = "0" tracing = "0" +tracing-subscriber = { version = "0.3.18", features = ["json"] } url = "2" uuid = { version = "1", features = ["v4"] } zerocopy = "0.7.33" diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index fa3d1d76d..f34f9bc88 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -16,6 +16,7 @@ version.workspace = true [dependencies] log = { version = "0", features = ["release_max_level_info"] } +tracing = "0.1.40" [dev-dependencies] thiserror = "1" From 6e06b2e7aaeacb5307e494e28c4f45f08a96892b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Jun 2024 15:30:25 +0100 Subject: [PATCH 0870/1003] refactor: [#884] move from log to tracing crate --- packages/located-error/src/lib.rs | 2 +- src/app.rs | 2 +- src/bootstrap/jobs/health_check_api.rs | 2 +- src/bootstrap/jobs/http_tracker.rs | 2 +- src/bootstrap/jobs/mod.rs | 2 +- src/bootstrap/jobs/torrent_cleanup.rs | 2 +- src/bootstrap/jobs/tracker_apis.rs | 2 +- src/bootstrap/jobs/udp_tracker.rs | 2 +- src/bootstrap/logging.rs | 73 ++++++++++++------- src/console/ci/e2e/docker.rs | 2 +- src/console/ci/e2e/logs_parser.rs | 34 ++++----- src/console/ci/e2e/runner.rs | 27 ++----- src/console/ci/e2e/tracker_checker.rs | 2 +- src/console/ci/e2e/tracker_container.rs | 2 +- src/console/clients/checker/app.rs | 27 ++----- src/console/clients/checker/checks/http.rs | 2 +- 
src/console/clients/checker/checks/udp.rs | 2 +- src/console/clients/udp/app.rs | 27 ++----- src/console/clients/udp/checker.rs | 2 +- src/console/profiling.rs | 2 +- src/core/auth.rs | 2 +- src/core/databases/mysql.rs | 2 +- src/core/mod.rs | 2 +- src/core/statistics.rs | 2 +- src/main.rs | 2 +- src/servers/apis/server.rs | 2 +- .../apis/v1/context/torrent/handlers.rs | 2 +- src/servers/health_check_api/server.rs | 3 +- src/servers/http/server.rs | 2 +- src/servers/http/v1/handlers/announce.rs | 2 +- src/servers/http/v1/handlers/scrape.rs | 2 +- src/servers/registar.rs | 2 +- src/servers/signals.rs | 2 +- src/servers/udp/handlers.rs | 2 +- src/servers/udp/server.rs | 2 +- src/shared/bit_torrent/tracker/udp/client.rs | 2 +- tests/servers/health_check_api/environment.rs | 2 +- 37 files changed, 112 insertions(+), 141 deletions(-) diff --git a/packages/located-error/src/lib.rs b/packages/located-error/src/lib.rs index 49e135600..bfd4d4a86 100644 --- a/packages/located-error/src/lib.rs +++ b/packages/located-error/src/lib.rs @@ -33,7 +33,7 @@ use std::error::Error; use std::panic::Location; use std::sync::Arc; -use log::debug; +use tracing::debug; pub type DynError = Arc; diff --git a/src/app.rs b/src/app.rs index fcb01a696..b41f4098e 100644 --- a/src/app.rs +++ b/src/app.rs @@ -23,9 +23,9 @@ //! - Tracker REST API: the tracker API can be enabled/disabled. use std::sync::Arc; -use log::warn; use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; +use tracing::warn; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::servers::registar::Registar; diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index fdedaa3e9..c22a4cf95 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -14,10 +14,10 @@ //! Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) //! 
for the API configuration options. -use log::info; use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HealthCheckApi; +use tracing::info; use super::Started; use crate::servers::health_check_api::server; diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 9ae8995fc..e9eb6bc16 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -14,9 +14,9 @@ use std::net::SocketAddr; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; -use log::info; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpTracker; +use tracing::info; use super::make_rust_tls; use crate::core; diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index e20d243c6..316e5746c 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -89,10 +89,10 @@ use std::panic::Location; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; -use log::info; use thiserror::Error; use torrust_tracker_configuration::TslConfig; use torrust_tracker_located_error::{DynError, LocatedError}; +use tracing::info; /// Error returned by the Bootstrap Process. 
#[derive(Error, Debug)] diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index bd3b2e332..992e7e644 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -13,9 +13,9 @@ use std::sync::Arc; use chrono::Utc; -use log::info; use tokio::task::JoinHandle; use torrust_tracker_configuration::v1::core::Core; +use tracing::info; use crate::core; diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 834574edb..3c1f13255 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -24,9 +24,9 @@ use std::net::SocketAddr; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; -use log::info; use tokio::task::JoinHandle; use torrust_tracker_configuration::{AccessTokens, HttpApi}; +use tracing::info; use super::make_rust_tls; use crate::core; diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 853cb7461..2c09e6de2 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -8,9 +8,9 @@ //! > for the configuration options. use std::sync::Arc; -use log::debug; use tokio::task::JoinHandle; use torrust_tracker_configuration::UdpTracker; +use tracing::debug; use crate::core; use crate::servers::registar::ServiceRegistrationForm; diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs index 5c7e93811..914ede0c4 100644 --- a/src/bootstrap/logging.rs +++ b/src/bootstrap/logging.rs @@ -12,55 +12,72 @@ //! Refer to the [configuration crate documentation](https://docs.rs/torrust-tracker-configuration) to know how to change log settings. 
use std::sync::Once; -use log::{info, LevelFilter}; use torrust_tracker_configuration::{Configuration, LogLevel}; +use tracing::info; +use tracing::level_filters::LevelFilter; static INIT: Once = Once::new(); /// It redirects the log info to the standard output with the log level defined in the configuration pub fn setup(cfg: &Configuration) { - let level = config_level_or_default(&cfg.core.log_level); + let tracing_level = config_level_or_default(&cfg.core.log_level); - if level == log::LevelFilter::Off { + if tracing_level == LevelFilter::OFF { return; } INIT.call_once(|| { - stdout_config(level); + tracing_stdout_init(tracing_level, &TraceStyle::Default); }); } fn config_level_or_default(log_level: &Option) -> LevelFilter { match log_level { - None => log::LevelFilter::Info, + None => LevelFilter::INFO, Some(level) => match level { - LogLevel::Off => LevelFilter::Off, - LogLevel::Error => LevelFilter::Error, - LogLevel::Warn => LevelFilter::Warn, - LogLevel::Info => LevelFilter::Info, - LogLevel::Debug => LevelFilter::Debug, - LogLevel::Trace => LevelFilter::Trace, + LogLevel::Off => LevelFilter::OFF, + LogLevel::Error => LevelFilter::ERROR, + LogLevel::Warn => LevelFilter::WARN, + LogLevel::Info => LevelFilter::INFO, + LogLevel::Debug => LevelFilter::DEBUG, + LogLevel::Trace => LevelFilter::TRACE, }, } } -fn stdout_config(level: LevelFilter) { - if let Err(_err) = fern::Dispatch::new() - .format(|out, message, record| { - out.finish(format_args!( - "{} [{}][{}] {}", - chrono::Local::now().format("%+"), - record.target(), - record.level(), - message - )); - }) - .level(level) - .chain(std::io::stdout()) - .apply() - { - panic!("Failed to initialize logging.") - } +fn tracing_stdout_init(filter: LevelFilter, style: &TraceStyle) { + let builder = tracing_subscriber::fmt().with_max_level(filter); + + let () = match style { + TraceStyle::Default => builder.init(), + TraceStyle::Pretty(display_filename) => builder.pretty().with_file(*display_filename).init(), + 
TraceStyle::Compact => builder.compact().init(), + TraceStyle::Json => builder.json().init(), + }; info!("logging initialized."); } + +#[derive(Debug)] +pub enum TraceStyle { + Default, + Pretty(bool), + Compact, + Json, +} + +impl std::fmt::Display for TraceStyle { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let style = match self { + TraceStyle::Default => "Default Style", + TraceStyle::Pretty(path) => match path { + true => "Pretty Style with File Paths", + false => "Pretty Style without File Paths", + }, + TraceStyle::Compact => "Compact Style", + TraceStyle::Json => "Json Format", + }; + + f.write_str(style) + } +} diff --git a/src/console/ci/e2e/docker.rs b/src/console/ci/e2e/docker.rs index c024efbae..26b7f7708 100644 --- a/src/console/ci/e2e/docker.rs +++ b/src/console/ci/e2e/docker.rs @@ -4,7 +4,7 @@ use std::process::{Command, Output}; use std::thread::sleep; use std::time::{Duration, Instant}; -use log::{debug, info}; +use tracing::{debug, info}; /// Docker command wrapper. pub struct Docker {} diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index 6d3349196..2d215a569 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -1,9 +1,9 @@ //! Utilities to parse Torrust Tracker logs. 
use serde::{Deserialize, Serialize}; -const UDP_TRACKER_PATTERN: &str = "[UDP TRACKER][INFO] Starting on: udp://"; -const HTTP_TRACKER_PATTERN: &str = "[HTTP TRACKER][INFO] Starting on: "; -const HEALTH_CHECK_PATTERN: &str = "[HEALTH CHECK API][INFO] Starting on: "; +const UDP_TRACKER_PATTERN: &str = "INFO UDP TRACKER: Starting on: udp://"; +const HTTP_TRACKER_PATTERN: &str = "INFO HTTP TRACKER: Starting on: "; +const HEALTH_CHECK_PATTERN: &str = "INFO HEALTH CHECK API: Starting on: "; #[derive(Serialize, Deserialize, Debug, Default)] pub struct RunningServices { @@ -18,17 +18,17 @@ impl RunningServices { /// For example, from this logs: /// /// ```text - /// Loading default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... - /// 2024-01-24T16:36:14.614898789+00:00 [torrust_tracker::bootstrap::logging][INFO] logging initialized. - /// 2024-01-24T16:36:14.615586025+00:00 [UDP TRACKER][INFO] Starting on: udp://0.0.0.0:6969 - /// 2024-01-24T16:36:14.615623705+00:00 [torrust_tracker::bootstrap::jobs][INFO] TLS not enabled - /// 2024-01-24T16:36:14.615694484+00:00 [HTTP TRACKER][INFO] Starting on: http://0.0.0.0:7070 - /// 2024-01-24T16:36:14.615710534+00:00 [HTTP TRACKER][INFO] Started on: http://0.0.0.0:7070 - /// 2024-01-24T16:36:14.615716574+00:00 [torrust_tracker::bootstrap::jobs][INFO] TLS not enabled - /// 2024-01-24T16:36:14.615764904+00:00 [API][INFO] Starting on http://127.0.0.1:1212 - /// 2024-01-24T16:36:14.615767264+00:00 [API][INFO] Started on http://127.0.0.1:1212 - /// 2024-01-24T16:36:14.615777574+00:00 [HEALTH CHECK API][INFO] Starting on: http://127.0.0.1:1313 - /// 2024-01-24T16:36:14.615791124+00:00 [HEALTH CHECK API][INFO] Started on: http://127.0.0.1:1313 + /// Loading configuration from default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... + /// 2024-06-10T14:26:10.040894Z INFO torrust_tracker::bootstrap::logging: logging initialized. 
+ /// 2024-06-10T14:26:10.041363Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 + /// 2024-06-10T14:26:10.041386Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + /// 2024-06-10T14:26:10.041420Z INFO HTTP TRACKER: Starting on: http://0.0.0.0:7070 + /// 2024-06-10T14:26:10.041516Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 + /// 2024-06-10T14:26:10.041521Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + /// 2024-06-10T14:26:10.041611Z INFO API: Starting on http://127.0.0.1:1212 + /// 2024-06-10T14:26:10.041614Z INFO API: Started on http://127.0.0.1:1212 + /// 2024-06-10T14:26:10.041623Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 + /// 2024-06-10T14:26:10.041657Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 /// ``` /// /// It would extract these services: @@ -86,9 +86,9 @@ mod tests { #[test] fn it_should_parse_from_logs_with_valid_logs() { let logs = "\ - [UDP TRACKER][INFO] Starting on: udp://0.0.0.0:8080\n\ - [HTTP TRACKER][INFO] Starting on: 0.0.0.0:9090\n\ - [HEALTH CHECK API][INFO] Starting on: 0.0.0.0:10010"; + INFO UDP TRACKER: Starting on: udp://0.0.0.0:8080\n\ + INFO HTTP TRACKER: Starting on: 0.0.0.0:9090\n\ + INFO HEALTH CHECK API: Starting on: 0.0.0.0:10010"; let running_services = RunningServices::parse_from_logs(logs); assert_eq!(running_services.udp_trackers, vec!["127.0.0.1:8080"]); diff --git a/src/console/ci/e2e/runner.rs b/src/console/ci/e2e/runner.rs index 945a87033..c44ce464e 100644 --- a/src/console/ci/e2e/runner.rs +++ b/src/console/ci/e2e/runner.rs @@ -3,7 +3,8 @@ //! ```text //! cargo run --bin e2e_tests_runner share/default/config/tracker.e2e.container.sqlite3.toml //! ``` -use log::{debug, info, LevelFilter}; +use tracing::info; +use tracing::level_filters::LevelFilter; use super::tracker_container::TrackerContainer; use crate::console::ci::e2e::docker::RunOptions; @@ -32,7 +33,7 @@ pub struct Arguments { /// /// Will panic if it can't not perform any of the operations. 
pub fn run() { - setup_runner_logging(LevelFilter::Info); + tracing_stdout_init(LevelFilter::INFO); let args = parse_arguments(); @@ -76,25 +77,9 @@ pub fn run() { info!("Tracker container final state:\n{:#?}", tracker_container); } -fn setup_runner_logging(level: LevelFilter) { - if let Err(_err) = fern::Dispatch::new() - .format(|out, message, record| { - out.finish(format_args!( - "{} [{}][{}] {}", - chrono::Local::now().format("%+"), - record.target(), - record.level(), - message - )); - }) - .level(level) - .chain(std::io::stdout()) - .apply() - { - panic!("Failed to initialize logging.") - } - - debug!("logging initialized."); +fn tracing_stdout_init(filter: LevelFilter) { + tracing_subscriber::fmt().with_max_level(filter).init(); + info!("logging initialized."); } fn parse_arguments() -> Arguments { diff --git a/src/console/ci/e2e/tracker_checker.rs b/src/console/ci/e2e/tracker_checker.rs index edc679802..b2fd7df2e 100644 --- a/src/console/ci/e2e/tracker_checker.rs +++ b/src/console/ci/e2e/tracker_checker.rs @@ -1,7 +1,7 @@ use std::io; use std::process::Command; -use log::info; +use tracing::info; /// Runs the Tracker Checker. 
/// diff --git a/src/console/ci/e2e/tracker_container.rs b/src/console/ci/e2e/tracker_container.rs index 5a4d11d02..0cb4fec7c 100644 --- a/src/console/ci/e2e/tracker_container.rs +++ b/src/console/ci/e2e/tracker_container.rs @@ -1,8 +1,8 @@ use std::time::Duration; -use log::{debug, error, info}; use rand::distributions::Alphanumeric; use rand::Rng; +use tracing::{debug, error, info}; use super::docker::{RunOptions, RunningContainer}; use super::logs_parser::RunningServices; diff --git a/src/console/clients/checker/app.rs b/src/console/clients/checker/app.rs index 82ea800d0..84802688d 100644 --- a/src/console/clients/checker/app.rs +++ b/src/console/clients/checker/app.rs @@ -17,7 +17,8 @@ use std::sync::Arc; use anyhow::{Context, Result}; use clap::Parser; -use log::{debug, LevelFilter}; +use tracing::info; +use tracing::level_filters::LevelFilter; use super::config::Configuration; use super::console::Console; @@ -40,7 +41,7 @@ struct Args { /// /// Will return an error if the configuration was not provided. 
pub async fn run() -> Result> { - setup_logging(LevelFilter::Info); + tracing_stdout_init(LevelFilter::INFO); let args = Args::parse(); @@ -56,25 +57,9 @@ pub async fn run() -> Result> { Ok(service.run_checks().await) } -fn setup_logging(level: LevelFilter) { - if let Err(_err) = fern::Dispatch::new() - .format(|out, message, record| { - out.finish(format_args!( - "{} [{}][{}] {}", - chrono::Local::now().format("%+"), - record.target(), - record.level(), - message - )); - }) - .level(level) - .chain(std::io::stdout()) - .apply() - { - panic!("Failed to initialize logging.") - } - - debug!("logging initialized."); +fn tracing_stdout_init(filter: LevelFilter) { + tracing_subscriber::fmt().with_max_level(filter).init(); + info!("logging initialized."); } fn setup_config(args: Args) -> Result { diff --git a/src/console/clients/checker/checks/http.rs b/src/console/clients/checker/checks/http.rs index e526b5e57..57f8c3015 100644 --- a/src/console/clients/checker/checks/http.rs +++ b/src/console/clients/checker/checks/http.rs @@ -1,8 +1,8 @@ use std::str::FromStr; -use log::debug; use reqwest::Url as ServiceUrl; use torrust_tracker_primitives::info_hash::InfoHash; +use tracing::debug; use url::Url; use super::structs::{CheckerOutput, Status}; diff --git a/src/console/clients/checker/checks/udp.rs b/src/console/clients/checker/checks/udp.rs index 6458190d4..072aa5ca7 100644 --- a/src/console/clients/checker/checks/udp.rs +++ b/src/console/clients/checker/checks/udp.rs @@ -2,8 +2,8 @@ use std::net::SocketAddr; use aquatic_udp_protocol::{Port, TransactionId}; use hex_literal::hex; -use log::debug; use torrust_tracker_primitives::info_hash::InfoHash; +use tracing::debug; use crate::console::clients::checker::checks::structs::{CheckerOutput, Status}; use crate::console::clients::checker::service::{CheckError, CheckResult}; diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index d2c986cd9..c780157f4 100644 --- a/src/console/clients/udp/app.rs +++ 
b/src/console/clients/udp/app.rs @@ -62,8 +62,9 @@ use std::str::FromStr; use anyhow::Context; use aquatic_udp_protocol::{Port, Response, TransactionId}; use clap::{Parser, Subcommand}; -use log::{debug, LevelFilter}; use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; +use tracing::level_filters::LevelFilter; +use tracing::{debug, info}; use url::Url; use crate::console::clients::udp::checker; @@ -102,7 +103,7 @@ enum Command { /// /// pub async fn run() -> anyhow::Result<()> { - setup_logging(LevelFilter::Info); + tracing_stdout_init(LevelFilter::INFO); let args = Args::parse(); @@ -125,25 +126,9 @@ pub async fn run() -> anyhow::Result<()> { Ok(()) } -fn setup_logging(level: LevelFilter) { - if let Err(_err) = fern::Dispatch::new() - .format(|out, message, record| { - out.finish(format_args!( - "{} [{}][{}] {}", - chrono::Local::now().format("%+"), - record.target(), - record.level(), - message - )); - }) - .level(level) - .chain(std::io::stdout()) - .apply() - { - panic!("Failed to initialize logging.") - } - - debug!("logging initialized."); +fn tracing_stdout_init(filter: LevelFilter) { + tracing_subscriber::fmt().with_max_level(filter).init(); + info!("logging initialized."); } async fn handle_announce(tracker_socket_addr: &SocketAddr, info_hash: &TorrustInfoHash) -> anyhow::Result { diff --git a/src/console/clients/udp/checker.rs b/src/console/clients/udp/checker.rs index 37928f0df..afde63d12 100644 --- a/src/console/clients/udp/checker.rs +++ b/src/console/clients/udp/checker.rs @@ -6,9 +6,9 @@ use aquatic_udp_protocol::{ AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, Response, ScrapeRequest, TransactionId, }; -use log::debug; use thiserror::Error; use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; +use tracing::debug; use crate::shared::bit_torrent::tracker::udp::client::{UdpClient, UdpTrackerClient}; diff --git 
a/src/console/profiling.rs b/src/console/profiling.rs index d77e55966..c95354d6f 100644 --- a/src/console/profiling.rs +++ b/src/console/profiling.rs @@ -159,8 +159,8 @@ use std::env; use std::time::Duration; -use log::info; use tokio::time::sleep; +use tracing::info; use crate::{app, bootstrap}; diff --git a/src/core/auth.rs b/src/core/auth.rs index b5326a373..94d455d7e 100644 --- a/src/core/auth.rs +++ b/src/core/auth.rs @@ -42,7 +42,6 @@ use std::sync::Arc; use std::time::Duration; use derive_more::Display; -use log::debug; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; @@ -51,6 +50,7 @@ use torrust_tracker_clock::clock::Time; use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use torrust_tracker_located_error::{DynError, LocatedError}; use torrust_tracker_primitives::DurationSinceUnixEpoch; +use tracing::debug; use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; use crate::CurrentClock; diff --git a/src/core/databases/mysql.rs b/src/core/databases/mysql.rs index ca95fa0b9..ebb002d31 100644 --- a/src/core/databases/mysql.rs +++ b/src/core/databases/mysql.rs @@ -3,13 +3,13 @@ use std::str::FromStr; use std::time::Duration; use async_trait::async_trait; -use log::debug; use r2d2::Pool; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::{DatabaseDriver, PersistentTorrents}; +use tracing::debug; use super::{Database, Error}; use crate::core::auth::{self, Key}; diff --git a/src/core/mod.rs b/src/core/mod.rs index e81ad2a94..6af28199f 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -442,7 +442,6 @@ use std::sync::Arc; use std::time::Duration; use derive_more::Constructor; -use log::debug; use tokio::sync::mpsc::error::SendError; use torrust_tracker_clock::clock::Time; use 
torrust_tracker_configuration::v1::core::Core; @@ -453,6 +452,7 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, TrackerMode}; use torrust_tracker_torrent_repository::entry::EntrySync; use torrust_tracker_torrent_repository::repository::Repository; +use tracing::debug; use self::auth::Key; use self::error::Error; diff --git a/src/core/statistics.rs b/src/core/statistics.rs index f38662cdd..d7192f5d1 100644 --- a/src/core/statistics.rs +++ b/src/core/statistics.rs @@ -20,11 +20,11 @@ use std::sync::Arc; use async_trait::async_trait; -use log::debug; #[cfg(test)] use mockall::{automock, predicate::str}; use tokio::sync::mpsc::error::SendError; use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; +use tracing::debug; const CHANNEL_BUFFER_SIZE: usize = 65_535; diff --git a/src/main.rs b/src/main.rs index bd07f4a58..bad1fdb1e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,5 +1,5 @@ -use log::info; use torrust_tracker::{app, bootstrap}; +use tracing::info; #[tokio::main] async fn main() { diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 57d2629ae..7c5b8983b 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -30,9 +30,9 @@ use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; use derive_more::Constructor; use futures::future::BoxFuture; -use log::{debug, error, info}; use tokio::sync::oneshot::{Receiver, Sender}; use torrust_tracker_configuration::AccessTokens; +use tracing::{debug, error, info}; use super::routes::router; use crate::bootstrap::jobs::Started; diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index 15f70c8b6..b2418c689 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -7,11 +7,11 @@ use std::sync::Arc; use axum::extract::{Path, State}; use axum::response::{IntoResponse, Response}; use 
axum_extra::extract::Query; -use log::debug; use serde::{de, Deserialize, Deserializer}; use thiserror::Error; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; +use tracing::debug; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; use crate::core::services::torrent::{get_torrent_info, get_torrents, get_torrents_page}; diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs index 05ed605f4..f03753573 100644 --- a/src/servers/health_check_api/server.rs +++ b/src/servers/health_check_api/server.rs @@ -12,14 +12,13 @@ use axum::{Json, Router}; use axum_server::Handle; use futures::Future; use hyper::Request; -use log::debug; use serde_json::json; use tokio::sync::oneshot::{Receiver, Sender}; use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; use tower_http::trace::{DefaultMakeSpan, TraceLayer}; -use tracing::{Level, Span}; +use tracing::{debug, Level, Span}; use crate::bootstrap::jobs::Started; use crate::servers::health_check_api::handlers::health_check_handler; diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 33e20a84e..5c33fc8fa 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -6,8 +6,8 @@ use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; use derive_more::Constructor; use futures::future::BoxFuture; -use log::info; use tokio::sync::oneshot::{Receiver, Sender}; +use tracing::info; use super::v1::routes::router; use crate::bootstrap::jobs::Started; diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index e9198f20c..0b009f700 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -11,10 +11,10 @@ use std::sync::Arc; use axum::extract::State; use 
axum::response::{IntoResponse, Response}; -use log::debug; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::{peer, NumberOfBytes}; +use tracing::debug; use crate::core::auth::Key; use crate::core::{AnnounceData, Tracker}; diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index d6b39cc53..172607637 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -9,7 +9,7 @@ use std::sync::Arc; use axum::extract::State; use axum::response::{IntoResponse, Response}; -use log::debug; +use tracing::debug; use crate::core::auth::Key; use crate::core::{ScrapeData, Tracker}; diff --git a/src/servers/registar.rs b/src/servers/registar.rs index 9c23573c4..6058595ba 100644 --- a/src/servers/registar.rs +++ b/src/servers/registar.rs @@ -5,9 +5,9 @@ use std::net::SocketAddr; use std::sync::Arc; use derive_more::Constructor; -use log::debug; use tokio::sync::Mutex; use tokio::task::JoinHandle; +use tracing::debug; /// A [`ServiceHeathCheckResult`] is returned by a completed health check. pub type ServiceHeathCheckResult = Result; diff --git a/src/servers/signals.rs b/src/servers/signals.rs index 42fd868e8..0a1a06312 100644 --- a/src/servers/signals.rs +++ b/src/servers/signals.rs @@ -2,8 +2,8 @@ use std::time::Duration; use derive_more::Display; -use log::info; use tokio::time::sleep; +use tracing::info; /// This is the message that the "launcher" spawned task receives from the main /// application process to notify the service to shutdown. 
diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 4064cf041..858d6606c 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -10,10 +10,10 @@ use aquatic_udp_protocol::{ ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; -use log::debug; use tokio::net::UdpSocket; use torrust_tracker_located_error::DynError; use torrust_tracker_primitives::info_hash::InfoHash; +use tracing::debug; use uuid::Uuid; use zerocopy::network_endian::I32; diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index be4c36d40..dd30d9d6d 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -22,13 +22,13 @@ use std::sync::Arc; use aquatic_udp_protocol::Response; use derive_more::Constructor; -use log::{debug, error, info, trace}; use ringbuf::traits::{Consumer, Observer, Producer}; use ringbuf::StaticRb; use tokio::net::UdpSocket; use tokio::sync::oneshot; use tokio::task::{AbortHandle, JoinHandle}; use tokio::{select, task}; +use tracing::{debug, error, info, trace}; use super::UdpRequest; use crate::bootstrap::jobs::Started; diff --git a/src/shared/bit_torrent/tracker/udp/client.rs b/src/shared/bit_torrent/tracker/udp/client.rs index 81209efb6..45b51ad35 100644 --- a/src/shared/bit_torrent/tracker/udp/client.rs +++ b/src/shared/bit_torrent/tracker/udp/client.rs @@ -6,9 +6,9 @@ use std::time::Duration; use anyhow::{anyhow, Context, Result}; use aquatic_udp_protocol::{ConnectRequest, Request, Response, TransactionId}; -use log::debug; use tokio::net::UdpSocket; use tokio::time; +use tracing::debug; use zerocopy::network_endian::I32; use crate::shared::bit_torrent::tracker::udp::{source_address, MAX_PACKET_SIZE}; diff --git a/tests/servers/health_check_api/environment.rs b/tests/servers/health_check_api/environment.rs index c200beaeb..a50ad5156 100644 --- 
a/tests/servers/health_check_api/environment.rs +++ b/tests/servers/health_check_api/environment.rs @@ -1,7 +1,6 @@ use std::net::SocketAddr; use std::sync::Arc; -use log::debug; use tokio::sync::oneshot::{self, Sender}; use tokio::task::JoinHandle; use torrust_tracker::bootstrap::jobs::Started; @@ -9,6 +8,7 @@ use torrust_tracker::servers::health_check_api::server; use torrust_tracker::servers::registar::Registar; use torrust_tracker::servers::signals::{self, Halted}; use torrust_tracker_configuration::HealthCheckApi; +use tracing::debug; #[derive(Debug)] pub enum Error { From 69f100ab7d38cde396546ea3b1a34f7c718bd62d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Jun 2024 17:01:40 +0100 Subject: [PATCH 0871/1003] refactor: [#884] move from log to tracing crate --- packages/configuration/src/lib.rs | 2 +- .../config/tracker.e2e.container.sqlite3.toml | 4 + src/bootstrap/logging.rs | 2 +- src/console/ci/e2e/docker.rs | 2 + src/console/ci/e2e/logs_parser.rs | 73 +++++++++++++------ src/console/ci/e2e/runner.rs | 7 +- src/console/ci/e2e/tracker_container.rs | 4 +- src/console/clients/checker/app.rs | 2 +- src/console/clients/udp/app.rs | 2 +- src/servers/udp/server.rs | 2 +- 10 files changed, 70 insertions(+), 30 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 62792c271..46ece96ab 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -64,7 +64,7 @@ impl Info { let env_var_config_toml_path = ENV_VAR_CONFIG_TOML_PATH.to_string(); let config_toml = if let Ok(config_toml) = env::var(env_var_config_toml) { - println!("Loading configuration from environment variable {config_toml} ..."); + println!("Loading configuration from environment variable:\n {config_toml}"); Some(config_toml) } else { None diff --git a/share/default/config/tracker.e2e.container.sqlite3.toml b/share/default/config/tracker.e2e.container.sqlite3.toml index e7d8fa279..767b56116 100644 --- 
a/share/default/config/tracker.e2e.container.sqlite3.toml +++ b/share/default/config/tracker.e2e.container.sqlite3.toml @@ -11,3 +11,7 @@ ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" [http_api] ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" + +[health_check_api] +# Must be bound to wildcard IP to be accessible from outside the container +bind_address = "0.0.0.0:1313" diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs index 914ede0c4..5194f06ea 100644 --- a/src/bootstrap/logging.rs +++ b/src/bootstrap/logging.rs @@ -46,7 +46,7 @@ fn config_level_or_default(log_level: &Option) -> LevelFilter { } fn tracing_stdout_init(filter: LevelFilter, style: &TraceStyle) { - let builder = tracing_subscriber::fmt().with_max_level(filter); + let builder = tracing_subscriber::fmt().with_max_level(filter).with_ansi(false); let () = match style { TraceStyle::Default => builder.init(), diff --git a/src/console/ci/e2e/docker.rs b/src/console/ci/e2e/docker.rs index 26b7f7708..32a0c3e56 100644 --- a/src/console/ci/e2e/docker.rs +++ b/src/console/ci/e2e/docker.rs @@ -176,6 +176,8 @@ impl Docker { let output_str = String::from_utf8_lossy(&output.stdout); + info!("Waiting until container is healthy: {:?}", output_str); + if output_str.contains("(healthy)") { return true; } diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index 2d215a569..a9277524e 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -1,9 +1,9 @@ //! Utilities to parse Torrust Tracker logs. 
use serde::{Deserialize, Serialize}; -const UDP_TRACKER_PATTERN: &str = "INFO UDP TRACKER: Starting on: udp://"; -const HTTP_TRACKER_PATTERN: &str = "INFO HTTP TRACKER: Starting on: "; -const HEALTH_CHECK_PATTERN: &str = "INFO HEALTH CHECK API: Starting on: "; +const UDP_TRACKER_PATTERN: &str = "UDP TRACKER: Started on: udp://"; +const HTTP_TRACKER_PATTERN: &str = "HTTP TRACKER: Started on: "; +const HEALTH_CHECK_PATTERN: &str = "HEALTH CHECK API: Started on: "; #[derive(Serialize, Deserialize, Debug, Default)] pub struct RunningServices { @@ -19,16 +19,17 @@ impl RunningServices { /// /// ```text /// Loading configuration from default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... - /// 2024-06-10T14:26:10.040894Z INFO torrust_tracker::bootstrap::logging: logging initialized. - /// 2024-06-10T14:26:10.041363Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 - /// 2024-06-10T14:26:10.041386Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled - /// 2024-06-10T14:26:10.041420Z INFO HTTP TRACKER: Starting on: http://0.0.0.0:7070 - /// 2024-06-10T14:26:10.041516Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 - /// 2024-06-10T14:26:10.041521Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled - /// 2024-06-10T14:26:10.041611Z INFO API: Starting on http://127.0.0.1:1212 - /// 2024-06-10T14:26:10.041614Z INFO API: Started on http://127.0.0.1:1212 - /// 2024-06-10T14:26:10.041623Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 - /// 2024-06-10T14:26:10.041657Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 + /// 2024-06-10T14:59:57.973525Z INFO torrust_tracker::bootstrap::logging: logging initialized. 
+ /// 2024-06-10T14:59:57.974306Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 + /// 2024-06-10T14:59:57.974316Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 + /// 2024-06-10T14:59:57.974332Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + /// 2024-06-10T14:59:57.974366Z INFO HTTP TRACKER: Starting on: http://0.0.0.0:7070 + /// 2024-06-10T14:59:57.974513Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 + /// 2024-06-10T14:59:57.974521Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + /// 2024-06-10T14:59:57.974615Z INFO API: Starting on http://127.0.0.1:1212 + /// 2024-06-10T14:59:57.974618Z INFO API: Started on http://127.0.0.1:1212 + /// 2024-06-10T14:59:57.974643Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 + /// 2024-06-10T14:59:57.974760Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 /// ``` /// /// It would extract these services: @@ -46,6 +47,9 @@ impl RunningServices { /// ] /// } /// ``` + /// + /// NOTICE: Using colors in the console output could affect this method + /// due to the hidden control chars. #[must_use] pub fn parse_from_logs(logs: &str) -> Self { let mut udp_trackers: Vec = Vec::new(); @@ -85,20 +89,45 @@ mod tests { #[test] fn it_should_parse_from_logs_with_valid_logs() { - let logs = "\ - INFO UDP TRACKER: Starting on: udp://0.0.0.0:8080\n\ - INFO HTTP TRACKER: Starting on: 0.0.0.0:9090\n\ - INFO HEALTH CHECK API: Starting on: 0.0.0.0:10010"; - let running_services = RunningServices::parse_from_logs(logs); + let log = r#" + Loading configuration from environment variable db_path = "/var/lib/torrust/tracker/database/sqlite3.db" + + [[udp_trackers]] + enabled = true + + [[http_trackers]] + enabled = true + ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" + ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" + + [http_api] + ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" + ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" + ... 
+ Loading configuration from file: `/etc/torrust/tracker/tracker.toml` ... + 2024-06-10T15:09:54.411031Z INFO torrust_tracker::bootstrap::logging: logging initialized. + 2024-06-10T15:09:54.415084Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 + 2024-06-10T15:09:54.415091Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 + 2024-06-10T15:09:54.415104Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + 2024-06-10T15:09:54.415130Z INFO HTTP TRACKER: Starting on: http://0.0.0.0:7070 + 2024-06-10T15:09:54.415266Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 + 2024-06-10T15:09:54.415275Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + 2024-06-10T15:09:54.415403Z INFO API: Starting on http://127.0.0.1:1212 + 2024-06-10T15:09:54.415411Z INFO API: Started on http://127.0.0.1:1212 + 2024-06-10T15:09:54.415430Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 + 2024-06-10T15:09:54.415472Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 + "#; + + let running_services = RunningServices::parse_from_logs(log); - assert_eq!(running_services.udp_trackers, vec!["127.0.0.1:8080"]); - assert_eq!(running_services.http_trackers, vec!["127.0.0.1:9090"]); - assert_eq!(running_services.health_checks, vec!["127.0.0.1:10010/health_check"]); + assert_eq!(running_services.udp_trackers, vec!["127.0.0.1:6969"]); + assert_eq!(running_services.http_trackers, vec!["http://127.0.0.1:7070"]); + assert_eq!(running_services.health_checks, vec!["http://127.0.0.1:1313/health_check"]); } #[test] fn it_should_ignore_logs_with_no_matching_lines() { - let logs = "[Other Service][INFO] Starting on: 0.0.0.0:7070"; + let logs = "[Other Service][INFO] Started on: 0.0.0.0:7070"; let running_services = RunningServices::parse_from_logs(logs); assert!(running_services.udp_trackers.is_empty()); diff --git a/src/console/ci/e2e/runner.rs b/src/console/ci/e2e/runner.rs index c44ce464e..aeb28b777 100644 --- a/src/console/ci/e2e/runner.rs +++ b/src/console/ci/e2e/runner.rs @@ 
-60,6 +60,11 @@ pub fn run() { let running_services = tracker_container.running_services(); + info!( + "Running services:\n {}", + serde_json::to_string_pretty(&running_services).expect("running services to be serializable to JSON") + ); + assert_there_is_at_least_one_service_per_type(&running_services); let tracker_checker_config = @@ -78,7 +83,7 @@ pub fn run() { } fn tracing_stdout_init(filter: LevelFilter) { - tracing_subscriber::fmt().with_max_level(filter).init(); + tracing_subscriber::fmt().with_max_level(filter).with_ansi(false).init(); info!("logging initialized."); } diff --git a/src/console/ci/e2e/tracker_container.rs b/src/console/ci/e2e/tracker_container.rs index 0cb4fec7c..dc7036faa 100644 --- a/src/console/ci/e2e/tracker_container.rs +++ b/src/console/ci/e2e/tracker_container.rs @@ -2,7 +2,7 @@ use std::time::Duration; use rand::distributions::Alphanumeric; use rand::Rng; -use tracing::{debug, error, info}; +use tracing::{error, info}; use super::docker::{RunOptions, RunningContainer}; use super::logs_parser::RunningServices; @@ -72,7 +72,7 @@ impl TrackerContainer { pub fn running_services(&self) -> RunningServices { let logs = Docker::logs(&self.name).expect("Logs should be captured from running container"); - debug!("Parsing running services from logs. Logs :\n{logs}"); + info!("Parsing running services from logs. 
Logs :\n{logs}"); RunningServices::parse_from_logs(&logs) } diff --git a/src/console/clients/checker/app.rs b/src/console/clients/checker/app.rs index 84802688d..ade1d4820 100644 --- a/src/console/clients/checker/app.rs +++ b/src/console/clients/checker/app.rs @@ -58,7 +58,7 @@ pub async fn run() -> Result> { } fn tracing_stdout_init(filter: LevelFilter) { - tracing_subscriber::fmt().with_max_level(filter).init(); + tracing_subscriber::fmt().with_max_level(filter).with_ansi(false).init(); info!("logging initialized."); } diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index c780157f4..323fca1b6 100644 --- a/src/console/clients/udp/app.rs +++ b/src/console/clients/udp/app.rs @@ -127,7 +127,7 @@ pub async fn run() -> anyhow::Result<()> { } fn tracing_stdout_init(filter: LevelFilter) { - tracing_subscriber::fmt().with_max_level(filter).init(); + tracing_subscriber::fmt().with_max_level(filter).with_ansi(false).init(); info!("logging initialized."); } diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index dd30d9d6d..b2c72258d 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -273,7 +273,7 @@ impl Udp { .send(Started { address }) .expect("the UDP Tracker service should not be dropped"); - debug!(target: "UDP TRACKER", "Started on: udp://{}", address); + info!(target: "UDP TRACKER", "Started on: udp://{}", address); let stop = running.abort_handle(); From 7de259524724bc0036e075b4ee4df1d9fefd53c9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Jun 2024 17:04:15 +0100 Subject: [PATCH 0872/1003] chore(deps): [#884] remove unused crate log We have moved to tracing crate. 
--- Cargo.lock | 2 -- Cargo.toml | 1 - packages/located-error/Cargo.toml | 1 - 3 files changed, 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ab7512536..c71ee890d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3904,7 +3904,6 @@ dependencies = [ "hyper-util", "lazy_static", "local-ip-address", - "log", "mockall", "multimap", "parking_lot", @@ -3977,7 +3976,6 @@ dependencies = [ name = "torrust-tracker-located-error" version = "3.0.0-alpha.12-develop" dependencies = [ - "log", "thiserror", "tracing", ] diff --git a/Cargo.toml b/Cargo.toml index 6166831e2..94ad9a02c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,7 +52,6 @@ http-body = "1.0.0" hyper = "1" hyper-util = { version = "0.1.3", features = ["http1", "http2", "tokio"] } lazy_static = "1" -log = { version = "0", features = ["release_max_level_info"] } multimap = "0" parking_lot = "0.12.1" percent-encoding = "2" diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index f34f9bc88..4b2c73178 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -15,7 +15,6 @@ rust-version.workspace = true version.workspace = true [dependencies] -log = { version = "0", features = ["release_max_level_info"] } tracing = "0.1.40" [dev-dependencies] From d6fd11a0b736a4e20abfdea02cb84c79c64f7168 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Jun 2024 17:23:55 +0100 Subject: [PATCH 0873/1003] test: [#884] add test for parsing array of services from app logs The tracker can run multiple UDP or HTTP trackers. We parse the services from app output but there was not test for multiple services of the same type (UDP or HTTP tracker). 
--- src/console/ci/e2e/logs_parser.rs | 72 +++++++++++++++++++------------ 1 file changed, 45 insertions(+), 27 deletions(-) diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index a9277524e..2a1876a11 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -19,17 +19,19 @@ impl RunningServices { /// /// ```text /// Loading configuration from default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... - /// 2024-06-10T14:59:57.973525Z INFO torrust_tracker::bootstrap::logging: logging initialized. - /// 2024-06-10T14:59:57.974306Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 - /// 2024-06-10T14:59:57.974316Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 - /// 2024-06-10T14:59:57.974332Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled - /// 2024-06-10T14:59:57.974366Z INFO HTTP TRACKER: Starting on: http://0.0.0.0:7070 - /// 2024-06-10T14:59:57.974513Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 - /// 2024-06-10T14:59:57.974521Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled - /// 2024-06-10T14:59:57.974615Z INFO API: Starting on http://127.0.0.1:1212 - /// 2024-06-10T14:59:57.974618Z INFO API: Started on http://127.0.0.1:1212 - /// 2024-06-10T14:59:57.974643Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 - /// 2024-06-10T14:59:57.974760Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 + /// 2024-06-10T16:07:39.989540Z INFO torrust_tracker::bootstrap::logging: logging initialized. 
+ /// 2024-06-10T16:07:39.990205Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6868 + /// 2024-06-10T16:07:39.990215Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6868 + /// 2024-06-10T16:07:39.990244Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 + /// 2024-06-10T16:07:39.990255Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 + /// 2024-06-10T16:07:39.990261Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + /// 2024-06-10T16:07:39.990303Z INFO HTTP TRACKER: Starting on: http://0.0.0.0:7070 + /// 2024-06-10T16:07:39.990439Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 + /// 2024-06-10T16:07:39.990448Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + /// 2024-06-10T16:07:39.990563Z INFO API: Starting on http://127.0.0.1:1212 + /// 2024-06-10T16:07:39.990565Z INFO API: Started on http://127.0.0.1:1212 + /// 2024-06-10T16:07:39.990577Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 + /// 2024-06-10T16:07:39.990638Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 /// ``` /// /// It would extract these services: @@ -48,7 +50,7 @@ impl RunningServices { /// } /// ``` /// - /// NOTICE: Using colors in the console output could affect this method + /// NOTICE: Using colors in the console output could affect this method /// due to the hidden control chars. #[must_use] pub fn parse_from_logs(logs: &str) -> Self { @@ -89,7 +91,7 @@ mod tests { #[test] fn it_should_parse_from_logs_with_valid_logs() { - let log = r#" + let logs = r#" Loading configuration from environment variable db_path = "/var/lib/torrust/tracker/database/sqlite3.db" [[udp_trackers]] @@ -103,22 +105,22 @@ mod tests { [http_api] ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" - ... - Loading configuration from file: `/etc/torrust/tracker/tracker.toml` ... - 2024-06-10T15:09:54.411031Z INFO torrust_tracker::bootstrap::logging: logging initialized. 
- 2024-06-10T15:09:54.415084Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 - 2024-06-10T15:09:54.415091Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 - 2024-06-10T15:09:54.415104Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled - 2024-06-10T15:09:54.415130Z INFO HTTP TRACKER: Starting on: http://0.0.0.0:7070 - 2024-06-10T15:09:54.415266Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 - 2024-06-10T15:09:54.415275Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled - 2024-06-10T15:09:54.415403Z INFO API: Starting on http://127.0.0.1:1212 - 2024-06-10T15:09:54.415411Z INFO API: Started on http://127.0.0.1:1212 - 2024-06-10T15:09:54.415430Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 - 2024-06-10T15:09:54.415472Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 + + Loading configuration from default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... + 2024-06-10T16:07:39.989540Z INFO torrust_tracker::bootstrap::logging: logging initialized. 
+ 2024-06-10T16:07:39.990244Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 + 2024-06-10T16:07:39.990255Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 + 2024-06-10T16:07:39.990261Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + 2024-06-10T16:07:39.990303Z INFO HTTP TRACKER: Starting on: http://0.0.0.0:7070 + 2024-06-10T16:07:39.990439Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 + 2024-06-10T16:07:39.990448Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + 2024-06-10T16:07:39.990563Z INFO API: Starting on http://127.0.0.1:1212 + 2024-06-10T16:07:39.990565Z INFO API: Started on http://127.0.0.1:1212 + 2024-06-10T16:07:39.990577Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 + 2024-06-10T16:07:39.990638Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 "#; - let running_services = RunningServices::parse_from_logs(log); + let running_services = RunningServices::parse_from_logs(logs); assert_eq!(running_services.udp_trackers, vec!["127.0.0.1:6969"]); assert_eq!(running_services.http_trackers, vec!["http://127.0.0.1:7070"]); @@ -128,6 +130,7 @@ mod tests { #[test] fn it_should_ignore_logs_with_no_matching_lines() { let logs = "[Other Service][INFO] Started on: 0.0.0.0:7070"; + let running_services = RunningServices::parse_from_logs(logs); assert!(running_services.udp_trackers.is_empty()); @@ -135,6 +138,21 @@ mod tests { assert!(running_services.health_checks.is_empty()); } + #[test] + fn it_should_parse_multiple_services() { + let logs = " + 2024-06-10T16:07:39.990205Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6868 + 2024-06-10T16:07:39.990215Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6868 + + 2024-06-10T16:07:39.990244Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 + 2024-06-10T16:07:39.990255Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 + "; + + let running_services = RunningServices::parse_from_logs(logs); + + assert_eq!(running_services.udp_trackers, vec!["127.0.0.1:6868", "127.0.0.1:6969"]); + } + 
#[test] fn it_should_replace_wildcard_ip_with_localhost() { let address = "0.0.0.0:8080"; From ec88dbfffdeac08bbc3aa69de90ffad9e2711023 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Jun 2024 17:24:25 +0100 Subject: [PATCH 0874/1003] chore(deps): remove unused dependencies log and fern We have moved from crate log to tracing. --- Cargo.lock | 10 ---------- Cargo.toml | 1 - 2 files changed, 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c71ee890d..36508e261 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1259,15 +1259,6 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" -[[package]] -name = "fern" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f0c14694cbd524c8720dd69b0e3179344f04ebb5f90f2e4a440c6ea3b2f1ee" -dependencies = [ - "log", -] - [[package]] name = "figment" version = "0.10.19" @@ -3894,7 +3885,6 @@ dependencies = [ "crossbeam-skiplist", "dashmap", "derive_more", - "fern", "figment", "futures", "futures-util", diff --git a/Cargo.toml b/Cargo.toml index 94ad9a02c..418bcb3ed 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,7 +43,6 @@ clap = { version = "4", features = ["derive", "env"] } crossbeam-skiplist = "0.1" dashmap = "5.5.3" derive_more = "0" -fern = "0" figment = "0.10.18" futures = "0" futures-util = "0.3.30" From c08de7519498ea8495fb3d200ae918a4c4076f7f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Jun 2024 18:35:10 +0100 Subject: [PATCH 0875/1003] refactor: [#659] use clap and anyhow in E2E test runner You can execute the E2E runner with: ```bash cargo run --bin e2e_tests_runner -- --config-toml-path "./share/default/config/tracker.e2e.container.sqlite3.toml" ``` Or: ```bash TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.e2e.container.sqlite3.toml" cargo run --bin e2e_tests_runner ``` Or: ```bash TORRUST_TRACKER_CONFIG_TOML=$(cat 
"./share/default/config/tracker.e2e.container.sqlite3.toml") cargo run --bin e2e_tests_runner ``` --- .github/workflows/testing.yaml | 2 +- src/bin/e2e_tests_runner.rs | 4 +- src/console/ci/e2e/runner.rs | 85 +++++++++++++++++++++++----------- 3 files changed, 61 insertions(+), 30 deletions(-) diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index 620670f97..abe6f0a60 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -152,4 +152,4 @@ jobs: - id: test name: Run E2E Tests - run: cargo run --bin e2e_tests_runner ./share/default/config/tracker.e2e.container.sqlite3.toml + run: cargo run --bin e2e_tests_runner -- --config-toml-path "./share/default/config/tracker.e2e.container.sqlite3.toml" diff --git a/src/bin/e2e_tests_runner.rs b/src/bin/e2e_tests_runner.rs index b21459d2e..eb91c0d86 100644 --- a/src/bin/e2e_tests_runner.rs +++ b/src/bin/e2e_tests_runner.rs @@ -1,6 +1,6 @@ //! Program to run E2E tests. use torrust_tracker::console::ci::e2e; -fn main() { - e2e::runner::run(); +fn main() -> anyhow::Result<()> { + e2e::runner::run() } diff --git a/src/console/ci/e2e/runner.rs b/src/console/ci/e2e/runner.rs index aeb28b777..a80b65ce2 100644 --- a/src/console/ci/e2e/runner.rs +++ b/src/console/ci/e2e/runner.rs @@ -1,8 +1,26 @@ //! Program to run E2E tests. //! +//! You can execute it with (passing a TOML config file path): +//! +//! ```text +//! cargo run --bin e2e_tests_runner -- --config-toml-path "./share/default/config/tracker.e2e.container.sqlite3.toml" +//! ``` +//! +//! Or: +//! //! ```text -//! cargo run --bin e2e_tests_runner share/default/config/tracker.e2e.container.sqlite3.toml +//! TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.e2e.container.sqlite3.toml" cargo run --bin e2e_tests_runner" //! ``` +//! +//! You can execute it with (directly passing TOML config): +//! +//! ```text +//! 
TORRUST_TRACKER_CONFIG_TOML=$(cat "./share/default/config/tracker.e2e.container.sqlite3.toml") cargo run --bin e2e_tests_runner +//! ``` +use std::path::PathBuf; + +use anyhow::Context; +use clap::Parser; use tracing::info; use tracing::level_filters::LevelFilter; @@ -19,25 +37,38 @@ use crate::console::ci::e2e::tracker_checker::{self}; Should we remove the image too? */ -const NUMBER_OF_ARGUMENTS: usize = 2; const CONTAINER_IMAGE: &str = "torrust-tracker:local"; const CONTAINER_NAME_PREFIX: &str = "tracker_"; -pub struct Arguments { - pub tracker_config_path: String, +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct Args { + /// Path to the TOML configuration file. + #[clap(short, long, env = "TORRUST_TRACKER_CONFIG_TOML_PATH")] + config_toml_path: Option, + + /// Direct configuration content in TOML. + #[clap(env = "TORRUST_TRACKER_CONFIG_TOML", hide_env_values = true)] + config_toml: Option, } /// Script to run E2E tests. /// +/// # Errors +/// +/// Will return an error if it can't load the tracker configuration from arguments. +/// /// # Panics /// /// Will panic if it can't not perform any of the operations. 
-pub fn run() { +pub fn run() -> anyhow::Result<()> { tracing_stdout_init(LevelFilter::INFO); - let args = parse_arguments(); + let args = Args::parse(); - let tracker_config = load_tracker_configuration(&args.tracker_config_path); + let tracker_config = load_tracker_configuration(&args)?; + + info!("tracker config:\n{tracker_config}"); let mut tracker_container = TrackerContainer::new(CONTAINER_IMAGE, CONTAINER_NAME_PREFIX); @@ -80,36 +111,36 @@ pub fn run() { tracker_container.remove(); info!("Tracker container final state:\n{:#?}", tracker_container); + + Ok(()) } fn tracing_stdout_init(filter: LevelFilter) { tracing_subscriber::fmt().with_max_level(filter).with_ansi(false).init(); - info!("logging initialized."); + info!("Logging initialized."); } -fn parse_arguments() -> Arguments { - let args: Vec = std::env::args().collect(); - - if args.len() < NUMBER_OF_ARGUMENTS { - eprintln!("Usage: cargo run --bin e2e_tests_runner "); - eprintln!("For example: cargo run --bin e2e_tests_runner ./share/default/config/tracker.e2e.container.sqlite3.toml"); - std::process::exit(1); - } - - let config_path = &args[1]; - - Arguments { - tracker_config_path: config_path.to_string(), +fn load_tracker_configuration(args: &Args) -> anyhow::Result { + match (args.config_toml_path.clone(), args.config_toml.clone()) { + (Some(config_path), _) => { + info!( + "Reading tracker configuration from file: {} ...", + config_path.to_string_lossy() + ); + load_config_from_file(&config_path) + } + (_, Some(config_content)) => { + info!("Reading tracker configuration from env var ..."); + Ok(config_content) + } + _ => Err(anyhow::anyhow!("No configuration provided")), } } -fn load_tracker_configuration(tracker_config_path: &str) -> String { - info!("Reading tracker configuration from file: {} ...", tracker_config_path); - read_file(tracker_config_path) -} +fn load_config_from_file(path: &PathBuf) -> anyhow::Result { + let config = std::fs::read_to_string(path).with_context(|| format!("Can't 
read config file {path:?}"))?; -fn read_file(path: &str) -> String { - std::fs::read_to_string(path).unwrap_or_else(|_| panic!("Can't read file {path}")) + Ok(config) } fn assert_there_is_at_least_one_service_per_type(running_services: &RunningServices) { From f8a9976ec90481bedbc9eedbd4a38a42d7163bfe Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 12 Jun 2024 14:40:22 +0100 Subject: [PATCH 0876/1003] docs: [#770] update benchmarking docs We are now using criterion to benchmark the torrent repository implementations. --- docs/benchmarking.md | 96 ++++++++++++------ ...ry-implementations-benchmarking-report.png | Bin 0 -> 630867 bytes 2 files changed, 63 insertions(+), 33 deletions(-) create mode 100644 docs/media/torrent-repository-implementations-benchmarking-report.png diff --git a/docs/benchmarking.md b/docs/benchmarking.md index 2a3f1f8b0..1758e0de4 100644 --- a/docs/benchmarking.md +++ b/docs/benchmarking.md @@ -96,7 +96,7 @@ Announce responses per info hash: - p100: 361 ``` -> IMPORTANT: The performance of th Torrust UDP Tracker is drastically decreased with these log levels: `info`, `debug`, `trace`. +> IMPORTANT: The performance of the Torrust UDP Tracker is drastically decreased with these log levels: `info`, `debug`, `trace`. ```output Requests out: 40719.21/second @@ -226,46 +226,76 @@ Using a PC with: ## Repository benchmarking +### Requirements + +You need to install the `gnuplot` package. + +```console +sudo apt install gnuplot +``` + +### Run + You can run it with: ```console -cargo run --release -p torrust-torrent-repository-benchmarks -- --threads 4 --sleep 0 --compare true +cargo bench -p torrust-tracker-torrent-repository ``` -It tests the different implementation for the internal torrent storage. +It tests the different implementations for the internal torrent storage. 
The output should be something like this: ```output -tokio::sync::RwLock> -add_one_torrent: Avg/AdjAvg: (60ns, 59ns) -update_one_torrent_in_parallel: Avg/AdjAvg: (10.909457ms, 0ns) -add_multiple_torrents_in_parallel: Avg/AdjAvg: (13.88879ms, 0ns) -update_multiple_torrents_in_parallel: Avg/AdjAvg: (7.772484ms, 7.782535ms) - -std::sync::RwLock> -add_one_torrent: Avg/AdjAvg: (43ns, 39ns) -update_one_torrent_in_parallel: Avg/AdjAvg: (4.020937ms, 4.020937ms) -add_multiple_torrents_in_parallel: Avg/AdjAvg: (5.896177ms, 5.768448ms) -update_multiple_torrents_in_parallel: Avg/AdjAvg: (3.883823ms, 3.883823ms) - -std::sync::RwLock>>> -add_one_torrent: Avg/AdjAvg: (51ns, 49ns) -update_one_torrent_in_parallel: Avg/AdjAvg: (3.252314ms, 3.149109ms) -add_multiple_torrents_in_parallel: Avg/AdjAvg: (8.411094ms, 8.411094ms) -update_multiple_torrents_in_parallel: Avg/AdjAvg: (4.106086ms, 4.106086ms) - -tokio::sync::RwLock>>> -add_one_torrent: Avg/AdjAvg: (91ns, 90ns) -update_one_torrent_in_parallel: Avg/AdjAvg: (3.542378ms, 3.435695ms) -add_multiple_torrents_in_parallel: Avg/AdjAvg: (15.651172ms, 15.651172ms) -update_multiple_torrents_in_parallel: Avg/AdjAvg: (4.368189ms, 4.257572ms) - -tokio::sync::RwLock>>> -add_one_torrent: Avg/AdjAvg: (111ns, 109ns) -update_one_torrent_in_parallel: Avg/AdjAvg: (6.590677ms, 6.808535ms) -add_multiple_torrents_in_parallel: Avg/AdjAvg: (16.572217ms, 16.30488ms) -update_multiple_torrents_in_parallel: Avg/AdjAvg: (4.073221ms, 4.000122ms) + Running benches/repository_benchmark.rs (target/release/deps/repository_benchmark-2f7830898bbdfba4) +add_one_torrent/RwLockStd + time: [60.936 ns 61.383 ns 61.764 ns] +Found 24 outliers among 100 measurements (24.00%) + 15 (15.00%) high mild + 9 (9.00%) high severe +add_one_torrent/RwLockStdMutexStd + time: [60.829 ns 60.937 ns 61.053 ns] +Found 1 outliers among 100 measurements (1.00%) + 1 (1.00%) high severe +add_one_torrent/RwLockStdMutexTokio + time: [96.034 ns 96.243 ns 96.545 ns] +Found 6 outliers among 100 
measurements (6.00%) + 4 (4.00%) high mild + 2 (2.00%) high severe +add_one_torrent/RwLockTokio + time: [108.25 ns 108.66 ns 109.06 ns] +Found 2 outliers among 100 measurements (2.00%) + 2 (2.00%) low mild +add_one_torrent/RwLockTokioMutexStd + time: [109.03 ns 109.11 ns 109.19 ns] +Found 4 outliers among 100 measurements (4.00%) + 1 (1.00%) low mild + 1 (1.00%) high mild + 2 (2.00%) high severe +Benchmarking add_one_torrent/RwLockTokioMutexTokio: Collecting 100 samples in estimated 1.0003 s (7.1M iterationsadd_one_torrent/RwLockTokioMutexTokio + time: [139.64 ns 140.11 ns 140.62 ns] +``` + +After running it you should have a new directory containing the criterion reports: + +```console +target/criterion/ +├── add_multiple_torrents_in_parallel +├── add_one_torrent +├── report +├── update_multiple_torrents_in_parallel +└── update_one_torrent_in_parallel ``` +You can see one report for each of the operations we are considering for benchmarking: + +- Add multiple torrents in parallel. +- Add one torrent. +- Update multiple torrents in parallel. +- Update one torrent in parallel. + +Each report looks like the following: + +![Torrent repository implementations benchmarking report](./media/torrent-repository-implementations-benchmarking-report.png) + ## Other considerations -We are testing new repository implementations that allow concurrent writes. See . +If you are interested in knowing more about the tracker performance or contribute to improve its performance you can join the [performance optimizations discussion](https://github.com/torrust/torrust-tracker/discussions/774). 
diff --git a/docs/media/torrent-repository-implementations-benchmarking-report.png b/docs/media/torrent-repository-implementations-benchmarking-report.png new file mode 100644 index 0000000000000000000000000000000000000000..ee87c6d42292b707338a1917782b39da1357dba9 GIT binary patch literal 630867 zcmdpdS3px;(=NWe3W5|xK)Q6LD!m01P^2T$Ta?~GAe2yIp$I6wNS7|X1VRlUh!A>* z(0fhjgc3+j@H^klxjDE0Is0apWUoDIX4cF+Gqb{Vv{Wd_7|Do;h$vKFDeDpu-P9u@ zqNybPhwwxieoR9|^pHqZ`MI8NI%*DLZ)%u{$H%jP8Ztp)I}FLOwsYnSF{qb3AG=2b ziZqCsLhdO4B92vMr+b{IMqW1iv1m@wNgLVE$I$(V;T4+Bq+~7aAqnHE;wOE13WFrX z!$j>*_w6gIxptT&Y`G;)FJJZ7oqoly2fFXskGquw)|G{y1h5s(V zr}y}{hCm$Q_;X@cEA*dBJ^CfKzgM3*FNv=&-ns`v{yw-9wNG_@@!{89+>PstCsOSS z*Jq+zDrY3u=e9*s{PmgW0rUU7K}5$JKlH1$0=zKAGI*e7^;P}kyYd&$|I(}XY!L4V zV2li)-Ot+H!TvmM{n)KsM&6-jF8j{Uwf7$oOY*UaAxtg}9z?`6F{aaXL=Lfl8O=xa zWoQ9*e)5aXmA5FD^PCWvBeHo89S?&< zz}!y))Js6m`D>6nB$e;RoBokpnfFJm&Q+~`TWTA(4l&PIW{Ic^%wldz(~FPI9^~Af zw#Cs-66DeI@KS*#-gzE5F=DhMvM;7lT|JYHqSIn$M(&%0ZkWkj#uje$+P`%5%-TI= zGnhcdtLs+;{Yb4bB?j!JQ_Rs%e4ZoTfJ-z?+|0hCI{(fSRVh6kGL=CY7_VXY>Ld>7 zC~1l&3I47dN6)#f_icwDX!5(bkB+YuKt7ieIppjkOVEl2!EpOin91b7&eU&WTR6=> zk|>hiZJHWqe{7W{ozFP=FF|XpXOF1#!ZW|Th)mAY;?8+0q;vCdw=0L6Gw)}WeVmXP zfmMzim&E!Uk7r6d@uH6n_^(o*S;Xfgm~9BVxxKEZrXV8vkxGhB3z0Z>>`2;}>+p(l zxwGAc-N~Gp1_f3*IPX!O&CsfnvOgY(|CpIPQ3UH(j`n(Nx*ha1rX0k63!08Gw)&QV z({6^nWDkz<@xL|T<vyH1WNgOxbu^ z|H+Q-lx?m##lO74Qx-cicWhJ8(1`@jtAmafFOzOf5yMV`+UPMYK|Ehs+)*wJe)!Dw z&V|p%HuRd7KuZ6*BONp)UkOO*B>jBf)Rf;bOWZcDU$eHHz|2Ktn+@LSSuy5uf|-Cj z`>(g}nL=65IgH8*jTGMO-XflKm~@K5+`d*MQoOeIy@G9w+4kf=zp(ll#0DHJcLMqa zx1AnhUVI>VSPGctC6LkMXDfsqbl5Jl7bq&=5jXE%=gIG!w{^2XkiL=DgtKUK#TV5S ztZ@}t#I4;@j~{o`&%0O)ftaf+=ydHCZF%wKZ-KRvxr|+_VY?1caYJ0Nnar8rBluyE z+|_dquxMubkXWFfs0qm%M~ z(KT-MWn}f-#6{dmQIeLH`urDVw-O5UT)B(WQ+PSI+Y1^|1t}iYB*BRB2}~RaR0nhN zu^LXZLU@_bSNoZktb8yp3LqJZm?_NE`jZph)7eFZsap^yH&r69O}FMz-^5B8XTVl?*zP89(0i zDVlyjKvo{L6VTw?5O=<&wUBs#8d>lY|D!dtF<8?gnvjth6c&ov-40plsrdk za;lwSS92=po=@U}X!mp1KA?QAn_zvSg~zc%y6kJ7O|$xkTulnbIR4)XkL1#|Jt+Fk 
zwTgT<&FXgYd(dsxm;l+M7d_$*h;?kxZr?pP%+F42+x%D9!J?ZCTGPgs!_CuIA#xB% z)>{E-F{zD5`C1S8%)Tp2|C$$wI|$kHkBbNzL6;_IZ_{F*0ynW9+>o88gQf#KX~MA; zFIfbUjcWt^{8>SnD#O2g($`uuoIyD76P*40X@vJ+v|ZfF<=OGE7iNBw@I4lC?HB}X zKzsS9b^CC}?$=7yqCc$~t>UZdk$DB!F`0Q4DyF3GrJsP9TXi2jIm{etcZ@hbj@P>^ zA#3F&X;fiN%Gh8u!d~?jW5`R~vXH~`#1k)GvL?c$n}oj#!p4*PrY|S#I}xlT<{=Fz zR7NX*mR89mpsS2$wWr0XEEUl z*}i?FJUMMy(jlV}6B6%fM4+Tt%(-;(ThO5Uq6OAL$Th#g4TY7{tb_BQ(V45HLg<{U zi$H)$$Uac&r9+KO6gY6>)@P;cA;ZsCY+lo3du4SX6*Y2((>>GTNvn5f5D`{63nRI> zLp8foRS4d%b&ojON(Hyy!!@(OWG<&vK9XBCy!3p*KLgWFY7y7(**Mc+HOyMMKJeB< zz^iez#kVdV^*w8ft?&;=_Ak%5sAUvgFn(*RucOLdO3Jrj#`ud-2^pRnQv8F<%!V!M zMa92ZZ7@rJ=ZmKbmv(Os_J6c`y-(>|_o!+O&SPdUTip+6SZWPL=ZQVgyNDz2Xk|QX zF%#$m4~+(nEZ_+grdaF^$8B;y9uOs$*l>0fxW_0rI(M$SM04NGng^6{X+ZjvDak-4 zFuacfSS2noo$hvgF>pfQk6-lt_~M#=@$Y>DMZ!-R*s5A=mAib$Osi zP-$|97ido~7%U^cJO_|fDnibmJ7wVm{==Axoy5yJGm(ZeEeX*ePqE{a*3+M5`=gal zF179cPQ97pF3v8|dqn6C{-*Xzxe(%9za@zG zr`K*zDfk9}*w0Iw-%}B`g@gkd{b$AS0#2e96-&qblAiwpHe(?5<*6*V_cX-zvkIAt zR%8goA1y{<#o+dw<}Dysv%O<$bju>RY!pJI?6IbtwvN1e5;Rx z<82Ee?|}A>V?J$WKauYn1HcTj4*UYlU8eaPU#&eyHPq8+u600S$!8Uk+D8&F+yF>Q zc~t>eaPJ`*`%>qNfzCSzb|TLW>h{d=i#qmPaw3Q0Kk$>Dvak9xZ0f?+Y6lN~VUUQ` zz?P5$uS@=D%VLu=;re0ovmP4);d=Ow%Yz?gojsWkyzqY(@GqWt*;uctJvvj}n`I+I z1T`{F@~@W7;ns>UUW*iHXobDm@A&>UvJ7jYbLturMI4HDdcKz*- zeYfgF2O?3R$4}_@JDTeQONab!(kE^S-qo}FS!ApOB>spArV{s;EAMN3e3n*9=OlYH z4DKZb$Iy0FS>T84FD-av0RTV-qjNHU7~Vz$Z{2t5v2-WIE^k(mzd^Xb?3v(g{NX`t z^@0e^PAz>?>e|4P2+-3QePmk!vb9JIsxIgX5LRUpx-^{j$ofhWO$X7=OAgT2z&q7K zQi2HXv1jH;gC{vUY2ioLAT85IpE)lr*#Q2DoXr{J0&R2tcWu~t*Fd00WAhV8h=$gj*)1Cjnyl33b-JA;j#c%^goQ z(cIquT$=Gg0Dr5N=hl)aaR~#z_LpzdT6`lFSZw(~l({h5?v`JJv+UiR`FWoaK-#~@ zt8Iu6jZ=kc1D*%NS{N}D~H+F&q^@QT*a6Tx)k<0wiVaz-wTmB_o<|I;RiYe?)hbt*-zPX4mrI!I)_ZKknWSmWJdY&%s?JB4OpK5RI4iDtzZ;D7s}FEZVQ1Myb-P(!d}|xZNbFYyHx%N{2D8y6X%^%_ zvcXK<*wQdczO{wXF-NC?))uyKIV0l`(X8DmLzJ!-pr>@1Gnjfhw+|&#)~uPb@PMJU z>a9i(MG4}auo-)zdZ8musp<@8^HThj+-4eTy~jd^FU8AOXt4m-SC8h5Ywf7WgIBJqso6S!o+4kw?ZWj%Al(2ZOorh9FvyVE_zf$f*9Mj 
zg-Hldgv>|=3-MvLyOZLUDo=RF%N&%Pyk*D(!yBwo7mnZ!31ive+}NG-QN_4g&|r?? z!Yh}kcJp?y$Lh3Nay0Tl%WWenzmQ)P&iZY(dxIO{Saw|bEIy~%$mp}<;XE06`nhjI z_SsbLI3}cLQ2ktJyt|Dz+$8f~Gk%_iY_Hi%iXrvpDV$BGK~;pobmru+jH@sK!iS`R1^RZ}V6Aysg3(52hbV>ZMm|@m*>{5{4z(*bhe} zF4}>T5&gD^)`^vH{aKD`S$V&v@*wSf&x!B}DI0|~PZzF!o*iMrms@kJf9Hg?e z9UtZWN>3U#&|frhrtDEKOS%-F4_}a>f-9Kq?5s|s%=Sj9^zyzz;>RRa-QRpUiV8-GzLxKtKg$Pne#7~t%Dg_i z1GtFo_h@VnmKHQ8^^nf^T-5*K^?lVom`0%Dc(4%%F}XF=MW$ zY3Cq)&D*b+z{|B59ewghCyt@)U_Nu*?BstRm`}bqXMbAkRxx>``(sbmoRr;Tt9wtz zq@_kCx_(YY#vGf#IBqRRdn%k?+yH!}qW-d6g~lh&QLWeB7?7NWuokIx-8hq&!~8?^ z;kDe#5-^S_znwNQ)u8^Pfx0%Gzr!lM0Qr2q$Ta%H-qjo8$uY-}pJr&GA37NyX(p6Kri}cgZXl3<6vNg36P@+HS(P%e!uboKZz-=4zpe3)e z)#Weg<(dLc4HI#QX{-(8axr{~W-HSfM{7wenj{=P-IAqOZ%y78H=Q$U5fSx$5(q!TZ9W67wz!w_S!>wmeXP z_n6dTa(1*kCP?BQJh>-9E~jMm-ux9av%7gAmoyP#UjxcyX4d>_$C}(%v04?WU&l~K zp#IKM3Kb-OE9Y`_J+pkn(_;#|{>*Y+uU$fW^Un|H`*5F9-L2Hs!&kYfqh^LD4`7<( zT6rUmp1lmmrCfVyzb^oVuC5trFXMBuinIPAA^{FkXZnvkn)1}83b}>6rSBnzYJ)b& z_5`25&cEtyMAO?AeXN5koq`CM-gyqFa#bx(J65x0V#FXo0cWapXcnVuZ|Q&r_)QuSDk;xI1x*R{fi>j6C& zS~G`}dv0IosU}+nb~J+StDSHwJRld|o1e zM_81pVUtg$h40TYFxE?$h6hv@R=^ODcsGItaCRK@J)6|++%e%^>uiT#RnHp9NvY-= z1iI&_SREk6EOk--C6X6LpYx@JK4yZAMpe$T9d(N|4p`54dV9KZrmWG*)kXp#G|chp zVZnL`9hSIGXX85ZG>e$XqY>S$erb=E=KkUHUyJr00>@7NOU=3$#R~g+gFpR~^ru@( zXM)}t$j}MOEsi!$T3|~yD5~H@M3oGHnI#3uq~>I*H$0t z;upQ*C-sd8%OcCus8TWdHyL2w0j2z}uMGF-O>(T~Y)t*|bGA;rvZdTJ*~LP%_s0A? 
zt40ZKZHL|iW*m8{zJoZM1EsyEeHF1YyWh5S8J(T^b$)iU-}-ZLJ;b=~N--#iN}bih zDrow^Y4Stt^(v4^{6>q5d}piwIi+ms%Bb00wWOiHs_Be=N4+ehMfl3rdYl-zrLBf5 zOudSk0KgQ-hOJ_|Mg8kt7iC|0hFjhdeH;~7vh`%v8l5-1G?jtZABsnCH~tt`=70!( zM$E$M`mDx_U%1wyyITjiIVajRQ9WW+bN-0ydHH*w!!AszB`!ioFAfnXb z#4ifV5oz5{A0heW)d@vnq0$Qi+4@t$0>xfGqxHbVvnVL?ny+`PI71+aocraG>0j>v zD?Hpn=vdDLBxIw(VW((mByda?ph6BP0k3Kd4mnZ|_o;cdWQiAsR#cQja#!!#HFy>4 z){Fc2k{NE30Xz5!zn zuOtTuR=-WuhA_?eS{Lnum`D3C|HY>BE@H3@16D=^bL_yK%oQ@h{vTYMhM4tAUrzCXuF|YLjfr|FAUzHfSOP+2cDc zADf*sQ&^OWRiQ3^R`}MoTjy;oX17#~M`X$6Gpx;i7_)T~WPZt`fbj}U`QDmYeN(h6$h@q{Mb~~b>inMXq`$Lm z=8EmhSIW8-XEW)lMG?e0;-N^&UWQ?ex0}|*CW}uAmj*lg+%G>JuE;nHlJ}|AE{fIm zmzg)t%BpUrfD#PCzJ_t#Qf0D|cgvE25LF-RC%Ff^)nQneahb5h7yw~gJOoWiNY&uVsf+hq9-f_E^^W%M8fxqN@G?F&6Rzf-m0Me+rC~GX zAAaVdPh+dnq=}O;Xjte#$-Rv)@Au-^s)~<-?EzR|w_^KkM~WR~R`GeBFjKoU$ur~j z1CzG}Z=c?kUYnMw>{yy|g0@TzJ-mJOo<}rAm+hycNJ_fbzq;82Cuo+kdq;OGcC4*2 z;nr^uZ98SZsLEh2NsSxT$SxBwM}9%AG`)@-%g&9k6Kl%?8VhVeiHqA9+S!2`*_T^$ z@eE_OFKf8)x;Q@`;RO{bgv)>VcktvSkWqajm|8KYLDu(Iu3Ud;)AMQ7z%;80IWK)0 z#V=+ai{la}KTFV^G6z!esXC(m7oYa}ZuuS!e_X;kY#_cQjmBqr*Az4bvUxXHqyj)c z6u#V4ILNq=lJopWX?mEU9tEBJ@lHKz^f}(#Zm>2b^e3;dcWodViU!r7ovXxNDQ+TF zio8tp^+Bkp>3uCPl$ZX1jzDXmS|6I7NI?kmq_lRZTEtme~SKi$Df}4AZ z{p+?bkOkk>Kc_gO<>>nbEOdTvOv9#L$6V?b7Q(GJ#f_`#{J{#5bYwlVo3(}y292lJ zj|0;Sl>P(f!C8hL4Q)GewoQ;2eM>GfsC4pF^~wG4=>GF& z=l$+aiKfD7Y4>f?EQS15^PZ%j-R}Hh(;lo_z9=kon$aR#kXtI*pL&W20z1#wtN=mD z@}DdSIreqGUKJS2UbMW)lQiCxgS6ai?SM=}HP(&yLp!pP5ALX}oUYg9(`qZ!^$qz` z+8j(-$CaVye(goF#p|!+DqXc$b$)q-_BzRAy7OXrB_ykm7ddvC%LzR5k51BnHI1IC zy05PLuB$rSRj(b~+By)$G>EI|<{CRl^l~%Bj-ieNO%oHd{G{lvB*c8@Cj;}OF7u|A zQ1V-BmyTSi53f!pV+LU>d`9Af@}ic+Pk zN9?i?_7a;szDI8`DgTCZo=o;G`;5%s3sI!K&jAS`i|zrQKZ>61)zY>TMVxKASK>7) z>fmSl6b@md*pIDvMf>A*l;uEyt)bH%Q`9n26}f%^s=z}RUvbH|xX+mNW2$Gtgo>mh zlOQ3pt8Ld+aRx_AyV^=<%&G?FfQuf$R_N_XDCnDMw!;+ zjV4eUa`urr4?CBtD{OiOFt?%dhmJKeq2duOeDR3&{S++4MwK+4D~)!icVNIL;8(Y= zH&{k?H5I4>^{H8Pm-Bb~jZYedxfBo~oBP+Lwoj;Dyb&F>R*^f3`iDDh58EJ~mvglz z<@!~V3C4u-EGK`G;T8GS_$pH2l%m5Zsx 
zaUpCwz1!B#e<*lC71#R;a+GkpR$5rCP673H0!_$S=UO(@ai#_O@TuU`1R*ilAfmL< z2ta$!NI=bmZ^v_qr`j>9?w#xCWteWn#|_nfxct+NvaWJ<$}iTi9jx=@H>wF7nE3vH zBJ&i)<|AzZd#rib2YMRkY~Veeq0$scpEIteqpg^sp5f&Y5SN;nIS{Z^TF*k8=4D$6 zKh(ost~delsc4`<1JDSB4UnUg*?NyMusyZZG{F8E*!9dlbIu@7&Re|{5WkYzG+CBm zM1a^u0f1egRm#i%T6LWHM7(2`&zb1hTd7_ zX2{L#hLV&QIUTC@+^F!xdFq#hc@rR!j`A4A#bOAU5Emx(Tb!+17 z(!527j!KqleNoM;jf>v9MpqqA3`76E*1nm1^|$)J-Ihl8)4)1nxa2nzkgoT6gzHT12%@A-k^(-mPZQz)2*w)SLWXl>5TAP4F9waY*BM&$8Ed8WpL5g&TS7HVso*-8lY zTJv9eKcPVY{(p3C|7GPC!{h(7GJFuJ-(b6@mFWMs=7#^*do}XM+MHYj38AJ7vf8FM zNpR2weQs68Z!)s#J~^LMCft|Qoc{}uUIJ3Rmfi;nTjF3m$;T-ABV?`e9JwH1vPUEH z3?0;abX4Fh3sT=)$N<#9vwuxLbvFgNJdW5<^g_Mk0sZ1m%zCCBwCn1Z{ccBC>qX7@ zKpqEZF_{qf(FuhMnr4%Y(r$P<`q~RWn6*#M_ZTSaXq%8MQ%+lqBq5){S}y)FD(f^ zFYB~YGxf`ztCqr!@|qUugmg%dQn8WfH6@EFbvN2usTD~P&LDh`i}-E!U^;S@a3&0E^IEGJc z;LA;pUMm2`DmTWK`LAa}R2lC4V_!IU{HWj!ogyi~o1NM;?+9=3&|RsHb;qNBD=;}B zP-uf+EoA0o@{2XBu8Dc3ST;Uln`PJg1tC+2*mp1GgMN=l2hdxb{3P^FTx*EEQV4{d z4bzwwLajJUg@L@;DgXlxJ9XNvPn#Wpb?p+AosJpO2S4TwCxiN)vyTmG-`q7^@+25E zBBEMoReS4CA76_)RYKejK0DGmpPA+*K=_iwU*##}iy_p@gtV9eGjHoju5yZxL-^8y zHwfiY)vQyu)*uL?NJ@gU@CL~y7U zp><};%P0ojkcR;s22D+S#f_grDjzk-Lxz9p*&?rw8o_}-HfpcRW&}YXQ`p$JF|2NP z55-VlkQAhN=&^9y&S@s>t~u$I0k9V(>4J))3i&Mv?$$k#k!&tQDws)RQ`xH{59noh`Vw~ME8C2gwRX*7@PJ0Zuh3!t5 zW3{i|8}kyaVsQ1Th&zJ5WItBPRbSgP3J6JuldySo!VF00aAOQUVQW~sf^etVx;f}A}#lQiIN%0fei`oAq- zHf3c2SUN7CDj!?usM0)V;L`1Pfp%Qr&ON}k7qffg{3c8DT^2cKIvPTkQf7Aent@>u z&q+jJ-N?gWYTX%?VLhHSKPma7FVRIsRl+i~@zf30iz{UGBBe2@>4AM&)g6srHz+p4 z<5_kGX~4NqJlJcWIC|$t$M8Hd(3K0EKVwkAD}U-0AST=R$**n{8Pt98i7Pl}+msot zweh_0=!ba`FtYJ~>(oJeYmB+up!|_G{66U*T6nU8;?7(mvm4mCH?S1WD!amzN}V&0 zyvnxHGLd+kISTMr{c`NS&~Vd^!Jv26S)6{f+8VICGtYmU{aD#j!0_CqH?K=^;in?$ zyJ{JTgDLnT{Ov;5ZFAC{D*3%AX-Mqpd4Niv;%0bs;4hmj>@_QioL@3Mxguaju0CmYXzIQ*9oN!f~aZ!6sWuN7+pH0MJdw z)#}mS^%NQou#EnwN?HsDVhJ8oCqs#?t7l$qa% zj2@L-uKR0*OB25_;zBA}k7NeYNu3m%WT*ocx9eF+^ZQxc>XY9SK(I`a7AbphxnX@} zAZFoc)a6lJE&9i3%Z0w4q`xg^p)7mVrXz=8kndK1NPt`*?Y!kVpwbkr%y0-J9ugi_TKG-5x}#lmz_LTU8Vd5 
zeI7(GKs@I*yLH&k;$<)Ear#in2}&mRlm=*_@Lvcw>#eu`)pjkXxcq8*CJ;}&Tn~2 z(&CI=zc<`i3=(VHH~Z_1-g-`NUd?$lb&bk-dAGxkX>f;D7h8?x za4An{W}Y-yo)2)qEM~_wmZFV_{xQ&qxr)Cd?Qy$6UTp*{fyHL7uTt^aV zD@HyLYdO<(nm3%S@2C58-&Ox&iWKnwD_@>U8$u6}rWVKb9nptx4xbnP16YVM>Hg<+ zDgVD7$p2;X|HSt1{|jR!3QYscV1y>_OSaL;Fbsd$ucx8Sv-Fj*)wA7T{EUqrs%x zZz-?4@t^fOd<06=3-5GJ0tYb?r$!n`XUe%O1;&s;9cXoFm36A#M?eJF5*A+2A05g zP+_}D{-&^9>8DWwjH;I0oq5c39ci6w9^XnOb}C{J{uzj|bCzRe^qv$vQKrEfdc`ecCNpc2n#f`Nj;;08N< zd6{XMU|H)XLl7Zi+loG|YxcvScET`wE)Gaf%T3(uny1gt-r?%;>1vR_Q`erQ3DU!3 zNyt=X@mHVjHp5wW;3mC`-~=qQWJ`_Tt9yLtVA#0M6OvYzkFHIrDB^9E8@8Qm)JIM) zZ>AP$Uim_LcM8EM(;0a+0kh~eJqvJy(IBP_01?PA@?b^}wNU;!+|a2h$=(#I^G}nw zY+OD!q0gDE?$?%;)g+{Vn76Stg&hs81ak0foSt;U%R68ZkL~dZIw!(RPli;MYQ9g3 z+$iP07FF9HE>#XjKh*6&S;p20&Vdt(S^DLq)(vu`kEcKo=c4a}*Pe=`*+BHsd?iWz z$_gT81)JH@n~DZNCMi2TpUy*!CWJs)q^2aUQPF{H2lj<>Y`ndL{d&B+%A&1zzRrHZ zmT_iM>7RzgZc?-!o*;*Eu4?j+OiB-%*T!>w2Yl+py^j~}fKx34y9OuVYz*pAox-v- zd)_yBf}f0sWAC24^_4^Es{s`rNFBJg`#CtvF;KB;95*w)B4^i7gwxffGc&;N?$xOT z0)MMHMWydVvEBK`+x5ro!nBnwTuZ1?O9Qm5CFQ{O;9PQjDt8znW{Hp(~p7C^#)L%Oc#J;V;0Rp(frC0$DruD4!LA#dfgVvy| zy;Ib8(=PcsetOEe@8PRkLu>Xk!<3#&gAE?Nu&?%9Ym}iXE0fJ5=O6deU(|FrPIT;L zk&2(^BC4v=h+%Gx7vj&o|?R3bP&yJ8GyGjAqoQZWm`n*ZI{v6c4a3M{$tbh zqDs~|j`}2G~V}x#M=qd&zq#9^Nnl2pyrU>t&3d9SczF)9?N3)doil%PN31PEbLnxrA~=Hh_DR{g@n{Y+LrnFUTCFz_e8#U!@_ z^o&lidxkbLNzJhN>2pUHA3h#-jfkTU-0ww&`Z9;$cio`Wir!N$7s`&_*;6tyu-jF? z!Da&^@h&786MiRYs9nyH{k5M$!QV+K0{7h6glzGLz25Bc9^K+li88f7KJmRg<$O>^ zWT;#BfL545@XtCQ<^;}3ntz_5zdM>=+ju?)`|tf89_H|&g5$4AiQD-|Y-Lt5ldVNw zuHAxycw8Tusy&t1IeJ8U=uIje01nylBcQv6QhQxp>32f@cfRA_s=m_2fihIBCvy(R z$3&HSH&4#p_*O%~=A&Nh!9SCk$o6_#2Q+Mtcjn4wu(mj9{!$@LQuYXPmm4!-*|>!) 
zM&H8zc$gGK{K_g6An{baCdKA|o!w<}M@Q|HoVmbYhA&=-xZ?TOS`=mO>3FsoBoSgl3wq4EF zC*;{-3LYeDZ;uO<=)h;7PRWhW-0Ha~DFzn54uMPq9M>}gpunNQ&w!JYZiXIth_H!+c`u!Ge9HYCV?shlB_;GvX!RaRrF=c=dWP%PNP&D-0=9gwa9_wmD&r1> zb~J>VN^xn=bO!Tk9U!4Da9@iMTvGuf?4=1Iz1Uo}d2Gng-mTA1%?G#Jxo*doD@=sM zOjF9lCI1utn1I_an@e_jxpOT2Y$LhLiW{jFBh9|`ZMg^yl>GkGa|AK+2Joo%*wj2` z_m;{yiOnu?AjI{&QUMj{7`WwG@~O0`>#fh%T|byPB+VXWvNoWxJu^HhbQ> z!3&G2GPoo7a@$>(UQs)^<8c~XJlCN;=h2CxeO239@X$%$tYm_5^pRl8am{M&tLnaI zH`KabJE2B1W7fU)GMY9Gs-5|ZYsxiUq5@5P9}&Ppf#t6flbIW)=M(?DHK)Gb^X52s z;GMNr5;Xy2-j?E>#vq`R@oCO3gCvwlf;(i2kw5lN4e@R0f^f}Q%}iH!(bP3K69CU@ zK#5BF!a{W+ddP1WG+hSS+G@P83L9nKcHTNOON>ru3EG_=g98QCob9RrAIC*+Z_{SJriY3a^atGnTAQNI&{3X z=m|nOh0aN(8$1C~Hi+_SV#Cy$n9f*`Ki)f)Vao*JtxCNvJa!HgHUc*-)rFJqGzW6H z7iv6S?{M|!C}#7X*)TtGGC@?A|Fl6R|{rFlWptI2MG?6aNO$p z03nx3ql|ABiKQ~p{~ClnRrtV7|lLt(6ufS%tnE8x!u2h*kB?C@oh5sb~- zXM~^GP~QG!B|jq+uC}xX0jINBS1C^`=6Y?RAP}gwJckc=3c;U}D4hMG3fli{La$3( zTCdsD#=nb*=QV{nKTY|{;C-igS)vqjo(wrZAB0#C22XlQ$SSmnx`M$M1PYX{ho7l) z!=C{0ASETGGV-Db$VDOKqNx?zWMcT5)@OJ2d`la0(M~Qr))-U#WIsZkCtd+}5DmE) z&6@bnlO~_N`Rm!VUiP+@g4p%nEz%YiZ(q~YdQs9^3}QcG$Q8WaJbJ5!kmMi1yWrkh zr?>^|&nW3XvJ-@#Ek{BQ=pnCn_=?xu@EA8IbH&0i3(!$_aScG&5`i%d%vw} zoTK@R+6TU!SI314;*NE#PmZ;@i+d{5{w3hny301;mO0ara+`M0VZ2GQ&I`K8{0VNo z-e}0>%Jr=R7CCr5(1R1cG=^Ii?-eeFqY3x43}=P9uMiELg1eu;VWBS7p=IR*8dWYZN)bgI1t2VLXeG){`D?f1Kaic&&me=(a_Su z5tvF%auGg^hv*Sx*@ob6)Mj0Xf2j{BVlt3*#AHAPu zdFn`o{U6pA4g2Brr&Mx}fI~wii6w$NLB5+!#S~RdBWs=|72^}h-TEcz5w3%MKEwGw zO?fZ{OihbS-c|?8pj%N%Nr`_;`EE#=OL=BnS)u^KEYHb*2HE6^f6cb*8x*y(N_2NMP$DkWe%lr|0*J^e)O>YA7?G-_gO7|?$w`d_Y)&=K~dG7V<@*wYQj2n`nM>s>`@2g~(49Tja$ z+7U~NI2{?2mwBC|b|cO9pe|=S>2Z=d6#gvL)a|SL3%#~%cF^sgdmki*b~mvrqY@-V z^JYF)QX+|X2tdRucc6LsZmB@enEPf^JKk}rBA!Ask#~@gLJ5oSRcMWgJYkVdlxEa@ zQ#8g2I=%{B+?mfl?oqH?F`j`EeZ{n@KApz%blT(7?&n@9lqtF?n-h+V&WT}3&zC{5 z77n9%oPnYX!@H*{FJuj@N z#uMDngfLC1A@xjB?@@Zn$%z10Qyi%dcDM92bss}B>cJpPM2U19et(@voRGjQXnH zS^Z}8D@FT{K)%?Ol($XPieK=ILIX}yGH+#_nx1Z7Hkz%4NY5F+1WfuOTn4mhL2&l7%Ho`-_go@x*_70g`|>m0GKM6*qJGtN0n43#6%f6-lqA 
z-Mx}?#Q~mDCA_Vu!}Rbx5c^}T1^TOUTbJ(qU~i0SUArS6w>OAH{>X@sskdIRyG^AM zP$9*|t}JuE(GKdSP5RoKf0N!Vi*$A{_|)3Jm9nvL9df$5cghywmjeW)hA_JCvbnOc zNEa77lpOugzo$s5Dm}u6%thFWFY*NqWGhEwua*)@r_3<@-*eX2@Z)pzZU0e3H{JiO znT$`QFA=ZZ;;9u4TX&_88&y=l`mV-7vUEA=wNGkf$9i8c@W)Z-xYh}??wD&utM`X> z`$@R<=Wek5q>R_8=rkSp_I3UX$>~dftuRnFMc|m^IwJ#CUjDHB)ozWu!11uFCdd6#*9Aj@SBo3e1 z_+Y#Y+fYpB60Bfx@qJ^jDh`OANJaagS4z*3M{kK;RxAEzCuaB6aSdl+RTqsDIoF>* zal7>3g4SbMa_=2NIqAN;d z&1fcLLRqXhxn${i19y!2OFfmvb>Hn!N&k7a%=9Y^rUN#$R>>1g4L;kkH*E^U%qq!D z9c6FhYy-@`VSR$(g&mDC!K%>#yyxX#U%EwkX&j%JX99}cq!)Et7^1cnNn>%gMst(H zw#5m*Xa~=CVSl1;{Nnv%8xyxivx^hyZVbBeQbA&dI;3S)iL&jr1hE-li56+8s z92TFiFflT2Z*TwCHEyaXi0p5{K=`7BgboWIPqBMxO`};``M$017Ofw`TW-Dsk>a1# zzVeqfvn43K>=4hr@I2P&`5I;!`^4ZF??y9d#=ykOULKp(fzTi&WLTN3$gYWh`-O`9mHA4%D>e0Vo9-EHw5=f=Ph#s6t z6Y#dZD^xOHmqv8dn7P%fTT!5jHM#2e*oe!)GeO~vP%nkFps0R%XtjDtQOP6+4P~DlJ;lX0ZGuzqLKrYH$2JExsEqU(`4VGSyTh?G(LF;jMNc?9V*I zB;YS|A;h~;n%7i?&w|TRx~yoNcH(^vAFqsUWvI?EJ=XLV<(MD6E3RxA_;Y>2U5?|1 zdS0@T)X=7cI%nPNOEqDql8-zb5Nq^(UFna%rxi`b)9H{y4;iAW{&5nK9}9bp*?356 zQkw8@^q_=Z&Q4juH}5N<63;`dQj*||RhMdMk7%I${o-k>e$hStBCyzA0Qbj5=X|0M zXs@G2*<2_J`XLcN*7%IjY@lGH2Yn5yB=y-B5(z&qiW>>G!!PWr#FZ;D(OEdohS2g6O`^NxtTt z`)#?+_bPr~zTSS!Vs3FGRY)@BjwZifzENYtSuJ91hVU-Uju=&vMN&8OcuIv_^~mt^ zHHMQvoUuy{^W=fMqOO(T&&l9uxzi6&$H>!KOeD~%L7v!LyFd1yN0OW$p~HHtkBAZd zF;RzZDd)G%Jz3TDR4U}|Y*pQ#q|>dCPwv&NDSBQOn#fs6PxR;|=r?0<)WkDut_|le z`RL47#NkOn76~*PpO)VH4G=v^V=sjsuY`o>GJbF9q>BBzt5ps$;_CO1+d&0p2ldx- zJF&{@j8(uhU6P2zsr_1-xjV_+f`cWAUI_v29m2PGnhF)ktNsaT9;wu!>)*sM7e{2R zZz~hHn?@WP)-l|qSYeJ%UW48zdUEPF1miZs-Lz?a!hMx=RTZI3722<$$W;7ly$4SB zI^lfaEHg*!yZw)pP`MNZinPHb#u?dnc!Wk*{ic7^C)ulJK<(j{{u_{x6af(s5J{y& zx&&#YyQRCk5fA|ZY3Y)Z?k*AO?(Xhx&VL_#p5GJiecy4%|K2e!!!z`7j(e}Y*4%5& z`JJD+_S4!y{OVUpo{dXJZHOyB2b#`4A~dAwN^wswa^N}KB`CkfUQB!0<*q>@yC%vr z`UTzLtUoD2j-DXxE2>&Uw}{QIOJb3t#R)I{Ej1|x-r~Dc{(zHg#>u@hyrHyl1ydDX z*uzl>u34&|6v=@Wjd-TNGHDbEqgTEl%)B2h;PRAjh|Y!8*uj;)`*O=7cH5R869`Pn1U*!qHNsj_2bJI 
zh$y{Ya6-6U^71lASDc;2cV~BdvvpuRHok2;R@8VUUKD%lb==D~U1r~e?*i>u=et$&yoW!rSQ;4jQCJg*O_qXmQ{x`u1Kzr4$cwZ$B&-gGv%oxO zL)6oL40sFh&NtiEQBke^48vj~@dKU$$YdCR8$2#8I{G>Q*y_%g@E*$2*=CuLMkj^C z0DC_lcQ=w+z1PNxQEogyZrpR-LgU|i31ByAf4#D2cn9!{p$6I~*o|ghBIMaMJ-!xI z08**ir5#V_HE!c?U_0TyK#({?{g@v#>y)#^Z4p}}`F>ZmIf#b^J5=Q5Oxvkik0YAR(?MCf26sl=H`~ys5C++`=-si|<9eO?Mwzo}U|m z_e+~0ZxNM_?j!<0j#m?pAm_^cWki%)5D?`I?@-&VY;#LX+Ix}xJ|CqlfCb@#je$xZ!oqv(lEsP)lJc#3x)mZKzi z`4J_wZ1Hpk7agK!Q#+4S?7YYv-pNv;bBd_4i76CYZ+__mo3XC(DTHg1BG@|6b~igs zbiu`{na)9#wvtu5^whpN8J!5dTPgN<`?(d`C!O7zA-LEFc9D}aPomTlZ%92nX`f-l z^EynUSI^*jTVpGsxEl+)%Z+pH;-S{3CsxzJwL8{Hp_^j{cc6dVvX8) z>POAFGMwojVZl&<-1$s?V5~(Mm3De^`I6Ik+5BeksoPQc@wR9G z{v8tk;k9Vdu;JniVPy`5hjtr>B3%lheW%UYl91=b(se5;~ z*WgA#B!m(4T+iZ$hnsLsH9G`{EmsNFj#GCPKS`e5`Qa9Xx3p$Wq?zv+6nK7yOd6$d z+?_8PL9D)w`2!29SD=$o{^{pzR`G$*X5L-Q-3^{oy2_QBR(x4qgORHT{Qv;yc^MzocjZn#Uhh}VO(t%aK5w7}D}xkxCXT^!?IDKmq%r$4kjn-|DWZMiuXEa? z@dL|0)`&@HJJB2*G(>4Nw33iqV!S zkHJoni5@uJVE!S&g01zlQ@-uUyp{l;rckcf4? z{mus-nm45=FK>7oeuXo+dcO(cxjk0!`;%wiU)?u-Qw9_UR=rHq^Ry}d+4tjAgnYBC zaI?JRAT&y%`J>z`d8wre( z69~TlKVORe;98GFLRjks@hp(YiJ6?7Y=YeGL-j3iLnY{Mz$*!TBnJw4LhV?w{p;&% zYkzhToln83@4vCAuN>U6nQ`iO9?W+|kBzGl_nVN%^4Ne^o!G%SJ zT*#e1D#!A;$yVFGUBq@}0HNUMRaUyuGTx}86CC%-#nVK%&96?<(Ls~!@Ur#2DA#~D z=aG$P;Y$Ce4=$6B@^v2TS8D9Y7UioW`t}ub)@!57D=`Y!9=5M}XNj=Dj*DA)(0M4- zjLIMC9EW|$dKn)3v|q8*+0FUxv+;sv)3$f(n7N&{)VE-Ha$&g9@`ab&9Up~;Rv!^9x z0032`9lzv9uFn!{ENsuLS>kAE(fjM(@4M)ERVU-4 zH2dpZpm!+4HvDwriHzVq?I9n|+~aeZBc8_*?*u_DtyOmhCb+RGfxC5;eac)P!6@fn zSSCn8RoPh-AA0tAS_txXdVwQ9^9B}I9z2IExI0GP#A>>R{%kK|S#1j~C2%ZCgjWsp zJx+_~BzW92+;o_>W}{IXr?kD~61yZb3ps~p?>aOAL7o}R)w7@WKiZY%k{>!bWG4yA z563B#^AK)?t^icNFQmNphquQm^^%L+T9@@p)_jke?p^z;}9{p zZZZa1tj5weKz&_ZTV^YZVq{tl6FC^~5&al9UP=#iK%D92ae-2_FV**uo1{+=^Rtc$ z^Rw;kMuDRJ21yq)p`Ek0XCHN&R;i};%q8HtZf^y3DMSFOWg=2Vj%?wQDz5h)UZ$o; zu0@Qb1(#Xq;K+!#V~%}2E0X>F3L6jySEg?w2z{3k(Xc?qg1J=Ykhtp$0~4nb1aE{o zH$L^LE>GU*+UEm6aX{66mm#%s8SZc(W9_m-HYR|dlL#@SRUjcvt*TxzZ0M}hYmXPN>ZkhV7ePMo2e!8K1ER~HUOwh??DFa?tkkNFIB 
zlizxtq_viz;0JLToXWeax&BrLj3eW*mb_OI-KAZ)YE1z-M#&MBX!e(0*DS;Q z*!}>(GSDO7vO0Y^$&I&P@)}-(FBz^-5GL#B+AxL~bNiYddHqMrnYZM_x#Pt-$7Q&! zw4AHUy6*u?_(l+=&-{!D?6|05-F67xlc?{Lj;EtBEPGvORP9b|)4x_Bp7$wF9?G&# ziM?SQo~QV<>$wgVv(-kkc|`TCtVFduG4*dL6I+c*oa#Mi`Or1#R1x0azxJ}0J0845 zJsXN4yN@HpCvi_c#_IaB3H%@c$YO~eUe4J^&dR50uFgL>>iWSW&r)V62_`9K)vX#T z#-123?^l-(oyF>aYv}ztTJ7^dQ#vt}uM|e}54QvKG=?f&0VXf>8e1L{UcULrmoD%d zA0evqfcP6223Y>R{;p>L_Hf$x)w+~eZfuB*BtpSl<}H;Vn;M*Q=5RlsW8kX%%(nV| zZN4osa+n%MuMB+6%<`*CSfSxO)?i6e!79Wo1g=+=&krm1=cz8}9`Zgv5^cV9{nB)s zW~cW8B{s~P3H#vP#A7{KGdQZ0>dCas1U?-ii4ox<_LL@mino({6?E54K0e1uHX@t(SF|;DeC&W_u#k32Y>oWYHb+6C_K^03E_rA6?=sYpaDUNFTb(_sY#2B zMI)1|1X4m6gVt<#{ZF~XspbEx20$&2EbiHhr~q~z7%YrW>H5bxX<$_FKo{>XAEE9w zOGs9Tf}=13P>m%)Ll@Cc#{2Fu@HLRUvG|a-yum*KKd@8CTa}SjsdkRL2-~=_6A3u7 zW?@XAv;z>3QT=8}b&5I9sY}A%!UI9JH~PwuUXdDwXvX*>W?OUsdGmr;KSyJ`vA9UQ zn_8Rw5DeT=u@%8s+j~8+55dNo3=vrAHojrqm($EZ2LStoVI~mfA^}ooHukqd^C*vK zNAk&T5Ni?kVSzSV8aM(cj*TG94r=7DDwQz&V#0-4K3_7;h(^|ITqeyzl39`~E2osu z(JJ^RTH8Lv4rjVO%C}v)jcT)mhclW^3PLr!WUWBCd*iXPsno&>p6P{54;qn^Gao21 z7{|<#o@niLB%ZX{0cE=TveG(l>2PA}FheO+>qe{Dv)x?OWj{+(0c6bB(Vpc+B>eYY zEics8SQniKPI9NgQq@Mloq(q)c}8coN=U;oxQ13T3EAZwxK?YLf3&7rwtMh=nvOr* z+vnV|>`~ zLAM`f7df9~s$Y`#^3m*U^Wl0`rXMkxHk`5?^~IF;Z*5%9;MWf^J82cOsGApkI+Iu= zHKXa7U%ozg#^v0hYsoY%CA#zGeY8dQYwg$9obPp-CTH|niN#guPb>#ayeew8cMjbN z%WYWij6d_ZOkFxOtvh+;PiR>XLF^`zz)%_z{e)7rFBMin=n@e~)!IQL z?s`zWT;q|gw_jJf{*0!Tq5tazR>Y|Q&Bb;QN5d+&hn6Vea!;>-wUYJjhY)1x;V}-c z|Anvpos)2#g!BYU5&QO?$IOMpe6pJIQE|EYrEm-8#Pbx7v$*F-ZA*grXcNb0IZC{+ z7dOZKUYFh4Tc?~WAm`u(0U?H1D3hS=dAS-&$d&A}76*kMz>o-Zi#)E+z_=UAC=49_ zoNe@Q14E>K-fQ#Unpq-EHx#h&Itc*x!mRKX~Wd%><0qIzBrK{ zQrhOKKO5g{5TbAp6ca_TzkJXhODkZMh+A~tc)>6oAf=qk`;GF?mEKj;Kt6pru=O9Y z%a=L#vq>ZLfMsIHMf(-xtstn>fp`B2>2Q0g7wU~5)QF+8fjv6ty@8R3#J?jChTW#` z#g`&TB;N!%8*?li9nbc?AW(uN#++3h8n4#vHUr5y~P$58$o#}ph3KQPG5Tt+hc>*ylJYl<-|%guPGUg<-`Vnwrl|I{c#xBqbWoa2SvyIhw=cl?VA9?9kkUMAJNnan$~v* zhxiB_0>U2tx6L-zWbMob{v6}~cyzx5Q&;xPyC$VrxIfoI=z4g+CSGh-;%%J0^a0PP 
zs8}EX^Y2mpGoA$7Nz`pv{RDuTfw6`2eONvPT0{;6ZE-UWm z{p%E9tP5IAj%iR};cVzZ&#R_}$fxzPvl!o2yU~wrN^G@{+K!}^UIlqNo|=xX{6cK2 z3)5URC9-snYC-?SbF$o&N8uZQcm4?UGrFch7JW@~&PG)#rDW79H>E0sQB~9D{pFjw zpgKH}3rqx!ib~ZI2$j{O1ykHpd93}KuQVYQQ;Bq+E$G}+8w)wU(kR_ncJ!JvUbibx zS4BJ%=iX?JdCo?92lphhQ{VNMzNO8%W_N)rpBY~hYa|*38+xtK`xO+(^Zs+c<}7cp z6H3p3vxNP#u^dqG2}-Xtp9Dn+d#Ju@D%dzi9M=KqxYnKjl-Y+uU|$q;p+zNIu?8V& z57@g94ShCsGmjP-Nzpvl1?#;TGFTJgrn*T(-MAN~upb$j8zJ1<4~*@2L<>a_`1iXnFjS5 zTV`74`wc7dIiKbK$pRd_Y}WI9bZ{~(%?HZ)1d5}E8Ow?5yVF(M7u5Oo&+z*hBt1Ml ze&3=qp#Gn^Og}zyD3RHzKXf=a`V6$}f?zQA^_17BpQY_f!Q>ghe#i;HRAA-mA5`WMU*km^y-}9tz$=Tyh zN&#DlZyQPDqz>aChJu%YMmV&N&@osDDF~ULanRsZ$`d+@TFFAu!kYk`>xg-~X{kar zm&w{Kv8Ls{nwY?P-D3=DP%9^_RAmqLFRSSzvjt?Z>r|8%2&?>0QTvo;l)T;XHcczO zas+3#&9W5&{ClOXN3LEF#@LKeU6W1x`Z9ZWOSCKa;JC7{3FdoFk$E%-GE40$`_`MN z_0$(y&Aav!?7_=UOQzF#q29I`^TWEVY;nE4g$pX^z=Gj`;rTgXSzOY4Jkd3A-2S-U z{K*#=%gkF&wAyH30B_i*%k@a5G2}&{_O#!6>@9p-rJLU`$u(B7XUuhlQ~==A8fvl1 zyu}6^Gj{iRt4}GE`(8NcrHJgLxj@`kuzVL`+fuGs9lkYGSmUu9 ze9z1M6%P19Dpj`4gNIz2xa{XxsDL}o@tmTS@E7@c#?j4CR>;-!-%~&mc-L-(%j!H| zLi>I`Unmiou~*l!pa}nIVmhg~k?-|umxl4L;n)Dc^!Vi6L`K}^rljP>i%0d}`PGdV z3?eJs6gVlV1bH+c(s6U}oceDlUe(8r)^P)XXl4uUwJ%w+{*R*b$i0cvZyU^Sf9RRf zITcNet3}5alo4&KlR)h9&m%{gNnY8MQdd%w8DwQqw2qnGEg->w38Kwp>4*%Gji70? z%{%4mqjuqJX8GK1jh$2ebU+7ip)ug(a`kxeS|`?(Ph4q?Mr%W?qCJaQemIh38aHET z2X0;HaapqQlB;Qth$b@SNR=|7kK{!a$q?Jd8;PB{jwi zQ9CuPJha&Y{p9ZJk6$hGV(o27aRp>$G3Oe)aSk+Gke~uz#29C34!WBR{5wbo{d6l{ z%ezy@iitn)Cd;hx_|+4F3Uox86dxyc$v%n>r_XZJMj1~LHz+$YcFW36_n!GO&PDf@ zm-7d{iV;`Bh9f*(d!!RKkm^*z0><_X&J^sUjGU)JrsV`foo#ThclULQ+A@dMHO7~0 z6B5|+&kpn&VqGtU&zto}>Ld*2YOvtu%&qoc3*qf;(_ex45dP^Ti(H-TCNI&R6(qo5 zM(^9J{q`Jiz5yy@oQ!V3d;FgvB-yi1mG($-7Ax&>0ds zd-p>NV6b)DUak?@@KI8@J8B)5my3FgRc@%Q@b(?;as>{m zrk&$F0djEbi84qTAF=}9+h$8p04fxtE@?WBwC<dLvH@AEr|bcUZ`0lDWVjiJpt7ow#6dCiR}F~L&g zFQ-FmR?Q+{%{k*%wM$_0Kb^@EUQ);^_{}-u1J(1PhI6ULk6`%Yc8(JR=EAkyzykX7 z7%CoIbg#!}$sn3P7CP?RV8t=n<|ZW;NiqI16xMs4i!;9=_zh;=Ea5u=_e7-Kt_^D? 
zb`yU)jAX1)7pF=a9$YRzHtysXhweutMj%7UMOC8wBb?f2U>nc5ZS`NHEo3=`EUV0dx<#iJ# z@8viCo>2N9&g>%(FmaRCE--wo%A^3toc)MuCHkX3@t|a`S*O@7<+U)g9lsA##y~su zzPCP(5a&9BHn>^p(z6}iG9iZVe6P20>2WoyO#p8ZF%m5;4jBw>>A(o(rjDofb`X8R8d@}iz!+==X7r~2>vcqPE+c&MBTsBGs>G`BS#Je*5l=9otfm@gg9 zGgnozku8AHIYntx{tR?#pdrl1mIC~?ap@k7RaU8SLyNepKWlWHHIkWu9z&#U4VRSv z$Q=+4M2!I0RS6+x-H1i)6#Ds-sf`nI7;iG^qh!^vs$L|h>J_17!kXc3vLtot_f!AVnpwiJ5qe^#lxp$n^idH z8afk~A$VD7w%Ni?l;eCf5r!Pe4rh%r^aY&F64Z)uPC-`{<7K^%QxQOUKtLw{Kv>in z&U}^vefNW`6KGotDB+MqfTE^yzVaPy)Gph5L6AyR2v#IZujc;L0=<%d`5bux9c4Q7PEbVq@D)euX;Y1muEO=l4P1-;IJ+UV(yu6JkO3+p<9Q+6y#w$ZC_3=o=`Z>p z4HE&royT`M3@b-THsZJszCRBhw~dji!vvpw(zwCnVLlMk^GJpLT$&lpg{di(EJf4R zg2!XPf(JJY)u3#*K?yF01wu{iV|tAK-w+lO4ys&kx5}j9^tQzVH+kGmCwjM5(tjv@ zxyu)fgaN+@LWBt^UvD`tIqponXV|C&10spc&=kuA7%k|SFbKhVMZ?k(icF9H7Dl9CfOqa)&>AWW*2 zV>Fto;19Y|H|!*ltJ)n&%GeAg=5XE-pEcgWkx;{}Z?I+?FyS`{0{k_D^P_~?)hk+f zxT{ZEYL7fbm%|rqUfI5S0SCx{tD!63E580RepPjNB$-MO6LT%OzN!Y2Y9bsU)E`l zyUbpz$I~QZPSt4BPVV}kD(#DDxraEhYqr#}zQ-lO@fiHk=&5ou8PtUvYTs~L{9*Vh zm#&T17l-8Q0UoRcs6$Ihe8(`-cvYwFTd^*Vq4BuqouQq&jHS&{9?cT{IL&R*vcOqX zZ%=%0@0`6G)xnbB=dO6A&&|hk-kMvbYUUP=`rDh{JcQiO`fillV4TjJ8_}oO3A|W- zC3ElI)XpE*@zFJn^E6`gm-q(ItbwT^zn?_dUDQ;cpyJx$7UH$=0Qvbj8a2Q_?XqR~ zu}mGt+hQ?Fzwb=9rkyUxv$7Bre`aH9YhBB{66Nq*d|zL@!O&QEM=^%8ensz$8*9zN zDTsS8t?_a#!R>r+6s)m;sv@>E#GV(6Ez>5Ep_F^mR!xf74esPS$ehXSsxc6- z>SVMQd3MMG6+s|g=rN`L!bRz7&*$T%(>XNt!@@;ZTR}}iTseunw26%Hi9kg<>1rIu zCj9VNt!GlNAx#kZ0$9xNS1xy8mYg!hT@GCOxBf~ko09A{jrlA1hf<;|;(!TcP{(1b zP&lDC zrMsu4Wg)cG1ssKI5P8_vhN%5!2m-z%$KKvU6t%Ur{#qXW(-~oolnLLawGGerW~F&A z_3tBOFPG82B(76@ZgimOYP9IS?k7aJ!BYfo!}Y(+`aXZ4B%P&1o+16PtVUH$s2~%7 z{kKsqfs=dgW4zYS z6zs15P#7S<;HLzoZ$R{6bab@9s7&PUg*QHwYnZCBTL;m*cpk)A^fZV?5VF@-VNxO4KLC%v!8>wLf&#d=$E)1ov1~w^@Ad>snj2aKCe+T+4@Dpk6V$+k zni*AITU-10$e)kMOK)B*vx3MVvM#HAG6X9H zHWT5-9gxAGJ}1|vf#_$ZZx|^TDZPX87}pbug=>qyMDTWWW(4`}R!j8{7?z1%n1yP& z5CA|&eK!|f_a<}Y747M9B_l;1H3JQwv9b-@G7{ep@4PWq>NDb1+PZfXSbC;QJh+jW zB}xgy{w2Ns7$f7a5hL4z=Fw{6XbmA3rq@alf`N#H6HC9sW4E@!?Yu0Y#ZC!LCA~b= 
zOR~im%~}_o_!#E*<5G?eW`7lEk1Fmu-rkAK;a&^VIA~3zx+(rV7YO|R8iK*W`y1O> z(`HQgzot&5Hb7XNAjIeU{bD?BHOYb3=MWpnUBd+CRSURlK8CpizS?EjKdB!m1$0mT z9IrUJ949O-?ONL0ecp)k3>$7yr@a{z{RjXnAK^kN$}-y)U6kXkw1RExr{BK^@s1wi zV11-!=Vk|`2)gPE`4{|BH%ZfD`jysC+P9CL($lFx@?{}?-ov|!<&m4{*p+Tk<-E2YRT|p8MjLp*HkL4E{Q5eq=2^iL35oHA zq|9YIvjKXQp2W5peY>Uvja9D^@tddHgmg(hPVn<6)k)|CMm9H68WwJ&m@<|pZlJNm_eP|xogy8En}vE>(tm>;wX!It^UBvUHG%Yv&9_pq}A44m_}6IZ;voeqFeEXevc^cCsrR@i|*7QQf3IQye^GA*riC>Q%oumbSI&`pEtD09iO}Ejp#Ug0ub#$i1(+7V?%cBTW@{dLoO)SqHD4n8HIlPTIfb(u zMME3~IcuC#vu+7@DykK(61UYfEbWdzjuLt1pUYv)ZxDU`?CO-;!{wygoUpswkVVz% zaVw;~@mgt>*V4ZsdxeyASU&FMp-SU^ND#&w#~pg4LAM z)WMmroVCkwpc5ZJ&9`fM$rJZMUF_ilBTzRo&#oI;lMQ%rE)o)76Cu98o$PCGr$DEqMgr>OPy>jNNFZHBN8@GiB^a<}X>_4;Mp6(Y3E zrZ0e!&9~>k%Dd5lnxEGy4&xoJM^HMc*lxIrWS!lC<2x&sprUI>@@Ty$u9>%MY9V1N6Ai)ZC96Wbj9OYYNj}ly$u{7 zUa-S-grFPwqgVV;l1p-e8@DSVb@;W+LG4C&Vr@EK9hD%ezeCgyvLbrf)2^Nk>_F3;*WQ50C(4i*41dZ(}U9}Di{Cv-8>w(sLX7Uh~NaojG}BtKcjTiW!}>o zcx)AgY`Y6tk1r2Ec1}FF@Gz|XxmZ&{^*U^8pH&Y7S~S<|TCSSA z_u(wvhxuja{;npYFj{K4UG?(+Ddkc1eEIW-bDt~=kJmv|c-m1WHA`7*OY(~Y+zs;O z^p(yZC2+kgj|>id8fKtLb-wGJvhPi`?w3bap2q~EN?xEh4MICa*2VW;6~3R*9p-l# zOHyk3ZiT3uA0e*s9}t4*qP!GjjvfG_OP?vlQ=}xthy0#`kDPJHlng1iRsyF)9uGHH zwLU{iDRtswy{Z>+>LNdn2%wv2(`thF&gsSnY=(nm2lOi!4Y@crlT>KNrV}U|CUjd7*}fd_h5z#wZs5<#G0d z(b_oSkCqtHv&nqT=8hwEUjkx5c?$Q8hL8KY)7yjQX{{Kz5rk5jkW^^k7qrTOKwc0$Fgd)0>V#2hkPBY7_g$IWMIh-S(2!Jfg{1rs)Z0EZyNBWM@HclEI@_K z__~AzsKE)Lco;_t$XEfk(lDtPjwH<3Vz>aft{+?%<=gZ^@Fjo!$_EQxp0hoZpwryZ zm{ZQgiSAc-;`xIRTJUEjXQX-s$cPGyV0%KgBwEgq>C~P*>w^JSem3fPFwBN?zx$S49j?wX88n^6UA;5G?vS&>gJXGLb1D|=}@on6s%zE$XIPuoEw3dI=m%u-u zS|@<1UMVg=D3n;>TPIPjzr*yO-uF9Cj2T}Zn1n0_zbqOxHz{=%HeUCv(62&dQfZ)c zIeb00Xb-h8%ZGh=jW4P%>h+CR%Za8Lcd6qRT=z~kFt1wos`ylxo zVMi`@RM+g7TwNM*XLDiEdadaddUaX*vBSWx$%_oevlWx|@!RD2Tbs*|FyNS68@ITl1n8)7L; zP0J})Wfm6B?GR@x2+!%Dc@DtgI!6XNwAI|9Xf6M+#7{~?alh&H7WEsjZgOC1-ZunS z%}QD-F493P^R~o3mp=cA>PU-Hq*$tPPyC?hTfSXx{UkGchn>8OxGmD+aHq;|9!etf z>(uK>Wc~2pD({j#_!}M}O=9`jiAAHFKvAetUqp})U%L7^a_y$Qy8e8|3eo-BJjAD7 
zM({wqf5xR{$h@TC#T31N*i+@XWo{^|LMWo5_XpSUehQ3_M>j8aF$KaxP>ERE&<*tH z&FIp_>Y}Z%pZTPegLG2QqV)|(`>42;8N}Y2t!8Vy04wUA17dIADD*(I?~YN98<+)9`zdsIs964R{=gy%#0x-0;?YGn@)Cmt^L_zoPGR4O16+bDvppkcM+~zA7U*j8?foN~;_<{{^ zFLZ;lD47)3eqB0g6R|Rd$>`?QNr;2?WI0`$`Uw-_j1wwHyN1 znpbIryGw!TC{<^gUp*+g?|JSoaIU z%7tEy3w{qxk4e_ZRd2qdRX8c3tp14}E8dX@(X>VH0_zpD!PyE_(?NLmCd6W|1t*V5 znU`$LQ&f4{unrk|!+9_l``KcJP@fAqxs!>YTMX3A>ArGhO_sHAVrv!*4Qt`}66$iUkoz@6 z{!!a>eK7`*LrT29E`FCtm7HmUgn=}CTj&7B zoy1v)%2vJ1Ei%lQ`RRE*PwgZx(mgP%MhP}xY9*>e;`90|)p{OouS$-C!g+`0_hywf z^b`+4$7HdU)h;r`=?Zc6Xf%+6@gf46&B*V)wUmPq{Tl+5N_HIv@g#kF8k}XcH&R9pH;rN4j7BA5`#MiF8pt zbFgKW$BRH^Ilip|fG85SR_7(~0l$~&t+q_LwQSMhh!K}R_ZK87A}1YL66q9(94L0m z+35^N)}sCE1v?M^-*;Xcm%a#OxvgGqDiE^__$P%|xxn}=sju@8}yS;vEV#VhpGGXkIe zZJ-f%_{UUdH&fFW9XE4W^(9?%sL!Q4#w0SvBV~;^7l@Zz1Gk5+-LK9uPe<1baIUY2 z%@nkR$CNR)8%A2$Y7r?i<}^DO@@`Kl+j4spw!SmZ?uO04GmNwn%34ZGQo5V3?#5Xw zJ56!0yiO7mnjZGq6GiuCqFQ=z!Q0^J!L)W+%f>DB#Eh2;BT-CcJ|P}_V3wxMT45lD zF$>!N>#`k%G0z!fH|b6h;jC;X`K1!~d4gLMF9d)4`R)d5im7Ye%YB7&b(D(T`?`_8 z=KBAnI7Aar6OYNEs7uRaqIhQX$=j}>JzX>a&GFo`vON0M{guO;R2Ur$tNG|&B}ef# zd)?1_&a);DyoIyZpMTQZ96b}BOoBz@FQJi8KaaJv%~B&y;esTKmV?KwE>YpG1%xSVT7chD`+KsY)pMq(+;G5uAl9 z1-%D8&OX%$?|eXPXfXzNxp0~u9~xkfWhPfzGs=sn$^k2EwR4tZg#iqFm=)hMdKK&nBtm&SO!zLs47n~4}dUsZ9`%6fqIwj*r%OuIYi6)d*NRHJ2EE6Fe41W zyJXm+fm|%}HI|UeLA5gXqmX(H#)I$7jX~f<1@X#L-z^%OA001-{Ak^h$VMd7b1202@kkC$gN8 zk@5)YIXDRY6w&Iorfl=WNUzr4jR^DdC1h_L8M6)?hqc9t_T$NU4H&7d!bGMdWf;5O z6;Y82u1c(%l>Qm@r|{(TSG&giyuCh`PGnEJ%3E#^gB%s zi!*dl9&2CUg?!kLSb%#G_F$$6@p77&lWh$m(k;Rdjrh$2OL8h;qNHbb28Mcx8N$d- zGn5niwVz|9o1d%2Hp~&Q&*M3*I9n|Zct;{kw5p4V;_H=O*Y9W*dlJ5r>{#*Fdcto2 z^Kq(!x%tbtbYK8T?TGsj2nS5z5gm&sX*Mn@oP6o3@yZG(**ww?*GOi_3_}CMcITK2 zuG{d+!8Y!;fVe|wxDM_nNUVHa@Ez>7?R|(DSl`5bdoo-xbsEOsP6AC`6@kw6xBJzd zs6N~f%%p-#doFYXLa>%>le2p<~o!rnl3W^IfK*5etj+SSQF%hx$YRlgOFjKDm!G|EYP+%D6rtbOq50Zq8p zi*Wt|`UV!;R*P?E%M;&Y!QFel*&^pA@#`jmyZ2as&^P{q+OG0ZkLlx@UQ}C+96ha) zz=!tfcv1a^(elIGY=ycSuMS6jx0Q)p92biIFn)a#mgD+;F4F5bmvzi~-Kz1!!L(;U 
z$GpgrlS;tSGY2sAQQz%xCbLyW05*lSMT|6RUd-$E$o+6iCFMi|9tg?1PE>QGP`f^- z7t{Hc!wI~^Ym7%w)#6-#WAZFEph0c_p`M9duz|t&qiZ-#dm8JPni_CMPg+y5pXTA& zmm5gGde&Q7AkKEI6-7!_#vx&uX<1v6b7#9EzGF z1N`-%1Yna01MoM+WG|F^2^|^28slD7`D@E|= zla3m<`Al8n>I`{oBetJYc;mRLdy*4LD8(TZSi4A_iN_U`of#dzT3%^kWmC`EU+AOM zkzEF~S=?S{tSh~3^j0zBM$2#D*nK_i+V~og&Z#$P`76aQ+o&onFwkdW^eQ^k#p zq-V;aI)|I%s}W~_tJ`RLMD+3QmO?(G)1K-*FY!!j_no+&Q+4;PUbBGuM3tVx`OE7t zkG(JnI#ue>8N?`%ey?DM_rpyuAV7X5E>i~QM~iwhT6h`8y*M%{3K`@Nc*gP=-16_1;Kme z`sKaxFPpZfH^Eiqm9V$#o<%e3*ScxW=}V6HT|9t4!tH)hJjIy*hGfysu-X6*VKZjQP~(e-O$wOl)Q9aFfR3a5N-NM4C1wZWw92YtbhWp+$f`OHR- zT;?eKdKpZTm%HRHkDOt}#l^YYU9MNe-x!st7TgJ4gRnpN^fRHn&ZduubSeGgBO}JI zFUJp>Z^7~>u%=OibhaG{IbPSdGs6KO^i)sfUr!@0!-vtiS3`|W;Sxprawo3VQD!fL2 zm_lCpkKQRF=iTN-eZ&CwG$0?ogIQ6H%W2hw0RyyUi=A>vZ_GX6u<^3h1R^c#gRygd z+)bCZSv{vu4?e0BWIpr?wz;5D8Hu058}i(;zNH3iTUsT|lW6U zCurnI(TnVqGVk=%->X1s|MGd#=S-9^FG}!#pZsK9OoV_BR@C;ot(cNk=mLi(k5C|U z2Hak@#TRijG+mB`+qy<$FIL+#*AY89ebJ}R3(`m?_QxmbRCfoh7^9&AWg`(cMGOhO zK{Yqav0%~E35}eDPy!_bGxISN=iv$x?=&nMMNgCSyxOExjUjGILHW_k+1X_y<;QD zKP!?`+11W0o@x?*U*Q!!bg)>7m$UL^K(#)_A9wGP5{d8;Li}8a8v+=a2Tk<6hJZ;h z+1*F4I*eJAxctN?!Fsg{!JJghI!8tD?cnw)*uD?o<{km9T^GWqn3{vaO+cyKIR2`5NFDd?Kh3WgI6(gOy3q(ve%7wDHP8-tdNw* zX;t-1+PLx)BG?4e)}H&lGSt!|B-Y_cNGjWZeOPOAuB$;e)1aEzePsdzHCU3Y?CdJ5 z`NGkAfs{nma;0T0TCv$Z4F}F~uP!iQZOP?_ z^=VW6fzX=#I9BcwGntSixvo68o2_1MtCFvH{0Go#k0-7Ni?{Z6+y8zKDD96>4ITV{ z-2G)(Ra@J~4NpKoML?w+X(W~IPyuOCX;?^iccX+zcZ0NubhmV~=&nU84brimxzK&@ zeeL_cKD_Vq?cwkcWKLwvImaAhoWJq^pQmKDe-ilkX!OR_>Ka2G-U{4BPzyu{mI`?j zd5O~Hgaa#j_#^qApWj8{?Qu9kks24vO~i~i>+3UGRQTP5#>~dkZxNQ_{fKpL+o-j&bGlZ=5x! 
z+t|)f5wM4e$U30HOa6KttdZ{|lJjDwl_JUg)sdBq$i%q$>Jnpx0#6W~*7f06nbVtb z{+hBv9~ylnWL2RD-qb8cHxc4O$J;meU!glk;Toemm0B$Q^RLKIOm~l-+8-L;;N!$m zvRV@n5^C_ca4f*Ttn;|2RNggc0CV$rcr++mU4Y{$DC|RAxC2&=KRQBEh)~a&Hj7|l zCo_CPB-id`Ytq~nEjJkZOcgc>SgJZeXZT<=~qHm0h|Fp2rtt980uG zKW;&4S@v66IGFI9Dc%aJ)7F$Qh+G9^G8=hZQNKuWo$%HyG46@Tl>AkQeFATkKhM!JN|Tvg{nh$Ei+GiJpEt zNfV}OpO5{Isj(!cR2LcoenpKDT&dXl$C%Z>G-Mk~Y$INF`?M`5Hu$=8YzcfP43St4 zl6YN~Z$zWwD3nj(k*|EAVl@rn{J#f3&vOc0g}|G62io6{7#fFuPduOjRpcw#i)?Tz zRD5A^y_K*O>iU%oMO;Cg^an<-{l%uja`fR+<(CuPycP+IS4$$Bat@Vtbv(u%G6iK` zQtK)iDU{?*Ix(xhn>s?z#7Kj8F%`5|KyM&^z-6IcqxE5N?$eZ-dspe~CyxNpOA8OH z*V3of{g9}0b#ICf2di&_N6iHtsf|4{Pcx$l`!-ZeUOmF&!>OXEd~J65_O%pR=@hPnpxA7JwP0WZki$-H)}ef}H?w}nq;9%N zzUqu4g+2>LE#De2V_5v7=#u(Z`Ny=<9PCrcFoCnP{QlgFSw|8x!^x^vA~5v}&qftj zV!UC}#p_29_h(|kPSb=v`WdoYCDsyXgogqHw_9A$-6GMu_9^O^L+Jbs%{#$s|NQ-e zAqoB->fmiatOlo~5Qdei5&2{6JrsR3^iH#ySyRfMkPOz&*2Fv&dz#QYti^qRYW9i# zDlp%S21nR*A5)sW^@Oy9owIcizw^g)(46m%zPjhNq|4xBpR-bO>)-?yir?{^Ll%6_ zvdG@Ev?O-*MZ58><1Os{WJ7f_7wgKSoloczpKz&brqT7vqJ=MYGC+0q@77cAB`g?J zsTp@1KY>`fw3CNh{;Elm%aMr6>wZAkj}S1aerrtC!;cc8d7oS}+dNE%W1!0mahG-( zTYB46(5;_A)Qbc^Fre{)54AMZZjs#@TU#T3njm1i6yI$F?y<*XP5mS&Ojy|CVDTVo z4=LDk0nQA&EVzh5ByiS)=2^7Vq4s9(MMG2GzJu~PZ;UO&4WH+(F6VhAH4Jf%dd zt7`$RW?ZNCbG;VOGHoi`c6O#kM-VjkM1$lG(0=v$9>tH_3n6ETb1+7~1}eK|L;i)ubeU6v7_S;L*k;5ZHQ6ehAIj{PPH+i*MHnk>-+?cg^H1C@45JX?DJ(UC#}kZ-|!RPIx6>vu2L+AjzQo$0kkB?OMr@;lC^#iI`T z@FqS@C2DjO8neQC522OOTZ1}fPwRrGbnk2TI<>a+>2iXET~JN!Wxs^yt2h95w6r`z0wY7(D63RfTCW9(}%?3(QYym?d6 zm}HpB#g#YC#Q9GxzZ~m7PQnYAd8$Q=w1Os>`h@lJZCwlAf%wxsd58s%NJN5^Rz2HJ)Aj=RF1ni^W2+Fp`t9T8oh8P{w*xn`8d9i{ zJ`emfr9K1q>H)$Uo*F;33yiQ`I)5YH-B)*kj}tdi);9N=xSU6@(15=O#)$*ye9o&iuQXrywuN*)8c)b)4%WViF|gIe2ke1c zn}WumzR6kF$-lu3$$RCSp$mWA)1sJ;hk1r!>u-_0KZjM0ywr~O_+{$)=CekP?%kBD zmUORIMP9EU8u>=hRiPn8HJ1d_0XD+<&-|j@`h8&hw5(Emha=LkGUUu ze})St2fE&C|K8y?yH3e_zkHn0p@Stgt)hwoyvF@cGcR*b_jQ+7`Uor+qUmktuQ$+w zOR2GnJ6FwDmi3;BS~|1$^2M}hzqBbD9MU34&Kt2jAsX^%3>rrQRs-)wI8r%GLAid0 
z^cQ{~%f@*f9olG1F6{2C7Py27H!VU2Cx38>ubS9^gO(m59(lfmk|8fb-_4#B_Y*&j z>Zo0ijsBOz$vvYrZ@@&FN+G&XbU-@Ndfc!Uw5#~>P0ATyDCTGyY-jSN2Vpw^doGp1Tps% zlTF6-?P%j8`z|Yyv&A%#kk;L+rxn{Fy5Gt7Ipra9AA0{DD9Cjec3-Y*ce`HI=h;lP zaZ|?x05`QVQe4BQ!zU8QG&0(OS%!=-fyA=Mj$ukr8Cti`p_H4zUBK^+mC;>d!46vw z`qkBTC#1ZhDB723K>S^&lvp!t*O1z58Kz+85^L{fhxi?ml~uYYsTJsuBD@}RYZn3B zZDK4Y3FQ_@BQ_-v-h z9#OaOO1@9#l|YUA7R}BbK1=lORI9b4oe>%Xb?%k8UfBHLFUP)>%d}ebK ze$N?+=H!)=#&l-i?NRyj=bfJGp}6BP(MG&u(qFeU=v*NXJL7!yc@(E-jXS}1!6%L4 z-{*wK6V)3py%*f(A_+s{H$`igGSG0x^Q;D=EUp8;l+o-*XST!b-cxFPaEs%CPH$Hw zqBQW-ET-KaOsFw@BHXj?vu(kSQ-^Y?sKjoo{ZRDXdA$-e%GFl64>=y`GiBiGQX4k! zfsJeTcoJ)8g&DuYWN{_gpMh#N?>hXV(`dVvSjDEFWs<|J1Dx3L_{vWSB9}h0)jJLv z2$NuA>z7%Gm0{sV!ef$NdMf{=1-Qr^2J=e|ODU+X?+e~WmDIelppm6tCi-9}f(nQd z_62-s%!lnc@u|vQXhOnznwNHdw-J!mv3po4=U+?!Xt+Z5?8t{_S6NV zbtz$6E_6yG)|$zIcXxpAQanlnb`7zQAUd*)^q%dOMa!Ab_<-=Hu(sXe&?QZj$c$s# zW|;xms*_hT$`m%kJEr*O*O5*v35P}HUO#VVgvS~7ZsC==FRu7*YI^PFI>PkB(@^7K zAFS%Y408K(H6-a$k)?^4w41MZ&wHA|cPtwwV&t?vojuk^!KuYLufuMP2-dJ?5tbL% ztL3=Y^dK^Lc&h{y*P2SEtATGIXPDr|5y3fce!mT~cx5pLBSUi|9r`xDzhnDG<9sX4 z=#)arvQnqlOm7|Cm{X=k`VV$Xx$CIeEntqlwlCMpstX|GLRC};?XiViy zgT7sD#T7D&$bidN6~O}oD~+?WaeljrRo3dR1Y)_IaqmfUquyjY$VatP3i|4;mqCF~ z6A}e~RBFRXHaBA47Hs&a2zllYzB6PlnAh)RY~mv0eb{rU^6~?=Q;pX>Rbz|`$KapB z2JjN-)qWETWK(-L&M0r=QOBrpX+4`FdW|0uZYg^#cU5UmhzHx^{R>w#T7a^E*@okp zMn72EXGH0#k&E7=Al5zMuQ`S?a{aN1iRw{S!HOF$DSUe`2$GFLBzhj?UJSUA%fXC` zPg0GRDNga^t2g?5dbRA%h9PVp4V#hYBj zqSn-MJ@tJ$X+*oMV0|XW%`aJSlR*8VRvTr>!K;W1CXy5l5x~2cjSw=+{N;KTBysG~ z>@JLd`nucIvOw@W^#y`8m1}K7T3;;x6=Q<{1@Gdnb| z;%^cPEw^es>o6?4y1v-`RP@yYlqZ2Pb!zE<0!-&s1lB4;eeHo*7zuqS1~obgX~`RF z%CqqWRgh}@*`oYzAI3wv}P|S2g0!t3ye& z_d6oBvk9(uaipOcMs4brrmsz|!~XEfKa&fXFcIwAV3MBCV<1g(T@wL+TONX497SUq z*BaM@HaP2S32o21SI%j+wjzBi!;tur4e_)(c>x)#7cWHxA2K;iF z>(asI^ADeqpZXzGfA|{`{@+=cII*G-mbV)&A+^kYHNem$pB%U_QO;hjw;$e*h@REDsTtxlAQrG2V>bsyFF$ z(F(M;A!C2>vA>Y7G$Q={JoOBb#L1O;Ean^?q-pB^!7!~yxKWb-78#k4f{Wyv7h{y!O01|K;I=r=Z<{Q0qHpf5TCPVGQweQXiCyj+;(0VY&F-{)iPrh1okH^Kq`E`q| 
zk*cyU*L)isOs@M^`jI>AnaKZT%su1n(P}3E;l$}Bx_i>zy;~nvdd7CDiLROxafE#U!(mN|c~=dv zA>lqG>+tJs@p~RH^k9IczC>SR$%s|PX$oWljH$oq3kxG;PrtPDFf`v=uxo1>y!T9l zG#7^tVAvyzZiNR1rOyr}M0P&xS_lwOC&cMz?=MV!atAcGnfL8CHtJz%V6<{NJ>O;B zPSbcr(HU~Z*Q+cjHsnIzfhK$!bJLuJzJsWzYK7BDofXqR+1i7jp=g|%O$h9R@s~I zxg;8dK7WzzuF*(jt3l*%7*Cf3|2e$5?p65OT|V>6+0$%-(mm!@YlJFS@Zq#{J70GvmO5hK=L10=X71_~kR;2T7e-tuc!_nVuva5@6C zMtr`%y`wKYnZWKP{N#xzi*eU&uttU;?w)p)#M7v#1j3UP5$m__$LFuBg4YJ#!I;79 zt`0OCgQqKPPW6&M_cfIU4!~u744T!}F7iSFHrZ~JfZjx8y$S(VNZ~obyY5QmAophC zXYxI`h!uY=zVf$)F;F8v4Vv%Ck-P}BHV%r zATY_$>%0n71IRvRGA<{;%qCu6DL*=WTgqv8?XL2b1&jjXNp~yUd6@eI9W`$WZ-&wo zW=k}*wJVCdfGn`f_j;De>28!0onO@R>M{{6U~sHOOWRyVmJTdquq47*D@jM+{$`iy z&hC`2c$0!3KmW+FPhhv~*+%nLQbigO*PBN+lQi--_Yz#1DkxSN+&ytCZC?CHgL(W5ufAbd>F#DV-a#_%OaLAIQ=H2Zcc>d}u9V@3 zkQFM-p60Yed^Eqph7U1a5wpH)`2TH6qGCqhj>ZBQE@Bai-^bhnJ{&SkI0QmHq)2e9ax!bKI03@rR~;S_ zmJ|TthDXKzEKZ-S#)C7B&9K}rlr)7=$4H^Ky!rF=Q`u;TTL9rV<|r8hq&|M&y#qM{Qwf#`SY}`WQUj*3=%q}D>L$z) zrpImCw1Btpw0zpKpW4Y%{*PBamYDcaLt6w`R{#=l@J+UH?U&@UZA<_JNG|Hkli69u zd}|LSZ{7J~|FoB!3*{THLeb=5h#;FyGxtMaLj>Dso?2=VNtAfkChfzb;-Gr&C{dNO z=lHxESl6PB$&!AqH>{WZE&y_*oyBN+{HmyYs}o98;D#eD={=pG!^AeMvE)eh=(9#* z*u^_muHAee023`;jz+fLvR<##p^0N<@A4R%6r^DP3ODJ2w6&rl*@RnqcA z43#!j2L)8{s|EN~5&XQUXXttaUgJ+^q5apKZNlqw3*1leSD5n75qZot3#LT?R~Fth z>t01T4(EI?{io{RRGs4Hu^Hm?r5Dv1@p7Qwl6w8nCBq)oRyoqQjC%x(DjE8kEgx$D z9}Zw|FE%!12q=C6i!XC@lk~PYXHtio)O}bM}cmoI?f0 z2kOTRbk)c<^QX;BM4am$V(2cJQ*zP``~VD}J~e>U3|z`;lW&%CqJ0t>K-!MhX)Fnc zMY$fIx}SD91<3wK%RnOiPQGOJ0nLP?l+rVaPdZnxFX&6e`eOCDBo+}lk{uze= z;l^XYEmd>atjB^`gFeGN@-q6d10`Y|Zb21)nQ7D~PORp-oK3SD6t_>y2$6XRwyJE{ z-I1MX6^!Q&SOSMW)?j5Zv6_m$OF2gqnp#Wy$S-3n&8L1T zLVvrIs~U9Cf1C5n=Noj3!nh6uQWDJYmoweTC+E>PN(8=R)1p$Z(<0&KK$(sN^XQG* znZ1dCyOF%KBqzul!8_1^8l-@eaxGpc<4RptYCIl6sXC)bUHI#$7I@b;fzUN?{W^uvvtMX> zp3;3FxZ%HcvVlGKPA2{Yl2SgU)IX`Rv5RrAyKGfSP7iTa%3H;sPZms_MXz$m(o2+>h-S)vkP~(XGaIZm)$~-qVv0|?%T1RSGI~g zMgmGE#3S;xx*@rELhMPDt!k&Q8?eo?IF+cV@ zR`xyr2S{}~;?p{V&ZXPw2glemeeIj=rTdulGX4o*x^UKcbV~UGe3i!-H#m#A*pu 
z#iQ+jK08dTbu*dqqGB)Z;^VdQ3G>Y}$5rh|^w4v^@y5alZ|AuMGr3bJ9iF`U*5Pql zdImbULMBxSZf zj7X!T@wLwL%X}Bl-DBYKCRoX+WXik=eqFbe?-nP20dnL0H}9v};b&=ZH3+21VRIZI z>p{p%#!?aT5-{$4V342rWd@Yam88$_4^a%boZ5}+KaPMO1N z(0<6dLAwZnIE4^OsP~BP{gTWcG9Ih?j@F-oCWj&TAgW7LIYro9DgTpwO(a7+~ z1k7A?*c5-6??H8w75Xt|E+Ws6_=d@U+tomDC$0gr*UJW8e^ZVd2^g5s$L$8=D9Xvn zfeNl@A-7{e5YQl4&GjneI(z!jo1Q|gI!;Z`tu*i^Bb080_s}p^Oh@;=Z_n1e&K)t& z$%KVTV^V-dFhlH{QbSSyH>f&t^|;Z9Vb5Z))hpi6D~cFm%V|Sz&@u%=yr5BD*p1w4 zBk(u2xuH0Jcup$c*T$`XLAC!G6F^`ghTu06f31N2L`Yz&4^+h6Gr5lloH65yJN}|E zGh^3Wg4V>sv)m~C6=YKz^#&R>K< zXK=%6#{W1zU3gsdpSRh#)xy6F_~#W5J63r8MF4Mp&0sC!l$)lT1%QA9+<{!#W4`G$ zZ;);vkvPd>-dpsxKeq*J;9s!mW;YD%R3v%BMo9m<5YXTDyT{?ZN*6%>*FlhUfymBV z1$4oQ*XwYkEcP}ek^~)$;LSDu#yzw%kr@MLM#>M_3h)bte{$^S|LDD!GsZ*uV6LN`YF#@paSfHL9$6X1AI#KX zTuO$WYc#P}>2T~Hskz}j@8N(cVB+Vr7E)1ShXnTH5^zMpR2!qqg7M#Ed)lL@AZ8^l z`dg9Vyr}IAL&Z$9903v9pI07SZI8<%#71wp&mc7{!)Gys!sTLKY@QT%u(BSk(ZDF zbMu;3a+JulsPnAdS92|M!VGiO3CAXj&0VOeQ0|6D^*rAiK{`oTas6=X%L;>vqfudW zdl{gR&Uy3D6&Yo(Bm7^&cnH`YNyB)KcEr4skb=f0Mt+pqFpubC1WN-z*^j{oL3u+h z(;lyEV2zEW6wV{M86YrPI_G3W$`Y}02+?nmWN8$;lcf(4=;Z{3F|}+l4VUqC`g);9 zzv{P(6Xg0L?u+1B&sfAt1YMNg29|Qa{rGUUc?^EeI~?M=jI_9NyChsCiVjjyyh=MK zMK=d6cbnq2-4YWI!*F+6lTO!5WgIttt#d-k zPOq&}&P2k*5;AOX)rOcX3Khf~c`Ud&T6v$DxV(g~_+M7i;XF%{HI&q$$(B5G{)6%Y@TFvQs zfaA63?)s-Y-1}eY-LKTG%uFg-=oQNPBEB#{t7Ke^P}s{=SDV@iO1+*4a~fHwNzJH8 zXc*2K=+w|XDlL|G;bi5siCx0xxs%j5&f;q0`|WyjO#xg3I>gv!l;zA z{KhhiIiSrjKF#D{znArZN3~(s_H5H#f=|5hgYBt^&68+>^Dka1zdEYGCrH7+?MSLa zlz4=_3cCIAc8s|bjDxnFtBd9~YLzT4L5gS4RnNlsgL_p2~_*88c*|4;zE z7um*|Ks^<}!I?lgZ>mP&>9LfrXH8_og2+g0nKW5NW6~DhNh|x&FjtS9cFSP9&|<#MX+xuNY+sSr;~ zP#6g=s7n{*aNlX< zHkz;}4RPdnU+`mro_PscezI$s{S~b{K-V0uTI9w0A5oA%h4Y8>;Q=&YaKl!N`I-d) zW#_eJNzEgglHY@zf~Yi@}xWhVMmUcU_9uUe|J% zt%eR5tRpM)QG5TlRW|wX&a475FlAb`7AY<-4^enxfhCQuXymY+`$z>l=KC&eEb{n>hfMBY=+&66E=CPWI8kbta`=U=CmU19*rjm&NPNCHD2LEL+szWkx5x*^Rv zGoS2kb94_9)b9h5G205%Cv;V(m8A_s-}_G_s12e^`S@tFLPDPzB72)3RYO?ZM#Ohq zsq`G`PLKgYth&CcTj_n~54T?lQtwG@!1aX5HI@^Q$A5jPBgy;bMOa9W> 
z_$Z_7OXhHg!vu^xdNW!8_+dEdMHktFk{)IEy40@w@xibxo#P}f3%Te4sZF(cqcrol z)9~0hK85hvyW9z$z5q21^x>@~iipjRMyb(poA~EQQXVPHFP{SU9aePjR_&?&Tf1~9 zmjtnQ31ZlVP+N0@)y{)%t@9Dwtt3*$4)~&>hxZLnNU@7<08LzPDH%(Lj{5cS*(yG9 z)lXjhf@wFtRHz2ZA^r2)=Q=9zW>$djq>0++WX>{$@{;It_|I)wu@9Zq2R#=DDHhiz z=ixG5ype)(dtUh!kn}(#Rj%o$%i5iM8key23TYXHS`2Q3!iKaB4VG%fU zz+@*0NFqRe83n`qicUG{PLR7XD@l6jC|jdN%sl1Cvq-gCb%riL$x~SY%2ugvlXlrh zD#j{XMhSwSqB%1@vjg}!q^PK9!n9xHAL~uT!s@qk1+l*dC}L1TAxSJXvs5ga4sqFh zHL;XIcVceofW>_U!lK=-(aE7|%8{%`v`xYoti#uBB;lP+K zErDQuVI^igs0XVnfqU6vH!HLv6YUsj&@lrp&-`w_3GS%>*(>G@ronvfD18g4CXr#5 zk4%GQQa=Qws7qEXnPx;)lYRuI$EyPIFulz7Z`^GCk#91QfVO5ST{<=>(grBDJwPF{ zH@xsxR_%fl-;(m0c5%R4J~+*>RsE0s>?nq|n0VwyMx!WX46kx5OKLo-H91TG;A`^RQ<)7!NQnu}2q-EB)FU16gb zF4pPFpLhB$s@)W0Wft5vDZ!o{V*E?J!|X)2HGn!P5f$j@t3Ply9i=%zhwXahCp=&Z zrkxCD*@|4Dz^^*m4NDzKm^v?R?TJ@^*48~AS)@QcPKZ6URSYN8>Gvf6w^(MCel6-> zqM{K4e9ez%U%TB1&rgy-Ix~{)d**Kaf%Ssv7vS)}J&N!GeV1&T%M$}8mIXI>%vvX7 zr5`ivn82hF`AM1EuXBQT;vwjJ)U6`oOxn^D<=_Q&qdcOkUdyTV6oKvZ|oY!yh~P?!RUJ)$9l-vc)mcOC^|eIv(f6g8N~}Hk|)p7>+X^USSa( z(sP*m|Bt6@bi+gUUyd&3lVKUQnzL~emb3g7!*L-pxflcl5pYo3Sj(0BdQ z-}cL3DyK$rnD^lv^uu{w9Zh^SKWD54jYbHp111g^a^Cy=+bGBy z*L!u)%l1c)bF#Y-mo?UCr91|Du>N)@{#FW&XVvn-ss3uK(DZBSHrxLK zwf~?hgE%whcf9gIX!b_H_wG~T5QLwx^0#)$c%o1X@y}oX6${-E3zozys4g->g;T-Y zm{4}3_evM}efjGCYBzN!@9Q+*WR=$XH;ee2xNvU3S_2Tj?q8?;#WIFJCnhGwC=}Mi zAt5}dpzHLXJM90MyKko(sn3O0M4!LbG>l4n0Qz|T$C`R0)4}8cvv02;FqV48ui)== zA`fnq-{lfF@-J}%Sl~^~HWQTJ60LFlUCGvVuQ5!NztPLTbUwEW6da()|7sDq=P!@Q zyzZMisCy0P+`aw38}c`h+sI=~MRn%S1^#vIAgYm@x%c})b4$FXL%!Ldx^WqRT?wsq z_a?v2py7ubh?ahLdhe(AKVFKz?hw;mkauwVh-c`9v2ea!8>#e#kZ@OMg0S z5lDcas+RZt;>*F%(m8^KBTs`A&de|Dk?e%EtMh5g| z=Tj$#1bPuzFwI<;Y;6Z^)@Ve!S0qR4c}n`RYjT>_soD*v+1x>+<{}B3Q@wm8eb4Y$ zT`!xq)Qf>O4zN0$;*Cpe;Zxw0`LRfw+1z>86D!WD>!UBt6nqa5@tG<3q8uF^-Mp@v zHyYY%`(%G~OR$(6G3)pjm=E=BU9j#i!cq)AOXClL|FDUb{x_T0J7E`=kNff@&k{g= zj1WG!S*z#SNhHR*tD|rL@LlW0We>Ft25+BUO5@@>Qy3gV_G++-WvMqQR$A#bxtUt6 zk+hK+X!wIG6`$a-fuBRIEny+FGY>`V)M|Sr8g=CM|H+CBPgAM!{S67Hmv~dUkVJiU 
zKa*Z7Xx;?c;Py_^@t^8>4?xc&(SEXcA>OMA)gUd(`+T7awvaAMl%Q+4bp` z20Rbvh!aqq20d~2)`Juni`46XR&gcBa_aFdLK=f<_C(aHRUWxXj`9fbqJZU%RTWE*f;F?6QflWCJ4Sk_mhXHdS{ zky2bqjAP?@ovX?0*lBJ-1K^%8J#o*XcIR0$a}3DE1?wFBpI$Jq=YMIva(}wQ$*dOE z@+#9an;v=GmQ|-Slniu$dffGS>*v2I2>P3~S|RVUVHx|b4ULsxY1$j-_l_Td_TXNe znW^(4h(`Ur9=ss~`L7TvxaD|__TX}c>z(@~7a5p-N_6c*%ALb?9GvBe_x{0@NI=&! zq+JIy8V2(Q!lrTL?RgdA1Y`@6_q+qEYP*Ux}MpTW7a z^@4!D?)<=|&{zb~89(DCq~MGw9ST+Kz+znzQpy*TXY4#yw=SIBxQrH1hG!*dzt(^wG}qN&#TMWUh#!gbje zxO_Z6)D1!lNLPN&+_u7>X`f1}cUSkH1Ay}@%N<$XCnNLnG`h>*E|P_nG`0DrV+srb z;J7+DMAKb%Z8{6q%sc^ZnS||{v=5b64TCnXSvN~3F-OQ* z?FXOMUzsx5Rk)#vMx7Mh@O=_=aBWsF;h*f(0TE5Qi+BfVY52UBiC*QVMd`=)%Qyrn zfZ`U^V2E7pBalT7qXqelh7yoqnSv+9FZ7lXUwXMJ^{qXbEi z`#44@=}&Cr(b=OS7%zGp){G7gRO7OrfErgUK7N>@@d+tI_rCTp+8? zeWe`g8QgW*k4brGRR1{>xg(}vbaP!G;adTUi6g9Dlx*F6KOn&NxK?%gC)PywVYgR; zu}XPu*CM~{3Fd)q6d`o_L(#-tgSvt?ICTE1GskA_A`^bfJmoP-dyZ8Xdbe&$h&);o zHG0EQkvgWP!X8;V+dHxd{5x?*o3?t+DrhD%#3Yp1?m!YhSIHFoqn?LZAWu!2vL>1( zL8oLF0-gKzStakY5>?b&swkll`~hyK1fhM_5gf-FuSJSUv|z8KsS}wP;yw1mzC)Ka zmo<()!O>7UJq4Fp4LMQRXMK4iq}k4Ri3mxQt$IpH+Zm zpJCWuUq)xtybhBV7>-Kd3s*ac$~SL+>cQ_~X$q%-6=a%EKBTs z#><6$X2R=*lKMT@1s2Yxpq`c2N!J?QD-=4IvbhmsI07}q<=fM>6V$vyxpvqF{@>mS>c=?ddD)0_02O&) z-KidFAd@|(F({;6X>d$W`S@|>-BGHwe`r>xj3t$!yAb{~xpn=p71<`v+OJi=mQgZt zB{3>_js{e`GV+r1`7mF#X;;p+Hzgg}1vpMiShcP5-qn4@lcS0( zJPLS24$ahAOf=g7B(Wohf(L5S=&8V(22!8bZ5iyD6gaxQLNX;vh?8wS!1_rdr_I>W ziI?lPeAzivUZ<@Th{|`PE0AU%CfvIg78cTSi06WEB_N(nOyVUIuXS+_SgH8?;?qBx99;?dB?Mx!31~;;QAw>IR|9xQvmqwLIr+C@{s4 z7S8^0@P#l>|13iG@ldV8C(KS-szh1;BIza1^kd17#sOo^?EytAfgca{ul25u3|Ep^ z?L1Fk1nky6fBvnt&n~2~8#M!Z^A`)uE>Y`bM78TPZXP8ORX8Yb)>A`~^ zPJ|(n=;xJw^;X&I=1tLo9*NO`8=f?dz05l){WEI0Bd$)??0OE41C46AU%rl7E-ZV7Vl{(9kFx(VBVhk~kmbUd`Rd3S+j|&| z)dTytn|U^)46RpHq=(mL#q5R`Rn_j%DgyZ{7U2EVPUO---u2Ja3*q}Ew1_>$m zvcq3D&od6c>FH^sU-h0#9*{O)&{?@H`Pgk?D*hsV=rhc3T{Cq1NkaS;9%REH=?6}V zXA!-1$F|GdNEr|Lq*X4#c8)vye)FQAhIW`vcwl@9(_X3#-|Fi-sW=&%v_l=6XCBI% z$MKpmVnn%B4knIs;FBuLl}u#DwyM}n*2H_ry1NwE?N#_Ps@_Wfg 
zvI}C5=sSj6JR?4S>53>{bYW;B$YDEI=h*WkCS{=yx1R~tf0It-|v`>v+Z`# z@SerZ#a@hab>$^Q7s9?z^W*?i_4xvk%91Q(1?3>EWS%9OHEnlvz#hOmMWYNeBl-xxb?yQaPjLx`|_ojPR z4%SseYq}0uPJ(SWW8Evt)iI-&gaj@^^fKo|W4>jN9lQEKu70Xi!p~+E zt|xMm9^5}9M*@KIjI_%YddsSdt*J5+t#G_%9x#O$V0QbC2>8hyP3js+J&si1-BDP7 z-M*Tcml0>NsZ)KGwaY9SlPUQL(a;X+vnP+r_brt-ii&Z*s zV1Pc{NNoyt_GF}(Tn(XVsJPzqYEa*tlan?e>rq#!kL5Bg@xmF0xm^cfQ|MO0m3@Er zbVS#u(0N3IXg^a$>dCPl_!b-?@^I(HnM)1Xg>BmNcc?ocN55d?wN?<^_m$3`l$C7u zd5!RmF&u}3Kc}vY0fivKg}MQRVZ`q zso}u!dh>tYF|Qz{zuWo6IK8cr=$~(Tw35~M7e(tuT*15MXkJxAs9>WIAKn#yD-^%K z6opyOfNi~$Gu~VAG7}>3{`RPQFuUYYg}0pZ zBhjg{D<?xjOqmj8+}{(Q z*ZD{q_wQpLxcp`iEmm%>6~g*ocQt#l)q+ED>k)WD@uTtc$N#>azX$2+ZHntP{e==D z?U0EJGf#8gYaO{NHf)hx2d&8bN?J^T+<7&I3(L&madi(mSH1?vlNi&gXwN1Q&JQNDXZe&RG{5r8&ah{TKk&?yf)!k!6 zvaNC+T_eYWHJ|Ug={cCAoaB|D*23ELYGR!GDA)pRzg@>RLXzOH*{EWtxA*q7c~WwO zuz|8w!d&h4s-kOYAQwwXp{a>0+jw} zv)z&{15o0W%wiv~lz6^5PPcCxDN)W=pUh* z$5wSO+BXj(ddu=WkEJ>DHO>vpV;mMOZKUazVv%=7_K*4;j*5b1t%S_I7M;@MS&dlC zCeX$!^NLCn@`396h~|)>c8ipAGTIxj$%r%BOKmoa|H@u4|M5d)r&R652ze45TnKO` zUZ*c}4r^2>=l?$C!!^5Y(liaJHI&xH^l5Sf4*${iDL1(9iDk7S6>!F;099`yy zXVnft@R^fEj^5Q38^8Jc=3koZo@2X}GkpU6J7Hzdqdhb@>xGXRJdQ1ftlf_VF27n_ zak~beXhtwCM4Zp>*a(_&>^`ZRe>pH^%&{7gF$YCYu&%EInZ}J4U_{SzNI0G~;?iKy z;z>d<9oW=e+t`n#yVJ6r=N|YUoF8jlS!K9i#<3pCf9N(YZ6NLGdQJoO6{iNgEk6CS zJ9JST@v-aw4|8uB6<7213+9ROBm@W$+!EXg?vMo64(<@#-5U!K+=4p#kW-UpSo8U0tVY*RH*P+a^Kx#UnMn-`EJ>_;va)={34Y zLrB|k8bFUDPeeh9;)e#dO-0ilz?Ow>o#L%ACF$fLTSK04ORu$*8FIKGnQQt z)nSg(=G5crZ#XLTLtbo%+C6+s%!PwjOZX(SYF-NB@)=liBWR|ZMR&zZv%ya3xK~v> zP8XRGx=fmU&PoiL>hnGJ6=rygLBj$3z}}nH=fRB)DU7>7op8nOgV-!%g4G>IHix=N z7f6BEVRLo~vKbT=#aO?SuliguL$uv1-!N^63pO}io3lA?F=AQ_Hnm6;ULNN6w5^wI z@^GuUz?+;>^RDW~6WDYSU`PqiKD5XHgvu;L^Gp9RD-T|d**sUBQdiqfzviTcNv**F zmgnF8mxS~`MJ7&wRlbuDbK*}}AR~;R!7{yCU4OQVWh{L0!syV9I{tD}M>d~f&gaAm z6*^!mgDXAM&FGA>Zi@3Z*YrPPxeF$qv9bE2vy)7Xg ztM4&CO^BevasEG|kDC*H%&4PxA=icXYU}Az@M9@$F^Hgv2}at z4JK>+yVlRkAPEUu@kKopKf@ENWDJua`R*Uc-d=X>b+U!inYFoYznJEX@k~;J=4ir7 
z%BN&gH|X`MXd9g^u^LEdNQjliH?<)TYHDoO__#|yv|O6Ni<#i{r(u)2*I>WHcLc{5 z&ph|*I*~RU?jUk<8~zcM&HF1E(vc+2GX*?hb`9yW9&yp?iOp=uGX`?<26kfieLH7a zuWVIwqAsubPNyA*P@jR@R%cD;UD15eI8ZLYqtST$QljJfWffqkKNZ$2p;_j-`Fe!xta z@+0}U#({@oQF29LqGAS}>)NrD$INFEOo)z60V_@`9X94G=9$P{A{W_l#q13cH1yDc9f)?x zihF&~Yqi0h{tm;g=U+Q`PY$!d+(6+?4h}v>N%IRVv?oq2ZnnBbErTjai1u)=g`B!v ziH{nNmAH@HsWS3=y^v^_I~0=fmH8*k1TnD9T~pL-$EUF|!GUR99$tvSd&Q&_Hp6T^ zT$i}kBB*)5Ri;{^dg#>aiWUfZJ^3Lt&xss#GF;s>Vi+Gomwdt9c4q{~8v85({G-!V0@j3!pFZxF0VF#y=%NJKpYCUP5I%3hMt8WPews+G1l3 zFAFN22=TdVu&9-vnv&OL**tNekDlg7M-2~N*tTa`wDE5)bgYmnNp)rVwJi3}Mpjg|bC}Y6;vvB?CXUCD zJ&lGuTQR!kw0?ZDT#kv)C(7-RMxPJ+jO@rbdCHLspXcl8;F^Z)#0E<(W1+7ivr7*d z@gS~>C|m&%xNq7jhX z6AK?u;T%@XcP<8@ALI>Cyak^|w;vIGSvo1lY)79;{I9#y^c|I}6Qg zv*C5apRDw%NORyU-dmvcU2n%=YV|0~`ztm7HYqdHmKJU)4gdJ^F_y@shQ z8asChZQ2bMlMwB48gaXhL`#h&Naj)8T_OutXW?xs*bi}>=H=&Wg~+wGcJ3~2g=!mx zl);YBWANw>{OZ&u+;1iPFO)hFphl;zF8Q1X@$d&}mb5=Z80tsjvMp4>9+opLH98Ch zR3X6>u|{E#rO4iZ@9YNra1Uch5~!LOs4$c zr86g4!PGmd5Ho&E9EX)jo$?-C#-_?&W~uZ*`tk1pqY5H2ZPpK?<2>@c4#+A|Q6xDe zIdy}qC#XV7lu{>*vR13h#bnNsxRtO!A(*s>VunG(`g6+0WjY`EfbOeaQrr#l=+J2C z1hW2Ito@t3J_-AfVClJ?5JWpm;_0t^)dH5qk*yFjHj+C5nW?HLLsli4bl5i>bl78e zlKTnnTsf940`%EZ^`Ar=_g&;X>)6WX5A4p$cYaUGbp6GRl&_e$c~A90BfZYvaW?rw zily6gh*cEPE;ovbyeSWShtFv5C_~N!Dn7)-WDT0fY}O+MCPUY64Y-S&yVNyzIM~S* zjdKdp5VdRZ+e8~E$D;-7Tg^`?$j6CXPheHgwn_x=A@!WXK%w7TUssZ8iIF#!7>&g? 
zeHBXcVEasJVx*Ss%cq8HnD*(}7d@kr{fsrSHWgp%2&H$#EYpqDYey_v2qG70fa!?L zMXHxAys#{IcSp&L*{}CJ?dC2%^X=&2N5UxdGiy}$6^o-AcezOy|ArI^W32GxOXwR+ zDZ{M3g9g-@SyUYrCAFV7w!9YiwPTxrruBsC(+Nh|@O81?L($-EX{U2!mltuo5qUNV zx$qY53;(OQ4c={R<*rta*__v*P3^afAAQ=Ssgjkmw|U3}4L)jF-8Xssp%OZ2@VFEL zuo`k5>X{&3`0h{sLp)c)H{udsYxpv?VaWN#&lMFMX3$yU-VW^o9-6CyWa~SZ3&HsJ zHfX$z)SWHw`FX#dKQk}wKl$B^elz2(aMUezBxJ+I=!Jc>srA;UB%uPQcLkDUWD6kn#wUU35Vi>`9#CwV23sMA!dnY z4e>vjrgyFTI(7DHn5?a}=I3h)hg)SQ&<{t=y)S?D8*%V5?>#$ocQ?3T$6Lez4}k@J zIOsCSoM1x9BOb8xUE!1I*6K-F>zRs@D<{OnKM3p+{ivOm0e%x7^{nLPLW{jJt_2_H zCe_vduFM0l-J?+IBL|Hi+>Bs;)>eEyU@Ih@mFhd4`mE{Q;oO`pG10k;m6H^g^x#xm zF-J|p0-WdFg|HJFDfqB)Lf!6+qy^@Oi`@;v_qLB{| zH?0ph!nd8mR!(%&T|i3iOt-ZVAgs&U>GSjtzh{a4M&1DGD|Fn24PZi?S%2D=fOxPQ z0PF>MpTtHm_oV<(L*?AQYs+c+W9bCoLs!6q%(^86;ICuj^jS4`bDRPI5`s7EV zn@qP60Xem@F8WZL_j69F$Z4OkOhBj$O07S?MqdPqhG!|#aZM-y zkjA5%PpBZ}6IBhmIa{RomY{xV<;!x6Sx%_%ajh~buS-Y6pMPx2?$XEF*kv@lf7wP1 zprpobUVgtz8o8Vxg^HUBk`x~|iG*q8^q%{i<&c$iTX4U6F(MfG7l$~kEP4b^Zl6T} zs2YHr{zsetWX^R_<0CtU0zeQS2LSaP8&L7Tk8e2CpRepR$iW^?Gaqi(ql9(gWmdtD zki_k_@Z)>LtLb^2-y#3LQwWfnNxa?nrq}1T_jG&2_HnMs9xf#SNFwuoH&as(R3edJ z`~n4bhy)t&_#J9LfY7vsmY!mqNX4815$h}PntJDpIn&I)kVNMF8I6W6tfHsphtEx| z&%zFQX}s{m)d(on?cd?*Y6z`K#nbmrKUh#;lkPlY+MezXR zeN3@_qUW#wiW$yM{lQlh=ek%8ZCN}<@!+xFY{jB?i67pMN$<+Y;41$zaZ$a|VBYm3Z1YYaEZ(o6OXE>;HZnVS@I$2-cZZ9E^_kf}U$QGoa7-DH-!>CAEaibh_?9dz8XaFhfYrX+Mr-zfo zR$7Od;2j`ZQ(+IYmY|yMvH=r7>a4G4eC}o*6Z{onYuuez_*h$7Ds5PSfSlLb%oKJ^ z5_7&KY4nd5?YC= zB(=x(z2n0bK;@6^E6-n?^6~O@(Eq{>|5p#hkBHgd79jG7T0XX4c7O4@$IEx<|F@m~ zqP?>8?j;2RKe&?Cghq$)l`ezh>a> zkekl%CSPMMI$p=3-cpsu3fWdx-u?M`E^HBNwQ6G|Ga3Kp$6e^WHP~_Hn<6>+=5zUb z`pG5pQ41(IXV{fjCihoceB~H+d>cU*p8u{CxuX1^Toyk>b9@`Gr&=6N9i78%Gc1Nz2 zU?P*Mfb_Xw{n!glUb<<=U&6IAqa`LQGOY&)mh*7$^udVpcOwlbwzk^Gs??{EgI)pH zu2pljxqi8)kpd~9`uel|dp@4*vCLq>nQ)QaFRSo6Q-?MWX1UN;+H@p5^LlRflxnm_ zFx2B2uTG43?7ICR!R3lByZo=0;I6xmf81$ALSLm3BgxQ;LE&4k1I~DbE{cWe>1gbf z5YN3brLT9@UcXv=N9SVkzGy99kB-4E_0?Z^7u}r^Yi0dda2V4*K~1m+9(}nXo@jn9 
zsTgg)xHbrV8$agZZnX1O*uy|=pi3w=y~5>~K0C5J!{1>;I=}rj7yH4z@8xgMtmgT2 zuJWYaJQ~B;-rPER%VF(aaMGA_TU>Kyd^|P}41C+sra(clb}=WGFuVoaf&O`w4#(jk zH>oE3xLK*ONo-HmeCkAN_rp}Q;Z9y5eppCZ{!ef)Es+2wro7wLK%Qdv^T^=f4aleW zhr9g0GQeepZPv_xcQ$V&vT~jzzv#9ueVcP`$UlN@s3^Q z;nNhjB$)BJLqOIYQ>Y6qjh$#*>kg_d@&-BVlqCI%7_+~>t*9Da8kD*F5$9dq@XN=Y zLK~zV7c!hiR74OP_WF8D&(vNE!!2hwWZ${<0=pJji8cOkWhBMSdhYawz_5~$Zo4Up zeE~y#X?qQOPT5Q^9~sc@WS$dNO|E#t$cs{0ec!=1F2z$# z3^g=wd9sx>|CGB}PHCQ+_n(%7zS7a!0i?a1d>H3-G0pPg1@m2`@YVjO!}4O(_Rv?0 zTYuSnoWHekrFN3HN}O^2l$DXYDvqg_q1hjtl1NOr-9<}ZZv_okgQVqmds!;Hf)RaV z4U=#s*hEHS!%G;F*VQq1jj{DeldNl<$3*{UqfhKnzjyC(U1@3CSUYM!Z}+~0!Tt-^qYQ(mG~M!OeHG|@&MJp=)OJX} z`y{HPz>nK<0ncsN|C&z*+7JJPAuE*}Z-s}9j5AhRAXJ)SiWgx{-dm~ZF{P@|jpSu= z<)$Kv)Ozl73xU|ph}%G1GtT}i*%R2=9U)aV7OP4phLx`3?T?9v-=>s%IP^0!aqsFp zJ|IyQPH!Mt{!0A`ice`w%h7rnSl7aJdiObSM}8_kO$4FqmER(sPaK@O5MUqY=KgGp zSA@Xl&IrbSPkh@sqd^3vc{Jq2zJ8^Bp~>j%GuzzzZPwgQZ>R?pQd2t)Y|wlFWCBVH z`X4xIuxDlI$%rc6YSUJ&SgXLeAL@6AT#yS=A(Aa*B*Y1&C__X#xzKM;q-)vRA{xmX z9$9}l)8W7E0TKX@`y>39S8v#&<2Ts%`;F*Fb}8k7-kf8n<6-ec6BQhG{X?OQb)NFhac5?Q*`1S8ZPxj2)*k%mP<+Gv+h&_Ttp9Zbu-8rtkaI_eFkFh!B@g$?cvZ?tJY@56CdFTI3S)WXGa`HEH3SQVV>zz zh*Y+Ei5&Xw@YL0{t;5V^g@bRz)VTY3xwa%43H0dir%CsTt{SwN8K~?wPr6LwVVUbL z>H>#(p_`QsPr29OAK>N7UF#0@rD<)$_+AmUK0g@}2VcKg%pX%_PUOD62JC!)Tj__z zi~mQ5-rsNipFTL$Kni6R&Lm&6zT2xe=-lM?_N1xW+m?i?I_DKWd(Z3HK|7dd&6f>HbpG=YLRFm`3ROWHpkA;cpwablq@AQZ|cq(ula1gkpbQxw< z%jQDBQ3^-eZ}je;wLRLaA159z8>%B8B2ik}Ih9B9%127H>GVC2CNME1c;j*^clTFp zE7`tKMvWd}W?yax%6Q9qmO!+oSUn`}cujPz`n}${hpl=?a!8157kF^Hl`)$5lq@P$ zSTdquy+x7n zVTLWs1}Sq*%}ugBbyDq&4y)x%2|MIy%1L*6Zsf;JhQ6ri!iyaG_?FiF*Ds!^rvXCX zU&{8;I#CNWL@L^Q1qT;W4yFlt&l@c1yY6a3Sfm(x*7(!CNR-{wfA)fh`S0REIQ=_u z!NEy^>-nm2_yS|XF?3iIyDeSK(j0U$gz{Qf3ACC$NplAxhtz5$k}FTK4J5cVhrl`MdCRL@nAo z&AR^z=(y9N3DcnKC6=K?T!^GG;MRhAuKb$?q9@BY(Jy;~(x=$BH_f-6t45|f1l8NE z<;PR-x`boAjSjSrJ()Z>Rb=LtdKz6b4NkB&w`7o88F-M4DRk)fBgn+ib`$!f2>v~Y z@Y0iQ($|h~_~!}Z)7ra7{?o?SAV>_;AOQ_GZSA96UT!9d8VT(D>G5l4_1a8!x5->` 
zVkd5)!Hz!b{)Rj@^G2jlN=+Yb@RDE2^Q6UXX3O#q_4K=K$Rtb3`v|1bkFCKKSXJpI z!-W3*sI=H)X*uJl_?cd=)$^z1B1N|6zJmZa-Joc)D@l3nIDlm}K}2NYsbuQYx^@>* znVvmqV|)KsXZU^|drQf9P>ZTko9Td()&pOi70K3-N9i^I7nJx12lD~0H%Fa8a)IeD zPc2{F|5D*Ll^ePbje;%6dWiM9@A0lD2w$9qYuuR*9dFC!H%I>JL73SZ2p(j6+0HY} zUbkA=pb%D+b(+OY6eYv|k$p|ZihohJ4)`Qd@AD>ekRB7pl49F_+<*5h#`Fk>0&~pY zDc=Z-FQpWS?AtU0xHyr{%g5FGK1D6D>hY(&HBz+a7dzyMu z?<{$r$FP)dZL%^;Z@qW_>tOCrKJT99k2q5A!R^_(d<1c+inGJ*4~@nu{%bpuuxgX^ zPw~!+6^i4vN@deTvVL<`n8QsqWo2@fX@t09?Z0rY9}XW1^bf1wgq>f>k$_`&S8E%( zt*z$`Au9CK^U>=xI{%!Attnt6GH_@rYHm;(&8h0|jPwfcAJ*(GOh&G51?`EUuc}lw zeQ;nMBzn5RKh_@09NRGLM6Tg(_^hF5i_u{XQob>?d5@6yT2NJUdUc*2(R|=|WW8tV zf0g1>fhfnzxG9sXnv;dTE}`)0hm%gD804&q#92<>lxd(WS8{zm0p)sqh#8EGDo`pi z>S&FMmfmknkfg*8F(P*!>x>n5%c>|)(nFq7#TaYox{LDfH`6SMPle7*Xr`725v1E* z%=6FkS<6#6H4_~DJ2<~+!TJ3KV3T<&(f!gmCd@%W*8sP;3JhvYtj$Rs!&@CC(S66W zw(mQEZbMR|PCr1EtV*{y$H2;GwykDr?8DBJ1($1`P;bRdG@{2*V-T0qsFMGNP@T4K zyo`_+Z-{yAQG5=A{Eiiqd!Z@9h93H@I z9O3aooNr`K7czD^$mD=kX;t#^3|`6t)K~wLIE{rX_(QGVA+S%3$@1TCjcLsVg4y|C zLHM#9r|B4Wlyg_O#5Fim$6|2%!1i*VpXiKIWy!hBu@!2@m0F~#ud*S*EFR|9T}Zyp z@GL_IEkU~qy~)@*a3k~B^X{kB3>=pP&(%%bAz)G%wM4TxexS)1-qJ7@px!V|Tl4Nf zh$AW4y#1`|PU-M+wd_F?_ye;Vnh^RSb&R#7i3<*a8;oqK_z;^zaS zo)wkFLCe%f?_-mV!db};NNzh&6PX<9tPL#LMavcWwF5fcIVCorJCxjIJxk|)CRMi- z_M8?tcWo(}-scIG$&I-t7UblSdb9o%c4$_98$Paw0o?FRh&3{BL~{nhQDVf#b@{ot zZY)o|5|_^9V*T4j>a=B-Sh@Eo9E0m6=jaY5k-odXDF7 zX#}vLuoOgTa`Dgl$rWpEcx^GXY1ows^ys^kZkJqBv#S>YV&YClZ?!RH7#e7Z$Ax^r5P$!v*!!>3z5F*bp>Ec3EpC^)tMST(Qh z>atO&7gkdv2&fUkCu8f`pO^A$8e42Px@rMLCAla{mqtk)aH&7*UAB4SRT@0xvA!T)Aq2;sVRB_3Uj6WqV?Yoc^1 zo2zx_ke8D1%;H?N`AVHuDF(6I}{pzb;gea(fNNeG&qBkTs*J&qL zYjHXs5jr4xrZU>Bwo=pmhY#WCoN}i5IQVmz6;Z(r*L9ZKZ3ZjqKa&nm4z$WE=`&8-aC#5oJk;RIEUtd_I}Or zUX3;S3QHr2pKgh&rR%(B;zYm9onX*VptS{bdGYReABjDV*DifX`Pu}S$RWz;reE}X zqBY`CPS&4yED&SaoYYkQ6?{V;Lu*y&i4%fORTp)s&6@5{=Wv@qoanP)G}~9_^?~Va zr7v)VTw7Hs@*_B5ipBSuC9W1WVKRHCmL%Yaz_Dbo=Zf*X#qZ`tvc^{R%^7%+MP$-2 z+S$p0ex}kpe(M_fs=2ijodIR(%=!(-2Lm3h;{C=Qum{2llM|yBO!Gl>7~MjK?Ar2z 
zCcYu6b;%lA)}&;3)#=@I~l?i4U6o$s| zIJ5~XEa01~VCJ`(x|>a%^@??8L4uJ3yx+U6<|$nChOnk~nJh{9uiPHOvfV4ps*;9- zJSPf_BIKF!>GQ;Cq^e^?v_g>!a`Q|Ukdg(+%&(Q=X+)ZNg7cb|xk5sblIQ}Y{W492 zo`@Imad*lBI{nT5ssbhyrPmsQ<%NyGGyJklvur9A<0|m5PZTOYzn!#TwM7=zjjs-9`4Q+3T#mjc9 z5W5s8AmgxH+%$-Gayf6}NBQ8OW&p3gGi6cMj))o=5>z<+R5FKNWCEfsU{M9-{ETXa zJHFcEYN63B_6Yo1qOh4mLoet8dVrS~kuHprT$7V{74%M-kK3qNBI{nZrU}j`xJcK# zolhUIZm@_5>fA>M#Wn8Z@QW>kIhqEmJD{|j{nIcN&z)#+`7eb@>(TJ7Fy->cm>$AQ z0nUtJxy4YoOrz=&?&CV3?#Eyhb~C>ua=^^Ee==3P#`fEqJpg6l*`B$irxrqziJJYy zw{O2?tGnNw|GfJXpbb3yaS$FZ!9r;xx6zUznN|f?9=cbs*;DG&fnm@;WZ=(_%qsaF z50Dv<%6GqFc^)W4@~YM3ylY@H)k_>_2H00#Y523fuWYI5Q%QvvrV<$wiEOWrFWZFV zUG2=8PLeAqbolWf1|+i{M^}$5l{MaPg5=>h*NO8BJ|KF$joto6|$;c?bJ-L1PHQ3!fV(jK>l256CNxRYcy%JOc1Ugix zRvUf$FmGmz4Rcw>PA#599r2K$?+$k7iHEt{a5%e?C(7x#`O=sWBrQ+$p@zF4k?w{u4B3zx8F-v#zXvrzGuC#m}A@KY_QC&I6a$lleJA=nme+tEl3b!IczLgY1 zpnLW-y2wC<{`A&t&xYBRo7G8Of)GAB6aQ91fIsQjj1uWaZ{us)Kpr)?*-JsXB)e1K zn#Q_qXMb$RQb~a=a#O~hMeHRevC!BtX8_QKQO7b3!NL-dtL}02=00qX74hWDqskh# z{!GZT0n?ag>)9-P|2MCS}pdkxgA_PgvW_uw8H&{yNYISx$ zA*L@aoGLaQ5YPAxr?d7aV@}a$d(bNCueDR~uSehWURzX+Zi`lNTyx4#IJQ~6S_ zec7b;jc=Q#p6^Cz#&3t4=T(t#p^Db^s9xP&5dFdJX3QB@=HYPDl46&lFfuSmR^84F|58zqfx$l zPB^I$Z>1~sDSRhXd9RE|$mwfu#$E}>^zp<~R(M<4mt*_-Te-asRPmnhX72nNje?M^ zzzg;Z#pjldf&b#Mjks+a95O}$bB5sr0W1B~AtI3OVJc1!A1j@C^6CdGs$Eivy zdslV;TepB@8vcrcWk@jn{so9+d2DWCzTWg-XQHXDHX(5kn7Iq9NNYVrzv9=2i`?xm zP8`vspzf!tBoP0J0*pbPJndXC(K&vYm&+@X%hxD7s*{TSK8>dN6U4$4A0ybcbI9m^ zV6eOV={P&mWt{BOaKJa(ye?|qkA0_#9x(`jED>cLLNh;?&Nx#o20ixhPk?r*`KBoEIZ)s zr_3EQ9;0RiJIbfKhofBZGsN@S!#bRFq9v3pla}|iIYG}cbd$I5B0(}OMKgk*V5lu^xDmJ*JbFck9D0lZ9{rDA;8NC`FeVOMDxU^ue3ewl)( zMHf8dc^_6YmDp`_@8=|k*nXO;Lr!zpN!`FMps+FK^w3MSe7@2yBHR(v`1#`6j%(kf znRjFX>4em2$z_KYGNAhs`cua!`EKMgF!s%3_JqCD-vIZ$9}DPO>-Xuj<6e&5|LOIx zk0PNrHWoSOIBifZQ?S+`#FQ|&d6ILioX>MA#O+vS4ZC(+PyX`X>|zEddAD62HP=3{ zAgXuKV>)>Wl#UxbXWH<4Y{qTeT|5-EIQz4b#w^9~#3bo(5u}Jcbxww6Q2svE$}Ix! 
zXwB#Ao+5RxhCi{zA$xC$lpnT%TnV)bJdBp`hr_5Jx+i%HPGz$#s?Hf&bv?=j4Y=4x z6MI3Zvg@S-um6&R1z(=F_=(;PWp4W~F4q7Vl505h;mhot{8T}~RKHL2D9h&C4D9^H zvSWk0Ws@@;9cSnUOiQB~zK%9xw54LZCr4p!f>F+ELHPg;iU;)5uHwKyFZ&;IBol31 zo-hGZ)v*&`R{mdG_?Wj|?|7oTNO6iEnv`Rb(oiYF&u?TrbU!9r2UwRzIPT|<(~FYl zmaeM5W+3Ls*;6m6_cww)^es}I8+H~)y?ZV&hR;g6N19di%*tr5SXN&DTRdl&i;vWd zoZgp9{8|FAIRQa0T2IGzSQ0cwj2re5{-6Dre?1%JWM0e`)|<18KHjX%YhEN=I%5;$ z?9tI3^T2ER@A= zRVdmbu}u4|_6sfqcf21xFBJ{f;)wv!#W&G8dJL&3?VrGMrU|j*OOiCjs)+X9{E%1nFROE=va}BNZ74T55Uw0A|HF!9CyA)+;INM13$jH^dd>?|ob z7?C%QweBhp>Kli_lhTHnPX-ySf}#tQuM=dO?Dt~no8>_cTw167#7nWj9%aiXFVb>MlO={>qYe6q@SB)8vr4^QoGVe z9OC+Tsk%W8FdC;G9b(QUGXld>iw=t(d<6w&%;-2NQ_0@b37v@h0zPWvDpXqwo64fe z{pghW9A3_ZJGN*gg5i!(pOLJ&z}o8TwAPSK=cf!+uUU1n=9bN&HR@B_BUL*eHDjt0 z1UmVg}$FUt%w_y)PvhD+k^c5uUPq zs8uo#A$Z|fhBJwFSBas&WJ6&&x==W&Ob@#q6^AHODpqOgAjwVH99hpD$k;MTR8NXn z*e5?Er6!Lbx{%-fBzQ5gugY7p{X4R7Zkg>yg<7J?931MAKVwOkIy^9(p~+aO#2{Q9 zf$N*YUv%bIE?>LQhoA&jIHISx`$dkl^h&Ao^*D2qD=RPiR5};`#w$M!DpQ-Xo4+PXwy!Snw;00*Vwzgp;D8I9Co?kbgEhuQdmDnoN!Kuqg1u`Sz=h3QmNDC=N`1v z1l%EZ(N<{LqQKfyEb+VtKL@LS^0u$#htdGmIlhcayjc0nljdZrVzNXnk{<4Cib?px zx8FAoJ|oPge>%S&vO7k-)S&O{;*df30sijjo^MBF+2lnO`Dz!R?g@}}Zz$cjM|lt* zUS8fci)23EM?;}0df%UGsv@k=$)PFkpsz@6Aesgbo^=!vT9eWP9C46l zeB``A@`NNo&*zi+{41D=LueK#$ztbLeC;7 zW1}Y+8~zw1Odn0xVyiRA^;QSe)XjdRFFEfw-aXdCg@iZioSh?MJjp2=Lsi9pYyFQakQGSt_*ygk2K?(<_mBK zg{!9so%71ZJGj2i-R&8Q<(SaZ%TCLhFf5rHw+!Vsn&ksG21?jO$PdBodp^9xgsAF# z>tkA~BUd=JnfFr0uPtEZejM=Fy_SD`dAd6lSzJWW>>thS9W~&-DsrxGpobRzt z`+Q$^do6n}l~>h5Am2o1+WX~R&8sEnpVjH_&ItEk3DJ~NGL}L}f5bCyzI6PnfM`rA zP?7;&qE?4X7_wR6u-@||X2qjv#jzvps%!4gx6NweoNVoL;{XATpCRT2BaL;>Z1}fB z#SOFLAf&bGeN;^(_$^q8Jv_ZM>=3B?N~NT>%C73NF9dw`@)B!Y9~bXf})g42)A4EUuY8 z+3zp+9cg$;njbWB4PLQF?)NjhA8y6apjJv(Sg+B?GU8CJogG-2<1DN?xLkXVnWf3} zqxkiBt-~U}?Kp5OM{|%~fUKd*uVo|Xt&0;(VM++3 zpqJ!qozRHM_jocNEdR}VGRhooV?U;^O!ZRe6i+^hfSgoca|71tn7&ZW=T zs!YP2wHV*^_HO*nG=nHp9KL z$T@m-!@+MGP+zb@QU49&e=)O%%;rW6v%P`Sm-iGe9Y6mtdPhiX9@0HU>+;4T(FjWN 
zWjF434QF`gzo2x;7^2#&5qCZrcMrigig1FYn`fn$eRbXcGUL*(%>PRXbtru4k1|!w zzhk~w^Iv2AlD>~Ee(LkO|Mxx1E0$2dUF0d=#^$o)$l&l@x(9-n8y;@I?!#~Kx}YAFv(c3PEV!Ii@TDrx}hFbRUQP@Zo|-!v(;59)g6OJwfX4<5jnT9 zv9{Nz6a{N(Os(*a4JSIxXwf^07toYEIEtZ9b

7 z5srUrw-7ham|qfcizvB1qlApKycQ#F&MLOfdKZF2s&=CoZ!fIInuUK(Z@jeI3=+G2 zk$xqcKDG%})YY=r8W1*$C-T%|{T-M+GdOp-UrHJHg`7fUwjn3}8nv`cPS4${^xJ}K zUi$MnlZY6-=Is!{DQiYdOv|dX%(Ot$ZqC~Fe&Vqy5t(Tt(S_>9szKO%a4-}QyeR2Qb zGWT*oe8u(t9iAh(@EPS&Oxf!qX7{QGR#Ppx&;6`f*HRHqUzT*SS?)qqcKaXBZb~0e zl{n$uJ+2Op!W@;NM%;w5Y9m!Gq4#_#N9w||&@AJ+IDI@$#ut^!-yzNKMtts*0F53dX{8raqae$BM>2ELfK8HFG_DT9Ib_ zaE_GtQWrH7sRt0grDfHunO`EVVdiLHve^VF>_p$YzrQb>gz!*BzHq)p&cw6z_mCsG z0h(AvLaHHqZ3Wpoqqb29Y4n4|MUfT&fnwyE!9fq+t(EolkGpLzNzyBQZL-YxTs1{i+q%QavQNYJ80w;JnKMt%4d z)wQVnk^ma_VRi4!FKUR`lzHU?UeL!O&mspc{i`tPK+Hihx>A0Pl&a4IM*)=)8t(J$ z8e&?~wUPmED0Jy(_JIugrnWk()b)(2`Lc@i>ze~i0elR-5mlCvReP_=)CSh#2PpGA zp};ST4F+r2k*FlIicrgN$3&ASI>bB|H1SEUBQ7=>2*|1Cj8WJws6 zgQ=A2f0|{noie3Qsi!1U#+D~A#KxVt8DEE~m_)Acp}clgl6M#EB^Yqz>i^*>-teK| zwyI$bk4=SD^h+|>RM&l?P0@$dVG0~iFGZ(C2U-#Q-7#7iPUrJEM_!R0d*j3{*(p!) zRC;QvzsJj@bVk~kX^EDgvQ*hnx9M!p&u86gR10`rY1iU%Rk0Fa5#xV=I^5(wp9jAh zBZ;`Fnq&y*!W-l=t9*&xuAucxliujdj~F-SX@-F05&}(HkdN5589iA{GP;D9IZT|O z)6^*Sgg%_O*FG};Bu&ntD2YJQ)VJIp@CQCdgNpWjG$=Yr4sBJ%HL{L*bww#PzR|R) zQ8^%CA7%d@MOV3Omf~A_FmR(!S!I@n{)I~Ax$cN$r<-v0#QMAD&aWx)j5t9b!@rWtOy^eM#C$CV zmm9c2f2%e}=SjWA)?@Uc9CW-Uoc_jgRp>S?wXq$+_bMD#SVC~WeA5|KVr(%`4f&|S z7(QHl(h@_xJ&I>)t-xRyAWErQCcToNBvhBZhi_Vt53v}t!xr4mycj=gx(jz;+GNQc z{L#gzvh8KG#ZzwY(-8LoYml?T>uhW``4B4Pao4>;eJyfW$TF?)e66yq~yksgl)#!~L z?SlHRMh?|6Q0c4G3RQo+P!y2AkdH+8W=cGTh^W4&Zo2a%h;sW>y60A*{rA3LTp}q8 zWsB&!w)y&zpEZF3M(>S=Id5E!E)y)Z0wvNMt;F-j&6I^C8Ffwc&rbd z|Kdp-Sx#vjLhV{$2`^!TER{v%hnV%sLm$6D00webf0CdZc?vdXZOBF!%(`=?PpN@? 
z?f>Zpm4@o(N&oCuEvZ^Q-N=hNIp9=lPAfmlb%e^+=Y9K_H9fypDo1Y|qu8R#92P&o zZ4{3#%YWDy%14Ma&8r}*>kEl5a(Or7KL11ia<{?o5~}B)Xu*CGRG|k@y!wH$+^}W8 zN^i*1b46f~z^w2Zcc++P^%OgxCwu)5_TDlouIB3(j1U77Xb7%p+&wrQLgVfZ!QCxr zCj@tb`x7*{y9IX$?(WuDaOj>xp7%d<-!*gBtob%~t$RM5?scka*RHBvyY_F_-bZQc zb>%HClQ|8BBN$d*9>WN}!2AOHJgNRDdlQdgZ9c2Ok}KY783~ypz8)vn)6w5v6C%rI zADu+ieEkgYfi2~R!~dCfcBe$_e8|3>XY0pfwfrL0QWJVSEKvpyw*+oJEqXoTE78wC zs^H)2_u?2Ql>+=K{w(4!5xdzW#2Oj1^jt@H52`+!kHrc|#5cK#NPTzG6?MZFjD3Xy zW4;%E)Z@V2-0}DqS^b}ScG2jjg`0TGhL|TYYWGlMsc2H@bK$bK|$83t8Af&HKIgVSHdqGF~ z#qKeZ=mLW^+;;H$XM5$cl6@i|XU%c(+8CZdbD(A^0XqQ}MR**0XV4wzMpJbe!cCa{ zm|PP02;OMJYY;Gt4kk>4yZ1A$R=VlA8XJ@3y@lzKPi3I5MdE@ri`4f1(Bld+okPbYMz!l)C#>4Y# zcyT|M=b1eLp5K5f`%>6GGxP3 zh%m2>&v&?IE9V49@|e+3H7=|VxsKW#t7z4m8ObHex2M7{wWO7(qC*n?OgBAVUlpuR z!O8sfPuOK?DN~rHo^m9Yx&B-_J`U!ePkrYsr=Q%pIkFhp0oSTO!t9Z?^>jim4?X&# zpK{#>i4LVo_@={c<<=uDJuLN>{oN~7_eNzuZi7hX7yE{Jdym3fOS8}E`zEZ>gM!TS z`L?@cy1b6;*rg{m8@WQF_UFFw&~bQvi>)uQk>SQGXB^NI=ZdE^+Z0S;f2Qg-c$(}H zrgE6TE{@ACQsb%(ZWKhwubU*@Dtq;#)a2?2c zt{4N(hK?Z0%4gNy*Nc)h3bD;4QYvE3`Vs7#NDc=7fU+0tpNOUN{X3vTzgIE@3x{AW zPzZ@z6|w97>!oj^w;sVK&q=hba^h9?c8DrZ^4%yZ*QnQTKQHRlZx{3R$jbK18ZM}- zES=a8+QTa*;6psThR2StZHgA+%Fmge?vEX6 zmMRoU4;J$lE^=+G8l<#rT%(JA{-DqhPn9_?N?-+k`UEdWC@rnQRx_KHmexyiukfZ2 z@2EKLIt6=uiWVy_qz-3bE%v(F)w>Uypgz!M>yiU!AB*FHaMwC%n|FJ{t<-1a%D^W; z>4yR}b^rRB%Dl9k$6ELaM(5R!`0tZg;EtAJhv>(TLzQcCX_#Yz03w+^S+(Z{%0kiR ztW9)M1=vJ$G^kar7p*=8_o`|CNd&XA(r(InA1)nCaD}d=mr{Z6fl!wfbx>8QOl!@B zZ}oAwU~mV&Ha3;;<5jxZ)`etn=-X${f^lg~TT4s5xnZ~8975Z_`GVJL;Ef*YGp4VenFI&wUf5XbUJ3K2U2cQf}XRu{`laO=?#a^7? 
zONKS$kT88ULfxlC(a%wOPtTdb6)LUiY4m8QQP!mwAggk-{)$P50pk#$H+zw%1`C^n4Ts@4$asi$%ArzN-7+%w8Y zwa7LvF&|Byxa;^9%o;CH5EMg)r5fxoW)duK75>mFf}Mg{fZW_EY1Mr6;>9}dVum_m zZz?RI(8f%feToh%V`A>$=DV44!qDp=FT-A=i>&?D0Uot)$t$4ipFfSxl%Dn}U3>Mj zD+0N^it8hDJ0+pQ!os0bHq$k)y#l!`Gvrl!ap($SJGmSNaHTokNKd@_SlYFB?X?NB zme;J5;N+%NYN;-o^^}_7FqyUVH`Fy;Yp4Un<3;0rS7$ooY+95pu<{`u$Fgs!z{Vmfw1C>bA*3N^V=;w1aR(xp;m z+fJ>sSdkK#cBF$-HxfW5xkrz#Fl?z=aSFFMP8VI?L;z~$CJq%gn1%IEtA&qr7i<5= ztnJqLcFF);OM2$lKJ{#N#R8?k?X=+n*#y?HK}!ZeQN`wE7faPM-QSOPsDm%grh7v#iEie9H@7l*u~vye87{pr1m2Pb)Vegr0&#qAHJCG zRI8+wNiHQc`8v89Y1O#de%+5)z~BC$g&Gt4O!gkBBFLQUzndfWqQ^l^#9~V-AQ(_B zH&V(KnsXM;sv8#BsiCUYEe+ldA5E9EH*xGNVn=p)A`0hzoEo!CD<%seE&KP=W3VVj zJW8e1mo?ONQ(Izbq3P$YqN+B!MmFwh0wQNzIO#_0Z|L-`R87$-W)(MA#LWD&Ec0T6 zmXX<32Y%VIeAD^h+#qHA~aDZ%(v zrqX>)5-bP&dipavl8N~Of&tM2daKu;t|`!q2MhN}dVi&1g$x&+-py;r5eQ}z zCQ3~jirst`g{@wwnQz7XNqT3(w*!rcWs!k6pFh*Twpc7RP5f$UN2^9-qKdBaF$2dF zMZb$LHA`mbI7gE#iG;YvWdg_UyMhD^ZOvof@x8Gs?*q@tOr);boo!%Qj2Z(9Vf6c( zYo{X5_RgO%rB*vX?daU*dSg31u0%@K3(h!)4L(=RpjqeGJ>u==1>51cB!V_K3!ghO zJ+mU97AkD2Dep42oiOeDC^3d)3WO8TRkAiOD5FlH0)xCXrmrf7VN~#S13q z1yccERhY1a)lc@mxh^vNrbNmRM`i9fZUT*VAX4rBoW!Y@)~wk!6E21ZibkYmN;N7~ z%64N$E9A>XaJJ~H3Errc7FTSLh-?HLreBj#5=}Z3mHfH1 zssAj}@SV+kdB63kj<%XHT_0X(OM& zQKQzeqa0?SFPtM=e}95-N@Jzt=V;6I9@_tqZG}xKtH7Wxox`B2$*)WWjXrgr)Jo!1 zD{`&VqkEM2Q0@~x*#z~$K}Q&o^lM)2fvD5lQ0YyV~Ol#?{MSJd4UD( zyugZtDmw^Hy#Ur6A*0bDrEiXROrlaOrQs+^xXxE}Hh#Y__}lsv%Tl95d0f zbz@{4V&aveXLoF^ToX$$BNi2D7nCFYaGItgO1y|NxPn6U#LYR-JJ6FpryAgcU+jay zDZ+N-8pXgOn}XRIO0lae1h0ynFf z!8HwcRZs$>LZ8}&kIKP+N?)&4O(~s&uAP^$aN```5c51FzDsx9 zIm&38#;SB^`Dea#uOh}&YOwmjE1Z4WvvuQ9V<96}EYPbQh8L$9WB`q1R(L%TjjV0e zQ%DfCvSN?KAk>Qd87QN7lMa(QNM2+Zi?QA-BTmdPNPc3UimCzoxtf48HRPlsC)!yp zuTtK;F$TMnRfGXA!{B+u@aMTt)O{6QPu+g!k-^Y{dnwNQ@;n5u8iLV0`}s$U}hE5LM+jAyrP=k z9SN4kCS#*u8!rlqlgo4@iCTH*{B{#ouC0oxT?r#Zj~u`XONR!sWV#CHLf(g;c37U= zD9KluRL*IH7zZ#8^!Nqg8y369clY~&if|d&-ho%EtJ2xd#!cv!lLpGH4;f}nB4hJB z`Cd`$mD4B|i5wFsz7$EYW(3W?o>RY!O)pb=*-;nv98Y)O5f4~%;&n9?gcj{w_OvZ3 
zaf}0=%Gw_9DV*p;hBkAmDLuieuDW?LEVJ!?tQ&@!M599*YE@)bwH3yU-U8)w%8|rs z)LP&*`GUUzjejwjilHDW- zSf@FplCESFUb4Na$GM&4Sb3sGiT;j#j=I_L5ye}!&birl39ChiSZwVlABSpArPNe0 z*KeP?##3R1KsUy=Z6%mAbuy=OL$9HMwILC^Ufz8dwz;RTM7G#;Pb>p^>&$db;Kq?b z_=+#1HFQ-pSx>uOY)yheuV_?F1QyxeOmXc#=hXAzqm0B&=&z#Jdl_#)M+w5GLxPORg z^NTZ?paY@XJvQwUGF?zeKQMry8rvF|rN?K=TEnVZDQxWpaoeA_?xuK}uztrgiyD9) zKveN|D!Zox6~~coJhxDYqwnZs-IzUq>y5kCjIFt~355y!^5vi4o1VME?$3Xcil@07 zIvomgj0?vza!mF&u-)|*?K7vS!<0*!(&2A^aH>h+dZ$fW90rDHQolBMeI7o|bEWZ9 zN*0%Bd1Tl|xxgscaGXOe23~!b)BbkygE5sR3EnO{VR8Utk$v|sc83ZaC={J-Bf0m~ z2Ft>lvU+N-TQF0xyHCCa`YwT0x)L=#K54mcSg;>Uw$4yKS3}&VJdUyu$B=z$Z-`Qv zb~_T8a}uVGq*3ag_JU?%af}l37$Uhub7D31bQOOvmGY=)9bdi8hL(%|_Nci&S>l-xD?s6}D@kn=yWVkn@jzAqRtiV{vMR=ZwasoZbVP!m_#D zT4gq(XJa#uKb`S$(uhDw+eX(|Ozh7}=-LDm{pzmoVIxC$x^|;$%4WNZe&;C_)+-}k zEhWb}!*cxB89RhF*mJqg(j^{0TFmD!Dq`-{{|lsoXFPyZqMZ>-KtRkq>Qm>w^sMxK zM>V9SaE^4+wV&f)v*y3pRS%-*$IiLqdm(;?;2%-Fy_hv{^BlwzE30vJQX+;KT(OWO zG`>Sc3aeTINPbac?mr)DG_y9;kK7IAtMgw!1^z}G7ahzM$W9c zSa1!#f8Q<;P$>9D9MzIbdb+S=NmVx?sXl@UR+$ZRseW&>+zBETRa`%l>yYXdVblBvNP`J0{Et8y zs!;|XwNaW_<@ZT=KjSV7$<)+VAE@T^)hv6G%$cX(UlW**SwjNR%7eZf-bF-3VHXgh z+9f+xz+J)l%5W`oGtwBxi_>|vqT9C`OLjA;x{BDv&a6N!#Q$9ws}{X}hUVwNe%=T8 zfYJ-AbaI2larEc z_KMU|In+4-hfGAs@Tf}qzvz%G@8W zV(F&~Rr;i^`wKm{0f^$zIVsWLW9Qh$;M%<=dq6a$NxaX0;aogI-p4cb5e2D~K#P8C zr+@GU(6~`08Kq(6d(I|$H@eeGoFUS&_-b6Oxntgb(oF+x;%T164z z2z{(((v4n%+UJmKsQPE}8S+Nc#Xpk4_s7Hr zFpu(uN|{dsKZwsQ%YhC6I1DJ9J{hnvn`#dHqhU0!2=Xj`GZa4f1D zZF3+|<$Yxc7HbeHTv*96j9hEhcufJ>qE+Vb`oSofBc(>SZc?!^Xy}ccBBnu|kOttq z*dn#^%w7f8DR38Fdz<(@P`isJ4XQ+s~%oQ+Up@xmo;N%f$|zc-l&JOV4SNA*tUV ziWZk-w_8MPhM<)ea>PWTSe$JYTZuBf-=Dm5FYaHkD)wMfCXl6!w_5rg&P)TqQ&S%G z*2PLmlnGshmHju8F0+J z8B1kyWXquJ)9k8N156FdqDvG2zyq}p0MChl3x zwhdsbvZ%FVPH5IG2!_MjTm{sV%EbSKl<%Ebr8yjEm)->T#uA2?2p!$z(msWCw4~5` zOr)xp4iFjzyd(!?qNT04?RDrW!bfsS;{v$It6>>et`H2Q4F;atWCFH>)|;+_F@KR4 z$1$v~NdSHqMZoH`TpZ@2yS?)1SfAU_5*;?C^}Tt%V1}{P7&ob1ObfU=jt@Csz{b&ZX-0Wv!9Dt zJQQ0|&?cKEJ@uhr~G%3zik;4D92Ex`9)122W7T@1QlN3hd 
zIBvui@FXb6x+m1}a#u5<+S%=WBOQm?{YYHD=$d(GE)UF%sgB?q&ep26_Dro>2$Lz zwJ8H0*~3et>qVorZZd9K)JDdA-JQ$lGbGqoq}b7E;EO3S534w(tXw>kUz9Tz& zt&lUFeT3bD;sCCV_FG1BFf|XekK|nYyWo06@?UweOpPG8lV9^u7nlBl=nt3fo4}9j z{Y^FIjr2NdAsa+%b`Rbu=ABdGKz#U?QY-?47u%IM5q))7*3Y^(6CDm&If3lArplU< z40(PkA94D4RUnEH3IxJIFD6^)-GR8CKY11zw9p_gJM^to@8;&q*URI@wII4>{U{!r z`Y%Df9*AG{OKk~65h=rrK~mbRR^L-nlHd^E*6l%tz~Id}#Y`QN&P`8$D-82@%d~{xP~F%;3Dr zMW&!=FSj0#-KS>@E-(Q^(eT^>ICIzxrg>t01wM=!+iY42SPyuTwi#O;i85GV!p4gs z))f{fKTKf)^C@VVmZYo`wCu4N-ZQmwQqTCni7Y2$mfvAN6!d9nIFxczFxNUQSgcry zxEVq2{*w=92eGWxA}h8Y=TX6qwmtbhf}f1PUq&+Q28DY{Zlr`Tc)rmloI$+E%M#+v zMHsnpgHA-S;*VwYXJiH6(qjpx;-@g7oh)G(R=-u_B)Mu|f@cODUt9FK+2{+H21@4b zZs!DG3^qe2ipH6EhdoyUAy?sH;t710WJ0w_J6&p;S6e6yYK+5(HeY<9q=*a&$O0fO z*8H~wnr)p%60XTOz3nyc^dW|*h(<}bK3OQn-7#XB`EUtou&(#i#>a87#(k+GZ8{x! z8r-Z4g*sFustO)*6}!fK1LKMv%0~I&6<-fu-V~&4;QiIrYO|g7f+T!B7`H~Sv@mvd!eUXl(pwsda zOUfejEq*O!EaOywv~61|roXl1Fvd;bEPo0*gx}ht<`Q&X!*41Yw4Jkv*XSMmWiEOA z^0uGg_A7Gq2-!b-NPST;PWMu?+FEfKBGx5i{=27>p;B9YSu)hHhfum5o6dIou)3jd zvG}&Vo=N9J!tizi*QE0WpF6xn+WU!OBD-F4rP^C5=*9%eeba~K$Nm*t?we3M!n zmyL5>m8ar41L`I3krsxs-?*h6YI(LQfqpgz%BPO#`H1s?&NVJzQz=ozIHMEn&idMe21^C@q~+?CHgbXNL1I@G??{LU*V4?CvUz|a?Mku&&b zUI#?fZ;p*0vHXUmxzfkKx2~+^kGF7X$?k3otVJXIl3V#&t&qFl5`uzKi_Lr*TCYE8 z+Z|v`rS&$;-yR2TCoLWxUIaBBm0Wds_xQBz8GNl=)frGm&W{N<)sfMnBA6QkvUN2sJ%Cj~=5NVVY-h9Zr-m@9YC z27JeQ1i6@hKegh=<52V^HKE~BJ+;t5@hYUT0%b8VT~pi6)-Pam@(z~czcQmf+QTPEDdEHtxknXnGOLN17^lV9?rT38) ztuQNiUz@)=n7f>Q(pgf)nrl6u$wLZKtnK=9#vOTdQ#8K;29wPWhxX25tq0@FPAw&F zy4h-j!1j%@`rh^X@@buDi+AIykXJNdEJmGE2cOe&GBJhu`i%RLgu}7MNI@8Kd!$4X zt-+9x<`EWUQKa{0_#`YaHa7;>>yl0M^V`Sknhki8~D_e^pB6wZ?xw4nZmw3a8h*GABOtYr_*AMd$ee;ikcZAa)kpEZt)33B1~ zJ{P6|DPEf8b_h7kh5P~$u*=<^&nY$4@}z^W$q@Uu@1B4i&kS2>B9cjnar#3Q7w+Ma zS(!m7(wTy3YKv3D&4K51NQ6T_j3~luof>?^mGSdhz6Yk|pf`=F^ z@>k9N^$!8&U1aEVhRkE^3ITQ=PMIqAG@g(k=31D-5#^rXDUI^C6k3@k3L)r+w|r)u zhnjj5X`!T{7_X_ors;-{LD7{ROB?MY25VGw&z1C_FQz2iiZC%jyu`WXMhE*;L7MnjLNprD_GazYU`HSO 
z0wd>*`NiLk|HK606=`5$H5~H~xI+*8JItD0u*3Rh|8yM<+Je7+q{liI`JNE?DoEzM zm<&CfdGR7SU}=MlK0=T|I)(pWKaX%u1gDTbz4=mI`U%bVL&eanabvVGC0v@3cVNq- zD$mz2{+5dOjrsGINA{p0kd2&PCVI+2^f@*ZY8|0}{A+He2Tlog)H;3Gz4?>R3N9;1z)i2p1VDYE^v+ zLP6W^8KwqXBD#eTvWtBxre-nq4R(5`Ga^JmC{NO9(w!TCiYi6Oq%>m5*FcJcV8yop zb61Jtdl!GtO&+?|y6HGzy|N7*mP5eHn0bA6I;^rtqX5}ULQf8!j$2Loh0PdzAz=8) zl9i-hX4t9t11G)i6tSB=?-~YgC;7=|gG!p)Uw<{}t4i&6S7c1!PHYKIx0ya0L6;?2sBaLf{qT&&dO*$gY-od+LQ68IaZ3`dct*U z^$7(D5_g+utlywxP=iT3Xu!3*nMM`-Y~G8Dve2eqWxP=64=2G|jYh&@j3GwhcJB%s z2B>1@Gv3|M1dM!Jwq@=3EKC+W|LmOpF<-MW&^*?Xc;qB zG|%?QtDR5#?=;g3wWnsq zZPgQKY&XxmbWZv9g?KNg%%--an>qP^&u-p}Jk2;fUyy%K0%_S$2A}%zdCN^R#iN^ z;pPiW?V;=GLM@l=Qs+7||5J~YK$V)!ONm;p-AkW!IJ}DVD(3vD_w7JrsfRgx7OSD! zV#!Zl<#k7(|0a$od4gYQ-0mU75n-$fHgg6kr8vGB-QuAw{0SM>4GM1K1Md5=uDlt!?SAV zWucveEnJPx59j-p=Z*g0xpy8n0@IfUPEUfx4W5AUq6ZHDydP=ZjZuaam${z#a@)1I zU4GYmaY^RWzv4OI`(b3LlOtem&OKt$i(Td8qxH3 zSVOAF1jr@aJLIeK+(A?zsg90rDP*Tm+}agpWadfDE%)j{XKbM|oe<1yVRki4D8@UW z<_0}033)@ICn3WF$mHh2+4;{~TQw!N;5C1G_#do!^d-nX8Rei~R{%DsdY%|NS@Cf0 z&$fr%bSHc*LT01R;~`00HfUjZOOi>%r#kn~w&R4G;Qa1kgA8zF(Zx6`p5Ck&qGKU^2&_OK>(~y=4msZ`b0gM-!oVOlR3ob zXXB8v=(9rHNVcwZdpKNq8~EQ|+t08`zdRT^0Z0{Kg4jcj(EhbGJ%xVx0r~q0F`jZ|Y+XVwC#-k7AJR1d4#snU+49+$Z{}y{M#=SA($n*83}U;> zo3Q5b$$6hXS;32^NHaE&KXa4OJQk_qaYs5$W+P)M%<#}O?X~`FW2sLX9tc+kx>3XJ zhNZ~Df7ifs$N%IpsJ_u9!{vRIi(B2tafE>$PxM)6u2~_Ii>arRc3w%orzQWB%&ix% zf&j~sXWgbmvjwb!C#E)BD!UZp!gby=4Zaj$l$%TnHeITz)_cy3mOYx%5xmp?RU`wL zlJ8;Ti@y!?iE1gxX?egwLA}PD(^W3q&VNwhVF*de$liPojWp(41CfLnT1(MCT(9F> zwyg1>uiT*<2L}Q(6N|rHH^*||Phl1|bpYwkfB1sqdj>CWs;*yeHVwKq{%p+O{G|>R zXzO^Ts0jV>!}ldPbIEi72V6T2#7`jBQ<%^JgY&8j^8Lph8|hRg}5Y<;Oh@@`$`U8I1lKO>%htTK>Lk?BM#+@v=>1h|9K3j^$?i;jK42p^>GmFfyXE(@0zvv!Eap!*cdP zi-u1SRpe$Va?E3pl)P@EsW*Dom~HqLZkl8-7kF4S>2pw7YNgG5{_KyJYkB02$Kdcv zb}L%21}5Q@fuNBIQF803H}-M!=BjiS9<6BeA>Lub^YO3B+mYVxvjp@i3#13PxrU`B zO2~bdRCS)c%6S%4J8kz>I~L+2xIL(zSgeP5GxY+-e6vexTWQXHwTk=~M|7tU*IM*KYTn)hd^E6b*R3>xLuN8EX@mzT5%Wx|I; zT}Ud`Bs&B^tEopG0~h|o_aq@ocHWcgbS;%S 
z5`Jy0Rh-C{Te}gMA;%?FeOfns_L2qdjBTG71gg$pDQaC1*HQPpZG&Vm@yrfkU@YJL zbAZR-dky7;$gDL@{PxLNwcDbQ#p7Q{-6gs}opvi9C-I!$)VFHhl35v(@DZP!A4sb?xu~r$|ar+?b@<{LmaYo*2moqYsS(St|pe@;*92D+< zOo(Yzntl5x=0#Tgn+5rj+br9CvSIEr9Wq6I_3f7ATzz$}BV^IpTBSsJJ7SzSH78#Ci zcN^@P?~V}eQzoSZ4EUpcj7t+imE#5s&u6b5hp>_OmlfknTQhPaMtlvGGkDg;ewHitJd^eygD7RKnzIy&ilH@phhz z9GB+_#yd&Dn-buJuh<+d3=ep>z_C$LzWQoAxxU1pncqKS_FO6Y-6s?EemHkHO-pGC zp8cz8=n^T#H*4HJkrI;o7l2v8u+5Ju88LJA_*QRKMP_$B*yDv#*>$U@5ywiwUJ-dn z!Y=YF+nLkm#>FWuQY1{eLA?Lu#pCZ8h zM+&Tox=p=R(R_xI+J($b8E@_u+3mOgu(Lj5jTyW%KsIxckuING)b@#XV(9pIstn6# zhgfOI48vS@&LPD-%l1jYQ6&0|8`5Mte>k3&YnNxImF}^<&?d(X@(=kKa}<3~kBBE> zUd8*(`M28VLwEnVRr9C>ja2$?!1hOv{@?Mb{|`{s|NivzOCt|+L$`=r&zXNl9XN2+f&+V zR@c4POFvcGojNBx^xu`B06dn$Iy2yZ&9pLMg&rdVVlGKS>)CwpaD$_rWx4`tmtD_i zmhV7)n~J!L$-bwAz%98Fnmilh> zI#3l&&0w0f_$!*yf)}yO$KsrKsH>^)y2YrJ1M2@fb zQ45!+Bxo+mVCw1#AIQlwf2gODers!;KZGA1o;U5dHZI!MZZCrHN;y`~TUuH)D@}(| z|LBbDGbb>RlYiRE-sXE~*h>iZ_w|x^cu;cWyh-S44PA1?r3LQVC`j{_;CuSM`{Jf! zb!{!S3<(MnA;yTC!RKPxpe)>@%_j)!WJ%_3rhELT&PP;KbiURu1FrImXQom#fB$Td zXuzfPXJ<_Puj|DlpM>~$S0InjnNEYVWZwAw$_q#$+)pMPn?}wrFz|NhXZPhY47T1K z4s<%d%^_8-P*zqp?cocj?P10FwCk2l3C4=bT z!GEIe>%n9_QRVACs<={F=2HORyo&!dLLnoF0^}C77nupGHVcc;tH8JEOg`@^2=?{O z%s$2cv3kCcsU|vQYhWbnVuZ}QT~3~QyR720H5eQj3G_uKX>%X}BSP=u>E)u^vNMeV zJzZS?u3UR;{dxrd^T7Mtub%LFvmI=LM65p2c`)lf>Grv5vz zRD@r8b>;QAu*Eu~b9sjRGQSFS7e)tfiKM3bS4mf&2J zOk{uj_%Xtn2c4t*)kj;-wsq@FX`fHY9zDwapj2L8JG0Nft0jITD~J9- z!LxsDx&So~t`n?X$G`q{n)oI2?e7f#MFynO&Kl)e%ui&;4b3Smv}#ahGZ{|f=YD9A zxaa=R4wmxJ^-w&Pn{mU_p`jsfZ*O3}C;q^^>lpg73e^0FTT5O(65?-69n)u7E$2p` zI|!sa6ap%cIzK-jPxGT2A00I)*U06^QO+$d2em=2eE+jy{2DQ+fh9`SbpRSatM1ld z5;hqbOu5SGv(V)Cj9Q{p?-)Wg$+_j5ZBo$sa%u^fe zq;24`96vl|=*ewzU($awQ(fLzI|+B--_=oXXgoJO+RLsz%yv6e*Kv8Ot(_lB{UrM5 zKfGAv!af_6l&ME#!al)}-%kUH^+$TV7hYU%P`Z;0@NB1zusnJc^|W-_s&+m{?j2#o z`rQ1(OY+>DRz5|L7$yp^8nm^wot!Es=%7ZGvt^THIHgn8qA4yg}C%QY14I^zEMk8ep7ul4zZR{;47 z%W6I*CRw1Mq@?64G_)8#fAflp3ZN`?)+ntD$|xl@^~J@76wrip0<$W;;#ynp?=y_` 
z7iSNq-|mQyKZ3%;HX^YL!~^-*F6r0hYv5ZHa5i*ASlI4 z-&DE0;^N}^dQG6}@>%PdJ&Y2Rztw#qzVd2mXyD6N4gzN4=%(WXZvYl4iDN3u%7z1_ z*PZ!zBOZFmh>I2itHXMW{Fk0b?18f658B?whhnw7oScV(baZqE1_lo;tEt6LfI5vi z33DRIlRG;*mzMN^CQNt7a-^vK?bat95*u?_>141ADo_c567Ada@^YYPcxsIv4fwzD zL~ZY-;g~t+`h{kC`Y*eF8~t=U?#>>8dPYr>;4#O{4%oYYNy7KvdV_dTN@`=i3;rrp z;N){mReRZE(SWb%9Rr3Wp8Y*UKYEl^b_la1)M^L{!Utyt#KVho^d*R1mAvYL1U0>Xxni6+ZMOCgC1$DS ztIY8d)VI>F-?d#-h_5I8P^q_z)b{}@ddaI-I&E7k4KN3|VpjY2rMkKrU~f42OGm!6 znwpxeEmMFs-KLe{BWked)sE@)Fa`GY<9d4zg}cbF!>8?&^<8PyP8ROh88t(-$GlPedp z1LNW1Gwk@D?Qo@2N1$;m@3qjP+m-6D{mv;OB4}*f%&`X#4~L|Gx!G{%2|m1Hv9*>c zT}ba#`6+1Bo4`1H#Jh0trcy;DZ$P%lCwjC_CIlOEK4F{XIZUFSNo)>K8i2?COpT@P zuw8Tn=DpB+i30Ax;k~3Tl1zK&LRp|2wem!^E2>j@IB{|pLB3R`Ty3bvQ1(fPKug5? zs$44BXg*;7{jxy&He_Z-0~l!PhXoGIXUm64@VbN5k&i^}J&7$8nmuaX{pTLSyLd5G zF80-YQQZ{9ZddU-5c4h%D`oj`k5aYDQUSX#oZ8WI0ouUO5>|Srx->jau*F@W3Ep!) z0nqlzg|HU`1<9Xf9lPgf2Did@r|&W<$X^O&91SLMs6!ap*iQIH{ymL+3DUdMK5Q8w zOyO^WI%k(`&gY-Z%bexBd6xBM4F$$FaJ<43svQ^Q<}^zIYcQ9 zF+^&5{;$tF@!d<9r{-{_?p40JKRnvfM_GrX{-O;vG2K(lr(V6tX&;T<@g>_z6 zc_lLHPZc^&-iT*&on;p9xfZf(YdGz;2ZA`sl>^r5rzh)HA>4v>`7>#5zgy;cb}D-3 zrH15FE=eo+Vo&lJ&W2d5+4S5y0WCI6%0<}|wn?^8*9&Xh$$<1p`OwPB(cIHm!s5Yit}x5Zm!;G8`wLDeaR<)NH$x}z(03B$SRBR z>^I;yDMPu_`#px`gBq%Jd`j-4<4*a)?D{ zd8roXWp|~%*XHdy1Y_T3l$O^|x|}0J{SzlD8h91GES~Enq_3-+1}vuWnVB6&-O6CzH}HGSS^?x|S3z6603%{1GjDsQ8|Pm@l?G&=WOI8OF2?vmha1~NDE2>AFU z3+Jh8UgB)HHVhHTdc>_?X>YY(&K^KE2}`b$0I5>@$Ryx8N-~dPKd&Rb4({#x_|YfM zRFWk;NKx@RAEzRS5(N#>QUO!UH0^PNrx(WT;+i=6t-;XCcDF7s#<$v;W<_mtgp7uU z$M8;X#8$2sr{oKDsMCXbU7LK@5A6gR?!wzjxtdKOQEnqO6>hWb+aYJIs=d)-usk+9 zJBL-iK^>GwU$j%jxKF!|Z!Tx!II6YM;^V0SF_WF09oF^RB;VM?L=up|Vp)oNFEb@& zWDX7wAC!Z5X59jnh=xvvQX8KWm=%9B!d$K1q>Dr|V7KjpqgWT?el@1Dc(GGw?}!GR z>HjL(tToATWUzcu4HFu;2D2VyO1!Y zLZ3Z*1{{9W3nx*#erZVvWsK~fV?}EfBW|W!ZQoyd>o5NwjD2NTRPEX}CP)YfNQs03 zN_U4!w{*9J(%p~xGvC5zzu$iLyZ1hhmtXK_t;Je* zoYxuGJv!=sx}(ov@WbqbK`BD=@vg0$=(JQ-z&c#jl^mO0U>xDI2vTEG`HY@`8c2xp z+RqV~pFgdE9@1{7+YF}D6Ol!LfIH#1Cpj=AbvBcc^KmvYmwm_2H|gbrKDTlx7Pekr`#xS2P>)G8 
zwu_%{#yWqZa*o{f@vNUK49 zL!04M3bAUG@!jnC>1*67L;&Cdp6}J%v(u>r?*BV_m2Dh@Ljy&vLUd&CiSH#nJj|!i zLA6(PM1ZM5^}d0wM#m22MJsh1`LxN%7k#Cb(GmDr>$Hx2Lm@ha*TeyPy91=8+|M_x zvN7CCi{?4AMxiy;a6G^6j14kz@edw2_H7=0*)cRZNkLA|oK^+}Ai`fu5FdaFDuikB zM>bD<^HtKAJibtS?Z0vB?Y6o7!L=oIp1*8VXf%V@1YH5y>Pz!9geMrm=7Hn9T;f2?=#S%87UDzjvhF8l>woFCg2CF?Ta zbbn5o8QiO0UgXU14zZ6Lv>yt}MB4<|aoz)TtbO7WlOogw3ug5wZ})s&91QlLISZn` zBqrzZjTse&Mh~GY-v4=`7xzhVKiS{y9kIG#3l{l1_MO_+r*IECKfIK&$#-&Aatqy) z!}vsnX)X3O+H_<(Y`@zE?B2EA6QV!9V_rCc;L9fGs$08M+Gl4ScMJcgdchIb93|#1 z86FdVjp3C>ARaTgY7%${g?dWh@U*(lJ z(WS2aM`n-AtFPx5fMb3iB}kWY2)H^&a-FSz=mS$c2+>0S`T}0EoPsc_F=lgx_#h0u zn3UD!SlSs9%z3MdOv_YTgF#a-V3P97wo-Nm!=#0pf36k$1F&G7he!rYubkrCtoFD# z%WMfiCx76)8EjSyg6e5^>D@n(Q=Hp z=Dy8`b12rv_9~$+ms)9^Wu9y#7E9k{5@5%kvg+8uNE+z%&k3Y4|CMB<5 z#Q&{sYCLrz`MLfXQ6!7hzVCg|=mmQn&$6TuE?CfLS{yOyjoD$|Q$r7m*J87R7&Jn- zlXTH^mK)b+X1*U&ad8LucAXrX zs#bYk$kRD!bwI7)9*Yd7W0Ngr);pM|p+5}v%M^Ym3Drwa%@Q2XLhEcA?Kg0L^cIyn1tRx_NG9%T7 zV+P!)459QSS@bd_OdxHpvvW8{H|d`48Rd7AV=XIZz3#CN5d$=Pnd?o}pcE(^v&|`F zRrA{`NUog57Jc(8Khlo=JU;XLNP=oc&l{9k4zObET|G=!u3GbfEkmr{f3TghvP+Uh z+4s~GYlGhM?{FBa-2gHA-^(#{#_)#N>;_5uzF7aX7^y8IZz zg!7c^*EhKiy+`C92}fi5RQls+4O73HQQWM<=jaDsu`|urE@Qb0j|My10#gTxx@5QI zhqtg6)|!q`^+jE$J_pw&m(cp*ZE8yEeZ{~4nHG&!F&EIGWs1TU&gH=;);7*-aBW05 z@M0Bah?FpWwLKu*E6u(tDnLrAd(d#)WDK!aAS`3f9Wgh4j`f~;e~L2cMU|QCPOT@V zGAox?s2o%wvLx_%t7%3`y2v?qLeTpe7Uhy9dOOxHUs_H+f%4Uz8qeziuD;k0Hc3E+ zj4d-Wcx{fSdW1`3%;vwez83ggO(rB7b>f3K_2wpaa%;w{JekxS{M0%!uAc|6~g&7(v=o7hf+F73RgV6D#6spI?l{ zWo+|RMS(HS`U&vl=rp|Wzj%#iG;SnMFi`P!)zt}2v6<@`8;g4(jIQa9ygXTfe1P3( zHre}wuU|=C^oR47&b5L+E$pLjw86JIys=M^@FX#-fRI3iLoMyIFDl$@50zfM3QG6v z!y;Zh2VgT+&XRMT2j2P*=?+@q*JXSUA)`dZn@jwN<@@24H?V@gKRHpg358%5zlC4j zB$nzp)XoqhCmWmGJ8W z@Qq3<;}s>tOQO5t#h)Y2PLL00C6Uu^Bz#o<*m%0ZcQDuY($ZT#!j7QLR4buJCREH7 z^GkQ=CM2G&kTrk#+^7l+&~Lbe7bwBCO$&YcNjRwYSFbwe;NxhKX-QpXAML`{{*~D5 zw;X~}4LRUy;nLKuHPA%ff)B=5saDl*TmblBjJ;RuN;5#-{43W5&0AdrE=x|!@NKMW zeWCgX#r?FcFWpN@n3$ZSc??@v+y7J#dqeH)b_2_K?(&8?Y*Dk3a0=LX*w6D+A}$ri 
zX@pHxz6enC5SGD!8*;HA4w-p$uErHLPY&8;QI&`&n+k&A?>7&9o7Z9BqyZh!qwYd2 zmZ=hp*TC3MYd*OBvY@y)eqghzx*ByASXgt;iVnRJ4BeWW6K@SIFWP5LVw7xnx)}oh zZCwcfD`ED;dkg?z2JV%!2${SPSWOW2gp4ZC+zHu7`FnVHR6IvhQ&TT5Expma)YaAP z&of}kPEDnnN{=4d1fbip>E+(l*$(JYN#pND0*N>6?T35d<^%Mb+@Sh%JjkF$`O~LQ zznz5=|6vT@tD8@4Xf5+-p@9`EyQ*k&f_7Z6q2VJT8UB&?+D>;8Q)fY2F2UW3&_Ad| zRw^a7*;no$?Q|#9YPg+^`tQ_dtmdB8|6cL4X10vmMZw_WclM@Syq*K%x`q*ZyRfv= zz}aU+S7t-M_5!VBe;k0ID<+rUPPN=@Fy=A-DwxUP@0;A&4uW;=n;N+f7#~OBLn-NU zfsg)HLG$}{ji1v-RqOcNxSVSNe5WZalTC3-xlF{bKXg(Ii_L z3(InM^U73@hWw-GfzMTQwNW<=t#Nfoh`1-=*HC={pRdjI@K6iKt#!?4?E{B#9HOBC zz0H1YLl_ApqZ+#rKPc02xDI?{_OHt0r|nsc?&Be+;u0?SlW8GstoPx2kxYUF#!-9y zN4u@GTO96UhuxMcWi110q3l!ZCrA}mz4U^)cd6i1uHV9NRvbSGk(18)Q&FeLfJ^k0 z`7f2yQ*39Cf2=iCR2k-rV!$A9wz~(l4~FtYp_+!+iJH%9rx_Aq}4Hiu*Viq@1w z?l$sR6Q&@xwoXfDG~c>;KH%P)aCXwo@Y^mw?%fJ27rj&0`I$@ZT3}j;_`2yseHLe` z+mL%7r~;c!8s|8GLf84x#QgmAFQen*iZn0zg6MiSuUU=Mv^2DX9LMS(U-&9v&yN^={Wf!rvy>NS;dLT5OEJLt=C8(YNXpB(|QFUTAH8b-&l9 zLdd!A1$G~LpNlDk0J`ltz*>(d=PzaGHu3vzURJJK#}f#+(?UUB{>z9eB^`qi_?oKH z^455F++QL0j9Z_8=@!7_Cll(SO(tGFolO!usds|c4~+Wdm2TS`BGu51?|!RGb)i@( z))pZH=bc3;UC_Go>OLi!_}R2M+h5ap=yl#YG&7Ua1Gg^Spj1^!>6)h3(9l>J^-D=e zxUT0ZKg$C3JQFv!I#DpIc$(p5ucX63Yuo(z>{}Nje>tGd#7 zkaOHe3_Ce{X4t-C&aV3Y+`ca8t~MGsBrwiHuKg%42COepcj#olJvgsfj+~rYwlVgT z6mC5ABD0xp_^4KD$hR!F8P3aGoZtO}w_1IBgv6~S)j2*tXULWm_p3J-&!u$~R+Eob@oi{TH4gi1BDR`BXT?M3>pz^+G^uQNG;HbU8gTW*l1MIa^u)_%{T1^Gh4$_xc?|~sO+j$q!V$D1G z`d(3;`dG*9$zNPLcW1PJQF=Y*S>FhICLX}#NpHBnV#ci zM@^_Q>gJ32p<}K&cmnttAZ0Q!2L+#krC+_*$Qou;jP#45yd- ztOw;!)I_j|Xd*BND}O`Gi)av^v(j3}+9NSnYXwZMl0x2-(eN5>SLj(Zw zgN{L-HK_eBP>T1Uqxe9**y9An;xwz@+uw~h!Hf(wm=@XI3itlGwf{>coKB9inU~zP zT~{Ew`BX^Mzm*ZoP$b~#m%w+UpYA)=a{k7|0c-OCfC{Euh9-{CYEUK1%dbQrwP)9b z#MniT!HM0s%}9b1xWYKnhJX1w-uiPWOcUi)Pks!dyHq#6Eo3BOa_h!ibHX zS5IXVe5}@EyLa#2Gqkz%Gc9OGR5jo3-Ma_u z^Z?B!{Usd=g3Nu5&k9Jix)K}UNP-nZ_ze54n=uHS`!UnKXkP0J!n<)I7HW|?va^b)A5jEmgWBlB_sR= z*9sTkbgU?oK_A{k3H#PBc9gT%Y1H-=QL_ouvO7BMGa#iB8K^#7mC#ToZ1LQWjq-*z zU#AM}~9z<_9d|Pobfb=(A<(O5sVz%BgpD 
zcHy?Yag5yD+*jRd!KWJ=8&y?R;qmKCGk|8X=PbmemMQk~aQ2O#j?Q>+pBwLP5Z$cb zUNc`$$1mZMx%uIf4O$jMk!EQ=2~$Z`MJMJ_ zKZ82bU~b*wNYymbOITHHeG@e=QL0H5+Rof3P ztx4h!pf$br7&@raZ78&2fx{Bj5V1U?8A>~&UvHWFmU(p#G-Ka{h>lE7P8N1s^FN)k zS+aWEIutUlPJ`J8Jw7_V{qWasu}atZFl@6{Nv!0Bh=>RQhiUhvj_}647Q-zzE`}ES z=0Jyl)`LFwkd}}`6|YhjQ(bc(bOq}U9j*U<>n%dDAAwirqobq6q!|2mv4akh*3-=E zpT`Uq`kwpN)7LyRv$x+kM6!b4>y(y+wm3rld1KRu9%mW5&$SX6AAfcG6?aS}`*FVlxIW)UjV}Vs!)u^R~p%eTGgi94tJS^h1EK57U)u5-) z4aq%DvmP@IpLjJKBeVKvl{0O01`U1XX4AmwjM;mfsr7FLahm-}*5NACr+~!MPrYdU z?Tl78mH!TklLBvoydbb#2S&>|-_T!f?}ZMKmOhT5=?4oto)4hekwh3mw?&B(OZj+LH_6z{u5>`; z+<|9f#?oGRw+9JUSi#8l9)eP=9MrS0?g;;>3E&fe8^2i4CF$b$HDRqr?D9$S{WhSG z_?Q1DjmJ?_WT!huC)e`IZFU_k)rQkdo=B^pE^XHSd4 z`6ub)8-i(tZcga9IFui&i6Ohf6@*(nof_PHG*o(?K^^3V##1uTz#Gq3BEE~olso|^37J2xs8()$LA|GxiuSpZI$T}bccxJzjNvWvm}mQ?UO zD5mO=UstVt@FFbiSh92K4||;Dz~%Z!Q}joR8YO4P3T)Db>zBQKt&O#^=UGn43#L6WiwItLg4fVGF+2 zhjcivdNKw95d)!1azTy)2vO;eET9l;TWzpof85Jer^>0DA8Q`mrojKhx<-XyynU=L z^VVy>Z{Mx02;ncr7by1lPEgr(r`XhzLwI;$i%5*-z7opLFmbc&ioxgLPf>f#wOd-| z^shFYy3l7gG#CJScu`?ttVo;ltPFY!+uk7Ya{62bD2+aAf4cC)jybnk((2O%)jd;g zH9F|bYPTo{YFgITAD}PufYiZuMur&;9axHvv{Jc~I+1v}Tf}#$l zW-PP-WO+fjj~y&YYj+7IS&zwhJXs=wobTx7jkkW66Bf4aBfJ}snUYedwM58$UHtxG z9vWtM@A&CTh?qiI1Ik0(4Efa{BCk!=H%)8FQM;7PyBZzn+$@EOnx&#m1U8zwlncmwp4l~pxsSUMW#{l;I! 
z$;OY!8XBQQlS@UHseDhgs`$3eI)pb(aD~#Qck3|~7E>OkMChuqz2<0FOzPJv$qDt0 z7_#{_;sojI|3P){C(l#GJMn3WXOLpeEav{_|_g7i(0kG{C($b zoP$?WFzps~qs#dZ5q&JI6SZy3#k}A1Y)T9=l8|N%_r(Fkgtg>m265c@^1saQ9UCv2PW@$rW^x0jZd zItJLt{t3RGNk@Xpb_#c91DZAqAM|(Gw};_H*Olf%xzhw~C|H#gO$E%a0Mvrza>jbv zqT*u}J;&*XDz0-UR7zg&}wyc9=jOfB`FE_ zLq%NSKPSTtA@;rb9Y5!L;Sgsa644Kz-E{YDHkl&DJd9+dr@zrD8TE`W5(JV-8PIe6 zWuNM@YRMO}WBT=tq>)ZtR!N&v-~9m+U|CJ}))NpJtHV!_#EK%(?%3B5;4$KUuCaM= z;_G`8fygwo1Z1h;-gky>a*g?+VHR48{p+fRu5z;nm1T`iF<0~1Da#quWa2mpXqD*aC;>qJBD1O4CqKn(fw{Us%AK%M*+b-22Wa=}#i502N` zrHY>GoTXCHr1*4?LMLk6jSVI_Wpn-eyt{Ldo8cRfv1rWsSrpyxkI2Ex!JwJ&XQC&k z#Z5jI7TM0If)8I3t|xSebvU-AS|XHEOjpxtZ4BA62yoqna$JkG@`iCx8LEmfM%IyrZ8eqHVD9R+X|P<(6T1At?DLDIxW8d_S><{wki z(2r%7jr#3MFZ3RSb|?o_CJ85Zq~wWnS4lUI&9=q9E{+-L8-3c!b3T$hdSF3_A}E!$ z(Gwy>&SG(e024s?^{%sz|I^=#zBZr>!iO+_`%U{Rt?J+@tXE%OZ2_7K%rtzo7x+I` zT-) z<0>$r*4#xP0Y+gzrtCkwc(Fyw_41Q93<~AbZS-_nmsgWnHyZ@RzxQXC+N<5(-Sn21 ztmmV`2Su$(2Dw9*h#Gezf5vN<-S#$MGXo4e(f^oAB_=w5mmREkRM#Wjb}{F=N{0C{ z$x15tu`l5I&S~@#vd1xQA{izSK@JYpW3R;Ee=!a2I49?NHvZ3Syl2``DB#SK-O4oxw>&Wr?|m`;brS{2UsuYw$&s&9 zJTg3dZ5;gil~nk|#QHDK3SdYkUkyN?GS*(b|KxUb^Ij2-Rni0;qopOSR|`o>ZgIiX zb{2Knx@>fGNWUa11Kw-TuU}d$Qq3J7%NbyB`~jXEba(|Ws74`kojK)yNdCgQUGOwY zzc8fG@giSmP;!`FF~dAI(5My@^76mU3@T!DDB=V=Q`Aft(9UR2b`~G;_oii+XE>07t-j%HmiC>(hm0H zvS&sicf5e8IloZjFKo8;X>r9*Z9@{ri5XeBy_)e$OeeQ}0sX{}wplkj3hR(`X~G7y zT9%$cCM8OHrrKCt(O2mUk_`g`UFv?`&gM&DjTyl=-VY6m^+WUDHS)i)&)l4(jy>GAu~>y5#Y5nU8l+SxTW6_TYb z0oPJLj1xVvVPdO$LI#=eL(B?K*`IW~q$}pe4cuM>jMu=Q%s8)%jpqo`2Mxgjk45JM zO1Q<8iLqfMosk~7Y ztETQn&!l|jT?${4#ibpD{>&0%#9ir?KeNh8^HNYipsTa9H}4GK0TNXX)T9BI$^=Mc zH#4&uS}HU$KF9mL#wI8ObCjnx-Oxveb3A(^a*X@?G$Ed57&$5e@Ig;(C#W1al>_Zk z9smySus8abZnh3=<@9IS^#5SUtC*$-;(b7cUi&! 
z&$#(BGe?%PCM@Uh%JvJN_zP2XW6=!xhLaYTGUPXs{XItrySJI6?=UGtpMLuO5;ilZ zN$T(7Xst4Z3~qGFjW^1fz^B}G_3#w~a1UVHCfDKRocmg`Sg&;z@6#R$`jYyesa$r` z7XOY@*kIl5Y?<(B_@g4w^2XGuBX|NXdQ2osM*NVRhKwk91IMcMmK*Qh#j5^{SbdFLVznSJX`{tpc$gfX{BhPXw5>cij<#5*HU6B`j3>D=08*7 z0WB7qn-*UwT)+YvABQ}@{9s}cMv*G6ZQK;-0+f^ChbB@$=7Emr3!p~|q^UqA`rno- z6R8)#P+md?$CQyC0Wp`-(y3LKOEyc90^~D{zW~HVXLs2 zF))mqg<-PWwqO&X=O|$2DT^Vb?sPek3CDfza{sukBD%-?uKcSb=QBh}!5O8%O)MNA z%qhC}aN7EV?Phu*RF5ADy^_xH>u(`hpApZ@#Cf?v<^ADB$8U$!9u*{vI4tUDDNj~y z_8sj)BH5BSrtec1W7!h3V1jE%RHIQRjCAKJbj-1sC7^J{+r7zOKnr+=%rtPJoj_La^cPS$)O&xb;RgOM<{#1wCG2N-F4ulLO%%)KXg2TNeSDH!>BXQNTzc$G&|IBTCV5Aglks-deu z`^1q0J76QD6M>GsGfr-#5115y1FrOQrbc%+#95z>ik<2DXV@Mm=EqZzwf17_>=IM& zu>7lk+Gb>ALo^6{JPaIc-IX%jjY?KUd@m-pEPK{=N+c#)FlY6UD?cBlq?mIsokft` zJ?U$*DzJ&~=|q~DzD};jbp`}Tap1}nBzbe`yn;$(nosLnGcaVQ$8kn|O6cAdqXwtO z7Fjah2gn^dHMV|{M@De1#vAPSy{dG0msq=AYKEg~5wp#!YY9{gicn?~Odh|)v6CBC z*VN`X;w6~`vAR;|o}V9!H)!wotvd`B6saQN1e&kEYaF!u*;wVzOGGQ&FdKb!RI~wM z@r&UvV>1UXyyxlO8ns0!ln_4Oo43IRB{mo5M$Fllrghv`*Bk>Zb%Xbv;FclsMBX6y3#>a* z%^&YxZu1pEC49gvV#;LE)vGsNy$FlG=aL}l@XI^g7*CtC3nEjh? 
zm4RyOk>;CEff~`WdB#JYB>@fq?a3<|DEXeYA91!PcR5 z!*xfmT^7&9meY;9LXTW^sXs?*dR*Nim^&@9OjvayCvR3q=r@(Fe3=qL*!2FpgMO>b zxd6-x&rG{0jzDqh-v^xkqk5+szqC80TBgn16vY|sx&#COu;Ve5;`z9IJ)jP{quJQr zSvh@%id4=cyZhO_mMr=qwi7#x_mlhu!3!^z-CP(sY002f3{6j($L@ruhljD7QvH|Y zAG?26#H6pJaYtpG{{R{+cY)Ll5Oz!u=_Cti+@L+xfyu;hvM?x9J~V1|dQC1VMQXI9 zW|r4oFrLpSl)+BO-e{L6pjh;i0bsKM8ibLU zoe*sPt?9_Lo0(x_wtQ^;M~<~=%oZMz6-Oquq$RxaT&N%a&C0)unKbfQJfCHdYx11b z{73X=(i5JW=&XIzi*bLZx-RPB9Y?TUq#$BVscqbaGF24Oil>B1Sb^g}7fp|tTP zo3iE;Gn4C|dF}g?H4bT0_V>~D)dqmV5N6vMK}I$3)3{68mJkoG7y==Dh#{eesnpO) z_zUsy9E!qGgSUI#;&*{^9e6<*LOyqEp#cJy(n{Vxa%Tj8$Xw1QznAnME*wX+!f)aD z=ZAjk%rNJX7w`sDQ zYPLAjlfQ`Up9qZAJd7I`V@{fpC|HN)dMC8dq1h+wa+(Sw!oyQ|Z3bi zh4DTg<kVte(5YL$+n=mr8)83pO-zfp}?&6klOZEZa8loiO(L zB<%oF`4^MU$nVu~l<3;dK0?@k%f0^#zyktYZkNwUnC(GHFK21vItp1z?gMiFeu$Ca zLbj|F2GOyZwiu;$SkeJ7@osb06%Q|Of}${>hddTK zz)WA81fnpgcM&|*#BD4Avbupq*HN#Qn6SeNshBnvQ?Jr-k{2xP=Vwy#B5bzRT-;h} zBB(!4!i_SJn>sS=o7{=RKA8xl|5>_XE457??MXd>N?KO=@WJ>ZxxiUQS<9PwFUC3n z&qkHw?7YJ`jR?z;A_8--SL!=`s9zwuKW)vhY5hEBLuX#UPxG(JYO{&E zmgf1g;1*Bhg=l4N-%T!hD@;w>mM!x5`2IV*vMo;OVAdMYh3aP(F>}|i9I`s%i&(!o zME>6NhHd4BoS=VYtkiuCLLx@in@xOrcJC#G>U>wsyVeS8AbT_RzLc1DjjcP zkr)o{#XOeuLnS5#WMBZN`8@Smk@>k${>}FXq)VMpD4Uw8nQ2Nq^0f&9_p0M8k>`KM zt{j;%nZ~~5OU?41(lJ?3$mWkY0JFAB5>Pf5sCG^tTF$FsYXhCWw<^Jd1Cw5}FZuj^ z3@JEGAV`2gZ7oH=dA}@_pvahZ?-#IPOgu^xmPM~0x%o-F6Y4eI@nie)ljzCs1sQan zip6?ox6wK^ZqRF8UFNaPu(Hxpz)IB7*?GMH{1ZjTUNcQ{DY3QUa`39!pI^U^al!&> z>Snu?GQtP+%-_T$(AYMN?xQ{&jvRDBVwOz8{2IBCvDrl*G#6Q-r`{&gOLRbs>mN@7 z-`cEg1O{I9tTq~-U2ahV+JLvLL~&kS zF@wbAFJc%5g=GN303n)7MVdA$)|tgDAU{y=OyhV#?fn&d668G>`koyEYAfx$BqtG1SMD_pD;qi zOPH7HVa045gs4(S$@(4b)zBT%vJB`m?f(+Jnm4cm49!0~zAF!a1o*w&K6tvdyp1(! ze{kSvw~EdtFII*nEoj`~B7Rin2CPX%OQSEq5gM&HK}2;`U8c!F`aH~tY=Gb$XHPv? 
zSh}s?db2QX-0pw806)EPPnMvB#Y+4R%KQ#|M@HJ4{hvOmvBAry%>21PGb27-`pXj3 z?^Oh$wO|2q$6gLs))PuQ&%~B56SW#(xWiRdpB3)uCP0Kz>Ng+R*}GmySbl4CZTRLSl>TCFC+c^=^+VmY@8M0 zTd&(7Yq!%z#R@CeBx0U_WtFq~_4W0ak6w1GHhR()VG0I?ziZQhAU{Jw-|AbD@5QKp zg8TR>ADnjzUOor7H0abf5?O#erZcKpH1FxVM*IiepGyI`mW-yrDLji+#?%n#r!~ed|x$t8o}GT44>V zQz@&ZL5A8PeDXCraQ1(*%6pbbX0Kg#iqmGC+nu0@Gl62{Eb}KX>-s=+q-ye`1TIt; z#)mRw$abaj7Jt8Llt;TXL%!rqx`y9So{Rb-q@veXhBh^3tI-&XkVfB}GLZ$iDuj4DUDf$PpQ|ZbI%VyD=-mwVowx zdRYXiy6yO+SjqUQ3g~Sh~jSP57yn_)qg+(h%+y0@lsXHkzuNoHI3sH6dzXkgi_9*@>!jIpbjH7N zbFXU7kfJ5D56N{O*LpUvf_wu+}Y$5F*{c+G)R{qFhP>;@>zhjcd~11)MSE} zMUE{N1yRLGIS)TZ4g2USx86mDQXBj5F2+i;!oDZ-AyC!?=lAz2@&JMDnn*+G%xTE->RdlLj|~UPzZA9zVEk5q5roRPgrH?(>53 zEI*y#mZ4eP**}FkzjI%yefUd{%hFy>8Gv5?%5abvBNG$uM?o1Jmz0zgMaFmZ6#8Kp z>t~fpMpJ+Ixw_naz!de6wrGh^8$olb>^_Lb{(MdZt;aF`|9st6mtZ#deU-s(K{O|% znFyzB42F}$f(d*Tr6|lEQri4GmP>dmdXsxFdJV086~8oUX^i5<&l&2mF(gb(7^7nn zz9PmN`m}a3Wm>=NCn_$jj%XhhL|*;JLfcTbvyEXAus+}Cs%zHJZ|d=ZzAcJ=5x zW4iPH3vp2=cYj6e4qJc46LL(&`F()LFl$=~SAhixX@|11|D-ruFH8g$jC41jyVcy% zZO00+=H|E$zFI|!l!R<&;+W2&53FQQBaE#=XQxHJtywL zv{S$vPa)8hmCNc9aXvU>aSqBjZ&LfDpL2MYX-trtgV- zJKzM`bpjYrVVm>sO_0sKF*SMyDgR$Tl4f}|E$tAGW_ge`i``Mx*Y;{s3;sAtSKyvlht=D^`L2WLG1()P#?0cR1|2fN^o$Lh69W@MH+ye zV|tLgS?$}6vm2PX>}_mo;zKwgj0Z1#i`XZ=2Y8r)ps5wA>8(cS&6MS7lx_@P6I#s= z)DNUqOrXDqR~>{`tNFbG?OtAT>I^_HKVdQhkU&!k0inhw^b2o_cvA9ax7{EY{QFp0 zEPR|pw{}jMKDvZQhZR(m%sR}Te#;EwE5tid>86n`e9#TN$2zM?Pb-Lf^0pVX)b3ig zF447oIWtbCXptQKc{7ZHACqkivfsy-QGGkP)5c`EM_I_%h74Ix> zou1r2H%Ou|Nd8za>% ze+?d@s`6ji{s9kCyAZCZq}P~%-vxTL#qnVUhwZN2OJbJJ+0dCz)=*3qV2Q8!r&o~{fr*1XI3+kLRvf=#0O|vTG@f1D)3r$e}Or)zf4qBAhls z^#~N~*o56?I7HruNY;Dqh07vWJ3+s)UpXpKGl@3I9XWyuSrA(e%?6BBrXSfbqy?`h@)?Me(J)*k(s{!85@v!bm)IsK!} z5i5)qa}n}Q@V#%&>-cEgq<8$2o>WDm!PU!DRW!>&hUU{#ZE`k-_M)1$$e~vkDJ;u~ ziO(6v#8Xbl*8irw`caVm5$f&m`MM?SRBqkK7L zqc<}R9QZZA()&14ioZ-j=^g>iJb#+1f(h}d4LP_wu9&Grxo4D~cSMRN)a;ptG;7#v%p%bD}og-&j79N*vEr7b{GhM&W*j9O%P6$ajp zUI})0s{mcM-i3|ad|x3my?#UisKC;(=TOiWSt)>FTXj#*E?Y>Ezh9H7pp;4P1ypzx 
z2`03AC8A8fja2*j$H(x#b^uRfuv;jf;UH+)>Y%iS9-viomw(Ehl17x30EMlK2Ao$} zB%Zr9&QSq*8;rg}k0}n4J_`Y>`GX1KA6!*}UkU)_jU3q+V;w<+vJn(EcQv62v z_eq7t3wGnU??8*SXV!(#va+%lbl;M5(FO(v-kG|?Z&HY)7yb0=Ron6nUBS=FggBy_ z=sEmq^Tl|Vkk=IZuY%9|c2#TLm3@e^Dz*^=aLo!WjJqrHL^RSa?ojE;X}nj30htT$ z<>%icBO|l3CCN)r0EE%g(?5vBNTzRiBfR9q$o*s+W9b;`C;1q`Q88o+S%)AZ_080*}Z zvUS;CqlMkU*Vc~Z_xuh;v*NiyOM1&8hfAkM+iRCRv8#xIsuq2-FQSj$N1=Cie6Ql; z$5h}(0hN4#vhJi$6uw?%2*V8vo@9TW$q#u-hox*AYeFkZHN!q0&^gto?*J?rbUWou ztGPEB7SRc&k~I42FYDrS=o?T;-KqvO5E4VhVJ29rLiq}&I7Y1>2pyjNKBK$ zCtw9&$k>>*@uFtW^Jaj~px+%t`+neEeFz*J7h^?Ps8uBY=&594axz8z6Qd*&Q!r+i z*3fIBn((cq7RwgxB;3h&=ey0oku-=-M)0K3OZIZ#HN;#z`EgJ4Jxtq@TJ=>vkR__wvo}#3Y3kc!KAug(hY} zSmVCuBRPe>VExu;opE8y95KiJE5o9UPzI*H{9fM{&oij z1@jz_=jAbVmV9Pb7zS8f(n?WdV`IQ{!>x2xT~)O&M*A|}>eaKtmHR~a*^QpMQ;Fn9 z6OtxcL#iuY%Dfl4O5U`oY#QH75(_sz(fPFOXem@ux$W4dX#zSGDUN3{vnbk z@O)bsnL9`+fOE1jl(U?{`<04{Y9*S85158P-#7&6OQ87;1ExrBsitnsb|oRcWzkpBd4Ewk<3yR(!W&sOBKR>7K~d1Sf`&r{U%l z3UOh!h+l=A0Z!=598-l3F;{pAG?N;X&^`JtftQTGo$FD4I*C0S>xF*5na1!-tRRlz zNXyvEooAwpF_+>1YI{$;R81~!`gYPNK5eBSK@?BAKvKZ#@-#jkB;sTJ!>U50Z=JR& z$Y8ij_1t`Grwp&f&syc8{CRj74?eFI4lb7SGDDNJPJU`sG<%vuiW5G*=u?Kn#^Ra; zQ3MB0sPMIyg6?U@^uVXp^S0D(Z-#DyxsgrxKp>Ey3twxwKs0``C%SPzW9xjERxNPn z%=!nTXPg17JRoj#+Qs~b%ftLBTYWmY7l1vMZ1bd?!^UkfI$>M$?7@h|AiGc!Oqg^L*huS7Wr2H|zYsHuLkbwyL4JOF?8~^|;j@{WWa7J-TWtTsWx1<$ z3BKrj40CI-=#0pE0v>u?zq?DGJRu>h@3C{}zc++`qUfI04L=<4nd2KUJs5%Rh)s-) z6x-Z#5*M+*0d?+)@vMp&3%oo#05T^sj((sgw}*c6D-o-RS=BaM7mMt;XOQc&gDZN{ zuilgiE2!%N?HLL+^MRqUmpVs2@m#MHV~GhYXcXMv)&GBly>(nv?bj~OW1^yz0+Ny< zEl8IVA}t`@-5o=ZqO^c?gS51CGa}v6-7z%MG0e=lN1yNKeb0N&`Th1^Gs7@*&)#d_ zYprWt>sqt2%=SBS{AP&T?>R8KE9vRz3wZj1^PsSk`G=u%3i{q*h^RpgdbW!o5@v--QBNb1?{{=7B)xnXlb@i957*lnGU;w z-EQOW%coBlL}z)5R5MZhSGVT4LJ=JMrS=DPpyP9WFdZ6-OTa)UE#SEP$;d6Qu#g!K z8+1;0QPA=I2?!!WLZSrU;4QZDZqPKr(}RNKIpMb4@NNoVMK8OrsCQcT08`V` z(cNe|Kf6HCNgwL$_yDhrR`S~Fxld;%v8xDgpVx-YXGujcVS2wYvA~XklvLm#Tt)6+ zVRDkse1P2aAsWvGU;pY}=kDqC6NMV7ch+^90mr6hD@DPRa1gC<%xi?NfVxrdF}@IQ 
zP2)Vx1GA%vhs9mQX*Ka8ea3TCB%_Y=XKf9Ey5{ZoG8bl?FN%e{>T~C4A3O*!Evco= z3x1&b8tc8|!)~fzJ49UGQ85*tM13WdFHO}zZS}XESfOaAaVp?2v9DXHvN6s3Y8-d1+%~1H|}dpZS?B{q%3yZR#td5IU%WScGcj z49h8>+`2T%%woggr@+fmzIl0hi7rm>KwTkPOO9at7@YLv_8qFnj~_cpVFO`FiJLmN zg)M-DX-(~@#G+sYRsn)~%&d6c{2aO?6b<6OLiO!e*~hs5nV?FHg;Ak_CvQ(sY^icHuc)3AWSbF=aM zSa2Es-yF)&yeCdE4>mA1l7Pno%-Y$;nrZ-aNu{yKl~2x9mb3}Jy~3ibc#q##<=uFi zo{p}j!^9%Cc$~ID5325&vujVmqwz*{=Pf-R^r@ZK zqRObV;|AR922n8^HD0QL=9>w*?3#Hm33 z^LeS$9=-6fIF*(e8XT0sEag!#RDkxT*@Y$dhd)q2_c_gLcU*u1J%LRjlmXRMg?IW{?90F3@b29+NZ;o0Bq*#*>@ z6_4J~_ICT75`MG6M?H|yyH0`EpU@Ntt!wFCZa#e~5YWHS`9`VWL;ls&#?rzE`g&Sw zwru==u*BFnL!kr>OHZ@jer>btVDQuZ1!u3Rtg|_OuJE8uQ9+Kg>e^z_E4Sa}5t;e=>_mwU@C}_k=ajY(=gFNB(N>N^3UR@nOKfmVQla`_J z@whKv9E4R!nmxXrG>JkTn`fY9yiZQ`*t~G+^Pe{JQ*AT|wL#Z(xuDHf23x4LfEiTN zdMNE~T7zABweKCO<11QnVNVy-)x`>ug?i-IudfMnn;RS8gn(X5r`vZhRBF)6cLS1x z^Y(KWD@vFfbY<*OtaO}`;`%SxKvxu_vg6|7Vwu(Td@7o}Iz5-b4O}8=+ozSqEt;n4 z2FWo^9KWojN4tkV*DG4k)QOJKC#_pxdBUkm=^U$?{;=LFfTyHh8$CKKxOXvL*}1?E zhGA51iwz!6c0&d}lNDVdkUpL?N=6ny$hV6@Of*yTzSMvZ&x|V1l5%G=(@O>`1v05Q zy}>UGL$r8~(@!28M1+T<#%nl~KnKESs`1Kvr?Z&5A;w{3J)tws#mPs#5^)W@BbR0^ z5<(}GJVwUGfMmU@fdAqiFRXTP zBaZs~`A3Y`2U9rb6HARi;{vCM*Dq5OlP(Hs3j#vI9A@+PpEWQF#D%n#Z66(p6Zdhd zOudrL8)l!`rOb0gJW}AF`v|ly9|T6@;2yOID7J9O85u2~?&6n&tQ3j&EpIxKjMuXEe=$)&}pZ@ds6FjmKauEB) z%3tyl`gi80>`@;3qCgae=i=$nh*fNbAK{;vY${)aXU7xA-aqQWHQ=`dmh?Xvji@I~ ziL1u$#*>D>n^xTH$6efd1M)9&RKbaNT)%L(WPiD%#C^4jvR~nM&;}_!J_>#AYtaFx z&aKR7RGRksu;?vs*J^};wL0SpP~9o++PR3(m`(cGSiGL;>YtQueqfYpD;{jEX*o|o zLv!gO+ac>3rw$>Wm6h;Y84*xtSNd3(eB+F|VNN{hS)-%UuMQ01bC$0LsoOB4KQg(Q zt;=(t*X8bv-NXl`ye}cmP!dsiy%*H$>&c0t)^#5r@tG_V29CD~u2yTPz|7o?Bjr9| zLeM-!XLKYLdAW@oN#0B}^-EIJSJ5fsh8TEG9SzifJb4Z36CQ;jYU}UZZ||9Sakjpe zfY=rl){s7(wBIqdH&uuZ>PJSujnmUl6m*Hz;_h7*^q>7VuJXx; zu@C47xvu|A*?dll_6U2;Wjp<1CU0N@?OwhDH2*#_J|d*ZA+66&w_`#qe>}Q4*Pz*o z&hE)#8*Hbf!Nk&gsdu{Xl$GyvVFS6?5KS_0@BV>dAfQ?W_9p=7SX){yc~HtCTfY5% z1#TZKUa?foy>1X(hwiB;#H*X|rz!q)wLIM3+_bc_3*T5qKi&6n%SIN#{USnxQE>rr zbNnk!gADf0&33huhBfv~2=|DI2WeWB|?4W#U| 
zj)on+aN_ek{$^BQN@HbV@wRWz#l_{^TLhplpf(R=?}3_d)?c+A-md+k$r%0PWTrrl zUq7}!Ck_(FHyS*Mm$^sts~X9AnrrWQ0hq~ZIQY>^g}%;e1IaNLa+X6zAKAhs zT6T-(bWfes!t2e@&bR7D5xxZzXD&G=r|lQzClLKhP6 z3CG(-1{q#5qcdKkbi8aE`&Ss4o}=a29+7BAR20bW38IsSc*^FX61dW1R8nb6M)o>6^< z>qCvdBC<_Zu0uupd^m_yRHpsL+OVRsz>A9Mo=0Ce1?0e6}z&ZC)@OhtQ z3j)u0@wY!R>@8V^TE+TPbw{#87KZCoogscBO4>@NPW*@j3S*Svm%)Vzs3BUGihKro zWnD`0WXqUj?2CN%k}k5XU2gij(}&3!2)wUWp>&_vD)&;YN1vyO z?`BGv=zYPX;^nXO!ZZ2*zcv4!2G`#cGihwU-kBtu;g<#M@LHAYyNy#h*EUKxdVU=0 zVm3#Wz8U)ifs#LHM>kaIzZ69yKE7@?1zSWAb?ufzoPd9Z52A?;du__~tLRXfoV(HV&HJhSRo*M~Td)MQ(-c*X(cYV&V()d-2IE)) zq#0l)W>*9OX=;TjIUFPbOMGiO@YeE>q(VA5;2qO`Utl!mCZt^`3iBPqeFS?=36{j( z2WOqGCdQu$3i1UMT8ivlqn7~TTb34~v$uNgcL!+Ov0BTqERbhtP5TF3YwyAqlo9Cj@;pIACQ z!>zG4&O~UJBs|rnq0zTbcUfcQ%`7hNUpWB7pgzTdEV_q+I^q8U zEzJ&O!AeR!nxqE*r@74yGsb=|$AD?1Ko{-7KE@cZ=)sn|Qqu;R@K2+){W$@@%sV!| zC3XC?vlD5vc6D3XMdJ80J>}6!Uhu~4g>g|wG)2w%XCUTLbB=8&SvUsHfCa|T6aY?# zv9vEvR}Zdz#R7sSBs#hPEWp~Qyjq5dSkoF&a?Z1z-f*L|B1HNSd!y!me19-(5o`6) zw8g2l4@E_Vutt@^*4Mi30So;rt&7FhyP$~<%zaQ=8XuH23#X$=huM#^o{%@#plmbb zZv$^~rvizA-Ux!EZ2aTiAs>dBg^09A*$Z+y^?TG(7E{cN=ha^VyCOjPfR;7s9HU&T z%sk7?Xn?89t_v$MnT-Oa_4f`R|47u)EIiX5@40eb~J?Gto^wqZKHQfE1Y8`l8;Q_jJ6!1=O zqO!B+HbEcu#>N*=&;Z&e2Kk~S_X3dOCLlXvVD%t}83MzWD1_GZDI~7#6Pr6pm~i&H zO|Y=d>#~}O6n)EYEGhXz97FREWNiqLQ80Pgpt{riB^4cRu1&k_Vm`oo_m z5d`ATA-aGO3X~;{%-97zk2kI^j@Cdf0Qy)%rc%ShAEXb)fBEtR@@3J&#s+MB;ZbTm z@1Oh)MeqmD*S3@GrCJ=-g$SmP>Jk>UBowa!uZ(1EncH+?kW04&f=6rn<0K|W<9Sv39N+63c!@HU7t^|AK0V|UUS{@!AR@U8sJ*S;x z&ESm*L^6zjLB0*|xr-$}j89G2wfr(ob40IS0bo|+S+-(5yDI6ZWI={P@zJbWhb1xk z6FoM-Cf>ttiHVG)hJ5-86wYJlHm7GKgK}i!3&t1^3|!pJfyuouuiWpNd7I%=#A)B+ zYU}%Z@m1^=X~h%atKY4!uz3dFF^!$zM4`*KF?F#MeNVmf(kxwK5Zw6arKbmk9p-7K ztdF2=DX$I^O`ShdooNBgSPY%LB2{?5y^0{Cg#)Ma8N83Qm&wZei&06;hXbIy%qIA6 zs*6=4A=FF@kuRBhYXfcfP95tm^*t%nM{cRBPZ@xrOCQ@|txFab7EdxCF{|MeL;e1P zecr5TDpY61pNqshD@Bh~A}ZJ=^dmxmm>t+YX(1=do#>RuWh?Qw|Hzq_<~70a{eqN+ z8Hp1FT(=rMjzkbU70C?UUt|G)(AO6QMhE#0 
z3fE5a7X=5(mXAN=-(xTKW}a}MNcEudIUf3mD{>A%({$0xp@hD2;AGKQb~n10k|aR1by}_;*(Y=IS4sn2 z{n@1od~h9U(kJwe9WsnfJ^Fb<74}%1fZ#`-Vmc+zsaIqLqH;&~=b26*d$`-X@o$WR z4ByaDbfPY$UfcF%zrEEgP%~2uwAY;faaK`@HX84UrrT@_CIt5Xe=E*TB+TD>;%y)U zs@Ei;D`T1EDw|VK8vvFX`)wqI@Euo4xw{ni2ci*`*`?~5(Lj%^NwkwG+f_7 zlYN;GpFQ+60GAjj6Rsb)V^VLa2ucv1a-0lmWZ?W?3O#p)F%AN6^V6mHNv8U2l;r|v zb}ZBmy3M@)d_VgM6}jcY0h|Z=RIYl<=X8dpYYN_w=)8z zdxa>Z=>9gXfLB>ySoqe-S)>GVtvST7?;b&1f4b&b4jkNzzBV#6nI1Lm-APC*A~v0G z*%`e6GmQs7>c6mo*caC?F|}4JKTAAU-uo=4)M63hq}bQViLVgRW>fEf}hwTeOcHo!0;W4laJAg(Dq=M_z$QEvBp z%-sb})RV&)^hIwyyKwYq!N5^-Sxr=JLO0ZV$sSaJGcZFD4Z+AA@V=LH_+ALqX_aC! z?DSNc06+g7P8h$L<|8WJJD8>iyg%pUeb=FK`w+v~MGF<;@--_Eo|P`nysgZzGaLx; zZJfAEGGZfHNhLF@anJ+mtKl4yisC@unNC2s)^^eFPW`f9nKvhMf&~P0~JE7u42Wg zW)Pyygz3aereMUfuO-|2(F|8IFfhEC*lT3cQsg1_b7bp|D#~e6HT}!~NwT^!tDC+A?E+*UsA+o(E(u z;s>uS2fT393coQVzIqbmTZRftIrh*Bdz}Rnq($>q(e8l`G_1Qv?TPma^^h)Oh0sgi z&Ag|AY@eGWB_!l_vcK)mKNbZo)*$e6aNW4T6&;e8lOV7A6w_#X@k~o`1wk)4s6~#d zM-Kj0hZWLE>-%1;sVEA&p7i@7W)^O-W7y2I1uxSd<`_9GTokP(%UYkGKz_Xu&pV0_ zA~6bjGc0cyKcD~Q0tj_HdGYq@c5>24hf>W1SGM#qHnQom^?OPjecmE{2yB$j{l;>o zqr@2zZ+*z^nin}a*0bJw+SQloT458HVHe)b&d>&sHHdzBL!Z z`#Pul5WW->_XH{r{vn}{A9fU;DhX1F`D`W4w7{YZdZFZy+@1t1E_q*b4Rp&qwi&^+A)x$L*JvH_Rc3UOdk6K)qxD}Pr{ zM;L-hV(wwa>W1O!-%+DKt#w@YwL?$K6Zcn1I-YWj1F17C?8Lr>tIf&H3M6_DmYKfE zewLF#c*%887VUeiUZG;AKCpuX7ls|kCBEm@Jln~Ihu-Dvy}_N{oLg&Tz4t@T$G63? 
zcHa2$#>hl*m1?l0f9YxhFJ8@$6vTkrHfT-Tm=9B>Dwjmt^deqGCi9`#ocA7R54F~p z{SoZ>IomSM1$9gfReQdh7Wy#^seX*hdRbK#9p9Jw9Pk8~UB`Wbd8_$!Y;20UP42#) z{Qeyi{@&Ns`=1*Nqx3Ka`1*s)gV}&!Um>BYU>QGS9cP}Y+v(Zc$q*$=XPiSzY{rsp z#XYjoSL3j|02lI!3nx@%W#`zF?X*KKWNYuSvqVXnF{DIMHVNO$c`#EsKUYC@;LV4G zgy-xIK@wWKVW~ZL%yC*B$RWUQ-RKQZNnp-=W|o9Qf(tU~LxZ1C0Ixs6U_`F^4|E!Q z9V>hEd1Kd%7+7V7MdBUdpKGQ6VLg5N^fofmbjhuOn>5Ub&A`seDoGUD z4BV~w{{|Po1WD8EE5EFq9Dle6y?orASJ_bjr32fq*r?AY#w`>WmqbwLd$CO3Q=XxEE^TIJ3Bg(N-Lf&fCI%Grz zu(AK2nUnN?s5SqE2E0!mU9Z#d=%YW8rlLZd?G$I*J0{`bXaFe*l{^KCee*ks2_%yD zBY#;{Vc-=&ehbI2^uDt&gI}oJ_u?@J<=ZPoi-cWpITv$xg^t%sMeW?u38iTQ9(xp; zMR|J4RZ6_jCOf)B%f!+?0j&_Gf3LaSq{ooHmAzv+A%cAuCS;)jy3^V@_@Uel7OEjO z7?TZXwL#vfO{-g8k|y(7?;EgcQiTQM}a;IJ^&Nh_$#Po27E$+i&iOj+II9p;F^6o@m< z1F_w{pU7dy$=||yAM~X9orZqp-Z!_}QO}hgq3!W&6?m(P^IyH8!og`}%%>B-Pgosu%tPDWH1g1m{Qcrz=pu5^nq4bZ zMWo?tcIIbdGTY?473KNsS=U;_pd%NW?Mi_Na{Qr}`zWAFK6+_s^HnjarvO?#Z19uc z0_=GIIWI6hBxA{-p|hT)86tH|D%A+%q6<3st!a3_qe>N5&a394<(q+=EA}HjY6ijcFxlwDaom zc6ue{KGm+M&d%yL^FC+j;hdmfyj$Zr(wCm~1Td?>;k>*1WHqL8fC{r80kkRM$sch2 z<)@>2lA!z9&_OF974*jWAUWY)9>n(vfl|5z z>RY#xuOqQI`UIA^y1I%AyZ+%Nnt1!$H^$hl!?oz8UwbYBK&HmLOyeirAZs-*FE8|D zOnm_{e+HfxSKhw%56xvK^r{my>-vkQY1=>==A!6MU?V@N!MmY=@a7AH0wDv#C^*rK zkH|t{Y4asw0WBhTI4k2NC3XAN+dqc<9y@Qn(s6Y61jmv<_!@rRT8Zn#x9+4PDU{UK zBShOhv4!t;N2un0lxu~Sw)SnZz-rt%jnh*U6-K?7quY6VmzYgw5(Wh$1nM^3q@%!8 z2ody^GbyV1ZcWO>LWy$z<$@(xgW5ErcK`r<;9o*_C4h=AM*0^#VcrGzvy7Jib8?_B z8Wf1Rt_(s=hLp@HPW88MUmz$RJbg^LUh3O|UWvebWd!upAKolsi>(Y`h%W33q#_^5 z5wORQ*4$E#iogP<8Fp^+{L1v|){!UE&7~U~+kF$T>)pVe}{gI%PN1Zt4UAV_riY$y#|6si=Gt(^E*Dm@~EXG#+ zGeHV`3D#S_hv=_(B%_$eAWa7FKOuIH@t-H}gF*JqczeL^&oeOCq8%;q;U~$TX9hq2 z-k;#fS1#cYssOD!Xz7@a}z| zS*Xdajx`n5S|-W*eg|S?yR^lU{Jp?MKWjuuCF$ApcyZ4HnG%#Qvtn7(d8>lkXtqn6 z<{;q-IZHREW={%BA*HXl1SO$F=ghWB{G+k_y63z~9nW8>ZCTr2%YZaCx~oG55Nl-W z6%r;|ZS{M4s;94g_RLmXMX*3iV(~m<9dKqZ&kz1zC_^vBt{M6FsR!0a1wFiM#=%`N z9Rj#T1?OLL-4iwUOlriwO}>2xW9cj})!Cz~k(e}Q6)|JuS4%0TrHKxnNVd`(r%GIC 
zv4*Hu{^qy0s|2JE6OWc8gm3-sM^Y`zNWm#rL1i=H7%RcIiyl1mPzx{cTRzQc+b&0| za)-L*Y45b!1-BXdE$EexCDvt$z#YuCth4Mgz8L#8{yL%xpS`_vQZ!Zmnc_T=TRYA% z+^XT|axc6G%GgK>ZaU@AruCTl#$lJuk8xb3Z}POfY-`Vs!dgt`^kJ|Kl&n|Qxb;f+ zO4!cc^M5JO{fX0WSfbbotxqT0AMqyw0-G~yopryER~6z@sRd1DZ)%^FS{8${)aU>U zV^INHe@Y?66lG-R4ZE@Gq#k0q`VHnk>lK*OYFell;~n3X;b-W=X@~MT@d#QHi5`D^ zB^r^Zq5yZrA=oyf*}@|y09EG2hu{)yqg9&H(5nh}BL_EK@CI==hgRJ6_8~lJQLIC( zhWx(&4Bq#pwyT+V@qIJ*9Y5_HO=;7{g-q-E@+@6l^emHkllWq^213+4XK=*&q_) zJ#{GFRbnf5u%8rN9GT}3d*0d`JmGT5yQD)(rpRhy*(`ODm#4eOe}uRl-g zqJ|`BuMi8Ru1;%el-K{L?=5hh3e{)PsS4P7VDmy^nO9)k0{L~eNr0E0I$bq1bNWkD zak+aiL4B%&9Jc*k0fsJC0$d>u2I(lkU2&q+@;K=~_VmF^uPiKHI}@BoRru^{khvUN zAcFE$PKbQnMS=#sCy_ zC<>pv)0utbyx3JxQ4t+P>9HEQvB4Zz)x8P2ESj2{h-4>qHzud2FZ+2Hx=40(42~B@ z<85uhU@G>j%@yhauAi~c^K>YxZebs{RiD1=N#g@9-T`g>xK!0io9f*vQrc=7LfrV( zrhcM5W{z*cG%iMsz4ekX#=f4pX4d$pnO^sK#+9tgVkssy>gDz^LUzuBK(fwOU=$3i5cg(Qd$bKdJ%lt=?k(n-@~e!qVTg`vcNbJ2w}6WQgZFRJ|yG-=88 z@aUH7bm+KAtcP>YS*+PWlwhCykzh|6vsro-@hmeWYVntoBMYR)(SeV@fp6|xHNW<3 zCCg#G;ao$}IPGRm|F{hWe&iAx8O4gTuVJsY5?)8evMw<>;m`PCV5H&)RN?0yLREsP zG>>nGgZ*(z`pD_r8d-lSXb(K}qSg9i921B@aMUf*8+jw`hu*^pXV{SWX;$m<9!C2! 
zkwSjdbaG&6T)|wR|MbWFr2crVD~Ske?&L$JWZ8cFvnEwQAH9=m7ipnUV{IZI1qmKm zqD!S;J0$zO5`Fw`+vRdIX+fzKbM9pgJ8#q({)9QSu~uXwb{o!glMF_!OTQ6)X6}ph zReJ<(78bh1et@V7SH&~Hhzd?8HKW_f^FPM>hvYdI&Ckwx(OhYZ;P>~lmfG8pU+w=o z?QZ+-wr!Fp*LZ4Hf4rKkAdX zg?v@Qeom70*H-u)qH13dT55W3__jU6UOPGL(?Vy&aJDGHarvc*RV}@NxkHm+UllW} z#>v`Ft;)bY?F!lrvI6&l=K0M{hL$C20xTi+u>c~Pim`uf79K71JDWazLy`#&Sr7UGnPj=qgHL{A=m0i~(*ROwe z2+b`B>=53|RhkmDSQ6&i1oqN%MsKjYUa>kzGiYkif`R@eEi6x_{XGXB>l^*4<2849gj0%u+CHo4ac!2 z(q;NZ6NPOybf)9MrEv&9n@FGzi4I#%ebQseETPuN|G43@+D@YzYCuNsA8x^EGx70H^t z*raD|1ifFRbdWzibmIr{}6j7$=HHbNbuPH?6f7vA~+v zO$nJHY(v`_H|u8fd)L{kEON7dt2cD=5j==nEvXvs+?>LPTKzw``eMh#&jjWN20hE7 zQrp8C*BYM`gxwSjVLgb~$~<{?KwCq~o@b-Ue6+ncP|>U(Cltp(!S1qS;MXZz&)vQ@NWyMgIMm1U ziEfnR=lsu~x@>VSA6W+$RyBC|K>Ly#;r6t;^7$&$hr*fW%Mci*ockV|{-4>;dp(^l zJ5kFWvZ4)o72gjM*C_e9P6|Cfn8!b?>7fvh0$nRM`o`aFiXEr!xsTVDp$_kt)Uc47 zPwmwoA3ZOOTssokdZ1J?-!pr2J&dZ3RG+7;;_&qBLEUDn4%-0D#D1AwMdX<2KBf1m zu^Mf!DQtnY8yTWYpOCU&Gyi&t@X7J3J?BeL8qdcwe5`7OKwqi3q-O(#FR*wnjjKm}gPp~<5sO&;u^S&=R zgbW_<)(k>_Q-Q{@z}HNol<9Wc^G@i8c<_TLtPOsUGy|J0%sI&Tv7>clq*yvu4I z+Z^9Qjz6krw#RRxjO0_xJs|5aFCfK@(YV#1;tJ2v zVri%zV$>m|rj@dzs4Wt^%>OK3nDHz0isKk28miO8IkmOUcO}w zeW9lYi+=eVWbet#t^(7+ilm2Q9&sNc2vb<8VNO1DO26+u%)1kvQD4v8IYx+Im!c3U z%c;ZGJcg-G(VI+=IM*xPh0@Tx!edn6I;8RcvgC55GiT7w_=n+Px4a5%@4z9V+~^vDu)+N^xFOw#ZKN@w4L+M8rI7>?UefHXup*`6njyXyHqnK%|D{ ziBVwgc!Ly>EGN^rOBiX|Z9vT9!e}EVXtlHd$?TL}Enun4-pI3fF}->$6;hz!Kh>z~ z+pc;Ij){1-=|lpiWn)W$U=vU`gi)*hf@ z%s2-LRpi@Ey^7bRT606*IZyL#QI6kc_WXoOd!6`LZ+hR;^7qr|P2W@p$m>mQZZhIT z@wiu@8L%q1%MqH7ES~-FVQT3hB#eMXXx@H@CCid%h_hnNPDWSht#YbnJs%57OzKWG zjdKIn(IHW&+^s6EHIJD+l_?MD6{>G@aj>b_qX9c-ko;*V^hT-tCb(Z>OO2`8soUJa z5QS~glCPG?R24o{-ed8Au`s`hM)p4V(dM59t$pSwBpRBMmlCf%O`#CU)km?S?DUwG zgJd-G*JHJhkdpw9B8MK2x|$I%U#goUKHevzEpqbV^})NTdtY3Up`E?uYTCNiZWE2s zf~!$B7y4z-{dB2g_IZaa7phu)JrX^Q_GP%g-INM_a-_eZ>Zkib-{4nt@7UuIouAEj z8)lTuhB$|C2>Us5Ss&QjW_%bjQ4c!8x#c7f*l6H4vT5WY^6X&?7FLYRWMX`Uxc0l< zLwB79uDS8fqAEi!q5B5c04TvLX( 
zXB3n*on{8!qhPjGonfgrY>is=ZST0taD?bDTbW9^ny|X9*dNrnO-|E0xV(zGMige1;h=$8hE+r= zx8~37azcRR8h1VOnn_A*BPi<#WW7}b&~6VsECW6qL|}4jQaY;~Y$55WsKj0OE`Lv` zUI~|o{a}nw5KG}i=;@U?mo#G!=T<|is%dgh3gl5t>6aWuT?cP1_!hmDgKPAnh)Ipt z8!mrdw&gl*e-G_`$?dz`qJzur0y6;w6XnB=^whm;MYaxZXHhfHzHAhT8ay4gFe8(5 z=1+506NZW8O=$hT{;^}_+m)$OU4q_~`!|^m_0S%L9AcYw(!$-a>$cZhR(;w#9x!Yp z!nonWhAilsl$0CR^PFkZRyN7KDfbR-m^Emkj@XZxTCM~HOqg|;Lz10FPF4%59yRF< zimD|(g6>M)zouZm81UH#&$TFNY>Zq6@G%qpg7jkS)*E@tz$bDIHzeQUJ;v75Z&2h{xjv1SDY;;y=p_JA zB4w4DK;UZJ5ysaK^n^tuox8_rsDaWZ(#h1?{!O&5n?)A&z`I$Y^NRlwiJFGo#QORo zHo{S&jkSjv(nE36z{r?R`fdmRA7Bk@%`q-PC1hDfmufv6npai3f^SUAp^VOf#?~am#g*=V9DVxnL8yeNT_qR8oz_Ic1O-a-cCK zo_qZI&63W3(+ZnYUKSGFn|MaG>r|NCc=7UujdW83vhoGRf&(=q*TQZ2};`3zm0Znxt`vEqI>R2Ry`AJxd_1Gt_17yWlV4feWITv*( z*KidwI~%(qI#{A%J@QSO(`T z77sVp7L@aD1;63bh(nARH6#ahHI>fJCmJP4yH)-6WquDBA^$??VpvF5Q5OD{yo-_#2pC)_V-{5{mG&(I;;D1Z zrn;UXdRf8a7VsSSRk_*|nlc*MWIZRHB8@y#Gx!X0Ib7;R64m4_gURdm1KlL29(RAC4 zxoKSuw#YHJr;!K#hny-tD5GMvLT)>Uq=m_}c+gI!k!*hu7r!e87k&BW-z=k+XXO?G z1;^R(^Fy+!uMrAeY}XX=5RV<*qGWU~Oh54f2G1O#&+!i1K`RQV%>#!fqy7 zbNn(U6L*U=8K`OG2=JJ$?-B&{vxrvD{Vb20&=T$p>2FnwE@stARo#(epP$)q8MT2* z6uEn=q9`-aEt;^A>h2B^d;8rDw`OCl_xZg`bq<#+*hHGTpy^f%rC3zX^&46 zz`Kxm)*{~sh|IOj@bU4jSv;#=!t#FAtDi%6=Dc)N%+o}Fvr~T|ED6R-VtiHR$PrTf z(veM4TuJtOg}x`08%m}rl)LYfeqZt)@o?iAGZ`(Ru*Z_RSw zF8)lHjIR>yk3AY|b@%?hu+A&arzuvWR%H0J`sne4UeYGLQu}w5gWgPy))9-@G4wVl z^QJU59IPzdr9XF`0XE5pFVr*vR4fK0MvA+0&eYyV4PMH`fuZJ-^suy829-S@(exk^ zw#X?wWN6>$Iim-oIGutAZ1pf-c$>Jl)!e^RxWWS{-on0q3Q8~#X{GN_WW<|&F_|u0 z^>jN#Xm_q}_4d#9jKfdRnF^rD?zJ$qSJ@X5%~j1tgcs@VVO9QFl}S}%)Ln9n@hx3x zX!IaKpy-!IvI{Y z%n_l{FNl~`=R{whXvsyC;{ku|_wK|AR(mPTj4QS6Xo{lFIg&&t&74U(OeKV4BWqrRFYlQnoY z*5NFadZ|16dK$^B(_h4ilC!g@F@+ut6U{T|S9o5(xZ}xQ+=+eDglEIccZZ85J&4iC zTJM#b)i)+YY|Mtk(#|JVP59;KqM9-amPjget{AL#bnLLB{Po4}sXhGlH}OCNMDf@34%C8I&x_F0OI`f>eNGm_v>vL$c4XeEf@ z>*H^f6-`T^UxI>c!fNg}UCVfn!<#8`0ulU*1{F9M^5e-&uqyGI+3w`!l}eGsNhcjl z&&%Plh&aW1c(R^adMqkjtBkW?wYV}xRcaz|e$UqPPCB8&i50i3lJ5S`0_L$7=HlmH 
z`9GR!s>@;-^nHc@wbhM!Kd1D>^?nkDkNLDZDwv6nt zV?9g#_=3eW)Nr~3?hU)ggt!lWwP%d*gDn!#zTqNNb2rCloGOeajwBd~^B;HyXU z=uy8ag^#FW&RBuMj>23)q!hf{>#4O)?bmV-#H0sR zuqtoY*a>s9Ba8c&K6`I#TSg65+X+wdqR8GYtiy>{;ml-$U0+!T2HCqrofY~ChKlZM zr!T>q8x1ESxy+mU=a92;HZlq`bX1i&w@k}IG9M0+_~UTIrC@$6)-TvrpL>Ajg;dja zUXN%=I%JaMdWF_OrX4*p{|Fw*V0KR6)NYSc&(p%*u-j@1@G_ z=rAFXZK`+rIaP$qnDY%gpr+ZB9~O%j-3Cr(!a)|ta7qG%^?fh$QaaR zKb4)#0z-in%vXj^GUR3lqSA7SP{}EYRVa&<{!+_+h0|?9I+~hzjfQY`gnl$w_5oop zAp$y*totf?*!>2L9Ja{rl~Jy-EI4^!*ISY6-EY`)~D~ zk#LCY9!YXV(jZi*m&0@aZFDSwh*xW*AfIH>A-6vr5Aw6eDjB9yi^VYwK4ez%XlO@8 zZd6Pf6`8elVAt#Ly>6TS8Psjfhrw{E z*s0s9CCaC5M+B(G<9Wj>MjuM(Y23BAARKskHC=x+_J#M-&c@iL2e&SJkM1D6bg*aU z;Cq{X?a~eWvL2+innXX*q42qw^zn*iovIDQwa97VD1=|E8Rnp;thMK#@(_v&pw-pB z{S8LU!z=l1(#OAlj`8@5U3bk1>8XA-^R%evMDXl!wg&;UnM0>+?d!CI-Tskdg+Iz$ z`d!an<{r=Fs7I;o+dllm3-92oC00bt_2a(6a^Li)I#vAq30Ed^}=?5O|4LIU;4Ru zPLZ5oD*?W^PNuBa**Kkfde5MSn9VEC9`=agEn)wHj^4Zq6`Lut@m;sEL85H?^D!k< z!Ol-v`l-QR^YV{n;-~|tGe2za`%4crA9vCPNMHI?p_I+8)l4w3wN8#|qND#8dv6&O zXVbNd5=b5_L4yRh0Kwg1LKxhFySuvunIyP{;10nZg1ZEFf;$9v9b9Ji&GWqP_w9FA z?Ok>1oI1aD{~4;7xutvcwXW`7Yh9p|^caa&0Y?4iYYoO2eFt5*Q*B!%jeJZd%)xsa z47P>U;k`R4#H-jTnND;uh$d)Di)(6cfA^^29F>R~qt0LtD*eH40lkxRvo&w4x^s5Y z|5Peqa=D#W&d?RBF7ZZTbDR1p>JXtL_vHA#|d9v@xBl(lRAirXTv4h=zzGA*&lGe49IOTiKZ=;Ad0T4js{i87_gdzFe>;lh#LML^X3s2mD{9S{mGFsD5J`g zy?xkvKdr=SVOAc76W+CvF)-|tka?|;P&{V)LR~96cek*RQtn-0dnt!3&J3O8KxfxZ zZyAKM{;?;PpA=B1qj>(KhUhKZzl;`?WS5`g`lUHQxmXD7MleVs$y*9~#dbXk8v}2|IKV*blSM zT`X*iScIB|8U@wFQQ#(OeQ&qOdk+{69(n-IZCH(eF>%+!n;=D7WU_xqU#!fktQA6% z?n^J^DB=WDpG$RRMO``&|-#b~l(LdJ1)h?3fmP6hTHv8dKPTd7qD)o zF?t`yA`de2=WD+Ev)iNt78}=xkM%Sf+ZLfm$Lbb@B9%--IBsgjY`dY|T%~s>z}xrt z^KOzXGo7gQo6%I;FGm{@8qbU7?|WQiyu%_%yffF`Pc3Dg8~%Q3Zr$0rGFFbcij`S;YnWj){pp#KuCwz^TT6?| z;W&fWpo$V0OrtJ@r9jSG#o=}R!&hC$1S{m;J<{VRX?Fm6bKj6nb<}3{X4nGGEmKh3 zTLzt~5}eB-AGO)KtXYT$K7DqQZLKJM=A!S-U$rTnFMMHj(jVTM0_l&(P5h!@@STL+ zbtJl7XjT5fD8g?V&GCQ70_eEVD!!GHiQ@z#Y61Mi>K2)oUhOY!CJL$vE^Fzk4;-C9 z{wUDcihf5()P&KLK;8i>~O2Dkp? 
z&f>R$vR04kj$7NrdBIynD=FUP6gtc{&@jjt)Ax@uOjz5DZnmVQ5U0`5xrHj*8NTTm zq!HCpzjJ_2Uutjl?ddpQ;vBOb?4S__silvqBow{!Ro`oF{j)hTCUfqQdweJN#OK;Z z7Gem?JXs1(oZ)h+uCL?iSugEBQ=XL7EV(dT2Da$64=ROAL-IZrnMudT*9pA6Toy8m zSJ%lKzPD3vlP}-ZM-=tV`l*CP`j*&F#`b~}$8KdV-*Ry1YU_n_+wlHEkrIZv8O%Xt%ie>KdBDhO*3mWd}L zXNg+iNtWAwU*fo}pynBR zId@7$89t3I0-+k2LV-)mA_77z;iB!2KgZngu!)_X9oo#sv`R-G;%U$k?DZ{UjfR(N zYf_=DhO2D?RmCU82R3WNu{_3gmskb2G5Ft$#w42Gz+tJ!H{nXN4AqSDRj)yY+B;qQ z1xD{_blzO>3M_Er!-JeelMra*Qjo~#J(x#rhSB@g0mtO`ho{artP;IJ{w&x>V@~Ev z7dRrV)^9N;E64b{fK<3j*t(O2W-#8t7o-B4ho(kd=;8jU)7*mRn(y*jv?tRv05tKb z;R3nG|3&65{-5x=zkmM!HCg<>7X^CYKM1(V8yKm{js8C!gYesosS6|lDChj$B9NXt z|Lf2HE&cxI|Nn8xr8D~usp{IpMT0wgx8_iKfwioqEx(QBA8Xd2WSxh7FPB(p!D zfIs}n$mzSK=JiX`d|z?~%&8RabihSi#{6|ou2bpRykygWycAq}3SBpVwdO7iU-}^*rI@I&-gWMEI1uvuk-L1YrF3p!&D~{9OJ_Lgg6Vr}8gfuR`G8E2si4DwGmsh5@_LAx3`*{rXrngP zrt#?qx92@;g4&_B(w(#I0*Qb@)+}Gp#a|PJ{RHNU>+Q6E52moA-U0nEb5d>VE}`Jc zt2e{Ukry%#Ylk=959{PI|23?VxXeXCkLMs>X!Q_U1Oj>P5)$cX$G@bwHAGfs!9(5eTsbZ%$Mj57Ue2A zCHbO+Cc1(efoqJd?doM+N*vya0X14{sz6IN3DOcq^l{ zYMQU&k8P@OyvpWp`ibjl@BD~vk;U(iaTlBu`gL01(}a8V1+gIBQRwOC|tEy=dDs_7u z?ny!jda`B`kmwjNd@QCkLv{{wG}-8m#`ic_tir-(Gk;qNgz`OGG|;cAtahKJi_feH z1?i5M!t$zqPK$!0gdh98UF4BPnp+gbi6OP7Djy*Q={bv+=4T?**h;Q(O!+DrA5CUl zKS~w4?%efzLMFc;?W~sB@?tCb_Ea6kQ!23+WMa(dHnkiu+Icm1S!Q*-UrVqso_#r! 
zO2vB+knq2Xty;-pOH4}y%4c@B%;zxKJ!hOVa7`9t~{1*ksP+?}fk8a~y#bUZAY`CHVpGS+CL~+k`ymt%AGkOd&CvrnpP$P#a z7)(nDwIy=VZ}wy^T_>F|bJvjH7v8AlB?ojyGMY?o>QjDsspPGubQTu0XM-PHmP}4_ zewETr5fIWpesWD0ARynTpzGJJYcRd`W_@hKO(DhT?^QU0e-c6$%-_82*3q$IckjA8 zEIFEYp~u^JW1UOG)8!P03|>$%@p`eVsJPsy?+bdLc4!C_w;4$Aqg&|;(d}-``h{7e z;tEIReAZ;L7(TNA>|{s*F0d_~k=K%am)EY&i%oZ@)H8bT1qP@6;5E=wI%5T_e&Thy zfrp&R?{|00?Q|{pgZQr#+gziI*-H9Jj@=yBx9{uI&DspBo)&xoLCWV&V z)UTT0E6}cmmDT*U(m2bke5{UZ-Ql1>rj~s|FY7yn`1A5T4i_0c3+MNu6TE&5F3c2k zD)bEQZ(%H9Vi=Pdj}{*Emo0@odF@&;_)~u8I$PBcJ2q@-f<7|Em(I1)D*AQDtsENe zc^|b#^XJD&sHkGqp$DMXM-7$}b0Lbync+0#Yt1Xxtj&PKSwo7>Gu zuZ>oaJi2`5+1C#=Ti=WFww5=h)%<6<7|(C4XOp}Hsgt42Bd7-Di871BbnX0b=yApO z9>baEL(UTk>3&;T>DLR~mkYyUW_0tvMql!mru;S5JuRyMzprvov5*lQe$kW1ikI}4 zL--I|+i-eQtXi%n1<@w$$M_jrN~-e|mnckZ$!J38*MN`^u<~pKLRGEl@d$jr-kd7C zV~>>u?PY_<=t)q@!+@*w?P1n_>|Mu^RTO>&fr?(&w?~U>;M7hj)x8gnf5NwFoPu|| z<$x2;I~H!sZGES8`ZXGr|4UXCCR{w4_L?etS6q4a=%u`#*tVp2Wiy%|Ov|p(<_?Gg zAh!mksaz;4$>MRE!0RLx+FLwTcAwu+1EC(kuH!6imm70INwQF{Lt8f;#m5GxP)!-I zU6Ue8HpQ1pn@lge`fg42)?0Rb+5 zi^Jxg*uc9`AK%w!*S+4j(Oa=ldEeNIqLw>HbnHG!>@@-`0Xx7P>_nXM+83YuSE>5J z3nY-S6)6eM8~j3{NM7I+S?s zgNDSA{%l^4yfab#iN|}I6qdL*ib(K|->b^W!BuvXsh-#lp zV>zwQL4Xt7y(Z3JUXk2G4cMw!wG}EKHeYKJH&|P+|-hH7DGem zk{j7O-O~INnO^?E*4i)koA>}WEh@YRpV0Fc00R(R`$Lz!+HiM>z6Nq$rY_cb;>j5Dm$#B_9zZtqwhjzWmSA&I|PsJ_7Ne+Z`vKB z4n088!uWHoHWv2-omHE{gI~Y+;T~9Dt7(TyRq}mUQ|!J(>o!jQ+uh^ScKn13Vy)9{ zeeV@jlQ(ys8{_2*2_eAY`~Am9OKyy4H)FY8@bK|u!HUU-k_zqOavl4ug%1qw#fbFZ z#@rAI)%Y`FgL2v;jO;y$y>rA%_q{NbsY(6SvVg3?&h+-%Q zzuV$p0!Q^NjD00Gbyo;G9Pj%vDSW5JxBS%reff26S@TtM?wdvEOG7>|hV$#e4}0H+YuxtFce_2dm7NydS=*e%PebF(%# z4av1U+9qC(j7B&Ayf&(LIbo$U`3h9)!IMZiwu+MzFo`JiKKKO)aOV1ikd|$n_t_}MP-)^c=Pwk4W!S*l|b9JtBu`g z&rvfZq-iC*kCH4pkfirnh6o`RU&qeI5IP%%&q8o00d{m-$Im0>=DZPI$F}14^FPn% zFpo)+k%yL1^ACa~XDcmL@x0rn>rl#@ zP;b}1axI0lSvwA^p)T_luYeEp(|DffWS!cDLrg@5s3rDAjn5UhVUv$Xf!lWP5pzr5yT z&^Dp`o6~6e%UH6_8ce_HeAaJcxCi9QQ@X-F$NO}N+QkBkn2^QOY@cmU;jSpRW#2Wn zFFx54(_?UDz2hIdAHV_G><=@iG?u3+#2J3#Qd&}4{>2lq!k3@mc$F* 
zn72c=w{7gJEI!QH?K(6h72n@ySnl~QGNMED?3h`VkCSRQBVP!&TeRF1e*cVCRHr zmQ+*!+<|2tz}1jUF%T z20s8Et>37K$cMB`XXz&2HQj9QrG~J&2{Cr`z+C*}jv_fMasODcx%FTwi6!Bvh<2Q5 zFQI?{a_geS72I!baZ);p{ab(9_$^h26yeBsU`FC?^$fo;4ed+im)jvO1$Q^of!UV? zxy6J_zIDpVCpcDSk0rs0`TT4$zIhHO_8al1l?W|4o5+6lk;rDn)%qj!;+ zgb2vLVD6;5Z%uV%bXoH-Kfj`~e;<()(m!F&jvehkU5ktTLJsqLjG}2FCU&$w#P4(C zolxcG0LoqhIVw^JliPIiPP0QX->}&$-ZBfL$_1@#Ea?vUcB17~@}h25fb=R)vpqPa4aw-)cSVWNp zKRl$r(Dz!WHMBNvHd&rme-st?BdYpk%#BBp2yKcenE-RNXDZ~4=hgDM z9=$6H5Ho$Jy2s1|MU5Z-K%>WYFyOZWtvkM{f#xD&U~6e>e^W~tJXqAxkq>S!dgnk* zRpqM542GKh@;&(+!6Qp-Qx#oQb>~Qm9ei!tqAK$ms{@^qs!H8zF_Z*<20lV>-{}iT-Nwwuw0X(O`f&YaoZ-8l8VY$$DOp8{^SD7Q)v4?|6d{Bk zz~h*MWYD)&d#fYw1l6sIo6H$1rV_5-;8E$(tkP3a9Sl^k5V$U%JH7Vt^Bc=6UnP!F+@QG$_D$u9F}(X=6(H?z(2QqHYpIS0D# zCO9uh%q%hl`9r`VOz~6NAm3uBq#REl+5l;@JTBcs{ZMk;)c8}^<;JILZ-Gbl^Ly-V zPDhQhS~kVotLp@Jy7`gqgRkEBJdkVa=%N7$(}Ry5H?%kVu!E^uqVU)>^~h1`ZI~5D zD0xn+pOQnXz2$V$JEq@S>SNmdN@`{=n?XO<=kh?@?sb9#E7mifin4L%hz|Cqj^0Pj zZ?_M4sG9R+`W_4JpW7Ah=e%-v79c9AIEFtAB6DAzdRyd{wK_!h$M*Xio{r#+;xKHZ z`sfH+6QzDjFM%&^34+#}PCzGg}x*tOYj z@Vvd}Z7UYEfrdt7CRBYB0Y`sC@BALO#WU2j+9 zFC`+!bv4Izd+vnKk;e8JFYgufZpDo3M$N))$G^@dBER_b(%JB+@0+e?d->$$nV!>K zV4-R&XavIHdp%emB*7TnmCfS?;$U_BX?55XB%HW#xXHEe+Sk?=<2Bli7s?|-c6oyC z(*S-=LTGQrJX@Oxnf0tX{}kI*)Yw}mcr>d+5`A6Mn!b3Fe3rtv#xq@StzDkk8O-G#1@>qdRI;&?O;B|08#`nt{uVtjHZmG?=$^s@qXJ zhuk%|+Iy(azGj4PRrgk#H>dZn%xG)QPmpm?(|-3UKn~t3hsj&iCzZp?E*9`~-mlAv zL{{473#mS^OtkvwuIdztfC$MC;L~|ViCJwzY%OZ%*D#q zvEOj@?GtW)4jt;+bN%K)Y(G@9b2OPe8$v*Iwq-s)cfF2-?WQe{B#b^bSxfZb$uRur z>{}VgN7?SpeHAMDmjh<6kySM&7s8jm)TM(=EzN_^t%v#<&sMr_ZtPXmdTv8V(gRy& zY<)Ig-8!cJ?S>K9v}x6Ht1*)lB$IO}u7@mGG1%^ApSH@9Zcv@5j!J81(Vlfps5TxS z6~{coVE=S0)`;roG5*tO%W}Y{*>V*D9r=74j1S^GXbC0Bq?6L@DF=7AY3A36!;fPe?`=!IlgDav%S5TuB-GEj)emd zdNpp33aTit^{3td;w9uBjNmh0$HLsxxUFo#x)rxZ6Gx!oQG;% z>g_M$%!uXJJS%o7DdlSI7dv53o117@f6=f+elj`BC1v^NnU2lYgCT!k8YmOYn3V8$ z3E!t!p8p8I`R5D&aRTuFKC0*cUKHqo|Nr}x7PpS+;P!Z?3)kIA8TbNH< zyQ%&}LMi$oA2$|oE=cv^rk$|?20S65SgAD}I)1kIR`*w#j&+qXPQB?O4u{C#7@VFu 
zi~IgIzNm3zV_X7w7gW?W__wt?;4sk8cK+O>y+~nH$-Dx?!hR+D0lMNNvvLnqz%zk~p5Eo3r?)|@ z^z2|>YYkd|gLW=UZ!N2vIqn5qMg^}EPTKWE5;_7@Gpldob=RuZ;gq)Q?+gr;)It4EUtLK z*TY%WEA*G==cU19e>dS&AnFiMRg3rb@aSCA zfXbP@&8UvvhmFhc6!S_F4hP^AY9ciQ?h-lCeH++HCyWY-8K*$rQmJ}L$?r8sJ`#-C z866gVI`PJ7Yqv)}(FM+hEuPd7H4f1Q{|(Z4=jLVm`bB<{(dSlUgmF4$>WYeEp_3Wo zFp&dSfv?g2ry%X)KX?8iKz)_l(=JTRwx|zT4U4_cI?@@6?f2?^&JKUD+oMM0wR&IM z&sUkX=sc_cr9Yoq#gXGQJ>FR#)7{nBaXXm_t1%x*QL>rscHZodRx44D^kD2AU4_H0 z`}_O<{P}Zc-%t3CnR)BX6NqTvSpLrLe7Qfpu&_|%Vz)Fu?{&U80A$+xv=DPhoJO65 zh~1FmdH6h@`IA2w?ms*tCb|A411p#Bha+qZq-C}-$j2w?-6%}Fw>bxf8*8nzmznY* z3Q-q=(2ye<0~y1(vA{8OeKTb-I7O;HQT(c#he2@%_v}{LD^2dlCthENNaF*AUt3=v zhV;BW?1*OZ75Rn|n2AqlBZ>fRPmxT}N*SRbM{-2hS_JSG-=t{vwW3TI{x~-Rb@WJ& zJe>4SU(f3HsYTw4-n6R#W|<^opZehK-qjw5kW=WX;jEl6vwbHTpYUB4yl&t#eRFSV zRvE)x-5dk;xLgo^FlaCKV>A*X5^zEd>^!sYOiHn4e0zIy6XAi|8(06-u089G4;}f^ z#@ptVRF@|vX1od;j$O@MmRf&d4yS2D*q6vU;pv^1$Q^O)-J6&QIrg{Y#AT2}kD+jI8Hq< z#)X-#9ybQNk`hPvpC<{==NszKCCGr#(hkGpAB*I-Pw~(aOO4u(a38;(-$v7V+B9Y_N8}SCPib}_(Dz8NYR-P11s^Z^u#YFPKJj@w>1h=cx zt!H*>&@5y5>iKmBT`rr2Ce0#K8hgf$=UjmaNPygOxnRaolo=v|p)mt9GwIV%0u`5l zM9MS!p1E=BTA+B^#&TGt2^SHdU*6Y@XR>)Iq?R)<0X12^yn>4qHEk?}73&baBz5q; zUC#K*pmC0QwbKfy?rd#=zN6fPTOemGt}+9K_z-?TP)?swAc=paG*3g&4F>Aeg=uD-}O^4EgJ(HlpKNfhuKgk}q$-t67 zAbE>Zja!2506DI+w#!&Ni>9S<=IXrM+7X%REZN5o+n!{fd@J*@Wc+owZZ>h;h-&W4OM4=Cee znLFj`0f;;*0t<6*C7}egQNq{+SPSjUHiNE!WDTu4(dL1?6ify6$zIBSKh^d_z#R#D z5rc3gL!0g1cjng13kT?jSWt>&UW<@0IF2z9*84*0hD@&2LtLO3VGhJ7Ta4{ z9SEuFdNftdtKoEPlxw}dI$gs0aF}`iZ-zz7t8vm3%5jvnE)YJl#-K}!sYOh@-M2w? 
zh4gg83m&3n7$MNfxP4V-Lr+7-Kuc@B?|P3LEl0cIXzxqZLW~;VJHsGU2)pek;e(VqEzH;x|e?e6kQ^ z<(_x??bqI)9~mgJ0a%eh^8VRyY&+sMJ7WksaSJzxovu(cO1Q(cQupqSN`KOq~#uD3ehAmHB{nR4bqeiFU8~6D<;pzQd#?`A;m!DGe&Qp?W z7eaUcHDyy{@TF#Yg1o#wEooVF0Yg_V6hAYywznoqMwYHVD~yb(SF+`529@05Dk1;A za;x+!;AyVB51@N^)-m-_Nr^p=>=LNYO@|YjKbis&NdGpprI0T9m^@;hGrp@3bvL9o zQxB;7_|Zj6j$wW42 zYg4OD7R3u#@wjDaSy_7S8vJYTsg@N|)R32efNNdz2_gGBU6p5c~lP%QTqzou47tE&25#RGW z_);crIN4y!Dg0xCK-4DPM#KK(M2SFbVE=7pm65#Mu7^PNOm`64%`d-|k?qlAQsK_8 zH$2U;A&9;Wm&~NJvKkl$9CV&`rCj}qycgwxvoQDhct9}Awo>mmA(`i+>^J0jK!Wrn z?}a^?J=;j)^<+b`herdO(i3(V4ucTn@c!F(ClyT|{R6CPOMV~^N}SL;kvscN0loQ$ zPZ`D2#v9I3t1DKAKY=PBk~x(d%U>CqC#At}Ywsu^4$>4Rhd=K=x#Ugy)Cm)1+1m@M zmNsfSI8JjuK^Fbtm*fvbqn&=t=CUO{?i&`E`#%?Shme2EBfSg>>4Ki0TC@z8%I|Jm z_6wcmKzlrVFW4xb9x(zt;sN=y6WwdHz9)44-+?vFj`ii@H+@?!ZC%P{kJYBwexMUV zgbI6>Zc76A!@T_g*EIR!p=*bWoU!6+3aUC4!Hp?p#a9WR>nX{kC%zFXUtVia`ZP4< zN9V*(qKM%nBm{=F=qXBsJ-kT|O=m~%@Dxz-R3 zyB{vNrZ3{8KYnG+R@JQN>rw0d1NNBp)h)c!{Kf6d66&=^q#ldu*%5qVeqm4jdKUWM zP>2GR69seu7s^jYCc6_NBoq>&8;66~XX_ zmUZe6LHhi168|=}982}@4uN%-y<-aNUQUR&`ebCLwStRGJmFMI=Uq%H6D)* zw)XuC;zoW#$E3p~LVEK)tw;F*V>_SPu7{C05tAuED`6gr8oHc|hZs~TZ#GjKt=CA& zN#*dLZexNx343Pf127o5BFRT!dWC|si>kLX6$&<-8&!6cAm-KcmRt&(l8?TRdXsQJ zm1XnF-r+QSo{@s%Pt7GhfX}UF5$I3c8QGFTOpa(7%G9$oEW-#NF*?>yZ~%zbg65uY z*4GS1WUX}_{{$EC#r{+`!7LaolB}RUv_T4TXT><(Im}D@)(knU$ zJc=QGe$Dk0mY0`vX)D`C##An-d0M!ybc0*ns+~<*-#wHTkmg zK{#}B2%}(fJH_}9jp7)wKL#RUMQtUgu!&m2AZ8Nra6D-I_%QTPOup z%J#;{n&)Xlnk0447y}k83;l^QYIfB?J~#hLFtgLDe>?R3F8-6^hYIvJbqu^FIuZ(B zilZ4rWtP&;wr8H7-PGbBh!~e_1Bu})hMX-T}-iGb!Q&1)}q0BvaV-GnOAnQh=A{k_a zr>7VwEhkzcpKQqOBRjG1z{%h$pVf)A86jer3UGaU>n8^0m5 zV~&T1r?RgE0aS0PqAmAJZ-LzbfV53{O=Zs8@(@LC+!wYhvgrEWtD3p!%xXWDl`=#3 z?vV(~F{1?Ox}!pXRfo0q0%k^8GvI5EIQ2S8!(WA`bK5mkuKtU|)&cYh;3V&xZ_itd zEl|7seNQfuI_%am!2rEUqOrYgUR9;rq32DL{RVrK*Qt?ox3I$HKdqO-7KEiMIJ51kKBSx94Wba#zsc z;}PaY?P*i@cxVgX=EcuBU4M9bwZCuT&`D!z4w6UU-}!*C&o!czzSH2x?k_PD&vkzN zz{6uAKE?-cl$NfFSVF{Ojw0INLHxXU5234gxNOOvUjjB?c%w=@VkT`Xah?ViScm?q 
zcGGyJCyTH|&Y(%m_zLr?^}Yx~f*C~(in@xKX6%oiw#dMs~scf3-iz?zh|pr zO>bA-UA)xnKRI96Qf9KIl+85=LOR~@10(G?Yx`Gg@{M0BCmHnxHHQabZ<#!9+`kbA z0%7q$yL1C~5!i-bH?3|R9UUDA&TuYPSn5QQ&JcsVKc2k3?tUgW$Ca5FrF$q18`=&! z)NegKjs;;m@ugL^&p$ghMSJ8XvC?z_isyHb=H}%cEj3Qg&enS*T3J{W^cYi885pce zl6lckQ+KK98yFY>f;8akRS4u{>Wgz+j!#%z_=A^Exumznn0_~OT_PAYMVnb9%hN)K6y+{`x+FcV;yCJw*a$yPL1YupVj?+hXveKZ7WN|1t&IKt|Uw|hw$J9HL`zuJ0Tf(+v1UXtu}M)fJBaS z5*ijva^bxV#v6f`!pqo5j0_A!K9TB;x@2$OcwLVGWm}SbUhZ?6?PD-nP5)&~9A5wp zz40j$QlP~Wim$sJ3Al)~deBhmoR2BhaOdqknSc1>$?LfeYy|!vK!}t4GA5(sbV|t$oh)p4HXjeI}KNCTRFA7fm@ucY@`;S^nQV~ z@ze1))(TstT{Pe>shBT}m;NAv`og;w zZ;-xcfx-}eX`UMhE;AtJ-QE?M<%e4zxvLBQa{t zwfiIziDY?`^{%<~yN=li=`W4dPX}extb$;a$guQs1O!X&sod=77J127Pz)C_S3w&F zC0C*QEjhgMfzg0_mbJOePm8E4I;Xc+qDV{%$d&ifMXx+GuU}Mb3+5X7$>iOsea5-5 zA0C9$_3eaq{pXTFu37+sbZ9m`;UVcEw8l~`i4STZSn`tqHdx(=((4$V zgZ9y^3+6UXj_gDAlt2wV!|f&-!F4c(g0J~T*fH@IBFBH8XAPHTx~}DN!FQ1|e!dxk zVSliSgNcpj<)yuX`x6F53k1|;TczYnGN->An%%LU{*op30REv0mM`)YrFj4HCEiYe zY8Wy?dJ4nH>{u}yYZ2q9(e8yQ4B@~sJwWQ~>uhaEFpM2SQL zZa%ek3Gp(r3Gx4vVDI<-av_${!(YJTj=P?fg?G5+_-d$51U5{~s8c*2=P@Q3T;gzZ zIT5Oi9D#+%_Xky$6cM<00Ed)?K~1X%1>x4Y>;8*aTiVuM-tVf|6wec87VAmTJnuGw zAI4D-N>maz4|deF5~uKu7NRy$V&R@U{=<)zhX8!TrWe)`gFUGd?aK&M0>*okrHo$vgl>@-%rp)OJRM8o7|>| zmvJKT_^XGsk_OUG9$N6+Kj^OSYLxfdl}-gQkE#~q$|9V0!MRpAYmP1$5k5E5Sh>7P zAYJ@U*?2;cam+wj$9BXhckn@6DFRF-k)sro`K=0p7H)y&jX@b8fpqmChLWcuD?3zU zHJSU3%2$^qv1{$)s84X;hEmD2xVX47at!U7D zsDDXfoWLAM7%SS^-P)txv9OYB7+CP8MreP&dEc?>R=DxIP9tOPLUj59okgd~(_Sz1 z865)0M-!8rDazdV$vsttXF>>bNAB@5Z{1$+byXc|*d5GG9K+`sGcN1@rNlpy;}ntP$FU>{CUB?`s3ZmY^H{po~lX`r4^haB_B8;$oqyk@OO6wU=cm5 zbXn$X_qLYwd? zXIZ0w~Gd45e$ySRt%$1kSat z(rtdD&xc}~*7vh@t*9g{4~@>Fus)zRqc<*kG%2__B(n$+IlvBe`4TE z7!lF7*@=hw;8_=sh>*~eUX=Wf1cR~QQWyc8qZDLBFZ0etDHQw_D?5z`xkxo^qX8G+ zC@&nU)$-! 
zqNTbmxnxLDVi;ezJr@O#>Nh=>_v-x!yoLME&x2<;cC5lTH+5;^tL=NuvZc-Hu;bR1 z4MB^X;faX}DmN6XE$7DU@xm#k+^D{dv-?M7$!HM(Ope^4U*`yF!CA=Jac=mbc zc4kJsDCa##5F;EV>o9TP1sr7nbG6E=+5OC=exx3$-C+bcm62U-cwC@GisJ^|14XnS zxC3t-qkrkL%>E1Sgvg?LjFm>sgP%Y}PDv?x>PxOP7t!P4_Z>ryf)PQ(sfi8rh{+)+ z_r>`1NDsGoxJ}pxb{36-^fXroOqHh^ zuK~bTx=zI^^1ygo@9@Z!vN_FVRJ)()o72Rmk&4r8tgc}^@eF}3B$EsS=F+zodE}P} zLPT&^*U;$H*VR!qVmF6b!yN;#!h5LPUQ6||-AbZR?ir1NXy|f%l+7HM+gBG~Z?AgS z&I1WYm)f5-J)nJpk5>Z7WG9L9fRbDuK%{_^d}p*j&pOr|v9f>guI>2w$@0#U%RYOb z=I_Yl25$#cFls~N*nHExe$kXLYux*VUq3#s*pd1n7`pA_RQqL2Hs#hy>HimIm_+kv z{W})mzwtwWRCTM<#-bgzbV4>yb`mzycD7H8gs|Fnx0Wbi-PYJy6xVC*oL3{(sW^wm z68_FQ9|?h{?)sHEOcMJvV14-!72)g$>$y_NHIw&o{*?%XMzjPlXFqK(SWJ^~Np!Ty ze}YBa*3UoWS>yTTjaRenqdKj%j8{!A*Z|@ z*S4`>WqB#i$5Pea_y*y#)!i{30+|Q>YIMmD`972^SK0H<1b29^d!`^3Cz~rv4h9mz zPQWi`WT&|=)GhQJHP$UXY|Yt!vfo{4duOP!Q^!$0jbK*9XJPnvAIFt+htj)Q$&rkZ z4!Wp$?VMz{A@T&pp5XA8_!zotzf(5NA_<72oy1Y#6siJr#n9 z={$);+;$6GMLAOBQGF(rnsL?hl!PxiUaF|7(zsC=B+_>_`L49v@w@gDj7%3L&Ip>v z?I34MBk)a-6Z_>+blQ8gQ*`#aL-+M43bL9eiGWJsc0n?#P4GC22h~b*EeTn ziSEDuuKNVggXRrs-+u2>2X1YIU+|*|$O@&-{U-@9abG&wk^JD%3iDEgy#9>vUW1}3 zYieqo82i~@>qQ$ZSCTOfkQF2Zaj%8yFepp1vByG$+I-alh=j7+xKfJrMGU(f;HFS0 zT?GoI349l?`VC;PliTTh-4XWMDFzoclegi5a8Awt^6taOh&LNRq{|1>2aKgvT#*U# z|3TMR21FHYZI37=AP5LZN+T%UHHcD5BO$3M-Q5fz0)l{qbVy4M-Ju{L-5nz>IrIQi z-@$v|d+!_Hzxg+N&YZpWvz}O+Lc!amcjW*>6P$-2k8oi+fZuy|7sMt1GL#Ib^LIAk zM>qeNB;KJJGd;L?QKzv*Si#isJA^aa?$`f)b=J1^_8pm{Cky9K16o0Gx0K{U`QjmDPlECSP(E5fBa{15 zMGK_$@+wRY{erX(BN z_whe=<~8^Mz+ur-i*lO?@agQt=D^OrB11XtIeFEzux`4`76i}E#zt#W3&NW`u;R>R z)!wys^lfTKRd&Z|;0aa+D(2To2>NrVAPXLcvanHZy}U(0CNGCF_kzSEVa66CpGO^` zshqsH?#O|j=@sz`>?Thyq;ucqn03Vo4qDjH;(rs8*pR3wJ;*3Uuqx%Z13w^OBOD5_zueml@9&m98S)Shx}u^CZO=&KM8vyC~=9MD17=p z75g=qPY;QE7xavzK#YXIamh8hX6+22{DhuWmGKK@!u~ z-PA>g6d(o=bP9f^00A@Ai==GppdLz~dC1udUBZyjTI4oZc(uB_76-fXwvII&vgse^ z439L|dQ9nM5d4ykj*hpbg$u5*~dVNc9I8w~m*Jn2JG zj_=3HRCnfRd*ae+{rb^9-D60IsWC-KmRhK(T3#sQe(&K5S 
z{{Rof&Jhss(9gx>=yHm4xk`RQSdE7(lR5OyneR19EBxw3biGf8=^vKzzey#EH=qr>zhh#Tmr%t@8v-o$ke(h7t6lq(w=CKmSLJ9kn!_^yU;4)8&(Cj9CRt#{YF z(4GrrFxg8O?3=*RLHP(873K$jDCKp);L&&y`}x-NK0@~Ljh{ZhV)3uN{tzO&DDNkf z3Zo^(583U}3WYg5Kuv(dPuiyz_enT?h5W?%bMOzliGtJujzp5(f}Ce<2dhljj=>UyI&SBO14nFBt(1}W^a=ozz(k7 zDTRAp<>`xk5dwYZZS_3L_j6jfdUb1ua+27dt#hR^*ET=zFJ$<#+{CrbMouUnv7U}4 z#W(LTMqmD(4gh(-MZ}Rfw6pOMgrpSdW3AOUHb^|>Bf#;}&pshlF_j<K_yCFPIA_tb>;;T@XFs*czy)GmI;!hDG?bdV;18xAk+gu1-&oF5IBX(pU24NhT z8^(zZc|Z^X0=-|OHg^8;TYTiC_~mVoFzt{_0nz*d+@X;!)p;;2ik{B7mQns)q z^M?+kc_RY!+R7tgzlnN^($v6eWbnd8nxcovZ?DS8sqG%vU+HcID)xr7Ee-SO*=wNl zyXn6O`MFOL%0j62MlJd&9pSm1L7y*10hL3rPSb9@p?{y)kl@2pobL~jtur$-e5S45 zq2Q0X-ze&1dR^%CHygz=#Mh$^bFHmB04$W_BBD@{E^-Gn-+B|?;60gmI|vj35OFr` zm}A9WiM1gL`(A|kuMA}8kt$7*&7iK*ec--?0PjhDfZ%vUy}fw!!_Ns)x%PQxVX4dX z=(J`g)w31FX|_}yRrGw~b#~^YW?{hEnn?DL5r4ZJk7BFej01R3)L7>uQyW2aCR%2t zK^E?VIAM&Tw$w`jcvubY;b?Xr)~9w;FTEgp&SrZ}k1N#nfqqcsZ$GG|wo(=uQrM4E z{1(_${Fh9BmqOm@2qB1;sW?G@L7;q1a=#V&N^b|cY3SdQn`p6`3gA0hGs8fAA4=}g z6$b1ky=aiZCUa)Fh|RlQI`>W^u68P#(B?(As@~QAC+?G2$u-w|&PE}_|^jYBiz40s@;J8QKY#?!wI44s&U zXtW>3thq}raRrX3azvW5kqgj9YvYHk-%h}eg)5P;vN7+ShmWREEJ3BuM@D@#q>*OS z647?;h)&?{_45}8jh9R{i~;5QMQ0xCFJW(pX$t+ZKk!fV(SB95bkmO5rV;rvk!L)GE&Z0ZZ=T z*2T1rMKjaiLHf&X(4Y05_}kg>FrI~9^Qw8Iv9yW=%!xx6^_ytiD8`OoeK`4Hic{w` zbbDDbyo@{fK8>12MUPu7<^@#(>puZkd@HbH8Do3RtGGBXNpIm0cja?lB1qU0TKWCi z*duO!qzp>=MEJlJq-j1rlUz+bb1&P>fi8+Bcg)(!3F;aTc|x}6D8_-k2BcuT_8zS+ z-sIg04X{ifuCN+urf4Wxzd1&koDP#mtng_w!=M)vqp9n2F#44SzPxpNBVA4V5i&2X zX0C?~f53nv!RDgx&8rb>;llmEsN-6W%tCN~fCSqeOmYwb2xtukwmxNx@?x=~=X8&W zyDG~yJnE zUr1#JOtkG5b~ogAWSBZ2-vair2mHK&-wYqtO}S3Z&T?HdYwnU0I-qReLH8aejQ1q6 zhc|gY9|JcuA%~o5o2-}l3Hx19p!v{5U#@gDh@kGOtyJcEYlsRSZ0gkNzS86NV z%Q$dmZ3J8`)q88ny%O03b7#YGS?KmcImd)d|6cEVt*OV5>uUIeU3qqd#pNSRq2MzQf`AW?d>bO9vvIyYe7;H& zZnpbvg$LStTvA#=J8^N%ib7RPs!ltYyoUX)mMY%21&8JLysp!q6wU;9^yGd;PZ2y^ z{IUFNrCs?!zXJI~tx)>MCTY--iQ@NK6)96{+{10|{nn!A0Nx7_)WCE%Vl|bN5O=fr zXd-)>#Dnz8r?#qbak~{ZHVQ1uZ>5tSeR~76RsFj@R1PRi;zg!3)y*GrzQU)y18_g8q^N>;J9B 
z2a(0F3tMXK-9Ja7a&8smaAj#6t5(d}dnQf}OFIouUNxtUFP;vAert6RaWRT{#9f+++P+%XsO?vZBgSd_ z&i+X#wl-(2rXujVC*Hul;-P&}k>OkG>|y@q3KLRrU0M=#7McH1R$R<+GnrEZWqVt^ z0xphAsHYEhR8u%LGChiG#@~!sMboMYN|5dCx;E4Yw%e__b-=3HS5T$NCbA9f%IQ77 z4}mAe7N@p^!2|fk#GIeQA_&ZC@%KJ}?#hq0t_uBwT6e+ngvf&EkfHnrb zyxUCE3N?1WfnjnieMB6YibWEEk!)$}gx^OmR@$wt%uYwr?>N{ay5n2b)xNCn0u??} zM{88mlD5uIoZ+u9^>FJOIBUzu$mr-B*u7^x6&1B_m*u^qR$+S->)7u9Ll{XYHS?^w zz72gm0ByURq-c(}auVk$A7QyZg~-1HrN4j>R`0{vvRBu!ycDs7UI(mf54kT9-qOJC zmFR;!AbU+?8QKRC?sEc9Hk1m{M7m9VXc9SJ+{w-B^$J^EEXw?GfDvcbOeu2U5~=gS zGrjuRR<-2yi|GbPsx%!uCRy5vicXtIv8M6(-yjcIG0|PYH*+_^26~Wqc7gW+^BWDS z=~8}E6D=&PHg(~DwSWiz;F)Io6_ftO5IFztJ zUvLp7+Cm17^}dKW7-W=Dtq}D6`>0rk4#v|B;i(S-x!abE+qSljxhxI3y!B*a?dB&Q z;I^iUzZSgn0bVvmIssxk?**6c%NJpSlPYTO7`s)>8+YstU!CHH0ZX!wu^m@{)yo_w zcVPVj>mDLzqdWf(PD=G;+iToaoIdo^ABJB?o&BT>ephR9AP|m#kKG&M~bWnAPZ0#s=o7Krb`UAy@O25Y;LoMOcpHdrFnVBVWiouZSqeFCJ8L=XQrir$BvF`K&kMw3bktqK|5G|j<;3Rt?GEfN zkaPNeQJe=%0qSVX2X z52NaPW)j6&=Y#KyPu}6ZU(;?)E=O;8J|bpak*;exXghFx#<%Yd7pIJ1)9x-i*mEgV zu(}tsp_UQmEWUSgndmmj^jC@Yo@A1I3mgF-&Zjh_t3)vyAU@Zl*Iq@`&*_}Fk(>76 z+M7OiwS@t6rD=HNFoe%MhbltK)_vj;W|%lIliA8jow7^?8^04v9w_*>4qH1icto|q zh7FbrdXsXO1N+{bzN3S4JvQ)C%f6yu)0oPCagcuPAik#mlqJR_(s8lj?r8H3R60p9SC9HGo3}Df`MZ1R#BLCmM(G zh5aD|7$8VdqXKBDsj7u>*?!r$pA^@A!4Y5iTpRhWPJ8ji>MoxJ(Lc^C(xJdnRFr9x z#`Z|ud#b-PIk^gyCQW6!ur~%tUnC<>nL~k}a?ROroUAj=rn#lZFr~LRzuAZKwpoQ^ zl_svw#LtUwx*&-Hjmi%?cjt@?TW#hO}Nc)>1&8vPrtv&%~NpI53gyEnxs~-<%4CoA# zQeFDez~{0$=!YjnLwwD;_1oZz{YeyJa1ujEw;#yy_mi+`)ANDkg5zwsEJmR9CK7e; z*ublKH`^e9ZMRL#d{i@8-Q`g+4|o4W*401{p4xU5o@F4G|c$(K)jF zZO_HapB)ZrEjHw$^zzDt*QB%XUDObrY~-{*sqOWLD=yDmJNHSShQ~kFJjwcn1LEFs zW!>&=#D^`Rb5SXWe3u z8PV=_?i;_u`Q+mXx|?JUFwY|O@0tq;ik!FRB#YJ|Jpq~EP^lS5OT)WT>wK(0C^M|t zyyR_YDj0-%>$es6+Wyb8AUWa7`68u2X$TH_N(6@>XOQ`1Jv)V`XoM{0vDC}bS9S_r zD5AoLc1Cmn>nB`4IZZAF3Yx9j-Y8GN!uIQW_2!Qs+c8~)8?=9|psfkImaP6&g}yz? 
zPJDh0z*9ya%dQ{J8F>*qWvhz|!(QqS-`x0msr;wI%-u z6jJ2Ufg_~7MACet+%_1JqW=zW{&r2zH~YXF##a;BDh#)Pb>$PABZj@aJl)du(fgUx z+Z0H(-d379bWd8H#eny>q{}V^&;RU+-l6@K01w$)IMX2s8-DVs82w?vt)uX*m@V8R zXu1hQ`>(^O*nf#c)0Z_)Crh^`=f`+0X_C=m``sSh*^M~6Mt%vcUvaceUmJ)zJafS5RTicNjIp}v z#{kQ>u-~)JlvxEFOYhUO(Nh!qHE^G`gTk^_aOfyC<@i1+&^xI(0}t9BMS$8?Lb)OVQU8yyY+O(z*%Op2 zKIGBEX9)riZiV#_fP!u-d%QWYw+dD)NYi(aD&<1d1~#ien*xF-9LE7nANe^$&_TmDPqt`?M-rcz52=vsZ}a&#ldJ{%z9pYzrII4I*2UOYZA9LT-g zh-ZL*7&iWwZYI64?IpNMr@ZHh@8yAqt5xPv>2b&NFr|%%W8M^d8UdMI?I2+Jo}3CU zV96z*HMAHn?GUnCEYR7rqJNq?S82ZT6Y9m37iKY62AdY3SDOHA?tpal?h#J{q<;Gf zUefR^@+Hu;U!x|be%bwTp%MDewZwQ9BF~1a^_=+vRs%hsDnUNoBy+1vD=kV!y=zY> z{PpSfiLTBI)5Ycc5gfQrZn+NLPKo?%h+nE-dfT;j>Vt$C=gEjL08QNW3WYco3C4Cc z@|14M_x6Q&5m8I}SLTTg9!X=n_c$QlQ<xXtV% zb-jnFWcLdHwswf~L1+lVQ2GZA{`w!ujt95j`=MX8Z6;k(*f*&L$5n0ebE26Ea_l*f z>%jG9`yFVwT{fD?wVEgr_T z6ClQ8x&UcTS@5j0zUzH~bJ8A()qi;bu2s<94W?5CV@w2>e3p7!W1+R_J*r67(Df`- z6ow>r>Fn7_K9SW%J#@jL`6^B2r=K^yKWM;S{bcR%SiQVV+bGVtY<7PJG+7gkWUMKXm?&C@!U7xWzp*l zE9k5H?tdr}HEwo2=5xp7o3<{)MicOaU8)=k!L8o2`?Ld&}grpS|uYNj?c z5-vefEaG>td<*nC6@n~x!<;90rClnJGt_kBp^f3e_Mpv!|Iz|EM!ceE_GELbfon}b{4-{L zRx*GCYU$W|Xrr9LRDCF>l-mlwLHa1C%k+mzRv@96TbaV5JC>$q|Kf}s}keLs?3B${y zCk5SlZ5`VYrfnkAWXAvQ*^; z7nWD1)n)5qHz*;Cu(8$+nA2%S0D@#EkmGNO{zd@o15WY*2*Urud5zx8(4 z)D%}ruVYcbXU-&nZQgSReKc=rH1WQ$J{IPIeA_AtZFKEFeU3YY!|JnpyXU;|y!U~V0mI2!w>6TWV-`sD_u|!1i(Od5y+VG$KrITX=HL;u4%@+d!5K4?gvNdkss=<0tGs-dlW&Q4$~I2-V-t!-bM`?XXcvr?5fIY7c~{+mUxIsQmvez6KH&Fpux5~(!xDYVT9Q;!vQC?kng8qV zr(UT=L;k-P7j*)U!bs@Uvrs@>eI!NT?O`?`HbEd@5G57W9j8*>>?(cK z15DJwPyQjqmQ}?F_P?k!g^JM--zOjW-S{Aa_UXa=%^zD0?s(?(d<16iFiN=QeR9Mg zIkmVFoFR?j;dJRaiB`a^3tZQJaDc%CmQJNB7dI9kS0f#~L zN|n3=xG3Y#!~sbmCZd;~ocl#w*7USz!wl0WXOQhpyLw6PjA8jy3pBhPdC>WRNFOMP zQ@Z*p3`Fv&j)3EZE3kn{8ll29oT;0Ej3VNUUTt6?qOg~kJv1Zd(XVK|+)Vd^FPj({ z@pwEMS8>2(mohHKOKyvte!9npu`i_LA=<^(xQ?X_fbD_M3oqD&hx%m&m*KsO1@;{% zrsM}HES%BTdMsCg6N^dU)L?9TU2r<4#FXiG&~$Bq1q1-#FEutbT`uNRi^|Llex11H zol^upx@ex}E17*3L@}ItH`7q$hb5|OD*Dbugl9dIw6|utUUFjMC-cG7!L1XYwq=zB 
zLG+qmtuuYY+uoQ|A*<&M%4vY0_$^_-AIl#mZxxWEv%^IuZ?|AyBZiWVPO)za#@d-4fJKk1%3W=ejeO3?;k%1QMR$MnO;9Hn*3K6FH{5yMCP>P zFV6#<=3NDW9zvHG@a6`e?-k^zg9Y?cJg~#+_eSPHc81s+HgM-(tEi>^qXF)caNK{R zVY+z0^q>10kcP*7DoHeO0mA9ymH<2{Yb2g%tHOKj%GYnN(O=R*7@3f$Zde2$t( zCZNStHs(tUroPYt0mz*S5_+eLX&3fi8G6nSpZNdDL`zBCH1qjDASog;Z0~%gkk>ak zY8rH_7vd`}-kBXQA;hr#)!ar3Z zb2bzEA@YTjNxNnDj#3s)iiA3y4ceL^6*3XM^{RjnF9*v?ufcE-#V93OyO0eE5($6h zt+$dvn4~maU+*3s#yeh8{PDwW!$n1cXa<0SF8jY@nN1O%Et zzMO%{V0}LZHmm*z5XU1G|C}`UHPwD(H;RqCIA{3&v(mz0?qLQiQDcEAfjpG;i)5xO zqq}RNfEQIbu6mvSq~sAr87sQu(cdUc%_aESMv^Si&G1%@vRj$WT$r}t&aJ*Lg9(tF zlgy&Pn!2yG&A#(x+X({$&j)%{UO9WeEZeWc4?KfT-s>=~s{JuoSK)(N5i^4w?SI%1 zw}~2^DO?AfVW>DZQIwK))Xxtkg(u+_7|_>xbtCa|)Yx!-h;MYC_Wwx7eD-f=WsgXV z9CV?wcSMk~V?Fi6p#4`vipWIr36~j|?{cVIBAs$Rec5>}?S;>^z!4Q0srdMG5a2ih zWMQF%tUF;TxPM!JhkrW60Uolr*}{vr!Ex@QuR<(q4x_bl;WCv#yEDSg$HT+NS4GR# zEa3<*DU$uwcD28dEJ6eNE^**!XF^?Cb}mT}eGlL%Nw9IJ5pDFV4Op4 z21#f7R?EOqwhJCLJpof<0^3s@XdCKiJe(V0ZxRzwN_k_;%ccP38Sm9fd?pt5!J1-%`DaBJ-+Fx!1#ruQtcBm=eexkF` z3vWgXn5OF+OIY}_Co!-9qU;^+@&IXR)Nhj&8KPaDGGDw0clT|Vu7=a4TzONQkIb;Q zK$>5Mn(do!dfj-(_%C{4a;Ns(B$DG2h_FEST)hyzy&JonR2%J;LsLv^t17|N3i4kI z#hpAYe&|{yC#EVvY<>Y^?qy)XU-9D)bNau;kHrLR-lV+`TH5W6f?B}P)l4>WfCuWh zbktYhe|!|}mSpVP=i^adiy^lMj}Ft>dUjlN22b-kJs|t2$xglKz7qE(N!M1U3G}MyP7sKrwoaGO0v@ zfX1Z(xUxEo0yMff)lt}_MD`)a4Owfu1Mq(*oH0`ply}0{YWr`k$5_1Tvy&YE`0suI zyKeRgKeo|0Gqd96&_caf1A_#)>ZguF{FyAF?z^aWXFQOL?uyE7KCFaHbY}Ra9M|`z zddHY0mjQ->f#T(IKH19EoR+}}OIc^fgNGfBO-)T-zy4;Up0%$}OG_JzqGFc#0c4~E zaW;(o7ff_CT%z0Xgs`99@pIWdW|1lXUGI5{K0r^4!h;^jXRNV)oE&`-BJQ5W1Pao6 znyu@#HHA=GWFa* z`KXVFC#L|r<<`O; zSI@(bPuC?FCu?&y$L;vgzY5fM4>(1PUh8fd?msp9O#DTc{jjKl@y74OKI|ZHVh^Uc z#1m19w0tC!WZt_BXQP&E++I_YGBWw`I$mlMxA{D+h1ZMQdO|>c^xh|iJI}%}7u{yq zyaW)pJi%Qbpi?YQOWS+x&}8_q6OdDYBSvmR&wuUQPyf+F3D49VCq&1LTMcizV;vu6 z&MDreLZYm!R6rV559`(7%LjqTmzHIL7>QCfxuD2n2J( zswSdm?L~6@nnD(JEanaW8FesdH1WAwDfeT= zOv8<1C7Z~}T&l0_py0G=>ZLCq}%loP+msa&jNmL39XTW3uz$`&C0PZ8Tfyw&F|7t 
zQ8MYKS4duX(xzM~wlBOn<{?B4?~eWE?c|N;b^Dd8<3`EQS5;SWoOqTo6N-jlL#0ia z`kn=d7)g(V$Q{tU$aJL@=Lz=lQob|QV{9hPsr-yG<*i6m=L z5wmN(Tz)aUiaTl-)2>82pqj_{oe=l`Uy~h<`647AGsUut%aWg-dy<&>JfO5l4_NX_ zem-NB;&65J9&t|^v6(aij;1TyTJ(l_Rb&=^WmN_siG@gs@5;S;FAREly4b=pFNs!m zszT6Upsas~gHKE}U2TVKy#JkGbaxY?J@_XaY`ri$O&3}8%%!pbtjgVAUcHfF;s_kc zvj_WN!n`qYDnxqKHS>Zi^N!-bl8p%t#~NSwS}zYiphS`17j#LNBp??prUiO0+9`7Y zh=X1_0VEdUP|c*yFs!n+gveHm-?|cb?&a9;NAEg)L9G}oHV2W}~>H2;Cv zx;%H9JBC2D?y07|a?l%f0*6-Lr$RB|(_vU2ew4Ap92yhph^A8Eg#`4r=0`gN-(Pc| zxPLzN=*;NH{thwTTNM!hwusL2*>>gQ6IL{2$(@V-MS#y|CNjOEthL1U9vz8t4BnV< z`L~2h0%qSf(`#E!-^o&ELp$aV++0lB)$|+6+yt!7jXX4TNtaAAJ*3GQ zWb*gbv-$)CW(FOfQJ zj9jEs-`m++YZsr=P#`F0Q85La-$3ON8+-ot3dq6O1C9E_ErkZ+tcK+$3euqy%W_OGOS1B`F69v7vJM?#dAZmbeF@ zTvjI#?k}t22fNf>zdrn@{_Mkn?Fzk9W^~qB5wd$=V2Jl)b4o+@YDDJdGb2sNWvr8- zVJ44yW^w*(C_b7@q4kZp`UXGu~8lKED<13sN9MJAo*5q7DK& z409qb|9}Eljpo?^6LO8P=Y5Nr537ChWwrct%^aI-6z`BK$At25o5rzx5tvrvh|dvB zAK|K`)b~19gC=dfL|2d1jSXm6v;oG5pj017Cecj%9tj$=rtd>rl3Z)lGG`uh0H60y zpUPswS`gT}JnK775O&SWlm7(kopldHe5a)e$_@l7ulYI8=eSa`$i1&(!h0!hS$5do z%vhuB_U1lrM0ix$zqM+OS0JVc4zb$H<%M06p7?OoT^i#(Y)EhpZ?W6@&L5Qz5-oS| z8(MJUy!+pbt39-!R_$+0rTkk@>S2|SO208u{X2${3X!D<@7i=)Zujd*!~U)-FL7f^ z8^(z^I@m;G{7Y4ybX;BgdTLi%8c^t!UcCl7rSoRKLA)1s{@y7`oH|yJDetrYN^@5nEDI zQGr??vs+V#{@sCEcpo!Gx;cp;h%GShX+%W)tgRw*BUSIcD(Bxx50735(oHsV zLA0SY+Nzt{W{Mt*Z-p^oev`LM{>VwFnG-y4Zx#S#X>(~H;-H2XEk+5On1YSbT>=v> z8V7)cQ1;^zQNhH=`aCKbdEM5+C9!@i!?u5|z4QLpvl;9qJ`QI9{KFxVt&O`&!y!v^uQ=rxFigU@j?dp1V*9Ry$j$n>Zafs2 z+t`#J7ejkpR!*^VEr^T49VFy8Tw}PeK+RBlzA!ONJ|~~b_N`HVXQcI)6*4jxbw+YlbYsER{&H`^t!|2<+8zF+ zJY!2)O~lyG_@%sy>PF}@qFxj$WsZ06FuNNSN_eNcmFY_Zo}2nPTTVi9*Uv(7lta~*0^>W(PZL;cJXktp0rbwE7uZ&IuJM}*ZnkV7uuwtO2jX$ zY{JFhZX}1Xk^9GR%2zhCCqck*ft$UwIch;VjT7X66f5XoML3S%2r|r0nl*Ss-7G#S zkvwp(+%4aSk@h|;-6&?i!qMLLNK23$7{xuBgd(hsz6z(o6*1S0m-`a-d1jYARN3mV z0|vKxCCZ^#)*@ugo3(X6Y|OfH?DpB`UtM4e8sz6UkG85ZMUD2V&E|&F|LsQuN!Z-axP?R-lWf#4T1U+b=kon5#Mou}k4Zm^|k^;x)$+Z~HS8u9Wn~ 
zy(?IUf8L!SnI?~;UZhCF;0Tp}_EJ*XkP~Kq*=wK|RpYoW(?~bkt;(U3_yKE6SF;|z z27W1e(G=9%TX!KL4*tcoPD!jjPWD{XfZA*6`?KWmI>F_i2HpA6ZM^CaU(Yd{H!OU4 zUf|x@@sH6E>K8zy!LsIl7kEdYDjZU8Ik4Kq=X6%M`Hqy$!;JBp1&ERa|GDYKC%VRS z>@i#I*T?e8CC)z}l%6}e+b4|iXg|Mbc-Xd(6~+)BB&)N{y#JH4F!rvbL(L{UxgFic zL%4vI`Wzq6;!gPFd~7&@ZeWDp6bu55+-!R73yC+x7#ztpX@AM2O(utuR}AuxSqr;1 zg)XB3=R@hes|IzNyYle~2_GtOKBfR~gOe#@vuHSd*%Sj=BS|A6EepI0daz$VM~byu z&mAeX+_=yB`j8m_`IHVG z^-HG|cWJ4TpH0+`SyMLOW;4IumAaQ>`9{mMPNO&%3o-o@6+;@O(29-kT*tdlmD8L% z!Sbtl)7zoEH&97SJXHR7Q%+-%;pKsJhlpS7#Z%@CRNKxq8%62<7!W(FEqlO)`$YY7 zp5Bvv%)8Ii*a^G0_-w^*@4dt>R|z=|CaNFfgSH=?%|sU3q$Q2+%QCZxQh60Vli@tp zAR1%qse8#(?rCZ9vw>k<#zRSUc==;oqZA{ zB4z34GYv|+7x?l7A3}hC=8KGF$YxbHQBqCAMj?HDrTS&?zL7d3_WZr}P$O<9R060? zZ-VVG%s=M_UYHs!Ij=3=7u(?-es2QYR1wu}zP%pG{4`88L%iw?mZ@%~qeB5Y-JU2y zuiHv9j;~3T+Qa?zr)Kx;_LrbuJ{Lrg{a@6B=zs-$jqIfngjMwfz7+>*YIB z{n1$9YLA+^GEFX){47nThK2p!Sf*GYi~ZSGK45X%ol(M-jhD9qxfOLK9)|tODh){; zT8j&J;DTKZO`|z!_a4iBFiaNZD*Cgzn(@wOX561)3j0z#V^ zLZcKjm-+rSbyBHXClt(yfqXOhPVO#0DtvskJBlgrVx_vedU4S>$L=m5Cttwc*ll@{ zu-W{AR#S9=_lK*_s&YTlRRo^TN#Xsv5ww6^KK(F+c$6+9>NVNuZ_;~Ki#ZB_w;>!` z_Unhum~E;0U^P~;5%pJgwr@%w4VglI*Ez);fIW(f?@NPjIDA5_fvGnO&#pGsfM6)V zG)UTYW=eVhuP{(Ptr`}}^-A6AuTGF?{5$lYhxH&Ay<&fUE}xK=4QRvTNq_OF^L-w8 zyO!WC=|Fr30Su4{4T&yAnU8gvWO|iM%nUX0`aQdF;!R`Vx1N;S^2+uHCS%^Pvv~2n z0i6(=ks##MH{;=9Hd?9={nmGG0wnFt39(ekh|>2SW+>x@$xb|PP!@==wqi$VAR?p? zM|$KY7niG>XFq!a7&JLE!^z3{f)fRdTLj`NnTK%C7CKtmd;UHH_<6KCbcTW@2VGHXSneL? z0RpK_0G_Bn+@B*piZDr$ei=j~k(|&bujvG@Wp4*{#a#Lz3)@lzeEy=F6V4Z4n`PMH zuW0H&V+Is@73YKw#x4=8rokn{6h3KIrMS$N)eYypBZ$mplH(jxqqV8)O|$5IJfo&4RkCd=LXH6~+xyPH+Tafivt{ppxxPI>C! 
z=I=LwlRP6{!f5t*LOnY>?_G6fZ+Ui7mhEw+G^z3LkPY^?>yy#?D?wJFWIGZrVUTv) z1%ybLR8hkV%vK-1YT$Z>0#hs2fnCfpMI1`XY||-#CHQy~V%cTqg!-B#x#ox|wVEza z4R98AC|<02%OrF$9Dcz6o8E7d1e|H_UR%k?RHQY=S$?45nVu$7mQk2yXpp>x(uBy% zsp#C2Q;*Yn?~!nFiZlHQz`&fq2$reIl&M!usSGk%aYAhE!^YIZ@nrJ)!~pVTyJLKL z5Kz^C-rC-tNz`j=W)0|+L7d)9k-Qv8W<(x#SR}Yc)y9op#!3SB_1wy4Z7XU%x`f`D znK$yzN}l-Hb<7(J^?1?|#l`jNNVoY}9gE@kZz>AJ2#R_2$Is$4bt5u-QC^YvG=|Uf8ROd zJ7=7G?q7Ed|A38St+n6v&N-j?%=tdkCMwL8Q5XZ?Q_sqwO%kd z`m)`cM;`i$kG+oWOOFv--Qv+_d=_lN&@Ph-3>~o>AQi5dWpW}|?RW%>*&`~Hc+X9T zg+RW;E*V8dnO((lGhRPNN=6kcZU{>LZ-qS(TK>z^Vo^COUZ!n)E;1@?H#)= zWXYvwylG^jE~wdHpT@=~+&hbtBw*7AbQ%8`G-7)S7UX{@-M_LIN^@{iFA2Py=BSom zM|Oj0mkJ?(NWM&U)5a#(+k9U8wZW*F{=2zZkcUU(qzzl&@DUmsTUZo~1O|Nj3lE9; z-F^eVNKxXxNgBMpy%izy>y*w~P0QH$xVyKVwmj3e9=;jYsIY^eTXcJ$Z3{V;Ztn`h z+e0NgkBBr=NLW8=Fov#8^5uBDt=opiGkcxhM)TyCA)KdspJ~BT87}nnChB5hVoqA< zhdYA&okLqn9jbrxpA4qDtc%`YcHbA`1Ny`37*zOF{z@E5nD=_CGITUFk`~LJ;o{W! z`zo(vHH!myXV9s7dutD9|C&UU;8}^3GM(tIXLzy^{YRq8^Do8ow=hyt6<1jXuXUA! z5um<=FSZfXpzEenfmV4I#+Q~dUFozK(L-8PkcD@>3~#;RNDmcFVWg)=KWDsq_@ZTs&YvKOsf$@ijuzTP$;jwSSboWG;tLEfg}1 zF(25QhP$G*eVyA`D30~<;}|_6f8NG~_4LJ{G=@oB^pK)mG0h|()%JLljC=hfpzmj3 zX!sXe2M99jcg|iFdt54+ELe_*M$4$_4Y9ieVhGLWjx>|MLTM0t9?(L=PiYIXowtMA z`wib}@WTc%DITn5h-7kh=5S88HGMSsP2<*5z6BbdY#|~fSJn|h^e$W0-+jk)gMPsT zZ@Bx?gtHaOFI@UZ=jaHbA6o(0D%Q!dT8f^Td$AS9B3w_>QAoZHp!5Y{oKP-t` z=C&mXx0@J?dqDWbe9LIAA-8-}#*;rT{uE1;1Q3JBS5}NsUWy0K=it6fX@mQ;q#0}M zemq)nPt_+5&!L;weThsA^DoVJx+UFDRukH1hF**-f@~mF!NBCL^QtCCiSYhohZh@7 z*GqgR13!P`hbUyVKJF0I*tb`!racaxV~JcQ%x>k}spqu`ghCR%lY87%(yvdY7XY8t zUNM=LT<rhYD8ApgN;A7+ z65L}S8yj0Y3Eq1PEY#G2uW%+RZ_E9LCPV$_ZNcaX`+Lh4b=|{N=laYZt8WK@pG)q)cs}yWBK(yg3kb1_9we3-S#q$ z*3rS+NG~=~xDuXdy6jKpcpWr=`d66JDDH{7S2Dl-pBZ!cfyP1HDYIY}HEC(-o#vb2Trx1+&EZKBD=P(`-E{>hT(ACw1at=1c(?4t0)wj`S~LvL(X!1|TG_B= zHjh|^G+}FN`kBkMfTkl%?6;qvAFj7?snBn`@lgRf(k34YN+FB0u~d1Z`P@=XiN!NV(RI+DF=7F*U;lHBkjTppwGv1s z%~qQl{LKfO8)##^fQ*{W65Mc;A_DBI+O^LnF2G@Vr-Q1!@v0pdWTU;R3NcMi<@6l1 
zyiOn`NgH1WyfHNM1%bC`Dg_9YrfbP_(U;LZ{nwj>kN@OT8cc+VIe-nP^>3DI76bfL z#CmOCpY*~W&LwfsV{H}P8A+|_(m%qmyXh{N3UmgDY_sA+mlSoB_D?{0mF7rnPHlRU z*eGeeM)mC{Ef#8nA>cxt)bsIscTjtp&N^#${NC)TXvyzClm(@w@^W%O?gON+09V7i zx_s!u#lay-_kf0oP%#^}3jTY~NRZtCByY76f{$%qAC}Gw?2vdmTH5TM;bK>h@2U31 zGa83y)mk$VxFNpUfGo!gTJL+#cB|d#9{~Z#QiqVDO^;+y?1;w$~zZD)5eql7>wUM&GL z+D)q2k=b;eu+2N+HUC%f~wF;{~YVd&rD zDy%MLSgrSfrns-Og(eaIn2z4nMOgGZ6IbVT$kp@_uRan;Q`{)vhMU3{%~u`8dAbUE z^yfHNRq=&!C-fk@0$-a^s>4T=KdvA7FLs10f``}u#e2pqof7q9v}L`AApbRn zGp{&LsnCgIc*oM(yhrC**KlqVtRm@lG=@oz;rfP$rIpc~ZSL3o;%^_R7umljf#$Wn zXTbYbP}c{xok0$>*A5s?OzlKKf6)kQy(=FFTa^N=>{a=fD;#%qIZ|qH(zrzCFAnLu z>**ZBGa$MC)Or$u`-Jh}3WpR(1gS1$SmFz#dB%X=3%30&T`X{5ypK-12uH-_PB1E66g zHv5k!z-9*m>(1+TK#v8Gx=b%b2Lqw&T~c7lOE2#4g+)bw8M(jj%-O3xHHG_*Vt;?XNw;)F zB_Lwa#+5ha(a-;*x(VEs4i{arpbQCx9Q$4ZJrKj?o|?u+xqHT(1jyb>lz~{X-{J;E zpk+S)y#Q0f?gj=8=xIHxlrl=7DWXGyMZyQ>IQO~YPGP1 zsRNyA=!d5MemAK0FDM9t*jl&|69#aI;!Q4VoZg3F5uk_H?B-_6x_&yX#%%u?Hd1lz z2mYWxDF*KLf^alA#~e^x`_H=}G(b`8imzAJclNopC?hXCdLIk2?}w z%r9yzQ8sulsmbS&Y)9~|{%#=B`(COaBpD{+0-oU=QVQt-{xpJ5;y2DKK(hJ5Q* z))ee&F`?kMu()hg{;TYlTk2}M20@tE(Mj<@1IT0wsbBUCV!PljIjXI@CB zV}^p>bzdtvuYZ4CIKN%w3H0=27pE-is7z)8?)jZcwAW zBDj#x<6h4)-pFCnv1u`z=Pdpwi3Fm6O%31!b^{+CH zFimfffu9G0F=1iD;d4bl|2xw)I%P14AtLGEQ2v-=bv(K*46+k2kKKk)6mpg7G~uO| zMRmMY(csUMX+N86NYbFo-bDV;@a}Z!a_l6LY7%w|PyB;BWxzJ;&(+e5d_A1e#nfD) zf=0W4H<+z&#{1f*J0xQ=1THs9}T zt*n5FlZP7ts~y2L(1!j^3xPV|ya~}_BkE@D`0l~1y|u`(4qcM8Ync^O-V3 z8ENUG`hAvkOX0j=O}g7qmRotPy6Ij0;Hz->eX(dj39T|}Zs%$xoUzR9bS)U{Q(IKT z#Konyqp1+gtu|S`d9L@GhsUmOe0Y#vF4`3uLw~cre~C84;f~0!XKCwhu=pj&ByD=Q zO7KH3x-1?djhqzRe#3YEe9TLmlKN8tCKY8sCo22~mHZ0urG7|!57Z=b_Y$MSwzDlJ zZ3_|dn?h%Ijh^R~V>2tICA}5*0f#-$-?6lUc^8eUYby1oS*cX*o2aCu`Y+~YNVrU@ zDV*xGqY|kPT^fK124Yg{PZ3uB+P}tD>+y936Y2Bk)J|0T?f(|>9^t4C!w;7m>VS-I z=bw8+S*?D-ej9Zcn+qm&A%fn2Dm(p_XoCg9%>@mY&RJ_#^%$AXR0I#vK`4B)t`r5h zeDb7)8R#|Uk$?z!X!`bp05IcWdOv%7Nxw|pEHMt?%YmI_Q6&u|a^KfqGcwv23jJen z0>Nc^AM84W@F!r~ZVEC{=|f6b3rilQ>op<2JP$F4bE&gR^XhNFB< 
zj=Gc&3y-L)J?t=IlY0FC=jWm1=oe#t_i_H-(sSg)W>;hCFVP0eW; zA>Unu@HlkWwRG!SUU|Dti~$V#`=aC*oZR2gnoHlA0E?5~WT5DYp~40vH(!GQ!*+VB z-OZ{A;OcRCoJfgh6 z`}#k8F7D@pcp=aUYesoHo7`f*efUL-U5dc*?i zg_Izv55mfSCVTIS3d2b>^TW7A*ZkH2r%EpZ?75EI6beYIks&(5O8O-P^u8Be?5j{K zqgeu#m?QiT0*|?X_7p2EEv*?rsIZ8LU)ELAv>8WKy>7N*{-nhtWPKzqdqvc~e8B2} zpud@%S8Z-K>aa(XFgvO7eeJn+kP9ZuR(qmDEJ9>)ctTYF1mC81OGxBL$3D&kN4+=f zeR9Z>Q?1Wt1YHWn(Eha}z&)Cgq%&kh_4{zrCv3Xq2Fs3jhY`ojtOJXWf`Zbcs1kX73?h&?k3_qyBp zXbD8Pb$OYuvR|7rd~~Wb$;0NvME?gvQD2xwx=eoLBMnO(_d~(S0{5wS7N9r86wT-U z*IS45=L2Lfi-l_FqaLF!Ra^0_r7i%04J#sWBYV+_R)BsE`4FXMHlFx6yzFE|Q24Xs zSs?M%!Fv#W=0);+tH~9KCRygLXc+C6s{V-t9#XVW`wuueMqYkk6}Q0qKYIeXFG#?+ zD_<(VpW8;n^-d7q08Qv9IIvF`2>%0?$Q=jR7d|0628P|fJ<+J3UN8cHZs;4eDd0cY z<#0&9swl*S9SCjG3(QKQ+tnx<<4Q4R4 zrMD47WNA~yxmI(P`6VSK(a*~(Dvle^hgK$nv(Tc2Ec{d8=z+nRfzY$o-KaFVSrR>m zWJRg|@5%O@bsCX)4#FGTDjdbCih?YZxkl5*G=ki(sqrs?gUwh#=>T7k2dIy}0(v!5 zuCRfc^|;f&Dgu951%EUg!urTJtK22!fJWz4 zyb>D;g669Ro6iO%f=ip1g#;JY-IZT@sS@6ru_e$Dz<|^&IyyT0ISdHCiuo~eHvk3) zra1vz;g|QVR__dNBm@8^j^+_IL=ZMAJ2L(SW7C7C`W19do~MRKe;;D&jE@P3N0T#NJ&aYPBdQw z8LZ-Tq4Mi0&0-)w#G#@ADkqQm4X^?TEynOf6h|S!Sooj)^P|CW-65t5!0$^zpNOu0 z=>_XNd)ydU{7kNGVLq?}xF*rnbQeP!Y!ElrZ>);}n06;DEbOLBFG7rky8F==4b-G7 zkG^}?XqG-{k@s% zS<>dmJ<4+?=FsAsqEnTX=|jW6WP|~>xY|yk+2kau(~fMrThNugjotTL{G<0pTx8LL zzXIKx+sq;TZ6T5LR);TXa_`=;6$Ni{NNb8ywmx4Gmg+wntJ-avlw}sAmA#xSpm>qO z;8X@w!m|)NAS1W6F%R=}e!-!A zV2HbP@@qy;mSVL50qU{ftg8ZZCxCL7Zj|1U224~69q5%%A{xBQlH7TT3tR+O#Lvv^ zY6yP!r&m13x<Ur9H< zHL!^Ne#e9Xeg;~`9p%;HfCCqo{pkKexWySi)<9m&s)d&0rsJCm6rpJr9 zG%W5qJ$bAx{$=d2zWT&k z6pC@e55h%ZCM_Dr$KGK!`gWL=m4&V&qE7o?i)&GHG)ezXMc}L0p>ypaJYJH5owFV% z*`X1tqoSkKKP$=)@|BjOHILyx(O|IeQro@5;Od3H1Y(XzJgzFLzcBCf6@>Rd*{9e4W|fvN ziQ{az6Hf_I@p0}2tNNd%dJ+2|+-Ibu49wpo!)Pwdh3V^oIH86J zTz2o3WyQ6!VQBxp5P-FA8VYg4f zY3tTtQfd8Ofe-W!Lr3zb)lwr5Gqd%dNGcJcXRqM*%W$(q7A7Wqz>P96GFE67b1gbW zKUP@*#U>S%NkHo1kwHsLNRRg+K zOa~L40Y@lwz2NxxK!C8Vw3pXIflA@yZ3SUB>xc6-$Fmg@J0k5y{c+5tO;>ZHnG%oW z!ruNqLhELG!@{q&lZeP#U}c2^{DBuH*))EBbln5tHo3gxA)y-76@vH(LqOaOjbIQi 
z#o3*CT2f@Ds)Aq4oGhJOGSDsZ+rq}iL<69P+I#jdr3;c`j5X5aMP$g)5WmdLY5s#k z0DRoB5#fi82B$^_F*CW2^|?KKAqTLA1QQDmW@}pL{Wc!V>MGZmMvgWuCO6__gH@QU z$9{oU+~NacoH6a>3{}VbE<&1h=nBz2naeh*=BezYfaC*BYCFzetOdXV8aa&`i9fu9`e%2#J~iTm(&3 zEcFv!pzwhk@(OUaJ1Z*?8b$4jy0!O#y=xb^vF|v~_ZFB^_Rg9ShtC5do$oE;VRnr_ zP7x~?P124nGL6|1qksei)}MtESq~8Y0slH}|xRdz0ZtX2xdE6r-@| z7I;j@;kgo9`C3nOco>c^C~^v~Iw0AaNP#hLAC0c3haU8+MpN+cPI!*kU6K`3hbsRv zMw*!nxS15xEBnevTsRoWHGZ^hM6`o|*D!iYro{)*{pPq==m<$GH5}B67r0zo;0o(l zP8g}t5^ZkbRR^T(R9IO1&qoeU&-rl?t;u2DYbs^Z_3_d^vOj*Sd zixDd5*a?wm{_5GxI^o|X5S1SqH#{R^&9_F`z61(2kIXVof4N|qJh1&GEW*j@cL0|| zgZT5|hr$~;wfzclH_{W8d#(cQVnn-}pEd#T0uQzKhBo)|?PxK5t>|?Tn~0}uxA{n% zx!AtqndaV=#oLD&u{E6zWFTy?V59YXHF`eXM9-qlb#F9XwquiO;Upl2we4YPVF8?) z&T?!53cO@JQrjLtspHqLcHoFj7GP)rfw$mqpdXB8D-VZ!ARaI#rJ7S6q0o;{vIIhB z2tp_PxKDiPd`hRsZ~pLV?GKvmJFGD8fRX;_ z@4orP=|M$9KLDp6`}8ee3g5884K!qPJ2=0`gqxNfdVJwdLPFvyd+YsNqNg7T7adm` zyY;*%0_VaU@`5Vs&?m;0YxOE4mGKi9ApU}h5ZxzURN~iNyYkuhAr6y&9L(i zz@^jQ5ySAFQFQyz@R}}{a3h1nVmcccA~t35>Yo7SLC*$!Ye`As`yY9$xWME};A@lt z)t*SuT-?WwnTXC3Yjn`(=Z{0y0u+z|E>+>?^OnTulO%wU1wML72H1c|A+*`-=j|sa z%VlKH%#+760YDqxW7zZ}zWEW80vTR^|M$x2U#XtggYX@IxR%6ayTvWHBU0bBehS3H zS&OPCI+Cu|OO3ZXqnV)3hF;civO?D?D7DiUB5tRhk#y1EfRc;`3k!>@qeVt1g2hcc zZs4c>@}iMIC(Isu&+8*=D=Vzs!b%_HIYA<|v+Q@aOI|l8tDgxTb)l(fgo$C9z}(yW z9Qm}POOHU7EG^*NHPEjM%*761$+9eV-?Eh-gkFR`{Xup!uT^0Jj0?AXyd&(o1>SUj z)F7fZsxvt`iAlub)UYT*G{&r3>blwaShYa13F-| ztDWDTi^SWJysVqk`@Zdp=S-a&w$aLJ0jTUfe!RE07r?U07SJ-ssDbyMX)qpGP@v$4 z32b_uR*VO9luyL!F!FiSW z32NIWyUHTd0rh@gz*NGY89~p`E%{VceI6<&$s;YF-J6m)-t{PnOyf(FDQSID)i398 z=dR;r;H0MGV*JG5^l7y<@!D$6UD}w;Qz=e-M^hx^dP3!ZPutqw?R{)yVnR|{EDce8 zj)K8n9}d|&4KfO$?7i&$wAkoH)+a*k+Dex&oqM_8BwP!LrPSLIo_maaeex5W?(a!T z_tOl?5({3DLkLcHc070_oNwD+EBQyG)bVPhIaj?ECG{3Mv}-*9BmQ3^nBZ%{X!2b% z@>2AcZ#}IM3LCAeAbh!j)8c+Tt@`3POk6o}N}CmH_U#3sa@&a@+G>|;Q`@FX4UOkS zzM!rzMZ<&aTZ6>$>4f#a6%6-XnuWI8;$5D)U9QM#B#5Xz7fCL*g09u5#%gJlMy$KN zE_U0hVDB;MMAuRKYFAwjXVU#$-7bUTn+ba88?a}GxxoKSnBAc}4(&nbn>DKpX3w0j zJZqGKcCDc?b^$Ewt1y}2**j?C3A02P9n 
zS57zNbJzCCLyAKWl_Utha{j+WRJBfUFU$1K(How$hHuA`H?~J_PpI)%eP*AuGG;b( zUmr*5ArEbJRdqw;}A}=EPTIu<3y3$35{I5< z&SCgrmnnOQO)?joW`{mw%VsVCW^FM(ALUQlzH>=H{8eRBV z=vYIKNfw5V`q$)HUih(${8F=3??46?r&bfRm}ET7lVLxnl!BPX$+)d!+D51TWU(Ve z7|AK735uk>xP>z8W#=`pKy*Y}96x=iD{!1pS87C9(#WbbAqytgW&PjkVPCnmFi?Oj z(Y^7lCIr_-I`{cL-xRMwO% z*lfyrEhF4W7@+X8%3Jfc79U5x#wXdH8}!64hrDoF*;t_3qPrNC2ZS}`<~5ybs9Id; z*2(>G!gZMZY=V~~@Osc~3zk#`Xjo1($mph37f&Ek-(%`%qT5$P3kiMt!Wyo-hqvP~ zbv~Mv1KYh~k zmYBx;{C{P{o^{!tDzpKGmlAJJY|*2zA9Idq7h+=m2Qsc|c1q58GyKlIEKs}d^-$fy zvNEyM7~TCfk4CWDwjR(&p@=QGl`)8@rLP6r`?lTyu^3stW9+EQDjg#IQL0(qFoD>S zQIV_6XENp4LAkongt2{ER{pd0CJ%OD_x)^`u%x8P{z!}K=+3KoWHLZ~@yfPeDMVK` zZ?3u5%5qh?>CQw~>1Y1o-8X~S385w;CD+Eegfu+X?VsaxdkcFo67K*O#yntj(-x6M zmBQ&{U{BK~-}T#|7J@2h$=G+|gk!xH&ongDZ|>n`-%E%n4ci;9ZF~b9rP3~MK@+dB zU;=(X^OalZEeB~@ybtV}Ikw;!lh84o^j9d}3DqmNp%x}QEn*Q#bt1UKP1nLpEA00= zYZil?k%?2+4|vXrx;=+nlZYQoZ_w&gr{BRMi&>#g(9xJtNAVC7$+gY(6au)$jsE3t zxTmpKqH{d(6&Fqodw87XW?R(2g+)~}hghPQ_m^RG;1{G-p}fT1@@eH);gg|p6`A9l zsr8GNea@}}KRG_^l>5mPHyNFW_iGzwD&FU$3Pt@)EzrkKVg~K57BJ)U*6M;3ES;^7 zH{ucxX-O^FUp-V&YA!FN2LfM@JLuF`WD+)|RHBN&?e&qX9S%guX4u{S(D0}ki@cVNF)bY&?4co~d9sB_E9cOb|pCmt>%rt)wB%4m5i<&qf zgZ!(D{Ak^7f*=clrzTT9JZ{Fbx;Z_18#iT!{QJzT^QE6cS4w~M>~Cruf9s=`mL_|y zua%hMyp<@^Ow#9LZ)BG%6aGi)7aGdMqnf(8}C(F&Sz-51QS?Jy4l6#@%NX6ibpF@6NrG8s-55b(E{fgayF z67olLdv!kSuLqVeb4WIpnSEF*v4ul>rBeE=3vIuHPv;kq(+i*A1nCR2sMVyU)5I70 zo?bjqh%<4k$?igk2H|~tTcZiZ$Tkeip2IB$4iqy#74|eg#+uTEwC%Yln28Nw)9rAZ8cY{l<(OkV*ov0QS zUAM46jqdfDkFdtZm4hc~$?JAjp(T%gIAbNCUiY~AkZ>^ev`8|HzRnLZ=(2{d^G-dI zF~jT7Y)C_=9_D`=+L8&>JYg4lV#mKu;`YLATN+$XI?@)*zYcMdu@L_Hn*6exAwrHp z8sr<+8zFE9rK|C}a_$P9CJI@x9osx7yMO3%PrJ48JiTm?FwRSuYueuux_np=7F8V_ zcrJ|eKoG^a5wNaKxU$fYwC%-2zwt?MFT4W0c(pr*!gxgkby;e>cg%pJk-q?XSHq=0 z%4QHJbs^ckYdovw#q2cQ<+b%V9M2s-_PORuJvK<^6Zx~byC|lOyb9YfTVVX%1qekb7 z@^X*MJ=)I*^j|j^^1mrqmmwr4Q4eS`y_3$B!LKaBHMDSvWAfOAmIJ4F8{c>uh~wuP zT%GikdKv7Ue?2&fPoqHfT_=!hDYjbAwG0kVc1jU^(eYb5DJ!~Xe{3#DAfLVBu1&~t 
z7#Wmt>XGg;-sfyu0=+q%>c*Dbzy5Pkx|CdL;eAQ%&a-w?;ez6^>-1vEpe#8pYr?96 zdo!P5^x;0#QN{Y+tWAcN4o%K|wT`6A{L{ldBhbNvrlWYoX`VW0Ou+2ARb`WA$hfFi z+}BVTTD`rnun-j$Wrq(`=g_6JO>|RpvK~xHxhkDat*ZC79ZbQ7a?2&D^t#_eE^US2 zM?TOrU60H3edkf{?SqQb{6Mt|PdIc{7X%uzpJ)0ohS@8iswyZZIvLVP0gM94{zfXSblT6}Y%Z;2~5Ww2v-ZY$joL9Jv= z#Q|XqqM5Et5zFO}LBBe1S|4QN(*1hNJQF5CCJnrIxzzI{zH&96^V4b9SA@Hh$xU~m zkl3gn?4j%dELe}Y76kgmnCRrc*EOtq*myNW^$lN1t$wZlbw@y5M}`Y!e(qiW?J&IG zzQZy!M;pZ@6b1BNJb&%kdZvAj|D9R{ORu01eD?bML2tS0zGfJ5G7_0*eJKfCQgmSf zQo8Voo0Cp#RIz5eVzBCNZ4@4N+3 zjV_{h3J`U+^(l_D;>+vl+28;cO)ub;rg?OFM=Uqr7he;xl>C{xV@C7gj)iW=)9KMV zX*>dZ3o+x59!7IPxzkOj)z<2X3!kbC8w}PAal*dOeP?gJtH|Cj*bp=P?p1yBr_Mj- z^|h<~5?<^zorQH`{UT0ii%zYTnO2aCOug3C<#Z z)jZFR_%fIGfs!7JR^x+YHA`Ft`Mo})XGhvGm{i5GHD#{a;L?T;^Sh3-cE!OtZD{{{ z3O(-2Q+pDgmLzAm>$zHE*-Ua0`qY_quwpK3S)ikV=U-h83mJ7+ zQwAN`H8LUC1s8t$B$bypKd5_#e`8F4OcdPzk`ZIJht{HCb$8OAOx4t#g$**ghBTGsZQ11s0XBP&JhX0H#^>-81w+YET+F3Q^beU}( zz2?9Blf_hK3jbcpwdC}uoecQ!l#As* zf5~{vg8zODpQ;WXE)HR$LBCF06PK3u>J_c<_o(u!I!M*>x08@3brMO{r zV^eT)yw50zg~{lesN2a--n$<1wV5o~SvDUj zPCruxZqwAzC(l@JLlK>MxI;7LQdIPCV_wKdIZbl~-1Z_O7Zl*Vk7zfLU#@dsKvuv1 zDqQmT;Bcnc5({C;kf+jf8qbn-ojDV&kdNmy$`&KXVZJ+Ue;(qPzoZsX$HONWKu|ND zV*BE_jEsR#U1!^DHILZZq8oQzCI5BX^4Q-O^y6tqi<|oZpNuZ#zW;d`+I!keWpgfp zn+?5gqfTuv3P*$eV9a=kS`PvhK0;(ycG%TKH zMRaiWyauD^5_UYV-!Y}YPg#%}j^p+o41|U5iVk<= zmwn34(vC|XSdK;o40Ri~#&X~G)UZITcdIp7-gA?Dsr^zjm<)6&G&|s)6!*W~LYn9D zx*9#hk--krGzOmre{Y9ymA<{cHv{5=X83R(Ohwm(}ku8 zp-ghs#yoXv*LVIUH=_Xf;&Uw*`SFu5XilJKYA8)@E%|P5VR?IB{d~$;zKQ$#>H3N5 ztBWQ|DSPL6Lcyjyr0^2W7>9&FMJELwd(Pw@3LW^BA7ql|T2{?f@#+7Tw-*Ti zu3_Og&@MYU1mA?gc=z6$ZaW{p5cOraw;IhDPP)66SBY_P(e1ItaIHW>mGiht5vt-O zrB}Y2z>W3%a^yWellYW%(7ncGVSIQ&kDq?Z+oNd-I106tbusqgw5`W039{X(Fl@Vc|JI5X~m^y+Rj$etAfoKR99p``d zOvO@|`L3zct^2uo%MFbrpG*yr!;dABj18qtWEsW~ecRrc&fVz`7d_Z41>_T50Mfy6 zMeN2yuQ9$XiYkE2o)ej8+~T2iBPJ#~Nt~Q7mT9kKz=H9ccbz9 z@-w;I=aoodqK;#tTDz>y$4Og!N)jZw{8s}5TH_#4NuS3_VW8{5jCT}id?;k(?~3yV zVZy$t9LhHou49He3}1fecNE9pv$#Ge*<$5=`>v`**CwA=^>`K3YRRW*sFH1zQ;5Lb 
zJ&Ltgd*UkIaCOLoRhKDI9sX&Ah~Q~CpbXxoLllZN-&wSr{nvhWEX|T*SkBH8% z8n11^?l&2E#f%z!21d;-Ue|)df!l96Iji2hnRhFx6SF20WC92?`6|$g3y(UlZZ^7e z@uKMF0vCLip$?CYMN{Am-gMUa!N2qc$GxQ!5S*}mvSc7Q*u9;d7D29D z(9Ehnz-i-qwYU*N%t>I!s|nJ~9XxM6P2R4o`ySjGxwO+T!1?ZVm2Jb1p>MnSFSdTq zB91h3DVKzN5*FwBmbfWS>UBa(KrhoL#cqVr|$IyGjof_}F`9+6sd7e@%%? zo&W;-)&|p~QH7k-?=3W!&m|_zJ;-a8l`$h|G{Ox*(C1# z9v~0{qoWhSf|_lUsrGK(%s%ejPa&yLK&k1xHm?tgv$Cu)bby6~)PdE(5=!Z};`fpz zJ;$8bAnvi3*1B4~+HB6i`8?h{u4b&LWNVOPkWJQ+NX%YXI8w}rB#tDA#6)j6+0h5$ zoAWiKW?|>v=TZU)VOu;n!?Th2>8hXATyM?#WyiY5uOHxm(2nQ#g&sVn2epomc*=aMdnq zx0Q26tk&7LcC1neZTtM!v2O`k(kd>D+6S)FNaiIFy89OUm)%^4GCG@O90;h8UMy$P+YNer2hbYo z<0jI$>en+#yDzQ1*;tu-3QvJrrtg01w1m@i9lfmTdFRaw8TC2(LxO1{f9S6xh)(4c z{gSgml}STwD+dEKX0wfxR<|-cVAF0WTBk~)qpIBpRAIy4Z7sV2tmF{U8ZLk8oI3B#r+g9WAcYfy4pvH zQLm6ep!L!9D!)q4bv{>=XjMk8%~KS^TrF8j#XmnY&zqfTW$Ulzr!dx)o_`yb2`LV% zhpy^>kCrH;^w<-_d6sMS>b1#UgPy?Fn`ncCFr^0iTonOIW<=kKc%o@Gyj>~NZRv_m z0|enwny0?2Em{WSV>Fi1^2GUq4%i@&hIht9!ADu_K+#55T# z;n!J?k%`UWhEy`nhPXk!TG7b!s}_?*l0a%nV2REhk{8OBqu<)L8T$LBy9{Ph>*+Z^ zoDPiEO5&uGBD>zVsC+})l*3}wYjIF>_a8MgD;m(ND`EPqZIc5K6~#Q|j@zf=&p-i9 zceFB=I999$T)f&g!TDuQp2Ag>g3Q8CtR-4}wg<&XzpHYQ4!+8~x)=>_p}6kGmceP5 zGNy|<{+XONUwu|%5WZwKp$dkc-e(pKEF74DK%%!xIw-M+q<4IU@1UNeHGGAKOcXPg zq3=p&#^QLKy~e6oz;*^bTR9Eq(^lM?gR|FK za&dp%(&;2}8U>pQ;nHc+aF?^77!=>i7r?n~U0WBW_x1C@PfKPF3ZRv7!s9vj$|(fU zuRnd{4D^~c^}nPoIrccSff9TbuOOYPFR_jGI7*-XJ3vOdX=xATME$W5!M4%^w9=mA znc&&jjpCHXOxf!@tv|JY>ES;si=aK@#5zl3cDOXWJ#gzK0~ov++dgUEb?LnJYpH>c z*!5e+$0i+=q}M*YrtY$9>#QG1Y1nq>Ns5k!aCn7*SG$hqoimgK9r^Vc(MwlY;dgR> zx=qHAQ2@-vZUY`i;5EO_E-ujG(EE~&ac+3j$taC(KOwifMqY{Q=%!^`FlvDL#-kuC z+G)B%sRmMQe;WUt>XXlk8ErwUzvzw#MTREG4}Tpp8o)kV5@lmnkM)=>m@@2$Z&N&Rcu;JW1cp zmZz9?R*wSuXTejcMAYk5)Uo^R=pxvw!Oh$-gbXX&vtJpf@oa<=0rY+`$Xy;D*|>3% zQKn6aTzS+P;RzoGI>ray#**_hr94P`qJ(5B8PM)SbjxSBX>^C|lL|4f`PjiJd~acM zM9qv(JzJo*>9NS?RLsVvwhvzBZ*j-M&`iEZG$RC_s@Q5Z=S(~uQLxIs=z*x$JF(*S z;mLm-R;|TmO*yTcN!pW$Rd?QJxoWcVw z;|?dIR_MnR@NMt6WvcjbX_nb7rNKieve#_aZr0npb0J(T^NRbZ^#yaH3D>2%d(V(q 
zdAAeHk&K8q7G=0FRF<2C`=`CCM|6^j*S4!v$${?PdfTlg^J$pp^kC3vx%I7Um(dvL z(6&D=l`lU2=Y9JGM=}iPy5u6UpZFyVnxiH??X>tBGPT>vr zZYuyZ<@yslL%u|`z*i=O$yb9FXmWBf=jf83pI$dZ%Zvtt7JG0G*!|z*32b%c9`#ER zg5IAAE!|6AK!cEIje8a-G35q4ou?*}H}+5Y627M0O-cNA7(9w9fA#(4=~7LuVB}(1 zsygA1M2pZ!K>w8T5PaM$TLi26{qO;}sF3lI=(Vw4u5r&EpWZ#lZ`y{>rvU50z(Kd} zdUfG@GLypfVf+h%5sKBdoJMG>XBTUr3axAazsEs8J2+{?{^#1&w4mS4Q$9>anLSC7 zbc)lPhU*RErd2?$1zE(Gm;O1=gOG_UOhOd)`Cj6%(6N1Pf-<2Rv9SNDq-jG)y z+-%uIWCf*92=65aIY8Tfe{HwAP_VH$u{`9-Gtk;AIYhI(y#K@ATSdk3MBSr}hv4oK zED4(64k5uUxO;GScMt9k!QFjux8UyX?hbd7{Jy)sxBGI}UH9QXPd&Y6s;awBSJgge zpM3yQ_9P1a{5;nIDl{IW2VoMJQg8*?nj>g<#vZT0m2{WUxxK@&-I?Klbu{6K^zQ6M zYU+g-_5{>qi>Ytmi0SBV!BTZ?Yzpt^L(V7q9A-E+;>@y9!>HaESLB)iB$B8PhcB-w z+`WT(fnq7n@Y#J78EyCzhNV0z>hr%G@7`E{S2kO1t%UGg@;=!cJ# z%2?&gosZUt$se_8G+9nt(ju?# zmLmA>?~e1UoZ)ar)Ev4I0m-HWIt{HHM<`TX1%a#urBY+L2zQSW<=vwDSNgFB&=aZO zSgp?_ID6;MXh4j%`!`{HTWm>P4lfNWi=InC?4O-N<0x}$Reihq(E8;o2MXceLI4e; zN6YH#Y@cALBW7K?grr0ge8sgqE`GF(9G!t9adJI3;t>fL@_HT8t1q&;)3*x^tfcx| ze)9EQKe9&3hHQ<8q|=MWlb11tNfrEh-oFj*hJu7}ki|DyHzb(#8@RfZt?u|gNRcWRUX|DlIPF+RhRUf&ueh-lXq^+~2aeM|I zFRPs#U47_gm+KjdrCX+Yl-i{WdD8vvq}crF*WFA>#pzl#7OFd;-w44^?*YtG{foTE z;*z9ZZjMi}EQ=d8w?{x~kKM#n0d_Z*GZG89bi_ zo%dfj*XMHJ_cs>Ihju($R=YN3C;}lFFk+uK)R?N`h9762+$4m*c0{Z;_2+O!i(&KJ zB|b8l&*(7@DnDFKz#RTGM@;BiulPv9W%3l7Kr)O-`Rja_jrfJ?dRe8iw~8uFv$%A6 z;;W|0!xsz0FSVFusuflV-bibk6?0~&lq+1f*RB}!p|7z9muyXiGD9|55P*@~z=ETp z(sJ#_BgZ_okJ&<#-OS}994$UOnT$)vn>O~#eb2Cj5FO856Jt(UWt!oB&U#9wC~k_c zpADsT=$KfYCsON|i&+Wp8h;~k3wA)46fB_d%u21(7?i(H)#zOI+yOl*lz3ViK(@& z?RckaZ|n2^4qoG^;2bio+L`ZqaE2mxdKPJk!lM$Ms>fPRt=yeqD9mDn?DK09B^aiY zxeU@&J4iaat`}7nGU*N}xi$C?I`8IkUkf$?gIgMwrN!n(evxKarx8j5oeE=1b6dVY z1k$e0OjS^{?7~-nRetp!ayT(}Bd2^=;Dv*GcLk?@`OnC{v!UvKh5(>Pr6o|cmzoUZ z^q#&lk&K4R_4{PQE%TYGk{pkk-l{>oLG}uaaTu*R;vF~PfFK=mbGt!kY~f*fAC5^} zRB-|IT+%~14e;Nczd=TYw0_JT{d}U@REbv+G2xl}ia~4c$d^h|_GfrQte)+a z;ydspl;KDI--M{aBIz6#M_HZzr0*`+M?*)LzAo=f1ce5)hXsXkrR=&t4DuW*KDog9 zaq}WHuN5&*QaCpT#+noz5Wc9uukUJp@9w2p3k0dYB?Ex&C^W+^uD~_;c5xRly^-U3 
zJrlvf6Qn#zL&o~9QCh%F)?{_$@&M0Q2IVK}y)y&axtyWo>ZhF+C1i9cK6S?Z17ha> z{~N5k{Q>Lb^jtY4Ip4jfwe6tJ%6Ga@1w0}DCu~zjyq_yNd@}v|k%KKRKF-4x^z#=4 z;Pv&*sJX#x^L6&TcyTX-%7uVKl`NvfadeeroDzuL%4s*yinRkO<5oQo!g{>M4e&bS z+NF;+e>9(c!PtIeJzCsAkVw7TSTYnU7=fZd~`Y#BF~NSvqMTr++J50ASnneqJU{~YYguOq3WoR~zh@$o(}v+Q@%NRu2bOj8%LEkCbETfJvZ_8a zU8D~VLdkl9tY#+?Uhh{XbZ#RW`0kp0**#=GDle7*2e!M$4)iKEpZ2^Bg-^5^t@cl@ z>idN4JaIhiO^TMjt4@g+T!SB!o1|cmWR-{UxQ2sts(y9y7m9=pgV#42!VNqB$~tFS ztXsib)gK@!7Kg~{hl^4cg%h%UhfL!BPW4N2`|%c&ppaf@7)F;kgt6A7)S_NNP37!z zg_A*2<+lL}46RRBW(jQR{08ahA@XHkX|14VY;FmdAMTRNE7+HfZHOyyoh! zMTkF{Zs5|w7byl~s>5poHkh0yN$^n^6&wsHaV*#861V3;jcp{M?956=&uvhZmvv*( z&k%a+muVAerLRtA%%?VR@BCg=*)RTk>FfJ9W4e|EO_fUOLQ|j4)?C3gJZC$nGw4mh ziu4d_wgW5f$y;min~x$~8S$|^&h6!{FV$CS8qITxN-KmhTpX=-vpaEZ`P^=u|?XkkfbRyU1IK{c+163exBeLX`&Drf=;H{IbjJzF>=gcH{RkA^ZMdCz?GEcIyz z*_gh4Zacr|9X6(DxBcO)$ruB_&3O(nIE0O2LN8!ottDK!LQ6YvHghG3cGzx(pd8J#PrfPt;gqh+38x1KhTn}j(3eX zt43q4f6Hx7TjBk%_sEp`Y^VIB|Hue_ujoe9EL-fehyF5w{l+Oe`&l1seW%FZdo=4B=f$@t_o@5-WR9!8*!_lZ`+L;8n$(dj2hJVED*MJN-kE zH1n#umaJl~tynabq^F%*bN7V$?Alv`Pc}wAzvma0rpzu+xvJW7YQgHN?z3Y?#P3O+ zwN-!BXEtv+-6vs-#W}Br;>pS70zi5lh)Qu{F104|2Hge8UUI3sl4Jj^v#wBde-?*( zRZdA!2mk$T+Syeo92~Tr*jy?Q{8C@;VUL*R;+g{-VX3hyb#q@WT!jK)H;1~V=wvXH z{KOgJ_5J#)TI2O1T@RE~O0tIITAQC+=rX4zFRkT*RN7DWmf7)B7UE2RVjX zPkoVVLp?6S16A`GtYI#CBi*>Ew-IC4UAl=wWo-YJ**Dh|Kc;6RB-FcIj`JDvd&J5f z^P5-*v{IjBH=zz^TOtGINiq-L%z`xx{_JV490j^pxh|8sCuy+FsoCUaZ?o}Cufuc2 zBMOOK_3^1Z{mPiY$rXS1uFow&kFRE}S0*8?tHX!1xNy^3_>-M%HMZT^h4L4PUe&bM zp_y#eTP>{*|9Y|D29IPJzaWq9RKoLO;C+Ug&6r#EkPQp2`*?gy+_7=$%+b?r-zt`B z`Ltmpb5nfViI;)vPnt(qmnqC&)x0 z*T1y4VV7b5+WCI>xd=?xlseKVZ+VaZourig&2a z^12G6x~iqc{@)O3Pr~Q{fdT^VX#&p=h{+W?2Fb36sVs|XHhI+GoilHvy`lT}9U>_C z)Nm*w1+MydamHRlKeX7|%$k}i8B1#@Eou~Tkz;~)+rX!Xtl|6cI|={R0(l0>j8C-5 z(n+Qv(&;n!=Ey(EBVh%pZp4vetN;L4SnqsXHmsxLjZbHc>{p4$x8|ao!rgCyHS}3N zEpf2OEGsdK5r^7L83{YPV@10K%RILx(5VsA)~KHPnZMv8Z)ts$*=t1(#Qxi%fz54v zUySaK%6;ExEH@(t-@4{2`Lvm%tUs8}&14l9&)OQRk$Tp>d4l~Yq%U9)Xa_fR56z$_ 
zD}-D8ynN82CI<7>hDiXzi#eU1o@dg`s==Ek{v%n8Y_x(d;5>sd`lHOnV;p3h#Doqr zyS4i-7CDCG15R&H^Lm`>wSV7bvq}_8$YsBkiZg3wntUMw4cA`tx|zQVzrygaUs-W8 zcl<&1Wr=*zrY?Ns%>-77O4Sc>mM(oL0$c_L-6z{>;$e9-`FqcF_X?+p$D|#oKBRMo zP<(71j@F0Gd$of`0c6oimDmK4W=C*VY0jXsHvRJ5zM2-M%M;WOn>gw`&R<*8DCU== zp+Q()P3z#h*tP`HJM^3@en-?Sx$26k)(jSx{-+DTorwYG(&ll{k%gBp)PiolcwG*+ z`_`Z>W&I=1Wt>!;J#RwoWQWd8k#axIY4*UGdWL#VG% z()NpdnjX3mcDq(5ytN(*-cC;h^&bJCz4=lLrIkXB$J$=`k%42g&XG7ZTRo+T6SE+` zQvm_{gs}p`jSxEpDXZIcx7EYa2bZ5`XPj_AqnK5mhlMLQ|U zhgu(w(%uK$d$=2}B+JnQYiQ64u6urKW%C)d9#rCxH8*!MU|>g5&8Kpc!GJUB$$s#A z!m*Y20MPz@pE2^-O#R@^k>Sy}_1qW8*u;vX`q5_%XxA_EOy4rj_)YkZe1qg}uc??>i7SVeS!j==dyJjMjY(M(bqfD~?_^XlC+`+pA#sa3?aJ*Dm#6?=N08 zJxTp}9oByI@j`p}_9A1&pDZmpA<&iZ|D6R32*8t@-DEYPMoVGc>9TVnB=9{nk@4Ny z9w>~30^skSTS7coD|c1J0|4RkQ7-1nnD@x2k43ERo{5deNjNr>Q#MIiqjUSx%N<+| z2fIwiEzmUzEDkOfs%{LmiBR7JP7bSy2cq$$h&ikZ8UnCL0|PeXU;)eX8zRXfYiUUW z{L(jy`H~*%-txrDnmIJ;ue8YTFfIWv)Q}#W(d>LIy%RhFI@mMu5izCl@c!{zBw18` zXp{kooxQ_@Qef=v5QVE0DsR}u$f`ynFNlQq*uP1E@^B{3ZY3_FgtpX2b0Ao0|*w2P1@orsEay~>oDGpP8+sjE}UfESe@zHsv z4rN-a)>zqUN2TN@$k;O2XAcY-JiECwoV=V#E53j%XQy<>7UXe%14Ke0HP@%e&tn742w^aaNcs{ zNCFEnJ?4?QysxT?r7I9$icXFw3;I(Z&cfZ;CN zPD=igwaMh^wW#B295rASqxmbWr|qThsn5%Gu0Td~BHR-PVI)_>@x0!4=Q+#idh-4j zB^a61>%shC#|;TA0XWWF5zMy(ckaXuyal`(JFlApff33Tj$2R!g|TB#`0LZ|S>r85 zm(VTp0XJPno-x4Mbg7^*jXyn_NFg<68WZyF&j^s=Y<$WIGPxJj?`5gh5I@LRMBtb3&6OHB&J|8-4_;&p>=FiW17XtA*t5iQs!L|}O(hQ(8Y#%eTteMVp6r$7h%R7hnx%K%832<*B)4!O*4hIXYga>{P$LJ7+!Ea%(a}iq^ z%@(2xJLgm9Z15D<`)-e?u4Er)^4I2Ms znlHW9DWil74vAdOXIl~qySYbS@GyG6(Joj0Xk`8>iceFlOiqj|bEfk0U=Q<7u9q*d z+T=XBC!;G!Q`gcxNrhMv01m5WPp^WSBXe>hC0$$9(BjhEM{|nHA==)d3@18WrC^CG z$=}L|h^k$mFT)Y3h&o0D?=iEIq;N=_tLS_|;-FYJgLeyaRq`NKB%&cxE53@t``j$2 zrg%icO4B)C^V*lYQzz#ht9zC>N;uUcm71Ij4sKR)wtdgX<)nR)Jw*Grw*;|RuecnR z5@^8sjZrpGBfR}>eao9P+cc@`B2~dRIkxr(e8xkFU}#J)2*sTO8P!u%IU#hm~o3xTb+)j)B~zFVjpA7qcsMx*^!vbgcCdp189Ye+Asv&@*1u@&Pv*0d0n0A?~VUcsk zsW_a3*fe9Cd&)59BY)4(?;6aE3~#okpYNtUUK*OJW4Qs>aZkQ`4Q>yyg37I$@Sq`A 
zmxq3BHCw&nf>k0tVI)y>73O^D_*exN+z+p<80qQfYhC~{z%CX){8D2)MPN(SyVm@V zzB-j%feV{q7>0U~eBYg?B?ePS4%-Who}*BGnKmGe&DYHIT050ut-WOZwCS4n!gi32 zgkVbs4cf-Y(uyu+Egd<0xuU8!*bIr$GI7&>2H{M6di;wCGSgxHs#OliKU~g=Go!^h zBFexo~r=KWSGx2If68e7y@ch@< z|LNr4dn@q&fDixwc)J?dK!7JpubP}G@9{4;0OIG`??|;1*pC`|YJ~&45ZdDUdgT5Z z4x{YDdE}DBbyJ?-8zNto)Xz@CX4vq=f7^M^5+m2Mm2yHTt*~MG5K>$GNmw`}h#tj;OS+!j9(h6(qC&-u zepZ@PBq&m($r?whdiz`T4=;uf1{?d4I)r4)c;WtoQYi!x_Ws*M{ds&T$S>*)poZg#SGL_K?3BdE;PRtFvic9S%{~Z}}tCZ*&ZC ze|(XYl-Wu5wc_(Q**Im{vOb6Dp{iqs#fxQWAZ#RcT#W9w!f3B=`x#AOYC!KV)0e0B z!)Od=o6!;9gdL)kmT!Er?M;i)Xn7O5Ad|DwSQEUXMqdmk+AVU_5(b)e)aD~xU>gP* zcCJZuKI6#OozR+myoCh%F&-@3K9Hr_uE0xe))Ry%;zIy)=&v6?cGM4)Y2Do>F0c}n ztd`~rLee843c8eHN6<0f95#QSj~g{mtsTyIE#cUJ@sPNPN-U%PQ%*+U&GMkj(x*>- zmQ!9{;^EE?-k(_y*fN+|WoFAHl>ApMwT*0cd6T64f0VFB?^JM0MfkVwK`w*VUb_$3 zW%HI7kDh7Du4i@n52%kVryX`*-C&ZLIdPE_wkw>uJ#{+D8o_3&ufDanW|1muh(o7s+QLhKqJ@BULWrVOHn`1@wY?OG|e>q43;u4eDcP_6tB`3AX6H z{&t5_BGtxyqUOQZCe(FITJ!4{w>R}))XP<39_Z2+PFuJzP7r)eT2iiWcK1`HaqzM^=S>~qZCgDXM^GW?5((n79CEUXeKNEr5f+`NuhvOmcmvwBX`MR zHPfFfJUa~*B=bbXF)ynGKQ-s_m|jK`*6yTn@t)h;gWfZ3d3G3G(S@)cI@X;Dy&s?_ zywFD@#QuUhXm#x_s>|wCS!msbw~9o)Iju`Qs7CFMN=y}Ty9ogA&{0Gp`h(HaQZ7Av zZYHN0Fr;i^B}O3664|g+j&?O~IO>U+#It@xDb>v9awx7`N;d}uFteJ6|| zSa-qWFJim^*ojkR4K@AMcXar^O|-4^-*rATz~?=?ruCW94Qm40ftqqO zp(3FNgbNxZ?H@3X$-}rcKO``MKA7P{2OVu`nTKv?lhH=Q>?c~cW zeW;Vg*f#(^EI^qBE92r&D_r2ll$GWvR)cz>5X-k>UC<>GtFP{KqNSu0ISN9=pA=`7SwAv0-?)crJ zXg#BP5|y6!s9JvQ^kTCV{+zl+WE;hokFgc)#%5eOmkmY$DLRHvmeFn@m0FDSrw^%e zDwa$S01&J5?)llxes~6-;9Cz&&l#x5;Vf3EZW_!tfnXE`j>ow~-ov;>ecYzM4 z07p)0+f=-E7H-m~0$`yH0?$*iO`Ss)1wK?n&8VSkkycjr>nv!$OAn znYb4>$aX0CuPJa|rc?Og_-?!aYhbY=Y!0+dW{V#oYX>B$#`g6LCl2%nC^$Gr+qWEg zY>U)t8n7sm)yibvq_Q8w&@)=3C*)>zEi0Uld9`il$XZG1C$#1p(F^hd>SU`?-e}4j z@%zMX-#`Fznli7hL5yGM-zHq#`O#WpDs{tXgUtBcehpBS*7hFM%7G_YjpIGC$Uv~x zO3I=~1><%uQlAiPC@KWNo7ax^()8X^vUI=3_Xa}5cdJXD+qC%HC}g$= zP}^e5O7N>t4QHtVQ^d_+6n%vr3ErS@Zpm<;u^+{8=C4RbwMPdD!e`3Dtd|zDC~z9# 
z19OD>BK*nywhb!=Idg-N?Jr+=qan4`?*)gU+$YX}sD+3jWO#Q6)S&ffEuDo!&Sf)8 zn>+4ybn8@*TCNf}E(D&OHCj?gpIiwA1BX_*wzIsQoRS$iI`VnsxICLOy}jp3exqSf zEP(PTUG3tCz4-4pTl}d)ib3N=sd6SkJSe;5+q($}3{Ly*C5jx5iR4*5i6`<`m}u%IVQIrJqSDIHU$y@Mn$-=f5Aq z%ZiGm4Q!OBItV{uvYbw^@$WMufeQsGMe`R3WQ1+cn1JEbn;K~oq??YyVC-8#1ENTjDk77nSq1x1VSWqm^+pnCbRUeSX2 zQ@fnUrNL|W%nXa#p+o(6d(;*p{8hd4K~0_wPMem^mTkTN?Pb#k@`;s;i3LxR3580E zN;kto@0ba`ms+&9*ZK=SefCw&9$$&z0S8|lwWOAeZ%I4SiWx;(4h8#uN>f}xx#o3I zHrp-aS*PLXpalu83)7h?SL3?XkXMQ0S^2iC?~Z=R8a#A+R_FVcVrEpZLd!|l&00f_ zFyK&h@ct-@oPg3a@c}(UnM?7Uh5j$cj6|-SlPZ_5t?`%xzIl>Ix9YUHg{Cj9a~qJ_ zT;`91%0(Y!>Xx1#Uj{SO8qgrr^A{I$Zo`r|@5YBDTk=#>Qg|P0_rLXl&c`l2k`Hz_ z8|;UBpKBdL`cH34Vs8V6=<5UhCPBOW+nxtJ%vNxDMRE=4u-Uaa{)$5WQUsGUNH?=2WctNo5c?PFTBEU{%YIlUV6> z#OC5KYO5c^k{nKX^ke$^-fM#J=9YQ!0WUf-lLn0K;Mpj#_j0QXGB@a+8kE zbUd&?h^CPrBu!lClpp>oQdX0)o>7y?x;^{!eSeK6_Y1LFZd^(_`g8Ypg%Y5Bm!3zZ z{vz?kFNOe%HoBq(q&TvSayxo?XosZLQ+dHAmRN(it>IAx{gI3Mx0a^vSQ;{L7+ zPAuNX5R~}t!slFCpm$dA5q^_(in(lVwxTHx83o~p(!jJ-BW_wcYF4=6{MV%3jAe-( zia16E-f6Jjf`R?-hVM4F{(l0(Onhyn-rl@jOo4??@ruhzeA8I$-Xu{Kn}u_vA=hUz z!>5XX7p&wEYl<=vr`RUbOon@tJ6i_y#OD|~7As-Mx_=ZXdZh+K8jOC_T2tAxauYZb@b%J;@emm53~{jAIH z)l6AgqR)LFJ(#~x)~m7}moDVA9qmAPT)+u^zyNOr2mSndIFAT+J}zsulskyRVEKb9o9VhmLAE4yYWorSw!hA_{ z!}eS9_A>XvFm#l5?M%+ky!iRCDUzm+2@HJ45P+#%S0|m}%#4ftM|hH@${J8dl-PzS4UwreU>ov-@K?};S;u|Qb zNk=uBHXd{GxG2k=+~%~HPxK8#g{aL}9tYoufz5fxlEx0W8ils9=mFwih^JF)${RAi zqp$w3>n&YOql=K=afx}!Tb^-9X_$&vH(b#JLhv^C4^SN0aFp*FF=Y90Nm*Iy-5RUQ zLNO!o%yfEa&|C2Kb@loU)ze8>%4bY7Wrqh&AwkxDC;Tik$ITC|>Cu zm7Q$p>IOnG2tQJ>`*@F|L$)igHcwh<4l}MOmTRrdwZ1w!PUykoaH~sN>P;}kth8}$ z*M7OMx``{+D0@2+irjxxPW%A?DwLNsW^#uPSQ*C_au$_mTyo-0CizU}UP0j*XSXE8 zIAi6G-9(wR;F{PcGc?C!dY_870uY z###s`Iq0qLV<~7BPFX6u{7xehhl1{U9EhVORa5gAzXSFHCVHQd^Xi%_+k3r6E35q-EGB{`B*4SD3*`o9Db^`^6BYXQ`=F{5QIdIt5V#PX|5#Nm&VCN}iZdWjq z9zoM!uT0)kc3Ewq_v4E|E$RV~&l1y05uw%Z@XH$QxGRQ#d|3ROT)t^mxAIRlyqob=5V+&sDSLyHLP3r3eHmINqO7 z@4TE_)PjL6Jon<;@J-`q+{MdF-qXVZ2d8me20E^r3Ao=dz 
z#FO3&N4<=S4L}iT<=6eRRWHjNV$Lkf3{IlabYj4nLd5aX9q73#q;83Ug)A{_QUs01 z(@!RHWiGo)t8AZJyEEY5=BLJ|y*K`Z>tT5n-F^^*d%d$aX6w6nF;`*s8h`q`Z2A3y zX3&~>nbkdfy9rZCxfM#p*zqMa56<*4T<<;MG!*~{@V|lZ>OnvSfW%N_qRlchhZw-D zP)neX304Jue|%R7>e%{-BS*ugdn5Ea&}Dap0|^X^C}c*gVsT69amy|$7JMctW>Q*| zSS9)_b(jmf^Y-t>PKkohKK}|;E^iZlcLQ<3U}8VadzjZ17H^>Dq`Iv;4)Z@;09h^G z_ScuW;A&jQmS9K#d7D$U$eg(WTwUR;P8C7`;wf%2p#ErePJaH^BEjJ$KKXMfwJ&_B6APl9@bJHtxO z2{3R{r$+0yKC=Z)HB`UFDGgM3#zr~K zqkTt>^!~43A>Q24CJ_-^g>Os4yncO~hQ^QmS!@B133Cj!PRI%XyJoj}~U%B0XqGLznn6CVYJ%bl~UD5IVK&XeDDas9>~O(K>&@0 zw39q;J!+6J(D|?qqny|pHw>A)y=Q$@vd)uBIo09{=Oad^Fx;L}-FRH?G?hjB5$3Cf zqrEzWAOsLe{OHYC3f=H5Cl4%jha({?AK(gV>DY!PfLpm=2HuD) z$e2J%X(UhsaeS4G+hDXnE$7*M5kuA#viZ8dZO^G;5q0xBO~%4wqE{qS3nbO`*Q;X@T4!mQQQ);h4R)D0)iObZ?i9#Zzip; zv;Hw#my3S)bQHt8Yvj8W3g9&<9s{&*e0(nRI5^8FcgV|@&*`BP7k~KFA#as;ETV83 z@YeN`3w7W)hol0w2#igD(X0j}}cB1!$=xuSm z-JfQ`R3m=b@#9s2d|twMh_)Ev-?J!X1W{`Qe}A#tojw3hcpG;iL2EghqoyScST!v2 zX)4O3_g3;EPJK(*Sz1MmJIdy{(-peqpN!|)AEDD<6dgQ4_O zHUsh~;4yUpnlTDVNYv2L6DR=n9)623eIf&j^QT=yvS`r0Nt*!cI-9c51*m(zkv*hqro1x;dn zWjBrH^EYPV5}%?w%$L%KRtb0&nXVmW(0B|jqdC}sKsysszM;dqx*98rHu7z- z>2$EIaZLmn9_g{Uu<8fx%4}WVN7S`ykR&2tg$V%(iJN>Pr4_DZm<@Q5V-~^y%wHX< zq27QribJ_xV+HZ^C0Zp|Dxs6Q^hiQCF7c|?w6}jUE;edF`L7qjcbEZHgWDMx43Rf) zhoY)Gc8dajS>qg&j^TAhTG)D&CNQi4!i*&e{LpKk7Y{{A&Fk>tfDn<;m;vvMXi_B` zh@(97xOgfqW5*c9rbU;PG}J0D@63y4ouY5Nk+8=XzC!MhNjr_EsH=IV5ZOR6-w*Yk~6(hHw5qr zza)+-p={XrsHTv&0}XtFs(9$&X2Jc*!quE@qjFIUi5FEw=vxRt|MIpPJsT3!49IBt zwP1WZPMk37(V|H8k2`W0aXF)AxxB{+1Rj6nGB>u9E5&95rp-Q)l-t%poOMTcj5ETl zy52?RwL>r`%!&^hn}2?>lXpy}Bwhl`MeobPrxc?zv2fs(jL%QZ@oh8^mmGiTdrX z!4KDAzxBc(!b-*d9phvUcSvZiDV-fhB~mYBNI&LmtPRr-#YFZ-O>QWUOpgf0EJ{nU zMxJiNik_AtW0S|G>O2z9t4BOE>K2()b@rHzKplL&W$xnYxeMjw&Cs_QGQB9wmFayCkZ>I* z5nEXaM|}`R?B31Fm=Sl<);0a-eWLT02@+{kwe6}54O&N%YnrZ4pHJuFKO0kLE|{qr zP&eP}t)@d)!fy6lN>rO`-#@_oqd>f6vzI9{7a%_S^Ca$S}OL1gdQgA&*-mH4f)^!8Q}w^5@d-RV!= z2?XrAwedl_^O}Wu?j5gc1sLq5bQ{if(nWkt`!v^_;X(4o8Lk`IyrUTpn{T5OEkEe% 
zEw2<>oXX!CSqY!^lQ>YSjb=E8M=`J!J*NlpB}QJk{W84k(W%V>J=KL!?;iH6mc;dS zwJMjGYExrMhfDZ9a_i(4h;XetTU@?a&N$o8^u8T#pXop$KPceStw}KrVZk^VXM3RP zLubYcyNQsrN4k5dq3M@rZ*$A6R;3^_=pB`|PwT-)bO?+lZ>=cF{Cv74?EXUZE)KVK zP~RL~SMBzGbzz%?Dfx&{C4|`}JKP?{d_u`{S62j_IX!#<$Jcx&nVCd{5Or%B7MHVFB3& zLp{TBp0=9K`qb>ICFZP(Q*X?;7mg!(>>ut8%>BE5>_(?9JLse}rc%l

`RCsMA%N zu(-JUYJg)YszbE~DV077Funx}TkOR=#e+vRIQ4O7uu%yZ)~m6Y3Uj(CjfKjjDhjdL z&-v6}c~+K->K1g0n5+YI#Wi%Fo&hCQU6T*vL0LM%??4{Pm6!wsgf1Ax#>s}XG~gr( zC*QKA_Ix@{Cvj+ zWxF?>o#ktl!;o@aXksYR3Uj}alD&n~JpM@``2doQkt^=Ts~j(sMqeR`O zJuQIT!j7_Oco5~b>Pccg z*+|0M*OHzRecz5{xuvJ%36|Yyk36xhp#C2PsN3)Yp%TiY~Zd@SXdSm(xcx}+Mp<%HO(D?{;ssO zw_73rgOkmNSaza0x`+J z=r+fqMVu9!oQ;VMXZMG-c=DbVBW?y zM;lc$-{FMHu1-9V2+d(B|9~TESuh?$YJ${^E?p&MXl-6uIGsy}4lNTWl#eb4ub{_B zTS1-T5PEGCDoHO6c23zaf695AOPPnyBSC8sY zP%7bW7~zJXq^z2%>Pxye-Q8Llq%=NKA9ABBm*(x0wcq(OMhyatE5Q^*!tA1I5~0+O z9dMo(BQ3<}X-Ev7wXK{`Fa;|sne|V7cLL`D2nm>6j3B|82Sd%4lP>nk8ag@&)AfXxC^NX$(YzrRG`pxR_0{NW z=!Pw$2<+nPaX2D$1r%8Zc^@nGcTyvMVH#Axl zdW6|AlIm`f`Mzdeq_LTn)vZdONN;K$b@l)s@f$l>IT|<0o13uP5r#4z{}IpZ&3KyM z!WxEAL05u}+(6=Iw})sL@d*1>k^GFFpI6mfSj8tRXwvTQo4fQDheFiM=3ieR|8(l* z6ZyRU?K;e_c6vLZDTuu+pJ7Ti?&VAxL?)*v1JbgK-@w4&wH??F2?~-#l)O?%mA%{T z9=uRF8c2^}Pifif*Ff2Mk&j95)9sa}o4|jgJX8--!}0F+HCfGrL`&wnSqxC75n{vAf{qW(_{7ldGL$lnit`qzIw z06@R(o7aCQ0cYg@)9Bw@@&DyL?K1@Z>C-EQRQkr;-BY|=-gbQ39N9qiaY}plMyj3- zSFa}i;yy$bpJ?H?0z3dj>i>P9q)PoAt)-UF_iPb7PkT%lT5hG~=Gz@8jH0JAtFK2d zh7JKg1j!V!1{If_9%su;n3?*xz}CaR-d^#?NczzZ#tu=IZ>!rvxpp`jx=4cU`P@gB zrCro48lHvup7(Ft&Xay>pe$d<)C+C>p8C_V`@fEbKafBANV2)V=4+|#pd!2SpWJVj zYw9rEKd{Y~DbI~n{Ma^M1=*jU*+#UjkABYt_of2(*RpJ&XWce=UO{y|Hr-H}bR=#P zbU5^UcR{8-KS9A^1YG0SV55!n`D?n3o!T6XcNkFzM1=9bw-o}3w+1z^wup`*a!SEY zSg)f1nAeNJ3~)AqL<0Cf^@=6RGV=1V`_yYjTUP8s_@xG_G1_D3Fz7mDla(24oSYz? 
zE?7aCpdcNCGNghclcF>o+%O^A3Tv0gE8p>Ohnj4jzy*CNiU56)ZOq_zESuMa^XU(L z_dW>p?F_4pK4%$97LD83>7!Yvg@ab#x(jga-~kQ>?2wj+gJN(olq7oS@BlPs?ty{s z8Y*yx06hMB;^;fFg;w`RoYpgcu=1KRi4e>e_PcR8nb@gw!oK0V$YhM;D%$)qu-uC1 z*CyH_#I7IGJPn%amOJH23K%7cW0-}h?L|KmQ_-W*gcPcdU1A1)a1(oOhOwOuIhOs9 z`R+H##=@dszsS(I6)k~NmBE&Uj+QUR>bv6ycBK9&1^cO5Zx&3G${@qTs^b!@blTX2 zawKaDUeFkl2#9ttcv#SbQ$1hq!KITX`K(X)`n=CZty87)x#kidA_uFELQ`Nk##=2{ zTL%XRj|z7s>LbF#lZOptFspOYnU;BZc}J>B27U}iyNuYmvJ)cv3ph0Kh#zoFf-edM z85yKrHpxA@^TKZ_F~s;7^DAeN(#=k=lyb4maWtLBvh|*-W9iG?D0r8&of$|xXovOf z+ly7!B8J%W{gSGh0k~2TJJI1x>DRka3rafi)Jml~d!ce;Nq@W7yi)gt8B>Dr)ys^F zxN^7c1=Dh#Y8ZpC{B)OtWYe+%JFNZ7ofJz}HnxP=*#C>Kw}7g$`?|%krKBYlM7pK> zC>_$>BHi6_R2l>Xq)WQHq(P*GLpK~6>CRLC2j2JlefR#qdoRN=4myTo?B_Xq%{A9t zbMIx^^bL@d{hArVBO@b|*=Q7kgM%0BFM%uaxeR1dXz;@l4ZXJqm@9;bP8ZHfaQu8) za$i~}YpI-#Kr8r%#)lh$;ZLvN5LT9}Cl&*p>q@{PSNWv@=>HQ=coFRd+&D$W5<`Lc z75J?kEJux>!rt0sb;jQA@1)m0eMmXmynU%;`!NA*Zl;jWJS>=(mzT`;s;pGAgzC@c zbL9*UvPkb%(Q%F3Xom2FoQnG?nIyr$G~WX7OC=6ok!u1K{+P5O0%^J&%CpE!|9jsU z|6Yf<{=Epa{R;;M1dnqcO_?a!V#yJDf+aFmGkh!;dB>83%3(cbJi%ss{%c%^prby& zT|r{)xC}doOJFD;B&CMa!12P$!2#lmZZ$bF;^JKFONIlfXl4k38``LD}-@Y4LQk%xhvLfFdL;HNY3s}QdH z2Ec4#j?4U;X7__<3=;LH$Q{*iw}T}GQzrvS2^#LwR+#9`d7OhSRN%KjN;#sTZo0zN zv}uXOl!I_e`XV5Lhw!NiA(w%e?y+#DhK)g{6;{`ue!Z)T#3AzJuXgTAG(_=cNsl)2 z$%o5ChmZYCR;C489Xw!Dmr=B4=f{_e(8k)ZE%WlY`?D3FmRP?jj`zX~uDZ}!U_df0 z$lItl5_Xz@Ls?f>j_}@{<4yoh=tQIp3>h^R9^@HhB+=M-x_jC6KRTDrR9HsudEE0_ z$(O&ftrx^eN}FB$v9;rktmKLSTdPJF#oMb8?#{luT>Hs(M8#ycunJg#_KPc|<1g~B z2(R8gJ%hFv;&BqDT$aVQH#T$36}yFU4pkf$|Kwr}Nd`iuLV>Xu83{tk@()c6YsdUu z96>dI-f<>>rzvuz^C&DPdlnMeL+}TZ*=j-yZUDjO><@iJkmo+V*|DVrKLjeQZ25=uuHik*YH1N356YEg zAS5FvFR7@2xW2NY9(2~y($dqLKgJ# zoe6Dsd=R`@TD-z{&&wp*Lmu@IPv<;k;Nzmjy#AoI-FaSqO#e(A1H_oB=fs6JRq z4m?tlXyJN)Xz)ugL&n6BZM+6lMX$wMrMtQb5#f$-AMaqFzEqIHm-tFg0*g*`7kWR3 zxg$Tk{HRMG2$QqUl-rGyJh`-#v5h}goRyX3NY_CSGrny)dw(aaPMal&t27|gGsD5k zDtU?XrK0fF=8{LV-{->I4FMe$S{+#tZamqR#@=QJ7K%{1uVgDbyjw&O%yJ4`d;w=6 
z1P|6?pbjs6D=bPKSl&x)NX|Xl*eprlPaSxFo)^;A0`cSKCXETbU?*viK7Q_BGkB?(=)lQ@r}91)hwk)y ze_^ekW2_8qb_`#3RqjcA#7;it$Ub^&{XFdE<>3>*H3{`x0;Y&_aNY05mf8nbCkf_KzmDOyLYPeQf_(Yt8}teBtgvYLp~e|ER^`? zza5_IKy2^&^w3eqcD#w#M2zjMPDTtuDl!FXsLbAZ#qg0t(_cdGkJ$Xg+RBjA|- z3f@K0WqlW)XjaN*qc)G&==vqTW58It99iPj2@{M>H!A1vh2@F?aQ@Mf`sOU5^Sl)~ zv2~$ZQ(rZ4l}tM_55HDs5X(}2 zHxv^kL7mOute<5o^E|qjG?+2X>{)DoE{$vuSEIPbq$9sAHyKX7ep4ZQhz%OG((kFd zsUyW)IcU8vD~aA4Cy3o%HBGq)Hv051s*Lf?nP-9F{Jvk!U~^lqk_25$1?kg=&@02PQLc~EU`xxV@7|~% z36h%mB!|G*4^*3x6DOtb4YW$X-g0 zBR?7lm<29dtGCh*ssc+LvDEBGClkpiJ^c=EmfjdK40FEMs46cT@RpSC9;h)F@&-HQ znK_x=v#a<9#znp=t6T=6@9pEp#e**xsF%^b_6@{}S2z#vXEQukyo#2f7R{`1{MDY# z5D*YB-UY%lSW@-(7w-q^RK*W!0T*_`h+R?twOFY{4NBQQAE^MWt`?}!k^nvpb+bxo z9eKIEOP!`}6xT523C>+X^EQj&KXHy}4fV+x)mENn4$tmZefwe*t7~RoS|<`xN~EhV zO({R%4Wiby$ft$%~-Ufkl`w6knxCM~EU+wB672gD@x5UVBo%{-ZzoAcwTq zy}ownB>n|M)Hc7(n_le&$ zfk^ee*G(F1Y<~UxfC3!}q@el5w*7#+6a+k9rj19sNQEa7Zw=mXrUX9#aJ|)^+S*Bn zcb}XRyC?K-m*1L$>$QJEKlF37=2Evi%M`LY7G|vC!1wezrftw=D17S=4@U|yL zbL1MbDzc+PMu_-%hii#=-f_2kXt{Col%z?a@RL?QAYg$^Mf9`ya?6;a4q%}RAUWuSYa>U`LPT~ zJUmQBWs=kc{-h_zs-2Ku4cN(xcRTqHkJWsQ(tfRhA8LBql?1rsg&ewcb2b1KZaj}N zBhETRabzhM{@9d}rp8kt6o?uG5-a}y9|vBu^;cQu_Sur9f89S<=npR@{%LLzqu8}i zirosY+KLoeVnq;0m`-BGci%wjHP}j6%h`D(2~I8z|1R)QP<;%wq)z*JI9a}NDzC3; zKt`cl=vB zTibdm!{wYF+;j~L39pN{VvUmkBE>WFQXUj+0%|FpFWgOVF5nFBz#4UmO{Wf zDw&IdXAuoPufu>j!y{mRyu|nq>eShzANHU|e_42++Jgf_2O?@Q2P4Y$RhQJw%QNdM zDZ9QEzjRmpY^K_9zu4}EnOIA>#?6wGx=7-Ay5qZ6gXQE8?_-JF<~!DV+K!WG2vks> zxe!X^=p?IfL^sNX=njt71-n+Gx;K4{z!y0@e6`{jlAA@(AIpo~5cHemuN4#*B&~99c<(;j)5?bzwWi!%haC znv!bQ$QeqEp!=WE@Ztq1BJ+BNoEX^{C4N__XPYtc;K{~5e0Q~3Kqmh+d!8}_D=1uh zJO5ckPA$uK<@$>!OFLxpv|wrssXUJ+zc%I478$-%c*rzJYrly7{-0NY;3Te@Egh6kXUqHtAFgnM#EJqUA|fn& zJ%_?BdU|?Fi?^=!vAQT`MY*|MC)T3p3{_O#2nv#6JytpuZX7dIS0{WH5)vF-eOq`A zJEa*`Kw!Im&GQ~q*93TY;<@-mMJjM6yyUuH*E~Hg16L(N`r{#tj{-Uawywv#c<5N$ z?LKf871q(-(B_dDeB_Lg$u+#}**Fcg1b%>5eSXgJhAfyqXT>u%UzWzqh{2x&t^`7T zO(k|2i#h^rk+}8X;`W)D*EF`Z`JZkMk$Rn9`XFkAPxOJZ5KX{rvB}K{v7^;sH$OEt 
z7Q3yxteu-R?e7n*fc`yU{!(V&tx@LZ;qSFoHopY#UBiZfuQw94%5={=9^mNvUvKn+ zeC+J@(=;*B9PbAZjJ||^FD+!>qwqV(c_l0Qlf{S$aY%o%=k`Kl+D%=7ato|(kTH^K z&=yGJF)<@(9j>I^F@+3cN9=UHl@OfatoVI2Q8u`j&&>)8F=}&pCV~R^)wk32kwE>1 z3n&V<>G+mYn?)b0qzOxt*DEnU)eq_VY>R`YW5$4sI1l!NI8e&c5GBy2cQA!BDecy6 zkOXg$)Fgl$+7RTK*4*LrQEu3I`N0H>R7vG^1Kw6H*AYU}tFdBO$&$uy@?LYXpfvQS zsBiaxTF9K?yMiTX6iz@qP`J8y)jNW(a=*WgXT#=C zoie!_Uctp5yZVUne!Z=U8(&*)#FQzuo@Zv2(>}BJ-MWd)=Yh6ZNoKJe`{M(2ZH2{8 zN#d^Zidy3YG=Sf0PQk|9DPr3nW$EH#X|`RP-V<|P8ZHQUa{%=>b#lsvQnz4R`Jc1d zseyW?LG<41OwrVL@B$^CjBTyYweN0))K3dqs75gC9efmIzc6(X zWJzS+{UY*lHfKT2_epM6dzNWViJiWDQ0qK#sy~BPo$rvq@Y#3JB^@=4@BRmpp6o8F z_kgd^<;&@pROI=WR89k_`u`WGUWA}>vkzXPD@xIpbpUj5Xil6mnD6WgM3yuo^t_yH zik*S)@FjL@WA7m6d*LDqZAfNls6}$#X?XjLO0uhi)Jce1@DcbiO{*Rl)q#NQVfBYg zlrnuU`+0m(VK8ZFZcafD19@L;fv$&|m-ut=8D;BTugjpw&6J?$a`w6&<6A}ydZX&j z%i5YAvO3Q~P$D^5FO8ByQ_{^N24lAYrhy;ga|rxQdVGL=h*46dp{f z{dFQZKg4YPJf56Sa*wo?tk3Q#%7wlD61Z&NpVJe8b9m#M`ufe)&1)!d!8%B1;dpr` zylX2}7Qya|(nIlh&Vo>bk<7bTE5pPGjR@$0egiyimyN5pYbEI-t=;eF2{9#q(ztew z&^4?pY)zO(%R<8v@T7+iT7fmODREvy14T1a9Ndx zb}bli0!h*r7KbYwF1u2}Ap(tCjlF=1>vfW9qoo~Mn6;wUAliO5+zb8bbABGx_3ZuE zD94CtWV=widt+Sa-_jmEqo0R69()IJ>^8|5kNcGuM;2N*2(%pKnnbd0MOid7`VSl= zoPeIQv7zeN`zk!B4x1cuDx4|lL|0VUSxDnpD(h2NY*Y?SltW5L8-)j+H^iNn$>jT?+%*9`bEZ?XH(itgi=70>r=RGiy#L2-jAr zwJkYmPq!Oym*F7O&YlRKO%@VvJ&e=*5SmYI%(;uX+dy=?q_^ce#|my<`QCnNbT5pw zJ_;91_0(YDd5Ir);h5t0BOlVTJ9#NFKuxzp1?@BCmiqxAQckr6kO9{_PJ37T+I(QBFy}1)Hmye*Y+D5?8k+bZGwJ-c$0A9wuet z&Hn7<$uE??$wO={zJ+(Fsyh&8EtS?2`tK% z=5|~hk!lcpv>a^Vf3`@uU~FmR4wV~pg6(N&07C^8FjH(jTj4k z&BbYUtHSE-9CV%K8KP79hf#jUy?rL+7k(priV~87S=HL$+3b@D@;!x(O;6QV)G?re zUy;=;9&T=Rb#-}L*;FVX5_yI-4Sxk8mPlfFC$n#)TTJa;DeRJctV{LTA|PF_;jlInw#40D5b{`GA86g6t&=y* z8z`?p*N$iQFMgq;KEHiS*bRKf$o$?qH50#L=d$6UNc|Qcz)B4+94xPzI7^pL4Yg#z zUE~q=nx^!?ev}koLtiqPt4;8A1A?h7{WRE`9qIz3b0nd!|}B&PCbUR zQ>3l=JUkEk+pTOe>(A$cU;7}tS^)xZc-dBL;CBgL@b7LBkM0e2wGK^; zZO5P%=~%S|Pgc8jpNZ;>pCyC!=jSSaCitxIT`q!ty0+;MT=>I>rH*4Oko&Yc&C?;E 
zH2&jvx8ICaP`#`tFUzAu#cckPXX`DUr@{~m&f&AjAfWyZFAf)O$e(m`$}y>+HyyQk z>0fXex!9(T(@sy=yf*tnH{(2L6%gaFXo=!wXg|u(YG|0KIw!CvlCAsDu|EYG)hJRb z?oz}->sblCA+*iqn+vz!!9kso_O^`3?>Xz|4Z(x15)w{bElR#p%JB4bq!nb+kO6N< z&*if9S6n;TPyRBWZA^%X9X4l|+3u~QiDfNVY_G&H{*P7QPixlO+Fm|d=Hg%zV)dl5 z8-$5$Q>z;4)DTMU(;}w0^1%RpGWDvmZ?40XOePf~Lz_QT;g-u^)AQ-qZ(` zK$P8pmj~S6lP2FFYAj`RPB6 zTFT!o20}tGsh%!;<|MoFrp-+<`Fmx)yz*Jf4Dy>Y1=9b(H-_14uBY%Y$NLo)mv8Us zXk}C4`d?&xC>)=}X* zi^_`y56q~;FH@f+YU(}dd0nO6fcm`dFm}RMR@k}$C!@A5sh;{y0zEkCFP_u2(y6Po zy^t)v>vBk~Z5y!@(glM1NWlEYT!aJc^k``OHFQEoSKX~j_USaigmgu6Db-3#CuMqU zMPueol`P}4vpULph{i?x#6b=IOCjNx(Ns@^Y9@{Dnj1ohGGt5El>_eu%75^KzN*CM z)~3lxb>kaP{@-uT_N2v&)-yTr68syAz;5!5rNza9r^0~JoLL_YclI-)c}0K+;S}yo zAmJ%_o^bnM0zBPY6NH&!>32|VTms@Z11g+1iy7a_rV_T9fC`85A)lz@Ry0W49SS*Y zDV=4)vG;`w@j?Bx&L0skwvnWxLY)5ZdIfYl1Z4ptyO|93i+du5%sD)Fq9Bl`(fJrV z@HMnTu3AolEXqLJ{z9qSBsSLjlL;xrbWaF8gBNdF)}f*67LoS6tb;cpx#F-;a0o+< z@V31oadk_!Z#^Ia4wk3+qWc1uM(x>BG}ME|egT*uPJ0u3(8!Q70>Ud!$&4I+3*3*i7gzUi;>-2 zIO=4WzF#fr;!7NCOwzn4+OQS)<)5Cu0A#PYrs%bwh<{N(9m5+3$OmP;y2|WNsv|6Q z{}UV&hHg7HRoa{8+P$@SaI806H#c4hlVNGau`26qmX5y2J;dVG3!d(-xKZS>okJPIZz;ED#@4k!DA(YQ+(o2xHhOZXx+-aKvS=o>kf>Z_ zzNm1WBVxK%BR}B=fC-`C`|JlKFEAu>VoO(MnM#o3LuNpfCQPRnS(RaR5zgt6Y(D=- zA`%r}4Br@@_D3z4O+Q{A%q#zuk3 z3>lzX{Sa^a{ASyr)KXe#&ZXRd_9SSvxY!U}?=k~wECMAd%dnsD2F&cHhSL%St7Ajz z1(*MO-+Y>hdxCY$_(qL*P(^^`R3m2%yXkr7Yo)nvbA;`m35#W zh1ThkKYiOei_~Ntn3y>y;z^9Q&(6Lo^vf2#FECuP{6@p*mZ1RafzfA!z&XwH#B|g- zeGukRuzA>SMWO~K#qK4H%irL7LR3INmZJa{x#H^PBbLkta8wNazhx-I{_xJB2rMsN zk(#>TbP$ZX$sra&hNOEK)vg8#wE&$@cJ0cyC6kIcs8|}=SP2Q;GIu2W`pLXNl)N9W z8HxFgtaF}7-J##bKZ@H!$SGjlu-x)O7WQs=`*0?kl1Sx@n#zi}Ecw$&szjwNIF+sy zG@$%Xm%;7xe5CQA>N&wi8U^jw3*k(OTy7oJ*Y)M20BdHP{H>NJfqx?XfZ(#_r_Z#b zvH%D3j}{ppSaA(oVFLu|rc{u=#xEQDRceh?5d6HJa`tJ#10!W1ocr92!5r`Dy0Q%Y zr#_6RauBMADv}^8S^=j=MXP&4g(+~)OjDC*d2RI9uM}=2?Y1Q@<#P`{e;V8nbXNG8 zefRS^8ghQ&DQwT18ojW?KEIPz;+|mk=v`q^k-+TfRl#bs-3%Wuky4$KM4r}#q{H?1 zwGR&`(HBC1z8p1D;XVz`_A~|l?T)=pPETj<5nOtT 
zWhHU9no+a3MFsBcaS2WO_^JWDQ0b*@1E^5BqS51Tuc%YPW!scn=JG*7t4g2m+D~?N zcH3S8eINkNW$y&5AK0E~XwK=vJY(m_!!IaXnvH#+g&^Foeds({#@uO3wpHi(@A--W-M#TYff5+fj_PMc0|W(z^z@b>eJ8f+ zeI1sROCcP*rJYXahgx`pWzsM|e&KxAXp3tSd8N0XJckV&*xdY1@p1txVX4&FJWV2lPTPD%|7m4V0w(b%nlUD$6 zacO2hlAcI|R^>w?q@nKTvq)hmk8q=l<%t&g0%i2Wus(Z2y^^x6Kg|AC(|@C~`jcpC zu(lui>y!-{3*LNuXmKBM3izk2eD1ylV1sjDTBCMxps102<^2-~)N6x?`MIEZu#bRn z?e1bb{8$MBY91D0fcjnTkZu085afC&-8)y|R;sWiznLvIPXq7ETRxeu((E485{6`+ zq&R}XPOU>fwvPd?LRlSog_Xzl!>CaB%J1^5erf2g9+Zo&zu2%#gJ`$Hvgju=&AP;} zu=20rKE)y+8a%ov61dHY}z6Z8_DiHRp6nVW*Sf8e6CA5hcV=ehZX z0dt95iN9H#iC3B3`b>=VZ$e)D zP~--2C~`;_pb6YY-2gtiWTv3N(_tY=PD%gY)l}y1J}SqaX~s#vbT+Ay-^<0iro;)| z$FrI~T1S;a8~hKo)nn`KYgtViv=#b#8OTa29EW9QN);CecNyn6Z6-WKt5JW;`~}zo z9Tdub74R()_V$e8@W!co>mb#HsynC!^`amMvX)n=o^f@9m~Vh_=q zS?u(abF(YQnC=C8kALkp(Y-)$Vp>#+-#K3XwcFgeUe3?Y3yu~5y|S(9==I@+y}J@_ z%)d-UALzjI^=b^ikW_j6*x&Q>;vlC3luvk`A@k-?~Ki zgz}ax+Xr|tQoY?F`J4t66h{A%Jav^}OXQI~Uz!Y62w9(OzJ4K1^J*~Mdh7s4G)vpL zcVxX{>QE25i_=Ym-K$Bo3tH5#Zz(tRGSF1ov>@rR2g*~7KI1!@SB8+=xuBw!y^6ssmIXu)4Pt6Y0KH!N~Rneoq7ZR6&A&=La+=&k#Rgq{zP`Q;!H=4n%yi(xjltI{C5EphI&63Js-pGb#{~MKN(>&X zjZPSeAFo582P%_@l33ldl-=R-ri0LQINAxxFOaEr1HEa<(9X670Bvzoqt2aXW}THZ zL6>_AvJy1qKd)URGCDt04StP5p?wpGE#0F?!Rn4hR&x7WT3N@33M9e9l8Pj%!PE)K z`1d#;{*zF{*B4LV4w?Qo1@tWR1R-`e*??AoYO-S#)iDvshZ3m#S9K>SC;ZZa zB;PSXK<2i=p^&+dRk%|^>4So`F0H)%ZkA-JPZtarc^-JsYETEiQWaO(W^a~iM{M_e zRrj}6*T4?xlFE5A?kD62q?vza1O&Y;ABenW1w!>3>jaYcpzU<)a1S$1QOlLTCRn@y z?7q9V_XjXuZMZkTj*+=FNdTbEtt@+>YHezKKALj!5{639 zCO>n%aA6U*-l6O0M1td;!&wFm3=9A@&Y8r17-$Q3^nbgwpZTZ*+p9pOvp!*>``HF4<%>_$au^1dmp|y~>E&a7)(^UaM*0B8k|NgWR18b@V_#KW z-4tN24OrJbpJ=<3%BlKMu0<#r(S0)db{S+ix44ha$Rvp`OD)JTkr~>TyV2g{EIP<> z{%mcHe9^(M z2s^0yAo6ckaBll%q)CqK9*eV)aj1nUOdUoq2t{g@3UHijO_t~ zSJCc8Ag2MK)8?knT~6S<+;?c%38$T$BJB(>8yFD;pPQLkw&_TRCAXkM%I z74A6i_b3+$Lgn;s=-siO0LDG0;!+CT+Dx;;nZoNi`kln-Sx$Avw-;h}9mKd!b)JTJ z*_9s>^(=CzkiI1kO5KKjuW>ak0ZcQO%Q#$&APkFbAL{Ubt!8kByT;{-^AYNA77siu zltw3heZ8|CgF^4)1dUnVLbj2LQEWxO(ZBvPuG_2vXe`3F8q4~M10&ie_;10K`H!HR 
z?XGU{)GtGdnnnqyR7)-T@xj!R;)A&m(B>eCe-WzwS3xP1s;5vUoPH7cqIDB9~9y?#*xsmVv5yvfvsZlN^TiA)!tOQ4g!ad`N?#nEN{ zTpYS^GNxr-mpVPj-riz8SqwX9J{*&07eU4-C@8q77;Of1t$dkdVj+vP=1%#Qe3~gk z1DHl%W;&;{6G8=q8?$;*6A7JRj$bU20rojiLjeef7oB0PZH%lCR%aq0{Dy?}kf;wO z#bjX;7+b7g|KnTWz&u&nqVyd;(XwIF9}{k0)%HMJUv|{mnwrk=;&q;cq*7R+zQ5Y{ zJZDuJN@Npm^lM31v{*8Q5iq6Ae9Io+R`gYV*MG0(*PDZ=5tI&Y(*}hPGR+IhF<+aLmEV8&q`*0 zOk+^Quyp@gdE2r3VrTyB!f49j#RMD5+l@{%Jlb}V)6~+we6U0QK}VVlO?+!x5dhuz zN=R6y&=a~}5-=1GW5eGAoti#|0|>xdhRs+@Q-#n9&{sgMu}fv`;N|~$+6QtKnEP1J z$omXw)Fz6nC1b`Y+cUM>cCIwd?nVXJ>D~rA4r@UDJC4u9qu~e1ZsbOl>faj&2KIR) z-id#I`oy=*|1cD6Nn>enw%y*gRbQgVntT;MIX$m*u`%4T1AEBuRP8vo7=LigCVdHH z`(j}iJ6fSTC)W4YG(it3OZ5w3tCfJ-qV7Kb8{GyFMP|aa8dy1u zfvF#{h8KYxnWT-Am5s%)a1JRhUU8hUMC zCKYHNwFI=i<&PP$#CqO5XxtAcGbsaK&Ny%yh$jGqYVYsgTMzr%{|36>6B8|(zQrB& z+P%hUn|%>Q8)i%$mRrg&?gI;BxV1F0JMsqbBR(rLaP#tN8X0Alf8=_9XaB9QuYXT` zzcGBs_1+`e+KqtdqK#i0Pqdmn%CuGZs+>#;gx5Oq$^Ea4KrQZ_pOsY58S@==AK0G& zCeY`2EKw;p!J9!t*ylX6-{8i)Kn-;3x`^&2i0-{*c@OV@p71|x{AkOBJZ-p$QUrX2 zrVMob;Z2}x%1{)GI9Udf*HrEXyR9ofPUiYQroFk6VG>CdRlU`FX=G>cf`3kam1%^y zH?949^b7ZeyRXKXW3g}egRK=G_sCMR7KQ=Y>z?0(qM*CVhpXXWK$0rHgo&>G~^Ye$@UmC382 z(Q0`307uv{%>?L}d$hwA`1tq$FR0}9^ic{LIOUF_b>w@z zj;mVC=}e+Q9Rubw=Q%QetYenfj-|!;y`1t)tRDK_nt(;LM%~c$dl=$*M4scO>#ZUj zLCikvO7yJ@7dak!cjjz+cfB7yOlE`v@yu?a`ScuG+I)EFLuTK$Wi&PQ=YZE<_~qgD zw!rYc?(Xgyb^*Iv-KdOp94JZ9)*AuMq~+H1-~UVKg^KKRuLLs^>TucWQh*)82haVK z9A08csI9@n%ouoUSm+yL~EO^W4<@FL+l-#y?KLq7{Do``njny?VF(G{X zo4`ez2cY>XeQ!{GKRkp96`yWkrl!w1eo!^(`~ybg|KLd7G)+#H8lGB5ma0M!w?6uT zg{~ja|Bc!jlv3)gGF1i>iMQ3`Qw9{YSe?pkU1>}uyNvBPKM4dMWh(x!=g1a=FC6CP z3hY|zS3)6jW6L+a?H-WvtO$G5sK*p<<{wMCU}I;Sa$?{GWjLbp`3$|bBNbpROED^g zJUAfl9Jldt6j#0He!M*MkZSqzRIENgwdLw9|c5U zuL3TXAF*?qR;)ZWVNd%hm*2m=Qlihg9xF+Gi5jdhf~VO9ouXd~ z#pTP6=dz-+=Gn<*)h0OWJuKE1KKi7gqhny;VW6d>r3G%*xLkPyo&0!*7s;odSe_XmH^p-QZCi|_|2jMI=Z z<0xk-hf;-FCc)chZDEJ+N`xGp8%qW$A5$qh%mFq!_%;(*H8x3E1HMjfF|A=*bi=3egE??Z2q$K=0aY6$ZIFGbtko_i$gG(D74VMm^ 
zwn4!}?A+WXIx!d{KrHYpaEDfUP+3X2YzA^?h>qv&x@*Yi@a_1RMLVO-ZxjjgM~mhI+E#YYN@svLiNVyy7d14$H_UOl~k6?ctn zgk6miCg*;8M5_2sjUbp#H!&K3tfbZ~HpPG8Sl<1d*E?+5Mhw16&!>_+#X#6w7b zpHcC{a^Y)fgaRh$XOU6BO;%dlQJxZTp(!^No}8W7kF5O8xKywF53H27BG$(=Ry_Pb zU8Jh28u%>B$cj59sW>QCoe6g8ovM$#wkam3Fj(C7QwxFaV8}53+P9#2i2Lwpq5rp$ z7%rh~pw2VTAB0JqBesf$) zw>tgVBocfGY}fm*PRpBhu3^CVz!!~WRNb}~!ehnhdMxKgh!TCTE=;eg>U!PM@Q8>Y zU{%TQ^DVW}pblT9UcWmt!dcjgXH{TBZGT1#qn&n&V&Vw*o;ovjpQ1ff#;2xzQ{M4S zCMK|Pvdh`mUhda-eZ=0I0I)Bbb)BGlKG5x#R0#?S5|jQcffaW*q6k5?!-yloV}mSV zHY|QW@|JnS-k_@R)jCH0!pVfV+DA()#Wce38KyoGp}gg$a~GwM?v3a)gT_n|o+(OG zZ8YVvi8H*SM*)*H;a~63RbP5abKo9wYriwYs(z6m>sr2JNQ) zh=ug&m{vY5yXvK2dY-SqrD9rraP;On+AWUF9@5y>CpC7-u`_a;$N6gYzYHZFy z4{ls9O+LET&hb%sRo)|wqDXUOedrNFHw^cc>$4bol<$aFc2-th_llzr{2kp>s>Fdc zjh*%NcUH;C+qw&+IKVwyP4|=DO2qS+>)vSWB^UVXDY!KYia-kSi-|LE>QiuDSXDL$ zP)+BW5q}*}=ox+*=nWx`dibj}Hh+EW6r`FMDYK}uHRJpH0EcTqpbq}|n>_7!aIa=0 zdSG^%XB>EH{K{Z*-(-lTQpb`U{Dy6{c$7rbqzy?eNi z3fN2c?HY-J=SPHAQR-eE7w=EKu6{iYqpeT`GEX{%Rv(pAAkCO#LFL%(D`z)Kc8A(zZTap3I1 zt!HD?UNUpNWb>pxvgzPpwkDr2uRkb?#9-_VppdIGyAlFpGT4F~^xHomQ_Is-@?8~H zXK@x)lRrcp-gnDH*mV}f2qEDi$vL&}_iwa}ZhRH4)2iPGygim+u9>A!(?jU6JkJ>= z5kkChZ%)rHq->vru51olOldZ_KYn1$n?V;+P?R4n1FP?+O^l_T?Kk17OYyV{=(;qJ z-x6vjv(2DFqhU0C6#SbozJ;0aIAr#nwbv}r4c#2b-!QD0UUB?AtO;froSK?i)dUw# z|4xRVp6?p@?4DL`_dta?cW<16Pc^ZQc@AODlf@8G@NV7D&Ou6&&p~Ja^ks>5PVEci zW*}z48H`ccJN|!SqM>ueO+H-&HPVb|9o*}r2F5jX_15zS&;JiV2=+Ze-6S;S*wHt* zW55~_^ny%ei}!*beTszEEeaX;;g)?IRDsdC=u!O{0A}*&{^Aj6YVI}9E&OY)Mb%MB zI_JsAcwIH^!sy@L4x9;dj|E4qm1;86q3)i;MU>j&;uL3?t*MW{>KDfWeRNc+pw&h- z2IUi6=9<>q-HM@c8W{-L-0Ql=oa#xjKlLRbO$gITi72u%f6DsM@fxl!HTxRuS;#{0 z1O?I5*Jp8kiWo5EJU>0~_{B*U`Ec|JwBw-{gyQp)3^aa?k!ZBw4Moa0Tws}q?LY0A zMUy)-$y4{x2qlT0KgW3S2>*=}9V&6T3~oES$vd8|G{m`LdyAHZ(N*D{H&eu$h=Y<* zFL6Q(Y|gQ`&DPT-+(#(w#NKZ0i~VQ9=YQ33%wqRpqW;Kj;!S#B)Hs}{SibFNTN|?D z^8*^$i$5v+QEU9q&}3r2_nTf{UoQfOO*pU~pgA|{Qwxlrze;dVe!qDM%c0L;UQ|?3 zXCMsE@GN%~dWX9h;*Ai^dMb_f9ebLGzv&AW@roxSt+7tj+P$Y}8GqUKJTM@Ydp(D9 
z_U!M8*1f$lEOe$0FQxn2QStV`IY%^Hq3QX>(3_D@?~xC%VSEmyW^^I~gMl>&V60SD zTQ{HYO~XcHW5bfKMo;6o5AF8-{nOrgM?Gj%uU`k`Z;x-82-Qclrr)d1 z9^F3+qInoolwTnmZFA0f@7u}SiTP&!j3+ObO9g;ZN#-f_JGRK+t17?cIuCdE`#Wk6 zIIor1{A``dmD0nKQSmEhLF|6`z=}VI+u2+8`G1(#Q!ac5UV;5|pn5tc# zbhW3>vlZ+q0pfVSX|`3&qPnZwcHt?b*6Ul(gQh?2#^ibR@53A$6>H> zP!Hym8ELd0;#FYw*jS$Y)^NI>j*gD9GL14Y@G zyA$UFT;TXg`MQRHzSZa4e6WO;*lRLhAr&~NrCF};HErN=uJ%Lh)VuXfoz&dnBjniB{UZU&?K}d;(Gl`BmJ?ggFYdt_UHFzMDDwifS>N>F zxz;D+CpN1QT--{ET<6=8aI_d1pe^^kR5grQD?t53{3Qt5MwvR7x2dgur$27_%Gt30)$}lm~R*75!}J_4olK*a(A zK@6IY)Jn*A>ENNusS^c8W@gWWdW*qyoM-~m65|OQMI-H!(5xX(JTVz{lFY&0o)QgL z^Zq@79EhHyG_@A`%qbg$r4^Mt1`EP`i}l}x){x~BF?-JFOaz$~eOem=`vt|c0fcg0 zl@t1npGWJm@B3aGPL-~?;tHmr_v!E(w%~Iw}gOncQZ&x zDcvdEAl;0Dh;)Orbm!19b2i@Z9Z#J19R3ZCf`$gO&?KgDRLwQ6)Rty#cj zy}y|R_U{7kzQ?_>gK7-MNSKn?TDrcoa^iZ9H_|$Kn=w3$?hTb;yyf-0hB&y^2)$o2 zUcds$Kb@14UIW5@r(f{z$xm>lXlQ^pAG3^o(N|~HC|?9DpdN3Wjs09@;{e2EW6AW> z*uY5glV^T9$Ac!~XXz@_jE`@80hNm2plR{Oz8v|(3`rGC2rZcoPRsaLX98PYY-XyTvjLX6=<18yPt zJ!I*oCFb z6XP(!F9<$w!MGfbRZdXsT*pFeV@-u^mAxyXgs;Z^&Zk1* zsL%Ly4IqiYWA${p*O%wO#36Gl15}V2fwrC=<70Y^^8T!b{iYXjqO*%9a_qz7y@7~r zL*msgxkIJK!9-B2jU(u(@ZCdrX>xoEoxE2;gGQjq?DM73IB!Ggg5Q18zViG}z77U^YqM_ep z5IR_X2cx=(Y4gkp&mt(2#3#nAU&bjYEW)o#2-Htta=!+GY`OFP+7iniKlrV^s%H4Y zTtBncM8xPMValno(>z($B_IO<4qC^N7mh2?UDmh9tTQZE2z2{SrR0{kt!m1q z3OXLS@_90nL-Hc(D|wfixQQI)nQtdnIIU;Xt@)#NM$~*O-TCq|!`rAU=w;6r6e@Y< zo&6^uwT&cESErwsvtm_%@__}M>hlN^zv-V>#&7S})?_$fz@qvGpU;F}>iv5M;l?KG zxI#Sc=tR*nJ2*&;>e<$8^Rx$WsvSTa z3Y@iR89K=Mj|M7P7<^gtj>6ursBmURmB@_FB1La#G3%obX!>eN%qlHJ%%kPKK4D|c!h<&r3|u02)aA(JDHEWTEITVLm8 z&yUD|3e~P^kZWRD13cO|dA$s$Uv!DBt@<{^*_btYjr%$hH-pmYG!Z~MUy-mgDN7I?Otq|e_1LT)fjorIdY%efa{r&g%lO0!N9wf zQFc$IS7qt=CIOo)zMSM3u*cguAg1Q-AUofE%Ug#NixX1c@9U)SPdE7pfI5zUqREbi@Kal=h02=B_$H{F~$^Or{=j*LSW4Mfu z*3yP5Ik?^StFKP&>12pbIR7t1Y>BUn$g?e;$h0|Mh(XVV?;D${ufT#wBWvH6!UKgY zoyBGRH`NJb)Xh|_0zv-{djx>6r@c`opTATNG)A<$AGmvYK<$UE|=9W z{TUM@QeZY@Rd!m-B6nq?Po=GRI8QXP>PPE^8fQ*6aWDp<{BBpX1qb}-Ve 
zXq`p`=?ht8GVEZ;_-4siuNC1%h89P}u)icRRi#9$H>@9AIaa3)&5sdqGn1mWF@XO} znC~f?RE*-9oIJm!YrJ=VMf`L7$ay~V#U8Vl<8sFWnYi0ith=jtbH17}YQV|iW##X{ zkYgJm=c`-IUNz6$m*`*pyw9X%{WFj|x;r2RllT6-y434xY*QV* zk4=o{enV;#tT|9*hrM8HzdQ+jLcqhz?)L7efb6l6Xztd1zhx85gjj!2(Ym5bJ#`buq7(ck@bZxRWa=C@5fBf%(o6PiGyMBbAa{1 z$UUsfs2Cz@XOR0fZ8JxU;mUD+{T=mi<#54oDqc?@z;&d|!rR~Orj5KWM0AvvyE=z= z#5spw0=VOEGuw8SA*}6}CtXp#?Sp-CgwM@&)W_vQq*^dpaNZTYUcdBOvB+Fl%yXuq z8O}QUrs)QRN9^R2BiX_nIrrO}o1ZDgyY5CQVoU1l@j)7zntmzq#EO2}+SK~W0iOdT8*EW3BVXknSPH8Q$GON1O9-mt!>id z-&z8;+<08XQA@K!{ecpUXM69$a)5d%b|m)=DhMcFSQ+QKAGXEL*#8DQo8MTf>PZdI zX?(<}&r(WGO3_xk96PogPJ4&*rBeBaaqV6seMNaE>kAaTLYu@R@LqFsn$}8g5Ke>c z@luOrZympG(l)!;na4<3a7FxC5%yxISP>n^%~U$xjfYLzJuPtOVIA`v^9hd{n6qk@ zWX;obROFedBI3+Coj3%ToYTKQ>Ug6v^ZTki zx81m53fW1F$U(GKIieRMu~##*Lee14*A*nmd0hqGC!u);AZtfQirgm+O0z$I2~rVo zogLNg0*bD-ot>4{wnvDEK52TRr}*G$*G%YnR0SF1f~oSvC^d#KVJix9^=WzhL*@Vx?IWtzj- z{Z`fs_)$8vyI!2U$g1*f%7e>lbv{4DKj7yVH&jahyA-k8*-;^G4c6mI4N>sM{z{Gl ztX*e0{c`XR)DO;gF;!NmWX?&-&a13iq_MfNK_=q;Rh47sQ)gdxr{C&K!6o?o;j|{`_YwSUsI=z{B5=MT-Y@> ze!l7Z4@~g&w9~<;+4+RlvV0MAFUl`^ldVnxpVFGOX|=M@ljOmaz}#_#aIN1_v3G(x}|29*~^60@$A)jf$tx zOlu|C;t54f{=RL5&#|UAo%PSr+k!aLa{`^0c5Yaj*wbFU0gseXDAsS?`iw5E2kt#+ zTRBY3Z=MFx^d#kq=fl@xDEjP;563>Vay_FCef?=HCe7gasPQ=&mu8$P81yr+q3(ks zC9fgabtnSphDrQ~dVpeH0I#vqr1aHT7ao7CjZ%v{|2LSMD>|8jOE zEi3HT@ro1W6)TBK+$t(F5DKwl&Uj-{%F*S!V$Fm18k4YHqhk7%c297AX?M5OW0=H; zlK^L27j1x}5r}Y|o}V<;7Sat~%!qLd1D^u==IlaMv_?mfwqi6OmK}nvby};@js#?E zByq*0Zlmsv+{-$OiA#s4VS*rqV!5&6XDYKSVFu@vj@SOD9nvzYa2L;bNtutLRoWFz zytaLC5!KQsq3VixIQF)MRIk;a9+?Qog6%wBv0{rL(Pk}x%by@4e1m#Aoj}WpK5jz9 z6pHNDj3ItnDM!H<%lVL!0;fmE{VHh$2ML#wlH)O>GTT|0Q1xza3{xj8%1xhFDJDhz zur$AeDo|pjQQq|p>6$$?Je9NRuE~FH3+t;!(M(!$Zf;R80}0^3$Ekj|97B2S_{glC zJ#I;`^b)j6HK0FNc-Ck3WnZvYdffT}wX`->nTWf{aNf=)z0xTdO2*HZE3m(_%RY~UQ+L``X2De;WoiQtsaaqqQ^ zkoKX-lAbfNJsM$T-^ecmIBpHRmP#mnOP8spsNg)*3XV5C8WeBbT*g)zYbk5#8x`~d^Fl(-g5Ya>jxb8CDT)O19m4wMUU1!< z`3f(Gu zimOy@jt4rEfZLhD-&dWde=28n)3=k+v`$n_+hD}t78r4`e^Uf7h%3#2_VDV?CgWv) 
zskhvLP2a1&{AX|8iXea^p2-M+;&71qpc)!9)In4cR6S2XOw@WuN-mD1@`mq6#6h7_ z)WK9{R%d`FZb~9c5G{NQbQ6l@s+M*99tBAeQv)iE-Qvksen9`OkB>P+%!~B&_#DC4 z-;l!riQaZHK|Xhq0!cvdG~TOeUNX5t*9eyRsl}_m&QBFSoMis=$j56Yb>EICIi`II z{wkw5{Mn1ko^@Hksk0|?{Mt-0oTY`Lg31CH&t1|v_&=JL&ZIG>4{z0)_dO+-f!F@k ze&=t6GYnx?vjJY+tHMiC3h(q3q(ZkRwa$1vvKYRly%og^CMnk>vvv0hqrr_Spi8jN zb{+YB9^FyD!ZW#WKjeR65Lq;v3lIk8ceGgac2%A3O?A}js*>0Yj0_G>U0vCNY+%po&vfj8O=ZFj z^s>nNwt7%?!B?l_cH9UcnyTa#GE=U;*byzzSIOoDd`s#zQ*X4pMbCC}I<90{^82I7 zFpj3>b8zg62&=L@ilMTf&Y}Br29+V4^T@rlVU~?PAOMNN!)rBV>^Q#?_iqk^yH=zs$%iIqEq*?rWB&lA<(-5sAGE8*C6%CqH$t3z?tq;(Y8RVzaWi%Vh^lu7e1M9asy>uR&Om4mh%%#s2lnk{Trc4AU z1wPQ1ra7_weGr-Q@r51Mv>Dyi&UV1}lmkaTBk*Ok8=7=v^_>@gO4& z`aR!>+J&Nu=I^vY$C{_$ji6%`*i`$=9B?TJ&-P}eIg)N#iP)L z1cV!P-+5NvTw#IYK+b@!72h@kxxE6|4T+WOiK_Ka0vJ@cKMclA68j0BVf+zBRQf#J zez}kW53suk(-WkZk5d#5Q4;7z2uh;+_>x8TIUdKkYgOmY24rxbSFTv_Xwlm zbyyy)FqqY85(uCsWg<Wfb$wv^F{b=tg%jWLMFs!jbvSWP%a)pZ%v{HOo&j){z;bA@j)1!t;QRGi(@tYr z@ohmGc2gnLpys9jS}VA_2tP~f+|M+)n=hXk0M^%Hjx);^*-f2J05_|L$14dC$p7o8 zl)|4#o7RB4@xR5&-|6=NfJ^cb*Wcw0Wc5EQ2g@7c*3G{|;%PTucy>E&5x9ziOX@!$ z)`(ve9$b)`mjKVOesEaCqs5i5-@Ki!jvVcL__0B4doveK_w<=1Y<$?BuMZfZ1Yj7} zJeP05;lucqHqdHTCj)Q`At-^MZgDD9U*;nza!BB5u50S?`z>rxHiG5D>Wr8w?C`M; zSzAAyJG<{8KnA>^{Oe6xrYZM$$6vzz7mifLOTQjhcuotg7)+6^)Q4~%h&6@FXIFLG zgafyVS}tu;d8qwlgPG>jm$z%cBndO-8=(I}D7%_60WOhNRqMmA)m&9(Z*zzV0x8Q^ zkEjHCyArX{vylgiyQzG@eAn8A-Sb#h{NJrRa1CUX3|1i@sCsS}n=ZFI3KHqSHC*ygf;jAYkgLBFsn3FE)Q%|8P7>?f0w`H;QM!#Q2fBpWaVhV=vYs4 z&M>fhdPxe0sz8pENpNCsPWu=$6u=Fb5+c6(U!l7)2{Ic^ZpSkw4+OR>???jW1aj#+ zQ((6H^7H%5ZK66@Ds#JJJzt2C%`8WlN=(lY~Z~m1V+m2Po_+&SSWljcjy~Q68{wpwSTi zU;cqDKpDU;$n--vl0o?wrYNdzRt9?d^>AA-uZ?wXA24Gbo#tcL2Y`Pow$=N0z2TAZ zQGw<>=W|mEw7qOV?TN|g^n-w7=zOk2MI106E~SA>yhUDVf_)~m6f&pl^)Vv1B2Ty@ zVZA0^0)V)TbeBzq?_soy<9x?}61dO&mX$m(IH;om37ppGe!aS}BZfqJd0tS;|AX#_ zd>`gC*31v4_Zy$2q*JPo;U+&+${>zF#GWKDnQlytMk6L>{8tTxE_BaS>z}#)`CxI= zHqWbQyhtv%A~^NK#~k`H0(zStH%UzNmmnPfAX?{Eb3elmzd4v1a>Ibhpst)|n`r;O zD6mrc6_Ix{S}LCHe2ZmZSb@!ZTv%0JD(2EU{itbPNGqK>61v}|F*G{ 
ztg6I)0se+Yjbx?_LwA4`CY(?);mqI5Cl+nbr~>mVxQN3a?#2L`YX=8-yktwNw8`UG}y#z`?>TyAuPX#DqS(L1!W^ z!b@NZ;aU(yc&S)IW~0U5et-0+2uXWH6*%PJ09;5?C}aPOGM^DJpzXidtyb4);e23W z#bYr2gPm?laZL&4cS5#Mch(cNahH>kj&QW+;lzGZSRNh6KseEVU=bvDpHw8=SF;mG+M;6#0G~-|n`gnp{f`PTS6R2*)&n zHMiq%IC~3olM=IvUZlm&{s%vC+IQu}(|S+%`DmJO#)0<`^hNOtP*gg4&VBv!zJUX) z8RpSBcZiKj%Kj=QBQDwBW9Cn_8lADMabm&3iIJV66=ye4j`W>jTu(mIty?%Gkm&_L zx&Nh@3b-%mk*Jecr!~00I~U<=RVMt^@!_6}*Gk6jN|XP0y8?yOlronsi1pt(lg@1w z01PMzu%l&+Tkz*BjMPgK$srtf;NEq?Sq*xYZ)5NJ{I8!I2A)w`-tafgI#twqp^$*C zo64Z2z-AP-8!ipk&0xZ)UbD!ujTOs!U`i@w;yu7MX7C-)52Q>J>VU5|3??zF);OBC zN*H?U$>8%T=0Kg+t9=j{`s1d=ySt?_eB{__`?YQ9#nbvM$tB8y$-nj7-}11B|52m` zM0%?V{z@+uUs&s$K4^Gf*S}xsZQJ{C)=sCa_LJrpL-wRZ_xLq{s_R**v%?5{mjT%N z=VKLvBa=tfhMhBZIIJd?+2LZ|tL7`8Ibg+Fy0BGjpQ7PDM8QrihW75-c_L3Fu1Vt_ovW%ek5JWDEc^8B zd@p$m2))Noc94rS=JxjqX)}ta_ehuzC4rKOmxXT3mk-JZ{k4O;GuxY`2;W-G%>|8a zN9`^^UpSvPkXxpX{pA=CHGeFr87;lLLK?Mi>EXJd9PyKZ930_w;$Rb3?zhL9+*uz0blB52OJ;lb2l<%)~A3Nqp|I0dg1J!tiKT7acz~Kxc6?d#|68U`De`(N3B|; z7`_TcK7p&YIvz=2PHtbvUXfSjdX6zqkHnKKFYG@U!VT*z{lArTd@XhOD9>tcPU<;A z$Uy-2hJsDe;k?>?Bn?c2*>!&+;p;1;oJ%@RsOWdqu)6c8Lvmw6rC0jqx2(_V?iPM? 
zlrNAOQ3nSJFv)|i+qUn$yx30FrZ-^wzV%s8{^*g)JA-gjQWg7wDmH0YNNC$KFtk(~ z4%2O00!Th>5#6@Vii(PY#~)gSM|l;PoE~(W1n@!=?(2ur6!4Q{j-#h!d@n5F4&@X< zwe|t#GGS)7P@e#`--M6FFst>28OZe+e^fi=4XOVtu*bj>q~l}V?9qz;>S*UWf4x%h zqQCH9Dx_nc`)uyyyQ;V-+BY2@Bz=>cjOqE{gcnigse7@UbZY(5)8X@iFT0;e{$lJW z=`>UNHBM~(PGjT2ph<(e|9_QMuF^Wruu(@OzSKvldCX^=2 zP{BAeH9tW!d1fT)XT5?VGVQ^a*FI4SfnhaU6H<@-I+uQ>yM%iUVQln%Y&JnY!asZ{ zS=x*E{6MPPja-*B?4((qhSq>p&hsobn!y}Tu+2;MyJHNM73_IdQXrxXl6@aTwgp=$ zANFXynM=QfSI#>}j7*MI?nm(MF!Tg3`pAbZj}>r%Ul&kOSMri*fb~dESUOStcsMhM zUja)>dd*%v|4h7hd=fsF$~T?zHmhr_U-VuM-9;Io^B6D%SI?bl8m~msss6wsrGd;^ zoXJUOeh;1VU{{)1uqfe8FE%js0T7=6IsgDM{|6=jsA^YQf%mg;zwL6CH*#W0?clFy%-s9^R%h;Hm<=w(35A;C$eFj z+tSnGwG0=L11piD5Ly>en6LN@;jVfEO&lAao=9i0J`X(q{@*hUNc%g6N0|IEL)}tcS*&Vt z`>v_!${Xe8$*XO?^~kLlA!tk*VGy%ig&1V`^l^`RbT#^_2Kcob`duJmi+N{C?)(8J zh2@8*1W>u-s5AeI5x9YMU3pwq-(FTDG8o!1XZl&fL_j^R60B$`i=E^dki^fy|}$t#d$gipmbjkn>-C%C-9 zRPI9AW9n46_&#UYD&aNOmjzczy<^Q3j~YvcmR*J#*(by=+=CSQYVJlJo#X@`l{9rQ zEy!uK|HY1H*MqtMxNVNVk3ZevJB1CH{|PAQxBm7!{XpGpZhd-|5jh3IxjdrW zsD?9nmop&#xW9d>v7{pAh@;aEu10>#JyyIxaJF+LZ|mc+8%sO#eRuuyhc>Q@figvxAcxp_3uXfG&tYrJ{gB zLKGy92}6M8XSO zQOJD$7NB$f-=d}2BI8e?n?TXs>hn2zNzL+dV`DLpK}0kB!T}L5jWdiL8c_owXfPOA zS(z`p@{8x}&UYQ%FT?m7-Z^v;dHqrCeHMn-hXv}41T<(go-g6Ru9lCczU)UNzyHyC zLw_lsXN>1_rkXK8!IxYsX$8!OIJ=gro4WTeSlm8JDw|eseu@Fy&Ck)${x1>FyHP)P z?#$uZuK+FClk)s7b-F4uw!srcW$OkBK_daJ-oGv(CNbVoJdvv8CFZo4fQ|JPI`>ml zkL&I?2BO)LviKY!^mUuze_L;U&+U!wUkH)K054)K=H}-O7Wc6K^2(qS-V~=h z+w~d5XP$m`nYS@uc8IEG*Dbl#cO!0D?C$%&B&%KF?$3pA5!mXA$s(bEJ>L*)K}!^5 zK6m>w>S^!!Hq<$HX~(iw55@m%WetnW@#wy1ug8k*59%N9lW={o0Fb{NlZ(@JyP$Y1 zUi4^M@22O@=c z#V4;XCamC1^hqhX{7KeEy3+@UagDlx=c%eR=@l>&b$h}Fna9jOB!44sJCOFq`;EEx zfOM>wZzlz)n*mFUVVUbV&!*+dF-cKg9#<%orIdxB3=r$pr*BWczcgWVt-F}ogH6u2 zcXO3(q^@(1-o13um?*{F=W#94U`qZuwnMBuY3qUcFhl(>0U8kI|Iq-dU}6ND!c261 z;1D-N1HEgxC~KuuHayc`N|Xr`xH);_cH5mQh7LRpO7d(sz1;MWo$)Uu&@u7oi(5T? zMfU%rne@$fQn&NJ)&vI199O$lR@vS(^#^R7TycRxU*x4^v4*T(Kgz&}(1{U6peaY? 
z83yP-bYLLW*)O+CD1iv|6rx0AIVL~<`HAcI^9+4Pf}0DO5Oco84HUrD7BQat=1EJW zO6QTNP0|dix&vtyg}rOp=7dD(lK&4BfI`-{&F@$~!gzJCJCvJ*!`g=!^eN%}ZZgox zi9vFij4xaSYx`TYJa19OdF}7t@z-A8G>56#;y-!D`z&WsX?j;y?6+^jcw%91#-kns zFrmrA`1cE`TNzF4CTY7AD-58C##3|d#Nv$edVdzu4i2X45SHtD~02I1$+FUz~Yvg9- zpdwkuLq|JLryG0#;TV(iK(OSn+|6FlcJ{|$^{ZW&h`INh5`IDTbP`p>Z$<^ES8hWO z9FHhAu5copHXf~q)5XP%ZR}e5ROA?XlcZ1tV6?S(erMPdmFZ9uZso<6qBZY!J1xE9 zu@`k`iq$N`v%P9XlL!ZC!xiMrb!?uvj~ILQmeCusdzn${gm9DWl3oih^&xw{XvYe1 zC(bj6MB#5=lS0+Tv^qX5US|W0RQ)uefDa0`1P%!3Ld`?N2C;*^-zwknw_@QhF{uxL ztf@J^P|iIP+h$f3;(t4jA77+c0AP$uh4_?RzAB8gX6Y!|B8FL+k}dHkJ>CAro

696hVJjY`te$}>KCuCvufNK>`2F2uoKYdvCbvwX8ftu+&qWr97h6r2#0CJ zCi6#N7cwhaL~t_>fsJ+6$G4j?Hk#}Str*o^#LFc;tHsZUT%3H>9c3d^mz~b^;io2D1Bq%O^C;w-ek+a~{0Vb372xBwP zQCjFwmjflF*WU`wH5PhlmMCwXW+ z-^i{OYrH-Q8AF8#rONFxorotaq%ug)VJ~Bt&Z{ujNjeL(=!pLaSir7s z23(UTCY`m@Sm_VnjU^{@bYCdfeas}Y2R}*7>`qs%pk;PF(Yths!6**e`)c{mC?0JIRaiL zIv*gEz%r1R2gF1$FUb~nUP2&@s$`IWn9NHY=Se)%&WKHHN&hl=##jNcdSzNWWu>Kx zbkQD4qo9blohuGa<^`ZX0MQlK85u3nM3w8={1?-jx%A!9SMDh^qv)e9Xsu9Y5>X%M zDwQNr>+zykOS+%d_vc0M@f`n3bw>p1EK9hjp{V!MqLI*9d7C zw9yvQ=26|_zyym@pv41vjhf~&iM`Ob{Ch2}#=mkUcQesJ`*S?geiXaX(PcXn$Y{^- z4Orj8>KQvFSjlf~*(gB{IJugCZxbk!H?En#%Gf`E5{lPN(>3~R#G#gJ$8n5}8f))6s-C5@HRIz5~4SP&O0~dp$C7%!zcbZk+ zaB+WJ>tSuA%+@vX%2RS;Y_YQCz$B-X3n?qf64eUERRzby7@mYGh?5_*D;2|DH6OQ)i?k>jvMDbr+;^tHy2EMrg z(Ec<~4)RuGUl;%O6a~IlZKW$fI5^$gYt-n3?>}aAef)W@dIj7!gE6-4&7zxRh-gQA z>ov?Dcp-oX9Rjb<`thfVU7GabUg^#Hm6vA>8!a}vr1)sTQ!~}KUEa!Jh9`ufkc!{> zv`8hVOJZEWii|syyf!QnEJoX%))HzfdH92a`+f+z^i{k%ndBQn-toU~j#u|Au1fCA z3$jaw7&=PA01kJg2|)1lLHsk6r@E%ru0>gxB6j}X@9qtX54$8 zmN$bZb$8J%9Sm&QTIS&oPrw%@u8LWO-3E-=$k2pg0V%P1FMdMARrzf3r?OS&Zx}!}% zSoNF^h;%)ws;=h!R$#AVu$Cg=zIA43VBf&h&x{K=RdAPn{ErsET7K)TXV=HEUx{XQ zC+FzlYefK56YDH>U`7q-xZBWs&KHUpkol)+Kl4!@Kb>FK{Nw|>r7hm-w^Sf!OER1eypRT;SeN~%SeU)Zv;?)STWX1rRD^UW z#r0cK1(MtzsV><&K|aI+G<12GqPpb|BGCcC00(%d)W-sTz*0EPIE?!uyZNHOcAv{; zcLy@FXGD8Z@9NxnyZqbkkD%63V6wRx=bH4HO2PTzZ%oH;sUuFRLq_wuc~nSrV*h79 zX#)7kS*`r-K9GL?e#_3mDQ9K1EVSBWjzEBRLmaZ21FeQlSM|yuY4m7M*7U$n$axSn zEOr1meL9>g9rQqDL&ZL!zBu`>FBGVlfX$`=)PXN%HweB<%)BLL$zv|trmnQ8_OWaq zGM6ZYU3q`;9#$;jzwWZy0E}llY&+M;nYR7GUoDPplk;6mp)+6aaMXT zmO+n7<`XKe$x{VgXP;6petuEzH+-D=-e1E}pME<{T7%b7K$ zGe?o=PGW^-CZW&yh3;UJ<)J{t_Cf~a+^pU^l4rZ`4$92vh{L*b?AKod`jnd&5j~Ta zj+$&i*yk5%H>TlZm%QmsAt60HdLa-6UBaf(uo4~?k}54n0j-NNiB_e`eU_Aj{?aeI}R<~(R*{Y zwio~I7(Xki7h^O>q9S-Lfg)uYD;C(l?FClyo8sJ8kNnv1pDONqS_p8Y2_WSFebD8u zK;q5OV79EqK7CihdNXQ})g{2VwI3kXtdLbQlL8gb>6l&Lts}Ei5RJ~O<`Tbab0gCE zQ`3W6T7J$tE9rC`dh3e`*qkk7T@(77|Dj6!A?2hQ!YuQih0kBu-R8KaPf7j8R|}7R 
zcl5^VwY*n5%0eECJ7oXz$CkW8(D2F@k2bdGMyJ^A>kV_K&(RApdz<6OhVsQdMymS`y12Ee*T8~I&PIhzL}M0zn-!?{H9fXqTr`{I?A zLM!OQF`UVM>;A1nnZJ5JMk8g9y_kApW(5An*y>%X{REVVkz>*wZcDNA6?_b*E%kw> z<#z)hSl<OXq>^8j|vJ$-4JA)RyGsodV2JvP5^%afBM@gz%k{}e*762T??(Y zU6lMd{-g2xcx$s~|0**;i8@1E+a?LTVkfQK9qunazoi#B>~f3xt@lnzkUCe~qr101 zxh(Ajou^q*SryT^4WF3mN|lStiA^K`Zi^U~$#6F(Z?83NoJY5hTi&zmfJLLmSo-*` zxLAAIf7NXXUb$tw$r-Dvt>x7X#BoV3>Ben9zd%*m{si(CQ*)?WeS1bsW|7}Nk=@g& z1wf5}Sg>;Q`GvIX68b8J7ja##3DV^`E4f~mV8m?jzUhH~zBXNM38Q_u!?s4m44TyP2*~3@#lh6_cC$`+9Ci4it?8{wgF$6zeClQ6%+`8(M__s4biBKQaeD6c7wN6xom#`gE49Wi1IgQs;y?| z!MuAYm+jMS+U8;8wSBAE*s1j_oD2WsD+=$jdPU51z4_G0Y?aAQ(#~U|OCreC=pl}e zZyL>v2>Ddtmbd_dC}{L-V}YFbSa#-Y3dQ{TuYt(OKze+sl#X!2VXbI0$458huE ziNr2o5=%t|YGqSgdS5YM#32v}b?mK_GsIb8(k;}XuJ)gUI( z7~j*gxc0-%DQ>$7M zL=u5Jx8&ya`Zg@$xX<+@NKmk~rAW~H>lJ3xNDf2D*=sJ}58DPXvD(pxjghyp(g@JH zO6LY@?IlzS8}-R$cAASRuB!pV6-BK>j79%+Sz0|>gj~lyhNya;mgqJf$iCbl+tU%g zdZiT@7|7Lfx!N}m^xgs2MPC^rzi-lD$s483*k-braM>=lcrG0z{Wzoe5R=e7ko|*B zJo?@IkB_WRJUxZ5ovpiAluX(5WFDHY666%izguC+(N6^&!>B^V@<~JkT47fL`PHJt z{l)!W^y+?}9?}*OtyCF$(v_|I2CFP~zm5d=gjVj@cL{1k(MPjhVZ&~tF+bsjT7#*=AySfj z99{5MsE%9y#j-?KO3eWfqM_HUe}B#8OeS9y=<>ZQAm~ZF+PiqsHV&f_ou0DYsL)JJ zkQDdx9(vVs&ZVzP&8AE&G{q4k4zZewh`Z$viYhh!@S93V>Uj8FB8KCHj6Gz-Btg}Z z#j{K93UOj&>SJ3pC&{<7@RgMQ+FJr&6Bh^+Mf)!2jpT7*i>!Zb+=SybGP0x1pYy=zC;$hc7G- zBMYMxr>#yI>3V^q4Zs+~n4?57zZ4_LncG zy;iXdu)cuFSm!Za37@Y`0?U{L@xk&Nfu@Xps}r}}QPwBuxD3<9Gz4-Z9{o5llqY5> zsOZTpECZgEpuzPOiQ)@vVogU!2%yv1*|Excza_S!U!r#6(G)^}*1z0)qgUT!#?->4 z4HR2=HZ*bsYzaquG*PH?e!B5z9GJ_cWMoM2q}7&?@)B}ILI7j6F=V|E_D9xC#fij^ zM5(2|kj@%l`7p~>=1$6nD<2W2!FTA8-q#$)*PNz(rK_4+hJT%Y^NtT#=&y@)ep=ijLOZ!`K=zx=}ydLDF|UHL5&?Tzy_FDnjns2`4ESdwo5W zjp2YcwtH8I37zQy6yF|VvRB8Z_Dk9Al&i5qA*7Lpyv_q4VsBWn6DPabPc#(aVl`PA zCIx3=Ae8m%{8`pzr@6*5tK;cnxR5sVrFSUKl*Q9DWGlNeAONy#VJrBmCtC}U1k&U=DM@>ku*cC4_8vMq8I=7cxkx1yN#EE?dqVB`j36F z7VBM|x7Ii6c&>7@Ve%V^{pFBX@H8$^#4J2bhv{=L%99&B+RSs9{O_dd&3@tQLb-g z9%}j~E)YjA+U5HpKp*I3BGaAr8*gM&2k~g10Q`GE`$DHjstk##fG)W*85Gq?UCy8q 
zWNvOX*C?;Hb3wwRwU@~E)syhkvE}L(K`WqeXi;IRAZSfBm*)P!-9@1cFT)R1ihurLCXxet0^uK?!Y%G9v?|!n9f4WcnRp0LYNtE$|MIp$Q068SeRdd5mM9p znCNnz(BRT$6_iRBi>M)dJd$g2}T?ra5nXDaP}qhxWlM_tc? zQLHa*elgrBJ?_sL%tWuJRSdToLSl_N-SWC3p^;esf)G{LnX`@`6OtRGV$N(U82~Y9`R0t2MAR$)QO|QG$h7-pGw(S?L{DjHDUgyxC#*J?mSeG1 zw=kqn-lY^TU7E)53^}KKaoLZ5MukeC{b>#N;w|G@JGeKkyjlI=d<_i?7%VlFOC8Sp z$O@UD(GdVB7=qaBzff}9x*5li2*V<}JMxP|Hk-itv9Zc@Z;Pgy8N)L?PR8>8uBQa6 zs1Qf>I)g9uMeGMcD$D{91NL{prK8L1@G3^PffR_#8O3)Dtv&z>IXgSYXT+=g0CYFD zzu~=W02IGeza^ZCxX{=aQ@frtE@q8bX~1!#O&wO#(h5DqpT-4e-r$V#@+KSjj&}{a z4e$9U%4#yB4qa>^veTwD-vSs&jxx33S0vl_M^8W2kVnOl7gfjmzHsM?oMDcl^r305 z9&L0GvRV0_$QAjR+!%sIOo)~R*qb&DhXne*K|Mbfw*AEth%d3Xn(IO8tSOE=`wjYA z^FMjC;;~u^Y#LAn5=u7-NNfqfOA@txW?!!A&r#U)n6G%ok=ZMaa_aZhhaUp$k`TkM zd&9`ul2F7+E1MSuak+)rPmD@`Wm&$3`kI4a?V=mSpL`?G7~n#=8CSIRWMQobE1nf% z4+kZ!Y?HT=&!pLK6fF}?d>{;I+p7j9Mv$Z%EzI-h{=i4*EH9ONuhbG1+4gZQ6~VRZ z+d;#?oiHL-m<-b8sHZMi)9_B~^t5lHDIjJ6$ToqN!o3~H2rjQ}dejAZ7&|6CXD4lGw9nQFaQU)K{eXSce~lbGi*G-iG#W z{OI(bfjbB~g_1iz!6ntU=f!9mq5*B_T0XvLuIt)xJixR(o@FO6_;B+5(Ee5OV@b46 z3%0MKLj0?y%Zit2|p3bufz00tbHF>3F4|dTFD@&a0ZN*!-qgi2cIV&q`lT|@0kO>zexhf`x(bCfL@$p$GpU51sD6oa^+@wjF zXBK#y?c<_caD!1BJ0pOMStp>z-lUUmyQfYDq-X!-C3%Fdt}YH^rlCHvCyOLiWacp+-TXC1o5Ur)qC|pds(_EAX3>KcgjpM=v$xn8}pTc1}Gn zO_c%bQdICRjoHm+CPEY?B_BF|nC&$kf9I9NoAJM4!U_rsqST|fD=>qKx&6qZ-o1U@ z-@AUDw;=ktuc$dcq!-)lQ)mjw#jr-j-Pe6pFg-a*Vq>c<-l^^P!}d>-UI$30wjZHB zru9ubKF*i_h3OY4B73H4vl2=@w0LG=_vsabt&?9i@Px$pnQ$&8tyj@8py>0Gg26yUYD_-Jh{C%JNQR=kNiaXo5aNYCR>m7Ezc=qf^y${J5Lvy0BEP2RC?;k{dYeB8d+{lM?o58866r z-nRCo?mc;JX;jCn_9&;IaQ;tfc4QEB@I zQ(kMc!bn%Ul!|q10Xr|Mb9mdHYGGKJ8mMOJ+ewO^&C$+z#(aYkWB+CzB_-f?Xqc(2 zKauagwv;Hp(&Fd&T2RFnsSYe|mQp$Ob*~RaCEab$6$Dd_NnL+1!Y4 zy)!`8-5O&PL#1be9Zd9jlKEo*7sGex*QpOLYI+6T?v_DWD9!CC=95Y1csZ&r3V70a zGb~0eAt!01!Y9_`)5tuS=CS;0q z*!lW7ZH6p3ue!5DL%cD`Bgfq4!Ns@8eoSFuVUCWDBbne<*CXvh%)4PGFK;WTw=v$+KRB&-g-VioJSN6G4l8=SvEPH9SS>E4 zv`Z&gUashKd1!NWN!lpjyb5I<#9fvD(DreM{~$>vbA5++Q|x!t7m-c3+w+L0Ho7(Y 
zb=p2+9}bj`ma}<`((WP+GqO&MQ9;7Ooxj5JrKU2A9<2EC+;e3ADk*t-{<=Q-t!^M} z)c@BB{!Gr;m~x8zMpZ^89B38zUahbu%K{ZuRgyL1`n7^xtLT-_1#0HBCmX`cC={|% zLwBFWb25ZzF^!q*f2_FrWtmrg4+)cdO^#cyrj$U0Ik!XZl9sr3y;ut+=s{+8M=6NH z3FW2zrb4$r-S{G(==8p*mT8dk%b6Wt=Ck6Y_`wK3An)NUrA+GA@oeq<6jDuo2TM~% zY+E~Cxj&p+4jj*G-_mD{U^gezY!J|)Z>p~n6!tz?fS-buG&E|B#D9JXi^8Pri7$mYbwP@u>m@2-wZG9jTY*fo5{bV8H4P2& zNI6)9R!AtL-B6LJv#tu!Yv$x(v#>?f8NV=#@8p%uBFJi{o5n74f9oyu@%h;FGJbGw zUY;@u(ofa~8bqzcmmi@SCBGd_`W|lY=$x=gnI?Gc$F~<_H>53htCmgJl6`O1LZsy1 zp{yS_o@At>0PljI67CW&NxdpX)SulE%g895bbs;inb4=Z@@c{D0;G@IOH&gcc){6H z;A>(>403x8STFc~Ax%GRFjaE~K9r~4L$B}BBcP8{5mE&=YA4a-`q3~jfc+?J3{N8- zCHAP0IB30sOB9bX77VfI&Zsu2lp9xyyEG{ZMb3q0Lg2SMnGm7=mp};d;e3$UobeM& z?+AxW2O2jBB*^pkmZ*}!Za+#;DZVwv7s(9-avI=2vO76uE}PQeT0X5>Y!zVB!zLOG zFoJ^=L$@C)bM6eyK2FWHBTwW_s#_l}NGro&>muT@$YR)3adud7)~=h)Ip0s>WN-6& zNxcqR;RmhndB2|1w6r{*f&+-ijivri!;9|BYnqn-^a4mULi!r7`oQGSixE5!%R-+=4>#RdGZ25 zt-`edHZpvIU7D_S_g@X!lrbg)E(ibs$s{l&Z~Xf-QrEJlj*a+U*i_3`(TfU{PBy4F zAdGd(wV$g&K`59T!Jk`-*?&y!d)Bh4e`eOzd_(XC-otAiz?Hc`%$u(_SD>5z*esacQ0o9A=PHLp_) zbm0qE(G;Pg&cmKF-|Rld5dP(8i{zoZyu+!I>2#bseDcH7#^c$ATe@&6#Ef#`GobSU zz2ExX7qhAQ-8G;)JAr8d41M46<*!!4@mob4!lb;0h6X=rcxpZa_KmC4F=vtZ&Jh&_UB-+zDv4ouv^Z^$I&_egu@tXtL9req$lU1=|`amcr%2Fl4Lzpsms*2 zzy{>A2_Fb*737jWLERI&n7#BtZN;bZ_hl(xwyJH+%UnFPNwunJ1z8hN(9eBQ7dJtx z;B9j}Iqe~{)FpWr{5VXTg`)YI#C_V`*M$#HKz^fZUD5>tQSTzL$DP8?`oTW~fHYL0 z=xG5Em2D${N}>Ss2iTi}7tLd?eG&`uTZ`4pZ1EYlAuE6S&_Yj3zZss?-n={7xww74 zR%Y6=4GR8TP*9*M`NZOj{(HoDa!3(d0s2$meND<_ap<8pDhvRzcWs>gMsLsW=;(NS z*qCfRY9d+pS3qVWwJZV{`DVAD!etj4}4qq4k`O|K= z)gc~l73*p^|1QC~FpBu!!Rd_4?tI!r1Wc-EU7zZXH)zthh z9@gjLrul!2W42_KproAS{=>nbB%L=?y1z~;0c&g0H6anvGpnN3gallnOfnu$gzS63 zppbH=4z!|;rIoeC5{Li|Rs_dchqKfdlfGR-uYpFd)M2w~ZQ!TC>GH@r4_^t4l}Xx2 z*zoy)b6xU12`{F|w>sv=o3|gw!@K!jZ9H{ClYUUKJx4r0B}4J^i2gzg%$1qWyLvge zUQ7B#xKCXVmS{yerS$a!0D&IQ22M`SD=1W%B_q! 
z_H2*4U8!X_2eVHf`x++GT(sJ0QEerIMSz3>7dN*G@Pj<*Bfz6km;>UFxZccM4bn!F z2*ivEA#^V3EUD9Es8h^!k%h{G+9Kp=YC~XTejY>0$%%i2idL(rJ)4tuS-ztO^6kr9 z_p>jLDU%R_R%<*O4nO7}OghbE-fLc5^>O(cwfhU|sKb3#U=w}gHZT}alt4qE;W!9` zR86}sOjo`@DeX^>E=+nk=A^2DK4pSijWf%?ZGPYd$ROEQ9pe@Zy7*Cg0}dN^vMSk5 z+GiBUj-m`4Qx4jfOFsT4Ejkrhq>cm_sEu_5d*HAz?bOyiK3b|t%S-X4=Y&|ayBf3AF#>?c-3aa@QuDaeml zyM*h*LAkT8gm|$6R`v8QW~?H%T69nsRlVe^O(jakkN$zb`*TTH3~9&}2aF!{q-bg-+R6L$)AC?G;X(3bJd6Bg<71Q@$pRygofT}vSll20$>blBf z%Yu)k%~w<3Y4~zWiL1woayaRoWu+{xa846wD9~*l^3jfQa`)i%QbvNa)FHGBADxSj zf3}efvNkziSd@MwNEJPZdtOJ~Dsv|E2DG}fn+)UUcx$lw$NB6AMjDN4_anTQBW6Cw zicOCxYbVHG$G_dYaOg1ganszskcqY9Fc+-Ufz_~CFWD1$A$niIKno)->Z`YF9L;4<439nT&PtcRo@+mZ-nouu zUCB^Rya^h-AE?s)Tc(_xNr~jCN&@gJ)zUqx7W_+(NcDhH)o0C zQTSdi&$Pnu{`Z0X5f{Hm!$dy!mXA^{rNhT;0k4(z#mG1`tcd-iBhJ*}xJ-7tgaK|Y zu2Rhk|9x8T{M_82(H#W{wZKxcEMO3Qi-onev@3N_B3kMgxvVmQL0-ki_XPlo4rfWR zzCz*95umUaP0``JDnRd&Oq0)-&kqj|&*8%q)~J#TH{O!2<~rB_4#oJe+KW$t_Wtke z)2M(qC=`>lnMu^4PRK4FKLP&4e#a2NuPmez-)d>Qbbg~7Ki^x$PHM-D*EubS-5DeC z;s+y?c(t^u-Io43Vd1<2Vkb$Pfz4jmbn(*HquM3V_3m(#tl?B{uf&*qnR~YtAApP9 z0iO-I++%=YV=F78Kp=@Kquv&A)=Rc9KM$N*i$^(eu+-Gn;%5EH?tK@QMMl==ErLpQ zflK*~+V{h1RltO;s_M>#=T#|3w~~;$iRIy5y@B+D6(FeO=oO7xKPxL?k6CGF-*<7a zew+1I|K#>!hgr7{lYn54W(A>JJ2Nv=G_{|Ulw_H)GB#!pjQO~$X(VcAFshTtfQ=_^ zNxMwVdFmC7i%9j*k{3;;=hLUid6)RzP0t0qSsH?7idb2|y*<$}|4G0eSEtl!K>N2kc;`@5(J;A{bV#_4fg zQ7Ydc$6Z3H(Dk4qdIA%ctscAf&`M)Ki(@CW3nA#WB;d_PWc->ZjS^Y!GY@~#@Oh>` z#Iw1jzMhwwI;aER+tSc*wWzGc6Bz08xG#QQq`Sb!z$+gp77OiGc_J;kihR6+tHXtXng6THo90? z1JJk<_>FhVweVR}$Y)I}E4s&yH1+WYNk3e}EimXZSjq`}x*&Okdu8p>9W|5k-qn^(ez~mI=M4xb%Y9=vZF`+ezvm)GD zZ9H40&3E4uFa@#_*rgeSYj;@wN70oDUzupdEOd8_@NzKLB z+$<3n)spf3Au5q9ff!A?m1OGh9J-nJVu7XW-Ah^l1B;T~x|Pt`%nLOW>Gn9^`>LmY zTHAYNj6MOt+RK;-1Vg47*8nD=hY{_n%6;X_E+e!P@t2@cD)2jM*2hHt4L9;Y%+BrvIO(RmRR! z;ItCfig?U%KqJV^a|Jxa@aST@Lfv&uT1!jl=!NP|ey)GJQ{>}R_#;00L-nWkR%1Q? zGD~dTrzY~8QqGzM2rQ%? 
zGj0vA7JxKZkUNvmuF)Z=r$1e?`#44LJzn^g?ql}5T|rJdz&qoU#K4=?sZK0NpCNl|BpW7&~n}nMJK0^pE5rPKxN6PTZur0?w(&*^8b1+jyJ7PO}ej;0kXSUVhtFk55nWgMHHx$*CM~Vm%*Z2X6&RYngKMlKB#PKOR0HD170kQtk=xZZ0D#qm+=yW%%nV=X-wDdr%(U|@6xB#zLHQvht39(&!|Lh-74eAi~pjvb00|t=j>Jm?V1vLO`kItoqn&OfP z#?q{0zM&tSjW4+23_?Z6B4s>JSyNC~v+C3I7??QA*J8lMq3f`OF9q(K zb9hvOV9pQ09I8k{7p>63!__EmNB4<{A-z}K^2y&Owm=DS9v&yj?90QA9*xG0!%^Q$ zFZbj;3{>|%>3*jjU3Oz}qx^9XG^E1FNmTc0Qq$ocO?Z`C2V&2oFfGT`6LFfeV#pLj z3!=~gG#m%5)xI0xv{}*HJ0+HGPR(QrJvtaeMq(ZRWeFD94N|aHFkr2R)IhT!e0*^0 z-ud5{Ts=mmVOlKaYtKNm0O*F`lQPDDDPzFEf>S1H&o45Usu1E8hi8Y)%=9KQACgcE zVHA>Z>$fLcVcETQF%xm)?&}ribIqvmxwCI^&E* z(;<3GOH0*|Rh9n!{tM&Tl}YC7?@?@#E$i9sPPBVW6ab^r%7R1wcNA-xKCG=7H&I>h zX!kuyXn5!@(#_XbHd9mbX~PZEa|aJ=BsK8$6K6@+m4i5$WI15A;Jc^4gK_^gsVbSs zZO^xrYQK!DdlxH{2r4_ww=I0Hca!lybJ|i5rMsUD^B-=3Nq+s?QZ{?nqzx-<*re`f ztn@ge&qphB*O$sA%4W3YdP`b5-zd+ul}|%MV{U3XtX~CE1ew8~3EL%U!v8piVfo6+ zD$RjekVu)!p_ZtRfICoYwI8KQqf-1?%{uqrkDC03V%$+Wsk9 zskeX9%eu*Sh4yY!gsIDfwFmy<1@CU!K~WOwW8~ZHv3GiGitT4@o(vH`c$tMLT?9#A zRY+e2^@x1hY;;sRG3nR9Mj^jccHO@Cpku()$9K>;(Ag0ERwuJ8p+!Zl$$B0Mr;WDT!Y3ehidb-*oC_N0`E!M6@4_{SWQmo} z2X3vp_&twoW2xpY@5MHMh`B1KhVASkn5dX4pk25x2va2DEIybQU!_ID4+Od(0s5^F zKFO-;zHi~&=373BY1MMrg73ZFn<$uvf=jRUw$|n!v_=cN?bHigL*EPaXZtdi&Bkb5 zhQy@9zi<*VVhhOM+CE<8(N?A9JpUr6-Ii>D?$bOi`cK|E&=7&bJOrw(;ZXuFv~irNr$oYQFoRt8r%O36ZnSVf+|LGEjtmDZmP!f$r z28=RebFV|C@lOx)q1$&{Y^*GvFcloK+HfD|_?~qQR1TTSunp&Z=_Pv|OpS77x+sy| z#)nk>f|ypl*wjzoQ1rUP8!KPL*GGrdn;lhvcZ&Lc_|xRJgkt;k-)xQd zj320F??|gA$L=~19T*BcSvq0ZZtUsl@tfZ-`-ku+e{W~8o-fpb#?U}7C?|=bAG~Np@560F=Jtqu)pJm7tK#d$mm7R9#n&e zEd0(d837u5n?R)pDc`0@W{o7VdIC~87w|w7s~e~cQXH>gnHMpZKd3l@G10F_#?O5ACz#_fPB!|AiZoeS}@I z`Rrp!%maj`&uqzSYio}bWJCr)xO*a)Q4bZ~(sNs1Bb2qa>ZW)#rQ!KrxtC2spr_A= zV90(6#A=d4HGL}uALd#6Q;%qVrN;r{}{m`@Vk81A`Aqu?w*iDh$8gZ!u27?Fi){u=VVEH@|j6 zv?$qgNfh$y=!h6HOi>(yb761QBNtTgkBj@No9c>3#~%nJ4$v;qANS=8BeCl_UXh0= zJ-D`GMFxGRJI{!bPxVPuNmG;TWjkKLt`k>BD(E)LM9(H^;y6_mX_rjFvHN+ zU}tNM2^XT26zZc7rngA{CmEXKb6)1-Wj#RZ|8-{f)~oCp=x+a5c$x*ewY 
z;&FdVCmv;A=rdKy^1vv=Jg*aF;@Bl#Hyi?-EOGs$SXitxGg~yp|7R5fkp0+i^qF5x z1!|?D-->BHHBuda!vCUD430CLJ&E6@&JEn2LH5IVe%SR2)Q0x?Tr}}@t%cin2}%OF z&!g5VJFV`rO=Z#*syc#KbD$wcx9dX73P8nySe-apH<=n@4x$v5zV8$yMt*bq$5}4`h=Mi$;Um-t3mtgZv0wrtzRke7{wF8#Ox9?P; z+W%SxbPPX!q{{Utpelzg!yep#bH}WRlvLC|o(37x-!-lRQOFsovBpFIkN2ap=jiz7 zB;;Yq5}_7XqMC!K@Nh;Spe(}8rRl+L|7fvZwNg^#W*D0jLwxc`C z%u$+eph9cE|7z27yv`Nu9m3~=F-4|mrh>0tC3|JHcFP*6G95qlV}b$lC#gM17|bPi zmH}IrE*amp{F53fF*u=H=xK1i*nW6@))ZH7gqEda`;4PPGz8rT;68wsL3Vq0cCYR6 z3nbDfXsKJE+OXi|>Dd6d4@6M$n9yDkGP(;Vx}tSndIBsV`^*ta7BJ5@^FiOO%rhq+ zVP~G&+C0C-%V_Gc2^Cudv|pKxCJz_HcURjSNRvq+g$0^*4A6MPYrmmZ56kPO4h zB|g%S&NYokis<_K+Re>P2MDr+nwKsBGhG46eAw(F&V9RK$*P)yr3e5s#!`Q-f)@on znubmKy&8UKu#2IPhg7WF7r4r~Vh=N;*WHS`0<-qpP_@q1qtc|6rnx@cm8V?02Pgei zBhlF4N0mXR{RJrH=r-f1NH-Vzi(Sc0jesIpQo=|@Wukk&va%A5`Re6M4?wsTJOX;4 z+*~AbG0d*IR8WpZ+`$I z#=VTteex?Kf4taq7$>U@I}(*e506hMw|Ej&T~l+na}7Eg6AB0B-y?n1>IM=nDw$iO zrNsRndj7Z~itZz!8_- z)Fl-mDCl?A>gQrRLrZOSncmGT=NK?uB_$?kabfUn%NhwCZ3h>Bs8fim zeQ)4=uIkDlf;-%9U~cUePdYEvZN~o^S%u1DYWW!)J&+}^*NiOHnV=)|4GrM9h9^r+Q$he1_0HrLOk<@iAYkJQ&C8FBO=gQ_{MCnu)PtNm&UB^*<%C&GBXvA2{- zw-}&d%^@y|51uzK`S(VgQKXKAv?f%7Qk>}uk9t0+67udz!AaC^Q=J%?3sK#|#4cMW zpTLKf;xbwmmGUfYh(2tOqN2}d(0ZFpod%SL^3{dgOs7$%Y>x=xIr?bkHiCWEG9ez( zJG&W->w-F6ToYJfon)*aL)W1=h}@^}Zjo3@YgnDG2?wDAMM{#L=sz9q>bXCy-Y^ns z7^Senox0D^7S@DgWykC1$FpqqW+A)#t^AJuBpWKnyvSZ6XTmc3?M5ZKc-RYmSPl4sp}(T#|#wiYc-iY_m!h znK#fwM5MuRDRk?~4vFjUGOBoLUIhLhYQ5d9ZR>i0|3q*qZeXBULXsLmRcnjz4p?;Z zZ%X)AhZ|d<*9nwz{dHlYa7?T1B-;{`@2#FO`JFnHf|CqSm2vn(V8%=c39z1(GZIC2f@$Rzjbiv%LlSSIp?37J(u!vDHr_y86l1pU8hWjz zE%5XC8i#(w5!<79Ok*DlDVmyJTqIWPhxqsiQXvd`ESAOjtHhaUIkOKt1V21|O2$9J z?B@@8?0bmYa?42RU($l=R zA$gvZ2{;y9FE0~k_FHZ1)-0Rsv`he~CuPDyD0N@va>?a#$rQEP!;E&32AzTEHvgR# zf+zpZ0jlpmQZA$C zdAt8nD66&c%T3OC#6tAQRE_4a&oeRmv88Za?_W1sS5qTTcE$h!M%0+jrt$mCSnIpA zRF{)jel)g!rW9G9Wp6+`d+Bd$*+$aF!obNH|4*0_yv0NLO-tvUU;6fBHddwbz0R$r z($hi>lD>uJ;i1XjY(GDdC*g1QfItwD{~6LuROq173Xn$cV|vpuFnrmM%Uky~@`Yt* 
z$>yApeqg9i!fOI>3A~P$!!;J66R*)?$#KOG95&pZcXpS~i3sWJ{4f|v9t}KY^a7&UkbH7Q4$M^|LTx^oXnSrBs4vW4w zGezf(=2{3bDCeL}%&; zH^)>}W48xLvN4d><0o>D()SI@c9{*!Z-XoP$HcDEzBig5r5U0{?qX z6gzzY; zW|J$LX3-tW{Hh2n*~`ovOy7uNiKz5tz0!l2CCebRoST?S$33LAXwUca{=h>fU0jo zi@c;dgt*Sk0K^0r#E>vkmm%8Dy)o_;Wgwj|;GsOR)OANbFgTd1$3&>gA91pvzBCeq&YmpGfJ<**a|dV+ z>P4JCYwjlZoXe-kl30P&+y1J!3&+0FDk%;4exhO~cEEw>5i3TB_ABnSB+lNe>N??v zye}zhzV}D93Ib$Si_`Mq`(WwFp7dq%CX#$mwUEfk%?}BVXQ@)G`r)VIiBR)NP zqScH^r%o~Vqe7Uc@R7)CXqB|c=3M{X#FGrkl?mJkyNxy`v%lm| zTV`ay^%OcfHso^R1>=xkPOOX&HTL`Vem8sqUqDV=d5h%@1WD6G5%&*dDSW~C7~Yr7 zON-n$E78=;Bh?gv-`fpW?Rmb&!GNiP04@PcK7etL{1%yA0>N}QAG5T8)BU>6f_l_9 z60`ZYSPkVIk?KpyLUfyp16Zbjo4urGi>qkXUAc(BQ8%1QDn&N0J_2<}rwSh$l1~I0 zdlGZ&sWdftJ1}#UZ4=b35Q-_1kTb_0gNga_qIU@le^B<)vMI{7bMN*=YnM~N zvHeD-GPJ_FS!l@`Qm$2G)PW^3!*?H{?zPxazH47PZguT#sEV?qabP6M(^)&WvS}lb zYg3DXE|n-Q7UXD4(0Q*m^Y2MmBfiTjm=xOGd{JSB@P7jO8g|^@y45#W-zzpy7*{{> zJ8UUD9t99*Yu!gHe$0el+YUC1GlCGpTsTLwc^rof1)djB=m&QI^R#oaq|Ks zIm~BS9B0}68%LztaMajZs9ia6JvcAADiaan+m961!h4-_t zHLXk?WV~AyLAgTbxqFd5`iqIkzCjbMXLLr4ITWpaiRUDG1ERiQMnGK6o(+X^ga`|&d$z9yGIfPFp5f2Ap3@g+uvNpTds^SHzdS1M*l8Tox!Me zs1NU+r~`RAzVv<>4DencP&B(-IzStn68cnYAZT^!Sp=2(Fg6y?&+b;Nv|mrRR8-$f za-UX0U&9`eN7n$*b9bBanhi-s-hE@|`JmMMX3{rIC$156X zXQeZLJb;FQG2T_Kv~`;JkHv`9C!%_=X!A;u+GR&U9=GWaB&1!ly8uMhAnda~pjP3@eFp+XY~0%M4IZlm)xI0^E5OD=9Ztirjo_b(c9|)W zpHajNmwJxuG4KQ(jEvuZ9iTY`(Go=E&H#GuJ}(|Ws1e{J!(WPn1ePs$9Tr#Jq_2lL z@i|tbxUIhAj~~c?`zLpj=_dvZ<5%${hX)+PfD-s$JN$QF7G!zMExV;zH|J!Z$i7yR zxs5$G5ToZ}yvysx6jFS?rGro)Bx$ zputCG%d}ncA4Dkqh~DulwoHq|hG|~iA#h+oF3l@Qi)&RD#ZUKe6>Jo zIDSv%bMoaRce#rd^O)wyVxtk@w7SAZj`y4(FO>k0o{S5He`&MS2_r zpaUy;2iO=xB}zNLV&XE+#T#6j)!;OEVH$3K9KiscS|@Q_Kie~*;rWyk6Op$&;tyjo z$WRI-)hL8gpLy=fpl=>68?9PNvprNl#Z>#}^ZGmjto94%b^KJ#!-aH6{#jPz?RmAo z_-FV1B^W};h4;kH^Cv$U=UKz*^#LHYLvW}k_pILxA@$i%IjE?wf;+) zkngG&1b!~X00NCo#0L=QpbZQLvC-Kw6-vvv6kQZ9_v0F)kQX>tC^hSQ9C9SrzuA5k z)&6M7{A`iq|6Qv z757~ryvk;3kQ(nYK|c_moH#)fvxS{^+D(Iv5*S1Q;N?e}e-7}JpOUy)jl 
zJW>wLa>oB~Bo+>dLd_nMuI6~6DAQ_cc@5teJ~~cH0Qc*&C~`NM#f~umi=AUHlL|N+YzQUy^=1Ej)epJ3^NG+fOHUura{)~#h94Ovz5Zg1%7=mZ_u^#g7o?+@aSeqV zR^H~H`=20fO~F5Rw2RO|;3o zbta;(&JsOIL|86?To7QE%mo^=Px2@I2UA$- zkd%Afr=2$OR417z1-B;vo-zam#B2h$0;Fl7U<}m&=LbMG)6MJ(KD*cDvIZQlc-LL- z8SixNUU>C|(yKKnvV?TMHsVbywYME3t>^K4BEbF>G^B7T3&*moJ#+8sLU3&G%zUPy z5dd7OAU}f2BpWg1OM7O4G&QW)MV~*v@?UZ8Nxux@#yfD5p<_pz;e307b2$4&iG=xw z!iH8?sUC1Y&pc=UOTcafjIcS*tkyp24#&e1N#(YKmU2iWLRIqRfpPY?3Igo5J0&+- zMZ;=&#0f(`|dh&s5q=XaTwqi;=pm{Wa<;o;J zh7!ic#{MrOmTqam=s9=P1cN^5$wgjA|P^A16bI-WamrzY1C(%Mw*Z zW(#>*KEIOLQum7!irM}VGAy0Phl_YR3IB?qrK20@?+0f1z1XF-2z3ZM6;JtQGqovM#Ds5A`c^_CpKreBO+_5kpDmaafO$B<&v@ z^jiIj4EVGRVC#65gk+{sq=F4jJMuuFLnW891TZgJmKnh@dL(mYjF8rD>RL8Z?OL>& zJ+BN}TF(I2Lr4i3>sW`P0HqXd*sI%*Gx3;}tNr)l7X>`NdU_d9hDvJ-3i~t}1X1}P6)aO#E@YxyGuD5wQ_;QrpxYU!G`Q+v~vQE(u%N7|ZCBPH` zy7?gchccbU%TaPX#rQl@V5{;xEto+Hr{K2krHwuPe*E+D$zA94 z^SQ8~%#6kn6BbgP)>)rz;{)6k*e%dJAa1&IN$=kUvyw?^R%v9g^x^^8*A~&qO|l*A zPi}we++$U~QE8Sx%F9Dfbno^nV#}3r!epSoUunM`@ZlAwlBY7GT^Y>X9p~4&s-fup z!OSbDd@z$Yr7z*{^0bZspN5e!av#&|5i6Qk-?7ON?b$Z~BQ8WIsaX#a#OeT*HN0mg z%DM#lkt-aFV1T#EdTIUJ>gJ}&^|9M^ulN!f9_MYD1>2myfcAnJfj)2HU5ja75WGBI z{uc6RUt`V8W0Uwmu3R=DJdv2=Uj+X zY?nh?m7=xT+&-!$9?gW!Ei75pul z+Irciebf7Qe1so}hcS9kh2cuo(t$O$OY`1gvoyY< zF9WoSvgPu6gZ0N0l6|9?I}lAcF!P#EMU3ejLO+_*h5*B>J)5J)Ee~3!=NdWKOzv7` z>g5bvIo+4(uHGc>Y*X{Vdu3cOg2Ob^diey>kN9tJ-<2|42_XtYDbU zAy3T=aCi=1jT^zwr~Cm$*{Or#dhn2vQ$w~B1F6RhjzZ3{DH1TUKyU4{!lv({65=L< z5`qK(Rl{Y!O)}t5fhITpHvu`itm(}^0U7C-p^bzdV8*6*TbsX}R|VRaD{|oubWe13 zCL{ce`%DhV?uGEq3F|O=D{g|aYOlNyHj_&RdS7M;8<&`Y$6KEl=lcuL5ZRbG0Lnpz zo-Dv-$p1rujG0=5;DtnMa5(>uaI#ea#MOt|#T2BKzdwLi14 z)LHD!;$cVv4GQkiMEvcy|MUZ>BCk^dG?!I1V8MN?7Qj@v$vxnT&BtimTj#%R25tTj zdznZ2urHV0m1&CCwSn>uyOTV?-*=49=y`n@4f;=@I?zabYY`QN{P zafpe%bxcnkxmVH~Z;!Pa7z_aS?Eg&18or$@rgV{XQocbuW{#GvJ3iC(6~Qz{JDxVm zVGaO_2IG**b3g2alS{&Kcfb|3m-OdF!*w{emx~-e5sNT&`P1aqkZ#TgXxB#)$ z194bAq8~U`Pc+<1*bucP;D;o<*8+^Zc@e;R<#B-osB$QeCUNA>^1pvoAEO>G`Vqy2 
zjLI2ubVBLYzCz^Yv6EkZ5xk+YN$7vNOGXbY{~{M@N_^g2T5M+?9-BoIcvPdFaDsXM~*&rn#zeJTHt%le( znhz15OtIF^LSebFd~SfZSA^-W#UXrS=jwiP2eBMoTiy1V^q;lXYyV;!^%1W6hJ)Qf zc`xsFw5r+4vf20uYa^Kb1aK|c9lVnXc>RF8|q>(fr2H|ds?BwmBe-mWdflYb=e0e|{$cj3Q=hw#yj z*Gz$=&64F-hlai*J#}f3wHj*--*|n4!HL5SB?>?VyhX-Vd^Kz18t9y>U^j4|EiJk3 zFXjPl38*K^zgPtAIXC1;M$7w-jHL6MQv%x&w5rb1A7xW_xL4AZ@ugms;Z?U(xV(;@ zwzF^$I4hVJ@pZd9XD529x}JO1dhhUl%FA57+5LdC|8@&+(CVuBq4Uj6qS%XkpefCC zhJM$bJ#YgTBt09_O6UUCS1* zl)?L|8N{+?o+YPd2twDr{m?n|bSjgk&cO8wofvK&H$D$+u0GSMd9@Yp6IuNxi}pZv zl)THu9(SjcRUpgZX!w1wJ);RSmluy}4zuJiVoTO!P+$FW1q9syaUU>1(tj?t@%??O*VWF; zGPuB6Wvj+=X4Ct}6oA_QQ_p&Ld;QGaUo%jLFM>+sE3e^GRA7XgUqZH6Y$OHk30_~q znJCXk798KUIG#lT6#y}CM# z$3V1qn$~Yx>4{8CQ+-tNT!TS#_2ITmjl)|%A855g);|JnX}WHoWq#NaL)shR`(@x} z_f23eg5h`7O-7Pt<75)9ca>8XRi94lXj#}F^)=9qthV+%gD+uRd*3IVLSKvHeG=!| zE=DvtU0GWA&S60?aM(|n#)y#CZuU-(yETldsg$)_;RQP74pIO%J+6su-ag@o02<%@ zs;f8cA7v;-5m5eu#)r$TC~P@v0ArJz9Fvq^q6=KL*Tme9+8Q1mZO-y^ z*8};Vdm+U+5O6Q-msneYSV+FZM&F;ONaV0D#E4rakhS3{L%C{$EZ}x>u~gzjxBdp>cbS^@CAY5Dpemmu8*T>gra2rtPBLF zK;RL*slaS~M>72iHh}-+BZ?19R3^lx?cya< z;3aNjCvRXy&kXeX5hgAjTsb|t`t^+bF-CUSpXWQo{x3%6y?+5p0$q+)O5NM44_ME~ zXx!JTA4{T)@)$9B5K9U+q_7i)z9tV8`=4F@E)LIxksrYWSeTGWRc-IRP}Ca&6s2lR zzyCq2lr6jnDzSd9GlK%Zx$XW#=yUz)>qB?b>Lt}$xTRa8feD1!$;g7tD0}Qdy%`@aNOAGIO*hJc(E~a0(4tgv`6S8)*D-uPA&fO z1#;p%sX~-M9=K_pUADM;wk%|Cb7()@8cBbnqb?3v8e3kL3wA8W_rFhs$U&EYi+MrA z{{h88^Kk+iN(dfocRGl;Br2w51=67e-y5qhU((`B;7t1^2nX5wW?Sbw{S^1{nrc>r zlG4ifWHjdfdy=J|;}u`$QrO(R;Eq5GytH>_B?vQ?1lDsudG{g>Fc-R=XFez{)|*`f zsauCcZX#9UNd`7O6j6;P-%o1L%gR`n>122C0K9E}MCCu*EAoi6`~TBg5CGtLVWP+V zCmpIxy7G0!bH@cNEEe8m<*P$C#rY|g^7T)))k)wUdAE@M)^;mIr+H$_bd8tx@mukx zy9>1*-_fD|0t4 zW=&yZz_+Eu!ap=i%me&hLkXlebN}fD_@DKx7eK@to8j?N$@w;_xs!=s+H(ITK8ms~ zX-%Er&#QQ571y{}P$}S{c0j*K&0>PJOoLE8!%J3@-)WY!L~K|#N#gsKecAE&K zv23?b>?a`=8RHI_T>=)w(|3>OET|3kk&9UA7yaM$r&pac<$UKz|zpM^gz5PJA5XV)5l7tz1|Z6$ws3l2lT_XJ==ZmzUe>m%o3<%u>YXnc-() zDfhhE2mU1^gY>!CTPg$y#P=02_4-MD-$6hAG&*BTt4gN+Ex&mdaX42AxPs%T5PN%j 
z8PKRG^mO?UOzm3R^lLdfy?GM!-mSqzy8|bo(gs+2E?mF&)E*r772Bv|#p9AGT|R%x zs(JMPWA81a;^?|{Q6gA?U?D&Vkl+&BJxCyUaQ6T~g1a?Ia1HJd+}%C6L(s%j=S%Vd(O|Z#(>dU)wOEPxt{qTeKX-Kp znbe;R$?`lMhR`Cm^sitU<|~NSva>}+@Q>T(cz^7RtE;h|i)gAnAS^6wAtok9TwFZg z8fc7C?<~Bu(A?ZyuGhT%kk06-p#i=)0N>LBxr{tXa%jU00Yg^)1jmk;DB_u)Uh_Kz z?Y}oe9frByP7)^6^pa8V?v`tPs&dl??q-{Ys%mUZj3dwqyw(b=Op1|+Gxw>+#H7tA zzi3_F{rp^#Mq@Oa<>vVwjq5So0DJfZi-gG-^xK!4;YPj6ByW5?d=)vu%_exkc zF1Mp9}LvKkn$72Z-?7hU@Cy3}cr>oA2Pl9M~ zU$i2v)HsZ{mz-YIgaH};%}bCC1DUrm15?w}AApoglj^y14n6^%Ci3Odw>j8xzOAJP_TVbwD+P)dr_dizPpMgUF2{r_w zonCg|HaNMsfO74Qjf;hfX@r<4PXfDwgMxkmlwhTH>Bz{4Smw|D_g>w5PBUO|Ur&z` zDcUc_j{xWc#^do!XfpgXvUpkG%$+w-cSWDrTaJ8a!w0tbM6dOUI5SvaMOWxfaj72{ zTpQ7r0C#CSPr9YOVZvS>$=GTDjxh}EuMbB;`bNRTG;(&933b|F2N8;eMP-zFKiMhQ zfoKV8bH1hU3&*+9=P^^~`|(Xq=-%-qG!2M*f1YaeAK9RZH!ZO5V>|c~NYfYp88Gy) z^Zd#Hc(96x#>j3%LLTrwvucb+b;-b1V0++pIcQQ4B*T6?HM@KU%`jls9S5t3S9|Zh5gFsK7x^eZ|g6*=R!kQ z?x95qL4kLqe?`9Rny`N@fR&Ol7Ck0_kOgep`jGc-yf0d=DW6>cTq`-Nx-x$ zbPqZ>zP6%1@9mR4uO@gqO9RxEHOjvF$n;Mj-RfOw`?A2YS4`Rq{Ax z#F&7l3Bt>QhgFj}H8`g4W-%WFMT4+`?gThdtf%BizXEr3Z()q(^{)E6o7&8RvD~>k z+wG7$;~kNv|S|ql!5}5A?VJ|$LD^SioP;2fk#NlW*7X!+lG3<{DCUYkG+4+bAaE#svMDif{47jJmj^zAs^X!jVeUQ{}><7!xL|P z!&ogOc;E9gXs2yxp~c*G^7khBB%+;6Z>d8af#Y~V0Ehop* z^NIDN#A&eHTJ|2;BZOe?s=U7q(TtG*NyO<$Q1(3TK>vON_Ce#rXcEUr_>G4KIk_OlVxhWmcR954^gGZ>T5!9U z*EYBme&yA#1TW=&4RE`_L#@|t|CDToJU3mui3BSm5LO95KwLl z7*#7-Jk;FWE)zM@#u|@sG1o*oom^bffi4U;LvpFSY=S+&)cVrtkILwrovZCbE7}*zaJ1v*cuuO>Bql*1k~qv zb0?Kp3iw)(~H0c6oEN^SWJ#BJqDL^U;?a>dnS~Nsk@Sz+i3jYFMw#nCPms#^jah&fW>Y; z?*|ss-mJh!zH$Y3eiKBU_HMJ{kF4@?Rb>7x#fl@Shl1idr=q_9SG~I@!liknNrobtrTnBN8^87)u!@2R?BYu{Ft?aDgnw+4R)!K= zkRJ)C0SDc5CG@me9+APQnmBC(js3(pFdvF^8*_bng9rK9C&P_II9Z~>yGb@N{ zQoA@|XrNW`P)1VKd86JmgH*sS1lmZ<5)4~jI&ATTUVl4Hrgd(=&b}2E@wmjmMlAV{ zqHnA#(?qUZ*EAvYljL5WW=X~5?RUA;mPZzX z4x9ai2412Pk-|5-zfbDxC*gbGuR8GNJ!Opq88+z&_sn&SHV~U69^=#RE9uaJ*%dj_ ztVdjR=#RkCE7zeb5pF_Xqy*;2g6Wx&%-Pb1dmNJ2U-LeFmQ+%|Aj*M1A4c)LL44wAknf1PPEnJQc!0vwNKoAI~kb 
zkM0ZvBkm2cw1>Gpdb+byeE=tpM;RB|5dGryaxSs3ACpPOxZ`d)d0 zYb-2C#$^8^DlSm04j7?;8tv}weV&Sqy=wOM4)9hkkRu^=Qh8SAJH{#mrDP_$?MUQJ zuZX@_n2cJP_J$qaH`7Xk_;(;ccKZS`n!@K^d-H{Pzo9_%JOyK6y_0_bh~Ius@5}&| zyMV{&0vI7cPnQ%VL#LZDQ;|e|=~n{3RN6)EL5^kjwu`RQ<-(^)us(9ayWOM0kR}kbE>qvIS>OBRxsI-yN-{KZ)j)` zfHa67igk8FuQCK&ZT6=MjS2B+fa(~p`+)MF!Fp&C4F9Z0T$f!SHCJSIN658N76Aa^ z`u7Pm8U0?DrbFW|K~_3a2)&gIvbF-3R8Af#N&xf?n@0Kv zyd|J=o7SaEIfz*Fzin9$Cale9hna9NaegX$=FT>-Fm4pvZw`w&I##C#+BGz;2&{ra zgO%uM0y3vWKeKCHXQ)a@OG_Ik;HdxS3JttCPHOmXJCf1n-!+;wP5wMpAF(%HcPSe^ zs|lkryPT3nT=?&#{vl|14KcZ4-sJi9ggan+S)fiCC_x&=U1>~+^R;FAgXGsDAZ}O1 z(gDod@cR?SQT6Ar(8lkaecM>Bg-A-6QooSiylAsIL70J~V$4~tnO*9x1)h6P)*V4< z$MknGb;nCr7GNUG%rx{yuW{w0dqr5|?&Tp*jAH@d{s&_XBtc5YP87m-Vd+_0y)I9R z(6@mkd#vsE%)5zZG2a#B_0C-xINOiL9w(6Cq3HoEAboO5N^n78p+({l0_3Ua!92D} zm}BT_cTADsc}t!+*k+=Na9A6j5WMJviL@+fOZ5X9yxw+&8Gu6^K$R(;6)zP4JL~}D z;2y`AASLkf+08-uScZrJ@G@YNkE97`Jbd7wyqzX%W=6ddBbbcnQsn#7z5r$LYuPRV z8UoxAu-Wy}AXiTuBjYKGyu>=87oI#JGc>CA{gEN@Yw^ZZ#gzcC<6 zj9Cu|awb@toSWmztE+Erz6Ca%7tPOt4ABsjw5tw~Z8h18qBazmnBI1XvANI|Q;V@^ z2NXZZG_gX1$YU4C68EK>#NG9|X%)(cU5<`IA|Mc=5OjUEB#)mci}0k&WdmVhjzg|a zE@u~)h{9tQw9dq2r1I>CAr8zzKEU$Y zrmv(z__xKI_)&KN0G|>=BIRpQd=p@b>axorM*+KQ6J2XxT2bg$YpeL(QXPR$)brXK zoCHTYO}S~7GCz4en$y$2*5!K8E{UAp5>=u53DpUdLKh6 zBM>cW6Dg6YpJ=P6QP7;Uv0v`v_G>MtJf)zz`gUQc`bG&@r#U_xFYrNaZ(4U&@bRc- zGDe$@=HIFUQfp$;_vdcLgz0r}PfvIuxPFJh=pVxjv{}KfPJt$lb@c#IO$^_K_ ztZ`BNYOMUm)}j`B2sVcyvr?`jq~Y2{+qe-SuniyvhYwFA_`>c`i5Tw`hA6VVwT+}r zla#;Il(Ow?D=pFb7p>h-3;+|^RL-JEM=JgzXZ3gCZLl#NQ4UeGd=O|o146b2y7law7&XST!u?cr(t*dc;}Tu)qm1m4sMdic6XzE7Y)fA zz{jg8jHxzTBu)+o?lW2@8>a%zCY7c4%L5nQ#c#_fHu+sW`%d$sLnosKBIaI016MrP zB)T@ravG8lVhf<_Wb)cQIx7mAbFFiyptl0z%Q&AJ7&F+lA#1IR?LX1Je<>-Mjj1=EBxoZ4@l}$)YyRreA+}_)q@) zC?SzS$aJmt!^h;q+H3UzhX{f(Vu4bB#ey7R*Xf0q%|EqTtdB(>Rwxk}b{~3)jI(=5 znna;0p6f8v&zpe08Cu$>8)Yp-y1Le3UdCeIsUm_SJ^2{D*WSquk`(sN=fVMD=gcef zT=^^Pgb6o47W|Gyvdmtpdr82oLL91vI6sscC@Gy5x52-0ml;LM9??=LGnd=_>}e`w zy+XJlOBQ;}e$xKR_TjMh+n>^B+2o^ls6h^omUn0^vYg)IQ_51UFTg}K7Kvqg3jG{o 
zn0LAw8i>B`$0Bqu??0(NGvc1A?PY;k--KsjLEAvFZ3K%PjT2GP|WC|kSIQr1AuK#m9m4C- z$;tE8c0dWlu}G-sWSEqU0B-BWj7|#o*H;h7`W{%YrF2?faP3 zjbKe;&yl|K5ZRw~q}Pte%AT%$$or62ZW@Z$(8jtMfN{DdL9N5q<)xSh#2*M&1IFJI zLm?1hX8@|GSpBJaRHFm7g_fj^C_2yQr~z z?R{Lebf%}ZCdV1&1A5a0j$4m5Ku&?XT#qy!qGvst0jYV%l>nXrLm50FFg(SX{ky`c>9s`;T6(Fd4DPH$JtYu#| z@bNHQGz9Cr{*O4N$IB=W->5`d6RDcB zt^?X{2Ha@baFDiMuZJ$weI+S07R$`b+wLJWc+ng};s&(ethb&6Om?sefNs`9SzKIP zLiGACTFxhA+i#bk-DT?@!uRKsz;SN8gex%`$GF z?U?)o2uyx8ak1jl{zXS;AxITZxi5kgzuAw3Xe+a+Ps6~Q+6BGdxW*X{U1>Ks#LIf* zLizC3_rUTN%4T};$ge+r!~DeN!NcfF2bdM&;xE6mp+Vh&=SV^7z(qclctccuy#4nB z1%AgY&(}0_dwxmBN#_m}CdtaC>Q`dy|9B2U@_LHYi_+$^;fSI7%pzJYbCeCV`m&pu zuKTG?AhoN0ksL!D1N+N&jD^7c$YNb%Ovvl~$RR6vyC_`GwCdavmr@ujom({(7*3sW zW=#0PSpNA?Oj6;1a-nKGKMS}@h+m}hKY5~WjPtGk50J)1Ms877+uHdkMXOwiN;+3M z1k?N5(R(8j42U`%? zstw3g`_M0}veNf3zni+&Gdpv^7I&z`K%9IXy9B3!xRhFN z_~mryX*MUO=Mi*XNGsQwM9zVN;%mT4u2=5jxb>~!trI9*e!5%_S1f{iG^EV4n-Dh9?asm>~ z{a^*>vk*p0M5HgmlgvHFVk?67H5^9k8e4`Bo1@%Hp`X`A`^0@@DVSZ6E zK~&=e$khKqscY1s>W(e}4VCn#!EX!-hIEw;9Au)at5{TRhx_0MPFjOlV+8Cpf$&GF>vH3Q&=8i#m_GGaHt|&j%JsxyT*3i^n zFo$@<9^n|n?)siZq*#^V@n|ni7GD+tfpVBGpP{3FlXk+-tg%#PZ6TvKuj%N$BH1(u ziyptMv_j{*ddri0@THm0T+dX`vDw%~0Y1Ne2OD>%3k~{G#%8Oo1@lNbA&m(kXgH$2 zq?FlMNHR{~s?vo0G%vp9bLF7V$G(1_Ul1EC=sGMLbuPv)#LK#LeS|mLpPo6-k5<>lVNc|my%7ErC!s+UftIQmr zpXSS&O7GSfz{w+_VZ+M|lTMY47`T#OmyE8M*-fbkS@A@F7Myac5{Phe-+c3IYn%N> zGQ=9Zd3H-7c!#a%=ngNEnX6WVarBf?v41$J_i?(pI~y|}ZMiesabJdBVO#HoWfXj( z>!v$O9_93g2S8G=tSV$Ta$g@w!2K+#r3Iw4UU4{&R!U z{PycxNCkmQwU@BLs@D*UzRz23fYH@13%0Vh7IHtI6@imA-373LemB%vv>9Fw ziw*W&T5vCL#Y++`12R5`(|(cUc0nR|K3N`jyFuq5p~#t+*NRQc2pGBd8LqJ6+V+yx zF9XYjQm0rU(hQyA1g+`RbGMfJ(IzkC$bZZ1rJv0pk>z#jkAF~ujhY3&DP zbbH&bg^HR(TkA}LnfukK@mYwjV@NeGNo&Wr9IvvCwzWf1NZNt3Tdpk@a2usHwJ2(D zQC*nPgxm7R)=&UbqU~nCxOf4TE1jRcNq@$W)$R0fPetl^kX}C*@()pp8_y$V`*b1h zvK?{|6wsaC)?qb8iB~tIj>AMZSp$*;k7Obr5J`iF!MTaourD~)W zagBz6yPWbed6T~)+j@lQFghv0zh9apO2kVmS=_9QS(|NJ7dbT}D}UI+I9jr)CF_4D zH1AmPL$%u4>7&J6Et5zytIW>fYu~s^JkeG6thQYo952BE&7=ZT|I~_dD%Z-Md%MCj 
zZxyGu(k^4C-0n)zOd0cQ8px?T-=q2z%4xhUtd-tx?`w3sfog#7CH5Ey`1m5ec-TG} zvnP`*O!XO&l6Yj3LcbH|aU|eeXKr&BLz#6l9j8#P^Jj<|y8z>-7J-SD@){$L#8w^S zWL!-s`Jd`3g-6Cg!Gqz{6r&?ix*7H`8QT(vrtkZF%&PWz(upcEMs#*At@mo2dJ<*+F7!MA3Fzz!SAm8h=P-L3%6! zfOXCj%O-Q(Qw0ug&6;;e1=7OQ$w!_@W4|Caa44Dq&t?k&JtLcEXY2|p$ER-%y3}K$ zl?Z(nXDbxQOBc!pC+KxZ?DclS6WQ@!GV_DF4G0A02 z!G%`I!kXRH1-gR(DYJV1WQexfM_5`swb_+eipi5`#8D?4|55X7YP*8DmSucfRPl>f z*2OY{TDdUnV7-?Bh9P>ZL|8JlpwFI^M?m<6cvMNXLDuMImZ+7~Tz?_i80?hI6z({4 zsdsLZs>-HPob0gF_km8m%ow#L@q20%Ylu@E!-tk=v9C7YOru85zbiE^m&CZgw!$C{ z>wr5=J^!Y`%)JLr4$-GmDq;JQhZZhS8lRSqpRbY|I}~Ma$$D z)>xE42pqS&)HfJU*K>(C>vU$g3p9AKqI{xxS|BVu( zLGNO;R_R*HprqraddMzTT55DQ#2=Sxgtt|6csCL_-@{B-GmkxoL0BC3P8^dlBYO;# z|90L-9t6<82|8^pc{>Uj)x6NR)G7THleO+vR)XvC{1j!4t-`nKZvzhL42p4^*MnZ* zW1>iH(>jy91EE597_?+rLqcz(WbXzf>mh$d?Y_#UI`Us4BoG_JEmzk@dP$ zMje0UP%fQIbQMQ-2hfis`SKoYU`C^2?eW zREq5Xr0#nk`wMC1LJCUr4~;r#8vb4L;bO3uiNxZZGP{7nZX<|fd$X5mgd}~8Om5Y= z+=k9`=ug|=KSVRll{@}3S8abV^s5@nU=e8KYZXV0a}$Xc$fK-~GAsOsBjlt3wh7vy za>QGl>cg2EJn7< z8&uxeO0Ew)3w=P-hN^%Q_9%9I^iJMAadqPTco==cL+1HjgfKaKbY#iQ} zH}RUwxXGNWx5cVdgtFNbI6$hB@_0u=*?I zrB65f`eCh@$l*vfUly5f4U6}Ld)Es=ca!K*nxRF=BHWe1H*e>=e(RN(ZstWUc-q#1 zU*xgv^Smn4E^FQ9qK;AXYN@`e>uYqc&aS*K+7dIIZ9W;4;%dn>+tDErC~u!dBi^>L zF7G!XKDybs$Ko2=8ZMc}TBCDlXs%qt;)*dY+O@j!wvOhoJji5Kb;C+i%S&pS&TxAL zwoHn>OFnM-1y(XBNY5~^Kfm7g$E@!58Hw~wG`;0!QQrekyQligNVd*9m-Yze&S6&D zRp%ovX@t6VWfc8$d)JP7FJ)r9Lf{V4X~d~HloS+DoBYu6z3Q9v6yU$6YpJ6h)Zq8l?B0N`vFfEW z)nK(;o_~>Dtl;62n?Fs;mfrHUS>}F|PzI%GLvs{S^iL;4X*>(w{p>ZUXB=2a<=B!5pnu@@+r}kh2IRN?2 zUbj`YO904sirFt;$J?zrN~>jVQaLVTV&x4rx^&lN;e&(|kpcvXkjRb=&6d;FR{V=N z_Nbi*tWhPwF_ol5cRW+cQggDJ2ILk&wdW-E_&~;Z`RhqDKx)NzXg3oMo zrOh6BvT3gIaVs5Q70c?z@l1-uGmR)=x`iz*Y+`z)vL6SA zQ^~5TP+mc74ymt^+ve072aRn)hq6^Mm=Nc+t@h%+7{KNm ztI=GQv6<&`y!Q6%S`JU(nhU9mI^l{rO_mL-Sc6C!I~^3us697nQU&zEl$lLb>8S?8 zvmd}a+?4X7fvgcdLthmZnK8KzB&Z9_FW1IGtdXo-uH<1j+l8zsnsCPFZhh zZ^PR=aYYQV5j3uDIT+BhWzo%18&N$-r>`p?Z{ELc(uW(x*gAy>NHBD?_9P~0WNL1ec!&$|GZ)x#sdE_=>%`W36lZH5Fl*tL$pS9M 
zFbn;i^6*!=ax^iymf_T%k{$k_N4>0m2qhM!Lbg=24hY658W<{z#(V6C1vC3g&ym$B$JeGytxvNb)| z)L}lvk<=8J)RabWKQ~_69AVC(Iw@|jZs%&k;$xeplQlDp?rro0)0Ky2h+vc0?dNaL zunp>Gu-L!%kM6a7ysfUvGabQ(YB&0mSO45D(R9VoefLNVZCppE@iS2+rXfh6UY%LH8P!R=x)@ zbkNX3CfLel%)nlZK*RpZixQZ`$v!lckS?gdmF+6Ooj-?jKeiH+6ka-4?0yYTIEA7i3$TNE#QC9qoKEw{_Ir0gWV{2`*n5Z`wyzKo`?}dH!eD zv#hQgY0uWGR}e1wI(dT)=BbD&P`j=E1xSaiK3)hvAe2VmmS6T zve3n-j`>2*$ds#sF$MHe>g#$Z!TMKCA+s$ZqszJb%LCxCsA^2M66MwcyXqmrHoOIr zqw+m9hXCo=;+HSyv1_m5Y``30E?-R$-^l{GY}=wuZ|ZV6yttm*@}T_aFr2P_#{r(_ zosto=N^GvoEpuwrPwQmI3rH*DZn3eo#xMv6&xeZ!2FKB1xL$o7nDJVA$JfPboy=|P zEl;!(Dh}(WYA;IBXls!yad;Kb!2iUA&K4V`d@-CNe#wl7N?&aQfE8&ShR1Fv;txQ8 zg$UC{Jt=_KIbfg|Q!2b`DZIn5c|pHHvoy66r)zL$_l$=t=q}(X5|M0Cx@mAwT8iyX zH#4%KbtpQ@k1ipF2W^hTT4zVygehxWxMQNSOUp?xm=T-gcPvO~05r0hO*<&i_r^b~ zt(i$^F|0ZKjB&-SiJDHj=enStBwHuICsI7MyJ}wVcdlYLEiX(Lr&YK-84U*MXM2cvRQX)zgwX3+)0G^iY|Udf)}lbWXJN_0c%Iha^n zJc5H-rd`af0vOWhsK=Vb_B2hL4NpaUkHsF6asVF<8~9bYwFNaOht4t8sY5a%sGzp( z^W66bi-Ahkdsb6z)62;!p0 zHC7ErQ;KaVYYo>tHh)hCQfBb(obhD+HK$a=`2g?bqjN`xxNIG-^!Ik`qG+U5RFB?y zD90qfi=)YBb}MXw{(NfCe$t4{;II0MQeI)iYhwM&D`W9%z1+(@xk(I53W^TXBwDpi zwM*x2&##L8pRoxWQOTEzyt5vOe&K%4#)a zM+t1cn{iP8k-5o=iz`jEejrn?QCmyi#&Q-=u$K6yKzR ze7?k-=w6`DJ21rgN#G>LMf``x9$mAOGRaZBdYE*kgO);vLYw)PoNsk$B^zUEpU!+^ zsmEBn*BN(uke_9!y=j_*sp$`(`dcw?pY4!$o*zjIso2tURFSb?if<_>B)`sS7R*}I zLDqwVR+KbrbWE2Uw4E=)s_DfQC4Q4`(S6{vYxqu~CoTV)@Qtym`$6$5MW9n|MxUBm zO9S+#N9DW`8)G!>NtqHBQs%!*Q&vn&Mw6hRCk1si;lG%Te>-kLh38bI_R+Qi24f7Rk6L zk89Mq^vY7LG+U?l7Qejq%3ShohWz}Mn7xOw-aT9&%Vhqwxa2}V-!x9VEvv-tIhH~5 zid1mbH~dHac)8uT*9U3P-$grx{c2#==xSP|-&p^WtwqkOHWXo{(v$_ld1{7I^|iSJ zhpQ!3>_oXp^oAkurrD^1d|KWoxk)W-wC`_pZMRSt1&fu&_H92|ZqxN2K}f>K%!ctWACJTYaOtbhEj#p&B}B(x1{Q1mqGvdI4O8Plb88vNN#(XH^OkJRdzWF|d$@BL zyj=yhHNHXU_SM$(?^}jHU#Yx|8wG5$`rZ1D(qEpHr>P!Lt zm|-U290s@gBAfD|TCh>N;}0*UYd4wCs0yWh{hFs|ZSWKgtRY&8WVPLb+9c)0OSQ&V zx$o383>RFd`)_>zFym0mlz)+%N={dGf9|R3?#@GUZ(0&|Cv5bdye-m}R=xMRmJ#S4 zb5aK#$#2M6B=$ieAhEts?`1!2nXr`I_9dUf%>MMTfObOv`M4(|W6F!X{q>f>pdh-{ 
zMx$L*(|0Ec*}_=PyoS4|qXRnkKFgMP4MK5sT>fKBFJC}LZn_`Iek7t7KZ0K}lFe9G zTd%Sc7Lv@Q3?-;;<}U79pnS?>EKJ&Xs(d9J`mkn~3R-MNah_)8;~}Auoc~KB8ra%t zj80bw_Q|RiCAA{x@raXBNh!u|kjX2n=)EIXG}d_E=_2ra2l-yjtbB6))&8w`;lOAr zxp-%dq`x_TVtiqGjBaN~3rrMO8eX<7DoejfzbV9Jm*;H@dlstlb?z=|BpUKgSIbAG z`nX!_X4tnntI0O#+lrRfP;s^M;DPYqTCvGTlK?t?-pI0?{=0l1b|&De`r#n z#kG7=>!4(%4_$(DN%K0a#=*bxi>LSfP8{{nv%}AK5uz878hwzO@dec46j<9$LZ20YirXpb|NZP5pJAIK3h zF-a^zU|DJy5b*LZfi<*Va~00l>Lr)~Z|3{oH@0pyo;|ZDEnX*vg%hIqM*aAQ=VP5Y z9*yHS@h;2uxERr2fYv=-G)2)%THIdDkvd03al-hdcJ~Q&sUzt*8B_0=1nU|n64R4!AMvbz_{&N90Wsq5bBb8%o6U>V zmSsFk-?%XGY}iGa)#dK^`co8FIO(NJ;$@lR?XPfZEj+Wfaz11d-VgdkoN`K_A7wy8H7?R!F2YOVA$H2zd0?*2nJ* z<|Nr-6BRP^rmKA9vTY0s(ms%lXX*z)iS%DLd98QU<@*PiQPX9X$`m84yGfH}Wa=Wm+>FE=p1&_ypto3omb&b`ccia!vIfZ5%P@x`0(!HWY^z!!}pz3Xv6{=Rik>nu4WNlSC%L{+H#wH54Tof6ls;p`!Sj`V4*3ED^gpXc@4=boZQZ8Ceg-XEGO$;t;!=Q&{5M&Kd;7#M@xK^yppHmkYfSj8WpUQ|ioxLv*Ed@JB!TJsnR`tgMW=5f3!z-GW$>p(% z8YDCG!xm*#@(L6b#zDMmZ$q7f)rC|>e+s&)h?_WDEPKT{G%4T8wb{RfmVA9fVd!HGYjpA)U9!#l-m5Od;LpxjT{f zS~WuefMos*z-3y2O}(){b82%nUNuozYCn`zLh=O?R~}JBx&|(F?1qPWH2fRlESpWAG4Fh)S!FheN#PUV za!wXwlQ$Q7gZBNsgwxd0M~2!a+Z#dkd{Bd{KaVz|f_1MbY?iLmv-GM+o>^&3H)b`P~C3%VNeFsAJ z(KI!E3M1FtA7h-%>UdGB8~!t=x=A$#qs^&*_SZajmQ6n2lb-nGn;O}iyjAOHR&(~$ zD{FmzmI`n7ddybU4VNK`Kf)RSuBFtF>IW(+4(ys6&xL^QCWA)WMCi@6w)YQ0Yr?0- zIbA9HiZJUBdZypGat>jDn7>gtY>=V z(SL_d7EtWAVC4Y6M|_&Auw@dSO85eWNlef{-ApMMd-wvg_;t&#Sfh_$+Fd>qmIa-K zIgIa5d?=PhF1G?ICQU@&K0YYp9Pmsyk2>G)!udO+QBT)^{N0AZBghN!xk<|l-^g;0 zLea(^8x`K_Nvr+ez#msFZtx>!Rzr+9IeW>E2KvxvM3@z2`rtCZ^8 zxmWQO<(>ALztW#zO{&Tiec|n%d>0R>D5t42s`h~keB31mktRXMvS=4KKTfrkZH#4_ z^f+KGIOKj8T%`ZDt=}g5<;3kVRRT`bpH&C+DaX7g(D}U=Qz&1b;Zka_=zkSb&UIfI zxnSPA;KTsv=Z5K!zuS)F6Ul)L6=UZ4k&5od zeLmq;$F{HRKr|i*vYpI#wX;q#wj z*76f&c!-in8r0Tb&9@|tN1XF(P8q- z8#P%u6!#y~zc{?@RH`3cCfQU$8Ah#{*@jgZRGS{7nlO}CjJ7kR$WGmScxIE(#br0~ zZO_4{m`$a0s9asUK4E+8WHvsDQ9I6G=Lq?f{{Tz>~5Mo*gIhz z5X6um6TWN=iSdGAk-H_;Fa8?LXLmJS9uWkB5dr7ll<1fU5+=DumIfUeGaX`agsU+P 
zx~=0{8IXUG%?86CAHIuD(7I(PK2SF&T6NVbZRU!v{Rjk80uI&_KxZ0W$NK9qzAHi* zGL|orG4bjvURb5m5tOMG!Ohk)sqVMVQ&WJyBwNx=LWGFd+BmKaldu3Wlz{m&Xf*qu zPFdhb{2l+l{j>iSko|M0{{v3n|9jAfsr=t}qzBI;ht>R^yQ+J==ytd6P7w}?8a$M! z#AwnYMCdkNh2O*DZDg*~Z&t@33PhHfDrNU$K3?3STn8fQEduUT>b!N%Q*isS!GKXz z!coN19hyfMrjM!O(H!Coaq9CHhgzNyvGpVg%wI*DO26!I+!{klDv`vewS^rHZO`BM z0}q4~HQVI!Gq&AM5lYF5Ao4~TQyV3KygF9}899~QQqQ~5;Fa|2o;nthySkV!cV{eS zrRhEw_>0|eLD23hVsY!f6Fes>Au+iX@~As2Qn zN&QF@Iyu9Rn5lKDal*%?UW`+}*bl2=B!p!Zaqpak3L(;Y>Df3`?wiTwTc&rK(RZ*( z?+m8RpJyELt}KCHkffw?3JaN@Dw4~9EN@DE?4-^wn$oLjeNH;Y^_W^OXuE0UnjKGv zL{tz?6Q$)3`DRFxpQbMEWNAbY-Aj`cZq>br%?$J1_40N(^RyvPx!t?7LL2@=4sb`0s?{# zC0)`n3L_oT(%ms2B{>Sx-3>}N(lJ9xw{#5M-3>EykNW=H=X-y+f53g7dwzm>X3jZ# z@3q%@t=C$6Em%U5le*!!SbpN7>}lijAi7C4eDB2fkiq}QwZ~?YdjD%d=dHMjmx(tj zori-;LHeTLm`PpU?2849HCSfBX_KP~uT&Y4XW(m8oFA-Px%!qu z&&w;X!sqp3ng!eZ*rL$dHBiFkyf$YKj=wPf?JeQM_^x&=@jy|IrF%asMXOlyikFIr zw?8_CH?Zqy*<8z}A`6@bnX46E*fvNab6K`cb5os^*zV}q{3w2G!Lo1b^;?Y;1B;{ZambIhDj7=@ zwubBAhC<1#dRMC)Qe}Pt&rjy|m*X}};NaSmchQku!cMbdrNQuGMEuKnx!-%j3``>g zt`9}rTuTmi5@TS6Dv-7%T5(R=iBZBkmj{XGXMDy1EpjSOm`3pTq%9hc6KD2@T}}~F~W{m z3TLpCf9f^I6AsU6sP4}JJJ0i>o=mb9Jy=iog}ceSWZ!@|p|juaBdXU5+Lzx`#v6X> zH^87!!3z8Hv}3ADLW1^=Z-d-#)+P#Qcb~qIs4xzeCV6u z3cn}GsyBtoE|Rsf7>h1tgN?x2v(H@KDW}7V?2s;7bhu^IrfD$E55IfI-w@8jT7&A6 zK{MCx@s=_qo^}mAqL96^ND&(4m@17)UBtJ9u3dZ=$BGH{IIx3~S~d^Sb8_UY_g!>; zu&Hg3v9ius63VlIYfB9CdI~01S{@;vIZ@&2U?w|@CNjAAjx*W)cXN} zD~@O2^hu`(N>hnWHkC>HdAE`K1&#P=?C16*pRs#nHIsQ=!d7<)ug@J`xSwN)UkpDX zz@~rTTT?^aed>nqtn$rn`>5;gHSP}j*J!|&H%>4*XJsSwpg1}(faZPG<=l@c0$aE{ z!jwv|%8Ol(1Ok!u+L{%)#uN{eulNUz=Hnj)1lW#JMqQR%=PMYwPPnYiV=nu783TPM zqub5WQ@4~g&=)@}k*qgsz4lCRJa=9+?3nD`qsJUCX{bi~25eGt%iPRKpZz#z^LWYa zKbUwqH~8(>JL@44!hQE#q)^4R<@J=V&*crdN~gT~O|h)vxG7DJdjxNVszFNKMWdV# zINEw>4W@y97l^3rmt~d*DheOVP!nBdF*13seareD8^G-UI8CT$35wbO7Iyvek7ihq zAitxIM&<5kkm{?u9jP%Va~?Zu?>yUHv#ELpC3YUBKKH8QHh>2d*uL4Bfh~R{Cx~pU zKIulOc5a1mQR&)wb!P0u&hD92zu_tH*{h>puL&NwG2N4{ZY;{UYKN)u5^$X@e$#Q7 
z1SSk4%*IxD0kwY`P2>AMoGN;S{Pz5f%`j86TT31Wv9oD3g`50IQM7{k7+ z(07avl8HvxpU(%KF$n>UvAL$Qpvt?0`CYt<3q?~}`{hichFG zeZF=vWZqpqHJQ!+vM;u#@WXX+es09?N7!FQ>GD^OY8(m*ue5>wGQRzNmljWw1G@UK z!#+D04U<@X6Air@<$XUmbM7*2MN4roaMR`z3sb37=4GnvTt`#`mobMl*-RYT zu_bYyRUnz?{bb1i7`EB&p^7P+#9M5&HWyNTk6m$l3IajIp5MJk7}@^Oqwb0qhH^W@ z5bp>vLASPtOG;f{23IVVT~1l$d~V3 z=ki-L2A9L7GBpmCV3TkxT`r5=Q;5(R_vhfb0a?_C2CklzX z5!JAQNQ7ev_I+#TQlE_dzVq;KeA5|jSL!aY@BW<2VAB)k8PQ1x!gW}q`1-q5d7~9i zp>{pGkr5VMUmhC}fN0<=5_)7pB*i*GSyjr)vUS6`Jlsh* zU>z*A>qh0IsFA7gYbM18A3?b(l2D^VN)Sgz6jje`m`_5Wn5jK$b%}HoQ#5%MD>`co zI$A2#B1?446$>l#@gDET)!>A!`B^|9Zoc)IWc%w3HD|faPa1zGvxML%q{>26)Iepk zKl>l!X~LR5085>QZ6L%$@O)HNXZw(KACG`zii;>``o6oF0#3pvAPG*&!p;1T$fo@@ z^cecNuGt#Ks$Q|PUX}LVRjd&p+0c4+?SP|<_R<+Xt`l(W=u}rZUfT=YtQe`?ogwz$ zCvP{qmVGKh)k+N-d(>UVdTzx&6=}e934ug$b~8NMWU>smxIKQ_INj*7b*qte=d*uS z&}?A9%Bepi%cvSGFC}h{T!6V|_@p78Ba_gqn3 zMPo$o-{bt!=*ydWwSd1Ky>DiwLi%J%92ogthnFhCROp6`0Kyw|&XB#3M+ZX$W%Oo@1i ztb%t`r?oCfky)EmxPPZ7;NWKll}l<)`(J^g++u?Q680K(ArO(d>fo?qlSgMzVnKVw zzAFunS777oq=47(aIK4!=SZTviXOUK1qmv9-{+lk;Ndn*=ZCsi2X!J4RT7ys%>IM| zsv_gdHjLh<#%Xb_)qs#d!+gOFE;wiha{*=WIA!|<1iav(bs8>ykXWtY`+kBuo^?Nj zjlh*P(Zz~}hc4R2eOO;aM(l!J3x}0=sB!@!Zx-x6dQ->Wq;)V<&cH-hW-|uQLVz;0 zd`(qe2z;z84NQG`_Wb58x#(B=BkC={PQ?@v+gQ1W3Y8kXK6?)3^73j|MDWO7B3~j4r`V+N1+C4G065y1|ms{DwY+ zzQs)S`telLAgtiS?((2sjp3a8xr??Y6=cbC&(Wo_q23d1Q|HKubjIOv_`R>CY?b&m zplf0TAWSEoet!(^Oc{sWAm)XB-Bg&ilN*?Y`b%L{ve>QoF!%-}SmUN1&h4LOuGN)7f}s{Ph%9d}x-UdVYR95rr&fcd&!` zRKL7#Z-;eWNK6WGE$`xwEW}m&U|F<2bh|jKx@__Z4YL(9Cjp16jOn9#A369Cj|Da| zRBF@}Cqxq|u|4w|Seq|}WD?JcUx;_Yo3R)pP8e( zjK^oE1pL2NOWV?boL_?D#0mtE5;4NO8R@Sm;pKTjWFe5YG}0LMQ7&#DPj$duNj?F+ zyWLqLH=;RxS{XcNJVeB08HK8~P5Cw%(e?l|aTo}wB;)so3vQMw+?J=1wxht*(c^dL zZG2xNfI)X78d>G=e=56}VVs7<^mkThDL|mbqMs1?!A@wIXWnVYoPzl2q~H~@mw5~2 zFD+x%->chm{z^$zeV3JN&5UDvY32DPiHx^Xo&Oy`s32E@fa@c3JpRMC0LX5jH^(Vhq z{lt7j8|I8kc>y1e+4uw1Rk*C^3YSICl_Xu{x-{~|P;q(9Aft+a=hUOBS<;EVmwE~y zP;kApJVab)qIIVk+nR?!L**XHs)G`H20aiWXg1v*_A5hEQHxS5`b7tCWMj?H6uDnNV4dS_+kp<;fzaN3< 
z+)Xk*NUkg!!5gMN(vA>noCv%BIB#3QX$TBB2^yEa6WKB<@NED%T&^zHf2q3mO~aej zXh1^+3e$Ak@!RdVE2C`kUOaM~{_~b!9wm3K#BV>}cOsB0I;jo~r*nzjs_kUFEcG`# zY>GsR7s3Qg?&M)#g;|)LK#`>{tJbZYNcD58H~jDE(540f@*5)dUo zvYHsWyP{JV^F_vfJ8_-N2wu(fhTP+Se=Q8c>>GGpAf^Tt3<;krpx`H?-`yPW7!PmZ zdAXnV47*jnoipa3ygmv3IluZ1#Y%5|-CY~vo$6bFHsm0sjVH&Q`S_I*qU-JCAT$Lr zQ7vbT&fZZniuIjD|8lDC-|Zy^tbb6?R&Q2NLF9{9z!5{N=BirMAm%1K)9B^V>9eh; z?VPCv4GjfTUDyY|wV~0P`Pn69@1Mkx8efzgh=29#%7VMqt(^{*dj43Q5#;h1RG`k_ zgFvjmzU)?Kvrr?^X+(GMR+)|BIVU%^ODhEyb$}gpLekZvj|xOR*9=j0TRdE?c4h6d zramhBbr-C<9|FRm(HSZ~mF3!&~cj=8`~<&2+=V=EBA!ofBwAz?Oayh z4ly2A=Put%-OXhv6&cp2D}1l1gBfZnMQ4vQClS{J4Na8q%m$2hK6cVEAO#!vrK@YX z7gugho;zXchgHUV9^&KU>|?fGMDwOX^YW~sdt!5$G&K=-;^YtEW`3pd1*oN1t5TTj zvS75Cq4sE+gs6AosN>;q4x&RdHMVQ!!-Plw7W`uu9jM+F#p`c1P?^*ZVRV(Tk49>j zmF`*=shdNg*EA@!k2)rsJvmVG>F&G5mXKqT`Pu?yjqW1FiJvGK=!Ec|`NIVj2QVqVKb7wmA7`nCAPGOPus>F=w7YXpqi}J?YTs- zcrqZrCd(T(HIre^@6@;!Is!&qk5x@`#$5g{7eI`=<3J;_v^b4y_1&l=Dtp<FZ0`OpfNbp z!hdkZF6neRynGGg8#6>`+ApgV)4H8~dr2oaIjT+Yc>*T!Gce?`6K5PF<292Awp z=C-9jmQNg<7*Lc14az2AA@bsESAH`jrc{_`!z)ET#zGwSTjyA(BC#mRn`~{V7MsXz z??uDDNlHfMI!rms#PyP{L;hjCV#T?)XhucUJCvL&@~$m%PN5{tl4H9VqSnt#dz#zp z^h@KbD17hW-78Nz$`Rd$x`|XTOmvp6D-5JCcVI z6V}JLNo;W{jfbnnFg=~?JkiW^Y!3ku_JBpYkQBa=jN<&*M)&$7Ngu7s&prDvflQ;f zVtVs-4%=gNp6_**6RpVRe@4`Kp>`Yozz6ovc!gtI8>IKMrq4SPLz3Ilo?f#II{ANhd%bS15R@5e;D*rO4tDm8FbuF$;b;xSRhQ0nxMrrz z*K5Cm&HD2TK}6QMEz@KE^4in$5E)AMjfT3%K#l4THswIjYl+1RpDEGyylbQ?_98Q z2J6$?Y0p$2UR^w{KBDR?NFo>XqMQ{V(T}m4d?#B_uLvs|a0&b(x4V_m`|SB@Tys}| z6G2$mqdx<^;y#0;$d47C?%l`(3|ZMsCLL}K_?3LEPxnfSy0TPvH2)(LGLRvT&zkfFQlKoz%lv8odmgQH_9A%BLIAq{63{-@d=4=X>xc5Wg_m;dS_5xETwKV=MEg zm=AE-eR}=Bti=?Hpub64z^C;7OMhkvxSG3v{5xOp|9|!Wdov*Mt%T8CLV8CVzyn;Y8>8L7)?E-Ftc|Hyh@d$kM=5V^5fH@g zg@yJ?^Ld%f8_LJ8#!Okgx9Y8VMuHh|-pJ_HB|)4WjV~P|+k1*kGKyQ$8AY@Owub(U zjlaCec!*9u?u~mW6#>Dfh()Bs#h^ z+x2pFmgGccPD?4}+8Q{lCU1w8?ubK6knbe078uZv-O#3Dk=qN%e z^c2%7uc@4Qj7#J)j&36igs3R1mTWiDNIanr0S4ndb-y(^B(h5>yde(M$Oqm@3 
z+^2)5$pjO-6yL;#s+5(0^sk>aCiiL}==l4F?EkzU)oZkWvQ+*DqWI1#tEB^1$d*Z30!RK=V3o@VQKdQMc05XJ-ZvPwPSYdrl-LSaSgE*9Ey-A z@1j(#i4yDq!N#&q{dyhiIm9Ify^pbOCFe-gVg3BEcznQF;qRpd2MUXQWQMHl?b$X7 zO%H`RS7y$>+Pzp74Eb<_&l^XTF0pIrs}n8%gXU=Y`m`P)6^9c2G;J`@v4c2i(+Z|B z)s8fEWF-W%ePtrbMgb6H>LkIdCP68GBtZRI?#odxPsQdpXs z+c`dD?Q>*Z&bA(at$JQq1JQQJ!$y6LciLBTBNkMwFBUpr#cI8#6@hukhn=ZSKx4p) zshP|8A#0v<0pg#VH5iB z&p;5sK;ZiJcO^8msHwMVcjr}fb;Xv(GUgsD#MC86PozDR2YVXJID-Azyf_RAR(qd% zvKh4Mv;S&s4Gs$0V?Q%c{m4>-Vuuzq8ByQDx`^cD;^MWLUIC^(p*|-k3}Oi1gBpMc zWHgiucW#W3_^AV{@?umX9-mLQ#=^o3D_>bnd@a(si@ta)ICBi%;lmD&=394g*s|r; z?&>alB=7b*XFY?Psz~noThnsIUTo7&#S5$S0p>c-A`lawo|wVX!l6S+PQ?+B(0oz$ z*;^l6kY7|ctE8oDwDj<9gG&Lh$G)U>C~)(8T?p4ZEvIF@*slJhr%OBh%&ehOJ*gJD z>;VEPt=sA(m3Uk=`zr)WCN{XMQ@Q|cKdZeFH(CnL_-Pyp%unol@u50}YB}I!<|XN1 zu-v?TRCGD4xOQ-YVg#pfwf0YG1(&ecpJxpA<8UpW{%JU^BBb2ADE=(gKih=4!x+|} zRcRiXDQ1UdyekmF-JJUF5PX~OaFagl_u30BCC*r8#b24vr`&BrC9Rd>V0syn;{*h% z&q%r)xv4OQi=AhKa_FCQ^ek@fd-eli>g7Bl-!&~Y?rnagB_7aooO&*yW}(dNREQsydVp>p!qNnz>x@=b<2 zCIsZ~mg4Yp?fNg9BdfOGj%VgqHl7Blm|Ra8R*k>0>LV@`1AV0mU)}io8+|^VdPfFM z&fy`&5S~Aabb6p<9=dL|s_jy9kKpUs?)+Hw%~=<%@3=TS(pz+g{M#h54*Dy04}S0d0X)t5wQC*=UDHl4w-=R=MXu+CR*!m|quUFH zk_(f$n*fKC`$s)tFl{WFb(kNRR zDL_#Lj>g&g(2`gK8{3iOVl|+VE=$CDXJ^I>626&NPciHTz~(v5xe^rko1clLPC~qJ zy5ln<@ZiRXlhiU*%8H#RvPd;|EE#s${05&F=rh{Qb+4syMr>Y2x`fG8?nujh-&msa zvCHkhlJG@2KQGU&CGdB|pY?7vpG7eAn)y%WT)AI<)JPr}y^N4HyFtjhSufuV#Ersz z^IdTLh3}Jclm0zk)10qe@7Vo6xpn(|C_GD*^y=di0`pyGEpTL=`<)Qz|GmCCD-fz{ zJq_DF{|s-6XLuoZ{}a@E72n~Hfe9CXwO!*y%tbxv-)TyqLI2rxhKOg;w+$|S_1@sk zVj2us98#pLYcd3vYrDSc)3|XB)#uSyw?dr~?#(*Aw3^_+(QueatYc%oSo6;*Fzw&@ zkPgq4-KcI(?4bjW_KWXRHhs8hJ$PLdCnx|Icsr${Nfv)xrD>>w`1|a{rq5DjOwQVp zz#iE`!*B05G5ypsGN`&S&Oh1k^$W$97dr%d{kZTS@|yx8srmsokmIHIHL&7t50>x> z5tn7h&i8k**ymKbrafwl^!4^4(g015$tmC5@V&a99Tp%0|NV0jZ9z@+GKBB%z}eGY z5B2kMudjJnH;;`W#%yKX26hvUELsv^s=)3ZAjv zwhnpv+3JLjji;c?9y|Gum(qc+EY(ps)x!3)lz>kliUaSz3Ekt{E-bGGd+ENi*{yYu z9xrge@qsS?q8nwZk{a;=T3elYNE=ikmQ0alxTo#BF{z=!ZSTT6WRK2|-mZeZ40-|8 
z&|LfDhM8I>4&j1&VtYO~I7nre*w)rYMKGG+usKXi!W{ZnaKy?n+BRmZP-P$lfOf+r zNNxP&fT^3_G27qv$or|On>$L$WWlFAHZw&E9)p=UZajr4%+w3c0TW8#QYLRv?RY~W zI5qgA?9UVF1P|WvoDZeMe0e2A&%lt6x%a2i=s-Hq`3NXcD*-7~FRj)y^{7HplJ!rp zFf+$U(wIwJZ&dcK9<}=8ho^oP&uv)u67D*!+zd6QHJs7?e%ytaHE+}|y+VC0HFbMK z5LxrN1sF4ZuW_mjjxY-ck*pQ5_!C95Rj}p$9Xvt5F-s>$Ja%gj55U%ef`Wp?G1=G8 zcQJ;kM^(}4fgAC-Idt;)Ao%`ctS|PjxVRifa^y(ZbW?=gt22%4ZB*Tf3^|P|N_Vt` z@~%<5(7Bs+Fs2TuiQY_m=eFM`0fRo}*H2mbNXY&udpAk>`1zYtFdJi)ILPqHX%UJr z=bq4cb7dyUR=92+uZ93+I`Kvor<;;CX*z|=Mi{uU^|AUNr%&G_B6Waeu?_lSNfIWn zHi_?tJO2J#kq&k_WmNl8;Gr_Asn8sp8LD36sj*u6|6T-+aR?9BdS$3^Myj@Q?Yh@05pBLGEaE(05P z*~@9;SD=hibh{|zwCC&*3?s6Bc`x%{JJ;p$|Eju>2SpblM`w5v5*)1h#Et-tN=+M+CdVg^m(~is zZ`2p_rH{J|uLDcL!g3_JK77+Q-)5YWp+wO62Rd20Z*jf;pVj7j*5|z^5K%RvbvZB< z3>;QS&(0o>-A@s&w)a}kZ0CJSl@6b?eUsZJ?`GZs(J~64ieDb6)C+_IA)$)33f$9F zmJ0gIk^~~MXG=ci^V$DQJupQM(zdT@f}F3312Y;{{6MCh=>g?vJ$Ipw>Par#oQA~nR=L=@v%+q}x{tY)C?W1Gzpx2KT=;>Ufv1ko2-eLS z4+v<@<^l0rHKZICQNt8dhFElt(PQw0?_WFCod4exYC)brGw|URzvwdpMRo#SQWC_D z*A(pB-=)sT;c1;JSexv=`DJ;T*t2FtLPi$zLgB=)0$60$P6{v9f9nD)ZUPa(im*%h z{6wrASwms~V$#-u{`&o0S0aM>5uaAWf*oRKqlf`>FTkKnu#Kw!(Smxv+e=OnsdB^5wftjTNgLW7EhnVXr~!jXC7BgOV8R4t|N z*wGSs&%ios66J}x?CZ4yANl`Ts*XIT;~dBFo3qC@0kEUYeals{V!&C@TGed05$}8m zOK8P+Yp3{3@P*(D`p=0A>a70*yiNP{8;T@8;c<`BD;&=ocbUOZZS;E?J9h=+QY$Tg z%e(nOuKl;Fg&iX+H?A8!VEDB8x3}n=+-zMN%2V6F>x1_uz;DR=iIKOpU3i%^lBZAj zxP(%aqr<+H~oBZlA znwjL$&zl`!F1s@pUlF#w%$k)On+mL>4AZ;hiFh4@OW(r#u%}!Redg6qI0UFHt+_(P zH`q)5z0((E`&5nJyMUXrXsppY=7-_eb=E1dvEi*YKLMD7@w9!dIB$W$-xDv*0ljt1#p=^~gJzZHc+bkK2j;=7MLMUtE!(*zTXHXI&be&I zF41$8)ru%D4!x&cM#xeg=OE=&&{u=6wwO$r)EcA9={vtz+$*@=*E4Ad)|6<9-AjiO zxF8?rM9qfN<@QE7-!RZiqnl90y+?4;>Bc~Bc( zH`iJl4I$6PlkWrfbR|PR@1YM0c~!p(ok6IyIlt+Czo3$<_rAHUqkYBMQtY?c-MzRu zb-7x}m(%NgxTzOsz34wbZK2U}gQb+$Y1oPRWm%Ug2E4%+Rz^lfZ9C8#K%!Haw+~*< zE!+Q67?_X92N3OBN_Wn5_@RE}kaZQy%cFD6=R{<`hKtg`u>om~3ZoMRA8kVtUo~bv zk-~c@}eF)=Iz+PbC)N9 zRvZ*Byzc-IaM44fsg`|7f4TwOa{z z^X zyXYs14hNKvkle>UIZXMYBkrj-t7wMk^Uk`CEK3$Nk^0zWDt|H!CG`sH((dpgfxG`p$P&kNu4 
z-CB4p&x3pQ@{ejbD2kpJI2OyCHm-g$q=#!6m?xD@e}7fa27xJQwr$<9!dp`-eg&v& z2Vp8ej{_F?db_63=lzJ!uiJ>QrxQ$-_d%AHBdE2mY+e|#XKB~`qJ|IA+e2`YcRK-v ziA`uxMRXHTRL`%ifs!(RaM#OGmCi=!lYlUT{!g}MeRew8goERBj7Kd9;L$hqdu9)n zT#xihve@>GiWuV3KOX|Sj@OpIY?&n4Wktm8U=g0=lproFUb#XRs~{JxjDN^&uA+Jo)vmFK~2FHpBdm`X4^qA1>2xzxf``lO$n zrb-D->r`k})505|dG0IaO`sV)KIX>cCXeQU{9{<01QES~J>h!YOiF#IMK#kU$)_Vr zpJ>XU&9W@c>$}ldL)SKgW_oSk;E-xEQ?Og@CCw)6gaV4f(i^|D^& zJA1{JDpU=>^}+sB$?e(%FOmp}Ea44$RzDrJatmhPVzu6C$K)NkJ8pLfO9_tKtTrN~ zvzP|lEK1+l&vyVRijL0bzviFOHvuHro6UioeZHBrN~S>Rqa$14_-$e8HQQVDAAoMH zJ{PkkH%_Y$^n~AT4^Mm};$GjK`^jAhp$||%UJt~5M)F&I)lV4IN$xpEzu|99i1V`B z$E%Mz9UfN=KMp>w39x^82P-ppTcr71er_z;E!wxM$B=VZflyM*i5#u#nEBp{eU@?m zIfNKMFlLO&FcAO(gzj&8=?uIqcNS6XIwu>)Yj!dyXc}q{Q~=x4u)KuINGlE3_a)Va z?D$x`XrM=02I0zN0o9H+FTKf9nO%_OzEZe?7+T?E)#a|=fYP;gbVbHxduq?wfWYes z8&4>(G~karO;I~{l`^lQwlHcKc{yQbId)*Q>LRz=(5-6!xVdg1vn ztLLomh7oB9`)q8QM^%#v{TfTBaBzbL?Zt)sWt6?`z9157#PMTpWIIHGJioY09Yt%rq!wjCeS;#;x+Nwc*# zl{TC3IasMYFn~Q_`IX zb*+@BF61+~l4cS^u7BMrNUl7^DfZ2iGaDuAOSSr`8TbxTQ@X?z5U{*gZJe`UZK7N; zUBU(SNtzZINF7LAxgLsk*}ps=^HZBufz8QukJ$L!t6!`3TLe(&p0+{N%+MfJx0jjW>JY1&Ur@8=1_;VXr#O+MZsX8&=#j2#pVtU<(L5oRmf* zeK9A2a~=vVp>tWoi2$D86%Y0&KOVm-HvBqRN=3F%&}e0AMXFnSgde5?(9fYU1lzj* z%LT9k=)>9eMLb>CnuHA(d|>yimCCmw6sRIQ%fzN1*3(L5j6o|b`6m>=`^JP3Y^)zOBLMxfHo1 zkUBKOPxzK5{c^=e@!rIWI<$HU9Nb%uHkLOxN*3`dz;ryw4gGb^vo~!p`8V46<<%lh zLsIl=&3V_@S=GLAHrqKhP=d?3nuEnbp{GmsL+|;r{f%jRfBUULMF_mL%f-N#xuqD*@A`#CdoRlS)VmKK! 
z;{}1sYxBGPXsjr}KQS#0d91@HoQF&;#s%G8lei$!wz5#$4m2lBhg-Gx4fWjre&^qN z5Sq+6VE4|k)odka%3kOMeZH%!bTS%O>7r(%UE$+gf}^&w3n<%<5s}X44-rwy zd4EVthL(TIh$`^0D^2~fDN)s2%$3)VtoK&5NgJ(Rfd8VeH!wHI6sBt)@w&3wq;}6X zyyKe)Cw*4wt>U>Wjhz~9mfX^=8`m9x*aQe_zo+4U_sKv0;aA_KY0_ynXRBLGR+rq>t0W!;8SS8!y4q?wm1kGU^dIDBz{;MGa+l4s!|9HA2 zABC>$@JsP&@-_8G3f+8ZQ5-#`I9RO9n@wqKdA38(K7RZ{Ea=3s(-|0=rxfL$+vf;_)Y)6H}9%n z@mn%X9ag&}iWIfF$v~88*u2aju6mcrd+N~xyjy8@pC$mGH&oCib;Sms7yr7Sc-Zut z*(_7RM!AjAPpHqfITu9e4YmczjqL0e&Cj^z{*k(%ZCt0~Pw*cXfqAM{dA0dC3-{hw zo#1V8cBR9C6HniMre$7*zMhyFXGa_W)p0-kr{lkfb(QlJ^&@%+B(`HXHR2^cN~ii5 z-p0D%_;n-PAT-16W@Hvj;{R-+ea?9$14ZElX)~znPv$XjKavcw!C^1|OV%`hNC+BC zC9najM^N@Z|7?Kug$u)i_ z%e8P}vKRHB`a%91tPWhfzabp4mLLV>cfo^o2Wg21w>|%m?f>FAoexcR*Eh>vf|`?C zEp2!$@l3UrsU1Y;G>DEhdzk&z{~t5$n@_WkkWWf~dK31V4wPU8d)*=}4Q>>sd_WSm zmUR4;8eqQ(HK;Dmu7+?Q0gz5WD=OzfF@=o7McrTH*)#~CMRpni!^=QIWr#4<;>DsV zz&QCNjk)=blM=LTU;ZA7y2>OWYrOhVR;69h7Y{$AK{@*smQM>7AYJ)mD zKK+gVpWys3ZCGoC7AB90M~_ACRv)%lfP=U5wzS^(RcZLH4p$G*W_o+7b$lDZId%F> zk4e*Z&;rW9{tdJE==UHSRz3+}MXDbKI`QKg+z4vo)0X}Y-v!rCXl z0HA&r3)YY-=7lMGjoc~&Jjj%2AineEoqixMB^$x6G_ulET>SHJ`D(Y6y+owSno17y$w70NCgkb? 
z)?;NmIj;?ODWl}H07p8p_)b#1a_7REVo1n$%o!=LtxXQ!gtWapA zyqdBulX7K^L!HOFgpV|D`@w1|Dk`-LG5Ub-c#?E_mYHoL=|F-4gflDF*)GmQ1tp z!56PZROu=lwa3h63T^}o&;vc-zWIvxMjxhTAF}f|a|wN+SKb8P)mDMny{yyLekBP> z<~K2k@&F>Gz;#k8XT}`)ntAs6I2v_|&U=#n%t4fQ1`Jet;MOp+TCseAoMj2GRt@ePeWNRsh8Sy7<$h(Bz+L~ z8l_-5=_LnOsB|m&x#1QJ_~{K9hmJR!e_n&+Uk+97e==y$dv)?`Y8zAXdqa+rnb&7Z z)Y!$LDK?-hxW68ngc&&xtk*Fz@p&-d4Ql56p z-gmej6MNKBoPx1JVgS(=J$&RFODY}Oxc<~DF!CK!rkFa!pN~=K3rE=I-*~RRha%-n zHLXO9?n_0L_#Lf|cz@>FQm8?)gW%EzR01G1J@9Fydz+ig;TQm-#=rG#r6b@dh2j~9id z_QC88Af>&Ms`bQGUyIbX_-?(0(4;Id94yKnp1Zwb@Ho0VI21&pkNBse0M#8D&M@D= z#I%Zf!?CWGDHV4;{`E*h%TQviD??{WUK_j@P=v&;`xA3^OSxJ!MY5g2V43e_AbObJ zC#AiQFw<|+`#)9at;NE0E{n3}9N|YbgdBH1Q$lmZJLR2Yu@6%8wR~EEAlNIW|3xKV zwoL12;j^OJ@rLFu_`r$fGf-rCN{_cg&*Vw8)jjH?Ac1U6DIwMlLJFF{YB*fEiT~3O zVER?&RKPdu(131z!v5qffOQrDn8lIz?NVOT1FEzv5YSBa3d0?WZrm@*Zf;^YHKV=s&Df=I%AI#VR;eU9*y;_x6u6U> zn@dFCKe8(Ob;$fpa)0$1z&jP`HcXF~j9G??QxjZXT}`uv14%}n-=>8V@V30)8Pne9 z*bzUK3Bdyh#(G2f0F;}imR6_ejX71FGpX}3;sa0jfM-gq6DW5TtFa@r_Y{+ zZWQ93reT7edM4y`X*2X#6OsTtUfK>IqvnLz#Kby_k(^)>wt+NZ1&U~{kI;K)w7n*1`2(f~a(n3k+C5Ow;YPJbLoqT1=$jD>0J z;WAj^Z%66z*{x1JwJ)Vd#jZ|4b915ck^(L-uZveApTmG~PV;F3&SNiei@IS?8Hw%E z!V;Qe-;ktscP=#-M8=-zyTHKcC&WV1w?cUTDt*ePr(|Lk(~U=FPc#Ba_hdPfSbCgs z2^cEx16hU9+hs9?#`jRL6k1(25D&l=@_KD8JeMy?wo=74&Kl6uIF9r z`@XgOgFjs7ocYDx*S_|(_cjr;`^+d{=JN)KjX5qI_*+u)yOf11M=-FUo3>uBgA#<) zyC2sBt%?ta%2FFQ+l^;-7SLC0DhB3HE0z&TYR-*i{iR;tWTt%*YaRupWkKG3$9@SatGQ9y*x^J^a9R2}Hbz9wuo*N7I&cv?>d z&lBdi#Jd~LL`Q1Om_v7@U&(5w#nyvOURWbV|EgWQSU9xG=-K7Srf=bePvnU_u4-q& z^p%v?mre7AyDjzEoyHZB7{07#b++da1J>|Yxgjt_q0WiYi`=T@i$4y-Wkw!Rqig;s zf?8HGQS(Z|l~-gBS(O#3724Kcbl#5pQ|>Q~yZrQ?IVg5O-FbT=yM8}WYB3*;-n*OEdI?Jh)q^|$~-r0d7n=Cx4#E%$+odS^03EXM4gPa9)11TxMo>8N}nZb z*0vw`4?GH_U*?SAiT#Gc9smJdRVEb$HplE7WQ_3N`D_tir!Os#%O9}(P`KWykW_1A zyQLP*uU)jy`-)4h*!iP8%8CQze!E+{vFGwkA@Sa{*kx{i#k=E;3)?sM+8w2$$|T+& zO(L?D9`2+Z;cW`2; zmCkb1?_AP_V9*1lW0r*(;rL0z@c6-zV>_MxbFhg zlS#)N<>IkMgwg 
z%GqW;OlF2iN@^?Pzd-ChyA>V--fg==zgPUnp@SVyLII!o{YSA{9(*CO+CN#92^4U4=+!>L@5`(_>A-m99@mfJ1+}t`z%v-$6JVY61Hj?9lN{|o9Rj8 zLVBI&AUGv{=OC{}RkE;p53Ipp5sE9EdcPkTw*1w{R}VY(Tw+An+e-*Lx2rC1 z%KaWJSi|O;5V?;YUADA6eOH9k@@<^7!Faa9mLe)@mgLbplQv?vr?f3E3qe*aPpig^ z`ugx|Z*Ts9-foBV3B#jX@6(&2qBZ#WEly3%={G?0*kQMNr` zH=ddvGN}vUNrEp@hP?FDWRywFS~7}`Rh@b!TFgi!RGglLKc};avR1Lv7EJU{qG^3g zsb{sk8~LGJlix5lm4}3+dl1e$0ug=LM>OiH$tHTqQbLZlu@t4MG`Ta+e(jo^&kzW6U)1U6q$t7 zf}V39bv^sdYj|qi(b1it!rZpVGwk~rqHW|SKmD2cN(loP*FE5PNQdBat?`FZA57Q+ za5vYxL1Fl%6X?+*v5B=HA+vfxaN`u->sUMY&$2~*>?(y3q0(&GxuqF8nwm+qt|#rx z)WhhN384v_LO}x!k$&|zQC?_oEYnPKU% z_=I>^{nDUZh9GYshK*UXx73H!0INK&d53?`Q~scF>kcQD7RBLqef5@l-L?UyBVNa- z9@sf$I_Ayu4^Wm2b=cxt5HBY%C`DvH%mi~KV_XYS1JfuG;e}l4xi)HfYfRMl$C|2W$8-SrNH@%G(1=kgL(j zVk<`eqhG^ezPZR~hfRcHH_|)mvlSkAjyS#NqteWi)KfOouA}d#=}*PC=a+mY=z3F7uEsReXYbA4ZC>N*?rzZJvI8wu$P)Gp z{S4UR^OXVyW+p~iQzVW1u@Exw{hNl9M^xpR$oZlo$d6W1Vw~d!VL?{Z?e_sW^DREm z6R*2QaF1dQPk`Ke;fmtY?M8pB5SzO_WZ%z~B}m)?lFifZH#6NX&31Z6YJ2VTqT!e* z^3^_6LzGHk32bz*`P}^sRphJJt}6OlX{z?#`wqnN;*Tas*8l?*O|%PkBiIm0&gIp( ztVL^RjHd_GgG;*Mcvr~yc2}$SCG+sK3BkKEg4+q?d6L-2!)XD%p0$I4mjg6S+IN8&==Zsk? 
z4(PYV<>eQhNdPTSPMMB(F`L0a&2+mJa^yEw?|!-HgBScbdKLZYNrTbv&wG?vZLjQT z?T*$p(fHA~Et2u%)`>fxL{(h9_OH<{f4%dppJE6`|4oKYwQv$mdjkj+Rz`%8{0<(Vup;-6MUlh9>`%a4{TeIPi%Xt^)Ae}dl@)e@z7ky<=G zYXGHl?M!c8J$4YF!F$L4U@#*G)(kXSL{-$16Es?s4?iPP^1Gjz+xE8wm&wB?uX?W% z^>LUfN%*IctbarY2IrVZmXch3gWu`hC}T=kgi39$0yo3pw+e$hQGqJ{ricc$ei9oQ zhk;*%copY*ozf4~fYFqho&ree?Uqnie}+B))7!< zr@@LVS|2Xk@pSP>9e1mVgf;NvKXI@;*fjUxvgeQ3(6i zy5DsbceE6oI2U^ODcb&LU$do)2Bc?9{R_{#Opt(6Nz7eL%x!Vh{nj=*1#kXJTQrs6 z9FG|q=Uhh*vjI|qz^Ojltsn&ndnwOMI1`n;x4-RNRxtoqnZKuXm_uOjzCJ7 z*J7fQGGi-M6SZAJ^8JbZ$0POPfvKlZ)290)hD#f({-AlCR;+@ng2V3fpF8z$Q$#dpa+URTm$uj4Kj0o+6+7_rJ>rCG3R&CFm z#zwqES^ToMgK`+z_@P!TZ#Ji*_@kLP-G+~KiNnH?sDF=5<(esLh=uX;kLLFoip5>D~%pA9`mZ{6BLHeV@j~ z?fh!RgDH|1smdg(Rly6}O$Qcbg-xA?pAU|JX{INU%{##irPXAEVf zu!$8b=5`^}W@^s1R@1x0rc742J6`QqxfR4hp3j8eg_f_vI zy?C27=@rt-HChvz&zNfqD@v}TatAoxHW>kLZ1^mBAw?jfQ&KUnajtIus1pZQ!h*0h zXKbIP#0k!R6udV5mfY{t`^;wdjZQ=jKqt19zm5Ja=ROKs2Y8Ir<96=-<~PrIVA7+buh5agdd7Ba&jSX^0+u%ZF3ZVXIOqKI52@O zmE`%e&`@HT-8z<-iyMIVB<>!vt`&T365{i&#{Tyrt&CVfu=LhQCfVio)*+OWw&||K zZ|K1+0(*Qio;vEQIS1Vt>r}$OUy!n3%Xn+lo+d<@4KMm)0lLwb6OS2r34$<;(2X}e z`wM9xqS3Pb>@u&ZbI)_*!&|~-F$Y7h&21=v!mexpvwP}u zp6Tzq6A_+YTe2X}Tvqx~BtGF+jyULpa+H(Q2%qPJxc~TRV8U>Ty!oig^De}Rl|+=< zz!M+8#fc9pThL*~L+`_A5&dsxl<{!M5lqM(TKkG@+svD7DzD;(dHoyolLnAdq+rvD z5fGr=_w(6@p5fn^m9$K!hBrLw#!Dp`#1C)eR6hC={8V9jJbWTXHHa!9ZOe{@NrlOW zOAc63b~@R9P+7n5&!P>XDD_d!4~{;@vIEA&?Y{K7HBHBJK9@Uj83N)k%%f7TyMv=g zk=4moU@+KXC}nkrK@!k~pS$XH?Wx)$nT7 z%o8KOWkR#9g+>-!8k~QKgUv9XqRDlIzEt6IpW3MS)^xS`pv*&d;YlZ($|gsSEps24 zF=M8B^Y&#n?19|&XLTx9A5Db+R}1hHJX1lytUimZ`0>Ni((+aAGOhu+y`?4bN7@E& zh*UIrab@MzzK5%;t37lXXS6SJp~j6If7nSl#NuhqWIQvAgCER_y?t3;o0-&H8nL# z8T^i;B3B-QgrjdYTH;DrAg=Tv{ZINQk~8iLRv%e2bzXTweD;h{amU>~Px=c|)iwpH z>Ti~BFZN(QXTxadSc`7hd1WEc!=+|)U3oZ~>C$&TTOrl@vxSIOIW)9}CBoA6+qJid z(HRgvDLzN8auaEEZFYPA%%;!f2n6kYnJ}ID;SR&YZ4!`tg|k?A?LN479gNmpAjp$W z&9YCysZx;;UvC`aXR&MRE->&6o^#xp(2uP4+sTXH+)is6?hrYmm;>PJp-J+xfa>yCX+EU~2|-&CTwn**e`{21f+y)Dn&nETNJZrE-y}$HGBR`GR~<6;JfSXr 
zH+2_JyLgaQT%4CWIx5tp5)t(JGpJ2RRo(kQmU-`IQxp-7 zVV!1}Dsx(qMism_vE8y?eK@B}2SJ|*T6DiP*0{i(#t$Ia zDDEq=Iu_~u^5R`PH`uttI6+5)S)})R+d2H>L zp9Iu1fo6U+<`=e>!p_N7e?rp6@{;Ei%tB#H0Qo*mgshXpFMu^h# zd}b1Lk;GZ1koJw0Yu0W+aO>SX@IHra?leh>Ex{0A3jsqPK_Vj<1vOEApFKaXtrW6? znkTvkxch2QnJmn213p-OiHj8yDyEC2kWJj?AgLr)(7PqSoR;;|a;xiWb5Di0$L;d| zP1f@{J;}Q&$^?IN%!((8?Ut6d{n4sMZ zniTs{LwoF*Le@j>vgJ!7t$lfkss+Q%kATonDN}!VN-u|5hsM4?*N*qOL~-5*Q|HwK z)TSDqx2Y{(P;=e=#2nJ;X?c6SE5}MixzS3{nQmWsF!&vKnng;e~iwkPN3YKriu5`nt+foh)4kV+JpeQDb0%*|aA zlEVTAJ)huX3!CvR(`8~Fm{*H~Xs+z60Bb1uJd1l%V$=5U_$=Qp`x4u31b_l?ODKpL zaXYoCPECfJ2{HerM`dqwSU~h#L~A zCD0j1!w9f=XU09A54DF4Br*eOEK0p79Hc&uTl3h%2baJ}FS|s)+Udz%k`&LlU(Za+ zvhNlTrfePCZ(s-|;s|F3loM$&GhJH~HvJPHC}{LDV&QE}CaLmMMUSiI^AL zy8oHO;>X`%LVS#z^K`k^^dcFAyv%eVF6Y!LAq}zBY!W^8BRz$Ap1br4~ke zal!I%4#I9*e--RS@br)oeqB}b4=_p*v5NdgmCxsj=>-7=bYfyW<8Vt0vosd}*=-Hx zcAF!Tz(@t%+a%rDpAg@Bm%illaj7nDWe)B2_0d0Px|zb2e}v)_T$!C+OCTisXJ_bh zCybeZ`Nhp{`7=pcvf(JjhaJsm{N*6mxG^UpvBNnY&?6|EGdoksA{hi&2apZMR)m%k z1sQ4ko;w5xGBC^@>C7EE=Dc^0ph<`so_GHQu60aW6mk3@?r?8m5j{gj8=EuA9^MSL z`A2V5n<@I*@&GMs&?Nt6t*XuW!CfRBW#zgSP$ldRZSq8s&X=z}zs8D?pYNn26??Q- zHT&>EZ88^7sSDx@fdv`x=u$(AeD3?ae zJe)f8j|R5lo}R^|(i*x=Z+*2Jfhpz+c{c0)u=WLNVoB~NTW(e1s|4%z365A}J+HXY zH-JG{Y4yN3ZG4glt0@lqagUmDVqlQvUHYq{wPxEE$?#r!gk~~%@XZ3KM=3ls8-KO& z4={t|bnZ5Ve$WePnAFH`;$!q_x=lWIojUkr?icXoEm;iCI*M*h)HIG(pE7>H1d{2M z%RWg96B3^v1B9Sx>;=+C1KIO=p1EAa7V-G?XNafAE&JxUqWu_swVS}hCsS3%c@|6D zsw$xa2VzBIc7pFz)lAJst9t}hu4Ce=OjX(kv1W|_@HqXf3;H6LN`I$YU6bZJ6GIew zW&t+v>aTW`!%Q3KV2vBz!vkT$pmb*a8_%{Kt3;=&+r(_nB@B!ee;}bUv8~_2Ybtvm zC<*(%+ojHJn4kyLqcsF(iVE6}-6^jAfySdjIRG-<=SFw1GUktmtSv))&pNw%qTwbz z=B$ye@giPgP#Wxq&*_cucUBvZ9?0jL8W5+h%Rkn0o!CnIvPT=dL^3ffp~lW&CYnVB z?8(e(feomX!gO(H*QM+q3;km`r1P))C^Udmz4f{lfmq(+$&8D;^c&t@wH5)?^`(S{ z&YYP&)nT<}7{5VA>*Yz z*`NA0JMW@35rhk**3GJL9rv@XUCRf2c@&Q*dguMGEubsfZTIk&fKlReN$T8iw1WA@ z)MxOB`==s<&YN(c;sa%@SF>RLTM?7WUSzA-n^r}O(2?JBGh~3TJdPc`?Yk-L5ZlJe zZiNb+ihnwv52{-4gkKzkO`7c+Nk;04&J}wDOb$SQK_(UCY*g;!&~yp!24PE9=2_Mo 
zE6Y$rjbc5C}c|t8!GeYhJHa$aB|P| z!6dK;)mql2jBcNGpG0J_{IG1fb4OVeFSi}O|9NTv6PnPuDH_NXHMkmsus3_Z+f)*l zGY_Mocxns%tb=z7pnp_=5vZK$4LoNJ+8o_rtWTjGtSiY%!ihCDxXlS-T4vmHT|Ez;nHLNb z!}Zaxp97+)eYtLv>$!0L`d&x_8<%o4YzX7F>skNU$>!Q>W3#@TtpWF|%3q4TL*gw4vDL zwhp!&j5XStxz_E`OwtD*1~m4SPEHCN2Mqz<;k zvAR5P)U$DrD#J@PX$)DUI{w&?@!OH?1Ol4`7=qc4K?vG{f z7=9F&aen+25Gwr&U;oF{%YNN9u`HK$qdUnE)L^MDlN4nqf_1Nl!&|Neao0S9{^qis z@xE4g$^l!d+P zUh2$Hwd9vV>li1wV$;nm?v@pIyoaw*&^o%U@LR}nCQ6$>pz1wF11OKH4Ky~7^^Fl` z|Bk9l`o9h~hN#!vbyd(TkruocOj{MO$`cQ6arit{h(O;`Fi@6ln&y# z;9Nwo{N~_X5DQI8e2EXc)b0pHsewW`OjROM`rc){jqlUr!$qR@q9T_Ctm#Zk%PRjD zD-Ky|T@`x7WL;+`7&WNv1~*>5WffM^%9MRdy*I$wH0~X;yWJ z>KhoqpioJm0`Vkz4-WNkaXAk_;qq?gh!j_0|_o3C8U z@s++^S+!SPT%7CfFkg2Cg{Z5pFa8xv+*oYed)*MtNU3|tVj$tN)g5`}0%&PD7}TA= ztJ2HGO1(^VEwJA{D&zcTBefB{VJE`MrOfGxuUt)S#!JUC!pI=E)I>Z9e?iU7JvI5- zBS5a{dp5L|kW#Szs%vu@S3-Vv)B=Mtu@|<5^_b)6P=365esL25X`pA;S&;?TUccCJFaDU zAD+0d=)b?a%+1HVEvuF_qgK0n``W5Tz3E*CW%C;W{qF-LNh+eUIYCc2D1v=Z3eQ!7bCbH<>hCkP2lXz2@xkgO3f!IN~aeCadeb4GKv(YJ!+CGK}zp;+=za-gd;f3 znnFW2P8pj98iRt&wD^Q;EoW;axwPF?uMdi zs=PigZS`|6$sP|g_3E#?gKF$V*5>BL$3vjSsw=E)nA;CU*tji{U#5LjOIU>U^+03! zUG3Tjt%KUCoX<1!P2|0p6ieM|$$*9`z~XNr&f?&kg{NTnYm>pjfw{(kf>z{Kc52+q zQWiFq$u(3B*}ww(L<>obKcylx6f}S#vi;{e^1VGRbqW8+lFanEv-`C7qo>!0b2K#G zji%WgM~M^yat2W46!AKK`_Zi^r>T3s)$F_Wko1c~6HLGwSdn@kj!JQ~xMGibtbMjD zLc7*l)hPO9YaS8RN7}wKgB#HK+V7iAMU!LMUC6&QK{te=v~;$)==_KFB!jf4Rw~nk zsxN!O@JWXhYAq=y^EX+z-^Hd3?aPU&Vy!sLh5+1EWl4ZTRcv|Ot!q0Hth;LLHtL<0 zuTcDwnsZOq4ZqaFqOp9mLb}SX**G(uS9Jct_2*dJX$>cKh`G}5VqV6e>MFW9?E}vL zxBFPAu}$nBMUE?V`zmE-`m$2TCCZ-=RNkQ{|9#%APi=2rmE$qtt2^B zESp|^E7OQ`w15WYUT1DDBztPz+3@~}>cVzzVea^+GxWt9v{<<;w3<1agM)+3&CPm! 
zRz}8t0&f5^AZevxpEU2~vz#Jis%=VltuSy&+$;(X2seBZ!g|wwr~BqvF|i0;ZO*k( z`a$Hw^_w}|K6o5c*fo}2rp-;*Tlc?X)4+K-YgH%7(Lzu8t*_8y&Xp16n#zFv9#hC* zXpUYviK;JFDoL0}LPngMORCn$V1jVE$4ubM&=ZdNX(&|mxE-xt)97@g9}^Rkh7j;U zksqpdbmS)M1z-G=v@!F8E>1c|;&C*&&@wzm$3m|Z%nTUSDlu#bK>TO}pL5&gki73} z?BM{5@}s6s!@<=(--tP<(5rb9gYm-QJ~dFMv+9!SKh6iaH8Ana@$#O$F|=<{Xh1rd zJ+ZAoJzZu^?dIfh2L~s~1(sOZ@;h3brsJZOxf+Wj7zC!K(|jiC4}<|kk6(fizkvxE z7DrdzC;NTSj9?310)fEEbpJi>qx`-0j8##zf_SS17 zLJ1IBjiNi{IDWaodBMzH`Uy6ia7j33uGg<4u{$SVm+stWB|l;Oq^8c)Kuo=`^~=zS zZvjzPxRfpo-^zvGsm)4%h+jzxh*(hCEZ`bxwjsM|3*Op?0MZbMu;5?nv}YjflJILg z%(-Nfv-}tUzkGzU!d+P!6C#sv;&eDdr_?R?Y{U79;1})VwFzH~LFN}219v3){M+9Q zJ?)@`63e@~_eYboHU-vVpLspOP@sp*?QWt3yINsOE4>94FgL_N&AK_PZ6>YnyO6ea z2|bW+PWn@A@j%+4NER&h?mizJ(X(t??u?LhBh)a%pgZqG`=|Wa)2{6mig*1=fYEt(B+WNzzc3vvPXjsdVCy zw77UrTnimZcTX03ah6s-b*>f}b7=x+Ht2yLUC{^7PD@?gWv;|W0HI!AQ2Oa&ZEH#F zr-c;0{hU|#>T1Cx3P>Arkjl!u(T=T^h~3=b2xelEXk?b~T)&PxxINtS4=FgZlB4SX z4m{7UQ};x+qBJ&Y|k?cVXr8+JzzeiN|vhByhcv%O zpVAN&bz~Igcc>NiD(vGqLu9vF@BiV3?d2%?`FWIK05RAg5E2GY78Hb(atn$K;yugF zm+60|m7uYF1~3c9j*;(PH7K0KsBlF7nry1G6%j#rF(HBL^l-axyMCgAD;NdU9ozcH zmKDgEN?R{GXt+M%1#?TCg0rXmDxk4ZFAJa5{S(yf zIC$8dKM2qlK8@EiiEY)qIybRt1$&+;VII_TB`qC%)f-tiH$c;H!-Tbd=jCLW^%Jhr z$tr4twrXM?L_SqRMdFg%pG7zUk(U&qA{B&J;PU>r*ENLz?7X~iZfsnEoSND;Vh4ns zMIw8WeRDr9Ry(tQ_h^?isp7e1j@>CA#{GcN~ViVGcw0?-v*Nky`YUayaUR}yHu zg}2;hwsZvNBdUD`YP_;JOdoS??m&`@*dh#?G*d6!Bsb+gDLrT-IkZViRmlMU#aa)h zbntIJ7~6xV2xb0)ui?X z7+NxjYgJ?3^wa_Wgz>JZZ^6Om**><2Y3rH`gQfZ9AOi?WyZ4RdNhRY=gKTxlRL<#9 z=g?dV(na}`W7Ta?)F-OpQ6X{x*v1Yp+NS;a-|%ygmSk|_{C$vvSxLfl!V$Zfy3JCJ z0A9`A?@Y_LuKrpxeRN*Hn|~T+)dc~RFEQQ5LUsWXW`Y?MvdbL>oW-8tr)I2?!x^A_ zz~xA1oWeGVcGDHaMF^UUC!KQgihd&XPWsKRLLu;+)XvqiG`@=}0g*~HX=*W=504aB zla>$u5qoq*C^faGu=bDn3p@ZfmAZdf?@TGEp>JIC2gGsp$HOYWw8|??O!v(Abl7yh z(Rvg~ihb2V0{hKo&c-g#ZtDA!-Ctkr&@XeN8&SqUEvEK+w=9#uDx82?sTaL%H+wqS zV98&5!FUZviq`PW^J}8?c^&z!fBaUy{#*$_QMAW~pkM)%m)k*y(fRT7=UaC>M9zL1 zx=Y5K&I06OQpv^5mH9JuDXIomfS<>m*0ZhTpE(B%InFphxty@AZtpT@nwRaBz4;?_TsFS-$DFj@{2dX 
zY#Tk!lwVKd@ASM?>Mvx$Ftbelz`2ua-JSeURGkJ}D26)*2tF(}e4vN;9w4=hsMd$A zHcUC)k=zri%n(^r0PNTsjB9b#l&oX*xjEly?et+@SDU^QIo6pMnU_52k$OlkWI}50 zKfc~G9VF{VS=?LE_w)|o?38;#ArFd+8oY%!M-LYu{@lV$(c(N~ zxBNvJ_2OS?MaNHHy_p|i#|B3en$qFbFKD*Ra70B3Sv^NrS#R*u6Erk{#>6toK;8Qz z)MQRUdpY7nx!YdW0pa z?TgK$&8mZtvr{SPG`XEW9C>(ex8ER!)JrFUpdylkN<@Oo2h|Gehibj1#5aetdH}qmv|B~` zOty)vcH{T-@#1gtnKBuSu$hwl&QRU-7P#J7BD*#Q9S2SJDv{&)RP}SOhyp~`+|$Jj zi)L1f$~sJ-yKdrzuSyW|ZV(RRci4*iO`D1Dh{(MkN?cwhsEcNEzPG6DQFOL+2r$*1 z1!ZU28~l95cDhkO*n1%V&(-lo;s4pKbU;XA=H2SoT$BfV*Re_@?PdXBq~pw54N_~r zm}vzYX`&S_oM4GgVYlepd#$&VDC9XpH{M<;Q7nx46klQa7^3ho1S~9@VOazUH8(et zXJHVNk#f85&Z4H1+Cd-amK)e%4H6*Q^_y!>5j->@gH>}jMCl3{S&oBO$vVz%*$kSF z?6|Gh>!~55yjhLb`}=h`%I6JZe13kFTvpA|*c4!*4-b1f{I?WLxLfyBHA7avioV8O z-=DJcD=viwlREL~v<^W-14{1bv<4s#5n|)^Nc6Ez_;%Fr2aqyDgq@>$*M;%iKx;_2_Z<_CviLvAFH0w^}EdQU?w?Jp5XA-|nc z=O}<0%#ser+5#r60QaJI6#?{%MiI+rzWc6f!B%NCHD*U(u%bDh`YS|^JNWS;9G%+h zv{EYO?z{b4SG@7%_+(xXulH(X7T)ZeKC zNoRyV>K%QbpUubyfFdYmfsfOF^0$}Vizuy9(eIzsR7;|Q!?&|Y`Og$nq^sCOFf<7L zj+`&1mw{Xu8`&)Au(XN@X^_?GR#DaJu2AFJ-i*jdImr=-K({xqGfnd7XF2&vtpztS z)5{yb7yeyaH^L4kyULtm%k)i;LYy!6>!gpQG2Zqb4{c#_gb>$Ss7o7S+Dr$?lz;H?T11tO= zfWUKQ88`(CIyVcoRfM+B<$zIWnB|3sZQ+p0(|XuVJ;wEW#UuB3P+l@UTO$e!Y#XCy zcV7+vg#%O8jLSbu#^c0q0$0PRQ>51DMIn|=lgf?h7BRI&uK-2n%9SUuM#Nb9*0G{M zmE_Pao8>RbBwT6Q@x#os#bf2m+;frKqC^Rum&8kLfke(K>d%?vIO*(`N=Hj7Dze_1 z9K*K{C#~uTHF*AL3pTT&742cKICXsS6sM)c8(Mcl2gg{@tc6miWOR$M)?E2l-mMMs z^k0unpXDAL{qv$Pt+NVQl-(ZindXbv8Idy}LIZKT%2N=Ll58Q4aB13Udgs@_0UB?h zuDf37e%==8fLl8M^s;#wJ>fbxzn=1H4Kt9&iJ|Hv`1%-7m#tpOt%o#Mg8Us1#an+H)4;)Kz`C+5 z+!BBo+Nb(qSJ@%SLxgG>;3;>PCr=%yA|q_fdUZKd1e;h)uHP49#1nN%G;!LpuVlBv zV*f7x1FP?l!W`~K#<^U+*tWR(1i86G;>?ww9#N8|Hn8a6_&byq%iU(~w3>;RiH>pp zLWqMjb|7igT1W7<5GB;mquDQ!TLu5{{1EF$ zrM3;ZlSg{?0dLI`A$61KpX*S323a^404(9HckfmT$2msJTrQqD9bG

80rGtpObw7>Kytqv54yi!D#&f#|u_Icy&294^W8cRK$-96%nOA1tqNWWOuw zc548{#Z%rH%S0&cmj9{RmS6xF^Gs955*V%jem_&xyZu8Um8RA?#?pwd>`C|)hOv6vkD;));L&dgp?kuyO8@V3A8w=B0MXr8 zeT-wI$)a4(?@fN*g?19H-|iCP;$7X{fRcQFgm1G$-JhQHMdcw#K^&ru6EEx>h^N#3 z)gNU=D<~*vZf=fVF{5Fda`6K#b=_L``I;s=7WLgs|6m{v zVk}si-K%|-rIO7yFKGa=sU&o9)8Xu{=t{AqZOSQU{Ey~03;NzIQ75VRB#8e;$bo^C zpkOng<+9B*rvV>ojUBy4q0UX|8=J@oa<67bImQPVQ!MwttyLMg3#e3&<0m(jBN3Y- zOk(CEx4#0H8LxeLvP3v=88y+tSy+}8;DTH ztW`?~IZJ_#oo6ByL?mh{wq@nE=~)Hg<(GLN?kiC*|0DeO&hSZ>zSJNiR* zmeS^_j*$q$3ds5ZMB}{SBXu~D6#0PG^$z3GXLjalb(RG2;I^BaxlW5p3^BAC!XCQC zO(@d9E6e}VPYOUP?@zG5IIMC3Aqj`Igsf@*OLwIsDRFVZ=|V)wpBHLng}>w{xd?&s zr35)Cops(LvpTHXl0O$A`?kRR)4s9V=O!A9L_CHCvlvG9_r}pWo$!;TTqtP`EoVX~ zRD`_#aFxIQ5DTT92&k>60#w|1`arMrV}q@M@Vuw$8YwvpF2rUN5egE6B7_Ni&YowF zRO-!ox*SkJKZ{%U-jvQ#r?ijGh~T$V@E;*VPezk4`~H&@{nsjlzSy)Ey_gE&GZ^Ws zULJ^RM63lSSL=Zn4tL9=joC7`z5^vw(CQkBrKDGsS5k3ROX@me&LN3?MuEOU(Hz{f zUlKt#n;TeP%kIJ+*x7%(sRoBmXR)x)Ecyu0NYjzV5}_K>`gmT>`}>r%Evv?)Ei{Yc zM!Npw0rYgt+lnOpfMCP0;ok-)IAIfRQ#J#?>!2JF z*4LM|rOy!{MO z^Qz0&2JhD4;3{ABPA3bHN4K>O+SJy$YduxH2i)Qj$PrSy?uv=r4~+f~J>7lb0zd|o z6+BuQ=E;qQ@xAAOx;&)qbh!Yu>7Fi84 zb!gtx>VG**<}C~wx?wJUKG>tDn0Sl@8PmQUJ{`OpEQ#1m~uKxt`d zIGek~dZVD`?=)cKYPQxFFmTLZ|6453j=tDL)i7u6eSbvH#$9_gbtV286_qf3+WG*= zj1nSd3Ixp(!SJ3XzyDc;pubiJ1<`jOKYwd(qO>{snp&~G8%SGA9ivrNTGFh^zdWXj zBs+@bM+Jt`Ic!q_OJ?XYQJ2ERP6{aWuoaG&S1L(`0IYs(JLQe^f3Ir_?U>!c>&(7i zX_oWC3lYQX_yRjwBLRwO4K@GW?#K@hH5x}!d#>d+(yuhQgCVRmIjxgrU^Dut)sQFX z#6rWZwurz-4al1a0md3{FM<6V*EnDj0Pd5gE4a!i0pn)!p$8#D8YY&bT+| z40u{zRYd4~Gs`w<6zBmXgccqBS#uIlFD)-RtshVM)(*S(Ft^{d?>+ln4g0^B@&P?! 
z>Lbf8?P0n$JAVL5Ovn8w%9om&tje4jd`L|V-@B84#pgFcJcs{#6t8F~h4?V%n_h8K z(>%5ujp=`61^CFZ(f{iFP%^6OwZWA@vqgS!-d;X+v6yv??L})L-I8m;9 zkGee2!R;sju~uOx2eTk^ygfuXlQa2~#YTyBZH_UlXX}l{)*_Eb%YN4a;BmBF<)QVc zn})WQf=TmwAF-dIgJHmWXnEy|d)Mn+j^L|^MwLb4Z2D^pBA5SumGI@?`2t+p3QMKP z)AG84N#j~^j?ex;Q0pFrDd1UbW+OeOaz8v0Xv+H)j$okdaDXMlVXWd--IfkW~6QWrE`>n5nU z4qLXhI;zGA(N@qae{)8XIW8*ds@n&obkOXM=of=}gHcfsM9Y7B86Uf$TUQ{Aj*9hQ z(>i5$u#F@56jv#X*l$H8Y`v{d;j$hb4(QwetpfhWkDSHaKD^IEcRJ0?a(Tkm5@ z^IUjvczw1uEx}o5kn!BaBKB07@2u`=Ldql+Xgx1BoDx{{mi|*)1El%P36c$RytQos zB;@Kfxmfs*2NyR;ptraY!``YI8s37BvEM=OJ0Db*9VSRSLzZEkBxP>8qVEkKJD@*! zHwz0?ZY%^Q$nJZgE%C9{@8eIaOH@ZJvGF%VYo=cPSZtGjXAU(r#b-ZS;78t;eD*n! zuSbvGIS1}vL{%D4tp$Ew8W(ZiJVBx)wQdJ+Mu3Idsng_{nJ?YdserB3rJo4Vb3J0r8t3AIOZ%XyzY)1S}N-Ty(xj8Cme42 zLv=ZSiq`>3w17D{S_64+j1Oh~pbh48j5O|GPkS}$8}LKj80g;ssF_?xUwvdmjg3H$ zD&@ZkOJgkv!Jh0Odnke2#*F_<90o%A@%f2oVo6Z9D3xOS_bYq zMBg`F+bH#}*{P==B;FOVlgMAjED0)yCH*CJf!Nc5-)*yOYzdQ`Nb+Vw0jyOU~cQUe3{#)k6(t( z>S{ZG4z(D=qxpxb_#1F(TNZ~F<2YkO-~JG$==8;z}0uh>V{ll&~oBpNP|O1y_kjce^kz?eY?^P zpV##~*6O!~l#g4gmutI55CCle|GFdln!Dphh_A}1n@%8F<7Ztv`@J@O0817TbqNFY zJu0}5Py&E@nImiA&m58RlC7OMrF^qTyH}n6e0^c{LNd2+PwZ*yXCMKXEAY20lxOoS z?pjc03rlU9v|v9;DzbBxmX?-W$Zd$H5%?9?Z8Gt1aB;Jbu0K)&MIKr#gz#%{Wow;r z_cQo4khH7RZT!GDRK9t5Vv7_$-Of`e*At-<`+^quh+i+MZOwmJY_giLC-`kkjt!)- z@dlWBa3kGwgt-m?V=nWdhKt+j>1mBheF}y3%d%E_pdyr z$P#7pf{43mIRr6mT^pRwOpCUC6cI05teW>a{E&phl3QDnI;@cFAJb15i96?fY>pJ? 
zKp|Ct@=mHnR*nuKGJ5@G7%daiZckK$72-k(ET?3!>F{HGc24I8Z*Cs&lvER2@gsn@O6Elo{L_1BQO+qZAusuaFB?fH5Xy-;(RQIZEo%pY}*YsiHd!%K>P^Kj3Dl9u;}#^UjD8E z^VZ3zpy&n7IBb7ZXv+Hk%u8}6ZLlemX0goQfBouFy-%c~3g+S}+OQZTQRrtDfOn@- zSpa&X4vnLcVGhy+@F!Xy+f6pJWxqEUcQ1-9@XoOQTmsy2+ur}f-djgS)xYhd7>^24 zLn8t*lyrl%zyQ+H4N6LPmm=NWA>AM?t#l(DLr4xiF!T&@Ha_3?J?}Z|{qy(FS?lb@ zTJG(d9iP0f`?~Am?y|k^yma1Z9hX#aROIlJGdFc4!OX2poveE2)@FNb&fHu7)Ue@;My7aDa4tWYoQN6Xws;U}krnkg zPCINkv$U1amvanYlYhS54=iPjT!uWU&kCtvU|}*1VasAkmRi1TS*E_LT@X%87HA%Z zKuoCD@a8??CxT(2m_D$TLnlvZ!0!1{8-;W4w@Yw$2>KtPrU{3a+UKV;UzP0I93+(; zN6$B2HueAw{v$U9>)7mN0rncy-{fz0?osdF)=GWh#Mh9f%_8luy zqHz&RS5vLASU`Q3>u3KaC3BnC(a<~{k+cbCNa>>mr$@dy55B3w4|t3r;lI@;Nn24l$fU8!?l-=SB$_bdXEi^kDEY$rj-9U530da#Op7 zsOjq;M2&dz|3{|$i%S0}`dmnyta3$yA7rmO7@{^#6E%`Nm-pZo34ZUQwFZ^hM)(B zj|hD8QF+rvMuuli3;pr?+eq|?)FvPmmeKwN(eDB462N#CvW9nQt<>KB60&tR+3wRm z`Rb9iU$_r=00Hpx-o^{)&p<=$y#za&E@54;Ia{V=37c|{pA*1DQxbmd@!Ga?%7Kak zeN5TKrJj-q9Z+BInc07W%5-O;V2jW$3;pa}D^t6up=1=~i`a%&7Jo%67 z`)ico@cpv3=Ss#bTq}Lj0QCfdp%XO#-%_7MKeS~0-XprmkGIqZ234j1&yo41`EeBJ zX8W=@xQpL4i~%5*Ut)Z3JaoeSg;TzK8ZvLW?(mcq@#b&di@%dDir)M_=MmbGPSv5N zm97>5^tG*{aFEb`squC3(EjH@$Ow>!>u;VsG?L=d<=>0TJRf3)s+IP~GGCA)JQsv1 z&;g^tf01_10!~}Q_8ms`RcBxt6f$lnamqE%5n&xFD;n znA3q_;oSETyQT=rM{sIbrK4}smMa7A=_-u)hm zJrDD{;}I}Jk&$Zy&#-yvN6W)wNDb9;{^mS3gwX}$gHC7EDr(5LG9|~jzO^PG8cwHq z29jh~?L}ygh2NU0)J;+WEp#XRS5q>GV)3xDn$Bn)v`JO@>$_Y^n~n%KT#RQWSq|ck zJOI}3BZ?E)wcq)nNaX-(ner~3WS9^uMlB~aFlo-7yEs$qfJVA8cBUb~*simjDWkc5 z%9f>WDWOS0b?i!DdsOJR4Tg>1!Z5kuwlz-OjD&M-kgcb`Ib$_ph~l6m77qg--G>Aw zytoz0#b*z}M9%_dPgW^6n=8F5FU|&B3OxJ{@$l9zP@(T|1z3)k1zm;P>gwC5#YqG# zSAllOqix?SN1sDStT@PB7$_$gy?Ecn^H?^{t8w^qvg$V_S=G^P`Z=cVWwPUnLt4fWlKn(n+Ksij5~4ad&>VXCN=D3nZFbXke;LSRD#Z2aW)u>OV95GXOjW z?Qv05%zLoF1xyrAIi$m>5H&<}kqkZm%?r*Ro8=kvTd1=DfL@Gj3mFJ7iO!z?{_T74 zF#Z!t)6ho}!oa{FL;>GR;A(0BGQ5O_04~VmKngmA0QX*VFI821kk3WS3Tgwo(YBWWCYi|&nLyz+?{(hFcwNYrwm;)=Z z@)*+BU~~r@s^xexD=9UcL5JDmjf@#{>VY?{?W4+kg$GpLy?YkjS5{iOyEIqdX167@ 
z)1Z{jJ7U7Akm26@(d*s2ceCoj0Ri%VziQS?-@rgo_~N8H!pXVOP5%rCNv3-zwBFzY zwAAlr&k!gT4t(z$Yq)16x)e^_UCHZ|UZAkf+avse&2f`b^H8-V-52G|^os8>xwh`2 z{jT$V?V(B|TToG?#uNbU^s_r?`j(318~-D;S1HHp3pCZ!pX$!bC&wt*b-G_DtR1lt z9(nX%1Ecj!_le1W^3{H=2Mx#Zb^Xhhn~%*@+hcI6BaAA#Rg^3O6eGrwSe z%zfjstM*Oe{(YYrume>K+Q{WY@ly&<>>6Z`2%C8~sv*sDFee?@<|+IVSuq0lkZcQ(g94P=odN zgUF{^bj=vTJhO zWf#|Mt&R&59(#`oBrxKK1P?x@d9(#PKkX>G&;>4eth0(PeS=ud1Mslq?kmnwd(mUrbJhWoY;Bz z3pBKpv^l=X`t*?r7*=5yZJ+?7j|lDFPaEDmB?`SBGlfAQQ?R7O{3>TxZB3X>N%LLZ zV}VkKF7?(VeXXI#0uje5cJB-`ZK6iGUveK)&VBnYUw}lLSx2}@zHP;`bMJn2q@!%_ zQ+XsSkev0!q-&(HbSdwfsE+>iG-kCYM=`XELVhyscvD zK4bb!p9M{Al!L42V8Zb`Z7)YQb730h3A|6iy#vvdK%^hLc{pO(P6^0}K4cjbf|tyB zF3Aws!b}QPYczO*SZ3|p(@7f3B)p%kA6|MeVYsE3u%%#%%9w#ax5E6t1A! z`qi{I`K+svzFsfkFtV3vtw3dK1`sV>?;FNsLZT#dqVlxs5d#Us`pryne87%8G#Og5 zS2{9;kR9Y-N*nN-UdI-`!;f7r9_aj3CnLK8rV9zn z!YB1QflF$(Dq8WdtTD31U7-csw7#wp87&)M>beDKnKT8rGM;0N8^ymQwtSwZSq zp@K1JA=$a^+D7ju&Y6A{*Ue-)*bYHN+2DKkpCz6m!X$9}YUdb&l#qSqq0q#^J;5u- zri?iqCh5|Fl~b-Mb^Oo*gvh#%-e#e!iYe_A)cfRWP(ZXC?k6`v8JdsCR(`zL7(K;g zV))#wKONfV&n?~AJd9Wgd<>7r-q^F!OGju;na68^>4;&NTJP4Q$}ZKV4B7->+dq9B*8bSPoRHO-sNPx+XosjShFSkQ*k%{95;cvEo-8XY7)Qb z?^t=oceG!_R^T1ZJ1WTa0X|WAy7jLVHNK5I9W1XQ3iWILL^D&bhj z*kKn+BvN5KHPq^UoI3`aw5-oFK;)}xYF-qH-jvkTZ~}X+mXi|~Ncd_aJ_oQPN0|!M zxnO&wn%_By9rb9QFYF#iz@8W%Pj%&U&{Bwj9a9?$3*j3R;c`VkFN~#+>Do-yKbj)c zcAuQ$!CW}XPF8#wVnaTC6pQW3_}=wlv2Z*z$m@V+i_=DNtfAOZgmI#m;~ZbIDV@_f zmUFT!RBUPE!K{OMI9FCbJHv^5l~v)yBhj>&Yb6L?b-s8e%T1qYF0LU=E5lR#717!KV3n?JJr+cDE6)BrY!gkX-Kb)r~LMvQ(68`>kysfkg{Cc7#?Z?w%w5$l?5k zWAi&^^sUz7;8|s44ez=SUF}ptJi)bLwEojmB!+@wAnPStJsYU&h_q=JDe!x6)AHEL z%u0B?^QR=w%;<|#y9L!1E<3#u+*rqU#$}sWyHO^e92%SR?`+r@-qjGu!C|{Z%A2Ax zD-+eDb;YeBk(|a#X7(Eq>h0M^62W)3^k9#Sj2Bu-)an z0P!ks?;uN?s+IJ*!Ru?d=|O}Zh2RF0f8fv$v2@?JNlrgZ*4G%059Y4L&~@Rm(F}_4 zrHzXB&rj;@zMJlSb$eN-M#=_mxC*LX#>8J-+0!mdG-2?W>5gOEFsh7mIknh&?#OBS za=p}?yv?q4*3wj_Ns8iBNzko<-Nhx!MrAEc~yYh5;g0O0HZg?|VA-=`GriA@`r%G?rg|GhVj{3a{&j-hM5K19F<1r8jNK 
zD?}Ec4Cj^9BEWgu5Yu02e?|ZuL)l7$bL$apo7GFLs5JdGcT;OL;}>4BDx~w;=g1fE zD4m&}WSJt_EhR{VXJQclM`o+-dHd#JHMU71ZvT~s*$InuH)+;ZHNSp5O1+y6M5f0W zP4bCkW;as_&z8;h?{AUMOXf4_h$d=q1O55K##?=cu41((A|j==91_H{Wh?Z0=<>C2 z@uV17)8ddfp1tYSam~EiaxI4%QHLIBw_$SXrvlFAv24zuOO-2e)0iqRC8MRn;Z}9z zDOOwtH?1~Ct)H>FE$64NEhFX*o>E3RzD_~1`AU(-8RoKQ(2+IzFqNegOQ z`L&20T!)nLW8ahH$*YC*D4Ces&Th||J~vcIbV>{wxjN7HA13^Q?XCt0(V0wSr{`I( z&03nHs=~$iQp>{^#~T~Lp(SmF+w?jG2_GRUrik8Lu7fYw04V!_=b&*oI5kYn6`|Fv z)mTu+`ujMd4PQ6m5to4!szLbom&m;4S^U&1-C^=CQO0_6k^B}zn?^yPKDxp-*KQHQ z%H{#RBYzD-7PHHUHyQ;f3+0i`x53;*w{aZbe<>n6V-MRJYFMn)R5-eZH7sX{Q!*c5 zxe#D^OpoT$Ke3pJyW8kLGVMg8^6l_TNCW+gz0f{@JMVSg`{VPg;0?-uPha&@{Z9n? ze*?+?lRy1P@CF-JNI#kwfKH1Dn5vMPA>01kORQLeJqca7-|MoYe5M$ODB-@23rsqUHy@JR z8@7HhTZR(Fw4TDf>= zNScj{#h1L1c52#g+K5=Iq*?jj(3(9TIuNQwSY=8)!|B6UZC7Z;mtHVNCLb(^;i880 zmYwVG^2{c?sT!K9X*TcEaY2} zC#0K7Bf^@8%pN*}<&4G8^&;*km+Ax0+ts~e9eHW}!Qi^VEUXEW$ORi-^~apW$6fyc zLv0QywKAfA!Bx(MBk8;~x`QI|e*-k~jtHrAnuetGIW$Ru8SbjtH%tw$uY~=$FeNaL zLqq-xN2Qy#Ym|LYAFOx~>XhMLaYcssadt&Ud^WFT#W_2~JrF8<5ujBot3X(Yae4#r z1ZWYoGWd z4@XBW2K%8IbAG=?t!SHC$GWJJhD;3_3Vk#3?!O~NC+50qcHPmjn|-Z)<$&~_72x_n zrSMp_S?5QXq3OnS*;%>^rg95Z28`a-(XI=|iyPX(INazEVln{7vdHq-ThKmr!jps; zBu^@c$9^9Zi-15jY;hK_;ZG#ad98L`+oY!KxM08=LQu8@a@`wig`HjZs~q@hr9xpnKkmT;)nAYyfAQ<1K;2f(3eqj%2Bl5iaPmu|doS_hKxYsKyC3Yp7M=25a z-ANrclowh7ul3gbnz-35+w)CzwJ4%Qi-mRli4F2W>g{(1`W??+`sjGoM?(g8*`j?} z<_wAqIHMccwG^zZA~QvKhUIF#Al)HH8r5EoO3!noNY(UUoC=ls69)4MEla`SS`Ij+%%SO38_eCM}lY zwVJ4?@2tN`Jp3$j|EXP?7V+I0dDR!N!TCp0;aYhzn4i=h@ST+gE@fsNS3>T@26}sL z;5|7?$jTJ-_>#s~fGc@}8}i1^0{PGzbro#{2kktH)cI$|0F-t<8)-k`y?WTt}Z z9kT3ErKOmxy=f~G*ORDy@C0Fn=5l$RjC=-CMd~(>e`kyn37f8QZLltC?=P6BlA{41 z@|C^z#fV!|c6L-GgJ zNj(Zh;H?$bLoX85hS_Tq)6V`eGyjwO($J-fp7W$JMbMM)4U?Ox!NhuD|N&t z+F@$e2o6=8?#NM6k1#v_dZKfZJOMuNXl2g|HE@(_F8f1 zO7Wb~J8Cn|FS~p+FLNHtryZqO;==4ajuPoB9k(MNSSu}h?xaXV5|w1aXt!Mrix9+M z4nGUxjvl5 z&;NSq&%{F6sVnoQ375E-6z2Hk{-GCssg+Uj&jQ6$5nsxegs~nOsA@#8Z}RnCOT#d_ zR4_*%FLj&w)ye5WK^TEGmro72wH6_eqzd#nmx9XLrrr0ej|8;^DwJ@0%_-p`*``Vw 
zVX}0fzdy|y;p6vQ)1oIRB-C9V@O;aFj}geJZh2UJ$R!AXrK$`j^yi=;nKjKqShApP zD+U-`wu+T!qEKkd3u=mB8zp3{l|Rh#xho+WRKYxZWWX&7)iU^|`p$Oous$WKIRVGQ zix{3rXEkM8TjHORpAlEVxjrc?lMiWNjMmjm=#Y|!(9D>Lu<*ed%a4||NYEnaY3}(> zG_*(T&AD85R-{rWsmv4kte~+}-h%wY57SktS`CHHwG*vk>D5I{^F*AO)jd~R-49hg z3`$eVJ7g66CM%b1OZg2|wKEumGZLHUyFn$q=~35^f#DVRZd*3p6}X!nIXvIWkUKP+ zg)`gS3VS=z{FK28QDFc!A~E`r$L|@wb%1|dc(yMt)FCGGc}#T zzMEXluVhAqBY!uRl%yV7Up9>iiq%5th&9i~v@Lall-sDhTGVSRUQxULEbCU;`4)e= zR9^pOquR$mT4PX>wP8k#Mx*WM98|V6zu{byNLyv;2_q?{m>%4VG%b?8Wd$moiXrZq ze>C=IW(_f`_|$PBJ(xVigohv1$uKne&%dQa6C=u6D^J%uEebkD93PJgLd>za7EtvghYFB5_#R{tG5f%&K%!z8TG741Ifq zkM<$s)xTh$WRx^KFup|dJj&I4iU~her?$xpO@vxE<9E6Q7pD1@&(@=%HQ1nO4N2;u z0uyY@w&71ER7X{E&@{9yRWMJ5A+$bgXEcelKxMx6C`)UYz1zVy_liO1pY%It*>k%v z1$EXaHg#4_?ET6{zBw7@_%jOv+^SI~_s-Z_sdyiD5p(IScPe+#p5%c76Z3<0dqy-8 zjJUrl$LG}_nTk;(hP3;%+M4?a0rHV0R8Q`59aJ5eOXt!^?cg*Xn@2Q+E;o)OBOYl1 z!JIv0z=vGF3}`?n13R|x+xW3(Pus&}hMD*X%ZYSdO$(Xh`V$!D;W|%wIqY9b%ilRX zp_vgXi=WoXWa65TM(RHLYs#n;*?Z;5UsUFI^tDaCAJ=X22`(Cm)%7aQUzI3j4T6WB zC(p(@pmsjrZ*EfkD9Slq7W)StAK`}3xX&jnw@Iqwsm$G4R(fTnr)^$uoqm2Zqc^iw z8lF?E)KMMSL++cal6okNB7B3{hd!DulaQG9)FZlh?Xpt7=z<8*F%!KR*%n5! zWPa)IiCCzUmzKSK!d=WYu**i6SQ)&3xK2EcI zs%{{-_ewj$n#TO$XXT)O70D0m-W*-_GkH>SgnM{! 
z#fWC<9sE5-Qdf9VcFChA3y>~>2?_n&x=dF38ObT^9Q@3Mc^Xy{D|Bcc$Z&zJ&mt!Z z_`!KcB!++#QlFHe=$eND1F|q7b%qy5#Ev_f=GiLWlw49DS_$Pugs0FkzNEEtK^_vZwQ22QV}Klb zGI@+KnN@xYU>P>^J;w9u6KzG#GV)zsG%$n zR~g2BTY+B!of-E(0U%Zx-Aml!!OH90&E(u6OK=hUy$Ad})F~ zg4k%iDj_RyoVW3Rp$1yS^S&&#O*6*2g#3T<_m*tDZIuO3j+?RB5q&t%(SE-FkKlaF zQ^j*fRsZp0s7J6FO*kqNW3)!Sg8v(j#s-a1o9O4y;{R@`xcin^C1sAO2V1#3>kBBj zKt_efOq}mG>vg02TeJuNnM!~Or2k|6|4I7ce~=oVzpXMV#0LaCQ*{-5NIqb*9+pjj zLCN!@_Ym_eNbPFz^ujJL?9C9{bd4zz<6`^iz1t8P`wY;~$KhLXxm^*K)x)R0=w#bk zQvOT|@dfRZiEysaz2_JPzuAqQ8G(M7x8B=rd_Mb5`qoJ5?72ikKD$r6116O2{y}KH zPvZPOzgl(gnCs8qYAuXcCrF%$GH6;JXR#dmFj`&mA4OaNH zg)5_pM@JZ8A^k2BF$IyyG6CWPivjd!y~so2#6t8yizn+dPfb3?oqmOMYoD7G6a(xD zm3S!g>iGVIPG!-X(Hay#U~(g&3!ODf&SF)>!Y}MtX>$ufp#I3J((Yc@>g@=~ypvBF zfSS+wsoRQci68z_Y2!5@qD$zX)`r(q#s`kk-ZMU?rBm1I!W{0}>ui(L4tP25U#p75 z)I&KTW3AIiLCQ6Bs(k6iW!|fc<%q0I1G!4sH*OM=vxWd*-C=^sc8UjljJr(Sx!PQs ze4MgLnmSY-#22yndE)P&(s2TqMJ%WE$9BG6a^c6z&>gFUnTs%H12CA5fkl1rg=ozw z#iy&{gvgm4JW>E5)0>k@rb^OloqrN5g~l`swD?nlgDxTF7|-`yB!ddQ93o^x}4spd)YM&;&~+sxw)&u4;Z#q}XYJsn(S4M(MdW7(80XK#tX}G z+f+J*B9;El6a2}B@#3bbmt-r(1ehw^5dEbnZZ6A#@ljhPgn76ME zlGDq=K@-NEXONee$-*Ek2Z5;57Suatvc~o+d!!YZrtM9x6ucPmuFTE#YDeP|6`O96 z>;A!r!Nr4q?BHa|!Fg3H$!ZB(glv4zav9HktgaN@=BOT4BiKYyc&$sV!RV>6h0lT4 zF27v+0*x9kK6$+rV$MvYSZGkP?}46YklH_$Hu5Ow7D~2HlQdv!+I)9SP~kt~CqRqh z>nUU;?n`C_G~l+#Cmfs%^V+C)m47$ezq*gSr=Ev{van_~zeqlGvD`80=G)u`9oPuZ zW#N<6yciQmTQ;&>Jh?d`B%^Yun|wG9-7A20BV#LLWb{zHt2-_WF~78vMhv96&B*OC z8$u5^4X&i8c57Y;1)c5GWG|sgR|h+2#difkpGks5|KK)G4pd*2c`8vSlry<{WJYa0 zsPA^@W^b%ZcomOqx|tsXx*c9hPw9sYZPLU|gbdz!{=cvnA4NYq zro*B_;cnwaCIh=5hq0GYudCy9nykUBxoG-ezm;PWJ_yuTk55eysd)K!=Usm^Ao`PL z)|y%1&xG~ZZ{|Z3^bB#DpE2wfS;=EzlncpJY=?dl8cv*IRLy=50>!GesVP#<@dwtJ z{}zE2yM-1!YUzbGxf*T6WgR1;axWBKx)dyMnleiy$(Cu(sz39ZGdWmx+uQUL4xeHy z3~`Z3IPqd#>8YV1#<{ePmmKOhZRB&~Q^W$4wroq0vP2>d7CG`*Y`KG>o@c?K1Pnxv;49MxFZLr;AEAsifqiL*(*WPp>Cw1<{|=;`?| zS6YRgowD|sjHBs9Z%u!sq}?`GeDP_|(;A%SZY2UD8&#-Vem9dkDI9~#FP;h(Zx7kz 
zN{b-I%Y>VK%IfFA(kg%xv8NrF(vF1>@qSv{Le{*{F2fQIn%U>)16r|-3@pK0Ax&Hli|{z%~d z+UZnaR|os6d?na+VqwOttyRDSkVi$(NML0Vq9*pI_=>_w&a64s%xj@L(U}Os-;c3>8x8I*Mw5~PgyVThAVdjNOLL5K#`-c4N_WyYI zw?$}ZVt|vyf1mz!7Xz-|#sG-r-~Ywk_z!UW*PZGA|LOl1dmsw%9+FCLG`+|gbDhz* zp_cva-Qt}18gSQ|nwBV_HxS;mMZ|vivRmtyNlmiB*Y=ht z32@Q&k{V+f1fnrXDVJ=k*KNl2dWy-eoNRkdHvx4_PQrp368 zZ*bxpy0L|RJ=?847&aGuvt8`!>K+pE+H4*X?NWO^c%5x1qM^FN#Rv%6pf~N zmAN?QC9xTJz*?IICO4?Tc#9>kQO81(cM_#TgQ`*b2T1tQvsj%9&pqAMMz40z$()T~ z^b}>O8+5^^8z&M2pE18?AI?CuI4E7;c9`fkc3_79q0dzL*$>&w;@uDUorGvrA04!iEldOhj0R5m4jvc!i)yj3Uf3tD#4iWVp42mrlft=CPX z`nnwkZwj2{P@ZR)_>~nKh2Ubzk0~iqoh(sp7M)R-9EHYbdb3`vQje`%T*|w7WI`n% zZp-mL9ouFK@2E@0jwCM<&DeBBfqbgE3O@X^cOlQuP^_+ZMSl)G3g!)|Pv~>CFRl;! zaBdIDa`gQ?+G`HVPR<1Xq#0Kg2%gXzOr7uBMVoIa8BU9w%$R$zjV&WOzrq|iIub(O@oIJmJ*@bfc#yqVCWAH_FT1Hu$aU!WX7MQa$tads5Z&F$V!oiH z4AWL=A6J>%jPQw_DGr5w)H;LRm#T_Cjix6URyK|L5B+8q@{0omY4J z%c)0lUq5KNxUny$EB`y@o3*ta(_>n^kFAyX?3I2u!V z?5}#oBh5|>?`-dxfX^k2Z{qJjr#?K>gr}~j3R;i+F|&&nPuA_=9_}-5TxDGoC@YyL zeZT#3x2nm2o=wj(Q**Bh4$1nJFV}qPJ?eYHHs;ZiY9@@=N4+vF#TN_TEkTNS91B-N zHHO>kQ3tNOPOB6AmK30?VZX-dbKAD-O%W#eF{(hh(cpIlXp!vXk{O&!XLS>t?8Vn| zanr^xMc87}ZDW~-aK3!O=7xJ!x|a_1Y~+X?6LnRBr`&ynyFOrhBl=fm`m>jg=5(vu z?ry3ya!W+1yxmMGRA-ar<#!yaIi_brty2EpFN4$gj`9{c*n|xvER5^x6xs?ZvmtoF zMenFT*a=jN#96TsaASq7hnqcxfdZsPzV+VbnUsn0AMXy#(6Pn#O>X1A-f(NH)4A$r zHr&jPw&N7EYJ*JG>?E6}gJDVS<$6E&ibZTkxC9mX1!aCRhiY1Ta7#JVdLmvj-tIA( z3kX5{TZ|2wCyh&8W*S^EXlk?X4*l~rpoiyku=#`62iS7PlY?P-3$l}A7d$%h^;GQ-t*&7zInf2hj~~7i$e&y^ZK=52i(amFF6-%UZv2^#&lSSMaR;RO{gte=Z33X zF)svv1CND^TniFLvo`JvyW7G=b!;f`hBI;u2$FA(UjCD9-KUgKZe6EOy%uOU|J`w^ zp3PBI=x&v8hce3qaqGV9JueN3_P)qz+-f`!eqRff-7_#~@}2oGJ!&<)%>usk)nC||dR19^o*ko1|lVOcO3 zOrx#Drt4LU{YmNSc0K*}5ZHoW*{+6~7i@_wk6_0Qd)`*lYCGicl@%aMpSt-e?@{Oe z4)9pru-7m=M>h+bD)rE%IHWLx^V{8wi%D0s#24pP`{*P%&n!G8HXFfzFJZH=oMYMB z23JaIBNnm|S*vsxdN>|;*`#|*E9D@EzFV;}EOL@cl-BZII5Who8-lBUzV8RGYS>Mw zmouWsrpJ@VQPN?b3e1*cs^l_EP5S;}`JB6?!p^4>f8(~V#78qE#ON?erLHl{^4Pub 
zIZ=0yNmW+SA@r9BlbMZP=2gbnN#a4*^RWFE02LSQ4s{(eMy4kT{>hLKn#$6>`tb_9 zCNsl|wSt22j@2jm6fIhH09mA)-*=3Gst*T_q^$1Ju1@Z5_w8(WDbWu#D7-$}UV7}Q zqIR1pR#)1Z_p6yCgvh@NndWSLy+aLFH_%an*qncXH+7?*pC1UcL6=J|dpjH3iSY@4 z;p$fi`+_&uGU|um1}z^nHco&46P_Cq;{A2`Kp6t;BBh8ZzD!5gpkNN=)~NPJq<^Kn zr^07jU~9GZ`{0^ArFyD0DEz#tkJjCK39(*hr!v}fn zmic~fboGkQV+`Yo!3yu><5^@)r;EJnfEP*-22{TenLz7!$#mYDAJXCbWt!PC%o zDxfPy7H%yEn<$xxkNN@0s*1ANoCW(`IE}F7H6ie`+Z->qBs~6!_O=^neg+0B?Wrp= z-rORzl{c&`ErxHZRJ}5};_&jH|FhAw*wDDI*OUwWcTk@}N4eG4_F;5L@yJKie0pD_?}FJ0-Z<{`X!rsm z!stPCNdMOUd*&Z*H{jTxuv+~~qdj_-?&pRVUujpOX~1}pG1MErt8KTu;?>3I+f&Ir z#BE+WCDMAbroOYR;bRzx1WTfzP@gsxvY%51{Vwt)7LsT zN%$O693R_r!>W4Xpd&v_c#+eQiBukA*>GsGsU_^0$DvrdKCroPU3l@*!>?4Jd#T*4 zfA(X`81^0%d_9(JPdMGLZJD zK`s|jRF@$fg>y1v%%UJMA{6~Be1j|6$4$~=K{$+TH=gb0Om47JFc^Py(AR9UIvMT- z`BNgKippDSzBQgrl?Li0hxWU<+&KgXhj@?r&@agyc9?&!KMe9L4!--bU7+RYHsK7; z9gJ_Vqy$|JMhoh!H%mW0{iRx^bD6S%T=9QndVVbuee_MK?~f9xQs(uT-(}Rhw-Ax# zPP=!;g<~P;Fovm%4pdtTNp1kjxS0+y$Er8%1;4CWL{O4n<~Cc^jD2$xR6d~Z^Ly_) zzR2jk6O5BJcY3h0#Mb?<(V1lG@w?mLo;_^UpCLs3W&zUgW-1bTamN#TfehRs0@yxA zW#=HtcZNEwc({RMtvOyv#7^){L>K`ch?>JBH%AN z44@;)f#=}IkOrMGrtOf>KwtRrD*gY0hVrM&d zKnKNEEVyoCc#KZNJ2<_T{`6qqyvI3l7?+;Qs2_X}&P`&w$h;WeWV_I2xJq}698^oo z$>v=4&kx))2gSm=YD$-$DiU69wI>2DH!#H>_^tZ!JII)g^T95aQ)1;$6_F`eP@}^| zbpvm%{bt)99GjMc%?MdH{*3Z|qxJu7^6vI&7@o`7!7Ojn08GebTpHqE9Fp?{#+0-e z|2wl+ey?e5^cPOsjLx(;gn)xf?Jdo-O%SNQVP)46x+G2=aC1xK>_<(xmY-+a{JOFX z-h7Ps8TiYZxMgDNcRwkl+xQZ)QLArBxg%q3*Z=?uHf|$2kg~g$rjno4Sx4qOck7-$MJ{U>ZKr+if)WeU?RwSN z^#y;e{mcRL^2}JBeULO{BQF-GI4dWGP!{4m?{kX*ihXsSRX;8)=%Mcx7`^Ama$`y7 zA7Um{(`vpcM4|LMb8)G&joUwc-dto7axoZ}vdoh6T!Wbb)iWr}a3Ms*vOTzLGvnkj zeM1&1uy9V8&G-qgIB~UcdG#fACLlIercd~W$5!P4IZBK>Uccp2ltFo${gr4bp4;h2 zT;vcM|Jf3(!S-1=AcD6dpRPTRL5h6`26l zqGGbx+Q@4$*v@BnBcyCHs}`FS-c*?MOKEaSWm*xE9~1TMZxGW5%QOI<+CB2qndQ?T zosYoE2Q-jXhz1{^hJ-M1VJvFUYQi|{8B~= zrJwX*gu74HgH!`6ucx5cv8FT8!G?3|Q+Me__En1ukC2e&G1@<|pZg`+XQ+~dpHuGg z^_yXI>IPSygYbsaiwrjwoVQA0VJ+8cHcgvr&3^jT3LcY9NaN5Q3e4X=EWCS7{>G8& 
ziqXNuva^-glWFX#xQdVrr((=kwDiSczU>z$9Rf8MaDi9iPYcn$@33 z*cIBcc!$^FMlA}y0=8^f%86d0|ED?HI^dTNEoSm z+IbW&B?!dj%;g*mm9s3L|ugcWF(!_$4C*EjnpJWq3uVRh@Ptz@%FEMZtC zLE90_CQOvGo+nc<**x(UIrBnWXtp^mQk**F0fSXHuyr|YnhR{#6mhs7>dgP4jo8mQ zL^(K6k1rI|x>9^Pz7%S}-bpIRJ9onA5;E1y*b2{F^|m}|IJho8j6HKQ4ha$Ry4!=> zL{|p3fuv=6&)zOI^yVmXwV@usDkG)B{~8@1*P~AxPR$Q4Eojb}HI7I_a^#Gt zL+DkUZ$7vgTOE8@PqiY`frf+-SGK#w4ClM0I9>db%plBO325R;tV@fo2~e_Dk~iYbgFPc_4Tf>+vi&m z*M&&r^zytuk4?4);gqt8wfu}*vfnke_0Xj= zBj0XJm!1kd7W{NfeR<(s__AT;bESgo$YdYqkv6y=IdAUKw%5$qI%veb)k?~_%oxUDP6jJ!|6|*7>u8|XjR^*4w*blF z1%BMF#8Sg?pM~_#n%&kFH0M+dkhPy@n@i$(h=_I!Y!uhE07EwngGx10y)tWf4`3(Q z;a8X7(YbRCoLYA%;GHkA!@tU2bNiH zSg$_I&lqg;40kjH4-Yi2<>euv>!KR`XL$}jzo!&Wa#?jFghH!*8&svk^Kr3Cv zB2uIwSxHGZFlZ@Rr9t~*1KjRx8qm6g4#+8T{|TG_8j(z4oW-P8(%1k z0AGTDf;9vT-Ys^~qeXZ;i4vy_p^reP1~7>3m?3rZmJRu@>j>R4bvq{yn3s+`ZDaOK3_z=<)~2=h#Ypnpat$Y}U%DbWPlIe+aq* z6wV;v97bLD&VyoIU$ zW8R$YpcGCGH@icbzXn-r_#+mx&HG{;SH+e z&g+};E1-VUYDOY0O||B8hIgAM z(N&x)dc||MCW{Y)fRZ?gmSCXWw$1-jfJ9Qfs&K7B`)J>(a_wj0BLUivJ5?!1I?`}@%HLhj=uXZBlGi)xaWxl(u z`wa>m)cjJo_@3bA#d6|J z8)`;Mk=hZyGTZS}D^$Qh=)}>ht9V(%>q!L{b`t zu2Do7TDlpf8)<135Xq4o8p)xD?wL6^==1*GbIw0-J{&*k^Pu;Rwbx$jx~{zzt}7T< z_@>D5o;q6=nGpSMFKJ(rkr>i>d}dzMZ)-63%Xrrj3>_VvJU)tj(}!x6be%)bP$~3~ z+e6)=BIH!y0g0BjC8%?72F1fv(2-n87%>sC|3L?0#yYyz!-J%z#;C|XwH8b@F!E4r zuk3q~#6(ry@di#N6!ndXXJ*`#6l0z&@rKjg4T?@EP|x1LUOVD0f(;0gxTe+MjhU zn{)Z%#~|8jRiJE=uVD{XV6cLn*WXh(GZvj3?5A+ZDsR@wrItlSf4pZ<99!FTwBFD^GBLHbV=9D|^)qUj#SS6SaNGf_hYsZ-?6ifJP%b#+ zM9Vhu({X(ZZ0!VV5?`GtdlH}vZ``xo?VqMUV-d&xijSErl zE;CoiS{o@y%sm0b&%{0#RdClH`zAkfZ%FWHwj>Gta_XRuXLf|saW9|3Bc=aAJaS8N z^G=M6)5*;I@UJqlWN6!TStudA92T)|-#WG-slr!bGa4 zbzXu#aVCFBzBXIVS=4@k{$8_S?0Tf z?2ZYh$}w2a9Cv(r6!ZGS=|cP@6*=Dv(m$6G9p0qWAU((_?Fq->qKCeE9yqh(uMLANs6YNwlXsl z6CYZ1?4Y#<2DVBRrNg#09AOM$47VZ~Zr!}9@QT4A!=kd+g0Ck9(vOaQNA%(?v$7Q? 
zpv*+jdpSkqsINY@R{|x1kT~gv9fvGR7{O_;2Itf)m(31uNvMUzm8D<~(}Uj8hx-L$ zPG--K)KKHXFV$aGo*9a~ci0RVNJ=F+jy<~dB4|pJRHO;}+fk9C*7R@bffzO>+ff*6 zir><(J1GM4T{rJZ=sT#4^e7^Ux}je)gC#Y~```<>&SD^*6QO%+jRfEDgm5``LKWQ; z)rY*KQ5j;v*h=Wu&;bs!LlG4hz*J~&?=eb^|F)OD{m@MjXS;DdFAK zr@7U0-&1*~aB1wwRARTxn1Lxn%5!>&6rMp|)58)d^98pReP&X&Cki8@K3H^-X>DdV zXP)GmM_$Hw-@Y-m9$%^p0a`&Ra5DCC40w32Y5vxq0{$yy$h^3W>cM|nfXmz#_`}b? z^?3m6g1P*y=>+`t|Nhth{~!JTEpLrq^1Cqu=PiQHesSf)M$b_KqDS+SaE&dcz9q}tO=@$LdfCT)+^ zG5gTw`aYSUi~6RooWE>bEE-d~idVYBEP$cW2$^GtoZS{RXr~+%OO!e3VZ$4 z9QZN3uS9CoQ6r#$wNlcCeS*@#X(MC;AGCv?(9~`qNDR;gytR2}iQ`@R971=;6L=WQ zYu6F>Ktx1jE{aLjo7w|ixI?LErOc_2R88yZUzz_}*Bzxze$N-^YllHYXPdksf9!(I zP9=wZcix*(et5x{)N^h2P9TvN%o5@ACc>O%fwI#}@8?dZ5X^ZAl*i1$fPLCQar(q*t(-l)iO~xq!_ZkRrLoEX)gpau#tuR3S|{o5JwyV z-D-){Y<^zTms1u0OojTY>BRkrneN;udP*33e3PQ=yps%$NJ>pT-KatYixDwprIP1= z`$7`KEid!P3J>P^P{`4!*~k5BHq3mhXmgT!5Gq{vggXN!Uz{fFq;pu=>7gZ>nQYOt zt?8W!U5FIo5)ZK)pUJ8~hg|hXfsx1k8DiXx8uT z^Sy8NQ6IgwArdbZ(w;f|pr8HoVu#)?UUBK3&=k=Y8w`f^?WH^ZQ4o`j#eZAiG2b5Q z96~gk!&e_<#Dz?Wp-WO?h*XwLw&pha>YQ^mT#GK6^~DT9Q0(NU?5N9+O%HCpFAP)a zK?XEfBjm`)2eBZ%_tRK2@Nl)TZ)H2wG@pt+q52e=9|B1)3M$P zEfzgVJ<%MQi+b`R@}Usd6l2hp$|#^-;VUoaC3@NGcLIQ?uGEh=*B-R`9oIQE;nEV; zLm$Nw#X)i0iBEsk|M7sj*bf(|&)sJv_~?EP6l!aGKTrSSb6!Vi0uIpZ8kJP+2jlB5 z>d-n>Hi|QktY|*%VEOR4DO@CaX8DH84gF4_{veO|anO~=c>>R#1&amoJRjWw>ZlSBhCy$|K+qRH z{cX%keSNAeq91twKEQ@$1UqvB|L3`lo4Y%(LjU_O@fpnl-r!dG@dkcW$pU_5lmP2S z8MR>@Ll}4w)sae$y4}b*CG{Qe%OvPJRoOP+6M}U8F3Pt8pOuT!$v=vQ)0WzK`g$|= ze4@_ClNesKS#jgkdp22s-P#b(zDaxM(ce;rZ<9$b3ff()Q_R8#HQQ0N)3$3MLl|FS zdye*4x=9oTb32;_NjV&M63H?bMcn`VvgjQ!Y-w|d52(0SL<0M*E`Z(?IrAv&9U4r} zZ7(r6F}F+QyaRguClh*$*8cB3M)WY9u791P-w*M7wHto>yt8L#S`xZ)?fo85M(?}Z z+%DbZ#U&T?{J&|0%;dfALqOdYj|}b3S|-&-*tPi*S1~5c!DjLsm`wF9{cKs{!RK>< zFVCfUJ~v$~SL1)nb-w~7DY}~$ZD&$ka5mY$fN(0CF&Uz!RC_6p&WN#7eko5GEFWJ% zLL&1TCLhnE9Hz5VN=06ieouGInQQ8@!WWU|{6(ch!0<%lTf_%;Mw1Sng|IryKn>lC zlNPKWgkVv zNnLDC$Y7`T(?drmv_Sx2C;c0j3FDb>KG?Rt{yy!erPC(wsexY?xt;*d)zQbZfJFBV?)R4!CgKb 
zK0%>XLw-n~&%ug-gWih|;V*Uw8MTMzpU8KV1Ge&(m@EbJO^*8S?8>-Rpb&JIPQK9g z(>5MD`1Xu$q)F=^)D07|P@C6@W-i_y>Zu;-BD_zeelUDK4*6Kw=yzi->TgJLGn-B2 z?3eZp==p|T9g;hCWq;c-3gqtzB34h7!el=TQ{p38(%4|3Uj?@5x6xO`#Wez4IuwOL z^)7z0I$Yb_k2}{&Yqp>;K_t~i0+BH2!C@O_g{PeZf=46)@{a8N%Q)<9av;|Vu&JHVpR6%ca&on_6@s4L ziyB>Ac4;*uBg0F?VD%q7w)NGM>qn>FOl!_3SCOari;GEfPxV$gngag8$g$P1*%EQB|n+H?B44yC$B{pf6?(6ErE*gt+(vLNSNMSeYo zB51~miisqY(&cR-Q%ZGMz4>6Em)k80G8~buM^ZV%6i_qvxqum;HJ7(D69SH%sa*|4 z9zc?~IqqA(#KVip{yfxo_q9i?&&R!T`lOv1?Bm8B-! zEI2slUf)gaFVew0fS3BNJ~U5o5nK=zTTo}KLrxze%W9yQ@{=ggj7fy01s>56b>ozs zCympZ+g?_3W`U6ai?7>62UH}{>M<6Pem(*p1#YVI)e#(l^yAXkCJLt|=ZnmdSv{Mi zG8)~?q?H7Z(ys>rZuHju#vacd1z@FMmk@Wf)p`ukTJpqI5R}K?Q8`sb#u0hmLWGtUX+-o}v@-vFoSHVn%(oG8J z7bF@$kbL0MxL%by>KF~$E{&$B)vnu;7~pk?kAZnf`8f{7JbDUT72KNTG;uM-8jZCbWza!vh5Mxcvs;a!)$<0l%u?0Yz zG4#j@tu7DK;#e{~qn?rkb%xhGL(Md(=C`S%Nsf@7g6`fhHVJj_p6d_}_nh5(M%gL} zq*5m&5qM-co_Q~+^L3OC6SbXcFJE#S?R~%pl`i*{J8ZC@bsf})F^a$Gie;h5{0N87 zp4AN4Xcgm+@p3l&Oz6h0Gs&KQRZvji7{P<1LtLAjDE8%HK%Dok*=klRpuo*{`XIS7bqAp8~{G;J4ymC{U^+T+kQ(U<9^%xTE z${V><$S4Kcj52e7cE>ug!CjWDX5XXCGd80;!1=fnl1c-PPZeGfOa-h9wK8?G^-OMc zNH_1^8~wHUj&OX?xDCx$Z+ExIH03;47%(XR#!qrDfT0jt(2DE?S^4d+3|u>G4zCuL z-i9~`3}zLU>Ee8w+B%4N@1(S&P-@7t#Z3$VUV@qO;W3rH`%^hi##ZdgBUTw}_j$Ha zg)xU{yrlcUsh7Gq^$&anuHk+A=&U{I>0ywVm=@xpRP=&3-u)`?731F*VG2E}=X8sG zJUey-j;uYq@wp-Q3?L-VOy!wlf3MPFU@*RWYiL9leMa8Hq!i&FJ1>}lo5}2z=gFNP zw1BGzr`2|Iq&;c_aP@=(^z3e&-Nittz6NBnNue?6eU9p#Yk#whMDEr_Z}(=tHgKFhZ^3`Xh^EU|#9+^JRkYh>la-@qf|h{-ziG#Z3vT zD^c^=so<8l z?a^)URW-0YWE97wq#201e4umF|@Fdrw zl+`_YTJNJ`S)oo+V(E58W{%r8S~jJbv&c$!L%n5^mRL=X^;ScRTv9?CqbAgvVeF%_3X361sz`+47B2WJl_ef{QF zYt9|%aUVwd?*g$9b))!wLZl~OXZW-U^v^m8WIn61#I0HJyJEHC>WW*&3(z9Cm@PzCH}3nW=4t->5Kkjb3h!g>YszEBP-vF&w}mr?2&=34kH0Weu$ z!3s-@D)sfXbV|e}KGA~$;!)Grv}u+y+*d)R=ZCqZOSki(Ig@s!&r&S4JmlX4iRRlC zGAHBT*DjnuWC;*2VL#L~XQPH61O$KnAppep;P{w=Ud6$&M*sQM=$l%uwNrn*Uw!{N zPhdRjf8|Q>^$zH*9gnmOB-!I5p!LU!imYo*jR-8l-@1}7@^D{G5cy>_7i6!L+`2}t 
z#CG{Xwg>-xaJ!^&YO2)(DJbTz$j;x&+`K5>yl+L3+QM6U7CFr-5g_&BWefXUOz|C9 z!4F&LQSr|>T8VcLuUb6e=LemX6^{F~*x6>fd`r4Y$xMc0WOkJVRSdQ$x#`u%|3W}e zzG0mWt2hbF0^yh;Jiu0}W!JB&pGJzTRA+tKbJqWzh-ijmU#g(=t%txCWGSAzNuloo zvAKNzMBh&9l-Kc4TdiZcDMt*=o2?d{t5SW(nCA;i^CX7f$!?qTg(8+B3bYcEr7vui zU;fhX!Z{QaeW?X9G#}y21fIRlTm-iQg14wMXWdV7qn9x04zN%a)RD1%bh>cH{w1bC z8-Z*6o;oeqUd`gV3f(8BH5)%kFn_kzW4bIJ4}ExcuEq8nK?;~gVuuFB{&(rooky`% zSVT)Okh(OeI`!9O*G4A1blPccZCQsms?j*Q!k9FZQ=KOlOU6I2o3#F3+dubpYU`h< zZR@)q(y0bs;3q|Art*t)esog2B`y?O6>Rfv9u!0w$D*`sQ-o$7#Fsr5wY!$Wji7lo zc~9+NvN-ejKy|t=x>V0+8eC=TEcc2suB+PbWS2n}Xl(%edcV_UW_Xc>nSh`LlM~Y1xwNNHsZr8w>GkjUk%Pkia=bWKDVa}|8h9L za`Scvl{@8T zm4sScyw3Uhauk%zkwz1ZGElr89s*vl+t+b@mnMmqK!w7z1`%0l-pxCuE%|gsf~#H7MLK$%eGdYPsi2&&1v9Z#1Acp@V34D z&CaH#PulY6^wBs$k9mygjS0+5W9z&)1EyHB}`g)XupMsx$;RTvOwBIiCo;`))_A(+(^=NaHhu5{J>yd~xa|G+agXNR zDJPRjlfiN=mupkr&&#UgyB!)v1l?Pmb0dM`x8VzP==Y z-b(#BD)HaKL=B43H5N5}Dd6eMi8(O*#^K6utqk|iX_$|zn}IrRCPT~91627R-irJS zoyp}`_T8QDnT-A~nZlK*mjp3pIgFIv7eadGnLPFNFngo#7^^-6`ta=l*FvIgt!*2V zCd~WvcoUu zP3o;*PYZuc{oel;-rLWG50A|Hc=*p|zxDv^1+*c8-6x zc{|)HubyL6vSv-tdZ2ydv<~iVVCB8^nS_d%J>h%v+-wc1yfq-Z>Rh+g%Q+YTd5h&% zu{u5V7%&?a{~v}6D<~=T&^4^9L(At+)|L0mfzA0zfa9&5nHCRR5>vOFuM~dy32sG)b#IHqez zY1E;}|5J|e@eL8(dTd{Jx>H$m*>Dy!Q}#|Cex%1}kS8yS_%ECc3?**ZH6h~QL`JkZ zsg+G5PpZa7(W9Q_jlFD^f#le1sBEO4&y08Zq1?sCx2;Kk6y|m4e=NG^Nd|!QWLXEl z)iL|))&|w>4!Bp+kgiH!RQz&}d~%NO@<;8Q`9JiPZxD{f70l%e7!>@pq+#X}f=0}G zp!u%g*9|D$&P5}k&S+ToY0z}X$p@eZxLI-&*@2oPrPye?%}abpcnhqXI*SZQ_gEWy zbcwcrN4Kv4XraDy=n>KFGa-lCNPPirU0Lv=YoneAc+F*I^!K+D@$%AyBr*y$MaSZa z)8(Qi_x(B)`FtksN{2Pj&W#qt#JtX3vWcd(Io}0`q%DL3Ci`AivYdG{*kyG{_6E)a zbE}Iwi~15fq`Xt>b*hEn4mende=}7p}hP{$jQms zMj0(2g4*;ycrbdnW|g0|Koy+wI#yZ1JYV%mA3uu))$k*iDG0Re(Pq{@I6ARU3de;jwAVH$-We}ymi!e0## z5wGUWik2VCfoAl_PksqwWZ+ZN(<6RaBgYxQ|0VCC-VB zhi8>qVTMifb)+)B|1&pkIGQvmi}9>t3?iR-WePOUPjv8ynFsqgmN6gsWExX%!yz zsBD{FSjd>6W9k-SnR>$o2re({0zl~{%{kx(?ai%_Xvmz~Q=q<`e|vYD)#A#{y^VmoirmT^ zw8y2yKY62e#vay9Ohd6vj)98u#58eM&)eW-ribbie`hpMo66F@qFnR3_2 
zqD>9s3w0wEkP6A~lEh^US_UNVC*8+Y0%02ub6Gd2T<_#a;+cBfr$2~>H$P3#SJaCsnl{&`#CNG2J*}F&U$HXNiNPm~g9}`=1 zsu*gq(Pbb1qpmd8%mXodl=|7}wMcs18n*@vFcKg&{)puEb1UdrT8gdd=Ded>#;)4D z@5S{*5+Qh<;@|0N+1(U0465(ZjZaI!6gfTf2R}Q1G#Jsmw@NQ0fkW#;PnU`Ve^u=2 zN8@HIcdsPAyQbcBNuWK_)2FvtSp)j@!YJB7)@qpoWQ86!_s%Ryi680QR8 zoY0!!g3HscOIhgW)pn%B<#5r=9gggHUs@D^WPN)Hi+bE>`Tg1v@P06O>r>@^R~_hF z%WUL!+I#bapAkP|cTQ4&(@J)?|8*UK3GR-Sr#+;b{&E7Yi7?eMUrF(?eV2sN-TnM) zPkQ8GbI1HbS$6|hh`pGYGFwE)PnKs+=}mNL;tTp&|M$E-dLvwmN?cyDg+rO1eK+z+ zsTYV3i7@!*5MNnhGEx$8bX7tgK36B9)TCr}D&0m$GSm2`0D;nANRv5}lJ(rRo>}Vq z>5W*$WXJMWH2xKnR_Cv8+I?Sjpz4DGp7?^(lK!64XYA2;n3U-6w3job9NC1HuFSeI zn_@?ml7Qp!@)w%{cvM;tFfW(Xt)#jLU-WZL_o_!CQibYY(uxgI8^ql=B`{WZvVPs2 z2M{9m+jp7`u;+3kqqSQzat}1Rf=a^cXq&@hhz0H1JJFIhhXO*iOj zAT4>j`iu50GvCZ++TGSQayFQaI=Uoeln@3)9pA_?Rd+NhV~247p0@O*Z)6!Xx-2U3 z!Hn`w$t_{`mpXjrS{KQ~i|*6&AKajlN|9!!7+FTzgpzuH*JYMqH}(Jd{P=$eiS##J zhtrUn{Vu^HH6D($=ID>fyUyeEcuaZ#vXxIes#>S^gQx3uSkG=8Nci{HGcP%n@S#rl z%t$mPQ%rkRW^$G`S)MH^nFdpXhjNkBQ{300j^%xTa!~D4=~mSUVWfJ?_-~0BPsOo6 zPL8Gogl>G0!~^3Cb$a?@*l&lK`ny?3TgHWuQZm&TH5|Z7^Ey^{cfdLE6(*`HK(tLV z;#&CPu^CKZfCHXvSfC5Hb5bL=2^BM30}c_ zE8m{j+bmUcQk7k1c(-CUz#5L}=2H!wtQxk6q924c2fL97WWC_N!f(@QK_ zZ?Yrye&%78Aa=8j7|W8yQ3ZWn@?0LU?FTnkRorrxe(*<=&1eMV-glt8c5v6$?gue| z_`ik!eU3!p%+%*NEiL@+(7oN3BpeG%xXzO&?SXHd!~cDkeH|3wv5#(y5|c=0BWe2> z4YCsiwej3#wVV-qDu#%E&fHSHWb6AkgAxosXz z6V|$w%J2}fhLd@3I6fok-ynrjV-)Q-0%X>iX!T};S~s75fJY;q7WiJG%PD!>P|>!O z_A$5B*3EWN3rkjxq7JF7zY3puKngGJ zDt~=2?K;Q)te##V;ng3^e_bT+aY=M-gON!K8WwM9wahx%=wA|4-!SH;;*x5_vFfkI zm=K{F{+oRGfB(}kqv>Gnp8Nf#uBa^UIF-tGtS6a!Xm~Kqfwo0XlL3X5t7UwEKctO&2$kvL=lV(h0 zP3d%Gh9eVo+GnLfHn3FkM~7tXk9^M_@lNqx4Fa%uz|l}%mra76Y!W^;eC*$1xi=an z)ar$D3P_MGjaPtO?_Y4OxA3VN*9btiA(hh%o;^11GCF6uf)}`+{k{*q;Z$PlsyAr_ zj?XM;vJ5=w9(bG(!2;l^OxpPbo)LIvzzO(Kei3(4f3DL7`V3M<@cD1_K6Jg?sVd>q zRM9p1d-(Un_~u^M((O|+NQ21J>GTX;8{R_a0Z{K>eZV~x<1MxOlMT(-+bO5Oak$n` z#j_rn&__>eD>6K+ZN9#IL!B$Z=D}c~)@D=e{$Q`(*I{~J6!i;2fK-8Bh)XhPRwTTV0nThJ#!dO}Iloyl?%~!$oEEYWT2MMU`Fp7)i^o!<_VaA@Oo>|Uk)PKW@4e57 
zEOOY->p7q?QWFjepa?%ZXc@;{sQE@ZU2$4csyS&Gs|q_ zq_jzBWp-UK>PE%L+_>JjpW_b5aq6q8P#s_km}?id&}W<(KzbfHc2<+N&?-D5nH3Wv z`y|8ef-2^r@krvnz!>npefzs09vZjX!mT5ZZw&O@#^l}3^7<-waNc@^tQJ-q^M(ET z6yv_{%&cJ_o(s7*_a8s<_n>ZSxjySGaP&y8kAUaIAf76$i&@J#O!}TZJj&Q8sBsTx zq7v9mQ{>N!^5WW7_1L={>xZMce?AK3k$>z zoFF!jXX*=#oT@U1^;|0eG?pMP$Sf16=RATR5>?j-j?v;<1=7HfG+KIkpu<6ge!0Di zm*+4*%nJJlNHLoC`LLiUqU(;nJb^46N8H1w_Vp82pezk+wyj=`Yb$l^=T4GeCd&u> zpU_m-74Y@HIXAdeZ1jBogV52KJd4}#daL(Hmg`X-ioT5UXupb3RTe{t*Crs+xt;y+ zr`w@3U6k5OMc`_xUQceGUNWZ~8 z^U^xPm6-r`dWl*P;3^lqViI`971n67MW<5!XAI0`v{0jiUF2$A0HmuuqI*PX2I343 zUC3WG_1hv$BgcBr$(NMZ_-|J&LCY6UBYw#~_1Q|g+xN)1gp%me8{6NqN24g(*T_3Z z*G3bEJg*1?%A#zcD8w%vAJeh%iLKomiHE+z0Y^KB-sVH(s=e_%IqLGEY@F<)cd%n( zNFtxuzY0qA59Kr4z@yBgQa|S%mA&$mWG>54ShcxRL!Hr7ZFVYsjJXHh*!H1nql>c3 z&MGeQVL};AjC4mJ&`A;yx!wFFns_VgI2t#4P#+10f$Oy@ZEQz3zqY7uKr>T!I9hjfOu#V?#NAfSMYBP9Ty+r%%&crM8{lx+ zrEv~@YOzu-dckqBFTW00DPS&#n`3hAfKyS>59T67j<0^K`q@%Wt~Udk*Z-j1o@obT zy3`50VjJ7|F}1AXuXJzuc-(s zelzlGJZxbTkjz9;un@F`Gf9KNe2D`bme4Ve>W~Fm2iy0u%WgfQQ~8cJyOlM#)03(4 z+@zWL5Q5+f83?hhLHdot3FnJei0{AYIR?BsoNP7N*-TBb{1Bz6`+k!iAwJR!oxG3Q zSQ0W!juB=?Ig83?4@9=xb!Pn?cjpA6lwRjqy+|f3oen35pXRPOj&Pz@0&|t)>I`jb zk<};O?yV-ai6jdoT5P zqHImypO$Nl;*$BDpl6R<)b(|G(Q6bbDC#Vr!zS$3fAW=Yf7tdMjq1B`r|W5lTK1dH zx$K6A+FJQ}h!Z&aNdvEw#a7#qn+IfZ3WvZ=Hln`tvGD3CTF>|(N=Y>>9=TSpP+XpY z#WD9`x`Oj-Q#7q-UnRB=P;#4-MTXNYn$bQYpV6ev0pHgtxO&uD9PKBxlr&n{8Ic96 zD?7Xdr85STGYfksYJEo{{?k5Noyh2elMlc(HyckB+xvt4Vxf~LIk4VQdqnJ(l`;Cx zfG!5_O3CNrZkZm}e7!U=Bf3_Gm^W?;?BnF&3j8mi?RvIms1GOZn^soK`iFLl()lj$>^+s+98~KEK6`N%@ywdV zBM3+FDWEqv!1p>#&vc0d%m1QScqDLNiejk)G^>vpPkd^+C86QqU~{w`z{Fmx%x8Ef z{INVz16gIvH28>W)#=k~kD6 z&hKaelTk^QtzOQX$IAL^Nz$~g&9ZpLq-K3u$dt4BG_Nsf$~}b&j(0t2dIYEY6o9%( zb(i3}CFf^!X$1-|)BW9Ld08>bu8{5Mg6dwqS0a0*18zMd9nmdSt$A@b)z3jdx3S`9 zBctJO+Is*M2sCvmEWfMR0%^$?zbH2s&W_O01prcJqOU)0c>aGhHrFlkA5Z5FDEBG=5+jZn zvfMYC)pwX|H_RkUTm?K%uF^O259XXBJaM!QzoGf*8cw0@s)jiBljD!m} z+a3iANsfKN&4gA}Tr_if{=YB>x|C^5Ka1H0&iE|cb8Tg(W61SDaVKKYFIt-8Y;*;;X1r{YMP)^t-7;z5wFu 
z887B|%_ps`FHXZ;xrdV~t(<=9U1c0lbu-V;b&UKg7LXJ7aVS$I@D&)|%+k8lMaRf8 zPw0)G_v;>nq-C&fNPy$vu{`C>4*fQoz*LdAvd@eF9Vn;jXLaxUgNzQGOtnJ*c22N{ zA9^~Y$=_BvF{^npp~-3D*xNW?>5_u2ZN_;ohu%CLD!OrPBVbDZJcXB2ai zR4U59RX%yLET#?7<)ZQT3DUM)dqE-&gy1s5oS@0$)zRkI2Cnecjhl^ph-xQ9O*+~; z=|xcwS4O6>&rT>xL!O1Ozn@bT=^*i%6jt^`z}O@?D{Qs@+hx9K8b<1o)VHcK!d?|O zOaYg;1*dnId^z4UIbmW4xx+*?TzO-Ge>o`!mCJm63)e~}jKpgrjq7OT@ZX4c^$+-} zbLQ!2dNz+=HV!qy{;770{PV?Xk{q{^D@4)fYvG3@6n3C5^{yH8!ua0}$~Z$bPz`K4 zA1AN~aT$6XtP6z=jEv18&pf>c-HYZ4HrpU~xRyrl3%EIy*aPx%1G{w(NK<-(k!fnn zCxL7p?T3?flX&2k@I+PEnK-CEVwm|Pa9D79QDan{gsW;W!4>ycz#uN2mzGp4yv<;zUv_d=YV&i_nY1~vm6@G(G72t z25Y*;m2oPhNp>eYDFS!oaUbhw`I@MI;Fl$sgKXvww5%zo2OhYDL;ZaXh9qZDw1I;1U1 z;El`@gyLz#;XJwmtnOBC1czczKI9vFj9XIlJ=)*knq|QN^>yF=CZgdQsKVujwAR;u z<(XzcpXKXlRN8d>_+eu}M3nYdwC3YA(dh5r`XJPxvH3kzv@`r6;j`A}@)iJMm~~lG zsgFpoU@b(m^(elS54Gv5%{DAHl%P=q17S|^eZvRW8I>Or^h}K^x=k1DTEjwmQ8cZNW#*qkWWoO5B=G6UtuEmCMRrb=i zT0zhhP}8fdVQaqb$}kd%`-2bq`t5qNNvl~~@b^x@^E#7PjEv?Ev(wftj7T=dZu-V$ zybv-aFf{tjc?mYnQDu~s0w#me`N6Z$&)$wiP}26(|rX9 zM@g<%Oe|Zl5DRM+Tl> zRc|{qflL?G)Z03-7Nll55igUJPVbS)86YroSX^P@K6k&L_PVP_aMeOm4nuH)o5IwA zN^yrZvn|dz-~ot+hQ>w9^1Hq2nNIcJ{{XLwxR+J|rT4*DQ+*E|rx+?zjEQM9Il2c+ z{Fh#sJK5SeUrXG29Fjk|faJ&>zd7Px)n6T7T;Yy`cpB>xvYNB3hIZpyMNhbWS+ekr zj<=j>@NbN&1wirU0I6=i022Z&EVw=VwPt>PZckiVi5aZD3Pif-^&PTh+$jAH0z4T~ zH#v_RjF0p0e-#j*vBX*DB(^(L-1cPy&Z}Q(5tVnU%H;nlsJ!ag$CKS&Mgdjcgnnb^ zyo{KEWIss#Y6lc;N3@!Si6gk#=1@2NZ1D?z-`1|NA5apsogrznAd+9d*C4n5712$< zb*_P~A61iF=`8=YD(&;xNu+skYDQ$nmh$@|VPjo4AHv7^A!mJ^BS8Lu36uv@-!L>^ zFSB(O7Ecc{^YIJF2NXn*&b*X^B3+ze=Z+t`r;^|e07?(CTT=OGSG7%^4ZKx<)as$U zmJ2jo2PQXQat;qcj*S1dAVhRaZ^lzCtme{+Z7QMg9e3&+r@g0m%q_eAfM6@<^9Qj?XaJ6?Q@UJ&zs_~@2#)pocF$8w7q>s1z`uj! 
zn7RaZ=xTS>e*)sm=x8LHUg>Txb{J0gX4=!C#whPcny9!yINDNC*=j&Rd=HJ??C z`!F#fT66(kGZU!dxvfY_jP#x|K<~!1e(#9uu__f`VbY09u~v5-acbTF-a?0bk5m!p z5-SxmHN=FLpSP^mSjI_>jJRLR=y=PNb-YR8lcDWT{Gb&pE z9n9G|_?^%vf|#+oUDhmc!bYt^imiwexY&{71XF8ZXGqxdd+7svcn>5eRugTMzxWul z7|fgD*x$Aznoz?0XlH5@=wg38tLd0x6CZ@L&&QSUhJy1H>jOFl)2iCEIC@+u#1Hne z5haFoYoYF7*LvHDqksx~Sw3Q7Z#&TW-o*X3Sp0GU;u;6+W?S)*w|up3qsY-iHGikZ zbTZN;9OgsX&lzlU$0$(Z<0)~>i^uA4$79}N1`+kA>ux8M z!2oen|Fg=Yk)ub2B=o$A!Cd|DT968`clL8C(Gt9S8%vlc#lw_?kiA|EsF{FjRL^xm zniF#0z>tO}n>Te&QSoVh0Iskn3FBDfq)bgry!vz24_8SgPCRhr1Gl3tW*H=#b;fm1 z>5t}lgoG@nf;Bzn#^!sKCJF$G>(AhqR4^@24)ox=(t3|I;^_eVfXCAf{gB%?td$oc znurN8yo|}D@)Ija@{TG&yaZi(p$MpLU#SlJrsq2LgV44$EKvJo;h)JxRCxXDb;jo< z()Q`9-j9WJT8{Hm9B#cVU{7!UvMtX2N#H?ZO8*g%zC0HOj*4W32mEO4XV`n+%umq> zNArb(>876Ax4+l8{}B#=#vlX0Je*1;-P=HIFI8igGgrsuQq$o&b*=$Yj}v;S91H}O zrXuU4ub_a}lYlkQ>sfcq;=bh8uZ(t5kk8KpMCJ1@@)0sDCwu`Wj#rJQfl4=e54O_TIzkaLn()(A(A^j*;Zb z%7d?{e>{qRC;;I61i<@ECqPEwi&&+_MpB=I)ytebF4cI<$W+SoJvRLhMf*3a02=Ns zk(rszy=VpGhu!yhmp4tB8Ao^jGyfHJ8|+`Cnmqd$vyZ_Px6O}GQ(To6alWjOjAOv z0UjHe*MrUt0P1dHP^6S4l40Wg(q$TrXq~ieeS+}XA~l`RI2-u9{EhnE!eM6IW?7er z0wC3|=YN0yVh3M;I`$OYnAq+59!v*Ec-ro%@9HDVy<2V02`S9RLBRUrJ+Er3%38RE z{!a^#o!8g5V<{3m??y|k{mcW%lg{o-G!K9d7AF1uX`_7n%L7FmF&Rf{Ey?|SxneMW-2t@^p+t=7<~v6h1z2!G3L z*)r`egY=mh;`atXLb)ry=*9IfqA@L*_I=L3&g$8pQSDT{C~OSgW16?DOk|yfFU}9mD81TVV-zBSs7*vlWo!Yi+cYFE3>WnDgxU_V2-5 z`AGPuk6V&6TXR>H_GQXV?cIG~1#H@4fQYD@Nz+6zP{+H7jg5z8N~SgI zY&_4O@M4RNqjHY(r4a4=(X-sQY*Y3RD|d{l5r%>#!)dFKie^LXXion zX{1cs9fiB_>iH9MHsR9tppqi!SyxEQ^$O$7asV7C?^nt2*tj{4OP*QdH*dy(8uYR?YX$$On6jdV5Nag&sB&RuD$zD$|S(sF}2G9hJBXUqkYCEywO5LCZ<-CD(s;4vGY^yae?PbWszkzCkJj_hL)-84`FN?-WfuRP0Yhwi<4wE z0zQgo>0?lf^X%%k6EE-A&3{J0iIp`@_M9EQ>NxR^AJb*jk4}3JpV7L<%rnJrx{^pG z_4mr9O>Q$8hATN8|Bz!!4b!t<&GEpDH;zjOD_S>SE~EI1AXuc91z&}jvw4MOKW^U( zKA=|0Sh|F)>XFQ0447s4b4>wEq|R|R<{&)Bn2HRrYE3eM45}0I$eBWM)8=aaOY0Ff zd^o1sM>nb=hn!vGyt1A2mW7vQj_1)U^A+^l!Te!$Wc7-tHO5h*|J3L~^;Srp*CBG$ 
z(TFYD)T^e{VCe=vX?-1aRn!1LStDXZ*>)9$PXQQVMe{|!#%`*#UBc-g2vSwpM-h2@`D~1Z!FtOX{iuD z|1ffmiY$t+4CXwQs?Nq_^000F)~c&K9wje8-zYXAZm3oPlUYZ2@35N%TTkwFX{WnusQhH763rUGZ-u0_) z3|+EAioJZ!maRo^{r$$a1P_cH%~bypPQ zQh*=Bk&VcgDWRJVf)f*obrXqBty8#o9?yeIc|rkjUXVSKq%Hsc7WEOK)GE&`NJ96O zb;RfN-K+3tgVxZhiqI68n@Lxi`kf`H8{Phkz&lB}4`;iLu!E&eU5NUdH@~-?7#?wO z+z@B_Q>$8GGvDm(F$=PSV`F1`A%xE7#j2N$LC><;lmXk6sL5Olju<$nVXP8sv5H-f z07d4FK)4gl>@!}*HAit&L-ny_RzJ)x@P+`VhxVk?$HUgm$gl`Fvb_iE|lse8ocw^zRGktLAd|+q~+nYlVC{~qPYajJgeAZT<6>b~xg>$g5 z2;weq#N?fsM0941n)RQ*A|QMco_a1WF7n0e3D;j=a2`wM|E&WKLlD{ahjv7}01j{kbEto}vZ>G5eB=6DG8 zLB=~?@lm6-4c5#|ZEqi>2vcUq>KhwVb6ZE}!pV?wZlkwz+9c!^I5`EUpXYU}Ps}0q zyrT}?*NI`hR}O2>HYPsyZT4_I`XohL=yCQ$9Pk)MMn*dP31M)2o*OZoxq-NdTW%3A zLUEG7K%&SKc&yj)AFdW{$5rKfX&{QIomxIAhTKvpV4XAHo`f6`*BUHDjT?`@5EWx( zWkqnk;0HSDbkv%)b=byTalNsD9rBBkONg@CDYQl2L!SMmid%HieD$zz#rdiAnNbfI z)Y`6G070XpWs(yh&ytWTJ-sDVgbOsQ?cRjv^@&b48f5SH`BPbYIQA@3Q2N|F%9S<_ zef@*tu>BV6<#rR#oQycW&tq@@B#pn#a%Co7&EhP#KRR>ra{ZAxv$Ehn;%5J{@ApDp zK_`ZLTOo`G!<-XQ3%kCRch;R(n3e!Lj?kg0Kx{|xo_UjmcrZh z2b6M(`Lm?c-;pD7(}GSf$gqF@-$?KL;jmQkhH$EhArK<&TC56rDnFJE?R9*m4O#V0 zYngYFZPrB&biY2g^ixg2sumejsTBxNx@odqBb|On{9v=%mPrDU@+lblrjpilIoE?Gay z`wwZqJolVm1Z#Ys_jCLS_VnNa=A*lpRwNl14&$j8 zkdDLL;b%x)yk>fKJ#&B6lq*_a=}1lci^*xROO_r{I+@|!cPFOLHYe(z@7+jb;L1?x zbba=wq(mba-C$kaC ztiE?=mcK23r||J-SwllMaa55m-vSL4hURIa)3$@m8_%wghOsE0eD(_Kpc8bZ|G7#+O4jY?Rvzxh z`|`Mv$w&t>v2cJ|&hN>l>RE01v{Kr@IMj3ZIU5KQnCy3keDfX4%T5JH^$x@#`Oa$PR>HqedyB~v6-i$x6@}NCUGS@jJyU75X$*iE8`WMOtg z_(kljchP(@pRet>aMBDWA7N{bd%p*Z9^8uKp$V!t-}BO>g}q>_#gR~K3OIeV)po`j+7~LIjLLsB|M&+addNk z98is7ad^3PpH>_=1+?{=@G`rE?s##b5gjA@@>*Ah_^=pW=d#v#gPter=nMN%{doyV zdP3bj9j~zYb=9NGn|lYvcbE4hr#Ul&Uw1w{X}?pSaEKev`x_U=>G{Lf5^WVd6I2SwOTOH9Mzxzw2C6HWUa&u>c?+bJK% z;^_ViDp(U}XKQ!G^V3-5msDHO=rKI%deU3L1Qik$XH()dF_PrFkTS}W2*Z{nNlM8@6Hnxy=>wqD^Ysa+`0?0Duy>VyTs0i+G9O!) 
zk3ahA#sZ8rJ270jw-|FzVNsgi3JT!gDZze9xNOW zAOyqPcpM#@6cv$t7;~sZd9Pz#L4@DIW3JMI=LinaK?cpOz!`%83RN!1WL3V+I?@$4Rx^ z%Xvy^dVwJ=Qm}Umy<{%UWD<9@UqduI&1ZI7sySH=&!+Qy_xCvkwdKb`c)qXtOKn4< zA>Gr+tlE5A!#h*-zuvCUgD^e&%J*GS9Ex^(s~Z0OUjO_2^t*8k)uZiUGQcK15Z$DS zV#iHst848N`!SG>icE>aetuKx=~7JAe!|$DU-|3!yQ1o6lWE1Td{1X3t@n3XFm)sI zI6Is-261%!MrBV`89EuWIqS;lIQFQdYqYJ(z5%=^o(^|0_H~XC*5_=ehc{VhT_f|& z;z##OUyUJqpip>|q^_*q&AOnaB_;VWj}Y_z*j3X9;f6!=3W`VG{?=DT2(Rb_@K@}1N zD%?wnAkBNtGP;tO#yjQl$AU2#Hc9ZP6?i$Q&Y$W8Dv^oyr|@YrYxn905-Ws*SGm#? ze~79C`^O#Da&o;-C8|+|Sc0LLyX|IPbx&6c18D#H;qQmF#qy;xzUm2Zwa+v~WdzN= zQ09()F%AIG0jNfy{_a?#^+E-@JwyHEv~RS}!)-FaG}e{E>ilGzYQh|{k(k}PFYis5 zYY8zZ|KOhCbecY7W{A+^qZh*-%sw0{p{4A&`_1}1kb1-~ifh=7cfWJT<|wMRf@Ws4 zo6BN{B)YG^>0E95>HDj*)-INn`#!!g3gYH15YQ%oA03Ey{v;vI-NIsP4olQVhyNpO zU&#Ho&B)S&2x%>1PD{wb!9kjg)@^r1S+$aR#BAtruQjrva|K}>e^h!H!HKPBVBYlp zeEzn=fv`cnqh4CY3yNv=8wu03n0lHACRu*?G!&m-8re%bDa)NpG<=xxlP?dZs?5hA zEj3LnPYLf^`rb`Sc$HA0UiM~OC$XP1hRdGFXNs5UVRydbK%P&WL$5+8DJgsL#>Z5t zbETs+H(&SBimqvC1sic#q8f60u)@e*>PJjH1HhGu2a(@n6CrG9C7v6;Lg&0^i8+s8hDC&x}gs zF-Ld)uJFpn)+GT>KL*n%3rC`p)&5+;qMs#l=DsJTcG_8r_tTrJ@k#@8>5c^x{he{`Zg|E+H<-kLeEGXCmgp;O4Tb;%|5H28?JRKiZx> z+^)rs*&SMr2(SxZL^Q=rj7O&33sAP%7n{l$ClS@rtgJ<{f0w^U!uzJO>h7eNB(HI{ zZN;das@n5t&7$xVjjQQOKP2C+*5OWUks$$UDWqSyf=I6l zqW^PumY$L*H@dcZa*oKk>gy`dj#{`ltn<|3M+>^0)#Fc1tXjRi>g@1I5-eDi1Q?(n z{fGy$wJEoSeVNaQsa}gpXM*LGoPFTl_B|)CdBopp5?$Co?20zda+b7{OCLVmnW+Ic zuHmr)!`-`C%LLveezz+^f+TgZq!L9%dswWGrLUz`%D~yZCrk7F`vaPLny-9$OMM0c z4i=!<$hlF05EF<>xZJN@_p{bY?Q6+yJIqvl@`|E}3Bo|@DmHupy5~)O$YvS<{YesQ zt|Yq0Pz0EGwqC1C$2Z?$_;~Ij%Z;(q%s@P~%2(H}_N{_FBqHkZy+C41>*FKkEJc7+ zWwkq&MTranN9W6?^pSFbC$JjRrFABhHx&yE`OI$FOGCH#c>gzNAko8iyfqk|-#Bwu z+Trc6JK?aEhtoXbs<2+|_CC_x>{l&!GtGSv=f7>yxMtG7ZB)0DL`cWZemc)cw1KmU z05-$B)9uJab$6#=vCfNOqXtw5NmwQ|{LEp%zV!qGWTVGg=&S^X$1PVD)-ChOUaF-> zo_+Zc5Y>Ax!41RG{i5wtzn1Vnm!V``%INy21ESltj4zo3}2q-Aoi1O+CgCV zM9|f&q5x*Dty#Z_a4zU~M0ew;lf_92oEScU*@1eEwNC;q2l0mr<|8a_VsqI^lMX+M 
z^A2`Z?`H$i&Hdm03Mr(oOHBBG*176zw0*kXzLmsyldRSBHiY9YzmAUlV}0k44zdzF3Wj9mK^{P zG&l@syOLv~1v>FU!HEq!2h?M!?I^1K93b;IS|G{G#iMVK+1clc!|1lO`&v9Rr+;Da z?$(>_6pJYeN&1Mhuj-MS=C?8rzY{Sz5@*fc`*-H47zocYNZz*JlJu zeR^8HkbkYciSjBZa^EBms*5c54b?s!*fw;KoGPTC)7WLHHu1VVkq)-v=fpisroeG5 ztO}JH-Zly?RH!x?XO}80cRTT^Y&3}Ak;5D=YvY=_2-Pf13S*TuzNJ?j=;ExA0*+nf zioLui-L>9J;C3{_>=@GO&3*ZxJmdAPPk)3ZS6h$)gO=3iL-|xM&aW9bYK!;Ihq@4Vm(u3m{< z=o!X!8b919y?KP6$ciVU3H)qHR2kPpQ?-yq7x6h8LB9?SLFae{=99gQP0yw~;C4qO z>1COkm$|Y)vxY}iY3AR@hTLjPyh_zDyPZq6dj(@vTN0ZF6~!s*>}QY}NBhZT8sxpu zFf@O`8E=2?#)!Yuof%OYoC>4Pb8>atss~p?QheVog=FJ>QZ5HOzrjc2Mx_VTR_ALF zE6W@M+{!x)cMV1|TFr;$g{89;o4TTLwPlBIcp#xrLE~Ky_x}C41qkE+%phtZMWk;2 zd(15EAo~-^B^FY|2d9IQMu-NrYd02|IlYVBo=*CHWND$(78^$d6NQ_Jwx`nEz6aGj z8l&Bw!g(6&qn@@=l08ZgDT(+IPmXv0;D_r&_2H5zw#pD4Er%~Zma5y~!iAkrPR%N; z3I^{`p%xQjxR&JWxkCLeC7;+-q}SrGUOBEq_~eVI`TENQ04-@8$yc|`L+@0jA73&V zVA0iUSp78p-K%oAF(6Hl2LkA$h};O~A=CYd9ck(Bznn00%byhG{3$M$GrKtvULp9` zaiZ7&tw#fnqNogsN){agXit#KIX*mcT4byc^?}XRXnk5;%i)5Fe7xl#47Z;2rF7?> zXWK;rt5qlztHZw>l5l+%=FTEo49p24gfBQ5*DW2u;?2jziSEU{qY*aV+{vGvc7?xIue z^tKbZl9`#=-u@t8a%`+i8|3-BHmZsJIgT|7QS7uQd#{4Ex{0{bErPJfYi$&~ClINE zuUw1|xSn;_AfE<`%pDP>$@sgC_7`?l#k%YyBxCk!2^AgOgo;Sh4Tgie^1}N?1?tf- z-@^O{^Ruw~?Y=i?wA9rpg!bolYf{<(1`8sWXjRSp7X+HC@j`T(rx?Tdo;H9-~PlMlrYei*yw9Ik0RABF*R0Lz2{LohU&<#XIF( z<~pfa$olOg1z7kfHN3HVJ81FFRMO(111}BgBs~zSvp+Tx{Hm|cZ6=UL#ECSlX<)HN z(!?Zdc6awKov_3EJr`+dPN>HqA1xgn-9KRFAvLV7wdu(_=cIhJBAMsM-{mc{Pa>Su zybewgELu-TuEw%HY|6Q^R* zZU@`JI&d^KB_W?-IB#mjmoM#vlfuYJu!V%i)Ev-rvPDy{04qP*el{^BgiC7X09#r{ z`^Zvx_0Ec;q6%*at=QDzF21iz*Ou!+T1TyMp;r30=nJ9ykDp;Ifcxm@w;@newfiNW zl{3G)Gd=!#D2J!9^Us^oAXoQ6`)o>{H5CS#QLD-N$jzh~5zKTwY&;SDiOMvT;|%e^ zzH^PJKOH;0>m@B2ker8)@q)o6t-d@b^BC@Y$KBf2mW%-v_UQBdQBMo2D>(cv~CP%H`tn?AgbccLZ_!p7(aUgl}Rogm$7p)N#|^|EtRr@h6KmNU%#t1Vuc& zCCurn?BN|fZ%W0huqFkEF};;iBeyR!S(OsiBz_)#b&D!}TvEq8FJdhyf};F#63}A3 z&v2EGt`60bD*Nw&Y*B04JN2e&L3rCZW(h&qc@8vm)L!tml)E{VR07-JrCh@%PaC z7%D^jk~GH+jt3e3d~be+YGK>8mX)>UC3=(SXuGf&FL7S6bL`Y-BUy 
z^l_>90e{d%yen}moY?W6xWJe@$J~JdetgSPBbG1*14m&#q^QmSX}_(ca1w`u^@3Av zcURZ*6E|=uO%^cu3Pnt50g8}EiV4T<7iq{?Z@v5;dV**_D`w@17AsxCjai;`2ZUZq zmW*CX-*r7mjRW$R@^q}{ZXH^aPGoQ$u_sB)WuX3rGk{n0ne(gpfra_Zsdt~W$h%@f7Hh&bBq_l5){iU?H z@0o+$r{}2^naRpbSw0(m1T9a1(CDiej_ZS);P51V>7`0CJZrCUG&DoXZ@%*F)zVib zzZ&96x06l7{}5r1_4m^-#2z_A67}rL26qm=>X^uNb(F5ey*^9%1U%^!$fjX|PCpHn zMd~4L+VKIAZ)>a6q1LN^U3ScA(Mg;TBwEDXBI9b=C>6}Dx0&H&Y&Cj|_UQMpb7){0 z3Sx}@HHgScs@-z#S=U;C9*!5L+mgq8u`|C|pr);%L0JO_j`NzoY*OP=drEmU(1>F$iI?iL z*oFw^02U;{rzrO zyBJ`?xFGH}R>EYqN51A0m(t?->1;zbhLj0dM^}R6>~BHiF9YZDd6?67eP~?fSOrlH z{n*uNVFqQUkecNt{|MuiY|c}QHCPkPE@3@McRWsI!ltTe-QiJU!i{NetM|4motBX& zwAR`u%f(<3amEe}8`Z0yVXaQ>nMZpl*{CFe^XeQ^#&}wvvl4{#n&e@xeamw5l22yj(ciq!&^&KNvsctwZ7Wg**0fSCPT$| z9t8UNEyaPxTfaA$8MLM_Q@=%3-t0&YJ3IBo(433*%%!nBuXbhE$(##0`80_YZqEVf zg^m^?;JND|(H-j2TdMr(rKRncBWYoCuhZ?Dszz)iC!iUJnuHQ#c%I~dJ)RI9F;jLl zZRsp3$o^gAc2lwSa0p6&7-iC*+MQXfQBLn%NEdwsT{}vz9Hmi19!T1OXU`NxM0=~7($>{dH#O=GO`!YPhV{%k_8 z)fB(r?t~f}ThwWMCV$@|NyjZ=2@eA;DIZOWxQO#^oM{oSi|4DIyT2ULwya?1nvg=L zmv0F8Bk+r?Y%Mt~C)79Bl25G%SQ4!o&hWxm6!OsfpY;nkcKw!OK~k^Nr^(4$h9TvM z>d%4p-0vTaiKZ!uq#v4^w(rIQ5x^oDrW&b%uf zTm)kHEX99oj4qh1@7)}XYLS=qcwj9zCWwv z%WDbmu_@ou9a^Ac4M+P8HDmlCtN20BQ@LZq?b~|lmK(jec?c73fX99&{X1QovrRBF zKQdndW2F3r`B`80<9q%(be8YUxbVirlw>4?5x87-vI-Z47H`=`W7;P-X{;C+eB`u7 zL0Ie0^{JX*`93zkAM}Ah$d3Rj=(T($u`PxT<8qz5HfFtj@SD5?vW1pT28)&1TZ zMVX!`Anf?k%HZ_o-788cO(@g((hq8(;nrG5TkhGZj!zbI2i;{+ry~b_v zV>qW-{Djzru*m%U;^|Twj|r@>ua4BXaf>;0WUIjP5QXnWfB&ZGt*u~n;_8OCGSQl| z^+Annn|Y<=%0cnuiRyXbCQKWu2k&R6P#4qoOB+1?;K^mDm6kB~bbB;;qc*P?bHy3< zVxAw@_h8=d#I9w_-I^rzNF@wfMA_z$zUyC>?n>LPcC80en?1+0B-iqGO^5<&2GAsO6W=DFI&JPghJYjULzg z3BO6^Ey&IlkO|=vQtW*~Ci~8hegkn0Hmc-2Zq? 
zO`_|EJ;wsKR=DG~Liq$W?(tW)pAOuLPS&|_ZL!9wO}eRUHq;={|qWHme@#f zpV%*Zld$k>cjQ(a^Y;t;`FxB(hLcgb))xnFK*dA)qTl9;rJVXG&MoWzFTij<4oMft9v~w0 zaT5<7(y49<8=MebP-`Pr99d<$cRX6*`sL@i`d8&ezBqqWyjov|it2>$#I(nW1g)N+ z#;gKaiX-INKM;1=sXB{egT!wA-p3W{&{|7zT{BLRpTHphG%~oEg+Ue56-?|R?wOz$ zn#`k%JY8GcfBg_26I^U~A*69gcY&dT?i5^QweFkdiVlx7eJFl$hW-{im&P2JmoiCI zTc>!zpj=YYDdZnj5#m&tqTi9PE+*q{on zMPL7RPR~hV&8_Cp6lbg{#^NQtl{gsIXIk^SVz`*lTS^zr<@8PyaGk;qJ!A7qn>{OK zAE_m(KH&$9c+CNxOsisEL$=w`)eBTyA6b&?+WSfUmyQT)_>U9f6I`ZQUSYrfCQmGi zpwRFc%vl0#3XSnjqF0r=LR+o| zE885q6j239x|Jx__hz^py7HIEq4jZfBc^w8M=z^`{3OF-OIN~sY8f6XN>~tldl1ReNxhBZ%J^Vl!wGgAiLlL4FsFWfuB328EH(ZkJ zD)@x@4fgiDN}q`_9_!>iy@2H`re6@k9VK?MxwYLn~*MZox_p zQHjkY-AA;k_UIWQ=ypJVKfq3{Ezz+ZTJVHtDg{*fk(dRUPQ zACFiKv=tiqWANsQXz}h}?0DQtWK~)gd#}>62ZbCmZTse{0g!n9)mz_>;6AXr?M-L6 z5x?p&meNk-1UIox>vlI07@$sRT@cGR3q1PywuOR8F+j2)qPVG*5^eR&;C2vf*rv2~I8$|EeY0lv$AeAI3EV2r^-~o9XuyZL(nIhl$ zYjRol@ye$}rY_O&C$zM%IT+h32h~7MkgE-PhkFu5bg$xEj{KA!t2b;GbvG8Tk72#; zqJVUHS((FJ;Ab~q?anS@GvqK`^lrX|qXVk!WrfM)+v2l|M!ttmgP&8=m4n72f4?Rm zjm~em@psndFEOo{kFu;adc|kkCTiBxD$~u?n2UJ>R4R5%da$!?0gYe^FE2X5iu#HT zF?fD?V|AsA4E3fd^=7^2yt(yOo%4i;N@E?g?B1N$+^m2wu6Wg@bU6L3QGO6|?aUaS zQ;RX-oT$wFH6zMDD*WrMyuT{j^Xiw9@wx+He;qsaTeyMRe+YYIxhqNSbl634-Ib2~ zi+M`&qN_jPv@V-4Ix_>P=SEGWmy3ks*w%Npq6GfcfX94fVL9&%qPLcFwV25Qa5Kd3 zAT}{@T)SnI2%8r2=?D62e!qykRY(F12IOD_Y;1kJ>~Zl?CR}T8QXvFU#y}fYWWsH_1A)7YmrLkQL73LhW?=>WR!OL+hFzgl)ptYppqF|ilN@C_R*)&r z{`2nOP}{Knn!xxjyu?1qVu8GOuZS$uUx zfYyazTP+}S{w1dR{jL2gCR8+}G;J=%zHKEVe)L*$>8R?~3}Q97Ax~}T*zl+y4nqwY zDmExZ91pkC)>$gYqni8gGo+Vgm8%??c4-W0uL-zst%AD*?b>dc^lamuh-hcnJHYC8d$^)3LYj6H< zRBB58au&5~!rO0wO!DW`wLSC<{jSI^=1`T{&?}J<@~V?MOu)=xo<#*VeF~}3UrsNA z2kD`vJc+LPu5+yGGFH8=fP!eD)>WEWQIauvYOtS<)c<{~EGTP1#*?jVEm5XR+0MY% zD2BE^hWvd6fc`ct|L~ihZb$;wHs&rf8CkJUneFgU{tNX=k!df6>NZl7kt4(E)!)Zm zJk~V*r$?Ty!s@-LC54ocwyZ*|Roqhv*fm1EU7N-;%cPb?6RHCwews{AjwUU!L`2EW z-DFWwKpf!K*|H1z8u6b43Kj_A|I&naGYEfAsyVe_ch}OC>Uf5Zc4kP7MMV+j(S49d 
zIJcdy*pWanA~Mx)!9J@wx^P4oncuT~l#+|4Qj=;SN09XQlxHH;Ng4xrJGA(982@De z(f~{Sk5u<1;4HZym%HpCg*~yz;W#S#6cpzx=w74s9k)J8qu~Jriu(Bs3A&uqtRkAv z)rH>W39A%Mo{}6TEeyl*>NguK-0VN5rMd5@go_goPe#vVwA~tX5Sa?i%UYh|;(4%S z-K}wLm4k&C*?mJVrl!~XoRsPYVMZK0XQum&@;s!9Iwwi=^b(j=_nhaUPKGGn&dJ)? z6FwhPW;o{r{B@jO1W?t#M-COQdg&NYK4!qGx62-+ zB9(0$XmayaN`KtPOBM#FbFmwq*BxaCT!8X8$l&@mCRnTWqisE}7Cw+m^!am~-oC!N z<1z~>jzRL$5lSPbF}rg}SaZ31BdKUxcw!4NA|xC-F`W`EkmBY?fAi>3wNm=twF|-F znE9&I!$O&LoG3>xK`CwI-|iW>C(dmpS5LM2?VQDC8UmfqhX{Pg$eM?VVp9EZ*4wzE zki@k@w0{Tlr3PWUw3^mshr?}J@iqD|zeu~48B>>LPT%gq`ge7w&j=JiLN?Z5MFd@oSU>tKG;SYbm`-t6R?Efa+4OX`uD{D!`5@r{xSt$`>BllmDn!d$2NrhXYWc9{&U@jw^5e11z&Y0wNdI@ zBb?b;X%scsMO?K^w08f}+nZAJy`PlL5EN~kM==aaw`x-!1~A5O=@MbV{5po|A|u|^ zetYn@Z^(k3m-~k+hlk>*qI1!dINPiZp~<%2d;WoP#R2pyU|_w?O;5UawZ85OSupM0 zO%gvdArT&ksg#~FMB1}DW1N8I|2hLECH4(}c=LFk6jLM~M<$~wv=!B^-hLWX_HMKg zGP3>V&H}XI$hGe}#?G#_bOCTyn9#%SY^4rOkS;YDkwjSjKej$tv7d7zTL=&L{DM57 z5>J1hoAHrGG{vvNKs?=2l*tP7tV=8DLpJSd!cvq-B!O*`)D{r?Z-R_qw$^- z{?V2Ed4Fl1Acd3^zsVW(B8-y$zSAt0vyZp8LMwoK0c4PgC4{5u%3C(~iB>3&WCAZl zK>(fOFKN5CuaT?jQH0taRw+-3i}Qp0=#?0KYMhQqK*Xr2Vqmh&tk|c}!QK8oia8Ly zFwdlS!iXyd)OeGg~m*U@klY{-7a}TAoBFTLjnIxYNR)9 zyOj69iG-6>sWgI2GEBebE;1?q%#bfYBUrTm=Gmv>92L8pQ3ucTK*D zT#Hh@HsXa&z+tp0evLlCS6ol%rqdh#Dzu!dl!G0s$7vy7i`CZ{SaoRz+gs3K=e?Q5 zmGAUXS{s4tCn#n+H3ZU7=8_nKF0Y+&-5uyLI+RWnjvURndzqgIjKhzTaB~uOD!UBl zbAmmM?0hHQNQ#*2oJLbI1c%c^8PvIcK8`1o)eqpc_|NRoxrNY}t9$;8sw+Y4u(8*C zC}rxcSZl-~`kFOQg4Ga$3faH+7A8vQNeXLERz%4+O*!7d99MJ8C+`F$m^+Cid6gge zv<7CS2y3+E4laT2Z%=c_yp&2ybB{)*y}b3a-L)Gz9t!!xgG+vyMj3y~MV=M;Uu(W) z`djAT1{AXN|CtfCeYLo1>3z5JWCW5=-S#3hjVb!;O8Y(U`X zoddGh18}d&T~kstT=!PI@|6{qC{B|Juhl8uS0h7TWr@7DK;WG{%>4dK{HG(TF|-nE z-wOUD6gw@z*F15%l(UX_Zf!8Va@On2^@d+)C=7E1clYe*Lvoy=z~J=1b=R88D($PX zYGWP8j;lY(7}lH|{n!~k?Iniel)a_vkSr1>mOXtN97p zbthwR=(NjfxwEm(TB3B`Xb2qyD@ypDH~vr*BzQ>u!vhQH($q>RVkhow>z!sSe8Rsj z-pJy#JqIiCo;3wFtGAdb0g-EKYAbxZZvo_0ZqAuM82MM4(yIQowu&Nagi5&>$-80h 
zq4!Wve5H*}vciVGOc~j7(l{dHKCyb)qRz-CMICh;DQ6omx&(0vJaPsG3t}fzKJCpsR_mSjRwvoFOMs2nozdGlCJzgb>ZLNOu4Uxu z&X4Ji)e8k#2SF0h6N-YzYvDa=xKH<3WW}LiwFqeeAFT54&NrDU?T;1Yavk02XFI(r zbd7;wXS3g#$hGozT<89g_U7@gUJSA)C}NVzwwIl{HP(TGYgCCN2J{a2%uNp>;`dH) zUFShf9%H~UxKFxv7h3jS$=22u?qX?a36GvI*($i*ybnK_I2i*BFWT=q$Gv3ki3+Oj zAI;bI)^Crrx%P)ZTe8cHWYGktD{Y)ePwo3vVxP{=Vq!{U6^$%jP0=-}+J0GDWIVaS zx4d2{{@Q?%;b2SqWTZApC1y%&RL&=Dl46|KjN!82m!se-ZEbXrnM1dfKlgk&tiW`}jkLsk+5cwP8x7%{eAM9=m0;zCo=3)+F?2!q@?P*+dzGy0hY}20 z=gCi|l-^#kHVC^g4Q{u&a8dMLi{!5ovCMvgix+?s+IEB*?sb%BQqj?w8t1&3yFVS= z;FLlssJc{(@m7H|N;Xn@xIQ0wv^*@$G%Lc+!>9Jbq*q4}cjk52D}cELV+{SLLbQ%; z^Uq5WJd=@^uRmQRY_HgQv+eWFHrMO>v!UUrtO zSSp+yX?bZ?JNxFfrunK2nPDHL=3Bl}Ny)al&0Vc%FGu;st69F0H5&F>3SN31!Rw)QO6W}1l?$hFtQ0(_Pi()9P^p>niX zxGvv~zTG3D=>jyR&mi0K1K(ed@bG8R4@H&F;?p}Tm8FXQ?ZtXldtR}3T#DcFDnRF$w7UcE;IC)>74$qLzqr6QnFrd#GYmZ%$F$Rc zOC004wqWL@Um0!yk>CAB3-HMEJIu7O)oMQziV{N1;o*$&V()`|u>! z+8z`l435ojqegpETt8^hyD#{l*?iiJ;*RiXsKI^#H?u^f<Vuf!J9|OIA*2%AbJL>7RJ_WFQJDbK~XRX z35lv1;UEzApT9&?5-`Dd9|8|tjk-;i*~RJSj*<1);p-SmA6dl#tc}h@YTNOpdNVtP zg(`hp{h(3+he9Z~8Xu_LB3ne>+4lKegZIo$oIX<)y&SGqVKKdx}9yVs03jPg-G7*^2(d zq)PcxWDR(V|2aw$WzET8@B_1gMSI7|699cl($fdUtXXU@9~nlFDxGEt4AO7DYY5`I zf1Ph-iePOF*gM2{jrYZ?kpm{nz1vBmoPOf;PbEiG*8?Rb*O_!LsbLvMfkuy(@HtiB z+i8sOpIIHqQ=NPSNc3W>M=5AsMyWQxwBJYC%0U|;3kaK5P+(w_Gq3?(1@CfJTU%R$ zn5}a#=Hy=Y%#`Qjssm;pgqc%D>l0o*?L<=)<~nkIdI+cEv^#_8co2H&qBjwpu%8Do z>7k+rSNadmLzTtcye`Buc)A?uZw&$NkMMa4nbUdJ;8Hz8SoSFip zm+l(qIg04R2TrpMt*4+7=#!`eUQi|BMq_DKVf&Jx4^XF96ZMk7b|px$CqxN|amqGH zEx5h`?XTnc%xg?oWF#ddTEMRvPMzsFIi{eerE=}k;M7!|aqry}MJ;=EOa4jlah;xZ z6+YE#j+j8djA+Nk<}#RU;PC>LJD_Mhm9Y^|O&dpUbG3uAAsVX*&;cia7lt<)$<-G0{x(obvuZZaZ}7#~d&}jDSUhJFO@zEKtddgbH+(l;^M8 zl$NIU4*%YX6$oxhck&ip7|Siu1WsP)X+r%8;LBtc)GdY9j~bEM*eWChlT7RvR5z)_pC`*yBJ z^MPef@Q42NMY}>G#yoD{;Ym8~S5P<6QD|MQ(HZm5)#r-Q&?E znZxd<&Z=ik%i_c72vCeuNO{*^8wEC}&Z8-0OktA}zhOTF5AW*S2?m1{xR>9buOsRi z>+LwIT`jV!b%bI+Onx+|J3$2K|I%7jrN>PWvHONBQUz-$&HvNcTSvv+Jbj`>2_7U! 
zkVys&uE8M$_XL6k_XKx>ClK7-nc(iOgS!NGcO77WL57(fp6B>5r1$h=JvYSfgP;3C7g!t@tKESP)l)sB2S60iJ+~B>HWY2+|$!4r6 zW^VWQrKZa93Jr6!6^?Bzx#0i`D+@a(UP+Q*lh^)X@f@t>ptmYab8n?aY1@GIl&b^~nL8!|gm#Zis?=y|ag(?P*=yd+Rw3 z{pk$>@2uD$PK73=6$oAsX0p}63+R*%6!p5OqS9Ew+uq>hybXoeSpX{OxpZ$b52e-6 z@G($#dZ^_6|4vrE&s#QgW!e}Kb8hQ3nS%0^x@2LIAi?*u9bSEuYVS$#b4htnX?o=Y zTu@)qmy!vF6ABB4bhFzC)Eb#D33!ptZ1LzJ~akB2i_5DIkPF zrXEXy=34wWa1u@Mj`^0v#f&H~%ISmo!eR^qZ_@rWYQ<&>C9h@1sN$$9h1{=|^(0Jm#pBzUrwmI`RdgxiHnO#<)Q6!nIIJCY_39d0T6#a zF#zA9t|Q8eihx<>ATRaoaft^&q^GN^D8bfp>Z@Mdnl>9;c6NU5;2ZVvjP0}LDN46| z7SEro6hcLs=3l`GQFdDT$>YWQo^L#yxp~g8J0OG>w73m%dXd2h1QJMA9&V}7#SUzr zpFsC3Ku3CrhQ;HA0TVVboRe^>@y+8+?Bj;9-Pi$>@|JVCS=z{oK9os76)8;@d%w-* z^ET_h*ge!oGR6-4!!GLfL{|%4l;{@@?}mQJ^T{+MYtjuK8hcVp zUczLtvGt4a4!ZI5uFuME48q<>0A%OMSK7K0lq403pRq|MJ)AG&zh%?TVs8>?E!9gD z^@Vui4_r!gTbWJ*@^|udx>d=ZquiRP1lsFBu_s)}M`+L4W`wpo;$4NIyD;SGUofgr zNV3~*NT6Bn8Z%1MuVt-UsyaMh{comvw%{mlndqNFcO>8%3)g%!fscaK?zhrQiRUj2 zU-%Nq{opv)AYOmPQs#BBur1}T)}*5dCjO4tvPhA_m-@^Q%oJ0DKQOKSl!QDwNNL<( zDY1F2vS+;=h9`MepTIYt5eeRCbzIqXURks%hm~^)nCgMnFX@Mg58gVH7ht~jga$rT zU}%?=K^)^gWR!hy?H=^%+!EmxMR}dj%ASoWp)DE~$G%4WgCfKm8st?^-K4RC#lH;= zbf|`B`>2vUDEdkH@!VG0%3}^GERgr@ z0BQ_XJ-wCnuE0{the}XrIv4|B-HQ77(<>J+EaK|Q#WK*ql!fK0?ezC1pJm`f7qr=* zTdS;di*i2*XMN#-f11{SG|0L!kx2uOL&!f3Xbq3(Ez!|t4LQDv^sD(~bA6?8099~| z(e%2>dQ0=5E5Bc~e(iZ`0{w&TuJsVXT7g-10%f_OjYs$X9^G&wC6WOjmU(7*oYa(N zFmv2L!d5z!`a?Yr+s#SkoUlWscj5Cg53%lvXrd>hf=(0XcpvUbifGn=mwss}mxEEG zMR(3tj?5x5NE>@w+tKw%(10CUqCm#!pmP2+_5L@sk9*#-ibrR-^YUGh609He*T%y~ zyK&Sm!2a{|K3W977;UXyoa=%!}@zbn-1h63&+Sq`RfG^X#efGp3dPWxY>XT=QKBu6*ud66VqJ zw|HQ*)s@}}pL{m>pNM*wPr6}~Hgd{7+INe7U0GDYuNk=#N>ZxXSLq-rym2;Zu?bqI zrm`L8YQ3W%^H5ecWAB9CEg^U5LrOAnj~>Yd z8*0qJpB>g*ha;N}wvZVnbG7!ZJBaUYY)Y`$&}XV~CM#2yf$8Rm-Q))1 z&TCP$pRM=@n6sv{7pbXt_4M~9VcRAQ(2sKJb!O+{7!Hy6?7)A`vjYSeN1Fe%E0uuKj!D?bqYqcwU;PTgw$-7 zvfGYcK{Jj@#v1nOC(LkN6Ki<(g~L@5mk{4>4z}mcNLR$IYbnxmb4NI9%XL?T_shD1 zB9303Y+))Elm3g)6NIbRtl<_?&jeBTV^mvyA*UaMSSt9Co2syrhaG{TikPwc(_xg(*UTRV 
zUk!cj{Wp0#T8u>Sfb1sfjpU@XKm@KO>+Hu~*@Y2oKY6 z_;$p1IYySjeg^UuYxJ>xdp#qLX}z1y{jbXym9_<-hU^coiMao&)PNJ{%WYOK9haNw zzW?1+MUQRR#BCJ64BsN-{^U9R%g#{-0g*IUA|H@seWlA7-Pja0{>_TEX3P`Sm2*#He;Plxz#7s1u zy;9bpT%GMm;aJ9@8wuD32`aBd5ixHLmQoHcpUI0;OM8yzO=OCD2IBHeTZXESlv`@m zdp8v=n*Zc&XCOc{HduTfh3^g$u^OEW+sAqgPJjdDtxpL@L&}rqFzK>oqplnD+{eWj z6Y*Vdod~0kzM4BR%{E#Bje252K~}0a0Xzo@(yFnKt#7(koTE@*@%Id|0|7@mf`yQu zqr3i;H+6PuWySmIXuf2-eEFKaP(JYpbHT1coyN$K7wjhKW=VeUUf>_(&CS^;ss;8? zr`xwb1A7S6t#qVBQVF}CEH%#RHOt;DR>aaPBX&hmfmFoosUs*)p|1h82Lz5w94gs* zSZpi>y~Jg=2Rl1sr-L^&?z~oSQPsrmSM1s`pSDT92{?w62fnK8UL4Vgo%E{l953=Z zHzxt}E$qzRr4I3$6Z41bbFrY(1g@%D*ZHw#7pA7TX!eusSG+vxlCoaq(~apSBCR$i z?6j$HYDp~_eb|H1BQP;O2Pw~Lb9;aniv^?E=jQdQ+Fjam<4N;JSHDYbNlY=$ZIlJh zwv!L?GRv+ym`x205%~QbLpHy;0h7qm5LDT<*nLj}ZGM#jHaa{Tu1({l`0Nws2)%9o z;OJQG=O=C`zWi$YcGDNVtb#p`a~%KM3{$IY(NtKP8s3_~m7z^b<;5}hhsvu?&iGKHYk$<=v?6ec*lHM5yqTy~}?U6{~u0;Jk_X~Ycf4yaKC-AE~YT(TUm zL-2!tM$2?h8SS^YVT{@9UJW6rx&SI{+6^sx3V%y9^gKDg*eL5?3t|i)Tl>2v{gkL| ztuos)cCoHEaZut@-Y~xypPK5yBp!i_>+0n@)x%V`xKhT9X25m}gV-LwTuWfjkW((= zgez>>Xa44*;H|>Mb1e>V=CxDf$J(_mJIjbUGcpPr#Py~+tTVUtw%}|_R!8FE)Wf}J#oQlJ_uvslSV{JU7#iHOJN=7#RDMvZ09A(jfpfnHJ zO(V}6SJ;Y4H5=))69Wr)#inShDN=_*DB)qn#~FP=vJNP)%hF4^{o&J!+E4|0y|mc@0=-- z6?pOz-ClRK>Zhr%&4%!9sezo{jSlOnYE+X6!}3)7#(>3@X6aF#gHF{xJeurWquH^O zqQJmA5rd)0)xz0|AT*br6^Bje=BQ7>Wi2|NtdH^{;x|_W_Whu}HHmre*HH%+8Y63P z_BuPQD<=P1alXnaU4KCnFr%3a zKH?H_8C4ajOQ*40U>T~04ds=;IGHpmm`>OC?QQshtukjP`qOA@YBxatR@L^#?ozn} zV=~`gaM9^}><3*=ce!>A5Gt%Cm*X1|XVO})kGO97wwQkSL1K%X91>gY2yKc%&%ksv zZhU^+>=vI@QL~3$LODm-M|K!Nrv21{-Ju&aK6~I40gP5y-=sTJkO}s296#W7NYBSj ztl#4~1e#~=4kETM4Z-TE8xeGUd}`3q>@+c8$Nb7d1FJz zGu~gxc&xnRnfaVPid4-#>;6dn{fkqXMLU3I_86 zX4C*y?DpMPF-(C^i-_t@q1RHQdve^hiq?rFoF|!Q(Q>I&&mdvNlW}|73{bJ2YFjhsm<;wzdyqzNbZz>wZ~E&J{pOdgM61>?Tp=ao*4mX@A}Q6-rfXl z)I*Vu2b$xPMwtlzAziG22dfc9Kf}e(A6$PZGT8iII@6#4^*{6;_y4M^W0eb<0oFC) zf;GgJX_jo1opo@np<9%`AZI3GpY`Q%Q>m5~h~hM<&V3!Um9DpTAhH^xuj00rNF1DM zyR|tm7WSn=J+x}*h1qh%5`03gHQ3yAFiM)9RurGvG$^Rw=&xr~F+$3`MrKRI&^SXj 
z3>u@!zffi{p6C+?HeP)s&NVg|_m-NTR=J{@xM5Gmu(uPwNj>3+V7vYp+*IEa1GVnR&AZC){2shLwLwpXuM;> zmKqk-qarap?s!{oey>T`(q++p-E~vTMUAO{Q=NAkqV1kpO*>&!>uU}@5cvOTw5a3O zV9%t2Y3J^}tI{Hf$K*7@x!l%OD&Of@{&qBYEDs7#{OmY# zB?-m8ntpJHp)Eg_LagI$6w6OJhcs;UeaUc09#$k15aEcqxAFMySNr7fYZjuidN)rg z92U!w2>z>iHLoXfF_2>ia7|VyBPEC>f-Ha1(J?hPMgr@Dl0)^7i{0!1|0T6DQVO!; zT`Lit)j6dd+fdhx3-Uyw{oS{TjZJsImD4GvzT~m;eihsjJQ4wK4J!^I*rcZQSz`xY zwHbEY_bBGS=9g~%{tK#*f-@wVs2~>(Yq(IXC`XAmDovLz3bpV~P3o>6kH`<`PZ*+} zKf|fnyP7@c6)0Xjd__b8tC+Ys7{Cd1zT;O`Tk8>dq)xWBYc!>X^mN@kv?wumnyBe} zlFkNymsTJ;Ec={6nAGB0%sipkmxvOly+82jyA%s2cXDlv301P>sFyn(1Y^*V-lmBq zUr5qggN!nI^Y=#&+Xy-c%s+$X`wt2#Qdj5y%iDYQV*rA*!-N}y05BWcD9SIh&AFij zvH`|uaa6GO2KCG`?}kee%5JBhq{R=J9!teWfBUN&a$IK69TPDOx^C(;ov*zo)aZjb z|9O%hz6DGSVWnhrbbgyXJ}1|$Q=E4)qI145tQbGCQ_V6l9?GfuF+F&6P7%v)b=Sn( zge;byx&>75N44?3=3%Wq@cKskTpKUkB&Zvu2_Un~C6pveeXS$m`jPGRFS;wzJ*GAB zNNphcV9RM z0#+M`IL0UUSi8rn+t?_zV@pE+Oal)?S3K|+kh^&Ozm+WWPP4HnWaf#dFY+(<* zn-s5y+3Wb7Ey9l?xzQC^fV;m82~}bMwLSTiPn@(#ZvsVo#mYRRtSy9W+BAXOdpJ2i z{&vw-FQ*y_#b-=?+pFmW{Rg!j_DrcY6s7q?c_EO*xM&e-k-6heffsnncmi%MmD?MJ z@UsA^^Dm56CHdnz`S(qO25;?claC(xPl&YDYtdTO6_&u?{aOtGc7oySDa=JVy@2JPDsE7h!Jy+w8 zBUn2ILb-P7`fwwIUW5j{Am3qfBobZ|w7UK@c6XeOQi5w1V2mom*gU0VAyoa~VA->f z&7SSRe;Dp;IUi)2iFnk+)2HV}=?pNj9PNMJ1gjxs#8e0CnUH~^&weMe;N3%ar~?mh z!h2)yzP`(|*X58_%TSJSk!R zaMFaDS3UKKa{*uWeqiR$FGO`(?5+Z`+e9HPeaj@_Hek zFDhLAy2hzOS6pYd)U=m`x3Y*GyXv16SFea3T`qwU1nVaj=qzi0Cr7kj@o_i!LHI{* zm+=tPyu#~Z=Ju^BsQFS5nJQJ?`aS4T6M5j3i&e){ zpD=?7Yu(tJ4%a+&R5?BOCh0Lvb}pswla>f_tceU=ebY=A)#a51eRL6uE!Hfmj_;6C zs$Hps>}FnY<@fwH*_RZ-_A$?tGC&DL4E3eSFGWkkUcU#+5mN~KnY>$~_SEw>Yr$GA z7;!V6z5=8^b+9u@bH9s77dCg#8QJ#IxgQ;2;)Pi8?J~Tu-fCB6o6bqxQACwXHbI>P z$v1w%C)K+3>GMR&5+;!kQzEg;O|Is(P&8lEp;k?7<9?02ezGKpQnTr&mf2-ZQa)9| zu2KC&dPN^>lfxk=ozwC0U&z^3J3!IseUcvXK|%j-#Pxob&_PNgtI*n_xh2XTFFf)~ zNbv8x=gWwN=hU-v_sq;I{3NR5zY&&?kMb^**MgqK4r8@KLwB0m zb4i>sReMes3+*S)JoxNp7FPlRo*r;rCu@~H-@R8YcHb`3IgB&wVe-!sWmCjSTsh}& zvDdemS)_GGPNgLR1(6M05Vqla8CAC<>`RpbDUn#X$!U)vE<(ob42F|O9q-Y$PUS^w 
z855OymiKlCw2y(ea9u9`(*qFWgIXQ5pSaD44JxqfU~fW{Hti_A(&!C0 z_J_{Yz)jt<@Wh=I?hnpwy>{T>fhQ-Lm$P)FQ~R>zR8*;rbeEs;&REhr$!oRM%NYvz zvKC9R#ss1E6SHk@xG#~w1mE=OhHeF_QmJN zdWry(1@x)!+v08wA>i$j^bNlYKzmWeI^coD_SzShDR8)#^i3+jZKwfB>~q>U>g0W) zNX6qd8kSV%Llf0#rE2zB+v?z=UwarHy!Xj)rY=B?5Jb6*42erGzqNB-87taqtTxXZ zPU5gUDT&%C)#CTyCy01euG6686+~^6q`rBk%2J{^EOPouUh~F$qNByA{b8QyCFr;Rq3Rctl3N;|#o2gwLbm-w@&tAs`#6=4627b+ zE_ob=Wpat>`q?sCAS&wj7FZ6`t9utx8EM;Xc=QVQj$tYaw~;%!r3>GMPST8GS38?S!;+N5<@&Ez1ACHSHO+^9XwGsmb-cn+9TDG;^`iDmHZ6XLbp zFu+I$d4ttd-MafHz0M!K^i!t*&4Z?RaRPKSO%33|(j{!DdEvcFs<% z3}mnJ$$Re_ldJdi7MAxT8=%`xQht1DtmG7eg!C)*^`lBIuZkDg?e6UVB7D17(vgPL zUNpqgZU#qlNobAK<%=;K=J5+_oK8~(46AnsP?C`$Mjk@^+EizKBI34cS+7CiH{bp& z1!D^mkLjyfMd5^pM7$P>DstrHbZ{1m4jl{n($|vqGqx-EZ?g}^JndQtLr_3p8G7OA)nG>fp(8#-!wtYZbXN-5>C@=E^0h^_p=@{*;u-;RQCP- zxSB+jEMZO5n|H!4Dm2+FbGcw~TO2evE9paJRaCwUW_C~-(fW-FuB3ue${XV`3-vM< zAr7a@6fz8vGG~*+^^SGTdVCx@|UwfTIg}wP< zi^fPRz?)&w`4dsyH=RF$T=>CvqTDyi-dKr8@Dr<;j~^u>fRO%izvVIdah26ipcKkQ zOZ=`2gXB@Ird*dXjc}W>h|r6KYKf^yXy$Ly@)L0#m8DjJfTD~D=a;d9~Ez*4dEb^_w7OZuaSS<2ZBZ8K;l`lzYZsc2a`bp=70 zFv7M?1?*3AqR2%gKC!<~=~p{(A4+`mQxI?2_CY(K&34LYCh$(F??;j~dY>sro)mhC zdbp+!OiavJ6)VRWbOvecD>CMbQKt)61QFHT-k7zfQ4rP;Uhns&E~?NBZ4nO+^l3SNm5A?FmSE6`URpbu42f<%a7O$;Ccl5&cWUyNTdNV)oa zW^3#6!oT4KOXN!G1w)GWfMBdR{Ah!ccat;8i`^NS9GRH-vzo=l!tR}(N^dHwb=Gsw zHRQCiTqY6ercz@`;|LP-)K_yI69>JDp>^R^Xy za$ea+OA(M6(yeilvT3IrGoR(pn_3x~!8+^Nn4jddfLXmESzf#FO=)+1nRK5SVlU&f z$k3~V7dzB<&ghY;DJ(2BF*4g3ZNC_Mrn# zNXMQsTLr4<>?hla$BYIu@kd40O)XTt)K)qT^x_gNm1R|Vh~M)cg>!74sEPS)^U58v zsFg>`_T&A44V7rlPZQFjt=WkEt8fg4m;*8Oeh6$T4Aw1a)mJtlan~>1-YJ{Z<#@*l zcOlF4DbNHuQvQLqXxemCq`VM-amtE*l_BVcRX=4Bl-T$ga0R;|M5l(LS8#^?>#v+^ zs=oEopA~rC``c}4+8+iVxHo-eE+j2+D^q?}@e!>d^?&hID%}0Yu~}g+^H6W;fvhP0 zMboH1$pimY?I_&Z`ybVVih%h4?b>c;w@^C?a<`D%Ni?&RK5s_lzvn4v<*hfJVCHi( znSrBfA+Fr`<*O-Ya9_BsgaZA{Ba8fG;v%=Lb=WnSJr zv%63)>B{aNN#K5wJjU}&^3eK>3+x}CJn!fQo3O8PZPi%^3lBQmFdjzwS@vrS&@Pn( z9%Q?vHQLe7b{h#yA#^R%TOU<(KE6HVxw*NEIG2#%_@I 
zRrgwF#@Ee9DE&ANG1p&d$qs{h2m$#F*KbXr3;X2Paf(o5nm)|kduZzV_t>u@W3zA* ztNq+CIXOkA;)U~w+f-w737ZZKE<^@<5G{}(ObxVUd)K65yFu&a)kTm;7onnOdH!?V zue-pF1#3{mbEO!4yt2~`i@S}jT>cE@nHAGd%E&y^udzwN!%v2h?AT0yq2f4q0(pLr z8J--#K&Su-6jBDDhHJhXvb8xEiH-CIQ%Z(M7Zkd z-*@y`9Lq`ro4oJS`P{L|_o*xS2A>%B^*-&V^bfGb#6o882JcyU&ClS?*fMh5g~^ff z7OjqV&gu_n!be><0ypU4-}%(6^s4gaWF)!|>r95Va^E*#Q}Qql!rE@(lF%x-^4gr$ z3d=`pku_LDiQ zriKk2(8$Cv*4VVEbg}rxQs*CuT+7%tIYv2gaR2%w)I-{m6e9=8&_k5M;%R;UlzE6u zN0Le&<=BnuI2RfQsC2xvMQ@T0kfI66w3{>bzZ;%^djy-mBZxhvb0 zkeK>ZRjNY3#UVAC#MH*j>)uBjevz=sMhtU-k@N6>{i*?<#s?ZvkoJib?cuLDB!B57 zm{fkX079;3PlrzI=8cSLYOC1Ztez2?0XZSFGY;My7{5+bf+Y^fuCM3V=l!XA$fsAy zCjAU4g6s(@kEXR6=jgeYpxyx#-I#3tHHsVec7JpfwziIV{`z@yq{|hW8wcd6hjqm2-~!B7 zTs9n=U6PK|`s8`aYJ1>M8q0uK0Tn5yy&u$l05x-kSzXQMm6b1+RiqKz1gO*wYS4LH?#L^^z*`1CAqK-+)WBwE>ui~oi6hwADF zu;2`^(Mg^%W9Z6KZSM5^GMm-K)|R;xpZ{6zvuVP#msF#n`tX|JBqF`0?2oVST;iDUD}f0V+iiwj z;u4hRH~eYU*+EC0d`xT4IGwU*hA})?#e+udOOQ%w|vh^a=5wR7=Fwrt*Oo zeHlo&3u2U_Sn8fhL+83!Pxd)dj`U^^V?3*|XL~x~IKI_LH9H4af=1(JFd0JhSv4Q0 zEvf-(M!j3|NyqtDUO<_!BvSwyxo7fvRRI0$WS9}IN zTT|&RGU62m?qcp^8Z_ znG;<$ieAI$S9_I3S$XmC2dI)s2aZODZZ!`%T_#SUvQ>Oe4p|2hXWBQf=u;LJnDfM& zs}qN{&*m$O1AEhjg>{)P7lhGAc2u3WA*HO>pw93x_txl54;*qb#=`l!ebw}FlNp~> zP_6iCWey_Y(42;Oh}@$1qAx7XZab8B7aw`Cm$3k=cAZUTX0aX#2=PTFg;W(c6z{ya zc)gLoWoe1$vXX}!ZYzaY;E{z%7h$e`nXPrwf76o z!Ru#rDkb$Pv5C7be7@ve@Gxt%8NXeV_ zpBn~*pFc$>`xA>PYNsd|G0w)m!Baf7biAXjb_hmicWUXa+ zC)e*rCq`v-mg3qN!QKQwixP$f*HKaL9GJg=57z->5pMdH4D&-Q^w}%Gz4E*_;U?K@ z@}f6g^`_JLDaLk5{61)<~wKJIlOhv@;->3CV|7+%}7kWD|l`;r>r$UNyrFtz^zOP9F{t=S5PzOaiQ*EJUjG zT~8Yd=O;b_gTI{YlNYGS1OSN@4E{w>-Fb>HEIXx-55PaRmw=4rC?34-#p=N&nAZZ?PBW|=b8=v8y(`@exc*a|;So})nAsq~ATuDjyH2g|8j zE(ed{{_*{i$5+7ua=zkKm~*MIfr2L6{R!J;Rp=0=jS@TZOI_caNYcU>U1P<6_k=Lc v{|t`k%6>?0e4zh(+vDHg|DVki81j(=X65_8WUGY_KarJGk|_IV;QK!SN%p1p literal 0 HcmV?d00001 From 322b976a087535c1710f15d87684f70f7787e150 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Jun 2024 15:38:06 +0100 Subject: [PATCH 0877/1003] chore(deps): update 
dependencies ```s cargo update Updating crates.io index Locking 35 packages to latest compatible versions Updating backtrace v0.3.72 -> v0.3.73 Updating clap v4.5.6 -> v4.5.7 Updating clap_builder v4.5.6 -> v4.5.7 Adding displaydoc v0.2.4 Updating http-body-util v0.1.1 -> v0.1.2 Updating httparse v1.8.0 -> v1.9.3 Adding icu_collections v1.5.0 Adding icu_locid v1.5.0 Adding icu_locid_transform v1.5.0 Adding icu_locid_transform_data v1.5.0 Adding icu_normalizer v1.5.0 Adding icu_normalizer_data v1.5.0 Adding icu_properties v1.5.0 Adding icu_properties_data v1.5.0 Adding icu_provider v1.5.0 Adding icu_provider_macros v1.5.0 Updating idna v0.5.0 -> v1.0.0 Adding litemap v0.7.3 Updating memchr v2.7.2 -> v2.7.4 Updating object v0.35.0 -> v0.36.0 Updating redox_syscall v0.5.1 -> v0.5.2 Adding stable_deref_trait v1.2.0 Adding synstructure v0.13.1 Adding tinystr v0.7.6 Removing unicode-bidi v0.3.15 Removing unicode-normalization v0.1.23 Updating url v2.5.0 -> v2.5.1 Adding utf16_iter v1.0.5 Adding utf8_iter v1.0.4 Adding write16 v1.0.0 Adding writeable v0.5.5 Adding yoke v0.7.4 Adding yoke-derive v0.7.4 Adding zerofrom v0.1.4 Adding zerofrom-derive v0.1.4 Adding zerovec v0.10.2 Adding zerovec-derive v0.10.2 ``` --- Cargo.lock | 316 ++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 278 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 36508e261..1c10516b2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -507,9 +507,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.72" +version = "0.3.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17c6a35df3749d2e8bb1b7b21a976d82b15548788d2735b9d82f329268f71a11" +checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" dependencies = [ "addr2line", "cc", @@ -821,9 +821,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.6" +version = "4.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a9689a29b593160de5bc4aacab7b5d54fb52231de70122626c178e6a368994c7" +checksum = "5db83dced34638ad474f39f250d7fea9598bdd239eaced1bdf45d597da0f433f" dependencies = [ "clap_builder", "clap_derive", @@ -831,9 +831,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.6" +version = "4.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5387378c84f6faa26890ebf9f0a92989f8873d4d380467bcd0d8d8620424df" +checksum = "f7e204572485eb3fbf28f871612191521df159bc3e15a9f5064c66dba3a8c05f" dependencies = [ "anstream", "anstyle", @@ -1148,6 +1148,17 @@ dependencies = [ "crypto-common", ] +[[package]] +name = "displaydoc" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", +] + [[package]] name = "downcast" version = "0.11.0" @@ -1676,12 +1687,12 @@ dependencies = [ [[package]] name = "http-body-util" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", - "futures-core", + "futures-util", "http", "http-body", "pin-project-lite", @@ -1689,9 +1700,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.8.0" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "d0e7a4dd27b9476dc40cb050d3632d3bba3a70ddbff012285f7f8559a1e7e545" [[package]] name = "httpdate" @@ -1779,6 +1790,124 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" 
+dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f8ac670d7422d7f76b32e17a5db556510825b29ec9154f235977c9caba61036" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -1787,12 +1916,14 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.5.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "4716a3a0933a1d01c2f72450e89596eb51dd34ef3c211ccd875acdf1f8fe47ed" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "icu_normalizer", + "icu_properties", + "smallvec", + "utf8_iter", ] [[package]] @@ -2061,6 +2192,12 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "litemap" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" + [[package]] name = "local-ip-address" version = "0.6.1" @@ -2109,9 +2246,9 @@ checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "memchr" -version = "2.7.2" +version = "2.7.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "mime" @@ -2386,9 +2523,9 @@ dependencies = [ [[package]] name = "object" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8ec7ab813848ba4522158d5517a6093db1ded27575b070f4177b8d12b41db5e" +checksum = "576dfe1fc8f9df304abb159d767a29d0476f7750fbf8aa7ad07816004a207434" dependencies = [ "memchr", ] @@ -2902,9 +3039,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" +checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd" dependencies = [ "bitflags 2.5.0", ] @@ -3525,6 +3662,12 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "static_assertions" version = "1.1.0" @@ -3593,6 +3736,17 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", +] + [[package]] name = "system-configuration" version = "0.5.1" @@ -3725,6 +3879,16 @@ dependencies = [ "time-core", ] +[[package]] 
+name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -4172,27 +4336,12 @@ dependencies = [ "version_check", ] -[[package]] -name = "unicode-bidi" -version = "0.3.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" - [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" -[[package]] -name = "unicode-normalization" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" -dependencies = [ - "tinyvec", -] - [[package]] name = "untrusted" version = "0.9.0" @@ -4201,15 +4350,27 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.0" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "f7c25da092f0a868cdf09e8674cd3b7ef3a7d92a24253e663a2fb85e2496de56" dependencies = [ "form_urlencoded", "idna", "percent-encoding", ] +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -4564,6 +4725,18 @@ dependencies = [ "windows-sys 
0.48.0", ] +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "wyz" version = "0.5.1" @@ -4579,6 +4752,30 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" +[[package]] +name = "yoke" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", + "synstructure", +] + [[package]] name = "zerocopy" version = "0.7.34" @@ -4600,6 +4797,49 @@ dependencies = [ "syn 2.0.66", ] +[[package]] +name = "zerofrom" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", + "synstructure", +] + +[[package]] +name = "zerovec" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bb2cc8827d6c0994478a15c53f374f46fbd41bea663d809b14744bc42e6b109c" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97cf56601ee5052b4417d90c8755c6683473c926039908196cf35d99f893ebe7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.66", +] + [[package]] name = "zstd" version = "0.13.1" From a88082aa40c99a1d59082b396210e3f2e39f68df Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Jun 2024 16:22:43 +0100 Subject: [PATCH 0878/1003] fix: [#893] enable color in logs --- src/bootstrap/logging.rs | 2 +- src/console/ci/e2e/logs_parser.rs | 57 +++++++++++++++++++++++++----- src/console/ci/e2e/runner.rs | 2 +- src/console/clients/checker/app.rs | 2 +- src/console/clients/udp/app.rs | 2 +- 5 files changed, 52 insertions(+), 13 deletions(-) diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs index 5194f06ea..14756565f 100644 --- a/src/bootstrap/logging.rs +++ b/src/bootstrap/logging.rs @@ -46,7 +46,7 @@ fn config_level_or_default(log_level: &Option) -> LevelFilter { } fn tracing_stdout_init(filter: LevelFilter, style: &TraceStyle) { - let builder = tracing_subscriber::fmt().with_max_level(filter).with_ansi(false); + let builder = tracing_subscriber::fmt().with_max_level(filter).with_ansi(true); let () = match style { TraceStyle::Default => builder.init(), diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index 2a1876a11..ff4028f80 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -1,9 +1,16 @@ //! Utilities to parse Torrust Tracker logs. 
use serde::{Deserialize, Serialize}; -const UDP_TRACKER_PATTERN: &str = "UDP TRACKER: Started on: udp://"; -const HTTP_TRACKER_PATTERN: &str = "HTTP TRACKER: Started on: "; -const HEALTH_CHECK_PATTERN: &str = "HEALTH CHECK API: Started on: "; +const INFO_LOG_LEVEL: &str = "INFO"; + +const UDP_TRACKER_TARGET: &str = "UDP TRACKER"; +const UDP_TRACKER_SOCKET_ADDR_START_PATTERN: &str = "Started on: udp://"; + +const HTTP_TRACKER_TARGET: &str = "HTTP TRACKER"; +const HTTP_TRACKER_URL_START_PATTERN: &str = "Started on: "; + +const HEALTH_CHECK_TARGET: &str = "HEALTH CHECK API"; +const HEALTH_CHECK_URL_START_PATTERN: &str = "Started on: "; #[derive(Serialize, Deserialize, Debug, Default)] pub struct RunningServices { @@ -59,11 +66,11 @@ impl RunningServices { let mut health_checks: Vec = Vec::new(); for line in logs.lines() { - if let Some(address) = Self::extract_address_if_matches(line, UDP_TRACKER_PATTERN) { + if let Some(address) = Self::extract_udp_tracker_url(line) { udp_trackers.push(address); - } else if let Some(address) = Self::extract_address_if_matches(line, HTTP_TRACKER_PATTERN) { + } else if let Some(address) = Self::extract_http_tracker_url(line) { http_trackers.push(address); - } else if let Some(address) = Self::extract_address_if_matches(line, HEALTH_CHECK_PATTERN) { + } else if let Some(address) = Self::extract_health_check_api_url(line) { health_checks.push(format!("{address}/health_check")); } } @@ -75,9 +82,32 @@ impl RunningServices { } } - fn extract_address_if_matches(line: &str, pattern: &str) -> Option { - line.find(pattern) - .map(|start| Self::replace_wildcard_ip_with_localhost(line[start + pattern.len()..].trim())) + fn extract_udp_tracker_url(line: &str) -> Option { + if !line.contains(INFO_LOG_LEVEL) || !line.contains(UDP_TRACKER_TARGET) { + return None; + }; + + line.find(UDP_TRACKER_SOCKET_ADDR_START_PATTERN).map(|start| { + Self::replace_wildcard_ip_with_localhost(line[start + UDP_TRACKER_SOCKET_ADDR_START_PATTERN.len()..].trim()) + }) + 
} + + fn extract_http_tracker_url(line: &str) -> Option { + if !line.contains(INFO_LOG_LEVEL) || !line.contains(HTTP_TRACKER_TARGET) { + return None; + }; + + line.find(HTTP_TRACKER_URL_START_PATTERN) + .map(|start| Self::replace_wildcard_ip_with_localhost(line[start + HTTP_TRACKER_URL_START_PATTERN.len()..].trim())) + } + + fn extract_health_check_api_url(line: &str) -> Option { + if !line.contains(INFO_LOG_LEVEL) || !line.contains(HEALTH_CHECK_TARGET) { + return None; + }; + + line.find(HEALTH_CHECK_URL_START_PATTERN) + .map(|start| Self::replace_wildcard_ip_with_localhost(line[start + HEALTH_CHECK_URL_START_PATTERN.len()..].trim())) } fn replace_wildcard_ip_with_localhost(address: &str) -> String { @@ -127,6 +157,15 @@ mod tests { assert_eq!(running_services.health_checks, vec!["http://127.0.0.1:1313/health_check"]); } + #[test] + fn it_should_support_colored_output() { + let logs = "\x1b[2m2024-06-14T14:40:13.028824Z\x1b[0m \x1b[33mINFO\x1b[0m \x1b[2mUDP TRACKER\x1b[0m: \x1b[37mStarted on: udp://0.0.0.0:6969\x1b[0m"; + + let running_services = RunningServices::parse_from_logs(logs); + + assert_eq!(running_services.udp_trackers, vec!["127.0.0.1:6969"]); + } + #[test] fn it_should_ignore_logs_with_no_matching_lines() { let logs = "[Other Service][INFO] Started on: 0.0.0.0:7070"; diff --git a/src/console/ci/e2e/runner.rs b/src/console/ci/e2e/runner.rs index a80b65ce2..a3d61894e 100644 --- a/src/console/ci/e2e/runner.rs +++ b/src/console/ci/e2e/runner.rs @@ -116,7 +116,7 @@ pub fn run() -> anyhow::Result<()> { } fn tracing_stdout_init(filter: LevelFilter) { - tracing_subscriber::fmt().with_max_level(filter).with_ansi(false).init(); + tracing_subscriber::fmt().with_max_level(filter).init(); info!("Logging initialized."); } diff --git a/src/console/clients/checker/app.rs b/src/console/clients/checker/app.rs index ade1d4820..84802688d 100644 --- a/src/console/clients/checker/app.rs +++ b/src/console/clients/checker/app.rs @@ -58,7 +58,7 @@ pub async fn run() -> 
Result> { } fn tracing_stdout_init(filter: LevelFilter) { - tracing_subscriber::fmt().with_max_level(filter).with_ansi(false).init(); + tracing_subscriber::fmt().with_max_level(filter).init(); info!("logging initialized."); } diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index 323fca1b6..c780157f4 100644 --- a/src/console/clients/udp/app.rs +++ b/src/console/clients/udp/app.rs @@ -127,7 +127,7 @@ pub async fn run() -> anyhow::Result<()> { } fn tracing_stdout_init(filter: LevelFilter) { - tracing_subscriber::fmt().with_max_level(filter).with_ansi(false).init(); + tracing_subscriber::fmt().with_max_level(filter).init(); info!("logging initialized."); } From a293373bb271d543bf0f03ef0ebb3807148b55d7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Jun 2024 16:36:14 +0100 Subject: [PATCH 0879/1003] chore(deps): add cargo dependency regex To parse cargo logs. --- Cargo.lock | 1 + Cargo.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 1c10516b2..7a54063bf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4067,6 +4067,7 @@ dependencies = [ "r2d2_mysql", "r2d2_sqlite", "rand", + "regex", "reqwest", "ringbuf", "serde", diff --git a/Cargo.toml b/Cargo.toml index 418bcb3ed..8b58154ec 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,6 +59,7 @@ r2d2 = "0" r2d2_mysql = "24" r2d2_sqlite = { version = "0", features = ["bundled"] } rand = "0" +regex = "1.10.5" reqwest = { version = "0", features = ["json"] } ringbuf = "0" serde = { version = "1", features = ["derive"] } From eb928bcd041ba9942b777d150bf86b95b07829c8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Jun 2024 16:44:32 +0100 Subject: [PATCH 0880/1003] fix: [#893] enable color for logs It was disabled because parsing logs to extract the services URLs was not working due to hidden color chars. This changes the parser to ignore color chars.
--- src/console/ci/e2e/logs_parser.rs | 77 ++++++++++++++----------------- 1 file changed, 34 insertions(+), 43 deletions(-) diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index ff4028f80..a4024f29d 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -1,16 +1,11 @@ //! Utilities to parse Torrust Tracker logs. +use regex::Regex; use serde::{Deserialize, Serialize}; const INFO_LOG_LEVEL: &str = "INFO"; - -const UDP_TRACKER_TARGET: &str = "UDP TRACKER"; -const UDP_TRACKER_SOCKET_ADDR_START_PATTERN: &str = "Started on: udp://"; - -const HTTP_TRACKER_TARGET: &str = "HTTP TRACKER"; -const HTTP_TRACKER_URL_START_PATTERN: &str = "Started on: "; - -const HEALTH_CHECK_TARGET: &str = "HEALTH CHECK API"; -const HEALTH_CHECK_URL_START_PATTERN: &str = "Started on: "; +const UDP_TRACKER_LOG_TARGET: &str = "UDP TRACKER"; +const HTTP_TRACKER_LOG_TARGET: &str = "HTTP TRACKER"; +const HEALTH_CHECK_API_LOG_TARGET: &str = "HEALTH CHECK API"; #[derive(Serialize, Deserialize, Debug, Default)] pub struct RunningServices { @@ -59,19 +54,43 @@ impl RunningServices { /// /// NOTICE: Using colors in the console output could affect this method /// due to the hidden control chars. + /// + /// # Panics + /// + /// Will panic if the regular expression to parse the services can't be compiled.
#[must_use] pub fn parse_from_logs(logs: &str) -> Self { let mut udp_trackers: Vec = Vec::new(); let mut http_trackers: Vec = Vec::new(); let mut health_checks: Vec = Vec::new(); + let udp_re = Regex::new(r"Started on: udp://([0-9.]+:[0-9]+)").unwrap(); + let http_re = Regex::new(r"Started on: (https?://[0-9.]+:[0-9]+)").unwrap(); // DevSkim: ignore DS137138 + let health_re = Regex::new(r"Started on: (https?://[0-9.]+:[0-9]+)").unwrap(); // DevSkim: ignore DS137138 + let ansi_escape_re = Regex::new(r"\x1b\[[0-9;]*m").unwrap(); + for line in logs.lines() { - if let Some(address) = Self::extract_udp_tracker_url(line) { - udp_trackers.push(address); - } else if let Some(address) = Self::extract_http_tracker_url(line) { - http_trackers.push(address); - } else if let Some(address) = Self::extract_health_check_api_url(line) { - health_checks.push(format!("{address}/health_check")); + let clean_line = ansi_escape_re.replace_all(line, ""); + + if !line.contains(INFO_LOG_LEVEL) { + continue; + }; + + if line.contains(UDP_TRACKER_LOG_TARGET) { + if let Some(captures) = udp_re.captures(&clean_line) { + let address = Self::replace_wildcard_ip_with_localhost(&captures[1]); + udp_trackers.push(address); + } + } else if line.contains(HTTP_TRACKER_LOG_TARGET) { + if let Some(captures) = http_re.captures(&clean_line) { + let address = Self::replace_wildcard_ip_with_localhost(&captures[1]); + http_trackers.push(address); + } + } else if line.contains(HEALTH_CHECK_API_LOG_TARGET) { + if let Some(captures) = health_re.captures(&clean_line) { + let address = format!("{}/health_check", Self::replace_wildcard_ip_with_localhost(&captures[1])); + health_checks.push(address); + } } } @@ -82,34 +101,6 @@ impl RunningServices { } } - fn extract_udp_tracker_url(line: &str) -> Option { - if !line.contains(INFO_LOG_LEVEL) || !line.contains(UDP_TRACKER_TARGET) { - return None; - }; - - line.find(UDP_TRACKER_SOCKET_ADDR_START_PATTERN).map(|start| { - 
Self::replace_wildcard_ip_with_localhost(line[start + UDP_TRACKER_SOCKET_ADDR_START_PATTERN.len()..].trim()) - }) - } - - fn extract_http_tracker_url(line: &str) -> Option { - if !line.contains(INFO_LOG_LEVEL) || !line.contains(HTTP_TRACKER_TARGET) { - return None; - }; - - line.find(HTTP_TRACKER_URL_START_PATTERN) - .map(|start| Self::replace_wildcard_ip_with_localhost(line[start + HTTP_TRACKER_URL_START_PATTERN.len()..].trim())) - } - - fn extract_health_check_api_url(line: &str) -> Option { - if !line.contains(INFO_LOG_LEVEL) || !line.contains(HEALTH_CHECK_TARGET) { - return None; - }; - - line.find(HEALTH_CHECK_URL_START_PATTERN) - .map(|start| Self::replace_wildcard_ip_with_localhost(line[start + HEALTH_CHECK_URL_START_PATTERN.len()..].trim())) - } - fn replace_wildcard_ip_with_localhost(address: &str) -> String { address.replace("0.0.0.0", "127.0.0.1") } From 3c715fbbf7fd4d1e934361064972ae676e94ee95 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Jun 2024 10:24:25 +0100 Subject: [PATCH 0881/1003] fix: [#898] docker build error: failed to load bitcode of module criterion Command: ``` docker build --target release --tag torrust-tracker:release --file Containerfile . 
``` Error: ```s => ERROR [build 3/3] RUN cargo nextest archive --tests --benches --examples --workspace --all-targets --all-features --archive- 56.8s ------ > [build 3/3] RUN cargo nextest archive --tests --benches --examples --workspace --all-targets --all-features --archive-file /build/torrust-tracker.tar.zst --release: 0.674 Compiling torrust-tracker-located-error v3.0.0-alpha.12-develop (/build/src/packages/located-error) 0.675 Compiling torrust-tracker-primitives v3.0.0-alpha.12-develop (/build/src/packages/primitives) 0.679 Compiling torrust-tracker-contrib-bencode v3.0.0-alpha.12-develop (/build/src/contrib/bencode) 0.763 Compiling torrust-tracker-configuration v3.0.0-alpha.12-develop (/build/src/packages/configuration) 0.763 Compiling torrust-tracker-clock v3.0.0-alpha.12-develop (/build/src/packages/clock) 0.936 Compiling torrust-tracker-torrent-repository v3.0.0-alpha.12-develop (/build/src/packages/torrent-repository) 0.936 Compiling torrust-tracker-test-helpers v3.0.0-alpha.12-develop (/build/src/packages/test-helpers) 1.181 Compiling torrust-tracker v3.0.0-alpha.12-develop (/build/src) 1.891 warning: Invalid value (Producer: 'LLVM18.1.7-rust-1.79.0-stable' Reader: 'LLVM 18.1.7-rust-1.79.0-stable') 1.891 1.891 error: failed to load bitcode of module "criterion-af9a3f7183f1573d.criterion.b69900c842eb33fa-cgu.08.rcgu.o": 1.891 1.991 warning: `torrust-tracker-contrib-bencode` (bench "bencode_benchmark") generated 1 warning 1.991 error: could not compile `torrust-tracker-contrib-bencode` (bench "bencode_benchmark") due to 1 previous error; 1 warning emitted 1.991 warning: build failed, waiting for other jobs to finish... 
3.936 warning: `torrust-tracker-torrent-repository` (bench "repository_benchmark") generated 1 warning (1 duplicate) 3.936 error: could not compile `torrust-tracker-torrent-repository` (bench "repository_benchmark") due to 1 previous error; 1 warning emitted 56.80 error: command `/usr/local/rustup/toolchains/1.79.0-x86_64-unknown-linux-gnu/bin/cargo test --no-run --message-format json-render-diagnostics --workspace --examples --tests --benches --all-targets --all-features --release` exited with code 101 ------ Containerfile:61 -------------------- 59 | WORKDIR /build/src 60 | COPY . /build/src 61 | >>> RUN cargo nextest archive --tests --benches --examples --workspace --all-targets --all-features --archive-file /build/torrust-tracker.tar.zst --release 62 | 63 | -------------------- ERROR: failed to solve: process "/bin/sh -c cargo nextest archive --tests --benches --examples --workspace --all-targets --all-features --archive-file /build/torrust-tracker.tar.zst --release" did not complete successfully: exit code: 101 ``` - Docker: version 25.0.2, build 29cf629 - Rust: nightly-x86_64-unknown-linux-gnu (default). 
rustc 1.81.0-nightly (d7f6ebace 2024-06-16) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 8b58154ec..072a21a7e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -104,7 +104,7 @@ members = [ [profile.dev] debug = 1 -lto = "thin" +lto = "fat" opt-level = 1 [profile.release] From ef9461a5b98d59f081935185074245da483d3f2b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Jun 2024 09:48:17 +0100 Subject: [PATCH 0882/1003] feat!: [#878] extract logging and core section in toml config files --- Containerfile | 4 +-- compose.yaml | 2 +- docs/benchmarking.md | 2 ++ docs/containers.md | 2 +- packages/configuration/src/v1/core.rs | 19 ++++++------- packages/configuration/src/v1/logging.rs | 27 +++++++++++++++++++ packages/configuration/src/v1/mod.rs | 21 +++++++++++++-- packages/test-helpers/src/configuration.rs | 2 +- share/container/entry_script_sh | 10 +++---- .../config/tracker.container.mysql.toml | 1 + .../config/tracker.container.sqlite3.toml | 1 + .../config/tracker.e2e.container.sqlite3.toml | 1 + .../config/tracker.udp.benchmarking.toml | 3 +++ src/bootstrap/logging.rs | 2 +- src/core/mod.rs | 3 +++ src/lib.rs | 5 +++- 16 files changed, 80 insertions(+), 25 deletions(-) create mode 100644 packages/configuration/src/v1/logging.rs diff --git a/Containerfile b/Containerfile index 79fae692f..cdd70e337 100644 --- a/Containerfile +++ b/Containerfile @@ -96,7 +96,7 @@ RUN ["/busybox/cp", "-sp", "/busybox/sh","/busybox/cat","/busybox/ls","/busybox/ COPY --from=gcc --chmod=0555 /usr/local/bin/su-exec /bin/su-exec ARG TORRUST_TRACKER_CONFIG_TOML_PATH="/etc/torrust/tracker/tracker.toml" -ARG TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER="Sqlite3" +ARG TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER="Sqlite3" ARG USER_ID=1000 ARG UDP_PORT=6969 ARG HTTP_PORT=7070 @@ -104,7 +104,7 @@ ARG API_PORT=1212 ARG HEALTH_CHECK_API_PORT=1313 ENV TORRUST_TRACKER_CONFIG_TOML_PATH=${TORRUST_TRACKER_CONFIG_TOML_PATH} -ENV 
TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER=${TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER} +ENV TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER=${TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER} ENV USER_ID=${USER_ID} ENV UDP_PORT=${UDP_PORT} ENV HTTP_PORT=${HTTP_PORT} diff --git a/compose.yaml b/compose.yaml index 1d425c743..a02302a26 100644 --- a/compose.yaml +++ b/compose.yaml @@ -4,7 +4,7 @@ services: image: torrust-tracker:release tty: true environment: - - TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER=${TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER:-MySQL} + - TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER=${TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER:-MySQL} - TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN=${TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN:-MyAccessToken} networks: - server_side diff --git a/docs/benchmarking.md b/docs/benchmarking.md index 1758e0de4..67b680fdc 100644 --- a/docs/benchmarking.md +++ b/docs/benchmarking.md @@ -29,6 +29,7 @@ cargo build --release -p aquatic_udp_load_test Run the tracker with UDP service enabled and other services disabled and set log level to `error`. ```toml +[logging] log_level = "error" [[udp_trackers]] @@ -163,6 +164,7 @@ Announce responses per info hash: Run the tracker with UDP service enabled and other services disabled and set log level to `error`. ```toml +[logging] log_level = "error" [[udp_trackers]] diff --git a/docs/containers.md b/docs/containers.md index a0ba59d4b..ff15cd7cc 100644 --- a/docs/containers.md +++ b/docs/containers.md @@ -149,7 +149,7 @@ The following environmental variables can be set: - `TORRUST_TRACKER_CONFIG_TOML_PATH` - The in-container path to the tracker configuration file, (default: `"/etc/torrust/tracker/tracker.toml"`). - `TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN` - Override of the admin token. If set, this value overrides any value set in the config. 
-- `TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER` - The database type used for the container, (options: `Sqlite3`, `MySQL`, default `Sqlite3`). Please Note: This dose not override the database configuration within the `.toml` config file. +- `TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER` - The database type used for the container, (options: `Sqlite3`, `MySQL`, default `Sqlite3`). Please Note: This dose not override the database configuration within the `.toml` config file. - `TORRUST_TRACKER_CONFIG_TOML` - Load config from this environmental variable instead from a file, (i.e: `TORRUST_TRACKER_CONFIG_TOML=$(cat tracker-tracker.toml)`). - `USER_ID` - The user id for the runtime crated `torrust` user. Please Note: This user id should match the ownership of the host-mapped volumes, (default `1000`). - `UDP_PORT` - The port for the UDP tracker. This should match the port used in the configuration, (default `6969`). diff --git a/packages/configuration/src/v1/core.rs b/packages/configuration/src/v1/core.rs index 5d00c67ab..ae66f54fa 100644 --- a/packages/configuration/src/v1/core.rs +++ b/packages/configuration/src/v1/core.rs @@ -3,15 +3,11 @@ use std::net::{IpAddr, Ipv4Addr}; use serde::{Deserialize, Serialize}; use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; -use crate::{AnnouncePolicy, LogLevel}; +use crate::AnnouncePolicy; #[allow(clippy::struct_excessive_bools)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct Core { - /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, - /// `Debug` and `Trace`. Default is `Info`. - #[serde(default = "Core::default_log_level")] - pub log_level: Option, /// Tracker mode. See [`TrackerMode`] for more information. #[serde(default = "Core::default_mode")] pub mode: TrackerMode, @@ -20,6 +16,7 @@ pub struct Core { /// Database driver. Possible values are: `Sqlite3`, and `MySQL`. 
#[serde(default = "Core::default_db_driver")] pub db_driver: DatabaseDriver, + /// Database connection string. The format depends on the database driver. /// For `Sqlite3`, the format is `path/to/database.db`, for example: /// `./storage/tracker/lib/database/sqlite3.db`. @@ -35,11 +32,13 @@ pub struct Core { /// See [`AnnouncePolicy::interval_min`] #[serde(default = "AnnouncePolicy::default_interval_min")] pub min_announce_interval: u32, + /// Weather the tracker is behind a reverse proxy or not. /// If the tracker is behind a reverse proxy, the `X-Forwarded-For` header /// sent from the proxy will be used to get the client's IP address. #[serde(default = "Core::default_on_reverse_proxy")] pub on_reverse_proxy: bool, + /// The external IP address of the tracker. If the client is using a /// loopback IP address, this IP address will be used instead. If the peer /// is using a loopback IP address, the tracker assumes that the peer is @@ -47,6 +46,7 @@ pub struct Core { /// address instead. #[serde(default = "Core::default_external_ip")] pub external_ip: Option, + /// Weather the tracker should collect statistics about tracker usage. /// If enabled, the tracker will collect statistics like the number of /// connections handled, the number of announce requests handled, etc. @@ -54,6 +54,7 @@ pub struct Core { /// information about the collected metrics. #[serde(default = "Core::default_tracker_usage_statistics")] pub tracker_usage_statistics: bool, + /// If enabled the tracker will persist the number of completed downloads. /// That's how many times a torrent has been downloaded completely. #[serde(default = "Core::default_persistent_torrent_completed_stat")] @@ -65,10 +66,12 @@ pub struct Core { /// time, it will be removed from the torrent peer list. #[serde(default = "Core::default_max_peer_timeout")] pub max_peer_timeout: u32, + /// Interval in seconds that the cleanup job will run to remove inactive /// peers from the torrent peer list. 
#[serde(default = "Core::default_inactive_peer_cleanup_interval")] pub inactive_peer_cleanup_interval: u64, + /// If enabled, the tracker will remove torrents that have no peers. /// The clean up torrent job runs every `inactive_peer_cleanup_interval` /// seconds and it removes inactive peers. Eventually, the peer list of a @@ -83,7 +86,6 @@ impl Default for Core { let announce_policy = AnnouncePolicy::default(); Self { - log_level: Self::default_log_level(), mode: Self::default_mode(), db_driver: Self::default_db_driver(), db_path: Self::default_db_path(), @@ -101,11 +103,6 @@ impl Default for Core { } impl Core { - #[allow(clippy::unnecessary_wraps)] - fn default_log_level() -> Option { - Some(LogLevel::Info) - } - fn default_mode() -> TrackerMode { TrackerMode::Public } diff --git a/packages/configuration/src/v1/logging.rs b/packages/configuration/src/v1/logging.rs new file mode 100644 index 000000000..c85564a05 --- /dev/null +++ b/packages/configuration/src/v1/logging.rs @@ -0,0 +1,27 @@ +use serde::{Deserialize, Serialize}; + +use crate::LogLevel; + +#[allow(clippy::struct_excessive_bools)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct Logging { + /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, + /// `Debug` and `Trace`. Default is `Info`. + #[serde(default = "Logging::default_log_level")] + pub log_level: Option, +} + +impl Default for Logging { + fn default() -> Self { + Self { + log_level: Self::default_log_level(), + } + } +} + +impl Logging { + #[allow(clippy::unnecessary_wraps)] + fn default_log_level() -> Option { + Some(LogLevel::Info) + } +} diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 8d45270b8..809970506 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -193,7 +193,10 @@ //! The default configuration is: //! //! ```toml +//! [logging] //! log_level = "info" +//! +//! [core] //! mode = "public" //! 
db_driver = "Sqlite3" //! db_path = "./storage/tracker/lib/database/sqlite3.db" @@ -233,6 +236,7 @@ pub mod core; pub mod health_check_api; pub mod http_tracker; +pub mod logging; pub mod tracker_api; pub mod udp_tracker; @@ -241,6 +245,7 @@ use std::net::IpAddr; use figment::providers::{Env, Format, Serialized, Toml}; use figment::Figment; +use logging::Logging; use serde::{Deserialize, Serialize}; use self::core::Core; @@ -258,19 +263,25 @@ const CONFIG_OVERRIDE_SEPARATOR: &str = "__"; /// Core configuration for the tracker. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { + /// Logging configuration + pub logging: Logging, + /// Core configuration. - #[serde(flatten)] pub core: Core, + /// The list of UDP trackers the tracker is running. Each UDP tracker /// represents a UDP server that the tracker is running and it has its own /// configuration. pub udp_trackers: Vec, + /// The list of HTTP trackers the tracker is running. Each HTTP tracker /// represents a HTTP server that the tracker is running and it has its own /// configuration. pub http_trackers: Vec, + /// The HTTP API configuration. pub http_api: HttpApi, + /// The Health Check API configuration. 
pub health_check_api: HealthCheckApi, } @@ -278,6 +289,7 @@ pub struct Configuration { impl Default for Configuration { fn default() -> Self { Self { + logging: Logging::default(), core: Core::default(), udp_trackers: vec![UdpTracker::default()], http_trackers: vec![HttpTracker::default()], @@ -365,7 +377,10 @@ mod tests { #[cfg(test)] fn default_config_toml() -> String { - let config = r#"log_level = "info" + let config = r#"[logging] + log_level = "info" + + [core] mode = "public" db_driver = "Sqlite3" db_path = "./storage/tracker/lib/database/sqlite3.db" @@ -475,6 +490,7 @@ mod tests { fn default_configuration_could_be_overwritten_from_a_single_env_var_with_toml_contents() { figment::Jail::expect_with(|_jail| { let config_toml = r#" + [core] db_path = "OVERWRITTEN DEFAULT DB PATH" "# .to_string(); @@ -498,6 +514,7 @@ mod tests { jail.create_file( "tracker.toml", r#" + [core] db_path = "OVERWRITTEN DEFAULT DB PATH" "#, )?; diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 15ecd5280..c35d0a851 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -29,7 +29,7 @@ pub fn ephemeral() -> Configuration { let mut config = Configuration::default(); - config.core.log_level = Some(LogLevel::Off); // Change to `debug` for tests debugging + config.logging.log_level = Some(LogLevel::Off); // Change to `debug` for tests debugging // Ephemeral socket address for API let api_port = 0u16; diff --git a/share/container/entry_script_sh b/share/container/entry_script_sh index 8c704ea67..51df717c6 100644 --- a/share/container/entry_script_sh +++ b/share/container/entry_script_sh @@ -26,8 +26,8 @@ chmod -R 2770 /var/lib/torrust /var/log/torrust /etc/torrust # Install the database and config: -if [ -n "$TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER" ]; then - if cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER" "Sqlite3"; then +if [ -n 
"$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER" ]; then + if cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER" "Sqlite3"; then # Select Sqlite3 empty database default_database="/usr/share/torrust/default/database/tracker.sqlite3.db" @@ -35,7 +35,7 @@ if [ -n "$TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER" ]; then # Select Sqlite3 default configuration default_config="/usr/share/torrust/default/config/tracker.container.sqlite3.toml" - elif cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER" "MySQL"; then + elif cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER" "MySQL"; then # (no database file needed for MySQL) @@ -43,12 +43,12 @@ if [ -n "$TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER" ]; then default_config="/usr/share/torrust/default/config/tracker.container.mysql.toml" else - echo "Error: Unsupported Database Type: \"$TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER\"." + echo "Error: Unsupported Database Type: \"$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER\"." echo "Please Note: Supported Database Types: \"Sqlite3\", \"MySQL\"." 
exit 1 fi else - echo "Error: \"\$TORRUST_TRACKER_CONFIG_OVERRIDE_DB_DRIVER\" was not set!"; exit 1; + echo "Error: \"\$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER\" was not set!"; exit 1; fi install_config="/etc/torrust/tracker/tracker.toml" diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml index 7678327ab..617450562 100644 --- a/share/default/config/tracker.container.mysql.toml +++ b/share/default/config/tracker.container.mysql.toml @@ -1,3 +1,4 @@ +[core] db_driver = "MySQL" db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" diff --git a/share/default/config/tracker.container.sqlite3.toml b/share/default/config/tracker.container.sqlite3.toml index da8259286..01ca655c3 100644 --- a/share/default/config/tracker.container.sqlite3.toml +++ b/share/default/config/tracker.container.sqlite3.toml @@ -1,3 +1,4 @@ +[core] db_path = "/var/lib/torrust/tracker/database/sqlite3.db" [[http_trackers]] diff --git a/share/default/config/tracker.e2e.container.sqlite3.toml b/share/default/config/tracker.e2e.container.sqlite3.toml index 767b56116..60d7a798a 100644 --- a/share/default/config/tracker.e2e.container.sqlite3.toml +++ b/share/default/config/tracker.e2e.container.sqlite3.toml @@ -1,3 +1,4 @@ +[core] db_path = "/var/lib/torrust/tracker/database/sqlite3.db" [[udp_trackers]] diff --git a/share/default/config/tracker.udp.benchmarking.toml b/share/default/config/tracker.udp.benchmarking.toml index 00f62628b..cd193c40a 100644 --- a/share/default/config/tracker.udp.benchmarking.toml +++ b/share/default/config/tracker.udp.benchmarking.toml @@ -1,4 +1,7 @@ +[logging] log_level = "error" + +[core] remove_peerless_torrents = false tracker_usage_statistics = false diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs index 14756565f..f6868602d 100644 --- a/src/bootstrap/logging.rs +++ b/src/bootstrap/logging.rs @@ -20,7 +20,7 @@ static INIT: Once = Once::new(); /// It redirects 
the log info to the standard output with the log level defined in the configuration pub fn setup(cfg: &Configuration) { - let tracing_level = config_level_or_default(&cfg.core.log_level); + let tracing_level = config_level_or_default(&cfg.logging.log_level); if tracing_level == LevelFilter::OFF { return; diff --git a/src/core/mod.rs b/src/core/mod.rs index 6af28199f..1b60ad6f9 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -312,7 +312,10 @@ //! You can control the behavior of this module with the module settings: //! //! ```toml +//! [logging] //! log_level = "debug" +//! +//! [core] //! mode = "public" //! db_driver = "Sqlite3" //! db_path = "./storage/tracker/lib/database/sqlite3.db" diff --git a/src/lib.rs b/src/lib.rs index 39d0b5b3d..2ed88a68b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -167,12 +167,15 @@ //! The default configuration is: //! //! ```toml +//! [logging] +//! log_level = "info" +//! +//! [core] //! announce_interval = 120 //! db_driver = "Sqlite3" //! db_path = "./storage/tracker/lib/database/sqlite3.db" //! external_ip = "0.0.0.0" //! inactive_peer_cleanup_interval = 600 -//! log_level = "info" //! max_peer_timeout = 900 //! min_announce_interval = 120 //! mode = "public" From 77dd938f807cbf7d2e423f2ee584ca7b75287f10 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Jun 2024 16:08:23 +0100 Subject: [PATCH 0883/1003] feat!: [#878] make log_level config value mandatory Although, it has a default value `info` so you can omit it in the TOML config file. --- packages/configuration/src/v1/logging.rs | 7 +++---- packages/test-helpers/src/configuration.rs | 2 +- src/bootstrap/logging.rs | 19 ++++++++----------- 3 files changed, 12 insertions(+), 16 deletions(-) diff --git a/packages/configuration/src/v1/logging.rs b/packages/configuration/src/v1/logging.rs index c85564a05..e33522db4 100644 --- a/packages/configuration/src/v1/logging.rs +++ b/packages/configuration/src/v1/logging.rs @@ -8,7 +8,7 @@ pub struct Logging { /// Logging level.
Possible values are: `Off`, `Error`, `Warn`, `Info`, /// `Debug` and `Trace`. Default is `Info`. #[serde(default = "Logging::default_log_level")] - pub log_level: Option, + pub log_level: LogLevel, } impl Default for Logging { @@ -20,8 +20,7 @@ impl Default for Logging { } impl Logging { - #[allow(clippy::unnecessary_wraps)] - fn default_log_level() -> Option { - Some(LogLevel::Info) + fn default_log_level() -> LogLevel { + LogLevel::Info } } diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index c35d0a851..f70bebcf7 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -29,7 +29,7 @@ pub fn ephemeral() -> Configuration { let mut config = Configuration::default(); - config.logging.log_level = Some(LogLevel::Off); // Change to `debug` for tests debugging + config.logging.log_level = LogLevel::Off; // Change to `debug` for tests debugging // Ephemeral socket address for API let api_port = 0u16; diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs index f6868602d..649495dc7 100644 --- a/src/bootstrap/logging.rs +++ b/src/bootstrap/logging.rs @@ -20,7 +20,7 @@ static INIT: Once = Once::new(); /// It redirects the log info to the standard output with the log level defined in the configuration pub fn setup(cfg: &Configuration) { - let tracing_level = config_level_or_default(&cfg.logging.log_level); + let tracing_level = map_to_tracing_level_filter(&cfg.logging.log_level); if tracing_level == LevelFilter::OFF { return; @@ -31,17 +31,14 @@ pub fn setup(cfg: &Configuration) { }); } -fn config_level_or_default(log_level: &Option) -> LevelFilter { +fn map_to_tracing_level_filter(log_level: &LogLevel) -> LevelFilter { match log_level { - None => LevelFilter::INFO, - Some(level) => match level { - LogLevel::Off => LevelFilter::OFF, - LogLevel::Error => LevelFilter::ERROR, - LogLevel::Warn => LevelFilter::WARN, - LogLevel::Info => LevelFilter::INFO, - 
LogLevel::Debug => LevelFilter::DEBUG, - LogLevel::Trace => LevelFilter::TRACE, - }, + LogLevel::Off => LevelFilter::OFF, + LogLevel::Error => LevelFilter::ERROR, + LogLevel::Warn => LevelFilter::WARN, + LogLevel::Info => LevelFilter::INFO, + LogLevel::Debug => LevelFilter::DEBUG, + LogLevel::Trace => LevelFilter::TRACE, } } From 2f94f6caa5673b898d7873b4570f4c87a26be15f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Jun 2024 16:39:01 +0100 Subject: [PATCH 0884/1003] feat!: [#878] extract database section in core config section --- Containerfile | 4 +- compose.yaml | 2 +- docs/containers.md | 7 ++-- packages/configuration/src/v1/core.rs | 29 ++++---------- packages/configuration/src/v1/database.rs | 38 +++++++++++++++++++ packages/configuration/src/v1/mod.rs | 25 +++++++----- packages/test-helpers/src/configuration.rs | 2 +- share/container/entry_script_sh | 10 ++--- .../config/tracker.container.mysql.toml | 6 +-- .../config/tracker.container.sqlite3.toml | 4 +- .../config/tracker.e2e.container.sqlite3.toml | 4 +- src/console/ci/e2e/logs_parser.rs | 18 +-------- src/core/mod.rs | 8 ++-- src/lib.rs | 6 ++- 14 files changed, 92 insertions(+), 71 deletions(-) create mode 100644 packages/configuration/src/v1/database.rs diff --git a/Containerfile b/Containerfile index cdd70e337..d55d2f300 100644 --- a/Containerfile +++ b/Containerfile @@ -96,7 +96,7 @@ RUN ["/busybox/cp", "-sp", "/busybox/sh","/busybox/cat","/busybox/ls","/busybox/ COPY --from=gcc --chmod=0555 /usr/local/bin/su-exec /bin/su-exec ARG TORRUST_TRACKER_CONFIG_TOML_PATH="/etc/torrust/tracker/tracker.toml" -ARG TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER="Sqlite3" +ARG TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER="Sqlite3" ARG USER_ID=1000 ARG UDP_PORT=6969 ARG HTTP_PORT=7070 @@ -104,7 +104,7 @@ ARG API_PORT=1212 ARG HEALTH_CHECK_API_PORT=1313 ENV TORRUST_TRACKER_CONFIG_TOML_PATH=${TORRUST_TRACKER_CONFIG_TOML_PATH} -ENV 
TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER=${TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER} +ENV TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER=${TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER} ENV USER_ID=${USER_ID} ENV UDP_PORT=${UDP_PORT} ENV HTTP_PORT=${HTTP_PORT} diff --git a/compose.yaml b/compose.yaml index a02302a26..cab5c6d5e 100644 --- a/compose.yaml +++ b/compose.yaml @@ -4,7 +4,7 @@ services: image: torrust-tracker:release tty: true environment: - - TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER=${TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER:-MySQL} + - TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER=${TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER:-MySQL} - TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN=${TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN:-MyAccessToken} networks: - server_side diff --git a/docs/containers.md b/docs/containers.md index ff15cd7cc..1a1ea2f0d 100644 --- a/docs/containers.md +++ b/docs/containers.md @@ -149,7 +149,7 @@ The following environmental variables can be set: - `TORRUST_TRACKER_CONFIG_TOML_PATH` - The in-container path to the tracker configuration file, (default: `"/etc/torrust/tracker/tracker.toml"`). - `TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN` - Override of the admin token. If set, this value overrides any value set in the config. -- `TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER` - The database type used for the container, (options: `Sqlite3`, `MySQL`, default `Sqlite3`). Please Note: This dose not override the database configuration within the `.toml` config file. +- `TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER` - The database type used for the container, (options: `Sqlite3`, `MySQL`, default `Sqlite3`). Please Note: This dose not override the database configuration within the `.toml` config file. 
- `TORRUST_TRACKER_CONFIG_TOML` - Load config from this environmental variable instead from a file, (i.e: `TORRUST_TRACKER_CONFIG_TOML=$(cat tracker-tracker.toml)`). - `USER_ID` - The user id for the runtime crated `torrust` user. Please Note: This user id should match the ownership of the host-mapped volumes, (default `1000`). - `UDP_PORT` - The port for the UDP tracker. This should match the port used in the configuration, (default `6969`). @@ -243,8 +243,9 @@ podman run -it \ The docker-compose configuration includes the MySQL service configuration. If you want to use MySQL instead of SQLite you should verify the `/etc/torrust/tracker/tracker.toml` (i.e `./storage/tracker/etc/tracker.toml`) configuration: ```toml -db_driver = "MySQL" -db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" +[core.database] +driver = "MySQL" +path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" ``` ### Build and Run: diff --git a/packages/configuration/src/v1/core.rs b/packages/configuration/src/v1/core.rs index ae66f54fa..17ac36ee0 100644 --- a/packages/configuration/src/v1/core.rs +++ b/packages/configuration/src/v1/core.rs @@ -1,8 +1,9 @@ use std::net::{IpAddr, Ipv4Addr}; use serde::{Deserialize, Serialize}; -use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; +use torrust_tracker_primitives::TrackerMode; +use crate::v1::database::Database; use crate::AnnouncePolicy; #[allow(clippy::struct_excessive_bools)] @@ -12,18 +13,9 @@ pub struct Core { #[serde(default = "Core::default_mode")] pub mode: TrackerMode, - // Database configuration - /// Database driver. Possible values are: `Sqlite3`, and `MySQL`. - #[serde(default = "Core::default_db_driver")] - pub db_driver: DatabaseDriver, - - /// Database connection string. The format depends on the database driver. - /// For `Sqlite3`, the format is `path/to/database.db`, for example: - /// `./storage/tracker/lib/database/sqlite3.db`. 
- /// For `Mysql`, the format is `mysql://db_user:db_user_password:port/db_name`, for - /// example: `root:password@localhost:3306/torrust`. - #[serde(default = "Core::default_db_path")] - pub db_path: String, + // Database configuration. + #[serde(default = "Core::default_database")] + pub database: Database, /// See [`AnnouncePolicy::interval`] #[serde(default = "AnnouncePolicy::default_interval")] @@ -87,8 +79,7 @@ impl Default for Core { Self { mode: Self::default_mode(), - db_driver: Self::default_db_driver(), - db_path: Self::default_db_path(), + database: Self::default_database(), announce_interval: announce_policy.interval, min_announce_interval: announce_policy.interval_min, max_peer_timeout: Self::default_max_peer_timeout(), @@ -107,12 +98,8 @@ impl Core { TrackerMode::Public } - fn default_db_driver() -> DatabaseDriver { - DatabaseDriver::Sqlite3 - } - - fn default_db_path() -> String { - String::from("./storage/tracker/lib/database/sqlite3.db") + fn default_database() -> Database { + Database::default() } fn default_on_reverse_proxy() -> bool { diff --git a/packages/configuration/src/v1/database.rs b/packages/configuration/src/v1/database.rs new file mode 100644 index 000000000..b029175ce --- /dev/null +++ b/packages/configuration/src/v1/database.rs @@ -0,0 +1,38 @@ +use serde::{Deserialize, Serialize}; +use torrust_tracker_primitives::DatabaseDriver; + +#[allow(clippy::struct_excessive_bools)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct Database { + // Database configuration + /// Database driver. Possible values are: `Sqlite3`, and `MySQL`. + #[serde(default = "Database::default_driver")] + pub driver: DatabaseDriver, + + /// Database connection string. The format depends on the database driver. + /// For `Sqlite3`, the format is `path/to/database.db`, for example: + /// `./storage/tracker/lib/database/sqlite3.db`. 
+ /// For `Mysql`, the format is `mysql://db_user:db_user_password:port/db_name`, for + /// example: `root:password@localhost:3306/torrust`. + #[serde(default = "Database::default_path")] + pub path: String, +} + +impl Default for Database { + fn default() -> Self { + Self { + driver: Self::default_driver(), + path: Self::default_path(), + } + } +} + +impl Database { + fn default_driver() -> DatabaseDriver { + DatabaseDriver::Sqlite3 + } + + fn default_path() -> String { + String::from("./storage/tracker/lib/database/sqlite3.db") + } +} diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 809970506..5b3cad3ea 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -198,8 +198,6 @@ //! //! [core] //! mode = "public" -//! db_driver = "Sqlite3" -//! db_path = "./storage/tracker/lib/database/sqlite3.db" //! announce_interval = 120 //! min_announce_interval = 120 //! on_reverse_proxy = false @@ -210,6 +208,10 @@ //! inactive_peer_cleanup_interval = 600 //! remove_peerless_torrents = true //! +//! [core.database] +//! driver = "Sqlite3" +//! path = "./storage/tracker/lib/database/sqlite3.db" +//! //! [[udp_trackers]] //! enabled = false //! bind_address = "0.0.0.0:6969" @@ -234,6 +236,7 @@ //! 
bind_address = "127.0.0.1:1313" //!``` pub mod core; +pub mod database; pub mod health_check_api; pub mod http_tracker; pub mod logging; @@ -382,8 +385,6 @@ mod tests { [core] mode = "public" - db_driver = "Sqlite3" - db_path = "./storage/tracker/lib/database/sqlite3.db" announce_interval = 120 min_announce_interval = 120 on_reverse_proxy = false @@ -394,6 +395,10 @@ mod tests { inactive_peer_cleanup_interval = 600 remove_peerless_torrents = true + [core.database] + driver = "Sqlite3" + path = "./storage/tracker/lib/database/sqlite3.db" + [[udp_trackers]] enabled = false bind_address = "0.0.0.0:6969" @@ -490,8 +495,8 @@ mod tests { fn default_configuration_could_be_overwritten_from_a_single_env_var_with_toml_contents() { figment::Jail::expect_with(|_jail| { let config_toml = r#" - [core] - db_path = "OVERWRITTEN DEFAULT DB PATH" + [core.database] + path = "OVERWRITTEN DEFAULT DB PATH" "# .to_string(); @@ -502,7 +507,7 @@ mod tests { let configuration = Configuration::load(&info).expect("Could not load configuration from file"); - assert_eq!(configuration.core.db_path, "OVERWRITTEN DEFAULT DB PATH".to_string()); + assert_eq!(configuration.core.database.path, "OVERWRITTEN DEFAULT DB PATH".to_string()); Ok(()) }); @@ -514,8 +519,8 @@ mod tests { jail.create_file( "tracker.toml", r#" - [core] - db_path = "OVERWRITTEN DEFAULT DB PATH" + [core.database] + path = "OVERWRITTEN DEFAULT DB PATH" "#, )?; @@ -526,7 +531,7 @@ mod tests { let configuration = Configuration::load(&info).expect("Could not load configuration from file"); - assert_eq!(configuration.core.db_path, "OVERWRITTEN DEFAULT DB PATH".to_string()); + assert_eq!(configuration.core.database.path, "OVERWRITTEN DEFAULT DB PATH".to_string()); Ok(()) }); diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index f70bebcf7..fe05407d9 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -54,7 +54,7 @@ pub fn 
ephemeral() -> Configuration { let temp_directory = env::temp_dir(); let random_db_id = random::string(16); let temp_file = temp_directory.join(format!("data_{random_db_id}.db")); - temp_file.to_str().unwrap().clone_into(&mut config.core.db_path); + temp_file.to_str().unwrap().clone_into(&mut config.core.database.path); config } diff --git a/share/container/entry_script_sh b/share/container/entry_script_sh index 51df717c6..0668114fd 100644 --- a/share/container/entry_script_sh +++ b/share/container/entry_script_sh @@ -26,8 +26,8 @@ chmod -R 2770 /var/lib/torrust /var/log/torrust /etc/torrust # Install the database and config: -if [ -n "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER" ]; then - if cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER" "Sqlite3"; then +if [ -n "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER" ]; then + if cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER" "Sqlite3"; then # Select Sqlite3 empty database default_database="/usr/share/torrust/default/database/tracker.sqlite3.db" @@ -35,7 +35,7 @@ if [ -n "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER" ]; then # Select Sqlite3 default configuration default_config="/usr/share/torrust/default/config/tracker.container.sqlite3.toml" - elif cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER" "MySQL"; then + elif cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER" "MySQL"; then # (no database file needed for MySQL) @@ -43,12 +43,12 @@ if [ -n "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER" ]; then default_config="/usr/share/torrust/default/config/tracker.container.mysql.toml" else - echo "Error: Unsupported Database Type: \"$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER\"." + echo "Error: Unsupported Database Type: \"$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER\"." echo "Please Note: Supported Database Types: \"Sqlite3\", \"MySQL\"." 
exit 1 fi else - echo "Error: \"\$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DB_DRIVER\" was not set!"; exit 1; + echo "Error: \"\$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER\" was not set!"; exit 1; fi install_config="/etc/torrust/tracker/tracker.toml" diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml index 617450562..75cc57b64 100644 --- a/share/default/config/tracker.container.mysql.toml +++ b/share/default/config/tracker.container.mysql.toml @@ -1,6 +1,6 @@ -[core] -db_driver = "MySQL" -db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" +[core.database] +driver = "MySQL" +path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" [[http_trackers]] ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" diff --git a/share/default/config/tracker.container.sqlite3.toml b/share/default/config/tracker.container.sqlite3.toml index 01ca655c3..433e36127 100644 --- a/share/default/config/tracker.container.sqlite3.toml +++ b/share/default/config/tracker.container.sqlite3.toml @@ -1,5 +1,5 @@ -[core] -db_path = "/var/lib/torrust/tracker/database/sqlite3.db" +[core.database] +path = "/var/lib/torrust/tracker/database/sqlite3.db" [[http_trackers]] ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" diff --git a/share/default/config/tracker.e2e.container.sqlite3.toml b/share/default/config/tracker.e2e.container.sqlite3.toml index 60d7a798a..b8adedefb 100644 --- a/share/default/config/tracker.e2e.container.sqlite3.toml +++ b/share/default/config/tracker.e2e.container.sqlite3.toml @@ -1,5 +1,5 @@ -[core] -db_path = "/var/lib/torrust/tracker/database/sqlite3.db" +[core.database] +path = "/var/lib/torrust/tracker/database/sqlite3.db" [[udp_trackers]] enabled = true diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index a4024f29d..4886786de 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ 
b/src/console/ci/e2e/logs_parser.rs @@ -112,21 +112,7 @@ mod tests { #[test] fn it_should_parse_from_logs_with_valid_logs() { - let logs = r#" - Loading configuration from environment variable db_path = "/var/lib/torrust/tracker/database/sqlite3.db" - - [[udp_trackers]] - enabled = true - - [[http_trackers]] - enabled = true - ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" - ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" - - [http_api] - ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" - ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" - + let logs = r" Loading configuration from default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... 2024-06-10T16:07:39.989540Z INFO torrust_tracker::bootstrap::logging: logging initialized. 2024-06-10T16:07:39.990244Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 @@ -139,7 +125,7 @@ mod tests { 2024-06-10T16:07:39.990565Z INFO API: Started on http://127.0.0.1:1212 2024-06-10T16:07:39.990577Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 2024-06-10T16:07:39.990638Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 - "#; + "; let running_services = RunningServices::parse_from_logs(logs); diff --git a/src/core/mod.rs b/src/core/mod.rs index 1b60ad6f9..c5171ab58 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -317,8 +317,6 @@ //! //! [core] //! mode = "public" -//! db_driver = "Sqlite3" -//! db_path = "./storage/tracker/lib/database/sqlite3.db" //! announce_interval = 120 //! min_announce_interval = 120 //! max_peer_timeout = 900 @@ -328,6 +326,10 @@ //! persistent_torrent_completed_stat = true //! inactive_peer_cleanup_interval = 600 //! remove_peerless_torrents = false +//! +//! [core.database] +//! driver = "Sqlite3" +//! path = "./storage/tracker/lib/database/sqlite3.db" //! ``` //! //! 
Refer to the [`configuration` module documentation](https://docs.rs/torrust-tracker-configuration) to get more information about all options. @@ -548,7 +550,7 @@ impl Tracker { stats_event_sender: Option>, stats_repository: statistics::Repo, ) -> Result { - let database = Arc::new(databases::driver::build(&config.db_driver, &config.db_path)?); + let database = Arc::new(databases::driver::build(&config.database.driver, &config.database.path)?); let mode = config.mode.clone(); diff --git a/src/lib.rs b/src/lib.rs index 2ed88a68b..b94da4717 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -172,8 +172,6 @@ //! //! [core] //! announce_interval = 120 -//! db_driver = "Sqlite3" -//! db_path = "./storage/tracker/lib/database/sqlite3.db" //! external_ip = "0.0.0.0" //! inactive_peer_cleanup_interval = 600 //! max_peer_timeout = 900 @@ -184,6 +182,10 @@ //! remove_peerless_torrents = true //! tracker_usage_statistics = true //! +//! [core.database] +//! driver = "Sqlite3" +//! path = "./storage/tracker/lib/database/sqlite3.db" +//! //! [[udp_trackers]] //! bind_address = "0.0.0.0:6969" //! 
enabled = false From edc706cc146035d586e86741da7b1df1db4bd08d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Jun 2024 16:59:42 +0100 Subject: [PATCH 0885/1003] feat!: [#878] extract net section in core config section --- packages/configuration/src/v1/core.rs | 55 ++++++++-------------- packages/configuration/src/v1/mod.rs | 34 ++++++++----- packages/configuration/src/v1/network.rs | 41 ++++++++++++++++ packages/test-helpers/src/configuration.rs | 6 +-- src/core/mod.rs | 16 ++++--- src/lib.rs | 6 ++- src/servers/http/v1/services/announce.rs | 2 +- src/servers/udp/handlers.rs | 2 +- 8 files changed, 99 insertions(+), 63 deletions(-) create mode 100644 packages/configuration/src/v1/network.rs diff --git a/packages/configuration/src/v1/core.rs b/packages/configuration/src/v1/core.rs index 17ac36ee0..5d8ab0b33 100644 --- a/packages/configuration/src/v1/core.rs +++ b/packages/configuration/src/v1/core.rs @@ -1,8 +1,7 @@ -use std::net::{IpAddr, Ipv4Addr}; - use serde::{Deserialize, Serialize}; use torrust_tracker_primitives::TrackerMode; +use super::network::Network; use crate::v1::database::Database; use crate::AnnouncePolicy; @@ -13,10 +12,6 @@ pub struct Core { #[serde(default = "Core::default_mode")] pub mode: TrackerMode, - // Database configuration. - #[serde(default = "Core::default_database")] - pub database: Database, - /// See [`AnnouncePolicy::interval`] #[serde(default = "AnnouncePolicy::default_interval")] pub announce_interval: u32, @@ -25,20 +20,6 @@ pub struct Core { #[serde(default = "AnnouncePolicy::default_interval_min")] pub min_announce_interval: u32, - /// Weather the tracker is behind a reverse proxy or not. - /// If the tracker is behind a reverse proxy, the `X-Forwarded-For` header - /// sent from the proxy will be used to get the client's IP address. - #[serde(default = "Core::default_on_reverse_proxy")] - pub on_reverse_proxy: bool, - - /// The external IP address of the tracker. 
If the client is using a - /// loopback IP address, this IP address will be used instead. If the peer - /// is using a loopback IP address, the tracker assumes that the peer is - /// in the same network as the tracker and will use the tracker's IP - /// address instead. - #[serde(default = "Core::default_external_ip")] - pub external_ip: Option, - /// Weather the tracker should collect statistics about tracker usage. /// If enabled, the tracker will collect statistics like the number of /// connections handled, the number of announce requests handled, etc. @@ -71,6 +52,14 @@ pub struct Core { /// enabled. #[serde(default = "Core::default_remove_peerless_torrents")] pub remove_peerless_torrents: bool, + + // Database configuration. + #[serde(default = "Core::default_database")] + pub database: Database, + + // Network configuration. + #[serde(default = "Core::default_network")] + pub net: Network, } impl Default for Core { @@ -79,16 +68,15 @@ impl Default for Core { Self { mode: Self::default_mode(), - database: Self::default_database(), announce_interval: announce_policy.interval, min_announce_interval: announce_policy.interval_min, max_peer_timeout: Self::default_max_peer_timeout(), - on_reverse_proxy: Self::default_on_reverse_proxy(), - external_ip: Self::default_external_ip(), tracker_usage_statistics: Self::default_tracker_usage_statistics(), persistent_torrent_completed_stat: Self::default_persistent_torrent_completed_stat(), inactive_peer_cleanup_interval: Self::default_inactive_peer_cleanup_interval(), remove_peerless_torrents: Self::default_remove_peerless_torrents(), + database: Self::default_database(), + net: Self::default_network(), } } } @@ -98,19 +86,6 @@ impl Core { TrackerMode::Public } - fn default_database() -> Database { - Database::default() - } - - fn default_on_reverse_proxy() -> bool { - false - } - - #[allow(clippy::unnecessary_wraps)] - fn default_external_ip() -> Option { - Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) - } - fn 
default_tracker_usage_statistics() -> bool { true } @@ -130,4 +105,12 @@ impl Core { fn default_remove_peerless_torrents() -> bool { true } + + fn default_database() -> Database { + Database::default() + } + + fn default_network() -> Network { + Network::default() + } } diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 5b3cad3ea..d96e1335c 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -200,17 +200,19 @@ //! mode = "public" //! announce_interval = 120 //! min_announce_interval = 120 -//! on_reverse_proxy = false -//! external_ip = "0.0.0.0" //! tracker_usage_statistics = true //! persistent_torrent_completed_stat = false //! max_peer_timeout = 900 //! inactive_peer_cleanup_interval = 600 //! remove_peerless_torrents = true //! -//! [core.database] -//! driver = "Sqlite3" -//! path = "./storage/tracker/lib/database/sqlite3.db" +//! [core.database] +//! driver = "Sqlite3" +//! path = "./storage/tracker/lib/database/sqlite3.db" +//! +//! [core.net] +//! external_ip = "0.0.0.0" +//! on_reverse_proxy = false //! //! [[udp_trackers]] //! enabled = false @@ -240,6 +242,7 @@ pub mod database; pub mod health_check_api; pub mod http_tracker; pub mod logging; +pub mod network; pub mod tracker_api; pub mod udp_tracker; @@ -307,7 +310,7 @@ impl Configuration { /// and `None` otherwise. #[must_use] pub fn get_ext_ip(&self) -> Option { - self.core.external_ip.as_ref().map(|external_ip| *external_ip) + self.core.net.external_ip.as_ref().map(|external_ip| *external_ip) } /// Saves the default configuration at the given path. 
@@ -387,18 +390,20 @@ mod tests { mode = "public" announce_interval = 120 min_announce_interval = 120 - on_reverse_proxy = false - external_ip = "0.0.0.0" tracker_usage_statistics = true persistent_torrent_completed_stat = false max_peer_timeout = 900 inactive_peer_cleanup_interval = 600 remove_peerless_torrents = true - [core.database] - driver = "Sqlite3" - path = "./storage/tracker/lib/database/sqlite3.db" - + [core.database] + driver = "Sqlite3" + path = "./storage/tracker/lib/database/sqlite3.db" + + [core.net] + external_ip = "0.0.0.0" + on_reverse_proxy = false + [[udp_trackers]] enabled = false bind_address = "0.0.0.0:6969" @@ -443,7 +448,10 @@ mod tests { fn configuration_should_contain_the_external_ip() { let configuration = Configuration::default(); - assert_eq!(configuration.core.external_ip, Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)))); + assert_eq!( + configuration.core.net.external_ip, + Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) + ); } #[test] diff --git a/packages/configuration/src/v1/network.rs b/packages/configuration/src/v1/network.rs new file mode 100644 index 000000000..8e53d419c --- /dev/null +++ b/packages/configuration/src/v1/network.rs @@ -0,0 +1,41 @@ +use std::net::{IpAddr, Ipv4Addr}; + +use serde::{Deserialize, Serialize}; + +#[allow(clippy::struct_excessive_bools)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct Network { + /// The external IP address of the tracker. If the client is using a + /// loopback IP address, this IP address will be used instead. If the peer + /// is using a loopback IP address, the tracker assumes that the peer is + /// in the same network as the tracker and will use the tracker's IP + /// address instead. + #[serde(default = "Network::default_external_ip")] + pub external_ip: Option, + + /// Weather the tracker is behind a reverse proxy or not. 
+ /// If the tracker is behind a reverse proxy, the `X-Forwarded-For` header + /// sent from the proxy will be used to get the client's IP address. + #[serde(default = "Network::default_on_reverse_proxy")] + pub on_reverse_proxy: bool, +} + +impl Default for Network { + fn default() -> Self { + Self { + external_ip: Self::default_external_ip(), + on_reverse_proxy: Self::default_on_reverse_proxy(), + } + } +} + +impl Network { + #[allow(clippy::unnecessary_wraps)] + fn default_external_ip() -> Option { + Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) + } + + fn default_on_reverse_proxy() -> bool { + false + } +} diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index fe05407d9..9c6c0fe11 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -64,7 +64,7 @@ pub fn ephemeral() -> Configuration { pub fn ephemeral_with_reverse_proxy() -> Configuration { let mut cfg = ephemeral(); - cfg.core.on_reverse_proxy = true; + cfg.core.net.on_reverse_proxy = true; cfg } @@ -74,7 +74,7 @@ pub fn ephemeral_with_reverse_proxy() -> Configuration { pub fn ephemeral_without_reverse_proxy() -> Configuration { let mut cfg = ephemeral(); - cfg.core.on_reverse_proxy = false; + cfg.core.net.on_reverse_proxy = false; cfg } @@ -124,7 +124,7 @@ pub fn ephemeral_mode_private_whitelisted() -> Configuration { pub fn ephemeral_with_external_ip(ip: IpAddr) -> Configuration { let mut cfg = ephemeral(); - cfg.core.external_ip = Some(ip); + cfg.core.net.external_ip = Some(ip); cfg } diff --git a/src/core/mod.rs b/src/core/mod.rs index c5171ab58..f4eb2c335 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -320,16 +320,18 @@ //! announce_interval = 120 //! min_announce_interval = 120 //! max_peer_timeout = 900 -//! on_reverse_proxy = false -//! external_ip = "2.137.87.41" //! tracker_usage_statistics = true //! persistent_torrent_completed_stat = true //! inactive_peer_cleanup_interval = 600 //! 
remove_peerless_torrents = false //! -//! [core.database] -//! driver = "Sqlite3" -//! path = "./storage/tracker/lib/database/sqlite3.db" +//! [core.database] +//! driver = "Sqlite3" +//! path = "./storage/tracker/lib/database/sqlite3.db" +//! +//! [core.net] +//! on_reverse_proxy = false +//! external_ip = "2.137.87.41" //! ``` //! //! Refer to the [`configuration` module documentation](https://docs.rs/torrust-tracker-configuration) to get more information about all options. @@ -564,13 +566,13 @@ impl Tracker { stats_event_sender, stats_repository, database, - external_ip: config.external_ip, + external_ip: config.net.external_ip, policy: TrackerPolicy::new( config.remove_peerless_torrents, config.max_peer_timeout, config.persistent_torrent_completed_stat, ), - on_reverse_proxy: config.on_reverse_proxy, + on_reverse_proxy: config.net.on_reverse_proxy, }) } diff --git a/src/lib.rs b/src/lib.rs index b94da4717..bf9257123 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -172,12 +172,10 @@ //! //! [core] //! announce_interval = 120 -//! external_ip = "0.0.0.0" //! inactive_peer_cleanup_interval = 600 //! max_peer_timeout = 900 //! min_announce_interval = 120 //! mode = "public" -//! on_reverse_proxy = false //! persistent_torrent_completed_stat = false //! remove_peerless_torrents = true //! tracker_usage_statistics = true @@ -186,6 +184,10 @@ //! driver = "Sqlite3" //! path = "./storage/tracker/lib/database/sqlite3.db" //! +//! [core.net] +//! external_ip = "0.0.0.0" +//! on_reverse_proxy = false +//! //! [[udp_trackers]] //! bind_address = "0.0.0.0:6969" //! 
enabled = false diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 253140fbc..eee5e4688 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -151,7 +151,7 @@ mod tests { fn tracker_with_an_ipv6_external_ip(stats_event_sender: Box) -> Tracker { let mut configuration = configuration::ephemeral(); - configuration.core.external_ip = Some(IpAddr::V6(Ipv6Addr::new( + configuration.core.net.external_ip = Some(IpAddr::V6(Ipv6Addr::new( 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, ))); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 858d6606c..36825f084 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -425,7 +425,7 @@ mod tests { } pub fn with_external_ip(mut self, external_ip: &str) -> Self { - self.configuration.core.external_ip = Some(external_ip.to_owned().parse().expect("valid IP address")); + self.configuration.core.net.external_ip = Some(external_ip.to_owned().parse().expect("valid IP address")); self } From fc046e0441302c3af8db3a1b1173d38e4383369e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Jun 2024 17:21:03 +0100 Subject: [PATCH 0886/1003] feat!: [#878] extract announce_policy section in core config section --- packages/configuration/src/lib.rs | 4 +++- packages/configuration/src/v1/core.rs | 21 +++++++++------------ packages/configuration/src/v1/mod.rs | 12 ++++++++---- src/core/mod.rs | 14 ++++++++------ src/lib.rs | 18 ++++++++++-------- 5 files changed, 38 insertions(+), 31 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 46ece96ab..3b719f742 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -86,7 +86,7 @@ impl Info { } /// Announce policy -#[derive(PartialEq, Eq, Debug, Clone, Copy, Constructor)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy, Constructor)] 
pub struct AnnouncePolicy { /// Interval in seconds that the client should wait between sending regular /// announce requests to the tracker. @@ -99,6 +99,7 @@ pub struct AnnouncePolicy { /// client's initial request. It serves as a guideline for clients to know /// how often they should contact the tracker for updates on the peer list, /// while ensuring that the tracker is not overwhelmed with requests. + #[serde(default = "AnnouncePolicy::default_interval")] pub interval: u32, /// Minimum announce interval. Clients must not reannounce more frequently @@ -112,6 +113,7 @@ pub struct AnnouncePolicy { /// value to prevent sending too many requests in a short period, which /// could lead to excessive load on the tracker or even getting banned by /// the tracker for not adhering to the rules. + #[serde(default = "AnnouncePolicy::default_interval_min")] pub interval_min: u32, } diff --git a/packages/configuration/src/v1/core.rs b/packages/configuration/src/v1/core.rs index 5d8ab0b33..266da21ed 100644 --- a/packages/configuration/src/v1/core.rs +++ b/packages/configuration/src/v1/core.rs @@ -12,14 +12,6 @@ pub struct Core { #[serde(default = "Core::default_mode")] pub mode: TrackerMode, - /// See [`AnnouncePolicy::interval`] - #[serde(default = "AnnouncePolicy::default_interval")] - pub announce_interval: u32, - - /// See [`AnnouncePolicy::interval_min`] - #[serde(default = "AnnouncePolicy::default_interval_min")] - pub min_announce_interval: u32, - /// Weather the tracker should collect statistics about tracker usage. /// If enabled, the tracker will collect statistics like the number of /// connections handled, the number of announce requests handled, etc. @@ -53,6 +45,10 @@ pub struct Core { #[serde(default = "Core::default_remove_peerless_torrents")] pub remove_peerless_torrents: bool, + // Announce policy configuration. + #[serde(default = "Core::default_announce_policy")] + pub announce_policy: AnnouncePolicy, + // Database configuration. 
#[serde(default = "Core::default_database")] pub database: Database, @@ -64,17 +60,14 @@ pub struct Core { impl Default for Core { fn default() -> Self { - let announce_policy = AnnouncePolicy::default(); - Self { mode: Self::default_mode(), - announce_interval: announce_policy.interval, - min_announce_interval: announce_policy.interval_min, max_peer_timeout: Self::default_max_peer_timeout(), tracker_usage_statistics: Self::default_tracker_usage_statistics(), persistent_torrent_completed_stat: Self::default_persistent_torrent_completed_stat(), inactive_peer_cleanup_interval: Self::default_inactive_peer_cleanup_interval(), remove_peerless_torrents: Self::default_remove_peerless_torrents(), + announce_policy: Self::default_announce_policy(), database: Self::default_database(), net: Self::default_network(), } @@ -106,6 +99,10 @@ impl Core { true } + fn default_announce_policy() -> AnnouncePolicy { + AnnouncePolicy::default() + } + fn default_database() -> Database { Database::default() } diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index d96e1335c..d2f2a3012 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -198,14 +198,16 @@ //! //! [core] //! mode = "public" -//! announce_interval = 120 -//! min_announce_interval = 120 //! tracker_usage_statistics = true //! persistent_torrent_completed_stat = false //! max_peer_timeout = 900 //! inactive_peer_cleanup_interval = 600 //! remove_peerless_torrents = true //! +//! [core.announce_policy] +//! interval = 120 +//! interval_min = 120 +//! //! [core.database] //! driver = "Sqlite3" //! 
path = "./storage/tracker/lib/database/sqlite3.db" @@ -388,14 +390,16 @@ mod tests { [core] mode = "public" - announce_interval = 120 - min_announce_interval = 120 tracker_usage_statistics = true persistent_torrent_completed_stat = false max_peer_timeout = 900 inactive_peer_cleanup_interval = 600 remove_peerless_torrents = true + [core.announce_policy] + interval = 120 + interval_min = 120 + [core.database] driver = "Sqlite3" path = "./storage/tracker/lib/database/sqlite3.db" diff --git a/src/core/mod.rs b/src/core/mod.rs index f4eb2c335..77f7099af 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -113,10 +113,10 @@ //! } //! //! // Core tracker configuration -//! pub struct Configuration { +//! pub struct AnnounceInterval { //! // ... -//! pub announce_interval: u32, // Interval in seconds that the client should wait between sending regular announce requests to the tracker -//! pub min_announce_interval: u32, // Minimum announce interval. Clients must not reannounce more frequently than this +//! pub interval: u32, // Interval in seconds that the client should wait between sending regular announce requests to the tracker +//! pub interval_min: u32, // Minimum announce interval. Clients must not reannounce more frequently than this //! // ... //! } //! ``` @@ -317,14 +317,16 @@ //! //! [core] //! mode = "public" -//! announce_interval = 120 -//! min_announce_interval = 120 //! max_peer_timeout = 900 //! tracker_usage_statistics = true //! persistent_torrent_completed_stat = true //! inactive_peer_cleanup_interval = 600 //! remove_peerless_torrents = false //! +//! [core.announce_policy] +//! interval = 120 +//! interval_min = 120 +//! //! [core.database] //! driver = "Sqlite3" //! 
path = "./storage/tracker/lib/database/sqlite3.db" @@ -558,7 +560,7 @@ impl Tracker { Ok(Tracker { //config, - announce_policy: AnnouncePolicy::new(config.announce_interval, config.min_announce_interval), + announce_policy: config.announce_policy, mode, keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), diff --git a/src/lib.rs b/src/lib.rs index bf9257123..5c0fd4b56 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -171,22 +171,24 @@ //! log_level = "info" //! //! [core] -//! announce_interval = 120 //! inactive_peer_cleanup_interval = 600 //! max_peer_timeout = 900 -//! min_announce_interval = 120 //! mode = "public" //! persistent_torrent_completed_stat = false //! remove_peerless_torrents = true //! tracker_usage_statistics = true //! -//! [core.database] -//! driver = "Sqlite3" -//! path = "./storage/tracker/lib/database/sqlite3.db" +//! [core.announce_policy] +//! interval = 120 +//! interval_min = 120 //! -//! [core.net] -//! external_ip = "0.0.0.0" -//! on_reverse_proxy = false +//! [core.database] +//! driver = "Sqlite3" +//! path = "./storage/tracker/lib/database/sqlite3.db" +//! +//! [core.net] +//! external_ip = "0.0.0.0" +//! on_reverse_proxy = false //! //! [[udp_trackers]] //! 
bind_address = "0.0.0.0:6969" From 7b2f75724494c883c8e0d6faae9153c4ab47a562 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Jun 2024 18:19:57 +0100 Subject: [PATCH 0887/1003] feat!: [#878] extract tracker_policy section in core config section --- packages/configuration/src/lib.rs | 44 ++++++++++++++++++- packages/configuration/src/v1/core.rs | 40 +++-------------- packages/configuration/src/v1/mod.rs | 12 +++-- .../torrent-repository/tests/entry/mod.rs | 8 ++-- .../tests/repository/mod.rs | 8 ++-- src/core/mod.rs | 16 +++---- src/lib.rs | 6 ++- 7 files changed, 76 insertions(+), 58 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 3b719f742..594a283db 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -37,11 +37,51 @@ pub type HealthCheckApi = v1::health_check_api::HealthCheckApi; pub type AccessTokens = HashMap; -#[derive(Copy, Clone, Debug, PartialEq, Constructor)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Constructor)] pub struct TrackerPolicy { - pub remove_peerless_torrents: bool, + // Cleanup job configuration + /// Maximum time in seconds that a peer can be inactive before being + /// considered an inactive peer. If a peer is inactive for more than this + /// time, it will be removed from the torrent peer list. + #[serde(default = "TrackerPolicy::default_max_peer_timeout")] pub max_peer_timeout: u32, + + /// If enabled the tracker will persist the number of completed downloads. + /// That's how many times a torrent has been downloaded completely. + #[serde(default = "TrackerPolicy::default_persistent_torrent_completed_stat")] pub persistent_torrent_completed_stat: bool, + + /// If enabled, the tracker will remove torrents that have no peers. + /// The clean up torrent job runs every `inactive_peer_cleanup_interval` + /// seconds and it removes inactive peers. 
Eventually, the peer list of a + /// torrent could be empty and the torrent will be removed if this option is + /// enabled. + #[serde(default = "TrackerPolicy::default_remove_peerless_torrents")] + pub remove_peerless_torrents: bool, +} + +impl Default for TrackerPolicy { + fn default() -> Self { + Self { + max_peer_timeout: Self::default_max_peer_timeout(), + persistent_torrent_completed_stat: Self::default_persistent_torrent_completed_stat(), + remove_peerless_torrents: Self::default_remove_peerless_torrents(), + } + } +} + +impl TrackerPolicy { + fn default_max_peer_timeout() -> u32 { + 900 + } + + fn default_persistent_torrent_completed_stat() -> bool { + false + } + + fn default_remove_peerless_torrents() -> bool { + true + } } /// Information required for loading config diff --git a/packages/configuration/src/v1/core.rs b/packages/configuration/src/v1/core.rs index 266da21ed..49fdf2a80 100644 --- a/packages/configuration/src/v1/core.rs +++ b/packages/configuration/src/v1/core.rs @@ -3,7 +3,7 @@ use torrust_tracker_primitives::TrackerMode; use super::network::Network; use crate::v1::database::Database; -use crate::AnnouncePolicy; +use crate::{AnnouncePolicy, TrackerPolicy}; #[allow(clippy::struct_excessive_bools)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] @@ -20,30 +20,14 @@ pub struct Core { #[serde(default = "Core::default_tracker_usage_statistics")] pub tracker_usage_statistics: bool, - /// If enabled the tracker will persist the number of completed downloads. - /// That's how many times a torrent has been downloaded completely. - #[serde(default = "Core::default_persistent_torrent_completed_stat")] - pub persistent_torrent_completed_stat: bool, - - // Cleanup job configuration - /// Maximum time in seconds that a peer can be inactive before being - /// considered an inactive peer. If a peer is inactive for more than this - /// time, it will be removed from the torrent peer list. 
- #[serde(default = "Core::default_max_peer_timeout")] - pub max_peer_timeout: u32, - /// Interval in seconds that the cleanup job will run to remove inactive /// peers from the torrent peer list. #[serde(default = "Core::default_inactive_peer_cleanup_interval")] pub inactive_peer_cleanup_interval: u64, - /// If enabled, the tracker will remove torrents that have no peers. - /// The clean up torrent job runs every `inactive_peer_cleanup_interval` - /// seconds and it removes inactive peers. Eventually, the peer list of a - /// torrent could be empty and the torrent will be removed if this option is - /// enabled. - #[serde(default = "Core::default_remove_peerless_torrents")] - pub remove_peerless_torrents: bool, + // Tracker policy configuration. + #[serde(default = "Core::default_tracker_policy")] + pub tracker_policy: TrackerPolicy, // Announce policy configuration. #[serde(default = "Core::default_announce_policy")] @@ -62,11 +46,9 @@ impl Default for Core { fn default() -> Self { Self { mode: Self::default_mode(), - max_peer_timeout: Self::default_max_peer_timeout(), tracker_usage_statistics: Self::default_tracker_usage_statistics(), - persistent_torrent_completed_stat: Self::default_persistent_torrent_completed_stat(), inactive_peer_cleanup_interval: Self::default_inactive_peer_cleanup_interval(), - remove_peerless_torrents: Self::default_remove_peerless_torrents(), + tracker_policy: Self::default_tracker_policy(), announce_policy: Self::default_announce_policy(), database: Self::default_database(), net: Self::default_network(), @@ -83,20 +65,12 @@ impl Core { true } - fn default_persistent_torrent_completed_stat() -> bool { - false - } - - fn default_max_peer_timeout() -> u32 { - 900 - } - fn default_inactive_peer_cleanup_interval() -> u64 { 600 } - fn default_remove_peerless_torrents() -> bool { - true + fn default_tracker_policy() -> TrackerPolicy { + TrackerPolicy::default() } fn default_announce_policy() -> AnnouncePolicy { diff --git 
a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index d2f2a3012..19499cd4a 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -199,9 +199,11 @@ //! [core] //! mode = "public" //! tracker_usage_statistics = true -//! persistent_torrent_completed_stat = false -//! max_peer_timeout = 900 //! inactive_peer_cleanup_interval = 600 +//! +//! [core.tracker_policy] +//! max_peer_timeout = 900 +//! persistent_torrent_completed_stat = false //! remove_peerless_torrents = true //! //! [core.announce_policy] @@ -391,9 +393,11 @@ mod tests { [core] mode = "public" tracker_usage_statistics = true - persistent_torrent_completed_stat = false - max_peer_timeout = 900 inactive_peer_cleanup_interval = 600 + + [core.tracker_policy] + max_peer_timeout = 900 + persistent_torrent_completed_stat = false remove_peerless_torrents = true [core.announce_policy] diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index 3b9f3e3ad..fdbe211b3 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -43,22 +43,22 @@ fn rw_lock_parking_lot() -> Torrent { #[fixture] fn policy_none() -> TrackerPolicy { - TrackerPolicy::new(false, 0, false) + TrackerPolicy::new(0, false, false) } #[fixture] fn policy_persist() -> TrackerPolicy { - TrackerPolicy::new(false, 0, true) + TrackerPolicy::new(0, true, false) } #[fixture] fn policy_remove() -> TrackerPolicy { - TrackerPolicy::new(true, 0, false) + TrackerPolicy::new(0, false, true) } #[fixture] fn policy_remove_persist() -> TrackerPolicy { - TrackerPolicy::new(true, 0, true) + TrackerPolicy::new(0, true, true) } pub enum Makes { diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index dd9893cc9..b10f4a64a 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ 
b/packages/torrent-repository/tests/repository/mod.rs @@ -220,22 +220,22 @@ fn paginated_limit_one_offset_one() -> Pagination { #[fixture] fn policy_none() -> TrackerPolicy { - TrackerPolicy::new(false, 0, false) + TrackerPolicy::new(0, false, false) } #[fixture] fn policy_persist() -> TrackerPolicy { - TrackerPolicy::new(false, 0, true) + TrackerPolicy::new(0, true, false) } #[fixture] fn policy_remove() -> TrackerPolicy { - TrackerPolicy::new(true, 0, false) + TrackerPolicy::new(0, false, true) } #[fixture] fn policy_remove_persist() -> TrackerPolicy { - TrackerPolicy::new(true, 0, true) + TrackerPolicy::new(0, true, true) } #[rstest] diff --git a/src/core/mod.rs b/src/core/mod.rs index 77f7099af..bd7ce4883 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -317,11 +317,13 @@ //! //! [core] //! mode = "public" -//! max_peer_timeout = 900 //! tracker_usage_statistics = true -//! persistent_torrent_completed_stat = true //! inactive_peer_cleanup_interval = 600 -//! remove_peerless_torrents = false +//! +//! [core.tracker_policy] +//! max_peer_timeout = 900 +//! persistent_torrent_completed_stat = false +//! remove_peerless_torrents = true //! //! [core.announce_policy] //! interval = 120 @@ -569,11 +571,7 @@ impl Tracker { stats_repository, database, external_ip: config.net.external_ip, - policy: TrackerPolicy::new( - config.remove_peerless_torrents, - config.max_peer_timeout, - config.persistent_torrent_completed_stat, - ), + policy: config.tracker_policy.clone(), on_reverse_proxy: config.net.on_reverse_proxy, }) } @@ -1043,7 +1041,7 @@ mod tests { pub fn tracker_persisting_torrents_in_database() -> Tracker { let mut configuration = configuration::ephemeral(); - configuration.core.persistent_torrent_completed_stat = true; + configuration.core.tracker_policy.persistent_torrent_completed_stat = true; tracker_factory(&configuration) } diff --git a/src/lib.rs b/src/lib.rs index 5c0fd4b56..c059f6df7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -172,11 +172,13 @@ //! 
//! [core] //! inactive_peer_cleanup_interval = 600 -//! max_peer_timeout = 900 //! mode = "public" +//! tracker_usage_statistics = true +//! +//! [core.tracker_policy] +//! max_peer_timeout = 900 //! persistent_torrent_completed_stat = false //! remove_peerless_torrents = true -//! tracker_usage_statistics = true //! //! [core.announce_policy] //! interval = 120 From c5cc9fd6a461ad439fb81b405ecbe652d82fd4fb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Jun 2024 19:34:56 +0100 Subject: [PATCH 0888/1003] feat: [#878] extract tsl_config in toml config TLS configuration for HTTP trackers and the Tracker API is still optional. However, when it's provided, it is enabled. The `ssl_enabled` field was removed. You can remove the whole `tsl_config` section to disable TLS. If you want to keep a copy in the TOML file you can just comment out the lines. ```toml [[http_trackers]] ... [http_trackers.tsl_config] ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" ssl_key_path = "./storage/tracker/lib/tls/localhost.key" [http_api] ... 
[http_api.tsl_config] ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" ssl_key_path = "./storage/tracker/lib/tls/localhost.key" ``` --- Cargo.toml | 2 +- docs/containers.md | 23 +++--- packages/configuration/Cargo.toml | 2 +- packages/configuration/src/lib.rs | 17 +++-- packages/configuration/src/v1/http_tracker.rs | 19 +++-- packages/configuration/src/v1/mod.rs | 20 ++---- packages/configuration/src/v1/tracker_api.rs | 19 +++-- .../config/tracker.container.mysql.toml | 8 +-- .../config/tracker.container.sqlite3.toml | 8 --- .../config/tracker.e2e.container.sqlite3.toml | 6 -- src/bootstrap/jobs/http_tracker.rs | 2 +- src/bootstrap/jobs/mod.rs | 70 +++++++++---------- src/bootstrap/jobs/tracker_apis.rs | 2 +- src/lib.rs | 9 --- src/servers/apis/mod.rs | 12 ++-- src/servers/apis/server.rs | 2 +- src/servers/http/server.rs | 2 +- tests/servers/api/environment.rs | 2 +- tests/servers/http/environment.rs | 2 +- 19 files changed, 92 insertions(+), 135 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 072a21a7e..c22c3dd45 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,7 +37,7 @@ axum = { version = "0", features = ["macros"] } axum-client-ip = "0" axum-extra = { version = "0", features = ["query"] } axum-server = { version = "0", features = ["tls-rustls"] } -camino = { version = "1.1.6", features = ["serde"] } +camino = { version = "1.1.6", features = ["serde", "serde1"] } chrono = { version = "0", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } crossbeam-skiplist = "0.1" diff --git a/docs/containers.md b/docs/containers.md index 1a1ea2f0d..82c67c26e 100644 --- a/docs/containers.md +++ b/docs/containers.md @@ -330,24 +330,23 @@ The storage folder must contain your certificates: ```s storage/tracker/lib/tls - ├── localhost.crt - └── localhost.key + ├── localhost.crt + └── localhost.key +storage/http_api/lib/tls + ├── localhost.crt + └── localhost.key ``` You have not enabled it in your `tracker.toml` 
file: ```toml +[http_trackers.tsl_config] +ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" +ssl_key_path = "./storage/tracker/lib/tls/localhost.key" -[[http_trackers]] -# ... -ssl_enabled = true -# ... - -[http_api] -# ... -ssl_enabled = true -# ... - +[http_api.tsl_config] +ssl_cert_path = "./storage/http_api/lib/tls/localhost.crt" +ssl_key_path = "./storage/http_api/lib/tls/localhost.key" ``` > NOTE: you can enable it independently for each HTTP tracker or the API. diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index bac2132d5..53e4e4cfa 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -15,7 +15,7 @@ rust-version.workspace = true version.workspace = true [dependencies] -camino = { version = "1.1.6", features = ["serde"] } +camino = { version = "1.1.6", features = ["serde", "serde1"] } derive_more = "0" figment = { version = "0.10.18", features = ["env", "test", "toml"] } serde = { version = "1", features = ["derive"] } diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 594a283db..c8c91443a 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -13,7 +13,7 @@ use std::sync::Arc; use camino::Utf8PathBuf; use derive_more::Constructor; use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, NoneAsEmptyString}; +use serde_with::serde_as; use thiserror::Error; use torrust_tracker_located_error::{DynError, LocatedError}; @@ -215,24 +215,23 @@ impl From for Error { #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Default)] pub struct TslConfig { /// Path to the SSL certificate file. - #[serde_as(as = "NoneAsEmptyString")] #[serde(default = "TslConfig::default_ssl_cert_path")] - pub ssl_cert_path: Option, + pub ssl_cert_path: Utf8PathBuf, + /// Path to the SSL key file. 
- #[serde_as(as = "NoneAsEmptyString")] #[serde(default = "TslConfig::default_ssl_key_path")] - pub ssl_key_path: Option, + pub ssl_key_path: Utf8PathBuf, } impl TslConfig { #[allow(clippy::unnecessary_wraps)] - fn default_ssl_cert_path() -> Option { - Some(Utf8PathBuf::new()) + fn default_ssl_cert_path() -> Utf8PathBuf { + Utf8PathBuf::new() } #[allow(clippy::unnecessary_wraps)] - fn default_ssl_key_path() -> Option { - Some(Utf8PathBuf::new()) + fn default_ssl_key_path() -> Utf8PathBuf { + Utf8PathBuf::new() } } diff --git a/packages/configuration/src/v1/http_tracker.rs b/packages/configuration/src/v1/http_tracker.rs index b1fe1437b..fed2282a5 100644 --- a/packages/configuration/src/v1/http_tracker.rs +++ b/packages/configuration/src/v1/http_tracker.rs @@ -12,19 +12,17 @@ pub struct HttpTracker { /// Weather the HTTP tracker is enabled or not. #[serde(default = "HttpTracker::default_enabled")] pub enabled: bool, + /// The address the tracker will bind to. /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to /// listen to all interfaces, use `0.0.0.0`. If you want the operating /// system to choose a random port, use port `0`. #[serde(default = "HttpTracker::default_bind_address")] pub bind_address: SocketAddr, - /// Weather the HTTP tracker will use SSL or not. - #[serde(default = "HttpTracker::default_ssl_enabled")] - pub ssl_enabled: bool, - /// TSL config. Only used if `ssl_enabled` is true. - #[serde(flatten)] - #[serde(default = "TslConfig::default")] - pub tsl_config: TslConfig, + + /// TSL config. 
+ #[serde(default = "HttpTracker::default_tsl_config")] + pub tsl_config: Option, } impl Default for HttpTracker { @@ -32,8 +30,7 @@ impl Default for HttpTracker { Self { enabled: Self::default_enabled(), bind_address: Self::default_bind_address(), - ssl_enabled: Self::default_ssl_enabled(), - tsl_config: TslConfig::default(), + tsl_config: Self::default_tsl_config(), } } } @@ -47,7 +44,7 @@ impl HttpTracker { SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 7070) } - fn default_ssl_enabled() -> bool { - false + fn default_tsl_config() -> Option { + None } } diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 19499cd4a..603be85d2 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -176,14 +176,16 @@ //! //! ```s //! [[http_trackers]] -//! enabled = true //! ... +//! +//! [http_trackers.tsl_config] //! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" //! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" //! //! [http_api] -//! enabled = true //! ... +//! +//! [http_api.tsl_config] //! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" //! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" //! ``` @@ -225,16 +227,10 @@ //! [[http_trackers]] //! enabled = false //! bind_address = "0.0.0.0:7070" -//! ssl_enabled = false -//! ssl_cert_path = "" -//! ssl_key_path = "" //! //! [http_api] //! enabled = true //! bind_address = "127.0.0.1:1212" -//! ssl_enabled = false -//! ssl_cert_path = "" -//! ssl_key_path = "" //! //! [http_api.access_tokens] //! 
admin = "MyAccessToken" @@ -419,16 +415,10 @@ mod tests { [[http_trackers]] enabled = false bind_address = "0.0.0.0:7070" - ssl_enabled = false - ssl_cert_path = "" - ssl_key_path = "" - + [http_api] enabled = true bind_address = "127.0.0.1:1212" - ssl_enabled = false - ssl_cert_path = "" - ssl_key_path = "" [http_api.access_tokens] admin = "MyAccessToken" diff --git a/packages/configuration/src/v1/tracker_api.rs b/packages/configuration/src/v1/tracker_api.rs index c2e3e5ee9..42794ad18 100644 --- a/packages/configuration/src/v1/tracker_api.rs +++ b/packages/configuration/src/v1/tracker_api.rs @@ -15,19 +15,18 @@ pub struct HttpApi { /// Weather the HTTP API is enabled or not. #[serde(default = "HttpApi::default_enabled")] pub enabled: bool, + /// The address the tracker will bind to. /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to /// listen to all interfaces, use `0.0.0.0`. If you want the operating /// system to choose a random port, use port `0`. #[serde(default = "HttpApi::default_bind_address")] pub bind_address: SocketAddr, - /// Weather the HTTP API will use SSL or not. - #[serde(default = "HttpApi::default_ssl_enabled")] - pub ssl_enabled: bool, + /// TSL config. Only used if `ssl_enabled` is true. - #[serde(flatten)] - #[serde(default = "TslConfig::default")] - pub tsl_config: TslConfig, + #[serde(default = "HttpApi::default_tsl_config")] + pub tsl_config: Option, + /// Access tokens for the HTTP API. The key is a label identifying the /// token and the value is the token itself. The token is used to /// authenticate the user. 
All tokens are valid for all endpoints and have @@ -41,8 +40,7 @@ impl Default for HttpApi { Self { enabled: Self::default_enabled(), bind_address: Self::default_bind_address(), - ssl_enabled: Self::default_ssl_enabled(), - tsl_config: TslConfig::default(), + tsl_config: Self::default_tsl_config(), access_tokens: Self::default_access_tokens(), } } @@ -57,8 +55,9 @@ impl HttpApi { SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1212) } - fn default_ssl_enabled() -> bool { - false + #[allow(clippy::unnecessary_wraps)] + fn default_tsl_config() -> Option { + None } fn default_access_tokens() -> AccessTokens { diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml index 75cc57b64..70ee8b500 100644 --- a/share/default/config/tracker.container.mysql.toml +++ b/share/default/config/tracker.container.mysql.toml @@ -3,9 +3,5 @@ driver = "MySQL" path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" [[http_trackers]] -ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" -ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" - -[http_api] -ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" -ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" +bind_address = "0.0.0.0:7070" +enabled = true diff --git a/share/default/config/tracker.container.sqlite3.toml b/share/default/config/tracker.container.sqlite3.toml index 433e36127..f7bb6b8bb 100644 --- a/share/default/config/tracker.container.sqlite3.toml +++ b/share/default/config/tracker.container.sqlite3.toml @@ -1,10 +1,2 @@ [core.database] path = "/var/lib/torrust/tracker/database/sqlite3.db" - -[[http_trackers]] -ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" -ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" - -[http_api] -ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" -ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" diff --git 
a/share/default/config/tracker.e2e.container.sqlite3.toml b/share/default/config/tracker.e2e.container.sqlite3.toml index b8adedefb..744d267fd 100644 --- a/share/default/config/tracker.e2e.container.sqlite3.toml +++ b/share/default/config/tracker.e2e.container.sqlite3.toml @@ -6,12 +6,6 @@ enabled = true [[http_trackers]] enabled = true -ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" -ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" - -[http_api] -ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" -ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" [health_check_api] # Must be bound to wildcard IP to be accessible from outside the container diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index e9eb6bc16..05bfe2341 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -42,7 +42,7 @@ pub async fn start_job( if config.enabled { let socket = config.bind_address; - let tls = make_rust_tls(config.ssl_enabled, &config.tsl_config) + let tls = make_rust_tls(&config.tsl_config) .await .map(|tls| tls.expect("it should have a valid http tracker tls configuration")); diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index 316e5746c..f42a843f4 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -20,27 +20,33 @@ pub struct Started { pub address: std::net::SocketAddr, } -pub async fn make_rust_tls(enabled: bool, tsl_config: &TslConfig) -> Option> { - if !enabled { - info!("TLS not enabled"); - return None; - } +pub async fn make_rust_tls(opt_tsl_config: &Option) -> Option> { + match opt_tsl_config { + Some(tsl_config) => { + let cert = tsl_config.ssl_cert_path.clone(); + let key = tsl_config.ssl_key_path.clone(); - if let (Some(cert), Some(key)) = (tsl_config.ssl_cert_path.clone(), tsl_config.ssl_key_path.clone()) { - info!("Using https: cert path: {cert}."); - info!("Using https: key path: {key}."); - - Some( - 
RustlsConfig::from_pem_file(cert, key) - .await - .map_err(|err| Error::BadTlsConfig { - source: (Arc::new(err) as DynError).into(), - }), - ) - } else { - Some(Err(Error::MissingTlsConfig { - location: Location::caller(), - })) + if !cert.exists() || !key.exists() { + return Some(Err(Error::MissingTlsConfig { + location: Location::caller(), + })); + } + + info!("Using https: cert path: {cert}."); + info!("Using https: key path: {key}."); + + Some( + RustlsConfig::from_pem_file(cert, key) + .await + .map_err(|err| Error::BadTlsConfig { + source: (Arc::new(err) as DynError).into(), + }), + ) + } + None => { + info!("TLS not enabled"); + None + } } } @@ -54,29 +60,23 @@ mod tests { #[tokio::test] async fn it_should_error_on_bad_tls_config() { - let err = make_rust_tls( - true, - &TslConfig { - ssl_cert_path: Some(Utf8PathBuf::from("bad cert path")), - ssl_key_path: Some(Utf8PathBuf::from("bad key path")), - }, - ) + let err = make_rust_tls(&Some(TslConfig { + ssl_cert_path: Utf8PathBuf::from("bad cert path"), + ssl_key_path: Utf8PathBuf::from("bad key path"), + })) .await .expect("tls_was_enabled") .expect_err("bad_cert_and_key_files"); - assert!(matches!(err, Error::BadTlsConfig { source: _ })); + assert!(matches!(err, Error::MissingTlsConfig { location: _ })); } #[tokio::test] async fn it_should_error_on_missing_cert_or_key_paths() { - let err = make_rust_tls( - true, - &TslConfig { - ssl_cert_path: None, - ssl_key_path: None, - }, - ) + let err = make_rust_tls(&Some(TslConfig { + ssl_cert_path: Utf8PathBuf::from(""), + ssl_key_path: Utf8PathBuf::from(""), + })) .await .expect("tls_was_enabled") .expect_err("missing_config"); diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 3c1f13255..c3b12d7a1 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -63,7 +63,7 @@ pub async fn start_job( if config.enabled { let bind_to = config.bind_address; - let tls = make_rust_tls(config.ssl_enabled, 
&config.tsl_config) + let tls = make_rust_tls(&config.tsl_config) .await .map(|tls| tls.expect("it should have a valid tracker api tls configuration")); diff --git a/src/lib.rs b/src/lib.rs index c059f6df7..7f8c70a47 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -199,16 +199,10 @@ //! [[http_trackers]] //! bind_address = "0.0.0.0:7070" //! enabled = false -//! ssl_cert_path = "" -//! ssl_enabled = false -//! ssl_key_path = "" //! //! [http_api] //! bind_address = "127.0.0.1:1212" //! enabled = true -//! ssl_cert_path = "" -//! ssl_enabled = false -//! ssl_key_path = "" //! //! [http_api.access_tokens] //! admin = "MyAccessToken" @@ -261,9 +255,6 @@ //! [http_api] //! enabled = true //! bind_address = "127.0.0.1:1212" -//! ssl_enabled = false -//! ssl_cert_path = "" -//! ssl_key_path = "" //! ``` //! //! By default it's enabled on port `1212`. You also need to add access tokens in the configuration: diff --git a/src/servers/apis/mod.rs b/src/servers/apis/mod.rs index 47d40c654..02b93efa6 100644 --- a/src/servers/apis/mod.rs +++ b/src/servers/apis/mod.rs @@ -27,7 +27,8 @@ //! [http_api] //! enabled = true //! bind_address = "0.0.0.0:1212" -//! ssl_enabled = false +//! +//! [http_api.tsl_config] //! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" //! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" //! @@ -106,16 +107,15 @@ //! //! # Setup SSL (optional) //! -//! The API server supports SSL. You can enable it by setting the -//! [`ssl_enabled`](torrust_tracker_configuration::HttpApi::ssl_enabled) option -//! to `true` in the configuration file -//! ([`http_api`](torrust_tracker_configuration::HttpApi) section). +//! The API server supports SSL. You can enable it by adding the `tsl_config` +//! section to the configuration. //! //! ```toml //! [http_api] //! enabled = true //! bind_address = "0.0.0.0:1212" -//! ssl_enabled = true +//! +//! [http_api.tsl_config] //! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" //! 
ssl_key_path = "./storage/tracker/lib/tls/localhost.key" //! diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 7c5b8983b..74dc89692 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -280,7 +280,7 @@ mod tests { let bind_to = config.bind_address; - let tls = make_rust_tls(config.ssl_enabled, &config.tsl_config) + let tls = make_rust_tls(&config.tsl_config) .await .map(|tls| tls.expect("tls config failed")); diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 5c33fc8fa..bbe0c3cc1 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -239,7 +239,7 @@ mod tests { let bind_to = config.bind_address; - let tls = make_rust_tls(config.ssl_enabled, &config.tsl_config) + let tls = make_rust_tls(&config.tsl_config) .await .map(|tls| tls.expect("tls config failed")); diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index cacde8af9..8f84620dd 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -35,7 +35,7 @@ impl Environment { let bind_to = config.bind_address; - let tls = block_on(make_rust_tls(config.ssl_enabled, &config.tsl_config)).map(|tls| tls.expect("tls config failed")); + let tls = block_on(make_rust_tls(&config.tsl_config)).map(|tls| tls.expect("tls config failed")); let server = ApiServer::new(Launcher::new(bind_to, tls)); diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 61837c40f..6e80569ec 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -33,7 +33,7 @@ impl Environment { let bind_to = config.bind_address; - let tls = block_on(make_rust_tls(config.ssl_enabled, &config.tsl_config)).map(|tls| tls.expect("tls config failed")); + let tls = block_on(make_rust_tls(&config.tsl_config)).map(|tls| tls.expect("tls config failed")); let server = HttpServer::new(Launcher::new(bind_to, tls)); From 50bef25af092414b46d13ce393dacc22b4f9a2cf 
Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Jun 2024 08:36:57 +0100 Subject: [PATCH 0889/1003] feat: remove ambiguous log entry It removes this line: ``` 2024-06-17T18:52:49.196708Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled ``` because it doesn't specify which service the TLS is not enabled for. On the other hand, the output already indicates whether the service is running on HTTP or HTTPS: ``` 2024-06-18T07:37:58.595692Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 ``` --- src/bootstrap/jobs/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/bootstrap/jobs/mod.rs index f42a843f4..87a607720 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -44,7 +44,6 @@ pub async fn make_rust_tls(opt_tsl_config: &Option) -> Option { - info!("TLS not enabled"); None } } From 06ad5dabe82ebef947b8b54c19ac9a74eca33335 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Jun 2024 10:11:11 +0100 Subject: [PATCH 0890/1003] feat!: [#878] remove enabled fields in config By default all services are disabled. If the service section is missing in the TOML config file it means the service is disabled. From: ```toml [[udp_trackers]] enabled = false bind_address = "0.0.0.0:6969" ``` To: ```toml ``` The `http_api` section has been disabled by default because there is no way to override it to disable it, if it's enabled by default. You need to explicitly enable the API now. 
--- docs/benchmarking.md | 4 +- packages/configuration/src/v1/http_tracker.rs | 9 --- packages/configuration/src/v1/mod.rs | 47 ++--------- packages/configuration/src/v1/tracker_api.rs | 9 --- packages/configuration/src/v1/udp_tracker.rs | 8 -- packages/test-helpers/src/configuration.rs | 39 ++++++--- .../config/tracker.container.mysql.toml | 16 +++- .../config/tracker.container.sqlite3.toml | 14 ++++ .../config/tracker.development.sqlite3.toml | 10 ++- .../config/tracker.e2e.container.sqlite3.toml | 10 ++- .../config/tracker.udp.benchmarking.toml | 5 +- src/app.rs | 81 ++++++++++--------- src/bootstrap/jobs/http_tracker.rs | 21 ++--- src/bootstrap/jobs/mod.rs | 4 +- src/bootstrap/jobs/tracker_apis.rs | 22 ++--- src/lib.rs | 21 +---- src/servers/apis/mod.rs | 2 - src/servers/apis/server.rs | 2 +- src/servers/http/server.rs | 5 +- src/servers/udp/server.rs | 3 +- tests/servers/api/environment.rs | 2 +- tests/servers/http/environment.rs | 7 +- tests/servers/udp/environment.rs | 4 +- 23 files changed, 156 insertions(+), 189 deletions(-) diff --git a/docs/benchmarking.md b/docs/benchmarking.md index 67b680fdc..ce3b69057 100644 --- a/docs/benchmarking.md +++ b/docs/benchmarking.md @@ -33,7 +33,7 @@ Run the tracker with UDP service enabled and other services disabled and set log log_level = "error" [[udp_trackers]] -enabled = true +bind_address = "0.0.0.0:6969" ``` Build and run the tracker: @@ -168,7 +168,7 @@ Run the tracker with UDP service enabled and other services disabled and set log log_level = "error" [[udp_trackers]] -enabled = true +bind_address = "0.0.0.0:6969" ``` ```console diff --git a/packages/configuration/src/v1/http_tracker.rs b/packages/configuration/src/v1/http_tracker.rs index fed2282a5..42ec02bf2 100644 --- a/packages/configuration/src/v1/http_tracker.rs +++ b/packages/configuration/src/v1/http_tracker.rs @@ -9,10 +9,6 @@ use crate::TslConfig; #[serde_as] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct HttpTracker { - /// 
Weather the HTTP tracker is enabled or not. - #[serde(default = "HttpTracker::default_enabled")] - pub enabled: bool, - /// The address the tracker will bind to. /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to /// listen to all interfaces, use `0.0.0.0`. If you want the operating @@ -28,7 +24,6 @@ pub struct HttpTracker { impl Default for HttpTracker { fn default() -> Self { Self { - enabled: Self::default_enabled(), bind_address: Self::default_bind_address(), tsl_config: Self::default_tsl_config(), } @@ -36,10 +31,6 @@ impl Default for HttpTracker { } impl HttpTracker { - fn default_enabled() -> bool { - false - } - fn default_bind_address() -> SocketAddr { SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 7070) } diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 603be85d2..546f55b6e 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -220,16 +220,7 @@ //! external_ip = "0.0.0.0" //! on_reverse_proxy = false //! -//! [[udp_trackers]] -//! enabled = false -//! bind_address = "0.0.0.0:6969" -//! -//! [[http_trackers]] -//! enabled = false -//! bind_address = "0.0.0.0:7070" -//! //! [http_api] -//! enabled = true //! bind_address = "127.0.0.1:1212" //! //! [http_api.access_tokens] @@ -267,7 +258,7 @@ const CONFIG_OVERRIDE_PREFIX: &str = "TORRUST_TRACKER_CONFIG_OVERRIDE_"; const CONFIG_OVERRIDE_SEPARATOR: &str = "__"; /// Core configuration for the tracker. -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Default)] pub struct Configuration { /// Logging configuration pub logging: Logging, @@ -278,33 +269,20 @@ pub struct Configuration { /// The list of UDP trackers the tracker is running. Each UDP tracker /// represents a UDP server that the tracker is running and it has its own /// configuration. 
- pub udp_trackers: Vec, + pub udp_trackers: Option>, /// The list of HTTP trackers the tracker is running. Each HTTP tracker /// represents a HTTP server that the tracker is running and it has its own /// configuration. - pub http_trackers: Vec, + pub http_trackers: Option>, /// The HTTP API configuration. - pub http_api: HttpApi, + pub http_api: Option, /// The Health Check API configuration. pub health_check_api: HealthCheckApi, } -impl Default for Configuration { - fn default() -> Self { - Self { - logging: Logging::default(), - core: Core::default(), - udp_trackers: vec![UdpTracker::default()], - http_trackers: vec![HttpTracker::default()], - http_api: HttpApi::default(), - health_check_api: HealthCheckApi::default(), - } - } -} - impl Configuration { /// Returns the tracker public IP address id defined in the configuration, /// and `None` otherwise. @@ -408,21 +386,6 @@ mod tests { external_ip = "0.0.0.0" on_reverse_proxy = false - [[udp_trackers]] - enabled = false - bind_address = "0.0.0.0:6969" - - [[http_trackers]] - enabled = false - bind_address = "0.0.0.0:7070" - - [http_api] - enabled = true - bind_address = "127.0.0.1:1212" - - [http_api.access_tokens] - admin = "MyAccessToken" - [health_check_api] bind_address = "127.0.0.1:1313" "# @@ -556,7 +519,7 @@ mod tests { let configuration = Configuration::load(&info).expect("Could not load configuration from file"); assert_eq!( - configuration.http_api.access_tokens.get("admin"), + configuration.http_api.unwrap().access_tokens.get("admin"), Some("NewToken".to_owned()).as_ref() ); diff --git a/packages/configuration/src/v1/tracker_api.rs b/packages/configuration/src/v1/tracker_api.rs index 42794ad18..302a4ee95 100644 --- a/packages/configuration/src/v1/tracker_api.rs +++ b/packages/configuration/src/v1/tracker_api.rs @@ -12,10 +12,6 @@ pub type AccessTokens = HashMap; #[serde_as] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct HttpApi { - /// Weather the HTTP API is enabled or not. 
- #[serde(default = "HttpApi::default_enabled")] - pub enabled: bool, - /// The address the tracker will bind to. /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to /// listen to all interfaces, use `0.0.0.0`. If you want the operating @@ -38,7 +34,6 @@ pub struct HttpApi { impl Default for HttpApi { fn default() -> Self { Self { - enabled: Self::default_enabled(), bind_address: Self::default_bind_address(), tsl_config: Self::default_tsl_config(), access_tokens: Self::default_access_tokens(), @@ -47,10 +42,6 @@ impl Default for HttpApi { } impl HttpApi { - fn default_enabled() -> bool { - true - } - fn default_bind_address() -> SocketAddr { SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1212) } diff --git a/packages/configuration/src/v1/udp_tracker.rs b/packages/configuration/src/v1/udp_tracker.rs index f8387202e..b3d420d72 100644 --- a/packages/configuration/src/v1/udp_tracker.rs +++ b/packages/configuration/src/v1/udp_tracker.rs @@ -4,9 +4,6 @@ use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct UdpTracker { - /// Weather the UDP tracker is enabled or not. - #[serde(default = "UdpTracker::default_enabled")] - pub enabled: bool, /// The address the tracker will bind to. /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to /// listen to all interfaces, use `0.0.0.0`. 
If you want the operating @@ -17,17 +14,12 @@ pub struct UdpTracker { impl Default for UdpTracker { fn default() -> Self { Self { - enabled: Self::default_enabled(), bind_address: Self::default_bind_address(), } } } impl UdpTracker { - fn default_enabled() -> bool { - false - } - fn default_bind_address() -> SocketAddr { SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 6969) } diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 9c6c0fe11..646617b32 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -2,7 +2,7 @@ use std::env; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; -use torrust_tracker_configuration::{Configuration, LogLevel}; +use torrust_tracker_configuration::{Configuration, HttpApi, HttpTracker, LogLevel, UdpTracker}; use torrust_tracker_primitives::TrackerMode; use crate::random; @@ -33,8 +33,10 @@ pub fn ephemeral() -> Configuration { // Ephemeral socket address for API let api_port = 0u16; - config.http_api.enabled = true; - config.http_api.bind_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), api_port); + config.http_api = Some(HttpApi { + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), api_port), + ..Default::default() + }); // Ephemeral socket address for Health Check API let health_check_api_port = 0u16; @@ -42,13 +44,16 @@ pub fn ephemeral() -> Configuration { // Ephemeral socket address for UDP tracker let udp_port = 0u16; - config.udp_trackers[0].enabled = true; - config.udp_trackers[0].bind_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), udp_port); + config.udp_trackers = Some(vec![UdpTracker { + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), udp_port), + }]); // Ephemeral socket address for HTTP tracker let http_port = 0u16; - config.http_trackers[0].enabled = true; - config.http_trackers[0].bind_address = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), http_port); + config.http_trackers = Some(vec![HttpTracker { + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), http_port), + tsl_config: None, + }]); // Ephemeral sqlite database let temp_directory = env::temp_dir(); @@ -137,9 +142,17 @@ pub fn ephemeral_ipv6() -> Configuration { let ipv6 = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), 0); - cfg.http_api.bind_address.clone_from(&ipv6); - cfg.http_trackers[0].bind_address.clone_from(&ipv6); - cfg.udp_trackers[0].bind_address = ipv6; + if let Some(ref mut http_api) = cfg.http_api { + http_api.bind_address.clone_from(&ipv6); + }; + + if let Some(ref mut http_trackers) = cfg.http_trackers { + http_trackers[0].bind_address.clone_from(&ipv6); + } + + if let Some(ref mut udp_trackers) = cfg.udp_trackers { + udp_trackers[0].bind_address.clone_from(&ipv6); + } cfg } @@ -149,9 +162,9 @@ pub fn ephemeral_ipv6() -> Configuration { pub fn ephemeral_with_no_services() -> Configuration { let mut cfg = ephemeral(); - cfg.http_api.enabled = false; - cfg.http_trackers[0].enabled = false; - cfg.udp_trackers[0].enabled = false; + cfg.http_api = None; + cfg.http_trackers = None; + cfg.udp_trackers = None; cfg } diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml index 70ee8b500..68cc8db8a 100644 --- a/share/default/config/tracker.container.mysql.toml +++ b/share/default/config/tracker.container.mysql.toml @@ -2,6 +2,16 @@ driver = "MySQL" path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" -[[http_trackers]] -bind_address = "0.0.0.0:7070" -enabled = true +# Uncomment to enable services + +#[[udp_trackers]] +#bind_address = "0.0.0.0:6969" + +#[[http_trackers]] +#bind_address = "0.0.0.0:7070" + +#[http_api] +#bind_address = "0.0.0.0:1212" + +#[http_api.access_tokens] +#admin = "MyAccessToken" diff --git 
a/share/default/config/tracker.container.sqlite3.toml b/share/default/config/tracker.container.sqlite3.toml index f7bb6b8bb..63e169a70 100644 --- a/share/default/config/tracker.container.sqlite3.toml +++ b/share/default/config/tracker.container.sqlite3.toml @@ -1,2 +1,16 @@ [core.database] path = "/var/lib/torrust/tracker/database/sqlite3.db" + +# Uncomment to enable services + +#[[udp_trackers]] +#bind_address = "0.0.0.0:6969" + +#[[http_trackers]] +#bind_address = "0.0.0.0:7070" + +#[http_api] +#bind_address = "0.0.0.0:1212" + +#[http_api.access_tokens] +#admin = "MyAccessToken" diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index bf6478492..84754794e 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -1,5 +1,11 @@ [[udp_trackers]] -enabled = true +bind_address = "0.0.0.0:6969" [[http_trackers]] -enabled = true +bind_address = "0.0.0.0:7070" + +[http_api] +bind_address = "0.0.0.0:1212" + +[http_api.access_tokens] +admin = "MyAccessToken" diff --git a/share/default/config/tracker.e2e.container.sqlite3.toml b/share/default/config/tracker.e2e.container.sqlite3.toml index 744d267fd..fb33a8e32 100644 --- a/share/default/config/tracker.e2e.container.sqlite3.toml +++ b/share/default/config/tracker.e2e.container.sqlite3.toml @@ -2,10 +2,16 @@ path = "/var/lib/torrust/tracker/database/sqlite3.db" [[udp_trackers]] -enabled = true +bind_address = "0.0.0.0:6969" [[http_trackers]] -enabled = true +bind_address = "0.0.0.0:7070" + +[http_api] +bind_address = "0.0.0.0:1212" + +[http_api.access_tokens] +admin = "MyAccessToken" [health_check_api] # Must be bound to wildcard IP to be accessible from outside the container diff --git a/share/default/config/tracker.udp.benchmarking.toml b/share/default/config/tracker.udp.benchmarking.toml index cd193c40a..d9361cf10 100644 --- a/share/default/config/tracker.udp.benchmarking.toml +++ 
b/share/default/config/tracker.udp.benchmarking.toml @@ -6,7 +6,4 @@ remove_peerless_torrents = false tracker_usage_statistics = false [[udp_trackers]] -enabled = true - -[http_api] -enabled = false +bind_address = "0.0.0.0:6969" diff --git a/src/app.rs b/src/app.rs index b41f4098e..f6a909002 100644 --- a/src/app.rs +++ b/src/app.rs @@ -25,7 +25,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; -use tracing::warn; +use tracing::{info, warn}; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::servers::registar::Registar; @@ -59,51 +59,56 @@ pub async fn start(config: &Configuration, tracker: Arc) -> Vec { + for udp_tracker_config in udp_trackers { + if tracker.is_private() { + warn!( + "Could not start UDP tracker on: {} while in {:?}. UDP is not safe for private trackers!", + udp_tracker_config.bind_address, config.core.mode + ); + } else { + jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone(), registar.give_form()).await); + } + } } + None => info!("No UDP blocks in configuration"), } // Start the HTTP blocks - for http_tracker_config in &config.http_trackers { - if !http_tracker_config.enabled { - continue; + match &config.http_trackers { + Some(http_trackers) => { + for http_tracker_config in http_trackers { + if let Some(job) = http_tracker::start_job( + http_tracker_config, + tracker.clone(), + registar.give_form(), + servers::http::Version::V1, + ) + .await + { + jobs.push(job); + }; + } } - - if let Some(job) = http_tracker::start_job( - http_tracker_config, - tracker.clone(), - registar.give_form(), - servers::http::Version::V1, - ) - .await - { - jobs.push(job); - }; + None => info!("No HTTP blocks in configuration"), } // Start HTTP API - if config.http_api.enabled { - if let Some(job) = tracker_apis::start_job( - &config.http_api, - tracker.clone(), - registar.give_form(), - servers::apis::Version::V1, - ) - .await - { 
- jobs.push(job); - }; + match &config.http_api { + Some(http_api_config) => { + if let Some(job) = tracker_apis::start_job( + http_api_config, + tracker.clone(), + registar.give_form(), + servers::apis::Version::V1, + ) + .await + { + jobs.push(job); + }; + } + None => info!("No API block in configuration"), } // Start runners to remove torrents without peers, every interval diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 05bfe2341..fed4e5347 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -16,7 +16,6 @@ use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpTracker; -use tracing::info; use super::make_rust_tls; use crate::core; @@ -39,19 +38,14 @@ pub async fn start_job( form: ServiceRegistrationForm, version: Version, ) -> Option> { - if config.enabled { - let socket = config.bind_address; + let socket = config.bind_address; - let tls = make_rust_tls(&config.tsl_config) - .await - .map(|tls| tls.expect("it should have a valid http tracker tls configuration")); + let tls = make_rust_tls(&config.tsl_config) + .await + .map(|tls| tls.expect("it should have a valid http tracker tls configuration")); - match version { - Version::V1 => Some(start_v1(socket, tls, tracker.clone(), form).await), - } - } else { - info!("Note: Not loading Http Tracker Service, Not Enabled in Configuration."); - None + match version { + Version::V1 => Some(start_v1(socket, tls, tracker.clone(), form).await), } } @@ -93,7 +87,8 @@ mod tests { #[tokio::test] async fn it_should_start_http_tracker() { let cfg = Arc::new(ephemeral_mode_public()); - let config = &cfg.http_trackers[0]; + let http_tracker = cfg.http_trackers.clone().expect("missing HTTP tracker configuration"); + let config = &http_tracker[0]; let tracker = initialize_with_configuration(&cfg); let version = Version::V1; diff --git a/src/bootstrap/jobs/mod.rs 
b/src/bootstrap/jobs/mod.rs index 87a607720..79a4347ef 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -43,9 +43,7 @@ pub async fn make_rust_tls(opt_tsl_config: &Option) -> Option { - None - } + None => None, } } diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index c3b12d7a1..2067e5b0c 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -26,7 +26,6 @@ use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use tokio::task::JoinHandle; use torrust_tracker_configuration::{AccessTokens, HttpApi}; -use tracing::info; use super::make_rust_tls; use crate::core; @@ -60,21 +59,16 @@ pub async fn start_job( form: ServiceRegistrationForm, version: Version, ) -> Option> { - if config.enabled { - let bind_to = config.bind_address; + let bind_to = config.bind_address; - let tls = make_rust_tls(&config.tsl_config) - .await - .map(|tls| tls.expect("it should have a valid tracker api tls configuration")); + let tls = make_rust_tls(&config.tsl_config) + .await + .map(|tls| tls.expect("it should have a valid tracker api tls configuration")); - let access_tokens = Arc::new(config.access_tokens.clone()); + let access_tokens = Arc::new(config.access_tokens.clone()); - match version { - Version::V1 => Some(start_v1(bind_to, tls, tracker.clone(), form, access_tokens).await), - } - } else { - info!("Note: Not loading Http Tracker Service, Not Enabled in Configuration."); - None + match version { + Version::V1 => Some(start_v1(bind_to, tls, tracker.clone(), form, access_tokens).await), } } @@ -110,7 +104,7 @@ mod tests { #[tokio::test] async fn it_should_start_http_tracker() { let cfg = Arc::new(ephemeral_mode_public()); - let config = &cfg.http_api; + let config = &cfg.http_api.clone().unwrap(); let tracker = initialize_with_configuration(&cfg); let version = Version::V1; diff --git a/src/lib.rs b/src/lib.rs index 7f8c70a47..cf2834418 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ 
-192,21 +192,6 @@ //! external_ip = "0.0.0.0" //! on_reverse_proxy = false //! -//! [[udp_trackers]] -//! bind_address = "0.0.0.0:6969" -//! enabled = false -//! -//! [[http_trackers]] -//! bind_address = "0.0.0.0:7070" -//! enabled = false -//! -//! [http_api] -//! bind_address = "127.0.0.1:1212" -//! enabled = true -//! -//! [http_api.access_tokens] -//! admin = "MyAccessToken" -//! //! [health_check_api] //! bind_address = "127.0.0.1:1313" //!``` @@ -253,8 +238,10 @@ //! //! ```toml //! [http_api] -//! enabled = true //! bind_address = "127.0.0.1:1212" +//! +//! [http_api.access_tokens] +//! admin = "MyAccessToken" //! ``` //! //! By default it's enabled on port `1212`. You also need to add access tokens in the configuration: @@ -310,7 +297,6 @@ //! //! ```toml //! [[http_trackers]] -//! enabled = true //! bind_address = "0.0.0.0:7070" //! ``` //! @@ -405,7 +391,6 @@ //! //! ```toml //! [[udp_trackers]] -//! enabled = true //! bind_address = "0.0.0.0:6969" //! ``` //! diff --git a/src/servers/apis/mod.rs b/src/servers/apis/mod.rs index 02b93efa6..6dae66c2d 100644 --- a/src/servers/apis/mod.rs +++ b/src/servers/apis/mod.rs @@ -25,7 +25,6 @@ //! //! ```toml //! [http_api] -//! enabled = true //! bind_address = "0.0.0.0:1212" //! //! [http_api.tsl_config] @@ -112,7 +111,6 @@ //! //! ```toml //! [http_api] -//! enabled = true //! bind_address = "0.0.0.0:1212" //! //! 
[http_api.tsl_config] diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 74dc89692..246660ab1 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -274,7 +274,7 @@ mod tests { #[tokio::test] async fn it_should_be_able_to_start_and_stop() { let cfg = Arc::new(ephemeral_mode_public()); - let config = &cfg.http_api; + let config = &cfg.http_api.clone().unwrap(); let tracker = initialize_with_configuration(&cfg); diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index bbe0c3cc1..5798f7c10 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -206,7 +206,7 @@ impl HttpServer { /// Or if the request returns an error. #[must_use] pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { - let url = format!("http://{binding}/health_check"); + let url = format!("http://{binding}/health_check"); // DevSkim: ignore DS137138 let info = format!("checking http tracker health check at: {url}"); @@ -235,7 +235,8 @@ mod tests { async fn it_should_be_able_to_start_and_stop() { let cfg = Arc::new(ephemeral_mode_public()); let tracker = initialize_with_configuration(&cfg); - let config = &cfg.http_trackers[0]; + let http_trackers = cfg.http_trackers.clone().expect("missing HTTP trackers configuration"); + let config = &http_trackers[0]; let bind_to = config.bind_address; diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index b2c72258d..f36f7df45 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -460,7 +460,8 @@ mod tests { async fn it_should_be_able_to_start_and_stop() { let cfg = Arc::new(ephemeral_mode_public()); let tracker = initialize_with_configuration(&cfg); - let config = &cfg.udp_trackers[0]; + let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); + let config = &udp_trackers[0]; let bind_to = config.bind_address; let register = &Registar::default(); diff --git a/tests/servers/api/environment.rs 
b/tests/servers/api/environment.rs index 8f84620dd..dc2f70a76 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -31,7 +31,7 @@ impl Environment { pub fn new(configuration: &Arc) -> Self { let tracker = initialize_with_configuration(configuration); - let config = Arc::new(configuration.http_api.clone()); + let config = Arc::new(configuration.http_api.clone().expect("missing API configuration")); let bind_to = config.bind_address; diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 6e80569ec..2133ed6d0 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -29,7 +29,12 @@ impl Environment { pub fn new(configuration: &Arc) -> Self { let tracker = initialize_with_configuration(configuration); - let config = Arc::new(configuration.http_trackers[0].clone()); + let http_tracker = configuration + .http_trackers + .clone() + .expect("missing HTTP tracker configuration"); + + let config = Arc::new(http_tracker[0].clone()); let bind_to = config.bind_address; diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index c1fecbdd3..1ba038c70 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -29,7 +29,9 @@ impl Environment { pub fn new(configuration: &Arc) -> Self { let tracker = initialize_with_configuration(configuration); - let config = Arc::new(configuration.udp_trackers[0].clone()); + let udp_tracker = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); + + let config = Arc::new(udp_tracker[0].clone()); let bind_to = config.bind_address; From 0bcca80fe6b570d874c2fd49d763221778b70912 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Jun 2024 10:51:26 +0100 Subject: [PATCH 0891/1003] chore(deps): update dependencies ```console cargo update Updating crates.io index Locking 18 packages to latest compatible versions Updating bytemuck v1.16.0 -> v1.16.1 Updating derive_more 
v0.99.17 -> v0.99.18 Removing displaydoc v0.2.4 Adding hermit-abi v0.4.0 Updating httparse v1.9.3 -> v1.9.4 Adding hyper-rustls v0.27.2 Removing icu_collections v1.5.0 Removing icu_locid v1.5.0 Removing icu_locid_transform v1.5.0 Removing icu_locid_transform_data v1.5.0 Removing icu_normalizer v1.5.0 Removing icu_normalizer_data v1.5.0 Removing icu_properties v1.5.0 Removing icu_properties_data v1.5.0 Removing icu_provider v1.5.0 Removing icu_provider_macros v1.5.0 Downgrading idna v1.0.0 -> v0.5.0 (latest: v1.0.1) Removing litemap v0.7.3 Updating miniz_oxide v0.7.3 -> v0.7.4 Updating polling v3.7.1 -> v3.7.2 Updating reqwest v0.12.4 -> v0.12.5 Adding rustls v0.23.10 Adding rustls-webpki v0.102.4 Removing stable_deref_trait v1.2.0 Adding subtle v2.5.0 Removing synstructure v0.13.1 Removing tinystr v0.7.6 Adding tokio-rustls v0.26.0 Adding unicode-bidi v0.3.15 Adding unicode-normalization v0.1.23 Updating url v2.5.1 -> v2.5.2 Removing utf16_iter v1.0.5 Removing utf8_iter v1.0.4 Removing write16 v1.0.0 Removing writeable v0.5.5 Removing yoke v0.7.4 Removing yoke-derive v0.7.4 Removing zerofrom v0.1.4 Removing zerofrom-derive v0.1.4 Adding zeroize v1.8.1 Removing zerovec v0.10.2 Removing zerovec-derive v0.10.2 Updating zstd-sys v2.0.10+zstd.1.5.6 -> v2.0.11+zstd.1.5.6 ``` --- Cargo.lock | 397 +++++++++++++++-------------------------------------- 1 file changed, 114 insertions(+), 283 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7a54063bf..94fbe7d87 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -288,7 +288,7 @@ dependencies = [ "futures-io", "futures-lite 2.3.0", "parking", - "polling 3.7.1", + "polling 3.7.2", "rustix 0.38.34", "slab", "tracing", @@ -497,10 +497,10 @@ dependencies = [ "hyper", "hyper-util", "pin-project-lite", - "rustls", + "rustls 0.21.12", "rustls-pemfile", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", "tower", "tower-service", ] @@ -696,9 +696,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.16.0" +version = "1.16.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5" +checksum = "b236fc92302c97ed75b38da1f4917b5cdda4984745740f153a5d3059e48d725e" [[package]] name = "byteorder" @@ -1116,15 +1116,15 @@ dependencies = [ [[package]] name = "derive_more" -version = "0.99.17" +version = "0.99.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "convert_case", "proc-macro2", "quote", "rustc_version", - "syn 1.0.109", + "syn 2.0.66", ] [[package]] @@ -1148,17 +1148,6 @@ dependencies = [ "crypto-common", ] -[[package]] -name = "displaydoc" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", -] - [[package]] name = "downcast" version = "0.11.0" @@ -1652,6 +1641,12 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + [[package]] name = "hex" version = "0.4.3" @@ -1700,9 +1695,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.3" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0e7a4dd27b9476dc40cb050d3632d3bba3a70ddbff012285f7f8559a1e7e545" +checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" [[package]] name = "httpdate" @@ -1731,6 +1726,23 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.27.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +dependencies = [ + "futures-util", + "http", + "hyper", + "hyper-util", + "rustls 0.23.10", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.0", + "tower-service", +] + [[package]] name = "hyper-tls" version = "0.6.0" @@ -1790,124 +1802,6 @@ dependencies = [ "cc", ] -[[package]] -name = "icu_collections" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" -dependencies = [ - "displaydoc", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_locid" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" -dependencies = [ - "displaydoc", - "litemap", - "tinystr", - "writeable", - "zerovec", -] - -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" - -[[package]] -name = "icu_normalizer" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_normalizer_data", - "icu_properties", - "icu_provider", - "smallvec", - "utf16_iter", - "utf8_iter", - "write16", - "zerovec", -] - -[[package]] -name = "icu_normalizer_data" -version = "1.5.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" - -[[package]] -name = "icu_properties" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f8ac670d7422d7f76b32e17a5db556510825b29ec9154f235977c9caba61036" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_locid_transform", - "icu_properties_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_properties_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" - -[[package]] -name = "icu_provider" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_provider_macros", - "stable_deref_trait", - "tinystr", - "writeable", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", -] - [[package]] name = "ident_case" version = "1.0.1" @@ -1916,14 +1810,12 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "1.0.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4716a3a0933a1d01c2f72450e89596eb51dd34ef3c211ccd875acdf1f8fe47ed" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ - "icu_normalizer", - "icu_properties", - "smallvec", - "utf8_iter", + "unicode-bidi", + "unicode-normalization", ] [[package]] @@ -1978,7 +1870,7 @@ version = "1.0.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "libc", "windows-sys 0.48.0", ] @@ -1995,7 +1887,7 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "libc", "windows-sys 0.52.0", ] @@ -2192,12 +2084,6 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" -[[package]] -name = "litemap" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" - [[package]] name = "local-ip-address" version = "0.6.1" @@ -2264,9 +2150,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" dependencies = [ "adler", ] @@ -2517,7 +2403,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "libc", ] @@ -2793,13 +2679,13 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.1" +version = "3.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6a007746f34ed64099e88783b0ae369eaa3da6392868ba262e2af9b8fbaea1" +checksum = "a3ed00ed3fbf728b5816498ecd316d1716eecaced9c0c8d2c5a6740ca214985b" dependencies = [ "cfg-if", "concurrent-queue", - "hermit-abi", + 
"hermit-abi 0.4.0", "pin-project-lite", "rustix 0.38.34", "tracing", @@ -3092,9 +2978,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" +checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" dependencies = [ "base64 0.22.1", "bytes", @@ -3106,6 +2992,7 @@ dependencies = [ "http-body", "http-body-util", "hyper", + "hyper-rustls", "hyper-tls", "hyper-util", "ipnet", @@ -3120,7 +3007,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.1", "system-configuration", "tokio", "tokio-native-tls", @@ -3301,10 +3188,23 @@ checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring", - "rustls-webpki", + "rustls-webpki 0.101.7", "sct", ] +[[package]] +name = "rustls" +version = "0.23.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +dependencies = [ + "once_cell", + "rustls-pki-types", + "rustls-webpki 0.102.4", + "subtle", + "zeroize", +] + [[package]] name = "rustls-pemfile" version = "2.1.2" @@ -3331,6 +3231,17 @@ dependencies = [ "untrusted", ] +[[package]] +name = "rustls-webpki" +version = "0.102.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + [[package]] name = "rustversion" version = "1.0.17" @@ -3662,12 +3573,6 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "static_assertions" version = "1.1.0" @@ -3690,6 +3595,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + [[package]] name = "syn" version = "1.0.109" @@ -3736,17 +3647,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" -[[package]] -name = "synstructure" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", -] - [[package]] name = "system-configuration" version = "0.5.1" @@ -3879,16 +3779,6 @@ dependencies = [ "time-core", ] -[[package]] -name = "tinystr" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" -dependencies = [ - "displaydoc", - "zerovec", -] - [[package]] name = "tinytemplate" version = "1.2.1" @@ -3959,7 +3849,18 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls", + "rustls 0.21.12", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +dependencies = [ + "rustls 0.23.10", + "rustls-pki-types", "tokio", ] @@ -4337,12 +4238,27 @@ dependencies = [ "version_check", ] +[[package]] +name = "unicode-bidi" +version = "0.3.15" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" + [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +[[package]] +name = "unicode-normalization" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +dependencies = [ + "tinyvec", +] + [[package]] name = "untrusted" version = "0.9.0" @@ -4351,27 +4267,15 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.1" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c25da092f0a868cdf09e8674cd3b7ef3a7d92a24253e663a2fb85e2496de56" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", "percent-encoding", ] -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - -[[package]] -name = "utf8_iter" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" - [[package]] name = "utf8parse" version = "0.2.2" @@ -4726,18 +4630,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - -[[package]] -name = "writeable" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" - 
[[package]] name = "wyz" version = "0.5.1" @@ -4753,30 +4645,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" -[[package]] -name = "yoke" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" -dependencies = [ - "serde", - "stable_deref_trait", - "yoke-derive", - "zerofrom", -] - -[[package]] -name = "yoke-derive" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", - "synstructure", -] - [[package]] name = "zerocopy" version = "0.7.34" @@ -4799,47 +4667,10 @@ dependencies = [ ] [[package]] -name = "zerofrom" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" -dependencies = [ - "zerofrom-derive", -] - -[[package]] -name = "zerofrom-derive" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", - "synstructure", -] - -[[package]] -name = "zerovec" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2cc8827d6c0994478a15c53f374f46fbd41bea663d809b14744bc42e6b109c" -dependencies = [ - "yoke", - "zerofrom", - "zerovec-derive", -] - -[[package]] -name = "zerovec-derive" -version = "0.10.2" +name = "zeroize" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97cf56601ee5052b4417d90c8755c6683473c926039908196cf35d99f893ebe7" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", -] +checksum = 
"ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" [[package]] name = "zstd" @@ -4861,9 +4692,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.10+zstd.1.5.6" +version = "2.0.11+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" +checksum = "75652c55c0b6f3e6f12eb786fe1bc960396bf05a1eb3bf1f3691c3610ac2e6d4" dependencies = [ "cc", "pkg-config", From 84cc1a1d39432456b2b0c306d8289f6a618da7b1 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 19 Jun 2024 15:44:02 +0200 Subject: [PATCH 0892/1003] dev: use stream for udp requests --- .cargo/config.toml | 1 - cSpell.json | 2 + src/lib.rs | 2 +- src/servers/udp/handlers.rs | 8 +- src/servers/udp/server.rs | 476 +++++++++++++------ src/shared/bit_torrent/tracker/udp/client.rs | 57 ++- tests/servers/udp/contract.rs | 66 ++- tests/servers/udp/environment.rs | 11 +- 8 files changed, 425 insertions(+), 198 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 34d6230b9..a88db5f38 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -23,4 +23,3 @@ rustflags = [ "-D", "unused", ] - diff --git a/cSpell.json b/cSpell.json index ef807f035..6a9da0324 100644 --- a/cSpell.json +++ b/cSpell.json @@ -34,10 +34,12 @@ "codecov", "codegen", "completei", + "Condvar", "connectionless", "Containerfile", "conv", "curr", + "cvar", "Cyberneering", "dashmap", "datagram", diff --git a/src/lib.rs b/src/lib.rs index cf2834418..bb6826dd1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -494,7 +494,7 @@ pub mod bootstrap; pub mod console; pub mod core; pub mod servers; -pub mod shared; +pub mod shared; #[macro_use] extern crate lazy_static; diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 36825f084..f7e3aac64 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -10,7 +10,6 @@ use aquatic_udp_protocol::{ ErrorResponse, Ipv4AddrBytes, 
Ipv6AddrBytes, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; -use tokio::net::UdpSocket; use torrust_tracker_located_error::DynError; use torrust_tracker_primitives::info_hash::InfoHash; use tracing::debug; @@ -34,13 +33,12 @@ use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; /// - Delegating the request to the correct handler depending on the request type. /// /// It will return an `Error` response if the request is invalid. -pub(crate) async fn handle_packet(udp_request: UdpRequest, tracker: &Arc, socket: Arc) -> Response { +pub(crate) async fn handle_packet(udp_request: UdpRequest, tracker: &Arc, addr: SocketAddr) -> Response { debug!("Handling Packets: {udp_request:?}"); let start_time = Instant::now(); let request_id = RequestId::make(&udp_request); - let server_socket_addr = socket.local_addr().expect("Could not get local_addr for socket."); match Request::parse_bytes(&udp_request.payload[..udp_request.payload.len()], MAX_SCRAPE_TORRENTS).map_err(|e| { Error::InternalServer { @@ -49,7 +47,7 @@ pub(crate) async fn handle_packet(udp_request: UdpRequest, tracker: &Arc { - log_request(&request, &request_id, &server_socket_addr); + log_request(&request, &request_id, &addr); let transaction_id = match &request { Request::Connect(connect_request) => connect_request.transaction_id, @@ -64,7 +62,7 @@ pub(crate) async fn handle_packet(udp_request: UdpRequest, tracker: &Arc { /// /// It panics if unable to receive the bound socket address from service. 
/// - pub async fn start(self, tracker: Arc, form: ServiceRegistrationForm) -> Result, Error> { + pub async fn start(self, tracker: Arc, form: ServiceRegistrationForm) -> Result, std::io::Error> { let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); @@ -129,6 +138,7 @@ impl UdpServer { let task = self.state.launcher.start(tracker, tx_start, rx_halt); let binding = rx_start.await.expect("it should be able to start the service").address; + let local_addr = format!("udp://{binding}"); form.send(ServiceRegistration::new(binding, Udp::check)) .expect("it should be able to send service registration"); @@ -141,7 +151,7 @@ impl UdpServer { }, }; - trace!("Running UDP Tracker on Socket: {}", running_udp_server.state.binding); + tracing::trace!(target: "UDP TRACKER: UdpServer::start", local_addr, "(running)"); Ok(running_udp_server) } @@ -159,13 +169,13 @@ impl UdpServer { /// # Panics /// /// It panics if unable to shutdown service. - pub async fn stop(self) -> Result, Error> { + pub async fn stop(self) -> Result, UdpError> { self.state .halt_task .send(Halted::Normal) - .map_err(|e| Error::Error(e.to_string()))?; + .map_err(|e| UdpError::Error(e.to_string()))?; - let launcher = self.state.task.await.expect("unable to shutdown service"); + let launcher = self.state.task.await.expect("it should shutdown service"); let stopped_api_server: UdpServer = UdpServer { state: Stopped { launcher }, @@ -200,23 +210,12 @@ impl Launcher { } } +/// Ring-Buffer of Active Requests +#[derive(Default)] struct ActiveRequests { rb: StaticRb, // the number of requests we handle at the same time. } -impl ActiveRequests { - /// Creates a new [`ActiveRequests`] filled with finished tasks. 
- async fn new() -> Self { - let mut rb = StaticRb::default(); - - let () = while rb.try_push(tokio::task::spawn_blocking(|| ()).abort_handle()).is_ok() {}; - - task::yield_now().await; - - Self { rb } - } -} - impl std::fmt::Debug for ActiveRequests { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let (left, right) = &self.rb.as_slices(); @@ -235,6 +234,84 @@ impl Drop for ActiveRequests { } } +/// Wrapper for Tokio [`UdpSocket`][`tokio::net::UdpSocket`] that is bound to a particular socket. +struct Socket { + socket: Arc, +} + +impl Socket { + async fn new(addr: SocketAddr) -> Result> { + let socket = tokio::net::UdpSocket::bind(addr).await; + + let socket = match socket { + Ok(socket) => socket, + Err(e) => Err(e)?, + }; + + let local_addr = format!("udp://{addr}"); + tracing::debug!(target: "UDP TRACKER: UdpSocket::new", local_addr, "(bound)"); + + Ok(Self { + socket: Arc::new(socket), + }) + } +} + +impl Deref for Socket { + type Target = tokio::net::UdpSocket; + + fn deref(&self) -> &Self::Target { + &self.socket + } +} + +impl Debug for Socket { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let local_addr = match self.socket.local_addr() { + Ok(socket) => format!("Receiving From: {socket}"), + Err(err) => format!("Socket Broken: {err}"), + }; + + f.debug_struct("UdpSocket").field("addr", &local_addr).finish_non_exhaustive() + } +} + +struct Receiver { + socket: Arc, + tracker: Arc, + data: RefCell<[u8; MAX_PACKET_SIZE]>, +} + +impl Stream for Receiver { + type Item = std::io::Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut buf = *self.data.borrow_mut(); + let mut buf = tokio::io::ReadBuf::new(&mut buf); + + let Poll::Ready(ready) = self.socket.poll_recv_from(cx, &mut buf) else { + return Poll::Pending; + }; + + let res = match ready { + Ok(from) => { + let payload = buf.filled().to_vec(); + let request = UdpRequest { payload, from }; + + 
Some(Ok(tokio::task::spawn(Udp::process_request( + request, + self.tracker.clone(), + self.socket.clone(), + )) + .abort_handle())) + } + Err(err) => Some(Err(err)), + }; + + Poll::Ready(res) + } +} + /// A UDP server instance launcher. #[derive(Constructor)] pub struct Udp; @@ -252,124 +329,178 @@ impl Udp { tx_start: oneshot::Sender, rx_halt: oneshot::Receiver, ) { - let socket = Arc::new( - UdpSocket::bind(bind_to) - .await - .unwrap_or_else(|_| panic!("Could not bind to {bind_to}.")), - ); - let address = socket - .local_addr() - .unwrap_or_else(|_| panic!("Could not get local_addr from {bind_to}.")); - let halt = shutdown_signal_with_message(rx_halt, format!("Halting Http Service Bound to Socket: {address}")); - - info!(target: "UDP TRACKER", "Starting on: udp://{}", address); - - let running = tokio::task::spawn(async move { - debug!(target: "UDP TRACKER", "Started: Waiting for packets on socket address: udp://{address} ..."); - Self::run_udp_server(tracker, socket).await; - }); + let halt_task = tokio::task::spawn(shutdown_signal_with_message( + rx_halt, + format!("Halting Http Service Bound to Socket: {bind_to}"), + )); + + let socket = tokio::time::timeout(Duration::from_millis(5000), Socket::new(bind_to)) + .await + .expect("it should bind to the socket within five seconds"); + + let socket = match socket { + Ok(socket) => socket, + Err(e) => { + tracing::error!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown", addr = %bind_to, err = %e, "panic! (error when building socket)" ); + panic!("could not bind to socket!"); + } + }; + + let address = socket.local_addr().expect("it should get the locally bound address"); + let local_addr = format!("udp://{address}"); + + // note: this log message is parsed by our container. 
i.e: + // + // `[UDP TRACKER][INFO] Starting on: udp://` + // + tracing::info!(target: "UDP TRACKER", "Starting on: {local_addr}"); + + let socket = socket.socket; + + let direct = Receiver { + socket, + tracker, + data: RefCell::new([0; MAX_PACKET_SIZE]), + }; + + tracing::trace!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown", local_addr, "(spawning main loop)"); + let running = { + let local_addr = local_addr.clone(); + tokio::task::spawn(async move { + tracing::debug!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown::task", local_addr, "(listening...)"); + let () = Self::run_udp_server_main(direct).await; + }) + }; tx_start .send(Started { address }) .expect("the UDP Tracker service should not be dropped"); - info!(target: "UDP TRACKER", "Started on: udp://{}", address); + tracing::debug!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown", local_addr, "(started)"); let stop = running.abort_handle(); select! { - _ = running => { debug!(target: "UDP TRACKER", "Socket listener stopped on address: udp://{address}"); }, - () = halt => { debug!(target: "UDP TRACKER", "Halt signal spawned task stopped on address: udp://{address}"); } + _ = running => { tracing::debug!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown", local_addr, "(stopped)"); }, + _ = halt_task => { tracing::debug!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown",local_addr, "(halting)"); } } stop.abort(); - task::yield_now().await; // lets allow the other threads to complete. + tokio::task::yield_now().await; // lets allow the other threads to complete. 
} - async fn run_udp_server(tracker: Arc, socket: Arc) { - let tracker = tracker.clone(); - let socket = socket.clone(); + async fn run_udp_server_main(mut direct: Receiver) { + let reqs = &mut ActiveRequests::default(); - let reqs = &mut ActiveRequests::new().await; + let addr = direct.socket.local_addr().expect("it should get local address"); + let local_addr = format!("udp://{addr}"); loop { - task::yield_now().await; - for h in reqs.rb.iter_mut() { - if h.is_finished() { - std::mem::swap( - h, - &mut Self::spawn_request_processor( - Self::receive_request(socket.clone()).await, - tracker.clone(), - socket.clone(), - ) - .abort_handle(), - ); - } else { - // the task is still running, lets yield and give it a chance to flush. + if let Some(req) = { + tracing::trace!(target: "UDP TRACKER: Udp::run_udp_server", local_addr, "(wait for request)"); + direct.next().await + } { + tracing::trace!(target: "UDP TRACKER: Udp::run_udp_server::loop", local_addr, "(in)"); + + let req = match req { + Ok(req) => req, + Err(e) => { + if e.kind() == std::io::ErrorKind::Interrupted { + tracing::warn!(target: "UDP TRACKER: Udp::run_udp_server::loop", local_addr, err = %e, "(interrupted)"); + return; + } + tracing::error!(target: "UDP TRACKER: Udp::run_udp_server::loop", local_addr, err = %e, "break: (got error)"); + break; + } + }; + + if req.is_finished() { + continue; + } + + // fill buffer with requests + let Err(req) = reqs.rb.try_push(req) else { + continue; + }; + + let mut finished: u64 = 0; + let mut unfinished_task = None; + // buffer is full.. lets make some space. + for h in reqs.rb.pop_iter() { + // remove some finished tasks + if h.is_finished() { + finished += 1; + continue; + } + + // task is unfinished.. give it another chance. tokio::task::yield_now().await; - h.abort(); + // if now finished, we continue. 
+ if h.is_finished() { + finished += 1; + continue; + } - let server_socket_addr = socket.local_addr().expect("Could not get local_addr for socket."); + tracing::debug!(target: "UDP TRACKER: Udp::run_udp_server::loop", local_addr, removed_count = finished, "(got unfinished task)"); - tracing::span!( - target: "UDP TRACKER", - tracing::Level::WARN, "request-aborted", server_socket_addr = %server_socket_addr); + if finished == 0 { + // we have _no_ finished tasks.. will abort the unfinished task to make space... + h.abort(); - // force-break a single thread, then loop again. - break; - } - } - } - } + tracing::warn!(target: "UDP TRACKER: Udp::run_udp_server::loop", local_addr, "aborting request: (no finished tasks)"); + break; + } - async fn receive_request(socket: Arc) -> Result> { - // Wait for the socket to be readable - socket.readable().await?; + // we have space, return unfinished task for re-entry. + unfinished_task = Some(h); + } - let mut buf = Vec::with_capacity(MAX_PACKET_SIZE); + // re-insert the previous unfinished task. + if let Some(h) = unfinished_task { + reqs.rb.try_push(h).expect("it was previously inserted"); + } - match socket.recv_buf_from(&mut buf).await { - Ok((n, from)) => { - Vec::truncate(&mut buf, n); - trace!("GOT {buf:?}"); - Ok(UdpRequest { payload: buf, from }) + // insert the new task. + if !req.is_finished() { + reqs.rb.try_push(req).expect("it should remove at least one element."); + } + } else { + tokio::task::yield_now().await; + // the request iterator returned `None`. 
+ tracing::error!(target: "UDP TRACKER: Udp::run_udp_server", local_addr, "breaking: (ran dry, should not happen in production!)"); + break; } - - Err(e) => Err(Box::new(e)), } } - fn spawn_request_processor( - result: Result>, - tracker: Arc, - socket: Arc, - ) -> JoinHandle<()> { - tokio::task::spawn(Self::process_request(result, tracker, socket)) - } - - async fn process_request(result: Result>, tracker: Arc, socket: Arc) { - match result { - Ok(udp_request) => { - trace!("Received Request from: {}", udp_request.from); - Self::process_valid_request(tracker.clone(), socket.clone(), udp_request).await; - } - Err(error) => { - debug!("error: {error}"); - } - } + async fn process_request(request: UdpRequest, tracker: Arc, socket: Arc) { + tracing::trace!(target: "UDP TRACKER: Udp::process_request", request = %request.from, "(receiving)"); + Self::process_valid_request(tracker, socket, request).await; } async fn process_valid_request(tracker: Arc, socket: Arc, udp_request: UdpRequest) { - trace!("Making Response to {udp_request:?}"); + tracing::trace!(target: "UDP TRACKER: Udp::process_valid_request", "Making Response to {udp_request:?}"); let from = udp_request.from; - let response = handlers::handle_packet(udp_request, &tracker.clone(), socket.clone()).await; + let response = handlers::handle_packet( + udp_request, + &tracker.clone(), + socket.local_addr().expect("it should get the local address"), + ) + .await; Self::send_response(&socket.clone(), from, response).await; } async fn send_response(socket: &Arc, to: SocketAddr, response: Response) { - trace!("Sending Response: {response:?} to: {to:?}"); + let response_type = match &response { + Response::Connect(_) => "Connect".to_string(), + Response::AnnounceIpv4(_) => "AnnounceIpv4".to_string(), + Response::AnnounceIpv6(_) => "AnnounceIpv6".to_string(), + Response::Scrape(_) => "Scrape".to_string(), + Response::Error(e) => format!("Error: {e:?}"), + }; + + tracing::debug!(target: "UDP TRACKER: Udp::send_response", 
target = ?to, response_type, "(sending)"); let buffer = vec![0u8; MAX_PACKET_SIZE]; let mut cursor = Cursor::new(buffer); @@ -380,22 +511,21 @@ impl Udp { let position = cursor.position() as usize; let inner = cursor.get_ref(); - debug!("Sending {} bytes ...", &inner[..position].len()); - debug!("To: {:?}", &to); - debug!("Payload: {:?}", &inner[..position]); + tracing::debug!(target: "UDP TRACKER: Udp::send_response", ?to, bytes_count = &inner[..position].len(), "(sending...)" ); + tracing::trace!(target: "UDP TRACKER: Udp::send_response", ?to, bytes_count = &inner[..position].len(), payload = ?&inner[..position], "(sending...)"); Self::send_packet(socket, &to, &inner[..position]).await; - debug!("{} bytes sent", &inner[..position].len()); + tracing::trace!(target: "UDP TRACKER: Udp::send_response", ?to, bytes_count = &inner[..position].len(), "(sent)"); } - Err(_) => { - error!("could not write response to bytes."); + Err(e) => { + tracing::error!(target: "UDP TRACKER: Udp::send_response", ?to, response_type, err = %e, "(error)"); } } } async fn send_packet(socket: &Arc, remote_addr: &SocketAddr, payload: &[u8]) { - trace!("Sending Packets: {payload:?} to: {remote_addr:?}"); + tracing::trace!(target: "UDP TRACKER: Udp::send_response", to = %remote_addr, ?payload, "(sending)"); // doesn't matter if it reaches or not drop(socket.send_to(payload, remote_addr).await); @@ -413,55 +543,46 @@ impl Udp { #[cfg(test)] mod tests { - use std::sync::Arc; - use std::time::Duration; + use std::{sync::Arc, time::Duration}; - use ringbuf::traits::{Consumer, Observer, RingBuffer}; use torrust_tracker_test_helpers::configuration::ephemeral_mode_public; - use super::ActiveRequests; - use crate::bootstrap::app::initialize_with_configuration; - use crate::servers::registar::Registar; - use crate::servers::udp::server::{Launcher, UdpServer}; + use crate::{ + bootstrap::app::initialize_with_configuration, + servers::{ + registar::Registar, + udp::server::{Launcher, UdpServer}, + }, + 
}; #[tokio::test] - async fn it_should_return_to_the_start_of_the_ring_buffer() { - let mut a_req = ActiveRequests::new().await; - - tokio::time::sleep(Duration::from_millis(10)).await; + async fn it_should_be_able_to_start_and_stop() { + let cfg = Arc::new(ephemeral_mode_public()); + let tracker = initialize_with_configuration(&cfg); + let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); + let config = &udp_trackers[0]; + let bind_to = config.bind_address; + let register = &Registar::default(); - let mut count: usize = 0; - let cap: usize = a_req.rb.capacity().into(); + let stopped = UdpServer::new(Launcher::new(bind_to)); - // Add a single pending task to check that the ring-buffer is looping correctly. - a_req - .rb - .push_overwrite(tokio::task::spawn(std::future::pending::<()>()).abort_handle()); + let started = stopped + .start(tracker, register.give_form()) + .await + .expect("it should start the server"); - count += 1; + let stopped = started.stop().await.expect("it should stop the server"); - for _ in 0..2 { - for h in a_req.rb.iter() { - let first = count % cap; - println!("{count},{first},{}", h.is_finished()); - - if first == 0 { - assert!(!h.is_finished()); - } else { - assert!(h.is_finished()); - } + tokio::time::sleep(Duration::from_secs(1)).await; - count += 1; - } - } + assert_eq!(stopped.state.launcher.bind_to, bind_to); } #[tokio::test] - async fn it_should_be_able_to_start_and_stop() { + async fn it_should_be_able_to_start_and_stop_with_wait() { let cfg = Arc::new(ephemeral_mode_public()); let tracker = initialize_with_configuration(&cfg); - let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); - let config = &udp_trackers[0]; + let config = &cfg.udp_trackers.as_ref().unwrap().first().unwrap(); let bind_to = config.bind_address; let register = &Registar::default(); @@ -472,6 +593,8 @@ mod tests { .await .expect("it should start the server"); + 
tokio::time::sleep(Duration::from_secs(1)).await; + let stopped = started.stop().await.expect("it should stop the server"); tokio::time::sleep(Duration::from_secs(1)).await; @@ -479,3 +602,68 @@ mod tests { assert_eq!(stopped.state.launcher.bind_to, bind_to); } } + +/// Todo: submit test to tokio documentation. +#[cfg(test)] +mod test_tokio { + use std::sync::Arc; + use std::time::Duration; + + use tokio::sync::Barrier; + use tokio::task::JoinSet; + + #[tokio::test] + async fn test_barrier_with_aborted_tasks() { + // Create a barrier that requires 10 tasks to proceed. + let barrier = Arc::new(Barrier::new(10)); + let mut tasks = JoinSet::default(); + let mut handles = Vec::default(); + + // Set Barrier to 9/10. + for _ in 0..9 { + let c = barrier.clone(); + handles.push(tasks.spawn(async move { + c.wait().await; + })); + } + + // Abort two tasks: Barrier: 7/10. + for _ in 0..2 { + if let Some(handle) = handles.pop() { + handle.abort(); + } + } + + // Spawn a single task: Barrier 8/10. + let c = barrier.clone(); + handles.push(tasks.spawn(async move { + c.wait().await; + })); + + // give a chance fro the barrier to release. + tokio::time::sleep(Duration::from_millis(50)).await; + + // assert that the barrier isn't removed, i.e. 8, not 10. + for h in &handles { + assert!(!h.is_finished()); + } + + // Spawn two more tasks to trigger the barrier release: Barrier 10/10. + for _ in 0..2 { + let c = barrier.clone(); + handles.push(tasks.spawn(async move { + c.wait().await; + })); + } + + // give a chance fro the barrier to release. 
+ tokio::time::sleep(Duration::from_millis(50)).await; + + // assert that the barrier has been triggered + for h in &handles { + assert!(h.is_finished()); + } + + tasks.shutdown().await; + } +} diff --git a/src/shared/bit_torrent/tracker/udp/client.rs b/src/shared/bit_torrent/tracker/udp/client.rs index 45b51ad35..900543462 100644 --- a/src/shared/bit_torrent/tracker/udp/client.rs +++ b/src/shared/bit_torrent/tracker/udp/client.rs @@ -15,7 +15,7 @@ use crate::shared::bit_torrent::tracker::udp::{source_address, MAX_PACKET_SIZE}; /// Default timeout for sending and receiving packets. And waiting for sockets /// to be readable and writable. -const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); +pub const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); #[allow(clippy::module_name_repetitions)] #[derive(Debug)] @@ -37,7 +37,16 @@ impl UdpClient { .parse::() .context(format!("{local_address} is not a valid socket address"))?; - let socket = UdpSocket::bind(socket_addr).await?; + let socket = match time::timeout(DEFAULT_TIMEOUT, UdpSocket::bind(socket_addr)).await { + Ok(bind_result) => match bind_result { + Ok(socket) => { + debug!("Bound to socket: {socket_addr}"); + Ok(socket) + } + Err(e) => Err(anyhow!("Failed to bind to socket: {socket_addr}, error: {e:?}")), + }, + Err(e) => Err(anyhow!("Timeout waiting to bind to socket: {socket_addr}, error: {e:?}")), + }?; let udp_client = Self { socket: Arc::new(socket), @@ -54,12 +63,15 @@ impl UdpClient { .parse::() .context(format!("{remote_address} is not a valid socket address"))?; - match self.socket.connect(socket_addr).await { - Ok(()) => { - debug!("Connected successfully"); - Ok(()) - } - Err(e) => Err(anyhow!("Failed to connect: {e:?}")), + match time::timeout(self.timeout, self.socket.connect(socket_addr)).await { + Ok(connect_result) => match connect_result { + Ok(()) => { + debug!("Connected to socket {socket_addr}"); + Ok(()) + } + Err(e) => Err(anyhow!("Failed to connect to socket {socket_addr}: 
{e:?}")), + }, + Err(e) => Err(anyhow!("Timeout waiting to connect to socket {socket_addr}, error: {e:?}")), } } @@ -100,7 +112,9 @@ impl UdpClient { /// /// # Panics /// - pub async fn receive(&self, bytes: &mut [u8]) -> Result { + pub async fn receive(&self) -> Result> { + let mut response_buffer = [0u8; MAX_PACKET_SIZE]; + debug!(target: "UDP client", "receiving ..."); match time::timeout(self.timeout, self.socket.readable()).await { @@ -113,21 +127,20 @@ impl UdpClient { Err(e) => return Err(anyhow!("Timeout waiting for the socket to become readable: {e:?}")), }; - let size_result = match time::timeout(self.timeout, self.socket.recv(bytes)).await { + let size = match time::timeout(self.timeout, self.socket.recv(&mut response_buffer)).await { Ok(recv_result) => match recv_result { Ok(size) => Ok(size), Err(e) => Err(anyhow!("IO error during send: {e:?}")), }, Err(e) => Err(anyhow!("Receive operation timed out: {e:?}")), - }; + }?; - if size_result.is_ok() { - let size = size_result.as_ref().unwrap(); - debug!(target: "UDP client", "{size} bytes received {bytes:?}"); - size_result - } else { - size_result - } + let mut res: Vec = response_buffer.to_vec(); + Vec::truncate(&mut res, size); + + debug!(target: "UDP client", "{size} bytes received {res:?}"); + + Ok(res) } } @@ -181,13 +194,11 @@ impl UdpTrackerClient { /// /// Will return error if can't create response from the received payload (bytes buffer). pub async fn receive(&self) -> Result { - let mut response_buffer = [0u8; MAX_PACKET_SIZE]; - - let payload_size = self.udp_client.receive(&mut response_buffer).await?; + let payload = self.udp_client.receive().await?; - debug!(target: "UDP tracker client", "received {payload_size} bytes. Response {response_buffer:?}"); + debug!(target: "UDP tracker client", "received {} bytes. 
Response {payload:?}", payload.len()); - let response = Response::parse_bytes(&response_buffer[..payload_size], true)?; + let response = Response::parse_bytes(&payload, true)?; Ok(response) } diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index 7abd6092c..b23b20907 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -17,10 +17,6 @@ fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { [0; MAX_PACKET_SIZE] } -fn empty_buffer() -> [u8; MAX_PACKET_SIZE] { - [0; MAX_PACKET_SIZE] -} - async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { let connect_request = ConnectRequest { transaction_id }; @@ -54,13 +50,12 @@ async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_req Err(err) => panic!("{err}"), }; - let mut buffer = empty_buffer(); - match client.receive(&mut buffer).await { - Ok(_) => (), + let response = match client.receive().await { + Ok(response) => response, Err(err) => panic!("{err}"), }; - let response = Response::parse_bytes(&buffer, true).unwrap(); + let response = Response::parse_bytes(&response, true).unwrap(); assert!(is_error_response(&response, "bad request")); @@ -111,30 +106,20 @@ mod receiving_an_announce_request { AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, TransactionId, }; - use torrust_tracker::shared::bit_torrent::tracker::udp::client::new_udp_tracker_client_connected; + use torrust_tracker::shared::bit_torrent::tracker::udp::client::{new_udp_tracker_client_connected, UdpTrackerClient}; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_ipv4_announce_response; use crate::servers::udp::contract::send_connection_request; use crate::servers::udp::Started; - #[tokio::test] - async fn should_return_an_announce_response() { - let env = Started::new(&configuration::ephemeral().into()).await; - - 
let client = match new_udp_tracker_client_connected(&env.bind_address().to_string()).await { - Ok(udp_tracker_client) => udp_tracker_client, - Err(err) => panic!("{err}"), - }; - - let connection_id = send_connection_request(TransactionId::new(123), &client).await; - + pub async fn send_and_get_announce(tx_id: TransactionId, c_id: ConnectionId, client: &UdpTrackerClient) { // Send announce request let announce_request = AnnounceRequest { - connection_id: ConnectionId(connection_id.0), + connection_id: ConnectionId(c_id.0), action_placeholder: AnnounceActionPlaceholder::default(), - transaction_id: TransactionId::new(123i32), + transaction_id: tx_id, info_hash: InfoHash([0u8; 20]), peer_id: PeerId([255u8; 20]), bytes_downloaded: NumberOfBytes(0i64.into()), @@ -160,6 +145,43 @@ mod receiving_an_announce_request { println!("test response {response:?}"); assert!(is_ipv4_announce_response(&response)); + } + + #[tokio::test] + async fn should_return_an_announce_response() { + let env = Started::new(&configuration::ephemeral().into()).await; + + let client = match new_udp_tracker_client_connected(&env.bind_address().to_string()).await { + Ok(udp_tracker_client) => udp_tracker_client, + Err(err) => panic!("{err}"), + }; + + let tx_id = TransactionId::new(123); + + let c_id = send_connection_request(tx_id, &client).await; + + send_and_get_announce(tx_id, c_id, &client).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_return_many_announce_response() { + let env = Started::new(&configuration::ephemeral().into()).await; + + let client = match new_udp_tracker_client_connected(&env.bind_address().to_string()).await { + Ok(udp_tracker_client) => udp_tracker_client, + Err(err) => panic!("{err}"), + }; + + let tx_id = TransactionId::new(123); + + let c_id = send_connection_request(tx_id, &client).await; + + for x in 0..1000 { + tracing::info!("req no: {x}"); + send_and_get_announce(tx_id, c_id, &client).await; + } env.stop().await; } diff --git 
a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 1ba038c70..7b21defce 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -5,6 +5,7 @@ use torrust_tracker::bootstrap::app::initialize_with_configuration; use torrust_tracker::core::Tracker; use torrust_tracker::servers::registar::Registar; use torrust_tracker::servers::udp::server::{Launcher, Running, Stopped, UdpServer}; +use torrust_tracker::shared::bit_torrent::tracker::udp::client::DEFAULT_TIMEOUT; use torrust_tracker_configuration::{Configuration, UdpTracker}; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer; @@ -58,16 +59,22 @@ impl Environment { impl Environment { pub async fn new(configuration: &Arc) -> Self { - Environment::::new(configuration).start().await + tokio::time::timeout(DEFAULT_TIMEOUT, Environment::::new(configuration).start()) + .await + .expect("it should create an environment within the timeout") } #[allow(dead_code)] pub async fn stop(self) -> Environment { + let stopped = tokio::time::timeout(DEFAULT_TIMEOUT, self.server.stop()) + .await + .expect("it should stop the environment within the timeout"); + Environment { config: self.config, tracker: self.tracker, registar: Registar::default(), - server: self.server.stop().await.expect("it stop the udp tracker service"), + server: stopped.expect("it stop the udp tracker service"), } } From 9b3b75bd5fdf1dfc812d656294dbeac65b2643ca Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Jun 2024 07:59:21 +0100 Subject: [PATCH 0893/1003] fix: log message --- src/servers/udp/server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 64f2fa2ab..af52e2de3 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -331,7 +331,7 @@ impl Udp { ) { let halt_task = tokio::task::spawn(shutdown_signal_with_message( rx_halt, - format!("Halting Http Service Bound to 
Socket: {bind_to}"), + format!("Halting UDP Service Bound to Socket: {bind_to}"), )); let socket = tokio::time::timeout(Duration::from_millis(5000), Socket::new(bind_to)) From 0e3678d2d6b4f4c0f0f6be7218b01e2b9e6e3fe3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Jun 2024 08:02:05 +0100 Subject: [PATCH 0894/1003] refactor: rename Socket to BoundSocket and fix format errors" --- src/lib.rs | 2 +- src/servers/udp/server.rs | 23 ++++++++++------------- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index bb6826dd1..cf2834418 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -494,7 +494,7 @@ pub mod bootstrap; pub mod console; pub mod core; pub mod servers; -pub mod shared; +pub mod shared; #[macro_use] extern crate lazy_static; diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index af52e2de3..3fb494238 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -235,11 +235,11 @@ impl Drop for ActiveRequests { } /// Wrapper for Tokio [`UdpSocket`][`tokio::net::UdpSocket`] that is bound to a particular socket. 
-struct Socket { +struct BoundSocket { socket: Arc, } -impl Socket { +impl BoundSocket { async fn new(addr: SocketAddr) -> Result> { let socket = tokio::net::UdpSocket::bind(addr).await; @@ -257,7 +257,7 @@ impl Socket { } } -impl Deref for Socket { +impl Deref for BoundSocket { type Target = tokio::net::UdpSocket; fn deref(&self) -> &Self::Target { @@ -265,7 +265,7 @@ impl Deref for Socket { } } -impl Debug for Socket { +impl Debug for BoundSocket { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let local_addr = match self.socket.local_addr() { Ok(socket) => format!("Receiving From: {socket}"), @@ -334,7 +334,7 @@ impl Udp { format!("Halting UDP Service Bound to Socket: {bind_to}"), )); - let socket = tokio::time::timeout(Duration::from_millis(5000), Socket::new(bind_to)) + let socket = tokio::time::timeout(Duration::from_millis(5000), BoundSocket::new(bind_to)) .await .expect("it should bind to the socket within five seconds"); @@ -543,17 +543,14 @@ impl Udp { #[cfg(test)] mod tests { - use std::{sync::Arc, time::Duration}; + use std::sync::Arc; + use std::time::Duration; use torrust_tracker_test_helpers::configuration::ephemeral_mode_public; - use crate::{ - bootstrap::app::initialize_with_configuration, - servers::{ - registar::Registar, - udp::server::{Launcher, UdpServer}, - }, - }; + use crate::bootstrap::app::initialize_with_configuration; + use crate::servers::registar::Registar; + use crate::servers::udp::server::{Launcher, UdpServer}; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { From 7ff0cd249fe62adf4c8ba9b3c4815fb68d747b69 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Jun 2024 08:26:25 +0100 Subject: [PATCH 0895/1003] refactor: rename var --- src/servers/udp/server.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 3fb494238..c9f7e458f 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -357,7 +357,7 
@@ impl Udp { let socket = socket.socket; - let direct = Receiver { + let receiver = Receiver { socket, tracker, data: RefCell::new([0; MAX_PACKET_SIZE]), @@ -368,7 +368,7 @@ impl Udp { let local_addr = local_addr.clone(); tokio::task::spawn(async move { tracing::debug!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown::task", local_addr, "(listening...)"); - let () = Self::run_udp_server_main(direct).await; + let () = Self::run_udp_server_main(receiver).await; }) }; From 16ae4fd14bdb03c8704c2af9ecb20873e6c396d3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Jun 2024 08:51:48 +0100 Subject: [PATCH 0896/1003] refactor: rename vars and extract constructor --- src/servers/udp/server.rs | 69 ++++++++++++++++++++------------------- 1 file changed, 36 insertions(+), 33 deletions(-) diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index c9f7e458f..229729038 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -33,7 +33,6 @@ use derive_more::Constructor; use futures::{Stream, StreamExt}; use ringbuf::traits::{Consumer, Observer, Producer}; use ringbuf::StaticRb; -use tokio::net::UdpSocket; use tokio::select; use tokio::sync::oneshot; use tokio::task::{AbortHandle, JoinHandle}; @@ -255,6 +254,10 @@ impl BoundSocket { socket: Arc::new(socket), }) } + + fn local_addr(&self) -> SocketAddr { + self.socket.local_addr().expect("it should get local address") + } } impl Deref for BoundSocket { @@ -277,11 +280,21 @@ impl Debug for BoundSocket { } struct Receiver { - socket: Arc, + bound_socket: Arc, tracker: Arc, data: RefCell<[u8; MAX_PACKET_SIZE]>, } +impl Receiver { + pub fn new(bound_socket: Arc, tracker: Arc) -> Self { + Receiver { + bound_socket, + tracker, + data: RefCell::new([0; MAX_PACKET_SIZE]), + } + } +} + impl Stream for Receiver { type Item = std::io::Result; @@ -289,7 +302,7 @@ impl Stream for Receiver { let mut buf = *self.data.borrow_mut(); let mut buf = tokio::io::ReadBuf::new(&mut buf); - let Poll::Ready(ready) 
= self.socket.poll_recv_from(cx, &mut buf) else { + let Poll::Ready(ready) = self.bound_socket.poll_recv_from(cx, &mut buf) else { return Poll::Pending; }; @@ -301,7 +314,7 @@ impl Stream for Receiver { Some(Ok(tokio::task::spawn(Udp::process_request( request, self.tracker.clone(), - self.socket.clone(), + self.bound_socket.clone(), )) .abort_handle())) } @@ -338,7 +351,7 @@ impl Udp { .await .expect("it should bind to the socket within five seconds"); - let socket = match socket { + let bound_socket = match socket { Ok(socket) => socket, Err(e) => { tracing::error!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown", addr = %bind_to, err = %e, "panic! (error when building socket)" ); @@ -346,26 +359,21 @@ impl Udp { } }; - let address = socket.local_addr().expect("it should get the locally bound address"); - let local_addr = format!("udp://{address}"); + let address = bound_socket.local_addr(); + let local_udp_url = format!("udp://{address}"); // note: this log message is parsed by our container. 
i.e: // // `[UDP TRACKER][INFO] Starting on: udp://` // - tracing::info!(target: "UDP TRACKER", "Starting on: {local_addr}"); + tracing::info!(target: "UDP TRACKER", "Starting on: {local_udp_url}"); - let socket = socket.socket; + let receiver = Receiver::new(bound_socket.into(), tracker); - let receiver = Receiver { - socket, - tracker, - data: RefCell::new([0; MAX_PACKET_SIZE]), - }; + tracing::trace!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown", local_udp_url, "(spawning main loop)"); - tracing::trace!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown", local_addr, "(spawning main loop)"); let running = { - let local_addr = local_addr.clone(); + let local_addr = local_udp_url.clone(); tokio::task::spawn(async move { tracing::debug!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown::task", local_addr, "(listening...)"); let () = Self::run_udp_server_main(receiver).await; @@ -376,29 +384,29 @@ impl Udp { .send(Started { address }) .expect("the UDP Tracker service should not be dropped"); - tracing::debug!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown", local_addr, "(started)"); + tracing::debug!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown", local_udp_url, "(started)"); let stop = running.abort_handle(); select! { - _ = running => { tracing::debug!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown", local_addr, "(stopped)"); }, - _ = halt_task => { tracing::debug!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown",local_addr, "(halting)"); } + _ = running => { tracing::debug!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown", local_udp_url, "(stopped)"); }, + _ = halt_task => { tracing::debug!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown",local_udp_url, "(halting)"); } } stop.abort(); tokio::task::yield_now().await; // lets allow the other threads to complete. 
} - async fn run_udp_server_main(mut direct: Receiver) { + async fn run_udp_server_main(mut receiver: Receiver) { let reqs = &mut ActiveRequests::default(); - let addr = direct.socket.local_addr().expect("it should get local address"); + let addr = receiver.bound_socket.local_addr(); let local_addr = format!("udp://{addr}"); loop { if let Some(req) = { tracing::trace!(target: "UDP TRACKER: Udp::run_udp_server", local_addr, "(wait for request)"); - direct.next().await + receiver.next().await } { tracing::trace!(target: "UDP TRACKER: Udp::run_udp_server::loop", local_addr, "(in)"); @@ -474,24 +482,19 @@ impl Udp { } } - async fn process_request(request: UdpRequest, tracker: Arc, socket: Arc) { + async fn process_request(request: UdpRequest, tracker: Arc, socket: Arc) { tracing::trace!(target: "UDP TRACKER: Udp::process_request", request = %request.from, "(receiving)"); Self::process_valid_request(tracker, socket, request).await; } - async fn process_valid_request(tracker: Arc, socket: Arc, udp_request: UdpRequest) { + async fn process_valid_request(tracker: Arc, socket: Arc, udp_request: UdpRequest) { tracing::trace!(target: "UDP TRACKER: Udp::process_valid_request", "Making Response to {udp_request:?}"); let from = udp_request.from; - let response = handlers::handle_packet( - udp_request, - &tracker.clone(), - socket.local_addr().expect("it should get the local address"), - ) - .await; + let response = handlers::handle_packet(udp_request, &tracker.clone(), socket.local_addr()).await; Self::send_response(&socket.clone(), from, response).await; } - async fn send_response(socket: &Arc, to: SocketAddr, response: Response) { + async fn send_response(bound_socket: &Arc, to: SocketAddr, response: Response) { let response_type = match &response { Response::Connect(_) => "Connect".to_string(), Response::AnnounceIpv4(_) => "AnnounceIpv4".to_string(), @@ -514,7 +517,7 @@ impl Udp { tracing::debug!(target: "UDP TRACKER: Udp::send_response", ?to, bytes_count = 
&inner[..position].len(), "(sending...)" ); tracing::trace!(target: "UDP TRACKER: Udp::send_response", ?to, bytes_count = &inner[..position].len(), payload = ?&inner[..position], "(sending...)"); - Self::send_packet(socket, &to, &inner[..position]).await; + Self::send_packet(bound_socket, &to, &inner[..position]).await; tracing::trace!(target: "UDP TRACKER: Udp::send_response", ?to, bytes_count = &inner[..position].len(), "(sent)"); } @@ -524,7 +527,7 @@ impl Udp { } } - async fn send_packet(socket: &Arc, remote_addr: &SocketAddr, payload: &[u8]) { + async fn send_packet(socket: &Arc, remote_addr: &SocketAddr, payload: &[u8]) { tracing::trace!(target: "UDP TRACKER: Udp::send_response", to = %remote_addr, ?payload, "(sending)"); // doesn't matter if it reaches or not From 0388e1d1439bbc1d2ef7b59bf225a2d152358a2b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Jun 2024 13:20:17 +0100 Subject: [PATCH 0897/1003] refactor: extract consts for logging targets --- src/bootstrap/jobs/health_check_api.rs | 8 +-- src/bootstrap/jobs/udp_tracker.rs | 7 +-- src/console/ci/e2e/logs_parser.rs | 7 +-- src/servers/apis/mod.rs | 6 ++- src/servers/apis/routes.rs | 5 +- src/servers/apis/server.rs | 9 ++-- src/servers/health_check_api/mod.rs | 2 + src/servers/health_check_api/server.rs | 7 +-- src/servers/http/mod.rs | 2 + src/servers/http/server.rs | 5 +- src/servers/http/v1/routes.rs | 5 +- src/servers/udp/logging.rs | 13 ++--- src/servers/udp/mod.rs | 2 + src/servers/udp/server.rs | 54 ++++++++++--------- src/shared/bit_torrent/tracker/udp/client.rs | 12 +++-- tests/servers/health_check_api/environment.rs | 10 ++-- 16 files changed, 87 insertions(+), 67 deletions(-) diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index c22a4cf95..e79a6da77 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -20,7 +20,7 @@ use torrust_tracker_configuration::HealthCheckApi; use tracing::info; use 
super::Started; -use crate::servers::health_check_api::server; +use crate::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TARGET}; use crate::servers::registar::ServiceRegistry; use crate::servers::signals::Halted; @@ -44,18 +44,18 @@ pub async fn start_job(config: &HealthCheckApi, register: ServiceRegistry) -> Jo // Run the API server let join_handle = tokio::spawn(async move { - info!(target: "HEALTH CHECK API", "Starting on: {protocol}://{}", bind_addr); + info!(target: HEALTH_CHECK_API_LOG_TARGET, "Starting on: {protocol}://{}", bind_addr); let handle = server::start(bind_addr, tx_start, rx_halt, register); if let Ok(()) = handle.await { - info!(target: "HEALTH CHECK API", "Stopped server running on: {protocol}://{}", bind_addr); + info!(target: HEALTH_CHECK_API_LOG_TARGET, "Stopped server running on: {protocol}://{}", bind_addr); } }); // Wait until the server sends the started message match rx_start.await { - Ok(msg) => info!(target: "HEALTH CHECK API", "Started on: {protocol}://{}", msg.address), + Ok(msg) => info!(target: HEALTH_CHECK_API_LOG_TARGET, "Started on: {protocol}://{}", msg.address), Err(e) => panic!("the Health Check API server was dropped: {e}"), } diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 2c09e6de2..ba39df2fe 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -15,6 +15,7 @@ use tracing::debug; use crate::core; use crate::servers::registar::ServiceRegistrationForm; use crate::servers::udp::server::{Launcher, UdpServer}; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; /// It starts a new UDP server with the provided configuration. 
/// @@ -35,8 +36,8 @@ pub async fn start_job(config: &UdpTracker, tracker: Arc, form: S .expect("it should be able to start the udp tracker"); tokio::spawn(async move { - debug!(target: "UDP TRACKER", "Wait for launcher (UDP service) to finish ..."); - debug!(target: "UDP TRACKER", "Is halt channel closed before waiting?: {}", server.state.halt_task.is_closed()); + debug!(target: UDP_TRACKER_LOG_TARGET, "Wait for launcher (UDP service) to finish ..."); + debug!(target: UDP_TRACKER_LOG_TARGET, "Is halt channel closed before waiting?: {}", server.state.halt_task.is_closed()); assert!( !server.state.halt_task.is_closed(), @@ -49,6 +50,6 @@ pub async fn start_job(config: &UdpTracker, tracker: Arc, form: S .await .expect("it should be able to join to the udp tracker task"); - debug!(target: "UDP TRACKER", "Is halt channel closed after finishing the server?: {}", server.state.halt_task.is_closed()); + debug!(target: UDP_TRACKER_LOG_TARGET, "Is halt channel closed after finishing the server?: {}", server.state.halt_task.is_closed()); }) } diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index 4886786de..8bf7974c1 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -2,10 +2,11 @@ use regex::Regex; use serde::{Deserialize, Serialize}; +use crate::servers::health_check_api::HEALTH_CHECK_API_LOG_TARGET; +use crate::servers::http::HTTP_TRACKER_LOG_TARGET; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + const INFO_LOG_LEVEL: &str = "INFO"; -const UDP_TRACKER_LOG_TARGET: &str = "UDP TRACKER"; -const HTTP_TRACKER_LOG_TARGET: &str = "HTTP TRACKER"; -const HEALTH_CHECK_API_LOG_TARGET: &str = "HEALTH CHECK API"; #[derive(Serialize, Deserialize, Debug, Default)] pub struct RunningServices { diff --git a/src/servers/apis/mod.rs b/src/servers/apis/mod.rs index 6dae66c2d..b44ccab9f 100644 --- a/src/servers/apis/mod.rs +++ b/src/servers/apis/mod.rs @@ -157,6 +157,10 @@ pub mod routes; pub mod server; pub mod v1; 
+use serde::{Deserialize, Serialize}; + +pub const API_LOG_TARGET: &str = "API"; + /// The info hash URL path parameter. /// /// Some API endpoints require an info hash as a path parameter. @@ -169,8 +173,6 @@ pub mod v1; #[derive(Deserialize)] pub struct InfoHashParam(pub String); -use serde::{Deserialize, Serialize}; - /// The version of the HTTP Api. #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] pub enum Version { diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index 087bcfa4a..2001afc2f 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -27,6 +27,7 @@ use super::v1; use super::v1::context::health_check::handlers::health_check_handler; use super::v1::middlewares::auth::State; use crate::core::Tracker; +use crate::servers::apis::API_LOG_TARGET; const TIMEOUT: Duration = Duration::from_secs(5); @@ -60,7 +61,7 @@ pub fn router(tracker: Arc, access_tokens: Arc) -> Router .unwrap_or_default(); tracing::span!( - target: "API", + target: API_LOG_TARGET, tracing::Level::INFO, "request", method = %method, uri = %uri, request_id = %request_id); }) .on_response(|response: &Response, latency: Duration, _span: &Span| { @@ -73,7 +74,7 @@ pub fn router(tracker: Arc, access_tokens: Arc) -> Router let latency_ms = latency.as_millis(); tracing::span!( - target: "API", + target: API_LOG_TARGET, tracing::Level::INFO, "response", latency = %latency_ms, status = %status_code, request_id = %request_id); }), ) diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 246660ab1..d47e5d542 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -37,6 +37,7 @@ use tracing::{debug, error, info}; use super::routes::router; use crate::bootstrap::jobs::Started; use crate::core::Tracker; +use crate::servers::apis::API_LOG_TARGET; use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, 
ServiceRegistrationForm}; use crate::servers::signals::{graceful_shutdown, Halted}; @@ -121,11 +122,11 @@ impl ApiServer { let launcher = self.state.launcher; let task = tokio::spawn(async move { - debug!(target: "API", "Starting with launcher in spawned task ..."); + debug!(target: API_LOG_TARGET, "Starting with launcher in spawned task ..."); let _task = launcher.start(tracker, access_tokens, tx_start, rx_halt).await; - debug!(target: "API", "Started with launcher in spawned task"); + debug!(target: API_LOG_TARGET, "Started with launcher in spawned task"); launcher }); @@ -231,7 +232,7 @@ impl Launcher { let tls = self.tls.clone(); let protocol = if tls.is_some() { "https" } else { "http" }; - info!(target: "API", "Starting on {protocol}://{}", address); + info!(target: API_LOG_TARGET, "Starting on {protocol}://{}", address); let running = Box::pin(async { match tls { @@ -250,7 +251,7 @@ impl Launcher { } }); - info!(target: "API", "Started on {protocol}://{}", address); + info!(target: API_LOG_TARGET, "Started on {protocol}://{}", address); tx_start .send(Started { address }) diff --git a/src/servers/health_check_api/mod.rs b/src/servers/health_check_api/mod.rs index ec608387d..24c5232c8 100644 --- a/src/servers/health_check_api/mod.rs +++ b/src/servers/health_check_api/mod.rs @@ -2,3 +2,5 @@ pub mod handlers; pub mod resources; pub mod responses; pub mod server; + +pub const HEALTH_CHECK_API_LOG_TARGET: &str = "HEALTH CHECK API"; diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs index f03753573..89fbafe45 100644 --- a/src/servers/health_check_api/server.rs +++ b/src/servers/health_check_api/server.rs @@ -22,6 +22,7 @@ use tracing::{debug, Level, Span}; use crate::bootstrap::jobs::Started; use crate::servers::health_check_api::handlers::health_check_handler; +use crate::servers::health_check_api::HEALTH_CHECK_API_LOG_TARGET; use crate::servers::registar::ServiceRegistry; use crate::servers::signals::{graceful_shutdown, 
Halted}; @@ -56,7 +57,7 @@ pub fn start( .unwrap_or_default(); tracing::span!( - target: "HEALTH CHECK API", + target: HEALTH_CHECK_API_LOG_TARGET, tracing::Level::INFO, "request", method = %method, uri = %uri, request_id = %request_id); }) .on_response(|response: &Response, latency: Duration, _span: &Span| { @@ -69,7 +70,7 @@ pub fn start( let latency_ms = latency.as_millis(); tracing::span!( - target: "HEALTH CHECK API", + target: HEALTH_CHECK_API_LOG_TARGET, tracing::Level::INFO, "response", latency = %latency_ms, status = %status_code, request_id = %request_id); }), ) @@ -80,7 +81,7 @@ pub fn start( let handle = Handle::new(); - debug!(target: "HEALTH CHECK API", "Starting service with graceful shutdown in a spawned task ..."); + debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Starting service with graceful shutdown in a spawned task ..."); tokio::task::spawn(graceful_shutdown( handle.clone(), diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index e50e3c351..4ef5ca7ea 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -309,6 +309,8 @@ pub mod percent_encoding; pub mod server; pub mod v1; +pub const HTTP_TRACKER_LOG_TARGET: &str = "HTTP TRACKER"; + /// The version of the HTTP tracker. 
#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] pub enum Version { diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 5798f7c10..9199573b0 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -13,6 +13,7 @@ use super::v1::routes::router; use crate::bootstrap::jobs::Started; use crate::core::Tracker; use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; +use crate::servers::http::HTTP_TRACKER_LOG_TARGET; use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::{graceful_shutdown, Halted}; @@ -55,7 +56,7 @@ impl Launcher { let tls = self.tls.clone(); let protocol = if tls.is_some() { "https" } else { "http" }; - info!(target: "HTTP TRACKER", "Starting on: {protocol}://{}", address); + info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{}", address); let app = router(tracker, address); @@ -76,7 +77,7 @@ impl Launcher { } }); - info!(target: "HTTP TRACKER", "Started on: {protocol}://{}", address); + info!(target: HTTP_TRACKER_LOG_TARGET, "Started on: {protocol}://{}", address); tx_start .send(Started { address }) diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 14641dc1d..b2f37880c 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -20,6 +20,7 @@ use tracing::{Level, Span}; use super::handlers::{announce, health_check, scrape}; use crate::core::Tracker; +use crate::servers::http::HTTP_TRACKER_LOG_TARGET; const TIMEOUT: Duration = Duration::from_secs(5); @@ -56,7 +57,7 @@ pub fn router(tracker: Arc, server_socket_addr: SocketAddr) -> Router { .unwrap_or_default(); tracing::span!( - target:"HTTP TRACKER", + target: HTTP_TRACKER_LOG_TARGET, tracing::Level::INFO, "request", server_socket_addr= %server_socket_addr, method = %method, uri = %uri, request_id = %request_id); }) .on_response(move |response: &Response, latency: Duration, _span: &Span| { 
@@ -69,7 +70,7 @@ pub fn router(tracker: Arc, server_socket_addr: SocketAddr) -> Router { let latency_ms = latency.as_millis(); tracing::span!( - target: "HTTP TRACKER", + target: HTTP_TRACKER_LOG_TARGET, tracing::Level::INFO, "response", server_socket_addr= %server_socket_addr, latency = %latency_ms, status = %status_code, request_id = %request_id); }), ) diff --git a/src/servers/udp/logging.rs b/src/servers/udp/logging.rs index 9bbb48f6a..3891278d7 100644 --- a/src/servers/udp/logging.rs +++ b/src/servers/udp/logging.rs @@ -7,6 +7,7 @@ use aquatic_udp_protocol::{Request, Response, TransactionId}; use torrust_tracker_primitives::info_hash::InfoHash; use super::handlers::RequestId; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; pub fn log_request(request: &Request, request_id: &RequestId, server_socket_addr: &SocketAddr) { let action = map_action_name(request); @@ -17,7 +18,7 @@ pub fn log_request(request: &Request, request_id: &RequestId, server_socket_addr let transaction_id_str = transaction_id.0.to_string(); tracing::span!( - target: "UDP TRACKER", + target: UDP_TRACKER_LOG_TARGET, tracing::Level::INFO, "request", server_socket_addr = %server_socket_addr, action = %action, transaction_id = %transaction_id_str, request_id = %request_id); } Request::Announce(announce_request) => { @@ -27,7 +28,7 @@ pub fn log_request(request: &Request, request_id: &RequestId, server_socket_addr let info_hash_str = InfoHash::from_bytes(&announce_request.info_hash.0).to_hex_string(); tracing::span!( - target: "UDP TRACKER", + target: UDP_TRACKER_LOG_TARGET, tracing::Level::INFO, "request", server_socket_addr = %server_socket_addr, action = %action, transaction_id = %transaction_id_str, request_id = %request_id, connection_id = %connection_id_str, info_hash = %info_hash_str); } Request::Scrape(scrape_request) => { @@ -36,7 +37,7 @@ pub fn log_request(request: &Request, request_id: &RequestId, server_socket_addr let connection_id_str = scrape_request.connection_id.0.to_string(); 
tracing::span!( - target: "UDP TRACKER", + target: UDP_TRACKER_LOG_TARGET, tracing::Level::INFO, "request", server_socket_addr = %server_socket_addr, @@ -64,7 +65,7 @@ pub fn log_response( latency: Duration, ) { tracing::span!( - target: "UDP TRACKER", + target: UDP_TRACKER_LOG_TARGET, tracing::Level::INFO, "response", server_socket_addr = %server_socket_addr, @@ -75,12 +76,12 @@ pub fn log_response( pub fn log_bad_request(request_id: &RequestId) { tracing::span!( - target: "UDP TRACKER", + target: UDP_TRACKER_LOG_TARGET, tracing::Level::INFO, "bad request", request_id = %request_id); } pub fn log_error_response(request_id: &RequestId) { tracing::span!( - target: "UDP TRACKER", + target: UDP_TRACKER_LOG_TARGET, tracing::Level::INFO, "response", request_id = %request_id); } diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index 3062a4393..5c5460397 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -649,6 +649,8 @@ pub mod peer_builder; pub mod request; pub mod server; +pub const UDP_TRACKER_LOG_TARGET: &str = "UDP TRACKER"; + /// Number of bytes. pub type Bytes = u64; /// The port the peer is listening on. 
diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 229729038..e60e49ace 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -42,7 +42,7 @@ use crate::bootstrap::jobs::Started; use crate::core::Tracker; use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::{shutdown_signal_with_message, Halted}; -use crate::servers::udp::handlers; +use crate::servers::udp::{handlers, UDP_TRACKER_LOG_TARGET}; use crate::shared::bit_torrent::tracker::udp::client::check; use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; @@ -150,7 +150,7 @@ impl UdpServer { }, }; - tracing::trace!(target: "UDP TRACKER: UdpServer::start", local_addr, "(running)"); + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "UdpServer::start (running)"); Ok(running_udp_server) } @@ -248,7 +248,7 @@ impl BoundSocket { }; let local_addr = format!("udp://{addr}"); - tracing::debug!(target: "UDP TRACKER: UdpSocket::new", local_addr, "(bound)"); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "UdpSocket::new (bound)"); Ok(Self { socket: Arc::new(socket), @@ -347,6 +347,8 @@ impl Udp { format!("Halting UDP Service Bound to Socket: {bind_to}"), )); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting on: {bind_to}"); + let socket = tokio::time::timeout(Duration::from_millis(5000), BoundSocket::new(bind_to)) .await .expect("it should bind to the socket within five seconds"); @@ -354,7 +356,7 @@ impl Udp { let bound_socket = match socket { Ok(socket) => socket, Err(e) => { - tracing::error!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown", addr = %bind_to, err = %e, "panic! (error when building socket)" ); + tracing::error!(target: UDP_TRACKER_LOG_TARGET, addr = %bind_to, err = %e, "Udp::run_with_graceful_shutdown panic! 
(error when building socket)" ); panic!("could not bind to socket!"); } }; @@ -364,18 +366,18 @@ impl Udp { // note: this log message is parsed by our container. i.e: // - // `[UDP TRACKER][INFO] Starting on: udp://` + // `INFO UDP TRACKER: Started on: udp://0.0.0.0:6969` // - tracing::info!(target: "UDP TRACKER", "Starting on: {local_udp_url}"); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Started on: {local_udp_url}"); let receiver = Receiver::new(bound_socket.into(), tracker); - tracing::trace!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown", local_udp_url, "(spawning main loop)"); + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (spawning main loop)"); let running = { let local_addr = local_udp_url.clone(); tokio::task::spawn(async move { - tracing::debug!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown::task", local_addr, "(listening...)"); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_with_graceful_shutdown::task (listening...)"); let () = Self::run_udp_server_main(receiver).await; }) }; @@ -384,13 +386,13 @@ impl Udp { .send(Started { address }) .expect("the UDP Tracker service should not be dropped"); - tracing::debug!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown", local_udp_url, "(started)"); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (started)"); let stop = running.abort_handle(); select! 
{ - _ = running => { tracing::debug!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown", local_udp_url, "(stopped)"); }, - _ = halt_task => { tracing::debug!(target: "UDP TRACKER: Udp::run_with_graceful_shutdown",local_udp_url, "(halting)"); } + _ = running => { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (stopped)"); }, + _ = halt_task => { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (halting)"); } } stop.abort(); @@ -405,19 +407,19 @@ impl Udp { loop { if let Some(req) = { - tracing::trace!(target: "UDP TRACKER: Udp::run_udp_server", local_addr, "(wait for request)"); + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server (wait for request)"); receiver.next().await } { - tracing::trace!(target: "UDP TRACKER: Udp::run_udp_server::loop", local_addr, "(in)"); + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop (in)"); let req = match req { Ok(req) => req, Err(e) => { if e.kind() == std::io::ErrorKind::Interrupted { - tracing::warn!(target: "UDP TRACKER: Udp::run_udp_server::loop", local_addr, err = %e, "(interrupted)"); + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, local_addr, err = %e, "Udp::run_udp_server::loop (interrupted)"); return; } - tracing::error!(target: "UDP TRACKER: Udp::run_udp_server::loop", local_addr, err = %e, "break: (got error)"); + tracing::error!(target: UDP_TRACKER_LOG_TARGET, local_addr, err = %e, "Udp::run_udp_server::loop break: (got error)"); break; } }; @@ -450,13 +452,13 @@ impl Udp { continue; } - tracing::debug!(target: "UDP TRACKER: Udp::run_udp_server::loop", local_addr, removed_count = finished, "(got unfinished task)"); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, removed_count = finished, "Udp::run_udp_server::loop (got unfinished task)"); if finished == 0 { // we have _no_ finished tasks.. will abort the unfinished task to make space... 
h.abort(); - tracing::warn!(target: "UDP TRACKER: Udp::run_udp_server::loop", local_addr, "aborting request: (no finished tasks)"); + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop aborting request: (no finished tasks)"); break; } @@ -476,19 +478,19 @@ impl Udp { } else { tokio::task::yield_now().await; // the request iterator returned `None`. - tracing::error!(target: "UDP TRACKER: Udp::run_udp_server", local_addr, "breaking: (ran dry, should not happen in production!)"); + tracing::error!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server breaking: (ran dry, should not happen in production!)"); break; } } } async fn process_request(request: UdpRequest, tracker: Arc, socket: Arc) { - tracing::trace!(target: "UDP TRACKER: Udp::process_request", request = %request.from, "(receiving)"); + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, request = %request.from, "Udp::process_request (receiving)"); Self::process_valid_request(tracker, socket, request).await; } async fn process_valid_request(tracker: Arc, socket: Arc, udp_request: UdpRequest) { - tracing::trace!(target: "UDP TRACKER: Udp::process_valid_request", "Making Response to {udp_request:?}"); + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, "Udp::process_valid_request. 
Making Response to {udp_request:?}"); let from = udp_request.from; let response = handlers::handle_packet(udp_request, &tracker.clone(), socket.local_addr()).await; Self::send_response(&socket.clone(), from, response).await; @@ -503,7 +505,7 @@ impl Udp { Response::Error(e) => format!("Error: {e:?}"), }; - tracing::debug!(target: "UDP TRACKER: Udp::send_response", target = ?to, response_type, "(sending)"); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, target = ?to, response_type, "Udp::send_response (sending)"); let buffer = vec![0u8; MAX_PACKET_SIZE]; let mut cursor = Cursor::new(buffer); @@ -514,21 +516,21 @@ impl Udp { let position = cursor.position() as usize; let inner = cursor.get_ref(); - tracing::debug!(target: "UDP TRACKER: Udp::send_response", ?to, bytes_count = &inner[..position].len(), "(sending...)" ); - tracing::trace!(target: "UDP TRACKER: Udp::send_response", ?to, bytes_count = &inner[..position].len(), payload = ?&inner[..position], "(sending...)"); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, ?to, bytes_count = &inner[..position].len(), "Udp::send_response (sending...)" ); + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, ?to, bytes_count = &inner[..position].len(), payload = ?&inner[..position], "Udp::send_response (sending...)"); Self::send_packet(bound_socket, &to, &inner[..position]).await; - tracing::trace!(target: "UDP TRACKER: Udp::send_response", ?to, bytes_count = &inner[..position].len(), "(sent)"); + tracing::trace!(target:UDP_TRACKER_LOG_TARGET, ?to, bytes_count = &inner[..position].len(), "Udp::send_response (sent)"); } Err(e) => { - tracing::error!(target: "UDP TRACKER: Udp::send_response", ?to, response_type, err = %e, "(error)"); + tracing::error!(target: UDP_TRACKER_LOG_TARGET, ?to, response_type, err = %e, "Udp::send_response (error)"); } } } async fn send_packet(socket: &Arc, remote_addr: &SocketAddr, payload: &[u8]) { - tracing::trace!(target: "UDP TRACKER: Udp::send_response", to = %remote_addr, ?payload, 
"(sending)"); + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, to = %remote_addr, ?payload, "Udp::send_response (sending)"); // doesn't matter if it reaches or not drop(socket.send_to(payload, remote_addr).await); diff --git a/src/shared/bit_torrent/tracker/udp/client.rs b/src/shared/bit_torrent/tracker/udp/client.rs index 900543462..dce596e08 100644 --- a/src/shared/bit_torrent/tracker/udp/client.rs +++ b/src/shared/bit_torrent/tracker/udp/client.rs @@ -13,6 +13,8 @@ use zerocopy::network_endian::I32; use crate::shared::bit_torrent::tracker::udp::{source_address, MAX_PACKET_SIZE}; +pub const UDP_CLIENT_LOG_TARGET: &str = "UDP CLIENT"; + /// Default timeout for sending and receiving packets. And waiting for sockets /// to be readable and writable. pub const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); @@ -82,7 +84,7 @@ impl UdpClient { /// - Can't write to the socket. /// - Can't send data. pub async fn send(&self, bytes: &[u8]) -> Result { - debug!(target: "UDP client", "sending {bytes:?} ..."); + debug!(target: UDP_CLIENT_LOG_TARGET, "sending {bytes:?} ..."); match time::timeout(self.timeout, self.socket.writable()).await { Ok(writable_result) => { @@ -115,7 +117,7 @@ impl UdpClient { pub async fn receive(&self) -> Result> { let mut response_buffer = [0u8; MAX_PACKET_SIZE]; - debug!(target: "UDP client", "receiving ..."); + debug!(target: UDP_CLIENT_LOG_TARGET, "receiving ..."); match time::timeout(self.timeout, self.socket.readable()).await { Ok(readable_result) => { @@ -138,7 +140,7 @@ impl UdpClient { let mut res: Vec = response_buffer.to_vec(); Vec::truncate(&mut res, size); - debug!(target: "UDP client", "{size} bytes received {res:?}"); + debug!(target: UDP_CLIENT_LOG_TARGET, "{size} bytes received {res:?}"); Ok(res) } @@ -168,7 +170,7 @@ impl UdpTrackerClient { /// /// Will return error if can't write request to bytes. 
pub async fn send(&self, request: Request) -> Result { - debug!(target: "UDP tracker client", "send request {request:?}"); + debug!(target: UDP_CLIENT_LOG_TARGET, "send request {request:?}"); // Write request into a buffer let request_buffer = vec![0u8; MAX_PACKET_SIZE]; @@ -196,7 +198,7 @@ impl UdpTrackerClient { pub async fn receive(&self) -> Result { let payload = self.udp_client.receive().await?; - debug!(target: "UDP tracker client", "received {} bytes. Response {payload:?}", payload.len()); + debug!(target: UDP_CLIENT_LOG_TARGET, "received {} bytes. Response {payload:?}", payload.len()); let response = Response::parse_bytes(&payload, true)?; diff --git a/tests/servers/health_check_api/environment.rs b/tests/servers/health_check_api/environment.rs index a50ad5156..cf0566d67 100644 --- a/tests/servers/health_check_api/environment.rs +++ b/tests/servers/health_check_api/environment.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use tokio::sync::oneshot::{self, Sender}; use tokio::task::JoinHandle; use torrust_tracker::bootstrap::jobs::Started; -use torrust_tracker::servers::health_check_api::server; +use torrust_tracker::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TARGET}; use torrust_tracker::servers::registar::Registar; use torrust_tracker::servers::signals::{self, Halted}; use torrust_tracker_configuration::HealthCheckApi; @@ -49,21 +49,21 @@ impl Environment { let register = self.registar.entries(); - debug!(target: "HEALTH CHECK API", "Spawning task to launch the service ..."); + debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Spawning task to launch the service ..."); let server = tokio::spawn(async move { - debug!(target: "HEALTH CHECK API", "Starting the server in a spawned task ..."); + debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Starting the server in a spawned task ..."); server::start(self.state.bind_to, tx_start, rx_halt, register) .await .expect("it should start the health check service"); - debug!(target: "HEALTH CHECK API", "Server started. 
Sending the binding {} ...", self.state.bind_to); + debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Server started. Sending the binding {} ...", self.state.bind_to); self.state.bind_to }); - debug!(target: "HEALTH CHECK API", "Waiting for spawning task to send the binding ..."); + debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Waiting for spawning task to send the binding ..."); let binding = rx_start.await.expect("it should send service binding").address; From b4b4515a9aa60ed5351c6f3f0c8b27ea01d9c0d6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Jun 2024 13:41:55 +0100 Subject: [PATCH 0898/1003] refactor: extract const for logging targets And make it explicit the coupling between logs and `RunningServices` type. --- src/bootstrap/jobs/health_check_api.rs | 3 ++- src/console/ci/e2e/logs_parser.rs | 7 ++++--- src/servers/apis/server.rs | 3 ++- src/servers/http/server.rs | 3 ++- src/servers/logging.rs | 29 ++++++++++++++++++++++++++ src/servers/mod.rs | 1 + src/servers/udp/server.rs | 7 ++----- 7 files changed, 42 insertions(+), 11 deletions(-) create mode 100644 src/servers/logging.rs diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index e79a6da77..b4d4862ee 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -21,6 +21,7 @@ use tracing::info; use super::Started; use crate::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TARGET}; +use crate::servers::logging::STARTED_ON; use crate::servers::registar::ServiceRegistry; use crate::servers::signals::Halted; @@ -55,7 +56,7 @@ pub async fn start_job(config: &HealthCheckApi, register: ServiceRegistry) -> Jo // Wait until the server sends the started message match rx_start.await { - Ok(msg) => info!(target: HEALTH_CHECK_API_LOG_TARGET, "Started on: {protocol}://{}", msg.address), + Ok(msg) => info!(target: HEALTH_CHECK_API_LOG_TARGET, "{STARTED_ON}: {protocol}://{}", msg.address), Err(e) => panic!("the Health Check API 
server was dropped: {e}"), } diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index 8bf7974c1..37eb367b1 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -4,6 +4,7 @@ use serde::{Deserialize, Serialize}; use crate::servers::health_check_api::HEALTH_CHECK_API_LOG_TARGET; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; +use crate::servers::logging::STARTED_ON; use crate::servers::udp::UDP_TRACKER_LOG_TARGET; const INFO_LOG_LEVEL: &str = "INFO"; @@ -65,9 +66,9 @@ impl RunningServices { let mut http_trackers: Vec = Vec::new(); let mut health_checks: Vec = Vec::new(); - let udp_re = Regex::new(r"Started on: udp://([0-9.]+:[0-9]+)").unwrap(); - let http_re = Regex::new(r"Started on: (https?://[0-9.]+:[0-9]+)").unwrap(); // DevSkim: ignore DS137138 - let health_re = Regex::new(r"Started on: (https?://[0-9.]+:[0-9]+)").unwrap(); // DevSkim: ignore DS137138 + let udp_re = Regex::new(&format!("{STARTED_ON}: {}", r"udp://([0-9.]+:[0-9]+)")).unwrap(); + let http_re = Regex::new(&format!("{STARTED_ON}: {}", r"(https?://[0-9.]+:[0-9]+)")).unwrap(); // DevSkim: ignore DS137138 + let health_re = Regex::new(&format!("{STARTED_ON}: {}", r"(https?://[0-9.]+:[0-9]+)")).unwrap(); // DevSkim: ignore DS137138 let ansi_escape_re = Regex::new(r"\x1b\[[0-9;]*m").unwrap(); for line in logs.lines() { diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index d47e5d542..967080bd5 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -39,6 +39,7 @@ use crate::bootstrap::jobs::Started; use crate::core::Tracker; use crate::servers::apis::API_LOG_TARGET; use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; +use crate::servers::logging::STARTED_ON; use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::{graceful_shutdown, Halted}; @@ -251,7 +252,7 @@ impl Launcher { } }); - info!(target: 
API_LOG_TARGET, "Started on {protocol}://{}", address); + info!(target: API_LOG_TARGET, "{STARTED_ON} {protocol}://{}", address); tx_start .send(Started { address }) diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 9199573b0..87f0e945b 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -14,6 +14,7 @@ use crate::bootstrap::jobs::Started; use crate::core::Tracker; use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; +use crate::servers::logging::STARTED_ON; use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::{graceful_shutdown, Halted}; @@ -77,7 +78,7 @@ impl Launcher { } }); - info!(target: HTTP_TRACKER_LOG_TARGET, "Started on: {protocol}://{}", address); + info!(target: HTTP_TRACKER_LOG_TARGET, "{STARTED_ON}: {protocol}://{}", address); tx_start .send(Started { address }) diff --git a/src/servers/logging.rs b/src/servers/logging.rs new file mode 100644 index 000000000..ad9ccbbcc --- /dev/null +++ b/src/servers/logging.rs @@ -0,0 +1,29 @@ +/// This is the prefix used in logs to identify a started service. +/// +/// For example: +/// +/// ```text +/// 2024-06-25T12:36:25.025312Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 +/// 2024-06-25T12:36:25.025445Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 +/// 2024-06-25T12:36:25.025527Z INFO API: Started on http://0.0.0.0:1212 +/// 2024-06-25T12:36:25.025580Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 +/// ``` +pub const STARTED_ON: &str = "Started on"; + +/* + +todo: we should use a field for the URL. 
+ +For example, instead of: + +``` +2024-06-25T12:36:25.025312Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 +``` + +We should use something like: + +``` +2024-06-25T12:36:25.025312Z INFO UDP TRACKER started_at_url=udp://0.0.0.0:6969 +``` + +*/ diff --git a/src/servers/mod.rs b/src/servers/mod.rs index 0c9cc5dd8..705a4728e 100644 --- a/src/servers/mod.rs +++ b/src/servers/mod.rs @@ -3,6 +3,7 @@ pub mod apis; pub mod custom_axum_server; pub mod health_check_api; pub mod http; +pub mod logging; pub mod registar; pub mod signals; pub mod udp; diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index e60e49ace..5e2a67c85 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -40,6 +40,7 @@ use tokio::task::{AbortHandle, JoinHandle}; use super::UdpRequest; use crate::bootstrap::jobs::Started; use crate::core::Tracker; +use crate::servers::logging::STARTED_ON; use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::{shutdown_signal_with_message, Halted}; use crate::servers::udp::{handlers, UDP_TRACKER_LOG_TARGET}; @@ -364,11 +365,7 @@ impl Udp { let address = bound_socket.local_addr(); let local_udp_url = format!("udp://{address}"); - // note: this log message is parsed by our container. 
i.e: - // - // `INFO UDP TRACKER: Started on: udp://0.0.0.0:6969` - // - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Started on: {local_udp_url}"); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "{STARTED_ON}: {local_udp_url}"); let receiver = Receiver::new(bound_socket.into(), tracker); From a5e2baf383edb593d6c8fe2e4477b8e6a61b466d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Jun 2024 13:52:26 +0100 Subject: [PATCH 0899/1003] refactor: extract method --- src/servers/udp/server.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 5e2a67c85..53fbaca34 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -36,6 +36,7 @@ use ringbuf::StaticRb; use tokio::select; use tokio::sync::oneshot; use tokio::task::{AbortHandle, JoinHandle}; +use url::Url; use super::UdpRequest; use crate::bootstrap::jobs::Started; @@ -241,6 +242,9 @@ struct BoundSocket { impl BoundSocket { async fn new(addr: SocketAddr) -> Result> { + let bind_addr = format!("udp://{addr}"); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, bind_addr, "UdpSocket::new (binding)"); + let socket = tokio::net::UdpSocket::bind(addr).await; let socket = match socket { @@ -259,6 +263,10 @@ impl BoundSocket { fn local_addr(&self) -> SocketAddr { self.socket.local_addr().expect("it should get local address") } + + fn url(&self) -> Url { + Url::parse(&format!("udp://{}", self.local_addr())).expect("UDP socket address should be valid") + } } impl Deref for BoundSocket { @@ -363,7 +371,7 @@ impl Udp { }; let address = bound_socket.local_addr(); - let local_udp_url = format!("udp://{address}"); + let local_udp_url = bound_socket.url().to_string(); tracing::info!(target: UDP_TRACKER_LOG_TARGET, "{STARTED_ON}: {local_udp_url}"); From 35b6c84fbb3d51365cd8e099225510d98494ac46 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Jun 2024 16:18:11 +0100 Subject: [PATCH 0900/1003] refactor: simplify UDP 
server receiver It only gets new UDP requests, without spawning tasks to handle them. --- src/servers/udp/server.rs | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 53fbaca34..7557bff0b 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -290,22 +290,20 @@ impl Udp { struct Receiver { bound_socket: Arc, - tracker: Arc, data: RefCell<[u8; MAX_PACKET_SIZE]>, } impl Receiver { - pub fn new(bound_socket: Arc, tracker: Arc) -> Self { + pub fn new(bound_socket: Arc) -> Self { Receiver { bound_socket, - tracker, data: RefCell::new([0; MAX_PACKET_SIZE]), } } } impl Stream for Receiver { - type Item = std::io::Result; + type Item = std::io::Result; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut buf = *self.data.borrow_mut(); @@ -319,13 +317,7 @@ impl Stream for Receiver { Ok(from) => { let payload = buf.filled().to_vec(); let request = UdpRequest { payload, from }; - - Some(Ok(tokio::task::spawn(Udp::process_request( - request, - self.tracker.clone(), - self.bound_socket.clone(), - )) - .abort_handle())) + Some(Ok(request)) } Err(err) => Some(Err(err)), }; @@ -375,7 +367,7 @@ impl Udp { tracing::info!(target: UDP_TRACKER_LOG_TARGET, "{STARTED_ON}: {local_udp_url}"); - let receiver = Receiver::new(bound_socket.into(), tracker); + let receiver = Receiver::new(bound_socket.into()); tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (spawning main loop)"); @@ -383,7 +375,7 @@ impl Udp { let local_addr = local_udp_url.clone(); tokio::task::spawn(async move { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_with_graceful_shutdown::task (listening...)"); - let () = Self::run_udp_server_main(receiver).await; + let () = Self::run_udp_server_main(receiver, tracker.clone()).await; }) }; @@ -404,7 +396,7 @@ 
tokio::task::yield_now().await; // lets allow the other threads to complete. } - async fn run_udp_server_main(mut receiver: Receiver) { + async fn run_udp_server_main(mut receiver: Receiver, tracker: Arc) { let reqs = &mut ActiveRequests::default(); let addr = receiver.bound_socket.local_addr(); @@ -429,12 +421,15 @@ impl Udp { } }; - if req.is_finished() { + let abort_handle = + tokio::task::spawn(Udp::process_request(req, tracker.clone(), receiver.bound_socket.clone())).abort_handle(); + + if abort_handle.is_finished() { continue; } // fill buffer with requests - let Err(req) = reqs.rb.try_push(req) else { + let Err(req) = reqs.rb.try_push(abort_handle) else { continue; }; From 61fb4b281d2d957be0292862d2517aebdc9dc1eb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Jun 2024 16:45:54 +0100 Subject: [PATCH 0901/1003] refactor: move active requests logic to ActiveRequest type --- src/servers/udp/server.rs | 125 ++++++++++++++++++++++++-------------- 1 file changed, 80 insertions(+), 45 deletions(-) diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index 7557bff0b..14dd0a0f6 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -235,6 +235,67 @@ impl Drop for ActiveRequests { } } +impl ActiveRequests { + /// It inserts the abort handle for the UDP request processor tasks. + /// + /// If there is no room for the new task, it tries to make place: + /// + /// - Firstly, removing finished tasks. + /// - Secondly, removing the oldest unfinished tasks. + pub async fn force_push(&mut self, abort_handle: AbortHandle, local_addr: &str) { + // fill buffer with requests + let Err(abort_handle) = self.rb.try_push(abort_handle) else { + return; + }; + + let mut finished: u64 = 0; + let mut unfinished_task = None; + + // buffer is full.. lets make some space. + for h in self.rb.pop_iter() { + // remove some finished tasks + if h.is_finished() { + finished += 1; + continue; + } + + // task is unfinished.. give it another chance. 
+ tokio::task::yield_now().await; + + // if now finished, we continue. + if h.is_finished() { + finished += 1; + continue; + } + + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, removed_count = finished, "Udp::run_udp_server::loop (got unfinished task)"); + + if finished == 0 { + // we have _no_ finished tasks.. will abort the unfinished task to make space... + h.abort(); + + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop aborting request: (no finished tasks)"); + break; + } + + // we have space, return unfinished task for re-entry. + unfinished_task = Some(h); + } + + // re-insert the previous unfinished task. + if let Some(h) = unfinished_task { + self.rb.try_push(h).expect("it was previously inserted"); + } + + // insert the new task. + if !abort_handle.is_finished() { + self.rb + .try_push(abort_handle) + .expect("it should remove at least one element."); + } + } +} + /// Wrapper for Tokio [`UdpSocket`][`tokio::net::UdpSocket`] that is bound to a particular socket. struct BoundSocket { socket: Arc, @@ -421,60 +482,34 @@ impl Udp { } }; - let abort_handle = - tokio::task::spawn(Udp::process_request(req, tracker.clone(), receiver.bound_socket.clone())).abort_handle(); - - if abort_handle.is_finished() { - continue; - } - - // fill buffer with requests - let Err(req) = reqs.rb.try_push(abort_handle) else { - continue; - }; - - let mut finished: u64 = 0; - let mut unfinished_task = None; - // buffer is full.. lets make some space. - for h in reqs.rb.pop_iter() { - // remove some finished tasks - if h.is_finished() { - finished += 1; - continue; - } + /* code-review: - // task is unfinished.. give it another chance. - tokio::task::yield_now().await; + Does it make sense to spawn a new request processor task when + the ActiveRequests buffer is full? - // if now finished, we continue. 
- if h.is_finished() { - finished += 1; - continue; - } + We could store the UDP request in a secondary buffer and wait + until active tasks are finished. When a active request is finished + we can move a new UDP request from the pending to process requests + buffer to the active requests buffer. - tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, removed_count = finished, "Udp::run_udp_server::loop (got unfinished task)"); + This forces us to define an explicit timeout for active requests. - if finished == 0 { - // we have _no_ finished tasks.. will abort the unfinished task to make space... - h.abort(); + In the current solution the timeout is dynamic, it depends on + the system load. With high load we can remove tasks without + giving them enough time to be processed. With low load we could + keep processing running longer than a reasonable time for + the client to receive the response. - tracing::warn!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop aborting request: (no finished tasks)"); - break; - } + */ - // we have space, return unfinished task for re-entry. - unfinished_task = Some(h); - } + let abort_handle = + tokio::task::spawn(Udp::process_request(req, tracker.clone(), receiver.bound_socket.clone())).abort_handle(); - // re-insert the previous unfinished task. - if let Some(h) = unfinished_task { - reqs.rb.try_push(h).expect("it was previously inserted"); + if abort_handle.is_finished() { + continue; } - // insert the new task. - if !req.is_finished() { - reqs.rb.try_push(req).expect("it should remove at least one element."); - } + reqs.force_push(abort_handle, &local_addr).await; } else { tokio::task::yield_now().await; // the request iterator returned `None`. 
From 336e0e66f0c26c6393cc6701fa30ae0b83bf5aea Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Jun 2024 16:48:34 +0100 Subject: [PATCH 0902/1003] refactor: reorganize mod to extract new submods --- src/servers/udp/{server.rs => server/mod.rs} | 1 - 1 file changed, 1 deletion(-) rename src/servers/udp/{server.rs => server/mod.rs} (99%) diff --git a/src/servers/udp/server.rs b/src/servers/udp/server/mod.rs similarity index 99% rename from src/servers/udp/server.rs rename to src/servers/udp/server/mod.rs index 14dd0a0f6..36c377cc4 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server/mod.rs @@ -16,7 +16,6 @@ //! because we want to be able to start and stop the server multiple times, and //! we want to know the bound address and the current state of the server. //! In production, the `Udp` launcher is used directly. -//! use std::cell::RefCell; use std::fmt::Debug; From c121bf2575ecef77e2ecf14c15b30dbb90e33031 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Jun 2024 16:51:07 +0100 Subject: [PATCH 0903/1003] refactor: rename UDP server types --- src/bootstrap/jobs/udp_tracker.rs | 4 ++-- src/servers/udp/server/mod.rs | 33 ++++++++++++++++--------------- tests/servers/udp/environment.rs | 4 ++-- 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index ba39df2fe..e694163a9 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -14,7 +14,7 @@ use tracing::debug; use crate::core; use crate::servers::registar::ServiceRegistrationForm; -use crate::servers::udp::server::{Launcher, UdpServer}; +use crate::servers::udp::server::{Spawner, UdpServer}; use crate::servers::udp::UDP_TRACKER_LOG_TARGET; /// It starts a new UDP server with the provided configuration. 
@@ -30,7 +30,7 @@ use crate::servers::udp::UDP_TRACKER_LOG_TARGET; pub async fn start_job(config: &UdpTracker, tracker: Arc, form: ServiceRegistrationForm) -> JoinHandle<()> { let bind_to = config.bind_address; - let server = UdpServer::new(Launcher::new(bind_to)) + let server = UdpServer::new(Spawner::new(bind_to)) .start(tracker, form) .await .expect("it should be able to start the udp tracker"); diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 36c377cc4..3b1792b3d 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -96,7 +96,7 @@ pub struct UdpServer { /// A stopped UDP server state. pub struct Stopped { - launcher: Launcher, + launcher: Spawner, } /// A running UDP server state. @@ -105,13 +105,13 @@ pub struct Running { /// The address where the server is bound. pub binding: SocketAddr, pub halt_task: tokio::sync::oneshot::Sender, - pub task: JoinHandle, + pub task: JoinHandle, } impl UdpServer { /// Creates a new `UdpServer` instance in `stopped`state. #[must_use] - pub fn new(launcher: Launcher) -> Self { + pub fn new(launcher: Spawner) -> Self { Self { state: Stopped { launcher }, } @@ -140,7 +140,7 @@ impl UdpServer { let binding = rx_start.await.expect("it should be able to start the service").address; let local_addr = format!("udp://{binding}"); - form.send(ServiceRegistration::new(binding, Udp::check)) + form.send(ServiceRegistration::new(binding, Launcher::check)) .expect("it should be able to send service registration"); let running_udp_server: UdpServer = UdpServer { @@ -186,12 +186,12 @@ impl UdpServer { } #[derive(Constructor, Copy, Clone, Debug)] -pub struct Launcher { +pub struct Spawner { bind_to: SocketAddr, } -impl Launcher { - /// It starts the UDP server instance. +impl Spawner { + /// It spawns a new tasks to run the UDP server instance. 
/// /// # Panics /// @@ -201,10 +201,10 @@ impl Launcher { tracker: Arc, tx_start: oneshot::Sender, rx_halt: oneshot::Receiver, - ) -> JoinHandle { - let launcher = Launcher::new(self.bind_to); + ) -> JoinHandle { + let launcher = Spawner::new(self.bind_to); tokio::spawn(async move { - Udp::run_with_graceful_shutdown(tracker, launcher.bind_to, tx_start, rx_halt).await; + Launcher::run_with_graceful_shutdown(tracker, launcher.bind_to, tx_start, rx_halt).await; launcher }) } @@ -388,9 +388,9 @@ impl Stream for Receiver { /// A UDP server instance launcher. #[derive(Constructor)] -pub struct Udp; +pub struct Launcher; -impl Udp { +impl Launcher { /// It starts the UDP server instance with graceful shutdown. /// /// # Panics @@ -502,7 +502,8 @@ impl Udp { */ let abort_handle = - tokio::task::spawn(Udp::process_request(req, tracker.clone(), receiver.bound_socket.clone())).abort_handle(); + tokio::task::spawn(Launcher::process_request(req, tracker.clone(), receiver.bound_socket.clone())) + .abort_handle(); if abort_handle.is_finished() { continue; @@ -589,7 +590,7 @@ mod tests { use crate::bootstrap::app::initialize_with_configuration; use crate::servers::registar::Registar; - use crate::servers::udp::server::{Launcher, UdpServer}; + use crate::servers::udp::server::{Spawner, UdpServer}; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { @@ -600,7 +601,7 @@ mod tests { let bind_to = config.bind_address; let register = &Registar::default(); - let stopped = UdpServer::new(Launcher::new(bind_to)); + let stopped = UdpServer::new(Spawner::new(bind_to)); let started = stopped .start(tracker, register.give_form()) @@ -622,7 +623,7 @@ mod tests { let bind_to = config.bind_address; let register = &Registar::default(); - let stopped = UdpServer::new(Launcher::new(bind_to)); + let stopped = UdpServer::new(Spawner::new(bind_to)); let started = stopped .start(tracker, register.give_form()) diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs 
index 7b21defce..e8fb048ca 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use torrust_tracker::bootstrap::app::initialize_with_configuration; use torrust_tracker::core::Tracker; use torrust_tracker::servers::registar::Registar; -use torrust_tracker::servers::udp::server::{Launcher, Running, Stopped, UdpServer}; +use torrust_tracker::servers::udp::server::{Running, Spawner, Stopped, UdpServer}; use torrust_tracker::shared::bit_torrent::tracker::udp::client::DEFAULT_TIMEOUT; use torrust_tracker_configuration::{Configuration, UdpTracker}; use torrust_tracker_primitives::info_hash::InfoHash; @@ -36,7 +36,7 @@ impl Environment { let bind_to = config.bind_address; - let server = UdpServer::new(Launcher::new(bind_to)); + let server = UdpServer::new(Spawner::new(bind_to)); Self { config, From 89bb73576af3b97f104943a6a01b7b0c37ae2489 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Jun 2024 17:53:14 +0100 Subject: [PATCH 0904/1003] refactor: reorganize UDP server mod --- src/bootstrap/jobs/udp_tracker.rs | 5 +- src/servers/udp/handlers.rs | 6 +- src/servers/udp/mod.rs | 2 +- src/servers/udp/server/bound_socket.rs | 73 +++ src/servers/udp/server/launcher.rs | 219 +++++++++ src/servers/udp/server/mod.rs | 550 +---------------------- src/servers/udp/server/receiver.rs | 54 +++ src/servers/udp/server/request_buffer.rs | 95 ++++ src/servers/udp/server/spawner.rs | 36 ++ src/servers/udp/server/states.rs | 115 +++++ tests/servers/udp/environment.rs | 8 +- tests/servers/udp/mod.rs | 4 +- 12 files changed, 621 insertions(+), 546 deletions(-) create mode 100644 src/servers/udp/server/bound_socket.rs create mode 100644 src/servers/udp/server/launcher.rs create mode 100644 src/servers/udp/server/receiver.rs create mode 100644 src/servers/udp/server/request_buffer.rs create mode 100644 src/servers/udp/server/spawner.rs create mode 100644 src/servers/udp/server/states.rs diff --git 
a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index e694163a9..647461bfc 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -14,7 +14,8 @@ use tracing::debug; use crate::core; use crate::servers::registar::ServiceRegistrationForm; -use crate::servers::udp::server::{Spawner, UdpServer}; +use crate::servers::udp::server::spawner::Spawner; +use crate::servers::udp::server::Server; use crate::servers::udp::UDP_TRACKER_LOG_TARGET; /// It starts a new UDP server with the provided configuration. @@ -30,7 +31,7 @@ use crate::servers::udp::UDP_TRACKER_LOG_TARGET; pub async fn start_job(config: &UdpTracker, tracker: Arc, form: ServiceRegistrationForm) -> JoinHandle<()> { let bind_to = config.bind_address; - let server = UdpServer::new(Spawner::new(bind_to)) + let server = Server::new(Spawner::new(bind_to)) .start(tracker, form) .await .expect("it should be able to start the udp tracker"); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index f7e3aac64..12ae6a250 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -17,7 +17,7 @@ use uuid::Uuid; use zerocopy::network_endian::I32; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; -use super::UdpRequest; +use super::RawRequest; use crate::core::{statistics, ScrapeData, Tracker}; use crate::servers::udp::error::Error; use crate::servers::udp::logging::{log_bad_request, log_error_response, log_request, log_response}; @@ -33,7 +33,7 @@ use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; /// - Delegating the request to the correct handler depending on the request type. /// /// It will return an `Error` response if the request is invalid. 
-pub(crate) async fn handle_packet(udp_request: UdpRequest, tracker: &Arc, addr: SocketAddr) -> Response { +pub(crate) async fn handle_packet(udp_request: RawRequest, tracker: &Arc, addr: SocketAddr) -> Response { debug!("Handling Packets: {udp_request:?}"); let start_time = Instant::now(); @@ -304,7 +304,7 @@ fn handle_error(e: &Error, transaction_id: TransactionId) -> Response { pub struct RequestId(Uuid); impl RequestId { - fn make(_request: &UdpRequest) -> RequestId { + fn make(_request: &RawRequest) -> RequestId { RequestId(Uuid::new_v4()) } } diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index 5c5460397..8ea05d5b1 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -660,7 +660,7 @@ pub type Port = u16; pub type TransactionId = i64; #[derive(Clone, Debug)] -pub(crate) struct UdpRequest { +pub struct RawRequest { payload: Vec, from: SocketAddr, } diff --git a/src/servers/udp/server/bound_socket.rs b/src/servers/udp/server/bound_socket.rs new file mode 100644 index 000000000..cd416c7c5 --- /dev/null +++ b/src/servers/udp/server/bound_socket.rs @@ -0,0 +1,73 @@ +use std::fmt::Debug; +use std::net::SocketAddr; +use std::ops::Deref; +use std::sync::Arc; + +use url::Url; + +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +/// Wrapper for Tokio [`UdpSocket`][`tokio::net::UdpSocket`] that is bound to a particular socket. +pub struct BoundSocket { + socket: Arc, +} + +impl BoundSocket { + /// # Errors + /// + /// Will return an error if the socket can't be bound to the provided address. 
+ pub async fn new(addr: SocketAddr) -> Result> { + let bind_addr = format!("udp://{addr}"); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, bind_addr, "UdpSocket::new (binding)"); + + let socket = tokio::net::UdpSocket::bind(addr).await; + + let socket = match socket { + Ok(socket) => socket, + Err(e) => Err(e)?, + }; + + let local_addr = format!("udp://{addr}"); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "UdpSocket::new (bound)"); + + Ok(Self { + socket: Arc::new(socket), + }) + } + + /// # Panics + /// + /// Will panic if the socket can't get the address it was bound to. + #[must_use] + pub fn address(&self) -> SocketAddr { + self.socket.local_addr().expect("it should get local address") + } + + /// # Panics + /// + /// Will panic if the address the socket was bound to is not a valid address + /// to be used in a URL. + #[must_use] + pub fn url(&self) -> Url { + Url::parse(&format!("udp://{}", self.address())).expect("UDP socket address should be valid") + } +} + +impl Deref for BoundSocket { + type Target = tokio::net::UdpSocket; + + fn deref(&self) -> &Self::Target { + &self.socket + } +} + +impl Debug for BoundSocket { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let local_addr = match self.socket.local_addr() { + Ok(socket) => format!("Receiving From: {socket}"), + Err(err) => format!("Socket Broken: {err}"), + }; + + f.debug_struct("UdpSocket").field("addr", &local_addr).finish_non_exhaustive() + } +} diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs new file mode 100644 index 000000000..db448c2ff --- /dev/null +++ b/src/servers/udp/server/launcher.rs @@ -0,0 +1,219 @@ +use std::io::Cursor; +use std::net::SocketAddr; +use std::sync::Arc; +use std::time::Duration; + +use aquatic_udp_protocol::Response; +use derive_more::Constructor; +use futures_util::StreamExt; +use tokio::select; +use tokio::sync::oneshot; + +use super::request_buffer::ActiveRequests; +use 
super::RawRequest; +use crate::bootstrap::jobs::Started; +use crate::core::Tracker; +use crate::servers::logging::STARTED_ON; +use crate::servers::registar::ServiceHealthCheckJob; +use crate::servers::signals::{shutdown_signal_with_message, Halted}; +use crate::servers::udp::server::bound_socket::BoundSocket; +use crate::servers::udp::server::receiver::Receiver; +use crate::servers::udp::{handlers, UDP_TRACKER_LOG_TARGET}; +use crate::shared::bit_torrent::tracker::udp::client::check; +use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; + +/// A UDP server instance launcher. +#[derive(Constructor)] +pub struct Launcher; + +impl Launcher { + /// It starts the UDP server instance with graceful shutdown. + /// + /// # Panics + /// + /// It panics if unable to bind to udp socket, and get the address from the udp socket. + /// It also panics if unable to send address of socket. + pub async fn run_with_graceful_shutdown( + tracker: Arc, + bind_to: SocketAddr, + tx_start: oneshot::Sender, + rx_halt: oneshot::Receiver, + ) { + let halt_task = tokio::task::spawn(shutdown_signal_with_message( + rx_halt, + format!("Halting UDP Service Bound to Socket: {bind_to}"), + )); + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting on: {bind_to}"); + + let socket = tokio::time::timeout(Duration::from_millis(5000), BoundSocket::new(bind_to)) + .await + .expect("it should bind to the socket within five seconds"); + + let bound_socket = match socket { + Ok(socket) => socket, + Err(e) => { + tracing::error!(target: UDP_TRACKER_LOG_TARGET, addr = %bind_to, err = %e, "Udp::run_with_graceful_shutdown panic! 
(error when building socket)" ); + panic!("could not bind to socket!"); + } + }; + + let address = bound_socket.address(); + let local_udp_url = bound_socket.url().to_string(); + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "{STARTED_ON}: {local_udp_url}"); + + let receiver = Receiver::new(bound_socket.into()); + + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (spawning main loop)"); + + let running = { + let local_addr = local_udp_url.clone(); + tokio::task::spawn(async move { + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_with_graceful_shutdown::task (listening...)"); + let () = Self::run_udp_server_main(receiver, tracker.clone()).await; + }) + }; + + tx_start + .send(Started { address }) + .expect("the UDP Tracker service should not be dropped"); + + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (started)"); + + let stop = running.abort_handle(); + + select! { + _ = running => { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (stopped)"); }, + _ = halt_task => { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (halting)"); } + } + stop.abort(); + + tokio::task::yield_now().await; // lets allow the other threads to complete. 
+ } + + #[must_use] + pub fn check(binding: &SocketAddr) -> ServiceHealthCheckJob { + let binding = *binding; + let info = format!("checking the udp tracker health check at: {binding}"); + + let job = tokio::spawn(async move { check(&binding).await }); + + ServiceHealthCheckJob::new(binding, info, job) + } + + async fn run_udp_server_main(mut receiver: Receiver, tracker: Arc) { + let reqs = &mut ActiveRequests::default(); + + let addr = receiver.bound_socket_address(); + let local_addr = format!("udp://{addr}"); + + loop { + if let Some(req) = { + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server (wait for request)"); + receiver.next().await + } { + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop (in)"); + + let req = match req { + Ok(req) => req, + Err(e) => { + if e.kind() == std::io::ErrorKind::Interrupted { + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, local_addr, err = %e, "Udp::run_udp_server::loop (interrupted)"); + return; + } + tracing::error!(target: UDP_TRACKER_LOG_TARGET, local_addr, err = %e, "Udp::run_udp_server::loop break: (got error)"); + break; + } + }; + + /* code-review: + + Does it make sense to spawn a new request processor task when + the ActiveRequests buffer is full? + + We could store the UDP request in a secondary buffer and wait + until active tasks are finished. When an active request is finished + we can move a new UDP request from the pending to process requests + buffer to the active requests buffer. + + This forces us to define an explicit timeout for active requests. + + In the current solution the timeout is dynamic, it depends on + the system load. With high load we can remove tasks without + giving them enough time to be processed. With low load we could + keep processing running longer than a reasonable time for + the client to receive the response. 
+ + */ + + let abort_handle = + tokio::task::spawn(Launcher::process_request(req, tracker.clone(), receiver.bound_socket.clone())) + .abort_handle(); + + if abort_handle.is_finished() { + continue; + } + + reqs.force_push(abort_handle, &local_addr).await; + } else { + tokio::task::yield_now().await; + // the request iterator returned `None`. + tracing::error!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server breaking: (ran dry, should not happen in production!)"); + break; + } + } + } + + async fn process_request(request: RawRequest, tracker: Arc, socket: Arc) { + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, request = %request.from, "Udp::process_request (receiving)"); + Self::process_valid_request(tracker, socket, request).await; + } + + async fn process_valid_request(tracker: Arc, socket: Arc, udp_request: RawRequest) { + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, "Udp::process_valid_request. Making Response to {udp_request:?}"); + let from = udp_request.from; + let response = handlers::handle_packet(udp_request, &tracker.clone(), socket.address()).await; + Self::send_response(&socket.clone(), from, response).await; + } + + async fn send_response(bound_socket: &Arc, to: SocketAddr, response: Response) { + let response_type = match &response { + Response::Connect(_) => "Connect".to_string(), + Response::AnnounceIpv4(_) => "AnnounceIpv4".to_string(), + Response::AnnounceIpv6(_) => "AnnounceIpv6".to_string(), + Response::Scrape(_) => "Scrape".to_string(), + Response::Error(e) => format!("Error: {e:?}"), + }; + + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, target = ?to, response_type, "Udp::send_response (sending)"); + + let buffer = vec![0u8; MAX_PACKET_SIZE]; + let mut cursor = Cursor::new(buffer); + + match response.write_bytes(&mut cursor) { + Ok(()) => { + #[allow(clippy::cast_possible_truncation)] + let position = cursor.position() as usize; + let inner = cursor.get_ref(); + + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, ?to, 
bytes_count = &inner[..position].len(), "Udp::send_response (sending...)" ); + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, ?to, bytes_count = &inner[..position].len(), payload = ?&inner[..position], "Udp::send_response (sending...)"); + + Self::send_packet(bound_socket, &to, &inner[..position]).await; + + tracing::trace!(target:UDP_TRACKER_LOG_TARGET, ?to, bytes_count = &inner[..position].len(), "Udp::send_response (sent)"); + } + Err(e) => { + tracing::error!(target: UDP_TRACKER_LOG_TARGET, ?to, response_type, err = %e, "Udp::send_response (error)"); + } + } + } + + async fn send_packet(bound_socket: &Arc, remote_addr: &SocketAddr, payload: &[u8]) { + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, to = %remote_addr, ?payload, "Udp::send_response (sending)"); + + // doesn't matter if it reaches or not + drop(bound_socket.send_to(payload, remote_addr).await); + } +} diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 3b1792b3d..1bb9831ee 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -17,35 +17,16 @@ //! we want to know the bound address and the current state of the server. //! In production, the `Udp` launcher is used directly. 
-use std::cell::RefCell; use std::fmt::Debug; -use std::io::Cursor; -use std::net::SocketAddr; -use std::ops::Deref; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; -use std::time::Duration; - -use aquatic_udp_protocol::Response; -use derive_more::Constructor; -use futures::{Stream, StreamExt}; -use ringbuf::traits::{Consumer, Observer, Producer}; -use ringbuf::StaticRb; -use tokio::select; -use tokio::sync::oneshot; -use tokio::task::{AbortHandle, JoinHandle}; -use url::Url; - -use super::UdpRequest; -use crate::bootstrap::jobs::Started; -use crate::core::Tracker; -use crate::servers::logging::STARTED_ON; -use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; -use crate::servers::signals::{shutdown_signal_with_message, Halted}; -use crate::servers::udp::{handlers, UDP_TRACKER_LOG_TARGET}; -use crate::shared::bit_torrent::tracker::udp::client::check; -use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; + +use super::RawRequest; + +pub mod bound_socket; +pub mod launcher; +pub mod receiver; +pub mod request_buffer; +pub mod spawner; +pub mod states; /// Error that can occur when starting or stopping the UDP server. /// @@ -64,21 +45,7 @@ pub enum UdpError { Error(String), } -/// A UDP server instance controller with no UDP instance running. -#[allow(clippy::module_name_repetitions)] -pub type StoppedUdpServer = UdpServer; - -/// A UDP server instance controller with a running UDP instance. -#[allow(clippy::module_name_repetitions)] -pub type RunningUdpServer = UdpServer; - -/// A UDP server instance controller. -/// -/// It's responsible for: -/// -/// - Keeping the initial configuration of the server. -/// - Starting and stopping the server. -/// - Keeping the state of the server: `running` or `stopped`. +/// A UDP server. /// /// It's an state machine. Configurations cannot be changed. This struct /// represents concrete configuration and state. 
It allows to start and stop the @@ -88,499 +55,11 @@ pub type RunningUdpServer = UdpServer; /// > reset to the initial value after stopping the server. This struct is not /// > intended to persist configurations between runs. #[allow(clippy::module_name_repetitions)] -pub struct UdpServer { +pub struct Server { /// The state of the server: `running` or `stopped`. pub state: S, } -/// A stopped UDP server state. - -pub struct Stopped { - launcher: Spawner, -} - -/// A running UDP server state. -#[derive(Debug, Constructor)] -pub struct Running { - /// The address where the server is bound. - pub binding: SocketAddr, - pub halt_task: tokio::sync::oneshot::Sender, - pub task: JoinHandle, -} - -impl UdpServer { - /// Creates a new `UdpServer` instance in `stopped`state. - #[must_use] - pub fn new(launcher: Spawner) -> Self { - Self { - state: Stopped { launcher }, - } - } - - /// It starts the server and returns a `UdpServer` controller in `running` - /// state. - /// - /// # Errors - /// - /// Will return `Err` if UDP can't bind to given bind address. - /// - /// # Panics - /// - /// It panics if unable to receive the bound socket address from service. - /// - pub async fn start(self, tracker: Arc, form: ServiceRegistrationForm) -> Result, std::io::Error> { - let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); - let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); - - assert!(!tx_halt.is_closed(), "Halt channel for UDP tracker should be open"); - - // May need to wrap in a task to about a tokio bug. 
- let task = self.state.launcher.start(tracker, tx_start, rx_halt); - - let binding = rx_start.await.expect("it should be able to start the service").address; - let local_addr = format!("udp://{binding}"); - - form.send(ServiceRegistration::new(binding, Launcher::check)) - .expect("it should be able to send service registration"); - - let running_udp_server: UdpServer = UdpServer { - state: Running { - binding, - halt_task: tx_halt, - task, - }, - }; - - tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "UdpServer::start (running)"); - - Ok(running_udp_server) - } -} - -impl UdpServer { - /// It stops the server and returns a `UdpServer` controller in `stopped` - /// state. - /// - /// # Errors - /// - /// Will return `Err` if the oneshot channel to send the stop signal - /// has already been called once. - /// - /// # Panics - /// - /// It panics if unable to shutdown service. - pub async fn stop(self) -> Result, UdpError> { - self.state - .halt_task - .send(Halted::Normal) - .map_err(|e| UdpError::Error(e.to_string()))?; - - let launcher = self.state.task.await.expect("it should shutdown service"); - - let stopped_api_server: UdpServer = UdpServer { - state: Stopped { launcher }, - }; - - Ok(stopped_api_server) - } -} - -#[derive(Constructor, Copy, Clone, Debug)] -pub struct Spawner { - bind_to: SocketAddr, -} - -impl Spawner { - /// It spawns a new tasks to run the UDP server instance. - /// - /// # Panics - /// - /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. - pub fn start( - &self, - tracker: Arc, - tx_start: oneshot::Sender, - rx_halt: oneshot::Receiver, - ) -> JoinHandle { - let launcher = Spawner::new(self.bind_to); - tokio::spawn(async move { - Launcher::run_with_graceful_shutdown(tracker, launcher.bind_to, tx_start, rx_halt).await; - launcher - }) - } -} - -/// Ring-Buffer of Active Requests -#[derive(Default)] -struct ActiveRequests { - rb: StaticRb, // the number of requests we handle at the same time. 
-} - -impl std::fmt::Debug for ActiveRequests { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let (left, right) = &self.rb.as_slices(); - let dbg = format!("capacity: {}, left: {left:?}, right: {right:?}", &self.rb.capacity()); - f.debug_struct("ActiveRequests").field("rb", &dbg).finish() - } -} - -impl Drop for ActiveRequests { - fn drop(&mut self) { - for h in self.rb.pop_iter() { - if !h.is_finished() { - h.abort(); - } - } - } -} - -impl ActiveRequests { - /// It inserts the abort handle for the UDP request processor tasks. - /// - /// If there is no room for the new task, it tries to make place: - /// - /// - Firstly, removing finished tasks. - /// - Secondly, removing the oldest unfinished tasks. - pub async fn force_push(&mut self, abort_handle: AbortHandle, local_addr: &str) { - // fill buffer with requests - let Err(abort_handle) = self.rb.try_push(abort_handle) else { - return; - }; - - let mut finished: u64 = 0; - let mut unfinished_task = None; - - // buffer is full.. lets make some space. - for h in self.rb.pop_iter() { - // remove some finished tasks - if h.is_finished() { - finished += 1; - continue; - } - - // task is unfinished.. give it another chance. - tokio::task::yield_now().await; - - // if now finished, we continue. - if h.is_finished() { - finished += 1; - continue; - } - - tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, removed_count = finished, "Udp::run_udp_server::loop (got unfinished task)"); - - if finished == 0 { - // we have _no_ finished tasks.. will abort the unfinished task to make space... - h.abort(); - - tracing::warn!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop aborting request: (no finished tasks)"); - break; - } - - // we have space, return unfinished task for re-entry. - unfinished_task = Some(h); - } - - // re-insert the previous unfinished task. 
- if let Some(h) = unfinished_task { - self.rb.try_push(h).expect("it was previously inserted"); - } - - // insert the new task. - if !abort_handle.is_finished() { - self.rb - .try_push(abort_handle) - .expect("it should remove at least one element."); - } - } -} - -/// Wrapper for Tokio [`UdpSocket`][`tokio::net::UdpSocket`] that is bound to a particular socket. -struct BoundSocket { - socket: Arc, -} - -impl BoundSocket { - async fn new(addr: SocketAddr) -> Result> { - let bind_addr = format!("udp://{addr}"); - tracing::debug!(target: UDP_TRACKER_LOG_TARGET, bind_addr, "UdpSocket::new (binding)"); - - let socket = tokio::net::UdpSocket::bind(addr).await; - - let socket = match socket { - Ok(socket) => socket, - Err(e) => Err(e)?, - }; - - let local_addr = format!("udp://{addr}"); - tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "UdpSocket::new (bound)"); - - Ok(Self { - socket: Arc::new(socket), - }) - } - - fn local_addr(&self) -> SocketAddr { - self.socket.local_addr().expect("it should get local address") - } - - fn url(&self) -> Url { - Url::parse(&format!("udp://{}", self.local_addr())).expect("UDP socket address should be valid") - } -} - -impl Deref for BoundSocket { - type Target = tokio::net::UdpSocket; - - fn deref(&self) -> &Self::Target { - &self.socket - } -} - -impl Debug for BoundSocket { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let local_addr = match self.socket.local_addr() { - Ok(socket) => format!("Receiving From: {socket}"), - Err(err) => format!("Socket Broken: {err}"), - }; - - f.debug_struct("UdpSocket").field("addr", &local_addr).finish_non_exhaustive() - } -} - -struct Receiver { - bound_socket: Arc, - data: RefCell<[u8; MAX_PACKET_SIZE]>, -} - -impl Receiver { - pub fn new(bound_socket: Arc) -> Self { - Receiver { - bound_socket, - data: RefCell::new([0; MAX_PACKET_SIZE]), - } - } -} - -impl Stream for Receiver { - type Item = std::io::Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut 
Context<'_>) -> Poll> { - let mut buf = *self.data.borrow_mut(); - let mut buf = tokio::io::ReadBuf::new(&mut buf); - - let Poll::Ready(ready) = self.bound_socket.poll_recv_from(cx, &mut buf) else { - return Poll::Pending; - }; - - let res = match ready { - Ok(from) => { - let payload = buf.filled().to_vec(); - let request = UdpRequest { payload, from }; - Some(Ok(request)) - } - Err(err) => Some(Err(err)), - }; - - Poll::Ready(res) - } -} - -/// A UDP server instance launcher. -#[derive(Constructor)] -pub struct Launcher; - -impl Launcher { - /// It starts the UDP server instance with graceful shutdown. - /// - /// # Panics - /// - /// It panics if unable to bind to udp socket, and get the address from the udp socket. - /// It also panics if unable to send address of socket. - async fn run_with_graceful_shutdown( - tracker: Arc, - bind_to: SocketAddr, - tx_start: oneshot::Sender, - rx_halt: oneshot::Receiver, - ) { - let halt_task = tokio::task::spawn(shutdown_signal_with_message( - rx_halt, - format!("Halting UDP Service Bound to Socket: {bind_to}"), - )); - - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting on: {bind_to}"); - - let socket = tokio::time::timeout(Duration::from_millis(5000), BoundSocket::new(bind_to)) - .await - .expect("it should bind to the socket within five seconds"); - - let bound_socket = match socket { - Ok(socket) => socket, - Err(e) => { - tracing::error!(target: UDP_TRACKER_LOG_TARGET, addr = %bind_to, err = %e, "Udp::run_with_graceful_shutdown panic! 
(error when building socket)" ); - panic!("could not bind to socket!"); - } - }; - - let address = bound_socket.local_addr(); - let local_udp_url = bound_socket.url().to_string(); - - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "{STARTED_ON}: {local_udp_url}"); - - let receiver = Receiver::new(bound_socket.into()); - - tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (spawning main loop)"); - - let running = { - let local_addr = local_udp_url.clone(); - tokio::task::spawn(async move { - tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_with_graceful_shutdown::task (listening...)"); - let () = Self::run_udp_server_main(receiver, tracker.clone()).await; - }) - }; - - tx_start - .send(Started { address }) - .expect("the UDP Tracker service should not be dropped"); - - tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (started)"); - - let stop = running.abort_handle(); - - select! { - _ = running => { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (stopped)"); }, - _ = halt_task => { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (halting)"); } - } - stop.abort(); - - tokio::task::yield_now().await; // lets allow the other threads to complete. 
- } - - async fn run_udp_server_main(mut receiver: Receiver, tracker: Arc) { - let reqs = &mut ActiveRequests::default(); - - let addr = receiver.bound_socket.local_addr(); - let local_addr = format!("udp://{addr}"); - - loop { - if let Some(req) = { - tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server (wait for request)"); - receiver.next().await - } { - tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop (in)"); - - let req = match req { - Ok(req) => req, - Err(e) => { - if e.kind() == std::io::ErrorKind::Interrupted { - tracing::warn!(target: UDP_TRACKER_LOG_TARGET, local_addr, err = %e, "Udp::run_udp_server::loop (interrupted)"); - return; - } - tracing::error!(target: UDP_TRACKER_LOG_TARGET, local_addr, err = %e, "Udp::run_udp_server::loop break: (got error)"); - break; - } - }; - - /* code-review: - - Does it make sense to spawn a new request processor task when - the ActiveRequests buffer is full? - - We could store the UDP request in a secondary buffer and wait - until active tasks are finished. When a active request is finished - we can move a new UDP request from the pending to process requests - buffer to the active requests buffer. - - This forces us to define an explicit timeout for active requests. - - In the current solution the timeout is dynamic, it depends on - the system load. With high load we can remove tasks without - giving them enough time to be processed. With low load we could - keep processing running longer than a reasonable time for - the client to receive the response. - - */ - - let abort_handle = - tokio::task::spawn(Launcher::process_request(req, tracker.clone(), receiver.bound_socket.clone())) - .abort_handle(); - - if abort_handle.is_finished() { - continue; - } - - reqs.force_push(abort_handle, &local_addr).await; - } else { - tokio::task::yield_now().await; - // the request iterator returned `None`. 
- tracing::error!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server breaking: (ran dry, should not happen in production!)"); - break; - } - } - } - - async fn process_request(request: UdpRequest, tracker: Arc, socket: Arc) { - tracing::trace!(target: UDP_TRACKER_LOG_TARGET, request = %request.from, "Udp::process_request (receiving)"); - Self::process_valid_request(tracker, socket, request).await; - } - - async fn process_valid_request(tracker: Arc, socket: Arc, udp_request: UdpRequest) { - tracing::trace!(target: UDP_TRACKER_LOG_TARGET, "Udp::process_valid_request. Making Response to {udp_request:?}"); - let from = udp_request.from; - let response = handlers::handle_packet(udp_request, &tracker.clone(), socket.local_addr()).await; - Self::send_response(&socket.clone(), from, response).await; - } - - async fn send_response(bound_socket: &Arc, to: SocketAddr, response: Response) { - let response_type = match &response { - Response::Connect(_) => "Connect".to_string(), - Response::AnnounceIpv4(_) => "AnnounceIpv4".to_string(), - Response::AnnounceIpv6(_) => "AnnounceIpv6".to_string(), - Response::Scrape(_) => "Scrape".to_string(), - Response::Error(e) => format!("Error: {e:?}"), - }; - - tracing::debug!(target: UDP_TRACKER_LOG_TARGET, target = ?to, response_type, "Udp::send_response (sending)"); - - let buffer = vec![0u8; MAX_PACKET_SIZE]; - let mut cursor = Cursor::new(buffer); - - match response.write_bytes(&mut cursor) { - Ok(()) => { - #[allow(clippy::cast_possible_truncation)] - let position = cursor.position() as usize; - let inner = cursor.get_ref(); - - tracing::debug!(target: UDP_TRACKER_LOG_TARGET, ?to, bytes_count = &inner[..position].len(), "Udp::send_response (sending...)" ); - tracing::trace!(target: UDP_TRACKER_LOG_TARGET, ?to, bytes_count = &inner[..position].len(), payload = ?&inner[..position], "Udp::send_response (sending...)"); - - Self::send_packet(bound_socket, &to, &inner[..position]).await; - - 
tracing::trace!(target:UDP_TRACKER_LOG_TARGET, ?to, bytes_count = &inner[..position].len(), "Udp::send_response (sent)"); - } - Err(e) => { - tracing::error!(target: UDP_TRACKER_LOG_TARGET, ?to, response_type, err = %e, "Udp::send_response (error)"); - } - } - } - - async fn send_packet(socket: &Arc, remote_addr: &SocketAddr, payload: &[u8]) { - tracing::trace!(target: UDP_TRACKER_LOG_TARGET, to = %remote_addr, ?payload, "Udp::send_response (sending)"); - - // doesn't matter if it reaches or not - drop(socket.send_to(payload, remote_addr).await); - } - - fn check(binding: &SocketAddr) -> ServiceHealthCheckJob { - let binding = *binding; - let info = format!("checking the udp tracker health check at: {binding}"); - - let job = tokio::spawn(async move { check(&binding).await }); - - ServiceHealthCheckJob::new(binding, info, job) - } -} - #[cfg(test)] mod tests { use std::sync::Arc; @@ -588,9 +67,10 @@ mod tests { use torrust_tracker_test_helpers::configuration::ephemeral_mode_public; + use super::spawner::Spawner; + use super::Server; use crate::bootstrap::app::initialize_with_configuration; use crate::servers::registar::Registar; - use crate::servers::udp::server::{Spawner, UdpServer}; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { @@ -601,7 +81,7 @@ mod tests { let bind_to = config.bind_address; let register = &Registar::default(); - let stopped = UdpServer::new(Spawner::new(bind_to)); + let stopped = Server::new(Spawner::new(bind_to)); let started = stopped .start(tracker, register.give_form()) @@ -623,7 +103,7 @@ mod tests { let bind_to = config.bind_address; let register = &Registar::default(); - let stopped = UdpServer::new(Spawner::new(bind_to)); + let stopped = Server::new(Spawner::new(bind_to)); let started = stopped .start(tracker, register.give_form()) diff --git a/src/servers/udp/server/receiver.rs b/src/servers/udp/server/receiver.rs new file mode 100644 index 000000000..020ab7324 --- /dev/null +++ b/src/servers/udp/server/receiver.rs @@ 
-0,0 +1,54 @@ +use std::cell::RefCell; +use std::net::SocketAddr; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; + +use futures::Stream; + +use super::bound_socket::BoundSocket; +use super::RawRequest; +use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; + +pub struct Receiver { + pub bound_socket: Arc, + data: RefCell<[u8; MAX_PACKET_SIZE]>, +} + +impl Receiver { + #[must_use] + pub fn new(bound_socket: Arc) -> Self { + Receiver { + bound_socket, + data: RefCell::new([0; MAX_PACKET_SIZE]), + } + } + + pub fn bound_socket_address(&self) -> SocketAddr { + self.bound_socket.address() + } +} + +impl Stream for Receiver { + type Item = std::io::Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut buf = *self.data.borrow_mut(); + let mut buf = tokio::io::ReadBuf::new(&mut buf); + + let Poll::Ready(ready) = self.bound_socket.poll_recv_from(cx, &mut buf) else { + return Poll::Pending; + }; + + let res = match ready { + Ok(from) => { + let payload = buf.filled().to_vec(); + let request = RawRequest { payload, from }; + Some(Ok(request)) + } + Err(err) => Some(Err(err)), + }; + + Poll::Ready(res) + } +} diff --git a/src/servers/udp/server/request_buffer.rs b/src/servers/udp/server/request_buffer.rs new file mode 100644 index 000000000..c1d4f2696 --- /dev/null +++ b/src/servers/udp/server/request_buffer.rs @@ -0,0 +1,95 @@ +use ringbuf::traits::{Consumer, Observer, Producer}; +use ringbuf::StaticRb; +use tokio::task::AbortHandle; + +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +/// Ring-Buffer of Active Requests +#[derive(Default)] +pub struct ActiveRequests { + rb: StaticRb, // the number of requests we handle at the same time. 
+ } + +impl std::fmt::Debug for ActiveRequests { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let (left, right) = &self.rb.as_slices(); + let dbg = format!("capacity: {}, left: {left:?}, right: {right:?}", &self.rb.capacity()); + f.debug_struct("ActiveRequests").field("rb", &dbg).finish() + } +} + +impl Drop for ActiveRequests { + fn drop(&mut self) { + for h in self.rb.pop_iter() { + if !h.is_finished() { + h.abort(); + } + } + } +} + +impl ActiveRequests { + /// It inserts the abort handle for the UDP request processor tasks. + /// + /// If there is no room for the new task, it tries to make place: + /// + /// - Firstly, removing finished tasks. + /// - Secondly, removing the oldest unfinished tasks. + /// + /// # Panics + /// + /// Will panic if it can't make space for the new handle. + pub async fn force_push(&mut self, abort_handle: AbortHandle, local_addr: &str) { + // fill buffer with requests + let Err(abort_handle) = self.rb.try_push(abort_handle) else { + return; + }; + + let mut finished: u64 = 0; + let mut unfinished_task = None; + + // buffer is full.. lets make some space. + for h in self.rb.pop_iter() { + // remove some finished tasks + if h.is_finished() { + finished += 1; + continue; + } + + // task is unfinished.. give it another chance. + tokio::task::yield_now().await; + + // if now finished, we continue. + if h.is_finished() { + finished += 1; + continue; + } + + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, removed_count = finished, "Udp::run_udp_server::loop (got unfinished task)"); + + if finished == 0 { + // we have _no_ finished tasks.. will abort the unfinished task to make space... + h.abort(); + + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop aborting request: (no finished tasks)"); + + break; + } + + // we have space, return unfinished task for re-entry. + unfinished_task = Some(h); + } + + // re-insert the previous unfinished task. 
+ if let Some(h) = unfinished_task { + self.rb.try_push(h).expect("it was previously inserted"); + } + + // insert the new task. + if !abort_handle.is_finished() { + self.rb + .try_push(abort_handle) + .expect("it should remove at least one element."); + } + } +} diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs new file mode 100644 index 000000000..a36404fce --- /dev/null +++ b/src/servers/udp/server/spawner.rs @@ -0,0 +1,36 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use derive_more::Constructor; +use tokio::sync::oneshot; +use tokio::task::JoinHandle; + +use super::launcher::Launcher; +use crate::bootstrap::jobs::Started; +use crate::core::Tracker; +use crate::servers::signals::Halted; + +#[derive(Constructor, Copy, Clone, Debug)] +pub struct Spawner { + pub bind_to: SocketAddr, +} + +impl Spawner { + /// It spawns a new tasks to run the UDP server instance. + /// + /// # Panics + /// + /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. 
+ pub fn start( + &self, + tracker: Arc, + tx_start: oneshot::Sender, + rx_halt: oneshot::Receiver, + ) -> JoinHandle { + let launcher = Spawner::new(self.bind_to); + tokio::spawn(async move { + Launcher::run_with_graceful_shutdown(tracker, launcher.bind_to, tx_start, rx_halt).await; + launcher + }) + } +} diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs new file mode 100644 index 000000000..919646d7b --- /dev/null +++ b/src/servers/udp/server/states.rs @@ -0,0 +1,115 @@ +use std::fmt::Debug; +use std::net::SocketAddr; +use std::sync::Arc; + +use derive_more::Constructor; +use tokio::task::JoinHandle; + +use super::spawner::Spawner; +use super::{Server, UdpError}; +use crate::bootstrap::jobs::Started; +use crate::core::Tracker; +use crate::servers::registar::{ServiceRegistration, ServiceRegistrationForm}; +use crate::servers::signals::Halted; +use crate::servers::udp::server::launcher::Launcher; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +/// A UDP server instance controller with no UDP instance running. +#[allow(clippy::module_name_repetitions)] +pub type StoppedUdpServer = Server; + +/// A UDP server instance controller with a running UDP instance. +#[allow(clippy::module_name_repetitions)] +pub type RunningUdpServer = Server; + +/// A stopped UDP server state. + +pub struct Stopped { + pub launcher: Spawner, +} + +/// A running UDP server state. +#[derive(Debug, Constructor)] +pub struct Running { + /// The address where the server is bound. + pub binding: SocketAddr, + pub halt_task: tokio::sync::oneshot::Sender, + pub task: JoinHandle, +} + +impl Server { + /// Creates a new `UdpServer` instance in `stopped`state. + #[must_use] + pub fn new(launcher: Spawner) -> Self { + Self { + state: Stopped { launcher }, + } + } + + /// It starts the server and returns a `UdpServer` controller in `running` + /// state. + /// + /// # Errors + /// + /// Will return `Err` if UDP can't bind to given bind address. 
+ /// + /// # Panics + /// + /// It panics if unable to receive the bound socket address from service. + /// + pub async fn start(self, tracker: Arc, form: ServiceRegistrationForm) -> Result, std::io::Error> { + let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); + let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); + + assert!(!tx_halt.is_closed(), "Halt channel for UDP tracker should be open"); + + // May need to wrap in a task to about a tokio bug. + let task = self.state.launcher.start(tracker, tx_start, rx_halt); + + let binding = rx_start.await.expect("it should be able to start the service").address; + let local_addr = format!("udp://{binding}"); + + form.send(ServiceRegistration::new(binding, Launcher::check)) + .expect("it should be able to send service registration"); + + let running_udp_server: Server = Server { + state: Running { + binding, + halt_task: tx_halt, + task, + }, + }; + + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "UdpServer::start (running)"); + + Ok(running_udp_server) + } +} + +impl Server { + /// It stops the server and returns a `UdpServer` controller in `stopped` + /// state. + /// + /// # Errors + /// + /// Will return `Err` if the oneshot channel to send the stop signal + /// has already been called once. + /// + /// # Panics + /// + /// It panics if unable to shutdown service. 
+ pub async fn stop(self) -> Result, UdpError> { + self.state + .halt_task + .send(Halted::Normal) + .map_err(|e| UdpError::Error(e.to_string()))?; + + let launcher = self.state.task.await.expect("it should shutdown service"); + + let stopped_api_server: Server = Server { + state: Stopped { launcher }, + }; + + Ok(stopped_api_server) + } +} diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index e8fb048ca..2232cb0e0 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -4,7 +4,9 @@ use std::sync::Arc; use torrust_tracker::bootstrap::app::initialize_with_configuration; use torrust_tracker::core::Tracker; use torrust_tracker::servers::registar::Registar; -use torrust_tracker::servers::udp::server::{Running, Spawner, Stopped, UdpServer}; +use torrust_tracker::servers::udp::server::spawner::Spawner; +use torrust_tracker::servers::udp::server::states::{Running, Stopped}; +use torrust_tracker::servers::udp::server::Server; use torrust_tracker::shared::bit_torrent::tracker::udp::client::DEFAULT_TIMEOUT; use torrust_tracker_configuration::{Configuration, UdpTracker}; use torrust_tracker_primitives::info_hash::InfoHash; @@ -14,7 +16,7 @@ pub struct Environment { pub config: Arc, pub tracker: Arc, pub registar: Registar, - pub server: UdpServer, + pub server: Server, } impl Environment { @@ -36,7 +38,7 @@ impl Environment { let bind_to = config.bind_address; - let server = UdpServer::new(Spawner::new(bind_to)); + let server = Server::new(Spawner::new(bind_to)); Self { config, diff --git a/tests/servers/udp/mod.rs b/tests/servers/udp/mod.rs index b13b82240..7eea8683f 100644 --- a/tests/servers/udp/mod.rs +++ b/tests/servers/udp/mod.rs @@ -1,7 +1,7 @@ -use torrust_tracker::servers::udp::server; +use torrust_tracker::servers::udp::server::states::Running; pub mod asserts; pub mod contract; pub mod environment; -pub type Started = environment::Environment; +pub type Started = environment::Environment; From 
f06976e33defa286e9856239f79f9a83f9d168c5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Jun 2024 18:02:01 +0100 Subject: [PATCH 0905/1003] docs: update some UDP server comments --- src/servers/udp/server/mod.rs | 24 +++--------------------- src/servers/udp/server/spawner.rs | 12 +++++++----- src/servers/udp/server/states.rs | 10 +++++----- 3 files changed, 15 insertions(+), 31 deletions(-) diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 1bb9831ee..034f71beb 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -1,22 +1,4 @@ //! Module to handle the UDP server instances. -//! -//! There are two main types in this module: -//! -//! - [`UdpServer`]: a controller to start and stop the server. -//! - [`Udp`]: the server launcher. -//! -//! The `UdpServer` is an state machine for a given configuration. This struct -//! represents concrete configuration and state. It allows to start and -//! stop the server but always keeping the same configuration. -//! -//! The `Udp` is the server launcher. It's responsible for launching the UDP -//! but without keeping any state. -//! -//! For the time being, the `UdpServer` is only used for testing purposes, -//! because we want to be able to start and stop the server multiple times, and -//! we want to know the bound address and the current state of the server. -//! In production, the `Udp` launcher is used directly. - use std::fmt::Debug; use super::RawRequest; @@ -37,7 +19,7 @@ pub mod states; /// /// Some errors triggered while stopping the server are: /// -/// - The [`UdpServer`] cannot send the shutdown signal to the spawned UDP service thread. +/// - The [`Server`] cannot send the shutdown signal to the spawned UDP service thread. #[derive(Debug)] pub enum UdpError { /// Any kind of error starting or stopping the server. 
@@ -92,7 +74,7 @@ mod tests { tokio::time::sleep(Duration::from_secs(1)).await; - assert_eq!(stopped.state.launcher.bind_to, bind_to); + assert_eq!(stopped.state.spawner.bind_to, bind_to); } #[tokio::test] @@ -116,7 +98,7 @@ mod tests { tokio::time::sleep(Duration::from_secs(1)).await; - assert_eq!(stopped.state.launcher.bind_to, bind_to); + assert_eq!(stopped.state.spawner.bind_to, bind_to); } } diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs index a36404fce..e4612fbe0 100644 --- a/src/servers/udp/server/spawner.rs +++ b/src/servers/udp/server/spawner.rs @@ -1,3 +1,4 @@ +//! A thin wrapper for tokio spawn to launch the UDP server launcher as a new task. use std::net::SocketAddr; use std::sync::Arc; @@ -16,21 +17,22 @@ pub struct Spawner { } impl Spawner { - /// It spawns a new tasks to run the UDP server instance. + /// It spawns a new task to run the UDP server instance. /// /// # Panics /// /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. - pub fn start( + pub fn spawn_launcher( &self, tracker: Arc, tx_start: oneshot::Sender, rx_halt: oneshot::Receiver, ) -> JoinHandle { - let launcher = Spawner::new(self.bind_to); + let spawner = Self::new(self.bind_to); + tokio::spawn(async move { - Launcher::run_with_graceful_shutdown(tracker, launcher.bind_to, tx_start, rx_halt).await; - launcher + Launcher::run_with_graceful_shutdown(tracker, spawner.bind_to, tx_start, rx_halt).await; + spawner }) } } diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index 919646d7b..d0a2e4e8a 100644 --- a/src/servers/udp/server/states.rs +++ b/src/servers/udp/server/states.rs @@ -25,7 +25,7 @@ pub type RunningUdpServer = Server; /// A stopped UDP server state. pub struct Stopped { - pub launcher: Spawner, + pub spawner: Spawner, } /// A running UDP server state. @@ -40,9 +40,9 @@ pub struct Running { impl Server { /// Creates a new `UdpServer` instance in `stopped`state. 
#[must_use] - pub fn new(launcher: Spawner) -> Self { + pub fn new(spawner: Spawner) -> Self { Self { - state: Stopped { launcher }, + state: Stopped { spawner }, } } @@ -64,7 +64,7 @@ impl Server { assert!(!tx_halt.is_closed(), "Halt channel for UDP tracker should be open"); // May need to wrap in a task to about a tokio bug. - let task = self.state.launcher.start(tracker, tx_start, rx_halt); + let task = self.state.spawner.spawn_launcher(tracker, tx_start, rx_halt); let binding = rx_start.await.expect("it should be able to start the service").address; let local_addr = format!("udp://{binding}"); @@ -107,7 +107,7 @@ impl Server { let launcher = self.state.task.await.expect("it should shutdown service"); let stopped_api_server: Server = Server { - state: Stopped { launcher }, + state: Stopped { spawner: launcher }, }; Ok(stopped_api_server) From 2518c544f83ec0f7ff8d9f70f255d63ffe0460b1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 26 Jun 2024 17:02:27 +0100 Subject: [PATCH 0906/1003] fix: [#917] clients output in JSON should not include logging --- src/console/clients/checker/app.rs | 48 ++++++++++++++++++++++++++++-- src/console/clients/udp/app.rs | 4 +-- 2 files changed, 48 insertions(+), 4 deletions(-) diff --git a/src/console/clients/checker/app.rs b/src/console/clients/checker/app.rs index 84802688d..e3bca2319 100644 --- a/src/console/clients/checker/app.rs +++ b/src/console/clients/checker/app.rs @@ -12,12 +12,56 @@ //! ```text //! TORRUST_CHECKER_CONFIG=$(cat "./share/default/config/tracker_checker.json") cargo run --bin tracker_checker //! ``` +//! +//! Another real example to test the Torrust demo tracker: +//! +//! ```text +//! TORRUST_CHECKER_CONFIG='{ +//! "udp_trackers": ["144.126.245.19:6969"], +//! "http_trackers": ["https://tracker.torrust-demo.com"], +//! "health_checks": ["https://tracker.torrust-demo.com/api/health_check"] +//! }' cargo run --bin tracker_checker +//! ``` +//! +//! The output should be something like the following: +//! 
+//! ```json +//! { +//! "udp_trackers": [ +//! { +//! "url": "144.126.245.19:6969", +//! "status": { +//! "code": "ok", +//! "message": "" +//! } +//! } +//! ], +//! "http_trackers": [ +//! { +//! "url": "https://tracker.torrust-demo.com/", +//! "status": { +//! "code": "ok", +//! "message": "" +//! } +//! } +//! ], +//! "health_checks": [ +//! { +//! "url": "https://tracker.torrust-demo.com/api/health_check", +//! "status": { +//! "code": "ok", +//! "message": "" +//! } +//! } +//! ] +//! } +//! ``` use std::path::PathBuf; use std::sync::Arc; use anyhow::{Context, Result}; use clap::Parser; -use tracing::info; +use tracing::debug; use tracing::level_filters::LevelFilter; use super::config::Configuration; @@ -59,7 +103,7 @@ pub async fn run() -> Result> { fn tracing_stdout_init(filter: LevelFilter) { tracing_subscriber::fmt().with_max_level(filter).init(); - info!("logging initialized."); + debug!("logging initialized."); } fn setup_config(args: Args) -> Result { diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index c780157f4..51d21b51e 100644 --- a/src/console/clients/udp/app.rs +++ b/src/console/clients/udp/app.rs @@ -63,8 +63,8 @@ use anyhow::Context; use aquatic_udp_protocol::{Port, Response, TransactionId}; use clap::{Parser, Subcommand}; use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; +use tracing::debug; use tracing::level_filters::LevelFilter; -use tracing::{debug, info}; use url::Url; use crate::console::clients::udp::checker; @@ -128,7 +128,7 @@ pub async fn run() -> anyhow::Result<()> { fn tracing_stdout_init(filter: LevelFilter) { tracing_subscriber::fmt().with_max_level(filter).init(); - info!("logging initialized."); + debug!("logging initialized."); } async fn handle_announce(tracker_socket_addr: &SocketAddr, info_hash: &TorrustInfoHash) -> anyhow::Result { From 3d567c8410b6b8ec83997ca94036ec571db43b5a Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 28 Jun 2024 20:17:28 +0200 Subject: 
[PATCH 0907/1003] ci: nightly build for coverage --- .github/workflows/coverage.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 66def04bf..125cd2487 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -36,7 +36,7 @@ jobs: - id: setup name: Setup Toolchain - uses: dtolnay/rust-toolchain@stable + uses: dtolnay/rust-toolchain@nightly with: toolchain: nightly components: llvm-tools-preview From f0de8dd327854cd270b40247bca10ac60213c404 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 29 Jun 2024 19:07:23 +0200 Subject: [PATCH 0908/1003] ci: pre-build coverage test --- .github/workflows/coverage.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 125cd2487..024935df4 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -59,6 +59,10 @@ jobs: name: Clean Build Directory run: cargo clean + - id: build + name: Pre-build Main Project + run: cargo build + - id: test name: Run Unit Tests run: cargo test --tests --workspace --all-targets --all-features From 5f3957a5642049c96574e81126df2e665978a518 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 30 Jun 2024 14:42:38 +0200 Subject: [PATCH 0909/1003] ci: coverage build with two jobs --- .github/workflows/coverage.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 024935df4..7e929030d 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -61,7 +61,7 @@ jobs: - id: build name: Pre-build Main Project - run: cargo build + run: cargo build --jobs 2 - id: test name: Run Unit Tests From 16d4cb66ab87d33c7ba8ee8fab59898558bd8a2c Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 30 Jun 2024 17:18:58 +0200 Subject: [PATCH 0910/1003] ci: coverage workflow add 
pre-build-test step --- .github/workflows/coverage.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 7e929030d..4dc104242 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -63,6 +63,10 @@ jobs: name: Pre-build Main Project run: cargo build --jobs 2 + - id: build_tests + name: Pre-build Tests + run: cargo build --tests --jobs 2 + - id: test name: Run Unit Tests run: cargo test --tests --workspace --all-targets --all-features From 988f1c7b8d196bf3703060cf5fc0bad3412a1c07 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 26 Jun 2024 16:20:26 +0200 Subject: [PATCH 0911/1003] dev: add vscode 'code-workspace' to git ignore file --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index c1abad7e0..b60b28991 100644 --- a/.gitignore +++ b/.gitignore @@ -12,4 +12,5 @@ /tracker.* /tracker.toml callgrind.out -perf.data* \ No newline at end of file +perf.data* +*.code-workspace \ No newline at end of file From c202db7186c2d2baf249f89f341c5317ddb09bb2 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 28 Jun 2024 18:36:44 +0200 Subject: [PATCH 0912/1003] dev: tracker client error enums --- Cargo.lock | 1 + Cargo.toml | 2 +- packages/configuration/src/lib.rs | 5 + src/console/clients/checker/app.rs | 2 +- src/console/clients/checker/checks/health.rs | 100 ++++--- src/console/clients/checker/checks/http.rs | 169 +++++------ src/console/clients/checker/checks/udp.rs | 142 ++++----- src/console/clients/checker/service.rs | 69 +++-- src/console/clients/http/app.rs | 14 +- src/console/clients/http/mod.rs | 35 +++ src/console/clients/udp/app.rs | 33 +-- src/console/clients/udp/checker.rs | 185 +++++------- src/console/clients/udp/mod.rs | 48 +++ src/servers/apis/routes.rs | 6 +- src/servers/http/v1/routes.rs | 5 +- .../bit_torrent/tracker/http/client/mod.rs | 139 ++++++--- 
src/shared/bit_torrent/tracker/udp/client.rs | 276 +++++++++--------- src/shared/bit_torrent/tracker/udp/mod.rs | 62 +++- tests/servers/udp/contract.rs | 33 ++- tests/servers/udp/environment.rs | 3 +- 20 files changed, 770 insertions(+), 559 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 94fbe7d87..4b833504e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4274,6 +4274,7 @@ dependencies = [ "form_urlencoded", "idna", "percent-encoding", + "serde", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index c22c3dd45..a65c2a74d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -80,7 +80,7 @@ tower-http = { version = "0", features = ["compression-full", "cors", "propagate trace = "0" tracing = "0" tracing-subscriber = { version = "0.3.18", features = ["json"] } -url = "2" +url = {version = "2", features = ["serde"] } uuid = { version = "1", features = ["v4"] } zerocopy = "0.7.33" diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index c8c91443a..ca008a49a 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -9,6 +9,7 @@ pub mod v1; use std::collections::HashMap; use std::env; use std::sync::Arc; +use std::time::Duration; use camino::Utf8PathBuf; use derive_more::Constructor; @@ -20,6 +21,10 @@ use torrust_tracker_located_error::{DynError, LocatedError}; /// The maximum number of returned peers for a torrent. pub const TORRENT_PEERS_LIMIT: usize = 74; +/// Default timeout for sending and receiving packets. And waiting for sockets +/// to be readable and writable. +pub const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); + // Environment variables /// The whole `tracker.toml` file content. It has priority over the config file. 
diff --git a/src/console/clients/checker/app.rs b/src/console/clients/checker/app.rs index e3bca2319..9f9825d92 100644 --- a/src/console/clients/checker/app.rs +++ b/src/console/clients/checker/app.rs @@ -98,7 +98,7 @@ pub async fn run() -> Result> { console: console_printer, }; - Ok(service.run_checks().await) + service.run_checks().await.context("it should run the check tasks") } fn tracing_stdout_init(filter: LevelFilter) { diff --git a/src/console/clients/checker/checks/health.rs b/src/console/clients/checker/checks/health.rs index 47eec4cbd..b1fb79148 100644 --- a/src/console/clients/checker/checks/health.rs +++ b/src/console/clients/checker/checks/health.rs @@ -1,49 +1,77 @@ +use std::sync::Arc; use std::time::Duration; -use reqwest::{Client as HttpClient, Url, Url as ServiceUrl}; +use anyhow::Result; +use hyper::StatusCode; +use reqwest::{Client as HttpClient, Response}; +use serde::Serialize; +use thiserror::Error; +use url::Url; -use super::structs::{CheckerOutput, Status}; -use crate::console::clients::checker::service::{CheckError, CheckResult}; +#[derive(Debug, Clone, Error, Serialize)] +#[serde(into = "String")] +pub enum Error { + #[error("Failed to Build a Http Client: {err:?}")] + ClientBuildingError { err: Arc }, + #[error("Heath check failed to get a response: {err:?}")] + ResponseError { err: Arc }, + #[error("Http check returned a non-success code: \"{code}\" with the response: \"{response:?}\"")] + UnsuccessfulResponse { code: StatusCode, response: Arc }, +} + +impl From for String { + fn from(value: Error) -> Self { + value.to_string() + } +} + +#[derive(Debug, Clone, Serialize)] +pub struct Checks { + url: Url, + result: Result, +} -#[allow(clippy::missing_panics_doc)] -pub async fn run(health_checks: &Vec, check_results: &mut Vec) -> Vec { - let mut health_checkers: Vec = Vec::new(); +pub async fn run(health_checks: Vec, timeout: Duration) -> Vec> { + let mut results = Vec::default(); - for health_check_url in health_checks { - let mut 
health_checker = CheckerOutput { - url: health_check_url.to_string(), - status: Status { - code: String::new(), - message: String::new(), - }, + tracing::debug!("Health checks ..."); + + for url in health_checks { + let result = match run_health_check(url.clone(), timeout).await { + Ok(response) => Ok(response.status().to_string()), + Err(err) => Err(err), }; - match run_health_check(health_check_url.clone()).await { - Ok(()) => { - check_results.push(Ok(())); - health_checker.status.code = "ok".to_string(); - } - Err(err) => { - check_results.push(Err(err)); - health_checker.status.code = "error".to_string(); - health_checker.status.message = "Health API is failing.".to_string(); - } + + let check = Checks { url, result }; + + if check.result.is_err() { + results.push(Err(check)); + } else { + results.push(Ok(check)); } - health_checkers.push(health_checker); } - health_checkers + + results } -async fn run_health_check(url: Url) -> Result<(), CheckError> { - let client = HttpClient::builder().timeout(Duration::from_secs(5)).build().unwrap(); +async fn run_health_check(url: Url, timeout: Duration) -> Result { + let client = HttpClient::builder() + .timeout(timeout) + .build() + .map_err(|e| Error::ClientBuildingError { err: e.into() })?; - match client.get(url.clone()).send().await { - Ok(response) => { - if response.status().is_success() { - Ok(()) - } else { - Err(CheckError::HealthCheckError { url }) - } - } - Err(_) => Err(CheckError::HealthCheckError { url }), + let response = client + .get(url.clone()) + .send() + .await + .map_err(|e| Error::ResponseError { err: e.into() })?; + + if response.status().is_success() { + Ok(response) + } else { + Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) } } diff --git a/src/console/clients/checker/checks/http.rs b/src/console/clients/checker/checks/http.rs index 57f8c3015..8abbeb669 100644 --- a/src/console/clients/checker/checks/http.rs +++ 
b/src/console/clients/checker/checks/http.rs @@ -1,120 +1,101 @@ -use std::str::FromStr; +use std::str::FromStr as _; +use std::time::Duration; -use reqwest::Url as ServiceUrl; +use serde::Serialize; use torrust_tracker_primitives::info_hash::InfoHash; -use tracing::debug; use url::Url; -use super::structs::{CheckerOutput, Status}; -use crate::console::clients::checker::service::{CheckError, CheckResult}; -use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; +use crate::console::clients::http::Error; use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; use crate::shared::bit_torrent::tracker::http::client::responses::scrape; use crate::shared::bit_torrent::tracker::http::client::{requests, Client}; -#[allow(clippy::missing_panics_doc)] -pub async fn run(http_trackers: &Vec, check_results: &mut Vec) -> Vec { - let mut http_checkers: Vec = Vec::new(); - - for http_tracker in http_trackers { - let mut http_checker = CheckerOutput { - url: http_tracker.to_string(), - status: Status { - code: String::new(), - message: String::new(), - }, +#[derive(Debug, Clone, Serialize)] +pub struct Checks { + url: Url, + results: Vec<(Check, Result<(), Error>)>, +} + +#[derive(Debug, Clone, Serialize)] +pub enum Check { + Announce, + Scrape, +} + +pub async fn run(http_trackers: Vec, timeout: Duration) -> Vec> { + let mut results = Vec::default(); + + tracing::debug!("HTTP trackers ..."); + + for ref url in http_trackers { + let mut checks = Checks { + url: url.clone(), + results: Vec::default(), }; - match check_http_announce(http_tracker).await { - Ok(()) => { - check_results.push(Ok(())); - http_checker.status.code = "ok".to_string(); - } - Err(err) => { - check_results.push(Err(err)); - http_checker.status.code = "error".to_string(); - http_checker.status.message = "Announce is failing.".to_string(); - } + // Announce + { + let check = check_http_announce(url, timeout).await.map(|_| ()); + + 
checks.results.push((Check::Announce, check)); } - match check_http_scrape(http_tracker).await { - Ok(()) => { - check_results.push(Ok(())); - http_checker.status.code = "ok".to_string(); - } - Err(err) => { - check_results.push(Err(err)); - http_checker.status.code = "error".to_string(); - http_checker.status.message = "Scrape is failing.".to_string(); - } + // Scrape + { + let check = check_http_scrape(url, timeout).await.map(|_| ()); + + checks.results.push((Check::Scrape, check)); + } + + if checks.results.iter().any(|f| f.1.is_err()) { + results.push(Err(checks)); + } else { + results.push(Ok(checks)); } - http_checkers.push(http_checker); } - http_checkers + + results } -async fn check_http_announce(tracker_url: &Url) -> Result<(), CheckError> { +async fn check_http_announce(url: &Url, timeout: Duration) -> Result { let info_hash_str = "9c38422213e30bff212b30c360d26f9a02136422".to_string(); // # DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&info_hash_str).expect("a valid info-hash is required"); - // todo: HTTP request could panic.For example, if the server is not accessible. - // We should change the client to catch that error and return a `CheckError`. - // Otherwise the checking process will stop. The idea is to process all checks - // and return a final report. 
- let Ok(client) = Client::new(tracker_url.clone()) else { - return Err(CheckError::HttpError { - url: (tracker_url.to_owned()), - }); - }; - let Ok(response) = client - .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) + let client = Client::new(url.clone(), timeout).map_err(|err| Error::HttpClientError { err })?; + + let response = client + .announce( + &requests::announce::QueryBuilder::with_default_values() + .with_info_hash(&info_hash) + .query(), + ) .await - else { - return Err(CheckError::HttpError { - url: (tracker_url.to_owned()), - }); - }; - - if let Ok(body) = response.bytes().await { - if let Ok(_announce_response) = serde_bencode::from_bytes::(&body) { - Ok(()) - } else { - debug!("announce body {:#?}", body); - Err(CheckError::HttpError { - url: tracker_url.clone(), - }) - } - } else { - Err(CheckError::HttpError { - url: tracker_url.clone(), - }) - } + .map_err(|err| Error::HttpClientError { err })?; + + let response = response.bytes().await.map_err(|e| Error::ResponseError { err: e.into() })?; + + let response = serde_bencode::from_bytes::(&response).map_err(|e| Error::ParseBencodeError { + data: response, + err: e.into(), + })?; + + Ok(response) } -async fn check_http_scrape(url: &Url) -> Result<(), CheckError> { +async fn check_http_scrape(url: &Url, timeout: Duration) -> Result { let info_hashes: Vec = vec!["9c38422213e30bff212b30c360d26f9a02136422".to_string()]; // # DevSkim: ignore DS173237 let query = requests::scrape::Query::try_from(info_hashes).expect("a valid array of info-hashes is required"); - // todo: HTTP request could panic.For example, if the server is not accessible. - // We should change the client to catch that error and return a `CheckError`. - // Otherwise the checking process will stop. The idea is to process all checks - // and return a final report. 
- - let Ok(client) = Client::new(url.clone()) else { - return Err(CheckError::HttpError { url: (url.to_owned()) }); - }; - let Ok(response) = client.scrape(&query).await else { - return Err(CheckError::HttpError { url: (url.to_owned()) }); - }; - - if let Ok(body) = response.bytes().await { - if let Ok(_scrape_response) = scrape::Response::try_from_bencoded(&body) { - Ok(()) - } else { - debug!("scrape body {:#?}", body); - Err(CheckError::HttpError { url: url.clone() }) - } - } else { - Err(CheckError::HttpError { url: url.clone() }) - } + let client = Client::new(url.clone(), timeout).map_err(|err| Error::HttpClientError { err })?; + + let response = client.scrape(&query).await.map_err(|err| Error::HttpClientError { err })?; + + let response = response.bytes().await.map_err(|e| Error::ResponseError { err: e.into() })?; + + let response = scrape::Response::try_from_bencoded(&response).map_err(|e| Error::BencodeParseError { + data: response, + err: e.into(), + })?; + + Ok(response) } diff --git a/src/console/clients/checker/checks/udp.rs b/src/console/clients/checker/checks/udp.rs index 072aa5ca7..dd4d5e639 100644 --- a/src/console/clients/checker/checks/udp.rs +++ b/src/console/clients/checker/checks/udp.rs @@ -1,94 +1,98 @@ use std::net::SocketAddr; +use std::time::Duration; -use aquatic_udp_protocol::{Port, TransactionId}; +use aquatic_udp_protocol::TransactionId; use hex_literal::hex; +use serde::Serialize; use torrust_tracker_primitives::info_hash::InfoHash; -use tracing::debug; -use crate::console::clients::checker::checks::structs::{CheckerOutput, Status}; -use crate::console::clients::checker::service::{CheckError, CheckResult}; -use crate::console::clients::udp::checker; +use crate::console::clients::udp::checker::Client; +use crate::console::clients::udp::Error; -const ASSIGNED_BY_OS: u16 = 0; -const RANDOM_TRANSACTION_ID: i32 = -888_840_697; - -#[allow(clippy::missing_panics_doc)] -pub async fn run(udp_trackers: &Vec, check_results: &mut Vec) -> Vec { - 
let mut udp_checkers: Vec = Vec::new(); - - for udp_tracker in udp_trackers { - let mut checker_output = CheckerOutput { - url: udp_tracker.to_string(), - status: Status { - code: String::new(), - message: String::new(), - }, - }; +#[derive(Debug, Clone, Serialize)] +pub struct Checks { + remote_addr: SocketAddr, + results: Vec<(Check, Result<(), Error>)>, +} - debug!("UDP tracker: {:?}", udp_tracker); +#[derive(Debug, Clone, Serialize)] +pub enum Check { + Setup, + Connect, + Announce, + Scrape, +} - let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); +#[allow(clippy::missing_panics_doc)] +pub async fn run(udp_trackers: Vec, timeout: Duration) -> Vec> { + let mut results = Vec::default(); - let mut client = checker::Client::default(); + tracing::debug!("UDP trackers ..."); - debug!("Bind and connect"); + let info_hash = InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422")); // # DevSkim: ignore DS173237 - let Ok(bound_to) = client.bind_and_connect(ASSIGNED_BY_OS, udp_tracker).await else { - check_results.push(Err(CheckError::UdpError { - socket_addr: *udp_tracker, - })); - checker_output.status.code = "error".to_string(); - checker_output.status.message = "Can't connect to socket.".to_string(); - break; + for remote_addr in udp_trackers { + let mut checks = Checks { + remote_addr, + results: Vec::default(), }; - debug!("Send connection request"); - - let Ok(connection_id) = client.send_connection_request(transaction_id).await else { - check_results.push(Err(CheckError::UdpError { - socket_addr: *udp_tracker, - })); - checker_output.status.code = "error".to_string(); - checker_output.status.message = "Can't make tracker connection request.".to_string(); - break; + tracing::debug!("UDP tracker: {:?}", remote_addr); + + // Setup + let client = match Client::new(remote_addr, timeout).await { + Ok(client) => { + checks.results.push((Check::Setup, Ok(()))); + client + } + Err(err) => { + checks.results.push((Check::Setup, Err(err))); + 
results.push(Err(checks)); + break; + } }; - let info_hash = InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422")); // # DevSkim: ignore DS173237 - - debug!("Send announce request"); + let transaction_id = TransactionId::new(1); + + // Connect Remote + let connection_id = match client.send_connection_request(transaction_id).await { + Ok(connection_id) => { + checks.results.push((Check::Connect, Ok(()))); + connection_id + } + Err(err) => { + checks.results.push((Check::Connect, Err(err))); + results.push(Err(checks)); + break; + } + }; - if (client - .send_announce_request(connection_id, transaction_id, info_hash, Port(bound_to.port().into())) - .await) - .is_ok() + // Announce { - check_results.push(Ok(())); - checker_output.status.code = "ok".to_string(); - } else { - let err = CheckError::UdpError { - socket_addr: *udp_tracker, - }; - check_results.push(Err(err)); - checker_output.status.code = "error".to_string(); - checker_output.status.message = "Announce is failing.".to_string(); + let check = client + .send_announce_request(transaction_id, connection_id, info_hash) + .await + .map(|_| ()); + + checks.results.push((Check::Announce, check)); } - debug!("Send scrape request"); + // Scrape + { + let check = client + .send_scrape_request(connection_id, transaction_id, &[info_hash]) + .await + .map(|_| ()); - let info_hashes = vec![InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422"))]; // # DevSkim: ignore DS173237 + checks.results.push((Check::Announce, check)); + } - if (client.send_scrape_request(connection_id, transaction_id, info_hashes).await).is_ok() { - check_results.push(Ok(())); - checker_output.status.code = "ok".to_string(); + if checks.results.iter().any(|f| f.1.is_err()) { + results.push(Err(checks)); } else { - let err = CheckError::UdpError { - socket_addr: *udp_tracker, - }; - check_results.push(Err(err)); - checker_output.status.code = "error".to_string(); - checker_output.status.message = "Scrape is failing.".to_string(); + 
results.push(Ok(checks)); } - udp_checkers.push(checker_output); } - udp_checkers + + results } diff --git a/src/console/clients/checker/service.rs b/src/console/clients/checker/service.rs index 16483e92e..acd312d8c 100644 --- a/src/console/clients/checker/service.rs +++ b/src/console/clients/checker/service.rs @@ -1,9 +1,11 @@ -use std::net::SocketAddr; use std::sync::Arc; -use reqwest::Url; +use futures::FutureExt as _; +use serde::Serialize; +use tokio::task::{JoinError, JoinSet}; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; -use super::checks::{self}; +use super::checks::{health, http, udp}; use super::config::Configuration; use super::console::Console; use crate::console::clients::checker::printer::Printer; @@ -13,33 +15,48 @@ pub struct Service { pub(crate) console: Console, } -pub type CheckResult = Result<(), CheckError>; - -#[derive(Debug)] -pub enum CheckError { - UdpError { socket_addr: SocketAddr }, - HttpError { url: Url }, - HealthCheckError { url: Url }, +#[derive(Debug, Clone, Serialize)] +pub enum CheckResult { + Udp(Result), + Http(Result), + Health(Result), } impl Service { /// # Errors /// - /// Will return OK is all checks pass or an array with the check errors. - #[allow(clippy::missing_panics_doc)] - pub async fn run_checks(&self) -> Vec { - let mut check_results = vec![]; - - let udp_checkers = checks::udp::run(&self.config.udp_trackers, &mut check_results).await; - - let http_checkers = checks::http::run(&self.config.http_trackers, &mut check_results).await; - - let health_checkers = checks::health::run(&self.config.health_checks, &mut check_results).await; - - let json_output = - serde_json::json!({ "udp_trackers": udp_checkers, "http_trackers": http_checkers, "health_checks": health_checkers }); - self.console.println(&serde_json::to_string_pretty(&json_output).unwrap()); - - check_results + /// It will return an error if some of the tests panic or otherwise fail to run. 
+ /// On success it will return a vector of `Ok(())` of [`CheckResult`]. + /// + /// # Panics + /// + /// It would panic if `serde_json` produces invalid json for the `to_string_pretty` function. + pub async fn run_checks(self) -> Result, JoinError> { + tracing::info!("Running checks for trackers ..."); + + let mut check_results = Vec::default(); + + let mut checks = JoinSet::new(); + checks.spawn( + udp::run(self.config.udp_trackers.clone(), DEFAULT_TIMEOUT).map(|mut f| f.drain(..).map(CheckResult::Udp).collect()), + ); + checks.spawn( + http::run(self.config.http_trackers.clone(), DEFAULT_TIMEOUT) + .map(|mut f| f.drain(..).map(CheckResult::Http).collect()), + ); + checks.spawn( + health::run(self.config.health_checks.clone(), DEFAULT_TIMEOUT) + .map(|mut f| f.drain(..).map(CheckResult::Health).collect()), + ); + + while let Some(results) = checks.join_next().await { + check_results.append(&mut results?); + } + + let json_output = serde_json::json!(check_results); + self.console + .println(&serde_json::to_string_pretty(&json_output).expect("it should consume valid json")); + + Ok(check_results) } } diff --git a/src/console/clients/http/app.rs b/src/console/clients/http/app.rs index 8fc9db0c3..a54db5f8b 100644 --- a/src/console/clients/http/app.rs +++ b/src/console/clients/http/app.rs @@ -14,10 +14,12 @@ //! cargo run --bin http_tracker_client scrape http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq //! 
``` use std::str::FromStr; +use std::time::Duration; use anyhow::Context; use clap::{Parser, Subcommand}; use reqwest::Url; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_primitives::info_hash::InfoHash; use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; @@ -46,25 +48,25 @@ pub async fn run() -> anyhow::Result<()> { match args.command { Command::Announce { tracker_url, info_hash } => { - announce_command(tracker_url, info_hash).await?; + announce_command(tracker_url, info_hash, DEFAULT_TIMEOUT).await?; } Command::Scrape { tracker_url, info_hashes, } => { - scrape_command(&tracker_url, &info_hashes).await?; + scrape_command(&tracker_url, &info_hashes, DEFAULT_TIMEOUT).await?; } } Ok(()) } -async fn announce_command(tracker_url: String, info_hash: String) -> anyhow::Result<()> { +async fn announce_command(tracker_url: String, info_hash: String, timeout: Duration) -> anyhow::Result<()> { let base_url = Url::parse(&tracker_url).context("failed to parse HTTP tracker base URL")?; let info_hash = InfoHash::from_str(&info_hash).expect("Invalid infohash. Example infohash: `9c38422213e30bff212b30c360d26f9a02136422`"); - let response = Client::new(base_url)? + let response = Client::new(base_url, timeout)? 
.announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) .await?; @@ -80,12 +82,12 @@ async fn announce_command(tracker_url: String, info_hash: String) -> anyhow::Res Ok(()) } -async fn scrape_command(tracker_url: &str, info_hashes: &[String]) -> anyhow::Result<()> { +async fn scrape_command(tracker_url: &str, info_hashes: &[String], timeout: Duration) -> anyhow::Result<()> { let base_url = Url::parse(tracker_url).context("failed to parse HTTP tracker base URL")?; let query = requests::scrape::Query::try_from(info_hashes).context("failed to parse infohashes")?; - let response = Client::new(base_url)?.scrape(&query).await?; + let response = Client::new(base_url, timeout)?.scrape(&query).await?; let body = response.bytes().await?; diff --git a/src/console/clients/http/mod.rs b/src/console/clients/http/mod.rs index 309be6287..eaa71957f 100644 --- a/src/console/clients/http/mod.rs +++ b/src/console/clients/http/mod.rs @@ -1 +1,36 @@ +use std::sync::Arc; + +use serde::Serialize; +use thiserror::Error; + +use crate::shared::bit_torrent::tracker::http::client::responses::scrape::BencodeParseError; + pub mod app; + +#[derive(Debug, Clone, Error, Serialize)] +#[serde(into = "String")] +pub enum Error { + #[error("Http request did not receive a response within the timeout: {err:?}")] + HttpClientError { + err: crate::shared::bit_torrent::tracker::http::client::Error, + }, + #[error("Http failed to get a response at all: {err:?}")] + ResponseError { err: Arc }, + #[error("Failed to deserialize the bencoded response data with the error: \"{err:?}\"")] + ParseBencodeError { + data: hyper::body::Bytes, + err: Arc, + }, + + #[error("Failed to deserialize the bencoded response data with the error: \"{err:?}\"")] + BencodeParseError { + data: hyper::body::Bytes, + err: Arc, + }, +} + +impl From for String { + fn from(value: Error) -> Self { + value.to_string() + } +} diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index 
51d21b51e..bcba39558 100644 --- a/src/console/clients/udp/app.rs +++ b/src/console/clients/udp/app.rs @@ -60,18 +60,19 @@ use std::net::{SocketAddr, ToSocketAddrs}; use std::str::FromStr; use anyhow::Context; -use aquatic_udp_protocol::{Port, Response, TransactionId}; +use aquatic_udp_protocol::{Response, TransactionId}; use clap::{Parser, Subcommand}; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; use tracing::debug; use tracing::level_filters::LevelFilter; use url::Url; +use super::Error; use crate::console::clients::udp::checker; use crate::console::clients::udp::responses::dto::SerializableResponse; use crate::console::clients::udp::responses::json::ToJson; -const ASSIGNED_BY_OS: u16 = 0; const RANDOM_TRANSACTION_ID: i32 = -888_840_697; #[derive(Parser, Debug)] @@ -109,13 +110,13 @@ pub async fn run() -> anyhow::Result<()> { let response = match args.command { Command::Announce { - tracker_socket_addr, + tracker_socket_addr: remote_addr, info_hash, - } => handle_announce(&tracker_socket_addr, &info_hash).await?, + } => handle_announce(remote_addr, &info_hash).await?, Command::Scrape { - tracker_socket_addr, + tracker_socket_addr: remote_addr, info_hashes, - } => handle_scrape(&tracker_socket_addr, &info_hashes).await?, + } => handle_scrape(remote_addr, &info_hashes).await?, }; let response: SerializableResponse = response.into(); @@ -131,32 +132,24 @@ fn tracing_stdout_init(filter: LevelFilter) { debug!("logging initialized."); } -async fn handle_announce(tracker_socket_addr: &SocketAddr, info_hash: &TorrustInfoHash) -> anyhow::Result { +async fn handle_announce(remote_addr: SocketAddr, info_hash: &TorrustInfoHash) -> Result { let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); - let mut client = checker::Client::default(); - - let bound_to = client.bind_and_connect(ASSIGNED_BY_OS, tracker_socket_addr).await?; + let client = checker::Client::new(remote_addr, 
DEFAULT_TIMEOUT).await?; let connection_id = client.send_connection_request(transaction_id).await?; - client - .send_announce_request(connection_id, transaction_id, *info_hash, Port(bound_to.port().into())) - .await + client.send_announce_request(transaction_id, connection_id, *info_hash).await } -async fn handle_scrape(tracker_socket_addr: &SocketAddr, info_hashes: &[TorrustInfoHash]) -> anyhow::Result { +async fn handle_scrape(remote_addr: SocketAddr, info_hashes: &[TorrustInfoHash]) -> Result { let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); - let mut client = checker::Client::default(); - - let _bound_to = client.bind_and_connect(ASSIGNED_BY_OS, tracker_socket_addr).await?; + let client = checker::Client::new(remote_addr, DEFAULT_TIMEOUT).await?; let connection_id = client.send_connection_request(transaction_id).await?; - client - .send_scrape_request(connection_id, transaction_id, info_hashes.to_vec()) - .await + client.send_scrape_request(connection_id, transaction_id, info_hashes).await } fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result { diff --git a/src/console/clients/udp/checker.rs b/src/console/clients/udp/checker.rs index afde63d12..49f0ac41f 100644 --- a/src/console/clients/udp/checker.rs +++ b/src/console/clients/udp/checker.rs @@ -1,99 +1,46 @@ use std::net::{Ipv4Addr, SocketAddr}; +use std::num::NonZeroU16; +use std::time::Duration; -use anyhow::Context; use aquatic_udp_protocol::common::InfoHash; use aquatic_udp_protocol::{ AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, Response, ScrapeRequest, TransactionId, }; -use thiserror::Error; use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; use tracing::debug; -use crate::shared::bit_torrent::tracker::udp::client::{UdpClient, UdpTrackerClient}; - -#[derive(Error, Debug)] -pub enum ClientError { - #[error("Local socket address is not bound yet. 
Try binding before connecting.")] - NotBound, - #[error("Not connected to remote tracker UDP socket. Try connecting before making requests.")] - NotConnected, - #[error("Unexpected response while connecting the the remote server.")] - UnexpectedConnectionResponse, -} +use super::Error; +use crate::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; /// A UDP Tracker client to make test requests (checks). -#[derive(Debug, Default)] +#[derive(Debug)] pub struct Client { - /// Local UDP socket. It could be 0 to assign a free port. - local_binding_address: Option, - - /// Local UDP socket after binding. It's equals to binding address if a - /// non- zero port was used. - local_bound_address: Option, - - /// Remote UDP tracker socket - remote_socket: Option, - - /// The client used to make UDP requests to the tracker. - udp_tracker_client: Option, + client: UdpTrackerClient, } impl Client { - /// Binds to the local socket and connects to the remote one. + /// Creates a new `[Client]` for checking a UDP Tracker Service /// /// # Errors /// - /// Will return an error if - /// - /// - It can't bound to the local socket address. - /// - It can't make a connection request successfully to the remote UDP server. - pub async fn bind_and_connect(&mut self, local_port: u16, remote_socket_addr: &SocketAddr) -> anyhow::Result { - let bound_to = self.bind(local_port).await?; - self.connect(remote_socket_addr).await?; - Ok(bound_to) - } - - /// Binds local client socket. - /// - /// # Errors + /// It will error if unable to bind and connect to the udp remote address. /// - /// Will return an error if it can't bound to the local address. 
- async fn bind(&mut self, local_port: u16) -> anyhow::Result { - let local_bind_to = format!("0.0.0.0:{local_port}"); - let binding_address = local_bind_to.parse().context("binding local address")?; - - debug!("Binding to: {local_bind_to}"); - let udp_client = UdpClient::bind(&local_bind_to).await?; - - let bound_to = udp_client.socket.local_addr().context("bound local address")?; - debug!("Bound to: {bound_to}"); - - self.local_binding_address = Some(binding_address); - self.local_bound_address = Some(bound_to); - - self.udp_tracker_client = Some(UdpTrackerClient { udp_client }); + pub async fn new(remote_addr: SocketAddr, timeout: Duration) -> Result { + let client = UdpTrackerClient::new(remote_addr, timeout) + .await + .map_err(|err| Error::UnableToBindAndConnect { remote_addr, err })?; - Ok(bound_to) + Ok(Self { client }) } - /// Connects to the remote server socket. + /// Returns the local addr of this [`Client`]. /// /// # Errors /// - /// Will return and error if it can't make a connection request successfully - /// to the remote UDP server. - async fn connect(&mut self, tracker_socket_addr: &SocketAddr) -> anyhow::Result<()> { - debug!("Connecting to tracker: udp://{tracker_socket_addr}"); - - match &self.udp_tracker_client { - Some(client) => { - client.udp_client.connect(&tracker_socket_addr.to_string()).await?; - self.remote_socket = Some(*tracker_socket_addr); - Ok(()) - } - None => Err(ClientError::NotBound.into()), - } + /// This function will return an error if the socket is somehow not bound. + pub fn local_addr(&self) -> std::io::Result { + self.client.client.socket.local_addr() } /// Sends a connection request to the UDP Tracker server. @@ -109,25 +56,26 @@ impl Client { /// # Panics /// /// Will panic if it receives an unexpected response. 
- pub async fn send_connection_request(&self, transaction_id: TransactionId) -> anyhow::Result { + pub async fn send_connection_request(&self, transaction_id: TransactionId) -> Result { debug!("Sending connection request with transaction id: {transaction_id:#?}"); let connect_request = ConnectRequest { transaction_id }; - match &self.udp_tracker_client { - Some(client) => { - client.send(connect_request.into()).await?; - - let response = client.receive().await?; - - debug!("connection request response:\n{response:#?}"); - - match response { - Response::Connect(connect_response) => Ok(connect_response.connection_id), - _ => Err(ClientError::UnexpectedConnectionResponse.into()), - } - } - None => Err(ClientError::NotConnected.into()), + let _ = self + .client + .send(connect_request.into()) + .await + .map_err(|err| Error::UnableToSendConnectionRequest { err })?; + + let response = self + .client + .receive() + .await + .map_err(|err| Error::UnableToReceiveConnectResponse { err })?; + + match response { + Response::Connect(connect_response) => Ok(connect_response.connection_id), + _ => Err(Error::UnexpectedConnectionResponse { response }), } } @@ -137,15 +85,28 @@ impl Client { /// /// Will return and error if the client is not connected. You have to connect /// before calling this function. + /// + /// # Panics + /// + /// It will panic if the `local_address` has a zero port. 
pub async fn send_announce_request( &self, - connection_id: ConnectionId, transaction_id: TransactionId, + connection_id: ConnectionId, info_hash: TorrustInfoHash, - client_port: Port, - ) -> anyhow::Result { + ) -> Result { debug!("Sending announce request with transaction id: {transaction_id:#?}"); + let port = NonZeroU16::new( + self.client + .client + .socket + .local_addr() + .expect("it should get the local address") + .port(), + ) + .expect("it should no be zero"); + let announce_request = AnnounceRequest { connection_id, action_placeholder: AnnounceActionPlaceholder::default(), @@ -159,21 +120,22 @@ impl Client { ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), key: PeerKey::new(0i32), peers_wanted: NumberOfPeers(1i32.into()), - port: client_port, + port: Port::new(port), }; - match &self.udp_tracker_client { - Some(client) => { - client.send(announce_request.into()).await?; - - let response = client.receive().await?; + let _ = self + .client + .send(announce_request.into()) + .await + .map_err(|err| Error::UnableToSendAnnounceRequest { err })?; - debug!("announce request response:\n{response:#?}"); + let response = self + .client + .receive() + .await + .map_err(|err| Error::UnableToReceiveAnnounceResponse { err })?; - Ok(response) - } - None => Err(ClientError::NotConnected.into()), - } + Ok(response) } /// Sends a scrape request to the UDP Tracker server. 
@@ -186,8 +148,8 @@ impl Client { &self, connection_id: ConnectionId, transaction_id: TransactionId, - info_hashes: Vec, - ) -> anyhow::Result { + info_hashes: &[TorrustInfoHash], + ) -> Result { debug!("Sending scrape request with transaction id: {transaction_id:#?}"); let scrape_request = ScrapeRequest { @@ -199,17 +161,18 @@ impl Client { .collect(), }; - match &self.udp_tracker_client { - Some(client) => { - client.send(scrape_request.into()).await?; - - let response = client.receive().await?; + let _ = self + .client + .send(scrape_request.into()) + .await + .map_err(|err| Error::UnableToSendScrapeRequest { err })?; - debug!("scrape request response:\n{response:#?}"); + let response = self + .client + .receive() + .await + .map_err(|err| Error::UnableToReceiveScrapeResponse { err })?; - Ok(response) - } - None => Err(ClientError::NotConnected.into()), - } + Ok(response) } } diff --git a/src/console/clients/udp/mod.rs b/src/console/clients/udp/mod.rs index 2fcb26ed0..b92bed096 100644 --- a/src/console/clients/udp/mod.rs +++ b/src/console/clients/udp/mod.rs @@ -1,3 +1,51 @@ +use std::net::SocketAddr; + +use aquatic_udp_protocol::Response; +use serde::Serialize; +use thiserror::Error; + +use crate::shared::bit_torrent::tracker::udp; + pub mod app; pub mod checker; pub mod responses; + +#[derive(Error, Debug, Clone, Serialize)] +#[serde(into = "String")] +pub enum Error { + #[error("Failed to Connect to: {remote_addr}, with error: {err}")] + UnableToBindAndConnect { remote_addr: SocketAddr, err: udp::Error }, + + #[error("Failed to send a connection request, with error: {err}")] + UnableToSendConnectionRequest { err: udp::Error }, + + #[error("Failed to receive a connect response, with error: {err}")] + UnableToReceiveConnectResponse { err: udp::Error }, + + #[error("Failed to send a announce request, with error: {err}")] + UnableToSendAnnounceRequest { err: udp::Error }, + + #[error("Failed to receive a announce response, with error: {err}")] + 
UnableToReceiveAnnounceResponse { err: udp::Error }, + + #[error("Failed to send a scrape request, with error: {err}")] + UnableToSendScrapeRequest { err: udp::Error }, + + #[error("Failed to receive a scrape response, with error: {err}")] + UnableToReceiveScrapeResponse { err: udp::Error }, + + #[error("Failed to receive a response, with error: {err}")] + UnableToReceiveResponse { err: udp::Error }, + + #[error("Failed to get local address for connection: {err}")] + UnableToGetLocalAddr { err: udp::Error }, + + #[error("Failed to get a connection response: {response:?}")] + UnexpectedConnectionResponse { response: Response }, +} + +impl From for String { + fn from(value: Error) -> Self { + value.to_string() + } +} diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index 2001afc2f..4901d760d 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -14,7 +14,7 @@ use axum::response::Response; use axum::routing::get; use axum::{middleware, BoxError, Router}; use hyper::{Request, StatusCode}; -use torrust_tracker_configuration::AccessTokens; +use torrust_tracker_configuration::{AccessTokens, DEFAULT_TIMEOUT}; use tower::timeout::TimeoutLayer; use tower::ServiceBuilder; use tower_http::compression::CompressionLayer; @@ -29,8 +29,6 @@ use super::v1::middlewares::auth::State; use crate::core::Tracker; use crate::servers::apis::API_LOG_TARGET; -const TIMEOUT: Duration = Duration::from_secs(5); - /// Add all API routes to the router. 
#[allow(clippy::needless_pass_by_value)] pub fn router(tracker: Arc, access_tokens: Arc) -> Router { @@ -84,6 +82,6 @@ pub fn router(tracker: Arc, access_tokens: Arc) -> Router // this middleware goes above `TimeoutLayer` because it will receive // errors returned by `TimeoutLayer` .layer(HandleErrorLayer::new(|_: BoxError| async { StatusCode::REQUEST_TIMEOUT })) - .layer(TimeoutLayer::new(TIMEOUT)), + .layer(TimeoutLayer::new(DEFAULT_TIMEOUT)), ) } diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index b2f37880c..c24797c4a 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -10,6 +10,7 @@ use axum::routing::get; use axum::{BoxError, Router}; use axum_client_ip::SecureClientIpSource; use hyper::{Request, StatusCode}; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; use tower::timeout::TimeoutLayer; use tower::ServiceBuilder; use tower_http::compression::CompressionLayer; @@ -22,8 +23,6 @@ use super::handlers::{announce, health_check, scrape}; use crate::core::Tracker; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; -const TIMEOUT: Duration = Duration::from_secs(5); - /// It adds the routes to the router. 
/// /// > **NOTICE**: it's added a layer to get the client IP from the connection @@ -80,6 +79,6 @@ pub fn router(tracker: Arc, server_socket_addr: SocketAddr) -> Router { // this middleware goes above `TimeoutLayer` because it will receive // errors returned by `TimeoutLayer` .layer(HandleErrorLayer::new(|_: BoxError| async { StatusCode::REQUEST_TIMEOUT })) - .layer(TimeoutLayer::new(TIMEOUT)), + .layer(TimeoutLayer::new(DEFAULT_TIMEOUT)), ) } diff --git a/src/shared/bit_torrent/tracker/http/client/mod.rs b/src/shared/bit_torrent/tracker/http/client/mod.rs index f5b1b3310..4c70cd68b 100644 --- a/src/shared/bit_torrent/tracker/http/client/mod.rs +++ b/src/shared/bit_torrent/tracker/http/client/mod.rs @@ -2,18 +2,30 @@ pub mod requests; pub mod responses; use std::net::IpAddr; +use std::sync::Arc; +use std::time::Duration; -use anyhow::{anyhow, Result}; -use requests::announce::{self, Query}; -use requests::scrape; -use reqwest::{Client as ReqwestClient, Response, Url}; +use hyper::StatusCode; +use requests::{announce, scrape}; +use reqwest::{Response, Url}; +use thiserror::Error; use crate::core::auth::Key; +#[derive(Debug, Clone, Error)] +pub enum Error { + #[error("Failed to Build a Http Client: {err:?}")] + ClientBuildingError { err: Arc }, + #[error("Failed to get a response: {err:?}")] + ResponseError { err: Arc }, + #[error("Returned a non-success code: \"{code}\" with the response: \"{response:?}\"")] + UnsuccessfulResponse { code: StatusCode, response: Arc }, +} + /// HTTP Tracker Client pub struct Client { + client: reqwest::Client, base_url: Url, - reqwest: ReqwestClient, key: Option, } @@ -29,11 +41,15 @@ impl Client { /// # Errors /// /// This method fails if the client builder fails. 
- pub fn new(base_url: Url) -> Result { - let reqwest = reqwest::Client::builder().build()?; + pub fn new(base_url: Url, timeout: Duration) -> Result { + let client = reqwest::Client::builder() + .timeout(timeout) + .build() + .map_err(|e| Error::ClientBuildingError { err: e.into() })?; + Ok(Self { base_url, - reqwest, + client, key: None, }) } @@ -43,11 +59,16 @@ impl Client { /// # Errors /// /// This method fails if the client builder fails. - pub fn bind(base_url: Url, local_address: IpAddr) -> Result { - let reqwest = reqwest::Client::builder().local_address(local_address).build()?; + pub fn bind(base_url: Url, timeout: Duration, local_address: IpAddr) -> Result { + let client = reqwest::Client::builder() + .timeout(timeout) + .local_address(local_address) + .build() + .map_err(|e| Error::ClientBuildingError { err: e.into() })?; + Ok(Self { base_url, - reqwest, + client, key: None, }) } @@ -55,54 +76,106 @@ impl Client { /// # Errors /// /// This method fails if the client builder fails. 
- pub fn authenticated(base_url: Url, key: Key) -> Result { - let reqwest = reqwest::Client::builder().build()?; + pub fn authenticated(base_url: Url, timeout: Duration, key: Key) -> Result { + let client = reqwest::Client::builder() + .timeout(timeout) + .build() + .map_err(|e| Error::ClientBuildingError { err: e.into() })?; + Ok(Self { base_url, - reqwest, + client, key: Some(key), }) } /// # Errors - pub async fn announce(&self, query: &announce::Query) -> Result { - self.get(&self.build_announce_path_and_query(query)).await + /// + /// This method fails if the returned response was not successful + pub async fn announce(&self, query: &announce::Query) -> Result { + let response = self.get(&self.build_announce_path_and_query(query)).await?; + + if response.status().is_success() { + Ok(response) + } else { + Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) + } } /// # Errors - pub async fn scrape(&self, query: &scrape::Query) -> Result { - self.get(&self.build_scrape_path_and_query(query)).await + /// + /// This method fails if the returned response was not successful + pub async fn scrape(&self, query: &scrape::Query) -> Result { + let response = self.get(&self.build_scrape_path_and_query(query)).await?; + + if response.status().is_success() { + Ok(response) + } else { + Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) + } } /// # Errors - pub async fn announce_with_header(&self, query: &Query, key: &str, value: &str) -> Result { - self.get_with_header(&self.build_announce_path_and_query(query), key, value) - .await + /// + /// This method fails if the returned response was not successful + pub async fn announce_with_header(&self, query: &announce::Query, key: &str, value: &str) -> Result { + let response = self + .get_with_header(&self.build_announce_path_and_query(query), key, value) + .await?; + + if response.status().is_success() { + Ok(response) + } else { + 
Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) + } } /// # Errors - pub async fn health_check(&self) -> Result { - self.get(&self.build_path("health_check")).await + /// + /// This method fails if the returned response was not successful + pub async fn health_check(&self) -> Result { + let response = self.get(&self.build_path("health_check")).await?; + + if response.status().is_success() { + Ok(response) + } else { + Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) + } } /// # Errors /// /// This method fails if there was an error while sending request. - pub async fn get(&self, path: &str) -> Result { - match self.reqwest.get(self.build_url(path)).send().await { - Ok(response) => Ok(response), - Err(err) => Err(anyhow!("{err}")), - } + pub async fn get(&self, path: &str) -> Result { + self.client + .get(self.build_url(path)) + .send() + .await + .map_err(|e| Error::ResponseError { err: e.into() }) } /// # Errors /// /// This method fails if there was an error while sending request. 
- pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Result { - match self.reqwest.get(self.build_url(path)).header(key, value).send().await { - Ok(response) => Ok(response), - Err(err) => Err(anyhow!("{err}")), - } + pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Result { + self.client + .get(self.build_url(path)) + .header(key, value) + .send() + .await + .map_err(|e| Error::ResponseError { err: e.into() }) } fn build_announce_path_and_query(&self, query: &announce::Query) -> String { diff --git a/src/shared/bit_torrent/tracker/udp/client.rs b/src/shared/bit_torrent/tracker/udp/client.rs index dce596e08..edb8adc85 100644 --- a/src/shared/bit_torrent/tracker/udp/client.rs +++ b/src/shared/bit_torrent/tracker/udp/client.rs @@ -1,24 +1,20 @@ use core::result::Result::{Err, Ok}; use std::io::Cursor; -use std::net::SocketAddr; +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use std::time::Duration; -use anyhow::{anyhow, Context, Result}; use aquatic_udp_protocol::{ConnectRequest, Request, Response, TransactionId}; use tokio::net::UdpSocket; use tokio::time; -use tracing::debug; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; use zerocopy::network_endian::I32; -use crate::shared::bit_torrent::tracker::udp::{source_address, MAX_PACKET_SIZE}; +use super::Error; +use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; pub const UDP_CLIENT_LOG_TARGET: &str = "UDP CLIENT"; -/// Default timeout for sending and receiving packets. And waiting for sockets -/// to be readable and writable. -pub const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); - #[allow(clippy::module_name_repetitions)] #[derive(Debug)] pub struct UdpClient { @@ -30,51 +26,94 @@ pub struct UdpClient { } impl UdpClient { + /// Creates a new `UdpClient` bound to the default port and ipv6 address + /// /// # Errors /// - /// Will return error if the local address can't be bound. 
+ /// Will return error if unable to bind to any port or ip address. /// - pub async fn bind(local_address: &str) -> Result { - let socket_addr = local_address - .parse::() - .context(format!("{local_address} is not a valid socket address"))?; - - let socket = match time::timeout(DEFAULT_TIMEOUT, UdpSocket::bind(socket_addr)).await { - Ok(bind_result) => match bind_result { - Ok(socket) => { - debug!("Bound to socket: {socket_addr}"); - Ok(socket) - } - Err(e) => Err(anyhow!("Failed to bind to socket: {socket_addr}, error: {e:?}")), - }, - Err(e) => Err(anyhow!("Timeout waiting to bind to socket: {socket_addr}, error: {e:?}")), - }?; + async fn bound_to_default_ipv4(timeout: Duration) -> Result { + let addr = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0); + + Self::bound(addr, timeout).await + } + + /// Creates a new `UdpClient` bound to the default port and ipv6 address + /// + /// # Errors + /// + /// Will return error if unable to bind to any port or ip address. + /// + async fn bound_to_default_ipv6(timeout: Duration) -> Result { + let addr = SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0); + + Self::bound(addr, timeout).await + } + + /// Creates a new `UdpClient` connected to a Udp server + /// + /// # Errors + /// + /// Will return any errors present in the call stack + /// + pub async fn connected(remote_addr: SocketAddr, timeout: Duration) -> Result { + let client = if remote_addr.is_ipv4() { + Self::bound_to_default_ipv4(timeout).await? + } else { + Self::bound_to_default_ipv6(timeout).await? + }; + + client.connect(remote_addr).await?; + Ok(client) + } + + /// Creates a `[UdpClient]` bound to a Socket. + /// + /// # Panics + /// + /// Panics if unable to get the `local_addr` of the bound socket. + /// + /// # Errors + /// + /// This function will return an error if the binding takes to long + /// or if there is an underlying OS error. 
+ pub async fn bound(addr: SocketAddr, timeout: Duration) -> Result { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "binding to socket: {addr:?} ..."); + + let socket = time::timeout(timeout, UdpSocket::bind(addr)) + .await + .map_err(|_| Error::TimeoutWhileBindingToSocket { addr })? + .map_err(|e| Error::UnableToBindToSocket { err: e.into(), addr })?; + + let addr = socket.local_addr().expect("it should get the local address"); + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "bound to socket: {addr:?}."); let udp_client = Self { socket: Arc::new(socket), - timeout: DEFAULT_TIMEOUT, + timeout, }; + Ok(udp_client) } /// # Errors /// /// Will return error if can't connect to the socket. - pub async fn connect(&self, remote_address: &str) -> Result<()> { - let socket_addr = remote_address - .parse::() - .context(format!("{remote_address} is not a valid socket address"))?; - - match time::timeout(self.timeout, self.socket.connect(socket_addr)).await { - Ok(connect_result) => match connect_result { - Ok(()) => { - debug!("Connected to socket {socket_addr}"); - Ok(()) - } - Err(e) => Err(anyhow!("Failed to connect to socket {socket_addr}: {e:?}")), - }, - Err(e) => Err(anyhow!("Timeout waiting to connect to socket {socket_addr}, error: {e:?}")), - } + pub async fn connect(&self, remote_addr: SocketAddr) -> Result<(), Error> { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "connecting to remote: {remote_addr:?} ..."); + + let () = time::timeout(self.timeout, self.socket.connect(remote_addr)) + .await + .map_err(|_| Error::TimeoutWhileConnectingToRemote { remote_addr })? + .map_err(|e| Error::UnableToConnectToRemote { + err: e.into(), + remote_addr, + })?; + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "connected to remote: {remote_addr:?}."); + + Ok(()) } /// # Errors @@ -83,26 +122,25 @@ impl UdpClient { /// /// - Can't write to the socket. /// - Can't send data. 
- pub async fn send(&self, bytes: &[u8]) -> Result { - debug!(target: UDP_CLIENT_LOG_TARGET, "sending {bytes:?} ..."); - - match time::timeout(self.timeout, self.socket.writable()).await { - Ok(writable_result) => { - match writable_result { - Ok(()) => (), - Err(e) => return Err(anyhow!("IO error waiting for the socket to become readable: {e:?}")), - }; - } - Err(e) => return Err(anyhow!("Timeout waiting for the socket to become readable: {e:?}")), - }; + pub async fn send(&self, bytes: &[u8]) -> Result { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "sending {bytes:?} ..."); - match time::timeout(self.timeout, self.socket.send(bytes)).await { - Ok(send_result) => match send_result { - Ok(size) => Ok(size), - Err(e) => Err(anyhow!("IO error during send: {e:?}")), - }, - Err(e) => Err(anyhow!("Send operation timed out: {e:?}")), - } + let () = time::timeout(self.timeout, self.socket.writable()) + .await + .map_err(|_| Error::TimeoutWaitForWriteableSocket)? + .map_err(|e| Error::UnableToGetWritableSocket { err: e.into() })?; + + let sent_bytes = time::timeout(self.timeout, self.socket.send(bytes)) + .await + .map_err(|_| Error::TimeoutWhileSendingData { data: bytes.to_vec() })? 
+ .map_err(|e| Error::UnableToSendData { + err: e.into(), + data: bytes.to_vec(), + })?; + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "sent {sent_bytes} bytes to remote."); + + Ok(sent_bytes) } /// # Errors @@ -114,110 +152,76 @@ impl UdpClient { /// /// # Panics /// - pub async fn receive(&self) -> Result> { - let mut response_buffer = [0u8; MAX_PACKET_SIZE]; + pub async fn receive(&self) -> Result, Error> { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "receiving ..."); - debug!(target: UDP_CLIENT_LOG_TARGET, "receiving ..."); + let mut buffer = [0u8; MAX_PACKET_SIZE]; - match time::timeout(self.timeout, self.socket.readable()).await { - Ok(readable_result) => { - match readable_result { - Ok(()) => (), - Err(e) => return Err(anyhow!("IO error waiting for the socket to become readable: {e:?}")), - }; - } - Err(e) => return Err(anyhow!("Timeout waiting for the socket to become readable: {e:?}")), - }; + let () = time::timeout(self.timeout, self.socket.readable()) + .await + .map_err(|_| Error::TimeoutWaitForReadableSocket)? + .map_err(|e| Error::UnableToGetReadableSocket { err: e.into() })?; - let size = match time::timeout(self.timeout, self.socket.recv(&mut response_buffer)).await { - Ok(recv_result) => match recv_result { - Ok(size) => Ok(size), - Err(e) => Err(anyhow!("IO error during send: {e:?}")), - }, - Err(e) => Err(anyhow!("Receive operation timed out: {e:?}")), - }?; + let received_bytes = time::timeout(self.timeout, self.socket.recv(&mut buffer)) + .await + .map_err(|_| Error::TimeoutWhileReceivingData)? 
+ .map_err(|e| Error::UnableToReceivingData { err: e.into() })?; - let mut res: Vec = response_buffer.to_vec(); - Vec::truncate(&mut res, size); + let mut received: Vec = buffer.to_vec(); + Vec::truncate(&mut received, received_bytes); - debug!(target: UDP_CLIENT_LOG_TARGET, "{size} bytes received {res:?}"); + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "received {received_bytes} bytes: {received:?}"); - Ok(res) + Ok(received) } } -/// Creates a new `UdpClient` connected to a Udp server -/// -/// # Errors -/// -/// Will return any errors present in the call stack -/// -pub async fn new_udp_client_connected(remote_address: &str) -> Result { - let port = 0; // Let OS choose an unused port. - let client = UdpClient::bind(&source_address(port)).await?; - client.connect(remote_address).await?; - Ok(client) -} - #[allow(clippy::module_name_repetitions)] #[derive(Debug)] pub struct UdpTrackerClient { - pub udp_client: UdpClient, + pub client: UdpClient, } impl UdpTrackerClient { + /// Creates a new `UdpTrackerClient` connected to a Udp Tracker server + /// + /// # Errors + /// + /// If unable to connect to the remote address. + /// + pub async fn new(remote_addr: SocketAddr, timeout: Duration) -> Result { + let client = UdpClient::connected(remote_addr, timeout).await?; + Ok(UdpTrackerClient { client }) + } + /// # Errors /// /// Will return error if can't write request to bytes. 
- pub async fn send(&self, request: Request) -> Result { - debug!(target: UDP_CLIENT_LOG_TARGET, "send request {request:?}"); + pub async fn send(&self, request: Request) -> Result { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "sending request {request:?} ..."); // Write request into a buffer - let request_buffer = vec![0u8; MAX_PACKET_SIZE]; - let mut cursor = Cursor::new(request_buffer); - - let request_data_result = match request.write_bytes(&mut cursor) { - Ok(()) => { - #[allow(clippy::cast_possible_truncation)] - let position = cursor.position() as usize; - let inner_request_buffer = cursor.get_ref(); - // Return slice which contains written request data - Ok(&inner_request_buffer[..position]) - } - Err(e) => Err(anyhow!("could not write request to bytes: {e}.")), - }; + // todo: optimize the pre-allocated amount based upon request type. + let mut writer = Cursor::new(Vec::with_capacity(200)); + let () = request + .write_bytes(&mut writer) + .map_err(|e| Error::UnableToWriteDataFromRequest { err: e.into(), request })?; - let request_data = request_data_result?; - - self.udp_client.send(request_data).await + self.client.send(writer.get_ref()).await } /// # Errors /// /// Will return error if can't create response from the received payload (bytes buffer). - pub async fn receive(&self) -> Result { - let payload = self.udp_client.receive().await?; - - debug!(target: UDP_CLIENT_LOG_TARGET, "received {} bytes. 
Response {payload:?}", payload.len()); + pub async fn receive(&self) -> Result { + let response = self.client.receive().await?; - let response = Response::parse_bytes(&payload, true)?; + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "received {} bytes: {response:?}", response.len()); - Ok(response) + Response::parse_bytes(&response, true).map_err(|e| Error::UnableToParseResponse { err: e.into(), response }) } } -/// Creates a new `UdpTrackerClient` connected to a Udp Tracker server -/// -/// # Errors -/// -/// Will return any errors present in the call stack -/// -pub async fn new_udp_tracker_client_connected(remote_address: &str) -> Result { - let udp_client = new_udp_client_connected(remote_address).await?; - let udp_tracker_client = UdpTrackerClient { udp_client }; - Ok(udp_tracker_client) -} - /// Helper Function to Check if a UDP Service is Connectable /// /// # Panics @@ -226,10 +230,10 @@ pub async fn new_udp_tracker_client_connected(remote_address: &str) -> Result Result { - debug!("Checking Service (detail): {binding:?}."); +pub async fn check(remote_addr: &SocketAddr) -> Result { + tracing::debug!("Checking Service (detail): {remote_addr:?}."); - match new_udp_tracker_client_connected(binding.to_string().as_str()).await { + match UdpTrackerClient::new(*remote_addr, DEFAULT_TIMEOUT).await { Ok(client) => { let connect_request = ConnectRequest { transaction_id: TransactionId(I32::new(123)), @@ -238,7 +242,7 @@ pub async fn check(binding: &SocketAddr) -> Result { // client.send() return usize, but doesn't use here match client.send(connect_request.into()).await { Ok(_) => (), - Err(e) => debug!("Error: {e:?}."), + Err(e) => tracing::debug!("Error: {e:?}."), }; let process = move |response| { diff --git a/src/shared/bit_torrent/tracker/udp/mod.rs b/src/shared/bit_torrent/tracker/udp/mod.rs index 9322ef045..b9d5f34f6 100644 --- a/src/shared/bit_torrent/tracker/udp/mod.rs +++ b/src/shared/bit_torrent/tracker/udp/mod.rs @@ -1,3 +1,10 @@ +use 
std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::Request; +use thiserror::Error; +use torrust_tracker_located_error::DynError; + pub mod client; /// The maximum number of bytes in a UDP packet. @@ -6,7 +13,56 @@ pub const MAX_PACKET_SIZE: usize = 1496; /// identify the protocol. pub const PROTOCOL_ID: i64 = 0x0417_2710_1980; -/// Generates the source address for the UDP client -fn source_address(port: u16) -> String { - format!("127.0.0.1:{port}") +#[derive(Debug, Clone, Error)] +pub enum Error { + #[error("Timeout while waiting for socket to bind: {addr:?}")] + TimeoutWhileBindingToSocket { addr: SocketAddr }, + + #[error("Failed to bind to socket: {addr:?}, with error: {err:?}")] + UnableToBindToSocket { err: Arc, addr: SocketAddr }, + + #[error("Timeout while waiting for connection to remote: {remote_addr:?}")] + TimeoutWhileConnectingToRemote { remote_addr: SocketAddr }, + + #[error("Failed to connect to remote: {remote_addr:?}, with error: {err:?}")] + UnableToConnectToRemote { + err: Arc, + remote_addr: SocketAddr, + }, + + #[error("Timeout while waiting for the socket to become writable.")] + TimeoutWaitForWriteableSocket, + + #[error("Failed to get writable socket: {err:?}")] + UnableToGetWritableSocket { err: Arc }, + + #[error("Timeout while trying to send data: {data:?}")] + TimeoutWhileSendingData { data: Vec }, + + #[error("Failed to send data: {data:?}, with error: {err:?}")] + UnableToSendData { err: Arc, data: Vec }, + + #[error("Timeout while waiting for the socket to become readable.")] + TimeoutWaitForReadableSocket, + + #[error("Failed to get readable socket: {err:?}")] + UnableToGetReadableSocket { err: Arc }, + + #[error("Timeout while trying to receive data.")] + TimeoutWhileReceivingData, + + #[error("Failed to receive data: {err:?}")] + UnableToReceivingData { err: Arc }, + + #[error("Failed to get data from request: {request:?}, with error: {err:?}")] + UnableToWriteDataFromRequest { err: Arc, request: Request }, + + 
#[error("Failed to parse response: {response:?}, with error: {err:?}")] + UnableToParseResponse { err: Arc, response: Vec }, +} + +impl From for DynError { + fn from(e: Error) -> Self { + Arc::new(Box::new(e)) + } } diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index b23b20907..e37ef7bf0 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -6,8 +6,9 @@ use core::panic; use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; -use torrust_tracker::shared::bit_torrent::tracker::udp::client::{new_udp_client_connected, UdpTrackerClient}; +use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; use torrust_tracker::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_error_response; @@ -40,17 +41,17 @@ async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrac async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { let env = Started::new(&configuration::ephemeral().into()).await; - let client = match new_udp_client_connected(&env.bind_address().to_string()).await { + let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { Ok(udp_client) => udp_client, Err(err) => panic!("{err}"), }; - match client.send(&empty_udp_request()).await { + match client.client.send(&empty_udp_request()).await { Ok(_) => (), Err(err) => panic!("{err}"), }; - let response = match client.receive().await { + let response = match client.client.receive().await { Ok(response) => response, Err(err) => panic!("{err}"), }; @@ -64,7 +65,8 @@ async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_req mod receiving_a_connection_request { use aquatic_udp_protocol::{ConnectRequest, TransactionId}; - use 
torrust_tracker::shared::bit_torrent::tracker::udp::client::new_udp_tracker_client_connected; + use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; + use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_connect_response; @@ -74,7 +76,7 @@ mod receiving_a_connection_request { async fn should_return_a_connect_response() { let env = Started::new(&configuration::ephemeral().into()).await; - let client = match new_udp_tracker_client_connected(&env.bind_address().to_string()).await { + let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { Ok(udp_tracker_client) => udp_tracker_client, Err(err) => panic!("{err}"), }; @@ -106,7 +108,8 @@ mod receiving_an_announce_request { AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, TransactionId, }; - use torrust_tracker::shared::bit_torrent::tracker::udp::client::{new_udp_tracker_client_connected, UdpTrackerClient}; + use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; + use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_ipv4_announce_response; @@ -129,7 +132,7 @@ mod receiving_an_announce_request { ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), key: PeerKey::new(0i32), peers_wanted: NumberOfPeers(1i32.into()), - port: Port(client.udp_client.socket.local_addr().unwrap().port().into()), + port: Port(client.client.socket.local_addr().unwrap().port().into()), }; match client.send(announce_request.into()).await { @@ -151,7 +154,7 @@ mod receiving_an_announce_request { async fn should_return_an_announce_response() { let env = Started::new(&configuration::ephemeral().into()).await; - let client = match new_udp_tracker_client_connected(&env.bind_address().to_string()).await { + let client = match 
UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { Ok(udp_tracker_client) => udp_tracker_client, Err(err) => panic!("{err}"), }; @@ -169,7 +172,7 @@ mod receiving_an_announce_request { async fn should_return_many_announce_response() { let env = Started::new(&configuration::ephemeral().into()).await; - let client = match new_udp_tracker_client_connected(&env.bind_address().to_string()).await { + let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { Ok(udp_tracker_client) => udp_tracker_client, Err(err) => panic!("{err}"), }; @@ -189,7 +192,8 @@ mod receiving_an_announce_request { mod receiving_an_scrape_request { use aquatic_udp_protocol::{ConnectionId, InfoHash, ScrapeRequest, TransactionId}; - use torrust_tracker::shared::bit_torrent::tracker::udp::client::new_udp_tracker_client_connected; + use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; + use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_scrape_response; @@ -200,7 +204,7 @@ mod receiving_an_scrape_request { async fn should_return_a_scrape_response() { let env = Started::new(&configuration::ephemeral().into()).await; - let client = match new_udp_tracker_client_connected(&env.bind_address().to_string()).await { + let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { Ok(udp_tracker_client) => udp_tracker_client, Err(err) => panic!("{err}"), }; @@ -211,12 +215,13 @@ mod receiving_an_scrape_request { // Full scrapes are not allowed you need to pass an array of info hashes otherwise // it will return "bad request" error with empty vector - let info_hashes = vec![InfoHash([0u8; 20])]; + + let empty_info_hash = vec![InfoHash([0u8; 20])]; let scrape_request = ScrapeRequest { connection_id: ConnectionId(connection_id.0), transaction_id: TransactionId::new(123i32), - info_hashes, + info_hashes: empty_info_hash, }; match 
client.send(scrape_request.into()).await { diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 2232cb0e0..c580c3558 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -7,8 +7,7 @@ use torrust_tracker::servers::registar::Registar; use torrust_tracker::servers::udp::server::spawner::Spawner; use torrust_tracker::servers::udp::server::states::{Running, Stopped}; use torrust_tracker::servers::udp::server::Server; -use torrust_tracker::shared::bit_torrent::tracker::udp::client::DEFAULT_TIMEOUT; -use torrust_tracker_configuration::{Configuration, UdpTracker}; +use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer; From 77c6954f733763a898daf7fa655d2ea2f2d896bb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 07:23:51 +0100 Subject: [PATCH 0913/1003] chore(deps): update dependencies ```s cargo update Updating crates.io index Locking 20 packages to latest compatible versions Updating bitflags v2.5.0 -> v2.6.0 Updating cc v1.0.99 -> v1.0.103 Updating clap v4.5.7 -> v4.5.8 Updating clap_builder v4.5.7 -> v4.5.8 Updating clap_derive v4.5.5 -> v4.5.8 Updating either v1.12.0 -> v1.13.0 Updating lazy_static v1.4.0 -> v1.5.0 Updating libloading v0.8.3 -> v0.8.4 Updating log v0.4.21 -> v0.4.22 Updating num-bigint v0.4.5 -> v0.4.6 Updating object v0.36.0 -> v0.36.1 Updating proc-macro2 v1.0.85 -> v1.0.86 Updating serde_bytes v0.11.14 -> v0.11.15 Updating serde_json v1.0.117 -> v1.0.119 Updating serde_with v3.8.1 -> v3.8.2 Updating serde_with_macros v3.8.1 -> v3.8.2 Updating subtle v2.5.0 -> v2.6.1 Updating syn v2.0.66 -> v2.0.68 Updating tinyvec v1.6.0 -> v1.6.1 Updating uuid v1.8.0 -> v1.9.1 ``` --- Cargo.lock | 156 ++++++++++++++++++++++++++--------------------------- 1 file changed, 78 insertions(+), 78 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4b833504e..f977949ee 
100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -356,7 +356,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -479,7 +479,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -555,7 +555,7 @@ version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -566,7 +566,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -577,9 +577,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "bitvec" @@ -635,7 +635,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", "syn_derive", ] @@ -738,9 +738,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.99" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c51067fd44124faa7f870b4b1c969379ad32b2ba805aa959430ceaa384f695" +checksum = "2755ff20a1d93490d26ba33a6f092a38a508398a5320df5d4b3014fcccce9410" dependencies = [ "jobserver", "libc", @@ -821,9 +821,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.7" +version = "4.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5db83dced34638ad474f39f250d7fea9598bdd239eaced1bdf45d597da0f433f" +checksum = "84b3edb18336f4df585bc9aa31dd99c036dfa5dc5e9a2939a722a188f3a8970d" dependencies = [ 
"clap_builder", "clap_derive", @@ -831,9 +831,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.7" +version = "4.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7e204572485eb3fbf28f871612191521df159bc3e15a9f5064c66dba3a8c05f" +checksum = "c1c09dd5ada6c6c78075d6fd0da3f90d8080651e2d6cc8eb2f1aaa4034ced708" dependencies = [ "anstream", "anstyle", @@ -843,14 +843,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.5" +version = "4.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c780290ccf4fb26629baa7a1081e68ced113f1d3ec302fa5948f1c381ebf06c6" +checksum = "2bac35c6dafb060fd4d275d9a4ffae97917c13a6327903a8be2153cd964f7085" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -1077,7 +1077,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -1088,7 +1088,7 @@ checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ "darling_core", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -1124,7 +1124,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -1135,7 +1135,7 @@ checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -1156,9 +1156,9 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "either" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "encoding_rs" @@ -1357,7 +1357,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ 
"frunk_proc_macro_helpers", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -1369,7 +1369,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -1381,7 +1381,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -1474,7 +1474,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -1951,9 +1951,9 @@ dependencies = [ [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "lazycell" @@ -2042,9 +2042,9 @@ checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libloading" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" +checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d" dependencies = [ "cfg-if", "windows-targets 0.52.5", @@ -2108,9 +2108,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" dependencies = [ "value-bag", ] @@ -2192,7 +2192,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -2243,7 +2243,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", "termcolor", "thiserror", ] @@ -2257,7 +2257,7 @@ dependencies = [ 
"base64 0.21.7", "bigdecimal", "bindgen", - "bitflags 2.5.0", + "bitflags 2.6.0", "bitvec", "byteorder", "bytes", @@ -2365,9 +2365,9 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", "num-traits", @@ -2409,9 +2409,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "576dfe1fc8f9df304abb159d767a29d0476f7750fbf8aa7ad07816004a207434" +checksum = "081b846d1d56ddfc18fdf1a922e4f6e07a11768ea1b92dec44e42b72712ccfce" dependencies = [ "memchr", ] @@ -2434,7 +2434,7 @@ version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "cfg-if", "foreign-types", "libc", @@ -2451,7 +2451,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -2527,7 +2527,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -2601,7 +2601,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -2775,9 +2775,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.85" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ 
"unicode-ident", ] @@ -2790,7 +2790,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", "version_check", "yansi", ] @@ -2929,7 +2929,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", ] [[package]] @@ -3098,7 +3098,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.66", + "syn 2.0.68", "unicode-ident", ] @@ -3108,7 +3108,7 @@ version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -3173,7 +3173,7 @@ version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys 0.4.14", @@ -3315,7 +3315,7 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", @@ -3359,9 +3359,9 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.14" +version = "0.11.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b8497c313fd43ab992087548117643f6fcd935cbf36f176ffda0aacf9591734" +checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" dependencies = [ "serde", ] @@ -3374,7 +3374,7 @@ checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.66", + "syn 2.0.68", ] [[package]] @@ -3392,9 +3392,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.117" +version = "1.0.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" +checksum = "e8eddb61f0697cc3989c5d64b452f5488e2b8a60fd7d5076a3045076ffef8cb0" dependencies = [ "indexmap 2.2.6", "itoa", @@ -3420,7 +3420,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -3446,9 +3446,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.8.1" +version = "3.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" +checksum = "079f3a42cd87588d924ed95b533f8d30a483388c4e400ab736a7058e34f16169" dependencies = [ "base64 0.22.1", "chrono", @@ -3464,14 +3464,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.8.1" +version = "3.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" +checksum = "bc03aad67c1d26b7de277d51c86892e7d9a0110a2fe44bf6b26cc569fba302d6" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -3597,9 +3597,9 @@ dependencies = [ [[package]] name = "subtle" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" @@ -3614,9 +3614,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.66" +version = "2.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" +checksum = "901fa70d88b9d6c98022e23b4136f9f3e54e4662c3bc1bd1d84a42a9a0f0c1e9" dependencies = [ "proc-macro2", "quote", @@ -3632,7 +3632,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -3735,7 +3735,7 @@ checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -3791,9 +3791,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "c55115c6fbe2d2bef26eb09ad74bde02d8255476fc0c7b515ef09fbb35742d82" dependencies = [ "tinyvec_macros", ] @@ -3830,7 +3830,7 @@ checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -4097,7 +4097,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ "async-compression", - "bitflags 2.5.0", + "bitflags 2.6.0", "bytes", "futures-core", "http", @@ -4155,7 +4155,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] @@ -4285,9 +4285,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.8.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" +checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439" dependencies = [ "getrandom", "rand", @@ -4369,7 +4369,7 @@ dependencies = [ "once_cell", 
"proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", "wasm-bindgen-shared", ] @@ -4403,7 +4403,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4664,7 +4664,7 @@ checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.68", ] [[package]] From daeb7cc7756a83657e40121785f2525319be37e1 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 1 Jul 2024 09:09:37 +0200 Subject: [PATCH 0914/1003] ci: coverage workflow pre-build fix --- .github/workflows/coverage.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 4dc104242..28c1be6d0 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -61,11 +61,11 @@ jobs: - id: build name: Pre-build Main Project - run: cargo build --jobs 2 + run: cargo build --workspace --all-targets --all-features --jobs 2 - id: build_tests name: Pre-build Tests - run: cargo build --tests --jobs 2 + run: cargo build --workspace --all-targets --all-features --tests --jobs 2 - id: test name: Run Unit Tests From 6495a4c1458660d1709b9668ea7dac1ca4319abd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 28 Jun 2024 11:11:50 +0100 Subject: [PATCH 0915/1003] docs: [#918] add comments to the UDP server --- src/servers/udp/server/launcher.rs | 38 +++--- src/servers/udp/server/request_buffer.rs | 153 ++++++++++++++--------- 2 files changed, 112 insertions(+), 79 deletions(-) diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index db448c2ff..bb7c7d70f 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -103,7 +103,7 @@ impl Launcher { } async fn run_udp_server_main(mut receiver: Receiver, tracker: Arc) { - let 
reqs = &mut ActiveRequests::default(); + let active_requests = &mut ActiveRequests::default(); let addr = receiver.bound_socket_address(); let local_addr = format!("udp://{addr}"); @@ -127,27 +127,18 @@ impl Launcher { } }; - /* code-review: - - Does it make sense to spawn a new request processor task when - the ActiveRequests buffer is full? - - We could store the UDP request in a secondary buffer and wait - until active tasks are finished. When a active request is finished - we can move a new UDP request from the pending to process requests - buffer to the active requests buffer. - - This forces us to define an explicit timeout for active requests. - - In the current solution the timeout is dynamic, it depends on - the system load. With high load we can remove tasks without - giving them enough time to be processed. With low load we could - keep processing running longer than a reasonable time for - the client to receive the response. - - */ - - let abort_handle = + // We spawn the new task even if there active requests buffer is + // full. This could seem counterintuitive because we are accepting + // more request and consuming more memory even if the server is + // already busy. However, we "force_push" the new tasks in the + // buffer. That means, in the worst scenario we will abort a + // running task to make place for the new task. + // + // Once concern could be to reach an starvation point were we + // are only adding and removing tasks without given them the + // chance to finish. However, the buffer is yielding before + // aborting one tasks, giving it the chance to finish. 
+ let abort_handle: tokio::task::AbortHandle = tokio::task::spawn(Launcher::process_request(req, tracker.clone(), receiver.bound_socket.clone())) .abort_handle(); @@ -155,9 +146,10 @@ impl Launcher { continue; } - reqs.force_push(abort_handle, &local_addr).await; + active_requests.force_push(abort_handle, &local_addr).await; } else { tokio::task::yield_now().await; + // the request iterator returned `None`. tracing::error!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server breaking: (ran dry, should not happen in production!)"); break; diff --git a/src/servers/udp/server/request_buffer.rs b/src/servers/udp/server/request_buffer.rs index c1d4f2696..b3a481b60 100644 --- a/src/servers/udp/server/request_buffer.rs +++ b/src/servers/udp/server/request_buffer.rs @@ -4,10 +4,15 @@ use tokio::task::AbortHandle; use crate::servers::udp::UDP_TRACKER_LOG_TARGET; -/// Ring-Buffer of Active Requests +/// A ring buffer for managing active UDP request abort handles. +/// +/// The `ActiveRequests` struct maintains a fixed-size ring buffer of abort +/// handles for UDP request processor tasks. It ensures that at most 50 requests +/// are handled concurrently, and provides mechanisms to handle buffer overflow +/// by removing finished or oldest unfinished tasks. #[derive(Default)] pub struct ActiveRequests { - rb: StaticRb, // the number of requests we handle at the same time. + rb: StaticRb, // The number of requests handled simultaneously. } impl std::fmt::Debug for ActiveRequests { @@ -29,67 +34,103 @@ impl Drop for ActiveRequests { } impl ActiveRequests { - /// It inserts the abort handle for the UDP request processor tasks. + /// Inserts an abort handle for a UDP request processor task. /// - /// If there is no room for the new task, it tries to make place: + /// If the buffer is full, this method attempts to make space by: /// - /// - Firstly, removing finished tasks. - /// - Secondly, removing the oldest unfinished tasks. + /// 1. Removing finished tasks. 
+ /// 2. Removing the oldest unfinished task if no finished tasks are found. /// /// # Panics /// - /// Will panics if it can't make space for the new handle. + /// This method will panic if it cannot make space for adding a new handle. + /// + /// # Arguments + /// + /// * `abort_handle` - The `AbortHandle` for the UDP request processor task. + /// * `local_addr` - A string slice representing the local address for logging. pub async fn force_push(&mut self, abort_handle: AbortHandle, local_addr: &str) { - // fill buffer with requests - let Err(abort_handle) = self.rb.try_push(abort_handle) else { - return; - }; - - let mut finished: u64 = 0; - let mut unfinished_task = None; - - // buffer is full.. lets make some space. - for h in self.rb.pop_iter() { - // remove some finished tasks - if h.is_finished() { - finished += 1; - continue; + // Attempt to add the new handle to the buffer. + match self.rb.try_push(abort_handle) { + Ok(()) => { + // Successfully added the task, no further action needed. } - - // task is unfinished.. give it another chance. - tokio::task::yield_now().await; - - // if now finished, we continue. - if h.is_finished() { - finished += 1; - continue; + Err(abort_handle) => { + // Buffer is full, attempt to make space. + + let mut finished: u64 = 0; + let mut unfinished_task = None; + + for removed_abort_handle in self.rb.pop_iter() { + // We found a finished tasks ... increase the counter and + // continue searching for more and ... + if removed_abort_handle.is_finished() { + finished += 1; + continue; + } + + // The current removed tasks is not finished. + + // Give it a second chance to finish. + tokio::task::yield_now().await; + + // Recheck if it finished ... increase the counter and + // continue searching for more and ... + if removed_abort_handle.is_finished() { + finished += 1; + continue; + } + + // At this point we found a "definitive" unfinished task. + + // Log unfinished task. 
+ tracing::debug!( + target: UDP_TRACKER_LOG_TARGET, + local_addr, + removed_count = finished, + "Udp::run_udp_server::loop (got unfinished task)" + ); + + // If no finished tasks were found, abort the current + // unfinished task. + if finished == 0 { + // We make place aborting this task. + removed_abort_handle.abort(); + + tracing::warn!( + target: UDP_TRACKER_LOG_TARGET, + local_addr, + "Udp::run_udp_server::loop aborting request: (no finished tasks)" + ); + + break; + } + + // At this point we found at least one finished task, but the + // current one is not finished and it was removed from the + // buffer, so we need to re-insert in in the buffer. + + // Save the unfinished task for re-entry. + unfinished_task = Some(removed_abort_handle); + } + + // After this point there can't be a race condition because only + // one thread owns the active buffer. There is no way for the + // buffer to be full again. That means the "expects" should + // never happen. + + // Reinsert the unfinished task if any. + if let Some(h) = unfinished_task { + self.rb.try_push(h).expect("it was previously inserted"); + } + + // Insert the new task, ensuring there's space. + if !abort_handle.is_finished() { + self.rb + .try_push(abort_handle) + .expect("it should remove at least one element."); + } } - - tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, removed_count = finished, "Udp::run_udp_server::loop (got unfinished task)"); - - if finished == 0 { - // we have _no_ finished tasks.. will abort the unfinished task to make space... - h.abort(); - - tracing::warn!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop aborting request: (no finished tasks)"); - - break; - } - - // we have space, return unfinished task for re-entry. - unfinished_task = Some(h); - } - - // re-insert the previous unfinished task. - if let Some(h) = unfinished_task { - self.rb.try_push(h).expect("it was previously inserted"); - } - - // insert the new task. 
- if !abort_handle.is_finished() { - self.rb - .try_push(abort_handle) - .expect("it should remove at least one element."); - } + }; } } From d1c2d15c7050ff75f04e026652b429077c8c2ace Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 28 Jun 2024 16:42:15 +0100 Subject: [PATCH 0916/1003] fix: [#918] revision for UDP active reqeust buffer comments --- src/servers/udp/server/request_buffer.rs | 30 ++++++++++++++---------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/src/servers/udp/server/request_buffer.rs b/src/servers/udp/server/request_buffer.rs index b3a481b60..ffbd9565d 100644 --- a/src/servers/udp/server/request_buffer.rs +++ b/src/servers/udp/server/request_buffer.rs @@ -49,22 +49,22 @@ impl ActiveRequests { /// /// * `abort_handle` - The `AbortHandle` for the UDP request processor task. /// * `local_addr` - A string slice representing the local address for logging. - pub async fn force_push(&mut self, abort_handle: AbortHandle, local_addr: &str) { + pub async fn force_push(&mut self, new_task: AbortHandle, local_addr: &str) { // Attempt to add the new handle to the buffer. - match self.rb.try_push(abort_handle) { + match self.rb.try_push(new_task) { Ok(()) => { // Successfully added the task, no further action needed. } - Err(abort_handle) => { + Err(new_task) => { // Buffer is full, attempt to make space. let mut finished: u64 = 0; let mut unfinished_task = None; - for removed_abort_handle in self.rb.pop_iter() { + for old_task in self.rb.pop_iter() { // We found a finished tasks ... increase the counter and // continue searching for more and ... - if removed_abort_handle.is_finished() { + if old_task.is_finished() { finished += 1; continue; } @@ -76,7 +76,7 @@ impl ActiveRequests { // Recheck if it finished ... increase the counter and // continue searching for more and ... - if removed_abort_handle.is_finished() { + if old_task.is_finished() { finished += 1; continue; } @@ -95,7 +95,7 @@ impl ActiveRequests { // unfinished task. 
if finished == 0 { // We make place aborting this task. - removed_abort_handle.abort(); + old_task.abort(); tracing::warn!( target: UDP_TRACKER_LOG_TARGET, @@ -111,7 +111,7 @@ impl ActiveRequests { // buffer, so we need to re-insert in in the buffer. // Save the unfinished task for re-entry. - unfinished_task = Some(removed_abort_handle); + unfinished_task = Some(old_task); } // After this point there can't be a race condition because only @@ -124,11 +124,15 @@ impl ActiveRequests { self.rb.try_push(h).expect("it was previously inserted"); } - // Insert the new task, ensuring there's space. - if !abort_handle.is_finished() { - self.rb - .try_push(abort_handle) - .expect("it should remove at least one element."); + // Insert the new task. + // + // Notice that space has already been made for this new task in + // the buffer. One or many old task have already been finished + // or yielded, freeing space in the buffer. Or a single + // unfinished task has been aborted to make space for this new + // task. + if !new_task.is_finished() { + self.rb.try_push(new_task).expect("it should have space for this new task."); } } }; From 2186809608de314d915aba8c5738a4ecb40fd10c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 10:26:02 +0100 Subject: [PATCH 0917/1003] refactor: [#932] sort config core section fields When you format a toml file with a linter it sorts the keys alphabetically. It's good to have the same order in the code. It's also a common practice for JSON. This helps to make serialization deterministic. 
--- packages/configuration/src/v1/core.rs | 75 +++++++++++++-------------- 1 file changed, 37 insertions(+), 38 deletions(-) diff --git a/packages/configuration/src/v1/core.rs b/packages/configuration/src/v1/core.rs index 49fdf2a80..31da85915 100644 --- a/packages/configuration/src/v1/core.rs +++ b/packages/configuration/src/v1/core.rs @@ -8,27 +8,6 @@ use crate::{AnnouncePolicy, TrackerPolicy}; #[allow(clippy::struct_excessive_bools)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct Core { - /// Tracker mode. See [`TrackerMode`] for more information. - #[serde(default = "Core::default_mode")] - pub mode: TrackerMode, - - /// Weather the tracker should collect statistics about tracker usage. - /// If enabled, the tracker will collect statistics like the number of - /// connections handled, the number of announce requests handled, etc. - /// Refer to the [`Tracker`](https://docs.rs/torrust-tracker) for more - /// information about the collected metrics. - #[serde(default = "Core::default_tracker_usage_statistics")] - pub tracker_usage_statistics: bool, - - /// Interval in seconds that the cleanup job will run to remove inactive - /// peers from the torrent peer list. - #[serde(default = "Core::default_inactive_peer_cleanup_interval")] - pub inactive_peer_cleanup_interval: u64, - - // Tracker policy configuration. - #[serde(default = "Core::default_tracker_policy")] - pub tracker_policy: TrackerPolicy, - // Announce policy configuration. #[serde(default = "Core::default_announce_policy")] pub announce_policy: AnnouncePolicy, @@ -37,51 +16,71 @@ pub struct Core { #[serde(default = "Core::default_database")] pub database: Database, + /// Interval in seconds that the cleanup job will run to remove inactive + /// peers from the torrent peer list. + #[serde(default = "Core::default_inactive_peer_cleanup_interval")] + pub inactive_peer_cleanup_interval: u64, + + /// Tracker mode. See [`TrackerMode`] for more information. 
+ #[serde(default = "Core::default_mode")] + pub mode: TrackerMode, + // Network configuration. #[serde(default = "Core::default_network")] pub net: Network, + + // Tracker policy configuration. + #[serde(default = "Core::default_tracker_policy")] + pub tracker_policy: TrackerPolicy, + + /// Weather the tracker should collect statistics about tracker usage. + /// If enabled, the tracker will collect statistics like the number of + /// connections handled, the number of announce requests handled, etc. + /// Refer to the [`Tracker`](https://docs.rs/torrust-tracker) for more + /// information about the collected metrics. + #[serde(default = "Core::default_tracker_usage_statistics")] + pub tracker_usage_statistics: bool, } impl Default for Core { fn default() -> Self { Self { - mode: Self::default_mode(), - tracker_usage_statistics: Self::default_tracker_usage_statistics(), - inactive_peer_cleanup_interval: Self::default_inactive_peer_cleanup_interval(), - tracker_policy: Self::default_tracker_policy(), announce_policy: Self::default_announce_policy(), database: Self::default_database(), + inactive_peer_cleanup_interval: Self::default_inactive_peer_cleanup_interval(), + mode: Self::default_mode(), net: Self::default_network(), + tracker_policy: Self::default_tracker_policy(), + tracker_usage_statistics: Self::default_tracker_usage_statistics(), } } } impl Core { - fn default_mode() -> TrackerMode { - TrackerMode::Public + fn default_announce_policy() -> AnnouncePolicy { + AnnouncePolicy::default() } - fn default_tracker_usage_statistics() -> bool { - true + fn default_database() -> Database { + Database::default() } fn default_inactive_peer_cleanup_interval() -> u64 { 600 } - fn default_tracker_policy() -> TrackerPolicy { - TrackerPolicy::default() + fn default_mode() -> TrackerMode { + TrackerMode::Public } - fn default_announce_policy() -> AnnouncePolicy { - AnnouncePolicy::default() + fn default_network() -> Network { + Network::default() } - fn default_database() 
-> Database { - Database::default() + fn default_tracker_policy() -> TrackerPolicy { + TrackerPolicy::default() } - - fn default_network() -> Network { - Network::default() + fn default_tracker_usage_statistics() -> bool { + true } } From f5d8dc6679e349578d69ed56251f30cc7f23f976 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 10:40:08 +0100 Subject: [PATCH 0918/1003] refactor: [#932] WIP. Add new core config options: private and listed --- packages/configuration/src/v1/core.rs | 18 ++++++++++++++++++ packages/configuration/src/v1/mod.rs | 4 +++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/packages/configuration/src/v1/core.rs b/packages/configuration/src/v1/core.rs index 31da85915..9f3af36b6 100644 --- a/packages/configuration/src/v1/core.rs +++ b/packages/configuration/src/v1/core.rs @@ -21,6 +21,10 @@ pub struct Core { #[serde(default = "Core::default_inactive_peer_cleanup_interval")] pub inactive_peer_cleanup_interval: u64, + // Whe `true` only approved torrents can be announced in the tracker. + #[serde(default = "Core::default_listed")] + pub listed: bool, + /// Tracker mode. See [`TrackerMode`] for more information. #[serde(default = "Core::default_mode")] pub mode: TrackerMode, @@ -29,6 +33,10 @@ pub struct Core { #[serde(default = "Core::default_network")] pub net: Network, + // Whe `true` clients require a key to connect and use the tracker. + #[serde(default = "Core::default_private")] + pub private: bool, + // Tracker policy configuration. 
#[serde(default = "Core::default_tracker_policy")] pub tracker_policy: TrackerPolicy, @@ -48,8 +56,10 @@ impl Default for Core { announce_policy: Self::default_announce_policy(), database: Self::default_database(), inactive_peer_cleanup_interval: Self::default_inactive_peer_cleanup_interval(), + listed: Self::default_listed(), mode: Self::default_mode(), net: Self::default_network(), + private: Self::default_private(), tracker_policy: Self::default_tracker_policy(), tracker_usage_statistics: Self::default_tracker_usage_statistics(), } @@ -69,6 +79,10 @@ impl Core { 600 } + fn default_listed() -> bool { + false + } + fn default_mode() -> TrackerMode { TrackerMode::Public } @@ -77,6 +91,10 @@ impl Core { Network::default() } + fn default_private() -> bool { + false + } + fn default_tracker_policy() -> TrackerPolicy { TrackerPolicy::default() } diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 546f55b6e..080edde70 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -366,8 +366,10 @@ mod tests { [core] mode = "public" - tracker_usage_statistics = true inactive_peer_cleanup_interval = 600 + listed = false + private = false + tracker_usage_statistics = true [core.tracker_policy] max_peer_timeout = 900 From ca31c835ed08503f89c8470eddcc84381847e959 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 11:22:06 +0100 Subject: [PATCH 0919/1003] feat: [#932] replace `mode` core config option with `private` and `listed` flags From: ```toml [core] mode = "public" tracker_usage_statistics = true inactive_peer_cleanup_interval = 600 ``` To: ```toml [core] inactive_peer_cleanup_interval = 600 listed = false private = false tracker_usage_statistics = true ``` --- Cargo.lock | 1 - Cargo.toml | 4 +- packages/configuration/src/v1/core.rs | 14 +---- packages/configuration/src/v1/mod.rs | 26 ++++---- packages/primitives/src/lib.rs | 69 ---------------------- 
packages/test-helpers/Cargo.toml | 1 - packages/test-helpers/src/configuration.rs | 18 +++--- src/app.rs | 6 +- src/bootstrap/jobs/http_tracker.rs | 4 +- src/bootstrap/jobs/tracker_apis.rs | 4 +- src/core/mod.rs | 41 ++++++------- src/lib.rs | 13 ++-- src/servers/apis/server.rs | 4 +- src/servers/http/server.rs | 4 +- src/servers/http/v1/handlers/announce.rs | 4 +- src/servers/http/v1/handlers/scrape.rs | 4 +- src/servers/http/v1/services/announce.rs | 2 +- src/servers/http/v1/services/scrape.rs | 2 +- src/servers/udp/handlers.rs | 6 +- src/servers/udp/server/mod.rs | 6 +- tests/servers/http/v1/contract.rs | 62 +++++++++---------- 21 files changed, 108 insertions(+), 187 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f977949ee..3a403332a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4054,7 +4054,6 @@ version = "3.0.0-alpha.12-develop" dependencies = [ "rand", "torrust-tracker-configuration", - "torrust-tracker-primitives", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index a65c2a74d..3eca9934d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -80,12 +80,12 @@ tower-http = { version = "0", features = ["compression-full", "cors", "propagate trace = "0" tracing = "0" tracing-subscriber = { version = "0.3.18", features = ["json"] } -url = {version = "2", features = ["serde"] } +url = { version = "2", features = ["serde"] } uuid = { version = "1", features = ["v4"] } zerocopy = "0.7.33" [package.metadata.cargo-machete] -ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_bytes"] +ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_bytes", "torrust-tracker-primitives"] [dev-dependencies] local-ip-address = "0" diff --git a/packages/configuration/src/v1/core.rs b/packages/configuration/src/v1/core.rs index 9f3af36b6..1f0a0f957 100644 --- a/packages/configuration/src/v1/core.rs +++ b/packages/configuration/src/v1/core.rs @@ -1,5 +1,4 @@ use serde::{Deserialize, Serialize}; -use 
torrust_tracker_primitives::TrackerMode; use super::network::Network; use crate::v1::database::Database; @@ -21,19 +20,15 @@ pub struct Core { #[serde(default = "Core::default_inactive_peer_cleanup_interval")] pub inactive_peer_cleanup_interval: u64, - // Whe `true` only approved torrents can be announced in the tracker. + // When `true` only approved torrents can be announced in the tracker. #[serde(default = "Core::default_listed")] pub listed: bool, - /// Tracker mode. See [`TrackerMode`] for more information. - #[serde(default = "Core::default_mode")] - pub mode: TrackerMode, - // Network configuration. #[serde(default = "Core::default_network")] pub net: Network, - // Whe `true` clients require a key to connect and use the tracker. + // When `true` clients require a key to connect and use the tracker. #[serde(default = "Core::default_private")] pub private: bool, @@ -57,7 +52,6 @@ impl Default for Core { database: Self::default_database(), inactive_peer_cleanup_interval: Self::default_inactive_peer_cleanup_interval(), listed: Self::default_listed(), - mode: Self::default_mode(), net: Self::default_network(), private: Self::default_private(), tracker_policy: Self::default_tracker_policy(), @@ -83,10 +77,6 @@ impl Core { false } - fn default_mode() -> TrackerMode { - TrackerMode::Public - } - fn default_network() -> Network { Network::default() } diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v1/mod.rs index 080edde70..c5e0f9f7a 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v1/mod.rs @@ -199,14 +199,10 @@ //! log_level = "info" //! //! [core] -//! mode = "public" -//! tracker_usage_statistics = true //! inactive_peer_cleanup_interval = 600 -//! -//! [core.tracker_policy] -//! max_peer_timeout = 900 -//! persistent_torrent_completed_stat = false -//! remove_peerless_torrents = true +//! listed = false +//! private = false +//! tracker_usage_statistics = true //! //! [core.announce_policy] //! 
interval = 120 @@ -220,6 +216,11 @@ //! external_ip = "0.0.0.0" //! on_reverse_proxy = false //! +//! [core.tracker_policy] +//! max_peer_timeout = 900 +//! persistent_torrent_completed_stat = false +//! remove_peerless_torrents = true +//! //! [http_api] //! bind_address = "127.0.0.1:1212" //! @@ -365,17 +366,11 @@ mod tests { log_level = "info" [core] - mode = "public" inactive_peer_cleanup_interval = 600 listed = false private = false tracker_usage_statistics = true - [core.tracker_policy] - max_peer_timeout = 900 - persistent_torrent_completed_stat = false - remove_peerless_torrents = true - [core.announce_policy] interval = 120 interval_min = 120 @@ -388,6 +383,11 @@ mod tests { external_ip = "0.0.0.0" on_reverse_proxy = false + [core.tracker_policy] + max_peer_timeout = 900 + persistent_torrent_completed_stat = false + remove_peerless_torrents = true + [health_check_api] bind_address = "127.0.0.1:1313" "# diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index 454635e8d..7ad1d35b4 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -5,8 +5,6 @@ //! by the tracker server crate, but also by other crates in the Torrust //! ecosystem. use std::collections::BTreeMap; -use std::fmt; -use std::str::FromStr; use std::time::Duration; use info_hash::InfoHash; @@ -64,70 +62,3 @@ pub enum DatabaseDriver { } pub type PersistentTorrents = BTreeMap; - -/// The mode the tracker will run in. -/// -/// Refer to [Torrust Tracker Configuration](https://docs.rs/torrust-tracker-configuration) -/// to know how to configure the tracker to run in each mode. -#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Debug)] -pub enum TrackerMode { - /// Will track every new info hash and serve every peer. - #[serde(rename = "public")] - Public, - - /// Will only track whitelisted info hashes. 
- #[serde(rename = "listed")] - Listed, - - /// Will only serve authenticated peers - #[serde(rename = "private")] - Private, - - /// Will only track whitelisted info hashes and serve authenticated peers - #[serde(rename = "private_listed")] - PrivateListed, -} - -impl Default for TrackerMode { - fn default() -> Self { - Self::Public - } -} - -impl fmt::Display for TrackerMode { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let display_str = match self { - TrackerMode::Public => "public", - TrackerMode::Listed => "listed", - TrackerMode::Private => "private", - TrackerMode::PrivateListed => "private_listed", - }; - write!(f, "{display_str}") - } -} - -impl FromStr for TrackerMode { - type Err = String; - - fn from_str(s: &str) -> Result { - match s.to_lowercase().as_str() { - "public" => Ok(TrackerMode::Public), - "listed" => Ok(TrackerMode::Listed), - "private" => Ok(TrackerMode::Private), - "private_listed" => Ok(TrackerMode::PrivateListed), - _ => Err(format!("Unknown tracker mode: {s}")), - } - } -} - -impl TrackerMode { - #[must_use] - pub fn is_open(&self) -> bool { - matches!(self, TrackerMode::Public | TrackerMode::Listed) - } - - #[must_use] - pub fn is_close(&self) -> bool { - !self.is_open() - } -} diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 2f10c6a0f..4fed6bc42 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -17,4 +17,3 @@ version.workspace = true [dependencies] rand = "0" torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 646617b32..65d9d9144 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -3,7 +3,6 @@ use std::env; use std::net::{IpAddr, Ipv4Addr, 
Ipv6Addr, SocketAddr}; use torrust_tracker_configuration::{Configuration, HttpApi, HttpTracker, LogLevel, UdpTracker}; -use torrust_tracker_primitives::TrackerMode; use crate::random; @@ -86,40 +85,41 @@ pub fn ephemeral_without_reverse_proxy() -> Configuration { /// Ephemeral configuration with `public` mode. #[must_use] -pub fn ephemeral_mode_public() -> Configuration { +pub fn ephemeral_public() -> Configuration { let mut cfg = ephemeral(); - cfg.core.mode = TrackerMode::Public; + cfg.core.private = false; cfg } /// Ephemeral configuration with `private` mode. #[must_use] -pub fn ephemeral_mode_private() -> Configuration { +pub fn ephemeral_private() -> Configuration { let mut cfg = ephemeral(); - cfg.core.mode = TrackerMode::Private; + cfg.core.private = true; cfg } /// Ephemeral configuration with `listed` mode. #[must_use] -pub fn ephemeral_mode_whitelisted() -> Configuration { +pub fn ephemeral_listed() -> Configuration { let mut cfg = ephemeral(); - cfg.core.mode = TrackerMode::Listed; + cfg.core.listed = true; cfg } /// Ephemeral configuration with `private_listed` mode. 
#[must_use] -pub fn ephemeral_mode_private_whitelisted() -> Configuration { +pub fn ephemeral_private_and_listed() -> Configuration { let mut cfg = ephemeral(); - cfg.core.mode = TrackerMode::PrivateListed; + cfg.core.private = true; + cfg.core.listed = true; cfg } diff --git a/src/app.rs b/src/app.rs index f6a909002..2d70a6dde 100644 --- a/src/app.rs +++ b/src/app.rs @@ -51,7 +51,7 @@ pub async fn start(config: &Configuration, tracker: Arc) -> Vec) -> Vec>, - mode: TrackerMode, + private: bool, + listed: bool, policy: TrackerPolicy, keys: tokio::sync::RwLock>, whitelist: tokio::sync::RwLock>, @@ -558,12 +560,11 @@ impl Tracker { ) -> Result { let database = Arc::new(databases::driver::build(&config.database.driver, &config.database.path)?); - let mode = config.mode.clone(); - Ok(Tracker { //config, announce_policy: config.announce_policy, - mode, + private: config.private, + listed: config.listed, keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), torrents: Arc::default(), @@ -578,17 +579,17 @@ impl Tracker { /// Returns `true` is the tracker is in public mode. pub fn is_public(&self) -> bool { - self.mode == TrackerMode::Public + !self.private } /// Returns `true` is the tracker is in private mode. pub fn is_private(&self) -> bool { - self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed + self.private } /// Returns `true` is the tracker is in whitelisted mode. - pub fn is_whitelisted(&self) -> bool { - self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed + pub fn is_listed(&self) -> bool { + self.listed } /// Returns `true` if the tracker requires authentication. @@ -869,7 +870,7 @@ impl Tracker { /// Will return an error if the tracker is running in `listed` mode /// and the infohash is not whitelisted. 
pub async fn authorize(&self, info_hash: &InfoHash) -> Result<(), Error> { - if !self.is_whitelisted() { + if !self.is_listed() { return Ok(()); } @@ -1028,15 +1029,15 @@ mod tests { use crate::shared::bit_torrent::info_hash::fixture::gen_seeded_infohash; fn public_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_mode_public()) + tracker_factory(&configuration::ephemeral_public()) } fn private_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_mode_private()) + tracker_factory(&configuration::ephemeral_private()) } fn whitelisted_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_mode_whitelisted()) + tracker_factory(&configuration::ephemeral_listed()) } pub fn tracker_persisting_torrents_in_database() -> Tracker { diff --git a/src/lib.rs b/src/lib.rs index cf2834418..e5362259f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -172,14 +172,10 @@ //! //! [core] //! inactive_peer_cleanup_interval = 600 -//! mode = "public" +//! listed = false +//! private = false //! tracker_usage_statistics = true //! -//! [core.tracker_policy] -//! max_peer_timeout = 900 -//! persistent_torrent_completed_stat = false -//! remove_peerless_torrents = true -//! //! [core.announce_policy] //! interval = 120 //! interval_min = 120 @@ -192,6 +188,11 @@ //! external_ip = "0.0.0.0" //! on_reverse_proxy = false //! +//! [core.tracker_policy] +//! max_peer_timeout = 900 +//! persistent_torrent_completed_stat = false +//! remove_peerless_torrents = true +//! //! [health_check_api] //! 
bind_address = "127.0.0.1:1313" //!``` diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 967080bd5..39a68a856 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -266,7 +266,7 @@ impl Launcher { mod tests { use std::sync::Arc; - use torrust_tracker_test_helpers::configuration::ephemeral_mode_public; + use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::initialize_with_configuration; use crate::bootstrap::jobs::make_rust_tls; @@ -275,7 +275,7 @@ mod tests { #[tokio::test] async fn it_should_be_able_to_start_and_stop() { - let cfg = Arc::new(ephemeral_mode_public()); + let cfg = Arc::new(ephemeral_public()); let config = &cfg.http_api.clone().unwrap(); let tracker = initialize_with_configuration(&cfg); diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 87f0e945b..faedaf921 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -226,7 +226,7 @@ pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { mod tests { use std::sync::Arc; - use torrust_tracker_test_helpers::configuration::ephemeral_mode_public; + use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::initialize_with_configuration; use crate::bootstrap::jobs::make_rust_tls; @@ -235,7 +235,7 @@ mod tests { #[tokio::test] async fn it_should_be_able_to_start_and_stop() { - let cfg = Arc::new(ephemeral_mode_public()); + let cfg = Arc::new(ephemeral_public()); let tracker = initialize_with_configuration(&cfg); let http_trackers = cfg.http_trackers.clone().expect("missing HTTP trackers configuration"); let config = &http_trackers[0]; diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 0b009f700..0514a9f71 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -181,11 +181,11 @@ mod tests { use 
crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; fn private_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_mode_private()) + tracker_factory(&configuration::ephemeral_private()) } fn whitelisted_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_mode_whitelisted()) + tracker_factory(&configuration::ephemeral_listed()) } fn tracker_on_reverse_proxy() -> Tracker { diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 172607637..eb8875a58 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -121,11 +121,11 @@ mod tests { use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; fn private_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_mode_private()) + tracker_factory(&configuration::ephemeral_private()) } fn whitelisted_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_mode_whitelisted()) + tracker_factory(&configuration::ephemeral_listed()) } fn tracker_on_reverse_proxy() -> Tracker { diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index eee5e4688..47175817d 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -57,7 +57,7 @@ mod tests { use crate::core::Tracker; fn public_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_mode_public()) + tracker_factory(&configuration::ephemeral_public()) } fn sample_info_hash() -> InfoHash { diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index bf9fbd933..ee7814194 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -70,7 +70,7 @@ mod tests { use crate::core::Tracker; fn public_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_mode_public()) + tracker_factory(&configuration::ephemeral_public()) } fn 
sample_info_hashes() -> Vec { diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 12ae6a250..f1f61ee6b 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -339,15 +339,15 @@ mod tests { } fn public_tracker() -> Arc { - initialized_tracker(&configuration::ephemeral_mode_public()) + initialized_tracker(&configuration::ephemeral_public()) } fn private_tracker() -> Arc { - initialized_tracker(&configuration::ephemeral_mode_private()) + initialized_tracker(&configuration::ephemeral_private()) } fn whitelisted_tracker() -> Arc { - initialized_tracker(&configuration::ephemeral_mode_whitelisted()) + initialized_tracker(&configuration::ephemeral_listed()) } fn initialized_tracker(configuration: &Configuration) -> Arc { diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 034f71beb..e3321f157 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -47,7 +47,7 @@ mod tests { use std::sync::Arc; use std::time::Duration; - use torrust_tracker_test_helpers::configuration::ephemeral_mode_public; + use torrust_tracker_test_helpers::configuration::ephemeral_public; use super::spawner::Spawner; use super::Server; @@ -56,7 +56,7 @@ mod tests { #[tokio::test] async fn it_should_be_able_to_start_and_stop() { - let cfg = Arc::new(ephemeral_mode_public()); + let cfg = Arc::new(ephemeral_public()); let tracker = initialize_with_configuration(&cfg); let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); let config = &udp_trackers[0]; @@ -79,7 +79,7 @@ mod tests { #[tokio::test] async fn it_should_be_able_to_start_and_stop_with_wait() { - let cfg = Arc::new(ephemeral_mode_public()); + let cfg = Arc::new(ephemeral_public()); let tracker = initialize_with_configuration(&cfg); let config = &cfg.udp_trackers.as_ref().unwrap().first().unwrap(); let bind_to = config.bind_address; diff --git a/tests/servers/http/v1/contract.rs 
b/tests/servers/http/v1/contract.rs index a7962db0f..cdffead99 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -107,7 +107,7 @@ mod for_all_config_modes { #[tokio::test] async fn it_should_start_and_stop() { - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; env.stop().await; } @@ -376,7 +376,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let response = Client::new(*env.bind_address()) .announce( @@ -405,7 +405,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -447,7 +447,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -499,7 +499,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let peer = 
PeerBuilder::default().build(); @@ -526,7 +526,7 @@ mod for_all_config_modes { // Tracker Returns Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -567,7 +567,7 @@ mod for_all_config_modes { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -605,7 +605,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; Client::new(*env.bind_address()) .announce(&QueryBuilder::default().query()) @@ -648,7 +648,7 @@ mod for_all_config_modes { async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; Client::new(*env.bind_address()) .announce( @@ -669,7 +669,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; Client::new(*env.bind_address()) .announce(&QueryBuilder::default().query()) @@ -712,7 +712,7 @@ mod for_all_config_modes { async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; Client::new(*env.bind_address()) .announce( @@ -733,7 +733,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); @@ -905,7 +905,7 @@ mod for_all_config_modes { //#[tokio::test] #[allow(dead_code)] async fn should_fail_when_the_request_is_empty() { - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let response = Client::new(*env.bind_address()).get("scrape").await; assert_missing_query_params_for_scrape_request_error_response(response).await; @@ -915,7 +915,7 @@ mod for_all_config_modes { #[tokio::test] 
async fn should_fail_when_the_info_hash_param_is_invalid() { - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -932,7 +932,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -971,7 +971,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1010,7 +1010,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1029,7 +1029,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_accept_multiple_infohashes() { - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); @@ -1055,7 +1055,7 @@ mod for_all_config_modes { 
#[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let env = Started::new(&configuration::ephemeral_mode_public().into()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1123,7 +1123,7 @@ mod configured_as_whitelisted { #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let env = Started::new(&configuration::ephemeral_mode_whitelisted().into()).await; + let env = Started::new(&configuration::ephemeral_listed().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1138,7 +1138,7 @@ mod configured_as_whitelisted { #[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { - let env = Started::new(&configuration::ephemeral_mode_whitelisted().into()).await; + let env = Started::new(&configuration::ephemeral_listed().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1172,7 +1172,7 @@ mod configured_as_whitelisted { #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let env = Started::new(&configuration::ephemeral_mode_whitelisted().into()).await; + let env = Started::new(&configuration::ephemeral_listed().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1202,7 +1202,7 @@ mod configured_as_whitelisted { #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let env = Started::new(&configuration::ephemeral_mode_whitelisted().into()).await; + let env = Started::new(&configuration::ephemeral_listed().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1263,7 +1263,7 @@ mod configured_as_private { #[tokio::test] async fn 
should_respond_to_authenticated_peers() { - let env = Started::new(&configuration::ephemeral_mode_private().into()).await; + let env = Started::new(&configuration::ephemeral_private().into()).await; let expiring_key = env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); @@ -1278,7 +1278,7 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let env = Started::new(&configuration::ephemeral_mode_private().into()).await; + let env = Started::new(&configuration::ephemeral_private().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1293,7 +1293,7 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let env = Started::new(&configuration::ephemeral_mode_private().into()).await; + let env = Started::new(&configuration::ephemeral_private().into()).await; let invalid_key = "INVALID_KEY"; @@ -1308,7 +1308,7 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { - let env = Started::new(&configuration::ephemeral_mode_private().into()).await; + let env = Started::new(&configuration::ephemeral_private().into()).await; // The tracker does not have this key let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); @@ -1341,7 +1341,7 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let env = Started::new(&configuration::ephemeral_mode_private().into()).await; + let env = Started::new(&configuration::ephemeral_private().into()).await; let invalid_key = "INVALID_KEY"; @@ -1356,7 +1356,7 @@ mod configured_as_private { #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let env = Started::new(&configuration::ephemeral_mode_private().into()).await; + let env = 
Started::new(&configuration::ephemeral_private().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1386,7 +1386,7 @@ mod configured_as_private { #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let env = Started::new(&configuration::ephemeral_mode_private().into()).await; + let env = Started::new(&configuration::ephemeral_private().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1430,7 +1430,7 @@ mod configured_as_private { // There is not authentication error // code-review: should this really be this way? - let env = Started::new(&configuration::ephemeral_mode_private().into()).await; + let env = Started::new(&configuration::ephemeral_private().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); From a5b9e14a47e1496ce8fddfec836f21651ad9ca5b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 12:40:39 +0100 Subject: [PATCH 0920/1003] refactor: inject the core config to the core tracker --- packages/configuration/src/lib.rs | 7 +++--- src/core/mod.rs | 41 +++++++++++-------------------- 2 files changed, 19 insertions(+), 29 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index ca008a49a..8a544b6e2 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -35,10 +35,11 @@ const ENV_VAR_CONFIG_TOML: &str = "TORRUST_TRACKER_CONFIG_TOML"; pub const ENV_VAR_CONFIG_TOML_PATH: &str = "TORRUST_TRACKER_CONFIG_TOML_PATH"; pub type Configuration = v1::Configuration; -pub type UdpTracker = v1::udp_tracker::UdpTracker; -pub type HttpTracker = v1::http_tracker::HttpTracker; -pub type HttpApi = v1::tracker_api::HttpApi; +pub type Core = v1::core::Core; pub type HealthCheckApi = v1::health_check_api::HealthCheckApi; +pub type HttpApi = v1::tracker_api::HttpApi; +pub type 
HttpTracker = v1::http_tracker::HttpTracker; +pub type UdpTracker = v1::udp_tracker::UdpTracker; pub type AccessTokens = HashMap; diff --git a/src/core/mod.rs b/src/core/mod.rs index 20b7c81f4..a9fe2a8a6 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -456,8 +456,7 @@ use std::time::Duration; use derive_more::Constructor; use tokio::sync::mpsc::error::SendError; use torrust_tracker_clock::clock::Time; -use torrust_tracker_configuration::v1::core::Core; -use torrust_tracker_configuration::{AnnouncePolicy, TrackerPolicy, TORRENT_PEERS_LIMIT}; +use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; @@ -482,20 +481,15 @@ use crate::CurrentClock; /// > Typically, the `Tracker` is used by a higher application service that handles /// > the network layer. pub struct Tracker { - announce_policy: AnnouncePolicy, + config: Core, /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) /// or [`MySQL`](crate::core::databases::mysql) pub database: Arc>, - private: bool, - listed: bool, - policy: TrackerPolicy, keys: tokio::sync::RwLock>, whitelist: tokio::sync::RwLock>, pub torrents: Arc, stats_event_sender: Option>, stats_repository: statistics::Repo, - external_ip: Option, - on_reverse_proxy: bool, } /// Structure that holds the data returned by the `announce` request. 
@@ -561,35 +555,29 @@ impl Tracker { let database = Arc::new(databases::driver::build(&config.database.driver, &config.database.path)?); Ok(Tracker { - //config, - announce_policy: config.announce_policy, - private: config.private, - listed: config.listed, + config: config.clone(), keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), torrents: Arc::default(), stats_event_sender, stats_repository, database, - external_ip: config.net.external_ip, - policy: config.tracker_policy.clone(), - on_reverse_proxy: config.net.on_reverse_proxy, }) } /// Returns `true` is the tracker is in public mode. pub fn is_public(&self) -> bool { - !self.private + !self.config.private } /// Returns `true` is the tracker is in private mode. pub fn is_private(&self) -> bool { - self.private + self.config.private } /// Returns `true` is the tracker is in whitelisted mode. pub fn is_listed(&self) -> bool { - self.listed + self.config.listed } /// Returns `true` if the tracker requires authentication. @@ -599,15 +587,15 @@ impl Tracker { /// Returns `true` is the tracker is in whitelisted mode. pub fn is_behind_reverse_proxy(&self) -> bool { - self.on_reverse_proxy + self.config.net.on_reverse_proxy } pub fn get_announce_policy(&self) -> AnnouncePolicy { - self.announce_policy + self.config.announce_policy } pub fn get_maybe_external_ip(&self) -> Option { - self.external_ip + self.config.net.external_ip } /// It handles an announce request. @@ -632,7 +620,7 @@ impl Tracker { // responsibility into another authentication service. 
debug!("Before: {peer:?}"); - peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.external_ip)); + peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); debug!("After: {peer:?}"); let stats = self.upsert_peer_and_get_stats(info_hash, peer).await; @@ -735,7 +723,7 @@ impl Tracker { /// /// # Context: Tracker async fn persist_stats(&self, info_hash: &InfoHash, swarm_metadata: &SwarmMetadata) { - if self.policy.persistent_torrent_completed_stat { + if self.config.tracker_policy.persistent_torrent_completed_stat { let completed = swarm_metadata.downloaded; let info_hash = *info_hash; @@ -759,11 +747,12 @@ impl Tracker { /// # Context: Tracker pub fn cleanup_torrents(&self) { // If we don't need to remove torrents we will use the faster iter - if self.policy.remove_peerless_torrents { - self.torrents.remove_peerless_torrents(&self.policy); + if self.config.tracker_policy.remove_peerless_torrents { + self.torrents.remove_peerless_torrents(&self.config.tracker_policy); } else { let current_cutoff = - CurrentClock::now_sub(&Duration::from_secs(u64::from(self.policy.max_peer_timeout))).unwrap_or_default(); + CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) + .unwrap_or_default(); self.torrents.remove_inactive_peers(current_cutoff); } } From 5a16ea10a8d474a444eadd27b17ebfb4e9a150f3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 12:53:22 +0100 Subject: [PATCH 0921/1003] refactor: [#932] make all Tracker fields private --- src/core/mod.rs | 16 ++++++++++++++-- tests/servers/api/mod.rs | 7 +++++-- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index a9fe2a8a6..4136966d2 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -481,13 +481,14 @@ use crate::CurrentClock; /// > Typically, the `Tracker` is used by a higher application service that handles /// > the network layer. 
pub struct Tracker { + // The tracker configuration. config: Core, /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) /// or [`MySQL`](crate::core::databases::mysql) - pub database: Arc>, + database: Arc>, keys: tokio::sync::RwLock>, whitelist: tokio::sync::RwLock>, - pub torrents: Arc, + torrents: Arc, stats_event_sender: Option>, stats_repository: statistics::Repo, } @@ -987,6 +988,17 @@ impl Tracker { Some(stats_event_sender) => stats_event_sender.send_event(event).await, } } + + /// It drops the database tables. + /// + /// # Errors + /// + /// Will return `Err` if unable to drop tables. + pub fn drop_database_tables(&self) -> Result<(), databases::error::Error> { + // todo: this is only used for testing. WE have to pass the database + // reference directly to the tests instead of via the tracker. + self.database.drop_database_tables() + } } #[must_use] diff --git a/tests/servers/api/mod.rs b/tests/servers/api/mod.rs index 9c30e316a..38df46e9b 100644 --- a/tests/servers/api/mod.rs +++ b/tests/servers/api/mod.rs @@ -11,7 +11,10 @@ pub type Started = environment::Environment; /// It forces a database error by dropping all tables. /// That makes any query fail. -/// code-review: alternatively we could inject a database mock in the future. +/// code-review: +/// Alternatively we could: +/// - Inject a database mock in the future. +/// - Inject directly the database reference passed to the Tracker type. 
pub fn force_database_error(tracker: &Arc) { - tracker.database.drop_database_tables().unwrap(); + tracker.drop_database_tables().unwrap(); } From f61c7c36cb3592c1989aba71b0959f6477d6d8a9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 13:05:11 +0100 Subject: [PATCH 0922/1003] docs: add commments to core::Tracker struct fields --- src/core/mod.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 4136966d2..9a64826c9 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -481,15 +481,26 @@ use crate::CurrentClock; /// > Typically, the `Tracker` is used by a higher application service that handles /// > the network layer. pub struct Tracker { - // The tracker configuration. + /// The tracker configuration. config: Core, + /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) /// or [`MySQL`](crate::core::databases::mysql) database: Arc>, + + /// Tracker users' keys. Only for private trackers. keys: tokio::sync::RwLock>, + + /// The list of allowed torrents. Only for listed trackers. whitelist: tokio::sync::RwLock>, + + /// The in-memory torrents repository. torrents: Arc, + + /// Service to send stats events. stats_event_sender: Option>, + + /// The in-memory stats repo. stats_repository: statistics::Repo, } From b6b841d6fe244b8deaa62629791d100623a508ce Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 13:31:30 +0100 Subject: [PATCH 0923/1003] chore: remove crate from ignore list in cargo machete It was accidentaly added. 
--- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 3eca9934d..41afb1538 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,7 +85,7 @@ uuid = { version = "1", features = ["v4"] } zerocopy = "0.7.33" [package.metadata.cargo-machete] -ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_bytes", "torrust-tracker-primitives"] +ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_bytes"] [dev-dependencies] local-ip-address = "0" From 2969df35a9a93ea824b1fa15f9f28b182dc8cece Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 15:50:57 +0100 Subject: [PATCH 0924/1003] refactor: [#939] change config version The current config version is 2 leaving the previous before the config averhaul as version 1. --- packages/configuration/src/lib.rs | 14 +++++++------- packages/configuration/src/{v1 => v2}/core.rs | 2 +- packages/configuration/src/{v1 => v2}/database.rs | 0 .../src/{v1 => v2}/health_check_api.rs | 0 .../configuration/src/{v1 => v2}/http_tracker.rs | 0 packages/configuration/src/{v1 => v2}/logging.rs | 0 packages/configuration/src/{v1 => v2}/mod.rs | 2 +- packages/configuration/src/{v1 => v2}/network.rs | 0 .../configuration/src/{v1 => v2}/tracker_api.rs | 2 +- .../configuration/src/{v1 => v2}/udp_tracker.rs | 0 src/bootstrap/jobs/torrent_cleanup.rs | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) rename packages/configuration/src/{v1 => v2}/core.rs (98%) rename packages/configuration/src/{v1 => v2}/database.rs (100%) rename packages/configuration/src/{v1 => v2}/health_check_api.rs (100%) rename packages/configuration/src/{v1 => v2}/http_tracker.rs (100%) rename packages/configuration/src/{v1 => v2}/logging.rs (100%) rename packages/configuration/src/{v1 => v2}/mod.rs (99%) rename packages/configuration/src/{v1 => v2}/network.rs (100%) rename packages/configuration/src/{v1 => v2}/tracker_api.rs (98%) rename packages/configuration/src/{v1 => 
v2}/udp_tracker.rs (100%) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 8a544b6e2..72b998a31 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -4,7 +4,7 @@ //! Torrust Tracker, which is a `BitTorrent` tracker server. //! //! The current version for configuration is [`v1`]. -pub mod v1; +pub mod v2; use std::collections::HashMap; use std::env; @@ -34,12 +34,12 @@ const ENV_VAR_CONFIG_TOML: &str = "TORRUST_TRACKER_CONFIG_TOML"; /// The `tracker.toml` file location. pub const ENV_VAR_CONFIG_TOML_PATH: &str = "TORRUST_TRACKER_CONFIG_TOML_PATH"; -pub type Configuration = v1::Configuration; -pub type Core = v1::core::Core; -pub type HealthCheckApi = v1::health_check_api::HealthCheckApi; -pub type HttpApi = v1::tracker_api::HttpApi; -pub type HttpTracker = v1::http_tracker::HttpTracker; -pub type UdpTracker = v1::udp_tracker::UdpTracker; +pub type Configuration = v2::Configuration; +pub type Core = v2::core::Core; +pub type HealthCheckApi = v2::health_check_api::HealthCheckApi; +pub type HttpApi = v2::tracker_api::HttpApi; +pub type HttpTracker = v2::http_tracker::HttpTracker; +pub type UdpTracker = v2::udp_tracker::UdpTracker; pub type AccessTokens = HashMap; diff --git a/packages/configuration/src/v1/core.rs b/packages/configuration/src/v2/core.rs similarity index 98% rename from packages/configuration/src/v1/core.rs rename to packages/configuration/src/v2/core.rs index 1f0a0f957..09280917c 100644 --- a/packages/configuration/src/v1/core.rs +++ b/packages/configuration/src/v2/core.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use super::network::Network; -use crate::v1::database::Database; +use crate::v2::database::Database; use crate::{AnnouncePolicy, TrackerPolicy}; #[allow(clippy::struct_excessive_bools)] diff --git a/packages/configuration/src/v1/database.rs b/packages/configuration/src/v2/database.rs similarity index 100% rename from 
packages/configuration/src/v1/database.rs rename to packages/configuration/src/v2/database.rs diff --git a/packages/configuration/src/v1/health_check_api.rs b/packages/configuration/src/v2/health_check_api.rs similarity index 100% rename from packages/configuration/src/v1/health_check_api.rs rename to packages/configuration/src/v2/health_check_api.rs diff --git a/packages/configuration/src/v1/http_tracker.rs b/packages/configuration/src/v2/http_tracker.rs similarity index 100% rename from packages/configuration/src/v1/http_tracker.rs rename to packages/configuration/src/v2/http_tracker.rs diff --git a/packages/configuration/src/v1/logging.rs b/packages/configuration/src/v2/logging.rs similarity index 100% rename from packages/configuration/src/v1/logging.rs rename to packages/configuration/src/v2/logging.rs diff --git a/packages/configuration/src/v1/mod.rs b/packages/configuration/src/v2/mod.rs similarity index 99% rename from packages/configuration/src/v1/mod.rs rename to packages/configuration/src/v2/mod.rs index c5e0f9f7a..80e6cc0e6 100644 --- a/packages/configuration/src/v1/mod.rs +++ b/packages/configuration/src/v2/mod.rs @@ -357,7 +357,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr}; - use crate::v1::Configuration; + use crate::v2::Configuration; use crate::Info; #[cfg(test)] diff --git a/packages/configuration/src/v1/network.rs b/packages/configuration/src/v2/network.rs similarity index 100% rename from packages/configuration/src/v1/network.rs rename to packages/configuration/src/v2/network.rs diff --git a/packages/configuration/src/v1/tracker_api.rs b/packages/configuration/src/v2/tracker_api.rs similarity index 98% rename from packages/configuration/src/v1/tracker_api.rs rename to packages/configuration/src/v2/tracker_api.rs index 302a4ee95..1a2e0cbf0 100644 --- a/packages/configuration/src/v1/tracker_api.rs +++ b/packages/configuration/src/v2/tracker_api.rs @@ -65,7 +65,7 @@ impl HttpApi { #[cfg(test)] mod tests { - use crate::v1::tracker_api::HttpApi; 
+ use crate::v2::tracker_api::HttpApi; #[test] fn http_api_configuration_should_check_if_it_contains_a_token() { diff --git a/packages/configuration/src/v1/udp_tracker.rs b/packages/configuration/src/v2/udp_tracker.rs similarity index 100% rename from packages/configuration/src/v1/udp_tracker.rs rename to packages/configuration/src/v2/udp_tracker.rs diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index 992e7e644..c0890f6ac 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -14,7 +14,7 @@ use std::sync::Arc; use chrono::Utc; use tokio::task::JoinHandle; -use torrust_tracker_configuration::v1::core::Core; +use torrust_tracker_configuration::v2::core::Core; use tracing::info; use crate::core; From 632ad0d662f34070da38d9f82e1501a3b4f588bf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 15:54:24 +0100 Subject: [PATCH 0925/1003] refactor: use only latest config version in prod code Concrete config versions should be used for testing or config migration tools. 
--- packages/configuration/src/lib.rs | 1 + src/bootstrap/jobs/torrent_cleanup.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 72b998a31..5b139f573 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -40,6 +40,7 @@ pub type HealthCheckApi = v2::health_check_api::HealthCheckApi; pub type HttpApi = v2::tracker_api::HttpApi; pub type HttpTracker = v2::http_tracker::HttpTracker; pub type UdpTracker = v2::udp_tracker::UdpTracker; +pub type Database = v2::database::Database; pub type AccessTokens = HashMap; diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index c0890f6ac..6f057fb53 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -14,7 +14,7 @@ use std::sync::Arc; use chrono::Utc; use tokio::task::JoinHandle; -use torrust_tracker_configuration::v2::core::Core; +use torrust_tracker_configuration::Core; use tracing::info; use crate::core; From e299792d94af7bc373e40a136d1d232e891a2f72 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 16:12:28 +0100 Subject: [PATCH 0926/1003] feat: warn admin when no service is enabled in the configuration That might be a config error. --- src/app.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/app.rs b/src/app.rs index 2d70a6dde..fd7d6a99d 100644 --- a/src/app.rs +++ b/src/app.rs @@ -38,6 +38,13 @@ use crate::{core, servers}; /// - Can't retrieve tracker keys from database. /// - Can't load whitelist from database. 
pub async fn start(config: &Configuration, tracker: Arc) -> Vec> { + if config.http_api.is_none() + && (config.udp_trackers.is_none() || config.udp_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) + && (config.http_trackers.is_none() || config.http_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) + { + warn!("No services enabled in configuration"); + } + let mut jobs: Vec> = Vec::new(); let registar = Registar::default(); From 46c3263203778d8097cf32f3c4a868ad49f599c7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 16:13:25 +0100 Subject: [PATCH 0927/1003] feat: normalize log messages - No '.' full stop at the end. - Start message with uppercase. --- src/bootstrap/logging.rs | 2 +- src/console/ci/e2e/logs_parser.rs | 4 ++-- src/console/ci/e2e/runner.rs | 2 +- src/console/clients/checker/app.rs | 2 +- src/console/clients/udp/app.rs | 2 +- src/console/profiling.rs | 2 +- src/main.rs | 2 +- src/servers/apis/mod.rs | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs index 649495dc7..f17c1ef28 100644 --- a/src/bootstrap/logging.rs +++ b/src/bootstrap/logging.rs @@ -52,7 +52,7 @@ fn tracing_stdout_init(filter: LevelFilter, style: &TraceStyle) { TraceStyle::Json => builder.json().init(), }; - info!("logging initialized."); + info!("Logging initialized"); } #[derive(Debug)] diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index 37eb367b1..fd7295eab 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -23,7 +23,7 @@ impl RunningServices { /// /// ```text /// Loading configuration from default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... - /// 2024-06-10T16:07:39.989540Z INFO torrust_tracker::bootstrap::logging: logging initialized. 
+ /// 2024-06-10T16:07:39.989540Z INFO torrust_tracker::bootstrap::logging: Logging initialized /// 2024-06-10T16:07:39.990205Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6868 /// 2024-06-10T16:07:39.990215Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6868 /// 2024-06-10T16:07:39.990244Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 @@ -116,7 +116,7 @@ mod tests { fn it_should_parse_from_logs_with_valid_logs() { let logs = r" Loading configuration from default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... - 2024-06-10T16:07:39.989540Z INFO torrust_tracker::bootstrap::logging: logging initialized. + 2024-06-10T16:07:39.989540Z INFO torrust_tracker::bootstrap::logging: Logging initialized 2024-06-10T16:07:39.990244Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 2024-06-10T16:07:39.990255Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 2024-06-10T16:07:39.990261Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled diff --git a/src/console/ci/e2e/runner.rs b/src/console/ci/e2e/runner.rs index a3d61894e..f2285938b 100644 --- a/src/console/ci/e2e/runner.rs +++ b/src/console/ci/e2e/runner.rs @@ -117,7 +117,7 @@ pub fn run() -> anyhow::Result<()> { fn tracing_stdout_init(filter: LevelFilter) { tracing_subscriber::fmt().with_max_level(filter).init(); - info!("Logging initialized."); + info!("Logging initialized"); } fn load_tracker_configuration(args: &Args) -> anyhow::Result { diff --git a/src/console/clients/checker/app.rs b/src/console/clients/checker/app.rs index 9f9825d92..3bafc2661 100644 --- a/src/console/clients/checker/app.rs +++ b/src/console/clients/checker/app.rs @@ -103,7 +103,7 @@ pub async fn run() -> Result> { fn tracing_stdout_init(filter: LevelFilter) { tracing_subscriber::fmt().with_max_level(filter).init(); - debug!("logging initialized."); + debug!("Logging initialized"); } fn setup_config(args: Args) -> Result { diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index 
bcba39558..af6f10611 100644 --- a/src/console/clients/udp/app.rs +++ b/src/console/clients/udp/app.rs @@ -129,7 +129,7 @@ pub async fn run() -> anyhow::Result<()> { fn tracing_stdout_init(filter: LevelFilter) { tracing_subscriber::fmt().with_max_level(filter).init(); - debug!("logging initialized."); + debug!("Logging initialized"); } async fn handle_announce(remote_addr: SocketAddr, info_hash: &TorrustInfoHash) -> Result { diff --git a/src/console/profiling.rs b/src/console/profiling.rs index c95354d6f..3e2925d9c 100644 --- a/src/console/profiling.rs +++ b/src/console/profiling.rs @@ -192,7 +192,7 @@ pub async fn run() { info!("Torrust timed shutdown.."); }, _ = tokio::signal::ctrl_c() => { - info!("Torrust shutting down via Ctrl+C.."); + info!("Torrust shutting down via Ctrl+C ..."); // Await for all jobs to shutdown futures::future::join_all(jobs).await; } diff --git a/src/main.rs b/src/main.rs index bad1fdb1e..ab2af65e2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -10,7 +10,7 @@ async fn main() { // handle the signals tokio::select! { _ = tokio::signal::ctrl_c() => { - info!("Torrust shutting down.."); + info!("Torrust shutting down ..."); // Await for all jobs to shutdown futures::future::join_all(jobs).await; diff --git a/src/servers/apis/mod.rs b/src/servers/apis/mod.rs index b44ccab9f..0451b46c0 100644 --- a/src/servers/apis/mod.rs +++ b/src/servers/apis/mod.rs @@ -42,7 +42,7 @@ //! //! ```text //! Loading configuration from config file ./tracker.toml -//! 023-03-28T12:19:24.963054069+01:00 [torrust_tracker::bootstrap::logging][INFO] logging initialized. +//! 023-03-28T12:19:24.963054069+01:00 [torrust_tracker::bootstrap::logging][INFO] Logging initialized //! ... //! 023-03-28T12:19:24.964138723+01:00 [torrust_tracker::bootstrap::jobs::tracker_apis][INFO] Starting Torrust APIs server on: http://0.0.0.0:1212 //! 
``` From ddfbde3c05f89a02dc3dbb1d1fc45dc5aa836fcc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 16:30:49 +0100 Subject: [PATCH 0928/1003] feat: the configuration can be serialized as JSON We will print to logs the final configuration used to run the tracker (after Figment processes all sources): ```output Loading extra configuration from file: `storage/tracker/etc/tracker.toml` ... 2024-07-01T15:29:09.785334Z INFO torrust_tracker::bootstrap::logging: Logging initialized 2024-07-01T15:29:09.785862Z INFO torrust_tracker::bootstrap::app: Configuration: { "logging": { "log_level": "info" }, "core": { "announce_policy": { "interval": 120, "interval_min": 120 }, "database": { "driver": "Sqlite3", "path": "./storage/tracker/lib/database/sqlite3.db" }, "inactive_peer_cleanup_interval": 600, "listed": false, "net": { "external_ip": "0.0.0.0", "on_reverse_proxy": false }, "private": true, "tracker_policy": { "max_peer_timeout": 900, "persistent_torrent_completed_stat": false, "remove_peerless_torrents": true }, "tracker_usage_statistics": true }, "udp_trackers": null, "http_trackers": null, "http_api": null, "health_check_api": { "bind_address": "127.0.0.1:1313" } } 2024-07-01T15:29:09.785879Z WARN torrust_tracker::app: No services enabled in configuration 2024-07-01T15:29:09.785920Z INFO torrust_tracker::app: No UDP blocks in configuration 2024-07-01T15:29:09.785923Z INFO torrust_tracker::app: No HTTP blocks in configuration 2024-07-01T15:29:09.785924Z INFO torrust_tracker::app: No API block in configuration 2024-07-01T15:29:09.785941Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 2024-07-01T15:29:09.786035Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 ``` --- Cargo.lock | 1 + packages/configuration/Cargo.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 3a403332a..96c78db81 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4012,6 +4012,7 @@ dependencies = [ "derive_more", "figment", "serde", + 
"serde_json", "serde_with", "thiserror", "toml", diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 53e4e4cfa..51260d082 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -19,6 +19,7 @@ camino = { version = "1.1.6", features = ["serde", "serde1"] } derive_more = "0" figment = { version = "0.10.18", features = ["env", "test", "toml"] } serde = { version = "1", features = ["derive"] } +serde_json = { version = "1", features = ["preserve_order"] } serde_with = "3" thiserror = "1" toml = "0" From 397ef0f6d003d4887272b6e25a2c5bbad9cc96c8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 16:33:05 +0100 Subject: [PATCH 0929/1003] feat: log final config after processing all config sources We print to logs the final configuration used to run the tracker (after Figment processes all sources): ```output Loading extra configuration from file: `storage/tracker/etc/tracker.toml` ... 2024-07-01T15:29:09.785334Z INFO torrust_tracker::bootstrap::logging: Logging initialized 2024-07-01T15:29:09.785862Z INFO torrust_tracker::bootstrap::app: Configuration: { "logging": { "log_level": "info" }, "core": { "announce_policy": { "interval": 120, "interval_min": 120 }, "database": { "driver": "Sqlite3", "path": "./storage/tracker/lib/database/sqlite3.db" }, "inactive_peer_cleanup_interval": 600, "listed": false, "net": { "external_ip": "0.0.0.0", "on_reverse_proxy": false }, "private": true, "tracker_policy": { "max_peer_timeout": 900, "persistent_torrent_completed_stat": false, "remove_peerless_torrents": true }, "tracker_usage_statistics": true }, "udp_trackers": null, "http_trackers": null, "http_api": null, "health_check_api": { "bind_address": "127.0.0.1:1313" } } 2024-07-01T15:29:09.785879Z WARN torrust_tracker::app: No services enabled in configuration 2024-07-01T15:29:09.785920Z INFO torrust_tracker::app: No UDP blocks in configuration 2024-07-01T15:29:09.785923Z INFO torrust_tracker::app: 
No HTTP blocks in configuration 2024-07-01T15:29:09.785924Z INFO torrust_tracker::app: No API block in configuration 2024-07-01T15:29:09.785941Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 2024-07-01T15:29:09.786035Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 ``` --- packages/configuration/src/lib.rs | 6 +++--- packages/configuration/src/v2/mod.rs | 16 ++++++++++++++++ src/bootstrap/app.rs | 4 ++++ 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 5b139f573..dd250d280 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -111,17 +111,17 @@ impl Info { let env_var_config_toml_path = ENV_VAR_CONFIG_TOML_PATH.to_string(); let config_toml = if let Ok(config_toml) = env::var(env_var_config_toml) { - println!("Loading configuration from environment variable:\n {config_toml}"); + println!("Loading extra configuration from environment variable:\n {config_toml}"); Some(config_toml) } else { None }; let config_toml_path = if let Ok(config_toml_path) = env::var(env_var_config_toml_path) { - println!("Loading configuration from file: `{config_toml_path}` ..."); + println!("Loading extra configuration from file: `{config_toml_path}` ..."); config_toml_path } else { - println!("Loading configuration from default configuration file: `{default_config_toml_path}` ..."); + println!("Loading extra configuration from default configuration file: `{default_config_toml_path}` ..."); default_config_toml_path }; diff --git a/packages/configuration/src/v2/mod.rs b/packages/configuration/src/v2/mod.rs index 80e6cc0e6..9b6e01cb4 100644 --- a/packages/configuration/src/v2/mod.rs +++ b/packages/configuration/src/v2/mod.rs @@ -346,10 +346,26 @@ impl Configuration { } /// Encodes the configuration to TOML. + /// + /// # Panics + /// + /// Will panic if it can't be converted to TOML. 
+ #[must_use] fn to_toml(&self) -> String { // code-review: do we need to use Figment also to serialize into toml? toml::to_string(self).expect("Could not encode TOML value") } + + /// Encodes the configuration to JSON. + /// + /// # Panics + /// + /// Will panic if it can't be converted to JSON. + #[must_use] + pub fn to_json(&self) -> String { + // code-review: do we need to use Figment also to serialize into json? + serde_json::to_string_pretty(self).expect("Could not encode JSON value") + } } #[cfg(test)] diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 396e63682..285b72133 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -15,6 +15,7 @@ use std::sync::Arc; use torrust_tracker_clock::static_time; use torrust_tracker_configuration::Configuration; +use tracing::info; use super::config::initialize_configuration; use crate::bootstrap; @@ -26,8 +27,11 @@ use crate::shared::crypto::ephemeral_instance_keys; #[must_use] pub fn setup() -> (Configuration, Arc) { let configuration = initialize_configuration(); + let tracker = initialize_with_configuration(&configuration); + info!("Configuration:\n{}", configuration.to_json()); + (configuration, tracker) } From af61e20c43ced22dac50bcfae07891372c2a0dae Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 17:55:03 +0100 Subject: [PATCH 0930/1003] feat: [#936] rename config value log_level to threshold From: ```toml [logging] log_level = "info" ``` To: ```toml [logging] threshold = "info" ``` Threshold better represents the concept since this value is the security level at which the app stops collecting logs, meaning it filters out logs with a lower security level. 
--- docs/benchmarking.md | 10 +++---- packages/configuration/src/lib.rs | 18 +----------- packages/configuration/src/v2/logging.rs | 29 ++++++++++++++----- packages/configuration/src/v2/mod.rs | 4 +-- packages/test-helpers/src/configuration.rs | 6 ++-- .../config/tracker.udp.benchmarking.toml | 2 +- src/bootstrap/app.rs | 2 +- src/bootstrap/logging.rs | 26 +++++++++-------- src/console/ci/e2e/logs_parser.rs | 4 +-- src/core/mod.rs | 2 +- src/lib.rs | 2 +- 11 files changed, 53 insertions(+), 52 deletions(-) diff --git a/docs/benchmarking.md b/docs/benchmarking.md index ce3b69057..7d0228737 100644 --- a/docs/benchmarking.md +++ b/docs/benchmarking.md @@ -26,11 +26,11 @@ cargo build --release -p aquatic_udp_load_test ### Run UDP load test -Run the tracker with UDP service enabled and other services disabled and set log level to `error`. +Run the tracker with UDP service enabled and other services disabled and set log threshold to `error`. ```toml [logging] -log_level = "error" +threshold = "error" [[udp_trackers]] bind_address = "0.0.0.0:6969" @@ -97,7 +97,7 @@ Announce responses per info hash: - p100: 361 ``` -> IMPORTANT: The performance of the Torrust UDP Tracker is drastically decreased with these log levels: `info`, `debug`, `trace`. +> IMPORTANT: The performance of the Torrust UDP Tracker is drastically decreased with these log threshold: `info`, `debug`, `trace`. ```output Requests out: 40719.21/second @@ -161,11 +161,11 @@ Announce responses per info hash: #### Torrust-Actix UDP Tracker -Run the tracker with UDP service enabled and other services disabled and set log level to `error`. +Run the tracker with UDP service enabled and other services disabled and set log threshold to `error`. 
```toml [logging] -log_level = "error" +threshold = "error" [[udp_trackers]] bind_address = "0.0.0.0:6969" diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index dd250d280..841a5182e 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -41,6 +41,7 @@ pub type HttpApi = v2::tracker_api::HttpApi; pub type HttpTracker = v2::http_tracker::HttpTracker; pub type UdpTracker = v2::udp_tracker::UdpTracker; pub type Database = v2::database::Database; +pub type Threshold = v2::logging::Threshold; pub type AccessTokens = HashMap; @@ -241,20 +242,3 @@ impl TslConfig { Utf8PathBuf::new() } } - -#[derive(Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Debug, Hash, Clone)] -#[serde(rename_all = "lowercase")] -pub enum LogLevel { - /// A level lower than all log levels. - Off, - /// Corresponds to the `Error` log level. - Error, - /// Corresponds to the `Warn` log level. - Warn, - /// Corresponds to the `Info` log level. - Info, - /// Corresponds to the `Debug` log level. - Debug, - /// Corresponds to the `Trace` log level. - Trace, -} diff --git a/packages/configuration/src/v2/logging.rs b/packages/configuration/src/v2/logging.rs index e33522db4..e7dbe146c 100644 --- a/packages/configuration/src/v2/logging.rs +++ b/packages/configuration/src/v2/logging.rs @@ -1,26 +1,41 @@ use serde::{Deserialize, Serialize}; -use crate::LogLevel; - #[allow(clippy::struct_excessive_bools)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct Logging { /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, /// `Debug` and `Trace`. Default is `Info`. 
- #[serde(default = "Logging::default_log_level")] - pub log_level: LogLevel, + #[serde(default = "Logging::default_threshold")] + pub threshold: Threshold, } impl Default for Logging { fn default() -> Self { Self { - log_level: Self::default_log_level(), + threshold: Self::default_threshold(), } } } impl Logging { - fn default_log_level() -> LogLevel { - LogLevel::Info + fn default_threshold() -> Threshold { + Threshold::Info } } + +#[derive(Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Debug, Hash, Clone)] +#[serde(rename_all = "lowercase")] +pub enum Threshold { + /// A threshold lower than all security levels. + Off, + /// Corresponds to the `Error` security level. + Error, + /// Corresponds to the `Warn` security level. + Warn, + /// Corresponds to the `Info` security level. + Info, + /// Corresponds to the `Debug` security level. + Debug, + /// Corresponds to the `Trace` security level. + Trace, +} diff --git a/packages/configuration/src/v2/mod.rs b/packages/configuration/src/v2/mod.rs index 9b6e01cb4..92ac88506 100644 --- a/packages/configuration/src/v2/mod.rs +++ b/packages/configuration/src/v2/mod.rs @@ -196,7 +196,7 @@ //! //! ```toml //! [logging] -//! log_level = "info" +//! threshold = "info" //! //! [core] //! 
inactive_peer_cleanup_interval = 600 @@ -379,7 +379,7 @@ mod tests { #[cfg(test)] fn default_config_toml() -> String { let config = r#"[logging] - log_level = "info" + threshold = "info" [core] inactive_peer_cleanup_interval = 600 diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 65d9d9144..0a6c1c72b 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -2,7 +2,7 @@ use std::env; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; -use torrust_tracker_configuration::{Configuration, HttpApi, HttpTracker, LogLevel, UdpTracker}; +use torrust_tracker_configuration::{Configuration, HttpApi, HttpTracker, Threshold, UdpTracker}; use crate::random; @@ -14,7 +14,7 @@ use crate::random; /// > **NOTICE**: Port 0 is used for ephemeral ports, which means that the OS /// > will assign a random free port for the tracker to use. /// -/// > **NOTICE**: You can change the log level to `debug` to see the logs of the +/// > **NOTICE**: You can change the log threshold to `debug` to see the logs of the /// > tracker while running the tests. That can be particularly useful when /// > debugging tests. 
/// @@ -28,7 +28,7 @@ pub fn ephemeral() -> Configuration { let mut config = Configuration::default(); - config.logging.log_level = LogLevel::Off; // Change to `debug` for tests debugging + config.logging.threshold = Threshold::Off; // Change to `debug` for tests debugging // Ephemeral socket address for API let api_port = 0u16; diff --git a/share/default/config/tracker.udp.benchmarking.toml b/share/default/config/tracker.udp.benchmarking.toml index d9361cf10..c01fcd25e 100644 --- a/share/default/config/tracker.udp.benchmarking.toml +++ b/share/default/config/tracker.udp.benchmarking.toml @@ -1,5 +1,5 @@ [logging] -log_level = "error" +threshold = "error" [core] remove_peerless_torrents = false diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 285b72133..023520507 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -68,7 +68,7 @@ pub fn initialize_tracker(config: &Configuration) -> Tracker { tracker_factory(config) } -/// It initializes the log level, format and channel. +/// It initializes the log threshold, format and channel. /// /// See [the logging setup](crate::bootstrap::logging::setup) for more info about logging. pub fn initialize_logging(config: &Configuration) { diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs index f17c1ef28..496b3ea45 100644 --- a/src/bootstrap/logging.rs +++ b/src/bootstrap/logging.rs @@ -1,6 +1,7 @@ //! Setup for the application logging. //! -//! It redirects the log info to the standard output with the log level defined in the configuration. +//! It redirects the log info to the standard output with the log threshold +//! defined in the configuration. //! //! - `Off` //! - `Error` @@ -12,15 +13,16 @@ //! Refer to the [configuration crate documentation](https://docs.rs/torrust-tracker-configuration) to know how to change log settings. 
use std::sync::Once; -use torrust_tracker_configuration::{Configuration, LogLevel}; +use torrust_tracker_configuration::{Configuration, Threshold}; use tracing::info; use tracing::level_filters::LevelFilter; static INIT: Once = Once::new(); -/// It redirects the log info to the standard output with the log level defined in the configuration +/// It redirects the log info to the standard output with the log threshold +/// defined in the configuration. pub fn setup(cfg: &Configuration) { - let tracing_level = map_to_tracing_level_filter(&cfg.logging.log_level); + let tracing_level = map_to_tracing_level_filter(&cfg.logging.threshold); if tracing_level == LevelFilter::OFF { return; @@ -31,14 +33,14 @@ pub fn setup(cfg: &Configuration) { }); } -fn map_to_tracing_level_filter(log_level: &LogLevel) -> LevelFilter { - match log_level { - LogLevel::Off => LevelFilter::OFF, - LogLevel::Error => LevelFilter::ERROR, - LogLevel::Warn => LevelFilter::WARN, - LogLevel::Info => LevelFilter::INFO, - LogLevel::Debug => LevelFilter::DEBUG, - LogLevel::Trace => LevelFilter::TRACE, +fn map_to_tracing_level_filter(threshold: &Threshold) -> LevelFilter { + match threshold { + Threshold::Off => LevelFilter::OFF, + Threshold::Error => LevelFilter::ERROR, + Threshold::Warn => LevelFilter::WARN, + Threshold::Info => LevelFilter::INFO, + Threshold::Debug => LevelFilter::DEBUG, + Threshold::Trace => LevelFilter::TRACE, } } diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index fd7295eab..95648a2b5 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -7,7 +7,7 @@ use crate::servers::http::HTTP_TRACKER_LOG_TARGET; use crate::servers::logging::STARTED_ON; use crate::servers::udp::UDP_TRACKER_LOG_TARGET; -const INFO_LOG_LEVEL: &str = "INFO"; +const INFO_THRESHOLD: &str = "INFO"; #[derive(Serialize, Deserialize, Debug, Default)] pub struct RunningServices { @@ -74,7 +74,7 @@ impl RunningServices { for line in logs.lines() { let 
clean_line = ansi_escape_re.replace_all(line, ""); - if !line.contains(INFO_LOG_LEVEL) { + if !line.contains(INFO_THRESHOLD) { continue; }; diff --git a/src/core/mod.rs b/src/core/mod.rs index 9a64826c9..06b1aa0f9 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -313,7 +313,7 @@ //! //! ```toml //! [logging] -//! log_level = "debug" +//! threshold = "debug" //! //! [core] //! inactive_peer_cleanup_interval = 600 diff --git a/src/lib.rs b/src/lib.rs index e5362259f..9776345b0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -168,7 +168,7 @@ //! //! ```toml //! [logging] -//! log_level = "info" +//! threshold = "info" //! //! [core] //! inactive_peer_cleanup_interval = 600 From e2dbb0bbf70f599656d11ea145cae00bbf91e10c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 21:07:15 +0100 Subject: [PATCH 0931/1003] chore(deps): update dependencies ```output cargo update Updating crates.io index Locking 4 packages to latest compatible versions Updating cc v1.0.103 -> v1.0.104 Updating hyper v1.3.1 -> v1.4.0 Updating hyper-util v0.1.5 -> v0.1.6 Updating serde_json v1.0.119 -> v1.0.120 ``` --- Cargo.lock | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 96c78db81..2215edfeb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -738,9 +738,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.103" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2755ff20a1d93490d26ba33a6f092a38a508398a5320df5d4b3014fcccce9410" +checksum = "74b6a57f98764a267ff415d50a25e6e166f3831a5071af4995296ea97d210490" dependencies = [ "jobserver", "libc", @@ -1707,9 +1707,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" +checksum = 
"c4fe55fb7a772d59a5ff1dfbff4fe0258d19b89fec4b233e75d35d5d2316badc" dependencies = [ "bytes", "futures-channel", @@ -1761,9 +1761,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956" dependencies = [ "bytes", "futures-channel", @@ -3392,9 +3392,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.119" +version = "1.0.120" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8eddb61f0697cc3989c5d64b452f5488e2b8a60fd7d5076a3045076ffef8cb0" +checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" dependencies = [ "indexmap 2.2.6", "itoa", From 4846c9fe45285c650204e636331ff2eaa8ab3b57 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Jul 2024 21:08:15 +0100 Subject: [PATCH 0932/1003] chore: update workflow action build-push-action --- .github/workflows/container.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 884a15843..9f51f3124 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -30,7 +30,7 @@ jobs: - id: build name: Build - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: file: ./Containerfile push: false @@ -127,7 +127,7 @@ jobs: uses: docker/setup-buildx-action@v3 - name: Build and push - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: file: ./Containerfile push: true @@ -168,7 +168,7 @@ jobs: uses: docker/setup-buildx-action@v3 - name: Build and push - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: file: ./Containerfile push: true From 60c68762b64b7b909987fc6002447af7c823adbe Mon Sep 17 00:00:00 2001 From: Jose Celano 
Date: Tue, 2 Jul 2024 08:23:23 +0100 Subject: [PATCH 0933/1003] feat: [#937] add version to configuration file following semver. Add version and namespace to the configuration. It will fail if the provided version is not supported. ```toml version = "2" ``` It only supports the exact match '2'. --- packages/configuration/src/lib.rs | 62 ++++++++++++++++++- packages/configuration/src/v2/mod.rs | 20 +++++- .../config/tracker.container.mysql.toml | 2 + .../config/tracker.container.sqlite3.toml | 2 + .../config/tracker.development.sqlite3.toml | 2 + .../config/tracker.e2e.container.sqlite3.toml | 2 + .../config/tracker.udp.benchmarking.toml | 2 + 7 files changed, 89 insertions(+), 3 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 841a5182e..e5bfa6eb7 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -12,7 +12,7 @@ use std::sync::Arc; use std::time::Duration; use camino::Utf8PathBuf; -use derive_more::Constructor; +use derive_more::{Constructor, Display}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; use thiserror::Error; @@ -45,6 +45,63 @@ pub type Threshold = v2::logging::Threshold; pub type AccessTokens = HashMap; +pub const LATEST_VERSION: &str = "2"; + +/// Info about the configuration specification. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display)] +pub struct Metadata { + #[serde(default = "Metadata::default_version")] + #[serde(flatten)] + version: Version, +} + +impl Default for Metadata { + fn default() -> Self { + Self { + version: Self::default_version(), + } + } +} + +impl Metadata { + fn default_version() -> Version { + Version::latest() + } +} + +/// The configuration version. 
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display)] +pub struct Version { + #[serde(default = "Version::default_semver")] + version: String, +} + +impl Default for Version { + fn default() -> Self { + Self { + version: Self::default_semver(), + } + } +} + +impl Version { + fn new(semver: &str) -> Self { + Self { + version: semver.to_owned(), + } + } + + fn latest() -> Self { + Self { + version: LATEST_VERSION.to_string(), + } + } + + fn default_semver() -> String { + LATEST_VERSION.to_string() + } +} + #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Constructor)] pub struct TrackerPolicy { // Cleanup job configuration @@ -208,6 +265,9 @@ pub enum Error { #[error("The error for errors that can never happen.")] Infallible, + + #[error("Unsupported configuration version: {version}")] + UnsupportedVersion { version: Version }, } impl From for Error { diff --git a/packages/configuration/src/v2/mod.rs b/packages/configuration/src/v2/mod.rs index 92ac88506..3425dc0de 100644 --- a/packages/configuration/src/v2/mod.rs +++ b/packages/configuration/src/v2/mod.rs @@ -251,16 +251,24 @@ use self::health_check_api::HealthCheckApi; use self::http_tracker::HttpTracker; use self::tracker_api::HttpApi; use self::udp_tracker::UdpTracker; -use crate::{Error, Info}; +use crate::{Error, Info, Metadata, Version}; + +/// This configuration version +const VERSION_2: &str = "2"; /// Prefix for env vars that overwrite configuration options. const CONFIG_OVERRIDE_PREFIX: &str = "TORRUST_TRACKER_CONFIG_OVERRIDE_"; + /// Path separator in env var names for nested values in configuration. const CONFIG_OVERRIDE_SEPARATOR: &str = "__"; /// Core configuration for the tracker. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Default)] pub struct Configuration { + /// Configuration metadata. 
+ #[serde(flatten)] + pub metadata: Metadata, + /// Logging configuration pub logging: Logging, @@ -326,6 +334,12 @@ impl Configuration { let config: Configuration = figment.extract()?; + if config.metadata.version != Version::new(VERSION_2) { + return Err(Error::UnsupportedVersion { + version: config.metadata.version, + }); + } + Ok(config) } @@ -378,7 +392,9 @@ mod tests { #[cfg(test)] fn default_config_toml() -> String { - let config = r#"[logging] + let config = r#"version = "2" + + [logging] threshold = "info" [core] diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml index 68cc8db8a..9465c0ef8 100644 --- a/share/default/config/tracker.container.mysql.toml +++ b/share/default/config/tracker.container.mysql.toml @@ -1,3 +1,5 @@ +version = "2" + [core.database] driver = "MySQL" path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" diff --git a/share/default/config/tracker.container.sqlite3.toml b/share/default/config/tracker.container.sqlite3.toml index 63e169a70..aa8aefa5e 100644 --- a/share/default/config/tracker.container.sqlite3.toml +++ b/share/default/config/tracker.container.sqlite3.toml @@ -1,3 +1,5 @@ +version = "2" + [core.database] path = "/var/lib/torrust/tracker/database/sqlite3.db" diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 84754794e..554835922 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -1,3 +1,5 @@ +version = "2" + [[udp_trackers]] bind_address = "0.0.0.0:6969" diff --git a/share/default/config/tracker.e2e.container.sqlite3.toml b/share/default/config/tracker.e2e.container.sqlite3.toml index fb33a8e32..6b1383fb5 100644 --- a/share/default/config/tracker.e2e.container.sqlite3.toml +++ b/share/default/config/tracker.e2e.container.sqlite3.toml @@ -1,3 +1,5 @@ +version = "2" + [core.database] path 
= "/var/lib/torrust/tracker/database/sqlite3.db" diff --git a/share/default/config/tracker.udp.benchmarking.toml b/share/default/config/tracker.udp.benchmarking.toml index c01fcd25e..907a05456 100644 --- a/share/default/config/tracker.udp.benchmarking.toml +++ b/share/default/config/tracker.udp.benchmarking.toml @@ -1,3 +1,5 @@ +version = "2" + [logging] threshold = "error" From 16aa652821434a43e4efd7f8d85a63b9fae3ff3c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 4 Jul 2024 11:23:02 +0100 Subject: [PATCH 0934/1003] chore(deps): add url dependency It will be used to parse the MySQL path in the configuration. For example: ```toml [core.database] driver = "MySQL" path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" ``` --- Cargo.lock | 1 + packages/configuration/Cargo.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 2215edfeb..4e25fc4c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4018,6 +4018,7 @@ dependencies = [ "toml", "torrust-tracker-located-error", "torrust-tracker-primitives", + "url", "uuid", ] diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 51260d082..90c816344 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -25,6 +25,7 @@ thiserror = "1" toml = "0" torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "../located-error" } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } +url = "2.5.2" [dev-dependencies] uuid = { version = "1", features = ["v4"] } From 4673514bdd401c5546f0da7ec71eb7dd4c4f9c1a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 4 Jul 2024 11:28:44 +0100 Subject: [PATCH 0935/1003] fix: [#948] mask secrets in logs --- packages/configuration/src/lib.rs | 4 +- packages/configuration/src/v2/database.rs | 41 +++++++++++++++++++- packages/configuration/src/v2/mod.rs | 14 ++++++- packages/configuration/src/v2/tracker_api.rs | 6 +++ 
src/bootstrap/app.rs | 2 +- 5 files changed, 62 insertions(+), 5 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index e5bfa6eb7..32831409d 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -48,7 +48,7 @@ pub type AccessTokens = HashMap; pub const LATEST_VERSION: &str = "2"; /// Info about the configuration specification. -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display, Clone)] pub struct Metadata { #[serde(default = "Metadata::default_version")] #[serde(flatten)] @@ -70,7 +70,7 @@ impl Metadata { } /// The configuration version. -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display, Clone)] pub struct Version { #[serde(default = "Version::default_semver")] version: String, diff --git a/packages/configuration/src/v2/database.rs b/packages/configuration/src/v2/database.rs index b029175ce..932db552c 100644 --- a/packages/configuration/src/v2/database.rs +++ b/packages/configuration/src/v2/database.rs @@ -1,5 +1,6 @@ use serde::{Deserialize, Serialize}; use torrust_tracker_primitives::DatabaseDriver; +use url::Url; #[allow(clippy::struct_excessive_bools)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] @@ -13,7 +14,7 @@ pub struct Database { /// For `Sqlite3`, the format is `path/to/database.db`, for example: /// `./storage/tracker/lib/database/sqlite3.db`. /// For `Mysql`, the format is `mysql://db_user:db_user_password:port/db_name`, for - /// example: `root:password@localhost:3306/torrust`. + /// example: `mysql://root:password@localhost:3306/torrust`. #[serde(default = "Database::default_path")] pub path: String, } @@ -35,4 +36,42 @@ impl Database { fn default_path() -> String { String::from("./storage/tracker/lib/database/sqlite3.db") } + + /// Masks secrets in the configuration. 
+ /// + /// # Panics + /// + /// Will panic if the database path for `MySQL` is not a valid URL. + pub fn mask_secrets(&mut self) { + match self.driver { + DatabaseDriver::Sqlite3 => { + // Nothing to mask + } + DatabaseDriver::MySQL => { + let mut url = Url::parse(&self.path).expect("path for MySQL driver should be a valid URL"); + url.set_password(Some("***")).expect("url password should be changed"); + self.path = url.to_string(); + } + } + } +} + +#[cfg(test)] +mod tests { + + use torrust_tracker_primitives::DatabaseDriver; + + use super::Database; + + #[test] + fn it_should_allow_masking_the_mysql_user_password() { + let mut database = Database { + driver: DatabaseDriver::MySQL, + path: "mysql://root:password@localhost:3306/torrust".to_string(), + }; + + database.mask_secrets(); + + assert_eq!(database.path, "mysql://root:***@localhost:3306/torrust".to_string()); + } } diff --git a/packages/configuration/src/v2/mod.rs b/packages/configuration/src/v2/mod.rs index 3425dc0de..141bab00f 100644 --- a/packages/configuration/src/v2/mod.rs +++ b/packages/configuration/src/v2/mod.rs @@ -263,7 +263,7 @@ const CONFIG_OVERRIDE_PREFIX: &str = "TORRUST_TRACKER_CONFIG_OVERRIDE_"; const CONFIG_OVERRIDE_SEPARATOR: &str = "__"; /// Core configuration for the tracker. -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Default)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Default, Clone)] pub struct Configuration { /// Configuration metadata. #[serde(flatten)] @@ -380,6 +380,18 @@ impl Configuration { // code-review: do we need to use Figment also to serialize into json? serde_json::to_string_pretty(self).expect("Could not encode JSON value") } + + /// Masks secrets in the configuration. 
+ #[must_use] + pub fn mask_secrets(mut self) -> Self { + self.core.database.mask_secrets(); + + if let Some(ref mut api) = self.http_api { + api.mask_secrets(); + } + + self + } } #[cfg(test)] diff --git a/packages/configuration/src/v2/tracker_api.rs b/packages/configuration/src/v2/tracker_api.rs index 1a2e0cbf0..dbbff7995 100644 --- a/packages/configuration/src/v2/tracker_api.rs +++ b/packages/configuration/src/v2/tracker_api.rs @@ -61,6 +61,12 @@ impl HttpApi { pub fn override_admin_token(&mut self, api_admin_token: &str) { self.access_tokens.insert("admin".to_string(), api_admin_token.to_string()); } + + pub fn mask_secrets(&mut self) { + for token in self.access_tokens.values_mut() { + *token = "***".to_string(); + } + } } #[cfg(test)] diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 023520507..cfb84a2d1 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -30,7 +30,7 @@ pub fn setup() -> (Configuration, Arc) { let tracker = initialize_with_configuration(&configuration); - info!("Configuration:\n{}", configuration.to_json()); + info!("Configuration:\n{}", configuration.clone().mask_secrets().to_json()); (configuration, tracker) } From 9be96380f92b56df40273bd33b69ede51e7fc963 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 5 Jul 2024 10:02:56 +0100 Subject: [PATCH 0936/1003] refactor: [#950] decouple database driver enum Use a different enum for configuration and domain. 
--- packages/configuration/src/lib.rs | 1 + packages/configuration/src/v2/database.rs | 29 +++++++++++++++-------- packages/primitives/src/lib.rs | 3 --- src/core/mod.rs | 10 ++++++-- 4 files changed, 28 insertions(+), 15 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 32831409d..5e839b7b1 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -41,6 +41,7 @@ pub type HttpApi = v2::tracker_api::HttpApi; pub type HttpTracker = v2::http_tracker::HttpTracker; pub type UdpTracker = v2::udp_tracker::UdpTracker; pub type Database = v2::database::Database; +pub type Driver = v2::database::Driver; pub type Threshold = v2::logging::Threshold; pub type AccessTokens = HashMap; diff --git a/packages/configuration/src/v2/database.rs b/packages/configuration/src/v2/database.rs index 932db552c..ef462556d 100644 --- a/packages/configuration/src/v2/database.rs +++ b/packages/configuration/src/v2/database.rs @@ -1,5 +1,4 @@ use serde::{Deserialize, Serialize}; -use torrust_tracker_primitives::DatabaseDriver; use url::Url; #[allow(clippy::struct_excessive_bools)] @@ -8,7 +7,7 @@ pub struct Database { // Database configuration /// Database driver. Possible values are: `Sqlite3`, and `MySQL`. #[serde(default = "Database::default_driver")] - pub driver: DatabaseDriver, + pub driver: Driver, /// Database connection string. The format depends on the database driver. /// For `Sqlite3`, the format is `path/to/database.db`, for example: @@ -29,8 +28,8 @@ impl Default for Database { } impl Database { - fn default_driver() -> DatabaseDriver { - DatabaseDriver::Sqlite3 + fn default_driver() -> Driver { + Driver::Sqlite3 } fn default_path() -> String { @@ -44,10 +43,10 @@ impl Database { /// Will panic if the database path for `MySQL` is not a valid URL. 
pub fn mask_secrets(&mut self) { match self.driver { - DatabaseDriver::Sqlite3 => { + Driver::Sqlite3 => { // Nothing to mask } - DatabaseDriver::MySQL => { + Driver::MySQL => { let mut url = Url::parse(&self.path).expect("path for MySQL driver should be a valid URL"); url.set_password(Some("***")).expect("url password should be changed"); self.path = url.to_string(); @@ -56,17 +55,27 @@ impl Database { } } +/// The database management system used by the tracker. +#[derive(Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Debug, Hash, Clone)] +pub enum Driver { + // todo: + // - Rename serialized values to lowercase: `sqlite3` and `mysql`. + // - Add serde default values. + /// The `Sqlite3` database driver. + Sqlite3, + /// The `MySQL` database driver. + MySQL, +} + #[cfg(test)] mod tests { - use torrust_tracker_primitives::DatabaseDriver; - - use super::Database; + use super::{Database, Driver}; #[test] fn it_should_allow_masking_the_mysql_user_password() { let mut database = Database { - driver: DatabaseDriver::MySQL, + driver: Driver::MySQL, path: "mysql://root:password@localhost:3306/torrust".to_string(), }; diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index 7ad1d35b4..9bd3bad55 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -52,9 +52,6 @@ pub struct NumberOfBytes(pub i64); /// For more information about persistence. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, derive_more::Display, Clone)] pub enum DatabaseDriver { - // TODO: - // - Move to the database crate once that gets its own crate. - // - Rename serialized values to lowercase: `sqlite3` and `mysql`. /// The Sqlite3 database driver. Sqlite3, /// The `MySQL` database driver. 
diff --git a/src/core/mod.rs b/src/core/mod.rs index 06b1aa0f9..57cf56bca 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -456,11 +456,12 @@ use std::time::Duration; use derive_more::Constructor; use tokio::sync::mpsc::error::SendError; use torrust_tracker_clock::clock::Time; +use torrust_tracker_configuration::v2::database; use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DatabaseDriver}; use torrust_tracker_torrent_repository::entry::EntrySync; use torrust_tracker_torrent_repository::repository::Repository; use tracing::debug; @@ -564,7 +565,12 @@ impl Tracker { stats_event_sender: Option>, stats_repository: statistics::Repo, ) -> Result { - let database = Arc::new(databases::driver::build(&config.database.driver, &config.database.path)?); + let driver = match config.database.driver { + database::Driver::Sqlite3 => DatabaseDriver::Sqlite3, + database::Driver::MySQL => DatabaseDriver::MySQL, + }; + + let database = Arc::new(databases::driver::build(&driver, &config.database.path)?); Ok(Tracker { config: config.clone(), From 954295aa09831c59b2dfe6206c20acdb0229543e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 5 Jul 2024 10:12:29 +0100 Subject: [PATCH 0937/1003] refactor: [#950] move DatabaseDriver to databases mod --- packages/primitives/src/lib.rs | 16 ---------------- src/core/databases/driver.rs | 23 ++++++++++++++++++++--- src/core/databases/error.rs | 3 ++- src/core/databases/mysql.rs | 3 ++- src/core/databases/sqlite.rs | 3 ++- src/core/mod.rs | 3 ++- 6 files changed, 28 insertions(+), 23 deletions(-) diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index 9bd3bad55..d6f29c2b5 100644 --- a/packages/primitives/src/lib.rs +++ 
b/packages/primitives/src/lib.rs @@ -42,20 +42,4 @@ pub enum IPVersion { #[derive(Hash, Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] pub struct NumberOfBytes(pub i64); -/// The database management system used by the tracker. -/// -/// Refer to: -/// -/// - [Torrust Tracker Configuration](https://docs.rs/torrust-tracker-configuration). -/// - [Torrust Tracker](https://docs.rs/torrust-tracker). -/// -/// For more information about persistence. -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, derive_more::Display, Clone)] -pub enum DatabaseDriver { - /// The Sqlite3 database driver. - Sqlite3, - /// The `MySQL` database driver. - MySQL, -} - pub type PersistentTorrents = BTreeMap; diff --git a/src/core/databases/driver.rs b/src/core/databases/driver.rs index 99d96c6b1..f6c7aeb08 100644 --- a/src/core/databases/driver.rs +++ b/src/core/databases/driver.rs @@ -2,20 +2,37 @@ //! //! See [`databases::driver::build`](crate::core::databases::driver::build) //! function for more information. -use torrust_tracker_primitives::DatabaseDriver; +use serde::{Deserialize, Serialize}; use super::error::Error; use super::mysql::Mysql; use super::sqlite::Sqlite; use super::{Builder, Database}; +/// The database management system used by the tracker. +/// +/// Refer to: +/// +/// - [Torrust Tracker Configuration](https://docs.rs/torrust-tracker-configuration). +/// - [Torrust Tracker](https://docs.rs/torrust-tracker). +/// +/// For more information about persistence. +#[allow(clippy::module_name_repetitions)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, derive_more::Display, Clone)] +pub enum DatabaseDriver { + /// The Sqlite3 database driver. + Sqlite3, + /// The `MySQL` database driver. + MySQL, +} + /// It builds a new database driver. 
/// /// Example for `SQLite3`: /// /// ```rust,no_run /// use torrust_tracker::core::databases; -/// use torrust_tracker_primitives::DatabaseDriver; +/// use torrust_tracker::core::databases::driver::DatabaseDriver; /// /// let db_driver = DatabaseDriver::Sqlite3; /// let db_path = "./storage/tracker/lib/database/sqlite3.db".to_string(); @@ -26,7 +43,7 @@ use super::{Builder, Database}; /// /// ```rust,no_run /// use torrust_tracker::core::databases; -/// use torrust_tracker_primitives::DatabaseDriver; +/// use torrust_tracker::core::databases::driver::DatabaseDriver; /// /// let db_driver = DatabaseDriver::MySQL; /// let db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker".to_string(); diff --git a/src/core/databases/error.rs b/src/core/databases/error.rs index a5179e3a4..315d1343a 100644 --- a/src/core/databases/error.rs +++ b/src/core/databases/error.rs @@ -6,7 +6,8 @@ use std::sync::Arc; use r2d2_mysql::mysql::UrlError; use torrust_tracker_located_error::{DynError, Located, LocatedError}; -use torrust_tracker_primitives::DatabaseDriver; + +use super::driver::DatabaseDriver; #[derive(thiserror::Error, Debug, Clone)] pub enum Error { diff --git a/src/core/databases/mysql.rs b/src/core/databases/mysql.rs index ebb002d31..e16929a04 100644 --- a/src/core/databases/mysql.rs +++ b/src/core/databases/mysql.rs @@ -8,9 +8,10 @@ use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; use torrust_tracker_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::{DatabaseDriver, PersistentTorrents}; +use torrust_tracker_primitives::PersistentTorrents; use tracing::debug; +use super::driver::DatabaseDriver; use super::{Database, Error}; use crate::core::auth::{self, Key}; use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; diff --git a/src/core/databases/sqlite.rs b/src/core/databases/sqlite.rs index 53a01f80c..93bf1f9a1 100644 --- 
a/src/core/databases/sqlite.rs +++ b/src/core/databases/sqlite.rs @@ -6,8 +6,9 @@ use async_trait::async_trait; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; use torrust_tracker_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::{DatabaseDriver, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_primitives::{DurationSinceUnixEpoch, PersistentTorrents}; +use super::driver::DatabaseDriver; use super::{Database, Error}; use crate::core::auth::{self, Key}; diff --git a/src/core/mod.rs b/src/core/mod.rs index 57cf56bca..c70d81fed 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -453,15 +453,16 @@ use std::panic::Location; use std::sync::Arc; use std::time::Duration; +use databases::driver::DatabaseDriver; use derive_more::Constructor; use tokio::sync::mpsc::error::SendError; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::v2::database; use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use torrust_tracker_primitives::{peer, DatabaseDriver}; use torrust_tracker_torrent_repository::entry::EntrySync; use torrust_tracker_torrent_repository::repository::Repository; use tracing::debug; From d970bb873d972903dbe06dbbeb9341e6dc78201f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 5 Jul 2024 10:15:00 +0100 Subject: [PATCH 0938/1003] refactor: [#950] rename DatabaseDriver to Driver --- src/core/databases/driver.rs | 17 ++++++++--------- src/core/databases/error.rs | 26 +++++++++++++------------- src/core/databases/mysql.rs | 4 ++-- src/core/databases/sqlite.rs | 6 +++--- src/core/mod.rs | 6 +++--- 5 files changed, 29 insertions(+), 30 deletions(-) diff --git a/src/core/databases/driver.rs b/src/core/databases/driver.rs index f6c7aeb08..a456a2650 100644 
--- a/src/core/databases/driver.rs +++ b/src/core/databases/driver.rs @@ -17,9 +17,8 @@ use super::{Builder, Database}; /// - [Torrust Tracker](https://docs.rs/torrust-tracker). /// /// For more information about persistence. -#[allow(clippy::module_name_repetitions)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, derive_more::Display, Clone)] -pub enum DatabaseDriver { +pub enum Driver { /// The Sqlite3 database driver. Sqlite3, /// The `MySQL` database driver. @@ -32,9 +31,9 @@ pub enum DatabaseDriver { /// /// ```rust,no_run /// use torrust_tracker::core::databases; -/// use torrust_tracker::core::databases::driver::DatabaseDriver; +/// use torrust_tracker::core::databases::driver::Driver; /// -/// let db_driver = DatabaseDriver::Sqlite3; +/// let db_driver = Driver::Sqlite3; /// let db_path = "./storage/tracker/lib/database/sqlite3.db".to_string(); /// let database = databases::driver::build(&db_driver, &db_path); /// ``` @@ -43,9 +42,9 @@ pub enum DatabaseDriver { /// /// ```rust,no_run /// use torrust_tracker::core::databases; -/// use torrust_tracker::core::databases::driver::DatabaseDriver; +/// use torrust_tracker::core::databases::driver::Driver; /// -/// let db_driver = DatabaseDriver::MySQL; +/// let db_driver = Driver::MySQL; /// let db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker".to_string(); /// let database = databases::driver::build(&db_driver, &db_path); /// ``` @@ -62,10 +61,10 @@ pub enum DatabaseDriver { /// # Panics /// /// This function will panic if unable to create database tables. 
-pub fn build(driver: &DatabaseDriver, db_path: &str) -> Result, Error> { +pub fn build(driver: &Driver, db_path: &str) -> Result, Error> { let database = match driver { - DatabaseDriver::Sqlite3 => Builder::::build(db_path), - DatabaseDriver::MySQL => Builder::::build(db_path), + Driver::Sqlite3 => Builder::::build(db_path), + Driver::MySQL => Builder::::build(db_path), }?; database.create_database_tables().expect("Could not create database tables."); diff --git a/src/core/databases/error.rs b/src/core/databases/error.rs index 315d1343a..4d64baf48 100644 --- a/src/core/databases/error.rs +++ b/src/core/databases/error.rs @@ -7,7 +7,7 @@ use std::sync::Arc; use r2d2_mysql::mysql::UrlError; use torrust_tracker_located_error::{DynError, Located, LocatedError}; -use super::driver::DatabaseDriver; +use super::driver::Driver; #[derive(thiserror::Error, Debug, Clone)] pub enum Error { @@ -15,21 +15,21 @@ pub enum Error { #[error("The {driver} query unexpectedly returned nothing: {source}")] QueryReturnedNoRows { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - driver: DatabaseDriver, + driver: Driver, }, /// The query was malformed. 
#[error("The {driver} query was malformed: {source}")] InvalidQuery { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - driver: DatabaseDriver, + driver: Driver, }, /// Unable to insert a record into the database #[error("Unable to insert record into {driver} database, {location}")] InsertFailed { location: &'static Location<'static>, - driver: DatabaseDriver, + driver: Driver, }, /// Unable to delete a record into the database @@ -37,21 +37,21 @@ pub enum Error { DeleteFailed { location: &'static Location<'static>, error_code: usize, - driver: DatabaseDriver, + driver: Driver, }, /// Unable to connect to the database #[error("Failed to connect to {driver} database: {source}")] ConnectionError { source: LocatedError<'static, UrlError>, - driver: DatabaseDriver, + driver: Driver, }, /// Unable to create a connection pool #[error("Failed to create r2d2 {driver} connection pool: {source}")] ConnectionPool { source: LocatedError<'static, r2d2::Error>, - driver: DatabaseDriver, + driver: Driver, }, } @@ -61,11 +61,11 @@ impl From for Error { match err { r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows { source: (Arc::new(err) as DynError).into(), - driver: DatabaseDriver::Sqlite3, + driver: Driver::Sqlite3, }, _ => Error::InvalidQuery { source: (Arc::new(err) as DynError).into(), - driver: DatabaseDriver::Sqlite3, + driver: Driver::Sqlite3, }, } } @@ -77,7 +77,7 @@ impl From for Error { let e: DynError = Arc::new(err); Error::InvalidQuery { source: e.into(), - driver: DatabaseDriver::MySQL, + driver: Driver::MySQL, } } } @@ -87,14 +87,14 @@ impl From for Error { fn from(err: UrlError) -> Self { Self::ConnectionError { source: Located(err).into(), - driver: DatabaseDriver::MySQL, + driver: Driver::MySQL, } } } -impl From<(r2d2::Error, DatabaseDriver)> for Error { +impl From<(r2d2::Error, Driver)> for Error { #[track_caller] - fn from(e: (r2d2::Error, DatabaseDriver)) -> Self { + fn from(e: (r2d2::Error, Driver)) -> Self 
{ let (err, driver) = e; Self::ConnectionPool { source: Located(err).into(), diff --git a/src/core/databases/mysql.rs b/src/core/databases/mysql.rs index e16929a04..c6094cd8f 100644 --- a/src/core/databases/mysql.rs +++ b/src/core/databases/mysql.rs @@ -11,12 +11,12 @@ use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::PersistentTorrents; use tracing::debug; -use super::driver::DatabaseDriver; +use super::driver::Driver; use super::{Database, Error}; use crate::core::auth::{self, Key}; use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; -const DRIVER: DatabaseDriver = DatabaseDriver::MySQL; +const DRIVER: Driver = Driver::MySQL; pub struct Mysql { pool: Pool, diff --git a/src/core/databases/sqlite.rs b/src/core/databases/sqlite.rs index 93bf1f9a1..071a824c5 100644 --- a/src/core/databases/sqlite.rs +++ b/src/core/databases/sqlite.rs @@ -8,11 +8,11 @@ use r2d2_sqlite::SqliteConnectionManager; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::{DurationSinceUnixEpoch, PersistentTorrents}; -use super::driver::DatabaseDriver; +use super::driver::Driver; use super::{Database, Error}; use crate::core::auth::{self, Key}; -const DRIVER: DatabaseDriver = DatabaseDriver::Sqlite3; +const DRIVER: Driver = Driver::Sqlite3; pub struct Sqlite { pool: Pool, @@ -29,7 +29,7 @@ impl Database for Sqlite { /// Will return `r2d2::Error` if `db_path` is not able to create `SqLite` database. fn new(db_path: &str) -> Result { let cm = SqliteConnectionManager::file(db_path); - Pool::new(cm).map_or_else(|err| Err((err, DatabaseDriver::Sqlite3).into()), |pool| Ok(Sqlite { pool })) + Pool::new(cm).map_or_else(|err| Err((err, Driver::Sqlite3).into()), |pool| Ok(Sqlite { pool })) } /// Refer to [`databases::Database::create_database_tables`](crate::core::databases::Database::create_database_tables). 
diff --git a/src/core/mod.rs b/src/core/mod.rs index c70d81fed..ce6189157 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -453,7 +453,7 @@ use std::panic::Location; use std::sync::Arc; use std::time::Duration; -use databases::driver::DatabaseDriver; +use databases::driver::Driver; use derive_more::Constructor; use tokio::sync::mpsc::error::SendError; use torrust_tracker_clock::clock::Time; @@ -567,8 +567,8 @@ impl Tracker { stats_repository: statistics::Repo, ) -> Result { let driver = match config.database.driver { - database::Driver::Sqlite3 => DatabaseDriver::Sqlite3, - database::Driver::MySQL => DatabaseDriver::MySQL, + database::Driver::Sqlite3 => Driver::Sqlite3, + database::Driver::MySQL => Driver::MySQL, }; let database = Arc::new(databases::driver::build(&driver, &config.database.path)?); From 9d72f5159887e21ff98b5fcb29ce87c5c1c86ca9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 5 Jul 2024 10:21:47 +0100 Subject: [PATCH 0939/1003] feat: [#950] use lowercase for database driver values in configuration ```toml [core.database] driver = "sqlite3" ``` We are normalizing all enum variants in configuration to lowercase. 
--- Containerfile | 2 +- compose.yaml | 2 +- docs/containers.md | 4 ++-- packages/configuration/src/v2/database.rs | 8 +++----- packages/configuration/src/v2/mod.rs | 4 ++-- share/container/entry_script_sh | 6 +++--- share/default/config/tracker.container.mysql.toml | 2 +- src/core/mod.rs | 2 +- src/lib.rs | 2 +- 9 files changed, 15 insertions(+), 17 deletions(-) diff --git a/Containerfile b/Containerfile index d55d2f300..d302e5c66 100644 --- a/Containerfile +++ b/Containerfile @@ -96,7 +96,7 @@ RUN ["/busybox/cp", "-sp", "/busybox/sh","/busybox/cat","/busybox/ls","/busybox/ COPY --from=gcc --chmod=0555 /usr/local/bin/su-exec /bin/su-exec ARG TORRUST_TRACKER_CONFIG_TOML_PATH="/etc/torrust/tracker/tracker.toml" -ARG TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER="Sqlite3" +ARG TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER="sqlite3" ARG USER_ID=1000 ARG UDP_PORT=6969 ARG HTTP_PORT=7070 diff --git a/compose.yaml b/compose.yaml index cab5c6d5e..c2e7c63bd 100644 --- a/compose.yaml +++ b/compose.yaml @@ -4,7 +4,7 @@ services: image: torrust-tracker:release tty: true environment: - - TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER=${TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER:-MySQL} + - TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER=${TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER:-mysql} - TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN=${TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN:-MyAccessToken} networks: - server_side diff --git a/docs/containers.md b/docs/containers.md index 82c67c26e..cddd2ba98 100644 --- a/docs/containers.md +++ b/docs/containers.md @@ -149,7 +149,7 @@ The following environmental variables can be set: - `TORRUST_TRACKER_CONFIG_TOML_PATH` - The in-container path to the tracker configuration file, (default: `"/etc/torrust/tracker/tracker.toml"`). - `TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN` - Override of the admin token. 
If set, this value overrides any value set in the config. -- `TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER` - The database type used for the container, (options: `Sqlite3`, `MySQL`, default `Sqlite3`). Please Note: This dose not override the database configuration within the `.toml` config file. +- `TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER` - The database type used for the container, (options: `sqlite3`, `mysql`, default `sqlite3`). Please Note: This dose not override the database configuration within the `.toml` config file. - `TORRUST_TRACKER_CONFIG_TOML` - Load config from this environmental variable instead from a file, (i.e: `TORRUST_TRACKER_CONFIG_TOML=$(cat tracker-tracker.toml)`). - `USER_ID` - The user id for the runtime crated `torrust` user. Please Note: This user id should match the ownership of the host-mapped volumes, (default `1000`). - `UDP_PORT` - The port for the UDP tracker. This should match the port used in the configuration, (default `6969`). @@ -244,7 +244,7 @@ The docker-compose configuration includes the MySQL service configuration. If yo ```toml [core.database] -driver = "MySQL" +driver = "mysql" path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" ``` diff --git a/packages/configuration/src/v2/database.rs b/packages/configuration/src/v2/database.rs index ef462556d..c2b24d809 100644 --- a/packages/configuration/src/v2/database.rs +++ b/packages/configuration/src/v2/database.rs @@ -5,12 +5,12 @@ use url::Url; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct Database { // Database configuration - /// Database driver. Possible values are: `Sqlite3`, and `MySQL`. + /// Database driver. Possible values are: `sqlite3`, and `mysql`. #[serde(default = "Database::default_driver")] pub driver: Driver, /// Database connection string. The format depends on the database driver. 
- /// For `Sqlite3`, the format is `path/to/database.db`, for example: + /// For `sqlite3`, the format is `path/to/database.db`, for example: /// `./storage/tracker/lib/database/sqlite3.db`. /// For `Mysql`, the format is `mysql://db_user:db_user_password:port/db_name`, for /// example: `mysql://root:password@localhost:3306/torrust`. @@ -57,10 +57,8 @@ impl Database { /// The database management system used by the tracker. #[derive(Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Debug, Hash, Clone)] +#[serde(rename_all = "lowercase")] pub enum Driver { - // todo: - // - Rename serialized values to lowercase: `sqlite3` and `mysql`. - // - Add serde default values. /// The `Sqlite3` database driver. Sqlite3, /// The `MySQL` database driver. diff --git a/packages/configuration/src/v2/mod.rs b/packages/configuration/src/v2/mod.rs index 141bab00f..35c8b1070 100644 --- a/packages/configuration/src/v2/mod.rs +++ b/packages/configuration/src/v2/mod.rs @@ -209,7 +209,7 @@ //! interval_min = 120 //! //! [core.database] -//! driver = "Sqlite3" +//! driver = "sqlite3" //! path = "./storage/tracker/lib/database/sqlite3.db" //! //! 
[core.net] @@ -420,7 +420,7 @@ mod tests { interval_min = 120 [core.database] - driver = "Sqlite3" + driver = "sqlite3" path = "./storage/tracker/lib/database/sqlite3.db" [core.net] diff --git a/share/container/entry_script_sh b/share/container/entry_script_sh index 0668114fd..32cdfe33d 100644 --- a/share/container/entry_script_sh +++ b/share/container/entry_script_sh @@ -27,7 +27,7 @@ chmod -R 2770 /var/lib/torrust /var/log/torrust /etc/torrust # Install the database and config: if [ -n "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER" ]; then - if cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER" "Sqlite3"; then + if cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER" "sqlite3"; then # Select Sqlite3 empty database default_database="/usr/share/torrust/default/database/tracker.sqlite3.db" @@ -35,7 +35,7 @@ if [ -n "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER" ]; then # Select Sqlite3 default configuration default_config="/usr/share/torrust/default/config/tracker.container.sqlite3.toml" - elif cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER" "MySQL"; then + elif cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER" "mysql"; then # (no database file needed for MySQL) @@ -44,7 +44,7 @@ if [ -n "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER" ]; then else echo "Error: Unsupported Database Type: \"$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER\"." - echo "Please Note: Supported Database Types: \"Sqlite3\", \"MySQL\"." + echo "Please Note: Supported Database Types: \"sqlite3\", \"mysql\"." 
exit 1 fi else diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml index 9465c0ef8..1c84fb2e2 100644 --- a/share/default/config/tracker.container.mysql.toml +++ b/share/default/config/tracker.container.mysql.toml @@ -1,7 +1,7 @@ version = "2" [core.database] -driver = "MySQL" +driver = "mysql" path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" # Uncomment to enable services diff --git a/src/core/mod.rs b/src/core/mod.rs index ce6189157..ee90cea39 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -326,7 +326,7 @@ //! interval_min = 120 //! //! [core.database] -//! driver = "Sqlite3" +//! driver = "sqlite3" //! path = "./storage/tracker/lib/database/sqlite3.db" //! //! [core.net] diff --git a/src/lib.rs b/src/lib.rs index 9776345b0..d242ac80e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -181,7 +181,7 @@ //! interval_min = 120 //! //! [core.database] -//! driver = "Sqlite3" +//! driver = "sqlite3" //! path = "./storage/tracker/lib/database/sqlite3.db" //! //! 
[core.net] From ca348a839867ec56a66b0ce8abfd714d7633ede8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 5 Jul 2024 10:31:11 +0100 Subject: [PATCH 0940/1003] chore: remove unused dependency --- Cargo.lock | 1 - packages/configuration/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4e25fc4c4..8ae3d20df 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4017,7 +4017,6 @@ dependencies = [ "thiserror", "toml", "torrust-tracker-located-error", - "torrust-tracker-primitives", "url", "uuid", ] diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 90c816344..ae9c64cfe 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -24,7 +24,6 @@ serde_with = "3" thiserror = "1" toml = "0" torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "../located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } url = "2.5.2" [dev-dependencies] From 019cf9ffdc327193f4c2e065e46a238ac76bdf1f Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 8 Jul 2024 16:21:37 +0200 Subject: [PATCH 0941/1003] udp: processor for requests --- src/servers/udp/handlers.rs | 6 +-- src/servers/udp/server/bound_socket.rs | 7 +-- src/servers/udp/server/launcher.rs | 65 ++----------------------- src/servers/udp/server/mod.rs | 1 + src/servers/udp/server/processor.rs | 66 ++++++++++++++++++++++++++ src/servers/udp/server/receiver.rs | 8 ++-- src/shared/bit_torrent/common.rs | 12 ----- 7 files changed, 81 insertions(+), 84 deletions(-) create mode 100644 src/servers/udp/server/processor.rs diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index f1f61ee6b..c6b2458e5 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -33,7 +33,7 @@ use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; /// - Delegating the request to the correct handler depending on the request type. 
/// /// It will return an `Error` response if the request is invalid. -pub(crate) async fn handle_packet(udp_request: RawRequest, tracker: &Arc, addr: SocketAddr) -> Response { +pub(crate) async fn handle_packet(udp_request: RawRequest, tracker: &Tracker, local_addr: SocketAddr) -> Response { debug!("Handling Packets: {udp_request:?}"); let start_time = Instant::now(); @@ -47,7 +47,7 @@ pub(crate) async fn handle_packet(udp_request: RawRequest, tracker: &Arc { - log_request(&request, &request_id, &addr); + log_request(&request, &request_id, &local_addr); let transaction_id = match &request { Request::Connect(connect_request) => connect_request.transaction_id, @@ -62,7 +62,7 @@ pub(crate) async fn handle_packet(udp_request: RawRequest, tracker: &Arc, + socket: tokio::net::UdpSocket, } impl BoundSocket { @@ -30,9 +29,7 @@ impl BoundSocket { let local_addr = format!("udp://{addr}"); tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "UdpSocket::new (bound)"); - Ok(Self { - socket: Arc::new(socket), - }) + Ok(Self { socket }) } /// # Panics diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index bb7c7d70f..7b40f6604 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -1,26 +1,23 @@ -use std::io::Cursor; use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; -use aquatic_udp_protocol::Response; use derive_more::Constructor; use futures_util::StreamExt; use tokio::select; use tokio::sync::oneshot; use super::request_buffer::ActiveRequests; -use super::RawRequest; use crate::bootstrap::jobs::Started; use crate::core::Tracker; use crate::servers::logging::STARTED_ON; use crate::servers::registar::ServiceHealthCheckJob; use crate::servers::signals::{shutdown_signal_with_message, Halted}; use crate::servers::udp::server::bound_socket::BoundSocket; +use crate::servers::udp::server::processor::Processor; use crate::servers::udp::server::receiver::Receiver; -use 
crate::servers::udp::{handlers, UDP_TRACKER_LOG_TARGET}; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; use crate::shared::bit_torrent::tracker::udp::client::check; -use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; /// A UDP server instance launcher. #[derive(Constructor)] @@ -109,6 +106,8 @@ impl Launcher { let local_addr = format!("udp://{addr}"); loop { + let processor = Processor::new(receiver.socket.clone(), tracker.clone()); + if let Some(req) = { tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server (wait for request)"); receiver.next().await @@ -138,9 +137,7 @@ impl Launcher { // are only adding and removing tasks without given them the // chance to finish. However, the buffer is yielding before // aborting one tasks, giving it the chance to finish. - let abort_handle: tokio::task::AbortHandle = - tokio::task::spawn(Launcher::process_request(req, tracker.clone(), receiver.bound_socket.clone())) - .abort_handle(); + let abort_handle: tokio::task::AbortHandle = tokio::task::spawn(processor.process_request(req)).abort_handle(); if abort_handle.is_finished() { continue; @@ -156,56 +153,4 @@ impl Launcher { } } } - - async fn process_request(request: RawRequest, tracker: Arc, socket: Arc) { - tracing::trace!(target: UDP_TRACKER_LOG_TARGET, request = %request.from, "Udp::process_request (receiving)"); - Self::process_valid_request(tracker, socket, request).await; - } - - async fn process_valid_request(tracker: Arc, socket: Arc, udp_request: RawRequest) { - tracing::trace!(target: UDP_TRACKER_LOG_TARGET, "Udp::process_valid_request. 
Making Response to {udp_request:?}"); - let from = udp_request.from; - let response = handlers::handle_packet(udp_request, &tracker.clone(), socket.address()).await; - Self::send_response(&socket.clone(), from, response).await; - } - - async fn send_response(bound_socket: &Arc, to: SocketAddr, response: Response) { - let response_type = match &response { - Response::Connect(_) => "Connect".to_string(), - Response::AnnounceIpv4(_) => "AnnounceIpv4".to_string(), - Response::AnnounceIpv6(_) => "AnnounceIpv6".to_string(), - Response::Scrape(_) => "Scrape".to_string(), - Response::Error(e) => format!("Error: {e:?}"), - }; - - tracing::debug!(target: UDP_TRACKER_LOG_TARGET, target = ?to, response_type, "Udp::send_response (sending)"); - - let buffer = vec![0u8; MAX_PACKET_SIZE]; - let mut cursor = Cursor::new(buffer); - - match response.write_bytes(&mut cursor) { - Ok(()) => { - #[allow(clippy::cast_possible_truncation)] - let position = cursor.position() as usize; - let inner = cursor.get_ref(); - - tracing::debug!(target: UDP_TRACKER_LOG_TARGET, ?to, bytes_count = &inner[..position].len(), "Udp::send_response (sending...)" ); - tracing::trace!(target: UDP_TRACKER_LOG_TARGET, ?to, bytes_count = &inner[..position].len(), payload = ?&inner[..position], "Udp::send_response (sending...)"); - - Self::send_packet(bound_socket, &to, &inner[..position]).await; - - tracing::trace!(target:UDP_TRACKER_LOG_TARGET, ?to, bytes_count = &inner[..position].len(), "Udp::send_response (sent)"); - } - Err(e) => { - tracing::error!(target: UDP_TRACKER_LOG_TARGET, ?to, response_type, err = %e, "Udp::send_response (error)"); - } - } - } - - async fn send_packet(bound_socket: &Arc, remote_addr: &SocketAddr, payload: &[u8]) { - tracing::trace!(target: UDP_TRACKER_LOG_TARGET, to = %remote_addr, ?payload, "Udp::send_response (sending)"); - - // doesn't matter if it reaches or not - drop(bound_socket.send_to(payload, remote_addr).await); - } } diff --git a/src/servers/udp/server/mod.rs 
b/src/servers/udp/server/mod.rs index e3321f157..16133e21b 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -5,6 +5,7 @@ use super::RawRequest; pub mod bound_socket; pub mod launcher; +pub mod processor; pub mod receiver; pub mod request_buffer; pub mod spawner; diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs new file mode 100644 index 000000000..e633a2358 --- /dev/null +++ b/src/servers/udp/server/processor.rs @@ -0,0 +1,66 @@ +use std::io::Cursor; +use std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::Response; + +use super::bound_socket::BoundSocket; +use crate::core::Tracker; +use crate::servers::udp::{handlers, RawRequest, UDP_TRACKER_LOG_TARGET}; + +pub struct Processor { + socket: Arc, + tracker: Arc, +} + +impl Processor { + pub fn new(socket: Arc, tracker: Arc) -> Self { + Self { socket, tracker } + } + + pub async fn process_request(self, request: RawRequest) { + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, request = %request.from, "Udp::process_request (receiving)"); + + let from = request.from; + let response = handlers::handle_packet(request, &self.tracker, self.socket.address()).await; + self.send_response(from, response).await; + } + + async fn send_response(self, to: SocketAddr, response: Response) { + let response_type = match &response { + Response::Connect(_) => "Connect".to_string(), + Response::AnnounceIpv4(_) => "AnnounceIpv4".to_string(), + Response::AnnounceIpv6(_) => "AnnounceIpv6".to_string(), + Response::Scrape(_) => "Scrape".to_string(), + Response::Error(e) => format!("Error: {e:?}"), + }; + + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, target = ?to, response_type, "Udp::send_response (sending)"); + + let mut writer = Cursor::new(Vec::with_capacity(200)); + + match response.write_bytes(&mut writer) { + Ok(()) => { + let bytes_count = writer.get_ref().len(); + let payload = writer.get_ref(); + + tracing::debug!(target: 
UDP_TRACKER_LOG_TARGET, ?to, bytes_count, "Udp::send_response (sending...)" ); + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, ?to, bytes_count, ?payload, "Udp::send_response (sending...)"); + + self.send_packet(&to, payload).await; + + tracing::trace!(target:UDP_TRACKER_LOG_TARGET, ?to, bytes_count, "Udp::send_response (sent)"); + } + Err(e) => { + tracing::error!(target: UDP_TRACKER_LOG_TARGET, ?to, response_type, err = %e, "Udp::send_response (error)"); + } + } + } + + async fn send_packet(&self, remote_addr: &SocketAddr, payload: &[u8]) { + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, to = %remote_addr, ?payload, "Udp::send_response (sending)"); + + // doesn't matter if it reaches or not + drop(self.socket.send_to(payload, remote_addr).await); + } +} diff --git a/src/servers/udp/server/receiver.rs b/src/servers/udp/server/receiver.rs index 020ab7324..0176930a4 100644 --- a/src/servers/udp/server/receiver.rs +++ b/src/servers/udp/server/receiver.rs @@ -11,7 +11,7 @@ use super::RawRequest; use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; pub struct Receiver { - pub bound_socket: Arc, + pub socket: Arc, data: RefCell<[u8; MAX_PACKET_SIZE]>, } @@ -19,13 +19,13 @@ impl Receiver { #[must_use] pub fn new(bound_socket: Arc) -> Self { Receiver { - bound_socket, + socket: bound_socket, data: RefCell::new([0; MAX_PACKET_SIZE]), } } pub fn bound_socket_address(&self) -> SocketAddr { - self.bound_socket.address() + self.socket.address() } } @@ -36,7 +36,7 @@ impl Stream for Receiver { let mut buf = *self.data.borrow_mut(); let mut buf = tokio::io::ReadBuf::new(&mut buf); - let Poll::Ready(ready) = self.bound_socket.poll_recv_from(cx, &mut buf) else { + let Poll::Ready(ready) = self.socket.poll_recv_from(cx, &mut buf) else { return Poll::Pending; }; diff --git a/src/shared/bit_torrent/common.rs b/src/shared/bit_torrent/common.rs index 9625b88e7..3dd059a6a 100644 --- a/src/shared/bit_torrent/common.rs +++ b/src/shared/bit_torrent/common.rs @@ -1,7 +1,6 @@ 
//! `BitTorrent` protocol primitive types //! //! [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -use serde::{Deserialize, Serialize}; /// The maximum number of torrents that can be returned in an `scrape` response. /// @@ -21,14 +20,3 @@ pub const MAX_SCRAPE_TORRENTS: u8 = 74; /// See function to [`generate`](crate::core::auth::generate) the /// [`ExpiringKeys`](crate::core::auth::ExpiringKey) for more information. pub const AUTH_KEY_LENGTH: usize = 32; - -#[repr(u32)] -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] -enum Actions { - // todo: it seems this enum is not used anywhere. Values match the ones in - // aquatic_udp_protocol::request::Request::from_bytes. - Connect = 0, - Announce = 1, - Scrape = 2, - Error = 3, -} From 64850aff2d29ce368f7c2be2edb7c4c7b789441d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 13 Jul 2024 13:04:42 +0200 Subject: [PATCH 0942/1003] dev: remove async trait dep --- Cargo.lock | 1 - Cargo.toml | 1 - src/core/databases/mod.rs | 28 +++---- src/core/databases/mysql.rs | 22 +++-- src/core/databases/sqlite.rs | 22 +++-- src/core/mod.rs | 82 +++++++++---------- src/core/services/torrent.rs | 16 ++-- src/core/statistics.rs | 11 ++- .../http/v1/extractors/announce_request.rs | 24 ++++-- .../http/v1/extractors/authentication_key.rs | 30 +++++-- .../http/v1/extractors/client_ip_sources.rs | 42 ++++++---- .../http/v1/extractors/scrape_request.rs | 24 ++++-- src/servers/http/v1/services/announce.rs | 2 +- src/servers/http/v1/services/scrape.rs | 4 +- src/servers/udp/handlers.rs | 16 ++-- tests/servers/api/environment.rs | 4 +- .../servers/api/v1/contract/context/stats.rs | 3 +- .../api/v1/contract/context/torrent.rs | 18 ++-- tests/servers/http/environment.rs | 4 +- tests/servers/http/v1/contract.rs | 33 +++----- tests/servers/udp/environment.rs | 4 +- 21 files changed, 208 insertions(+), 183 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8ae3d20df..83b3781dc 
100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3939,7 +3939,6 @@ version = "3.0.0-alpha.12-develop" dependencies = [ "anyhow", "aquatic_udp_protocol", - "async-trait", "axum", "axum-client-ip", "axum-extra", diff --git a/Cargo.toml b/Cargo.toml index 41afb1538..ed2de33e8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,7 +32,6 @@ version = "3.0.0-alpha.12-develop" [dependencies] anyhow = "1" aquatic_udp_protocol = "0" -async-trait = "0" axum = { version = "0", features = ["macros"] } axum-client-ip = "0" axum-extra = { version = "0", features = ["query"] } diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs index e3fb9ad60..cdb4c7ce5 100644 --- a/src/core/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -50,7 +50,6 @@ pub mod sqlite; use std::marker::PhantomData; -use async_trait::async_trait; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::PersistentTorrents; @@ -79,7 +78,6 @@ where } /// The persistence trait. It contains all the methods to interact with the database. -#[async_trait] pub trait Database: Sync + Send { /// It instantiates a new database driver. /// @@ -126,7 +124,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - async fn load_persistent_torrents(&self) -> Result; + fn load_persistent_torrents(&self) -> Result; /// It saves the torrent metrics data into the database. /// @@ -135,7 +133,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to save. - async fn save_persistent_torrent(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error>; + fn save_persistent_torrent(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error>; // Whitelist @@ -146,7 +144,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - async fn load_whitelist(&self) -> Result, Error>; + fn load_whitelist(&self) -> Result, Error>; /// It checks if the torrent is whitelisted. 
/// @@ -157,7 +155,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error>; + fn get_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result, Error>; /// It adds the torrent to the whitelist. /// @@ -166,7 +164,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to save. - async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; + fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; /// It checks if the torrent is whitelisted. /// @@ -175,8 +173,8 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { - Ok(self.get_info_hash_from_whitelist(info_hash).await?.is_some()) + fn is_info_hash_whitelisted(&self, info_hash: InfoHash) -> Result { + Ok(self.get_info_hash_from_whitelist(info_hash)?.is_some()) } /// It removes the torrent from the whitelist. @@ -186,7 +184,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to save. - async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; + fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; // Authentication keys @@ -197,19 +195,19 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - async fn load_keys(&self) -> Result, Error>; + fn load_keys(&self) -> Result, Error>; /// It gets an expiring authentication key from the database. /// /// It returns `Some(ExpiringKey)` if a [`ExpiringKey`](crate::core::auth::ExpiringKey) - /// with the input [`Key`](crate::core::auth::Key) exists, `None` otherwise. + /// with the input [`Key`] exists, `None` otherwise. /// /// # Context: Authentication Keys /// /// # Errors /// /// Will return `Err` if unable to load. 
- async fn get_key_from_keys(&self, key: &Key) -> Result, Error>; + fn get_key_from_keys(&self, key: &Key) -> Result, Error>; /// It adds an expiring authentication key to the database. /// @@ -218,7 +216,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to save. - async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result; + fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result; /// It removes an expiring authentication key from the database. /// @@ -227,5 +225,5 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - async fn remove_key_from_keys(&self, key: &Key) -> Result; + fn remove_key_from_keys(&self, key: &Key) -> Result; } diff --git a/src/core/databases/mysql.rs b/src/core/databases/mysql.rs index c6094cd8f..40eced900 100644 --- a/src/core/databases/mysql.rs +++ b/src/core/databases/mysql.rs @@ -2,7 +2,6 @@ use std::str::FromStr; use std::time::Duration; -use async_trait::async_trait; use r2d2::Pool; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; @@ -22,7 +21,6 @@ pub struct Mysql { pool: Pool, } -#[async_trait] impl Database for Mysql { /// It instantiates a new `MySQL` database driver. /// @@ -106,7 +104,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). - async fn load_persistent_torrents(&self) -> Result { + fn load_persistent_torrents(&self) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let torrents = conn.query_map( @@ -121,7 +119,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). 
- async fn load_keys(&self) -> Result, Error> { + fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let keys = conn.query_map( @@ -136,7 +134,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_whitelist`](crate::core::databases::Database::load_whitelist). - async fn load_whitelist(&self) -> Result, Error> { + fn load_whitelist(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let info_hashes = conn.query_map("SELECT info_hash FROM whitelist", |info_hash: String| { @@ -147,7 +145,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). - async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { const COMMAND : &str = "INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)"; let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -160,7 +158,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::get_info_hash_from_whitelist`](crate::core::databases::Database::get_info_hash_from_whitelist). - async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error> { + fn get_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let select = conn.exec_first::( @@ -174,7 +172,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::add_info_hash_to_whitelist`](crate::core::databases::Database::add_info_hash_to_whitelist). 
- async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let info_hash_str = info_hash.to_string(); @@ -188,7 +186,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::remove_info_hash_from_whitelist`](crate::core::databases::Database::remove_info_hash_from_whitelist). - async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let info_hash = info_hash.to_string(); @@ -199,7 +197,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::get_key_from_keys`](crate::core::databases::Database::get_key_from_keys). - async fn get_key_from_keys(&self, key: &Key) -> Result, Error> { + fn get_key_from_keys(&self, key: &Key) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let query = conn.exec_first::<(String, i64), _, _>( @@ -216,7 +214,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::add_key_to_keys`](crate::core::databases::Database::add_key_to_keys). - async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { + fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let key = auth_key.key.to_string(); @@ -231,7 +229,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::remove_key_from_keys`](crate::core::databases::Database::remove_key_from_keys). - async fn remove_key_from_keys(&self, key: &Key) -> Result { + fn remove_key_from_keys(&self, key: &Key) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! 
{ "key" => key.to_string() })?; diff --git a/src/core/databases/sqlite.rs b/src/core/databases/sqlite.rs index 071a824c5..3acbf9e77 100644 --- a/src/core/databases/sqlite.rs +++ b/src/core/databases/sqlite.rs @@ -2,7 +2,6 @@ use std::panic::Location; use std::str::FromStr; -use async_trait::async_trait; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; use torrust_tracker_primitives::info_hash::InfoHash; @@ -18,7 +17,6 @@ pub struct Sqlite { pool: Pool, } -#[async_trait] impl Database for Sqlite { /// It instantiates a new `SQLite3` database driver. /// @@ -90,7 +88,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). - async fn load_persistent_torrents(&self) -> Result { + fn load_persistent_torrents(&self) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; @@ -106,7 +104,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). - async fn load_keys(&self) -> Result, Error> { + fn load_keys(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; @@ -127,7 +125,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_whitelist`](crate::core::databases::Database::load_whitelist). - async fn load_whitelist(&self) -> Result, Error> { + fn load_whitelist(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist")?; @@ -144,7 +142,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). 
- async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let insert = conn.execute( @@ -163,7 +161,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::get_info_hash_from_whitelist`](crate::core::databases::Database::get_info_hash_from_whitelist). - async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error> { + fn get_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; @@ -176,7 +174,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::add_info_hash_to_whitelist`](crate::core::databases::Database::add_info_hash_to_whitelist). - async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let insert = conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", [info_hash.to_string()])?; @@ -192,7 +190,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::remove_info_hash_from_whitelist`](crate::core::databases::Database::remove_info_hash_from_whitelist). - async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let deleted = conn.execute("DELETE FROM whitelist WHERE info_hash = ?", [info_hash.to_string()])?; @@ -210,7 +208,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::get_key_from_keys`](crate::core::databases::Database::get_key_from_keys). 
- async fn get_key_from_keys(&self, key: &Key) -> Result, Error> { + fn get_key_from_keys(&self, key: &Key) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; @@ -230,7 +228,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::add_key_to_keys`](crate::core::databases::Database::add_key_to_keys). - async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { + fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let insert = conn.execute( @@ -249,7 +247,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::remove_key_from_keys`](crate::core::databases::Database::remove_key_from_keys). - async fn remove_key_from_keys(&self, key: &Key) -> Result { + fn remove_key_from_keys(&self, key: &Key) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let deleted = conn.execute("DELETE FROM keys WHERE key = ?", [key.to_string()])?; diff --git a/src/core/mod.rs b/src/core/mod.rs index ee90cea39..64d5e2c9a 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -622,7 +622,7 @@ impl Tracker { /// # Context: Tracker /// /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). - pub async fn announce(&self, info_hash: &InfoHash, peer: &mut peer::Peer, remote_client_ip: &IpAddr) -> AnnounceData { + pub fn announce(&self, info_hash: &InfoHash, peer: &mut peer::Peer, remote_client_ip: &IpAddr) -> AnnounceData { // code-review: maybe instead of mutating the peer we could just return // a tuple with the new peer and the announce data: (Peer, AnnounceData). // It could even be a different struct: `StoredPeer` or `PublicPeer`. 
@@ -642,7 +642,7 @@ impl Tracker { peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); debug!("After: {peer:?}"); - let stats = self.upsert_peer_and_get_stats(info_hash, peer).await; + let stats = self.upsert_peer_and_get_stats(info_hash, peer); let peers = self.get_peers_for(info_hash, peer); @@ -688,8 +688,8 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. - pub async fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { - let persistent_torrents = self.database.load_persistent_torrents().await?; + pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { + let persistent_torrents = self.database.load_persistent_torrents()?; self.torrents.import_persistent(&persistent_torrents); @@ -718,7 +718,7 @@ impl Tracker { /// needed for a `announce` request response. /// /// # Context: Tracker - pub async fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { + pub fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { let swarm_metadata_before = match self.torrents.get_swarm_metadata(info_hash) { Some(swarm_metadata) => swarm_metadata, None => SwarmMetadata::zeroed(), @@ -732,7 +732,7 @@ impl Tracker { }; if swarm_metadata_before != swarm_metadata_after { - self.persist_stats(info_hash, &swarm_metadata_after).await; + self.persist_stats(info_hash, &swarm_metadata_after); } swarm_metadata_after @@ -741,12 +741,12 @@ impl Tracker { /// It stores the torrents stats into the database (if persistency is enabled). 
/// /// # Context: Tracker - async fn persist_stats(&self, info_hash: &InfoHash, swarm_metadata: &SwarmMetadata) { + fn persist_stats(&self, info_hash: &InfoHash, swarm_metadata: &SwarmMetadata) { if self.config.tracker_policy.persistent_torrent_completed_stat { let completed = swarm_metadata.downloaded; let info_hash = *info_hash; - drop(self.database.save_persistent_torrent(&info_hash, completed).await); + drop(self.database.save_persistent_torrent(&info_hash, completed)); } } @@ -804,7 +804,7 @@ impl Tracker { /// Will return a `database::Error` if unable to add the `auth_key` to the database. pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { let auth_key = auth::generate(lifetime); - self.database.add_key_to_keys(&auth_key).await?; + self.database.add_key_to_keys(&auth_key)?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); Ok(auth_key) } @@ -821,7 +821,7 @@ impl Tracker { /// /// Will panic if key cannot be converted into a valid `Key`. pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> { - self.database.remove_key_from_keys(key).await?; + self.database.remove_key_from_keys(key)?; self.keys.write().await.remove(key); Ok(()) } @@ -856,7 +856,7 @@ impl Tracker { /// /// Will return a `database::Error` if unable to `load_keys` from the database. pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> { - let keys_from_database = self.database.load_keys().await?; + let keys_from_database = self.database.load_keys()?; let mut keys = self.keys.write().await; keys.clear(); @@ -901,20 +901,20 @@ impl Tracker { /// /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. 
pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.add_torrent_to_database_whitelist(info_hash).await?; + self.add_torrent_to_database_whitelist(info_hash)?; self.add_torrent_to_memory_whitelist(info_hash).await; Ok(()) } /// It adds a torrent to the whitelist if it has not been whitelisted previously - async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - let is_whitelisted = self.database.is_info_hash_whitelisted(info_hash).await?; + fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; if is_whitelisted { return Ok(()); } - self.database.add_info_hash_to_whitelist(*info_hash).await?; + self.database.add_info_hash_to_whitelist(*info_hash)?; Ok(()) } @@ -932,7 +932,7 @@ impl Tracker { /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.remove_torrent_from_database_whitelist(info_hash).await?; + self.remove_torrent_from_database_whitelist(info_hash)?; self.remove_torrent_from_memory_whitelist(info_hash).await; Ok(()) } @@ -944,14 +944,14 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. 
- pub async fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - let is_whitelisted = self.database.is_info_hash_whitelisted(info_hash).await?; + pub fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; if !is_whitelisted { return Ok(()); } - self.database.remove_info_hash_from_whitelist(*info_hash).await?; + self.database.remove_info_hash_from_whitelist(*info_hash)?; Ok(()) } @@ -978,7 +978,7 @@ impl Tracker { /// /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { - let whitelisted_torrents_from_database = self.database.load_whitelist().await?; + let whitelisted_torrents_from_database = self.database.load_whitelist()?; let mut whitelist = self.whitelist.write().await; whitelist.clear(); @@ -1173,7 +1173,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - tracker.upsert_peer_and_get_stats(&info_hash, &peer).await; + tracker.upsert_peer_and_get_stats(&info_hash, &peer); let peers = tracker.get_torrent_peers(&info_hash); @@ -1187,7 +1187,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - tracker.upsert_peer_and_get_stats(&info_hash, &peer).await; + tracker.upsert_peer_and_get_stats(&info_hash, &peer); let peers = tracker.get_peers_for(&info_hash, &peer); @@ -1198,7 +1198,7 @@ mod tests { async fn it_should_return_the_torrent_metrics() { let tracker = public_tracker(); - tracker.upsert_peer_and_get_stats(&sample_info_hash(), &leecher()).await; + tracker.upsert_peer_and_get_stats(&sample_info_hash(), &leecher()); let torrent_metrics = tracker.get_torrents_metrics(); @@ -1219,7 +1219,7 @@ mod tests { let start_time = std::time::Instant::now(); for i in 0..1_000_000 { - 
tracker.upsert_peer_and_get_stats(&gen_seeded_infohash(&i), &leecher()).await; + tracker.upsert_peer_and_get_stats(&gen_seeded_infohash(&i), &leecher()); } let result_a = start_time.elapsed(); @@ -1353,7 +1353,7 @@ mod tests { let mut peer = sample_peer(); - let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()); assert_eq!(announce_data.peers, vec![]); } @@ -1363,12 +1363,10 @@ mod tests { let tracker = public_tracker(); let mut previously_announced_peer = sample_peer_1(); - tracker - .announce(&sample_info_hash(), &mut previously_announced_peer, &peer_ip()) - .await; + tracker.announce(&sample_info_hash(), &mut previously_announced_peer, &peer_ip()); let mut peer = sample_peer_2(); - let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()); assert_eq!(announce_data.peers, vec![Arc::new(previously_announced_peer)]); } @@ -1385,7 +1383,7 @@ mod tests { let mut peer = seeder(); - let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()); assert_eq!(announce_data.stats.complete, 1); } @@ -1396,7 +1394,7 @@ mod tests { let mut peer = leecher(); - let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()); assert_eq!(announce_data.stats.incomplete, 1); } @@ -1407,10 +1405,10 @@ mod tests { // We have to announce with "started" event because peer does not count if peer was not previously known let mut started_peer = started_peer(); - tracker.announce(&sample_info_hash(), &mut started_peer, &peer_ip()).await; + tracker.announce(&sample_info_hash(), &mut started_peer, &peer_ip()); let mut completed_peer = completed_peer(); - 
let announce_data = tracker.announce(&sample_info_hash(), &mut completed_peer, &peer_ip()).await; + let announce_data = tracker.announce(&sample_info_hash(), &mut completed_peer, &peer_ip()); assert_eq!(announce_data.stats.downloaded, 1); } @@ -1450,15 +1448,11 @@ mod tests { // Announce a "complete" peer for the torrent let mut complete_peer = complete_peer(); - tracker - .announce(&info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10))) - .await; + tracker.announce(&info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10))); // Announce an "incomplete" peer for the torrent let mut incomplete_peer = incomplete_peer(); - tracker - .announce(&info_hash, &mut incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11))) - .await; + tracker.announce(&info_hash, &mut incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11))); // Scrape let scrape_data = tracker.scrape(&vec![info_hash]).await; @@ -1606,11 +1600,11 @@ mod tests { let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); let mut peer = incomplete_peer(); - tracker.announce(&info_hash, &mut peer, &peer_ip()).await; + tracker.announce(&info_hash, &mut peer, &peer_ip()); // Announce twice to force non zeroed swarm metadata let mut peer = complete_peer(); - tracker.announce(&info_hash, &mut peer, &peer_ip()).await; + tracker.announce(&info_hash, &mut peer, &peer_ip()); let scrape_data = tracker.scrape(&vec![info_hash]).await; @@ -1743,17 +1737,17 @@ mod tests { let mut peer = sample_peer(); peer.event = AnnounceEvent::Started; - let swarm_stats = tracker.upsert_peer_and_get_stats(&info_hash, &peer).await; + let swarm_stats = tracker.upsert_peer_and_get_stats(&info_hash, &peer); assert_eq!(swarm_stats.downloaded, 0); peer.event = AnnounceEvent::Completed; - let swarm_stats = tracker.upsert_peer_and_get_stats(&info_hash, &peer).await; + let swarm_stats = tracker.upsert_peer_and_get_stats(&info_hash, &peer); assert_eq!(swarm_stats.downloaded, 1); // Remove the 
newly updated torrent from memory tracker.torrents.remove(&info_hash); - tracker.load_torrents_from_database().await.unwrap(); + tracker.load_torrents_from_database().unwrap(); let torrent_entry = tracker.torrents.get(&info_hash).expect("it should be able to get entry"); diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 9cba5de25..1c337a41d 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -156,7 +156,7 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()).await; + tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()); let torrent_info = get_torrent_info(tracker.clone(), &info_hash).await.unwrap(); @@ -206,7 +206,7 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()).await; + tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()); let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; @@ -230,8 +230,8 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()).await; - tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()).await; + tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); + tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); let offset = 0; let limit = 1; @@ -250,8 +250,8 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()).await; - tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()).await; + tracker.upsert_peer_and_get_stats(&info_hash1, 
&sample_peer()); + tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); let offset = 1; let limit = 4000; @@ -276,11 +276,11 @@ mod tests { let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); - tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()).await; + tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()).await; + tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; diff --git a/src/core/statistics.rs b/src/core/statistics.rs index d7192f5d1..bcafda17f 100644 --- a/src/core/statistics.rs +++ b/src/core/statistics.rs @@ -19,7 +19,8 @@ //! See the [`statistics::Event`](crate::core::statistics::Event) enum to check which events are available. use std::sync::Arc; -use async_trait::async_trait; +use futures::future::BoxFuture; +use futures::FutureExt; #[cfg(test)] use mockall::{automock, predicate::str}; use tokio::sync::mpsc::error::SendError; @@ -185,10 +186,9 @@ async fn event_handler(event: Event, stats_repository: &Repo) { } /// A trait to allow sending statistics events -#[async_trait] #[cfg_attr(test, automock)] pub trait EventSender: Sync + Send { - async fn send_event(&self, event: Event) -> Option>>; + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; } /// An [`statistics::EventSender`](crate::core::statistics::EventSender) implementation. 
@@ -199,10 +199,9 @@ pub struct Sender { sender: mpsc::Sender, } -#[async_trait] impl EventSender for Sender { - async fn send_event(&self, event: Event) -> Option>> { - Some(self.sender.send(event).await) + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { + async move { Some(self.sender.send(event).await) }.boxed() } } diff --git a/src/servers/http/v1/extractors/announce_request.rs b/src/servers/http/v1/extractors/announce_request.rs index bf77f0608..d2612f79b 100644 --- a/src/servers/http/v1/extractors/announce_request.rs +++ b/src/servers/http/v1/extractors/announce_request.rs @@ -29,10 +29,11 @@ //! ``` use std::panic::Location; -use axum::async_trait; use axum::extract::FromRequestParts; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; +use futures::future::BoxFuture; +use futures::FutureExt; use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::announce::{Announce, ParseAnnounceQueryError}; @@ -42,18 +43,29 @@ use crate::servers::http::v1::responses; /// request. 
pub struct ExtractRequest(pub Announce); -#[async_trait] impl FromRequestParts for ExtractRequest where S: Send + Sync, { type Rejection = Response; - async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { - match extract_announce_from(parts.uri.query()) { - Ok(announce_request) => Ok(ExtractRequest(announce_request)), - Err(error) => Err(error.into_response()), + #[must_use] + fn from_request_parts<'life0, 'life1, 'async_trait>( + parts: &'life0 mut Parts, + _state: &'life1 S, + ) -> BoxFuture<'async_trait, Result> + where + 'life0: 'async_trait, + 'life1: 'async_trait, + Self: 'async_trait, + { + async { + match extract_announce_from(parts.uri.query()) { + Ok(announce_request) => Ok(ExtractRequest(announce_request)), + Err(error) => Err(error.into_response()), + } } + .boxed() } } diff --git a/src/servers/http/v1/extractors/authentication_key.rs b/src/servers/http/v1/extractors/authentication_key.rs index 985e32371..e86241edf 100644 --- a/src/servers/http/v1/extractors/authentication_key.rs +++ b/src/servers/http/v1/extractors/authentication_key.rs @@ -44,11 +44,12 @@ //! > specifications specify any HTTP status code for authentication errors. 
use std::panic::Location; -use axum::async_trait; use axum::extract::rejection::PathRejection; use axum::extract::{FromRequestParts, Path}; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; +use futures::future::BoxFuture; +use futures::FutureExt; use serde::Deserialize; use crate::core::auth::Key; @@ -68,21 +69,32 @@ impl KeyParam { } } -#[async_trait] impl FromRequestParts for Extract where S: Send + Sync, { type Rejection = Response; - async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { - // Extract `key` from URL path with Axum `Path` extractor - let maybe_path_with_key = Path::::from_request_parts(parts, state).await; - - match extract_key(maybe_path_with_key) { - Ok(key) => Ok(Extract(key)), - Err(error) => Err(error.into_response()), + #[must_use] + fn from_request_parts<'life0, 'life1, 'async_trait>( + parts: &'life0 mut Parts, + state: &'life1 S, + ) -> BoxFuture<'async_trait, Result> + where + 'life0: 'async_trait, + 'life1: 'async_trait, + Self: 'async_trait, + { + async { + // Extract `key` from URL path with Axum `Path` extractor + let maybe_path_with_key = Path::::from_request_parts(parts, state).await; + + match extract_key(maybe_path_with_key) { + Ok(key) => Ok(Extract(key)), + Err(error) => Err(error.into_response()), + } } + .boxed() } } diff --git a/src/servers/http/v1/extractors/client_ip_sources.rs b/src/servers/http/v1/extractors/client_ip_sources.rs index 1c6cdc636..5b235fbe0 100644 --- a/src/servers/http/v1/extractors/client_ip_sources.rs +++ b/src/servers/http/v1/extractors/client_ip_sources.rs @@ -37,11 +37,12 @@ //! 
``` use std::net::SocketAddr; -use axum::async_trait; use axum::extract::{ConnectInfo, FromRequestParts}; use axum::http::request::Parts; use axum::response::Response; use axum_client_ip::RightmostXForwardedFor; +use futures::future::BoxFuture; +use futures::FutureExt; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; @@ -49,27 +50,38 @@ use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; /// struct. pub struct Extract(pub ClientIpSources); -#[async_trait] impl FromRequestParts for Extract where S: Send + Sync, { type Rejection = Response; - async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { - let right_most_x_forwarded_for = match RightmostXForwardedFor::from_request_parts(parts, state).await { - Ok(right_most_x_forwarded_for) => Some(right_most_x_forwarded_for.0), - Err(_) => None, - }; + #[must_use] + fn from_request_parts<'life0, 'life1, 'async_trait>( + parts: &'life0 mut Parts, + state: &'life1 S, + ) -> BoxFuture<'async_trait, Result> + where + 'life0: 'async_trait, + 'life1: 'async_trait, + Self: 'async_trait, + { + async { + let right_most_x_forwarded_for = match RightmostXForwardedFor::from_request_parts(parts, state).await { + Ok(right_most_x_forwarded_for) => Some(right_most_x_forwarded_for.0), + Err(_) => None, + }; - let connection_info_ip = match ConnectInfo::::from_request_parts(parts, state).await { - Ok(connection_info_socket_addr) => Some(connection_info_socket_addr.0.ip()), - Err(_) => None, - }; + let connection_info_ip = match ConnectInfo::::from_request_parts(parts, state).await { + Ok(connection_info_socket_addr) => Some(connection_info_socket_addr.0.ip()), + Err(_) => None, + }; - Ok(Extract(ClientIpSources { - right_most_x_forwarded_for, - connection_info_ip, - })) + Ok(Extract(ClientIpSources { + right_most_x_forwarded_for, + connection_info_ip, + })) + } + .boxed() } } diff --git a/src/servers/http/v1/extractors/scrape_request.rs 
b/src/servers/http/v1/extractors/scrape_request.rs index 35a8da5f8..07fa4ccb9 100644 --- a/src/servers/http/v1/extractors/scrape_request.rs +++ b/src/servers/http/v1/extractors/scrape_request.rs @@ -29,10 +29,11 @@ //! ``` use std::panic::Location; -use axum::async_trait; use axum::extract::FromRequestParts; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; +use futures::future::BoxFuture; +use futures::FutureExt; use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::scrape::{ParseScrapeQueryError, Scrape}; @@ -42,18 +43,29 @@ use crate::servers::http::v1::responses; /// request. pub struct ExtractRequest(pub Scrape); -#[async_trait] impl FromRequestParts for ExtractRequest where S: Send + Sync, { type Rejection = Response; - async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { - match extract_scrape_from(parts.uri.query()) { - Ok(scrape_request) => Ok(ExtractRequest(scrape_request)), - Err(error) => Err(error.into_response()), + #[must_use] + fn from_request_parts<'life0, 'life1, 'async_trait>( + parts: &'life0 mut Parts, + _state: &'life1 S, + ) -> BoxFuture<'async_trait, Result> + where + 'life0: 'async_trait, + 'life1: 'async_trait, + Self: 'async_trait, + { + async { + match extract_scrape_from(parts.uri.query()) { + Ok(scrape_request) => Ok(ExtractRequest(scrape_request)), + Err(error) => Err(error.into_response()), + } } + .boxed() } } diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 47175817d..f5f730ae2 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -30,7 +30,7 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut peer: let original_peer_ip = peer.peer_addr.ip(); // The tracker could change the original peer ip - let announce_data = tracker.announce(&info_hash, peer, &original_peer_ip).await; + let announce_data = tracker.announce(&info_hash, peer, 
&original_peer_ip); match original_peer_ip { IpAddr::V4(_) => { diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index ee7814194..b83abb321 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -119,7 +119,7 @@ mod tests { // Announce a new peer to force scrape data to contain not zeroed data let mut peer = sample_peer(); let original_peer_ip = peer.ip(); - tracker.announce(&info_hash, &mut peer, &original_peer_ip).await; + tracker.announce(&info_hash, &mut peer, &original_peer_ip); let scrape_data = invoke(&tracker, &info_hashes, &original_peer_ip).await; @@ -210,7 +210,7 @@ mod tests { // Announce a new peer to force scrape data to contain not zeroed data let mut peer = sample_peer(); let original_peer_ip = peer.ip(); - tracker.announce(&info_hash, &mut peer, &original_peer_ip).await; + tracker.announce(&info_hash, &mut peer, &original_peer_ip); let scrape_data = fake(&tracker, &info_hashes, &original_peer_ip).await; diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index c6b2458e5..53683fbb9 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -164,7 +164,7 @@ pub async fn handle_announce( let mut peer = peer_builder::from_request(&wrapped_announce_request, &remote_client_ip); - let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; + let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip); match remote_client_ip { IpAddr::V4(_) => { @@ -722,7 +722,7 @@ mod tests { assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } - async fn add_a_torrent_peer_using_ipv6(tracker: Arc) { + fn add_a_torrent_peer_using_ipv6(tracker: &Arc) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -735,7 +735,7 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - 
tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv6).await; + tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv6); } async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { @@ -751,7 +751,7 @@ mod tests { async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { let tracker = public_tracker(); - add_a_torrent_peer_using_ipv6(tracker.clone()).await; + add_a_torrent_peer_using_ipv6(&tracker); let response = announce_a_new_peer_using_ipv4(tracker.clone()).await; @@ -954,7 +954,7 @@ mod tests { assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); } - async fn add_a_torrent_peer_using_ipv4(tracker: Arc) { + fn add_a_torrent_peer_using_ipv4(tracker: &Arc) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -966,7 +966,7 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); - tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv4).await; + tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv4); } async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { @@ -985,7 +985,7 @@ mod tests { async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { let tracker = public_tracker(); - add_a_torrent_peer_using_ipv4(tracker.clone()).await; + add_a_torrent_peer_using_ipv4(&tracker); let response = announce_a_new_peer_using_ipv6(tracker.clone()).await; @@ -1144,7 +1144,7 @@ mod tests { .with_number_of_bytes_left(0) .into(); - tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer).await; + tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer); } fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 
dc2f70a76..92ef7b70b 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -22,8 +22,8 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker - pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - self.tracker.upsert_peer_and_get_stats(info_hash, peer).await; + pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + self.tracker.upsert_peer_and_get_stats(info_hash, peer); } } diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index af6587673..c4c992484 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -17,8 +17,7 @@ async fn should_allow_getting_tracker_statistics() { env.add_torrent_peer( &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), &PeerBuilder::default().into(), - ) - .await; + ); let response = Client::new(env.get_connection_info()).get_tracker_statistics().await; diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index d54935f80..7ef35e729 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -24,7 +24,7 @@ async fn should_allow_getting_all_torrents() { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); let response = Client::new(env.get_connection_info()).get_torrents(Query::empty()).await; @@ -50,8 +50,8 @@ async fn should_allow_limiting_the_torrents_in_the_result() { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - env.add_torrent_peer(&info_hash_1, 
&PeerBuilder::default().into()).await; - env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); let response = Client::new(env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) @@ -79,8 +79,8 @@ async fn should_allow_the_torrents_result_pagination() { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; - env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); let response = Client::new(env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) @@ -107,8 +107,8 @@ async fn should_allow_getting_a_list_of_torrents_providing_infohashes() { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; - env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); let response = Client::new(env.get_connection_info()) .get_torrents(Query::params( @@ -224,7 +224,7 @@ async fn should_allow_getting_a_torrent_info() { let peer = PeerBuilder::default().into(); - env.add_torrent_peer(&info_hash, &peer).await; + env.add_torrent_peer(&info_hash, &peer); let response = 
Client::new(env.get_connection_info()) .get_torrent(&info_hash.to_string()) @@ -285,7 +285,7 @@ async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) .get_torrent(&info_hash.to_string()) diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 2133ed6d0..b6bb21c16 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -19,8 +19,8 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker - pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - self.tracker.upsert_peer_and_get_stats(info_hash, peer).await; + pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + self.tracker.upsert_peer_and_get_stats(info_hash, peer); } } diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index cdffead99..e4a35d0c5 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -415,7 +415,7 @@ mod for_all_config_modes { .build(); // Add the Peer 1 - env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + env.add_torrent_peer(&info_hash, &previously_announced_peer); // Announce the new Peer 2. 
This new peer is non included on the response peer list let response = Client::new(*env.bind_address()) @@ -456,7 +456,7 @@ mod for_all_config_modes { .with_peer_id(&peer::Id(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) .build(); - env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; + env.add_torrent_peer(&info_hash, &peer_using_ipv4); // Announce a peer using IPV6 let peer_using_ipv6 = PeerBuilder::default() @@ -466,7 +466,7 @@ mod for_all_config_modes { 8080, )) .build(); - env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; + env.add_torrent_peer(&info_hash, &peer_using_ipv6); // Announce the new Peer. let response = Client::new(*env.bind_address()) @@ -505,7 +505,7 @@ mod for_all_config_modes { let peer = PeerBuilder::default().build(); // Add a peer - env.add_torrent_peer(&info_hash, &peer).await; + env.add_torrent_peer(&info_hash, &peer); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -536,7 +536,7 @@ mod for_all_config_modes { .build(); // Add the Peer 1 - env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + env.add_torrent_peer(&info_hash, &previously_announced_peer); // Announce the new Peer 2 accepting compact responses let response = Client::new(*env.bind_address()) @@ -577,7 +577,7 @@ mod for_all_config_modes { .build(); // Add the Peer 1 - env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + env.add_torrent_peer(&info_hash, &previously_announced_peer); // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list @@ -942,8 +942,7 @@ mod for_all_config_modes { .with_peer_id(&peer::Id(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ) - .await; + ); let response = Client::new(*env.bind_address()) .scrape( @@ -981,8 +980,7 @@ mod for_all_config_modes { .with_peer_id(&peer::Id(*b"-qB00000000000000001")) 
.with_no_bytes_pending_to_download() .build(), - ) - .await; + ); let response = Client::new(*env.bind_address()) .scrape( @@ -1182,8 +1180,7 @@ mod configured_as_whitelisted { .with_peer_id(&peer::Id(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ) - .await; + ); let response = Client::new(*env.bind_address()) .scrape( @@ -1212,8 +1209,7 @@ mod configured_as_whitelisted { .with_peer_id(&peer::Id(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ) - .await; + ); env.tracker .add_torrent_to_whitelist(&info_hash) @@ -1366,8 +1362,7 @@ mod configured_as_private { .with_peer_id(&peer::Id(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ) - .await; + ); let response = Client::new(*env.bind_address()) .scrape( @@ -1396,8 +1391,7 @@ mod configured_as_private { .with_peer_id(&peer::Id(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ) - .await; + ); let expiring_key = env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); @@ -1440,8 +1434,7 @@ mod configured_as_private { .with_peer_id(&peer::Id(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ) - .await; + ); let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index c580c3558..cfc4390c9 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -21,8 +21,8 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker #[allow(dead_code)] - pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { - self.tracker.upsert_peer_and_get_stats(info_hash, peer).await; + pub fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { + self.tracker.upsert_peer_and_get_stats(info_hash, peer); } } From cafb9aaa46ccc408238835745363304113e483de Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 13 Jul 2024 13:57:18 +0200 
Subject: [PATCH 0943/1003] chore: update deps Updating crates.io index Locking 38 packages to latest compatible versions Updating async-trait v0.1.80 -> v0.1.81 Updating bytes v1.6.0 -> v1.6.1 Updating castaway v0.2.2 -> v0.2.3 Updating cc v1.0.104 -> v1.1.2 Updating clap v4.5.8 -> v4.5.9 Updating clap_builder v4.5.8 -> v4.5.9 Updating darling v0.20.9 -> v0.20.10 Updating darling_core v0.20.9 -> v0.20.10 Updating darling_macro v0.20.9 -> v0.20.10 Updating http-body v1.0.0 -> v1.0.1 Updating hyper v1.4.0 -> v1.4.1 Updating oorandom v11.1.3 -> v11.1.4 Updating rustls v0.23.10 -> v0.23.11 Updating rustls-webpki v0.102.4 -> v0.102.5 Updating serde v1.0.203 -> v1.0.204 Updating serde_derive v1.0.203 -> v1.0.204 Updating serde_with v3.8.2 -> v3.8.3 Updating serde_with_macros v3.8.2 -> v3.8.3 Updating syn v2.0.68 -> v2.0.71 Updating thiserror v1.0.61 -> v1.0.62 Updating thiserror-impl v1.0.61 -> v1.0.62 Updating tinyvec v1.6.1 -> v1.8.0 Updating toml_edit v0.22.14 -> v0.22.15 Updating uuid v1.9.1 -> v1.10.0 Updating windows-targets v0.52.5 -> v0.52.6 Updating windows_aarch64_gnullvm v0.52.5 -> v0.52.6 Updating windows_aarch64_msvc v0.52.5 -> v0.52.6 Updating windows_i686_gnu v0.52.5 -> v0.52.6 Updating windows_i686_gnullvm v0.52.5 -> v0.52.6 Updating windows_i686_msvc v0.52.5 -> v0.52.6 Updating windows_x86_64_gnu v0.52.5 -> v0.52.6 Updating windows_x86_64_gnullvm v0.52.5 -> v0.52.6 Updating windows_x86_64_msvc v0.52.5 -> v0.52.6 Updating zerocopy v0.7.34 -> v0.7.35 Updating zerocopy-derive v0.7.34 -> v0.7.35 Updating zstd v0.13.1 -> v0.13.2 Updating zstd-safe v7.1.0 -> v7.2.0 Updating zstd-sys v2.0.11+zstd.1.5.6 -> v2.0.12+zstd.1.5.6 --- Cargo.lock | 244 ++++++++++++++++++++++++++--------------------------- 1 file changed, 122 insertions(+), 122 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 83b3781dc..e3c03fa69 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -350,13 +350,13 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] 
name = "async-trait" -version = "0.1.80" +version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -479,7 +479,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -566,7 +566,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -635,7 +635,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", "syn_derive", ] @@ -708,9 +708,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952" [[package]] name = "camino" @@ -729,18 +729,18 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "castaway" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a17ed5635fc8536268e5d4de1e22e81ac34419e5f052d4d51f4e01dcc263fcc" +checksum = "0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5" dependencies = [ "rustversion", ] [[package]] name = "cc" -version = "1.0.104" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74b6a57f98764a267ff415d50a25e6e166f3831a5071af4995296ea97d210490" +checksum = "47de7e88bbbd467951ae7f5a6f34f70d1b4d9cfce53d5fd70f74ebe118b3db56" dependencies = [ "jobserver", "libc", @@ -778,7 +778,7 @@ dependencies = [ "iana-time-zone", "num-traits", "serde", - 
"windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -821,9 +821,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.8" +version = "4.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b3edb18336f4df585bc9aa31dd99c036dfa5dc5e9a2939a722a188f3a8970d" +checksum = "64acc1846d54c1fe936a78dc189c34e28d3f5afc348403f28ecf53660b9b8462" dependencies = [ "clap_builder", "clap_derive", @@ -831,9 +831,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.8" +version = "4.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1c09dd5ada6c6c78075d6fd0da3f90d8080651e2d6cc8eb2f1aaa4034ced708" +checksum = "6fb8393d67ba2e7bfaf28a23458e4e2b543cc73a99595511eb207fdb8aede942" dependencies = [ "anstream", "anstyle", @@ -850,7 +850,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -1058,9 +1058,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.9" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ "darling_core", "darling_macro", @@ -1068,27 +1068,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.9" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] name = "darling_macro" -version = "0.20.9" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" +checksum = 
"d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -1124,7 +1124,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -1135,7 +1135,7 @@ checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -1357,7 +1357,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -1369,7 +1369,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -1381,7 +1381,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -1474,7 +1474,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -1672,9 +1672,9 @@ dependencies = [ [[package]] name = "http-body" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http", @@ -1707,9 +1707,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4fe55fb7a772d59a5ff1dfbff4fe0258d19b89fec4b233e75d35d5d2316badc" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" dependencies = [ "bytes", "futures-channel", @@ -1736,7 +1736,7 @@ dependencies = [ "http", "hyper", "hyper-util", - "rustls 
0.23.10", + "rustls 0.23.11", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -2047,7 +2047,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d" dependencies = [ "cfg-if", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -2192,7 +2192,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -2243,7 +2243,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", "termcolor", "thiserror", ] @@ -2424,9 +2424,9 @@ checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" -version = "11.1.3" +version = "11.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" +checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "openssl" @@ -2451,7 +2451,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -2504,7 +2504,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -2527,7 +2527,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -2601,7 +2601,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -2790,7 +2790,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", "version_check", "yansi", ] @@ -3098,7 +3098,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.68", + "syn 2.0.71", 
"unicode-ident", ] @@ -3194,13 +3194,13 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.10" +version = "0.23.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +checksum = "4828ea528154ae444e5a642dbb7d5623354030dc9822b83fd9bb79683c7399d0" dependencies = [ "once_cell", "rustls-pki-types", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.5", "subtle", "zeroize", ] @@ -3233,9 +3233,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.102.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "f9a6fccd794a42c2c105b513a2f62bc3fd8f3ba57a4593677ceb0bd035164d78" dependencies = [ "ring", "rustls-pki-types", @@ -3340,9 +3340,9 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" dependencies = [ "serde_derive", ] @@ -3368,13 +3368,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -3420,7 +3420,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -3446,9 +3446,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.8.2" 
+version = "3.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "079f3a42cd87588d924ed95b533f8d30a483388c4e400ab736a7058e34f16169" +checksum = "e73139bc5ec2d45e6c5fd85be5a46949c1c39a4c18e56915f5eb4c12f975e377" dependencies = [ "base64 0.22.1", "chrono", @@ -3464,14 +3464,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.8.2" +version = "3.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc03aad67c1d26b7de277d51c86892e7d9a0110a2fe44bf6b26cc569fba302d6" +checksum = "b80d3d6b56b64335c0180e5ffde23b3c5e08c14c585b51a15bd0e95393f46703" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -3614,9 +3614,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.68" +version = "2.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "901fa70d88b9d6c98022e23b4136f9f3e54e4662c3bc1bd1d84a42a9a0f0c1e9" +checksum = "b146dcf730474b4bcd16c311627b31ede9ab149045db4d6088b3becaea046462" dependencies = [ "proc-macro2", "quote", @@ -3632,7 +3632,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -3720,22 +3720,22 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.61" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +checksum = "f2675633b1499176c2dff06b0856a27976a8f9d436737b4cf4f312d4d91d8bbb" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.61" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +checksum = "d20468752b09f49e909e55a5d338caa8bedf615594e9d80bc4c565d30faf798c" dependencies = [ "proc-macro2", "quote", - 
"syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -3791,9 +3791,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c55115c6fbe2d2bef26eb09ad74bde02d8255476fc0c7b515ef09fbb35742d82" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] @@ -3830,7 +3830,7 @@ checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -3859,7 +3859,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.10", + "rustls 0.23.11", "rustls-pki-types", "tokio", ] @@ -3886,7 +3886,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.14", + "toml_edit 0.22.15", ] [[package]] @@ -3922,9 +3922,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.14" +version = "0.22.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f21c7aaf97f1bd9ca9d4f9e73b0a6c74bd5afef56f2bc931943a6e1c37e04e38" +checksum = "d59a3a72298453f564e2b111fa896f8d07fabb36f51f06d7e875fc5e0b5a3ef1" dependencies = [ "indexmap 2.2.6", "serde", @@ -4154,7 +4154,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -4284,9 +4284,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.9.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ "getrandom", 
"rand", @@ -4368,7 +4368,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", "wasm-bindgen-shared", ] @@ -4402,7 +4402,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4460,7 +4460,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -4478,7 +4478,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -4498,18 +4498,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.5", - "windows_aarch64_msvc 0.52.5", - "windows_i686_gnu 0.52.5", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.5", - "windows_x86_64_gnu 0.52.5", - "windows_x86_64_gnullvm 0.52.5", - "windows_x86_64_msvc 0.52.5", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -4520,9 +4520,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -4532,9 +4532,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -4544,15 +4544,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -4562,9 +4562,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -4574,9 +4574,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] 
name = "windows_x86_64_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -4586,9 +4586,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -4598,9 +4598,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" @@ -4647,9 +4647,9 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "zerocopy" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", "zerocopy-derive", @@ -4657,13 +4657,13 @@ dependencies = [ [[package]] name = "zerocopy-derive" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.71", ] [[package]] @@ -4674,27 +4674,27 @@ checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" [[package]] name = "zstd" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "7.1.0" +version = "7.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a" +checksum = "fa556e971e7b568dc775c136fc9de8c779b1c2fc3a63defaafadffdbd3181afa" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.11+zstd.1.5.6" +version = "2.0.12+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75652c55c0b6f3e6f12eb786fe1bc960396bf05a1eb3bf1f3691c3610ac2e6d4" +checksum = "0a4e40c320c3cb459d9a9ff6de98cff88f4751ee9275d140e2be94a2b74e4c13" dependencies = [ "cc", "pkg-config", From 7f867d6138f44962e280cb0ab3fed23a8f595deb Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 13 Jul 2024 15:10:29 +0200 Subject: [PATCH 0944/1003] toml: use major versions --- Cargo.toml | 26 +++++++++++++------------- packages/clock/Cargo.toml | 4 ++-- packages/configuration/Cargo.toml | 6 +++--- packages/located-error/Cargo.toml | 2 +- packages/primitives/Cargo.toml | 6 +++--- packages/torrent-repository/Cargo.toml | 8 ++++---- 6 files changed, 26 insertions(+), 26 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ed2de33e8..e54946be1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,29 +36,29 @@ axum = { version = "0", features = ["macros"] } axum-client-ip = "0" axum-extra = { version = "0", features = ["query"] } axum-server = { version = "0", features = ["tls-rustls"] } -camino = { 
version = "1.1.6", features = ["serde", "serde1"] } +camino = { version = "1", features = ["serde", "serde1"] } chrono = { version = "0", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } -crossbeam-skiplist = "0.1" -dashmap = "5.5.3" +crossbeam-skiplist = "0" +dashmap = "5" derive_more = "0" -figment = "0.10.18" +figment = "0" futures = "0" -futures-util = "0.3.30" +futures-util = "0" hex-literal = "0" -http-body = "1.0.0" +http-body = "1" hyper = "1" -hyper-util = { version = "0.1.3", features = ["http1", "http2", "tokio"] } +hyper-util = { version = "0", features = ["http1", "http2", "tokio"] } lazy_static = "1" multimap = "0" -parking_lot = "0.12.1" +parking_lot = "0" percent-encoding = "2" -pin-project-lite = "0.2.14" +pin-project-lite = "0" r2d2 = "0" r2d2_mysql = "24" r2d2_sqlite = { version = "0", features = ["bundled"] } rand = "0" -regex = "1.10.5" +regex = "1" reqwest = { version = "0", features = ["json"] } ringbuf = "0" serde = { version = "1", features = ["derive"] } @@ -74,14 +74,14 @@ torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12-develop", path = " torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "packages/located-error" } torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "packages/primitives" } torrust-tracker-torrent-repository = { version = "3.0.0-alpha.12-develop", path = "packages/torrent-repository" } -tower = { version = "0.4.13", features = ["timeout"] } +tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } trace = "0" tracing = "0" -tracing-subscriber = { version = "0.3.18", features = ["json"] } +tracing-subscriber = { version = "0", features = ["json"] } url = { version = "2", features = ["serde"] } uuid = { version = "1", features = ["v4"] } -zerocopy = "0.7.33" +zerocopy = "0" 
[package.metadata.cargo-machete] ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_bytes"] diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml index d7192b6e4..d71175fdc 100644 --- a/packages/clock/Cargo.toml +++ b/packages/clock/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library to a clock for the torrust tracker." -keywords = ["library", "clock", "torrents"] +keywords = ["clock", "library", "torrents"] name = "torrust-tracker-clock" readme = "README.md" @@ -16,8 +16,8 @@ rust-version.workspace = true version.workspace = true [dependencies] -lazy_static = "1" chrono = { version = "0", default-features = false, features = ["clock"] } +lazy_static = "1" torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index ae9c64cfe..5afa39b89 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -15,16 +15,16 @@ rust-version.workspace = true version.workspace = true [dependencies] -camino = { version = "1.1.6", features = ["serde", "serde1"] } +camino = { version = "1", features = ["serde", "serde1"] } derive_more = "0" -figment = { version = "0.10.18", features = ["env", "test", "toml"] } +figment = { version = "0", features = ["env", "test", "toml"] } serde = { version = "1", features = ["derive"] } serde_json = { version = "1", features = ["preserve_order"] } serde_with = "3" thiserror = "1" toml = "0" torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "../located-error" } -url = "2.5.2" +url = "2" [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index 4b2c73178..637ea3055 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -15,7 +15,7 @@ rust-version.workspace = true version.workspace = true [dependencies] 
-tracing = "0.1.40" +tracing = "0" [dev-dependencies] thiserror = "1" diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index 3b2406a69..174750fbb 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -15,9 +15,9 @@ rust-version.workspace = true version.workspace = true [dependencies] -derive_more = "0" -thiserror = "1" binascii = "0" +derive_more = "0" serde = { version = "1", features = ["derive"] } tdyne-peer-id = "1" -tdyne-peer-id-registry = "0" \ No newline at end of file +tdyne-peer-id-registry = "0" +thiserror = "1" diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 937ec11e2..8b46a8abe 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -16,10 +16,10 @@ rust-version.workspace = true version.workspace = true [dependencies] -crossbeam-skiplist = "0.1" -dashmap = "5.5.3" -futures = "0.3.29" -parking_lot = "0.12.1" +crossbeam-skiplist = "0" +dashmap = "5" +futures = "0" +parking_lot = "0" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = "3.0.0-alpha.12-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } From d2717ad1b9bb58a0b46f79620929becab64fa320 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 13 Jul 2024 23:43:24 +0200 Subject: [PATCH 0945/1003] fixup: doc fixups --- packages/configuration/src/lib.rs | 2 +- packages/configuration/src/v2/mod.rs | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 5e839b7b1..7f63b7f18 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -3,7 +3,7 @@ //! This module contains the configuration data structures for the //! Torrust Tracker, which is a `BitTorrent` tracker server. 
//! -//! The current version for configuration is [`v1`]. +//! The current version for configuration is [`v2`]. pub mod v2; use std::collections::HashMap; diff --git a/packages/configuration/src/v2/mod.rs b/packages/configuration/src/v2/mod.rs index 35c8b1070..5fa142b0b 100644 --- a/packages/configuration/src/v2/mod.rs +++ b/packages/configuration/src/v2/mod.rs @@ -39,11 +39,11 @@ //! Please refer to the documentation of each structure for more information //! about each section. //! -//! - [`Core configuration`](crate::v1::Configuration) -//! - [`HTTP API configuration`](crate::v1::tracker_api::HttpApi) -//! - [`HTTP Tracker configuration`](crate::v1::http_tracker::HttpTracker) -//! - [`UDP Tracker configuration`](crate::v1::udp_tracker::UdpTracker) -//! - [`Health Check API configuration`](crate::v1::health_check_api::HealthCheckApi) +//! - [`Core configuration`](crate::v2::Configuration) +//! - [`HTTP API configuration`](crate::v2::tracker_api::HttpApi) +//! - [`HTTP Tracker configuration`](crate::v2::http_tracker::HttpTracker) +//! - [`UDP Tracker configuration`](crate::v2::udp_tracker::UdpTracker) +//! - [`Health Check API configuration`](crate::v2::health_check_api::HealthCheckApi) //! //! ## Port binding //! @@ -78,7 +78,7 @@ //! //! Alternatively, you could setup a reverse proxy like Nginx or Apache to //! handle the SSL/TLS part and forward the requests to the tracker. If you do -//! that, you should set [`on_reverse_proxy`](crate::v1::core::Core::on_reverse_proxy) +//! that, you should set [`on_reverse_proxy`](crate::v2::network::Network::on_reverse_proxy) //! to `true` in the configuration file. It's out of scope for this //! documentation to explain in detail how to setup a reverse proxy, but the //! 
configuration file should be something like this: From 9791427541ffba649b6803797df12117536585db Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 13 Jul 2024 15:21:39 +0200 Subject: [PATCH 0946/1003] chore: update deps --- Cargo.lock | 174 +++++++------------------ Cargo.toml | 4 +- packages/torrent-repository/Cargo.toml | 6 +- 3 files changed, 53 insertions(+), 131 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e3c03fa69..889a6a5d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -64,6 +64,12 @@ dependencies = [ "alloc-no-stdlib", ] +[[package]] +name = "allocator-api2" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" + [[package]] name = "android-tzdata" version = "0.1.1" @@ -534,10 +540,12 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bigdecimal" -version = "0.3.1" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa" +checksum = "51d712318a27c7150326677b321a5fa91b55f6d9034ffd67f20319e147d40cee" dependencies = [ + "autocfg", + "libm", "num-bigint", "num-integer", "num-traits", @@ -632,7 +640,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" dependencies = [ "once_cell", - "proc-macro-crate 3.1.0", + "proc-macro-crate", "proc-macro2", "quote", "syn 2.0.71", @@ -660,6 +668,15 @@ dependencies = [ "alloc-stdlib", ] +[[package]] +name = "btoi" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd6407f73a9b8b6162d8a2ef999fe6afd7cc15902ebf42c5cd296addf17e0ad" +dependencies = [ + "num-traits", +] + [[package]] name = "bufstream" version = "0.1.4" @@ -1093,11 +1110,12 @@ dependencies = [ [[package]] name = "dashmap" -version = "5.5.3" 
+version = "6.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +checksum = "804c8821570c3f8b70230c2ba75ffa5c0f9a4189b9a432b6656c536712acae28" dependencies = [ "cfg-if", + "crossbeam-utils", "hashbrown 0.14.5", "lock_api", "once_cell", @@ -1596,15 +1614,6 @@ dependencies = [ "ahash 0.7.8", ] -[[package]] -name = "hashbrown" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" -dependencies = [ - "ahash 0.8.11", -] - [[package]] name = "hashbrown" version = "0.14.5" @@ -1612,6 +1621,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash 0.8.11", + "allocator-api2", ] [[package]] @@ -1961,79 +1971,6 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" -[[package]] -name = "lexical" -version = "6.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7aefb36fd43fef7003334742cbf77b243fcd36418a1d1bdd480d613a67968f6" -dependencies = [ - "lexical-core", -] - -[[package]] -name = "lexical-core" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cde5de06e8d4c2faabc400238f9ae1c74d5412d03a7bd067645ccbc47070e46" -dependencies = [ - "lexical-parse-float", - "lexical-parse-integer", - "lexical-util", - "lexical-write-float", - "lexical-write-integer", -] - -[[package]] -name = "lexical-parse-float" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683b3a5ebd0130b8fb52ba0bdc718cc56815b6a097e28ae5a6997d0ad17dc05f" -dependencies = [ - "lexical-parse-integer", - "lexical-util", - "static_assertions", -] - -[[package]] -name = 
"lexical-parse-integer" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d0994485ed0c312f6d965766754ea177d07f9c00c9b82a5ee62ed5b47945ee9" -dependencies = [ - "lexical-util", - "static_assertions", -] - -[[package]] -name = "lexical-util" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5255b9ff16ff898710eb9eb63cb39248ea8a5bb036bea8085b1a767ff6c4e3fc" -dependencies = [ - "static_assertions", -] - -[[package]] -name = "lexical-write-float" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accabaa1c4581f05a3923d1b4cfd124c329352288b7b9da09e766b0668116862" -dependencies = [ - "lexical-util", - "lexical-write-integer", - "static_assertions", -] - -[[package]] -name = "lexical-write-integer" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1b6f3d1f4422866b68192d62f77bc5c700bee84f3069f2469d7bc8c77852446" -dependencies = [ - "lexical-util", - "static_assertions", -] - [[package]] name = "libc" version = "0.2.155" @@ -2050,6 +1987,12 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "libm" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + [[package]] name = "libsqlite3-sys" version = "0.28.0" @@ -2117,11 +2060,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.10.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718e8fae447df0c7e1ba7f5189829e63fd536945c8988d61444c19039f16b670" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ - "hashbrown 0.13.2", + "hashbrown 0.14.5", ] [[package]] @@ -2206,9 +2149,9 @@ dependencies = [ [[package]] name = "mysql" -version = "24.0.0" +version = "25.0.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfe2babc5f5b354eab9c0a0e40da3e69c4d77421c8b9b6ee03f97acc75bd7955" +checksum = "c6ad644efb545e459029b1ffa7c969d830975bd76906820913247620df10050b" dependencies = [ "bufstream", "bytes", @@ -2220,7 +2163,6 @@ dependencies = [ "mysql_common", "named_pipe", "native-tls", - "once_cell", "pem", "percent-encoding", "serde", @@ -2232,14 +2174,14 @@ dependencies = [ [[package]] name = "mysql-common-derive" -version = "0.30.2" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56b0d8a0db9bf6d2213e11f2c701cb91387b0614361625ab7b9743b41aa4938f" +checksum = "afe0450cc9344afff34915f8328600ab5ae19260802a334d0f72d2d5bdda3bfe" dependencies = [ "darling", "heck 0.4.1", "num-bigint", - "proc-macro-crate 1.3.1", + "proc-macro-crate", "proc-macro-error", "proc-macro2", "quote", @@ -2250,15 +2192,16 @@ dependencies = [ [[package]] name = "mysql_common" -version = "0.30.6" +version = "0.32.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57349d5a326b437989b6ee4dc8f2f34b0cc131202748414712a8e7d98952fc8c" +checksum = "478b0ff3f7d67b79da2b96f56f334431aef65e15ba4b29dd74a4236e29582bdc" dependencies = [ "base64 0.21.7", "bigdecimal", "bindgen", "bitflags 2.6.0", "bitvec", + "btoi", "byteorder", "bytes", "cc", @@ -2267,7 +2210,6 @@ dependencies = [ "flate2", "frunk", "lazy_static", - "lexical", "mysql-common-derive", "num-bigint", "num-traits", @@ -2284,6 +2226,7 @@ dependencies = [ "thiserror", "time", "uuid", + "zstd", ] [[package]] @@ -2532,11 +2475,11 @@ dependencies = [ [[package]] name = "pem" -version = "2.0.1" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "serde", ] @@ -2730,16 +2673,6 @@ 
dependencies = [ "termtree", ] -[[package]] -name = "proc-macro-crate" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" -dependencies = [ - "once_cell", - "toml_edit 0.19.15", -] - [[package]] name = "proc-macro-crate" version = "3.1.0" @@ -2848,9 +2781,9 @@ dependencies = [ [[package]] name = "r2d2_mysql" -version = "24.0.0" +version = "25.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fe5127e6c21971cdb9580f2f54cbe6d9c2226eb861036c3ca6d390c25f52574" +checksum = "93963fe09ca35b0311d089439e944e42a6cb39bf8ea323782ddb31240ba2ae87" dependencies = [ "mysql", "r2d2", @@ -3092,7 +3025,7 @@ checksum = "4165dfae59a39dd41d8dec720d3cbfbc71f69744efb480a3920f5d4e0cc6798d" dependencies = [ "cfg-if", "glob", - "proc-macro-crate 3.1.0", + "proc-macro-crate", "proc-macro2", "quote", "regex", @@ -3898,17 +3831,6 @@ dependencies = [ "serde", ] -[[package]] -name = "toml_edit" -version = "0.19.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" -dependencies = [ - "indexmap 2.2.6", - "toml_datetime", - "winnow 0.5.40", -] - [[package]] name = "toml_edit" version = "0.21.1" diff --git a/Cargo.toml b/Cargo.toml index e54946be1..5e4401516 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,7 +40,7 @@ camino = { version = "1", features = ["serde", "serde1"] } chrono = { version = "0", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } crossbeam-skiplist = "0" -dashmap = "5" +dashmap = "6" derive_more = "0" figment = "0" futures = "0" @@ -55,7 +55,7 @@ parking_lot = "0" percent-encoding = "2" pin-project-lite = "0" r2d2 = "0" -r2d2_mysql = "24" +r2d2_mysql = "25" r2d2_sqlite = { version = "0", features = ["bundled"] } rand = "0" regex = "1" diff --git a/packages/torrent-repository/Cargo.toml 
b/packages/torrent-repository/Cargo.toml index 8b46a8abe..53bb41e52 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library that provides a repository of torrents files and their peers." -keywords = ["torrents", "repository", "library"] +keywords = ["library", "repository", "torrents"] name = "torrust-tracker-torrent-repository" readme = "README.md" @@ -17,7 +17,7 @@ version.workspace = true [dependencies] crossbeam-skiplist = "0" -dashmap = "5" +dashmap = "6" futures = "0" parking_lot = "0" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } @@ -26,9 +26,9 @@ torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = ".. torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } [dev-dependencies] +async-std = { version = "1", features = ["attributes", "tokio1"] } criterion = { version = "0", features = ["async_tokio"] } rstest = "0" -async-std = {version = "1", features = ["attributes", "tokio1"] } [[bench]] harness = false From 8cefad624d7550717a7f23858741aaa61f999436 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 15 Jul 2024 09:26:28 +0100 Subject: [PATCH 0947/1003] fix: [#933] uppercase for containerfile keywords --- Containerfile | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Containerfile b/Containerfile index d302e5c66..263053390 100644 --- a/Containerfile +++ b/Containerfile @@ -3,13 +3,13 @@ # Torrust Tracker ## Builder Image -FROM docker.io/library/rust:bookworm as chef +FROM docker.io/library/rust:bookworm AS chef WORKDIR /tmp RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash RUN cargo binstall --no-confirm cargo-chef cargo-nextest ## Tester Image -FROM docker.io/library/rust:slim-bookworm as tester +FROM 
docker.io/library/rust:slim-bookworm AS tester WORKDIR /tmp RUN apt-get update; apt-get install -y curl sqlite3; apt-get autoclean @@ -21,7 +21,7 @@ RUN mkdir -p /app/share/torrust/default/database/; \ sqlite3 /app/share/torrust/default/database/tracker.sqlite3.db "VACUUM;" ## Su Exe Compile -FROM docker.io/library/gcc:bookworm as gcc +FROM docker.io/library/gcc:bookworm AS gcc COPY ./contrib/dev-tools/su-exec/ /usr/local/src/su-exec/ RUN cc -Wall -Werror -g /usr/local/src/su-exec/su-exec.c -o /usr/local/bin/su-exec; chmod +x /usr/local/bin/su-exec @@ -62,7 +62,7 @@ RUN cargo nextest archive --tests --benches --examples --workspace --all-targets # Extract and Test (debug) -FROM tester as test_debug +FROM tester AS test_debug WORKDIR /test COPY . /test/src/ COPY --from=build_debug \ @@ -76,7 +76,7 @@ RUN mkdir /app/lib/; cp -l $(realpath $(ldd /app/bin/torrust-tracker | grep "lib RUN chown -R root:root /app; chmod -R u=rw,go=r,a+X /app; chmod -R a+x /app/bin # Extract and Test (release) -FROM tester as test +FROM tester AS test WORKDIR /test COPY . 
/test/src COPY --from=build \ @@ -91,7 +91,7 @@ RUN chown -R root:root /app; chmod -R u=rw,go=r,a+X /app; chmod -R a+x /app/bin ## Runtime -FROM gcr.io/distroless/cc-debian12:debug as runtime +FROM gcr.io/distroless/cc-debian12:debug AS runtime RUN ["/busybox/cp", "-sp", "/busybox/sh","/busybox/cat","/busybox/ls","/busybox/env", "/bin/"] COPY --from=gcc --chmod=0555 /usr/local/bin/su-exec /bin/su-exec @@ -129,14 +129,14 @@ ENTRYPOINT ["/usr/local/bin/entry.sh"] ## Torrust-Tracker (debug) -FROM runtime as debug +FROM runtime AS debug ENV RUNTIME="debug" COPY --from=test_debug /app/ /usr/ RUN env CMD ["sh"] ## Torrust-Tracker (release) (default) -FROM runtime as release +FROM runtime AS release ENV RUNTIME="release" COPY --from=test /app/ /usr/ HEALTHCHECK --interval=5s --timeout=5s --start-period=3s --retries=3 \ From 82a8b4355330db1f0f8b601b4da31b489b2cd5a6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 29 Jul 2024 10:00:27 +0100 Subject: [PATCH 0948/1003] chore(deps): update dependencies ```output cargo update Updating crates.io index Locking 47 packages to latest compatible versions Updating anstream v0.6.14 -> v0.6.15 Updating anstyle v1.0.7 -> v1.0.8 Updating anstyle-parse v0.2.4 -> v0.2.5 Updating anstyle-query v1.1.0 -> v1.1.1 Updating anstyle-wincon v3.0.3 -> v3.0.4 Updating async-compression v0.4.11 -> v0.4.12 Updating async-executor v1.12.0 -> v1.13.0 Updating cc v1.1.2 -> v1.1.6 Updating clap v4.5.9 -> v4.5.11 Updating clap_builder v4.5.9 -> v4.5.11 Updating clap_derive v4.5.8 -> v4.5.11 Updating clap_lex v0.7.1 -> v0.7.2 Updating colorchoice v1.0.1 -> v1.0.2 Updating is_terminal_polyfill v1.70.0 -> v1.70.1 Updating jobserver v0.1.31 -> v0.1.32 Updating libloading v0.8.4 -> v0.8.5 Updating libsqlite3-sys v0.28.0 -> v0.30.1 Updating mio v0.8.11 -> v1.0.1 Updating mockall v0.12.1 -> v0.13.0 Updating mockall_derive v0.12.1 -> v0.13.0 Removing num_cpus v1.16.0 Updating object v0.36.1 -> v0.36.2 Updating openssl v0.10.64 -> v0.10.66 Updating openssl-sys 
v0.9.102 -> v0.9.103 Updating predicates v3.1.0 -> v3.1.2 Updating predicates-core v1.0.6 -> v1.0.8 Updating predicates-tree v1.0.9 -> v1.0.11 Updating r2d2_sqlite v0.24.0 -> v0.25.0 Updating redox_syscall v0.5.2 -> v0.5.3 Updating rusqlite v0.31.0 -> v0.32.1 Updating rustls v0.23.11 -> v0.23.12 Updating rustls-webpki v0.102.5 -> v0.102.6 Updating security-framework v2.11.0 -> v2.11.1 Updating security-framework-sys v2.11.0 -> v2.11.1 Updating serde_json v1.0.120 -> v1.0.121 Updating serde_spanned v0.6.6 -> v0.6.7 Updating serde_with v3.8.3 -> v3.9.0 Updating serde_with_macros v3.8.3 -> v3.9.0 Updating syn v2.0.71 -> v2.0.72 Updating thiserror v1.0.62 -> v1.0.63 Updating thiserror-impl v1.0.62 -> v1.0.63 Updating tokio v1.38.0 -> v1.39.2 Updating tokio-macros v2.3.0 -> v2.4.0 Updating toml v0.8.14 -> v0.8.16 Updating toml_datetime v0.6.6 -> v0.6.7 Updating toml_edit v0.22.15 -> v0.22.17 Updating version_check v0.9.4 -> v0.9.5 Updating winnow v0.6.13 -> v0.6.16 ``` --- Cargo.lock | 277 +++++++++++++++++++++++++---------------------------- 1 file changed, 133 insertions(+), 144 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 889a6a5d7..e4f7a938e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -93,9 +93,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.14" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" dependencies = [ "anstyle", "anstyle-parse", @@ -108,33 +108,33 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = 
"anstyle-parse" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" +checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.3" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -219,9 +219,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5" +checksum = "fec134f64e2bc57411226dfc4e52dec859ddfc7e711fc5e07b612584f000e4aa" dependencies = [ "brotli", "flate2", @@ -235,9 +235,9 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8828ec6e544c02b0d6691d21ed9f9218d0384a82542855073c2a3f58304aaf0" +checksum = "d7ebdfa2ebdab6b1760375fa7d6f382b9f486eac35fc994625a00e89280bdbb7" dependencies = [ "async-task", "concurrent-queue", @@ -362,7 +362,7 @@ checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -485,7 +485,7 @@ dependencies = [ "heck 0.4.1", 
"proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -574,7 +574,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -643,7 +643,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", "syn_derive", ] @@ -755,13 +755,12 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.2" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47de7e88bbbd467951ae7f5a6f34f70d1b4d9cfce53d5fd70f74ebe118b3db56" +checksum = "2aba8f4e9906c7ce3c73463f62a7f0c65183ada1a2d47e397cc8810827f9694f" dependencies = [ "jobserver", "libc", - "once_cell", ] [[package]] @@ -838,9 +837,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.9" +version = "4.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64acc1846d54c1fe936a78dc189c34e28d3f5afc348403f28ecf53660b9b8462" +checksum = "35723e6a11662c2afb578bcf0b88bf6ea8e21282a953428f240574fcc3a2b5b3" dependencies = [ "clap_builder", "clap_derive", @@ -848,9 +847,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.9" +version = "4.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8393d67ba2e7bfaf28a23458e4e2b543cc73a99595511eb207fdb8aede942" +checksum = "49eb96cbfa7cfa35017b7cd548c75b14c3118c98b423041d70562665e07fb0fa" dependencies = [ "anstream", "anstyle", @@ -860,21 +859,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.8" +version = "4.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bac35c6dafb060fd4d275d9a4ffae97917c13a6327903a8be2153cd964f7085" +checksum = "5d029b67f89d30bbb547c89fd5161293c0aec155fc691d7924b64550662db93e" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] name = "clap_lex" -version = "0.7.1" +version = "0.7.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "cmake" @@ -887,9 +886,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" +checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" [[package]] name = "compact_str" @@ -1094,7 +1093,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -1105,7 +1104,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -1142,7 +1141,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -1153,7 +1152,7 @@ checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -1375,7 +1374,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -1387,7 +1386,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -1399,7 +1398,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -1492,7 +1491,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -1746,7 +1745,7 @@ dependencies = [ "http", "hyper", "hyper-util", - "rustls 0.23.11", + "rustls 0.23.12", 
"rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -1904,9 +1903,9 @@ dependencies = [ [[package]] name = "is_terminal_polyfill" -version = "1.70.0" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itertools" @@ -1934,9 +1933,9 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] @@ -1979,9 +1978,9 @@ checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libloading" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", "windows-targets 0.52.6", @@ -1995,9 +1994,9 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libsqlite3-sys" -version = "0.28.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" dependencies = [ "cc", "pkg-config", @@ -2102,25 +2101,25 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.11" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = 
"4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" dependencies = [ + "hermit-abi 0.3.9", "libc", "wasi", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "mockall" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43766c2b5203b10de348ffe19f7e54564b64f3d6018ff7648d1e2d6d3a0f0a48" +checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" dependencies = [ "cfg-if", "downcast", "fragile", - "lazy_static", "mockall_derive", "predicates", "predicates-tree", @@ -2128,14 +2127,14 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2" +checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -2185,7 +2184,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", "termcolor", "thiserror", ] @@ -2340,21 +2339,11 @@ dependencies = [ "autocfg", ] -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi 0.3.9", - "libc", -] - [[package]] name = "object" -version = "0.36.1" +version = "0.36.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "081b846d1d56ddfc18fdf1a922e4f6e07a11768ea1b92dec44e42b72712ccfce" +checksum = "3f203fa8daa7bb185f760ae12bd8e097f63d17041dcdcaf675ac54cdf863170e" dependencies = [ "memchr", ] @@ -2373,9 +2362,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "openssl" -version = "0.10.64" +version = "0.10.66" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" dependencies = [ "bitflags 2.6.0", "cfg-if", @@ -2394,7 +2383,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -2405,9 +2394,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.102" +version = "0.9.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" dependencies = [ "cc", "libc", @@ -2470,7 +2459,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -2544,7 +2533,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -2649,9 +2638,9 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "predicates" -version = "3.1.0" +version = "3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b87bfd4605926cdfefc1c3b5f8fe560e3feca9d5552cf68c466d3d8236c7e8" +checksum = "7e9086cc7640c29a356d1a29fd134380bee9d8f79a17410aa76e7ad295f42c97" dependencies = [ "anstyle", "predicates-core", @@ -2659,15 +2648,15 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174" +checksum = "ae8177bee8e75d6846599c6b9ff679ed51e882816914eec639944d7c9aa11931" [[package]] name = 
"predicates-tree" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf" +checksum = "41b740d195ed3166cd147c8047ec98db0e22ec019eb8eeb76d343b795304fb13" dependencies = [ "predicates-core", "termtree", @@ -2723,7 +2712,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", "version_check", "yansi", ] @@ -2791,9 +2780,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a982edf65c129796dba72f8775b292ef482b40d035e827a9825b3bc07ccc5f2" +checksum = "eb14dba8247a6a15b7fdbc7d389e2e6f03ee9f184f87117706d509c092dfe846" dependencies = [ "r2d2", "rusqlite", @@ -2858,9 +2847,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd" +checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" dependencies = [ "bitflags 2.6.0", ] @@ -3031,15 +3020,15 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.71", + "syn 2.0.72", "unicode-ident", ] [[package]] name = "rusqlite" -version = "0.31.0" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" +checksum = "7753b721174eb8ff87a9a0e799e2d7bc3749323e773db92e0984debb00019d6e" dependencies = [ "bitflags 2.6.0", "fallible-iterator", @@ -3127,13 +3116,13 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.11" +version = "0.23.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4828ea528154ae444e5a642dbb7d5623354030dc9822b83fd9bb79683c7399d0" +checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" dependencies = [ "once_cell", "rustls-pki-types", - "rustls-webpki 0.102.5", + "rustls-webpki 0.102.6", "subtle", "zeroize", ] @@ -3166,9 +3155,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.5" +version = "0.102.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a6fccd794a42c2c105b513a2f62bc3fd8f3ba57a4593677ceb0bd035164d78" +checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" dependencies = [ "ring", "rustls-pki-types", @@ -3244,9 +3233,9 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "security-framework" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ "bitflags 2.6.0", "core-foundation", @@ -3257,9 +3246,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" +checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" dependencies = [ "core-foundation-sys", "libc", @@ -3307,7 +3296,7 @@ checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -3325,12 +3314,13 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.120" +version = "1.0.121" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" +checksum = 
"4ab380d7d9f22ef3f21ad3e6c1ebe8e4fc7a2000ccba2e4d71fc96f15b2cb609" dependencies = [ "indexmap 2.2.6", "itoa", + "memchr", "ryu", "serde", ] @@ -3353,14 +3343,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] name = "serde_spanned" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" +checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" dependencies = [ "serde", ] @@ -3379,9 +3369,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.8.3" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e73139bc5ec2d45e6c5fd85be5a46949c1c39a4c18e56915f5eb4c12f975e377" +checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" dependencies = [ "base64 0.22.1", "chrono", @@ -3397,14 +3387,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.8.3" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b80d3d6b56b64335c0180e5ffde23b3c5e08c14c585b51a15bd0e95393f46703" +checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -3547,9 +3537,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.71" +version = "2.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b146dcf730474b4bcd16c311627b31ede9ab149045db4d6088b3becaea046462" +checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" dependencies = [ "proc-macro2", "quote", @@ -3565,7 +3555,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -3653,22 +3643,22 @@ checksum = 
"3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.62" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2675633b1499176c2dff06b0856a27976a8f9d436737b4cf4f312d4d91d8bbb" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.62" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d20468752b09f49e909e55a5d338caa8bedf615594e9d80bc4c565d30faf798c" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -3739,31 +3729,30 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.38.0" +version = "1.39.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" +checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" dependencies = [ "backtrace", "bytes", "libc", "mio", - "num_cpus", "pin-project-lite", "signal-hook-registry", "socket2 0.5.7", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -3792,7 +3781,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.11", + "rustls 0.23.12", 
"rustls-pki-types", "tokio", ] @@ -3812,21 +3801,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.14" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f49eb2ab21d2f26bd6db7bf383edc527a7ebaee412d17af4d40fdccd442f335" +checksum = "81967dd0dd2c1ab0bc3468bd7caecc32b8a4aa47d0c8c695d8c2b2108168d62c" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.15", + "toml_edit 0.22.17", ] [[package]] name = "toml_datetime" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" +checksum = "f8fb9f64314842840f1d940ac544da178732128f1c78c21772e876579e0da1db" dependencies = [ "serde", ] @@ -3844,15 +3833,15 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.15" +version = "0.22.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59a3a72298453f564e2b111fa896f8d07fabb36f51f06d7e875fc5e0b5a3ef1" +checksum = "8d9f8729f5aea9562aac1cc0441f5d6de3cff1ee0c5d67293eeca5eb36ee7c16" dependencies = [ "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.13", + "winnow 0.6.16", ] [[package]] @@ -4076,7 +4065,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] @@ -4234,9 +4223,9 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "waker-fn" @@ -4290,7 +4279,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", 
"wasm-bindgen-shared", ] @@ -4324,7 +4313,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4535,9 +4524,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.13" +version = "0.6.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b5e5f6c299a3c7890b876a2a587f3115162487e704907d9b6cd29473052ba1" +checksum = "b480ae9340fc261e6be3e95a1ba86d54ae3f9171132a73ce8d4bbaf68339507c" dependencies = [ "memchr", ] @@ -4585,7 +4574,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.72", ] [[package]] From 09beb52d8fe8d6833f881bc6d2365268564e84d8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 29 Jul 2024 16:56:48 +0100 Subject: [PATCH 0949/1003] feat: [#974] new API endpoint to upload pre-existing keys You can test it with: ```console curl -X POST http://localhost:1212/api/v1/keys?token=MyAccessToken \ -H "Content-Type: application/json" \ -d '{ "key": "Xc1L4PbQJSFGlrgSRZl8wxSFAuMa21z7", "seconds_valid": 7200 }' ``` The `key` field is optional. If it's not provided a random key will be generated. 
--- src/core/auth.rs | 2 +- src/core/mod.rs | 38 +++++++++++-- src/servers/apis/v1/context/auth_key/forms.rs | 8 +++ .../apis/v1/context/auth_key/handlers.rs | 53 ++++++++++++++++++- src/servers/apis/v1/context/auth_key/mod.rs | 1 + .../apis/v1/context/auth_key/responses.rs | 21 +++++++- .../apis/v1/context/auth_key/routes.rs | 8 ++- src/servers/apis/v1/responses.rs | 3 +- 8 files changed, 122 insertions(+), 12 deletions(-) create mode 100644 src/servers/apis/v1/context/auth_key/forms.rs diff --git a/src/core/auth.rs b/src/core/auth.rs index 94d455d7e..00ded71ef 100644 --- a/src/core/auth.rs +++ b/src/core/auth.rs @@ -152,7 +152,7 @@ pub struct Key(String); /// ``` /// /// If the string does not contains a valid key, the parser function will return this error. -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Display)] pub struct ParseKeyError; impl FromStr for Key { diff --git a/src/core/mod.rs b/src/core/mod.rs index 64d5e2c9a..f0853ec27 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -453,6 +453,7 @@ use std::panic::Location; use std::sync::Arc; use std::time::Duration; +use auth::ExpiringKey; use databases::driver::Driver; use derive_more::Constructor; use tokio::sync::mpsc::error::SendError; @@ -460,9 +461,9 @@ use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::v2::database; use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_torrent_repository::entry::EntrySync; use torrust_tracker_torrent_repository::repository::Repository; use tracing::debug; @@ -804,6 +805,37 @@ impl Tracker { /// Will return a `database::Error` if unable to add the `auth_key` to the database. 
pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { let auth_key = auth::generate(lifetime); + + self.database.add_key_to_keys(&auth_key)?; + self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); + Ok(auth_key) + } + + /// It adds a pre-generated authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the + /// database. For example, if the key already exist. + /// + /// # Arguments + /// + /// * `lifetime` - The duration in seconds for the new key. The key will be + /// no longer valid after `lifetime` seconds. + pub async fn add_auth_key( + &self, + key: Key, + valid_until: DurationSinceUnixEpoch, + ) -> Result { + let auth_key = ExpiringKey { key, valid_until }; + + // code-review: should we return a friendly error instead of the DB + // constrain error when the key already exist? For now, it's returning + // the specif error for each DB driver when a UNIQUE constrain fails. self.database.add_key_to_keys(&auth_key)?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); Ok(auth_key) @@ -816,10 +848,6 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to remove the `key` to the database. - /// - /// # Panics - /// - /// Will panic if key cannot be converted into a valid `Key`. 
pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> { self.database.remove_key_from_keys(key)?; self.keys.write().await.remove(key); diff --git a/src/servers/apis/v1/context/auth_key/forms.rs b/src/servers/apis/v1/context/auth_key/forms.rs new file mode 100644 index 000000000..9c023ab72 --- /dev/null +++ b/src/servers/apis/v1/context/auth_key/forms.rs @@ -0,0 +1,8 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug)] +pub struct AddKeyForm { + #[serde(rename = "key")] + pub opt_key: Option, + pub seconds_valid: u64, +} diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs index 792d9507e..3f85089ec 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -3,17 +3,66 @@ use std::str::FromStr; use std::sync::Arc; use std::time::Duration; -use axum::extract::{Path, State}; +use axum::extract::{self, Path, State}; use axum::response::Response; use serde::Deserialize; +use torrust_tracker_clock::clock::Time; +use super::forms::AddKeyForm; use super::responses::{ - auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, + auth_key_response, failed_to_add_key_response, failed_to_delete_key_response, failed_to_generate_key_response, + failed_to_reload_keys_response, invalid_auth_key_duration_response, invalid_auth_key_response, }; use crate::core::auth::Key; use crate::core::Tracker; use crate::servers::apis::v1::context::auth_key::resources::AuthKey; use crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; +use crate::CurrentClock; + +/// It handles the request to add a new authentication key. +/// +/// It returns these types of responses: +/// +/// - `200` with a json [`AuthKey`] +/// resource. If the key was generated successfully. 
+/// - `400` with an error if the key couldn't been added because of an invalid +/// request. +/// - `500` with serialized error in debug format. If the key couldn't be +/// generated. +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#generate-a-new-authentication-key) +/// for more information about this endpoint. +pub async fn add_auth_key_handler( + State(tracker): State>, + extract::Json(add_key_form): extract::Json, +) -> Response { + match add_key_form.opt_key { + Some(pre_existing_key) => { + let Some(valid_until) = CurrentClock::now_add(&Duration::from_secs(add_key_form.seconds_valid)) else { + return invalid_auth_key_duration_response(add_key_form.seconds_valid); + }; + + let key = pre_existing_key.parse::(); + + match key { + Ok(key) => match tracker.add_auth_key(key, valid_until).await { + Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), + Err(e) => failed_to_add_key_response(e), + }, + Err(e) => invalid_auth_key_response(&pre_existing_key, &e), + } + } + None => { + match tracker + .generate_auth_key(Duration::from_secs(add_key_form.seconds_valid)) + .await + { + Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), + Err(e) => failed_to_generate_key_response(e), + } + } + } +} /// It handles the request to generate a new authentication key. /// diff --git a/src/servers/apis/v1/context/auth_key/mod.rs b/src/servers/apis/v1/context/auth_key/mod.rs index 330249b58..b00d7a2cb 100644 --- a/src/servers/apis/v1/context/auth_key/mod.rs +++ b/src/servers/apis/v1/context/auth_key/mod.rs @@ -119,6 +119,7 @@ //! "status": "ok" //! } //! 
``` +pub mod forms; pub mod handlers; pub mod resources; pub mod responses; diff --git a/src/servers/apis/v1/context/auth_key/responses.rs b/src/servers/apis/v1/context/auth_key/responses.rs index 51be162c5..dfe449b46 100644 --- a/src/servers/apis/v1/context/auth_key/responses.rs +++ b/src/servers/apis/v1/context/auth_key/responses.rs @@ -4,8 +4,9 @@ use std::error::Error; use axum::http::{header, StatusCode}; use axum::response::{IntoResponse, Response}; +use crate::core::auth::ParseKeyError; use crate::servers::apis::v1::context::auth_key::resources::AuthKey; -use crate::servers::apis::v1::responses::unhandled_rejection_response; +use crate::servers::apis::v1::responses::{bad_request_response, unhandled_rejection_response}; /// `200` response that contains the `AuthKey` resource as json. /// @@ -22,12 +23,20 @@ pub fn auth_key_response(auth_key: &AuthKey) -> Response { .into_response() } +// Error responses + /// `500` error response when a new authentication key cannot be generated. #[must_use] pub fn failed_to_generate_key_response(e: E) -> Response { unhandled_rejection_response(format!("failed to generate key: {e}")) } +/// `500` error response when the provide key cannot be added. +#[must_use] +pub fn failed_to_add_key_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to add key: {e}")) +} + /// `500` error response when an authentication key cannot be deleted. 
#[must_use] pub fn failed_to_delete_key_response(e: E) -> Response { @@ -40,3 +49,13 @@ pub fn failed_to_delete_key_response(e: E) -> Response { pub fn failed_to_reload_keys_response(e: E) -> Response { unhandled_rejection_response(format!("failed to reload keys: {e}")) } + +#[must_use] +pub fn invalid_auth_key_response(auth_key: &str, error: &ParseKeyError) -> Response { + bad_request_response(&format!("Invalid URL: invalid auth key: string \"{auth_key}\", {error}")) +} + +#[must_use] +pub fn invalid_auth_key_duration_response(duration: u64) -> Response { + bad_request_response(&format!("Invalid URL: invalid auth key duration: \"{duration}\"")) +} diff --git a/src/servers/apis/v1/context/auth_key/routes.rs b/src/servers/apis/v1/context/auth_key/routes.rs index 003ee5af4..9452f2c0f 100644 --- a/src/servers/apis/v1/context/auth_key/routes.rs +++ b/src/servers/apis/v1/context/auth_key/routes.rs @@ -11,7 +11,7 @@ use std::sync::Arc; use axum::routing::{get, post}; use axum::Router; -use super::handlers::{delete_auth_key_handler, generate_auth_key_handler, reload_keys_handler}; +use super::handlers::{add_auth_key_handler, delete_auth_key_handler, generate_auth_key_handler, reload_keys_handler}; use crate::core::Tracker; /// It adds the routes to the router for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. 
@@ -30,5 +30,9 @@ pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { .with_state(tracker.clone()), ) // Keys command - .route(&format!("{prefix}/keys/reload"), get(reload_keys_handler).with_state(tracker)) + .route( + &format!("{prefix}/keys/reload"), + get(reload_keys_handler).with_state(tracker.clone()), + ) + .route(&format!("{prefix}/keys"), post(add_auth_key_handler).with_state(tracker)) } diff --git a/src/servers/apis/v1/responses.rs b/src/servers/apis/v1/responses.rs index ecaf90098..d2c52ac40 100644 --- a/src/servers/apis/v1/responses.rs +++ b/src/servers/apis/v1/responses.rs @@ -61,7 +61,8 @@ pub fn invalid_auth_key_param_response(invalid_key: &str) -> Response { bad_request_response(&format!("Invalid auth key id param \"{invalid_key}\"")) } -fn bad_request_response(body: &str) -> Response { +#[must_use] +pub fn bad_request_response(body: &str) -> Response { ( StatusCode::BAD_REQUEST, [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], From 583b305ed32a310f5897be7de943df8e4975e751 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 30 Jul 2024 10:55:34 +0100 Subject: [PATCH 0950/1003] test: [#874] new key generation endpoint --- tests/servers/api/v1/asserts.rs | 35 ++- tests/servers/api/v1/client.rs | 28 +- .../api/v1/contract/context/auth_key.rs | 240 +++++++++++++++--- 3 files changed, 267 insertions(+), 36 deletions(-) diff --git a/tests/servers/api/v1/asserts.rs b/tests/servers/api/v1/asserts.rs index 955293db1..ba906f65f 100644 --- a/tests/servers/api/v1/asserts.rs +++ b/tests/servers/api/v1/asserts.rs @@ -61,6 +61,12 @@ pub async fn assert_bad_request(response: Response, body: &str) { assert_eq!(response.text().await.unwrap(), body); } +pub async fn assert_unprocessable_content(response: Response, text: &str) { + assert_eq!(response.status(), 422); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + assert!(response.text().await.unwrap().contains(text)); +} + pub async fn 
assert_not_found(response: Response) { assert_eq!(response.status(), 404); // todo: missing header in the response @@ -82,10 +88,37 @@ pub async fn assert_invalid_infohash_param(response: Response, invalid_infohash: .await; } -pub async fn assert_invalid_auth_key_param(response: Response, invalid_auth_key: &str) { +pub async fn assert_invalid_auth_key_get_param(response: Response, invalid_auth_key: &str) { assert_bad_request(response, &format!("Invalid auth key id param \"{}\"", &invalid_auth_key)).await; } +pub async fn assert_invalid_auth_key_post_param(response: Response, invalid_auth_key: &str) { + assert_bad_request( + response, + &format!( + "Invalid URL: invalid auth key: string \"{}\", ParseKeyError", + &invalid_auth_key + ), + ) + .await; +} + +pub async fn _assert_unprocessable_auth_key_param(response: Response, _invalid_value: &str) { + assert_unprocessable_content( + response, + "Failed to deserialize the JSON body into the target type: seconds_valid: invalid type", + ) + .await; +} + +pub async fn assert_unprocessable_auth_key_duration_param(response: Response, _invalid_value: &str) { + assert_unprocessable_content( + response, + "Failed to deserialize the JSON body into the target type: seconds_valid: invalid type", + ) + .await; +} + pub async fn assert_invalid_key_duration_param(response: Response, invalid_key_duration: &str) { assert_bad_request( response, diff --git a/tests/servers/api/v1/client.rs b/tests/servers/api/v1/client.rs index 61e98e742..91f18acac 100644 --- a/tests/servers/api/v1/client.rs +++ b/tests/servers/api/v1/client.rs @@ -1,4 +1,5 @@ use reqwest::Response; +use serde::Serialize; use crate::common::http::{Query, QueryParam, ReqwestQuery}; use crate::servers::api::connection_info::ConnectionInfo; @@ -18,7 +19,11 @@ impl Client { } pub async fn generate_auth_key(&self, seconds_valid: i32) -> Response { - self.post(&format!("key/{}", &seconds_valid)).await + self.post_empty(&format!("key/{}", &seconds_valid)).await + } + + pub async 
fn add_auth_key(&self, add_key_form: AddKeyForm) -> Response { + self.post_form("keys", &add_key_form).await } pub async fn delete_auth_key(&self, key: &str) -> Response { @@ -30,7 +35,7 @@ impl Client { } pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { - self.post(&format!("whitelist/{}", &info_hash)).await + self.post_empty(&format!("whitelist/{}", &info_hash)).await } pub async fn remove_torrent_from_whitelist(&self, info_hash: &str) -> Response { @@ -63,10 +68,20 @@ impl Client { self.get_request_with_query(path, query).await } - pub async fn post(&self, path: &str) -> Response { + pub async fn post_empty(&self, path: &str) -> Response { + reqwest::Client::new() + .post(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())) + .send() + .await + .unwrap() + } + + pub async fn post_form(&self, path: &str, form: &T) -> Response { reqwest::Client::new() .post(self.base_url(path).clone()) .query(&ReqwestQuery::from(self.query_with_token())) + .json(&form) .send() .await .unwrap() @@ -114,3 +129,10 @@ pub async fn get(path: &str, query: Option) -> Response { None => reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap(), } } + +#[derive(Serialize, Debug)] +pub struct AddKeyForm { + #[serde(rename = "key")] + pub opt_key: Option, + pub seconds_valid: u64, +} diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index f9630bafe..f02267b8b 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -1,23 +1,28 @@ use std::time::Duration; +use serde::Serialize; use torrust_tracker::core::auth::Key; use torrust_tracker_test_helpers::configuration; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{ assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, 
assert_failed_to_reload_keys, - assert_invalid_auth_key_param, assert_invalid_key_duration_param, assert_ok, assert_token_not_valid, assert_unauthorized, + assert_invalid_auth_key_get_param, assert_invalid_auth_key_post_param, assert_ok, assert_token_not_valid, + assert_unauthorized, assert_unprocessable_auth_key_duration_param, }; -use crate::servers::api::v1::client::Client; +use crate::servers::api::v1::client::{AddKeyForm, Client}; use crate::servers::api::{force_database_error, Started}; #[tokio::test] -async fn should_allow_generating_a_new_auth_key() { +async fn should_allow_generating_a_new_random_auth_key() { let env = Started::new(&configuration::ephemeral().into()).await; - let seconds_valid = 60; - - let response = Client::new(env.get_connection_info()).generate_auth_key(seconds_valid).await; + let response = Client::new(env.get_connection_info()) + .add_auth_key(AddKeyForm { + opt_key: None, + seconds_valid: 60, + }) + .await; let auth_key_resource = assert_auth_key_utf8(response).await; @@ -32,43 +37,49 @@ async fn should_allow_generating_a_new_auth_key() { } #[tokio::test] -async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { +async fn should_allow_uploading_a_preexisting_auth_key() { let env = Started::new(&configuration::ephemeral().into()).await; - let seconds_valid = 60; - - let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) - .generate_auth_key(seconds_valid) + let response = Client::new(env.get_connection_info()) + .add_auth_key(AddKeyForm { + opt_key: Some("Xc1L4PbQJSFGlrgSRZl8wxSFAuMa21z5".to_string()), + seconds_valid: 60, + }) .await; - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) - .generate_auth_key(seconds_valid) - .await; + let auth_key_resource = assert_auth_key_utf8(response).await; - assert_unauthorized(response).await; + // Verify the key with the 
tracker + assert!(env + .tracker + .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) + .await + .is_ok()); env.stop().await; } #[tokio::test] -async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { +async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { let env = Started::new(&configuration::ephemeral().into()).await; - let invalid_key_durations = [ - // "", it returns 404 - // " ", it returns 404 - "-1", "text", - ]; + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .add_auth_key(AddKeyForm { + opt_key: None, + seconds_valid: 60, + }) + .await; - for invalid_key_duration in invalid_key_durations { - let response = Client::new(env.get_connection_info()) - .post(&format!("key/{invalid_key_duration}")) - .await; + assert_token_not_valid(response).await; - assert_invalid_key_duration_param(response, invalid_key_duration).await; - } + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + .add_auth_key(AddKeyForm { + opt_key: None, + seconds_valid: 60, + }) + .await; + + assert_unauthorized(response).await; env.stop().await; } @@ -79,8 +90,12 @@ async fn should_fail_when_the_auth_key_cannot_be_generated() { force_database_error(&env.tracker); - let seconds_valid = 60; - let response = Client::new(env.get_connection_info()).generate_auth_key(seconds_valid).await; + let response = Client::new(env.get_connection_info()) + .add_auth_key(AddKeyForm { + opt_key: None, + seconds_valid: 60, + }) + .await; assert_failed_to_generate_key(response).await; @@ -107,6 +122,77 @@ async fn should_allow_deleting_an_auth_key() { env.stop().await; } +#[tokio::test] +async fn should_fail_generating_a_new_auth_key_when_the_provided_key_is_invalid() { + #[derive(Serialize, Debug)] + pub struct InvalidAddKeyForm { + #[serde(rename = "key")] + pub opt_key: Option, + pub seconds_valid: u64, + } + + let env = 
Started::new(&configuration::ephemeral().into()).await; + + let invalid_keys = [ + // "", it returns 404 + // " ", it returns 404 + "-1", // Not a string + "invalid", // Invalid string + "GQEs2ZNcCm9cwEV9dBpcPB5OwNFWFiR", // Not a 32-char string + // "%QEs2ZNcCm9cwEV9dBpcPB5OwNFWFiRd", // Invalid char. todo: this doesn't fail + ]; + + for invalid_key in invalid_keys { + let response = Client::new(env.get_connection_info()) + .post_form( + "keys", + &InvalidAddKeyForm { + opt_key: Some(invalid_key.to_string()), + seconds_valid: 60, + }, + ) + .await; + + assert_invalid_auth_key_post_param(response, invalid_key).await; + } + + env.stop().await; +} + +#[tokio::test] +async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { + #[derive(Serialize, Debug)] + pub struct InvalidAddKeyForm { + #[serde(rename = "key")] + pub opt_key: Option, + pub seconds_valid: String, + } + + let env = Started::new(&configuration::ephemeral().into()).await; + + let invalid_key_durations = [ + // "", it returns 404 + // " ", it returns 404 + "-1", "text", + ]; + + for invalid_key_duration in invalid_key_durations { + let response = Client::new(env.get_connection_info()) + .post_form( + "keys", + &InvalidAddKeyForm { + opt_key: None, + seconds_valid: invalid_key_duration.to_string(), + }, + ) + .await; + + assert_unprocessable_auth_key_duration_param(response, invalid_key_duration).await; + } + + env.stop().await; +} + #[tokio::test] async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { let env = Started::new(&configuration::ephemeral().into()).await; @@ -124,7 +210,7 @@ async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { for invalid_auth_key in &invalid_auth_keys { let response = Client::new(env.get_connection_info()).delete_auth_key(invalid_auth_key).await; - assert_invalid_auth_key_param(response, invalid_auth_key).await; + assert_invalid_auth_key_get_param(response, invalid_auth_key).await; } env.stop().await; @@ -247,3 
+333,93 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { env.stop().await; } + +mod deprecated_generate_key_endpoint { + + use torrust_tracker::core::auth::Key; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::servers::api::v1::asserts::{ + assert_auth_key_utf8, assert_failed_to_generate_key, assert_invalid_key_duration_param, assert_token_not_valid, + assert_unauthorized, + }; + use crate::servers::api::v1::client::Client; + use crate::servers::api::{force_database_error, Started}; + + #[tokio::test] + async fn should_allow_generating_a_new_auth_key() { + let env = Started::new(&configuration::ephemeral().into()).await; + + let seconds_valid = 60; + + let response = Client::new(env.get_connection_info()).generate_auth_key(seconds_valid).await; + + let auth_key_resource = assert_auth_key_utf8(response).await; + + // Verify the key with the tracker + assert!(env + .tracker + .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) + .await + .is_ok()); + + env.stop().await; + } + + #[tokio::test] + async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { + let env = Started::new(&configuration::ephemeral().into()).await; + + let seconds_valid = 60; + + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .generate_auth_key(seconds_valid) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + .generate_auth_key(seconds_valid) + .await; + + assert_unauthorized(response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { + let env = Started::new(&configuration::ephemeral().into()).await; + + let invalid_key_durations = [ + // "", it returns 404 + // " ", 
it returns 404 + "-1", "text", + ]; + + for invalid_key_duration in invalid_key_durations { + let response = Client::new(env.get_connection_info()) + .post_empty(&format!("key/{invalid_key_duration}")) + .await; + + assert_invalid_key_duration_param(response, invalid_key_duration).await; + } + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_auth_key_cannot_be_generated() { + let env = Started::new(&configuration::ephemeral().into()).await; + + force_database_error(&env.tracker); + + let seconds_valid = 60; + let response = Client::new(env.get_connection_info()).generate_auth_key(seconds_valid).await; + + assert_failed_to_generate_key(response).await; + + env.stop().await; + } +} From 04f50e454e6e6c0c047a798af410ff3b19ad228d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 30 Jul 2024 11:22:30 +0100 Subject: [PATCH 0951/1003] docs: [#974] update add key endpoint doc --- .../apis/v1/context/auth_key/handlers.rs | 2 ++ src/servers/apis/v1/context/auth_key/mod.rs | 21 +++++++++++++------ .../apis/v1/context/auth_key/routes.rs | 8 +++++-- 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs index 3f85089ec..6d2d99150 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -75,6 +75,8 @@ pub async fn add_auth_key_handler( /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#generate-a-new-authentication-key) /// for more information about this endpoint. +/// +/// This endpoint has been deprecated. Use [`add_auth_key_handler`]. 
pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { let seconds_valid = seconds_valid_or_key; match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { diff --git a/src/servers/apis/v1/context/auth_key/mod.rs b/src/servers/apis/v1/context/auth_key/mod.rs index b00d7a2cb..f6762b26e 100644 --- a/src/servers/apis/v1/context/auth_key/mod.rs +++ b/src/servers/apis/v1/context/auth_key/mod.rs @@ -3,8 +3,8 @@ //! Authentication keys are used to authenticate HTTP tracker `announce` and //! `scrape` requests. //! -//! When the tracker is running in `private` or `private_listed` mode, the -//! authentication keys are required to announce and scrape torrents. +//! When the tracker is running in `private` mode, the authentication keys are +//! required to announce and scrape torrents. //! //! A sample `announce` request **without** authentication key: //! @@ -22,22 +22,31 @@ //! //! # Generate a new authentication key //! -//! `POST /key/:seconds_valid` +//! `POST /keys` //! -//! It generates a new authentication key. +//! It generates a new authentication key or upload a pre-generated key. //! //! > **NOTICE**: keys expire after a certain amount of time. //! -//! **Path parameters** +//! **POST parameters** //! //! Name | Type | Description | Required | Example //! ---|---|---|---|--- +//! `key` | 32-char string (0-9, a-z, A-Z) | The optional pre-generated key. | No | `Xc1L4PbQJSFGlrgSRZl8wxSFAuMa21z7` //! `seconds_valid` | positive integer | The number of seconds the key will be valid. | Yes | `3600` //! +//! > **NOTICE**: the `key` field is optional. If is not provided the tracker +//! > will generated a random one. +//! //! **Example request** //! //! ```bash -//! curl -X POST "http://127.0.0.1:1212/api/v1/key/120?token=MyAccessToken" +//! curl -X POST http://localhost:1212/api/v1/keys?token=MyAccessToken \ +//! -H "Content-Type: application/json" \ +//! -d '{ +//! 
"key": "xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6", +//! "seconds_valid": 7200 +//! }' //! ``` //! //! **Example response** `200` diff --git a/src/servers/apis/v1/context/auth_key/routes.rs b/src/servers/apis/v1/context/auth_key/routes.rs index 9452f2c0f..60ccd77ab 100644 --- a/src/servers/apis/v1/context/auth_key/routes.rs +++ b/src/servers/apis/v1/context/auth_key/routes.rs @@ -21,8 +21,12 @@ pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { .route( // code-review: Axum does not allow two routes with the same path but different path variable name. // In the new major API version, `seconds_valid` should be a POST form field so that we will have two paths: - // POST /key - // DELETE /key/:key + // + // POST /keys + // DELETE /keys/:key + // + // The POST /key/:seconds_valid has been deprecated and it will removed in the future. + // Use POST /keys &format!("{prefix}/key/:seconds_valid_or_key"), post(generate_auth_key_handler) .with_state(tracker.clone()) From 8d41d1885d79580d598f4c88ad31379ffcea32a8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 30 Jul 2024 13:10:02 +0100 Subject: [PATCH 0952/1003] fix: [#976] do not allow invalid tracker keys --- src/core/auth.rs | 51 ++++++++++++++++--- .../api/v1/contract/context/auth_key.rs | 8 +-- 2 files changed, 48 insertions(+), 11 deletions(-) diff --git a/src/core/auth.rs b/src/core/auth.rs index 00ded71ef..f041c8f2b 100644 --- a/src/core/auth.rs +++ b/src/core/auth.rs @@ -131,13 +131,37 @@ impl ExpiringKey { } } -/// A randomly generated token used for authentication. +/// A token used for authentication. /// -/// It contains lower and uppercase letters and numbers. -/// It's a 32-char string. +/// - It contains only ascii alphanumeric chars: lower and uppercase letters and +/// numbers. +/// - It's a 32-char string. 
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] pub struct Key(String); +impl Key { + /// # Errors + /// + /// Will return an error is the string represents an invalid key. + /// Valid keys can only contain 32 chars including 0-9, a-z and A-Z. + pub fn new(value: &str) -> Result { + if value.len() != AUTH_KEY_LENGTH { + return Err(ParseKeyError); + } + + if !value.chars().all(|c| c.is_ascii_alphanumeric()) { + return Err(ParseKeyError); + } + + Ok(Self(value.to_owned())) + } + + #[must_use] + pub fn value(&self) -> &str { + &self.0 + } +} + /// Error returned when a key cannot be parsed from a string. /// /// ```rust,no_run @@ -159,10 +183,7 @@ impl FromStr for Key { type Err = ParseKeyError; fn from_str(s: &str) -> Result { - if s.len() != AUTH_KEY_LENGTH { - return Err(ParseKeyError); - } - + Key::new(s)?; Ok(Self(s.to_string())) } } @@ -209,6 +230,22 @@ mod tests { assert!(key.is_ok()); assert_eq!(key.unwrap().to_string(), key_string); } + + #[test] + fn length_should_be_32() { + let key = Key::new(""); + assert!(key.is_err()); + + let string_longer_than_32 = "012345678901234567890123456789012"; // DevSkim: ignore DS173237 + let key = Key::new(string_longer_than_32); + assert!(key.is_err()); + } + + #[test] + fn should_only_include_alphanumeric_chars() { + let key = Key::new("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"); + assert!(key.is_err()); + } } mod expiring_auth_key { diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index f02267b8b..3130503d9 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -136,10 +136,10 @@ async fn should_fail_generating_a_new_auth_key_when_the_provided_key_is_invalid( let invalid_keys = [ // "", it returns 404 // " ", it returns 404 - "-1", // Not a string - "invalid", // Invalid string - "GQEs2ZNcCm9cwEV9dBpcPB5OwNFWFiR", // Not a 32-char string - // 
"%QEs2ZNcCm9cwEV9dBpcPB5OwNFWFiRd", // Invalid char. todo: this doesn't fail + "-1", // Not a string + "invalid", // Invalid string + "GQEs2ZNcCm9cwEV9dBpcPB5OwNFWFiR", // Not a 32-char string + "%QEs2ZNcCm9cwEV9dBpcPB5OwNFWFiRd", // Invalid char. ]; for invalid_key in invalid_keys { From e81914b9dd1da75196f0de0a529262c16e4d1b2b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 30 Jul 2024 17:07:20 +0100 Subject: [PATCH 0953/1003] refactor: [#976] concrete errors for parsing keys --- src/core/auth.rs | 20 +++++++++++++------- tests/servers/api/v1/asserts.rs | 21 ++++++++------------- 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/src/core/auth.rs b/src/core/auth.rs index f041c8f2b..783faa0da 100644 --- a/src/core/auth.rs +++ b/src/core/auth.rs @@ -146,11 +146,11 @@ impl Key { /// Valid keys can only contain 32 chars including 0-9, a-z and A-Z. pub fn new(value: &str) -> Result { if value.len() != AUTH_KEY_LENGTH { - return Err(ParseKeyError); + return Err(ParseKeyError::InvalidKeyLength); } if !value.chars().all(|c| c.is_ascii_alphanumeric()) { - return Err(ParseKeyError); + return Err(ParseKeyError::InvalidChars); } Ok(Self(value.to_owned())) @@ -175,9 +175,15 @@ impl Key { /// assert_eq!(key.unwrap().to_string(), key_string); /// ``` /// -/// If the string does not contains a valid key, the parser function will return this error. -#[derive(Debug, PartialEq, Eq, Display)] -pub struct ParseKeyError; +/// If the string does not contains a valid key, the parser function will return +/// this error. +#[derive(Debug, Error)] +pub enum ParseKeyError { + #[error("Invalid key length. Key must be have 32 chars")] + InvalidKeyLength, + #[error("Invalid chars for key. Key can only alphanumeric chars (0-9, a-z, A-Z)")] + InvalidChars, +} impl FromStr for Key { type Err = ParseKeyError; @@ -188,8 +194,8 @@ impl FromStr for Key { } } -/// Verification error. 
Error returned when an [`ExpiringKey`] cannot be verified with the [`verify(...)`](crate::core::auth::verify) function. -/// +/// Verification error. Error returned when an [`ExpiringKey`] cannot be +/// verified with the [`verify(...)`](crate::core::auth::verify) function. #[derive(Debug, Error)] #[allow(dead_code)] pub enum Error { diff --git a/tests/servers/api/v1/asserts.rs b/tests/servers/api/v1/asserts.rs index ba906f65f..aeecfa170 100644 --- a/tests/servers/api/v1/asserts.rs +++ b/tests/servers/api/v1/asserts.rs @@ -61,6 +61,12 @@ pub async fn assert_bad_request(response: Response, body: &str) { assert_eq!(response.text().await.unwrap(), body); } +pub async fn assert_bad_request_with_text(response: Response, text: &str) { + assert_eq!(response.status(), 400); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + assert!(response.text().await.unwrap().contains(text)); +} + pub async fn assert_unprocessable_content(response: Response, text: &str) { assert_eq!(response.status(), 422); assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); @@ -93,20 +99,9 @@ pub async fn assert_invalid_auth_key_get_param(response: Response, invalid_auth_ } pub async fn assert_invalid_auth_key_post_param(response: Response, invalid_auth_key: &str) { - assert_bad_request( + assert_bad_request_with_text( response, - &format!( - "Invalid URL: invalid auth key: string \"{}\", ParseKeyError", - &invalid_auth_key - ), - ) - .await; -} - -pub async fn _assert_unprocessable_auth_key_param(response: Response, _invalid_value: &str) { - assert_unprocessable_content( - response, - "Failed to deserialize the JSON body into the target type: seconds_valid: invalid type", + &format!("Invalid URL: invalid auth key: string \"{}\"", &invalid_auth_key), ) .await; } From 8d3fe72e9ad0fb3c877ec6398d097666e19c9ad1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 31 Jul 2024 15:19:57 +0100 Subject: [PATCH 0954/1003] 
chore(deps): [#979] add new cargo dep: serde_with

We will add a new endpoint to add tracker keys where some JSON values can
be null:

```console
curl -X POST http://localhost:1212/api/v1/keys?token=MyAccessToken \
  -H "Content-Type: application/json" \
  -d '{
    "key": null,
    "seconds_valid": null
  }'
```

We need to set those values to `None` in the Rust struct.
---
 Cargo.lock | 1 +
 Cargo.toml | 1 +
 2 files changed, 2 insertions(+)

diff --git a/Cargo.lock b/Cargo.lock
index e4f7a938e..234f291c5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3886,6 +3886,7 @@ dependencies = [
  "serde_bytes",
  "serde_json",
  "serde_repr",
+ "serde_with",
  "thiserror",
  "tokio",
  "torrust-tracker-clock",
diff --git a/Cargo.toml b/Cargo.toml
index 5e4401516..4184f2ae7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -66,6 +66,7 @@ serde_bencode = "0"
 serde_bytes = "0"
 serde_json = { version = "1", features = ["preserve_order"] }
 serde_repr = "0"
+serde_with = { version = "3.9.0", features = ["json"] }
 thiserror = "1"
 tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] }
 torrust-tracker-clock = { version = "3.0.0-alpha.12-develop", path = "packages/clock" }

From c5beff551e51f95e05e36a315350902d3b104153 Mon Sep 17 00:00:00 2001
From: Jose Celano
Date: Wed, 31 Jul 2024 15:27:40 +0100
Subject: [PATCH 0955/1003] feat: [#979] permanent keys

This commit adds a new feature. It allows creating permanent keys (keys
that do not expire).

This is an example for making a request to the endpoint using curl:

```console
curl -X POST http://localhost:1212/api/v1/keys?token=MyAccessToken \
  -H "Content-Type: application/json" \
  -d '{
    "key": null,
    "seconds_valid": null
  }'
```

NOTICE: both the `key` and the `seconds_valid` fields can be null.

- If `key` is `null` a new random key will be generated. You can use a
string with a pre-generated key like `Xc1L4PbQJSFGlrgSRZl8wxSFAuMa2110`.
That will allow users to migrate to the Torrust Tracker wihtout forcing the users to re-start downloading/seeding with new keys. - If `seconds_valid` is `null` the key will be permanent. Otherwise it will expire after the seconds specified in this value. --- migrations/README.md | 5 + ...3000_torrust_tracker_create_all_tables.sql | 21 +++ ...rust_tracker_keys_valid_until_nullable.sql | 1 + ...3000_torrust_tracker_create_all_tables.sql | 19 ++ ...rust_tracker_keys_valid_until_nullable.sql | 12 ++ src/core/auth.rs | 107 ++++++++---- src/core/databases/mod.rs | 8 +- src/core/databases/mysql.rs | 39 +++-- src/core/databases/sqlite.rs | 59 +++++-- src/core/error.rs | 23 +++ src/core/mod.rs | 162 ++++++++++++++++-- src/servers/apis/v1/context/auth_key/forms.rs | 16 +- .../apis/v1/context/auth_key/handlers.rs | 48 ++---- src/servers/apis/v1/context/auth_key/mod.rs | 10 +- .../apis/v1/context/auth_key/resources.rs | 53 +++--- .../apis/v1/context/auth_key/responses.rs | 5 +- src/shared/bit_torrent/common.rs | 4 +- tests/servers/api/v1/client.rs | 2 +- .../api/v1/contract/context/auth_key.rs | 24 +-- tests/servers/http/v1/contract.rs | 4 +- 20 files changed, 455 insertions(+), 167 deletions(-) create mode 100644 migrations/README.md create mode 100644 migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql create mode 100644 migrations/mysql/20240730183500_torrust_tracker_keys_valid_until_nullable.sql create mode 100644 migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql create mode 100644 migrations/sqlite/20240730183500_torrust_tracker_keys_valid_until_nullable.sql diff --git a/migrations/README.md b/migrations/README.md new file mode 100644 index 000000000..090c46ccb --- /dev/null +++ b/migrations/README.md @@ -0,0 +1,5 @@ +# Database Migrations + +We don't support automatic migrations yet. The tracker creates all the needed tables when it starts. The SQL sentences are hardcoded in each database driver. 
+ +The migrations in this folder were introduced to add some new changes (permanent keys) and to allow users to migrate to the new version. In the future, we will remove the hardcoded SQL and start using a Rust crate for database migrations. For the time being, if you are using the initial schema described in the migration `20240730183000_torrust_tracker_create_all_tables.sql` you will need to run all the subsequent migrations manually. diff --git a/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql b/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql new file mode 100644 index 000000000..407ae4dd1 --- /dev/null +++ b/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql @@ -0,0 +1,21 @@ +CREATE TABLE + IF NOT EXISTS whitelist ( + id integer PRIMARY KEY AUTO_INCREMENT, + info_hash VARCHAR(40) NOT NULL UNIQUE + ); + +CREATE TABLE + IF NOT EXISTS torrents ( + id integer PRIMARY KEY AUTO_INCREMENT, + info_hash VARCHAR(40) NOT NULL UNIQUE, + completed INTEGER DEFAULT 0 NOT NULL + ); + +CREATE TABLE + IF NOT EXISTS `keys` ( + `id` INT NOT NULL AUTO_INCREMENT, + `key` VARCHAR(32) NOT NULL, + `valid_until` INT (10) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE (`key`) + ); \ No newline at end of file diff --git a/migrations/mysql/20240730183500_torrust_tracker_keys_valid_until_nullable.sql b/migrations/mysql/20240730183500_torrust_tracker_keys_valid_until_nullable.sql new file mode 100644 index 000000000..2602797d6 --- /dev/null +++ b/migrations/mysql/20240730183500_torrust_tracker_keys_valid_until_nullable.sql @@ -0,0 +1 @@ +ALTER TABLE `keys` CHANGE `valid_until` `valid_until` INT (10); \ No newline at end of file diff --git a/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql b/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql new file mode 100644 index 000000000..bd451bf8b --- /dev/null +++ b/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql @@ -0,0 +1,19 @@ 
+CREATE TABLE + IF NOT EXISTS whitelist ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + info_hash TEXT NOT NULL UNIQUE + ); + +CREATE TABLE + IF NOT EXISTS torrents ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + info_hash TEXT NOT NULL UNIQUE, + completed INTEGER DEFAULT 0 NOT NULL + ); + +CREATE TABLE + IF NOT EXISTS keys ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + key TEXT NOT NULL UNIQUE, + valid_until INTEGER NOT NULL + ); \ No newline at end of file diff --git a/migrations/sqlite/20240730183500_torrust_tracker_keys_valid_until_nullable.sql b/migrations/sqlite/20240730183500_torrust_tracker_keys_valid_until_nullable.sql new file mode 100644 index 000000000..c6746e3ee --- /dev/null +++ b/migrations/sqlite/20240730183500_torrust_tracker_keys_valid_until_nullable.sql @@ -0,0 +1,12 @@ +CREATE TABLE + IF NOT EXISTS keys_new ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + key TEXT NOT NULL UNIQUE, + valid_until INTEGER + ); + +INSERT INTO keys_new SELECT * FROM `keys`; + +DROP TABLE `keys`; + +ALTER TABLE keys_new RENAME TO `keys`; \ No newline at end of file diff --git a/src/core/auth.rs b/src/core/auth.rs index 783faa0da..999b43615 100644 --- a/src/core/auth.rs +++ b/src/core/auth.rs @@ -4,7 +4,7 @@ //! Tracker keys are tokens used to authenticate the tracker clients when the tracker runs //! in `private` or `private_listed` modes. //! -//! There are services to [`generate`] and [`verify`] authentication keys. +//! There are services to [`generate_key`] and [`verify_key`] authentication keys. //! //! Authentication keys are used only by [`HTTP`](crate::servers::http) trackers. All keys have an expiration time, that means //! they are only valid during a period of time. After that time the expiring key will no longer be valid. @@ -19,7 +19,7 @@ //! /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` //! pub key: Key, //! /// Timestamp, the key will be no longer valid after this timestamp -//! pub valid_until: DurationSinceUnixEpoch, +//! 
pub valid_until: Option, //! } //! ``` //! @@ -29,11 +29,11 @@ //! use torrust_tracker::core::auth; //! use std::time::Duration; //! -//! let expiring_key = auth::generate(Duration::new(9999, 0)); +//! let expiring_key = auth::generate_key(Some(Duration::new(9999, 0))); //! //! // And you can later verify it with: //! -//! assert!(auth::verify(&expiring_key).is_ok()); +//! assert!(auth::verify_key(&expiring_key).is_ok()); //! ``` use std::panic::Location; @@ -55,63 +55,96 @@ use tracing::debug; use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; use crate::CurrentClock; +/// It generates a new permanent random key [`PeerKey`]. #[must_use] -/// It generates a new random 32-char authentication [`ExpiringKey`] +pub fn generate_permanent_key() -> PeerKey { + generate_key(None) +} + +/// It generates a new random 32-char authentication [`PeerKey`]. +/// +/// It can be an expiring or permanent key. /// /// # Panics /// /// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. -pub fn generate(lifetime: Duration) -> ExpiringKey { +/// +/// # Arguments +/// +/// * `lifetime`: if `None` the key will be permanent. +#[must_use] +pub fn generate_key(lifetime: Option) -> PeerKey { let random_id: String = thread_rng() .sample_iter(&Alphanumeric) .take(AUTH_KEY_LENGTH) .map(char::from) .collect(); - debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); + if let Some(lifetime) = lifetime { + debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); + + PeerKey { + key: random_id.parse::().unwrap(), + valid_until: Some(CurrentClock::now_add(&lifetime).unwrap()), + } + } else { + debug!("Generated key: {}, permanent", random_id); - ExpiringKey { - key: random_id.parse::().unwrap(), - valid_until: CurrentClock::now_add(&lifetime).unwrap(), + PeerKey { + key: random_id.parse::().unwrap(), + valid_until: None, + } } } -/// It verifies an [`ExpiringKey`]. It checks if the expiration date has passed. 
+/// It verifies an [`PeerKey`]. It checks if the expiration date has passed. +/// Permanent keys without duration (`None`) do not expire. /// /// # Errors /// -/// Will return `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. +/// Will return: /// -/// Will return `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. -pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> { +/// - `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. +/// - `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. +pub fn verify_key(auth_key: &PeerKey) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = CurrentClock::now(); - if auth_key.valid_until < current_time { - Err(Error::KeyExpired { - location: Location::caller(), - }) - } else { - Ok(()) + match auth_key.valid_until { + Some(valid_until) => { + if valid_until < current_time { + Err(Error::KeyExpired { + location: Location::caller(), + }) + } else { + Ok(()) + } + } + None => Ok(()), // Permanent key } } -/// An authentication key which has an expiration time. +/// An authentication key which can potentially have an expiration time. /// After that time is will automatically become invalid. #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] -pub struct ExpiringKey { +pub struct PeerKey { /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` pub key: Key, - /// Timestamp, the key will be no longer valid after this timestamp - pub valid_until: DurationSinceUnixEpoch, + + /// Timestamp, the key will be no longer valid after this timestamp. + /// If `None` the keys will not expire (permanent key). 
+ pub valid_until: Option, } -impl std::fmt::Display for ExpiringKey { +impl std::fmt::Display for PeerKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "key: `{}`, valid until `{}`", self.key, self.expiry_time()) + match self.expiry_time() { + Some(expire_time) => write!(f, "key: `{}`, valid until `{}`", self.key, expire_time), + None => write!(f, "key: `{}`, permanent", self.key), + } } } -impl ExpiringKey { +impl PeerKey { #[must_use] pub fn key(&self) -> Key { self.key.clone() @@ -126,8 +159,8 @@ impl ExpiringKey { /// Will panic when the key timestamp overflows the internal i64 type. /// (this will naturally happen in 292.5 billion years) #[must_use] - pub fn expiry_time(&self) -> chrono::DateTime { - convert_from_timestamp_to_datetime_utc(self.valid_until) + pub fn expiry_time(&self) -> Option> { + self.valid_until.map(convert_from_timestamp_to_datetime_utc) } } @@ -194,8 +227,8 @@ impl FromStr for Key { } } -/// Verification error. Error returned when an [`ExpiringKey`] cannot be -/// verified with the [`verify(...)`](crate::core::auth::verify) function. +/// Verification error. Error returned when an [`PeerKey`] cannot be +/// verified with the (`crate::core::auth::verify_key`) function. #[derive(Debug, Error)] #[allow(dead_code)] pub enum Error { @@ -277,7 +310,7 @@ mod tests { // Set the time to the current time. 
clock::Stopped::local_set_to_unix_epoch(); - let expiring_key = auth::generate(Duration::from_secs(0)); + let expiring_key = auth::generate_key(Some(Duration::from_secs(0))); assert_eq!( expiring_key.to_string(), @@ -287,9 +320,9 @@ mod tests { #[test] fn should_be_generated_with_a_expiration_time() { - let expiring_key = auth::generate(Duration::new(9999, 0)); + let expiring_key = auth::generate_key(Some(Duration::new(9999, 0))); - assert!(auth::verify(&expiring_key).is_ok()); + assert!(auth::verify_key(&expiring_key).is_ok()); } #[test] @@ -298,17 +331,17 @@ mod tests { clock::Stopped::local_set_to_system_time_now(); // Make key that is valid for 19 seconds. - let expiring_key = auth::generate(Duration::from_secs(19)); + let expiring_key = auth::generate_key(Some(Duration::from_secs(19))); // Mock the time has passed 10 sec. clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); - assert!(auth::verify(&expiring_key).is_ok()); + assert!(auth::verify_key(&expiring_key).is_ok()); // Mock the time has passed another 10 sec. clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); - assert!(auth::verify(&expiring_key).is_err()); + assert!(auth::verify_key(&expiring_key).is_err()); } } } diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs index cdb4c7ce5..f559eb80e 100644 --- a/src/core/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -195,11 +195,11 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - fn load_keys(&self) -> Result, Error>; + fn load_keys(&self) -> Result, Error>; /// It gets an expiring authentication key from the database. /// - /// It returns `Some(ExpiringKey)` if a [`ExpiringKey`](crate::core::auth::ExpiringKey) + /// It returns `Some(PeerKey)` if a [`PeerKey`](crate::core::auth::PeerKey) /// with the input [`Key`] exists, `None` otherwise. 
/// /// # Context: Authentication Keys @@ -207,7 +207,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - fn get_key_from_keys(&self, key: &Key) -> Result, Error>; + fn get_key_from_keys(&self, key: &Key) -> Result, Error>; /// It adds an expiring authentication key to the database. /// @@ -216,7 +216,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to save. - fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result; + fn add_key_to_keys(&self, auth_key: &auth::PeerKey) -> Result; /// It removes an expiring authentication key from the database. /// diff --git a/src/core/databases/mysql.rs b/src/core/databases/mysql.rs index 40eced900..3a06c4982 100644 --- a/src/core/databases/mysql.rs +++ b/src/core/databases/mysql.rs @@ -60,7 +60,7 @@ impl Database for Mysql { CREATE TABLE IF NOT EXISTS `keys` ( `id` INT NOT NULL AUTO_INCREMENT, `key` VARCHAR({}) NOT NULL, - `valid_until` INT(10) NOT NULL, + `valid_until` INT(10), PRIMARY KEY (`id`), UNIQUE (`key`) );", @@ -119,14 +119,20 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). 
- fn load_keys(&self) -> Result, Error> { + fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let keys = conn.query_map( "SELECT `key`, valid_until FROM `keys`", - |(key, valid_until): (String, i64)| auth::ExpiringKey { - key: key.parse::().unwrap(), - valid_until: Duration::from_secs(valid_until.unsigned_abs()), + |(key, valid_until): (String, Option)| match valid_until { + Some(valid_until) => auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), + }, + None => auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: None, + }, }, )?; @@ -197,28 +203,37 @@ impl Database for Mysql { } /// Refer to [`databases::Database::get_key_from_keys`](crate::core::databases::Database::get_key_from_keys). - fn get_key_from_keys(&self, key: &Key) -> Result, Error> { + fn get_key_from_keys(&self, key: &Key) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - let query = conn.exec_first::<(String, i64), _, _>( + let query = conn.exec_first::<(String, Option), _, _>( "SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { "key" => key.to_string() }, ); let key = query?; - Ok(key.map(|(key, expiry)| auth::ExpiringKey { - key: key.parse::().unwrap(), - valid_until: Duration::from_secs(expiry.unsigned_abs()), + Ok(key.map(|(key, opt_valid_until)| match opt_valid_until { + Some(valid_until) => auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), + }, + None => auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: None, + }, })) } /// Refer to [`databases::Database::add_key_to_keys`](crate::core::databases::Database::add_key_to_keys). 
- fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { + fn add_key_to_keys(&self, auth_key: &auth::PeerKey) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let key = auth_key.key.to_string(); - let valid_until = auth_key.valid_until.as_secs().to_string(); + let valid_until = match auth_key.valid_until { + Some(valid_until) => valid_until.as_secs().to_string(), + None => todo!(), + }; conn.exec_drop( "INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", diff --git a/src/core/databases/sqlite.rs b/src/core/databases/sqlite.rs index 3acbf9e77..69470ee04 100644 --- a/src/core/databases/sqlite.rs +++ b/src/core/databases/sqlite.rs @@ -3,6 +3,8 @@ use std::panic::Location; use std::str::FromStr; use r2d2::Pool; +use r2d2_sqlite::rusqlite::params; +use r2d2_sqlite::rusqlite::types::Null; use r2d2_sqlite::SqliteConnectionManager; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::{DurationSinceUnixEpoch, PersistentTorrents}; @@ -51,7 +53,7 @@ impl Database for Sqlite { CREATE TABLE IF NOT EXISTS keys ( id INTEGER PRIMARY KEY AUTOINCREMENT, key TEXT NOT NULL UNIQUE, - valid_until INTEGER NOT NULL + valid_until INTEGER );" .to_string(); @@ -104,22 +106,28 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). 
- fn load_keys(&self) -> Result, Error> { + fn load_keys(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; let keys_iter = stmt.query_map([], |row| { let key: String = row.get(0)?; - let valid_until: i64 = row.get(1)?; - - Ok(auth::ExpiringKey { - key: key.parse::().unwrap(), - valid_until: DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs()), - }) + let opt_valid_until: Option = row.get(1)?; + + match opt_valid_until { + Some(valid_until) => Ok(auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), + }), + None => Ok(auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: None, + }), + } })?; - let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); + let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); Ok(keys) } @@ -208,7 +216,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::get_key_from_keys`](crate::core::databases::Database::get_key_from_keys). 
- fn get_key_from_keys(&self, key: &Key) -> Result, Error> { + fn get_key_from_keys(&self, key: &Key) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; @@ -218,23 +226,36 @@ impl Database for Sqlite { let key = rows.next()?; Ok(key.map(|f| { - let expiry: i64 = f.get(1).unwrap(); + let valid_until: Option = f.get(1).unwrap(); let key: String = f.get(0).unwrap(); - auth::ExpiringKey { - key: key.parse::().unwrap(), - valid_until: DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs()), + + match valid_until { + Some(valid_until) => auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), + }, + None => auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: None, + }, } })) } /// Refer to [`databases::Database::add_key_to_keys`](crate::core::databases::Database::add_key_to_keys). - fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { + fn add_key_to_keys(&self, auth_key: &auth::PeerKey) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; - let insert = conn.execute( - "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - [auth_key.key.to_string(), auth_key.valid_until.as_secs().to_string()], - )?; + let insert = match auth_key.valid_until { + Some(valid_until) => conn.execute( + "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", + [auth_key.key.to_string(), valid_until.as_secs().to_string()], + )?, + None => conn.execute( + "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", + params![auth_key.key.to_string(), Null], + )?, + }; if insert == 0 { Err(Error::InsertFailed { diff --git a/src/core/error.rs b/src/core/error.rs index a826de349..d89b030c4 100644 --- a/src/core/error.rs +++ b/src/core/error.rs @@ -11,6 +11,9 @@ use std::panic::Location; use torrust_tracker_located_error::LocatedError; use torrust_tracker_primitives::info_hash::InfoHash; 
+use super::auth::ParseKeyError; +use super::databases; + /// Authentication or authorization error returned by the core `Tracker` #[derive(thiserror::Error, Debug, Clone)] pub enum Error { @@ -20,6 +23,7 @@ pub enum Error { key: super::auth::Key, source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, + #[error("The peer is not authenticated, {location}")] PeerNotAuthenticated { location: &'static Location<'static> }, @@ -30,3 +34,22 @@ pub enum Error { location: &'static Location<'static>, }, } + +/// Errors related to peers keys. +#[allow(clippy::module_name_repetitions)] +#[derive(thiserror::Error, Debug, Clone)] +pub enum PeerKeyError { + #[error("Invalid peer key duration: {seconds_valid:?}, is not valid")] + DurationOverflow { seconds_valid: u64 }, + + #[error("Invalid key: {key}")] + InvalidKey { + key: String, + source: LocatedError<'static, ParseKeyError>, + }, + + #[error("Can't persist key: {source}")] + DatabaseError { + source: LocatedError<'static, databases::error::Error>, + }, +} diff --git a/src/core/mod.rs b/src/core/mod.rs index f0853ec27..f4cff8daf 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -453,13 +453,15 @@ use std::panic::Location; use std::sync::Arc; use std::time::Duration; -use auth::ExpiringKey; +use auth::PeerKey; use databases::driver::Driver; use derive_more::Constructor; +use error::PeerKeyError; use tokio::sync::mpsc::error::SendError; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::v2::database; use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; +use torrust_tracker_located_error::Located; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; @@ -492,7 +494,7 @@ pub struct Tracker { database: Arc>, /// Tracker users' keys. Only for private trackers. 
- keys: tokio::sync::RwLock>, + keys: tokio::sync::RwLock>, /// The list of allowed torrents. Only for listed trackers. whitelist: tokio::sync::RwLock>, @@ -556,6 +558,20 @@ impl ScrapeData { } } +/// This type contains the info needed to add a new tracker key. +/// +/// You can upload a pre-generated key or let the app to generate a new one. +/// You can also set an expiration date or leave it empty (`None`) if you want +/// to create a permanent key that does not expire. +#[derive(Debug)] +pub struct AddKeyRequest { + /// The pre-generated key. Use `None` to generate a random key. + pub opt_key: Option, + + /// How long the key will be valid in seconds. Use `None` for permanent keys. + pub opt_seconds_valid: Option, +} + impl Tracker { /// `Tracker` constructor. /// @@ -793,9 +809,96 @@ impl Tracker { } } + /// Adds new peer keys to the tracker. + /// + /// Keys can be pre-generated or randomly created. They can also be permanent or expire. + /// + /// # Errors + /// + /// Will return an error if: + /// + /// - The key duration overflows the duration type maximum value. + /// - The provided pre-generated key is invalid. + /// - The key could not been persisted due to database issues. + pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result { + // code-review: all methods related to keys should be moved to a new independent "keys" service. 
+ + match add_key_req.opt_key { + // Upload pre-generated key + Some(pre_existing_key) => { + if let Some(seconds_valid) = add_key_req.opt_seconds_valid { + // Expiring key + let Some(valid_until) = CurrentClock::now_add(&Duration::from_secs(seconds_valid)) else { + return Err(PeerKeyError::DurationOverflow { seconds_valid }); + }; + + let key = pre_existing_key.parse::(); + + match key { + Ok(key) => match self.add_auth_key(key, Some(valid_until)).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + Err(err) => Err(PeerKeyError::InvalidKey { + key: pre_existing_key, + source: Located(err).into(), + }), + } + } else { + // Permanent key + let key = pre_existing_key.parse::(); + + match key { + Ok(key) => match self.add_permanent_auth_key(key).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + Err(err) => Err(PeerKeyError::InvalidKey { + key: pre_existing_key, + source: Located(err).into(), + }), + } + } + } + // Generate a new random key + None => match add_key_req.opt_seconds_valid { + // Expiring key + Some(seconds_valid) => match self.generate_auth_key(Some(Duration::from_secs(seconds_valid))).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + // Permanent key + None => match self.generate_permanent_auth_key().await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + }, + } + } + + /// It generates a new permanent authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the database. 
+ pub async fn generate_permanent_auth_key(&self) -> Result { + self.generate_auth_key(None).await + } + /// It generates a new expiring authentication key. - /// `lifetime` param is the duration in seconds for the new key. - /// The key will be no longer valid after `lifetime` seconds. + /// /// Authentication keys are used by HTTP trackers. /// /// # Context: Authentication @@ -803,14 +906,37 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. - pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { - let auth_key = auth::generate(lifetime); + /// + /// # Arguments + /// + /// * `lifetime` - The duration in seconds for the new key. The key will be + /// no longer valid after `lifetime` seconds. + pub async fn generate_auth_key(&self, lifetime: Option) -> Result { + let auth_key = auth::generate_key(lifetime); self.database.add_key_to_keys(&auth_key)?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); Ok(auth_key) } + /// It adds a pre-generated permanent authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the + /// database. For example, if the key already exist. + /// + /// # Arguments + /// + /// * `key` - The pre-generated key. + pub async fn add_permanent_auth_key(&self, key: Key) -> Result { + self.add_auth_key(key, None).await + } + /// It adds a pre-generated authentication key. /// /// Authentication keys are used by HTTP trackers. @@ -824,14 +950,15 @@ impl Tracker { /// /// # Arguments /// + /// * `key` - The pre-generated key. /// * `lifetime` - The duration in seconds for the new key. The key will be /// no longer valid after `lifetime` seconds. 
pub async fn add_auth_key( &self, key: Key, - valid_until: DurationSinceUnixEpoch, - ) -> Result { - let auth_key = ExpiringKey { key, valid_until }; + valid_until: Option, + ) -> Result { + let auth_key = PeerKey { key, valid_until }; // code-review: should we return a friendly error instead of the DB // constrain error when the key already exist? For now, it's returning @@ -869,7 +996,7 @@ impl Tracker { location: Location::caller(), key: Box::new(key.clone()), }), - Some(key) => auth::verify(key), + Some(key) => auth::verify_key(key), } } @@ -1661,16 +1788,19 @@ mod tests { async fn it_should_generate_the_expiring_authentication_keys() { let tracker = private_tracker(); - let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + let key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); - assert_eq!(key.valid_until, CurrentClock::now_add(&Duration::from_secs(100)).unwrap()); + assert_eq!( + key.valid_until, + Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) + ); } #[tokio::test] async fn it_should_authenticate_a_peer_by_using_a_key() { let tracker = private_tracker(); - let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + let expiring_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); let result = tracker.authenticate(&expiring_key.key()).await; @@ -1694,7 +1824,7 @@ mod tests { // `verify_auth_key` should be a private method. 
let tracker = private_tracker(); - let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + let expiring_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_ok()); } @@ -1712,7 +1842,7 @@ mod tests { async fn it_should_remove_an_authentication_key() { let tracker = private_tracker(); - let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + let expiring_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); let result = tracker.remove_auth_key(&expiring_key.key()).await; @@ -1724,7 +1854,7 @@ mod tests { async fn it_should_load_authentication_keys_from_the_database() { let tracker = private_tracker(); - let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + let expiring_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); // Remove the newly generated key in memory tracker.keys.write().await.remove(&expiring_key.key()); diff --git a/src/servers/apis/v1/context/auth_key/forms.rs b/src/servers/apis/v1/context/auth_key/forms.rs index 9c023ab72..5dfea6e80 100644 --- a/src/servers/apis/v1/context/auth_key/forms.rs +++ b/src/servers/apis/v1/context/auth_key/forms.rs @@ -1,8 +1,22 @@ use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, DefaultOnNull}; +/// This type contains the info needed to add a new tracker key. +/// +/// You can upload a pre-generated key or let the app to generate a new one. +/// You can also set an expiration date or leave it empty (`None`) if you want +/// to create permanent key that does not expire. +#[serde_as] #[derive(Serialize, Deserialize, Debug)] pub struct AddKeyForm { + /// The pre-generated key. Use `None` (null in json) to generate a random key. 
+ #[serde_as(deserialize_as = "DefaultOnNull")] #[serde(rename = "key")] pub opt_key: Option, - pub seconds_valid: u64, + + /// How long the key will be valid in seconds. Use `None` (null in json) for + /// permanent keys. + #[serde_as(deserialize_as = "DefaultOnNull")] + #[serde(rename = "seconds_valid")] + pub opt_seconds_valid: Option, } diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs index 6d2d99150..fed3ad301 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -6,18 +6,16 @@ use std::time::Duration; use axum::extract::{self, Path, State}; use axum::response::Response; use serde::Deserialize; -use torrust_tracker_clock::clock::Time; use super::forms::AddKeyForm; use super::responses::{ - auth_key_response, failed_to_add_key_response, failed_to_delete_key_response, failed_to_generate_key_response, - failed_to_reload_keys_response, invalid_auth_key_duration_response, invalid_auth_key_response, + auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, + invalid_auth_key_duration_response, invalid_auth_key_response, }; use crate::core::auth::Key; -use crate::core::Tracker; +use crate::core::{AddKeyRequest, Tracker}; use crate::servers::apis::v1::context::auth_key::resources::AuthKey; use crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; -use crate::CurrentClock; /// It handles the request to add a new authentication key. 
/// @@ -36,31 +34,21 @@ pub async fn add_auth_key_handler( State(tracker): State>, extract::Json(add_key_form): extract::Json, ) -> Response { - match add_key_form.opt_key { - Some(pre_existing_key) => { - let Some(valid_until) = CurrentClock::now_add(&Duration::from_secs(add_key_form.seconds_valid)) else { - return invalid_auth_key_duration_response(add_key_form.seconds_valid); - }; - - let key = pre_existing_key.parse::(); - - match key { - Ok(key) => match tracker.add_auth_key(key, valid_until).await { - Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), - Err(e) => failed_to_add_key_response(e), - }, - Err(e) => invalid_auth_key_response(&pre_existing_key, &e), - } - } - None => { - match tracker - .generate_auth_key(Duration::from_secs(add_key_form.seconds_valid)) - .await - { - Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), - Err(e) => failed_to_generate_key_response(e), + match tracker + .add_peer_key(AddKeyRequest { + opt_key: add_key_form.opt_key.clone(), + opt_seconds_valid: add_key_form.opt_seconds_valid, + }) + .await + { + Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), + Err(err) => match err { + crate::core::error::PeerKeyError::DurationOverflow { seconds_valid } => { + invalid_auth_key_duration_response(seconds_valid) } - } + crate::core::error::PeerKeyError::InvalidKey { key, source } => invalid_auth_key_response(&key, source), + crate::core::error::PeerKeyError::DatabaseError { source } => failed_to_generate_key_response(source), + }, } } @@ -79,7 +67,7 @@ pub async fn add_auth_key_handler( /// This endpoint has been deprecated. Use [`add_auth_key_handler`]. 
pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { let seconds_valid = seconds_valid_or_key; - match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { + match tracker.generate_auth_key(Some(Duration::from_secs(seconds_valid))).await { Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), Err(e) => failed_to_generate_key_response(e), } diff --git a/src/servers/apis/v1/context/auth_key/mod.rs b/src/servers/apis/v1/context/auth_key/mod.rs index f6762b26e..b4112f21f 100644 --- a/src/servers/apis/v1/context/auth_key/mod.rs +++ b/src/servers/apis/v1/context/auth_key/mod.rs @@ -26,17 +26,15 @@ //! //! It generates a new authentication key or upload a pre-generated key. //! -//! > **NOTICE**: keys expire after a certain amount of time. -//! //! **POST parameters** //! //! Name | Type | Description | Required | Example //! ---|---|---|---|--- -//! `key` | 32-char string (0-9, a-z, A-Z) | The optional pre-generated key. | No | `Xc1L4PbQJSFGlrgSRZl8wxSFAuMa21z7` -//! `seconds_valid` | positive integer | The number of seconds the key will be valid. | Yes | `3600` +//! `key` | 32-char string (0-9, a-z, A-Z) or `null` | The optional pre-generated key. | Yes | `Xc1L4PbQJSFGlrgSRZl8wxSFAuMa21z7` or `null` +//! `seconds_valid` | positive integer or `null` | The number of seconds the key will be valid. | Yes | `3600` or `null` //! -//! > **NOTICE**: the `key` field is optional. If is not provided the tracker -//! > will generated a random one. +//! > **NOTICE**: the `key` and `seconds_valid` fields are optional. If `key` is not provided the tracker +//! > will generated a random one. If `seconds_valid` field is not provided the key will be permanent. You can use the `null` value. //! //! **Example request** //! 
diff --git a/src/servers/apis/v1/context/auth_key/resources.rs b/src/servers/apis/v1/context/auth_key/resources.rs index 3671438c2..c26b2c4d3 100644 --- a/src/servers/apis/v1/context/auth_key/resources.rs +++ b/src/servers/apis/v1/context/auth_key/resources.rs @@ -12,27 +12,36 @@ pub struct AuthKey { pub key: String, /// The timestamp when the key will expire. #[deprecated(since = "3.0.0", note = "please use `expiry_time` instead")] - pub valid_until: u64, // todo: remove when the torrust-index-backend starts using the `expiry_time` attribute. + pub valid_until: Option, // todo: remove when the torrust-index-backend starts using the `expiry_time` attribute. /// The ISO 8601 timestamp when the key will expire. - pub expiry_time: String, + pub expiry_time: Option, } -impl From for auth::ExpiringKey { +impl From for auth::PeerKey { fn from(auth_key_resource: AuthKey) -> Self { - auth::ExpiringKey { + auth::PeerKey { key: auth_key_resource.key.parse::().unwrap(), - valid_until: convert_from_iso_8601_to_timestamp(&auth_key_resource.expiry_time), + valid_until: auth_key_resource + .expiry_time + .map(|expiry_time| convert_from_iso_8601_to_timestamp(&expiry_time)), } } } #[allow(deprecated)] -impl From for AuthKey { - fn from(auth_key: auth::ExpiringKey) -> Self { - AuthKey { - key: auth_key.key.to_string(), - valid_until: auth_key.valid_until.as_secs(), - expiry_time: auth_key.expiry_time().to_string(), +impl From for AuthKey { + fn from(auth_key: auth::PeerKey) -> Self { + match (auth_key.valid_until, auth_key.expiry_time()) { + (Some(valid_until), Some(expiry_time)) => AuthKey { + key: auth_key.key.to_string(), + valid_until: Some(valid_until.as_secs()), + expiry_time: Some(expiry_time.to_string()), + }, + _ => AuthKey { + key: auth_key.key.to_string(), + valid_until: None, + expiry_time: None, + }, } } } @@ -72,15 +81,15 @@ mod tests { let auth_key_resource = AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: 
one_hour_after_unix_epoch().timestamp, - expiry_time: one_hour_after_unix_epoch().iso_8601_v1, + valid_until: Some(one_hour_after_unix_epoch().timestamp), + expiry_time: Some(one_hour_after_unix_epoch().iso_8601_v1), }; assert_eq!( - auth::ExpiringKey::from(auth_key_resource), - auth::ExpiringKey { + auth::PeerKey::from(auth_key_resource), + auth::PeerKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: CurrentClock::now_add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap() + valid_until: Some(CurrentClock::now_add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap()) } ); } @@ -90,17 +99,17 @@ mod tests { fn it_should_be_convertible_from_an_auth_key() { clock::Stopped::local_set_to_unix_epoch(); - let auth_key = auth::ExpiringKey { + let auth_key = auth::PeerKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: CurrentClock::now_add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap(), + valid_until: Some(CurrentClock::now_add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap()), }; assert_eq!( AuthKey::from(auth_key), AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: one_hour_after_unix_epoch().timestamp, - expiry_time: one_hour_after_unix_epoch().iso_8601_v2, + valid_until: Some(one_hour_after_unix_epoch().timestamp), + expiry_time: Some(one_hour_after_unix_epoch().iso_8601_v2), } ); } @@ -111,8 +120,8 @@ mod tests { assert_eq!( serde_json::to_string(&AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: one_hour_after_unix_epoch().timestamp, - expiry_time: one_hour_after_unix_epoch().iso_8601_v1, + valid_until: Some(one_hour_after_unix_epoch().timestamp), + expiry_time: Some(one_hour_after_unix_epoch().iso_8601_v1), }) .unwrap(), 
"{\"key\":\"IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM\",\"valid_until\":60,\"expiry_time\":\"1970-01-01T00:01:00.000Z\"}" // cspell:disable-line diff --git a/src/servers/apis/v1/context/auth_key/responses.rs b/src/servers/apis/v1/context/auth_key/responses.rs index dfe449b46..4905d9adc 100644 --- a/src/servers/apis/v1/context/auth_key/responses.rs +++ b/src/servers/apis/v1/context/auth_key/responses.rs @@ -4,7 +4,6 @@ use std::error::Error; use axum::http::{header, StatusCode}; use axum::response::{IntoResponse, Response}; -use crate::core::auth::ParseKeyError; use crate::servers::apis::v1::context::auth_key::resources::AuthKey; use crate::servers::apis::v1::responses::{bad_request_response, unhandled_rejection_response}; @@ -51,8 +50,8 @@ pub fn failed_to_reload_keys_response(e: E) -> Response { } #[must_use] -pub fn invalid_auth_key_response(auth_key: &str, error: &ParseKeyError) -> Response { - bad_request_response(&format!("Invalid URL: invalid auth key: string \"{auth_key}\", {error}")) +pub fn invalid_auth_key_response(auth_key: &str, e: E) -> Response { + bad_request_response(&format!("Invalid URL: invalid auth key: string \"{auth_key}\", {e}")) } #[must_use] diff --git a/src/shared/bit_torrent/common.rs b/src/shared/bit_torrent/common.rs index 3dd059a6a..46026ac47 100644 --- a/src/shared/bit_torrent/common.rs +++ b/src/shared/bit_torrent/common.rs @@ -17,6 +17,6 @@ pub const MAX_SCRAPE_TORRENTS: u8 = 74; /// HTTP tracker authentication key length. /// -/// See function to [`generate`](crate::core::auth::generate) the -/// [`ExpiringKeys`](crate::core::auth::ExpiringKey) for more information. +/// For more information see function [`generate_key`](crate::core::auth::generate_key) to generate the +/// [`PeerKey`](crate::core::auth::PeerKey). 
pub const AUTH_KEY_LENGTH: usize = 32; diff --git a/tests/servers/api/v1/client.rs b/tests/servers/api/v1/client.rs index 91f18acac..3d95c10ca 100644 --- a/tests/servers/api/v1/client.rs +++ b/tests/servers/api/v1/client.rs @@ -134,5 +134,5 @@ pub async fn get(path: &str, query: Option) -> Response { pub struct AddKeyForm { #[serde(rename = "key")] pub opt_key: Option, - pub seconds_valid: u64, + pub seconds_valid: Option, } diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index 3130503d9..cd6d2544f 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -20,7 +20,7 @@ async fn should_allow_generating_a_new_random_auth_key() { let response = Client::new(env.get_connection_info()) .add_auth_key(AddKeyForm { opt_key: None, - seconds_valid: 60, + seconds_valid: Some(60), }) .await; @@ -43,7 +43,7 @@ async fn should_allow_uploading_a_preexisting_auth_key() { let response = Client::new(env.get_connection_info()) .add_auth_key(AddKeyForm { opt_key: Some("Xc1L4PbQJSFGlrgSRZl8wxSFAuMa21z5".to_string()), - seconds_valid: 60, + seconds_valid: Some(60), }) .await; @@ -66,7 +66,7 @@ async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) .add_auth_key(AddKeyForm { opt_key: None, - seconds_valid: 60, + seconds_valid: Some(60), }) .await; @@ -75,7 +75,7 @@ async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) .add_auth_key(AddKeyForm { opt_key: None, - seconds_valid: 60, + seconds_valid: Some(60), }) .await; @@ -93,7 +93,7 @@ async fn should_fail_when_the_auth_key_cannot_be_generated() { let response = Client::new(env.get_connection_info()) .add_auth_key(AddKeyForm { opt_key: None, - 
seconds_valid: 60, + seconds_valid: Some(60), }) .await; @@ -109,7 +109,7 @@ async fn should_allow_deleting_an_auth_key() { let seconds_valid = 60; let auth_key = env .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -223,7 +223,7 @@ async fn should_fail_when_the_auth_key_cannot_be_deleted() { let seconds_valid = 60; let auth_key = env .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -247,7 +247,7 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { // Generate new auth key let auth_key = env .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -260,7 +260,7 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { // Generate new auth key let auth_key = env .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -279,7 +279,7 @@ async fn should_allow_reloading_keys() { let seconds_valid = 60; env.tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -296,7 +296,7 @@ async fn should_fail_when_keys_cannot_be_reloaded() { let seconds_valid = 60; env.tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -315,7 +315,7 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { let seconds_valid = 60; env.tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 
e4a35d0c5..14c237984 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -1261,7 +1261,7 @@ mod configured_as_private { async fn should_respond_to_authenticated_peers() { let env = Started::new(&configuration::ephemeral_private().into()).await; - let expiring_key = env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + let expiring_key = env.tracker.generate_auth_key(Some(Duration::from_secs(60))).await.unwrap(); let response = Client::authenticated(*env.bind_address(), expiring_key.key()) .announce(&QueryBuilder::default().query()) @@ -1393,7 +1393,7 @@ mod configured_as_private { .build(), ); - let expiring_key = env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + let expiring_key = env.tracker.generate_auth_key(Some(Duration::from_secs(60))).await.unwrap(); let response = Client::authenticated(*env.bind_address(), expiring_key.key()) .scrape( From e8e935caf0b8b474d07e30dacf4627200aa0150f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 1 Aug 2024 13:09:56 +0100 Subject: [PATCH 0956/1003] feat: [#978] add a config option to disable checking keys' expiration When the tracker is running in private mode you can disable checking keys' expiration in the configuration with: ```toml [core] private = true [core.private_mode] check_keys_expiration = false ``` All keys will be valid as long as they exist in the database.
--- packages/configuration/src/v2/core.rs | 39 +++++++++++++++++++++++++++ src/core/auth.rs | 10 +++---- src/core/mod.rs | 32 ++++++++++++++++++++-- 3 files changed, 74 insertions(+), 7 deletions(-) diff --git a/packages/configuration/src/v2/core.rs index 09280917c..5d6afdee2 100644 --- a/packages/configuration/src/v2/core.rs +++ b/packages/configuration/src/v2/core.rs @@ -1,3 +1,4 @@ +use derive_more::{Constructor, Display}; use serde::{Deserialize, Serialize}; use super::network::Network; @@ -32,6 +33,10 @@ pub struct Core { #[serde(default = "Core::default_private")] pub private: bool, + // Configuration specific when the tracker is running in private mode. + #[serde(default = "Core::default_private_mode")] + pub private_mode: Option, + // Tracker policy configuration. #[serde(default = "Core::default_tracker_policy")] pub tracker_policy: TrackerPolicy, @@ -54,6 +59,7 @@ impl Default for Core { listed: Self::default_listed(), net: Self::default_network(), private: Self::default_private(), + private_mode: Self::default_private_mode(), tracker_policy: Self::default_tracker_policy(), tracker_usage_statistics: Self::default_tracker_usage_statistics(), } @@ -85,6 +91,14 @@ impl Core { false } + fn default_private_mode() -> Option { + if Self::default_private() { + Some(PrivateMode::default()) + } else { + None + } + } + fn default_tracker_policy() -> TrackerPolicy { TrackerPolicy::default() } @@ -92,3 +106,28 @@ impl Core { true } } + +/// Configuration specific when the tracker is running in private mode. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy, Constructor, Display)] +pub struct PrivateMode { + /// A flag to disable the expiration date check for peer keys. + /// + /// When `false`, if the key is not permanent the expiration date will be + /// ignored. The key will be accepted even if it has expired.
+ #[serde(default = "PrivateMode::default_check_keys_expiration")] + pub check_keys_expiration: bool, +} + +impl Default for PrivateMode { + fn default() -> Self { + Self { + check_keys_expiration: Self::default_check_keys_expiration(), + } + } +} + +impl PrivateMode { + fn default_check_keys_expiration() -> bool { + true + } +} diff --git a/src/core/auth.rs b/src/core/auth.rs index 999b43615..fef5b3098 100644 --- a/src/core/auth.rs +++ b/src/core/auth.rs @@ -33,7 +33,7 @@ //! //! // And you can later verify it with: //! -//! assert!(auth::verify_key(&expiring_key).is_ok()); +//! assert!(auth::verify_key_expiration(&expiring_key).is_ok()); //! ``` use std::panic::Location; @@ -106,7 +106,7 @@ pub fn generate_key(lifetime: Option) -> PeerKey { /// /// - `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. /// - `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. -pub fn verify_key(auth_key: &PeerKey) -> Result<(), Error> { +pub fn verify_key_expiration(auth_key: &PeerKey) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = CurrentClock::now(); match auth_key.valid_until { @@ -322,7 +322,7 @@ mod tests { fn should_be_generated_with_a_expiration_time() { let expiring_key = auth::generate_key(Some(Duration::new(9999, 0))); - assert!(auth::verify_key(&expiring_key).is_ok()); + assert!(auth::verify_key_expiration(&expiring_key).is_ok()); } #[test] @@ -336,12 +336,12 @@ mod tests { // Mock the time has passed 10 sec. clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); - assert!(auth::verify_key(&expiring_key).is_ok()); + assert!(auth::verify_key_expiration(&expiring_key).is_ok()); // Mock the time has passed another 10 sec. 
clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); - assert!(auth::verify_key(&expiring_key).is_err()); + assert!(auth::verify_key_expiration(&expiring_key).is_err()); } } } diff --git a/src/core/mod.rs index f4cff8daf..a8c265408 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -996,7 +996,16 @@ impl Tracker { location: Location::caller(), key: Box::new(key.clone()), }), - Some(key) => auth::verify_key(key), + Some(key) => match self.config.private_mode { + Some(private_mode) => { + if private_mode.check_keys_expiration { + return auth::verify_key_expiration(key); + } + + Ok(()) + } + None => auth::verify_key_expiration(key), + }, } } @@ -1779,8 +1788,9 @@ mod tests { use std::time::Duration; use torrust_tracker_clock::clock::Time; + use torrust_tracker_configuration::v2::core::PrivateMode; - use crate::core::auth; + use crate::core::auth::{self, Key}; use crate::core::tests::the_tracker::private_tracker; use crate::CurrentClock; @@ -1829,6 +1839,24 @@ mod tests { assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_ok()); } + #[tokio::test] + async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { + let mut tracker = private_tracker(); + + tracker.config.private_mode = Some(PrivateMode { + check_keys_expiration: false, + }); + + let past_time = Some(Duration::ZERO); + + let expiring_key = tracker + .add_auth_key(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), past_time) + .await + .unwrap(); + + assert!(tracker.authenticate(&expiring_key.key()).await.is_ok()); + } + #[tokio::test] async fn it_should_fail_verifying_an_unregistered_authentication_key() { let tracker = private_tracker(); From d7dfc3bea813cec6b45aa9c6d31bfa129766b2a0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 1 Aug 2024 13:46:08 +0100 Subject: [PATCH 0957/1003] feat: [#978] add semantic validation for configuration The section [core.private_mode] can be included in the configuration TOML file only if
the tracker is running in private mode (`private = true`). This commits adds that validation and makes it possible to add more semantic validations in the future. Semantic validations are validations that depend on more than one value. Like the one added, it could be any other incompatible combination. --- packages/configuration/src/lib.rs | 1 + packages/configuration/src/v2/core.rs | 11 +++++++++++ packages/configuration/src/v2/mod.rs | 7 +++++++ packages/configuration/src/validator.rs | 19 +++++++++++++++++++ src/bootstrap/app.rs | 9 +++++++++ src/core/auth.rs | 2 +- 6 files changed, 48 insertions(+), 1 deletion(-) create mode 100644 packages/configuration/src/validator.rs diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 7f63b7f18..7b59d3f95 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -5,6 +5,7 @@ //! //! The current version for configuration is [`v2`]. pub mod v2; +pub mod validator; use std::collections::HashMap; use std::env; diff --git a/packages/configuration/src/v2/core.rs b/packages/configuration/src/v2/core.rs index 5d6afdee2..3dfde122e 100644 --- a/packages/configuration/src/v2/core.rs +++ b/packages/configuration/src/v2/core.rs @@ -3,6 +3,7 @@ use serde::{Deserialize, Serialize}; use super::network::Network; use crate::v2::database::Database; +use crate::validator::{SemanticValidationError, Validator}; use crate::{AnnouncePolicy, TrackerPolicy}; #[allow(clippy::struct_excessive_bools)] @@ -131,3 +132,13 @@ impl PrivateMode { true } } + +impl Validator for Core { + fn validate(&self) -> Result<(), SemanticValidationError> { + if self.private_mode.is_some() && !self.private { + return Err(SemanticValidationError::UselessPrivateModeSection); + } + + Ok(()) + } +} diff --git a/packages/configuration/src/v2/mod.rs b/packages/configuration/src/v2/mod.rs index 5fa142b0b..de8af0891 100644 --- a/packages/configuration/src/v2/mod.rs +++ b/packages/configuration/src/v2/mod.rs @@ 
-251,6 +251,7 @@ use self::health_check_api::HealthCheckApi; use self::http_tracker::HttpTracker; use self::tracker_api::HttpApi; use self::udp_tracker::UdpTracker; +use crate::validator::{SemanticValidationError, Validator}; use crate::{Error, Info, Metadata, Version}; /// This configuration version @@ -394,6 +395,12 @@ impl Configuration { } } +impl Validator for Configuration { + fn validate(&self) -> Result<(), SemanticValidationError> { + self.core.validate() + } +} + #[cfg(test)] mod tests { diff --git a/packages/configuration/src/validator.rs b/packages/configuration/src/validator.rs new file mode 100644 index 000000000..4555b88dd --- /dev/null +++ b/packages/configuration/src/validator.rs @@ -0,0 +1,19 @@ +//! Trait to validate semantic errors. +//! +//! Errors could involve more than one configuration option. Some configuration +//! combinations can be incompatible. +use thiserror::Error; + +/// Errors that can occur validating the configuration. +#[derive(Error, Debug)] +pub enum SemanticValidationError { + #[error("Private mode section in configuration can only be included when the tracker is running in private mode.")] + UselessPrivateModeSection, +} + +pub trait Validator { + /// # Errors + /// + /// Will return an error if the configuration is invalid. + fn validate(&self) -> Result<(), SemanticValidationError>; +} diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index cfb84a2d1..b79f4dc86 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -14,6 +14,7 @@ use std::sync::Arc; use torrust_tracker_clock::static_time; +use torrust_tracker_configuration::validator::Validator; use torrust_tracker_configuration::Configuration; use tracing::info; @@ -24,10 +25,18 @@ use crate::core::Tracker; use crate::shared::crypto::ephemeral_instance_keys; /// It loads the configuration from the environment and builds the main domain [`Tracker`] struct. +/// +/// # Panics +/// +/// Setup can file if the configuration is invalid. 
#[must_use] pub fn setup() -> (Configuration, Arc) { let configuration = initialize_configuration(); + if let Err(e) = configuration.validate() { + panic!("Configuration error: {e}"); + } + let tracker = initialize_with_configuration(&configuration); info!("Configuration:\n{}", configuration.clone().mask_secrets().to_json()); diff --git a/src/core/auth.rs b/src/core/auth.rs index fef5b3098..61ccbdb52 100644 --- a/src/core/auth.rs +++ b/src/core/auth.rs @@ -4,7 +4,7 @@ //! Tracker keys are tokens used to authenticate the tracker clients when the tracker runs //! in `private` or `private_listed` modes. //! -//! There are services to [`generate_key`] and [`verify_key`] authentication keys. +//! There are services to [`generate_key`] and [`verify_key_expiration`] authentication keys. //! //! Authentication keys are used only by [`HTTP`](crate::servers::http) trackers. All keys have an expiration time, that means //! they are only valid during a period of time. After that time the expiring key will no longer be valid. From 8d58882a82014a4ac209c94ceae300fc9f07a6f1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 1 Aug 2024 16:04:22 +0100 Subject: [PATCH 0958/1003] refactor: make method private It was public becuase it was being used in a test but it can be used the `authenticate` method instead. --- src/core/mod.rs | 8 ++------ tests/servers/api/v1/contract/context/auth_key.rs | 9 +++------ 2 files changed, 5 insertions(+), 12 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index a8c265408..dd15d8705 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -988,9 +988,7 @@ impl Tracker { /// # Errors /// /// Will return a `key::Error` if unable to get any `auth_key`. - pub async fn verify_auth_key(&self, key: &Key) -> Result<(), auth::Error> { - // code-review: this function is public only because it's used in a test. - // We should change the test and make it private. 
+ async fn verify_auth_key(&self, key: &Key) -> Result<(), auth::Error> { match self.keys.read().await.get(key) { None => Err(auth::Error::UnableToReadKey { location: Location::caller(), @@ -1830,13 +1828,11 @@ mod tests { #[tokio::test] async fn it_should_verify_a_valid_authentication_key() { - // todo: this should not be tested directly because - // `verify_auth_key` should be a private method. let tracker = private_tracker(); let expiring_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); - assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_ok()); + assert!(tracker.authenticate(&expiring_key.key()).await.is_ok()); } #[tokio::test] diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index cd6d2544f..41f421ca6 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -26,10 +26,9 @@ async fn should_allow_generating_a_new_random_auth_key() { let auth_key_resource = assert_auth_key_utf8(response).await; - // Verify the key with the tracker assert!(env .tracker - .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) + .authenticate(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); @@ -49,10 +48,9 @@ async fn should_allow_uploading_a_preexisting_auth_key() { let auth_key_resource = assert_auth_key_utf8(response).await; - // Verify the key with the tracker assert!(env .tracker - .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) + .authenticate(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); @@ -357,10 +355,9 @@ mod deprecated_generate_key_endpoint { let auth_key_resource = assert_auth_key_utf8(response).await; - // Verify the key with the tracker assert!(env .tracker - .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) + .authenticate(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); From 349692b928cf29108b9e9f7a176930d2c7ad478a Mon Sep 17 
00:00:00 2001 From: Jose Celano Date: Thu, 1 Aug 2024 16:45:11 +0100 Subject: [PATCH 0959/1003] test: [#989] add more tests for keys There are four ways of adding keys to the tracker. One for each combination of: - Expiring or permanent key. - Pre-generated (uploaded) ot randomdly generated key. This commit adds new tests for each case. --- src/core/mod.rs | 242 +++++++++++++++++++++++++++++++++++++----------- 1 file changed, 187 insertions(+), 55 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index dd15d8705..ea1472b61 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -1785,35 +1785,8 @@ mod tests { use std::str::FromStr; use std::time::Duration; - use torrust_tracker_clock::clock::Time; - use torrust_tracker_configuration::v2::core::PrivateMode; - - use crate::core::auth::{self, Key}; + use crate::core::auth::{self}; use crate::core::tests::the_tracker::private_tracker; - use crate::CurrentClock; - - #[tokio::test] - async fn it_should_generate_the_expiring_authentication_keys() { - let tracker = private_tracker(); - - let key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); - - assert_eq!( - key.valid_until, - Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) - ); - } - - #[tokio::test] - async fn it_should_authenticate_a_peer_by_using_a_key() { - let tracker = private_tracker(); - - let expiring_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); - - let result = tracker.authenticate(&expiring_key.key()).await; - - assert!(result.is_ok()); - } #[tokio::test] async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() { @@ -1826,33 +1799,6 @@ mod tests { assert!(result.is_err()); } - #[tokio::test] - async fn it_should_verify_a_valid_authentication_key() { - let tracker = private_tracker(); - - let expiring_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); - - assert!(tracker.authenticate(&expiring_key.key()).await.is_ok()); - 
} - - #[tokio::test] - async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { - let mut tracker = private_tracker(); - - tracker.config.private_mode = Some(PrivateMode { - check_keys_expiration: false, - }); - - let past_time = Some(Duration::ZERO); - - let expiring_key = tracker - .add_auth_key(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), past_time) - .await - .unwrap(); - - assert!(tracker.authenticate(&expiring_key.key()).await.is_ok()); - } - #[tokio::test] async fn it_should_fail_verifying_an_unregistered_authentication_key() { let tracker = private_tracker(); @@ -1888,6 +1834,192 @@ mod tests { assert!(result.is_ok()); assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_ok()); } + + mod with_expiring_and { + + mod randomly_generated_keys { + use std::time::Duration; + + use torrust_tracker_clock::clock::Time; + use torrust_tracker_configuration::v2::core::PrivateMode; + + use crate::core::auth::Key; + use crate::core::tests::the_tracker::private_tracker; + use crate::CurrentClock; + + #[tokio::test] + async fn it_should_generate_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + + assert_eq!( + peer_key.valid_until, + Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) + ); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + + let result = tracker.authenticate(&peer_key.key()).await; + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { + let mut tracker = private_tracker(); + + tracker.config.private_mode = Some(PrivateMode { + check_keys_expiration: false, + }); + + let past_timestamp = Duration::ZERO; + + let peer_key = tracker + 
.add_auth_key(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), Some(past_timestamp)) + .await + .unwrap(); + + assert!(tracker.authenticate(&peer_key.key()).await.is_ok()); + } + } + + mod pre_generated_keys { + use std::time::Duration; + + use torrust_tracker_clock::clock::Time; + use torrust_tracker_configuration::v2::core::PrivateMode; + + use crate::core::auth::Key; + use crate::core::tests::the_tracker::private_tracker; + use crate::core::AddKeyRequest; + use crate::CurrentClock; + + #[tokio::test] + async fn it_should_add_a_pre_generated_key() { + let tracker = private_tracker(); + + let peer_key = tracker + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(100), + }) + .await + .unwrap(); + + assert_eq!( + peer_key.valid_until, + Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) + ); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(100), + }) + .await + .unwrap(); + + let result = tracker.authenticate(&peer_key.key()).await; + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { + let mut tracker = private_tracker(); + + tracker.config.private_mode = Some(PrivateMode { + check_keys_expiration: false, + }); + + let peer_key = tracker + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(0), + }) + .await + .unwrap(); + + assert!(tracker.authenticate(&peer_key.key()).await.is_ok()); + } + } + } + + mod with_permanent_and { + + mod randomly_generated_keys { + use crate::core::tests::the_tracker::private_tracker; + + #[tokio::test] + async 
fn it_should_generate_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker.generate_permanent_auth_key().await.unwrap(); + + assert_eq!(peer_key.valid_until, None); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker.generate_permanent_auth_key().await.unwrap(); + + let result = tracker.authenticate(&peer_key.key()).await; + + assert!(result.is_ok()); + } + } + + mod pre_generated_keys { + use crate::core::auth::Key; + use crate::core::tests::the_tracker::private_tracker; + use crate::core::AddKeyRequest; + + #[tokio::test] + async fn it_should_add_a_pre_generated_key() { + let tracker = private_tracker(); + + let peer_key = tracker + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: None, + }) + .await + .unwrap(); + + assert_eq!(peer_key.valid_until, None); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: None, + }) + .await + .unwrap(); + + let result = tracker.authenticate(&peer_key.key()).await; + + assert!(result.is_ok()); + } + } + } } mod handling_an_announce_request {} From 287e48422003e2e1b890dd2de9cb5ce102601d8c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 2 Aug 2024 10:21:00 +0100 Subject: [PATCH 0960/1003] feat!: [#958] improve metadata in config files The metadata section in the configuration file is changed: TOML: ```toml [metadata] app = "torrust-tracker" purpose = "configuration" schema_version = "2.0.0" ``` JSON: ```json { "metadata": { "app": "torrust-tracker", "purpose": "configuration", "version": "2.0.0" } } ``` - `app`: the applications this config file is used for. 
- `purpose`: the purpose of the file containing these metadata. - `schema_version`: the schema version for the file being parsed. --- packages/configuration/src/lib.rs | 71 ++++++++++++++----- .../configuration/src/{v2 => v2_0_0}/core.rs | 16 ++--- .../src/{v2 => v2_0_0}/database.rs | 0 .../src/{v2 => v2_0_0}/health_check_api.rs | 0 .../src/{v2 => v2_0_0}/http_tracker.rs | 0 .../src/{v2 => v2_0_0}/logging.rs | 0 .../configuration/src/{v2 => v2_0_0}/mod.rs | 14 ++-- .../src/{v2 => v2_0_0}/network.rs | 0 .../src/{v2 => v2_0_0}/tracker_api.rs | 2 +- .../src/{v2 => v2_0_0}/udp_tracker.rs | 0 .../config/tracker.container.mysql.toml | 5 +- .../config/tracker.container.sqlite3.toml | 5 +- .../config/tracker.development.sqlite3.toml | 5 +- .../config/tracker.e2e.container.sqlite3.toml | 5 +- .../config/tracker.udp.benchmarking.toml | 5 +- src/bootstrap/config.rs | 2 +- src/core/mod.rs | 6 +- 17 files changed, 93 insertions(+), 43 deletions(-) rename packages/configuration/src/{v2 => v2_0_0}/core.rs (90%) rename packages/configuration/src/{v2 => v2_0_0}/database.rs (100%) rename packages/configuration/src/{v2 => v2_0_0}/health_check_api.rs (100%) rename packages/configuration/src/{v2 => v2_0_0}/http_tracker.rs (100%) rename packages/configuration/src/{v2 => v2_0_0}/logging.rs (100%) rename packages/configuration/src/{v2 => v2_0_0}/mod.rs (97%) rename packages/configuration/src/{v2 => v2_0_0}/network.rs (100%) rename packages/configuration/src/{v2 => v2_0_0}/tracker_api.rs (98%) rename packages/configuration/src/{v2 => v2_0_0}/udp_tracker.rs (100%) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 7b59d3f95..aedf3a6f1 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -4,7 +4,7 @@ //! Torrust Tracker, which is a `BitTorrent` tracker server. //! //! The current version for configuration is [`v2`]. 
-pub mod v2; +pub mod v2_0_0; pub mod validator; use std::collections::HashMap; @@ -35,53 +35,86 @@ const ENV_VAR_CONFIG_TOML: &str = "TORRUST_TRACKER_CONFIG_TOML"; /// The `tracker.toml` file location. pub const ENV_VAR_CONFIG_TOML_PATH: &str = "TORRUST_TRACKER_CONFIG_TOML_PATH"; -pub type Configuration = v2::Configuration; -pub type Core = v2::core::Core; -pub type HealthCheckApi = v2::health_check_api::HealthCheckApi; -pub type HttpApi = v2::tracker_api::HttpApi; -pub type HttpTracker = v2::http_tracker::HttpTracker; -pub type UdpTracker = v2::udp_tracker::UdpTracker; -pub type Database = v2::database::Database; -pub type Driver = v2::database::Driver; -pub type Threshold = v2::logging::Threshold; +pub type Configuration = v2_0_0::Configuration; +pub type Core = v2_0_0::core::Core; +pub type HealthCheckApi = v2_0_0::health_check_api::HealthCheckApi; +pub type HttpApi = v2_0_0::tracker_api::HttpApi; +pub type HttpTracker = v2_0_0::http_tracker::HttpTracker; +pub type UdpTracker = v2_0_0::udp_tracker::UdpTracker; +pub type Database = v2_0_0::database::Database; +pub type Driver = v2_0_0::database::Driver; +pub type Threshold = v2_0_0::logging::Threshold; pub type AccessTokens = HashMap; -pub const LATEST_VERSION: &str = "2"; +pub const LATEST_VERSION: &str = "2.0.0"; /// Info about the configuration specification. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display, Clone)] +#[display(fmt = "Metadata(app: {app}, purpose: {purpose}, schema_version: {schema_version})")] pub struct Metadata { - #[serde(default = "Metadata::default_version")] + /// The application this configuration is valid for. + #[serde(default = "Metadata::default_app")] + app: App, + + /// The purpose of this parsed file. + #[serde(default = "Metadata::default_purpose")] + purpose: Purpose, + + /// The schema version for the configuration. 
+ #[serde(default = "Metadata::default_schema_version")] #[serde(flatten)] - version: Version, + schema_version: Version, } impl Default for Metadata { fn default() -> Self { Self { - version: Self::default_version(), + app: Self::default_app(), + purpose: Self::default_purpose(), + schema_version: Self::default_schema_version(), } } } impl Metadata { - fn default_version() -> Version { + fn default_app() -> App { + App::TorrustTracker + } + + fn default_purpose() -> Purpose { + Purpose::Configuration + } + + fn default_schema_version() -> Version { Version::latest() } } +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display, Clone)] +#[serde(rename_all = "kebab-case")] +pub enum App { + TorrustTracker, +} + +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display, Clone)] +#[serde(rename_all = "lowercase")] +pub enum Purpose { + Configuration, +} + /// The configuration version. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display, Clone)] +#[serde(rename_all = "lowercase")] pub struct Version { #[serde(default = "Version::default_semver")] - version: String, + schema_version: String, } impl Default for Version { fn default() -> Self { Self { - version: Self::default_semver(), + schema_version: Self::default_semver(), } } } @@ -89,13 +122,13 @@ impl Default for Version { impl Version { fn new(semver: &str) -> Self { Self { - version: semver.to_owned(), + schema_version: semver.to_owned(), } } fn latest() -> Self { Self { - version: LATEST_VERSION.to_string(), + schema_version: LATEST_VERSION.to_string(), } } diff --git a/packages/configuration/src/v2/core.rs b/packages/configuration/src/v2_0_0/core.rs similarity index 90% rename from packages/configuration/src/v2/core.rs rename to packages/configuration/src/v2_0_0/core.rs index 3dfde122e..ed3e6aeb7 100644 --- a/packages/configuration/src/v2/core.rs +++ b/packages/configuration/src/v2_0_0/core.rs @@ -2,18 +2,18 @@ use derive_more::{Constructor, Display}; use serde::{Deserialize, 
Serialize}; use super::network::Network; -use crate::v2::database::Database; +use crate::v2_0_0::database::Database; use crate::validator::{SemanticValidationError, Validator}; use crate::{AnnouncePolicy, TrackerPolicy}; #[allow(clippy::struct_excessive_bools)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct Core { - // Announce policy configuration. + /// Announce policy configuration. #[serde(default = "Core::default_announce_policy")] pub announce_policy: AnnouncePolicy, - // Database configuration. + /// Database configuration. #[serde(default = "Core::default_database")] pub database: Database, @@ -22,23 +22,23 @@ pub struct Core { #[serde(default = "Core::default_inactive_peer_cleanup_interval")] pub inactive_peer_cleanup_interval: u64, - // When `true` only approved torrents can be announced in the tracker. + /// When `true` only approved torrents can be announced in the tracker. #[serde(default = "Core::default_listed")] pub listed: bool, - // Network configuration. + /// Network configuration. #[serde(default = "Core::default_network")] pub net: Network, - // When `true` clients require a key to connect and use the tracker. + /// When `true` clients require a key to connect and use the tracker. #[serde(default = "Core::default_private")] pub private: bool, - // Configuration specific when the tracker is running in private mode. + /// Configuration specific when the tracker is running in private mode. #[serde(default = "Core::default_private_mode")] pub private_mode: Option, - // Tracker policy configuration. + /// Tracker policy configuration. 
#[serde(default = "Core::default_tracker_policy")] pub tracker_policy: TrackerPolicy, diff --git a/packages/configuration/src/v2/database.rs b/packages/configuration/src/v2_0_0/database.rs similarity index 100% rename from packages/configuration/src/v2/database.rs rename to packages/configuration/src/v2_0_0/database.rs diff --git a/packages/configuration/src/v2/health_check_api.rs b/packages/configuration/src/v2_0_0/health_check_api.rs similarity index 100% rename from packages/configuration/src/v2/health_check_api.rs rename to packages/configuration/src/v2_0_0/health_check_api.rs diff --git a/packages/configuration/src/v2/http_tracker.rs b/packages/configuration/src/v2_0_0/http_tracker.rs similarity index 100% rename from packages/configuration/src/v2/http_tracker.rs rename to packages/configuration/src/v2_0_0/http_tracker.rs diff --git a/packages/configuration/src/v2/logging.rs b/packages/configuration/src/v2_0_0/logging.rs similarity index 100% rename from packages/configuration/src/v2/logging.rs rename to packages/configuration/src/v2_0_0/logging.rs diff --git a/packages/configuration/src/v2/mod.rs b/packages/configuration/src/v2_0_0/mod.rs similarity index 97% rename from packages/configuration/src/v2/mod.rs rename to packages/configuration/src/v2_0_0/mod.rs index de8af0891..b426b5c03 100644 --- a/packages/configuration/src/v2/mod.rs +++ b/packages/configuration/src/v2_0_0/mod.rs @@ -255,7 +255,7 @@ use crate::validator::{SemanticValidationError, Validator}; use crate::{Error, Info, Metadata, Version}; /// This configuration version -const VERSION_2: &str = "2"; +const VERSION_2_0_0: &str = "2.0.0"; /// Prefix for env vars that overwrite configuration options. const CONFIG_OVERRIDE_PREFIX: &str = "TORRUST_TRACKER_CONFIG_OVERRIDE_"; @@ -267,7 +267,6 @@ const CONFIG_OVERRIDE_SEPARATOR: &str = "__"; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Default, Clone)] pub struct Configuration { /// Configuration metadata. 
- #[serde(flatten)] pub metadata: Metadata, /// Logging configuration @@ -335,9 +334,9 @@ impl Configuration { let config: Configuration = figment.extract()?; - if config.metadata.version != Version::new(VERSION_2) { + if config.metadata.schema_version != Version::new(VERSION_2_0_0) { return Err(Error::UnsupportedVersion { - version: config.metadata.version, + version: config.metadata.schema_version, }); } @@ -406,12 +405,15 @@ mod tests { use std::net::{IpAddr, Ipv4Addr}; - use crate::v2::Configuration; + use crate::v2_0_0::Configuration; use crate::Info; #[cfg(test)] fn default_config_toml() -> String { - let config = r#"version = "2" + let config = r#"[metadata] + app = "torrust-tracker" + purpose = "configuration" + schema_version = "2.0.0" [logging] threshold = "info" diff --git a/packages/configuration/src/v2/network.rs b/packages/configuration/src/v2_0_0/network.rs similarity index 100% rename from packages/configuration/src/v2/network.rs rename to packages/configuration/src/v2_0_0/network.rs diff --git a/packages/configuration/src/v2/tracker_api.rs b/packages/configuration/src/v2_0_0/tracker_api.rs similarity index 98% rename from packages/configuration/src/v2/tracker_api.rs rename to packages/configuration/src/v2_0_0/tracker_api.rs index dbbff7995..43b08a21e 100644 --- a/packages/configuration/src/v2/tracker_api.rs +++ b/packages/configuration/src/v2_0_0/tracker_api.rs @@ -71,7 +71,7 @@ impl HttpApi { #[cfg(test)] mod tests { - use crate::v2::tracker_api::HttpApi; + use crate::v2_0_0::tracker_api::HttpApi; #[test] fn http_api_configuration_should_check_if_it_contains_a_token() { diff --git a/packages/configuration/src/v2/udp_tracker.rs b/packages/configuration/src/v2_0_0/udp_tracker.rs similarity index 100% rename from packages/configuration/src/v2/udp_tracker.rs rename to packages/configuration/src/v2_0_0/udp_tracker.rs diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml index 
1c84fb2e2..1fcad4df1 100644 --- a/share/default/config/tracker.container.mysql.toml +++ b/share/default/config/tracker.container.mysql.toml @@ -1,4 +1,7 @@ -version = "2" +[metadata] +app = "torrust-tracker" +purpose = "configuration" +schema_version = "2.0.0" [core.database] driver = "mysql" diff --git a/share/default/config/tracker.container.sqlite3.toml b/share/default/config/tracker.container.sqlite3.toml index aa8aefa5e..017df5b48 100644 --- a/share/default/config/tracker.container.sqlite3.toml +++ b/share/default/config/tracker.container.sqlite3.toml @@ -1,4 +1,7 @@ -version = "2" +[metadata] +app = "torrust-tracker" +purpose = "configuration" +schema_version = "2.0.0" [core.database] path = "/var/lib/torrust/tracker/database/sqlite3.db" diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 554835922..1ecc76532 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -1,4 +1,7 @@ -version = "2" +[metadata] +app = "torrust-tracker" +purpose = "configuration" +schema_version = "2.0.0" [[udp_trackers]] bind_address = "0.0.0.0:6969" diff --git a/share/default/config/tracker.e2e.container.sqlite3.toml b/share/default/config/tracker.e2e.container.sqlite3.toml index 6b1383fb5..7c6f4bb77 100644 --- a/share/default/config/tracker.e2e.container.sqlite3.toml +++ b/share/default/config/tracker.e2e.container.sqlite3.toml @@ -1,4 +1,7 @@ -version = "2" +[metadata] +app = "torrust-tracker" +purpose = "configuration" +schema_version = "2.0.0" [core.database] path = "/var/lib/torrust/tracker/database/sqlite3.db" diff --git a/share/default/config/tracker.udp.benchmarking.toml b/share/default/config/tracker.udp.benchmarking.toml index 907a05456..afbef84b8 100644 --- a/share/default/config/tracker.udp.benchmarking.toml +++ b/share/default/config/tracker.udp.benchmarking.toml @@ -1,4 +1,7 @@ -version = "2" +[metadata] +app = 
"torrust-tracker" +purpose = "configuration" +schema_version = "2.0.0" [logging] threshold = "error" diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index 6b607bd6f..fb5afe403 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -24,7 +24,7 @@ pub const DEFAULT_PATH_CONFIG: &str = "./share/default/config/tracker.developmen #[must_use] pub fn initialize_configuration() -> Configuration { let info = Info::new(DEFAULT_PATH_CONFIG.to_string()).expect("info to load configuration is not valid"); - Configuration::load(&info).expect("configuration should be loaded from provided info") + Configuration::load(&info).expect("error loading configuration from sources") } #[cfg(test)] diff --git a/src/core/mod.rs b/src/core/mod.rs index ea1472b61..98de9e4fd 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -459,7 +459,7 @@ use derive_more::Constructor; use error::PeerKeyError; use tokio::sync::mpsc::error::SendError; use torrust_tracker_clock::clock::Time; -use torrust_tracker_configuration::v2::database; +use torrust_tracker_configuration::v2_0_0::database; use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_located_error::Located; use torrust_tracker_primitives::info_hash::InfoHash; @@ -1841,7 +1841,7 @@ mod tests { use std::time::Duration; use torrust_tracker_clock::clock::Time; - use torrust_tracker_configuration::v2::core::PrivateMode; + use torrust_tracker_configuration::v2_0_0::core::PrivateMode; use crate::core::auth::Key; use crate::core::tests::the_tracker::private_tracker; @@ -1893,7 +1893,7 @@ mod tests { use std::time::Duration; use torrust_tracker_clock::clock::Time; - use torrust_tracker_configuration::v2::core::PrivateMode; + use torrust_tracker_configuration::v2_0_0::core::PrivateMode; use crate::core::auth::Key; use crate::core::tests::the_tracker::private_tracker; From 90ef14d2d5450421fd2b59b78e09536ed30152e7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 2 Aug 2024 
11:53:02 +0100 Subject: [PATCH 0961/1003] feat!: [#938] add mandatory config options Some configuration options are mandatory. The tracker will panic if the user doesn't provide an explicit value for them from one of the configuration sources: TOML or ENV VARS. The mandatory options are: ```toml [metadata] schema_version = "2.0.0" [logging] threshold = "info" [core] private = false listed = false ``` --- packages/configuration/src/lib.rs | 3 + packages/configuration/src/v2_0_0/mod.rs | 112 ++++++++++++++++-- .../config/tracker.container.mysql.toml | 7 ++ .../config/tracker.container.sqlite3.toml | 7 ++ .../config/tracker.development.sqlite3.toml | 7 ++ .../config/tracker.e2e.container.sqlite3.toml | 7 ++ .../config/tracker.udp.benchmarking.toml | 2 + 7 files changed, 133 insertions(+), 12 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index aedf3a6f1..bdbe419ca 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -303,6 +303,9 @@ pub enum Error { #[error("Unsupported configuration version: {version}")] UnsupportedVersion { version: Version }, + + #[error("Missing mandatory configuration option. Option path: {path}")] + MissingMandatoryOption { path: String }, } impl From for Error { diff --git a/packages/configuration/src/v2_0_0/mod.rs b/packages/configuration/src/v2_0_0/mod.rs index b426b5c03..5067210bb 100644 --- a/packages/configuration/src/v2_0_0/mod.rs +++ b/packages/configuration/src/v2_0_0/mod.rs @@ -313,27 +313,33 @@ impl Configuration { } /// Loads the configuration from the `Info` struct. The whole - /// configuration in toml format is included in the `info.tracker_toml` string. + /// configuration in toml format is included in the `info.tracker_toml` + /// string. /// - /// Optionally will override the admin api token. + /// Configuration provided via env var has priority over config file path. 
/// /// # Errors /// /// Will return `Err` if the environment variable does not exist or has a bad configuration. pub fn load(info: &Info) -> Result { + // Load configuration provided by the user, prioritizing env vars let figment = if let Some(config_toml) = &info.config_toml { - // Config in env var has priority over config file path - Figment::from(Serialized::defaults(Configuration::default())) - .merge(Toml::string(config_toml)) - .merge(Env::prefixed(CONFIG_OVERRIDE_PREFIX).split(CONFIG_OVERRIDE_SEPARATOR)) + Figment::from(Toml::string(config_toml)).merge(Env::prefixed(CONFIG_OVERRIDE_PREFIX).split(CONFIG_OVERRIDE_SEPARATOR)) } else { - Figment::from(Serialized::defaults(Configuration::default())) - .merge(Toml::file(&info.config_toml_path)) + Figment::from(Toml::file(&info.config_toml_path)) .merge(Env::prefixed(CONFIG_OVERRIDE_PREFIX).split(CONFIG_OVERRIDE_SEPARATOR)) }; + // Make sure user has provided the mandatory options. + Self::check_mandatory_options(&figment)?; + + // Fill missing options with default values. + let figment = figment.join(Serialized::defaults(Configuration::default())); + + // Build final configuration. let config: Configuration = figment.extract()?; + // Make sure the provided schema version matches this version. if config.metadata.schema_version != Version::new(VERSION_2_0_0) { return Err(Error::UnsupportedVersion { version: config.metadata.schema_version, @@ -343,6 +349,28 @@ impl Configuration { Ok(config) } + /// Some configuration options are mandatory. The tracker will panic if + /// the user doesn't provide an explicit value for them from one of the + /// configuration sources: TOML or ENV VARS. + /// + /// # Errors + /// + /// Will return an error if a mandatory configuration option is only + /// obtained by default value (code), meaning the user hasn't overridden it. 
+ fn check_mandatory_options(figment: &Figment) -> Result<(), Error> { + let mandatory_options = ["metadata.schema_version", "logging.threshold", "core.private", "core.listed"]; + + for mandatory_option in mandatory_options { + figment + .find_value(mandatory_option) + .map_err(|_err| Error::MissingMandatoryOption { + path: mandatory_option.to_owned(), + })?; + } + + Ok(()) + } + /// Saves the configuration to the configuration file. /// /// # Errors @@ -496,14 +524,25 @@ mod tests { } #[test] - fn configuration_should_use_the_default_values_when_an_empty_configuration_is_provided_by_the_user() { + fn configuration_should_use_the_default_values_when_only_the_mandatory_options_are_provided_by_the_user_via_toml_file() { figment::Jail::expect_with(|jail| { - jail.create_file("tracker.toml", "")?; + jail.create_file( + "tracker.toml", + r#" + [metadata] + schema_version = "2.0.0" + + [logging] + threshold = "info" - let empty_configuration = String::new(); + [core] + listed = false + private = false + "#, + )?; let info = Info { - config_toml: Some(empty_configuration), + config_toml: None, config_toml_path: "tracker.toml".to_string(), }; @@ -515,10 +554,49 @@ mod tests { }); } + #[test] + fn configuration_should_use_the_default_values_when_only_the_mandatory_options_are_provided_by_the_user_via_toml_content() { + figment::Jail::expect_with(|_jail| { + let config_toml = r#" + [metadata] + schema_version = "2.0.0" + + [logging] + threshold = "info" + + [core] + listed = false + private = false + "# + .to_string(); + + let info = Info { + config_toml: Some(config_toml), + config_toml_path: String::new(), + }; + + let configuration = Configuration::load(&info).expect("Could not load configuration from file"); + + assert_eq!(configuration, Configuration::default()); + + Ok(()) + }); + } + #[test] fn default_configuration_could_be_overwritten_from_a_single_env_var_with_toml_contents() { figment::Jail::expect_with(|_jail| { let config_toml = r#" + [metadata] + schema_version 
= "2.0.0" + + [logging] + threshold = "info" + + [core] + listed = false + private = false + [core.database] path = "OVERWRITTEN DEFAULT DB PATH" "# @@ -543,6 +621,16 @@ mod tests { jail.create_file( "tracker.toml", r#" + [metadata] + schema_version = "2.0.0" + + [logging] + threshold = "info" + + [core] + listed = false + private = false + [core.database] path = "OVERWRITTEN DEFAULT DB PATH" "#, diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml index 1fcad4df1..865ea224e 100644 --- a/share/default/config/tracker.container.mysql.toml +++ b/share/default/config/tracker.container.mysql.toml @@ -3,6 +3,13 @@ app = "torrust-tracker" purpose = "configuration" schema_version = "2.0.0" +[logging] +threshold = "info" + +[core] +listed = false +private = false + [core.database] driver = "mysql" path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" diff --git a/share/default/config/tracker.container.sqlite3.toml b/share/default/config/tracker.container.sqlite3.toml index 017df5b48..6c73cf54a 100644 --- a/share/default/config/tracker.container.sqlite3.toml +++ b/share/default/config/tracker.container.sqlite3.toml @@ -3,6 +3,13 @@ app = "torrust-tracker" purpose = "configuration" schema_version = "2.0.0" +[logging] +threshold = "info" + +[core] +listed = false +private = false + [core.database] path = "/var/lib/torrust/tracker/database/sqlite3.db" diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 1ecc76532..96addaf87 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -3,6 +3,13 @@ app = "torrust-tracker" purpose = "configuration" schema_version = "2.0.0" +[logging] +threshold = "info" + +[core] +listed = false +private = false + [[udp_trackers]] bind_address = "0.0.0.0:6969" diff --git 
a/share/default/config/tracker.e2e.container.sqlite3.toml b/share/default/config/tracker.e2e.container.sqlite3.toml index 7c6f4bb77..73c6df219 100644 --- a/share/default/config/tracker.e2e.container.sqlite3.toml +++ b/share/default/config/tracker.e2e.container.sqlite3.toml @@ -3,6 +3,13 @@ app = "torrust-tracker" purpose = "configuration" schema_version = "2.0.0" +[logging] +threshold = "info" + +[core] +listed = false +private = false + [core.database] path = "/var/lib/torrust/tracker/database/sqlite3.db" diff --git a/share/default/config/tracker.udp.benchmarking.toml b/share/default/config/tracker.udp.benchmarking.toml index afbef84b8..a73760b95 100644 --- a/share/default/config/tracker.udp.benchmarking.toml +++ b/share/default/config/tracker.udp.benchmarking.toml @@ -7,6 +7,8 @@ schema_version = "2.0.0" threshold = "error" [core] +listed = false +private = false remove_peerless_torrents = false tracker_usage_statistics = false From 7c626452c2183dd4578eb07ca19f3a58b153c39d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 5 Aug 2024 12:58:49 +0100 Subject: [PATCH 0962/1003] chore(deps): update dependencies ```output cargo update Updating crates.io index Locking 35 packages to latest compatible versions Adding aws-lc-rs v1.8.1 Adding aws-lc-sys v0.20.1 Updating axum-server v0.6.0 -> v0.7.1 Updating bytemuck v1.16.1 -> v1.16.3 Updating bytes v1.6.1 -> v1.7.1 Updating cc v1.1.6 -> v1.1.7 Updating clap v4.5.11 -> v4.5.13 Updating clap_builder v4.5.11 -> v4.5.13 Updating clap_derive v4.5.11 -> v4.5.13 Adding dunce v1.0.5 Updating flate2 v1.0.30 -> v1.0.31 Adding fs_extra v1.3.0 Adding home v0.5.9 Updating indexmap v2.2.6 -> v2.3.0 Updating lru v0.12.3 -> v0.12.4 Adding mirai-annotations v1.12.0 Adding paste v1.0.15 Updating ppv-lite86 v0.2.17 -> v0.2.20 Adding prettyplease v0.2.20 Updating regex v1.10.5 -> v1.10.6 Updating rstest v0.21.0 -> v0.22.0 Updating rstest_macros v0.21.0 -> v0.22.0 Removing rustls v0.21.12 Updating rustls-pemfile v2.1.2 -> v2.1.3 Removing 
rustls-webpki v0.101.7 Removing sct v0.7.1 Updating serde_json v1.0.121 -> v1.0.122 Updating tempfile v3.10.1 -> v3.11.0 Removing tokio-rustls v0.24.1 Updating toml v0.8.16 -> v0.8.19 Updating toml_datetime v0.6.7 -> v0.6.8 Updating toml_edit v0.22.17 -> v0.22.20 Adding which v4.4.2 (latest: v6.0.2) Updating winapi-util v0.1.8 -> v0.1.9 Adding windows-sys v0.59.0 Updating winnow v0.6.16 -> v0.6.18 Adding zeroize_derive v1.4.2 Updating zstd-safe v7.2.0 -> v7.2.1 Updating zstd-sys v2.0.12+zstd.1.5.6 -> v2.0.13+zstd.1.5.6 ``` --- Cargo.lock | 283 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 178 insertions(+), 105 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 234f291c5..e60aca814 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -386,6 +386,33 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +[[package]] +name = "aws-lc-rs" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ae74d9bd0a7530e8afd1770739ad34b36838829d6ad61818f9230f683f5ad77" +dependencies = [ + "aws-lc-sys", + "mirai-annotations", + "paste", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f0e249228c6ad2d240c2dc94b714d711629d52bad946075d8e9b2f5391f0703" +dependencies = [ + "bindgen", + "cc", + "cmake", + "dunce", + "fs_extra", + "libc", + "paste", +] + [[package]] name = "axum" version = "0.7.5" @@ -490,9 +517,9 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.6.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1ad46c3ec4e12f4a4b6835e173ba21c25e484c9d02b49770bf006ce5367c036" +checksum = "56bac90848f6a9393ac03c63c640925c4b7c8ca21654de40d53f55964667c7d8" dependencies = [ "arc-swap", "bytes", @@ -503,10 +530,11 @@ dependencies = [ "hyper", 
"hyper-util", "pin-project-lite", - "rustls 0.21.12", + "rustls", "rustls-pemfile", + "rustls-pki-types", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", "tower", "tower-service", ] @@ -569,12 +597,15 @@ dependencies = [ "itertools 0.12.1", "lazy_static", "lazycell", + "log", + "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", "syn 2.0.72", + "which", ] [[package]] @@ -713,9 +744,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.16.1" +version = "1.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b236fc92302c97ed75b38da1f4917b5cdda4984745740f153a5d3059e48d725e" +checksum = "102087e286b4677862ea56cf8fc58bb2cdfa8725c40ffb80fe3a008eb7f2fc83" [[package]] name = "byteorder" @@ -725,9 +756,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.1" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "camino" @@ -755,9 +786,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aba8f4e9906c7ce3c73463f62a7f0c65183ada1a2d47e397cc8810827f9694f" +checksum = "26a5c3fd7bfa1ce3897a3a3501d362b2d87b7f2583ebcb4a949ec25911025cbc" dependencies = [ "jobserver", "libc", @@ -837,9 +868,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.11" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35723e6a11662c2afb578bcf0b88bf6ea8e21282a953428f240574fcc3a2b5b3" +checksum = "0fbb260a053428790f3de475e304ff84cdbc4face759ea7a3e64c1edd938a7fc" dependencies = [ "clap_builder", "clap_derive", @@ -847,9 +878,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.11" 
+version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49eb96cbfa7cfa35017b7cd548c75b14c3118c98b423041d70562665e07fb0fa" +checksum = "64b17d7ea74e9f833c7dbf2cbe4fb12ff26783eda4782a8975b72f895c9b4d99" dependencies = [ "anstream", "anstyle", @@ -859,9 +890,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.11" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d029b67f89d30bbb547c89fd5161293c0aec155fc691d7924b64550662db93e" +checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -1171,6 +1202,12 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "either" version = "1.13.0" @@ -1294,9 +1331,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +checksum = "7f211bbe8e69bbd0cfdea405084f128ae8b4aaa6b0b522fc8f2b009084797920" dependencies = [ "crc32fast", "libz-sys", @@ -1401,6 +1438,12 @@ dependencies = [ "syn 2.0.72", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "funty" version = "2.0.0" @@ -1587,7 +1630,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.2.6", + "indexmap 2.3.0", "slab", "tokio", "tokio-util", @@ -1668,6 +1711,15 @@ version = "0.4.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" +[[package]] +name = "home" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +dependencies = [ + "windows-sys 0.52.0", +] + [[package]] name = "http" version = "1.1.0" @@ -1745,10 +1797,10 @@ dependencies = [ "http", "hyper", "hyper-util", - "rustls 0.23.12", + "rustls", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.0", + "tokio-rustls", "tower-service", ] @@ -1840,9 +1892,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.6" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0" dependencies = [ "equivalent", "hashbrown 0.14.5", @@ -2059,9 +2111,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" +checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" dependencies = [ "hashbrown 0.14.5", ] @@ -2111,6 +2163,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "mirai-annotations" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" + [[package]] name = "mockall" version = "0.13.0" @@ -2439,6 +2497,12 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + [[package]] name = "pear" version = "0.2.9" @@ -2632,9 
+2696,12 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] [[package]] name = "predicates" @@ -2662,6 +2729,16 @@ dependencies = [ "termtree", ] +[[package]] +name = "prettyplease" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" +dependencies = [ + "proc-macro2", + "syn 2.0.72", +] + [[package]] name = "proc-macro-crate" version = "3.1.0" @@ -2856,9 +2933,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.5" +version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", @@ -2996,9 +3073,9 @@ dependencies = [ [[package]] name = "rstest" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9afd55a67069d6e434a95161415f5beeada95a01c7b815508a82dcb0e1593682" +checksum = "7b423f0e62bdd61734b67cd21ff50871dfaeb9cc74f869dcd6af974fbcb19936" dependencies = [ "futures", "futures-timer", @@ -3008,9 +3085,9 @@ dependencies = [ [[package]] name = "rstest_macros" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4165dfae59a39dd41d8dec720d3cbfbc71f69744efb480a3920f5d4e0cc6798d" +checksum = "c5e1711e7d14f74b12a58411c542185ef7fb7f2e7f8ee6e2940a883628522b42" dependencies = [ "cfg-if", "glob", @@ -3102,36 +3179,25 @@ dependencies = [ "windows-sys 
0.52.0", ] -[[package]] -name = "rustls" -version = "0.21.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" -dependencies = [ - "log", - "ring", - "rustls-webpki 0.101.7", - "sct", -] - [[package]] name = "rustls" version = "0.23.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" dependencies = [ + "aws-lc-rs", "once_cell", "rustls-pki-types", - "rustls-webpki 0.102.6", + "rustls-webpki", "subtle", "zeroize", ] [[package]] name = "rustls-pemfile" -version = "2.1.2" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" dependencies = [ "base64 0.22.1", "rustls-pki-types", @@ -3143,22 +3209,13 @@ version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" -[[package]] -name = "rustls-webpki" -version = "0.101.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "rustls-webpki" version = "0.102.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" dependencies = [ + "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -3215,16 +3272,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "seahash" version = "4.1.0" @@ -3306,7 +3353,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de514ef58196f1fc96dcaef80fe6170a1ce6215df9687a93fe8300e773fefc5" dependencies = [ "form_urlencoded", - "indexmap 2.2.6", + "indexmap 2.3.0", "itoa", "ryu", "serde", @@ -3314,11 +3361,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.121" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ab380d7d9f22ef3f21ad3e6c1ebe8e4fc7a2000ccba2e4d71fc96f15b2cb609" +checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.3.0", "itoa", "memchr", "ryu", @@ -3377,7 +3424,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.2.6", + "indexmap 2.3.0", "serde", "serde_derive", "serde_json", @@ -3616,12 +3663,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.10.1" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "b8fcd239983515c23a32fb82099f97d0b11b8c72f654ed659363a95c3dad7a53" dependencies = [ "cfg-if", "fastrand 2.1.0", + "once_cell", "rustix 0.38.34", "windows-sys 0.52.0", ] @@ -3765,23 +3813,13 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.12", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.12", + "rustls", 
"rustls-pki-types", "tokio", ] @@ -3801,21 +3839,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.16" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81967dd0dd2c1ab0bc3468bd7caecc32b8a4aa47d0c8c695d8c2b2108168d62c" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.17", + "toml_edit 0.22.20", ] [[package]] name = "toml_datetime" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8fb9f64314842840f1d940ac544da178732128f1c78c21772e876579e0da1db" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies = [ "serde", ] @@ -3826,22 +3864,22 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.3.0", "toml_datetime", "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.17" +version = "0.22.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d9f8729f5aea9562aac1cc0441f5d6de3cff1ee0c5d67293eeca5eb36ee7c16" +checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.3.0", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.16", + "winnow 0.6.18", ] [[package]] @@ -4335,6 +4373,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix 0.38.34", +] + [[package]] name = "winapi" version = "0.3.9" @@ -4353,11 +4403,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" 
[[package]] name = "winapi-util" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4393,6 +4443,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + [[package]] name = "windows-targets" version = "0.48.5" @@ -4525,9 +4584,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.16" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b480ae9340fc261e6be3e95a1ba86d54ae3f9171132a73ce8d4bbaf68339507c" +checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" dependencies = [ "memchr", ] @@ -4583,6 +4642,20 @@ name = "zeroize" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.72", +] [[package]] name = "zstd" @@ -4595,18 +4668,18 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.2.0" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa556e971e7b568dc775c136fc9de8c779b1c2fc3a63defaafadffdbd3181afa" +checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" dependencies = [ 
"zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.12+zstd.1.5.6" +version = "2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a4e40c320c3cb459d9a9ff6de98cff88f4751ee9275d140e2be94a2b74e4c13" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", "pkg-config", From 5939b9a69599fe4e578656d2a09cfafbdcc86a29 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 5 Aug 2024 16:09:50 +0100 Subject: [PATCH 0963/1003] docs: update roadmap in README Permanent keys have been already implemented. --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 306a8620c..6d611d9a5 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,6 @@ Core: - [ ] New option `want_ip_from_query_string`. See . -- [ ] Permanent keys. See . - [ ] Peer and torrents specific statistics. See . Persistence: From 222fa42ee0bf8674bb2b2ec639ed2fdcf8b02763 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 5 Jul 2024 11:33:24 +0100 Subject: [PATCH 0964/1003] feat: disable TimeoutAcceptor when TSL is enabled The TimeoutAcceptor es a custom acceptor for Axum that sets a timeput for making a request after openning a connection. It does not work when TSL is enabled. This commit disables it, therefore the app does not have any way to avoid a DDos attacks where clients just open connections without making any request. --- src/servers/apis/server.rs | 4 +++- src/servers/http/server.rs | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 39a68a856..40c4d0779 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -239,7 +239,9 @@ impl Launcher { match tls { Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) .handle(handle) - .acceptor(TimeoutAcceptor) + // The TimeoutAcceptor is commented because TSL does not work with it. 
+ // See: https://github.com/torrust/torrust-index/issues/204#issuecomment-2115529214 + //.acceptor(TimeoutAcceptor) .serve(router.into_make_service_with_connect_info::()) .await .expect("Axum server for tracker API crashed."), diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index faedaf921..4a6dccc6a 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -65,7 +65,9 @@ impl Launcher { match tls { Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) .handle(handle) - .acceptor(TimeoutAcceptor) + // The TimeoutAcceptor is commented because TSL does not work with it. + // See: https://github.com/torrust/torrust-index/issues/204#issuecomment-2115529214 + //.acceptor(TimeoutAcceptor) .serve(app.into_make_service_with_connect_info::()) .await .expect("Axum server crashed."), From e563bfbcd0b25004ce13bfefd448656606380c00 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 8 Aug 2024 12:01:22 +0100 Subject: [PATCH 0965/1003] fix: benchmarking config template - Missing [core.tracker_policy] section. Configuration was changed. - Missing database config. Needed when the default DB folder does not exist. With this config the storage folder is not needed. We are not actually using the DB for benchmarking, even though a DB config is required. 
--- share/default/config/tracker.udp.benchmarking.toml | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/share/default/config/tracker.udp.benchmarking.toml b/share/default/config/tracker.udp.benchmarking.toml index a73760b95..c6644d8dc 100644 --- a/share/default/config/tracker.udp.benchmarking.toml +++ b/share/default/config/tracker.udp.benchmarking.toml @@ -1,6 +1,4 @@ [metadata] -app = "torrust-tracker" -purpose = "configuration" schema_version = "2.0.0" [logging] @@ -9,8 +7,15 @@ threshold = "error" [core] listed = false private = false -remove_peerless_torrents = false tracker_usage_statistics = false +[core.database] +driver = "sqlite3" +path = "./sqlite3.db" + +[core.tracker_policy] +persistent_torrent_completed_stat = false +remove_peerless_torrents = false + [[udp_trackers]] bind_address = "0.0.0.0:6969" From 176658762b9cd03d081b628a28aa1528ab19a137 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 8 Aug 2024 16:27:34 +0100 Subject: [PATCH 0966/1003] feat: [#1002] remove inactive peers always even when `remove_peerless_torrents` is disabled. We should remove peers that haven't announced; otherwise we are returning inactive peers to the clients. That does not affect keeping the torrents even if they don't have any peers. --- src/core/mod.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 98de9e4fd..a6ee830db 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -778,18 +778,17 @@ impl Tracker { self.torrents.get_metrics() } - /// Remove inactive peers and (optionally) peerless torrents + /// Remove inactive peers and (optionally) peerless torrents. 
/// /// # Context: Tracker pub fn cleanup_torrents(&self) { - // If we don't need to remove torrents we will use the faster iter + let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) + .unwrap_or_default(); + + self.torrents.remove_inactive_peers(current_cutoff); + if self.config.tracker_policy.remove_peerless_torrents { self.torrents.remove_peerless_torrents(&self.config.tracker_policy); - } else { - let current_cutoff = - CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) - .unwrap_or_default(); - self.torrents.remove_inactive_peers(current_cutoff); } } From 3fbab31f741475596fcedd1527804efa72b02ead Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 8 Aug 2024 16:38:50 +0100 Subject: [PATCH 0967/1003] refactor: [#1002] rename is_good fn to meets_retaining_policy A "good" torrent means it should be retained in the repository according to the tracker policy. It should not be removed (even if it does not have any peers). 
--- packages/torrent-repository/src/entry/mod.rs | 6 +++--- .../src/entry/mutex_parking_lot.rs | 4 ++-- packages/torrent-repository/src/entry/mutex_std.rs | 4 ++-- packages/torrent-repository/src/entry/mutex_tokio.rs | 4 ++-- .../src/entry/rw_lock_parking_lot.rs | 4 ++-- packages/torrent-repository/src/entry/single.rs | 2 +- .../src/repository/dash_map_mutex_std.rs | 2 +- .../torrent-repository/src/repository/rw_lock_std.rs | 2 +- .../src/repository/rw_lock_std_mutex_std.rs | 2 +- .../src/repository/rw_lock_std_mutex_tokio.rs | 4 ++-- .../src/repository/rw_lock_tokio.rs | 2 +- .../src/repository/rw_lock_tokio_mutex_std.rs | 2 +- .../src/repository/rw_lock_tokio_mutex_tokio.rs | 2 +- .../src/repository/skip_map_mutex_std.rs | 6 +++--- packages/torrent-repository/tests/common/torrent.rs | 12 ++++++------ packages/torrent-repository/tests/entry/mod.rs | 12 ++++++------ packages/torrent-repository/tests/repository/mod.rs | 2 +- 17 files changed, 36 insertions(+), 36 deletions(-) diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index b811d3262..b920839d9 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -22,7 +22,7 @@ pub trait Entry { fn get_swarm_metadata(&self) -> SwarmMetadata; /// Returns True if Still a Valid Entry according to the Tracker Policy - fn is_good(&self, policy: &TrackerPolicy) -> bool; + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool; /// Returns True if the Peers is Empty fn peers_is_empty(&self) -> bool; @@ -53,7 +53,7 @@ pub trait Entry { #[allow(clippy::module_name_repetitions)] pub trait EntrySync { fn get_swarm_metadata(&self) -> SwarmMetadata; - fn is_good(&self, policy: &TrackerPolicy) -> bool; + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool; fn peers_is_empty(&self) -> bool; fn get_peers_len(&self) -> usize; fn get_peers(&self, limit: Option) -> Vec>; @@ -65,7 +65,7 @@ pub trait EntrySync { 
#[allow(clippy::module_name_repetitions)] pub trait EntryAsync { fn get_swarm_metadata(&self) -> impl std::future::Future + Send; - fn check_good(self, policy: &TrackerPolicy) -> impl std::future::Future + Send; + fn meets_retaining_policy(self, policy: &TrackerPolicy) -> impl std::future::Future + Send; fn peers_is_empty(&self) -> impl std::future::Future + Send; fn get_peers_len(&self) -> impl std::future::Future + Send; fn get_peers(&self, limit: Option) -> impl std::future::Future>> + Send; diff --git a/packages/torrent-repository/src/entry/mutex_parking_lot.rs b/packages/torrent-repository/src/entry/mutex_parking_lot.rs index 4f3921ea7..738c3ff9d 100644 --- a/packages/torrent-repository/src/entry/mutex_parking_lot.rs +++ b/packages/torrent-repository/src/entry/mutex_parking_lot.rs @@ -13,8 +13,8 @@ impl EntrySync for EntryMutexParkingLot { self.lock().get_swarm_metadata() } - fn is_good(&self, policy: &TrackerPolicy) -> bool { - self.lock().is_good(policy) + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + self.lock().meets_retaining_policy(policy) } fn peers_is_empty(&self) -> bool { diff --git a/packages/torrent-repository/src/entry/mutex_std.rs b/packages/torrent-repository/src/entry/mutex_std.rs index 990d8ab76..0ab70a96f 100644 --- a/packages/torrent-repository/src/entry/mutex_std.rs +++ b/packages/torrent-repository/src/entry/mutex_std.rs @@ -13,8 +13,8 @@ impl EntrySync for EntryMutexStd { self.lock().expect("it should get a lock").get_swarm_metadata() } - fn is_good(&self, policy: &TrackerPolicy) -> bool { - self.lock().expect("it should get a lock").is_good(policy) + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + self.lock().expect("it should get a lock").meets_retaining_policy(policy) } fn peers_is_empty(&self) -> bool { diff --git a/packages/torrent-repository/src/entry/mutex_tokio.rs b/packages/torrent-repository/src/entry/mutex_tokio.rs index c5363e51a..6db789a72 100644 --- 
a/packages/torrent-repository/src/entry/mutex_tokio.rs +++ b/packages/torrent-repository/src/entry/mutex_tokio.rs @@ -13,8 +13,8 @@ impl EntryAsync for EntryMutexTokio { self.lock().await.get_swarm_metadata() } - async fn check_good(self, policy: &TrackerPolicy) -> bool { - self.lock().await.is_good(policy) + async fn meets_retaining_policy(self, policy: &TrackerPolicy) -> bool { + self.lock().await.meets_retaining_policy(policy) } async fn peers_is_empty(&self) -> bool { diff --git a/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs b/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs index ef0e958d5..ac0dc0b30 100644 --- a/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs +++ b/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs @@ -13,8 +13,8 @@ impl EntrySync for EntryRwLockParkingLot { self.read().get_swarm_metadata() } - fn is_good(&self, policy: &TrackerPolicy) -> bool { - self.read().is_good(policy) + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + self.read().meets_retaining_policy(policy) } fn peers_is_empty(&self) -> bool { diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs index a01124454..6d7ed3155 100644 --- a/packages/torrent-repository/src/entry/single.rs +++ b/packages/torrent-repository/src/entry/single.rs @@ -22,7 +22,7 @@ impl Entry for EntrySingle { } } - fn is_good(&self, policy: &TrackerPolicy) -> bool { + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { if policy.persistent_torrent_completed_stat && self.downloaded > 0 { return true; } diff --git a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs index a38205205..4354c12ec 100644 --- a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs @@ -103,6 +103,6 @@ where } fn 
remove_peerless_torrents(&self, policy: &TrackerPolicy) { - self.torrents.retain(|_, entry| entry.is_good(policy)); + self.torrents.retain(|_, entry| entry.meets_retaining_policy(policy)); } } diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs index 0d96a2375..5439fdd79 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std.rs @@ -126,6 +126,6 @@ where fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { let mut db = self.get_torrents_mut(); - db.retain(|_, e| e.is_good(policy)); + db.retain(|_, e| e.meets_retaining_policy(policy)); } } diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs index 76d5e8f1e..7d58b0b10 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs @@ -124,6 +124,6 @@ where fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { let mut db = self.get_torrents_mut(); - db.retain(|_, e| e.lock().expect("it should lock entry").is_good(policy)); + db.retain(|_, e| e.lock().expect("it should lock entry").meets_retaining_policy(policy)); } } diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs index e527d6b59..90451ca9f 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs @@ -143,8 +143,8 @@ where handles = zip(db.keys().copied(), db.values().cloned()) .map(|(infohash, torrent)| { torrent - .check_good(policy) - .map(move |good| if good { None } else { Some(infohash) }) + .meets_retaining_policy(policy) + .map(move |should_be_retained| if should_be_retained { None } else { 
Some(infohash) }) .boxed() }) .collect::>(); diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs index c360106b8..baaa01232 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs @@ -130,6 +130,6 @@ where async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { let mut db = self.get_torrents_mut().await; - db.retain(|_, e| e.is_good(policy)); + db.retain(|_, e| e.meets_retaining_policy(policy)); } } diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs index 9fce79b44..1887f70c7 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs @@ -124,6 +124,6 @@ where async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { let mut db = self.get_torrents_mut().await; - db.retain(|_, e| e.lock().expect("it should lock entry").is_good(policy)); + db.retain(|_, e| e.lock().expect("it should lock entry").meets_retaining_policy(policy)); } } diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs index c7e0d4054..6c9c08a73 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -130,7 +130,7 @@ where let mut not_good = Vec::::default(); for (&infohash, torrent) in db.iter() { - if !torrent.clone().check_good(policy).await { + if !torrent.clone().meets_retaining_policy(policy).await { not_good.push(infohash); } } diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index 9960b0c30..dd0d9c1b1 
100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -100,7 +100,7 @@ where fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { for entry in &self.torrents { - if entry.value().is_good(policy) { + if entry.value().meets_retaining_policy(policy) { continue; } @@ -191,7 +191,7 @@ where fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { for entry in &self.torrents { - if entry.value().is_good(policy) { + if entry.value().meets_retaining_policy(policy) { continue; } @@ -282,7 +282,7 @@ where fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { for entry in &self.torrents { - if entry.value().is_good(policy) { + if entry.value().meets_retaining_policy(policy) { continue; } diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index abcf5525e..927f13169 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -29,13 +29,13 @@ impl Torrent { } } - pub(crate) async fn is_good(&self, policy: &TrackerPolicy) -> bool { + pub(crate) async fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { match self { - Torrent::Single(entry) => entry.is_good(policy), - Torrent::MutexStd(entry) => entry.is_good(policy), - Torrent::MutexTokio(entry) => entry.clone().check_good(policy).await, - Torrent::MutexParkingLot(entry) => entry.is_good(policy), - Torrent::RwLockParkingLot(entry) => entry.is_good(policy), + Torrent::Single(entry) => entry.meets_retaining_policy(policy), + Torrent::MutexStd(entry) => entry.meets_retaining_policy(policy), + Torrent::MutexTokio(entry) => entry.clone().meets_retaining_policy(policy).await, + Torrent::MutexParkingLot(entry) => entry.meets_retaining_policy(policy), + Torrent::RwLockParkingLot(entry) => entry.meets_retaining_policy(policy), } } diff --git 
a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index fdbe211b3..2a7063a4f 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -126,7 +126,7 @@ async fn it_should_be_empty_by_default( #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_check_if_entry_is_good( +async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy( #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, #[case] makes: &Makes, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, @@ -141,19 +141,19 @@ async fn it_should_check_if_entry_is_good( (true, true) => match (has_peers, has_downloads) { // no peers, but has downloads // peers, with or without downloads - (false, true) | (true, true | false) => assert!(torrent.is_good(&policy).await), + (false, true) | (true, true | false) => assert!(torrent.meets_retaining_policy(&policy).await), // no peers and no downloads - (false, false) => assert!(!torrent.is_good(&policy).await), + (false, false) => assert!(!torrent.meets_retaining_policy(&policy).await), }, // remove torrents without peers and drop completed download stats (true, false) => match (has_peers, has_downloads) { // peers, with or without downloads - (true, true | false) => assert!(torrent.is_good(&policy).await), + (true, true | false) => assert!(torrent.meets_retaining_policy(&policy).await), // no peers and with or without downloads - (false, true | false) => assert!(!torrent.is_good(&policy).await), + (false, true | false) => assert!(!torrent.meets_retaining_policy(&policy).await), }, // keep torrents without peers, but keep or drop completed download stats - (false, true | false) => assert!(torrent.is_good(&policy).await), + (false, true | false) => 
assert!(torrent.meets_retaining_policy(&policy).await), } } diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index b10f4a64a..b3b742607 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -634,6 +634,6 @@ async fn it_should_remove_peerless_torrents( let torrents = repo.get_paginated(None).await; for (_, entry) in torrents { - assert!(entry.is_good(&policy)); + assert!(entry.meets_retaining_policy(&policy)); } } From f5e38bb9f40675a9cca28d43ea92c2f3a5defdcc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 12 Aug 2024 10:17:12 +0100 Subject: [PATCH 0968/1003] feat!: [#1006] remove config deafults for secrets --- .../configuration/src/v2_0_0/tracker_api.rs | 21 ++++++++++++------- packages/test-helpers/src/configuration.rs | 6 ++++-- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/packages/configuration/src/v2_0_0/tracker_api.rs b/packages/configuration/src/v2_0_0/tracker_api.rs index 43b08a21e..2da21758b 100644 --- a/packages/configuration/src/v2_0_0/tracker_api.rs +++ b/packages/configuration/src/v2_0_0/tracker_api.rs @@ -52,14 +52,11 @@ impl HttpApi { } fn default_access_tokens() -> AccessTokens { - [(String::from("admin"), String::from("MyAccessToken"))] - .iter() - .cloned() - .collect() + [].iter().cloned().collect() } - pub fn override_admin_token(&mut self, api_admin_token: &str) { - self.access_tokens.insert("admin".to_string(), api_admin_token.to_string()); + pub fn add_token(&mut self, key: &str, token: &str) { + self.access_tokens.insert(key.to_string(), token.to_string()); } pub fn mask_secrets(&mut self) { @@ -74,10 +71,18 @@ mod tests { use crate::v2_0_0::tracker_api::HttpApi; #[test] - fn http_api_configuration_should_check_if_it_contains_a_token() { + fn default_http_api_configuration_should_not_contains_any_token() { let configuration = HttpApi::default(); + 
assert_eq!(configuration.access_tokens.values().len(), 0); + } + + #[test] + fn http_api_configuration_should_allow_adding_tokens() { + let mut configuration = HttpApi::default(); + + configuration.add_token("admin", "MyAccessToken"); + assert!(configuration.access_tokens.values().any(|t| t == "MyAccessToken")); - assert!(!configuration.access_tokens.values().any(|t| t == "NonExistingToken")); } } diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 0a6c1c72b..0c4029b69 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -32,10 +32,12 @@ pub fn ephemeral() -> Configuration { // Ephemeral socket address for API let api_port = 0u16; - config.http_api = Some(HttpApi { + let mut http_api = HttpApi { bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), api_port), ..Default::default() - }); + }; + http_api.add_token("admin", "MyAccessToken"); + config.http_api = Some(http_api); // Ephemeral socket address for Health Check API let health_check_api_port = 0u16; From 6a707b9d0975c7f0bef83c65e3771a8a77b79513 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 12 Aug 2024 10:30:06 +0100 Subject: [PATCH 0969/1003] fix: linter errors --- src/app.rs | 71 ++++++++++++------------- src/console/ci/e2e/tracker_container.rs | 13 ++--- 2 files changed, 39 insertions(+), 45 deletions(-) diff --git a/src/app.rs b/src/app.rs index fd7d6a99d..b2447a9ef 100644 --- a/src/app.rs +++ b/src/app.rs @@ -66,56 +66,53 @@ pub async fn start(config: &Configuration, tracker: Arc) -> Vec { - for udp_tracker_config in udp_trackers { - if tracker.is_private() { - warn!( - "Could not start UDP tracker on: {} while in private mode. 
UDP is not safe for private trackers!", - udp_tracker_config.bind_address - ); - } else { - jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone(), registar.give_form()).await); - } + if let Some(udp_trackers) = &config.udp_trackers { + for udp_tracker_config in udp_trackers { + if tracker.is_private() { + warn!( + "Could not start UDP tracker on: {} while in private mode. UDP is not safe for private trackers!", + udp_tracker_config.bind_address + ); + } else { + jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone(), registar.give_form()).await); } } - None => info!("No UDP blocks in configuration"), + } else { + info!("No UDP blocks in configuration"); } // Start the HTTP blocks - match &config.http_trackers { - Some(http_trackers) => { - for http_tracker_config in http_trackers { - if let Some(job) = http_tracker::start_job( - http_tracker_config, - tracker.clone(), - registar.give_form(), - servers::http::Version::V1, - ) - .await - { - jobs.push(job); - }; - } - } - None => info!("No HTTP blocks in configuration"), - } - - // Start HTTP API - match &config.http_api { - Some(http_api_config) => { - if let Some(job) = tracker_apis::start_job( - http_api_config, + if let Some(http_trackers) = &config.http_trackers { + for http_tracker_config in http_trackers { + if let Some(job) = http_tracker::start_job( + http_tracker_config, tracker.clone(), registar.give_form(), - servers::apis::Version::V1, + servers::http::Version::V1, ) .await { jobs.push(job); }; } - None => info!("No API block in configuration"), + } else { + info!("No HTTP blocks in configuration"); + } + + // Start HTTP API + if let Some(http_api_config) = &config.http_api { + if let Some(job) = tracker_apis::start_job( + http_api_config, + tracker.clone(), + registar.give_form(), + servers::apis::Version::V1, + ) + .await + { + jobs.push(job); + }; + } else { + info!("No API block in configuration"); } // Start runners to remove torrents without peers, every interval diff 
--git a/src/console/ci/e2e/tracker_container.rs b/src/console/ci/e2e/tracker_container.rs index dc7036faa..528fd3c62 100644 --- a/src/console/ci/e2e/tracker_container.rs +++ b/src/console/ci/e2e/tracker_container.rs @@ -105,14 +105,11 @@ impl TrackerContainer { /// /// Will panic if it can't remove the container. pub fn remove(&self) { - match &self.running { - Some(_running_container) => { - error!("Can't remove running container: {} ...", self.name); - } - None => { - info!("Removing docker tracker container: {} ...", self.name); - Docker::remove(&self.name).expect("Container should be removed"); - } + if let Some(_running_container) = &self.running { + error!("Can't remove running container: {} ...", self.name); + } else { + info!("Removing docker tracker container: {} ...", self.name); + Docker::remove(&self.name).expect("Container should be removed"); } } From 62ffffb124fad1bb78d932c38aca8b55a05278f6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 12 Aug 2024 11:04:07 +0100 Subject: [PATCH 0970/1003] chore(deps): update dependencies ```console cargo update Updating crates.io index Locking 16 packages to latest compatible versions Updating async-io v2.3.3 -> v2.3.4 Updating cc v1.1.7 -> v1.1.10 Updating clap v4.5.13 -> v4.5.15 Updating clap_builder v4.5.13 -> v4.5.15 Updating core-foundation-sys v0.8.6 -> v0.8.7 Updating hyper-util v0.1.6 -> v0.1.7 Updating mio v1.0.1 -> v1.0.2 Updating object v0.36.2 -> v0.36.3 Updating piper v0.2.3 -> v0.2.4 Updating polling v3.7.2 -> v3.7.3 Updating rustls-pki-types v1.7.0 -> v1.8.0 Updating serde v1.0.204 -> v1.0.206 Updating serde_derive v1.0.204 -> v1.0.206 Updating serde_json v1.0.122 -> v1.0.124 Updating syn v2.0.72 -> v2.0.74 Updating tempfile v3.11.0 -> v3.12.0 ``` --- Cargo.lock | 138 ++++++++++++++++++++++++++--------------------------- 1 file changed, 69 insertions(+), 69 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e60aca814..94bcd6419 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -254,7 +254,7 @@ 
checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ "async-channel 2.3.1", "async-executor", - "async-io 2.3.3", + "async-io 2.3.4", "async-lock 3.4.0", "blocking", "futures-lite 2.3.0", @@ -284,9 +284,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.3.3" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d6baa8f0178795da0e71bc42c9e5d13261aac7ee549853162e66a241ba17964" +checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" dependencies = [ "async-lock 3.4.0", "cfg-if", @@ -294,11 +294,11 @@ dependencies = [ "futures-io", "futures-lite 2.3.0", "parking", - "polling 3.7.2", + "polling 3.7.3", "rustix 0.38.34", "slab", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -362,7 +362,7 @@ checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -512,7 +512,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -604,7 +604,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.72", + "syn 2.0.74", "which", ] @@ -674,7 +674,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", "syn_derive", ] @@ -786,9 +786,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.7" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26a5c3fd7bfa1ce3897a3a3501d362b2d87b7f2583ebcb4a949ec25911025cbc" +checksum = "e9e8aabfac534be767c909e0690571677d49f41bd8465ae876fe043d52ba5292" dependencies = [ "jobserver", "libc", @@ -868,9 +868,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.13" +version = "4.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fbb260a053428790f3de475e304ff84cdbc4face759ea7a3e64c1edd938a7fc" +checksum = 
"11d8838454fda655dafd3accb2b6e2bea645b9e4078abe84a22ceb947235c5cc" dependencies = [ "clap_builder", "clap_derive", @@ -878,9 +878,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.13" +version = "4.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64b17d7ea74e9f833c7dbf2cbe4fb12ff26783eda4782a8975b72f895c9b4d99" +checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" dependencies = [ "anstream", "anstyle", @@ -897,7 +897,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -961,9 +961,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" @@ -1124,7 +1124,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -1135,7 +1135,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -1172,7 +1172,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -1183,7 +1183,7 @@ checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -1411,7 +1411,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -1423,7 +1423,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -1435,7 +1435,7 @@ dependencies = [ "frunk_core", 
"frunk_proc_macro_helpers", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -1534,7 +1534,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -1822,9 +1822,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956" +checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" dependencies = [ "bytes", "futures-channel", @@ -2153,9 +2153,9 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ "hermit-abi 0.3.9", "libc", @@ -2192,7 +2192,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -2242,7 +2242,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", "termcolor", "thiserror", ] @@ -2399,9 +2399,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.2" +version = "0.36.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f203fa8daa7bb185f760ae12bd8e097f63d17041dcdcaf675ac54cdf863170e" +checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" dependencies = [ "memchr", ] @@ -2441,7 +2441,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -2523,7 +2523,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -2597,7 +2597,7 @@ checksum = 
"2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -2614,9 +2614,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "piper" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1d5c74c9876f070d3e8fd503d748c7d974c3e48da8f41350fa5222ef9b4391" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", "fastrand 2.1.0", @@ -2675,9 +2675,9 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.2" +version = "3.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3ed00ed3fbf728b5816498ecd316d1716eecaced9c0c8d2c5a6740ca214985b" +checksum = "cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" dependencies = [ "cfg-if", "concurrent-queue", @@ -2685,7 +2685,7 @@ dependencies = [ "pin-project-lite", "rustix 0.38.34", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2736,7 +2736,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -2789,7 +2789,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", "version_check", "yansi", ] @@ -3097,7 +3097,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.72", + "syn 2.0.74", "unicode-ident", ] @@ -3205,9 +3205,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" +checksum = 
"fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" [[package]] name = "rustls-webpki" @@ -3309,9 +3309,9 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.204" +version = "1.0.206" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +checksum = "5b3e4cd94123dd520a128bcd11e34d9e9e423e7e3e50425cb1b4b1e3549d0284" dependencies = [ "serde_derive", ] @@ -3337,13 +3337,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.204" +version = "1.0.206" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +checksum = "fabfb6138d2383ea8208cf98ccf69cdfb1aff4088460681d84189aa259762f97" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -3361,9 +3361,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.122" +version = "1.0.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" +checksum = "66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" dependencies = [ "indexmap 2.3.0", "itoa", @@ -3390,7 +3390,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -3441,7 +3441,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -3584,9 +3584,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.72" +version = "2.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" +checksum = "1fceb41e3d546d0bd83421d3409b1460cc7444cd389341a4c880fe7a042cb3d7" dependencies = [ 
"proc-macro2", "quote", @@ -3602,7 +3602,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -3663,15 +3663,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.11.0" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fcd239983515c23a32fb82099f97d0b11b8c72f654ed659363a95c3dad7a53" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if", "fastrand 2.1.0", "once_cell", "rustix 0.38.34", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3706,7 +3706,7 @@ checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -3800,7 +3800,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -4104,7 +4104,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -4318,7 +4318,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", "wasm-bindgen-shared", ] @@ -4352,7 +4352,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4634,7 +4634,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] @@ -4654,7 +4654,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.74", ] [[package]] From 1455295e39f955496b03dc9005afc9aa3214cbcd Mon Sep 17 00:00:00 2001 From: Jose 
Celano Date: Mon, 12 Aug 2024 17:24:58 +0100 Subject: [PATCH 0971/1003] ci: [#1010] fix missing publishing packages --- .github/workflows/deployment.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 2a0f174f7..6aa66e985 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -54,6 +54,8 @@ jobs: cargo publish -p torrust-tracker-contrib-bencode cargo publish -p torrust-tracker-located-error cargo publish -p torrust-tracker-primitives + cargo publish -p torrust-tracker-clock cargo publish -p torrust-tracker-configuration + cargo publish -p torrust-tracker-torrent-repository cargo publish -p torrust-tracker-test-helpers cargo publish -p torrust-tracker From 592c9cce0bf801735f39d9ab9b5764362de6b6a9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 13 Aug 2024 10:30:47 +0100 Subject: [PATCH 0972/1003] release: version 3.0.0-alpha.12 --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 16 ++++++++-------- packages/clock/Cargo.toml | 2 +- packages/configuration/Cargo.toml | 2 +- packages/test-helpers/Cargo.toml | 2 +- packages/torrent-repository/Cargo.toml | 6 +++--- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 94bcd6419..ddee311ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3884,7 +3884,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.12-develop" +version = "3.0.0-alpha.12" dependencies = [ "anyhow", "aquatic_udp_protocol", @@ -3946,7 +3946,7 @@ dependencies = [ [[package]] name = "torrust-tracker-clock" -version = "3.0.0-alpha.12-develop" +version = "3.0.0-alpha.12" dependencies = [ "chrono", "lazy_static", @@ -3955,7 +3955,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.12-develop" +version = "3.0.0-alpha.12" dependencies = [ "camino", "derive_more", @@ -3972,7 +3972,7 @@ dependencies = [ [[package]] name = 
"torrust-tracker-contrib-bencode" -version = "3.0.0-alpha.12-develop" +version = "3.0.0-alpha.12" dependencies = [ "criterion", "error-chain", @@ -3980,7 +3980,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.12-develop" +version = "3.0.0-alpha.12" dependencies = [ "thiserror", "tracing", @@ -3988,7 +3988,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.12-develop" +version = "3.0.0-alpha.12" dependencies = [ "binascii", "derive_more", @@ -4000,7 +4000,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.12-develop" +version = "3.0.0-alpha.12" dependencies = [ "rand", "torrust-tracker-configuration", @@ -4008,7 +4008,7 @@ dependencies = [ [[package]] name = "torrust-tracker-torrent-repository" -version = "3.0.0-alpha.12-develop" +version = "3.0.0-alpha.12" dependencies = [ "async-std", "criterion", diff --git a/Cargo.toml b/Cargo.toml index 4184f2ae7..7f9d211c3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,7 +27,7 @@ license = "AGPL-3.0-only" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-alpha.12-develop" +version = "3.0.0-alpha.12" [dependencies] anyhow = "1" @@ -69,12 +69,12 @@ serde_repr = "0" serde_with = { version = "3.9.0", features = ["json"] } thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-clock = { version = "3.0.0-alpha.12-develop", path = "packages/clock" } -torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12-develop", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "packages/primitives" } 
-torrust-tracker-torrent-repository = { version = "3.0.0-alpha.12-develop", path = "packages/torrent-repository" } +torrust-tracker-clock = { version = "3.0.0-alpha.12", path = "packages/clock" } +torrust-tracker-configuration = { version = "3.0.0-alpha.12", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-alpha.12", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.12", path = "packages/primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-alpha.12", path = "packages/torrent-repository" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } trace = "0" @@ -90,7 +90,7 @@ ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_byt [dev-dependencies] local-ip-address = "0" mockall = "0" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.12-develop", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.12", path = "packages/test-helpers" } [workspace] members = [ diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml index d71175fdc..0177b2fb3 100644 --- a/packages/clock/Cargo.toml +++ b/packages/clock/Cargo.toml @@ -19,6 +19,6 @@ version.workspace = true chrono = { version = "0", default-features = false, features = ["clock"] } lazy_static = "1" -torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } +torrust-tracker-primitives = { version = "3.0.0-alpha.12", path = "../primitives" } [dev-dependencies] diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 5afa39b89..a4c3f2006 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -23,7 +23,7 @@ serde_json = { version = "1", features = 
["preserve_order"] } serde_with = "3" thiserror = "1" toml = "0" -torrust-tracker-located-error = { version = "3.0.0-alpha.12-develop", path = "../located-error" } +torrust-tracker-located-error = { version = "3.0.0-alpha.12", path = "../located-error" } url = "2" [dev-dependencies] diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 4fed6bc42..5a4220b53 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -16,4 +16,4 @@ version.workspace = true [dependencies] rand = "0" -torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } +torrust-tracker-configuration = { version = "3.0.0-alpha.12", path = "../configuration" } diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 53bb41e52..f1f85a52d 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -21,9 +21,9 @@ dashmap = "6" futures = "0" parking_lot = "0" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-clock = { version = "3.0.0-alpha.12-develop", path = "../clock" } -torrust-tracker-configuration = { version = "3.0.0-alpha.12-develop", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-alpha.12-develop", path = "../primitives" } +torrust-tracker-clock = { version = "3.0.0-alpha.12", path = "../clock" } +torrust-tracker-configuration = { version = "3.0.0-alpha.12", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.12", path = "../primitives" } [dev-dependencies] async-std = { version = "1", features = ["attributes", "tokio1"] } From 8fcc016fb62e363dde2214e0b73955987d9af2fa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 13 Aug 2024 11:31:29 +0100 Subject: [PATCH 0973/1003] develop: bump to version 3.0.0-beta-develop --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 16 ++++++++-------- 
packages/clock/Cargo.toml | 2 +- packages/configuration/Cargo.toml | 2 +- packages/test-helpers/Cargo.toml | 2 +- packages/torrent-repository/Cargo.toml | 6 +++--- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ddee311ea..d51ecff56 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3884,7 +3884,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.12" +version = "3.0.0-beta-develop" dependencies = [ "anyhow", "aquatic_udp_protocol", @@ -3946,7 +3946,7 @@ dependencies = [ [[package]] name = "torrust-tracker-clock" -version = "3.0.0-alpha.12" +version = "3.0.0-beta-develop" dependencies = [ "chrono", "lazy_static", @@ -3955,7 +3955,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.12" +version = "3.0.0-beta-develop" dependencies = [ "camino", "derive_more", @@ -3972,7 +3972,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-alpha.12" +version = "3.0.0-beta-develop" dependencies = [ "criterion", "error-chain", @@ -3980,7 +3980,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.12" +version = "3.0.0-beta-develop" dependencies = [ "thiserror", "tracing", @@ -3988,7 +3988,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.12" +version = "3.0.0-beta-develop" dependencies = [ "binascii", "derive_more", @@ -4000,7 +4000,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.12" +version = "3.0.0-beta-develop" dependencies = [ "rand", "torrust-tracker-configuration", @@ -4008,7 +4008,7 @@ dependencies = [ [[package]] name = "torrust-tracker-torrent-repository" -version = "3.0.0-alpha.12" +version = "3.0.0-beta-develop" dependencies = [ "async-std", "criterion", diff --git a/Cargo.toml b/Cargo.toml index 7f9d211c3..43453cb5a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,7 +27,7 @@ license = 
"AGPL-3.0-only" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-alpha.12" +version = "3.0.0-beta-develop" [dependencies] anyhow = "1" @@ -69,12 +69,12 @@ serde_repr = "0" serde_with = { version = "3.9.0", features = ["json"] } thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-clock = { version = "3.0.0-alpha.12", path = "packages/clock" } -torrust-tracker-configuration = { version = "3.0.0-alpha.12", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-alpha.12", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.12", path = "packages/primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-alpha.12", path = "packages/torrent-repository" } +torrust-tracker-clock = { version = "3.0.0-beta-develop", path = "packages/clock" } +torrust-tracker-configuration = { version = "3.0.0-beta-develop", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-beta-develop", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-beta-develop", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-beta-develop", path = "packages/primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-beta-develop", path = "packages/torrent-repository" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } trace = "0" @@ -90,7 +90,7 @@ ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_byt [dev-dependencies] local-ip-address = "0" mockall = "0" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.12", path = "packages/test-helpers" } 
+torrust-tracker-test-helpers = { version = "3.0.0-beta-develop", path = "packages/test-helpers" } [workspace] members = [ diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml index 0177b2fb3..e28a37466 100644 --- a/packages/clock/Cargo.toml +++ b/packages/clock/Cargo.toml @@ -19,6 +19,6 @@ version.workspace = true chrono = { version = "0", default-features = false, features = ["clock"] } lazy_static = "1" -torrust-tracker-primitives = { version = "3.0.0-alpha.12", path = "../primitives" } +torrust-tracker-primitives = { version = "3.0.0-beta-develop", path = "../primitives" } [dev-dependencies] diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index a4c3f2006..0a4cfea23 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -23,7 +23,7 @@ serde_json = { version = "1", features = ["preserve_order"] } serde_with = "3" thiserror = "1" toml = "0" -torrust-tracker-located-error = { version = "3.0.0-alpha.12", path = "../located-error" } +torrust-tracker-located-error = { version = "3.0.0-beta-develop", path = "../located-error" } url = "2" [dev-dependencies] diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 5a4220b53..0fd108ecf 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -16,4 +16,4 @@ version.workspace = true [dependencies] rand = "0" -torrust-tracker-configuration = { version = "3.0.0-alpha.12", path = "../configuration" } +torrust-tracker-configuration = { version = "3.0.0-beta-develop", path = "../configuration" } diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index f1f85a52d..1fd58ab02 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -21,9 +21,9 @@ dashmap = "6" futures = "0" parking_lot = "0" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-clock = 
{ version = "3.0.0-alpha.12", path = "../clock" } -torrust-tracker-configuration = { version = "3.0.0-alpha.12", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-alpha.12", path = "../primitives" } +torrust-tracker-clock = { version = "3.0.0-beta-develop", path = "../clock" } +torrust-tracker-configuration = { version = "3.0.0-beta-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-beta-develop", path = "../primitives" } [dev-dependencies] async-std = { version = "1", features = ["attributes", "tokio1"] } From 33757e0858761fcc18dae6613e427d87e9555f4f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 20 Aug 2024 11:13:18 +0100 Subject: [PATCH 0974/1003] chore(deps): update dependencies ```output cargo update Updating crates.io index Locking 43 packages to latest compatible versions Adding adler2 v2.0.0 Updating arrayvec v0.7.4 -> v0.7.6 Adding bindgen v0.70.0 Updating bytemuck v1.16.3 -> v1.17.0 Updating camino v1.1.7 -> v1.1.9 Updating cc v1.1.10 -> v1.1.13 Updating clap v4.5.15 -> v4.5.16 Updating cmake v0.1.50 -> v0.1.51 Updating cpufeatures v0.2.12 -> v0.2.13 Updating flate2 v1.0.31 -> v1.0.32 Updating h2 v0.4.5 -> v0.4.6 Updating indexmap v2.3.0 -> v2.4.0 Updating is-terminal v0.4.12 -> v0.4.13 Adding itertools v0.13.0 Updating js-sys v0.3.69 -> v0.3.70 Updating libc v0.2.155 -> v0.2.158 Updating libz-sys v1.1.18 -> v1.1.19 Adding miniz_oxide v0.8.0 Updating reqwest v0.12.5 -> v0.12.7 Updating ringbuf v0.4.1 -> v0.4.4 Updating rkyv v0.7.44 -> v0.7.45 Updating rkyv_derive v0.7.44 -> v0.7.45 Updating rust_decimal v1.35.0 -> v1.36.0 Updating serde v1.0.206 -> v1.0.208 Updating serde_derive v1.0.206 -> v1.0.208 Updating serde_json v1.0.124 -> v1.0.125 Updating syn v2.0.74 -> v2.0.75 Updating system-configuration v0.5.1 -> v0.6.0 Updating system-configuration-sys v0.5.0 -> v0.6.0 Updating tokio v1.39.2 -> v1.39.3 Adding tower v0.5.0 Updating tower-layer v0.3.2 -> v0.3.3 Updating tower-service v0.3.2 
-> v0.3.3 Updating wasm-bindgen v0.2.92 -> v0.2.93 Updating wasm-bindgen-backend v0.2.92 -> v0.2.93 Updating wasm-bindgen-futures v0.4.42 -> v0.4.43 Updating wasm-bindgen-macro v0.2.92 -> v0.2.93 Updating wasm-bindgen-macro-support v0.2.92 -> v0.2.93 Updating wasm-bindgen-shared v0.2.92 -> v0.2.93 Updating web-sys v0.3.69 -> v0.3.70 Adding windows-registry v0.2.0 Adding windows-result v0.2.0 Adding windows-strings v0.1.0 Removing winreg v0.52.0 ``` --- Cargo.lock | 339 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 209 insertions(+), 130 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d51ecff56..a7589c287 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,6 +17,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" + [[package]] name = "ahash" version = "0.7.8" @@ -180,9 +186,9 @@ checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "arrayvec" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "async-attributes" @@ -362,7 +368,7 @@ checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -404,7 +410,7 @@ version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f0e249228c6ad2d240c2dc94b714d711629d52bad946075d8e9b2f5391f0703" dependencies = [ - "bindgen", + "bindgen 0.69.4", "cc", "cmake", "dunce", @@ -442,7 +448,7 @@ 
dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -497,7 +503,7 @@ dependencies = [ "pin-project-lite", "serde", "serde_html_form", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -512,7 +518,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -535,7 +541,7 @@ dependencies = [ "rustls-pki-types", "tokio", "tokio-rustls", - "tower", + "tower 0.4.13", "tower-service", ] @@ -549,7 +555,7 @@ dependencies = [ "cc", "cfg-if", "libc", - "miniz_oxide", + "miniz_oxide 0.7.4", "object", "rustc-demangle", ] @@ -604,10 +610,28 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.74", + "syn 2.0.75", "which", ] +[[package]] +name = "bindgen" +version = "0.70.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0127a1da21afb5adaae26910922c3f7afd3d329ba1a1b98a0884cab4907a251" +dependencies = [ + "bitflags 2.6.0", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.75", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -674,7 +698,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", "syn_derive", ] @@ -744,9 +768,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.16.3" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "102087e286b4677862ea56cf8fc58bb2cdfa8725c40ffb80fe3a008eb7f2fc83" +checksum = "6fd4c6dcc3b0aea2f5c0b4b82c2b15fe39ddbc76041a310848f4706edf76bb31" [[package]] name = "byteorder" @@ -762,9 +786,9 @@ checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "camino" -version = "1.1.7" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" +checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" dependencies = [ "serde", ] @@ -786,12 +810,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.10" +version = "1.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e8aabfac534be767c909e0690571677d49f41bd8465ae876fe043d52ba5292" +checksum = "72db2f7947ecee9b03b510377e8bb9077afa27176fdbff55c51027e976fdcc48" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -868,9 +893,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.15" +version = "4.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d8838454fda655dafd3accb2b6e2bea645b9e4078abe84a22ceb947235c5cc" +checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" dependencies = [ "clap_builder", "clap_derive", @@ -897,7 +922,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -908,9 +933,9 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "cmake" -version = "0.1.50" +version = "0.1.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" +checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" dependencies = [ "cc", ] @@ -967,9 +992,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" dependencies = [ "libc", ] @@ -1124,7 +1149,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.74", + "syn 2.0.75", ] 
[[package]] @@ -1135,7 +1160,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -1172,7 +1197,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -1183,7 +1208,7 @@ checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -1331,13 +1356,13 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.31" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f211bbe8e69bbd0cfdea405084f128ae8b4aaa6b0b522fc8f2b009084797920" +checksum = "9c0596c1eac1f9e04ed902702e9878208b336edc9d6fddc8a48387349bab3666" dependencies = [ "crc32fast", "libz-sys", - "miniz_oxide", + "miniz_oxide 0.8.0", ] [[package]] @@ -1411,7 +1436,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -1423,7 +1448,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -1435,7 +1460,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -1534,7 +1559,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -1620,9 +1645,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", "bytes", @@ -1630,7 +1655,7 @@ dependencies = [ 
"futures-core", "futures-sink", "http", - "indexmap 2.3.0", + "indexmap 2.4.0", "slab", "tokio", "tokio-util", @@ -1835,7 +1860,7 @@ dependencies = [ "pin-project-lite", "socket2 0.5.7", "tokio", - "tower", + "tower 0.4.13", "tower-service", "tracing", ] @@ -1892,9 +1917,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0" +checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" dependencies = [ "equivalent", "hashbrown 0.14.5", @@ -1944,11 +1969,11 @@ checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "is-terminal" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" +checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi 0.4.0", "libc", "windows-sys 0.52.0", ] @@ -1977,6 +2002,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.11" @@ -1994,9 +2028,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] @@ -2024,9 +2058,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.155" +version = "0.2.158" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" [[package]] name = "libloading" @@ -2057,9 +2091,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.18" +version = "1.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" +checksum = "fdc53a7799a7496ebc9fd29f31f7df80e83c9bda5299768af5f9e59eeea74647" dependencies = [ "cc", "pkg-config", @@ -2151,6 +2185,15 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +dependencies = [ + "adler2", +] + [[package]] name = "mio" version = "1.0.2" @@ -2192,7 +2235,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -2242,7 +2285,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", "termcolor", "thiserror", ] @@ -2255,7 +2298,7 @@ checksum = "478b0ff3f7d67b79da2b96f56f334431aef65e15ba4b29dd74a4236e29582bdc" dependencies = [ "base64 0.21.7", "bigdecimal", - "bindgen", + "bindgen 0.70.0", "bitflags 2.6.0", "bitvec", "btoi", @@ -2441,7 +2484,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -2523,7 +2566,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -2597,7 +2640,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -2736,7 +2779,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -2789,7 +2832,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", "version_check", "yansi", ] @@ -2977,9 +3020,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.5" +version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" +checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" dependencies = [ "base64 0.22.1", "bytes", @@ -3015,7 +3058,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg", + "windows-registry", ] [[package]] @@ -3035,18 +3078,18 @@ dependencies = [ [[package]] name = "ringbuf" -version = "0.4.1" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c65e4c865bc3d2e3294493dff0acf7e6c259d066e34e22059fa9c39645c3636" +checksum = "46f7f1b88601a8ee13cabf203611ccdf64345dc1c5d24de8b11e1a678ee619b6" dependencies = [ "crossbeam-utils", ] [[package]] name = "rkyv" -version = "0.7.44" +version = "0.7.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cba464629b3394fc4dbc6f940ff8f5b4ff5c7aef40f29166fd4ad12acbc99c0" +checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b" dependencies = [ "bitvec", "bytecheck", @@ -3062,9 +3105,9 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.44" +version = "0.7.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7dddfff8de25e6f62b9d64e6e432bf1c6736c57d20323e15ee10435fbda7c65" +checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" dependencies = [ "proc-macro2", "quote", 
@@ -3097,7 +3140,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.74", + "syn 2.0.75", "unicode-ident", ] @@ -3117,9 +3160,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.35.0" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1790d1c4c0ca81211399e0e0af16333276f375209e71a37b67698a373db5b47a" +checksum = "b082d80e3e3cc52b2ed634388d436fe1f4de6af5786cc2de9ba9737527bdf555" dependencies = [ "arrayvec", "borsh", @@ -3309,9 +3352,9 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.206" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b3e4cd94123dd520a128bcd11e34d9e9e423e7e3e50425cb1b4b1e3549d0284" +checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" dependencies = [ "serde_derive", ] @@ -3337,13 +3380,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.206" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fabfb6138d2383ea8208cf98ccf69cdfb1aff4088460681d84189aa259762f97" +checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -3353,7 +3396,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de514ef58196f1fc96dcaef80fe6170a1ce6215df9687a93fe8300e773fefc5" dependencies = [ "form_urlencoded", - "indexmap 2.3.0", + "indexmap 2.4.0", "itoa", "ryu", "serde", @@ -3361,11 +3404,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.124" +version = "1.0.125" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" +checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed" dependencies = [ - "indexmap 
2.3.0", + "indexmap 2.4.0", "itoa", "memchr", "ryu", @@ -3390,7 +3433,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -3424,7 +3467,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.3.0", + "indexmap 2.4.0", "serde", "serde_derive", "serde_json", @@ -3441,7 +3484,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -3584,9 +3627,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.74" +version = "2.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fceb41e3d546d0bd83421d3409b1460cc7444cd389341a4c880fe7a042cb3d7" +checksum = "f6af063034fc1935ede7be0122941bafa9bacb949334d090b77ca98b5817c7d9" dependencies = [ "proc-macro2", "quote", @@ -3602,7 +3645,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -3616,23 +3659,26 @@ name = "sync_wrapper" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +dependencies = [ + "futures-core", +] [[package]] name = "system-configuration" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +checksum = "658bc6ee10a9b4fcf576e9b0819d95ec16f4d2c02d39fd83ac1c8789785c4a42" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", "core-foundation", "system-configuration-sys", ] [[package]] name = "system-configuration-sys" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" dependencies = [ 
"core-foundation-sys", "libc", @@ -3706,7 +3752,7 @@ checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -3777,9 +3823,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.2" +version = "1.39.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" +checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" dependencies = [ "backtrace", "bytes", @@ -3800,7 +3846,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -3864,7 +3910,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.3.0", + "indexmap 2.4.0", "toml_datetime", "winnow 0.5.40", ] @@ -3875,7 +3921,7 @@ version = "0.22.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" dependencies = [ - "indexmap 2.3.0", + "indexmap 2.4.0", "serde", "serde_spanned", "toml_datetime", @@ -3934,7 +3980,7 @@ dependencies = [ "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-tracker-torrent-repository", - "tower", + "tower 0.5.0", "tower-http", "trace", "tracing", @@ -4039,6 +4085,18 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36b837f86b25d7c0d7988f00a54e74739be6477f2aac6201b8f429a7569991b7" +dependencies = [ + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-http" version = "0.5.2" @@ -4063,15 
+4121,15 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "trace" @@ -4104,7 +4162,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -4299,34 +4357,35 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = 
"61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" dependencies = [ "cfg-if", "js-sys", @@ -4336,9 +4395,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4346,28 +4405,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "web-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" dependencies = [ "js-sys", "wasm-bindgen", @@ -4425,6 +4484,36 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = 
"windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -4591,16 +4680,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "winreg" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - [[package]] name = "wyz" version = "0.5.1" @@ -4634,7 +4713,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] @@ -4654,7 +4733,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.75", ] [[package]] From 95333f0d21896477a09cd5a35a6e923b3f3eb450 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 20 Aug 2024 11:44:26 +0100 Subject: [PATCH 0975/1003] chore(deps): update udp cookie consts due to the hasher fn update The hasher function produces new hashes and that changes the final cookie used in the UDP tracker tests. It adds a function to generate new values and fix the test with the new values.
--- src/servers/udp/connection_cookie.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs index c15ad114c..36bf98304 100644 --- a/src/servers/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -178,15 +178,16 @@ mod tests { #[test] fn it_should_make_a_connection_cookie() { - // Note: This constant may need to be updated in the future as the hash is not guaranteed to to be stable between versions. - const ID_COOKIE_OLD: Cookie = [23, 204, 198, 29, 48, 180, 62, 19]; - const ID_COOKIE_NEW: Cookie = [41, 166, 45, 246, 249, 24, 108, 203]; + // Note: This constant may need to be updated in the future as the hash + // is not guaranteed to to be stable between versions. + const ID_COOKIE_OLD_HASHER: Cookie = [41, 166, 45, 246, 249, 24, 108, 203]; + const ID_COOKIE_NEW_HASHER: Cookie = [185, 122, 191, 238, 6, 43, 2, 198]; clock::Stopped::local_set_to_unix_epoch(); let cookie = make(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); - assert!(cookie == ID_COOKIE_OLD || cookie == ID_COOKIE_NEW); + assert!(cookie == ID_COOKIE_OLD_HASHER || cookie == ID_COOKIE_NEW_HASHER); } #[test] From 7779fa3db8728a9c04ca84e6bffaeb115dceb7ec Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 13 Jul 2024 14:10:21 +0200 Subject: [PATCH 0976/1003] dev: remove announce_request wrapper --- src/servers/udp/handlers.rs | 11 ++++------- src/servers/udp/mod.rs | 4 ---- src/servers/udp/peer_builder.rs | 15 +++++++-------- src/servers/udp/request.rs | 28 ---------------------------- 4 files changed, 11 insertions(+), 47 deletions(-) delete mode 100644 src/servers/udp/request.rs diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 53683fbb9..e37179b4b 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -22,7 +22,6 @@ use crate::core::{statistics, ScrapeData, Tracker}; use crate::servers::udp::error::Error; use 
crate::servers::udp::logging::{log_bad_request, log_error_response, log_request, log_response}; use crate::servers::udp::peer_builder; -use crate::servers::udp::request::AnnounceWrapper; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; /// It handles the incoming UDP packets. @@ -152,9 +151,7 @@ pub async fn handle_announce( check(&remote_addr, &from_connection_id(&announce_request.connection_id))?; - let wrapped_announce_request = AnnounceWrapper::new(announce_request); - - let info_hash = wrapped_announce_request.info_hash; + let info_hash = InfoHash(announce_request.info_hash.0); let remote_client_ip = remote_addr.ip(); // Authorization @@ -162,7 +159,7 @@ pub async fn handle_announce( source: (Arc::new(e) as Arc).into(), })?; - let mut peer = peer_builder::from_request(&wrapped_announce_request, &remote_client_ip); + let mut peer = peer_builder::from_request(announce_request, &remote_client_ip); let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip); @@ -179,7 +176,7 @@ pub async fn handle_announce( if remote_addr.is_ipv4() { let announce_response = AnnounceResponse { fixed: AnnounceResponseFixedData { - transaction_id: wrapped_announce_request.announce_request.transaction_id, + transaction_id: announce_request.transaction_id, announce_interval: AnnounceInterval(I32::new(i64::from(tracker.get_announce_policy().interval) as i32)), leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), @@ -206,7 +203,7 @@ pub async fn handle_announce( } else { let announce_response = AnnounceResponse { fixed: AnnounceResponseFixedData { - transaction_id: wrapped_announce_request.announce_request.transaction_id, + transaction_id: announce_request.transaction_id, announce_interval: AnnounceInterval(I32::new(i64::from(tracker.get_announce_policy().interval) as i32)), leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), 
seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index 8ea05d5b1..91b19a91d 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -61,9 +61,6 @@ //! UDP packet -> Aquatic Struct Request -> [Torrust Struct Request] -> Tracker -> Aquatic Struct Response -> UDP packet //! ``` //! -//! For the `Announce` request there is a wrapper struct [`AnnounceWrapper`](crate::servers::udp::request::AnnounceWrapper). -//! It was added to add an extra field with the internal [`InfoHash`](torrust_tracker_primitives::info_hash::InfoHash) struct. -//! //! ### Connect //! //! `Connect` requests are used to get a connection ID which must be provided on @@ -646,7 +643,6 @@ pub mod error; pub mod handlers; pub mod logging; pub mod peer_builder; -pub mod request; pub mod server; pub const UDP_TRACKER_LOG_TARGET: &str = "UDP TRACKER"; diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs index e54a23443..39881ad5c 100644 --- a/src/servers/udp/peer_builder.rs +++ b/src/servers/udp/peer_builder.rs @@ -5,7 +5,6 @@ use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::{peer, NumberOfBytes}; -use super::request::AnnounceWrapper; use crate::CurrentClock; /// Extracts the [`peer::Peer`] info from the @@ -16,8 +15,8 @@ use crate::CurrentClock; /// * `announce_wrapper` - The announce request to extract the peer info from. /// * `peer_ip` - The real IP address of the peer, not the one in the announce request. 
#[must_use] -pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> peer::Peer { - let announce_event = match aquatic_udp_protocol::AnnounceEvent::from(announce_wrapper.announce_request.event) { +pub fn from_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, peer_ip: &IpAddr) -> peer::Peer { + let announce_event = match aquatic_udp_protocol::AnnounceEvent::from(announce_request.event) { aquatic_udp_protocol::AnnounceEvent::Started => AnnounceEvent::Started, aquatic_udp_protocol::AnnounceEvent::Stopped => AnnounceEvent::Stopped, aquatic_udp_protocol::AnnounceEvent::Completed => AnnounceEvent::Completed, @@ -25,12 +24,12 @@ pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> pee }; peer::Peer { - peer_id: peer::Id(announce_wrapper.announce_request.peer_id.0), - peer_addr: SocketAddr::new(*peer_ip, announce_wrapper.announce_request.port.0.into()), + peer_id: peer::Id(announce_request.peer_id.0), + peer_addr: SocketAddr::new(*peer_ip, announce_request.port.0.into()), updated: CurrentClock::now(), - uploaded: NumberOfBytes(announce_wrapper.announce_request.bytes_uploaded.0.into()), - downloaded: NumberOfBytes(announce_wrapper.announce_request.bytes_downloaded.0.into()), - left: NumberOfBytes(announce_wrapper.announce_request.bytes_left.0.into()), + uploaded: NumberOfBytes(announce_request.bytes_uploaded.0.into()), + downloaded: NumberOfBytes(announce_request.bytes_downloaded.0.into()), + left: NumberOfBytes(announce_request.bytes_left.0.into()), event: announce_event, } } diff --git a/src/servers/udp/request.rs b/src/servers/udp/request.rs deleted file mode 100644 index f95fec07a..000000000 --- a/src/servers/udp/request.rs +++ /dev/null @@ -1,28 +0,0 @@ -//! UDP request types. -//! -//! Torrust Tracker uses the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol) -//! crate to parse and serialize UDP requests. -//! -//! 
Some of the type in this module are wrappers around the types in the -//! `aquatic_udp_protocol` crate. -use aquatic_udp_protocol::AnnounceRequest; -use torrust_tracker_primitives::info_hash::InfoHash; - -/// Wrapper around [`AnnounceRequest`]. -pub struct AnnounceWrapper { - /// [`AnnounceRequest`] to wrap. - pub announce_request: AnnounceRequest, - /// Info hash of the torrent. - pub info_hash: InfoHash, -} - -impl AnnounceWrapper { - /// Creates a new [`AnnounceWrapper`] from an [`AnnounceRequest`]. - #[must_use] - pub fn new(announce_request: &AnnounceRequest) -> Self { - AnnounceWrapper { - announce_request: *announce_request, - info_hash: InfoHash(announce_request.info_hash.0), - } - } -} From 325df70a5f6e3d9874364eb019b1a5d8e254c448 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 13 Jul 2024 14:59:46 +0200 Subject: [PATCH 0977/1003] dev: use aquatic_udp_protocol InfoHash inside our type --- Cargo.lock | 2 + packages/primitives/Cargo.toml | 2 + packages/primitives/src/info_hash.rs | 64 +++++++++++++++---- .../benches/helpers/asyn.rs | 12 ++-- .../benches/helpers/sync.rs | 12 ++-- .../benches/helpers/utils.rs | 2 +- src/console/clients/checker/checks/udp.rs | 7 +- src/servers/http/v1/responses/scrape.rs | 4 +- src/servers/udp/handlers.rs | 6 +- 9 files changed, 75 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a7589c287..cfe940e43 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4036,12 +4036,14 @@ dependencies = [ name = "torrust-tracker-primitives" version = "3.0.0-beta-develop" dependencies = [ + "aquatic_udp_protocol", "binascii", "derive_more", "serde", "tdyne-peer-id", "tdyne-peer-id-registry", "thiserror", + "zerocopy", ] [[package]] diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index 174750fbb..05981b3a8 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -15,9 +15,11 @@ rust-version.workspace = true version.workspace = true [dependencies] 
+aquatic_udp_protocol = "0" binascii = "0" derive_more = "0" serde = { version = "1", features = ["derive"] } tdyne-peer-id = "1" tdyne-peer-id-registry = "0" thiserror = "1" +zerocopy = "0" diff --git a/packages/primitives/src/info_hash.rs b/packages/primitives/src/info_hash.rs index a07cc41a2..57dfd90e5 100644 --- a/packages/primitives/src/info_hash.rs +++ b/packages/primitives/src/info_hash.rs @@ -1,11 +1,15 @@ use std::hash::{DefaultHasher, Hash, Hasher}; +use std::ops::{Deref, DerefMut}; use std::panic::Location; use thiserror::Error; +use zerocopy::FromBytes; /// `BitTorrent` Info Hash v1 -#[derive(PartialEq, Eq, Hash, Clone, Copy, Default, Debug)] -pub struct InfoHash(pub [u8; 20]); +#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] +pub struct InfoHash { + data: aquatic_udp_protocol::InfoHash, +} pub const INFO_HASH_BYTES_LEN: usize = 20; @@ -17,10 +21,9 @@ impl InfoHash { /// Will panic if byte slice does not contains the exact amount of bytes need for the `InfoHash`. #[must_use] pub fn from_bytes(bytes: &[u8]) -> Self { - assert_eq!(bytes.len(), INFO_HASH_BYTES_LEN); - let mut ret = Self([0u8; INFO_HASH_BYTES_LEN]); - ret.0.clone_from_slice(bytes); - ret + let data = aquatic_udp_protocol::InfoHash::read_from(bytes).expect("it should have the exact amount of bytes"); + + Self { data } } /// Returns the `InfoHash` internal byte array. 
@@ -36,6 +39,34 @@ impl InfoHash { } } +impl Default for InfoHash { + fn default() -> Self { + Self { + data: aquatic_udp_protocol::InfoHash(Default::default()), + } + } +} + +impl From for InfoHash { + fn from(data: aquatic_udp_protocol::InfoHash) -> Self { + Self { data } + } +} + +impl Deref for InfoHash { + type Target = aquatic_udp_protocol::InfoHash; + + fn deref(&self) -> &Self::Target { + &self.data + } +} + +impl DerefMut for InfoHash { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.data + } +} + impl Ord for InfoHash { fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.0.cmp(&other.0) @@ -60,7 +91,7 @@ impl std::str::FromStr for InfoHash { type Err = binascii::ConvertError; fn from_str(s: &str) -> Result { - let mut i = Self([0u8; 20]); + let mut i = Self::default(); if s.len() != 40 { return Err(binascii::ConvertError::InvalidInputLength); } @@ -72,7 +103,7 @@ impl std::str::FromStr for InfoHash { impl std::convert::From<&[u8]> for InfoHash { fn from(data: &[u8]) -> InfoHash { assert_eq!(data.len(), 20); - let mut ret = InfoHash([0u8; 20]); + let mut ret = Self::default(); ret.0.clone_from_slice(data); ret } @@ -82,23 +113,28 @@ impl std::convert::From<&[u8]> for InfoHash { impl std::convert::From<&DefaultHasher> for InfoHash { fn from(data: &DefaultHasher) -> InfoHash { let n = data.finish().to_le_bytes(); - InfoHash([ + let bytes = [ n[0], n[1], n[2], n[3], n[4], n[5], n[6], n[7], n[0], n[1], n[2], n[3], n[4], n[5], n[6], n[7], n[0], n[1], n[2], n[3], - ]) + ]; + let data = aquatic_udp_protocol::InfoHash(bytes); + Self { data } } } impl std::convert::From<&i32> for InfoHash { fn from(n: &i32) -> InfoHash { let n = n.to_le_bytes(); - InfoHash([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, n[0], n[1], n[2], n[3]]) + let bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, n[0], n[1], n[2], n[3]]; + let data = aquatic_udp_protocol::InfoHash(bytes); + Self { data } } } impl std::convert::From<[u8; 20]> for InfoHash { - fn 
from(val: [u8; 20]) -> Self { - InfoHash(val) + fn from(bytes: [u8; 20]) -> Self { + let data = aquatic_udp_protocol::InfoHash(bytes); + Self { data } } } @@ -171,7 +207,7 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { )); } - let mut res = InfoHash([0u8; 20]); + let mut res = InfoHash::default(); if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { return Err(serde::de::Error::invalid_value( diff --git a/packages/torrent-repository/benches/helpers/asyn.rs b/packages/torrent-repository/benches/helpers/asyn.rs index 1c6d9d915..08862abc8 100644 --- a/packages/torrent-repository/benches/helpers/asyn.rs +++ b/packages/torrent-repository/benches/helpers/asyn.rs @@ -16,7 +16,7 @@ where for _ in 0..samples { let torrent_repository = V::default(); - let info_hash = InfoHash([0; 20]); + let info_hash = InfoHash::default(); torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER).await; @@ -33,13 +33,13 @@ where Arc: Clone + Send + Sync + 'static, { let torrent_repository = Arc::::default(); - let info_hash: &'static InfoHash = &InfoHash([0; 20]); + let info_hash = InfoHash::default(); let handles = FuturesUnordered::new(); // Add the torrent/peer to the torrent repository - torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER).await; + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER).await; - torrent_repository.get_swarm_metadata(info_hash).await; + torrent_repository.get_swarm_metadata(&info_hash).await; let start = Instant::now(); @@ -47,9 +47,9 @@ where let torrent_repository_clone = torrent_repository.clone(); let handle = runtime.spawn(async move { - torrent_repository_clone.upsert_peer(info_hash, &DEFAULT_PEER).await; + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER).await; - torrent_repository_clone.get_swarm_metadata(info_hash).await; + torrent_repository_clone.get_swarm_metadata(&info_hash).await; if let Some(sleep_time) = sleep { let start_time = std::time::Instant::now(); diff --git 
a/packages/torrent-repository/benches/helpers/sync.rs b/packages/torrent-repository/benches/helpers/sync.rs index 63fccfc77..77055911d 100644 --- a/packages/torrent-repository/benches/helpers/sync.rs +++ b/packages/torrent-repository/benches/helpers/sync.rs @@ -18,7 +18,7 @@ where for _ in 0..samples { let torrent_repository = V::default(); - let info_hash = InfoHash([0; 20]); + let info_hash = InfoHash::default(); torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER); @@ -35,13 +35,13 @@ where Arc: Clone + Send + Sync + 'static, { let torrent_repository = Arc::::default(); - let info_hash: &'static InfoHash = &InfoHash([0; 20]); + let info_hash = InfoHash::default(); let handles = FuturesUnordered::new(); // Add the torrent/peer to the torrent repository - torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER); + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER); - torrent_repository.get_swarm_metadata(info_hash); + torrent_repository.get_swarm_metadata(&info_hash); let start = Instant::now(); @@ -49,9 +49,9 @@ where let torrent_repository_clone = torrent_repository.clone(); let handle = runtime.spawn(async move { - torrent_repository_clone.upsert_peer(info_hash, &DEFAULT_PEER); + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER); - torrent_repository_clone.get_swarm_metadata(info_hash); + torrent_repository_clone.get_swarm_metadata(&info_hash); if let Some(sleep_time) = sleep { let start_time = std::time::Instant::now(); diff --git a/packages/torrent-repository/benches/helpers/utils.rs b/packages/torrent-repository/benches/helpers/utils.rs index 170194806..2f912a5c0 100644 --- a/packages/torrent-repository/benches/helpers/utils.rs +++ b/packages/torrent-repository/benches/helpers/utils.rs @@ -30,7 +30,7 @@ pub fn generate_unique_info_hashes(size: usize) -> Vec { bytes[2] = ((i >> 16) & 0xFF) as u8; bytes[3] = ((i >> 24) & 0xFF) as u8; - let info_hash = InfoHash(bytes); + let info_hash = InfoHash::from_bytes(&bytes); 
result.insert(info_hash); } diff --git a/src/console/clients/checker/checks/udp.rs b/src/console/clients/checker/checks/udp.rs index dd4d5e639..dd9afa47c 100644 --- a/src/console/clients/checker/checks/udp.rs +++ b/src/console/clients/checker/checks/udp.rs @@ -4,7 +4,6 @@ use std::time::Duration; use aquatic_udp_protocol::TransactionId; use hex_literal::hex; use serde::Serialize; -use torrust_tracker_primitives::info_hash::InfoHash; use crate::console::clients::udp::checker::Client; use crate::console::clients::udp::Error; @@ -29,7 +28,7 @@ pub async fn run(udp_trackers: Vec, timeout: Duration) -> Vec, timeout: Duration) -> Vec, timeout: Duration) -> Vec ScrapeData { - let info_hash = InfoHash([0x69; 20]); + let info_hash = InfoHash::from_bytes(&[0x69; 20]); let mut scrape_data = ScrapeData::empty(); scrape_data.add_file( &info_hash, diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index e37179b4b..7eb07bc8d 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -151,7 +151,7 @@ pub async fn handle_announce( check(&remote_addr, &from_connection_id(&announce_request.connection_id))?; - let info_hash = InfoHash(announce_request.info_hash.0); + let info_hash = announce_request.info_hash.into(); let remote_client_ip = remote_addr.ip(); // Authorization @@ -240,9 +240,9 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra debug!("udp scrape request: {:#?}", request); // Convert from aquatic infohashes - let mut info_hashes = vec![]; + let mut info_hashes: Vec = vec![]; for info_hash in &request.info_hashes { - info_hashes.push(InfoHash(info_hash.0)); + info_hashes.push((*info_hash).into()); } let scrape_data = if tracker.requires_authentication() { From 00af70f2558b861ca0bf59796f0c3f4b683819cd Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 14 Jul 2024 16:42:25 +0200 Subject: [PATCH 0978/1003] dev: remove announce event wrapper --- packages/primitives/src/announce_event.rs | 43 
------------------- packages/primitives/src/lib.rs | 25 ++++++++++- packages/primitives/src/peer.rs | 22 ++++++++-- .../benches/helpers/utils.rs | 3 +- .../torrent-repository/src/entry/single.rs | 3 +- .../tests/common/torrent_peer_builder.rs | 3 +- .../torrent-repository/tests/entry/mod.rs | 3 +- .../tests/repository/mod.rs | 3 +- src/core/mod.rs | 12 +++--- src/core/peer_tests.rs | 2 +- src/core/services/torrent.rs | 2 +- .../apis/v1/context/torrent/resources/peer.rs | 2 +- .../v1/context/torrent/resources/torrent.rs | 2 +- src/servers/http/v1/handlers/announce.rs | 2 +- src/servers/http/v1/services/announce.rs | 2 +- src/servers/http/v1/services/scrape.rs | 2 +- src/servers/udp/peer_builder.rs | 10 +---- 17 files changed, 61 insertions(+), 80 deletions(-) delete mode 100644 packages/primitives/src/announce_event.rs diff --git a/packages/primitives/src/announce_event.rs b/packages/primitives/src/announce_event.rs deleted file mode 100644 index 3bd560084..000000000 --- a/packages/primitives/src/announce_event.rs +++ /dev/null @@ -1,43 +0,0 @@ -//! Copyright (c) 2020-2023 Joakim Frostegård and The Torrust Developers -//! -//! Distributed under Apache 2.0 license - -use serde::{Deserialize, Serialize}; - -/// Announce events. Described on the -/// [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -#[derive(Hash, Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub enum AnnounceEvent { - /// The peer has started downloading the torrent. - Started, - /// The peer has ceased downloading the torrent. - Stopped, - /// The peer has completed downloading the torrent. - Completed, - /// This is one of the announcements done at regular intervals. 
- None, -} - -impl AnnounceEvent { - #[inline] - #[must_use] - pub fn from_i32(i: i32) -> Self { - match i { - 1 => Self::Completed, - 2 => Self::Started, - 3 => Self::Stopped, - _ => Self::None, - } - } - - #[inline] - #[must_use] - pub fn to_i32(&self) -> i32 { - match self { - AnnounceEvent::None => 0, - AnnounceEvent::Completed => 1, - AnnounceEvent::Started => 2, - AnnounceEvent::Stopped => 3, - } - } -} diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index d6f29c2b5..44666c9c3 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -7,10 +7,10 @@ use std::collections::BTreeMap; use std::time::Duration; +pub use aquatic_udp_protocol::{AnnounceEvent, AnnounceEventBytes}; use info_hash::InfoHash; use serde::{Deserialize, Serialize}; -pub mod announce_event; pub mod info_hash; pub mod pagination; pub mod peer; @@ -29,6 +29,29 @@ pub fn ser_unix_time_value(unix_time_value: &DurationSince ser.serialize_u64(unix_time_value.as_millis() as u64) } +#[derive(Serialize)] +pub enum AnnounceEventSer { + Started, + Stopped, + Completed, + None, +} + +/// Serializes a `DurationSinceUnixEpoch` as a Unix timestamp in milliseconds. +/// # Errors +/// +/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`. 
+pub fn ser_announce_event(announce_event: &AnnounceEvent, ser: S) -> Result { + let event_ser = match announce_event { + AnnounceEvent::Started => AnnounceEventSer::Started, + AnnounceEvent::Stopped => AnnounceEventSer::Stopped, + AnnounceEvent::Completed => AnnounceEventSer::Completed, + AnnounceEvent::None => AnnounceEventSer::None, + }; + + ser.serialize_some(&event_ser) +} + /// IP version used by the peer to connect to the tracker: IPv4 or IPv6 #[derive(PartialEq, Eq, Debug)] pub enum IPVersion { diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index ab7559508..369aa443a 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -24,10 +24,10 @@ use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; +use aquatic_udp_protocol::AnnounceEvent; use serde::Serialize; -use crate::announce_event::AnnounceEvent; -use crate::{ser_unix_time_value, DurationSinceUnixEpoch, IPVersion, NumberOfBytes}; +use crate::{ser_announce_event, ser_unix_time_value, DurationSinceUnixEpoch, IPVersion, NumberOfBytes}; /// Peer struct used by the core `Tracker`. /// @@ -51,7 +51,7 @@ use crate::{ser_unix_time_value, DurationSinceUnixEpoch, IPVersion, NumberOfByte /// event: AnnounceEvent::Started, /// }; /// ``` -#[derive(Debug, Clone, Serialize, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[derive(Debug, Clone, Serialize, Copy, PartialEq, Eq, Hash)] pub struct Peer { /// ID used by the downloader peer pub peer_id: Id, @@ -67,9 +67,22 @@ pub struct Peer { /// The number of bytes this peer still has to download pub left: NumberOfBytes, /// This is an optional key which maps to started, completed, or stopped (or empty, which is the same as not being present). 
+ #[serde(serialize_with = "ser_announce_event")] pub event: AnnounceEvent, } +impl Ord for Peer { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.peer_id.cmp(&other.peer_id) + } +} + +impl PartialOrd for Peer { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.peer_id.cmp(&other.peer_id)) + } +} + pub trait ReadInfo { fn is_seeder(&self) -> bool; fn get_event(&self) -> AnnounceEvent; @@ -344,8 +357,9 @@ impl FromIterator for Vec

{ pub mod fixture { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use aquatic_udp_protocol::AnnounceEvent; + use super::{Id, Peer}; - use crate::announce_event::AnnounceEvent; use crate::{DurationSinceUnixEpoch, NumberOfBytes}; #[derive(PartialEq, Debug)] diff --git a/packages/torrent-repository/benches/helpers/utils.rs b/packages/torrent-repository/benches/helpers/utils.rs index 2f912a5c0..b904ef0e8 100644 --- a/packages/torrent-repository/benches/helpers/utils.rs +++ b/packages/torrent-repository/benches/helpers/utils.rs @@ -1,10 +1,9 @@ use std::collections::HashSet; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::{Id, Peer}; -use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes}; +use torrust_tracker_primitives::{AnnounceEvent, DurationSinceUnixEpoch, NumberOfBytes}; pub const DEFAULT_PEER: Peer = Peer { peer_id: Id([0; 20]), diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs index 6d7ed3155..2d99fa9c5 100644 --- a/packages/torrent-repository/src/entry/single.rs +++ b/packages/torrent-repository/src/entry/single.rs @@ -2,10 +2,9 @@ use std::net::SocketAddr; use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::peer::{self}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::DurationSinceUnixEpoch; +use torrust_tracker_primitives::{AnnounceEvent, DurationSinceUnixEpoch}; use super::Entry; use crate::EntrySingle; diff --git a/packages/torrent-repository/tests/common/torrent_peer_builder.rs b/packages/torrent-repository/tests/common/torrent_peer_builder.rs index 3a4e61ed2..dbdec826d 100644 --- a/packages/torrent-repository/tests/common/torrent_peer_builder.rs +++ 
b/packages/torrent-repository/tests/common/torrent_peer_builder.rs @@ -1,8 +1,7 @@ use std::net::SocketAddr; use torrust_tracker_clock::clock::Time; -use torrust_tracker_primitives::announce_event::AnnounceEvent; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; +use torrust_tracker_primitives::{peer, AnnounceEvent, DurationSinceUnixEpoch, NumberOfBytes}; use crate::CurrentClock; diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index 2a7063a4f..be7d5d715 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -6,9 +6,8 @@ use rstest::{fixture, rstest}; use torrust_tracker_clock::clock::stopped::Stopped as _; use torrust_tracker_clock::clock::{self, Time as _}; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; -use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_primitives::{peer, NumberOfBytes}; +use torrust_tracker_primitives::{peer, AnnounceEvent, NumberOfBytes}; use torrust_tracker_torrent_repository::{ EntryMutexParkingLot, EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle, }; diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index b3b742607..132684c13 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -3,11 +3,10 @@ use std::hash::{DefaultHasher, Hash, Hasher}; use rstest::{fixture, rstest}; use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::{NumberOfBytes, PersistentTorrents}; +use 
torrust_tracker_primitives::{AnnounceEvent, NumberOfBytes, PersistentTorrents}; use torrust_tracker_torrent_repository::entry::Entry as _; use torrust_tracker_torrent_repository::repository::dash_map_mutex_std::XacrimonDashMap; use torrust_tracker_torrent_repository::repository::rw_lock_std::RwLockStd; diff --git a/src/core/mod.rs b/src/core/mod.rs index a6ee830db..49c781959 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -55,15 +55,15 @@ //! Once you have instantiated the `Tracker` you can `announce` a new [`peer::Peer`] with: //! //! ```rust,no_run -//! use torrust_tracker_primitives::peer; -//! use torrust_tracker_primitives::info_hash::InfoHash; -//! use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes}; -//! use torrust_tracker_primitives::announce_event::AnnounceEvent; //! use std::net::SocketAddr; //! use std::net::IpAddr; //! use std::net::Ipv4Addr; //! use std::str::FromStr; //! +//! use aquatic_udp_protocol::AnnounceEvent; +//! use torrust_tracker_primitives::peer; +//! use torrust_tracker_primitives::info_hash::InfoHash; +//! use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes}; //! //! let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); //! 
@@ -1198,7 +1198,7 @@ mod tests { use std::str::FromStr; use std::sync::Arc; - use torrust_tracker_primitives::announce_event::AnnounceEvent; + use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; @@ -2035,7 +2035,7 @@ mod tests { mod handling_torrent_persistence { - use torrust_tracker_primitives::announce_event::AnnounceEvent; + use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_torrent_repository::entry::EntrySync; use torrust_tracker_torrent_repository::repository::Repository; diff --git a/src/core/peer_tests.rs b/src/core/peer_tests.rs index d30d73db3..fa1396887 100644 --- a/src/core/peer_tests.rs +++ b/src/core/peer_tests.rs @@ -2,9 +2,9 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_clock::clock::stopped::Stopped as _; use torrust_tracker_clock::clock::{self, Time}; -use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::{peer, NumberOfBytes}; use crate::CurrentClock; diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 1c337a41d..fce9a2602 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -105,7 +105,7 @@ pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Ve mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use torrust_tracker_primitives::announce_event::AnnounceEvent; + use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; fn sample_peer() -> peer::Peer { diff --git a/src/servers/apis/v1/context/torrent/resources/peer.rs b/src/servers/apis/v1/context/torrent/resources/peer.rs index e7a0802c1..59637f2ee 100644 --- a/src/servers/apis/v1/context/torrent/resources/peer.rs +++ b/src/servers/apis/v1/context/torrent/resources/peer.rs @@ -22,7 
+22,7 @@ pub struct Peer { /// The peer's left bytes (pending to download). pub left: i64, /// The peer's event: `started`, `stopped`, `completed`. - /// See [`AnnounceEvent`](torrust_tracker_primitives::announce_event::AnnounceEvent). + /// See [`AnnounceEvent`](aquatic_udp_protocol::AnnounceEvent). pub event: String, } diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index 0d65b3eb6..d5ff957de 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -97,7 +97,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; - use torrust_tracker_primitives::announce_event::AnnounceEvent; + use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 0514a9f71..32143ce8e 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -9,10 +9,10 @@ use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use std::sync::Arc; +use aquatic_udp_protocol::AnnounceEvent; use axum::extract::State; use axum::response::{IntoResponse, Response}; use torrust_tracker_clock::clock::Time; -use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::{peer, NumberOfBytes}; use tracing::debug; diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index f5f730ae2..e8699a6eb 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -48,7 +48,7 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut peer: mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use 
torrust_tracker_primitives::announce_event::AnnounceEvent; + use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index b83abb321..e49c37ba6 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -61,7 +61,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use torrust_tracker_primitives::announce_event::AnnounceEvent; + use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs index 39881ad5c..a6bb3f7c3 100644 --- a/src/servers/udp/peer_builder.rs +++ b/src/servers/udp/peer_builder.rs @@ -2,7 +2,6 @@ use std::net::{IpAddr, SocketAddr}; use torrust_tracker_clock::clock::Time; -use torrust_tracker_primitives::announce_event::AnnounceEvent; use torrust_tracker_primitives::{peer, NumberOfBytes}; use crate::CurrentClock; @@ -16,13 +15,6 @@ use crate::CurrentClock; /// * `peer_ip` - The real IP address of the peer, not the one in the announce request. 
#[must_use] pub fn from_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, peer_ip: &IpAddr) -> peer::Peer { - let announce_event = match aquatic_udp_protocol::AnnounceEvent::from(announce_request.event) { - aquatic_udp_protocol::AnnounceEvent::Started => AnnounceEvent::Started, - aquatic_udp_protocol::AnnounceEvent::Stopped => AnnounceEvent::Stopped, - aquatic_udp_protocol::AnnounceEvent::Completed => AnnounceEvent::Completed, - aquatic_udp_protocol::AnnounceEvent::None => AnnounceEvent::None, - }; - peer::Peer { peer_id: peer::Id(announce_request.peer_id.0), peer_addr: SocketAddr::new(*peer_ip, announce_request.port.0.into()), @@ -30,6 +22,6 @@ pub fn from_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, pe uploaded: NumberOfBytes(announce_request.bytes_uploaded.0.into()), downloaded: NumberOfBytes(announce_request.bytes_downloaded.0.into()), left: NumberOfBytes(announce_request.bytes_left.0.into()), - event: announce_event, + event: announce_request.event.into(), } } From 03e88d0ffe9277f6328dcb5625a624940f1e91d0 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 14 Jul 2024 17:13:03 +0200 Subject: [PATCH 0979/1003] dev: use aquatic number_of_bytes --- Cargo.lock | 2 + packages/primitives/src/lib.rs | 22 +++++--- packages/primitives/src/peer.rs | 51 ++++++++++--------- packages/torrent-repository/Cargo.toml | 2 + .../benches/helpers/utils.rs | 10 ++-- .../torrent-repository/src/entry/single.rs | 3 +- .../tests/common/torrent_peer_builder.rs | 5 +- .../torrent-repository/tests/entry/mod.rs | 15 +++--- .../tests/repository/mod.rs | 7 +-- src/core/mod.rs | 38 +++++++------- src/core/peer_tests.rs | 10 ++-- src/core/services/torrent.rs | 10 ++-- .../apis/v1/context/torrent/resources/peer.rs | 6 +-- .../v1/context/torrent/resources/torrent.rs | 10 ++-- .../http/v1/extractors/announce_request.rs | 7 +-- src/servers/http/v1/handlers/announce.rs | 10 ++-- src/servers/http/v1/requests/announce.rs | 30 ++++++----- 
src/servers/http/v1/requests/scrape.rs | 2 - src/servers/http/v1/services/announce.rs | 10 ++-- src/servers/http/v1/services/scrape.rs | 10 ++-- src/servers/udp/handlers.rs | 5 +- src/servers/udp/peer_builder.rs | 9 ++-- 22 files changed, 147 insertions(+), 127 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cfe940e43..504a5bb17 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4058,6 +4058,7 @@ dependencies = [ name = "torrust-tracker-torrent-repository" version = "3.0.0-beta-develop" dependencies = [ + "aquatic_udp_protocol", "async-std", "criterion", "crossbeam-skiplist", @@ -4069,6 +4070,7 @@ dependencies = [ "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-primitives", + "zerocopy", ] [[package]] diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index 44666c9c3..b383e95ad 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -7,9 +7,9 @@ use std::collections::BTreeMap; use std::time::Duration; -pub use aquatic_udp_protocol::{AnnounceEvent, AnnounceEventBytes}; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use info_hash::InfoHash; -use serde::{Deserialize, Serialize}; +use serde::Serialize; pub mod info_hash; pub mod pagination; @@ -37,10 +37,11 @@ pub enum AnnounceEventSer { None, } -/// Serializes a `DurationSinceUnixEpoch` as a Unix timestamp in milliseconds. +/// Serializes a `Announce Event` as a enum. +/// /// # Errors /// -/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`. +/// If will return an error if the internal serializer was to fail. pub fn ser_announce_event(announce_event: &AnnounceEvent, ser: S) -> Result { let event_ser = match announce_event { AnnounceEvent::Started => AnnounceEventSer::Started, @@ -52,6 +53,15 @@ pub fn ser_announce_event(announce_event: &AnnounceEvent, ser.serialize_some(&event_ser) } +/// Serializes a `Announce Event` as a i64. 
+/// +/// # Errors +/// +/// If will return an error if the internal serializer was to fail. +pub fn ser_number_of_bytes(number_of_bytes: &NumberOfBytes, ser: S) -> Result { + ser.serialize_i64(number_of_bytes.0.get()) +} + /// IP version used by the peer to connect to the tracker: IPv4 or IPv6 #[derive(PartialEq, Eq, Debug)] pub enum IPVersion { @@ -61,8 +71,4 @@ pub enum IPVersion { IPv6, } -/// Number of bytes downloaded, uploaded or pending to download (left) by the peer. -#[derive(Hash, Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct NumberOfBytes(pub i64); - pub type PersistentTorrents = BTreeMap; diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index 369aa443a..987099b70 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -14,9 +14,9 @@ //! peer_id: peer::Id(*b"-qB00000000000000000"), //! peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), //! updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), -//! uploaded: NumberOfBytes(0), -//! downloaded: NumberOfBytes(0), -//! left: NumberOfBytes(0), +//! uploaded: NumberOfBytes::new(0), +//! downloaded: NumberOfBytes::new(0), +//! left: NumberOfBytes::new(0), //! event: AnnounceEvent::Started, //! }; //! ``` @@ -24,10 +24,10 @@ use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; -use aquatic_udp_protocol::AnnounceEvent; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde::Serialize; -use crate::{ser_announce_event, ser_unix_time_value, DurationSinceUnixEpoch, IPVersion, NumberOfBytes}; +use crate::{ser_announce_event, ser_number_of_bytes, ser_unix_time_value, DurationSinceUnixEpoch, IPVersion}; /// Peer struct used by the core `Tracker`. 
/// @@ -45,9 +45,9 @@ use crate::{ser_announce_event, ser_unix_time_value, DurationSinceUnixEpoch, IPV /// peer_id: peer::Id(*b"-qB00000000000000000"), /// peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), /// updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), -/// uploaded: NumberOfBytes(0), -/// downloaded: NumberOfBytes(0), -/// left: NumberOfBytes(0), +/// uploaded: NumberOfBytes::new(0), +/// downloaded: NumberOfBytes::new(0), +/// left: NumberOfBytes::new(0), /// event: AnnounceEvent::Started, /// }; /// ``` @@ -61,10 +61,13 @@ pub struct Peer { #[serde(serialize_with = "ser_unix_time_value")] pub updated: DurationSinceUnixEpoch, /// The total amount of bytes uploaded by this peer so far + #[serde(serialize_with = "ser_number_of_bytes")] pub uploaded: NumberOfBytes, /// The total amount of bytes downloaded by this peer so far + #[serde(serialize_with = "ser_number_of_bytes")] pub downloaded: NumberOfBytes, /// The number of bytes this peer still has to download + #[serde(serialize_with = "ser_number_of_bytes")] pub left: NumberOfBytes, /// This is an optional key which maps to started, completed, or stopped (or empty, which is the same as not being present). 
#[serde(serialize_with = "ser_announce_event")] @@ -93,7 +96,7 @@ pub trait ReadInfo { impl ReadInfo for Peer { fn is_seeder(&self) -> bool { - self.left.0 <= 0 && self.event != AnnounceEvent::Stopped + self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped } fn get_event(&self) -> AnnounceEvent { @@ -115,7 +118,7 @@ impl ReadInfo for Peer { impl ReadInfo for Arc { fn is_seeder(&self) -> bool { - self.left.0 <= 0 && self.event != AnnounceEvent::Stopped + self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped } fn get_event(&self) -> AnnounceEvent { @@ -138,7 +141,7 @@ impl ReadInfo for Arc { impl Peer { #[must_use] pub fn is_seeder(&self) -> bool { - self.left.0 <= 0 && self.event != AnnounceEvent::Stopped + self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped } pub fn ip(&mut self) -> IpAddr { @@ -357,10 +360,10 @@ impl FromIterator for Vec

{ pub mod fixture { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use aquatic_udp_protocol::AnnounceEvent; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use super::{Id, Peer}; - use crate::{DurationSinceUnixEpoch, NumberOfBytes}; + use crate::DurationSinceUnixEpoch; #[derive(PartialEq, Debug)] @@ -383,9 +386,9 @@ pub mod fixture { peer_id: Id(*b"-qB00000000000000001"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), event: AnnounceEvent::Completed, }; @@ -399,9 +402,9 @@ pub mod fixture { peer_id: Id(*b"-qB00000000000000002"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(10), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(10), event: AnnounceEvent::Started, }; @@ -425,14 +428,14 @@ pub mod fixture { #[allow(dead_code)] #[must_use] pub fn with_bytes_pending_to_download(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes(left); + self.peer.left = NumberOfBytes::new(left); self } #[allow(dead_code)] #[must_use] pub fn with_no_bytes_pending_to_download(mut self) -> Self { - self.peer.left = NumberOfBytes(0); + self.peer.left = NumberOfBytes::new(0); self } @@ -462,9 +465,9 @@ pub mod fixture { peer_id: Id::default(), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), event: 
AnnounceEvent::Started, } } diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 1fd58ab02..38405e4e0 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -16,6 +16,7 @@ rust-version.workspace = true version.workspace = true [dependencies] +aquatic_udp_protocol = "0" crossbeam-skiplist = "0" dashmap = "6" futures = "0" @@ -24,6 +25,7 @@ tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal torrust-tracker-clock = { version = "3.0.0-beta-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-beta-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-beta-develop", path = "../primitives" } +zerocopy = "0" [dev-dependencies] async-std = { version = "1", features = ["attributes", "tokio1"] } diff --git a/packages/torrent-repository/benches/helpers/utils.rs b/packages/torrent-repository/benches/helpers/utils.rs index b904ef0e8..f7a392bd8 100644 --- a/packages/torrent-repository/benches/helpers/utils.rs +++ b/packages/torrent-repository/benches/helpers/utils.rs @@ -1,17 +1,19 @@ use std::collections::HashSet; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::{Id, Peer}; -use torrust_tracker_primitives::{AnnounceEvent, DurationSinceUnixEpoch, NumberOfBytes}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use zerocopy::I64; pub const DEFAULT_PEER: Peer = Peer { peer_id: Id([0; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::from_secs(0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), + uploaded: NumberOfBytes(I64::ZERO), + downloaded: NumberOfBytes(I64::ZERO), + left: NumberOfBytes(I64::ZERO), event: AnnounceEvent::Started, }; diff --git 
a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs index 2d99fa9c5..7f8cfc4e6 100644 --- a/packages/torrent-repository/src/entry/single.rs +++ b/packages/torrent-repository/src/entry/single.rs @@ -1,10 +1,11 @@ use std::net::SocketAddr; use std::sync::Arc; +use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::peer::{self}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::{AnnounceEvent, DurationSinceUnixEpoch}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::Entry; use crate::EntrySingle; diff --git a/packages/torrent-repository/tests/common/torrent_peer_builder.rs b/packages/torrent-repository/tests/common/torrent_peer_builder.rs index dbdec826d..a5d2814c1 100644 --- a/packages/torrent-repository/tests/common/torrent_peer_builder.rs +++ b/packages/torrent-repository/tests/common/torrent_peer_builder.rs @@ -1,7 +1,8 @@ use std::net::SocketAddr; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_clock::clock::Time; -use torrust_tracker_primitives::{peer, AnnounceEvent, DurationSinceUnixEpoch, NumberOfBytes}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use crate::CurrentClock; @@ -48,7 +49,7 @@ impl TorrentPeerBuilder { #[must_use] fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes(left); + self.peer.left = NumberOfBytes::new(left); self } diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index be7d5d715..223819a14 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -2,12 +2,13 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::ops::Sub; use std::time::Duration; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use rstest::{fixture, rstest}; use 
torrust_tracker_clock::clock::stopped::Stopped as _; use torrust_tracker_clock::clock::{self, Time as _}; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; +use torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_primitives::{peer, AnnounceEvent, NumberOfBytes}; use torrust_tracker_torrent_repository::{ EntryMutexParkingLot, EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle, }; @@ -85,7 +86,7 @@ async fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { let mut peer = a_started_peer(3); torrent.upsert_peer(&peer).await; peer.event = AnnounceEvent::Completed; - peer.left = NumberOfBytes(0); + peer.left = NumberOfBytes::new(0); torrent.upsert_peer(&peer).await; vec![peer] } @@ -99,7 +100,7 @@ async fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { let mut peer_3 = a_started_peer(3); torrent.upsert_peer(&peer_3).await; peer_3.event = AnnounceEvent::Completed; - peer_3.left = NumberOfBytes(0); + peer_3.left = NumberOfBytes::new(0); torrent.upsert_peer(&peer_3).await; vec![peer_1, peer_2, peer_3] } @@ -304,10 +305,10 @@ async fn it_should_update_a_peer_as_a_seeder( let peers = torrent.get_peers(None).await; let mut peer = **peers.first().expect("there should be a peer"); - let is_already_non_left = peer.left == NumberOfBytes(0); + let is_already_non_left = peer.left == NumberOfBytes::new(0); // Set Bytes Left to Zero - peer.left = NumberOfBytes(0); + peer.left = NumberOfBytes::new(0); torrent.upsert_peer(&peer).await; let stats = torrent.get_stats().await; @@ -336,10 +337,10 @@ async fn it_should_update_a_peer_as_incomplete( let peers = torrent.get_peers(None).await; let mut peer = **peers.first().expect("there should be a peer"); - let completed_already = peer.left == NumberOfBytes(0); + let completed_already = peer.left == NumberOfBytes::new(0); // Set Bytes Left to no Zero - peer.left = NumberOfBytes(1); + peer.left = NumberOfBytes::new(1); torrent.upsert_peer(&peer).await; 
let stats = torrent.get_stats().await; diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 132684c13..05d538582 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -1,12 +1,13 @@ use std::collections::{BTreeMap, HashSet}; use std::hash::{DefaultHasher, Hash, Hasher}; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use rstest::{fixture, rstest}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::{AnnounceEvent, NumberOfBytes, PersistentTorrents}; +use torrust_tracker_primitives::PersistentTorrents; use torrust_tracker_torrent_repository::entry::Entry as _; use torrust_tracker_torrent_repository::repository::dash_map_mutex_std::XacrimonDashMap; use torrust_tracker_torrent_repository::repository::rw_lock_std::RwLockStd; @@ -99,7 +100,7 @@ fn downloaded() -> Entries { let mut peer = a_started_peer(3); torrent.upsert_peer(&peer); peer.event = AnnounceEvent::Completed; - peer.left = NumberOfBytes(0); + peer.left = NumberOfBytes::new(0); torrent.upsert_peer(&peer); vec![(InfoHash::default(), torrent)] } @@ -121,7 +122,7 @@ fn three() -> Entries { let mut downloaded_peer = a_started_peer(3); downloaded.upsert_peer(&downloaded_peer); downloaded_peer.event = AnnounceEvent::Completed; - downloaded_peer.left = NumberOfBytes(0); + downloaded_peer.left = NumberOfBytes::new(0); downloaded.upsert_peer(&downloaded_peer); downloaded.hash(downloaded_h); diff --git a/src/core/mod.rs b/src/core/mod.rs index 49c781959..7c10c0aae 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -60,10 +60,10 @@ //! use std::net::Ipv4Addr; //! use std::str::FromStr; //! -//! use aquatic_udp_protocol::AnnounceEvent; +//! 
use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; //! use torrust_tracker_primitives::peer; //! use torrust_tracker_primitives::info_hash::InfoHash; -//! use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes}; +//! use torrust_tracker_primitives::{DurationSinceUnixEpoch}; //! //! let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); //! @@ -71,9 +71,9 @@ //! peer_id: peer::Id(*b"-qB00000000000000001"), //! peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), //! updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), -//! uploaded: NumberOfBytes(0), -//! downloaded: NumberOfBytes(0), -//! left: NumberOfBytes(0), +//! uploaded: NumberOfBytes::new(0), +//! downloaded: NumberOfBytes::new(0), +//! left: NumberOfBytes::new(0), //! event: AnnounceEvent::Completed, //! }; //! @@ -1198,9 +1198,9 @@ mod tests { use std::str::FromStr; use std::sync::Arc; - use aquatic_udp_protocol::AnnounceEvent; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_primitives::info_hash::InfoHash; - use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes}; + use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; use crate::core::peer::{self, Peer}; @@ -1246,9 +1246,9 @@ mod tests { peer_id: peer::Id(*b"-qB00000000000000001"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), event: AnnounceEvent::Completed, } } @@ -1259,9 +1259,9 @@ mod tests { peer_id: peer::Id(*b"-qB00000000000000002"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: 
NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), event: AnnounceEvent::Completed, } } @@ -1290,9 +1290,9 @@ mod tests { peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), // No bytes left to download + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download event: AnnounceEvent::Completed, } } @@ -1303,9 +1303,9 @@ mod tests { peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(1000), // Still bytes to download + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(1000), // Still bytes to download event: AnnounceEvent::Started, } } diff --git a/src/core/peer_tests.rs b/src/core/peer_tests.rs index fa1396887..f0773faf0 100644 --- a/src/core/peer_tests.rs +++ b/src/core/peer_tests.rs @@ -2,10 +2,10 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use aquatic_udp_protocol::AnnounceEvent; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_clock::clock::stopped::Stopped as _; use torrust_tracker_clock::clock::{self, Time}; -use torrust_tracker_primitives::{peer, NumberOfBytes}; +use torrust_tracker_primitives::peer; use crate::CurrentClock; @@ -17,9 +17,9 @@ fn it_should_be_serializable() { peer_id: peer::Id(*b"-qB0000-000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: CurrentClock::now(), - uploaded: NumberOfBytes(0), - 
downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), event: AnnounceEvent::Started, }; diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index fce9a2602..9cb38e3f1 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -105,17 +105,17 @@ pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Ve mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use aquatic_udp_protocol::AnnounceEvent; - use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; fn sample_peer() -> peer::Peer { peer::Peer { peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), event: AnnounceEvent::Started, } } diff --git a/src/servers/apis/v1/context/torrent/resources/peer.rs b/src/servers/apis/v1/context/torrent/resources/peer.rs index 59637f2ee..129318ce1 100644 --- a/src/servers/apis/v1/context/torrent/resources/peer.rs +++ b/src/servers/apis/v1/context/torrent/resources/peer.rs @@ -52,9 +52,9 @@ impl From for Peer { peer_addr: value.peer_addr.to_string(), updated: value.updated.as_millis(), updated_milliseconds_ago: value.updated.as_millis(), - uploaded: value.uploaded.0, - downloaded: value.downloaded.0, - left: value.left.0, + uploaded: value.uploaded.0.get(), + downloaded: value.downloaded.0.get(), + left: value.left.0.get(), event: format!("{:?}", value.event), } } diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs 
b/src/servers/apis/v1/context/torrent/resources/torrent.rs index d5ff957de..772a37f98 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -97,9 +97,9 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; - use aquatic_udp_protocol::AnnounceEvent; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_primitives::info_hash::InfoHash; - use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use super::Torrent; use crate::core::services::torrent::{BasicInfo, Info}; @@ -111,9 +111,9 @@ mod tests { peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), event: AnnounceEvent::Started, } } diff --git a/src/servers/http/v1/extractors/announce_request.rs b/src/servers/http/v1/extractors/announce_request.rs index d2612f79b..6867461e0 100644 --- a/src/servers/http/v1/extractors/announce_request.rs +++ b/src/servers/http/v1/extractors/announce_request.rs @@ -95,6 +95,7 @@ fn extract_announce_from(maybe_raw_query: Option<&str>) -> Result peer::Pee peer_id: announce_request.peer_id, peer_addr: SocketAddr::new(*peer_ip, announce_request.port), updated: CurrentClock::now(), - uploaded: NumberOfBytes(announce_request.uploaded.unwrap_or(0)), - downloaded: NumberOfBytes(announce_request.downloaded.unwrap_or(0)), - left: NumberOfBytes(announce_request.left.unwrap_or(0)), + uploaded: announce_request.uploaded.unwrap_or(NumberOfBytes::new(0)), + downloaded: announce_request.downloaded.unwrap_or(NumberOfBytes::new(0)), + left: 
announce_request.left.unwrap_or(NumberOfBytes::new(0)), event: map_to_torrust_event(&announce_request.event), } } diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index 83cc7ddf9..6efee18b3 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -5,6 +5,7 @@ use std::fmt; use std::panic::Location; use std::str::FromStr; +use aquatic_udp_protocol::NumberOfBytes; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; use torrust_tracker_primitives::info_hash::{self, InfoHash}; @@ -14,10 +15,6 @@ use crate::servers::http::percent_encoding::{percent_decode_info_hash, percent_d use crate::servers::http::v1::query::{ParseQueryError, Query}; use crate::servers::http::v1::responses; -/// The number of bytes `downloaded`, `uploaded` or `left`. It's used in the -/// `Announce` request for parameters that represent a number of bytes. -pub type NumberOfBytes = i64; - // Query param names const INFO_HASH: &str = "info_hash"; const PEER_ID: &str = "peer_id"; @@ -32,6 +29,7 @@ const COMPACT: &str = "compact"; /// query params of the request. 
/// /// ```rust +/// use aquatic_udp_protocol::NumberOfBytes; /// use torrust_tracker::servers::http::v1::requests::announce::{Announce, Compact, Event}; /// use torrust_tracker_primitives::info_hash::InfoHash; /// use torrust_tracker_primitives::peer; @@ -42,9 +40,9 @@ const COMPACT: &str = "compact"; /// peer_id: "-qB00000000000000001".parse::().unwrap(), /// port: 17548, /// // Optional params -/// downloaded: Some(1), -/// uploaded: Some(2), -/// left: Some(3), +/// downloaded: Some(NumberOfBytes::new(1)), +/// uploaded: Some(NumberOfBytes::new(2)), +/// left: Some(NumberOfBytes::new(3)), /// event: Some(Event::Started), /// compact: Some(Compact::NotAccepted) /// }; @@ -324,13 +322,16 @@ fn extract_number_of_bytes_from_param(param_name: &str, query: &Query) -> Result location: Location::caller(), })?; - Ok(Some(i64::try_from(number_of_bytes).map_err(|_e| { - ParseAnnounceQueryError::NumberOfBytesOverflow { + let number_of_bytes = + i64::try_from(number_of_bytes).map_err(|_e| ParseAnnounceQueryError::NumberOfBytesOverflow { param_name: param_name.to_owned(), param_value: raw_param.clone(), location: Location::caller(), - } - })?)) + })?; + + let number_of_bytes = NumberOfBytes::new(number_of_bytes); + + Ok(Some(number_of_bytes)) } None => Ok(None), } @@ -355,6 +356,7 @@ mod tests { mod announce_request { + use aquatic_udp_protocol::NumberOfBytes; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer; @@ -415,9 +417,9 @@ mod tests { info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), peer_id: "-qB00000000000000001".parse::().unwrap(), port: 17548, - downloaded: Some(1), - uploaded: Some(2), - left: Some(3), + downloaded: Some(NumberOfBytes::new(1)), + uploaded: Some(NumberOfBytes::new(2)), + left: Some(NumberOfBytes::new(3)), event: Some(Event::Started), compact: Some(Compact::NotAccepted), } diff --git a/src/servers/http/v1/requests/scrape.rs b/src/servers/http/v1/requests/scrape.rs index 
19f6e35a6..c61d3be1f 100644 --- a/src/servers/http/v1/requests/scrape.rs +++ b/src/servers/http/v1/requests/scrape.rs @@ -11,8 +11,6 @@ use crate::servers::http::percent_encoding::percent_decode_info_hash; use crate::servers::http::v1::query::Query; use crate::servers::http::v1::responses; -pub type NumberOfBytes = i64; - // Query param names const INFO_HASH: &str = "info_hash"; diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index e8699a6eb..a85a4d4bf 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -48,9 +48,9 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut peer: mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use aquatic_udp_protocol::AnnounceEvent; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_primitives::info_hash::InfoHash; - use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; @@ -82,9 +82,9 @@ mod tests { peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), event: AnnounceEvent::Started, } } diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index e49c37ba6..bd3f323b4 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -61,9 +61,9 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use aquatic_udp_protocol::AnnounceEvent; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use 
torrust_tracker_primitives::info_hash::InfoHash; - use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; @@ -86,9 +86,9 @@ mod tests { peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), event: AnnounceEvent::Started, } } diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 7eb07bc8d..c7204b4b9 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -318,9 +318,10 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; + use aquatic_udp_protocol::NumberOfBytes; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::{peer, NumberOfBytes}; + use torrust_tracker_primitives::peer; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; @@ -397,7 +398,7 @@ mod tests { #[must_use] pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes(left); + self.peer.left = NumberOfBytes::new(left); self } diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs index a6bb3f7c3..1824b2826 100644 --- a/src/servers/udp/peer_builder.rs +++ b/src/servers/udp/peer_builder.rs @@ -2,7 +2,7 @@ use std::net::{IpAddr, SocketAddr}; use torrust_tracker_clock::clock::Time; -use torrust_tracker_primitives::{peer, NumberOfBytes}; +use torrust_tracker_primitives::peer; use crate::CurrentClock; @@ -11,7 +11,6 @@ use crate::CurrentClock; /// /// # Arguments /// 
-/// * `announce_wrapper` - The announce request to extract the peer info from. /// * `peer_ip` - The real IP address of the peer, not the one in the announce request. #[must_use] pub fn from_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, peer_ip: &IpAddr) -> peer::Peer { @@ -19,9 +18,9 @@ pub fn from_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, pe peer_id: peer::Id(announce_request.peer_id.0), peer_addr: SocketAddr::new(*peer_ip, announce_request.port.0.into()), updated: CurrentClock::now(), - uploaded: NumberOfBytes(announce_request.bytes_uploaded.0.into()), - downloaded: NumberOfBytes(announce_request.bytes_downloaded.0.into()), - left: NumberOfBytes(announce_request.bytes_left.0.into()), + uploaded: announce_request.bytes_uploaded, + downloaded: announce_request.bytes_downloaded, + left: announce_request.bytes_left, event: announce_request.event.into(), } } From 9362fa57817122292ba25202c85d6869329096df Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 15 Jul 2024 10:09:24 +0200 Subject: [PATCH 0980/1003] dev: use aquatic PeerId instead of local one --- .../flamegraph_generated_without_sudo.svg | 2 +- packages/primitives/src/lib.rs | 44 --- packages/primitives/src/peer.rs | 278 ++++++++---------- .../benches/helpers/utils.rs | 6 +- .../torrent-repository/src/entry/peer_list.rs | 21 +- .../tests/common/torrent_peer_builder.rs | 10 +- .../torrent-repository/tests/entry/mod.rs | 2 +- src/core/mod.rs | 23 +- src/core/peer_tests.rs | 4 +- src/core/services/torrent.rs | 4 +- .../apis/v1/context/torrent/resources/peer.rs | 6 +- .../v1/context/torrent/resources/torrent.rs | 4 +- src/servers/http/percent_encoding.rs | 18 +- .../http/v1/extractors/announce_request.rs | 5 +- src/servers/http/v1/handlers/announce.rs | 4 +- src/servers/http/v1/requests/announce.rs | 24 +- src/servers/http/v1/responses/announce.rs | 8 +- src/servers/http/v1/services/announce.rs | 4 +- src/servers/http/v1/services/scrape.rs | 4 +- 
src/servers/udp/handlers.rs | 24 +- src/servers/udp/peer_builder.rs | 2 +- .../tracker/http/client/requests/announce.rs | 6 +- .../tracker/http/client/responses/announce.rs | 3 +- tests/servers/http/requests/announce.rs | 6 +- tests/servers/http/responses/announce.rs | 3 +- tests/servers/http/v1/contract.rs | 46 ++- 26 files changed, 244 insertions(+), 317 deletions(-) diff --git a/docs/media/flamegraph_generated_without_sudo.svg b/docs/media/flamegraph_generated_without_sudo.svg index 84c00ffe3..e3df85866 100644 --- a/docs/media/flamegraph_generated_without_sudo.svg +++ b/docs/media/flamegraph_generated_without_sudo.svg @@ -488,4 +488,4 @@ function search(term) { function format_percent(n) { return n.toFixed(4) + "%"; } -]]>Flame Graph Reset ZoomSearch [unknown] (188 samples, 0.14%)[unknown] (187 samples, 0.14%)[unknown] (186 samples, 0.14%)[unknown] (178 samples, 0.14%)[unknown] (172 samples, 0.13%)[unknown] (158 samples, 0.12%)[unknown] (158 samples, 0.12%)[unknown] (125 samples, 0.10%)[unknown] (102 samples, 0.08%)[unknown] (93 samples, 0.07%)[unknown] (92 samples, 0.07%)[unknown] (41 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (29 samples, 0.02%)[unknown] (25 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (15 samples, 0.01%)__GI___mmap64 (18 samples, 0.01%)__GI___mmap64 (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (17 samples, 0.01%)profiling (214 samples, 0.16%)clone3 (22 samples, 0.02%)start_thread (22 samples, 0.02%)std::sys::pal::unix::thread::Thread::new::thread_start (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::Handler::new (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::imp::make_handler (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::imp::get_stack (19 samples, 
0.01%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (30 samples, 0.02%)[[vdso]] (93 samples, 0.07%)<torrust_tracker::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as core::ops::deref::Deref>::deref::__stability::LAZY (143 samples, 0.11%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (31 samples, 0.02%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<BorrowType,K,V>::init_front (21 samples, 0.02%)[[vdso]] (91 samples, 0.07%)__GI___clock_gettime (14 samples, 0.01%)_int_malloc (53 samples, 0.04%)epoll_wait (254 samples, 0.19%)tokio::runtime::context::with_scheduler (28 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (14 samples, 0.01%)tokio::runtime::context::with_scheduler::{{closure}} (14 samples, 0.01%)core::option::Option<T>::map (17 samples, 0.01%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (17 samples, 0.01%)mio::poll::Poll::poll (27 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select (27 samples, 0.02%)tokio::runtime::io::driver::Driver::turn (54 samples, 0.04%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (26 samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (17 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (71 samples, 0.05%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (65 samples, 0.05%)core::sync::atomic::AtomicUsize::fetch_add (65 samples, 0.05%)core::sync::atomic::atomic_add (65 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (31 samples, 
0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark_condvar (18 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (49 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (33 samples, 0.03%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (93 samples, 0.07%)tokio::runtime::scheduler::multi_thread::park::Parker::park (75 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Inner::park (75 samples, 0.06%)core::cell::RefCell<T>::borrow_mut (18 samples, 0.01%)core::cell::RefCell<T>::try_borrow_mut (18 samples, 0.01%)core::cell::BorrowRefMut::new (18 samples, 0.01%)tokio::runtime::coop::budget (26 samples, 0.02%)tokio::runtime::coop::with_budget (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (96 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (27 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (18 samples, 0.01%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (35 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (14 samples, 0.01%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (90 samples, 0.07%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (90 samples, 0.07%)core::slice::<impl [T]>::contains (220 samples, 0.17%)<T as 
core::slice::cmp::SliceContains>::slice_contains (220 samples, 0.17%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (220 samples, 0.17%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (54 samples, 0.04%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (54 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (240 samples, 0.18%)tokio::runtime::scheduler::multi_thread::idle::Idle::unpark_worker_by_id (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (265 samples, 0.20%)tokio::runtime::scheduler::multi_thread::worker::Context::park (284 samples, 0.22%)core::option::Option<T>::or_else (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (40 samples, 0.03%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (17 samples, 0.01%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (17 samples, 0.01%)core::num::<impl u32>::wrapping_add (17 samples, 0.01%)core::sync::atomic::AtomicU64::compare_exchange (26 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (129 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (128 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (119 samples, 0.09%)tokio::runtime::scheduler::multi_thread::queue::pack (39 samples, 0.03%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (613 samples, 
0.47%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run (613 samples, 0.47%)tokio::runtime::context::runtime::enter_runtime (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (613 samples, 0.47%)tokio::runtime::context::set_scheduler (613 samples, 0.47%)std::thread::local::LocalKey<T>::with (613 samples, 0.47%)std::thread::local::LocalKey<T>::try_with (613 samples, 0.47%)tokio::runtime::context::set_scheduler::{{closure}} (613 samples, 0.47%)tokio::runtime::context::scoped::Scoped<T>::set (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::Context::run (613 samples, 0.47%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (777 samples, 0.59%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (776 samples, 0.59%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (16 samples, 0.01%)<tokio::runtime::task::core::TaskIdGuard as core::ops::drop::Drop>::drop (16 samples, 0.01%)tokio::runtime::context::set_current_task_id (16 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (16 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (20 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (20 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::poll (835 samples, 0.64%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (56 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage (46 samples, 0.04%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (897 samples, 0.68%)tokio::runtime::task::harness::poll_future::{{closure}} (897 samples, 0.68%)tokio::runtime::task::core::Core<T,S>::store_output (62 samples, 0.05%)tokio::runtime::task::harness::poll_future (930 samples, 0.71%)std::panic::catch_unwind 
(927 samples, 0.71%)std::panicking::try (927 samples, 0.71%)std::panicking::try::do_call (925 samples, 0.70%)core::mem::manually_drop::ManuallyDrop<T>::take (28 samples, 0.02%)core::ptr::read (28 samples, 0.02%)tokio::runtime::task::raw::poll (938 samples, 0.71%)tokio::runtime::task::harness::Harness<T,S>::poll (934 samples, 0.71%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (934 samples, 0.71%)core::array::<impl core::default::Default for [T: 32]>::default (26 samples, 0.02%)tokio::runtime::time::Inner::lock (16 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (15 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::time::wheel::Wheel::poll (25 samples, 0.02%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (98 samples, 0.07%)tokio::runtime::time::Driver::park_internal (51 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (15 samples, 0.01%)<F as core::future::into_future::IntoFuture>::into_future (16 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (24 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (46 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (131 samples, 0.10%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (24 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (14 
samples, 0.01%)core::sync::atomic::AtomicU32::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (39 samples, 0.03%)std::sync::rwlock::RwLock<T>::read (34 samples, 0.03%)std::sys::sync::rwlock::futex::RwLock::read (32 samples, 0.02%)[[heap]] (2,361 samples, 1.80%)[..[[vdso]] (313 samples, 0.24%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (41 samples, 0.03%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (16 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_str (67 samples, 0.05%)alloc::string::String::push_str (18 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (18 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (18 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (18 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (36 samples, 0.03%)core::num::<impl u64>::rotate_left (28 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (60 samples, 0.05%)core::num::<impl u64>::wrapping_add (14 samples, 0.01%)core::hash::sip::u8to64_le (60 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (184 samples, 0.14%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (15 samples, 0.01%)tokio::runtime::context::CONTEXT::__getit (19 samples, 0.01%)core::cell::Cell<T>::get (17 samples, 0.01%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (26 samples, 
0.02%)core::ops::function::FnMut::call_mut (21 samples, 0.02%)tokio::runtime::coop::poll_proceed (21 samples, 0.02%)tokio::runtime::context::budget (21 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (21 samples, 0.02%)[unknown] (18 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (195 samples, 0.15%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (14 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (14 samples, 0.01%)core::result::Result<T,E>::is_err (18 samples, 0.01%)core::result::Result<T,E>::is_ok (18 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (46 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (39 samples, 0.03%)core::sync::atomic::AtomicU32::compare_exchange (19 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (19 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (245 samples, 0.19%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (26 samples, 0.02%)[[vdso]] (748 samples, 0.57%)[profiling] (34 samples, 0.03%)core::fmt::write (31 samples, 0.02%)__GI___clock_gettime (29 samples, 0.02%)__GI___libc_free (131 samples, 0.10%)arena_for_chunk (20 samples, 0.02%)arena_for_chunk (19 samples, 0.01%)heap_for_ptr (19 samples, 0.01%)heap_max_size (14 samples, 0.01%)__GI___libc_malloc (114 samples, 0.09%)__GI___libc_realloc (15 samples, 0.01%)__GI___lll_lock_wake_private (22 samples, 0.02%)__GI___pthread_disable_asynccancel (66 samples, 0.05%)__GI_getsockname (249 samples, 0.19%)__libc_calloc (15 samples, 0.01%)__libc_recvfrom (23 samples, 0.02%)__libc_sendto (130 samples, 0.10%)__memcmp_evex_movbe (451 samples, 0.34%)__memcpy_avx512_unaligned_erms (426 samples, 0.32%)__memset_avx512_unaligned_erms (215 samples, 0.16%)__posix_memalign (17 samples, 0.01%)_int_free (418 samples, 
0.32%)tcache_put (24 samples, 0.02%)_int_malloc (385 samples, 0.29%)_int_memalign (31 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (26 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (15 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (15 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (15 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (54 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::grow_one (15 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (96 samples, 0.07%)alloc::raw_vec::RawVec<T,A>::grow_amortized (66 samples, 0.05%)core::num::<impl usize>::checked_add (18 samples, 0.01%)core::num::<impl usize>::overflowing_add (18 samples, 0.01%)alloc::raw_vec::finish_grow (74 samples, 0.06%)alloc::sync::Arc<T,A>::drop_slow (16 samples, 0.01%)core::mem::drop (14 samples, 0.01%)core::fmt::Formatter::pad_integral (14 samples, 0.01%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (93 samples, 0.07%)core::ptr::drop_in_place<tokio::net::udp::UdpSocket::send_to<&core::net::socket_addr::SocketAddr>::{{closure}}> (23 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (188 samples, 0.14%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_announce::{{closure}}> (30 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_connect::{{closure}}> (22 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_packet::{{closure}}> (20 samples, 
0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}}> (19 samples, 0.01%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::send_response::{{closure}}> (22 samples, 0.02%)malloc_consolidate (24 samples, 0.02%)core::core_arch::x86::avx2::_mm256_or_si256 (15 samples, 0.01%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (17 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (17 samples, 0.01%)rand_chacha::guts::round (66 samples, 0.05%)rand_chacha::guts::refill_wide::impl_avx2 (99 samples, 0.08%)rand_chacha::guts::refill_wide::fn_impl (98 samples, 0.07%)rand_chacha::guts::refill_wide_impl (98 samples, 0.07%)std::io::error::Error::kind (14 samples, 0.01%)[unknown] (42 samples, 0.03%)[unknown] (14 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (490 samples, 0.37%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (211 samples, 0.16%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (84 samples, 0.06%)tokio::runtime::task::core::Header::get_owner_id (18 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (18 samples, 0.01%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (20 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::remove (19 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (31 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (29 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage (108 samples, 0.08%)tokio::runtime::task::core::TaskIdGuard::enter (14 samples, 
0.01%)tokio::runtime::context::set_current_task_id (14 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (14 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::complete (21 samples, 0.02%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (32 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (54 samples, 0.04%)tokio::runtime::task::raw::drop_abort_handle (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (17 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (22 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (22 samples, 0.02%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (79 samples, 0.06%)core::slice::<impl [T]>::contains (178 samples, 0.14%)<T as core::slice::cmp::SliceContains>::slice_contains (178 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (178 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (40 samples, 0.03%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (40 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (216 samples, 0.16%)tokio::loom::std::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (219 samples, 0.17%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (29 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (29 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (54 samples, 
0.04%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (18 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (18 samples, 0.01%)core::sync::atomic::AtomicU32::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (113 samples, 0.09%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (51 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (31 samples, 0.02%)core::sync::atomic::AtomicU64::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (447 samples, 0.34%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (174 samples, 0.13%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (19 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (489 samples, 0.37%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (489 samples, 0.37%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run (484 samples, 0.37%)tokio::runtime::context::runtime::enter_runtime (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (484 samples, 0.37%)tokio::runtime::context::set_scheduler (484 samples, 0.37%)std::thread::local::LocalKey<T>::with (484 samples, 0.37%)std::thread::local::LocalKey<T>::try_with (484 samples, 0.37%)tokio::runtime::context::set_scheduler::{{closure}} (484 samples, 0.37%)tokio::runtime::context::scoped::Scoped<T>::set (484 samples, 
0.37%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Context::run (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (24 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (20 samples, 0.02%)tokio::runtime::task::raw::poll (515 samples, 0.39%)tokio::runtime::task::harness::Harness<T,S>::poll (493 samples, 0.38%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (493 samples, 0.38%)tokio::runtime::task::harness::poll_future (493 samples, 0.38%)std::panic::catch_unwind (493 samples, 0.38%)std::panicking::try (493 samples, 0.38%)std::panicking::try::do_call (493 samples, 0.38%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (493 samples, 0.38%)tokio::runtime::task::harness::poll_future::{{closure}} (493 samples, 0.38%)tokio::runtime::task::core::Core<T,S>::poll (493 samples, 0.38%)tokio::runtime::time::wheel::Wheel::next_expiration (16 samples, 0.01%)torrust_tracker::core::Tracker::authorize::{{closure}} (27 samples, 0.02%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (15 samples, 0.01%)torrust_tracker::core::Tracker::send_stats_event::{{closure}} (44 samples, 0.03%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (15 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::d_rounds (29 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (74 samples, 0.06%)torrust_tracker::servers::udp::peer_builder::from_request (17 samples, 0.01%)torrust_tracker::servers::udp::request::AnnounceWrapper::new (51 samples, 
0.04%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (54 samples, 0.04%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (58 samples, 0.04%)torrust_tracker::core::Tracker::announce::{{closure}} (70 samples, 0.05%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (113 samples, 0.09%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (175 samples, 0.13%)<T as alloc::string::ToString>::to_string (38 samples, 0.03%)core::option::Option<T>::expect (56 samples, 0.04%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (18 samples, 0.01%)<T as alloc::string::ToString>::to_string (18 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (180 samples, 0.14%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (468 samples, 0.36%)torrust_tracker::servers::udp::logging::log_response (38 samples, 0.03%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (669 samples, 0.51%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (152 samples, 0.12%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (147 samples, 0.11%)tokio::net::udp::UdpSocket::send_to::{{closure}} (138 samples, 0.11%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (119 samples, 0.09%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (75 samples, 0.06%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (39 samples, 0.03%)mio::net::udp::UdpSocket::send_to (39 samples, 0.03%)mio::io_source::IoSource<T>::do_io (39 samples, 0.03%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (39 samples, 0.03%)mio::net::udp::UdpSocket::send_to::{{closure}} (39 samples, 0.03%)std::net::udp::UdpSocket::send_to (39 samples, 0.03%)std::sys_common::net::UdpSocket::send_to (39 samples, 0.03%)std::sys::pal::unix::cvt (39 samples, 0.03%)<isize as 
std::sys::pal::unix::IsMinusOne>::is_minus_one (39 samples, 0.03%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_stats (15 samples, 0.01%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (14 samples, 0.01%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count::to_usize::{{closure}} (33 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats::{{closure}} (33 samples, 0.03%)torrust_tracker_primitives::peer::Peer::is_seeder (33 samples, 0.03%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count (75 samples, 0.06%)core::iter::traits::iterator::Iterator::sum (75 samples, 0.06%)<usize as core::iter::traits::accum::Sum>::sum (75 samples, 0.06%)<core::iter::adapters::map::Map<I,F> as core::iter::traits::iterator::Iterator>::fold (75 samples, 0.06%)core::iter::traits::iterator::Iterator::fold (75 samples, 0.06%)core::iter::adapters::map::map_fold::{{closure}} (34 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (104 samples, 0.08%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (24 samples, 0.02%)core::mem::drop (15 samples, 0.01%)core::ptr::drop_in_place<core::option::Option<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (15 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (15 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (15 samples, 
0.01%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (215 samples, 0.16%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (198 samples, 0.15%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (89 samples, 0.07%)core::option::Option<T>::is_some_and (32 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (31 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (30 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (30 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (26 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (34 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (34 samples, 0.03%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (58 samples, 0.04%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (58 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (58 samples, 0.04%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (58 samples, 0.04%)<u8 as core::slice::cmp::SliceOrd>::compare (58 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (20 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (238 samples, 0.18%)alloc::collections::btree::search::<impl 
alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (236 samples, 0.18%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (208 samples, 0.16%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (208 samples, 0.16%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (282 samples, 0.21%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (67 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (53 samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (53 samples, 0.04%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (22 samples, 0.02%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (22 samples, 0.02%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (22 samples, 0.02%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (22 samples, 0.02%)<u8 as core::slice::cmp::SliceOrd>::compare (22 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (18 samples, 0.01%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (23 samples, 0.02%)<core::slice::iter::Iter<T> as 
core::iter::traits::iterator::Iterator>::next (23 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (43 samples, 0.03%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (43 samples, 0.03%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (43 samples, 0.03%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (43 samples, 0.03%)<u8 as core::slice::cmp::SliceOrd>::compare (43 samples, 0.03%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (17 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (151 samples, 0.12%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (145 samples, 0.11%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (137 samples, 0.10%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (137 samples, 0.10%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (266 samples, 0.20%)core::sync::atomic::AtomicU32::load (27 samples, 0.02%)core::sync::atomic::atomic_load (27 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (38 samples, 0.03%)std::sync::rwlock::RwLock<T>::read (37 samples, 0.03%)std::sys::sync::rwlock::futex::RwLock::read (36 samples, 
0.03%)tracing::span::Span::log (16 samples, 0.01%)tracing::span::Span::record_all (70 samples, 0.05%)unlink_chunk (139 samples, 0.11%)rand::rng::Rng::gen (30 samples, 0.02%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (30 samples, 0.02%)rand::rng::Rng::gen (30 samples, 0.02%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (30 samples, 0.02%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (30 samples, 0.02%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (30 samples, 0.02%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (30 samples, 0.02%)rand_core::block::BlockRng<R>::generate_and_set (28 samples, 0.02%)[anon] (8,759 samples, 6.67%)[anon]uuid::v4::<impl uuid::Uuid>::new_v4 (32 samples, 0.02%)uuid::rng::bytes (32 samples, 0.02%)rand::random (32 samples, 0.02%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (15 samples, 0.01%)_int_free (338 samples, 0.26%)tcache_put (18 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (22 samples, 0.02%)hashbrown::raw::h2 (14 samples, 0.01%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (23 samples, 0.02%)hashbrown::raw::RawTableInner::find_or_find_insert_slot_inner (17 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (25 samples, 0.02%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (15 samples, 0.01%)[profiling] (545 samples, 0.42%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (32 samples, 0.02%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (22 samples, 
0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (16 samples, 0.01%)alloc::vec::Vec<T,A>::reserve (30 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve (28 samples, 0.02%)<alloc::string::String as core::fmt::Write>::write_str (83 samples, 0.06%)alloc::string::String::push_str (57 samples, 0.04%)alloc::vec::Vec<T,A>::extend_from_slice (57 samples, 0.04%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (57 samples, 0.04%)alloc::vec::Vec<T,A>::append_elements (57 samples, 0.04%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (20 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (41 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (151 samples, 0.12%)core::hash::sip::u8to64_le (50 samples, 0.04%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (33 samples, 0.03%)tokio::runtime::context::CONTEXT::__getit (35 samples, 0.03%)core::cell::Cell<T>::get (33 samples, 0.03%)[unknown] (20 samples, 0.02%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (75 samples, 0.06%)core::ops::function::FnMut::call_mut (66 samples, 0.05%)tokio::runtime::coop::poll_proceed (66 samples, 0.05%)tokio::runtime::context::budget (66 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (66 samples, 0.05%)tokio::runtime::context::budget::{{closure}} (27 samples, 0.02%)tokio::runtime::coop::poll_proceed::{{closure}} (27 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (110 samples, 0.08%)[unknown] (15 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (27 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (27 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (14 samples, 
0.01%)tokio::loom::std::mutex::Mutex<T>::lock (84 samples, 0.06%)std::sync::mutex::Mutex<T>::lock (70 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock (59 samples, 0.04%)core::sync::atomic::AtomicU32::compare_exchange (55 samples, 0.04%)core::sync::atomic::atomic_compare_exchange (55 samples, 0.04%)[unknown] (33 samples, 0.03%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (214 samples, 0.16%)__memcpy_avx512_unaligned_erms (168 samples, 0.13%)[profiling] (171 samples, 0.13%)binascii::bin2hex (77 samples, 0.06%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (280 samples, 0.21%)[unknown] (317 samples, 0.24%)[[vdso]] (2,648 samples, 2.02%)[..[unknown] (669 samples, 0.51%)[unknown] (396 samples, 0.30%)[unknown] (251 samples, 0.19%)[unknown] (65 samples, 0.05%)[unknown] (30 samples, 0.02%)[unknown] (21 samples, 0.02%)__GI___clock_gettime (56 samples, 0.04%)arena_for_chunk (72 samples, 0.05%)arena_for_chunk (62 samples, 0.05%)heap_for_ptr (49 samples, 0.04%)heap_max_size (28 samples, 0.02%)__GI___libc_free (194 samples, 0.15%)arena_for_chunk (19 samples, 0.01%)checked_request2size (24 samples, 0.02%)__GI___libc_malloc (220 samples, 0.17%)tcache_get (44 samples, 0.03%)__GI___libc_write (25 samples, 0.02%)__GI___libc_write (14 samples, 0.01%)__GI___pthread_disable_asynccancel (97 samples, 0.07%)core::num::<impl u128>::leading_zeros (15 samples, 0.01%)compiler_builtins::float::conv::int_to_float::u128_to_f64_bits (72 samples, 0.05%)__floattidf (90 samples, 0.07%)compiler_builtins::float::conv::__floattidf (86 samples, 0.07%)exp_inline (40 samples, 0.03%)log_inline (64 samples, 0.05%)__ieee754_pow_fma (114 samples, 0.09%)__libc_calloc (106 samples, 0.08%)__libc_recvfrom (252 samples, 
0.19%)__libc_sendto (133 samples, 0.10%)__memcmp_evex_movbe (137 samples, 0.10%)__memcpy_avx512_unaligned_erms (1,399 samples, 1.07%)__posix_memalign (172 samples, 0.13%)__posix_memalign (80 samples, 0.06%)_mid_memalign (71 samples, 0.05%)arena_for_chunk (14 samples, 0.01%)__pow (18 samples, 0.01%)__vdso_clock_gettime (40 samples, 0.03%)[unknown] (24 samples, 0.02%)_int_free (462 samples, 0.35%)tcache_put (54 samples, 0.04%)[unknown] (14 samples, 0.01%)_int_malloc (508 samples, 0.39%)_int_memalign (68 samples, 0.05%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (54 samples, 0.04%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (14 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (78 samples, 0.06%)alloc::raw_vec::RawVec<T,A>::grow_amortized (73 samples, 0.06%)alloc::raw_vec::finish_grow (91 samples, 0.07%)core::result::Result<T,E>::map_err (31 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Weak<ring::ec::curve25519::ed25519::signing::Ed25519KeyPair,&alloc::alloc::Global>> (16 samples, 0.01%)<alloc::sync::Weak<T,A> as core::ops::drop::Drop>::drop (16 samples, 0.01%)core::mem::drop (18 samples, 0.01%)alloc::sync::Arc<T,A>::drop_slow (21 samples, 0.02%)alloc_new_heap (49 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (49 samples, 0.04%)core::fmt::Formatter::pad_integral (40 samples, 0.03%)core::fmt::Formatter::pad_integral::write_prefix (19 samples, 0.01%)core::fmt::write (20 samples, 0.02%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (155 samples, 0.12%)core::ptr::drop_in_place<core::option::Option<core::task::wake::Waker>> (71 samples, 0.05%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (245 samples, 0.19%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_announce::{{closure}}> (33 samples, 
0.03%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}}> (37 samples, 0.03%)core::str::converts::from_utf8 (33 samples, 0.03%)core::str::validations::run_utf8_validation (20 samples, 0.02%)epoll_wait (31 samples, 0.02%)hashbrown::map::HashMap<K,V,S,A>::insert (17 samples, 0.01%)rand_chacha::guts::refill_wide (19 samples, 0.01%)std_detect::detect::arch::x86::__is_feature_detected::avx2 (17 samples, 0.01%)std_detect::detect::check_for (17 samples, 0.01%)std_detect::detect::cache::test (17 samples, 0.01%)std_detect::detect::cache::Cache::test (17 samples, 0.01%)core::sync::atomic::AtomicUsize::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)std::sys::pal::unix::time::Timespec::new (29 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (132 samples, 0.10%)core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::ge (22 samples, 0.02%)core::cmp::PartialOrd::ge (22 samples, 0.02%)std::sys::pal::unix::time::Timespec::sub_timespec (67 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock_contended (18 samples, 0.01%)std::sys_common::net::TcpListener::socket_addr (29 samples, 0.02%)std::sys_common::net::sockname (28 samples, 0.02%)syscall (552 samples, 0.42%)core::ptr::drop_in_place<core::cell::RefMut<core::option::Option<alloc::boxed::Box<tokio::runtime::scheduler::multi_thread::worker::Core>>>> (74 samples, 0.06%)core::ptr::drop_in_place<core::cell::BorrowRefMut> (74 samples, 0.06%)<core::cell::BorrowRefMut as core::ops::drop::Drop>::drop (74 samples, 0.06%)core::cell::Cell<T>::set (74 samples, 0.06%)core::cell::Cell<T>::replace (74 samples, 0.06%)core::mem::replace (74 samples, 0.06%)core::ptr::write (74 samples, 0.06%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::push_back_or_overflow (14 samples, 0.01%)tokio::runtime::context::with_scheduler (176 samples, 0.13%)std::thread::local::LocalKey<T>::try_with (152 samples, 0.12%)tokio::runtime::context::with_scheduler::{{closure}} 
(151 samples, 0.12%)tokio::runtime::context::scoped::Scoped<T>::with (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (71 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (16 samples, 0.01%)core::option::Option<T>::map (19 samples, 0.01%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (24 samples, 0.02%)mio::poll::Poll::poll (53 samples, 0.04%)mio::sys::unix::selector::epoll::Selector::select (53 samples, 0.04%)core::result::Result<T,E>::map (28 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (28 samples, 0.02%)tokio::io::ready::Ready::from_mio (14 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (126 samples, 0.10%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (18 samples, 0.01%)[unknown] (51 samples, 0.04%)[unknown] (100 samples, 0.08%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (326 samples, 0.25%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (205 samples, 0.16%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (77 samples, 0.06%)[unknown] (26 samples, 0.02%)<tokio::util::linked_list::DrainFilter<T,F> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (396 samples, 0.30%)tokio::loom::std::mutex::Mutex<T>::lock (18 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (573 samples, 
0.44%)core::sync::atomic::AtomicUsize::fetch_add (566 samples, 0.43%)core::sync::atomic::atomic_add (566 samples, 0.43%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (635 samples, 0.48%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (25 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::next_remote_task (44 samples, 0.03%)tokio::runtime::scheduler::inject::shared::Shared<T>::is_empty (21 samples, 0.02%)tokio::runtime::scheduler::inject::shared::Shared<T>::len (21 samples, 0.02%)core::sync::atomic::AtomicUsize::load (21 samples, 0.02%)core::sync::atomic::atomic_load (21 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id (32 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (32 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (32 samples, 0.02%)std::sync::poison::Flag::done (32 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>,tokio::runtime::task::core::Header>>> (43 samples, 0.03%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (43 samples, 0.03%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (123 samples, 0.09%)tokio::runtime::task::list::OwnedTasks<S>::remove (117 samples, 0.09%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (80 samples, 0.06%)tokio::runtime::scheduler::defer::Defer::wake (17 samples, 0.01%)std::sys::pal::unix::futex::futex_wait (46 samples, 
0.04%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (71 samples, 0.05%)std::sync::condvar::Condvar::wait (56 samples, 0.04%)std::sys::sync::condvar::futex::Condvar::wait (56 samples, 0.04%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (56 samples, 0.04%)core::sync::atomic::AtomicUsize::compare_exchange (37 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (37 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (138 samples, 0.11%)tokio::runtime::driver::Driver::park (77 samples, 0.06%)tokio::runtime::driver::TimeDriver::park (77 samples, 0.06%)tokio::runtime::time::Driver::park (75 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Parker::park (266 samples, 0.20%)tokio::runtime::scheduler::multi_thread::park::Inner::park (266 samples, 0.20%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (432 samples, 0.33%)tokio::runtime::scheduler::multi_thread::worker::Core::should_notify_others (26 samples, 0.02%)core::cell::RefCell<T>::borrow_mut (94 samples, 0.07%)core::cell::RefCell<T>::try_borrow_mut (94 samples, 0.07%)core::cell::BorrowRefMut::new (94 samples, 0.07%)tokio::runtime::coop::budget (142 samples, 0.11%)tokio::runtime::coop::with_budget (142 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (121 samples, 0.09%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (44 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (208 samples, 0.16%)tokio::runtime::signal::Driver::process (30 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (46 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (46 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (35 samples, 0.03%)tokio::runtime::task::core::Core<T,S>::set_stage (75 samples, 0.06%)core::sync::atomic::AtomicUsize::fetch_xor (76 samples, 
0.06%)core::sync::atomic::atomic_xor (76 samples, 0.06%)tokio::runtime::task::state::State::transition_to_complete (79 samples, 0.06%)tokio::runtime::task::harness::Harness<T,S>::complete (113 samples, 0.09%)tokio::runtime::task::state::State::transition_to_terminal (18 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::dealloc (28 samples, 0.02%)core::mem::drop (18 samples, 0.01%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (18 samples, 0.01%)core::ptr::drop_in_place<tokio::util::sharded_list::ShardGuard<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>> (16 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>>> (16 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (16 samples, 0.01%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (53 samples, 0.04%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (21 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (113 samples, 0.09%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (15 samples, 0.01%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (15 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (15 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (15 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (14 samples, 0.01%)tokio::runtime::task::raw::drop_abort_handle (82 samples, 
0.06%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (23 samples, 0.02%)tokio::runtime::task::state::State::ref_dec (23 samples, 0.02%)core::sync::atomic::AtomicUsize::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::task::raw::drop_join_handle_slow (34 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::drop_join_handle_slow (32 samples, 0.02%)tokio::runtime::task::state::State::unset_join_interested (23 samples, 0.02%)tokio::runtime::task::state::State::fetch_update (23 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (43 samples, 0.03%)core::num::<impl u32>::wrapping_add (23 samples, 0.02%)core::option::Option<T>::or_else (37 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (36 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (36 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (38 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (59 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (45 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (132 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (63 samples, 0.05%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run (290 samples, 0.22%)tokio::runtime::context::runtime::enter_runtime (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (290 samples, 
0.22%)tokio::runtime::context::set_scheduler (290 samples, 0.22%)std::thread::local::LocalKey<T>::with (290 samples, 0.22%)std::thread::local::LocalKey<T>::try_with (290 samples, 0.22%)tokio::runtime::context::set_scheduler::{{closure}} (290 samples, 0.22%)tokio::runtime::context::scoped::Scoped<T>::set (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::Context::run (290 samples, 0.22%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (327 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (322 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::poll (333 samples, 0.25%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (342 samples, 0.26%)tokio::runtime::task::harness::poll_future::{{closure}} (342 samples, 0.26%)tokio::runtime::task::harness::poll_future (348 samples, 0.27%)std::panic::catch_unwind (347 samples, 0.26%)std::panicking::try (347 samples, 0.26%)std::panicking::try::do_call (347 samples, 0.26%)core::sync::atomic::AtomicUsize::compare_exchange (18 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (18 samples, 0.01%)tokio::runtime::task::state::State::transition_to_running (47 samples, 0.04%)tokio::runtime::task::state::State::fetch_update_action (47 samples, 0.04%)tokio::runtime::task::state::State::transition_to_running::{{closure}} (19 samples, 0.01%)tokio::runtime::task::raw::poll (427 samples, 0.33%)tokio::runtime::task::harness::Harness<T,S>::poll (408 samples, 0.31%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (407 samples, 0.31%)tokio::runtime::task::state::State::transition_to_idle (17 samples, 0.01%)core::array::<impl core::default::Default for [T: 32]>::default (21 samples, 0.02%)tokio::runtime::time::wheel::Wheel::poll (14 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (72 samples, 
0.05%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (23 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (15 samples, 0.01%)tokio::runtime::time::source::TimeSource::now (14 samples, 0.01%)tokio::runtime::time::Driver::park_internal (155 samples, 0.12%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (96 samples, 0.07%)tokio::runtime::time::wheel::level::slot_range (35 samples, 0.03%)core::num::<impl usize>::pow (35 samples, 0.03%)tokio::runtime::time::wheel::level::level_range (39 samples, 0.03%)tokio::runtime::time::wheel::level::slot_range (33 samples, 0.03%)core::num::<impl usize>::pow (33 samples, 0.03%)tokio::runtime::time::wheel::level::Level::next_expiration (208 samples, 0.16%)tokio::runtime::time::wheel::level::slot_range (48 samples, 0.04%)core::num::<impl usize>::pow (48 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (277 samples, 0.21%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::is_empty (18 samples, 0.01%)core::option::Option<T>::is_some (18 samples, 0.01%)torrust_tracker::core::Tracker::authorize::{{closure}} (50 samples, 0.04%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (37 samples, 0.03%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (27 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (19 samples, 0.01%)core::iter::traits::iterator::Iterator::collect (17 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (17 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (17 samples, 0.01%)<alloc::vec::Vec<T> as 
alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (17 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (20 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (20 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (20 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (62 samples, 0.05%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (40 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (27 samples, 0.02%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (17 samples, 0.01%)torrust_tracker::servers::udp::peer_builder::from_request (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (19 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (355 samples, 0.27%)<F as core::future::into_future::IntoFuture>::into_future (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (37 samples, 0.03%)core::sync::atomic::AtomicUsize::fetch_add (25 samples, 0.02%)core::sync::atomic::atomic_add (25 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet (14 samples, 0.01%)core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (20 samples, 0.02%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)core::result::Result<T,E>::map_err (16 samples, 0.01%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (136 samples, 0.10%)torrust_tracker::core::Tracker::announce::{{closure}} (173 samples, 0.13%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (267 samples, 
0.20%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (30 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (423 samples, 0.32%)core::fmt::Formatter::new (26 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (80 samples, 0.06%)core::fmt::num::imp::fmt_u64 (58 samples, 0.04%)core::intrinsics::copy_nonoverlapping (15 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (74 samples, 0.06%)core::fmt::num::imp::fmt_u64 (70 samples, 0.05%)<T as alloc::string::ToString>::to_string (207 samples, 0.16%)core::option::Option<T>::expect (19 samples, 0.01%)core::ptr::drop_in_place<alloc::string::String> (18 samples, 0.01%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (18 samples, 0.01%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (18 samples, 0.01%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (18 samples, 0.01%)torrust_tracker::servers::udp::logging::map_action_name (25 samples, 0.02%)alloc::str::<impl alloc::borrow::ToOwned for str>::to_owned (14 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (345 samples, 0.26%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (18 samples, 0.01%)core::fmt::num::imp::fmt_u64 (14 samples, 0.01%)<T as alloc::string::ToString>::to_string (35 samples, 0.03%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (1,067 samples, 0.81%)torrust_tracker::servers::udp::logging::log_response (72 samples, 0.05%)alloc::vec::from_elem (68 samples, 0.05%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (68 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (68 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (68 samples, 0.05%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (68 samples, 0.05%)alloc::alloc::Global::alloc_impl (68 samples, 0.05%)alloc::alloc::alloc_zeroed (68 samples, 0.05%)__rdl_alloc_zeroed (68 samples, 
0.05%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (68 samples, 0.05%)[unknown] (48 samples, 0.04%)[unknown] (16 samples, 0.01%)[unknown] (28 samples, 0.02%)std::sys::pal::unix::cvt (134 samples, 0.10%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (134 samples, 0.10%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (1,908 samples, 1.45%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (504 samples, 0.38%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (382 samples, 0.29%)tokio::net::udp::UdpSocket::send_to::{{closure}} (344 samples, 0.26%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (332 samples, 0.25%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (304 samples, 0.23%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (215 samples, 0.16%)mio::net::udp::UdpSocket::send_to (185 samples, 0.14%)mio::io_source::IoSource<T>::do_io (185 samples, 0.14%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (185 samples, 0.14%)mio::net::udp::UdpSocket::send_to::{{closure}} (185 samples, 0.14%)std::net::udp::UdpSocket::send_to (185 samples, 0.14%)std::sys_common::net::UdpSocket::send_to (169 samples, 0.13%)alloc::vec::Vec<T>::with_capacity (17 samples, 0.01%)alloc::vec::Vec<T,A>::with_capacity_in (17 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (104 samples, 0.08%)tokio::net::udp::UdpSocket::ready::{{closure}} (85 samples, 0.06%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (190 samples, 0.14%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (49 samples, 0.04%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (28 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (330 samples, 
0.25%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (327 samples, 0.25%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (92 samples, 0.07%)tokio::task::spawn::spawn (92 samples, 0.07%)tokio::task::spawn::spawn_inner (92 samples, 0.07%)tokio::runtime::context::current::with_current (92 samples, 0.07%)std::thread::local::LocalKey<T>::try_with (92 samples, 0.07%)tokio::runtime::context::current::with_current::{{closure}} (92 samples, 0.07%)core::option::Option<T>::map (92 samples, 0.07%)tokio::task::spawn::spawn_inner::{{closure}} (92 samples, 0.07%)tokio::runtime::scheduler::Handle::spawn (92 samples, 0.07%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (92 samples, 0.07%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (92 samples, 0.07%)tokio::runtime::task::list::OwnedTasks<S>::bind (90 samples, 0.07%)tokio::runtime::task::new_task (89 samples, 0.07%)tokio::runtime::task::raw::RawTask::new (89 samples, 0.07%)tokio::runtime::task::core::Cell<T,S>::new (89 samples, 0.07%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (34 samples, 0.03%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (27 samples, 0.02%)alloc::sync::Arc<T>::new (21 samples, 0.02%)alloc::boxed::Box<T>::new (21 samples, 0.02%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (152 samples, 0.12%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (125 samples, 0.10%)torrust_tracker_torrent_repository::entry::single::<impl 
torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (88 samples, 0.07%)core::option::Option<T>::is_some_and (18 samples, 0.01%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (17 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (17 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (17 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (22 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (22 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (17 samples, 0.01%)std::sync::rwlock::RwLock<T>::read (16 samples, 0.01%)std::sys::sync::rwlock::futex::RwLock::read (16 samples, 0.01%)tracing::span::Span::log (26 samples, 0.02%)core::fmt::Arguments::new_v1 (15 samples, 0.01%)tracing_core::span::Record::is_empty (34 samples, 
0.03%)tracing_core::field::ValueSet::is_empty (34 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::all (22 samples, 0.02%)tracing_core::field::ValueSet::is_empty::{{closure}} (18 samples, 0.01%)core::option::Option<T>::is_none (16 samples, 0.01%)core::option::Option<T>::is_some (16 samples, 0.01%)tracing::span::Span::record_all (143 samples, 0.11%)unlink_chunk (185 samples, 0.14%)uuid::builder::Builder::with_variant (48 samples, 0.04%)[unknown] (40 samples, 0.03%)uuid::builder::Builder::from_random_bytes (77 samples, 0.06%)uuid::builder::Builder::with_version (29 samples, 0.02%)[unknown] (24 samples, 0.02%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (161 samples, 0.12%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (161 samples, 0.12%)[unknown] (92 samples, 0.07%)rand::rng::Rng::gen (162 samples, 0.12%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (162 samples, 0.12%)rand::rng::Rng::gen (162 samples, 0.12%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (162 samples, 0.12%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (162 samples, 0.12%)[unknown] (18,233 samples, 13.89%)[unknown]uuid::v4::<impl uuid::Uuid>::new_v4 (270 samples, 0.21%)uuid::rng::bytes (190 samples, 0.14%)rand::random (190 samples, 0.14%)__memcpy_avx512_unaligned_erms (69 samples, 0.05%)_int_free (23 samples, 0.02%)_int_malloc (23 samples, 0.02%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 
0.01%)advise_stack_range (31 samples, 0.02%)__GI_madvise (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (29 samples, 0.02%)[unknown] (28 samples, 0.02%)[unknown] (28 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (17 samples, 0.01%)std::sys::pal::unix::futex::futex_wait (31 samples, 0.02%)syscall (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (29 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (17 samples, 0.01%)std::sync::condvar::Condvar::wait_timeout (35 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait_timeout (35 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (35 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (56 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (56 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (56 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (56 samples, 0.04%)std::sys::pal::unix::futex::futex_wait (56 samples, 0.04%)syscall (56 samples, 0.04%)[unknown] (56 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (53 samples, 0.04%)[unknown] (52 samples, 0.04%)[unknown] (46 samples, 0.04%)[unknown] (39 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (15 samples, 0.01%)[[vdso]] (26 samples, 0.02%)[[vdso]] (263 
samples, 0.20%)__ieee754_pow_fma (26 samples, 0.02%)__pow (314 samples, 0.24%)std::f64::<impl f64>::powf (345 samples, 0.26%)__GI___clock_gettime (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (416 samples, 0.32%)std::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_processing_scheduled_tasks (24 samples, 0.02%)std::time::Instant::now (18 samples, 0.01%)std::sys::pal::unix::time::Instant::now (18 samples, 0.01%)mio::poll::Poll::poll (102 samples, 0.08%)mio::sys::unix::selector::epoll::Selector::select (102 samples, 0.08%)epoll_wait (99 samples, 0.08%)[unknown] (92 samples, 0.07%)[unknown] (91 samples, 0.07%)[unknown] (91 samples, 0.07%)[unknown] (88 samples, 0.07%)[unknown] (85 samples, 0.06%)[unknown] (84 samples, 0.06%)[unknown] (43 samples, 0.03%)[unknown] (29 samples, 0.02%)[unknown] (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (125 samples, 0.10%)tokio::runtime::scheduler::multi_thread::park::Parker::park_timeout (125 samples, 0.10%)tokio::runtime::driver::Driver::park_timeout (125 samples, 0.10%)tokio::runtime::driver::TimeDriver::park_timeout (125 samples, 0.10%)tokio::runtime::time::Driver::park_timeout (125 samples, 0.10%)tokio::runtime::time::Driver::park_internal (116 samples, 0.09%)tokio::runtime::io::driver::Driver::turn (116 samples, 0.09%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (148 samples, 0.11%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (111 samples, 0.08%)alloc::sync::Arc<T,A>::inner (111 samples, 0.08%)core::ptr::non_null::NonNull<T>::as_ref (111 samples, 0.08%)core::sync::atomic::AtomicUsize::compare_exchange (16 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (16 samples, 0.01%)core::bool::<impl bool>::then (88 samples, 
0.07%)std::sys::pal::unix::futex::futex_wait (13,339 samples, 10.16%)std::sys::pal::..syscall (13,003 samples, 9.90%)syscall[unknown] (12,895 samples, 9.82%)[unknown][unknown] (12,759 samples, 9.72%)[unknown][unknown] (12,313 samples, 9.38%)[unknown][unknown] (12,032 samples, 9.16%)[unknown][unknown] (11,734 samples, 8.94%)[unknown][unknown] (11,209 samples, 8.54%)[unknown][unknown] (10,265 samples, 7.82%)[unknown][unknown] (9,345 samples, 7.12%)[unknown][unknown] (8,623 samples, 6.57%)[unknown][unknown] (7,744 samples, 5.90%)[unknow..[unknown] (5,922 samples, 4.51%)[unkn..[unknown] (4,459 samples, 3.40%)[un..[unknown] (2,808 samples, 2.14%)[..[unknown] (1,275 samples, 0.97%)[unknown] (1,022 samples, 0.78%)[unknown] (738 samples, 0.56%)[unknown] (607 samples, 0.46%)[unknown] (155 samples, 0.12%)core::result::Result<T,E>::is_err (77 samples, 0.06%)core::result::Result<T,E>::is_ok (77 samples, 0.06%)std::sync::condvar::Condvar::wait (13,429 samples, 10.23%)std::sync::cond..std::sys::sync::condvar::futex::Condvar::wait (13,428 samples, 10.23%)std::sys::sync:..std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (13,428 samples, 10.23%)std::sys::sync:..std::sys::sync::mutex::futex::Mutex::lock (89 samples, 0.07%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (13,508 samples, 10.29%)tokio::runtime:..tokio::loom::std::mutex::Mutex<T>::lock (64 samples, 0.05%)std::sync::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (31 samples, 0.02%)core::sync::atomic::AtomicU32::compare_exchange (30 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (30 samples, 0.02%)core::sync::atomic::AtomicUsize::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (38 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Parker::park (34 samples, 
0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park (34 samples, 0.03%)core::array::<impl core::default::Default for [T: 32]>::default (17 samples, 0.01%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (19 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (33 samples, 0.03%)tokio::runtime::time::wheel::level::slot_range (15 samples, 0.01%)core::num::<impl usize>::pow (15 samples, 0.01%)tokio::runtime::time::wheel::level::level_range (17 samples, 0.01%)tokio::runtime::time::wheel::level::slot_range (15 samples, 0.01%)core::num::<impl usize>::pow (15 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_expiration (95 samples, 0.07%)tokio::runtime::time::wheel::level::slot_range (41 samples, 0.03%)core::num::<impl usize>::pow (41 samples, 0.03%)tokio::runtime::time::wheel::Wheel::next_expiration (129 samples, 0.10%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (202 samples, 0.15%)tokio::runtime::time::wheel::Wheel::poll_at (17 samples, 0.01%)tokio::runtime::time::wheel::Wheel::next_expiration (15 samples, 0.01%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (38 samples, 0.03%)core::option::Option<T>::map (38 samples, 0.03%)core::result::Result<T,E>::map (31 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (31 samples, 0.02%)alloc::vec::Vec<T,A>::set_len (17 samples, 0.01%)[[vdso]] (28 samples, 0.02%)[unknown] (11,031 samples, 8.40%)[unknown][unknown] (10,941 samples, 8.33%)[unknown][unknown] (10,850 samples, 8.26%)[unknown][unknown] (10,691 samples, 8.14%)[unknown][unknown] (10,070 samples, 7.67%)[unknown][unknown] (9,737 samples, 7.42%)[unknown][unknown] (7,659 samples, 5.83%)[unknow..[unknown] (6,530 samples, 4.97%)[unkno..[unknown] (5,633 samples, 4.29%)[unkn..[unknown] (5,055 samples, 3.85%)[unk..[unknown] (4,046 samples, 3.08%)[un..[unknown] (2,911 samples, 2.22%)[..[unknown] (2,115 samples, 
1.61%)[unknown] (1,226 samples, 0.93%)[unknown] (455 samples, 0.35%)[unknown] (408 samples, 0.31%)[unknown] (249 samples, 0.19%)[unknown] (202 samples, 0.15%)[unknown] (100 samples, 0.08%)mio::poll::Poll::poll (11,328 samples, 8.63%)mio::poll::P..mio::sys::unix::selector::epoll::Selector::select (11,328 samples, 8.63%)mio::sys::un..epoll_wait (11,229 samples, 8.55%)epoll_wait__GI___pthread_disable_asynccancel (50 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (47 samples, 0.04%)tokio::util::bit::Pack::pack (38 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (25 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (23 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (19 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (11,595 samples, 8.83%)tokio::runti..tokio::runtime::io::scheduled_io::ScheduledIo::wake (175 samples, 0.13%)__GI___clock_gettime (15 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (18 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (26 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (26 samples, 0.02%)tokio::time::clock::Clock::now (20 samples, 0.02%)tokio::time::clock::now (20 samples, 0.02%)std::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Instant::now (20 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (17 samples, 0.01%)tokio::runtime::time::Driver::park_internal (11,686 samples, 8.90%)tokio::runtim..tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (11,957 samples, 9.11%)tokio::runtim..tokio::runtime::driver::Driver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::driver::TimeDriver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::time::Driver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::scheduler::multi_thread::park::Parker::park (25,502 samples, 19.42%)tokio::runtime::scheduler::mul..tokio::runtime::scheduler::multi_thread::park::Inner::park (25,502 
samples, 19.42%)tokio::runtime::scheduler::mul..tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (25,547 samples, 19.46%)tokio::runtime::scheduler::mul..core::result::Result<T,E>::is_err (14 samples, 0.01%)core::result::Result<T,E>::is_ok (14 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (45 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (45 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (84 samples, 0.06%)std::sync::mutex::Mutex<T>::lock (81 samples, 0.06%)std::sys::sync::mutex::futex::Mutex::lock (73 samples, 0.06%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (122 samples, 0.09%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (90 samples, 0.07%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (90 samples, 0.07%)core::slice::<impl [T]>::contains (241 samples, 0.18%)<T as core::slice::cmp::SliceContains>::slice_contains (241 samples, 0.18%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (241 samples, 0.18%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (75 samples, 0.06%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (75 samples, 0.06%)core::sync::atomic::AtomicU32::compare_exchange (20 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (283 samples, 0.22%)tokio::loom::std::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (24 samples, 0.02%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (33 samples, 0.03%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (33 samples, 0.03%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (33 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::unpark_worker_by_id (98 
samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (401 samples, 0.31%)alloc::vec::Vec<T,A>::push (14 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (15 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (15 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::unlock (14 samples, 0.01%)core::result::Result<T,E>::is_err (15 samples, 0.01%)core::result::Result<T,E>::is_ok (15 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (22 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (22 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (63 samples, 0.05%)std::sync::mutex::Mutex<T>::lock (62 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock (59 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (106 samples, 0.08%)tokio::runtime::scheduler::multi_thread::idle::State::dec_num_unparked (14 samples, 0.01%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (21 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (17 samples, 0.01%)alloc::sync::Arc<T,A>::inner (17 samples, 0.01%)core::ptr::non_null::NonNull<T>::as_ref (17 samples, 0.01%)core::sync::atomic::AtomicU32::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (68 samples, 0.05%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (51 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (33 samples, 0.03%)core::sync::atomic::AtomicU64::load (16 samples, 0.01%)core::sync::atomic::atomic_load (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl 
tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (106 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::Context::park (26,672 samples, 20.31%)tokio::runtime::scheduler::multi..tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (272 samples, 0.21%)tokio::runtime::scheduler::multi_thread::worker::Core::has_tasks (33 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::has_tasks (24 samples, 0.02%)tokio::runtime::context::budget (18 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (18 samples, 0.01%)syscall (61 samples, 0.05%)__memcpy_avx512_unaligned_erms (172 samples, 0.13%)__memcpy_avx512_unaligned_erms (224 samples, 0.17%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (228 samples, 0.17%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (228 samples, 0.17%)std::panic::catch_unwind (415 samples, 0.32%)std::panicking::try (415 samples, 0.32%)std::panicking::try::do_call (415 samples, 0.32%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (415 samples, 0.32%)core::ops::function::FnOnce::call_once (415 samples, 0.32%)tokio::runtime::task::harness::Harness<T,S>::complete::{{closure}} (415 samples, 0.32%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (415 samples, 0.32%)tokio::runtime::task::core::Core<T,S>::set_stage (410 samples, 0.31%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (27 samples, 0.02%)core::result::Result<T,E>::is_err (43 samples, 0.03%)core::result::Result<T,E>::is_ok (43 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::complete (570 samples, 0.43%)tokio::runtime::task::harness::Harness<T,S>::release (155 samples, 0.12%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (152 samples, 
0.12%)tokio::runtime::task::list::OwnedTasks<S>::remove (152 samples, 0.12%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (103 samples, 0.08%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (65 samples, 0.05%)tokio::loom::std::mutex::Mutex<T>::lock (58 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (58 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (54 samples, 0.04%)std::io::stdio::stderr::INSTANCE (17 samples, 0.01%)tokio::runtime::coop::budget (26 samples, 0.02%)tokio::runtime::coop::with_budget (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (35 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (70 samples, 0.05%)__memcpy_avx512_unaligned_erms (42 samples, 0.03%)core::cmp::Ord::min (22 samples, 0.02%)core::cmp::min_by (22 samples, 0.02%)std::io::cursor::Cursor<T>::remaining_slice (27 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (30 samples, 0.02%)std::io::cursor::Cursor<T>::remaining_slice (24 samples, 0.02%)core::slice::index::<impl core::ops::index::Index<I> for [T]>::index (19 samples, 0.01%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::index (19 samples, 0.01%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (19 samples, 0.01%)<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (19 samples, 0.01%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (44 samples, 0.03%)std::io::impls::<impl std::io::Read for &[u8]>::read_exact (20 samples, 0.02%)byteorder::io::ReadBytesExt::read_i32 (46 samples, 0.04%)core::cmp::Ord::min (14 samples, 0.01%)core::cmp::min_by (14 samples, 0.01%)std::io::cursor::Cursor<T>::remaining_slice (19 samples, 
0.01%)byteorder::io::ReadBytesExt::read_i64 (24 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (24 samples, 0.02%)aquatic_udp_protocol::request::Request::from_bytes (349 samples, 0.27%)__GI___lll_lock_wake_private (148 samples, 0.11%)[unknown] (139 samples, 0.11%)[unknown] (137 samples, 0.10%)[unknown] (123 samples, 0.09%)[unknown] (111 samples, 0.08%)[unknown] (98 samples, 0.07%)[unknown] (42 samples, 0.03%)[unknown] (30 samples, 0.02%)__GI___lll_lock_wait_private (553 samples, 0.42%)futex_wait (541 samples, 0.41%)[unknown] (536 samples, 0.41%)[unknown] (531 samples, 0.40%)[unknown] (524 samples, 0.40%)[unknown] (515 samples, 0.39%)[unknown] (498 samples, 0.38%)[unknown] (470 samples, 0.36%)[unknown] (435 samples, 0.33%)[unknown] (350 samples, 0.27%)[unknown] (327 samples, 0.25%)[unknown] (290 samples, 0.22%)[unknown] (222 samples, 0.17%)[unknown] (160 samples, 0.12%)[unknown] (104 samples, 0.08%)[unknown] (33 samples, 0.03%)[unknown] (25 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (703 samples, 0.54%)__GI___libc_free (866 samples, 0.66%)tracing::span::Span::record_all (30 samples, 0.02%)unlink_chunk (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (899 samples, 0.68%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (899 samples, 0.68%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (899 samples, 0.68%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (899 samples, 0.68%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (899 samples, 0.68%)alloc::alloc::dealloc (899 samples, 0.68%)__rdl_dealloc (899 samples, 0.68%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (899 samples, 0.68%)core::result::Result<T,E>::expect (91 samples, 0.07%)core::result::Result<T,E>::map_err (28 samples, 0.02%)[[vdso]] (28 samples, 0.02%)__GI___clock_gettime (47 samples, 0.04%)std::time::Instant::elapsed (67 
samples, 0.05%)std::time::Instant::now (54 samples, 0.04%)std::sys::pal::unix::time::Instant::now (54 samples, 0.04%)std::sys::pal::unix::time::Timespec::now (53 samples, 0.04%)std::sys::pal::unix::cvt (23 samples, 0.02%)__GI_getsockname (3,792 samples, 2.89%)__..[unknown] (3,714 samples, 2.83%)[u..[unknown] (3,661 samples, 2.79%)[u..[unknown] (3,557 samples, 2.71%)[u..[unknown] (3,416 samples, 2.60%)[u..[unknown] (2,695 samples, 2.05%)[..[unknown] (2,063 samples, 1.57%)[unknown] (891 samples, 0.68%)[unknown] (270 samples, 0.21%)[unknown] (99 samples, 0.08%)[unknown] (94 samples, 0.07%)[unknown] (84 samples, 0.06%)[unknown] (77 samples, 0.06%)[unknown] (25 samples, 0.02%)[unknown] (16 samples, 0.01%)std::sys_common::net::TcpListener::socket_addr::{{closure}} (3,800 samples, 2.89%)st..tokio::net::udp::UdpSocket::local_addr (3,838 samples, 2.92%)to..mio::net::udp::UdpSocket::local_addr (3,838 samples, 2.92%)mi..std::net::tcp::TcpListener::local_addr (3,838 samples, 2.92%)st..std::sys_common::net::TcpListener::socket_addr (3,838 samples, 2.92%)st..std::sys_common::net::sockname (3,835 samples, 2.92%)st..[[vdso]] (60 samples, 0.05%)rand_chacha::guts::ChaCha::pos64 (168 samples, 0.13%)<ppv_lite86::soft::x2<W,G> as core::ops::arith::AddAssign>::add_assign (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::AddAssign>::add_assign (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::Add>::add (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_add_epi32 (26 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_or_si256 (29 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as 
ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (31 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (31 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right24 (18 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right24 (18 samples, 0.01%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (18 samples, 0.01%)rand_chacha::guts::round (118 samples, 0.09%)rand_chacha::guts::refill_wide::impl_avx2 (312 samples, 0.24%)rand_chacha::guts::refill_wide::fn_impl (312 samples, 0.24%)rand_chacha::guts::refill_wide_impl (312 samples, 0.24%)<rand_chacha::chacha::ChaCha12Core as rand_core::block::BlockRngCore>::generate (384 samples, 0.29%)rand_chacha::guts::ChaCha::refill4 (384 samples, 0.29%)rand::rng::Rng::gen (432 samples, 0.33%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (432 samples, 0.33%)rand::rng::Rng::gen (432 samples, 0.33%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (432 samples, 0.33%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (432 samples, 0.33%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (432 samples, 0.33%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (432 samples, 0.33%)rand_core::block::BlockRng<R>::generate_and_set (392 samples, 0.30%)<rand::rngs::adapter::reseeding::ReseedingCore<R,Rsdr> as rand_core::block::BlockRngCore>::generate (392 samples, 0.30%)torrust_tracker::servers::udp::handlers::RequestId::make (440 samples, 0.34%)uuid::v4::<impl uuid::Uuid>::new_v4 (436 samples, 0.33%)uuid::rng::bytes (435 samples, 0.33%)rand::random (435 samples, 
0.33%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (34 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (22 samples, 0.02%)core::iter::traits::iterator::Iterator::collect (16 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (16 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (16 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (16 samples, 0.01%)<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::next (15 samples, 0.01%)core::iter::traits::iterator::Iterator::find (15 samples, 0.01%)core::iter::traits::iterator::Iterator::try_fold (15 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (31 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.03%)core::slice::iter::Iter<T>::post_inc_start (14 samples, 0.01%)core::ptr::non_null::NonNull<T>::add (14 samples, 0.01%)__memcmp_evex_movbe (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (26 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (165 samples, 0.13%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (165 samples, 
0.13%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (165 samples, 0.13%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (165 samples, 0.13%)<u8 as core::slice::cmp::SliceOrd>::compare (165 samples, 0.13%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (339 samples, 0.26%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (308 samples, 0.23%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (308 samples, 0.23%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (342 samples, 0.26%)std::sys::sync::rwlock::futex::RwLock::spin_read (25 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_until (25 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read_contended (28 samples, 0.02%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (436 samples, 0.33%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (397 samples, 0.30%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (29 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (29 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (29 samples, 0.02%)__memcmp_evex_movbe (31 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (52 samples, 0.04%)core::array::<impl 
core::cmp::Ord for [T: N]>::cmp (52 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (52 samples, 0.04%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (52 samples, 0.04%)<u8 as core::slice::cmp::SliceOrd>::compare (52 samples, 0.04%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (103 samples, 0.08%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (102 samples, 0.08%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (96 samples, 0.07%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (96 samples, 0.07%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (72 samples, 0.05%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (104 samples, 0.08%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (104 samples, 0.08%)core::slice::iter::Iter<T>::post_inc_start (32 samples, 0.02%)core::ptr::non_null::NonNull<T>::add (32 samples, 0.02%)__memcmp_evex_movbe (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (81 samples, 0.06%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (271 samples, 0.21%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (271 samples, 0.21%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (271 samples, 0.21%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (271 samples, 0.21%)<u8 as core::slice::cmp::SliceOrd>::compare (271 samples, 0.21%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (610 samples, 0.46%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node 
(566 samples, 0.43%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (566 samples, 0.43%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Immut,K,V,Type>::keys (18 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (616 samples, 0.47%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::KV>::split (15 samples, 0.01%)alloc::collections::btree::map::entry::Entry<K,V,A>::or_insert (46 samples, 0.04%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (45 samples, 0.03%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (40 samples, 0.03%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert (27 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (29 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (20 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (120 samples, 0.09%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (118 samples, 0.09%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Owned,K,V,alloc::collections::btree::node::marker::Leaf>::new_leaf (118 samples, 0.09%)alloc::collections::btree::node::LeafNode<K,V>::new (118 samples, 0.09%)alloc::boxed::Box<T,A>::new_uninit_in (118 
samples, 0.09%)alloc::boxed::Box<T,A>::try_new_uninit_in (118 samples, 0.09%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (118 samples, 0.09%)alloc::alloc::Global::alloc_impl (118 samples, 0.09%)alloc::alloc::alloc (118 samples, 0.09%)__rdl_alloc (118 samples, 0.09%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (118 samples, 0.09%)__GI___libc_malloc (118 samples, 0.09%)_int_malloc (107 samples, 0.08%)_int_malloc (28 samples, 0.02%)__GI___libc_malloc (32 samples, 0.02%)__rdl_alloc (36 samples, 0.03%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (36 samples, 0.03%)alloc::sync::Arc<T>::new (42 samples, 0.03%)alloc::boxed::Box<T>::new (42 samples, 0.03%)alloc::alloc::exchange_malloc (39 samples, 0.03%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (39 samples, 0.03%)alloc::alloc::Global::alloc_impl (39 samples, 0.03%)alloc::alloc::alloc (39 samples, 0.03%)core::mem::drop (15 samples, 0.01%)core::ptr::drop_in_place<core::option::Option<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (15 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (15 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (15 samples, 0.01%)__GI___libc_free (39 samples, 0.03%)_int_free (37 samples, 0.03%)get_max_fast (16 samples, 0.01%)core::option::Option<T>::is_some_and (50 samples, 0.04%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (50 samples, 0.04%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (50 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (50 samples, 0.04%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for 
alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (290 samples, 0.22%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (284 samples, 0.22%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (255 samples, 0.19%)std::sys::sync::rwlock::futex::RwLock::spin_read (16 samples, 0.01%)std::sys::sync::rwlock::futex::RwLock::spin_until (16 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (21 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (21 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (21 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read_contended (21 samples, 0.02%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (1,147 samples, 0.87%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (1,144 samples, 0.87%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents_mut (32 samples, 
0.02%)std::sync::rwlock::RwLock<T>::write (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::write (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::write_contended (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_write (28 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_until (28 samples, 0.02%)torrust_tracker::core::Tracker::announce::{{closure}} (1,597 samples, 1.22%)<core::net::socket_addr::SocketAddrV4 as core::hash::Hash>::hash (14 samples, 0.01%)<core::net::ip_addr::Ipv4Addr as core::hash::Hash>::hash (14 samples, 0.01%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (29 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (24 samples, 0.02%)<core::time::Nanoseconds as core::hash::Hash>::hash (25 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (25 samples, 0.02%)core::hash::Hasher::write_u32 (25 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (25 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (25 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (36 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (37 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (37 samples, 0.03%)<core::time::Duration as core::hash::Hash>::hash (64 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (39 samples, 0.03%)core::hash::Hasher::write_u64 (39 samples, 0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (122 samples, 0.09%)core::hash::impls::<impl core::hash::Hash for u64>::hash (58 samples, 0.04%)core::hash::Hasher::write_u64 (58 samples, 0.04%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (58 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (58 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (57 samples, 
0.04%)core::hash::sip::u8to64_le (23 samples, 0.02%)core::hash::Hasher::write_length_prefix (27 samples, 0.02%)core::hash::Hasher::write_usize (27 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (16 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (246 samples, 0.19%)core::array::<impl core::hash::Hash for [T: N]>::hash (93 samples, 0.07%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (93 samples, 0.07%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (66 samples, 0.05%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (62 samples, 0.05%)core::hash::sip::u8to64_le (17 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::check (285 samples, 0.22%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (36 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (36 samples, 0.03%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (24 samples, 0.02%)std::time::SystemTime::now (19 samples, 0.01%)std::sys::pal::unix::time::SystemTime::now (19 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (1,954 samples, 1.49%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (24 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (18 samples, 0.01%)<core::time::Nanoseconds as core::hash::Hash>::hash (20 samples, 0.02%)core::hash::impls::<impl 
core::hash::Hash for u32>::hash (20 samples, 0.02%)core::hash::Hasher::write_u32 (20 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (20 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (20 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (44 samples, 0.03%)<core::time::Duration as core::hash::Hash>::hash (65 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (45 samples, 0.03%)core::hash::Hasher::write_u64 (45 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (45 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (45 samples, 0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (105 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u64>::hash (40 samples, 0.03%)core::hash::Hasher::write_u64 (40 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (40 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (40 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (39 samples, 0.03%)core::hash::Hasher::write_length_prefix (34 samples, 0.03%)core::hash::Hasher::write_usize (34 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (34 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (34 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (33 samples, 0.03%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (231 samples, 0.18%)core::array::<impl core::hash::Hash for [T: N]>::hash (100 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (100 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (66 samples, 0.05%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (66 samples, 
0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (61 samples, 0.05%)core::hash::sip::u8to64_le (16 samples, 0.01%)_int_free (16 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (270 samples, 0.21%)torrust_tracker::servers::udp::connection_cookie::make (268 samples, 0.20%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (36 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (35 samples, 0.03%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (31 samples, 0.02%)std::time::SystemTime::now (26 samples, 0.02%)std::sys::pal::unix::time::SystemTime::now (26 samples, 0.02%)torrust_tracker::core::ScrapeData::add_file (19 samples, 0.01%)std::collections::hash::map::HashMap<K,V,S>::insert (19 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (19 samples, 0.01%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (16 samples, 0.01%)hashbrown::raw::RawTable<T,A>::reserve (16 samples, 0.01%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (17 samples, 0.01%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (17 samples, 0.01%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (17 samples, 0.01%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (17 samples, 0.01%)<u8 as core::slice::cmp::SliceOrd>::compare (17 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (53 samples, 0.04%)alloc::collections::btree::search::<impl 
alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (53 samples, 0.04%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (2,336 samples, 1.78%)t..torrust_tracker::servers::udp::handlers::handle_scrape::{{closure}} (101 samples, 0.08%)torrust_tracker::core::Tracker::scrape::{{closure}} (90 samples, 0.07%)torrust_tracker::core::Tracker::get_swarm_metadata (68 samples, 0.05%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (64 samples, 0.05%)alloc::raw_vec::finish_grow (19 samples, 0.01%)alloc::vec::Vec<T,A>::reserve (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::grow_amortized (21 samples, 0.02%)<alloc::string::String as core::fmt::Write>::write_str (23 samples, 0.02%)alloc::string::String::push_str (23 samples, 0.02%)alloc::vec::Vec<T,A>::extend_from_slice (23 samples, 0.02%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (23 samples, 0.02%)alloc::vec::Vec<T,A>::append_elements (23 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (85 samples, 0.06%)core::fmt::num::imp::fmt_u64 (78 samples, 0.06%)<alloc::string::String as core::fmt::Write>::write_str (15 samples, 0.01%)alloc::string::String::push_str (15 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (15 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (15 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (15 samples, 
0.01%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (37 samples, 0.03%)core::fmt::num::imp::fmt_u64 (36 samples, 0.03%)<T as alloc::string::ToString>::to_string (141 samples, 0.11%)core::option::Option<T>::expect (34 samples, 0.03%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (28 samples, 0.02%)alloc::alloc::dealloc (28 samples, 0.02%)__rdl_dealloc (28 samples, 0.02%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (28 samples, 0.02%)core::ptr::drop_in_place<alloc::string::String> (55 samples, 0.04%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (55 samples, 0.04%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (55 samples, 0.04%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (55 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::current_memory (20 samples, 0.02%)torrust_tracker::servers::udp::logging::map_action_name (16 samples, 0.01%)binascii::bin2hex (51 samples, 0.04%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (16 samples, 0.01%)core::fmt::write (25 samples, 0.02%)core::fmt::rt::Argument::fmt (15 samples, 0.01%)core::fmt::Formatter::write_fmt (87 samples, 0.07%)core::str::converts::from_utf8 (43 samples, 0.03%)core::str::validations::run_utf8_validation (37 samples, 0.03%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (161 samples, 0.12%)<T as alloc::string::ToString>::to_string (161 samples, 0.12%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (156 samples, 0.12%)torrust_tracker::servers::udp::logging::log_request (479 samples, 0.36%)[[vdso]] (51 samples, 0.04%)alloc::raw_vec::finish_grow (56 samples, 0.04%)alloc::vec::Vec<T,A>::reserve (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::reserve (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::grow_amortized (64 samples, 0.05%)<alloc::string::String as 
core::fmt::Write>::write_str (65 samples, 0.05%)alloc::string::String::push_str (65 samples, 0.05%)alloc::vec::Vec<T,A>::extend_from_slice (65 samples, 0.05%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (65 samples, 0.05%)alloc::vec::Vec<T,A>::append_elements (65 samples, 0.05%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (114 samples, 0.09%)core::fmt::num::imp::fmt_u64 (110 samples, 0.08%)<T as alloc::string::ToString>::to_string (132 samples, 0.10%)core::option::Option<T>::expect (20 samples, 0.02%)core::ptr::drop_in_place<alloc::string::String> (22 samples, 0.02%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (22 samples, 0.02%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (22 samples, 0.02%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (22 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (8,883 samples, 6.77%)torrust_t..torrust_tracker::servers::udp::logging::log_response (238 samples, 0.18%)__GI___lll_lock_wait_private (14 samples, 0.01%)futex_wait (14 samples, 0.01%)__GI___lll_lock_wake_private (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (17 samples, 0.01%)_int_malloc (191 samples, 0.15%)__libc_calloc (238 samples, 0.18%)__memcpy_avx512_unaligned_erms (34 samples, 0.03%)alloc::vec::from_elem (316 samples, 0.24%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (316 samples, 0.24%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (316 samples, 0.24%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (316 samples, 0.24%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (312 samples, 0.24%)alloc::alloc::Global::alloc_impl (312 samples, 0.24%)alloc::alloc::alloc_zeroed (312 samples, 0.24%)__rdl_alloc_zeroed (312 samples, 0.24%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for 
std::alloc::System>::alloc_zeroed (312 samples, 0.24%)byteorder::ByteOrder::write_i32 (18 samples, 0.01%)<byteorder::BigEndian as byteorder::ByteOrder>::write_u32 (18 samples, 0.01%)core::num::<impl u32>::to_be_bytes (18 samples, 0.01%)core::num::<impl u32>::to_be (18 samples, 0.01%)core::num::<impl u32>::swap_bytes (18 samples, 0.01%)byteorder::io::WriteBytesExt::write_i32 (89 samples, 0.07%)std::io::Write::write_all (71 samples, 0.05%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (71 samples, 0.05%)std::io::cursor::vec_write (71 samples, 0.05%)std::io::cursor::vec_write_unchecked (51 samples, 0.04%)core::ptr::mut_ptr::<impl *mut T>::copy_from (51 samples, 0.04%)core::intrinsics::copy (51 samples, 0.04%)aquatic_udp_protocol::response::Response::write (227 samples, 0.17%)byteorder::io::WriteBytesExt::write_i64 (28 samples, 0.02%)std::io::Write::write_all (21 samples, 0.02%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (21 samples, 0.02%)std::io::cursor::vec_write (21 samples, 0.02%)std::io::cursor::vec_write_unchecked (21 samples, 0.02%)core::ptr::mut_ptr::<impl *mut T>::copy_from (21 samples, 0.02%)core::intrinsics::copy (21 samples, 0.02%)__GI___lll_lock_wake_private (17 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (14 samples, 0.01%)__GI___lll_lock_wait_private (16 samples, 0.01%)futex_wait (15 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (136 samples, 0.10%)__GI___libc_free (206 samples, 0.16%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (211 samples, 0.16%)alloc::alloc::dealloc (211 samples, 0.16%)__rdl_dealloc (211 samples, 0.16%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (211 samples, 0.16%)core::ptr::drop_in_place<std::io::cursor::Cursor<alloc::vec::Vec<u8>>> (224 samples, 0.17%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> 
(224 samples, 0.17%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (224 samples, 0.17%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (224 samples, 0.17%)std::io::cursor::Cursor<T>::new (56 samples, 0.04%)tokio::io::ready::Ready::intersection (23 samples, 0.02%)tokio::io::ready::Ready::from_interest (23 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (83 samples, 0.06%)[unknown] (32,674 samples, 24.88%)[unknown][unknown] (32,402 samples, 24.68%)[unknown][unknown] (32,272 samples, 24.58%)[unknown][unknown] (32,215 samples, 24.54%)[unknown][unknown] (31,174 samples, 23.74%)[unknown][unknown] (30,794 samples, 23.45%)[unknown][unknown] (30,036 samples, 22.88%)[unknown][unknown] (28,639 samples, 21.81%)[unknown][unknown] (27,908 samples, 21.25%)[unknown][unknown] (26,013 samples, 19.81%)[unknown][unknown] (23,181 samples, 17.65%)[unknown][unknown] (19,559 samples, 14.90%)[unknown][unknown] (18,052 samples, 13.75%)[unknown][unknown] (15,794 samples, 12.03%)[unknown][unknown] (14,740 samples, 11.23%)[unknown][unknown] (12,486 samples, 9.51%)[unknown][unknown] (11,317 samples, 8.62%)[unknown][unknown] (10,725 samples, 8.17%)[unknown][unknown] (10,017 samples, 7.63%)[unknown][unknown] (9,713 samples, 7.40%)[unknown][unknown] (8,432 samples, 6.42%)[unknown][unknown] (8,062 samples, 6.14%)[unknown][unknown] (6,973 samples, 5.31%)[unknow..[unknown] (5,328 samples, 4.06%)[unk..[unknown] (4,352 samples, 3.31%)[un..[unknown] (3,786 samples, 2.88%)[u..[unknown] (3,659 samples, 2.79%)[u..[unknown] (3,276 samples, 2.50%)[u..[unknown] (2,417 samples, 1.84%)[..[unknown] (2,115 samples, 1.61%)[unknown] (1,610 samples, 1.23%)[unknown] (422 samples, 0.32%)[unknown] (84 samples, 0.06%)[unknown] (69 samples, 0.05%)__GI___pthread_disable_asynccancel (67 samples, 0.05%)__libc_sendto (32,896 samples, 25.05%)__libc_sendtotokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (32,981 samples, 
25.12%)tokio::net::udp::UdpSocket::send_to_addr..mio::net::udp::UdpSocket::send_to (32,981 samples, 25.12%)mio::net::udp::UdpSocket::send_tomio::io_source::IoSource<T>::do_io (32,981 samples, 25.12%)mio::io_source::IoSource<T>::do_iomio::sys::unix::stateless_io_source::IoSourceState::do_io (32,981 samples, 25.12%)mio::sys::unix::stateless_io_source::IoS..mio::net::udp::UdpSocket::send_to::{{closure}} (32,981 samples, 25.12%)mio::net::udp::UdpSocket::send_to::{{clo..std::net::udp::UdpSocket::send_to (32,981 samples, 25.12%)std::net::udp::UdpSocket::send_tostd::sys_common::net::UdpSocket::send_to (32,981 samples, 25.12%)std::sys_common::net::UdpSocket::send_tostd::sys::pal::unix::cvt (85 samples, 0.06%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (44,349 samples, 33.78%)torrust_tracker::servers::udp::server::Udp::process_req..torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (43,412 samples, 33.06%)torrust_tracker::servers::udp::server::Udp::process_va..torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (34,320 samples, 26.14%)torrust_tracker::servers::udp::server::Udp..torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (33,360 samples, 25.41%)torrust_tracker::servers::udp::server::Ud..tokio::net::udp::UdpSocket::send_to::{{closure}} (33,227 samples, 25.31%)tokio::net::udp::UdpSocket::send_to::{{c..tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (33,142 samples, 25.24%)tokio::net::udp::UdpSocket::send_to_addr..tokio::runtime::io::registration::Registration::async_io::{{closure}} (33,115 samples, 25.22%)tokio::runtime::io::registration::Regist..tokio::runtime::io::registration::Registration::readiness::{{closure}} (28 samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (18 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (15 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as 
core::ops::drop::Drop>::drop (14 samples, 0.01%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (15 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (15 samples, 0.01%)core::sync::atomic::atomic_add (15 samples, 0.01%)__GI___lll_lock_wait_private (16 samples, 0.01%)futex_wait (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (135 samples, 0.10%)__GI___libc_free (147 samples, 0.11%)syscall (22 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Core<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (15 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::dealloc (24 samples, 0.02%)core::mem::drop (24 samples, 0.02%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (24 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (24 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::abort::AbortHandle> (262 samples, 0.20%)<tokio::runtime::task::abort::AbortHandle as core::ops::drop::Drop>::drop (262 samples, 0.20%)tokio::runtime::task::raw::RawTask::drop_abort_handle (256 samples, 0.19%)tokio::runtime::task::raw::drop_abort_handle (59 samples, 0.04%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (50 samples, 0.04%)tokio::runtime::task::state::State::ref_dec (50 samples, 0.04%)tokio::runtime::task::raw::RawTask::drop_join_handle_slow (16 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::join::JoinHandle<()>> (47 samples, 
0.04%)<tokio::runtime::task::join::JoinHandle<T> as core::ops::drop::Drop>::drop (47 samples, 0.04%)tokio::runtime::task::state::State::drop_join_handle_fast (19 samples, 0.01%)core::sync::atomic::AtomicUsize::compare_exchange_weak (19 samples, 0.01%)core::sync::atomic::atomic_compare_exchange_weak (19 samples, 0.01%)ringbuf::ring_buffer::base::RbBase::is_full (14 samples, 0.01%)<ringbuf::ring_buffer::shared::SharedRb<T,C> as ringbuf::ring_buffer::base::RbBase<T>>::head (14 samples, 0.01%)core::sync::atomic::AtomicUsize::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)ringbuf::consumer::Consumer<T,R>::advance (29 samples, 0.02%)ringbuf::ring_buffer::base::RbRead::advance_head (29 samples, 0.02%)ringbuf::ring_buffer::rb::Rb::pop (50 samples, 0.04%)ringbuf::consumer::Consumer<T,R>::pop (50 samples, 0.04%)ringbuf::producer::Producer<T,R>::advance (23 samples, 0.02%)ringbuf::ring_buffer::base::RbWrite::advance_tail (23 samples, 0.02%)core::num::nonzero::<impl core::ops::arith::Rem<core::num::nonzero::NonZero<usize>> for usize>::rem (19 samples, 0.01%)ringbuf::ring_buffer::rb::Rb::push_overwrite (107 samples, 0.08%)ringbuf::ring_buffer::rb::Rb::push (43 samples, 0.03%)ringbuf::producer::Producer<T,R>::push (43 samples, 0.03%)tokio::runtime::task::abort::AbortHandle::is_finished (84 samples, 0.06%)tokio::runtime::task::state::Snapshot::is_complete (84 samples, 0.06%)tokio::runtime::task::join::JoinHandle<T>::abort_handle (17 samples, 0.01%)tokio::runtime::task::raw::RawTask::ref_inc (17 samples, 0.01%)tokio::runtime::task::state::State::ref_inc (17 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (14 samples, 0.01%)core::sync::atomic::atomic_add (14 samples, 0.01%)__GI___lll_lock_wake_private (22 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)malloc_consolidate (95 samples, 
0.07%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (76 samples, 0.06%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (31 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (26 samples, 0.02%)_int_malloc (282 samples, 0.21%)__GI___libc_malloc (323 samples, 0.25%)alloc::vec::Vec<T>::with_capacity (326 samples, 0.25%)alloc::vec::Vec<T,A>::with_capacity_in (326 samples, 0.25%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (324 samples, 0.25%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (324 samples, 0.25%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (324 samples, 0.25%)alloc::alloc::Global::alloc_impl (324 samples, 0.25%)alloc::alloc::alloc (324 samples, 0.25%)__rdl_alloc (324 samples, 0.25%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (324 samples, 0.25%)tokio::io::ready::Ready::intersection (24 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (199 samples, 0.15%)tokio::util::bit::Pack::unpack (16 samples, 0.01%)tokio::util::bit::unpack (16 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (19 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (17 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (16 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (222 samples, 0.17%)tokio::net::udp::UdpSocket::ready::{{closure}} (222 samples, 0.17%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (50 samples, 0.04%)std::io::error::repr_bitpacked::Repr::data (14 samples, 0.01%)std::io::error::repr_bitpacked::decode_repr (14 samples, 0.01%)std::io::error::Error::kind (16 samples, 0.01%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (14 samples, 0.01%)[unknown] (8,756 samples, 6.67%)[unknown][unknown] (8,685 samples, 
6.61%)[unknown][unknown] (8,574 samples, 6.53%)[unknown][unknown] (8,415 samples, 6.41%)[unknown][unknown] (7,686 samples, 5.85%)[unknow..[unknown] (7,239 samples, 5.51%)[unknow..[unknown] (6,566 samples, 5.00%)[unkno..[unknown] (5,304 samples, 4.04%)[unk..[unknown] (4,008 samples, 3.05%)[un..[unknown] (3,571 samples, 2.72%)[u..[unknown] (2,375 samples, 1.81%)[..[unknown] (1,844 samples, 1.40%)[unknown] (1,030 samples, 0.78%)[unknown] (344 samples, 0.26%)[unknown] (113 samples, 0.09%)__libc_recvfrom (8,903 samples, 6.78%)__libc_re..__GI___pthread_disable_asynccancel (22 samples, 0.02%)std::sys::pal::unix::cvt (20 samples, 0.02%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (9,005 samples, 6.86%)tokio::ne..mio::net::udp::UdpSocket::recv_from (8,964 samples, 6.83%)mio::net:..mio::io_source::IoSource<T>::do_io (8,964 samples, 6.83%)mio::io_s..mio::sys::unix::stateless_io_source::IoSourceState::do_io (8,964 samples, 6.83%)mio::sys:..mio::net::udp::UdpSocket::recv_from::{{closure}} (8,964 samples, 6.83%)mio::net:..std::net::udp::UdpSocket::recv_from (8,964 samples, 6.83%)std::net:..std::sys_common::net::UdpSocket::recv_from (8,964 samples, 6.83%)std::sys_..std::sys::pal::unix::net::Socket::recv_from (8,964 samples, 6.83%)std::sys:..std::sys::pal::unix::net::Socket::recv_from_with_flags (8,964 samples, 6.83%)std::sys:..std::sys_common::net::sockaddr_to_addr (23 samples, 0.02%)tokio::runtime::io::registration::Registration::clear_readiness (18 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::clear_readiness (18 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (32 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (9,967 samples, 7.59%)torrust_tr..tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (9,291 samples, 7.08%)tokio::ne..tokio::runtime::io::registration::Registration::async_io::{{closure}} (9,287 samples, 
7.07%)tokio::ru..tokio::runtime::io::registration::Registration::readiness::{{closure}} (45 samples, 0.03%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (41 samples, 0.03%)__memcpy_avx512_unaligned_erms (424 samples, 0.32%)__memcpy_avx512_unaligned_erms (493 samples, 0.38%)__memcpy_avx512_unaligned_erms (298 samples, 0.23%)syscall (1,105 samples, 0.84%)[unknown] (1,095 samples, 0.83%)[unknown] (1,091 samples, 0.83%)[unknown] (1,049 samples, 0.80%)[unknown] (998 samples, 0.76%)[unknown] (907 samples, 0.69%)[unknown] (710 samples, 0.54%)[unknown] (635 samples, 0.48%)[unknown] (538 samples, 0.41%)[unknown] (358 samples, 0.27%)[unknown] (256 samples, 0.19%)[unknown] (153 samples, 0.12%)[unknown] (96 samples, 0.07%)[unknown] (81 samples, 0.06%)tokio::runtime::context::with_scheduler (36 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (31 samples, 0.02%)tokio::runtime::context::with_scheduler::{{closure}} (27 samples, 0.02%)tokio::runtime::context::scoped::Scoped<T>::with (27 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (25 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (15 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (340 samples, 0.26%)core::sync::atomic::atomic_add (340 samples, 0.26%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (354 samples, 0.27%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (367 samples, 0.28%)[unknown] (95 samples, 0.07%)[unknown] (93 samples, 0.07%)[unknown] (92 samples, 0.07%)[unknown] (90 samples, 0.07%)[unknown] (82 samples, 0.06%)[unknown] (73 samples, 0.06%)[unknown] (63 samples, 0.05%)[unknown] (44 samples, 0.03%)[unknown] (40 samples, 
0.03%)[unknown] (35 samples, 0.03%)[unknown] (30 samples, 0.02%)[unknown] (22 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (17 samples, 0.01%)tokio::runtime::driver::Handle::unpark (99 samples, 0.08%)tokio::runtime::driver::IoHandle::unpark (99 samples, 0.08%)tokio::runtime::io::driver::Handle::unpark (99 samples, 0.08%)mio::waker::Waker::wake (99 samples, 0.08%)mio::sys::unix::waker::fdbased::Waker::wake (99 samples, 0.08%)mio::sys::unix::waker::eventfd::WakerInternal::wake (99 samples, 0.08%)<&std::fs::File as std::io::Write>::write (99 samples, 0.08%)std::sys::pal::unix::fs::File::write (99 samples, 0.08%)std::sys::pal::unix::fd::FileDesc::write (99 samples, 0.08%)__GI___libc_write (99 samples, 0.08%)__GI___libc_write (99 samples, 0.08%)tokio::runtime::context::with_scheduler (1,615 samples, 1.23%)std::thread::local::LocalKey<T>::try_with (1,613 samples, 1.23%)tokio::runtime::context::with_scheduler::{{closure}} (1,612 samples, 1.23%)tokio::runtime::context::scoped::Scoped<T>::with (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (1,609 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (1,609 samples, 1.23%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (101 samples, 0.08%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (101 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_option_task_without_yield (1,647 samples, 
1.25%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task (1,646 samples, 1.25%)tokio::runtime::scheduler::multi_thread::worker::with_current (1,646 samples, 1.25%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (23 samples, 0.02%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (18 samples, 0.01%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (104 samples, 0.08%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (60 samples, 0.05%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (57 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (49 samples, 0.04%)core::sync::atomic::AtomicU32::compare_exchange (38 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (38 samples, 0.03%)__memcpy_avx512_unaligned_erms (162 samples, 0.12%)__memcpy_avx512_unaligned_erms (34 samples, 0.03%)__GI___lll_lock_wake_private (127 samples, 0.10%)[unknown] (125 samples, 0.10%)[unknown] (124 samples, 0.09%)[unknown] (119 samples, 0.09%)[unknown] (110 samples, 0.08%)[unknown] (106 samples, 0.08%)[unknown] (87 samples, 0.07%)[unknown] (82 samples, 0.06%)[unknown] (51 samples, 0.04%)[unknown] (27 samples, 0.02%)[unknown] (19 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (77 samples, 0.06%)[unknown] (1,207 samples, 0.92%)[unknown] (1,146 samples, 0.87%)[unknown] (1,126 samples, 0.86%)[unknown] (1,091 samples, 0.83%)[unknown] (1,046 samples, 0.80%)[unknown] (962 samples, 0.73%)[unknown] (914 samples, 0.70%)[unknown] (848 samples, 0.65%)[unknown] (774 samples, 0.59%)[unknown] (580 samples, 0.44%)[unknown] (456 samples, 0.35%)[unknown] (305 samples, 0.23%)[unknown] (85 samples, 0.06%)__GI_mprotect 
(2,474 samples, 1.88%)_..[unknown] (2,457 samples, 1.87%)[..[unknown] (2,440 samples, 1.86%)[..[unknown] (2,436 samples, 1.86%)[..[unknown] (2,435 samples, 1.85%)[..[unknown] (2,360 samples, 1.80%)[..[unknown] (2,203 samples, 1.68%)[unknown] (1,995 samples, 1.52%)[unknown] (1,709 samples, 1.30%)[unknown] (1,524 samples, 1.16%)[unknown] (1,193 samples, 0.91%)[unknown] (865 samples, 0.66%)[unknown] (539 samples, 0.41%)[unknown] (259 samples, 0.20%)[unknown] (80 samples, 0.06%)[unknown] (29 samples, 0.02%)sysmalloc (3,786 samples, 2.88%)sy..grow_heap (2,509 samples, 1.91%)g.._int_malloc (4,038 samples, 3.08%)_in..unlink_chunk (31 samples, 0.02%)alloc::alloc::exchange_malloc (4,335 samples, 3.30%)all..<alloc::alloc::Global as core::alloc::Allocator>::allocate (4,329 samples, 3.30%)<al..alloc::alloc::Global::alloc_impl (4,329 samples, 3.30%)all..alloc::alloc::alloc (4,329 samples, 3.30%)all..__rdl_alloc (4,329 samples, 3.30%)__r..std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (4,329 samples, 3.30%)std..std::sys::pal::unix::alloc::aligned_malloc (4,329 samples, 3.30%)std..__posix_memalign (4,297 samples, 3.27%)__p..__posix_memalign (4,297 samples, 3.27%)__p.._mid_memalign (4,297 samples, 3.27%)_mi.._int_memalign (4,149 samples, 3.16%)_in..sysmalloc (18 samples, 0.01%)core::option::Option<T>::map (6,666 samples, 5.08%)core::..tokio::task::spawn::spawn_inner::{{closure}} (6,665 samples, 5.08%)tokio:..tokio::runtime::scheduler::Handle::spawn (6,665 samples, 5.08%)tokio:..tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (6,664 samples, 5.08%)tokio:..tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (6,661 samples, 5.07%)tokio:..tokio::runtime::task::list::OwnedTasks<S>::bind (4,692 samples, 3.57%)toki..tokio::runtime::task::new_task (4,579 samples, 3.49%)tok..tokio::runtime::task::raw::RawTask::new (4,579 samples, 3.49%)tok..tokio::runtime::task::core::Cell<T,S>::new (4,579 samples, 
3.49%)tok..alloc::boxed::Box<T>::new (4,389 samples, 3.34%)all..tokio::runtime::context::current::with_current (7,636 samples, 5.82%)tokio::..std::thread::local::LocalKey<T>::try_with (7,635 samples, 5.81%)std::th..tokio::runtime::context::current::with_current::{{closure}} (7,188 samples, 5.47%)tokio::..tokio::task::spawn::spawn (7,670 samples, 5.84%)tokio::..tokio::task::spawn::spawn_inner (7,670 samples, 5.84%)tokio::..tokio::runtime::task::id::Id::next (24 samples, 0.02%)core::sync::atomic::AtomicU64::fetch_add (24 samples, 0.02%)core::sync::atomic::atomic_add (24 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (62,691 samples, 47.75%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (62,691 samples, 47.75%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (18,228 samples, 13.88%)torrust_tracker::serv..torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (18,226 samples, 13.88%)torrust_tracker::serv..torrust_tracker::servers::udp::server::Udp::spawn_request_processor (7,679 samples, 5.85%)torrust..__memcpy_avx512_unaligned_erms (38 samples, 0.03%)__memcpy_avx512_unaligned_erms (407 samples, 0.31%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (411 samples, 0.31%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (411 samples, 0.31%)tokio::runtime::task::core::Core<T,S>::poll (63,150 samples, 48.10%)tokio::runtime::task::core::Core<T,S>::polltokio::runtime::task::core::Core<T,S>::drop_future_or_output (459 samples, 0.35%)tokio::runtime::task::core::Core<T,S>::set_stage (459 samples, 0.35%)__memcpy_avx512_unaligned_erms (16 samples, 0.01%)__memcpy_avx512_unaligned_erms (398 samples, 0.30%)__memcpy_avx512_unaligned_erms (325 samples, 0.25%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (330 samples, 
0.25%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (330 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::set_stage (731 samples, 0.56%)tokio::runtime::task::harness::poll_future (63,908 samples, 48.67%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (63,908 samples, 48.67%)std::panic::catch_unwindstd::panicking::try (63,908 samples, 48.67%)std::panicking::trystd::panicking::try::do_call (63,908 samples, 48.67%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (63,908 samples, 48.67%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()..tokio::runtime::task::harness::poll_future::{{closure}} (63,908 samples, 48.67%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::store_output (758 samples, 0.58%)tokio::runtime::coop::budget (65,027 samples, 49.53%)tokio::runtime::coop::budgettokio::runtime::coop::with_budget (65,027 samples, 49.53%)tokio::runtime::coop::with_budgettokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (65,009 samples, 49.51%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}}tokio::runtime::task::LocalNotified<S>::run (65,003 samples, 49.51%)tokio::runtime::task::LocalNotified<S>::runtokio::runtime::task::raw::RawTask::poll (65,003 samples, 49.51%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (64,538 samples, 49.15%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (64,493 samples, 49.12%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (63,919 samples, 48.68%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (93 samples, 0.07%)syscall (2,486 samples, 1.89%)s..[unknown] (2,424 samples, 1.85%)[..[unknown] (2,416 samples, 
1.84%)[..[unknown] (2,130 samples, 1.62%)[unknown] (2,013 samples, 1.53%)[unknown] (1,951 samples, 1.49%)[unknown] (1,589 samples, 1.21%)[unknown] (1,415 samples, 1.08%)[unknown] (1,217 samples, 0.93%)[unknown] (820 samples, 0.62%)[unknown] (564 samples, 0.43%)[unknown] (360 samples, 0.27%)[unknown] (244 samples, 0.19%)[unknown] (194 samples, 0.15%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (339 samples, 0.26%)core::sync::atomic::AtomicUsize::fetch_add (337 samples, 0.26%)core::sync::atomic::atomic_add (337 samples, 0.26%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (364 samples, 0.28%)[unknown] (154 samples, 0.12%)[unknown] (152 samples, 0.12%)[unknown] (143 samples, 0.11%)[unknown] (139 samples, 0.11%)[unknown] (131 samples, 0.10%)[unknown] (123 samples, 0.09%)[unknown] (110 samples, 0.08%)[unknown] (80 samples, 0.06%)[unknown] (74 samples, 0.06%)[unknown] (65 samples, 0.05%)[unknown] (64 samples, 0.05%)[unknown] (47 samples, 0.04%)[unknown] (44 samples, 0.03%)[unknown] (43 samples, 0.03%)[unknown] (40 samples, 0.03%)[unknown] (26 samples, 0.02%)[unknown] (20 samples, 0.02%)__GI___libc_write (158 samples, 0.12%)__GI___libc_write (158 samples, 0.12%)mio::sys::unix::waker::eventfd::WakerInternal::wake (159 samples, 0.12%)<&std::fs::File as std::io::Write>::write (159 samples, 0.12%)std::sys::pal::unix::fs::File::write (159 samples, 0.12%)std::sys::pal::unix::fd::FileDesc::write (159 samples, 0.12%)tokio::runtime::driver::Handle::unpark (168 samples, 0.13%)tokio::runtime::driver::IoHandle::unpark (168 samples, 0.13%)tokio::runtime::io::driver::Handle::unpark (168 samples, 0.13%)mio::waker::Waker::wake (165 samples, 0.13%)mio::sys::unix::waker::fdbased::Waker::wake (165 samples, 0.13%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (68,159 samples, 
51.91%)tokio::runtime::scheduler::multi_thread::worker::Context::run_tasktokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (3,024 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (3,023 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (3,022 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (171 samples, 0.13%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (171 samples, 0.13%)core::option::Option<T>::or_else (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::tune_global_queue_interval (53 samples, 0.04%)tokio::runtime::scheduler::multi_thread::stats::Stats::tuned_global_queue_interval (53 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (107 samples, 0.08%)__GI___libc_free (17 samples, 0.01%)_int_free (17 samples, 0.01%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Dying,K,V>::deallocating_end (18 samples, 0.01%)alloc::collections::btree::navigate::<impl alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>>::deallocating_end (18 samples, 0.01%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,alloc::collections::btree::node::marker::LeafOrInternal>::deallocate_and_ascend (18 samples, 
0.01%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (18 samples, 0.01%)alloc::alloc::dealloc (18 samples, 0.01%)__rdl_dealloc (18 samples, 0.01%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (18 samples, 0.01%)alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (19 samples, 0.01%)tokio::runtime::task::Task<S>::shutdown (26 samples, 0.02%)tokio::runtime::task::raw::RawTask::shutdown (26 samples, 0.02%)tokio::runtime::task::raw::shutdown (26 samples, 0.02%)tokio::runtime::task::harness::Harness<T,S>::shutdown (26 samples, 0.02%)tokio::runtime::task::harness::cancel_task (26 samples, 0.02%)std::panic::catch_unwind (26 samples, 0.02%)std::panicking::try (26 samples, 0.02%)std::panicking::try::do_call (26 samples, 0.02%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (26 samples, 0.02%)core::ops::function::FnOnce::call_once (26 samples, 0.02%)tokio::runtime::task::harness::cancel_task::{{closure}} (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage (26 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (26 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::core::Tracker> (26 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> 
(26 samples, 0.02%)core::ptr::drop_in_place<std::sync::rwlock::RwLock<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 0.02%)core::ptr::drop_in_place<core::cell::UnsafeCell<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)core::mem::drop (26 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::IntoIter<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)<alloc::collections::btree::map::IntoIter<K,V,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,NodeType>,alloc::collections::btree::node::marker::KV>::drop_key_val (24 samples, 0.02%)core::mem::maybe_uninit::MaybeUninit<T>::assume_init_drop (24 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (24 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (21 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>> (20 samples, 
0.02%)core::ptr::drop_in_place<core::cell::UnsafeCell<torrust_tracker_torrent_repository::entry::Torrent>> (20 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker_torrent_repository::entry::Torrent> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::peer::Id,alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (20 samples, 0.02%)<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)core::mem::drop (20 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::IntoIter<torrust_tracker_primitives::peer::Id,alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (20 samples, 0.02%)<alloc::collections::btree::map::IntoIter<K,V,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::pre_shutdown (33 samples, 0.03%)tokio::runtime::task::list::OwnedTasks<S>::close_and_shutdown_all (33 samples, 0.03%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (114 samples, 0.09%)alloc::sync::Arc<T,A>::inner (114 samples, 0.09%)core::ptr::non_null::NonNull<T>::as_ref (114 samples, 0.09%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (108 samples, 0.08%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (108 samples, 0.08%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (106 samples, 0.08%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (49 samples, 0.04%)alloc::sync::Arc<T,A>::inner (49 samples, 0.04%)core::ptr::non_null::NonNull<T>::as_ref (49 samples, 0.04%)core::num::<impl u32>::wrapping_sub (132 samples, 0.10%)core::sync::atomic::AtomicU64::load (40 samples, 0.03%)core::sync::atomic::atomic_load (40 samples, 0.03%)tokio::loom::std::atomic_u32::AtomicU32::unsync_load (48 samples, 0.04%)core::sync::atomic::AtomicU32::load (48 samples, 0.04%)core::sync::atomic::atomic_load (48 samples, 
0.04%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (65 samples, 0.05%)alloc::sync::Arc<T,A>::inner (65 samples, 0.05%)core::ptr::non_null::NonNull<T>::as_ref (65 samples, 0.05%)core::num::<impl u32>::wrapping_sub (50 samples, 0.04%)core::sync::atomic::AtomicU32::load (55 samples, 0.04%)core::sync::atomic::atomic_load (55 samples, 0.04%)core::sync::atomic::AtomicU64::load (80 samples, 0.06%)core::sync::atomic::atomic_load (80 samples, 0.06%)tokio::runtime::scheduler::multi_thread::queue::pack (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (666 samples, 0.51%)tokio::runtime::scheduler::multi_thread::queue::unpack (147 samples, 0.11%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (1,036 samples, 0.79%)tokio::runtime::scheduler::multi_thread::queue::unpack (46 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_searching (49 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_searching (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (2,414 samples, 1.84%)t..tokio::util::rand::FastRand::fastrand_n (24 samples, 0.02%)tokio::util::rand::FastRand::fastrand (24 samples, 0.02%)std::sys_common::backtrace::__rust_begin_short_backtrace (98,136 samples, 74.74%)std::sys_common::backtrace::__rust_begin_short_backtracetokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}} (98,136 samples, 74.74%)tokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}}tokio::runtime::blocking::pool::Inner::run (98,136 samples, 74.74%)tokio::runtime::blocking::pool::Inner::runtokio::runtime::blocking::pool::Task::run (98,042 samples, 74.67%)tokio::runtime::blocking::pool::Task::runtokio::runtime::task::UnownedTask<S>::run (98,042 samples, 74.67%)tokio::runtime::task::UnownedTask<S>::runtokio::runtime::task::raw::RawTask::poll (98,042 samples, 
74.67%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (98,042 samples, 74.67%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (98,042 samples, 74.67%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (98,042 samples, 74.67%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::task::harness::poll_future (98,042 samples, 74.67%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (98,042 samples, 74.67%)std::panic::catch_unwindstd::panicking::try (98,042 samples, 74.67%)std::panicking::trystd::panicking::try::do_call (98,042 samples, 74.67%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (98,042 samples, 74.67%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncetokio::runtime::task::harness::poll_future::{{closure}} (98,042 samples, 74.67%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::poll (98,042 samples, 74.67%)tokio::runtime::task::core::Core<T,S>::polltokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (98,042 samples, 74.67%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (98,042 samples, 74.67%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (98,042 samples, 74.67%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::polltokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}}tokio::runtime::scheduler::multi_thread::worker::run (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::runtokio::runtime::context::runtime::enter_runtime 
(98,042 samples, 74.67%)tokio::runtime::context::runtime::enter_runtimetokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}tokio::runtime::context::set_scheduler (98,042 samples, 74.67%)tokio::runtime::context::set_schedulerstd::thread::local::LocalKey<T>::with (98,042 samples, 74.67%)std::thread::local::LocalKey<T>::withstd::thread::local::LocalKey<T>::try_with (98,042 samples, 74.67%)std::thread::local::LocalKey<T>::try_withtokio::runtime::context::set_scheduler::{{closure}} (98,042 samples, 74.67%)tokio::runtime::context::set_scheduler::{{closure}}tokio::runtime::context::scoped::Scoped<T>::set (98,042 samples, 74.67%)tokio::runtime::context::scoped::Scoped<T>::settokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}}tokio::runtime::scheduler::multi_thread::worker::Context::run (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::Context::runstd::panic::catch_unwind (98,137 samples, 74.74%)std::panic::catch_unwindstd::panicking::try (98,137 samples, 74.74%)std::panicking::trystd::panicking::try::do_call (98,137 samples, 74.74%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (98,137 samples, 74.74%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncestd::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}} (98,137 samples, 74.74%)std::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}}<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (98,139 samples, 74.74%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (98,139 samples, 74.74%)<alloc::boxed::Box<F,A> as 
core::ops::function::FnOnce<Args>>::call_oncecore::ops::function::FnOnce::call_once{{vtable.shim}} (98,139 samples, 74.74%)core::ops::function::FnOnce::call_once{{vtable.shim}}std::thread::Builder::spawn_unchecked_::{{closure}} (98,139 samples, 74.74%)std::thread::Builder::spawn_unchecked_::{{closure}}clone3 (98,205 samples, 74.79%)clone3start_thread (98,205 samples, 74.79%)start_threadstd::sys::pal::unix::thread::Thread::new::thread_start (98,158 samples, 74.76%)std::sys::pal::unix::thread::Thread::new::thread_startcore::ptr::drop_in_place<std::sys::pal::unix::stack_overflow::Handler> (19 samples, 0.01%)<std::sys::pal::unix::stack_overflow::Handler as core::ops::drop::Drop>::drop (19 samples, 0.01%)std::sys::pal::unix::stack_overflow::imp::drop_handler (19 samples, 0.01%)__GI_munmap (19 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (17 samples, 0.01%)[unknown] (16 samples, 0.01%)core::fmt::Formatter::pad_integral (112 samples, 0.09%)core::fmt::Formatter::pad_integral::write_prefix (59 samples, 0.04%)core::fmt::Formatter::pad_integral (16 samples, 0.01%)core::fmt::write (20 samples, 0.02%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (19 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (51 samples, 0.04%)rand_chacha::guts::round (18 samples, 0.01%)rand_chacha::guts::refill_wide::impl_avx2 (26 samples, 0.02%)rand_chacha::guts::refill_wide::fn_impl (26 samples, 0.02%)rand_chacha::guts::refill_wide_impl (26 samples, 0.02%)rand_chacha::guts::refill_wide (14 samples, 0.01%)std_detect::detect::arch::x86::__is_feature_detected::avx2 (14 samples, 0.01%)std_detect::detect::check_for (14 samples, 0.01%)std_detect::detect::cache::test (14 samples, 0.01%)std_detect::detect::cache::Cache::test (14 samples, 
0.01%)core::sync::atomic::AtomicUsize::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)core::cell::RefCell<T>::borrow_mut (81 samples, 0.06%)core::cell::RefCell<T>::try_borrow_mut (81 samples, 0.06%)core::cell::BorrowRefMut::new (81 samples, 0.06%)std::sys::pal::unix::time::Timespec::now (164 samples, 0.12%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (106 samples, 0.08%)tokio::runtime::coop::budget (105 samples, 0.08%)tokio::runtime::coop::with_budget (105 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (96 samples, 0.07%)std::sys::pal::unix::time::Timespec::sub_timespec (35 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::lock_contended (15 samples, 0.01%)syscall (90 samples, 0.07%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (21 samples, 0.02%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run (61 samples, 0.05%)tokio::runtime::context::runtime::enter_runtime (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (61 samples, 0.05%)tokio::runtime::context::set_scheduler (61 samples, 0.05%)std::thread::local::LocalKey<T>::with (61 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (61 samples, 0.05%)tokio::runtime::context::set_scheduler::{{closure}} (61 samples, 0.05%)tokio::runtime::context::scoped::Scoped<T>::set (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Context::run (61 samples, 
0.05%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (19 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (17 samples, 0.01%)tokio::runtime::context::CONTEXT::__getit (14 samples, 0.01%)core::cell::Cell<T>::get (14 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (22 samples, 0.02%)<tokio::runtime::task::core::TaskIdGuard as core::ops::drop::Drop>::drop (22 samples, 0.02%)tokio::runtime::context::set_current_task_id (22 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (22 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (112 samples, 0.09%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (111 samples, 0.08%)tokio::runtime::task::harness::poll_future (125 samples, 0.10%)std::panic::catch_unwind (125 samples, 0.10%)std::panicking::try (125 samples, 0.10%)std::panicking::try::do_call (125 samples, 0.10%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (125 samples, 0.10%)tokio::runtime::task::harness::poll_future::{{closure}} (125 samples, 0.10%)tokio::runtime::task::core::Core<T,S>::poll (125 samples, 0.10%)tokio::runtime::task::raw::poll (157 samples, 0.12%)tokio::runtime::task::harness::Harness<T,S>::poll (135 samples, 0.10%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (135 samples, 0.10%)tokio::runtime::time::Driver::park_internal (15 samples, 0.01%)torrust_tracker::bootstrap::logging::INIT (17 samples, 0.01%)__memcpy_avx512_unaligned_erms (397 samples, 0.30%)_int_free (24 samples, 0.02%)_int_malloc (132 samples, 0.10%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE::META (570 samples, 0.43%)__GI___lll_lock_wait_private (22 samples, 0.02%)futex_wait (14 samples, 0.01%)__memcpy_avx512_unaligned_erms (299 samples, 0.23%)_int_free (16 samples, 
0.01%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE (361 samples, 0.27%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (41 samples, 0.03%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (23 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (53 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (14 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (63 samples, 0.05%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (21 samples, 0.02%)__GI___libc_malloc (18 samples, 0.01%)alloc::vec::Vec<T>::with_capacity (116 samples, 0.09%)alloc::vec::Vec<T,A>::with_capacity_in (116 samples, 0.09%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (116 samples, 0.09%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (116 samples, 0.09%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (116 samples, 0.09%)alloc::alloc::Global::alloc_impl (116 samples, 0.09%)alloc::alloc::alloc (116 samples, 0.09%)__rdl_alloc (116 samples, 0.09%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (116 samples, 0.09%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (53 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (53 samples, 0.04%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (53 samples, 0.04%)_int_malloc (21 samples, 0.02%)[unknown] (36 samples, 0.03%)[unknown] (16 samples, 0.01%)core::mem::zeroed (27 samples, 0.02%)core::mem::maybe_uninit::MaybeUninit<T>::zeroed (27 samples, 0.02%)core::ptr::mut_ptr::<impl *mut T>::write_bytes (27 samples, 0.02%)core::intrinsics::write_bytes (27 samples, 0.02%)[unknown] (27 samples, 0.02%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (64 samples, 0.05%)mio::net::udp::UdpSocket::recv_from (49 
samples, 0.04%)mio::io_source::IoSource<T>::do_io (49 samples, 0.04%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (49 samples, 0.04%)mio::net::udp::UdpSocket::recv_from::{{closure}} (49 samples, 0.04%)std::net::udp::UdpSocket::recv_from (49 samples, 0.04%)std::sys_common::net::UdpSocket::recv_from (49 samples, 0.04%)std::sys::pal::unix::net::Socket::recv_from (49 samples, 0.04%)std::sys::pal::unix::net::Socket::recv_from_with_flags (49 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (271 samples, 0.21%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (143 samples, 0.11%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (141 samples, 0.11%)tokio::runtime::io::registration::Registration::clear_readiness (15 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::clear_readiness (15 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (15 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (359 samples, 0.27%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (346 samples, 0.26%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (39 samples, 0.03%)tokio::task::spawn::spawn (39 samples, 0.03%)tokio::task::spawn::spawn_inner (39 samples, 0.03%)tokio::runtime::context::current::with_current (39 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (39 samples, 0.03%)tokio::runtime::context::current::with_current::{{closure}} (39 samples, 0.03%)core::option::Option<T>::map (39 samples, 0.03%)tokio::task::spawn::spawn_inner::{{closure}} (39 samples, 0.03%)tokio::runtime::scheduler::Handle::spawn (39 samples, 0.03%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (39 samples, 0.03%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (39 samples, 0.03%)tokio::runtime::task::list::OwnedTasks<S>::bind (34 samples, 0.03%)all (131,301 
samples, 100%)tokio-runtime-w (131,061 samples, 99.82%)tokio-runtime-w \ No newline at end of file +]]>Flame Graph Reset ZoomSearch [unknown] (188 samples, 0.14%)[unknown] (187 samples, 0.14%)[unknown] (186 samples, 0.14%)[unknown] (178 samples, 0.14%)[unknown] (172 samples, 0.13%)[unknown] (158 samples, 0.12%)[unknown] (158 samples, 0.12%)[unknown] (125 samples, 0.10%)[unknown] (102 samples, 0.08%)[unknown] (93 samples, 0.07%)[unknown] (92 samples, 0.07%)[unknown] (41 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (29 samples, 0.02%)[unknown] (25 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (15 samples, 0.01%)__GI___mmap64 (18 samples, 0.01%)__GI___mmap64 (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (17 samples, 0.01%)profiling (214 samples, 0.16%)clone3 (22 samples, 0.02%)start_thread (22 samples, 0.02%)std::sys::pal::unix::thread::Thread::new::thread_start (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::Handler::new (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::imp::make_handler (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::imp::get_stack (19 samples, 0.01%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (30 samples, 0.02%)[[vdso]] (93 samples, 0.07%)<torrust_tracker::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as core::ops::deref::Deref>::deref::__stability::LAZY (143 samples, 0.11%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (31 samples, 0.02%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (28 samples, 
0.02%)alloc::collections::btree::navigate::LazyLeafRange<BorrowType,K,V>::init_front (21 samples, 0.02%)[[vdso]] (91 samples, 0.07%)__GI___clock_gettime (14 samples, 0.01%)_int_malloc (53 samples, 0.04%)epoll_wait (254 samples, 0.19%)tokio::runtime::context::with_scheduler (28 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (14 samples, 0.01%)tokio::runtime::context::with_scheduler::{{closure}} (14 samples, 0.01%)core::option::Option<T>::map (17 samples, 0.01%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (17 samples, 0.01%)mio::poll::Poll::poll (27 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select (27 samples, 0.02%)tokio::runtime::io::driver::Driver::turn (54 samples, 0.04%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (26 samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (17 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (71 samples, 0.05%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (65 samples, 0.05%)core::sync::atomic::AtomicUsize::fetch_add (65 samples, 0.05%)core::sync::atomic::atomic_add (65 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (31 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark_condvar (18 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (49 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (33 samples, 0.03%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (93 samples, 0.07%)tokio::runtime::scheduler::multi_thread::park::Parker::park (75 samples, 
0.06%)tokio::runtime::scheduler::multi_thread::park::Inner::park (75 samples, 0.06%)core::cell::RefCell<T>::borrow_mut (18 samples, 0.01%)core::cell::RefCell<T>::try_borrow_mut (18 samples, 0.01%)core::cell::BorrowRefMut::new (18 samples, 0.01%)tokio::runtime::coop::budget (26 samples, 0.02%)tokio::runtime::coop::with_budget (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (96 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (27 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (18 samples, 0.01%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (35 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (14 samples, 0.01%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (90 samples, 0.07%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (90 samples, 0.07%)core::slice::<impl [T]>::contains (220 samples, 0.17%)<T as core::slice::cmp::SliceContains>::slice_contains (220 samples, 0.17%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (220 samples, 0.17%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (54 samples, 0.04%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (54 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (240 samples, 0.18%)tokio::runtime::scheduler::multi_thread::idle::Idle::unpark_worker_by_id (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (265 samples, 
0.20%)tokio::runtime::scheduler::multi_thread::worker::Context::park (284 samples, 0.22%)core::option::Option<T>::or_else (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (40 samples, 0.03%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (17 samples, 0.01%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (17 samples, 0.01%)core::num::<impl u32>::wrapping_add (17 samples, 0.01%)core::sync::atomic::AtomicU64::compare_exchange (26 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (129 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (128 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (119 samples, 0.09%)tokio::runtime::scheduler::multi_thread::queue::pack (39 samples, 0.03%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run (613 samples, 0.47%)tokio::runtime::context::runtime::enter_runtime (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (613 samples, 0.47%)tokio::runtime::context::set_scheduler (613 samples, 0.47%)std::thread::local::LocalKey<T>::with (613 samples, 0.47%)std::thread::local::LocalKey<T>::try_with (613 samples, 0.47%)tokio::runtime::context::set_scheduler::{{closure}} (613 samples, 0.47%)tokio::runtime::context::scoped::Scoped<T>::set (613 samples, 
0.47%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::Context::run (613 samples, 0.47%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (777 samples, 0.59%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (776 samples, 0.59%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (16 samples, 0.01%)<tokio::runtime::task::core::TaskIdGuard as core::ops::drop::Drop>::drop (16 samples, 0.01%)tokio::runtime::context::set_current_task_id (16 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (16 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (20 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (20 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::poll (835 samples, 0.64%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (56 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage (46 samples, 0.04%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (897 samples, 0.68%)tokio::runtime::task::harness::poll_future::{{closure}} (897 samples, 0.68%)tokio::runtime::task::core::Core<T,S>::store_output (62 samples, 0.05%)tokio::runtime::task::harness::poll_future (930 samples, 0.71%)std::panic::catch_unwind (927 samples, 0.71%)std::panicking::try (927 samples, 0.71%)std::panicking::try::do_call (925 samples, 0.70%)core::mem::manually_drop::ManuallyDrop<T>::take (28 samples, 0.02%)core::ptr::read (28 samples, 0.02%)tokio::runtime::task::raw::poll (938 samples, 0.71%)tokio::runtime::task::harness::Harness<T,S>::poll (934 samples, 0.71%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (934 samples, 0.71%)core::array::<impl core::default::Default for [T: 32]>::default (26 samples, 0.02%)tokio::runtime::time::Inner::lock (16 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (16 
samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (15 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::time::wheel::Wheel::poll (25 samples, 0.02%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (98 samples, 0.07%)tokio::runtime::time::Driver::park_internal (51 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (15 samples, 0.01%)<F as core::future::into_future::IntoFuture>::into_future (16 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (24 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (46 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (131 samples, 0.10%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (24 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (14 samples, 0.01%)core::sync::atomic::AtomicU32::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (39 samples, 0.03%)std::sync::rwlock::RwLock<T>::read (34 samples, 0.03%)std::sys::sync::rwlock::futex::RwLock::read (32 samples, 0.02%)[[heap]] (2,361 samples, 1.80%)[..[[vdso]] (313 samples, 0.24%)<alloc::collections::btree::map::Values<K,V> as 
core::iter::traits::iterator::Iterator>::next (41 samples, 0.03%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (16 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_str (67 samples, 0.05%)alloc::string::String::push_str (18 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (18 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (18 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (18 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (36 samples, 0.03%)core::num::<impl u64>::rotate_left (28 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (60 samples, 0.05%)core::num::<impl u64>::wrapping_add (14 samples, 0.01%)core::hash::sip::u8to64_le (60 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (184 samples, 0.14%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (15 samples, 0.01%)tokio::runtime::context::CONTEXT::__getit (19 samples, 0.01%)core::cell::Cell<T>::get (17 samples, 0.01%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (26 samples, 0.02%)core::ops::function::FnMut::call_mut (21 samples, 0.02%)tokio::runtime::coop::poll_proceed (21 samples, 0.02%)tokio::runtime::context::budget (21 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (21 samples, 0.02%)[unknown] (18 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (195 samples, 0.15%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (14 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (14 samples, 0.01%)core::result::Result<T,E>::is_err (18 samples, 
0.01%)core::result::Result<T,E>::is_ok (18 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (46 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (39 samples, 0.03%)core::sync::atomic::AtomicU32::compare_exchange (19 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (19 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (245 samples, 0.19%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (26 samples, 0.02%)[[vdso]] (748 samples, 0.57%)[profiling] (34 samples, 0.03%)core::fmt::write (31 samples, 0.02%)__GI___clock_gettime (29 samples, 0.02%)__GI___libc_free (131 samples, 0.10%)arena_for_chunk (20 samples, 0.02%)arena_for_chunk (19 samples, 0.01%)heap_for_ptr (19 samples, 0.01%)heap_max_size (14 samples, 0.01%)__GI___libc_malloc (114 samples, 0.09%)__GI___libc_realloc (15 samples, 0.01%)__GI___lll_lock_wake_private (22 samples, 0.02%)__GI___pthread_disable_asynccancel (66 samples, 0.05%)__GI_getsockname (249 samples, 0.19%)__libc_calloc (15 samples, 0.01%)__libc_recvfrom (23 samples, 0.02%)__libc_sendto (130 samples, 0.10%)__memcmp_evex_movbe (451 samples, 0.34%)__memcpy_avx512_unaligned_erms (426 samples, 0.32%)__memset_avx512_unaligned_erms (215 samples, 0.16%)__posix_memalign (17 samples, 0.01%)_int_free (418 samples, 0.32%)tcache_put (24 samples, 0.02%)_int_malloc (385 samples, 0.29%)_int_memalign (31 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (26 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (15 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (15 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index 
(15 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (54 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::grow_one (15 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (96 samples, 0.07%)alloc::raw_vec::RawVec<T,A>::grow_amortized (66 samples, 0.05%)core::num::<impl usize>::checked_add (18 samples, 0.01%)core::num::<impl usize>::overflowing_add (18 samples, 0.01%)alloc::raw_vec::finish_grow (74 samples, 0.06%)alloc::sync::Arc<T,A>::drop_slow (16 samples, 0.01%)core::mem::drop (14 samples, 0.01%)core::fmt::Formatter::pad_integral (14 samples, 0.01%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (93 samples, 0.07%)core::ptr::drop_in_place<tokio::net::udp::UdpSocket::send_to<&core::net::socket_addr::SocketAddr>::{{closure}}> (23 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (188 samples, 0.14%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_announce::{{closure}}> (30 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_connect::{{closure}}> (22 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_packet::{{closure}}> (20 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}}> (19 samples, 0.01%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::send_response::{{closure}}> (22 samples, 0.02%)malloc_consolidate (24 samples, 0.02%)core::core_arch::x86::avx2::_mm256_or_si256 (15 samples, 0.01%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (17 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (17 samples, 0.01%)rand_chacha::guts::round (66 samples, 0.05%)rand_chacha::guts::refill_wide::impl_avx2 (99 samples, 
0.08%)rand_chacha::guts::refill_wide::fn_impl (98 samples, 0.07%)rand_chacha::guts::refill_wide_impl (98 samples, 0.07%)std::io::error::Error::kind (14 samples, 0.01%)[unknown] (42 samples, 0.03%)[unknown] (14 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (490 samples, 0.37%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (211 samples, 0.16%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (84 samples, 0.06%)tokio::runtime::task::core::Header::get_owner_id (18 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (18 samples, 0.01%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (20 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::remove (19 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (31 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (29 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage (108 samples, 0.08%)tokio::runtime::task::core::TaskIdGuard::enter (14 samples, 0.01%)tokio::runtime::context::set_current_task_id (14 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (14 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::complete (21 samples, 0.02%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (32 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (54 samples, 0.04%)tokio::runtime::task::raw::drop_abort_handle (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (17 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (22 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (22 
samples, 0.02%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (79 samples, 0.06%)core::slice::<impl [T]>::contains (178 samples, 0.14%)<T as core::slice::cmp::SliceContains>::slice_contains (178 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (178 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (40 samples, 0.03%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (40 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (216 samples, 0.16%)tokio::loom::std::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (219 samples, 0.17%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (29 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (29 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (54 samples, 0.04%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (18 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (18 samples, 0.01%)core::sync::atomic::AtomicU32::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (113 samples, 0.09%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (51 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (31 samples, 0.02%)core::sync::atomic::AtomicU64::load (14 samples, 
0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (447 samples, 0.34%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (174 samples, 0.13%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (19 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (489 samples, 0.37%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (489 samples, 0.37%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run (484 samples, 0.37%)tokio::runtime::context::runtime::enter_runtime (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (484 samples, 0.37%)tokio::runtime::context::set_scheduler (484 samples, 0.37%)std::thread::local::LocalKey<T>::with (484 samples, 0.37%)std::thread::local::LocalKey<T>::try_with (484 samples, 0.37%)tokio::runtime::context::set_scheduler::{{closure}} (484 samples, 0.37%)tokio::runtime::context::scoped::Scoped<T>::set (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Context::run (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (24 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (20 samples, 0.02%)tokio::runtime::task::raw::poll (515 samples, 0.39%)tokio::runtime::task::harness::Harness<T,S>::poll (493 samples, 0.38%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (493 samples, 0.38%)tokio::runtime::task::harness::poll_future (493 samples, 0.38%)std::panic::catch_unwind (493 samples, 0.38%)std::panicking::try (493 samples, 0.38%)std::panicking::try::do_call (493 samples, 
0.38%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (493 samples, 0.38%)tokio::runtime::task::harness::poll_future::{{closure}} (493 samples, 0.38%)tokio::runtime::task::core::Core<T,S>::poll (493 samples, 0.38%)tokio::runtime::time::wheel::Wheel::next_expiration (16 samples, 0.01%)torrust_tracker::core::Tracker::authorize::{{closure}} (27 samples, 0.02%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (15 samples, 0.01%)torrust_tracker::core::Tracker::send_stats_event::{{closure}} (44 samples, 0.03%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (15 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::d_rounds (29 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (74 samples, 0.06%)torrust_tracker::servers::udp::peer_builder::from_request (17 samples, 0.01%)torrust_tracker::servers::udp::request::AnnounceWrapper::new (51 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (54 samples, 0.04%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (58 samples, 0.04%)torrust_tracker::core::Tracker::announce::{{closure}} (70 samples, 0.05%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (113 samples, 0.09%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (175 samples, 0.13%)<T as alloc::string::ToString>::to_string (38 samples, 0.03%)core::option::Option<T>::expect (56 samples, 0.04%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (18 samples, 0.01%)<T as alloc::string::ToString>::to_string (18 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (180 samples, 
0.14%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (468 samples, 0.36%)torrust_tracker::servers::udp::logging::log_response (38 samples, 0.03%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (669 samples, 0.51%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (152 samples, 0.12%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (147 samples, 0.11%)tokio::net::udp::UdpSocket::send_to::{{closure}} (138 samples, 0.11%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (119 samples, 0.09%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (75 samples, 0.06%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (39 samples, 0.03%)mio::net::udp::UdpSocket::send_to (39 samples, 0.03%)mio::io_source::IoSource<T>::do_io (39 samples, 0.03%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (39 samples, 0.03%)mio::net::udp::UdpSocket::send_to::{{closure}} (39 samples, 0.03%)std::net::udp::UdpSocket::send_to (39 samples, 0.03%)std::sys_common::net::UdpSocket::send_to (39 samples, 0.03%)std::sys::pal::unix::cvt (39 samples, 0.03%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (39 samples, 0.03%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_stats (15 samples, 0.01%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (14 samples, 0.01%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count::to_usize::{{closure}} (33 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for 
torrust_tracker_torrent_repository::entry::Torrent>::get_stats::{{closure}} (33 samples, 0.03%)torrust_tracker_primitives::peer::Peer::is_seeder (33 samples, 0.03%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count (75 samples, 0.06%)core::iter::traits::iterator::Iterator::sum (75 samples, 0.06%)<usize as core::iter::traits::accum::Sum>::sum (75 samples, 0.06%)<core::iter::adapters::map::Map<I,F> as core::iter::traits::iterator::Iterator>::fold (75 samples, 0.06%)core::iter::traits::iterator::Iterator::fold (75 samples, 0.06%)core::iter::adapters::map::map_fold::{{closure}} (34 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (104 samples, 0.08%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (24 samples, 0.02%)core::mem::drop (15 samples, 0.01%)core::ptr::drop_in_place<core::option::Option<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (15 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (15 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (15 samples, 0.01%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (215 samples, 0.16%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (198 samples, 0.15%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (89 samples, 0.07%)core::option::Option<T>::is_some_and (32 samples, 
0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (31 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (30 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (30 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (26 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (34 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (34 samples, 0.03%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (58 samples, 0.04%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (58 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (58 samples, 0.04%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (58 samples, 0.04%)<u8 as core::slice::cmp::SliceOrd>::compare (58 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (20 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (238 samples, 0.18%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (236 samples, 0.18%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (208 samples, 0.16%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (208 samples, 0.16%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for 
torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (282 samples, 0.21%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (67 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (53 samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (53 samples, 0.04%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (22 samples, 0.02%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (22 samples, 0.02%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (22 samples, 0.02%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (22 samples, 0.02%)<u8 as core::slice::cmp::SliceOrd>::compare (22 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (18 samples, 0.01%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (23 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (23 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (43 samples, 0.03%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (43 samples, 0.03%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (43 samples, 0.03%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (43 samples, 0.03%)<u8 as core::slice::cmp::SliceOrd>::compare (43 samples, 0.03%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (17 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (151 samples, 0.12%)alloc::collections::btree::search::<impl 
alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (145 samples, 0.11%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (137 samples, 0.10%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (137 samples, 0.10%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (266 samples, 0.20%)core::sync::atomic::AtomicU32::load (27 samples, 0.02%)core::sync::atomic::atomic_load (27 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (38 samples, 0.03%)std::sync::rwlock::RwLock<T>::read (37 samples, 0.03%)std::sys::sync::rwlock::futex::RwLock::read (36 samples, 0.03%)tracing::span::Span::log (16 samples, 0.01%)tracing::span::Span::record_all (70 samples, 0.05%)unlink_chunk (139 samples, 0.11%)rand::rng::Rng::gen (30 samples, 0.02%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (30 samples, 0.02%)rand::rng::Rng::gen (30 samples, 0.02%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (30 samples, 0.02%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (30 samples, 
0.02%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (30 samples, 0.02%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (30 samples, 0.02%)rand_core::block::BlockRng<R>::generate_and_set (28 samples, 0.02%)[anon] (8,759 samples, 6.67%)[anon]uuid::v4::<impl uuid::Uuid>::new_v4 (32 samples, 0.02%)uuid::rng::bytes (32 samples, 0.02%)rand::random (32 samples, 0.02%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (15 samples, 0.01%)_int_free (338 samples, 0.26%)tcache_put (18 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (22 samples, 0.02%)hashbrown::raw::h2 (14 samples, 0.01%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (23 samples, 0.02%)hashbrown::raw::RawTableInner::find_or_find_insert_slot_inner (17 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (25 samples, 0.02%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (15 samples, 0.01%)[profiling] (545 samples, 0.42%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (32 samples, 0.02%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (22 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (16 samples, 0.01%)alloc::vec::Vec<T,A>::reserve (30 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve (28 samples, 0.02%)<alloc::string::String as core::fmt::Write>::write_str (83 samples, 0.06%)alloc::string::String::push_str (57 samples, 0.04%)alloc::vec::Vec<T,A>::extend_from_slice (57 samples, 0.04%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (57 samples, 
0.04%)alloc::vec::Vec<T,A>::append_elements (57 samples, 0.04%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (20 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (41 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (151 samples, 0.12%)core::hash::sip::u8to64_le (50 samples, 0.04%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (33 samples, 0.03%)tokio::runtime::context::CONTEXT::__getit (35 samples, 0.03%)core::cell::Cell<T>::get (33 samples, 0.03%)[unknown] (20 samples, 0.02%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (75 samples, 0.06%)core::ops::function::FnMut::call_mut (66 samples, 0.05%)tokio::runtime::coop::poll_proceed (66 samples, 0.05%)tokio::runtime::context::budget (66 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (66 samples, 0.05%)tokio::runtime::context::budget::{{closure}} (27 samples, 0.02%)tokio::runtime::coop::poll_proceed::{{closure}} (27 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (110 samples, 0.08%)[unknown] (15 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (27 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (27 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (14 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (84 samples, 0.06%)std::sync::mutex::Mutex<T>::lock (70 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock (59 samples, 0.04%)core::sync::atomic::AtomicU32::compare_exchange (55 samples, 0.04%)core::sync::atomic::atomic_compare_exchange (55 samples, 0.04%)[unknown] (33 samples, 0.03%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (214 samples, 0.16%)__memcpy_avx512_unaligned_erms (168 samples, 0.13%)[profiling] (171 samples, 0.13%)binascii::bin2hex (77 samples, 
0.06%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (280 samples, 0.21%)[unknown] (317 samples, 0.24%)[[vdso]] (2,648 samples, 2.02%)[..[unknown] (669 samples, 0.51%)[unknown] (396 samples, 0.30%)[unknown] (251 samples, 0.19%)[unknown] (65 samples, 0.05%)[unknown] (30 samples, 0.02%)[unknown] (21 samples, 0.02%)__GI___clock_gettime (56 samples, 0.04%)arena_for_chunk (72 samples, 0.05%)arena_for_chunk (62 samples, 0.05%)heap_for_ptr (49 samples, 0.04%)heap_max_size (28 samples, 0.02%)__GI___libc_free (194 samples, 0.15%)arena_for_chunk (19 samples, 0.01%)checked_request2size (24 samples, 0.02%)__GI___libc_malloc (220 samples, 0.17%)tcache_get (44 samples, 0.03%)__GI___libc_write (25 samples, 0.02%)__GI___libc_write (14 samples, 0.01%)__GI___pthread_disable_asynccancel (97 samples, 0.07%)core::num::<impl u128>::leading_zeros (15 samples, 0.01%)compiler_builtins::float::conv::int_to_float::u128_to_f64_bits (72 samples, 0.05%)__floattidf (90 samples, 0.07%)compiler_builtins::float::conv::__floattidf (86 samples, 0.07%)exp_inline (40 samples, 0.03%)log_inline (64 samples, 0.05%)__ieee754_pow_fma (114 samples, 0.09%)__libc_calloc (106 samples, 0.08%)__libc_recvfrom (252 samples, 0.19%)__libc_sendto (133 samples, 0.10%)__memcmp_evex_movbe (137 samples, 0.10%)__memcpy_avx512_unaligned_erms (1,399 samples, 1.07%)__posix_memalign (172 samples, 0.13%)__posix_memalign (80 samples, 0.06%)_mid_memalign (71 samples, 0.05%)arena_for_chunk (14 samples, 0.01%)__pow (18 samples, 0.01%)__vdso_clock_gettime (40 samples, 0.03%)[unknown] (24 samples, 0.02%)_int_free (462 samples, 0.35%)tcache_put (54 samples, 0.04%)[unknown] (14 samples, 0.01%)_int_malloc (508 samples, 0.39%)_int_memalign (68 samples, 
0.05%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (54 samples, 0.04%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (14 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (78 samples, 0.06%)alloc::raw_vec::RawVec<T,A>::grow_amortized (73 samples, 0.06%)alloc::raw_vec::finish_grow (91 samples, 0.07%)core::result::Result<T,E>::map_err (31 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Weak<ring::ec::curve25519::ed25519::signing::Ed25519KeyPair,&alloc::alloc::Global>> (16 samples, 0.01%)<alloc::sync::Weak<T,A> as core::ops::drop::Drop>::drop (16 samples, 0.01%)core::mem::drop (18 samples, 0.01%)alloc::sync::Arc<T,A>::drop_slow (21 samples, 0.02%)alloc_new_heap (49 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (49 samples, 0.04%)core::fmt::Formatter::pad_integral (40 samples, 0.03%)core::fmt::Formatter::pad_integral::write_prefix (19 samples, 0.01%)core::fmt::write (20 samples, 0.02%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (155 samples, 0.12%)core::ptr::drop_in_place<core::option::Option<core::task::wake::Waker>> (71 samples, 0.05%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (245 samples, 0.19%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_announce::{{closure}}> (33 samples, 0.03%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}}> (37 samples, 0.03%)core::str::converts::from_utf8 (33 samples, 0.03%)core::str::validations::run_utf8_validation (20 samples, 0.02%)epoll_wait (31 samples, 0.02%)hashbrown::map::HashMap<K,V,S,A>::insert (17 samples, 0.01%)rand_chacha::guts::refill_wide (19 samples, 0.01%)std_detect::detect::arch::x86::__is_feature_detected::avx2 (17 samples, 0.01%)std_detect::detect::check_for (17 samples, 0.01%)std_detect::detect::cache::test (17 samples, 
0.01%)std_detect::detect::cache::Cache::test (17 samples, 0.01%)core::sync::atomic::AtomicUsize::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)std::sys::pal::unix::time::Timespec::new (29 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (132 samples, 0.10%)core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::ge (22 samples, 0.02%)core::cmp::PartialOrd::ge (22 samples, 0.02%)std::sys::pal::unix::time::Timespec::sub_timespec (67 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock_contended (18 samples, 0.01%)std::sys_common::net::TcpListener::socket_addr (29 samples, 0.02%)std::sys_common::net::sockname (28 samples, 0.02%)syscall (552 samples, 0.42%)core::ptr::drop_in_place<core::cell::RefMut<core::option::Option<alloc::boxed::Box<tokio::runtime::scheduler::multi_thread::worker::Core>>>> (74 samples, 0.06%)core::ptr::drop_in_place<core::cell::BorrowRefMut> (74 samples, 0.06%)<core::cell::BorrowRefMut as core::ops::drop::Drop>::drop (74 samples, 0.06%)core::cell::Cell<T>::set (74 samples, 0.06%)core::cell::Cell<T>::replace (74 samples, 0.06%)core::mem::replace (74 samples, 0.06%)core::ptr::write (74 samples, 0.06%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::push_back_or_overflow (14 samples, 0.01%)tokio::runtime::context::with_scheduler (176 samples, 0.13%)std::thread::local::LocalKey<T>::try_with (152 samples, 0.12%)tokio::runtime::context::with_scheduler::{{closure}} (151 samples, 0.12%)tokio::runtime::context::scoped::Scoped<T>::with (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (71 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl 
tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (16 samples, 0.01%)core::option::Option<T>::map (19 samples, 0.01%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (24 samples, 0.02%)mio::poll::Poll::poll (53 samples, 0.04%)mio::sys::unix::selector::epoll::Selector::select (53 samples, 0.04%)core::result::Result<T,E>::map (28 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (28 samples, 0.02%)tokio::io::ready::Ready::from_mio (14 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (126 samples, 0.10%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (18 samples, 0.01%)[unknown] (51 samples, 0.04%)[unknown] (100 samples, 0.08%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (326 samples, 0.25%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (205 samples, 0.16%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (77 samples, 0.06%)[unknown] (26 samples, 0.02%)<tokio::util::linked_list::DrainFilter<T,F> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (396 samples, 0.30%)tokio::loom::std::mutex::Mutex<T>::lock (18 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (573 samples, 0.44%)core::sync::atomic::AtomicUsize::fetch_add (566 samples, 0.43%)core::sync::atomic::atomic_add (566 samples, 0.43%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (635 samples, 0.48%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (25 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::next_remote_task (44 samples, 
0.03%)tokio::runtime::scheduler::inject::shared::Shared<T>::is_empty (21 samples, 0.02%)tokio::runtime::scheduler::inject::shared::Shared<T>::len (21 samples, 0.02%)core::sync::atomic::AtomicUsize::load (21 samples, 0.02%)core::sync::atomic::atomic_load (21 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id (32 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (32 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (32 samples, 0.02%)std::sync::poison::Flag::done (32 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>,tokio::runtime::task::core::Header>>> (43 samples, 0.03%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (43 samples, 0.03%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (123 samples, 0.09%)tokio::runtime::task::list::OwnedTasks<S>::remove (117 samples, 0.09%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (80 samples, 0.06%)tokio::runtime::scheduler::defer::Defer::wake (17 samples, 0.01%)std::sys::pal::unix::futex::futex_wait (46 samples, 0.04%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (71 samples, 0.05%)std::sync::condvar::Condvar::wait (56 samples, 0.04%)std::sys::sync::condvar::futex::Condvar::wait (56 samples, 0.04%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (56 samples, 0.04%)core::sync::atomic::AtomicUsize::compare_exchange (37 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (37 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (138 samples, 
0.11%)tokio::runtime::driver::Driver::park (77 samples, 0.06%)tokio::runtime::driver::TimeDriver::park (77 samples, 0.06%)tokio::runtime::time::Driver::park (75 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Parker::park (266 samples, 0.20%)tokio::runtime::scheduler::multi_thread::park::Inner::park (266 samples, 0.20%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (432 samples, 0.33%)tokio::runtime::scheduler::multi_thread::worker::Core::should_notify_others (26 samples, 0.02%)core::cell::RefCell<T>::borrow_mut (94 samples, 0.07%)core::cell::RefCell<T>::try_borrow_mut (94 samples, 0.07%)core::cell::BorrowRefMut::new (94 samples, 0.07%)tokio::runtime::coop::budget (142 samples, 0.11%)tokio::runtime::coop::with_budget (142 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (121 samples, 0.09%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (44 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (208 samples, 0.16%)tokio::runtime::signal::Driver::process (30 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (46 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (46 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (35 samples, 0.03%)tokio::runtime::task::core::Core<T,S>::set_stage (75 samples, 0.06%)core::sync::atomic::AtomicUsize::fetch_xor (76 samples, 0.06%)core::sync::atomic::atomic_xor (76 samples, 0.06%)tokio::runtime::task::state::State::transition_to_complete (79 samples, 0.06%)tokio::runtime::task::harness::Harness<T,S>::complete (113 samples, 0.09%)tokio::runtime::task::state::State::transition_to_terminal (18 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::dealloc (28 samples, 0.02%)core::mem::drop (18 samples, 
0.01%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (18 samples, 0.01%)core::ptr::drop_in_place<tokio::util::sharded_list::ShardGuard<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>> (16 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>>> (16 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (16 samples, 0.01%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (53 samples, 0.04%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (21 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (113 samples, 0.09%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (15 samples, 0.01%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (15 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (15 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (15 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (14 samples, 0.01%)tokio::runtime::task::raw::drop_abort_handle (82 samples, 0.06%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (23 samples, 0.02%)tokio::runtime::task::state::State::ref_dec (23 samples, 0.02%)core::sync::atomic::AtomicUsize::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::task::raw::drop_join_handle_slow (34 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::drop_join_handle_slow (32 samples, 
0.02%)tokio::runtime::task::state::State::unset_join_interested (23 samples, 0.02%)tokio::runtime::task::state::State::fetch_update (23 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (43 samples, 0.03%)core::num::<impl u32>::wrapping_add (23 samples, 0.02%)core::option::Option<T>::or_else (37 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (36 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (36 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (38 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (59 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (45 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (132 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (63 samples, 0.05%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run (290 samples, 0.22%)tokio::runtime::context::runtime::enter_runtime (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (290 samples, 0.22%)tokio::runtime::context::set_scheduler (290 samples, 0.22%)std::thread::local::LocalKey<T>::with (290 samples, 0.22%)std::thread::local::LocalKey<T>::try_with (290 samples, 0.22%)tokio::runtime::context::set_scheduler::{{closure}} (290 samples, 0.22%)tokio::runtime::context::scoped::Scoped<T>::set (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (290 samples, 
0.22%)tokio::runtime::scheduler::multi_thread::worker::Context::run (290 samples, 0.22%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (327 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (322 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::poll (333 samples, 0.25%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (342 samples, 0.26%)tokio::runtime::task::harness::poll_future::{{closure}} (342 samples, 0.26%)tokio::runtime::task::harness::poll_future (348 samples, 0.27%)std::panic::catch_unwind (347 samples, 0.26%)std::panicking::try (347 samples, 0.26%)std::panicking::try::do_call (347 samples, 0.26%)core::sync::atomic::AtomicUsize::compare_exchange (18 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (18 samples, 0.01%)tokio::runtime::task::state::State::transition_to_running (47 samples, 0.04%)tokio::runtime::task::state::State::fetch_update_action (47 samples, 0.04%)tokio::runtime::task::state::State::transition_to_running::{{closure}} (19 samples, 0.01%)tokio::runtime::task::raw::poll (427 samples, 0.33%)tokio::runtime::task::harness::Harness<T,S>::poll (408 samples, 0.31%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (407 samples, 0.31%)tokio::runtime::task::state::State::transition_to_idle (17 samples, 0.01%)core::array::<impl core::default::Default for [T: 32]>::default (21 samples, 0.02%)tokio::runtime::time::wheel::Wheel::poll (14 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (72 samples, 0.05%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (23 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (15 samples, 0.01%)tokio::runtime::time::source::TimeSource::now (14 samples, 0.01%)tokio::runtime::time::Driver::park_internal (155 samples, 0.12%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (96 samples, 0.07%)tokio::runtime::time::wheel::level::slot_range 
(35 samples, 0.03%)core::num::<impl usize>::pow (35 samples, 0.03%)tokio::runtime::time::wheel::level::level_range (39 samples, 0.03%)tokio::runtime::time::wheel::level::slot_range (33 samples, 0.03%)core::num::<impl usize>::pow (33 samples, 0.03%)tokio::runtime::time::wheel::level::Level::next_expiration (208 samples, 0.16%)tokio::runtime::time::wheel::level::slot_range (48 samples, 0.04%)core::num::<impl usize>::pow (48 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (277 samples, 0.21%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::is_empty (18 samples, 0.01%)core::option::Option<T>::is_some (18 samples, 0.01%)torrust_tracker::core::Tracker::authorize::{{closure}} (50 samples, 0.04%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (37 samples, 0.03%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (27 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (19 samples, 0.01%)core::iter::traits::iterator::Iterator::collect (17 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (17 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (17 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (17 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (20 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (20 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (20 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (62 samples, 
0.05%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (40 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (27 samples, 0.02%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (17 samples, 0.01%)torrust_tracker::servers::udp::peer_builder::from_request (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (19 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (355 samples, 0.27%)<F as core::future::into_future::IntoFuture>::into_future (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (37 samples, 0.03%)core::sync::atomic::AtomicUsize::fetch_add (25 samples, 0.02%)core::sync::atomic::atomic_add (25 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet (14 samples, 0.01%)core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (20 samples, 0.02%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)core::result::Result<T,E>::map_err (16 samples, 0.01%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (136 samples, 0.10%)torrust_tracker::core::Tracker::announce::{{closure}} (173 samples, 0.13%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (267 samples, 0.20%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (30 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (423 samples, 0.32%)core::fmt::Formatter::new (26 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (80 samples, 0.06%)core::fmt::num::imp::fmt_u64 (58 samples, 0.04%)core::intrinsics::copy_nonoverlapping (15 samples, 
0.01%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (74 samples, 0.06%)core::fmt::num::imp::fmt_u64 (70 samples, 0.05%)<T as alloc::string::ToString>::to_string (207 samples, 0.16%)core::option::Option<T>::expect (19 samples, 0.01%)core::ptr::drop_in_place<alloc::string::String> (18 samples, 0.01%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (18 samples, 0.01%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (18 samples, 0.01%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (18 samples, 0.01%)torrust_tracker::servers::udp::logging::map_action_name (25 samples, 0.02%)alloc::str::<impl alloc::borrow::ToOwned for str>::to_owned (14 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (345 samples, 0.26%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (18 samples, 0.01%)core::fmt::num::imp::fmt_u64 (14 samples, 0.01%)<T as alloc::string::ToString>::to_string (35 samples, 0.03%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (1,067 samples, 0.81%)torrust_tracker::servers::udp::logging::log_response (72 samples, 0.05%)alloc::vec::from_elem (68 samples, 0.05%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (68 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (68 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (68 samples, 0.05%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (68 samples, 0.05%)alloc::alloc::Global::alloc_impl (68 samples, 0.05%)alloc::alloc::alloc_zeroed (68 samples, 0.05%)__rdl_alloc_zeroed (68 samples, 0.05%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (68 samples, 0.05%)[unknown] (48 samples, 0.04%)[unknown] (16 samples, 0.01%)[unknown] (28 samples, 0.02%)std::sys::pal::unix::cvt (134 samples, 0.10%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (134 samples, 0.10%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} 
(1,908 samples, 1.45%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (504 samples, 0.38%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (382 samples, 0.29%)tokio::net::udp::UdpSocket::send_to::{{closure}} (344 samples, 0.26%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (332 samples, 0.25%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (304 samples, 0.23%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (215 samples, 0.16%)mio::net::udp::UdpSocket::send_to (185 samples, 0.14%)mio::io_source::IoSource<T>::do_io (185 samples, 0.14%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (185 samples, 0.14%)mio::net::udp::UdpSocket::send_to::{{closure}} (185 samples, 0.14%)std::net::udp::UdpSocket::send_to (185 samples, 0.14%)std::sys_common::net::UdpSocket::send_to (169 samples, 0.13%)alloc::vec::Vec<T>::with_capacity (17 samples, 0.01%)alloc::vec::Vec<T,A>::with_capacity_in (17 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (104 samples, 0.08%)tokio::net::udp::UdpSocket::ready::{{closure}} (85 samples, 0.06%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (190 samples, 0.14%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (49 samples, 0.04%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (28 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (330 samples, 0.25%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (327 samples, 0.25%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (92 samples, 0.07%)tokio::task::spawn::spawn (92 samples, 0.07%)tokio::task::spawn::spawn_inner (92 samples, 0.07%)tokio::runtime::context::current::with_current (92 samples, 0.07%)std::thread::local::LocalKey<T>::try_with (92 samples, 0.07%)tokio::runtime::context::current::with_current::{{closure}} (92 samples, 
0.07%)core::option::Option<T>::map (92 samples, 0.07%)tokio::task::spawn::spawn_inner::{{closure}} (92 samples, 0.07%)tokio::runtime::scheduler::Handle::spawn (92 samples, 0.07%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (92 samples, 0.07%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (92 samples, 0.07%)tokio::runtime::task::list::OwnedTasks<S>::bind (90 samples, 0.07%)tokio::runtime::task::new_task (89 samples, 0.07%)tokio::runtime::task::raw::RawTask::new (89 samples, 0.07%)tokio::runtime::task::core::Cell<T,S>::new (89 samples, 0.07%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (34 samples, 0.03%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (27 samples, 0.02%)alloc::sync::Arc<T>::new (21 samples, 0.02%)alloc::boxed::Box<T>::new (21 samples, 0.02%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (152 samples, 0.12%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (125 samples, 0.10%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (88 samples, 0.07%)core::option::Option<T>::is_some_and (18 samples, 0.01%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (17 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (17 
samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (17 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (22 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (22 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (17 samples, 0.01%)std::sync::rwlock::RwLock<T>::read (16 samples, 0.01%)std::sys::sync::rwlock::futex::RwLock::read (16 samples, 0.01%)tracing::span::Span::log (26 samples, 0.02%)core::fmt::Arguments::new_v1 (15 samples, 0.01%)tracing_core::span::Record::is_empty (34 samples, 0.03%)tracing_core::field::ValueSet::is_empty (34 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::all (22 samples, 0.02%)tracing_core::field::ValueSet::is_empty::{{closure}} (18 samples, 0.01%)core::option::Option<T>::is_none (16 samples, 0.01%)core::option::Option<T>::is_some (16 samples, 0.01%)tracing::span::Span::record_all (143 samples, 0.11%)unlink_chunk (185 samples, 0.14%)uuid::builder::Builder::with_variant (48 samples, 0.04%)[unknown] (40 samples, 
0.03%)uuid::builder::Builder::from_random_bytes (77 samples, 0.06%)uuid::builder::Builder::with_version (29 samples, 0.02%)[unknown] (24 samples, 0.02%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (161 samples, 0.12%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (161 samples, 0.12%)[unknown] (92 samples, 0.07%)rand::rng::Rng::gen (162 samples, 0.12%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (162 samples, 0.12%)rand::rng::Rng::gen (162 samples, 0.12%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (162 samples, 0.12%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (162 samples, 0.12%)[unknown] (18,233 samples, 13.89%)[unknown]uuid::v4::<impl uuid::Uuid>::new_v4 (270 samples, 0.21%)uuid::rng::bytes (190 samples, 0.14%)rand::random (190 samples, 0.14%)__memcpy_avx512_unaligned_erms (69 samples, 0.05%)_int_free (23 samples, 0.02%)_int_malloc (23 samples, 0.02%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)advise_stack_range (31 samples, 0.02%)__GI_madvise (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (29 samples, 0.02%)[unknown] (28 samples, 0.02%)[unknown] (28 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (17 samples, 
0.01%)std::sys::pal::unix::futex::futex_wait (31 samples, 0.02%)syscall (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (29 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (17 samples, 0.01%)std::sync::condvar::Condvar::wait_timeout (35 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait_timeout (35 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (35 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (56 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (56 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (56 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (56 samples, 0.04%)std::sys::pal::unix::futex::futex_wait (56 samples, 0.04%)syscall (56 samples, 0.04%)[unknown] (56 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (53 samples, 0.04%)[unknown] (52 samples, 0.04%)[unknown] (46 samples, 0.04%)[unknown] (39 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (15 samples, 0.01%)[[vdso]] (26 samples, 0.02%)[[vdso]] (263 samples, 0.20%)__ieee754_pow_fma (26 samples, 0.02%)__pow (314 samples, 0.24%)std::f64::<impl f64>::powf (345 samples, 0.26%)__GI___clock_gettime (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (416 samples, 0.32%)std::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (15 samples, 
0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_processing_scheduled_tasks (24 samples, 0.02%)std::time::Instant::now (18 samples, 0.01%)std::sys::pal::unix::time::Instant::now (18 samples, 0.01%)mio::poll::Poll::poll (102 samples, 0.08%)mio::sys::unix::selector::epoll::Selector::select (102 samples, 0.08%)epoll_wait (99 samples, 0.08%)[unknown] (92 samples, 0.07%)[unknown] (91 samples, 0.07%)[unknown] (91 samples, 0.07%)[unknown] (88 samples, 0.07%)[unknown] (85 samples, 0.06%)[unknown] (84 samples, 0.06%)[unknown] (43 samples, 0.03%)[unknown] (29 samples, 0.02%)[unknown] (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (125 samples, 0.10%)tokio::runtime::scheduler::multi_thread::park::Parker::park_timeout (125 samples, 0.10%)tokio::runtime::driver::Driver::park_timeout (125 samples, 0.10%)tokio::runtime::driver::TimeDriver::park_timeout (125 samples, 0.10%)tokio::runtime::time::Driver::park_timeout (125 samples, 0.10%)tokio::runtime::time::Driver::park_internal (116 samples, 0.09%)tokio::runtime::io::driver::Driver::turn (116 samples, 0.09%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (148 samples, 0.11%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (111 samples, 0.08%)alloc::sync::Arc<T,A>::inner (111 samples, 0.08%)core::ptr::non_null::NonNull<T>::as_ref (111 samples, 0.08%)core::sync::atomic::AtomicUsize::compare_exchange (16 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (16 samples, 0.01%)core::bool::<impl bool>::then (88 samples, 0.07%)std::sys::pal::unix::futex::futex_wait (13,339 samples, 10.16%)std::sys::pal::..syscall (13,003 samples, 9.90%)syscall[unknown] (12,895 samples, 9.82%)[unknown][unknown] (12,759 samples, 9.72%)[unknown][unknown] (12,313 samples, 9.38%)[unknown][unknown] (12,032 samples, 9.16%)[unknown][unknown] (11,734 samples, 8.94%)[unknown][unknown] (11,209 samples, 8.54%)[unknown][unknown] (10,265 samples, 7.82%)[unknown][unknown] 
(9,345 samples, 7.12%)[unknown][unknown] (8,623 samples, 6.57%)[unknown][unknown] (7,744 samples, 5.90%)[unknow..[unknown] (5,922 samples, 4.51%)[unkn..[unknown] (4,459 samples, 3.40%)[un..[unknown] (2,808 samples, 2.14%)[..[unknown] (1,275 samples, 0.97%)[unknown] (1,022 samples, 0.78%)[unknown] (738 samples, 0.56%)[unknown] (607 samples, 0.46%)[unknown] (155 samples, 0.12%)core::result::Result<T,E>::is_err (77 samples, 0.06%)core::result::Result<T,E>::is_ok (77 samples, 0.06%)std::sync::condvar::Condvar::wait (13,429 samples, 10.23%)std::sync::cond..std::sys::sync::condvar::futex::Condvar::wait (13,428 samples, 10.23%)std::sys::sync:..std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (13,428 samples, 10.23%)std::sys::sync:..std::sys::sync::mutex::futex::Mutex::lock (89 samples, 0.07%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (13,508 samples, 10.29%)tokio::runtime:..tokio::loom::std::mutex::Mutex<T>::lock (64 samples, 0.05%)std::sync::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (31 samples, 0.02%)core::sync::atomic::AtomicU32::compare_exchange (30 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (30 samples, 0.02%)core::sync::atomic::AtomicUsize::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (38 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Parker::park (34 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park (34 samples, 0.03%)core::array::<impl core::default::Default for [T: 32]>::default (17 samples, 0.01%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (19 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (33 samples, 0.03%)tokio::runtime::time::wheel::level::slot_range (15 samples, 0.01%)core::num::<impl usize>::pow (15 samples, 
0.01%)tokio::runtime::time::wheel::level::level_range (17 samples, 0.01%)tokio::runtime::time::wheel::level::slot_range (15 samples, 0.01%)core::num::<impl usize>::pow (15 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_expiration (95 samples, 0.07%)tokio::runtime::time::wheel::level::slot_range (41 samples, 0.03%)core::num::<impl usize>::pow (41 samples, 0.03%)tokio::runtime::time::wheel::Wheel::next_expiration (129 samples, 0.10%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (202 samples, 0.15%)tokio::runtime::time::wheel::Wheel::poll_at (17 samples, 0.01%)tokio::runtime::time::wheel::Wheel::next_expiration (15 samples, 0.01%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (38 samples, 0.03%)core::option::Option<T>::map (38 samples, 0.03%)core::result::Result<T,E>::map (31 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (31 samples, 0.02%)alloc::vec::Vec<T,A>::set_len (17 samples, 0.01%)[[vdso]] (28 samples, 0.02%)[unknown] (11,031 samples, 8.40%)[unknown][unknown] (10,941 samples, 8.33%)[unknown][unknown] (10,850 samples, 8.26%)[unknown][unknown] (10,691 samples, 8.14%)[unknown][unknown] (10,070 samples, 7.67%)[unknown][unknown] (9,737 samples, 7.42%)[unknown][unknown] (7,659 samples, 5.83%)[unknow..[unknown] (6,530 samples, 4.97%)[unkno..[unknown] (5,633 samples, 4.29%)[unkn..[unknown] (5,055 samples, 3.85%)[unk..[unknown] (4,046 samples, 3.08%)[un..[unknown] (2,911 samples, 2.22%)[..[unknown] (2,115 samples, 1.61%)[unknown] (1,226 samples, 0.93%)[unknown] (455 samples, 0.35%)[unknown] (408 samples, 0.31%)[unknown] (249 samples, 0.19%)[unknown] (202 samples, 0.15%)[unknown] (100 samples, 0.08%)mio::poll::Poll::poll (11,328 samples, 8.63%)mio::poll::P..mio::sys::unix::selector::epoll::Selector::select (11,328 samples, 8.63%)mio::sys::un..epoll_wait (11,229 samples, 8.55%)epoll_wait__GI___pthread_disable_asynccancel (50 samples, 
0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (47 samples, 0.04%)tokio::util::bit::Pack::pack (38 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (25 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (23 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (19 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (11,595 samples, 8.83%)tokio::runti..tokio::runtime::io::scheduled_io::ScheduledIo::wake (175 samples, 0.13%)__GI___clock_gettime (15 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (18 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (26 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (26 samples, 0.02%)tokio::time::clock::Clock::now (20 samples, 0.02%)tokio::time::clock::now (20 samples, 0.02%)std::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Instant::now (20 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (17 samples, 0.01%)tokio::runtime::time::Driver::park_internal (11,686 samples, 8.90%)tokio::runtim..tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (11,957 samples, 9.11%)tokio::runtim..tokio::runtime::driver::Driver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::driver::TimeDriver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::time::Driver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::scheduler::multi_thread::park::Parker::park (25,502 samples, 19.42%)tokio::runtime::scheduler::mul..tokio::runtime::scheduler::multi_thread::park::Inner::park (25,502 samples, 19.42%)tokio::runtime::scheduler::mul..tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (25,547 samples, 19.46%)tokio::runtime::scheduler::mul..core::result::Result<T,E>::is_err (14 samples, 0.01%)core::result::Result<T,E>::is_ok (14 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (45 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (45 samples, 
0.03%)tokio::loom::std::mutex::Mutex<T>::lock (84 samples, 0.06%)std::sync::mutex::Mutex<T>::lock (81 samples, 0.06%)std::sys::sync::mutex::futex::Mutex::lock (73 samples, 0.06%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (122 samples, 0.09%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (90 samples, 0.07%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (90 samples, 0.07%)core::slice::<impl [T]>::contains (241 samples, 0.18%)<T as core::slice::cmp::SliceContains>::slice_contains (241 samples, 0.18%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (241 samples, 0.18%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (75 samples, 0.06%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (75 samples, 0.06%)core::sync::atomic::AtomicU32::compare_exchange (20 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (283 samples, 0.22%)tokio::loom::std::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (24 samples, 0.02%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (33 samples, 0.03%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (33 samples, 0.03%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (33 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::unpark_worker_by_id (98 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (401 samples, 0.31%)alloc::vec::Vec<T,A>::push (14 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (15 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (15 samples, 
0.01%)std::sys::sync::mutex::futex::Mutex::unlock (14 samples, 0.01%)core::result::Result<T,E>::is_err (15 samples, 0.01%)core::result::Result<T,E>::is_ok (15 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (22 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (22 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (63 samples, 0.05%)std::sync::mutex::Mutex<T>::lock (62 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock (59 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (106 samples, 0.08%)tokio::runtime::scheduler::multi_thread::idle::State::dec_num_unparked (14 samples, 0.01%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (21 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (17 samples, 0.01%)alloc::sync::Arc<T,A>::inner (17 samples, 0.01%)core::ptr::non_null::NonNull<T>::as_ref (17 samples, 0.01%)core::sync::atomic::AtomicU32::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (68 samples, 0.05%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (51 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (33 samples, 0.03%)core::sync::atomic::AtomicU64::load (16 samples, 0.01%)core::sync::atomic::atomic_load (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (106 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::Context::park (26,672 samples, 20.31%)tokio::runtime::scheduler::multi..tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (272 samples, 
0.21%)tokio::runtime::scheduler::multi_thread::worker::Core::has_tasks (33 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::has_tasks (24 samples, 0.02%)tokio::runtime::context::budget (18 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (18 samples, 0.01%)syscall (61 samples, 0.05%)__memcpy_avx512_unaligned_erms (172 samples, 0.13%)__memcpy_avx512_unaligned_erms (224 samples, 0.17%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (228 samples, 0.17%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (228 samples, 0.17%)std::panic::catch_unwind (415 samples, 0.32%)std::panicking::try (415 samples, 0.32%)std::panicking::try::do_call (415 samples, 0.32%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (415 samples, 0.32%)core::ops::function::FnOnce::call_once (415 samples, 0.32%)tokio::runtime::task::harness::Harness<T,S>::complete::{{closure}} (415 samples, 0.32%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (415 samples, 0.32%)tokio::runtime::task::core::Core<T,S>::set_stage (410 samples, 0.31%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (27 samples, 0.02%)core::result::Result<T,E>::is_err (43 samples, 0.03%)core::result::Result<T,E>::is_ok (43 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::complete (570 samples, 0.43%)tokio::runtime::task::harness::Harness<T,S>::release (155 samples, 0.12%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (152 samples, 0.12%)tokio::runtime::task::list::OwnedTasks<S>::remove (152 samples, 0.12%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (103 samples, 0.08%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (65 samples, 
0.05%)tokio::loom::std::mutex::Mutex<T>::lock (58 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (58 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (54 samples, 0.04%)std::io::stdio::stderr::INSTANCE (17 samples, 0.01%)tokio::runtime::coop::budget (26 samples, 0.02%)tokio::runtime::coop::with_budget (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (35 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (70 samples, 0.05%)__memcpy_avx512_unaligned_erms (42 samples, 0.03%)core::cmp::Ord::min (22 samples, 0.02%)core::cmp::min_by (22 samples, 0.02%)std::io::cursor::Cursor<T>::remaining_slice (27 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (30 samples, 0.02%)std::io::cursor::Cursor<T>::remaining_slice (24 samples, 0.02%)core::slice::index::<impl core::ops::index::Index<I> for [T]>::index (19 samples, 0.01%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::index (19 samples, 0.01%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (19 samples, 0.01%)<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (19 samples, 0.01%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (44 samples, 0.03%)std::io::impls::<impl std::io::Read for &[u8]>::read_exact (20 samples, 0.02%)byteorder::io::ReadBytesExt::read_i32 (46 samples, 0.04%)core::cmp::Ord::min (14 samples, 0.01%)core::cmp::min_by (14 samples, 0.01%)std::io::cursor::Cursor<T>::remaining_slice (19 samples, 0.01%)byteorder::io::ReadBytesExt::read_i64 (24 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (24 samples, 0.02%)aquatic_udp_protocol::request::Request::from_bytes (349 samples, 0.27%)__GI___lll_lock_wake_private (148 samples, 0.11%)[unknown] (139 samples, 0.11%)[unknown] (137 samples, 
0.10%)[unknown] (123 samples, 0.09%)[unknown] (111 samples, 0.08%)[unknown] (98 samples, 0.07%)[unknown] (42 samples, 0.03%)[unknown] (30 samples, 0.02%)__GI___lll_lock_wait_private (553 samples, 0.42%)futex_wait (541 samples, 0.41%)[unknown] (536 samples, 0.41%)[unknown] (531 samples, 0.40%)[unknown] (524 samples, 0.40%)[unknown] (515 samples, 0.39%)[unknown] (498 samples, 0.38%)[unknown] (470 samples, 0.36%)[unknown] (435 samples, 0.33%)[unknown] (350 samples, 0.27%)[unknown] (327 samples, 0.25%)[unknown] (290 samples, 0.22%)[unknown] (222 samples, 0.17%)[unknown] (160 samples, 0.12%)[unknown] (104 samples, 0.08%)[unknown] (33 samples, 0.03%)[unknown] (25 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (703 samples, 0.54%)__GI___libc_free (866 samples, 0.66%)tracing::span::Span::record_all (30 samples, 0.02%)unlink_chunk (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (899 samples, 0.68%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (899 samples, 0.68%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (899 samples, 0.68%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (899 samples, 0.68%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (899 samples, 0.68%)alloc::alloc::dealloc (899 samples, 0.68%)__rdl_dealloc (899 samples, 0.68%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (899 samples, 0.68%)core::result::Result<T,E>::expect (91 samples, 0.07%)core::result::Result<T,E>::map_err (28 samples, 0.02%)[[vdso]] (28 samples, 0.02%)__GI___clock_gettime (47 samples, 0.04%)std::time::Instant::elapsed (67 samples, 0.05%)std::time::Instant::now (54 samples, 0.04%)std::sys::pal::unix::time::Instant::now (54 samples, 0.04%)std::sys::pal::unix::time::Timespec::now (53 samples, 0.04%)std::sys::pal::unix::cvt (23 samples, 0.02%)__GI_getsockname (3,792 samples, 2.89%)__..[unknown] (3,714 samples, 2.83%)[u..[unknown] (3,661 
samples, 2.79%)[u..[unknown] (3,557 samples, 2.71%)[u..[unknown] (3,416 samples, 2.60%)[u..[unknown] (2,695 samples, 2.05%)[..[unknown] (2,063 samples, 1.57%)[unknown] (891 samples, 0.68%)[unknown] (270 samples, 0.21%)[unknown] (99 samples, 0.08%)[unknown] (94 samples, 0.07%)[unknown] (84 samples, 0.06%)[unknown] (77 samples, 0.06%)[unknown] (25 samples, 0.02%)[unknown] (16 samples, 0.01%)std::sys_common::net::TcpListener::socket_addr::{{closure}} (3,800 samples, 2.89%)st..tokio::net::udp::UdpSocket::local_addr (3,838 samples, 2.92%)to..mio::net::udp::UdpSocket::local_addr (3,838 samples, 2.92%)mi..std::net::tcp::TcpListener::local_addr (3,838 samples, 2.92%)st..std::sys_common::net::TcpListener::socket_addr (3,838 samples, 2.92%)st..std::sys_common::net::sockname (3,835 samples, 2.92%)st..[[vdso]] (60 samples, 0.05%)rand_chacha::guts::ChaCha::pos64 (168 samples, 0.13%)<ppv_lite86::soft::x2<W,G> as core::ops::arith::AddAssign>::add_assign (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::AddAssign>::add_assign (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::Add>::add (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_add_epi32 (26 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_or_si256 (29 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (31 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (31 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right24 (18 samples, 
0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right24 (18 samples, 0.01%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (18 samples, 0.01%)rand_chacha::guts::round (118 samples, 0.09%)rand_chacha::guts::refill_wide::impl_avx2 (312 samples, 0.24%)rand_chacha::guts::refill_wide::fn_impl (312 samples, 0.24%)rand_chacha::guts::refill_wide_impl (312 samples, 0.24%)<rand_chacha::chacha::ChaCha12Core as rand_core::block::BlockRngCore>::generate (384 samples, 0.29%)rand_chacha::guts::ChaCha::refill4 (384 samples, 0.29%)rand::rng::Rng::gen (432 samples, 0.33%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (432 samples, 0.33%)rand::rng::Rng::gen (432 samples, 0.33%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (432 samples, 0.33%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (432 samples, 0.33%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (432 samples, 0.33%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (432 samples, 0.33%)rand_core::block::BlockRng<R>::generate_and_set (392 samples, 0.30%)<rand::rngs::adapter::reseeding::ReseedingCore<R,Rsdr> as rand_core::block::BlockRngCore>::generate (392 samples, 0.30%)torrust_tracker::servers::udp::handlers::RequestId::make (440 samples, 0.34%)uuid::v4::<impl uuid::Uuid>::new_v4 (436 samples, 0.33%)uuid::rng::bytes (435 samples, 0.33%)rand::random (435 samples, 0.33%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (34 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for 
torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (22 samples, 0.02%)core::iter::traits::iterator::Iterator::collect (16 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (16 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (16 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (16 samples, 0.01%)<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::next (15 samples, 0.01%)core::iter::traits::iterator::Iterator::find (15 samples, 0.01%)core::iter::traits::iterator::Iterator::try_fold (15 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (31 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.03%)core::slice::iter::Iter<T>::post_inc_start (14 samples, 0.01%)core::ptr::non_null::NonNull<T>::add (14 samples, 0.01%)__memcmp_evex_movbe (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (26 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (165 samples, 0.13%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (165 samples, 0.13%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (165 samples, 0.13%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (165 samples, 0.13%)<u8 as core::slice::cmp::SliceOrd>::compare (165 samples, 0.13%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (339 
samples, 0.26%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (308 samples, 0.23%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (308 samples, 0.23%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (342 samples, 0.26%)std::sys::sync::rwlock::futex::RwLock::spin_read (25 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_until (25 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read_contended (28 samples, 0.02%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (436 samples, 0.33%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (397 samples, 0.30%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (29 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (29 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (29 samples, 0.02%)__memcmp_evex_movbe (31 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (52 samples, 0.04%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (52 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (52 samples, 0.04%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (52 samples, 0.04%)<u8 as core::slice::cmp::SliceOrd>::compare (52 samples, 0.04%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (103 samples, 0.08%)alloc::collections::btree::search::<impl 
alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (102 samples, 0.08%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (96 samples, 0.07%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (96 samples, 0.07%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (72 samples, 0.05%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (104 samples, 0.08%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (104 samples, 0.08%)core::slice::iter::Iter<T>::post_inc_start (32 samples, 0.02%)core::ptr::non_null::NonNull<T>::add (32 samples, 0.02%)__memcmp_evex_movbe (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (81 samples, 0.06%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (271 samples, 0.21%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (271 samples, 0.21%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (271 samples, 0.21%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (271 samples, 0.21%)<u8 as core::slice::cmp::SliceOrd>::compare (271 samples, 0.21%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (610 samples, 0.46%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (566 samples, 0.43%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (566 samples, 0.43%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Immut,K,V,Type>::keys (18 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (616 samples, 
0.47%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::KV>::split (15 samples, 0.01%)alloc::collections::btree::map::entry::Entry<K,V,A>::or_insert (46 samples, 0.04%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (45 samples, 0.03%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (40 samples, 0.03%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert (27 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (29 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (20 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (120 samples, 0.09%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (118 samples, 0.09%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Owned,K,V,alloc::collections::btree::node::marker::Leaf>::new_leaf (118 samples, 0.09%)alloc::collections::btree::node::LeafNode<K,V>::new (118 samples, 0.09%)alloc::boxed::Box<T,A>::new_uninit_in (118 samples, 0.09%)alloc::boxed::Box<T,A>::try_new_uninit_in (118 samples, 0.09%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (118 samples, 0.09%)alloc::alloc::Global::alloc_impl (118 samples, 0.09%)alloc::alloc::alloc (118 samples, 0.09%)__rdl_alloc (118 samples, 0.09%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for 
std::alloc::System>::alloc (118 samples, 0.09%)__GI___libc_malloc (118 samples, 0.09%)_int_malloc (107 samples, 0.08%)_int_malloc (28 samples, 0.02%)__GI___libc_malloc (32 samples, 0.02%)__rdl_alloc (36 samples, 0.03%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (36 samples, 0.03%)alloc::sync::Arc<T>::new (42 samples, 0.03%)alloc::boxed::Box<T>::new (42 samples, 0.03%)alloc::alloc::exchange_malloc (39 samples, 0.03%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (39 samples, 0.03%)alloc::alloc::Global::alloc_impl (39 samples, 0.03%)alloc::alloc::alloc (39 samples, 0.03%)core::mem::drop (15 samples, 0.01%)core::ptr::drop_in_place<core::option::Option<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (15 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (15 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (15 samples, 0.01%)__GI___libc_free (39 samples, 0.03%)_int_free (37 samples, 0.03%)get_max_fast (16 samples, 0.01%)core::option::Option<T>::is_some_and (50 samples, 0.04%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (50 samples, 0.04%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (50 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (50 samples, 0.04%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (290 samples, 0.22%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (284 
samples, 0.22%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (255 samples, 0.19%)std::sys::sync::rwlock::futex::RwLock::spin_read (16 samples, 0.01%)std::sys::sync::rwlock::futex::RwLock::spin_until (16 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (21 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (21 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (21 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read_contended (21 samples, 0.02%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (1,147 samples, 0.87%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (1,144 samples, 0.87%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents_mut (32 samples, 0.02%)std::sync::rwlock::RwLock<T>::write (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::write (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::write_contended (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_write (28 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_until (28 samples, 
0.02%)torrust_tracker::core::Tracker::announce::{{closure}} (1,597 samples, 1.22%)<core::net::socket_addr::SocketAddrV4 as core::hash::Hash>::hash (14 samples, 0.01%)<core::net::ip_addr::Ipv4Addr as core::hash::Hash>::hash (14 samples, 0.01%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (29 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (24 samples, 0.02%)<core::time::Nanoseconds as core::hash::Hash>::hash (25 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (25 samples, 0.02%)core::hash::Hasher::write_u32 (25 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (25 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (25 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (36 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (37 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (37 samples, 0.03%)<core::time::Duration as core::hash::Hash>::hash (64 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (39 samples, 0.03%)core::hash::Hasher::write_u64 (39 samples, 0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (122 samples, 0.09%)core::hash::impls::<impl core::hash::Hash for u64>::hash (58 samples, 0.04%)core::hash::Hasher::write_u64 (58 samples, 0.04%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (58 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (58 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (57 samples, 0.04%)core::hash::sip::u8to64_le (23 samples, 0.02%)core::hash::Hasher::write_length_prefix (27 samples, 0.02%)core::hash::Hasher::write_usize (27 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::Hasher<S> as 
core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (16 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (246 samples, 0.19%)core::array::<impl core::hash::Hash for [T: N]>::hash (93 samples, 0.07%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (93 samples, 0.07%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (66 samples, 0.05%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (62 samples, 0.05%)core::hash::sip::u8to64_le (17 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::check (285 samples, 0.22%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (36 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (36 samples, 0.03%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (24 samples, 0.02%)std::time::SystemTime::now (19 samples, 0.01%)std::sys::pal::unix::time::SystemTime::now (19 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (1,954 samples, 1.49%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (24 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (18 samples, 0.01%)<core::time::Nanoseconds as core::hash::Hash>::hash (20 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (20 samples, 0.02%)core::hash::Hasher::write_u32 (20 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (20 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (20 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (44 samples, 0.03%)<core::time::Duration 
as core::hash::Hash>::hash (65 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (45 samples, 0.03%)core::hash::Hasher::write_u64 (45 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (45 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (45 samples, 0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (105 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u64>::hash (40 samples, 0.03%)core::hash::Hasher::write_u64 (40 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (40 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (40 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (39 samples, 0.03%)core::hash::Hasher::write_length_prefix (34 samples, 0.03%)core::hash::Hasher::write_usize (34 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (34 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (34 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (33 samples, 0.03%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (231 samples, 0.18%)core::array::<impl core::hash::Hash for [T: N]>::hash (100 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (100 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (66 samples, 0.05%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (61 samples, 0.05%)core::hash::sip::u8to64_le (16 samples, 0.01%)_int_free (16 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (270 samples, 0.21%)torrust_tracker::servers::udp::connection_cookie::make (268 samples, 
0.20%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (36 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (35 samples, 0.03%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (31 samples, 0.02%)std::time::SystemTime::now (26 samples, 0.02%)std::sys::pal::unix::time::SystemTime::now (26 samples, 0.02%)torrust_tracker::core::ScrapeData::add_file (19 samples, 0.01%)std::collections::hash::map::HashMap<K,V,S>::insert (19 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (19 samples, 0.01%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (16 samples, 0.01%)hashbrown::raw::RawTable<T,A>::reserve (16 samples, 0.01%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (17 samples, 0.01%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (17 samples, 0.01%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (17 samples, 0.01%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (17 samples, 0.01%)<u8 as core::slice::cmp::SliceOrd>::compare (17 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (53 samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (53 samples, 0.04%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (2,336 samples, 1.78%)t..torrust_tracker::servers::udp::handlers::handle_scrape::{{closure}} (101 samples, 0.08%)torrust_tracker::core::Tracker::scrape::{{closure}} (90 samples, 
0.07%)torrust_tracker::core::Tracker::get_swarm_metadata (68 samples, 0.05%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (64 samples, 0.05%)alloc::raw_vec::finish_grow (19 samples, 0.01%)alloc::vec::Vec<T,A>::reserve (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::grow_amortized (21 samples, 0.02%)<alloc::string::String as core::fmt::Write>::write_str (23 samples, 0.02%)alloc::string::String::push_str (23 samples, 0.02%)alloc::vec::Vec<T,A>::extend_from_slice (23 samples, 0.02%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (23 samples, 0.02%)alloc::vec::Vec<T,A>::append_elements (23 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (85 samples, 0.06%)core::fmt::num::imp::fmt_u64 (78 samples, 0.06%)<alloc::string::String as core::fmt::Write>::write_str (15 samples, 0.01%)alloc::string::String::push_str (15 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (15 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (15 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (15 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (37 samples, 0.03%)core::fmt::num::imp::fmt_u64 (36 samples, 0.03%)<T as alloc::string::ToString>::to_string (141 samples, 0.11%)core::option::Option<T>::expect (34 samples, 0.03%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (28 samples, 0.02%)alloc::alloc::dealloc (28 samples, 
0.02%)__rdl_dealloc (28 samples, 0.02%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (28 samples, 0.02%)core::ptr::drop_in_place<alloc::string::String> (55 samples, 0.04%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (55 samples, 0.04%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (55 samples, 0.04%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (55 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::current_memory (20 samples, 0.02%)torrust_tracker::servers::udp::logging::map_action_name (16 samples, 0.01%)binascii::bin2hex (51 samples, 0.04%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (16 samples, 0.01%)core::fmt::write (25 samples, 0.02%)core::fmt::rt::Argument::fmt (15 samples, 0.01%)core::fmt::Formatter::write_fmt (87 samples, 0.07%)core::str::converts::from_utf8 (43 samples, 0.03%)core::str::validations::run_utf8_validation (37 samples, 0.03%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (161 samples, 0.12%)<T as alloc::string::ToString>::to_string (161 samples, 0.12%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (156 samples, 0.12%)torrust_tracker::servers::udp::logging::log_request (479 samples, 0.36%)[[vdso]] (51 samples, 0.04%)alloc::raw_vec::finish_grow (56 samples, 0.04%)alloc::vec::Vec<T,A>::reserve (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::reserve (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::grow_amortized (64 samples, 0.05%)<alloc::string::String as core::fmt::Write>::write_str (65 samples, 0.05%)alloc::string::String::push_str (65 samples, 0.05%)alloc::vec::Vec<T,A>::extend_from_slice (65 samples, 0.05%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (65 samples, 0.05%)alloc::vec::Vec<T,A>::append_elements (65 samples, 0.05%)core::fmt::num::imp::<impl 
core::fmt::Display for i32>::fmt (114 samples, 0.09%)core::fmt::num::imp::fmt_u64 (110 samples, 0.08%)<T as alloc::string::ToString>::to_string (132 samples, 0.10%)core::option::Option<T>::expect (20 samples, 0.02%)core::ptr::drop_in_place<alloc::string::String> (22 samples, 0.02%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (22 samples, 0.02%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (22 samples, 0.02%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (22 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (8,883 samples, 6.77%)torrust_t..torrust_tracker::servers::udp::logging::log_response (238 samples, 0.18%)__GI___lll_lock_wait_private (14 samples, 0.01%)futex_wait (14 samples, 0.01%)__GI___lll_lock_wake_private (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (17 samples, 0.01%)_int_malloc (191 samples, 0.15%)__libc_calloc (238 samples, 0.18%)__memcpy_avx512_unaligned_erms (34 samples, 0.03%)alloc::vec::from_elem (316 samples, 0.24%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (316 samples, 0.24%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (316 samples, 0.24%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (316 samples, 0.24%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (312 samples, 0.24%)alloc::alloc::Global::alloc_impl (312 samples, 0.24%)alloc::alloc::alloc_zeroed (312 samples, 0.24%)__rdl_alloc_zeroed (312 samples, 0.24%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (312 samples, 0.24%)byteorder::ByteOrder::write_i32 (18 samples, 0.01%)<byteorder::BigEndian as byteorder::ByteOrder>::write_u32 (18 samples, 0.01%)core::num::<impl u32>::to_be_bytes (18 samples, 0.01%)core::num::<impl u32>::to_be (18 samples, 0.01%)core::num::<impl u32>::swap_bytes (18 samples, 0.01%)byteorder::io::WriteBytesExt::write_i32 (89 
samples, 0.07%)std::io::Write::write_all (71 samples, 0.05%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (71 samples, 0.05%)std::io::cursor::vec_write (71 samples, 0.05%)std::io::cursor::vec_write_unchecked (51 samples, 0.04%)core::ptr::mut_ptr::<impl *mut T>::copy_from (51 samples, 0.04%)core::intrinsics::copy (51 samples, 0.04%)aquatic_udp_protocol::response::Response::write (227 samples, 0.17%)byteorder::io::WriteBytesExt::write_i64 (28 samples, 0.02%)std::io::Write::write_all (21 samples, 0.02%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (21 samples, 0.02%)std::io::cursor::vec_write (21 samples, 0.02%)std::io::cursor::vec_write_unchecked (21 samples, 0.02%)core::ptr::mut_ptr::<impl *mut T>::copy_from (21 samples, 0.02%)core::intrinsics::copy (21 samples, 0.02%)__GI___lll_lock_wake_private (17 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (14 samples, 0.01%)__GI___lll_lock_wait_private (16 samples, 0.01%)futex_wait (15 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (136 samples, 0.10%)__GI___libc_free (206 samples, 0.16%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (211 samples, 0.16%)alloc::alloc::dealloc (211 samples, 0.16%)__rdl_dealloc (211 samples, 0.16%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (211 samples, 0.16%)core::ptr::drop_in_place<std::io::cursor::Cursor<alloc::vec::Vec<u8>>> (224 samples, 0.17%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (224 samples, 0.17%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (224 samples, 0.17%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (224 samples, 0.17%)std::io::cursor::Cursor<T>::new (56 samples, 0.04%)tokio::io::ready::Ready::intersection (23 samples, 0.02%)tokio::io::ready::Ready::from_interest (23 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness 
as core::future::future::Future>::poll (83 samples, 0.06%)[unknown] (32,674 samples, 24.88%)[unknown][unknown] (32,402 samples, 24.68%)[unknown][unknown] (32,272 samples, 24.58%)[unknown][unknown] (32,215 samples, 24.54%)[unknown][unknown] (31,174 samples, 23.74%)[unknown][unknown] (30,794 samples, 23.45%)[unknown][unknown] (30,036 samples, 22.88%)[unknown][unknown] (28,639 samples, 21.81%)[unknown][unknown] (27,908 samples, 21.25%)[unknown][unknown] (26,013 samples, 19.81%)[unknown][unknown] (23,181 samples, 17.65%)[unknown][unknown] (19,559 samples, 14.90%)[unknown][unknown] (18,052 samples, 13.75%)[unknown][unknown] (15,794 samples, 12.03%)[unknown][unknown] (14,740 samples, 11.23%)[unknown][unknown] (12,486 samples, 9.51%)[unknown][unknown] (11,317 samples, 8.62%)[unknown][unknown] (10,725 samples, 8.17%)[unknown][unknown] (10,017 samples, 7.63%)[unknown][unknown] (9,713 samples, 7.40%)[unknown][unknown] (8,432 samples, 6.42%)[unknown][unknown] (8,062 samples, 6.14%)[unknown][unknown] (6,973 samples, 5.31%)[unknow..[unknown] (5,328 samples, 4.06%)[unk..[unknown] (4,352 samples, 3.31%)[un..[unknown] (3,786 samples, 2.88%)[u..[unknown] (3,659 samples, 2.79%)[u..[unknown] (3,276 samples, 2.50%)[u..[unknown] (2,417 samples, 1.84%)[..[unknown] (2,115 samples, 1.61%)[unknown] (1,610 samples, 1.23%)[unknown] (422 samples, 0.32%)[unknown] (84 samples, 0.06%)[unknown] (69 samples, 0.05%)__GI___pthread_disable_asynccancel (67 samples, 0.05%)__libc_sendto (32,896 samples, 25.05%)__libc_sendtotokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (32,981 samples, 25.12%)tokio::net::udp::UdpSocket::send_to_addr..mio::net::udp::UdpSocket::send_to (32,981 samples, 25.12%)mio::net::udp::UdpSocket::send_tomio::io_source::IoSource<T>::do_io (32,981 samples, 25.12%)mio::io_source::IoSource<T>::do_iomio::sys::unix::stateless_io_source::IoSourceState::do_io (32,981 samples, 25.12%)mio::sys::unix::stateless_io_source::IoS..mio::net::udp::UdpSocket::send_to::{{closure}} 
(32,981 samples, 25.12%)mio::net::udp::UdpSocket::send_to::{{clo..std::net::udp::UdpSocket::send_to (32,981 samples, 25.12%)std::net::udp::UdpSocket::send_tostd::sys_common::net::UdpSocket::send_to (32,981 samples, 25.12%)std::sys_common::net::UdpSocket::send_tostd::sys::pal::unix::cvt (85 samples, 0.06%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (44,349 samples, 33.78%)torrust_tracker::servers::udp::server::Udp::process_req..torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (43,412 samples, 33.06%)torrust_tracker::servers::udp::server::Udp::process_va..torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (34,320 samples, 26.14%)torrust_tracker::servers::udp::server::Udp..torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (33,360 samples, 25.41%)torrust_tracker::servers::udp::server::Ud..tokio::net::udp::UdpSocket::send_to::{{closure}} (33,227 samples, 25.31%)tokio::net::udp::UdpSocket::send_to::{{c..tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (33,142 samples, 25.24%)tokio::net::udp::UdpSocket::send_to_addr..tokio::runtime::io::registration::Registration::async_io::{{closure}} (33,115 samples, 25.22%)tokio::runtime::io::registration::Regist..tokio::runtime::io::registration::Registration::readiness::{{closure}} (28 samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (18 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (15 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (14 samples, 0.01%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (15 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (15 samples, 0.01%)core::sync::atomic::atomic_add (15 samples, 0.01%)__GI___lll_lock_wait_private (16 samples, 0.01%)futex_wait (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (14 
samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (135 samples, 0.10%)__GI___libc_free (147 samples, 0.11%)syscall (22 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Core<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (15 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::dealloc (24 samples, 0.02%)core::mem::drop (24 samples, 0.02%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (24 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (24 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::abort::AbortHandle> (262 samples, 0.20%)<tokio::runtime::task::abort::AbortHandle as core::ops::drop::Drop>::drop (262 samples, 0.20%)tokio::runtime::task::raw::RawTask::drop_abort_handle (256 samples, 0.19%)tokio::runtime::task::raw::drop_abort_handle (59 samples, 0.04%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (50 samples, 0.04%)tokio::runtime::task::state::State::ref_dec (50 samples, 0.04%)tokio::runtime::task::raw::RawTask::drop_join_handle_slow (16 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::join::JoinHandle<()>> (47 samples, 0.04%)<tokio::runtime::task::join::JoinHandle<T> as core::ops::drop::Drop>::drop (47 samples, 0.04%)tokio::runtime::task::state::State::drop_join_handle_fast (19 samples, 0.01%)core::sync::atomic::AtomicUsize::compare_exchange_weak (19 samples, 0.01%)core::sync::atomic::atomic_compare_exchange_weak (19 samples, 0.01%)ringbuf::ring_buffer::base::RbBase::is_full (14 samples, 0.01%)<ringbuf::ring_buffer::shared::SharedRb<T,C> as 
ringbuf::ring_buffer::base::RbBase<T>>::head (14 samples, 0.01%)core::sync::atomic::AtomicUsize::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)ringbuf::consumer::Consumer<T,R>::advance (29 samples, 0.02%)ringbuf::ring_buffer::base::RbRead::advance_head (29 samples, 0.02%)ringbuf::ring_buffer::rb::Rb::pop (50 samples, 0.04%)ringbuf::consumer::Consumer<T,R>::pop (50 samples, 0.04%)ringbuf::producer::Producer<T,R>::advance (23 samples, 0.02%)ringbuf::ring_buffer::base::RbWrite::advance_tail (23 samples, 0.02%)core::num::nonzero::<impl core::ops::arith::Rem<core::num::nonzero::NonZero<usize>> for usize>::rem (19 samples, 0.01%)ringbuf::ring_buffer::rb::Rb::push_overwrite (107 samples, 0.08%)ringbuf::ring_buffer::rb::Rb::push (43 samples, 0.03%)ringbuf::producer::Producer<T,R>::push (43 samples, 0.03%)tokio::runtime::task::abort::AbortHandle::is_finished (84 samples, 0.06%)tokio::runtime::task::state::Snapshot::is_complete (84 samples, 0.06%)tokio::runtime::task::join::JoinHandle<T>::abort_handle (17 samples, 0.01%)tokio::runtime::task::raw::RawTask::ref_inc (17 samples, 0.01%)tokio::runtime::task::state::State::ref_inc (17 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (14 samples, 0.01%)core::sync::atomic::atomic_add (14 samples, 0.01%)__GI___lll_lock_wake_private (22 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)malloc_consolidate (95 samples, 0.07%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (76 samples, 0.06%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (31 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (26 samples, 0.02%)_int_malloc (282 samples, 0.21%)__GI___libc_malloc (323 samples, 0.25%)alloc::vec::Vec<T>::with_capacity (326 samples, 0.25%)alloc::vec::Vec<T,A>::with_capacity_in (326 samples, 
0.25%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (324 samples, 0.25%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (324 samples, 0.25%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (324 samples, 0.25%)alloc::alloc::Global::alloc_impl (324 samples, 0.25%)alloc::alloc::alloc (324 samples, 0.25%)__rdl_alloc (324 samples, 0.25%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (324 samples, 0.25%)tokio::io::ready::Ready::intersection (24 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (199 samples, 0.15%)tokio::util::bit::Pack::unpack (16 samples, 0.01%)tokio::util::bit::unpack (16 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (19 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (17 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (16 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (222 samples, 0.17%)tokio::net::udp::UdpSocket::ready::{{closure}} (222 samples, 0.17%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (50 samples, 0.04%)std::io::error::repr_bitpacked::Repr::data (14 samples, 0.01%)std::io::error::repr_bitpacked::decode_repr (14 samples, 0.01%)std::io::error::Error::kind (16 samples, 0.01%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (14 samples, 0.01%)[unknown] (8,756 samples, 6.67%)[unknown][unknown] (8,685 samples, 6.61%)[unknown][unknown] (8,574 samples, 6.53%)[unknown][unknown] (8,415 samples, 6.41%)[unknown][unknown] (7,686 samples, 5.85%)[unknow..[unknown] (7,239 samples, 5.51%)[unknow..[unknown] (6,566 samples, 5.00%)[unkno..[unknown] (5,304 samples, 4.04%)[unk..[unknown] (4,008 samples, 3.05%)[un..[unknown] (3,571 samples, 2.72%)[u..[unknown] (2,375 samples, 1.81%)[..[unknown] (1,844 samples, 1.40%)[unknown] (1,030 samples, 0.78%)[unknown] (344 samples, 
0.26%)[unknown] (113 samples, 0.09%)__libc_recvfrom (8,903 samples, 6.78%)__libc_re..__GI___pthread_disable_asynccancel (22 samples, 0.02%)std::sys::pal::unix::cvt (20 samples, 0.02%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (9,005 samples, 6.86%)tokio::ne..mio::net::udp::UdpSocket::recv_from (8,964 samples, 6.83%)mio::net:..mio::io_source::IoSource<T>::do_io (8,964 samples, 6.83%)mio::io_s..mio::sys::unix::stateless_io_source::IoSourceState::do_io (8,964 samples, 6.83%)mio::sys:..mio::net::udp::UdpSocket::recv_from::{{closure}} (8,964 samples, 6.83%)mio::net:..std::net::udp::UdpSocket::recv_from (8,964 samples, 6.83%)std::net:..std::sys_common::net::UdpSocket::recv_from (8,964 samples, 6.83%)std::sys_..std::sys::pal::unix::net::Socket::recv_from (8,964 samples, 6.83%)std::sys:..std::sys::pal::unix::net::Socket::recv_from_with_flags (8,964 samples, 6.83%)std::sys:..std::sys_common::net::sockaddr_to_addr (23 samples, 0.02%)tokio::runtime::io::registration::Registration::clear_readiness (18 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::clear_readiness (18 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (32 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (9,967 samples, 7.59%)torrust_tr..tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (9,291 samples, 7.08%)tokio::ne..tokio::runtime::io::registration::Registration::async_io::{{closure}} (9,287 samples, 7.07%)tokio::ru..tokio::runtime::io::registration::Registration::readiness::{{closure}} (45 samples, 0.03%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (41 samples, 0.03%)__memcpy_avx512_unaligned_erms (424 samples, 0.32%)__memcpy_avx512_unaligned_erms (493 samples, 0.38%)__memcpy_avx512_unaligned_erms (298 samples, 0.23%)syscall (1,105 samples, 0.84%)[unknown] (1,095 samples, 0.83%)[unknown] (1,091 samples, 0.83%)[unknown] (1,049 samples, 0.80%)[unknown] 
(998 samples, 0.76%)[unknown] (907 samples, 0.69%)[unknown] (710 samples, 0.54%)[unknown] (635 samples, 0.48%)[unknown] (538 samples, 0.41%)[unknown] (358 samples, 0.27%)[unknown] (256 samples, 0.19%)[unknown] (153 samples, 0.12%)[unknown] (96 samples, 0.07%)[unknown] (81 samples, 0.06%)tokio::runtime::context::with_scheduler (36 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (31 samples, 0.02%)tokio::runtime::context::with_scheduler::{{closure}} (27 samples, 0.02%)tokio::runtime::context::scoped::Scoped<T>::with (27 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (25 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (15 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (340 samples, 0.26%)core::sync::atomic::atomic_add (340 samples, 0.26%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (354 samples, 0.27%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (367 samples, 0.28%)[unknown] (95 samples, 0.07%)[unknown] (93 samples, 0.07%)[unknown] (92 samples, 0.07%)[unknown] (90 samples, 0.07%)[unknown] (82 samples, 0.06%)[unknown] (73 samples, 0.06%)[unknown] (63 samples, 0.05%)[unknown] (44 samples, 0.03%)[unknown] (40 samples, 0.03%)[unknown] (35 samples, 0.03%)[unknown] (30 samples, 0.02%)[unknown] (22 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (17 samples, 0.01%)tokio::runtime::driver::Handle::unpark (99 samples, 0.08%)tokio::runtime::driver::IoHandle::unpark (99 samples, 0.08%)tokio::runtime::io::driver::Handle::unpark (99 samples, 0.08%)mio::waker::Waker::wake (99 samples, 0.08%)mio::sys::unix::waker::fdbased::Waker::wake (99 samples, 
0.08%)mio::sys::unix::waker::eventfd::WakerInternal::wake (99 samples, 0.08%)<&std::fs::File as std::io::Write>::write (99 samples, 0.08%)std::sys::pal::unix::fs::File::write (99 samples, 0.08%)std::sys::pal::unix::fd::FileDesc::write (99 samples, 0.08%)__GI___libc_write (99 samples, 0.08%)__GI___libc_write (99 samples, 0.08%)tokio::runtime::context::with_scheduler (1,615 samples, 1.23%)std::thread::local::LocalKey<T>::try_with (1,613 samples, 1.23%)tokio::runtime::context::with_scheduler::{{closure}} (1,612 samples, 1.23%)tokio::runtime::context::scoped::Scoped<T>::with (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (1,609 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (1,609 samples, 1.23%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (101 samples, 0.08%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (101 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_option_task_without_yield (1,647 samples, 1.25%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task (1,646 samples, 1.25%)tokio::runtime::scheduler::multi_thread::worker::with_current (1,646 samples, 1.25%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (23 samples, 0.02%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (18 samples, 
0.01%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (104 samples, 0.08%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (60 samples, 0.05%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (57 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (49 samples, 0.04%)core::sync::atomic::AtomicU32::compare_exchange (38 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (38 samples, 0.03%)__memcpy_avx512_unaligned_erms (162 samples, 0.12%)__memcpy_avx512_unaligned_erms (34 samples, 0.03%)__GI___lll_lock_wake_private (127 samples, 0.10%)[unknown] (125 samples, 0.10%)[unknown] (124 samples, 0.09%)[unknown] (119 samples, 0.09%)[unknown] (110 samples, 0.08%)[unknown] (106 samples, 0.08%)[unknown] (87 samples, 0.07%)[unknown] (82 samples, 0.06%)[unknown] (51 samples, 0.04%)[unknown] (27 samples, 0.02%)[unknown] (19 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (77 samples, 0.06%)[unknown] (1,207 samples, 0.92%)[unknown] (1,146 samples, 0.87%)[unknown] (1,126 samples, 0.86%)[unknown] (1,091 samples, 0.83%)[unknown] (1,046 samples, 0.80%)[unknown] (962 samples, 0.73%)[unknown] (914 samples, 0.70%)[unknown] (848 samples, 0.65%)[unknown] (774 samples, 0.59%)[unknown] (580 samples, 0.44%)[unknown] (456 samples, 0.35%)[unknown] (305 samples, 0.23%)[unknown] (85 samples, 0.06%)__GI_mprotect (2,474 samples, 1.88%)_..[unknown] (2,457 samples, 1.87%)[..[unknown] (2,440 samples, 1.86%)[..[unknown] (2,436 samples, 1.86%)[..[unknown] (2,435 samples, 1.85%)[..[unknown] (2,360 samples, 1.80%)[..[unknown] (2,203 samples, 1.68%)[unknown] (1,995 samples, 1.52%)[unknown] (1,709 samples, 1.30%)[unknown] (1,524 samples, 1.16%)[unknown] (1,193 samples, 0.91%)[unknown] (865 samples, 0.66%)[unknown] (539 samples, 0.41%)[unknown] (259 samples, 0.20%)[unknown] 
(80 samples, 0.06%)[unknown] (29 samples, 0.02%)sysmalloc (3,786 samples, 2.88%)sy..grow_heap (2,509 samples, 1.91%)g.._int_malloc (4,038 samples, 3.08%)_in..unlink_chunk (31 samples, 0.02%)alloc::alloc::exchange_malloc (4,335 samples, 3.30%)all..<alloc::alloc::Global as core::alloc::Allocator>::allocate (4,329 samples, 3.30%)<al..alloc::alloc::Global::alloc_impl (4,329 samples, 3.30%)all..alloc::alloc::alloc (4,329 samples, 3.30%)all..__rdl_alloc (4,329 samples, 3.30%)__r..std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (4,329 samples, 3.30%)std..std::sys::pal::unix::alloc::aligned_malloc (4,329 samples, 3.30%)std..__posix_memalign (4,297 samples, 3.27%)__p..__posix_memalign (4,297 samples, 3.27%)__p.._mid_memalign (4,297 samples, 3.27%)_mi.._int_memalign (4,149 samples, 3.16%)_in..sysmalloc (18 samples, 0.01%)core::option::Option<T>::map (6,666 samples, 5.08%)core::..tokio::task::spawn::spawn_inner::{{closure}} (6,665 samples, 5.08%)tokio:..tokio::runtime::scheduler::Handle::spawn (6,665 samples, 5.08%)tokio:..tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (6,664 samples, 5.08%)tokio:..tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (6,661 samples, 5.07%)tokio:..tokio::runtime::task::list::OwnedTasks<S>::bind (4,692 samples, 3.57%)toki..tokio::runtime::task::new_task (4,579 samples, 3.49%)tok..tokio::runtime::task::raw::RawTask::new (4,579 samples, 3.49%)tok..tokio::runtime::task::core::Cell<T,S>::new (4,579 samples, 3.49%)tok..alloc::boxed::Box<T>::new (4,389 samples, 3.34%)all..tokio::runtime::context::current::with_current (7,636 samples, 5.82%)tokio::..std::thread::local::LocalKey<T>::try_with (7,635 samples, 5.81%)std::th..tokio::runtime::context::current::with_current::{{closure}} (7,188 samples, 5.47%)tokio::..tokio::task::spawn::spawn (7,670 samples, 5.84%)tokio::..tokio::task::spawn::spawn_inner (7,670 samples, 5.84%)tokio::..tokio::runtime::task::id::Id::next (24 
samples, 0.02%)core::sync::atomic::AtomicU64::fetch_add (24 samples, 0.02%)core::sync::atomic::atomic_add (24 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (62,691 samples, 47.75%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (62,691 samples, 47.75%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (18,228 samples, 13.88%)torrust_tracker::serv..torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (18,226 samples, 13.88%)torrust_tracker::serv..torrust_tracker::servers::udp::server::Udp::spawn_request_processor (7,679 samples, 5.85%)torrust..__memcpy_avx512_unaligned_erms (38 samples, 0.03%)__memcpy_avx512_unaligned_erms (407 samples, 0.31%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (411 samples, 0.31%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (411 samples, 0.31%)tokio::runtime::task::core::Core<T,S>::poll (63,150 samples, 48.10%)tokio::runtime::task::core::Core<T,S>::polltokio::runtime::task::core::Core<T,S>::drop_future_or_output (459 samples, 0.35%)tokio::runtime::task::core::Core<T,S>::set_stage (459 samples, 0.35%)__memcpy_avx512_unaligned_erms (16 samples, 0.01%)__memcpy_avx512_unaligned_erms (398 samples, 0.30%)__memcpy_avx512_unaligned_erms (325 samples, 0.25%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (330 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (330 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::set_stage (731 samples, 0.56%)tokio::runtime::task::harness::poll_future (63,908 samples, 48.67%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (63,908 samples, 48.67%)std::panic::catch_unwindstd::panicking::try (63,908 samples, 48.67%)std::panicking::trystd::panicking::try::do_call (63,908 samples, 
48.67%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (63,908 samples, 48.67%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()..tokio::runtime::task::harness::poll_future::{{closure}} (63,908 samples, 48.67%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::store_output (758 samples, 0.58%)tokio::runtime::coop::budget (65,027 samples, 49.53%)tokio::runtime::coop::budgettokio::runtime::coop::with_budget (65,027 samples, 49.53%)tokio::runtime::coop::with_budgettokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (65,009 samples, 49.51%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}}tokio::runtime::task::LocalNotified<S>::run (65,003 samples, 49.51%)tokio::runtime::task::LocalNotified<S>::runtokio::runtime::task::raw::RawTask::poll (65,003 samples, 49.51%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (64,538 samples, 49.15%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (64,493 samples, 49.12%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (63,919 samples, 48.68%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (93 samples, 0.07%)syscall (2,486 samples, 1.89%)s..[unknown] (2,424 samples, 1.85%)[..[unknown] (2,416 samples, 1.84%)[..[unknown] (2,130 samples, 1.62%)[unknown] (2,013 samples, 1.53%)[unknown] (1,951 samples, 1.49%)[unknown] (1,589 samples, 1.21%)[unknown] (1,415 samples, 1.08%)[unknown] (1,217 samples, 0.93%)[unknown] (820 samples, 0.62%)[unknown] (564 samples, 0.43%)[unknown] (360 samples, 0.27%)[unknown] (244 samples, 0.19%)[unknown] (194 samples, 0.15%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (339 samples, 
0.26%)core::sync::atomic::AtomicUsize::fetch_add (337 samples, 0.26%)core::sync::atomic::atomic_add (337 samples, 0.26%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (364 samples, 0.28%)[unknown] (154 samples, 0.12%)[unknown] (152 samples, 0.12%)[unknown] (143 samples, 0.11%)[unknown] (139 samples, 0.11%)[unknown] (131 samples, 0.10%)[unknown] (123 samples, 0.09%)[unknown] (110 samples, 0.08%)[unknown] (80 samples, 0.06%)[unknown] (74 samples, 0.06%)[unknown] (65 samples, 0.05%)[unknown] (64 samples, 0.05%)[unknown] (47 samples, 0.04%)[unknown] (44 samples, 0.03%)[unknown] (43 samples, 0.03%)[unknown] (40 samples, 0.03%)[unknown] (26 samples, 0.02%)[unknown] (20 samples, 0.02%)__GI___libc_write (158 samples, 0.12%)__GI___libc_write (158 samples, 0.12%)mio::sys::unix::waker::eventfd::WakerInternal::wake (159 samples, 0.12%)<&std::fs::File as std::io::Write>::write (159 samples, 0.12%)std::sys::pal::unix::fs::File::write (159 samples, 0.12%)std::sys::pal::unix::fd::FileDesc::write (159 samples, 0.12%)tokio::runtime::driver::Handle::unpark (168 samples, 0.13%)tokio::runtime::driver::IoHandle::unpark (168 samples, 0.13%)tokio::runtime::io::driver::Handle::unpark (168 samples, 0.13%)mio::waker::Waker::wake (165 samples, 0.13%)mio::sys::unix::waker::fdbased::Waker::wake (165 samples, 0.13%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (68,159 samples, 51.91%)tokio::runtime::scheduler::multi_thread::worker::Context::run_tasktokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (3,024 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (3,023 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (3,022 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (171 samples, 
0.13%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (171 samples, 0.13%)core::option::Option<T>::or_else (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::tune_global_queue_interval (53 samples, 0.04%)tokio::runtime::scheduler::multi_thread::stats::Stats::tuned_global_queue_interval (53 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (107 samples, 0.08%)__GI___libc_free (17 samples, 0.01%)_int_free (17 samples, 0.01%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Dying,K,V>::deallocating_end (18 samples, 0.01%)alloc::collections::btree::navigate::<impl alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>>::deallocating_end (18 samples, 0.01%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,alloc::collections::btree::node::marker::LeafOrInternal>::deallocate_and_ascend (18 samples, 0.01%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (18 samples, 0.01%)alloc::alloc::dealloc (18 samples, 0.01%)__rdl_dealloc (18 samples, 0.01%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (18 samples, 0.01%)alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (19 samples, 0.01%)tokio::runtime::task::Task<S>::shutdown (26 samples, 0.02%)tokio::runtime::task::raw::RawTask::shutdown (26 samples, 0.02%)tokio::runtime::task::raw::shutdown (26 samples, 0.02%)tokio::runtime::task::harness::Harness<T,S>::shutdown (26 
samples, 0.02%)tokio::runtime::task::harness::cancel_task (26 samples, 0.02%)std::panic::catch_unwind (26 samples, 0.02%)std::panicking::try (26 samples, 0.02%)std::panicking::try::do_call (26 samples, 0.02%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (26 samples, 0.02%)core::ops::function::FnOnce::call_once (26 samples, 0.02%)tokio::runtime::task::harness::cancel_task::{{closure}} (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage (26 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (26 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::core::Tracker> (26 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)core::ptr::drop_in_place<std::sync::rwlock::RwLock<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 0.02%)core::ptr::drop_in_place<core::cell::UnsafeCell<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 
0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)core::mem::drop (26 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::IntoIter<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)<alloc::collections::btree::map::IntoIter<K,V,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,NodeType>,alloc::collections::btree::node::marker::KV>::drop_key_val (24 samples, 0.02%)core::mem::maybe_uninit::MaybeUninit<T>::assume_init_drop (24 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (24 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (21 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>> (20 samples, 0.02%)core::ptr::drop_in_place<core::cell::UnsafeCell<torrust_tracker_torrent_repository::entry::Torrent>> (20 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker_torrent_repository::entry::Torrent> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::PeerId,alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (20 samples, 0.02%)<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)core::mem::drop (20 samples, 
0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::IntoIter<torrust_tracker_primitives::PeerId,alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (20 samples, 0.02%)<alloc::collections::btree::map::IntoIter<K,V,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::pre_shutdown (33 samples, 0.03%)tokio::runtime::task::list::OwnedTasks<S>::close_and_shutdown_all (33 samples, 0.03%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (114 samples, 0.09%)alloc::sync::Arc<T,A>::inner (114 samples, 0.09%)core::ptr::non_null::NonNull<T>::as_ref (114 samples, 0.09%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (108 samples, 0.08%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (108 samples, 0.08%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (106 samples, 0.08%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (49 samples, 0.04%)alloc::sync::Arc<T,A>::inner (49 samples, 0.04%)core::ptr::non_null::NonNull<T>::as_ref (49 samples, 0.04%)core::num::<impl u32>::wrapping_sub (132 samples, 0.10%)core::sync::atomic::AtomicU64::load (40 samples, 0.03%)core::sync::atomic::atomic_load (40 samples, 0.03%)tokio::loom::std::atomic_u32::AtomicU32::unsync_load (48 samples, 0.04%)core::sync::atomic::AtomicU32::load (48 samples, 0.04%)core::sync::atomic::atomic_load (48 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (65 samples, 0.05%)alloc::sync::Arc<T,A>::inner (65 samples, 0.05%)core::ptr::non_null::NonNull<T>::as_ref (65 samples, 0.05%)core::num::<impl u32>::wrapping_sub (50 samples, 0.04%)core::sync::atomic::AtomicU32::load (55 samples, 0.04%)core::sync::atomic::atomic_load (55 samples, 0.04%)core::sync::atomic::AtomicU64::load (80 samples, 0.06%)core::sync::atomic::atomic_load (80 samples, 0.06%)tokio::runtime::scheduler::multi_thread::queue::pack (26 samples, 
0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (666 samples, 0.51%)tokio::runtime::scheduler::multi_thread::queue::unpack (147 samples, 0.11%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (1,036 samples, 0.79%)tokio::runtime::scheduler::multi_thread::queue::unpack (46 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_searching (49 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_searching (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (2,414 samples, 1.84%)t..tokio::util::rand::FastRand::fastrand_n (24 samples, 0.02%)tokio::util::rand::FastRand::fastrand (24 samples, 0.02%)std::sys_common::backtrace::__rust_begin_short_backtrace (98,136 samples, 74.74%)std::sys_common::backtrace::__rust_begin_short_backtracetokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}} (98,136 samples, 74.74%)tokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}}tokio::runtime::blocking::pool::Inner::run (98,136 samples, 74.74%)tokio::runtime::blocking::pool::Inner::runtokio::runtime::blocking::pool::Task::run (98,042 samples, 74.67%)tokio::runtime::blocking::pool::Task::runtokio::runtime::task::UnownedTask<S>::run (98,042 samples, 74.67%)tokio::runtime::task::UnownedTask<S>::runtokio::runtime::task::raw::RawTask::poll (98,042 samples, 74.67%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (98,042 samples, 74.67%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (98,042 samples, 74.67%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (98,042 samples, 74.67%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::task::harness::poll_future (98,042 samples, 74.67%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (98,042 samples, 
74.67%)std::panic::catch_unwindstd::panicking::try (98,042 samples, 74.67%)std::panicking::trystd::panicking::try::do_call (98,042 samples, 74.67%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (98,042 samples, 74.67%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncetokio::runtime::task::harness::poll_future::{{closure}} (98,042 samples, 74.67%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::poll (98,042 samples, 74.67%)tokio::runtime::task::core::Core<T,S>::polltokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (98,042 samples, 74.67%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (98,042 samples, 74.67%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (98,042 samples, 74.67%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::polltokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}}tokio::runtime::scheduler::multi_thread::worker::run (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::runtokio::runtime::context::runtime::enter_runtime (98,042 samples, 74.67%)tokio::runtime::context::runtime::enter_runtimetokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}tokio::runtime::context::set_scheduler (98,042 samples, 74.67%)tokio::runtime::context::set_schedulerstd::thread::local::LocalKey<T>::with (98,042 samples, 74.67%)std::thread::local::LocalKey<T>::withstd::thread::local::LocalKey<T>::try_with (98,042 samples, 
74.67%)std::thread::local::LocalKey<T>::try_withtokio::runtime::context::set_scheduler::{{closure}} (98,042 samples, 74.67%)tokio::runtime::context::set_scheduler::{{closure}}tokio::runtime::context::scoped::Scoped<T>::set (98,042 samples, 74.67%)tokio::runtime::context::scoped::Scoped<T>::settokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}}tokio::runtime::scheduler::multi_thread::worker::Context::run (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::Context::runstd::panic::catch_unwind (98,137 samples, 74.74%)std::panic::catch_unwindstd::panicking::try (98,137 samples, 74.74%)std::panicking::trystd::panicking::try::do_call (98,137 samples, 74.74%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (98,137 samples, 74.74%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncestd::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}} (98,137 samples, 74.74%)std::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}}<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (98,139 samples, 74.74%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (98,139 samples, 74.74%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_oncecore::ops::function::FnOnce::call_once{{vtable.shim}} (98,139 samples, 74.74%)core::ops::function::FnOnce::call_once{{vtable.shim}}std::thread::Builder::spawn_unchecked_::{{closure}} (98,139 samples, 74.74%)std::thread::Builder::spawn_unchecked_::{{closure}}clone3 (98,205 samples, 74.79%)clone3start_thread (98,205 samples, 74.79%)start_threadstd::sys::pal::unix::thread::Thread::new::thread_start (98,158 samples, 
74.76%)std::sys::pal::unix::thread::Thread::new::thread_startcore::ptr::drop_in_place<std::sys::pal::unix::stack_overflow::Handler> (19 samples, 0.01%)<std::sys::pal::unix::stack_overflow::Handler as core::ops::drop::Drop>::drop (19 samples, 0.01%)std::sys::pal::unix::stack_overflow::imp::drop_handler (19 samples, 0.01%)__GI_munmap (19 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (17 samples, 0.01%)[unknown] (16 samples, 0.01%)core::fmt::Formatter::pad_integral (112 samples, 0.09%)core::fmt::Formatter::pad_integral::write_prefix (59 samples, 0.04%)core::fmt::Formatter::pad_integral (16 samples, 0.01%)core::fmt::write (20 samples, 0.02%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (19 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (51 samples, 0.04%)rand_chacha::guts::round (18 samples, 0.01%)rand_chacha::guts::refill_wide::impl_avx2 (26 samples, 0.02%)rand_chacha::guts::refill_wide::fn_impl (26 samples, 0.02%)rand_chacha::guts::refill_wide_impl (26 samples, 0.02%)rand_chacha::guts::refill_wide (14 samples, 0.01%)std_detect::detect::arch::x86::__is_feature_detected::avx2 (14 samples, 0.01%)std_detect::detect::check_for (14 samples, 0.01%)std_detect::detect::cache::test (14 samples, 0.01%)std_detect::detect::cache::Cache::test (14 samples, 0.01%)core::sync::atomic::AtomicUsize::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)core::cell::RefCell<T>::borrow_mut (81 samples, 0.06%)core::cell::RefCell<T>::try_borrow_mut (81 samples, 0.06%)core::cell::BorrowRefMut::new (81 samples, 0.06%)std::sys::pal::unix::time::Timespec::now (164 samples, 0.12%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (106 samples, 0.08%)tokio::runtime::coop::budget (105 
samples, 0.08%)tokio::runtime::coop::with_budget (105 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (96 samples, 0.07%)std::sys::pal::unix::time::Timespec::sub_timespec (35 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::lock_contended (15 samples, 0.01%)syscall (90 samples, 0.07%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (21 samples, 0.02%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run (61 samples, 0.05%)tokio::runtime::context::runtime::enter_runtime (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (61 samples, 0.05%)tokio::runtime::context::set_scheduler (61 samples, 0.05%)std::thread::local::LocalKey<T>::with (61 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (61 samples, 0.05%)tokio::runtime::context::set_scheduler::{{closure}} (61 samples, 0.05%)tokio::runtime::context::scoped::Scoped<T>::set (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Context::run (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (19 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (17 samples, 0.01%)tokio::runtime::context::CONTEXT::__getit (14 samples, 0.01%)core::cell::Cell<T>::get (14 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (22 samples, 0.02%)<tokio::runtime::task::core::TaskIdGuard 
as core::ops::drop::Drop>::drop (22 samples, 0.02%)tokio::runtime::context::set_current_task_id (22 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (22 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (112 samples, 0.09%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (111 samples, 0.08%)tokio::runtime::task::harness::poll_future (125 samples, 0.10%)std::panic::catch_unwind (125 samples, 0.10%)std::panicking::try (125 samples, 0.10%)std::panicking::try::do_call (125 samples, 0.10%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (125 samples, 0.10%)tokio::runtime::task::harness::poll_future::{{closure}} (125 samples, 0.10%)tokio::runtime::task::core::Core<T,S>::poll (125 samples, 0.10%)tokio::runtime::task::raw::poll (157 samples, 0.12%)tokio::runtime::task::harness::Harness<T,S>::poll (135 samples, 0.10%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (135 samples, 0.10%)tokio::runtime::time::Driver::park_internal (15 samples, 0.01%)torrust_tracker::bootstrap::logging::INIT (17 samples, 0.01%)__memcpy_avx512_unaligned_erms (397 samples, 0.30%)_int_free (24 samples, 0.02%)_int_malloc (132 samples, 0.10%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE::META (570 samples, 0.43%)__GI___lll_lock_wait_private (22 samples, 0.02%)futex_wait (14 samples, 0.01%)__memcpy_avx512_unaligned_erms (299 samples, 0.23%)_int_free (16 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE (361 samples, 0.27%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (41 samples, 0.03%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (23 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (53 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (14 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (63 
samples, 0.05%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (21 samples, 0.02%)__GI___libc_malloc (18 samples, 0.01%)alloc::vec::Vec<T>::with_capacity (116 samples, 0.09%)alloc::vec::Vec<T,A>::with_capacity_in (116 samples, 0.09%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (116 samples, 0.09%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (116 samples, 0.09%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (116 samples, 0.09%)alloc::alloc::Global::alloc_impl (116 samples, 0.09%)alloc::alloc::alloc (116 samples, 0.09%)__rdl_alloc (116 samples, 0.09%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (116 samples, 0.09%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (53 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (53 samples, 0.04%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (53 samples, 0.04%)_int_malloc (21 samples, 0.02%)[unknown] (36 samples, 0.03%)[unknown] (16 samples, 0.01%)core::mem::zeroed (27 samples, 0.02%)core::mem::maybe_uninit::MaybeUninit<T>::zeroed (27 samples, 0.02%)core::ptr::mut_ptr::<impl *mut T>::write_bytes (27 samples, 0.02%)core::intrinsics::write_bytes (27 samples, 0.02%)[unknown] (27 samples, 0.02%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (64 samples, 0.05%)mio::net::udp::UdpSocket::recv_from (49 samples, 0.04%)mio::io_source::IoSource<T>::do_io (49 samples, 0.04%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (49 samples, 0.04%)mio::net::udp::UdpSocket::recv_from::{{closure}} (49 samples, 0.04%)std::net::udp::UdpSocket::recv_from (49 samples, 0.04%)std::sys_common::net::UdpSocket::recv_from (49 samples, 0.04%)std::sys::pal::unix::net::Socket::recv_from (49 samples, 0.04%)std::sys::pal::unix::net::Socket::recv_from_with_flags (49 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (271 
samples, 0.21%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (143 samples, 0.11%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (141 samples, 0.11%)tokio::runtime::io::registration::Registration::clear_readiness (15 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::clear_readiness (15 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (15 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (359 samples, 0.27%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (346 samples, 0.26%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (39 samples, 0.03%)tokio::task::spawn::spawn (39 samples, 0.03%)tokio::task::spawn::spawn_inner (39 samples, 0.03%)tokio::runtime::context::current::with_current (39 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (39 samples, 0.03%)tokio::runtime::context::current::with_current::{{closure}} (39 samples, 0.03%)core::option::Option<T>::map (39 samples, 0.03%)tokio::task::spawn::spawn_inner::{{closure}} (39 samples, 0.03%)tokio::runtime::scheduler::Handle::spawn (39 samples, 0.03%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (39 samples, 0.03%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (39 samples, 0.03%)tokio::runtime::task::list::OwnedTasks<S>::bind (34 samples, 0.03%)all (131,301 samples, 100%)tokio-runtime-w (131,061 samples, 99.82%)tokio-runtime-w \ No newline at end of file diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index b383e95ad..a9bf97009 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -7,9 +7,7 @@ use std::collections::BTreeMap; use std::time::Duration; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use info_hash::InfoHash; -use serde::Serialize; pub mod info_hash; pub mod pagination; @@ -20,48 +18,6 @@ pub mod torrent_metrics; /// 
Duration since the Unix Epoch. pub type DurationSinceUnixEpoch = Duration; -/// Serializes a `DurationSinceUnixEpoch` as a Unix timestamp in milliseconds. -/// # Errors -/// -/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`. -pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { - #[allow(clippy::cast_possible_truncation)] - ser.serialize_u64(unix_time_value.as_millis() as u64) -} - -#[derive(Serialize)] -pub enum AnnounceEventSer { - Started, - Stopped, - Completed, - None, -} - -/// Serializes a `Announce Event` as a enum. -/// -/// # Errors -/// -/// If will return an error if the internal serializer was to fail. -pub fn ser_announce_event(announce_event: &AnnounceEvent, ser: S) -> Result { - let event_ser = match announce_event { - AnnounceEvent::Started => AnnounceEventSer::Started, - AnnounceEvent::Stopped => AnnounceEventSer::Stopped, - AnnounceEvent::Completed => AnnounceEventSer::Completed, - AnnounceEvent::None => AnnounceEventSer::None, - }; - - ser.serialize_some(&event_ser) -} - -/// Serializes a `Announce Event` as a i64. -/// -/// # Errors -/// -/// If will return an error if the internal serializer was to fail. -pub fn ser_number_of_bytes(number_of_bytes: &NumberOfBytes, ser: S) -> Result { - ser.serialize_i64(number_of_bytes.0.get()) -} - /// IP version used by the peer to connect to the tracker: IPv4 or IPv6 #[derive(PartialEq, Eq, Debug)] pub enum IPVersion { diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index 987099b70..9a02ef39b 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -3,6 +3,7 @@ //! A sample peer: //! //! ```rust,no_run +//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; //! use torrust_tracker_primitives::peer; //! use std::net::SocketAddr; //! use std::net::IpAddr; @@ -11,7 +12,7 @@ //! //! //! peer::Peer { -//! peer_id: peer::Id(*b"-qB00000000000000000"), +//! 
peer_id: PeerId(*b"-qB00000000000000000"), //! peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), //! updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), //! uploaded: NumberOfBytes::new(0), @@ -22,18 +23,21 @@ //! ``` use std::net::{IpAddr, SocketAddr}; +use std::ops::{Deref, DerefMut}; use std::sync::Arc; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use serde::Serialize; +use zerocopy::FromBytes as _; -use crate::{ser_announce_event, ser_number_of_bytes, ser_unix_time_value, DurationSinceUnixEpoch, IPVersion}; +use crate::{DurationSinceUnixEpoch, IPVersion}; /// Peer struct used by the core `Tracker`. /// /// A sample peer: /// /// ```rust,no_run +/// use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; /// use torrust_tracker_primitives::peer; /// use std::net::SocketAddr; /// use std::net::IpAddr; @@ -42,7 +46,7 @@ use crate::{ser_announce_event, ser_number_of_bytes, ser_unix_time_value, Durati /// /// /// peer::Peer { -/// peer_id: peer::Id(*b"-qB00000000000000000"), +/// peer_id: PeerId(*b"-qB00000000000000000"), /// peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), /// updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), /// uploaded: NumberOfBytes::new(0), @@ -54,7 +58,8 @@ use crate::{ser_announce_event, ser_number_of_bytes, ser_unix_time_value, Durati #[derive(Debug, Clone, Serialize, Copy, PartialEq, Eq, Hash)] pub struct Peer { /// ID used by the downloader peer - pub peer_id: Id, + #[serde(serialize_with = "ser_peer_id")] + pub peer_id: PeerId, /// The IP and port this peer is listening on pub peer_addr: SocketAddr, /// The last time the the tracker receive an announce request from this peer (timestamp) @@ -74,6 +79,58 @@ pub struct Peer { pub event: AnnounceEvent, } +/// Serializes a `DurationSinceUnixEpoch` as a Unix timestamp in milliseconds. 
+/// # Errors +/// +/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`. +pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { + #[allow(clippy::cast_possible_truncation)] + ser.serialize_u64(unix_time_value.as_millis() as u64) +} + +#[derive(Serialize)] +pub enum AnnounceEventSer { + Started, + Stopped, + Completed, + None, +} + +/// Serializes a `Announce Event` as a enum. +/// +/// # Errors +/// +/// If will return an error if the internal serializer was to fail. +pub fn ser_announce_event(announce_event: &AnnounceEvent, ser: S) -> Result { + let event_ser = match announce_event { + AnnounceEvent::Started => AnnounceEventSer::Started, + AnnounceEvent::Stopped => AnnounceEventSer::Stopped, + AnnounceEvent::Completed => AnnounceEventSer::Completed, + AnnounceEvent::None => AnnounceEventSer::None, + }; + + ser.serialize_some(&event_ser) +} + +/// Serializes a `Announce Event` as a i64. +/// +/// # Errors +/// +/// If will return an error if the internal serializer was to fail. +pub fn ser_number_of_bytes(number_of_bytes: &NumberOfBytes, ser: S) -> Result { + ser.serialize_i64(number_of_bytes.0.get()) +} + +/// Serializes a `PeerId` as a `peer::Id`. +/// +/// # Errors +/// +/// If will return an error if the internal serializer was to fail. 
+pub fn ser_peer_id(peer_id: &PeerId, ser: S) -> Result { + let id = Id { data: *peer_id }; + ser.serialize_some(&id) +} + impl Ord for Peer { fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.peer_id.cmp(&other.peer_id) @@ -89,7 +146,7 @@ impl PartialOrd for Peer { pub trait ReadInfo { fn is_seeder(&self) -> bool; fn get_event(&self) -> AnnounceEvent; - fn get_id(&self) -> Id; + fn get_id(&self) -> PeerId; fn get_updated(&self) -> DurationSinceUnixEpoch; fn get_address(&self) -> SocketAddr; } @@ -103,7 +160,7 @@ impl ReadInfo for Peer { self.event } - fn get_id(&self) -> Id { + fn get_id(&self) -> PeerId { self.peer_id } @@ -125,7 +182,7 @@ impl ReadInfo for Arc { self.event } - fn get_id(&self) -> Id { + fn get_id(&self) -> PeerId { self.peer_id } @@ -183,19 +240,46 @@ pub enum IdConversionError { }, } +pub struct Id { + data: PeerId, +} + +impl From for Id { + fn from(id: PeerId) -> Self { + Self { data: id } + } +} + +impl Deref for Id { + type Target = PeerId; + + fn deref(&self) -> &Self::Target { + &self.data + } +} + +impl DerefMut for Id { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.data + } +} + impl From<[u8; 20]> for Id { fn from(bytes: [u8; 20]) -> Self { - Id(bytes) + let data = PeerId(bytes); + Self { data } } } impl From for Id { fn from(number: i32) -> Self { - let peer_id = number.to_le_bytes(); - Id::from([ - 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], peer_id[2], - peer_id[3], - ]) + let number = number.to_le_bytes(); + let bytes = [ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, number[0], number[1], number[2], + number[3], + ]; + + Id::from(bytes) } } @@ -215,15 +299,9 @@ impl TryFrom> for Id { message: format! 
{"got {} bytes, expected {}", bytes.len(), PEER_ID_BYTES_LEN}, }); } - Ok(Self::from_bytes(&bytes)) - } -} - -impl std::str::FromStr for Id { - type Err = IdConversionError; - fn from_str(s: &str) -> Result { - Self::try_from(s.as_bytes().to_vec()) + let data = PeerId::read_from(&bytes).expect("it should have the correct amount of bytes"); + Ok(Self { data }) } } @@ -236,51 +314,13 @@ impl std::fmt::Display for Id { } } -/// Peer ID. A 20-byte array. -/// -/// A string of length 20 which this downloader uses as its id. -/// Each downloader generates its own id at random at the start of a new download. -/// -/// A sample peer ID: -/// -/// ```rust,no_run -/// use torrust_tracker_primitives::peer; -/// -/// let peer_id = peer::Id(*b"-qB00000000000000000"); -/// ``` -/// -#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] -pub struct Id(pub [u8; 20]); - pub const PEER_ID_BYTES_LEN: usize = 20; impl Id { - /// # Panics - /// - /// Will panic if byte slice does not contains the exact amount of bytes need for the `Id`. - #[must_use] - pub fn from_bytes(bytes: &[u8]) -> Self { - assert_eq!( - PEER_ID_BYTES_LEN, - bytes.len(), - "we are testing the equality of the constant: `PEER_ID_BYTES_LEN` ({}) and the supplied `bytes` length: {}", - PEER_ID_BYTES_LEN, - bytes.len(), - ); - let mut ret = Self([0u8; PEER_ID_BYTES_LEN]); - ret.0.clone_from_slice(bytes); - ret - } - - #[must_use] - pub fn to_bytes(&self) -> [u8; 20] { - self.0 - } - #[must_use] /// Converts to hex string. 
/// - /// For the Id `-qB00000000000000000` it returns `2d71423030303030303030303030303030303030` + /// For the `PeerId` `-qB00000000000000000` it returns `2d71423030303030303030303030303030303030` /// /// For example: /// @@ -362,7 +402,7 @@ pub mod fixture { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use super::{Id, Peer}; + use super::{Id, Peer, PeerId}; use crate::DurationSinceUnixEpoch; #[derive(PartialEq, Debug)] @@ -383,7 +423,7 @@ pub mod fixture { #[must_use] pub fn seeder() -> Self { let peer = Peer { - peer_id: Id(*b"-qB00000000000000001"), + peer_id: PeerId(*b"-qB00000000000000001"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), @@ -399,7 +439,7 @@ pub mod fixture { #[must_use] pub fn leecher() -> Self { let peer = Peer { - peer_id: Id(*b"-qB00000000000000002"), + peer_id: PeerId(*b"-qB00000000000000002"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), @@ -413,7 +453,7 @@ pub mod fixture { #[allow(dead_code)] #[must_use] - pub fn with_peer_id(mut self, peer_id: &Id) -> Self { + pub fn with_peer_id(mut self, peer_id: &PeerId) -> Self { self.peer.peer_id = *peer_id; self } @@ -462,7 +502,7 @@ pub mod fixture { impl Default for Peer { fn default() -> Self { Self { - peer_id: Id::default(), + peer_id: PeerId(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), @@ -475,7 +515,8 @@ pub mod fixture { impl Default for Id { fn default() -> Self { - Self(*b"-qB00000000000000000") + let data = PeerId(*b"-qB00000000000000000"); + Self { data } } } } @@ -483,113 +524,50 @@ pub mod fixture { #[cfg(test)] pub mod test { mod torrent_peer_id { - use crate::peer; - - #[test] - 
fn should_be_instantiated_from_a_byte_slice() { - let id = peer::Id::from_bytes(&[ - 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, - ]); - - let expected_id = peer::Id([ - 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, - ]); - - assert_eq!(id, expected_id); - } - - #[test] - #[should_panic = "we are testing the equality of the constant: `PEER_ID_BYTES_LEN` (20) and the supplied `bytes` length: 19"] - fn should_fail_trying_to_instantiate_from_a_byte_slice_with_less_than_20_bytes() { - let less_than_20_bytes = [0; 19]; - let _: peer::Id = peer::Id::from_bytes(&less_than_20_bytes); - } + use aquatic_udp_protocol::PeerId; - #[test] - #[should_panic = "we are testing the equality of the constant: `PEER_ID_BYTES_LEN` (20) and the supplied `bytes` length: 21"] - fn should_fail_trying_to_instantiate_from_a_byte_slice_with_more_than_20_bytes() { - let more_than_20_bytes = [0; 21]; - let _: peer::Id = peer::Id::from_bytes(&more_than_20_bytes); - } - - #[test] - fn should_be_instantiated_from_a_string() { - let id = "-qB00000000000000001".parse::().unwrap(); - - let expected_id = peer::Id([ - 45, 113, 66, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 49, - ]); - - assert_eq!(id, expected_id); - } - - #[test] - fn should_be_converted_from_a_20_byte_array() { - let id = peer::Id::from([ - 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, - ]); - - let expected_id = peer::Id([ - 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, - ]); - - assert_eq!(id, expected_id); - } - - #[test] - fn should_be_converted_from_a_byte_vector() { - let id = peer::Id::try_from( - [ - 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, - ] - .to_vec(), - ) - .unwrap(); - - let expected_id = peer::Id([ - 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 
159, 146, 150, - ]); - - assert_eq!(id, expected_id); - } + use crate::peer; #[test] #[should_panic = "NotEnoughBytes"] fn should_fail_trying_to_convert_from_a_byte_vector_with_less_than_20_bytes() { - let _: peer::Id = peer::Id::try_from([0; 19].to_vec()).unwrap(); + let _ = peer::Id::try_from([0; 19].to_vec()).unwrap(); } #[test] #[should_panic = "TooManyBytes"] fn should_fail_trying_to_convert_from_a_byte_vector_with_more_than_20_bytes() { - let _: peer::Id = peer::Id::try_from([0; 21].to_vec()).unwrap(); + let _ = peer::Id::try_from([0; 21].to_vec()).unwrap(); } #[test] fn should_be_converted_to_hex_string() { - let id = peer::Id(*b"-qB00000000000000000"); + let id = peer::Id { + data: PeerId(*b"-qB00000000000000000"), + }; assert_eq!(id.to_hex_string().unwrap(), "0x2d71423030303030303030303030303030303030"); - let id = peer::Id([ - 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, - ]); + let id = peer::Id { + data: PeerId([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]), + }; assert_eq!(id.to_hex_string().unwrap(), "0x009f9296009f9296009f9296009f9296009f9296"); } #[test] fn should_be_converted_into_string_type_using_the_hex_string_format() { - let id = peer::Id(*b"-qB00000000000000000"); + let id = peer::Id { + data: PeerId(*b"-qB00000000000000000"), + }; assert_eq!(id.to_string(), "0x2d71423030303030303030303030303030303030"); - let id = peer::Id([ - 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, - ]); + let id = peer::Id { + data: PeerId([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]), + }; assert_eq!(id.to_string(), "0x009f9296009f9296009f9296009f9296009f9296"); } - - #[test] - fn should_return_the_inner_bytes() { - assert_eq!(peer::Id(*b"-qB00000000000000000").to_bytes(), *b"-qB00000000000000000"); - } } } diff --git a/packages/torrent-repository/benches/helpers/utils.rs 
b/packages/torrent-repository/benches/helpers/utils.rs index f7a392bd8..e21ac7332 100644 --- a/packages/torrent-repository/benches/helpers/utils.rs +++ b/packages/torrent-repository/benches/helpers/utils.rs @@ -1,14 +1,14 @@ use std::collections::HashSet; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use torrust_tracker_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::peer::{Id, Peer}; +use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; use zerocopy::I64; pub const DEFAULT_PEER: Peer = Peer { - peer_id: Id([0; 20]), + peer_id: PeerId([0; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::from_secs(0), uploaded: NumberOfBytes(I64::ZERO), diff --git a/packages/torrent-repository/src/entry/peer_list.rs b/packages/torrent-repository/src/entry/peer_list.rs index 3f69edbb5..33270cf27 100644 --- a/packages/torrent-repository/src/entry/peer_list.rs +++ b/packages/torrent-repository/src/entry/peer_list.rs @@ -2,6 +2,7 @@ use std::net::SocketAddr; use std::sync::Arc; +use aquatic_udp_protocol::PeerId; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; // code-review: the current implementation uses the peer Id as the ``BTreeMap`` @@ -11,7 +12,7 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct PeerList { - peers: std::collections::BTreeMap>, + peers: std::collections::BTreeMap>, } impl PeerList { @@ -29,7 +30,7 @@ impl PeerList { self.peers.insert(value.peer_id, value) } - pub fn remove(&mut self, key: &peer::Id) -> Option> { + pub fn remove(&mut self, key: &PeerId) -> Option> { self.peers.remove(key) } @@ -39,7 +40,7 @@ impl PeerList { } #[must_use] - pub fn get(&self, peer_id: &peer::Id) -> Option<&Arc> { + pub 
fn get(&self, peer_id: &PeerId) -> Option<&Arc> { self.peers.get(peer_id) } @@ -89,8 +90,8 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; + use aquatic_udp_protocol::PeerId; use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use torrust_tracker_primitives::peer::{self}; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::entry::peer_list::PeerList; @@ -193,13 +194,13 @@ mod tests { let mut peer_list = PeerList::default(); let peer1 = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); peer_list.upsert(peer1.into()); let peer2 = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); peer_list.upsert(peer2.into()); @@ -273,14 +274,10 @@ mod tests { fn allow_inserting_two_identical_peers_except_for_the_id() { let mut peer_list = PeerList::default(); - let peer1 = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); + let peer1 = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); peer_list.upsert(peer1.into()); - let peer2 = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .build(); + let peer2 = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000002")).build(); peer_list.upsert(peer2.into()); assert_eq!(peer_list.len(), 2); diff --git a/packages/torrent-repository/tests/common/torrent_peer_builder.rs b/packages/torrent-repository/tests/common/torrent_peer_builder.rs index a5d2814c1..e9b869395 100644 --- a/packages/torrent-repository/tests/common/torrent_peer_builder.rs +++ b/packages/torrent-repository/tests/common/torrent_peer_builder.rs @@ -1,6 +1,6 @@ use std::net::SocketAddr; 
-use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; @@ -42,7 +42,7 @@ impl TorrentPeerBuilder { } #[must_use] - fn with_peer_id(mut self, peer_id: peer::Id) -> Self { + fn with_peer_id(mut self, peer_id: PeerId) -> Self { self.peer.peer_id = peer_id; self } @@ -69,10 +69,11 @@ impl TorrentPeerBuilder { /// has not announced it has stopped #[must_use] pub fn a_completed_peer(id: i32) -> peer::Peer { + let peer_id = peer::Id::from(id); TorrentPeerBuilder::new() .with_number_of_bytes_left(0) .with_event_completed() - .with_peer_id(id.into()) + .with_peer_id(*peer_id) .into() } @@ -80,9 +81,10 @@ pub fn a_completed_peer(id: i32) -> peer::Peer { /// Leecher: left > 0 OR event = Stopped #[must_use] pub fn a_started_peer(id: i32) -> peer::Peer { + let peer_id = peer::Id::from(id); TorrentPeerBuilder::new() .with_number_of_bytes_left(1) .with_event_started() - .with_peer_id(id.into()) + .with_peer_id(*peer_id) .into() } diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index 223819a14..31bec313d 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -400,7 +400,7 @@ async fn it_should_limit_the_number_of_peers_returned( // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { let mut peer = a_started_peer(1); - peer.peer_id = peer::Id::from(peer_number); + peer.peer_id = *peer::Id::from(peer_number); torrent.upsert_peer(&peer).await; } diff --git a/src/core/mod.rs b/src/core/mod.rs index 7c10c0aae..a7ad66052 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -60,15 +60,15 @@ //! use std::net::Ipv4Addr; //! use std::str::FromStr; //! -//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +//! 
use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; //! use torrust_tracker_primitives::peer; //! use torrust_tracker_primitives::info_hash::InfoHash; -//! use torrust_tracker_primitives::{DurationSinceUnixEpoch}; //! //! let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); //! //! let peer = peer::Peer { -//! peer_id: peer::Id(*b"-qB00000000000000001"), +//! peer_id: PeerId(*b"-qB00000000000000001"), //! peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), //! updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), //! uploaded: NumberOfBytes::new(0), @@ -246,14 +246,15 @@ //! A `Peer` is the struct used by the `Tracker` to keep peers data: //! //! ```rust,no_run -//! use torrust_tracker_primitives::peer; //! use std::net::SocketAddr; + +//! use aquatic_udp_protocol::PeerId; //! use torrust_tracker_primitives::DurationSinceUnixEpoch; //! use aquatic_udp_protocol::NumberOfBytes; //! use aquatic_udp_protocol::AnnounceEvent; //! //! pub struct Peer { -//! pub peer_id: peer::Id, // The peer ID +//! pub peer_id: PeerId, // The peer ID //! pub peer_addr: SocketAddr, // Peer socket address //! pub updated: DurationSinceUnixEpoch, // Last time (timestamp) when the peer was updated //! 
pub uploaded: NumberOfBytes, // Number of bytes the peer has uploaded so far @@ -1198,12 +1199,12 @@ mod tests { use std::str::FromStr; use std::sync::Arc; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; - use crate::core::peer::{self, Peer}; + use crate::core::peer::Peer; use crate::core::services::tracker_factory; use crate::core::{TorrentsMetrics, Tracker}; use crate::shared::bit_torrent::info_hash::fixture::gen_seeded_infohash; @@ -1243,7 +1244,7 @@ mod tests { /// Sample peer when for tests that need more than one peer fn sample_peer_1() -> Peer { Peer { - peer_id: peer::Id(*b"-qB00000000000000001"), + peer_id: PeerId(*b"-qB00000000000000001"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), @@ -1256,7 +1257,7 @@ mod tests { /// Sample peer when for tests that need more than one peer fn sample_peer_2() -> Peer { Peer { - peer_id: peer::Id(*b"-qB00000000000000002"), + peer_id: PeerId(*b"-qB00000000000000002"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), @@ -1287,7 +1288,7 @@ mod tests { /// announcing the `AnnounceEvent::Completed` event. 
fn complete_peer() -> Peer { Peer { - peer_id: peer::Id(*b"-qB00000000000000000"), + peer_id: PeerId(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), @@ -1300,7 +1301,7 @@ mod tests { /// A peer that counts as `incomplete` is swarm metadata fn incomplete_peer() -> Peer { Peer { - peer_id: peer::Id(*b"-qB00000000000000000"), + peer_id: PeerId(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), diff --git a/src/core/peer_tests.rs b/src/core/peer_tests.rs index f0773faf0..b60ca3f6d 100644 --- a/src/core/peer_tests.rs +++ b/src/core/peer_tests.rs @@ -2,7 +2,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use torrust_tracker_clock::clock::stopped::Stopped as _; use torrust_tracker_clock::clock::{self, Time}; use torrust_tracker_primitives::peer; @@ -14,7 +14,7 @@ fn it_should_be_serializable() { clock::Stopped::local_set_to_unix_epoch(); let torrent_peer = peer::Peer { - peer_id: peer::Id(*b"-qB0000-000000000000"), + peer_id: PeerId(*b"-qB0000-000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: CurrentClock::now(), uploaded: NumberOfBytes::new(0), diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 9cb38e3f1..3b014982d 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -105,12 +105,12 @@ pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Ve mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use 
torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; fn sample_peer() -> peer::Peer { peer::Peer { - peer_id: peer::Id(*b"-qB00000000000000000"), + peer_id: PeerId(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), diff --git a/src/servers/apis/v1/context/torrent/resources/peer.rs b/src/servers/apis/v1/context/torrent/resources/peer.rs index 129318ce1..dd4a6cc26 100644 --- a/src/servers/apis/v1/context/torrent/resources/peer.rs +++ b/src/servers/apis/v1/context/torrent/resources/peer.rs @@ -1,4 +1,5 @@ //! `Peer` and Peer `Id` API resources. +use aquatic_udp_protocol::PeerId; use derive_more::From; use serde::{Deserialize, Serialize}; use torrust_tracker_primitives::peer; @@ -35,8 +36,9 @@ pub struct Id { pub client: Option, } -impl From for Id { - fn from(peer_id: peer::Id) -> Self { +impl From for Id { + fn from(peer_id: PeerId) -> Self { + let peer_id = peer::Id::from(peer_id); Id { id: peer_id.to_hex_string(), client: peer_id.get_client_name(), diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index 772a37f98..657382c0c 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -97,7 +97,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; @@ -108,7 +108,7 @@ mod tests { fn sample_peer() -> peer::Peer { peer::Peer { - peer_id: peer::Id(*b"-qB00000000000000000"), + peer_id: PeerId(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), 
updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), diff --git a/src/servers/http/percent_encoding.rs b/src/servers/http/percent_encoding.rs index 90f4b9a43..c3243d597 100644 --- a/src/servers/http/percent_encoding.rs +++ b/src/servers/http/percent_encoding.rs @@ -15,6 +15,7 @@ //! - //! - //! - +use aquatic_udp_protocol::PeerId; use torrust_tracker_primitives::info_hash::{self, InfoHash}; use torrust_tracker_primitives::peer; @@ -49,7 +50,7 @@ pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result Result Result { +/// Will return `Err` if if the decoded bytes do not represent a valid [`PeerId`]. +pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result { let bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); - peer::Id::try_from(bytes) + Ok(*peer::Id::try_from(bytes)?) } #[cfg(test)] mod tests { use std::str::FromStr; + use aquatic_udp_protocol::PeerId; use torrust_tracker_primitives::info_hash::InfoHash; - use torrust_tracker_primitives::peer; use crate::servers::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; @@ -112,7 +114,7 @@ mod tests { let peer_id = percent_decode_peer_id(encoded_peer_id).unwrap(); - assert_eq!(peer_id, peer::Id(*b"-qB00000000000000000")); + assert_eq!(peer_id, PeerId(*b"-qB00000000000000000")); } #[test] diff --git a/src/servers/http/v1/extractors/announce_request.rs b/src/servers/http/v1/extractors/announce_request.rs index 6867461e0..b1d820598 100644 --- a/src/servers/http/v1/extractors/announce_request.rs +++ b/src/servers/http/v1/extractors/announce_request.rs @@ -95,9 +95,8 @@ fn extract_announce_from(maybe_raw_query: Option<&str>) -> Result) -> AnnounceEvent { #[cfg(test)] mod tests { + use aquatic_udp_protocol::PeerId; use torrust_tracker_primitives::info_hash::InfoHash; - use torrust_tracker_primitives::peer; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; @@ -199,7 +199,7 @@ mod tests { 
fn sample_announce_request() -> Announce { Announce { info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), - peer_id: "-qB00000000000000001".parse::().unwrap(), + peer_id: PeerId(*b"-qB00000000000000001"), port: 17548, downloaded: None, uploaded: None, diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index 6efee18b3..3253a07c8 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -5,7 +5,7 @@ use std::fmt; use std::panic::Location; use std::str::FromStr; -use aquatic_udp_protocol::NumberOfBytes; +use aquatic_udp_protocol::{NumberOfBytes, PeerId}; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; use torrust_tracker_primitives::info_hash::{self, InfoHash}; @@ -29,20 +29,19 @@ const COMPACT: &str = "compact"; /// query params of the request. /// /// ```rust -/// use aquatic_udp_protocol::NumberOfBytes; +/// use aquatic_udp_protocol::{NumberOfBytes, PeerId}; /// use torrust_tracker::servers::http::v1::requests::announce::{Announce, Compact, Event}; /// use torrust_tracker_primitives::info_hash::InfoHash; -/// use torrust_tracker_primitives::peer; /// /// let request = Announce { /// // Mandatory params /// info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), -/// peer_id: "-qB00000000000000001".parse::().unwrap(), +/// peer_id: PeerId(*b"-qB00000000000000001"), /// port: 17548, /// // Optional params /// downloaded: Some(NumberOfBytes::new(1)), -/// uploaded: Some(NumberOfBytes::new(2)), -/// left: Some(NumberOfBytes::new(3)), +/// uploaded: Some(NumberOfBytes::new(1)), +/// left: Some(NumberOfBytes::new(1)), /// event: Some(Event::Started), /// compact: Some(Compact::NotAccepted) /// }; @@ -60,8 +59,8 @@ pub struct Announce { // Mandatory params /// The `InfoHash` of the torrent. pub info_hash: InfoHash, - /// The `peer::Id` of the peer. - pub peer_id: peer::Id, + /// The `PeerId` of the peer. 
+ pub peer_id: PeerId, /// The port of the peer. pub port: u16, @@ -269,7 +268,7 @@ fn extract_info_hash(query: &Query) -> Result } } -fn extract_peer_id(query: &Query) -> Result { +fn extract_peer_id(query: &Query) -> Result { match query.get_param(PEER_ID) { Some(raw_param) => Ok( percent_decode_peer_id(&raw_param).map_err(|err| ParseAnnounceQueryError::InvalidPeerIdParam { @@ -356,9 +355,8 @@ mod tests { mod announce_request { - use aquatic_udp_protocol::NumberOfBytes; + use aquatic_udp_protocol::{NumberOfBytes, PeerId}; use torrust_tracker_primitives::info_hash::InfoHash; - use torrust_tracker_primitives::peer; use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::announce::{ @@ -382,7 +380,7 @@ mod tests { announce_request, Announce { info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), - peer_id: "-qB00000000000000001".parse::().unwrap(), + peer_id: PeerId(*b"-qB00000000000000001"), port: 17548, downloaded: None, uploaded: None, @@ -415,7 +413,7 @@ mod tests { announce_request, Announce { info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), - peer_id: "-qB00000000000000001".parse::().unwrap(), + peer_id: PeerId(*b"-qB00000000000000001"), port: 17548, downloaded: Some(NumberOfBytes::new(1)), uploaded: Some(NumberOfBytes::new(2)), diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index 134da919e..f223a4bb0 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -178,7 +178,7 @@ impl peer::Encoding for NormalPeer {} impl From for NormalPeer { fn from(peer: peer::Peer) -> Self { NormalPeer { - peer_id: peer.peer_id.to_bytes(), + peer_id: peer.peer_id.0, ip: peer.peer_addr.ip(), port: peer.peer_addr.port(), } @@ -300,8 +300,8 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; + use aquatic_udp_protocol::PeerId; use 
torrust_tracker_configuration::AnnouncePolicy; - use torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; @@ -324,12 +324,12 @@ mod tests { let policy = AnnouncePolicy::new(111, 222); let peer_ipv4 = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 0x7070)) .build(); let peer_ipv6 = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new( IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), 0x7070, diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index a85a4d4bf..6b7f8af5a 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -48,7 +48,7 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut peer: mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; @@ -79,7 +79,7 @@ mod tests { fn sample_peer() -> peer::Peer { peer::Peer { - peer_id: peer::Id(*b"-qB00000000000000000"), + peer_id: PeerId(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index bd3f323b4..42fe4b518 100644 --- a/src/servers/http/v1/services/scrape.rs +++ 
b/src/servers/http/v1/services/scrape.rs @@ -61,7 +61,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; @@ -83,7 +83,7 @@ mod tests { fn sample_peer() -> peer::Peer { peer::Peer { - peer_id: peer::Id(*b"-qB00000000000000000"), + peer_id: PeerId(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index c7204b4b9..1ef404ff0 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -318,7 +318,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; - use aquatic_udp_protocol::NumberOfBytes; + use aquatic_udp_protocol::{NumberOfBytes, PeerId}; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::peer; @@ -391,7 +391,7 @@ mod tests { } #[must_use] - pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { + pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { self.peer.peer_id = peer_id; self } @@ -621,7 +621,6 @@ mod tests { PeerId as AquaticPeerId, Response, ResponsePeer, }; use mockall::predicate::eq; - use torrust_tracker_primitives::peer; use crate::core::{self, statistics}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; @@ -655,7 +654,7 @@ mod tests { let peers = tracker.get_torrent_peers(&info_hash.0.into()); let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer::Id(peer_id.0)) + .with_peer_id(peer_id) .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip), client_port)) .into(); @@ 
-729,7 +728,7 @@ mod tests { let peer_id = AquaticPeerId([255u8; 20]); let peer_using_ipv6 = TorrentPeerBuilder::new() - .with_peer_id(peer::Id(peer_id.0)) + .with_peer_id(peer_id) .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); @@ -795,7 +794,6 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use torrust_tracker_primitives::peer; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_announce; @@ -828,7 +826,7 @@ mod tests { let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer::Id(peer_id.0)) + .with_peer_id(peer_id) .with_peer_address(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) .into(); @@ -848,7 +846,6 @@ mod tests { PeerId as AquaticPeerId, Response, ResponsePeer, }; use mockall::predicate::eq; - use torrust_tracker_primitives::peer; use crate::core::{self, statistics}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; @@ -883,7 +880,7 @@ mod tests { let peers = tracker.get_torrent_peers(&info_hash.0.into()); let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer::Id(peer_id.0)) + .with_peer_id(peer_id) .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); @@ -960,7 +957,7 @@ mod tests { let peer_id = AquaticPeerId([255u8; 20]); let peer_using_ipv4 = TorrentPeerBuilder::new() - .with_peer_id(peer::Id(peer_id.0)) + .with_peer_id(peer_id) .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); @@ -1088,10 +1085,9 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{ - InfoHash, NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, + InfoHash, NumberOfDownloads, NumberOfPeers, PeerId, Response, ScrapeRequest, ScrapeResponse, 
TorrentScrapeStatistics, TransactionId, }; - use torrust_tracker_primitives::peer; use super::TorrentPeerBuilder; use crate::core::{self}; @@ -1134,10 +1130,10 @@ mod tests { } async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { - let peer_id = peer::Id([255u8; 20]); + let peer_id = PeerId([255u8; 20]); let peer = TorrentPeerBuilder::new() - .with_peer_id(peer::Id(peer_id.0)) + .with_peer_id(peer_id) .with_peer_address(*remote_addr) .with_number_of_bytes_left(0) .into(); diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs index 1824b2826..a42ddfaa5 100644 --- a/src/servers/udp/peer_builder.rs +++ b/src/servers/udp/peer_builder.rs @@ -15,7 +15,7 @@ use crate::CurrentClock; #[must_use] pub fn from_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, peer_ip: &IpAddr) -> peer::Peer { peer::Peer { - peer_id: peer::Id(announce_request.peer_id.0), + peer_id: announce_request.peer_id, peer_addr: SocketAddr::new(*peer_ip, announce_request.port.0.into()), updated: CurrentClock::now(), uploaded: announce_request.bytes_uploaded, diff --git a/src/shared/bit_torrent/tracker/http/client/requests/announce.rs b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs index b872e76e9..3c6b14222 100644 --- a/src/shared/bit_torrent/tracker/http/client/requests/announce.rs +++ b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs @@ -2,9 +2,9 @@ use std::fmt; use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; +use aquatic_udp_protocol::PeerId; use serde_repr::Serialize_repr; use torrust_tracker_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::peer; use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20}; @@ -99,7 +99,7 @@ impl QueryBuilder { peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), downloaded: 0, uploaded: 0, - peer_id: peer::Id(*b"-qB00000000000000001").0, + peer_id: PeerId(*b"-qB00000000000000001").0, port: 17548, left: 0, 
event: Some(Event::Completed), @@ -117,7 +117,7 @@ impl QueryBuilder { } #[must_use] - pub fn with_peer_id(mut self, peer_id: &peer::Id) -> Self { + pub fn with_peer_id(mut self, peer_id: &PeerId) -> Self { self.announce_query.peer_id = peer_id.0; self } diff --git a/src/shared/bit_torrent/tracker/http/client/responses/announce.rs b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs index 15ec446cb..7f2d3611c 100644 --- a/src/shared/bit_torrent/tracker/http/client/responses/announce.rs +++ b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs @@ -2,6 +2,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use serde::{Deserialize, Serialize}; use torrust_tracker_primitives::peer; +use zerocopy::AsBytes as _; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Announce { @@ -25,7 +26,7 @@ pub struct DictionaryPeer { impl From for DictionaryPeer { fn from(peer: peer::Peer) -> Self { DictionaryPeer { - peer_id: peer.peer_id.to_bytes().to_vec(), + peer_id: peer.peer_id.as_bytes().to_vec(), ip: peer.peer_addr.ip().to_string(), port: peer.peer_addr.port(), } diff --git a/tests/servers/http/requests/announce.rs b/tests/servers/http/requests/announce.rs index 061990621..bcbb36852 100644 --- a/tests/servers/http/requests/announce.rs +++ b/tests/servers/http/requests/announce.rs @@ -2,9 +2,9 @@ use std::fmt; use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; +use aquatic_udp_protocol::PeerId; use serde_repr::Serialize_repr; use torrust_tracker_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::peer; use crate::servers::http::{percent_encode_byte_array, ByteArray20}; @@ -93,7 +93,7 @@ impl QueryBuilder { peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), downloaded: 0, uploaded: 0, - peer_id: peer::Id(*b"-qB00000000000000001").0, + peer_id: PeerId(*b"-qB00000000000000001").0, port: 17548, left: 0, event: Some(Event::Completed), @@ -109,7 +109,7 @@ impl QueryBuilder { self } - pub fn with_peer_id(mut self, peer_id: 
&peer::Id) -> Self { + pub fn with_peer_id(mut self, peer_id: &PeerId) -> Self { self.announce_query.peer_id = peer_id.0; self } diff --git a/tests/servers/http/responses/announce.rs b/tests/servers/http/responses/announce.rs index 2b49b4405..554e5ab40 100644 --- a/tests/servers/http/responses/announce.rs +++ b/tests/servers/http/responses/announce.rs @@ -2,6 +2,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use serde::{Deserialize, Serialize}; use torrust_tracker_primitives::peer; +use zerocopy::AsBytes as _; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Announce { @@ -25,7 +26,7 @@ pub struct DictionaryPeer { impl From for DictionaryPeer { fn from(peer: peer::Peer) -> Self { DictionaryPeer { - peer_id: peer.peer_id.to_bytes().to_vec(), + peer_id: peer.peer_id.as_bytes().to_vec(), ip: peer.peer_addr.ip().to_string(), port: peer.peer_addr.port(), } diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 14c237984..edc06fb07 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -86,11 +86,11 @@ mod for_all_config_modes { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV6}; use std::str::FromStr; + use aquatic_udp_protocol::PeerId; use local_ip_address::local_ip; use reqwest::{Response, StatusCode}; use tokio::net::TcpListener; use torrust_tracker_primitives::info_hash::InfoHash; - use torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; @@ -410,9 +410,7 @@ mod for_all_config_modes { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); + let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); // Add the Peer 1 env.add_torrent_peer(&info_hash, 
&previously_announced_peer); @@ -422,7 +420,7 @@ mod for_all_config_modes { .announce( &QueryBuilder::default() .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_peer_id(&PeerId(*b"-qB00000000000000002")) .query(), ) .await; @@ -453,14 +451,14 @@ mod for_all_config_modes { // Announce a peer using IPV4 let peer_using_ipv4 = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) .build(); env.add_torrent_peer(&info_hash, &peer_using_ipv4); // Announce a peer using IPV6 let peer_using_ipv6 = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new( IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), 8080, @@ -473,7 +471,7 @@ mod for_all_config_modes { .announce( &QueryBuilder::default() .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000003")) + .with_peer_id(&PeerId(*b"-qB00000000000000003")) .query(), ) .await; @@ -531,9 +529,7 @@ mod for_all_config_modes { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); + let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); // Add the Peer 1 env.add_torrent_peer(&info_hash, &previously_announced_peer); @@ -543,7 +539,7 @@ mod for_all_config_modes { .announce( &QueryBuilder::default() .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_compact(Compact::Accepted) .query(), ) @@ -572,9 +568,7 @@ mod for_all_config_modes { let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); + let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); // Add the Peer 1 env.add_torrent_peer(&info_hash, &previously_announced_peer); @@ -586,7 +580,7 @@ mod for_all_config_modes { .announce( &QueryBuilder::default() .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_peer_id(&PeerId(*b"-qB00000000000000002")) .without_compact() .query(), ) @@ -886,9 +880,9 @@ mod for_all_config_modes { use std::net::{IpAddr, Ipv6Addr, SocketAddrV6}; use std::str::FromStr; + use aquatic_udp_protocol::PeerId; use tokio::net::TcpListener; use torrust_tracker_primitives::info_hash::InfoHash; - use torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; @@ -939,7 +933,7 @@ mod for_all_config_modes { env.add_torrent_peer( &info_hash, &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), ); @@ -977,7 +971,7 @@ mod for_all_config_modes { env.add_torrent_peer( &info_hash, &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_no_bytes_pending_to_download() .build(), ); @@ -1158,8 +1152,8 @@ mod configured_as_whitelisted { mod receiving_an_scrape_request { use std::str::FromStr; + use aquatic_udp_protocol::PeerId; use torrust_tracker_primitives::info_hash::InfoHash; - use torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; @@ -1177,7 +1171,7 @@ mod configured_as_whitelisted { env.add_torrent_peer( &info_hash, 
&PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), ); @@ -1206,7 +1200,7 @@ mod configured_as_whitelisted { env.add_torrent_peer( &info_hash, &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), ); @@ -1324,9 +1318,9 @@ mod configured_as_private { use std::str::FromStr; use std::time::Duration; + use aquatic_udp_protocol::PeerId; use torrust_tracker::core::auth::Key; use torrust_tracker_primitives::info_hash::InfoHash; - use torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; @@ -1359,7 +1353,7 @@ mod configured_as_private { env.add_torrent_peer( &info_hash, &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), ); @@ -1388,7 +1382,7 @@ mod configured_as_private { env.add_torrent_peer( &info_hash, &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), ); @@ -1431,7 +1425,7 @@ mod configured_as_private { env.add_torrent_peer( &info_hash, &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), ); From bef5680a02d43aacf11684ab3b3245e6cace7fb6 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 15 Jul 2024 10:59:33 +0200 Subject: [PATCH 0981/1003] dev: various fixups --- packages/primitives/src/info_hash.rs | 2 +- packages/primitives/src/lib.rs | 9 ----- packages/primitives/src/peer.rs | 34 +++++++------------ .../tests/common/torrent_peer_builder.rs | 4 +--
.../torrent-repository/tests/entry/mod.rs | 2 +- 5 files changed, 16 insertions(+), 35 deletions(-) diff --git a/packages/primitives/src/info_hash.rs b/packages/primitives/src/info_hash.rs index 57dfd90e5..61b40a746 100644 --- a/packages/primitives/src/info_hash.rs +++ b/packages/primitives/src/info_hash.rs @@ -73,7 +73,7 @@ impl Ord for InfoHash { } } -impl std::cmp::PartialOrd for InfoHash { +impl PartialOrd for InfoHash { fn partial_cmp(&self, other: &InfoHash) -> Option { Some(self.cmp(other)) } diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index a9bf97009..08fc58976 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -18,13 +18,4 @@ pub mod torrent_metrics; /// Duration since the Unix Epoch. pub type DurationSinceUnixEpoch = Duration; -/// IP version used by the peer to connect to the tracker: IPv4 or IPv6 -#[derive(PartialEq, Eq, Debug)] -pub enum IPVersion { - /// - IPv4, - /// - IPv6, -} - pub type PersistentTorrents = BTreeMap; diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index 9a02ef39b..c8ff1791d 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -30,7 +30,7 @@ use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use serde::Serialize; use zerocopy::FromBytes as _; -use crate::{DurationSinceUnixEpoch, IPVersion}; +use crate::DurationSinceUnixEpoch; /// Peer struct used by the core `Tracker`. 
/// @@ -208,15 +208,6 @@ impl Peer { pub fn change_ip(&mut self, new_ip: &IpAddr) { self.peer_addr = SocketAddr::new(*new_ip, self.peer_addr.port()); } - - /// The IP version used by the peer: IPV4 or IPV6 - #[must_use] - pub fn ip_version(&self) -> IPVersion { - if self.peer_addr.is_ipv4() { - return IPVersion::IPv4; - } - IPVersion::IPv6 - } } use std::panic::Location; @@ -264,22 +255,21 @@ impl DerefMut for Id { } } -impl From<[u8; 20]> for Id { - fn from(bytes: [u8; 20]) -> Self { - let data = PeerId(bytes); - Self { data } - } -} - -impl From for Id { - fn from(number: i32) -> Self { +impl Id { + #[must_use] + pub fn new(number: T) -> Self + where + T: Into, + { + let number: i128 = number.into(); let number = number.to_le_bytes(); let bytes = [ - 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, number[0], number[1], number[2], - number[3], + 0u8, 0u8, 0u8, 0u8, number[0], number[1], number[2], number[3], number[4], number[5], number[6], number[7], + number[8], number[9], number[10], number[11], number[12], number[13], number[14], number[15], ]; - Id::from(bytes) + let data = PeerId(bytes); + Id { data } } } diff --git a/packages/torrent-repository/tests/common/torrent_peer_builder.rs b/packages/torrent-repository/tests/common/torrent_peer_builder.rs index e9b869395..33120180d 100644 --- a/packages/torrent-repository/tests/common/torrent_peer_builder.rs +++ b/packages/torrent-repository/tests/common/torrent_peer_builder.rs @@ -69,7 +69,7 @@ impl TorrentPeerBuilder { /// has not announced it has stopped #[must_use] pub fn a_completed_peer(id: i32) -> peer::Peer { - let peer_id = peer::Id::from(id); + let peer_id = peer::Id::new(id); TorrentPeerBuilder::new() .with_number_of_bytes_left(0) .with_event_completed() @@ -81,7 +81,7 @@ pub fn a_completed_peer(id: i32) -> peer::Peer { /// Leecher: left > 0 OR event = Stopped #[must_use] pub fn a_started_peer(id: i32) -> peer::Peer { - let peer_id = peer::Id::from(id); + let peer_id = 
peer::Id::new(id); TorrentPeerBuilder::new() .with_number_of_bytes_left(1) .with_event_started() diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index 31bec313d..43d7f94da 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -400,7 +400,7 @@ async fn it_should_limit_the_number_of_peers_returned( // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { let mut peer = a_started_peer(1); - peer.peer_id = *peer::Id::from(peer_number); + peer.peer_id = *peer::Id::new(peer_number); torrent.upsert_peer(&peer).await; } From dcb7770acb5867c704c66a38432507bf643b2541 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 24 Aug 2024 17:58:11 +0200 Subject: [PATCH 0982/1003] chore: update contrib bencode --- Cargo.lock | 12 +- contrib/bencode/Cargo.toml | 6 +- contrib/bencode/src/access/convert.rs | 78 ++++------ contrib/bencode/src/access/dict.rs | 6 +- contrib/bencode/src/access/list.rs | 8 -- contrib/bencode/src/error.rs | 143 ++++++------------- contrib/bencode/src/lib.rs | 6 +- contrib/bencode/src/mutable/encode.rs | 2 + contrib/bencode/src/reference/bencode_ref.rs | 7 +- contrib/bencode/src/reference/decode.rs | 81 ++++------- contrib/bencode/{test => tests}/mod.rs | 0 11 files changed, 117 insertions(+), 232 deletions(-) rename contrib/bencode/{test => tests}/mod.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 504a5bb17..057e1f5db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1274,16 +1274,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "error-chain" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" -dependencies = [ - "backtrace", - "version_check", -] - [[package]] name = "event-listener" version = "2.5.3" @@ -4021,7 +4011,7 @@ name = "torrust-tracker-contrib-bencode" version = 
"3.0.0-beta-develop" dependencies = [ "criterion", - "error-chain", + "thiserror", ] [[package]] diff --git a/contrib/bencode/Cargo.toml b/contrib/bencode/Cargo.toml index f7bab0585..e25a9b64f 100644 --- a/contrib/bencode/Cargo.toml +++ b/contrib/bencode/Cargo.toml @@ -16,15 +16,11 @@ rust-version.workspace = true version.workspace = true [dependencies] -error-chain = "0" +thiserror = "1" [dev-dependencies] criterion = "0" -[[test]] -name = "test" -path = "test/mod.rs" - [[bench]] harness = false name = "bencode_benchmark" diff --git a/contrib/bencode/src/access/convert.rs b/contrib/bencode/src/access/convert.rs index 42b04f267..b2eb41d15 100644 --- a/contrib/bencode/src/access/convert.rs +++ b/contrib/bencode/src/access/convert.rs @@ -2,7 +2,7 @@ use crate::access::bencode::{BRefAccess, BRefAccessExt}; use crate::access::dict::BDictAccess; use crate::access::list::BListAccess; -use crate::{BencodeConvertError, BencodeConvertErrorKind}; +use crate::BencodeConvertError; /// Trait for extended casting of bencode objects and converting conversion errors into application specific errors. pub trait BConvertExt: BConvert { @@ -12,12 +12,10 @@ pub trait BConvertExt: BConvert { B: BRefAccessExt<'a>, E: AsRef<[u8]>, { - bencode.bytes_ext().ok_or( - self.handle_error(BencodeConvertError::from_kind(BencodeConvertErrorKind::WrongType { - key: error_key.as_ref().to_owned(), - expected_type: "Bytes".to_owned(), - })), - ) + bencode.bytes_ext().ok_or(self.handle_error(BencodeConvertError::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "Bytes".to_owned(), + })) } /// See `BConvert::convert_str`. 
@@ -26,12 +24,10 @@ pub trait BConvertExt: BConvert { B: BRefAccessExt<'a>, E: AsRef<[u8]>, { - bencode.str_ext().ok_or( - self.handle_error(BencodeConvertError::from_kind(BencodeConvertErrorKind::WrongType { - key: error_key.as_ref().to_owned(), - expected_type: "UTF-8 Bytes".to_owned(), - })), - ) + bencode.str_ext().ok_or(self.handle_error(BencodeConvertError::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "UTF-8 Bytes".to_owned(), + })) } /// See `BConvert::lookup_and_convert_bytes`. @@ -77,12 +73,10 @@ pub trait BConvert { B: BRefAccess, E: AsRef<[u8]>, { - bencode.int().ok_or( - self.handle_error(BencodeConvertError::from_kind(BencodeConvertErrorKind::WrongType { - key: error_key.as_ref().to_owned(), - expected_type: "Integer".to_owned(), - })), - ) + bencode.int().ok_or(self.handle_error(BencodeConvertError::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "Integer".to_owned(), + })) } /// Attempt to convert the given bencode value into bytes. @@ -93,12 +87,10 @@ pub trait BConvert { B: BRefAccess, E: AsRef<[u8]>, { - bencode.bytes().ok_or( - self.handle_error(BencodeConvertError::from_kind(BencodeConvertErrorKind::WrongType { - key: error_key.as_ref().to_owned(), - expected_type: "Bytes".to_owned(), - })), - ) + bencode.bytes().ok_or(self.handle_error(BencodeConvertError::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "Bytes".to_owned(), + })) } /// Attempt to convert the given bencode value into a UTF-8 string. 
@@ -109,12 +101,10 @@ pub trait BConvert { B: BRefAccess, E: AsRef<[u8]>, { - bencode.str().ok_or( - self.handle_error(BencodeConvertError::from_kind(BencodeConvertErrorKind::WrongType { - key: error_key.as_ref().to_owned(), - expected_type: "UTF-8 Bytes".to_owned(), - })), - ) + bencode.str().ok_or(self.handle_error(BencodeConvertError::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "UTF-8 Bytes".to_owned(), + })) } /// Attempt to convert the given bencode value into a list. @@ -125,12 +115,10 @@ pub trait BConvert { B: BRefAccess, E: AsRef<[u8]>, { - bencode.list().ok_or( - self.handle_error(BencodeConvertError::from_kind(BencodeConvertErrorKind::WrongType { - key: error_key.as_ref().to_owned(), - expected_type: "List".to_owned(), - })), - ) + bencode.list().ok_or(self.handle_error(BencodeConvertError::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "List".to_owned(), + })) } /// Attempt to convert the given bencode value into a dictionary. @@ -141,12 +129,10 @@ pub trait BConvert { B: BRefAccess, E: AsRef<[u8]>, { - bencode.dict().ok_or( - self.handle_error(BencodeConvertError::from_kind(BencodeConvertErrorKind::WrongType { - key: error_key.as_ref().to_owned(), - expected_type: "Dictionary".to_owned(), - })), - ) + bencode.dict().ok_or(self.handle_error(BencodeConvertError::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "Dictionary".to_owned(), + })) } /// Look up a value in a dictionary of bencoded values using the given key. 
@@ -159,11 +145,7 @@ pub trait BConvert { match dictionary.lookup(key_ref) { Some(n) => Ok(n), - None => Err( - self.handle_error(BencodeConvertError::from_kind(BencodeConvertErrorKind::MissingKey { - key: key_ref.to_owned(), - })), - ), + None => Err(self.handle_error(BencodeConvertError::MissingKey { key: key_ref.to_owned() })), } } diff --git a/contrib/bencode/src/access/dict.rs b/contrib/bencode/src/access/dict.rs index 7efe93fc3..a3e56d1bb 100644 --- a/contrib/bencode/src/access/dict.rs +++ b/contrib/bencode/src/access/dict.rs @@ -21,8 +21,7 @@ pub trait BDictAccess { impl<'a, V> BDictAccess<&'a [u8], V> for BTreeMap<&'a [u8], V> { fn to_list(&self) -> Vec<(&&'a [u8], &V)> { - #[allow(clippy::map_identity)] - self.iter().map(|(k, v)| (k, v)).collect() + self.iter().collect() } fn lookup(&self, key: &[u8]) -> Option<&V> { @@ -44,8 +43,7 @@ impl<'a, V> BDictAccess<&'a [u8], V> for BTreeMap<&'a [u8], V> { impl<'a, V> BDictAccess, V> for BTreeMap, V> { fn to_list(&self) -> Vec<(&Cow<'a, [u8]>, &V)> { - #[allow(clippy::map_identity)] - self.iter().map(|(k, v)| (k, v)).collect() + self.iter().collect() } fn lookup(&self, key: &[u8]) -> Option<&V> { diff --git a/contrib/bencode/src/access/list.rs b/contrib/bencode/src/access/list.rs index c6d1fc407..840bffa1e 100644 --- a/contrib/bencode/src/access/list.rs +++ b/contrib/bencode/src/access/list.rs @@ -45,14 +45,6 @@ impl<'a, V: 'a> IndexMut for &'a mut dyn BListAccess { } } -impl<'a, V: 'a> dyn BListAccess { - pub fn iter(&'a self) -> impl Iterator { - self.into_iter() - } -} - -#[allow(unknown_lints)] -#[allow(clippy::into_iter_without_iter)] impl<'a, V: 'a> IntoIterator for &'a dyn BListAccess { type Item = &'a V; type IntoIter = BListIter<'a, V>; diff --git a/contrib/bencode/src/error.rs b/contrib/bencode/src/error.rs index 54c589e3e..6e661a068 100644 --- a/contrib/bencode/src/error.rs +++ b/contrib/bencode/src/error.rs @@ -1,103 +1,52 @@ -#![allow(unknown_lints)] -#![allow(clippy::iter_without_into_iter)] -use 
error_chain::error_chain; +use thiserror::Error; -error_chain! { - types { - BencodeParseError, BencodeParseErrorKind, BencodeParseResultExt, BencodeParseResult; - } +#[allow(clippy::module_name_repetitions)] +#[derive(Error, Debug)] +pub enum BencodeParseError { + #[error("Incomplete Number Of Bytes At {pos}")] + BytesEmpty { pos: usize }, - errors { - BytesEmpty { - pos: usize - } { - description("Incomplete Number Of Bytes") - display("Incomplete Number Of Bytes At {:?}", pos) - } - InvalidByte { - pos: usize - } { - description("Invalid Byte Found") - display("Invalid Byte Found At {:?}", pos) - } - InvalidIntNoDelimiter { - pos: usize - } { - description("Invalid Integer Found With No Delimiter") - display("Invalid Integer Found With No Delimiter At {:?}", pos) - } - InvalidIntNegativeZero { - pos: usize - } { - description("Invalid Integer Found As Negative Zero") - display("Invalid Integer Found As Negative Zero At {:?}", pos) - } - InvalidIntZeroPadding { - pos: usize - } { - description("Invalid Integer Found With Zero Padding") - display("Invalid Integer Found With Zero Padding At {:?}", pos) - } - InvalidIntParseError { - pos: usize - } { - description("Invalid Integer Found To Fail Parsing") - display("Invalid Integer Found To Fail Parsing At {:?}", pos) - } - InvalidKeyOrdering { - pos: usize, - key: Vec - } { - description("Invalid Dictionary Key Ordering Found") - display("Invalid Dictionary Key Ordering Found At {:?} For Key {:?}", pos, key) - } - InvalidKeyDuplicates { - pos: usize, - key: Vec - } { - description("Invalid Dictionary Duplicate Keys Found") - display("Invalid Dictionary Key Found At {:?} For Key {:?}", pos, key) - } - InvalidLengthNegative { - pos: usize - } { - description("Invalid Byte Length Found As Negative") - display("Invalid Byte Length Found As Negative At {:?}", pos) - } - InvalidLengthOverflow { - pos: usize - } { - description("Invalid Byte Length Found To Overflow Buffer Length") - display("Invalid Byte Length Found To 
Overflow Buffer Length At {:?}", pos) - } - InvalidRecursionExceeded { - pos: usize, - max: usize - } { - description("Invalid Recursion Limit Exceeded") - display("Invalid Recursion Limit Exceeded At {:?} For Limit {:?}", pos, max) - } - } + #[error("Invalid Byte Found At {pos}")] + InvalidByte { pos: usize }, + + #[error("Invalid Integer Found With No Delimiter At {pos}")] + InvalidIntNoDelimiter { pos: usize }, + + #[error("Invalid Integer Found As Negative Zero At {pos}")] + InvalidIntNegativeZero { pos: usize }, + + #[error("Invalid Integer Found With Zero Padding At {pos}")] + InvalidIntZeroPadding { pos: usize }, + + #[error("Invalid Integer Found To Fail Parsing At {pos}")] + InvalidIntParseError { pos: usize }, + + #[error("Invalid Dictionary Key Ordering Found At {pos} For Key {key:?}")] + InvalidKeyOrdering { pos: usize, key: Vec }, + + #[error("Invalid Dictionary Key Found At {pos} For Key {key:?}")] + InvalidKeyDuplicates { pos: usize, key: Vec }, + + #[error("Invalid Byte Length Found As Negative At {pos}")] + InvalidLengthNegative { pos: usize }, + + #[error("Invalid Byte Length Found To Overflow Buffer Length At {pos}")] + InvalidLengthOverflow { pos: usize }, + + #[error("Invalid Recursion Limit Exceeded At {pos} For Limit {max}")] + InvalidRecursionExceeded { pos: usize, max: usize }, } -error_chain! 
{ - types { - BencodeConvertError, BencodeConvertErrorKind, BencodeConvertResultExt, BencodeConvertResult; - } +pub type BencodeParseResult = Result; - errors { - MissingKey { - key: Vec - } { - description("Missing Key In Bencode") - display("Missing Key In Bencode For {:?}", key) - } - WrongType { - key: Vec, - expected_type: String - } { - description("Wrong Type In Bencode") - display("Wrong Type In Bencode For {:?} Expected Type {}", key, expected_type) - } - } +#[allow(clippy::module_name_repetitions)] +#[derive(Error, Debug)] +pub enum BencodeConvertError { + #[error("Missing Key In Bencode For {key:?}")] + MissingKey { key: Vec }, + + #[error("Wrong Type In Bencode For {key:?} Expected Type {expected_type}")] + WrongType { key: Vec, expected_type: String }, } + +pub type BencodeConvertResult = Result; diff --git a/contrib/bencode/src/lib.rs b/contrib/bencode/src/lib.rs index 78e113b66..09aaa6867 100644 --- a/contrib/bencode/src/lib.rs +++ b/contrib/bencode/src/lib.rs @@ -7,7 +7,6 @@ //! ```rust //! extern crate bencode; //! -//! use std::default::Default; //! use bencode::{BencodeRef, BRefAccess, BDecodeOpt}; //! //! 
fn main() { @@ -63,10 +62,7 @@ pub use crate::access::bencode::{BMutAccess, BRefAccess, MutKind, RefKind}; pub use crate::access::convert::BConvert; pub use crate::access::dict::BDictAccess; pub use crate::access::list::BListAccess; -pub use crate::error::{ - BencodeConvertError, BencodeConvertErrorKind, BencodeConvertResult, BencodeParseError, BencodeParseErrorKind, - BencodeParseResult, -}; +pub use crate::error::{BencodeConvertError, BencodeConvertResult, BencodeParseError, BencodeParseResult}; pub use crate::mutable::bencode_mut::BencodeMut; pub use crate::reference::bencode_ref::BencodeRef; pub use crate::reference::decode_opt::BDecodeOpt; diff --git a/contrib/bencode/src/mutable/encode.rs b/contrib/bencode/src/mutable/encode.rs index 25c91b41d..811c35816 100644 --- a/contrib/bencode/src/mutable/encode.rs +++ b/contrib/bencode/src/mutable/encode.rs @@ -1,3 +1,5 @@ +use std::iter::Extend; + use crate::access::bencode::{BRefAccess, RefKind}; use crate::access::dict::BDictAccess; use crate::access::list::BListAccess; diff --git a/contrib/bencode/src/reference/bencode_ref.rs b/contrib/bencode/src/reference/bencode_ref.rs index a6f2c15bc..73aaad039 100644 --- a/contrib/bencode/src/reference/bencode_ref.rs +++ b/contrib/bencode/src/reference/bencode_ref.rs @@ -4,7 +4,7 @@ use std::str; use crate::access::bencode::{BRefAccess, BRefAccessExt, RefKind}; use crate::access::dict::BDictAccess; use crate::access::list::BListAccess; -use crate::error::{BencodeParseError, BencodeParseErrorKind, BencodeParseResult}; +use crate::error::{BencodeParseError, BencodeParseResult}; use crate::reference::decode; use crate::reference::decode_opt::BDecodeOpt; @@ -41,9 +41,7 @@ impl<'a> BencodeRef<'a> { let (bencode, end_pos) = decode::decode(bytes, 0, opts, 0)?; if end_pos != bytes.len() && opts.enforce_full_decode() { - return Err(BencodeParseError::from_kind(BencodeParseErrorKind::BytesEmpty { - pos: end_pos, - })); + return Err(BencodeParseError::BytesEmpty { pos: end_pos }); } 
Ok(bencode) @@ -125,6 +123,7 @@ impl<'a> BRefAccessExt<'a> for BencodeRef<'a> { #[cfg(test)] mod tests { + use crate::access::bencode::BRefAccess; use crate::reference::bencode_ref::BencodeRef; use crate::reference::decode_opt::BDecodeOpt; diff --git a/contrib/bencode/src/reference/decode.rs b/contrib/bencode/src/reference/decode.rs index d35d1b597..97c5cf1ff 100644 --- a/contrib/bencode/src/reference/decode.rs +++ b/contrib/bencode/src/reference/decode.rs @@ -1,16 +1,14 @@ use std::collections::btree_map::Entry; use std::collections::BTreeMap; -use std::str::{self}; +use std::str; -use crate::error::{BencodeParseError, BencodeParseErrorKind, BencodeParseResult}; +use crate::error::{BencodeParseError, BencodeParseResult}; use crate::reference::bencode_ref::{BencodeRef, Inner}; use crate::reference::decode_opt::BDecodeOpt; pub fn decode(bytes: &[u8], pos: usize, opts: BDecodeOpt, depth: usize) -> BencodeParseResult<(BencodeRef<'_>, usize)> { if depth >= opts.max_recursion() { - return Err(BencodeParseError::from_kind( - BencodeParseErrorKind::InvalidRecursionExceeded { pos, max: depth }, - )); + return Err(BencodeParseError::InvalidRecursionExceeded { pos, max: depth }); } let curr_byte = peek_byte(bytes, pos)?; @@ -32,7 +30,7 @@ pub fn decode(bytes: &[u8], pos: usize, opts: BDecodeOpt, depth: usize) -> Benco // Include the length digit, don't increment position Ok((Inner::Bytes(bencode, &bytes[pos..next_pos]).into(), next_pos)) } - _ => Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidByte { pos })), + _ => Err(BencodeParseError::InvalidByte { pos }), } } @@ -40,32 +38,24 @@ fn decode_int(bytes: &[u8], pos: usize, delim: u8) -> BencodeParseResult<(i64, u let (_, begin_decode) = bytes.split_at(pos); let Some(relative_end_pos) = begin_decode.iter().position(|n| *n == delim) else { - return Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidIntNoDelimiter { - pos, - })); + return Err(BencodeParseError::InvalidIntNoDelimiter { pos }); }; let 
int_byte_slice = &begin_decode[..relative_end_pos]; if int_byte_slice.len() > 1 { // Negative zero is not allowed (this would not be caught when converting) if int_byte_slice[0] == b'-' && int_byte_slice[1] == b'0' { - return Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidIntNegativeZero { - pos, - })); + return Err(BencodeParseError::InvalidIntNegativeZero { pos }); } // Zero padding is illegal, and unspecified for key lengths (we disallow both) if int_byte_slice[0] == b'0' { - return Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidIntZeroPadding { - pos, - })); + return Err(BencodeParseError::InvalidIntZeroPadding { pos }); } } let Ok(int_str) = str::from_utf8(int_byte_slice) else { - return Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidIntParseError { - pos, - })); + return Err(BencodeParseError::InvalidIntParseError { pos }); }; // Position of end of integer type, next byte is the start of the next value @@ -73,31 +63,24 @@ fn decode_int(bytes: &[u8], pos: usize, delim: u8) -> BencodeParseResult<(i64, u let next_pos = absolute_end_pos + 1; match int_str.parse::() { Ok(n) => Ok((n, next_pos)), - Err(_) => Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidIntParseError { - pos, - })), + Err(_) => Err(BencodeParseError::InvalidIntParseError { pos }), } } +use std::convert::TryFrom; + fn decode_bytes(bytes: &[u8], pos: usize) -> BencodeParseResult<(&[u8], usize)> { let (num_bytes, start_pos) = decode_int(bytes, pos, crate::BYTE_LEN_END)?; if num_bytes < 0 { - return Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidLengthNegative { - pos, - })); + return Err(BencodeParseError::InvalidLengthNegative { pos }); } - // Should be safe to cast to usize (TODO: Check if cast would overflow to provide - // a more helpful error message, otherwise, parsing will probably fail with an - // unrelated message). 
- let num_bytes = - usize::try_from(num_bytes).map_err(|_| BencodeParseErrorKind::Msg(format!("input length is too long: {num_bytes}")))?; + // Use usize::try_from to handle potential overflow + let num_bytes = usize::try_from(num_bytes).map_err(|_| BencodeParseError::InvalidLengthOverflow { pos })?; if num_bytes > bytes[start_pos..].len() { - return Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidLengthOverflow { - pos, - })); + return Err(BencodeParseError::InvalidLengthOverflow { pos }); } let next_pos = start_pos + num_bytes; @@ -140,10 +123,10 @@ fn decode_dict( // Spec says that the keys must be in alphabetical order match (bencode_dict.keys().last(), opts.check_key_sort()) { (Some(last_key), true) if key_bytes < *last_key => { - return Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidKeyOrdering { + return Err(BencodeParseError::InvalidKeyOrdering { pos: curr_pos, key: key_bytes.to_vec(), - })) + }) } _ => (), }; @@ -153,10 +136,10 @@ fn decode_dict( match bencode_dict.entry(key_bytes) { Entry::Vacant(n) => n.insert(value), Entry::Occupied(_) => { - return Err(BencodeParseError::from_kind(BencodeParseErrorKind::InvalidKeyDuplicates { + return Err(BencodeParseError::InvalidKeyDuplicates { pos: curr_pos, key: key_bytes.to_vec(), - })) + }) } }; @@ -169,14 +152,12 @@ fn decode_dict( } fn peek_byte(bytes: &[u8], pos: usize) -> BencodeParseResult { - bytes - .get(pos) - .copied() - .ok_or_else(|| BencodeParseError::from_kind(BencodeParseErrorKind::BytesEmpty { pos })) + bytes.get(pos).copied().ok_or(BencodeParseError::BytesEmpty { pos }) } #[cfg(test)] mod tests { + use crate::access::bencode::BRefAccess; use crate::reference::bencode_ref::BencodeRef; use crate::reference::decode_opt::BDecodeOpt; @@ -327,13 +308,13 @@ mod tests { } #[test] - #[should_panic = "BencodeParseError(InvalidByte { pos: 0 }"] + #[should_panic = "InvalidByte { pos: 0 }"] fn negative_decode_bytes_neg_len() { BencodeRef::decode(BYTES_NEG_LEN, 
BDecodeOpt::default()).unwrap(); } #[test] - #[should_panic = "BencodeParseError(BytesEmpty { pos: 20 }"] + #[should_panic = "BytesEmpty { pos: 20 }"] fn negative_decode_bytes_extra() { BencodeRef::decode(BYTES_EXTRA, BDecodeOpt::default()).unwrap(); } @@ -346,49 +327,49 @@ mod tests { } #[test] - #[should_panic = "BencodeParseError(InvalidIntParseError { pos: 1 }"] + #[should_panic = "InvalidIntParseError { pos: 1 }"] fn negative_decode_int_nan() { super::decode_int(INT_NAN, 1, crate::BEN_END).unwrap(); } #[test] - #[should_panic = "BencodeParseError(InvalidIntZeroPadding { pos: 1 }"] + #[should_panic = "InvalidIntZeroPadding { pos: 1 }"] fn negative_decode_int_leading_zero() { super::decode_int(INT_LEADING_ZERO, 1, crate::BEN_END).unwrap(); } #[test] - #[should_panic = "BencodeParseError(InvalidIntZeroPadding { pos: 1 }"] + #[should_panic = "InvalidIntZeroPadding { pos: 1 }"] fn negative_decode_int_double_zero() { super::decode_int(INT_DOUBLE_ZERO, 1, crate::BEN_END).unwrap(); } #[test] - #[should_panic = "BencodeParseError(InvalidIntNegativeZero { pos: 1 }"] + #[should_panic = "InvalidIntNegativeZero { pos: 1 }"] fn negative_decode_int_negative_zero() { super::decode_int(INT_NEGATIVE_ZERO, 1, crate::BEN_END).unwrap(); } #[test] - #[should_panic = " BencodeParseError(InvalidIntParseError { pos: 1 }"] + #[should_panic = " InvalidIntParseError { pos: 1 }"] fn negative_decode_int_double_negative() { super::decode_int(INT_DOUBLE_NEGATIVE, 1, crate::BEN_END).unwrap(); } #[test] - #[should_panic = "BencodeParseError(InvalidKeyOrdering { pos: 15, key: [97, 95, 107, 101, 121] }"] + #[should_panic = "InvalidKeyOrdering { pos: 15, key: [97, 95, 107, 101, 121] }"] fn negative_decode_dict_unordered_keys() { BencodeRef::decode(DICT_UNORDERED_KEYS, BDecodeOpt::new(5, true, true)).unwrap(); } #[test] - #[should_panic = "BencodeParseError(InvalidKeyDuplicates { pos: 18, key: [97, 95, 107, 101, 121] }"] + #[should_panic = "InvalidKeyDuplicates { pos: 18, key: [97, 95, 107, 101, 
121] }"] fn negative_decode_dict_dup_keys_same_data() { BencodeRef::decode(DICT_DUP_KEYS_SAME_DATA, BDecodeOpt::default()).unwrap(); } #[test] - #[should_panic = "BencodeParseError(InvalidKeyDuplicates { pos: 18, key: [97, 95, 107, 101, 121] }"] + #[should_panic = "InvalidKeyDuplicates { pos: 18, key: [97, 95, 107, 101, 121] }"] fn negative_decode_dict_dup_keys_diff_data() { BencodeRef::decode(DICT_DUP_KEYS_DIFF_DATA, BDecodeOpt::default()).unwrap(); } diff --git a/contrib/bencode/test/mod.rs b/contrib/bencode/tests/mod.rs similarity index 100% rename from contrib/bencode/test/mod.rs rename to contrib/bencode/tests/mod.rs From c5a724e9da1e670edb775a4581ac3da92c8ebb44 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 24 Aug 2024 18:19:10 +0200 Subject: [PATCH 0983/1003] chore: update deps ``` sh cargo update Updating crates.io index Locking 12 packages to latest compatible versions Updating bindgen v0.70.0 -> v0.70.1 Updating cc v1.1.13 -> v1.1.14 Updating derive_utils v0.14.1 -> v0.14.2 Updating fastrand v2.1.0 -> v2.1.1 Updating flate2 v1.0.32 -> v1.0.33 Updating libz-sys v1.1.19 -> v1.1.20 Updating quote v1.0.36 -> v1.0.37 Updating serde v1.0.208 -> v1.0.209 Updating serde_derive v1.0.208 -> v1.0.209 Updating serde_json v1.0.125 -> v1.0.127 Updating syn v2.0.75 -> v2.0.76 Updating system-configuration v0.6.0 -> v0.6.1 ``` --- Cargo.lock | 128 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 65 insertions(+), 65 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 057e1f5db..740f185bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -247,7 +247,7 @@ checksum = "d7ebdfa2ebdab6b1760375fa7d6f382b9f486eac35fc994625a00e89280bdbb7" dependencies = [ "async-task", "concurrent-queue", - "fastrand 2.1.0", + "fastrand 2.1.1", "futures-lite 2.3.0", "slab", ] @@ -368,7 +368,7 @@ checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] 
[[package]] @@ -518,7 +518,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -610,15 +610,15 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.75", + "syn 2.0.76", "which", ] [[package]] name = "bindgen" -version = "0.70.0" +version = "0.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0127a1da21afb5adaae26910922c3f7afd3d329ba1a1b98a0884cab4907a251" +checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" dependencies = [ "bitflags 2.6.0", "cexpr", @@ -629,7 +629,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -698,7 +698,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", "syn_derive", ] @@ -810,9 +810,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.13" +version = "1.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72db2f7947ecee9b03b510377e8bb9077afa27176fdbff55c51027e976fdcc48" +checksum = "50d2eb3cd3d1bf4529e31c215ee6f93ec5a3d536d9f578f93d9d33ee19562932" dependencies = [ "jobserver", "libc", @@ -922,7 +922,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -1149,7 +1149,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -1160,7 +1160,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -1197,18 +1197,18 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] name = "derive_utils" -version = "0.14.1" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" +checksum = 
"65f152f4b8559c4da5d574bafc7af85454d706b4c5fe8b530d508cacbb6807ea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -1324,9 +1324,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "figment" @@ -1346,9 +1346,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.32" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c0596c1eac1f9e04ed902702e9878208b336edc9d6fddc8a48387349bab3666" +checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", "libz-sys", @@ -1426,7 +1426,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -1438,7 +1438,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -1450,7 +1450,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -1534,7 +1534,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand 2.1.0", + "fastrand 2.1.1", "futures-core", "futures-io", "parking", @@ -1549,7 +1549,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -2081,9 +2081,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.19" +version = "1.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fdc53a7799a7496ebc9fd29f31f7df80e83c9bda5299768af5f9e59eeea74647" +checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" dependencies = [ "cc", "pkg-config", @@ -2225,7 +2225,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -2275,7 +2275,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", "termcolor", "thiserror", ] @@ -2288,7 +2288,7 @@ checksum = "478b0ff3f7d67b79da2b96f56f334431aef65e15ba4b29dd74a4236e29582bdc" dependencies = [ "base64 0.21.7", "bigdecimal", - "bindgen 0.70.0", + "bindgen 0.70.1", "bitflags 2.6.0", "bitvec", "btoi", @@ -2474,7 +2474,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -2556,7 +2556,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -2630,7 +2630,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -2652,7 +2652,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand 2.1.0", + "fastrand 2.1.1", "futures-io", ] @@ -2764,12 +2764,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" +checksum = "a909e6e8053fa1a5ad670f5816c7d93029ee1fa8898718490544a6b0d5d38b3e" dependencies = [ "proc-macro2", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -2822,7 +2822,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.75", + "syn 2.0.76", "version_check", "yansi", ] @@ -2860,9 +2860,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -3130,7 +3130,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.75", + "syn 2.0.76", "unicode-ident", ] @@ -3342,9 +3342,9 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.208" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" +checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" dependencies = [ "serde_derive", ] @@ -3370,13 +3370,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.208" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" +checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -3394,9 +3394,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.125" +version = "1.0.127" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed" +checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" dependencies = [ "indexmap 2.4.0", "itoa", @@ -3423,7 +3423,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -3474,7 +3474,7 @@ 
dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -3617,9 +3617,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.75" +version = "2.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6af063034fc1935ede7be0122941bafa9bacb949334d090b77ca98b5817c7d9" +checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525" dependencies = [ "proc-macro2", "quote", @@ -3635,7 +3635,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -3655,9 +3655,9 @@ dependencies = [ [[package]] name = "system-configuration" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658bc6ee10a9b4fcf576e9b0819d95ec16f4d2c02d39fd83ac1c8789785c4a42" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ "bitflags 2.6.0", "core-foundation", @@ -3704,7 +3704,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if", - "fastrand 2.1.0", + "fastrand 2.1.1", "once_cell", "rustix 0.38.34", "windows-sys 0.59.0", @@ -3742,7 +3742,7 @@ checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -3836,7 +3836,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -4156,7 +4156,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -4371,7 +4371,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", "wasm-bindgen-shared", ] @@ -4405,7 +4405,7 @@ 
checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4707,7 +4707,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] @@ -4727,7 +4727,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.76", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 43453cb5a..e0774e237 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -66,7 +66,7 @@ serde_bencode = "0" serde_bytes = "0" serde_json = { version = "1", features = ["preserve_order"] } serde_repr = "0" -serde_with = { version = "3.9.0", features = ["json"] } +serde_with = { version = "3", features = ["json"] } thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = "3.0.0-beta-develop", path = "packages/clock" } From 2c4bdab2c92ed30193024366b9915513213cf105 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 24 Aug 2024 18:31:25 +0200 Subject: [PATCH 0984/1003] chore: update to v1 of derive more --- Cargo.lock | 28 ++++++++++++++++++---------- Cargo.toml | 2 +- packages/configuration/Cargo.toml | 2 +- packages/configuration/src/lib.rs | 2 +- packages/primitives/Cargo.toml | 2 +- 5 files changed, 22 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 740f185bc..0e3401278 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -968,12 +968,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - [[package]] name = "core-foundation" version = "0.9.4" @@ -1189,15 +1183,23 @@ 
dependencies = [ [[package]] name = "derive_more" -version = "0.99.18" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ - "convert_case", "proc-macro2", "quote", - "rustc_version", "syn 2.0.76", + "unicode-xid", ] [[package]] @@ -4260,6 +4262,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-xid" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" + [[package]] name = "untrusted" version = "0.9.0" diff --git a/Cargo.toml b/Cargo.toml index e0774e237..1a875a192 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,7 +41,7 @@ chrono = { version = "0", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } crossbeam-skiplist = "0" dashmap = "6" -derive_more = "0" +derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } figment = "0" futures = "0" futures-util = "0" diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 0a4cfea23..4f217e1b6 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -16,7 +16,7 @@ version.workspace = true [dependencies] camino = { version = "1", features = ["serde", "serde1"] } -derive_more = "0" +derive_more = { version = "1", features = ["constructor", "display"] } figment = { version = "0", features = ["env", "test", "toml"] } serde = { version = "1", features = ["derive"] } serde_json = { version = "1", features = 
["preserve_order"] } diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index bdbe419ca..1ab3479fa 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -51,7 +51,7 @@ pub const LATEST_VERSION: &str = "2.0.0"; /// Info about the configuration specification. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display, Clone)] -#[display(fmt = "Metadata(app: {app}, purpose: {purpose}, schema_version: {schema_version})")] +#[display("Metadata(app: {app}, purpose: {purpose}, schema_version: {schema_version})")] pub struct Metadata { /// The application this configuration is valid for. #[serde(default = "Metadata::default_app")] diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index 05981b3a8..02a53e3b7 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -17,7 +17,7 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" binascii = "0" -derive_more = "0" +derive_more = { version = "1", features = ["constructor"] } serde = { version = "1", features = ["derive"] } tdyne-peer-id = "1" tdyne-peer-id-registry = "0" From e4aa1db63b1006e5042f2dcb1d439173f444f395 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sat, 24 Aug 2024 15:33:23 +0200 Subject: [PATCH 0985/1003] tracing: qualify use of tracing macros --- packages/located-error/src/lib.rs | 4 +--- src/app.rs | 11 ++++----- src/bootstrap/app.rs | 3 +-- src/bootstrap/jobs/health_check_api.rs | 7 +++--- src/bootstrap/jobs/mod.rs | 5 ++-- src/bootstrap/jobs/torrent_cleanup.rs | 7 +++--- src/bootstrap/jobs/udp_tracker.rs | 7 +++--- src/bootstrap/logging.rs | 3 +-- src/console/ci/e2e/docker.rs | 8 +++---- src/console/ci/e2e/runner.rs | 13 +++++------ src/console/ci/e2e/tracker_checker.rs | 6 ++--- src/console/ci/e2e/tracker_container.rs | 23 +++++++++---------- src/console/clients/checker/app.rs | 3 +-- src/console/clients/udp/app.rs | 9 ++++---- src/console/clients/udp/checker.rs 
| 7 +++--- src/console/profiling.rs | 5 ++-- src/core/auth.rs | 5 ++-- src/core/databases/mysql.rs | 3 +-- src/core/mod.rs | 5 ++-- src/core/statistics.rs | 3 +-- src/main.rs | 5 ++-- src/servers/apis/server.rs | 11 ++++----- .../apis/v1/context/torrent/handlers.rs | 3 +-- src/servers/health_check_api/server.rs | 4 ++-- src/servers/http/server.rs | 5 ++-- src/servers/http/v1/handlers/announce.rs | 5 ++-- src/servers/http/v1/handlers/scrape.rs | 5 ++-- src/servers/registar.rs | 3 +-- src/servers/signals.rs | 11 ++++----- src/servers/udp/handlers.rs | 19 ++++++++------- tests/servers/health_check_api/environment.rs | 9 ++++---- 31 files changed, 92 insertions(+), 125 deletions(-) diff --git a/packages/located-error/src/lib.rs b/packages/located-error/src/lib.rs index bfd4d4a86..3cba6042d 100644 --- a/packages/located-error/src/lib.rs +++ b/packages/located-error/src/lib.rs @@ -33,8 +33,6 @@ use std::error::Error; use std::panic::Location; use std::sync::Arc; -use tracing::debug; - pub type DynError = Arc; /// A generic wrapper around an error. 
@@ -94,7 +92,7 @@ where source: Arc::new(self.0), location: Box::new(*std::panic::Location::caller()), }; - debug!("{e}"); + tracing::debug!("{e}"); e } } diff --git a/src/app.rs b/src/app.rs index b2447a9ef..f96ac399f 100644 --- a/src/app.rs +++ b/src/app.rs @@ -25,7 +25,6 @@ use std::sync::Arc; use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; -use tracing::{info, warn}; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::servers::registar::Registar; @@ -42,7 +41,7 @@ pub async fn start(config: &Configuration, tracker: Arc) -> Vec> = Vec::new(); @@ -69,7 +68,7 @@ pub async fn start(config: &Configuration, tracker: Arc) -> Vec) -> Vec) -> Vec) -> Vec (Configuration, Arc) { let tracker = initialize_with_configuration(&configuration); - info!("Configuration:\n{}", configuration.clone().mask_secrets().to_json()); + tracing::info!("Configuration:\n{}", configuration.clone().mask_secrets().to_json()); (configuration, tracker) } diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index b4d4862ee..d306b2be9 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -17,7 +17,6 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HealthCheckApi; -use tracing::info; use super::Started; use crate::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TARGET}; @@ -45,18 +44,18 @@ pub async fn start_job(config: &HealthCheckApi, register: ServiceRegistry) -> Jo // Run the API server let join_handle = tokio::spawn(async move { - info!(target: HEALTH_CHECK_API_LOG_TARGET, "Starting on: {protocol}://{}", bind_addr); + tracing::info!(target: HEALTH_CHECK_API_LOG_TARGET, "Starting on: {protocol}://{}", bind_addr); let handle = server::start(bind_addr, tx_start, rx_halt, register); if let Ok(()) = handle.await { - info!(target: HEALTH_CHECK_API_LOG_TARGET, "Stopped 
server running on: {protocol}://{}", bind_addr); + tracing::info!(target: HEALTH_CHECK_API_LOG_TARGET, "Stopped server running on: {protocol}://{}", bind_addr); } }); // Wait until the server sends the started message match rx_start.await { - Ok(msg) => info!(target: HEALTH_CHECK_API_LOG_TARGET, "{STARTED_ON}: {protocol}://{}", msg.address), + Ok(msg) => tracing::info!(target: HEALTH_CHECK_API_LOG_TARGET, "{STARTED_ON}: {protocol}://{}", msg.address), Err(e) => panic!("the Health Check API server was dropped: {e}"), } diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index 79a4347ef..6534270fa 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -32,8 +32,8 @@ pub async fn make_rust_tls(opt_tsl_config: &Option) -> Option) -> JoinHandle<()> loop { tokio::select! { _ = tokio::signal::ctrl_c() => { - info!("Stopping torrent cleanup job.."); + tracing::info!("Stopping torrent cleanup job.."); break; } _ = interval.tick() => { if let Some(tracker) = weak_tracker.upgrade() { let start_time = Utc::now().time(); - info!("Cleaning up torrents.."); + tracing::info!("Cleaning up torrents.."); tracker.cleanup_torrents(); - info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()); + tracing::info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()); } else { break; } diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 647461bfc..407cfbbfa 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -10,7 +10,6 @@ use std::sync::Arc; use tokio::task::JoinHandle; use torrust_tracker_configuration::UdpTracker; -use tracing::debug; use crate::core; use crate::servers::registar::ServiceRegistrationForm; @@ -37,8 +36,8 @@ pub async fn start_job(config: &UdpTracker, tracker: Arc, form: S .expect("it should be able to start the udp tracker"); tokio::spawn(async move { - debug!(target: UDP_TRACKER_LOG_TARGET, "Wait for 
launcher (UDP service) to finish ..."); - debug!(target: UDP_TRACKER_LOG_TARGET, "Is halt channel closed before waiting?: {}", server.state.halt_task.is_closed()); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, "Wait for launcher (UDP service) to finish ..."); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, "Is halt channel closed before waiting?: {}", server.state.halt_task.is_closed()); assert!( !server.state.halt_task.is_closed(), @@ -51,6 +50,6 @@ pub async fn start_job(config: &UdpTracker, tracker: Arc, form: S .await .expect("it should be able to join to the udp tracker task"); - debug!(target: UDP_TRACKER_LOG_TARGET, "Is halt channel closed after finishing the server?: {}", server.state.halt_task.is_closed()); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, "Is halt channel closed after finishing the server?: {}", server.state.halt_task.is_closed()); }) } diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs index 496b3ea45..34809c1ca 100644 --- a/src/bootstrap/logging.rs +++ b/src/bootstrap/logging.rs @@ -14,7 +14,6 @@ use std::sync::Once; use torrust_tracker_configuration::{Configuration, Threshold}; -use tracing::info; use tracing::level_filters::LevelFilter; static INIT: Once = Once::new(); @@ -54,7 +53,7 @@ fn tracing_stdout_init(filter: LevelFilter, style: &TraceStyle) { TraceStyle::Json => builder.json().init(), }; - info!("Logging initialized"); + tracing::info!("Logging initialized"); } #[derive(Debug)] diff --git a/src/console/ci/e2e/docker.rs b/src/console/ci/e2e/docker.rs index 32a0c3e56..ce2b1aa99 100644 --- a/src/console/ci/e2e/docker.rs +++ b/src/console/ci/e2e/docker.rs @@ -4,8 +4,6 @@ use std::process::{Command, Output}; use std::thread::sleep; use std::time::{Duration, Instant}; -use tracing::{debug, info}; - /// Docker command wrapper. pub struct Docker {} @@ -20,7 +18,7 @@ impl Drop for RunningContainer { /// Ensures that the temporary container is stopped when the struct goes out /// of scope. 
fn drop(&mut self) { - info!("Dropping running container: {}", self.name); + tracing::info!("Dropping running container: {}", self.name); if Docker::is_container_running(&self.name) { let _unused = Docker::stop(self); } @@ -89,7 +87,7 @@ impl Docker { let args = [initial_args, env_var_args, port_args, [image.to_string()].to_vec()].concat(); - debug!("Docker run args: {:?}", args); + tracing::debug!("Docker run args: {:?}", args); let output = Command::new("docker").args(args).output()?; @@ -176,7 +174,7 @@ impl Docker { let output_str = String::from_utf8_lossy(&output.stdout); - info!("Waiting until container is healthy: {:?}", output_str); + tracing::info!("Waiting until container is healthy: {:?}", output_str); if output_str.contains("(healthy)") { return true; diff --git a/src/console/ci/e2e/runner.rs b/src/console/ci/e2e/runner.rs index f2285938b..118ecda42 100644 --- a/src/console/ci/e2e/runner.rs +++ b/src/console/ci/e2e/runner.rs @@ -21,7 +21,6 @@ use std::path::PathBuf; use anyhow::Context; use clap::Parser; -use tracing::info; use tracing::level_filters::LevelFilter; use super::tracker_container::TrackerContainer; @@ -68,7 +67,7 @@ pub fn run() -> anyhow::Result<()> { let tracker_config = load_tracker_configuration(&args)?; - info!("tracker config:\n{tracker_config}"); + tracing::info!("tracker config:\n{tracker_config}"); let mut tracker_container = TrackerContainer::new(CONTAINER_IMAGE, CONTAINER_NAME_PREFIX); @@ -91,7 +90,7 @@ pub fn run() -> anyhow::Result<()> { let running_services = tracker_container.running_services(); - info!( + tracing::info!( "Running services:\n {}", serde_json::to_string_pretty(&running_services).expect("running services to be serializable to JSON") ); @@ -110,27 +109,27 @@ pub fn run() -> anyhow::Result<()> { tracker_container.remove(); - info!("Tracker container final state:\n{:#?}", tracker_container); + tracing::info!("Tracker container final state:\n{:#?}", tracker_container); Ok(()) } fn tracing_stdout_init(filter: 
LevelFilter) { tracing_subscriber::fmt().with_max_level(filter).init(); - info!("Logging initialized"); + tracing::info!("Logging initialized"); } fn load_tracker_configuration(args: &Args) -> anyhow::Result { match (args.config_toml_path.clone(), args.config_toml.clone()) { (Some(config_path), _) => { - info!( + tracing::info!( "Reading tracker configuration from file: {} ...", config_path.to_string_lossy() ); load_config_from_file(&config_path) } (_, Some(config_content)) => { - info!("Reading tracker configuration from env var ..."); + tracing::info!("Reading tracker configuration from env var ..."); Ok(config_content) } _ => Err(anyhow::anyhow!("No configuration provided")), diff --git a/src/console/ci/e2e/tracker_checker.rs b/src/console/ci/e2e/tracker_checker.rs index b2fd7df2e..192795e61 100644 --- a/src/console/ci/e2e/tracker_checker.rs +++ b/src/console/ci/e2e/tracker_checker.rs @@ -1,16 +1,14 @@ use std::io; use std::process::Command; -use tracing::info; - /// Runs the Tracker Checker. /// /// # Errors /// /// Will return an error if the Tracker Checker fails. 
pub fn run(config_content: &str) -> io::Result<()> { - info!("Running Tracker Checker: TORRUST_CHECKER_CONFIG=[config] cargo run --bin tracker_checker"); - info!("Tracker Checker config:\n{config_content}"); + tracing::info!("Running Tracker Checker: TORRUST_CHECKER_CONFIG=[config] cargo run --bin tracker_checker"); + tracing::info!("Tracker Checker config:\n{config_content}"); let status = Command::new("cargo") .env("TORRUST_CHECKER_CONFIG", config_content) diff --git a/src/console/ci/e2e/tracker_container.rs b/src/console/ci/e2e/tracker_container.rs index 528fd3c62..0d15035a8 100644 --- a/src/console/ci/e2e/tracker_container.rs +++ b/src/console/ci/e2e/tracker_container.rs @@ -2,7 +2,6 @@ use std::time::Duration; use rand::distributions::Alphanumeric; use rand::Rng; -use tracing::{error, info}; use super::docker::{RunOptions, RunningContainer}; use super::logs_parser::RunningServices; @@ -19,7 +18,7 @@ impl Drop for TrackerContainer { /// Ensures that the temporary container is removed when the /// struct goes out of scope. fn drop(&mut self) { - info!("Dropping tracker container: {}", self.name); + tracing::info!("Dropping tracker container: {}", self.name); if Docker::container_exist(&self.name) { let _unused = Docker::remove(&self.name); } @@ -40,7 +39,7 @@ impl TrackerContainer { /// /// Will panic if it can't build the docker image. pub fn build_image(&self) { - info!("Building tracker container image with tag: {} ...", self.image); + tracing::info!("Building tracker container image with tag: {} ...", self.image); Docker::build("./Containerfile", &self.image).expect("A tracker local docker image should be built"); } @@ -48,17 +47,17 @@ impl TrackerContainer { /// /// Will panic if it can't run the container. 
pub fn run(&mut self, options: &RunOptions) { - info!("Running docker tracker image: {} ...", self.name); + tracing::info!("Running docker tracker image: {} ...", self.name); let container = Docker::run(&self.image, &self.name, options).expect("A tracker local docker image should be running"); - info!("Waiting for the container {} to be healthy ...", self.name); + tracing::info!("Waiting for the container {} to be healthy ...", self.name); let is_healthy = Docker::wait_until_is_healthy(&self.name, Duration::from_secs(10)); assert!(is_healthy, "Unhealthy tracker container: {}", &self.name); - info!("Container {} is healthy ...", &self.name); + tracing::info!("Container {} is healthy ...", &self.name); self.running = Some(container); @@ -72,7 +71,7 @@ impl TrackerContainer { pub fn running_services(&self) -> RunningServices { let logs = Docker::logs(&self.name).expect("Logs should be captured from running container"); - info!("Parsing running services from logs. Logs :\n{logs}"); + tracing::info!("Parsing running services from logs. Logs :\n{logs}"); RunningServices::parse_from_logs(&logs) } @@ -83,7 +82,7 @@ impl TrackerContainer { pub fn stop(&mut self) { match &self.running { Some(container) => { - info!("Stopping docker tracker container: {} ...", self.name); + tracing::info!("Stopping docker tracker container: {} ...", self.name); Docker::stop(container).expect("Container should be stopped"); @@ -91,9 +90,9 @@ impl TrackerContainer { } None => { if Docker::is_container_running(&self.name) { - error!("Tracker container {} was started manually", self.name); + tracing::error!("Tracker container {} was started manually", self.name); } else { - info!("Docker tracker container is not running: {} ...", self.name); + tracing::info!("Docker tracker container is not running: {} ...", self.name); } } } @@ -106,9 +105,9 @@ impl TrackerContainer { /// Will panic if it can't remove the container. 
pub fn remove(&self) { if let Some(_running_container) = &self.running { - error!("Can't remove running container: {} ...", self.name); + tracing::error!("Can't remove running container: {} ...", self.name); } else { - info!("Removing docker tracker container: {} ...", self.name); + tracing::info!("Removing docker tracker container: {} ...", self.name); Docker::remove(&self.name).expect("Container should be removed"); } } diff --git a/src/console/clients/checker/app.rs b/src/console/clients/checker/app.rs index 3bafc2661..395f65df9 100644 --- a/src/console/clients/checker/app.rs +++ b/src/console/clients/checker/app.rs @@ -61,7 +61,6 @@ use std::sync::Arc; use anyhow::{Context, Result}; use clap::Parser; -use tracing::debug; use tracing::level_filters::LevelFilter; use super::config::Configuration; @@ -103,7 +102,7 @@ pub async fn run() -> Result> { fn tracing_stdout_init(filter: LevelFilter) { tracing_subscriber::fmt().with_max_level(filter).init(); - debug!("Logging initialized"); + tracing::debug!("Logging initialized"); } fn setup_config(args: Args) -> Result { diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index af6f10611..c2ba647b8 100644 --- a/src/console/clients/udp/app.rs +++ b/src/console/clients/udp/app.rs @@ -64,7 +64,6 @@ use aquatic_udp_protocol::{Response, TransactionId}; use clap::{Parser, Subcommand}; use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; -use tracing::debug; use tracing::level_filters::LevelFilter; use url::Url; @@ -129,7 +128,7 @@ pub async fn run() -> anyhow::Result<()> { fn tracing_stdout_init(filter: LevelFilter) { tracing_subscriber::fmt().with_max_level(filter).init(); - debug!("Logging initialized"); + tracing::debug!("Logging initialized"); } async fn handle_announce(remote_addr: SocketAddr, info_hash: &TorrustInfoHash) -> Result { @@ -153,11 +152,11 @@ async fn handle_scrape(remote_addr: SocketAddr, info_hashes: 
&[TorrustInfoHash]) } fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result { - debug!("Tracker socket address: {tracker_socket_addr_str:#?}"); + tracing::debug!("Tracker socket address: {tracker_socket_addr_str:#?}"); // Check if the address is a valid URL. If so, extract the host and port. let resolved_addr = if let Ok(url) = Url::parse(tracker_socket_addr_str) { - debug!("Tracker socket address URL: {url:?}"); + tracing::debug!("Tracker socket address URL: {url:?}"); let host = url .host_str() @@ -192,7 +191,7 @@ fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result = resolved_addr.to_socket_addrs()?.collect(); diff --git a/src/console/clients/udp/checker.rs b/src/console/clients/udp/checker.rs index 49f0ac41f..437af33e0 100644 --- a/src/console/clients/udp/checker.rs +++ b/src/console/clients/udp/checker.rs @@ -8,7 +8,6 @@ use aquatic_udp_protocol::{ PeerId, PeerKey, Port, Response, ScrapeRequest, TransactionId, }; use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; -use tracing::debug; use super::Error; use crate::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; @@ -57,7 +56,7 @@ impl Client { /// /// Will panic if it receives an unexpected response. 
pub async fn send_connection_request(&self, transaction_id: TransactionId) -> Result { - debug!("Sending connection request with transaction id: {transaction_id:#?}"); + tracing::debug!("Sending connection request with transaction id: {transaction_id:#?}"); let connect_request = ConnectRequest { transaction_id }; @@ -95,7 +94,7 @@ impl Client { connection_id: ConnectionId, info_hash: TorrustInfoHash, ) -> Result { - debug!("Sending announce request with transaction id: {transaction_id:#?}"); + tracing::debug!("Sending announce request with transaction id: {transaction_id:#?}"); let port = NonZeroU16::new( self.client @@ -150,7 +149,7 @@ impl Client { transaction_id: TransactionId, info_hashes: &[TorrustInfoHash], ) -> Result { - debug!("Sending scrape request with transaction id: {transaction_id:#?}"); + tracing::debug!("Sending scrape request with transaction id: {transaction_id:#?}"); let scrape_request = ScrapeRequest { connection_id, diff --git a/src/console/profiling.rs b/src/console/profiling.rs index 3e2925d9c..5fb507197 100644 --- a/src/console/profiling.rs +++ b/src/console/profiling.rs @@ -160,7 +160,6 @@ use std::env; use std::time::Duration; use tokio::time::sleep; -use tracing::info; use crate::{app, bootstrap}; @@ -189,10 +188,10 @@ pub async fn run() { tokio::select! 
{ () = run_duration => { - info!("Torrust timed shutdown.."); + tracing::info!("Torrust timed shutdown.."); }, _ = tokio::signal::ctrl_c() => { - info!("Torrust shutting down via Ctrl+C ..."); + tracing::info!("Torrust shutting down via Ctrl+C ..."); // Await for all jobs to shutdown futures::future::join_all(jobs).await; } diff --git a/src/core/auth.rs b/src/core/auth.rs index 61ccbdb52..0243fceb4 100644 --- a/src/core/auth.rs +++ b/src/core/auth.rs @@ -50,7 +50,6 @@ use torrust_tracker_clock::clock::Time; use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use torrust_tracker_located_error::{DynError, LocatedError}; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use tracing::debug; use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; use crate::CurrentClock; @@ -81,14 +80,14 @@ pub fn generate_key(lifetime: Option) -> PeerKey { .collect(); if let Some(lifetime) = lifetime { - debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); + tracing::debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); PeerKey { key: random_id.parse::().unwrap(), valid_until: Some(CurrentClock::now_add(&lifetime).unwrap()), } } else { - debug!("Generated key: {}, permanent", random_id); + tracing::debug!("Generated key: {}, permanent", random_id); PeerKey { key: random_id.parse::().unwrap(), diff --git a/src/core/databases/mysql.rs b/src/core/databases/mysql.rs index 3a06c4982..28a5f363b 100644 --- a/src/core/databases/mysql.rs +++ b/src/core/databases/mysql.rs @@ -8,7 +8,6 @@ use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::PersistentTorrents; -use tracing::debug; use super::driver::Driver; use super::{Database, Error}; @@ -158,7 +157,7 @@ impl Database for Mysql { let info_hash_str = info_hash.to_string(); - debug!("{}", info_hash_str); + tracing::debug!("{}", info_hash_str); 
Ok(conn.exec_drop(COMMAND, params! { info_hash_str, completed })?) } diff --git a/src/core/mod.rs b/src/core/mod.rs index a7ad66052..cbdd7bcbc 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -469,7 +469,6 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_torrent_repository::entry::EntrySync; use torrust_tracker_torrent_repository::repository::Repository; -use tracing::debug; use self::auth::Key; use self::error::Error; @@ -656,9 +655,9 @@ impl Tracker { // we are actually handling authentication at the handlers level. So I would extract that // responsibility into another authentication service. - debug!("Before: {peer:?}"); + tracing::debug!("Before: {peer:?}"); peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); - debug!("After: {peer:?}"); + tracing::debug!("After: {peer:?}"); let stats = self.upsert_peer_and_get_stats(info_hash, peer); diff --git a/src/core/statistics.rs b/src/core/statistics.rs index bcafda17f..c9681d23c 100644 --- a/src/core/statistics.rs +++ b/src/core/statistics.rs @@ -25,7 +25,6 @@ use futures::FutureExt; use mockall::{automock, predicate::str}; use tokio::sync::mpsc::error::SendError; use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; -use tracing::debug; const CHANNEL_BUFFER_SIZE: usize = 65_535; @@ -182,7 +181,7 @@ async fn event_handler(event: Event, stats_repository: &Repo) { } } - debug!("stats: {:?}", stats_repository.get_stats().await); + tracing::debug!("stats: {:?}", stats_repository.get_stats().await); } /// A trait to allow sending statistics events diff --git a/src/main.rs b/src/main.rs index ab2af65e2..e0b7bc4ab 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,5 +1,4 @@ use torrust_tracker::{app, bootstrap}; -use tracing::info; #[tokio::main] async fn main() { @@ -10,11 +9,11 @@ async fn main() { // handle the signals tokio::select! 
{ _ = tokio::signal::ctrl_c() => { - info!("Torrust shutting down ..."); + tracing::info!("Torrust shutting down ..."); // Await for all jobs to shutdown futures::future::join_all(jobs).await; - info!("Torrust successfully shutdown."); + tracing::info!("Torrust successfully shutdown."); } } } diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 40c4d0779..9008d7ce6 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -32,7 +32,6 @@ use derive_more::Constructor; use futures::future::BoxFuture; use tokio::sync::oneshot::{Receiver, Sender}; use torrust_tracker_configuration::AccessTokens; -use tracing::{debug, error, info}; use super::routes::router; use crate::bootstrap::jobs::Started; @@ -123,11 +122,11 @@ impl ApiServer { let launcher = self.state.launcher; let task = tokio::spawn(async move { - debug!(target: API_LOG_TARGET, "Starting with launcher in spawned task ..."); + tracing::debug!(target: API_LOG_TARGET, "Starting with launcher in spawned task ..."); let _task = launcher.start(tracker, access_tokens, tx_start, rx_halt).await; - debug!(target: API_LOG_TARGET, "Started with launcher in spawned task"); + tracing::debug!(target: API_LOG_TARGET, "Started with launcher in spawned task"); launcher }); @@ -143,7 +142,7 @@ impl ApiServer { } Err(err) => { let msg = format!("Unable to start API server: {err}"); - error!("{}", msg); + tracing::error!("{}", msg); panic!("{}", msg); } }; @@ -233,7 +232,7 @@ impl Launcher { let tls = self.tls.clone(); let protocol = if tls.is_some() { "https" } else { "http" }; - info!(target: API_LOG_TARGET, "Starting on {protocol}://{}", address); + tracing::info!(target: API_LOG_TARGET, "Starting on {protocol}://{}", address); let running = Box::pin(async { match tls { @@ -254,7 +253,7 @@ impl Launcher { } }); - info!(target: API_LOG_TARGET, "{STARTED_ON} {protocol}://{}", address); + tracing::info!(target: API_LOG_TARGET, "{STARTED_ON} {protocol}://{}", address); tx_start .send(Started { 
address }) diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index b2418c689..ebca504fd 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -11,7 +11,6 @@ use serde::{de, Deserialize, Deserializer}; use thiserror::Error; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; -use tracing::debug; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; use crate::core::services::torrent::{get_torrent_info, get_torrents, get_torrents_page}; @@ -77,7 +76,7 @@ pub struct QueryParams { /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent#list-torrents) /// for more information about this endpoint. pub async fn get_torrents_handler(State(tracker): State>, pagination: Query) -> Response { - debug!("pagination: {:?}", pagination); + tracing::debug!("pagination: {:?}", pagination); if pagination.0.info_hashes.is_empty() { torrent_list_response( diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs index 89fbafe45..8a9b97306 100644 --- a/src/servers/health_check_api/server.rs +++ b/src/servers/health_check_api/server.rs @@ -18,7 +18,7 @@ use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; use tower_http::trace::{DefaultMakeSpan, TraceLayer}; -use tracing::{debug, Level, Span}; +use tracing::{Level, Span}; use crate::bootstrap::jobs::Started; use crate::servers::health_check_api::handlers::health_check_handler; @@ -81,7 +81,7 @@ pub fn start( let handle = Handle::new(); - debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Starting service with graceful shutdown in a spawned task ..."); + tracing::debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Starting service with graceful 
shutdown in a spawned task ..."); tokio::task::spawn(graceful_shutdown( handle.clone(), diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 4a6dccc6a..75888f6a4 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -7,7 +7,6 @@ use axum_server::Handle; use derive_more::Constructor; use futures::future::BoxFuture; use tokio::sync::oneshot::{Receiver, Sender}; -use tracing::info; use super::v1::routes::router; use crate::bootstrap::jobs::Started; @@ -57,7 +56,7 @@ impl Launcher { let tls = self.tls.clone(); let protocol = if tls.is_some() { "https" } else { "http" }; - info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{}", address); + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{}", address); let app = router(tracker, address); @@ -80,7 +79,7 @@ impl Launcher { } }); - info!(target: HTTP_TRACKER_LOG_TARGET, "{STARTED_ON}: {protocol}://{}", address); + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "{STARTED_ON}: {protocol}://{}", address); tx_start .send(Started { address }) diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 0c1d2fdac..ee70b7841 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -14,7 +14,6 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::peer; -use tracing::debug; use crate::core::auth::Key; use crate::core::{AnnounceData, Tracker}; @@ -36,7 +35,7 @@ pub async fn handle_without_key( ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { - debug!("http announce request: {:#?}", announce_request); + tracing::debug!("http announce request: {:#?}", announce_request); handle(&tracker, &announce_request, &client_ip_sources, None).await } @@ -50,7 +49,7 @@ pub async fn handle_with_key( 
ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ExtractKey(key): ExtractKey, ) -> Response { - debug!("http announce request: {:#?}", announce_request); + tracing::debug!("http announce request: {:#?}", announce_request); handle(&tracker, &announce_request, &client_ip_sources, Some(key)).await } diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index eb8875a58..ca4c85207 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -9,7 +9,6 @@ use std::sync::Arc; use axum::extract::State; use axum::response::{IntoResponse, Response}; -use tracing::debug; use crate::core::auth::Key; use crate::core::{ScrapeData, Tracker}; @@ -28,7 +27,7 @@ pub async fn handle_without_key( ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { - debug!("http scrape request: {:#?}", &scrape_request); + tracing::debug!("http scrape request: {:#?}", &scrape_request); handle(&tracker, &scrape_request, &client_ip_sources, None).await } @@ -44,7 +43,7 @@ pub async fn handle_with_key( ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ExtractKey(key): ExtractKey, ) -> Response { - debug!("http scrape request: {:#?}", &scrape_request); + tracing::debug!("http scrape request: {:#?}", &scrape_request); handle(&tracker, &scrape_request, &client_ip_sources, Some(key)).await } diff --git a/src/servers/registar.rs b/src/servers/registar.rs index 6058595ba..6b67188dc 100644 --- a/src/servers/registar.rs +++ b/src/servers/registar.rs @@ -7,7 +7,6 @@ use std::sync::Arc; use derive_more::Constructor; use tokio::sync::Mutex; use tokio::task::JoinHandle; -use tracing::debug; /// A [`ServiceHeathCheckResult`] is returned by a completed health check. pub type ServiceHeathCheckResult = Result; @@ -82,7 +81,7 @@ impl Registar { /// Inserts a listing into the registry. 
async fn insert(&self, rx: tokio::sync::oneshot::Receiver) { - debug!("Waiting for the started service to send registration data ..."); + tracing::debug!("Waiting for the started service to send registration data ..."); let service_registration = rx .await diff --git a/src/servers/signals.rs b/src/servers/signals.rs index 0a1a06312..367becff8 100644 --- a/src/servers/signals.rs +++ b/src/servers/signals.rs @@ -3,7 +3,6 @@ use std::time::Duration; use derive_more::Display; use tokio::time::sleep; -use tracing::info; /// This is the message that the "launcher" spawned task receives from the main /// application process to notify the service to shutdown. @@ -54,8 +53,8 @@ pub async fn shutdown_signal(rx_halt: tokio::sync::oneshot::Receiver) { }; tokio::select! { - signal = halt => { info!("Halt signal processed: {}", signal) }, - () = global_shutdown_signal() => { info!("Global shutdown signal processed") } + signal = halt => { tracing::info!("Halt signal processed: {}", signal) }, + () = global_shutdown_signal() => { tracing::info!("Global shutdown signal processed") } } } @@ -63,13 +62,13 @@ pub async fn shutdown_signal(rx_halt: tokio::sync::oneshot::Receiver) { pub async fn shutdown_signal_with_message(rx_halt: tokio::sync::oneshot::Receiver, message: String) { shutdown_signal(rx_halt).await; - info!("{message}"); + tracing::info!("{message}"); } pub async fn graceful_shutdown(handle: axum_server::Handle, rx_halt: tokio::sync::oneshot::Receiver, message: String) { shutdown_signal_with_message(rx_halt, message).await; - info!("Sending graceful shutdown signal"); + tracing::info!("Sending graceful shutdown signal"); handle.graceful_shutdown(Some(Duration::from_secs(90))); println!("!! 
shuting down in 90 seconds !!"); @@ -77,6 +76,6 @@ pub async fn graceful_shutdown(handle: axum_server::Handle, rx_halt: tokio::sync loop { sleep(Duration::from_secs(1)).await; - info!("remaining alive connections: {}", handle.connection_count()); + tracing::info!("remaining alive connections: {}", handle.connection_count()); } } diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 1ef404ff0..34f786219 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -12,7 +12,6 @@ use aquatic_udp_protocol::{ }; use torrust_tracker_located_error::DynError; use torrust_tracker_primitives::info_hash::InfoHash; -use tracing::debug; use uuid::Uuid; use zerocopy::network_endian::I32; @@ -33,7 +32,7 @@ use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; /// /// It will return an `Error` response if the request is invalid. pub(crate) async fn handle_packet(udp_request: RawRequest, tracker: &Tracker, local_addr: SocketAddr) -> Response { - debug!("Handling Packets: {udp_request:?}"); + tracing::debug!("Handling Packets: {udp_request:?}"); let start_time = Instant::now(); @@ -88,7 +87,7 @@ pub(crate) async fn handle_packet(udp_request: RawRequest, tracker: &Tracker, lo /// /// If a error happens in the `handle_request` function, it will just return the `ServerError`. pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: &Tracker) -> Result { - debug!("Handling Request: {request:?} to: {remote_addr:?}"); + tracing::debug!("Handling Request: {request:?} to: {remote_addr:?}"); match request { Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker).await, @@ -104,7 +103,7 @@ pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: /// /// This function does not ever return an error. 
pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, tracker: &Tracker) -> Result { - debug!("udp connect request: {:#?}", request); + tracing::debug!("udp connect request: {:#?}", request); let connection_cookie = make(&remote_addr); let connection_id = into_connection_id(&connection_cookie); @@ -114,7 +113,7 @@ pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, t connection_id, }; - debug!("udp connect response: {:#?}", response); + tracing::debug!("udp connect response: {:#?}", response); // send stats event match remote_addr { @@ -140,7 +139,7 @@ pub async fn handle_announce( announce_request: &AnnounceRequest, tracker: &Tracker, ) -> Result { - debug!("udp announce request: {:#?}", announce_request); + tracing::debug!("udp announce request: {:#?}", announce_request); // Authentication if tracker.requires_authentication() { @@ -197,7 +196,7 @@ pub async fn handle_announce( .collect(), }; - debug!("udp announce response: {:#?}", announce_response); + tracing::debug!("udp announce response: {:#?}", announce_response); Ok(Response::from(announce_response)) } else { @@ -224,7 +223,7 @@ pub async fn handle_announce( .collect(), }; - debug!("udp announce response: {:#?}", announce_response); + tracing::debug!("udp announce response: {:#?}", announce_response); Ok(Response::from(announce_response)) } @@ -237,7 +236,7 @@ pub async fn handle_announce( /// /// This function does not ever return an error. 
pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tracker: &Tracker) -> Result { - debug!("udp scrape request: {:#?}", request); + tracing::debug!("udp scrape request: {:#?}", request); // Convert from aquatic infohashes let mut info_hashes: Vec = vec![]; @@ -283,7 +282,7 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra torrent_stats, }; - debug!("udp scrape response: {:#?}", response); + tracing::debug!("udp scrape response: {:#?}", response); Ok(Response::from(response)) } diff --git a/tests/servers/health_check_api/environment.rs b/tests/servers/health_check_api/environment.rs index cf0566d67..b101a54e7 100644 --- a/tests/servers/health_check_api/environment.rs +++ b/tests/servers/health_check_api/environment.rs @@ -8,7 +8,6 @@ use torrust_tracker::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TA use torrust_tracker::servers::registar::Registar; use torrust_tracker::servers::signals::{self, Halted}; use torrust_tracker_configuration::HealthCheckApi; -use tracing::debug; #[derive(Debug)] pub enum Error { @@ -49,21 +48,21 @@ impl Environment { let register = self.registar.entries(); - debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Spawning task to launch the service ..."); + tracing::debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Spawning task to launch the service ..."); let server = tokio::spawn(async move { - debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Starting the server in a spawned task ..."); + tracing::debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Starting the server in a spawned task ..."); server::start(self.state.bind_to, tx_start, rx_halt, register) .await .expect("it should start the health check service"); - debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Server started. Sending the binding {} ...", self.state.bind_to); + tracing::debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Server started. 
Sending the binding {} ...", self.state.bind_to); self.state.bind_to }); - debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Waiting for spawning task to send the binding ..."); + tracing::debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Waiting for spawning task to send the binding ..."); let binding = rx_start.await.expect("it should send service binding").address; From 3dc75d5873ee9ddd995a7aff7defab3a7e298fc6 Mon Sep 17 00:00:00 2001 From: Cameron Garnham  Date: Sat, 24 Aug 2024 17:41:34 +0200 Subject: [PATCH 0986/1003] tracing: init tracing-subscriber in integration tests --- packages/test-helpers/src/configuration.rs | 2 +- tests/common/clock.rs | 6 + tests/common/logging.rs | 30 +++ tests/common/mod.rs | 1 + .../servers/api/v1/contract/authentication.rs | 22 ++ .../servers/api/v1/contract/configuration.rs | 8 + .../api/v1/contract/context/auth_key.rs | 72 ++++++ .../api/v1/contract/context/health_check.rs | 6 + .../servers/api/v1/contract/context/stats.rs | 10 + .../api/v1/contract/context/torrent.rs | 50 ++++ .../api/v1/contract/context/whitelist.rs | 50 ++++ tests/servers/health_check_api/contract.rs | 36 +++ tests/servers/http/v1/contract.rs | 226 +++++++++++++++++- tests/servers/udp/contract.rs | 28 +++ tests/servers/udp/environment.rs | 6 + 15 files changed, 551 insertions(+), 2 deletions(-) create mode 100644 tests/common/logging.rs diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 0c4029b69..dbd8eef9e 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -28,7 +28,7 @@ pub fn ephemeral() -> Configuration { let mut config = Configuration::default(); - config.logging.threshold = Threshold::Off; // Change to `debug` for tests debugging + config.logging.threshold = Threshold::Off; // It should always be off here, the tests manage their own logging. 
// Ephemeral socket address for API let api_port = 0u16; diff --git a/tests/common/clock.rs b/tests/common/clock.rs index 5d94bb83d..de3cc7c65 100644 --- a/tests/common/clock.rs +++ b/tests/common/clock.rs @@ -1,11 +1,17 @@ use std::time::Duration; use torrust_tracker_clock::clock::Time; +use tracing::level_filters::LevelFilter; +use crate::common::logging::{tracing_stderr_init, INIT}; use crate::CurrentClock; #[test] fn it_should_use_stopped_time_for_testing() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned()); let time = CurrentClock::now(); diff --git a/tests/common/logging.rs b/tests/common/logging.rs new file mode 100644 index 000000000..71be2ece7 --- /dev/null +++ b/tests/common/logging.rs @@ -0,0 +1,30 @@ +#![allow(clippy::doc_markdown)] +//! Logging for the Integration Tests +//! +//! Tests should start their own logging. +//! +//! To find tests that do not start their own logging: +//! +//! ``` sh +//! awk 'BEGIN{RS=""; FS="\n"} /#\[tokio::test\]\s*async\s+fn\s+\w+\s*\(\s*\)\s*\{[^}]*\}/ && !/#\[tokio::test\]\s*async\s+fn\s+\w+\s*\(\s*\)\s*\{[^}]*INIT\.call_once/' $(find . -name "*.rs") +//! ``` +//! 
+ +use std::sync::Once; + +use tracing::level_filters::LevelFilter; + +#[allow(dead_code)] +pub static INIT: Once = Once::new(); + +#[allow(dead_code)] +pub fn tracing_stderr_init(filter: LevelFilter) { + let builder = tracing_subscriber::fmt() + .with_max_level(filter) + .with_ansi(true) + .with_writer(std::io::stderr); + + builder.pretty().with_file(true).init(); + + tracing::info!("Logging initialized"); +} diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 281c1fb9c..9589ccb1e 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,4 +1,5 @@ pub mod clock; pub mod fixtures; pub mod http; +pub mod logging; pub mod udp; diff --git a/tests/servers/api/v1/contract/authentication.rs b/tests/servers/api/v1/contract/authentication.rs index 49981dd02..5c5cd3ae0 100644 --- a/tests/servers/api/v1/contract/authentication.rs +++ b/tests/servers/api/v1/contract/authentication.rs @@ -1,12 +1,18 @@ use torrust_tracker_test_helpers::configuration; +use tracing::level_filters::LevelFilter; use crate::common::http::{Query, QueryParam}; +use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::api::v1::asserts::{assert_token_not_valid, assert_unauthorized}; use crate::servers::api::v1::client::Client; use crate::servers::api::Started; #[tokio::test] async fn should_authenticate_requests_by_using_a_token_query_param() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let token = env.get_connection_info().api_token.unwrap(); @@ -22,6 +28,10 @@ async fn should_authenticate_requests_by_using_a_token_query_param() { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_missing() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let response = Client::new(env.get_connection_info()) @@ -35,6 +45,10 @@ async fn 
should_not_authenticate_requests_when_the_token_is_missing() { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_empty() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let response = Client::new(env.get_connection_info()) @@ -48,6 +62,10 @@ async fn should_not_authenticate_requests_when_the_token_is_empty() { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let response = Client::new(env.get_connection_info()) @@ -61,6 +79,10 @@ async fn should_not_authenticate_requests_when_the_token_is_invalid() { #[tokio::test] async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let token = env.get_connection_info().api_token.unwrap(); diff --git a/tests/servers/api/v1/contract/configuration.rs b/tests/servers/api/v1/contract/configuration.rs index 4220f62d2..be42f16ad 100644 --- a/tests/servers/api/v1/contract/configuration.rs +++ b/tests/servers/api/v1/contract/configuration.rs @@ -7,10 +7,18 @@ // use crate::common::app::setup_with_configuration; // use crate::servers::api::environment::stopped_environment; +use tracing::level_filters::LevelFilter; + +use crate::common::logging::{tracing_stderr_init, INIT}; + #[tokio::test] #[ignore] #[should_panic = "Could not receive bind_address."] async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + // let tracker = setup_with_configuration(&Arc::new(configuration::ephemeral())); // let config = tracker.config.http_api.clone(); diff --git a/tests/servers/api/v1/contract/context/auth_key.rs 
b/tests/servers/api/v1/contract/context/auth_key.rs index 41f421ca6..2792a513c 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -3,7 +3,9 @@ use std::time::Duration; use serde::Serialize; use torrust_tracker::core::auth::Key; use torrust_tracker_test_helpers::configuration; +use tracing::level_filters::LevelFilter; +use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{ assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, @@ -15,6 +17,10 @@ use crate::servers::api::{force_database_error, Started}; #[tokio::test] async fn should_allow_generating_a_new_random_auth_key() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let response = Client::new(env.get_connection_info()) @@ -37,6 +43,10 @@ async fn should_allow_generating_a_new_random_auth_key() { #[tokio::test] async fn should_allow_uploading_a_preexisting_auth_key() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let response = Client::new(env.get_connection_info()) @@ -59,6 +69,10 @@ async fn should_allow_uploading_a_preexisting_auth_key() { #[tokio::test] async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) @@ -84,6 +98,10 @@ async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_generated() { + 
INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; force_database_error(&env.tracker); @@ -102,6 +120,10 @@ async fn should_fail_when_the_auth_key_cannot_be_generated() { #[tokio::test] async fn should_allow_deleting_an_auth_key() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; @@ -129,6 +151,10 @@ async fn should_fail_generating_a_new_auth_key_when_the_provided_key_is_invalid( pub seconds_valid: u64, } + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let invalid_keys = [ @@ -166,6 +192,10 @@ async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid( pub seconds_valid: String, } + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let invalid_key_durations = [ @@ -193,6 +223,10 @@ async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid( #[tokio::test] async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let invalid_auth_keys = [ @@ -216,6 +250,10 @@ async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_deleted() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; @@ -238,6 +276,10 @@ async fn should_fail_when_the_auth_key_cannot_be_deleted() { #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + 
}); + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; @@ -273,6 +315,10 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { #[tokio::test] async fn should_allow_reloading_keys() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; @@ -290,6 +336,10 @@ async fn should_allow_reloading_keys() { #[tokio::test] async fn should_fail_when_keys_cannot_be_reloaded() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; @@ -309,6 +359,10 @@ async fn should_fail_when_keys_cannot_be_reloaded() { #[tokio::test] async fn should_not_allow_reloading_keys_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; @@ -336,7 +390,9 @@ mod deprecated_generate_key_endpoint { use torrust_tracker::core::auth::Key; use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{ assert_auth_key_utf8, assert_failed_to_generate_key, assert_invalid_key_duration_param, assert_token_not_valid, @@ -347,6 +403,10 @@ mod deprecated_generate_key_endpoint { #[tokio::test] async fn should_allow_generating_a_new_auth_key() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; @@ -366,6 +426,10 @@ mod deprecated_generate_key_endpoint { #[tokio::test] async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { + INIT.call_once(|| 
{ + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; @@ -387,6 +451,10 @@ mod deprecated_generate_key_endpoint { #[tokio::test] async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let invalid_key_durations = [ @@ -408,6 +476,10 @@ mod deprecated_generate_key_endpoint { #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_generated() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; force_database_error(&env.tracker); diff --git a/tests/servers/api/v1/contract/context/health_check.rs b/tests/servers/api/v1/contract/context/health_check.rs index d8dc3c030..af46a5abe 100644 --- a/tests/servers/api/v1/contract/context/health_check.rs +++ b/tests/servers/api/v1/contract/context/health_check.rs @@ -1,11 +1,17 @@ use torrust_tracker::servers::apis::v1::context::health_check::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; +use tracing::level_filters::LevelFilter; +use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::api::v1::client::get; use crate::servers::api::Started; #[tokio::test] async fn health_check_endpoint_should_return_status_ok_if_api_is_running() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let url = format!("http://{}/api/health_check", env.get_connection_info().bind_address); diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index c4c992484..a034a7778 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -4,7 +4,9 @@ use 
torrust_tracker::servers::apis::v1::context::stats::resources::Stats; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; +use tracing::level_filters::LevelFilter; +use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; use crate::servers::api::v1::client::Client; @@ -12,6 +14,10 @@ use crate::servers::api::Started; #[tokio::test] async fn should_allow_getting_tracker_statistics() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; env.add_torrent_peer( @@ -49,6 +55,10 @@ async fn should_allow_getting_tracker_statistics() { #[tokio::test] async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index 7ef35e729..f5e930be3 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -5,8 +5,10 @@ use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{s use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; +use tracing::level_filters::LevelFilter; use crate::common::http::{Query, QueryParam}; +use crate::common::logging::{tracing_stderr_init, INIT}; use 
crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{ assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, @@ -20,6 +22,10 @@ use crate::servers::api::Started; #[tokio::test] async fn should_allow_getting_all_torrents() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -44,6 +50,10 @@ async fn should_allow_getting_all_torrents() { #[tokio::test] async fn should_allow_limiting_the_torrents_in_the_result() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; // torrents are ordered alphabetically by infohashes @@ -73,6 +83,10 @@ async fn should_allow_limiting_the_torrents_in_the_result() { #[tokio::test] async fn should_allow_the_torrents_result_pagination() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; // torrents are ordered alphabetically by infohashes @@ -102,6 +116,10 @@ async fn should_allow_the_torrents_result_pagination() { #[tokio::test] async fn should_allow_getting_a_list_of_torrents_providing_infohashes() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 @@ -144,6 +162,10 @@ async fn should_allow_getting_a_list_of_torrents_providing_infohashes() { #[tokio::test] async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = 
Started::new(&configuration::ephemeral().into()).await; let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; @@ -161,6 +183,10 @@ async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_ #[tokio::test] async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; @@ -178,6 +204,10 @@ async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_p #[tokio::test] async fn should_fail_getting_torrents_when_the_info_hash_parameter_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let invalid_info_hashes = [" ", "-1", "1.1", "INVALID INFO_HASH"]; @@ -199,6 +229,10 @@ async fn should_fail_getting_torrents_when_the_info_hash_parameter_is_invalid() #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) @@ -218,6 +252,10 @@ async fn should_not_allow_getting_torrents_for_unauthenticated_users() { #[tokio::test] async fn should_allow_getting_a_torrent_info() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -247,6 +285,10 @@ async fn should_allow_getting_a_torrent_info() { #[tokio::test] async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = 
Started::new(&configuration::ephemeral().into()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -262,6 +304,10 @@ async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exis #[tokio::test] async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { @@ -281,6 +327,10 @@ async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invali #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index 29064ec9e..b30a7dbf8 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -2,7 +2,9 @@ use std::str::FromStr; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; +use tracing::level_filters::LevelFilter; +use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, @@ -16,6 +18,10 @@ use crate::servers::api::{force_database_error, Started}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = 
Started::new(&configuration::ephemeral().into()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -34,6 +40,10 @@ async fn should_allow_whitelisting_a_torrent() { #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -51,6 +61,10 @@ async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() #[tokio::test] async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -72,6 +86,10 @@ async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_whitelisted() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -87,6 +105,10 @@ async fn should_fail_when_the_torrent_cannot_be_whitelisted() { #[tokio::test] async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { @@ -110,6 +132,10 @@ async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invali #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = 
Started::new(&configuration::ephemeral().into()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -128,6 +154,10 @@ async fn should_allow_removing_a_torrent_from_the_whitelist() { #[tokio::test] async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -143,6 +173,10 @@ async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whi #[tokio::test] async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { @@ -166,6 +200,10 @@ async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_inf #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -185,6 +223,10 @@ async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { #[tokio::test] async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -209,6 +251,10 @@ async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthentica #[tokio::test] async fn should_allow_reload_the_whitelist_from_the_database() { + INIT.call_once(|| { + 
tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -234,6 +280,10 @@ async fn should_allow_reload_the_whitelist_from_the_database() { #[tokio::test] async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs index 3c3c13151..d40899f98 100644 --- a/tests/servers/health_check_api/contract.rs +++ b/tests/servers/health_check_api/contract.rs @@ -1,12 +1,18 @@ use torrust_tracker::servers::health_check_api::resources::{Report, Status}; use torrust_tracker::servers::registar::Registar; use torrust_tracker_test_helpers::configuration; +use tracing::level_filters::LevelFilter; +use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::health_check_api::client::get; use crate::servers::health_check_api::Started; #[tokio::test] async fn health_check_endpoint_should_return_status_ok_when_there_is_no_services_registered() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let configuration = configuration::ephemeral_with_no_services(); let env = Started::new(&configuration.health_check_api.into(), Registar::default()).await; @@ -31,13 +37,19 @@ mod api { use torrust_tracker::servers::health_check_api::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::api; use crate::servers::health_check_api::client::get; use crate::servers::health_check_api::Started; #[tokio::test] pub(crate) async fn it_should_return_good_health_for_api_service() { + 
INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let configuration = Arc::new(configuration::ephemeral()); let service = api::Started::new(&configuration).await; @@ -83,6 +95,10 @@ mod api { #[tokio::test] pub(crate) async fn it_should_return_error_when_api_service_was_stopped_after_registration() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let configuration = Arc::new(configuration::ephemeral()); let service = api::Started::new(&configuration).await; @@ -136,13 +152,19 @@ mod http { use torrust_tracker::servers::health_check_api::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::health_check_api::client::get; use crate::servers::health_check_api::Started; use crate::servers::http; #[tokio::test] pub(crate) async fn it_should_return_good_health_for_http_service() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let configuration = Arc::new(configuration::ephemeral()); let service = http::Started::new(&configuration).await; @@ -187,6 +209,10 @@ mod http { #[tokio::test] pub(crate) async fn it_should_return_error_when_http_service_was_stopped_after_registration() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let configuration = Arc::new(configuration::ephemeral()); let service = http::Started::new(&configuration).await; @@ -240,13 +266,19 @@ mod udp { use torrust_tracker::servers::health_check_api::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::health_check_api::client::get; use crate::servers::health_check_api::Started; use crate::servers::udp; #[tokio::test] pub(crate) async fn it_should_return_good_health_for_udp_service() { + INIT.call_once(|| { + 
tracing_stderr_init(LevelFilter::ERROR); + }); + let configuration = Arc::new(configuration::ephemeral()); let service = udp::Started::new(&configuration).await; @@ -288,6 +320,10 @@ mod udp { #[tokio::test] pub(crate) async fn it_should_return_error_when_udp_service_was_stopped_after_registration() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let configuration = Arc::new(configuration::ephemeral()); let service = udp::Started::new(&configuration).await; diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index edc06fb07..f74b4717b 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -13,12 +13,18 @@ mod for_all_config_modes { use torrust_tracker::servers::http::v1::handlers::health_check::{Report, Status}; use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::http::client::Client; use crate::servers::http::Started; #[tokio::test] async fn health_check_endpoint_should_return_ok_if_the_http_tracker_is_running() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; let response = Client::new(*env.bind_address()).health_check().await; @@ -32,7 +38,9 @@ mod for_all_config_modes { mod and_running_on_reverse_proxy { use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; use crate::servers::http::client::Client; use crate::servers::http::requests::announce::QueryBuilder; @@ -40,6 +48,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { + INIT.call_once(|| { + 
tracing_stderr_init(LevelFilter::ERROR); + }); + // If the tracker is running behind a reverse proxy, the peer IP is the // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. @@ -56,6 +68,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; let params = QueryBuilder::default().query().params(); @@ -93,8 +109,10 @@ mod for_all_config_modes { use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; use crate::common::fixtures::invalid_info_hashes; + use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::http::asserts::{ assert_announce_response, assert_bad_announce_request_error_response, assert_cannot_parse_query_param_error_response, assert_cannot_parse_query_params_error_response, assert_compact_announce_response, assert_empty_announce_response, @@ -107,12 +125,20 @@ mod for_all_config_modes { #[tokio::test] async fn it_should_start_and_stop() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_public().into()).await; env.stop().await; } #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -128,6 +154,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_url_query_component_is_empty() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = 
Started::new(&configuration::ephemeral().into()).await; let response = Client::new(*env.bind_address()).get("announce").await; @@ -139,6 +169,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_url_query_parameters_are_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let invalid_query_param = "a=b=c"; @@ -154,6 +188,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; // Without `info_hash` param @@ -191,6 +229,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -208,6 +250,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_not_fail_when_the_peer_address_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + // AnnounceQuery does not even contain the `peer_addr` // The peer IP is obtained in two ways: // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. 
@@ -228,6 +274,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -247,6 +297,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -266,6 +320,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -292,6 +350,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -311,6 +373,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -330,6 +396,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_event_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -357,6 +427,10 @@ mod for_all_config_modes { #[tokio::test] async fn 
should_fail_when_the_compact_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -376,6 +450,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_public().into()).await; let response = Client::new(*env.bind_address()) @@ -405,6 +483,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -445,6 +527,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -497,6 +583,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -521,6 +611,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_the_compact_response() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + // Tracker Returns Compact Peer Lists // 
https://www.bittorrent.org/beps/bep_0023.html @@ -560,6 +654,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_not_return_the_compact_response_by_default() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. @@ -599,6 +697,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_public().into()).await; Client::new(*env.bind_address()) @@ -616,6 +718,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + if TcpListener::bind(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0)) .await .is_err() @@ -640,6 +746,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -663,6 +773,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_public().into()).await; Client::new(*env.bind_address()) @@ -680,6 +794,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + if TcpListener::bind(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0)) .await .is_err() @@ -704,6 +822,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + // The tracker ignores the peer address in the request param. It uses the client remote ip address. let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -727,6 +849,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -756,6 +882,10 @@ mod for_all_config_modes { #[tokio::test] async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( ) { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + /* We assume that both the client and tracker share the same public IP. 
client <-> tracker <-> Internet @@ -792,6 +922,10 @@ mod for_all_config_modes { #[tokio::test] async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( ) { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + /* We assume that both the client and tracker share the same public IP. client <-> tracker <-> Internet @@ -832,6 +966,10 @@ mod for_all_config_modes { #[tokio::test] async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( ) { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + /* client <-> http proxy <-> tracker <-> Internet ip: header: config: peer addr: @@ -885,8 +1023,10 @@ mod for_all_config_modes { use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; use crate::common::fixtures::invalid_info_hashes; + use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::http::asserts::{ assert_cannot_parse_query_params_error_response, assert_missing_query_params_for_scrape_request_error_response, assert_scrape_response, @@ -896,9 +1036,13 @@ mod for_all_config_modes { use crate::servers::http::responses::scrape::{self, File, ResponseBuilder}; use crate::servers::http::{requests, Started}; - //#[tokio::test] + #[tokio::test] #[allow(dead_code)] async fn should_fail_when_the_request_is_empty() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_public().into()).await; let response = Client::new(*env.bind_address()).get("scrape").await; @@ -909,6 +1053,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = 
Started::new(&configuration::ephemeral_public().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -926,6 +1074,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -964,6 +1116,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1002,6 +1158,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1021,6 +1181,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_accept_multiple_infohashes() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1047,6 +1211,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1070,6 +1238,10 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + if TcpListener::bind(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0)) .await .is_err() @@ -1107,7 +1279,9 @@ mod configured_as_whitelisted { use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; use crate::servers::http::client::Client; use crate::servers::http::requests::announce::QueryBuilder; @@ -1115,6 +1289,10 @@ mod configured_as_whitelisted { #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_listed().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1130,6 +1308,10 @@ mod configured_as_whitelisted { #[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_listed().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1156,7 +1338,9 @@ mod configured_as_whitelisted { use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::http::asserts::assert_scrape_response; use 
crate::servers::http::client::Client; use crate::servers::http::responses::scrape::{File, ResponseBuilder}; @@ -1164,6 +1348,10 @@ mod configured_as_whitelisted { #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_listed().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1193,6 +1381,10 @@ mod configured_as_whitelisted { #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_listed().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1245,7 +1437,9 @@ mod configured_as_private { use torrust_tracker::core::auth::Key; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; use crate::servers::http::client::Client; use crate::servers::http::requests::announce::QueryBuilder; @@ -1253,6 +1447,10 @@ mod configured_as_private { #[tokio::test] async fn should_respond_to_authenticated_peers() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_private().into()).await; let expiring_key = env.tracker.generate_auth_key(Some(Duration::from_secs(60))).await.unwrap(); @@ -1268,6 +1466,10 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = 
Started::new(&configuration::ephemeral_private().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1283,6 +1485,10 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_private().into()).await; let invalid_key = "INVALID_KEY"; @@ -1298,6 +1504,10 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_private().into()).await; // The tracker does not have this key @@ -1323,7 +1533,9 @@ mod configured_as_private { use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::http::asserts::{assert_authentication_error_response, assert_scrape_response}; use crate::servers::http::client::Client; use crate::servers::http::responses::scrape::{File, ResponseBuilder}; @@ -1331,6 +1543,10 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_private().into()).await; let invalid_key = "INVALID_KEY"; @@ -1346,6 +1562,10 @@ mod configured_as_private { #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_private().into()).await; let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1375,6 +1595,10 @@ mod configured_as_private { #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral_private().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index e37ef7bf0..91f4c4e06 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -10,7 +10,9 @@ use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient use torrust_tracker::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; +use tracing::level_filters::LevelFilter; +use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::udp::asserts::is_error_response; use crate::servers::udp::Started; @@ -39,6 +41,10 @@ async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrac #[tokio::test] async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { @@ -68,12 +74,18 @@ mod receiving_a_connection_request { use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::udp::asserts::is_connect_response; use crate::servers::udp::Started; #[tokio::test] async fn 
should_return_a_connect_response() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { @@ -111,7 +123,9 @@ mod receiving_an_announce_request { use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::udp::asserts::is_ipv4_announce_response; use crate::servers::udp::contract::send_connection_request; use crate::servers::udp::Started; @@ -152,6 +166,10 @@ mod receiving_an_announce_request { #[tokio::test] async fn should_return_an_announce_response() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { @@ -170,6 +188,10 @@ mod receiving_an_announce_request { #[tokio::test] async fn should_return_many_announce_response() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { @@ -195,13 +217,19 @@ mod receiving_an_scrape_request { use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::udp::asserts::is_scrape_response; use crate::servers::udp::contract::send_connection_request; use crate::servers::udp::Started; #[tokio::test] async fn should_return_a_scrape_response() { 
+ INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index cfc4390c9..30f257d1c 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -90,11 +90,17 @@ mod tests { use tokio::time::sleep; use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + use crate::common::logging::{tracing_stderr_init, INIT}; use crate::servers::udp::Started; #[tokio::test] async fn it_should_make_and_stop_udp_server() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + let env = Started::new(&configuration::ephemeral().into()).await; sleep(Duration::from_secs(1)).await; env.stop().await; From 14e672ec35bfd627d1829bec70a6643f417797e3 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Sun, 25 Aug 2024 10:08:51 +0200 Subject: [PATCH 0987/1003] tracing: add spans to many functions --- src/app.rs | 2 ++ src/bootstrap/app.rs | 6 ++++ src/bootstrap/jobs/health_check_api.rs | 3 ++ src/bootstrap/jobs/http_tracker.rs | 4 +++ src/bootstrap/jobs/mod.rs | 2 ++ src/bootstrap/jobs/torrent_cleanup.rs | 2 ++ src/bootstrap/jobs/tracker_apis.rs | 4 +++ src/bootstrap/jobs/udp_tracker.rs | 3 ++ src/servers/apis/routes.rs | 3 +- src/servers/apis/server.rs | 42 +++++++++++++++++++----- src/servers/health_check_api/handlers.rs | 2 ++ src/servers/health_check_api/server.rs | 3 +- src/servers/http/server.rs | 2 ++ src/servers/http/v1/routes.rs | 3 +- src/servers/signals.rs | 15 ++++++--- src/servers/udp/handlers.rs | 22 ++++++------- src/servers/udp/server/bound_socket.rs | 2 +- src/servers/udp/server/launcher.rs | 14 +++++--- src/servers/udp/server/mod.rs | 19 ++++++++--- src/servers/udp/server/processor.rs | 38 ++++++++++++--------- src/servers/udp/server/spawner.rs | 4 
++- src/servers/udp/server/states.rs | 22 ++++++++----- tests/servers/api/environment.rs | 14 +++++--- tests/servers/http/v1/contract.rs | 4 +++ tests/servers/udp/environment.rs | 12 +++++-- 25 files changed, 176 insertions(+), 71 deletions(-) diff --git a/src/app.rs b/src/app.rs index f96ac399f..06fea4d2e 100644 --- a/src/app.rs +++ b/src/app.rs @@ -25,6 +25,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; +use tracing::instrument; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::servers::registar::Registar; @@ -36,6 +37,7 @@ use crate::{core, servers}; /// /// - Can't retrieve tracker keys from database. /// - Can't load whitelist from database. +#[instrument(skip(config, tracker))] pub async fn start(config: &Configuration, tracker: Arc) -> Vec> { if config.http_api.is_none() && (config.udp_trackers.is_none() || config.udp_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 8b0acabc8..7c0cf45ac 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -16,6 +16,7 @@ use std::sync::Arc; use torrust_tracker_clock::static_time; use torrust_tracker_configuration::validator::Validator; use torrust_tracker_configuration::Configuration; +use tracing::instrument; use super::config::initialize_configuration; use crate::bootstrap; @@ -29,6 +30,7 @@ use crate::shared::crypto::ephemeral_instance_keys; /// /// Setup can file if the configuration is invalid. #[must_use] +#[instrument(skip())] pub fn setup() -> (Configuration, Arc) { let configuration = initialize_configuration(); @@ -47,6 +49,7 @@ pub fn setup() -> (Configuration, Arc) { /// /// The configuration may be obtained from the environment (via config file or env vars). 
#[must_use] +#[instrument(skip())] pub fn initialize_with_configuration(configuration: &Configuration) -> Arc { initialize_static(); initialize_logging(configuration); @@ -59,6 +62,7 @@ pub fn initialize_with_configuration(configuration: &Configuration) -> Arc Tracker { tracker_factory(config) } @@ -79,6 +84,7 @@ pub fn initialize_tracker(config: &Configuration) -> Tracker { /// It initializes the log threshold, format and channel. /// /// See [the logging setup](crate::bootstrap::logging::setup) for more info about logging. +#[instrument(skip(config))] pub fn initialize_logging(config: &Configuration) { bootstrap::logging::setup(config); } diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index d306b2be9..b6250efcc 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -17,6 +17,7 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HealthCheckApi; +use tracing::instrument; use super::Started; use crate::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TARGET}; @@ -34,6 +35,8 @@ use crate::servers::signals::Halted; /// # Panics /// /// It would panic if unable to send the `ApiServerJobStarted` notice. 
+#[allow(clippy::async_yields_async)] +#[instrument(skip(config, register))] pub async fn start_job(config: &HealthCheckApi, register: ServiceRegistry) -> JoinHandle<()> { let bind_addr = config.bind_address; diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 745f564b1..c55723bc6 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -16,6 +16,7 @@ use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpTracker; +use tracing::instrument; use super::make_rust_tls; use crate::core; @@ -32,6 +33,7 @@ use crate::servers::registar::ServiceRegistrationForm; /// /// It would panic if the `config::HttpTracker` struct would contain inappropriate values. /// +#[instrument(skip(config, tracker, form))] pub async fn start_job( config: &HttpTracker, tracker: Arc, @@ -49,6 +51,8 @@ pub async fn start_job( } } +#[allow(clippy::async_yields_async)] +#[instrument(skip(socket, tls, tracker, form))] async fn start_v1( socket: SocketAddr, tls: Option, diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index 6534270fa..6e18ec3ba 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -20,6 +20,7 @@ pub struct Started { pub address: std::net::SocketAddr, } +#[instrument(skip(opt_tsl_config))] pub async fn make_rust_tls(opt_tsl_config: &Option) -> Option> { match opt_tsl_config { Some(tsl_config) => { @@ -89,6 +90,7 @@ use axum_server::tls_rustls::RustlsConfig; use thiserror::Error; use torrust_tracker_configuration::TslConfig; use torrust_tracker_located_error::{DynError, LocatedError}; +use tracing::instrument; /// Error returned by the Bootstrap Process. 
#[derive(Error, Debug)] diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index ee02a4d6d..6abb4f26b 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -15,6 +15,7 @@ use std::sync::Arc; use chrono::Utc; use tokio::task::JoinHandle; use torrust_tracker_configuration::Core; +use tracing::instrument; use crate::core; @@ -24,6 +25,7 @@ use crate::core; /// /// Refer to [`torrust-tracker-configuration documentation`](https://docs.rs/torrust-tracker-configuration) for more info about that option. #[must_use] +#[instrument(skip(config, tracker))] pub fn start_job(config: &Core, tracker: &Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(tracker); let interval = config.inactive_peer_cleanup_interval; diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index ca91fbc83..35b13b7ce 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -26,6 +26,7 @@ use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use tokio::task::JoinHandle; use torrust_tracker_configuration::{AccessTokens, HttpApi}; +use tracing::instrument; use super::make_rust_tls; use crate::core; @@ -53,6 +54,7 @@ pub struct ApiServerJobStarted(); /// It would panic if unable to send the `ApiServerJobStarted` notice. 
/// /// +#[instrument(skip(config, tracker, form))] pub async fn start_job( config: &HttpApi, tracker: Arc, @@ -72,6 +74,8 @@ pub async fn start_job( } } +#[allow(clippy::async_yields_async)] +#[instrument(skip(socket, tls, tracker, form, access_tokens))] async fn start_v1( socket: SocketAddr, tls: Option, diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 407cfbbfa..ca503aa29 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -10,6 +10,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; use torrust_tracker_configuration::UdpTracker; +use tracing::instrument; use crate::core; use crate::servers::registar::ServiceRegistrationForm; @@ -27,6 +28,8 @@ use crate::servers::udp::UDP_TRACKER_LOG_TARGET; /// It will panic if it is unable to start the UDP service. /// It will panic if the task did not finish successfully. #[must_use] +#[allow(clippy::async_yields_async)] +#[instrument(skip(config, tracker, form))] pub async fn start_job(config: &UdpTracker, tracker: Arc, form: ServiceRegistrationForm) -> JoinHandle<()> { let bind_to = config.bind_address; diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index 4901d760d..327cab0c5 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -21,7 +21,7 @@ use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; use tower_http::trace::{DefaultMakeSpan, TraceLayer}; -use tracing::{Level, Span}; +use tracing::{instrument, Level, Span}; use super::v1; use super::v1::context::health_check::handlers::health_check_handler; @@ -31,6 +31,7 @@ use crate::servers::apis::API_LOG_TARGET; /// Add all API routes to the router. 
#[allow(clippy::needless_pass_by_value)] +#[instrument(skip(tracker, access_tokens))] pub fn router(tracker: Arc, access_tokens: Arc) -> Router { let router = Router::new(); diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 9008d7ce6..31220f497 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -28,10 +28,13 @@ use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; +use derive_more::derive::Display; use derive_more::Constructor; use futures::future::BoxFuture; +use thiserror::Error; use tokio::sync::oneshot::{Receiver, Sender}; use torrust_tracker_configuration::AccessTokens; +use tracing::{instrument, Level}; use super::routes::router; use crate::bootstrap::jobs::Started; @@ -43,9 +46,10 @@ use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, Servi use crate::servers::signals::{graceful_shutdown, Halted}; /// Errors that can occur when starting or stopping the API server. -#[derive(Debug)] +#[derive(Debug, Error)] pub enum Error { - Error(String), + #[error("Error when starting or stopping the API server")] + FailedToStartOrStop(String), } /// An alias for the `ApiServer` struct with the `Stopped` state. @@ -62,18 +66,26 @@ pub type RunningApiServer = ApiServer; /// It's a state machine that can be in one of two /// states: `Stopped` or `Running`. #[allow(clippy::module_name_repetitions)] -pub struct ApiServer { +#[derive(Debug, Display)] +pub struct ApiServer +where + S: std::fmt::Debug + std::fmt::Display, +{ pub state: S, } /// The `Stopped` state of the `ApiServer` struct. +#[derive(Debug, Display)] +#[display("Stopped: {launcher}")] pub struct Stopped { launcher: Launcher, } /// The `Running` state of the `ApiServer` struct. 
+#[derive(Debug, Display)] +#[display("Running (with local address): {local_addr}")] pub struct Running { - pub binding: SocketAddr, + pub local_addr: SocketAddr, pub halt_task: tokio::sync::oneshot::Sender, pub task: tokio::task::JoinHandle, } @@ -81,12 +93,12 @@ pub struct Running { impl Running { #[must_use] pub fn new( - binding: SocketAddr, + local_addr: SocketAddr, halt_task: tokio::sync::oneshot::Sender, task: tokio::task::JoinHandle, ) -> Self { Self { - binding, + local_addr, halt_task, task, } @@ -110,6 +122,7 @@ impl ApiServer { /// # Panics /// /// It would panic if the bound socket address cannot be sent back to this starter. + #[instrument(skip(self, tracker, form, access_tokens), err, ret(Display, level = Level::INFO))] pub async fn start( self, tracker: Arc, @@ -157,13 +170,14 @@ impl ApiServer { /// # Errors /// /// It would return an error if the channel for the task killer signal was closed. + #[instrument(skip(self), err, ret(Display, level = Level::INFO))] pub async fn stop(self) -> Result, Error> { self.state .halt_task .send(Halted::Normal) - .map_err(|_| Error::Error("Task killer channel was closed.".to_string()))?; + .map_err(|_| Error::FailedToStartOrStop("Task killer channel was closed.".to_string()))?; - let launcher = self.state.task.await.map_err(|e| Error::Error(e.to_string()))?; + let launcher = self.state.task.await.map_err(|e| Error::FailedToStartOrStop(e.to_string()))?; Ok(ApiServer { state: Stopped { launcher }, @@ -178,6 +192,7 @@ impl ApiServer { /// This function will return an error if unable to connect. /// Or if there request returns an error code. 
#[must_use] +#[instrument(skip())] pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { let url = format!("http://{binding}/api/health_check"); // DevSkim: ignore DS137138 @@ -199,6 +214,16 @@ pub struct Launcher { tls: Option, } +impl std::fmt::Display for Launcher { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if self.tls.is_some() { + write!(f, "(with socket): {}, using TLS", self.bind_to,) + } else { + write!(f, "(with socket): {}, without TLS", self.bind_to,) + } + } +} + impl Launcher { /// Starts the API server with graceful shutdown. /// @@ -210,6 +235,7 @@ impl Launcher { /// /// Will panic if unable to bind to the socket, or unable to get the address of the bound socket. /// Will also panic if unable to send message regarding the bound socket address. + #[instrument(skip(self, tracker, access_tokens, tx_start, rx_halt))] pub fn start( &self, tracker: Arc, diff --git a/src/servers/health_check_api/handlers.rs b/src/servers/health_check_api/handlers.rs index 944e84a1d..fe65e996b 100644 --- a/src/servers/health_check_api/handlers.rs +++ b/src/servers/health_check_api/handlers.rs @@ -2,6 +2,7 @@ use std::collections::VecDeque; use axum::extract::State; use axum::Json; +use tracing::{instrument, Level}; use super::resources::{CheckReport, Report}; use super::responses; @@ -11,6 +12,7 @@ use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, Servi /// /// Creates a vector [`CheckReport`] from the input set of [`CheckJob`], and then builds a report from the results. 
/// +#[instrument(skip(register), ret(level = Level::DEBUG))] pub(crate) async fn health_check_handler(State(register): State) -> Json { #[allow(unused_assignments)] let mut checks: VecDeque = VecDeque::new(); diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs index 8a9b97306..df4b1cf69 100644 --- a/src/servers/health_check_api/server.rs +++ b/src/servers/health_check_api/server.rs @@ -18,7 +18,7 @@ use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; use tower_http::trace::{DefaultMakeSpan, TraceLayer}; -use tracing::{Level, Span}; +use tracing::{instrument, Level, Span}; use crate::bootstrap::jobs::Started; use crate::servers::health_check_api::handlers::health_check_handler; @@ -31,6 +31,7 @@ use crate::servers::signals::{graceful_shutdown, Halted}; /// # Panics /// /// Will panic if binding to the socket address fails. +#[instrument(skip(bind_to, tx, rx_halt, register))] pub fn start( bind_to: SocketAddr, tx: Sender, diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 75888f6a4..560d91681 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -7,6 +7,7 @@ use axum_server::Handle; use derive_more::Constructor; use futures::future::BoxFuture; use tokio::sync::oneshot::{Receiver, Sender}; +use tracing::instrument; use super::v1::routes::router; use crate::bootstrap::jobs::Started; @@ -41,6 +42,7 @@ pub struct Launcher { } impl Launcher { + #[instrument(skip(self, tracker, tx_start, rx_halt))] fn start(&self, tracker: Arc, tx_start: Sender, rx_halt: Receiver) -> BoxFuture<'static, ()> { let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 
c24797c4a..16e39b61b 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -17,7 +17,7 @@ use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; use tower_http::trace::{DefaultMakeSpan, TraceLayer}; -use tracing::{Level, Span}; +use tracing::{instrument, Level, Span}; use super::handlers::{announce, health_check, scrape}; use crate::core::Tracker; @@ -28,6 +28,7 @@ use crate::servers::http::HTTP_TRACKER_LOG_TARGET; /// > **NOTICE**: it's added a layer to get the client IP from the connection /// > info. The tracker could use the connection info to get the client IP. #[allow(clippy::needless_pass_by_value)] +#[instrument(skip(tracker, server_socket_addr))] pub fn router(tracker: Arc, server_socket_addr: SocketAddr) -> Router { Router::new() // Health check diff --git a/src/servers/signals.rs b/src/servers/signals.rs index 367becff8..b83dd5213 100644 --- a/src/servers/signals.rs +++ b/src/servers/signals.rs @@ -3,6 +3,7 @@ use std::time::Duration; use derive_more::Display; use tokio::time::sleep; +use tracing::instrument; /// This is the message that the "launcher" spawned task receives from the main /// application process to notify the service to shutdown. @@ -17,6 +18,7 @@ pub enum Halted { /// # Panics /// /// Will panic if the `ctrl_c` or `terminate` signal resolves with an error. +#[instrument(skip())] pub async fn global_shutdown_signal() { let ctrl_c = async { tokio::signal::ctrl_c().await.expect("failed to install Ctrl+C handler"); @@ -34,8 +36,8 @@ pub async fn global_shutdown_signal() { let terminate = std::future::pending::<()>(); tokio::select! 
{ - () = ctrl_c => {}, - () = terminate => {} + () = ctrl_c => {tracing::warn!("caught interrupt signal (ctrl-c), halting...");}, + () = terminate => {tracing::warn!("caught interrupt signal (terminate), halting...");} } } @@ -44,6 +46,7 @@ pub async fn global_shutdown_signal() { /// # Panics /// /// Will panic if the `stop_receiver` resolves with an error. +#[instrument(skip(rx_halt))] pub async fn shutdown_signal(rx_halt: tokio::sync::oneshot::Receiver) { let halt = async { match rx_halt.await { @@ -53,22 +56,24 @@ pub async fn shutdown_signal(rx_halt: tokio::sync::oneshot::Receiver) { }; tokio::select! { - signal = halt => { tracing::info!("Halt signal processed: {}", signal) }, - () = global_shutdown_signal() => { tracing::info!("Global shutdown signal processed") } + signal = halt => { tracing::debug!("Halt signal processed: {}", signal) }, + () = global_shutdown_signal() => { tracing::debug!("Global shutdown signal processed") } } } /// Same as `shutdown_signal()`, but shows a message when it resolves. +#[instrument(skip(rx_halt))] pub async fn shutdown_signal_with_message(rx_halt: tokio::sync::oneshot::Receiver, message: String) { shutdown_signal(rx_halt).await; tracing::info!("{message}"); } +#[instrument(skip(handle, rx_halt, message))] pub async fn graceful_shutdown(handle: axum_server::Handle, rx_halt: tokio::sync::oneshot::Receiver, message: String) { shutdown_signal_with_message(rx_halt, message).await; - tracing::info!("Sending graceful shutdown signal"); + tracing::debug!("Sending graceful shutdown signal"); handle.graceful_shutdown(Some(Duration::from_secs(90))); println!("!! 
shuting down in 90 seconds !!"); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 34f786219..373fb9c14 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -12,6 +12,7 @@ use aquatic_udp_protocol::{ }; use torrust_tracker_located_error::DynError; use torrust_tracker_primitives::info_hash::InfoHash; +use tracing::{instrument, Level}; use uuid::Uuid; use zerocopy::network_endian::I32; @@ -31,6 +32,7 @@ use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; /// - Delegating the request to the correct handler depending on the request type. /// /// It will return an `Error` response if the request is invalid. +#[instrument(skip(udp_request, tracker, local_addr), ret(level = Level::TRACE))] pub(crate) async fn handle_packet(udp_request: RawRequest, tracker: &Tracker, local_addr: SocketAddr) -> Response { tracing::debug!("Handling Packets: {udp_request:?}"); @@ -86,8 +88,9 @@ pub(crate) async fn handle_packet(udp_request: RawRequest, tracker: &Tracker, lo /// # Errors /// /// If a error happens in the `handle_request` function, it will just return the `ServerError`. +#[instrument(skip(request, remote_addr, tracker))] pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: &Tracker) -> Result { - tracing::debug!("Handling Request: {request:?} to: {remote_addr:?}"); + tracing::trace!("handle request"); match request { Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker).await, @@ -102,8 +105,9 @@ pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: /// # Errors /// /// This function does not ever return an error. 
+#[instrument(skip(tracker), err, ret(level = Level::TRACE))] pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, tracker: &Tracker) -> Result { - tracing::debug!("udp connect request: {:#?}", request); + tracing::trace!("handle connect"); let connection_cookie = make(&remote_addr); let connection_id = into_connection_id(&connection_cookie); @@ -113,8 +117,6 @@ pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, t connection_id, }; - tracing::debug!("udp connect response: {:#?}", response); - // send stats event match remote_addr { SocketAddr::V4(_) => { @@ -134,12 +136,13 @@ pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, t /// # Errors /// /// If a error happens in the `handle_announce` function, it will just return the `ServerError`. +#[instrument(skip(tracker), err, ret(level = Level::TRACE))] pub async fn handle_announce( remote_addr: SocketAddr, announce_request: &AnnounceRequest, tracker: &Tracker, ) -> Result { - tracing::debug!("udp announce request: {:#?}", announce_request); + tracing::trace!("handle announce"); // Authentication if tracker.requires_authentication() { @@ -196,8 +199,6 @@ pub async fn handle_announce( .collect(), }; - tracing::debug!("udp announce response: {:#?}", announce_response); - Ok(Response::from(announce_response)) } else { let announce_response = AnnounceResponse { @@ -223,8 +224,6 @@ pub async fn handle_announce( .collect(), }; - tracing::debug!("udp announce response: {:#?}", announce_response); - Ok(Response::from(announce_response)) } } @@ -235,8 +234,9 @@ pub async fn handle_announce( /// # Errors /// /// This function does not ever return an error. 
+#[instrument(skip(tracker), err, ret(level = Level::TRACE))] pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tracker: &Tracker) -> Result { - tracing::debug!("udp scrape request: {:#?}", request); + tracing::trace!("handle scrape"); // Convert from aquatic infohashes let mut info_hashes: Vec = vec![]; @@ -282,8 +282,6 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra torrent_stats, }; - tracing::debug!("udp scrape response: {:#?}", response); - Ok(Response::from(response)) } diff --git a/src/servers/udp/server/bound_socket.rs b/src/servers/udp/server/bound_socket.rs index 42242e44a..658589aa6 100644 --- a/src/servers/udp/server/bound_socket.rs +++ b/src/servers/udp/server/bound_socket.rs @@ -26,7 +26,7 @@ impl BoundSocket { Err(e) => Err(e)?, }; - let local_addr = format!("udp://{addr}"); + let local_addr = format!("udp://{}", socket.local_addr()?); tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "UdpSocket::new (bound)"); Ok(Self { socket }) diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index 7b40f6604..c9ad213f6 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -6,6 +6,7 @@ use derive_more::Constructor; use futures_util::StreamExt; use tokio::select; use tokio::sync::oneshot; +use tracing::instrument; use super::request_buffer::ActiveRequests; use crate::bootstrap::jobs::Started; @@ -30,17 +31,13 @@ impl Launcher { /// /// It panics if unable to bind to udp socket, and get the address from the udp socket. /// It also panics if unable to send address of socket. 
+ #[instrument(skip(tracker, bind_to, tx_start, rx_halt))] pub async fn run_with_graceful_shutdown( tracker: Arc, bind_to: SocketAddr, tx_start: oneshot::Sender, rx_halt: oneshot::Receiver, ) { - let halt_task = tokio::task::spawn(shutdown_signal_with_message( - rx_halt, - format!("Halting UDP Service Bound to Socket: {bind_to}"), - )); - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting on: {bind_to}"); let socket = tokio::time::timeout(Duration::from_millis(5000), BoundSocket::new(bind_to)) @@ -80,6 +77,11 @@ impl Launcher { let stop = running.abort_handle(); + let halt_task = tokio::task::spawn(shutdown_signal_with_message( + rx_halt, + format!("Halting UDP Service Bound to Socket: {address}"), + )); + select! { _ = running => { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (stopped)"); }, _ = halt_task => { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (halting)"); } @@ -90,6 +92,7 @@ impl Launcher { } #[must_use] + #[instrument(skip(binding))] pub fn check(binding: &SocketAddr) -> ServiceHealthCheckJob { let binding = *binding; let info = format!("checking the udp tracker health check at: {binding}"); @@ -99,6 +102,7 @@ impl Launcher { ServiceHealthCheckJob::new(binding, info, job) } + #[instrument(skip(receiver, tracker))] async fn run_udp_server_main(mut receiver: Receiver, tracker: Arc) { let active_requests = &mut ActiveRequests::default(); diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 16133e21b..d81624cb2 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -1,6 +1,9 @@ //! Module to handle the UDP server instances. 
use std::fmt::Debug; +use derive_more::derive::Display; +use thiserror::Error; + use super::RawRequest; pub mod bound_socket; @@ -21,11 +24,13 @@ pub mod states; /// Some errors triggered while stopping the server are: /// /// - The [`Server`] cannot send the shutdown signal to the spawned UDP service thread. -#[derive(Debug)] +#[derive(Debug, Error)] pub enum UdpError { - /// Any kind of error starting or stopping the server. - Socket(std::io::Error), - Error(String), + #[error("Any error to do with the socket")] + FailedToBindSocket(std::io::Error), + + #[error("Any error to do with starting or stopping the sever")] + FailedToStartOrStopServer(String), } /// A UDP server. @@ -38,7 +43,11 @@ pub enum UdpError { /// > reset to the initial value after stopping the server. This struct is not /// > intended to persist configurations between runs. #[allow(clippy::module_name_repetitions)] -pub struct Server { +#[derive(Debug, Display)] +pub struct Server +where + S: std::fmt::Debug + std::fmt::Display, +{ /// The state of the server: `running` or `stopped`. 
pub state: S, } diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index e633a2358..9fa28a44d 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -3,10 +3,11 @@ use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::Response; +use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; use crate::core::Tracker; -use crate::servers::udp::{handlers, RawRequest, UDP_TRACKER_LOG_TARGET}; +use crate::servers::udp::{handlers, RawRequest}; pub struct Processor { socket: Arc, @@ -18,15 +19,17 @@ impl Processor { Self { socket, tracker } } + #[instrument(skip(self, request))] pub async fn process_request(self, request: RawRequest) { - tracing::trace!(target: UDP_TRACKER_LOG_TARGET, request = %request.from, "Udp::process_request (receiving)"); - let from = request.from; let response = handlers::handle_packet(request, &self.tracker, self.socket.address()).await; self.send_response(from, response).await; } - async fn send_response(self, to: SocketAddr, response: Response) { + #[instrument(skip(self))] + async fn send_response(self, target: SocketAddr, response: Response) { + tracing::debug!("send response"); + let response_type = match &response { Response::Connect(_) => "Connect".to_string(), Response::AnnounceIpv4(_) => "AnnounceIpv4".to_string(), @@ -35,8 +38,6 @@ impl Processor { Response::Error(e) => format!("Error: {e:?}"), }; - tracing::debug!(target: UDP_TRACKER_LOG_TARGET, target = ?to, response_type, "Udp::send_response (sending)"); - let mut writer = Cursor::new(Vec::with_capacity(200)); match response.write_bytes(&mut writer) { @@ -44,23 +45,28 @@ impl Processor { let bytes_count = writer.get_ref().len(); let payload = writer.get_ref(); - tracing::debug!(target: UDP_TRACKER_LOG_TARGET, ?to, bytes_count, "Udp::send_response (sending...)" ); - tracing::trace!(target: UDP_TRACKER_LOG_TARGET, ?to, bytes_count, ?payload, "Udp::send_response (sending...)"); - - 
self.send_packet(&to, payload).await; - - tracing::trace!(target:UDP_TRACKER_LOG_TARGET, ?to, bytes_count, "Udp::send_response (sent)"); + let () = match self.send_packet(&target, payload).await { + Ok(sent_bytes) => { + if tracing::event_enabled!(Level::TRACE) { + tracing::debug!(%bytes_count, %sent_bytes, ?payload, "sent {response_type}"); + } else { + tracing::debug!(%bytes_count, %sent_bytes, "sent {response_type}"); + } + } + Err(error) => tracing::warn!(%bytes_count, %error, ?payload, "failed to send"), + }; } Err(e) => { - tracing::error!(target: UDP_TRACKER_LOG_TARGET, ?to, response_type, err = %e, "Udp::send_response (error)"); + tracing::error!(%e, "error"); } } } - async fn send_packet(&self, remote_addr: &SocketAddr, payload: &[u8]) { - tracing::trace!(target: UDP_TRACKER_LOG_TARGET, to = %remote_addr, ?payload, "Udp::send_response (sending)"); + #[instrument(skip(self))] + async fn send_packet(&self, target: &SocketAddr, payload: &[u8]) -> std::io::Result { + tracing::trace!("send packet"); // doesn't matter if it reaches or not - drop(self.socket.send_to(payload, remote_addr).await); + self.socket.send_to(payload, target).await } } diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs index e4612fbe0..dea293ad7 100644 --- a/src/servers/udp/server/spawner.rs +++ b/src/servers/udp/server/spawner.rs @@ -2,6 +2,7 @@ use std::net::SocketAddr; use std::sync::Arc; +use derive_more::derive::Display; use derive_more::Constructor; use tokio::sync::oneshot; use tokio::task::JoinHandle; @@ -11,7 +12,8 @@ use crate::bootstrap::jobs::Started; use crate::core::Tracker; use crate::servers::signals::Halted; -#[derive(Constructor, Copy, Clone, Debug)] +#[derive(Constructor, Copy, Clone, Debug, Display)] +#[display("(with socket): {bind_to}")] pub struct Spawner { pub bind_to: SocketAddr, } diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index d0a2e4e8a..e90c4da54 100644 --- a/src/servers/udp/server/states.rs 
+++ b/src/servers/udp/server/states.rs @@ -2,8 +2,10 @@ use std::fmt::Debug; use std::net::SocketAddr; use std::sync::Arc; +use derive_more::derive::Display; use derive_more::Constructor; use tokio::task::JoinHandle; +use tracing::{instrument, Level}; use super::spawner::Spawner; use super::{Server, UdpError}; @@ -23,16 +25,18 @@ pub type StoppedUdpServer = Server; pub type RunningUdpServer = Server; /// A stopped UDP server state. - +#[derive(Debug, Display)] +#[display("Stopped: {spawner}")] pub struct Stopped { pub spawner: Spawner, } /// A running UDP server state. -#[derive(Debug, Constructor)] +#[derive(Debug, Display, Constructor)] +#[display("Running (with local address): {local_addr}")] pub struct Running { /// The address where the server is bound. - pub binding: SocketAddr, + pub local_addr: SocketAddr, pub halt_task: tokio::sync::oneshot::Sender, pub task: JoinHandle, } @@ -57,6 +61,7 @@ impl Server { /// /// It panics if unable to receive the bound socket address from service. /// + #[instrument(skip(self, tracker, form), err, ret(Display, level = Level::INFO))] pub async fn start(self, tracker: Arc, form: ServiceRegistrationForm) -> Result, std::io::Error> { let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); @@ -66,20 +71,20 @@ impl Server { // May need to wrap in a task to about a tokio bug. 
let task = self.state.spawner.spawn_launcher(tracker, tx_start, rx_halt); - let binding = rx_start.await.expect("it should be able to start the service").address; - let local_addr = format!("udp://{binding}"); + let local_addr = rx_start.await.expect("it should be able to start the service").address; - form.send(ServiceRegistration::new(binding, Launcher::check)) + form.send(ServiceRegistration::new(local_addr, Launcher::check)) .expect("it should be able to send service registration"); let running_udp_server: Server = Server { state: Running { - binding, + local_addr, halt_task: tx_halt, task, }, }; + let local_addr = format!("udp://{local_addr}"); tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "UdpServer::start (running)"); Ok(running_udp_server) @@ -98,11 +103,12 @@ impl Server { /// # Panics /// /// It panics if unable to shutdown service. + #[instrument(skip(self), err, ret(Display, level = Level::INFO))] pub async fn stop(self) -> Result, UdpError> { self.state .halt_task .send(Halted::Normal) - .map_err(|e| UdpError::Error(e.to_string()))?; + .map_err(|e| UdpError::FailedToStartOrStopServer(e.to_string()))?; let launcher = self.state.task.await.expect("it should shutdown service"); diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 92ef7b70b..2f4606be7 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -13,14 +13,20 @@ use torrust_tracker_primitives::peer; use super::connection_info::ConnectionInfo; -pub struct Environment { +pub struct Environment +where + S: std::fmt::Debug + std::fmt::Display, +{ pub config: Arc, pub tracker: Arc, pub registar: Registar, pub server: ApiServer, } -impl Environment { +impl Environment +where + S: std::fmt::Debug + std::fmt::Display, +{ /// Add a torrent to the tracker pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { self.tracker.upsert_peer_and_get_stats(info_hash, peer); @@ -79,12 +85,12 @@ impl Environment { pub 
fn get_connection_info(&self) -> ConnectionInfo { ConnectionInfo { - bind_address: self.server.state.binding.to_string(), + bind_address: self.server.state.local_addr.to_string(), api_token: self.config.access_tokens.get("admin").cloned(), } } pub fn bind_address(&self) -> SocketAddr { - self.server.state.binding + self.server.state.local_addr } } diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index f74b4717b..41e92c9d6 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -1639,6 +1639,10 @@ mod configured_as_private { #[tokio::test] async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + // There is not authentication error // code-review: should this really be this way? diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 30f257d1c..b7ac2336c 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -11,14 +11,20 @@ use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer; -pub struct Environment { +pub struct Environment +where + S: std::fmt::Debug + std::fmt::Display, +{ pub config: Arc, pub tracker: Arc, pub registar: Registar, pub server: Server, } -impl Environment { +impl Environment +where + S: std::fmt::Debug + std::fmt::Display, +{ /// Add a torrent to the tracker #[allow(dead_code)] pub fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { @@ -80,7 +86,7 @@ impl Environment { } pub fn bind_address(&self) -> SocketAddr { - self.server.state.binding + self.server.state.local_addr } } From 39ab661d0785b299aecfe0ac13db44f528851d90 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Sep 2024 12:01:03 +0100 Subject: [PATCH 0988/1003] release: version 3.0.0-beta --- Cargo.lock | 
38 +++++++++----------------- Cargo.toml | 16 +++++------ packages/clock/Cargo.toml | 2 +- packages/configuration/Cargo.toml | 2 +- packages/test-helpers/Cargo.toml | 2 +- packages/torrent-repository/Cargo.toml | 6 ++-- 6 files changed, 27 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0e3401278..44dc6812c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -448,7 +448,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower 0.4.13", + "tower", "tower-layer", "tower-service", "tracing", @@ -503,7 +503,7 @@ dependencies = [ "pin-project-lite", "serde", "serde_html_form", - "tower 0.4.13", + "tower", "tower-layer", "tower-service", "tracing", @@ -541,7 +541,7 @@ dependencies = [ "rustls-pki-types", "tokio", "tokio-rustls", - "tower 0.4.13", + "tower", "tower-service", ] @@ -1852,7 +1852,7 @@ dependencies = [ "pin-project-lite", "socket2 0.5.7", "tokio", - "tower 0.4.13", + "tower", "tower-service", "tracing", ] @@ -3922,7 +3922,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-beta-develop" +version = "3.0.0-beta" dependencies = [ "anyhow", "aquatic_udp_protocol", @@ -3972,7 +3972,7 @@ dependencies = [ "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-tracker-torrent-repository", - "tower 0.5.0", + "tower", "tower-http", "trace", "tracing", @@ -3984,7 +3984,7 @@ dependencies = [ [[package]] name = "torrust-tracker-clock" -version = "3.0.0-beta-develop" +version = "3.0.0-beta" dependencies = [ "chrono", "lazy_static", @@ -3993,7 +3993,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-beta-develop" +version = "3.0.0-beta" dependencies = [ "camino", "derive_more", @@ -4010,7 +4010,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-beta-develop" +version = "3.0.0-beta" dependencies = [ "criterion", "thiserror", @@ -4018,7 +4018,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" 
-version = "3.0.0-beta-develop" +version = "3.0.0-beta" dependencies = [ "thiserror", "tracing", @@ -4026,7 +4026,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-beta-develop" +version = "3.0.0-beta" dependencies = [ "aquatic_udp_protocol", "binascii", @@ -4040,7 +4040,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-beta-develop" +version = "3.0.0-beta" dependencies = [ "rand", "torrust-tracker-configuration", @@ -4048,7 +4048,7 @@ dependencies = [ [[package]] name = "torrust-tracker-torrent-repository" -version = "3.0.0-beta-develop" +version = "3.0.0-beta" dependencies = [ "aquatic_udp_protocol", "async-std", @@ -4081,18 +4081,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tower" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36b837f86b25d7c0d7988f00a54e74739be6477f2aac6201b8f429a7569991b7" -dependencies = [ - "pin-project-lite", - "tokio", - "tower-layer", - "tower-service", -] - [[package]] name = "tower-http" version = "0.5.2" diff --git a/Cargo.toml b/Cargo.toml index 1a875a192..1cce015e0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,7 +27,7 @@ license = "AGPL-3.0-only" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-beta-develop" +version = "3.0.0-beta" [dependencies] anyhow = "1" @@ -69,12 +69,12 @@ serde_repr = "0" serde_with = { version = "3", features = ["json"] } thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-clock = { version = "3.0.0-beta-develop", path = "packages/clock" } -torrust-tracker-configuration = { version = "3.0.0-beta-develop", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-beta-develop", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-beta-develop", path = "packages/located-error" } 
-torrust-tracker-primitives = { version = "3.0.0-beta-develop", path = "packages/primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-beta-develop", path = "packages/torrent-repository" } +torrust-tracker-clock = { version = "3.0.0-beta", path = "packages/clock" } +torrust-tracker-configuration = { version = "3.0.0-beta", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-beta", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-beta", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-beta", path = "packages/primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-beta", path = "packages/torrent-repository" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } trace = "0" @@ -90,7 +90,7 @@ ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_byt [dev-dependencies] local-ip-address = "0" mockall = "0" -torrust-tracker-test-helpers = { version = "3.0.0-beta-develop", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-beta", path = "packages/test-helpers" } [workspace] members = [ diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml index e28a37466..d7893ada7 100644 --- a/packages/clock/Cargo.toml +++ b/packages/clock/Cargo.toml @@ -19,6 +19,6 @@ version.workspace = true chrono = { version = "0", default-features = false, features = ["clock"] } lazy_static = "1" -torrust-tracker-primitives = { version = "3.0.0-beta-develop", path = "../primitives" } +torrust-tracker-primitives = { version = "3.0.0-beta", path = "../primitives" } [dev-dependencies] diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 4f217e1b6..65b4ffa3a 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -23,7 +23,7 @@ 
serde_json = { version = "1", features = ["preserve_order"] } serde_with = "3" thiserror = "1" toml = "0" -torrust-tracker-located-error = { version = "3.0.0-beta-develop", path = "../located-error" } +torrust-tracker-located-error = { version = "3.0.0-beta", path = "../located-error" } url = "2" [dev-dependencies] diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 0fd108ecf..56d5580ea 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -16,4 +16,4 @@ version.workspace = true [dependencies] rand = "0" -torrust-tracker-configuration = { version = "3.0.0-beta-develop", path = "../configuration" } +torrust-tracker-configuration = { version = "3.0.0-beta", path = "../configuration" } diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 38405e4e0..e30a2d80a 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -22,9 +22,9 @@ dashmap = "6" futures = "0" parking_lot = "0" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-clock = { version = "3.0.0-beta-develop", path = "../clock" } -torrust-tracker-configuration = { version = "3.0.0-beta-develop", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-beta-develop", path = "../primitives" } +torrust-tracker-clock = { version = "3.0.0-beta", path = "../clock" } +torrust-tracker-configuration = { version = "3.0.0-beta", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-beta", path = "../primitives" } zerocopy = "0" [dev-dependencies] From b88cc6151f421506f502a7dd2b8a79a262fb2517 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Sep 2024 12:07:26 +0100 Subject: [PATCH 0989/1003] develop: bump to version 3.0.0-rc.1-develop --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 16 ++++++++-------- packages/clock/Cargo.toml | 2 +- 
packages/configuration/Cargo.toml | 2 +- packages/test-helpers/Cargo.toml | 2 +- packages/torrent-repository/Cargo.toml | 6 +++--- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 44dc6812c..ae780dd5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3922,7 +3922,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-beta" +version = "3.0.0-rc.1-develop" dependencies = [ "anyhow", "aquatic_udp_protocol", @@ -3984,7 +3984,7 @@ dependencies = [ [[package]] name = "torrust-tracker-clock" -version = "3.0.0-beta" +version = "3.0.0-rc.1-develop" dependencies = [ "chrono", "lazy_static", @@ -3993,7 +3993,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-beta" +version = "3.0.0-rc.1-develop" dependencies = [ "camino", "derive_more", @@ -4010,7 +4010,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-beta" +version = "3.0.0-rc.1-develop" dependencies = [ "criterion", "thiserror", @@ -4018,7 +4018,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-beta" +version = "3.0.0-rc.1-develop" dependencies = [ "thiserror", "tracing", @@ -4026,7 +4026,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-beta" +version = "3.0.0-rc.1-develop" dependencies = [ "aquatic_udp_protocol", "binascii", @@ -4040,7 +4040,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-beta" +version = "3.0.0-rc.1-develop" dependencies = [ "rand", "torrust-tracker-configuration", @@ -4048,7 +4048,7 @@ dependencies = [ [[package]] name = "torrust-tracker-torrent-repository" -version = "3.0.0-beta" +version = "3.0.0-rc.1-develop" dependencies = [ "aquatic_udp_protocol", "async-std", diff --git a/Cargo.toml b/Cargo.toml index 1cce015e0..5a2b382cb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,7 +27,7 @@ license = "AGPL-3.0-only" publish = true repository = 
"https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-beta" +version = "3.0.0-rc.1-develop" [dependencies] anyhow = "1" @@ -69,12 +69,12 @@ serde_repr = "0" serde_with = { version = "3", features = ["json"] } thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-clock = { version = "3.0.0-beta", path = "packages/clock" } -torrust-tracker-configuration = { version = "3.0.0-beta", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-beta", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-beta", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-beta", path = "packages/primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-beta", path = "packages/torrent-repository" } +torrust-tracker-clock = { version = "3.0.0-rc.1-develop", path = "packages/clock" } +torrust-tracker-configuration = { version = "3.0.0-rc.1-develop", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-rc.1-develop", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-rc.1-develop", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-rc.1-develop", path = "packages/primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-rc.1-develop", path = "packages/torrent-repository" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } trace = "0" @@ -90,7 +90,7 @@ ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_byt [dev-dependencies] local-ip-address = "0" mockall = "0" -torrust-tracker-test-helpers = { version = "3.0.0-beta", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-rc.1-develop", path = "packages/test-helpers" 
} [workspace] members = [ diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml index d7893ada7..908816742 100644 --- a/packages/clock/Cargo.toml +++ b/packages/clock/Cargo.toml @@ -19,6 +19,6 @@ version.workspace = true chrono = { version = "0", default-features = false, features = ["clock"] } lazy_static = "1" -torrust-tracker-primitives = { version = "3.0.0-beta", path = "../primitives" } +torrust-tracker-primitives = { version = "3.0.0-rc.1-develop", path = "../primitives" } [dev-dependencies] diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 65b4ffa3a..8eafcc06a 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -23,7 +23,7 @@ serde_json = { version = "1", features = ["preserve_order"] } serde_with = "3" thiserror = "1" toml = "0" -torrust-tracker-located-error = { version = "3.0.0-beta", path = "../located-error" } +torrust-tracker-located-error = { version = "3.0.0-rc.1-develop", path = "../located-error" } url = "2" [dev-dependencies] diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 56d5580ea..b8762824d 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -16,4 +16,4 @@ version.workspace = true [dependencies] rand = "0" -torrust-tracker-configuration = { version = "3.0.0-beta", path = "../configuration" } +torrust-tracker-configuration = { version = "3.0.0-rc.1-develop", path = "../configuration" } diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index e30a2d80a..5268b223f 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -22,9 +22,9 @@ dashmap = "6" futures = "0" parking_lot = "0" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-clock = { version = "3.0.0-beta", path = "../clock" } -torrust-tracker-configuration = { version = "3.0.0-beta", 
path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-beta", path = "../primitives" } +torrust-tracker-clock = { version = "3.0.0-rc.1-develop", path = "../clock" } +torrust-tracker-configuration = { version = "3.0.0-rc.1-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-rc.1-develop", path = "../primitives" } zerocopy = "0" [dev-dependencies] From 1f64cc9a1d851d90e29806fc87088b585ec5b6ac Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Sep 2024 09:23:51 +0100 Subject: [PATCH 0990/1003] chore(deps): udpate dependencies ``` cargo update Updating crates.io index Locking 44 packages to latest compatible versions Updating addr2line v0.22.0 -> v0.24.1 Removing adler v1.0.2 Updating anyhow v1.0.86 -> v1.0.87 Updating async-executor v1.13.0 -> v1.13.1 Removing async-io v1.13.0 Removing async-lock v2.8.0 Updating async-std v1.12.0 -> v1.13.0 Updating async-trait v0.1.81 -> v0.1.82 Updating aws-lc-rs v1.8.1 -> v1.9.0 Updating aws-lc-sys v0.20.1 -> v0.21.1 Updating backtrace v0.3.73 -> v0.3.74 Removing bitflags v1.3.2 Updating bytemuck v1.17.0 -> v1.18.0 Updating cc v1.1.14 -> v1.1.18 Updating clap v4.5.16 -> v4.5.17 Updating clap_builder v4.5.15 -> v4.5.17 Updating cpufeatures v0.2.13 -> v0.2.14 Updating dashmap v6.0.1 -> v6.1.0 Removing fastrand v1.9.0 Updating frunk v0.4.2 -> v0.4.3 Updating frunk_core v0.4.2 -> v0.4.3 Updating frunk_derives v0.4.2 -> v0.4.3 Updating frunk_proc_macro_helpers v0.1.2 -> v0.1.3 Updating frunk_proc_macros v0.1.2 -> v0.1.3 Removing futures-lite v1.13.0 Updating gimli v0.29.0 -> v0.31.0 Updating gloo-timers v0.2.6 -> v0.3.0 Updating hyper-rustls v0.27.2 -> v0.27.3 Updating hyper-util v0.1.7 -> v0.1.8 Updating indexmap v2.4.0 -> v2.5.0 Removing instant v0.1.13 Removing io-lifetimes v1.0.11 Updating ipnet v2.9.0 -> v2.10.0 Removing linux-raw-sys v0.3.8 Updating local-ip-address v0.6.1 -> v0.6.2 Removing miniz_oxide v0.7.4 Updating object v0.36.3 -> v0.36.4 Updating parking v2.2.0 
-> v2.2.1 Updating plotters v0.3.6 -> v0.3.7 Updating plotters-backend v0.3.6 -> v0.3.7 Updating plotters-svg v0.3.6 -> v0.3.7 Removing polling v2.8.0 Updating prettyplease v0.2.21 -> v0.2.22 Updating proc-macro-crate v3.1.0 -> v3.2.0 Updating rustc_version v0.4.0 -> v0.4.1 Removing rustix v0.37.27 Removing rustix v0.38.34 Adding rustix v0.38.36 Updating rustls-webpki v0.102.6 -> v0.102.8 Updating schannel v0.1.23 -> v0.1.24 Updating serde v1.0.209 -> v1.0.210 Updating serde_derive v1.0.209 -> v1.0.210 Updating serde_json v1.0.127 -> v1.0.128 Removing socket2 v0.4.10 Updating syn v2.0.76 -> v2.0.77 Updating tokio v1.39.3 -> v1.40.0 Updating tokio-util v0.7.11 -> v0.7.12 Removing toml_edit v0.21.1 Adding tower v0.5.1 Removing waker-fn v1.2.0 Removing winnow v0.5.40 Removing zeroize_derive v1.4.2 ``` --- Cargo.lock | 532 ++++++++++++++++++----------------------------------- 1 file changed, 184 insertions(+), 348 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ae780dd5c..7204fd612 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,19 +4,13 @@ version = 3 [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ "gimli", ] -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - [[package]] name = "adler2" version = "2.0.0" @@ -148,9 +142,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" [[package]] name = "aquatic_peer_id" 
@@ -241,14 +235,14 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7ebdfa2ebdab6b1760375fa7d6f382b9f486eac35fc994625a00e89280bdbb7" +checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ "async-task", "concurrent-queue", - "fastrand 2.1.1", - "futures-lite 2.3.0", + "fastrand", + "futures-lite", "slab", ] @@ -260,62 +254,33 @@ checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ "async-channel 2.3.1", "async-executor", - "async-io 2.3.4", - "async-lock 3.4.0", + "async-io", + "async-lock", "blocking", - "futures-lite 2.3.0", + "futures-lite", "once_cell", "tokio", ] -[[package]] -name = "async-io" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" -dependencies = [ - "async-lock 2.8.0", - "autocfg", - "cfg-if", - "concurrent-queue", - "futures-lite 1.13.0", - "log", - "parking", - "polling 2.8.0", - "rustix 0.37.27", - "slab", - "socket2 0.4.10", - "waker-fn", -] - [[package]] name = "async-io" version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" dependencies = [ - "async-lock 3.4.0", + "async-lock", "cfg-if", "concurrent-queue", "futures-io", - "futures-lite 2.3.0", + "futures-lite", "parking", - "polling 3.7.3", - "rustix 0.38.34", + "polling", + "rustix", "slab", "tracing", "windows-sys 0.59.0", ] -[[package]] -name = "async-lock" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" -dependencies = [ - "event-listener 2.5.3", -] - [[package]] name = "async-lock" version = "3.4.0" @@ -329,20 +294,20 @@ dependencies = [ [[package]] 
name = "async-std" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" dependencies = [ "async-attributes", "async-channel 1.9.0", "async-global-executor", - "async-io 1.13.0", - "async-lock 2.8.0", + "async-io", + "async-lock", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite 1.13.0", + "futures-lite", "gloo-timers", "kv-log-macro", "log", @@ -362,13 +327,13 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.81" +version = "0.1.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -394,9 +359,9 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "aws-lc-rs" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae74d9bd0a7530e8afd1770739ad34b36838829d6ad61818f9230f683f5ad77" +checksum = "2f95446d919226d587817a7d21379e6eb099b97b45110a7f272a444ca5c54070" dependencies = [ "aws-lc-sys", "mirai-annotations", @@ -406,9 +371,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.20.1" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f0e249228c6ad2d240c2dc94b714d711629d52bad946075d8e9b2f5391f0703" +checksum = "234314bd569802ec87011d653d6815c6d7b9ffb969e9fee5b8b20ef860e8dce9" dependencies = [ "bindgen 0.69.4", "cc", @@ -448,7 +413,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower", + "tower 
0.4.13", "tower-layer", "tower-service", "tracing", @@ -503,7 +468,7 @@ dependencies = [ "pin-project-lite", "serde", "serde_html_form", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -518,7 +483,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -541,23 +506,23 @@ dependencies = [ "rustls-pki-types", "tokio", "tokio-rustls", - "tower", + "tower 0.4.13", "tower-service", ] [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", - "miniz_oxide 0.7.4", + "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -597,7 +562,7 @@ version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.6.0", + "bitflags", "cexpr", "clang-sys", "itertools 0.12.1", @@ -610,7 +575,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.76", + "syn 2.0.77", "which", ] @@ -620,7 +585,7 @@ version = "0.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" dependencies = [ - "bitflags 2.6.0", + "bitflags", "cexpr", "clang-sys", "itertools 0.13.0", @@ -629,15 +594,9 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.76", + "syn 2.0.77", ] -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - [[package]] name = "bitflags" version = "2.6.0" @@ -674,7 +633,7 @@ dependencies = [ "async-channel 2.3.1", 
"async-task", "futures-io", - "futures-lite 2.3.0", + "futures-lite", "piper", ] @@ -698,7 +657,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "syn_derive", ] @@ -768,9 +727,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.17.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fd4c6dcc3b0aea2f5c0b4b82c2b15fe39ddbc76041a310848f4706edf76bb31" +checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" [[package]] name = "byteorder" @@ -810,9 +769,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.14" +version = "1.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d2eb3cd3d1bf4529e31c215ee6f93ec5a3d536d9f578f93d9d33ee19562932" +checksum = "b62ac837cdb5cb22e10a256099b4fc502b1dfe560cb282963a974d7abd80e476" dependencies = [ "jobserver", "libc", @@ -893,9 +852,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.16" +version = "4.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" +checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac" dependencies = [ "clap_builder", "clap_derive", @@ -903,9 +862,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.15" +version = "4.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" +checksum = "8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73" dependencies = [ "anstream", "anstyle", @@ -922,7 +881,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -986,9 +945,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.13" +version = "0.2.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -1143,7 +1102,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -1154,14 +1113,14 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "dashmap" -version = "6.0.1" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "804c8821570c3f8b70230c2ba75ffa5c0f9a4189b9a432b6656c536712acae28" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" dependencies = [ "cfg-if", "crossbeam-utils", @@ -1198,7 +1157,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "unicode-xid", ] @@ -1210,7 +1169,7 @@ checksum = "65f152f4b8559c4da5d574bafc7af85454d706b4c5fe8b530d508cacbb6807ea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -1315,15 +1274,6 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" -[[package]] -name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - [[package]] name = "fastrand" version = "2.1.1" @@ -1354,7 +1304,7 @@ checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", "libz-sys", - "miniz_oxide 0.8.0", + "miniz_oxide", ] [[package]] @@ -1405,54 +1355,58 @@ checksum = 
"6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frunk" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11a351b59e12f97b4176ee78497dff72e4276fb1ceb13e19056aca7fa0206287" +checksum = "874b6a17738fc273ec753618bac60ddaeac48cb1d7684c3e7bd472e57a28b817" dependencies = [ "frunk_core", "frunk_derives", "frunk_proc_macros", + "serde", ] [[package]] name = "frunk_core" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af2469fab0bd07e64ccf0ad57a1438f63160c69b2e57f04a439653d68eb558d6" +checksum = "3529a07095650187788833d585c219761114005d5976185760cf794d265b6a5c" +dependencies = [ + "serde", +] [[package]] name = "frunk_derives" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" +checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "frunk_proc_macro_helpers" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35b54add839292b743aeda6ebedbd8b11e93404f902c56223e51b9ec18a13d2c" +checksum = "05a956ef36c377977e512e227dcad20f68c2786ac7a54dacece3746046fea5ce" dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "frunk_proc_macros" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71b85a1d4a9a6b300b41c05e8e13ef2feca03e0334127f29eca9506a7fe13a93" +checksum = "67e86c2c9183662713fea27ea527aad20fb15fee635a71081ff91bf93df4dc51" dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -1515,28 +1469,13 @@ version = "0.3.30" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" -[[package]] -name = "futures-lite" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" -dependencies = [ - "fastrand 1.9.0", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite", - "waker-fn", -] - [[package]] name = "futures-lite" version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand 2.1.1", + "fastrand", "futures-core", "futures-io", "parking", @@ -1551,7 +1490,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -1613,9 +1552,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "glob" @@ -1625,9 +1564,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "gloo-timers" -version = "0.2.6" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" dependencies = [ "futures-channel", "futures-core", @@ -1647,7 +1586,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.4.0", + "indexmap 2.5.0", "slab", "tokio", "tokio-util", @@ -1806,9 +1745,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.2" 
+version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http", @@ -1839,9 +1778,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-channel", @@ -1850,9 +1789,9 @@ dependencies = [ "http-body", "hyper", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", - "tower", + "tower 0.4.13", "tower-service", "tracing", ] @@ -1909,9 +1848,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" +checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" dependencies = [ "equivalent", "hashbrown 0.14.5", @@ -1924,15 +1863,6 @@ version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" -[[package]] -name = "instant" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" -dependencies = [ - "cfg-if", -] - [[package]] name = "io-enum" version = "1.1.3" @@ -1942,22 +1872,11 @@ dependencies = [ "derive_utils", ] -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi 0.3.9", - "libc", - 
"windows-sys 0.48.0", -] - [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" [[package]] name = "is-terminal" @@ -2092,12 +2011,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "linux-raw-sys" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - [[package]] name = "linux-raw-sys" version = "0.4.14" @@ -2106,9 +2019,9 @@ checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "local-ip-address" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136ef34e18462b17bf39a7826f8f3bbc223341f8e83822beb8b77db9a3d49696" +checksum = "b435d7dd476416a905f9634dff8c330cee8d3168fdd1fbd472a17d1a75c00c3e" dependencies = [ "libc", "neli", @@ -2168,15 +2081,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.8.0" @@ -2227,7 +2131,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2259,7 +2163,7 @@ dependencies = [ "percent-encoding", "serde", "serde_json", - "socket2 0.5.7", + "socket2", "twox-hash", "url", ] @@ -2277,7 +2181,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "termcolor", "thiserror", ] @@ -2291,7 +2195,7 @@ dependencies = [ 
"base64 0.21.7", "bigdecimal", "bindgen 0.70.1", - "bitflags 2.6.0", + "bitflags", "bitvec", "btoi", "byteorder", @@ -2434,9 +2338,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.3" +version = "0.36.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" +checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" dependencies = [ "memchr", ] @@ -2459,7 +2363,7 @@ version = "0.10.66" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" dependencies = [ - "bitflags 2.6.0", + "bitflags", "cfg-if", "foreign-types", "libc", @@ -2476,7 +2380,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2505,9 +2409,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "parking" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" @@ -2558,7 +2462,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2632,7 +2536,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2654,7 +2558,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand 2.1.1", + "fastrand", "futures-io", ] @@ -2666,9 +2570,9 @@ checksum = 
"d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "plotters" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" dependencies = [ "num-traits", "plotters-backend", @@ -2679,35 +2583,19 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" [[package]] name = "plotters-svg" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" dependencies = [ "plotters-backend", ] -[[package]] -name = "polling" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" -dependencies = [ - "autocfg", - "bitflags 1.3.2", - "cfg-if", - "concurrent-queue", - "libc", - "log", - "pin-project-lite", - "windows-sys 0.48.0", -] - [[package]] name = "polling" version = "3.7.3" @@ -2718,7 +2606,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.34", + "rustix", "tracing", "windows-sys 0.59.0", ] @@ -2766,21 +2654,21 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.21" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a909e6e8053fa1a5ad670f5816c7d93029ee1fa8898718490544a6b0d5d38b3e" +checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" dependencies = 
[ "proc-macro2", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "proc-macro-crate" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.21.1", + "toml_edit", ] [[package]] @@ -2824,7 +2712,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "version_check", "yansi", ] @@ -2963,7 +2851,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" dependencies = [ - "bitflags 2.6.0", + "bitflags", ] [[package]] @@ -3132,7 +3020,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.76", + "syn 2.0.77", "unicode-ident", ] @@ -3142,7 +3030,7 @@ version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7753b721174eb8ff87a9a0e799e2d7bc3749323e773db92e0984debb00019d6e" dependencies = [ - "bitflags 2.6.0", + "bitflags", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -3180,37 +3068,23 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] [[package]] name = "rustix" -version = "0.37.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - 
"linux-raw-sys 0.3.8", - "windows-sys 0.48.0", -] - -[[package]] -name = "rustix" -version = "0.38.34" +version = "0.38.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "3f55e80d50763938498dd5ebb18647174e0c76dc38c5505294bb224624f30f36" dependencies = [ - "bitflags 2.6.0", + "bitflags", "errno", "libc", - "linux-raw-sys 0.4.14", + "linux-raw-sys", "windows-sys 0.52.0", ] @@ -3246,9 +3120,9 @@ checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" [[package]] name = "rustls-webpki" -version = "0.102.6" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "aws-lc-rs", "ring", @@ -3285,11 +3159,11 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3319,7 +3193,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.6.0", + "bitflags", "core-foundation", "core-foundation-sys", "libc", @@ -3344,9 +3218,9 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" 
+checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -3372,13 +3246,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -3388,7 +3262,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de514ef58196f1fc96dcaef80fe6170a1ce6215df9687a93fe8300e773fefc5" dependencies = [ "form_urlencoded", - "indexmap 2.4.0", + "indexmap 2.5.0", "itoa", "ryu", "serde", @@ -3396,11 +3270,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.127" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ - "indexmap 2.4.0", + "indexmap 2.5.0", "itoa", "memchr", "ryu", @@ -3425,7 +3299,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -3459,7 +3333,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.4.0", + "indexmap 2.5.0", "serde", "serde_derive", "serde_json", @@ -3476,7 +3350,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -3552,16 +3426,6 @@ version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.7" @@ -3619,9 +3483,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.76" +version = "2.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525" +checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" dependencies = [ "proc-macro2", "quote", @@ -3637,7 +3501,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -3661,7 +3525,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.6.0", + "bitflags", "core-foundation", "system-configuration-sys", ] @@ -3706,9 +3570,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if", - "fastrand 2.1.1", + "fastrand", "once_cell", - "rustix 0.38.34", + "rustix", "windows-sys 0.59.0", ] @@ -3744,7 +3608,7 @@ checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -3815,9 +3679,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.3" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", @@ -3825,7 +3689,7 @@ dependencies = [ "mio", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2", "tokio-macros", 
"windows-sys 0.52.0", ] @@ -3838,7 +3702,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -3864,9 +3728,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -3884,7 +3748,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.20", + "toml_edit", ] [[package]] @@ -3896,28 +3760,17 @@ dependencies = [ "serde", ] -[[package]] -name = "toml_edit" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" -dependencies = [ - "indexmap 2.4.0", - "toml_datetime", - "winnow 0.5.40", -] - [[package]] name = "toml_edit" version = "0.22.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" dependencies = [ - "indexmap 2.4.0", + "indexmap 2.5.0", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.18", + "winnow", ] [[package]] @@ -3972,7 +3825,7 @@ dependencies = [ "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-tracker-torrent-repository", - "tower", + "tower 0.5.1", "tower-http", "trace", "tracing", @@ -4081,6 +3934,18 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-http" version = "0.5.2" @@ -4088,7 +3953,7 
@@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ "async-compression", - "bitflags 2.6.0", + "bitflags", "bytes", "futures-core", "http", @@ -4146,7 +4011,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -4314,12 +4179,6 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" -[[package]] -name = "waker-fn" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" - [[package]] name = "walkdir" version = "2.5.0" @@ -4367,7 +4226,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "wasm-bindgen-shared", ] @@ -4401,7 +4260,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4431,7 +4290,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.34", + "rustix", ] [[package]] @@ -4652,15 +4511,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" -[[package]] -name = "winnow" -version = "0.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" -dependencies = [ - "memchr", -] - [[package]] name = "winnow" version = "0.6.18" @@ -4703,7 +4553,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -4711,20 
+4561,6 @@ name = "zeroize" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.76", -] [[package]] name = "zstd" From ff836ed3885bfa0d12fde7c3832ace856f933a95 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Sep 2024 09:30:41 +0100 Subject: [PATCH 0991/1003] fix: clippy error --- packages/clock/src/conv/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/clock/src/conv/mod.rs b/packages/clock/src/conv/mod.rs index f70950c38..894083061 100644 --- a/packages/clock/src/conv/mod.rs +++ b/packages/clock/src/conv/mod.rs @@ -4,6 +4,7 @@ use chrono::{DateTime, Utc}; use torrust_tracker_primitives::DurationSinceUnixEpoch; /// It converts a string in ISO 8601 format to a timestamp. +/// /// For example, the string `1970-01-01T00:00:00.000Z` which is the Unix Epoch /// will be converted to a timestamp of 0: `DurationSinceUnixEpoch::ZERO`. /// From 481d41333c5c3a4a6a0cc0b968d1b904bc9284ee Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Sep 2024 13:29:50 +0100 Subject: [PATCH 0992/1003] feat: [#569] allow UDP clients to limit peers in response The UDP tracker announce response always include all peers available up to a maxium of 74 peers, ignoring the `num_want` param in the request described in: https://www.bittorrent.org/beps/bep_0015.html This change applies that limit only when is lower than then TORRENT_PEERS_LIMIT (74). 
--- src/core/mod.rs | 178 ++++++++++++++++++++--- src/servers/http/v1/services/announce.rs | 4 +- src/servers/http/v1/services/scrape.rs | 8 +- src/servers/udp/handlers.rs | 5 +- tests/servers/udp/contract.rs | 2 +- 5 files changed, 167 insertions(+), 30 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index cbdd7bcbc..1d2d856ba 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -448,6 +448,7 @@ pub mod torrent; pub mod peer_tests; +use std::cmp::max; use std::collections::HashMap; use std::net::IpAddr; use std::panic::Location; @@ -520,6 +521,38 @@ pub struct AnnounceData { pub policy: AnnouncePolicy, } +/// How many peers the peer announcing wants in the announce response. +#[derive(Clone, Debug, PartialEq, Default)] +pub enum PeersWanted { + /// The peer wants as many peers as possible in the announce response. + #[default] + All, + /// The peer only wants a certain amount of peers in the announce response. + Only { amount: usize }, +} + +impl PeersWanted { + fn limit(&self) -> usize { + match self { + PeersWanted::All => TORRENT_PEERS_LIMIT, + PeersWanted::Only { amount } => *amount, + } + } +} + +impl From for PeersWanted { + fn from(value: i32) -> Self { + if value > 0 { + match value.try_into() { + Ok(peers_wanted) => Self::Only { amount: peers_wanted }, + Err(_) => Self::All, + } + } else { + Self::All + } + } +} + /// Structure that holds the data returned by the `scrape` request. #[derive(Debug, PartialEq, Default)] pub struct ScrapeData { @@ -639,7 +672,13 @@ impl Tracker { /// # Context: Tracker /// /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). 
- pub fn announce(&self, info_hash: &InfoHash, peer: &mut peer::Peer, remote_client_ip: &IpAddr) -> AnnounceData { + pub fn announce( + &self, + info_hash: &InfoHash, + peer: &mut peer::Peer, + remote_client_ip: &IpAddr, + peers_wanted: &PeersWanted, + ) -> AnnounceData { // code-review: maybe instead of mutating the peer we could just return // a tuple with the new peer and the announce data: (Peer, AnnounceData). // It could even be a different struct: `StoredPeer` or `PublicPeer`. @@ -661,7 +700,7 @@ impl Tracker { let stats = self.upsert_peer_and_get_stats(info_hash, peer); - let peers = self.get_peers_for(info_hash, peer); + let peers = self.get_peers_for(info_hash, peer, peers_wanted.limit()); AnnounceData { peers, @@ -713,16 +752,21 @@ impl Tracker { Ok(()) } - fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer) -> Vec> { + /// # Context: Tracker + /// + /// Get torrent peers for a given torrent and client. + /// + /// It filters out the client making the request. + fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { match self.torrents.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers_for_client(&peer.peer_addr, Some(TORRENT_PEERS_LIMIT)), + Some(entry) => entry.get_peers_for_client(&peer.peer_addr, Some(max(limit, TORRENT_PEERS_LIMIT))), } } /// # Context: Tracker /// - /// Get all torrent peers for a given torrent + /// Get torrent peers for a given torrent. 
pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { match self.torrents.get(info_hash) { None => vec![], @@ -1199,6 +1243,7 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; @@ -1328,7 +1373,7 @@ mod tests { } #[tokio::test] - async fn it_should_return_all_the_peers_for_a_given_torrent() { + async fn it_should_return_the_peers_for_a_given_torrent() { let tracker = public_tracker(); let info_hash = sample_info_hash(); @@ -1341,8 +1386,51 @@ mod tests { assert_eq!(peers, vec![Arc::new(peer)]); } + /// It generates a peer id from a number where the number is the last + /// part of the peer ID. For example, for `12` it returns + /// `-qB00000000000000012`. + fn numeric_peer_id(two_digits_value: i32) -> PeerId { + // Format idx as a string with leading zeros, ensuring it has exactly 2 digits + let idx_str = format!("{two_digits_value:02}"); + + // Create the base part of the peer ID. + let base = b"-qB00000000000000000"; + + // Concatenate the base with idx bytes, ensuring the total length is 20 bytes. 
+ let mut peer_id_bytes = [0u8; 20]; + peer_id_bytes[..base.len()].copy_from_slice(base); + peer_id_bytes[base.len() - idx_str.len()..].copy_from_slice(idx_str.as_bytes()); + + PeerId(peer_id_bytes) + } + #[tokio::test] - async fn it_should_return_all_the_peers_for_a_given_torrent_excluding_a_given_peer() { + async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { + let tracker = public_tracker(); + + let info_hash = sample_info_hash(); + + for idx in 1..=75 { + let peer = Peer { + peer_id: numeric_peer_id(idx), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + }; + + tracker.upsert_peer_and_get_stats(&info_hash, &peer); + } + + let peers = tracker.get_torrent_peers(&info_hash); + + assert_eq!(peers.len(), 74); + } + + #[tokio::test] + async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { let tracker = public_tracker(); let info_hash = sample_info_hash(); @@ -1350,11 +1438,41 @@ mod tests { tracker.upsert_peer_and_get_stats(&info_hash, &peer); - let peers = tracker.get_peers_for(&info_hash, &peer); + let peers = tracker.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); assert_eq!(peers, vec![]); } + #[tokio::test] + async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { + let tracker = public_tracker(); + + let info_hash = sample_info_hash(); + + let excluded_peer = sample_peer(); + + tracker.upsert_peer_and_get_stats(&info_hash, &excluded_peer); + + // Add 74 peers + for idx in 2..=75 { + let peer = Peer { + peer_id: numeric_peer_id(idx), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 
0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + }; + + tracker.upsert_peer_and_get_stats(&info_hash, &peer); + } + + let peers = tracker.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); + + assert_eq!(peers.len(), 74); + } + #[tokio::test] async fn it_should_return_the_torrent_metrics() { let tracker = public_tracker(); @@ -1409,6 +1527,7 @@ mod tests { use crate::core::tests::the_tracker::{ peer_ip, public_tracker, sample_info_hash, sample_peer, sample_peer_1, sample_peer_2, }; + use crate::core::PeersWanted; mod should_assign_the_ip_to_the_peer { @@ -1514,7 +1633,7 @@ mod tests { let mut peer = sample_peer(); - let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()); + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); assert_eq!(announce_data.peers, vec![]); } @@ -1524,10 +1643,15 @@ mod tests { let tracker = public_tracker(); let mut previously_announced_peer = sample_peer_1(); - tracker.announce(&sample_info_hash(), &mut previously_announced_peer, &peer_ip()); + tracker.announce( + &sample_info_hash(), + &mut previously_announced_peer, + &peer_ip(), + &PeersWanted::All, + ); let mut peer = sample_peer_2(); - let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()); + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); assert_eq!(announce_data.peers, vec![Arc::new(previously_announced_peer)]); } @@ -1537,6 +1661,7 @@ mod tests { use crate::core::tests::the_tracker::{ completed_peer, leecher, peer_ip, public_tracker, sample_info_hash, seeder, started_peer, }; + use crate::core::PeersWanted; #[tokio::test] async fn when_the_peer_is_a_seeder() { @@ -1544,7 +1669,7 @@ mod tests { let mut peer = seeder(); - let announce_data = tracker.announce(&sample_info_hash(), &mut peer, 
&peer_ip()); + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); assert_eq!(announce_data.stats.complete, 1); } @@ -1555,7 +1680,7 @@ mod tests { let mut peer = leecher(); - let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()); + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); assert_eq!(announce_data.stats.incomplete, 1); } @@ -1566,10 +1691,11 @@ mod tests { // We have to announce with "started" event because peer does not count if peer was not previously known let mut started_peer = started_peer(); - tracker.announce(&sample_info_hash(), &mut started_peer, &peer_ip()); + tracker.announce(&sample_info_hash(), &mut started_peer, &peer_ip(), &PeersWanted::All); let mut completed_peer = completed_peer(); - let announce_data = tracker.announce(&sample_info_hash(), &mut completed_peer, &peer_ip()); + let announce_data = + tracker.announce(&sample_info_hash(), &mut completed_peer, &peer_ip(), &PeersWanted::All); assert_eq!(announce_data.stats.downloaded, 1); } @@ -1583,7 +1709,7 @@ mod tests { use torrust_tracker_primitives::info_hash::InfoHash; use crate::core::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; - use crate::core::{ScrapeData, SwarmMetadata}; + use crate::core::{PeersWanted, ScrapeData, SwarmMetadata}; #[tokio::test] async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent( @@ -1609,11 +1735,21 @@ mod tests { // Announce a "complete" peer for the torrent let mut complete_peer = complete_peer(); - tracker.announce(&info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10))); + tracker.announce( + &info_hash, + &mut complete_peer, + &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10)), + &PeersWanted::All, + ); // Announce an "incomplete" peer for the torrent let mut incomplete_peer = incomplete_peer(); - tracker.announce(&info_hash, &mut 
incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11))); + tracker.announce( + &info_hash, + &mut incomplete_peer, + &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11)), + &PeersWanted::All, + ); // Scrape let scrape_data = tracker.scrape(&vec![info_hash]).await; @@ -1740,7 +1876,7 @@ mod tests { use crate::core::tests::the_tracker::{ complete_peer, incomplete_peer, peer_ip, sample_info_hash, whitelisted_tracker, }; - use crate::core::ScrapeData; + use crate::core::{PeersWanted, ScrapeData}; #[test] fn it_should_be_able_to_build_a_zeroed_scrape_data_for_a_list_of_info_hashes() { @@ -1761,11 +1897,11 @@ mod tests { let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); let mut peer = incomplete_peer(); - tracker.announce(&info_hash, &mut peer, &peer_ip()); + tracker.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); // Announce twice to force non zeroed swarm metadata let mut peer = complete_peer(); - tracker.announce(&info_hash, &mut peer, &peer_ip()); + tracker.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); let scrape_data = tracker.scrape(&vec![info_hash]).await; diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 6b7f8af5a..a58df4e18 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -14,7 +14,7 @@ use std::sync::Arc; use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer; -use crate::core::{statistics, AnnounceData, Tracker}; +use crate::core::{statistics, AnnounceData, PeersWanted, Tracker}; /// The HTTP tracker `announce` service. 
/// @@ -30,7 +30,7 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut peer: let original_peer_ip = peer.peer_addr.ip(); // The tracker could change the original peer ip - let announce_data = tracker.announce(&info_hash, peer, &original_peer_ip); + let announce_data = tracker.announce(&info_hash, peer, &original_peer_ip, &PeersWanted::All); match original_peer_ip { IpAddr::V4(_) => { diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 42fe4b518..0d561c7bc 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -103,7 +103,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; - use crate::core::{statistics, ScrapeData, Tracker}; + use crate::core::{statistics, PeersWanted, ScrapeData, Tracker}; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ public_tracker, sample_info_hash, sample_info_hashes, sample_peer, @@ -119,7 +119,7 @@ mod tests { // Announce a new peer to force scrape data to contain not zeroed data let mut peer = sample_peer(); let original_peer_ip = peer.ip(); - tracker.announce(&info_hash, &mut peer, &original_peer_ip); + tracker.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::All); let scrape_data = invoke(&tracker, &info_hashes, &original_peer_ip).await; @@ -194,7 +194,7 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_test_helpers::configuration; - use crate::core::{statistics, ScrapeData, Tracker}; + use crate::core::{statistics, PeersWanted, ScrapeData, Tracker}; use crate::servers::http::v1::services::scrape::fake; use crate::servers::http::v1::services::scrape::tests::{ public_tracker, sample_info_hash, sample_info_hashes, sample_peer, @@ -210,7 +210,7 @@ mod tests { // Announce a new peer to force scrape data to contain not zeroed data let mut peer = sample_peer(); let 
original_peer_ip = peer.ip(); - tracker.announce(&info_hash, &mut peer, &original_peer_ip); + tracker.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::All); let scrape_data = fake(&tracker, &info_hashes, &original_peer_ip).await; diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 373fb9c14..69a427e0e 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -18,7 +18,7 @@ use zerocopy::network_endian::I32; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use super::RawRequest; -use crate::core::{statistics, ScrapeData, Tracker}; +use crate::core::{statistics, PeersWanted, ScrapeData, Tracker}; use crate::servers::udp::error::Error; use crate::servers::udp::logging::{log_bad_request, log_error_response, log_request, log_response}; use crate::servers::udp::peer_builder; @@ -162,8 +162,9 @@ pub async fn handle_announce( })?; let mut peer = peer_builder::from_request(announce_request, &remote_client_ip); + let peers_wanted: PeersWanted = i32::from(announce_request.peers_wanted.0).into(); - let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip); + let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted); match remote_client_ip { IpAddr::V4(_) => { diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index 91f4c4e06..1f9b71b62 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -159,7 +159,7 @@ mod receiving_an_announce_request { Err(err) => panic!("{err}"), }; - println!("test response {response:?}"); + // println!("test response {response:?}"); assert!(is_ipv4_announce_response(&response)); } From 084879e89f4632bd395dbca35460c08c03abdae9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Sep 2024 18:49:25 +0100 Subject: [PATCH 0993/1003] feat: [#569] numwant HTTP tracker announce param It allows HTTP clients to limit peers in the announce response with the `numwant` 
GET param. --- src/core/mod.rs | 10 ++++ .../http/v1/extractors/announce_request.rs | 3 +- src/servers/http/v1/handlers/announce.rs | 9 +++- src/servers/http/v1/requests/announce.rs | 47 +++++++++++++++++-- src/servers/http/v1/services/announce.rs | 18 ++++--- tests/servers/http/requests/announce.rs | 12 +++++ tests/servers/http/v1/contract.rs | 23 +++++++++ 7 files changed, 110 insertions(+), 12 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 1d2d856ba..f12eb9a3d 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -532,6 +532,16 @@ pub enum PeersWanted { } impl PeersWanted { + #[must_use] + pub fn only(limit: u32) -> Self { + let amount: usize = match limit.try_into() { + Ok(amount) => amount, + Err(_) => TORRENT_PEERS_LIMIT, + }; + + Self::Only { amount } + } + fn limit(&self) -> usize { match self { PeersWanted::All => TORRENT_PEERS_LIMIT, diff --git a/src/servers/http/v1/extractors/announce_request.rs b/src/servers/http/v1/extractors/announce_request.rs index b1d820598..324e91bf2 100644 --- a/src/servers/http/v1/extractors/announce_request.rs +++ b/src/servers/http/v1/extractors/announce_request.rs @@ -111,7 +111,7 @@ mod tests { #[test] fn it_should_extract_the_announce_request_from_the_url_query_params() { - let raw_query = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0"; + let raw_query = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0&numwant=50"; let announce = extract_announce_from(Some(raw_query)).unwrap(); @@ -126,6 +126,7 @@ mod tests { left: Some(NumberOfBytes::new(0)), event: Some(Event::Completed), compact: Some(Compact::NotAccepted), + numwant: Some(50), } ); } diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 
ee70b7841..1c7796fca 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -16,7 +16,7 @@ use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::peer; use crate::core::auth::Key; -use crate::core::{AnnounceData, Tracker}; +use crate::core::{AnnounceData, PeersWanted, Tracker}; use crate::servers::http::v1::extractors::announce_request::ExtractRequest; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; @@ -110,8 +110,12 @@ async fn handle_announce( }; let mut peer = peer_from_request(announce_request, &peer_ip); + let peers_wanted = match announce_request.numwant { + Some(numwant) => PeersWanted::only(numwant), + None => PeersWanted::All, + }; - let announce_data = services::announce::invoke(tracker.clone(), announce_request.info_hash, &mut peer).await; + let announce_data = services::announce::invoke(tracker.clone(), announce_request.info_hash, &mut peer, &peers_wanted).await; Ok(announce_data) } @@ -205,6 +209,7 @@ mod tests { left: None, event: None, compact: None, + numwant: None, } } diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index 3253a07c8..b432d3478 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -24,6 +24,7 @@ const UPLOADED: &str = "uploaded"; const LEFT: &str = "left"; const EVENT: &str = "event"; const COMPACT: &str = "compact"; +const NUMWANT: &str = "numwant"; /// The `Announce` request. Fields use the domain types after parsing the /// query params of the request. 
@@ -43,7 +44,8 @@ const COMPACT: &str = "compact"; /// uploaded: Some(NumberOfBytes::new(1)), /// left: Some(NumberOfBytes::new(1)), /// event: Some(Event::Started), -/// compact: Some(Compact::NotAccepted) +/// compact: Some(Compact::NotAccepted), +/// numwant: Some(50) /// }; /// ``` /// @@ -59,8 +61,10 @@ pub struct Announce { // Mandatory params /// The `InfoHash` of the torrent. pub info_hash: InfoHash, + /// The `PeerId` of the peer. pub peer_id: PeerId, + /// The port of the peer. pub port: u16, @@ -80,6 +84,10 @@ pub struct Announce { /// Whether the response should be in compact mode or not. pub compact: Option, + + /// Number of peers that the client would receive from the tracker. The + /// value is permitted to be zero. + pub numwant: Option, } /// Errors that can occur when parsing the `Announce` request. @@ -244,6 +252,7 @@ impl TryFrom for Announce { left: extract_left(&query)?, event: extract_event(&query)?, compact: extract_compact(&query)?, + numwant: extract_numwant(&query)?, }) } } @@ -350,6 +359,22 @@ fn extract_compact(query: &Query) -> Result, ParseAnnounceQueryE } } +fn extract_numwant(query: &Query) -> Result, ParseAnnounceQueryError> { + print!("numwant {query:#?}"); + + match query.get_param(NUMWANT) { + Some(raw_param) => match u32::from_str(&raw_param) { + Ok(numwant) => Ok(Some(numwant)), + Err(_) => Err(ParseAnnounceQueryError::InvalidParam { + param_name: NUMWANT.to_owned(), + param_value: raw_param.clone(), + location: Location::caller(), + }), + }, + None => Ok(None), + } +} + #[cfg(test)] mod tests { @@ -360,7 +385,7 @@ mod tests { use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::announce::{ - Announce, Compact, Event, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, + Announce, Compact, Event, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, NUMWANT, PEER_ID, PORT, UPLOADED, }; #[test] @@ -387,6 +412,7 @@ mod tests { left: None, event: None, compact: None, + numwant: None, } ); 
} @@ -402,6 +428,7 @@ mod tests { (LEFT, "3"), (EVENT, "started"), (COMPACT, "0"), + (NUMWANT, "50"), ]) .to_string(); @@ -420,6 +447,7 @@ mod tests { left: Some(NumberOfBytes::new(3)), event: Some(Event::Started), compact: Some(Compact::NotAccepted), + numwant: Some(50), } ); } @@ -428,7 +456,7 @@ mod tests { use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::announce::{ - Announce, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, + Announce, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, NUMWANT, PEER_ID, PORT, UPLOADED, }; #[test] @@ -547,6 +575,19 @@ mod tests { assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); } + + #[test] + fn it_should_fail_if_the_numwant_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (NUMWANT, "-1"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } } } } diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index a58df4e18..9c5dfdad2 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -26,11 +26,16 @@ use crate::core::{statistics, AnnounceData, PeersWanted, Tracker}; /// > **NOTICE**: as the HTTP tracker does not requires a connection request /// > like the UDP tracker, the number of TCP connections is incremented for /// > each `announce` request. 
-pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut peer::Peer) -> AnnounceData { +pub async fn invoke( + tracker: Arc, + info_hash: InfoHash, + peer: &mut peer::Peer, + peers_wanted: &PeersWanted, +) -> AnnounceData { let original_peer_ip = peer.peer_addr.ip(); // The tracker could change the original peer ip - let announce_data = tracker.announce(&info_hash, peer, &original_peer_ip, &PeersWanted::All); + let announce_data = tracker.announce(&info_hash, peer, &original_peer_ip, peers_wanted); match original_peer_ip { IpAddr::V4(_) => { @@ -100,7 +105,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::core::{statistics, AnnounceData, Tracker}; + use crate::core::{statistics, AnnounceData, PeersWanted, Tracker}; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; @@ -110,7 +115,7 @@ mod tests { let mut peer = sample_peer(); - let announce_data = invoke(tracker.clone(), sample_info_hash(), &mut peer).await; + let announce_data = invoke(tracker.clone(), sample_info_hash(), &mut peer, &PeersWanted::All).await; let expected_announce_data = AnnounceData { peers: vec![], @@ -146,7 +151,7 @@ mod tests { let mut peer = sample_peer_using_ipv4(); - let _announce_data = invoke(tracker, sample_info_hash(), &mut peer).await; + let _announce_data = invoke(tracker, sample_info_hash(), &mut peer, &PeersWanted::All).await; } fn tracker_with_an_ipv6_external_ip(stats_event_sender: Box) -> Tracker { @@ -185,6 +190,7 @@ mod tests { tracker_with_an_ipv6_external_ip(stats_event_sender).into(), sample_info_hash(), &mut peer, + &PeersWanted::All, ) .await; } @@ -211,7 +217,7 @@ mod tests { let mut peer = sample_peer_using_ipv6(); - let _announce_data = invoke(tracker, sample_info_hash(), &mut peer).await; + let _announce_data = invoke(tracker, sample_info_hash(), &mut peer, 
&PeersWanted::All).await; } } } diff --git a/tests/servers/http/requests/announce.rs b/tests/servers/http/requests/announce.rs index bcbb36852..fa20553d0 100644 --- a/tests/servers/http/requests/announce.rs +++ b/tests/servers/http/requests/announce.rs @@ -18,6 +18,7 @@ pub struct Query { pub left: BaseTenASCII, pub event: Option, pub compact: Option, + pub numwant: Option, } impl fmt::Display for Query { @@ -98,6 +99,7 @@ impl QueryBuilder { left: 0, event: Some(Event::Completed), compact: Some(Compact::NotAccepted), + numwant: None, }; Self { announce_query: default_announce_query, @@ -149,7 +151,9 @@ impl QueryBuilder { /// left=0 /// event=completed /// compact=0 +/// numwant=50 /// ``` +#[derive(Debug)] pub struct QueryParams { pub info_hash: Option, pub peer_addr: Option, @@ -160,6 +164,7 @@ pub struct QueryParams { pub left: Option, pub event: Option, pub compact: Option, + pub numwant: Option, } impl std::fmt::Display for QueryParams { @@ -193,6 +198,9 @@ impl std::fmt::Display for QueryParams { if let Some(compact) = &self.compact { params.push(("compact", compact)); } + if let Some(numwant) = &self.numwant { + params.push(("numwant", numwant)); + } let query = params .iter() @@ -208,6 +216,7 @@ impl QueryParams { pub fn from(announce_query: &Query) -> Self { let event = announce_query.event.as_ref().map(std::string::ToString::to_string); let compact = announce_query.compact.as_ref().map(std::string::ToString::to_string); + let numwant = announce_query.numwant.map(|numwant| numwant.to_string()); Self { info_hash: Some(percent_encode_byte_array(&announce_query.info_hash)), @@ -219,6 +228,7 @@ impl QueryParams { left: Some(announce_query.left.to_string()), event, compact, + numwant, } } @@ -241,6 +251,7 @@ impl QueryParams { self.left = None; self.event = None; self.compact = None; + self.numwant = None; } pub fn set(&mut self, param_name: &str, param_value: &str) { @@ -254,6 +265,7 @@ impl QueryParams { "left" => self.left = Some(param_value.to_string()), 
"event" => self.event = Some(param_value.to_string()), "compact" => self.compact = Some(param_value.to_string()), + "numwant" => self.numwant = Some(param_value.to_string()), &_ => panic!("Invalid param name for announce query"), } } diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 41e92c9d6..405a35dc5 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -448,6 +448,29 @@ mod for_all_config_modes { env.stop().await; } + #[tokio::test] + async fn should_fail_when_the_numwant_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("numwant", invalid_value); + + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + env.stop().await; + } + #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { INIT.call_once(|| { From c49438fc72711c2e82a1bbe76ecf02b45a870597 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Sep 2024 09:17:11 +0100 Subject: [PATCH 0994/1003] fix: remove debugging print --- src/servers/http/v1/requests/announce.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index b432d3478..029bdbc01 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -360,8 +360,6 @@ fn extract_compact(query: &Query) -> Result, ParseAnnounceQueryE } fn extract_numwant(query: &Query) -> Result, ParseAnnounceQueryError> { - print!("numwant {query:#?}"); - match query.get_param(NUMWANT) { Some(raw_param) => match u32::from_str(&raw_param) { 
Ok(numwant) => Ok(Some(numwant)), From dbee825a47e7059af79b874f5fa83108b74bd647 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Sep 2024 10:45:50 +0100 Subject: [PATCH 0995/1003] fix: [#1037] wrong req type name in tracker checker outout Fixed ouoput: ```output $ TORRUST_CHECKER_CONFIG='{ "udp_trackers": ["127.0.0.1:6969"], "http_trackers": [], "health_checks": [] }' cargo run --bin tracker_checker Compiling torrust-tracker v3.0.0-rc.1-develop (/home/josecelano/Documents/git/committer/me/github/torrust/torrust-tracker) Finished `dev` profile [optimized + debuginfo] target(s) in 15.42s Running `target/debug/tracker_checker` 2024-09-11T09:44:57.432395Z INFO torrust_tracker::console::clients::checker::service: Running checks for trackers ... [ { "Udp": { "Ok": { "remote_addr": "127.0.0.1:6969", "results": [ [ "Setup", { "Ok": null } ], [ "Connect", { "Ok": null } ], [ "Announce", { "Ok": null } ], [ "Scrape", { "Ok": null } ] ] } } } ] ``` --- src/console/clients/checker/checks/udp.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/console/clients/checker/checks/udp.rs b/src/console/clients/checker/checks/udp.rs index dd9afa47c..3ba26ceda 100644 --- a/src/console/clients/checker/checks/udp.rs +++ b/src/console/clients/checker/checks/udp.rs @@ -83,7 +83,7 @@ pub async fn run(udp_trackers: Vec, timeout: Duration) -> Vec Date: Thu, 12 Sep 2024 16:25:42 +0100 Subject: [PATCH 0996/1003] feat: [#675] tracker checker supports more service address formats All the following URL for UDP trackers are allow now: ```console TORRUST_CHECKER_CONFIG='{ "udp_trackers": [ "127.0.0.1:6969", "127.0.0.1:6969/", "127.0.0.1:6969/announce", "localhost:6969", "localhost:6969/", "localhost:6969/announce", "udp://127.0.0.1:6969", "udp://127.0.0.1:6969/", "udp://127.0.0.1:6969/announce", "udp://localhost:6969", "udp://localhost:6969/", "udp://localhost:6969/announce" ], "http_trackers": [], "health_checks": [] }' cargo run --bin tracker_checker ``` NOTICE: the 
client will resolve the domain to a socket address if needed. --- src/console/clients/checker/checks/udp.rs | 43 +++++- src/console/clients/checker/config.rs | 165 +++++++++++++++++----- 2 files changed, 169 insertions(+), 39 deletions(-) diff --git a/src/console/clients/checker/checks/udp.rs b/src/console/clients/checker/checks/udp.rs index 3ba26ceda..4044b4c52 100644 --- a/src/console/clients/checker/checks/udp.rs +++ b/src/console/clients/checker/checks/udp.rs @@ -4,6 +4,7 @@ use std::time::Duration; use aquatic_udp_protocol::TransactionId; use hex_literal::hex; use serde::Serialize; +use url::Url; use crate::console::clients::udp::checker::Client; use crate::console::clients::udp::Error; @@ -23,20 +24,22 @@ pub enum Check { } #[allow(clippy::missing_panics_doc)] -pub async fn run(udp_trackers: Vec, timeout: Duration) -> Vec> { +pub async fn run(udp_trackers: Vec, timeout: Duration) -> Vec> { let mut results = Vec::default(); tracing::debug!("UDP trackers ..."); let info_hash = aquatic_udp_protocol::InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422")); // # DevSkim: ignore DS173237 - for remote_addr in udp_trackers { + for remote_url in udp_trackers { + let remote_addr = resolve_socket_addr(&remote_url); + let mut checks = Checks { remote_addr, results: Vec::default(), }; - tracing::debug!("UDP tracker: {:?}", remote_addr); + tracing::debug!("UDP tracker: {:?}", remote_url); // Setup let client = match Client::new(remote_addr, timeout).await { @@ -95,3 +98,37 @@ pub async fn run(udp_trackers: Vec, timeout: Duration) -> Vec SocketAddr { + let socket_addr = url.socket_addrs(|| None).unwrap(); + *socket_addr.first().unwrap() +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + + use url::Url; + + use crate::console::clients::checker::checks::udp::resolve_socket_addr; + + #[test] + fn it_should_resolve_the_socket_address_for_udp_scheme_urls_containing_a_domain() { + let socket_addr = 
resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); + + assert!( + socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + ); + } + + #[test] + fn it_should_resolve_the_socket_address_for_udp_scheme_urls_containing_an_ip() { + let socket_addr = resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); + + assert!( + socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + ); + } +} diff --git a/src/console/clients/checker/config.rs b/src/console/clients/checker/config.rs index 6e44d889b..78c5926b8 100644 --- a/src/console/clients/checker/config.rs +++ b/src/console/clients/checker/config.rs @@ -1,6 +1,5 @@ use std::error::Error; use std::fmt; -use std::net::SocketAddr; use reqwest::Url as ServiceUrl; use serde::Deserialize; @@ -31,7 +30,7 @@ struct PlainConfiguration { /// Validated configuration pub struct Configuration { - pub udp_trackers: Vec, + pub udp_trackers: Vec, pub http_trackers: Vec, pub health_checks: Vec, } @@ -62,7 +61,8 @@ impl TryFrom for Configuration { let udp_trackers = plain_config .udp_trackers .into_iter() - .map(|s| s.parse::().map_err(ConfigurationError::InvalidUdpAddress)) + .map(|s| if s.starts_with("udp://") { s } else { format!("udp://{s}") }) + .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) .collect::, _>>()?; let http_trackers = plain_config @@ -87,68 +87,161 @@ impl TryFrom for Configuration { #[cfg(test)] mod tests { - use std::net::{IpAddr, Ipv4Addr}; - use super::*; #[test] fn configuration_should_be_build_from_plain_serializable_configuration() { let dto = PlainConfiguration { - udp_trackers: vec!["127.0.0.1:8080".to_string()], + udp_trackers: vec!["udp://127.0.0.1:8080".to_string()], http_trackers: vec!["http://127.0.0.1:8080".to_string()], health_checks: 
vec!["http://127.0.0.1:8080/health".to_string()], }; let config = Configuration::try_from(dto).expect("A valid configuration"); - assert_eq!( - config.udp_trackers, - vec![SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080)] - ); + assert_eq!(config.udp_trackers, vec![ServiceUrl::parse("udp://127.0.0.1:8080").unwrap()]); + assert_eq!( config.http_trackers, vec![ServiceUrl::parse("http://127.0.0.1:8080").unwrap()] ); + assert_eq!( config.health_checks, vec![ServiceUrl::parse("http://127.0.0.1:8080/health").unwrap()] ); } - mod building_configuration_from_plan_configuration { - use crate::console::clients::checker::config::{Configuration, PlainConfiguration}; + mod building_configuration_from_plain_configuration_for { + + mod udp_trackers { + use crate::console::clients::checker::config::{Configuration, PlainConfiguration, ServiceUrl}; + + /* The plain configuration should allow UDP URLs with: + + - IP or domain. + - With or without scheme. + - With or without `announce` suffix. + - With or without `/` at the end of the authority section (with empty path). 
+ + For example: + + 127.0.0.1:6969 + 127.0.0.1:6969/ + 127.0.0.1:6969/announce + + localhost:6969 + localhost:6969/ + localhost:6969/announce + + udp://127.0.0.1:6969 + udp://127.0.0.1:6969/ + udp://127.0.0.1:6969/announce + + udp://localhost:6969 + udp://localhost:6969/ + udp://localhost:6969/announce + + */ - #[test] - fn it_should_fail_when_a_tracker_udp_address_is_invalid() { - let plain_config = PlainConfiguration { - udp_trackers: vec!["invalid_address".to_string()], - http_trackers: vec![], - health_checks: vec![], - }; + #[test] + fn it_should_fail_when_a_tracker_udp_url_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["invalid URL".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; - assert!(Configuration::try_from(plain_config).is_err()); + assert!(Configuration::try_from(plain_config).is_err()); + } + + #[test] + fn it_should_add_the_udp_scheme_to_the_udp_url_when_it_is_missing() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["127.0.0.1:6969".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!(config.udp_trackers[0], "udp://127.0.0.1:6969".parse::().unwrap()); + } + + #[test] + fn it_should_allow_using_domains() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["udp://localhost:6969".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!(config.udp_trackers[0], "udp://localhost:6969".parse::().unwrap()); + } + + #[test] + fn it_should_allow_the_url_to_have_an_empty_path() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["127.0.0.1:6969/".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + 
assert_eq!(config.udp_trackers[0], "udp://127.0.0.1:6969/".parse::().unwrap()); + } + + #[test] + fn it_should_allow_the_url_to_contain_a_path() { + // This is the common format for UDP tracker URLs: + // udp://domain.com:6969/announce + + let plain_config = PlainConfiguration { + udp_trackers: vec!["127.0.0.1:6969/announce".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!( + config.udp_trackers[0], + "udp://127.0.0.1:6969/announce".parse::().unwrap() + ); + } } - #[test] - fn it_should_fail_when_a_tracker_http_address_is_invalid() { - let plain_config = PlainConfiguration { - udp_trackers: vec![], - http_trackers: vec!["not_a_url".to_string()], - health_checks: vec![], - }; + mod http_trackers { + use crate::console::clients::checker::config::{Configuration, PlainConfiguration}; + + #[test] + fn it_should_fail_when_a_tracker_http_url_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec!["invalid URL".to_string()], + health_checks: vec![], + }; - assert!(Configuration::try_from(plain_config).is_err()); + assert!(Configuration::try_from(plain_config).is_err()); + } } - #[test] - fn it_should_fail_when_a_health_check_http_address_is_invalid() { - let plain_config = PlainConfiguration { - udp_trackers: vec![], - http_trackers: vec![], - health_checks: vec!["not_a_url".to_string()], - }; + mod health_checks { + use crate::console::clients::checker::config::{Configuration, PlainConfiguration}; + + #[test] + fn it_should_fail_when_a_health_check_http_url_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec![], + health_checks: vec!["invalid URL".to_string()], + }; - assert!(Configuration::try_from(plain_config).is_err()); + assert!(Configuration::try_from(plain_config).is_err()); + } } } } From faee02f257812571f7635b2432e4d7a297958c96 Mon Sep 17 
00:00:00 2001 From: Jose Celano Date: Thu, 12 Sep 2024 17:25:53 +0100 Subject: [PATCH 0997/1003] feat: [#675] tracker checker (HTTP tracker) supports more service address formats Now it supports a path prefix. It will be remove by the client to build the "scrape" URLs. This type of URL is very common in tracker lists like in https://newtrackon.com/. ```console TORRUST_CHECKER_CONFIG='{ "udp_trackers": [], "http_trackers": [ "http://127.0.0.1:7070", "http://127.0.0.1:7070/", "http://127.0.0.1:7070/announce" ], "health_checks": [] }' cargo run --bin tracker_checker ``` --- src/console/clients/checker/checks/http.rs | 7 ++-- src/console/clients/checker/config.rs | 37 +++++++++++++++++++++- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/src/console/clients/checker/checks/http.rs b/src/console/clients/checker/checks/http.rs index 8abbeb669..0904f4e6e 100644 --- a/src/console/clients/checker/checks/http.rs +++ b/src/console/clients/checker/checks/http.rs @@ -28,6 +28,9 @@ pub async fn run(http_trackers: Vec, timeout: Duration) -> Vec, timeout: Duration) -> Vec().unwrap() + ); + } + + #[test] + fn it_should_allow_the_url_to_contain_an_empty_path() { + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec!["http://127.0.0.1:7070/".to_string()], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!( + config.http_trackers[0], + "http://127.0.0.1:7070/".parse::().unwrap() + ); + } } mod health_checks { From bdb04198d57a9d415e9da7f1dc8551266d2a2864 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 23 Sep 2024 15:38:18 +0100 Subject: [PATCH 0998/1003] chore(deps): update dependencies ```output cargo update Updating crates.io index Locking 26 packages to latest compatible versions Updating anyhow v1.0.87 -> v1.0.89 Updating aws-lc-sys v0.21.1 -> v0.21.2 Updating axum v0.7.5 -> v0.7.6 Updating axum-core v0.4.3 -> v0.4.4 Updating axum-extra 
v0.9.3 -> v0.9.4 Updating axum-macros v0.4.1 -> v0.4.2 Updating bytes v1.7.1 -> v1.7.2 Updating cc v1.1.18 -> v1.1.21 Updating clap v4.5.17 -> v4.5.18 Updating clap_builder v4.5.17 -> v4.5.18 Updating clap_derive v4.5.13 -> v4.5.18 Updating iana-time-zone v0.1.60 -> v0.1.61 Updating local-ip-address v0.6.2 -> v0.6.3 Updating pkg-config v0.3.30 -> v0.3.31 Updating redox_syscall v0.5.3 -> v0.5.4 Updating rustix v0.38.36 -> v0.38.37 Updating rustls v0.23.12 -> v0.23.13 Updating security-framework-sys v2.11.1 -> v2.12.0 Updating simdutf8 v0.1.4 -> v0.1.5 Updating thiserror v1.0.63 -> v1.0.64 Updating thiserror-impl v1.0.63 -> v1.0.64 Updating toml_edit v0.22.20 -> v0.22.21 Updating tower-http v0.5.2 -> v0.6.1 Updating unicode-ident v1.0.12 -> v1.0.13 Updating unicode-normalization v0.1.23 -> v0.1.24 Updating unicode-xid v0.2.5 -> v0.2.6 Removing windows-sys v0.48.0 Removing windows-targets v0.48.5 Removing windows_aarch64_gnullvm v0.48.5 Removing windows_aarch64_msvc v0.48.5 Removing windows_i686_gnu v0.48.5 Removing windows_i686_msvc v0.48.5 Removing windows_x86_64_gnu v0.48.5 Removing windows_x86_64_gnullvm v0.48.5 Removing windows_x86_64_msvc v0.48.5 ``` --- Cargo.lock | 218 +++++++++++++++++++---------------------------------- 1 file changed, 77 insertions(+), 141 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7204fd612..97cdbb551 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -142,9 +142,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.87" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "aquatic_peer_id" @@ -371,9 +371,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.21.1" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"234314bd569802ec87011d653d6815c6d7b9ffb969e9fee5b8b20ef860e8dce9" +checksum = "b3ddc4a5b231dd6958b140ff3151b6412b3f4321fab354f399eec8f14b06df62" dependencies = [ "bindgen 0.69.4", "cc", @@ -386,9 +386,9 @@ dependencies = [ [[package]] name = "axum" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +checksum = "8f43644eed690f5374f1af436ecd6aea01cd201f6fbdf0178adaf6907afb2cec" dependencies = [ "async-trait", "axum-core", @@ -413,7 +413,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower 0.4.13", + "tower 0.5.1", "tower-layer", "tower-service", "tracing", @@ -432,9 +432,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +checksum = "5e6b8ba012a258d63c9adfa28b9ddcf66149da6f986c5b5452e629d5ee64bf00" dependencies = [ "async-trait", "bytes", @@ -445,7 +445,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.1", "tower-layer", "tower-service", "tracing", @@ -453,9 +453,9 @@ dependencies = [ [[package]] name = "axum-extra" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0be6ea09c9b96cb5076af0de2e383bd2bc0c18f827cf1967bdd353e0b910d733" +checksum = "73c3220b188aea709cf1b6c5f9b01c3bd936bb08bd2b5184a12b35ac8131b1f9" dependencies = [ "axum", "axum-core", @@ -468,7 +468,7 @@ dependencies = [ "pin-project-lite", "serde", "serde_html_form", - "tower 0.4.13", + "tower 0.5.1", "tower-layer", "tower-service", "tracing", @@ -476,11 +476,10 @@ dependencies = [ [[package]] name = "axum-macros" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"00c055ee2d014ae5981ce1016374e8213682aa14d9bf40e48ab48b5f3ef20eaa" +checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" dependencies = [ - "heck 0.4.1", "proc-macro2", "quote", "syn 2.0.77", @@ -522,7 +521,7 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-targets 0.52.6", + "windows-targets", ] [[package]] @@ -739,9 +738,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" [[package]] name = "camino" @@ -769,9 +768,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.18" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ac837cdb5cb22e10a256099b4fc502b1dfe560cb282963a974d7abd80e476" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "jobserver", "libc", @@ -809,7 +808,7 @@ dependencies = [ "iana-time-zone", "num-traits", "serde", - "windows-targets 0.52.6", + "windows-targets", ] [[package]] @@ -852,9 +851,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.17" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac" +checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" dependencies = [ "clap_builder", "clap_derive", @@ -862,9 +861,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.17" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73" +checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" dependencies = 
[ "anstream", "anstyle", @@ -874,9 +873,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -1798,9 +1797,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1980,7 +1979,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets", ] [[package]] @@ -2019,14 +2018,14 @@ checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "local-ip-address" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b435d7dd476416a905f9634dff8c330cee8d3168fdd1fbd472a17d1a75c00c3e" +checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782" dependencies = [ "libc", "neli", "thiserror", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -2433,7 +2432,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets 0.52.6", + "windows-targets", ] [[package]] @@ -2564,9 +2563,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = 
"953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "plotters" @@ -2847,9 +2846,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" dependencies = [ "bitflags", ] @@ -3077,9 +3076,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.36" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f55e80d50763938498dd5ebb18647174e0c76dc38c5505294bb224624f30f36" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags", "errno", @@ -3090,9 +3089,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.12" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "aws-lc-rs", "once_cell", @@ -3202,9 +3201,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.1" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -3401,9 +3400,9 @@ dependencies = [ [[package]] name = "simdutf8" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] name = "siphasher" @@ -3593,18 +3592,18 @@ checksum = 
"3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", @@ -3762,9 +3761,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.20" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ "indexmap 2.5.0", "serde", @@ -3940,17 +3939,21 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" dependencies = [ + "futures-core", + "futures-util", "pin-project-lite", + "sync_wrapper 0.1.2", "tokio", "tower-layer", "tower-service", + "tracing", ] [[package]] name = "tower-http" -version = "0.5.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" dependencies = [ "async-compression", "bitflags", @@ -3958,7 +3961,6 @@ dependencies = [ "futures-core", "http", "http-body", - "http-body-util", "pin-project-lite", "tokio", "tokio-util", @@ -4102,24 +4104,24 @@ checksum = 
"08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] [[package]] name = "unicode-xid" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "untrusted" @@ -4330,7 +4332,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.6", + "windows-targets", ] [[package]] @@ -4341,7 +4343,7 @@ checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" dependencies = [ "windows-result", "windows-strings", - "windows-targets 0.52.6", + "windows-targets", ] [[package]] @@ -4350,7 +4352,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" dependencies = [ - "windows-targets 0.52.6", + "windows-targets", ] [[package]] @@ -4360,16 +4362,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" dependencies = [ "windows-result", - "windows-targets 0.52.6", 
-] - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets 0.48.5", + "windows-targets", ] [[package]] @@ -4378,7 +4371,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.6", + "windows-targets", ] [[package]] @@ -4387,22 +4380,7 @@ version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-targets" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" -dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 0.48.5", + "windows-targets", ] [[package]] @@ -4411,46 +4389,28 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", ] -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" - [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" - [[package]] name = "windows_i686_gnu" version = "0.52.6" @@ -4463,48 +4423,24 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" - [[package]] name = "windows_x86_64_msvc" version = "0.52.6" From beb56d31857c863bb78582efc847aca1a431fecd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 23 Sep 2024 16:15:53 +0100 Subject: [PATCH 0999/1003] release: version 3.0.0-rc.1 --- Cargo.lock | 18 +++++++++--------- Cargo.toml | 16 ++++++++-------- packages/clock/Cargo.toml | 2 +- packages/configuration/Cargo.toml | 2 +- packages/test-helpers/Cargo.toml | 2 +- packages/torrent-repository/Cargo.toml | 6 +++--- 6 files changed, 23 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 97cdbb551..56978738f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3774,7 +3774,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-rc.1-develop" +version = "3.0.0-rc.1" dependencies = [ "anyhow", "aquatic_udp_protocol", @@ -3824,7 +3824,7 @@ dependencies = [ "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-tracker-torrent-repository", - "tower 0.5.1", + "tower 0.4.13", "tower-http", "trace", "tracing", @@ -3836,7 +3836,7 @@ dependencies = [ [[package]] name = "torrust-tracker-clock" -version = "3.0.0-rc.1-develop" +version = "3.0.0-rc.1" dependencies = [ "chrono", "lazy_static", @@ -3845,7 +3845,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version 
= "3.0.0-rc.1-develop" +version = "3.0.0-rc.1" dependencies = [ "camino", "derive_more", @@ -3862,7 +3862,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-rc.1-develop" +version = "3.0.0-rc.1" dependencies = [ "criterion", "thiserror", @@ -3870,7 +3870,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-rc.1-develop" +version = "3.0.0-rc.1" dependencies = [ "thiserror", "tracing", @@ -3878,7 +3878,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-rc.1-develop" +version = "3.0.0-rc.1" dependencies = [ "aquatic_udp_protocol", "binascii", @@ -3892,7 +3892,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-rc.1-develop" +version = "3.0.0-rc.1" dependencies = [ "rand", "torrust-tracker-configuration", @@ -3900,7 +3900,7 @@ dependencies = [ [[package]] name = "torrust-tracker-torrent-repository" -version = "3.0.0-rc.1-develop" +version = "3.0.0-rc.1" dependencies = [ "aquatic_udp_protocol", "async-std", diff --git a/Cargo.toml b/Cargo.toml index 5a2b382cb..4aa87e6e3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,7 +27,7 @@ license = "AGPL-3.0-only" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-rc.1-develop" +version = "3.0.0-rc.1" [dependencies] anyhow = "1" @@ -69,12 +69,12 @@ serde_repr = "0" serde_with = { version = "3", features = ["json"] } thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-clock = { version = "3.0.0-rc.1-develop", path = "packages/clock" } -torrust-tracker-configuration = { version = "3.0.0-rc.1-develop", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-rc.1-develop", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-rc.1-develop", path = "packages/located-error" } 
-torrust-tracker-primitives = { version = "3.0.0-rc.1-develop", path = "packages/primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-rc.1-develop", path = "packages/torrent-repository" } +torrust-tracker-clock = { version = "3.0.0-rc.1", path = "packages/clock" } +torrust-tracker-configuration = { version = "3.0.0-rc.1", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-rc.1", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-rc.1", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-rc.1", path = "packages/primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-rc.1", path = "packages/torrent-repository" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } trace = "0" @@ -90,7 +90,7 @@ ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_byt [dev-dependencies] local-ip-address = "0" mockall = "0" -torrust-tracker-test-helpers = { version = "3.0.0-rc.1-develop", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-rc.1", path = "packages/test-helpers" } [workspace] members = [ diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml index 908816742..f95c12a0c 100644 --- a/packages/clock/Cargo.toml +++ b/packages/clock/Cargo.toml @@ -19,6 +19,6 @@ version.workspace = true chrono = { version = "0", default-features = false, features = ["clock"] } lazy_static = "1" -torrust-tracker-primitives = { version = "3.0.0-rc.1-develop", path = "../primitives" } +torrust-tracker-primitives = { version = "3.0.0-rc.1", path = "../primitives" } [dev-dependencies] diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 8eafcc06a..7b8b3c3bf 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -23,7 +23,7 @@ 
serde_json = { version = "1", features = ["preserve_order"] } serde_with = "3" thiserror = "1" toml = "0" -torrust-tracker-located-error = { version = "3.0.0-rc.1-develop", path = "../located-error" } +torrust-tracker-located-error = { version = "3.0.0-rc.1", path = "../located-error" } url = "2" [dev-dependencies] diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index b8762824d..ccf08b570 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -16,4 +16,4 @@ version.workspace = true [dependencies] rand = "0" -torrust-tracker-configuration = { version = "3.0.0-rc.1-develop", path = "../configuration" } +torrust-tracker-configuration = { version = "3.0.0-rc.1", path = "../configuration" } diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 5268b223f..0650d608f 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -22,9 +22,9 @@ dashmap = "6" futures = "0" parking_lot = "0" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-clock = { version = "3.0.0-rc.1-develop", path = "../clock" } -torrust-tracker-configuration = { version = "3.0.0-rc.1-develop", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-rc.1-develop", path = "../primitives" } +torrust-tracker-clock = { version = "3.0.0-rc.1", path = "../clock" } +torrust-tracker-configuration = { version = "3.0.0-rc.1", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-rc.1", path = "../primitives" } zerocopy = "0" [dev-dependencies] From cb809d3984d86a5ef4adb0f6f452d7a4442bf10b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 23 Sep 2024 17:48:40 +0100 Subject: [PATCH 1000/1003] develop: bump to version 3.0.0-develop --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 16 ++++++++-------- packages/clock/Cargo.toml | 2 +- 
packages/configuration/Cargo.toml | 2 +- packages/test-helpers/Cargo.toml | 2 +- packages/torrent-repository/Cargo.toml | 6 +++--- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 56978738f..d43356ca4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3774,7 +3774,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-rc.1" +version = "3.0.0-develop" dependencies = [ "anyhow", "aquatic_udp_protocol", @@ -3836,7 +3836,7 @@ dependencies = [ [[package]] name = "torrust-tracker-clock" -version = "3.0.0-rc.1" +version = "3.0.0-develop" dependencies = [ "chrono", "lazy_static", @@ -3845,7 +3845,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-rc.1" +version = "3.0.0-develop" dependencies = [ "camino", "derive_more", @@ -3862,7 +3862,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-rc.1" +version = "3.0.0-develop" dependencies = [ "criterion", "thiserror", @@ -3870,7 +3870,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-rc.1" +version = "3.0.0-develop" dependencies = [ "thiserror", "tracing", @@ -3878,7 +3878,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-rc.1" +version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", "binascii", @@ -3892,7 +3892,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-rc.1" +version = "3.0.0-develop" dependencies = [ "rand", "torrust-tracker-configuration", @@ -3900,7 +3900,7 @@ dependencies = [ [[package]] name = "torrust-tracker-torrent-repository" -version = "3.0.0-rc.1" +version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", "async-std", diff --git a/Cargo.toml b/Cargo.toml index 4aa87e6e3..47102a349 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,7 +27,7 @@ license = "AGPL-3.0-only" publish = true repository = 
"https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-rc.1" +version = "3.0.0-develop" [dependencies] anyhow = "1" @@ -69,12 +69,12 @@ serde_repr = "0" serde_with = { version = "3", features = ["json"] } thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-clock = { version = "3.0.0-rc.1", path = "packages/clock" } -torrust-tracker-configuration = { version = "3.0.0-rc.1", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-rc.1", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-rc.1", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-rc.1", path = "packages/primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-rc.1", path = "packages/torrent-repository" } +torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-develop", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-develop", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "packages/primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/torrent-repository" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } trace = "0" @@ -90,7 +90,7 @@ ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_byt [dev-dependencies] local-ip-address = "0" mockall = "0" -torrust-tracker-test-helpers = { version = "3.0.0-rc.1", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "packages/test-helpers" } [workspace] members = [ diff --git 
a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml index f95c12a0c..2ede678d9 100644 --- a/packages/clock/Cargo.toml +++ b/packages/clock/Cargo.toml @@ -19,6 +19,6 @@ version.workspace = true chrono = { version = "0", default-features = false, features = ["clock"] } lazy_static = "1" -torrust-tracker-primitives = { version = "3.0.0-rc.1", path = "../primitives" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } [dev-dependencies] diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 7b8b3c3bf..8706679f6 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -23,7 +23,7 @@ serde_json = { version = "1", features = ["preserve_order"] } serde_with = "3" thiserror = "1" toml = "0" -torrust-tracker-located-error = { version = "3.0.0-rc.1", path = "../located-error" } +torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } url = "2" [dev-dependencies] diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index ccf08b570..ad291d209 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -16,4 +16,4 @@ version.workspace = true [dependencies] rand = "0" -torrust-tracker-configuration = { version = "3.0.0-rc.1", path = "../configuration" } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 0650d608f..32c324538 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -22,9 +22,9 @@ dashmap = "6" futures = "0" parking_lot = "0" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-clock = { version = "3.0.0-rc.1", path = "../clock" } -torrust-tracker-configuration = { version = "3.0.0-rc.1", path = "../configuration" } -torrust-tracker-primitives 
= { version = "3.0.0-rc.1", path = "../primitives" } +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } zerocopy = "0" [dev-dependencies] From ae5ea1ea0dfe2c1a9a9d2277affee78912a8c815 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 2 Oct 2024 08:28:07 +0100 Subject: [PATCH 1001/1003] docs: fix link to containers docs --- src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index d242ac80e..5d7c92ae2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -155,7 +155,7 @@ //! ## Run with docker //! //! You can run the tracker with a pre-built docker image. Please refer to the -//! [tracker docker documentation](https://github.com/torrust/torrust-tracker/tree/develop/docker). +//! [tracker docker documentation](https://github.com/torrust/torrust-tracker/blob/develop/docs/containers.md). //! //! # Configuration //! @@ -214,7 +214,7 @@ //! of the `tracker.toml` file. //! //! The env var contains the same data as the `tracker.toml`. It's particularly -//! useful in you are [running the tracker with docker](https://github.com/torrust/torrust-tracker/tree/develop/docker). +//! useful in you are [running the tracker with docker](https://github.com/torrust/torrust-tracker/blob/develop/docs/containers.md). //! //! > NOTICE: The `TORRUST_TRACKER_CONFIG_TOML` env var has priority over the `tracker.toml` file. //! 
From 6b2d8e8372b002cf501f244f4d1b8ffeda8983d9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 2 Oct 2024 09:00:27 +0100 Subject: [PATCH 1002/1003] fix: clippy errors --- packages/clock/src/clock/stopped/mod.rs | 1 - packages/clock/src/conv/mod.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/packages/clock/src/clock/stopped/mod.rs b/packages/clock/src/clock/stopped/mod.rs index 57655ab75..5d0b2ec4e 100644 --- a/packages/clock/src/clock/stopped/mod.rs +++ b/packages/clock/src/clock/stopped/mod.rs @@ -1,6 +1,5 @@ /// Trait for types that can be used as a timestamp clock stopped /// at a given time. - #[allow(clippy::module_name_repetitions)] pub struct StoppedClock {} diff --git a/packages/clock/src/conv/mod.rs b/packages/clock/src/conv/mod.rs index 894083061..0ac278171 100644 --- a/packages/clock/src/conv/mod.rs +++ b/packages/clock/src/conv/mod.rs @@ -48,7 +48,6 @@ pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) } #[cfg(test)] - mod tests { use chrono::DateTime; use torrust_tracker_primitives::DurationSinceUnixEpoch; From ba14fa3e7f9bba0fe2fc037568580c711269bd38 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 2 Oct 2024 13:33:08 +0100 Subject: [PATCH 1003/1003] release: version 3.0.0 --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 16 ++++++++-------- packages/clock/Cargo.toml | 2 +- packages/configuration/Cargo.toml | 2 +- packages/test-helpers/Cargo.toml | 2 +- packages/torrent-repository/Cargo.toml | 6 +++--- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d43356ca4..050e22414 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3774,7 +3774,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-develop" +version = "3.0.0" dependencies = [ "anyhow", "aquatic_udp_protocol", @@ -3836,7 +3836,7 @@ dependencies = [ [[package]] name = "torrust-tracker-clock" -version = "3.0.0-develop" +version = "3.0.0" dependencies = [ "chrono", "lazy_static", @@ -3845,7 
+3845,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-develop" +version = "3.0.0" dependencies = [ "camino", "derive_more", @@ -3862,7 +3862,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-develop" +version = "3.0.0" dependencies = [ "criterion", "thiserror", @@ -3870,7 +3870,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-develop" +version = "3.0.0" dependencies = [ "thiserror", "tracing", @@ -3878,7 +3878,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-develop" +version = "3.0.0" dependencies = [ "aquatic_udp_protocol", "binascii", @@ -3892,7 +3892,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-develop" +version = "3.0.0" dependencies = [ "rand", "torrust-tracker-configuration", @@ -3900,7 +3900,7 @@ dependencies = [ [[package]] name = "torrust-tracker-torrent-repository" -version = "3.0.0-develop" +version = "3.0.0" dependencies = [ "aquatic_udp_protocol", "async-std", diff --git a/Cargo.toml b/Cargo.toml index 47102a349..e362dafe7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,7 +27,7 @@ license = "AGPL-3.0-only" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-develop" +version = "3.0.0" [dependencies] anyhow = "1" @@ -69,12 +69,12 @@ serde_repr = "0" serde_with = { version = "3", features = ["json"] } thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } -torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-develop", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-develop", path = "packages/located-error" } 
-torrust-tracker-primitives = { version = "3.0.0-develop", path = "packages/primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/torrent-repository" } +torrust-tracker-clock = { version = "3.0.0", path = "packages/clock" } +torrust-tracker-configuration = { version = "3.0.0", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0", path = "packages/primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0", path = "packages/torrent-repository" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } trace = "0" @@ -90,7 +90,7 @@ ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_byt [dev-dependencies] local-ip-address = "0" mockall = "0" -torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0", path = "packages/test-helpers" } [workspace] members = [ diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml index 2ede678d9..ffb75fb7c 100644 --- a/packages/clock/Cargo.toml +++ b/packages/clock/Cargo.toml @@ -19,6 +19,6 @@ version.workspace = true chrono = { version = "0", default-features = false, features = ["clock"] } lazy_static = "1" -torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-primitives = { version = "3.0.0", path = "../primitives" } [dev-dependencies] diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 8706679f6..1ba4830ca 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -23,7 +23,7 @@ serde_json = { version = "1", features = ["preserve_order"] } 
serde_with = "3" thiserror = "1" toml = "0" -torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } +torrust-tracker-located-error = { version = "3.0.0", path = "../located-error" } url = "2" [dev-dependencies] diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index ad291d209..b080c19da 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -16,4 +16,4 @@ version.workspace = true [dependencies] rand = "0" -torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-configuration = { version = "3.0.0", path = "../configuration" } diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 32c324538..ba6a76f29 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -22,9 +22,9 @@ dashmap = "6" futures = "0" parking_lot = "0" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } -torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-clock = { version = "3.0.0", path = "../clock" } +torrust-tracker-configuration = { version = "3.0.0", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0", path = "../primitives" } zerocopy = "0" [dev-dependencies]

o zV_b2;+vGW%(CbG_EE@hCetzA7{zTLe$zf}t;Ijj{m=2A$&1e0an{-<;bb(Yr3Ygzm zA&zSBkwQ8F`W;>B5@Ue~dcWLYc~NJD&8WgQ@ido*#~17{}(Ml@CJK+I2?=oH>6}>@!i!mNhFO}e_W<`o3++z zX+>S5cl(=11A>z`oD1L!M%PF&7+kwj&+h7D0Nsbp_xMX@BqtK(s=m+Gr#IUVwgzp6 zY2Nud3JOtU)LAAWJex`pJW`M zD+>reo!H14;IG3ciBsewfZ_xqroR{7hPZ;A5!neP?23v=E#dIxq6(Wc+ikX#_;4y?T z8;%uSsq)`~r)0P6Ei-H{z4A~BEY^AY?K_5bCK+DxuKOA*BeOw?_k;m z@$)1!K|8FYD#!RuV}3UAkGDKPQ-DloMEmN=>bj@u$q+Lrr3`MpTybk5&Nhy5>b&~Y z=H>F$)V)bEsT_d1Y&3T^*{|jmV7@y8^My%KyQ6K7|2fGXxsez9q`C6}w`y0SN67x@!@J3bCH-b*AZDKyr08QszYILGZfqR?}I1W6<2ZV&0 zqU0cn$FPkzdl&f;&2&jsnDV1}Xu2L)39u`1oe|~$0GsRcsh4Hl@0imqp6l9wTDnQi;}w!)u_c7qd0ZP2ZiyI8Nb zkZ8_NqvOs$@Z^RDDMHgB0%Tt9KxI0I%_#5JZBG@@upVQ62$IM;0R2G^{^Q4b;N>0$ z)UWqcU>@k_@TDZLp=Cm}`t<%Nq*l3HS6rzb1>Ef#nJ&k>CfP}H?2BLbqj6a2H#)m>{RSLl;pMamOstiiQZMr7crRw~Q zMv`rzk%S-+P{DcV!MmpFx zFuP*Zakz_cgImg{=jU^&jfk2855atj#Z~^Uo{`T%O2s9VhWx&;s45K0q4C@R%a&T) zpQ2|IPI##SovzVTwU7OF+kHC6!UW)bIP82(d((sCKq=Q{R5S!(Ysj$qQ&uQ9t&;?ysgjQBhHC@Pkut5ejTjVPWBUW#!8h z&-Nt>tndPA3a=o2BU7W=*)?pLwf99*;xL2N^M-$r|YiLNFB)WEp02ytA{jx0fJC2QY!jj$Z9cfQm&Y zFDozdFt$8CE~!3ckplpQ!otD;A_+9C#sTcmZxx|EAFTchPzEYrV2-?oiU zf&jSvFXV`dO)+K7GmbNw8vfQ~!S!H62#m&L!z{^ufkevQePOt!8vi3tb{0|BN=$?v z1N6zIQpvW)_EbhdMP9S0=e(Jil>MyrO$?c-J|8=~%E|s=?&W%4?84zCaQ1%*#^#0w zGEramu`iPUIHy*_cF#kA4{RHP0*eMq7hc^QyIECb;(h+LCz$c3HFb9Mh1&;wED`X! 
zd4jGH<A%CZ7W{3(MWU30%lwz9Aa?E$tPt?-owFs`$fsjPTLUHuf{FN`8`f#r~ZA z6DR$)ih(^Lqi>X;HH$*=p(h|2Jlqsv&4pYl%UII;=7l$&@C#?RZ6M?Mj+UE2)o!Qc zYqr=+dY@J)h-$xS#0k$uNE7pq29A@;qf`>$dqIPcGLd@8l5QJupBKmm=4@&Kl~ zoeK^;b#v^x*%G^LfB~IU(^%6$Z#iInG=CkuIL_>H+(i`zUyMxUCb7T0Peo=pn_Lsz z_X;nZgmb)2Yj^r;JDs3?pN&D0<6QTVJv%NAl%s-;>f9dzD2gabj$nD9WczR*8J;O@ z=U^N?S))B62+!^Gi*?FZfTO~cUM4Cg7Bl7TvJueG7pWP%WhX49A#94ukBIiPn6>Bx z4On=Xsv8m^15Sm&)WSubh(MjF>r7e=2^Z;uSzZ$X4e(1E^Tp5P6IC1&`|n|lY@1k( z!*uD}4`&y3qUnXX)Z7j^f>tBr%;D!9C;Ztqn#=rdzH{tPu2{&xVsFPiuaLM zH8lnzT)Hecs(WPma4yINcbpn8@D6W zb4!aVKD8fH_%3rV_Y0lby8wsnQL9c&5dChc$pl#+z>0YRBe!oe!*ou8NRjcE+Q&X) zzc=b2TLp2)qmG~0{72#z)u}c|X(y>x6mlC(_(nhr75@e4121hNFIP%6)5EPqve5o$ z{}bjAM-@2i0r6&iWNz{Jh>=Ceu1K`fP|B7dAq@K}F+wI#+&wxabz1N@pZUF?yS7ML z&dHxcbPiZep~ISe{pfB)x) z<7TMQ6HPxRX#O~kMY;!V*(PE48cH(m3Z_I&T{d>BOu1o17!&IZq>oj3$vQK@fwWC=9%We5gd zhJ5_?2Ho|kjBv%y<(&5q_nl8+mtR5OTJAlGxxL(MLbVQ{Zs_VFODEEmI^N%tj#4-1lyMWOI9f)2G*XVl+sGtY{RukYEO!oxJ z)-*K1#{`U_=sWi4PmCs=RNapm`PzP3@WJWp2#B= z5Fus3(8lx5lhxO4xf|Ti%n-g)tayPS!9;i!sk__G{L{)yb=HVv2!8$L z=$Z1e=0(Hs?&ruPOM02+W!4~|GiB-PH8vgdVb>dtSw|D-a6v}a>#!ArTAu*wSMP5{ zeL+De>p2BY*F)b?ctM<(C>0CX@7(IYwTxyCTT(HF2^rwJC{|oAiQKZ>0OC1cYxPU-o8UO?#A_16i(nB5;X}y?@ZTf07yAjtdWd4#*Gi=oQx~ z7Hf63cLISJjqbXWg5S*e!($OZU1x`sRU8+fEViclt$dDD4md7dZBI^m){9@YNksF0 z>i{K$h8~`qIH;F8d1zr{mpw;k2gQ+0n9o87lV*O@Eepx)S>5^MS4i%-GH#pMw@r-u zS}LhSw{l`1dUwCrg0K{&%vZm(etLs|M#ZiJBLm5Q6D^x(VS!t|il0c6+jYX+au))$ z4f&^=E$7}MNRnt)Cy>!?a}rqrelKUTH#QHd;mALiqiW@5&?&C`@w;EV;pPg4~IywkT;w8~0iCM_n)jLC zKfU26AVlQIRvyvM?_5Sls|FYw1 zOScJE({eF0Ijb}UL3&}x0=2mk=KURlg7r3`glLR;J+A}|hmoHNQRKqa!Zxiv3Wod1 zgsq&Qzv!ViywU0ka4s$QJ6V7KsW88K{TN!rIKPSbG=*GjuKTQmLoP+y03|4to6wM3 z(r6b1cmQBY^bS9kBn~bvu78OHwUUGt#$&6m(3ABu=CyX?KraX@A76%;GBg1V8L96( z@C*+3;~=rlse6uW**bc?s*Iqbqx=3fYh@Gxp(+^?igyta^NTX+$6IS4>}UIW{irsK zRZ-#H73noO&R+zv@){RtM98P)9X@-x{&R__=OOaRa~D)`fu~|66U<24RxdZbDy(P zhNJnGgNxY;)Z$B}sR72HHr4DvGb{0x5tuyr69x>KKfGr=#;-7dRd?HYbji@b!h^5b 
zP|$v5)eQ6{Ty`AdcitJWzKE&lH#bDb3rU7d?gBy;9aGZ!#QG@?5_Q&hxL9tiDaq>M z;&&G^B*e@w-wWqKN0JtF0I69brV#x!S(x65xiz*|*)cYxpD<~Xm^YcJUO7Wt$Z zTY^>7=E6&i-|fsC1!0(*A_G`i!^U=DRaraxdx<^v_?Q0M&r3`5BQ9i;INusS9i#5# z#})pN70XH&5Tcb-K~}6a($_Bo@HK7*pnn(u1-te4MpvDAczDpP$^hAFaA3f>Onk}_ zxPvQ%^|${BKLO&ocilP%x1-N*N}DN;5hduil>FJYvA3eiZ8Kj;6ssKM&3X>Kb}e<& z*3nu1dVN(FsF5SkS&Q#cuZEKU;xdNAjA~Aw?~MQpgyA(6dd{z zjWrs|AvteOQAv92iQvh1oM_u9-k`Bv_t#%i)G<+P8w~_{gg_FkL{{(LpJYD?<79=Y zTYnuh{wyT{?!c8|WSonh?S$QW%?29sdCyqYqd?DBgB!JzDA;N$o4F;J%-oqCxX$<9 zwL(R_83|_FMG*|~fB%yOC|F!PhxELxNJI!y{CV9>74#>F7ELblGg(f404FUqukp0J zx^Sp-aj@OXQkK6KqSp)|nHvykUcdf(F$mdgjD^{iZq|;Iz`vTWWs&3OM_zd7@#AMgM%}yP<_S=;MpST$Y8{|AE zzWM#FaOI-)J2?{Y#RWh8KXyod?_fKHm9*b0s-l*#I_u)6=i!gmddiWR4>l`%sa8Xo znL_@3=VHHaz!+-LAvlJ#V1)7fuRfHQ!<~>(laD*nrDuS9VI5rdZg&PYM|T(_RLvYfZ-4=^k9W#? zOL6hbF3m4EA4UIRR!N^KC#bNm3-|KwP-a0~1ztBVj*fVDf}WCAi+Lj#jC*ftfVb_d z6SrQ@jK+P_b@XjpJ&^U>nrqhqpXVN7N%jJx6L|y34n2gjowq`Pl9pMm zo`B+mAX&`Ek0`6G7kzd1`ue2o7vb~IqQUNjLI>ZKV{l!3Wa&!*O*vXgGM(w^LIk^=F(OyZ}jjaOW#MJbM z>?Q@#{|{qt9T(Lbt&5Kef;7@nf`l~EA)wOI-JqnjbTcR*4N7-NjC3~)N=i#NLw9#f z{5F2S=bU@b=iGaL^VeqBv){en{lb`sJ+{jmp=x?LGHKLqOT*G-3Q6b%i9B`&=TAOe|`MEvNz9>02yuuVS*}z?lnxR%moeKz2@jSJ^*ZpdHQ@_ zuoU&VusZ^7@&d5302{l0E|5W`d9K-Qmnq?`<1`Hki5pM?t=ejMaqS{!k@Im_o_gWO zi>Yl_8@?yN%@0~W7|#K=K)Q}MKKxkkON}D{O^A)k=o8v0cKxGkYd_>?FAZm+!Zbfq zbMriz+(u;;%`9=h**ZZJ;-!WN58x=117)@-r~bOJRQ7G03T z&@eqSbI60#6<6G4c@_+JG7eX7+kx)S8zNMw*e0NIXV%jVtJqD}|xx5k< z?YN1VnodHf9-J8?G~9xu;1JPROIXsAB~JiJP{3E`mMb>@uG)0N#!ddeYz=yQB+_qQ zeV;NFcmSl=1ij5|*}eyQKX_m&KvQV!QxSV{^PG_GF`<-860k?8G7vH*ahI6Tk!&(} z_&NW`94-OIhX}?$s}+{pK;^KuU2z!AJ_~%$B$y*AcI#J+L{^9!JLu=7%<0L(`efpS zuN>h$G!O>bJtx+gKWk<3BZSIdqO^Lu%^9RZMwHx@A%Fr%?AjnM`XfJ@7+|xht9}?H zm^IwP9cf4ttDsi%pvQ?%39usnF}42=&^H-LQG-;xsh{5FRw@;1qX-GJR%~5Xa%3A{Vd!KN=A_7I*-2`PDxJteCU8BC?RN0*GHrr4Jq>Jx%`s(ql z|Hn?>ugB>xbQcit<}&MkOJ-PxE>1q|F?;6IWzB4`O4lk_J;$YB7AIb%W?5kP6y$;cv^n` zTP1V-RaDGOVRZ(5&z(IT_i3w}Ph)w%e$UNiVNEZ0FqA3O#IgbBs~74oAXD-r0kacW 
zrtZv6oPSKYoR3Fc!&bZ0nUjtgnGvVgx4L+sPWo?}?6GR!?OH9p%btoT;64oe=n#Mb z%21wi@9L|7Xcq_bmOKD;nmWnIJcO~RVs@<120zB$p9mUZmjZ576~pG_hSSkT6e95| z%To(Qv-7-hEhiGqPe=M`H9&W2{Ng7XPrw5a0Gh>>0eyfREWjJw8Q;kPL!`tHl_!Py z>aBslfjtU6*T>ry%<7meCjO!HW=<2y6YgvJ@e+19bxd-nO426fi*eUC1aGL9aw3C1 zE#yk0y;7LqA|*79C5XUm3>uj9ph*1ef&DD#>&N%Qe+P@|^PE=bN_7+stpX@VOTO77 z?8k>}Yp&h%>t@#cywIcSbi+3VoidA)TGlQ;e(X~mA}gQ#k6*kk>{uok7aRR&d@}R` zLF524{(nQaKu+5m%wQae9(z&!)A9w#9w$)Xbkt4F<8t(XdvbC{Ov%`!b^;w#-}M{= z2BZo5wEv_rVtw*~YqzbmIF|kqi2F%Y;6vOMALjdzsw#CLJ8jcD0FhV2cxCuRK`W;L1>^oakaMkl3)E*gRsY-R?}x$)RU*bBGJ^PKtC;rZUIXatLUn{ zxHuv#j8OU5u~r`#F(1RhkTE?I6Rz|X*x_ZGA`@VKH3Nn>r&HC?FONIbVoam4#Mo5t z5J=-V1}Je=S7Pdh<28W?WevU+lkx6$Wo6|jISM{tETY#QwEh>#H|P=m6X1%N9$Fo| z+7J0>i&3p8u4`1D60G#+o}89OTi$HIj=OQPCw1HWiJu;PVYumbb9o5sl@E;gF9?VE zx#8G-`Aw-dww3N34Yd*(iSyUk=#s#CK{{KkhPhQP1oXr&Q^dU2EZbv3B^p@M=ROWb zew=>7pJ7r#S?l^e*CvKTF(6^-g5B?Br>R)MDIn?Ra0)g#ka=!9r?(|UWoh!4DsUzq>QXgXvD zq!p#3*?OQ#vTu=~KDu`_ZG;2jX7^d|?tRhMwaW8-jTm&M6uKfbv~58$N(nA=5u*Cs zBs;t93IOz2hHJdXPwy69%w-%Tp!$o`fw{SX7rhF|0J0|dV!Y15ia;s<640Vlh}1fF z7o^kZVT9yU)>IWc{MPM@P*9oBP4|CWxa}Z!IAYynB(Zpqcf{67xr-M z`RRtdn;3j`mEX6PzKum94{ob#as+l~Hdb~lB+_d{C_n)brcvJijCDXY0ULXxvMLs6p01p8aSpeV}5QbD$#RJ65-q9QxKdm@t z8(e$4)iL_LKcTTAhuQ{GFS1{|bwLPj#>--0#+7qxW}iq1r8G~Hx2u_Ol(T>8G{Ac$ z>>>u`_h+jL>JHuOhqWWCwz(lPgNtR$QL>J{PFguVqJnY77#^yk;mn z3(%D`PW2tXQqqXrQ_$qRuw*vIerM3`mAc7;b1bW3v zY5^Cc3$-*^Kva;AV3qAm~_XCmeHs7W;^^o|JB!QHo zBF;a%KwC#8M4)_;?n$G+rGNAj5Y87eW(Nvxt9|@anV2c#s`slW77RT-Osaz8cUPv9ypxs!`e`e;BN$`+~h$n z4_<<{T$?t!o}&Fviv>8ug-Y0!Y7~NF-HKJ%-@9#Xr1s9d$ChjRMndbzrS$+wCE35O zC%v)6J`;i-Y)QW4JenO&&^OOmkE3&YN6-rM*te(6*zl9rpJc2R0MpOW|8|!mqI%uZ z{|!4me9**&p!DmRLS$31&eUr$KI<@6H4H6Rtl;mG#>|?SR;&${pPNSSfdB}@PO2&l z^r(~++Fg$l{h4|EG=0W3TFrvVi0YG6_wM7Lc16zjYg&1FwbP28ioaMkji6V9SVXWE z-(%1gWj3|Ja`)4)b*YS3&)rQ)oYZjP&27+4w70WsK?(qzdwq?{-TwSXBk0c(%tc_z z3v!v)Sh%=sT?m9Em?3~lAmNH(K*GGv0PRCJh0w>^`{4jZe){ehHlmyA9w;?=QN@=3 z_1<*LS$>y<5mQ{(>uT{gBd3e 
zEYYvs5t;kFeqxp7cQNS!@U{e4DnMZU6i|50<8TqE5jkigN)^2ku?OrL_Ks!H>kGei zTp#6el>R0N&v1aT|H=vE^tPxz)#tg<`KvmpUwO>_boBl-4b8sb-iTB^uqs8%s6KvC z0j;y$s5E{U5NkN;sG|P(@rUlIg0N$rSMqtFZ`)qi4n-aZtsP^oZ)a=3tKW5C$)#Ho zo=#;qGJ(iF_glb;x@v)3I!yXfxj zHUT5F3N;_fXM*;)vhk$(Q12zFAse~3-h&4xVcXSCWFqH{MT{PhmXwnOYl}t3bJSv+ z8)^Xw!NMQ1Pnn3BLE2r)zO^FR_d}kXzumHr)1aA&YrEYNotbJiB%+`woZ4*zBw}`D z6%||h`tD(<7?c4uQCC7t22qPXPe7KiDN)I(d#gN$KYajQ%zHJy*C&M<5K=NS2n2GP zfxZynwEVRMY++xj>&~ly@Vi{_@bdCH-5Bg297GSOI|t9<%b~r-p%ytS+Mk+5_PI|E zU*9vns`9k$Z4@9B0=dO7;-u2w!=BTTybt1DZ;PK@rdqP7WnkX+Fk);suKR1@OS=j`V@MYVp(zaF6gPQfi6_@t+&r_EpKYZRKy92{To zbIQnkbAEK~(&d#F@m?1k);oC;w2_8*6iytqMO3WOFKfk+$87APo|%SfoN_ELD(V{= zI%t`~uXC@SDVeY-PbUzcc6Qfn9<+>moB!z1Bf&<$AN9?KWdOMm_=|wSOQbntwP9yx zhiz&Pvo1YjAsV1LP`v-`GCFU`2PA4djk0_9u`UliM9@j*+)EHk!eJJlFx zssRF41Kcy8ad+`>;C@A4@@(egc%d+R(siRljyb?Zxi*^z#`gNbyyQV&zcl! z7UAU-ii3FpEfQ1HfBAT7l^34vkuNZk{LhA*&9Xk27#+tX6smAi%~66th9STkwof}j zL45@%T=VpO#9QZ-xJZInSvpD?Az28;69o6)(bNpo9 z8882=qfPP-4clZ@`v8fI@2q~hSA4Oj7cAE`#(6xE-D4Fw3cG5gE}cB^KfH|#wqjSJ z(Pkp_q8M7WO_`2)@DvjOF_SZO`@TvzzJOnz8ZpW$?3j?gG zh17x~BH1VbiC0Nh_V@`PmhpAz-EqttlD|Xrxo0wU<^uBo zjHcMl)Awzmu41^aKPe_Cnkeo&^wGCX>o4anES29n)N91}TB2G#{#OH-xG%l;f3$xc zjGG?kxC5kjFl+h|Fltnfrd2&JByL$tl(_(AWckzu1UP=0+`@iu75LYH>z%7R`ke#4 zr7o2kHoX1sz2kd=f%~M>ECBSs8?R2jzAph6V&`A4!r9J5#NpF>?RWI0p0+kA?6v9- zz+-oZaJK}Vc=yPHt&V0Y(8XwVL5%%>ot>CgcP3EKY{#;tzwn%#kDo+h3bjLl{W=%q z_wDHC%F9~7N#`qz`5c%N>6AF-(6d8#f|0(* zRmi9iCALfLalyv4p6dwdpk!ix@VB`6^F^qWuT=|H@aOu*hhB8)g-fab8HJ$xY8~f) z^Q&P=2yNayqwzRj6E-WmqrJqk>GY&beq`S+<*r%db7B6nwlZwvOhAZ;Sn(gv+1FQ% zhMb^m>zj?NH$J}IPj2gbY$Q;PMfSklCL|W#M2e?pY4iqO7VKsKH5LyR3?O62)C{lO z59U+8e*FVcp#@ySzkvuPDYbZ@92!95T7UTPdvI`FR4zbMQ!`z}&89Py&==#;SAZD@ z0fk9{L)tq=K6BX)4D=&fziB1tm7gC%i`D`$Z^5y?QGgqwbP5$mX8DoSKx$8zIgwe| zI{*#>DrdPa{k#X&KpO%q5-9vc;qEZa5qgchJv_~z|A@SWp`d4@3ce7`^usoke_uSy z-Cc)BTwsd+Q>jbRAhVMDOqicwwtNFwg zm(sK_%=Y2er!T=g;d|6%6fA#lKsif%&?6`SkTWzT{{kR^)niYz38g;n$SJmLfA%(* 
ziILIZU8D=puQ|AzaA1^-R{{v1W6sYReC?G5<6`U`p*WtEoAKzeRY^^IVw9Bl4ej}J zb;ZZT@@I!5>%$&rOUy5t1QIRu5?cYF|M@-nYok4aS)?-%beR+*2N1ge8}Wv6=o$!* z`C;bQf*OckwM?pSG&M*1WIqnD%^ar92BMgQx%3iuNhx2vlxBnc_POnTVv1g~ru*Y5 zke4hEbXoKt%Qj|tz%i{r5R{$hEJdySuS2$-d`{Dz{e||#+(M8dby9kw!=Jv{QxS;- z!5U2@h>E)ewPI$^3I-9xNo5I5M0l~Z(edvX0)@d;i($RyJSincJ$@fz-{u4R8lcSi zcsbtw2cqJNj4yrv4oW5;r9=pD8dX(}|Ee;6`YF8al&iWx?1SopH}ec5!X`;9e8*eB?Ys7W1C9oi zCq{0IkCGPE=JfXkBVsik?O;Me_7}A>Cpd0~jEzL;#qZdRZQym${d2irW-9i^t&)7s zhygeiu!Z7qDbMq~8=9rvh@vTP?MX|-@uG9gK^d9na{^^Kl_FuDSS%?Je3gvKChcj*$bu6olZZXi69Z;|DcPbpM^wA-o(@GBtA4!R#w>!zyD0s z*r%fmjJM<&pQKF`%>-|&c^MM}e(Wz;BDJQ397=PB-95-?D z&SV(B_5|>>w4u7^g0bgeM_;NDxiEkcS#GDiQ*sY}G-M~z^MfZ&-xmCI)$P$c0WINs z4npXtLZ*|9L*>1j2^>%*YYLsfN#(r-z;*Aq@RZ|4(d@=|xo-mMvnhTdUwaC95d!wIy(*8gusv6R_8V6=XzDh$YvbQCg5qnpy_)SdN>!~f0)UWD)YWRt z>sUzjT!!*y3a?dAbsNBJ0KB8nt$~)-#Psy@bOZ+*o0*=r(CyrxKcF9yz&vQbw|J)+ z{`p@FL>*9)bT6AhkSbdxRId2=>E%y^#qpyLHNGvX={%!c$0YjWBEo5)e(XZ2vlBC zU@u7KVoE6jYh0+r-8wt|qW#eMAwVvq$=q4XHv1R+uz1ZBj>OrM_NZO#Fu-aWfepk} zo&Zn5fHr-~B_(uMb3rv(4JVV^Y~WWuZEN-XH1n*E4&2OQCl0&kVndV^7}dmlz*MB) zW4L>Ghv$;WmaZe%utx$F|F_w3+09MV3$jC^yF@bmyLW~0&$}w<9Y7gaio;j{yC-KU z=n{^tOdfOHLx>BU2Yf~63WP{uD|_nve(!7j;GeI8?);g=+U>Waz%%^)surIV)&7}! 
z_21txyaIj`K_Jq7d>`ZAGmIWafbVVR6-}G-Py?EsyvB`w`dlB!CntgQ_PF=t{-=h$ z+jpt%r||1jUyR0ZU}MV^b^(@jol2L@pAgFfy){ok%7o zCPY0B)`X%DpC(n$?E}X^ln@Zxb=R;e=cH-1AF_Ewy&5L7JIiX%-&Gb2FTxj+<1==w7-p+U1oxE#K{AwvsI)e1>P{4kc z&t*dqs$nSo`t>j1DsX=Z*lSK5BO@{3Du7q;gc=at1a~Yt`74jjL^B`^)=~QQnAp-@-?{5E(V@OxqQy@l) z;NZeh;lnN9Nc`KwQ=fpZF{>ZDqb&>~|LyZm{1>o$fB3KVNb(sitOjdh)IYe2-R3jO zYV7+k{u=T(k#sSaBL4fD(V4r|jSanvx>wFL-YR)WdczJ_V-FQG!t#rw7~8Z{C>PlC z|8(w--B9qULnYDgYxsTW@sDF)^=N#^2-6cG*(`fON->;l@9^oeWG!sUXXZ}a>$nv zS{v_+D8G~t(?4|cLHIuf^YTAn~J zkfS6LWk&GjI_F}1xpxaeFVA!ITr!8`i96T(@xF8*17mZ|2aB=YuUwdhjH=ESuH!;9 zZec0SCsuQ%(8!-a$Rp%Q`e*7}APQ+~N2njU?y|@cN_9LB&p+hwws=!#yZc!O%0244 zxx?qMx>?==_fp!)Ql{^H0~W);1PL1iEVfiC*O;EA!tib|KuU*dlJFnJOG~ z9JsA|!G_A0R)CoZ?R(6IJ3?7;y7_}e<@U>hUBuZBU-%ELBovG5i)@C)hohiXOS`O4 zGq>kuHz6buFjV*V{hZdCA}d>W_SDm?lRR0=yJo-Z#Xu%XQkp;Zr>M3qoTuJm*R>~9 zFJ9GIFRfOSaTqmyxX4l;+}#{p@C;vPwhHZ7ksxo35G)hR^FKS}ilk1r+{zI4p51#S zpHDeSGCvyfxCCxctBeG89w0M_@4(=)?IfOgL$Ot2><=5q?Io{eblCjuwv5>ius4*c zzL)gH*09UQ%ao*zI8OzSE3im&(NNZil|FVV98k{)n)O4k{DWGJer9 zD*2lP-{2onm$Udr$3coL!tPa(K5Q(TT93_`!Mz-<9tKNsKL5NS@3l?myT=DB1#_Gi z3MMMkmb99`d;rKN?vpB9jFV*z4enXUHSdP$X}B!R={P<*SD>AmH397ki#T0%f}vMZ z4h6ryrreI#I2A^>`*k_^JAVRnYP}-&sn=}mz(7J$Um$+7*@UF?9cf+0;G%s z-AK=Wr?6qXs<~;e2fVkTYu}10#G`cQDsdwuilIISqw}8!9Zxa};wJVM)SjfBYA0#j zx+k-9YLk+{PA~S%&!e))i<+1irAPQ7tb2OiBweHYB(=)YvEtE(y42x{X-AfI`ncTy z4es;yk~Rky+;5FA%BSIB=&#RT3Z?Y!yH8#9z{nPS?+ky)M40Nk0jK2)W3GTVq}@|> zA^&naY9_Kd8Aa4{j2VUiIxij2Io)2#XQdMmHkSnwrO1+ z+S0;&FzN46n?9t|PDHgpU|?VL`KmK0f+JiiBax%1Se8P9oBh7GJC4L*zUNlcr; z`+0a@M&fAa&K8?4#V>Fb)SGOv1gN4YL@g9)ffvsnd{dOYJn}t84>htJ)gGnCnbHx$1N-$q`e?W$%w3ob1TJZ&ONvimz+0-<5oo4=AuE zU6Sbn6Lpa6!F&+*YgN(d$aTXBit_5hHMwQCx}7tJ ztobPR)N2KOnjI1!r*@O@rTcLZEqi+o5RmxDtM~_(`}`JRVPSE5vA-(%dp$KZbtqGe zGU(TUC9l%hYw|rHod`%UO?rpp(MtFhT~;ML*HqBI*?{3j=`th1@HVH8`6{8ztuIU1 zqhebs6SZ!S_Ef?Hzu?_?X2u3$^*>ts~_NsgD!a3RUhxjm?4FutH&qU;XSbOHoYW3-JE^{ zT_A13p|W-TLIx_B0Mn`Z2{d*V?Ak zg8=_MY_2`z*&;5CoNPXUI#I`Sem%_ue6DaPiM@Een&FLZ&pO|5wC?5NQ{XSa!Mi1{ 
z?eZw)@)c`KY+hmQ@@PSTd()K_EWmy*JG zJY!2TGxuXw8)|Ca;XDMjdY|9NacAS6E?p7A%QpG=aCfSl*6@~g@Ey05v^2T^@eaGD zUCTq(0n&g(pCF3rnKw4IPmImnc$#*8k|~guz@<Ri}Z&zVkg+xRd z2$J7hiXS~(%*|H9&?!;wGARJVwiFQ>fykXC2v=3cuKILG@oHLCW( z)W8Sm^u4f!0*Pk(?PQgDO~t{MlAB42V5D-Dp;VOI!2m?rBE>qa4rmHJ6ZDR)l0xW zd|tz7Wyv~~gfxABD^MOe7>`o7iRqV9Xc!@qOd4!h5A^EC-_7)0Ewwsq^5xm*UahTg z3LH(&k}x^v1@d2YEzL?W1WQB?|3u*dYg;VuC~;O|5`V>v69Xs!((OybSD5gxP(M`@d9TbDE->t;iL5;gS65 z5Rm?OA;tzR{Lm9)u{{$uqCV+=a8zbmEbuJXM<^lqe{u{c4pGL{uC1P@;X$HDi+`%s z?v5Ev&e8T_pKs%pH@3yMx*|SL)y2kuh0Z@omFHhyP6dZt;Zb`7i^$s-jYN=jWs~MJ zu?^+cSOG-K9KUPdnwn7g-^dHX{}bVVP_Zwx$|~U7P?+9F;bq!!Lc|f3_tI=$!OZ70 zrMUX}cv!fAqh&@aec{$sXD2re>4M7uc3j=iY>TJ$7(r`1=dBajnsSP-%CtMBb%;_j zym`g|*$Q7Z(WoV~lbp#k8mNF|4qz7#_gP(*AX|P81?<1Wos)7fu52M^D0&?;OrEde zl_C+ayH|cNZN*kMAr*5$?0-={dELc05=aXc zR~fci`XdFs*(vt5dZiQFX@$chSF+xgs6FnjwT-WGJEtLO+P-bd1YnU)X1&XjPYbD5qKc~u@Ol(^{ARcXim<#giBx(>euUz}HuPxQ-0Tn4Invl@8?d?iF=7;u|9o>pdwsHuCOxt1Cv2<-Hx z5P>1s#Vm(I{%zi?!%QxcaV-XiYtK#mBqjZ*OR0H86tIip_i##Y=flh_5;mo8no(En zH6a>rS})ozkVUKtSd>Ey3u_1suKVP@jpl^n9Z=^UrYt~ zhh22<>XQ8PulIQWMz;W8JRbls^ndb#y^Da-xJ%Z{-K=i zPE<_>;?@2|)Bmjz6ai8x+_-JfZwdJfV3tv+vVl+0F1q#TV8`OebUh~h@$-gHT zV3q9Gd3vPVhesAaDTi|zDW_p#XPH(M#n@fs-JJHda5Z*p6jxu50BzL<3#cxz?9fb| z!e?&hZkny`yZUmqDCg5Y>(jqaHYWlbNBj<_= zW5q0>?EVWoE`fRk^Rwo>s0L;&l7^!y@ku>v&_R3CF*qz6SZu}9@QTrQZ zi>R&@3xhst&Zp)KEK^a*SEHST-omiPpN+eN$HO!)%$a5DYrY<@#$K}`DHQaUFVm(< z^cyciesAJa>wWa;e<1EM(w(m$kSL-%iOryT?18kHtiix7dPQP6g{S>rF*T#~u~DJe zy2r!%;ezF!hhwd^4oe`R#7Jt9<9rIU+afQCwd5U1N^wtc2bVx1dZPIwp7(3?&-cs=XO4k$ntrrcMl}6T1SFCMOD)vT@nha^nQngWr5ix zB_A@7&#B&=hfAZx?96K7Brs|OGZShzv&#PIF420Pqw!nwx6H*?K zF6KjzT||>67SU$s$r4+Ivh$_sN6<>X5%9EhAWmUGmv7c+|{C*lit(S{;_g7)w z>-c(rLgADw7gusKgWpos$fHk%{bdw5FZuQpEmVvzx{ne`)gMz=fF%lS=05B7E9D}O zhaWYHk<>WNwdKvHto<3hjw=h^!Tqiu&-=j{f`VRGM--*-(B-Sy$%@C9+LX{umH5impb-Q&k>bvgDL)CdeE!G z_^>%riK_A?9MQg?r*xYFGxQG;K-zpJRpCPrcPGjTu@dHkHOS2nHYGW9SRtFY*~Wbu z95Mz(D_Mk6H@lCVwa1=vGvxT+&T=H~<#UN=q+xa)Iug7cW6?Zmh}ffi=%_s?QQQ%n=c(wLs`4NvN#U<|H+27 
zO#N|M`{N`>E09+^rk@lyHm0xo&YX_B?5;cH16e-Qjk6~1sfwI);N;>EWIHSLMh}3v zq6lZX86a*1pS^#7WjC?@MR<8`T(O#m@ao-}>S2ogc|-1D7Io~I&&J9x{0Z&dw#*ZH zO<&{ZXDQg)ZRaGneo{Iw8jarIxrWca9V9-4sl2 zn6l){O!0D*LYPz!S@WZ=e%V7cayZqpGyvJ0l|Wg zMz$^l>B)chtqimX@VK(EATAI3#71?~8>V}y1$rR=2=LJ0)pLVhoh(gVn-cfgeuAPJ zCve7#_qyr?gVTefHIxih0>SL6SQG(+3a7?qfB2r6M#4&rC(NsaK1|9!_g)m;J{(?1 zqFQp$@AkDDo~=(L+0D{DlM*0N3`xH=9J$cKo=oFmYLggxb)WmS)9;yZsQ$TpO8-8+ zn@5cN6p%jzAj|mg`)zpAy#elH_Xf~at1tUi1hn`^{Da?XD!&D+-LwnUcrR3H47Q@1 z`|K)41+&4;7wn|+Y^+xn^ZSa~>wpY;<|W_sfrtphtC7@t0$!scQi2oP~& znKNhC5>W9E89)zhPS~;=)|$t)!q^6uaVha0R7jIJBy!HT$j4>6m>3>q0`r9Ub8-=_ zb}P!FFd`q6%%m@Fw_%`xVS9D>{Eb}p$99QvGDCk8?;M9}myt zy(o7HTQ&(pRnsP+!>7exS)}}rFwFH^nJDKIa~JTw$utBn(jChU7Y&G}&vM9V--lcaOZege9vL^QloY=>x zXw-W*zd2~h`!)Uzb*WtQ;kWVHxhdb%A56?@%k|oJ#wKz&DgH6YZmyZ$U-LZz?4FPd zr0{Ue0d@IPBki$IQ4)HOLvRjglAoj%E}s5MPk@YW_?{*0VHVE-1uf`P-sY~{es$5Y zGZI3%%vIDUtzCY>?v=`8SI=F!{Q4|qcEMJheFA?=T!3HZ9;d<#YEF%bS8*@*zIuWn zlWlL$t{}BCb-vE0T$?FKb4k5K>=g6UU!bixSlM-VdS*UFO3AlMSEcPKjG}QpuyO&M z-v`NWL_$ZuozeeMr=gho<@@gBKm`eJ ztHw%|n)0%%p1yHrvgmzkv|l(B-XnFxq-{E4++O;c*ZhJemOc#(?a8{*=;t4Izx!= zlC_!eW`|(e6+H7P_7=Z^Hz;C(MrnYZW~zNcAyLQ5HTWjCIi@Qr>l(5L8kGF;JB8cK zZsbm8goQF&!)u=zZ@+R;9FQ4#m~Cf`;G-=yxri=qP)YBz+YaDZ~#_h5wz}#>8S()%R?$1P^&r-?nj$A>PYa9x46|`g-)W9D2hF zqfia~pP{fP?HT^lNy~+AWu^lO%qr=8DXrm)9o;HvNL?8|uFD2b72ng|$(7VE4;{^i z7scoeadFRa-Rwpy2(3pm(Cz5!cF}fYMTB^b&G{1m z%5Zis-knm!VJGsDWX)YS_hYd(aAN65`gL@n*sE%M4#)dF#n#{jxgk4wPpmcO-fcW| zL&;V(pZfg%HewxE{kSWtAe;!o%8~gyeqKTXH3hHPP{98mk@Eezho2Y(*qTjne4te& zaMvgjzIvG8a`|wSeB!;|uJ!l_rDvL1!g~Hqvnx7~O|jFtX*xx%RcK+LBz!z*Xtlf!@!_H_8w28TDT+KPdyi%? zgI={i4#WKX6jO#Ju}P~q|J=sN8e!e2Uz})MZmd%@xw77*#rB<8iqM?s^#wmeP|(-j z%wBV*>T;_g;Wml;zhi@5sl7n7m>`$gcsElvUv8$q7vIzy3ddD~4O=-46)8~`J(H&h z>-&TXBypPDzp*(jV-mLry&eZEn;P4$9~d=XYSNTB9$>|a(;1)d(~Os29k%&7H5e&! 
zav6(3 z7S6d?1oH7Xxo^|Frw$KOQ%k?Uf=rakDezZx zqL&zC-cBs|Y?SGzBQCwbFLjYUZ7-dbHW<%{Ed#(9x<~kTclqvBgQUU%Vn4)fli#To z6)jL{d#>Rjj&chK4V{iPAmwa9G-qXYiC(r;AC*U~()yWa_hy9_BkV@16`&ijynI$T z3P)Pd+qHJL_MqlWPwSBg6_{Ac!Udct!lOCthyB>fRN`TENSLmY^RJ3=T|XC(owE>{ zEE>;C74X6$;qG}LXsP-0vcTQVfHpVEjN6>ICj~rqx>XWo0U#V-oD*7opqfzIK@s}m zZ6Kq^)oyayp((}Lvjp%q?pN3C`YEhLP%z08gNWDRkznGpV=aJ|dC zv}U&Ty?W|scz3+{2L`E5;az-f>fMhI+Zo2l2PlG{g~tx3YIfC@_-_w(NUOl@n}S0< zbo^>Zia64=6N?z=u?*Vu3@8S(d@XX2H6yqq8%(Utm%%hG7$Kzz}7(fZRq-870 zjbRqEu;&t1uNRot!H9R8t3;4$^~>n>!&XSex=pV^eRrnB?W7^Qz4Gh-hy|b@3u-># z03ZHJN?dq>{FJi=$GAd1g+I9Ky!5omz)SAl<$3kE-p<)BMk7ygp;FG;By9D34ebzEAUD0dL!S=;i22DMqVR5O}1~eu2jStehzs$O;@p&Y*nESLY#FU`Ov`gfKtqa?b`i z3-K3aJ}+U!z%5vuve^G%ou9|j3n__8jn7ovFSUQcYO28uH~kf4k`8cr^Jo295rTFO zh~bsJh&u24p!5eD|BN^sQ1M8GRA-;tlSIbN4Sb#24cJ9W z{rTfc9JL^p>zNFVFIJFLU3$rDqnP0=UUnO?2CJzd{Vbl>n_{|pDpDoNLp+_V{9@8s zYw(-g+KcaV8!+QUGl69Fh%%q03#S|ol;M<(Jla&iYAoBTC8nk{62l*v#&$TbzPDf4+yfUVNEomN3Y(nwUoUe5X^as zSfd?iytdSz{jkmw7%CT+;zZxVTVg1$#`#ui;jZs3a<1R;CHjs#`)2%` z&xi(k*W83Uq{2_Hxj|QC|FJkoaI$_Jq+Imb{aG(*tx%AltNST5lw8}*`9PEgiU z3Bf0NO#Ft@u*y;UL~SX_zB?-Zub0qQEFg|tRU2r5@%6g{-b^yC;@hxvbEc#PYB7pc zFCR!mY~LVa1E)}PR)Yw1wfOHH;>;f}?`_oZ)yjC>R+np$N4F8+qOZ+r@=Es5&U-jV z%-S0?4b-D{E*?pSg zI6!{I3NL&nn@Xj1tKaxYn4M6q#2)oapyNmEp0T4!`qF8Wy#1(IB{<-a@INL3<})*v zc?%EERyy%U-&wgjU>#sC=SyC}eLLc|mNuB63{5h2$(P|=HZr1K{5Wk}Jcr!g=>GB3 zXZ`?F<#FPB&+^=k{YM`m^LTQcY1(;0SJ!=g0zlmVi940??RMq|OxXK5>easQ-g7?H zlTO!9J^vtq7)GG~5DezXKm~0u5s|8&6mjBduq7y33q};82@I$0^w4%yKgs zhr??Q-vL=v`276_f5*9%gMeLM`be=r+v$yH#`Tl(^N$7>=PoD30;T?rM#uJJRi`04 z5{9=MbE$>Aj%m)$*mY~&7!LDCCsj#{IELSHu-x`cbmD|IbFz{yp-ad|z7=l$JwApD z{L;No!W(w#XPj9b(%%UqloVyIuW(Rg2KZkr6N^LJ?bA><1I)`QTnmB=1L|FYZ3ZuH zm#vTj`CIs78Y=sVF%#u}ryWI@YW=Ud9AT&t+j2jplRWu0s86`CQ=ieLiq}%BG?$Bx zx$T3}<~A%N|HB1djY3*OL}G@(>AKF^r4QEw5GUtGFLM?=XZNmKKp(AF(xV39Us_vEL|g5gAl1^`D4s5C@9w3q z(x7F9JFMOC1Ydi?9nCe^9QX5xgnsnwRX_s+ZOUjWP-uz=J!(HdT%WABQbu=; zbL!ko#7u+=+4skfXiE_*rNj8{LR4x-Lx0%aelKr1yPi=yfX-d%&52)6!Ud_zkq_a6m<`R+h{qw 
zcLiH6c4+#!Xd55&(k}UY`Nz6FI+W)1UL?Ygj+q>9fNqBDKZ2TRm3=o6^MgrsG{SH| zWF%h@e~l*)2*>%~(G866HG+Sd0)ZY12Z8_IBT$hLZ<+R=!*^dLul`Pi|J$kW{w&S9 zml@XCT~7jcV-$zoEqJI4g;TO!F8ICq+2nV=lTk|cN2=xas9TG&D7`Ymgyrt)0f$f8 zC4Rmm{QuZ{>!3J;r(KjNfe@VF5El2~&LWFja0nLM-8~_Akl+r%9fIpZkl+&B7iZDM z7hmoszwf)J>ejip>QCPTFk9yuQWbN20(t(oxnG{o z)+s-Ie>KN)S0dQ9v6y!|o_us=)b8^8o3_UBwr6F){us0q)?(C^yj<(Feq=s$yp!Mk z#y02fnCEsWyVO>d4t zshU_p1?aGc4rOU@8ZNaLkJ(fB`s0#RurLeDU5(XG_AI?j`ivEqh!{AJSWT1t{$xnpFf$5mHTMCdwq43CL6uPgOLb!z(Q{xrryu%yu~ z@wGa<4%$!3$#cfI>Z|mfmh`9z$Hto|s~jEbC>l7CW?DVaas0U%m2S?BEAu|^;n&WT|kAe`c z=!vcnF?8hSeRqd>ZJt`ro}{d{oy^cG7<%8wtdl!Fte4Ixk>Lyv5OsLiD(zRkHVUvd zWp+V07A!YDL}uXTS)8 z13yy&Assh8JYR-)w6<_@%#FA_aXS$`Z#=TY=Z(3TLj^iNHVi9)Kr4F5j+lw}W)?W4 zkd}*{vfjwB;|xPH4x&kg9bvC=f_3-fzy)y-OAq=)P~>B(VPcB?EVkZz->!U5eVpTp z#X5V-!lVe5AIV?C$U*p!hiz$J*c;DPjrc69v5tHAO`Yq_ru%;Ny?TwHz%X_>5~mzo zaJFpSr2O+|FVWDD49Qo&t_C18CpM#cHOp}E&g~A7>OooI0oWAgbpbzJc==Tza3kv+ zN~g`Hr{z5x8PL(gqt*EH{p0dNPG0-FgyHSXaZ);a`1KNn(FM>mdV(@=LfFpCyGUV4jZ>M{*LH;RWV#=@+?}e~MbYWI56CBqgX}BoDZ0(g1;@7Tvf=b0mu^Eq|gv;dV z?idqKuPd3#^anlCU(150COyY(mK_i_38w!eh_t)EV4V%MnGHJj<3I*qC<4Ewi!ry2AJ z3pTUHz~Si~Y4Dd5*nV#N!5RMlZwH5dECSJZUxgM`F&v0F)_EY|Cx2 z-6vVZH{CyKGuUY~#Sz@Fxs}6>SH+~rZ#$2yT%bvUbInHpThKBB@3_8AYE>q3XN&Hwz=}i{+(qti~{Hdw3FSYW66093!<AO08 zHu9o*xvoK;!N70aU1Xh4e{OL{1Q9=GX2xH2V?=a`%k=(I_D?u8ceS zesuUXJFupklJM30(CIC%)sm?LhTU^1<%h*@p*_Nv%b#Ag9YQwM-(G*7dizuH_U4&- zfbtW)nXx4H<*T;hWW~;-nQ~nKAa-0qaPx&QGxlGh*22CDAl1=fothcAn2)S4uu#?V zfDb-~DnBl8Pa(FMZ6!gV8!ktpg%;-2&iy_9d3?b2)mDL~HNdG6NpNh(2t`Nya?5WJc3;Xj)z0E1E3~j^`v5mG_K+ zu9T35rqS&;%|bREi~8G}d7{NaBd-0k9XE5aO>>^j$iwptE&dJ$&A0HEc_K^CV`i4- zsc!b5Mw)-6{!EGKgLo1!{WOU(uy&RI;dFR>H)1v_gXcH$hH{wcn4M|r17ai+XPnK) zRH@{C!J1yp7Ji8}qW3Tjd_0rht$hj}v8{$Qo;qz*(;FF?2vYOWCyu5kpDsebA+!V2 zX!rTm6cQZL>W>;Kb6A!fTHG((J2TG~mK;0prGKzwoX%;7j9S-jN?xLV><|_TEJ5@% zICfN~37$7HLRU~)&z2}E<{xGsJK%wrQWhdmp;pID-pB0_KE+Y~P2E7d)8Mmaf+df6 z3MN?*)vvoX>_-0TKwizkrrA)}zT#Q3`>h@W)15ti89p|lLtEByrP&X^4C@5m-xk*? 
z`1EN$$Ar!AQF1mNUcC5sgp9)GJd%K{dPAaHps=O-t|p7z&o3QkC;Lm!CehnGjUF|D zCCYh|#HVkhHBuW+)f+8y2|f01p(w#qInt(eT~9vEtoN~T6dv~7!7}s;=mCLCI4Rzf zmw2v8AW&4xk3aaH(_xdv zqiiasMCakPCjP&CydJyCZfAcqIxSZ3jAtaW0Lx1+BJQ)XAflNU2?sL$!{LW|2)Kng zyn$=l9yQk6E#J}S6GTVT(>8Dp;e)z?e{O$jzT{hqtVLA{?-#mb{|_v%Yo&iWNgLtNn`u* z&+LEA6OtqM5Whn31U&*T`ZC7I8>S)^)2-&l+@l5ffSeB|?E9WIM06_+YpvCY`^6>o zGYTTFE#MbVO2eDQrT$2mt}yE0zl}wJ-TOl%U=Ue?zJB^>D~W9x5|Vc%K594+@yFj4 zef+>VV3oeK;|Ko~*fWgF)+B8qutG`8&E;cP%PIM)U#%|ZT@7k6lE&m;uhu3*k?Yxi zK3~ZwF8^!4@_qp6EdTRg43}5`g#v7bAm)um0?`Zrs&D&ZjF8+>_He(n!HKFa2cXnt>*uj7@|ZE3u2_plhzn&C~{ zQoq?PEV>evjz1o|1IJfH+1WHAPSZ7KpO@(3H7YX_D^I>F%YW+S8_EWm-_lPq~=0ouN#P@@bWamQtvv^bi z0QIhLf+q6`T$HGc!}m`wT4){q#MY zhp4!aIYKr(#FnJFZcVP>uLt~ScUHbhkUBf| zL()SaLmi>_yRBb{S2Rtd{Ndhk3Ae@tZEXQ0b9R+7REopKPJSW{*fCyGuzTwKlse)$ zHN|?x)0Y}+FCt!gXnXptP=JZd4TJ&Of%T?JV4znQ?W;_#cbuFNtAr8yy^;PKQY~Kg zKN`?>VDmqv4|!D0yIguYmf-v6_{MZdIVb(XUZIqeld@`{MV@|!nHb>0+gP0bdaY8K(|3k_UA?@FR&8!wDL`3Yi!Ue|WL#A}8BnDsek% zzR-kqaAo-VudMprY{>oF#M+bcCJ`oFIj*|+&;ygB(G^V!QRxDu_ezS%4SXKtCuks$ zi#PhP&{ts@wFddPw2E;J%13jG(;FK>$+4fmxNr-BL;?@=^-bk%Wf|YXK|$C!g&(7t z_@o@;zOm_DQQ4l~y}`lVP6`vk7gX}DGGxbTS2n8TI*g3$m5uhXH;(+cv|8=mHM{Ko zi~1EvtwB(GaOoj42ai}QJL|3GPS7muyU2hmMn<+BnZqXl#W)0>t=k(pX?*&YN`Gpt zM*2Cx+Q(utoNLOGJ&A?q)ZM{aD>SZvBS*Hqr)IS8cepG-VOmb&i_V94$>!?u8|WCd zkmIkQo=b8>vKR~Cy9AYr3h$Q99oQK64TX)1|BbthdlZW@G`%dGmeo2@T%yKGJ*;|g z{DpV?P--y0eTP(G(uY3_;HkH0Xdqks-fATpJzN_joK>ZR4b_vkAz{zcjt`jh zBD?qnlqfE|V&{?eQ~IKF^Ic<8;3MPO z%_7OgHxLcat{MJLJBRAf0B%%kuG~;|#AMKFTNCwRYy`+8IE5kc?4qDqKPyoJkrnOV zpjc7(dj)?>ATBud7Z+!~p?YcstdouY%HbD%ot4m}Zn>NvO~bhB3U2YI2taR>n@qTw za)zm+Hj6Y((x6&G6!nHVNQ5V`78k&#F|}onp1{G_!QzZUeHalA>Ri&EBX9wM!~v@1 z_-S&tilY$^TL7gj#9K-zRBlLi+Qr<#B6E90dTmLswS1W)ewZsE`h|}izck2XG_J^9 zF4-?Z(12b0XQ+U>xeamL{J~BByDzp)-lW2rIVlV}a{fP3#Ig-j(;*P>H1n)R!y$N* zUrq^xZ|A5xQJSr8ysUA+Z=RF)MoZgY>{Dw}$baVogyZbtDdDlWIHtHdT5{9;T{cql z&B59JtOkxp&c1S!lXiNNk=?@TMVWvnfmy6pSg*Pa*MA|WHvq!m@IpWk7}CDzao 
z%al^LW!x(pn#ghPyfrWAy_#w~n<%sv_pUE3OnD8r3y`_$7z;P*rlBM1qf-OV9L$hP z??=4umu08U>!|YoYktjU22a<4xX5hDlN?)#r!`Hh&X-d!Y8p*vft9ZH;fX@Ieh)16 z84Z_7qm7HNRNMFFBsJMoN;+n2O&b#Wwng|wIi#c|e$^TKKIkPmq{sui^QM14g-nTx zRjg{?rSM8=2@gyA>BU~RTwm+ry@tkWvt9R8d+1I5d3uVaRBSbJ3&?zs82x2z|G zhBN98FX%(~hqLCk9=RKGtgG59OK*O=!v$mJw-WDM%$3N;Sl@JoO9LB(eUs_jel1=~ zxqIT}Gf{Hg2Uf|c`7D=r2;XC6h^d{E21ey{g_V2G2&<7}hl5YygN9bVU@~x zqpHu#$rVp`?_VU0Fg|3yj{DPb7UpHwUfsL*1IQEbGtn=N-?bDnTv@|(93+-@H#2!5|Dol z4M*;4M>VFZOQTIKV>>Kkk$_jXH4ZJb8D2GCpTX8v*O@!#r>(bSBx)<<=M>BZF4Q3IaWO=QeiOUpZK#%f-8R9SJ`mPEA>1+nmkQV;;pr_50*Q6az&z0@wR>r z3w@rf>(sek_a2&j1Gkzc6dAQ{=buqz*flKPc3t2Jai3~u8V71-Fz@bqJJ(MZJ zWPQJ^&+}^j3e`Lfjfu6#-Gc)N_P-!oAvW)|fzPs(;LAk+9GN9SznHA3=SNZ_dR)v? z>aT2jqor7qoZ&UJlsfyM#StBB-^(R5aMvSN=U7SGf-f{Su~|*e;ab{R-eXQPkw;zO zOC7bmeo_%?Oww3~M^-B2oYWypZnqA)wxo#q!ZxXbb-ZCcG;;Mz|pTFEq=$au|rR1tXfnVzhg?}fg*#Ik!rD1=MGq@l612D~XI zMiYXFA?=HjJ@j$?J@GN&rjhx=lc4BgsWtxazW27QZ_c;v)3~TGTe(OD-Fc#6+|AJ<{DNVLIZ;D3g%NwJ@OL$) z)g$HM)PQ(-&279;g!NeXO;4g>nC)IDQjfmKS_$aijc7Nn7{HW~qI)H3khrEfW2{o!mssUk}bmrdNk9QVDTr$)Tz z=ps6?pEtZq*G>#!eg_G7fM&_f(e|Qt-Q2)! 
ztH0$m5~|2J1x@{En9oT8vxd5c84Pu8VeC#->q{#d`Ko0a8B0J<9hd^W;MoK z^wLV6%T+xv!24E*JjAbK+_RqN0>~i5@lbP?opuz&OM4E{{rxDy<%*9d`eEwie$Id` zE<8Lo!dWtur8%H?3P^mKL8X>51-AL-Zu{dnnF@ews%LiCZ^C5LbIjC(9M`S{c+{dSpL6I7v;kDm59Y-cU>O@Sf ze{6WzJqA|!-45Rt@;LZLl{B4)1yyxcm--)+#MYT1)Mq;IX01%&zW9JXH2MjB_k~g;t3*+5@O>gJiss%PRx;f0iTn_KV*6QLkQ~W` zcRgYu`e;cwa-mKdaYYne4nB{*jGoEdDJu^hWuHi+0R@3rzwz)>GpTP>TO#jfP@w}n#2kL04*bIq^@{Nl7n9gK-v)B z*+B(OsptfD7D4f}HrBgkBA$}~fXQZ*`fEkg*HgvFQ@n81SSIf1A-Sw1T6#dFbY|{r zyRTo2S?LN5a~X)@J{AE04wgzy&AdBPb-8r z9*H(sYD~P{Y{#mwWoNUx#$Rau+e%x*v*!Y5e4L!OdaXXJe8Jzu`sSzQ&RcdOOZk-S zwgsiZi0y^Gvx(BpL=cF=v3af$!@Ea0u65ClxxCH86HUsrwyOD$_r0C|a)Iz>NiOkR zfoypAd05+6!#uIC3F@VEwA}CXN%d;wI9%1?i7`nsoH&5MO^jNCoHQu%X%>MMny2wuxZ+tx-X@A(?^6AJnT2vxktpQzHr#95ai@Dd!j?vK$2Z^UCx> zb(;z;oKx0eGSUUc;3ss7bRC%KmAp$|qVUzt=T8UPI~k~Aj`m~r7cU+D_3lnupmQ=0fZ8hW3{%iF}V zYtPA~$KI0qWG5YYI+ko?#iWXXx@1$t+$e0#IrgVewd(qdAy|iRyE|hv3vI%m;ya_Z zfjF}fd80TOIB^3_IzqDpD;{V#5e6NvR2{LN9O!9lzZV1HrfJE63RLUV0JzWID95QO zTfFqR4nHjC43~A?G^;#JWH267%uxS9pLa3rDlq{!_vg7Irim(pid{@3lYA@X9&13t z<)>Cs181DPjfXCspOzw$GW695n5l5(3eJu2)#6C#sUy`Grr*-V;}w6{CyS}XG<;Xu zGx!<=0(jnbWX^JO0t`(iU-8HsiD&CXT=jBZKI2ch6-XO@7tBWqPd_`C&P)Q_{IFaR z|B@NwUa1;G)APD6J2ly*)^#Z{gO|4I{22D9@7W5L#Q+e9JIOS7;K~BT%}zC5B9m(d z+WzYKW%|X35C^B+OcO*CM~XbYXS4i87izoILl&n|&CHL85|nCepK%lsW??bPP5dTP2jD8*R!DN=m#p(I2GtzJpOFv!&yZ-lH4hz7)eI zvT<6vDNSDvz=V-eqv2OXKG*XfUTyR^^a$TeI~5rE)c=dJfp;z8<~sF}5gc`6xtS2? 
zYmnUi5qfWVKK6O-jWcAIkxza4v<^K{!85c%MsIFz$g{Sz3ClVF=H%A@rLkJX0I?jT z`pnAHvA9mom@F6fW@c{!%d5`U}bQ4 zaFGvcit(6c?Os~)g`$GgFR;bv7n!T)nPZ<5b`MV|cYLZa(YQX}NhJqeluF;Vtw`8w zf)y`5y5H#{<^EH{QT3G5g&7;{8A}7Hn4>PkOj9pVy zw=VQt@)>-yf-To}@NZFT5BX~Ioa2UnH3KO@3QYIk$O7e441iBnV`V{;(_XZ4g%+%; zd}v)I3N$lmdf$}pY>gmLEDq;$M>e1LnzglU&i+?T>^bd5PQp0MNk}5+@Oon1@zvCO z16D)#_W16W?p0??t+UMQaB4*mzU7BB8j&X14)YlGTKsico`dvB?`N@dtGtJ&8yg#2 zHt5$GZF<&IV zW;h3|89d1jFOXL%i)>ErBkXMCiI^;}BEOyACRv(;ZO_UUJU;C`DN>Sog#1!a+6zqO z@e7(n^orl*K8X<{vMP^k6x1smy2#+NEXTu%3yx5)i|BEou!AE!^gI<-YM@QFIa%?E z0e*I`g=`9FeXASB4s_0v%1$C{ni!}bOYE&}tPu*byn8-9C`EVun11RH-jMBaO!agg zkRdMr7iyE=sX%E^!7QUv)G}`W_h{(TT#uO0i$=rT#F^2oIMLg4|8XABtiAnwrjSXp zoP4i6qO5g$Ke)d(M#fvqG?i!adL;X^(O`D+y-Fs~SuiAs0pP;c=>*eNwV67^Nsr~b z3!*aQ5%jOEslFob1T>4XuT8NNM=Td4s1=K)_o6zIX!-SB_J+W<77dbnIbG~7`4Lyl zOTna*T&PY7g{!aOX20j-PeTg(F-0yKis2u6R=;gR2^q#kUDX2kB;c*bxyly&o%JZ{u{59QLu_$Z_Y_C7}bxCcq8ML#IJ}M4QSB z%iGbx$ifp1YDt>RiMEm?g+dUGOCzj=uUE>iZccp{9yB*=ut{&SWg3>QXbEPd6*4XflEpX{ z-M2oKc`zO$ebqK5ux-?LQBjDE5qwBBMv8;Wg^Qkqi`$Z1Rr0Q{qVb1V0WXbVvK!*b zXYiUuN|`Pi4%)g?Th4i?*J@2DU-?rr?$KtJ<5R{RMYKK5!>i7xE!dj^ar}W-Fy(BT z$%^IPb(Y`l#woI(sD%*%^l}E0#hKbw?E2siT zX;hO{M!-Dkx!5iCOExKF@GO0v4@ud=%&PLeGa(yHXGyo2_2>zSCb>THsm`ji(7x(p#SXx(C)*u88;0%P?^9 zA)4*`7$uI;7Jzq25Wh@+hEOIvTgcYL$}u`0;(A|(leb<5|B(}yvPG;2vdjVYxv9sg zN%U&Yc`oHYC5-mTd#LBqZ)>F;*<-$a(RyKdo|tpuPOd3c<3S**{; z9={ZN#oi{N55LB)ig4KCL_4ckkYYAoKfhoWvX~`Cp4&X%B|yDwPTqWw zQ*$;-E0o%yU=ny~dX=(s<#H&zH&wG(oE3CE^2njpoN!St@BKb2nwcLW|T>9GK&+6qR_eAPa zc`gp1D96T)i_w8HJ5D~s5Z4=1>Lo1h>Z7fQR4UyF+6Wi>=_WA8AM)sWpBKrm`4*O! 
z>{P$dNUPtQ6IWDMy3UANQmd#nI7l>#YL31y)>i8Rd6L_#tkcC0ymnQTjJC5>>Y+ap z#z{yn*;;`(dNep(4A;6&!a6wx3@B_JMJs=~)qRAX^1RIJU-UPOac%yp6eFf#D|_6T zq9sqRG}ePirSE0dIg6`(uUUR|6LXUv6Fx15;6Xj&Z3#HLUKVoE3|gvdD|(npE)kn7 zppi_DQQDUSuCL{qHmYFVwMI-kJ4;E=mK@9N*hLmKS@0<=?JuT~!@a|~PfruVtY}x@ zDpzhZ&NP}feNYn4=~}BM*J6=D&%B z`1vY8p?BmWVbv@O)t7?GBI$COj+=;NRyII{pH;SX!IvY)`Y7I1n<2CZLGRyG}4T>pr4SqEsKHXJiA=aHfSnMe^^4u+5eI$ z;3MuQ_NTOi<0~;{z74$B`t)-v#b2ZjvL_NX~X1pLvAKsf#jq6 z0BDpAcd?5PGD7~E{fm#f@nSv_^}1|R`|_pVTTxqW^u^G!0ix3!c@myF(|G;X7qpBO;}p9Ws4OFVD$mT=>n21bTn0NuYrBRWxL0F z9R?7MW@*>~h;T4r1|we;?%n0Hd~nrdcW@&=s6b>qF|4O2+Hgvx@~eD;pqDFaHfCES zCAaHCaxM?4rawW5%)?(VqhHGwaiU_QR3w4D9hu0l)itD^&|ne4(p8~g;pHCR!T|HS zPN|K=Ft2E&#-1jvVv&Em+#cK$>6-Y&!XroEl14*T;;qg17wW;aVKZ#oH@Dj@T( z4c@Xzel7h}b;3+zXlB3td7*SEqny^h(xO=@gF17f9}A1-TZ?^lyiEl>Gnce*urCZ}umeag<7w{a0dh)utS3?+eY%={;8*f@BU`K3pjN&PlCQ0Z=3`$mdLX?En zqg>_nYEs6@v$)k9cpNrKQlllBL-#aE`b7islUCjwzTvalP&*4vVhe*9t)cx5d0d^# zy(Gn?8?3v9t>*@Yc@k`@nB&vUKHZ(5Y`gD*_K>$~(Ts|So8Af+a3X_G!R^J<(exli z|JqE|!RIOLi{v{}Eu4IsjuyQ-)j|y6NWgi3rbU_=E}E&D!ZTHIQbz_k4XDL#ysk6` zYMD@viQoOSs$_sbpH-sT&pKD7Fl@`|9oo`2@u?+~1oS^apeI+Yt?!D}p)`AHNfICp z{DM+nX(Q$hUt9R|(+>^(`I^XYSmfzMR!Vk%QtsfFBIU?7zHrXR!)IsTQvIrM=M`ha zT=hMINQ}Bg8q?Q`_pn<-J?Z4+5)E9_4|0ATR-aRfNr|GxQ}QkAr7J5&-S^{^-nbp1 z44Z_=VG`DvzyD;?T@b~hg35K>E)1bOBfp_ zU;{x)==q}G@(c?F_2`{Z^V-$4;~D7VX{=~tKKt3|?l6FZW#fM&@IO6c+~kam#d;b` zAw1vln^}p|`aIrEF5W3?CGZSbp0Y7|s!c#|HWMrTde+^{vyb89z?W8Ujj4DiS?+$Ekaz~)G z2%0Cyd7;@xsPCF{ewE9~k7Ib#N=7b~baLA9MbmW^ z5?gj1;q3AiT3pKvhgFAyeKzP*7@P4)6d7%rpAvB07Hl{M7}4t*oN@Dhcp4P?SAW~U zbMuyKwKE@qeY@5~YL!m>&|UkdBY34UT`80~WG1nQkvjXkcJ3Hg`TQ$Oc@1~&TWUdf z<@m8Y>3plls~r z!@2%tAG$Fo`)rRND@jlmCVlmbFCr4PIS`M_^Pcz5N&arQQIH8PWgz-jgr2a6Uk!Ro z$>c|-CLO|}o}l&{VT$M1E5y6#LEu-uvqz4V2d0GQg+-Lk*Tqie=EF>Z$JI4^hf!a` z9$nuiLC!m$(qq`|$^UEg^YQP(oZaWXZEdvjAmm9VDoJ}E3wZC2LZ69|NaS!Y*Pq5z zI{Wl1-DCKY!RC?oC%^iRnl>u>fPoio2-{IlgYskdEGd9mKjUK(3E~$`x@{E&Z)5eH zG}9%DRsp4ss#*>FLjhEFUWhC#ep4ou?cw0xML!CHRsVJ!bh!VDfivE+2uH3^rm_Qv 
zcs1~vUNf`m9qbPA=SgIZew?gf8zfCF8}$@TmP?>LXVH*kOx#= z&DFcjz?{s4%zC+{0$RDHV2NBM*6G>CSCI^nf<>5Nc9^_S7;HH+V@xP;WW4>nd@g!# zraUE@`F!r4BhZeaXZ=G_-mA!a?B{yuag%D3MQ`LXc!B8&l=RTdW6G{c01&-iKyK{( zl+Yi_?M=>2Q+;)J836v52abYLOBNsaU(&IJZT(AzLS8NpQ~jsP4+UkwMfShzp#IOJ z|4T<8YWd$3WWJ>OQ{#IF!|d>b&0Jd8%wK+?HR0Sqh0=RWrFER8;6FD@3e1o67=)bg=)Y==&452rvH ze%5af-lf>PygutiL%tC3{cqx^SSa=iIbuJ9;$y&Fh{a_%YA~G7NWZ%EET%1GB+a2| zZoF;#aYcXbd}o;Q)(jmV*}P#%|G4yteDH?$8$;_ENiHZm1CslaUtH?t0muFrOalYiyXKDKJI&kxT2yW z7|fw1AK5mS=Y$j?0Y3f!`3tgGY)qL5>2hM;f>~>{)Bc{|t*q-SVT$qDNA=|PpMIVYa4s0vC; z_d-b>h~yC}7v>t3T3G&<_);9+B1%*JesZJX&f?hdS4niI1)ub>pGW3R(o-z6!w9d0 zhE72MoNvEH?@uKE(}&Wjr6u3S6Ag4Wk@o2?g-2F z{+{)dwY9ZiG`vX~>B@lrKhLI=ddjM>G0i+N@;OUz(*MNr>L9m(`VU# zoGHudx2PUb2QZnnD#XrmAav8&meEy?E}*Jv6}r8StTv(g%hj43zir@N&C^pTYiyy;av1qX z@8+o|#2dP3Tb(5I9bdP`!5soXCucfWhsQD;sMrwiPlYFgcqad)0I%>8=A*r*5%f{ z6}sb-00%eYQGyih92adWvxEsVgdD1HZXTo5COH<$X8?u=Y<2VGTQL5Z;k7&N0@MlH zU%4LwZb~Gl^L-%QlBDl)9S~+QhrH9t$dS*B7?-0u(2rLrQ0^`}q`Zs_mzlch} z+i6KnnI{u@x3r$K>~hBZNLNJseDKkF^W}rtQ1_koy_FSZuCy(492Y>gVQOVCcH=)Q z)OW)8$Emj%FC&ayRxZ*xTd1GtMs=(O^5Z`~LlF~v6(SM2NImLIHeAcC^iK~=I3n{B zr*_`O$({&TXO5D`(}eUGgALg|-+eE|pT;qZdbKWw1O+i|iZ;m--^mFcLpsHGLSa2r zNYS?41O;Legd(Sr4C<^jF*8X_=?sLGU8Hkm8!glnV2nYy>~zJRJi;Bj zfxXa)xU8n{Tnl>5#t$VUQ%vj_iyk~?$V z*-!5WkcRXD-<&fI=`H>VAeCa&ZWb}Dh2(T)C6JzM3hc?Dynb*@Ru&Gt9EE1y<}4B# zdaZ8}5=BeXJX#+d9>8nPDN#_i{<1tF2gjU<4RV7!#N{P~HfsIgHC z#_;Yd)eX=f)Y}ut<+xic?;CydhcxhRRB!2}gD7;m9}Rfgi?>Nk8dc3vy7X5)PzGeJ z!-AQM3G{J?YhA*=t6}uKj|!S3MouLBZD>v556|Iw?0fs1VZVo2;w2Je{a;9iE>pzFNdh1XLy9v?I8(g(c!MXacM z=y8v8q;>nW-=VFQe`mSQb`H_8OuYJdboTgBEoK>59)0ss3jcnuONGBFH^v^2>Dt}|0YM8;t-?9$=w6XaB^be0Mbc1kFMe+B0CO<%p2ae zv9kIuggu~vjP|y^G(IAg$mjodbWHopzW);qQg#v7$IcRE!zCQKv2*U=HUA=BBzA`7 zgubbf7yCO91w|Iz*C*W~Da+WlQkD7UBpADVrc8~|O1m1V^2)lXRKdoni;IhRBKy$m zQG5SK8|N$6)7ywp1tulJwQ{60B8`bcRX+$ys!q#6x@|Lj5!Z+>9Zpf$wQa6zU)g%uX!^zRV_jLH)%uDjOS*f^O2B$v8|)$G=3HAF7BfVy zvxpXw*pL#v$uEw*6$5Zbl5`7K{iuG6Jaxg1_-pRHcoOfp`7iQm;^_Grz_->ZQ-rWn 
zndhp2U_h`e67IB~q_r<{Plf0SQVO)fcU&@(PmrDp8+Ld|r(Sl@v5Zk(o*_Wp>@@gyuTks;~yA{pyxa1ZWaJ`#EM5)RkW(i$QkX!b2{fkt30 zb=N%4SsNYgYOAi`+^$japN5P_58N{XW)`Co&|TlOq}z_MW@{wUK00;jh@WA-L82j6A&U>*-MdGze{s=_O~c$lv~>QzXi6b|>*~&cO*_4p3xYK6%+2PC zP9x8Ew_#YAUO_T&4)==?N}VN>p((*-afw$_;j%PHj&la`;bqb4+F7md_YTBJB9$+{ zv~z&!eFNa{-UblaF|BXGrt2+C=x?y{KpN)KD)+ z(w`mf?+XsT`1Y2SL>?iT`{B$af5K}dWiE;c$(Sc!?$zhpUSEH4vflGW?48mE@$qR; zP*7J_S5;NjKRK+^#|8%nZ|_znA#;30q&|s&Uc#8GH+_r})Ad7{{kPQZG;#XLEwzho zx=Gf%L!aJzEknl>ooOCTV~|{yFe3c*F`;859tJU@k!zswYv~fFw)G&A!bwa}ug2uJ zebdV9^t6Va9Y)P3HX7Y2tcwJ+ItKG0S;&#Q z6eHJB9{NByqk2)4bv+77e#6c=%;*6u&@vv#ADQRE<7S%XJoKDAH(ajzp)17P`{Si^ z4h`OVkLQT-HlzKbw_LizFhnZBQ+%6CE#fmF*&Kp@2LJ&_#f8k9(gMpYj<$ysJ4 zf!~`y)BCWlQ#TyDMJfFZX_n3fc0G?L+MsP@1VyZF8RQNecb)ksP1GVJbGZNhaKw z*Xue#*pb*sy#4Z$a@$BA+(!gb1$^oUfx~}7CocJ(d>AB0Mp^O?RDt|j*eGii8UfHI ztd#)s(_e^jPx4oK(8v7=)GFo_p(3{}C;lyDkdvuQ-|vj{2ZBgeN~nUt6ks7lB6n+# zV8Pf`Z#6Rd$lw9Th=1peR8P%wDw?z#JP9|FThG@;{!Q}!U5R;Qqj5JCWIXpZ@o-<;@-pl8wI}jch)DtB z{$|$B<_vWc9XaqdYDVY4i`EjPl6-bHuhID{SB!uF%fa5IkJJkLaP7;l;xLB3D0-p? 
zm$V#=;d?YH{NX5ngeN-2U| zEIzgK{C8mQv2FL6K3JzxD@k6w&F9TaNsmPLLpS){%T52U)U!GDHjAm{I~z~))n-zN zJAk&hJ?{r4kKI_f%E^pK*6aUsVDkYzOPL-Sl_l|1u1Wu2e7$u*RPEX?i~<58At6YE zAfTkQ#L$fhNS6voNyh*KjM7~a($Wpmk|QGE(2Y_fQj$Xt!@$g0Jo~)+?DL-Y`}|+m za?O3kFRts3wllphz7enJA2v?i7Z3;x{DcB>p;=b}y&#q137=9G4_Qr?RLSSy!g@EM zQp2g^GSf<}qv)gMZ$d3kFiSU+?4F~q-5#GPF##bRPlVdz@3rIbCJqwdDss?-pSi4j5hyW+IO) zOt=8bg?P4()G;$lqFRm7Cx-P9q*0#0+m{4`yE#FrT8@!W{fCVt{#Be^ate2+X1x?=*?}UijhP5T3 zs$FH>e>yNtov%r0pMm~~h6v_0+LcO^L3Z=Rnqb8+f)D+G%V~5{=(BGt~aXz?zF!R z0ghT0a?9bE-1pQAI{#Dr$#GeVKQtxFXr$uf!PgMSh#Uj$MWH(e=H}9J$srs7m7^~G z_xfP0Dg({Bz2@UD^}zVoB6~4Suh5+jrCZvZ0-|6dt6#oksCAx}wH_^Y=iTOEXKr<1RGag=<(%K~v;p1%Ke(-ND} zDve?#zpHrLbNbXSB0;WJf`*o>u(aZ7c2D^f!Glg--33XwP*vyi{Un9E*QGGoh!ItcXSvl=Vt}uYHalJ4G|B09mtp7P)VEKbGp(XN0fhvUJQqm8J8v%d$IF74B`|66R%n8 zcCPd?%_>b`+O;2)?s8%H-`Es%HWB*d`U4wWXBm7p36+AGMP=5};tjO4S&Do-^p!_v#;XAucK!r!)RZ=C%{j zZFefi<`occO$$4KD%yUZt)pbSWs!_vnTxW`JoE084bmcKmQLt8wvMT~#P2^MLiifY z!Vs)?Zmk1dsbs>=-d>u-tunz&4ReIejydj_l^@cMFXhz!D&>%fP8&NjS4p465L}WU zi`-_C%$}oCnOOJCwe0-lV1g!MBghm7n64e{Tl+}g*#^dq)W$H%-b=>1ytCiT3#c#+ z%Ud}&@-+YFm`U1`70}c-$iOw{TAI%XefAE2Rw~JHq+P(woa7M*Rc-sX`*lahYMVm?(uWw^h1f2ZT zC0FCoTYidBGfsvTG%tQS3(>F5Pfcu-0KHs3`uw;3Xi+Ts5$#>&dI|auoQ@$@B4rJK z_cr|a*oiq_-Jgkrhua$*>k_}ZhRtWyHbm<{c1-FX2Y~j=)M{fahQfXkb^*;F%(TwU zIB2e3s&3yt-1dGWl>T#y+0QZ7%M0Shll2LJ z7VJRfI|x~0h`JnuV49t1Rg&+2h!a(53r(gMOsCzhm1_MSn8l1uP|!=AyeZHZ8xX z`&bjxlyd3L`R4Js!~Ha#m@R*6N9kuTwSmH>zC(t)jt6?eo+~WtKBOmTrH-}D-zrTH zSix}mQx^IK`728dmJQjy`ieUr0JXRwYUg$$p<8i%ui>g50bRk{(-m470y+tiiUqcz z^+!iXD^=Ckd~YM9^YPb?TVKjRPrsaga{GHTS{*KiTOrW6twJ4^bVK0qC#1&h@~tmB zGg5Z(Lmnz^boIUbyRRRwf1cUv;wW^xK&P(yZDy1ANaBVm3Hx`aEA$>?9_*w*gU@i2aO%ej8Q8BA4x@}Z(LGm3XUwL5&OdkEsf(I>(b2!`fuRN ziQG||IuTyI)60aq&}P-;q@q!A6wLw}@ZkZNCQV%Qc~sl7PMdLMJ-g*76`PphNO zO-Qw>TqgZc%@2SK<+ewH6ttJREop?Z&%k8yCQ-cE4cG0N!Z~bP}!l6$EJ!rbMl<&lo22jop@83fUdkv zv_}`Bi9v!P-kLeA@%$*d&9L=FzXBRBlafxXhY`;nl+J6aglN=;{ z6f-z6Zk_Gg_T4pw^C*LVG-zL*kDW?UG;Yv!G}|g5r|E1ss_?I88v_GF|2Bz&W71{l 
zWXW*^juXf%@)wl1mTE9(ExYG_&IXYSn-rdk7BS8|WVzXHJdb$mIu8Ea7n>?u(9)MB z{FIGeTA&NQ*p3~_-^+V7LKnikFpHX5c1KC!G-w= z#01@J<8V_-XSQIbzJ8JtaWd!8EDNeT?-QrS3>Tqw_M7@O=K;=dws#&*nKIuazWp!R zP*sVzBTJ$NvKo(S-9yf#re^5Y)-}^I_?RgcuDlPPD+rBj!3Cky$qea+t(*RCw>67f zKH|pnK9v9+IZO<;iX-Pc6S;34PluzH8*O{mXjGHf85kL_+0tF#4%MPh+fr3CPdj~7 zW<}`l*yztrP)F4+tOg$+@AsG#wsYeC#E?K2#{(oOF-IOTlpLD+x@;G}=YwwLwo8IG zcf`_hUpfo9Da9Pbe5ey^_pFWGWXmLRA6n$}s01s8bOc90n1hIaH8~Nw#F~KC#)kh1 zIB09!KCXVgzYOwk4GEfgBAQ7zv?W-x7fDz4yRs4J&G}}W$;$3MmdiTA^?|dmEXGd9 z!(6Rn7Ab=n!^)b{oR?KKzZ({!+yB@-j(F8y%)N+^+Y9vHVTW=mL`_f7S+x$xUQWCk zxm1spX49ZzpYAt8y4BWN`Q@EfjTu>{7+XFOkk$cc#d&l3{(C97#vU-VJvdg&9M$Z9 zW|Qrz_(mN^hSRuCj_Bwma&TX~mYeCMgB+HyY1B*_S2~X-l`}_APbIi|D75h{0G;_W z`PAaE9Q;rKb19EKiT@DyC?!6+9J8NoZZ`-Q=OwTDXXabMN!`lKzr_d8pYQA%~dqxGH!i%$>$g&I9 ztu}6u!#?pps&oEKR}tOj0l!MT=>2hdgKZ>mdL9Kr275@-bA{1tIzz>9Ia*cP5Xj9Q zhd=72pj5P$+9+cxXQ4h9Yq7Eh1NLnJeg2U&p5$Y7?AVKM9mjoYvnOi|`eswagW-cv zyrk^?p>DGfRO1}>^K0F!pdq*FwGNQ~K1yruY4o#f>8&hl&`GS+n=H!LLZ6<9$%ZSk zuQzpbhq?a)I3C*xJh~UFM^lm`P86*ivWeDn-KYQk2L^#yLy#|z)T|}33#};OlM`6* z{QbBCB}eF!G49mVOOdM5s@T&zO5Dm56V!3cQp>=Vf7%x>+!~TNd6~#}PI;svM2bHp zu-RxH>|u9o>cZ>VJ@QO>^B=N_(D35PwihtBS!V62&J5~Sh9;s zO;yvX3K$3Dz(Fo>{H)wGO}?`7a}Q-5l(Cnksx9@7jHoUK`Yg`Ld?U;Tc~oVj?k z*xLFuj4|eLog-cXm(eOm14ft&shsnoPL1C#s=4&-!; z;sqoL`QBm~afJXpU)JZ_pJQ&LEVb#$*c6)Ieycx_mKny`f_37Mkcmqsp9HgrZR=}6 z&|uq|uHn|Tf+cq_%bMp}(e1MS4IXxqyNncoxq5xSZqCMXEm zohpC)WHX&{<1g{uc|<^<;lPO-!ba_J&Pi#L(l@nVw*{O(zpXSMhsOe3y_C<>Db@t! 
z9*GP}L(QwznzF=#Otyv-);)ZFto1mqE%OD>|LI2aS%MKm6tF*hZMhzuU4sh5j$i`n z?hUYU*Gl%)i0Q0B@QT#>Jw856FZ^2nnS9s2K|}$v_zcWtELy;Rcf>zcjH0K$lW}>J zorNikl{(_P09BTtVy~+`8T!|tc8YlGVek-sfm@VF6 zq}i)ZfIJMX?1x*vr${_8+U6+Cp11$9K#UcjC)j|B_tG9j7jq=&>aoAiz!pZcEh;t} zg3JRQR%THts=M*DboI8jjnGAtiacbresHhhUo3I%{D{1K1ZH^a+t7&PKrwWs4+9#eWlwv{52z16kuHb zgC?v=-Q)Sc2p8vOsp%h51Bv7(wHL#uhwqnr95~oDowBm5%-l6fwjd%yF{BB*KcPWo zJ7`_R(edY$6q2OQ9&)U)FkVf+8EvYTn)YRgQd^GKt9Eg7PA4&a<+SHwL4DkpruG zzzx|_+elJl1l`hA#5WGHnnwcT7q@H&T~dgr$qL5;J9DHv%|22pTd}172$wpmGoh} z#V6wxJS$`0jcJd>JV_3Chr$$S;s8MBrGB`?Tm`4w(WYE>;7`!CN`f5rxZ|{n^u@Hr zgD0q&;z(}@u+SB+80*uZPsYFI<$YB#;2!>HVNyKhjVZcoMIWs54(NsP9rG?@uUo>T z%ijA+M^N8V0n@XTMd5cxz{S~kYs$2`dHRJzy}qq+w*Mj=<5Y_ZN4tQweXtE-nD5iI z@RTwpn)4x*;;=7T`6b=c z)~>cLEy*`8?DHW$OCHY-j2ix%o|iqMsu%MU(o6cMT{cpwz5p!ilQW!8smVc=9Kmu( z)^9Z*-~dNcYu#?jFv^@2KmAT!$a7arXQV=CBr|LO@#8za)3dYxNSU0tPs*7Kdv3%4 zdnhj#QaijGyta(S527RrDBCxuU29qk5satAt>- ztTID{N4TtU@0Y9OFKp=yp503?ho(v)69g)Hnh?_7pVufa`MO{{%$-ZI=Wm{HxdO#|dT zH`kq2!szE=fu_TCrMzC8&q*oU@&$pAzW^VxCu< z`BW^~!tEpTJ~zpmHnV?nSH~)NG`#5r0cx~7%L`K-C1Fw`sY_ zslbpovLip@IZtuUEGmpLVbvtP#IfYE4p6^%Vg~>G!Hqq|pl!Kvf+9GJ=I!nINe1U0 z+D((?`Kc+o7)P;Ti5<__6o9t;Z4aoj(lU6!+FstSzU*tWGHZxy75AK>)^bvl z7V9|<6lIV_i<#A&RX@-o1vE=NwJWLIXW1jAIcW04fg+=-#-nnOUP@N`We+iQ_m|BpX#-Hw-khgY2JGY0y`Y0%tbcS{spHkh-3sjMdpqQ%5dA;?DsabAw6Y>)7y$pP`xln* z6Rh^b`A1)h3X7`WO(`;(+4?4sv9mS?h~6(g$dMs;ya({#f5l#5M&2Cr)KP>h0xFKz z!2gyBZG`~P`R-2nWsK?Zy&MoylA@wY`$zzzg-x`!T&?Yb=#4n;s{ict_i}dL(GWPF zZS59Iy#z>;+FF65H2gIYyUx#_g=zp@&?EQR90I|5JN%z_w5NZFexCK+>uj~ap%A`q z-Mk*_Qxk0UiAV8HTUH2@7@h9NleeLU64G-YbWo6^LqJhnTwI($xCzk@|NWObm;mN{ z7}q9^?DYE8V0XUojA()UVvYP2pP|_i5CKBVM%5-B3ofl~+OZQM}0k|n;HlNaE>Et5oM`;>678@{<( z90V5VnWW32k8Q~`QX~@$M$PG2yKTrH<3913P64S4h!-}Mu$E@sPDhI-2Up9H=}GDl zJmQ`_qk&Dv<#V<3Y+a*l11x_i;u!l-7Y!R@HrxPxcWQp4;|kRr)L=~U^h})@l65w_ z@(-B^+W$Sp%FNXEMoELpgG@Q*WcM__WsUN``K|Dh`1Z~sb8&b{x2KES;rn*bfI#D= z&c~axscHAJJMnCMHoqGml~m0Q0FT1so*b56HE6#W!7YaXaf9 
zFc5lprz-zv_o#{4>&Lv}boYH26#uN;sB4|tIrHfVjmScS{LbwWwGuz{?3X+~fB5*u zT$uTG68meC!(#yuo$m9ntfaE&@8L#czaKAJKDi%>47KJ!#g!mX09AXwV9eAhBr8?Di2Eb(LV-VQC%I z#!4soEhhfBO29N#q)3?;(=nzDIA})O!2e2DhQKdx!l$bD;ub9q)`~iyW^Fn3@StU% z=|js2wA69)XoPWmFQ2;f-cNO#O~W3Bw#4I zI!!u4bkkk(&1eqqwcE%0sf&2F=Y}-E1De$c@bV^0C+T^Rxx=hCC7EOzDiMs^vKRC! zX$+nKV;iPdfn7b)OpEb8^|G+w5nHp+bIChdAB6JIZ13z;Raa{V9W+o96aXgGf&zto zc{T$elM=!Q$#6>7Wrep?{NoVWo3^5NQ@~?s(=Gw>@fxbFA+9ylY+g7H0+G#oc_K;i zTsei)7}V-ypTTZykW<&So>H;$g)*A^A?=ols+c$XKX7hP1FNFv-DaVVU5Nvp^MmSI zT7PHIV8VA@mZLMqyX25- za7gD~NmG!$-8mq$!=@;|c%-ShaRtAt_|4-zB|Bv_sCnzRMMv9Y;I0)owC7V!1Gyq` zuaJ+A&wqPB&LlaB@w3NYicj*l{vN7jS-j_#d%Lhe%@#IJV>deIc^DQ`_-OdT6|}iY zmBQIaRmel8L-0H(w7zDqyY3VC1&LElXaYa^3x_OUU8W_UwxVM!Lr=g;|1we5_YL_{p9VGKuCRFLHSVCNQd;* zXz(-25(y9z2+IP5-F>qvquk%oH(oXYyrRA>xE01uB*Xq1u7st)TYbFP=Cz7J27f8) z-SJ@+x4@lvfkgKMR|&26{t$|2CgbqG*3Wl_vaJ052@K@MAQ^Q_{s?w_5 zRov!g_^|2<+~cZhnW->Ec9p&44ao#MK=WEB2U?(oFQgS+vPNuyQ*dmiAve6+c5`f{!gbBIW%QjGFh!F9N-@7g)f?^o7op?qy(Hryp? ze5UiKwwivu9n3@?1a#f$Er#ns>%)HT9znk#I<`fO-FSR$Qww=C81itNm5wvU^XFqZ z7Lsl1ttCrzI>aIm??2Wa&;o}uTyXv{$yK(;=A7J^_S5L&pOL{*( z+W`GG=VtPf)#GdWd_ZvM%+wP9588WiF;iRDdhC&(OufnO^qgJn`}gl(3h{tx!YDm| z&|OoTMA$ZzF5^Pt8!Rx&rOczLLad|5SM=zI-dMhsHY+mhrWZ@cLFXiw$iqCI<7++( zaP>(GNF=2zUkBnpXBsF^#J7jT^e%w|5k5(`uJZH@Oi(p=9C@px91}bG@>TNPA?()3 z>-rynz4(57zqS-ASo`m3z%OJghzYSkeQy5&GO{UPmGt9z1xz`G2&0iwJSF5qO$v}Q zA8z}=J{o)U*Y!12_q>!m_4yS7h-Yw4Filbqa2`2$iWd;V&##{xQrrE9@j}{I<|6R* ztD3%*_0a8&5yqT7B+fq)Td7u=*3kOY;NMn=v!;1G(DGYxMxRc@lWFy+NkBdaYS5go zw-4q4Tp^~g`~fLet*(j20om~;%W6nlG;gNI2e4vDL7oz`1E^wV27Yaj1R6C{(p=zN z1kww1t>>x0OtF(V(RDg1iB&M{(m~Ce&B@e#j6dU%1DSxxmiM63kMbNNcMZ=GZ{ot~ zUt@b)^h;yrw7CW8eIn4ylQidZSd6N~wd(4t=d%Ro_&RBPq58HCAq&14_TAnlpEoXy9c59TfVi3-Go zwO9pXu3#UnYpV;92BiOC=dMW=487(j%BN{?0Pqgl|B5s3DsyxH>3^GsUB97*m7PXU zFBJ$>)wF>pINz1E@YC7)c=rF>_l*r3UnLF_<>wY8Md>Np} zspguE9IxESuj*)SwnGgF@+0a!`qL!%Ohr+hR5gotL*r6^?IG#mm-&<-z%5OZ=K7oM zL`X;jtZ>=t&G1~pd6;39-9jLpBlNg{$XeF|_q&cxjOGx7u)?03JNqsR!2;vF;^Y2@ 
zO5bTE*ee8h7gsMHS;*86(3&6*sxO9VqYfUhFq&b@)CGoe%+0>`QI|yIZ2!3w9s@i^ z1<%Xt>gpOAJcsk8WJyBC0UPc3INPglo=d^+ZOH9PmlG;yKr;bPa7 zb(L|QDQ3;HX0I)~wCFA#&vLQ`Wh$`m-IQv+W&a7E>G9-MyyNszBBJ#OPI2t!w^_?Y z2C!y9M^_ui?eT-#E)tIbeP0g_`z+l{kAoC0bHo_(-wdX{N+*gVjQamq|C3!E-RSe|_o4wK=QKGX<(gV<|wF0+qDM z%)E>ZEq)E}1zGAtQw)ZEW=#>@L9=#rb%6nMpVXuDE4%QXy(%Yo_vtns-sl}e8Gp@a z0l}`8sSc18;&tY+`YX)FserZe_`OtZSvMmUhLLB*q_KS**EV%g20IW+F6|`XN^;2! zg8V%j+6|QI(4ZAT^2i9YbAvESp#Ra+G1v&)>wH97!+{3Krd(?y3!(0rR=y#IJGTS! z4gSkEZ}R3L?xZ>!>optAdav*XRI*@HaL|};Mi4!AF->@Di--{m+x{y@JXHUf9lrI> z09*2E7EgZSuOn%C)`1tti2Fka?vUUn-`>OF5kJB#VJ)o56vbMlFkOwcd>Ggi;a_Xf_ zH&s3#c=D)nvAE-N=ik|(NBuJIsDp@%6J~3$LZyubMhxak()KL z*0TLb2V2atl%@j`Jn*$z>M<~$pDj?oa7Up1d$iD#>2CacS z-7yekQ~%~-^N$24=({Q5!L}3|L8ZqEcq;#m&k1;2HE8>EQ@5B`OlO9_%?8f&fW0_y zf*JfaaDAr=yKWKyuQmv#GiiJ?lE5tO{dDO-Fc%mM$rRo8Zij#17PM>^mCAc7q)MDA z>Ie)jD9r(WhhGvbF|q9Z2cvSxZA$@w`%6JYq1s|NHqAWWWRw$=90tC+_?~#E%}POV zlc4{F(L)^d#?{ebrGk>w6X?5CdM7S*Aws|{st8t7>(gP(Z+CXOcw1t``0(Ql`pwRB z)0EmmCl3E^%z?u^mWGbb*vP1XPZ}7WN>=Bj`(=`zD;IPzE~%o|yGbmRa9GVm$JKxk zIfobg9*|vH!t$^y>s0wEFrCG6w(aEW>Dh(1M?4ADq}R?r7tRw~y;j>5$eMS4yhve- z8h~=vjCyw+k4gjVfBGSG;l9IyLjEEHan<9mss9XFwf^^317>|rKH&AwVJL(dn; z&$y4FWJnvJMjoXpEz_i!CygmT{ z+y#1#u_gO^Zf~6unUUnh zw&uxl*dp(2L>(}8SQMB5TWumNbV^L2mO+jh3eQXnMyccUX$*75tlmoOc@R<+iaL9) zgd6C3VrS1a(_;bCBIw`zQ~e!XY+puA9=ToHc72m5ibAHP%s+ z2yUGBQVK%$?DQ4B2m3DaXY)$vlI>ZV7@!TWg#s__tcRv;+~AjlE_<-(8)w>9wTTS3 zT}_lwTUm9ujMbKp22Sv6;r56cX}qy02UEUu1M5_kIMr3jTu6Q%kmBzz)^XD>%~4-s z1m+eXi(yF|xuuno$ij-!nT~dV@q_ONj|EMxkSnv8rdrL+{YQR>gEntwKJNVheO=zR z_85W|$OggoTS-}eZeh@TrU?wU-WA(ob;obwv1uKwy#Ji!{KI}|GY}PVf57ES(d5RE z7}=DyphJNZmIjEY|7)aU}tle+sLM@H2EpD;^1vB=NdwQs2}m)y)pK?-^h(m_;rdPdlW!O8^J-> z#xm8zI0?W~|1V5y5&<*kwB9*0Lj33>V!wN~XENuS^H8goU7hvsS%%zaLLfOrb*Cqa zN0co!ed(V&n&^*DUbXyCl!Y21ER!>*z_VRvj04oaYIG-Nq?AP=6|i_3_W6^JHCh?j zS)H2B*&z24k8TPkNEXF#*ebHbCVIY$OJ9M*#@jaz)mk`JAjMJ%Hm2#N zX(bIO1sbw} zRRq&|&}U^+ZyI^+0gv|SlRL*VMcB)`!EWH+d$Rk`-Tj1vkOaF*KL5Gtbl@euL-MYg 
zilDl>pSr-GN2Jw>Renr_Ky7L-dYsFq%BGZs(iHz+cs(>Yw&?(#8cpaa-2OFV1KYE{ z9egqDh6QV#GQ+M;XT((hEH!VVz}S6>*;oOnLyP_f(+%Lt4CuSzhrVt&%+L-+t5l)O zGHv2M3)+Z0vX^#_Pe6By=|+P4)7%(MtSn0++*beD5lBerYs2ornsv&LU|siKx~{st zUGED?MfGq~s7J}xeEZ}@^esj8Z16!(>zNO^djWA>sbQ}Hj!A4u68^E}+T8rlQ0F=) zu8^nQLVy$J-rw*9jdDXDhmJK_Uw#E4^F=mL+z}yd&070kFbO+O(_u-P9d;8#iOb!M zZTZ7338&6Irt+<@rf?uDhEDiM$Q}HyzS?1t?p!`|lbB^S@XxZT&GhnU(;uu7N&ENI zr+u(2IQPH20Pg!i83W_gg^PTNSzt(v!bVdH2fjM+6gRN3cRDEij-8t#dwg(zsAPDdDQ`JN&0k;~|EwA5J z_9Lr#TE>annToBedE9*8E8sF2_SOgckD*X;Lix!tNRcDL;uK~n1TUYmFm(}LFY&Q{ z{Qoe1e2+gsdvjQqikDdg^x{{Y2~gdbS^K5s(bD(@@7UnOUa6vNFB+A@)VcB(tM?^% zmSfE+PQ3y>jC5+%E}7zW?oyPh`JIv?!tAo#1E}Y~`)mR< zH@mH8axLI;mX2>B?Y>+Kawdbw->f4WmZX^5 z?+V8lg`Up0>!(Y#b8qg+Mgn9L6+2A<@Xdi9(VMTU{6-@L?-%dt2i$Kg*cyAB#cwGG zh06aJG3NeOpkT#M7}_Thr}Yb$p;_CFf=3+4qiTJ|uEvl^E-om7RAaE!8urvAc*Qk9R^~!%fUs}S zdD~U?o6Rl`Pl8d9nN-wx_d`0v#vdeZot3-{nJ58n#94_e3(AYas5@-XB~Ve5A!C|U>MmO;VG(k+2Zk-xMK z7VPo?w!-jty_yG%f;&4<2Sv!1TSbA2 zsV58@=47+DFW+jXa+YOOppxAP>Hr&KRMwbfC)?I^B|JQfi|$!lOG^`*{dZv^UFdXo zFk z>c2U$g_UMH_$5&)dZ!(`VOn(>U9D%%x-dKaok#f_6mEfA@O0q1@q32Z-kg7&uQo4? 
zYOXdM(Kc6_c$)7=0DU$RC0=KJx}Bx_v^^--g?vc|8SI|uzc~bTsBPOTi>*9(|8e#| ztR0g==*>Uh012oKKY1-t*vjv!v9`_kScSGImr^%s68P~bnDQjD1Lh$_BHo0*b=fBbV{BE0Ngu z&{3&2M3$&a%0ovjH(q`hCG$@qD#bBAJcO?vO^u+3N7}YFyyc#Xv*eDBsFhMosd;-% z?QOEq`moXY{AO19tS`VHKB>>sRicq4>NshkJ1PYO#WcUzZ_$}N_6II-$|=pqcS%7> zA&{FpB$N59;C%aYyMKEh128l8(H%)r)7Lm5-~F zPV9&%WOAobpv1ElI3_Al_S13fC&>9z3^?vdBx`K`5EI<2nqfdbdQPQvbU<-bxxj)} z*)_;jHoPG7?Cmdp zEO?hVxt@`cdhl0(P4glCdk3MxODuwa?-?-jkD#u9H#|Amck>a*QcuJ(!+*mS7ZkdZ zE{AX?vs|E%!ei567ZBxfD;E{4XHY}lrH^|8U0j|Ymy#D*oXZE=Ln@oWYR3xyt)unz zjYBz=u;+zZqX%P6uDFHZrIh)~KRE{Tw@GJ8@2{68!y=ZLBexO1CM9-Yu2q_&)GATN z?nfX+`1=OF?s|vb*s8fN#>0ZU+cAE#H;ZJ`*$sPW;BV^udB26<{BL_ zW8;iXTf4vB6;ALUm5`^uncbx!(({bqZq#z^y=~rRMn(O+*hHQj=kt{{uD{!9ivo{; z@@C9$yf7MJX=xw5!_@6_`q(~)tY=Mq%+$c@7J0T$&w2h>5^Ea4?_(=c%vqqyC&LDU zI2gBYS4M8Wu}LaOr|KSwMvC25c`H&GQ(vTJzQ8oxG~^bvTbst^0qIfuK-O`kbK^Sd zI6n!%*1==e6E9%Mk3g!3S!3|BQt{8!V1Y?2K99OfWhsq4Of=EWmjH-UWhqjqA1-%} z)^XHw%053xb?U^FoafP_%F0RxY~$GGF**?M)7*|)tD$cCA+RoMcX#)Dg?-_`!u&kV z?Qms|Xk%IYoZXT!X`=3nz;E%3XgYmctM<<6RL}X%EtHxWGgjj4?5$&y2U7+It+qt5 zg|VyI(hs$b6&m^vFz zuM2cox_sQ)-R{3Y2Wj2&AEV>qy~|tti>Ei{Ma6x8yJqlW*K4>1HEe$VfW;S+&ES3r zQjCuBYTb46$bS-TQY`#Y_v}8Y_by`TMp%o3RkAn>^?os7tkJB$r+}rWPn+qeB&zH5 zz%1;UDvam0DDFG6ir^08>Sx%EM62#I2POFMRJvkcR(?K zG=bl>lt-ySIVJKUgRa=p_?6L+GUVwkMbXtJ)Sl$DC6Yr*UtX`+jDw0K>b*QLu8T!{B7iLl&U3Z7 zH*xbM`^T&Mei=!B_x-Qr5icdpA43BDO*@^XvTx>GCQ`xMeRlTScWV3l`x_d14izJ_Cju4h3H61LPzeIXAvRZ`Rn=j9|Tw-8n#h zcfZVtDQ|15Kv^1|pVl1_6xI0RvM~?|=Fcjxb$tD`8=X0wqhmV3ZD5U1w;>#nC%@m$S;Uf?|c^>uBC z4sYOV+o{spYl77^b|7{r#_zn7=P`C9+u+OPu!_YNF}umOIa#CeVWy_8FZA@R5k2wJ z?M;RV!5CxwZ(&>++_Td^qQwDFt0?+_Hu$s`)>aSZMC0QTg}qR>bzIlKYiRp?$rJw5 z{P50D^V3V#7D)+-GZr868}Agj^fbaVXp1*X;Y2r+m0A?CURhsUe$RkI&rM-*mrO)! zyodR(q|#YG=n&p~FaNbjQVFQ@4`ac!Jkf`LTM2dizx;;A^o#<{SGBwGm3g*=+rre! 
z{X64)LNr{)_hSUw-oPgx+h>ke^h+P%MJ`%%c1E<=F2X zNC(Kt=jJ;ru8ALEy!x-^5y^TDrPQ15r1N56D$vU}gUK19I9ON`i-?UmM_IhkBjS}`;n zQ4unu`z zTe(fZAsq4!wzqp;y~J!|dP9X~J8hY5k6hU?f{P>s{p2#1Fjb|jGWQkzu?*I|dS(SN z`4h9zZ3X3gNUI-yYrR!dBtW)jgy?N3I|CjYUUrxGnW#>YWJT1V&IclnxF`-UpWBgw zf!}D^@HDk6ooiG}=s~Ox!umCj<_K$@QT^$Eo2qkmbNGUoBh|G(#PDQ8%%ZhpQSo}A@aMtXiL>n zR|$NB{>}$ZkI!Hs%IF(-l%FwQ81W>@@rYvx*i{CVX*4KlO|!DA8v12)vwLAhHBhah zy_ZNfytPueAbe)lG+snxW=^j0*!BqUoe9IOIX~%h0wp0Qt){JouUD4FP?O;JXfi9{ z){gEM8@={7?XJ)ATsuc4dVr20tCZW`LyhCNqyQx`4^508T}X;yN+h(!d7;sHd#W)e zwrayLO?zl`5T39aq7*iM-U|m#W}f^<{_1Ye7r+AxYPYv5b3i^St5XLQxpnF3=~7-B z-#cR9Q@?&Wif0cT1`J^M6Vwo{Qf9MtSqNIejF7@5tPYjW= z9fFH*P_Xtg3}Pe*?~`HF?bAUw<2PB;`ff?VRTXaDN4R$H5{LC&zIbgv!+M1>n#8y< zLG^6wq}in{`s|Lhk6rgA$;Gdb_{HZ?aJzIgGygEF=H8Whc@!Mc@NA@;pk z9cTq2IXS~Xrl>Hmzo~jRhpF*d-WqqL05MQdzX{jNqO-i z&e|XeLCaO`M@NC3H}ii!m6rCW#WItt1q573M&Mw1buToW33(*2A|e! z6{#uemCmd{H(w&sA7%#$@fH724E5AY?jChIi713E1$-G|wv}@AQ|S3&7(TIwU#z&@ z5Ym*-KMXSP2ex+b-Lto~4V*gGrs{e583xM-t>oMN%FAQ9gZo>^1(7q#uxM^rt4wQZ zEkeg$=)%g~&VQF&B(6Cr=^37Ori64Beq&;;z4e=D_e`RB+-5Ip1 zg+rH{8eci2%O&4!Vg8euIAktD0+3Rw{f8>GB}v&1|DIm>We8P>GQ>ZZl!M4A+whHi z#_Cz3o}q7SzAJubr;F9$V~Z7eQN#+`zrE`Oc5!XmlM=?mGp%Si;u~pwJbX+#iT4{K zC@$d(Q6qmyqR7XlqNvY)Z><=HFsA|=+ zZw=TFAS)}oZ1{;YeRl=N34oZug4rZveZ33#Mh3QfW#l5F>l7`8ZWOR?sID#59xI|# z2rGT`zbJdpa5%fDZFrOfi6BUbPPByR(HRlF6VXfb-g_NM^m-$D3xenoy(dHuLj=*I z_c|D5%)HzEJkMK>JK1sv2M2GyALqju1p%T1+EZHRzBhm9qk8n>ncrAG zmZ%B&RnAN}RQd_qBKlK}4UY;xv!b@OU9g6P(>djo#;1vW%v4A2GdxGazS1J;m>12C z#ib+m+L5;)-`wYp(BC#)`rM6*bqrp87W&MbrvGcf#jPcr>510TuZ&)m;c0Jw{;Xl# z?}g>g_Vt;%5}df$Zzt^IZY}{9*!f162^jC?qq{M-&fOvG`+IWd-UcUn3|Tfbyb%73 z8gw6S1gz5|b$W^rJkqu_ORM13-m=mUJlNLX5bo7}HvnZESVPTVIU5;#f1XY0>xy8E z;Qsl=D0T&xT=Cn$y|Y1;?UsyU_HIjiORM4!x=2qyoH`9DE-qd@{t3-+r`oGUiQPYQ zwmY79_}InNAD)kUDG$cV7Ovm=!uikY6Z+SRNuT?FS6sSJNSRz-2>Mc|anLHtfXWni zfZk?=`UMH!^mG-P!P8q2J=-OMbP3wIhI`Wr&rI*!b_%|j+i`9(H}h&%B!Xz@8s@Yt zM?Z$uAXEif)YLL)(0G>x6J{M{k?75vb8_1;*e_u@KEgAfuKZY#(5P>*oXL76{wM8JM+ 
zc|3R^=kBpWNKj5L$*`rJRqfYT<#&JeVggB|QK_p-^z5$}mQ5v5-;#d5a^WWwXh#r8 z_)OE}*d3KY<%We3u4UcMu(o%@7?NoIk>-`@*PUpFvT-|R3rTr}6x~|Lc|+DWR4}x9 zQUUM%iyA!@O$yY@t$V-lTG~%(_rH_SM(6N#%SzoZ#fc4dV$C6T`87eGbV8q)68%by6?LvWWP3G9$cD7FnF9)|aR%hGtyMsDT16mec+#n%pPoDfI zFp)cj8w>!XT>0Fv@hx(ET-5Y7`~cZ%#D#V|?c-4~G^BPujo>o)6%HHe*ojL+itS`c zIPGsto(#(Mg?vVCI?sI33L7W7pILB|avsUBYYzqv73e>dxP83}F28~D!vk00U*qF& zQgp&8@!rnK{=uQH~RXum!C%hm&riInj*z$1*!Ki5)$;WO<|_KtjinDfHAfb-Js!NG?5xs859 zNeNyzw8d7D8m+JP)J*!j#s>o5-I3feA3FT#U#}N0>4$3IJnDbn!GgO$pq$f?1@3$e zo!7zF6`YmQ(usw1Osg%9w4E&jY}D<+j*ZQws)b|OR*P3s%xA#V2kcuvX@M6cS=3j~ z8Ck~GN?VAqc1Hi-Z*hkJHC{iFdZJ}9NLXlpl1$@86f`s0**q}$XYW>so!xPnUFi^q z%4-ofz5HSgFg35^Re~O;PC2|G*F5@v3-xtNd4r|xJKqh87sTt@4-?Amq zlHYG(+QL9CDXf`ll!{nYhjnmN7x26c8Vsnio!rBC7>bOvDZlw&#YZh?h|BhoGt?3p zY76-*JVF~;arN1l@jeeHMdS}T`X~4AJS5)+2}lB+=XsYC5#~~2-i3Y_Cq%pX^XNw- z6X&X0s-Aaax;?B3(~hk8bl11bChSd4zkm>@Q~vzldntP<4;07@fhZ=Uz>?t%Cf{Gpo;LC^W^xe{?^0-2g6Q}g7Zi|70+yOn>5 zW^RLlDL&_ngI{n=N7$Jm>`8wiGUOr-2HV&}x;{oAl58JhTBkee$G|l}zz(8bTY*C7 zZ~H;zgpE!fn8EcuG5~tMaPQ5C0qs)%SxK;LxZE9Y@Y|aib%LiEO@rSujcdkfAE!6k ztBuXAibF$3FS=0)qm~v#2PmVjC(Rr52Iuu)HX);H;KtQePK&zHX zzet)-CP4tP0g;;*W{PBbuf#%TRisq?jDPzBo6i;)w6iTX=d#~8;nRf0@ai$gkV_H5_BIJ9{WW_w2TuO_w{3-?oac4}*WdASHeu6QP5)G5~}%!&yRNYdD4G z#_1uDAK1qLj8j(Kp7Xpo6bSZVn6XA;988g^a@51FPfNdOR2zwo>*-o+D;_D4Nwbf! z*qoPeD(^P{4;ZU~dE{^5Y%{NL9l~#6Ir6hDZJLRKeMu#%KjF-_fo6?R0YV|nOWR`& z5k%S2TAGld{qZky(xpvp*)o-|;->wEm&pBd#5n!MFP_Mpw*c6OQ*EjF9QtZw<`~BLu+Z*uH%6?O zG=2s9ZHRP!qz@yzKKOR@JyYm02kwLM1>j@gcu42J19vcWIeE|AZr%eAZV7DJo-<8I zqVc|Z*?kkek|TC@?rJ$mOaYzL@pFc26B0ig?ab_=A2?8A($d$E8t+jRASM{Yy4OC-#%Et`POl! 
zSH1oBT#MEJ(gI{(B#^ieZonYllp*-4oT>NT`|Ip2*R0TxS%*Zgh`IdK$0YO#@hW3B z@M=x{Zu6k>bZl(c)Vna$0Rao*qdOS)z78K^F};WQ*!b9Ts(ZkrZ2yj>{U-qMt8#FU zXau>}(L>4*8yvnTkZLfm)dP_J`B2ui%bA@<`8bf~=})w_M?YGH1;$*zLq(q%VTX){#`uY4f`NM-hq~1J zJSR1}Iub5MeXTM6Pk$vcsCAM=wg1jwPTY=zXE|-q%YO578*g=K| zCX%@W&zKnRriZc;+!#C0n#W)h!Ge3=g+{CG5LtTAj!Phz1tz+yB>X} zN#|VY9uFrg$#tVYi1;80l^}`5f1;(3D9}wr&*R_FDWhOt^iGQT>8Hr)W;-9weri9u zh}l`+0$3jbCStDxogb+uWk5JMW8VGFj!%(ubap)1c0@0N8ulTR{ua(SCY{trQ<_&{fDMPd%0=!hPC{jUwZ0}iJyx?S`dXcm}d3hQw!PO z&3O<;f!AhedD{ZlGoQ(ddB)DIk%O+;a-nUP%P}i#n^_7mB;MU>%+?)cqlG{`Y7@QI zG2Kg&c4VB;88*+ASeMF2GlZOz{%PZ&*G3Cn{)Bx5)GfXPK_AltI;kL$JV_484~@6XJE!dkG}4eBE@Jh$ z1wM^~1(5`i$^61(&G>X2rk&<_%X+179H2Gh6!G6C&~;qg@?YDI&{0$W$f6D;z6Ue) z4#{X!vG0MwbF>GfNpOj3`b}y=8waZdZtfVLXUX{x1eA(+XR@+&r>YhBJ`D}}9>sVN z)$BEH%vA@}E{$c}L#9-X_#@xv*ny#&)L0cU$}an%cJHccuN*^ zL0#RHr>_MyMsZM=d*T11^`HaHrDzSlzNB_(bJGIWZUv=%J47;tyl3>SYP*vAR2fUv zAMl@k6>Me+Ubr!n)FB39F-O?y91Ce@6_JHccR7%!% zg3HW@elUe-ew)v7P;pbl%-%G_Du3MClzUlmQ)>_UEZ-cd&H?;Laq z=TH?zvAaI)T&&i93Sa?W``t4%vf;545vWl*Tpdqlf??@L5rxoJ@w5VVNRx-z!c?a? 
zsfFPldN1O#L13muf-#yI07;o!Ek*pQw^~+bRV8jp6-LsdHDedw$|minaXyU6fRMal zvX~duS(Qi>>Ctw&%H0bMM6E(3ab26!Oa&+o7vuEzL+BE&-Lr@hGH0yFQQ^n#N+kh8 zPJx~6u5!Wgh@yM5Naf;k*^0&@HkwZbZhjj@tD?!M^3GlX_d5j!$phaC1^;f878O}V zJDma#sQRDGJ08Znn}AbnuJ( zRfJj=DvZ1|kznJKN20>&&Jv7)#L44aXm@A+mu1W-!0*|AbL>(h{SF6}TsL0@8E#9v=6 zbonC5q-NO$YAk8&wO=iN^f+w@9;Bglar90+n)%Gjk4YdaKUIg~)4X=d^W#kg4q=$^ zq&?Ef?yHkso!|R9ZJ7t#DKX0Y+k>&;iHdI}a&dRP`X|jp1G=7gNI1|M|7_*1*?UxaFjTx$< z7n`!^f?MV554Ms}C=zKhyb=h5XJY!g(?Qb)^sIkSJY>NKec|Fi)>7}Fvx)uMWCz|* z&r|RAXPfw_TZ&<3<=%Z7T)XuixEjso*%ulc9p!_vCrxWfyW_!8^AyhYS5pUDmp-+$ zkYD6L)^u*yU?CP2kBQzd2MOeVQf~#rI3YMW=sBm1VISC z=kq`m`YhqhQ#dEc`}7p%x1^)m-P_qZi8d}t5P_60dAmCwy7zbp0s$ZeY7&gMJ2yDHI#^ieg^(F_F3u6b|?a<9woyoMYV z(3>p^$ilvwmrFqPY5X!#rxg>|E&)OiXWu}WOuos*-t@F`rChV{1r(VZ)3*ZVUZDsu z{|(506~i!p`n@FreQ5TFr;wfEvO)0lHbiCwr&{t-G`V)Eghd5%2$T;H+;6%UP*tv~ zLB+VKl8DN-5DaQjrvmWnx}3?W(XV==b>s22KEZ(x1I=IaPhEu4(k z)yZs%v-q8F_zOy&wp6cgQS6(VYqlY}b?I+Gc%B9PtvW?aI@@Ecn!QkL^x*+*Uqu!% z@Chg-I?!U^eE*WoWpV6ps9%YTzrO_eNkhrDwd73 ze*u11LA2}c1b2(+bpbz(g(5Z+uOOur7(^0Wu)-{@#LOTPKR(zKqkK;(>JUhOsLxJ! z9euDuG@m^i(WuPM@!Jx30DJfiCj@{%=fC?kDs|ZGh+|?Nli*kAIQ%QV0!^Fa3v-#? 
zr{l_;_91`zKaUEucCBu1T745mZy`Noz*F4Otr}94yv{aK zW9i~qr9YTK1`n39wq6IG3Y~UQuc)}r<&#%Fcf(GGE-<(J>E>Tc;pYpBZqwWewJ0GB z68UZ_MVhv1o7Y*SC6g!}J?CNmclf#;=p5;NKvy4-*T7R!Z>4(P?J$2FI{JtiT(XKL z^JVfDJ5w$$pD};Pcob-^c4ki=J%?T`4Yr66GBLid;(28Ib_L>E z2h6Hlo+fVEYnUAghDh7;2 z1o#r6D#^HJ&r492FFK5vK_>3vygNVX=u&_6QlK!AAWbh0C=l{ZNdi83>xS!cfe{#!{iZF1?>}{{{*9h200ol8{3nEV55%XmD zeR^8#x<$n{Ypa{9WPhChTuk$q(*^d3?8Hy_R5Vp+1fdtVKy4s#g$2YIi(dpoN1HtG z3eqw6v5TEn$fyl6YUy4+sA_U)zD12@`?9{ij!S@H%=FuMV_Gfrp)~Os*YNpn`p)O> z>Q_o=L|K;voA{Negnh>0HtBm_S*j>r+!vcE%Uh4i8i~)^Cr82I6$>4&%+?(N1CMzhfN{Gg7j186=}f99Z=UYC@d^u59*{yHb8q`GZ9bk; zylGIC@=paw!)&&>msRgdHsarfi~=@3Y(?I zk_fY~9tR6ByS};#Fv^9f)`n`Z>)72(J)WTsKEE~?l4+k0fTD&nPv=E^Fa-*dzsKD1 zArSV&UQ1px_H^Grd&E8W*I_?=K3GjlbQ-y)>U|$I$rCqeyxw}eLyv+(pWAJ4u|G?Z zqls7yxO|H{_1B$yN^!e1jVB%+9%OBOOXq|A{U6!shTAnu zwWp`WXIx_n*t1zK&BvhQUxS7^(CA%Nb;&PImj8S(x;Si@K(PItWeg=H0AJD}bdpa#paI8!q3Pg7 zqn&26Al=x?;Vie~ZiDz`tC)kw^Mdqhp{e%Ws!rz_nCYoB-JV~tBp?P>&gVfrR;k*+ zGwkTlK+Fb%{Dlagm6t1h$45-_BKu9D!ppwx`6bOhU#6(BL;U<^9z0mrpU!mL^ON;2 z9P2OV`I$#>*u4r)qyOat`d6fiNo_5EZcbe|X2B;Hy;6*hHkClfln=VAh}M6=eq-C| zvsA$0uy{4T4V5@M-@V{Aa-qVwsHnA~J56&R(D5r-^&oP{83p_R(~k=w**`x_Ii;uD zJ`Fw%r=;CpxSmYpNb9ld1vUuCN)EVzu$ZYVBt*yXb&{IPHi8F2iB`YgH*pXr@g5%h z8{HH2mU1Y(z-EW;`&SxBod^ot^7J%r4KvrAeO5(-+ZDYt=I?txlHct|po-gO@Q5J3 zUPU+%g(T(y#I;#EHvT)3J4es&8MiS9z3|-kbt-o`d)(Q4p`<+6??A(%ZC@K*&${yd zy0y9kvxf9fTePP_Qb;jbOeXEv#35@75O43IHQ#+hCHN*RV7j&^cCLo?Ew3V+j+M)Q z*428!3WZ?PIi2mz>}q2#Z7PjLjf{jzHeId`F$;}0TpdHvc}Zl_jcnx6y}6i^^Z3GR zvrB=RJch``lx=gY*VbzKoh4VpIPFf}ootjW_~u(G>|foD*V*VRr^el@bkzZPmOHix)QU_j|pN+?$4t z0XbW2<<1&-Mt$2t9cLm|{bRW`h?!5qCtYy*nVB8rEd|70!eXcJRU4Nq&y6wsbi(&m zCkjgjl1k`Ov4v5-NwXPD9Vx{_X?5lkpstDz4Z#l6WBAlRC(z?Ir6JIJM?tzI$+HP3 zPGUjLhXnGpR=|rtZsNkuZ!2)?4X`XGV;1R#Zg@9q7w?|%OH_^U^y39B3~&N3dw%e= zFgN49oU-Ydha;KjFda_yA#RSL+w2^C;}Njq<;nP4jq5+$7HWPO88}az_|((cUhl44 z_S{vj+Za1MC@smI`C`_sPoxKiJE;oI3!pX@78Zb)9E6>NgL+<(OTU+xPFz?(Kp-BT z_gbSuXK>B@d;fWfyuZ)ci!6?x?%8Tj?DkK?EjWkVf0i0=68wEytL5PFgUB#Jj_L}% 
zyG6V32TwjbXFmi=8q=#+^bd!+m2;ZAfjlgs1g+P1~KMJS&hYZtF2GmG{ zyyyFLBa<9{z6(AAy37$~w7umKZWeCDbzldj3vm>|pGXxE8C%@l{_8v(WntI5ofT|E zcgKGZZ>qZGYM_t9eiDY6TLV%O6be;KLlPw;hpy0m%FoZgCDd)!zN^3iK?RZy(0?LqYM`(!HQ-lY^RPEl9)>;z%Fg(ZV z1JQ*S=g@>$I<5Rw>+N^lWY_jKlvUh9RG%Epiow{xY;}neS|8CIJ3G54Po5AQA3(=e zUcP)Ooj>X*$ff`g&Ji~wX3~M7A>|uj(}*jrb$(x&t9eyCLAO*}Q6e`f3ES3)#qJz0 zL}~|*yZv`SI9@{CZ7HGBwT}eh;M|!58udK#sR0kYj>Op?VAfe?%?E`oFHyAVu6{0{ zoo)2#FVjA1-cv^kPWVJBm6eoF*ttL7aHN1%OQ>bx#X=t04Q|QEnE%o%1ZMd=E`0Am&D9)4o?`Z-3qEF8@ z1sb(^C8eRM)V@g}WVl?r4sF|@FV7`JSzgx)QO5N~J>xO^ zkeV7lgb19Q!DyCye6oY3$G6v1gByb9MuC`oj~t)!YW(8|d|05qa}=*EGlQ~9S-!&V z+U1B(=3b?@DbL{NPhBm1D?n-^-LAz#n+JDmZf-8Lxv%c`=+UD#`}1n8Qs4ShT3O8- zG3-q$uu6#eT_I+A-uJaueWygF*7xLB1W9yBp)Z2|=x$Lx7iO!vj3G{oS_i#xaBL`= zgZ?5@HExxnujX-J$ER?7SKBy2{AiPKty1@aeZfeO;Z(<7zkZkA-{!z1bw^ zgoLeIC4o8r#Ss=m$XZh(0U8&8L%3MHsmh7b>nh`cHr}Z^jMyPg5xdpFV-wqlvv}PQ zS3k4M)07OW^$#j>jz&*utOcn29)$m!gLZvN8d}rZ%5c7|_5$Rm+va#wK0t*Z6{|nR z*>in%jsr3=+=yh%{Z5`M3<^Qa+1XhwhYi_AiRiAwttrC`T1-1Od?Lbt*_|RyLFyOR zi^7C6@rz3_OshZjOJ3=4^5ItGn5m8fTjvK{A#24w*(_&`CoIyKL)BoS5N2T_%D*8v ze?u4uGX7yw)Z$|9e3e2Q&6oS8?)L{ie&x#7kwnwCg@k6j>&_ZQQD8P}73j7w(fq_M zzQ-8e zTKYDsj_;L{MS>4IDGqiB^~6vDMx%CVFncGoLiP<+w2klRWi@Xeh$#F-%)F)!Hx2&&*6zk;H|zu5xjFk3de5;WyusPx)8=S5}XqclWMe z8WR0kfG*qCURiM3{8G3{t(6JG{8MCvYdji^xzd!1sH%I2qClHPB6I0Y2S`6NK}0w3 zb%o{->7{y!=G=bIxDfWyGHi*SMbzjpA?dClM*h{K{E@s`hP)x&4x{q+ucGoY0Xb?e z=?@3JMBBCA)j%H!2nm^LoFgN)`Wk32PCgG&6ckB6?c#Z97H+Kv*`&hn)pH&O+O5(4 zFqYmAIP)ghADIkaLFcMoI(xu3wASKHAcj{08O#)FNt zl+P?ucy0St#iy8{&Ya5~`YPU@R#8tu;Hzun_j~E?m2{ovGp~EH3*v96?~K#epAoZR z#-nQpWwW!789(B`jJ7<7OXQBG%FQ;s465{oG56Z96#%|uD%BcD)W(b-kw$Vso zF7|&PK6(E=8|a&}5ozPQ>p+JZi6x9vVuFMd;mx(HR->#ZOc&8(6O9{(?EcJo zzz3IK;^aqbWzPQJH|c7xB5%L1!I{rx?+!2csVh&|htfo*@IU9ZAMdR$D=n8H6a=cQ06)JH0PA`;a~XLiC&S)BE;5L|9Uq;h-t3#=50Lqndg7 zH>otk-zgj8e{mTw1k~L;K#rCC9Yd(}Qggz~O~Uijcacd%;Q16mZ*u+ zPODSs>irar`iDq3U}k&K&)ebTm;7DcYqpf{B<(xtL-g_Yvfb;09Q&HuJbS9`OI|T2 
z=CH!mQf@KvGRj8dvK=&a1pKK9R@YU|>vZA4g#fW{bu|zCrzm6P{=B6mx6rs;1U3H+h*J(ckO7EuF}@XIrC%~0Wq6$* z6ApqD7YI_tw|`=6qu*psVWz0IGMzfdv}Q6E(LFE69||_oY;B%~Vlh*_S~w}t+Nscb zws5ZQIy^l5cnLF2+XuA0gpDVUqn1)oma%3ZNHtreW%wP1dk1CS0olI2P3%4Dmx+Hs zBUBc(6OdNEiF~LbW1Mj`o01XcU!;>LrPx|nSorZ{2y|)DxiLnD37|50aNz#{P?<;J zBJQjB;V{oJ9#r9v8xrNi+ZfE4_BTJVv*$_?KIz5U`YF%#SzaUXg7PUtq{@DMUTgX) z_K1*(hzRqYEOLMEBvt@nBN7kz&FU|k5BT|h_sBgMa^npr(*k&{(lfC&7!vf_U}k1( zJ7;GlNN_`@#{~e$06GiSi&^H1{Q;)vbD7fQ;?5twh-JiEZocja4VDSD|$q}2T=y$)K? z{Q&5T0}j3v4lk8F5_~M%^4q?x<%fo?hEiCR@ZMt~Azd!@CS_7Gw#Sd(>)yk>e)^!g zp`(z!tf@)(hHbieXYARCgknn2gi#zSwKEGtBgXilT!W?82{SZwvI(onSy|Mve4zT1 z`ku=_-SZW)ahubOsDVE1- zf3Y9M$(e+ip;bcYS<{A!wCrxa?ti6Ol%VA962aJR1E4F(^vm^inyIreVkL=csIp!p zSr`6flZ?LOa8|M|gE8Bc?=KKsPlj*x zbs%$tKMo*&njVxYow$lBN(+ozBgt@&cH_?g1=Fp3%_9BuH7jZfB_8#UK(Rs$-v_?2 zKN#rj8`~kQ*-qkVfaLHD+gf%N>5SLwSlj@mfBV2JZ6((8vN#gy#ncS;Hj(ZP$v)|uLWcLqJG&o4}c_FK#Cu@+d~OR(J*+})8_r(z-~X!dda zG+>%;ef!-%#U#eO#zJ2M7Hxqh)3CFob$AI7fe_ao&+B`&KUs>&lL1#-3G$)AZy?Cu z2SM}|r5cc#Fxf`C=$%G>J&XQI!w9jhfBsq|F|N|s4AK#S0rf#M?Otoby-*9z7vIu# z4wu-;BKb@~7lXw6Eb0KLod>PgCDykBUC>XXt1<t$eaOJ;OZxiwmPMPD4z#59~(@r+P4@k__pxk z>ya-~p9{HBkEy;>^ZEOx8ZJmV6XeJs!w>W;-{MY?VD@LFv{9nhpeRh8p{04Op?(1H zus|fN#aQoIeAJS`8uOwwk;OE0e{++ao&B%+dE@LJuyO}6V*@emCT=?5EHCx0#D(*LM+2Bpc{fRpJX3?ffdfAe&7+-->9K*SQ|| z=&7bRwUg*v{Gu*-!Au$zDN``!)NN7o$JLgGu5&4zI=t_3qJl8ZZ6HA7ZK?H_j%!2` zw;L@^@1Mw6lkXT=y>&=V0Y!)mv^& zS!^f6NlQyZqY{c}iFB)-{>h_qJPFHRF{n%}wnJxE>v5{7s;K3F_+u-%<=cMR%lj>n>oRF>Yl7xjf;%>e3FB6eOVl%X8{BG=TuE%Wn@0i|VOLG-h z4Hi88eP4O0E>`;i1h??#dhbG9K&v6`%MRCA=17o{32A5Ey*DTj_GihJx!=!t4(L|^ zdc%Uxbc5cwy<-3nM&F9!4VaN#yp^AyztWfRFV3-#xB!AF*oe=YH;~b$u0sHSxxEMK zWH%Y4GYs{%Z(*sb$>x-}s&Y^>2U+}nDD_zu6ecOF`Qx*WH_+U1-i6!@Q7KMz1A$O5 z3+v5K=h-M8lSSS`HIF*O`xGBc7YnLjW|7banS8}XhB50U__ z=EgWLAfJCua%S0Orj-mU>oSk{1tq)1$?>=Sh5q~S^#&5+NerhdC%#Hb(NYWw**Azx zk(O$f;4{#B9GYh%wQ{E$s>CV0L4R})*caFahGv-tpMQtls@A)X$TM*m{>Mn+4^}V97Ua_wZX^5Gqvc5X3hLS{%Y5UQQwFhE5Zf3p20yNT%_W<)uV5pEt_m 
zQF{@)PDC()-tE^(qUCOGQTd}ElcGLXLtzCu-0>^I@#+_D%|__c7bmK3&gG+SZJ-|0 zfpCi-`57o|5=uQH0Xy^8x z{6Dyi4>ZXYKU;N~npp9cs;d=S2|GI!?g4tZ(!0}v9UGzx=Lxh?)5$lnQ&>7hh9`4+ANFtiWu}5=3KZ7b@_z-K#SE&YZckhhfE!kB5=l>(n z0TC|ekZ6wD+&1w|U}1mzHyifD5J04t)-o(mJ*53Jers)K-kh_|Cm*M%-SG-scK6VS zk&k6+A;7EzS3Wj#JMSq)E#`J3bQ zuP3c+=JN9LFJ555x;p2bDWYTmDK41@T<(w9Q2c}*tNr(HI^V!*5a5CrAlZHM0&u!O z$6D>O@sCdzdB}tYvFob*{u*?C@&G*qbf+@l{}qP({q-RUlb)7*Y*mKwsSUc$tI3~u?y zym-RE{fXP$eQ?d`~%k(4goUHPc=zco`Kc2z1x>gV61mpgYFXyL$aqRo zFBTyvbQV0i?C5p=D3#yglb6oQ1V0~Oy=p?_c?lRpM=3!9CVjIgap9ojNxJa*Kz?rr z=^0_m+WUoBQ3Iq!|J={iRKV3QJ#Mu>zUz4h5;^*F=iWRu5(#*j+&AQqRJ}MM9cu$Z zr5r7qgJdaiTiX23{|S7QNG>tE^Dv`Vr)a!Gr1u|6HQ<~7=&@%A!(ajfDeNP;{fDs3 z;{4#}F@}v*#^dFtu#@MlnO6P?gHLwkTN4hNtfe0mslt-NGeT80wdDIDB{3qDv9u#+ z*Er9An123Jvv(Re*B7)pJ$)ugBRH}X7~EVk7;_Jrdr>%GPdnOr)#LB2J|J?}=K5>X zpl!$Rzi@pgak}}HmFMjJjg2m83RaEltFXg5$=1)yC0{f7+M^=Pv4eVLuGm7)!~3fZ zPAk8D{k6Ebi0)fsr{FUBM}c|2_IKWx?JDBp=9g*5s_JT=ybL1HY^he#tp#)KBSB+9 zL?3&i@mybCbmm}C8_^asF*?+H`)*Zl*VMF)R~@xon(b>uv*J)KQMuF)SDzhkuCoXm zj(@3b;zR093~bNzvN{mEz{fPv335-m{QYAj3{AI6C+r*VV5s7X)$=6x$-{b$#FGX*f{>YY zrC^Ji80JB@bU^6ICKc1cO!q8e2+V#HH+bz|HTnC`>NC>V_%nL4J^J;H`Qe?1x-6g< znpPuVW9{JJK*#J)kmxrdvX~wdlc|H%<)rh#4#WA~5buaX*p-@1i3`U8|NhoLGlBJm z4sKYR2x3>|ehj3lvLW$VWmUP0W0LoMBgZdBN@zjqgud25oNtq_mQNEB^E=Gp^v^Fb zG@N4k#)e*FDl*-dC+V)H-`bH12U`n8&koik3m3Eee4D1~s|@G27??mr-)J4f29im2 z2C3S|b0Ww~X-E3y!##%^Jc5f1}^0s$5(RhE&Pl|i_DO8McWAoq6qm|d} zjB`SkW*vWb8(P|ZCDo*0B>Q7*am%x+L%(mNG?P|_IVL7FITK8Mfyp>D8`R!OtQlq@@sex_2n z!!T#x%8|Ilxdcl=7%{SDrouhyb5`0n4fu>>11m?0@#iaHpd|6y>$$D-?NCmhqp$CF z=;=wSmsfRaAOzB6fsbiaXCM`6!A*NBiGespjDYog;qkrh=*{$zQenm#rR1W0tss2F zxptaLZ{+tM@~XrAJ&rR{qwP28rvAb5Zav&<;kD0CWyq_O;@YQRZ~jqP3t&V1d99<< zGNA?g&!f%%=ew=I@kJm~e;e*7I5joZ3Rl6?DSuruZdMAtc?j>6%uIg-t%?6u(Tn%D zr%TdDAVFv>=ex(yZ^Jb<2)@OUpZqM(OtK!1>Emsz(LsGGe*Nipm-$hj`zuecd}7Yh zNeyi?lI2bAfK1&~%Pn_Wu0YSX$t~_+6kwe^P?yBNM8xWqI<2qIr)&j+E>GO@4o!`X zpON30XG2lP^$rdyAqg-veL2Ez*h}>LlwF<1PspRQ4m311&GM8W-xL`!ICGHlYc7wT 
z8K*wA2WwkFxuz?d?#>n4<}V-5%6PV~o%^otJ<%BW!D%yl6yt5E*#qJYROKO)zkpVv zKSm;7qCG%qzYOPQYWKnVmRg@B9~hV=(sJjD7aV!R9WN#$%Av2Pr>F5%UBLig8ngBE zeDL=N33|`|Ybzd#i1mJYh=pIv%I z|LBn)l6zU^=IAerJ)&Rw1pJ2#RnNH-73S=>#KHl;v$OFZ0ciI1n>QZ|l{aNZH|YPy zgH@^PMMmWa*!;#h`YFF7YIagH<5+$0^kV;0Rq;aCp)X;=!QPgc^Vd6+CPLgx9^KmE z{*&+F$x#w8Od&dkX>Y)z-a2)M>tTHSnR?VQF_^2eJuNxq2x7wCvFJz_xF9OKzFc{R zxUfEqX>V(z0rBwgBCxS#8f%Ye3DZ!s0VXyLNZ+oE>etDaEPj|c%EqG|6>~WI)*2Sl z8VZ)`fytY$^f!r-N;Jm(^_jx6Bj$TH(7ejlxf$*^=h4o&g@q~R>IrCc(jTe3+o`js z4ii&-Wyi``TVG%J@1_7SVdTTFv!8u0 zTx@w=EVZBQ9VD}|7x6{hDBbSFqHW3{K585IpPql%-P}f!m;=dOa5As4_UVqHQg?IQ zcysCDo1G#$^P`qv3UjH$7(8#SLXg^J=Hmigpove-m-7mvv6QafXh>$|#H3b~BA=JFR1rIXQ z=f&Nq68fmcymwPh{lR#9#BN(+Vxn=2XY*G?{cOT(6Qir^(gOFF$rHWqI!uN`is;NH z*5M=5ZM|QA0y`zW_bt7gj4hJ?j9civN$W7F|K3n$GLJH3#WZwsH&}=@ShPAga&vRD z+3JE=qDqx0&e%03D=cyo9OXfD_ECJABFNo0&WCBt=4)RgyZ|}_rJ|&&E%o5NvpBp# zqLk~geiQwl2XG1`DbM?Nfy%Ly@C5Dc%KF#6a!gA2N3`_Rqv~9wvs5$3mp%>c4Gj&( zCVl!1LV|*4n`1B8$-R^OMg3cctwDo|;Z&8en4$!2JJ447`uTYsEVL*89v_! z_F$XYs;Qlq($ax}fuc7L)ZWoy!c2M#7uWOVX~pjASLz@|OltIubagi*`#4gRs^{^2 zaF*zI?+v3NwD}?!vir_hkU1_>)5SC6_>GdU(7N#3>Ivl~CM%k*#793rjb<07^gWI9 zvYs6McI?+y!;4n`aXa+F@5=ksv5(66lNI-EpErCN!toxS56&zEvkbir-26C)Ca0#v zK+LI`c9bhTn!LiAgLFavP=kFb+FwLW(EGt5CA~>B_L?7Bv-NEt=C`f!>cm1}UpNG=aQ)Gtk=^3hGwR7* z*{I^&nzF3FpV(OPF|X5d(Pe`=s{)qJDRTm@ncrA%%_Q2dTSVTt0c&hqjF8ty9YjR4 zcw@`>3z_FER8fzQ7{QV1S`LL`+|N>4c_$O$St1h9n~M5?jfg{ z80+Ms((QtbNT!r|Ir%HL@H6}#8!`xFmk8_;Jg`U3KyZ5lyGF5eVl)@cd!Og#FC0gd zYHqVo232D=*{#y^8XNI-O@WmSN_HU$D|l>RQpM%f)y)QJc3pZW!{nCtok*@Q5x(KGH#0F7_9yE9Ov@1ngoh{N z-wG1+cQtSyd}F-%4BB1JDse57aZ46S_@rh1^Mo_D0usL|-m$@@rDKrcfD7plQ`Y~l ztXu3w9t?ulMFI(lPY#l~S|YXy?$mIppY|1JDqtOXO3G7Mc-_~--i>MtYVj;s^*dUY zl}~%4%bHwTS~@WH`VZ)jcjn#xaazM^Xli<}`E<9>x)xyPH$j^aEGHINPNE1D@n}ad z`Z~cyeKWJ7;oYmt6?J?CkwxEdPVMw~UIb>AFP;2_b|ajXMb%AV7fN(g`#Y zT!Op1yLNbjG!DT%xVw9BclY4#u6;Lo-tRs4jPvu{U$+K7_SoHB)UI8%)?Bk@)tZU^ zJ4AJ_b`X{&Kj68$a(mIEHLL*6#!A^#0?qm<#!lz|yq<;8&En=*@phl99wQ(zPjY4QW*}3=v2^Eio&t(L7 
zCBy$9p$fq7?M0P!!3@g99#@ObPqB{0Qsd1=thN(ivm#?-Yjj!x&#DXF8cL>2pHtH) zs3gAHeU0RFIbWyt)CX#Str!^@`3#|n+!5h5F(-PoFD(RM$JQBh0wc1_4zWscK!s`| z?YA1xNq_|(*m{_gJj~#FqA94fL{P8h_UN;SNEdJn9)NB3je&oXl9D3BcjA+QS{8YK z?-2kwy=s9k0>l2RoZM)$=iQRa8k#7+9P(stMcs zeC?0!vyL%xql=61oft5(1(#8WjNHa6Toh#es@!|2LAfj*DIMeO?LU??3bulg|M9;( zan#APR0b5=+gSU37915H-1ieSTEyjlg5fmx4#tlr)WiGFd`LooJML_kox zxmCGfIS@uQ9^_ZDEZ2ZWyA(`bu0MD`|4|>r&!7H0)2SG9uE(rOt!PU9Xp5c;U#jI5m`ERtpRaSt%W48hmi$Q5U<-R)e*29w0m@;x>j>OSavBgT$2SfCB zF=%GF4!}4y38ahTf6G%h>u`nX5qL}4Yy*DH8&?9 zSCq8dg=az*q(1_vGg(0U%D?w?cBSIBzKh`<9E%tf1|$!+uSo2W_{gP#N8e4GRdIHF zY*R4#=p&e>UbMMw#p#UsxWZI7TJvrOZS&^2jn+0I!cbq~Qn`tjh~e>bA&Tb-Oe8mY zH++nqr5}W6s}+Z*H&P!rETrn1o6CCD>~~}cj`I0ks4lQNE>BrhRmw-7>z;OukZJlH zR%O?-tpZ!!$&Q|)Z4zdLg%@q$N-%KfsQw;T#nfa_^+;E1K=8HFB%gfrQ zVW70pD;*`U@rG5!GA9s}uxmm+U&nUXCaIG$8{PRg_&inJOaA-f7b*hRGwG6X`&HRx zhS^jX`MO|9d?tbbgBmv{Qs;|by2QfEln?Imn)SdvDVw9)pdi1jW%p! zP-%y)!M1c~oWt*dmCi)CX_Z={`qH(BSATq0k|8h1a=!YYBLMp^ywd`dXR@-#bLJ(E zPf+U)VB7il`4_ibFvtZ8$9EQh3sN2+dpmF;Auj$G307h_TqE1K#vx(yFpGS^i|Dj4 zD+^$chjr+k6s7cCVG>O)&(DYu&-bjI^>&?bUFLSGc+|NA8%qI`KX|8IC$@3o!Z^KP z7jxht00+#%Y>l-xZC*}BO`>GBw%5(ZM4rOmrg^QjA(4gESS{8|%gcyba}vA(7NtBx zYl@49%&O{#PdGk((lKqn^ac7G0qAcjomU7B@_co>WZq@@ZJ!-X9R!q%M}OL>e2z^N zIBG>EzyGusxr}DPOfFN-0Sc4hARf(nSEbA(QgRQknpd|AJWzi5#`!Pa46U)Bn$?=O zZA`8hx|~~TWTBw==T|ad0eC;3BHNGyb~O3t)x(dBoyC8i-|l22_+OhhgJn0_Ty$U9 zkLTol15}oVxyD(VA47l?ueDs~8Q&Xx(;gA5#H`Ejor>|uq1&J0en(Zx4WvxM=f2q# zR#v`uen=#wb(cv=n8{YXjR4Lg{!RL8gZ%LI(;Q%Qc=ladc!bw*)I28^IM=n@l^h$z zSL?a9hi8|LwrDI80>X$XK)EFg0jKv30F6VU3?g`_@ofq<9U@dsQOSN23X@;7s|Ff_ zaOHiB;rt4q(i1dy5I@%K)?)ea}FB2HgI> z_qiXuzyZwlT2L?|a06jG2{1I5Z*QLe9@NEoy78IT#|myk{Pfe1kH7>*K=`Hbr5{}R z^$eHqCm3M???1nz8aX|B0VH;)3J!jQj5o6I$GT=6es|OYoQemex}4oQ;Q0Yim0)ep zi|CAkQt}Xy$cPBJCln&MZ`|&OrVV|l0`8}>84np47yxo>*&rr_ZQ%IiHPQd8w)VPa+3PR7|47h$e<~4U2>76f`_q2r9O+~` zz=Z-c;SBgX2QZ^&|51Oc6Cu1&>Xg5cunxDmYu8+7u^&V*vW{5^@Lg#YRrVQr7v1Kz5x&jPJwy^-Vcw}GUZ z3ZgvPQB|($tt?U@s(A9*O2S}h{o|J#@;v?rO=t4%vHkhY}={ZUoBp{%$7IG 
z2rOyZ2%1xrHClW0t($&r(mb-5e@=7X;AuG9|LANODl)-9Q6A=ev2@NYQ7)eBwpzYP zSG&)h3Z$4|7}WlvOoPXb-SwTkxGf zneW^yy1I$tPuk9mNtFzIg-`v0k@0xUJOtKz?x5ls;ndBD#!Y(Fcn$4usJ9F*-YP~6 zO|61rQ7It~6>I#dHF`_wRX3A*iUyhDg^Q5_(AES&4Vnc|<^3{K^>*Z>Iz6QlQz}b( ztWVy!NZeS8#4?xdK5@X1lk-Ji6+0_H-I5jgJ;%I7{4DxMykgpb>@2lm=Saqq85R{iIz2*J-ht^Q#+PD2NI# z<5K~9Z6eQ#Y6Z(c*5H-^86vfi$Ilzo86eTskxvDoilmnLDsQn6@4g?>@hr5s2u+P6 z``y{|&&Ey%nh0$n2qw&mH`=5+YfH&|@4t0nMtm?m%?-yjcfyUA|CxNdd*cUr7dD{v zXfl@TE(C!<$~#)$-CyMNg+Pi)9%sjjtcw`K62U@^>z~syf_7B0+ltf+C}SjL0%c#r z#5kxNH~B>(!ZP}Ix%^&RCUd=`*LX)wA%7l(5}9z89}>z`pn2k{M>|M(@tN;zw7$UY zoY$40E{&L!IYOo-05#T)79&g^1tvx#5va^=l=^`a8=(I=09trLR?Xojnk&7Uo$HTW zsAO$7&jrhKv~ZsCQevS$$Ot)3llaq7Sf{q9HIK( zw%*19s*giyk#0dUI{WRBIW$354-n%?W0N+1Vhpe7oP*d*oQisKXVTm#4AQybQNt>R z|6$(LES=jvj?r-PXi=D+>nq&-+_oFzOcqcS_8$Ghbbq(#jrnLjE}Y!_WkhpnHDiN1 z(#en={~VIMdggTSS@ne%f(>C&%SiD}SNj2i393@gc;vUG^ilPlUx8 zt3saJgRV|to>t_bNH?zfa&!urq?!*08&wixUV~8q1AgM9Y`U}Q{u&k{qTbu}C)MNr z5xkB+)~G4G#XPb?w6#kQ4@mldu+fU9&BB2tqGg(?Hc^8@?Rt@BL2+r&v@pchV?vGYN=zKV}w`-ESSWFh8Ee_Swoc7js)}(L3XdBoP%b zs?<2FY$mKQmxzUQ6+QC)a|}%cjs|Qmr@PV z%aSMvacg=6cJ(SMDndHuG_Ei%3_tYGnVGlTI$*cg_M72|WXKJFoXnYYaw$cMC zF6tOLL<$NlSSMmWd%U^@kkg(DujUbTUr*)0c~8sJvx4S$M|n$H>XpNybj5Asd9-VD zJ&|}CcnGQ zuW!GnXu5;o91q%i{^0v1F8t(*3g3oTl+q#m$rXAheWdK8%=h+F*tT}D_npk1BX zF2>4Ymy^Mv{{_C zAWOeL=kosBuQeT@h&J41JhNKc;}5XXQ>&#n-E4<vZHrMj}p zBh?E)a6KP^KG`gW&_rCHP#1cgnP{k55WT?tFQ&%;)KXIgoAI(EQA zLolat42y<}!l0L|gD!VBe zujFbNQ&fv6q${2I3dXZe3=6mSpoJ+H4t~|Hs$kiS?pQ*6$Xj!xpr}6&|6%SV$VXiN zxKunhRx(}aapX@w)CFP@y!m5#W*oe?hj-QsyqMaTXGiYfYvnF0AZ6rrhf7a7%|I$Y zcb}sB8++s}QR?FSeO&ya`wH7|W}-5KtMoMV5B*x`%)3`QT56--+)RnS0X-Uz56BG5 zcj5{%1{?NC9}V^wd}V0gYgVz;rtv}B=l8Bs%2Wb2*gVK+qrl-DSF`In_~JjnB}8Db z=rUxpLfF4*=>ggA`hQ5_VnPusm&UY)M=aJ`@YKk&6~>phY=J1&o~)Ez-WNP{K5*XF zUGXpyUPa4~kvS90?9I(|n!>wAW0IL(a7VB#4Oe|~g&FnJ zrjmO6@7A@|(ff8Bm}52$#aN-dWU^xrE4eE(x$)%dCW zS$)z;{4E+Jz`Fe3@;wO9F8q6GWBUsJ?-c?Qz$O2C`McAVs~(|tpm6ZcMPVq+ELrsf zMhMhJ-psDF_Cg?u$G&T90jy?;OEs{_YX3dC&qCBMV>VG>kk_u-uQ3|bP9x<`Au^<4 
z-EtXLpB$vG)MKuCyBkoYO{Az|u0mwu`55zR*vLNt5A_{eupdgR`(C5PX4J3kmP|Kd z<(b9kX6M=X+4=N-CqA-|u>Tb*{0zFZalD0Z){k3?~FCa zzc;<<=xNqpxU*sh$|0vu;dDH>O`6Eu$uDqPL!y#&7w~Ja3n}VPj-{4LNhY$o`sWBw z{k(UVF&e}WJ4`St(Kca9DQP;KWu3%z8PcI-xj@|@FyqAYXRvBG7~>pd!*2L-wSEz!bc%HFscDq@%MU~P0 zd=_WVdv$MN&((Dc^PenOt#ExW`Z~2m97;jg!l!ROCF-O*K2HBLvf!dAgk+O z-)d6WUp&v`VdN6kCqr|l&`H%^rj+Ah%pwG-7w<`i-SWT8Kd^*H2%{!Q*Vg>@Ld8>&34K6Kui`|0uhv zVS~#47XmMytoE-?(MuftMZ270H@fj);3;T;M%|fnplX498Y(SK_UmV@QBcWS4;OL4 zI7x^862+G5Zjp}n4NI$d-qH$ERQaMeAR_q$diXJhga^u>QgMH9Nk; zI%RBitxwF&ZMbxfIYs@Ka!y<;EoWJQm2r^wkPC{ZT#hxSL#zaRVfBAd{TM)r(+;V} zT%OKRo3Hiwy+# z5JI)*#n&gR?Wt^{Qw51JAaAj>Z(6rer<#)WU2Qhk-z#5x0$eD94TZ_g5l$@)s_QGl zrBrnvvNvUTQ%)Ji1g9L9VjQtTSH4BdLUd~yr=D`4C-{&S>W~gJ@ zTD5gX*Zn2gBD{+_)K7gx>mLJ`{sJ8RIon|ei7!~<3vId@02n~eJpZ)>3fbMxa` z)YZ&oDg&j3y$}aiFYsR&Y}J&RtPAZbmodklE>7X+lBL%%k6i17CO(#2w3B;TC)pQ) zrZ*Kn1l;Yxn`GBwvqsx;b(N}Z#^lP(}hBrPKd z2#2k|9`(kvn?xOGcJqU!Qc_~Zy0fO3U)nIef+q`p)?l zcZJ0tkNQq8Hdh-3lg*BGaF6O6DyZ);2)_MTiq-A1!KcaOwP|VPy9HjTM zyPE9F2h3N!8s--D<*j=8)^ELKw=G)gEWw@ik;1gDy81Ly$&VB3v}6G}=?{u&Us1fB zit&Fv#7(Nle0XdGGA}ls*4X|4I6;JkSN)D59u{-wv|S;DY*we(D_#6I)M%l_D8EC} zHd+)g&r`WPw>Q}tlskuILXPve>+B61hAoXi4#bU0n(UQ zWV?;VVbl8S3TrrQY%zwHDJMDziu=PF?!S%CiRr{CD0m{8&ezyjh6ki}q`WbL16>*P z*AkM#=}zdat5>&VSg+zK$;d$-5Azhs%JVX1X*O%=@YmUlaS;pZxeT`!lTb$#wrogk z&kNhaG)V>Ba@okKj)<>0a^!3vO7vGsUp3S4Yrnb+Dzt-4%{~~gCMgvlQQ`C|w%WB$ z=EU)rjBZQfv6@Ihs$XBQRKa`%ywk*uiC;o1o|~S~O>JhD%*_&4gFq<0 ztcJPdZeEB0NU@#p*`nMKXAv?=-c+em#LaRTdsl$K+(Jd>L_VxWphKkmzPg=22`i>iL5ceNpqoDqYCDnYSuSyF~S7 z2}3a{gi_Dvv~_%Qm|$9298$TJ^(*g=)*4)H@m5aA_0g|p`iNh@46EPxnos&Y^sVJ# zMQ^J#KJlDeUc2k$3nA9~c-hpZ%kBbL0!YsL`u1>?fv+5@Q?uD< zO#i^7%n1AXwYKt-c=lYhrMKi*f&`b*UZ=@9Y%-dR+bMrehejq(TNdK5+#fWrOulsT zLpqv^zAqLAYf$mpS268$%?QwkER}!%1%v5qSG_DVUFjo23H@y)5ruM_N~Se3H1qH;jbv-G-)PAlmK%KXq5Zl|JHX z_nX%aHHX*+tw2`=5XI7yK6n*MbJ~GrfZ9i}U8< zX{ggHX3v(Y?+p#j?grJm@tWE6p#!^h!1*EzD_7ccJ<^&ddLE+w(DC6N=*_&+Y+Bn13f5l&^e$i6M;O^>PZgdm@XSN?|^OBFWHQKy2gvgYQ?? 
zG19?tvDG#Apa6X|Lsd31P15R}yI#eTmePHf)%?LAzju$%XQC^l5}!O6!k@R<+D@ML zRccUmOXDJ5aFyHnH!Y>9)@4;7Y;5Zuia&#N%Ura_7&TQXk)Fn?(pgG9LjFZs7>SRN zq|C_)iGKm2R0{u|?^dek15XVE8-*cWL*N`!082lZ2qbgfEjKvg{uItj=%}0wB9Bl25c+0eqqg+! z0AlJ=S+9(=#11nxMLVa2@xE;HF}TuRNbBK`^0Xudbra8BWZ{D-I}atwdj8D1scD#B z;A~q>ikItJncH~64mMOwwb3a-w?7@+@URzD4xWus&dk$A5_kkXm#2{;O2@$*o!xhZ zU#h8)Byj_c>8e(08j8z#Ly;BwTi-hH}tk zGJpzt-3;YY^fvoqSF<1xzed~&j(Cxl$KBe3>1eDz|IER?NCIc88wz^UoA3XEOnfN- zWaZDKjS58VqV>$lcv~|lyDf|G#YXn2fzjl zk;`k03_a93UUZ5ei^JZyS`VvVnwm~GKli6WKVY6RXsNbY{DOV*{E!@vp5tsoj*?s| zFzT%Cl)Rz>I#LSh07e3j@kXrLGAykf`v{|O`Kt9FUqWyq47Mx10ew7~J1*fF-_eTFFH2D_^<6ihD5%n&1O_x4Y2xYyf^BkiKI4HzKW}2_a(7 zZ!y@|d8DMxLB~RwS(QS-B=8)x=M(blx@)R; zhSG6TboFL8AOX6#8JD>D|Iq^c+fSCiU-`TNu>|LQ*UjDkouX$Cxy63|jnxkpHL5SK zw*{`rk>zC&9H1=k)fZjJsjpOF&Zy^XG`XuGw6(DZSW4`-T11J_sEZTQ*BEYMlZbcWnNiwWt9Obwk#X*5f;$?Wj4OgXP4yW zT7R@o;I|y!s5W05?oz{$!WDf7S$`LKr|I>(aOw7T^_8W#oD^3_70a^XDmo;5=Uby}DV zkJoMio=H#8M;XcJMo#>{ znBEiDIv{G-Jj93sXtn#7|F{@S3h(eE>LX4QfY;MKKDi2u?n3?qkL-pOYnY@C^bQE5 zY203(2Q+PH`gTMosd)x@Fk-#R8*Qte(KBRRe+SU5DRtTE0wB4Xk@XxSB#hB0WyMr^ ze7wz~ROmv~NoqU`<21&fGeQB?gyQB4o3dJ2%R}7sT<+VZx$wBR4HqlgAWVYKTlD!K za2)BnDsVRxak#`?$CGpc?F7R!gogjL)1*NnLW7WuekC{!ga6{&?=0lq3!di6*kI8r z$hol)mz<$$-q)BR-D#j??~hIh{Y%VVAwb&}IsSSk z4vmB+?=O#B=8ZI75S;xlpK6(T{a@H72jyImBQ@LsS zn}aas<@d{d1AyypsVL;ewGZ%hU6a@`Q_pw zoB1U0c^&P7uM=WTZp^56<#&lmm)tl65w<^@-27$mOd@}$^`4M>Hd?hNY3DSi2$-%n zPHU?~SLt8;V^JOO0x@mmYH6amXlt61;x!p3C%~E%at|M&hWFz>U}ZPrWYRr4Z#Hf` zV~Fht_Mmdv3GJ+?(%;G+u04H2qqE*-$fo2~?|73$8XN*6m>lmn3On?CK)GU8K$orq zl5VIgk<3mQP>XaN-=}EZ%j*@0c%)by_i$}$H=v$JcK5>G_!3$hd|QNz-{xYn2mzhY zTx-A3vOFNRz{lLYfj#KY`dCKQlCqsHGQ8`|2$frmj{B8INDd_bZBNx`%)MWpXNw_w z`xFOUZvSIAo=)w6oh?)c{sg&~(fciRWQg$jZgtjzGZ6-8VF8XbBwqZ?ev*uZPUssV zMgBO>qs=$-O^J3tnv3Y4s>=CHqu+Ky@UR@C>AtOi((FJWb)|5Hj*c+0%*F|{?`$!+ zqyk!2CO9y|XDcjnug6F4g%bR%KtldV>Y-csF+_#wziTrIicPw84^Q3Mx5d2_&GXSV zX+)?l6fQzuUEyPc08o0sy~9RDoo##;L@aT7nevk+v!gG7%~T>Zg|C2ZLcK@m!wK4} 
zkZtmrB1i2P)fQ(_nZ66F5A)dtJ%x<-)gzhW;=9vH7u*k{O*gn>*O@Mc{Q@^q8_4#r z84`-U(yUe}MMcD1AxUK#o|Tyz#%5SB_w(0bjYEmmnRlzq&yAG^<|;gmw9oI5OUeI3 z2RpazU=$~P@)U)i9~4AB>^~m*{X@MrZg-^JU^)8=$?e_q8Bo{1Byugb2;S91dgbpQ z3`$1kGwMf6DO{F^y#391GOioGFQ}jhyh8opzf-M2*HQEg^`bAWy)COo#i>+tHaG`; zJr%E$dMW2~*wYvP7sb4N`G3bo|8Erccj><4!7(~)T7jDEa7PDfXUVIJF*?(|pjveI z=Sq6+{kn}gGqM z9u?}}d@F&-7vTU+^?P?zPa^`uylJQvb4r8&(4<&C@0 zRQ`7&OtlZjsBTM~uA5w6m*&k|rmCQpf7_QF?4N-Oh>_kunDu`dZ70u`p7J^yIvqYm z!w8AMDihu_ZggZd-PxV^(|`96!+w{F1Y$0c-edllJk)>tDW~+CF$KBdx33= zQW5C3laJ4|@=pPE-6QHWANChujVz&po8c%nlBeQ#j+pcdO!@gIeii z)pi@$eVhK1GVCCbHqF?E*4O61=e&pJH;Kp335KiSjUGkEGLOQa^ zU~{a9x~*BrpLSudU{F=ST6Jhv$uS6V?GOXgR`T{|b$i6cO+l;yNiH=7D~U#hP3xSeW8fV*fdIsi zajw>8NeE&vPX55-ZXIyaw^pxhSALcNFY4E12lJOGf%qNP zOTFo{4fcL2vjwS8PBP=xhddhl52{5Y}AE>$-NCPTh-vA_+G1tej3N)xE61 zpz92&_i9rf7tjyr#ssO~@9Z*C_r+GEq>^M|mfb!pXN%lyYOr+cD}>?s1_}D-bewBc z@^YE(X38Nxi-zA|3h^mfqlvjuxqeUmO>#b;eWg7-EtSM?_AM8q(;dg_+CaKBo;qN( zo}avMkR{8)>m**c$s=wqFx;;_&z$X)rbr?}^8v^?=kQ0FNzW~5Jx2dEm(=5s;P?#n zuw>8~$fnEQwh2dA?__-+EV1Ry->y^u4C-fNeV+SHrx3FN9|9B* zOCTuk>p*Lfwka1y7xT`^mjRjit8bVfIoHE#%GFHUuXbHF*Yk*)VyEl6?@o&;L7?{k;QXHw)LL&`Uy6v*nR_iMJ9XVc4ISGUt2O-#zu{O`XR z@hGZIUXK4=-24+fa*8q1J3$5{j>B3b9~jG#00day><4~D&$F0w{XIP?RS3KFLd_ji#5C{IS|1R~2Pb zLJFM`9ee=tRIfvO8aY*`OLIWzW*FG=LvZbo4a+)X`m=YS}F9GBs9vN4VEE`%w~x zDa%#WKfL;_mcaB7EB1EXz4s1{h`=eUM)v+eKkMXK&7>;~dXwxDn{U?ifSG5$ma@O0YYJIJ=pX!P9qVL(7Q4fW5tn#)OGb7*m!4|`oNrQz} z@)#fcFW(BR)G&c3Rqnrw)I7M0DZ@^huw?$Nl5t$9hUZ9U`IM07dXoNDkd+lJH#QHOHoO5;@QW$h&S(7dNU*nudJ&$#v}&y$}f# zbI(2Ik{E?RGGEDtH#aVPUI&Jbp=e+}v!RKhORf)_pTPNq!n0Vd9IcWy3HRP&-u1^& ze4O*ws0V{2Cn2ePo+DZ6HE;S#bHbnyKP3d zPhTcu&ssMPgS5r6QwN|aMDC*gh+LfsRoXt=zV;sE-x9XPmZ!{jZc25moinLQxX8IO zVoJW6QZ_CbXJtCVeg83vERJ#6G(T#d(sor(_=JLI6U zF83Gg*tzO8c1+-8@$5Yuh!zYg1=XoFnGdG>?%X*{S1ty1BI-ev@uN*0r{go=N^v76 zc_$W2eo{yqRd@xnvR*dh<#iG@Q7Xl=3WOl00!8?op7@v5{Fb}xid1$UOVidEcGoT6 z%o46V_Ogr4L^!wgSEH2M!Xa5>4!B%a!~5`F<}j}J%yQFx?RR&JUPXUI+5>ljUOF^X{`mBGV`x7>e3i+( 
zFj(4w;zN;oJUY5qV(%<-a(u?@bzw9F*5+Gfj=k}JL&(Y2_U(Qbzl7zYUG6wiqSfyEI^8y#7590XxBb5fXfXtyQ5vJ%fx*HQzEgJ%FyH zd8>;8k)l?Q9bBRhe}DTX<#^gQXL+XLR=cjux+{d_u*J&xHoc@re4P$Q&3_2$DBqJs zeqH05wTNxB>^xWs$cKP3-3DxC3@v_!z{HYke>NnxCfdoMz8uRIy}1$N5J1fSQTB5t zI3dw_j4DW4`Zcch?2n?++^}e+B+hut;F)e2Ts+VrGYv&`;oBTo06G8m*nkH|_*p%3l#Bx z#7?K?LKSu8xqnK#da(W9h@lq`4)ZgSMPcQjx32yi)Wl`TQd>g;A$bc>u7PcLS5Jv!e0SOm`xc-M#sI@PCXX7fHAN)>ah%>iE_BNonW)4;zcC*cov@ zW7=QOedl-n1hFiR8v=p`+W(UE00*P|ePUpP(f>=#^S^~p|M~p?S|RlR zNu}`NrR44!x#gQR=&)GEU8P4)jCHrN-WEE_$}4Xh+%d|i3}lP2WheI4p zA3SmL`eTH8%>_RPML$e>8QNYNSK#*?Q zHcS{bMCuQnD7xG4-cw$OB-VBvl;Nd5NNyHV#P?yjM^i(xs_ie6R-Q474P1@K`gQFQ zwKNRy5&I`nzix3==dQJNKRmpybUc)v0R->)ojxGaNI|c9Cu^ zSB&;W4aT2CeijStutq@06-W1h_sn2hV;ly)ha|91f2gmJoT)0>bAAZmifG6?X^?Pi zSQ5DJD1|@_*h@ZU@;soVd@4qqPsoZke=YNd7##xxyo|T~hDsr6DjOm)Y%xZ{HF42! z=%PY_)fwJa7ldl1wW(^M=!Q<>x%=ygx&?xBGidL=>=5H|CzSG9goumdTFpKe`K(B^76KTCYiOWt}EEzoJ26*`PaPZexR^@=_zEm*^0ef?tUEdPR-u_%yFt$nwNO) zI;VJg4FaiRKb-rP>-p`Nw)c>}@vJWpNyMu^<`5s<9Rr2x#-H4ULtahc?l*m63-tJ= z`j3u$XpjM`s#Sfx)rAA&;A)y?cM1=a5ALe?22^`*J86A9sz*<=rtva+%+x@9YdAj;HXWwBLJZC)@XyeF($ z+R@180D3UXnzD*3{#q}PZ6YStLS-|dy7Mk7UVA{idxZu1b%UroN_QrG8W=8iuvl2s zkSA%F<11SVX=|0T+RJgrmlRmodgA?r?2wb+UCu9SBxMg)U6)N#Rw5A%Gp(s9`4`RAhO9YJak*p7_GDR>eWAVxAO(s%}x#U|U2t1k3Y|52y z-VcFliKYWrcclCnO$$vyv=ms~FCX`qyUFHP`12jLcw4T^u@>V4J`-{Yc!-r3o6&2j zTc4AYdN|#cA=litK|+7F$;%xxKr*$tTFy6L79{sT_phHPHl*?mxbLE(iuk82%$+PF z(~zFJ2X<-dZZvW2a`BV|HZ4KIj9JjmmRMprS-{BoW+l9DK#@?k9g!6^3R~ zD6#k2OrdqK!5w;P*KOZEMkOjK%wZ*P-&5WCBls)<%Xa=SMU>+zb9j9sHNsYokQlWW zVnFwyv98J?AX03Fv&HE;t?1$C^_1gT@@-nHcC+Jkn)7qMh_c^x;`p9LgAI9}7RuAV zp-nS4i z-DyrN`eUikTZ+{;lQ^@iT|>-CJJ=%4Eq7_Lp$~S@4taIE5VkT$<6$8vcRhEwfNkrt zCC~lH)Y5cUU4Mc^NzwZ4T$8_#eOPNB_S(E(^sTEenv!3ITK>TJ;wo2xD%aReiS;?( zybS7A>;QhZSIxmpixt^+yaPQN^`z|t*vPiH_>=ljvqkou?57JyM*)@}S}A2XBl_S$_~h9`6vFCeptw}JCg z#Gi0pQ#pEhLc>n8Xx71KEvs{--Y{$HHv4B)QS@~Ex(!wJUTKT6E)aboB1=Fzw(*9T zoAQ>7D82eGA8Y*AA`q{VgZMaIE4>3of*M3$W+#66?x@UFz(qk3(p&8mv31E{Ze>Fk 
zV34l=cBPP^S=V(@b22Fj>+bfR)OKwCAY^KMxj3y%h4Z5Oh(SIN$$6-dje+)ivv)BqSk0LT~~E2p$3icS&$}2_D?twL@?Z?sRa60KqK; z4;I|r8yfd+y3Zo-_np1>+56rx&fj|({Gb;-)|_k2SyfLxRkMnGW%M9PbV3*jhuY0|f(_TI=1&!NP?+VnHdu9ixY&n7NSU=r z{U5)H+}~~mBwLw0>qiiuUK{wi0+t773yB>^V-qc$6bV^kWBkY{M5-ev(mrW~F6BXX z;J>!0q!ZP8eHgZ=jEvRt($q?4vM}W49R3eQZ;#yRc>z!|<&riWmb~NYQ+$l6Y#_#J z3rfYaB&d{$QE=n0FK;93m~eb9ne^qMzaZ!WeK>KwtL!*91L3CXd``0*VL7kbWa9d_ zI9uwy;uub)t0I zlF!Y8BboY^v3|F;b+S1pWi?GmgOQf;VN+B1)_i%(1+=nHLt7tmnQ_O1%0cB~%3*7a z5w1*PT(oq_xhJc$KEhkg^9dH3MiHf@!c*4Hqy>^Lt?NJVDm@67nwtNZOoe0?vJ%~1 zs2AOBBIqKUei_k}wMi=J;wHXOr5bB$8_*7+34Rju3f3nQ^Nz`*yqHPXE2nm_+S@N_ z(2{!TA-$MbM5c7`2u~!9&nXrpnm-|#Hp50De7joZ%|R9~lX7(2`PLTLT!Ah*jFTlY zrhs?Acy);IfMX;oJdCYUd&?}E!Uk?D8+Y<_{n5D2zcUx0 zeU6P>*`fB9Nq;JJ728ihUAWWQbEmw4$vz6Wk3l)f$%>_B0 zNpGzX%D0a9>HDO(6>h8*>W(FB#c`@zbE_&L$(TqQVJmvEsTM& zS7?SpFP0XBF1?*Pvt~v+&QO`(j5?BpNWTtdH)K%R;Is*&k)=g_Mn(PjODD6`?UUR$ zaZ|_3=Q3LP94mt_EhNQg(3je%TbA+QR4=(sQ0aw>U+T$C=nYxO+*>?O7G~iqK`YEc zi76HcKO(S_SZU~C18iehb75d~OkLAGp=D?F>-FJ-mG^mvJrC@2vfP7zfrN<{l{sn# z4-hJ2Q82=SpfO?`oWW``95$WEoM&Aa>%!%WV5w5+7 z{Iq{BVKjeNM}EG~7KAT3Y-{{Rv`|LC3>Hg}8TyX~Honr7YO zFm~m&x39xX@|S;Y>>8=zo^ASpj%wWTpb1*J82)smjx)KEdu$lP#x$5Im_wWT5R(MU zD$JBhhK?$JxBF(NFwAFB{hg(@qJT|NRvp2zi<}Mgq}rpI&iz8_^2yEjw$1HWg3;-9 z=d}0<`rt7V-rS#munqYUmzKXYL;!-J;d~A($rK3E_KHDI~dO2GkH%0NrZIs5zCiPrb_y)e!jdZeLdhzu% zve2gN!VU^Y2=#$&oP6zTEM(FU|47kLbtZb9vtFk5TkS9kwJ_<4kJhibRn{oiWiWd~ z2&{QwMM;MvITg!6#ZDClkoNfx0aqiQH5;n1i!XiunmS8H+zsN~}- z;qHxez~&^fQ-~?Tac~HdW|wxDb_`T(x#NH0Z7>2T!*(1D9)i(dcCST5$eTwzPEuE= zq>AVfPYWY& z+fvc_-gmUWaJkrUO_MvN%5D>S=otm@)s*f8qcj5jDCl2Jg{uNB|RPGW|7xf$bm=V0 za!Gqhh1<2JZ{YtE{M1Lvzs<*1-6mb@u(|b@rBQKi_j-ThH*8t?uvYO_kLxL;G-CU$ z@o_Jbaa=2WDplM!!L$;YW3^-RikkTmZ^H$&>OBCuzFPG+xYFSZrqlB!{7T%5`F*lf!^4mhbm zjTj!nHEA-6>yk($#yE(@6FCqerX44qVP@fsGe5x{H5S65@Z5$50CT75#`X+E1#&*S zNifOFthTne_HAkUx;hr-L6&rP)V31)Hs@q2SF7D&`IxScZ}Hd5gBv(Ah2`>lw>Sw? 
zl0p7yOVZ9qW6Opf@-;(0ZT}^#dXLhN{EmF)Dg%g7Q;4z1p3%`$`F>XFNm2(nxJEGy zL{^@T6Z$ys_}nd+cht#zvrH7_h8gVc7JWpr zBjqRhS^9-|Tg&lp+-V-%R}tueIo*vS+&E96nP0>EZ5yf>IX)_I3X|={(zR}!1bkK1 z060=MqJ9~qxarX+M~4;h1xL=G={moo)JApb+zy2hWnybM;rQBbF7c-&Ly#UjvnttP z_*ujLX~O5{mupL})1-~~&c^v87l5Ekm(bXzrF0nj8wvbNOnO!E)}JE|tXM}7#NF6# zs(gg#^As1W*A~3B(q)^-4S!@cZ(OdW>$b$y_Is(Tvo2g5wt?&H%T5uB=TDd}b*oB| zi@q89)Ccqay$@2779h6rf{f>$foM;@z-|NkP;-Z&rD^S*`gs%dxl+B?)zYmzFR=G) zQ?%x-q4Jzeu0DZ2-jWT^BPEgu3>zyUvP4At`>-CCmFFPY*AmHQX&mpEHp9aH^2yhb zZ!hTM&9vj2=A9G50q=4({oqH+?}g>Vn~l#u@e0Ch(spt;^gH;8Ns^`G2;@{cBShO2 zGI$};u@b?{lp-vp=o79Sv(e9y`OI`UY^pU43$S0EV2vSi>UjDfx)d_oBM;=qZHu?i5yHIdN z&u3NNu1A06R|Kk&Flp;p6mb&tTYJeY$H=smkgZ{{Uf^-dgH8L0N;MT%>^#<|VBev% z&t%xkLPm70rdwNN;WP8D5+iqK*IrC-T#24bb~a_p+N5jd2~ z!cqIYsOL3(?_FG5V1S1J{Vnm?Mam3v$yh2NIZD$dI!U4XrhRR7*&jL5zKYZFarMoM z7&4d!EF61KWnD+3mwcA5F;puzE;V*$q^zFECLA9i@k~0FcYHj;uyNv;V$e4FzNGh} zCxmbPM!%SEWMw7cpFIM#yrtvQ^U#}c)Quj)f9Yerzbb$iFIpqtOB+3s`}L}c`lONO z`Ss%g`&<*hB7N?d!MBBQw`)o~#lHx3h)|Nxqph6jt5jQn7~a66YC55vnwlIoVIVWs zR>SP%q3$NVi0_UOg_Fb<_R2bUL{0e^FY|VYYVBEf@nc$koI_%whcbK;QDenZ$HwZf zjc034b9<82uat}F<8ir2ic~CAaQ#%$h^Mz|-Z67II@dT*x(deQ0+s8cFc|GvDsZua zVq-8io@twZOiV=wLckMf@4D#p4de6P|6Q#utP6Uu`4)}(8wxxcLIdL&|cSYlMe?T^AYzUwID zERC;n>k!L65`+1*ij=&FGpKs&KBuEkh7~g7*M{v0x z*!!Auen_v|Pxh~ukCDrt+5a73cX4@+i$7k><4N;1YOuVgn~%?oCRP5030sD)99N)N z0{CTG>?@KeZ@ik24O__=y!EbUR2B85liN{2N=Q?itz$&BDW~5WUwP!9LZQrPshIFG zDP+SeO}cDi3jcdxo~((LF{#X+zzk&n174th$=j7_C2pMCxbkx`jlMa8< zCDj*n5+sUg!U`#qwbj_hTSD8+{{xMT7Vjy}|4tGOEE#jH6CUDjgI>6t-J^5;Vbex& z2fd7VK2NHS&G?$(!9K?2iK<{hKYf)&WKT`wFHNeQW$3Xj}G3qS}>>)afC_~Es|*vPZru@?28R%F(4DByDoTs^wwy5r7< zWk#nn&R|?Gdy}0qwJ4iE-ld4M=G?RqcE@Egf-4sP`!&S!Sh+Sl!-4%3VoI&e$so{9 z9z$AH9k&3}a~qcdd^HAc*K+V>dLKIcz13GSXA!tu=;gUx>h zv=zM8va_QD_a1MUWUN3K_4GXe+F6Q?-=TStQSpS3&+pKXUd;qkE@hfi>EdJ1hB@mS z_|S8LjO*e3xN~|ZONSkYb1rbDmYv7nqxvZQ@0wPihUBlZc{yL7%6eZ{S2xXX|4vH2 z%>4TFiHFyXV?-En-UqEM8SK#zKKkyQk38{`s>SZ{j+!A<*W*bdRdcKX0J=2OUk 
z{>@f0mnQSxKcNu3h39O*md}AeOTvKri34mdPy}Q@Xt|jzwbKmjlFwuz$@eKL%Fp(G zPKeCoYx|Bal?g3w`gFSsBL0AjopQYFF@VTVzt`Vfi*LB)E_?Lt7Zn|CerCbi9CQ2C zez79g@(1%(OTwe9GX;PnwE0ij8|7z(r@v#%t=^5?p9@xT!zTsT;#7f8iMb? zCipkyLm6X))hwai^Sl8YSq{3<>S(Tf0(2HZK^X~nhMe3SG(DKq&!2zl)NL{UOdCMwA_X$JL+Iv4Zik1>+#o)GGl@h6ra!2T)*nz?8pE zKiQ0G+;W82>oasV=Ki;~MX2or0XymwX|H{}W~ktQQD#Vhk8j$7o~nlH)0?SDqCKQZ zi;~h99v=SA|0EI^w2%_Z&U;g2Ap?0>9m_e(zIWgMIooSH?R;i^Vw zmvcx2a3>AbxOH6-vR}_W#q4HMYg4b-f^I>rME%xxTUv-?u95-0i*--SB`4cgXjD9( z0e!HBPjytjUJmH;i`&Ht{EgW#SW{kkmML2xn)4!E!~*A6M|$_hNuABi`oa$Fdg?n- zadwwpJY~(eu;TXa^sICg7L5!Z2mf=Ga?+HpR^9C7#)h)pxg(HMZAcjY(c!mL_@0IE z#quo-1iwj^*?A%;gWppHy4x?)s>(K}NH&|m30B<4ah&e@(zMv_2fI{zydw(CwB6_G zFgcm9%>Yb&F=Pshqn47DmCaPNh?(R_fVQ=@0VS+APB!nKNmy8z{OKde{=Q3ocA9Oh zWS+jhJ_ZrXP@K1UjlN3aD*^X%ojOJ-NZP<<3Q%>oNaXHK=fQ_-t(Of8e)|;-<}K)r zyD>LZ!PT4URn0^EK;MU^$(flHDDEenAIBb*1Bpice0{z+$Vs4M6TF({ zhN>Tl;y4fY2Nb|K9!?O$w?eNTv#A~DX~ zdqlDnfkk02Y%bL2=OfB$&g9Z=X8g`n)*Q;TSAqa;+B@mF|9P&Uy^N4Dc3AMHqeJ9W z1VxWuHo2<9?&ikBL^h>LYl7GxP(FO$YWi$ZyqJXAK!xz?&EPxAzm`~!A4vRi7L|O3 z7h zNe6)vp(^+i(GAa2^>;5tTDr%wMv>kd^_KykEm`dKe4+XtRSvujQtmSjM|Vh`=UKw0 zak{5HQb!%rY#za)l{cRsAc*;qs3>~u^9u`uzklD~nO{Hv>fX{UnSXV44OVOhGV~xJ_lP{gC*Vl~`dqeK8U#@z!1C7dxiy43owtc+TRodeA^FZ_(tWcfR z(Rf4wJ~%6HFD=o)+h0Ts9vzTyi6Gi+B1h*PJo^j{W|!beEh(AkHqAOkx7(co+Gm|G zD9l9Wf|(ak#6&ds==UorL4~88JPPcl@E&8J{_}!{sU*-f$@4 z+cN(r_t)^z+|>*v_;v8ELM*athUJ}$^`oQlZXpk$lgauj{g$A2kBS&6! 
z4?E)M-dpbrAdK_lhiS{Be)goLSI!wm%fnZ~o`ZE1I)bC`#W!_oy~$x`8+S&z99>VhfQG)8nYBf8%f_`sS$-~d`GZ4}ME&S9^pWLvWa!c% zs;GqXkEqh8{x!rKor(0Z1Afx)`8Qef$>wI{YA z@^>T4qpu-zpB{R_VJlug$}aY?eT@nt`CuBabJ%R2EX1;CvsKm6?1{h+)vyl)S%h!F zq>vG$GJKe4rFA1lpm5G;Z@d6`u7o`Q5E0tMxnFjkb3*a=#v{SUBKzGr4gQz!keBL8 zTNcQz1rI}QzJXKL!&rZ0xMrvJnDS2jUSg_;!kC|j$aR13sKa%r#Ewui&?2O`IMqD# z*RNj~Rj;HibG)ZeeopCpDzxZEQ0Fl$U!fLOr zov;V`{J{SaH#fgppg$SVLsWlFgcvg>HWv8Lnc&LQR6*sjl6WXKh2~NPsyWdwWI1H% zdesAbpzV6pwbJV6mE;uxS}}%#d?5;Zzeho>TiuTyF~Gn3#>t*=Gi~XMC4v$E1s|lxyf@@GU4x$lmD8P;Pm`_I-mom zCnxippCQXiOUpHr?o0svuEB%R*0rX*RAesS*2AR4%sZso2^21VJ(bpRjC5}F{crk3>qT&~O z!}10IxGqrRtACDhr=lVezvG~NcX>vg#U>u-kJ_CT^y>yQp}C)A8TU<4zoKn_8wHVM zvjaf3U~O#;Fz{^PQF<9wV1o?Zo2N{nY>qxaZ~)yndge>{q91r4I1)~_woLb$StU`s z!>!fVa&zw0ot;=t-1k2mt5Jz<1<+gr9oW4i2M$KHNVz zz?WbHZcqpqRa})h6IbvkkFanyl#v||^9dRn8n7G9J+qG>s)oOXe6NoW$d8p^p`xvx z)^0MwZ=k!`jo-yia?gwIx{$SX`xnnw4hWuFh&9}g)fYfE0l@A}3NDuESK;EO+9SY( z|C4PAQj?Kq7FYStmgQTduk1e!Cm^WRe)|Ha`3`l67V{Sx z%4h7#`g-pBk!osG-Et1{6MMdRam<_6~JO%IN9J6b3a&qX9 zt;&Y+X0r4*ZrQT^Ci+mbMqOodSQsDkNi0oIu^3=!tS&Ip!~^ zQ9x-3<^f7WW9EmPI8b!$gE&bv9WZ#5nRc>9SyA1eAM&Elid$DgCV?fkVLdYA?7VkJvo6HT+We}Pxa%bAZu zuGKi6&PFd*R20;zQ)U7Os6WMg8~yw3O)z#mJbd>|Yx0rtz*1X4BGdkl1K@iU2I}0p zb)9+VU0ttFu3abcV+#oae6vBdAfwoPyGOX0V-mXL<8;1 zOwHP0v~AJY?kV!3}WKW$%xF5liX9Qqw&JG7pLrd&p6 z3rkGSW{6z(9SJs@b?Z&c>zI&xL;J4oCY0Pa_bNQ5iT1NmkwV<)S2H&buv-(-D}T4GbU|CVgPq+y=&`Z3 zR$U500=}+h*>A+2D}a77f|u7Eix1~nNun<gfuw7^jyBVt z5;8R+mW)o3s$Ak%jJ@-L1s+{o@5_}>vAERl|F!@y*aLO?eLuv+pEC-S$=D3%Q?8Lu z!bvQ53;vf~Lh=E`Z^vtZRtc7|zYN`+;MLYGxD+~Nj#M=>XD^p!z>ahDJh`=wb z@G?&5?t!*uL`6tPQ!50TXM%U z3-lXZRSn1nJij+DLhJ7i0$^Zty{l1?Mw_uAa!=vA^NDvI@YB5&y@~M^ftMjSyHXObm^b#yPV8IsG`DoIlasEH4m)mLFE75ViC90D24EgwovOD)_ineRpn;}lZ;kC=-~BJT zd($>)$a3$=N96L7J&){(zjyKlM$f)b=BL~a1_Pe^(kE78%n$%9l9d1q27$zhciWzB z9~r!@r@-u;9d!4ZOnmtZ6{fiP#ls{s)iCKCODZlx1xs!2S1_ogy;XsWSHazO^7uro zNyEZ3bD*u`&z>aef34_Z>N1m~r;m7B$iw>GvM3XA_y)(~oOPWd 
zKPp_$-oe2^ga851C26)--fb!R?rCe8Mx;%eJ9`T-7)*#cYa;g#?;f-9=!5c%mnQxe;E(R10x)h*6>J1~cUloCi)G=lHu|=yJS{N;s;XAm8i`#k~)@>bjpUcX@$!k1pjT0-VN$uFla6j*caQ@bHY#b6cDIHbZXg zlXG77BF=vzw6I}{v>+;6@x9eg$Lnn$c!4_VtHuMJfVCtBPHld(?&|6)O1@RY_^;mQ z-s09X7i|XoaZ66ZNLqkQ`Ml%x;l;SzT}d!QMCYgP|1d1?RT4#7Jz8C;Xa{Q+>>GDZ zt*JfMuTBadAEE|!^6>k9VQ?a;pS8XYB~7Oj>W_|OMc(dh?IG?rVR#^%kN)sr@^mIyyYABZ@GFL#E0<)^mJOII<=F%YTJwRHNY2@=1a`^z3_T2EjB579z)i(}12@(XNj?&psGy(;`isb?cI86TRjcIma><)U=>}*4aGxjN}9WDT;CxKnmZ}u|P}lsbJNHg^Fg{T|JhW8;{>?K({wj zRqf-6drgzBiCYJ$Be{(FaG{gzq;3Agab2K(reXCeF=E8a`+*4s3^sg$H(6x$+u@WzQMu45J;jh8=Db(+GO3N z;a?$#=s;rcB^Z3yDV#@OW!4!0QRoT zJ$XL5M{RVoqm1XPBEqWL6f#UL9E8mL^0(*0H@g2T$^CtDys{UW3G|j0rwJSP(Qhwt z7v7GPe+8RRyUhYU=onz#S$Vwu0ssU|YGa>4_dX5=bbEB1E>HfG_G2reh&yJpGM?n-s6M_s?oE1=OUg) zB5%xA%ZXg_@W2W`dWdGmM5gzK3%}K&JS!gwDh7FNgUR&3jCvKs!l0k87s% z-0!4BoOW($Js&x;)%_3rZfyUOeM7T(4n0+9S5C7{mnG-)jN#&uvd`^9yRJ3ST6;T_ zYHiSKee**Djf>+lDSG2@*E64L4PmO_u3@94fO$PQBzI3 zC0)fG;0eD88y^g99Z0m@4SUzx6&g^ury zg_B!9(lPm&RT$rNQ$Ogq+C@MG3_$0)3k@Jh2GVnMtA`QBUqxP5S2xG<+9W9{sT8B& z9`MNhY0|5_CusgFL)GNj2fCgb(Qm^Ge6XB90k0cG7I4)iJ$?z!1;7DdmacrB0M-+| zXfGi}LrZ$ zhn^cZ9fIfA@SVdBJ%x>yDe*&x8j^Y08;|d84{dE-eZs}!!W_qB9H zA~p3|(aOG|iW>qTPzfFvMht<~erBqyC}2^hlvw@*kpKewQ@*;gz{A(&E`5vHYE@zsqM>N?h}iK9E_^ zygWkF&oBH!!~iPjw;l!<5sKl9XU{){0LIlkA>s+V$Y@psWe8ubcH0Dto=%u3wBF7) zbb8UPAcJAN{RHHHXCAw2*?5_abnpvI8!}_I5$0CvCL2XTNsFZGt~Ixlm!Yu4?Nh4; zDH(pUIC8B9hv}JTEtnIhhJoEt3fjdL_N9Ju{>9Os=o%B`vcsVr%}B(GrEte}<=ds| zSFxZSLvRcyO9|QH^=gMBR|mow11LBYc+2lq*7tv>ua2phFRpTu$A$a?KIsY86|9L0 zSQVOpW;|_=w5wMDyO~aNSpUN&hR6h^r^%n^^&$WeYdra|AQ$H27yP5 zCiVc@B=B!Ys`;15=ot``n>vT*aAG)3hh^p8tPGa zI|Dk+wkaEv%=IoM1)#GL66Nd5f*r?{jZ@vq^6L2O5xERWo%U0n8J5hxxYo(b)iv_q z^jiH-%NW6;_mcWwxE6&P7H<~3M{DKe8%;|88M(ZhX7vmWZ?Vw0*s2TOQ0X*ZjVKQZ z-ioohJUZ)hze4-L)KXgbt=l}6?lQ&d2D_><>qi~Zm7m$iO-x0-NAku%P>q?B=^9wIx7-V#3fqW4?^${}47P zej1=USO@@=UtNZ1hj>ld>CfmHPg>F0K@C;5Hsj+S1CL~ke96TdMIP!zxPrg=5b&=~ z#s1&OBa)=0YXFd|=3BMH+1FMO8!ZxL-OM0j{hO%^E*&al`-*Vg8$l!nmNb&c(5~XP 
zcSTcEvy>(AK6Jr7`>Y$BFvs2^YG+l3d6<0 zdUU;EqC#E7Cv^F)%(ar2ufC)3XT?_IS+9S;2@pQsLHmCqn_k1qQe|QAq8z|MH2wUh z8F6&9ayBuuyfi@SaJ>U+uZW|@#qK!v)E%}s(lv~cLA(P%wcpCpfAFc+2fkhZ3eP2A zr1UK#ewJ)1<qL{UEb5`a8X_YTBQBx5= z388ex!we>rweprpD3nRx3*C)NTVlP8*#}B+S@Prs{ zfxA+@_wH)x&iA+#EZFFMIg5y?x1Bo;q=2tJA_Ca+Psgz@L@lYRD~k@BVRKMnnKCu> zU#nFNDYGnP8N1>i+wC?cK;GWeEt@;BE6PH zQ@rxp+Jr_;kU`aqbzQt0gV2komYgItI(HYBm+Y8gnq?gynR=bbv@3v!BzCZ9Q&ZF7 z`xQeGySbfbSF}O#dP{r+O(~;kKWV>r#DjQRx?aKIj^D~s+_df z>!-b=kva7gq_1{8fFAgnpREbmwLSa5I69H7VczC%wRmA_vBzgI%p`s5f!Gfa5LiVX8hYuF zi#aywbw5T%M+33fxboE%sQUucz2*z3YNkw?p&uO~mh*Mxi2m($Y>)Bq@G$TWV(kPQ z59O4ih{R?1S0kH!Znt^mdkF33!A{=-R${sW>?%3~;{Ew_dh|l);_~9g5;dw?EejRi zSj03eto-@oh%47;C~WHw>Uh_SfwH;>wZZ5mOxAHkf|Y?+@MNVL(b+=&kv^+oJNrma zZ-+DLrsDgLAFcrJh{wm}${wb8t#7#+ z(W?Pcv3A_s%{{3VV=!*Y3V z-R6SK=1kes_Ozq0`<%xAxU9~_?%V*ZJa8eF@+rWl_0w_lz~;n}kwF{5Pn0~x0L7=sz+*N+$SSOc1FHsH1$U)~jzuwA)PkuOikC#?^!d z!g*Y_hvb!v78&2Z4S#!WsIHzrgGabFYs?LNpJw8|ujFb>^U9ZeN0zg{jpD2B zXm0kNecFeCsW6bX;jSwk5zl&Fsh>$i<>b$#6k7A*3G(HiliobgwUY11C?V0H&vWi` zz<(7rs@B>pLPot1efm1lTpdxPJBr+_)17%ba zIb6%YcPGI~5`8i+h{-RndRy1jXRC^xQg;Y;Hc2%$&|wcg=Q0#c@EwD*muERLutRS> z(;t=@cHUh@rcok%S(b$Ew0A(5Hy+iswUb!)eTm{I5@m;IM~MtCZf0=D?<&8 zg8p9VaNWoF+sT|@^}J)BWi-@i%r(rCC-GXDS* z@x>){6iWLA3KYw9xt+lDIv;E4IrU6w8D`yjNJ)h*K%acpt~Ix+lk$K%lJzLhAxHdX9YA-z` zZgE9>@UuQdHM-l8p4{4nwn|X}j8P0xL*0`nBK{WYLDWrm;D#qHF^T7-WS2x`jrH}Y z3Vs!}=EDz_$ppY`BgnRQ=`$^YagC0q?7sw9MjMC-QAv3(Z)@K=J7j^+BqoduzK*1c zwA2q3@;lFXi+%>w5Td zAH|fYJpr%tEpHUrELwI`Kd}nw6al0*s5h|0z`feLt8e&#fM8CwEabNf;qR{O%NL*D za8Wht^8fUA@BGLfu2ufgX@E8*>ZAnoNgxpQJ@B-!*m>VkX`4FwvDc(}Hmq*9#x5H; zmKE?>GEb)Nb}-~7%1kewCSkzUwyeW8V`uf`-U#cJP8o6!Sukx_*YT6b?rdKcwP$di zs!;4?V91htRlOmW%SV6o@yBn6X#y`3{QhXC$F=SrS|EGmw|M`+E(CQboWQ5$CgtkZ zy)M}E!|k`R2B_m|$7mJr@)?k@8IYS4Pva$+Jvq)cY|mD_)K;A*m8b69Yp%L~QHm3( z_uM~IjW4DXG;eeq`$x-po(5j@XW?H+Km2W2K0XOw)^tf-m5)J+!q&vbpD{m-W(uX?Yq< zjJ5L$kihm`MGwdP?;?HX>da(s8jwitSM!UDT_+Dw9^J0Dvo`#&{$2is?ePE>7FJ2Y 
zM%2TJm(R}tV{Iw;3`H{9*Gj`6+ko$q0u2W}5ERrNKSB2|g_M`j?AOK^$ao;In7FIL z7DzhHi1n84M-T75jrAGrBS+ZB)j(#L9!qnFevHW$)?38YawzC3b8VjE440Xd0#s9d z4G|#BpmXMa>4H1l%)#YvC9^p|_sFznu~C-dJOkjIN>-&>k!q$n zeA;6z-tu6on4R6d+x%>(I3ZWG!AO2=gv-|2eN6}V0>Vb6d85wwYX`GZ+rpIC+&} zuxNd|UCNf#M3M5PX9l#=iPd)Wjj;|Dg*6!>XW!If=@7{?q^1+u_QN}wLsu3zUspy( zrm%?B{&{F%Ax4X!$b>IfAuv?LO=A75n91J6xQtZ)trlIUndZr55WGvQnH(lscuq#`_-h(_!jO z%4omR9qc0OmBYcIDc)V5hOp3XiVEusxElVJ>+`+t^dhZw734t3cMam#oMV3>FnhR1 zRSR;u?QLa?^~GtESKnOGhJbtP$$aFyavQ_*pImsXp>OKmRuu~!YE9KcIP9kS?oRhi zK7O#JdeKmafzrJMRHR+6zkBex&}!YjMj4+AGPsd|iU8R<>>J_9e0cO(Q>3Lu2p_WH zhZ+ix1b`37DD9$F3?Tw?{N5z+`;U4L{vFSL1ml_1H7lLTF2%bl-_@+%E`QBODrH}n zT-{~N{6Qrh(J(*MI1!ZY1W_@qkBKzt5Rp-(_3?o5$W{rgoCBwH4Vc_tP}`q69_i3T zcm2yIcB#(EPGPCFiEGO2QZQ2cE!yhbx6zsLb!Wf!o4lo0rR%pg_Dzk6t;COB2aRtR z*x4gqlud#8-w$nPEK7v9&Q!m-{pO}MjI zo6CqVM_r!`zm^2RK&#}>Y9Nr;z$)lS1`G)?DJhIQT%fN z-tAB2$@t%`CUUoae|8t-9a&sm@Wae|bSfq&1Yf;@dc}v48C9`o9=^3x}xwZeMs1 z6+{H2rBhl|x&;J8x}}Eh5|9{b5D94n0m+e;R=Sav?rw>pyBTKg9)9O}&N=tI_r3Qo z*t2Kvz1I5F`i5kN21;e{tj#!_`J~LN=`HtZtmLBFw{~`zO2%Q6+dw|Uocb;P-8&SK z!A06oAWneW7BjxZO&SiY{MIg%NQX;j4E-ubs+*uBjXs2kZ0wnsym1gO;8gMG-pTJu zI60OhdLjPcQ`H7U68G$oCB5yJ+KoZT<$%R#`c*dbSYk}~hJxj^_8{_2 z4rUJ7eH3=aGWsq3a3_MlbSI*rjOW$T6m~E3-H#^3?8V?h2DTE$v22K8@=hF716v)g>>j-(ms$gTU|0wR#~QK=(Rpc&s`4(X|HiO_8ugWSgeg zIQja;^u4Fy%4#~MPY#DuYiFD$guqiW6|mlN z<02mm%jYSzq7tP2oCF;;d$9u0MQz%Cy)Owp)oW>M6Y3l^|M>Bylx2qs$ipyhd)@Ol66Z>R@l!z@**&re9r{A+*LnjbGccB>UEMm6aj?5M5F_ZQ{`M4NI2yC{eTD#rUqc zYIuwLX!W4JA(=kvdbWC?;k+dUZiv_K` zw_JVit0K2w|7gKnJrzia_(a`!x`ubgL4EdrlkFM*H=I3Zt6&Ff@ps&87rFe5wya$=cWVHeB`f zQtGoXO`u^+7?di?FxwwxPdN2tn$$X;^J2@@>-{0uW1|$xGPzkONvy7hF5`v8nh zHhuw`j_AOg9(a1)l1N@Ypz4*8w44(p-F-YY;1C&so@{AuzjNJ0<4*)Vgn7QUm{J=| z50ti%o9VtQ^8!Y$n_MYaBa$6tvqTxrr59qcsuw)yDAaKIU1BP6Bl0k$E(1VOL{L=A6Oxz#cVlfHI|v_5 z>@WF=J56ALd&w`&N!2+(PZZs5IS=su#2x5jbz8O2cOe)R-nEitl&IvH4zOBJQb9@@ zEVdA)kF4?;dud+~L4mUU=cw%L3(!!KPA3kzoF_|EXKuoJIViba5_DRbcf{WgDT zaaNh3(;d#4T=HDtIfIq8-_WLSp|Vi*AJIvZsw7Q^k4^Ny1nz0z%I$xeNQ47DbAcRo 
zRq{mR5u_8v`x{2w+`ftwJxt&HJ9k0fwR#2$MY~to2^0S*{y<^17G;H7Eb1e! z_V?%7Om(gutZY!NAK%XB5KU_U=U(yH$w zzfx0E(M;0^;WSyEqv?wmR~6HXq@Uy0ZQvwnebE4z6iL)Vt{NFtg_wqB|9P0{xytma z1gyLAyx(m#`|TA52=>+Q=Bz2573&qoZ{o(S`cpX+4Vym?j$CPHsZMw9+O{|F8bIHu zC=GLPrvvc)ZwguBhd@(cbO@wlHy;Zi+@7?1sPeZfo3&^2LwdVF=IDdl>Z&Qc6!}$! z-hLF5p7&5QCvnafdbE}}>9lxd64S2(@o;cRHr02T_0Ta=f2;1mYsy9(hQ5){vu9YK zzkAvskn=1O^lJ1(e$m`yrCXw5vLcI*d`n?MIP8sQcjdR+;zt#x7A?y3rIy5HXQRNm?`lg=45?!xJFRcL}6V;v#?ir~} z?5B~4<^e8_Ur`3Rzj`b$Hp2?eRQqT{XZzXZqR^ds!Ag<-aF8H}3=%%${6KUN72<$Z%5I5}l}XbS{- z;2q<7z;HL~W6${o1Uk4DEyg-`#OkcwI1(0Fz<7n7MhH+%S%=8Xh)gCKQv}rA1NSZp zm~2b;?Ikv>o`e;A%dEF-%IcYb1uoK_33#Gu9g2&!t$02>SBEc<_McFHby*!1KEKm$ zooQSxM%W4M$JC59+tyfBRZ<3l3%H25?`CZx@@8Q#Ms(8jg$);fp>oC>rc9HG$&>CT zE|lMCkgfuE<5itgszsLw2%tbzuYUel4MKWLl>@Colv~+?<7BO@Se*Oo8o{OGw#?hA zUUURQ*1YT_^0l1H@4w~Rq&co)u1>R|Cr~T?=<5T~2+ao)t%sN>&%bS-V3l`kvR`4W zZDHS=YDa+$U4K&^$`S4F9y{7S^71m9mu~Y*QL9|K03=ZK=&I`IxF_Eyq;V@Uv~y9j zHex?QO4p}DHYQ7VvL9L*AsWlA;uU`-pWctuQy$Iy({gex{$Qkq| z&%@7qU?X0DK#K8pJb3?)v_OV97!aKJ((b*}RHT*{VVBfud z*Ih)n1q$>7sSG-f3qU3DYeuaG0H=hwdjI53-zn`Y)#^#0$`3@z=|)U%l}_vs`-6X z>z2zR~Vhuyr{&xvVPver(J1*W+v84U5^5y>v5ho2#n>+WbZp!8%c6B>> zDkDEIWR>u)q`)n^Y1L}wg?LgyH*AEAjAqu8u8%on_Y7v3Za2=+MR<%)aF?DUGJP(U z=W{!H^iOJ$F!Wk>sGhYgbYNk1V-uLlo19LILJ&qjATn1?RxGfEV$<`!w7d!sp7N%{ z1%wZ5;J0xAOno6Q_?D@6KkK(F|Kiqg2c-CPRKA(N=lBE4Rno%h#~`f|KtsF7<^GS>JYN!G5>FO0f<7RPk&918x9@TjDMAq^^ zg=CDAT@aagsj43yJv!67t5;deU)|^C?0pdj86Y(l+d2u%B&$ zivH!ig>tJD{?XU3UnBg~5}XC8+T8|v2L~lKNk1gT#<{+X+~fIYCXPd%>E7!En$HXH z5WB^v?Io7jzdm_uH@@Ov0?9j|m~8s`8?ShKIlF^TvPL!~dx%Yq&eLolQ18f6)bKz? zjp-Ts1=bJh>IBvc`yI5mTp1y*^G+`=F*8bW(Hyz7i~+3SxFtly#-o(S$@-I_kjMI? 
z-qfVz$?Oc(2=A>ojz?4t-UIoIte;vKNDW7jDu&don#RedpNBBoFmM{k976 zVlWsf`r;%;Rb!`{gZKva?~+y{4&}!GXLvp@pvH7Vp3Gc!s`Hqf+dl45|8^ElKp{0% zjl4|ImGJwU%fne<0%P20P(PBw4;->FrQ2Ctpg6#Nw&|hhh;mw3IKL{1YVsrmHJFqz z5_3Gy2Ug#vPG2(e4lDOA8A-sy^xqQM9=~&1pyz%14Lu-(K>x`TPm^qfoHbn@*-?7% zUBA<_EB&E=7Fn1;Y{w(U7C4mu{OqU1T2}6!<|>SEIdCKO?US$oVp)(cUD6i6g2&8k}l~6VC&migcQYO|F^@RUi- zijDwSKRz$Eb+@DGTpP7m2wMb0B}wPMzajg7DO1pQ2edmY5)Nl_wW~XK%;Ni>{}sq> zMFPl5WKOyjSsRsFE0b$de{-09@q6}Kfd4AfKwVbjl^V{Iv^Qr?^b}f0;g&SjvNeTS z=ck6$_Nh95%;PWX?6$&8(NLX7w?zWP%gakp4NQRyq8E9UdWG}i8v}+?=F(gu63#7| zb|sy7Q0$zCXrvt#drPyv+;OmpxYm+pH)ngq@p8;mbZ+xex8;C%n@KFDe}TQSmN@El zJ#4Vg{weu?lrkDuZJzpHoGFTUi?jnS0325jMq+p6heS4?K=f63X|~M@PY_GU_-oPS zJV4f&Cw+@;8md+ObIfHWlip%0B=Cm5L`0b2ECo)QG+6Sdaw}Ni;QE}58U>gpXoR-7 zv)C%tb9H)RGqYocdvjH45fNVs&pm=oqwPg&a;9O#GGUoVkJ!U9pK(sgWeuMorIayl zr^-Mql;&m+?SeD%JBFmv$IDE^-NQ^)8EHy)w=Py+33xEE zeXq>h2dK0{%ki&?an)Q>x9r`)60IggCd%x7Zx5$8tobOb)96_-qw%S<`EOvN8OrXu zO7`_hA1%>&;?l?H^mpVwBzUiZa(({f_ihL+V*b4JQm%>r$sxoPj#zCfO?*u15cL>% zu+(^1&c(w=$H&d459>6zSy)&`HOS*oKEsz2H25hS(Vgd4IabwF%J)!`35&~kc;-ti zcZaDrysI-5WHq|>&S)vmJOWLji!&6FEO6nzZg`x(O>%?ZKKki87zT+{B2O!O75*C zdC-Gb7YSrE@7BGpu+EzVOk1c=M~slydsh*2kbQ@Rsukl_J%M`nI=)S3-leLb87B0LWrP&8yZ8YnCdlQCLpY=d8LWYrRx9PR%n^t8h z0*Sf(c|XCSUS_OTzu-(_m%8@Mx&}qZ@xzX%LF)NaP&35HYdnAdPRo8Yn}YpGHeGoRwWs z$}VmK?yofnIB)b39&+W-Qo$}2M&smV_lf)CrP?;;t)L?&WKLF zir%=+3Z2;dGg5$*NfE4RgD7e& z&>o|kS2C(y^?{(jgoC{SK|SR zw?v8;a@h3WsT7({xrToDt0z6QG_3hiVmy^}wO3r~G(qw5czt^jIqYuKUCk}lW&;8} zCm%4-wyT8?4<=twre@LH03o>E#Ak-WJF0c8VflC5UMAVUZ}b56#>{-9NN9;d>!fD1 z&F&`M>j^OeR9HCvcRs1R`>hcN=c-Sl>~k(z?9T6B6E@Ht1>7Y(Ii+n#1YLP7GlRe} z!fl7=`fH0q9(4xK_DzTY8w6(eWdQ4GdmujkNp=7e8a==m}&eISUuco>lFcf7ZM32c#a7xF*4e7V?g9M4i);55Pg=XbYA zu-XBEiI6`s-I$?QR+3&PVvSA;0=Z1GN*C(1QfDRpq{3}w-cVld`rSz9RwZpQF5htk z&XD|PFTjv(C5?Xgv)OeaPf@hRDS50IcB{V%MP=DsO=&}?`3I+i*1ex4Bgb;37v3Al zwq35RJ8^L;8!l2r@Zf3~Ph2oKOYTG;M}G&g+6?*Ay`8sLFCHG@b=#r!R{wh6V*8Ue zo4%^8nET7D0n`qTp-I#PZDiqE1dmRk)}LFJrK{?yw`%ct$r(KYsMv1)0d!tx#FXTT 
z76A=HBcUMZG%5YARNTGWPvt>9(rePlqXWOOe7y4J+15=)&Pf{kUH1ubIQ7=bo?O}_{TwZD(~Euv`Sq@-w)S#rNurnU zKl?tduJv{~bb5+!H;lHp;LmS;-2K_3xh$1przGd=MP$9->L^w}cj@KkHuWb3l0Wb{ z#KpXVA|p6bwF=D)KNXo#e~~p-UaE8_{n6=+fsr_jBhe!tO-2CQ#LW zoMK#BoQ%WbWR8IcR5Sl;7fUK7#ru9MQd7Q}%?n`eePlB>*Bl(U#CEiduW*zvb5uTA6Zv)Q!Y(+ zPhQYsxuw$9e%deW==_7F762?|F?fjM^z9l-OKZCqb;8`v5N_FB(LloE&{hIaI2Ct0 zlxQ}8H~9jSUuJ48-YB5XlBT&_X|BK)oLsQ3to_4$+61<#?3y?1cW_cF!g?)L48gu9 zF9282T)m5|Qv}>oGA~w3g)hA(a#3ERaC~iT)!}mtngbN(`UD*fpoFIPu3H!%XI!4j zA*c1+FRK;WXls?<>sD}cx;{fK6ze2MM+~ulS|{$qPs6=MVsg)28as0J5xX!=yWZ5? zohyPC>)mm;vn@rH6-`z)HnaxYzx*DTBmdhCFfCvS_|r)#D-UdtrLjjhK$j%yMSz!> zX|p$KK^wXlksr*UfLgu;tPu58)Oa6GUHtelx&m)Vi|>H8dW(fgMrRYVS$|rSjJs!z z7VL&T)pW__Rk2fjRGa@*SSf~>`H9#^4~BR^=yinEbiuHJ)K^-ThlFIFK&$rhFEyba z?Kg6RtR>G1h-)`fNY)5Jq((w{im^qT=?M97@ z4Fx7bIrVNrE+^5$^@?c;UEhM}8~9{#YJvKT3?d_vy+=6gDPau)j)7?M03b!P!dW^3 zpJ(@ACK3w={Oa!)6Pf>b>fQLM)z2c*?7|T+fG`lyB0LcA#A=j@#+#12^J0CP^$Ew7 z9>28V6+F$q7%;n#Yi8`&y|V9OPVJ#bO_3s$z72B*2IV^i+u8$yN<>7D(O#M%CNs@m zH=Irp>(|A`dfro53p7I2%`HTJkQ9+GUx{73){637aSlJdklK{+$`mu^Tp1M+jdS?J6y* ztzcw0`Y$g(zxe5x220a={o;*dvSrw9CsLi*L~Eu-iMN{Rv@5GG=N+^!hrV#!NRTSm zMwn9XhRx!HD_nurt2$pG;WG>KlSlHBSD9#moSx*24Bp4Zn`Zc>Fe3=FX-J~V4S0AQ zp9!Tkxctrr9RHXl=E(e> z)E07U_T7M`l$Pj!N!)x8#fmC366cBIA=Cc4PDxC-t()pSS&1)-xO!BGv^x8-I6ESGI(!lV`T3;itbf_%CrJSLpx$H)F|_v*V(&U0`nQAz|SKV*Mjy+@38 z!1@L_+1v9{M4I12Iabd)R>zSbfYlE`v#`fTVi-dzi-*a4OIv3DTHX6X$oyiAWrUbH zQH}jxxfj^vR5MGa@W4}a(8tT#O5aFy8us*OPAz8r z=^Qs-ZOV8a;NwbTC>4TMU9^VaSgdzybL?9nIbwwiHdni08iof>_PFukI@)-M#17NO zq6OzcOz&jKH?ge=sGHkqGM}CDdkkHt!q2bKG=%B7xW~Q5?p}q% zxxo#dQgDES!cL2Ex-mjls2!fxI~H^E@B3M=xCY!c{?TZyGf_-7zuI0uL=*ws z*MCEWn*b_&S&u(9(Kh?tQencu-*23qSpP8crwgg=krQL$@xfy_*) zgHxtWSZUyMN+_x)4fz)dXs6kTN1TfEEz=>3(V%) zX)a*r0fUfGvK&K)mKHL(l>h-M?*8%34|V%viO93G8_e2-TCcTgGRsq`OilJ(_f};g zH2$+Zbs|Z-6Ik5Z6*Eh==vg{YT>Y}j)ya2haxtxTi04>?H+0l)wF*N zb$U#U9Z7lkqY7uI~(%j{x(z2T(d7yIroE55@8F0+VoZ_}U^_dr<<;$&u9 zn$H|VkA6FxCTNHm@=_>XNYDTdP;2!#KqYDHG&jGJ>dm>gSG?=yVkN5Z;5H?r6J8J=ab`A8s 
zs=uHb1LU8zmswaifLq3p#jmSaFgDZWVeibmJ}AT|N5ExO(lkv@Biu>+6`0_8N_{U5 zEe3#1N)DSN`v?_efb_1|V`bxt`vChE!OkM9{+SB+UWc+fzm5-VK@h)K5)gx1GICX4 zE=mkb#mY;Z&WiJpQ`Azd8VppZLVj1`)5SkCkueXtj`QSIh9I>ib8$wP+y_+uY{$)*4t zVpMaGo6=Ne{zlhC*5Xh3llw@Xv5G@l6>($~q`%Nrx&H~7#%UQxVf1k&Vn(5q`~tr= z=Yz8EW~#&=R0ZYt zKHJMlI4Pb_KBtO6$M*u)b-*uud|Mn3z7ZxgHsHVO?K($Ux z6mxx?c-8kKL1)#*NnhPj6TchZX)mgiFo3(YR=AClq5(4=5$$-43G!Ftnq*J%R3Bsg zX16Z%tZ0B1LFW~(SW1Chrbio6{QzUqtshlU8wfuGmeO0*=?^Xv&6A5WG{y1T_FvCWJt+!cv$kW=O@E)!J*6t^DBbLu?WKm25x31IuR2~I_ zKmZv7IRVOBFOcacz#LxNqSp9Z&9MIa0xfp=izqS>dF|U`zSVbJ!Vy=%;FfXn#S72x zm)&HjH2o7-JW>SA6R`1=6csU*jCiW4eF3XXJrPLhcPH%P056UY&rvHe$$;MAn7$?e z7Eax4hOq-c>f*HShLS?m_kkpF^qfu%$U5c~PM2q%mWKU!)#(Y_T3>Ou1>2Xw9bD%8PVGTJ7s7#MbtE z){e{+d&E%w$8lum9YCr{HeglgKx9?VOh*}}RQXJgEQcR~*bUPgGc8*(w|rWw_OWwR zS<%0a@!!@b79A}UwImA#SCzmOEZJ4u(+w;v0FsMH|62ZnQk@^Jk>@f}bve3z&#Oe_ z5f_xnN@zsGgnW{D!mzIuKE&q#uS-`*!Nk4bpGqCiKj~)0^TfO9>#4Hn zWi-J9VX0qf#gf6fw&AXhM(U1Z6@Fen>v`JMK+bBWL{d#^e3K^WI;qEr(aF-jp`Pl` zZHQkc;{hKf9frU9fFjJ6v&ve|_gSDSKOG>ZSD`Mwxlj5Eja<%RWy+5ZN*DqBz`LBC z+Yi&%Zt~l%bUo6P!2%JZC7hvi-Ilb1gz1#J77yRcET1A zo0p+G0M5wh{+!=*q<>FE^wMBls~7%t-DV9WIy+o(ng*w^nybhSxb@JyHZ9*nTPaP^ zhDo$3iN(<|$xSedGBpM&S}!71KPyeeo&E9>BKbGSJW9LvJCoJBDG53qXhj56H@$?@ z>7?zRSv$IJDNJUiQT&f7{1ba=g>Y#_U6rGWuUvh7R-5lorQLQX%O2zg0#B`TO(?DV zAn*2!=I$#1dXC81Pd7o}!D+A>@hiC|-NG2mgV6UXa>tUe+_0OyJRA^6=7xH34lFnF zj9dPV{e+>euDn6cVP+;nWBSig2mTa?u@#aOyep&Wz4hRUgG8RObF#!#jW6C?_$NvK z_G6|t?Bd@FL)loU8}j(>gh6+{`WgHk#LlRpW9JLG;cBPdPj(A82Q%J#wT`3-^pfb` z7MQyhKjY39Q^>0Icc%_NGr*|J$1`)S@FmeX?); z$Z-npcK-8IFdK+XC9f_smrgF_J|Kk9c^28MBCUxfJ7IOLYbmFF!eGL zNO6hqJ6xT_DZzGyHuAWbbHW`FVc}^q z{Ebu~W4$ECtCnNbseJecu7!0ev%m+al_2z-Ag-rx^Wq0V1>=ZD;l%S#XzQ0sr9T)oB2Y3JnAAmxEPcs0d&3I$gG^vUz4un>oc>^ef7xPD~@u!&}_v zD0=^SqJ-CVrK>$fM&82N$u>QbSFCsZ|F1;Wi~d>(zLDm*+%BWr0W5=5U0=q54Kn|` zs9oBJYJnBSY@6w@)>@pW&;Q)b3RrKEe;Ybv#obg_Wqg^X%B7L?Mo(Tf7+@y4TjGF6 z^-R4>Jkeo%TcRdotcc=FC_wwi%MWd7^H+m9^#xGKO{{G_6^PTSxT*Mgk+_lsW{ttK 
zzv3+d{QMM=vH+z}Jqwsj0DJi2Wo9MYALzt{fbA62l)7cAxoHEt!!umlSw=~6;k!80 zh72bN;U@ujb^fQZru{xDz-p2bsy|ud0K_Ra6Fq#h$q`_N_Dp1hYiau^(z@Z69+K@6 zd>c*@;TP8NO{Ovn&uMncwaA6aYxAR=xYLgOH)_7#(kAyO@@lh!G5=7{ncskU3ct6k z+uY@3yR0o26S=#$L@845brnd!0`l+wQQ-Wow1BH3X>1Kr_e)}IndU5-GP#QP{Jp$n zu`%8Sg9lhO$tm@R1Pj_=K6O{@;_hR#(Z&)OI%IDZ*r_F06<5vCjE|8cyi=gJ>AH=`T3ZBChJd*h?==1yTYQqK5#;vYij^?ZA=L~V(ujlNfZz%?k ztuMD$p{SON>pOqeYDo+Z&lK2k`TNds5QkD;a_mfcOz`B9Yy@3vSw+5L`2?{i1sile+T z)Hf!;L3q2Gd7XfEmP|o*m!9ugT=#t=D0`OzFdrT@7+C-!i9#&URT9uCJD`+ea&5d6 zft{awI0;d$dP;Ix4HTx9{g_LpKvf7G$jDtZ-Y?Come|qh1$J~sk=12|;-}O#H`i1M zbe_K^A}v;A%+rX55isTKyq1`(N-$xfuqu8CE|Fn*xefAx4GbX53>xuc#{P!G}3pB zt78)Ey_*#+PH~o@LEJG0S58s{77Ubq1QN!_fSVcfcf=E(`wXO1v~Hs0xWI2JGjFI( ziDZuL&~{)N@@?2&^r&pEYu@*V5iy6NBUxiV66(c(ccG=2`h}CA_|We`M|D28pli=J zVW#>&_v3dR3>#DRx&x;ICA*2O?}7tAXDfz%*XeD3QTmftN8qn0o7V+D1)sGz(V;ha zO?NCa%1D-ae@}c5RccQ|D`@fstuh1h9+9fvmnR4XF|2Bb70Y>$`e2I#%$MJkKOq9l znYIO?&opTTDsW`z*xH$3Gx9`fGCnDo4AD#8scmibTK0u%ykbf91|4Zs(tJ7v6o`So7)%`FNu>scQ(se1>Rc#Q;s zNZi~Y(LPN!v_=vdcp!plxa~c;Ml(*>z4iVS55RvAeHaS(l$8af2>F0WZA~ZiuybM3 zz0&o88UetQoh;LsIZSKjX?gFQHVKZ>UTT0C+9A%{8R?pV5(H`M~{LiJj?E)Q%^X(8T z#Gu9a%u-1rEqGDKV@1Z|$+w<%V6peV*?sfMtB2@BlqCbO?}9lH|LmIK7qYGJAHii$ zVXzn;br8l^5WN%h_Qu&){cA48IG;>z=^(XwH!3x(K}iWnVM1#pPIkoOg}YzUCA`^l z2`Nd{_uS!G-;7`Uw*L(F?dGi3|pe4 zqy%0q-InIr-KyDLqQ?l9SW}-RDhXh{Wy2EQ&n_^eCM5n*zu}h`=-is!cGyv58^gJer_%3sM-w|;rdi6G}l&glX-lc@LSRd zDgnufc=p`R?>#t)wME&3HGL<)N1!Oxmp7r@nnXu)ib%q2@c3jlw6M%9C~kzEjZ zuDr^(Npx>pn(nI7<+0_-lsC-qKbDyHJQSr3?|3N|e)zxjYrq>~JJV!Xe$f$L*bnla zD83nS-rU#bPAgsGqlDJldWV0A;~6D-ZLNMg9pQW}K5LC#C zs}M-mnCS>s3cVQrk*;svXhqp9ALnkf4vR$!qmNpo#p6Rd>iY9K>((_I)bpwF+VTr{`IJeW>>|^08 zaLVT@2Z$?br1$6eKs(33<=sE#fxbI^gM+jam<*8#{eP;rz5=agfgFJ0-*k!I2Ak0mY@E%u= z+>JJak?aKcR=i0R%+Okqse?^cD?^v3wc@2D;5XXalf9dxZc|N^V5A7pJE4?WanvS- z2k#X-c5oIs1|^JE))&2owJH!gdK|S)S9M-r~PBwxi zz;82{>;$h*ao@#@2YYQdPHHZv?6+&MKK*$$bC({?{X9K!4AN#3sb{F}v3!G_shf2W zTr1HCHGVe*Ykl|^*uC$d(1Stu=V@}oFxuw2u>vq?l-S7V;)3TL-;-UkP3t6?VOlY6 
zB)@Eu(*YPo6qw2S*U=Iq=|6~lJV}zGx|`JY_V%sR=t$w*)-> z*9wMM9fEU-MH+q?!y?Q{4L!yTx}sxy>pkPW((wH&56<)fo$lRGdd~N<*cQ0UEfRmI z$>z?EQ(fm81KhXz>TL8ts_zO*8_ZfV*GP1~bOK87Cjh{uA(_rwwA&hh`K1^!mpP`_ zYuV3!=bM&=3h&%eA0=0KX;+*FcBD)=?B;xLpv9zlJ{vG6?HUWzJ2&GmS>r@9$>U06 z`peR%@XPFnw+Atf6S=ze#Fzi{6Opo3e1ng~_z;!sO2wm_um*WSjDz71Szw0SRKASD z;-Ybo+#tRsV=&URsZTBhzh#Wu&f?RUX1samGFl;Q^*6nrYvlBsrtt88W73O}w@kXc zSz+^2C-Yr0YJky{9qmI|?GLPE{n$r+Y__uWI1`WUubie@Rep~dfK5Y%!+_LeS9i$2 zv31xrv2!>r)Up|`vK{yG(J$8SFfGQato-q#BOrhQJB;|Q^d2Um%Z?-e^mTGt+9EAw zD8r=Jm;l$PpK_&-ckJNhSm`!S%Ox|X`4~&%Ha@i@p$^r5ZK*&pIZKL zEk~^YO#@)F?n(kngP8Zwx$TRCeJ7ulY52ml+nl;mXG`MA zp>S$CnDRYM3hJEYE-CYFy@QSOL@et!c{4Y`^@;+o6p`FR|AS)Z}KKBX?O z)fYHJEwujDos2l1-xojoUC6~;ZuQm5I!dBd%5r!GkM--_xdWJc%@HtWNA-Xa$3~@I zF3l{q23J{tFxYsObiIfb7W%w1r`o5U@Ej2>L73v^-MH|MlmBcbyM6y0a0F|3GITuz z1t2aOFQCXDyID`=SWS4E12Re_nweglb>*-2_2?$_P&s@hW5sbf?MV)-^#y%%oKO*< zy@rR2;8VL?vH6!#?c!>^A;7k)ZE-euPEK4OHg+Hi9E;ap!eGG!zU`^Vpy22#cJ9mL z#>K)HY-@~;PYYij++azW{*FicRwHcIdth@yWM$-L3k61VM%vIndym-oAouvJU>UU6 zn;Rd{eWb4L>Vq=H;=)LC;k^9E_h87`rJPC?ty4!whp?gTc^MYs3*6%a?iJ&!2f7p; z(WD8RU&}5QkoXk#q208gvJYYVX%bv7&hUFx%OA4GCTsbbYpeuwdl_KhvHltRNv5o^ zSe8wflPqJ!OA#s(1kO>{(u_H`3~9f$!?#I1k7%_Qv$CvRtxKQ-_~kgVwwDIKJ3i{# za7t|N#OxM3h=IjR`2{{@%gnE`b8(T>2Yw|XAxU$wt7x|0_uMx&N@;ueYFAuz%AN;$ z-e6->?Dzo!fw0(0k`V>^88PYis*gFlDn&aw&RkeH2~uE!;#1wP7;C*O5N{-mJlh%W z(Cg=s^Ifu(_Ak%=EX|u0<8|1>m#fA=yizq!A7Dv)55!uFwZjc%Y>9D#I!qq}k3p4K zOFclYOwlbc1ZvqyqSLd8m~2vL7U})GFQB^hz6~YVg8u&<=TVpQ#ZYUbldeQ|yYDOg zMyP#8*davL)2!{|ZzCFg$tg-x!e5*m?hn{R*7E0RJPuA94ISK#c%OoPHw{QaS-o_N zh2-+y*nP|r9G(w@?rqLM+t(h*K4QPr?RcUyK;hLdaN;E}IXfr$3^_>9)qEopt*+c9 zKyJBB{1JOU`n4noL@(V2=BeJ`<*?s67V=qce_Ssxw~0fV%$s+a#6qYv%2L8rrp5kuZ$yrh$b?)j2<%Y z^gwv`E2NP`bM{~^o!zXclup~x`s|1>r$?U8AGS6-=^>@(Fe-Pd0voF zH7d_oFJ0H`G46r+4|C}Yjbx{sSpasG;6dQJnD0=ZA3_SH4uQ zZ6}?*O#;8q+i%q{Jqz?si*QI@YQ>9(|w-3R&TQf(zL4{p0 z)9{4CeSWX47|-Y{tykjo6x?@hwC(U<@ZpQ^#)5!tL|P-9y?opymrk0mw4Jwn$hwkT zP`tBeW7D!x(yfl|Cc?*8x9aRwbxQOD5S`Vx0#QR7Qu$FNc1GxH&kL)bgn3|FmfB#H 
zg`$?=u|Imh3FfmN>x#D^YvjRAZf1u{-w?>(V4&>rGbVQj4M`Ohd}m;n48Oj%_5@qS zRr&gM|9y+Xo2l+Ch?k7k!q7k&nRbzGN*U<(TG#^#!Ab^)dEJNNJ3AG57g z&qvIbNW~-;5&_botan!(HnVNOzow4!p$LDxWz~PI=&r_u?2FHG$ z8}Q#8_AAZr5!_Ra$*8;eTd*v246m+6!Hu1s6Jml?VrJh>rsSCI*JTTH@p-tr>v1NK z0?@=NE#&?klc>CWH9h&-J9G1tP4+v-co&Qy;}P}<_9V&vb;_@>3gMLN1}a)i|ACJn zZtP|wAx5ssM+1TtQZKd@fL`vxY34yF+fy+3m*kqnmXLsM;=_!i2N}9Bp0!0*kJ4Ue$-6jy69}Ox%rT@54B+qRG6Ap2cW{} zX}0vhZU*I4N+Ad31c|@4qP8vVKnM|7ox}W6CT%Hai+KGCIt&gj6(FT_D_)->)ik#u zd8zU_3v|MMxsz$eb~uV>C3@L)k71aa>*js;rZy$~{5orRWA^%5=wY)LJ`rGz8Gr0NY85u!6E|0NQRJxjlU`T{SDto`|XbG z;952SxmpJHovnTNEMK&^(}Gn9EligHr|o4QHt+8NPp;E6K(E`t)%{SnBTMC>#`7qT zl(6t{H(^6q9yj3%QP((5(p%ZtM(M7dE|#;EJVa2ID2Leq7Z1ko8EPqBcLTiWPEX5< zN=zGb{uC7xNKj3LETePYP@!f)7|zkUV3Bs=6f@nI$27>x^+?oKDr7QF0<2~e`^%|^ z{|;!Qh@**T{|luD?=goflh^4=orJ;lb>1Yc{e1Pf4PqG9#NsZ79r+MlT(fhyl6@tI zcLFgWz^R;X`6!$Rj9@Nr6%;z2(OUqZue(~-8~DhY{YFfVgYROjzATs|@mj(u^s87o zED_{s_RxP{Q=hKJY$Q}qXUGR%TN~zW{PFU7g)~Hj07ZBQM6!M{HH>-oBsq+>icIAa z`Tpc4xaqi85%m7>2}ES(%1~P6E{L^HmFMIl7}eL;=X`@$kt`f97`OdZ_$E>+NJ|kaqG{(D1UFEX-;GOGGr*@T7g;=zLXnaL2B+|JVPb>@C2e z{N8op5kZhfkdPRVmM&>VLJ<%Ul`cUV1Zl}pKvF?MlopUsx|@*_X^;{rY3Uwf>a6j% z|9kIq&i?j3AJ^rj&Na;Y&U)8+;(qSue%#>dTh{qEX7*qgo~^)C9!8pl25j??_f=ZI ze_NT9?_-H`o6Nod4?OmN8w#R}wfCv5vUPk|JxiJMUps>qx_3caD*d%GdEZ~>E$Z!N z_c#g=Vg-B75~H>A6o@FGlRmJ2 zj~(0=`O@EdUfeEy;vJDI79Ot9a=_51f zO083QEghYcfJotH?JOz3<){wGwKDhgR;pXIH|s3mDC@qxy7ngI)0sK*W}`-X$`Ph2 zgIOd__(ON|Ua^gH`q8rve1(Whd5(H*?-lJy3GsFGS_JXZLKZ_<#Jn^5W7{g3I#a6C zpJOux#gh{O=Ug%6I}i2!&IO-V`re_G#{dCEWO(kSJoIK28L9ZATqPILuV)=2B;8cI zd3830$wS{^2*Dz8!)d1zS2y=(>O;7p3I60E#zxfR`#lKHooeu06-(b5@b`@htffCb zB!Co8&0(`dP`@&iAgAczTQQutM$hO(^c_HT(f|+h%W^bS*ng=)9x>neSSPPwaQ-QL zWg!5Ks(X4O+_*xzo++z#bpVlb?{nCt3NmGf=7nCoObx%4?Q8TY=l(LezRNWedhFc$ zAh)2AZ89pMQ*`w#KS{gReA@dB!}UKW-2sGma>QPxJMNy$2D^JN?4Kb97Us)w-!oN1 z-vrG)le`2;CLdp^IXmpre%H`w)^Y{nYWzU?;TV=v7$h!9l~5tUw9GOnmYj&`O9=Tp z7|S~eNdftVWuKGBdzJxl)l)(Htb_sb#26av180k5dg&kRVLzhw_NaOf1Powb5y)Tp 
zidn%je}*3ZLI(Xxa&FyQWi~L^6JC&rSk}DEAbs_BD(-Tq0FFk5Z`{b!zbt}WS7a`F(f_ILiB_fc9ZjRLyAjBb2e^NpTXKyc>8l!)RwGBWK zQy{6kFTN0AnGGDa`@y%<66lZ4eM*S~;RFn(Fzi+Ra(-kA2pz#(@4VcHzc=`$Xmx=w zIoqC&am7-$-!hkR34?627_utJg89p_cayPF!qYn7qzhm|%JAQ2)c?k<#76z(&>2L0z4E}&P<=-J6bzmWZF{>Md*n7CBwjdGcjAj`*K=-N zVD=;>lZddx)@V)*x1Fnx(0xy_F$|!*R9|`lk@_wu__=5%dMShj=h#FG`Nh&ofeJvf zTys#kh9VPx^nP+R%hL5r|M=R5y2jGz!1?+r7XtfdnOrf4?8WPBof45-mX^@IN}E@p z{I~5T*F%Z}Gipjo-V_5e*75Q2KXC*Ui<$NIe#44XhIKng=7$_e6m#eI1SY*=CsBY` z`dWmBhSD04tMvTMkgkMOdC$+aj3g@Exg(OW<=B*n??-R3a048JgSN9f)QguniJ?Yv&gMX3*-2J8yA%5NG}su zA;MU?i3lzL3rjI{sFdZl)vZDP*T8Z`zzt`a;eHEHQzYBE?-?gerk$~Yo4DbiiO(^` zq)<`%rwB^Zk_l6{7X7Uo)S4Gc(drHfk8hOKAJ^XIjdH7q>?`SV#9h8`YKqS`ivGGp z_gs_U35V{b46BK<)=zc=NS^``!Nd_QXAgv;$mh`gRF^r#w%pjyH41ejm~-#= zsg?jmZp}@W#3>h^ov&~pp)kXLbmQHZoRDT;X@esDtt)T8M^fOj8{hA=9FLTg=J;tD z7MJVvC0u;=l9M$6hWUdjFr4%yR|=m0(%VyCHuqTck-mPEInfjdggQI$fa1m8M^Y!B z&epzf*3EBLn<@0E86B}xfoBO+Wo*Q!V7Te=pKC-P2Sqnqm70FjDN(6Dd~b9p$n@n_ zSOLFNs)k}S=VPO)0p`fPu9OY(fr><&bl}M7@Mcujboag(WLRKDQ&xhJ#Iu>5Bm}15 zVH^Yb+D!=jl6y>U_N3_(wr-dWi1NKPQ>5wgAmlgBigO;CUCc^JNttWsc>%s+kvcYu z&z}d5`|a!v#~}OkyL6`q;dP1 zyllZXJ0S=&?(umIL`y>-+d}r}4zX)_Zgy#`opEBGxLbm-;af)FRQI0&z@mp|tm1eV zce?kuPC!zDZr^)5uwWtQYFLR3&y_x&=>Z+NWsftclEd8IZC!*2IYd^jcKYi1)Z)Rz z(?^=Jp2+GE&Yc&qgEcU_(5FngpTYhMh#0A|=zhvlNAluw3mpVk<4E>fNW?Bc!cK2M zUc{5FU0iU;;*}{~$jUF2?Q;CS-KqUo#Y-|{>>I^880yQzDRSpunWYH?QtpQ8md~u( zZ9>754X%ZkxxY5Ezme)%xja9OTj%y|J_tT}H);CG?}AGpfGc-pa;Z1uIvdh;=Vmf4 zymst*vgww>?PH7Y9{Erojc zx(~f4vWgpmIOg=a=8b`_|3?(%eKl5wPIh=etae4T*T-ogPBaV+b!);vR3s^jZ2g07$QR~ zr=;KBF|*}d;^9ESj?vi3QE7pY^qhCi$oxEVDh)R?u>9_W&Pv!pwa3nLe^^K~svjh*x|b1C($>-l_Uha&X& z;B}e9tCkH=#k}^@H)eTW@f2CF_FKP5870f5fCVpP#NM8KA1Qmbi*woLbi1rCA?ZUC z!|0}{s0cI{NKrGzuQkk`mpyinl0P!pkI0Qf%r{pJtW$d2dAF75vnC>3`k?1w0QYS_ z4i?2y&IdBrHm{LGZWVQ$AQ==RGF~nmqhB$7>9b$XvP{-bDd-#FrxRbzr117}i)6UP zos01EvC_*w?+8_=E4B{SA8futs~Qt*D!>oq3-cX;2tEvAK__KeHN&#v+ zm5A1O%y{QTV%_yS_n2sU5m;#y6Ev_WhUW!nU4<%_@;&_xEKaD 
zi#1Dhn%gh8nH`3**ZT&4{pj=F*@sElaz8t|cqjQkYF0|#X4>FMGwkTSxdl5IQZ#RU zMhEeq9hPG39l7CZ_9*$s`};jiEt+#{QJsdz7hmXbh}?9{#1?w1JHsE&-@k~+t-=_; z^ZR8sJG?&ZsU;3aNjW1#=py%I_2C$=H zfb$LloOkL8!hF-@K~9{P%dfHQ%mM~6?pGTY!KY;y2J|^jI0+XUqQQdYo-1w|SlB%z z9Prn7^l48av%;}z*<+si`l^;OK%5RKUh#}CK_!ptLrh+wZ`Jj^fA+Igr2nbh&-YHJ z@5ROj7z`9nEpi2@FW-J*LX@i~nnsD7YO#utCWqLC9`s5Gt-Z6=Uv>2K;!IP?c$nI= zU*T|%(Zc;LN8IYB4Iwv(&t9o;#t9@T#4Z8k{-^$@@HIa$TH5C-f8uH0MQw;m2O@o3 z?b_V=5>$7Lp6hX(1L>ZOmL!4eDFee_r~0vy0f(N`*JasLd!q&SLM=8;Dxtp8e{_t! zDU3OjdY2;PfY&X!B=!~(@bNb9pBw}L%FFxvh#{PcV?)RJ@|a%#`l1SPijoMgfB$B?%x~gZ*bg>5zxV^zpUuz*!mIijTL-bRvloR z_(k(mED?^!vTwT>4-IDB;eH)fP12vnmY3-U3V*~rqMbxe>}Hi_?T@|}5t{texWBtz zkQID5Agz1w;++7<=rPCpxE=7bwMF=XNy5%SvfHkf*F&MgxQ=dvi4DGeSZWB{Jc`Jr zQJUa73=VcE7gfc)k$naED%zi^7yx-Z_?ijOuA_v6P;unp^^6d;Cj=z7M#lD%YCx6e zUEj{H8i8^DEsk>%8y1;G52EYw*@r}Y5RO)wudK@e6=lOFp0yqv9E6(=Yiz6qChP;y zWqkbhXtmmghMn2pbas6#vA@1!;m_9A*7|zqZUa++S?!dD}ulbnMpzfAiNrQSHytx$9i)QA_mDi8d6T-@Y1 z`q{wMcjsg8*?@phdZ4i4{y?ElfdC&EU7u`A3zdjpiW>d&pwRyL9*7`AUuL7WK-zlc zO>;$=JR4R*LSjkc$9{L+32LbaPC`V=RARkuL~bJ~g}9DtB8z_13_J68QFeNwZZpXd zXSyd7NCOdNPHNd1-ot`ZOUOAJ+7`|7G-2M*n$uSUlddc7DtV1RIzdOV=k=(hVqoHJ z(qk?OK*`A0PUFe&*O4rp+@$S}5Mkx~rhD{!q?_W>#gEn3CD5Ena%P91u2V3ef8w4Y z(70VPiIwo|5u8Xm6&4b&-yDax8kcWHK_kmct9_@7Hcx785kB*8(ikDs{wMYI-?Pea z$bvpYm|w>SX-?tR(?x-@X7L`8yvfbubG-@a-!c$#!aJqq%od)~i~F7hwyuul+&hOu z5`4QYF6WQg6FyrB-O;x#rDT^#DFYd9(C}ka;nc_ow8qcz#a;bUR}F6+{mdQd-7zYj zq)&R^L4xdv4t8hh=2j{lUB5HisM0GFjtf4Q(ES$|pt@r(E}sN#JwJ3YDLLCon9J~+k~ z?5}*bxRFlX%?tl(Q8as^ocq)?F}G#j?Jx}T@rGgDCmwsN+dhu91_en^pLzt2+4vIY zLP9|0tNccj=%L!$ZQyX0BY=K_WHuf|A0LyY*?qf#NjGPo`*hyQw#fEg!B>J& zP0H#N?9_6ok-*RbVd7@6qo;o;$}N%Q0;_vD`|I%IJ-j7gVo zv{-08aM7|bH_uR;Ym-L$SFrTBN|xs@?qZc+LY^1y?vY z%2xzfyJ)Y3$D5G{ese36<|k4`T-O%Gs$9(uZ%C_3AUS)uGVEq|w{{&X{gyzo{3JxA zpNjB0NN7$!e6(VEv-lHruWXVzrt6M^c|p=>j>G1mHQJ`=B#pHnFFQjPpxT$~@v0p^ z3YU&iJAu|^`PKQ&Knh!h>$isw+w<)OdDI6fDhoTBCOJs2g8i9hMGT`l@2<9Je4}|= zF6(i)T8`Mebb~6+c`Fe6WO<03&^NjDIRu~>^p}%tvP6+tt&59`dQ=>V;#E`W_01t9 
zcXIcWESatKK0ms8by?fSsN{NjcQh9fz|!s#{&R2#c8i0u%+J5K%*%D8XB+muSW^kM zKY8hmFu|Y^@Mk?#S|l-S%S^5lLXBrD#?YHnv&yc|J3l51*yJ`7L`O$okzeEf+_f_; z?v;QTS_Lx+^h!EoUz;_AevVb&Gv+=e#_|=$?(0?Zj+H$XjK0@`)Yt(7&JN3@E4s} z&Q&DgPct`E^=wZ!VW>&)cJty^?&kn^@4;Au?pnJDUON3=$J|uRS%PLb+sbLgqXHlN z+0sTn)s98xRv_eb>+0ov^==zz)cQ0(fzR;Ilu)y}kK)xjCEcZs@rmV%@(d|ilFxFB z6OHE8(zEwG>1g#xBaf?_Q1ph6A%eHFe3_d{G8q7%i0aMzF|!P`gn6&m-HCP)?Ozfk zAa6fg!9A4EUg^sY+$^);&T_rkFC;sgvR0P4Y?<-wlO-~&GdPSoHAxhZv^rk`j~E~Q z*~H6YIa3`jec1he@JsbF-%9%Y+-zmQQr7&NbU6HKK2F0Pl*qglT|vtgJN1He@RTpe zs#c0aPSpiGy}Nh@WNmqpv(*(%hM)@|ofOYjRGP-=jk;IB_b=-l0PUQl8{{~Lgbpd( zgK%&~nl6hIcZq;+Xw8SnMbq7x<7tGF_paSLi?VpJz|P(MbEdvLUu>XzEkG6WUuR}! zUj8$=LZ*%ivI0s_;?%EsarL*j8gugy6V{Ly(JZVGJ{zx6*Q4O)x-O2g;*2D>pIjoU z^6+WfS{#`qs{YvwmHbtw;Zh5K{Ps;JM(Qt#p-ijxD1guy8cNUY{>IgA?zOQ^ zK7KJ;UI~$|ybYKfJH+$OkQa;h2I^Mn^)r6o+<6GCZ9Q7If`6bT{K)^uJtEk5g}iE^ zUNFcd9?@ZcNV&aj`5>dI+~a!ek$l78piWGeioxmb=H?UQ?V2?~uYeFeNQ!2b)!1*12eh`$ zY2FCm@{_KN2>)4SB_2^pA_BgZdyz{^P+NzM?~gxv4pTw0#6l@8Pe1EdeJ(3L{r%jk zgJD+3^7(Tc`zFt&_bt->P&t~)snyvZ4)YUR4!);bbE1&35644v#>VOP_N_y2#*TcSaKFllf^|H z<*#bCcsDYJb!#7Tt7@R>_;d<;t;*G1f2X~7>w#k@wtjKZIwQ7xdep8erT!ug&?^7E z4m5`_Dlw?gx^*4D3na`AbqVpe#DdL-!%!}LQLw>eLDWv=mG&HBzi&;(z9*0IsHk(|BDly$`J4rR()FeT2jY^IYxwP`xEF{cEuT_s0bR zf}c_^I~488$_fn-X_q+kY7)AqP-0MIP}Eg=UFtspPSx0&;^-FVY;%3{ z)M`aQhm}(PTQM+NZGeH&{L|r^UVVFp(&<^*-*J#z`Qhf?K31*nN1XcxgEzj9@K0>- zIMw|qb z%Ofe$j2!~^dYQ6nz!8e!X&SxT0H9coaZM3t{1F`1Nf->a8!*utFYVvTlmbTmu z2G`vaC=)p$XJ8UT4*TDY>p}m>?!BFJWep>WM#Vt|Y)cXi0^sAr?k9jsf)2vcFma&C z)pyOR01AoLPIbN_htz|R-Kf0bSyKwJf7IuB+^xN}#{9_GplfT99P)jF-^ebBXj*$e z5uLWhiopWzP365VFxiAizkzT7N%c350x=ViJ5SC3r_B*GI0D|5Osr>v86X_f5@y%c zF_>bs-SMs}%o(c@28^hNsm}P>VG>shEUOx1ojQlC&Fo*ZYHMAddZ|5Ug8-djxu69Y z>wJZXVHpbzDx!k?so0l0nET-7Yerg2rsj6`pw7Py-{pEiO~iN+?9Mh1A^d=8U2P-? 
zircNNO3AhWp$DQO>s|@}2mMM6t@|Ih!i(eJa(sQ9I*&=^XJ{f%inW_14w#)bMO}=~ z=0WNyHDU-^Q3;bahafL_>b$f22fD(An}ycdeYq^}(vN3b=>fc4n~&}ap%uW(FaKWw zOa?aZVmu{2Y)}!!adMd_RiKNFtr=cIAWqKCx@Qd`ke4JN`c|b;a!0FyOO5=!6ntjU z0ap6baxNsgcp(uD#?C~^J2uzX8x?SF?Z*EHlI^Vem(WyaI^t9lZBB57DI6|&c_9^^ zqn$c{vkYQg)TRi#Z<6j&wS#bRgDsP*oqiDM7g)I`bB!5090j^t%pPX>YUKu*R&iR@ zTEg|bcfbXIxM&-+#|MKlFTQ!XuaC2o$lT5@|0nmSd;ePrRuI_5YtcD6XFp!@P_&n} zlWzvg0f1k{Pa(T`G;gEDCpa~ZJ${h<9(SQTh*8Tv0Ib*c1&Ig|&@ zY&q!oYvyn#$r~B+TbbNTS%M?PAOQ431fw)3Xt>1DidiMOg<-Ru}KDpi&R^rpvpw#W&d zrhwD?kPGd4KnjD|KF4@?IR?@+jlQ9rlX3(v`dUVD)}g$0gusz7u)tFW*zvVOt= z&QfD7qZV^U)x1eQyWkB&a(Hptvm^~R!vSsgamw}2>t2B>3eh`38Aa0M~^ zPLe;=AQirF%i-Qkc1al~Xf*blE6l;;%bGd1#F;^t(?w29L$EJauC-?5CRi5sD{lZ1 zb;>V@&9P;ox}_ImzjX6O2hXf9vqL)J?~M%JJH<*gJJbWED6JQS)4EbM7jnq z4uT%+OoaE`qjT?(8@9JDaBZFH%wSlTPlx0$0s2O`#ZdAcRJTxUX&VGe3;F#zup?n3 zo#<*oWgR%@Z*B0gAfXOQS2>Yqyg==UGLl5&VRyig62d%qo2+j3ncYh^g$S(-Lk#*% z8j1Cq%jtoQjt_61eJX)M2ER)$ZzSE+l?lH1BKrkw=Z~}|ke7vxG}WxvUkV-Y`WIoAKu!Kob$=HuDk!+qO8dG!gW8eb=dBs%sqC+sn6XK=jGV8jOa`-h{v zF^33q09WHf%CAR(n0^A)IQiwhFvjpwvCR`UHo&cd@Ik)c?<>oENDVhoEdzdGdx^e# zPXlYey!rmoTI{?FV!*DixA(fUULzPm?jQ-g`+tv}h}52^E#gx(YCquoBtzU+S0)WY z4;+4+)YTwWM*1HlSeqUI_rJLNI|KW@aY&BYUva^K9C8U0-)lCjDyVmXSsfjh z#=7a_dr8)U+rHJ?XXGFyIb3OX0z88OWEdV2ft6Naw6(P_g}yA_o1dC8zJLGL|68)5 zeW`vGs;&G1M`ZYTT}xFnUT#f@h%`#wEGwx^Z~(*l!8YjoU|zie?K8 zdj|#jqT`k~-qp}w>?ZRFzXHLwHXYVa1y=?49L*4|&{lvt@bNe63U|yEN1*$VGx7&y z4hjEd3h@{GXKwZS$WI2`1NpN7SZXR*t0R50lUVy5+)7_)j3KPq$<718Qr3LX3t(xW zp|9YfaP7^vP(VijV1AK7Npu{D!(=jaAbdA&Jm8BZT6JeViOAiA?aGK#e}6D)nPqBX zYAVn8n_z(t8pG<_3WeNOMh~|x3`rCe76!35=k>I0N_{6>^50nd*_J|Bj z*f~yJKE)&0lu#wX#2%i5HJ{WaJTp5(M-HI3rH^jRy83Eq#o+Pm83#$qEESPg{s~M} zmqNj;uVYU;;|Zr4&4NsuZzvS%Lf+MIoTMk~^QAnv%j~oiiCm2LLLHbOXE12UP+!yH zvspIxN2eJhl;yoNTExK2TRPZ7Sb!OM6n+NHRx_KHM<7n(DMBUS=rq$(C3)T+KX}vZ zU}dpMiH=zkTa+4zIw1hki8(qFbVFYY=2YXLgaswsty z2rhm5MVZ@rd3kw1KJ%uyFD6&qg@)s*<&M5xL_D`aIAZ-IN)BFzQ)M<|sB!(!($6es ztGR1czVp8xViKM0sc-|fTH-uwy`51nKudUZ2RiOP%Vf)Xd=uLoW7A%8lbexJnOfG- 
z&cRgy13~k~B&!ddZ$TH>V&523xQ0t?$>{oG+E?RbZ8X=CGnadzvqsNEp&HRfRmH6l_sb?*tGzb`2n4GA1ub5Ts`2K3Njb=X;@cjyB$;6uzI|Q zU_CaE{oiSFXZR59CMVUbmtIBYzW3xM*V6@B z5sRR%&=|>X*f7K1cTM555p6@NEEg4lkU!gurq$Vdy^6tGT;QY8niZ5vE?aaC!>9|e+_xF} z>9W5xu&NYjoyt1+B0>a=uw?%T)|R@qK&4X*7!%uf$VXBqgDb(x!L63s~$aR$|P2i+ZKr_&3@CxWRwzO@tbK-J7v zJGob35jWvc5%#J~rTOW+*aBZ!k^~4}E`0nvbb?0KZVO?yX~(~U`G3^PqUZwMaz9I2 zLu^LMdG1Bc&EjiQ65hn-q9^s-0&FMjWs(9@Wx=FL;-Ek>EZUEAAU)j04arjsG9q@e zr!ApuHvNV$=~bQPx<@sA?_B0i2P;PB#Zo(Bev>%m5HU1=vM^^S&pY(trt_3^X%qVOCuH?01`7zZ ziLm_ga?#_Bs4&1xfAaz?u72@xfNn|C2dLPW&()C+2KYgUfG6;^v57$LNpE^m^mbsd zJ`t8}Kh7?V1IT1{d+@H{w;xCS?XS&+S}bBOQCE`!PW<@l-qYeu?bn!?B9J%Vk6W;752-nb$n;P!YV7pN-A+}_4|A6szKqZ zRkifvCJXGgeMx)))YAASvNm~px(<#*&BJ;w+6`r5hE=IDG%pCeSEaoT;)ZAbjHN(Xh_=tBH81krZkl**=k9{ikI;EQ2x;A<)S`M>+g z4S9@JkCX)bN{o5w@JMVBhw!yUhZor-43TP7$EcwjgTv*O&*ej=^hbSuhAHXtQ7+4I;JX5_^ z#KWM2;=C-*{Haoo?hw?sE_QIcCFeqID^=f{Ok#+E5lkZQ2UvOACo8y*RTr(PDSCkq zQfO?qBRR_Z6vGkwUM%Us;2*mFpxp`IBdRX!FHqMqPbi5G{#O=08rjqEyOtQf_g%sb z7Q8ksM(yA?b;5Sq#hrXQRPQ_;<6NsQ+!Ayq>DOZ`Z2^C_UHhKxbSuy3-u)l1&vVA9 z<`m|+JkHcW3Q59!89Yn1dYAAimq*A3)eykd5T7qJ5fi|5@N|PyrHaAwq}9zcygYS* z>kpzy#q9ugMxsFq{PYifAq+Mn-_sOj#&MtVP_sm$DDLK7Q45kvS44oH7&4qB)UrT% z=(nw&K1V&|aAZtmYd@CsTmJA9h#gh<-Oo9fGc?17>2meI(;L_jb2dXh+NGRZ=$gmb zyRAKr6Zk`>9<;D9g)L4HeEM_L?FV&j%`}kBwzZ9~;hSCea+19E|B9`R-phO_D^lQ* zM72E7u(;XH1_Vx|5H1s)@1z=jgM0bx)CRbUcRyN#%MT@Anno_2AT1wdcmkuoK5{8I z&3EG8dsg0F#ezyA87b*^Qe03_kYFEx&}LohM%TFn(8Qr_wY4`Z+hGtu;Gy*eO%$&d z0M=Q6p^+o1KZSCv1Jq&;!0B^8aPL|$#@yW^2S6IUZgTTYm-Q2vjwV1NB(GfsofyFU zC{yE8?q@6@L%sb`EkU2P)d5+}z+a>;DR4X*?E;O(d8 zfRDcRrhq2x+Z4iSj}oZVs?>R&0Q-lr8R`5c`Tcu!GqT41d+n5=5XgT8X#c~YEH4sG zUButyC_Wg8GCFief#v{8Dnig>)lCQ~6x&SJ9@5Hx3GGYp9fY|*YywCYY?C?B4{mrTz+N(`y@p`@L@hjOJZIfV&=}8i_B)-~TW_GU0X-9=kJ0 z5BbAH^v!uFBb)+_Z_J_vUy%|2D~*p2VPQNL>L94XN$%`0l6#DRsQ+8Iu5B&^a*M=6 zfd|i)Uy3M~y6MmhyiV8;GI#-X>RsZ187dc-)AAlAMv@2_C62R;FQ{?QDy6Ap4^XRL z7r|?6-ppLXBl|i95_F_dz-izI3l$6#6TEC|Q^xa#yWJ@xRDmVtBq8|ML%d+(Vv&wi 
z=W9vFlB@!EZ5Cy@ju&HU+MZTlE-FZ#04j8yg$Fl4Ur+SUQzbOHOu=Pn$uCX+`h{;v1q^KK_b=4K`>-?@M8Sk7 zwrAkTFIj1YZXb4phCl#M-_>^y5pii66{KTH2qA=M$~5*v*$vBv1*RTznPlc-cKo<) zr+#rE2>->wW=6R`xPnjK3IXWD0~pdFqM%Y8wp4$MogE+^3x1iPjBsleKY1)1&gCs5 z8#R2|22%0$!&kY}rYxlpLuauqC*Rh9k85uw_+C7|->Z;aDF0nq0eP{Ud|+Sof{dyV zt_@u&ad6B6?HelxhI|=e{q*T}eIW1B=f=j=`owSlP^g6C{-erMM)6KlUr%{%LH6Ic z%OJh}+yS2Ax2s@vb=uUmV^uF=+sVD7G!AEwwLuxB)C>ZHk7`a(D243jn@YN(t-Top z)QgFjYeY{T@rTk_bm7|>9lBD^QVP7$Or zTceNeMT%@N#KJnDkXYdpm?r-ddc&1+@pcbNpc>k90f%N^$8UEYayD#SK2O2uWe9)G z)-R(;23??FL&q|J>b6F_-J2(s6|xVXbG9v9F&q$6Lpn1O6}4H zEdkU_nH4w<1i=q`y-|TIzquM_*eH9I)-4lX7Ep*xd$99lriSt zmtXc_Ztd~rJj|A`?CscKfSeL_7gu(c=cfOxM2PqfoI7@D zA}lKx*cT|OsA2pK--@#g6Ev3O*Lz}th{wT|2?epc#Y`OF3S30^EAWFWD4R`BDxHiv~`ZAXH`3Vv*Gb4k{68-NEV6T}cE&31umv)x`ZwtSIrI!TH zmFtgN;pRhwfzM1z4)Zdec8(4mFE3H{eghhmM2zI=TW$g?x{*@4o_17vD5WRP6omga8gl z5Py}NsDk6=MyTA`)IZ<@<2H5pn~rmO1%<2%;N6I|@RieMr6@ogef2!Z%UOYH0L5Og*bTSgn06|vu0c9}U2x4T(}urM^_1!x=# znDUi&^Xb8Jg)4XfDxzcmY%L3R`a2Z=-+d&~=aq^(Fb7(n08EECU5uHVv#1On1oi?z zyEd3~Di#@b5&~n`8W;$A%MrwSQRIoR>vuCPxP2~L1LXNr%!or0_2o0#a2W&?%qe_* z{K#Zbjxvx?fnlkgZU6v{Nr!h-LuumJA@^@xsNVW`!w-P*riOrRi-FB zd$xP&@}*F4r$A!^UBiKYH^3hywJ&NyfswuU0Zn5Z9^kKQ`~erQrYFn&%0qXThT-e5 zabTdmy0j5JeWC7oGnq*99<&V862$(%>4CB!DIEoby_4N4RuQWsxsP#Je4cZenSoIG z0l++!n&*tJgTG^8YnPdI(Ry0hnPT1hYYJk3uv;`v%BKzX;K<_xbuc9Y7tggmsN}|j zcI?(!JAVJI?$kH1`tbfYKVRdNL?tj*0Q}g9Quo}iqSfQUw`j7NslV)e*#qV903->8 zPqZ5353AAPS}7;Ey+2y=lsBb!r;oNeoKy47J-3N8j+Z-@0WsF$#@7@$7r{L&Nj&8m zE`IELfe-@KIuU_w-FYdG)p==FntC!y>?$XswN(k?M2;SCqf7b@YBH2C+eFVI84dq}`h+Kc;U z7${B+&i})j;oktT@5y3g zTFMBH1-e^{lmz8dF2HlUy16klQR8nX8=#PJKSbD2>KTXdlUZL@%I?nhNfz?HI|_lP z;HajTiXhOH6%~40H*^f0oj2!N0=mEzynTJ$W*b4no6h9duV0&XTl)^tdn8eBAn0J6 z=a8bbj0~R-2VU|)#Sw(1dU9_XI_5XI`7fp4wQg4C6H>{33o7bug^1ss+XTR9;crCa zatBP4O5rCJhu8HE;tK?_brAd-gz+$uvf248GQW>tG9V*I#0nye^*hdxds2(e8A*OX$yZ( z)Z9&Zb!WkMhr$hZ}hl_=6ShTDSO3m6bq`TFqWuWE)PY2nY1~BhU zzxyda0c(`OV@C3#({>(xx_yEWfov%}M z75#Cr$y(?|5~q4P{N&Jg?rgH-Y)!bZ@&`6QLu96^yY#HjjeafC{HMmVH)WV}2KbB{ 
z;6Rq>GgV4A;|k;v&(Kx_=7&)q3f%wqCMoe`hqb}Ud8hj4$Gsc5oUec6FjVE(oZ?XE z41@{n`L~8cramBoS1}uRnWgiCS*L-X5CuUy`-c?jO~&!66lOTd@PS zb^S@_n|`NbuApR?UCmfQ4cQZhq%wluTHTToiFxdg0mJbRAKq@eOzFBkx89y88Z;4J zxlj5O%xWs@STPK)>#r)&*)mC3`gK0ZJ9l7FiDZz@O3P{s^B;;ftJ^3O*{%26*G&AG zmtllKOqT9`3H*&sOMYipuBYpp;@PD;*<7<*d&Z!N3A*+IS5*_&%>8I!_-9us*Jb56 z-Y?Y_?xWfatgP8gd1E*NF9d3tHlQ;2d`#ESq-vRCTDHM?o}qo_=MZgNsZSSG3}=VB zz9(^J%T8X(9MfF@iB1N^*&Ovpc++2||wBm{*;VuxCdjd`}y=zl~k zz!%+7(k-Jd#x+Q5=lCm-d-+$3R_9N)>y);dZS7Ki*1Wl1q0L|y95yU-wqXwW%6!2A zedLNd(QzG2oJ==$w1#}GK26o?gX6}&1z}hEKFPl%{`lG0{cy%}jlsn~G3f-+dw}!p z3w#U! zYi!YIWbs3ARD9rQq}eth{KKckH}{j&tU}nu30I$qrgXIZ7Db{uQk5B?}FhJ zdTkFkYaVCZA92`DTKcvsygK5_C~%kW-CZt&*<~Nt@kkcchmK%I=2g$lF)^(9u4cAW zmQ>_IOecC8{$U)k6)8lKvF}qq_9f2F(^#)UW0Cdutj=9Eu7{`Ibd0eMsV6NJ&r7T6)omqCEA}3wxc* z{G)H!5-)buNl5C>4Gi5bQt*9sy-Cv9io%mD%5`W>;#^(VQRs9|mFGD%4yns_bN7K> z$)^6qaC;cm0PZrAuWN!j9=Vx|WfrR2K$2d! zc&t{~7ozIUpF2nHzQjaEOFQ^s*$T5Pls7dR&4HGdts-%?T+v*%nh0&Bb=bGmeVaCR13+>g8qt`O?2lqbr{!-tv z&8YZ7lZCeVAV6VHwcC;jZ|zaq&-#e?qU5>w{Apfx1KC*?-KyQYy|c85pd#VN0!4z{ z3WNCvv;*d2Pvqe|w~NMIO-`PAG918qdV9YJ3OKGi>pl8()9_mDQ}oMj=SG}yCqa|C z&Xb`MM>Ij1dD9&=820puib{J+nPsEby1Vm5XFI#1fzBD{2n#&Q#u z*>t0BdQ5kYgr+Xia!*J6_6bB!Lm4@>wam#sME)qUL`JOQy^n}-2Gic)Dxb)Onxu~U zoL^gy4FD7nT^fxHFIrbep!U2n{V?rT69G&RQ$4HF5%EmU?$*M$m+!)cyJf5vzi2<< zMaEVy<}T6S^jtgVY%dg2orPjx_Ei#&5~E3St!tyoiZ+OuiZy zTv$05U#SuBV=72%dW(x{d;Hr&Ht)TstC7nZ>C-4c)nW_>lD(ELPoHi!cZnLz?Gnem z=abR+>k~difgj z*tY5wt$;f&Y3y!0sT)=XyC|oIAG^mjLjh!_Y8}1WtxE{BJNxLwXi=h?gXvSgttY5m zcjiej5>kB1N!|Bw>$LM-xLWvG599SK(bpqRu9m?v6P#noYbR3Oo(yeIdEAk6(Qw;z z%V&SUTa5NVVE(+>)it=A!8lr@tmwq6eP+Gn8J~61{Q`lbXon{*-CbsVen~q%^UuWl zA+wJU5{nTiPPoU`&x%~VtgnvByusxJzLbn)rQabAce`U*)0zmMi!)L+5a)d^qi&e@ z?di9k`y#M8jv(Y3_RioGbl39ZFS*%YS@ifpeFxsYnPe>Ugyf<5SqUq*+G?EmZLRuSFNL|e z%_^bNT>S-#s%k2{dNyHHO$(HdQIzG*bVNJ1 z-uApMJALy{%g1D~z3>IlQYSfaW%gHPKJz!H$`xbBLsPAgz4;@?xIaC5^TI}8()7Y< z0oVAK&T;1|`8OG2AGyifFF+s;L}47X-_G7lH$Qtfe`Me6_e)~>eP>mu{p*9;9m`~w 
zD9t=6gG7s`T$ObELjq?un~MuVRdc1HL5n9^bqu}p8LPbY*}aW^9nfzRx9tX`4qKYj zh2fPhR)z&ps)1%-YU~#cIxXL+Q&5!81+M)s_TDP0&Fx>l{R$*GO?XbXpH?tx&2Z7G1$tQ_)qSm((qc$0D*=5a2wHcjt1Byhp>a9ieT}K(h zGsHGZ8=?AjKf9*~#`2Z*pdz~5CWueP{Bw#x;9)O&fte*aq4S|YS4}G!+@4i5r z{!2|;{7sG~Jzw62F6#AlS@t~T8cCDj{AAXC8x7$^a_>cPlu(*Q@T$2{9FJEJaxR3y z?vFPAws)-K&Q77h?|6{!|Ba|=MMfFichVsrO`%ro2L$t_)(rve{_xZ|RwoYg1|D}u z%N!zWrKnSPC&UnCvDI2B&Bp|enL|qF6&i)RvdlI1l{5nbC99gK*~LNlAKiPYL$Oj zkojOlfVwu7yk_2u@?s}a`@U8GV6dY1-HkI-BIS1Tc}eMuBzpW|5w>S-Jwi!nfJnnF zg{fz?>ipm(gvkMpDSv#|1<|(|rMmo6Iu>k2e*GPOuXB`Q9>L`495o$R#3isD46mqTj;j*|W#3(S4Zd}D&> znZ(E#N}M;WRI`Y#kwZttxDye{(R&v~;YveFbb&Q<)U__R4J<81+2W- z7T+Lb%uz211$D!tOlH}@y3J>E4%f-rBXmr~)_}bI@vG5$wVH@1hVM5Qk>tDri%Z3F z!~PE2!Y}n3k5O#;_FK&!pT3GqngZ!wRyIf2;g^cn+p9ao)l@~2$ojX)L{gwmqDFR@ zot9c8_r5I3J_~o2y_kPVkXdFkO557@FyDOMNOqZ7dm8oQX9)rNj(*F1fJutEq8V9Q z>M_Tn?;BHV|Fl`PV+&z@Ngrq1vPt$!#6AaqfuoU-`#PsayA{=VV1)%bipcj+}}R4sHu;!s%j-hbr!|2QCFHzw20RL$N@U>7;0mPCCwak_e-!AUM8WrdT}|s={n3?dR<}A@|c_ndYt4ljQs;BD#U(|>vFu=OJ_3@ zQYT+e9qaoy_p?V*TDD8G&s#W_&JZTPRJ8gczYyiS$AYw3k8K84Br7|8`E*dxAqwtO zjKUFU4XwEEBZ8##Z9$OH!fYZxRV!)VG|sSBLWqC0i+quyY-nd@2$g#&n?0n=%=r6N z41YTBBCws*QCUJ8{C2j`!5I9*tE+>Bs8s<&F{A+=)dTTNk5&eGIV>@iT~=H4RjhjS z9kp{z$=u3Ok60!fV0~mafL@n}KL(8&>IPhdQ+2EwN_Zf_k1K`k*yfWLWm()--8=bU znVw}yW#Hv~T7ADSk-&(5K`VMM&$w0~#rEErzOCL(njVjmPQHu5rAahIp6A<{P8%Qo zAW#*2;wYH(tf<09(zTNcmj)eiuGwrf+i%ix4TREUL=hE2Bv zV@~BmKj>X$X~fQCUKdoi-Fy2?<2D~JK#*o_f?(vpJIEX=)3hHwdhql6h5@uVCis2aIYW|68D1vv(D;%9 z_4wgf)iluQ6+w9KdQhCOJtql~uHy|Hgp+>ds{z#T=8_pvRWwdz-V9w+2{vg^!F?8-ir)D-?eAK;D zKEQTL(s~su|Q(}_kEtK`-F4sNJF8s zrRw))=97{JQ=9hFdMW5ETz$vIyS#E}rSbPBhJ)QP*eY8-j)Ga)CgCeHN>AC-hrU1& zX8YBBTQ1b%YPhkkzb+j&aw9Z+{8xmGkpOga;uBEbm)1fL7kLz{2 z&3KajjFZE6HSfD4!Hx!Z*#92VNo%U$*8Akv%RoOHQiO1ON)THGgX9FB?=Z7nPPK%m zwKja@g&#g_Eb%%`HH>^ajr+UkgW#u*-vt;7jUQzm3d1~)Gu^hYq|deJwsBJ$PLANj zvzK+lVdgV39rwGnqrNmuW7)#5NAgx@v92ap3$T+A9i-frofl+Z$`J&iF|09Y?c1ll z1$K{(={qkd6Mzm52{LL1q1&q3>d^}ieE}F}Jvflr)5c+r550bn;(hzmF+x 
zIAV#qP{Z@l6_HOv4JhFh>ZLp)jiU6n7}+S`p-Jnoc=9=^{{~*te1PQ_^$m+8_U@z) zCQ0JsoO0-BiK2hW_+NAy)Q{wiQooj{?nU(8;q7_&l_ zr2Ru1BZv9c6$iDPtTUaLvzwi-A{{dTAi%6|ne^4m|3u!2&8oY0J8r_J?Or})LPz0Q zmLu|4YI0X&OCKyc3ziO%Yu?_(fRbYd z-rVX>tQ7c{HYfLUM6tvi5b{0d!75j7mj+;qg&rrkxGyhU9BR;%UQb|_{gasJ`Zn2J z6SnLc)Ao|8Of4%jXp=gn5!-xPwALiNjL=J>Us>QjrMwyclV`aL@}SWlqv5qPHm80zXQ0c z+tq5#VX;ocblof(Q(||oP4*?1S?Q3zF7Ct_rT1N8Z^`ohcEa80B>}z)kDpo3m1Z_4 zjuAI!Pj6AAafA916_Ghd-nMQ)N6CPW62DzuYNTG`Sz9c55Xib2BD24i9N;|^FtW^p z)%vSb%A)n!zcKLFg$?-SUMAD0I8md- ze9oVjwUX0|=>fS*@TY9!pM1UNavcY-klPmu2rsoVh`V6re!kWVkNB+y*T_}?RYOXRB-zZ+U?=7J*aAj*u5TP4!U##`j%aAHIrMfC6v95%_id3<4-}E(hW|9?B%m@f9 zW7;mPOhV?CRk_#|rwtzvOIXe>b+?rPieh(?uhF%VwB+hrGPcU(Vc&mUiPapSjdsbf zR@HqO6&$4Vuh0nlJjaoi+Uim*gjlH{QDG>%0k%v&`Qz0zSumqkjVSGpTW62PK}<&^ z$hUEQ)pueQetA3>SwGp2OjQ}d{o6ROz}&@qVdaXW0{J^85LHDt5#%l2Pe<2!Khjkg zd2e*-hk(|vwj&w5F^%SBWdf^@%75$>wMM@fZfzy?+?jNaitRXRDRpqvn9{<2GQLp| zAZUBq8F#=0r>*3CI5BoRxbb{?f+FaW6tmS<-;C4vxp;51>5UtMc>TKJ9~RE~mV+OE z8^2CuAtJ1Ob*=I-l23?L(Fv!0haZD^L+zU#0z9$cd}0z(w3Aiel+DA;^Pce~U9QdV z9k^0SQ2lRmcUwe0?-Nv*~}QPR^|W!Tt{N_R;u}Uu@_2 zE_i#zFd}Ru(P|EUn>G2fgy>1xR{_N4-qLOS+*tGtT%9fbV!P*f{^u>b`PghsvY`*! 
zzC-T2iR$Nqh2IBmUulHx_A2yyw7@;%MO+VlHpR=A%l&hTOL4c6GBD;n_b$GLnMI~@ zOw7%I=&ikP-Q!Zsb;O#%$;VH_&Ekab*wEzh4{!cNH**H5aHr9>4NPYTYDWa@VIy4{BV%2y>{0;T34SPb!NXslH3;o znqxOk|1lzw%9`SLHqwRgUY*Fk+h5tMoxw}ofrtisDQW+r&y+APh z!&?eld%WUh^B16@0BzR_O2R0s9HzpM1gNKJ#Mcw?e|GrfN$lxi2giLvOaH#8{mr`Y z&a%`}1EaAOnx>snNx1J|e*^pDGo;rBEU*^Ll}_bRr9Xs6%1H)#CrmkSV0U(j&{NM~ zcIXGla$DS8G_Lw1>*orUW4sg$TbY!lfs6V4bb$n=(?mRCEfo_hCO)_yDhC>kk9~cS zlN`35VR27?Ne=5(hM+rhp=&iVm1J8(8^zx1Z}A=XK0L-lGbA-WYzu?Hi&;JbGWHwI z9$4R~`PX&91O$7io2|^X<-#_gD-|Kz-3ZDmS8y+(ddZ8yvZ(IGhTPVBsXmp!KK)-= zdXGbxC~;#`1Abc%w~IE-%}afFxvBAOpw`#V5D(kxB{RHi&BSW4dqR61U^c5-aQwR&EmB33*%&|+i(>I?dyryQPjL)Z*nOG)l~USHk{=z}(t zi8|2To}U{<1Iz60AAG|K)RPsDH~@f2X3k7>1N(NDz{tZ6uc!KQyKJaa+VO+^+~x$& zW2!s#`zzzibWwp3SmtYhYxXJqUu5lM$@!UYypO3!Qfk1b7eHFo@`X3PzO%BzvF$Qj zrSw{2txo2XO+Vi6K4992?B4!s4e;Sh=FEBO$iZ;2FUs(CTI!vZ=3&3OK|9m04enqZl?C>x+b36wIwH2<8iI@`4==>$U@{}%f5eF`-dqw& z|IYmR?BgW=Co7>!ja0dONb22c`ld0tg#XP6S)WSgt=9Bt`b%sQ`Y`Xom3Gk?9dFYo zCwOT6{|~7B$_sy%oBI%=z;%m9EBX^JyFY8G+DswoFxXMO?}hxWgKedNuemk2*52%b z4+9pO8N|_9&{Z?Qi*%*ww;b+>Li_KgtR(u3Kyrxpbo9x9e(~ACdoT3Gs1hB zZ%)O0D4YiUXBm2-*!8GTp&+0_O;5xgVhwE6W$cd~()IhTrj?fT%Tvr^H9Y=|@JgzB%PM7u$0KnvWwr-Czo#?Bsc_?0%u zlr{)h}u)#KRNxhcH!{{1Y-+UWD_i~ksyeh`pnaYzAQnfIHM52;AFjOFXxet;N^ z;Kzcl*C)hR!N_;t{L()BdR-&y_X;aX!Re#^N7`wANAHbASmS=w%#6jSB+a53J%HIE zd8m|y`S2zW(7PM_en{2Q@wX@pqooy};4~l<_sABC@R9Ejp3H_oj%S#=WJc%#fgooU z?eoq!H2|TXN0ORBSHnsL67(G7%|jL~b8($#NiAv8aPrv*ZSb*#TMPkabSiQuNKlL8 zW}NNZ?4*@S<_)bicSrNjGzW)N5GA&QBe_}WLMS5gSG@OWE>3fa9aarE687vf3gchh z_sK8wDH0M*jWhx1^;zdqz}zDA)wTd@RB9V!;3K78B`Megx9!o?92OB}ZlP>2h zLYZ#qw5$ACq(0OWD?xb8_x(Sy;d`Gyn!g4E09j3=M<-*Z*Ztnzay(4oc1fRC)roMZ} zL~qa3BAz~>qx9^3H{Oyb;kTmPWfQ=M8%!J4|1uTEM8Rse7Zo0|uuhxZDg2LrqpP(c z1W{Yu52BKFwVSj3R)Z3{Jgyqd;dvaXwVVN$5dTs5f|@2j2*y_Lz`roRO5qC=<7Ur> z3@ljh_Usv%mON8(t$54Gc-p!EcP>L7PyYaA)BW0hYfw_r4BcD5YbGuz&P%RiM?%!- zXqf^p_Se=C>&n^@d^5x@Y<;!T7?fv_IQ|q~bfpVtt!Gb^{u`i9ouu7Tvm3`9PH4~0 zh7pacMdP;3xzT30i9{1C(Jr}cx56J?foo6jw9%S@C?kJ%`$DiWU20?F4>4x?>=(W( 
z{_oRM3${H^lLY5&en5TXdoS^@u1@IPb!@U-T-T>+uNaLpDW^}f!i6f#UKSY45cn>q za_!wqFWCdg0tsrsSJv2b;_XOeRUh6%^4G7o%Mb-c-jAVJ5Qriix6o!gC+93oKxJ90 zWaj(047$bA4E__u2YaeNl;U09$stBQOw+1(MnsEPf=TZux>FhA!c5(DB%2#hxsT`qS|8ekmz_PMPW1F4l8EuTFbwM#!eIH0@rJ6_pSMTWrwMe|L%yXwLFbUF>H0q={&Dm6-mN(PId`5IZ;I|h?A zKrxgm(fyt`haoKpzzqK&zdR(pR43jSgF`4*1py@|6^y)Idk1atrhon!jPLg!V=`qg zJ$ub{jQ(7kq`I)v`WzQ-R=vwm4GpLx?VL?tN zIZy24^dfA|)-T}3Ga>o&>tnce8*&L!nN&toWvg`zvgXUwyI=i}M&MEAw6@`$5N$zE zheEatW?}^9!2^7Jd$T;Vv;Ns|bIi!K0*gp|lEvZ9lE0}{J5_4}1v^qaIk9kMPYQMF zdHDwfPEaX4aGNiaOmA8$Gh~2*8Z6+SrAh{|D$u`;2~z8Xhu9N8S{E^OtmFlTj&V%_KqIc~^{yrN!k{ZiAl7 zJWrjv%1?6!YZVBvNfoosrrVKs3@9N=Pwct=0kFd4#J(R*3W^l1jD}Fv5aR88>&gYm z4mfC77g&?oU6RaL(BrqXfY=_yGY+DqP0j^lB2vt=!2WYU;nZoCFtx}t2&6)=wnH~L z@Wdwe4Fdq^b)52$_JzomMVkMbYv5l2A0lNTK>TK^7OSq0GV6z<4Acqlb{u324!A5X zmY%{r%-WA|MLki%{12D-jUuY$^c_r*gL>*W-w(?+GNC*ICYkA(j`57z4{G15MSgf; z^8uY~|K|qLlRqxoZ?p~44k{$|mCVbiJTj!tyfo%oW+oRrF6U+de`X5J0{@x%8Re|w zco!RAfIxO5fiXUZzlkh)wjO;MlGp6tRT0xaK20P45A8;(>d&9hqn!GGeET2i z8|~{v8)g3b&+m0^|F?(be>t^IC#1U8m|Fa9WmI@)r()}P5`o$((4Najx?k3ov}3QO zw9DB(P@V6qX(L8rIdvMsrw0OALP+?mCn&+eeeO&M%iJ2a99Y}g zH9I@F5Dr5Xj!8L;MfXM!mWbZVIiUKMao>idTOAY*UzcS2nEty_C7gA-AC_i`pg;+Z z0H;1#-#!>L3uLy3%BBOFh@p!ZN)(KJ0B%jYkBk(nx{XPm%k2lSb%;$eSuGaP60WYL ztgN=Lu^j_7jts7(;vU22(4Ii_MbtT@wLW=rBRYREpUt}*79e2t^`LLG)l3~lv@G*Z zBdgATpAv~}9C#PGb>(4ezp7h7dA1iv;KmADxSzV}sTI~Ef)%KL+A4YpyNiH8&dja3 zWMBG<*6hMZ3lDRX{YmMkSj-!bWQRW4;`GE11cyduKSVbE-g)L}yCmo!nUPcB6^DsK zMQ3UN72>cM>>{U-y5vOB{yzl%4+lF)KV_rWZ_R*hv!I7Y@>pqbf^txrbliJH!TigjgatMW*s#4dcWrS3 zi$DtKVK<||cDboN8r7yt1ajNX*CeU*zYhFL6f2im=jgs%>`XWAry zmBZ}Kk=}vLSHDY5xdyTxl>c?f*}O#y+cx2m#ZZKnXJ8=EyV?vGxt6#hszMnKap>J? 
zTML{|@_>tY3IxCw9hq^WIW4q$1J4lslGY18*EHmm<3HaxO_YUzu$?*C4+h3osOI`h zG9Fs)(nwD&VvTdVK36u zJ5UatZQCGuQ5^rB+aOwPFRpu^bKN9J5rgY3c&A*_agX?|i^(U^?)5}oxbv8=%T}5*q$ZD$8ty3p%LB+0(zJkWHg!mwXc69_CZJcmEHOfItsIhg4D2;1Ab6F&Fe z!Y3eDJ6Ko`vcwGROZ*o>$GdXibU!dyCF{@Uoo{O_Jy3* zT(vYsA{+X4+K|Bo5O?A8s0sdWx!jX$5p5NI%%D)gP?ejUMKPWBJqTRZY6ccXU}>EV*?;G0*ZksB9 zcPMG3C1sduDZV0=xJ!F><-d`o(W+a@6m{)prXv*#)n6{~deFC&lj?5S)kYOt7)RcS z&f)$`33MxSr+GXjQ`#EU(#DP~m=toTB5{#`OS`);NAFVL41YCVik&^>=j zzVp~9)MHOK4l^Fv#HT+y`~H1UyeivZ0RME!cPLZGusTaN+wY`l)_%9ig)5vs`C_}8KlYvbS^2;)hOe0p zvZDy!fY*`_ZV$iFe#pg^mL^I3IeOggf8w<@`)K*5QPp;Yfga%Yy}mUwUTG*Zn?px^ zT-Z+MMBS_-^&(Nm%Q9^g2rQta3AaS29scrtTN(X9jgVlEQYSF;yRl@S>YFxxo^I|Q zchZ#(>nGjj0|w?ylO(p04xOvM@ndVOU1kH5so&1{fsfZ5!#}u~naSXN{sNnVR)c2U zJKbS|oOktI%FDuU*!?bsVXKce9r2%RwDyL>^Km%;HG=#~`@>53LSE|QRQ8 zyb*Yy(s!jSmCOAzkVm95>N zTSD=_s`4vWeFOS!^k7!ZmS|VUKCz;3sVk+Mnz~F5FmrczU(gRgH{UUaFDSPM-`!!P zDk1MeJzE6KCM)GIj%6U^fXQULD6#74%VVuqf56cMXFMHaJhuvMn35R%8U;y+Kx{u= zxSKvq>0j;O6HM`l{Ip&O@+{qS+k)Y0FSog8zVTPAQ*1Ur%KDGqaP!bHb1reSlXCUC zd*gp74ic^2m)G)vGMpgk0#@r{1s|yLUC(HG{8|0oM#fiuNhT_1&tHOpw_>2Dd+v$4 zcJ&BnzNaU_GSRBU$M=CCyXu^V+5A=y7bJ6SQZJdrcrTawP55vk1;%xU)tGpbWwx4O z*_d#fDe@(Kc3lZ|`9ztD-t~G0mjI*TC;Q76=FQ6G9Gk83(t@yP2Q9y!3*E-+7dQ3K z{+w9{?+-CzaJU=eKpWn{M5>K4EByXG;LZc+p=t+`4ghIAMc=p%U7_b)_0nT42F>(GjmJki0dW#X5( zwd)UrJ(u6_C{S1f=YN`Xw!wQTWu;{IknLN4lt6#~DJ%Ez>-Te#e;}vM0xfut`z*F&Kepd~+=3cMT#&{K zINr;slP0!-9)7n5w#mSjwuTbzjeK3BrZSA7bZl_d!{_&|y~7&dwxiiy402fD{aJ*h z5V!_(DH{`3hxVcWS(uvq3hzl2^Dg0h?@qs=!rGeQ@N25KOSqZP-xyr>ijTmpEkU<2 zs0y^v?o{Qev9g-j)nSXey~LVZz-|UC6o77*iq;__!qAF`4|QT%nR}|Q_GKUChG%+p7PPFeiCdTtY!jGEO_7m!W+s2*^H z=urPID4EFn$tYmQP198x2Lw~Z_0Q6IbAlN6`ra5{)xjv4P;fR*FjGyaZi|C<a zw*8`A<$1f~s(tfx=g_@bzc#XOol{yYj+=ew#ZW^Zb{JG{)LYHTPh)lP11&qSbPr*giI=+F=lxHeZj zyO(GW2yL*hLNE{JvP~btIEN={=j8tC4i*pArRme5$w z=%X~O#r;UAYJZ{DMz+BWBz;+IiwPyXeBK-}d5A1e2l~3HKg*oz&Uq%?!8gy^GfS0O z_)jU);tO^_tK8NW#MEtlr!MYC2Tp@pe`bPhgxXshs17$5M`M*#)XVQSAFTW3Jz{hV 
z+timja7n-xHt1P<{L1UAj>rt!QsfT_YVGiqdIKf@&>e5h-r(2J+DUVlcp=3gtB&50Uox}f5o=X<{#Dja|W6gP+o^e=oDK)X!5BibX&?xN#k^7MY79YY0&dH7?WWqEi39= z+|C6Z36ZjRT@lRvwyHgUR?U=HcwZTsT*201dU~)SQXP;tB^m~2ab}x>ua#-+vPPfy zPTM5*ZG1zggzzcnW-AS8gju1l}OOfG7eU&nyPM=*9BwZ{>K?k>~I(fi?6?6bn*NL1I)rtbf;2% z6k>|o+ifb3d~fbBq7P0Qsniw2RCbQJek-Ny5**nc+n5)>t>06g!?T=Tb|kjljesS} zQmy_6Tsi)LD*=J{$8Unf&ca6I**WUzbmES+#rh0$yua@AQa;CIdAU2c(AY?1r=hWW zXE$-5>3*P(d&|(sUGE%(i0bpyka*SKf%;sL-Rl9W40DgbLaC6Ka!QzaP;LxEQ7IeT z)VRs(B3L2oZck&1x_(IdElex&OVDDHsiw3B)2OR(@3@KjHQhx#IynR$c84AuAMba+ zF5#h3OOcej}?uaR00Oggz2hD<<|B-k>hF=?Wlg2^;-qe zGuEL|AphZ|URHYk=y+BQLYZ`?IU{?rvhQF7xGi}k;yz!n5@}k4OYOeio7;4Fz-rEM z>f`3!n384I7|N+)1!J;wWG^xe_o>*($)&WcKDCoBkNRahu8w#lX^|ouPG~8S*S{Y} zWaVvng?<^fSNCKkBAC0aClm3uzN`+z!lkf-IDF{-t8Z|h{o%EA$G&0xb=e=1`}3ik zAZr6#cNnIrh1PVhonaG(<&!ht+uw~VOuW_`1N(?VRH3NIot~MJ11mRDk;^>6pU8M`)a#XA1-7Y* zzBlE1&17r2GgT0L@&D_SjlZ|LHYwcM=)$77%#_^ie_ocg^2rlV%uIr|{1w^m? z4zm=(SRs%tH$hn10bHTPV`gjlqZn&hFf?GhI?uPYmJhCEF>841agSrmeA}<`(i20v zT0THEvEU0MCgyt!)L0JAC0{LEo$E2d{_QlOie9VDk2m+PLA9aJl~pi7xQPNjseW?n zg<uC8WX^a0d4VA01M&U8d?2;CtNohZK*U8JTuN9?U1|D5jDe4&0=&XI3E zn~`r>2Zy=5vK9?nd2w06K5Ij0e{ySM#NKNs18@6HJSzn)tMH0U86TwS{6zFRA;FAM zB_EM)4a^Gp-L~YPjnmuqXX5>yJ46Kfy|eQthZMQrYUVek`-cX2$L&ju=_{W%5*v3A zCi`;qn^*}OxKb6KI3bWrD-v(nD(DpEnCv5aHG66;d!oB?kRo1{RIR0IDh7)=Z)5X* zfK@7UvS^IA1UyV4+PAqr9o=5EN!bny=KM|}RiSAX78xnG$#t${*(>inz}J+2!&*=O z@R~dwU1gwvnzq7EH8fumE-7X*%coUd9{$5}9x(no*q~-5TeOex&M!(rz!b_E0A>nU z+C9bw$+?48z`y(%+Zh_JnV%_`A`;K73RgSgT)6HhGjhuZ$;jwh?fxOGcX#Yh^B@J( zV4uW)^b-~)+T+{_O#9x*Q~P0`k(Iv)&B4zmrr3rd#l~hqb%5CqbwU4e60f7%lj^tk-lS27YD#G*9w$ zXVbva_rh!B0rvu&?aoJ2aZ}0Y`>_P4tJU3iUA_hNuO8VuBbFI72a^Ke$+TR{ zsYYqqhHF;v`)1wA#6P#Zj1i#AVeX!HXmXdB$nw(XTV1>* z_NI+dS?M+&Sl5ZK_Sw0&?Z;MaTxKA)2Yt}M>y}f{wUuo}EHyhxvHU1v`a!dwuNQ}F z@cn%M=}zL2wz6^KCE^XPSB0{;ev<^x`N2}yry0$oN3)791wzEZhK8G^^*1}|uI*a( zSgCgXPV?{ijo#*^N4TVkjtK^ksm%D5*%)pNw6{D7JlI``E$jMn^+BqHt_!qx1n|NUF?1A>@e3`=q_mNY$o_7qZ;_k|6AT!Nnpn#xbAZB95G5&0DjlE(YpKz%et-czHA; 
z+#)BVumDunrq1s#w^;$XxU@$Aq@lPE&{Df29H#4DlAoJt&uzGr>VH~G-z%~)xD5a~ zy85FLYRbf`%2epEH)HZ`p@6u7BZU6%Yj;w$@|GVy3X{FPu6w**)~F2g4bjsk4Hy|z z9*VpksYJ0Wve&-6O1wDU+I7G{Mg2k4U-Y`2l>+)q+4f{NhZL+5> zi3R-daMR;>|14l}VHQe7tCpW1RTQzg!tasLbmh>^2&g$}Yg-O|-UKD)JTEODV`E?S z;5AvUJab#WyQn#IV=8I|c~s?_YieE<lqm? zf$Ej?s5BMa`ri9CIpMW}czWjH!&bB)nwfEt;=MdPnf!$-?!ZApLIU2QU=?mt&NL8N zK&vrFnO&`N^vg%DLV8b=*)Afbsv7kzOu}5nmwY8Q9+3ueAnSz{f4}2ZkA1pviJz5o z?oK^g+SWV!l&i7~F3~4Vz6|;X`A082<9jbIsToRGI9Zkg)beWZ33#r)NUVrTY_4&3 z!2-T6N0;+i!VSgki@<2ZI*%D%BY}860s6%SFWtJ>)y-9)0;?Y;1hq?@OC=6XtY&&K zag#+9EclA#XNQH zCD~NCaMncBTfb^(bsZn#CnPj0?obp|*)8rM@3islypgQZK?qA z7qL)77UBzt>iO4-F&c;C$3Y?yNyc{HDpwI7*|6IP(FMI4*fugw-$=I0wDZW^KQqN` zbFi*!i>e_j>uBw7=i_et8FF=Zg$)&3R&i0-F5A&Ul8nXX+_1GDo+{?ghQk&faj3;{ z>KA9{)!B)&6Rr+Unfmv|FpkETN1GH=qa4ey-W9AD-~Xf}{GzY_YV1f2z$G9^t|mC6 zpykOFi4bv-_u$tAlW2X?R&a}+FnOJ2pB_j^#YxbJFl-UZdYUOBbnXM}IqAg(0_!zr z1lb!&L3CGlgAaB|=t90Owd*y}0tNu7Q%_9}Pn(|L!6q+%mFnQ{DHUTtMxa5KzCNZP zrzgo0ezk=a+v5Tp&xPj5@z7zQqTT+-W-rm*;)8#{%kZT&N!3$JLuGz~IW2@;cWool z624|?0wvL{#H!tAA7kg z0~wH`AXENRdnAwJ@|@+lv!m*hNu5$$Ss=tw*w7!6Nj zg;1kn+C}jk^2E+DA0zW8Yz>nbv%fLY@hBfMD1WGU(M2<)`bH)Pvo3@)WYQel5NqYC zzd<3Hvj8)}3#5isP@oXhK#lDvMD$_vD+5C`QNNe}P*~Z0(nsF&d#O&L=*pgN{aOB! 
zsL#exGO$QSX&6YXvf#%zOP$2(p?3*edW%wz?sXu%Na{g+igGfIy-O@WJ)+Isj;KGW z2M=vx-#=bC-nZ+}a~4C0FcF+M)2w6U#sQ?Bc7JkxFDxaVdNW&bqg~qH=5(`lC5R_p zC+XYSKdhz8^!QK8#8MD>NUy77JA$q?F8tR?ZJ41!a=-dcN1cw`1jXu4mi#NPkHLUy z5yJwb9Km92NL%>8Q%OT7jh6jVRKY4(MZxYq`s$tQM0s+OV7sJIB}<)Q03t3e?YAgR z$4=Hb=uz?d#~KwGn_-A?k=UbIZ-bYypiB>`L(#+m1)$)|=FBG2%L|d@LE#h80A)7UgeA3M7jYxyO^!l3yjnr_6R+g*ccKx%6;?@7#~w}vS0Ffi*$|*NUE}p40Js(P#WEJz-e$+t?S4FtCA0uWr$Vw&jS|B z{9xE%Nq_k>+=KX4$g0Tn6dBAd@-0tF>mC4nSWoS4od7i0m%SdZpkV=rdX)5Za$6xdE90Iu$k}t_>9}u|l$>)&O ztHbfSb0sW*=4C-SJz;5s(tJ8!ZR1}Ac@plLw#Y2{`BELMErL` z2qd9wwfmcLa z#_HT?Xv*4hI@wop73(PTOca!N92#fpe%edtxn=L(OvJe5gq4SmqJaRZa(r;rCicmrJNMf9thO(Ll0)+Jd2j$y#Yx*cSvw&7=w`XW zMqlCDJO@&~YA75GHCgLuXdL0sW;aj3qP(XaLsqJF77=45#z7!tJ%c~T{tl0H3P-0- zT7z7+w3k8!3{>^r#$Ol@ZgHZ!t|sT9Ok)=Q`R4$(3-h$Ss%NWE_1s z`t->t&{rDPLA$yJ518BWFZ;;5i5pkUk{}NNyjMTTDJWpn@LNv>_6WszJ@SYoJ;%tS z=UJzE3{cn(81@7L{ZCHG0B}RqJ*}E(KyBVYTmJNqYfQv^`Zq-}6*;jMEyawRbJ9Q5 zVW-@;Y6tb#hcD?1^FBht0K(Xsin`ClB84Fk2zD{>--B4Ka>s(ETsI%n>kp)RB<6#1c> zRWW)a9agE6lLK+yN=EH2KXy`kN^eh2@}f&Y57dI15|yGoONX>-5^y6Qie&;O2$8Rn z?hCyTmxmEN`ERFn!XZt>$qWFVczqnxDhhdW$3!V~F%Z5*fx+nLvIVc1mJ+b8)p)r7 z(jz_iFhdtJy#7~Wy76}fkN8w>0)kDC$Rlj^8IIz%(~Kc^bPK!}fhaBT98mZ8WgE@> zmxy#MM__{GezI_~vR?egE*BGTd;lvg$m*lM?eL1T*I?W=YB`E=*bQkE{(H01x7?cZ z_7G6;JsNtm6Wi#vE#14-fegOyBW4;W$Fdud&mu2W0v*P0b$x1foBGyd0Rv*l|B<&$ zR@2A^CG9nq(B)pP5XUR4RLiThS1LO3k??N0*qY*_d;b8yoP7gYb*GF(=%!As`;Ves!%iMA0Nz_-h5R0HEQ zi6YKzo@4MJMWR+VPrBSFVJvyT?aF$4`(xvsnz}XXW^jf_Pn60}ALXrk+*ikngw=h+ z+Ilf`3G+RmZ9&s3DDgGV z$|Mo|W_k=kE@M&3E(#9it>{;4U590t9ym5FofiaCZwH+})jqru)2Pe|!FaX3m^BcXKn`055d+l9H#M zsau-?;t^`4 z-GhCv-AKRI8~s2V!+|a4M9I)plBC2!&g-gT9Y!RbK{dza8io@wHiFBRX_{12WFur| ztSdk>{ti_JssdB#jWH3i}RnIM?*SAEwk_c2R>`a)=E z_d6fIRs(O^LO~}Fhm3IAWB8;%mu>5&BJVO6#+7;4?QL;+{mJj7mFBwjUMB{%3L|@q zRhk6}Xv2kl45($gekf^_=|dPQ8nl)puQE}mR^y{Hlt^Evd8$%HjIvvA`G`#V0u$`L z8#s&lD3xEH$HULa;BQ?}yDjEwc-(2@kwoJ#cN5oQS=mq|wg*XLFba#rC zM((uRVR?dUHDhEqFS|yb_=Te4T{J3}Dk-%3S=ccO?J8V`^hhU_w*E1|WrvA4yMZkf 
z6uz6X;wkPD4LZk_+ifjeJ44H;*t_FsL!-`figw$}(+nuo>G^-T2K`DIAJZtQJD$(1N0MjER^r%8!^ z#N2@DXIC=GvBdIvE1D-T1l>1No@9n5%NpH08C%i%*zxot&wP5hrrSg<-_;hh9au~+ zJ55cEDdX}w`1&g7`Z{R-S$bjjcBUtbYo1nIILSGcL{3WLJL&=<@V&8c%5Z=3ieOVQMocC5%>mm7@QDId==1lhVs%A2 zN2d}uDm+%4 zNSa_|k!|rcFtJNI8M%(B=g%e3-ZLL#wq})&nay`=nS@MbTO5xS)Z#N`E?0s_8aG$V z-Q3Z;!$g%-4Hj3Y6ly2t1Wx;5yb9f?DaY?Vf|qt|We#S_${Kw#a+QZ@T?VHlYZ%f= zk@Lhm&tfFKZ$#+Dewew))lJDP&@^&<9qgnO2>$eLeAAwCdS}c%1wJS|g zHD_t*=%L3gQbv9#<*GhE%p-HF0`8L-sIPN{!oGmr$QiKmHo_8_Ph{?2>d%92m4|-I zFm=I+4S6zZEfU@vR^n*{0`j^vNp@$R))vC1Q&Dz(!N3?qKSA)CIhP_s@)|X8`RI!0 zCU73AW9VVHuXw*pHl$^vt)A~*-T+_vfgjNw5?4f+G?xw7Q@fTqpsnoMQKm_9=<`uB zX<{_GgLKapepQiMYA%vb*T8_?WF31?yEl zYT=%sxJj`4bllpr_RJs2Z6V^O+OVbzF20$ceDi1F&7K@>F$x6S>~L%D^KTT3uX< zOFv2^MMCiEXz#3TDeW}aC=)d-MOM~MxZWmpiR%$JiO_#_hx%$_VKfJHB#KbWebvjY zBf!P9W0w^D@w3F4GdU+3>V6Q=)jiM93bqW3!hRk!VZ}L>Kv*K7juTNSviY5bKr%v; zA|q3k`WHSXTX+?+*QfW_lHglO#v!_RwU$N^*_-a?6hWo^%|$9h$SKFI_7a>!n7Oli zMD%jp3HT`D^WJp&S{V4^#B>qC$UuZ~wDN&x(}t&L{htmW39_tfe=5Ti{s=W6Xy_O*QMBj@1r(ZaGn8 zRn4%xf!>MA_i6;y65sLU3Wu`T#*lRyD|L2nEfZu^e)YofLRAb? 
zp_aakZ!%;AU$1)Dkuo*ZG`~;&l9uCEO161lE1hSO5dSLV<~6lHF`Y(2*JsPB%6K_4 zg3!nR^~0f8jcNE^CaT`9$d9wk5r66=DoPkRZ^*$kBqu3Pb$ulvB=w$L`KOAQc-I8- z!@UzWV0pOr1+Z*nUOkQRpXU5kvMp&aYfmjDifk$s=8_PJs~naH+ci+eh?g!N__dfS z#2Js8@BThpkrikRr=g!?TJ7yx^~5O;10R(Hb?Z2K!jMP(Y#cB;o~AbD`V{@xn8sD9 zso`9MQVMy$NfuRaUfQL`@90&WbL4sZ(?p7Y(z7Yz+Nt}#+7gf*;3xY*1!q@-fdMUu z!!qaT$Brsx(=79Kfp?S}Ob?YE@!w4}t20&eu%1~`U`bq7mZ#pZq zjdXcz6_Mz?^pC@%?vY@%2pKDdd7RSrrc zry&Og*uvgP=)2#Usy1w99ULQFQC5c?j^BNuPngh9tmrJ9#g6Is3h5%rmgQu?%HUzMR+M2!Xg*fAqQ-5DpJ>zeRJjR2n+(~d~mJ#MZ>k}#3$&F(>X^6|30LU@VT zj7wf>AgIEemIO026dARbx}JF}5)eVQD1|c$Oghz)kssx*yK*Nt$t3XJo%OSTj}CUR z3K2h)izAJwlkdG57#CHd*uOm5fQu^vEq&dTr<;B2Zo8F2iU`_pv?Dhfh@~SVA`r zDj(4m0)nJDj0O(%bMtbX?rG@nH!jCptSD^$T$XD#XvF|JN;T2Y$!3*G-4bhT%sd*K z-Vbksikpw$?O#O5LmBxGLS~6*7Hs^chvBmWoA^gXs>f7uE)f-nsPq$Khbm|7aoRZz zZQXuCwhC5@StYs^lT?2{RYY$aUIXqBz_38Fmx>8Bl=XbTN*dtS7oXn=a;BXUw0A~9 z$WYs$A7dH|=1U4c*$YE82@O?Y)V8W;tY+Gj$Nu{+-(nzH{~jl5FEKzR`TG<>Wc%B{ zPy2~u!T&r&NcnjFw~hjE>rRsY-qB`)Ht@G562O_mZ~r;9jezitj}rc$7qN$IlKua0 z3ar5YM{7!7__u76_leGmb(_cT`0U1x&p}KYVZ8BtlfKt}bh(YdW>{Q@L+|{Muy2-`-cvXSpuFXNAwPmeO5l~E1WWHF-hk{PSLRflK|H634LA0j>sNf6rrshTq_-Y7 z9+V9ZN*CsSv9!;$gw<$o6Q)i~ z;{ORby(R`BJy3-F6fy?Z+ov@k)Y(BnU%YSEe;Qf8d_QeH<}5344C3Ux+>BG$Zv7lJ z0(EkPfVx;2hf-zpb#lpH365dMNaqpWGhF=8#dsd{fepN#BIm`OW{PhH>*g48&D=?l z-hzgiCmtaO8QEk~dk9ur0-Yf+0vGRngdhJw3CD@-=>=uxT#b44oIS7if^9wKCQ>Xs zh%!bp-D#Ase%Z5l$`2tu`Ff2OGi-R=4(V(EES>@n=}XoupuaBz=wVMGFY0N2hlifW z>hZlk&gjFe-Lf}Ct6qxlp&SFV&q4ogB8l7P4{Hw_(19k8|1QDOS-kz%s6XU)rYtqk z4oj^>gH2&Q#nekeB4ogXo|!q5=4`mi2k4dnv?EFeoek&f;`D59Z@V1KjKsN{%^jb) z^F|FtGL3}*zqC5-jOOR(FG|dKhnjOA0mbF#W4_lM{?Pa5ZSC=iiF&?}EoF%(_D|Eth-Y*vz@4)WCOTLhB<)d3AXkm$F_4a;oxv*)> zNQMQc6*thPVVTi|98&4DBj2=i?AY)FjS{3>7;n?s8J3egVrJ^ZXKQP_=j=IGWvcu# zmMn)>&-<*$s#a@X6q4Hyf3n)$-4b%&Opxi>;;`=K0GfTAnrVhsVzgB=KC&Y|6SZr<-0F z33>{GRP7)s&z0T6IN*)9ElF9R-8Z+OMR+YJnzWR@<@z2B{SX&;29&s(X2=&zDQ!kB zeFn8+baeuaC#hUsd33RUT{ubv2E&#%q8#!~fjP4+;p#gASHShPPw_@wc(y1@#PDP*(#A+il87y%qM}NYd{_?6 
z;RV`WxZrqTNAz^ARRA*``ThGF`Kw8?mjzUSTO(&fg*`*q6r+yY+ui*JO!&-QFk4ab zarN`^#DHbrYxjEwtN@R2mYuu12Jn3QclJj?P%8qm2pX{K{UFrsX9&YW?=fN9M@U4JhxRs$kdW{P+FQFrS3uPtg`%*$JZTPw_)P-X0rD|Om_R`=Vbg?_$SI`X#W!EwqPsxCH$f)c7ycLdq2uf zc!xkcEWq{-`pzxOJf-OtL7t>!?Ipsu=UuFh^XuAXxk-=mj&Uv}pY*O;JvCkUIr+(N zCTHpH8(~j)R~z^^MSZ~tTS%~C1G3{vJveqN(lQi&Lzlj+Nd0%yR;N1p;dI}uAd_JD z%#NNKf4iKw2*9H>u)v^4vsx>5N)~c=4an%LD*SuIq#W?Sa7>=$D|Cm%?RJk$aH9I| znn;b1TCLVV>#TsrG}#*gfA-p3n8E_7z*n#a-45Au&|KC2;7Sf zn8b>O3vM*R?{X!yh(0%5#AxA4m8>0i0Z{utt(nZ%ErT)=t+Dt8}~0eRae-B-7@R*iB( zK&@vM=_)Wc@KwgF8`-sE$@Fv$#F$5~&RKj5_96RsN5IPh;r!NCSYXbJ{v=JalVk`w z0xOX}xoGbBo|qgZ){CwmAL&q{(t8u7^pfJ_3nuq!0AU9HfGNh7ILhq?!6wMATvV}) z8@qpHhBN>c_Na&6`q^$}-S*VJ{zQFVSTphcaAjqtrqQY}bw{=O0PR%YSVcsp;VqPFGiAw%7;=Au?nJ|Dvjk7A z*80dGCULg=uRltc-CpOumO993OvQ@R`ih=;bbA+h1K*uXE4!=TniQ6Dak;vh)d%%F z0osoO=kVJFCxnShnYg7%5E0t_yQ@kj>9WdiU7IotH$Pz&78OYY(iv=s53~yg^uU-q zEe~xikcy*OUdW+y&9UM5_5#Kf(ma5wAllMlqEBAgtGdhFc?%+kjAh+MfrMIHJ~8>i zWtyf%MLQlc<8Q*s_3C#z1;F=PT*iHua#v}>=N;YNX{Eb^krAyP@K*V#LUs-J^dy@I!~>5#CjD0;>Y(0zz_n-A2i<^;P)zhSEAp=wvHNrb7*FQ>gK`x41uw6HX>*R1e9=(F6P!#4%{?x zFwV8R??v&SMr%8mY{q*j(U8oCQL-jA-|NvK9jkqxy`AV5y!AHk=^{s$#p?TVkZ*^+ z+lZ+gO5yC|jivzN{LRsHUtf2xyg07(uxzjncDvGFQt2qd3BQ@`$06Xt6FP{@4~?qR zczxKLN?}P6?D1j%46D$J^B7L{_EQ_6twp5%RHh=gk{117WWC+xrnhahl(c-8@~q9{ z!tLhhHiJA=D@Yj(6M|oDMRJF&zev`vI3z<@-2|GqqP`$O3z_oUl%T~?E}r&m(F44+ zv4MdIcTkF`5-~Ax{K!_f5nJZYo9;olkWcpba8{^j41aSa{HFdHJ=G_(ai6tx!eH7% zGRU9@*NPX!p0dXj@qM|PkK;&==@!cBz5yCJ1Y}xN4qrPNbxCT58+W}1d2dp3RSJljTJJwMJM)(pJH(>2%vr9HVDE(LES`;Pp(@WU6Yf#J&$P0-c1v z#`4HBPJWV1_6P5>s)>fO)kIaYAu0g472aBR^{7YAJ z|C$lFm-|~f`Kxh@ub8fpFH0AsPkd z^jCT)fiv}0=PBz=6|VYOiCU$JfSHS(+>*SNzUY}~TlDNEl0u=xBj5y#zou!HLE(@A z@73@zOp)DqszSYt)@crvIc-_j#J#~~_X|zzraMK|?wkyvbY$`-`lm<8QXK@if8dK| zKSUd??Q~g$p8+eZM0qW`0PKE*oVT8g#FD9aeh&s+x;BAQwz#Clnt1M}QSj6kP{NTp z-Sl%2g_$DjwE@eL`@78#&T8&g-`o`J>}!kd z4-MNEmM^4->_5AM#B1`Vt*45P;k)b zc|g@eRi8up?7<`aKnnU0Ci(#|@@9Y@0cKVlM~qKbo)It3A zq~0E=_^I)x4h|vRpb`%B!3lucNIdBrFBx|@)d-OAP#$C 
zpjNR;hG3A1@%$}e!dOkAC`HUb|4tUZRBxgJ2{5C-fXxZsEQXlMkd&l-C9$)g=5mV{Cam zK++-=HCr_pUFzstQ|P_O_VNXV_JU7t#49WUU+bomb`cZH6ieo^=Xhv<7P2bTqDxpu z^xc4;A+(!lB)aftJYrD5Q4)aGJf0L66~zL6+YrC^$W;bF%EJ$Va3x%|rLCbMHRh^U z6bY%d+hO$AnpcOl#pyCb!^A8D8saA=C^Xn1x#@#&r>ttH-fjZkPhws@^?j+1;F6+5_WPE~e2egBRwRhx z8T2F%!1nE{Xk4K8b)^C!l!wOOi*d+H*&~kU6L8hK##=$fel4!1Vku(6#b)ICtbaKx z6r|akc{@_i%d1}tAch=sI}W7{4y~KeiO8$=P>-PFp;Ls zWwxsv@|8;_iJ!}MYzcn#Qt$LME>kk`{}pMn|7rfdm~(Icac_r5Radj1oSxl6lPcf5 z^9)edb}9Zm6xhxo9fHBzwUzs>k1Rn+OZHvC=MFLp9~Ve{Zi8;;wc9)o+7=y$nCe%q z3iqS>$b1`v;;7}r{`c{ypH`lv>21_;t0a~sNEQ`fR?>g)?J9{Ea3K(XG_-9z9xY!w zudJ2ts>E&lb)yGcSUVr#(VMF1H>^%*RM+=-5O>|t*E>m|RkVENj}bXoFvb}pX~KRE zQ5Sh(JY+nu=Qy$FOrMZCWKJBGzrrtgE?)rF34_g9kVVy@OFWzne*sa1uV1zy6i}5s zGKpUh{WWXO2CYgH(W1cZZWX4I+Vzck_r3w9kn^G`{Tq_apIX9OCk45G{YSN2_~hQy z-sY#ulK*QI4A2ve1k>ZRGU}E6!BYp+sHq!&QT8+serWB(#6a(ULQD zd}BvSV5sVXp0p;qu6TOi19$9j!SX8vV@#@^Az`pNeMh0`&bPirAa$Ep&mR-Fom?)tsB3@^1PG%QEU;m&+@XO8Rt>7C`CgMJ{1*J1G?WsrhOBS znsq+JDN7;8$HXL%y!;zfhISdc`wWcvhJ~ZQfR!a(c)J8g|K1*lXd0oNGyX+DB75O& zGBqio}Fm(&FU|>2=bV{Ypzu>9Waa-s@cvPSC602L*xfk-}`WevR}J75h_K-tvE>j zKr33zLKXVF^8h|X>NvW33?JvOkJRA3O$jfV^Su(^}_vCf2oj_A+sCdp^ zWm_6VWq@-2VZ zn+sSL$)tNHH7WR6J4;tq(`gc6EK#(qmH=^H!`ChorjvkAAMgT2hUy4ue%<|#AA!0yw(S@P6PH~`eXbsq13fA_d z63sUG!pAb*`HK}<#UuCYw!(i{k+P(-C8&7d!}8Q*cb1-Uvy?mrGF`oGJ$ zQ=TMLUqh6UrYT9~D?f(g9Bte^HMj;=!CwG6h3>mteRq7Kv&-G=@u4k6lJdpN$H=;- z#u@K1!!xYzaIz+B7hIrcm;TFOwj&WQNXwQ%U&lzp88VUtJzOU9fg?@6wkFY;wFW(? 
zrX66rWg5ruuk$7R1?`RlZK&>#sJ&=*%;dWF>FaSFH>oc73H_7 z|5Rs3pX^ripo&G%S1Qs^-rj8t=svsbeKGQhxWaL3@cy{TPVMZ8tobM&kOCmy_>FzD z_|OcJDO~={jB`7XB&6_d(6#3#3o=VR;{xGnn{{JT=A18!M)m8yozz_&rcrGd4YAYg8hx2%=R*I=_3Q1yZA_&;N$8%gPnJft?q8hpB=f#o%T9EbzX2upOF2xy(R2Luv6wMWZC ziK3t8O33#$wvY>`qBB%JF`nG?OpGk~=`M8ufmrNj=+DZ<<{4cLb)%w+U2)^&3P(~A zG(BRc(M5ZRc`;<1E@F(7U1#eS5y~COH4>i+ zICO(VDZ}GeYO<&aP8G~ z?Iny^O`}%w$03>W8Tg6Rf&hf^t$svCh7??2?tCB|7bWCXx%8cn+GS9E8+0WI&(QU& zkG}Qv4(pH%50xHivQNZP~_Hr2>S50R*NaE6!zl93zJU?nm9cj9q;FKfBRXZ1GEQ3ubmlv`BtdQ-aQ%`F8sj?sF ziUDFYSjr#bFk$82U>N%#aKoaflDeY!9v~1OGnip(!CJ1(3y#04n^wr8&DpJLI}D2L zRari3)YfH-3M1~|5&vc4ONONZ$~3?`Up%hcX!dffQEDx zZfCZaWqzkg{+d@UI#tu(xU8#jL`z#QTYF(@Tgk>VPZlOBs+7Ji$(!*v9sVU{J@{G> zzEc3Z8;mMrwH(Ha8~tvEh?o>cQ{M{R;Y?YwW@%o(i`HEZWJaL-$F9i6IRY7-C6&3+ z&WS&NXkA`Grhb!W>2e)Ca{C#uU*5?SN74De0OjfVU2m4(}=YJ__zsnry#Kc zWqBXqkyZv-M0T@`nf@_Z zMW4f%BrWdW+R^2;VhQV0Xsdh3P=Fn@Eme&Ap?%@&{clW)EU9&&OzD3yDS!D>Cuku2 zlVLb;pGG$@vKjd-ns4|~`CQHPnQ4nc0b2REkYizsSiaal9+}RSwvclp*o%7(n z;20VN-Xl4-Rj@nn{340PnW$hkWgiNEtz%@9{YO=$ATe`>UQcT=4R~}--g!--bc{rooq8kj5V!o58)B@E6!Bx6%on{U(`N9kpF{Y z*+!FWbBuI&{MyrhKs1;phhr&-tcbpdaUw~+wE$tl_dk>wEA}BkLH)5h1nE)0Qa%9Ii=1LzK+nUw*zCfNCjnC|E$i+o%0F7z zb*yjbQ~9#jMqIh`;)Ehgal$-`)IT{ix$~1V_Y$>Yn^<^aBHo|>R{aD3o248edDvq?*Uk%)A8mm918s^?7z;peuq=B-*r zBnG*gde<-&7MCvtOn*(nKU*x;qCbXFUwrLme)^W5yqFtRI1c$s;ja$&8zJ!V)!ka9+h!P z374bTV)xd2u2>0{TxkG_lUAOe; zlK1Fn;;Bu~00t;iG4_pUZldl6v(64P018X)3XASCF6Uu)cb?u@T_*oE82>8Yhm^t- zVfS)llAGL|3jo?+cObk^$W3QY1Ud4Yw1_6!%tlglW|7VLpb24k^W@~$s z#|zsXqZm(qldA0Sw7ijS0`?i)+`v##4g%tnd|tgmgBkUO(W1JfH~Zm{sRQ!7_eH+U zNhnT#qg}w=uZpr>qn!NmnjNdNS&5m_aUNTmovIYoInXCtI)M$(vJ`()sH!_C)9*vc_;t@K6@ONeU>cZVnx1Z$)tE1?t^)2zj2ZLN&RfOe;Ul1)RQ+++v!T0u@ z456aV2IR5BCM?#BPFgN+m>mRbi&;{Kv0@}uRK7O=w-kjabmhi*4dFwDfu;Lch`|Ih5r`5&U_(7)-Fbo8XSHN-2*u04`>m84+7BnWm4 zd8m)yW`|$sWKol0hD@PBCfJU(3x%4=%jeS>AM-~CrM0OsuAXz4&%+u3B(v9k(!}G4 zRS`~E)1%D(LSZQ3ul~Hgr&z#zL`F#H$V;qojgI{o(xMguGu_~3_^@P}1J|BYJut$F%| 
zkfg+~0wSd<71D^~FCXIQ^ZZrWL~DL3f;aeQLyC?x-~TR(@3o;NK9$RWD%3T%8B@+I z;{G{7^70=*WwFrQN-66^S6!elY0Qx_!${vBhkXn|o3r7jgtNZ=n+UO|=|A^9LI<;C~%MJLFs`eJFBoFx<)IHwSTYPB_2 zdtKQ#h4&_2(|HX?;w0&Ufy**UUBeNsZ#JILqVH8+I+7%vN8?on2;9rg0l-HeFMk7Y zjCqx4P2)skc4NP)2`z^LbdHxN4*D)8ls5c7B@Yl>5cC{d!FJBcomT!u-#YG&F1&CE zj|}v&;i6EW6_PH2KiSFc1B+=Is~*<judT%5LE; znFt`x;1TVZ*@At1T83Q*zX*Of&r-;I({@vP()xagr_{<`slxW&cx7>V40hL)2H+l| zKW9)Ufw9}p*9;Q(h*QiA`|r90gC7g0?VK4I2K>aA!+pnSTeYE$^|&7GZ$H& zfn}&Uv*#=g@Z*1uT!UlAW?}%oc?XQXEq1fkv)>RFNmdhX>ci`+P>tj_kV z=X)yL^~Ec}R%v>=WU?lq{UxSpc$zeVX>&HDv+SDlSp&!;x-i=eUjYfZ8-v^46={8(9=w&HgHwXbdW?1iMF}waF-s=VCG%dGDGdd^#NuvK& z$5BGxDC5OPMSbMg(LWA>qGBqkVHjEZg#vV$XuJ$8%p%C?F06J#&k``FD@#P)JE3ip zL`1I&Li-J;()Z|fbTu^$rwMAY3sh)QuW2}-fP3Uj(AD}}Anpp7aXrWJb+)Y zFogHcR+Nk90^}e4wHSaN z04)MyGloz^!f8UcwL5z41~HyB{-^wfsroe51bN;d5ii(x;^>mj-g(FqJV^IR3Bz)zkWa)8bWg`*x2xaqWk&za>&@TNTnh z4K7iR5zO(M>nVpqV~0dhq)fARQ-!0249arKE+_6sUE?P7Tv$?3_prl={4}qp0!;zH zx|-6a*n-@}~N zME}jQXMyL}=W&NFv$JP(nFRD!mOajjl=~__zWZNQtt9O>L$(1xI#rO*Yp^w#uyQw3 zZ{~r7V4OWq2BbJbyL+aVC0ZRP6YqNR@LUghWKFxS+;nU4v7b`L{!cUT1E9wYJm4x& zKe|wfQ>-JNt+15Buj-iXC`agVlb9JE{_KYnWKz~QgDv{gM$9DQ;G)5H$H+FI^^50m z4do(SQT_mD5V^ID93fwtN?w@hBL!0WQNrp97N+h(0uputjsDc7`=^SRL`jU}#paiD zp1Z%_ri}z|l2He4g5ZqBAXjZ&Y623$+a#~H+kPJ~{|$tN2}E7Y(y+b|%}rSvv2P_U zYsDt)Ubk_;vkcEQw`<&ANmTH1ES~1STBhetvkjL3FqWAN@mX*!wVeCUVy2@>`o5?y zma<6(UY2`*GdbpEY7{ZwCQW+nc^QFdloQM>QE0%;!bDfSyRlc|Z&-8`%szA}AR3ji zGv+|t=g@Hd5Az|^+O0?_CTeQ71nAeGSfXRZ=g!I-Z`JczYZWoX{cQVni>s&NlsPyw zKX~89Ze}hBC?2r-dInfi7leyax6=dE6`m1eL$jl@s^$(??c$-tYo+n!GtLI4pkOk2zYUK^dp z_N`a>W5s&NLINB>(PqfXp#|#U3etolv}ymt;^mV@@Qb0LiD#|sNICdx_USb><_vpZ zZe=UcsjJI3N3NVj=`Y)Hry;(#I12=1`sK@MU!8a zbjM;XsuIK{eREvrr29vb*lVV<)6@O(5|-N zT2}LNhmBgLgDAC#e>rh zW5;c1AGQ)>h7Y9u+hkGOMrRKP^lI@O<`DUDO~pxJb?ILgsHSS7an!cBj%biZ zQ-Jm23A}D?m*eD`rdqIAF(&9egR52B$u)6^cW7CK+keNQopgqtfHh9ovzY8X|i?-iVgjFGScT;$#t;? 
ztWM>zjNQYq-%2y2>E0D8k0Dz6p%4Vmgx{TumrRF1UOi$kTGK1jdLN40&1&?O2l!mm z&6m2td+=&jTwL5H<)-}8w}BXCK_BR9L|CS@YSfC$%9e|?i_<_EzAmE+Q{|(&ON)z8 z&y=Nf`JJi4Nslz`T0O1emJdv}c}#hJ_9%hh?t$xdcAzjxnP+)Sw$B<)n_>PTU28~z zo6Tbvf?4Jl_b!naJJV{o*3;4rp$Olh^2N0U1uZS6eThELynQCG>UGrwM>-)L~fV@+GODT%B;eLBNL`*q@w<|UwOuRLz&gMv-B2$cJ=rI>MG*sqdQb$u5Cf-mF( zxb2g5cd)KZmx9X_^rWQ@}l{xmg&4 z>y1=BE|W7%z)^wcQV6o&3$+{@vYU%KEC`9KQbUH{F@pms+$mC|i^sREYVKkk15#x> zZEVL@Dl>n=Ln{F zZIy~j2{Klc-jVDF-52RDs%=X`QHqO8;r8)+#Mc?)1eeI!dJfzQz7ah+YQ^JKV3QBD z(08wZWUS*0`|1(CDPZ#Ot>`a>=jRT88%07)I@0ZFmt8K$qIVF1^i}ohe#9u{f zHug^2_u}K^2KYAXq&0yd+_UtfUmp6y`hIK*mr|l3emReee*KsVIR-|U@)nZo@#1H< zs-|y}lc)CXydh4V^Y>c`e8twa-cPeKXa?4!(b=)lZqa!In1rJ0d<*6XP(>{eEP!#o zF+HdbYQTDea4A3=&Mz!O`_}7rsiMN(uFb2PsQ6FxK+xjBz5>6(+e$%F^db3QZy5Z) zKaO}H&#@NtIN)z=i@Hq`$>JafAsU&$%JrGCbJlx`>#jAszd)2N7J} zb&2R&t5`T%er264)^xuF*bY(K*>wej_c1#vl zciHIkb(Zuy!gon3tKSS}g#(OeV)kEFNlz5i)bN503E!GveosX1%YYw}z8RolE*c)E zleA+Uq~Z0T_zulXy$&k3tmgE;fl5ILxLM_S=8geB<;t{3FM+bXkg5;P-DW zz-$|gYmVf-S$M|MwTauB3LZ}OYxWpRfO@1JuKy_{|)JNi@gIRqw}ik zq%5JEaQ9lDxeT0t_mKTYE!}7zPLt(bWz-W6d;{{=2^)$1Rg6SU;^DPP@$udB1i=Kh zWIUe`YI9wud|i*gx0|OS_aTR^tigV2(t2#_7Xf)xl^gfOexzPs4DsKhXO&{2K>F+^ zNC>Xk8Le-E*8adPhmZAk)HWoeN9@i5pOCB5Tl{9&*)!GRqg!7)gU_<%@fjr=ZhoNH zYkN5*T=ozRhHhlIEr%_8W50_0kCoY`Ro|IynvXOqR@ZPFNqvzrJ;lO~Lv0we`_=lsPB(+&3KJ5fGkWMQkkb-okyZ27T zO$)bE2~-%tIoRjqh^Vnqk1Wks|2qbrtjeWtO_4sd5Dm>kOExOtBWX2*Bu7agB_T2{ z3Mz7`4*t+J0!9{-LQ^w{3!L?ngwOh4OBgI(Cw!Au0IT&%rXtw+a_BW;P3TXQb*m&Z ztpN5qX#TMYhIB>%#mAl(VLbbvTPkGbwHM$Q>*5iHTVhVJ4bPtVlOrhjxc->julD>% zh}6ce;JGld?(p)E$X>f;ZCrsMk;F0R?$9sJ^c<=9F69uW7H7QjW#JGxm^L-SI3hej zMp{LSuMkf`5%TV>>H`Ml(O=8at=upLMy0=vCYM@Yxl5ABS}E7`>2kBan1?%eg^(hP z@E5$l8$QWe_;nXFJ@CgOiWYLs;yc*{>#8Sh+97#i-q5~!;d{B;vNKlF^B$-}%^3YU zw+}EUXYS2kH7^Btd6NO}Q^_d6$;p`#axu2_P=0uL7P9wB?!tcbSWn&WgS%xdH4MM< zi;O*6xbSjk58nsabDm@BuMb_z_QeC0O|yosh%MQ)2*c9@c=$MgIFx=BJfo!P_01aV zWBn>h!FSXpg@P9$R;rfE=Jb#2VXstSz9I`ihP{(?{GCArn^wPQpRD(3(izxl>01Ic zq^xm0xVO)4XTDxy<}2 
z+#ller5|lnYkj>L05id!+=Y~O93dKtSgx^ZjHE!*LdHG|^)G7jPvyigx?q!#KN2&S zdRa4Ww7}6A33Y*>OLM&Np&jUwmzY_uJxmL zUX~+UxfXDlZQ#?9LaK#tu3@I^n6=k`T9I$o)W?wgbV9QQuC`M<(VyO5VK=*_?1zMO zn3_dDQTXcqJSZ$!G)V1}^x#RFm#nD`RJtJSV9&!93)>59*%Vk@{Dyz8)Z@loN2CiF zfl*G{WI*QsO>~7EIJQVM=NBTi#oF^lu)s0mo6KgC$NYooo)y%m7iu@Go>|~l!9jh~ z>h_W9F*m$zBSgbLHMc@5k?8zFGZsv7#?5x^^iO2zfzg}YCVp+k)lBW+#X z5A};QZz*tz3SUY;M@}MHzl^qAMF&=x>4s>o)9l?qL4ui2W8lxd3-d9sBgbQi;rQ~q z>N12v@)S*4?ct~@*RHe0;c9I36QuHUv(1)4gX!FBn0{TUSoHqbsPU^{+}`V$@$&?O zyd(j=kNAmM>zox@j6Tk)7hgo8=J4qhz5@Fs4D)Ue0uCwOnN}~p%x%vp_WMrz?ZH&N zx}m?OXxzjV^&XcPw7Itt9$tr0Cr-SBQpH!myE>?T3QjVc57%yMa-J8(dk)H^xR^eK z-voupm~LfWppWa)bf8h7CyA_N6&B<8`B8A}W+02iQeBii^kw@4H8@^#d+tr^!t>{a zZ$en{On3xhR4Q5MIEux$1z!Qx4FNtQeRli$WOynza@hN1aNpW5l?g?HNFcv zct9B?8=U!FFJ?!_vKb|jgbP0Y`mCU0Nv%Z`>hxp$dCHjJy(r$I&^CJCV&8FaE(x|q zED0QU9q*UG{bc1>)q9)y#c7XTW7oiKIuIctU48Jc+WDK&(ePp8?J+`sW3InNVXqzU zuem8Rpr8fpHFsYN#@)@im&Ii&&41?dgV6kYW6;r4>_^0JPKVOn=5MjJ+iUJw3325L z<{l!2y-%?vc7Qlwm9)5z8F}3eQYglI_X_XbjZ(!oymYod+!;0C;Ng|@W{x196<-wp z{K)g?hAy#y zK#S#pUqXd|J=Ya#YSgO{%yE5o)8&>wY0C6D$;1`0La1nIY3E)`VI~dtt(|a~mO;C^ zx`4nPWipqOlcNkyU+ln2((p4@l4*sW>zyrPT0TuZ2i5RyYsO={bp6= zDEMT+S7P~MT1cO6Xv4m=A$H2;C21Vpzd_stF%bBR>%MIsetBI5o9~&J((xSja0+v* z-7pM|rsg3H&hbZznz1E0oI`AEzDBq^U*%Ylbt1ePIIZ*aAEQtThx1+AxX&kj=b0#y z27iaE83#Ec;!F8VNoqqbVjz42Xkm*E&{04;Is}cn!5~rr_hVyhvtAPT8Nr%R-2&ma z-MXN&XX{{|s_WjKT@3scx;*t~7Kx(?XxD%83@&~~H#N*SeBXcH=C2rxf7Wt=y3tJ; zr1p{F)7L9>qJJa#Iur1V|G4V0KMi=Tdr3EHjNZ)6N?$oS-2Zy*xuU%He9KMsyB8)r zuNo`DF(u~qhzm>KoBem_SMUsmc*26Ctm~>e+)Rwb#icrpyJH@H+@`jQhiJWT?=a}; zbZL-L-q-4FKrE^+!RDt;3V0}OG}prRDs5Os(le}MoI^?<-k>=$l$#CMHs4RXs1s!G z`j!M4yviDPyIuPw_Smd&Z0!ylxYD*^gyMPeR5hIwR|tXV6wP#D*-<__bE7}pE-|mE z6cdBU+MtuG6^zlqBmR-GGEki~+;zIq@8-1cf}-U`>UF1Ms?H`kL5iuZkZ;W}I6WA| z8(YV^3DH`y2U11W;+2|tZ&Q~3=|PKDs=#5ghjTLIaSMGHzSX=uylR4%Ls!n?@R}G#a z%x3?ihLp7)MCir6MElCLo`hA@(n*_wg;uQYR2lFEU6Ce< z=vB=UN@+Fb*(}YPsBl7&NqtB$d$a@qUk|}ak+FSkZ7oNS>yYpZI>8=rqJGmlo{R~d 
zI&_fug@sH5rIK=k7^lzcwDbTHJy52E1B#NjjEjQLzsf4SBmQ;e4(2))6B{%VS54%6V>bOmZUexA2 z6>VJR3WCczOYkIb>zf=HvAj|k-wyxD1v|5j=xpf}^GEViYRQ%s?*OmoBPm=0#$y6oI#dkY;OCP-U{^HN9tOV46UT~lyb~I_O|W>vebH@->qh%M_GOBM zUHpLL>M5_s+MyM-!oMjkW+iK+Bw3x6KCx(5koDqEcoiTvWLUq7a<;wgHzQaa; zW&rBBSgh?2B>L}0J(kD0$46=UJKPL*SHBHep>Zgm&p(X&8d$UQA{%JM^erszdAplD z0nPfX&CWTn9i1h=Y$!ws^7U4{2{<}zx}yRB zH?($-#QA7u;-=i{#r>tJwe44@sk$P;CMMJqpB1=>;$hm z*cY_E2tzw8ZShDNlWGYa9Kei;S<%;Vn>u)ltCYFQ*@xABo|gqNzx4D@zTAW@LY1Oe zlA`1N{)QQ1Yic}Z?Uuu^aMkS$3=xiA{mxzZI6UWbAgUj(zWY!7@&Wv+%3+9E)Xii- z;&P3l%iZYVjO(Qy2Y-`*x=!iX{LE1p2NTHtv1Tq4P{#y4NA1f~SGk(|=MV?KAcZo4 z4j$WX)eD<9f`ONT$h$(FcR?ZrMXXq4G)ZW?w{&wCh``{aw7lDphk_iPAxhDWURmsB&4LW8x?R@ z*9D1}LLm1K6ObcvE!`w&$1qu#RgrmN|7H=<^xi9v<5kV7{q4ocEfhbXq-_oIJgFH4 z6XG)hWsE4NV(JPh;~%zltcJ^3=%Lf&rzZZLufsb=L^J=z#^4Wi*YvSjz*+Jtn76G0 zPE!TkHmpg-+yTRy;wj7eeex8A)DTGPMhXczf`exw2}SM-@lYA8LKZimeVU=Y=a01Y z6V6TEx`!P#!|Y|WxRWFmOP#`Qw+l2Gl3IoWzVGX=<^aKcT^L8ix3AFd-m>XG&{n1d zm^Y`^zlfYu#^?8G8;%}70uh&&mnv8YEysCq z3x<58jp)bMd#;_vZ|C*+JYEF@hQ`4|@op4kq!>CUnxy8%!$<676Q6-;$)4%>A3vRe zJGBS`&@}T;$Q&=nI=Z1BlHWIKwHZtH{p}w5(Lv(Tdv=N7juU0HN6feeVp5-yAyUST z3Gxi8?o?n`r=1*oH$n$LnguDyLNEH>Lo0fk#EDVRjSM^#-N$oUc-wRKHiz5TV( zjQHM=nlIxUpzqj;cpe{qXX+7-a(J`&PPO0T*ExZ*Ipj}=RaJq(wv$_E0G7Yodavqq z1_JE9mVYed%eSCSu$H_9gZkT%N(xYM@4aV7Zl^+JLfnh4WTdL~$^-Qh6)w3fcr%xx zre=x(zX=6;{N@TdiseJ7RCB(V$MRHJ6DhneD5B(ggfJd2+zWEin^*T1rmaU4u5oq0 zRr0(iIDv69-_R_E2vzm&1J3&T{WJ?>!F%&Q4!0-sh@-M#jG<~RDn-lp@IDoG7l*rW3L5M!9>P~mH? 
zxVNolB#&BrWPkzTh>{gGvU>FDO8T$#yuRI4^0Ckw#Bp`g)VevjbDFM5n5npTd#6H8 zF`YIeZU5VQ6}lbT1nIcz$(Nt1X7YFu1B3Zq*Wl17_7?gb$Yb|Fwno~TKr9Id!tG(p2`)DY#ONTP-AN#piZ|$I z?(XN+aDiXDlX?|qyt=!~=mH>r?X1cDXQ-51s6PR%IzA&te78<01st;k=gx@cM)vN< z{?fF`+Gh=ADS)F-Oy$@S2T3(^VI!QkAelGE$Fm-MDg*OPdp%2q>_>cQdIT6P*Lkd?^NN*fvTH_A$Epz$s(B`FC^?r71r~^Ru8; zZBn@8T{geaP^1jJh|Sgmx=3#Oj#tYw<9|cM(u_7tDkHxn>p2|%Xv#SC0!O4hx=37- z$HFA9b!Y$XnnIeSwozF*Ebm%`v;GbEc+3tCw41D>A}!YJ(L^qX?(8&Fqq{l##60jL zJn#n(pFDyovczgXz1Jcr@IW-tdzNY}79-@`?Oe5ciwBfa&!9Jy1K(=f`fFhOiD=DR zNo93)*B}bQ7XscR54^D!7ibFlPK3t)5VILZN)-BIHijz>c==hiU5=s%qjX)ruH{R{ zSkaL|OTtrM4{PA4{YbkoXM+vU3>5a|blNKF6#NHsANvfU zqlB(Jq|ya1K(1GOO++?=u83%U6c91JrhajqGBAQL<}JEpboQ7~ZnOtnSguh|J(nRIjTtjN1y|^T zprl|qFUVlG|0XzuyPJ9K{WH5|Wy+eSNFk`wx*WH*C% z2c%edbYd`l>*j3H(c0{oXJM+Eal9+d8xJ&T5M}v+DVUk-lMi@u%Nj;Gj#8o0jI>j$ z?BQs@cH;g(AcI_Q56@k)+|fQ|KBN#oJ?3zkjtIvDv8mIlhI%|it}#Za8tQ45j&Yv@ zNhU3qnMVD-?nS3an-S04*UJ& z2}k|reH540ixf-w92luz zbBOja_)nK|Ai|+0%XD^{tY7`(f*&W_KKeg?pFT~K;5G2IsC|6q(T|X&C%e<1H{Gpy zY92iJ!XCO%JTn-T828Gj@POPtB)7i_$4v9pCD|eoz~%$|!G3cAf}TE34<$Nq*zfp= z`{+-5iO+X=ams$Uc1b37)?Ib`X3jhFd9c5%5-QWR1W<|qz$#I}%pqj=jH+h$LDT#2 zP552w4W9%>D&tC332X6S67NF{lKL{Mc5BDV+TN504{hTaEbs>)bA%nu&#i5fUZ!S< z5D3$+P7yKy91GC2yzN)NkWtyclGtw4lL#~|Yxp%erjVS!)#%(&n>R!Bt(xh$Io&}1 zy2egDt~Z%PcB5rAk<9Df@rG$Bxjg=MkX+;w^Fa^v1_r&Kc&VL%99aj8w5rdIp0Y-# zdv5aFN-fs{fyR;dh(-ES417DiS6?J=!hS-J3i>0m3-fv^_a4fPH%f5A$hCUMlG;~W z&Mj-RTTS-rJTMOv^i}Gxo3g+&S4`0MJL2oq!u;B`>31X7Cx7AH0FjzvNgxER9loY^ z{SIB+_pKX{}j#y*kHBI6j@e#v6e5pqJ+H^Yu3$;@oX{ER9658CP@Z!d4z=o&77=LjNh zwoL6FhQuwM-LU?`+(ZGnbRj)4|Bo<$#=8uvOC!PbxZs@R>(2@r+MZe+ti~qT+n1-d z+FIr7r--ZAu&b+7H&^~90!y$&}gyeNJ-a7l-ZT=O4k%B&->NvNfeuCJb3Z2b7 zZ3^_iB!Q_=wdFl43JeL#?BfV|?Qdz=Q7BYgAm@eq50%U9$bCc+h^7mEw-i!u7Bh>K zRyu7I(3o&9QT}Qay25#;kQvkcowrc10rd;1P`5J>1 zg&}>0ie*#gb6}1fphhWo826os&8li?t~}s8{inwtBXRFbX%(08!{ybHHMG;AfN$=# z*dyHkO)N$Gy>G3PyxF*WVxAF}#aK z4X9P3njRXw=JV(>P_{5J=uW!2ntfoVF*}}BTEY=OBC$W>nq$r4oYP;#(08%p<#05f 
z4PQ$K;Qy&J=g|#IdMTWA_B?;;FN7xKi`BiRb>ft`p<>9OaG6>Ffu~xk{@TwxLh!=< zR(u;6l2)J;+Wqfs@mqUk&w~u|$Kk1X4-&)~iWMg$SWt)jxtWz#ImQ;D`R8vs`*w}p zetVYHKD!%7p8y|Yy`R@6YyVbvlm+#2`LI0>xlVjP*OAupA1#t({JtJFFq%-PpYys~ zG0b$md(GqSm*wd2g4drK_JfzecEX^@nMwd_@$9=^pgS49>%m(Dn!kAyZvFtOPBzf1 z_*~C`TpIn-1oS7p$%b%1o^p60CvE`N=OKnly#WQ%&A;2J6$8*g?Y29weWWm;mRiBM z6Ft8%VZe>@vT#`f{$Hny4ur!~sE)>3&ci4D3Sg#Ij<=;AtH#El9RS^~-bWutMM3hb z$40no>Nf1%fIH4B`dQ1EYQZYT61ZJ|xVJKzupv@VGP@LV3AKnM?e(%;j) zO$|vj0Y=D--L9mIei5a6gKoBbZ4xTln+Ci3_1{_mI|bR(g(i{%pUAvHa*!yVll=hD z$Pq9ESrP6|87@JfM9&mP1Ul|dEl0O8(GsM^C(j4RQ%xBd89A+sFsZ))?1<4#(!=px z793vI11Di=cNm3{dER&SRGDFzn;rRG?iQ8(oFo%5{i~WO{AaP{LfQQGj9L9XV`#BD zS=$gEQEHVAWov0eTnDezliK1j2zQ(*@;%#yiYl}}3;+>uklLCKli ziCqj2R4h?>mB{-f9q8f4ZoRVZ&yS&($sh1WYlWASrwHLa2~O82nY3?bn18dfsQm)I z%zK-*lEwYYkUUW`ZuM@c(;SMM|BpWShYw5Bdi_Xs02}_1NNt3cI1CR1Gt8Tp2;BGi z&%7^CKW?m~`-=(RkKgnG9D6TG?yE%nlNKw&Z5b8Xj&OFYVbSu+6*?@?C>tm{E>3-H zxQa1a88eL$zjr(!TDkz;8%?TcW^W=LvTh9AtAAo3aAxr6Zk+L2s!0?0(tPex@FQbi zTverwrC$C>`V|I>2+b4xdx_|s?|}J4fgyhFGL{ByEc9hmQIyXx2I7i+YhnCPOQRZ9 zzD@W$){eb=)7f$w%pMMl@FPr;j`fo<$Jq8Mk|p=gvo6+b0;ECX*qTjl0R317qR{`i zNuHKhIkZYt0T(h8X5J(;3Ey0g8mwP4gj6lUfUj@i2tdz7!hWz@i^MoeNx624CcjkI zJu-g#P|cCsmMjS{<7 zaqpktKBglMOg#=rq|Za$_&ZpW>0phb@6az`q#X(JyCzzz`3%U+f}LimuY-|OLLvxc zGMP$F;4bOm$QM9?ZhRK>5M%BwL-FFE+TCykyp-;(sTtWz7#WKq(M#?WLk?|+^pXIG zb^RsZ$XGTl-V;CGn_o{*O*A7l7-=Q?#|m>}5yr+REi%I@4No|Nky4IF9IXK@!c~w7 z1m3-kA&%7JSl}O`u7>78he|P^xI+ZjJS&1~BW~Yv!@O0`l54x0g>$DRKZ0FcY(4ja zz&=cEdlg#s614v5n56&{JCM?Kst(`2BO%j6d_N%HdDrP$RdQekv%$EyXnoFoZ*5{5 zTex^pe^^UU&cfDlvtXu$wLo?1KaW3Cq5FpCftyk?Y;s5viJhiXT5{ExbL9z&gz&e} zy_0|7aJI)7x@5!2A5fF{i_{@N?@>pvL5)qWJax2h2njTG4=s*n#Z4bQeurv*lP-{< zvyH8cLP!V@RdJPseiWP{0K3#~-PdF$cy?(7zC8!au-e+Da<`SvQjk>atTm;!@?qCo z&tLB(y%u@MwW?(NaCHZOE`MQ)F`OJt$6?)%ARxf3X(}t8x-z-UC$C_JDBsVdkGiT-SPhu(FH1sxr%9mYwXfZ_?{n$b z_YxUY@y(h}+dtI@o=Y1HLmI`&ct$3Q{b2)dlGP`g&E-%Cd)t)ul<#eHwh?;l|y z15#d&RiNC%#BhA0s9y}U9>7iS-Qsw?^Tcm;b=9)tS!5qv`6|vtvuWWqIUIseyn|k9 
z33Sn6eDC2^lfQOTA6s$a9lU+e4o0Pgw2zvSkdZ~&L?xYX4YoA>RbUQjT+0W+(`CH$ z^~ovF6M%;Sl9ZdeQ76Dt9X(Bxz^h&M`&tud+PhC(@8Y&J{E#1 zAKg0ddfPIJmas8cwSA6ujb)qHaJ+HfWOTH*lma~;$6&1$%TI8pZ~vQJ{`twyu3hKW zp`jsVQXGA3StbIj1}&Z(Uu}l^5YabT<4JsvDnYU+01NzAHR;+W?{&XMf(=>QcSX%3 z#pGw(?sV<6&G}S51&r^-VN?kpqErkXjf;`S9ba3KbtjX-!N5_%^5Y+|Z%MDj)d%!X zu>C_f6uMx5>hZ~GFW~|3MpaF35Lz{5g0yNPRU+g1N!~+|j}G8D&x=^Xxqw}+7>rMF zc)baOuV-(>mi__oTji}QNZ6%44<@K&GRw&tpjc~2h(HuEk~9KBy7QdYRKY6Ijlj+J zi^fx7W+*PzukYPFz7BX3hWZ^Hi9VLDHbHZeyihj{aXE6g6t35PG!~@AOGaE8UfyS&i{0(KKaO|77`Dm^<4-EK(Ed-52RimxOW;}D7K5E4= zk)~)FTsdLkfVveyKu_B{#DwPQAD42|3g`?~%hcpfX{D%R(TkoIi2{SO3JMDTo;r)u z3`;6@O**OQGnjUcEG^X`Y>hwTJknr#6XS&sLaf<$wa`2(*IwHw1_EUZKorrpc#X22ezrcM%j%D!pSrPh zAucBuZVRhfYK)nR3azstyUg;|qp&$j^@TKfTTHG$-WF1| zw{dv?8|QAkyKTN1c}eQ$wEdE1EWOdu*LxqJ()!kc{sG8+k4pS=X#?OI^5Qm!qwI*H zqXq9*vn&9ZVW{*E| z}E+&KO^}KcMUo98FookO|2P$LHh@8hue)L6~1+h zXdni5s9seuYy8%5P#&$6*ESKIuE-_Q73*>}Xo+>TaIYvDpP{d(9GB&(j)vywK703jD}%o+18)?E z$CUU4=B4NX(7bf~dtYQsD`3{nPw23oTAmiTF)<3x`MLlUTHgOpxa26bR6impdAyKa zLx}7b(dJeb=qJ`9a~k=NmL}_fLRci)B589aYpDZ1J#%nI$jV>kF-YoSIpUBrqJMw0 z1@&#vcI%Np(r(KC?7tWJM-}Q`!T>CpVjWC4)gcn(cnacp!>*p3c&71JdG(UumyE>!rKu9QccM0oxeJfVyxU!z;YxUpF>Ylk z#F~PEh4j7E=HUbQq5rs@T)=xzRc6@aC6=!wL!xh);W=jk$6Z*Vf0jTaG~7RR(zTX(u+2l|1i&8T z&@E1Hee;ohaoxQdM^F3mby0^sgtS#=RuN{b-}b|RmNsp>Fs-tjuNBxXyP)fyD8`Y309sEiHMPWQEGkJF z(MML*az#}g7LE9WQKhC+m)oepObMccwm8lFP{$tcyg95LDH+T3{SS16&Z+c0Kp@XP z^=85Pj!cSB$F-Mt8V3O5r}V8r5L+@Ku}3vx?kg=v4an7E98HR8H&VSS^2|hR_Y8vm z3a4lRqZI zZ%U^iwJ{V_DK`#=n?hH{+8D%yCUXE_&+lw~o}=Pb_S&L;r$Oo6Qb!X0X|`RD6RVx$ zgj;21(ZJVkqJ9>KEbas;ch_wz57k zB!m_2Op+(j{6!=X$N(ZCcuyXLVVtT1EhzJ;wmPb`vGH*iVP4@shp%0Ex0)REOseiD z3Dutj2vSkqV#ANVMC3(~I^K+fJ>AiT5x`v8Q`PlYFIGls%%mmHwJ(HC_YV`g5VKcm z-@Aec?~m*bt@(DCSl9nVlzrv@V!%35tqn4N1n12{d)}Oqs2fX|qXHuxK?JqyVeLw! 
z{o8q73AmKVqr4feC?ea=t;ScB1}Y5r!6sv}u|QUyeacwoNTqmtq3e^0^Y*O=%z{pX zo!_n7E)?p?%eHNCP(x073P9bdCI!?J3jfyD`LgcL*;by0V9@7R?{DqLzS2rQ?Cl|i zb5pN7ux7=nmrIAHyar@fbGnk$|0T16uVlHVbS|~JEU0U{9dgg}4CSsCra@ff#$vVZedd?LR?iX>+{vqgH;rL&hCZ)9At2#&J9klV~}tpDA@)jZYr02`}Rl*NCyfs|X+$hP5}^o}_nA|CpLl;F+M2YAY05GbmbY(p2H>?UcO1MI_ekwgeOBl|zjxO$!tsIcc%tLH5PX@{0BO8LcpYVfUqtw{ zmh?z3&rZ&aEkE$$7|%a))DL6!W8Lp&n9700(*!$b#{RPz{HdqP6fbag#bIp-TWzV2 zi(?;X_GIG_#S_{fqI3kOZx{Gcx1v0BfAbRhuxGj^Nhc}M?=4b&KY=_ir@N5f*CgF@ynZcaN~2dS`B#fEOO;B_Pz;Q0{rX6AEPZ2 z04n-^TCeTw zj)#L_nnVN=Gmzj$_^qmY_zGD{(Siidn;N8EHQG>$%KcEZUf7mu;-KG+?oT^FudU#RN^Y88 zlKeB%KnBDCyp{eG${3tFK8T?4a>N|x*;bO3v5_s*>9^{ZYDwI5T}tF%!R=To?W5q< z`0+0RVyu&_$Vk;>K#Zc-pDDEE$NB2(PmqFsv4&3hZ$`jKFyZo!9{=v?Vpo8vyksec z1d-4cW^hv7^`EJR8x!7Dv{8HZa>;}QR{2n9m2^R_1>Ymf4m4w!3s8_$cKvfia(TXi z1-dqq$ARI-Db0ub)DKvZd9z?;IbC3R;@?*o7bV588B%StMnZB=4oPMvuwR8a4dO!= z&doVn_hn!FXJfS$TUkukcyo4w1Stal`n6`hmahw{>ZoX3-1Y#zGQ~6#J=AoLrQ&H9W{9M5y#R z76Fh>jbt@_FN8q*+pdyLX_=jku9F6P=BH2ngsEGH*Q-5yEZM=Dy5fLVD1R&Myvo=V8=6L&vxB9v-4aL3N599FHfX|h6J}W3tk2UWFo0Yn`+efJ$iay;oUeY zv8;Ec^!n=a>|#&y#?nhIdfn;fp_7BU6Ya)dT$3^Pyy42JLvL zr>tVEdKp8aiP+}DVfII9(La6wV*ow)dXe1+UIu)G#9N;MEwxNoUBfej$Yg9Yo_R#S zoWyPV7llB@ukJ}I8!U86nHNSUC?z(F@9o(5X!YMSUTp_Oh<$(Z87Y!b2j{dIR9&ob zGe4$h_W4}YKOtI2Rc#*o?k~;zc(DItAO~KgwPY~-Vo~0e)n_-M_8D8pRh<_ZOnhr- zxdQw03H&yMv3~IIHHTaw_-CODTmtZ((bik^H@X^yT-dvLxhDDn3f6U2RlRAyC7-yr zFG62M{P>2BFs;*M0T85X2&AfIoG2S8;xt@kB~d|PrX4xOtj=7QQSLp!=dsukDFlo( zH2`JP=(m2)W*Wuc8jh^Qq8r7P_l-s9ckYKBB=*-bjdgkM`$l~G%{aE%6>+^FA5`!;bA`dQv>k;CfM9&WF=i3Jc38~?D{G(ViCJ-0wO z2Bz!PZq^hT!Lux!;EJPQl$00gs|jubTv386%!!3MOV;(9ETR9=p*-@pvfp=~_r)T_ zG*}{Ux+f�dHm^*>faOX5$qx{Afy8^_r1-sx)S<-llJ{>qbE6uX|vFQXx-2JT zqlw0x{CdU*Y*nQ>nJ;n!mipg9*!>+q2rCO0I(CKh;%7U)^~ObPo1E1;UcAp+1kC9m z&_AEi(?4*2-}o#_<~z9m2S>c=lBbdQ;34u~(jh&)Y|xTva4;!Olnl#Edx}+*^YDAJ z^vs(uA+~ki@cxZwO8Qqkh5*+}saQCUdw>1?cwe9-x?fkbfSs<+d!;x!WPAueO6XF6 zK85z})~3vzs*fkGO}>!b!H9D# z#wYlRDJYKT{o@kHmA+oD{u&`NjFTSUBuuV7_Y_P$i%ORT-tzYH!jaqQva0C|MTAd> 
zF65-D6a`dYb`V$Lb6a3OhLDYZ_F;|=xK5591shhu(}6U#ib$hBzv;F9WEXtoTexfq zQ(uJF9jRnD&Ao%OOBd;EhrHZoWQf->wWaIL&@1mr`@3Z)zEGQzs zNPO&j7%NJxvN|9oCJ&Jp^V*0j%M{pj!(K;|mVJZ>V1$oxzPvgRWVPXpxbDWp0Fnrq z_5yjq4pmR*jN1ipk)Z4+Cq!)jH5WkgEyD=G;i1E6b2^T=Tf{ZsV;A(PL{k4r9*O*2 zpu<~Hu~FphP+aDoIf5r?#yp;KH78a z1^~j6g;3s|38IiBHoE)P5qrBadS=!@cayvSE0tJKSk6SqWg7y&%(2*sFb7t8M$)B5 zaEvT?-2&4D&Z zepF0MFTi!3>Nv28mc2!8xZZc0*H$U_c2-T8dihMD{v$*2PsxunmsDu6F~1OCfwJtw zdw)wPOkVNZ@CGu-^Gy|1_CDz8>51~mE~gu+PPb%*1Q-CF0Fc%TQaHo@1qk2nu25(E553go3Ji8~ z=mfd3mxu~CeDxLPBJREvPz7sad-s(5<9k701QmH}IN|#-78I)m+HrwuSRMSPjaWgH z_hkuhWJc``_{mX%EcWANzre22Xhk8rJ+R|eTbklYf=`h9-o&=GD9B!Tp8_M2Ca>R@ zS|Kf)as864Enkc!oJ(};k0dp7Awbro2t=mGnQj!OZ*QzWenuZ(!2A>WiS&+DjxfD8 zp|sT8o9Jp_Z!h|rH*H4zk+1v1nONuDM$Ju^p7KV%B<&A1iQ*DMVBhqq8|{31tIg*$ z-m%rw_^l=NxA#^Csd=icg$1tMsa?~;h35kbH6qIA&;5_FF}Bf@?~^A+rVr@;NjNVO z*Hj!WIzM@Qw3zN%@?#V`+CBF67?}D+zI*u*^gTau2A{}O`}vJ$f!3QRey7$K4s{O~ z7Z)Wd%)~*C5Lsc&#>NKe7T^AVSBMytKF&n#R?wOY^oa_ASsJcr?nh!P){dRZmT2)^$I8~VFTa9&e7+QmCdBE#>!*orsXM1m8Xg|* zJA*4LC}gW-j;Xok)zmGX{iv+`FoyHmETb!joWXs$D_7Zcpx}M@Wo^oh$>vDLYceHK z_nr|F{x(-P&o+S=w4N6z>`k-H)l6mIvqvVrxGz{DVBVmBYMlPw8X_cm~?b`h@zeJMTr>{XZnht*6t?VGCJqPn*+Itp$Bo@nU6Z80qM6fG5MAB8$S# z!pYmEalaG6oTjbI9!!O{9GO8gMP~xm)KRk><0{)A?Z2bK96r$}bF=~P*LYFE(f$Fw zBRKd&`N5=;M!$&qtD?=OzLF9L*%l?BR0W=kR)yL&PVAs%gW)KcFNRAVK4?LxOX;YMUGXQu) z5wdqd0Ta96JGz};0ykUtNU2utCzptu+Ey-DKJjfDV@*Z7R{?VaB#G#*5Gr&$3~5RF z^Dv;=kSFt^3E-@@GTb8V{q(A!dzEdmFZHFR^Q89UhY+-N@ul6ul9;oN4dd{#y#1?i zM^LgZ;$e7KXyFKMuUmt7xI6N8IFMuX{aOK62-`d9N990Jtt&}|N)<%MnkAZSki`4I ziy6i2!23&ob}ik4N#S&$b;|^ozTlLgR__d@w@EY|AutEziR(K3dYbvCrezyozOs0} z3!57pQS)Np8Ql1W$mCA;-tlAoXkYyO6C4_E>S1Z;(pBGiwe1ttc~IGM(y5v>&K`A2 ztjk3Vj31{W#d#L=u~`-6m(>e?{8I#1s}9E}dkoaT*l%`AaF-vsi1O+kq{~VlFZ~s9 z$n{t)AAa1)&I4mp(+Rj&4MNv9$3)x_c8Yl;mNTC1TW46RLK*tgK}pow`VcmKXzPg5 zDu5fg)gY~@TVr5b!>(WS+hpJe|AG8DwPec$prb_sNAkXL^pDWNaTUo-=L|M0K*qj+$yWSWdYI11pn`@4+1` zNfd#{k7b z_vkD4pK*1*rTi8Ms)%j16^YTK|HqFe1ft#MWmhzF7}2|${w;olm4k^1Si&V;rzMnF 
z5B*<^ePuwD(Yo#+D3~B10)lj>gfv442oeGc(%s$7pp<}gcY}0y4AR|=bhk9aFf;eV zz3)9|pL6cr*MFkJx4t#&t>=B7^)@wmoVT11lOmlio+-GlkP5b!TU_y#rkSvC{qR`t z7+Y~$TDAS2H>JZ)xWOwAJkZ_UebAVb75)#;%pN!NChpDB7OBou-7Neb5ocL_q5RCu zbO9qPanBOF!HW_ItnR9XK=ZUH&|3YCkg6S{$K1|^`x~n=_rh^UT#dF{upC4mD>Ft? zWm4t5d4H3_i5e2u*@np`npfv zE^saX7&Xed>F#DHsZ+LinbJg?k_ywS`4**znTox6n-;Gc5exH+I)KI`?L9jIrVo+- z4UDE+DY`#PIn8-~c|D|it$6BZVObeivDWhH-TWejS3onP%SyJGgN-fwnZloUJFE)4 z62R@^rlzJiz(j!mDMFEfZg_;wM$>grjq$4XX{?LbaoV3>!v2KUSynB=dqb;~&RLd` zOL!1Z9{%;TuCAD(++X)WAbg|hPa8R=Z3R?bbQS*nsEI4O02~>ucgX&9uj8uxG!It| zziAy z{?H6lE^d~fh8M9JguGwyLhVC59dU?*o4SAh3qhCqjh&9e!e+zOm5I(69{cwyb5_snHfR1N7z^ZTp8pzj4Z zQ-&NB%pRw}of7{kcBWft!`-D0Q^QdkQ8L*v?jITnKq6L6*Kh~27CU9mrty^EGS7Zz zL^O);>Kgo}@XcdygJzB&l&inw?9rxBTMO_&>Nz8 z#99YiU>tI*zx5SQkSA(kwb!un+qZkb$pNDHI~=-VYBzoHGdSEm>&d|B?GkJHA|7jh z|HuAb%b`9t5t=13u-V?i{1*T0t7;D_P(&<4y0T${a?Z#M19yML(cX3ldpg(UO}OkL zCo{>p^pQptn8`Ovc_WvT`Evs?$n|1>7#OcLdf>8Md98pu0(~&5v$LzM!jN-EcJa79m-QjC%w_qJy#J8 zhm3@Vo3A11eKJ_$v9_WAV^$V?{_oXPNrKLl`56+n4Hy(LZw={+M$$lIH}T#mm+dSM zl+D8nfqS%irwTZJ3F@BV>>hLb;of6wvc-d#K3E;zi$`uNE!|HVIwL4_RAO7o1ubT6 zWIa=^Pa`ZA^jumNZLTB<1z@ z;RG$P*;Leo{no)Fh*$5@)v?sk!^632f5=$L%ZJcy9I=kXgj>iT`@ZC+gP?{;i*2=@ zi|5b9-iyfY3xo?VvLEK7_@T1*^XuKO8n)9ks*H!QFg5M_Xuz;O1zc3!!eMG^DhRlJ zfYVWcpCsMq1PFwsi!+Opct5HA0%U|)u6Hw#9K8*lfuf$m zqg;Z>*`f$9?z6VwVkM&7M5;*uCuo2=MW!FM@#8j=E>;N)A?Z1J`1*Kf2Nd%ONb`NO z4zilazL#kuR%bN87k>8mElstr*|%96U^hjqj2%q5Sro0(2Pea*Kx8Knk>ijVnlk#Q z;W)S~)2){y;fKkm*(PkIlPNd3C^cXMlDIAW(1AO&9-zupidYC9gFa5aIqN83A7%$s zCGyobAN*ardD9XT8KV0h^k`AsY?kp~y1)tJFhSoy4@DzzzdMM4MxLr25E<1!2n3&? 
z(y)H@a@5r{yb?MTLYpVbLCfgODXb#yui9`A24~L$oJmX*c+YF_h`2%c^2-$)-X>>h zpXGBtNE^rHDjrTL%r`9 zFJ0y*^_GV6oY~^`EXWCt+z{&HdJ>B43NK`|c%##URt~p*6)a}Z^%e^2;YpTC`$Wg2 zvQ%5Czqb69^p%dcfl1LW6#I6A3K!(s?i+|bYj%$`>BnFX>v0?ny+X%^|leYP26GQrR#)n#LrB}H;CI`v-MjY7Igt3CR0Q7O!hPLB&?SImpZvV_1N!ZfyYLp!+-t<}zZ;+qU4T;jJTeK0dx&Xn7vDKK1A> zi`K57&?%cV`M5c^ms5NQ9*$10@tmK08R8&_-6YWB^>r3cbtvKk&CQd!uQx4+QI-k` z4BSTALUrsF3#gT#oMCxn_Qn~^?-_;J9ha0q00!XS$5&^b!PXvO81ch>&^qMMw$AM_ zoj_B{5{_IIZg3V?_wQh$0XFFB{N@XNS3D_@zJgH?73!r?CWhF1SHmueB5RG#)N4@K ze>*KXDz`Qk#Zg1o92O>1t3$UMoiP z^n-llDVh#{Gdy^4A4KebkWy#~GTnR1b%g;qp@XIO`W>ckzTk$M>lqc8X z)J6y1nQe15RDTw!F;c?P$FmF-9C|eT_SJ>8(A~;Hypxm=<;?k*6ub3hq}+^rBc;eU@^_RUKCd^@I=+e6AuBUPgE zKJfeyJyl61R^ZPTYtkTC6r-$Klrov_<%F_t6sue`sx=g)wVv*PDl64iDImRdqA@Zz z(5o4Xa!SHNts1HcWg~>^;#7VgD|1Xv zN8UnOI1QT&x*`Tq^h&BuC-*sRP@B^PNI36B_;@ej8D#KQ;nCHtI6dENKn-f%y79D6 zfZ#&#ViOqgc-*S_(!SRFV!CLd_qYDKp}IkMcgIYikZ&RwWJ_}VN5Uv9vj6|Wg6=>f}@B=gXMd)6K#W@e903%Nn=8R*CD*G)gNPyZ@uHv= zx1AI0DsZzJ3%IFYkVR(zJ%M9$BVrK12L#yIj?Z5UP5lu(w&i{2!2e4(V!d8ZV<$H| zE$wyH+>Zg|Sot_73^OiNc@j%V@qN^Ws9=+`yh^kKqsL0ILM{gAG4i<>9+PtJIqyvt z`{!IGry7z+Uq9dH(Axa1e>0;of4v{C_YT`8Q=d6^bASrGE85rybG|~cGO%%_$de*tNaTDwz>d@kMI@K+ABq}U z1k9OL0&^+3<~TsWjl18s98>PXYnC)JIVtIfBo!^n^ov>2ExO1~8y#DVA^2CQ9Hy9V z+Ndp#Orc^Nh?@5H&6yOI9DSwvSmK`nsn@Xbo9Tv$IgQs%D2e59N^XbiZ0rS&Epyv> z9o~Od>x3mHP~LMhWk^;#7{_R7aZ$^-zgbVuu7U3?^Mm=)>|9Mz$w-8>7GIsAn$t57 z$-z_C_Cd@GAstfZxt8ZP31~irnsNZzY@TLpmzx-a)AIrbt32-p))E>oS} z*bmTJKp};tD9;qWj~+~mA5DbRGO8k0x@r#((|lXdiYN@c&8C0rcQ7o9%MI;nI=)wX zn=A(ayxl;pUF^yj0NlYYF%1RLD7w?4HV%^0P;dzcl90n7u@f(EXuqf}LaV z-#7cpHLdMFq2)jkGP$Yg)PtV;G!TAI(ZAoac3LGk$M&sI-#-gzLdigZ+f9m>19nal z-5d_a`Vl_stoSGRZq*+fCS%kLKjLDH&6C~KS6mHm2!l6rr$o1MyCQcXo7Z2p!0q>S0Htv`Z_iGhLdFi@Twm|?x^Sgl?% zNJP zS=t%7y^a00FYP+>DO>AfKl1PRmL~{lgX83;!wYA!k&f-5VTZyhw&X-?&_JGX+0r!8 zcaETrCycPos_+JTyIvyey{Dx`E(gb#b3&24JtqCRP7(pIT*(l5zPe5aK|dwQVHUoc-J5m5q2$TJcD>jnX!4Img5UpAoj1dq6KqIGBaU$d7Ee{tkp%I> zk&#}y8QE_+3AHIhW?ZHa#ps!Gh17xU7*TS!32@*t?T|-s^{0|s!JrH9{ 
zBIkql%C+j=Lz+w@zu@pjv$&C%4^Z%O<4^sJ$cbM|B25Cfzr1^nG$94IoRX*W;16`veauK(6qz*L53QDs8MQP#@9m_8I+1TF&WSVqo|wQRL9|Nwf>gl$@-e8Hld&xfuP6iZCq;WW{CM zjECpp(A9FoNkXlFCD#rYTSK$#CuLL9;%Arl_nQ#D7H|W*(y`U$#Z@(7?)=dRyTxx= zRsB_Kf(nti8fkBpS%qc+uKTVNoOA)Z=`ig(7)YFmL!7NE^kl*SqYokvTb{DPc<|z) z#ci6eD}bF;wfB7>+s&!!;V_6LUi#EASYxD=a6TBD!tWh+G%82`rWOGo32&)wv0A=y zM@fv7eOrx29hFTt*Ld|SlqK@j9}|Ag>3KYgC5QF<3_Ejzt}hh=H?yJ)-r zW~Nv=@rRP3>L636p$zZc5V@lm^NW`T_3M&fo|!u9vg?!$&X8Fp)a6e!mA0A;P~r0IS?ItRWfzw{zPQMewB#x{B0%zNEJgmmgVF69-~#n!&$Wxo@sKj63!7rH)pFfg zHWuOI9v-byFbAcgseJx!F|Xbbm{_%bn@lV}{LUU2*FSKhgh%Qv_1<*3!f%=tCG$9v ztEL{w!u=&D9DxB2GBhZoEm_~-Q&Kf2Y|v(zC7=c%2T8vZ{iK#SnUb~sa*5~aK`ogx zw5hIXk?80|3XNQ$hk-0heWVf&5~2=1@oV*wh~5UEuOdc-9luKK?9j5^g&I~_E4+@=aPQgb)=VDRD%^($yQeo)Cy(m|Z#F>`k8R)>O$^Jfs8S`&bx^uoE(EZ?3S z9GC%hn)3$1+HOu`qq-sD@kiXd7v$(NZEOv=M-kEJEa-yD7f6N^CbpV(pE8xU*fPq; zfmW))VpzGtwXl|#;JDN?xChi=fZG&bNjK577r4=EQwMbls{soh2XjxGGP~KUnHDm;s1G9+tuzL-} zUiA?S?fhasEPDX6`J`Cf{thRUk%lxeE)#p=-Ee*Qj zEi#adj128TMfWRF9DRP$e*tH)Xi@jRcfWrzS99F_o`6w19AVvZ%4rOxtMM*7oXO;%-E(g}5Y~Bao8fqzQG~&dVpx2QhcMUpb z0@iBjFmkL+bx#}1BeKC;G#|YNXBT?H5R-mPVRNMok+-K#O(t-?D>$6QS)mgz4^3Al ztg*D0om-))ew>TP8OV(i6A$>&Ij871y78?K3gY@4(yGOM9Xpobbkg3g z18%O1{W>45T>y|Ii&RR8XZ~RLaig#(VjlGNOFZ83K>lf(Pfr17x9h9)CVWgstkl*h zK&8@8L}-jr(c`gIkAdzc;M+i0ppQ(3A&i5Qz*u45make`O8}ms?(XYsg8{0XJmoLa zlaKTU7M&eKLvyI`vsY!=Ybgd;Hyh5t6eB>Jpj;ngK@Ux|H^>uI3-`wn+r1x+3VK-x zf#*IwXn)08^VITkq+U~-Lphsh=wiFLxOlG0B;3Lii^6Zz7Pu3^CNGb*m^~?O$-O6T z8M$5n7gjOlN>qx<_4S6lkGd)muvRmdS-+{O24-6<91`SA=_K-<@i65HLw6$T^_XM|1-8-8 z+h&wpMNuDeqdvJwIJ>xn7IeQi;&Xte%4!M=8V(Q=rX*mXxr)^PPEp zZ>4GPYKF;Y{H4m5Nsye)5=v%aknFd!-t+sU7U{5_2W;@F*ed2vy{%`dVCZ&X^FcdN zv}YxT1yC{3=T=-49lL#k?O;MVqPmIV%Nol}rM(Q4lNa&sf81foRW3Bo8%Z=y**G?8 z|64m5-HSp2QKpGa$#Sr`U#Jn%&-Zz5j@TdEAAy@S8IxG_+0t}x!F#r)wP5Bb$j^)b zv={u;B*rjRIW;x=bBbf_vUT15#mxjT(b#qKOgS_ygK<`>xF_%FG!LFPpki~HuJ=Fs7` z3axciROgTOuhS9$DqVKCbDPoHa}-MbcA-wK+DWCOE`M9iQ6}2`lmFGK?U!8QpHT;w z5*ssuMyg3s6V$Z 
z@Gh@xZ_hmPf=xs^CJrK`99O#4%p`vvDCBb475~(mf-`Pdu*z|(^Ia@K`4$<4-?->b z8+=4?!hUVRsA~XH-b@H~e)Ntu`UO7zkA3mtqT`3}Fx79~FmDHq+8pc=%OZ8=rcaKR zjyi@TI$qxQAt}ikHcv2`VoTGnC6;e+UpC}$qZ-tCJ85HX*TokG5~GM7Wp)|4KP4@g z{ZLs+i4s6D#eoK#yj0QDBT_LAwGx$L7;q0vA6Vuga3BB=LH@fExfu@?J~<4|eKEtr zmR9dN`UdM8;W5Fo9>4eR?*i}9+ZLJ*ffi0H?<~-i`w})oZv>PhZe0OgvBoXI{4GPb z(owg~x!WPW!&sc`Os_(!$av0|nsYSMBfUdp=^4#PpDKM#Na_o*Ow*KAcq+p$3s@f( z1((d;{4sKG`uU?7>W0r*H~67ps;NbM2am8z=~a{rRcJ3(=DQV=-4B;cWAt*VD>5cs zjT6o6a{G$;V5Q!^^IDlRF3$e?mJcMfIXaZg`!RgBmfCc;(sA^@Kw_kOHh|5>2ypHglNPsSaSKH*Pcm zXI2daEvGTYfVv^GN7#I6a?S9Iyrj%CK3}koikfkM^`=?PqeQm;C$9@4qSW#~OZni4 zVMRlz1%ciD)+U6)5u@z*+a^*ef&$Aj8A3RwlrV7sz!$Es@)_5a_Q#fyBZ!lSR1s&d zB&J@n30qDELv%{ep~iV!DodI1aT&4ZM7XonVSg3fW@(+iUk^XK>qH~ zUk5P91P%bao+dW$ey&=+yE8-9zd?o)ZmGTk)di5UVUAxeVjj~}=+gSzk-U8JKw(#7 z?-0Psof>?rWhJ-FW9yh%sAw<-P9V#_YqL9E8GHf#k=$t&IZt5#x{=K~r1oBJy+&8> zTmd}2kcq?7rgSPxHO{a=^2|28%mxtntNzgAdku7!kDk0p{zQ$n6`+|^$`CrTjmPh2h&!>{dDkcbj+its)C<#PQ)Gwtp9{yu z0Z2VN_&AM~bF(>bbNlXmm2$n{`lB%gPj_2~5T;>!3{k_6)FTdcEJX*lf!OB{45aeb z)|r<#g9J}S*E5k3i+Gu8@(fhl_(Y`|9}UMCm>dFNOaO@lb$e=C$3sb>Yo2yFo(Kcs z1b5BwT*9c(A72v@)RJ9#eq46wXkR-XWY}>G6kPq9d`?u!Wc(5zpFT#02DHOT?@60y(CGZkhK|z1%f`o_@vcqFo z7R;2Yg}$Q|I%Bu4W+aH+ImYLD&nJKV_x6x{z$Qqy_A>;atpXr^@}}S77#X5`7t!(z z!o^BKt}$^8jGJ;)G4s<4UuXmqn`pp&L4Qh;ezOlD;YF9W%%wRw%LPog&Q@tEqPXH~ z?_FqUXztiA|A1%a@mixMBL4}`0J}`__jpjmFk^_OD1{t92t?Ie3KE838k}gBP->Le zKFAWckugMKh1=p^U2HJ!(V@z%>Yj@9J{F5C4fWl|hTmXO-)LOtZy7MP_iWX#Qd%Zkw6wbjcAw!^=G{QV` z&xiEzwZiodTwp|8hkX7p^wYMvx2@!f&RY`42l|44)M>Z}3U$=Ykl=&;giQY2;WR zd#M!zeBV*E@cADf94(FS%`Xd;rzT6YU`wt)54ist4#&pc;qzY8C(Mi4EZ@xSYGEz-(q5b+6i&DsZyff7Up&;a8v1oXMFedCi zbjDuTE1MznvZw=uFNm@0B%wtL{xm{58b48*WaXTpm=9Hxr$Smwl7Y(r`HoMUC4QLU zttRJ)`*yNNVCl0JH(`Qp&`NH};j3gg=3`9f>L%1luA`{g^euqc%e$4^WE}e_NltI= z4-(%8?Zi~(Di6l;?GX+neX6E{o6l`Qt})HMZ=_#l^*}-O!%1=8jdIpVLocBUFvgH<~3%iKQbfULTSQ*EG*P2EqZ13HW)6!C1wEiQM2B?)1>=L* z-$Xy2W8f2V6bDR5uK)u&WpepLZDi$y(-i=WSz0z94*=3D5&$}M=<@Nl8*-y3Q~Dtg ze4hZp7pS@3HSRK%mfY)#lm$=DulcmkhZ=NPe6MwJG=`7@O 
z-V5>d3ngl%$Z$E?g0y*|P?NhYfj|o%@Talah0otV=H})Gyp-&JVB-JEKWpD0dx&m< zRNQMbLSY4cmCYnhS$FwQ%;)T+OR(#omFW@N%_6B?HEXzrCut#QY$a2>O^@hVH}L^MOX{*uuKvr1f!r zYjersQ5fN;Nu#P$wx80*r1gDY}D_VIxB9{I3z3oJzDjwdsZ7FD+-r%)s#zt&m~F}`ziJJ23yvsQ^-Z8i$4le z#*bAx`Qhbh3Wk09zFplT9VJ`fYU^p+oSlvSQ3(JI2=g2|9*0A(%}jTN6tr!U4Qu%R zX1At->?OxS?#tOWO%~S8HdC7pgV~x#5F~7SBpnQ32!%yOnEzz=ko0Xv373wUMhScO z{kUZxYN@VSe3hq`w(YipwxMJ%U$KgLRzl#_9Yazh)^1jJ<Ky+fvnq7wdU$io3OP{gzh|5{$`8|7Pb+P@VTN}~n;80YQxj>eu2Kw3mSx2}?*Zpg0 zcUEkq)XN7R@Qa^aYIjS8t?=FGIQsH>Bp*X_K%JTJ#j1xTyCA==)6zdY>zl)$3VVIkhxa6^*T~yJfYfD5erM*h#zvb*}vmgp=AF z?kz1J?mzAXwVC(U8wMBwjju^TxjfGV1)t5WL6Xp{(q3|k?}Djf+`r%8_1zWt?f_#N z5Z`3=#i3ubg1&H@+ZpnjoNP zx$Bz`PXQ*5VIJ(Gh{?h{6mXV{B3Op7p2VZg*`-=xX|5}7W2F{wx{Zzx&3Q#RhO z4ILbgt_^cRPy9P@8;2M(=J19N<#2S8+W7eefoN7DJEr@sz`Rhp@iA(Ru^2yDT zyiI)rPg^gk2@E^716_Bp=3A4@t$w6J%-Qgpg%Y zH-)98w0+WFG=4||&=l9i=;)t~XoWD(!6BYy!8gBB9sn+!;c+~IDrWe9(@cY2e(zqF z%D;96WvH!XcfSt9TtN$Cp51o!R>;4qN!0>tePWc&$nP@bc<$%=)`p&e;W@uw4h5E& zY(7;b%O-%FPT$_WBY5ci{M_?$&V-6Ax*y1t{yxS}Ne99n#}LnGgg)V}Y|nl3M2#09 zvH#LcUtdj_Bm;rkxtemjVzv9Ay~b?e`+9Ll0hcbaE|ea*LAwQ_(t&-Dq(pwFy~58$ zeqn1||HwZb-CNBaN{Wt8{D}Nzl?*>34DA>|y}42L%6W@;e3xJDwI(9YB1$3^H0yzE zO$nP8WkxF?0RO=`4Crqr{+UCXnY#F=Ba5BjdOQPYqCS!ovC=gD0Y6-v7XX1dUJv2t zPh7+P`X}!YD7%YOKJ~7evtm3lJ~6D!$*+A-95Jtxmy7g5q; zy2_h6s`eyE%#H0FSd5;V+Yye*iNXkE8$Uz`eWRp#EECGZ%E-WONKbQM;9VzFsOz?r zqjGya7)4U@EzChZx0qi+TxJ|#TK)egGZY(aH^}hES-;;H1an0GeLP@xJ2~GfuBG4} z9_V8n%3GCt2c0XQLM|t^T6sOHU9KWgC(PJh$ON@)5J%NL~ffaGj8Dj(R6^BLT z+1zFE9+;uDip)x0V;W} z5(7|QDl02f-4V9`@8#OqyEZdO3~Tu*83lEQiLi~ANu3bDJO)S$b1kdF5N1LH^_y=& z2RcPDiBqRKK~5{0B6o00rdtKdgw0c$xXgB)oWW6GRv%kk9xE z75z#b{!Q_r4~ve+L{??B_5uK5ndFz(6t-RV0;c- zkjzlx;Sm;(V-@Rb9}vLCX`KO=)6D_!ts0|SL>L~(I5dl2mAWbB&iknX$m@we5Z?}0 zfcQ4*>Y?B9OBH%#A7~2F&W+#~uOLfTJ6%Plp$nlQdnJ+7qUC~^;sh;4hUi^=97-Jd zvwwz~jPA-86B84KHU?Qx*~|yU7*ln`zXfN2y)?WS zhn0$Z{fcYVjpw-s0$RLw{&QK%x63zM$hJ01sG@{cQ-o`z=(By{AMm`P>DvmU#Q1XVz^>W>&W&D9 zZxgWjAl|fil7c0ZFY?(D{k@$zBxr3NBAKYRH6(pUOh;%_kz@A$trob${me 
z@(SJ_JI~wqAHX$Y1l#pkUXIg>VmJ-zUAsb*K%>w6gr#!sLvYp|%={-duCnt^tI)AQ z7WxwK8)i@Mz9@t9U&y74&=%hyFTbrY*$Z?~@B6;1ZH?_XLY}t*GY1uLN?CnDg(6M9 z2}+T>0Mq#_8UR~t=K(x)l)CP&R+NF5ZefFz9@-f)>#6g>SUK`bOIBZ>* z;J+m5sv02uM=SvozvMj(e+RmIbpEfGdfE$fbKBuH=l7+*>AG3l3ZXtS@~P=7q4`Wj z*nQUebwKHrvKh))$bWne5B*E6lbF~W*}nkn9LU!GKZ@w*Vy3QNVBt8#xdVFi)Bl?b zu$Y>5b#*anCr=yeDs~QF5bWKjf+J*e9b{PKG=6}J@C`Gl?6VTkIpa(ET7$@wZD3Fq zzGVLwZFP%j6zbo$Tf(ET?|MU6a<2iohJqz;J)%5t(^ZL5@5@oIF%tcpv2Zluq+~Be zgbOM00W6KXE#JqN&j^3t0nJ&x>-W=M3E#*68x)Ft{`^&H$?Ef`-ieaC{K6>BZoR=H zXLR6aax%cF+yMAE)Wml$%U|&FZJm{wlLXN-5NHkHm|4VD_OKRBF&F`2Vfqf%MEy7% zUGBmkg&W5R*7yM?M#d=bIxz-%dSHIZo!$NWdXt7o9!~YhJWs!^ z)T7&Qou#soh*Y7jlxwI8!@zWk@hj+9M4urH23vPQ;SB@1^NFG;sPze z!VVkZ>kz3c!k4T+QyCq2yl<}$txllG-KG~ks7KC_nyr#UWpDSP!C&V3h40ydmz4nn zTJycLN*njniMXi+0jejjOd8f{;WaXe!BO37K-*VEb^yNgTkNa;J79HTU;7}qZ<@4W z69bFN5Y!*Jfns8g+0S^$bTT1iI1{V;PVu^P(Zc}5_}ZDb=fvg>6&0?(B)-SoS~VD# ztoT;yeJ60u9O(S&D*V*dP1V43BfjTsIkT2=`rh&mFtx+qKB*(13}SlI?O{Bo2!_E%6bcI zp1yyXm0u?#ZV*U>9-nyV7hoSyjhX9fx*-Lx`T1A8jXb<)7@#*as%L-t&8{>*w&<%b zscou#U>Y*{oIuNQhUwGY-91&Ls<1&7R&=pHW3x~z2b2lEMsrZap$*XO1(ulI(t-fg zM|gk?eXiN<%p~3G4oE*6fgsODGsS_EP7-uUqrcZh&YJ=Q24(8aHjg%KzHA|e>4h9&R%c?T@+AV4r~u0A`KR<(a;1(0AAg4 z>5xE?;D-zlW;ruco(hE0qHXKM_0IhQycruPO|wpL3w)#%pgc1)isEcTFxQ8>tQIjbh-; zG07Q1FhAM@jbOniL-(Zn1CCzGL@8yg_lyKe8RP^X%)97zA4C<b}88B?N ze>~deW@dc+^caHj*f1z^&NiZlyl|&w!UP&gn zrfjbNu1L>+=q;L9&|b{SX(T^1@xuZesu(4ct3SV^0XUvb`^;>S4m{j6KgOM zrpWi@Q)K*i9zZHrDe?*4BQ%n^&rHlOEy;#H*32CWz9EY3kCR7 zq|W|8>c`xJ^04r5&>`=)1(iczh2`A_8E*H+KX~D^u0|u#EnA`#Cz|`Nh&_tBYqI)B z7X2Ho0UW$HMo5r|VoU<@8-U;?>C*=}{dvZsX6aQ&nxU`TIXCt~<;BZ{aDk7?H9HHR zeRYUJ)n2kO-(GZhXb}Ar+SN$;hj<(d6U#o7Bp)$m=mWTs+-g(Az}4@Q;ugmfBIUES zQY{-HPZJ(Nc$#4X7OE?>->i=(_DrD#>@9nn zp*Q^r_+W)jg9cpxLc__OH0J5qJ1`&sKd6c(fz1M(rX*@0;m`cEwRpaUckEAqu$l@z zv;SNSEh#_txDq^>j~%cPf;lpv)UQuod#A~0{Z~>pM-6riIhYPF}>9m7SLg&_+)>J z0nl6MhgLLHaU-|7>V(caq)<1*WXGO?=-QM_Ey!OwDe>voL@w61JmhRqloIMjd!=F~YH>z$(ZFrM zYrw7AuV3;%;(8KkMN1amLlp2FaBII7kz-iEo-#UZR-zg@c+0w0L%(UpYbYseI2o2v 
zEO4W)HX6E&K%l~LBc9)poY0zq7vC#$WTInrjEh+1JLWn8`ZeIxqMrhW925LSH9h3> zXG2mgQ>mRnNi4opZ=YywP3*4l#CV$UTuvijHFW-Jxx@ zxEIQ`%#}y`lvH`=N`_@zVe6ff4LCF}X;IR&oT|fG>U@Jw0kU)J!H#10c%n(%a7ul7 zH>NSt6t}$(;-~a{WYms4x5sfIHwR?r%r)VL&-F#EsUqJ8SmVY$T2F@{-FQj7uU7+=sngzsxi~qEaUn*Z)t|nb{8*QhldJf0@C%bnHWK(Z`HAyJr+rOU)!8G~4sd%H?&;n)yZAvS%{1t;9**CbRDWu3V-hD1?{jK%y>x&tk{JXabe@8i6 z%iaJ~c=_nS)Ct1?ASmUMv}QT^Av}S{F{@SH#G*(p*(DzE zl{4dD{JCB6M)~%ENfu^3cdkDI8q~Af6|@2z>@aLE{s-yVfA?m&nSpzqWaxT@=x!fL z$Zn*_ffWw8>W>;%p~>LNBFA>y2Fc{?d12ungB3^AK0b!bRd+cp!h&d!R_RuJ#X7d} zRvYsCU4Q>9v1bpwU$nXf5bVsHHvUJB{pBlU6sq65buj#t9;(%^1)TMTqs{~4-yYgn zFCkVrp^zhyF_$v6VgXp)J>9Ll!knWm5#neLd@d7%87W1d+Z(KMdxMM%)NzCT5CKw`Fvd zEQ9VhK5$1LXazsw)>kV6hW;DNz4Pt|ZGQqc$Zi4$$uh7LV8HJKfdatPs8MV~BnTmf z#J>AM2T2rU=P?HaB(N%v2K?P;^+_KJ>jb`xYR$;dwXJg#;6R1G;(0CLY-wA^w=-yb z)*bpvAzfH#^6PnaZLRZqcW68t_SJ)JU?dsv|DHMFBNm_#6ifGRLigPV^Kx_fbfxNc z4(pbY*X!Yd{K3e@uy#D)NxFn$r(iM9iz!tg6U5glb1lp2Fh z$oTAIpli0huJS#Q;b_}#bg6gs|D~Qfecl*_5 z>l+8D&lA!T-NLrrpKk(XApWt@52;b&9fc?g)G-WIC}?5(T*g?qeq#ppijr_GVsr(Y zy`CKdr0Z&NYj%~N4#`IDf`_cLR@@X75PR`>_2Q%xLru36Jn3G@p~&>TR}Es&2$b7( zK|+JD{H+83)G=aWKbc7^4gTSjZ83m9PQw$&sMI~2Xou9Ct8$cOKHJUH z`UpW@eS>^9e`*Pjy(~&-kat=CvXAZEsCULQZ^mZnW~3dzl3FEOe>*V)nfn8ae1n1y zH$hLKM;<23c*QmSDcHgcW}liPFJ!-eV0-){f=HS;*5!nQ}M}=fE$v71LCuQCJr3Ak{cUN<>@WtEQj~}WZzOZB3Y$u-m#O1Riy(Nj!e1$Dzc3Ka1fe%aWDgs>ANlr z<1w4(>Py(uN@Rc#_vg22=p%L~xEc1LoWI{7Ann^jjUClCi2b6sq)vsedel)JaXit} z-nH;#w$l$IjzZYOUue0~Zu!q@VlH&N;ZtEkFS=PFw*QO0w~A`B3;IXtOARXx6t@!G z-6d@a?(U?xLvbf^Xpp6TcQ<$Zym%5T0IzBqD+9=qxK={tsnhjSatWNtuT665+-KJ0b-AW!KwMtqy4V6aur- znYptM>a%iOXvD6#wN+0`UxLh8uh2#A@l0ech8KO^FE!87c82Hz&q>B@3JBA@SnuP? 
z5m4{B@XW}4E#8N}eT^LRhs^_}{slA*bP*xQl!~EGBZy@^>dW)D+^6-bv7qhT_r)x2 z$sK#Gvt;`e@PGbTHTnfH@9m#Eorts;z-KxXl?x)JNvoi$Xp`}pv zV`(Ne@J%~=vY?}F>3)kKQLFe2lM&S*D9#;wBd+jh50;w0V%QzB%7(s4V4dGc;&VeJsrR9bR!gF7Z3%?TBTAYIpQ@&i zP4#DKBhA<9A;KkJUN2U8igwf!H$ji$&HvH%MoM5KyUrY~zE@fq;BRq{@~ zHoCtU^C-LZ`wssn(ECX}-_*^`bG$3pYw5(jwY5pDWff&!U#c_5!Js|<+N$fH-WRDj zR7WmR3O*X#Ib4gFd&&q)e_!v5qbc_I`UA4vj}D5q_p?<~7W_wXqvLrB$=6|6DSnU~ z@nHE&mW9GCsnCZV&_f!=ykxI3MVb&cibx3!+Awv&FA#a26~4S^)KAfL8r zGO|z^#%)iRpParob^NFbDvPBc!^b^uqaU6(ML&L6egeH)fk;ruLxvIMj`rO5G)ImO zrrl>1I*REvJKsvX%qQ|wafMiG%lsd%HTK~02!{tNnF|Nd`Dk|csbz?4nvp}c<5_-X zKY~4l4SD(NMP&FE{ltUs!gr4*zn-cni3tM4_(-)L5|s?<;$XgK<&5Xv-tGJcd(Fr1 zdZk09>+J_OuRAG*=xmGQlseoAitjh?QFncNL;ZQ&oc`b-vO|RSMntClblj#-^=Z%- zduK;v^i9_YY|+;UD9Jp^J8fOn@9i!TiFZOiw~kqm4DpGI#izFS${~|(v?-v|!@k2j z9`oA^IP%lJ!C<&kN1bJJx(YqEB~6h6v5m|F#c)D^iCF9BuP_G@_fLj|_6`mCsnAd=lqQn;GcrA>@|DOVP#bOSK(}AewLzKTY?lfu z_l13;V$ghTWBu8r?{ArG{W-zx0yZRsFr;e=-eqFVmwH;~KpNXJf7B0W85&Ahp~^>< zy_wZJt;?X;eRpHboh(SM)N*n4JNOrc`JT^{xW2OvT#yxSiwDns(GeL=8rJksgnS~| zEIWjcpFcKLAEH^r^Leya3_VY2y<;~f2h+XFc+2cj3lPEhM^6!VY3<@} z%nhn4nnWQMZrN3Z)gQQGn*+y?!0v17h&YM4a#Cy*rcsqHT=tgoNua+Qt6N^(I2{4i z+2Uq@vV2FRT^>?vw4Cw}+C>Vl^_WTsT0n<+mKm^cB!lMU;41If&QQ$bYq_UPMp(F0%=NB?R-AE2VmH=nxu|Ci@0svl~OLc$CM)xir^yf(r z)?D*9y&3U?%K=*S*nG6eNCEdq-{d@p1Gm*+0r6wg*(s9+kf!2_XlPMuP+}P(6cMS?*0y4BJT=&)Gx1e zYWnvT8SYHuj$B%oASfiK)VG##rDMI2zcV|QNOQD4Rb1DiZ)SM4yXLVpm3X+oKeaZ` zExs_~#i=O%Ww)4R4)R4Tby63EX=lC$n#*+Eby>HC+?s$TOLDp z(&_jH0ZqEtnsw}6qOmGr?O*VBmveP3I87%3rg%bfSWVX;Rc)`rTlBf7^gPy2(~f4N zr4JGNqWRt4?~6+v?daELrLt-pzx87VOJOlA3H^H9tgCuzg0+C9naQLPAZ?9L*1T&p z$@>zI*k%f;p})K!XSqw(XY<>JnOQJf+DEbeH1cubDpc*aG4?%6aR}y@p<7iQIBeNc zfW!f!#oPTOZPr-9do!lTsrV|^V!76+xIB?$$Dha_88&SgRq9LU5bSbcICXD-d=_{l zrJU-r#Zkr=cpQ+3+*q4gxwqjEseU82M`Ws#KLG8wo|h?UM3Kly?gv$;T>JQl6d?no zk9LVd_)Aw5X3OKb7574BxA3vqn{CvGXC;h^U()BWLO)fG-=!b~IPUGsYJ*B{PN3RN z3rvXJ4(f*%N8fYlXVcgHPixnQwn2xsz{Z^W%Zld}b{Aj|XSnEjq9TUr!(pI#d(2?c 
z@_~&N^+ov2U)`waAASEgpL<`8TyqSR>DxVx$B@A6L%y~i(o_gvBJTq#IzJlh5YTJr&*isrua>`W zu%wxK_?KPtG(2QIF6~OFaMD2ANyDXa$;{2TI|BDr@=m9+Uuoj7hDJs~u>fCfBd2ML zF9XVZN+XN&;Y>Js&@yd&5FuJ_i0Wl%6oe%Ic9uGy{MDGS*LIe6^AnwVAx=q$J`Hp? z`JldkUUkO(MAHS5r>v| z?xM@ihD5Zmb#6+YN*i+g(afdkFe}DV%a%Y7^lkO{EGn*E*mfK%?Gj6w#7sG`6_zj6 zl9F#sL7eO7WFx@3?awP6u{t63>AHWIDX|$A*|Iz!)KxNIeG-u6LwGfse0cepD$Rz*`NDbh3HrNfKagOFlQhi^Ogj1{>| zVW!UEs=7W!)uftQys7w9aE<++opZy2w$3KN*lz}Fm0|Gyb0-BSwe^!`vgubsu;Y&w z3OZ)B3C0E?!tupt{Sz{}gRjurj-qw!e}#SfO{7`U6tKG6JuG3l5L5k@U!N!;O+Sa# zmm_614vz#e?g8)qQ^}#^r{rta{_2~r2t-#csmh?lUxePdbk6uaig-XFJ!fB7^^C+?S5BwzC z11-}k-OCCVni~p=wwxs>w%k%$72_kr7D`%%e!0yWuc!fdII@dQs}4J4EDT)BGJC%E zn_vLD=8vFVG&0oYhvRg&C$bT5L#j}?25?w0;)euc&4!bO+|mq_?Q%n=6pV@z>$ z;dV9NRslPoY|=kmOl%fmU03`gRq-%^lgOf&Fr+vL^0IMx+98^;_hUXkE;JLn=wMK?*J2 z#H=XvSV{p%*nh_}Hq~X+{CLj8W%?{KvK+mbS+&9im<>*cD1?S%PcX@QyT&=H5*>{7 zQANeM@^^-|y2m3MNip`2;JY=%# zGp-5^Ca0cSw_P5`$7jWsY`@E-G1(15jW_;BP0?;#eX&RCJKB#0p>75ss@v5wMaGF8 zXR0>$-z3_>#H3R9?;pqN@FW(RpD9*I2MNuQ=Vy2os}fP$(`yqjs_?->+T3q(r&>?z z4@b*QymA>{XkhiEST};KnAiJ@h6?<8@N1z-=A&vC{Rz&V4dEPSKSShNOiJdpGP=C; zr5Myxq>#O%n{GXZ%PmCA?NXw0aM#bgwqNoQBBcRT%pD{*^w2p~oU9O9s$<2lAm=7` zm|tVb_>9F+{Pdwdp(yIFBr^d6n+}b?qz^2zyHg5F(Z(2H#cRR~bRi83$l9REj-RNz zw3S3Px%z5q9xCf6I@U;*$59DBp+zcX_u0MmFrMaT%oIU{KR3M$Og0}fV;tH~(jU;& zdO7od&H`}o?oNtE%%VyG5aU>^Hk~&4cD-B-9aLPDMnERW^k%YZ3Q2XH4=K$bm>Vv# z>kCk6^=h8YQEk;|@(S&Rb;_$}^$5LfRgyxN$EMj%S>g&8AsGxll;^Ijrf9%a?ALHa z-JHkul%mszByo+XdS$jaVdg#mR@kiIcb+wAJ3%x{U)%TIsh#uVLR@TR-|H5BI=iIW zg1ss#yptC6br#LcBPN$bxf{r%?f*&g_L|B`Psf>v*sbJla!B$Pe$67`roZ~8hZ%H_j{X;1X*;)3L#1$U4!15?#aDVET38e2OCbhPLiKv&Y(+qme(L9l!_+BtpE*zc2kjGS_XytFNnjzJaKQUS zs#0bB6s_HQj108zA}HOFcumRVsJaO`=(X1(j3+k7B3PVvi`t^=V%Q*Wy9m1Ylsv2n zJob=@@9ZUFPLhxea!ZGhvuRAh2yoA~*+zN^PDv)q?uxIoyxaO2lb9(SoN9~N*~YOx zSc-^ByTK)cv3MmVQ-+~THBH|p{s{t>+H7 zBn!=-;z46RQ{R^v&@pAThgfsJ4A`{tUO1K8@$qp%G`_!|`=kzw?$&pVeKl^bf~B8W^Dk&sKaMm*I#TZei-zP5D(B@NA0CDlA>3|KlTp?Pd@v^+w?4%%e z5VVnT{OY9A>asbtdQ<;+GJAqfY2|6zjU$z>Q#{ohR?7J-B=w~S(=3C8dZkgyS`y{C 
zUsvJm>(jqS19VQ97$FA5E(xMBYG;VY4je2ECaPmDK!&f|UQoD>IsPmV`p%5hEf%}D zuN9kIvsCa9us+9G7c&xc?a0!(AC&-EDcL9&%8TDVeE2MCRjuSR)q4qOYT7-`@cL8d z6T}5j3x<`dQmNT0O6Ikdf zDgic|A~etOTd6fWA5+1;Kquz5{{v1ba)yM)eYdnW^>%VT8bv?<1}{EcJrMe zEc549;EtpY1}Lg_wiOs*^+&Fm@d7uOH~$CRVXTOt(pw} z`I_eSP`JX~kHO!!7vvQAyL?O-CZ{4w4(gLrU`ZIvnE3rioqZo43UV_J_ARNVOGXk2 ztwf4~lG7s`;3GWoQr_CO=j zk>r_wUIfi*r!8XANz85%0nJ4pQY2C^UnrOp!-Ptk~fVJ2e2WH?2*v_&Q|(e6CR zCa0z>EcyQR7=A)Y?DI@)ihL3div#CDniQ#*ft+9523x~(i{Fgintr<|q5NsUN+i#d z*pqvRM@z*lpE|qYQ$=s7DYxmKJH;9u*hSl#o*;O81NlihiJY6o?vb+mjCIe70p^%e7$1(1RHwq zUBn9>({^B0jaa_r6p<(j1`l?Ya%d24(T1z%u`2Zz(g4Q}`j{`Kln*2Hy zjpUx$ubwN6ZM5cCsh3X?NEIb%^LK`--p!&vQ%9&+u#@oPPLlK1B1Q;yGEF7MFB!)X zFLC~Ep0=P9iXx_U>5N$y|L*@&m^4Dwb@?h96r-mxK7h+bXt^=fi=Qw6gXy^KlD7)h zey^f7LYzjcpsucuw+w%HU1OwqrI!So_p`mZm*dNZH4E$GWW%tDtmNYLfU~v@v&OqQ zLLV|!81-{C?H1CumBF-r%uHixsVx){NVHc$|GAmpp_Q8bE#)%FOnp9XjqT=mHmdm? zS~*2@eIiBG8~x#&_Ij?a7S8~H25vJAb3dt2ucCHsbMQ^t2AB12VWCKWk&+fcDaK?; zsM3N1ExmA^uM2Gl*x$2XB_nftE7}$(hp%aT{Ec}@{siCHmRJ;(wW}$-p|!U%!wokSeyPHp!_t zBG8&7#eN>3WSDepwZ&4@BiTJ@32%z;HmZAxR`=q6+;9=jf2bwE*wD1k#<0b>bT^II zdTJKqGVnS_n7;@Eol1I(sw_C1C1#fR#z5*ry~6Im>rfN^1kHoKZ|N-XEBW>exo|8l zX8C>?jHJiXAk>Kn^XatPh2^E$26OItl%BqI-HIFixNqAW@QzofsL&Z(l#v9$90YnZ+X z?Eg^aPmPhzL?4k?CD8iOM#l2wu>bF3VW>S2DC?OMKBVZ)$mAy<(-@iSH1;Doi<0N% z_L&>)@t*LsIDRd?NR0C;p?KO@rD_}IY0A2H;KxEVRy$SDdm3J1| zE4w|EizlJ553yl#&Un6j4aFuO=-SaF)rMCn#rE`P=o5O=p^J%!#u&;nZUn;CcKh$t zp_%VwEYRn3ah5;Y0l%pHwhh!?vrLMVLHntC)6Dd*>HRM7Pv&Ppm&JiB+{5d;1#!g; zd`^ANR`x9O>sL~z_KffuFEDr_YfoT@;N>&3=7e2!$H^xx<$^r!=L{9ap_7!Xy}EIC z``INrRYGGe19)ZFh)jByakt{hjhwRSexiZ?CHX!dQP5Of+oCHqMyyJ@)@U;M{feKt(0_-Cvbe#@64oS>VO#H*fO%Sw0jT?>-WwwOW6!@#>q zqsUR*XO>x(au*><+G3=hmi$ACsxjJFxoDC|*O`D5yLc9oQ})|EojMBjfY(`l{&cy2 z3XCB6Ox`R^Yd>gL8n%}h7LIc6nfW%4>(3}`?9+|(GY6M+a!zq@PTTycwF`ZPvqkjW z&)Lv!Mq=YG6%_qARNH0ZmgNCQa!L;+mN$+Q-MNk&Y+@?W*9vOvlKHH07bJzMelil=UX0$<=z?lw|1QAUp@x)T2kVlZ71Gt& zW&h0+n%6-shN$q#RNj1Qg$6YN*LpuJj}dQazS#Crczp5t>ZP{?3>Rh8CmAq|AF$Wh 
zW7!o3CwWP};hC=yt9nW$*H|Bs83R3}UMEFEGnp=r*>8&$Q!g}<%*Z=9|LI$psV-M? zvr~uzbi?r4S{jdwsvE+$8(Qp9qL_aIrJ^8Y{AdsgkTDC3a~UXq<^<1ttF>O%TV((E z|Kl4yH{8ay17*QQ-irNKJ)3)FkS%A2Y9G{TBr~_Pkw$UMZpmMr<$nX0ZKS&XU5`%FgLP`Dr+jKXWK6BpQ%sFpA3{E!cn9I z&;O8`NW<`^3>-hHRD(tVuHP%SQ(IkwvUzor*UF(c5Ms3Fxc`~j&q#i$4LL~zYlYE_ zNp!!uWj5NP0hIq9?D@Qgf-ax5a=jW-n79jD-_`|E z?CLzv0*bkM+xP?p35p8Y8rl`?+0WtYH}X>-II9iMWByVsIUq}K5m&q4txE2BA%?1V zh!SZOz56Fk$NbQ}^e%b7^SbeFBt9VFe)&P^sgzrCtqX^Oj9I%{&^G_Uj;H&{bJK-6 zGZL3Lve)38vJ6N<1-&w;!0BH@B}j>y*8I+d^^Lua(WsCv(Xx*o8-z8Nwrxp7H0uM) zM`P;gY~EUfxPJ1A|4*bd9DiZuy*=~$j6%q1Jm~IvhZI<(p4T&w|LxkB9YRbS<G&u`N zr5Uli+ZM4hMCxMq-u&_&9gE$IydS|G5B{-~=!1w@S)Ai4>9B(Zl26aiggL_HkC z{gP|{66k_Ls5f!ASLaNZnv9ZcO+HI{?Tto!(|`<^mz~>!I(tdel~3719uqTSEJ@mY zB1;KUBBLoE>!D#i04Fh?tKQxo!~_COVAfolj*PJ-OltPB(a|V3NFl}BW;e33C{9z- z_5+`77Jgk_K5}kRr!O3zn(o)T8Fs(zFUKIO|ZzwRP?821|aId4t#BZCcb!&tR(^ zMRLE}Kj9XxKwqV@@wQg6OV;O*&&Ee!S);1m;HRG^~_?kqx`ldELN$_4-5sHpwm|DYlY z#Is7F=@YX&$+$XtDRHs4Kb2W8)N?4zby1s!aA6^^P+>~QFXJ6RsLeFs-hP=~A==fZ z=lK;jf>CB{R^T(oM_9`s-p)XzjdkSe9JW6&$OKbgZ_X6tT^iYm2l5HN)ep!mM+c2AD-?I6m1qoj&qum6RojDbB6I&-{2amaU;dT04J;-oiNit1KfcJ02m~8y3?! 
z9-Y2@l`j?j{{k{oNdjA2h9)^og{;JR94+wDl)i5>^;bctUcVIIuLO7`` z@6Q8#z@n>2ql~!z2N74h@71SURX3#B?4{t@Y=P!c<$bIri8e;V=3SsBg!J@78u@JUeIf;u)7J8J`Q3vU z5!VHE)Ysg)1;zAG(-^7p={J`2C8HtiW;1a%=aLC|V5uRqu&LZlipkw#`6|Q+s|yjf z>+PVjw=T9`& zbL~}Ziuh2p|JWtZKVwDmHQj^(C-;HdSMkm0r1t4rE^j%S-mxXo9C<3D>1!3y?~s$| z{k!k~EPPaDdOKMqhjc-2C!I@BeQ+0R!nXT5vo6r&O_a21(A;av>Cr=Fa9QP~2&Q_# zgmsh>fqb4+ITO5OaKtaSuJ5lOe&tjTmx#%S05h&5+w3Vz^XBNcrAlGGK>bCa;!~mr_z`)pErs zN#7=u-k7CWC`tVa89k^W-G2b)>Jk!+n(F+!k$+YvdBj{3Txg$9KSp#De)(=X3^FyU z7W~C0eJNS;{ir6m^H||JZU6VzqLLtN072*J+8N25BfjK)nP-GJhhB=zY|7V}FD0nj zcPPlV{~e{1`}srRZ^E`1HYk2u+4A;B;f;TLHmWT3s%H=Z%cHeuecNjY&ug6}c^=+n zZG9`DzAS~h8;TMoToeYIQI2iq18M3d+*nzC=H)$)}spGy!62F6jUG?N@+KTENnT^{U0G)AyHDvvs9u8|BEJeWu=d zOfH1)Zw@uo&!qYCqut-O)LgTzUvTp&U4RQHV-HNEaLJ?Z^S4)id7DKytHR%_tx4WL zJ4LKk+;pHq(UO%A=%wMg)Xb%gG+_d9J2F^WTdA*bxt!A#=MKnGI*5WCGkqJ8Ck;kR z=tVr$$^7@4E$7t@waGA;L4$wBousOZwtNHn{Vy>c&1G`$dmo9?2+#M_AsNZ*PWK2~ zWCQZSQ6XVz`rviz)dSnf(qZUnJ_NbnS$nFL#Wmnh);ia=g?C}ZpLb%aOti;>L(wzt zIJ|I_)l#;c>!p6tdL|46u70`;%#hF8kbUCUEhbnZg1kA9XSgv4T+29FZISgjJ4wXa zAmBIx{&{FG-(pXNeX~HV?`LZMRmxt2mIN{0rIAqR zRFY)WQx>VqmT=A7DebDM{*x-{lo#d!$VENurh!4C6gIlu8!QQmfp@N;sxK}_dg6VN z=fsG3Mpeg4SWQmzmw&o2VoutZpAW9S1{~8rKZ3HXcBWnui3W_MIDFv_9QDR89sFCxZY^8(0BE5eK#y2$D((N2!Tse9hB@{gsRT2^Y2Z z-Wo$9s(jF9WPo^ky{TIN;-TMU{H))sr$SrtTk~P=^2n$fO+LE)bNRHwS@*Ivs26?! 
zmKu57b=?}P9=ZR1lq%m5c#`$>)E3vXyClUU%VsH5xWY!Oc?)OyEJK`cscj_dBJFU1 zM{C@?BN4v&3y1Vk1025MTlNKX<}ouKL;OB5%p687M=>H z6m7n>(nzd-W5@RVtBv>gfKjxJZH}~lH3;?Ua_h(E@1x#|ea+SXnj4AJu2;FGA_j20T+viMg^UZ|{)nld`zmw7Xd=WSGG^>rSS=}eVbpLt` z$S^@j*ZUDX_y2<5-XM^^@an1(BUx;zs$S2$s?finG=pr$iN#R7NwLGrEVH2TN2B+< zTSqRth2pkP@~D&H{vMH+e|JrvSWKZ#E)NblEKQpBm2Yx1%+@?^WFJ4_1gy8MoXP7K zx%1D!>W`H{Klo>wd5(UQZD=ciznrcwOGeCIPy2DC|G)!yVNg-ps!kem0j3?BJ$yA} z?m8olnuwIrWNr`B%!P(OK{4G?aN)71lh{L%k83osT$2n@?z>c)y!@HDP) zjY%xsPYqzv4BjOK+UQ(st;ijH{z{ZPE;!X6kb(_=fMvox2TKaS6jPmG?e7Wdx^Er$ z6fW3MuPUDTth@@VoKx{Dgfo!W-BnnxB-#j2h~}3xnXLM-h9=c$@o#>(u2?=SVY8)O zz4E==Op_$^yU8<6W!DLozDW0{T2to46!Aq_R<2E-Kmcyi`n zSi0Erc)bjYVtpwav-o|v@;yS+(|LYm^M>%`n6i#`wJhhmIEw zH@a%O;sq$$Y1_GDY877}+NfbmoiR^*efB;6MbC>ywS!+06^gsQjeb!=kyI%qyZTj?>+PA1=G&(-d>Ap)f`bLc!vO z)|_sGd9Zk^zpbf6drwNOgIMLtIsralGQA#pE=x6Aavv2@%vLV9TwLC4(qu-htZY1{ z?vije$!*It>-A{^cN2?Zty{9lXgCiZXf!~T0e>@LX?dRV?N%VYtQG(e;GGFL3+^A_ z&q!@DAZ)(*73px15SK#Gbz|eg5>wOK*CoAPv;GjB^O15=v&W%jG=|IZ)}ZA)yvvNH zcGP60VI+n0#|`xUpM%{qV`P_|I$S{I9cDrsQH>EyG z_P2d1i!!3((<1|0L6GH|-qLx(vi-Z&-()sC7(vHjTRAf5Yqtvs=}#Y<59InXOj9Rt z2om-C;27AX$PeI8^*`!zZIwDV)7AJI1C_9hh**kR4l9$_-@9hduaI)I(Y?>tV6aO# zt$}1f0U{Af&`R6DD z>#U?ir=nQPEfi)qfAX!(m7vUDmy#m~E5C@RwNP_c7r)`D+?^U_La+}h{|0%Vpn1jX z(QU3U!r_~bCii1)2n{|bBDUt+HOTWzuRy`flaGxHKhDF%q1ySo=pkDBh2@*!7zmKd z0EfLURMoMdN(c6VXi5M3CkCMW;!e1utvjMs&5?#Glhc5**}Lp@RrI5=73}8RzBuBu zUi|80n1NiNicERq%*MXdaqOZ#7bifbeEeI?M}57RPQ!0=A*EE>!-yVOIiVriQsZrz z&bR*SIz;V=!!u{z(?Z{`Wp?YZ2@C3he3-TrYp>GC5Ii3PDH*<1+739W;qe56$;;gw z!Rem5!VwZRFQP;ns%mp)Zv&Q@mQKv&Us3p2D=$+ldWHEN?_jdv-CL9;;?A=3 zPUIsGdbK)|{;H_HjABhx)W=eeua3SvYf?MhkClid$YtGLJFZK7OW-kJv^^$D=DBMSyH_sEgm+%MyTU~tWI+dtKP z19?rScC~Xn`O|k*Uj!+Sl}BzO1OR{$fvcu7*nQNs%3>%10#TN@)B>GE@6j68ZEsVX zzYKKj#Db8Y^wv178II%ybt~(zUtX+57iJnutQ7i_THH%uBCL*hmiu7=;xZb(M0;D) zR!jHyF$C#}(Op($6hujh&d3MN%d+$ZJ^lQVt7a2{`RN$y)S8Pl`A+=POXT4LI!4@G z${<}xWQ&H$u1JDAdVNyro`-gvZ=(lQvQEvC9W`+E`65>^j3z3n#y|lKUh+Sn`KKTk zRhaSm>cyE 
zhDL&Rd2b=(o*OgHNX1gwU`Nyt3oQN3%zb61xI;+U3R?=z2|aKgp8n#T5Lii4OTjrI64-f3Aj&3fvFo~<8n^jOph4jyN5X68???nl%k!E!R&@ZeGr+)_c$TqN86x@L7I#5?0K3Ll4M)azmwHtU)#)RnCh4@DMj$@-3 zfRzTEh<&%TlA&t#$&bL3B*uiM<2*&;4w|A8QQNKWdKpok4GTvr5V^H^pG+4qqWdw& zQSu$Eaa5>FQL)BvYo_iS(e~cq0lyTbxR5U|K(|%YIn#2}wn((5^b3qfigN#OQega9hBgU6lClDZ68!a2WAR#DBCodLu3r#`@evnt!Q>IsG#3(lc{eCu6z#Cmnt$jCr!goF53xs2iz- z++0V{XK^`0JtzTusm0|ZJ>??Y;LeqxUi5^kQy6SZgx!o6pnC4*0&*ke{Sh|ntd?(g z{;l3=`uTYBZa9rL491Z0hHBwKWRWUk=NC<7n$WE|Iv{4FGnoUYwz8uJ&JV0i&>*!? ztb>vUu|Oz)PvaeT--11zMZbcb1+oaoYoU|)svBSM=dr=$E0v6(w$V=fZvLSD%`@laaXbMTYj%vaAmMZ`AU zPQj6^^2q(|D~=f?fEu7`Z+>tnRhd>WJ1fZk@O+;9ZR##0e?Sk`?ak!d;Eylr2V-?> zT*LwM(m!OrZD0ZDBP#a5QbwAkcAIlM?ltxEmrXDjYvV3~2zyY^@VBLX!5=Wcr`kCftxRmeE^vDIC8TdL_rO`YL1 zH5* z@wc*|znk6fYW76^KjDSN8(G#LF6p=`>3o}TMMCyQ{SzENv5E!y;%4nqO`bkfJMMs# z3a*SY{f3`Az95RaAX&4UfVM6!CcWce2aKAZ_DP{$7-SBec0R+OY6t=U#m%zwV$3mZ z7FXD&d){yISLJ?mK4rO23o5MCQ~`m}t)@6zk_D z4hGluH}2Ucl;qiaNmsJ}T~~`DyQjQ$cmdR2iJhes1=_UMAHY@ypnKLgIe!lWh#ZFD z{ij;MRW1YESAeJv8lb-Vfg}*vNb~Vr)PoZBHl6bu{eFRUaA4=SbyR5}TzBHgpi%ZA z=qO@0ij}|(r4CmH%C7)UzH?Dt9l770B#J$ns!u$@zmkxte|EH%268qr`Z0@v0OFm4Ug85a*mK+MsRp@$~ zc_*fnfWiD7DIkKfox8Cy?myo!KkCs&+((_!xRj^i+Q1mx)Z69PR@-lb9YH|30p5Dgk{ z%$v%{lBU;&r~QD#r?rAlT6rUVt|D*Jx7fO*cN3G(;P?zBRiIM)3iqc|suG>nAm;>& z;=G&Bdqo!sBi#jE}?5q0El@MzzYRO!-^7BiymFw(VWO*4PqDYqrIPn)>wo zuOj9LD*Kn7gXGsgwt6q&ocs9km)U z1laM_l(+AQ@-19s+t={o<0zf~d-vq5E0lYVJw<=Mie6cO-KjTDdg8-`N!1zu2=w3! 
zjWQ9bLK@fc@nso~`)ota72Qxo-QZen)YtP=9!qHTh&{&A6T2ySHn4VF?%pX>Sv;{< zj(lYfenKZOPnQAn^i}d9{hd*zD@@uglp%0@@k{)b&oZ1(%&A+H+^4*GbHBF3vW?ZT zz@fGB;AaC-PF(OVg}7~FE|FhJnimN>TvDts~wl!A%h7Bw|=b^{a-DbF`w$iGenOdOEot+yXWdCXK|PB z;dHrW$~29L{q$HcQ@~I#wIa?oS%?L0z1UK<@^y2aG=R=^NdXnb(LPkQKTkeAhC+aY zD^z*4{)IPc9J?Hvms5y_#>3c9m>}gzUyEoZ06YYO{0bZCKIRF1A|^&DQTo-t%GLzSiPT1r>u zAfh?=zy(;ZTGoabtIK7EkP|>+!j9ScwfLB={Ra<=No5PtAv*0ST_6xBpWTJBIkgX% zfS4XnTo5V#sz@-b_oJY5fG&?G@R;V8zw9N37>w0Y$(+2x5f)sxRXqv@C$j5@!Uhvi zfmWsOj?AAB4bm ze%%5$OJxE^J+gL{Q}eK13+}qMjxHWE&o(Q9N5t4kOxOn+2|Mbc2I}{$wXT&Oc2?R`5+(GGPbnnNErNyaABe&0jKYJ@ zc*?i3duTR!rU%bA-x`$fxfmXeByR127)f%y=D!0&=6`1i=nT5c_x0V}Q3CyYY3g28 zn2~Vs-#ozpxFJugR-sz%8hm^5MUz3<882VI?0AaWeEak6p8Ls$1qtxn5UKIUG<+}V zu;cSp&EvtBWZOTmShH{X&BodKIU*xpvFLB@VB&K4rK1o-F-!C;H*E(Kl?z3X3+ z=wpjR?w!({@{$Kj3P5U|lcvTS)sNp#6P)GYjj6p3{E&_Y^4hn|pJBZVnZ^6myA`|L zpP=0}ewwAF*P6ARK`Yq`Ps8$&PKHT=Fa>)KWOi20QAjkI`z2~2o((70vcq6h{ztx) z09gy^$X1*0i)D^#Tg85KPr$SbNyL zdD~y>+}0vph*}{#>P~{pgklA*46OHLBXsP}R%m05=;5acbyPu)oS}J5;v3td{Z6b< z?~f1P#|amHNNa~Y!%NoR^$-F5c+E&wJZ|E$auH(OJo=SZMgG87)$BQz3Ez_ z!#tRw#qgdYLLZ#ehAehn%F5$5y51c+x0>93OWxdQVw4x z*+a&iyZE;=RJ!krtBKT8_u}{&jghqJ4$@@ZxR@VJ;T)6sJ}8cBJB4}+pRlmysQal} zGMpH~1^pfZuUH-<=zp$#{gJoJJcj71Dk|}X*(&y9WD%OBg#L_m#kEy^?+RToc*AR08Ed@fB21mlf`*>D`>TD8pG~dFI0B}$BHFoo3exZnM zafb6gf5jPJa$tWYs*~<^fR-?)OOkA13Qme<*rwd?x694T%Ruiv3SU>i}*c~iV6 z42r~fAXRIYKGi12{)PSLsqMcwQ;4R$ju6l*Qnu*HpL|yg2urj>Q+?0g^PpV4&QN_y z$`@ywMf0Ps8gtd@XZKS`*Xw>6@WeJ3$_m&zzVFW8#4Jrf_n&L$XJt?{afXt)xJG9! 
zR@;-1&y`5>JI8-@KA}@q*F0-~OChwUFj;tk(M9V;DDd<0pfzMZsu|@2&>7K_0FQ5yTwRw(-myz)=2jEZ)NS~h1sQ$t>+75 zur;4<61$ogcc>2jZuL_C5~ski8Ctt(X(f+Gr)?~;%OL&U=}A=Z&-!zB>^LS@ev0Up zlIHSmCUO2gq0D_Up@~G_LPP*+Bh!7xMslC3r~E0)?q0pXfPxqHY{O$&y}op1KOUCa z+}G-5yiYz>sU;)fN&70gONX^<8}KI+2VAdu7pe~9*S|fVdXTAck&hj^RI=;ncRfIb zHZ(OwwWbQ}ashQKYWm-p2(XR_W-53F`H|3bn1F)%PCMU~$xN<`Rs#DkG^Z}KfaW?I{r`Ce~!4{uDhxkB$r z7Bj!T*nFgNtVD{I?4UD}de{h3Pl}o1l)*CvjO~oZ$K$s<$U>VPkIt%~z<18&z;(CI!yaMw^XjDY909S+>;98F2;ICq#$nLt z6m`=tFU7%>S4salR6o!s=Ilr(|Xf(tCR@uC`7y1;h-OamL#xt(*s+yn!` z*Qdg=4gGn$g2s4DJlZm^U%%wn^$e@ee(ikxLH)gpBy!^0dU9H+o3WJZ8k&ZprSR93 z$Xx-s1{kon+Q+^S6g636Z%yeK0IRuIYxFNHd41>)O~ej@1kde&FZ@>I^5@eMA17(2 z;IopC&IG{IdS28R%z%Gyj)E@IOS=(aGF)}W8Tu&1+-R!pdU+Zfw7kQkH%e_|*M>j) zJOR&HL(=G@#kjnpdL!DSy}KD^DSSHsj_S`=x5GS3k)ArDZ`AJfg6}P`sM~sB14vUJ-C%Vqiay z>e+6^g$jNXux_6X%F|cbt+e(mG9AIUWxRHX*#2{Utf$O<8h(x{_Eqebo&5VOs?_H9Ofx8*w^{Ti#fleeED$Q>tyk$wY>nI(>VYSI+ByCUsTS4nds6MqV`!*e&TO8l%r-(u7Uk!qX;FmvwJK#(BLI3%} z@(RI!G^?VN9iYEYK$C2Xb5TR9g7%>10;^z?FK-HXe=zohl)mT(IMgMhp&~A z9v$Pz4}O@s5aShKV8~HkrL#}aeBx=tbw7b1cOLx;(26C0*BuI%D)G2hZyURv&R`axKexE-zA{eL%gxP-? z=@@QIxmHS5t5qTymzo*|YAOAE2uhxqntCfK$*mhm2Dv(|30a+SSY-CEwYIS_X}J58 zTAKcxDJLrj_N_odj-Ci#i^Sl0x?=CzQSeU;VZMs~Wi8N*IILU90{C=mJej?PT^4J! 
zUA4s5l(QSejAJUwVBy(p!idA>Fu}ABhtI4rCVN1Gd@(cZgg57+-%VP`PZ-ac{q--tE)!T zQ3B>8l)w+f@v^Gg+NnuNeJ-?i<51|yMxy?b>$I*s*(&4Z+O8AOvD@G~Z7k?!7Li@+ z^yt}z9Na|g(8%ADGO-8Wcr^h@dYk0uWU#-rYeNHWEC%@EIIEKGdvcB+YF7OdKLnGd z=?Nse8M}fd3%2L=6z0zUX;?-m@TeG0IeVw5?Jv5qLGLMvT2B=lHr%ZZjfx_Ba5u+( zlf}r$XvT&sWjre@6)d`nNSMJD8c8V6ACu^h*aEZyj+XI$BdX5AsRz25A8?@2-<)Th9{4b zIonI>bb!jzrVWoS3@c~#Tr2MA04{O)kq+QYxw9DH))kLomu6P7Jxnam?AQT2Gax$3 z-u;ZFbDQY_ur5>Fp#RYV0L#>1{PiXFR^Z3KjymJf3XQ>#L(0wSE(i^Mlqcs<)X*TD zGuG!qa*#7-F*grPW`{437X}o#2JR1;1w_@V^6Ix@p8`&s@K_WJ%OePwt((8w+1V*A z<;*K`psChc93StvxdL?C%G&zo3O4-OmX<9485}W(3gC`6Hzfqx+5n%2r2lPr<<(cj zU%q^4Q$seq|BjxhOzSGLbER0PK4zQMfaoL}hKJQz-(u+_kG_M|_TvR<*v%N-zI|Je zk}GAG=ux1Lc;+_iLeSvG6#@Y)((CeBn=;|qTb%!G_i3LuynW*zu&_j_J3wt`ogshs ztE4A?ccvyODXDuwnyv8gF6WX&tuGIbZM zS`-5Y#&}JN>OI!|&I!PKdBm@-t_nH-25j~kXc3AN_y%ryS*BLxDXQn-zzq$o3sD4$ zCznKohnqAI2moW7o0|)~wmjLJ)~l^vqxg}P=b((yF!vGtmuzv{q01?NM_c~7eELTS z=@Hc{eswfRUQ>W%JVnU#C&)VI+z21}K7Qm%} z0;J}^n?%ddhmmoa0smWZ5M)y?HL1B@G_1GqeIGRb^Hnn@y#l_ny~7VAL1{d|tVw!r zeR{_JdNqRR z*XfjIX3_xGjh{MfhBQeSv4Du&$E$hj^#B6qIn0KAJ@|p56GQz&oY@iN*Pvp(+ULFp zOCNW44=;0}K8OR}vhG%1cJb-vIi+a9#MK~RC3}9RMGIkOKDV$IXuC2uN7wi+D2~<; zLk|e)&Xpbk58L)F@#u7H<{ElQ!xl8#Hqjo<`A$)`<8_F%pzdOg&)Wx3RbXCfQ;HRK zYZ5u;;}KFZRV-X<$@?roH$svw=#=K4E zMG!~JTF?158KLO5q5YNEOc>6}vmCf}Z=+0|IPk8f9D~SVJLn5%~3N|)0+&s;Mm8+Z_4zNO^ZVcfpJI%6kR#r}~3|?Jw-`PFNcHI_zkt)ZgKd9t+ zvK~6!;)lBIZaIpL=a4M+qY!TJE)Hlw6B7D*l{#L7o@9~L<{Smxj$%@0LDM@oOK+&4E2U1X=?6o{mYsuqpRL zU5$b;KH-6YGmy8+_I&w4_`}a~J4FZTxu1hiN*HstQ}TRLQ&WqIilmp!dcvOx=+I+f z-Dy#;fs6XIKut_5H7ee>;@2*%cN*a?v0r??vS*4fxnFs*5|nQ(amNFnJyTIK28()3 z)esi+>X4(Q^w&D`$Kmv5u2E#e&gNjY&y2+0gQs5(;lj9@^ZHhE$L;O(7-jx&-*ZW5 zrEe4wk=hq)veKn%J>b#b^3WP@Vr&4U^{KUC9pBC>N47|sSFo{9&mO! 
z34U!dxzi(mjrQ7>sLw<9K2xK^JAw<%KtQ%#LcXrQsVZEQ)q0syr8BO~AsX7Xc8j(4 znQS{}R%?Xlu|t}+08=wFJ6H>9q2BpVlt%hAvS80%TR^O`RbGxhRKeRWXfJl()1Bmr zI2X;ztKLW|?MAo#O!R5~`uch_OiWySDR=TVR$V7xL5sbmJEw%y%$G=k+Amip_id7q zJp%E==_u)J2~^9HW8+CVqIWKGkJr^KEMnMYd);K7sROPZW2UQu*R5Y+l%S{p8+%e?AphQS_pjAFi~mIW?R-s&6M} z!u&BV`c(Yc97f4X1BeFvm60>j6>{*=kA(`WhYrzazY^3D5NG4Ejd-5q$FtN}qWu^j zRm7}N{H&RPrmbWo+I>&k=3Ft>)!{M<7yqZt)CG(u7(OpxND9)W1nJsT%v`pJOHM3U zjgoqH@+yW_~7Xav+=vJ0v36Fykm}| zvLsyhLXn=B#W~Z@BaFegR~Pa*vLXjO8*eMSfO>yh!u7_;yz-8lB=VV8+UYg<2&1po z_C#gpDj#i5-fG)5q2xgI&UyvR%1U<8@rRkh-3w|#Flo{qflE~`dmW9rpJi4r1%ox% zLm#Yz8k`Kuh+E2{#*7A8KKiOH%m~z3SpDj$>+>9MJG#_qt0q7%=Bj9VXweD~5cX4p zlH?kBmKGt~uc|}1!gQl+E-p(;oRdp`SucMD14a5fr+=Fwg;iX{d5xXf<4s$U@)>*E zO~V>Kq?S%6~vB0;$mtd5X$j?(3QJiTj*K+M}^Q614Ct8zPee-)T{0n zt1_JC28}JFVX00}aCH;aaf9&)?ZU&la?4^LbYSW!tB2Ik`JTcVpo@X z7i0k2w49s(7rrcYyDk!$YR%bDiq98y<~}KiJ~+S7A|WAh{yk&}Y~Te&ML+<0`;-6n zn+3_7F$cl4_i4EIgeFYk_4trDO;%n~u$Ran>7wnRM;Gd?~Z-3k8I zHtng6Cc%@{BseOXf2&pI@&(73yx4&bw>Gr@Bz`T znt5Y4nlg(PQ8|z_Irkf&Ro}V3)%JigAM(9-ZVqA&u~Og|O_v3K;4OWe*++nGZ~QZb zhi3mPro70o1k;6vgaGmLP5ed_@&i+GNFDVjsC1ebKF}0aWnG02m+kOruH}d6*EuhJ zOO0^&SiH?g!}(z&-umh-k=d6Y126LLMNJIRK)|bifr>*?qayttN7KFsS_{^BCekJ^nq z7!`J|DC|Z+3Og_LHDQ?^S2fn2D-Vd#^_##-D;yaFwgeo~ynz?QZ$&G*fx1zj|HWP$ za&E$p_&lghHSr|aqrnW*DWLRlJqld`u^xZMrhQe$_O^O|F_X05Ad;fsV&wq@V`6Hp z7+t7r?IQlgGsPD4Q`BZAJ zu|9e8tOewP1~Rv&O5W)(ALa1ar}VYAMi#5eMTYF5A%uV61#m?naf{#ZPyZ|O4piqj zQ&(4Kik4~0-^_fQ1H`FIl@pom2EVplh)oJw;n?@~2FyBt`@&M$r>b!}XtZCR^tc1l z8z*ZjFF&+*hy~)*p7z~ffqe5m;ZF9cTi0KlbJfTaR)H=F#E9At0FHs6)o`Bq`MHwY zU#Mv*yRL4gr~~ZkTK%5+G~@73pq_E@1)Q#^uB|Qdx&suo%WLqXWU#p2gf81C9$oZd z_){;DodX$vYDrZUA0y-2<=sllcN4wlJ6gDAMueliOlstUb)7H=Nb9h{2+IdwRkz$^ zjFV>HTTe-sV3Iv>UH zz^ix8ojTQxRQ;7}*^-(tR$$pQ+*^q%P8!3^t{8!%CjIJuzLXwYt`j9r& z{d`(QL3k~GQ-hCp2y@?kz^qbQaQ5Y!JxpognS-*iue5!v|39DXt#rB??u`?k2hPBw z|KlY}-}Q`*bIGM}@QXxs`aL(i>Z~1kTKukH_J)C5e5J5j1rnQ&r8Ex^jA4`Q zn^&SfMSJKCXb_uO-P-*5K}P4JNyzx`mT1$l_JZTSXIX%$?Kk~=0e}No9^9_zjG)-x 
z@9x~a4RLula9&Ku0VPq81U4A?#AP)gAbXqRc)Bkd_%_GV-k!!+M9Gl*=||RhkqZmR zEOvNYp$N?=C(_kzWoH_5qN;iFt&Ds*mS#7-KQovTuh4eakF9ka!F{q@2u>@o-{R(X zPqCVBSQ;oAVr#8esrqrfe57X8ivF5sUS`!QcJDq{{)> zk6Pob6|c4}E>#bVhUKM}O=t5|zd_BpMyYtf^7lus`6rzcfvnB_;}_w1&?bS_LU`7& z85;)&m0br0#;^nP(eZdcX5cIs%>0Ia#?5;OE-<7o>bgb!DHx^qU?tOVptXeT`Vqfx z@GNi^^50c=TmRkxhL)=H4;puLZZg_>yJ~0iUo1N!J?~+ud+q0kJbj3jWj35}G4&C~w1VxQdlDcSv6zvt!5 zzmdEpI)WO^Z}l5pC8>N9RD25pvtG#nzkRi*1g0JBav|ZS{&j+n)z*9|*##FVT{%R2 z=}e<7)~x&roEcfg#r2*Ogtw&U2j6Ux%@%e_bkX7Er;8p*tfW3iU7;Aj5<-6~62b&F zb#g4L3efG;O$@@kt+LQriS_5%$wU|?^-(wRR}A0ur>7K9xBV9Q2z6VUVZFWPW7$$D zSm6)lM3o7T*SWhC_1=z>b1?-#e@%%FeX0AF_+qtk5G%QXBbz+7BZsYXY^>!YBlv90 zrB_czYns;a$IpF*Bi>>~&wb?7DzsJFe?0&1o~7fH!kQSxjb|w$<-f6!!66VkzZDX$o-yYt&<$W!zf*F28>+&BM>b zPEXg4)0v=(8|LoZx{m>{ah2g;-kt)*Rj4`^78$VPp*^NQ zlSRhy?aE8iKU_xpb^5gX%UuOQ-25)*5EASJPpRmTKG!8+RNhr~126lxT0`ZoW!tL7 zgtY9h;6xou&N>$_2fzsqabs58f@bg)A7CB>AiCF;#h%Ux^ka@0eCjZY2qUW9|2s{q6|by#v84yOSMO-4tr zP{Oyi9Nbi*LPK{z8^Hh`y%5l=WT{cl!0{$yR3c<7gjeFahuBovQ>g@*S20;Xq3@oL z9?z8%sDoj(z&i&!j^(_9`>d*+1l5mIN-Ix(Md31WZkMHSlHLnNe!VywQv7Rj>Uil8T5*)O8J+ zQ$4Wfrp;UHLSTT4qN1W2QDp^yO#LK~-ki_R@|I2uSMj*}M*=-kOZ?hzBqi)xecsAA zS5`wHV`BQXUg=VOrg8yy-LQRK@>+hh_>MhU;>sPT@C0vi89LA=bgmdQ+%Ge}TG{Aw zl@_4lnt3K|GVCj8rP5oZfDNZXwMdeNF+|Il4JEzo;Lpm+0y=`#?}nb;eY|uM6;_NT zD)AR=A59k#_zoqKe>xz>_Ip3ZlmaBv$P>e7ukPX2iw3%|aEZ9>&CFF>V~(Q03*PYN z17_#jKY#vA;x)$ufEOj;ATLRGl#u}@kNDcTau*w$S2sBI>Erw9ie%583Hdc%9(>q6 zk0PPc74v-&ZPYHXT?8N$E{m(VecH@Az25zg6gh?bpStQ2+|_#FJ44hZV+T&i#6*t5+GH?X6m8-}MU z7*U@G!%phE9D0jP71D)5zpnZXBNM;O)WTTEoPrM6m(iB_U-8z$GUNHIB1VFSuh@2z zT+e1kCU^Bfen;Euvv#GAz6Kn^-Bjzb!M`$unHT9RaiM!l%pRE!3gPC{Bmq8+VXq336oip)3-PjvfHhkD{~vA9Hh-Ra((k#U+Z zF1b=+y6f(n;OI*>>sj+(aOu%15Mg&$*C||k`-q+>M27D)z4C3C?Rw8O7bU0V)|PH= znWnwfKHog2xljC*jN}Ls46m?+`>E(I&UyEXK`ipRl*rdpXbNeK!TJ4OGGm9`_grCp zF_xFW}KW{Y`hz;O=VPyrX1AJ<9K$X1o#J z*u>1t(_$&WgA*se^L*|LEvu|LE_qguU0WOza-ULCquLZPn_RX(8|UD#JCE%Sd*KX! zh}FXW>V?Rw&XVi2FI`42ZzAk}{=ou1;@>j^yj<<7Q$6asQwVhiN)#8@wy&83@RX